diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.0005_seed_42/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.0005_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..c74a964e8f4c84a2920686bab03fd2c4153eee74 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.0005_seed_42/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.001, + "muon_lr": 0.0005, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "0f19f4fa-5d1f-488f-a0ef-2821e6b96808", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.0005_seed_42/training_log_0f19f4fa-5d1f-488f-a0ef-2821e6b96808.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.0005_seed_42/training_log_0f19f4fa-5d1f-488f-a0ef-2821e6b96808.txt new file mode 100644 index 0000000000000000000000000000000000000000..dea3dcf4c7aca4d138763d4425e5ef59a34bddba --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.0005_seed_42/training_log_0f19f4fa-5d1f-488f-a0ef-2821e6b96808.txt @@ -0,0 +1,4264 @@ +[2025-09-11 12:38:20] [Rank 0] PRINT: --- Script Start: Thu Sep 11 12:38:20 2025 --- +[2025-09-11 12:38:20] [Rank 0] PRINT: --- Script Start: Thu Sep 11 12:38:20 2025 --- 
+[2025-09-11 12:38:20] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001, muon_lr=0.0005, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 12:38:20] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001, muon_lr=0.0005, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 12:38:20] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 12:38:20] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 12:38:20] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-11 12:38:20] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-11 12:38:20] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.0005_seed_42 +[2025-09-11 12:38:20] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.0005_seed_42 +[2025-09-11 12:38:20] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention 
+sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + 
batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 
# FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 12:38:20] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 12:38:20] [Rank 0] PRINT: Constructing model... +[2025-09-11 12:38:20] [Rank 0] PRINT: Constructing model... +[2025-09-11 12:38:21] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 12:38:21] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 12:38:21] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 12:38:21] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 12:38:21] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 12:38:21] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 12:38:21] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 12:38:21] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 12:38:21] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 12:38:21] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 12:38:23] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 12:38:23] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 12:38:23] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 12:38:23] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 12:38:23] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 12:38:23] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 12:38:29] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 12:38:29] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 12:38:29] [Rank 0] PRINT: Starting warmup... +[2025-09-11 12:38:29] [Rank 0] PRINT: Starting warmup... +[2025-09-11 12:39:05] [Rank 0] PRINT: Warmup complete. +[2025-09-11 12:39:05] [Rank 0] PRINT: Warmup complete. +[2025-09-11 12:39:05] [Rank 0] PRINT: Starting training... +[2025-09-11 12:39:05] [Rank 0] PRINT: Starting training... 
+[2025-09-11 12:39:07] [Rank 0] step:21/10000 train_time:1142ms step_avg:54.40ms +[2025-09-11 12:39:07] [Rank 0] step:21/10000 train_time:1142ms step_avg:54.40ms +[2025-09-11 12:39:07] [Rank 0] step:41/10000 train_time:1871ms step_avg:45.63ms +[2025-09-11 12:39:07] [Rank 0] step:41/10000 train_time:1871ms step_avg:45.63ms +[2025-09-11 12:39:08] [Rank 0] step:61/10000 train_time:2599ms step_avg:42.60ms +[2025-09-11 12:39:08] [Rank 0] step:61/10000 train_time:2599ms step_avg:42.60ms +[2025-09-11 12:39:09] [Rank 0] step:81/10000 train_time:3326ms step_avg:41.07ms +[2025-09-11 12:39:09] [Rank 0] step:81/10000 train_time:3326ms step_avg:41.07ms +[2025-09-11 12:39:09] [Rank 0] step:101/10000 train_time:4054ms step_avg:40.14ms +[2025-09-11 12:39:09] [Rank 0] step:101/10000 train_time:4054ms step_avg:40.14ms +[2025-09-11 12:39:10] [Rank 0] step:121/10000 train_time:4782ms step_avg:39.52ms +[2025-09-11 12:39:10] [Rank 0] step:121/10000 train_time:4782ms step_avg:39.52ms +[2025-09-11 12:39:11] [Rank 0] step:141/10000 train_time:5509ms step_avg:39.07ms +[2025-09-11 12:39:11] [Rank 0] step:141/10000 train_time:5509ms step_avg:39.07ms +[2025-09-11 12:39:12] [Rank 0] step:161/10000 train_time:6236ms step_avg:38.73ms +[2025-09-11 12:39:12] [Rank 0] step:161/10000 train_time:6236ms step_avg:38.73ms +[2025-09-11 12:39:12] [Rank 0] step:181/10000 train_time:6963ms step_avg:38.47ms +[2025-09-11 12:39:12] [Rank 0] step:181/10000 train_time:6963ms step_avg:38.47ms +[2025-09-11 12:39:13] [Rank 0] step:201/10000 train_time:7691ms step_avg:38.26ms +[2025-09-11 12:39:13] [Rank 0] step:201/10000 train_time:7691ms step_avg:38.26ms +[2025-09-11 12:39:14] [Rank 0] step:221/10000 train_time:8419ms step_avg:38.09ms +[2025-09-11 12:39:14] [Rank 0] step:221/10000 train_time:8419ms step_avg:38.09ms +[2025-09-11 12:39:15] [Rank 0] step:241/10000 train_time:9147ms step_avg:37.95ms +[2025-09-11 12:39:15] [Rank 0] step:241/10000 train_time:9147ms step_avg:37.95ms +[2025-09-11 12:39:15] [Rank 0] 
step:261/10000 train_time:9875ms step_avg:37.84ms +[2025-09-11 12:39:15] [Rank 0] step:261/10000 train_time:9875ms step_avg:37.84ms +[2025-09-11 12:39:16] [Rank 0] step:281/10000 train_time:10602ms step_avg:37.73ms +[2025-09-11 12:39:16] [Rank 0] step:281/10000 train_time:10602ms step_avg:37.73ms +[2025-09-11 12:39:17] [Rank 0] step:301/10000 train_time:11330ms step_avg:37.64ms +[2025-09-11 12:39:17] [Rank 0] step:301/10000 train_time:11330ms step_avg:37.64ms +[2025-09-11 12:39:17] [Rank 0] step:321/10000 train_time:12057ms step_avg:37.56ms +[2025-09-11 12:39:17] [Rank 0] step:321/10000 train_time:12057ms step_avg:37.56ms +[2025-09-11 12:39:18] [Rank 0] step:341/10000 train_time:12785ms step_avg:37.49ms +[2025-09-11 12:39:18] [Rank 0] step:341/10000 train_time:12785ms step_avg:37.49ms +[2025-09-11 12:39:19] [Rank 0] step:361/10000 train_time:13512ms step_avg:37.43ms +[2025-09-11 12:39:19] [Rank 0] step:361/10000 train_time:13512ms step_avg:37.43ms +[2025-09-11 12:39:20] [Rank 0] step:381/10000 train_time:14240ms step_avg:37.38ms +[2025-09-11 12:39:20] [Rank 0] step:381/10000 train_time:14240ms step_avg:37.38ms +[2025-09-11 12:39:20] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 12:39:20] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 12:39:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 12:39:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 12:39:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 12:39:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 12:39:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:39:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 12:39:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 12:39:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 12:39:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 12:39:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 12:40:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 12:40:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 12:40:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 12:40:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 12:40:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 12:40:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 12:40:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 12:40:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 12:40:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 12:40:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 12:40:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 12:40:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 12:40:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 12:40:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 12:40:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 12:40:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 12:40:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 12:40:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 12:40:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 12:40:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 12:40:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 12:40:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 12:40:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 12:40:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 12:40:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 12:40:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 12:40:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 12:40:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 12:40:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 12:40:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 12:40:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 12:40:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 12:40:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 12:40:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:40:06] [Rank 0] PRINT: step:400/10000 val_loss:7.2894 total_sharp:1.4870e-03 L1_sharp:5.8279e-02 L2_sharp:5.8495e-02 L3_sharp:6.7034e-02 L4_sharp:8.1892e-02 L5_sharp:9.4824e-02 L6_sharp:8.9761e-02 L7_sharp:9.1190e-02 L8_sharp:1.0521e-01 L9_sharp:1.2702e-01 L10_sharp:1.2718e-01 L11_sharp:1.3483e-01 L12_sharp:1.3939e-01 total_fnorm:5.4136e+00 total_l1_linf:1.4431e+04 total_spectral:2.7069e+00 L1_fnorm:5.5242e-02 L2_fnorm:5.5121e-02 L3_fnorm:5.4747e-02 L4_fnorm:5.4404e-02 L5_fnorm:5.3806e-02 L6_fnorm:5.3926e-02 L7_fnorm:5.2804e-02 L8_fnorm:5.1635e-02 L9_fnorm:5.0972e-02 L10_fnorm:4.8635e-02 L11_fnorm:4.5768e-02 L12_fnorm:4.4107e-02 L1_l1linf:2.2712e-02 L2_l1linf:2.2717e-02 L3_l1linf:2.2645e-02 L4_l1linf:2.2553e-02 L5_l1linf:2.2562e-02 L6_l1linf:2.2050e-02 L7_l1linf:2.1854e-02 L8_l1linf:2.1490e-02 L9_l1linf:2.0727e-02 L10_l1linf:2.0184e-02 L11_l1linf:1.9450e-02 L12_l1linf:1.8277e-02 L1_spectral:6.0229e-04 L2_spectral:6.0218e-04 L3_spectral:6.0247e-04 L4_spectral:6.0244e-04 L5_spectral:6.0232e-04 L6_spectral:6.0285e-04 L7_spectral:6.0215e-04 L8_spectral:6.0225e-04 L9_spectral:6.0222e-04 L10_spectral:6.0231e-04 L11_spectral:6.0193e-04 L12_spectral:6.0186e-04 train_time:14948ms step_avg:37.37ms +[2025-09-11 12:40:06] [Rank 0] PRINT: step:400/10000 val_loss:7.2894 total_sharp:1.4870e-03 L1_sharp:5.8279e-02 L2_sharp:5.8495e-02 L3_sharp:6.7034e-02 L4_sharp:8.1892e-02 L5_sharp:9.4824e-02 L6_sharp:8.9761e-02 L7_sharp:9.1190e-02 L8_sharp:1.0521e-01 L9_sharp:1.2702e-01 L10_sharp:1.2718e-01 L11_sharp:1.3483e-01 L12_sharp:1.3939e-01 total_fnorm:5.4136e+00 total_l1_linf:1.4431e+04 total_spectral:2.7069e+00 L1_fnorm:5.5242e-02 L2_fnorm:5.5121e-02 L3_fnorm:5.4747e-02 L4_fnorm:5.4404e-02 L5_fnorm:5.3806e-02 L6_fnorm:5.3926e-02 L7_fnorm:5.2804e-02 L8_fnorm:5.1635e-02 L9_fnorm:5.0972e-02 L10_fnorm:4.8635e-02 L11_fnorm:4.5768e-02 L12_fnorm:4.4107e-02 L1_l1linf:2.2712e-02 L2_l1linf:2.2717e-02 L3_l1linf:2.2645e-02 L4_l1linf:2.2553e-02 L5_l1linf:2.2562e-02 
L6_l1linf:2.2050e-02 L7_l1linf:2.1854e-02 L8_l1linf:2.1490e-02 L9_l1linf:2.0727e-02 L10_l1linf:2.0184e-02 L11_l1linf:1.9450e-02 L12_l1linf:1.8277e-02 L1_spectral:6.0229e-04 L2_spectral:6.0218e-04 L3_spectral:6.0247e-04 L4_spectral:6.0244e-04 L5_spectral:6.0232e-04 L6_spectral:6.0285e-04 L7_spectral:6.0215e-04 L8_spectral:6.0225e-04 L9_spectral:6.0222e-04 L10_spectral:6.0231e-04 L11_spectral:6.0193e-04 L12_spectral:6.0186e-04 train_time:14948ms step_avg:37.37ms +[2025-09-11 12:40:35] [Rank 0] step:401/10000 train_time:44435ms step_avg:110.81ms +[2025-09-11 12:40:35] [Rank 0] step:401/10000 train_time:44435ms step_avg:110.81ms +[2025-09-11 12:40:38] [Rank 0] step:421/10000 train_time:46526ms step_avg:110.51ms +[2025-09-11 12:40:38] [Rank 0] step:421/10000 train_time:46526ms step_avg:110.51ms +[2025-09-11 12:40:38] [Rank 0] step:441/10000 train_time:47475ms step_avg:107.65ms +[2025-09-11 12:40:38] [Rank 0] step:441/10000 train_time:47475ms step_avg:107.65ms +[2025-09-11 12:40:39] [Rank 0] step:461/10000 train_time:48112ms step_avg:104.37ms +[2025-09-11 12:40:39] [Rank 0] step:461/10000 train_time:48112ms step_avg:104.37ms +[2025-09-11 12:40:40] [Rank 0] step:481/10000 train_time:48750ms step_avg:101.35ms +[2025-09-11 12:40:40] [Rank 0] step:481/10000 train_time:48750ms step_avg:101.35ms +[2025-09-11 12:40:40] [Rank 0] step:501/10000 train_time:49388ms step_avg:98.58ms +[2025-09-11 12:40:40] [Rank 0] step:501/10000 train_time:49388ms step_avg:98.58ms +[2025-09-11 12:40:41] [Rank 0] step:521/10000 train_time:50025ms step_avg:96.02ms +[2025-09-11 12:40:41] [Rank 0] step:521/10000 train_time:50025ms step_avg:96.02ms +[2025-09-11 12:40:42] [Rank 0] step:541/10000 train_time:50663ms step_avg:93.65ms +[2025-09-11 12:40:42] [Rank 0] step:541/10000 train_time:50663ms step_avg:93.65ms +[2025-09-11 12:40:42] [Rank 0] step:561/10000 train_time:51301ms step_avg:91.45ms +[2025-09-11 12:40:42] [Rank 0] step:561/10000 train_time:51301ms step_avg:91.45ms +[2025-09-11 12:40:43] [Rank 
0] step:581/10000 train_time:51939ms step_avg:89.40ms +[2025-09-11 12:40:43] [Rank 0] step:581/10000 train_time:51939ms step_avg:89.40ms +[2025-09-11 12:40:44] [Rank 0] step:601/10000 train_time:52576ms step_avg:87.48ms +[2025-09-11 12:40:44] [Rank 0] step:601/10000 train_time:52576ms step_avg:87.48ms +[2025-09-11 12:40:44] [Rank 0] step:621/10000 train_time:53214ms step_avg:85.69ms +[2025-09-11 12:40:44] [Rank 0] step:621/10000 train_time:53214ms step_avg:85.69ms +[2025-09-11 12:40:45] [Rank 0] step:641/10000 train_time:53851ms step_avg:84.01ms +[2025-09-11 12:40:45] [Rank 0] step:641/10000 train_time:53851ms step_avg:84.01ms +[2025-09-11 12:40:46] [Rank 0] step:661/10000 train_time:54489ms step_avg:82.43ms +[2025-09-11 12:40:46] [Rank 0] step:661/10000 train_time:54489ms step_avg:82.43ms +[2025-09-11 12:40:46] [Rank 0] step:681/10000 train_time:55126ms step_avg:80.95ms +[2025-09-11 12:40:46] [Rank 0] step:681/10000 train_time:55126ms step_avg:80.95ms +[2025-09-11 12:40:47] [Rank 0] step:701/10000 train_time:55763ms step_avg:79.55ms +[2025-09-11 12:40:47] [Rank 0] step:701/10000 train_time:55763ms step_avg:79.55ms +[2025-09-11 12:40:47] [Rank 0] step:721/10000 train_time:56400ms step_avg:78.22ms +[2025-09-11 12:40:47] [Rank 0] step:721/10000 train_time:56400ms step_avg:78.22ms +[2025-09-11 12:40:48] [Rank 0] step:741/10000 train_time:57037ms step_avg:76.97ms +[2025-09-11 12:40:48] [Rank 0] step:741/10000 train_time:57037ms step_avg:76.97ms +[2025-09-11 12:40:49] [Rank 0] step:761/10000 train_time:57683ms step_avg:75.80ms +[2025-09-11 12:40:49] [Rank 0] step:761/10000 train_time:57683ms step_avg:75.80ms +[2025-09-11 12:40:49] [Rank 0] step:781/10000 train_time:58325ms step_avg:74.68ms +[2025-09-11 12:40:49] [Rank 0] step:781/10000 train_time:58325ms step_avg:74.68ms +[2025-09-11 12:40:50] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 12:40:50] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 12:40:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 12:40:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 12:41:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 12:41:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 12:41:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:41:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:41:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 12:41:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 12:41:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 12:41:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 12:41:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 12:41:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 12:41:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 12:41:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 12:41:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 12:41:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 12:41:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 12:41:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 12:41:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 12:41:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 12:41:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 12:41:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 12:41:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 12:41:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 12:41:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 12:41:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 12:41:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 12:41:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 12:41:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 12:41:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 12:41:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 12:41:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 12:41:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 12:41:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 12:41:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 12:41:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 12:41:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 12:41:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 12:41:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... 
+[2025-09-11 12:41:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 12:41:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 12:41:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 12:41:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 12:41:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 12:41:34] [Rank 0] PRINT: step:800/10000 val_loss:6.9106 total_sharp:2.2016e-02 L1_sharp:4.9780e-01 L2_sharp:4.4171e-01 L3_sharp:5.2197e-01 L4_sharp:6.1379e-01 L5_sharp:7.9399e-01 L6_sharp:6.8317e-01 L7_sharp:8.2587e-01 L8_sharp:1.0302e+00 L9_sharp:9.5644e-01 L10_sharp:1.4604e+00 L11_sharp:1.8098e+00 L12_sharp:2.1052e+00 total_fnorm:3.0781e+00 total_l1_linf:3.3280e+03 total_spectral:1.5391e+00 L1_fnorm:4.3945e-02 L2_fnorm:4.5654e-02 L3_fnorm:4.5410e-02 L4_fnorm:4.4434e-02 L5_fnorm:4.2480e-02 L6_fnorm:4.3945e-02 L7_fnorm:4.2969e-02 L8_fnorm:3.9551e-02 L9_fnorm:4.0039e-02 L10_fnorm:3.6865e-02 L11_fnorm:3.3203e-02 L12_fnorm:3.0029e-02 L1_l1linf:1.9043e-02 L2_l1linf:1.9287e-02 L3_l1linf:1.8921e-02 L4_l1linf:1.9165e-02 L5_l1linf:1.8921e-02 L6_l1linf:1.8677e-02 L7_l1linf:1.8433e-02 L8_l1linf:1.7700e-02 L9_l1linf:1.6602e-02 L10_l1linf:1.5869e-02 L11_l1linf:1.4282e-02 L12_l1linf:1.3611e-02 L1_spectral:7.1376e-04 L2_spectral:7.1342e-04 L3_spectral:7.1608e-04 L4_spectral:7.1452e-04 L5_spectral:7.1855e-04 L6_spectral:7.1942e-04 L7_spectral:7.2291e-04 L8_spectral:7.2356e-04 L9_spectral:7.1990e-04 L10_spectral:7.1523e-04 L11_spectral:7.0032e-04 L12_spectral:6.9930e-04 train_time:58949ms step_avg:73.69ms +[2025-09-11 12:41:34] [Rank 0] PRINT: step:800/10000 val_loss:6.9106 total_sharp:2.2016e-02 L1_sharp:4.9780e-01 L2_sharp:4.4171e-01 L3_sharp:5.2197e-01 L4_sharp:6.1379e-01 L5_sharp:7.9399e-01 L6_sharp:6.8317e-01 L7_sharp:8.2587e-01 L8_sharp:1.0302e+00 
L9_sharp:9.5644e-01 L10_sharp:1.4604e+00 L11_sharp:1.8098e+00 L12_sharp:2.1052e+00 total_fnorm:3.0781e+00 total_l1_linf:3.3280e+03 total_spectral:1.5391e+00 L1_fnorm:4.3945e-02 L2_fnorm:4.5654e-02 L3_fnorm:4.5410e-02 L4_fnorm:4.4434e-02 L5_fnorm:4.2480e-02 L6_fnorm:4.3945e-02 L7_fnorm:4.2969e-02 L8_fnorm:3.9551e-02 L9_fnorm:4.0039e-02 L10_fnorm:3.6865e-02 L11_fnorm:3.3203e-02 L12_fnorm:3.0029e-02 L1_l1linf:1.9043e-02 L2_l1linf:1.9287e-02 L3_l1linf:1.8921e-02 L4_l1linf:1.9165e-02 L5_l1linf:1.8921e-02 L6_l1linf:1.8677e-02 L7_l1linf:1.8433e-02 L8_l1linf:1.7700e-02 L9_l1linf:1.6602e-02 L10_l1linf:1.5869e-02 L11_l1linf:1.4282e-02 L12_l1linf:1.3611e-02 L1_spectral:7.1376e-04 L2_spectral:7.1342e-04 L3_spectral:7.1608e-04 L4_spectral:7.1452e-04 L5_spectral:7.1855e-04 L6_spectral:7.1942e-04 L7_spectral:7.2291e-04 L8_spectral:7.2356e-04 L9_spectral:7.1990e-04 L10_spectral:7.1523e-04 L11_spectral:7.0032e-04 L12_spectral:6.9930e-04 train_time:58949ms step_avg:73.69ms +[2025-09-11 12:41:35] [Rank 0] step:801/10000 train_time:60295ms step_avg:75.27ms +[2025-09-11 12:41:35] [Rank 0] step:801/10000 train_time:60295ms step_avg:75.27ms +[2025-09-11 12:41:36] [Rank 0] step:821/10000 train_time:60952ms step_avg:74.24ms +[2025-09-11 12:41:36] [Rank 0] step:821/10000 train_time:60952ms step_avg:74.24ms +[2025-09-11 12:41:36] [Rank 0] step:841/10000 train_time:61596ms step_avg:73.24ms +[2025-09-11 12:41:36] [Rank 0] step:841/10000 train_time:61596ms step_avg:73.24ms +[2025-09-11 12:41:37] [Rank 0] step:861/10000 train_time:62239ms step_avg:72.29ms +[2025-09-11 12:41:37] [Rank 0] step:861/10000 train_time:62239ms step_avg:72.29ms +[2025-09-11 12:41:38] [Rank 0] step:881/10000 train_time:62882ms step_avg:71.38ms +[2025-09-11 12:41:38] [Rank 0] step:881/10000 train_time:62882ms step_avg:71.38ms +[2025-09-11 12:41:38] [Rank 0] step:901/10000 train_time:63525ms step_avg:70.50ms +[2025-09-11 12:41:38] [Rank 0] step:901/10000 train_time:63525ms step_avg:70.50ms +[2025-09-11 12:41:39] [Rank 0] 
step:921/10000 train_time:64475ms step_avg:70.01ms +[2025-09-11 12:41:39] [Rank 0] step:921/10000 train_time:64475ms step_avg:70.01ms +[2025-09-11 12:41:40] [Rank 0] step:941/10000 train_time:65118ms step_avg:69.20ms +[2025-09-11 12:41:40] [Rank 0] step:941/10000 train_time:65118ms step_avg:69.20ms +[2025-09-11 12:41:40] [Rank 0] step:961/10000 train_time:65761ms step_avg:68.43ms +[2025-09-11 12:41:40] [Rank 0] step:961/10000 train_time:65761ms step_avg:68.43ms +[2025-09-11 12:41:41] [Rank 0] step:981/10000 train_time:66403ms step_avg:67.69ms +[2025-09-11 12:41:41] [Rank 0] step:981/10000 train_time:66403ms step_avg:67.69ms +[2025-09-11 12:41:42] [Rank 0] step:1001/10000 train_time:67335ms step_avg:67.27ms +[2025-09-11 12:41:42] [Rank 0] step:1001/10000 train_time:67335ms step_avg:67.27ms +[2025-09-11 12:41:43] [Rank 0] step:1021/10000 train_time:67978ms step_avg:66.58ms +[2025-09-11 12:41:43] [Rank 0] step:1021/10000 train_time:67978ms step_avg:66.58ms +[2025-09-11 12:41:43] [Rank 0] step:1041/10000 train_time:68621ms step_avg:65.92ms +[2025-09-11 12:41:43] [Rank 0] step:1041/10000 train_time:68621ms step_avg:65.92ms +[2025-09-11 12:41:44] [Rank 0] step:1061/10000 train_time:69264ms step_avg:65.28ms +[2025-09-11 12:41:44] [Rank 0] step:1061/10000 train_time:69264ms step_avg:65.28ms +[2025-09-11 12:41:45] [Rank 0] step:1081/10000 train_time:69907ms step_avg:64.67ms +[2025-09-11 12:41:45] [Rank 0] step:1081/10000 train_time:69907ms step_avg:64.67ms +[2025-09-11 12:41:45] [Rank 0] step:1101/10000 train_time:70550ms step_avg:64.08ms +[2025-09-11 12:41:45] [Rank 0] step:1101/10000 train_time:70550ms step_avg:64.08ms +[2025-09-11 12:41:46] [Rank 0] step:1121/10000 train_time:71193ms step_avg:63.51ms +[2025-09-11 12:41:46] [Rank 0] step:1121/10000 train_time:71193ms step_avg:63.51ms +[2025-09-11 12:41:46] [Rank 0] step:1141/10000 train_time:71836ms step_avg:62.96ms +[2025-09-11 12:41:46] [Rank 0] step:1141/10000 train_time:71836ms step_avg:62.96ms +[2025-09-11 12:41:47] 
[Rank 0] step:1161/10000 train_time:72479ms step_avg:62.43ms +[2025-09-11 12:41:47] [Rank 0] step:1161/10000 train_time:72479ms step_avg:62.43ms +[2025-09-11 12:41:48] [Rank 0] step:1181/10000 train_time:73121ms step_avg:61.91ms +[2025-09-11 12:41:48] [Rank 0] step:1181/10000 train_time:73121ms step_avg:61.91ms +[2025-09-11 12:41:48] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 12:41:48] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 12:41:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 12:41:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 12:41:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 12:41:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 12:41:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:41:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:41:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 12:41:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 12:41:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 12:41:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 12:41:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 12:41:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 12:41:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... 
+[2025-09-11 12:41:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 12:41:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 12:41:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 12:41:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 12:41:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 12:41:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 12:41:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 12:41:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 12:41:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 12:41:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 12:41:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 12:41:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 12:41:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 12:41:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 12:41:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 12:41:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 12:41:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 12:41:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 12:41:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 12:41:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... 
+[2025-09-11 12:41:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 12:41:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 12:41:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 12:41:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 12:41:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 12:41:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 12:41:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 12:41:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 12:41:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 12:41:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 12:41:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:41:58] [Rank 0] PRINT: step:1200/10000 val_loss:6.6816 total_sharp:3.1774e-02 L1_sharp:5.0359e-01 L2_sharp:4.3241e-01 L3_sharp:4.4556e-01 L4_sharp:4.7062e-01 L5_sharp:5.5720e-01 L6_sharp:5.2547e-01 L7_sharp:5.7061e-01 L8_sharp:7.7216e-01 L9_sharp:9.3690e-01 L10_sharp:1.1362e+00 L11_sharp:1.5931e+00 L12_sharp:1.3814e+00 total_fnorm:2.3281e+00 total_l1_linf:2.1920e+03 total_spectral:1.1641e+00 L1_fnorm:4.8096e-02 L2_fnorm:4.9072e-02 L3_fnorm:4.9072e-02 L4_fnorm:4.8828e-02 L5_fnorm:4.7852e-02 L6_fnorm:4.8828e-02 L7_fnorm:4.8584e-02 L8_fnorm:4.6387e-02 L9_fnorm:4.7119e-02 L10_fnorm:4.5166e-02 L11_fnorm:4.1504e-02 L12_fnorm:3.5889e-02 L1_l1linf:1.6968e-02 L2_l1linf:1.6968e-02 L3_l1linf:1.6846e-02 L4_l1linf:1.7090e-02 L5_l1linf:1.7090e-02 L6_l1linf:1.7334e-02 L7_l1linf:1.7456e-02 L8_l1linf:1.7822e-02 L9_l1linf:1.7456e-02 L10_l1linf:1.7334e-02 L11_l1linf:1.5564e-02 L12_l1linf:1.3855e-02 L1_spectral:7.2812e-04 L2_spectral:7.2455e-04 L3_spectral:7.2577e-04 L4_spectral:7.1811e-04 L5_spectral:7.1315e-04 L6_spectral:7.1822e-04 L7_spectral:7.2064e-04 L8_spectral:7.2986e-04 L9_spectral:7.4170e-04 L10_spectral:7.4435e-04 L11_spectral:7.2963e-04 L12_spectral:7.0289e-04 train_time:73746ms step_avg:61.45ms +[2025-09-11 12:41:58] [Rank 0] PRINT: step:1200/10000 val_loss:6.6816 total_sharp:3.1774e-02 L1_sharp:5.0359e-01 L2_sharp:4.3241e-01 L3_sharp:4.4556e-01 L4_sharp:4.7062e-01 L5_sharp:5.5720e-01 L6_sharp:5.2547e-01 L7_sharp:5.7061e-01 L8_sharp:7.7216e-01 L9_sharp:9.3690e-01 L10_sharp:1.1362e+00 L11_sharp:1.5931e+00 L12_sharp:1.3814e+00 total_fnorm:2.3281e+00 total_l1_linf:2.1920e+03 total_spectral:1.1641e+00 L1_fnorm:4.8096e-02 L2_fnorm:4.9072e-02 L3_fnorm:4.9072e-02 L4_fnorm:4.8828e-02 L5_fnorm:4.7852e-02 L6_fnorm:4.8828e-02 L7_fnorm:4.8584e-02 L8_fnorm:4.6387e-02 L9_fnorm:4.7119e-02 L10_fnorm:4.5166e-02 L11_fnorm:4.1504e-02 L12_fnorm:3.5889e-02 L1_l1linf:1.6968e-02 L2_l1linf:1.6968e-02 L3_l1linf:1.6846e-02 L4_l1linf:1.7090e-02 L5_l1linf:1.7090e-02 
L6_l1linf:1.7334e-02 L7_l1linf:1.7456e-02 L8_l1linf:1.7822e-02 L9_l1linf:1.7456e-02 L10_l1linf:1.7334e-02 L11_l1linf:1.5564e-02 L12_l1linf:1.3855e-02 L1_spectral:7.2812e-04 L2_spectral:7.2455e-04 L3_spectral:7.2577e-04 L4_spectral:7.1811e-04 L5_spectral:7.1315e-04 L6_spectral:7.1822e-04 L7_spectral:7.2064e-04 L8_spectral:7.2986e-04 L9_spectral:7.4170e-04 L10_spectral:7.4435e-04 L11_spectral:7.2963e-04 L12_spectral:7.0289e-04 train_time:73746ms step_avg:61.45ms +[2025-09-11 12:42:00] [Rank 0] step:1201/10000 train_time:75092ms step_avg:62.52ms +[2025-09-11 12:42:00] [Rank 0] step:1201/10000 train_time:75092ms step_avg:62.52ms +[2025-09-11 12:42:00] [Rank 0] step:1221/10000 train_time:75739ms step_avg:62.03ms +[2025-09-11 12:42:00] [Rank 0] step:1221/10000 train_time:75739ms step_avg:62.03ms +[2025-09-11 12:42:01] [Rank 0] step:1241/10000 train_time:76384ms step_avg:61.55ms +[2025-09-11 12:42:01] [Rank 0] step:1241/10000 train_time:76384ms step_avg:61.55ms +[2025-09-11 12:42:02] [Rank 0] step:1261/10000 train_time:77028ms step_avg:61.08ms +[2025-09-11 12:42:02] [Rank 0] step:1261/10000 train_time:77028ms step_avg:61.08ms +[2025-09-11 12:42:02] [Rank 0] step:1281/10000 train_time:77671ms step_avg:60.63ms +[2025-09-11 12:42:02] [Rank 0] step:1281/10000 train_time:77671ms step_avg:60.63ms +[2025-09-11 12:42:03] [Rank 0] step:1301/10000 train_time:78315ms step_avg:60.20ms +[2025-09-11 12:42:03] [Rank 0] step:1301/10000 train_time:78315ms step_avg:60.20ms +[2025-09-11 12:42:04] [Rank 0] step:1321/10000 train_time:78957ms step_avg:59.77ms +[2025-09-11 12:42:04] [Rank 0] step:1321/10000 train_time:78957ms step_avg:59.77ms +[2025-09-11 12:42:04] [Rank 0] step:1341/10000 train_time:79600ms step_avg:59.36ms +[2025-09-11 12:42:04] [Rank 0] step:1341/10000 train_time:79600ms step_avg:59.36ms +[2025-09-11 12:42:05] [Rank 0] step:1361/10000 train_time:80244ms step_avg:58.96ms +[2025-09-11 12:42:05] [Rank 0] step:1361/10000 train_time:80244ms step_avg:58.96ms +[2025-09-11 12:42:06] 
[Rank 0] step:1381/10000 train_time:80888ms step_avg:58.57ms +[2025-09-11 12:42:06] [Rank 0] step:1381/10000 train_time:80888ms step_avg:58.57ms +[2025-09-11 12:42:06] [Rank 0] step:1401/10000 train_time:81529ms step_avg:58.19ms +[2025-09-11 12:42:06] [Rank 0] step:1401/10000 train_time:81529ms step_avg:58.19ms +[2025-09-11 12:42:07] [Rank 0] step:1421/10000 train_time:82172ms step_avg:57.83ms +[2025-09-11 12:42:07] [Rank 0] step:1421/10000 train_time:82172ms step_avg:57.83ms +[2025-09-11 12:42:08] [Rank 0] step:1441/10000 train_time:82814ms step_avg:57.47ms +[2025-09-11 12:42:08] [Rank 0] step:1441/10000 train_time:82814ms step_avg:57.47ms +[2025-09-11 12:42:08] [Rank 0] step:1461/10000 train_time:83457ms step_avg:57.12ms +[2025-09-11 12:42:08] [Rank 0] step:1461/10000 train_time:83457ms step_avg:57.12ms +[2025-09-11 12:42:09] [Rank 0] step:1481/10000 train_time:84100ms step_avg:56.79ms +[2025-09-11 12:42:09] [Rank 0] step:1481/10000 train_time:84100ms step_avg:56.79ms +[2025-09-11 12:42:09] [Rank 0] step:1501/10000 train_time:84747ms step_avg:56.46ms +[2025-09-11 12:42:09] [Rank 0] step:1501/10000 train_time:84747ms step_avg:56.46ms +[2025-09-11 12:42:10] [Rank 0] step:1521/10000 train_time:85394ms step_avg:56.14ms +[2025-09-11 12:42:10] [Rank 0] step:1521/10000 train_time:85394ms step_avg:56.14ms +[2025-09-11 12:42:11] [Rank 0] step:1541/10000 train_time:86041ms step_avg:55.83ms +[2025-09-11 12:42:11] [Rank 0] step:1541/10000 train_time:86041ms step_avg:55.83ms +[2025-09-11 12:42:11] [Rank 0] step:1561/10000 train_time:86688ms step_avg:55.53ms +[2025-09-11 12:42:11] [Rank 0] step:1561/10000 train_time:86688ms step_avg:55.53ms +[2025-09-11 12:42:12] [Rank 0] step:1581/10000 train_time:87335ms step_avg:55.24ms +[2025-09-11 12:42:12] [Rank 0] step:1581/10000 train_time:87335ms step_avg:55.24ms +[2025-09-11 12:42:13] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 12:42:13] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 12:42:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 12:42:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 12:42:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 12:42:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 12:42:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:42:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:42:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 12:42:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 12:42:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 12:42:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 12:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 12:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 12:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 12:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 12:42:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 12:42:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 12:42:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 12:42:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 12:42:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 12:42:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 12:42:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 12:42:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 12:42:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 12:42:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 12:42:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 12:42:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 12:42:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 12:42:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 12:42:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 12:42:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 12:42:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 12:42:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 12:42:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 12:42:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 12:42:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 12:42:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 12:42:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 12:42:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 12:42:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 12:42:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 12:42:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 12:42:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 12:42:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 12:42:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 12:42:23] [Rank 0] PRINT: step:1600/10000 val_loss:6.5166 total_sharp:8.1312e-02 L1_sharp:6.3513e-01 L2_sharp:5.3214e-01 L3_sharp:5.8745e-01 L4_sharp:6.8307e-01 L5_sharp:8.9032e-01 L6_sharp:1.1106e+00 L7_sharp:1.5088e+00 L8_sharp:2.1112e+00 L9_sharp:2.8272e+00 L10_sharp:3.1627e+00 L11_sharp:2.1825e+00 L12_sharp:2.1069e+00 total_fnorm:2.0938e+00 total_l1_linf:1.8240e+03 total_spectral:1.0469e+00 L1_fnorm:4.8096e-02 L2_fnorm:4.8828e-02 L3_fnorm:4.8828e-02 L4_fnorm:4.8828e-02 L5_fnorm:4.7363e-02 L6_fnorm:4.8828e-02 L7_fnorm:4.8584e-02 L8_fnorm:4.6143e-02 L9_fnorm:4.7607e-02 L10_fnorm:4.5898e-02 L11_fnorm:4.4189e-02 L12_fnorm:3.7842e-02 L1_l1linf:1.5869e-02 L2_l1linf:1.5869e-02 L3_l1linf:1.5869e-02 L4_l1linf:1.5991e-02 L5_l1linf:1.5991e-02 L6_l1linf:1.6357e-02 L7_l1linf:1.6846e-02 L8_l1linf:1.6846e-02 L9_l1linf:1.6846e-02 L10_l1linf:1.6846e-02 L11_l1linf:1.5625e-02 L12_l1linf:1.2756e-02 L1_spectral:7.3913e-04 L2_spectral:7.4322e-04 L3_spectral:7.4179e-04 L4_spectral:7.3714e-04 L5_spectral:7.3248e-04 L6_spectral:7.3525e-04 L7_spectral:7.3474e-04 L8_spectral:7.3271e-04 L9_spectral:7.4171e-04 L10_spectral:7.4290e-04 L11_spectral:7.3071e-04 L12_spectral:6.9350e-04 train_time:87964ms step_avg:54.98ms +[2025-09-11 12:42:23] [Rank 0] PRINT: step:1600/10000 
val_loss:6.5166 total_sharp:8.1312e-02 L1_sharp:6.3513e-01 L2_sharp:5.3214e-01 L3_sharp:5.8745e-01 L4_sharp:6.8307e-01 L5_sharp:8.9032e-01 L6_sharp:1.1106e+00 L7_sharp:1.5088e+00 L8_sharp:2.1112e+00 L9_sharp:2.8272e+00 L10_sharp:3.1627e+00 L11_sharp:2.1825e+00 L12_sharp:2.1069e+00 total_fnorm:2.0938e+00 total_l1_linf:1.8240e+03 total_spectral:1.0469e+00 L1_fnorm:4.8096e-02 L2_fnorm:4.8828e-02 L3_fnorm:4.8828e-02 L4_fnorm:4.8828e-02 L5_fnorm:4.7363e-02 L6_fnorm:4.8828e-02 L7_fnorm:4.8584e-02 L8_fnorm:4.6143e-02 L9_fnorm:4.7607e-02 L10_fnorm:4.5898e-02 L11_fnorm:4.4189e-02 L12_fnorm:3.7842e-02 L1_l1linf:1.5869e-02 L2_l1linf:1.5869e-02 L3_l1linf:1.5869e-02 L4_l1linf:1.5991e-02 L5_l1linf:1.5991e-02 L6_l1linf:1.6357e-02 L7_l1linf:1.6846e-02 L8_l1linf:1.6846e-02 L9_l1linf:1.6846e-02 L10_l1linf:1.6846e-02 L11_l1linf:1.5625e-02 L12_l1linf:1.2756e-02 L1_spectral:7.3913e-04 L2_spectral:7.4322e-04 L3_spectral:7.4179e-04 L4_spectral:7.3714e-04 L5_spectral:7.3248e-04 L6_spectral:7.3525e-04 L7_spectral:7.3474e-04 L8_spectral:7.3271e-04 L9_spectral:7.4171e-04 L10_spectral:7.4290e-04 L11_spectral:7.3071e-04 L12_spectral:6.9350e-04 train_time:87964ms step_avg:54.98ms +[2025-09-11 12:42:24] [Rank 0] step:1601/10000 train_time:89325ms step_avg:55.79ms +[2025-09-11 12:42:24] [Rank 0] step:1601/10000 train_time:89325ms step_avg:55.79ms +[2025-09-11 12:42:25] [Rank 0] step:1621/10000 train_time:89980ms step_avg:55.51ms +[2025-09-11 12:42:25] [Rank 0] step:1621/10000 train_time:89980ms step_avg:55.51ms +[2025-09-11 12:42:25] [Rank 0] step:1641/10000 train_time:90628ms step_avg:55.23ms +[2025-09-11 12:42:25] [Rank 0] step:1641/10000 train_time:90628ms step_avg:55.23ms +[2025-09-11 12:42:26] [Rank 0] step:1661/10000 train_time:91276ms step_avg:54.95ms +[2025-09-11 12:42:26] [Rank 0] step:1661/10000 train_time:91276ms step_avg:54.95ms +[2025-09-11 12:42:27] [Rank 0] step:1681/10000 train_time:91926ms step_avg:54.69ms +[2025-09-11 12:42:27] [Rank 0] step:1681/10000 train_time:91926ms 
step_avg:54.69ms +[2025-09-11 12:42:27] [Rank 0] step:1701/10000 train_time:92573ms step_avg:54.42ms +[2025-09-11 12:42:27] [Rank 0] step:1701/10000 train_time:92573ms step_avg:54.42ms +[2025-09-11 12:42:28] [Rank 0] step:1721/10000 train_time:93220ms step_avg:54.17ms +[2025-09-11 12:42:28] [Rank 0] step:1721/10000 train_time:93220ms step_avg:54.17ms +[2025-09-11 12:42:29] [Rank 0] step:1741/10000 train_time:93867ms step_avg:53.92ms +[2025-09-11 12:42:29] [Rank 0] step:1741/10000 train_time:93867ms step_avg:53.92ms +[2025-09-11 12:42:29] [Rank 0] step:1761/10000 train_time:94514ms step_avg:53.67ms +[2025-09-11 12:42:29] [Rank 0] step:1761/10000 train_time:94514ms step_avg:53.67ms +[2025-09-11 12:42:30] [Rank 0] step:1781/10000 train_time:95162ms step_avg:53.43ms +[2025-09-11 12:42:30] [Rank 0] step:1781/10000 train_time:95162ms step_avg:53.43ms +[2025-09-11 12:42:31] [Rank 0] step:1801/10000 train_time:95809ms step_avg:53.20ms +[2025-09-11 12:42:31] [Rank 0] step:1801/10000 train_time:95809ms step_avg:53.20ms +[2025-09-11 12:42:31] [Rank 0] step:1821/10000 train_time:96456ms step_avg:52.97ms +[2025-09-11 12:42:31] [Rank 0] step:1821/10000 train_time:96456ms step_avg:52.97ms +[2025-09-11 12:42:32] [Rank 0] step:1841/10000 train_time:97103ms step_avg:52.74ms +[2025-09-11 12:42:32] [Rank 0] step:1841/10000 train_time:97103ms step_avg:52.74ms +[2025-09-11 12:42:33] [Rank 0] step:1861/10000 train_time:97751ms step_avg:52.53ms +[2025-09-11 12:42:33] [Rank 0] step:1861/10000 train_time:97751ms step_avg:52.53ms +[2025-09-11 12:42:33] [Rank 0] step:1881/10000 train_time:98397ms step_avg:52.31ms +[2025-09-11 12:42:33] [Rank 0] step:1881/10000 train_time:98397ms step_avg:52.31ms +[2025-09-11 12:42:34] [Rank 0] step:1901/10000 train_time:99044ms step_avg:52.10ms +[2025-09-11 12:42:34] [Rank 0] step:1901/10000 train_time:99044ms step_avg:52.10ms +[2025-09-11 12:42:34] [Rank 0] step:1921/10000 train_time:99692ms step_avg:51.90ms +[2025-09-11 12:42:34] [Rank 0] step:1921/10000 
train_time:99692ms step_avg:51.90ms +[2025-09-11 12:42:35] [Rank 0] step:1941/10000 train_time:100339ms step_avg:51.69ms +[2025-09-11 12:42:35] [Rank 0] step:1941/10000 train_time:100339ms step_avg:51.69ms +[2025-09-11 12:42:36] [Rank 0] step:1961/10000 train_time:100987ms step_avg:51.50ms +[2025-09-11 12:42:36] [Rank 0] step:1961/10000 train_time:100987ms step_avg:51.50ms +[2025-09-11 12:42:36] [Rank 0] step:1981/10000 train_time:101634ms step_avg:51.30ms +[2025-09-11 12:42:36] [Rank 0] step:1981/10000 train_time:101634ms step_avg:51.30ms +[2025-09-11 12:42:37] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 12:42:37] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 12:42:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 12:42:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 12:42:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 12:42:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 12:42:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:42:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:42:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 12:42:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 12:42:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 12:42:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 12:42:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 12:42:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 12:42:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 12:42:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 12:42:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 12:42:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 12:42:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 12:42:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 12:42:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 12:42:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 12:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 12:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 12:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 12:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 12:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 12:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 12:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 12:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 12:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 12:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 12:42:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 12:42:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 12:42:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 12:42:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 12:42:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 12:42:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 12:42:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 12:42:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 12:42:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 12:42:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 12:42:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 12:42:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 12:42:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 12:42:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:42:47] [Rank 0] PRINT: step:2000/10000 val_loss:6.3879 total_sharp:1.1280e-01 L1_sharp:4.0903e-01 L2_sharp:3.7927e-01 L3_sharp:4.3738e-01 L4_sharp:5.0393e-01 L5_sharp:7.4751e-01 L6_sharp:8.5895e-01 L7_sharp:1.3526e+00 L8_sharp:2.3838e+00 L9_sharp:4.0518e+00 L10_sharp:5.0673e+00 L11_sharp:5.3011e+00 L12_sharp:5.0092e+00 total_fnorm:1.9766e+00 total_l1_linf:1.7040e+03 total_spectral:9.9219e-01 L1_fnorm:4.8096e-02 L2_fnorm:4.8584e-02 L3_fnorm:4.8584e-02 L4_fnorm:4.8584e-02 L5_fnorm:4.7852e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8584e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.8340e-02 L10_fnorm:4.7607e-02 L11_fnorm:4.6143e-02 L12_fnorm:3.9795e-02 L1_l1linf:1.4587e-02 L2_l1linf:1.4832e-02 L3_l1linf:1.4832e-02 L4_l1linf:1.5137e-02 L5_l1linf:1.5015e-02 L6_l1linf:1.5320e-02 L7_l1linf:1.5503e-02 L8_l1linf:1.5442e-02 L9_l1linf:1.6235e-02 L10_l1linf:1.6357e-02 L11_l1linf:1.5991e-02 L12_l1linf:1.2756e-02 L1_spectral:7.5823e-04 L2_spectral:7.5405e-04 L3_spectral:7.5490e-04 L4_spectral:7.5326e-04 L5_spectral:7.5208e-04 L6_spectral:7.5254e-04 L7_spectral:7.5531e-04 L8_spectral:7.3899e-04 L9_spectral:7.5002e-04 L10_spectral:7.4974e-04 L11_spectral:7.3363e-04 L12_spectral:6.9281e-04 train_time:102263ms step_avg:51.13ms +[2025-09-11 12:42:47] [Rank 0] PRINT: step:2000/10000 val_loss:6.3879 total_sharp:1.1280e-01 L1_sharp:4.0903e-01 L2_sharp:3.7927e-01 L3_sharp:4.3738e-01 L4_sharp:5.0393e-01 L5_sharp:7.4751e-01 L6_sharp:8.5895e-01 L7_sharp:1.3526e+00 L8_sharp:2.3838e+00 L9_sharp:4.0518e+00 L10_sharp:5.0673e+00 L11_sharp:5.3011e+00 L12_sharp:5.0092e+00 total_fnorm:1.9766e+00 total_l1_linf:1.7040e+03 total_spectral:9.9219e-01 L1_fnorm:4.8096e-02 L2_fnorm:4.8584e-02 L3_fnorm:4.8584e-02 L4_fnorm:4.8584e-02 L5_fnorm:4.7852e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8584e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.8340e-02 L10_fnorm:4.7607e-02 L11_fnorm:4.6143e-02 L12_fnorm:3.9795e-02 L1_l1linf:1.4587e-02 L2_l1linf:1.4832e-02 L3_l1linf:1.4832e-02 L4_l1linf:1.5137e-02 L5_l1linf:1.5015e-02 
L6_l1linf:1.5320e-02 L7_l1linf:1.5503e-02 L8_l1linf:1.5442e-02 L9_l1linf:1.6235e-02 L10_l1linf:1.6357e-02 L11_l1linf:1.5991e-02 L12_l1linf:1.2756e-02 L1_spectral:7.5823e-04 L2_spectral:7.5405e-04 L3_spectral:7.5490e-04 L4_spectral:7.5326e-04 L5_spectral:7.5208e-04 L6_spectral:7.5254e-04 L7_spectral:7.5531e-04 L8_spectral:7.3899e-04 L9_spectral:7.5002e-04 L10_spectral:7.4974e-04 L11_spectral:7.3363e-04 L12_spectral:6.9281e-04 train_time:102263ms step_avg:51.13ms +[2025-09-11 12:42:49] [Rank 0] step:2001/10000 train_time:103688ms step_avg:51.82ms +[2025-09-11 12:42:49] [Rank 0] step:2001/10000 train_time:103688ms step_avg:51.82ms +[2025-09-11 12:42:49] [Rank 0] step:2021/10000 train_time:104371ms step_avg:51.64ms +[2025-09-11 12:42:49] [Rank 0] step:2021/10000 train_time:104371ms step_avg:51.64ms +[2025-09-11 12:42:50] [Rank 0] step:2041/10000 train_time:105019ms step_avg:51.45ms +[2025-09-11 12:42:50] [Rank 0] step:2041/10000 train_time:105019ms step_avg:51.45ms +[2025-09-11 12:42:51] [Rank 0] step:2061/10000 train_time:105666ms step_avg:51.27ms +[2025-09-11 12:42:51] [Rank 0] step:2061/10000 train_time:105666ms step_avg:51.27ms +[2025-09-11 12:42:51] [Rank 0] step:2081/10000 train_time:106313ms step_avg:51.09ms +[2025-09-11 12:42:51] [Rank 0] step:2081/10000 train_time:106313ms step_avg:51.09ms +[2025-09-11 12:42:52] [Rank 0] step:2101/10000 train_time:106960ms step_avg:50.91ms +[2025-09-11 12:42:52] [Rank 0] step:2101/10000 train_time:106960ms step_avg:50.91ms +[2025-09-11 12:42:53] [Rank 0] step:2121/10000 train_time:107607ms step_avg:50.73ms +[2025-09-11 12:42:53] [Rank 0] step:2121/10000 train_time:107607ms step_avg:50.73ms +[2025-09-11 12:42:53] [Rank 0] step:2141/10000 train_time:108253ms step_avg:50.56ms +[2025-09-11 12:42:53] [Rank 0] step:2141/10000 train_time:108253ms step_avg:50.56ms +[2025-09-11 12:42:54] [Rank 0] step:2161/10000 train_time:108899ms step_avg:50.39ms +[2025-09-11 12:42:54] [Rank 0] step:2161/10000 train_time:108899ms step_avg:50.39ms 
+[2025-09-11 12:42:55] [Rank 0] step:2181/10000 train_time:109545ms step_avg:50.23ms +[2025-09-11 12:42:55] [Rank 0] step:2181/10000 train_time:109545ms step_avg:50.23ms +[2025-09-11 12:42:55] [Rank 0] step:2201/10000 train_time:110191ms step_avg:50.06ms +[2025-09-11 12:42:55] [Rank 0] step:2201/10000 train_time:110191ms step_avg:50.06ms +[2025-09-11 12:42:56] [Rank 0] step:2221/10000 train_time:110837ms step_avg:49.90ms +[2025-09-11 12:42:56] [Rank 0] step:2221/10000 train_time:110837ms step_avg:49.90ms +[2025-09-11 12:42:57] [Rank 0] step:2241/10000 train_time:111497ms step_avg:49.75ms +[2025-09-11 12:42:57] [Rank 0] step:2241/10000 train_time:111497ms step_avg:49.75ms +[2025-09-11 12:42:57] [Rank 0] step:2261/10000 train_time:112157ms step_avg:49.60ms +[2025-09-11 12:42:57] [Rank 0] step:2261/10000 train_time:112157ms step_avg:49.60ms +[2025-09-11 12:42:58] [Rank 0] step:2281/10000 train_time:112817ms step_avg:49.46ms +[2025-09-11 12:42:58] [Rank 0] step:2281/10000 train_time:112817ms step_avg:49.46ms +[2025-09-11 12:42:59] [Rank 0] step:2301/10000 train_time:113476ms step_avg:49.32ms +[2025-09-11 12:42:59] [Rank 0] step:2301/10000 train_time:113476ms step_avg:49.32ms +[2025-09-11 12:42:59] [Rank 0] step:2321/10000 train_time:114136ms step_avg:49.18ms +[2025-09-11 12:42:59] [Rank 0] step:2321/10000 train_time:114136ms step_avg:49.18ms +[2025-09-11 12:43:00] [Rank 0] step:2341/10000 train_time:114796ms step_avg:49.04ms +[2025-09-11 12:43:00] [Rank 0] step:2341/10000 train_time:114796ms step_avg:49.04ms +[2025-09-11 12:43:01] [Rank 0] step:2361/10000 train_time:115456ms step_avg:48.90ms +[2025-09-11 12:43:01] [Rank 0] step:2361/10000 train_time:115456ms step_avg:48.90ms +[2025-09-11 12:43:01] [Rank 0] step:2381/10000 train_time:116116ms step_avg:48.77ms +[2025-09-11 12:43:01] [Rank 0] step:2381/10000 train_time:116116ms step_avg:48.77ms +[2025-09-11 12:43:02] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 12:43:02] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 12:43:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 12:43:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 12:43:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 12:43:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 12:43:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:43:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:43:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 12:43:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 12:43:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 12:43:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 12:43:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 12:43:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 12:43:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 12:43:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 12:43:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 12:43:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 12:43:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 12:43:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 12:43:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 12:43:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 12:43:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 12:43:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 12:43:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 12:43:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 12:43:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 12:43:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 12:43:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 12:43:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 12:43:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 12:43:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 12:43:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 12:43:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 12:43:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 12:43:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 12:43:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 12:43:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 12:43:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 12:43:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 12:43:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 12:43:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 12:43:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 12:43:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 12:43:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 12:43:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 12:43:16] [Rank 0] PRINT: step:2400/10000 val_loss:6.2773 total_sharp:8.4269e-02 L1_sharp:3.1749e-01 L2_sharp:3.1856e-01 L3_sharp:3.3861e-01 L4_sharp:4.1547e-01 L5_sharp:5.4873e-01 L6_sharp:6.6567e-01 L7_sharp:9.4269e-01 L8_sharp:1.4351e+00 L9_sharp:2.1242e+00 L10_sharp:2.8831e+00 L11_sharp:3.0565e+00 L12_sharp:4.1645e+00 total_fnorm:1.8438e+00 total_l1_linf:1.5200e+03 total_spectral:9.2188e-01 L1_fnorm:4.7852e-02 L2_fnorm:4.7852e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.8096e-02 L5_fnorm:4.7852e-02 L6_fnorm:4.8096e-02 L7_fnorm:4.8096e-02 L8_fnorm:4.7119e-02 L9_fnorm:4.8096e-02 L10_fnorm:4.7363e-02 L11_fnorm:4.7119e-02 L12_fnorm:4.1748e-02 L1_l1linf:1.4099e-02 L2_l1linf:1.3794e-02 L3_l1linf:1.3977e-02 L4_l1linf:1.4587e-02 L5_l1linf:1.4221e-02 L6_l1linf:1.4648e-02 L7_l1linf:1.4587e-02 L8_l1linf:1.4893e-02 L9_l1linf:1.5564e-02 L10_l1linf:1.5869e-02 L11_l1linf:1.5625e-02 L12_l1linf:1.2634e-02 L1_spectral:7.6331e-04 L2_spectral:7.6474e-04 L3_spectral:7.6507e-04 L4_spectral:7.5987e-04 L5_spectral:7.6264e-04 L6_spectral:7.6302e-04 L7_spectral:7.6283e-04 L8_spectral:7.5998e-04 L9_spectral:7.5873e-04 L10_spectral:7.6020e-04 L11_spectral:7.5404e-04 L12_spectral:7.0219e-04 train_time:116758ms step_avg:48.65ms +[2025-09-11 12:43:16] [Rank 0] PRINT: step:2400/10000 
val_loss:6.2773 total_sharp:8.4269e-02 L1_sharp:3.1749e-01 L2_sharp:3.1856e-01 L3_sharp:3.3861e-01 L4_sharp:4.1547e-01 L5_sharp:5.4873e-01 L6_sharp:6.6567e-01 L7_sharp:9.4269e-01 L8_sharp:1.4351e+00 L9_sharp:2.1242e+00 L10_sharp:2.8831e+00 L11_sharp:3.0565e+00 L12_sharp:4.1645e+00 total_fnorm:1.8438e+00 total_l1_linf:1.5200e+03 total_spectral:9.2188e-01 L1_fnorm:4.7852e-02 L2_fnorm:4.7852e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.8096e-02 L5_fnorm:4.7852e-02 L6_fnorm:4.8096e-02 L7_fnorm:4.8096e-02 L8_fnorm:4.7119e-02 L9_fnorm:4.8096e-02 L10_fnorm:4.7363e-02 L11_fnorm:4.7119e-02 L12_fnorm:4.1748e-02 L1_l1linf:1.4099e-02 L2_l1linf:1.3794e-02 L3_l1linf:1.3977e-02 L4_l1linf:1.4587e-02 L5_l1linf:1.4221e-02 L6_l1linf:1.4648e-02 L7_l1linf:1.4587e-02 L8_l1linf:1.4893e-02 L9_l1linf:1.5564e-02 L10_l1linf:1.5869e-02 L11_l1linf:1.5625e-02 L12_l1linf:1.2634e-02 L1_spectral:7.6331e-04 L2_spectral:7.6474e-04 L3_spectral:7.6507e-04 L4_spectral:7.5987e-04 L5_spectral:7.6264e-04 L6_spectral:7.6302e-04 L7_spectral:7.6283e-04 L8_spectral:7.5998e-04 L9_spectral:7.5873e-04 L10_spectral:7.6020e-04 L11_spectral:7.5404e-04 L12_spectral:7.0219e-04 train_time:116758ms step_avg:48.65ms +[2025-09-11 12:43:17] [Rank 0] step:2401/10000 train_time:118134ms step_avg:49.20ms +[2025-09-11 12:43:17] [Rank 0] step:2401/10000 train_time:118134ms step_avg:49.20ms +[2025-09-11 12:43:18] [Rank 0] step:2421/10000 train_time:118798ms step_avg:49.07ms +[2025-09-11 12:43:18] [Rank 0] step:2421/10000 train_time:118798ms step_avg:49.07ms +[2025-09-11 12:43:18] [Rank 0] step:2441/10000 train_time:119459ms step_avg:48.94ms +[2025-09-11 12:43:18] [Rank 0] step:2441/10000 train_time:119459ms step_avg:48.94ms +[2025-09-11 12:43:19] [Rank 0] step:2461/10000 train_time:120120ms step_avg:48.81ms +[2025-09-11 12:43:19] [Rank 0] step:2461/10000 train_time:120120ms step_avg:48.81ms +[2025-09-11 12:43:20] [Rank 0] step:2481/10000 train_time:120782ms step_avg:48.68ms +[2025-09-11 12:43:20] [Rank 0] step:2481/10000 
train_time:120782ms step_avg:48.68ms +[2025-09-11 12:43:20] [Rank 0] step:2501/10000 train_time:121442ms step_avg:48.56ms +[2025-09-11 12:43:20] [Rank 0] step:2501/10000 train_time:121442ms step_avg:48.56ms +[2025-09-11 12:43:21] [Rank 0] step:2521/10000 train_time:122102ms step_avg:48.43ms +[2025-09-11 12:43:21] [Rank 0] step:2521/10000 train_time:122102ms step_avg:48.43ms +[2025-09-11 12:43:22] [Rank 0] step:2541/10000 train_time:122762ms step_avg:48.31ms +[2025-09-11 12:43:22] [Rank 0] step:2541/10000 train_time:122762ms step_avg:48.31ms +[2025-09-11 12:43:22] [Rank 0] step:2561/10000 train_time:123422ms step_avg:48.19ms +[2025-09-11 12:43:22] [Rank 0] step:2561/10000 train_time:123422ms step_avg:48.19ms +[2025-09-11 12:43:23] [Rank 0] step:2581/10000 train_time:124082ms step_avg:48.08ms +[2025-09-11 12:43:23] [Rank 0] step:2581/10000 train_time:124082ms step_avg:48.08ms +[2025-09-11 12:43:24] [Rank 0] step:2601/10000 train_time:124742ms step_avg:47.96ms +[2025-09-11 12:43:24] [Rank 0] step:2601/10000 train_time:124742ms step_avg:47.96ms +[2025-09-11 12:43:24] [Rank 0] step:2621/10000 train_time:125403ms step_avg:47.85ms +[2025-09-11 12:43:24] [Rank 0] step:2621/10000 train_time:125403ms step_avg:47.85ms +[2025-09-11 12:43:25] [Rank 0] step:2641/10000 train_time:126063ms step_avg:47.73ms +[2025-09-11 12:43:25] [Rank 0] step:2641/10000 train_time:126063ms step_avg:47.73ms +[2025-09-11 12:43:26] [Rank 0] step:2661/10000 train_time:126724ms step_avg:47.62ms +[2025-09-11 12:43:26] [Rank 0] step:2661/10000 train_time:126724ms step_avg:47.62ms +[2025-09-11 12:43:26] [Rank 0] step:2681/10000 train_time:127384ms step_avg:47.51ms +[2025-09-11 12:43:26] [Rank 0] step:2681/10000 train_time:127384ms step_avg:47.51ms +[2025-09-11 12:43:27] [Rank 0] step:2701/10000 train_time:128047ms step_avg:47.41ms +[2025-09-11 12:43:27] [Rank 0] step:2701/10000 train_time:128047ms step_avg:47.41ms +[2025-09-11 12:43:27] [Rank 0] step:2721/10000 train_time:128707ms step_avg:47.30ms 
+[2025-09-11 12:43:27] [Rank 0] step:2721/10000 train_time:128707ms step_avg:47.30ms +[2025-09-11 12:43:28] [Rank 0] step:2741/10000 train_time:129368ms step_avg:47.20ms +[2025-09-11 12:43:28] [Rank 0] step:2741/10000 train_time:129368ms step_avg:47.20ms +[2025-09-11 12:43:29] [Rank 0] step:2761/10000 train_time:130029ms step_avg:47.09ms +[2025-09-11 12:43:29] [Rank 0] step:2761/10000 train_time:130029ms step_avg:47.09ms +[2025-09-11 12:43:29] [Rank 0] step:2781/10000 train_time:130690ms step_avg:46.99ms +[2025-09-11 12:43:29] [Rank 0] step:2781/10000 train_time:130690ms step_avg:46.99ms +[2025-09-11 12:43:30] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 12:43:30] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 12:43:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 12:43:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 12:43:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 12:43:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 12:43:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:43:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:43:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 12:43:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 12:43:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 12:43:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 12:43:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 12:43:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 12:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 12:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 12:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 12:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 12:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 12:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 12:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 12:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 12:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 12:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 12:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 12:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 12:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 12:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 12:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 12:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 12:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 12:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 12:43:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 12:43:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 12:43:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 12:43:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 12:43:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 12:43:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 12:43:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 12:43:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 12:43:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 12:43:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 12:43:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 12:43:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 12:43:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 12:43:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:43:40] [Rank 0] PRINT: step:2800/10000 val_loss:6.1887 total_sharp:1.1644e-01 L1_sharp:2.8912e-01 L2_sharp:2.9375e-01 L3_sharp:3.6882e-01 L4_sharp:4.6370e-01 L5_sharp:7.4350e-01 L6_sharp:9.3724e-01 L7_sharp:1.2418e+00 L8_sharp:2.1263e+00 L9_sharp:3.2456e+00 L10_sharp:4.2541e+00 L11_sharp:4.3356e+00 L12_sharp:3.5768e+00 total_fnorm:1.7734e+00 total_l1_linf:1.3920e+03 total_spectral:8.9062e-01 L1_fnorm:4.7607e-02 L2_fnorm:4.7852e-02 L3_fnorm:4.7852e-02 L4_fnorm:4.8096e-02 L5_fnorm:4.7607e-02 L6_fnorm:4.7607e-02 L7_fnorm:4.7607e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.7852e-02 L10_fnorm:4.7119e-02 L11_fnorm:4.6631e-02 L12_fnorm:4.1748e-02 L1_l1linf:1.3550e-02 L2_l1linf:1.3672e-02 L3_l1linf:1.3794e-02 L4_l1linf:1.3977e-02 L5_l1linf:1.4221e-02 L6_l1linf:1.4282e-02 L7_l1linf:1.4465e-02 L8_l1linf:1.5076e-02 L9_l1linf:1.5564e-02 L10_l1linf:1.5991e-02 L11_l1linf:1.5747e-02 L12_l1linf:1.2695e-02 L1_spectral:7.7126e-04 L2_spectral:7.7688e-04 L3_spectral:7.7033e-04 L4_spectral:7.6394e-04 L5_spectral:7.6602e-04 L6_spectral:7.6930e-04 L7_spectral:7.6287e-04 L8_spectral:7.5724e-04 L9_spectral:7.5911e-04 L10_spectral:7.5621e-04 L11_spectral:7.5303e-04 L12_spectral:6.8805e-04 train_time:131331ms step_avg:46.90ms +[2025-09-11 12:43:40] [Rank 0] PRINT: step:2800/10000 val_loss:6.1887 total_sharp:1.1644e-01 L1_sharp:2.8912e-01 L2_sharp:2.9375e-01 L3_sharp:3.6882e-01 L4_sharp:4.6370e-01 L5_sharp:7.4350e-01 L6_sharp:9.3724e-01 L7_sharp:1.2418e+00 L8_sharp:2.1263e+00 L9_sharp:3.2456e+00 L10_sharp:4.2541e+00 L11_sharp:4.3356e+00 L12_sharp:3.5768e+00 total_fnorm:1.7734e+00 total_l1_linf:1.3920e+03 total_spectral:8.9062e-01 L1_fnorm:4.7607e-02 L2_fnorm:4.7852e-02 L3_fnorm:4.7852e-02 L4_fnorm:4.8096e-02 L5_fnorm:4.7607e-02 L6_fnorm:4.7607e-02 L7_fnorm:4.7607e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.7852e-02 L10_fnorm:4.7119e-02 L11_fnorm:4.6631e-02 L12_fnorm:4.1748e-02 L1_l1linf:1.3550e-02 L2_l1linf:1.3672e-02 L3_l1linf:1.3794e-02 L4_l1linf:1.3977e-02 L5_l1linf:1.4221e-02 
L6_l1linf:1.4282e-02 L7_l1linf:1.4465e-02 L8_l1linf:1.5076e-02 L9_l1linf:1.5564e-02 L10_l1linf:1.5991e-02 L11_l1linf:1.5747e-02 L12_l1linf:1.2695e-02 L1_spectral:7.7126e-04 L2_spectral:7.7688e-04 L3_spectral:7.7033e-04 L4_spectral:7.6394e-04 L5_spectral:7.6602e-04 L6_spectral:7.6930e-04 L7_spectral:7.6287e-04 L8_spectral:7.5724e-04 L9_spectral:7.5911e-04 L10_spectral:7.5621e-04 L11_spectral:7.5303e-04 L12_spectral:6.8805e-04 train_time:131331ms step_avg:46.90ms +[2025-09-11 12:43:42] [Rank 0] step:2801/10000 train_time:132733ms step_avg:47.39ms +[2025-09-11 12:43:42] [Rank 0] step:2801/10000 train_time:132733ms step_avg:47.39ms +[2025-09-11 12:43:42] [Rank 0] step:2821/10000 train_time:133399ms step_avg:47.29ms +[2025-09-11 12:43:42] [Rank 0] step:2821/10000 train_time:133399ms step_avg:47.29ms +[2025-09-11 12:43:43] [Rank 0] step:2841/10000 train_time:134061ms step_avg:47.19ms +[2025-09-11 12:43:43] [Rank 0] step:2841/10000 train_time:134061ms step_avg:47.19ms +[2025-09-11 12:43:44] [Rank 0] step:2861/10000 train_time:134723ms step_avg:47.09ms +[2025-09-11 12:43:44] [Rank 0] step:2861/10000 train_time:134723ms step_avg:47.09ms +[2025-09-11 12:43:44] [Rank 0] step:2881/10000 train_time:135385ms step_avg:46.99ms +[2025-09-11 12:43:44] [Rank 0] step:2881/10000 train_time:135385ms step_avg:46.99ms +[2025-09-11 12:43:45] [Rank 0] step:2901/10000 train_time:136047ms step_avg:46.90ms +[2025-09-11 12:43:45] [Rank 0] step:2901/10000 train_time:136047ms step_avg:46.90ms +[2025-09-11 12:43:46] [Rank 0] step:2921/10000 train_time:136967ms step_avg:46.89ms +[2025-09-11 12:43:46] [Rank 0] step:2921/10000 train_time:136967ms step_avg:46.89ms +[2025-09-11 12:43:47] [Rank 0] step:2941/10000 train_time:137629ms step_avg:46.80ms +[2025-09-11 12:43:47] [Rank 0] step:2941/10000 train_time:137629ms step_avg:46.80ms +[2025-09-11 12:43:47] [Rank 0] step:2961/10000 train_time:138290ms step_avg:46.70ms +[2025-09-11 12:43:47] [Rank 0] step:2961/10000 train_time:138290ms step_avg:46.70ms 
+[2025-09-11 12:43:48] [Rank 0] step:2981/10000 train_time:139252ms step_avg:46.71ms +[2025-09-11 12:43:48] [Rank 0] step:2981/10000 train_time:139252ms step_avg:46.71ms +[2025-09-11 12:43:49] [Rank 0] step:3001/10000 train_time:139917ms step_avg:46.62ms +[2025-09-11 12:43:49] [Rank 0] step:3001/10000 train_time:139917ms step_avg:46.62ms +[2025-09-11 12:43:50] [Rank 0] step:3021/10000 train_time:140580ms step_avg:46.53ms +[2025-09-11 12:43:50] [Rank 0] step:3021/10000 train_time:140580ms step_avg:46.53ms +[2025-09-11 12:43:50] [Rank 0] step:3041/10000 train_time:141244ms step_avg:46.45ms +[2025-09-11 12:43:50] [Rank 0] step:3041/10000 train_time:141244ms step_avg:46.45ms +[2025-09-11 12:43:51] [Rank 0] step:3061/10000 train_time:141908ms step_avg:46.36ms +[2025-09-11 12:43:51] [Rank 0] step:3061/10000 train_time:141908ms step_avg:46.36ms +[2025-09-11 12:43:52] [Rank 0] step:3081/10000 train_time:142572ms step_avg:46.27ms +[2025-09-11 12:43:52] [Rank 0] step:3081/10000 train_time:142572ms step_avg:46.27ms +[2025-09-11 12:43:52] [Rank 0] step:3101/10000 train_time:143237ms step_avg:46.19ms +[2025-09-11 12:43:52] [Rank 0] step:3101/10000 train_time:143237ms step_avg:46.19ms +[2025-09-11 12:43:53] [Rank 0] step:3121/10000 train_time:143901ms step_avg:46.11ms +[2025-09-11 12:43:53] [Rank 0] step:3121/10000 train_time:143901ms step_avg:46.11ms +[2025-09-11 12:43:54] [Rank 0] step:3141/10000 train_time:144565ms step_avg:46.03ms +[2025-09-11 12:43:54] [Rank 0] step:3141/10000 train_time:144565ms step_avg:46.03ms +[2025-09-11 12:43:54] [Rank 0] step:3161/10000 train_time:145229ms step_avg:45.94ms +[2025-09-11 12:43:54] [Rank 0] step:3161/10000 train_time:145229ms step_avg:45.94ms +[2025-09-11 12:43:55] [Rank 0] step:3181/10000 train_time:145892ms step_avg:45.86ms +[2025-09-11 12:43:55] [Rank 0] step:3181/10000 train_time:145892ms step_avg:45.86ms +[2025-09-11 12:43:56] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 12:43:56] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 12:43:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 12:43:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 12:43:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 12:43:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 12:43:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:43:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:43:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 12:43:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 12:43:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 12:43:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 12:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 12:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 12:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 12:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 12:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 12:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 12:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 12:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 12:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 12:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 12:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 12:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 12:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 12:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 12:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 12:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 12:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 12:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 12:44:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 12:44:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 12:44:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 12:44:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 12:44:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 12:44:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 12:44:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 12:44:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 12:44:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 12:44:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 12:44:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 12:44:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 12:44:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 12:44:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 12:44:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 12:44:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 12:44:06] [Rank 0] PRINT: step:3200/10000 val_loss:6.1090 total_sharp:1.0013e-01 L1_sharp:2.4756e-01 L2_sharp:2.4463e-01 L3_sharp:2.9243e-01 L4_sharp:3.7624e-01 L5_sharp:5.5536e-01 L6_sharp:9.4511e-01 L7_sharp:1.2983e+00 L8_sharp:1.9353e+00 L9_sharp:3.0590e+00 L10_sharp:3.3675e+00 L11_sharp:3.5665e+00 L12_sharp:3.6140e+00 total_fnorm:1.8203e+00 total_l1_linf:1.4880e+03 total_spectral:9.1406e-01 L1_fnorm:4.7363e-02 L2_fnorm:4.7363e-02 L3_fnorm:4.7607e-02 L4_fnorm:4.7852e-02 L5_fnorm:4.7607e-02 L6_fnorm:4.7607e-02 L7_fnorm:4.7607e-02 L8_fnorm:4.6875e-02 L9_fnorm:4.7852e-02 L10_fnorm:4.7607e-02 L11_fnorm:4.7852e-02 L12_fnorm:4.3945e-02 L1_l1linf:1.2939e-02 L2_l1linf:1.3000e-02 L3_l1linf:1.2939e-02 L4_l1linf:1.3245e-02 L5_l1linf:1.3245e-02 L6_l1linf:1.3550e-02 L7_l1linf:1.3855e-02 L8_l1linf:1.3916e-02 L9_l1linf:1.4465e-02 L10_l1linf:1.4648e-02 L11_l1linf:1.5381e-02 L12_l1linf:1.2878e-02 L1_spectral:7.7808e-04 L2_spectral:7.7844e-04 L3_spectral:7.7146e-04 L4_spectral:7.7532e-04 L5_spectral:7.7500e-04 L6_spectral:7.7719e-04 L7_spectral:7.7929e-04 L8_spectral:7.7353e-04 L9_spectral:7.7838e-04 L10_spectral:7.7319e-04 L11_spectral:7.7296e-04 L12_spectral:7.2834e-04 train_time:146538ms step_avg:45.79ms +[2025-09-11 12:44:06] [Rank 0] PRINT: step:3200/10000 
val_loss:6.1090 total_sharp:1.0013e-01 L1_sharp:2.4756e-01 L2_sharp:2.4463e-01 L3_sharp:2.9243e-01 L4_sharp:3.7624e-01 L5_sharp:5.5536e-01 L6_sharp:9.4511e-01 L7_sharp:1.2983e+00 L8_sharp:1.9353e+00 L9_sharp:3.0590e+00 L10_sharp:3.3675e+00 L11_sharp:3.5665e+00 L12_sharp:3.6140e+00 total_fnorm:1.8203e+00 total_l1_linf:1.4880e+03 total_spectral:9.1406e-01 L1_fnorm:4.7363e-02 L2_fnorm:4.7363e-02 L3_fnorm:4.7607e-02 L4_fnorm:4.7852e-02 L5_fnorm:4.7607e-02 L6_fnorm:4.7607e-02 L7_fnorm:4.7607e-02 L8_fnorm:4.6875e-02 L9_fnorm:4.7852e-02 L10_fnorm:4.7607e-02 L11_fnorm:4.7852e-02 L12_fnorm:4.3945e-02 L1_l1linf:1.2939e-02 L2_l1linf:1.3000e-02 L3_l1linf:1.2939e-02 L4_l1linf:1.3245e-02 L5_l1linf:1.3245e-02 L6_l1linf:1.3550e-02 L7_l1linf:1.3855e-02 L8_l1linf:1.3916e-02 L9_l1linf:1.4465e-02 L10_l1linf:1.4648e-02 L11_l1linf:1.5381e-02 L12_l1linf:1.2878e-02 L1_spectral:7.7808e-04 L2_spectral:7.7844e-04 L3_spectral:7.7146e-04 L4_spectral:7.7532e-04 L5_spectral:7.7500e-04 L6_spectral:7.7719e-04 L7_spectral:7.7929e-04 L8_spectral:7.7353e-04 L9_spectral:7.7838e-04 L10_spectral:7.7319e-04 L11_spectral:7.7296e-04 L12_spectral:7.2834e-04 train_time:146538ms step_avg:45.79ms +[2025-09-11 12:44:07] [Rank 0] step:3201/10000 train_time:147945ms step_avg:46.22ms +[2025-09-11 12:44:07] [Rank 0] step:3201/10000 train_time:147945ms step_avg:46.22ms +[2025-09-11 12:44:08] [Rank 0] step:3221/10000 train_time:148613ms step_avg:46.14ms +[2025-09-11 12:44:08] [Rank 0] step:3221/10000 train_time:148613ms step_avg:46.14ms +[2025-09-11 12:44:08] [Rank 0] step:3241/10000 train_time:149278ms step_avg:46.06ms +[2025-09-11 12:44:08] [Rank 0] step:3241/10000 train_time:149278ms step_avg:46.06ms +[2025-09-11 12:44:09] [Rank 0] step:3261/10000 train_time:149944ms step_avg:45.98ms +[2025-09-11 12:44:09] [Rank 0] step:3261/10000 train_time:149944ms step_avg:45.98ms +[2025-09-11 12:44:10] [Rank 0] step:3281/10000 train_time:150608ms step_avg:45.90ms +[2025-09-11 12:44:10] [Rank 0] step:3281/10000 
train_time:150608ms step_avg:45.90ms +[2025-09-11 12:44:10] [Rank 0] step:3301/10000 train_time:151272ms step_avg:45.83ms +[2025-09-11 12:44:10] [Rank 0] step:3301/10000 train_time:151272ms step_avg:45.83ms +[2025-09-11 12:44:11] [Rank 0] step:3321/10000 train_time:151936ms step_avg:45.75ms +[2025-09-11 12:44:11] [Rank 0] step:3321/10000 train_time:151936ms step_avg:45.75ms +[2025-09-11 12:44:12] [Rank 0] step:3341/10000 train_time:152600ms step_avg:45.67ms +[2025-09-11 12:44:12] [Rank 0] step:3341/10000 train_time:152600ms step_avg:45.67ms +[2025-09-11 12:44:12] [Rank 0] step:3361/10000 train_time:153264ms step_avg:45.60ms +[2025-09-11 12:44:12] [Rank 0] step:3361/10000 train_time:153264ms step_avg:45.60ms +[2025-09-11 12:44:13] [Rank 0] step:3381/10000 train_time:153928ms step_avg:45.53ms +[2025-09-11 12:44:13] [Rank 0] step:3381/10000 train_time:153928ms step_avg:45.53ms +[2025-09-11 12:44:14] [Rank 0] step:3401/10000 train_time:154592ms step_avg:45.45ms +[2025-09-11 12:44:14] [Rank 0] step:3401/10000 train_time:154592ms step_avg:45.45ms +[2025-09-11 12:44:14] [Rank 0] step:3421/10000 train_time:155255ms step_avg:45.38ms +[2025-09-11 12:44:14] [Rank 0] step:3421/10000 train_time:155255ms step_avg:45.38ms +[2025-09-11 12:44:15] [Rank 0] step:3441/10000 train_time:155919ms step_avg:45.31ms +[2025-09-11 12:44:15] [Rank 0] step:3441/10000 train_time:155919ms step_avg:45.31ms +[2025-09-11 12:44:16] [Rank 0] step:3461/10000 train_time:156582ms step_avg:45.24ms +[2025-09-11 12:44:16] [Rank 0] step:3461/10000 train_time:156582ms step_avg:45.24ms +[2025-09-11 12:44:16] [Rank 0] step:3481/10000 train_time:157245ms step_avg:45.17ms +[2025-09-11 12:44:16] [Rank 0] step:3481/10000 train_time:157245ms step_avg:45.17ms +[2025-09-11 12:44:17] [Rank 0] step:3501/10000 train_time:157909ms step_avg:45.10ms +[2025-09-11 12:44:17] [Rank 0] step:3501/10000 train_time:157909ms step_avg:45.10ms +[2025-09-11 12:44:18] [Rank 0] step:3521/10000 train_time:158572ms step_avg:45.04ms 
+[2025-09-11 12:44:18] [Rank 0] step:3521/10000 train_time:158572ms step_avg:45.04ms +[2025-09-11 12:44:18] [Rank 0] step:3541/10000 train_time:159236ms step_avg:44.97ms +[2025-09-11 12:44:18] [Rank 0] step:3541/10000 train_time:159236ms step_avg:44.97ms +[2025-09-11 12:44:19] [Rank 0] step:3561/10000 train_time:159899ms step_avg:44.90ms +[2025-09-11 12:44:19] [Rank 0] step:3561/10000 train_time:159899ms step_avg:44.90ms +[2025-09-11 12:44:20] [Rank 0] step:3581/10000 train_time:160563ms step_avg:44.84ms +[2025-09-11 12:44:20] [Rank 0] step:3581/10000 train_time:160563ms step_avg:44.84ms +[2025-09-11 12:44:20] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 12:44:20] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 12:44:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 12:44:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 12:44:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 12:44:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 12:44:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:44:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:44:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 12:44:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 12:44:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 12:44:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 12:44:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 12:44:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 12:44:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 12:44:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 12:44:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 12:44:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 12:44:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 12:44:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 12:44:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 12:44:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 12:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 12:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 12:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 12:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 12:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 12:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 12:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 12:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 12:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 12:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 12:44:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 12:44:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 12:44:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 12:44:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 12:44:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 12:44:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 12:44:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 12:44:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 12:44:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 12:44:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 12:44:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 12:44:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 12:44:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 12:44:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:44:30] [Rank 0] PRINT: step:3600/10000 val_loss:6.0531 total_sharp:1.0095e-01 L1_sharp:2.2054e-01 L2_sharp:2.1060e-01 L3_sharp:2.5326e-01 L4_sharp:3.2982e-01 L5_sharp:4.6142e-01 L6_sharp:7.7484e-01 L7_sharp:1.1715e+00 L8_sharp:1.7729e+00 L9_sharp:3.3193e+00 L10_sharp:3.4736e+00 L11_sharp:2.7913e+00 L12_sharp:2.4923e+00 total_fnorm:1.6484e+00 total_l1_linf:1.2960e+03 total_spectral:8.2812e-01 L1_fnorm:4.7363e-02 L2_fnorm:4.7363e-02 L3_fnorm:4.7363e-02 L4_fnorm:4.7607e-02 L5_fnorm:4.7363e-02 L6_fnorm:4.7363e-02 L7_fnorm:4.7607e-02 L8_fnorm:4.6875e-02 L9_fnorm:4.7607e-02 L10_fnorm:4.7363e-02 L11_fnorm:4.7852e-02 L12_fnorm:4.4434e-02 L1_l1linf:1.2329e-02 L2_l1linf:1.2390e-02 L3_l1linf:1.2573e-02 L4_l1linf:1.2817e-02 L5_l1linf:1.2878e-02 L6_l1linf:1.3062e-02 L7_l1linf:1.3245e-02 L8_l1linf:1.3550e-02 L9_l1linf:1.4282e-02 L10_l1linf:1.4404e-02 L11_l1linf:1.5076e-02 L12_l1linf:1.3123e-02 L1_spectral:7.8750e-04 L2_spectral:7.7993e-04 L3_spectral:7.8211e-04 L4_spectral:7.8165e-04 L5_spectral:7.7970e-04 L6_spectral:7.8218e-04 L7_spectral:7.8672e-04 L8_spectral:7.7474e-04 L9_spectral:7.8032e-04 L10_spectral:7.7808e-04 L11_spectral:7.7357e-04 L12_spectral:7.2888e-04 train_time:161208ms step_avg:44.78ms +[2025-09-11 12:44:30] [Rank 0] PRINT: step:3600/10000 val_loss:6.0531 total_sharp:1.0095e-01 L1_sharp:2.2054e-01 L2_sharp:2.1060e-01 L3_sharp:2.5326e-01 L4_sharp:3.2982e-01 L5_sharp:4.6142e-01 L6_sharp:7.7484e-01 L7_sharp:1.1715e+00 L8_sharp:1.7729e+00 L9_sharp:3.3193e+00 L10_sharp:3.4736e+00 L11_sharp:2.7913e+00 L12_sharp:2.4923e+00 total_fnorm:1.6484e+00 total_l1_linf:1.2960e+03 total_spectral:8.2812e-01 L1_fnorm:4.7363e-02 L2_fnorm:4.7363e-02 L3_fnorm:4.7363e-02 L4_fnorm:4.7607e-02 L5_fnorm:4.7363e-02 L6_fnorm:4.7363e-02 L7_fnorm:4.7607e-02 L8_fnorm:4.6875e-02 L9_fnorm:4.7607e-02 L10_fnorm:4.7363e-02 L11_fnorm:4.7852e-02 L12_fnorm:4.4434e-02 L1_l1linf:1.2329e-02 L2_l1linf:1.2390e-02 L3_l1linf:1.2573e-02 L4_l1linf:1.2817e-02 L5_l1linf:1.2878e-02 
L6_l1linf:1.3062e-02 L7_l1linf:1.3245e-02 L8_l1linf:1.3550e-02 L9_l1linf:1.4282e-02 L10_l1linf:1.4404e-02 L11_l1linf:1.5076e-02 L12_l1linf:1.3123e-02 L1_spectral:7.8750e-04 L2_spectral:7.7993e-04 L3_spectral:7.8211e-04 L4_spectral:7.8165e-04 L5_spectral:7.7970e-04 L6_spectral:7.8218e-04 L7_spectral:7.8672e-04 L8_spectral:7.7474e-04 L9_spectral:7.8032e-04 L10_spectral:7.7808e-04 L11_spectral:7.7357e-04 L12_spectral:7.2888e-04 train_time:161208ms step_avg:44.78ms +[2025-09-11 12:44:32] [Rank 0] step:3601/10000 train_time:162602ms step_avg:45.15ms +[2025-09-11 12:44:32] [Rank 0] step:3601/10000 train_time:162602ms step_avg:45.15ms +[2025-09-11 12:44:33] [Rank 0] step:3621/10000 train_time:163279ms step_avg:45.09ms +[2025-09-11 12:44:33] [Rank 0] step:3621/10000 train_time:163279ms step_avg:45.09ms +[2025-09-11 12:44:33] [Rank 0] step:3641/10000 train_time:163944ms step_avg:45.03ms +[2025-09-11 12:44:33] [Rank 0] step:3641/10000 train_time:163944ms step_avg:45.03ms +[2025-09-11 12:44:34] [Rank 0] step:3661/10000 train_time:164607ms step_avg:44.96ms +[2025-09-11 12:44:34] [Rank 0] step:3661/10000 train_time:164607ms step_avg:44.96ms +[2025-09-11 12:44:35] [Rank 0] step:3681/10000 train_time:165272ms step_avg:44.90ms +[2025-09-11 12:44:35] [Rank 0] step:3681/10000 train_time:165272ms step_avg:44.90ms +[2025-09-11 12:44:35] [Rank 0] step:3701/10000 train_time:165935ms step_avg:44.84ms +[2025-09-11 12:44:35] [Rank 0] step:3701/10000 train_time:165935ms step_avg:44.84ms +[2025-09-11 12:44:36] [Rank 0] step:3721/10000 train_time:166609ms step_avg:44.78ms +[2025-09-11 12:44:36] [Rank 0] step:3721/10000 train_time:166609ms step_avg:44.78ms +[2025-09-11 12:44:37] [Rank 0] step:3741/10000 train_time:167284ms step_avg:44.72ms +[2025-09-11 12:44:37] [Rank 0] step:3741/10000 train_time:167284ms step_avg:44.72ms +[2025-09-11 12:44:37] [Rank 0] step:3761/10000 train_time:167959ms step_avg:44.66ms +[2025-09-11 12:44:37] [Rank 0] step:3761/10000 train_time:167959ms step_avg:44.66ms 
+[2025-09-11 12:44:38] [Rank 0] step:3781/10000 train_time:168633ms step_avg:44.60ms +[2025-09-11 12:44:38] [Rank 0] step:3781/10000 train_time:168633ms step_avg:44.60ms +[2025-09-11 12:44:39] [Rank 0] step:3801/10000 train_time:169308ms step_avg:44.54ms +[2025-09-11 12:44:39] [Rank 0] step:3801/10000 train_time:169308ms step_avg:44.54ms +[2025-09-11 12:44:39] [Rank 0] step:3821/10000 train_time:169983ms step_avg:44.49ms +[2025-09-11 12:44:39] [Rank 0] step:3821/10000 train_time:169983ms step_avg:44.49ms +[2025-09-11 12:44:40] [Rank 0] step:3841/10000 train_time:170659ms step_avg:44.43ms +[2025-09-11 12:44:40] [Rank 0] step:3841/10000 train_time:170659ms step_avg:44.43ms +[2025-09-11 12:44:41] [Rank 0] step:3861/10000 train_time:171333ms step_avg:44.38ms +[2025-09-11 12:44:41] [Rank 0] step:3861/10000 train_time:171333ms step_avg:44.38ms +[2025-09-11 12:44:41] [Rank 0] step:3881/10000 train_time:172007ms step_avg:44.32ms +[2025-09-11 12:44:41] [Rank 0] step:3881/10000 train_time:172007ms step_avg:44.32ms +[2025-09-11 12:44:42] [Rank 0] step:3901/10000 train_time:172681ms step_avg:44.27ms +[2025-09-11 12:44:42] [Rank 0] step:3901/10000 train_time:172681ms step_avg:44.27ms +[2025-09-11 12:44:43] [Rank 0] step:3921/10000 train_time:173356ms step_avg:44.21ms +[2025-09-11 12:44:43] [Rank 0] step:3921/10000 train_time:173356ms step_avg:44.21ms +[2025-09-11 12:44:43] [Rank 0] step:3941/10000 train_time:174032ms step_avg:44.16ms +[2025-09-11 12:44:43] [Rank 0] step:3941/10000 train_time:174032ms step_avg:44.16ms +[2025-09-11 12:44:44] [Rank 0] step:3961/10000 train_time:174706ms step_avg:44.11ms +[2025-09-11 12:44:44] [Rank 0] step:3961/10000 train_time:174706ms step_avg:44.11ms +[2025-09-11 12:44:45] [Rank 0] step:3981/10000 train_time:175381ms step_avg:44.05ms +[2025-09-11 12:44:45] [Rank 0] step:3981/10000 train_time:175381ms step_avg:44.05ms +[2025-09-11 12:44:45] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 12:44:45] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 12:44:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 12:44:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 12:44:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 12:44:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 12:44:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:44:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:44:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 12:44:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 12:44:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 12:44:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 12:44:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 12:44:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 12:44:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 12:44:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 12:44:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 12:44:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 12:44:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 12:44:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 12:44:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 12:44:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 12:44:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 12:44:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 12:44:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 12:44:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 12:44:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 12:44:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 12:44:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 12:44:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 12:44:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 12:44:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 12:44:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 12:44:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 12:44:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 12:44:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 12:44:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 12:44:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 12:44:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 12:44:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 12:44:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 12:44:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 12:44:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 12:44:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 12:44:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 12:44:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 12:44:56] [Rank 0] PRINT: step:4000/10000 val_loss:5.9956 total_sharp:1.2185e-01 L1_sharp:1.9499e-01 L2_sharp:2.3006e-01 L3_sharp:3.0649e-01 L4_sharp:4.1923e-01 L5_sharp:5.8563e-01 L6_sharp:7.9001e-01 L7_sharp:1.1877e+00 L8_sharp:2.2120e+00 L9_sharp:3.4508e+00 L10_sharp:5.3228e+00 L11_sharp:4.8575e+00 L12_sharp:3.7859e+00 total_fnorm:1.7188e+00 total_l1_linf:1.3200e+03 total_spectral:8.6328e-01 L1_fnorm:4.7119e-02 L2_fnorm:4.7119e-02 L3_fnorm:4.7607e-02 L4_fnorm:4.7607e-02 L5_fnorm:4.7119e-02 L6_fnorm:4.7363e-02 L7_fnorm:4.7363e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.7363e-02 L10_fnorm:4.7119e-02 L11_fnorm:4.7363e-02 L12_fnorm:4.2725e-02 L1_l1linf:1.2573e-02 L2_l1linf:1.2573e-02 L3_l1linf:1.2512e-02 L4_l1linf:1.2817e-02 L5_l1linf:1.3245e-02 L6_l1linf:1.3428e-02 L7_l1linf:1.3916e-02 L8_l1linf:1.4038e-02 L9_l1linf:1.4465e-02 L10_l1linf:1.4709e-02 L11_l1linf:1.5137e-02 L12_l1linf:1.2268e-02 L1_spectral:7.8389e-04 L2_spectral:7.8720e-04 L3_spectral:7.8365e-04 L4_spectral:7.8319e-04 L5_spectral:7.7105e-04 L6_spectral:7.8236e-04 L7_spectral:7.7645e-04 L8_spectral:7.6797e-04 L9_spectral:7.7528e-04 L10_spectral:7.7838e-04 L11_spectral:7.7042e-04 L12_spectral:6.9776e-04 train_time:176036ms step_avg:44.01ms +[2025-09-11 12:44:56] [Rank 0] PRINT: step:4000/10000 
val_loss:5.9956 total_sharp:1.2185e-01 L1_sharp:1.9499e-01 L2_sharp:2.3006e-01 L3_sharp:3.0649e-01 L4_sharp:4.1923e-01 L5_sharp:5.8563e-01 L6_sharp:7.9001e-01 L7_sharp:1.1877e+00 L8_sharp:2.2120e+00 L9_sharp:3.4508e+00 L10_sharp:5.3228e+00 L11_sharp:4.8575e+00 L12_sharp:3.7859e+00 total_fnorm:1.7188e+00 total_l1_linf:1.3200e+03 total_spectral:8.6328e-01 L1_fnorm:4.7119e-02 L2_fnorm:4.7119e-02 L3_fnorm:4.7607e-02 L4_fnorm:4.7607e-02 L5_fnorm:4.7119e-02 L6_fnorm:4.7363e-02 L7_fnorm:4.7363e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.7363e-02 L10_fnorm:4.7119e-02 L11_fnorm:4.7363e-02 L12_fnorm:4.2725e-02 L1_l1linf:1.2573e-02 L2_l1linf:1.2573e-02 L3_l1linf:1.2512e-02 L4_l1linf:1.2817e-02 L5_l1linf:1.3245e-02 L6_l1linf:1.3428e-02 L7_l1linf:1.3916e-02 L8_l1linf:1.4038e-02 L9_l1linf:1.4465e-02 L10_l1linf:1.4709e-02 L11_l1linf:1.5137e-02 L12_l1linf:1.2268e-02 L1_spectral:7.8389e-04 L2_spectral:7.8720e-04 L3_spectral:7.8365e-04 L4_spectral:7.8319e-04 L5_spectral:7.7105e-04 L6_spectral:7.8236e-04 L7_spectral:7.7645e-04 L8_spectral:7.6797e-04 L9_spectral:7.7528e-04 L10_spectral:7.7838e-04 L11_spectral:7.7042e-04 L12_spectral:6.9776e-04 train_time:176036ms step_avg:44.01ms +[2025-09-11 12:44:57] [Rank 0] step:4001/10000 train_time:177464ms step_avg:44.35ms +[2025-09-11 12:44:57] [Rank 0] step:4001/10000 train_time:177464ms step_avg:44.35ms +[2025-09-11 12:44:58] [Rank 0] step:4021/10000 train_time:178172ms step_avg:44.31ms +[2025-09-11 12:44:58] [Rank 0] step:4021/10000 train_time:178172ms step_avg:44.31ms +[2025-09-11 12:44:59] [Rank 0] step:4041/10000 train_time:178848ms step_avg:44.26ms +[2025-09-11 12:44:59] [Rank 0] step:4041/10000 train_time:178848ms step_avg:44.26ms +[2025-09-11 12:44:59] [Rank 0] step:4061/10000 train_time:179521ms step_avg:44.21ms +[2025-09-11 12:44:59] [Rank 0] step:4061/10000 train_time:179521ms step_avg:44.21ms +[2025-09-11 12:45:00] [Rank 0] step:4081/10000 train_time:180196ms step_avg:44.15ms +[2025-09-11 12:45:00] [Rank 0] step:4081/10000 
train_time:180196ms step_avg:44.15ms +[2025-09-11 12:45:01] [Rank 0] step:4101/10000 train_time:180871ms step_avg:44.10ms +[2025-09-11 12:45:01] [Rank 0] step:4101/10000 train_time:180871ms step_avg:44.10ms +[2025-09-11 12:45:01] [Rank 0] step:4121/10000 train_time:181603ms step_avg:44.07ms +[2025-09-11 12:45:01] [Rank 0] step:4121/10000 train_time:181603ms step_avg:44.07ms +[2025-09-11 12:45:02] [Rank 0] step:4141/10000 train_time:182357ms step_avg:44.04ms +[2025-09-11 12:45:02] [Rank 0] step:4141/10000 train_time:182357ms step_avg:44.04ms +[2025-09-11 12:45:03] [Rank 0] step:4161/10000 train_time:183031ms step_avg:43.99ms +[2025-09-11 12:45:03] [Rank 0] step:4161/10000 train_time:183031ms step_avg:43.99ms +[2025-09-11 12:45:03] [Rank 0] step:4181/10000 train_time:183706ms step_avg:43.94ms +[2025-09-11 12:45:03] [Rank 0] step:4181/10000 train_time:183706ms step_avg:43.94ms +[2025-09-11 12:45:04] [Rank 0] step:4201/10000 train_time:184381ms step_avg:43.89ms +[2025-09-11 12:45:04] [Rank 0] step:4201/10000 train_time:184381ms step_avg:43.89ms +[2025-09-11 12:45:05] [Rank 0] step:4221/10000 train_time:185055ms step_avg:43.84ms +[2025-09-11 12:45:05] [Rank 0] step:4221/10000 train_time:185055ms step_avg:43.84ms +[2025-09-11 12:45:05] [Rank 0] step:4241/10000 train_time:185729ms step_avg:43.79ms +[2025-09-11 12:45:05] [Rank 0] step:4241/10000 train_time:185729ms step_avg:43.79ms +[2025-09-11 12:45:06] [Rank 0] step:4261/10000 train_time:186404ms step_avg:43.75ms +[2025-09-11 12:45:06] [Rank 0] step:4261/10000 train_time:186404ms step_avg:43.75ms +[2025-09-11 12:45:07] [Rank 0] step:4281/10000 train_time:187080ms step_avg:43.70ms +[2025-09-11 12:45:07] [Rank 0] step:4281/10000 train_time:187080ms step_avg:43.70ms +[2025-09-11 12:45:07] [Rank 0] step:4301/10000 train_time:187755ms step_avg:43.65ms +[2025-09-11 12:45:07] [Rank 0] step:4301/10000 train_time:187755ms step_avg:43.65ms +[2025-09-11 12:45:08] [Rank 0] step:4321/10000 train_time:188428ms step_avg:43.61ms 
+[2025-09-11 12:45:08] [Rank 0] step:4321/10000 train_time:188428ms step_avg:43.61ms +[2025-09-11 12:45:09] [Rank 0] step:4341/10000 train_time:189103ms step_avg:43.56ms +[2025-09-11 12:45:09] [Rank 0] step:4341/10000 train_time:189103ms step_avg:43.56ms +[2025-09-11 12:45:09] [Rank 0] step:4361/10000 train_time:189777ms step_avg:43.52ms +[2025-09-11 12:45:09] [Rank 0] step:4361/10000 train_time:189777ms step_avg:43.52ms +[2025-09-11 12:45:10] [Rank 0] step:4381/10000 train_time:190452ms step_avg:43.47ms +[2025-09-11 12:45:10] [Rank 0] step:4381/10000 train_time:190452ms step_avg:43.47ms +[2025-09-11 12:45:11] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 12:45:11] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 12:45:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 12:45:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 12:45:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 12:45:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 12:45:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:45:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:45:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 12:45:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 12:45:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 12:45:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 12:45:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 12:45:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 12:45:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 12:45:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 12:45:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 12:45:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 12:45:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 12:45:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 12:45:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 12:45:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 12:45:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 12:45:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 12:45:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 12:45:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 12:45:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 12:45:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 12:45:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 12:45:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 12:45:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 12:45:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 12:45:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 12:45:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 12:45:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 12:45:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 12:45:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 12:45:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 12:45:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 12:45:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 12:45:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 12:45:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 12:45:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 12:45:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 12:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 12:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:45:21] [Rank 0] PRINT: step:4400/10000 val_loss:5.9503 total_sharp:1.1417e-01 L1_sharp:1.7041e-01 L2_sharp:2.1068e-01 L3_sharp:2.7943e-01 L4_sharp:4.0554e-01 L5_sharp:5.8362e-01 L6_sharp:7.6283e-01 L7_sharp:1.0904e+00 L8_sharp:1.9036e+00 L9_sharp:2.9368e+00 L10_sharp:4.2240e+00 L11_sharp:3.4702e+00 L12_sharp:2.6668e+00 total_fnorm:1.6172e+00 total_l1_linf:1.2160e+03 total_spectral:8.1250e-01 L1_fnorm:4.7119e-02 L2_fnorm:4.7119e-02 L3_fnorm:4.7363e-02 L4_fnorm:4.7363e-02 L5_fnorm:4.7119e-02 L6_fnorm:4.7119e-02 L7_fnorm:4.7119e-02 L8_fnorm:4.6387e-02 L9_fnorm:4.7363e-02 L10_fnorm:4.7119e-02 L11_fnorm:4.7119e-02 L12_fnorm:4.1992e-02 L1_l1linf:1.2695e-02 L2_l1linf:1.2756e-02 L3_l1linf:1.2634e-02 L4_l1linf:1.2939e-02 L5_l1linf:1.3428e-02 L6_l1linf:1.3245e-02 L7_l1linf:1.3611e-02 L8_l1linf:1.4160e-02 L9_l1linf:1.4404e-02 L10_l1linf:1.4893e-02 L11_l1linf:1.5015e-02 L12_l1linf:1.1230e-02 L1_spectral:7.8661e-04 L2_spectral:7.8432e-04 L3_spectral:7.8609e-04 L4_spectral:7.8327e-04 L5_spectral:7.8711e-04 L6_spectral:7.8604e-04 L7_spectral:7.8280e-04 L8_spectral:7.7119e-04 L9_spectral:7.8180e-04 L10_spectral:7.7655e-04 L11_spectral:7.7544e-04 L12_spectral:6.8537e-04 train_time:191107ms step_avg:43.43ms +[2025-09-11 12:45:21] [Rank 0] PRINT: step:4400/10000 val_loss:5.9503 total_sharp:1.1417e-01 L1_sharp:1.7041e-01 L2_sharp:2.1068e-01 L3_sharp:2.7943e-01 L4_sharp:4.0554e-01 L5_sharp:5.8362e-01 L6_sharp:7.6283e-01 L7_sharp:1.0904e+00 L8_sharp:1.9036e+00 L9_sharp:2.9368e+00 L10_sharp:4.2240e+00 L11_sharp:3.4702e+00 L12_sharp:2.6668e+00 total_fnorm:1.6172e+00 total_l1_linf:1.2160e+03 total_spectral:8.1250e-01 L1_fnorm:4.7119e-02 L2_fnorm:4.7119e-02 L3_fnorm:4.7363e-02 L4_fnorm:4.7363e-02 L5_fnorm:4.7119e-02 L6_fnorm:4.7119e-02 L7_fnorm:4.7119e-02 L8_fnorm:4.6387e-02 L9_fnorm:4.7363e-02 L10_fnorm:4.7119e-02 L11_fnorm:4.7119e-02 L12_fnorm:4.1992e-02 L1_l1linf:1.2695e-02 L2_l1linf:1.2756e-02 L3_l1linf:1.2634e-02 L4_l1linf:1.2939e-02 L5_l1linf:1.3428e-02 
L6_l1linf:1.3245e-02 L7_l1linf:1.3611e-02 L8_l1linf:1.4160e-02 L9_l1linf:1.4404e-02 L10_l1linf:1.4893e-02 L11_l1linf:1.5015e-02 L12_l1linf:1.1230e-02 L1_spectral:7.8661e-04 L2_spectral:7.8432e-04 L3_spectral:7.8609e-04 L4_spectral:7.8327e-04 L5_spectral:7.8711e-04 L6_spectral:7.8604e-04 L7_spectral:7.8280e-04 L8_spectral:7.7119e-04 L9_spectral:7.8180e-04 L10_spectral:7.7655e-04 L11_spectral:7.7544e-04 L12_spectral:6.8537e-04 train_time:191107ms step_avg:43.43ms +[2025-09-11 12:45:23] [Rank 0] step:4401/10000 train_time:192558ms step_avg:43.75ms +[2025-09-11 12:45:23] [Rank 0] step:4401/10000 train_time:192558ms step_avg:43.75ms +[2025-09-11 12:45:23] [Rank 0] step:4421/10000 train_time:193265ms step_avg:43.72ms +[2025-09-11 12:45:23] [Rank 0] step:4421/10000 train_time:193265ms step_avg:43.72ms +[2025-09-11 12:45:24] [Rank 0] step:4441/10000 train_time:193942ms step_avg:43.67ms +[2025-09-11 12:45:24] [Rank 0] step:4441/10000 train_time:193942ms step_avg:43.67ms +[2025-09-11 12:45:25] [Rank 0] step:4461/10000 train_time:194619ms step_avg:43.63ms +[2025-09-11 12:45:25] [Rank 0] step:4461/10000 train_time:194619ms step_avg:43.63ms +[2025-09-11 12:45:25] [Rank 0] step:4481/10000 train_time:195296ms step_avg:43.58ms +[2025-09-11 12:45:25] [Rank 0] step:4481/10000 train_time:195296ms step_avg:43.58ms +[2025-09-11 12:45:26] [Rank 0] step:4501/10000 train_time:195974ms step_avg:43.54ms +[2025-09-11 12:45:26] [Rank 0] step:4501/10000 train_time:195974ms step_avg:43.54ms +[2025-09-11 12:45:27] [Rank 0] step:4521/10000 train_time:196651ms step_avg:43.50ms +[2025-09-11 12:45:27] [Rank 0] step:4521/10000 train_time:196651ms step_avg:43.50ms +[2025-09-11 12:45:27] [Rank 0] step:4541/10000 train_time:197330ms step_avg:43.46ms +[2025-09-11 12:45:27] [Rank 0] step:4541/10000 train_time:197330ms step_avg:43.46ms +[2025-09-11 12:45:28] [Rank 0] step:4561/10000 train_time:198007ms step_avg:43.41ms +[2025-09-11 12:45:28] [Rank 0] step:4561/10000 train_time:198007ms step_avg:43.41ms 
+[2025-09-11 12:45:29] [Rank 0] step:4581/10000 train_time:198684ms step_avg:43.37ms +[2025-09-11 12:45:29] [Rank 0] step:4581/10000 train_time:198684ms step_avg:43.37ms +[2025-09-11 12:45:30] [Rank 0] step:4601/10000 train_time:199360ms step_avg:43.33ms +[2025-09-11 12:45:30] [Rank 0] step:4601/10000 train_time:199360ms step_avg:43.33ms +[2025-09-11 12:45:30] [Rank 0] step:4621/10000 train_time:200038ms step_avg:43.29ms +[2025-09-11 12:45:30] [Rank 0] step:4621/10000 train_time:200038ms step_avg:43.29ms +[2025-09-11 12:45:31] [Rank 0] step:4641/10000 train_time:200714ms step_avg:43.25ms +[2025-09-11 12:45:31] [Rank 0] step:4641/10000 train_time:200714ms step_avg:43.25ms +[2025-09-11 12:45:32] [Rank 0] step:4661/10000 train_time:201391ms step_avg:43.21ms +[2025-09-11 12:45:32] [Rank 0] step:4661/10000 train_time:201391ms step_avg:43.21ms +[2025-09-11 12:45:32] [Rank 0] step:4681/10000 train_time:202068ms step_avg:43.17ms +[2025-09-11 12:45:32] [Rank 0] step:4681/10000 train_time:202068ms step_avg:43.17ms +[2025-09-11 12:45:33] [Rank 0] step:4701/10000 train_time:202745ms step_avg:43.13ms +[2025-09-11 12:45:33] [Rank 0] step:4701/10000 train_time:202745ms step_avg:43.13ms +[2025-09-11 12:45:34] [Rank 0] step:4721/10000 train_time:203421ms step_avg:43.09ms +[2025-09-11 12:45:34] [Rank 0] step:4721/10000 train_time:203421ms step_avg:43.09ms +[2025-09-11 12:45:34] [Rank 0] step:4741/10000 train_time:204098ms step_avg:43.05ms +[2025-09-11 12:45:34] [Rank 0] step:4741/10000 train_time:204098ms step_avg:43.05ms +[2025-09-11 12:45:35] [Rank 0] step:4761/10000 train_time:204776ms step_avg:43.01ms +[2025-09-11 12:45:35] [Rank 0] step:4761/10000 train_time:204776ms step_avg:43.01ms +[2025-09-11 12:45:36] [Rank 0] step:4781/10000 train_time:205452ms step_avg:42.97ms +[2025-09-11 12:45:36] [Rank 0] step:4781/10000 train_time:205452ms step_avg:42.97ms +[2025-09-11 12:45:36] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 12:45:36] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 12:45:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 12:45:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 12:45:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 12:45:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 12:45:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:45:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:45:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 12:45:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 12:45:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 12:45:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 12:45:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 12:45:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 12:45:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 12:45:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 12:45:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 12:45:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 12:45:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 12:45:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 12:45:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 12:45:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 12:45:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 12:45:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 12:45:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 12:45:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 12:45:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 12:45:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 12:45:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 12:45:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 12:45:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 12:45:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 12:45:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 12:45:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 12:45:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 12:45:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 12:45:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 12:45:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 12:45:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 12:45:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 12:45:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 12:45:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 12:45:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 12:45:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 12:45:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 12:45:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 12:45:46] [Rank 0] PRINT: step:4800/10000 val_loss:5.9052 total_sharp:1.1760e-01 L1_sharp:1.5392e-01 L2_sharp:1.8354e-01 L3_sharp:2.5179e-01 L4_sharp:3.4885e-01 L5_sharp:5.0651e-01 L6_sharp:7.6579e-01 L7_sharp:1.0825e+00 L8_sharp:1.9692e+00 L9_sharp:2.9714e+00 L10_sharp:4.2007e+00 L11_sharp:3.5193e+00 L12_sharp:2.6488e+00 total_fnorm:1.5703e+00 total_l1_linf:1.1840e+03 total_spectral:7.8906e-01 L1_fnorm:4.6875e-02 L2_fnorm:4.6875e-02 L3_fnorm:4.7119e-02 L4_fnorm:4.7119e-02 L5_fnorm:4.6875e-02 L6_fnorm:4.6875e-02 L7_fnorm:4.7119e-02 L8_fnorm:4.6143e-02 L9_fnorm:4.7363e-02 L10_fnorm:4.6875e-02 L11_fnorm:4.7607e-02 L12_fnorm:4.3945e-02 L1_l1linf:1.1414e-02 L2_l1linf:1.1597e-02 L3_l1linf:1.2207e-02 L4_l1linf:1.2024e-02 L5_l1linf:1.2390e-02 L6_l1linf:1.2451e-02 L7_l1linf:1.2756e-02 L8_l1linf:1.3184e-02 L9_l1linf:1.3611e-02 L10_l1linf:1.3855e-02 L11_l1linf:1.4465e-02 L12_l1linf:1.2207e-02 L1_spectral:7.8831e-04 L2_spectral:7.9362e-04 L3_spectral:7.8973e-04 L4_spectral:7.8777e-04 L5_spectral:7.8846e-04 L6_spectral:7.8851e-04 L7_spectral:7.9257e-04 L8_spectral:7.8609e-04 L9_spectral:7.8629e-04 L10_spectral:7.9024e-04 L11_spectral:7.8656e-04 L12_spectral:7.0925e-04 train_time:206108ms step_avg:42.94ms +[2025-09-11 12:45:46] [Rank 0] PRINT: step:4800/10000 
val_loss:5.9052 total_sharp:1.1760e-01 L1_sharp:1.5392e-01 L2_sharp:1.8354e-01 L3_sharp:2.5179e-01 L4_sharp:3.4885e-01 L5_sharp:5.0651e-01 L6_sharp:7.6579e-01 L7_sharp:1.0825e+00 L8_sharp:1.9692e+00 L9_sharp:2.9714e+00 L10_sharp:4.2007e+00 L11_sharp:3.5193e+00 L12_sharp:2.6488e+00 total_fnorm:1.5703e+00 total_l1_linf:1.1840e+03 total_spectral:7.8906e-01 L1_fnorm:4.6875e-02 L2_fnorm:4.6875e-02 L3_fnorm:4.7119e-02 L4_fnorm:4.7119e-02 L5_fnorm:4.6875e-02 L6_fnorm:4.6875e-02 L7_fnorm:4.7119e-02 L8_fnorm:4.6143e-02 L9_fnorm:4.7363e-02 L10_fnorm:4.6875e-02 L11_fnorm:4.7607e-02 L12_fnorm:4.3945e-02 L1_l1linf:1.1414e-02 L2_l1linf:1.1597e-02 L3_l1linf:1.2207e-02 L4_l1linf:1.2024e-02 L5_l1linf:1.2390e-02 L6_l1linf:1.2451e-02 L7_l1linf:1.2756e-02 L8_l1linf:1.3184e-02 L9_l1linf:1.3611e-02 L10_l1linf:1.3855e-02 L11_l1linf:1.4465e-02 L12_l1linf:1.2207e-02 L1_spectral:7.8831e-04 L2_spectral:7.9362e-04 L3_spectral:7.8973e-04 L4_spectral:7.8777e-04 L5_spectral:7.8846e-04 L6_spectral:7.8851e-04 L7_spectral:7.9257e-04 L8_spectral:7.8609e-04 L9_spectral:7.8629e-04 L10_spectral:7.9024e-04 L11_spectral:7.8656e-04 L12_spectral:7.0925e-04 train_time:206108ms step_avg:42.94ms +[2025-09-11 12:45:48] [Rank 0] step:4801/10000 train_time:207548ms step_avg:43.23ms +[2025-09-11 12:45:48] [Rank 0] step:4801/10000 train_time:207548ms step_avg:43.23ms +[2025-09-11 12:45:49] [Rank 0] step:4821/10000 train_time:208248ms step_avg:43.20ms +[2025-09-11 12:45:49] [Rank 0] step:4821/10000 train_time:208248ms step_avg:43.20ms +[2025-09-11 12:45:49] [Rank 0] step:4841/10000 train_time:208927ms step_avg:43.16ms +[2025-09-11 12:45:49] [Rank 0] step:4841/10000 train_time:208927ms step_avg:43.16ms +[2025-09-11 12:45:50] [Rank 0] step:4861/10000 train_time:209605ms step_avg:43.12ms +[2025-09-11 12:45:50] [Rank 0] step:4861/10000 train_time:209605ms step_avg:43.12ms +[2025-09-11 12:45:51] [Rank 0] step:4881/10000 train_time:210283ms step_avg:43.08ms +[2025-09-11 12:45:51] [Rank 0] step:4881/10000 
train_time:210283ms step_avg:43.08ms +[2025-09-11 12:45:51] [Rank 0] step:4901/10000 train_time:210962ms step_avg:43.04ms +[2025-09-11 12:45:51] [Rank 0] step:4901/10000 train_time:210962ms step_avg:43.04ms +[2025-09-11 12:45:52] [Rank 0] step:4921/10000 train_time:211920ms step_avg:43.06ms +[2025-09-11 12:45:52] [Rank 0] step:4921/10000 train_time:211920ms step_avg:43.06ms +[2025-09-11 12:45:53] [Rank 0] step:4941/10000 train_time:212598ms step_avg:43.03ms +[2025-09-11 12:45:53] [Rank 0] step:4941/10000 train_time:212598ms step_avg:43.03ms +[2025-09-11 12:45:54] [Rank 0] step:4961/10000 train_time:213276ms step_avg:42.99ms +[2025-09-11 12:45:54] [Rank 0] step:4961/10000 train_time:213276ms step_avg:42.99ms +[2025-09-11 12:45:55] [Rank 0] step:4981/10000 train_time:214222ms step_avg:43.01ms +[2025-09-11 12:45:55] [Rank 0] step:4981/10000 train_time:214222ms step_avg:43.01ms +[2025-09-11 12:45:55] [Rank 0] step:5001/10000 train_time:214901ms step_avg:42.97ms +[2025-09-11 12:45:55] [Rank 0] step:5001/10000 train_time:214901ms step_avg:42.97ms +[2025-09-11 12:45:56] [Rank 0] step:5021/10000 train_time:215578ms step_avg:42.94ms +[2025-09-11 12:45:56] [Rank 0] step:5021/10000 train_time:215578ms step_avg:42.94ms +[2025-09-11 12:45:57] [Rank 0] step:5041/10000 train_time:216254ms step_avg:42.90ms +[2025-09-11 12:45:57] [Rank 0] step:5041/10000 train_time:216254ms step_avg:42.90ms +[2025-09-11 12:45:57] [Rank 0] step:5061/10000 train_time:216932ms step_avg:42.86ms +[2025-09-11 12:45:57] [Rank 0] step:5061/10000 train_time:216932ms step_avg:42.86ms +[2025-09-11 12:45:58] [Rank 0] step:5081/10000 train_time:217609ms step_avg:42.83ms +[2025-09-11 12:45:58] [Rank 0] step:5081/10000 train_time:217609ms step_avg:42.83ms +[2025-09-11 12:45:59] [Rank 0] step:5101/10000 train_time:218287ms step_avg:42.79ms +[2025-09-11 12:45:59] [Rank 0] step:5101/10000 train_time:218287ms step_avg:42.79ms +[2025-09-11 12:45:59] [Rank 0] step:5121/10000 train_time:218964ms step_avg:42.76ms 
+[2025-09-11 12:45:59] [Rank 0] step:5121/10000 train_time:218964ms step_avg:42.76ms +[2025-09-11 12:46:00] [Rank 0] step:5141/10000 train_time:219642ms step_avg:42.72ms +[2025-09-11 12:46:00] [Rank 0] step:5141/10000 train_time:219642ms step_avg:42.72ms +[2025-09-11 12:46:01] [Rank 0] step:5161/10000 train_time:220321ms step_avg:42.69ms +[2025-09-11 12:46:01] [Rank 0] step:5161/10000 train_time:220321ms step_avg:42.69ms +[2025-09-11 12:46:01] [Rank 0] step:5181/10000 train_time:220998ms step_avg:42.66ms +[2025-09-11 12:46:01] [Rank 0] step:5181/10000 train_time:220998ms step_avg:42.66ms +[2025-09-11 12:46:02] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 12:46:02] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 12:46:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 12:46:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 12:46:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 12:46:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 12:46:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:46:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:46:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 12:46:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 12:46:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 12:46:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 12:46:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 12:46:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 12:46:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 12:46:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 12:46:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 12:46:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 12:46:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 12:46:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 12:46:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 12:46:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 12:46:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 12:46:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 12:46:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 12:46:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 12:46:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 12:46:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 12:46:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 12:46:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 12:46:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 12:46:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 12:46:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 12:46:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 12:46:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 12:46:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 12:46:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 12:46:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 12:46:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 12:46:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 12:46:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 12:46:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 12:46:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 12:46:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 12:46:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 12:46:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:46:16] [Rank 0] PRINT: step:5200/10000 val_loss:5.8681 total_sharp:1.1062e-01 L1_sharp:1.8974e-01 L2_sharp:1.9050e-01 L3_sharp:2.0086e-01 L4_sharp:2.1673e-01 L5_sharp:3.0508e-01 L6_sharp:4.3546e-01 L7_sharp:7.0452e-01 L8_sharp:1.2891e+00 L9_sharp:2.7899e+00 L10_sharp:4.1234e+00 L11_sharp:4.3284e+00 L12_sharp:4.9397e+00 total_fnorm:1.5000e+00 total_l1_linf:1.1040e+03 total_spectral:7.5391e-01 L1_fnorm:4.6875e-02 L2_fnorm:4.6875e-02 L3_fnorm:4.7119e-02 L4_fnorm:4.7119e-02 L5_fnorm:4.6875e-02 L6_fnorm:4.7119e-02 L7_fnorm:4.6875e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.7119e-02 L10_fnorm:4.7119e-02 L11_fnorm:4.7607e-02 L12_fnorm:4.4189e-02 L1_l1linf:1.1353e-02 L2_l1linf:1.1353e-02 L3_l1linf:1.2024e-02 L4_l1linf:1.1841e-02 L5_l1linf:1.1963e-02 L6_l1linf:1.1963e-02 L7_l1linf:1.2268e-02 L8_l1linf:1.2573e-02 L9_l1linf:1.3123e-02 L10_l1linf:1.3794e-02 L11_l1linf:1.4160e-02 L12_l1linf:1.2146e-02 L1_spectral:7.9471e-04 L2_spectral:7.9250e-04 L3_spectral:7.9570e-04 L4_spectral:7.8980e-04 L5_spectral:7.9246e-04 L6_spectral:7.9999e-04 L7_spectral:7.9570e-04 L8_spectral:7.9377e-04 L9_spectral:7.9084e-04 L10_spectral:7.9122e-04 L11_spectral:7.8967e-04 L12_spectral:7.1826e-04 train_time:221664ms step_avg:42.63ms +[2025-09-11 12:46:16] [Rank 0] PRINT: step:5200/10000 val_loss:5.8681 total_sharp:1.1062e-01 L1_sharp:1.8974e-01 L2_sharp:1.9050e-01 L3_sharp:2.0086e-01 L4_sharp:2.1673e-01 L5_sharp:3.0508e-01 L6_sharp:4.3546e-01 L7_sharp:7.0452e-01 L8_sharp:1.2891e+00 L9_sharp:2.7899e+00 L10_sharp:4.1234e+00 L11_sharp:4.3284e+00 L12_sharp:4.9397e+00 total_fnorm:1.5000e+00 total_l1_linf:1.1040e+03 total_spectral:7.5391e-01 L1_fnorm:4.6875e-02 L2_fnorm:4.6875e-02 L3_fnorm:4.7119e-02 L4_fnorm:4.7119e-02 L5_fnorm:4.6875e-02 L6_fnorm:4.7119e-02 L7_fnorm:4.6875e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.7119e-02 L10_fnorm:4.7119e-02 L11_fnorm:4.7607e-02 L12_fnorm:4.4189e-02 L1_l1linf:1.1353e-02 L2_l1linf:1.1353e-02 L3_l1linf:1.2024e-02 L4_l1linf:1.1841e-02 L5_l1linf:1.1963e-02 
L6_l1linf:1.1963e-02 L7_l1linf:1.2268e-02 L8_l1linf:1.2573e-02 L9_l1linf:1.3123e-02 L10_l1linf:1.3794e-02 L11_l1linf:1.4160e-02 L12_l1linf:1.2146e-02 L1_spectral:7.9471e-04 L2_spectral:7.9250e-04 L3_spectral:7.9570e-04 L4_spectral:7.8980e-04 L5_spectral:7.9246e-04 L6_spectral:7.9999e-04 L7_spectral:7.9570e-04 L8_spectral:7.9377e-04 L9_spectral:7.9084e-04 L10_spectral:7.9122e-04 L11_spectral:7.8967e-04 L12_spectral:7.1826e-04 train_time:221664ms step_avg:42.63ms +[2025-09-11 12:46:18] [Rank 0] step:5201/10000 train_time:223090ms step_avg:42.89ms +[2025-09-11 12:46:18] [Rank 0] step:5201/10000 train_time:223090ms step_avg:42.89ms +[2025-09-11 12:46:18] [Rank 0] step:5221/10000 train_time:223797ms step_avg:42.86ms +[2025-09-11 12:46:18] [Rank 0] step:5221/10000 train_time:223797ms step_avg:42.86ms +[2025-09-11 12:46:19] [Rank 0] step:5241/10000 train_time:224483ms step_avg:42.83ms +[2025-09-11 12:46:19] [Rank 0] step:5241/10000 train_time:224483ms step_avg:42.83ms +[2025-09-11 12:46:20] [Rank 0] step:5261/10000 train_time:225171ms step_avg:42.80ms +[2025-09-11 12:46:20] [Rank 0] step:5261/10000 train_time:225171ms step_avg:42.80ms +[2025-09-11 12:46:20] [Rank 0] step:5281/10000 train_time:225858ms step_avg:42.77ms +[2025-09-11 12:46:20] [Rank 0] step:5281/10000 train_time:225858ms step_avg:42.77ms +[2025-09-11 12:46:21] [Rank 0] step:5301/10000 train_time:226545ms step_avg:42.74ms +[2025-09-11 12:46:21] [Rank 0] step:5301/10000 train_time:226545ms step_avg:42.74ms +[2025-09-11 12:46:22] [Rank 0] step:5321/10000 train_time:227232ms step_avg:42.70ms +[2025-09-11 12:46:22] [Rank 0] step:5321/10000 train_time:227232ms step_avg:42.70ms +[2025-09-11 12:46:23] [Rank 0] step:5341/10000 train_time:227919ms step_avg:42.67ms +[2025-09-11 12:46:23] [Rank 0] step:5341/10000 train_time:227919ms step_avg:42.67ms +[2025-09-11 12:46:23] [Rank 0] step:5361/10000 train_time:228606ms step_avg:42.64ms +[2025-09-11 12:46:23] [Rank 0] step:5361/10000 train_time:228606ms step_avg:42.64ms 
+[2025-09-11 12:46:24] [Rank 0] step:5381/10000 train_time:229294ms step_avg:42.61ms +[2025-09-11 12:46:24] [Rank 0] step:5381/10000 train_time:229294ms step_avg:42.61ms +[2025-09-11 12:46:25] [Rank 0] step:5401/10000 train_time:229980ms step_avg:42.58ms +[2025-09-11 12:46:25] [Rank 0] step:5401/10000 train_time:229980ms step_avg:42.58ms +[2025-09-11 12:46:25] [Rank 0] step:5421/10000 train_time:230668ms step_avg:42.55ms +[2025-09-11 12:46:25] [Rank 0] step:5421/10000 train_time:230668ms step_avg:42.55ms +[2025-09-11 12:46:26] [Rank 0] step:5441/10000 train_time:231355ms step_avg:42.52ms +[2025-09-11 12:46:26] [Rank 0] step:5441/10000 train_time:231355ms step_avg:42.52ms +[2025-09-11 12:46:27] [Rank 0] step:5461/10000 train_time:232043ms step_avg:42.49ms +[2025-09-11 12:46:27] [Rank 0] step:5461/10000 train_time:232043ms step_avg:42.49ms +[2025-09-11 12:46:27] [Rank 0] step:5481/10000 train_time:232730ms step_avg:42.46ms +[2025-09-11 12:46:27] [Rank 0] step:5481/10000 train_time:232730ms step_avg:42.46ms +[2025-09-11 12:46:28] [Rank 0] step:5501/10000 train_time:233417ms step_avg:42.43ms +[2025-09-11 12:46:28] [Rank 0] step:5501/10000 train_time:233417ms step_avg:42.43ms +[2025-09-11 12:46:29] [Rank 0] step:5521/10000 train_time:234104ms step_avg:42.40ms +[2025-09-11 12:46:29] [Rank 0] step:5521/10000 train_time:234104ms step_avg:42.40ms +[2025-09-11 12:46:29] [Rank 0] step:5541/10000 train_time:234793ms step_avg:42.37ms +[2025-09-11 12:46:29] [Rank 0] step:5541/10000 train_time:234793ms step_avg:42.37ms +[2025-09-11 12:46:30] [Rank 0] step:5561/10000 train_time:235484ms step_avg:42.35ms +[2025-09-11 12:46:30] [Rank 0] step:5561/10000 train_time:235484ms step_avg:42.35ms +[2025-09-11 12:46:31] [Rank 0] step:5581/10000 train_time:236171ms step_avg:42.32ms +[2025-09-11 12:46:31] [Rank 0] step:5581/10000 train_time:236171ms step_avg:42.32ms +[2025-09-11 12:46:31] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 12:46:31] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 12:46:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 12:46:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 12:46:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 12:46:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 12:46:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:46:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:46:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 12:46:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 12:46:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 12:46:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 12:46:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 12:46:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 12:46:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 12:46:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 12:46:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 12:46:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 12:46:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 12:46:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 12:46:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 12:46:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 12:46:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 12:46:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 12:46:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 12:46:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 12:46:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 12:46:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 12:46:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 12:46:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 12:46:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 12:46:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 12:46:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 12:46:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 12:46:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 12:46:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 12:46:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 12:46:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 12:46:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 12:46:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 12:46:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 12:46:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 12:46:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 12:46:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 12:46:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 12:46:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 12:46:42] [Rank 0] PRINT: step:5600/10000 val_loss:5.8336 total_sharp:1.0085e-01 L1_sharp:1.1267e-01 L2_sharp:1.1928e-01 L3_sharp:1.3931e-01 L4_sharp:1.6984e-01 L5_sharp:2.6586e-01 L6_sharp:4.7177e-01 L7_sharp:6.9174e-01 L8_sharp:1.3242e+00 L9_sharp:2.3596e+00 L10_sharp:3.7619e+00 L11_sharp:4.4955e+00 L12_sharp:4.0025e+00 total_fnorm:1.5078e+00 total_l1_linf:1.1040e+03 total_spectral:7.5781e-01 L1_fnorm:4.6631e-02 L2_fnorm:4.6631e-02 L3_fnorm:4.6875e-02 L4_fnorm:4.6875e-02 L5_fnorm:4.6875e-02 L6_fnorm:4.6875e-02 L7_fnorm:4.6631e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.7119e-02 L10_fnorm:4.6875e-02 L11_fnorm:4.7363e-02 L12_fnorm:4.3945e-02 L1_l1linf:1.1108e-02 L2_l1linf:1.1230e-02 L3_l1linf:1.1780e-02 L4_l1linf:1.1597e-02 L5_l1linf:1.2024e-02 L6_l1linf:1.1902e-02 L7_l1linf:1.2573e-02 L8_l1linf:1.2817e-02 L9_l1linf:1.3855e-02 L10_l1linf:1.3672e-02 L11_l1linf:1.4221e-02 L12_l1linf:1.1841e-02 L1_spectral:7.9583e-04 L2_spectral:7.9807e-04 L3_spectral:7.9656e-04 L4_spectral:7.9504e-04 L5_spectral:7.9309e-04 L6_spectral:7.9212e-04 L7_spectral:8.0081e-04 L8_spectral:7.8293e-04 L9_spectral:7.8946e-04 L10_spectral:7.8471e-04 L11_spectral:7.9211e-04 L12_spectral:7.1580e-04 train_time:236838ms step_avg:42.29ms +[2025-09-11 12:46:42] [Rank 0] PRINT: step:5600/10000 
val_loss:5.8336 total_sharp:1.0085e-01 L1_sharp:1.1267e-01 L2_sharp:1.1928e-01 L3_sharp:1.3931e-01 L4_sharp:1.6984e-01 L5_sharp:2.6586e-01 L6_sharp:4.7177e-01 L7_sharp:6.9174e-01 L8_sharp:1.3242e+00 L9_sharp:2.3596e+00 L10_sharp:3.7619e+00 L11_sharp:4.4955e+00 L12_sharp:4.0025e+00 total_fnorm:1.5078e+00 total_l1_linf:1.1040e+03 total_spectral:7.5781e-01 L1_fnorm:4.6631e-02 L2_fnorm:4.6631e-02 L3_fnorm:4.6875e-02 L4_fnorm:4.6875e-02 L5_fnorm:4.6875e-02 L6_fnorm:4.6875e-02 L7_fnorm:4.6631e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.7119e-02 L10_fnorm:4.6875e-02 L11_fnorm:4.7363e-02 L12_fnorm:4.3945e-02 L1_l1linf:1.1108e-02 L2_l1linf:1.1230e-02 L3_l1linf:1.1780e-02 L4_l1linf:1.1597e-02 L5_l1linf:1.2024e-02 L6_l1linf:1.1902e-02 L7_l1linf:1.2573e-02 L8_l1linf:1.2817e-02 L9_l1linf:1.3855e-02 L10_l1linf:1.3672e-02 L11_l1linf:1.4221e-02 L12_l1linf:1.1841e-02 L1_spectral:7.9583e-04 L2_spectral:7.9807e-04 L3_spectral:7.9656e-04 L4_spectral:7.9504e-04 L5_spectral:7.9309e-04 L6_spectral:7.9212e-04 L7_spectral:8.0081e-04 L8_spectral:7.8293e-04 L9_spectral:7.8946e-04 L10_spectral:7.8471e-04 L11_spectral:7.9211e-04 L12_spectral:7.1580e-04 train_time:236838ms step_avg:42.29ms +[2025-09-11 12:46:43] [Rank 0] step:5601/10000 train_time:238287ms step_avg:42.54ms +[2025-09-11 12:46:43] [Rank 0] step:5601/10000 train_time:238287ms step_avg:42.54ms +[2025-09-11 12:46:44] [Rank 0] step:5621/10000 train_time:239011ms step_avg:42.52ms +[2025-09-11 12:46:44] [Rank 0] step:5621/10000 train_time:239011ms step_avg:42.52ms +[2025-09-11 12:46:45] [Rank 0] step:5641/10000 train_time:239699ms step_avg:42.49ms +[2025-09-11 12:46:45] [Rank 0] step:5641/10000 train_time:239699ms step_avg:42.49ms +[2025-09-11 12:46:45] [Rank 0] step:5661/10000 train_time:240386ms step_avg:42.46ms +[2025-09-11 12:46:45] [Rank 0] step:5661/10000 train_time:240386ms step_avg:42.46ms +[2025-09-11 12:46:46] [Rank 0] step:5681/10000 train_time:241075ms step_avg:42.44ms +[2025-09-11 12:46:46] [Rank 0] step:5681/10000 
train_time:241075ms step_avg:42.44ms +[2025-09-11 12:46:47] [Rank 0] step:5701/10000 train_time:241765ms step_avg:42.41ms +[2025-09-11 12:46:47] [Rank 0] step:5701/10000 train_time:241765ms step_avg:42.41ms +[2025-09-11 12:46:47] [Rank 0] step:5721/10000 train_time:242452ms step_avg:42.38ms +[2025-09-11 12:46:47] [Rank 0] step:5721/10000 train_time:242452ms step_avg:42.38ms +[2025-09-11 12:46:48] [Rank 0] step:5741/10000 train_time:243140ms step_avg:42.35ms +[2025-09-11 12:46:48] [Rank 0] step:5741/10000 train_time:243140ms step_avg:42.35ms +[2025-09-11 12:46:49] [Rank 0] step:5761/10000 train_time:243828ms step_avg:42.32ms +[2025-09-11 12:46:49] [Rank 0] step:5761/10000 train_time:243828ms step_avg:42.32ms +[2025-09-11 12:46:50] [Rank 0] step:5781/10000 train_time:244517ms step_avg:42.30ms +[2025-09-11 12:46:50] [Rank 0] step:5781/10000 train_time:244517ms step_avg:42.30ms +[2025-09-11 12:46:50] [Rank 0] step:5801/10000 train_time:245206ms step_avg:42.27ms +[2025-09-11 12:46:50] [Rank 0] step:5801/10000 train_time:245206ms step_avg:42.27ms +[2025-09-11 12:46:51] [Rank 0] step:5821/10000 train_time:245893ms step_avg:42.24ms +[2025-09-11 12:46:51] [Rank 0] step:5821/10000 train_time:245893ms step_avg:42.24ms +[2025-09-11 12:46:52] [Rank 0] step:5841/10000 train_time:246582ms step_avg:42.22ms +[2025-09-11 12:46:52] [Rank 0] step:5841/10000 train_time:246582ms step_avg:42.22ms +[2025-09-11 12:46:52] [Rank 0] step:5861/10000 train_time:247269ms step_avg:42.19ms +[2025-09-11 12:46:52] [Rank 0] step:5861/10000 train_time:247269ms step_avg:42.19ms +[2025-09-11 12:46:53] [Rank 0] step:5881/10000 train_time:247956ms step_avg:42.16ms +[2025-09-11 12:46:53] [Rank 0] step:5881/10000 train_time:247956ms step_avg:42.16ms +[2025-09-11 12:46:54] [Rank 0] step:5901/10000 train_time:248643ms step_avg:42.14ms +[2025-09-11 12:46:54] [Rank 0] step:5901/10000 train_time:248643ms step_avg:42.14ms +[2025-09-11 12:46:54] [Rank 0] step:5921/10000 train_time:249332ms step_avg:42.11ms 
+[2025-09-11 12:46:54] [Rank 0] step:5921/10000 train_time:249332ms step_avg:42.11ms +[2025-09-11 12:46:55] [Rank 0] step:5941/10000 train_time:250294ms step_avg:42.13ms +[2025-09-11 12:46:55] [Rank 0] step:5941/10000 train_time:250294ms step_avg:42.13ms +[2025-09-11 12:46:56] [Rank 0] step:5961/10000 train_time:250984ms step_avg:42.10ms +[2025-09-11 12:46:56] [Rank 0] step:5961/10000 train_time:250984ms step_avg:42.10ms +[2025-09-11 12:46:57] [Rank 0] step:5981/10000 train_time:251672ms step_avg:42.08ms +[2025-09-11 12:46:57] [Rank 0] step:5981/10000 train_time:251672ms step_avg:42.08ms +[2025-09-11 12:46:58] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 12:46:58] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 12:46:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 12:46:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 12:47:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 12:47:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 12:47:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:47:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:47:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 12:47:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 12:47:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 12:47:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 12:47:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 12:47:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 12:47:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 12:47:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 12:47:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 12:47:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 12:47:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 12:47:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 12:47:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 12:47:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 12:47:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 12:47:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 12:47:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 12:47:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 12:47:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 12:47:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 12:47:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 12:47:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 12:47:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 12:47:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 12:47:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 12:47:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 12:47:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 12:47:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 12:47:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 12:47:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 12:47:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 12:47:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 12:47:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 12:47:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 12:47:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 12:47:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 12:47:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 12:47:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:47:08] [Rank 0] PRINT: step:6000/10000 val_loss:5.7889 total_sharp:1.0270e-01 L1_sharp:1.0162e-01 L2_sharp:1.1725e-01 L3_sharp:1.4385e-01 L4_sharp:1.7676e-01 L5_sharp:2.5514e-01 L6_sharp:4.0975e-01 L7_sharp:5.7403e-01 L8_sharp:9.2341e-01 L9_sharp:2.1372e+00 L10_sharp:3.8207e+00 L11_sharp:4.7629e+00 L12_sharp:5.7670e+00 total_fnorm:1.4453e+00 total_l1_linf:1.0400e+03 total_spectral:7.2656e-01 L1_fnorm:4.6631e-02 L2_fnorm:4.6631e-02 L3_fnorm:4.6631e-02 L4_fnorm:4.6875e-02 L5_fnorm:4.6631e-02 L6_fnorm:4.6875e-02 L7_fnorm:4.6631e-02 L8_fnorm:4.6387e-02 L9_fnorm:4.6875e-02 L10_fnorm:4.6875e-02 L11_fnorm:4.7119e-02 L12_fnorm:4.4434e-02 L1_l1linf:1.0986e-02 L2_l1linf:1.1597e-02 L3_l1linf:1.1475e-02 L4_l1linf:1.1597e-02 L5_l1linf:1.1719e-02 L6_l1linf:1.1780e-02 L7_l1linf:1.2146e-02 L8_l1linf:1.2146e-02 L9_l1linf:1.2939e-02 L10_l1linf:1.3245e-02 L11_l1linf:1.4099e-02 L12_l1linf:1.2207e-02 L1_spectral:7.9734e-04 L2_spectral:8.0090e-04 L3_spectral:7.9244e-04 L4_spectral:7.9070e-04 L5_spectral:7.9549e-04 L6_spectral:7.9763e-04 L7_spectral:7.9574e-04 L8_spectral:7.9301e-04 L9_spectral:7.9804e-04 L10_spectral:7.9411e-04 L11_spectral:7.9026e-04 L12_spectral:7.2391e-04 train_time:252644ms step_avg:42.11ms +[2025-09-11 12:47:08] [Rank 0] PRINT: step:6000/10000 val_loss:5.7889 total_sharp:1.0270e-01 L1_sharp:1.0162e-01 L2_sharp:1.1725e-01 L3_sharp:1.4385e-01 L4_sharp:1.7676e-01 L5_sharp:2.5514e-01 L6_sharp:4.0975e-01 L7_sharp:5.7403e-01 L8_sharp:9.2341e-01 L9_sharp:2.1372e+00 L10_sharp:3.8207e+00 L11_sharp:4.7629e+00 L12_sharp:5.7670e+00 total_fnorm:1.4453e+00 total_l1_linf:1.0400e+03 total_spectral:7.2656e-01 L1_fnorm:4.6631e-02 L2_fnorm:4.6631e-02 L3_fnorm:4.6631e-02 L4_fnorm:4.6875e-02 L5_fnorm:4.6631e-02 L6_fnorm:4.6875e-02 L7_fnorm:4.6631e-02 L8_fnorm:4.6387e-02 L9_fnorm:4.6875e-02 L10_fnorm:4.6875e-02 L11_fnorm:4.7119e-02 L12_fnorm:4.4434e-02 L1_l1linf:1.0986e-02 L2_l1linf:1.1597e-02 L3_l1linf:1.1475e-02 L4_l1linf:1.1597e-02 L5_l1linf:1.1719e-02 
L6_l1linf:1.1780e-02 L7_l1linf:1.2146e-02 L8_l1linf:1.2146e-02 L9_l1linf:1.2939e-02 L10_l1linf:1.3245e-02 L11_l1linf:1.4099e-02 L12_l1linf:1.2207e-02 L1_spectral:7.9734e-04 L2_spectral:8.0090e-04 L3_spectral:7.9244e-04 L4_spectral:7.9070e-04 L5_spectral:7.9549e-04 L6_spectral:7.9763e-04 L7_spectral:7.9574e-04 L8_spectral:7.9301e-04 L9_spectral:7.9804e-04 L10_spectral:7.9411e-04 L11_spectral:7.9026e-04 L12_spectral:7.2391e-04 train_time:252644ms step_avg:42.11ms +[2025-09-11 12:47:10] [Rank 0] step:6001/10000 train_time:254179ms step_avg:42.36ms +[2025-09-11 12:47:10] [Rank 0] step:6001/10000 train_time:254179ms step_avg:42.36ms +[2025-09-11 12:47:11] [Rank 0] step:6021/10000 train_time:255286ms step_avg:42.40ms +[2025-09-11 12:47:11] [Rank 0] step:6021/10000 train_time:255286ms step_avg:42.40ms +[2025-09-11 12:47:11] [Rank 0] step:6041/10000 train_time:255978ms step_avg:42.37ms +[2025-09-11 12:47:11] [Rank 0] step:6041/10000 train_time:255978ms step_avg:42.37ms +[2025-09-11 12:47:12] [Rank 0] step:6061/10000 train_time:256667ms step_avg:42.35ms +[2025-09-11 12:47:12] [Rank 0] step:6061/10000 train_time:256667ms step_avg:42.35ms +[2025-09-11 12:47:13] [Rank 0] step:6081/10000 train_time:257359ms step_avg:42.32ms +[2025-09-11 12:47:13] [Rank 0] step:6081/10000 train_time:257359ms step_avg:42.32ms +[2025-09-11 12:47:13] [Rank 0] step:6101/10000 train_time:258047ms step_avg:42.30ms +[2025-09-11 12:47:13] [Rank 0] step:6101/10000 train_time:258047ms step_avg:42.30ms +[2025-09-11 12:47:14] [Rank 0] step:6121/10000 train_time:258738ms step_avg:42.27ms +[2025-09-11 12:47:14] [Rank 0] step:6121/10000 train_time:258738ms step_avg:42.27ms +[2025-09-11 12:47:15] [Rank 0] step:6141/10000 train_time:259429ms step_avg:42.25ms +[2025-09-11 12:47:15] [Rank 0] step:6141/10000 train_time:259429ms step_avg:42.25ms +[2025-09-11 12:47:16] [Rank 0] step:6161/10000 train_time:260119ms step_avg:42.22ms +[2025-09-11 12:47:16] [Rank 0] step:6161/10000 train_time:260119ms step_avg:42.22ms 
+[2025-09-11 12:47:16] [Rank 0] step:6181/10000 train_time:260807ms step_avg:42.20ms +[2025-09-11 12:47:16] [Rank 0] step:6181/10000 train_time:260807ms step_avg:42.20ms +[2025-09-11 12:47:17] [Rank 0] step:6201/10000 train_time:261498ms step_avg:42.17ms +[2025-09-11 12:47:17] [Rank 0] step:6201/10000 train_time:261498ms step_avg:42.17ms +[2025-09-11 12:47:18] [Rank 0] step:6221/10000 train_time:262190ms step_avg:42.15ms +[2025-09-11 12:47:18] [Rank 0] step:6221/10000 train_time:262190ms step_avg:42.15ms +[2025-09-11 12:47:18] [Rank 0] step:6241/10000 train_time:262879ms step_avg:42.12ms +[2025-09-11 12:47:18] [Rank 0] step:6241/10000 train_time:262879ms step_avg:42.12ms +[2025-09-11 12:47:19] [Rank 0] step:6261/10000 train_time:263567ms step_avg:42.10ms +[2025-09-11 12:47:19] [Rank 0] step:6261/10000 train_time:263567ms step_avg:42.10ms +[2025-09-11 12:47:20] [Rank 0] step:6281/10000 train_time:264258ms step_avg:42.07ms +[2025-09-11 12:47:20] [Rank 0] step:6281/10000 train_time:264258ms step_avg:42.07ms +[2025-09-11 12:47:20] [Rank 0] step:6301/10000 train_time:264947ms step_avg:42.05ms +[2025-09-11 12:47:20] [Rank 0] step:6301/10000 train_time:264947ms step_avg:42.05ms +[2025-09-11 12:47:21] [Rank 0] step:6321/10000 train_time:265640ms step_avg:42.02ms +[2025-09-11 12:47:21] [Rank 0] step:6321/10000 train_time:265640ms step_avg:42.02ms +[2025-09-11 12:47:22] [Rank 0] step:6341/10000 train_time:266331ms step_avg:42.00ms +[2025-09-11 12:47:22] [Rank 0] step:6341/10000 train_time:266331ms step_avg:42.00ms +[2025-09-11 12:47:22] [Rank 0] step:6361/10000 train_time:267022ms step_avg:41.98ms +[2025-09-11 12:47:22] [Rank 0] step:6361/10000 train_time:267022ms step_avg:41.98ms +[2025-09-11 12:47:23] [Rank 0] step:6381/10000 train_time:267712ms step_avg:41.95ms +[2025-09-11 12:47:23] [Rank 0] step:6381/10000 train_time:267712ms step_avg:41.95ms +[2025-09-11 12:47:24] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 12:47:24] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 12:47:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 12:47:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 12:47:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 12:47:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 12:47:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:47:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:47:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 12:47:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 12:47:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 12:47:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 12:47:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 12:47:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 12:47:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 12:47:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 12:47:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 12:47:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 12:47:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 12:47:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 12:47:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 12:47:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 12:47:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 12:47:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 12:47:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 12:47:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 12:47:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 12:47:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 12:47:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 12:47:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 12:47:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 12:47:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 12:47:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 12:47:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 12:47:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 12:47:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 12:47:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 12:47:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 12:47:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 12:47:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 12:47:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 12:47:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 12:47:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 12:47:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 12:47:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 12:47:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 12:47:34] [Rank 0] PRINT: step:6400/10000 val_loss:5.7590 total_sharp:9.0966e-02 L1_sharp:9.0003e-02 L2_sharp:9.8794e-02 L3_sharp:1.2482e-01 L4_sharp:1.4640e-01 L5_sharp:2.2040e-01 L6_sharp:3.7811e-01 L7_sharp:5.7601e-01 L8_sharp:1.0635e+00 L9_sharp:2.2632e+00 L10_sharp:3.3861e+00 L11_sharp:3.3061e+00 L12_sharp:3.4910e+00 total_fnorm:1.2500e+00 total_l1_linf:8.5600e+02 total_spectral:6.2891e-01 L1_fnorm:4.0527e-02 L2_fnorm:4.0771e-02 L3_fnorm:4.1016e-02 L4_fnorm:4.1016e-02 L5_fnorm:4.0771e-02 L6_fnorm:4.0771e-02 L7_fnorm:4.0771e-02 L8_fnorm:4.0771e-02 L9_fnorm:4.1016e-02 L10_fnorm:4.0771e-02 L11_fnorm:4.1260e-02 L12_fnorm:3.8818e-02 L1_l1linf:8.9111e-03 L2_l1linf:9.1553e-03 L3_l1linf:9.2773e-03 L4_l1linf:9.5215e-03 L5_l1linf:9.8267e-03 L6_l1linf:9.9487e-03 L7_l1linf:1.0315e-02 L8_l1linf:1.0498e-02 L9_l1linf:1.1108e-02 L10_l1linf:1.1292e-02 L11_l1linf:1.1780e-02 L12_l1linf:1.0437e-02 L1_spectral:7.3203e-04 L2_spectral:7.2428e-04 L3_spectral:7.2389e-04 L4_spectral:7.2292e-04 L5_spectral:7.1975e-04 L6_spectral:7.2548e-04 L7_spectral:7.1953e-04 L8_spectral:7.1430e-04 L9_spectral:7.1793e-04 L10_spectral:7.0816e-04 L11_spectral:7.1436e-04 L12_spectral:6.3968e-04 train_time:268381ms step_avg:41.93ms +[2025-09-11 12:47:34] [Rank 0] PRINT: step:6400/10000 
val_loss:5.7590 total_sharp:9.0966e-02 L1_sharp:9.0003e-02 L2_sharp:9.8794e-02 L3_sharp:1.2482e-01 L4_sharp:1.4640e-01 L5_sharp:2.2040e-01 L6_sharp:3.7811e-01 L7_sharp:5.7601e-01 L8_sharp:1.0635e+00 L9_sharp:2.2632e+00 L10_sharp:3.3861e+00 L11_sharp:3.3061e+00 L12_sharp:3.4910e+00 total_fnorm:1.2500e+00 total_l1_linf:8.5600e+02 total_spectral:6.2891e-01 L1_fnorm:4.0527e-02 L2_fnorm:4.0771e-02 L3_fnorm:4.1016e-02 L4_fnorm:4.1016e-02 L5_fnorm:4.0771e-02 L6_fnorm:4.0771e-02 L7_fnorm:4.0771e-02 L8_fnorm:4.0771e-02 L9_fnorm:4.1016e-02 L10_fnorm:4.0771e-02 L11_fnorm:4.1260e-02 L12_fnorm:3.8818e-02 L1_l1linf:8.9111e-03 L2_l1linf:9.1553e-03 L3_l1linf:9.2773e-03 L4_l1linf:9.5215e-03 L5_l1linf:9.8267e-03 L6_l1linf:9.9487e-03 L7_l1linf:1.0315e-02 L8_l1linf:1.0498e-02 L9_l1linf:1.1108e-02 L10_l1linf:1.1292e-02 L11_l1linf:1.1780e-02 L12_l1linf:1.0437e-02 L1_spectral:7.3203e-04 L2_spectral:7.2428e-04 L3_spectral:7.2389e-04 L4_spectral:7.2292e-04 L5_spectral:7.1975e-04 L6_spectral:7.2548e-04 L7_spectral:7.1953e-04 L8_spectral:7.1430e-04 L9_spectral:7.1793e-04 L10_spectral:7.0816e-04 L11_spectral:7.1436e-04 L12_spectral:6.3968e-04 train_time:268381ms step_avg:41.93ms +[2025-09-11 12:47:36] [Rank 0] step:6401/10000 train_time:269842ms step_avg:42.16ms +[2025-09-11 12:47:36] [Rank 0] step:6401/10000 train_time:269842ms step_avg:42.16ms +[2025-09-11 12:47:36] [Rank 0] step:6421/10000 train_time:270551ms step_avg:42.14ms +[2025-09-11 12:47:36] [Rank 0] step:6421/10000 train_time:270551ms step_avg:42.14ms +[2025-09-11 12:47:37] [Rank 0] step:6441/10000 train_time:271243ms step_avg:42.11ms +[2025-09-11 12:47:37] [Rank 0] step:6441/10000 train_time:271243ms step_avg:42.11ms +[2025-09-11 12:47:38] [Rank 0] step:6461/10000 train_time:271934ms step_avg:42.09ms +[2025-09-11 12:47:38] [Rank 0] step:6461/10000 train_time:271934ms step_avg:42.09ms +[2025-09-11 12:47:39] [Rank 0] step:6481/10000 train_time:272626ms step_avg:42.07ms +[2025-09-11 12:47:39] [Rank 0] step:6481/10000 
train_time:272626ms step_avg:42.07ms +[2025-09-11 12:47:39] [Rank 0] step:6501/10000 train_time:273319ms step_avg:42.04ms +[2025-09-11 12:47:39] [Rank 0] step:6501/10000 train_time:273319ms step_avg:42.04ms +[2025-09-11 12:47:40] [Rank 0] step:6521/10000 train_time:274010ms step_avg:42.02ms +[2025-09-11 12:47:40] [Rank 0] step:6521/10000 train_time:274010ms step_avg:42.02ms +[2025-09-11 12:47:41] [Rank 0] step:6541/10000 train_time:274699ms step_avg:42.00ms +[2025-09-11 12:47:41] [Rank 0] step:6541/10000 train_time:274699ms step_avg:42.00ms +[2025-09-11 12:47:41] [Rank 0] step:6561/10000 train_time:275390ms step_avg:41.97ms +[2025-09-11 12:47:41] [Rank 0] step:6561/10000 train_time:275390ms step_avg:41.97ms +[2025-09-11 12:47:42] [Rank 0] step:6581/10000 train_time:276081ms step_avg:41.95ms +[2025-09-11 12:47:42] [Rank 0] step:6581/10000 train_time:276081ms step_avg:41.95ms +[2025-09-11 12:47:43] [Rank 0] step:6601/10000 train_time:276772ms step_avg:41.93ms +[2025-09-11 12:47:43] [Rank 0] step:6601/10000 train_time:276772ms step_avg:41.93ms +[2025-09-11 12:47:43] [Rank 0] step:6621/10000 train_time:277462ms step_avg:41.91ms +[2025-09-11 12:47:43] [Rank 0] step:6621/10000 train_time:277462ms step_avg:41.91ms +[2025-09-11 12:47:44] [Rank 0] step:6641/10000 train_time:278153ms step_avg:41.88ms +[2025-09-11 12:47:44] [Rank 0] step:6641/10000 train_time:278153ms step_avg:41.88ms +[2025-09-11 12:47:45] [Rank 0] step:6661/10000 train_time:278845ms step_avg:41.86ms +[2025-09-11 12:47:45] [Rank 0] step:6661/10000 train_time:278845ms step_avg:41.86ms +[2025-09-11 12:47:45] [Rank 0] step:6681/10000 train_time:279543ms step_avg:41.84ms +[2025-09-11 12:47:45] [Rank 0] step:6681/10000 train_time:279543ms step_avg:41.84ms +[2025-09-11 12:47:46] [Rank 0] step:6701/10000 train_time:280239ms step_avg:41.82ms +[2025-09-11 12:47:46] [Rank 0] step:6701/10000 train_time:280239ms step_avg:41.82ms +[2025-09-11 12:47:47] [Rank 0] step:6721/10000 train_time:280937ms step_avg:41.80ms 
+[2025-09-11 12:47:47] [Rank 0] step:6721/10000 train_time:280937ms step_avg:41.80ms +[2025-09-11 12:47:48] [Rank 0] step:6741/10000 train_time:281635ms step_avg:41.78ms +[2025-09-11 12:47:48] [Rank 0] step:6741/10000 train_time:281635ms step_avg:41.78ms +[2025-09-11 12:47:48] [Rank 0] step:6761/10000 train_time:282330ms step_avg:41.76ms +[2025-09-11 12:47:48] [Rank 0] step:6761/10000 train_time:282330ms step_avg:41.76ms +[2025-09-11 12:47:49] [Rank 0] step:6781/10000 train_time:283028ms step_avg:41.74ms +[2025-09-11 12:47:49] [Rank 0] step:6781/10000 train_time:283028ms step_avg:41.74ms +[2025-09-11 12:47:50] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 12:47:50] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 12:47:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 12:47:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 12:47:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 12:47:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 12:47:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:47:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:47:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 12:47:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 12:47:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 12:47:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 12:47:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 12:47:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 12:47:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 12:47:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 12:47:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 12:47:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 12:47:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 12:47:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 12:47:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 12:47:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 12:47:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 12:47:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 12:47:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 12:47:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 12:47:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 12:47:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 12:47:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 12:47:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 12:47:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 12:47:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 12:47:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 12:47:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 12:47:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 12:47:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 12:47:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 12:47:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 12:47:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 12:47:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 12:47:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 12:47:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 12:47:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 12:47:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 12:48:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 12:48:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:48:00] [Rank 0] PRINT: step:6800/10000 val_loss:5.7275 total_sharp:7.5882e-02 L1_sharp:9.8042e-02 L2_sharp:1.0281e-01 L3_sharp:1.3154e-01 L4_sharp:1.6276e-01 L5_sharp:2.4932e-01 L6_sharp:4.2831e-01 L7_sharp:6.6316e-01 L8_sharp:1.0863e+00 L9_sharp:2.0393e+00 L10_sharp:3.2303e+00 L11_sharp:3.3049e+00 L12_sharp:2.2125e+00 total_fnorm:1.1094e+00 total_l1_linf:7.1200e+02 total_spectral:5.5859e-01 L1_fnorm:3.4424e-02 L2_fnorm:3.4424e-02 L3_fnorm:3.4912e-02 L4_fnorm:3.4912e-02 L5_fnorm:3.4912e-02 L6_fnorm:3.4668e-02 L7_fnorm:3.4668e-02 L8_fnorm:3.4424e-02 L9_fnorm:3.4912e-02 L10_fnorm:3.4668e-02 L11_fnorm:3.5156e-02 L12_fnorm:3.3691e-02 L1_l1linf:7.1106e-03 L2_l1linf:7.2937e-03 L3_l1linf:7.3853e-03 L4_l1linf:7.6294e-03 L5_l1linf:7.8735e-03 L6_l1linf:7.9346e-03 L7_l1linf:7.9956e-03 L8_l1linf:8.4229e-03 L9_l1linf:8.6060e-03 L10_l1linf:8.9111e-03 L11_l1linf:9.2773e-03 L12_l1linf:8.7280e-03 L1_spectral:6.3887e-04 L2_spectral:6.4422e-04 L3_spectral:6.4194e-04 L4_spectral:6.4206e-04 L5_spectral:6.4402e-04 L6_spectral:6.3913e-04 L7_spectral:6.3966e-04 L8_spectral:6.3323e-04 L9_spectral:6.3811e-04 L10_spectral:6.2969e-04 L11_spectral:6.2431e-04 L12_spectral:5.7034e-04 train_time:283705ms step_avg:41.72ms +[2025-09-11 12:48:00] [Rank 0] PRINT: step:6800/10000 val_loss:5.7275 total_sharp:7.5882e-02 L1_sharp:9.8042e-02 L2_sharp:1.0281e-01 L3_sharp:1.3154e-01 L4_sharp:1.6276e-01 L5_sharp:2.4932e-01 L6_sharp:4.2831e-01 L7_sharp:6.6316e-01 L8_sharp:1.0863e+00 L9_sharp:2.0393e+00 L10_sharp:3.2303e+00 L11_sharp:3.3049e+00 L12_sharp:2.2125e+00 total_fnorm:1.1094e+00 total_l1_linf:7.1200e+02 total_spectral:5.5859e-01 L1_fnorm:3.4424e-02 L2_fnorm:3.4424e-02 L3_fnorm:3.4912e-02 L4_fnorm:3.4912e-02 L5_fnorm:3.4912e-02 L6_fnorm:3.4668e-02 L7_fnorm:3.4668e-02 L8_fnorm:3.4424e-02 L9_fnorm:3.4912e-02 L10_fnorm:3.4668e-02 L11_fnorm:3.5156e-02 L12_fnorm:3.3691e-02 L1_l1linf:7.1106e-03 L2_l1linf:7.2937e-03 L3_l1linf:7.3853e-03 L4_l1linf:7.6294e-03 L5_l1linf:7.8735e-03 
L6_l1linf:7.9346e-03 L7_l1linf:7.9956e-03 L8_l1linf:8.4229e-03 L9_l1linf:8.6060e-03 L10_l1linf:8.9111e-03 L11_l1linf:9.2773e-03 L12_l1linf:8.7280e-03 L1_spectral:6.3887e-04 L2_spectral:6.4422e-04 L3_spectral:6.4194e-04 L4_spectral:6.4206e-04 L5_spectral:6.4402e-04 L6_spectral:6.3913e-04 L7_spectral:6.3966e-04 L8_spectral:6.3323e-04 L9_spectral:6.3811e-04 L10_spectral:6.2969e-04 L11_spectral:6.2431e-04 L12_spectral:5.7034e-04 train_time:283705ms step_avg:41.72ms +[2025-09-11 12:48:02] [Rank 0] step:6801/10000 train_time:285166ms step_avg:41.93ms +[2025-09-11 12:48:02] [Rank 0] step:6801/10000 train_time:285166ms step_avg:41.93ms +[2025-09-11 12:48:02] [Rank 0] step:6821/10000 train_time:285912ms step_avg:41.92ms +[2025-09-11 12:48:02] [Rank 0] step:6821/10000 train_time:285912ms step_avg:41.92ms +[2025-09-11 12:48:03] [Rank 0] step:6841/10000 train_time:286612ms step_avg:41.90ms +[2025-09-11 12:48:03] [Rank 0] step:6841/10000 train_time:286612ms step_avg:41.90ms +[2025-09-11 12:48:04] [Rank 0] step:6861/10000 train_time:287311ms step_avg:41.88ms +[2025-09-11 12:48:04] [Rank 0] step:6861/10000 train_time:287311ms step_avg:41.88ms +[2025-09-11 12:48:04] [Rank 0] step:6881/10000 train_time:288010ms step_avg:41.86ms +[2025-09-11 12:48:04] [Rank 0] step:6881/10000 train_time:288010ms step_avg:41.86ms +[2025-09-11 12:48:05] [Rank 0] step:6901/10000 train_time:288707ms step_avg:41.84ms +[2025-09-11 12:48:05] [Rank 0] step:6901/10000 train_time:288707ms step_avg:41.84ms +[2025-09-11 12:48:06] [Rank 0] step:6921/10000 train_time:289404ms step_avg:41.82ms +[2025-09-11 12:48:06] [Rank 0] step:6921/10000 train_time:289404ms step_avg:41.82ms +[2025-09-11 12:48:06] [Rank 0] step:6941/10000 train_time:290102ms step_avg:41.80ms +[2025-09-11 12:48:06] [Rank 0] step:6941/10000 train_time:290102ms step_avg:41.80ms +[2025-09-11 12:48:07] [Rank 0] step:6961/10000 train_time:290801ms step_avg:41.78ms +[2025-09-11 12:48:07] [Rank 0] step:6961/10000 train_time:290801ms step_avg:41.78ms 
+[2025-09-11 12:48:08] [Rank 0] step:6981/10000 train_time:291500ms step_avg:41.76ms +[2025-09-11 12:48:08] [Rank 0] step:6981/10000 train_time:291500ms step_avg:41.76ms +[2025-09-11 12:48:09] [Rank 0] step:7001/10000 train_time:292198ms step_avg:41.74ms +[2025-09-11 12:48:09] [Rank 0] step:7001/10000 train_time:292198ms step_avg:41.74ms +[2025-09-11 12:48:09] [Rank 0] step:7021/10000 train_time:292896ms step_avg:41.72ms +[2025-09-11 12:48:09] [Rank 0] step:7021/10000 train_time:292896ms step_avg:41.72ms +[2025-09-11 12:48:10] [Rank 0] step:7041/10000 train_time:293593ms step_avg:41.70ms +[2025-09-11 12:48:10] [Rank 0] step:7041/10000 train_time:293593ms step_avg:41.70ms +[2025-09-11 12:48:11] [Rank 0] step:7061/10000 train_time:294291ms step_avg:41.68ms +[2025-09-11 12:48:11] [Rank 0] step:7061/10000 train_time:294291ms step_avg:41.68ms +[2025-09-11 12:48:11] [Rank 0] step:7081/10000 train_time:294988ms step_avg:41.66ms +[2025-09-11 12:48:11] [Rank 0] step:7081/10000 train_time:294988ms step_avg:41.66ms +[2025-09-11 12:48:12] [Rank 0] step:7101/10000 train_time:295686ms step_avg:41.64ms +[2025-09-11 12:48:12] [Rank 0] step:7101/10000 train_time:295686ms step_avg:41.64ms +[2025-09-11 12:48:13] [Rank 0] step:7121/10000 train_time:296386ms step_avg:41.62ms +[2025-09-11 12:48:13] [Rank 0] step:7121/10000 train_time:296386ms step_avg:41.62ms +[2025-09-11 12:48:13] [Rank 0] step:7141/10000 train_time:297085ms step_avg:41.60ms +[2025-09-11 12:48:13] [Rank 0] step:7141/10000 train_time:297085ms step_avg:41.60ms +[2025-09-11 12:48:14] [Rank 0] step:7161/10000 train_time:297783ms step_avg:41.58ms +[2025-09-11 12:48:14] [Rank 0] step:7161/10000 train_time:297783ms step_avg:41.58ms +[2025-09-11 12:48:15] [Rank 0] step:7181/10000 train_time:298480ms step_avg:41.57ms +[2025-09-11 12:48:15] [Rank 0] step:7181/10000 train_time:298480ms step_avg:41.57ms +[2025-09-11 12:48:16] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 12:48:16] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 12:48:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 12:48:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 12:48:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 12:48:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 12:48:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:48:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:48:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 12:48:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 12:48:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 12:48:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 12:48:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 12:48:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 12:48:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 12:48:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 12:48:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 12:48:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 12:48:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 12:48:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 12:48:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 12:48:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 12:48:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 12:48:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 12:48:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 12:48:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 12:48:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 12:48:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 12:48:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 12:48:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 12:48:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 12:48:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 12:48:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 12:48:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 12:48:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 12:48:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 12:48:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 12:48:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 12:48:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 12:48:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 12:48:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 12:48:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 12:48:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 12:48:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 12:48:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 12:48:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 12:48:26] [Rank 0] PRINT: step:7200/10000 val_loss:5.7029 total_sharp:7.5119e-02 L1_sharp:7.7849e-02 L2_sharp:8.6863e-02 L3_sharp:1.1309e-01 L4_sharp:1.2741e-01 L5_sharp:1.6294e-01 L6_sharp:2.6112e-01 L7_sharp:4.0019e-01 L8_sharp:8.6131e-01 L9_sharp:1.9507e+00 L10_sharp:3.0224e+00 L11_sharp:3.3790e+00 L12_sharp:2.5813e+00 total_fnorm:8.9453e-01 total_l1_linf:5.2800e+02 total_spectral:4.4922e-01 L1_fnorm:2.8931e-02 L2_fnorm:2.9175e-02 L3_fnorm:2.9419e-02 L4_fnorm:2.9541e-02 L5_fnorm:2.9541e-02 L6_fnorm:2.9419e-02 L7_fnorm:2.9297e-02 L8_fnorm:2.9175e-02 L9_fnorm:2.9419e-02 L10_fnorm:2.9419e-02 L11_fnorm:2.9663e-02 L12_fnorm:2.7588e-02 L1_l1linf:5.7373e-03 L2_l1linf:5.7068e-03 L3_l1linf:5.7983e-03 L4_l1linf:6.1951e-03 L5_l1linf:6.1340e-03 L6_l1linf:6.3171e-03 L7_l1linf:6.5613e-03 L8_l1linf:6.6528e-03 L9_l1linf:7.0801e-03 L10_l1linf:7.3242e-03 L11_l1linf:7.7820e-03 L12_l1linf:6.9580e-03 L1_spectral:5.5744e-04 L2_spectral:5.6476e-04 L3_spectral:5.6942e-04 L4_spectral:5.6579e-04 L5_spectral:5.6777e-04 L6_spectral:5.6344e-04 L7_spectral:5.5908e-04 L8_spectral:5.5919e-04 L9_spectral:5.5226e-04 L10_spectral:5.4614e-04 L11_spectral:5.4749e-04 L12_spectral:4.8099e-04 train_time:299159ms step_avg:41.55ms +[2025-09-11 12:48:26] [Rank 0] PRINT: step:7200/10000 
val_loss:5.7029 total_sharp:7.5119e-02 L1_sharp:7.7849e-02 L2_sharp:8.6863e-02 L3_sharp:1.1309e-01 L4_sharp:1.2741e-01 L5_sharp:1.6294e-01 L6_sharp:2.6112e-01 L7_sharp:4.0019e-01 L8_sharp:8.6131e-01 L9_sharp:1.9507e+00 L10_sharp:3.0224e+00 L11_sharp:3.3790e+00 L12_sharp:2.5813e+00 total_fnorm:8.9453e-01 total_l1_linf:5.2800e+02 total_spectral:4.4922e-01 L1_fnorm:2.8931e-02 L2_fnorm:2.9175e-02 L3_fnorm:2.9419e-02 L4_fnorm:2.9541e-02 L5_fnorm:2.9541e-02 L6_fnorm:2.9419e-02 L7_fnorm:2.9297e-02 L8_fnorm:2.9175e-02 L9_fnorm:2.9419e-02 L10_fnorm:2.9419e-02 L11_fnorm:2.9663e-02 L12_fnorm:2.7588e-02 L1_l1linf:5.7373e-03 L2_l1linf:5.7068e-03 L3_l1linf:5.7983e-03 L4_l1linf:6.1951e-03 L5_l1linf:6.1340e-03 L6_l1linf:6.3171e-03 L7_l1linf:6.5613e-03 L8_l1linf:6.6528e-03 L9_l1linf:7.0801e-03 L10_l1linf:7.3242e-03 L11_l1linf:7.7820e-03 L12_l1linf:6.9580e-03 L1_spectral:5.5744e-04 L2_spectral:5.6476e-04 L3_spectral:5.6942e-04 L4_spectral:5.6579e-04 L5_spectral:5.6777e-04 L6_spectral:5.6344e-04 L7_spectral:5.5908e-04 L8_spectral:5.5919e-04 L9_spectral:5.5226e-04 L10_spectral:5.4614e-04 L11_spectral:5.4749e-04 L12_spectral:4.8099e-04 train_time:299159ms step_avg:41.55ms +[2025-09-11 12:48:27] [Rank 0] step:7201/10000 train_time:300596ms step_avg:41.74ms +[2025-09-11 12:48:27] [Rank 0] step:7201/10000 train_time:300596ms step_avg:41.74ms +[2025-09-11 12:48:28] [Rank 0] step:7221/10000 train_time:301338ms step_avg:41.73ms +[2025-09-11 12:48:28] [Rank 0] step:7221/10000 train_time:301338ms step_avg:41.73ms +[2025-09-11 12:48:29] [Rank 0] step:7241/10000 train_time:302036ms step_avg:41.71ms +[2025-09-11 12:48:29] [Rank 0] step:7241/10000 train_time:302036ms step_avg:41.71ms +[2025-09-11 12:48:30] [Rank 0] step:7261/10000 train_time:302737ms step_avg:41.69ms +[2025-09-11 12:48:30] [Rank 0] step:7261/10000 train_time:302737ms step_avg:41.69ms +[2025-09-11 12:48:30] [Rank 0] step:7281/10000 train_time:303441ms step_avg:41.68ms +[2025-09-11 12:48:30] [Rank 0] step:7281/10000 
train_time:303441ms step_avg:41.68ms +[2025-09-11 12:48:31] [Rank 0] step:7301/10000 train_time:304139ms step_avg:41.66ms +[2025-09-11 12:48:31] [Rank 0] step:7301/10000 train_time:304139ms step_avg:41.66ms +[2025-09-11 12:48:32] [Rank 0] step:7321/10000 train_time:304840ms step_avg:41.64ms +[2025-09-11 12:48:32] [Rank 0] step:7321/10000 train_time:304840ms step_avg:41.64ms +[2025-09-11 12:48:32] [Rank 0] step:7341/10000 train_time:305540ms step_avg:41.62ms +[2025-09-11 12:48:32] [Rank 0] step:7341/10000 train_time:305540ms step_avg:41.62ms +[2025-09-11 12:48:33] [Rank 0] step:7361/10000 train_time:306238ms step_avg:41.60ms +[2025-09-11 12:48:33] [Rank 0] step:7361/10000 train_time:306238ms step_avg:41.60ms +[2025-09-11 12:48:34] [Rank 0] step:7381/10000 train_time:306938ms step_avg:41.58ms +[2025-09-11 12:48:34] [Rank 0] step:7381/10000 train_time:306938ms step_avg:41.58ms +[2025-09-11 12:48:35] [Rank 0] step:7401/10000 train_time:307636ms step_avg:41.57ms +[2025-09-11 12:48:35] [Rank 0] step:7401/10000 train_time:307636ms step_avg:41.57ms +[2025-09-11 12:48:35] [Rank 0] step:7421/10000 train_time:308334ms step_avg:41.55ms +[2025-09-11 12:48:35] [Rank 0] step:7421/10000 train_time:308334ms step_avg:41.55ms +[2025-09-11 12:48:36] [Rank 0] step:7441/10000 train_time:309034ms step_avg:41.53ms +[2025-09-11 12:48:36] [Rank 0] step:7441/10000 train_time:309034ms step_avg:41.53ms +[2025-09-11 12:48:37] [Rank 0] step:7461/10000 train_time:309733ms step_avg:41.51ms +[2025-09-11 12:48:37] [Rank 0] step:7461/10000 train_time:309733ms step_avg:41.51ms +[2025-09-11 12:48:37] [Rank 0] step:7481/10000 train_time:310434ms step_avg:41.50ms +[2025-09-11 12:48:37] [Rank 0] step:7481/10000 train_time:310434ms step_avg:41.50ms +[2025-09-11 12:48:38] [Rank 0] step:7501/10000 train_time:311133ms step_avg:41.48ms +[2025-09-11 12:48:38] [Rank 0] step:7501/10000 train_time:311133ms step_avg:41.48ms +[2025-09-11 12:48:39] [Rank 0] step:7521/10000 train_time:311833ms step_avg:41.46ms 
+[2025-09-11 12:48:39] [Rank 0] step:7521/10000 train_time:311833ms step_avg:41.46ms +[2025-09-11 12:48:39] [Rank 0] step:7541/10000 train_time:312531ms step_avg:41.44ms +[2025-09-11 12:48:39] [Rank 0] step:7541/10000 train_time:312531ms step_avg:41.44ms +[2025-09-11 12:48:40] [Rank 0] step:7561/10000 train_time:313232ms step_avg:41.43ms +[2025-09-11 12:48:40] [Rank 0] step:7561/10000 train_time:313232ms step_avg:41.43ms +[2025-09-11 12:48:41] [Rank 0] step:7581/10000 train_time:313934ms step_avg:41.41ms +[2025-09-11 12:48:41] [Rank 0] step:7581/10000 train_time:313934ms step_avg:41.41ms +[2025-09-11 12:48:41] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 12:48:41] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 12:48:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 12:48:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 12:48:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 12:48:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 12:48:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:48:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:48:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 12:48:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 12:48:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 12:48:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 12:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 12:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 12:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 12:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 12:48:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 12:48:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 12:48:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 12:48:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 12:48:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 12:48:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 12:48:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 12:48:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 12:48:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 12:48:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 12:48:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 12:48:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 12:48:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 12:48:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 12:48:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 12:48:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 12:48:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 12:48:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 12:48:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 12:48:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 12:48:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 12:48:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 12:48:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 12:48:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 12:48:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 12:48:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 12:48:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 12:48:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 12:48:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 12:48:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:48:52] [Rank 0] PRINT: step:7600/10000 val_loss:5.6878 total_sharp:8.6545e-02 L1_sharp:9.2582e-02 L2_sharp:9.2728e-02 L3_sharp:1.3069e-01 L4_sharp:1.5936e-01 L5_sharp:2.3710e-01 L6_sharp:3.8632e-01 L7_sharp:4.6357e-01 L8_sharp:7.4158e-01 L9_sharp:1.3710e+00 L10_sharp:2.5487e+00 L11_sharp:3.5206e+00 L12_sharp:4.7747e+00 total_fnorm:6.6797e-01 total_l1_linf:3.7200e+02 total_spectral:3.3594e-01 L1_fnorm:2.3560e-02 L2_fnorm:2.3804e-02 L3_fnorm:2.4048e-02 L4_fnorm:2.4170e-02 L5_fnorm:2.4292e-02 L6_fnorm:2.4170e-02 L7_fnorm:2.4048e-02 L8_fnorm:2.4048e-02 L9_fnorm:2.4292e-02 L10_fnorm:2.4048e-02 L11_fnorm:2.4292e-02 L12_fnorm:2.2583e-02 L1_l1linf:4.3335e-03 L2_l1linf:4.3335e-03 L3_l1linf:4.5166e-03 L4_l1linf:4.6387e-03 L5_l1linf:4.8218e-03 L6_l1linf:4.9133e-03 L7_l1linf:5.0049e-03 L8_l1linf:5.2490e-03 L9_l1linf:5.4016e-03 L10_l1linf:5.6152e-03 L11_l1linf:6.1951e-03 L12_l1linf:5.4016e-03 L1_spectral:4.8201e-04 L2_spectral:4.8033e-04 L3_spectral:4.8575e-04 L4_spectral:4.8872e-04 L5_spectral:4.8290e-04 L6_spectral:4.8170e-04 L7_spectral:4.7937e-04 L8_spectral:4.7698e-04 L9_spectral:4.7102e-04 L10_spectral:4.6736e-04 L11_spectral:4.5803e-04 L12_spectral:4.0912e-04 train_time:314614ms step_avg:41.40ms +[2025-09-11 12:48:52] [Rank 0] PRINT: step:7600/10000 val_loss:5.6878 total_sharp:8.6545e-02 L1_sharp:9.2582e-02 L2_sharp:9.2728e-02 L3_sharp:1.3069e-01 L4_sharp:1.5936e-01 L5_sharp:2.3710e-01 L6_sharp:3.8632e-01 L7_sharp:4.6357e-01 L8_sharp:7.4158e-01 L9_sharp:1.3710e+00 L10_sharp:2.5487e+00 L11_sharp:3.5206e+00 L12_sharp:4.7747e+00 total_fnorm:6.6797e-01 total_l1_linf:3.7200e+02 total_spectral:3.3594e-01 L1_fnorm:2.3560e-02 L2_fnorm:2.3804e-02 L3_fnorm:2.4048e-02 L4_fnorm:2.4170e-02 L5_fnorm:2.4292e-02 L6_fnorm:2.4170e-02 L7_fnorm:2.4048e-02 L8_fnorm:2.4048e-02 L9_fnorm:2.4292e-02 L10_fnorm:2.4048e-02 L11_fnorm:2.4292e-02 L12_fnorm:2.2583e-02 L1_l1linf:4.3335e-03 L2_l1linf:4.3335e-03 L3_l1linf:4.5166e-03 L4_l1linf:4.6387e-03 L5_l1linf:4.8218e-03 
L6_l1linf:4.9133e-03 L7_l1linf:5.0049e-03 L8_l1linf:5.2490e-03 L9_l1linf:5.4016e-03 L10_l1linf:5.6152e-03 L11_l1linf:6.1951e-03 L12_l1linf:5.4016e-03 L1_spectral:4.8201e-04 L2_spectral:4.8033e-04 L3_spectral:4.8575e-04 L4_spectral:4.8872e-04 L5_spectral:4.8290e-04 L6_spectral:4.8170e-04 L7_spectral:4.7937e-04 L8_spectral:4.7698e-04 L9_spectral:4.7102e-04 L10_spectral:4.6736e-04 L11_spectral:4.5803e-04 L12_spectral:4.0912e-04 train_time:314614ms step_avg:41.40ms +[2025-09-11 12:48:53] [Rank 0] step:7601/10000 train_time:316084ms step_avg:41.58ms +[2025-09-11 12:48:53] [Rank 0] step:7601/10000 train_time:316084ms step_avg:41.58ms +[2025-09-11 12:48:54] [Rank 0] step:7621/10000 train_time:316810ms step_avg:41.57ms +[2025-09-11 12:48:54] [Rank 0] step:7621/10000 train_time:316810ms step_avg:41.57ms +[2025-09-11 12:48:55] [Rank 0] step:7641/10000 train_time:317512ms step_avg:41.55ms +[2025-09-11 12:48:55] [Rank 0] step:7641/10000 train_time:317512ms step_avg:41.55ms +[2025-09-11 12:48:55] [Rank 0] step:7661/10000 train_time:318210ms step_avg:41.54ms +[2025-09-11 12:48:55] [Rank 0] step:7661/10000 train_time:318210ms step_avg:41.54ms +[2025-09-11 12:48:56] [Rank 0] step:7681/10000 train_time:318911ms step_avg:41.52ms +[2025-09-11 12:48:56] [Rank 0] step:7681/10000 train_time:318911ms step_avg:41.52ms +[2025-09-11 12:48:57] [Rank 0] step:7701/10000 train_time:319612ms step_avg:41.50ms +[2025-09-11 12:48:57] [Rank 0] step:7701/10000 train_time:319612ms step_avg:41.50ms +[2025-09-11 12:48:58] [Rank 0] step:7721/10000 train_time:320313ms step_avg:41.49ms +[2025-09-11 12:48:58] [Rank 0] step:7721/10000 train_time:320313ms step_avg:41.49ms +[2025-09-11 12:48:58] [Rank 0] step:7741/10000 train_time:321014ms step_avg:41.47ms +[2025-09-11 12:48:58] [Rank 0] step:7741/10000 train_time:321014ms step_avg:41.47ms +[2025-09-11 12:48:59] [Rank 0] step:7761/10000 train_time:321713ms step_avg:41.45ms +[2025-09-11 12:48:59] [Rank 0] step:7761/10000 train_time:321713ms step_avg:41.45ms 
+[2025-09-11 12:49:00] [Rank 0] step:7781/10000 train_time:322415ms step_avg:41.44ms +[2025-09-11 12:49:00] [Rank 0] step:7781/10000 train_time:322415ms step_avg:41.44ms +[2025-09-11 12:49:00] [Rank 0] step:7801/10000 train_time:323116ms step_avg:41.42ms +[2025-09-11 12:49:00] [Rank 0] step:7801/10000 train_time:323116ms step_avg:41.42ms +[2025-09-11 12:49:01] [Rank 0] step:7821/10000 train_time:323816ms step_avg:41.40ms +[2025-09-11 12:49:01] [Rank 0] step:7821/10000 train_time:323816ms step_avg:41.40ms +[2025-09-11 12:49:02] [Rank 0] step:7841/10000 train_time:324816ms step_avg:41.43ms +[2025-09-11 12:49:02] [Rank 0] step:7841/10000 train_time:324816ms step_avg:41.43ms +[2025-09-11 12:49:03] [Rank 0] step:7861/10000 train_time:325519ms step_avg:41.41ms +[2025-09-11 12:49:03] [Rank 0] step:7861/10000 train_time:325519ms step_avg:41.41ms +[2025-09-11 12:49:03] [Rank 0] step:7881/10000 train_time:326220ms step_avg:41.39ms +[2025-09-11 12:49:03] [Rank 0] step:7881/10000 train_time:326220ms step_avg:41.39ms +[2025-09-11 12:49:04] [Rank 0] step:7901/10000 train_time:327215ms step_avg:41.41ms +[2025-09-11 12:49:04] [Rank 0] step:7901/10000 train_time:327215ms step_avg:41.41ms +[2025-09-11 12:49:05] [Rank 0] step:7921/10000 train_time:327915ms step_avg:41.40ms +[2025-09-11 12:49:05] [Rank 0] step:7921/10000 train_time:327915ms step_avg:41.40ms +[2025-09-11 12:49:06] [Rank 0] step:7941/10000 train_time:328617ms step_avg:41.38ms +[2025-09-11 12:49:06] [Rank 0] step:7941/10000 train_time:328617ms step_avg:41.38ms +[2025-09-11 12:49:07] [Rank 0] step:7961/10000 train_time:329316ms step_avg:41.37ms +[2025-09-11 12:49:07] [Rank 0] step:7961/10000 train_time:329316ms step_avg:41.37ms +[2025-09-11 12:49:07] [Rank 0] step:7981/10000 train_time:330019ms step_avg:41.35ms +[2025-09-11 12:49:07] [Rank 0] step:7981/10000 train_time:330019ms step_avg:41.35ms +[2025-09-11 12:49:08] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 12:49:08] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 12:49:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 12:49:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 12:49:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 12:49:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 12:49:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:49:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:49:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 12:49:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 12:49:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 12:49:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 12:49:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 12:49:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 12:49:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 12:49:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 12:49:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 12:49:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 12:49:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 12:49:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 12:49:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 12:49:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 12:49:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 12:49:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 12:49:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 12:49:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 12:49:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 12:49:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 12:49:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 12:49:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 12:49:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 12:49:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 12:49:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 12:49:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 12:49:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 12:49:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 12:49:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 12:49:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 12:49:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 12:49:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 12:49:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 12:49:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 12:49:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 12:49:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 12:49:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 12:49:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 12:49:19] [Rank 0] PRINT: step:8000/10000 val_loss:5.6804 total_sharp:8.6447e-02 L1_sharp:7.0684e-02 L2_sharp:7.8623e-02 L3_sharp:1.0827e-01 L4_sharp:1.4776e-01 L5_sharp:1.9923e-01 L6_sharp:3.0426e-01 L7_sharp:5.1226e-01 L8_sharp:9.2503e-01 L9_sharp:1.7618e+00 L10_sharp:3.2434e+00 L11_sharp:3.9116e+00 L12_sharp:2.2770e+00 total_fnorm:5.4297e-01 total_l1_linf:2.8400e+02 total_spectral:2.7148e-01 L1_fnorm:1.8921e-02 L2_fnorm:1.9043e-02 L3_fnorm:1.9165e-02 L4_fnorm:1.9287e-02 L5_fnorm:1.9287e-02 L6_fnorm:1.9165e-02 L7_fnorm:1.9165e-02 L8_fnorm:1.9165e-02 L9_fnorm:1.9287e-02 L10_fnorm:1.9287e-02 L11_fnorm:1.9409e-02 L12_fnorm:1.7700e-02 L1_l1linf:3.1128e-03 L2_l1linf:3.1281e-03 L3_l1linf:3.3112e-03 L4_l1linf:3.4790e-03 L5_l1linf:3.5400e-03 L6_l1linf:3.7994e-03 L7_l1linf:3.7231e-03 L8_l1linf:3.9978e-03 L9_l1linf:4.2725e-03 L10_l1linf:4.3640e-03 L11_l1linf:4.6997e-03 L12_l1linf:3.9673e-03 L1_spectral:3.9852e-04 L2_spectral:4.0234e-04 L3_spectral:3.9977e-04 L4_spectral:4.0264e-04 L5_spectral:4.0236e-04 L6_spectral:3.9298e-04 L7_spectral:3.9141e-04 L8_spectral:3.9193e-04 L9_spectral:3.8525e-04 L10_spectral:3.7832e-04 L11_spectral:3.7137e-04 L12_spectral:3.3020e-04 train_time:330697ms step_avg:41.34ms +[2025-09-11 12:49:19] [Rank 0] PRINT: step:8000/10000 
val_loss:5.6804 total_sharp:8.6447e-02 L1_sharp:7.0684e-02 L2_sharp:7.8623e-02 L3_sharp:1.0827e-01 L4_sharp:1.4776e-01 L5_sharp:1.9923e-01 L6_sharp:3.0426e-01 L7_sharp:5.1226e-01 L8_sharp:9.2503e-01 L9_sharp:1.7618e+00 L10_sharp:3.2434e+00 L11_sharp:3.9116e+00 L12_sharp:2.2770e+00 total_fnorm:5.4297e-01 total_l1_linf:2.8400e+02 total_spectral:2.7148e-01 L1_fnorm:1.8921e-02 L2_fnorm:1.9043e-02 L3_fnorm:1.9165e-02 L4_fnorm:1.9287e-02 L5_fnorm:1.9287e-02 L6_fnorm:1.9165e-02 L7_fnorm:1.9165e-02 L8_fnorm:1.9165e-02 L9_fnorm:1.9287e-02 L10_fnorm:1.9287e-02 L11_fnorm:1.9409e-02 L12_fnorm:1.7700e-02 L1_l1linf:3.1128e-03 L2_l1linf:3.1281e-03 L3_l1linf:3.3112e-03 L4_l1linf:3.4790e-03 L5_l1linf:3.5400e-03 L6_l1linf:3.7994e-03 L7_l1linf:3.7231e-03 L8_l1linf:3.9978e-03 L9_l1linf:4.2725e-03 L10_l1linf:4.3640e-03 L11_l1linf:4.6997e-03 L12_l1linf:3.9673e-03 L1_spectral:3.9852e-04 L2_spectral:4.0234e-04 L3_spectral:3.9977e-04 L4_spectral:4.0264e-04 L5_spectral:4.0236e-04 L6_spectral:3.9298e-04 L7_spectral:3.9141e-04 L8_spectral:3.9193e-04 L9_spectral:3.8525e-04 L10_spectral:3.7832e-04 L11_spectral:3.7137e-04 L12_spectral:3.3020e-04 train_time:330697ms step_avg:41.34ms +[2025-09-11 12:49:20] [Rank 0] step:8001/10000 train_time:332153ms step_avg:41.51ms +[2025-09-11 12:49:20] [Rank 0] step:8001/10000 train_time:332153ms step_avg:41.51ms +[2025-09-11 12:49:21] [Rank 0] step:8021/10000 train_time:332885ms step_avg:41.50ms +[2025-09-11 12:49:21] [Rank 0] step:8021/10000 train_time:332885ms step_avg:41.50ms +[2025-09-11 12:49:22] [Rank 0] step:8041/10000 train_time:333587ms step_avg:41.49ms +[2025-09-11 12:49:22] [Rank 0] step:8041/10000 train_time:333587ms step_avg:41.49ms +[2025-09-11 12:49:22] [Rank 0] step:8061/10000 train_time:334290ms step_avg:41.47ms +[2025-09-11 12:49:22] [Rank 0] step:8061/10000 train_time:334290ms step_avg:41.47ms +[2025-09-11 12:49:23] [Rank 0] step:8081/10000 train_time:334989ms step_avg:41.45ms +[2025-09-11 12:49:23] [Rank 0] step:8081/10000 
train_time:334989ms step_avg:41.45ms +[2025-09-11 12:49:24] [Rank 0] step:8101/10000 train_time:335689ms step_avg:41.44ms +[2025-09-11 12:49:24] [Rank 0] step:8101/10000 train_time:335689ms step_avg:41.44ms +[2025-09-11 12:49:24] [Rank 0] step:8121/10000 train_time:336395ms step_avg:41.42ms +[2025-09-11 12:49:24] [Rank 0] step:8121/10000 train_time:336395ms step_avg:41.42ms +[2025-09-11 12:49:26] [Rank 0] step:8141/10000 train_time:337871ms step_avg:41.50ms +[2025-09-11 12:49:26] [Rank 0] step:8141/10000 train_time:337871ms step_avg:41.50ms +[2025-09-11 12:49:27] [Rank 0] step:8161/10000 train_time:338576ms step_avg:41.49ms +[2025-09-11 12:49:27] [Rank 0] step:8161/10000 train_time:338576ms step_avg:41.49ms +[2025-09-11 12:49:27] [Rank 0] step:8181/10000 train_time:339288ms step_avg:41.47ms +[2025-09-11 12:49:27] [Rank 0] step:8181/10000 train_time:339288ms step_avg:41.47ms +[2025-09-11 12:49:28] [Rank 0] step:8201/10000 train_time:339997ms step_avg:41.46ms +[2025-09-11 12:49:28] [Rank 0] step:8201/10000 train_time:339997ms step_avg:41.46ms +[2025-09-11 12:49:29] [Rank 0] step:8221/10000 train_time:340704ms step_avg:41.44ms +[2025-09-11 12:49:29] [Rank 0] step:8221/10000 train_time:340704ms step_avg:41.44ms +[2025-09-11 12:49:29] [Rank 0] step:8241/10000 train_time:341419ms step_avg:41.43ms +[2025-09-11 12:49:29] [Rank 0] step:8241/10000 train_time:341419ms step_avg:41.43ms +[2025-09-11 12:49:30] [Rank 0] step:8261/10000 train_time:342126ms step_avg:41.41ms +[2025-09-11 12:49:30] [Rank 0] step:8261/10000 train_time:342126ms step_avg:41.41ms +[2025-09-11 12:49:31] [Rank 0] step:8281/10000 train_time:342829ms step_avg:41.40ms +[2025-09-11 12:49:31] [Rank 0] step:8281/10000 train_time:342829ms step_avg:41.40ms +[2025-09-11 12:49:32] [Rank 0] step:8301/10000 train_time:343536ms step_avg:41.38ms +[2025-09-11 12:49:32] [Rank 0] step:8301/10000 train_time:343536ms step_avg:41.38ms +[2025-09-11 12:49:32] [Rank 0] step:8321/10000 train_time:344244ms step_avg:41.37ms 
+[2025-09-11 12:49:32] [Rank 0] step:8321/10000 train_time:344244ms step_avg:41.37ms +[2025-09-11 12:49:33] [Rank 0] step:8341/10000 train_time:344957ms step_avg:41.36ms +[2025-09-11 12:49:33] [Rank 0] step:8341/10000 train_time:344957ms step_avg:41.36ms +[2025-09-11 12:49:34] [Rank 0] step:8361/10000 train_time:345660ms step_avg:41.34ms +[2025-09-11 12:49:34] [Rank 0] step:8361/10000 train_time:345660ms step_avg:41.34ms +[2025-09-11 12:49:34] [Rank 0] step:8381/10000 train_time:346370ms step_avg:41.33ms +[2025-09-11 12:49:34] [Rank 0] step:8381/10000 train_time:346370ms step_avg:41.33ms +[2025-09-11 12:49:35] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 12:49:35] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 12:49:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 12:49:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 12:49:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 12:49:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 12:49:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:49:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:49:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 12:49:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 12:49:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 12:49:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 12:49:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 12:49:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 12:49:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 12:49:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 12:49:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 12:49:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 12:49:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 12:49:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 12:49:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 12:49:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 12:49:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 12:49:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 12:49:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 12:49:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 12:49:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 12:49:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 12:49:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 12:49:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 12:49:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 12:49:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 12:49:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 12:49:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 12:49:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 12:49:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 12:49:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 12:49:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 12:49:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 12:49:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 12:49:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 12:49:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 12:49:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 12:49:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 12:49:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 12:49:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:49:46] [Rank 0] PRINT: step:8400/10000 val_loss:5.6701 total_sharp:7.1252e-02 L1_sharp:5.6197e-02 L2_sharp:7.0481e-02 L3_sharp:8.7895e-02 L4_sharp:1.0949e-01 L5_sharp:1.6355e-01 L6_sharp:2.9339e-01 L7_sharp:4.1715e-01 L8_sharp:7.5537e-01 L9_sharp:1.4443e+00 L10_sharp:2.1932e+00 L11_sharp:2.5816e+00 L12_sharp:2.2101e+00 total_fnorm:4.0234e-01 total_l1_linf:1.8800e+02 total_spectral:2.0312e-01 L1_fnorm:1.4465e-02 L2_fnorm:1.4587e-02 L3_fnorm:1.4771e-02 L4_fnorm:1.4832e-02 L5_fnorm:1.4893e-02 L6_fnorm:1.4771e-02 L7_fnorm:1.4771e-02 L8_fnorm:1.4771e-02 L9_fnorm:1.4893e-02 L10_fnorm:1.4832e-02 L11_fnorm:1.4893e-02 L12_fnorm:1.3672e-02 L1_l1linf:2.1667e-03 L2_l1linf:2.3193e-03 L3_l1linf:2.3804e-03 L4_l1linf:2.4567e-03 L5_l1linf:2.5024e-03 L6_l1linf:2.6398e-03 L7_l1linf:2.7008e-03 L8_l1linf:2.7924e-03 L9_l1linf:3.0060e-03 L10_l1linf:3.0518e-03 L11_l1linf:3.2959e-03 L12_l1linf:2.8229e-03 L1_spectral:3.1490e-04 L2_spectral:3.1545e-04 L3_spectral:3.1971e-04 L4_spectral:3.1615e-04 L5_spectral:3.1976e-04 L6_spectral:3.1278e-04 L7_spectral:3.1384e-04 L8_spectral:3.0729e-04 L9_spectral:3.0670e-04 L10_spectral:3.0122e-04 L11_spectral:2.9630e-04 L12_spectral:2.5931e-04 train_time:347060ms step_avg:41.32ms +[2025-09-11 12:49:46] [Rank 0] PRINT: step:8400/10000 val_loss:5.6701 total_sharp:7.1252e-02 L1_sharp:5.6197e-02 L2_sharp:7.0481e-02 L3_sharp:8.7895e-02 L4_sharp:1.0949e-01 L5_sharp:1.6355e-01 L6_sharp:2.9339e-01 L7_sharp:4.1715e-01 L8_sharp:7.5537e-01 L9_sharp:1.4443e+00 L10_sharp:2.1932e+00 L11_sharp:2.5816e+00 L12_sharp:2.2101e+00 total_fnorm:4.0234e-01 total_l1_linf:1.8800e+02 total_spectral:2.0312e-01 L1_fnorm:1.4465e-02 L2_fnorm:1.4587e-02 L3_fnorm:1.4771e-02 L4_fnorm:1.4832e-02 L5_fnorm:1.4893e-02 L6_fnorm:1.4771e-02 L7_fnorm:1.4771e-02 L8_fnorm:1.4771e-02 L9_fnorm:1.4893e-02 L10_fnorm:1.4832e-02 L11_fnorm:1.4893e-02 L12_fnorm:1.3672e-02 L1_l1linf:2.1667e-03 L2_l1linf:2.3193e-03 L3_l1linf:2.3804e-03 L4_l1linf:2.4567e-03 L5_l1linf:2.5024e-03 
L6_l1linf:2.6398e-03 L7_l1linf:2.7008e-03 L8_l1linf:2.7924e-03 L9_l1linf:3.0060e-03 L10_l1linf:3.0518e-03 L11_l1linf:3.2959e-03 L12_l1linf:2.8229e-03 L1_spectral:3.1490e-04 L2_spectral:3.1545e-04 L3_spectral:3.1971e-04 L4_spectral:3.1615e-04 L5_spectral:3.1976e-04 L6_spectral:3.1278e-04 L7_spectral:3.1384e-04 L8_spectral:3.0729e-04 L9_spectral:3.0670e-04 L10_spectral:3.0122e-04 L11_spectral:2.9630e-04 L12_spectral:2.5931e-04 train_time:347060ms step_avg:41.32ms +[2025-09-11 12:49:47] [Rank 0] step:8401/10000 train_time:348537ms step_avg:41.49ms +[2025-09-11 12:49:47] [Rank 0] step:8401/10000 train_time:348537ms step_avg:41.49ms +[2025-09-11 12:49:48] [Rank 0] step:8421/10000 train_time:349267ms step_avg:41.48ms +[2025-09-11 12:49:48] [Rank 0] step:8421/10000 train_time:349267ms step_avg:41.48ms +[2025-09-11 12:49:49] [Rank 0] step:8441/10000 train_time:349975ms step_avg:41.46ms +[2025-09-11 12:49:49] [Rank 0] step:8441/10000 train_time:349975ms step_avg:41.46ms +[2025-09-11 12:49:49] [Rank 0] step:8461/10000 train_time:350684ms step_avg:41.45ms +[2025-09-11 12:49:49] [Rank 0] step:8461/10000 train_time:350684ms step_avg:41.45ms +[2025-09-11 12:49:50] [Rank 0] step:8481/10000 train_time:351400ms step_avg:41.43ms +[2025-09-11 12:49:50] [Rank 0] step:8481/10000 train_time:351400ms step_avg:41.43ms +[2025-09-11 12:49:51] [Rank 0] step:8501/10000 train_time:352107ms step_avg:41.42ms +[2025-09-11 12:49:51] [Rank 0] step:8501/10000 train_time:352107ms step_avg:41.42ms +[2025-09-11 12:49:51] [Rank 0] step:8521/10000 train_time:352813ms step_avg:41.41ms +[2025-09-11 12:49:51] [Rank 0] step:8521/10000 train_time:352813ms step_avg:41.41ms +[2025-09-11 12:49:52] [Rank 0] step:8541/10000 train_time:353520ms step_avg:41.39ms +[2025-09-11 12:49:52] [Rank 0] step:8541/10000 train_time:353520ms step_avg:41.39ms +[2025-09-11 12:49:53] [Rank 0] step:8561/10000 train_time:354232ms step_avg:41.38ms +[2025-09-11 12:49:53] [Rank 0] step:8561/10000 train_time:354232ms step_avg:41.38ms 
+[2025-09-11 12:49:53] [Rank 0] step:8581/10000 train_time:354943ms step_avg:41.36ms +[2025-09-11 12:49:53] [Rank 0] step:8581/10000 train_time:354943ms step_avg:41.36ms +[2025-09-11 12:49:54] [Rank 0] step:8601/10000 train_time:355651ms step_avg:41.35ms +[2025-09-11 12:49:54] [Rank 0] step:8601/10000 train_time:355651ms step_avg:41.35ms +[2025-09-11 12:49:55] [Rank 0] step:8621/10000 train_time:356358ms step_avg:41.34ms +[2025-09-11 12:49:55] [Rank 0] step:8621/10000 train_time:356358ms step_avg:41.34ms +[2025-09-11 12:49:56] [Rank 0] step:8641/10000 train_time:357064ms step_avg:41.32ms +[2025-09-11 12:49:56] [Rank 0] step:8641/10000 train_time:357064ms step_avg:41.32ms +[2025-09-11 12:49:56] [Rank 0] step:8661/10000 train_time:357772ms step_avg:41.31ms +[2025-09-11 12:49:56] [Rank 0] step:8661/10000 train_time:357772ms step_avg:41.31ms +[2025-09-11 12:49:57] [Rank 0] step:8681/10000 train_time:358481ms step_avg:41.29ms +[2025-09-11 12:49:57] [Rank 0] step:8681/10000 train_time:358481ms step_avg:41.29ms +[2025-09-11 12:49:58] [Rank 0] step:8701/10000 train_time:359187ms step_avg:41.28ms +[2025-09-11 12:49:58] [Rank 0] step:8701/10000 train_time:359187ms step_avg:41.28ms +[2025-09-11 12:49:58] [Rank 0] step:8721/10000 train_time:359895ms step_avg:41.27ms +[2025-09-11 12:49:58] [Rank 0] step:8721/10000 train_time:359895ms step_avg:41.27ms +[2025-09-11 12:49:59] [Rank 0] step:8741/10000 train_time:360599ms step_avg:41.25ms +[2025-09-11 12:49:59] [Rank 0] step:8741/10000 train_time:360599ms step_avg:41.25ms +[2025-09-11 12:50:00] [Rank 0] step:8761/10000 train_time:361309ms step_avg:41.24ms +[2025-09-11 12:50:00] [Rank 0] step:8761/10000 train_time:361309ms step_avg:41.24ms +[2025-09-11 12:50:01] [Rank 0] step:8781/10000 train_time:362014ms step_avg:41.23ms +[2025-09-11 12:50:01] [Rank 0] step:8781/10000 train_time:362014ms step_avg:41.23ms +[2025-09-11 12:50:01] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 12:50:01] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 12:50:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 12:50:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 12:50:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 12:50:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 12:50:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:50:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:50:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 12:50:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 12:50:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 12:50:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 12:50:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 12:50:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 12:50:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 12:50:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 12:50:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 12:50:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 12:50:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 12:50:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 12:50:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 12:50:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 12:50:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 12:50:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 12:50:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 12:50:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 12:50:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 12:50:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 12:50:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 12:50:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 12:50:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 12:50:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 12:50:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 12:50:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 12:50:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 12:50:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 12:50:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 12:50:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 12:50:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 12:50:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 12:50:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 12:50:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 12:50:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 12:50:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 12:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 12:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 12:50:16] [Rank 0] PRINT: step:8800/10000 val_loss:5.6642 total_sharp:5.5344e-02 L1_sharp:4.2415e-02 L2_sharp:5.3575e-02 L3_sharp:6.9771e-02 L4_sharp:7.1292e-02 L5_sharp:1.1824e-01 L6_sharp:2.0815e-01 L7_sharp:2.7319e-01 L8_sharp:4.7799e-01 L9_sharp:1.1054e+00 L10_sharp:2.0590e+00 L11_sharp:2.5856e+00 L12_sharp:2.3549e+00 total_fnorm:2.8906e-01 total_l1_linf:1.1900e+02 total_spectral:1.4551e-01 L1_fnorm:1.0315e-02 L2_fnorm:1.0376e-02 L3_fnorm:1.0498e-02 L4_fnorm:1.0559e-02 L5_fnorm:1.0620e-02 L6_fnorm:1.0559e-02 L7_fnorm:1.0559e-02 L8_fnorm:1.0559e-02 L9_fnorm:1.0559e-02 L10_fnorm:1.0559e-02 L11_fnorm:1.0620e-02 L12_fnorm:9.7046e-03 L1_l1linf:1.4725e-03 L2_l1linf:1.4496e-03 L3_l1linf:1.5030e-03 L4_l1linf:1.5411e-03 L5_l1linf:1.5869e-03 L6_l1linf:1.6632e-03 L7_l1linf:1.6861e-03 L8_l1linf:1.7471e-03 L9_l1linf:1.8845e-03 L10_l1linf:1.9379e-03 L11_l1linf:2.1820e-03 L12_l1linf:1.9226e-03 L1_spectral:2.2877e-04 L2_spectral:2.3253e-04 L3_spectral:2.3350e-04 L4_spectral:2.3489e-04 L5_spectral:2.3456e-04 L6_spectral:2.2953e-04 L7_spectral:2.2688e-04 L8_spectral:2.2533e-04 L9_spectral:2.2397e-04 L10_spectral:2.1934e-04 L11_spectral:2.1630e-04 L12_spectral:1.8847e-04 train_time:362755ms step_avg:41.22ms +[2025-09-11 12:50:16] [Rank 0] PRINT: step:8800/10000 
val_loss:5.6642 total_sharp:5.5344e-02 L1_sharp:4.2415e-02 L2_sharp:5.3575e-02 L3_sharp:6.9771e-02 L4_sharp:7.1292e-02 L5_sharp:1.1824e-01 L6_sharp:2.0815e-01 L7_sharp:2.7319e-01 L8_sharp:4.7799e-01 L9_sharp:1.1054e+00 L10_sharp:2.0590e+00 L11_sharp:2.5856e+00 L12_sharp:2.3549e+00 total_fnorm:2.8906e-01 total_l1_linf:1.1900e+02 total_spectral:1.4551e-01 L1_fnorm:1.0315e-02 L2_fnorm:1.0376e-02 L3_fnorm:1.0498e-02 L4_fnorm:1.0559e-02 L5_fnorm:1.0620e-02 L6_fnorm:1.0559e-02 L7_fnorm:1.0559e-02 L8_fnorm:1.0559e-02 L9_fnorm:1.0559e-02 L10_fnorm:1.0559e-02 L11_fnorm:1.0620e-02 L12_fnorm:9.7046e-03 L1_l1linf:1.4725e-03 L2_l1linf:1.4496e-03 L3_l1linf:1.5030e-03 L4_l1linf:1.5411e-03 L5_l1linf:1.5869e-03 L6_l1linf:1.6632e-03 L7_l1linf:1.6861e-03 L8_l1linf:1.7471e-03 L9_l1linf:1.8845e-03 L10_l1linf:1.9379e-03 L11_l1linf:2.1820e-03 L12_l1linf:1.9226e-03 L1_spectral:2.2877e-04 L2_spectral:2.3253e-04 L3_spectral:2.3350e-04 L4_spectral:2.3489e-04 L5_spectral:2.3456e-04 L6_spectral:2.2953e-04 L7_spectral:2.2688e-04 L8_spectral:2.2533e-04 L9_spectral:2.2397e-04 L10_spectral:2.1934e-04 L11_spectral:2.1630e-04 L12_spectral:1.8847e-04 train_time:362755ms step_avg:41.22ms +[2025-09-11 12:50:18] [Rank 0] step:8801/10000 train_time:364924ms step_avg:41.46ms +[2025-09-11 12:50:18] [Rank 0] step:8801/10000 train_time:364924ms step_avg:41.46ms +[2025-09-11 12:50:19] [Rank 0] step:8821/10000 train_time:365660ms step_avg:41.45ms +[2025-09-11 12:50:19] [Rank 0] step:8821/10000 train_time:365660ms step_avg:41.45ms +[2025-09-11 12:50:19] [Rank 0] step:8841/10000 train_time:366369ms step_avg:41.44ms +[2025-09-11 12:50:19] [Rank 0] step:8841/10000 train_time:366369ms step_avg:41.44ms +[2025-09-11 12:50:20] [Rank 0] step:8861/10000 train_time:367076ms step_avg:41.43ms +[2025-09-11 12:50:20] [Rank 0] step:8861/10000 train_time:367076ms step_avg:41.43ms +[2025-09-11 12:50:21] [Rank 0] step:8881/10000 train_time:367785ms step_avg:41.41ms +[2025-09-11 12:50:21] [Rank 0] step:8881/10000 
train_time:367785ms step_avg:41.41ms +[2025-09-11 12:50:22] [Rank 0] step:8901/10000 train_time:368495ms step_avg:41.40ms +[2025-09-11 12:50:22] [Rank 0] step:8901/10000 train_time:368495ms step_avg:41.40ms +[2025-09-11 12:50:22] [Rank 0] step:8921/10000 train_time:369199ms step_avg:41.39ms +[2025-09-11 12:50:22] [Rank 0] step:8921/10000 train_time:369199ms step_avg:41.39ms +[2025-09-11 12:50:23] [Rank 0] step:8941/10000 train_time:369910ms step_avg:41.37ms +[2025-09-11 12:50:23] [Rank 0] step:8941/10000 train_time:369910ms step_avg:41.37ms +[2025-09-11 12:50:24] [Rank 0] step:8961/10000 train_time:370626ms step_avg:41.36ms +[2025-09-11 12:50:24] [Rank 0] step:8961/10000 train_time:370626ms step_avg:41.36ms +[2025-09-11 12:50:24] [Rank 0] step:8981/10000 train_time:371337ms step_avg:41.35ms +[2025-09-11 12:50:24] [Rank 0] step:8981/10000 train_time:371337ms step_avg:41.35ms +[2025-09-11 12:50:25] [Rank 0] step:9001/10000 train_time:372040ms step_avg:41.33ms +[2025-09-11 12:50:25] [Rank 0] step:9001/10000 train_time:372040ms step_avg:41.33ms +[2025-09-11 12:50:26] [Rank 0] step:9021/10000 train_time:372748ms step_avg:41.32ms +[2025-09-11 12:50:26] [Rank 0] step:9021/10000 train_time:372748ms step_avg:41.32ms +[2025-09-11 12:50:27] [Rank 0] step:9041/10000 train_time:373458ms step_avg:41.31ms +[2025-09-11 12:50:27] [Rank 0] step:9041/10000 train_time:373458ms step_avg:41.31ms +[2025-09-11 12:50:27] [Rank 0] step:9061/10000 train_time:374165ms step_avg:41.29ms +[2025-09-11 12:50:27] [Rank 0] step:9061/10000 train_time:374165ms step_avg:41.29ms +[2025-09-11 12:50:28] [Rank 0] step:9081/10000 train_time:374877ms step_avg:41.28ms +[2025-09-11 12:50:28] [Rank 0] step:9081/10000 train_time:374877ms step_avg:41.28ms +[2025-09-11 12:50:29] [Rank 0] step:9101/10000 train_time:375589ms step_avg:41.27ms +[2025-09-11 12:50:29] [Rank 0] step:9101/10000 train_time:375589ms step_avg:41.27ms +[2025-09-11 12:50:29] [Rank 0] step:9121/10000 train_time:376301ms step_avg:41.26ms 
+[2025-09-11 12:50:29] [Rank 0] step:9121/10000 train_time:376301ms step_avg:41.26ms +[2025-09-11 12:50:30] [Rank 0] step:9141/10000 train_time:377007ms step_avg:41.24ms +[2025-09-11 12:50:30] [Rank 0] step:9141/10000 train_time:377007ms step_avg:41.24ms +[2025-09-11 12:50:31] [Rank 0] step:9161/10000 train_time:377719ms step_avg:41.23ms +[2025-09-11 12:50:31] [Rank 0] step:9161/10000 train_time:377719ms step_avg:41.23ms +[2025-09-11 12:50:31] [Rank 0] step:9181/10000 train_time:378430ms step_avg:41.22ms +[2025-09-11 12:50:31] [Rank 0] step:9181/10000 train_time:378430ms step_avg:41.22ms +[2025-09-11 12:50:32] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 12:50:32] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 12:50:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 12:50:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 12:50:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 12:50:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 12:50:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:50:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:50:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 12:50:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 12:50:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 12:50:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 12:50:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 12:50:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 12:50:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 12:50:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 12:50:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 12:50:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 12:50:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 12:50:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 12:50:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 12:50:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 12:50:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 12:50:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 12:50:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 12:50:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 12:50:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 12:50:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 12:50:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 12:50:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 12:50:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 12:50:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 12:50:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 12:50:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 12:50:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 12:50:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 12:50:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 12:50:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 12:50:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 12:50:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 12:50:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 12:50:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 12:50:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 12:50:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 12:50:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 12:50:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:50:43] [Rank 0] PRINT: step:9200/10000 val_loss:5.6576 total_sharp:7.9265e-02 L1_sharp:4.1841e-02 L2_sharp:4.1006e-02 L3_sharp:5.5684e-02 L4_sharp:8.6337e-02 L5_sharp:1.0632e-01 L6_sharp:2.3251e-01 L7_sharp:3.6100e-01 L8_sharp:6.9098e-01 L9_sharp:1.2899e+00 L10_sharp:2.5278e+00 L11_sharp:3.2566e+00 L12_sharp:2.9516e+00 total_fnorm:1.7773e-01 total_l1_linf:6.4500e+01 total_spectral:8.9355e-02 L1_fnorm:6.7749e-03 L2_fnorm:6.8665e-03 L3_fnorm:6.8970e-03 L4_fnorm:6.9580e-03 L5_fnorm:7.0190e-03 L6_fnorm:6.9275e-03 L7_fnorm:6.9275e-03 L8_fnorm:6.8970e-03 L9_fnorm:7.0190e-03 L10_fnorm:6.9580e-03 L11_fnorm:6.9885e-03 L12_fnorm:6.2561e-03 L1_l1linf:8.6975e-04 L2_l1linf:9.1934e-04 L3_l1linf:9.2316e-04 L4_l1linf:9.2316e-04 L5_l1linf:9.1934e-04 L6_l1linf:1.0300e-03 L7_l1linf:1.0071e-03 L8_l1linf:1.0529e-03 L9_l1linf:1.1673e-03 L10_l1linf:1.2131e-03 L11_l1linf:1.2741e-03 L12_l1linf:1.0757e-03 L1_spectral:1.5394e-04 L2_spectral:1.5564e-04 L3_spectral:1.5656e-04 L4_spectral:1.5654e-04 L5_spectral:1.5866e-04 L6_spectral:1.5158e-04 L7_spectral:1.5129e-04 L8_spectral:1.5031e-04 L9_spectral:1.4856e-04 L10_spectral:1.4442e-04 L11_spectral:1.4092e-04 L12_spectral:1.2506e-04 train_time:379123ms step_avg:41.21ms +[2025-09-11 12:50:43] [Rank 0] PRINT: step:9200/10000 val_loss:5.6576 total_sharp:7.9265e-02 L1_sharp:4.1841e-02 L2_sharp:4.1006e-02 L3_sharp:5.5684e-02 L4_sharp:8.6337e-02 L5_sharp:1.0632e-01 L6_sharp:2.3251e-01 L7_sharp:3.6100e-01 L8_sharp:6.9098e-01 L9_sharp:1.2899e+00 L10_sharp:2.5278e+00 L11_sharp:3.2566e+00 L12_sharp:2.9516e+00 total_fnorm:1.7773e-01 total_l1_linf:6.4500e+01 total_spectral:8.9355e-02 L1_fnorm:6.7749e-03 L2_fnorm:6.8665e-03 L3_fnorm:6.8970e-03 L4_fnorm:6.9580e-03 L5_fnorm:7.0190e-03 L6_fnorm:6.9275e-03 L7_fnorm:6.9275e-03 L8_fnorm:6.8970e-03 L9_fnorm:7.0190e-03 L10_fnorm:6.9580e-03 L11_fnorm:6.9885e-03 L12_fnorm:6.2561e-03 L1_l1linf:8.6975e-04 L2_l1linf:9.1934e-04 L3_l1linf:9.2316e-04 L4_l1linf:9.2316e-04 L5_l1linf:9.1934e-04 
L6_l1linf:1.0300e-03 L7_l1linf:1.0071e-03 L8_l1linf:1.0529e-03 L9_l1linf:1.1673e-03 L10_l1linf:1.2131e-03 L11_l1linf:1.2741e-03 L12_l1linf:1.0757e-03 L1_spectral:1.5394e-04 L2_spectral:1.5564e-04 L3_spectral:1.5656e-04 L4_spectral:1.5654e-04 L5_spectral:1.5866e-04 L6_spectral:1.5158e-04 L7_spectral:1.5129e-04 L8_spectral:1.5031e-04 L9_spectral:1.4856e-04 L10_spectral:1.4442e-04 L11_spectral:1.4092e-04 L12_spectral:1.2506e-04 train_time:379123ms step_avg:41.21ms +[2025-09-11 12:50:45] [Rank 0] step:9201/10000 train_time:381312ms step_avg:41.44ms +[2025-09-11 12:50:45] [Rank 0] step:9201/10000 train_time:381312ms step_avg:41.44ms +[2025-09-11 12:50:46] [Rank 0] step:9221/10000 train_time:382057ms step_avg:41.43ms +[2025-09-11 12:50:46] [Rank 0] step:9221/10000 train_time:382057ms step_avg:41.43ms +[2025-09-11 12:50:47] [Rank 0] step:9241/10000 train_time:382765ms step_avg:41.42ms +[2025-09-11 12:50:47] [Rank 0] step:9241/10000 train_time:382765ms step_avg:41.42ms +[2025-09-11 12:50:47] [Rank 0] step:9261/10000 train_time:383475ms step_avg:41.41ms +[2025-09-11 12:50:47] [Rank 0] step:9261/10000 train_time:383475ms step_avg:41.41ms +[2025-09-11 12:50:48] [Rank 0] step:9281/10000 train_time:384185ms step_avg:41.39ms +[2025-09-11 12:50:48] [Rank 0] step:9281/10000 train_time:384185ms step_avg:41.39ms +[2025-09-11 12:50:49] [Rank 0] step:9301/10000 train_time:384891ms step_avg:41.38ms +[2025-09-11 12:50:49] [Rank 0] step:9301/10000 train_time:384891ms step_avg:41.38ms +[2025-09-11 12:50:50] [Rank 0] step:9321/10000 train_time:385601ms step_avg:41.37ms +[2025-09-11 12:50:50] [Rank 0] step:9321/10000 train_time:385601ms step_avg:41.37ms +[2025-09-11 12:50:50] [Rank 0] step:9341/10000 train_time:386307ms step_avg:41.36ms +[2025-09-11 12:50:50] [Rank 0] step:9341/10000 train_time:386307ms step_avg:41.36ms +[2025-09-11 12:50:51] [Rank 0] step:9361/10000 train_time:387011ms step_avg:41.34ms +[2025-09-11 12:50:51] [Rank 0] step:9361/10000 train_time:387011ms step_avg:41.34ms 
+[2025-09-11 12:50:52] [Rank 0] step:9381/10000 train_time:387719ms step_avg:41.33ms +[2025-09-11 12:50:52] [Rank 0] step:9381/10000 train_time:387719ms step_avg:41.33ms +[2025-09-11 12:50:52] [Rank 0] step:9401/10000 train_time:388429ms step_avg:41.32ms +[2025-09-11 12:50:52] [Rank 0] step:9401/10000 train_time:388429ms step_avg:41.32ms +[2025-09-11 12:50:53] [Rank 0] step:9421/10000 train_time:389139ms step_avg:41.31ms +[2025-09-11 12:50:53] [Rank 0] step:9421/10000 train_time:389139ms step_avg:41.31ms +[2025-09-11 12:50:54] [Rank 0] step:9441/10000 train_time:389850ms step_avg:41.29ms +[2025-09-11 12:50:54] [Rank 0] step:9441/10000 train_time:389850ms step_avg:41.29ms +[2025-09-11 12:50:54] [Rank 0] step:9461/10000 train_time:390558ms step_avg:41.28ms +[2025-09-11 12:50:54] [Rank 0] step:9461/10000 train_time:390558ms step_avg:41.28ms +[2025-09-11 12:50:55] [Rank 0] step:9481/10000 train_time:391269ms step_avg:41.27ms +[2025-09-11 12:50:55] [Rank 0] step:9481/10000 train_time:391269ms step_avg:41.27ms +[2025-09-11 12:50:56] [Rank 0] step:9501/10000 train_time:391978ms step_avg:41.26ms +[2025-09-11 12:50:56] [Rank 0] step:9501/10000 train_time:391978ms step_avg:41.26ms +[2025-09-11 12:50:57] [Rank 0] step:9521/10000 train_time:392691ms step_avg:41.24ms +[2025-09-11 12:50:57] [Rank 0] step:9521/10000 train_time:392691ms step_avg:41.24ms +[2025-09-11 12:50:57] [Rank 0] step:9541/10000 train_time:393398ms step_avg:41.23ms +[2025-09-11 12:50:57] [Rank 0] step:9541/10000 train_time:393398ms step_avg:41.23ms +[2025-09-11 12:50:58] [Rank 0] step:9561/10000 train_time:394107ms step_avg:41.22ms +[2025-09-11 12:50:58] [Rank 0] step:9561/10000 train_time:394107ms step_avg:41.22ms +[2025-09-11 12:50:59] [Rank 0] step:9581/10000 train_time:394817ms step_avg:41.21ms +[2025-09-11 12:50:59] [Rank 0] step:9581/10000 train_time:394817ms step_avg:41.21ms +[2025-09-11 12:50:59] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 12:50:59] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 12:51:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 12:51:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 12:51:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 12:51:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 12:51:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:51:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:51:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 12:51:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 12:51:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 12:51:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 12:51:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 12:51:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 12:51:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 12:51:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 12:51:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 12:51:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 12:51:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 12:51:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 12:51:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 12:51:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 12:51:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 12:51:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 12:51:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 12:51:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 12:51:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 12:51:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 12:51:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 12:51:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 12:51:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 12:51:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 12:51:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 12:51:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 12:51:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 12:51:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 12:51:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 12:51:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 12:51:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 12:51:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 12:51:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 12:51:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 12:51:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 12:51:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 12:51:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 12:51:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 12:51:14] [Rank 0] PRINT: step:9600/10000 val_loss:5.6544 total_sharp:2.7372e-02 L1_sharp:3.5195e-02 L2_sharp:3.7785e-02 L3_sharp:4.4900e-02 L4_sharp:4.3642e-02 L5_sharp:6.8892e-02 L6_sharp:1.3473e-01 L7_sharp:2.0650e-01 L8_sharp:3.6094e-01 L9_sharp:6.3031e-01 L10_sharp:1.2380e+00 L11_sharp:1.5130e+00 L12_sharp:1.6431e+00 total_fnorm:1.0449e-01 total_l1_linf:3.1000e+01 total_spectral:5.2734e-02 L1_fnorm:3.7537e-03 L2_fnorm:3.7689e-03 L3_fnorm:3.8452e-03 L4_fnorm:3.8757e-03 L5_fnorm:3.8910e-03 L6_fnorm:3.8605e-03 L7_fnorm:3.8757e-03 L8_fnorm:3.8910e-03 L9_fnorm:3.9062e-03 L10_fnorm:3.9062e-03 L11_fnorm:3.9062e-03 L12_fnorm:3.4943e-03 L1_l1linf:4.2915e-04 L2_l1linf:4.4060e-04 L3_l1linf:4.6539e-04 L4_l1linf:4.4823e-04 L5_l1linf:4.8447e-04 L6_l1linf:4.6349e-04 L7_l1linf:4.7302e-04 L8_l1linf:5.6076e-04 L9_l1linf:5.5695e-04 L10_l1linf:5.7220e-04 L11_l1linf:6.0272e-04 L12_l1linf:5.1880e-04 L1_spectral:8.7797e-05 L2_spectral:8.7909e-05 L3_spectral:8.9080e-05 L4_spectral:8.8716e-05 L5_spectral:8.9356e-05 L6_spectral:8.7563e-05 L7_spectral:8.6739e-05 L8_spectral:8.6896e-05 L9_spectral:8.4815e-05 L10_spectral:8.2996e-05 L11_spectral:8.0106e-05 L12_spectral:7.1457e-05 train_time:395504ms step_avg:41.20ms +[2025-09-11 12:51:14] [Rank 0] PRINT: step:9600/10000 
val_loss:5.6544 total_sharp:2.7372e-02 L1_sharp:3.5195e-02 L2_sharp:3.7785e-02 L3_sharp:4.4900e-02 L4_sharp:4.3642e-02 L5_sharp:6.8892e-02 L6_sharp:1.3473e-01 L7_sharp:2.0650e-01 L8_sharp:3.6094e-01 L9_sharp:6.3031e-01 L10_sharp:1.2380e+00 L11_sharp:1.5130e+00 L12_sharp:1.6431e+00 total_fnorm:1.0449e-01 total_l1_linf:3.1000e+01 total_spectral:5.2734e-02 L1_fnorm:3.7537e-03 L2_fnorm:3.7689e-03 L3_fnorm:3.8452e-03 L4_fnorm:3.8757e-03 L5_fnorm:3.8910e-03 L6_fnorm:3.8605e-03 L7_fnorm:3.8757e-03 L8_fnorm:3.8910e-03 L9_fnorm:3.9062e-03 L10_fnorm:3.9062e-03 L11_fnorm:3.9062e-03 L12_fnorm:3.4943e-03 L1_l1linf:4.2915e-04 L2_l1linf:4.4060e-04 L3_l1linf:4.6539e-04 L4_l1linf:4.4823e-04 L5_l1linf:4.8447e-04 L6_l1linf:4.6349e-04 L7_l1linf:4.7302e-04 L8_l1linf:5.6076e-04 L9_l1linf:5.5695e-04 L10_l1linf:5.7220e-04 L11_l1linf:6.0272e-04 L12_l1linf:5.1880e-04 L1_spectral:8.7797e-05 L2_spectral:8.7909e-05 L3_spectral:8.9080e-05 L4_spectral:8.8716e-05 L5_spectral:8.9356e-05 L6_spectral:8.7563e-05 L7_spectral:8.6739e-05 L8_spectral:8.6896e-05 L9_spectral:8.4815e-05 L10_spectral:8.2996e-05 L11_spectral:8.0106e-05 L12_spectral:7.1457e-05 train_time:395504ms step_avg:41.20ms +[2025-09-11 12:51:16] [Rank 0] step:9601/10000 train_time:397750ms step_avg:41.43ms +[2025-09-11 12:51:16] [Rank 0] step:9601/10000 train_time:397750ms step_avg:41.43ms +[2025-09-11 12:51:17] [Rank 0] step:9621/10000 train_time:398475ms step_avg:41.42ms +[2025-09-11 12:51:17] [Rank 0] step:9621/10000 train_time:398475ms step_avg:41.42ms +[2025-09-11 12:51:18] [Rank 0] step:9641/10000 train_time:399189ms step_avg:41.41ms +[2025-09-11 12:51:18] [Rank 0] step:9641/10000 train_time:399189ms step_avg:41.41ms +[2025-09-11 12:51:18] [Rank 0] step:9661/10000 train_time:399909ms step_avg:41.39ms +[2025-09-11 12:51:18] [Rank 0] step:9661/10000 train_time:399909ms step_avg:41.39ms +[2025-09-11 12:51:19] [Rank 0] step:9681/10000 train_time:400623ms step_avg:41.38ms +[2025-09-11 12:51:19] [Rank 0] step:9681/10000 
train_time:400623ms step_avg:41.38ms +[2025-09-11 12:51:20] [Rank 0] step:9701/10000 train_time:401337ms step_avg:41.37ms +[2025-09-11 12:51:20] [Rank 0] step:9701/10000 train_time:401337ms step_avg:41.37ms +[2025-09-11 12:51:20] [Rank 0] step:9721/10000 train_time:402057ms step_avg:41.36ms +[2025-09-11 12:51:20] [Rank 0] step:9721/10000 train_time:402057ms step_avg:41.36ms +[2025-09-11 12:51:21] [Rank 0] step:9741/10000 train_time:402773ms step_avg:41.35ms +[2025-09-11 12:51:21] [Rank 0] step:9741/10000 train_time:402773ms step_avg:41.35ms +[2025-09-11 12:51:22] [Rank 0] step:9761/10000 train_time:403489ms step_avg:41.34ms +[2025-09-11 12:51:22] [Rank 0] step:9761/10000 train_time:403489ms step_avg:41.34ms +[2025-09-11 12:51:23] [Rank 0] step:9781/10000 train_time:404202ms step_avg:41.33ms +[2025-09-11 12:51:23] [Rank 0] step:9781/10000 train_time:404202ms step_avg:41.33ms +[2025-09-11 12:51:23] [Rank 0] step:9801/10000 train_time:404922ms step_avg:41.31ms +[2025-09-11 12:51:23] [Rank 0] step:9801/10000 train_time:404922ms step_avg:41.31ms +[2025-09-11 12:51:24] [Rank 0] step:9821/10000 train_time:405639ms step_avg:41.30ms +[2025-09-11 12:51:24] [Rank 0] step:9821/10000 train_time:405639ms step_avg:41.30ms +[2025-09-11 12:51:25] [Rank 0] step:9841/10000 train_time:406359ms step_avg:41.29ms +[2025-09-11 12:51:25] [Rank 0] step:9841/10000 train_time:406359ms step_avg:41.29ms +[2025-09-11 12:51:25] [Rank 0] step:9861/10000 train_time:407073ms step_avg:41.28ms +[2025-09-11 12:51:25] [Rank 0] step:9861/10000 train_time:407073ms step_avg:41.28ms +[2025-09-11 12:51:26] [Rank 0] step:9881/10000 train_time:407789ms step_avg:41.27ms +[2025-09-11 12:51:26] [Rank 0] step:9881/10000 train_time:407789ms step_avg:41.27ms +[2025-09-11 12:51:27] [Rank 0] step:9901/10000 train_time:408501ms step_avg:41.26ms +[2025-09-11 12:51:27] [Rank 0] step:9901/10000 train_time:408501ms step_avg:41.26ms +[2025-09-11 12:51:28] [Rank 0] step:9921/10000 train_time:409217ms step_avg:41.25ms 
+[2025-09-11 12:51:28] [Rank 0] step:9921/10000 train_time:409217ms step_avg:41.25ms +[2025-09-11 12:51:28] [Rank 0] step:9941/10000 train_time:409937ms step_avg:41.24ms +[2025-09-11 12:51:28] [Rank 0] step:9941/10000 train_time:409937ms step_avg:41.24ms +[2025-09-11 12:51:29] [Rank 0] step:9961/10000 train_time:410658ms step_avg:41.23ms +[2025-09-11 12:51:29] [Rank 0] step:9961/10000 train_time:410658ms step_avg:41.23ms +[2025-09-11 12:51:30] [Rank 0] step:9981/10000 train_time:411375ms step_avg:41.22ms +[2025-09-11 12:51:30] [Rank 0] step:9981/10000 train_time:411375ms step_avg:41.22ms +[2025-09-11 12:51:30] [Rank 0] step:10000/10000 train_time:412064ms step_avg:41.21ms +[2025-09-11 12:51:30] [Rank 0] step:10000/10000 train_time:412064ms step_avg:41.21ms +[2025-09-11 12:51:30] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 12:51:30] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 12:51:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 12:51:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 12:51:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 12:51:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 12:51:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:51:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:51:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 12:51:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 12:51:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 12:51:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 12:51:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 12:51:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 12:51:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 12:51:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 12:51:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 12:51:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 12:51:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 12:51:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 12:51:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 12:51:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 12:51:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 12:51:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 12:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 12:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 12:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 12:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 12:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 12:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 12:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 12:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 12:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 12:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 12:51:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 12:51:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 12:51:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 12:51:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 12:51:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 12:51:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 12:51:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 12:51:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 12:51:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 12:51:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 12:51:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 12:51:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:51:41] [Rank 0] PRINT: step:10000/10000 val_loss:5.6540 total_sharp:2.4230e-02 L1_sharp:2.5408e-02 L2_sharp:2.2222e-02 L3_sharp:3.2310e-02 L4_sharp:3.9738e-02 L5_sharp:6.4402e-02 L6_sharp:1.0424e-01 L7_sharp:1.7347e-01 L8_sharp:3.3798e-01 L9_sharp:5.8943e-01 L10_sharp:1.1225e+00 L11_sharp:1.2565e+00 L12_sharp:1.4180e+00 total_fnorm:4.0283e-02 total_l1_linf:8.7500e+00 total_spectral:2.0264e-02 L1_fnorm:1.4725e-03 L2_fnorm:1.4877e-03 L3_fnorm:1.5030e-03 L4_fnorm:1.5030e-03 L5_fnorm:1.5259e-03 L6_fnorm:1.5030e-03 L7_fnorm:1.5030e-03 L8_fnorm:1.5030e-03 L9_fnorm:1.5182e-03 L10_fnorm:1.5182e-03 L11_fnorm:1.5259e-03 L12_fnorm:1.3428e-03 L1_l1linf:1.4019e-04 L2_l1linf:1.4687e-04 L3_l1linf:1.5068e-04 L4_l1linf:1.4400e-04 L5_l1linf:1.6689e-04 L6_l1linf:1.4496e-04 L7_l1linf:1.5831e-04 L8_l1linf:1.7166e-04 L9_l1linf:1.7643e-04 L10_l1linf:1.8311e-04 L11_l1linf:1.9264e-04 L12_l1linf:1.6212e-04 L1_spectral:3.4701e-05 L2_spectral:3.5554e-05 L3_spectral:3.5850e-05 L4_spectral:3.5412e-05 L5_spectral:3.6232e-05 L6_spectral:3.4811e-05 L7_spectral:3.4310e-05 L8_spectral:3.4712e-05 L9_spectral:3.3790e-05 L10_spectral:3.2892e-05 L11_spectral:3.2298e-05 L12_spectral:2.8853e-05 train_time:412083ms step_avg:41.21ms +[2025-09-11 12:51:41] [Rank 0] PRINT: step:10000/10000 val_loss:5.6540 total_sharp:2.4230e-02 L1_sharp:2.5408e-02 L2_sharp:2.2222e-02 L3_sharp:3.2310e-02 L4_sharp:3.9738e-02 L5_sharp:6.4402e-02 L6_sharp:1.0424e-01 L7_sharp:1.7347e-01 L8_sharp:3.3798e-01 L9_sharp:5.8943e-01 L10_sharp:1.1225e+00 L11_sharp:1.2565e+00 L12_sharp:1.4180e+00 total_fnorm:4.0283e-02 total_l1_linf:8.7500e+00 total_spectral:2.0264e-02 L1_fnorm:1.4725e-03 L2_fnorm:1.4877e-03 L3_fnorm:1.5030e-03 L4_fnorm:1.5030e-03 L5_fnorm:1.5259e-03 L6_fnorm:1.5030e-03 L7_fnorm:1.5030e-03 L8_fnorm:1.5030e-03 L9_fnorm:1.5182e-03 L10_fnorm:1.5182e-03 L11_fnorm:1.5259e-03 L12_fnorm:1.3428e-03 L1_l1linf:1.4019e-04 L2_l1linf:1.4687e-04 L3_l1linf:1.5068e-04 L4_l1linf:1.4400e-04 L5_l1linf:1.6689e-04 
L6_l1linf:1.4496e-04 L7_l1linf:1.5831e-04 L8_l1linf:1.7166e-04 L9_l1linf:1.7643e-04 L10_l1linf:1.8311e-04 L11_l1linf:1.9264e-04 L12_l1linf:1.6212e-04 L1_spectral:3.4701e-05 L2_spectral:3.5554e-05 L3_spectral:3.5850e-05 L4_spectral:3.5412e-05 L5_spectral:3.6232e-05 L6_spectral:3.4811e-05 L7_spectral:3.4310e-05 L8_spectral:3.4712e-05 L9_spectral:3.3790e-05 L10_spectral:3.2892e-05 L11_spectral:3.2298e-05 L12_spectral:2.8853e-05 train_time:412083ms step_avg:41.21ms +[2025-09-11 12:51:41] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 12:51:41 2025 --- +[2025-09-11 12:51:41] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 12:51:41 2025 --- +[2025-09-11 12:51:41] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 12:51:41] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.0005_seed_45/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.0005_seed_45/config.json new file mode 100644 index 0000000000000000000000000000000000000000..a21ba0bdfbd657f5f0b9f1b231a84c63d83453f7 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.0005_seed_45/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 45, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.001, + "muon_lr": 0.0005, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "a285383b-2309-488b-9713-632fe4bea5ec", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.0005_seed_45/training_log_a285383b-2309-488b-9713-632fe4bea5ec.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.0005_seed_45/training_log_a285383b-2309-488b-9713-632fe4bea5ec.txt new file mode 100644 index 0000000000000000000000000000000000000000..e4d1a5f8172e64230b255bd05f348f69aea83545 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.0005_seed_45/training_log_a285383b-2309-488b-9713-632fe4bea5ec.txt @@ -0,0 +1,4264 @@ +[2025-09-11 13:19:52] [Rank 0] PRINT: --- Script Start: Thu Sep 11 13:19:52 2025 --- +[2025-09-11 13:19:52] [Rank 0] PRINT: --- Script Start: Thu Sep 11 13:19:52 2025 --- +[2025-09-11 13:19:52] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001, muon_lr=0.0005, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 13:19:52] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001, muon_lr=0.0005, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 13:19:52] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 13:19:52] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 13:19:52] [Rank 0] PRINT: Using fixed seed: 45 +[2025-09-11 13:19:52] [Rank 0] PRINT: Using fixed seed: 45 +[2025-09-11 13:19:52] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.0005_seed_45 +[2025-09-11 13:19:52] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.0005_seed_45 +[2025-09-11 13:19:52] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from 
dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + 
assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." 
+ "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + 
train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 13:19:52] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 13:19:52] [Rank 0] PRINT: Constructing model... +[2025-09-11 13:19:52] [Rank 0] PRINT: Constructing model... +[2025-09-11 13:19:53] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 13:19:53] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 13:19:53] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 13:19:53] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 13:19:53] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 13:19:53] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 13:19:53] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 13:19:53] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 13:19:53] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 13:19:53] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 13:19:56] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 13:19:56] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 13:19:56] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 13:19:56] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 13:19:56] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 13:19:56] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 13:20:01] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 13:20:01] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 13:20:01] [Rank 0] PRINT: Starting warmup... +[2025-09-11 13:20:01] [Rank 0] PRINT: Starting warmup... +[2025-09-11 13:20:40] [Rank 0] PRINT: Warmup complete. +[2025-09-11 13:20:40] [Rank 0] PRINT: Warmup complete. +[2025-09-11 13:20:40] [Rank 0] PRINT: Starting training... +[2025-09-11 13:20:40] [Rank 0] PRINT: Starting training... 
+[2025-09-11 13:20:41] [Rank 0] step:21/10000 train_time:1134ms step_avg:54.02ms +[2025-09-11 13:20:41] [Rank 0] step:21/10000 train_time:1134ms step_avg:54.02ms +[2025-09-11 13:20:42] [Rank 0] step:41/10000 train_time:2123ms step_avg:51.79ms +[2025-09-11 13:20:42] [Rank 0] step:41/10000 train_time:2123ms step_avg:51.79ms +[2025-09-11 13:20:43] [Rank 0] step:61/10000 train_time:2849ms step_avg:46.70ms +[2025-09-11 13:20:43] [Rank 0] step:61/10000 train_time:2849ms step_avg:46.70ms +[2025-09-11 13:20:44] [Rank 0] step:81/10000 train_time:3575ms step_avg:44.14ms +[2025-09-11 13:20:44] [Rank 0] step:81/10000 train_time:3575ms step_avg:44.14ms +[2025-09-11 13:20:44] [Rank 0] step:101/10000 train_time:4301ms step_avg:42.59ms +[2025-09-11 13:20:44] [Rank 0] step:101/10000 train_time:4301ms step_avg:42.59ms +[2025-09-11 13:20:45] [Rank 0] step:121/10000 train_time:5027ms step_avg:41.55ms +[2025-09-11 13:20:45] [Rank 0] step:121/10000 train_time:5027ms step_avg:41.55ms +[2025-09-11 13:20:46] [Rank 0] step:141/10000 train_time:5753ms step_avg:40.80ms +[2025-09-11 13:20:46] [Rank 0] step:141/10000 train_time:5753ms step_avg:40.80ms +[2025-09-11 13:20:46] [Rank 0] step:161/10000 train_time:6478ms step_avg:40.24ms +[2025-09-11 13:20:46] [Rank 0] step:161/10000 train_time:6478ms step_avg:40.24ms +[2025-09-11 13:20:47] [Rank 0] step:181/10000 train_time:7204ms step_avg:39.80ms +[2025-09-11 13:20:47] [Rank 0] step:181/10000 train_time:7204ms step_avg:39.80ms +[2025-09-11 13:20:48] [Rank 0] step:201/10000 train_time:7928ms step_avg:39.44ms +[2025-09-11 13:20:48] [Rank 0] step:201/10000 train_time:7928ms step_avg:39.44ms +[2025-09-11 13:20:49] [Rank 0] step:221/10000 train_time:8654ms step_avg:39.16ms +[2025-09-11 13:20:49] [Rank 0] step:221/10000 train_time:8654ms step_avg:39.16ms +[2025-09-11 13:20:49] [Rank 0] step:241/10000 train_time:9379ms step_avg:38.92ms +[2025-09-11 13:20:49] [Rank 0] step:241/10000 train_time:9379ms step_avg:38.92ms +[2025-09-11 13:20:50] [Rank 0] 
step:261/10000 train_time:10105ms step_avg:38.72ms +[2025-09-11 13:20:50] [Rank 0] step:261/10000 train_time:10105ms step_avg:38.72ms +[2025-09-11 13:20:51] [Rank 0] step:281/10000 train_time:10830ms step_avg:38.54ms +[2025-09-11 13:20:51] [Rank 0] step:281/10000 train_time:10830ms step_avg:38.54ms +[2025-09-11 13:20:51] [Rank 0] step:301/10000 train_time:11555ms step_avg:38.39ms +[2025-09-11 13:20:51] [Rank 0] step:301/10000 train_time:11555ms step_avg:38.39ms +[2025-09-11 13:20:52] [Rank 0] step:321/10000 train_time:12281ms step_avg:38.26ms +[2025-09-11 13:20:52] [Rank 0] step:321/10000 train_time:12281ms step_avg:38.26ms +[2025-09-11 13:20:53] [Rank 0] step:341/10000 train_time:13005ms step_avg:38.14ms +[2025-09-11 13:20:53] [Rank 0] step:341/10000 train_time:13005ms step_avg:38.14ms +[2025-09-11 13:20:54] [Rank 0] step:361/10000 train_time:13731ms step_avg:38.04ms +[2025-09-11 13:20:54] [Rank 0] step:361/10000 train_time:13731ms step_avg:38.04ms +[2025-09-11 13:20:54] [Rank 0] step:381/10000 train_time:14457ms step_avg:37.94ms +[2025-09-11 13:20:54] [Rank 0] step:381/10000 train_time:14457ms step_avg:37.94ms +[2025-09-11 13:20:55] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 13:20:55] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 13:20:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 13:20:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 13:21:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 13:21:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 13:21:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:21:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 13:21:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 13:21:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 13:21:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 13:21:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 13:21:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 13:21:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 13:21:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 13:21:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 13:21:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 13:21:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 13:21:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 13:21:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 13:21:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 13:21:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 13:21:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 13:21:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 13:21:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 13:21:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 13:21:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 13:21:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 13:21:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 13:21:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 13:21:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 13:21:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 13:21:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 13:21:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 13:21:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 13:21:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 13:21:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 13:21:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 13:21:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 13:21:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 13:21:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 13:21:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 13:21:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 13:21:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 13:21:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 13:21:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:21:42] [Rank 0] PRINT: step:400/10000 val_loss:7.2884 total_sharp:1.9135e-03 L1_sharp:5.3638e-02 L2_sharp:6.2235e-02 L3_sharp:6.2520e-02 L4_sharp:8.0989e-02 L5_sharp:8.7879e-02 L6_sharp:9.9224e-02 L7_sharp:1.1077e-01 L8_sharp:1.2081e-01 L9_sharp:1.2565e-01 L10_sharp:1.0686e-01 L11_sharp:1.0308e-01 L12_sharp:1.4317e-01 total_fnorm:5.4313e+00 total_l1_linf:1.4472e+04 total_spectral:2.7155e+00 L1_fnorm:5.5141e-02 L2_fnorm:5.5352e-02 L3_fnorm:5.4784e-02 L4_fnorm:5.4560e-02 L5_fnorm:5.4083e-02 L6_fnorm:5.3917e-02 L7_fnorm:5.3449e-02 L8_fnorm:5.2081e-02 L9_fnorm:5.0403e-02 L10_fnorm:4.8526e-02 L11_fnorm:4.5550e-02 L12_fnorm:4.3506e-02 L1_l1linf:2.2821e-02 L2_l1linf:2.2740e-02 L3_l1linf:2.2677e-02 L4_l1linf:2.2468e-02 L5_l1linf:2.2469e-02 L6_l1linf:2.2294e-02 L7_l1linf:2.1882e-02 L8_l1linf:2.1368e-02 L9_l1linf:2.0851e-02 L10_l1linf:1.9941e-02 L11_l1linf:1.9314e-02 L12_l1linf:1.8172e-02 L1_spectral:6.0245e-04 L2_spectral:6.0251e-04 L3_spectral:6.0239e-04 L4_spectral:6.0228e-04 L5_spectral:6.0232e-04 L6_spectral:6.0235e-04 L7_spectral:6.0224e-04 L8_spectral:6.0226e-04 L9_spectral:6.0220e-04 L10_spectral:6.0210e-04 L11_spectral:6.0183e-04 L12_spectral:6.0186e-04 train_time:15162ms step_avg:37.90ms +[2025-09-11 13:21:42] [Rank 0] PRINT: step:400/10000 val_loss:7.2884 total_sharp:1.9135e-03 L1_sharp:5.3638e-02 L2_sharp:6.2235e-02 L3_sharp:6.2520e-02 L4_sharp:8.0989e-02 L5_sharp:8.7879e-02 L6_sharp:9.9224e-02 L7_sharp:1.1077e-01 L8_sharp:1.2081e-01 L9_sharp:1.2565e-01 L10_sharp:1.0686e-01 L11_sharp:1.0308e-01 L12_sharp:1.4317e-01 total_fnorm:5.4313e+00 total_l1_linf:1.4472e+04 total_spectral:2.7155e+00 L1_fnorm:5.5141e-02 L2_fnorm:5.5352e-02 L3_fnorm:5.4784e-02 L4_fnorm:5.4560e-02 L5_fnorm:5.4083e-02 L6_fnorm:5.3917e-02 L7_fnorm:5.3449e-02 L8_fnorm:5.2081e-02 L9_fnorm:5.0403e-02 L10_fnorm:4.8526e-02 L11_fnorm:4.5550e-02 L12_fnorm:4.3506e-02 L1_l1linf:2.2821e-02 L2_l1linf:2.2740e-02 L3_l1linf:2.2677e-02 L4_l1linf:2.2468e-02 L5_l1linf:2.2469e-02 
L6_l1linf:2.2294e-02 L7_l1linf:2.1882e-02 L8_l1linf:2.1368e-02 L9_l1linf:2.0851e-02 L10_l1linf:1.9941e-02 L11_l1linf:1.9314e-02 L12_l1linf:1.8172e-02 L1_spectral:6.0245e-04 L2_spectral:6.0251e-04 L3_spectral:6.0239e-04 L4_spectral:6.0228e-04 L5_spectral:6.0232e-04 L6_spectral:6.0235e-04 L7_spectral:6.0224e-04 L8_spectral:6.0226e-04 L9_spectral:6.0220e-04 L10_spectral:6.0210e-04 L11_spectral:6.0183e-04 L12_spectral:6.0186e-04 train_time:15162ms step_avg:37.90ms +[2025-09-11 13:22:12] [Rank 0] step:401/10000 train_time:45088ms step_avg:112.44ms +[2025-09-11 13:22:12] [Rank 0] step:401/10000 train_time:45088ms step_avg:112.44ms +[2025-09-11 13:22:15] [Rank 0] step:421/10000 train_time:47992ms step_avg:114.00ms +[2025-09-11 13:22:15] [Rank 0] step:421/10000 train_time:47992ms step_avg:114.00ms +[2025-09-11 13:22:16] [Rank 0] step:441/10000 train_time:48631ms step_avg:110.27ms +[2025-09-11 13:22:16] [Rank 0] step:441/10000 train_time:48631ms step_avg:110.27ms +[2025-09-11 13:22:16] [Rank 0] step:461/10000 train_time:49270ms step_avg:106.88ms +[2025-09-11 13:22:16] [Rank 0] step:461/10000 train_time:49270ms step_avg:106.88ms +[2025-09-11 13:22:17] [Rank 0] step:481/10000 train_time:49909ms step_avg:103.76ms +[2025-09-11 13:22:17] [Rank 0] step:481/10000 train_time:49909ms step_avg:103.76ms +[2025-09-11 13:22:18] [Rank 0] step:501/10000 train_time:50548ms step_avg:100.89ms +[2025-09-11 13:22:18] [Rank 0] step:501/10000 train_time:50548ms step_avg:100.89ms +[2025-09-11 13:22:18] [Rank 0] step:521/10000 train_time:51187ms step_avg:98.25ms +[2025-09-11 13:22:18] [Rank 0] step:521/10000 train_time:51187ms step_avg:98.25ms +[2025-09-11 13:22:19] [Rank 0] step:541/10000 train_time:51826ms step_avg:95.80ms +[2025-09-11 13:22:19] [Rank 0] step:541/10000 train_time:51826ms step_avg:95.80ms +[2025-09-11 13:22:20] [Rank 0] step:561/10000 train_time:52465ms step_avg:93.52ms +[2025-09-11 13:22:20] [Rank 0] step:561/10000 train_time:52465ms step_avg:93.52ms +[2025-09-11 13:22:20] [Rank 
0] step:581/10000 train_time:53104ms step_avg:91.40ms +[2025-09-11 13:22:20] [Rank 0] step:581/10000 train_time:53104ms step_avg:91.40ms +[2025-09-11 13:22:21] [Rank 0] step:601/10000 train_time:53744ms step_avg:89.42ms +[2025-09-11 13:22:21] [Rank 0] step:601/10000 train_time:53744ms step_avg:89.42ms +[2025-09-11 13:22:22] [Rank 0] step:621/10000 train_time:54382ms step_avg:87.57ms +[2025-09-11 13:22:22] [Rank 0] step:621/10000 train_time:54382ms step_avg:87.57ms +[2025-09-11 13:22:22] [Rank 0] step:641/10000 train_time:55021ms step_avg:85.84ms +[2025-09-11 13:22:22] [Rank 0] step:641/10000 train_time:55021ms step_avg:85.84ms +[2025-09-11 13:22:23] [Rank 0] step:661/10000 train_time:55659ms step_avg:84.20ms +[2025-09-11 13:22:23] [Rank 0] step:661/10000 train_time:55659ms step_avg:84.20ms +[2025-09-11 13:22:23] [Rank 0] step:681/10000 train_time:56298ms step_avg:82.67ms +[2025-09-11 13:22:23] [Rank 0] step:681/10000 train_time:56298ms step_avg:82.67ms +[2025-09-11 13:22:24] [Rank 0] step:701/10000 train_time:56936ms step_avg:81.22ms +[2025-09-11 13:22:24] [Rank 0] step:701/10000 train_time:56936ms step_avg:81.22ms +[2025-09-11 13:22:25] [Rank 0] step:721/10000 train_time:57574ms step_avg:79.85ms +[2025-09-11 13:22:25] [Rank 0] step:721/10000 train_time:57574ms step_avg:79.85ms +[2025-09-11 13:22:25] [Rank 0] step:741/10000 train_time:58213ms step_avg:78.56ms +[2025-09-11 13:22:25] [Rank 0] step:741/10000 train_time:58213ms step_avg:78.56ms +[2025-09-11 13:22:26] [Rank 0] step:761/10000 train_time:58857ms step_avg:77.34ms +[2025-09-11 13:22:26] [Rank 0] step:761/10000 train_time:58857ms step_avg:77.34ms +[2025-09-11 13:22:27] [Rank 0] step:781/10000 train_time:59500ms step_avg:76.18ms +[2025-09-11 13:22:27] [Rank 0] step:781/10000 train_time:59500ms step_avg:76.18ms +[2025-09-11 13:22:27] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 13:22:27] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 13:22:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 13:22:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 13:23:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 13:23:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 13:23:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:23:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:23:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 13:23:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 13:23:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 13:23:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 13:23:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 13:23:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 13:23:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 13:23:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 13:23:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 13:23:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 13:23:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 13:23:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 13:23:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 13:23:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 13:23:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 13:23:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 13:23:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 13:23:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 13:23:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 13:23:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 13:23:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 13:23:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 13:23:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 13:23:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 13:23:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 13:23:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 13:23:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 13:23:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 13:23:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 13:23:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 13:23:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 13:23:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 13:23:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... 
+[2025-09-11 13:23:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 13:23:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 13:23:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 13:23:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 13:23:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 13:23:14] [Rank 0] PRINT: step:800/10000 val_loss:6.9039 total_sharp:2.1884e-02 L1_sharp:4.7124e-01 L2_sharp:5.2553e-01 L3_sharp:5.4538e-01 L4_sharp:6.1057e-01 L5_sharp:8.0422e-01 L6_sharp:7.7875e-01 L7_sharp:8.1013e-01 L8_sharp:1.0311e+00 L9_sharp:9.8274e-01 L10_sharp:1.1042e+00 L11_sharp:1.4332e+00 L12_sharp:1.9747e+00 total_fnorm:3.0781e+00 total_l1_linf:3.3760e+03 total_spectral:1.5391e+00 L1_fnorm:4.4434e-02 L2_fnorm:4.6143e-02 L3_fnorm:4.5654e-02 L4_fnorm:4.4922e-02 L5_fnorm:4.2725e-02 L6_fnorm:4.3945e-02 L7_fnorm:4.3457e-02 L8_fnorm:4.0527e-02 L9_fnorm:4.0039e-02 L10_fnorm:3.7354e-02 L11_fnorm:3.4424e-02 L12_fnorm:3.0029e-02 L1_l1linf:1.8921e-02 L2_l1linf:1.9043e-02 L3_l1linf:1.8921e-02 L4_l1linf:1.9165e-02 L5_l1linf:1.9043e-02 L6_l1linf:1.8677e-02 L7_l1linf:1.8433e-02 L8_l1linf:1.8188e-02 L9_l1linf:1.6968e-02 L10_l1linf:1.5625e-02 L11_l1linf:1.4587e-02 L12_l1linf:1.3306e-02 L1_spectral:7.1702e-04 L2_spectral:7.1505e-04 L3_spectral:7.1369e-04 L4_spectral:7.1968e-04 L5_spectral:7.1790e-04 L6_spectral:7.2065e-04 L7_spectral:7.2656e-04 L8_spectral:7.2570e-04 L9_spectral:7.2220e-04 L10_spectral:7.1050e-04 L11_spectral:7.0316e-04 L12_spectral:6.9551e-04 train_time:60126ms step_avg:75.16ms +[2025-09-11 13:23:14] [Rank 0] PRINT: step:800/10000 val_loss:6.9039 total_sharp:2.1884e-02 L1_sharp:4.7124e-01 L2_sharp:5.2553e-01 L3_sharp:5.4538e-01 L4_sharp:6.1057e-01 L5_sharp:8.0422e-01 L6_sharp:7.7875e-01 L7_sharp:8.1013e-01 L8_sharp:1.0311e+00 
L9_sharp:9.8274e-01 L10_sharp:1.1042e+00 L11_sharp:1.4332e+00 L12_sharp:1.9747e+00 total_fnorm:3.0781e+00 total_l1_linf:3.3760e+03 total_spectral:1.5391e+00 L1_fnorm:4.4434e-02 L2_fnorm:4.6143e-02 L3_fnorm:4.5654e-02 L4_fnorm:4.4922e-02 L5_fnorm:4.2725e-02 L6_fnorm:4.3945e-02 L7_fnorm:4.3457e-02 L8_fnorm:4.0527e-02 L9_fnorm:4.0039e-02 L10_fnorm:3.7354e-02 L11_fnorm:3.4424e-02 L12_fnorm:3.0029e-02 L1_l1linf:1.8921e-02 L2_l1linf:1.9043e-02 L3_l1linf:1.8921e-02 L4_l1linf:1.9165e-02 L5_l1linf:1.9043e-02 L6_l1linf:1.8677e-02 L7_l1linf:1.8433e-02 L8_l1linf:1.8188e-02 L9_l1linf:1.6968e-02 L10_l1linf:1.5625e-02 L11_l1linf:1.4587e-02 L12_l1linf:1.3306e-02 L1_spectral:7.1702e-04 L2_spectral:7.1505e-04 L3_spectral:7.1369e-04 L4_spectral:7.1968e-04 L5_spectral:7.1790e-04 L6_spectral:7.2065e-04 L7_spectral:7.2656e-04 L8_spectral:7.2570e-04 L9_spectral:7.2220e-04 L10_spectral:7.1050e-04 L11_spectral:7.0316e-04 L12_spectral:6.9551e-04 train_time:60126ms step_avg:75.16ms +[2025-09-11 13:23:16] [Rank 0] step:801/10000 train_time:61822ms step_avg:77.18ms +[2025-09-11 13:23:16] [Rank 0] step:801/10000 train_time:61822ms step_avg:77.18ms +[2025-09-11 13:23:16] [Rank 0] step:821/10000 train_time:62468ms step_avg:76.09ms +[2025-09-11 13:23:16] [Rank 0] step:821/10000 train_time:62468ms step_avg:76.09ms +[2025-09-11 13:23:17] [Rank 0] step:841/10000 train_time:63112ms step_avg:75.04ms +[2025-09-11 13:23:17] [Rank 0] step:841/10000 train_time:63112ms step_avg:75.04ms +[2025-09-11 13:23:18] [Rank 0] step:861/10000 train_time:63756ms step_avg:74.05ms +[2025-09-11 13:23:18] [Rank 0] step:861/10000 train_time:63756ms step_avg:74.05ms +[2025-09-11 13:23:18] [Rank 0] step:881/10000 train_time:64399ms step_avg:73.10ms +[2025-09-11 13:23:18] [Rank 0] step:881/10000 train_time:64399ms step_avg:73.10ms +[2025-09-11 13:23:19] [Rank 0] step:901/10000 train_time:65043ms step_avg:72.19ms +[2025-09-11 13:23:19] [Rank 0] step:901/10000 train_time:65043ms step_avg:72.19ms +[2025-09-11 13:23:20] [Rank 0] 
step:921/10000 train_time:65686ms step_avg:71.32ms +[2025-09-11 13:23:20] [Rank 0] step:921/10000 train_time:65686ms step_avg:71.32ms +[2025-09-11 13:23:20] [Rank 0] step:941/10000 train_time:66329ms step_avg:70.49ms +[2025-09-11 13:23:20] [Rank 0] step:941/10000 train_time:66329ms step_avg:70.49ms +[2025-09-11 13:23:21] [Rank 0] step:961/10000 train_time:66972ms step_avg:69.69ms +[2025-09-11 13:23:21] [Rank 0] step:961/10000 train_time:66972ms step_avg:69.69ms +[2025-09-11 13:23:22] [Rank 0] step:981/10000 train_time:67614ms step_avg:68.92ms +[2025-09-11 13:23:22] [Rank 0] step:981/10000 train_time:67614ms step_avg:68.92ms +[2025-09-11 13:23:22] [Rank 0] step:1001/10000 train_time:68257ms step_avg:68.19ms +[2025-09-11 13:23:22] [Rank 0] step:1001/10000 train_time:68257ms step_avg:68.19ms +[2025-09-11 13:23:23] [Rank 0] step:1021/10000 train_time:68900ms step_avg:67.48ms +[2025-09-11 13:23:23] [Rank 0] step:1021/10000 train_time:68900ms step_avg:67.48ms +[2025-09-11 13:23:24] [Rank 0] step:1041/10000 train_time:69543ms step_avg:66.80ms +[2025-09-11 13:23:24] [Rank 0] step:1041/10000 train_time:69543ms step_avg:66.80ms +[2025-09-11 13:23:24] [Rank 0] step:1061/10000 train_time:70185ms step_avg:66.15ms +[2025-09-11 13:23:24] [Rank 0] step:1061/10000 train_time:70185ms step_avg:66.15ms +[2025-09-11 13:23:25] [Rank 0] step:1081/10000 train_time:70829ms step_avg:65.52ms +[2025-09-11 13:23:25] [Rank 0] step:1081/10000 train_time:70829ms step_avg:65.52ms +[2025-09-11 13:23:25] [Rank 0] step:1101/10000 train_time:71471ms step_avg:64.91ms +[2025-09-11 13:23:25] [Rank 0] step:1101/10000 train_time:71471ms step_avg:64.91ms +[2025-09-11 13:23:26] [Rank 0] step:1121/10000 train_time:72114ms step_avg:64.33ms +[2025-09-11 13:23:26] [Rank 0] step:1121/10000 train_time:72114ms step_avg:64.33ms +[2025-09-11 13:23:27] [Rank 0] step:1141/10000 train_time:72757ms step_avg:63.77ms +[2025-09-11 13:23:27] [Rank 0] step:1141/10000 train_time:72757ms step_avg:63.77ms +[2025-09-11 13:23:27] 
[Rank 0] step:1161/10000 train_time:73400ms step_avg:63.22ms +[2025-09-11 13:23:27] [Rank 0] step:1161/10000 train_time:73400ms step_avg:63.22ms +[2025-09-11 13:23:28] [Rank 0] step:1181/10000 train_time:74043ms step_avg:62.69ms +[2025-09-11 13:23:28] [Rank 0] step:1181/10000 train_time:74043ms step_avg:62.69ms +[2025-09-11 13:23:29] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 13:23:29] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 13:23:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 13:23:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 13:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 13:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 13:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 13:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 13:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 13:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 13:23:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 13:23:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 13:23:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... 
+[2025-09-11 13:23:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 13:23:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 13:23:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 13:23:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 13:23:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 13:23:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 13:23:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 13:23:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 13:23:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 13:23:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 13:23:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 13:23:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 13:23:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 13:23:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 13:23:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 13:23:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 13:23:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 13:23:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 13:23:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 13:23:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... 
+[2025-09-11 13:23:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 13:23:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 13:23:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 13:23:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 13:23:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 13:23:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 13:23:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 13:23:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 13:23:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 13:23:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 13:23:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:23:39] [Rank 0] PRINT: step:1200/10000 val_loss:6.6730 total_sharp:3.4092e-02 L1_sharp:3.8485e-01 L2_sharp:4.5030e-01 L3_sharp:4.2981e-01 L4_sharp:4.6342e-01 L5_sharp:5.0182e-01 L6_sharp:5.2042e-01 L7_sharp:6.5344e-01 L8_sharp:8.2632e-01 L9_sharp:1.3272e+00 L10_sharp:1.9563e+00 L11_sharp:1.9760e+00 L12_sharp:1.3345e+00 total_fnorm:2.3438e+00 total_l1_linf:2.2240e+03 total_spectral:1.1719e+00 L1_fnorm:4.8584e-02 L2_fnorm:4.9072e-02 L3_fnorm:4.9072e-02 L4_fnorm:4.9072e-02 L5_fnorm:4.7852e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8340e-02 L8_fnorm:4.6387e-02 L9_fnorm:4.7119e-02 L10_fnorm:4.5166e-02 L11_fnorm:4.2969e-02 L12_fnorm:3.5889e-02 L1_l1linf:1.6846e-02 L2_l1linf:1.7334e-02 L3_l1linf:1.7090e-02 L4_l1linf:1.7212e-02 L5_l1linf:1.7212e-02 L6_l1linf:1.7212e-02 L7_l1linf:1.7334e-02 L8_l1linf:1.7822e-02 L9_l1linf:1.7700e-02 L10_l1linf:1.7212e-02 L11_l1linf:1.5869e-02 L12_l1linf:1.3672e-02 L1_spectral:7.2801e-04 L2_spectral:7.2772e-04 L3_spectral:7.2420e-04 L4_spectral:7.1864e-04 L5_spectral:7.1099e-04 L6_spectral:7.1670e-04 L7_spectral:7.2081e-04 L8_spectral:7.3195e-04 L9_spectral:7.3896e-04 L10_spectral:7.4289e-04 L11_spectral:7.3164e-04 L12_spectral:7.0180e-04 train_time:74668ms step_avg:62.22ms +[2025-09-11 13:23:39] [Rank 0] PRINT: step:1200/10000 val_loss:6.6730 total_sharp:3.4092e-02 L1_sharp:3.8485e-01 L2_sharp:4.5030e-01 L3_sharp:4.2981e-01 L4_sharp:4.6342e-01 L5_sharp:5.0182e-01 L6_sharp:5.2042e-01 L7_sharp:6.5344e-01 L8_sharp:8.2632e-01 L9_sharp:1.3272e+00 L10_sharp:1.9563e+00 L11_sharp:1.9760e+00 L12_sharp:1.3345e+00 total_fnorm:2.3438e+00 total_l1_linf:2.2240e+03 total_spectral:1.1719e+00 L1_fnorm:4.8584e-02 L2_fnorm:4.9072e-02 L3_fnorm:4.9072e-02 L4_fnorm:4.9072e-02 L5_fnorm:4.7852e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8340e-02 L8_fnorm:4.6387e-02 L9_fnorm:4.7119e-02 L10_fnorm:4.5166e-02 L11_fnorm:4.2969e-02 L12_fnorm:3.5889e-02 L1_l1linf:1.6846e-02 L2_l1linf:1.7334e-02 L3_l1linf:1.7090e-02 L4_l1linf:1.7212e-02 L5_l1linf:1.7212e-02 
L6_l1linf:1.7212e-02 L7_l1linf:1.7334e-02 L8_l1linf:1.7822e-02 L9_l1linf:1.7700e-02 L10_l1linf:1.7212e-02 L11_l1linf:1.5869e-02 L12_l1linf:1.3672e-02 L1_spectral:7.2801e-04 L2_spectral:7.2772e-04 L3_spectral:7.2420e-04 L4_spectral:7.1864e-04 L5_spectral:7.1099e-04 L6_spectral:7.1670e-04 L7_spectral:7.2081e-04 L8_spectral:7.3195e-04 L9_spectral:7.3896e-04 L10_spectral:7.4289e-04 L11_spectral:7.3164e-04 L12_spectral:7.0180e-04 train_time:74668ms step_avg:62.22ms +[2025-09-11 13:23:41] [Rank 0] step:1201/10000 train_time:76469ms step_avg:63.67ms +[2025-09-11 13:23:41] [Rank 0] step:1201/10000 train_time:76469ms step_avg:63.67ms +[2025-09-11 13:23:42] [Rank 0] step:1221/10000 train_time:77143ms step_avg:63.18ms +[2025-09-11 13:23:42] [Rank 0] step:1221/10000 train_time:77143ms step_avg:63.18ms +[2025-09-11 13:23:42] [Rank 0] step:1241/10000 train_time:77787ms step_avg:62.68ms +[2025-09-11 13:23:42] [Rank 0] step:1241/10000 train_time:77787ms step_avg:62.68ms +[2025-09-11 13:23:43] [Rank 0] step:1261/10000 train_time:78431ms step_avg:62.20ms +[2025-09-11 13:23:43] [Rank 0] step:1261/10000 train_time:78431ms step_avg:62.20ms +[2025-09-11 13:23:43] [Rank 0] step:1281/10000 train_time:79075ms step_avg:61.73ms +[2025-09-11 13:23:43] [Rank 0] step:1281/10000 train_time:79075ms step_avg:61.73ms +[2025-09-11 13:23:44] [Rank 0] step:1301/10000 train_time:79718ms step_avg:61.27ms +[2025-09-11 13:23:44] [Rank 0] step:1301/10000 train_time:79718ms step_avg:61.27ms +[2025-09-11 13:23:45] [Rank 0] step:1321/10000 train_time:80362ms step_avg:60.83ms +[2025-09-11 13:23:45] [Rank 0] step:1321/10000 train_time:80362ms step_avg:60.83ms +[2025-09-11 13:23:45] [Rank 0] step:1341/10000 train_time:81005ms step_avg:60.41ms +[2025-09-11 13:23:45] [Rank 0] step:1341/10000 train_time:81005ms step_avg:60.41ms +[2025-09-11 13:23:46] [Rank 0] step:1361/10000 train_time:81649ms step_avg:59.99ms +[2025-09-11 13:23:46] [Rank 0] step:1361/10000 train_time:81649ms step_avg:59.99ms +[2025-09-11 13:23:47] 
[Rank 0] step:1381/10000 train_time:82292ms step_avg:59.59ms +[2025-09-11 13:23:47] [Rank 0] step:1381/10000 train_time:82292ms step_avg:59.59ms +[2025-09-11 13:23:47] [Rank 0] step:1401/10000 train_time:82935ms step_avg:59.20ms +[2025-09-11 13:23:47] [Rank 0] step:1401/10000 train_time:82935ms step_avg:59.20ms +[2025-09-11 13:23:48] [Rank 0] step:1421/10000 train_time:83577ms step_avg:58.82ms +[2025-09-11 13:23:48] [Rank 0] step:1421/10000 train_time:83577ms step_avg:58.82ms +[2025-09-11 13:23:49] [Rank 0] step:1441/10000 train_time:84524ms step_avg:58.66ms +[2025-09-11 13:23:49] [Rank 0] step:1441/10000 train_time:84524ms step_avg:58.66ms +[2025-09-11 13:23:50] [Rank 0] step:1461/10000 train_time:85167ms step_avg:58.29ms +[2025-09-11 13:23:50] [Rank 0] step:1461/10000 train_time:85167ms step_avg:58.29ms +[2025-09-11 13:23:50] [Rank 0] step:1481/10000 train_time:85809ms step_avg:57.94ms +[2025-09-11 13:23:50] [Rank 0] step:1481/10000 train_time:85809ms step_avg:57.94ms +[2025-09-11 13:23:51] [Rank 0] step:1501/10000 train_time:86760ms step_avg:57.80ms +[2025-09-11 13:23:51] [Rank 0] step:1501/10000 train_time:86760ms step_avg:57.80ms +[2025-09-11 13:23:52] [Rank 0] step:1521/10000 train_time:87407ms step_avg:57.47ms +[2025-09-11 13:23:52] [Rank 0] step:1521/10000 train_time:87407ms step_avg:57.47ms +[2025-09-11 13:23:52] [Rank 0] step:1541/10000 train_time:88055ms step_avg:57.14ms +[2025-09-11 13:23:52] [Rank 0] step:1541/10000 train_time:88055ms step_avg:57.14ms +[2025-09-11 13:23:53] [Rank 0] step:1561/10000 train_time:88701ms step_avg:56.82ms +[2025-09-11 13:23:53] [Rank 0] step:1561/10000 train_time:88701ms step_avg:56.82ms +[2025-09-11 13:23:54] [Rank 0] step:1581/10000 train_time:89349ms step_avg:56.51ms +[2025-09-11 13:23:54] [Rank 0] step:1581/10000 train_time:89349ms step_avg:56.51ms +[2025-09-11 13:23:54] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 13:23:54] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 13:23:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 13:23:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 13:23:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 13:23:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 13:23:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:23:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:23:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 13:23:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 13:23:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 13:23:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 13:24:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 13:24:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 13:24:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 13:24:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 13:24:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 13:24:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 13:24:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 13:24:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 13:24:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 13:24:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 13:24:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 13:24:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 13:24:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 13:24:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 13:24:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 13:24:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 13:24:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 13:24:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 13:24:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 13:24:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 13:24:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 13:24:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 13:24:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 13:24:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 13:24:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 13:24:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 13:24:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 13:24:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 13:24:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 13:24:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 13:24:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 13:24:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 13:24:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 13:24:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 13:24:04] [Rank 0] PRINT: step:1600/10000 val_loss:6.5067 total_sharp:7.3601e-02 L1_sharp:4.8819e-01 L2_sharp:5.4347e-01 L3_sharp:5.3211e-01 L4_sharp:5.9502e-01 L5_sharp:7.6973e-01 L6_sharp:8.4807e-01 L7_sharp:1.2175e+00 L8_sharp:1.7786e+00 L9_sharp:2.2477e+00 L10_sharp:2.2804e+00 L11_sharp:2.2888e+00 L12_sharp:3.2060e+00 total_fnorm:2.0938e+00 total_l1_linf:1.8320e+03 total_spectral:1.0469e+00 L1_fnorm:4.7852e-02 L2_fnorm:4.8828e-02 L3_fnorm:4.8828e-02 L4_fnorm:4.8828e-02 L5_fnorm:4.7119e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8584e-02 L8_fnorm:4.6143e-02 L9_fnorm:4.7852e-02 L10_fnorm:4.6631e-02 L11_fnorm:4.4922e-02 L12_fnorm:3.6865e-02 L1_l1linf:1.5625e-02 L2_l1linf:1.5747e-02 L3_l1linf:1.5991e-02 L4_l1linf:1.5869e-02 L5_l1linf:1.5747e-02 L6_l1linf:1.5991e-02 L7_l1linf:1.6479e-02 L8_l1linf:1.6846e-02 L9_l1linf:1.7090e-02 L10_l1linf:1.6968e-02 L11_l1linf:1.5381e-02 L12_l1linf:1.2939e-02 L1_spectral:7.4799e-04 L2_spectral:7.4618e-04 L3_spectral:7.4149e-04 L4_spectral:7.4045e-04 L5_spectral:7.3108e-04 L6_spectral:7.3620e-04 L7_spectral:7.3965e-04 L8_spectral:7.2449e-04 L9_spectral:7.4013e-04 L10_spectral:7.4491e-04 L11_spectral:7.2782e-04 L12_spectral:6.9220e-04 train_time:89977ms step_avg:56.24ms +[2025-09-11 13:24:04] [Rank 0] PRINT: step:1600/10000 
val_loss:6.5067 total_sharp:7.3601e-02 L1_sharp:4.8819e-01 L2_sharp:5.4347e-01 L3_sharp:5.3211e-01 L4_sharp:5.9502e-01 L5_sharp:7.6973e-01 L6_sharp:8.4807e-01 L7_sharp:1.2175e+00 L8_sharp:1.7786e+00 L9_sharp:2.2477e+00 L10_sharp:2.2804e+00 L11_sharp:2.2888e+00 L12_sharp:3.2060e+00 total_fnorm:2.0938e+00 total_l1_linf:1.8320e+03 total_spectral:1.0469e+00 L1_fnorm:4.7852e-02 L2_fnorm:4.8828e-02 L3_fnorm:4.8828e-02 L4_fnorm:4.8828e-02 L5_fnorm:4.7119e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8584e-02 L8_fnorm:4.6143e-02 L9_fnorm:4.7852e-02 L10_fnorm:4.6631e-02 L11_fnorm:4.4922e-02 L12_fnorm:3.6865e-02 L1_l1linf:1.5625e-02 L2_l1linf:1.5747e-02 L3_l1linf:1.5991e-02 L4_l1linf:1.5869e-02 L5_l1linf:1.5747e-02 L6_l1linf:1.5991e-02 L7_l1linf:1.6479e-02 L8_l1linf:1.6846e-02 L9_l1linf:1.7090e-02 L10_l1linf:1.6968e-02 L11_l1linf:1.5381e-02 L12_l1linf:1.2939e-02 L1_spectral:7.4799e-04 L2_spectral:7.4618e-04 L3_spectral:7.4149e-04 L4_spectral:7.4045e-04 L5_spectral:7.3108e-04 L6_spectral:7.3620e-04 L7_spectral:7.3965e-04 L8_spectral:7.2449e-04 L9_spectral:7.4013e-04 L10_spectral:7.4491e-04 L11_spectral:7.2782e-04 L12_spectral:6.9220e-04 train_time:89977ms step_avg:56.24ms +[2025-09-11 13:24:06] [Rank 0] step:1601/10000 train_time:91764ms step_avg:57.32ms +[2025-09-11 13:24:06] [Rank 0] step:1601/10000 train_time:91764ms step_avg:57.32ms +[2025-09-11 13:24:07] [Rank 0] step:1621/10000 train_time:92415ms step_avg:57.01ms +[2025-09-11 13:24:07] [Rank 0] step:1621/10000 train_time:92415ms step_avg:57.01ms +[2025-09-11 13:24:08] [Rank 0] step:1641/10000 train_time:93065ms step_avg:56.71ms +[2025-09-11 13:24:08] [Rank 0] step:1641/10000 train_time:93065ms step_avg:56.71ms +[2025-09-11 13:24:08] [Rank 0] step:1661/10000 train_time:93713ms step_avg:56.42ms +[2025-09-11 13:24:08] [Rank 0] step:1661/10000 train_time:93713ms step_avg:56.42ms +[2025-09-11 13:24:09] [Rank 0] step:1681/10000 train_time:94362ms step_avg:56.13ms +[2025-09-11 13:24:09] [Rank 0] step:1681/10000 train_time:94362ms 
step_avg:56.13ms +[2025-09-11 13:24:09] [Rank 0] step:1701/10000 train_time:95010ms step_avg:55.86ms +[2025-09-11 13:24:09] [Rank 0] step:1701/10000 train_time:95010ms step_avg:55.86ms +[2025-09-11 13:24:10] [Rank 0] step:1721/10000 train_time:95659ms step_avg:55.58ms +[2025-09-11 13:24:10] [Rank 0] step:1721/10000 train_time:95659ms step_avg:55.58ms +[2025-09-11 13:24:11] [Rank 0] step:1741/10000 train_time:96308ms step_avg:55.32ms +[2025-09-11 13:24:11] [Rank 0] step:1741/10000 train_time:96308ms step_avg:55.32ms +[2025-09-11 13:24:11] [Rank 0] step:1761/10000 train_time:96955ms step_avg:55.06ms +[2025-09-11 13:24:11] [Rank 0] step:1761/10000 train_time:96955ms step_avg:55.06ms +[2025-09-11 13:24:12] [Rank 0] step:1781/10000 train_time:97604ms step_avg:54.80ms +[2025-09-11 13:24:12] [Rank 0] step:1781/10000 train_time:97604ms step_avg:54.80ms +[2025-09-11 13:24:13] [Rank 0] step:1801/10000 train_time:98252ms step_avg:54.55ms +[2025-09-11 13:24:13] [Rank 0] step:1801/10000 train_time:98252ms step_avg:54.55ms +[2025-09-11 13:24:13] [Rank 0] step:1821/10000 train_time:98900ms step_avg:54.31ms +[2025-09-11 13:24:13] [Rank 0] step:1821/10000 train_time:98900ms step_avg:54.31ms +[2025-09-11 13:24:14] [Rank 0] step:1841/10000 train_time:99549ms step_avg:54.07ms +[2025-09-11 13:24:14] [Rank 0] step:1841/10000 train_time:99549ms step_avg:54.07ms +[2025-09-11 13:24:15] [Rank 0] step:1861/10000 train_time:100198ms step_avg:53.84ms +[2025-09-11 13:24:15] [Rank 0] step:1861/10000 train_time:100198ms step_avg:53.84ms +[2025-09-11 13:24:15] [Rank 0] step:1881/10000 train_time:100846ms step_avg:53.61ms +[2025-09-11 13:24:15] [Rank 0] step:1881/10000 train_time:100846ms step_avg:53.61ms +[2025-09-11 13:24:16] [Rank 0] step:1901/10000 train_time:101495ms step_avg:53.39ms +[2025-09-11 13:24:16] [Rank 0] step:1901/10000 train_time:101495ms step_avg:53.39ms +[2025-09-11 13:24:17] [Rank 0] step:1921/10000 train_time:102144ms step_avg:53.17ms +[2025-09-11 13:24:17] [Rank 0] 
step:1921/10000 train_time:102144ms step_avg:53.17ms +[2025-09-11 13:24:17] [Rank 0] step:1941/10000 train_time:102792ms step_avg:52.96ms +[2025-09-11 13:24:17] [Rank 0] step:1941/10000 train_time:102792ms step_avg:52.96ms +[2025-09-11 13:24:18] [Rank 0] step:1961/10000 train_time:103441ms step_avg:52.75ms +[2025-09-11 13:24:18] [Rank 0] step:1961/10000 train_time:103441ms step_avg:52.75ms +[2025-09-11 13:24:19] [Rank 0] step:1981/10000 train_time:104088ms step_avg:52.54ms +[2025-09-11 13:24:19] [Rank 0] step:1981/10000 train_time:104088ms step_avg:52.54ms +[2025-09-11 13:24:19] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 13:24:19] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 13:24:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 13:24:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 13:24:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 13:24:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 13:24:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:24:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:24:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 13:24:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 13:24:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 13:24:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 13:24:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 13:24:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 13:24:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 13:24:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 13:24:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 13:24:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 13:24:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 13:24:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 13:24:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 13:24:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 13:24:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 13:24:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 13:24:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 13:24:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 13:24:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 13:24:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 13:24:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 13:24:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 13:24:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 13:24:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 13:24:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 13:24:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 13:24:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 13:24:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 13:24:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 13:24:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 13:24:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 13:24:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 13:24:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 13:24:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 13:24:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 13:24:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 13:24:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 13:24:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:24:30] [Rank 0] PRINT: step:2000/10000 val_loss:6.3819 total_sharp:1.0403e-01 L1_sharp:3.7479e-01 L2_sharp:3.8872e-01 L3_sharp:4.5107e-01 L4_sharp:5.2642e-01 L5_sharp:7.1159e-01 L6_sharp:9.1913e-01 L7_sharp:1.4695e+00 L8_sharp:2.4473e+00 L9_sharp:3.2997e+00 L10_sharp:4.7131e+00 L11_sharp:4.0759e+00 L12_sharp:4.1262e+00 total_fnorm:1.9766e+00 total_l1_linf:1.7040e+03 total_spectral:9.9219e-01 L1_fnorm:4.8096e-02 L2_fnorm:4.8584e-02 L3_fnorm:4.8584e-02 L4_fnorm:4.8584e-02 L5_fnorm:4.7607e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8340e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.8340e-02 L10_fnorm:4.7607e-02 L11_fnorm:4.6875e-02 L12_fnorm:3.9795e-02 L1_l1linf:1.4648e-02 L2_l1linf:1.4954e-02 L3_l1linf:1.5015e-02 L4_l1linf:1.4954e-02 L5_l1linf:1.5015e-02 L6_l1linf:1.5198e-02 L7_l1linf:1.5625e-02 L8_l1linf:1.5564e-02 L9_l1linf:1.5991e-02 L10_l1linf:1.6357e-02 L11_l1linf:1.5503e-02 L12_l1linf:1.2878e-02 L1_spectral:7.6172e-04 L2_spectral:7.5523e-04 L3_spectral:7.5338e-04 L4_spectral:7.5270e-04 L5_spectral:7.4903e-04 L6_spectral:7.5026e-04 L7_spectral:7.5727e-04 L8_spectral:7.3632e-04 L9_spectral:7.5242e-04 L10_spectral:7.4931e-04 L11_spectral:7.4174e-04 L12_spectral:6.9937e-04 train_time:104719ms step_avg:52.36ms +[2025-09-11 13:24:30] [Rank 0] PRINT: step:2000/10000 val_loss:6.3819 total_sharp:1.0403e-01 L1_sharp:3.7479e-01 L2_sharp:3.8872e-01 L3_sharp:4.5107e-01 L4_sharp:5.2642e-01 L5_sharp:7.1159e-01 L6_sharp:9.1913e-01 L7_sharp:1.4695e+00 L8_sharp:2.4473e+00 L9_sharp:3.2997e+00 L10_sharp:4.7131e+00 L11_sharp:4.0759e+00 L12_sharp:4.1262e+00 total_fnorm:1.9766e+00 total_l1_linf:1.7040e+03 total_spectral:9.9219e-01 L1_fnorm:4.8096e-02 L2_fnorm:4.8584e-02 L3_fnorm:4.8584e-02 L4_fnorm:4.8584e-02 L5_fnorm:4.7607e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8340e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.8340e-02 L10_fnorm:4.7607e-02 L11_fnorm:4.6875e-02 L12_fnorm:3.9795e-02 L1_l1linf:1.4648e-02 L2_l1linf:1.4954e-02 L3_l1linf:1.5015e-02 L4_l1linf:1.4954e-02 L5_l1linf:1.5015e-02 
L6_l1linf:1.5198e-02 L7_l1linf:1.5625e-02 L8_l1linf:1.5564e-02 L9_l1linf:1.5991e-02 L10_l1linf:1.6357e-02 L11_l1linf:1.5503e-02 L12_l1linf:1.2878e-02 L1_spectral:7.6172e-04 L2_spectral:7.5523e-04 L3_spectral:7.5338e-04 L4_spectral:7.5270e-04 L5_spectral:7.4903e-04 L6_spectral:7.5026e-04 L7_spectral:7.5727e-04 L8_spectral:7.3632e-04 L9_spectral:7.5242e-04 L10_spectral:7.4931e-04 L11_spectral:7.4174e-04 L12_spectral:6.9937e-04 train_time:104719ms step_avg:52.36ms +[2025-09-11 13:24:32] [Rank 0] step:2001/10000 train_time:106691ms step_avg:53.32ms +[2025-09-11 13:24:32] [Rank 0] step:2001/10000 train_time:106691ms step_avg:53.32ms +[2025-09-11 13:24:33] [Rank 0] step:2021/10000 train_time:107364ms step_avg:53.12ms +[2025-09-11 13:24:33] [Rank 0] step:2021/10000 train_time:107364ms step_avg:53.12ms +[2025-09-11 13:24:33] [Rank 0] step:2041/10000 train_time:108024ms step_avg:52.93ms +[2025-09-11 13:24:33] [Rank 0] step:2041/10000 train_time:108024ms step_avg:52.93ms +[2025-09-11 13:24:34] [Rank 0] step:2061/10000 train_time:108673ms step_avg:52.73ms +[2025-09-11 13:24:34] [Rank 0] step:2061/10000 train_time:108673ms step_avg:52.73ms +[2025-09-11 13:24:35] [Rank 0] step:2081/10000 train_time:109322ms step_avg:52.53ms +[2025-09-11 13:24:35] [Rank 0] step:2081/10000 train_time:109322ms step_avg:52.53ms +[2025-09-11 13:24:35] [Rank 0] step:2101/10000 train_time:109970ms step_avg:52.34ms +[2025-09-11 13:24:35] [Rank 0] step:2101/10000 train_time:109970ms step_avg:52.34ms +[2025-09-11 13:24:36] [Rank 0] step:2121/10000 train_time:110619ms step_avg:52.15ms +[2025-09-11 13:24:36] [Rank 0] step:2121/10000 train_time:110619ms step_avg:52.15ms +[2025-09-11 13:24:37] [Rank 0] step:2141/10000 train_time:111267ms step_avg:51.97ms +[2025-09-11 13:24:37] [Rank 0] step:2141/10000 train_time:111267ms step_avg:51.97ms +[2025-09-11 13:24:37] [Rank 0] step:2161/10000 train_time:111915ms step_avg:51.79ms +[2025-09-11 13:24:37] [Rank 0] step:2161/10000 train_time:111915ms step_avg:51.79ms 
+[2025-09-11 13:24:38] [Rank 0] step:2181/10000 train_time:112563ms step_avg:51.61ms +[2025-09-11 13:24:38] [Rank 0] step:2181/10000 train_time:112563ms step_avg:51.61ms +[2025-09-11 13:24:38] [Rank 0] step:2201/10000 train_time:113211ms step_avg:51.44ms +[2025-09-11 13:24:38] [Rank 0] step:2201/10000 train_time:113211ms step_avg:51.44ms +[2025-09-11 13:24:39] [Rank 0] step:2221/10000 train_time:113858ms step_avg:51.26ms +[2025-09-11 13:24:39] [Rank 0] step:2221/10000 train_time:113858ms step_avg:51.26ms +[2025-09-11 13:24:40] [Rank 0] step:2241/10000 train_time:114519ms step_avg:51.10ms +[2025-09-11 13:24:40] [Rank 0] step:2241/10000 train_time:114519ms step_avg:51.10ms +[2025-09-11 13:24:40] [Rank 0] step:2261/10000 train_time:115181ms step_avg:50.94ms +[2025-09-11 13:24:40] [Rank 0] step:2261/10000 train_time:115181ms step_avg:50.94ms +[2025-09-11 13:24:41] [Rank 0] step:2281/10000 train_time:115842ms step_avg:50.79ms +[2025-09-11 13:24:41] [Rank 0] step:2281/10000 train_time:115842ms step_avg:50.79ms +[2025-09-11 13:24:42] [Rank 0] step:2301/10000 train_time:116504ms step_avg:50.63ms +[2025-09-11 13:24:42] [Rank 0] step:2301/10000 train_time:116504ms step_avg:50.63ms +[2025-09-11 13:24:42] [Rank 0] step:2321/10000 train_time:117166ms step_avg:50.48ms +[2025-09-11 13:24:42] [Rank 0] step:2321/10000 train_time:117166ms step_avg:50.48ms +[2025-09-11 13:24:43] [Rank 0] step:2341/10000 train_time:117827ms step_avg:50.33ms +[2025-09-11 13:24:43] [Rank 0] step:2341/10000 train_time:117827ms step_avg:50.33ms +[2025-09-11 13:24:44] [Rank 0] step:2361/10000 train_time:118489ms step_avg:50.19ms +[2025-09-11 13:24:44] [Rank 0] step:2361/10000 train_time:118489ms step_avg:50.19ms +[2025-09-11 13:24:44] [Rank 0] step:2381/10000 train_time:119151ms step_avg:50.04ms +[2025-09-11 13:24:44] [Rank 0] step:2381/10000 train_time:119151ms step_avg:50.04ms +[2025-09-11 13:24:45] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 13:24:45] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 13:24:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 13:24:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 13:24:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 13:24:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 13:24:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:24:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:24:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 13:24:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 13:24:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 13:24:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 13:24:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 13:24:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 13:24:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 13:24:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 13:24:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 13:24:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 13:24:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 13:24:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 13:24:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 13:24:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 13:24:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 13:24:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 13:24:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 13:24:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 13:24:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 13:24:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 13:24:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 13:24:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 13:24:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 13:24:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 13:24:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 13:24:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 13:24:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 13:24:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 13:24:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 13:24:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 13:24:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 13:24:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 13:24:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 13:24:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 13:24:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 13:24:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 13:24:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 13:24:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 13:24:55] [Rank 0] PRINT: step:2400/10000 val_loss:6.2743 total_sharp:1.1329e-01 L1_sharp:3.2104e-01 L2_sharp:3.8718e-01 L3_sharp:4.9445e-01 L4_sharp:6.4768e-01 L5_sharp:8.4108e-01 L6_sharp:1.1325e+00 L7_sharp:1.5096e+00 L8_sharp:2.1765e+00 L9_sharp:2.7835e+00 L10_sharp:2.7480e+00 L11_sharp:2.8404e+00 L12_sharp:4.4513e+00 total_fnorm:1.8281e+00 total_l1_linf:1.5120e+03 total_spectral:9.1797e-01 L1_fnorm:4.7852e-02 L2_fnorm:4.8096e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.8340e-02 L5_fnorm:4.7363e-02 L6_fnorm:4.7852e-02 L7_fnorm:4.7607e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.7607e-02 L10_fnorm:4.7119e-02 L11_fnorm:4.7119e-02 L12_fnorm:4.1260e-02 L1_l1linf:1.4221e-02 L2_l1linf:1.4465e-02 L3_l1linf:1.4526e-02 L4_l1linf:1.4404e-02 L5_l1linf:1.4587e-02 L6_l1linf:1.4771e-02 L7_l1linf:1.4832e-02 L8_l1linf:1.5137e-02 L9_l1linf:1.5625e-02 L10_l1linf:1.5869e-02 L11_l1linf:1.5320e-02 L12_l1linf:1.2817e-02 L1_spectral:7.6572e-04 L2_spectral:7.6143e-04 L3_spectral:7.6003e-04 L4_spectral:7.6211e-04 L5_spectral:7.5597e-04 L6_spectral:7.5845e-04 L7_spectral:7.5778e-04 L8_spectral:7.4541e-04 L9_spectral:7.5983e-04 L10_spectral:7.5477e-04 L11_spectral:7.6031e-04 L12_spectral:6.9607e-04 train_time:119794ms step_avg:49.91ms +[2025-09-11 13:24:55] [Rank 0] PRINT: step:2400/10000 
val_loss:6.2743 total_sharp:1.1329e-01 L1_sharp:3.2104e-01 L2_sharp:3.8718e-01 L3_sharp:4.9445e-01 L4_sharp:6.4768e-01 L5_sharp:8.4108e-01 L6_sharp:1.1325e+00 L7_sharp:1.5096e+00 L8_sharp:2.1765e+00 L9_sharp:2.7835e+00 L10_sharp:2.7480e+00 L11_sharp:2.8404e+00 L12_sharp:4.4513e+00 total_fnorm:1.8281e+00 total_l1_linf:1.5120e+03 total_spectral:9.1797e-01 L1_fnorm:4.7852e-02 L2_fnorm:4.8096e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.8340e-02 L5_fnorm:4.7363e-02 L6_fnorm:4.7852e-02 L7_fnorm:4.7607e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.7607e-02 L10_fnorm:4.7119e-02 L11_fnorm:4.7119e-02 L12_fnorm:4.1260e-02 L1_l1linf:1.4221e-02 L2_l1linf:1.4465e-02 L3_l1linf:1.4526e-02 L4_l1linf:1.4404e-02 L5_l1linf:1.4587e-02 L6_l1linf:1.4771e-02 L7_l1linf:1.4832e-02 L8_l1linf:1.5137e-02 L9_l1linf:1.5625e-02 L10_l1linf:1.5869e-02 L11_l1linf:1.5320e-02 L12_l1linf:1.2817e-02 L1_spectral:7.6572e-04 L2_spectral:7.6143e-04 L3_spectral:7.6003e-04 L4_spectral:7.6211e-04 L5_spectral:7.5597e-04 L6_spectral:7.5845e-04 L7_spectral:7.5778e-04 L8_spectral:7.4541e-04 L9_spectral:7.5983e-04 L10_spectral:7.5477e-04 L11_spectral:7.6031e-04 L12_spectral:6.9607e-04 train_time:119794ms step_avg:49.91ms +[2025-09-11 13:24:57] [Rank 0] step:2401/10000 train_time:121620ms step_avg:50.65ms +[2025-09-11 13:24:57] [Rank 0] step:2401/10000 train_time:121620ms step_avg:50.65ms +[2025-09-11 13:24:58] [Rank 0] step:2421/10000 train_time:122285ms step_avg:50.51ms +[2025-09-11 13:24:58] [Rank 0] step:2421/10000 train_time:122285ms step_avg:50.51ms +[2025-09-11 13:24:58] [Rank 0] step:2441/10000 train_time:122948ms step_avg:50.37ms +[2025-09-11 13:24:58] [Rank 0] step:2441/10000 train_time:122948ms step_avg:50.37ms +[2025-09-11 13:24:59] [Rank 0] step:2461/10000 train_time:123610ms step_avg:50.23ms +[2025-09-11 13:24:59] [Rank 0] step:2461/10000 train_time:123610ms step_avg:50.23ms +[2025-09-11 13:25:00] [Rank 0] step:2481/10000 train_time:124273ms step_avg:50.09ms +[2025-09-11 13:25:00] [Rank 0] step:2481/10000 
train_time:124273ms step_avg:50.09ms +[2025-09-11 13:25:00] [Rank 0] step:2501/10000 train_time:124936ms step_avg:49.95ms +[2025-09-11 13:25:00] [Rank 0] step:2501/10000 train_time:124936ms step_avg:49.95ms +[2025-09-11 13:25:01] [Rank 0] step:2521/10000 train_time:125664ms step_avg:49.85ms +[2025-09-11 13:25:01] [Rank 0] step:2521/10000 train_time:125664ms step_avg:49.85ms +[2025-09-11 13:25:02] [Rank 0] step:2541/10000 train_time:126381ms step_avg:49.74ms +[2025-09-11 13:25:02] [Rank 0] step:2541/10000 train_time:126381ms step_avg:49.74ms +[2025-09-11 13:25:02] [Rank 0] step:2561/10000 train_time:127075ms step_avg:49.62ms +[2025-09-11 13:25:02] [Rank 0] step:2561/10000 train_time:127075ms step_avg:49.62ms +[2025-09-11 13:25:03] [Rank 0] step:2581/10000 train_time:127737ms step_avg:49.49ms +[2025-09-11 13:25:03] [Rank 0] step:2581/10000 train_time:127737ms step_avg:49.49ms +[2025-09-11 13:25:04] [Rank 0] step:2601/10000 train_time:128399ms step_avg:49.37ms +[2025-09-11 13:25:04] [Rank 0] step:2601/10000 train_time:128399ms step_avg:49.37ms +[2025-09-11 13:25:04] [Rank 0] step:2621/10000 train_time:129062ms step_avg:49.24ms +[2025-09-11 13:25:04] [Rank 0] step:2621/10000 train_time:129062ms step_avg:49.24ms +[2025-09-11 13:25:05] [Rank 0] step:2641/10000 train_time:129724ms step_avg:49.12ms +[2025-09-11 13:25:05] [Rank 0] step:2641/10000 train_time:129724ms step_avg:49.12ms +[2025-09-11 13:25:06] [Rank 0] step:2661/10000 train_time:130386ms step_avg:49.00ms +[2025-09-11 13:25:06] [Rank 0] step:2661/10000 train_time:130386ms step_avg:49.00ms +[2025-09-11 13:25:06] [Rank 0] step:2681/10000 train_time:131048ms step_avg:48.88ms +[2025-09-11 13:25:06] [Rank 0] step:2681/10000 train_time:131048ms step_avg:48.88ms +[2025-09-11 13:25:07] [Rank 0] step:2701/10000 train_time:131711ms step_avg:48.76ms +[2025-09-11 13:25:07] [Rank 0] step:2701/10000 train_time:131711ms step_avg:48.76ms +[2025-09-11 13:25:08] [Rank 0] step:2721/10000 train_time:132372ms step_avg:48.65ms 
+[2025-09-11 13:25:08] [Rank 0] step:2721/10000 train_time:132372ms step_avg:48.65ms +[2025-09-11 13:25:08] [Rank 0] step:2741/10000 train_time:133036ms step_avg:48.54ms +[2025-09-11 13:25:08] [Rank 0] step:2741/10000 train_time:133036ms step_avg:48.54ms +[2025-09-11 13:25:09] [Rank 0] step:2761/10000 train_time:133698ms step_avg:48.42ms +[2025-09-11 13:25:09] [Rank 0] step:2761/10000 train_time:133698ms step_avg:48.42ms +[2025-09-11 13:25:10] [Rank 0] step:2781/10000 train_time:134359ms step_avg:48.31ms +[2025-09-11 13:25:10] [Rank 0] step:2781/10000 train_time:134359ms step_avg:48.31ms +[2025-09-11 13:25:10] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 13:25:10] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 13:25:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 13:25:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 13:25:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 13:25:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 13:25:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:25:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:25:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 13:25:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 13:25:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 13:25:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 13:25:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 13:25:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 13:25:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 13:25:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 13:25:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 13:25:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 13:25:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 13:25:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 13:25:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 13:25:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 13:25:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 13:25:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 13:25:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 13:25:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 13:25:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 13:25:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 13:25:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 13:25:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 13:25:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 13:25:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 13:25:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 13:25:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 13:25:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 13:25:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 13:25:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 13:25:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 13:25:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 13:25:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 13:25:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 13:25:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 13:25:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 13:25:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 13:25:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 13:25:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:25:21] [Rank 0] PRINT: step:2800/10000 val_loss:6.1898 total_sharp:1.0073e-01 L1_sharp:2.5302e-01 L2_sharp:2.9362e-01 L3_sharp:3.3009e-01 L4_sharp:4.3496e-01 L5_sharp:6.1637e-01 L6_sharp:8.8720e-01 L7_sharp:1.1690e+00 L8_sharp:1.6059e+00 L9_sharp:2.5232e+00 L10_sharp:3.4483e+00 L11_sharp:3.9447e+00 L12_sharp:4.2117e+00 total_fnorm:1.7344e+00 total_l1_linf:1.3680e+03 total_spectral:8.7109e-01 L1_fnorm:4.7607e-02 L2_fnorm:4.7607e-02 L3_fnorm:4.7852e-02 L4_fnorm:4.8096e-02 L5_fnorm:4.7607e-02 L6_fnorm:4.7852e-02 L7_fnorm:4.7607e-02 L8_fnorm:4.7119e-02 L9_fnorm:4.7607e-02 L10_fnorm:4.7119e-02 L11_fnorm:4.7607e-02 L12_fnorm:4.1016e-02 L1_l1linf:1.3306e-02 L2_l1linf:1.3367e-02 L3_l1linf:1.3672e-02 L4_l1linf:1.3855e-02 L5_l1linf:1.4038e-02 L6_l1linf:1.4343e-02 L7_l1linf:1.4404e-02 L8_l1linf:1.4709e-02 L9_l1linf:1.4954e-02 L10_l1linf:1.5442e-02 L11_l1linf:1.5503e-02 L12_l1linf:1.2451e-02 L1_spectral:7.7378e-04 L2_spectral:7.6632e-04 L3_spectral:7.6922e-04 L4_spectral:7.6792e-04 L5_spectral:7.6174e-04 L6_spectral:7.6592e-04 L7_spectral:7.6509e-04 L8_spectral:7.5886e-04 L9_spectral:7.6871e-04 L10_spectral:7.6739e-04 L11_spectral:7.6634e-04 L12_spectral:6.9162e-04 train_time:135003ms step_avg:48.22ms +[2025-09-11 13:25:21] [Rank 0] PRINT: step:2800/10000 val_loss:6.1898 total_sharp:1.0073e-01 L1_sharp:2.5302e-01 L2_sharp:2.9362e-01 L3_sharp:3.3009e-01 L4_sharp:4.3496e-01 L5_sharp:6.1637e-01 L6_sharp:8.8720e-01 L7_sharp:1.1690e+00 L8_sharp:1.6059e+00 L9_sharp:2.5232e+00 L10_sharp:3.4483e+00 L11_sharp:3.9447e+00 L12_sharp:4.2117e+00 total_fnorm:1.7344e+00 total_l1_linf:1.3680e+03 total_spectral:8.7109e-01 L1_fnorm:4.7607e-02 L2_fnorm:4.7607e-02 L3_fnorm:4.7852e-02 L4_fnorm:4.8096e-02 L5_fnorm:4.7607e-02 L6_fnorm:4.7852e-02 L7_fnorm:4.7607e-02 L8_fnorm:4.7119e-02 L9_fnorm:4.7607e-02 L10_fnorm:4.7119e-02 L11_fnorm:4.7607e-02 L12_fnorm:4.1016e-02 L1_l1linf:1.3306e-02 L2_l1linf:1.3367e-02 L3_l1linf:1.3672e-02 L4_l1linf:1.3855e-02 L5_l1linf:1.4038e-02 
L6_l1linf:1.4343e-02 L7_l1linf:1.4404e-02 L8_l1linf:1.4709e-02 L9_l1linf:1.4954e-02 L10_l1linf:1.5442e-02 L11_l1linf:1.5503e-02 L12_l1linf:1.2451e-02 L1_spectral:7.7378e-04 L2_spectral:7.6632e-04 L3_spectral:7.6922e-04 L4_spectral:7.6792e-04 L5_spectral:7.6174e-04 L6_spectral:7.6592e-04 L7_spectral:7.6509e-04 L8_spectral:7.5886e-04 L9_spectral:7.6871e-04 L10_spectral:7.6739e-04 L11_spectral:7.6634e-04 L12_spectral:6.9162e-04 train_time:135003ms step_avg:48.22ms +[2025-09-11 13:25:23] [Rank 0] step:2801/10000 train_time:136817ms step_avg:48.85ms +[2025-09-11 13:25:23] [Rank 0] step:2801/10000 train_time:136817ms step_avg:48.85ms +[2025-09-11 13:25:24] [Rank 0] step:2821/10000 train_time:137482ms step_avg:48.74ms +[2025-09-11 13:25:24] [Rank 0] step:2821/10000 train_time:137482ms step_avg:48.74ms +[2025-09-11 13:25:24] [Rank 0] step:2841/10000 train_time:138144ms step_avg:48.63ms +[2025-09-11 13:25:24] [Rank 0] step:2841/10000 train_time:138144ms step_avg:48.63ms +[2025-09-11 13:25:25] [Rank 0] step:2861/10000 train_time:138806ms step_avg:48.52ms +[2025-09-11 13:25:25] [Rank 0] step:2861/10000 train_time:138806ms step_avg:48.52ms +[2025-09-11 13:25:26] [Rank 0] step:2881/10000 train_time:139468ms step_avg:48.41ms +[2025-09-11 13:25:26] [Rank 0] step:2881/10000 train_time:139468ms step_avg:48.41ms +[2025-09-11 13:25:26] [Rank 0] step:2901/10000 train_time:140129ms step_avg:48.30ms +[2025-09-11 13:25:26] [Rank 0] step:2901/10000 train_time:140129ms step_avg:48.30ms +[2025-09-11 13:25:27] [Rank 0] step:2921/10000 train_time:140790ms step_avg:48.20ms +[2025-09-11 13:25:27] [Rank 0] step:2921/10000 train_time:140790ms step_avg:48.20ms +[2025-09-11 13:25:28] [Rank 0] step:2941/10000 train_time:141452ms step_avg:48.10ms +[2025-09-11 13:25:28] [Rank 0] step:2941/10000 train_time:141452ms step_avg:48.10ms +[2025-09-11 13:25:28] [Rank 0] step:2961/10000 train_time:142114ms step_avg:48.00ms +[2025-09-11 13:25:28] [Rank 0] step:2961/10000 train_time:142114ms step_avg:48.00ms 
+[2025-09-11 13:25:29] [Rank 0] step:2981/10000 train_time:142777ms step_avg:47.90ms +[2025-09-11 13:25:29] [Rank 0] step:2981/10000 train_time:142777ms step_avg:47.90ms +[2025-09-11 13:25:30] [Rank 0] step:3001/10000 train_time:143442ms step_avg:47.80ms +[2025-09-11 13:25:30] [Rank 0] step:3001/10000 train_time:143442ms step_avg:47.80ms +[2025-09-11 13:25:30] [Rank 0] step:3021/10000 train_time:144105ms step_avg:47.70ms +[2025-09-11 13:25:30] [Rank 0] step:3021/10000 train_time:144105ms step_avg:47.70ms +[2025-09-11 13:25:31] [Rank 0] step:3041/10000 train_time:144770ms step_avg:47.61ms +[2025-09-11 13:25:31] [Rank 0] step:3041/10000 train_time:144770ms step_avg:47.61ms +[2025-09-11 13:25:32] [Rank 0] step:3061/10000 train_time:145434ms step_avg:47.51ms +[2025-09-11 13:25:32] [Rank 0] step:3061/10000 train_time:145434ms step_avg:47.51ms +[2025-09-11 13:25:32] [Rank 0] step:3081/10000 train_time:146097ms step_avg:47.42ms +[2025-09-11 13:25:32] [Rank 0] step:3081/10000 train_time:146097ms step_avg:47.42ms +[2025-09-11 13:25:33] [Rank 0] step:3101/10000 train_time:146761ms step_avg:47.33ms +[2025-09-11 13:25:33] [Rank 0] step:3101/10000 train_time:146761ms step_avg:47.33ms +[2025-09-11 13:25:33] [Rank 0] step:3121/10000 train_time:147426ms step_avg:47.24ms +[2025-09-11 13:25:33] [Rank 0] step:3121/10000 train_time:147426ms step_avg:47.24ms +[2025-09-11 13:25:34] [Rank 0] step:3141/10000 train_time:148090ms step_avg:47.15ms +[2025-09-11 13:25:34] [Rank 0] step:3141/10000 train_time:148090ms step_avg:47.15ms +[2025-09-11 13:25:35] [Rank 0] step:3161/10000 train_time:148754ms step_avg:47.06ms +[2025-09-11 13:25:35] [Rank 0] step:3161/10000 train_time:148754ms step_avg:47.06ms +[2025-09-11 13:25:35] [Rank 0] step:3181/10000 train_time:149418ms step_avg:46.97ms +[2025-09-11 13:25:35] [Rank 0] step:3181/10000 train_time:149418ms step_avg:46.97ms +[2025-09-11 13:25:36] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 13:25:36] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 13:25:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 13:25:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 13:25:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 13:25:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 13:25:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:25:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:25:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 13:25:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 13:25:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 13:25:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 13:25:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 13:25:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 13:25:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 13:25:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 13:25:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 13:25:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 13:25:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 13:25:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 13:25:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 13:25:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 13:25:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 13:25:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 13:25:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 13:25:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 13:25:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 13:25:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 13:25:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 13:25:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 13:25:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 13:25:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 13:25:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 13:25:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 13:25:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 13:25:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 13:25:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 13:25:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 13:25:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 13:25:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 13:25:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 13:25:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 13:25:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 13:25:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 13:25:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 13:25:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 13:25:47] [Rank 0] PRINT: step:3200/10000 val_loss:6.1114 total_sharp:8.0159e-02 L1_sharp:1.8845e-01 L2_sharp:2.3991e-01 L3_sharp:2.6838e-01 L4_sharp:3.4554e-01 L5_sharp:4.3583e-01 L6_sharp:6.3350e-01 L7_sharp:9.8192e-01 L8_sharp:1.6001e+00 L9_sharp:2.4379e+00 L10_sharp:3.1531e+00 L11_sharp:2.6210e+00 L12_sharp:2.8278e+00 total_fnorm:1.8125e+00 total_l1_linf:1.4720e+03 total_spectral:9.1016e-01 L1_fnorm:4.7363e-02 L2_fnorm:4.7363e-02 L3_fnorm:4.7607e-02 L4_fnorm:4.7607e-02 L5_fnorm:4.7607e-02 L6_fnorm:4.7607e-02 L7_fnorm:4.7363e-02 L8_fnorm:4.7119e-02 L9_fnorm:4.7852e-02 L10_fnorm:4.7363e-02 L11_fnorm:4.8096e-02 L12_fnorm:4.3213e-02 L1_l1linf:1.2573e-02 L2_l1linf:1.2756e-02 L3_l1linf:1.2878e-02 L4_l1linf:1.3062e-02 L5_l1linf:1.3000e-02 L6_l1linf:1.3367e-02 L7_l1linf:1.3611e-02 L8_l1linf:1.3794e-02 L9_l1linf:1.4404e-02 L10_l1linf:1.4709e-02 L11_l1linf:1.5381e-02 L12_l1linf:1.2878e-02 L1_spectral:7.7620e-04 L2_spectral:7.8017e-04 L3_spectral:7.7547e-04 L4_spectral:7.7172e-04 L5_spectral:7.6930e-04 L6_spectral:7.7370e-04 L7_spectral:7.8112e-04 L8_spectral:7.7035e-04 L9_spectral:7.7815e-04 L10_spectral:7.7869e-04 L11_spectral:7.7961e-04 L12_spectral:7.1630e-04 train_time:150063ms step_avg:46.89ms +[2025-09-11 13:25:47] [Rank 0] PRINT: step:3200/10000 
val_loss:6.1114 total_sharp:8.0159e-02 L1_sharp:1.8845e-01 L2_sharp:2.3991e-01 L3_sharp:2.6838e-01 L4_sharp:3.4554e-01 L5_sharp:4.3583e-01 L6_sharp:6.3350e-01 L7_sharp:9.8192e-01 L8_sharp:1.6001e+00 L9_sharp:2.4379e+00 L10_sharp:3.1531e+00 L11_sharp:2.6210e+00 L12_sharp:2.8278e+00 total_fnorm:1.8125e+00 total_l1_linf:1.4720e+03 total_spectral:9.1016e-01 L1_fnorm:4.7363e-02 L2_fnorm:4.7363e-02 L3_fnorm:4.7607e-02 L4_fnorm:4.7607e-02 L5_fnorm:4.7607e-02 L6_fnorm:4.7607e-02 L7_fnorm:4.7363e-02 L8_fnorm:4.7119e-02 L9_fnorm:4.7852e-02 L10_fnorm:4.7363e-02 L11_fnorm:4.8096e-02 L12_fnorm:4.3213e-02 L1_l1linf:1.2573e-02 L2_l1linf:1.2756e-02 L3_l1linf:1.2878e-02 L4_l1linf:1.3062e-02 L5_l1linf:1.3000e-02 L6_l1linf:1.3367e-02 L7_l1linf:1.3611e-02 L8_l1linf:1.3794e-02 L9_l1linf:1.4404e-02 L10_l1linf:1.4709e-02 L11_l1linf:1.5381e-02 L12_l1linf:1.2878e-02 L1_spectral:7.7620e-04 L2_spectral:7.8017e-04 L3_spectral:7.7547e-04 L4_spectral:7.7172e-04 L5_spectral:7.6930e-04 L6_spectral:7.7370e-04 L7_spectral:7.8112e-04 L8_spectral:7.7035e-04 L9_spectral:7.7815e-04 L10_spectral:7.7869e-04 L11_spectral:7.7961e-04 L12_spectral:7.1630e-04 train_time:150063ms step_avg:46.89ms +[2025-09-11 13:25:49] [Rank 0] step:3201/10000 train_time:152042ms step_avg:47.50ms +[2025-09-11 13:25:49] [Rank 0] step:3201/10000 train_time:152042ms step_avg:47.50ms +[2025-09-11 13:25:50] [Rank 0] step:3221/10000 train_time:152712ms step_avg:47.41ms +[2025-09-11 13:25:50] [Rank 0] step:3221/10000 train_time:152712ms step_avg:47.41ms +[2025-09-11 13:25:50] [Rank 0] step:3241/10000 train_time:153378ms step_avg:47.32ms +[2025-09-11 13:25:50] [Rank 0] step:3241/10000 train_time:153378ms step_avg:47.32ms +[2025-09-11 13:25:51] [Rank 0] step:3261/10000 train_time:154043ms step_avg:47.24ms +[2025-09-11 13:25:51] [Rank 0] step:3261/10000 train_time:154043ms step_avg:47.24ms +[2025-09-11 13:25:52] [Rank 0] step:3281/10000 train_time:154708ms step_avg:47.15ms +[2025-09-11 13:25:52] [Rank 0] step:3281/10000 
train_time:154708ms step_avg:47.15ms +[2025-09-11 13:25:52] [Rank 0] step:3301/10000 train_time:155374ms step_avg:47.07ms +[2025-09-11 13:25:52] [Rank 0] step:3301/10000 train_time:155374ms step_avg:47.07ms +[2025-09-11 13:25:53] [Rank 0] step:3321/10000 train_time:156038ms step_avg:46.99ms +[2025-09-11 13:25:53] [Rank 0] step:3321/10000 train_time:156038ms step_avg:46.99ms +[2025-09-11 13:25:54] [Rank 0] step:3341/10000 train_time:156703ms step_avg:46.90ms +[2025-09-11 13:25:54] [Rank 0] step:3341/10000 train_time:156703ms step_avg:46.90ms +[2025-09-11 13:25:54] [Rank 0] step:3361/10000 train_time:157368ms step_avg:46.82ms +[2025-09-11 13:25:54] [Rank 0] step:3361/10000 train_time:157368ms step_avg:46.82ms +[2025-09-11 13:25:55] [Rank 0] step:3381/10000 train_time:158309ms step_avg:46.82ms +[2025-09-11 13:25:55] [Rank 0] step:3381/10000 train_time:158309ms step_avg:46.82ms +[2025-09-11 13:25:56] [Rank 0] step:3401/10000 train_time:158973ms step_avg:46.74ms +[2025-09-11 13:25:56] [Rank 0] step:3401/10000 train_time:158973ms step_avg:46.74ms +[2025-09-11 13:25:56] [Rank 0] step:3421/10000 train_time:159638ms step_avg:46.66ms +[2025-09-11 13:25:56] [Rank 0] step:3421/10000 train_time:159638ms step_avg:46.66ms +[2025-09-11 13:25:57] [Rank 0] step:3441/10000 train_time:160303ms step_avg:46.59ms +[2025-09-11 13:25:57] [Rank 0] step:3441/10000 train_time:160303ms step_avg:46.59ms +[2025-09-11 13:25:58] [Rank 0] step:3461/10000 train_time:161218ms step_avg:46.58ms +[2025-09-11 13:25:58] [Rank 0] step:3461/10000 train_time:161218ms step_avg:46.58ms +[2025-09-11 13:25:59] [Rank 0] step:3481/10000 train_time:161883ms step_avg:46.50ms +[2025-09-11 13:25:59] [Rank 0] step:3481/10000 train_time:161883ms step_avg:46.50ms +[2025-09-11 13:25:59] [Rank 0] step:3501/10000 train_time:162548ms step_avg:46.43ms +[2025-09-11 13:25:59] [Rank 0] step:3501/10000 train_time:162548ms step_avg:46.43ms +[2025-09-11 13:26:00] [Rank 0] step:3521/10000 train_time:163213ms step_avg:46.35ms 
+[2025-09-11 13:26:00] [Rank 0] step:3521/10000 train_time:163213ms step_avg:46.35ms +[2025-09-11 13:26:01] [Rank 0] step:3541/10000 train_time:163877ms step_avg:46.28ms +[2025-09-11 13:26:01] [Rank 0] step:3541/10000 train_time:163877ms step_avg:46.28ms +[2025-09-11 13:26:01] [Rank 0] step:3561/10000 train_time:164542ms step_avg:46.21ms +[2025-09-11 13:26:01] [Rank 0] step:3561/10000 train_time:164542ms step_avg:46.21ms +[2025-09-11 13:26:02] [Rank 0] step:3581/10000 train_time:165208ms step_avg:46.13ms +[2025-09-11 13:26:02] [Rank 0] step:3581/10000 train_time:165208ms step_avg:46.13ms +[2025-09-11 13:26:03] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 13:26:03] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 13:26:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 13:26:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 13:26:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 13:26:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 13:26:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:26:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:26:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 13:26:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 13:26:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 13:26:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 13:26:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 13:26:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 13:26:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 13:26:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 13:26:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 13:26:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 13:26:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 13:26:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 13:26:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 13:26:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 13:26:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 13:26:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 13:26:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 13:26:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 13:26:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 13:26:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 13:26:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 13:26:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 13:26:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 13:26:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 13:26:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 13:26:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 13:26:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 13:26:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 13:26:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 13:26:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 13:26:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 13:26:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 13:26:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 13:26:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 13:26:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 13:26:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 13:26:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 13:26:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:26:16] [Rank 0] PRINT: step:3600/10000 val_loss:6.0608 total_sharp:7.4703e-02 L1_sharp:1.7177e-01 L2_sharp:2.0103e-01 L3_sharp:2.1912e-01 L4_sharp:2.5594e-01 L5_sharp:3.5824e-01 L6_sharp:4.5630e-01 L7_sharp:7.3824e-01 L8_sharp:1.1454e+00 L9_sharp:1.9156e+00 L10_sharp:2.4518e+00 L11_sharp:2.4650e+00 L12_sharp:2.4937e+00 total_fnorm:1.6328e+00 total_l1_linf:1.2800e+03 total_spectral:8.2031e-01 L1_fnorm:4.7119e-02 L2_fnorm:4.7607e-02 L3_fnorm:4.7607e-02 L4_fnorm:4.7607e-02 L5_fnorm:4.7363e-02 L6_fnorm:4.7607e-02 L7_fnorm:4.7119e-02 L8_fnorm:4.7119e-02 L9_fnorm:4.7607e-02 L10_fnorm:4.7363e-02 L11_fnorm:4.8096e-02 L12_fnorm:4.3945e-02 L1_l1linf:1.2512e-02 L2_l1linf:1.2329e-02 L3_l1linf:1.2634e-02 L4_l1linf:1.2939e-02 L5_l1linf:1.2695e-02 L6_l1linf:1.2817e-02 L7_l1linf:1.3123e-02 L8_l1linf:1.3367e-02 L9_l1linf:1.3794e-02 L10_l1linf:1.4221e-02 L11_l1linf:1.4954e-02 L12_l1linf:1.3062e-02 L1_spectral:7.8574e-04 L2_spectral:7.7965e-04 L3_spectral:7.7827e-04 L4_spectral:7.8675e-04 L5_spectral:7.8106e-04 L6_spectral:7.7891e-04 L7_spectral:7.8148e-04 L8_spectral:7.8009e-04 L9_spectral:7.8739e-04 L10_spectral:7.8029e-04 L11_spectral:7.8951e-04 L12_spectral:7.2610e-04 train_time:165856ms step_avg:46.07ms +[2025-09-11 13:26:16] [Rank 0] PRINT: step:3600/10000 val_loss:6.0608 total_sharp:7.4703e-02 L1_sharp:1.7177e-01 L2_sharp:2.0103e-01 L3_sharp:2.1912e-01 L4_sharp:2.5594e-01 L5_sharp:3.5824e-01 L6_sharp:4.5630e-01 L7_sharp:7.3824e-01 L8_sharp:1.1454e+00 L9_sharp:1.9156e+00 L10_sharp:2.4518e+00 L11_sharp:2.4650e+00 L12_sharp:2.4937e+00 total_fnorm:1.6328e+00 total_l1_linf:1.2800e+03 total_spectral:8.2031e-01 L1_fnorm:4.7119e-02 L2_fnorm:4.7607e-02 L3_fnorm:4.7607e-02 L4_fnorm:4.7607e-02 L5_fnorm:4.7363e-02 L6_fnorm:4.7607e-02 L7_fnorm:4.7119e-02 L8_fnorm:4.7119e-02 L9_fnorm:4.7607e-02 L10_fnorm:4.7363e-02 L11_fnorm:4.8096e-02 L12_fnorm:4.3945e-02 L1_l1linf:1.2512e-02 L2_l1linf:1.2329e-02 L3_l1linf:1.2634e-02 L4_l1linf:1.2939e-02 L5_l1linf:1.2695e-02 
L6_l1linf:1.2817e-02 L7_l1linf:1.3123e-02 L8_l1linf:1.3367e-02 L9_l1linf:1.3794e-02 L10_l1linf:1.4221e-02 L11_l1linf:1.4954e-02 L12_l1linf:1.3062e-02 L1_spectral:7.8574e-04 L2_spectral:7.7965e-04 L3_spectral:7.7827e-04 L4_spectral:7.8675e-04 L5_spectral:7.8106e-04 L6_spectral:7.7891e-04 L7_spectral:7.8148e-04 L8_spectral:7.8009e-04 L9_spectral:7.8739e-04 L10_spectral:7.8029e-04 L11_spectral:7.8951e-04 L12_spectral:7.2610e-04 train_time:165856ms step_avg:46.07ms +[2025-09-11 13:26:18] [Rank 0] step:3601/10000 train_time:167808ms step_avg:46.60ms +[2025-09-11 13:26:18] [Rank 0] step:3601/10000 train_time:167808ms step_avg:46.60ms +[2025-09-11 13:26:19] [Rank 0] step:3621/10000 train_time:168475ms step_avg:46.53ms +[2025-09-11 13:26:19] [Rank 0] step:3621/10000 train_time:168475ms step_avg:46.53ms +[2025-09-11 13:26:20] [Rank 0] step:3641/10000 train_time:169141ms step_avg:46.45ms +[2025-09-11 13:26:20] [Rank 0] step:3641/10000 train_time:169141ms step_avg:46.45ms +[2025-09-11 13:26:20] [Rank 0] step:3661/10000 train_time:169805ms step_avg:46.38ms +[2025-09-11 13:26:20] [Rank 0] step:3661/10000 train_time:169805ms step_avg:46.38ms +[2025-09-11 13:26:21] [Rank 0] step:3681/10000 train_time:170470ms step_avg:46.31ms +[2025-09-11 13:26:21] [Rank 0] step:3681/10000 train_time:170470ms step_avg:46.31ms +[2025-09-11 13:26:22] [Rank 0] step:3701/10000 train_time:171135ms step_avg:46.24ms +[2025-09-11 13:26:22] [Rank 0] step:3701/10000 train_time:171135ms step_avg:46.24ms +[2025-09-11 13:26:22] [Rank 0] step:3721/10000 train_time:171809ms step_avg:46.17ms +[2025-09-11 13:26:22] [Rank 0] step:3721/10000 train_time:171809ms step_avg:46.17ms +[2025-09-11 13:26:23] [Rank 0] step:3741/10000 train_time:172484ms step_avg:46.11ms +[2025-09-11 13:26:23] [Rank 0] step:3741/10000 train_time:172484ms step_avg:46.11ms +[2025-09-11 13:26:24] [Rank 0] step:3761/10000 train_time:173159ms step_avg:46.04ms +[2025-09-11 13:26:24] [Rank 0] step:3761/10000 train_time:173159ms step_avg:46.04ms 
+[2025-09-11 13:26:24] [Rank 0] step:3781/10000 train_time:173835ms step_avg:45.98ms +[2025-09-11 13:26:24] [Rank 0] step:3781/10000 train_time:173835ms step_avg:45.98ms +[2025-09-11 13:26:25] [Rank 0] step:3801/10000 train_time:174510ms step_avg:45.91ms +[2025-09-11 13:26:25] [Rank 0] step:3801/10000 train_time:174510ms step_avg:45.91ms +[2025-09-11 13:26:26] [Rank 0] step:3821/10000 train_time:175186ms step_avg:45.85ms +[2025-09-11 13:26:26] [Rank 0] step:3821/10000 train_time:175186ms step_avg:45.85ms +[2025-09-11 13:26:26] [Rank 0] step:3841/10000 train_time:175861ms step_avg:45.79ms +[2025-09-11 13:26:26] [Rank 0] step:3841/10000 train_time:175861ms step_avg:45.79ms +[2025-09-11 13:26:27] [Rank 0] step:3861/10000 train_time:176536ms step_avg:45.72ms +[2025-09-11 13:26:27] [Rank 0] step:3861/10000 train_time:176536ms step_avg:45.72ms +[2025-09-11 13:26:28] [Rank 0] step:3881/10000 train_time:177211ms step_avg:45.66ms +[2025-09-11 13:26:28] [Rank 0] step:3881/10000 train_time:177211ms step_avg:45.66ms +[2025-09-11 13:26:28] [Rank 0] step:3901/10000 train_time:177885ms step_avg:45.60ms +[2025-09-11 13:26:28] [Rank 0] step:3901/10000 train_time:177885ms step_avg:45.60ms +[2025-09-11 13:26:29] [Rank 0] step:3921/10000 train_time:178560ms step_avg:45.54ms +[2025-09-11 13:26:29] [Rank 0] step:3921/10000 train_time:178560ms step_avg:45.54ms +[2025-09-11 13:26:30] [Rank 0] step:3941/10000 train_time:179236ms step_avg:45.48ms +[2025-09-11 13:26:30] [Rank 0] step:3941/10000 train_time:179236ms step_avg:45.48ms +[2025-09-11 13:26:30] [Rank 0] step:3961/10000 train_time:179911ms step_avg:45.42ms +[2025-09-11 13:26:30] [Rank 0] step:3961/10000 train_time:179911ms step_avg:45.42ms +[2025-09-11 13:26:31] [Rank 0] step:3981/10000 train_time:180585ms step_avg:45.36ms +[2025-09-11 13:26:31] [Rank 0] step:3981/10000 train_time:180585ms step_avg:45.36ms +[2025-09-11 13:26:32] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 13:26:32] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 13:26:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 13:26:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 13:26:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 13:26:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 13:26:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:26:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:26:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 13:26:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 13:26:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 13:26:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 13:26:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 13:26:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 13:26:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 13:26:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 13:26:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 13:26:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 13:26:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 13:26:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 13:26:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 13:26:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 13:26:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 13:26:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 13:26:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 13:26:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 13:26:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 13:26:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 13:26:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 13:26:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 13:26:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 13:26:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 13:26:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 13:26:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 13:26:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 13:26:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 13:26:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 13:26:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 13:26:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 13:26:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 13:26:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 13:26:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 13:26:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 13:26:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 13:26:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 13:26:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 13:26:42] [Rank 0] PRINT: step:4000/10000 val_loss:5.9996 total_sharp:1.2552e-01 L1_sharp:1.8011e-01 L2_sharp:2.3592e-01 L3_sharp:3.0611e-01 L4_sharp:4.1372e-01 L5_sharp:5.4661e-01 L6_sharp:7.8076e-01 L7_sharp:1.3000e+00 L8_sharp:2.0673e+00 L9_sharp:3.4299e+00 L10_sharp:4.9703e+00 L11_sharp:5.7307e+00 L12_sharp:6.0518e+00 total_fnorm:1.7578e+00 total_l1_linf:1.3280e+03 total_spectral:8.8281e-01 L1_fnorm:4.7119e-02 L2_fnorm:4.7607e-02 L3_fnorm:4.7363e-02 L4_fnorm:4.7607e-02 L5_fnorm:4.6875e-02 L6_fnorm:4.7363e-02 L7_fnorm:4.7119e-02 L8_fnorm:4.6875e-02 L9_fnorm:4.7607e-02 L10_fnorm:4.6875e-02 L11_fnorm:4.7363e-02 L12_fnorm:4.0771e-02 L1_l1linf:1.2451e-02 L2_l1linf:1.2695e-02 L3_l1linf:1.2878e-02 L4_l1linf:1.3245e-02 L5_l1linf:1.3428e-02 L6_l1linf:1.3550e-02 L7_l1linf:1.3916e-02 L8_l1linf:1.4587e-02 L9_l1linf:1.4954e-02 L10_l1linf:1.5198e-02 L11_l1linf:1.5198e-02 L12_l1linf:1.1597e-02 L1_spectral:7.8408e-04 L2_spectral:7.8264e-04 L3_spectral:7.8148e-04 L4_spectral:7.8786e-04 L5_spectral:7.7282e-04 L6_spectral:7.7753e-04 L7_spectral:7.7789e-04 L8_spectral:7.6522e-04 L9_spectral:7.7846e-04 L10_spectral:7.6902e-04 L11_spectral:7.6801e-04 L12_spectral:6.7695e-04 train_time:181241ms step_avg:45.31ms +[2025-09-11 13:26:42] [Rank 0] PRINT: step:4000/10000 
val_loss:5.9996 total_sharp:1.2552e-01 L1_sharp:1.8011e-01 L2_sharp:2.3592e-01 L3_sharp:3.0611e-01 L4_sharp:4.1372e-01 L5_sharp:5.4661e-01 L6_sharp:7.8076e-01 L7_sharp:1.3000e+00 L8_sharp:2.0673e+00 L9_sharp:3.4299e+00 L10_sharp:4.9703e+00 L11_sharp:5.7307e+00 L12_sharp:6.0518e+00 total_fnorm:1.7578e+00 total_l1_linf:1.3280e+03 total_spectral:8.8281e-01 L1_fnorm:4.7119e-02 L2_fnorm:4.7607e-02 L3_fnorm:4.7363e-02 L4_fnorm:4.7607e-02 L5_fnorm:4.6875e-02 L6_fnorm:4.7363e-02 L7_fnorm:4.7119e-02 L8_fnorm:4.6875e-02 L9_fnorm:4.7607e-02 L10_fnorm:4.6875e-02 L11_fnorm:4.7363e-02 L12_fnorm:4.0771e-02 L1_l1linf:1.2451e-02 L2_l1linf:1.2695e-02 L3_l1linf:1.2878e-02 L4_l1linf:1.3245e-02 L5_l1linf:1.3428e-02 L6_l1linf:1.3550e-02 L7_l1linf:1.3916e-02 L8_l1linf:1.4587e-02 L9_l1linf:1.4954e-02 L10_l1linf:1.5198e-02 L11_l1linf:1.5198e-02 L12_l1linf:1.1597e-02 L1_spectral:7.8408e-04 L2_spectral:7.8264e-04 L3_spectral:7.8148e-04 L4_spectral:7.8786e-04 L5_spectral:7.7282e-04 L6_spectral:7.7753e-04 L7_spectral:7.7789e-04 L8_spectral:7.6522e-04 L9_spectral:7.7846e-04 L10_spectral:7.6902e-04 L11_spectral:7.6801e-04 L12_spectral:6.7695e-04 train_time:181241ms step_avg:45.31ms +[2025-09-11 13:26:44] [Rank 0] step:4001/10000 train_time:183149ms step_avg:45.78ms +[2025-09-11 13:26:44] [Rank 0] step:4001/10000 train_time:183149ms step_avg:45.78ms +[2025-09-11 13:26:45] [Rank 0] step:4021/10000 train_time:183828ms step_avg:45.72ms +[2025-09-11 13:26:45] [Rank 0] step:4021/10000 train_time:183828ms step_avg:45.72ms +[2025-09-11 13:26:46] [Rank 0] step:4041/10000 train_time:184504ms step_avg:45.66ms +[2025-09-11 13:26:46] [Rank 0] step:4041/10000 train_time:184504ms step_avg:45.66ms +[2025-09-11 13:26:46] [Rank 0] step:4061/10000 train_time:185177ms step_avg:45.60ms +[2025-09-11 13:26:46] [Rank 0] step:4061/10000 train_time:185177ms step_avg:45.60ms +[2025-09-11 13:26:47] [Rank 0] step:4081/10000 train_time:185853ms step_avg:45.54ms +[2025-09-11 13:26:47] [Rank 0] step:4081/10000 
train_time:185853ms step_avg:45.54ms +[2025-09-11 13:26:48] [Rank 0] step:4101/10000 train_time:186527ms step_avg:45.48ms +[2025-09-11 13:26:48] [Rank 0] step:4101/10000 train_time:186527ms step_avg:45.48ms +[2025-09-11 13:26:48] [Rank 0] step:4121/10000 train_time:187203ms step_avg:45.43ms +[2025-09-11 13:26:48] [Rank 0] step:4121/10000 train_time:187203ms step_avg:45.43ms +[2025-09-11 13:26:49] [Rank 0] step:4141/10000 train_time:187878ms step_avg:45.37ms +[2025-09-11 13:26:49] [Rank 0] step:4141/10000 train_time:187878ms step_avg:45.37ms +[2025-09-11 13:26:50] [Rank 0] step:4161/10000 train_time:188553ms step_avg:45.31ms +[2025-09-11 13:26:50] [Rank 0] step:4161/10000 train_time:188553ms step_avg:45.31ms +[2025-09-11 13:26:50] [Rank 0] step:4181/10000 train_time:189228ms step_avg:45.26ms +[2025-09-11 13:26:50] [Rank 0] step:4181/10000 train_time:189228ms step_avg:45.26ms +[2025-09-11 13:26:51] [Rank 0] step:4201/10000 train_time:189903ms step_avg:45.20ms +[2025-09-11 13:26:51] [Rank 0] step:4201/10000 train_time:189903ms step_avg:45.20ms +[2025-09-11 13:26:52] [Rank 0] step:4221/10000 train_time:190578ms step_avg:45.15ms +[2025-09-11 13:26:52] [Rank 0] step:4221/10000 train_time:190578ms step_avg:45.15ms +[2025-09-11 13:26:52] [Rank 0] step:4241/10000 train_time:191253ms step_avg:45.10ms +[2025-09-11 13:26:52] [Rank 0] step:4241/10000 train_time:191253ms step_avg:45.10ms +[2025-09-11 13:26:53] [Rank 0] step:4261/10000 train_time:191929ms step_avg:45.04ms +[2025-09-11 13:26:53] [Rank 0] step:4261/10000 train_time:191929ms step_avg:45.04ms +[2025-09-11 13:26:54] [Rank 0] step:4281/10000 train_time:192605ms step_avg:44.99ms +[2025-09-11 13:26:54] [Rank 0] step:4281/10000 train_time:192605ms step_avg:44.99ms +[2025-09-11 13:26:54] [Rank 0] step:4301/10000 train_time:193281ms step_avg:44.94ms +[2025-09-11 13:26:54] [Rank 0] step:4301/10000 train_time:193281ms step_avg:44.94ms +[2025-09-11 13:26:55] [Rank 0] step:4321/10000 train_time:193956ms step_avg:44.89ms 
+[2025-09-11 13:26:55] [Rank 0] step:4321/10000 train_time:193956ms step_avg:44.89ms +[2025-09-11 13:26:56] [Rank 0] step:4341/10000 train_time:194632ms step_avg:44.84ms +[2025-09-11 13:26:56] [Rank 0] step:4341/10000 train_time:194632ms step_avg:44.84ms +[2025-09-11 13:26:56] [Rank 0] step:4361/10000 train_time:195306ms step_avg:44.78ms +[2025-09-11 13:26:56] [Rank 0] step:4361/10000 train_time:195306ms step_avg:44.78ms +[2025-09-11 13:26:57] [Rank 0] step:4381/10000 train_time:195983ms step_avg:44.73ms +[2025-09-11 13:26:57] [Rank 0] step:4381/10000 train_time:195983ms step_avg:44.73ms +[2025-09-11 13:26:58] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 13:26:58] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 13:26:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 13:26:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 13:27:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 13:27:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 13:27:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:27:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:27:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 13:27:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 13:27:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 13:27:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 13:27:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 13:27:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 13:27:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 13:27:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 13:27:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 13:27:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 13:27:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 13:27:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 13:27:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 13:27:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 13:27:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 13:27:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 13:27:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 13:27:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 13:27:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 13:27:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 13:27:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 13:27:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 13:27:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 13:27:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 13:27:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 13:27:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 13:27:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 13:27:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 13:27:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 13:27:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 13:27:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 13:27:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 13:27:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 13:27:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 13:27:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 13:27:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 13:27:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 13:27:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:27:09] [Rank 0] PRINT: step:4400/10000 val_loss:5.9523 total_sharp:1.2161e-01 L1_sharp:1.7252e-01 L2_sharp:2.1488e-01 L3_sharp:2.5454e-01 L4_sharp:3.4774e-01 L5_sharp:4.8073e-01 L6_sharp:8.4114e-01 L7_sharp:1.2380e+00 L8_sharp:1.8315e+00 L9_sharp:3.3002e+00 L10_sharp:4.4015e+00 L11_sharp:3.6452e+00 L12_sharp:2.9095e+00 total_fnorm:1.6172e+00 total_l1_linf:1.2080e+03 total_spectral:8.1250e-01 L1_fnorm:4.6875e-02 L2_fnorm:4.7119e-02 L3_fnorm:4.7119e-02 L4_fnorm:4.7607e-02 L5_fnorm:4.6875e-02 L6_fnorm:4.7119e-02 L7_fnorm:4.7119e-02 L8_fnorm:4.6875e-02 L9_fnorm:4.7119e-02 L10_fnorm:4.6631e-02 L11_fnorm:4.7607e-02 L12_fnorm:4.1504e-02 L1_l1linf:1.2146e-02 L2_l1linf:1.2573e-02 L3_l1linf:1.2573e-02 L4_l1linf:1.2756e-02 L5_l1linf:1.3062e-02 L6_l1linf:1.3428e-02 L7_l1linf:1.3672e-02 L8_l1linf:1.4038e-02 L9_l1linf:1.4282e-02 L10_l1linf:1.4832e-02 L11_l1linf:1.4832e-02 L12_l1linf:1.1353e-02 L1_spectral:7.8153e-04 L2_spectral:7.8647e-04 L3_spectral:7.8269e-04 L4_spectral:7.8576e-04 L5_spectral:7.8046e-04 L6_spectral:7.7921e-04 L7_spectral:7.8242e-04 L8_spectral:7.7442e-04 L9_spectral:7.7947e-04 L10_spectral:7.7230e-04 L11_spectral:7.7770e-04 L12_spectral:6.8630e-04 train_time:196638ms step_avg:44.69ms +[2025-09-11 13:27:09] [Rank 0] PRINT: step:4400/10000 val_loss:5.9523 total_sharp:1.2161e-01 L1_sharp:1.7252e-01 L2_sharp:2.1488e-01 L3_sharp:2.5454e-01 L4_sharp:3.4774e-01 L5_sharp:4.8073e-01 L6_sharp:8.4114e-01 L7_sharp:1.2380e+00 L8_sharp:1.8315e+00 L9_sharp:3.3002e+00 L10_sharp:4.4015e+00 L11_sharp:3.6452e+00 L12_sharp:2.9095e+00 total_fnorm:1.6172e+00 total_l1_linf:1.2080e+03 total_spectral:8.1250e-01 L1_fnorm:4.6875e-02 L2_fnorm:4.7119e-02 L3_fnorm:4.7119e-02 L4_fnorm:4.7607e-02 L5_fnorm:4.6875e-02 L6_fnorm:4.7119e-02 L7_fnorm:4.7119e-02 L8_fnorm:4.6875e-02 L9_fnorm:4.7119e-02 L10_fnorm:4.6631e-02 L11_fnorm:4.7607e-02 L12_fnorm:4.1504e-02 L1_l1linf:1.2146e-02 L2_l1linf:1.2573e-02 L3_l1linf:1.2573e-02 L4_l1linf:1.2756e-02 L5_l1linf:1.3062e-02 
L6_l1linf:1.3428e-02 L7_l1linf:1.3672e-02 L8_l1linf:1.4038e-02 L9_l1linf:1.4282e-02 L10_l1linf:1.4832e-02 L11_l1linf:1.4832e-02 L12_l1linf:1.1353e-02 L1_spectral:7.8153e-04 L2_spectral:7.8647e-04 L3_spectral:7.8269e-04 L4_spectral:7.8576e-04 L5_spectral:7.8046e-04 L6_spectral:7.7921e-04 L7_spectral:7.8242e-04 L8_spectral:7.7442e-04 L9_spectral:7.7947e-04 L10_spectral:7.7230e-04 L11_spectral:7.7770e-04 L12_spectral:6.8630e-04 train_time:196638ms step_avg:44.69ms +[2025-09-11 13:27:12] [Rank 0] step:4401/10000 train_time:199371ms step_avg:45.30ms +[2025-09-11 13:27:12] [Rank 0] step:4401/10000 train_time:199371ms step_avg:45.30ms +[2025-09-11 13:27:12] [Rank 0] step:4421/10000 train_time:200335ms step_avg:45.31ms +[2025-09-11 13:27:12] [Rank 0] step:4421/10000 train_time:200335ms step_avg:45.31ms +[2025-09-11 13:27:13] [Rank 0] step:4441/10000 train_time:201012ms step_avg:45.26ms +[2025-09-11 13:27:13] [Rank 0] step:4441/10000 train_time:201012ms step_avg:45.26ms +[2025-09-11 13:27:14] [Rank 0] step:4461/10000 train_time:201689ms step_avg:45.21ms +[2025-09-11 13:27:14] [Rank 0] step:4461/10000 train_time:201689ms step_avg:45.21ms +[2025-09-11 13:27:14] [Rank 0] step:4481/10000 train_time:202366ms step_avg:45.16ms +[2025-09-11 13:27:14] [Rank 0] step:4481/10000 train_time:202366ms step_avg:45.16ms +[2025-09-11 13:27:15] [Rank 0] step:4501/10000 train_time:203045ms step_avg:45.11ms +[2025-09-11 13:27:15] [Rank 0] step:4501/10000 train_time:203045ms step_avg:45.11ms +[2025-09-11 13:27:16] [Rank 0] step:4521/10000 train_time:203722ms step_avg:45.06ms +[2025-09-11 13:27:16] [Rank 0] step:4521/10000 train_time:203722ms step_avg:45.06ms +[2025-09-11 13:27:16] [Rank 0] step:4541/10000 train_time:204400ms step_avg:45.01ms +[2025-09-11 13:27:16] [Rank 0] step:4541/10000 train_time:204400ms step_avg:45.01ms +[2025-09-11 13:27:17] [Rank 0] step:4561/10000 train_time:205076ms step_avg:44.96ms +[2025-09-11 13:27:17] [Rank 0] step:4561/10000 train_time:205076ms step_avg:44.96ms 
+[2025-09-11 13:27:18] [Rank 0] step:4581/10000 train_time:205754ms step_avg:44.91ms +[2025-09-11 13:27:18] [Rank 0] step:4581/10000 train_time:205754ms step_avg:44.91ms +[2025-09-11 13:27:18] [Rank 0] step:4601/10000 train_time:206432ms step_avg:44.87ms +[2025-09-11 13:27:18] [Rank 0] step:4601/10000 train_time:206432ms step_avg:44.87ms +[2025-09-11 13:27:19] [Rank 0] step:4621/10000 train_time:207109ms step_avg:44.82ms +[2025-09-11 13:27:19] [Rank 0] step:4621/10000 train_time:207109ms step_avg:44.82ms +[2025-09-11 13:27:20] [Rank 0] step:4641/10000 train_time:207787ms step_avg:44.77ms +[2025-09-11 13:27:20] [Rank 0] step:4641/10000 train_time:207787ms step_avg:44.77ms +[2025-09-11 13:27:20] [Rank 0] step:4661/10000 train_time:208464ms step_avg:44.73ms +[2025-09-11 13:27:20] [Rank 0] step:4661/10000 train_time:208464ms step_avg:44.73ms +[2025-09-11 13:27:21] [Rank 0] step:4681/10000 train_time:209141ms step_avg:44.68ms +[2025-09-11 13:27:21] [Rank 0] step:4681/10000 train_time:209141ms step_avg:44.68ms +[2025-09-11 13:27:22] [Rank 0] step:4701/10000 train_time:209819ms step_avg:44.63ms +[2025-09-11 13:27:22] [Rank 0] step:4701/10000 train_time:209819ms step_avg:44.63ms +[2025-09-11 13:27:22] [Rank 0] step:4721/10000 train_time:210496ms step_avg:44.59ms +[2025-09-11 13:27:22] [Rank 0] step:4721/10000 train_time:210496ms step_avg:44.59ms +[2025-09-11 13:27:23] [Rank 0] step:4741/10000 train_time:211173ms step_avg:44.54ms +[2025-09-11 13:27:23] [Rank 0] step:4741/10000 train_time:211173ms step_avg:44.54ms +[2025-09-11 13:27:24] [Rank 0] step:4761/10000 train_time:211852ms step_avg:44.50ms +[2025-09-11 13:27:24] [Rank 0] step:4761/10000 train_time:211852ms step_avg:44.50ms +[2025-09-11 13:27:24] [Rank 0] step:4781/10000 train_time:212529ms step_avg:44.45ms +[2025-09-11 13:27:24] [Rank 0] step:4781/10000 train_time:212529ms step_avg:44.45ms +[2025-09-11 13:27:25] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 13:27:25] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 13:27:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 13:27:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 13:27:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 13:27:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 13:27:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:27:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:27:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 13:27:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 13:27:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 13:27:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 13:27:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 13:27:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 13:27:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 13:27:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 13:27:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 13:27:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 13:27:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 13:27:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 13:27:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 13:27:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 13:27:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 13:27:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 13:27:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 13:27:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 13:27:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 13:27:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 13:27:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 13:27:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 13:27:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 13:27:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 13:27:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 13:27:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 13:27:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 13:27:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 13:27:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 13:27:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 13:27:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 13:27:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 13:27:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 13:27:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 13:27:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 13:27:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 13:27:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 13:27:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 13:27:36] [Rank 0] PRINT: step:4800/10000 val_loss:5.9117 total_sharp:1.0159e-01 L1_sharp:1.2406e-01 L2_sharp:1.4950e-01 L3_sharp:1.6861e-01 L4_sharp:2.1586e-01 L5_sharp:3.1384e-01 L6_sharp:5.8574e-01 L7_sharp:9.7642e-01 L8_sharp:1.3967e+00 L9_sharp:2.4995e+00 L10_sharp:3.9121e+00 L11_sharp:4.0734e+00 L12_sharp:4.3617e+00 total_fnorm:1.5781e+00 total_l1_linf:1.1760e+03 total_spectral:7.9297e-01 L1_fnorm:4.6875e-02 L2_fnorm:4.7119e-02 L3_fnorm:4.7119e-02 L4_fnorm:4.7119e-02 L5_fnorm:4.7119e-02 L6_fnorm:4.7119e-02 L7_fnorm:4.6631e-02 L8_fnorm:4.6875e-02 L9_fnorm:4.7119e-02 L10_fnorm:4.6875e-02 L11_fnorm:4.7607e-02 L12_fnorm:4.3457e-02 L1_l1linf:1.1841e-02 L2_l1linf:1.1719e-02 L3_l1linf:1.1841e-02 L4_l1linf:1.1841e-02 L5_l1linf:1.2268e-02 L6_l1linf:1.2268e-02 L7_l1linf:1.2390e-02 L8_l1linf:1.2939e-02 L9_l1linf:1.3489e-02 L10_l1linf:1.3855e-02 L11_l1linf:1.4526e-02 L12_l1linf:1.1963e-02 L1_spectral:7.9021e-04 L2_spectral:7.8811e-04 L3_spectral:7.9010e-04 L4_spectral:7.8971e-04 L5_spectral:7.8868e-04 L6_spectral:7.9151e-04 L7_spectral:7.8963e-04 L8_spectral:7.8404e-04 L9_spectral:7.9207e-04 L10_spectral:7.8647e-04 L11_spectral:7.9546e-04 L12_spectral:7.1248e-04 train_time:213186ms step_avg:44.41ms +[2025-09-11 13:27:36] [Rank 0] PRINT: step:4800/10000 
val_loss:5.9117 total_sharp:1.0159e-01 L1_sharp:1.2406e-01 L2_sharp:1.4950e-01 L3_sharp:1.6861e-01 L4_sharp:2.1586e-01 L5_sharp:3.1384e-01 L6_sharp:5.8574e-01 L7_sharp:9.7642e-01 L8_sharp:1.3967e+00 L9_sharp:2.4995e+00 L10_sharp:3.9121e+00 L11_sharp:4.0734e+00 L12_sharp:4.3617e+00 total_fnorm:1.5781e+00 total_l1_linf:1.1760e+03 total_spectral:7.9297e-01 L1_fnorm:4.6875e-02 L2_fnorm:4.7119e-02 L3_fnorm:4.7119e-02 L4_fnorm:4.7119e-02 L5_fnorm:4.7119e-02 L6_fnorm:4.7119e-02 L7_fnorm:4.6631e-02 L8_fnorm:4.6875e-02 L9_fnorm:4.7119e-02 L10_fnorm:4.6875e-02 L11_fnorm:4.7607e-02 L12_fnorm:4.3457e-02 L1_l1linf:1.1841e-02 L2_l1linf:1.1719e-02 L3_l1linf:1.1841e-02 L4_l1linf:1.1841e-02 L5_l1linf:1.2268e-02 L6_l1linf:1.2268e-02 L7_l1linf:1.2390e-02 L8_l1linf:1.2939e-02 L9_l1linf:1.3489e-02 L10_l1linf:1.3855e-02 L11_l1linf:1.4526e-02 L12_l1linf:1.1963e-02 L1_spectral:7.9021e-04 L2_spectral:7.8811e-04 L3_spectral:7.9010e-04 L4_spectral:7.8971e-04 L5_spectral:7.8868e-04 L6_spectral:7.9151e-04 L7_spectral:7.8963e-04 L8_spectral:7.8404e-04 L9_spectral:7.9207e-04 L10_spectral:7.8647e-04 L11_spectral:7.9546e-04 L12_spectral:7.1248e-04 train_time:213186ms step_avg:44.41ms +[2025-09-11 13:27:38] [Rank 0] step:4801/10000 train_time:215188ms step_avg:44.82ms +[2025-09-11 13:27:38] [Rank 0] step:4801/10000 train_time:215188ms step_avg:44.82ms +[2025-09-11 13:27:39] [Rank 0] step:4821/10000 train_time:215899ms step_avg:44.78ms +[2025-09-11 13:27:39] [Rank 0] step:4821/10000 train_time:215899ms step_avg:44.78ms +[2025-09-11 13:27:39] [Rank 0] step:4841/10000 train_time:216579ms step_avg:44.74ms +[2025-09-11 13:27:39] [Rank 0] step:4841/10000 train_time:216579ms step_avg:44.74ms +[2025-09-11 13:27:40] [Rank 0] step:4861/10000 train_time:217258ms step_avg:44.69ms +[2025-09-11 13:27:40] [Rank 0] step:4861/10000 train_time:217258ms step_avg:44.69ms +[2025-09-11 13:27:41] [Rank 0] step:4881/10000 train_time:217936ms step_avg:44.65ms +[2025-09-11 13:27:41] [Rank 0] step:4881/10000 
train_time:217936ms step_avg:44.65ms +[2025-09-11 13:27:41] [Rank 0] step:4901/10000 train_time:218617ms step_avg:44.61ms +[2025-09-11 13:27:41] [Rank 0] step:4901/10000 train_time:218617ms step_avg:44.61ms +[2025-09-11 13:27:42] [Rank 0] step:4921/10000 train_time:219295ms step_avg:44.56ms +[2025-09-11 13:27:42] [Rank 0] step:4921/10000 train_time:219295ms step_avg:44.56ms +[2025-09-11 13:27:43] [Rank 0] step:4941/10000 train_time:219974ms step_avg:44.52ms +[2025-09-11 13:27:43] [Rank 0] step:4941/10000 train_time:219974ms step_avg:44.52ms +[2025-09-11 13:27:43] [Rank 0] step:4961/10000 train_time:220653ms step_avg:44.48ms +[2025-09-11 13:27:43] [Rank 0] step:4961/10000 train_time:220653ms step_avg:44.48ms +[2025-09-11 13:27:44] [Rank 0] step:4981/10000 train_time:221331ms step_avg:44.44ms +[2025-09-11 13:27:44] [Rank 0] step:4981/10000 train_time:221331ms step_avg:44.44ms +[2025-09-11 13:27:45] [Rank 0] step:5001/10000 train_time:222011ms step_avg:44.39ms +[2025-09-11 13:27:45] [Rank 0] step:5001/10000 train_time:222011ms step_avg:44.39ms +[2025-09-11 13:27:45] [Rank 0] step:5021/10000 train_time:222689ms step_avg:44.35ms +[2025-09-11 13:27:45] [Rank 0] step:5021/10000 train_time:222689ms step_avg:44.35ms +[2025-09-11 13:27:46] [Rank 0] step:5041/10000 train_time:223366ms step_avg:44.31ms +[2025-09-11 13:27:46] [Rank 0] step:5041/10000 train_time:223366ms step_avg:44.31ms +[2025-09-11 13:27:47] [Rank 0] step:5061/10000 train_time:224046ms step_avg:44.27ms +[2025-09-11 13:27:47] [Rank 0] step:5061/10000 train_time:224046ms step_avg:44.27ms +[2025-09-11 13:27:47] [Rank 0] step:5081/10000 train_time:224723ms step_avg:44.23ms +[2025-09-11 13:27:47] [Rank 0] step:5081/10000 train_time:224723ms step_avg:44.23ms +[2025-09-11 13:27:48] [Rank 0] step:5101/10000 train_time:225402ms step_avg:44.19ms +[2025-09-11 13:27:48] [Rank 0] step:5101/10000 train_time:225402ms step_avg:44.19ms +[2025-09-11 13:27:49] [Rank 0] step:5121/10000 train_time:226079ms step_avg:44.15ms 
+[2025-09-11 13:27:49] [Rank 0] step:5121/10000 train_time:226079ms step_avg:44.15ms +[2025-09-11 13:27:49] [Rank 0] step:5141/10000 train_time:226759ms step_avg:44.11ms +[2025-09-11 13:27:49] [Rank 0] step:5141/10000 train_time:226759ms step_avg:44.11ms +[2025-09-11 13:27:50] [Rank 0] step:5161/10000 train_time:227439ms step_avg:44.07ms +[2025-09-11 13:27:50] [Rank 0] step:5161/10000 train_time:227439ms step_avg:44.07ms +[2025-09-11 13:27:51] [Rank 0] step:5181/10000 train_time:228117ms step_avg:44.03ms +[2025-09-11 13:27:51] [Rank 0] step:5181/10000 train_time:228117ms step_avg:44.03ms +[2025-09-11 13:27:51] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 13:27:51] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 13:27:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 13:27:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 13:27:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 13:27:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 13:27:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:27:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:27:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 13:27:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 13:27:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 13:27:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 13:27:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 13:27:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 13:27:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 13:27:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 13:27:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 13:27:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 13:27:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 13:27:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 13:27:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 13:27:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 13:27:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 13:27:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 13:28:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 13:28:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 13:28:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 13:28:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 13:28:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 13:28:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 13:28:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 13:28:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 13:28:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 13:28:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 13:28:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 13:28:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 13:28:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 13:28:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 13:28:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 13:28:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 13:28:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 13:28:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 13:28:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 13:28:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 13:28:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 13:28:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:28:02] [Rank 0] PRINT: step:5200/10000 val_loss:5.8778 total_sharp:1.0197e-01 L1_sharp:1.1173e-01 L2_sharp:1.3076e-01 L3_sharp:1.6418e-01 L4_sharp:1.9385e-01 L5_sharp:2.8185e-01 L6_sharp:4.7013e-01 L7_sharp:8.4606e-01 L8_sharp:1.3416e+00 L9_sharp:2.5332e+00 L10_sharp:3.6813e+00 L11_sharp:3.4355e+00 L12_sharp:5.0569e+00 total_fnorm:1.5000e+00 total_l1_linf:1.1040e+03 total_spectral:7.5391e-01 L1_fnorm:4.6875e-02 L2_fnorm:4.6875e-02 L3_fnorm:4.6875e-02 L4_fnorm:4.7119e-02 L5_fnorm:4.7119e-02 L6_fnorm:4.6875e-02 L7_fnorm:4.6631e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.6875e-02 L10_fnorm:4.6875e-02 L11_fnorm:4.7607e-02 L12_fnorm:4.3945e-02 L1_l1linf:1.1719e-02 L2_l1linf:1.1780e-02 L3_l1linf:1.1841e-02 L4_l1linf:1.1963e-02 L5_l1linf:1.2207e-02 L6_l1linf:1.2390e-02 L7_l1linf:1.2390e-02 L8_l1linf:1.2695e-02 L9_l1linf:1.3123e-02 L10_l1linf:1.3672e-02 L11_l1linf:1.4343e-02 L12_l1linf:1.2085e-02 L1_spectral:7.9010e-04 L2_spectral:7.9101e-04 L3_spectral:7.8996e-04 L4_spectral:7.9306e-04 L5_spectral:7.8992e-04 L6_spectral:7.9351e-04 L7_spectral:7.9190e-04 L8_spectral:7.9394e-04 L9_spectral:7.8971e-04 L10_spectral:7.8124e-04 L11_spectral:7.9632e-04 L12_spectral:7.2470e-04 train_time:228782ms step_avg:44.00ms +[2025-09-11 13:28:02] [Rank 0] PRINT: step:5200/10000 val_loss:5.8778 total_sharp:1.0197e-01 L1_sharp:1.1173e-01 L2_sharp:1.3076e-01 L3_sharp:1.6418e-01 L4_sharp:1.9385e-01 L5_sharp:2.8185e-01 L6_sharp:4.7013e-01 L7_sharp:8.4606e-01 L8_sharp:1.3416e+00 L9_sharp:2.5332e+00 L10_sharp:3.6813e+00 L11_sharp:3.4355e+00 L12_sharp:5.0569e+00 total_fnorm:1.5000e+00 total_l1_linf:1.1040e+03 total_spectral:7.5391e-01 L1_fnorm:4.6875e-02 L2_fnorm:4.6875e-02 L3_fnorm:4.6875e-02 L4_fnorm:4.7119e-02 L5_fnorm:4.7119e-02 L6_fnorm:4.6875e-02 L7_fnorm:4.6631e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.6875e-02 L10_fnorm:4.6875e-02 L11_fnorm:4.7607e-02 L12_fnorm:4.3945e-02 L1_l1linf:1.1719e-02 L2_l1linf:1.1780e-02 L3_l1linf:1.1841e-02 L4_l1linf:1.1963e-02 L5_l1linf:1.2207e-02 
L6_l1linf:1.2390e-02 L7_l1linf:1.2390e-02 L8_l1linf:1.2695e-02 L9_l1linf:1.3123e-02 L10_l1linf:1.3672e-02 L11_l1linf:1.4343e-02 L12_l1linf:1.2085e-02 L1_spectral:7.9010e-04 L2_spectral:7.9101e-04 L3_spectral:7.8996e-04 L4_spectral:7.9306e-04 L5_spectral:7.8992e-04 L6_spectral:7.9351e-04 L7_spectral:7.9190e-04 L8_spectral:7.9394e-04 L9_spectral:7.8971e-04 L10_spectral:7.8124e-04 L11_spectral:7.9632e-04 L12_spectral:7.2470e-04 train_time:228782ms step_avg:44.00ms +[2025-09-11 13:28:04] [Rank 0] step:5201/10000 train_time:230853ms step_avg:44.39ms +[2025-09-11 13:28:04] [Rank 0] step:5201/10000 train_time:230853ms step_avg:44.39ms +[2025-09-11 13:28:05] [Rank 0] step:5221/10000 train_time:231559ms step_avg:44.35ms +[2025-09-11 13:28:05] [Rank 0] step:5221/10000 train_time:231559ms step_avg:44.35ms +[2025-09-11 13:28:06] [Rank 0] step:5241/10000 train_time:232247ms step_avg:44.31ms +[2025-09-11 13:28:06] [Rank 0] step:5241/10000 train_time:232247ms step_avg:44.31ms +[2025-09-11 13:28:06] [Rank 0] step:5261/10000 train_time:232938ms step_avg:44.28ms +[2025-09-11 13:28:06] [Rank 0] step:5261/10000 train_time:232938ms step_avg:44.28ms +[2025-09-11 13:28:07] [Rank 0] step:5281/10000 train_time:233627ms step_avg:44.24ms +[2025-09-11 13:28:07] [Rank 0] step:5281/10000 train_time:233627ms step_avg:44.24ms +[2025-09-11 13:28:08] [Rank 0] step:5301/10000 train_time:234314ms step_avg:44.20ms +[2025-09-11 13:28:08] [Rank 0] step:5301/10000 train_time:234314ms step_avg:44.20ms +[2025-09-11 13:28:09] [Rank 0] step:5321/10000 train_time:235001ms step_avg:44.16ms +[2025-09-11 13:28:09] [Rank 0] step:5321/10000 train_time:235001ms step_avg:44.16ms +[2025-09-11 13:28:09] [Rank 0] step:5341/10000 train_time:235688ms step_avg:44.13ms +[2025-09-11 13:28:09] [Rank 0] step:5341/10000 train_time:235688ms step_avg:44.13ms +[2025-09-11 13:28:10] [Rank 0] step:5361/10000 train_time:236375ms step_avg:44.09ms +[2025-09-11 13:28:10] [Rank 0] step:5361/10000 train_time:236375ms step_avg:44.09ms 
+[2025-09-11 13:28:11] [Rank 0] step:5381/10000 train_time:237065ms step_avg:44.06ms +[2025-09-11 13:28:11] [Rank 0] step:5381/10000 train_time:237065ms step_avg:44.06ms +[2025-09-11 13:28:11] [Rank 0] step:5401/10000 train_time:237752ms step_avg:44.02ms +[2025-09-11 13:28:11] [Rank 0] step:5401/10000 train_time:237752ms step_avg:44.02ms +[2025-09-11 13:28:12] [Rank 0] step:5421/10000 train_time:238441ms step_avg:43.98ms +[2025-09-11 13:28:12] [Rank 0] step:5421/10000 train_time:238441ms step_avg:43.98ms +[2025-09-11 13:28:13] [Rank 0] step:5441/10000 train_time:239130ms step_avg:43.95ms +[2025-09-11 13:28:13] [Rank 0] step:5441/10000 train_time:239130ms step_avg:43.95ms +[2025-09-11 13:28:13] [Rank 0] step:5461/10000 train_time:239818ms step_avg:43.91ms +[2025-09-11 13:28:13] [Rank 0] step:5461/10000 train_time:239818ms step_avg:43.91ms +[2025-09-11 13:28:14] [Rank 0] step:5481/10000 train_time:240506ms step_avg:43.88ms +[2025-09-11 13:28:14] [Rank 0] step:5481/10000 train_time:240506ms step_avg:43.88ms +[2025-09-11 13:28:15] [Rank 0] step:5501/10000 train_time:241193ms step_avg:43.85ms +[2025-09-11 13:28:15] [Rank 0] step:5501/10000 train_time:241193ms step_avg:43.85ms +[2025-09-11 13:28:15] [Rank 0] step:5521/10000 train_time:241881ms step_avg:43.81ms +[2025-09-11 13:28:15] [Rank 0] step:5521/10000 train_time:241881ms step_avg:43.81ms +[2025-09-11 13:28:16] [Rank 0] step:5541/10000 train_time:242571ms step_avg:43.78ms +[2025-09-11 13:28:16] [Rank 0] step:5541/10000 train_time:242571ms step_avg:43.78ms +[2025-09-11 13:28:17] [Rank 0] step:5561/10000 train_time:243261ms step_avg:43.74ms +[2025-09-11 13:28:17] [Rank 0] step:5561/10000 train_time:243261ms step_avg:43.74ms +[2025-09-11 13:28:17] [Rank 0] step:5581/10000 train_time:243952ms step_avg:43.71ms +[2025-09-11 13:28:17] [Rank 0] step:5581/10000 train_time:243952ms step_avg:43.71ms +[2025-09-11 13:28:18] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 13:28:18] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 13:28:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 13:28:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 13:28:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 13:28:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 13:28:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:28:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:28:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 13:28:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 13:28:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 13:28:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 13:28:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 13:28:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 13:28:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 13:28:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 13:28:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 13:28:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 13:28:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 13:28:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 13:28:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 13:28:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 13:28:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 13:28:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 13:28:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 13:28:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 13:28:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 13:28:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 13:28:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 13:28:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 13:28:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 13:28:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 13:28:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 13:28:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 13:28:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 13:28:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 13:28:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 13:28:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 13:28:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 13:28:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 13:28:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 13:28:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 13:28:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 13:28:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 13:28:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 13:28:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 13:28:29] [Rank 0] PRINT: step:5600/10000 val_loss:5.8407 total_sharp:9.2639e-02 L1_sharp:8.8319e-02 L2_sharp:1.1054e-01 L3_sharp:1.4143e-01 L4_sharp:1.7110e-01 L5_sharp:2.4553e-01 L6_sharp:3.7490e-01 L7_sharp:7.0768e-01 L8_sharp:1.1838e+00 L9_sharp:2.2532e+00 L10_sharp:3.6299e+00 L11_sharp:3.7508e+00 L12_sharp:3.2264e+00 total_fnorm:1.5078e+00 total_l1_linf:1.1040e+03 total_spectral:7.5391e-01 L1_fnorm:4.6631e-02 L2_fnorm:4.6875e-02 L3_fnorm:4.6875e-02 L4_fnorm:4.7119e-02 L5_fnorm:4.6875e-02 L6_fnorm:4.6875e-02 L7_fnorm:4.6631e-02 L8_fnorm:4.6875e-02 L9_fnorm:4.7119e-02 L10_fnorm:4.6875e-02 L11_fnorm:4.7607e-02 L12_fnorm:4.3945e-02 L1_l1linf:1.1169e-02 L2_l1linf:1.1719e-02 L3_l1linf:1.1536e-02 L4_l1linf:1.1780e-02 L5_l1linf:1.2146e-02 L6_l1linf:1.2085e-02 L7_l1linf:1.2451e-02 L8_l1linf:1.2573e-02 L9_l1linf:1.3489e-02 L10_l1linf:1.3550e-02 L11_l1linf:1.4038e-02 L12_l1linf:1.2207e-02 L1_spectral:7.9200e-04 L2_spectral:7.9485e-04 L3_spectral:8.0182e-04 L4_spectral:7.9645e-04 L5_spectral:7.9254e-04 L6_spectral:7.9097e-04 L7_spectral:8.0043e-04 L8_spectral:7.9010e-04 L9_spectral:7.9255e-04 L10_spectral:7.9208e-04 L11_spectral:7.8564e-04 L12_spectral:7.1257e-04 train_time:244621ms step_avg:43.68ms +[2025-09-11 13:28:29] [Rank 0] PRINT: step:5600/10000 
val_loss:5.8407 total_sharp:9.2639e-02 L1_sharp:8.8319e-02 L2_sharp:1.1054e-01 L3_sharp:1.4143e-01 L4_sharp:1.7110e-01 L5_sharp:2.4553e-01 L6_sharp:3.7490e-01 L7_sharp:7.0768e-01 L8_sharp:1.1838e+00 L9_sharp:2.2532e+00 L10_sharp:3.6299e+00 L11_sharp:3.7508e+00 L12_sharp:3.2264e+00 total_fnorm:1.5078e+00 total_l1_linf:1.1040e+03 total_spectral:7.5391e-01 L1_fnorm:4.6631e-02 L2_fnorm:4.6875e-02 L3_fnorm:4.6875e-02 L4_fnorm:4.7119e-02 L5_fnorm:4.6875e-02 L6_fnorm:4.6875e-02 L7_fnorm:4.6631e-02 L8_fnorm:4.6875e-02 L9_fnorm:4.7119e-02 L10_fnorm:4.6875e-02 L11_fnorm:4.7607e-02 L12_fnorm:4.3945e-02 L1_l1linf:1.1169e-02 L2_l1linf:1.1719e-02 L3_l1linf:1.1536e-02 L4_l1linf:1.1780e-02 L5_l1linf:1.2146e-02 L6_l1linf:1.2085e-02 L7_l1linf:1.2451e-02 L8_l1linf:1.2573e-02 L9_l1linf:1.3489e-02 L10_l1linf:1.3550e-02 L11_l1linf:1.4038e-02 L12_l1linf:1.2207e-02 L1_spectral:7.9200e-04 L2_spectral:7.9485e-04 L3_spectral:8.0182e-04 L4_spectral:7.9645e-04 L5_spectral:7.9254e-04 L6_spectral:7.9097e-04 L7_spectral:8.0043e-04 L8_spectral:7.9010e-04 L9_spectral:7.9255e-04 L10_spectral:7.9208e-04 L11_spectral:7.8564e-04 L12_spectral:7.1257e-04 train_time:244621ms step_avg:43.68ms +[2025-09-11 13:28:31] [Rank 0] step:5601/10000 train_time:246551ms step_avg:44.02ms +[2025-09-11 13:28:31] [Rank 0] step:5601/10000 train_time:246551ms step_avg:44.02ms +[2025-09-11 13:28:32] [Rank 0] step:5621/10000 train_time:247267ms step_avg:43.99ms +[2025-09-11 13:28:32] [Rank 0] step:5621/10000 train_time:247267ms step_avg:43.99ms +[2025-09-11 13:28:32] [Rank 0] step:5641/10000 train_time:247955ms step_avg:43.96ms +[2025-09-11 13:28:32] [Rank 0] step:5641/10000 train_time:247955ms step_avg:43.96ms +[2025-09-11 13:28:33] [Rank 0] step:5661/10000 train_time:248643ms step_avg:43.92ms +[2025-09-11 13:28:33] [Rank 0] step:5661/10000 train_time:248643ms step_avg:43.92ms +[2025-09-11 13:28:34] [Rank 0] step:5681/10000 train_time:249332ms step_avg:43.89ms +[2025-09-11 13:28:34] [Rank 0] step:5681/10000 
train_time:249332ms step_avg:43.89ms +[2025-09-11 13:28:34] [Rank 0] step:5701/10000 train_time:250022ms step_avg:43.86ms +[2025-09-11 13:28:34] [Rank 0] step:5701/10000 train_time:250022ms step_avg:43.86ms +[2025-09-11 13:28:35] [Rank 0] step:5721/10000 train_time:250711ms step_avg:43.82ms +[2025-09-11 13:28:35] [Rank 0] step:5721/10000 train_time:250711ms step_avg:43.82ms +[2025-09-11 13:28:36] [Rank 0] step:5741/10000 train_time:251400ms step_avg:43.79ms +[2025-09-11 13:28:36] [Rank 0] step:5741/10000 train_time:251400ms step_avg:43.79ms +[2025-09-11 13:28:36] [Rank 0] step:5761/10000 train_time:252090ms step_avg:43.76ms +[2025-09-11 13:28:36] [Rank 0] step:5761/10000 train_time:252090ms step_avg:43.76ms +[2025-09-11 13:28:37] [Rank 0] step:5781/10000 train_time:252779ms step_avg:43.73ms +[2025-09-11 13:28:37] [Rank 0] step:5781/10000 train_time:252779ms step_avg:43.73ms +[2025-09-11 13:28:38] [Rank 0] step:5801/10000 train_time:253470ms step_avg:43.69ms +[2025-09-11 13:28:38] [Rank 0] step:5801/10000 train_time:253470ms step_avg:43.69ms +[2025-09-11 13:28:38] [Rank 0] step:5821/10000 train_time:254158ms step_avg:43.66ms +[2025-09-11 13:28:38] [Rank 0] step:5821/10000 train_time:254158ms step_avg:43.66ms +[2025-09-11 13:28:39] [Rank 0] step:5841/10000 train_time:254848ms step_avg:43.63ms +[2025-09-11 13:28:39] [Rank 0] step:5841/10000 train_time:254848ms step_avg:43.63ms +[2025-09-11 13:28:40] [Rank 0] step:5861/10000 train_time:255536ms step_avg:43.60ms +[2025-09-11 13:28:40] [Rank 0] step:5861/10000 train_time:255536ms step_avg:43.60ms +[2025-09-11 13:28:41] [Rank 0] step:5881/10000 train_time:256225ms step_avg:43.57ms +[2025-09-11 13:28:41] [Rank 0] step:5881/10000 train_time:256225ms step_avg:43.57ms +[2025-09-11 13:28:41] [Rank 0] step:5901/10000 train_time:256913ms step_avg:43.54ms +[2025-09-11 13:28:41] [Rank 0] step:5901/10000 train_time:256913ms step_avg:43.54ms +[2025-09-11 13:28:42] [Rank 0] step:5921/10000 train_time:257606ms step_avg:43.51ms 
+[2025-09-11 13:28:42] [Rank 0] step:5921/10000 train_time:257606ms step_avg:43.51ms +[2025-09-11 13:28:43] [Rank 0] step:5941/10000 train_time:258297ms step_avg:43.48ms +[2025-09-11 13:28:43] [Rank 0] step:5941/10000 train_time:258297ms step_avg:43.48ms +[2025-09-11 13:28:43] [Rank 0] step:5961/10000 train_time:258986ms step_avg:43.45ms +[2025-09-11 13:28:43] [Rank 0] step:5961/10000 train_time:258986ms step_avg:43.45ms +[2025-09-11 13:28:44] [Rank 0] step:5981/10000 train_time:259675ms step_avg:43.42ms +[2025-09-11 13:28:44] [Rank 0] step:5981/10000 train_time:259675ms step_avg:43.42ms +[2025-09-11 13:28:45] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 13:28:45] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 13:28:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 13:28:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 13:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 13:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 13:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 13:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 13:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 13:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 13:28:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 13:28:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 13:28:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 13:28:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 13:28:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 13:28:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 13:28:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 13:28:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 13:28:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 13:28:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 13:28:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 13:28:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 13:28:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 13:28:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 13:28:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 13:28:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 13:28:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 13:28:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 13:28:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 13:28:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 13:28:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 13:28:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 13:28:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 13:28:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 13:28:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 13:28:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 13:28:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 13:28:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 13:28:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 13:28:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 13:28:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 13:28:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 13:28:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 13:28:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:28:56] [Rank 0] PRINT: step:6000/10000 val_loss:5.7963 total_sharp:7.6974e-02 L1_sharp:9.4801e-02 L2_sharp:1.1222e-01 L3_sharp:1.4041e-01 L4_sharp:1.7143e-01 L5_sharp:2.3867e-01 L6_sharp:3.6794e-01 L7_sharp:6.2804e-01 L8_sharp:9.6024e-01 L9_sharp:1.8525e+00 L10_sharp:2.4604e+00 L11_sharp:2.9240e+00 L12_sharp:2.9077e+00 total_fnorm:1.4531e+00 total_l1_linf:1.0560e+03 total_spectral:7.3047e-01 L1_fnorm:4.6631e-02 L2_fnorm:4.6631e-02 L3_fnorm:4.6875e-02 L4_fnorm:4.6875e-02 L5_fnorm:4.6631e-02 L6_fnorm:4.6875e-02 L7_fnorm:4.6631e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.6631e-02 L10_fnorm:4.6631e-02 L11_fnorm:4.7607e-02 L12_fnorm:4.4434e-02 L1_l1linf:1.0986e-02 L2_l1linf:1.1414e-02 L3_l1linf:1.1414e-02 L4_l1linf:1.1597e-02 L5_l1linf:1.1719e-02 L6_l1linf:1.2207e-02 L7_l1linf:1.2268e-02 L8_l1linf:1.2268e-02 L9_l1linf:1.2817e-02 L10_l1linf:1.3184e-02 L11_l1linf:1.3733e-02 L12_l1linf:1.2268e-02 L1_spectral:7.9650e-04 L2_spectral:7.9599e-04 L3_spectral:7.9943e-04 L4_spectral:7.9861e-04 L5_spectral:7.9354e-04 L6_spectral:7.9665e-04 L7_spectral:7.9017e-04 L8_spectral:7.8674e-04 L9_spectral:7.9795e-04 L10_spectral:7.8972e-04 L11_spectral:7.9380e-04 L12_spectral:7.2988e-04 train_time:260348ms step_avg:43.39ms +[2025-09-11 13:28:56] [Rank 0] PRINT: step:6000/10000 val_loss:5.7963 total_sharp:7.6974e-02 L1_sharp:9.4801e-02 L2_sharp:1.1222e-01 L3_sharp:1.4041e-01 L4_sharp:1.7143e-01 L5_sharp:2.3867e-01 L6_sharp:3.6794e-01 L7_sharp:6.2804e-01 L8_sharp:9.6024e-01 L9_sharp:1.8525e+00 L10_sharp:2.4604e+00 L11_sharp:2.9240e+00 L12_sharp:2.9077e+00 total_fnorm:1.4531e+00 total_l1_linf:1.0560e+03 total_spectral:7.3047e-01 L1_fnorm:4.6631e-02 L2_fnorm:4.6631e-02 L3_fnorm:4.6875e-02 L4_fnorm:4.6875e-02 L5_fnorm:4.6631e-02 L6_fnorm:4.6875e-02 L7_fnorm:4.6631e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.6631e-02 L10_fnorm:4.6631e-02 L11_fnorm:4.7607e-02 L12_fnorm:4.4434e-02 L1_l1linf:1.0986e-02 L2_l1linf:1.1414e-02 L3_l1linf:1.1414e-02 L4_l1linf:1.1597e-02 L5_l1linf:1.1719e-02 
L6_l1linf:1.2207e-02 L7_l1linf:1.2268e-02 L8_l1linf:1.2268e-02 L9_l1linf:1.2817e-02 L10_l1linf:1.3184e-02 L11_l1linf:1.3733e-02 L12_l1linf:1.2268e-02 L1_spectral:7.9650e-04 L2_spectral:7.9599e-04 L3_spectral:7.9943e-04 L4_spectral:7.9861e-04 L5_spectral:7.9354e-04 L6_spectral:7.9665e-04 L7_spectral:7.9017e-04 L8_spectral:7.8674e-04 L9_spectral:7.9795e-04 L10_spectral:7.8972e-04 L11_spectral:7.9380e-04 L12_spectral:7.2988e-04 train_time:260348ms step_avg:43.39ms +[2025-09-11 13:28:58] [Rank 0] step:6001/10000 train_time:262463ms step_avg:43.74ms +[2025-09-11 13:28:58] [Rank 0] step:6001/10000 train_time:262463ms step_avg:43.74ms +[2025-09-11 13:28:58] [Rank 0] step:6021/10000 train_time:263178ms step_avg:43.71ms +[2025-09-11 13:28:58] [Rank 0] step:6021/10000 train_time:263178ms step_avg:43.71ms +[2025-09-11 13:28:59] [Rank 0] step:6041/10000 train_time:263870ms step_avg:43.68ms +[2025-09-11 13:28:59] [Rank 0] step:6041/10000 train_time:263870ms step_avg:43.68ms +[2025-09-11 13:29:00] [Rank 0] step:6061/10000 train_time:264560ms step_avg:43.65ms +[2025-09-11 13:29:00] [Rank 0] step:6061/10000 train_time:264560ms step_avg:43.65ms +[2025-09-11 13:29:01] [Rank 0] step:6081/10000 train_time:265251ms step_avg:43.62ms +[2025-09-11 13:29:01] [Rank 0] step:6081/10000 train_time:265251ms step_avg:43.62ms +[2025-09-11 13:29:01] [Rank 0] step:6101/10000 train_time:265941ms step_avg:43.59ms +[2025-09-11 13:29:01] [Rank 0] step:6101/10000 train_time:265941ms step_avg:43.59ms +[2025-09-11 13:29:02] [Rank 0] step:6121/10000 train_time:266633ms step_avg:43.56ms +[2025-09-11 13:29:02] [Rank 0] step:6121/10000 train_time:266633ms step_avg:43.56ms +[2025-09-11 13:29:03] [Rank 0] step:6141/10000 train_time:267325ms step_avg:43.53ms +[2025-09-11 13:29:03] [Rank 0] step:6141/10000 train_time:267325ms step_avg:43.53ms +[2025-09-11 13:29:03] [Rank 0] step:6161/10000 train_time:268015ms step_avg:43.50ms +[2025-09-11 13:29:03] [Rank 0] step:6161/10000 train_time:268015ms step_avg:43.50ms 
+[2025-09-11 13:29:04] [Rank 0] step:6181/10000 train_time:268704ms step_avg:43.47ms +[2025-09-11 13:29:04] [Rank 0] step:6181/10000 train_time:268704ms step_avg:43.47ms +[2025-09-11 13:29:05] [Rank 0] step:6201/10000 train_time:269689ms step_avg:43.49ms +[2025-09-11 13:29:05] [Rank 0] step:6201/10000 train_time:269689ms step_avg:43.49ms +[2025-09-11 13:29:06] [Rank 0] step:6221/10000 train_time:270381ms step_avg:43.46ms +[2025-09-11 13:29:06] [Rank 0] step:6221/10000 train_time:270381ms step_avg:43.46ms +[2025-09-11 13:29:06] [Rank 0] step:6241/10000 train_time:271071ms step_avg:43.43ms +[2025-09-11 13:29:06] [Rank 0] step:6241/10000 train_time:271071ms step_avg:43.43ms +[2025-09-11 13:29:07] [Rank 0] step:6261/10000 train_time:272062ms step_avg:43.45ms +[2025-09-11 13:29:07] [Rank 0] step:6261/10000 train_time:272062ms step_avg:43.45ms +[2025-09-11 13:29:08] [Rank 0] step:6281/10000 train_time:272753ms step_avg:43.43ms +[2025-09-11 13:29:08] [Rank 0] step:6281/10000 train_time:272753ms step_avg:43.43ms +[2025-09-11 13:29:09] [Rank 0] step:6301/10000 train_time:273441ms step_avg:43.40ms +[2025-09-11 13:29:09] [Rank 0] step:6301/10000 train_time:273441ms step_avg:43.40ms +[2025-09-11 13:29:09] [Rank 0] step:6321/10000 train_time:274140ms step_avg:43.37ms +[2025-09-11 13:29:09] [Rank 0] step:6321/10000 train_time:274140ms step_avg:43.37ms +[2025-09-11 13:29:10] [Rank 0] step:6341/10000 train_time:274831ms step_avg:43.34ms +[2025-09-11 13:29:10] [Rank 0] step:6341/10000 train_time:274831ms step_avg:43.34ms +[2025-09-11 13:29:11] [Rank 0] step:6361/10000 train_time:275522ms step_avg:43.31ms +[2025-09-11 13:29:11] [Rank 0] step:6361/10000 train_time:275522ms step_avg:43.31ms +[2025-09-11 13:29:12] [Rank 0] step:6381/10000 train_time:276214ms step_avg:43.29ms +[2025-09-11 13:29:12] [Rank 0] step:6381/10000 train_time:276214ms step_avg:43.29ms +[2025-09-11 13:29:12] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 13:29:12] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 13:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 13:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 13:29:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 13:29:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 13:29:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:29:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:29:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 13:29:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 13:29:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 13:29:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 13:29:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 13:29:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 13:29:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 13:29:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 13:29:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 13:29:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 13:29:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 13:29:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 13:29:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 13:29:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 13:29:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 13:29:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 13:29:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 13:29:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 13:29:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 13:29:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 13:29:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 13:29:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 13:29:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 13:29:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 13:29:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 13:29:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 13:29:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 13:29:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 13:29:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 13:29:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 13:29:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 13:29:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 13:29:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 13:29:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 13:29:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 13:29:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 13:29:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 13:29:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 13:29:24] [Rank 0] PRINT: step:6400/10000 val_loss:5.7630 total_sharp:9.2080e-02 L1_sharp:9.0423e-02 L2_sharp:1.1647e-01 L3_sharp:1.2939e-01 L4_sharp:1.4515e-01 L5_sharp:2.0595e-01 L6_sharp:3.7449e-01 L7_sharp:6.8332e-01 L8_sharp:1.1707e+00 L9_sharp:2.4286e+00 L10_sharp:3.3852e+00 L11_sharp:3.1602e+00 L12_sharp:2.5036e+00 total_fnorm:1.2578e+00 total_l1_linf:8.6000e+02 total_spectral:6.3281e-01 L1_fnorm:4.0527e-02 L2_fnorm:4.0771e-02 L3_fnorm:4.0771e-02 L4_fnorm:4.1016e-02 L5_fnorm:4.0771e-02 L6_fnorm:4.0771e-02 L7_fnorm:4.0771e-02 L8_fnorm:4.0771e-02 L9_fnorm:4.1016e-02 L10_fnorm:4.0771e-02 L11_fnorm:4.1504e-02 L12_fnorm:3.8574e-02 L1_l1linf:8.9722e-03 L2_l1linf:9.2163e-03 L3_l1linf:9.5215e-03 L4_l1linf:9.5825e-03 L5_l1linf:9.7656e-03 L6_l1linf:1.0132e-02 L7_l1linf:1.0254e-02 L8_l1linf:1.0803e-02 L9_l1linf:1.1108e-02 L10_l1linf:1.1414e-02 L11_l1linf:1.1780e-02 L12_l1linf:1.0254e-02 L1_spectral:7.1859e-04 L2_spectral:7.2788e-04 L3_spectral:7.2313e-04 L4_spectral:7.2302e-04 L5_spectral:7.2251e-04 L6_spectral:7.2266e-04 L7_spectral:7.1498e-04 L8_spectral:7.1556e-04 L9_spectral:7.1422e-04 L10_spectral:7.0942e-04 L11_spectral:7.0831e-04 L12_spectral:6.4329e-04 train_time:276885ms step_avg:43.26ms +[2025-09-11 13:29:24] [Rank 0] PRINT: step:6400/10000 
val_loss:5.7630 total_sharp:9.2080e-02 L1_sharp:9.0423e-02 L2_sharp:1.1647e-01 L3_sharp:1.2939e-01 L4_sharp:1.4515e-01 L5_sharp:2.0595e-01 L6_sharp:3.7449e-01 L7_sharp:6.8332e-01 L8_sharp:1.1707e+00 L9_sharp:2.4286e+00 L10_sharp:3.3852e+00 L11_sharp:3.1602e+00 L12_sharp:2.5036e+00 total_fnorm:1.2578e+00 total_l1_linf:8.6000e+02 total_spectral:6.3281e-01 L1_fnorm:4.0527e-02 L2_fnorm:4.0771e-02 L3_fnorm:4.0771e-02 L4_fnorm:4.1016e-02 L5_fnorm:4.0771e-02 L6_fnorm:4.0771e-02 L7_fnorm:4.0771e-02 L8_fnorm:4.0771e-02 L9_fnorm:4.1016e-02 L10_fnorm:4.0771e-02 L11_fnorm:4.1504e-02 L12_fnorm:3.8574e-02 L1_l1linf:8.9722e-03 L2_l1linf:9.2163e-03 L3_l1linf:9.5215e-03 L4_l1linf:9.5825e-03 L5_l1linf:9.7656e-03 L6_l1linf:1.0132e-02 L7_l1linf:1.0254e-02 L8_l1linf:1.0803e-02 L9_l1linf:1.1108e-02 L10_l1linf:1.1414e-02 L11_l1linf:1.1780e-02 L12_l1linf:1.0254e-02 L1_spectral:7.1859e-04 L2_spectral:7.2788e-04 L3_spectral:7.2313e-04 L4_spectral:7.2302e-04 L5_spectral:7.2251e-04 L6_spectral:7.2266e-04 L7_spectral:7.1498e-04 L8_spectral:7.1556e-04 L9_spectral:7.1422e-04 L10_spectral:7.0942e-04 L11_spectral:7.0831e-04 L12_spectral:6.4329e-04 train_time:276885ms step_avg:43.26ms +[2025-09-11 13:29:26] [Rank 0] step:6401/10000 train_time:278879ms step_avg:43.57ms +[2025-09-11 13:29:26] [Rank 0] step:6401/10000 train_time:278879ms step_avg:43.57ms +[2025-09-11 13:29:26] [Rank 0] step:6421/10000 train_time:279588ms step_avg:43.54ms +[2025-09-11 13:29:26] [Rank 0] step:6421/10000 train_time:279588ms step_avg:43.54ms +[2025-09-11 13:29:27] [Rank 0] step:6441/10000 train_time:280279ms step_avg:43.51ms +[2025-09-11 13:29:27] [Rank 0] step:6441/10000 train_time:280279ms step_avg:43.51ms +[2025-09-11 13:29:28] [Rank 0] step:6461/10000 train_time:280970ms step_avg:43.49ms +[2025-09-11 13:29:28] [Rank 0] step:6461/10000 train_time:280970ms step_avg:43.49ms +[2025-09-11 13:29:28] [Rank 0] step:6481/10000 train_time:281663ms step_avg:43.46ms +[2025-09-11 13:29:28] [Rank 0] step:6481/10000 
train_time:281663ms step_avg:43.46ms +[2025-09-11 13:29:29] [Rank 0] step:6501/10000 train_time:282356ms step_avg:43.43ms +[2025-09-11 13:29:29] [Rank 0] step:6501/10000 train_time:282356ms step_avg:43.43ms +[2025-09-11 13:29:30] [Rank 0] step:6521/10000 train_time:283048ms step_avg:43.41ms +[2025-09-11 13:29:30] [Rank 0] step:6521/10000 train_time:283048ms step_avg:43.41ms +[2025-09-11 13:29:30] [Rank 0] step:6541/10000 train_time:283738ms step_avg:43.38ms +[2025-09-11 13:29:30] [Rank 0] step:6541/10000 train_time:283738ms step_avg:43.38ms +[2025-09-11 13:29:31] [Rank 0] step:6561/10000 train_time:284429ms step_avg:43.35ms +[2025-09-11 13:29:31] [Rank 0] step:6561/10000 train_time:284429ms step_avg:43.35ms +[2025-09-11 13:29:32] [Rank 0] step:6581/10000 train_time:285120ms step_avg:43.32ms +[2025-09-11 13:29:32] [Rank 0] step:6581/10000 train_time:285120ms step_avg:43.32ms +[2025-09-11 13:29:33] [Rank 0] step:6601/10000 train_time:285812ms step_avg:43.30ms +[2025-09-11 13:29:33] [Rank 0] step:6601/10000 train_time:285812ms step_avg:43.30ms +[2025-09-11 13:29:33] [Rank 0] step:6621/10000 train_time:286502ms step_avg:43.27ms +[2025-09-11 13:29:33] [Rank 0] step:6621/10000 train_time:286502ms step_avg:43.27ms +[2025-09-11 13:29:34] [Rank 0] step:6641/10000 train_time:287194ms step_avg:43.25ms +[2025-09-11 13:29:34] [Rank 0] step:6641/10000 train_time:287194ms step_avg:43.25ms +[2025-09-11 13:29:35] [Rank 0] step:6661/10000 train_time:287887ms step_avg:43.22ms +[2025-09-11 13:29:35] [Rank 0] step:6661/10000 train_time:287887ms step_avg:43.22ms +[2025-09-11 13:29:35] [Rank 0] step:6681/10000 train_time:288585ms step_avg:43.19ms +[2025-09-11 13:29:35] [Rank 0] step:6681/10000 train_time:288585ms step_avg:43.19ms +[2025-09-11 13:29:36] [Rank 0] step:6701/10000 train_time:289283ms step_avg:43.17ms +[2025-09-11 13:29:36] [Rank 0] step:6701/10000 train_time:289283ms step_avg:43.17ms +[2025-09-11 13:29:37] [Rank 0] step:6721/10000 train_time:289981ms step_avg:43.15ms 
+[2025-09-11 13:29:37] [Rank 0] step:6721/10000 train_time:289981ms step_avg:43.15ms +[2025-09-11 13:29:37] [Rank 0] step:6741/10000 train_time:290679ms step_avg:43.12ms +[2025-09-11 13:29:37] [Rank 0] step:6741/10000 train_time:290679ms step_avg:43.12ms +[2025-09-11 13:29:38] [Rank 0] step:6761/10000 train_time:291375ms step_avg:43.10ms +[2025-09-11 13:29:38] [Rank 0] step:6761/10000 train_time:291375ms step_avg:43.10ms +[2025-09-11 13:29:39] [Rank 0] step:6781/10000 train_time:292075ms step_avg:43.07ms +[2025-09-11 13:29:39] [Rank 0] step:6781/10000 train_time:292075ms step_avg:43.07ms +[2025-09-11 13:29:39] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 13:29:39] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 13:29:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 13:29:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 13:29:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 13:29:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 13:29:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:29:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:29:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 13:29:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 13:29:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 13:29:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 13:29:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 13:29:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 13:29:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 13:29:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 13:29:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 13:29:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 13:29:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 13:29:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 13:29:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 13:29:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 13:29:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 13:29:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 13:29:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 13:29:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 13:29:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 13:29:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 13:29:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 13:29:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 13:29:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 13:29:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 13:29:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 13:29:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 13:29:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 13:29:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 13:29:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 13:29:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 13:29:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 13:29:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 13:29:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 13:29:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 13:29:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 13:29:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 13:29:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 13:29:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:29:51] [Rank 0] PRINT: step:6800/10000 val_loss:5.7322 total_sharp:1.0032e-01 L1_sharp:9.6405e-02 L2_sharp:1.1161e-01 L3_sharp:1.4273e-01 L4_sharp:2.0394e-01 L5_sharp:2.4999e-01 L6_sharp:3.5928e-01 L7_sharp:5.9646e-01 L8_sharp:1.0413e+00 L9_sharp:2.1983e+00 L10_sharp:3.9791e+00 L11_sharp:5.7986e+00 L12_sharp:4.9079e+00 total_fnorm:1.1172e+00 total_l1_linf:7.1600e+02 total_spectral:5.6250e-01 L1_fnorm:3.4424e-02 L2_fnorm:3.4424e-02 L3_fnorm:3.4668e-02 L4_fnorm:3.4912e-02 L5_fnorm:3.4668e-02 L6_fnorm:3.4668e-02 L7_fnorm:3.4424e-02 L8_fnorm:3.4668e-02 L9_fnorm:3.4668e-02 L10_fnorm:3.4668e-02 L11_fnorm:3.5156e-02 L12_fnorm:3.2715e-02 L1_l1linf:7.0496e-03 L2_l1linf:7.1411e-03 L3_l1linf:7.4768e-03 L4_l1linf:7.6599e-03 L5_l1linf:7.6904e-03 L6_l1linf:8.3008e-03 L7_l1linf:8.2397e-03 L8_l1linf:8.4839e-03 L9_l1linf:8.6670e-03 L10_l1linf:9.2773e-03 L11_l1linf:9.4604e-03 L12_l1linf:8.3618e-03 L1_spectral:6.4183e-04 L2_spectral:6.4440e-04 L3_spectral:6.4158e-04 L4_spectral:6.4446e-04 L5_spectral:6.3941e-04 L6_spectral:6.4399e-04 L7_spectral:6.3546e-04 L8_spectral:6.4566e-04 L9_spectral:6.3521e-04 L10_spectral:6.2868e-04 L11_spectral:6.2637e-04 L12_spectral:5.6046e-04 train_time:292752ms step_avg:43.05ms +[2025-09-11 13:29:51] [Rank 0] PRINT: step:6800/10000 val_loss:5.7322 total_sharp:1.0032e-01 L1_sharp:9.6405e-02 L2_sharp:1.1161e-01 L3_sharp:1.4273e-01 L4_sharp:2.0394e-01 L5_sharp:2.4999e-01 L6_sharp:3.5928e-01 L7_sharp:5.9646e-01 L8_sharp:1.0413e+00 L9_sharp:2.1983e+00 L10_sharp:3.9791e+00 L11_sharp:5.7986e+00 L12_sharp:4.9079e+00 total_fnorm:1.1172e+00 total_l1_linf:7.1600e+02 total_spectral:5.6250e-01 L1_fnorm:3.4424e-02 L2_fnorm:3.4424e-02 L3_fnorm:3.4668e-02 L4_fnorm:3.4912e-02 L5_fnorm:3.4668e-02 L6_fnorm:3.4668e-02 L7_fnorm:3.4424e-02 L8_fnorm:3.4668e-02 L9_fnorm:3.4668e-02 L10_fnorm:3.4668e-02 L11_fnorm:3.5156e-02 L12_fnorm:3.2715e-02 L1_l1linf:7.0496e-03 L2_l1linf:7.1411e-03 L3_l1linf:7.4768e-03 L4_l1linf:7.6599e-03 L5_l1linf:7.6904e-03 
L6_l1linf:8.3008e-03 L7_l1linf:8.2397e-03 L8_l1linf:8.4839e-03 L9_l1linf:8.6670e-03 L10_l1linf:9.2773e-03 L11_l1linf:9.4604e-03 L12_l1linf:8.3618e-03 L1_spectral:6.4183e-04 L2_spectral:6.4440e-04 L3_spectral:6.4158e-04 L4_spectral:6.4446e-04 L5_spectral:6.3941e-04 L6_spectral:6.4399e-04 L7_spectral:6.3546e-04 L8_spectral:6.4566e-04 L9_spectral:6.3521e-04 L10_spectral:6.2868e-04 L11_spectral:6.2637e-04 L12_spectral:5.6046e-04 train_time:292752ms step_avg:43.05ms +[2025-09-11 13:29:53] [Rank 0] step:6801/10000 train_time:294738ms step_avg:43.34ms +[2025-09-11 13:29:53] [Rank 0] step:6801/10000 train_time:294738ms step_avg:43.34ms +[2025-09-11 13:29:53] [Rank 0] step:6821/10000 train_time:295451ms step_avg:43.31ms +[2025-09-11 13:29:53] [Rank 0] step:6821/10000 train_time:295451ms step_avg:43.31ms +[2025-09-11 13:29:54] [Rank 0] step:6841/10000 train_time:296152ms step_avg:43.29ms +[2025-09-11 13:29:54] [Rank 0] step:6841/10000 train_time:296152ms step_avg:43.29ms +[2025-09-11 13:29:55] [Rank 0] step:6861/10000 train_time:296857ms step_avg:43.27ms +[2025-09-11 13:29:55] [Rank 0] step:6861/10000 train_time:296857ms step_avg:43.27ms +[2025-09-11 13:29:55] [Rank 0] step:6881/10000 train_time:297557ms step_avg:43.24ms +[2025-09-11 13:29:55] [Rank 0] step:6881/10000 train_time:297557ms step_avg:43.24ms +[2025-09-11 13:29:56] [Rank 0] step:6901/10000 train_time:298254ms step_avg:43.22ms +[2025-09-11 13:29:56] [Rank 0] step:6901/10000 train_time:298254ms step_avg:43.22ms +[2025-09-11 13:29:57] [Rank 0] step:6921/10000 train_time:298951ms step_avg:43.19ms +[2025-09-11 13:29:57] [Rank 0] step:6921/10000 train_time:298951ms step_avg:43.19ms +[2025-09-11 13:29:57] [Rank 0] step:6941/10000 train_time:299649ms step_avg:43.17ms +[2025-09-11 13:29:57] [Rank 0] step:6941/10000 train_time:299649ms step_avg:43.17ms +[2025-09-11 13:29:58] [Rank 0] step:6961/10000 train_time:300348ms step_avg:43.15ms +[2025-09-11 13:29:58] [Rank 0] step:6961/10000 train_time:300348ms step_avg:43.15ms 
+[2025-09-11 13:29:59] [Rank 0] step:6981/10000 train_time:301056ms step_avg:43.13ms +[2025-09-11 13:29:59] [Rank 0] step:6981/10000 train_time:301056ms step_avg:43.13ms +[2025-09-11 13:30:00] [Rank 0] step:7001/10000 train_time:301754ms step_avg:43.10ms +[2025-09-11 13:30:00] [Rank 0] step:7001/10000 train_time:301754ms step_avg:43.10ms +[2025-09-11 13:30:00] [Rank 0] step:7021/10000 train_time:302453ms step_avg:43.08ms +[2025-09-11 13:30:00] [Rank 0] step:7021/10000 train_time:302453ms step_avg:43.08ms +[2025-09-11 13:30:01] [Rank 0] step:7041/10000 train_time:303210ms step_avg:43.06ms +[2025-09-11 13:30:01] [Rank 0] step:7041/10000 train_time:303210ms step_avg:43.06ms +[2025-09-11 13:30:02] [Rank 0] step:7061/10000 train_time:303921ms step_avg:43.04ms +[2025-09-11 13:30:02] [Rank 0] step:7061/10000 train_time:303921ms step_avg:43.04ms +[2025-09-11 13:30:03] [Rank 0] step:7081/10000 train_time:304682ms step_avg:43.03ms +[2025-09-11 13:30:03] [Rank 0] step:7081/10000 train_time:304682ms step_avg:43.03ms +[2025-09-11 13:30:03] [Rank 0] step:7101/10000 train_time:305381ms step_avg:43.01ms +[2025-09-11 13:30:03] [Rank 0] step:7101/10000 train_time:305381ms step_avg:43.01ms +[2025-09-11 13:30:04] [Rank 0] step:7121/10000 train_time:306081ms step_avg:42.98ms +[2025-09-11 13:30:04] [Rank 0] step:7121/10000 train_time:306081ms step_avg:42.98ms +[2025-09-11 13:30:05] [Rank 0] step:7141/10000 train_time:306779ms step_avg:42.96ms +[2025-09-11 13:30:05] [Rank 0] step:7141/10000 train_time:306779ms step_avg:42.96ms +[2025-09-11 13:30:05] [Rank 0] step:7161/10000 train_time:307479ms step_avg:42.94ms +[2025-09-11 13:30:05] [Rank 0] step:7161/10000 train_time:307479ms step_avg:42.94ms +[2025-09-11 13:30:06] [Rank 0] step:7181/10000 train_time:308176ms step_avg:42.92ms +[2025-09-11 13:30:06] [Rank 0] step:7181/10000 train_time:308176ms step_avg:42.92ms +[2025-09-11 13:30:07] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 13:30:07] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 13:30:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 13:30:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 13:30:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 13:30:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 13:30:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:30:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:30:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 13:30:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 13:30:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 13:30:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 13:30:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 13:30:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 13:30:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 13:30:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 13:30:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 13:30:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 13:30:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 13:30:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 13:30:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 13:30:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 13:30:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 13:30:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 13:30:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 13:30:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 13:30:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 13:30:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 13:30:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 13:30:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 13:30:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 13:30:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 13:30:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 13:30:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 13:30:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 13:30:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 13:30:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 13:30:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 13:30:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 13:30:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 13:30:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 13:30:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 13:30:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 13:30:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 13:30:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 13:30:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 13:30:18] [Rank 0] PRINT: step:7200/10000 val_loss:5.7099 total_sharp:7.4942e-02 L1_sharp:6.1584e-02 L2_sharp:8.0045e-02 L3_sharp:8.9572e-02 L4_sharp:1.1724e-01 L5_sharp:1.5961e-01 L6_sharp:2.5779e-01 L7_sharp:3.7579e-01 L8_sharp:6.8227e-01 L9_sharp:1.7213e+00 L10_sharp:3.0184e+00 L11_sharp:4.2948e+00 L12_sharp:2.8578e+00 total_fnorm:8.9453e-01 total_l1_linf:5.2800e+02 total_spectral:4.4922e-01 L1_fnorm:2.8931e-02 L2_fnorm:2.9053e-02 L3_fnorm:2.9419e-02 L4_fnorm:2.9541e-02 L5_fnorm:2.9419e-02 L6_fnorm:2.9541e-02 L7_fnorm:2.9419e-02 L8_fnorm:2.9541e-02 L9_fnorm:2.9419e-02 L10_fnorm:2.9297e-02 L11_fnorm:2.9663e-02 L12_fnorm:2.7222e-02 L1_l1linf:5.6152e-03 L2_l1linf:5.7068e-03 L3_l1linf:6.0120e-03 L4_l1linf:6.1646e-03 L5_l1linf:6.1340e-03 L6_l1linf:6.4392e-03 L7_l1linf:6.5308e-03 L8_l1linf:6.8054e-03 L9_l1linf:6.8665e-03 L10_l1linf:7.2327e-03 L11_l1linf:7.6599e-03 L12_l1linf:6.7444e-03 L1_spectral:5.6594e-04 L2_spectral:5.6378e-04 L3_spectral:5.6544e-04 L4_spectral:5.6737e-04 L5_spectral:5.6240e-04 L6_spectral:5.6346e-04 L7_spectral:5.6114e-04 L8_spectral:5.6188e-04 L9_spectral:5.5217e-04 L10_spectral:5.4537e-04 L11_spectral:5.4029e-04 L12_spectral:4.7565e-04 train_time:308855ms step_avg:42.90ms +[2025-09-11 13:30:18] [Rank 0] PRINT: step:7200/10000 
val_loss:5.7099 total_sharp:7.4942e-02 L1_sharp:6.1584e-02 L2_sharp:8.0045e-02 L3_sharp:8.9572e-02 L4_sharp:1.1724e-01 L5_sharp:1.5961e-01 L6_sharp:2.5779e-01 L7_sharp:3.7579e-01 L8_sharp:6.8227e-01 L9_sharp:1.7213e+00 L10_sharp:3.0184e+00 L11_sharp:4.2948e+00 L12_sharp:2.8578e+00 total_fnorm:8.9453e-01 total_l1_linf:5.2800e+02 total_spectral:4.4922e-01 L1_fnorm:2.8931e-02 L2_fnorm:2.9053e-02 L3_fnorm:2.9419e-02 L4_fnorm:2.9541e-02 L5_fnorm:2.9419e-02 L6_fnorm:2.9541e-02 L7_fnorm:2.9419e-02 L8_fnorm:2.9541e-02 L9_fnorm:2.9419e-02 L10_fnorm:2.9297e-02 L11_fnorm:2.9663e-02 L12_fnorm:2.7222e-02 L1_l1linf:5.6152e-03 L2_l1linf:5.7068e-03 L3_l1linf:6.0120e-03 L4_l1linf:6.1646e-03 L5_l1linf:6.1340e-03 L6_l1linf:6.4392e-03 L7_l1linf:6.5308e-03 L8_l1linf:6.8054e-03 L9_l1linf:6.8665e-03 L10_l1linf:7.2327e-03 L11_l1linf:7.6599e-03 L12_l1linf:6.7444e-03 L1_spectral:5.6594e-04 L2_spectral:5.6378e-04 L3_spectral:5.6544e-04 L4_spectral:5.6737e-04 L5_spectral:5.6240e-04 L6_spectral:5.6346e-04 L7_spectral:5.6114e-04 L8_spectral:5.6188e-04 L9_spectral:5.5217e-04 L10_spectral:5.4537e-04 L11_spectral:5.4029e-04 L12_spectral:4.7565e-04 train_time:308855ms step_avg:42.90ms +[2025-09-11 13:30:20] [Rank 0] step:7201/10000 train_time:310815ms step_avg:43.16ms +[2025-09-11 13:30:20] [Rank 0] step:7201/10000 train_time:310815ms step_avg:43.16ms +[2025-09-11 13:30:20] [Rank 0] step:7221/10000 train_time:311542ms step_avg:43.14ms +[2025-09-11 13:30:20] [Rank 0] step:7221/10000 train_time:311542ms step_avg:43.14ms +[2025-09-11 13:30:21] [Rank 0] step:7241/10000 train_time:312240ms step_avg:43.12ms +[2025-09-11 13:30:21] [Rank 0] step:7241/10000 train_time:312240ms step_avg:43.12ms +[2025-09-11 13:30:22] [Rank 0] step:7261/10000 train_time:312942ms step_avg:43.10ms +[2025-09-11 13:30:22] [Rank 0] step:7261/10000 train_time:312942ms step_avg:43.10ms +[2025-09-11 13:30:22] [Rank 0] step:7281/10000 train_time:313646ms step_avg:43.08ms +[2025-09-11 13:30:22] [Rank 0] step:7281/10000 
train_time:313646ms step_avg:43.08ms +[2025-09-11 13:30:23] [Rank 0] step:7301/10000 train_time:314345ms step_avg:43.06ms +[2025-09-11 13:30:23] [Rank 0] step:7301/10000 train_time:314345ms step_avg:43.06ms +[2025-09-11 13:30:24] [Rank 0] step:7321/10000 train_time:315045ms step_avg:43.03ms +[2025-09-11 13:30:24] [Rank 0] step:7321/10000 train_time:315045ms step_avg:43.03ms +[2025-09-11 13:30:25] [Rank 0] step:7341/10000 train_time:315746ms step_avg:43.01ms +[2025-09-11 13:30:25] [Rank 0] step:7341/10000 train_time:315746ms step_avg:43.01ms +[2025-09-11 13:30:25] [Rank 0] step:7361/10000 train_time:316445ms step_avg:42.99ms +[2025-09-11 13:30:25] [Rank 0] step:7361/10000 train_time:316445ms step_avg:42.99ms +[2025-09-11 13:30:26] [Rank 0] step:7381/10000 train_time:317146ms step_avg:42.97ms +[2025-09-11 13:30:26] [Rank 0] step:7381/10000 train_time:317146ms step_avg:42.97ms +[2025-09-11 13:30:27] [Rank 0] step:7401/10000 train_time:317844ms step_avg:42.95ms +[2025-09-11 13:30:27] [Rank 0] step:7401/10000 train_time:317844ms step_avg:42.95ms +[2025-09-11 13:30:27] [Rank 0] step:7421/10000 train_time:318543ms step_avg:42.92ms +[2025-09-11 13:30:27] [Rank 0] step:7421/10000 train_time:318543ms step_avg:42.92ms +[2025-09-11 13:30:28] [Rank 0] step:7441/10000 train_time:319244ms step_avg:42.90ms +[2025-09-11 13:30:28] [Rank 0] step:7441/10000 train_time:319244ms step_avg:42.90ms +[2025-09-11 13:30:29] [Rank 0] step:7461/10000 train_time:319946ms step_avg:42.88ms +[2025-09-11 13:30:29] [Rank 0] step:7461/10000 train_time:319946ms step_avg:42.88ms +[2025-09-11 13:30:29] [Rank 0] step:7481/10000 train_time:320647ms step_avg:42.86ms +[2025-09-11 13:30:29] [Rank 0] step:7481/10000 train_time:320647ms step_avg:42.86ms +[2025-09-11 13:30:30] [Rank 0] step:7501/10000 train_time:321348ms step_avg:42.84ms +[2025-09-11 13:30:30] [Rank 0] step:7501/10000 train_time:321348ms step_avg:42.84ms +[2025-09-11 13:30:31] [Rank 0] step:7521/10000 train_time:322049ms step_avg:42.82ms 
+[2025-09-11 13:30:31] [Rank 0] step:7521/10000 train_time:322049ms step_avg:42.82ms +[2025-09-11 13:30:32] [Rank 0] step:7541/10000 train_time:322748ms step_avg:42.80ms +[2025-09-11 13:30:32] [Rank 0] step:7541/10000 train_time:322748ms step_avg:42.80ms +[2025-09-11 13:30:32] [Rank 0] step:7561/10000 train_time:323451ms step_avg:42.78ms +[2025-09-11 13:30:32] [Rank 0] step:7561/10000 train_time:323451ms step_avg:42.78ms +[2025-09-11 13:30:33] [Rank 0] step:7581/10000 train_time:324153ms step_avg:42.76ms +[2025-09-11 13:30:33] [Rank 0] step:7581/10000 train_time:324153ms step_avg:42.76ms +[2025-09-11 13:30:34] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 13:30:34] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 13:30:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 13:30:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 13:30:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 13:30:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 13:30:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:30:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:30:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 13:30:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 13:30:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 13:30:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 13:30:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 13:30:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 13:30:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 13:30:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 13:30:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 13:30:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 13:30:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 13:30:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 13:30:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 13:30:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 13:30:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 13:30:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 13:30:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 13:30:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 13:30:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 13:30:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 13:30:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 13:30:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 13:30:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 13:30:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 13:30:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 13:30:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 13:30:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 13:30:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 13:30:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 13:30:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 13:30:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 13:30:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 13:30:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 13:30:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 13:30:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 13:30:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 13:30:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 13:30:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:30:44] [Rank 0] PRINT: step:7600/10000 val_loss:5.6925 total_sharp:8.9907e-02 L1_sharp:5.2519e-02 L2_sharp:7.5335e-02 L3_sharp:8.5659e-02 L4_sharp:1.1580e-01 L5_sharp:1.7251e-01 L6_sharp:2.9251e-01 L7_sharp:5.2009e-01 L8_sharp:7.6172e-01 L9_sharp:1.8447e+00 L10_sharp:2.6094e+00 L11_sharp:3.6086e+00 L12_sharp:3.4746e+00 total_fnorm:6.6406e-01 total_l1_linf:3.7000e+02 total_spectral:3.3398e-01 L1_fnorm:2.3682e-02 L2_fnorm:2.3804e-02 L3_fnorm:2.4048e-02 L4_fnorm:2.4170e-02 L5_fnorm:2.4292e-02 L6_fnorm:2.4170e-02 L7_fnorm:2.4048e-02 L8_fnorm:2.4170e-02 L9_fnorm:2.4048e-02 L10_fnorm:2.4048e-02 L11_fnorm:2.4414e-02 L12_fnorm:2.2217e-02 L1_l1linf:4.3030e-03 L2_l1linf:4.4556e-03 L3_l1linf:4.4861e-03 L4_l1linf:4.6082e-03 L5_l1linf:4.7302e-03 L6_l1linf:4.9133e-03 L7_l1linf:5.0659e-03 L8_l1linf:5.1880e-03 L9_l1linf:5.4626e-03 L10_l1linf:5.6152e-03 L11_l1linf:6.2561e-03 L12_l1linf:5.2490e-03 L1_spectral:4.8355e-04 L2_spectral:4.8023e-04 L3_spectral:4.8174e-04 L4_spectral:4.8800e-04 L5_spectral:4.8631e-04 L6_spectral:4.8388e-04 L7_spectral:4.7641e-04 L8_spectral:4.8045e-04 L9_spectral:4.6754e-04 L10_spectral:4.6128e-04 L11_spectral:4.5752e-04 L12_spectral:4.0309e-04 train_time:324835ms step_avg:42.74ms +[2025-09-11 13:30:44] [Rank 0] PRINT: step:7600/10000 val_loss:5.6925 total_sharp:8.9907e-02 L1_sharp:5.2519e-02 L2_sharp:7.5335e-02 L3_sharp:8.5659e-02 L4_sharp:1.1580e-01 L5_sharp:1.7251e-01 L6_sharp:2.9251e-01 L7_sharp:5.2009e-01 L8_sharp:7.6172e-01 L9_sharp:1.8447e+00 L10_sharp:2.6094e+00 L11_sharp:3.6086e+00 L12_sharp:3.4746e+00 total_fnorm:6.6406e-01 total_l1_linf:3.7000e+02 total_spectral:3.3398e-01 L1_fnorm:2.3682e-02 L2_fnorm:2.3804e-02 L3_fnorm:2.4048e-02 L4_fnorm:2.4170e-02 L5_fnorm:2.4292e-02 L6_fnorm:2.4170e-02 L7_fnorm:2.4048e-02 L8_fnorm:2.4170e-02 L9_fnorm:2.4048e-02 L10_fnorm:2.4048e-02 L11_fnorm:2.4414e-02 L12_fnorm:2.2217e-02 L1_l1linf:4.3030e-03 L2_l1linf:4.4556e-03 L3_l1linf:4.4861e-03 L4_l1linf:4.6082e-03 L5_l1linf:4.7302e-03 
L6_l1linf:4.9133e-03 L7_l1linf:5.0659e-03 L8_l1linf:5.1880e-03 L9_l1linf:5.4626e-03 L10_l1linf:5.6152e-03 L11_l1linf:6.2561e-03 L12_l1linf:5.2490e-03 L1_spectral:4.8355e-04 L2_spectral:4.8023e-04 L3_spectral:4.8174e-04 L4_spectral:4.8800e-04 L5_spectral:4.8631e-04 L6_spectral:4.8388e-04 L7_spectral:4.7641e-04 L8_spectral:4.8045e-04 L9_spectral:4.6754e-04 L10_spectral:4.6128e-04 L11_spectral:4.5752e-04 L12_spectral:4.0309e-04 train_time:324835ms step_avg:42.74ms +[2025-09-11 13:30:47] [Rank 0] step:7601/10000 train_time:326934ms step_avg:43.01ms +[2025-09-11 13:30:47] [Rank 0] step:7601/10000 train_time:326934ms step_avg:43.01ms +[2025-09-11 13:30:47] [Rank 0] step:7621/10000 train_time:327664ms step_avg:42.99ms +[2025-09-11 13:30:47] [Rank 0] step:7621/10000 train_time:327664ms step_avg:42.99ms +[2025-09-11 13:30:48] [Rank 0] step:7641/10000 train_time:328470ms step_avg:42.99ms +[2025-09-11 13:30:48] [Rank 0] step:7641/10000 train_time:328470ms step_avg:42.99ms +[2025-09-11 13:30:49] [Rank 0] step:7661/10000 train_time:329170ms step_avg:42.97ms +[2025-09-11 13:30:49] [Rank 0] step:7661/10000 train_time:329170ms step_avg:42.97ms +[2025-09-11 13:30:50] [Rank 0] step:7681/10000 train_time:329871ms step_avg:42.95ms +[2025-09-11 13:30:50] [Rank 0] step:7681/10000 train_time:329871ms step_avg:42.95ms +[2025-09-11 13:30:50] [Rank 0] step:7701/10000 train_time:330573ms step_avg:42.93ms +[2025-09-11 13:30:50] [Rank 0] step:7701/10000 train_time:330573ms step_avg:42.93ms +[2025-09-11 13:30:51] [Rank 0] step:7721/10000 train_time:331274ms step_avg:42.91ms +[2025-09-11 13:30:51] [Rank 0] step:7721/10000 train_time:331274ms step_avg:42.91ms +[2025-09-11 13:30:52] [Rank 0] step:7741/10000 train_time:331976ms step_avg:42.89ms +[2025-09-11 13:30:52] [Rank 0] step:7741/10000 train_time:331976ms step_avg:42.89ms +[2025-09-11 13:30:52] [Rank 0] step:7761/10000 train_time:332676ms step_avg:42.87ms +[2025-09-11 13:30:52] [Rank 0] step:7761/10000 train_time:332676ms step_avg:42.87ms 
+[2025-09-11 13:30:53] [Rank 0] step:7781/10000 train_time:333379ms step_avg:42.85ms +[2025-09-11 13:30:53] [Rank 0] step:7781/10000 train_time:333379ms step_avg:42.85ms +[2025-09-11 13:30:54] [Rank 0] step:7801/10000 train_time:334079ms step_avg:42.83ms +[2025-09-11 13:30:54] [Rank 0] step:7801/10000 train_time:334079ms step_avg:42.83ms +[2025-09-11 13:30:54] [Rank 0] step:7821/10000 train_time:334780ms step_avg:42.81ms +[2025-09-11 13:30:54] [Rank 0] step:7821/10000 train_time:334780ms step_avg:42.81ms +[2025-09-11 13:30:55] [Rank 0] step:7841/10000 train_time:335482ms step_avg:42.79ms +[2025-09-11 13:30:55] [Rank 0] step:7841/10000 train_time:335482ms step_avg:42.79ms +[2025-09-11 13:30:56] [Rank 0] step:7861/10000 train_time:336186ms step_avg:42.77ms +[2025-09-11 13:30:56] [Rank 0] step:7861/10000 train_time:336186ms step_avg:42.77ms +[2025-09-11 13:30:57] [Rank 0] step:7881/10000 train_time:336886ms step_avg:42.75ms +[2025-09-11 13:30:57] [Rank 0] step:7881/10000 train_time:336886ms step_avg:42.75ms +[2025-09-11 13:30:57] [Rank 0] step:7901/10000 train_time:337589ms step_avg:42.73ms +[2025-09-11 13:30:57] [Rank 0] step:7901/10000 train_time:337589ms step_avg:42.73ms +[2025-09-11 13:30:58] [Rank 0] step:7921/10000 train_time:338289ms step_avg:42.71ms +[2025-09-11 13:30:58] [Rank 0] step:7921/10000 train_time:338289ms step_avg:42.71ms +[2025-09-11 13:30:59] [Rank 0] step:7941/10000 train_time:338992ms step_avg:42.69ms +[2025-09-11 13:30:59] [Rank 0] step:7941/10000 train_time:338992ms step_avg:42.69ms +[2025-09-11 13:30:59] [Rank 0] step:7961/10000 train_time:339692ms step_avg:42.67ms +[2025-09-11 13:30:59] [Rank 0] step:7961/10000 train_time:339692ms step_avg:42.67ms +[2025-09-11 13:31:00] [Rank 0] step:7981/10000 train_time:340396ms step_avg:42.65ms +[2025-09-11 13:31:00] [Rank 0] step:7981/10000 train_time:340396ms step_avg:42.65ms +[2025-09-11 13:31:01] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 13:31:01] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 13:31:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 13:31:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 13:31:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 13:31:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 13:31:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:31:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:31:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 13:31:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 13:31:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 13:31:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 13:31:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 13:31:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 13:31:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 13:31:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 13:31:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 13:31:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 13:31:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 13:31:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 13:31:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 13:31:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 13:31:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 13:31:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 13:31:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 13:31:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 13:31:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 13:31:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 13:31:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 13:31:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 13:31:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 13:31:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 13:31:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 13:31:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 13:31:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 13:31:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 13:31:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 13:31:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 13:31:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 13:31:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 13:31:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 13:31:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 13:31:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 13:31:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 13:31:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 13:31:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 13:31:15] [Rank 0] PRINT: step:8000/10000 val_loss:5.6806 total_sharp:8.3630e-02 L1_sharp:6.5318e-02 L2_sharp:7.3844e-02 L3_sharp:9.5425e-02 L4_sharp:1.4364e-01 L5_sharp:1.7253e-01 L6_sharp:2.8317e-01 L7_sharp:4.7156e-01 L8_sharp:7.7747e-01 L9_sharp:1.5787e+00 L10_sharp:2.9125e+00 L11_sharp:3.5872e+00 L12_sharp:2.5144e+00 total_fnorm:5.4297e-01 total_l1_linf:2.8400e+02 total_spectral:2.7148e-01 L1_fnorm:1.8799e-02 L2_fnorm:1.9043e-02 L3_fnorm:1.9165e-02 L4_fnorm:1.9287e-02 L5_fnorm:1.9287e-02 L6_fnorm:1.9287e-02 L7_fnorm:1.9043e-02 L8_fnorm:1.9287e-02 L9_fnorm:1.9287e-02 L10_fnorm:1.9287e-02 L11_fnorm:1.9409e-02 L12_fnorm:1.7578e-02 L1_l1linf:3.0365e-03 L2_l1linf:3.2043e-03 L3_l1linf:3.2349e-03 L4_l1linf:3.4332e-03 L5_l1linf:3.5400e-03 L6_l1linf:3.6469e-03 L7_l1linf:3.8147e-03 L8_l1linf:3.9673e-03 L9_l1linf:4.1809e-03 L10_l1linf:4.3945e-03 L11_l1linf:4.6387e-03 L12_l1linf:3.9368e-03 L1_spectral:3.9375e-04 L2_spectral:4.0150e-04 L3_spectral:4.0202e-04 L4_spectral:4.0105e-04 L5_spectral:4.0173e-04 L6_spectral:3.9663e-04 L7_spectral:3.9127e-04 L8_spectral:3.9909e-04 L9_spectral:3.8389e-04 L10_spectral:3.7671e-04 L11_spectral:3.6751e-04 L12_spectral:3.2755e-04 train_time:341074ms step_avg:42.63ms +[2025-09-11 13:31:15] [Rank 0] PRINT: step:8000/10000 
val_loss:5.6806 total_sharp:8.3630e-02 L1_sharp:6.5318e-02 L2_sharp:7.3844e-02 L3_sharp:9.5425e-02 L4_sharp:1.4364e-01 L5_sharp:1.7253e-01 L6_sharp:2.8317e-01 L7_sharp:4.7156e-01 L8_sharp:7.7747e-01 L9_sharp:1.5787e+00 L10_sharp:2.9125e+00 L11_sharp:3.5872e+00 L12_sharp:2.5144e+00 total_fnorm:5.4297e-01 total_l1_linf:2.8400e+02 total_spectral:2.7148e-01 L1_fnorm:1.8799e-02 L2_fnorm:1.9043e-02 L3_fnorm:1.9165e-02 L4_fnorm:1.9287e-02 L5_fnorm:1.9287e-02 L6_fnorm:1.9287e-02 L7_fnorm:1.9043e-02 L8_fnorm:1.9287e-02 L9_fnorm:1.9287e-02 L10_fnorm:1.9287e-02 L11_fnorm:1.9409e-02 L12_fnorm:1.7578e-02 L1_l1linf:3.0365e-03 L2_l1linf:3.2043e-03 L3_l1linf:3.2349e-03 L4_l1linf:3.4332e-03 L5_l1linf:3.5400e-03 L6_l1linf:3.6469e-03 L7_l1linf:3.8147e-03 L8_l1linf:3.9673e-03 L9_l1linf:4.1809e-03 L10_l1linf:4.3945e-03 L11_l1linf:4.6387e-03 L12_l1linf:3.9368e-03 L1_spectral:3.9375e-04 L2_spectral:4.0150e-04 L3_spectral:4.0202e-04 L4_spectral:4.0105e-04 L5_spectral:4.0173e-04 L6_spectral:3.9663e-04 L7_spectral:3.9127e-04 L8_spectral:3.9909e-04 L9_spectral:3.8389e-04 L10_spectral:3.7671e-04 L11_spectral:3.6751e-04 L12_spectral:3.2755e-04 train_time:341074ms step_avg:42.63ms +[2025-09-11 13:31:17] [Rank 0] step:8001/10000 train_time:343233ms step_avg:42.90ms +[2025-09-11 13:31:17] [Rank 0] step:8001/10000 train_time:343233ms step_avg:42.90ms +[2025-09-11 13:31:18] [Rank 0] step:8021/10000 train_time:343969ms step_avg:42.88ms +[2025-09-11 13:31:18] [Rank 0] step:8021/10000 train_time:343969ms step_avg:42.88ms +[2025-09-11 13:31:18] [Rank 0] step:8041/10000 train_time:344671ms step_avg:42.86ms +[2025-09-11 13:31:18] [Rank 0] step:8041/10000 train_time:344671ms step_avg:42.86ms +[2025-09-11 13:31:19] [Rank 0] step:8061/10000 train_time:345375ms step_avg:42.85ms +[2025-09-11 13:31:19] [Rank 0] step:8061/10000 train_time:345375ms step_avg:42.85ms +[2025-09-11 13:31:20] [Rank 0] step:8081/10000 train_time:346075ms step_avg:42.83ms +[2025-09-11 13:31:20] [Rank 0] step:8081/10000 
train_time:346075ms step_avg:42.83ms +[2025-09-11 13:31:20] [Rank 0] step:8101/10000 train_time:346776ms step_avg:42.81ms +[2025-09-11 13:31:20] [Rank 0] step:8101/10000 train_time:346776ms step_avg:42.81ms +[2025-09-11 13:31:21] [Rank 0] step:8121/10000 train_time:347482ms step_avg:42.79ms +[2025-09-11 13:31:21] [Rank 0] step:8121/10000 train_time:347482ms step_avg:42.79ms +[2025-09-11 13:31:23] [Rank 0] step:8141/10000 train_time:348913ms step_avg:42.86ms +[2025-09-11 13:31:23] [Rank 0] step:8141/10000 train_time:348913ms step_avg:42.86ms +[2025-09-11 13:31:23] [Rank 0] step:8161/10000 train_time:349618ms step_avg:42.84ms +[2025-09-11 13:31:23] [Rank 0] step:8161/10000 train_time:349618ms step_avg:42.84ms +[2025-09-11 13:31:24] [Rank 0] step:8181/10000 train_time:350331ms step_avg:42.82ms +[2025-09-11 13:31:24] [Rank 0] step:8181/10000 train_time:350331ms step_avg:42.82ms +[2025-09-11 13:31:25] [Rank 0] step:8201/10000 train_time:351040ms step_avg:42.80ms +[2025-09-11 13:31:25] [Rank 0] step:8201/10000 train_time:351040ms step_avg:42.80ms +[2025-09-11 13:31:25] [Rank 0] step:8221/10000 train_time:351748ms step_avg:42.79ms +[2025-09-11 13:31:25] [Rank 0] step:8221/10000 train_time:351748ms step_avg:42.79ms +[2025-09-11 13:31:26] [Rank 0] step:8241/10000 train_time:352464ms step_avg:42.77ms +[2025-09-11 13:31:26] [Rank 0] step:8241/10000 train_time:352464ms step_avg:42.77ms +[2025-09-11 13:31:27] [Rank 0] step:8261/10000 train_time:353171ms step_avg:42.75ms +[2025-09-11 13:31:27] [Rank 0] step:8261/10000 train_time:353171ms step_avg:42.75ms +[2025-09-11 13:31:28] [Rank 0] step:8281/10000 train_time:353877ms step_avg:42.73ms +[2025-09-11 13:31:28] [Rank 0] step:8281/10000 train_time:353877ms step_avg:42.73ms +[2025-09-11 13:31:28] [Rank 0] step:8301/10000 train_time:354585ms step_avg:42.72ms +[2025-09-11 13:31:28] [Rank 0] step:8301/10000 train_time:354585ms step_avg:42.72ms +[2025-09-11 13:31:29] [Rank 0] step:8321/10000 train_time:355292ms step_avg:42.70ms 
+[2025-09-11 13:31:29] [Rank 0] step:8321/10000 train_time:355292ms step_avg:42.70ms +[2025-09-11 13:31:30] [Rank 0] step:8341/10000 train_time:356006ms step_avg:42.68ms +[2025-09-11 13:31:30] [Rank 0] step:8341/10000 train_time:356006ms step_avg:42.68ms +[2025-09-11 13:31:30] [Rank 0] step:8361/10000 train_time:356710ms step_avg:42.66ms +[2025-09-11 13:31:30] [Rank 0] step:8361/10000 train_time:356710ms step_avg:42.66ms +[2025-09-11 13:31:31] [Rank 0] step:8381/10000 train_time:357421ms step_avg:42.65ms +[2025-09-11 13:31:31] [Rank 0] step:8381/10000 train_time:357421ms step_avg:42.65ms +[2025-09-11 13:31:32] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 13:31:32] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 13:31:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 13:31:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 13:31:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 13:31:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 13:31:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:31:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:31:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 13:31:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 13:31:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 13:31:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 13:31:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 13:31:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 13:31:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 13:31:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 13:31:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 13:31:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 13:31:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 13:31:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 13:31:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 13:31:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 13:31:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 13:31:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 13:31:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 13:31:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 13:31:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 13:31:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 13:31:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 13:31:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 13:31:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 13:31:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 13:31:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 13:31:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 13:31:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 13:31:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 13:31:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 13:31:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 13:31:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 13:31:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 13:31:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 13:31:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 13:31:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 13:31:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 13:31:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 13:31:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:31:42] [Rank 0] PRINT: step:8400/10000 val_loss:5.6712 total_sharp:6.0817e-02 L1_sharp:4.9583e-02 L2_sharp:6.8107e-02 L3_sharp:8.0497e-02 L4_sharp:9.7559e-02 L5_sharp:1.5005e-01 L6_sharp:2.0457e-01 L7_sharp:3.9698e-01 L8_sharp:5.3269e-01 L9_sharp:1.1894e+00 L10_sharp:2.1970e+00 L11_sharp:2.2426e+00 L12_sharp:2.6791e+00 total_fnorm:4.0234e-01 total_l1_linf:1.8800e+02 total_spectral:2.0215e-01 L1_fnorm:1.4465e-02 L2_fnorm:1.4587e-02 L3_fnorm:1.4771e-02 L4_fnorm:1.4893e-02 L5_fnorm:1.4954e-02 L6_fnorm:1.4832e-02 L7_fnorm:1.4709e-02 L8_fnorm:1.4954e-02 L9_fnorm:1.4832e-02 L10_fnorm:1.4709e-02 L11_fnorm:1.4893e-02 L12_fnorm:1.3550e-02 L1_l1linf:2.2125e-03 L2_l1linf:2.2430e-03 L3_l1linf:2.3346e-03 L4_l1linf:2.4567e-03 L5_l1linf:2.5024e-03 L6_l1linf:2.5482e-03 L7_l1linf:2.6245e-03 L8_l1linf:2.7161e-03 L9_l1linf:2.8839e-03 L10_l1linf:3.0823e-03 L11_l1linf:3.2654e-03 L12_l1linf:2.8687e-03 L1_spectral:3.1711e-04 L2_spectral:3.1547e-04 L3_spectral:3.1883e-04 L4_spectral:3.2343e-04 L5_spectral:3.2159e-04 L6_spectral:3.1322e-04 L7_spectral:3.0927e-04 L8_spectral:3.1464e-04 L9_spectral:3.0471e-04 L10_spectral:2.9801e-04 L11_spectral:2.8893e-04 L12_spectral:2.6095e-04 train_time:358111ms step_avg:42.63ms +[2025-09-11 13:31:42] [Rank 0] PRINT: step:8400/10000 val_loss:5.6712 total_sharp:6.0817e-02 L1_sharp:4.9583e-02 L2_sharp:6.8107e-02 L3_sharp:8.0497e-02 L4_sharp:9.7559e-02 L5_sharp:1.5005e-01 L6_sharp:2.0457e-01 L7_sharp:3.9698e-01 L8_sharp:5.3269e-01 L9_sharp:1.1894e+00 L10_sharp:2.1970e+00 L11_sharp:2.2426e+00 L12_sharp:2.6791e+00 total_fnorm:4.0234e-01 total_l1_linf:1.8800e+02 total_spectral:2.0215e-01 L1_fnorm:1.4465e-02 L2_fnorm:1.4587e-02 L3_fnorm:1.4771e-02 L4_fnorm:1.4893e-02 L5_fnorm:1.4954e-02 L6_fnorm:1.4832e-02 L7_fnorm:1.4709e-02 L8_fnorm:1.4954e-02 L9_fnorm:1.4832e-02 L10_fnorm:1.4709e-02 L11_fnorm:1.4893e-02 L12_fnorm:1.3550e-02 L1_l1linf:2.2125e-03 L2_l1linf:2.2430e-03 L3_l1linf:2.3346e-03 L4_l1linf:2.4567e-03 L5_l1linf:2.5024e-03 
L6_l1linf:2.5482e-03 L7_l1linf:2.6245e-03 L8_l1linf:2.7161e-03 L9_l1linf:2.8839e-03 L10_l1linf:3.0823e-03 L11_l1linf:3.2654e-03 L12_l1linf:2.8687e-03 L1_spectral:3.1711e-04 L2_spectral:3.1547e-04 L3_spectral:3.1883e-04 L4_spectral:3.2343e-04 L5_spectral:3.2159e-04 L6_spectral:3.1322e-04 L7_spectral:3.0927e-04 L8_spectral:3.1464e-04 L9_spectral:3.0471e-04 L10_spectral:2.9801e-04 L11_spectral:2.8893e-04 L12_spectral:2.6095e-04 train_time:358111ms step_avg:42.63ms +[2025-09-11 13:31:45] [Rank 0] step:8401/10000 train_time:360315ms step_avg:42.89ms +[2025-09-11 13:31:45] [Rank 0] step:8401/10000 train_time:360315ms step_avg:42.89ms +[2025-09-11 13:31:45] [Rank 0] step:8421/10000 train_time:361062ms step_avg:42.88ms +[2025-09-11 13:31:45] [Rank 0] step:8421/10000 train_time:361062ms step_avg:42.88ms +[2025-09-11 13:31:46] [Rank 0] step:8441/10000 train_time:361772ms step_avg:42.86ms +[2025-09-11 13:31:46] [Rank 0] step:8441/10000 train_time:361772ms step_avg:42.86ms +[2025-09-11 13:31:47] [Rank 0] step:8461/10000 train_time:362482ms step_avg:42.84ms +[2025-09-11 13:31:47] [Rank 0] step:8461/10000 train_time:362482ms step_avg:42.84ms +[2025-09-11 13:31:48] [Rank 0] step:8481/10000 train_time:363192ms step_avg:42.82ms +[2025-09-11 13:31:48] [Rank 0] step:8481/10000 train_time:363192ms step_avg:42.82ms +[2025-09-11 13:31:48] [Rank 0] step:8501/10000 train_time:363900ms step_avg:42.81ms +[2025-09-11 13:31:48] [Rank 0] step:8501/10000 train_time:363900ms step_avg:42.81ms +[2025-09-11 13:31:49] [Rank 0] step:8521/10000 train_time:364607ms step_avg:42.79ms +[2025-09-11 13:31:49] [Rank 0] step:8521/10000 train_time:364607ms step_avg:42.79ms +[2025-09-11 13:31:50] [Rank 0] step:8541/10000 train_time:365314ms step_avg:42.77ms +[2025-09-11 13:31:50] [Rank 0] step:8541/10000 train_time:365314ms step_avg:42.77ms +[2025-09-11 13:31:50] [Rank 0] step:8561/10000 train_time:366027ms step_avg:42.76ms +[2025-09-11 13:31:50] [Rank 0] step:8561/10000 train_time:366027ms step_avg:42.76ms 
+[2025-09-11 13:31:51] [Rank 0] step:8581/10000 train_time:366740ms step_avg:42.74ms +[2025-09-11 13:31:51] [Rank 0] step:8581/10000 train_time:366740ms step_avg:42.74ms +[2025-09-11 13:31:52] [Rank 0] step:8601/10000 train_time:367449ms step_avg:42.72ms +[2025-09-11 13:31:52] [Rank 0] step:8601/10000 train_time:367449ms step_avg:42.72ms +[2025-09-11 13:31:53] [Rank 0] step:8621/10000 train_time:368156ms step_avg:42.70ms +[2025-09-11 13:31:53] [Rank 0] step:8621/10000 train_time:368156ms step_avg:42.70ms +[2025-09-11 13:31:53] [Rank 0] step:8641/10000 train_time:368863ms step_avg:42.69ms +[2025-09-11 13:31:53] [Rank 0] step:8641/10000 train_time:368863ms step_avg:42.69ms +[2025-09-11 13:31:54] [Rank 0] step:8661/10000 train_time:369572ms step_avg:42.67ms +[2025-09-11 13:31:54] [Rank 0] step:8661/10000 train_time:369572ms step_avg:42.67ms +[2025-09-11 13:31:55] [Rank 0] step:8681/10000 train_time:370281ms step_avg:42.65ms +[2025-09-11 13:31:55] [Rank 0] step:8681/10000 train_time:370281ms step_avg:42.65ms +[2025-09-11 13:31:55] [Rank 0] step:8701/10000 train_time:370990ms step_avg:42.64ms +[2025-09-11 13:31:55] [Rank 0] step:8701/10000 train_time:370990ms step_avg:42.64ms +[2025-09-11 13:31:56] [Rank 0] step:8721/10000 train_time:371698ms step_avg:42.62ms +[2025-09-11 13:31:56] [Rank 0] step:8721/10000 train_time:371698ms step_avg:42.62ms +[2025-09-11 13:31:57] [Rank 0] step:8741/10000 train_time:372403ms step_avg:42.60ms +[2025-09-11 13:31:57] [Rank 0] step:8741/10000 train_time:372403ms step_avg:42.60ms +[2025-09-11 13:31:57] [Rank 0] step:8761/10000 train_time:373114ms step_avg:42.59ms +[2025-09-11 13:31:57] [Rank 0] step:8761/10000 train_time:373114ms step_avg:42.59ms +[2025-09-11 13:31:58] [Rank 0] step:8781/10000 train_time:373820ms step_avg:42.57ms +[2025-09-11 13:31:58] [Rank 0] step:8781/10000 train_time:373820ms step_avg:42.57ms +[2025-09-11 13:31:59] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 13:31:59] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 13:32:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 13:32:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 13:32:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 13:32:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 13:32:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:32:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:32:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 13:32:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 13:32:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 13:32:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 13:32:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 13:32:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 13:32:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 13:32:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 13:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 13:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 13:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 13:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 13:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 13:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 13:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 13:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 13:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 13:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 13:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 13:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 13:32:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 13:32:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 13:32:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 13:32:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 13:32:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 13:32:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 13:32:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 13:32:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 13:32:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 13:32:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 13:32:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 13:32:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 13:32:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 13:32:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 13:32:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 13:32:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 13:32:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 13:32:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 13:32:10] [Rank 0] PRINT: step:8800/10000 val_loss:5.6645 total_sharp:7.9758e-02 L1_sharp:4.2510e-02 L2_sharp:5.8647e-02 L3_sharp:6.6881e-02 L4_sharp:8.8177e-02 L5_sharp:1.3562e-01 L6_sharp:2.4304e-01 L7_sharp:3.5302e-01 L8_sharp:5.7816e-01 L9_sharp:1.4635e+00 L10_sharp:2.6787e+00 L11_sharp:3.5948e+00 L12_sharp:3.9595e+00 total_fnorm:2.8906e-01 total_l1_linf:1.1850e+02 total_spectral:1.4453e-01 L1_fnorm:1.0254e-02 L2_fnorm:1.0437e-02 L3_fnorm:1.0498e-02 L4_fnorm:1.0620e-02 L5_fnorm:1.0681e-02 L6_fnorm:1.0559e-02 L7_fnorm:1.0498e-02 L8_fnorm:1.0681e-02 L9_fnorm:1.0559e-02 L10_fnorm:1.0559e-02 L11_fnorm:1.0620e-02 L12_fnorm:9.5215e-03 L1_l1linf:1.4267e-03 L2_l1linf:1.5564e-03 L3_l1linf:1.5106e-03 L4_l1linf:1.5793e-03 L5_l1linf:1.5488e-03 L6_l1linf:1.7166e-03 L7_l1linf:1.6937e-03 L8_l1linf:1.8005e-03 L9_l1linf:1.9073e-03 L10_l1linf:1.9989e-03 L11_l1linf:2.0599e-03 L12_l1linf:1.8539e-03 L1_spectral:2.3165e-04 L2_spectral:2.2992e-04 L3_spectral:2.3274e-04 L4_spectral:2.3506e-04 L5_spectral:2.3801e-04 L6_spectral:2.3087e-04 L7_spectral:2.2559e-04 L8_spectral:2.3300e-04 L9_spectral:2.2078e-04 L10_spectral:2.1949e-04 L11_spectral:2.0883e-04 L12_spectral:1.8896e-04 train_time:374505ms step_avg:42.56ms +[2025-09-11 13:32:10] [Rank 0] PRINT: step:8800/10000 
val_loss:5.6645 total_sharp:7.9758e-02 L1_sharp:4.2510e-02 L2_sharp:5.8647e-02 L3_sharp:6.6881e-02 L4_sharp:8.8177e-02 L5_sharp:1.3562e-01 L6_sharp:2.4304e-01 L7_sharp:3.5302e-01 L8_sharp:5.7816e-01 L9_sharp:1.4635e+00 L10_sharp:2.6787e+00 L11_sharp:3.5948e+00 L12_sharp:3.9595e+00 total_fnorm:2.8906e-01 total_l1_linf:1.1850e+02 total_spectral:1.4453e-01 L1_fnorm:1.0254e-02 L2_fnorm:1.0437e-02 L3_fnorm:1.0498e-02 L4_fnorm:1.0620e-02 L5_fnorm:1.0681e-02 L6_fnorm:1.0559e-02 L7_fnorm:1.0498e-02 L8_fnorm:1.0681e-02 L9_fnorm:1.0559e-02 L10_fnorm:1.0559e-02 L11_fnorm:1.0620e-02 L12_fnorm:9.5215e-03 L1_l1linf:1.4267e-03 L2_l1linf:1.5564e-03 L3_l1linf:1.5106e-03 L4_l1linf:1.5793e-03 L5_l1linf:1.5488e-03 L6_l1linf:1.7166e-03 L7_l1linf:1.6937e-03 L8_l1linf:1.8005e-03 L9_l1linf:1.9073e-03 L10_l1linf:1.9989e-03 L11_l1linf:2.0599e-03 L12_l1linf:1.8539e-03 L1_spectral:2.3165e-04 L2_spectral:2.2992e-04 L3_spectral:2.3274e-04 L4_spectral:2.3506e-04 L5_spectral:2.3801e-04 L6_spectral:2.3087e-04 L7_spectral:2.2559e-04 L8_spectral:2.3300e-04 L9_spectral:2.2078e-04 L10_spectral:2.1949e-04 L11_spectral:2.0883e-04 L12_spectral:1.8896e-04 train_time:374505ms step_avg:42.56ms +[2025-09-11 13:32:13] [Rank 0] step:8801/10000 train_time:377528ms step_avg:42.90ms +[2025-09-11 13:32:13] [Rank 0] step:8801/10000 train_time:377528ms step_avg:42.90ms +[2025-09-11 13:32:14] [Rank 0] step:8821/10000 train_time:378266ms step_avg:42.88ms +[2025-09-11 13:32:14] [Rank 0] step:8821/10000 train_time:378266ms step_avg:42.88ms +[2025-09-11 13:32:15] [Rank 0] step:8841/10000 train_time:379123ms step_avg:42.88ms +[2025-09-11 13:32:15] [Rank 0] step:8841/10000 train_time:379123ms step_avg:42.88ms +[2025-09-11 13:32:16] [Rank 0] step:8861/10000 train_time:379953ms step_avg:42.88ms +[2025-09-11 13:32:16] [Rank 0] step:8861/10000 train_time:379953ms step_avg:42.88ms +[2025-09-11 13:32:16] [Rank 0] step:8881/10000 train_time:380663ms step_avg:42.86ms +[2025-09-11 13:32:16] [Rank 0] step:8881/10000 
train_time:380663ms step_avg:42.86ms +[2025-09-11 13:32:17] [Rank 0] step:8901/10000 train_time:381374ms step_avg:42.85ms +[2025-09-11 13:32:17] [Rank 0] step:8901/10000 train_time:381374ms step_avg:42.85ms +[2025-09-11 13:32:18] [Rank 0] step:8921/10000 train_time:382349ms step_avg:42.86ms +[2025-09-11 13:32:18] [Rank 0] step:8921/10000 train_time:382349ms step_avg:42.86ms +[2025-09-11 13:32:19] [Rank 0] step:8941/10000 train_time:383061ms step_avg:42.84ms +[2025-09-11 13:32:19] [Rank 0] step:8941/10000 train_time:383061ms step_avg:42.84ms +[2025-09-11 13:32:20] [Rank 0] step:8961/10000 train_time:383778ms step_avg:42.83ms +[2025-09-11 13:32:20] [Rank 0] step:8961/10000 train_time:383778ms step_avg:42.83ms +[2025-09-11 13:32:20] [Rank 0] step:8981/10000 train_time:384491ms step_avg:42.81ms +[2025-09-11 13:32:20] [Rank 0] step:8981/10000 train_time:384491ms step_avg:42.81ms +[2025-09-11 13:32:21] [Rank 0] step:9001/10000 train_time:385195ms step_avg:42.79ms +[2025-09-11 13:32:21] [Rank 0] step:9001/10000 train_time:385195ms step_avg:42.79ms +[2025-09-11 13:32:22] [Rank 0] step:9021/10000 train_time:385904ms step_avg:42.78ms +[2025-09-11 13:32:22] [Rank 0] step:9021/10000 train_time:385904ms step_avg:42.78ms +[2025-09-11 13:32:22] [Rank 0] step:9041/10000 train_time:386615ms step_avg:42.76ms +[2025-09-11 13:32:22] [Rank 0] step:9041/10000 train_time:386615ms step_avg:42.76ms +[2025-09-11 13:32:23] [Rank 0] step:9061/10000 train_time:387322ms step_avg:42.75ms +[2025-09-11 13:32:23] [Rank 0] step:9061/10000 train_time:387322ms step_avg:42.75ms +[2025-09-11 13:32:24] [Rank 0] step:9081/10000 train_time:388040ms step_avg:42.73ms +[2025-09-11 13:32:24] [Rank 0] step:9081/10000 train_time:388040ms step_avg:42.73ms +[2025-09-11 13:32:25] [Rank 0] step:9101/10000 train_time:388753ms step_avg:42.72ms +[2025-09-11 13:32:25] [Rank 0] step:9101/10000 train_time:388753ms step_avg:42.72ms +[2025-09-11 13:32:25] [Rank 0] step:9121/10000 train_time:389466ms step_avg:42.70ms 
+[2025-09-11 13:32:25] [Rank 0] step:9121/10000 train_time:389466ms step_avg:42.70ms +[2025-09-11 13:32:26] [Rank 0] step:9141/10000 train_time:390174ms step_avg:42.68ms +[2025-09-11 13:32:26] [Rank 0] step:9141/10000 train_time:390174ms step_avg:42.68ms +[2025-09-11 13:32:27] [Rank 0] step:9161/10000 train_time:390886ms step_avg:42.67ms +[2025-09-11 13:32:27] [Rank 0] step:9161/10000 train_time:390886ms step_avg:42.67ms +[2025-09-11 13:32:27] [Rank 0] step:9181/10000 train_time:391598ms step_avg:42.65ms +[2025-09-11 13:32:27] [Rank 0] step:9181/10000 train_time:391598ms step_avg:42.65ms +[2025-09-11 13:32:28] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 13:32:28] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 13:32:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 13:32:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 13:32:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 13:32:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 13:32:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:32:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:32:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 13:32:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 13:32:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 13:32:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 13:32:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 13:32:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 13:32:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 13:32:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 13:32:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 13:32:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 13:32:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 13:32:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 13:32:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 13:32:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 13:32:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 13:32:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 13:32:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 13:32:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 13:32:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 13:32:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 13:32:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 13:32:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 13:32:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 13:32:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 13:32:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 13:32:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 13:32:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 13:32:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 13:32:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 13:32:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 13:32:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 13:32:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 13:32:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 13:32:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 13:32:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 13:32:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 13:32:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 13:32:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:32:39] [Rank 0] PRINT: step:9200/10000 val_loss:5.6589 total_sharp:7.6372e-02 L1_sharp:3.1167e-02 L2_sharp:4.9914e-02 L3_sharp:6.4624e-02 L4_sharp:1.0297e-01 L5_sharp:1.4348e-01 L6_sharp:2.1866e-01 L7_sharp:2.9241e-01 L8_sharp:6.3234e-01 L9_sharp:1.1235e+00 L10_sharp:2.1685e+00 L11_sharp:3.8389e+00 L12_sharp:3.6711e+00 total_fnorm:1.7676e-01 total_l1_linf:6.4500e+01 total_spectral:8.9355e-02 L1_fnorm:6.8054e-03 L2_fnorm:6.8359e-03 L3_fnorm:6.8970e-03 L4_fnorm:6.9885e-03 L5_fnorm:6.9885e-03 L6_fnorm:6.9580e-03 L7_fnorm:6.8665e-03 L8_fnorm:7.0190e-03 L9_fnorm:6.9885e-03 L10_fnorm:6.9580e-03 L11_fnorm:7.0190e-03 L12_fnorm:6.1340e-03 L1_l1linf:9.0027e-04 L2_l1linf:8.9264e-04 L3_l1linf:9.0790e-04 L4_l1linf:9.0790e-04 L5_l1linf:9.9182e-04 L6_l1linf:9.8419e-04 L7_l1linf:1.0147e-03 L8_l1linf:1.0834e-03 L9_l1linf:1.1444e-03 L10_l1linf:1.1902e-03 L11_l1linf:1.3733e-03 L12_l1linf:1.0300e-03 L1_spectral:1.5346e-04 L2_spectral:1.5509e-04 L3_spectral:1.5659e-04 L4_spectral:1.5654e-04 L5_spectral:1.5581e-04 L6_spectral:1.5185e-04 L7_spectral:1.4822e-04 L8_spectral:1.5489e-04 L9_spectral:1.4794e-04 L10_spectral:1.4492e-04 L11_spectral:1.4040e-04 L12_spectral:1.2350e-04 train_time:392291ms step_avg:42.64ms +[2025-09-11 13:32:39] [Rank 0] PRINT: step:9200/10000 val_loss:5.6589 total_sharp:7.6372e-02 L1_sharp:3.1167e-02 L2_sharp:4.9914e-02 L3_sharp:6.4624e-02 L4_sharp:1.0297e-01 L5_sharp:1.4348e-01 L6_sharp:2.1866e-01 L7_sharp:2.9241e-01 L8_sharp:6.3234e-01 L9_sharp:1.1235e+00 L10_sharp:2.1685e+00 L11_sharp:3.8389e+00 L12_sharp:3.6711e+00 total_fnorm:1.7676e-01 total_l1_linf:6.4500e+01 total_spectral:8.9355e-02 L1_fnorm:6.8054e-03 L2_fnorm:6.8359e-03 L3_fnorm:6.8970e-03 L4_fnorm:6.9885e-03 L5_fnorm:6.9885e-03 L6_fnorm:6.9580e-03 L7_fnorm:6.8665e-03 L8_fnorm:7.0190e-03 L9_fnorm:6.9885e-03 L10_fnorm:6.9580e-03 L11_fnorm:7.0190e-03 L12_fnorm:6.1340e-03 L1_l1linf:9.0027e-04 L2_l1linf:8.9264e-04 L3_l1linf:9.0790e-04 L4_l1linf:9.0790e-04 L5_l1linf:9.9182e-04 
L6_l1linf:9.8419e-04 L7_l1linf:1.0147e-03 L8_l1linf:1.0834e-03 L9_l1linf:1.1444e-03 L10_l1linf:1.1902e-03 L11_l1linf:1.3733e-03 L12_l1linf:1.0300e-03 L1_spectral:1.5346e-04 L2_spectral:1.5509e-04 L3_spectral:1.5659e-04 L4_spectral:1.5654e-04 L5_spectral:1.5581e-04 L6_spectral:1.5185e-04 L7_spectral:1.4822e-04 L8_spectral:1.5489e-04 L9_spectral:1.4794e-04 L10_spectral:1.4492e-04 L11_spectral:1.4040e-04 L12_spectral:1.2350e-04 train_time:392291ms step_avg:42.64ms +[2025-09-11 13:32:41] [Rank 0] step:9201/10000 train_time:394478ms step_avg:42.87ms +[2025-09-11 13:32:41] [Rank 0] step:9201/10000 train_time:394478ms step_avg:42.87ms +[2025-09-11 13:32:42] [Rank 0] step:9221/10000 train_time:395200ms step_avg:42.86ms +[2025-09-11 13:32:42] [Rank 0] step:9221/10000 train_time:395200ms step_avg:42.86ms +[2025-09-11 13:32:43] [Rank 0] step:9241/10000 train_time:395908ms step_avg:42.84ms +[2025-09-11 13:32:43] [Rank 0] step:9241/10000 train_time:395908ms step_avg:42.84ms +[2025-09-11 13:32:43] [Rank 0] step:9261/10000 train_time:396619ms step_avg:42.83ms +[2025-09-11 13:32:43] [Rank 0] step:9261/10000 train_time:396619ms step_avg:42.83ms +[2025-09-11 13:32:44] [Rank 0] step:9281/10000 train_time:397330ms step_avg:42.81ms +[2025-09-11 13:32:44] [Rank 0] step:9281/10000 train_time:397330ms step_avg:42.81ms +[2025-09-11 13:32:45] [Rank 0] step:9301/10000 train_time:398036ms step_avg:42.80ms +[2025-09-11 13:32:45] [Rank 0] step:9301/10000 train_time:398036ms step_avg:42.80ms +[2025-09-11 13:32:46] [Rank 0] step:9321/10000 train_time:398748ms step_avg:42.78ms +[2025-09-11 13:32:46] [Rank 0] step:9321/10000 train_time:398748ms step_avg:42.78ms +[2025-09-11 13:32:46] [Rank 0] step:9341/10000 train_time:399454ms step_avg:42.76ms +[2025-09-11 13:32:46] [Rank 0] step:9341/10000 train_time:399454ms step_avg:42.76ms +[2025-09-11 13:32:47] [Rank 0] step:9361/10000 train_time:400159ms step_avg:42.75ms +[2025-09-11 13:32:47] [Rank 0] step:9361/10000 train_time:400159ms step_avg:42.75ms 
+[2025-09-11 13:32:48] [Rank 0] step:9381/10000 train_time:400869ms step_avg:42.73ms +[2025-09-11 13:32:48] [Rank 0] step:9381/10000 train_time:400869ms step_avg:42.73ms +[2025-09-11 13:32:48] [Rank 0] step:9401/10000 train_time:401581ms step_avg:42.72ms +[2025-09-11 13:32:48] [Rank 0] step:9401/10000 train_time:401581ms step_avg:42.72ms +[2025-09-11 13:32:49] [Rank 0] step:9421/10000 train_time:402292ms step_avg:42.70ms +[2025-09-11 13:32:49] [Rank 0] step:9421/10000 train_time:402292ms step_avg:42.70ms +[2025-09-11 13:32:50] [Rank 0] step:9441/10000 train_time:403005ms step_avg:42.69ms +[2025-09-11 13:32:50] [Rank 0] step:9441/10000 train_time:403005ms step_avg:42.69ms +[2025-09-11 13:32:51] [Rank 0] step:9461/10000 train_time:403715ms step_avg:42.67ms +[2025-09-11 13:32:51] [Rank 0] step:9461/10000 train_time:403715ms step_avg:42.67ms +[2025-09-11 13:32:51] [Rank 0] step:9481/10000 train_time:404427ms step_avg:42.66ms +[2025-09-11 13:32:51] [Rank 0] step:9481/10000 train_time:404427ms step_avg:42.66ms +[2025-09-11 13:32:52] [Rank 0] step:9501/10000 train_time:405139ms step_avg:42.64ms +[2025-09-11 13:32:52] [Rank 0] step:9501/10000 train_time:405139ms step_avg:42.64ms +[2025-09-11 13:32:53] [Rank 0] step:9521/10000 train_time:405854ms step_avg:42.63ms +[2025-09-11 13:32:53] [Rank 0] step:9521/10000 train_time:405854ms step_avg:42.63ms +[2025-09-11 13:32:53] [Rank 0] step:9541/10000 train_time:406561ms step_avg:42.61ms +[2025-09-11 13:32:53] [Rank 0] step:9541/10000 train_time:406561ms step_avg:42.61ms +[2025-09-11 13:32:54] [Rank 0] step:9561/10000 train_time:407273ms step_avg:42.60ms +[2025-09-11 13:32:54] [Rank 0] step:9561/10000 train_time:407273ms step_avg:42.60ms +[2025-09-11 13:32:55] [Rank 0] step:9581/10000 train_time:407985ms step_avg:42.58ms +[2025-09-11 13:32:55] [Rank 0] step:9581/10000 train_time:407985ms step_avg:42.58ms +[2025-09-11 13:32:56] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 13:32:56] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 13:32:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 13:32:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 13:32:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 13:32:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 13:32:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:32:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:32:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 13:32:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 13:32:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 13:32:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 13:33:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 13:33:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 13:33:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 13:33:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 13:33:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 13:33:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 13:33:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 13:33:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 13:33:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 13:33:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 13:33:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 13:33:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 13:33:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 13:33:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 13:33:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 13:33:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 13:33:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 13:33:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 13:33:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 13:33:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 13:33:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 13:33:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 13:33:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 13:33:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 13:33:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 13:33:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 13:33:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 13:33:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 13:33:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 13:33:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 13:33:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 13:33:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 13:33:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 13:33:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 13:33:07] [Rank 0] PRINT: step:9600/10000 val_loss:5.6559 total_sharp:3.1926e-02 L1_sharp:2.8214e-02 L2_sharp:3.8049e-02 L3_sharp:5.5150e-02 L4_sharp:5.9576e-02 L5_sharp:8.5082e-02 L6_sharp:1.4391e-01 L7_sharp:2.0515e-01 L8_sharp:3.2988e-01 L9_sharp:7.2769e-01 L10_sharp:1.1180e+00 L11_sharp:1.6622e+00 L12_sharp:1.7492e+00 total_fnorm:1.0400e-01 total_l1_linf:3.1000e+01 total_spectral:5.2490e-02 L1_fnorm:3.7689e-03 L2_fnorm:3.7994e-03 L3_fnorm:3.8452e-03 L4_fnorm:3.8910e-03 L5_fnorm:3.9062e-03 L6_fnorm:3.8910e-03 L7_fnorm:3.8605e-03 L8_fnorm:3.9062e-03 L9_fnorm:3.8910e-03 L10_fnorm:3.8910e-03 L11_fnorm:3.9062e-03 L12_fnorm:3.4180e-03 L1_l1linf:4.6539e-04 L2_l1linf:4.2152e-04 L3_l1linf:4.6158e-04 L4_l1linf:4.4441e-04 L5_l1linf:4.5013e-04 L6_l1linf:4.5776e-04 L7_l1linf:4.9210e-04 L8_l1linf:5.3024e-04 L9_l1linf:5.4550e-04 L10_l1linf:5.7220e-04 L11_l1linf:6.2561e-04 L12_l1linf:5.0735e-04 L1_spectral:8.7224e-05 L2_spectral:8.8990e-05 L3_spectral:8.9497e-05 L4_spectral:9.0042e-05 L5_spectral:9.0862e-05 L6_spectral:8.8447e-05 L7_spectral:8.5440e-05 L8_spectral:8.8032e-05 L9_spectral:8.4446e-05 L10_spectral:8.2653e-05 L11_spectral:7.8626e-05 L12_spectral:7.0022e-05 train_time:408673ms step_avg:42.57ms +[2025-09-11 13:33:07] [Rank 0] PRINT: step:9600/10000 
val_loss:5.6559 total_sharp:3.1926e-02 L1_sharp:2.8214e-02 L2_sharp:3.8049e-02 L3_sharp:5.5150e-02 L4_sharp:5.9576e-02 L5_sharp:8.5082e-02 L6_sharp:1.4391e-01 L7_sharp:2.0515e-01 L8_sharp:3.2988e-01 L9_sharp:7.2769e-01 L10_sharp:1.1180e+00 L11_sharp:1.6622e+00 L12_sharp:1.7492e+00 total_fnorm:1.0400e-01 total_l1_linf:3.1000e+01 total_spectral:5.2490e-02 L1_fnorm:3.7689e-03 L2_fnorm:3.7994e-03 L3_fnorm:3.8452e-03 L4_fnorm:3.8910e-03 L5_fnorm:3.9062e-03 L6_fnorm:3.8910e-03 L7_fnorm:3.8605e-03 L8_fnorm:3.9062e-03 L9_fnorm:3.8910e-03 L10_fnorm:3.8910e-03 L11_fnorm:3.9062e-03 L12_fnorm:3.4180e-03 L1_l1linf:4.6539e-04 L2_l1linf:4.2152e-04 L3_l1linf:4.6158e-04 L4_l1linf:4.4441e-04 L5_l1linf:4.5013e-04 L6_l1linf:4.5776e-04 L7_l1linf:4.9210e-04 L8_l1linf:5.3024e-04 L9_l1linf:5.4550e-04 L10_l1linf:5.7220e-04 L11_l1linf:6.2561e-04 L12_l1linf:5.0735e-04 L1_spectral:8.7224e-05 L2_spectral:8.8990e-05 L3_spectral:8.9497e-05 L4_spectral:9.0042e-05 L5_spectral:9.0862e-05 L6_spectral:8.8447e-05 L7_spectral:8.5440e-05 L8_spectral:8.8032e-05 L9_spectral:8.4446e-05 L10_spectral:8.2653e-05 L11_spectral:7.8626e-05 L12_spectral:7.0022e-05 train_time:408673ms step_avg:42.57ms +[2025-09-11 13:33:09] [Rank 0] step:9601/10000 train_time:410915ms step_avg:42.80ms +[2025-09-11 13:33:09] [Rank 0] step:9601/10000 train_time:410915ms step_avg:42.80ms +[2025-09-11 13:33:10] [Rank 0] step:9621/10000 train_time:411639ms step_avg:42.79ms +[2025-09-11 13:33:10] [Rank 0] step:9621/10000 train_time:411639ms step_avg:42.79ms +[2025-09-11 13:33:10] [Rank 0] step:9641/10000 train_time:412354ms step_avg:42.77ms +[2025-09-11 13:33:10] [Rank 0] step:9641/10000 train_time:412354ms step_avg:42.77ms +[2025-09-11 13:33:11] [Rank 0] step:9661/10000 train_time:413076ms step_avg:42.76ms +[2025-09-11 13:33:11] [Rank 0] step:9661/10000 train_time:413076ms step_avg:42.76ms +[2025-09-11 13:33:12] [Rank 0] step:9681/10000 train_time:413790ms step_avg:42.74ms +[2025-09-11 13:33:12] [Rank 0] step:9681/10000 
train_time:413790ms step_avg:42.74ms +[2025-09-11 13:33:12] [Rank 0] step:9701/10000 train_time:414506ms step_avg:42.73ms +[2025-09-11 13:33:12] [Rank 0] step:9701/10000 train_time:414506ms step_avg:42.73ms +[2025-09-11 13:33:13] [Rank 0] step:9721/10000 train_time:415225ms step_avg:42.71ms +[2025-09-11 13:33:13] [Rank 0] step:9721/10000 train_time:415225ms step_avg:42.71ms +[2025-09-11 13:33:14] [Rank 0] step:9741/10000 train_time:415942ms step_avg:42.70ms +[2025-09-11 13:33:14] [Rank 0] step:9741/10000 train_time:415942ms step_avg:42.70ms +[2025-09-11 13:33:15] [Rank 0] step:9761/10000 train_time:416659ms step_avg:42.69ms +[2025-09-11 13:33:15] [Rank 0] step:9761/10000 train_time:416659ms step_avg:42.69ms +[2025-09-11 13:33:15] [Rank 0] step:9781/10000 train_time:417374ms step_avg:42.67ms +[2025-09-11 13:33:15] [Rank 0] step:9781/10000 train_time:417374ms step_avg:42.67ms +[2025-09-11 13:33:16] [Rank 0] step:9801/10000 train_time:418096ms step_avg:42.66ms +[2025-09-11 13:33:16] [Rank 0] step:9801/10000 train_time:418096ms step_avg:42.66ms +[2025-09-11 13:33:17] [Rank 0] step:9821/10000 train_time:418813ms step_avg:42.64ms +[2025-09-11 13:33:17] [Rank 0] step:9821/10000 train_time:418813ms step_avg:42.64ms +[2025-09-11 13:33:17] [Rank 0] step:9841/10000 train_time:419533ms step_avg:42.63ms +[2025-09-11 13:33:17] [Rank 0] step:9841/10000 train_time:419533ms step_avg:42.63ms +[2025-09-11 13:33:18] [Rank 0] step:9861/10000 train_time:420543ms step_avg:42.65ms +[2025-09-11 13:33:18] [Rank 0] step:9861/10000 train_time:420543ms step_avg:42.65ms +[2025-09-11 13:33:19] [Rank 0] step:9881/10000 train_time:421259ms step_avg:42.63ms +[2025-09-11 13:33:19] [Rank 0] step:9881/10000 train_time:421259ms step_avg:42.63ms +[2025-09-11 13:33:20] [Rank 0] step:9901/10000 train_time:421973ms step_avg:42.62ms +[2025-09-11 13:33:20] [Rank 0] step:9901/10000 train_time:421973ms step_avg:42.62ms +[2025-09-11 13:33:21] [Rank 0] step:9921/10000 train_time:422834ms step_avg:42.62ms 
+[2025-09-11 13:33:21] [Rank 0] step:9921/10000 train_time:422834ms step_avg:42.62ms +[2025-09-11 13:33:22] [Rank 0] step:9941/10000 train_time:423674ms step_avg:42.62ms +[2025-09-11 13:33:22] [Rank 0] step:9941/10000 train_time:423674ms step_avg:42.62ms +[2025-09-11 13:33:22] [Rank 0] step:9961/10000 train_time:424395ms step_avg:42.61ms +[2025-09-11 13:33:22] [Rank 0] step:9961/10000 train_time:424395ms step_avg:42.61ms +[2025-09-11 13:33:23] [Rank 0] step:9981/10000 train_time:425113ms step_avg:42.59ms +[2025-09-11 13:33:23] [Rank 0] step:9981/10000 train_time:425113ms step_avg:42.59ms +[2025-09-11 13:33:24] [Rank 0] step:10000/10000 train_time:425803ms step_avg:42.58ms +[2025-09-11 13:33:24] [Rank 0] step:10000/10000 train_time:425803ms step_avg:42.58ms +[2025-09-11 13:33:24] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 13:33:24] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 13:33:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 13:33:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 13:33:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 13:33:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 13:33:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:33:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:33:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 13:33:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 13:33:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 13:33:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 13:33:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 13:33:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 13:33:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 13:33:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 13:33:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 13:33:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 13:33:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 13:33:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 13:33:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 13:33:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 13:33:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 13:33:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 13:33:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 13:33:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 13:33:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 13:33:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 13:33:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 13:33:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 13:33:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 13:33:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 13:33:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 13:33:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 13:33:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 13:33:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 13:33:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 13:33:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 13:33:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 13:33:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 13:33:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 13:33:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 13:33:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 13:33:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 13:33:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 13:33:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:33:35] [Rank 0] PRINT: step:10000/10000 val_loss:5.6550 total_sharp:2.4907e-02 L1_sharp:1.5637e-02 L2_sharp:2.6956e-02 L3_sharp:4.2625e-02 L4_sharp:4.7504e-02 L5_sharp:7.7209e-02 L6_sharp:1.1108e-01 L7_sharp:1.9924e-01 L8_sharp:3.0243e-01 L9_sharp:6.4217e-01 L10_sharp:9.4192e-01 L11_sharp:1.2930e+00 L12_sharp:1.5474e+00 total_fnorm:3.9795e-02 total_l1_linf:8.6875e+00 total_spectral:2.0020e-02 L1_fnorm:1.4801e-03 L2_fnorm:1.4801e-03 L3_fnorm:1.4954e-03 L4_fnorm:1.5182e-03 L5_fnorm:1.5259e-03 L6_fnorm:1.5030e-03 L7_fnorm:1.4954e-03 L8_fnorm:1.5259e-03 L9_fnorm:1.5182e-03 L10_fnorm:1.5182e-03 L11_fnorm:1.5259e-03 L12_fnorm:1.3351e-03 L1_l1linf:1.3733e-04 L2_l1linf:1.4973e-04 L3_l1linf:1.3924e-04 L4_l1linf:1.4687e-04 L5_l1linf:1.4877e-04 L6_l1linf:1.4496e-04 L7_l1linf:1.6212e-04 L8_l1linf:1.5831e-04 L9_l1linf:1.6689e-04 L10_l1linf:1.8120e-04 L11_l1linf:1.8978e-04 L12_l1linf:1.6594e-04 L1_spectral:3.5432e-05 L2_spectral:3.5584e-05 L3_spectral:3.5257e-05 L4_spectral:3.5670e-05 L5_spectral:3.6134e-05 L6_spectral:3.4752e-05 L7_spectral:3.4133e-05 L8_spectral:3.5070e-05 L9_spectral:3.3348e-05 L10_spectral:3.2035e-05 L11_spectral:3.1435e-05 L12_spectral:2.9682e-05 train_time:425823ms step_avg:42.58ms +[2025-09-11 13:33:35] [Rank 0] PRINT: step:10000/10000 val_loss:5.6550 total_sharp:2.4907e-02 L1_sharp:1.5637e-02 L2_sharp:2.6956e-02 L3_sharp:4.2625e-02 L4_sharp:4.7504e-02 L5_sharp:7.7209e-02 L6_sharp:1.1108e-01 L7_sharp:1.9924e-01 L8_sharp:3.0243e-01 L9_sharp:6.4217e-01 L10_sharp:9.4192e-01 L11_sharp:1.2930e+00 L12_sharp:1.5474e+00 total_fnorm:3.9795e-02 total_l1_linf:8.6875e+00 total_spectral:2.0020e-02 L1_fnorm:1.4801e-03 L2_fnorm:1.4801e-03 L3_fnorm:1.4954e-03 L4_fnorm:1.5182e-03 L5_fnorm:1.5259e-03 L6_fnorm:1.5030e-03 L7_fnorm:1.4954e-03 L8_fnorm:1.5259e-03 L9_fnorm:1.5182e-03 L10_fnorm:1.5182e-03 L11_fnorm:1.5259e-03 L12_fnorm:1.3351e-03 L1_l1linf:1.3733e-04 L2_l1linf:1.4973e-04 L3_l1linf:1.3924e-04 L4_l1linf:1.4687e-04 L5_l1linf:1.4877e-04 
L6_l1linf:1.4496e-04 L7_l1linf:1.6212e-04 L8_l1linf:1.5831e-04 L9_l1linf:1.6689e-04 L10_l1linf:1.8120e-04 L11_l1linf:1.8978e-04 L12_l1linf:1.6594e-04 L1_spectral:3.5432e-05 L2_spectral:3.5584e-05 L3_spectral:3.5257e-05 L4_spectral:3.5670e-05 L5_spectral:3.6134e-05 L6_spectral:3.4752e-05 L7_spectral:3.4133e-05 L8_spectral:3.5070e-05 L9_spectral:3.3348e-05 L10_spectral:3.2035e-05 L11_spectral:3.1435e-05 L12_spectral:2.9682e-05 train_time:425823ms step_avg:42.58ms +[2025-09-11 13:33:35] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 13:33:35 2025 --- +[2025-09-11 13:33:35] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 13:33:35 2025 --- +[2025-09-11 13:33:35] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 13:33:35] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.001_seed_42/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.001_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..42e949aa9f5f032110a73f2aa6bd8c8e5997c762 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.001_seed_42/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.001, + "muon_lr": 0.001, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "e87ef02d-29ff-4238-9fbb-1698562443df", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.001_seed_42/training_log_e87ef02d-29ff-4238-9fbb-1698562443df.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.001_seed_42/training_log_e87ef02d-29ff-4238-9fbb-1698562443df.txt new file mode 100644 index 0000000000000000000000000000000000000000..c8c7c6f8de80bd2a549bc6b9e2e63daeb13098f4 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.001_seed_42/training_log_e87ef02d-29ff-4238-9fbb-1698562443df.txt @@ -0,0 +1,4264 @@ +[2025-09-11 13:05:48] [Rank 0] PRINT: --- Script Start: Thu Sep 11 13:05:48 2025 --- +[2025-09-11 13:05:48] [Rank 0] PRINT: --- Script Start: Thu Sep 11 13:05:48 2025 --- +[2025-09-11 13:05:48] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001, muon_lr=0.001, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 13:05:48] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001, muon_lr=0.001, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 13:05:48] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 13:05:48] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 13:05:48] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-11 13:05:48] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-11 13:05:48] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.001_seed_42 +[2025-09-11 13:05:48] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.001_seed_42 +[2025-09-11 13:05:48] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses 
import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert 
header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." 
+ "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + 
train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 13:05:48] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 13:05:48] [Rank 0] PRINT: Constructing model... +[2025-09-11 13:05:48] [Rank 0] PRINT: Constructing model... +[2025-09-11 13:05:49] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 13:05:49] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 13:05:49] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 13:05:49] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 13:05:49] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 13:05:49] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 13:05:49] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 13:05:49] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 13:05:49] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 13:05:49] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 13:05:51] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 13:05:51] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 13:05:51] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 13:05:51] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 13:05:51] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 13:05:51] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 13:05:56] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 13:05:56] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 13:05:56] [Rank 0] PRINT: Starting warmup... +[2025-09-11 13:05:56] [Rank 0] PRINT: Starting warmup... +[2025-09-11 13:06:33] [Rank 0] PRINT: Warmup complete. +[2025-09-11 13:06:33] [Rank 0] PRINT: Warmup complete. +[2025-09-11 13:06:33] [Rank 0] PRINT: Starting training... +[2025-09-11 13:06:33] [Rank 0] PRINT: Starting training... 
+[2025-09-11 13:06:35] [Rank 0] step:21/10000 train_time:1543ms step_avg:73.47ms +[2025-09-11 13:06:35] [Rank 0] step:21/10000 train_time:1543ms step_avg:73.47ms +[2025-09-11 13:06:35] [Rank 0] step:41/10000 train_time:2271ms step_avg:55.38ms +[2025-09-11 13:06:35] [Rank 0] step:41/10000 train_time:2271ms step_avg:55.38ms +[2025-09-11 13:06:36] [Rank 0] step:61/10000 train_time:2998ms step_avg:49.14ms +[2025-09-11 13:06:36] [Rank 0] step:61/10000 train_time:2998ms step_avg:49.14ms +[2025-09-11 13:06:37] [Rank 0] step:81/10000 train_time:3725ms step_avg:45.98ms +[2025-09-11 13:06:37] [Rank 0] step:81/10000 train_time:3725ms step_avg:45.98ms +[2025-09-11 13:06:37] [Rank 0] step:101/10000 train_time:4452ms step_avg:44.08ms +[2025-09-11 13:06:37] [Rank 0] step:101/10000 train_time:4452ms step_avg:44.08ms +[2025-09-11 13:06:38] [Rank 0] step:121/10000 train_time:5179ms step_avg:42.80ms +[2025-09-11 13:06:38] [Rank 0] step:121/10000 train_time:5179ms step_avg:42.80ms +[2025-09-11 13:06:39] [Rank 0] step:141/10000 train_time:5905ms step_avg:41.88ms +[2025-09-11 13:06:39] [Rank 0] step:141/10000 train_time:5905ms step_avg:41.88ms +[2025-09-11 13:06:40] [Rank 0] step:161/10000 train_time:6632ms step_avg:41.19ms +[2025-09-11 13:06:40] [Rank 0] step:161/10000 train_time:6632ms step_avg:41.19ms +[2025-09-11 13:06:40] [Rank 0] step:181/10000 train_time:7358ms step_avg:40.65ms +[2025-09-11 13:06:40] [Rank 0] step:181/10000 train_time:7358ms step_avg:40.65ms +[2025-09-11 13:06:41] [Rank 0] step:201/10000 train_time:8084ms step_avg:40.22ms +[2025-09-11 13:06:41] [Rank 0] step:201/10000 train_time:8084ms step_avg:40.22ms +[2025-09-11 13:06:42] [Rank 0] step:221/10000 train_time:8810ms step_avg:39.86ms +[2025-09-11 13:06:42] [Rank 0] step:221/10000 train_time:8810ms step_avg:39.86ms +[2025-09-11 13:06:43] [Rank 0] step:241/10000 train_time:9535ms step_avg:39.57ms +[2025-09-11 13:06:43] [Rank 0] step:241/10000 train_time:9535ms step_avg:39.57ms +[2025-09-11 13:06:43] [Rank 0] 
step:261/10000 train_time:10261ms step_avg:39.31ms +[2025-09-11 13:06:43] [Rank 0] step:261/10000 train_time:10261ms step_avg:39.31ms +[2025-09-11 13:06:44] [Rank 0] step:281/10000 train_time:10987ms step_avg:39.10ms +[2025-09-11 13:06:44] [Rank 0] step:281/10000 train_time:10987ms step_avg:39.10ms +[2025-09-11 13:06:45] [Rank 0] step:301/10000 train_time:11713ms step_avg:38.91ms +[2025-09-11 13:06:45] [Rank 0] step:301/10000 train_time:11713ms step_avg:38.91ms +[2025-09-11 13:06:45] [Rank 0] step:321/10000 train_time:12439ms step_avg:38.75ms +[2025-09-11 13:06:45] [Rank 0] step:321/10000 train_time:12439ms step_avg:38.75ms +[2025-09-11 13:06:46] [Rank 0] step:341/10000 train_time:13165ms step_avg:38.61ms +[2025-09-11 13:06:46] [Rank 0] step:341/10000 train_time:13165ms step_avg:38.61ms +[2025-09-11 13:06:47] [Rank 0] step:361/10000 train_time:13891ms step_avg:38.48ms +[2025-09-11 13:06:47] [Rank 0] step:361/10000 train_time:13891ms step_avg:38.48ms +[2025-09-11 13:06:48] [Rank 0] step:381/10000 train_time:14617ms step_avg:38.36ms +[2025-09-11 13:06:48] [Rank 0] step:381/10000 train_time:14617ms step_avg:38.36ms +[2025-09-11 13:06:48] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 13:06:48] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 13:06:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 13:06:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 13:07:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 13:07:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 13:07:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:07:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 13:07:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 13:07:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 13:07:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 13:07:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 13:07:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 13:07:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 13:07:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 13:07:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 13:07:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 13:07:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 13:07:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 13:07:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 13:07:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 13:07:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 13:07:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 13:07:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 13:07:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 13:07:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 13:07:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 13:07:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 13:07:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 13:07:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 13:07:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 13:07:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 13:07:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 13:07:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 13:07:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 13:07:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 13:07:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 13:07:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 13:07:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 13:07:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 13:07:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 13:07:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 13:07:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 13:07:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 13:07:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 13:07:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:07:34] [Rank 0] PRINT: step:400/10000 val_loss:7.1837 total_sharp:1.8206e-02 L1_sharp:1.6304e-01 L2_sharp:1.8424e-01 L3_sharp:1.9624e-01 L4_sharp:2.6632e-01 L5_sharp:3.1984e-01 L6_sharp:4.5715e-01 L7_sharp:4.5145e-01 L8_sharp:5.5321e-01 L9_sharp:6.5062e-01 L10_sharp:8.7047e-01 L11_sharp:1.3166e+00 L12_sharp:1.1662e+00 total_fnorm:5.4370e+00 total_l1_linf:1.5262e+04 total_spectral:2.7186e+00 L1_fnorm:1.1933e-01 L2_fnorm:1.1883e-01 L3_fnorm:1.1805e-01 L4_fnorm:1.1632e-01 L5_fnorm:1.1492e-01 L6_fnorm:1.1120e-01 L7_fnorm:1.0892e-01 L8_fnorm:1.0453e-01 L9_fnorm:9.8387e-02 L10_fnorm:8.3613e-02 L11_fnorm:7.8218e-02 L12_fnorm:7.2653e-02 L1_l1linf:4.4746e-02 L2_l1linf:4.4656e-02 L3_l1linf:4.4648e-02 L4_l1linf:4.4333e-02 L5_l1linf:4.4350e-02 L6_l1linf:4.3577e-02 L7_l1linf:4.1664e-02 L8_l1linf:3.8561e-02 L9_l1linf:3.4562e-02 L10_l1linf:3.1256e-02 L11_l1linf:3.0165e-02 L12_l1linf:2.7759e-02 L1_spectral:1.2048e-03 L2_spectral:1.2047e-03 L3_spectral:1.2052e-03 L4_spectral:1.2048e-03 L5_spectral:1.2048e-03 L6_spectral:1.2051e-03 L7_spectral:1.2046e-03 L8_spectral:1.2048e-03 L9_spectral:1.2047e-03 L10_spectral:1.2041e-03 L11_spectral:1.2042e-03 L12_spectral:1.2044e-03 train_time:15322ms step_avg:38.31ms +[2025-09-11 13:07:34] [Rank 0] PRINT: step:400/10000 val_loss:7.1837 total_sharp:1.8206e-02 L1_sharp:1.6304e-01 L2_sharp:1.8424e-01 L3_sharp:1.9624e-01 L4_sharp:2.6632e-01 L5_sharp:3.1984e-01 L6_sharp:4.5715e-01 L7_sharp:4.5145e-01 L8_sharp:5.5321e-01 L9_sharp:6.5062e-01 L10_sharp:8.7047e-01 L11_sharp:1.3166e+00 L12_sharp:1.1662e+00 total_fnorm:5.4370e+00 total_l1_linf:1.5262e+04 total_spectral:2.7186e+00 L1_fnorm:1.1933e-01 L2_fnorm:1.1883e-01 L3_fnorm:1.1805e-01 L4_fnorm:1.1632e-01 L5_fnorm:1.1492e-01 L6_fnorm:1.1120e-01 L7_fnorm:1.0892e-01 L8_fnorm:1.0453e-01 L9_fnorm:9.8387e-02 L10_fnorm:8.3613e-02 L11_fnorm:7.8218e-02 L12_fnorm:7.2653e-02 L1_l1linf:4.4746e-02 L2_l1linf:4.4656e-02 L3_l1linf:4.4648e-02 L4_l1linf:4.4333e-02 L5_l1linf:4.4350e-02 
L6_l1linf:4.3577e-02 L7_l1linf:4.1664e-02 L8_l1linf:3.8561e-02 L9_l1linf:3.4562e-02 L10_l1linf:3.1256e-02 L11_l1linf:3.0165e-02 L12_l1linf:2.7759e-02 L1_spectral:1.2048e-03 L2_spectral:1.2047e-03 L3_spectral:1.2052e-03 L4_spectral:1.2048e-03 L5_spectral:1.2048e-03 L6_spectral:1.2051e-03 L7_spectral:1.2046e-03 L8_spectral:1.2048e-03 L9_spectral:1.2047e-03 L10_spectral:1.2041e-03 L11_spectral:1.2042e-03 L12_spectral:1.2044e-03 train_time:15322ms step_avg:38.31ms +[2025-09-11 13:08:04] [Rank 0] step:401/10000 train_time:45194ms step_avg:112.70ms +[2025-09-11 13:08:04] [Rank 0] step:401/10000 train_time:45194ms step_avg:112.70ms +[2025-09-11 13:08:06] [Rank 0] step:421/10000 train_time:47477ms step_avg:112.77ms +[2025-09-11 13:08:06] [Rank 0] step:421/10000 train_time:47477ms step_avg:112.77ms +[2025-09-11 13:08:07] [Rank 0] step:441/10000 train_time:48116ms step_avg:109.11ms +[2025-09-11 13:08:07] [Rank 0] step:441/10000 train_time:48116ms step_avg:109.11ms +[2025-09-11 13:08:07] [Rank 0] step:461/10000 train_time:48754ms step_avg:105.76ms +[2025-09-11 13:08:07] [Rank 0] step:461/10000 train_time:48754ms step_avg:105.76ms +[2025-09-11 13:08:08] [Rank 0] step:481/10000 train_time:49391ms step_avg:102.68ms +[2025-09-11 13:08:08] [Rank 0] step:481/10000 train_time:49391ms step_avg:102.68ms +[2025-09-11 13:08:09] [Rank 0] step:501/10000 train_time:50029ms step_avg:99.86ms +[2025-09-11 13:08:09] [Rank 0] step:501/10000 train_time:50029ms step_avg:99.86ms +[2025-09-11 13:08:09] [Rank 0] step:521/10000 train_time:50668ms step_avg:97.25ms +[2025-09-11 13:08:09] [Rank 0] step:521/10000 train_time:50668ms step_avg:97.25ms +[2025-09-11 13:08:10] [Rank 0] step:541/10000 train_time:51306ms step_avg:94.83ms +[2025-09-11 13:08:10] [Rank 0] step:541/10000 train_time:51306ms step_avg:94.83ms +[2025-09-11 13:08:11] [Rank 0] step:561/10000 train_time:51944ms step_avg:92.59ms +[2025-09-11 13:08:11] [Rank 0] step:561/10000 train_time:51944ms step_avg:92.59ms +[2025-09-11 13:08:11] [Rank 
0] step:581/10000 train_time:52582ms step_avg:90.50ms +[2025-09-11 13:08:11] [Rank 0] step:581/10000 train_time:52582ms step_avg:90.50ms +[2025-09-11 13:08:12] [Rank 0] step:601/10000 train_time:53221ms step_avg:88.55ms +[2025-09-11 13:08:12] [Rank 0] step:601/10000 train_time:53221ms step_avg:88.55ms +[2025-09-11 13:08:13] [Rank 0] step:621/10000 train_time:53859ms step_avg:86.73ms +[2025-09-11 13:08:13] [Rank 0] step:621/10000 train_time:53859ms step_avg:86.73ms +[2025-09-11 13:08:13] [Rank 0] step:641/10000 train_time:54497ms step_avg:85.02ms +[2025-09-11 13:08:13] [Rank 0] step:641/10000 train_time:54497ms step_avg:85.02ms +[2025-09-11 13:08:14] [Rank 0] step:661/10000 train_time:55135ms step_avg:83.41ms +[2025-09-11 13:08:14] [Rank 0] step:661/10000 train_time:55135ms step_avg:83.41ms +[2025-09-11 13:08:14] [Rank 0] step:681/10000 train_time:55772ms step_avg:81.90ms +[2025-09-11 13:08:14] [Rank 0] step:681/10000 train_time:55772ms step_avg:81.90ms +[2025-09-11 13:08:15] [Rank 0] step:701/10000 train_time:56409ms step_avg:80.47ms +[2025-09-11 13:08:15] [Rank 0] step:701/10000 train_time:56409ms step_avg:80.47ms +[2025-09-11 13:08:16] [Rank 0] step:721/10000 train_time:57047ms step_avg:79.12ms +[2025-09-11 13:08:16] [Rank 0] step:721/10000 train_time:57047ms step_avg:79.12ms +[2025-09-11 13:08:16] [Rank 0] step:741/10000 train_time:57684ms step_avg:77.85ms +[2025-09-11 13:08:16] [Rank 0] step:741/10000 train_time:57684ms step_avg:77.85ms +[2025-09-11 13:08:17] [Rank 0] step:761/10000 train_time:58327ms step_avg:76.65ms +[2025-09-11 13:08:17] [Rank 0] step:761/10000 train_time:58327ms step_avg:76.65ms +[2025-09-11 13:08:18] [Rank 0] step:781/10000 train_time:58970ms step_avg:75.51ms +[2025-09-11 13:08:18] [Rank 0] step:781/10000 train_time:58970ms step_avg:75.51ms +[2025-09-11 13:08:18] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 13:08:18] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 13:08:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 13:08:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 13:08:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 13:08:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 13:08:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:08:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:08:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 13:08:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 13:08:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 13:08:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 13:08:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 13:08:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 13:08:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 13:08:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 13:08:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 13:08:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 13:08:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 13:08:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 13:08:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 13:08:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 13:08:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 13:08:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 13:08:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 13:08:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 13:08:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 13:08:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 13:08:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 13:08:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 13:09:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 13:09:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 13:09:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 13:09:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 13:09:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 13:09:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 13:09:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 13:09:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 13:09:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 13:09:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 13:09:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... 
+[2025-09-11 13:09:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 13:09:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 13:09:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 13:09:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 13:09:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 13:09:02] [Rank 0] PRINT: step:800/10000 val_loss:6.7057 total_sharp:1.0943e-01 L1_sharp:1.7460e-01 L2_sharp:1.8311e-01 L3_sharp:1.8774e-01 L4_sharp:2.3014e-01 L5_sharp:3.4060e-01 L6_sharp:4.8396e-01 L7_sharp:7.0963e-01 L8_sharp:1.3425e+00 L9_sharp:1.5421e+00 L10_sharp:2.0567e+00 L11_sharp:2.4717e+00 L12_sharp:2.2816e+00 total_fnorm:2.8438e+00 total_l1_linf:3.4880e+03 total_spectral:1.4375e+00 L1_fnorm:1.1377e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1523e-01 L4_fnorm:1.1475e-01 L5_fnorm:1.0986e-01 L6_fnorm:1.1182e-01 L7_fnorm:1.1084e-01 L8_fnorm:1.0254e-01 L9_fnorm:1.0205e-01 L10_fnorm:9.1309e-02 L11_fnorm:7.8125e-02 L12_fnorm:6.3477e-02 L1_l1linf:4.0527e-02 L2_l1linf:4.0283e-02 L3_l1linf:4.0283e-02 L4_l1linf:4.0283e-02 L5_l1linf:4.0039e-02 L6_l1linf:4.1016e-02 L7_l1linf:4.0527e-02 L8_l1linf:3.9795e-02 L9_l1linf:3.5645e-02 L10_l1linf:3.1494e-02 L11_l1linf:2.4658e-02 L12_l1linf:2.0386e-02 L1_spectral:1.6007e-03 L2_spectral:1.6107e-03 L3_spectral:1.6091e-03 L4_spectral:1.6180e-03 L5_spectral:1.5992e-03 L6_spectral:1.6046e-03 L7_spectral:1.6139e-03 L8_spectral:1.5558e-03 L9_spectral:1.5629e-03 L10_spectral:1.4728e-03 L11_spectral:1.4275e-03 L12_spectral:1.3166e-03 train_time:59594ms step_avg:74.49ms +[2025-09-11 13:09:02] [Rank 0] PRINT: step:800/10000 val_loss:6.7057 total_sharp:1.0943e-01 L1_sharp:1.7460e-01 L2_sharp:1.8311e-01 L3_sharp:1.8774e-01 L4_sharp:2.3014e-01 L5_sharp:3.4060e-01 L6_sharp:4.8396e-01 L7_sharp:7.0963e-01 L8_sharp:1.3425e+00 
L9_sharp:1.5421e+00 L10_sharp:2.0567e+00 L11_sharp:2.4717e+00 L12_sharp:2.2816e+00 total_fnorm:2.8438e+00 total_l1_linf:3.4880e+03 total_spectral:1.4375e+00 L1_fnorm:1.1377e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1523e-01 L4_fnorm:1.1475e-01 L5_fnorm:1.0986e-01 L6_fnorm:1.1182e-01 L7_fnorm:1.1084e-01 L8_fnorm:1.0254e-01 L9_fnorm:1.0205e-01 L10_fnorm:9.1309e-02 L11_fnorm:7.8125e-02 L12_fnorm:6.3477e-02 L1_l1linf:4.0527e-02 L2_l1linf:4.0283e-02 L3_l1linf:4.0283e-02 L4_l1linf:4.0283e-02 L5_l1linf:4.0039e-02 L6_l1linf:4.1016e-02 L7_l1linf:4.0527e-02 L8_l1linf:3.9795e-02 L9_l1linf:3.5645e-02 L10_l1linf:3.1494e-02 L11_l1linf:2.4658e-02 L12_l1linf:2.0386e-02 L1_spectral:1.6007e-03 L2_spectral:1.6107e-03 L3_spectral:1.6091e-03 L4_spectral:1.6180e-03 L5_spectral:1.5992e-03 L6_spectral:1.6046e-03 L7_spectral:1.6139e-03 L8_spectral:1.5558e-03 L9_spectral:1.5629e-03 L10_spectral:1.4728e-03 L11_spectral:1.4275e-03 L12_spectral:1.3166e-03 train_time:59594ms step_avg:74.49ms +[2025-09-11 13:09:03] [Rank 0] step:801/10000 train_time:61390ms step_avg:76.64ms +[2025-09-11 13:09:03] [Rank 0] step:801/10000 train_time:61390ms step_avg:76.64ms +[2025-09-11 13:09:04] [Rank 0] step:821/10000 train_time:62037ms step_avg:75.56ms +[2025-09-11 13:09:04] [Rank 0] step:821/10000 train_time:62037ms step_avg:75.56ms +[2025-09-11 13:09:05] [Rank 0] step:841/10000 train_time:62681ms step_avg:74.53ms +[2025-09-11 13:09:05] [Rank 0] step:841/10000 train_time:62681ms step_avg:74.53ms +[2025-09-11 13:09:06] [Rank 0] step:861/10000 train_time:63632ms step_avg:73.90ms +[2025-09-11 13:09:06] [Rank 0] step:861/10000 train_time:63632ms step_avg:73.90ms +[2025-09-11 13:09:06] [Rank 0] step:881/10000 train_time:64276ms step_avg:72.96ms +[2025-09-11 13:09:06] [Rank 0] step:881/10000 train_time:64276ms step_avg:72.96ms +[2025-09-11 13:09:07] [Rank 0] step:901/10000 train_time:64919ms step_avg:72.05ms +[2025-09-11 13:09:07] [Rank 0] step:901/10000 train_time:64919ms step_avg:72.05ms +[2025-09-11 13:09:08] [Rank 0] 
step:921/10000 train_time:65562ms step_avg:71.19ms +[2025-09-11 13:09:08] [Rank 0] step:921/10000 train_time:65562ms step_avg:71.19ms +[2025-09-11 13:09:08] [Rank 0] step:941/10000 train_time:66205ms step_avg:70.36ms +[2025-09-11 13:09:08] [Rank 0] step:941/10000 train_time:66205ms step_avg:70.36ms +[2025-09-11 13:09:09] [Rank 0] step:961/10000 train_time:66848ms step_avg:69.56ms +[2025-09-11 13:09:09] [Rank 0] step:961/10000 train_time:66848ms step_avg:69.56ms +[2025-09-11 13:09:10] [Rank 0] step:981/10000 train_time:67492ms step_avg:68.80ms +[2025-09-11 13:09:10] [Rank 0] step:981/10000 train_time:67492ms step_avg:68.80ms +[2025-09-11 13:09:10] [Rank 0] step:1001/10000 train_time:68135ms step_avg:68.07ms +[2025-09-11 13:09:10] [Rank 0] step:1001/10000 train_time:68135ms step_avg:68.07ms +[2025-09-11 13:09:11] [Rank 0] step:1021/10000 train_time:68778ms step_avg:67.36ms +[2025-09-11 13:09:11] [Rank 0] step:1021/10000 train_time:68778ms step_avg:67.36ms +[2025-09-11 13:09:11] [Rank 0] step:1041/10000 train_time:69421ms step_avg:66.69ms +[2025-09-11 13:09:11] [Rank 0] step:1041/10000 train_time:69421ms step_avg:66.69ms +[2025-09-11 13:09:12] [Rank 0] step:1061/10000 train_time:70064ms step_avg:66.04ms +[2025-09-11 13:09:12] [Rank 0] step:1061/10000 train_time:70064ms step_avg:66.04ms +[2025-09-11 13:09:13] [Rank 0] step:1081/10000 train_time:70706ms step_avg:65.41ms +[2025-09-11 13:09:13] [Rank 0] step:1081/10000 train_time:70706ms step_avg:65.41ms +[2025-09-11 13:09:13] [Rank 0] step:1101/10000 train_time:71349ms step_avg:64.80ms +[2025-09-11 13:09:13] [Rank 0] step:1101/10000 train_time:71349ms step_avg:64.80ms +[2025-09-11 13:09:14] [Rank 0] step:1121/10000 train_time:71992ms step_avg:64.22ms +[2025-09-11 13:09:14] [Rank 0] step:1121/10000 train_time:71992ms step_avg:64.22ms +[2025-09-11 13:09:15] [Rank 0] step:1141/10000 train_time:72635ms step_avg:63.66ms +[2025-09-11 13:09:15] [Rank 0] step:1141/10000 train_time:72635ms step_avg:63.66ms +[2025-09-11 13:09:15] 
[Rank 0] step:1161/10000 train_time:73278ms step_avg:63.12ms +[2025-09-11 13:09:15] [Rank 0] step:1161/10000 train_time:73278ms step_avg:63.12ms +[2025-09-11 13:09:16] [Rank 0] step:1181/10000 train_time:73921ms step_avg:62.59ms +[2025-09-11 13:09:16] [Rank 0] step:1181/10000 train_time:73921ms step_avg:62.59ms +[2025-09-11 13:09:17] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 13:09:17] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 13:09:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 13:09:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 13:09:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 13:09:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 13:09:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:09:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:09:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 13:09:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 13:09:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 13:09:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 13:09:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 13:09:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 13:09:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... 
+[2025-09-11 13:09:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 13:09:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 13:09:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 13:09:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 13:09:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 13:09:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 13:09:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 13:09:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 13:09:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 13:09:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 13:09:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 13:09:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 13:09:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 13:09:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 13:09:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 13:09:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 13:09:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 13:09:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 13:09:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 13:09:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... 
+[2025-09-11 13:09:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 13:09:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 13:09:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 13:09:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 13:09:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 13:09:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 13:09:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 13:09:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 13:09:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 13:09:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 13:09:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:09:27] [Rank 0] PRINT: step:1200/10000 val_loss:6.4604 total_sharp:9.6955e-02 L1_sharp:1.2120e-01 L2_sharp:1.2496e-01 L3_sharp:1.1742e-01 L4_sharp:1.2482e-01 L5_sharp:1.9461e-01 L6_sharp:2.8588e-01 L7_sharp:4.1058e-01 L8_sharp:6.7368e-01 L9_sharp:9.8971e-01 L10_sharp:8.1440e-01 L11_sharp:8.6441e-01 L12_sharp:1.0422e+00 total_fnorm:2.2656e+00 total_l1_linf:2.6400e+03 total_spectral:1.1484e+00 L1_fnorm:1.1475e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1279e-01 L9_fnorm:1.1572e-01 L10_fnorm:1.1328e-01 L11_fnorm:1.0596e-01 L12_fnorm:8.9844e-02 L1_l1linf:3.5889e-02 L2_l1linf:3.6377e-02 L3_l1linf:3.5889e-02 L4_l1linf:3.5889e-02 L5_l1linf:3.5400e-02 L6_l1linf:3.5889e-02 L7_l1linf:3.6621e-02 L8_l1linf:3.7354e-02 L9_l1linf:3.9307e-02 L10_l1linf:3.8818e-02 L11_l1linf:3.6621e-02 L12_l1linf:2.6489e-02 L1_spectral:1.5953e-03 L2_spectral:1.6179e-03 L3_spectral:1.6104e-03 L4_spectral:1.6216e-03 L5_spectral:1.6035e-03 L6_spectral:1.6178e-03 L7_spectral:1.6154e-03 L8_spectral:1.5892e-03 L9_spectral:1.5913e-03 L10_spectral:1.5817e-03 L11_spectral:1.5463e-03 L12_spectral:1.5140e-03 train_time:74546ms step_avg:62.12ms +[2025-09-11 13:09:27] [Rank 0] PRINT: step:1200/10000 val_loss:6.4604 total_sharp:9.6955e-02 L1_sharp:1.2120e-01 L2_sharp:1.2496e-01 L3_sharp:1.1742e-01 L4_sharp:1.2482e-01 L5_sharp:1.9461e-01 L6_sharp:2.8588e-01 L7_sharp:4.1058e-01 L8_sharp:6.7368e-01 L9_sharp:9.8971e-01 L10_sharp:8.1440e-01 L11_sharp:8.6441e-01 L12_sharp:1.0422e+00 total_fnorm:2.2656e+00 total_l1_linf:2.6400e+03 total_spectral:1.1484e+00 L1_fnorm:1.1475e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1279e-01 L9_fnorm:1.1572e-01 L10_fnorm:1.1328e-01 L11_fnorm:1.0596e-01 L12_fnorm:8.9844e-02 L1_l1linf:3.5889e-02 L2_l1linf:3.6377e-02 L3_l1linf:3.5889e-02 L4_l1linf:3.5889e-02 L5_l1linf:3.5400e-02 
L6_l1linf:3.5889e-02 L7_l1linf:3.6621e-02 L8_l1linf:3.7354e-02 L9_l1linf:3.9307e-02 L10_l1linf:3.8818e-02 L11_l1linf:3.6621e-02 L12_l1linf:2.6489e-02 L1_spectral:1.5953e-03 L2_spectral:1.6179e-03 L3_spectral:1.6104e-03 L4_spectral:1.6216e-03 L5_spectral:1.6035e-03 L6_spectral:1.6178e-03 L7_spectral:1.6154e-03 L8_spectral:1.5892e-03 L9_spectral:1.5913e-03 L10_spectral:1.5817e-03 L11_spectral:1.5463e-03 L12_spectral:1.5140e-03 train_time:74546ms step_avg:62.12ms +[2025-09-11 13:09:29] [Rank 0] step:1201/10000 train_time:76276ms step_avg:63.51ms +[2025-09-11 13:09:29] [Rank 0] step:1201/10000 train_time:76276ms step_avg:63.51ms +[2025-09-11 13:09:29] [Rank 0] step:1221/10000 train_time:76923ms step_avg:63.00ms +[2025-09-11 13:09:29] [Rank 0] step:1221/10000 train_time:76923ms step_avg:63.00ms +[2025-09-11 13:09:30] [Rank 0] step:1241/10000 train_time:77567ms step_avg:62.50ms +[2025-09-11 13:09:30] [Rank 0] step:1241/10000 train_time:77567ms step_avg:62.50ms +[2025-09-11 13:09:31] [Rank 0] step:1261/10000 train_time:78211ms step_avg:62.02ms +[2025-09-11 13:09:31] [Rank 0] step:1261/10000 train_time:78211ms step_avg:62.02ms +[2025-09-11 13:09:31] [Rank 0] step:1281/10000 train_time:78854ms step_avg:61.56ms +[2025-09-11 13:09:31] [Rank 0] step:1281/10000 train_time:78854ms step_avg:61.56ms +[2025-09-11 13:09:32] [Rank 0] step:1301/10000 train_time:79497ms step_avg:61.10ms +[2025-09-11 13:09:32] [Rank 0] step:1301/10000 train_time:79497ms step_avg:61.10ms +[2025-09-11 13:09:33] [Rank 0] step:1321/10000 train_time:80140ms step_avg:60.67ms +[2025-09-11 13:09:33] [Rank 0] step:1321/10000 train_time:80140ms step_avg:60.67ms +[2025-09-11 13:09:33] [Rank 0] step:1341/10000 train_time:80783ms step_avg:60.24ms +[2025-09-11 13:09:33] [Rank 0] step:1341/10000 train_time:80783ms step_avg:60.24ms +[2025-09-11 13:09:34] [Rank 0] step:1361/10000 train_time:81427ms step_avg:59.83ms +[2025-09-11 13:09:34] [Rank 0] step:1361/10000 train_time:81427ms step_avg:59.83ms +[2025-09-11 13:09:35] 
[Rank 0] step:1381/10000 train_time:82071ms step_avg:59.43ms +[2025-09-11 13:09:35] [Rank 0] step:1381/10000 train_time:82071ms step_avg:59.43ms +[2025-09-11 13:09:35] [Rank 0] step:1401/10000 train_time:82713ms step_avg:59.04ms +[2025-09-11 13:09:35] [Rank 0] step:1401/10000 train_time:82713ms step_avg:59.04ms +[2025-09-11 13:09:36] [Rank 0] step:1421/10000 train_time:83356ms step_avg:58.66ms +[2025-09-11 13:09:36] [Rank 0] step:1421/10000 train_time:83356ms step_avg:58.66ms +[2025-09-11 13:09:36] [Rank 0] step:1441/10000 train_time:83999ms step_avg:58.29ms +[2025-09-11 13:09:36] [Rank 0] step:1441/10000 train_time:83999ms step_avg:58.29ms +[2025-09-11 13:09:37] [Rank 0] step:1461/10000 train_time:84641ms step_avg:57.93ms +[2025-09-11 13:09:37] [Rank 0] step:1461/10000 train_time:84641ms step_avg:57.93ms +[2025-09-11 13:09:38] [Rank 0] step:1481/10000 train_time:85284ms step_avg:57.59ms +[2025-09-11 13:09:38] [Rank 0] step:1481/10000 train_time:85284ms step_avg:57.59ms +[2025-09-11 13:09:38] [Rank 0] step:1501/10000 train_time:85930ms step_avg:57.25ms +[2025-09-11 13:09:38] [Rank 0] step:1501/10000 train_time:85930ms step_avg:57.25ms +[2025-09-11 13:09:39] [Rank 0] step:1521/10000 train_time:86578ms step_avg:56.92ms +[2025-09-11 13:09:39] [Rank 0] step:1521/10000 train_time:86578ms step_avg:56.92ms +[2025-09-11 13:09:40] [Rank 0] step:1541/10000 train_time:87225ms step_avg:56.60ms +[2025-09-11 13:09:40] [Rank 0] step:1541/10000 train_time:87225ms step_avg:56.60ms +[2025-09-11 13:09:40] [Rank 0] step:1561/10000 train_time:87873ms step_avg:56.29ms +[2025-09-11 13:09:40] [Rank 0] step:1561/10000 train_time:87873ms step_avg:56.29ms +[2025-09-11 13:09:41] [Rank 0] step:1581/10000 train_time:88520ms step_avg:55.99ms +[2025-09-11 13:09:41] [Rank 0] step:1581/10000 train_time:88520ms step_avg:55.99ms +[2025-09-11 13:09:42] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 13:09:42] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 13:09:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 13:09:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 13:09:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 13:09:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 13:09:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:09:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:09:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 13:09:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 13:09:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 13:09:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 13:09:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 13:09:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 13:09:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 13:09:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 13:09:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 13:09:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 13:09:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 13:09:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 13:09:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 13:09:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 13:09:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 13:09:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 13:09:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 13:09:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 13:09:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 13:09:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 13:09:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 13:09:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 13:09:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 13:09:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 13:09:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 13:09:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 13:09:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 13:09:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 13:09:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 13:09:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 13:09:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 13:09:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 13:09:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 13:09:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 13:09:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 13:09:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 13:09:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 13:09:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 13:09:52] [Rank 0] PRINT: step:1600/10000 val_loss:6.3057 total_sharp:9.7277e-02 L1_sharp:1.2016e-01 L2_sharp:1.1589e-01 L3_sharp:1.1679e-01 L4_sharp:1.2637e-01 L5_sharp:1.5815e-01 L6_sharp:1.9453e-01 L7_sharp:2.7255e-01 L8_sharp:4.6212e-01 L9_sharp:5.1091e-01 L10_sharp:5.8251e-01 L11_sharp:7.4280e-01 L12_sharp:1.0965e+00 total_fnorm:2.0312e+00 total_l1_linf:2.3040e+03 total_spectral:1.0312e+00 L1_fnorm:1.1523e-01 L2_fnorm:1.1621e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1572e-01 L8_fnorm:1.1377e-01 L9_fnorm:1.1670e-01 L10_fnorm:1.1670e-01 L11_fnorm:1.1377e-01 L12_fnorm:9.7656e-02 L1_l1linf:3.4668e-02 L2_l1linf:3.5156e-02 L3_l1linf:3.4668e-02 L4_l1linf:3.4424e-02 L5_l1linf:3.4180e-02 L6_l1linf:3.3691e-02 L7_l1linf:3.4912e-02 L8_l1linf:3.5645e-02 L9_l1linf:3.7354e-02 L10_l1linf:3.8330e-02 L11_l1linf:3.7598e-02 L12_l1linf:2.5879e-02 L1_spectral:1.5943e-03 L2_spectral:1.6214e-03 L3_spectral:1.6241e-03 L4_spectral:1.6137e-03 L5_spectral:1.6084e-03 L6_spectral:1.6165e-03 L7_spectral:1.6122e-03 L8_spectral:1.5919e-03 L9_spectral:1.6015e-03 L10_spectral:1.5957e-03 L11_spectral:1.5699e-03 L12_spectral:1.5667e-03 train_time:89149ms step_avg:55.72ms +[2025-09-11 13:09:52] [Rank 0] PRINT: step:1600/10000 
val_loss:6.3057 total_sharp:9.7277e-02 L1_sharp:1.2016e-01 L2_sharp:1.1589e-01 L3_sharp:1.1679e-01 L4_sharp:1.2637e-01 L5_sharp:1.5815e-01 L6_sharp:1.9453e-01 L7_sharp:2.7255e-01 L8_sharp:4.6212e-01 L9_sharp:5.1091e-01 L10_sharp:5.8251e-01 L11_sharp:7.4280e-01 L12_sharp:1.0965e+00 total_fnorm:2.0312e+00 total_l1_linf:2.3040e+03 total_spectral:1.0312e+00 L1_fnorm:1.1523e-01 L2_fnorm:1.1621e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1572e-01 L8_fnorm:1.1377e-01 L9_fnorm:1.1670e-01 L10_fnorm:1.1670e-01 L11_fnorm:1.1377e-01 L12_fnorm:9.7656e-02 L1_l1linf:3.4668e-02 L2_l1linf:3.5156e-02 L3_l1linf:3.4668e-02 L4_l1linf:3.4424e-02 L5_l1linf:3.4180e-02 L6_l1linf:3.3691e-02 L7_l1linf:3.4912e-02 L8_l1linf:3.5645e-02 L9_l1linf:3.7354e-02 L10_l1linf:3.8330e-02 L11_l1linf:3.7598e-02 L12_l1linf:2.5879e-02 L1_spectral:1.5943e-03 L2_spectral:1.6214e-03 L3_spectral:1.6241e-03 L4_spectral:1.6137e-03 L5_spectral:1.6084e-03 L6_spectral:1.6165e-03 L7_spectral:1.6122e-03 L8_spectral:1.5919e-03 L9_spectral:1.6015e-03 L10_spectral:1.5957e-03 L11_spectral:1.5699e-03 L12_spectral:1.5667e-03 train_time:89149ms step_avg:55.72ms +[2025-09-11 13:09:54] [Rank 0] step:1601/10000 train_time:90989ms step_avg:56.83ms +[2025-09-11 13:09:54] [Rank 0] step:1601/10000 train_time:90989ms step_avg:56.83ms +[2025-09-11 13:09:55] [Rank 0] step:1621/10000 train_time:91642ms step_avg:56.53ms +[2025-09-11 13:09:55] [Rank 0] step:1621/10000 train_time:91642ms step_avg:56.53ms +[2025-09-11 13:09:55] [Rank 0] step:1641/10000 train_time:92294ms step_avg:56.24ms +[2025-09-11 13:09:55] [Rank 0] step:1641/10000 train_time:92294ms step_avg:56.24ms +[2025-09-11 13:09:56] [Rank 0] step:1661/10000 train_time:92942ms step_avg:55.96ms +[2025-09-11 13:09:56] [Rank 0] step:1661/10000 train_time:92942ms step_avg:55.96ms +[2025-09-11 13:09:56] [Rank 0] step:1681/10000 train_time:93591ms step_avg:55.68ms +[2025-09-11 13:09:56] [Rank 0] step:1681/10000 train_time:93591ms 
step_avg:55.68ms +[2025-09-11 13:09:57] [Rank 0] step:1701/10000 train_time:94239ms step_avg:55.40ms +[2025-09-11 13:09:57] [Rank 0] step:1701/10000 train_time:94239ms step_avg:55.40ms +[2025-09-11 13:09:58] [Rank 0] step:1721/10000 train_time:94886ms step_avg:55.13ms +[2025-09-11 13:09:58] [Rank 0] step:1721/10000 train_time:94886ms step_avg:55.13ms +[2025-09-11 13:09:58] [Rank 0] step:1741/10000 train_time:95534ms step_avg:54.87ms +[2025-09-11 13:09:58] [Rank 0] step:1741/10000 train_time:95534ms step_avg:54.87ms +[2025-09-11 13:09:59] [Rank 0] step:1761/10000 train_time:96181ms step_avg:54.62ms +[2025-09-11 13:09:59] [Rank 0] step:1761/10000 train_time:96181ms step_avg:54.62ms +[2025-09-11 13:10:00] [Rank 0] step:1781/10000 train_time:96829ms step_avg:54.37ms +[2025-09-11 13:10:00] [Rank 0] step:1781/10000 train_time:96829ms step_avg:54.37ms +[2025-09-11 13:10:00] [Rank 0] step:1801/10000 train_time:97476ms step_avg:54.12ms +[2025-09-11 13:10:00] [Rank 0] step:1801/10000 train_time:97476ms step_avg:54.12ms +[2025-09-11 13:10:01] [Rank 0] step:1821/10000 train_time:98199ms step_avg:53.93ms +[2025-09-11 13:10:01] [Rank 0] step:1821/10000 train_time:98199ms step_avg:53.93ms +[2025-09-11 13:10:02] [Rank 0] step:1841/10000 train_time:98905ms step_avg:53.72ms +[2025-09-11 13:10:02] [Rank 0] step:1841/10000 train_time:98905ms step_avg:53.72ms +[2025-09-11 13:10:02] [Rank 0] step:1861/10000 train_time:99552ms step_avg:53.49ms +[2025-09-11 13:10:02] [Rank 0] step:1861/10000 train_time:99552ms step_avg:53.49ms +[2025-09-11 13:10:03] [Rank 0] step:1881/10000 train_time:100200ms step_avg:53.27ms +[2025-09-11 13:10:03] [Rank 0] step:1881/10000 train_time:100200ms step_avg:53.27ms +[2025-09-11 13:10:04] [Rank 0] step:1901/10000 train_time:100847ms step_avg:53.05ms +[2025-09-11 13:10:04] [Rank 0] step:1901/10000 train_time:100847ms step_avg:53.05ms +[2025-09-11 13:10:04] [Rank 0] step:1921/10000 train_time:101494ms step_avg:52.83ms +[2025-09-11 13:10:04] [Rank 0] 
step:1921/10000 train_time:101494ms step_avg:52.83ms +[2025-09-11 13:10:05] [Rank 0] step:1941/10000 train_time:102142ms step_avg:52.62ms +[2025-09-11 13:10:05] [Rank 0] step:1941/10000 train_time:102142ms step_avg:52.62ms +[2025-09-11 13:10:06] [Rank 0] step:1961/10000 train_time:102789ms step_avg:52.42ms +[2025-09-11 13:10:06] [Rank 0] step:1961/10000 train_time:102789ms step_avg:52.42ms +[2025-09-11 13:10:07] [Rank 0] step:1981/10000 train_time:103711ms step_avg:52.35ms +[2025-09-11 13:10:07] [Rank 0] step:1981/10000 train_time:103711ms step_avg:52.35ms +[2025-09-11 13:10:07] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 13:10:07] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 13:10:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 13:10:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 13:10:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 13:10:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 13:10:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:10:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:10:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 13:10:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 13:10:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 13:10:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 13:10:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 13:10:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 13:10:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 13:10:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 13:10:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 13:10:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 13:10:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 13:10:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 13:10:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 13:10:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 13:10:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 13:10:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 13:10:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 13:10:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 13:10:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 13:10:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 13:10:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 13:10:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 13:10:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 13:10:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 13:10:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 13:10:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 13:10:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 13:10:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 13:10:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 13:10:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 13:10:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 13:10:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 13:10:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 13:10:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 13:10:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 13:10:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 13:10:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 13:10:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:10:20] [Rank 0] PRINT: step:2000/10000 val_loss:6.1805 total_sharp:8.6692e-02 L1_sharp:6.2446e-02 L2_sharp:5.8063e-02 L3_sharp:5.7242e-02 L4_sharp:6.8076e-02 L5_sharp:9.8036e-02 L6_sharp:1.3719e-01 L7_sharp:1.9699e-01 L8_sharp:3.8620e-01 L9_sharp:4.6900e-01 L10_sharp:5.1712e-01 L11_sharp:7.1174e-01 L12_sharp:1.7555e+00 total_fnorm:1.9922e+00 total_l1_linf:2.2560e+03 total_spectral:1.0156e+00 L1_fnorm:1.1475e-01 L2_fnorm:1.1621e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1572e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1719e-01 L10_fnorm:1.1670e-01 L11_fnorm:1.1621e-01 L12_fnorm:9.9609e-02 L1_l1linf:3.4180e-02 L2_l1linf:3.3447e-02 L3_l1linf:3.4180e-02 L4_l1linf:3.3691e-02 L5_l1linf:3.3447e-02 L6_l1linf:3.3203e-02 L7_l1linf:3.3203e-02 L8_l1linf:3.4668e-02 L9_l1linf:3.5889e-02 L10_l1linf:3.6865e-02 L11_l1linf:3.6621e-02 L12_l1linf:2.3926e-02 L1_spectral:1.5991e-03 L2_spectral:1.6263e-03 L3_spectral:1.6215e-03 L4_spectral:1.6301e-03 L5_spectral:1.6083e-03 L6_spectral:1.6269e-03 L7_spectral:1.6244e-03 L8_spectral:1.6125e-03 L9_spectral:1.5987e-03 L10_spectral:1.6009e-03 L11_spectral:1.6015e-03 L12_spectral:1.5977e-03 train_time:104341ms step_avg:52.17ms +[2025-09-11 13:10:20] [Rank 0] PRINT: step:2000/10000 val_loss:6.1805 total_sharp:8.6692e-02 L1_sharp:6.2446e-02 L2_sharp:5.8063e-02 L3_sharp:5.7242e-02 L4_sharp:6.8076e-02 L5_sharp:9.8036e-02 L6_sharp:1.3719e-01 L7_sharp:1.9699e-01 L8_sharp:3.8620e-01 L9_sharp:4.6900e-01 L10_sharp:5.1712e-01 L11_sharp:7.1174e-01 L12_sharp:1.7555e+00 total_fnorm:1.9922e+00 total_l1_linf:2.2560e+03 total_spectral:1.0156e+00 L1_fnorm:1.1475e-01 L2_fnorm:1.1621e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1572e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1719e-01 L10_fnorm:1.1670e-01 L11_fnorm:1.1621e-01 L12_fnorm:9.9609e-02 L1_l1linf:3.4180e-02 L2_l1linf:3.3447e-02 L3_l1linf:3.4180e-02 L4_l1linf:3.3691e-02 L5_l1linf:3.3447e-02 
L6_l1linf:3.3203e-02 L7_l1linf:3.3203e-02 L8_l1linf:3.4668e-02 L9_l1linf:3.5889e-02 L10_l1linf:3.6865e-02 L11_l1linf:3.6621e-02 L12_l1linf:2.3926e-02 L1_spectral:1.5991e-03 L2_spectral:1.6263e-03 L3_spectral:1.6215e-03 L4_spectral:1.6301e-03 L5_spectral:1.6083e-03 L6_spectral:1.6269e-03 L7_spectral:1.6244e-03 L8_spectral:1.6125e-03 L9_spectral:1.5987e-03 L10_spectral:1.6009e-03 L11_spectral:1.6015e-03 L12_spectral:1.5977e-03 train_time:104341ms step_avg:52.17ms +[2025-09-11 13:10:22] [Rank 0] step:2001/10000 train_time:106045ms step_avg:53.00ms +[2025-09-11 13:10:22] [Rank 0] step:2001/10000 train_time:106045ms step_avg:53.00ms +[2025-09-11 13:10:23] [Rank 0] step:2021/10000 train_time:106682ms step_avg:52.79ms +[2025-09-11 13:10:23] [Rank 0] step:2021/10000 train_time:106682ms step_avg:52.79ms +[2025-09-11 13:10:23] [Rank 0] step:2041/10000 train_time:107329ms step_avg:52.59ms +[2025-09-11 13:10:23] [Rank 0] step:2041/10000 train_time:107329ms step_avg:52.59ms +[2025-09-11 13:10:24] [Rank 0] step:2061/10000 train_time:107977ms step_avg:52.39ms +[2025-09-11 13:10:24] [Rank 0] step:2061/10000 train_time:107977ms step_avg:52.39ms +[2025-09-11 13:10:25] [Rank 0] step:2081/10000 train_time:108624ms step_avg:52.20ms +[2025-09-11 13:10:25] [Rank 0] step:2081/10000 train_time:108624ms step_avg:52.20ms +[2025-09-11 13:10:25] [Rank 0] step:2101/10000 train_time:109271ms step_avg:52.01ms +[2025-09-11 13:10:25] [Rank 0] step:2101/10000 train_time:109271ms step_avg:52.01ms +[2025-09-11 13:10:26] [Rank 0] step:2121/10000 train_time:109919ms step_avg:51.82ms +[2025-09-11 13:10:26] [Rank 0] step:2121/10000 train_time:109919ms step_avg:51.82ms +[2025-09-11 13:10:27] [Rank 0] step:2141/10000 train_time:110565ms step_avg:51.64ms +[2025-09-11 13:10:27] [Rank 0] step:2141/10000 train_time:110565ms step_avg:51.64ms +[2025-09-11 13:10:27] [Rank 0] step:2161/10000 train_time:111213ms step_avg:51.46ms +[2025-09-11 13:10:27] [Rank 0] step:2161/10000 train_time:111213ms step_avg:51.46ms 
+[2025-09-11 13:10:28] [Rank 0] step:2181/10000 train_time:111859ms step_avg:51.29ms +[2025-09-11 13:10:28] [Rank 0] step:2181/10000 train_time:111859ms step_avg:51.29ms +[2025-09-11 13:10:29] [Rank 0] step:2201/10000 train_time:112507ms step_avg:51.12ms +[2025-09-11 13:10:29] [Rank 0] step:2201/10000 train_time:112507ms step_avg:51.12ms +[2025-09-11 13:10:29] [Rank 0] step:2221/10000 train_time:113153ms step_avg:50.95ms +[2025-09-11 13:10:29] [Rank 0] step:2221/10000 train_time:113153ms step_avg:50.95ms +[2025-09-11 13:10:30] [Rank 0] step:2241/10000 train_time:113812ms step_avg:50.79ms +[2025-09-11 13:10:30] [Rank 0] step:2241/10000 train_time:113812ms step_avg:50.79ms +[2025-09-11 13:10:31] [Rank 0] step:2261/10000 train_time:114473ms step_avg:50.63ms +[2025-09-11 13:10:31] [Rank 0] step:2261/10000 train_time:114473ms step_avg:50.63ms +[2025-09-11 13:10:31] [Rank 0] step:2281/10000 train_time:115133ms step_avg:50.47ms +[2025-09-11 13:10:31] [Rank 0] step:2281/10000 train_time:115133ms step_avg:50.47ms +[2025-09-11 13:10:32] [Rank 0] step:2301/10000 train_time:115793ms step_avg:50.32ms +[2025-09-11 13:10:32] [Rank 0] step:2301/10000 train_time:115793ms step_avg:50.32ms +[2025-09-11 13:10:33] [Rank 0] step:2321/10000 train_time:116453ms step_avg:50.17ms +[2025-09-11 13:10:33] [Rank 0] step:2321/10000 train_time:116453ms step_avg:50.17ms +[2025-09-11 13:10:33] [Rank 0] step:2341/10000 train_time:117113ms step_avg:50.03ms +[2025-09-11 13:10:33] [Rank 0] step:2341/10000 train_time:117113ms step_avg:50.03ms +[2025-09-11 13:10:34] [Rank 0] step:2361/10000 train_time:117774ms step_avg:49.88ms +[2025-09-11 13:10:34] [Rank 0] step:2361/10000 train_time:117774ms step_avg:49.88ms +[2025-09-11 13:10:35] [Rank 0] step:2381/10000 train_time:118433ms step_avg:49.74ms +[2025-09-11 13:10:35] [Rank 0] step:2381/10000 train_time:118433ms step_avg:49.74ms +[2025-09-11 13:10:35] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 13:10:35] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 13:10:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 13:10:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 13:10:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 13:10:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 13:10:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:10:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:10:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 13:10:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 13:10:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 13:10:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 13:10:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 13:10:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 13:10:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 13:10:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 13:10:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 13:10:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 13:10:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 13:10:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 13:10:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 13:10:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 13:10:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 13:10:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 13:10:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 13:10:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 13:10:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 13:10:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 13:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 13:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 13:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 13:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 13:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 13:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 13:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 13:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 13:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 13:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 13:10:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 13:10:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 13:10:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 13:10:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 13:10:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 13:10:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 13:10:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 13:10:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 13:10:46] [Rank 0] PRINT: step:2400/10000 val_loss:6.0660 total_sharp:9.2364e-02 L1_sharp:4.1734e-02 L2_sharp:4.4199e-02 L3_sharp:4.5388e-02 L4_sharp:5.7760e-02 L5_sharp:7.8598e-02 L6_sharp:1.2931e-01 L7_sharp:2.1117e-01 L8_sharp:3.9139e-01 L9_sharp:5.8152e-01 L10_sharp:6.1317e-01 L11_sharp:7.2296e-01 L12_sharp:1.0310e+00 total_fnorm:1.8594e+00 total_l1_linf:2.0800e+03 total_spectral:9.4531e-01 L1_fnorm:1.1523e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1572e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1670e-01 L10_fnorm:1.1670e-01 L11_fnorm:1.1621e-01 L12_fnorm:1.0400e-01 L1_l1linf:3.3936e-02 L2_l1linf:3.3203e-02 L3_l1linf:3.3203e-02 L4_l1linf:3.3447e-02 L5_l1linf:3.2715e-02 L6_l1linf:3.2715e-02 L7_l1linf:3.2471e-02 L8_l1linf:3.3447e-02 L9_l1linf:3.4912e-02 L10_l1linf:3.5645e-02 L11_l1linf:3.5889e-02 L12_l1linf:2.6367e-02 L1_spectral:1.6066e-03 L2_spectral:1.6229e-03 L3_spectral:1.6228e-03 L4_spectral:1.6175e-03 L5_spectral:1.6139e-03 L6_spectral:1.6087e-03 L7_spectral:1.6359e-03 L8_spectral:1.6153e-03 L9_spectral:1.6047e-03 L10_spectral:1.6106e-03 L11_spectral:1.5932e-03 L12_spectral:1.5946e-03 train_time:119075ms step_avg:49.61ms +[2025-09-11 13:10:46] [Rank 0] PRINT: step:2400/10000 
val_loss:6.0660 total_sharp:9.2364e-02 L1_sharp:4.1734e-02 L2_sharp:4.4199e-02 L3_sharp:4.5388e-02 L4_sharp:5.7760e-02 L5_sharp:7.8598e-02 L6_sharp:1.2931e-01 L7_sharp:2.1117e-01 L8_sharp:3.9139e-01 L9_sharp:5.8152e-01 L10_sharp:6.1317e-01 L11_sharp:7.2296e-01 L12_sharp:1.0310e+00 total_fnorm:1.8594e+00 total_l1_linf:2.0800e+03 total_spectral:9.4531e-01 L1_fnorm:1.1523e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1572e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1670e-01 L10_fnorm:1.1670e-01 L11_fnorm:1.1621e-01 L12_fnorm:1.0400e-01 L1_l1linf:3.3936e-02 L2_l1linf:3.3203e-02 L3_l1linf:3.3203e-02 L4_l1linf:3.3447e-02 L5_l1linf:3.2715e-02 L6_l1linf:3.2715e-02 L7_l1linf:3.2471e-02 L8_l1linf:3.3447e-02 L9_l1linf:3.4912e-02 L10_l1linf:3.5645e-02 L11_l1linf:3.5889e-02 L12_l1linf:2.6367e-02 L1_spectral:1.6066e-03 L2_spectral:1.6229e-03 L3_spectral:1.6228e-03 L4_spectral:1.6175e-03 L5_spectral:1.6139e-03 L6_spectral:1.6087e-03 L7_spectral:1.6359e-03 L8_spectral:1.6153e-03 L9_spectral:1.6047e-03 L10_spectral:1.6106e-03 L11_spectral:1.5932e-03 L12_spectral:1.5946e-03 train_time:119075ms step_avg:49.61ms +[2025-09-11 13:10:48] [Rank 0] step:2401/10000 train_time:120876ms step_avg:50.34ms +[2025-09-11 13:10:48] [Rank 0] step:2401/10000 train_time:120876ms step_avg:50.34ms +[2025-09-11 13:10:49] [Rank 0] step:2421/10000 train_time:121536ms step_avg:50.20ms +[2025-09-11 13:10:49] [Rank 0] step:2421/10000 train_time:121536ms step_avg:50.20ms +[2025-09-11 13:10:49] [Rank 0] step:2441/10000 train_time:122199ms step_avg:50.06ms +[2025-09-11 13:10:49] [Rank 0] step:2441/10000 train_time:122199ms step_avg:50.06ms +[2025-09-11 13:10:50] [Rank 0] step:2461/10000 train_time:122860ms step_avg:49.92ms +[2025-09-11 13:10:50] [Rank 0] step:2461/10000 train_time:122860ms step_avg:49.92ms +[2025-09-11 13:10:51] [Rank 0] step:2481/10000 train_time:123523ms step_avg:49.79ms +[2025-09-11 13:10:51] [Rank 0] step:2481/10000 
train_time:123523ms step_avg:49.79ms +[2025-09-11 13:10:51] [Rank 0] step:2501/10000 train_time:124184ms step_avg:49.65ms +[2025-09-11 13:10:51] [Rank 0] step:2501/10000 train_time:124184ms step_avg:49.65ms +[2025-09-11 13:10:52] [Rank 0] step:2521/10000 train_time:124846ms step_avg:49.52ms +[2025-09-11 13:10:52] [Rank 0] step:2521/10000 train_time:124846ms step_avg:49.52ms +[2025-09-11 13:10:53] [Rank 0] step:2541/10000 train_time:125507ms step_avg:49.39ms +[2025-09-11 13:10:53] [Rank 0] step:2541/10000 train_time:125507ms step_avg:49.39ms +[2025-09-11 13:10:53] [Rank 0] step:2561/10000 train_time:126168ms step_avg:49.27ms +[2025-09-11 13:10:53] [Rank 0] step:2561/10000 train_time:126168ms step_avg:49.27ms +[2025-09-11 13:10:54] [Rank 0] step:2581/10000 train_time:126830ms step_avg:49.14ms +[2025-09-11 13:10:54] [Rank 0] step:2581/10000 train_time:126830ms step_avg:49.14ms +[2025-09-11 13:10:55] [Rank 0] step:2601/10000 train_time:127491ms step_avg:49.02ms +[2025-09-11 13:10:55] [Rank 0] step:2601/10000 train_time:127491ms step_avg:49.02ms +[2025-09-11 13:10:55] [Rank 0] step:2621/10000 train_time:128154ms step_avg:48.90ms +[2025-09-11 13:10:55] [Rank 0] step:2621/10000 train_time:128154ms step_avg:48.90ms +[2025-09-11 13:10:56] [Rank 0] step:2641/10000 train_time:128815ms step_avg:48.78ms +[2025-09-11 13:10:56] [Rank 0] step:2641/10000 train_time:128815ms step_avg:48.78ms +[2025-09-11 13:10:57] [Rank 0] step:2661/10000 train_time:129477ms step_avg:48.66ms +[2025-09-11 13:10:57] [Rank 0] step:2661/10000 train_time:129477ms step_avg:48.66ms +[2025-09-11 13:10:57] [Rank 0] step:2681/10000 train_time:130137ms step_avg:48.54ms +[2025-09-11 13:10:57] [Rank 0] step:2681/10000 train_time:130137ms step_avg:48.54ms +[2025-09-11 13:10:58] [Rank 0] step:2701/10000 train_time:130799ms step_avg:48.43ms +[2025-09-11 13:10:58] [Rank 0] step:2701/10000 train_time:130799ms step_avg:48.43ms +[2025-09-11 13:10:59] [Rank 0] step:2721/10000 train_time:131460ms step_avg:48.31ms 
+[2025-09-11 13:10:59] [Rank 0] step:2721/10000 train_time:131460ms step_avg:48.31ms +[2025-09-11 13:10:59] [Rank 0] step:2741/10000 train_time:132122ms step_avg:48.20ms +[2025-09-11 13:10:59] [Rank 0] step:2741/10000 train_time:132122ms step_avg:48.20ms +[2025-09-11 13:11:00] [Rank 0] step:2761/10000 train_time:132783ms step_avg:48.09ms +[2025-09-11 13:11:00] [Rank 0] step:2761/10000 train_time:132783ms step_avg:48.09ms +[2025-09-11 13:11:01] [Rank 0] step:2781/10000 train_time:133444ms step_avg:47.98ms +[2025-09-11 13:11:01] [Rank 0] step:2781/10000 train_time:133444ms step_avg:47.98ms +[2025-09-11 13:11:01] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 13:11:01] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 13:11:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 13:11:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 13:11:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 13:11:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 13:11:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:11:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:11:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 13:11:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 13:11:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 13:11:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 13:11:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 13:11:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 13:11:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 13:11:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 13:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 13:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 13:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 13:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 13:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 13:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 13:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 13:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 13:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 13:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 13:11:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 13:11:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 13:11:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 13:11:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 13:11:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 13:11:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 13:11:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 13:11:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 13:11:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 13:11:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 13:11:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 13:11:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 13:11:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 13:11:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 13:11:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 13:11:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 13:11:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 13:11:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 13:11:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 13:11:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:11:16] [Rank 0] PRINT: step:2800/10000 val_loss:5.9795 total_sharp:1.2006e-01 L1_sharp:5.5437e-02 L2_sharp:5.7068e-02 L3_sharp:5.1382e-02 L4_sharp:6.2990e-02 L5_sharp:9.2893e-02 L6_sharp:1.5136e-01 L7_sharp:2.0767e-01 L8_sharp:4.1757e-01 L9_sharp:5.5243e-01 L10_sharp:6.7513e-01 L11_sharp:7.9870e-01 L12_sharp:1.3525e+00 total_fnorm:1.7344e+00 total_l1_linf:1.9200e+03 total_spectral:8.7500e-01 L1_fnorm:1.1523e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1621e-01 L10_fnorm:1.1621e-01 L11_fnorm:1.1621e-01 L12_fnorm:1.0352e-01 L1_l1linf:3.2471e-02 L2_l1linf:3.2471e-02 L3_l1linf:3.2471e-02 L4_l1linf:3.2715e-02 L5_l1linf:3.2471e-02 L6_l1linf:3.2471e-02 L7_l1linf:3.1738e-02 L8_l1linf:3.2959e-02 L9_l1linf:3.3936e-02 L10_l1linf:3.4180e-02 L11_l1linf:3.5400e-02 L12_l1linf:2.4414e-02 L1_spectral:1.6043e-03 L2_spectral:1.6169e-03 L3_spectral:1.6290e-03 L4_spectral:1.6220e-03 L5_spectral:1.6156e-03 L6_spectral:1.6247e-03 L7_spectral:1.6220e-03 L8_spectral:1.6059e-03 L9_spectral:1.6042e-03 L10_spectral:1.6096e-03 L11_spectral:1.6062e-03 L12_spectral:1.6053e-03 train_time:134087ms step_avg:47.89ms +[2025-09-11 13:11:16] [Rank 0] PRINT: step:2800/10000 val_loss:5.9795 total_sharp:1.2006e-01 L1_sharp:5.5437e-02 L2_sharp:5.7068e-02 L3_sharp:5.1382e-02 L4_sharp:6.2990e-02 L5_sharp:9.2893e-02 L6_sharp:1.5136e-01 L7_sharp:2.0767e-01 L8_sharp:4.1757e-01 L9_sharp:5.5243e-01 L10_sharp:6.7513e-01 L11_sharp:7.9870e-01 L12_sharp:1.3525e+00 total_fnorm:1.7344e+00 total_l1_linf:1.9200e+03 total_spectral:8.7500e-01 L1_fnorm:1.1523e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1621e-01 L10_fnorm:1.1621e-01 L11_fnorm:1.1621e-01 L12_fnorm:1.0352e-01 L1_l1linf:3.2471e-02 L2_l1linf:3.2471e-02 L3_l1linf:3.2471e-02 L4_l1linf:3.2715e-02 L5_l1linf:3.2471e-02 
L6_l1linf:3.2471e-02 L7_l1linf:3.1738e-02 L8_l1linf:3.2959e-02 L9_l1linf:3.3936e-02 L10_l1linf:3.4180e-02 L11_l1linf:3.5400e-02 L12_l1linf:2.4414e-02 L1_spectral:1.6043e-03 L2_spectral:1.6169e-03 L3_spectral:1.6290e-03 L4_spectral:1.6220e-03 L5_spectral:1.6156e-03 L6_spectral:1.6247e-03 L7_spectral:1.6220e-03 L8_spectral:1.6059e-03 L9_spectral:1.6042e-03 L10_spectral:1.6096e-03 L11_spectral:1.6062e-03 L12_spectral:1.6053e-03 train_time:134087ms step_avg:47.89ms +[2025-09-11 13:11:18] [Rank 0] step:2801/10000 train_time:135857ms step_avg:48.50ms +[2025-09-11 13:11:18] [Rank 0] step:2801/10000 train_time:135857ms step_avg:48.50ms +[2025-09-11 13:11:18] [Rank 0] step:2821/10000 train_time:136506ms step_avg:48.39ms +[2025-09-11 13:11:18] [Rank 0] step:2821/10000 train_time:136506ms step_avg:48.39ms +[2025-09-11 13:11:19] [Rank 0] step:2841/10000 train_time:137168ms step_avg:48.28ms +[2025-09-11 13:11:19] [Rank 0] step:2841/10000 train_time:137168ms step_avg:48.28ms +[2025-09-11 13:11:20] [Rank 0] step:2861/10000 train_time:137829ms step_avg:48.18ms +[2025-09-11 13:11:20] [Rank 0] step:2861/10000 train_time:137829ms step_avg:48.18ms +[2025-09-11 13:11:20] [Rank 0] step:2881/10000 train_time:138490ms step_avg:48.07ms +[2025-09-11 13:11:20] [Rank 0] step:2881/10000 train_time:138490ms step_avg:48.07ms +[2025-09-11 13:11:21] [Rank 0] step:2901/10000 train_time:139151ms step_avg:47.97ms +[2025-09-11 13:11:21] [Rank 0] step:2901/10000 train_time:139151ms step_avg:47.97ms +[2025-09-11 13:11:22] [Rank 0] step:2921/10000 train_time:139812ms step_avg:47.86ms +[2025-09-11 13:11:22] [Rank 0] step:2921/10000 train_time:139812ms step_avg:47.86ms +[2025-09-11 13:11:22] [Rank 0] step:2941/10000 train_time:140473ms step_avg:47.76ms +[2025-09-11 13:11:22] [Rank 0] step:2941/10000 train_time:140473ms step_avg:47.76ms +[2025-09-11 13:11:23] [Rank 0] step:2961/10000 train_time:141133ms step_avg:47.66ms +[2025-09-11 13:11:23] [Rank 0] step:2961/10000 train_time:141133ms step_avg:47.66ms 
+[2025-09-11 13:11:24] [Rank 0] step:2981/10000 train_time:141796ms step_avg:47.57ms +[2025-09-11 13:11:24] [Rank 0] step:2981/10000 train_time:141796ms step_avg:47.57ms +[2025-09-11 13:11:24] [Rank 0] step:3001/10000 train_time:142459ms step_avg:47.47ms +[2025-09-11 13:11:24] [Rank 0] step:3001/10000 train_time:142459ms step_avg:47.47ms +[2025-09-11 13:11:25] [Rank 0] step:3021/10000 train_time:143123ms step_avg:47.38ms +[2025-09-11 13:11:25] [Rank 0] step:3021/10000 train_time:143123ms step_avg:47.38ms +[2025-09-11 13:11:26] [Rank 0] step:3041/10000 train_time:143787ms step_avg:47.28ms +[2025-09-11 13:11:26] [Rank 0] step:3041/10000 train_time:143787ms step_avg:47.28ms +[2025-09-11 13:11:26] [Rank 0] step:3061/10000 train_time:144450ms step_avg:47.19ms +[2025-09-11 13:11:26] [Rank 0] step:3061/10000 train_time:144450ms step_avg:47.19ms +[2025-09-11 13:11:27] [Rank 0] step:3081/10000 train_time:145113ms step_avg:47.10ms +[2025-09-11 13:11:27] [Rank 0] step:3081/10000 train_time:145113ms step_avg:47.10ms +[2025-09-11 13:11:27] [Rank 0] step:3101/10000 train_time:145777ms step_avg:47.01ms +[2025-09-11 13:11:27] [Rank 0] step:3101/10000 train_time:145777ms step_avg:47.01ms +[2025-09-11 13:11:28] [Rank 0] step:3121/10000 train_time:146440ms step_avg:46.92ms +[2025-09-11 13:11:28] [Rank 0] step:3121/10000 train_time:146440ms step_avg:46.92ms +[2025-09-11 13:11:29] [Rank 0] step:3141/10000 train_time:147103ms step_avg:46.83ms +[2025-09-11 13:11:29] [Rank 0] step:3141/10000 train_time:147103ms step_avg:46.83ms +[2025-09-11 13:11:29] [Rank 0] step:3161/10000 train_time:147766ms step_avg:46.75ms +[2025-09-11 13:11:29] [Rank 0] step:3161/10000 train_time:147766ms step_avg:46.75ms +[2025-09-11 13:11:30] [Rank 0] step:3181/10000 train_time:148429ms step_avg:46.66ms +[2025-09-11 13:11:30] [Rank 0] step:3181/10000 train_time:148429ms step_avg:46.66ms +[2025-09-11 13:11:31] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 13:11:31] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 13:11:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 13:11:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 13:11:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 13:11:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 13:11:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:11:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:11:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 13:11:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 13:11:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 13:11:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 13:11:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 13:11:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 13:11:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 13:11:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 13:11:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 13:11:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 13:11:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 13:11:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 13:11:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 13:11:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 13:11:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 13:11:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 13:11:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 13:11:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 13:11:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 13:11:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 13:11:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 13:11:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 13:11:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 13:11:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 13:11:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 13:11:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 13:11:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 13:11:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 13:11:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 13:11:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 13:11:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 13:11:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 13:11:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 13:11:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 13:11:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 13:11:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 13:11:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 13:11:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 13:11:42] [Rank 0] PRINT: step:3200/10000 val_loss:5.8930 total_sharp:7.8853e-02 L1_sharp:3.2999e-02 L2_sharp:3.4367e-02 L3_sharp:3.6282e-02 L4_sharp:4.7813e-02 L5_sharp:6.7548e-02 L6_sharp:1.2334e-01 L7_sharp:1.8880e-01 L8_sharp:3.0018e-01 L9_sharp:4.1011e-01 L10_sharp:5.0752e-01 L11_sharp:5.3962e-01 L12_sharp:1.0113e+00 total_fnorm:1.8359e+00 total_l1_linf:2.0640e+03 total_spectral:9.4141e-01 L1_fnorm:1.1523e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1621e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1426e-01 L9_fnorm:1.1572e-01 L10_fnorm:1.1621e-01 L11_fnorm:1.1621e-01 L12_fnorm:1.0742e-01 L1_l1linf:3.1982e-02 L2_l1linf:3.2227e-02 L3_l1linf:3.1982e-02 L4_l1linf:3.1982e-02 L5_l1linf:3.2227e-02 L6_l1linf:3.1738e-02 L7_l1linf:3.1128e-02 L8_l1linf:3.1738e-02 L9_l1linf:3.2227e-02 L10_l1linf:3.3691e-02 L11_l1linf:3.3936e-02 L12_l1linf:2.5146e-02 L1_spectral:1.6179e-03 L2_spectral:1.6218e-03 L3_spectral:1.6296e-03 L4_spectral:1.6177e-03 L5_spectral:1.6099e-03 L6_spectral:1.6195e-03 L7_spectral:1.6230e-03 L8_spectral:1.6086e-03 L9_spectral:1.6194e-03 L10_spectral:1.6131e-03 L11_spectral:1.6010e-03 L12_spectral:1.6071e-03 train_time:149073ms step_avg:46.59ms +[2025-09-11 13:11:42] [Rank 0] PRINT: step:3200/10000 
val_loss:5.8930 total_sharp:7.8853e-02 L1_sharp:3.2999e-02 L2_sharp:3.4367e-02 L3_sharp:3.6282e-02 L4_sharp:4.7813e-02 L5_sharp:6.7548e-02 L6_sharp:1.2334e-01 L7_sharp:1.8880e-01 L8_sharp:3.0018e-01 L9_sharp:4.1011e-01 L10_sharp:5.0752e-01 L11_sharp:5.3962e-01 L12_sharp:1.0113e+00 total_fnorm:1.8359e+00 total_l1_linf:2.0640e+03 total_spectral:9.4141e-01 L1_fnorm:1.1523e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1621e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1426e-01 L9_fnorm:1.1572e-01 L10_fnorm:1.1621e-01 L11_fnorm:1.1621e-01 L12_fnorm:1.0742e-01 L1_l1linf:3.1982e-02 L2_l1linf:3.2227e-02 L3_l1linf:3.1982e-02 L4_l1linf:3.1982e-02 L5_l1linf:3.2227e-02 L6_l1linf:3.1738e-02 L7_l1linf:3.1128e-02 L8_l1linf:3.1738e-02 L9_l1linf:3.2227e-02 L10_l1linf:3.3691e-02 L11_l1linf:3.3936e-02 L12_l1linf:2.5146e-02 L1_spectral:1.6179e-03 L2_spectral:1.6218e-03 L3_spectral:1.6296e-03 L4_spectral:1.6177e-03 L5_spectral:1.6099e-03 L6_spectral:1.6195e-03 L7_spectral:1.6230e-03 L8_spectral:1.6086e-03 L9_spectral:1.6194e-03 L10_spectral:1.6131e-03 L11_spectral:1.6010e-03 L12_spectral:1.6071e-03 train_time:149073ms step_avg:46.59ms +[2025-09-11 13:11:44] [Rank 0] step:3201/10000 train_time:150849ms step_avg:47.13ms +[2025-09-11 13:11:44] [Rank 0] step:3201/10000 train_time:150849ms step_avg:47.13ms +[2025-09-11 13:11:44] [Rank 0] step:3221/10000 train_time:151518ms step_avg:47.04ms +[2025-09-11 13:11:44] [Rank 0] step:3221/10000 train_time:151518ms step_avg:47.04ms +[2025-09-11 13:11:45] [Rank 0] step:3241/10000 train_time:152183ms step_avg:46.96ms +[2025-09-11 13:11:45] [Rank 0] step:3241/10000 train_time:152183ms step_avg:46.96ms +[2025-09-11 13:11:46] [Rank 0] step:3261/10000 train_time:152847ms step_avg:46.87ms +[2025-09-11 13:11:46] [Rank 0] step:3261/10000 train_time:152847ms step_avg:46.87ms +[2025-09-11 13:11:46] [Rank 0] step:3281/10000 train_time:153510ms step_avg:46.79ms +[2025-09-11 13:11:46] [Rank 0] step:3281/10000 
train_time:153510ms step_avg:46.79ms +[2025-09-11 13:11:47] [Rank 0] step:3301/10000 train_time:154174ms step_avg:46.71ms +[2025-09-11 13:11:47] [Rank 0] step:3301/10000 train_time:154174ms step_avg:46.71ms +[2025-09-11 13:11:48] [Rank 0] step:3321/10000 train_time:154836ms step_avg:46.62ms +[2025-09-11 13:11:48] [Rank 0] step:3321/10000 train_time:154836ms step_avg:46.62ms +[2025-09-11 13:11:48] [Rank 0] step:3341/10000 train_time:155500ms step_avg:46.54ms +[2025-09-11 13:11:48] [Rank 0] step:3341/10000 train_time:155500ms step_avg:46.54ms +[2025-09-11 13:11:49] [Rank 0] step:3361/10000 train_time:156163ms step_avg:46.46ms +[2025-09-11 13:11:49] [Rank 0] step:3361/10000 train_time:156163ms step_avg:46.46ms +[2025-09-11 13:11:50] [Rank 0] step:3381/10000 train_time:156826ms step_avg:46.38ms +[2025-09-11 13:11:50] [Rank 0] step:3381/10000 train_time:156826ms step_avg:46.38ms +[2025-09-11 13:11:50] [Rank 0] step:3401/10000 train_time:157489ms step_avg:46.31ms +[2025-09-11 13:11:50] [Rank 0] step:3401/10000 train_time:157489ms step_avg:46.31ms +[2025-09-11 13:11:51] [Rank 0] step:3421/10000 train_time:158152ms step_avg:46.23ms +[2025-09-11 13:11:51] [Rank 0] step:3421/10000 train_time:158152ms step_avg:46.23ms +[2025-09-11 13:11:52] [Rank 0] step:3441/10000 train_time:158815ms step_avg:46.15ms +[2025-09-11 13:11:52] [Rank 0] step:3441/10000 train_time:158815ms step_avg:46.15ms +[2025-09-11 13:11:52] [Rank 0] step:3461/10000 train_time:159478ms step_avg:46.08ms +[2025-09-11 13:11:52] [Rank 0] step:3461/10000 train_time:159478ms step_avg:46.08ms +[2025-09-11 13:11:53] [Rank 0] step:3481/10000 train_time:160141ms step_avg:46.00ms +[2025-09-11 13:11:53] [Rank 0] step:3481/10000 train_time:160141ms step_avg:46.00ms +[2025-09-11 13:11:54] [Rank 0] step:3501/10000 train_time:160804ms step_avg:45.93ms +[2025-09-11 13:11:54] [Rank 0] step:3501/10000 train_time:160804ms step_avg:45.93ms +[2025-09-11 13:11:54] [Rank 0] step:3521/10000 train_time:161467ms step_avg:45.86ms 
+[2025-09-11 13:11:54] [Rank 0] step:3521/10000 train_time:161467ms step_avg:45.86ms +[2025-09-11 13:11:55] [Rank 0] step:3541/10000 train_time:162129ms step_avg:45.79ms +[2025-09-11 13:11:55] [Rank 0] step:3541/10000 train_time:162129ms step_avg:45.79ms +[2025-09-11 13:11:55] [Rank 0] step:3561/10000 train_time:162794ms step_avg:45.72ms +[2025-09-11 13:11:55] [Rank 0] step:3561/10000 train_time:162794ms step_avg:45.72ms +[2025-09-11 13:11:56] [Rank 0] step:3581/10000 train_time:163457ms step_avg:45.65ms +[2025-09-11 13:11:56] [Rank 0] step:3581/10000 train_time:163457ms step_avg:45.65ms +[2025-09-11 13:11:57] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 13:11:57] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 13:11:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 13:11:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 13:12:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 13:12:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 13:12:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:12:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:12:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 13:12:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 13:12:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 13:12:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 13:12:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 13:12:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 13:12:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 13:12:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 13:12:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 13:12:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 13:12:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 13:12:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 13:12:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 13:12:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 13:12:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 13:12:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 13:12:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 13:12:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 13:12:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 13:12:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 13:12:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 13:12:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 13:12:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 13:12:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 13:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 13:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 13:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 13:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 13:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 13:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 13:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 13:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 13:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 13:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 13:12:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 13:12:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 13:12:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 13:12:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:12:08] [Rank 0] PRINT: step:3600/10000 val_loss:5.8322 total_sharp:9.1679e-02 L1_sharp:3.0612e-02 L2_sharp:2.9549e-02 L3_sharp:3.2505e-02 L4_sharp:3.9098e-02 L5_sharp:5.7935e-02 L6_sharp:8.9177e-02 L7_sharp:1.8228e-01 L8_sharp:2.9738e-01 L9_sharp:4.3243e-01 L10_sharp:5.1115e-01 L11_sharp:6.0035e-01 L12_sharp:1.1323e+00 total_fnorm:1.6797e+00 total_l1_linf:1.8640e+03 total_spectral:8.5547e-01 L1_fnorm:1.1523e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1572e-01 L10_fnorm:1.1621e-01 L11_fnorm:1.1572e-01 L12_fnorm:1.0742e-01 L1_l1linf:3.2227e-02 L2_l1linf:3.1494e-02 L3_l1linf:3.1738e-02 L4_l1linf:3.1738e-02 L5_l1linf:3.1494e-02 L6_l1linf:3.1250e-02 L7_l1linf:3.1006e-02 L8_l1linf:3.1006e-02 L9_l1linf:3.1738e-02 L10_l1linf:3.2471e-02 L11_l1linf:3.3691e-02 L12_l1linf:2.4658e-02 L1_spectral:1.6216e-03 L2_spectral:1.6320e-03 L3_spectral:1.6383e-03 L4_spectral:1.6241e-03 L5_spectral:1.6139e-03 L6_spectral:1.6110e-03 L7_spectral:1.6303e-03 L8_spectral:1.6188e-03 L9_spectral:1.6173e-03 L10_spectral:1.6134e-03 L11_spectral:1.6062e-03 L12_spectral:1.6171e-03 train_time:164102ms step_avg:45.58ms +[2025-09-11 13:12:08] [Rank 0] PRINT: step:3600/10000 val_loss:5.8322 total_sharp:9.1679e-02 L1_sharp:3.0612e-02 L2_sharp:2.9549e-02 L3_sharp:3.2505e-02 L4_sharp:3.9098e-02 L5_sharp:5.7935e-02 L6_sharp:8.9177e-02 L7_sharp:1.8228e-01 L8_sharp:2.9738e-01 L9_sharp:4.3243e-01 L10_sharp:5.1115e-01 L11_sharp:6.0035e-01 L12_sharp:1.1323e+00 total_fnorm:1.6797e+00 total_l1_linf:1.8640e+03 total_spectral:8.5547e-01 L1_fnorm:1.1523e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1572e-01 L10_fnorm:1.1621e-01 L11_fnorm:1.1572e-01 L12_fnorm:1.0742e-01 L1_l1linf:3.2227e-02 L2_l1linf:3.1494e-02 L3_l1linf:3.1738e-02 L4_l1linf:3.1738e-02 L5_l1linf:3.1494e-02 
L6_l1linf:3.1250e-02 L7_l1linf:3.1006e-02 L8_l1linf:3.1006e-02 L9_l1linf:3.1738e-02 L10_l1linf:3.2471e-02 L11_l1linf:3.3691e-02 L12_l1linf:2.4658e-02 L1_spectral:1.6216e-03 L2_spectral:1.6320e-03 L3_spectral:1.6383e-03 L4_spectral:1.6241e-03 L5_spectral:1.6139e-03 L6_spectral:1.6110e-03 L7_spectral:1.6303e-03 L8_spectral:1.6188e-03 L9_spectral:1.6173e-03 L10_spectral:1.6134e-03 L11_spectral:1.6062e-03 L12_spectral:1.6171e-03 train_time:164102ms step_avg:45.58ms +[2025-09-11 13:12:09] [Rank 0] step:3601/10000 train_time:165851ms step_avg:46.06ms +[2025-09-11 13:12:09] [Rank 0] step:3601/10000 train_time:165851ms step_avg:46.06ms +[2025-09-11 13:12:10] [Rank 0] step:3621/10000 train_time:166531ms step_avg:45.99ms +[2025-09-11 13:12:10] [Rank 0] step:3621/10000 train_time:166531ms step_avg:45.99ms +[2025-09-11 13:12:11] [Rank 0] step:3641/10000 train_time:167194ms step_avg:45.92ms +[2025-09-11 13:12:11] [Rank 0] step:3641/10000 train_time:167194ms step_avg:45.92ms +[2025-09-11 13:12:11] [Rank 0] step:3661/10000 train_time:167857ms step_avg:45.85ms +[2025-09-11 13:12:11] [Rank 0] step:3661/10000 train_time:167857ms step_avg:45.85ms +[2025-09-11 13:12:12] [Rank 0] step:3681/10000 train_time:168520ms step_avg:45.78ms +[2025-09-11 13:12:12] [Rank 0] step:3681/10000 train_time:168520ms step_avg:45.78ms +[2025-09-11 13:12:13] [Rank 0] step:3701/10000 train_time:169183ms step_avg:45.71ms +[2025-09-11 13:12:13] [Rank 0] step:3701/10000 train_time:169183ms step_avg:45.71ms +[2025-09-11 13:12:14] [Rank 0] step:3721/10000 train_time:170158ms step_avg:45.73ms +[2025-09-11 13:12:14] [Rank 0] step:3721/10000 train_time:170158ms step_avg:45.73ms +[2025-09-11 13:12:14] [Rank 0] step:3741/10000 train_time:170831ms step_avg:45.66ms +[2025-09-11 13:12:14] [Rank 0] step:3741/10000 train_time:170831ms step_avg:45.66ms +[2025-09-11 13:12:15] [Rank 0] step:3761/10000 train_time:171505ms step_avg:45.60ms +[2025-09-11 13:12:15] [Rank 0] step:3761/10000 train_time:171505ms step_avg:45.60ms 
+[2025-09-11 13:12:16] [Rank 0] step:3781/10000 train_time:172444ms step_avg:45.61ms +[2025-09-11 13:12:16] [Rank 0] step:3781/10000 train_time:172444ms step_avg:45.61ms +[2025-09-11 13:12:17] [Rank 0] step:3801/10000 train_time:173117ms step_avg:45.55ms +[2025-09-11 13:12:17] [Rank 0] step:3801/10000 train_time:173117ms step_avg:45.55ms +[2025-09-11 13:12:17] [Rank 0] step:3821/10000 train_time:173791ms step_avg:45.48ms +[2025-09-11 13:12:17] [Rank 0] step:3821/10000 train_time:173791ms step_avg:45.48ms +[2025-09-11 13:12:18] [Rank 0] step:3841/10000 train_time:174465ms step_avg:45.42ms +[2025-09-11 13:12:18] [Rank 0] step:3841/10000 train_time:174465ms step_avg:45.42ms +[2025-09-11 13:12:19] [Rank 0] step:3861/10000 train_time:175139ms step_avg:45.36ms +[2025-09-11 13:12:19] [Rank 0] step:3861/10000 train_time:175139ms step_avg:45.36ms +[2025-09-11 13:12:19] [Rank 0] step:3881/10000 train_time:175812ms step_avg:45.30ms +[2025-09-11 13:12:19] [Rank 0] step:3881/10000 train_time:175812ms step_avg:45.30ms +[2025-09-11 13:12:20] [Rank 0] step:3901/10000 train_time:176485ms step_avg:45.24ms +[2025-09-11 13:12:20] [Rank 0] step:3901/10000 train_time:176485ms step_avg:45.24ms +[2025-09-11 13:12:21] [Rank 0] step:3921/10000 train_time:177159ms step_avg:45.18ms +[2025-09-11 13:12:21] [Rank 0] step:3921/10000 train_time:177159ms step_avg:45.18ms +[2025-09-11 13:12:21] [Rank 0] step:3941/10000 train_time:177833ms step_avg:45.12ms +[2025-09-11 13:12:21] [Rank 0] step:3941/10000 train_time:177833ms step_avg:45.12ms +[2025-09-11 13:12:22] [Rank 0] step:3961/10000 train_time:178507ms step_avg:45.07ms +[2025-09-11 13:12:22] [Rank 0] step:3961/10000 train_time:178507ms step_avg:45.07ms +[2025-09-11 13:12:23] [Rank 0] step:3981/10000 train_time:179180ms step_avg:45.01ms +[2025-09-11 13:12:23] [Rank 0] step:3981/10000 train_time:179180ms step_avg:45.01ms +[2025-09-11 13:12:23] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 13:12:23] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 13:12:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 13:12:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 13:12:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 13:12:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 13:12:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:12:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:12:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 13:12:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 13:12:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 13:12:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 13:12:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 13:12:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 13:12:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 13:12:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 13:12:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 13:12:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 13:12:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 13:12:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 13:12:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 13:12:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 13:12:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 13:12:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 13:12:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 13:12:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 13:12:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 13:12:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 13:12:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 13:12:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 13:12:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 13:12:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 13:12:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 13:12:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 13:12:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 13:12:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 13:12:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 13:12:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 13:12:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 13:12:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 13:12:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 13:12:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 13:12:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 13:12:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 13:12:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 13:12:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 13:12:34] [Rank 0] PRINT: step:4000/10000 val_loss:5.7723 total_sharp:1.1684e-01 L1_sharp:3.4117e-02 L2_sharp:3.2620e-02 L3_sharp:3.2146e-02 L4_sharp:4.5002e-02 L5_sharp:6.9825e-02 L6_sharp:1.6066e-01 L7_sharp:2.5019e-01 L8_sharp:3.9416e-01 L9_sharp:5.7535e-01 L10_sharp:8.2215e-01 L11_sharp:9.9382e-01 L12_sharp:1.2388e+00 total_fnorm:1.7734e+00 total_l1_linf:1.8960e+03 total_spectral:9.1016e-01 L1_fnorm:1.1523e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1475e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1572e-01 L10_fnorm:1.1572e-01 L11_fnorm:1.1523e-01 L12_fnorm:1.0645e-01 L1_l1linf:3.0762e-02 L2_l1linf:3.1250e-02 L3_l1linf:3.0884e-02 L4_l1linf:3.1006e-02 L5_l1linf:3.0884e-02 L6_l1linf:3.0273e-02 L7_l1linf:3.0640e-02 L8_l1linf:3.1250e-02 L9_l1linf:3.2227e-02 L10_l1linf:3.3447e-02 L11_l1linf:3.3691e-02 L12_l1linf:2.3682e-02 L1_spectral:1.6159e-03 L2_spectral:1.6230e-03 L3_spectral:1.6324e-03 L4_spectral:1.6227e-03 L5_spectral:1.6234e-03 L6_spectral:1.6174e-03 L7_spectral:1.6256e-03 L8_spectral:1.6148e-03 L9_spectral:1.6152e-03 L10_spectral:1.6044e-03 L11_spectral:1.5999e-03 L12_spectral:1.6114e-03 train_time:179835ms step_avg:44.96ms +[2025-09-11 13:12:34] [Rank 0] PRINT: step:4000/10000 
val_loss:5.7723 total_sharp:1.1684e-01 L1_sharp:3.4117e-02 L2_sharp:3.2620e-02 L3_sharp:3.2146e-02 L4_sharp:4.5002e-02 L5_sharp:6.9825e-02 L6_sharp:1.6066e-01 L7_sharp:2.5019e-01 L8_sharp:3.9416e-01 L9_sharp:5.7535e-01 L10_sharp:8.2215e-01 L11_sharp:9.9382e-01 L12_sharp:1.2388e+00 total_fnorm:1.7734e+00 total_l1_linf:1.8960e+03 total_spectral:9.1016e-01 L1_fnorm:1.1523e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1475e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1572e-01 L10_fnorm:1.1572e-01 L11_fnorm:1.1523e-01 L12_fnorm:1.0645e-01 L1_l1linf:3.0762e-02 L2_l1linf:3.1250e-02 L3_l1linf:3.0884e-02 L4_l1linf:3.1006e-02 L5_l1linf:3.0884e-02 L6_l1linf:3.0273e-02 L7_l1linf:3.0640e-02 L8_l1linf:3.1250e-02 L9_l1linf:3.2227e-02 L10_l1linf:3.3447e-02 L11_l1linf:3.3691e-02 L12_l1linf:2.3682e-02 L1_spectral:1.6159e-03 L2_spectral:1.6230e-03 L3_spectral:1.6324e-03 L4_spectral:1.6227e-03 L5_spectral:1.6234e-03 L6_spectral:1.6174e-03 L7_spectral:1.6256e-03 L8_spectral:1.6148e-03 L9_spectral:1.6152e-03 L10_spectral:1.6044e-03 L11_spectral:1.5999e-03 L12_spectral:1.6114e-03 train_time:179835ms step_avg:44.96ms +[2025-09-11 13:12:36] [Rank 0] step:4001/10000 train_time:181593ms step_avg:45.39ms +[2025-09-11 13:12:36] [Rank 0] step:4001/10000 train_time:181593ms step_avg:45.39ms +[2025-09-11 13:12:37] [Rank 0] step:4021/10000 train_time:182288ms step_avg:45.33ms +[2025-09-11 13:12:37] [Rank 0] step:4021/10000 train_time:182288ms step_avg:45.33ms +[2025-09-11 13:12:38] [Rank 0] step:4041/10000 train_time:182965ms step_avg:45.28ms +[2025-09-11 13:12:38] [Rank 0] step:4041/10000 train_time:182965ms step_avg:45.28ms +[2025-09-11 13:12:38] [Rank 0] step:4061/10000 train_time:183638ms step_avg:45.22ms +[2025-09-11 13:12:38] [Rank 0] step:4061/10000 train_time:183638ms step_avg:45.22ms +[2025-09-11 13:12:39] [Rank 0] step:4081/10000 train_time:184315ms step_avg:45.16ms +[2025-09-11 13:12:39] [Rank 0] step:4081/10000 
train_time:184315ms step_avg:45.16ms +[2025-09-11 13:12:40] [Rank 0] step:4101/10000 train_time:184990ms step_avg:45.11ms +[2025-09-11 13:12:40] [Rank 0] step:4101/10000 train_time:184990ms step_avg:45.11ms +[2025-09-11 13:12:40] [Rank 0] step:4121/10000 train_time:185666ms step_avg:45.05ms +[2025-09-11 13:12:40] [Rank 0] step:4121/10000 train_time:185666ms step_avg:45.05ms +[2025-09-11 13:12:41] [Rank 0] step:4141/10000 train_time:186341ms step_avg:45.00ms +[2025-09-11 13:12:41] [Rank 0] step:4141/10000 train_time:186341ms step_avg:45.00ms +[2025-09-11 13:12:42] [Rank 0] step:4161/10000 train_time:187015ms step_avg:44.94ms +[2025-09-11 13:12:42] [Rank 0] step:4161/10000 train_time:187015ms step_avg:44.94ms +[2025-09-11 13:12:42] [Rank 0] step:4181/10000 train_time:187691ms step_avg:44.89ms +[2025-09-11 13:12:42] [Rank 0] step:4181/10000 train_time:187691ms step_avg:44.89ms +[2025-09-11 13:12:43] [Rank 0] step:4201/10000 train_time:188367ms step_avg:44.84ms +[2025-09-11 13:12:43] [Rank 0] step:4201/10000 train_time:188367ms step_avg:44.84ms +[2025-09-11 13:12:44] [Rank 0] step:4221/10000 train_time:189042ms step_avg:44.79ms +[2025-09-11 13:12:44] [Rank 0] step:4221/10000 train_time:189042ms step_avg:44.79ms +[2025-09-11 13:12:44] [Rank 0] step:4241/10000 train_time:189717ms step_avg:44.73ms +[2025-09-11 13:12:44] [Rank 0] step:4241/10000 train_time:189717ms step_avg:44.73ms +[2025-09-11 13:12:45] [Rank 0] step:4261/10000 train_time:190393ms step_avg:44.68ms +[2025-09-11 13:12:45] [Rank 0] step:4261/10000 train_time:190393ms step_avg:44.68ms +[2025-09-11 13:12:46] [Rank 0] step:4281/10000 train_time:191069ms step_avg:44.63ms +[2025-09-11 13:12:46] [Rank 0] step:4281/10000 train_time:191069ms step_avg:44.63ms +[2025-09-11 13:12:46] [Rank 0] step:4301/10000 train_time:191745ms step_avg:44.58ms +[2025-09-11 13:12:46] [Rank 0] step:4301/10000 train_time:191745ms step_avg:44.58ms +[2025-09-11 13:12:47] [Rank 0] step:4321/10000 train_time:192420ms step_avg:44.53ms 
+[2025-09-11 13:12:47] [Rank 0] step:4321/10000 train_time:192420ms step_avg:44.53ms +[2025-09-11 13:12:48] [Rank 0] step:4341/10000 train_time:193095ms step_avg:44.48ms +[2025-09-11 13:12:48] [Rank 0] step:4341/10000 train_time:193095ms step_avg:44.48ms +[2025-09-11 13:12:48] [Rank 0] step:4361/10000 train_time:193770ms step_avg:44.43ms +[2025-09-11 13:12:48] [Rank 0] step:4361/10000 train_time:193770ms step_avg:44.43ms +[2025-09-11 13:12:49] [Rank 0] step:4381/10000 train_time:194446ms step_avg:44.38ms +[2025-09-11 13:12:49] [Rank 0] step:4381/10000 train_time:194446ms step_avg:44.38ms +[2025-09-11 13:12:50] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 13:12:50] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 13:12:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 13:12:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 13:12:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 13:12:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 13:12:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:12:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:12:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 13:12:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 13:12:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 13:12:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 13:12:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 13:12:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 13:12:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 13:12:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 13:12:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 13:12:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 13:12:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 13:12:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 13:12:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 13:12:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 13:12:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 13:12:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 13:12:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 13:12:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 13:12:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 13:12:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 13:12:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 13:12:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 13:12:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 13:12:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 13:12:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 13:12:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 13:12:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 13:12:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 13:12:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 13:12:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 13:12:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 13:12:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 13:13:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 13:13:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 13:13:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 13:13:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 13:13:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 13:13:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:13:01] [Rank 0] PRINT: step:4400/10000 val_loss:5.7233 total_sharp:7.9064e-02 L1_sharp:2.2951e-02 L2_sharp:2.4754e-02 L3_sharp:2.4145e-02 L4_sharp:3.4310e-02 L5_sharp:4.8902e-02 L6_sharp:6.8813e-02 L7_sharp:9.9473e-02 L8_sharp:1.8073e-01 L9_sharp:2.9549e-01 L10_sharp:4.2781e-01 L11_sharp:5.8039e-01 L12_sharp:1.3062e+00 total_fnorm:1.6172e+00 total_l1_linf:1.7440e+03 total_spectral:8.2812e-01 L1_fnorm:1.1475e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1475e-01 L8_fnorm:1.1426e-01 L9_fnorm:1.1523e-01 L10_fnorm:1.1523e-01 L11_fnorm:1.1523e-01 L12_fnorm:1.0498e-01 L1_l1linf:3.0640e-02 L2_l1linf:3.0273e-02 L3_l1linf:3.0640e-02 L4_l1linf:3.1738e-02 L5_l1linf:3.1006e-02 L6_l1linf:2.9907e-02 L7_l1linf:2.9907e-02 L8_l1linf:3.0273e-02 L9_l1linf:3.1250e-02 L10_l1linf:3.1982e-02 L11_l1linf:3.2715e-02 L12_l1linf:2.0996e-02 L1_spectral:1.6259e-03 L2_spectral:1.6321e-03 L3_spectral:1.6222e-03 L4_spectral:1.6210e-03 L5_spectral:1.6130e-03 L6_spectral:1.6057e-03 L7_spectral:1.6199e-03 L8_spectral:1.6046e-03 L9_spectral:1.5999e-03 L10_spectral:1.6082e-03 L11_spectral:1.5996e-03 L12_spectral:1.6022e-03 train_time:195101ms step_avg:44.34ms +[2025-09-11 13:13:01] [Rank 0] PRINT: step:4400/10000 val_loss:5.7233 total_sharp:7.9064e-02 L1_sharp:2.2951e-02 L2_sharp:2.4754e-02 L3_sharp:2.4145e-02 L4_sharp:3.4310e-02 L5_sharp:4.8902e-02 L6_sharp:6.8813e-02 L7_sharp:9.9473e-02 L8_sharp:1.8073e-01 L9_sharp:2.9549e-01 L10_sharp:4.2781e-01 L11_sharp:5.8039e-01 L12_sharp:1.3062e+00 total_fnorm:1.6172e+00 total_l1_linf:1.7440e+03 total_spectral:8.2812e-01 L1_fnorm:1.1475e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1475e-01 L8_fnorm:1.1426e-01 L9_fnorm:1.1523e-01 L10_fnorm:1.1523e-01 L11_fnorm:1.1523e-01 L12_fnorm:1.0498e-01 L1_l1linf:3.0640e-02 L2_l1linf:3.0273e-02 L3_l1linf:3.0640e-02 L4_l1linf:3.1738e-02 L5_l1linf:3.1006e-02 
L6_l1linf:2.9907e-02 L7_l1linf:2.9907e-02 L8_l1linf:3.0273e-02 L9_l1linf:3.1250e-02 L10_l1linf:3.1982e-02 L11_l1linf:3.2715e-02 L12_l1linf:2.0996e-02 L1_spectral:1.6259e-03 L2_spectral:1.6321e-03 L3_spectral:1.6222e-03 L4_spectral:1.6210e-03 L5_spectral:1.6130e-03 L6_spectral:1.6057e-03 L7_spectral:1.6199e-03 L8_spectral:1.6046e-03 L9_spectral:1.5999e-03 L10_spectral:1.6082e-03 L11_spectral:1.5996e-03 L12_spectral:1.6022e-03 train_time:195101ms step_avg:44.34ms +[2025-09-11 13:13:03] [Rank 0] step:4401/10000 train_time:197103ms step_avg:44.79ms +[2025-09-11 13:13:03] [Rank 0] step:4401/10000 train_time:197103ms step_avg:44.79ms +[2025-09-11 13:13:04] [Rank 0] step:4421/10000 train_time:197792ms step_avg:44.74ms +[2025-09-11 13:13:04] [Rank 0] step:4421/10000 train_time:197792ms step_avg:44.74ms +[2025-09-11 13:13:04] [Rank 0] step:4441/10000 train_time:198467ms step_avg:44.69ms +[2025-09-11 13:13:04] [Rank 0] step:4441/10000 train_time:198467ms step_avg:44.69ms +[2025-09-11 13:13:05] [Rank 0] step:4461/10000 train_time:199144ms step_avg:44.64ms +[2025-09-11 13:13:05] [Rank 0] step:4461/10000 train_time:199144ms step_avg:44.64ms +[2025-09-11 13:13:06] [Rank 0] step:4481/10000 train_time:199820ms step_avg:44.59ms +[2025-09-11 13:13:06] [Rank 0] step:4481/10000 train_time:199820ms step_avg:44.59ms +[2025-09-11 13:13:06] [Rank 0] step:4501/10000 train_time:200498ms step_avg:44.55ms +[2025-09-11 13:13:06] [Rank 0] step:4501/10000 train_time:200498ms step_avg:44.55ms +[2025-09-11 13:13:07] [Rank 0] step:4521/10000 train_time:201175ms step_avg:44.50ms +[2025-09-11 13:13:07] [Rank 0] step:4521/10000 train_time:201175ms step_avg:44.50ms +[2025-09-11 13:13:08] [Rank 0] step:4541/10000 train_time:201852ms step_avg:44.45ms +[2025-09-11 13:13:08] [Rank 0] step:4541/10000 train_time:201852ms step_avg:44.45ms +[2025-09-11 13:13:08] [Rank 0] step:4561/10000 train_time:202528ms step_avg:44.40ms +[2025-09-11 13:13:08] [Rank 0] step:4561/10000 train_time:202528ms step_avg:44.40ms 
+[2025-09-11 13:13:09] [Rank 0] step:4581/10000 train_time:203204ms step_avg:44.36ms +[2025-09-11 13:13:09] [Rank 0] step:4581/10000 train_time:203204ms step_avg:44.36ms +[2025-09-11 13:13:10] [Rank 0] step:4601/10000 train_time:203881ms step_avg:44.31ms +[2025-09-11 13:13:10] [Rank 0] step:4601/10000 train_time:203881ms step_avg:44.31ms +[2025-09-11 13:13:10] [Rank 0] step:4621/10000 train_time:204557ms step_avg:44.27ms +[2025-09-11 13:13:10] [Rank 0] step:4621/10000 train_time:204557ms step_avg:44.27ms +[2025-09-11 13:13:11] [Rank 0] step:4641/10000 train_time:205234ms step_avg:44.22ms +[2025-09-11 13:13:11] [Rank 0] step:4641/10000 train_time:205234ms step_avg:44.22ms +[2025-09-11 13:13:12] [Rank 0] step:4661/10000 train_time:205909ms step_avg:44.18ms +[2025-09-11 13:13:12] [Rank 0] step:4661/10000 train_time:205909ms step_avg:44.18ms +[2025-09-11 13:13:12] [Rank 0] step:4681/10000 train_time:206585ms step_avg:44.13ms +[2025-09-11 13:13:12] [Rank 0] step:4681/10000 train_time:206585ms step_avg:44.13ms +[2025-09-11 13:13:13] [Rank 0] step:4701/10000 train_time:207261ms step_avg:44.09ms +[2025-09-11 13:13:13] [Rank 0] step:4701/10000 train_time:207261ms step_avg:44.09ms +[2025-09-11 13:13:14] [Rank 0] step:4721/10000 train_time:207938ms step_avg:44.05ms +[2025-09-11 13:13:14] [Rank 0] step:4721/10000 train_time:207938ms step_avg:44.05ms +[2025-09-11 13:13:14] [Rank 0] step:4741/10000 train_time:208614ms step_avg:44.00ms +[2025-09-11 13:13:14] [Rank 0] step:4741/10000 train_time:208614ms step_avg:44.00ms +[2025-09-11 13:13:15] [Rank 0] step:4761/10000 train_time:209291ms step_avg:43.96ms +[2025-09-11 13:13:15] [Rank 0] step:4761/10000 train_time:209291ms step_avg:43.96ms +[2025-09-11 13:13:16] [Rank 0] step:4781/10000 train_time:209966ms step_avg:43.92ms +[2025-09-11 13:13:16] [Rank 0] step:4781/10000 train_time:209966ms step_avg:43.92ms +[2025-09-11 13:13:17] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 13:13:17] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 13:13:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 13:13:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 13:13:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 13:13:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 13:13:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:13:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:13:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 13:13:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 13:13:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 13:13:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 13:13:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 13:13:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 13:13:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 13:13:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 13:13:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 13:13:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 13:13:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 13:13:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 13:13:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 13:13:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 13:13:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 13:13:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 13:13:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 13:13:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 13:13:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 13:13:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 13:13:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 13:13:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 13:13:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 13:13:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 13:13:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 13:13:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 13:13:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 13:13:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 13:13:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 13:13:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 13:13:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 13:13:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 13:13:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 13:13:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 13:13:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 13:13:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 13:13:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 13:13:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 13:13:28] [Rank 0] PRINT: step:4800/10000 val_loss:5.6819 total_sharp:8.0460e-02 L1_sharp:1.9171e-02 L2_sharp:2.2677e-02 L3_sharp:2.2502e-02 L4_sharp:2.7844e-02 L5_sharp:4.0338e-02 L6_sharp:7.2803e-02 L7_sharp:1.5327e-01 L8_sharp:2.5742e-01 L9_sharp:3.4098e-01 L10_sharp:4.2152e-01 L11_sharp:5.5072e-01 L12_sharp:8.7324e-01 total_fnorm:1.5625e+00 total_l1_linf:1.7120e+03 total_spectral:8.0469e-01 L1_fnorm:1.1475e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1475e-01 L8_fnorm:1.1426e-01 L9_fnorm:1.1523e-01 L10_fnorm:1.1475e-01 L11_fnorm:1.1523e-01 L12_fnorm:1.0791e-01 L1_l1linf:3.0640e-02 L2_l1linf:3.0518e-02 L3_l1linf:3.0762e-02 L4_l1linf:3.1250e-02 L5_l1linf:3.0518e-02 L6_l1linf:2.9785e-02 L7_l1linf:2.9663e-02 L8_l1linf:2.9541e-02 L9_l1linf:3.0884e-02 L10_l1linf:3.1494e-02 L11_l1linf:3.1738e-02 L12_l1linf:2.2949e-02 L1_spectral:1.6105e-03 L2_spectral:1.6345e-03 L3_spectral:1.6364e-03 L4_spectral:1.6313e-03 L5_spectral:1.6166e-03 L6_spectral:1.6054e-03 L7_spectral:1.6208e-03 L8_spectral:1.6111e-03 L9_spectral:1.6057e-03 L10_spectral:1.6208e-03 L11_spectral:1.6079e-03 L12_spectral:1.6200e-03 train_time:210901ms step_avg:43.94ms +[2025-09-11 13:13:28] [Rank 0] PRINT: step:4800/10000 
val_loss:5.6819 total_sharp:8.0460e-02 L1_sharp:1.9171e-02 L2_sharp:2.2677e-02 L3_sharp:2.2502e-02 L4_sharp:2.7844e-02 L5_sharp:4.0338e-02 L6_sharp:7.2803e-02 L7_sharp:1.5327e-01 L8_sharp:2.5742e-01 L9_sharp:3.4098e-01 L10_sharp:4.2152e-01 L11_sharp:5.5072e-01 L12_sharp:8.7324e-01 total_fnorm:1.5625e+00 total_l1_linf:1.7120e+03 total_spectral:8.0469e-01 L1_fnorm:1.1475e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1475e-01 L8_fnorm:1.1426e-01 L9_fnorm:1.1523e-01 L10_fnorm:1.1475e-01 L11_fnorm:1.1523e-01 L12_fnorm:1.0791e-01 L1_l1linf:3.0640e-02 L2_l1linf:3.0518e-02 L3_l1linf:3.0762e-02 L4_l1linf:3.1250e-02 L5_l1linf:3.0518e-02 L6_l1linf:2.9785e-02 L7_l1linf:2.9663e-02 L8_l1linf:2.9541e-02 L9_l1linf:3.0884e-02 L10_l1linf:3.1494e-02 L11_l1linf:3.1738e-02 L12_l1linf:2.2949e-02 L1_spectral:1.6105e-03 L2_spectral:1.6345e-03 L3_spectral:1.6364e-03 L4_spectral:1.6313e-03 L5_spectral:1.6166e-03 L6_spectral:1.6054e-03 L7_spectral:1.6208e-03 L8_spectral:1.6111e-03 L9_spectral:1.6057e-03 L10_spectral:1.6208e-03 L11_spectral:1.6079e-03 L12_spectral:1.6200e-03 train_time:210901ms step_avg:43.94ms +[2025-09-11 13:13:30] [Rank 0] step:4801/10000 train_time:212755ms step_avg:44.31ms +[2025-09-11 13:13:30] [Rank 0] step:4801/10000 train_time:212755ms step_avg:44.31ms +[2025-09-11 13:13:30] [Rank 0] step:4821/10000 train_time:213429ms step_avg:44.27ms +[2025-09-11 13:13:30] [Rank 0] step:4821/10000 train_time:213429ms step_avg:44.27ms +[2025-09-11 13:13:31] [Rank 0] step:4841/10000 train_time:214107ms step_avg:44.23ms +[2025-09-11 13:13:31] [Rank 0] step:4841/10000 train_time:214107ms step_avg:44.23ms +[2025-09-11 13:13:32] [Rank 0] step:4861/10000 train_time:214785ms step_avg:44.19ms +[2025-09-11 13:13:32] [Rank 0] step:4861/10000 train_time:214785ms step_avg:44.19ms +[2025-09-11 13:13:32] [Rank 0] step:4881/10000 train_time:215462ms step_avg:44.14ms +[2025-09-11 13:13:32] [Rank 0] step:4881/10000 
train_time:215462ms step_avg:44.14ms +[2025-09-11 13:13:33] [Rank 0] step:4901/10000 train_time:216140ms step_avg:44.10ms +[2025-09-11 13:13:33] [Rank 0] step:4901/10000 train_time:216140ms step_avg:44.10ms +[2025-09-11 13:13:34] [Rank 0] step:4921/10000 train_time:216818ms step_avg:44.06ms +[2025-09-11 13:13:34] [Rank 0] step:4921/10000 train_time:216818ms step_avg:44.06ms +[2025-09-11 13:13:34] [Rank 0] step:4941/10000 train_time:217494ms step_avg:44.02ms +[2025-09-11 13:13:34] [Rank 0] step:4941/10000 train_time:217494ms step_avg:44.02ms +[2025-09-11 13:13:35] [Rank 0] step:4961/10000 train_time:218172ms step_avg:43.98ms +[2025-09-11 13:13:35] [Rank 0] step:4961/10000 train_time:218172ms step_avg:43.98ms +[2025-09-11 13:13:36] [Rank 0] step:4981/10000 train_time:218850ms step_avg:43.94ms +[2025-09-11 13:13:36] [Rank 0] step:4981/10000 train_time:218850ms step_avg:43.94ms +[2025-09-11 13:13:36] [Rank 0] step:5001/10000 train_time:219528ms step_avg:43.90ms +[2025-09-11 13:13:36] [Rank 0] step:5001/10000 train_time:219528ms step_avg:43.90ms +[2025-09-11 13:13:37] [Rank 0] step:5021/10000 train_time:220205ms step_avg:43.86ms +[2025-09-11 13:13:37] [Rank 0] step:5021/10000 train_time:220205ms step_avg:43.86ms +[2025-09-11 13:13:38] [Rank 0] step:5041/10000 train_time:220880ms step_avg:43.82ms +[2025-09-11 13:13:38] [Rank 0] step:5041/10000 train_time:220880ms step_avg:43.82ms +[2025-09-11 13:13:38] [Rank 0] step:5061/10000 train_time:221557ms step_avg:43.78ms +[2025-09-11 13:13:38] [Rank 0] step:5061/10000 train_time:221557ms step_avg:43.78ms +[2025-09-11 13:13:39] [Rank 0] step:5081/10000 train_time:222234ms step_avg:43.74ms +[2025-09-11 13:13:39] [Rank 0] step:5081/10000 train_time:222234ms step_avg:43.74ms +[2025-09-11 13:13:40] [Rank 0] step:5101/10000 train_time:222911ms step_avg:43.70ms +[2025-09-11 13:13:40] [Rank 0] step:5101/10000 train_time:222911ms step_avg:43.70ms +[2025-09-11 13:13:40] [Rank 0] step:5121/10000 train_time:223588ms step_avg:43.66ms 
+[2025-09-11 13:13:40] [Rank 0] step:5121/10000 train_time:223588ms step_avg:43.66ms +[2025-09-11 13:13:41] [Rank 0] step:5141/10000 train_time:224266ms step_avg:43.62ms +[2025-09-11 13:13:41] [Rank 0] step:5141/10000 train_time:224266ms step_avg:43.62ms +[2025-09-11 13:13:42] [Rank 0] step:5161/10000 train_time:224944ms step_avg:43.59ms +[2025-09-11 13:13:42] [Rank 0] step:5161/10000 train_time:224944ms step_avg:43.59ms +[2025-09-11 13:13:42] [Rank 0] step:5181/10000 train_time:225621ms step_avg:43.55ms +[2025-09-11 13:13:42] [Rank 0] step:5181/10000 train_time:225621ms step_avg:43.55ms +[2025-09-11 13:13:43] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 13:13:43] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 13:13:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 13:13:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 13:13:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 13:13:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 13:13:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:13:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:13:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 13:13:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 13:13:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 13:13:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 13:13:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 13:13:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 13:13:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 13:13:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 13:13:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 13:13:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 13:13:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 13:13:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 13:13:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 13:13:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 13:13:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 13:13:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 13:13:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 13:13:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 13:13:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 13:13:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 13:13:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 13:13:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 13:13:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 13:13:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 13:13:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 13:13:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 13:13:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 13:13:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 13:13:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 13:13:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 13:13:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 13:13:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 13:13:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 13:13:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 13:13:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 13:13:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 13:13:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 13:13:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:13:54] [Rank 0] PRINT: step:5200/10000 val_loss:5.6502 total_sharp:7.4110e-02 L1_sharp:1.8273e-02 L2_sharp:2.2286e-02 L3_sharp:2.0408e-02 L4_sharp:2.6020e-02 L5_sharp:4.3722e-02 L6_sharp:6.3360e-02 L7_sharp:1.0175e-01 L8_sharp:1.6048e-01 L9_sharp:2.6142e-01 L10_sharp:3.9256e-01 L11_sharp:5.3254e-01 L12_sharp:1.0629e+00 total_fnorm:1.5234e+00 total_l1_linf:1.6320e+03 total_spectral:7.7344e-01 L1_fnorm:1.1523e-01 L2_fnorm:1.1621e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1475e-01 L8_fnorm:1.1426e-01 L9_fnorm:1.1475e-01 L10_fnorm:1.1475e-01 L11_fnorm:1.1572e-01 L12_fnorm:1.0889e-01 L1_l1linf:3.0029e-02 L2_l1linf:3.0518e-02 L3_l1linf:3.1250e-02 L4_l1linf:3.1006e-02 L5_l1linf:3.0884e-02 L6_l1linf:2.9907e-02 L7_l1linf:2.9297e-02 L8_l1linf:2.9297e-02 L9_l1linf:2.9419e-02 L10_l1linf:3.0273e-02 L11_l1linf:3.1250e-02 L12_l1linf:2.2583e-02 L1_spectral:1.6270e-03 L2_spectral:1.6313e-03 L3_spectral:1.6313e-03 L4_spectral:1.6313e-03 L5_spectral:1.6188e-03 L6_spectral:1.6206e-03 L7_spectral:1.6200e-03 L8_spectral:1.6098e-03 L9_spectral:1.6068e-03 L10_spectral:1.6002e-03 L11_spectral:1.6154e-03 L12_spectral:1.6057e-03 train_time:226284ms step_avg:43.52ms +[2025-09-11 13:13:54] [Rank 0] PRINT: step:5200/10000 val_loss:5.6502 total_sharp:7.4110e-02 L1_sharp:1.8273e-02 L2_sharp:2.2286e-02 L3_sharp:2.0408e-02 L4_sharp:2.6020e-02 L5_sharp:4.3722e-02 L6_sharp:6.3360e-02 L7_sharp:1.0175e-01 L8_sharp:1.6048e-01 L9_sharp:2.6142e-01 L10_sharp:3.9256e-01 L11_sharp:5.3254e-01 L12_sharp:1.0629e+00 total_fnorm:1.5234e+00 total_l1_linf:1.6320e+03 total_spectral:7.7344e-01 L1_fnorm:1.1523e-01 L2_fnorm:1.1621e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1475e-01 L8_fnorm:1.1426e-01 L9_fnorm:1.1475e-01 L10_fnorm:1.1475e-01 L11_fnorm:1.1572e-01 L12_fnorm:1.0889e-01 L1_l1linf:3.0029e-02 L2_l1linf:3.0518e-02 L3_l1linf:3.1250e-02 L4_l1linf:3.1006e-02 L5_l1linf:3.0884e-02 
L6_l1linf:2.9907e-02 L7_l1linf:2.9297e-02 L8_l1linf:2.9297e-02 L9_l1linf:2.9419e-02 L10_l1linf:3.0273e-02 L11_l1linf:3.1250e-02 L12_l1linf:2.2583e-02 L1_spectral:1.6270e-03 L2_spectral:1.6313e-03 L3_spectral:1.6313e-03 L4_spectral:1.6313e-03 L5_spectral:1.6188e-03 L6_spectral:1.6206e-03 L7_spectral:1.6200e-03 L8_spectral:1.6098e-03 L9_spectral:1.6068e-03 L10_spectral:1.6002e-03 L11_spectral:1.6154e-03 L12_spectral:1.6057e-03 train_time:226284ms step_avg:43.52ms +[2025-09-11 13:13:56] [Rank 0] step:5201/10000 train_time:228127ms step_avg:43.86ms +[2025-09-11 13:13:56] [Rank 0] step:5201/10000 train_time:228127ms step_avg:43.86ms +[2025-09-11 13:13:57] [Rank 0] step:5221/10000 train_time:228825ms step_avg:43.83ms +[2025-09-11 13:13:57] [Rank 0] step:5221/10000 train_time:228825ms step_avg:43.83ms +[2025-09-11 13:13:57] [Rank 0] step:5241/10000 train_time:229513ms step_avg:43.79ms +[2025-09-11 13:13:57] [Rank 0] step:5241/10000 train_time:229513ms step_avg:43.79ms +[2025-09-11 13:13:58] [Rank 0] step:5261/10000 train_time:230199ms step_avg:43.76ms +[2025-09-11 13:13:58] [Rank 0] step:5261/10000 train_time:230199ms step_avg:43.76ms +[2025-09-11 13:13:59] [Rank 0] step:5281/10000 train_time:230885ms step_avg:43.72ms +[2025-09-11 13:13:59] [Rank 0] step:5281/10000 train_time:230885ms step_avg:43.72ms +[2025-09-11 13:13:59] [Rank 0] step:5301/10000 train_time:231571ms step_avg:43.68ms +[2025-09-11 13:13:59] [Rank 0] step:5301/10000 train_time:231571ms step_avg:43.68ms +[2025-09-11 13:14:00] [Rank 0] step:5321/10000 train_time:232256ms step_avg:43.65ms +[2025-09-11 13:14:00] [Rank 0] step:5321/10000 train_time:232256ms step_avg:43.65ms +[2025-09-11 13:14:01] [Rank 0] step:5341/10000 train_time:232942ms step_avg:43.61ms +[2025-09-11 13:14:01] [Rank 0] step:5341/10000 train_time:232942ms step_avg:43.61ms +[2025-09-11 13:14:02] [Rank 0] step:5361/10000 train_time:233628ms step_avg:43.58ms +[2025-09-11 13:14:02] [Rank 0] step:5361/10000 train_time:233628ms step_avg:43.58ms 
+[2025-09-11 13:14:02] [Rank 0] step:5381/10000 train_time:234316ms step_avg:43.54ms +[2025-09-11 13:14:02] [Rank 0] step:5381/10000 train_time:234316ms step_avg:43.54ms +[2025-09-11 13:14:03] [Rank 0] step:5401/10000 train_time:234999ms step_avg:43.51ms +[2025-09-11 13:14:03] [Rank 0] step:5401/10000 train_time:234999ms step_avg:43.51ms +[2025-09-11 13:14:04] [Rank 0] step:5421/10000 train_time:235687ms step_avg:43.48ms +[2025-09-11 13:14:04] [Rank 0] step:5421/10000 train_time:235687ms step_avg:43.48ms +[2025-09-11 13:14:04] [Rank 0] step:5441/10000 train_time:236373ms step_avg:43.44ms +[2025-09-11 13:14:04] [Rank 0] step:5441/10000 train_time:236373ms step_avg:43.44ms +[2025-09-11 13:14:05] [Rank 0] step:5461/10000 train_time:237059ms step_avg:43.41ms +[2025-09-11 13:14:05] [Rank 0] step:5461/10000 train_time:237059ms step_avg:43.41ms +[2025-09-11 13:14:06] [Rank 0] step:5481/10000 train_time:237746ms step_avg:43.38ms +[2025-09-11 13:14:06] [Rank 0] step:5481/10000 train_time:237746ms step_avg:43.38ms +[2025-09-11 13:14:06] [Rank 0] step:5501/10000 train_time:238430ms step_avg:43.34ms +[2025-09-11 13:14:06] [Rank 0] step:5501/10000 train_time:238430ms step_avg:43.34ms +[2025-09-11 13:14:07] [Rank 0] step:5521/10000 train_time:239116ms step_avg:43.31ms +[2025-09-11 13:14:07] [Rank 0] step:5521/10000 train_time:239116ms step_avg:43.31ms +[2025-09-11 13:14:08] [Rank 0] step:5541/10000 train_time:239804ms step_avg:43.28ms +[2025-09-11 13:14:08] [Rank 0] step:5541/10000 train_time:239804ms step_avg:43.28ms +[2025-09-11 13:14:08] [Rank 0] step:5561/10000 train_time:240492ms step_avg:43.25ms +[2025-09-11 13:14:08] [Rank 0] step:5561/10000 train_time:240492ms step_avg:43.25ms +[2025-09-11 13:14:09] [Rank 0] step:5581/10000 train_time:241180ms step_avg:43.21ms +[2025-09-11 13:14:09] [Rank 0] step:5581/10000 train_time:241180ms step_avg:43.21ms +[2025-09-11 13:14:10] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 13:14:10] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 13:14:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 13:14:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 13:14:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 13:14:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 13:14:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:14:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:14:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 13:14:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 13:14:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 13:14:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 13:14:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 13:14:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 13:14:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 13:14:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 13:14:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 13:14:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 13:14:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 13:14:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 13:14:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 13:14:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 13:14:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 13:14:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 13:14:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 13:14:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 13:14:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 13:14:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 13:14:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 13:14:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 13:14:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 13:14:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 13:14:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 13:14:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 13:14:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 13:14:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 13:14:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 13:14:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 13:14:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 13:14:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 13:14:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 13:14:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 13:14:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 13:14:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 13:14:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 13:14:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 13:14:21] [Rank 0] PRINT: step:5600/10000 val_loss:5.6200 total_sharp:8.0392e-02 L1_sharp:1.8786e-02 L2_sharp:2.0962e-02 L3_sharp:2.0063e-02 L4_sharp:2.2476e-02 L5_sharp:4.1175e-02 L6_sharp:7.0504e-02 L7_sharp:1.2285e-01 L8_sharp:1.8016e-01 L9_sharp:2.7915e-01 L10_sharp:4.0582e-01 L11_sharp:5.4023e-01 L12_sharp:9.7507e-01 total_fnorm:1.5000e+00 total_l1_linf:1.6320e+03 total_spectral:7.7344e-01 L1_fnorm:1.1523e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1377e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1377e-01 L9_fnorm:1.1426e-01 L10_fnorm:1.1475e-01 L11_fnorm:1.1475e-01 L12_fnorm:1.0889e-01 L1_l1linf:2.9663e-02 L2_l1linf:2.9785e-02 L3_l1linf:2.9907e-02 L4_l1linf:2.9907e-02 L5_l1linf:2.9297e-02 L6_l1linf:2.9175e-02 L7_l1linf:2.9175e-02 L8_l1linf:2.9053e-02 L9_l1linf:2.9541e-02 L10_l1linf:3.0518e-02 L11_l1linf:3.1250e-02 L12_l1linf:2.2461e-02 L1_spectral:1.6193e-03 L2_spectral:1.6283e-03 L3_spectral:1.6238e-03 L4_spectral:1.6222e-03 L5_spectral:1.6192e-03 L6_spectral:1.6316e-03 L7_spectral:1.6329e-03 L8_spectral:1.6175e-03 L9_spectral:1.6120e-03 L10_spectral:1.6076e-03 L11_spectral:1.6043e-03 L12_spectral:1.6219e-03 train_time:241848ms step_avg:43.19ms +[2025-09-11 13:14:21] [Rank 0] PRINT: step:5600/10000 
val_loss:5.6200 total_sharp:8.0392e-02 L1_sharp:1.8786e-02 L2_sharp:2.0962e-02 L3_sharp:2.0063e-02 L4_sharp:2.2476e-02 L5_sharp:4.1175e-02 L6_sharp:7.0504e-02 L7_sharp:1.2285e-01 L8_sharp:1.8016e-01 L9_sharp:2.7915e-01 L10_sharp:4.0582e-01 L11_sharp:5.4023e-01 L12_sharp:9.7507e-01 total_fnorm:1.5000e+00 total_l1_linf:1.6320e+03 total_spectral:7.7344e-01 L1_fnorm:1.1523e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1377e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1377e-01 L9_fnorm:1.1426e-01 L10_fnorm:1.1475e-01 L11_fnorm:1.1475e-01 L12_fnorm:1.0889e-01 L1_l1linf:2.9663e-02 L2_l1linf:2.9785e-02 L3_l1linf:2.9907e-02 L4_l1linf:2.9907e-02 L5_l1linf:2.9297e-02 L6_l1linf:2.9175e-02 L7_l1linf:2.9175e-02 L8_l1linf:2.9053e-02 L9_l1linf:2.9541e-02 L10_l1linf:3.0518e-02 L11_l1linf:3.1250e-02 L12_l1linf:2.2461e-02 L1_spectral:1.6193e-03 L2_spectral:1.6283e-03 L3_spectral:1.6238e-03 L4_spectral:1.6222e-03 L5_spectral:1.6192e-03 L6_spectral:1.6316e-03 L7_spectral:1.6329e-03 L8_spectral:1.6175e-03 L9_spectral:1.6120e-03 L10_spectral:1.6076e-03 L11_spectral:1.6043e-03 L12_spectral:1.6219e-03 train_time:241848ms step_avg:43.19ms +[2025-09-11 13:14:23] [Rank 0] step:5601/10000 train_time:243674ms step_avg:43.51ms +[2025-09-11 13:14:23] [Rank 0] step:5601/10000 train_time:243674ms step_avg:43.51ms +[2025-09-11 13:14:24] [Rank 0] step:5621/10000 train_time:244356ms step_avg:43.47ms +[2025-09-11 13:14:24] [Rank 0] step:5621/10000 train_time:244356ms step_avg:43.47ms +[2025-09-11 13:14:24] [Rank 0] step:5641/10000 train_time:245042ms step_avg:43.44ms +[2025-09-11 13:14:24] [Rank 0] step:5641/10000 train_time:245042ms step_avg:43.44ms +[2025-09-11 13:14:25] [Rank 0] step:5661/10000 train_time:245728ms step_avg:43.41ms +[2025-09-11 13:14:25] [Rank 0] step:5661/10000 train_time:245728ms step_avg:43.41ms +[2025-09-11 13:14:26] [Rank 0] step:5681/10000 train_time:246416ms step_avg:43.38ms +[2025-09-11 13:14:26] [Rank 0] step:5681/10000 
train_time:246416ms step_avg:43.38ms +[2025-09-11 13:14:26] [Rank 0] step:5701/10000 train_time:247106ms step_avg:43.34ms +[2025-09-11 13:14:26] [Rank 0] step:5701/10000 train_time:247106ms step_avg:43.34ms +[2025-09-11 13:14:27] [Rank 0] step:5721/10000 train_time:247791ms step_avg:43.31ms +[2025-09-11 13:14:27] [Rank 0] step:5721/10000 train_time:247791ms step_avg:43.31ms +[2025-09-11 13:14:28] [Rank 0] step:5741/10000 train_time:248479ms step_avg:43.28ms +[2025-09-11 13:14:28] [Rank 0] step:5741/10000 train_time:248479ms step_avg:43.28ms +[2025-09-11 13:14:28] [Rank 0] step:5761/10000 train_time:249167ms step_avg:43.25ms +[2025-09-11 13:14:28] [Rank 0] step:5761/10000 train_time:249167ms step_avg:43.25ms +[2025-09-11 13:14:29] [Rank 0] step:5781/10000 train_time:249855ms step_avg:43.22ms +[2025-09-11 13:14:29] [Rank 0] step:5781/10000 train_time:249855ms step_avg:43.22ms +[2025-09-11 13:14:30] [Rank 0] step:5801/10000 train_time:250544ms step_avg:43.19ms +[2025-09-11 13:14:30] [Rank 0] step:5801/10000 train_time:250544ms step_avg:43.19ms +[2025-09-11 13:14:30] [Rank 0] step:5821/10000 train_time:251231ms step_avg:43.16ms +[2025-09-11 13:14:30] [Rank 0] step:5821/10000 train_time:251231ms step_avg:43.16ms +[2025-09-11 13:14:31] [Rank 0] step:5841/10000 train_time:251920ms step_avg:43.13ms +[2025-09-11 13:14:31] [Rank 0] step:5841/10000 train_time:251920ms step_avg:43.13ms +[2025-09-11 13:14:32] [Rank 0] step:5861/10000 train_time:252606ms step_avg:43.10ms +[2025-09-11 13:14:32] [Rank 0] step:5861/10000 train_time:252606ms step_avg:43.10ms +[2025-09-11 13:14:33] [Rank 0] step:5881/10000 train_time:253293ms step_avg:43.07ms +[2025-09-11 13:14:33] [Rank 0] step:5881/10000 train_time:253293ms step_avg:43.07ms +[2025-09-11 13:14:33] [Rank 0] step:5901/10000 train_time:253979ms step_avg:43.04ms +[2025-09-11 13:14:33] [Rank 0] step:5901/10000 train_time:253979ms step_avg:43.04ms +[2025-09-11 13:14:34] [Rank 0] step:5921/10000 train_time:254669ms step_avg:43.01ms 
+[2025-09-11 13:14:34] [Rank 0] step:5921/10000 train_time:254669ms step_avg:43.01ms +[2025-09-11 13:14:35] [Rank 0] step:5941/10000 train_time:255358ms step_avg:42.98ms +[2025-09-11 13:14:35] [Rank 0] step:5941/10000 train_time:255358ms step_avg:42.98ms +[2025-09-11 13:14:35] [Rank 0] step:5961/10000 train_time:256046ms step_avg:42.95ms +[2025-09-11 13:14:35] [Rank 0] step:5961/10000 train_time:256046ms step_avg:42.95ms +[2025-09-11 13:14:36] [Rank 0] step:5981/10000 train_time:256734ms step_avg:42.92ms +[2025-09-11 13:14:36] [Rank 0] step:5981/10000 train_time:256734ms step_avg:42.92ms +[2025-09-11 13:14:37] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 13:14:37] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 13:14:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 13:14:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 13:14:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 13:14:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 13:14:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:14:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:14:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 13:14:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 13:14:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 13:14:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 13:14:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 13:14:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 13:14:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 13:14:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 13:14:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 13:14:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 13:14:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 13:14:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 13:14:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 13:14:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 13:14:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 13:14:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 13:14:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 13:14:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 13:14:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 13:14:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 13:14:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 13:14:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 13:14:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 13:14:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 13:14:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 13:14:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 13:14:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 13:14:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 13:14:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 13:14:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 13:14:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 13:14:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 13:14:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 13:14:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 13:14:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 13:14:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 13:14:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 13:14:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:14:48] [Rank 0] PRINT: step:6000/10000 val_loss:5.5872 total_sharp:8.5070e-02 L1_sharp:1.9100e-02 L2_sharp:1.9254e-02 L3_sharp:1.8483e-02 L4_sharp:2.5365e-02 L5_sharp:3.8310e-02 L6_sharp:5.9844e-02 L7_sharp:1.0315e-01 L8_sharp:1.6515e-01 L9_sharp:2.4221e-01 L10_sharp:4.0990e-01 L11_sharp:5.8643e-01 L12_sharp:1.5221e+00 total_fnorm:1.4688e+00 total_l1_linf:1.5760e+03 total_spectral:7.5391e-01 L1_fnorm:1.1523e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1475e-01 L8_fnorm:1.1377e-01 L9_fnorm:1.1426e-01 L10_fnorm:1.1426e-01 L11_fnorm:1.1475e-01 L12_fnorm:1.0986e-01 L1_l1linf:2.9541e-02 L2_l1linf:2.9663e-02 L3_l1linf:2.9785e-02 L4_l1linf:3.0151e-02 L5_l1linf:2.9175e-02 L6_l1linf:2.9053e-02 L7_l1linf:2.8687e-02 L8_l1linf:2.8564e-02 L9_l1linf:2.9053e-02 L10_l1linf:2.9907e-02 L11_l1linf:3.0518e-02 L12_l1linf:2.2217e-02 L1_spectral:1.6183e-03 L2_spectral:1.6400e-03 L3_spectral:1.6277e-03 L4_spectral:1.6362e-03 L5_spectral:1.6194e-03 L6_spectral:1.6173e-03 L7_spectral:1.6262e-03 L8_spectral:1.6146e-03 L9_spectral:1.6062e-03 L10_spectral:1.6187e-03 L11_spectral:1.6114e-03 L12_spectral:1.6092e-03 train_time:257406ms step_avg:42.90ms +[2025-09-11 13:14:48] [Rank 0] PRINT: step:6000/10000 val_loss:5.5872 total_sharp:8.5070e-02 L1_sharp:1.9100e-02 L2_sharp:1.9254e-02 L3_sharp:1.8483e-02 L4_sharp:2.5365e-02 L5_sharp:3.8310e-02 L6_sharp:5.9844e-02 L7_sharp:1.0315e-01 L8_sharp:1.6515e-01 L9_sharp:2.4221e-01 L10_sharp:4.0990e-01 L11_sharp:5.8643e-01 L12_sharp:1.5221e+00 total_fnorm:1.4688e+00 total_l1_linf:1.5760e+03 total_spectral:7.5391e-01 L1_fnorm:1.1523e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1475e-01 L8_fnorm:1.1377e-01 L9_fnorm:1.1426e-01 L10_fnorm:1.1426e-01 L11_fnorm:1.1475e-01 L12_fnorm:1.0986e-01 L1_l1linf:2.9541e-02 L2_l1linf:2.9663e-02 L3_l1linf:2.9785e-02 L4_l1linf:3.0151e-02 L5_l1linf:2.9175e-02 
L6_l1linf:2.9053e-02 L7_l1linf:2.8687e-02 L8_l1linf:2.8564e-02 L9_l1linf:2.9053e-02 L10_l1linf:2.9907e-02 L11_l1linf:3.0518e-02 L12_l1linf:2.2217e-02 L1_spectral:1.6183e-03 L2_spectral:1.6400e-03 L3_spectral:1.6277e-03 L4_spectral:1.6362e-03 L5_spectral:1.6194e-03 L6_spectral:1.6173e-03 L7_spectral:1.6262e-03 L8_spectral:1.6146e-03 L9_spectral:1.6062e-03 L10_spectral:1.6187e-03 L11_spectral:1.6114e-03 L12_spectral:1.6092e-03 train_time:257406ms step_avg:42.90ms +[2025-09-11 13:14:50] [Rank 0] step:6001/10000 train_time:259213ms step_avg:43.19ms +[2025-09-11 13:14:50] [Rank 0] step:6001/10000 train_time:259213ms step_avg:43.19ms +[2025-09-11 13:14:50] [Rank 0] step:6021/10000 train_time:259904ms step_avg:43.17ms +[2025-09-11 13:14:50] [Rank 0] step:6021/10000 train_time:259904ms step_avg:43.17ms +[2025-09-11 13:14:51] [Rank 0] step:6041/10000 train_time:260595ms step_avg:43.14ms +[2025-09-11 13:14:51] [Rank 0] step:6041/10000 train_time:260595ms step_avg:43.14ms +[2025-09-11 13:14:52] [Rank 0] step:6061/10000 train_time:261284ms step_avg:43.11ms +[2025-09-11 13:14:52] [Rank 0] step:6061/10000 train_time:261284ms step_avg:43.11ms +[2025-09-11 13:14:53] [Rank 0] step:6081/10000 train_time:261974ms step_avg:43.08ms +[2025-09-11 13:14:53] [Rank 0] step:6081/10000 train_time:261974ms step_avg:43.08ms +[2025-09-11 13:14:53] [Rank 0] step:6101/10000 train_time:262663ms step_avg:43.05ms +[2025-09-11 13:14:53] [Rank 0] step:6101/10000 train_time:262663ms step_avg:43.05ms +[2025-09-11 13:14:54] [Rank 0] step:6121/10000 train_time:263354ms step_avg:43.02ms +[2025-09-11 13:14:54] [Rank 0] step:6121/10000 train_time:263354ms step_avg:43.02ms +[2025-09-11 13:14:55] [Rank 0] step:6141/10000 train_time:264045ms step_avg:43.00ms +[2025-09-11 13:14:55] [Rank 0] step:6141/10000 train_time:264045ms step_avg:43.00ms +[2025-09-11 13:14:55] [Rank 0] step:6161/10000 train_time:264734ms step_avg:42.97ms +[2025-09-11 13:14:55] [Rank 0] step:6161/10000 train_time:264734ms step_avg:42.97ms 
+[2025-09-11 13:14:56] [Rank 0] step:6181/10000 train_time:265422ms step_avg:42.94ms +[2025-09-11 13:14:56] [Rank 0] step:6181/10000 train_time:265422ms step_avg:42.94ms +[2025-09-11 13:14:57] [Rank 0] step:6201/10000 train_time:266113ms step_avg:42.91ms +[2025-09-11 13:14:57] [Rank 0] step:6201/10000 train_time:266113ms step_avg:42.91ms +[2025-09-11 13:14:57] [Rank 0] step:6221/10000 train_time:266804ms step_avg:42.89ms +[2025-09-11 13:14:57] [Rank 0] step:6221/10000 train_time:266804ms step_avg:42.89ms +[2025-09-11 13:14:58] [Rank 0] step:6241/10000 train_time:267495ms step_avg:42.86ms +[2025-09-11 13:14:58] [Rank 0] step:6241/10000 train_time:267495ms step_avg:42.86ms +[2025-09-11 13:14:59] [Rank 0] step:6261/10000 train_time:268183ms step_avg:42.83ms +[2025-09-11 13:14:59] [Rank 0] step:6261/10000 train_time:268183ms step_avg:42.83ms +[2025-09-11 13:14:59] [Rank 0] step:6281/10000 train_time:268873ms step_avg:42.81ms +[2025-09-11 13:14:59] [Rank 0] step:6281/10000 train_time:268873ms step_avg:42.81ms +[2025-09-11 13:15:00] [Rank 0] step:6301/10000 train_time:269561ms step_avg:42.78ms +[2025-09-11 13:15:00] [Rank 0] step:6301/10000 train_time:269561ms step_avg:42.78ms +[2025-09-11 13:15:01] [Rank 0] step:6321/10000 train_time:270254ms step_avg:42.75ms +[2025-09-11 13:15:01] [Rank 0] step:6321/10000 train_time:270254ms step_avg:42.75ms +[2025-09-11 13:15:02] [Rank 0] step:6341/10000 train_time:270996ms step_avg:42.74ms +[2025-09-11 13:15:02] [Rank 0] step:6341/10000 train_time:270996ms step_avg:42.74ms +[2025-09-11 13:15:02] [Rank 0] step:6361/10000 train_time:271784ms step_avg:42.73ms +[2025-09-11 13:15:02] [Rank 0] step:6361/10000 train_time:271784ms step_avg:42.73ms +[2025-09-11 13:15:03] [Rank 0] step:6381/10000 train_time:272474ms step_avg:42.70ms +[2025-09-11 13:15:03] [Rank 0] step:6381/10000 train_time:272474ms step_avg:42.70ms +[2025-09-11 13:15:04] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 13:15:04] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 13:15:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 13:15:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 13:15:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 13:15:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 13:15:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:15:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:15:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 13:15:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 13:15:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 13:15:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 13:15:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 13:15:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 13:15:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 13:15:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 13:15:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 13:15:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 13:15:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 13:15:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 13:15:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 13:15:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 13:15:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 13:15:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 13:15:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 13:15:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 13:15:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 13:15:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 13:15:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 13:15:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 13:15:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 13:15:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 13:15:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 13:15:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 13:15:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 13:15:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 13:15:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 13:15:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 13:15:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 13:15:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 13:15:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 13:15:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 13:15:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 13:15:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 13:15:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 13:15:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 13:15:17] [Rank 0] PRINT: step:6400/10000 val_loss:5.5622 total_sharp:6.0770e-02 L1_sharp:1.3952e-02 L2_sharp:1.5097e-02 L3_sharp:1.4252e-02 L4_sharp:1.8139e-02 L5_sharp:2.8998e-02 L6_sharp:4.6509e-02 L7_sharp:8.5781e-02 L8_sharp:1.3835e-01 L9_sharp:2.0508e-01 L10_sharp:3.1385e-01 L11_sharp:4.3739e-01 L12_sharp:8.8548e-01 total_fnorm:1.2812e+00 total_l1_linf:1.3040e+03 total_spectral:6.5234e-01 L1_fnorm:1.0107e-01 L2_fnorm:1.0156e-01 L3_fnorm:1.0205e-01 L4_fnorm:1.0107e-01 L5_fnorm:1.0059e-01 L6_fnorm:1.0059e-01 L7_fnorm:1.0059e-01 L8_fnorm:1.0010e-01 L9_fnorm:1.0059e-01 L10_fnorm:1.0059e-01 L11_fnorm:1.0059e-01 L12_fnorm:9.5215e-02 L1_l1linf:2.4902e-02 L2_l1linf:2.5269e-02 L3_l1linf:2.5513e-02 L4_l1linf:2.5635e-02 L5_l1linf:2.5635e-02 L6_l1linf:2.5024e-02 L7_l1linf:2.4536e-02 L8_l1linf:2.3926e-02 L9_l1linf:2.4780e-02 L10_l1linf:2.5146e-02 L11_l1linf:2.6001e-02 L12_l1linf:1.8921e-02 L1_spectral:1.4597e-03 L2_spectral:1.4469e-03 L3_spectral:1.4519e-03 L4_spectral:1.4418e-03 L5_spectral:1.4492e-03 L6_spectral:1.4476e-03 L7_spectral:1.4482e-03 L8_spectral:1.4461e-03 L9_spectral:1.4489e-03 L10_spectral:1.4399e-03 L11_spectral:1.4274e-03 L12_spectral:1.4276e-03 train_time:273144ms step_avg:42.68ms +[2025-09-11 13:15:17] [Rank 0] PRINT: step:6400/10000 
val_loss:5.5622 total_sharp:6.0770e-02 L1_sharp:1.3952e-02 L2_sharp:1.5097e-02 L3_sharp:1.4252e-02 L4_sharp:1.8139e-02 L5_sharp:2.8998e-02 L6_sharp:4.6509e-02 L7_sharp:8.5781e-02 L8_sharp:1.3835e-01 L9_sharp:2.0508e-01 L10_sharp:3.1385e-01 L11_sharp:4.3739e-01 L12_sharp:8.8548e-01 total_fnorm:1.2812e+00 total_l1_linf:1.3040e+03 total_spectral:6.5234e-01 L1_fnorm:1.0107e-01 L2_fnorm:1.0156e-01 L3_fnorm:1.0205e-01 L4_fnorm:1.0107e-01 L5_fnorm:1.0059e-01 L6_fnorm:1.0059e-01 L7_fnorm:1.0059e-01 L8_fnorm:1.0010e-01 L9_fnorm:1.0059e-01 L10_fnorm:1.0059e-01 L11_fnorm:1.0059e-01 L12_fnorm:9.5215e-02 L1_l1linf:2.4902e-02 L2_l1linf:2.5269e-02 L3_l1linf:2.5513e-02 L4_l1linf:2.5635e-02 L5_l1linf:2.5635e-02 L6_l1linf:2.5024e-02 L7_l1linf:2.4536e-02 L8_l1linf:2.3926e-02 L9_l1linf:2.4780e-02 L10_l1linf:2.5146e-02 L11_l1linf:2.6001e-02 L12_l1linf:1.8921e-02 L1_spectral:1.4597e-03 L2_spectral:1.4469e-03 L3_spectral:1.4519e-03 L4_spectral:1.4418e-03 L5_spectral:1.4492e-03 L6_spectral:1.4476e-03 L7_spectral:1.4482e-03 L8_spectral:1.4461e-03 L9_spectral:1.4489e-03 L10_spectral:1.4399e-03 L11_spectral:1.4274e-03 L12_spectral:1.4276e-03 train_time:273144ms step_avg:42.68ms +[2025-09-11 13:15:19] [Rank 0] step:6401/10000 train_time:275033ms step_avg:42.97ms +[2025-09-11 13:15:19] [Rank 0] step:6401/10000 train_time:275033ms step_avg:42.97ms +[2025-09-11 13:15:19] [Rank 0] step:6421/10000 train_time:275745ms step_avg:42.94ms +[2025-09-11 13:15:19] [Rank 0] step:6421/10000 train_time:275745ms step_avg:42.94ms +[2025-09-11 13:15:20] [Rank 0] step:6441/10000 train_time:276436ms step_avg:42.92ms +[2025-09-11 13:15:20] [Rank 0] step:6441/10000 train_time:276436ms step_avg:42.92ms +[2025-09-11 13:15:21] [Rank 0] step:6461/10000 train_time:277128ms step_avg:42.89ms +[2025-09-11 13:15:21] [Rank 0] step:6461/10000 train_time:277128ms step_avg:42.89ms +[2025-09-11 13:15:21] [Rank 0] step:6481/10000 train_time:277821ms step_avg:42.87ms +[2025-09-11 13:15:21] [Rank 0] step:6481/10000 
train_time:277821ms step_avg:42.87ms +[2025-09-11 13:15:22] [Rank 0] step:6501/10000 train_time:278514ms step_avg:42.84ms +[2025-09-11 13:15:22] [Rank 0] step:6501/10000 train_time:278514ms step_avg:42.84ms +[2025-09-11 13:15:23] [Rank 0] step:6521/10000 train_time:279485ms step_avg:42.86ms +[2025-09-11 13:15:23] [Rank 0] step:6521/10000 train_time:279485ms step_avg:42.86ms +[2025-09-11 13:15:24] [Rank 0] step:6541/10000 train_time:280175ms step_avg:42.83ms +[2025-09-11 13:15:24] [Rank 0] step:6541/10000 train_time:280175ms step_avg:42.83ms +[2025-09-11 13:15:24] [Rank 0] step:6561/10000 train_time:280866ms step_avg:42.81ms +[2025-09-11 13:15:24] [Rank 0] step:6561/10000 train_time:280866ms step_avg:42.81ms +[2025-09-11 13:15:25] [Rank 0] step:6581/10000 train_time:281707ms step_avg:42.81ms +[2025-09-11 13:15:25] [Rank 0] step:6581/10000 train_time:281707ms step_avg:42.81ms +[2025-09-11 13:15:26] [Rank 0] step:6601/10000 train_time:282507ms step_avg:42.80ms +[2025-09-11 13:15:26] [Rank 0] step:6601/10000 train_time:282507ms step_avg:42.80ms +[2025-09-11 13:15:27] [Rank 0] step:6621/10000 train_time:283197ms step_avg:42.77ms +[2025-09-11 13:15:27] [Rank 0] step:6621/10000 train_time:283197ms step_avg:42.77ms +[2025-09-11 13:15:27] [Rank 0] step:6641/10000 train_time:283888ms step_avg:42.75ms +[2025-09-11 13:15:27] [Rank 0] step:6641/10000 train_time:283888ms step_avg:42.75ms +[2025-09-11 13:15:28] [Rank 0] step:6661/10000 train_time:284580ms step_avg:42.72ms +[2025-09-11 13:15:28] [Rank 0] step:6661/10000 train_time:284580ms step_avg:42.72ms +[2025-09-11 13:15:29] [Rank 0] step:6681/10000 train_time:285278ms step_avg:42.70ms +[2025-09-11 13:15:29] [Rank 0] step:6681/10000 train_time:285278ms step_avg:42.70ms +[2025-09-11 13:15:30] [Rank 0] step:6701/10000 train_time:285975ms step_avg:42.68ms +[2025-09-11 13:15:30] [Rank 0] step:6701/10000 train_time:285975ms step_avg:42.68ms +[2025-09-11 13:15:30] [Rank 0] step:6721/10000 train_time:286673ms step_avg:42.65ms 
+[2025-09-11 13:15:30] [Rank 0] step:6721/10000 train_time:286673ms step_avg:42.65ms +[2025-09-11 13:15:31] [Rank 0] step:6741/10000 train_time:287371ms step_avg:42.63ms +[2025-09-11 13:15:31] [Rank 0] step:6741/10000 train_time:287371ms step_avg:42.63ms +[2025-09-11 13:15:32] [Rank 0] step:6761/10000 train_time:288067ms step_avg:42.61ms +[2025-09-11 13:15:32] [Rank 0] step:6761/10000 train_time:288067ms step_avg:42.61ms +[2025-09-11 13:15:32] [Rank 0] step:6781/10000 train_time:288766ms step_avg:42.58ms +[2025-09-11 13:15:32] [Rank 0] step:6781/10000 train_time:288766ms step_avg:42.58ms +[2025-09-11 13:15:33] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 13:15:33] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 13:15:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 13:15:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 13:15:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 13:15:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 13:15:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:15:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:15:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 13:15:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 13:15:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 13:15:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 13:15:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 13:15:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 13:15:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 13:15:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 13:15:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 13:15:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 13:15:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 13:15:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 13:15:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 13:15:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 13:15:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 13:15:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 13:15:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 13:15:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 13:15:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 13:15:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 13:15:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 13:15:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 13:15:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 13:15:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 13:15:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 13:15:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 13:15:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 13:15:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 13:15:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 13:15:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 13:15:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 13:15:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 13:15:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 13:15:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 13:15:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 13:15:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 13:15:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 13:15:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:15:44] [Rank 0] PRINT: step:6800/10000 val_loss:5.5352 total_sharp:5.8479e-02 L1_sharp:1.8612e-02 L2_sharp:2.0496e-02 L3_sharp:1.3928e-02 L4_sharp:2.3407e-02 L5_sharp:2.9345e-02 L6_sharp:5.3373e-02 L7_sharp:8.0358e-02 L8_sharp:1.2149e-01 L9_sharp:2.1806e-01 L10_sharp:3.5427e-01 L11_sharp:4.7313e-01 L12_sharp:7.6004e-01 total_fnorm:1.1250e+00 total_l1_linf:1.0880e+03 total_spectral:5.8203e-01 L1_fnorm:8.6914e-02 L2_fnorm:8.7402e-02 L3_fnorm:8.7891e-02 L4_fnorm:8.7402e-02 L5_fnorm:8.6426e-02 L6_fnorm:8.6426e-02 L7_fnorm:8.6426e-02 L8_fnorm:8.5938e-02 L9_fnorm:8.6426e-02 L10_fnorm:8.5938e-02 L11_fnorm:8.5938e-02 L12_fnorm:8.1543e-02 L1_l1linf:2.0752e-02 L2_l1linf:2.1118e-02 L3_l1linf:2.1606e-02 L4_l1linf:2.1118e-02 L5_l1linf:2.1240e-02 L6_l1linf:2.0752e-02 L7_l1linf:2.0386e-02 L8_l1linf:2.0264e-02 L9_l1linf:2.0020e-02 L10_l1linf:2.0508e-02 L11_l1linf:2.1240e-02 L12_l1linf:1.5747e-02 L1_spectral:1.2975e-03 L2_spectral:1.2988e-03 L3_spectral:1.2977e-03 L4_spectral:1.3007e-03 L5_spectral:1.2969e-03 L6_spectral:1.2924e-03 L7_spectral:1.3104e-03 L8_spectral:1.2975e-03 L9_spectral:1.2986e-03 L10_spectral:1.2877e-03 L11_spectral:1.2671e-03 L12_spectral:1.2418e-03 train_time:289443ms step_avg:42.57ms +[2025-09-11 13:15:44] [Rank 0] PRINT: step:6800/10000 val_loss:5.5352 total_sharp:5.8479e-02 L1_sharp:1.8612e-02 L2_sharp:2.0496e-02 L3_sharp:1.3928e-02 L4_sharp:2.3407e-02 L5_sharp:2.9345e-02 L6_sharp:5.3373e-02 L7_sharp:8.0358e-02 L8_sharp:1.2149e-01 L9_sharp:2.1806e-01 L10_sharp:3.5427e-01 L11_sharp:4.7313e-01 L12_sharp:7.6004e-01 total_fnorm:1.1250e+00 total_l1_linf:1.0880e+03 total_spectral:5.8203e-01 L1_fnorm:8.6914e-02 L2_fnorm:8.7402e-02 L3_fnorm:8.7891e-02 L4_fnorm:8.7402e-02 L5_fnorm:8.6426e-02 L6_fnorm:8.6426e-02 L7_fnorm:8.6426e-02 L8_fnorm:8.5938e-02 L9_fnorm:8.6426e-02 L10_fnorm:8.5938e-02 L11_fnorm:8.5938e-02 L12_fnorm:8.1543e-02 L1_l1linf:2.0752e-02 L2_l1linf:2.1118e-02 L3_l1linf:2.1606e-02 L4_l1linf:2.1118e-02 L5_l1linf:2.1240e-02 
L6_l1linf:2.0752e-02 L7_l1linf:2.0386e-02 L8_l1linf:2.0264e-02 L9_l1linf:2.0020e-02 L10_l1linf:2.0508e-02 L11_l1linf:2.1240e-02 L12_l1linf:1.5747e-02 L1_spectral:1.2975e-03 L2_spectral:1.2988e-03 L3_spectral:1.2977e-03 L4_spectral:1.3007e-03 L5_spectral:1.2969e-03 L6_spectral:1.2924e-03 L7_spectral:1.3104e-03 L8_spectral:1.2975e-03 L9_spectral:1.2986e-03 L10_spectral:1.2877e-03 L11_spectral:1.2671e-03 L12_spectral:1.2418e-03 train_time:289443ms step_avg:42.57ms +[2025-09-11 13:15:46] [Rank 0] step:6801/10000 train_time:291376ms step_avg:42.84ms +[2025-09-11 13:15:46] [Rank 0] step:6801/10000 train_time:291376ms step_avg:42.84ms +[2025-09-11 13:15:47] [Rank 0] step:6821/10000 train_time:292103ms step_avg:42.82ms +[2025-09-11 13:15:47] [Rank 0] step:6821/10000 train_time:292103ms step_avg:42.82ms +[2025-09-11 13:15:48] [Rank 0] step:6841/10000 train_time:292803ms step_avg:42.80ms +[2025-09-11 13:15:48] [Rank 0] step:6841/10000 train_time:292803ms step_avg:42.80ms +[2025-09-11 13:15:48] [Rank 0] step:6861/10000 train_time:293501ms step_avg:42.78ms +[2025-09-11 13:15:48] [Rank 0] step:6861/10000 train_time:293501ms step_avg:42.78ms +[2025-09-11 13:15:49] [Rank 0] step:6881/10000 train_time:294201ms step_avg:42.76ms +[2025-09-11 13:15:49] [Rank 0] step:6881/10000 train_time:294201ms step_avg:42.76ms +[2025-09-11 13:15:50] [Rank 0] step:6901/10000 train_time:294898ms step_avg:42.73ms +[2025-09-11 13:15:50] [Rank 0] step:6901/10000 train_time:294898ms step_avg:42.73ms +[2025-09-11 13:15:50] [Rank 0] step:6921/10000 train_time:295596ms step_avg:42.71ms +[2025-09-11 13:15:50] [Rank 0] step:6921/10000 train_time:295596ms step_avg:42.71ms +[2025-09-11 13:15:51] [Rank 0] step:6941/10000 train_time:296293ms step_avg:42.69ms +[2025-09-11 13:15:51] [Rank 0] step:6941/10000 train_time:296293ms step_avg:42.69ms +[2025-09-11 13:15:52] [Rank 0] step:6961/10000 train_time:296992ms step_avg:42.67ms +[2025-09-11 13:15:52] [Rank 0] step:6961/10000 train_time:296992ms step_avg:42.67ms 
+[2025-09-11 13:15:52] [Rank 0] step:6981/10000 train_time:297692ms step_avg:42.64ms +[2025-09-11 13:15:52] [Rank 0] step:6981/10000 train_time:297692ms step_avg:42.64ms +[2025-09-11 13:15:53] [Rank 0] step:7001/10000 train_time:298389ms step_avg:42.62ms +[2025-09-11 13:15:53] [Rank 0] step:7001/10000 train_time:298389ms step_avg:42.62ms +[2025-09-11 13:15:54] [Rank 0] step:7021/10000 train_time:299088ms step_avg:42.60ms +[2025-09-11 13:15:54] [Rank 0] step:7021/10000 train_time:299088ms step_avg:42.60ms +[2025-09-11 13:15:55] [Rank 0] step:7041/10000 train_time:299784ms step_avg:42.58ms +[2025-09-11 13:15:55] [Rank 0] step:7041/10000 train_time:299784ms step_avg:42.58ms +[2025-09-11 13:15:55] [Rank 0] step:7061/10000 train_time:300483ms step_avg:42.56ms +[2025-09-11 13:15:55] [Rank 0] step:7061/10000 train_time:300483ms step_avg:42.56ms +[2025-09-11 13:15:56] [Rank 0] step:7081/10000 train_time:301180ms step_avg:42.53ms +[2025-09-11 13:15:56] [Rank 0] step:7081/10000 train_time:301180ms step_avg:42.53ms +[2025-09-11 13:15:57] [Rank 0] step:7101/10000 train_time:301878ms step_avg:42.51ms +[2025-09-11 13:15:57] [Rank 0] step:7101/10000 train_time:301878ms step_avg:42.51ms +[2025-09-11 13:15:57] [Rank 0] step:7121/10000 train_time:302578ms step_avg:42.49ms +[2025-09-11 13:15:57] [Rank 0] step:7121/10000 train_time:302578ms step_avg:42.49ms +[2025-09-11 13:15:58] [Rank 0] step:7141/10000 train_time:303276ms step_avg:42.47ms +[2025-09-11 13:15:58] [Rank 0] step:7141/10000 train_time:303276ms step_avg:42.47ms +[2025-09-11 13:15:59] [Rank 0] step:7161/10000 train_time:303975ms step_avg:42.45ms +[2025-09-11 13:15:59] [Rank 0] step:7161/10000 train_time:303975ms step_avg:42.45ms +[2025-09-11 13:15:59] [Rank 0] step:7181/10000 train_time:304672ms step_avg:42.43ms +[2025-09-11 13:15:59] [Rank 0] step:7181/10000 train_time:304672ms step_avg:42.43ms +[2025-09-11 13:16:00] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 13:16:00] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 13:16:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 13:16:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 13:16:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 13:16:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 13:16:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:16:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:16:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 13:16:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 13:16:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 13:16:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 13:16:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 13:16:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 13:16:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 13:16:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 13:16:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 13:16:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 13:16:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 13:16:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 13:16:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 13:16:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 13:16:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 13:16:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 13:16:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 13:16:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 13:16:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 13:16:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 13:16:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 13:16:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 13:16:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 13:16:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 13:16:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 13:16:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 13:16:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 13:16:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 13:16:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 13:16:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 13:16:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 13:16:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 13:16:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 13:16:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 13:16:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 13:16:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 13:16:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 13:16:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 13:16:15] [Rank 0] PRINT: step:7200/10000 val_loss:5.5131 total_sharp:6.8157e-02 L1_sharp:1.2627e-02 L2_sharp:1.4990e-02 L3_sharp:1.6067e-02 L4_sharp:2.4145e-02 L5_sharp:2.7780e-02 L6_sharp:4.4809e-02 L7_sharp:8.0104e-02 L8_sharp:1.3243e-01 L9_sharp:2.2634e-01 L10_sharp:4.0037e-01 L11_sharp:5.2658e-01 L12_sharp:7.7263e-01 total_fnorm:9.2578e-01 total_l1_linf:8.3200e+02 total_spectral:4.6875e-01 L1_fnorm:7.3730e-02 L2_fnorm:7.4707e-02 L3_fnorm:7.4707e-02 L4_fnorm:7.4219e-02 L5_fnorm:7.3730e-02 L6_fnorm:7.3242e-02 L7_fnorm:7.3730e-02 L8_fnorm:7.2754e-02 L9_fnorm:7.2754e-02 L10_fnorm:7.2266e-02 L11_fnorm:7.2266e-02 L12_fnorm:6.7871e-02 L1_l1linf:1.6846e-02 L2_l1linf:1.7334e-02 L3_l1linf:1.7578e-02 L4_l1linf:1.7700e-02 L5_l1linf:1.7456e-02 L6_l1linf:1.6968e-02 L7_l1linf:1.6846e-02 L8_l1linf:1.6357e-02 L9_l1linf:1.6479e-02 L10_l1linf:1.6357e-02 L11_l1linf:1.6968e-02 L12_l1linf:1.2573e-02 L1_spectral:1.1529e-03 L2_spectral:1.1638e-03 L3_spectral:1.1584e-03 L4_spectral:1.1584e-03 L5_spectral:1.1539e-03 L6_spectral:1.1481e-03 L7_spectral:1.1448e-03 L8_spectral:1.1451e-03 L9_spectral:1.1427e-03 L10_spectral:1.1382e-03 L11_spectral:1.1111e-03 L12_spectral:1.0670e-03 train_time:305350ms step_avg:42.41ms +[2025-09-11 13:16:15] [Rank 0] PRINT: step:7200/10000 
val_loss:5.5131 total_sharp:6.8157e-02 L1_sharp:1.2627e-02 L2_sharp:1.4990e-02 L3_sharp:1.6067e-02 L4_sharp:2.4145e-02 L5_sharp:2.7780e-02 L6_sharp:4.4809e-02 L7_sharp:8.0104e-02 L8_sharp:1.3243e-01 L9_sharp:2.2634e-01 L10_sharp:4.0037e-01 L11_sharp:5.2658e-01 L12_sharp:7.7263e-01 total_fnorm:9.2578e-01 total_l1_linf:8.3200e+02 total_spectral:4.6875e-01 L1_fnorm:7.3730e-02 L2_fnorm:7.4707e-02 L3_fnorm:7.4707e-02 L4_fnorm:7.4219e-02 L5_fnorm:7.3730e-02 L6_fnorm:7.3242e-02 L7_fnorm:7.3730e-02 L8_fnorm:7.2754e-02 L9_fnorm:7.2754e-02 L10_fnorm:7.2266e-02 L11_fnorm:7.2266e-02 L12_fnorm:6.7871e-02 L1_l1linf:1.6846e-02 L2_l1linf:1.7334e-02 L3_l1linf:1.7578e-02 L4_l1linf:1.7700e-02 L5_l1linf:1.7456e-02 L6_l1linf:1.6968e-02 L7_l1linf:1.6846e-02 L8_l1linf:1.6357e-02 L9_l1linf:1.6479e-02 L10_l1linf:1.6357e-02 L11_l1linf:1.6968e-02 L12_l1linf:1.2573e-02 L1_spectral:1.1529e-03 L2_spectral:1.1638e-03 L3_spectral:1.1584e-03 L4_spectral:1.1584e-03 L5_spectral:1.1539e-03 L6_spectral:1.1481e-03 L7_spectral:1.1448e-03 L8_spectral:1.1451e-03 L9_spectral:1.1427e-03 L10_spectral:1.1382e-03 L11_spectral:1.1111e-03 L12_spectral:1.0670e-03 train_time:305350ms step_avg:42.41ms +[2025-09-11 13:16:17] [Rank 0] step:7201/10000 train_time:307205ms step_avg:42.66ms +[2025-09-11 13:16:17] [Rank 0] step:7201/10000 train_time:307205ms step_avg:42.66ms +[2025-09-11 13:16:18] [Rank 0] step:7221/10000 train_time:307917ms step_avg:42.64ms +[2025-09-11 13:16:18] [Rank 0] step:7221/10000 train_time:307917ms step_avg:42.64ms +[2025-09-11 13:16:18] [Rank 0] step:7241/10000 train_time:308615ms step_avg:42.62ms +[2025-09-11 13:16:18] [Rank 0] step:7241/10000 train_time:308615ms step_avg:42.62ms +[2025-09-11 13:16:19] [Rank 0] step:7261/10000 train_time:309316ms step_avg:42.60ms +[2025-09-11 13:16:19] [Rank 0] step:7261/10000 train_time:309316ms step_avg:42.60ms +[2025-09-11 13:16:20] [Rank 0] step:7281/10000 train_time:310019ms step_avg:42.58ms +[2025-09-11 13:16:20] [Rank 0] step:7281/10000 
train_time:310019ms step_avg:42.58ms +[2025-09-11 13:16:21] [Rank 0] step:7301/10000 train_time:310717ms step_avg:42.56ms +[2025-09-11 13:16:21] [Rank 0] step:7301/10000 train_time:310717ms step_avg:42.56ms +[2025-09-11 13:16:21] [Rank 0] step:7321/10000 train_time:311416ms step_avg:42.54ms +[2025-09-11 13:16:21] [Rank 0] step:7321/10000 train_time:311416ms step_avg:42.54ms +[2025-09-11 13:16:22] [Rank 0] step:7341/10000 train_time:312115ms step_avg:42.52ms +[2025-09-11 13:16:22] [Rank 0] step:7341/10000 train_time:312115ms step_avg:42.52ms +[2025-09-11 13:16:23] [Rank 0] step:7361/10000 train_time:312814ms step_avg:42.50ms +[2025-09-11 13:16:23] [Rank 0] step:7361/10000 train_time:312814ms step_avg:42.50ms +[2025-09-11 13:16:23] [Rank 0] step:7381/10000 train_time:313514ms step_avg:42.48ms +[2025-09-11 13:16:23] [Rank 0] step:7381/10000 train_time:313514ms step_avg:42.48ms +[2025-09-11 13:16:24] [Rank 0] step:7401/10000 train_time:314211ms step_avg:42.46ms +[2025-09-11 13:16:24] [Rank 0] step:7401/10000 train_time:314211ms step_avg:42.46ms +[2025-09-11 13:16:25] [Rank 0] step:7421/10000 train_time:314909ms step_avg:42.43ms +[2025-09-11 13:16:25] [Rank 0] step:7421/10000 train_time:314909ms step_avg:42.43ms +[2025-09-11 13:16:25] [Rank 0] step:7441/10000 train_time:315609ms step_avg:42.41ms +[2025-09-11 13:16:25] [Rank 0] step:7441/10000 train_time:315609ms step_avg:42.41ms +[2025-09-11 13:16:26] [Rank 0] step:7461/10000 train_time:316603ms step_avg:42.43ms +[2025-09-11 13:16:26] [Rank 0] step:7461/10000 train_time:316603ms step_avg:42.43ms +[2025-09-11 13:16:27] [Rank 0] step:7481/10000 train_time:317303ms step_avg:42.41ms +[2025-09-11 13:16:27] [Rank 0] step:7481/10000 train_time:317303ms step_avg:42.41ms +[2025-09-11 13:16:28] [Rank 0] step:7501/10000 train_time:318001ms step_avg:42.39ms +[2025-09-11 13:16:28] [Rank 0] step:7501/10000 train_time:318001ms step_avg:42.39ms +[2025-09-11 13:16:29] [Rank 0] step:7521/10000 train_time:318976ms step_avg:42.41ms 
+[2025-09-11 13:16:29] [Rank 0] step:7521/10000 train_time:318976ms step_avg:42.41ms +[2025-09-11 13:16:29] [Rank 0] step:7541/10000 train_time:319673ms step_avg:42.39ms +[2025-09-11 13:16:29] [Rank 0] step:7541/10000 train_time:319673ms step_avg:42.39ms +[2025-09-11 13:16:30] [Rank 0] step:7561/10000 train_time:320374ms step_avg:42.37ms +[2025-09-11 13:16:30] [Rank 0] step:7561/10000 train_time:320374ms step_avg:42.37ms +[2025-09-11 13:16:31] [Rank 0] step:7581/10000 train_time:321075ms step_avg:42.35ms +[2025-09-11 13:16:31] [Rank 0] step:7581/10000 train_time:321075ms step_avg:42.35ms +[2025-09-11 13:16:32] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 13:16:32] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 13:16:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 13:16:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 13:16:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 13:16:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 13:16:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:16:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:16:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 13:16:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 13:16:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 13:16:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 13:16:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 13:16:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 13:16:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 13:16:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 13:16:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 13:16:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 13:16:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 13:16:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 13:16:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 13:16:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 13:16:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 13:16:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 13:16:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 13:16:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 13:16:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 13:16:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 13:16:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 13:16:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 13:16:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 13:16:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 13:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 13:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 13:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 13:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 13:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 13:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 13:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 13:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 13:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 13:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 13:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 13:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 13:16:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 13:16:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:16:43] [Rank 0] PRINT: step:7600/10000 val_loss:5.5006 total_sharp:7.4811e-02 L1_sharp:1.5304e-02 L2_sharp:1.7593e-02 L3_sharp:1.4784e-02 L4_sharp:2.5078e-02 L5_sharp:3.0972e-02 L6_sharp:5.5402e-02 L7_sharp:8.7702e-02 L8_sharp:1.4710e-01 L9_sharp:2.3726e-01 L10_sharp:3.6249e-01 L11_sharp:4.4720e-01 L12_sharp:1.0018e+00 total_fnorm:6.8359e-01 total_l1_linf:6.1200e+02 total_spectral:3.5547e-01 L1_fnorm:6.1035e-02 L2_fnorm:6.1523e-02 L3_fnorm:6.1523e-02 L4_fnorm:6.1279e-02 L5_fnorm:6.0791e-02 L6_fnorm:6.0547e-02 L7_fnorm:6.0547e-02 L8_fnorm:6.0303e-02 L9_fnorm:6.0547e-02 L10_fnorm:6.0059e-02 L11_fnorm:5.9570e-02 L12_fnorm:5.5420e-02 L1_l1linf:1.3489e-02 L2_l1linf:1.3367e-02 L3_l1linf:1.3916e-02 L4_l1linf:1.3733e-02 L5_l1linf:1.3428e-02 L6_l1linf:1.3611e-02 L7_l1linf:1.3062e-02 L8_l1linf:1.3123e-02 L9_l1linf:1.3245e-02 L10_l1linf:1.3306e-02 L11_l1linf:1.3550e-02 L12_l1linf:9.5215e-03 L1_spectral:1.0067e-03 L2_spectral:1.0053e-03 L3_spectral:1.0043e-03 L4_spectral:1.0014e-03 L5_spectral:9.9250e-04 L6_spectral:9.9649e-04 L7_spectral:9.9352e-04 L8_spectral:9.8367e-04 L9_spectral:9.8388e-04 L10_spectral:9.7248e-04 L11_spectral:9.5693e-04 L12_spectral:8.8546e-04 train_time:321755ms step_avg:42.34ms +[2025-09-11 13:16:43] [Rank 0] PRINT: step:7600/10000 val_loss:5.5006 total_sharp:7.4811e-02 L1_sharp:1.5304e-02 L2_sharp:1.7593e-02 L3_sharp:1.4784e-02 L4_sharp:2.5078e-02 L5_sharp:3.0972e-02 L6_sharp:5.5402e-02 L7_sharp:8.7702e-02 L8_sharp:1.4710e-01 L9_sharp:2.3726e-01 L10_sharp:3.6249e-01 L11_sharp:4.4720e-01 L12_sharp:1.0018e+00 total_fnorm:6.8359e-01 total_l1_linf:6.1200e+02 total_spectral:3.5547e-01 L1_fnorm:6.1035e-02 L2_fnorm:6.1523e-02 L3_fnorm:6.1523e-02 L4_fnorm:6.1279e-02 L5_fnorm:6.0791e-02 L6_fnorm:6.0547e-02 L7_fnorm:6.0547e-02 L8_fnorm:6.0303e-02 L9_fnorm:6.0547e-02 L10_fnorm:6.0059e-02 L11_fnorm:5.9570e-02 L12_fnorm:5.5420e-02 L1_l1linf:1.3489e-02 L2_l1linf:1.3367e-02 L3_l1linf:1.3916e-02 L4_l1linf:1.3733e-02 L5_l1linf:1.3428e-02 
L6_l1linf:1.3611e-02 L7_l1linf:1.3062e-02 L8_l1linf:1.3123e-02 L9_l1linf:1.3245e-02 L10_l1linf:1.3306e-02 L11_l1linf:1.3550e-02 L12_l1linf:9.5215e-03 L1_spectral:1.0067e-03 L2_spectral:1.0053e-03 L3_spectral:1.0043e-03 L4_spectral:1.0014e-03 L5_spectral:9.9250e-04 L6_spectral:9.9649e-04 L7_spectral:9.9352e-04 L8_spectral:9.8367e-04 L9_spectral:9.8388e-04 L10_spectral:9.7248e-04 L11_spectral:9.5693e-04 L12_spectral:8.8546e-04 train_time:321755ms step_avg:42.34ms +[2025-09-11 13:16:45] [Rank 0] step:7601/10000 train_time:323677ms step_avg:42.58ms +[2025-09-11 13:16:45] [Rank 0] step:7601/10000 train_time:323677ms step_avg:42.58ms +[2025-09-11 13:16:46] [Rank 0] step:7621/10000 train_time:324400ms step_avg:42.57ms +[2025-09-11 13:16:46] [Rank 0] step:7621/10000 train_time:324400ms step_avg:42.57ms +[2025-09-11 13:16:46] [Rank 0] step:7641/10000 train_time:325102ms step_avg:42.55ms +[2025-09-11 13:16:46] [Rank 0] step:7641/10000 train_time:325102ms step_avg:42.55ms +[2025-09-11 13:16:47] [Rank 0] step:7661/10000 train_time:325800ms step_avg:42.53ms +[2025-09-11 13:16:47] [Rank 0] step:7661/10000 train_time:325800ms step_avg:42.53ms +[2025-09-11 13:16:48] [Rank 0] step:7681/10000 train_time:326500ms step_avg:42.51ms +[2025-09-11 13:16:48] [Rank 0] step:7681/10000 train_time:326500ms step_avg:42.51ms +[2025-09-11 13:16:48] [Rank 0] step:7701/10000 train_time:327200ms step_avg:42.49ms +[2025-09-11 13:16:48] [Rank 0] step:7701/10000 train_time:327200ms step_avg:42.49ms +[2025-09-11 13:16:49] [Rank 0] step:7721/10000 train_time:327901ms step_avg:42.47ms +[2025-09-11 13:16:49] [Rank 0] step:7721/10000 train_time:327901ms step_avg:42.47ms +[2025-09-11 13:16:50] [Rank 0] step:7741/10000 train_time:328601ms step_avg:42.45ms +[2025-09-11 13:16:50] [Rank 0] step:7741/10000 train_time:328601ms step_avg:42.45ms +[2025-09-11 13:16:50] [Rank 0] step:7761/10000 train_time:329299ms step_avg:42.43ms +[2025-09-11 13:16:50] [Rank 0] step:7761/10000 train_time:329299ms step_avg:42.43ms 
+[2025-09-11 13:16:51] [Rank 0] step:7781/10000 train_time:330001ms step_avg:42.41ms +[2025-09-11 13:16:51] [Rank 0] step:7781/10000 train_time:330001ms step_avg:42.41ms +[2025-09-11 13:16:52] [Rank 0] step:7801/10000 train_time:330700ms step_avg:42.39ms +[2025-09-11 13:16:52] [Rank 0] step:7801/10000 train_time:330700ms step_avg:42.39ms +[2025-09-11 13:16:53] [Rank 0] step:7821/10000 train_time:331399ms step_avg:42.37ms +[2025-09-11 13:16:53] [Rank 0] step:7821/10000 train_time:331399ms step_avg:42.37ms +[2025-09-11 13:16:53] [Rank 0] step:7841/10000 train_time:332100ms step_avg:42.35ms +[2025-09-11 13:16:53] [Rank 0] step:7841/10000 train_time:332100ms step_avg:42.35ms +[2025-09-11 13:16:54] [Rank 0] step:7861/10000 train_time:332803ms step_avg:42.34ms +[2025-09-11 13:16:54] [Rank 0] step:7861/10000 train_time:332803ms step_avg:42.34ms +[2025-09-11 13:16:55] [Rank 0] step:7881/10000 train_time:333503ms step_avg:42.32ms +[2025-09-11 13:16:55] [Rank 0] step:7881/10000 train_time:333503ms step_avg:42.32ms +[2025-09-11 13:16:55] [Rank 0] step:7901/10000 train_time:334204ms step_avg:42.30ms +[2025-09-11 13:16:55] [Rank 0] step:7901/10000 train_time:334204ms step_avg:42.30ms +[2025-09-11 13:16:56] [Rank 0] step:7921/10000 train_time:334905ms step_avg:42.28ms +[2025-09-11 13:16:56] [Rank 0] step:7921/10000 train_time:334905ms step_avg:42.28ms +[2025-09-11 13:16:57] [Rank 0] step:7941/10000 train_time:335607ms step_avg:42.26ms +[2025-09-11 13:16:57] [Rank 0] step:7941/10000 train_time:335607ms step_avg:42.26ms +[2025-09-11 13:16:57] [Rank 0] step:7961/10000 train_time:336307ms step_avg:42.24ms +[2025-09-11 13:16:57] [Rank 0] step:7961/10000 train_time:336307ms step_avg:42.24ms +[2025-09-11 13:16:58] [Rank 0] step:7981/10000 train_time:337010ms step_avg:42.23ms +[2025-09-11 13:16:58] [Rank 0] step:7981/10000 train_time:337010ms step_avg:42.23ms +[2025-09-11 13:16:59] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 13:16:59] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 13:17:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 13:17:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 13:17:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 13:17:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 13:17:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:17:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:17:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 13:17:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 13:17:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 13:17:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 13:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 13:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 13:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 13:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 13:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 13:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 13:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 13:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 13:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 13:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 13:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 13:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 13:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 13:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 13:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 13:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 13:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 13:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 13:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 13:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 13:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 13:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 13:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 13:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 13:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 13:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 13:17:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 13:17:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 13:17:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 13:17:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 13:17:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 13:17:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 13:17:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 13:17:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 13:17:11] [Rank 0] PRINT: step:8000/10000 val_loss:5.4899 total_sharp:8.1115e-02 L1_sharp:1.8503e-02 L2_sharp:1.7452e-02 L3_sharp:1.4861e-02 L4_sharp:1.9868e-02 L5_sharp:2.9239e-02 L6_sharp:5.8855e-02 L7_sharp:9.9867e-02 L8_sharp:1.8409e-01 L9_sharp:2.8017e-01 L10_sharp:4.3722e-01 L11_sharp:4.8060e-01 L12_sharp:6.8744e-01 total_fnorm:5.7812e-01 total_l1_linf:4.6600e+02 total_spectral:2.9102e-01 L1_fnorm:4.9316e-02 L2_fnorm:4.9805e-02 L3_fnorm:4.9805e-02 L4_fnorm:4.9561e-02 L5_fnorm:4.9072e-02 L6_fnorm:4.8828e-02 L7_fnorm:4.8828e-02 L8_fnorm:4.8584e-02 L9_fnorm:4.8584e-02 L10_fnorm:4.8340e-02 L11_fnorm:4.7852e-02 L12_fnorm:4.3945e-02 L1_l1linf:1.0315e-02 L2_l1linf:1.0315e-02 L3_l1linf:1.0315e-02 L4_l1linf:1.0315e-02 L5_l1linf:1.0376e-02 L6_l1linf:1.0132e-02 L7_l1linf:9.8877e-03 L8_l1linf:1.0071e-02 L9_l1linf:1.0559e-02 L10_l1linf:1.0254e-02 L11_l1linf:1.0132e-02 L12_l1linf:7.4768e-03 L1_spectral:8.3965e-04 L2_spectral:8.4631e-04 L3_spectral:8.4737e-04 L4_spectral:8.4089e-04 L5_spectral:8.4019e-04 L6_spectral:8.3264e-04 L7_spectral:8.3497e-04 L8_spectral:8.2622e-04 L9_spectral:8.2523e-04 L10_spectral:8.0107e-04 L11_spectral:7.7093e-04 L12_spectral:7.2168e-04 train_time:337689ms step_avg:42.21ms +[2025-09-11 13:17:11] [Rank 0] PRINT: step:8000/10000 
val_loss:5.4899 total_sharp:8.1115e-02 L1_sharp:1.8503e-02 L2_sharp:1.7452e-02 L3_sharp:1.4861e-02 L4_sharp:1.9868e-02 L5_sharp:2.9239e-02 L6_sharp:5.8855e-02 L7_sharp:9.9867e-02 L8_sharp:1.8409e-01 L9_sharp:2.8017e-01 L10_sharp:4.3722e-01 L11_sharp:4.8060e-01 L12_sharp:6.8744e-01 total_fnorm:5.7812e-01 total_l1_linf:4.6600e+02 total_spectral:2.9102e-01 L1_fnorm:4.9316e-02 L2_fnorm:4.9805e-02 L3_fnorm:4.9805e-02 L4_fnorm:4.9561e-02 L5_fnorm:4.9072e-02 L6_fnorm:4.8828e-02 L7_fnorm:4.8828e-02 L8_fnorm:4.8584e-02 L9_fnorm:4.8584e-02 L10_fnorm:4.8340e-02 L11_fnorm:4.7852e-02 L12_fnorm:4.3945e-02 L1_l1linf:1.0315e-02 L2_l1linf:1.0315e-02 L3_l1linf:1.0315e-02 L4_l1linf:1.0315e-02 L5_l1linf:1.0376e-02 L6_l1linf:1.0132e-02 L7_l1linf:9.8877e-03 L8_l1linf:1.0071e-02 L9_l1linf:1.0559e-02 L10_l1linf:1.0254e-02 L11_l1linf:1.0132e-02 L12_l1linf:7.4768e-03 L1_spectral:8.3965e-04 L2_spectral:8.4631e-04 L3_spectral:8.4737e-04 L4_spectral:8.4089e-04 L5_spectral:8.4019e-04 L6_spectral:8.3264e-04 L7_spectral:8.3497e-04 L8_spectral:8.2622e-04 L9_spectral:8.2523e-04 L10_spectral:8.0107e-04 L11_spectral:7.7093e-04 L12_spectral:7.2168e-04 train_time:337689ms step_avg:42.21ms +[2025-09-11 13:17:13] [Rank 0] step:8001/10000 train_time:340220ms step_avg:42.52ms +[2025-09-11 13:17:13] [Rank 0] step:8001/10000 train_time:340220ms step_avg:42.52ms +[2025-09-11 13:17:14] [Rank 0] step:8021/10000 train_time:340946ms step_avg:42.51ms +[2025-09-11 13:17:14] [Rank 0] step:8021/10000 train_time:340946ms step_avg:42.51ms +[2025-09-11 13:17:15] [Rank 0] step:8041/10000 train_time:341647ms step_avg:42.49ms +[2025-09-11 13:17:15] [Rank 0] step:8041/10000 train_time:341647ms step_avg:42.49ms +[2025-09-11 13:17:15] [Rank 0] step:8061/10000 train_time:342349ms step_avg:42.47ms +[2025-09-11 13:17:15] [Rank 0] step:8061/10000 train_time:342349ms step_avg:42.47ms +[2025-09-11 13:17:16] [Rank 0] step:8081/10000 train_time:343048ms step_avg:42.45ms +[2025-09-11 13:17:16] [Rank 0] step:8081/10000 
train_time:343048ms step_avg:42.45ms +[2025-09-11 13:17:17] [Rank 0] step:8101/10000 train_time:343746ms step_avg:42.43ms +[2025-09-11 13:17:17] [Rank 0] step:8101/10000 train_time:343746ms step_avg:42.43ms +[2025-09-11 13:17:17] [Rank 0] step:8121/10000 train_time:344450ms step_avg:42.41ms +[2025-09-11 13:17:17] [Rank 0] step:8121/10000 train_time:344450ms step_avg:42.41ms +[2025-09-11 13:17:19] [Rank 0] step:8141/10000 train_time:345874ms step_avg:42.49ms +[2025-09-11 13:17:19] [Rank 0] step:8141/10000 train_time:345874ms step_avg:42.49ms +[2025-09-11 13:17:20] [Rank 0] step:8161/10000 train_time:346577ms step_avg:42.47ms +[2025-09-11 13:17:20] [Rank 0] step:8161/10000 train_time:346577ms step_avg:42.47ms +[2025-09-11 13:17:20] [Rank 0] step:8181/10000 train_time:347287ms step_avg:42.45ms +[2025-09-11 13:17:20] [Rank 0] step:8181/10000 train_time:347287ms step_avg:42.45ms +[2025-09-11 13:17:21] [Rank 0] step:8201/10000 train_time:347994ms step_avg:42.43ms +[2025-09-11 13:17:21] [Rank 0] step:8201/10000 train_time:347994ms step_avg:42.43ms +[2025-09-11 13:17:22] [Rank 0] step:8221/10000 train_time:348700ms step_avg:42.42ms +[2025-09-11 13:17:22] [Rank 0] step:8221/10000 train_time:348700ms step_avg:42.42ms +[2025-09-11 13:17:22] [Rank 0] step:8241/10000 train_time:349414ms step_avg:42.40ms +[2025-09-11 13:17:22] [Rank 0] step:8241/10000 train_time:349414ms step_avg:42.40ms +[2025-09-11 13:17:23] [Rank 0] step:8261/10000 train_time:350120ms step_avg:42.38ms +[2025-09-11 13:17:23] [Rank 0] step:8261/10000 train_time:350120ms step_avg:42.38ms +[2025-09-11 13:17:24] [Rank 0] step:8281/10000 train_time:350822ms step_avg:42.36ms +[2025-09-11 13:17:24] [Rank 0] step:8281/10000 train_time:350822ms step_avg:42.36ms +[2025-09-11 13:17:24] [Rank 0] step:8301/10000 train_time:351529ms step_avg:42.35ms +[2025-09-11 13:17:24] [Rank 0] step:8301/10000 train_time:351529ms step_avg:42.35ms +[2025-09-11 13:17:25] [Rank 0] step:8321/10000 train_time:352234ms step_avg:42.33ms 
+[2025-09-11 13:17:25] [Rank 0] step:8321/10000 train_time:352234ms step_avg:42.33ms +[2025-09-11 13:17:26] [Rank 0] step:8341/10000 train_time:352946ms step_avg:42.31ms +[2025-09-11 13:17:26] [Rank 0] step:8341/10000 train_time:352946ms step_avg:42.31ms +[2025-09-11 13:17:27] [Rank 0] step:8361/10000 train_time:353648ms step_avg:42.30ms +[2025-09-11 13:17:27] [Rank 0] step:8361/10000 train_time:353648ms step_avg:42.30ms +[2025-09-11 13:17:27] [Rank 0] step:8381/10000 train_time:354357ms step_avg:42.28ms +[2025-09-11 13:17:27] [Rank 0] step:8381/10000 train_time:354357ms step_avg:42.28ms +[2025-09-11 13:17:28] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 13:17:28] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 13:17:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 13:17:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 13:17:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 13:17:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 13:17:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:17:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:17:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 13:17:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 13:17:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 13:17:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 13:17:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 13:17:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 13:17:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 13:17:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 13:17:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 13:17:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 13:17:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 13:17:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 13:17:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 13:17:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 13:17:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 13:17:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 13:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 13:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 13:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 13:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 13:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 13:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 13:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 13:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 13:17:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 13:17:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 13:17:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 13:17:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 13:17:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 13:17:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 13:17:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 13:17:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 13:17:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 13:17:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 13:17:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 13:17:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 13:17:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 13:17:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:17:38] [Rank 0] PRINT: step:8400/10000 val_loss:5.4786 total_sharp:5.8638e-02 L1_sharp:1.4403e-02 L2_sharp:1.3339e-02 L3_sharp:1.6092e-02 L4_sharp:1.7760e-02 L5_sharp:2.6989e-02 L6_sharp:5.0667e-02 L7_sharp:7.9158e-02 L8_sharp:1.0518e-01 L9_sharp:1.8609e-01 L10_sharp:2.7258e-01 L11_sharp:3.7388e-01 L12_sharp:6.0112e-01 total_fnorm:4.3555e-01 total_l1_linf:3.1800e+02 total_spectral:2.1973e-01 L1_fnorm:3.8330e-02 L2_fnorm:3.8574e-02 L3_fnorm:3.8574e-02 L4_fnorm:3.8574e-02 L5_fnorm:3.8086e-02 L6_fnorm:3.7842e-02 L7_fnorm:3.7842e-02 L8_fnorm:3.7598e-02 L9_fnorm:3.7598e-02 L10_fnorm:3.7354e-02 L11_fnorm:3.7109e-02 L12_fnorm:3.4180e-02 L1_l1linf:7.3853e-03 L2_l1linf:7.6904e-03 L3_l1linf:7.4463e-03 L4_l1linf:7.4768e-03 L5_l1linf:7.3853e-03 L6_l1linf:7.2937e-03 L7_l1linf:7.2632e-03 L8_l1linf:7.3242e-03 L9_l1linf:7.0801e-03 L10_l1linf:7.3547e-03 L11_l1linf:7.2632e-03 L12_l1linf:5.2490e-03 L1_spectral:6.7709e-04 L2_spectral:6.7692e-04 L3_spectral:6.7531e-04 L4_spectral:6.7411e-04 L5_spectral:6.6871e-04 L6_spectral:6.6209e-04 L7_spectral:6.5782e-04 L8_spectral:6.5583e-04 L9_spectral:6.5545e-04 L10_spectral:6.4866e-04 L11_spectral:6.2201e-04 L12_spectral:5.6465e-04 train_time:355046ms step_avg:42.27ms +[2025-09-11 13:17:38] [Rank 0] PRINT: step:8400/10000 val_loss:5.4786 total_sharp:5.8638e-02 L1_sharp:1.4403e-02 L2_sharp:1.3339e-02 L3_sharp:1.6092e-02 L4_sharp:1.7760e-02 L5_sharp:2.6989e-02 L6_sharp:5.0667e-02 L7_sharp:7.9158e-02 L8_sharp:1.0518e-01 L9_sharp:1.8609e-01 L10_sharp:2.7258e-01 L11_sharp:3.7388e-01 L12_sharp:6.0112e-01 total_fnorm:4.3555e-01 total_l1_linf:3.1800e+02 total_spectral:2.1973e-01 L1_fnorm:3.8330e-02 L2_fnorm:3.8574e-02 L3_fnorm:3.8574e-02 L4_fnorm:3.8574e-02 L5_fnorm:3.8086e-02 L6_fnorm:3.7842e-02 L7_fnorm:3.7842e-02 L8_fnorm:3.7598e-02 L9_fnorm:3.7598e-02 L10_fnorm:3.7354e-02 L11_fnorm:3.7109e-02 L12_fnorm:3.4180e-02 L1_l1linf:7.3853e-03 L2_l1linf:7.6904e-03 L3_l1linf:7.4463e-03 L4_l1linf:7.4768e-03 L5_l1linf:7.3853e-03 
L6_l1linf:7.2937e-03 L7_l1linf:7.2632e-03 L8_l1linf:7.3242e-03 L9_l1linf:7.0801e-03 L10_l1linf:7.3547e-03 L11_l1linf:7.2632e-03 L12_l1linf:5.2490e-03 L1_spectral:6.7709e-04 L2_spectral:6.7692e-04 L3_spectral:6.7531e-04 L4_spectral:6.7411e-04 L5_spectral:6.6871e-04 L6_spectral:6.6209e-04 L7_spectral:6.5782e-04 L8_spectral:6.5583e-04 L9_spectral:6.5545e-04 L10_spectral:6.4866e-04 L11_spectral:6.2201e-04 L12_spectral:5.6465e-04 train_time:355046ms step_avg:42.27ms +[2025-09-11 13:17:40] [Rank 0] step:8401/10000 train_time:356968ms step_avg:42.49ms +[2025-09-11 13:17:40] [Rank 0] step:8401/10000 train_time:356968ms step_avg:42.49ms +[2025-09-11 13:17:41] [Rank 0] step:8421/10000 train_time:357697ms step_avg:42.48ms +[2025-09-11 13:17:41] [Rank 0] step:8421/10000 train_time:357697ms step_avg:42.48ms +[2025-09-11 13:17:42] [Rank 0] step:8441/10000 train_time:358405ms step_avg:42.46ms +[2025-09-11 13:17:42] [Rank 0] step:8441/10000 train_time:358405ms step_avg:42.46ms +[2025-09-11 13:17:42] [Rank 0] step:8461/10000 train_time:359114ms step_avg:42.44ms +[2025-09-11 13:17:42] [Rank 0] step:8461/10000 train_time:359114ms step_avg:42.44ms +[2025-09-11 13:17:43] [Rank 0] step:8481/10000 train_time:359822ms step_avg:42.43ms +[2025-09-11 13:17:43] [Rank 0] step:8481/10000 train_time:359822ms step_avg:42.43ms +[2025-09-11 13:17:44] [Rank 0] step:8501/10000 train_time:360529ms step_avg:42.41ms +[2025-09-11 13:17:44] [Rank 0] step:8501/10000 train_time:360529ms step_avg:42.41ms +[2025-09-11 13:17:45] [Rank 0] step:8521/10000 train_time:361235ms step_avg:42.39ms +[2025-09-11 13:17:45] [Rank 0] step:8521/10000 train_time:361235ms step_avg:42.39ms +[2025-09-11 13:17:45] [Rank 0] step:8541/10000 train_time:361943ms step_avg:42.38ms +[2025-09-11 13:17:45] [Rank 0] step:8541/10000 train_time:361943ms step_avg:42.38ms +[2025-09-11 13:17:46] [Rank 0] step:8561/10000 train_time:362654ms step_avg:42.36ms +[2025-09-11 13:17:46] [Rank 0] step:8561/10000 train_time:362654ms step_avg:42.36ms 
+[2025-09-11 13:17:47] [Rank 0] step:8581/10000 train_time:363365ms step_avg:42.35ms +[2025-09-11 13:17:47] [Rank 0] step:8581/10000 train_time:363365ms step_avg:42.35ms +[2025-09-11 13:17:47] [Rank 0] step:8601/10000 train_time:364073ms step_avg:42.33ms +[2025-09-11 13:17:47] [Rank 0] step:8601/10000 train_time:364073ms step_avg:42.33ms +[2025-09-11 13:17:48] [Rank 0] step:8621/10000 train_time:364778ms step_avg:42.31ms +[2025-09-11 13:17:48] [Rank 0] step:8621/10000 train_time:364778ms step_avg:42.31ms +[2025-09-11 13:17:49] [Rank 0] step:8641/10000 train_time:365485ms step_avg:42.30ms +[2025-09-11 13:17:49] [Rank 0] step:8641/10000 train_time:365485ms step_avg:42.30ms +[2025-09-11 13:17:49] [Rank 0] step:8661/10000 train_time:366192ms step_avg:42.28ms +[2025-09-11 13:17:49] [Rank 0] step:8661/10000 train_time:366192ms step_avg:42.28ms +[2025-09-11 13:17:50] [Rank 0] step:8681/10000 train_time:366901ms step_avg:42.26ms +[2025-09-11 13:17:50] [Rank 0] step:8681/10000 train_time:366901ms step_avg:42.26ms +[2025-09-11 13:17:51] [Rank 0] step:8701/10000 train_time:367606ms step_avg:42.25ms +[2025-09-11 13:17:51] [Rank 0] step:8701/10000 train_time:367606ms step_avg:42.25ms +[2025-09-11 13:17:52] [Rank 0] step:8721/10000 train_time:368314ms step_avg:42.23ms +[2025-09-11 13:17:52] [Rank 0] step:8721/10000 train_time:368314ms step_avg:42.23ms +[2025-09-11 13:17:52] [Rank 0] step:8741/10000 train_time:369017ms step_avg:42.22ms +[2025-09-11 13:17:52] [Rank 0] step:8741/10000 train_time:369017ms step_avg:42.22ms +[2025-09-11 13:17:53] [Rank 0] step:8761/10000 train_time:369726ms step_avg:42.20ms +[2025-09-11 13:17:53] [Rank 0] step:8761/10000 train_time:369726ms step_avg:42.20ms +[2025-09-11 13:17:54] [Rank 0] step:8781/10000 train_time:370430ms step_avg:42.19ms +[2025-09-11 13:17:54] [Rank 0] step:8781/10000 train_time:370430ms step_avg:42.19ms +[2025-09-11 13:17:54] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 13:17:54] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 13:17:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 13:17:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 13:17:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 13:17:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 13:17:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:17:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:17:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 13:17:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 13:17:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 13:17:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 13:18:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 13:18:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 13:18:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 13:18:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 13:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 13:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 13:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 13:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 13:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 13:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 13:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 13:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 13:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 13:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 13:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 13:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 13:18:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 13:18:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 13:18:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 13:18:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 13:18:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 13:18:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 13:18:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 13:18:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 13:18:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 13:18:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 13:18:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 13:18:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 13:18:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 13:18:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 13:18:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 13:18:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 13:18:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 13:18:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 13:18:05] [Rank 0] PRINT: step:8800/10000 val_loss:5.4704 total_sharp:4.7167e-02 L1_sharp:1.4873e-02 L2_sharp:1.1491e-02 L3_sharp:1.1451e-02 L4_sharp:1.6332e-02 L5_sharp:1.8742e-02 L6_sharp:3.2245e-02 L7_sharp:5.5355e-02 L8_sharp:6.6118e-02 L9_sharp:1.7375e-01 L10_sharp:2.2002e-01 L11_sharp:3.0536e-01 L12_sharp:6.1171e-01 total_fnorm:3.1055e-01 total_l1_linf:2.0300e+02 total_spectral:1.5625e-01 L1_fnorm:2.7344e-02 L2_fnorm:2.7710e-02 L3_fnorm:2.7588e-02 L4_fnorm:2.7588e-02 L5_fnorm:2.7344e-02 L6_fnorm:2.7100e-02 L7_fnorm:2.7100e-02 L8_fnorm:2.6733e-02 L9_fnorm:2.6978e-02 L10_fnorm:2.6611e-02 L11_fnorm:2.6367e-02 L12_fnorm:2.4292e-02 L1_l1linf:4.7302e-03 L2_l1linf:4.9133e-03 L3_l1linf:4.8828e-03 L4_l1linf:4.9438e-03 L5_l1linf:4.7913e-03 L6_l1linf:4.8523e-03 L7_l1linf:4.9133e-03 L8_l1linf:4.6692e-03 L9_l1linf:4.5166e-03 L10_l1linf:4.6387e-03 L11_l1linf:4.7302e-03 L12_l1linf:3.4943e-03 L1_spectral:4.9971e-04 L2_spectral:5.0636e-04 L3_spectral:5.0301e-04 L4_spectral:5.0116e-04 L5_spectral:4.9434e-04 L6_spectral:4.8882e-04 L7_spectral:4.8380e-04 L8_spectral:4.8464e-04 L9_spectral:4.8262e-04 L10_spectral:4.7036e-04 L11_spectral:4.5143e-04 L12_spectral:4.1424e-04 train_time:371114ms step_avg:42.17ms +[2025-09-11 13:18:05] [Rank 0] PRINT: step:8800/10000 
val_loss:5.4704 total_sharp:4.7167e-02 L1_sharp:1.4873e-02 L2_sharp:1.1491e-02 L3_sharp:1.1451e-02 L4_sharp:1.6332e-02 L5_sharp:1.8742e-02 L6_sharp:3.2245e-02 L7_sharp:5.5355e-02 L8_sharp:6.6118e-02 L9_sharp:1.7375e-01 L10_sharp:2.2002e-01 L11_sharp:3.0536e-01 L12_sharp:6.1171e-01 total_fnorm:3.1055e-01 total_l1_linf:2.0300e+02 total_spectral:1.5625e-01 L1_fnorm:2.7344e-02 L2_fnorm:2.7710e-02 L3_fnorm:2.7588e-02 L4_fnorm:2.7588e-02 L5_fnorm:2.7344e-02 L6_fnorm:2.7100e-02 L7_fnorm:2.7100e-02 L8_fnorm:2.6733e-02 L9_fnorm:2.6978e-02 L10_fnorm:2.6611e-02 L11_fnorm:2.6367e-02 L12_fnorm:2.4292e-02 L1_l1linf:4.7302e-03 L2_l1linf:4.9133e-03 L3_l1linf:4.8828e-03 L4_l1linf:4.9438e-03 L5_l1linf:4.7913e-03 L6_l1linf:4.8523e-03 L7_l1linf:4.9133e-03 L8_l1linf:4.6692e-03 L9_l1linf:4.5166e-03 L10_l1linf:4.6387e-03 L11_l1linf:4.7302e-03 L12_l1linf:3.4943e-03 L1_spectral:4.9971e-04 L2_spectral:5.0636e-04 L3_spectral:5.0301e-04 L4_spectral:5.0116e-04 L5_spectral:4.9434e-04 L6_spectral:4.8882e-04 L7_spectral:4.8380e-04 L8_spectral:4.8464e-04 L9_spectral:4.8262e-04 L10_spectral:4.7036e-04 L11_spectral:4.5143e-04 L12_spectral:4.1424e-04 train_time:371114ms step_avg:42.17ms +[2025-09-11 13:18:07] [Rank 0] step:8801/10000 train_time:373193ms step_avg:42.40ms +[2025-09-11 13:18:07] [Rank 0] step:8801/10000 train_time:373193ms step_avg:42.40ms +[2025-09-11 13:18:08] [Rank 0] step:8821/10000 train_time:373924ms step_avg:42.39ms +[2025-09-11 13:18:08] [Rank 0] step:8821/10000 train_time:373924ms step_avg:42.39ms +[2025-09-11 13:18:08] [Rank 0] step:8841/10000 train_time:374632ms step_avg:42.37ms +[2025-09-11 13:18:08] [Rank 0] step:8841/10000 train_time:374632ms step_avg:42.37ms +[2025-09-11 13:18:09] [Rank 0] step:8861/10000 train_time:375339ms step_avg:42.36ms +[2025-09-11 13:18:09] [Rank 0] step:8861/10000 train_time:375339ms step_avg:42.36ms +[2025-09-11 13:18:10] [Rank 0] step:8881/10000 train_time:376047ms step_avg:42.34ms +[2025-09-11 13:18:10] [Rank 0] step:8881/10000 
train_time:376047ms step_avg:42.34ms +[2025-09-11 13:18:11] [Rank 0] step:8901/10000 train_time:376757ms step_avg:42.33ms +[2025-09-11 13:18:11] [Rank 0] step:8901/10000 train_time:376757ms step_avg:42.33ms +[2025-09-11 13:18:11] [Rank 0] step:8921/10000 train_time:377461ms step_avg:42.31ms +[2025-09-11 13:18:11] [Rank 0] step:8921/10000 train_time:377461ms step_avg:42.31ms +[2025-09-11 13:18:12] [Rank 0] step:8941/10000 train_time:378174ms step_avg:42.30ms +[2025-09-11 13:18:12] [Rank 0] step:8941/10000 train_time:378174ms step_avg:42.30ms +[2025-09-11 13:18:13] [Rank 0] step:8961/10000 train_time:378889ms step_avg:42.28ms +[2025-09-11 13:18:13] [Rank 0] step:8961/10000 train_time:378889ms step_avg:42.28ms +[2025-09-11 13:18:13] [Rank 0] step:8981/10000 train_time:379601ms step_avg:42.27ms +[2025-09-11 13:18:13] [Rank 0] step:8981/10000 train_time:379601ms step_avg:42.27ms +[2025-09-11 13:18:14] [Rank 0] step:9001/10000 train_time:380304ms step_avg:42.25ms +[2025-09-11 13:18:14] [Rank 0] step:9001/10000 train_time:380304ms step_avg:42.25ms +[2025-09-11 13:18:15] [Rank 0] step:9021/10000 train_time:381011ms step_avg:42.24ms +[2025-09-11 13:18:15] [Rank 0] step:9021/10000 train_time:381011ms step_avg:42.24ms +[2025-09-11 13:18:15] [Rank 0] step:9041/10000 train_time:381720ms step_avg:42.22ms +[2025-09-11 13:18:15] [Rank 0] step:9041/10000 train_time:381720ms step_avg:42.22ms +[2025-09-11 13:18:16] [Rank 0] step:9061/10000 train_time:382427ms step_avg:42.21ms +[2025-09-11 13:18:16] [Rank 0] step:9061/10000 train_time:382427ms step_avg:42.21ms +[2025-09-11 13:18:17] [Rank 0] step:9081/10000 train_time:383137ms step_avg:42.19ms +[2025-09-11 13:18:17] [Rank 0] step:9081/10000 train_time:383137ms step_avg:42.19ms +[2025-09-11 13:18:18] [Rank 0] step:9101/10000 train_time:383849ms step_avg:42.18ms +[2025-09-11 13:18:18] [Rank 0] step:9101/10000 train_time:383849ms step_avg:42.18ms +[2025-09-11 13:18:18] [Rank 0] step:9121/10000 train_time:384561ms step_avg:42.16ms 
+[2025-09-11 13:18:18] [Rank 0] step:9121/10000 train_time:384561ms step_avg:42.16ms +[2025-09-11 13:18:19] [Rank 0] step:9141/10000 train_time:385266ms step_avg:42.15ms +[2025-09-11 13:18:19] [Rank 0] step:9141/10000 train_time:385266ms step_avg:42.15ms +[2025-09-11 13:18:20] [Rank 0] step:9161/10000 train_time:385977ms step_avg:42.13ms +[2025-09-11 13:18:20] [Rank 0] step:9161/10000 train_time:385977ms step_avg:42.13ms +[2025-09-11 13:18:20] [Rank 0] step:9181/10000 train_time:386687ms step_avg:42.12ms +[2025-09-11 13:18:20] [Rank 0] step:9181/10000 train_time:386687ms step_avg:42.12ms +[2025-09-11 13:18:21] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 13:18:21] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 13:18:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 13:18:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 13:18:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 13:18:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 13:18:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:18:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:18:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 13:18:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 13:18:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 13:18:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 13:18:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 13:18:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 13:18:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 13:18:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 13:18:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 13:18:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 13:18:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 13:18:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 13:18:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 13:18:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 13:18:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 13:18:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 13:18:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 13:18:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 13:18:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 13:18:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 13:18:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 13:18:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 13:18:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 13:18:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 13:18:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 13:18:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 13:18:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 13:18:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 13:18:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 13:18:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 13:18:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 13:18:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 13:18:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 13:18:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 13:18:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 13:18:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 13:18:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 13:18:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:18:32] [Rank 0] PRINT: step:9200/10000 val_loss:5.4648 total_sharp:5.9999e-02 L1_sharp:8.3353e-03 L2_sharp:6.1135e-03 L3_sharp:8.9626e-03 L4_sharp:1.0090e-02 L5_sharp:1.6186e-02 L6_sharp:4.3883e-02 L7_sharp:6.7189e-02 L8_sharp:9.1894e-02 L9_sharp:1.6942e-01 L10_sharp:2.3084e-01 L11_sharp:3.1087e-01 L12_sharp:7.5755e-01 total_fnorm:1.9141e-01 total_l1_linf:1.1300e+02 total_spectral:9.7168e-02 L1_fnorm:1.8188e-02 L2_fnorm:1.8311e-02 L3_fnorm:1.8311e-02 L4_fnorm:1.8188e-02 L5_fnorm:1.8066e-02 L6_fnorm:1.7944e-02 L7_fnorm:1.7944e-02 L8_fnorm:1.7822e-02 L9_fnorm:1.7944e-02 L10_fnorm:1.7578e-02 L11_fnorm:1.7456e-02 L12_fnorm:1.5869e-02 L1_l1linf:2.7313e-03 L2_l1linf:2.8381e-03 L3_l1linf:2.8839e-03 L4_l1linf:2.8687e-03 L5_l1linf:2.8687e-03 L6_l1linf:2.8381e-03 L7_l1linf:2.7466e-03 L8_l1linf:2.6703e-03 L9_l1linf:2.7313e-03 L10_l1linf:2.8229e-03 L11_l1linf:2.9449e-03 L12_l1linf:2.0294e-03 L1_spectral:3.4709e-04 L2_spectral:3.4464e-04 L3_spectral:3.4347e-04 L4_spectral:3.4041e-04 L5_spectral:3.3770e-04 L6_spectral:3.3038e-04 L7_spectral:3.2809e-04 L8_spectral:3.2769e-04 L9_spectral:3.3020e-04 L10_spectral:3.1801e-04 L11_spectral:3.0118e-04 L12_spectral:2.7625e-04 train_time:387379ms step_avg:42.11ms +[2025-09-11 13:18:32] [Rank 0] PRINT: step:9200/10000 val_loss:5.4648 total_sharp:5.9999e-02 L1_sharp:8.3353e-03 L2_sharp:6.1135e-03 L3_sharp:8.9626e-03 L4_sharp:1.0090e-02 L5_sharp:1.6186e-02 L6_sharp:4.3883e-02 L7_sharp:6.7189e-02 L8_sharp:9.1894e-02 L9_sharp:1.6942e-01 L10_sharp:2.3084e-01 L11_sharp:3.1087e-01 L12_sharp:7.5755e-01 total_fnorm:1.9141e-01 total_l1_linf:1.1300e+02 total_spectral:9.7168e-02 L1_fnorm:1.8188e-02 L2_fnorm:1.8311e-02 L3_fnorm:1.8311e-02 L4_fnorm:1.8188e-02 L5_fnorm:1.8066e-02 L6_fnorm:1.7944e-02 L7_fnorm:1.7944e-02 L8_fnorm:1.7822e-02 L9_fnorm:1.7944e-02 L10_fnorm:1.7578e-02 L11_fnorm:1.7456e-02 L12_fnorm:1.5869e-02 L1_l1linf:2.7313e-03 L2_l1linf:2.8381e-03 L3_l1linf:2.8839e-03 L4_l1linf:2.8687e-03 L5_l1linf:2.8687e-03 
L6_l1linf:2.8381e-03 L7_l1linf:2.7466e-03 L8_l1linf:2.6703e-03 L9_l1linf:2.7313e-03 L10_l1linf:2.8229e-03 L11_l1linf:2.9449e-03 L12_l1linf:2.0294e-03 L1_spectral:3.4709e-04 L2_spectral:3.4464e-04 L3_spectral:3.4347e-04 L4_spectral:3.4041e-04 L5_spectral:3.3770e-04 L6_spectral:3.3038e-04 L7_spectral:3.2809e-04 L8_spectral:3.2769e-04 L9_spectral:3.3020e-04 L10_spectral:3.1801e-04 L11_spectral:3.0118e-04 L12_spectral:2.7625e-04 train_time:387379ms step_avg:42.11ms +[2025-09-11 13:18:34] [Rank 0] step:9201/10000 train_time:389611ms step_avg:42.34ms +[2025-09-11 13:18:34] [Rank 0] step:9201/10000 train_time:389611ms step_avg:42.34ms +[2025-09-11 13:18:35] [Rank 0] step:9221/10000 train_time:390489ms step_avg:42.35ms +[2025-09-11 13:18:35] [Rank 0] step:9221/10000 train_time:390489ms step_avg:42.35ms +[2025-09-11 13:18:36] [Rank 0] step:9241/10000 train_time:391320ms step_avg:42.35ms +[2025-09-11 13:18:36] [Rank 0] step:9241/10000 train_time:391320ms step_avg:42.35ms +[2025-09-11 13:18:36] [Rank 0] step:9261/10000 train_time:392031ms step_avg:42.33ms +[2025-09-11 13:18:36] [Rank 0] step:9261/10000 train_time:392031ms step_avg:42.33ms +[2025-09-11 13:18:37] [Rank 0] step:9281/10000 train_time:392740ms step_avg:42.32ms +[2025-09-11 13:18:37] [Rank 0] step:9281/10000 train_time:392740ms step_avg:42.32ms +[2025-09-11 13:18:38] [Rank 0] step:9301/10000 train_time:393445ms step_avg:42.30ms +[2025-09-11 13:18:38] [Rank 0] step:9301/10000 train_time:393445ms step_avg:42.30ms +[2025-09-11 13:18:39] [Rank 0] step:9321/10000 train_time:394154ms step_avg:42.29ms +[2025-09-11 13:18:39] [Rank 0] step:9321/10000 train_time:394154ms step_avg:42.29ms +[2025-09-11 13:18:39] [Rank 0] step:9341/10000 train_time:394860ms step_avg:42.27ms +[2025-09-11 13:18:39] [Rank 0] step:9341/10000 train_time:394860ms step_avg:42.27ms +[2025-09-11 13:18:40] [Rank 0] step:9361/10000 train_time:395564ms step_avg:42.26ms +[2025-09-11 13:18:40] [Rank 0] step:9361/10000 train_time:395564ms step_avg:42.26ms 
+[2025-09-11 13:18:41] [Rank 0] step:9381/10000 train_time:396271ms step_avg:42.24ms +[2025-09-11 13:18:41] [Rank 0] step:9381/10000 train_time:396271ms step_avg:42.24ms +[2025-09-11 13:18:41] [Rank 0] step:9401/10000 train_time:396982ms step_avg:42.23ms +[2025-09-11 13:18:41] [Rank 0] step:9401/10000 train_time:396982ms step_avg:42.23ms +[2025-09-11 13:18:42] [Rank 0] step:9421/10000 train_time:397691ms step_avg:42.21ms +[2025-09-11 13:18:42] [Rank 0] step:9421/10000 train_time:397691ms step_avg:42.21ms +[2025-09-11 13:18:43] [Rank 0] step:9441/10000 train_time:398402ms step_avg:42.20ms +[2025-09-11 13:18:43] [Rank 0] step:9441/10000 train_time:398402ms step_avg:42.20ms +[2025-09-11 13:18:44] [Rank 0] step:9461/10000 train_time:399110ms step_avg:42.18ms +[2025-09-11 13:18:44] [Rank 0] step:9461/10000 train_time:399110ms step_avg:42.18ms +[2025-09-11 13:18:44] [Rank 0] step:9481/10000 train_time:399819ms step_avg:42.17ms +[2025-09-11 13:18:44] [Rank 0] step:9481/10000 train_time:399819ms step_avg:42.17ms +[2025-09-11 13:18:45] [Rank 0] step:9501/10000 train_time:400529ms step_avg:42.16ms +[2025-09-11 13:18:45] [Rank 0] step:9501/10000 train_time:400529ms step_avg:42.16ms +[2025-09-11 13:18:46] [Rank 0] step:9521/10000 train_time:401241ms step_avg:42.14ms +[2025-09-11 13:18:46] [Rank 0] step:9521/10000 train_time:401241ms step_avg:42.14ms +[2025-09-11 13:18:46] [Rank 0] step:9541/10000 train_time:401948ms step_avg:42.13ms +[2025-09-11 13:18:46] [Rank 0] step:9541/10000 train_time:401948ms step_avg:42.13ms +[2025-09-11 13:18:47] [Rank 0] step:9561/10000 train_time:402656ms step_avg:42.11ms +[2025-09-11 13:18:47] [Rank 0] step:9561/10000 train_time:402656ms step_avg:42.11ms +[2025-09-11 13:18:48] [Rank 0] step:9581/10000 train_time:403365ms step_avg:42.10ms +[2025-09-11 13:18:48] [Rank 0] step:9581/10000 train_time:403365ms step_avg:42.10ms +[2025-09-11 13:18:48] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 13:18:48] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 13:18:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 13:18:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 13:18:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 13:18:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 13:18:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:18:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:18:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 13:18:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 13:18:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 13:18:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 13:18:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 13:18:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 13:18:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 13:18:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 13:18:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 13:18:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 13:18:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 13:18:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 13:18:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 13:18:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 13:18:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 13:18:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 13:18:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 13:18:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 13:18:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 13:18:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 13:18:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 13:18:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 13:18:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 13:18:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 13:18:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 13:18:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 13:18:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 13:18:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 13:18:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 13:18:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 13:18:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 13:18:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 13:18:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 13:18:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 13:18:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 13:18:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 13:18:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 13:18:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 13:18:59] [Rank 0] PRINT: step:9600/10000 val_loss:5.4606 total_sharp:2.6365e-02 L1_sharp:5.4222e-03 L2_sharp:6.6772e-03 L3_sharp:6.2657e-03 L4_sharp:6.7550e-03 L5_sharp:1.2709e-02 L6_sharp:1.6485e-02 L7_sharp:3.3414e-02 L8_sharp:6.0302e-02 L9_sharp:1.0848e-01 L10_sharp:1.6120e-01 L11_sharp:1.9720e-01 L12_sharp:4.5734e-01 total_fnorm:1.1084e-01 total_l1_linf:5.3500e+01 total_spectral:5.7373e-02 L1_fnorm:1.0071e-02 L2_fnorm:1.0193e-02 L3_fnorm:1.0193e-02 L4_fnorm:1.0193e-02 L5_fnorm:1.0010e-02 L6_fnorm:1.0010e-02 L7_fnorm:1.0010e-02 L8_fnorm:1.0010e-02 L9_fnorm:1.0071e-02 L10_fnorm:9.9487e-03 L11_fnorm:9.7046e-03 L12_fnorm:8.8501e-03 L1_l1linf:1.3733e-03 L2_l1linf:1.3504e-03 L3_l1linf:1.2970e-03 L4_l1linf:1.3428e-03 L5_l1linf:1.3351e-03 L6_l1linf:1.2894e-03 L7_l1linf:1.3351e-03 L8_l1linf:1.3275e-03 L9_l1linf:1.3580e-03 L10_l1linf:1.4877e-03 L11_l1linf:1.4343e-03 L12_l1linf:9.4604e-04 L1_spectral:1.9629e-04 L2_spectral:1.9814e-04 L3_spectral:1.9869e-04 L4_spectral:1.9514e-04 L5_spectral:1.9253e-04 L6_spectral:1.8864e-04 L7_spectral:1.9026e-04 L8_spectral:1.8691e-04 L9_spectral:1.8604e-04 L10_spectral:1.8066e-04 L11_spectral:1.7027e-04 L12_spectral:1.5591e-04 train_time:404051ms step_avg:42.09ms +[2025-09-11 13:18:59] [Rank 0] PRINT: step:9600/10000 
val_loss:5.4606 total_sharp:2.6365e-02 L1_sharp:5.4222e-03 L2_sharp:6.6772e-03 L3_sharp:6.2657e-03 L4_sharp:6.7550e-03 L5_sharp:1.2709e-02 L6_sharp:1.6485e-02 L7_sharp:3.3414e-02 L8_sharp:6.0302e-02 L9_sharp:1.0848e-01 L10_sharp:1.6120e-01 L11_sharp:1.9720e-01 L12_sharp:4.5734e-01 total_fnorm:1.1084e-01 total_l1_linf:5.3500e+01 total_spectral:5.7373e-02 L1_fnorm:1.0071e-02 L2_fnorm:1.0193e-02 L3_fnorm:1.0193e-02 L4_fnorm:1.0193e-02 L5_fnorm:1.0010e-02 L6_fnorm:1.0010e-02 L7_fnorm:1.0010e-02 L8_fnorm:1.0010e-02 L9_fnorm:1.0071e-02 L10_fnorm:9.9487e-03 L11_fnorm:9.7046e-03 L12_fnorm:8.8501e-03 L1_l1linf:1.3733e-03 L2_l1linf:1.3504e-03 L3_l1linf:1.2970e-03 L4_l1linf:1.3428e-03 L5_l1linf:1.3351e-03 L6_l1linf:1.2894e-03 L7_l1linf:1.3351e-03 L8_l1linf:1.3275e-03 L9_l1linf:1.3580e-03 L10_l1linf:1.4877e-03 L11_l1linf:1.4343e-03 L12_l1linf:9.4604e-04 L1_spectral:1.9629e-04 L2_spectral:1.9814e-04 L3_spectral:1.9869e-04 L4_spectral:1.9514e-04 L5_spectral:1.9253e-04 L6_spectral:1.8864e-04 L7_spectral:1.9026e-04 L8_spectral:1.8691e-04 L9_spectral:1.8604e-04 L10_spectral:1.8066e-04 L11_spectral:1.7027e-04 L12_spectral:1.5591e-04 train_time:404051ms step_avg:42.09ms +[2025-09-11 13:19:02] [Rank 0] step:9601/10000 train_time:406272ms step_avg:42.32ms +[2025-09-11 13:19:02] [Rank 0] step:9601/10000 train_time:406272ms step_avg:42.32ms +[2025-09-11 13:19:02] [Rank 0] step:9621/10000 train_time:406997ms step_avg:42.30ms +[2025-09-11 13:19:02] [Rank 0] step:9621/10000 train_time:406997ms step_avg:42.30ms +[2025-09-11 13:19:03] [Rank 0] step:9641/10000 train_time:407710ms step_avg:42.29ms +[2025-09-11 13:19:03] [Rank 0] step:9641/10000 train_time:407710ms step_avg:42.29ms +[2025-09-11 13:19:04] [Rank 0] step:9661/10000 train_time:408430ms step_avg:42.28ms +[2025-09-11 13:19:04] [Rank 0] step:9661/10000 train_time:408430ms step_avg:42.28ms +[2025-09-11 13:19:05] [Rank 0] step:9681/10000 train_time:409144ms step_avg:42.26ms +[2025-09-11 13:19:05] [Rank 0] step:9681/10000 
train_time:409144ms step_avg:42.26ms +[2025-09-11 13:19:05] [Rank 0] step:9701/10000 train_time:409859ms step_avg:42.25ms +[2025-09-11 13:19:05] [Rank 0] step:9701/10000 train_time:409859ms step_avg:42.25ms +[2025-09-11 13:19:06] [Rank 0] step:9721/10000 train_time:410578ms step_avg:42.24ms +[2025-09-11 13:19:06] [Rank 0] step:9721/10000 train_time:410578ms step_avg:42.24ms +[2025-09-11 13:19:07] [Rank 0] step:9741/10000 train_time:411295ms step_avg:42.22ms +[2025-09-11 13:19:07] [Rank 0] step:9741/10000 train_time:411295ms step_avg:42.22ms +[2025-09-11 13:19:07] [Rank 0] step:9761/10000 train_time:412010ms step_avg:42.21ms +[2025-09-11 13:19:07] [Rank 0] step:9761/10000 train_time:412010ms step_avg:42.21ms +[2025-09-11 13:19:08] [Rank 0] step:9781/10000 train_time:412724ms step_avg:42.20ms +[2025-09-11 13:19:08] [Rank 0] step:9781/10000 train_time:412724ms step_avg:42.20ms +[2025-09-11 13:19:09] [Rank 0] step:9801/10000 train_time:413444ms step_avg:42.18ms +[2025-09-11 13:19:09] [Rank 0] step:9801/10000 train_time:413444ms step_avg:42.18ms +[2025-09-11 13:19:10] [Rank 0] step:9821/10000 train_time:414159ms step_avg:42.17ms +[2025-09-11 13:19:10] [Rank 0] step:9821/10000 train_time:414159ms step_avg:42.17ms +[2025-09-11 13:19:10] [Rank 0] step:9841/10000 train_time:414879ms step_avg:42.16ms +[2025-09-11 13:19:10] [Rank 0] step:9841/10000 train_time:414879ms step_avg:42.16ms +[2025-09-11 13:19:11] [Rank 0] step:9861/10000 train_time:415593ms step_avg:42.15ms +[2025-09-11 13:19:11] [Rank 0] step:9861/10000 train_time:415593ms step_avg:42.15ms +[2025-09-11 13:19:12] [Rank 0] step:9881/10000 train_time:416309ms step_avg:42.13ms +[2025-09-11 13:19:12] [Rank 0] step:9881/10000 train_time:416309ms step_avg:42.13ms +[2025-09-11 13:19:12] [Rank 0] step:9901/10000 train_time:417022ms step_avg:42.12ms +[2025-09-11 13:19:12] [Rank 0] step:9901/10000 train_time:417022ms step_avg:42.12ms +[2025-09-11 13:19:13] [Rank 0] step:9921/10000 train_time:417736ms step_avg:42.11ms 
+[2025-09-11 13:19:13] [Rank 0] step:9921/10000 train_time:417736ms step_avg:42.11ms +[2025-09-11 13:19:14] [Rank 0] step:9941/10000 train_time:418455ms step_avg:42.09ms +[2025-09-11 13:19:14] [Rank 0] step:9941/10000 train_time:418455ms step_avg:42.09ms +[2025-09-11 13:19:15] [Rank 0] step:9961/10000 train_time:419176ms step_avg:42.08ms +[2025-09-11 13:19:15] [Rank 0] step:9961/10000 train_time:419176ms step_avg:42.08ms +[2025-09-11 13:19:15] [Rank 0] step:9981/10000 train_time:419893ms step_avg:42.07ms +[2025-09-11 13:19:15] [Rank 0] step:9981/10000 train_time:419893ms step_avg:42.07ms +[2025-09-11 13:19:16] [Rank 0] step:10000/10000 train_time:420582ms step_avg:42.06ms +[2025-09-11 13:19:16] [Rank 0] step:10000/10000 train_time:420582ms step_avg:42.06ms +[2025-09-11 13:19:16] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 13:19:16] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 13:19:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 13:19:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 13:19:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 13:19:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 13:19:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:19:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:19:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 13:19:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 13:19:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 13:19:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 13:19:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 13:19:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 13:19:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 13:19:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 13:19:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 13:19:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 13:19:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 13:19:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 13:19:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 13:19:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 13:19:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 13:19:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 13:19:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 13:19:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 13:19:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 13:19:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 13:19:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 13:19:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 13:19:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 13:19:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 13:19:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 13:19:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 13:19:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 13:19:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 13:19:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 13:19:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 13:19:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 13:19:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 13:19:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 13:19:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 13:19:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 13:19:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 13:19:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 13:19:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:19:27] [Rank 0] PRINT: step:10000/10000 val_loss:5.4590 total_sharp:2.1818e-02 L1_sharp:3.4538e-03 L2_sharp:6.2462e-03 L3_sharp:4.4615e-03 L4_sharp:6.5632e-03 L5_sharp:1.0353e-02 L6_sharp:2.4485e-02 L7_sharp:3.8669e-02 L8_sharp:4.8139e-02 L9_sharp:8.7921e-02 L10_sharp:1.2153e-01 L11_sharp:1.4867e-01 L12_sharp:4.2582e-01 total_fnorm:4.2725e-02 total_l1_linf:1.5188e+01 total_spectral:2.2095e-02 L1_fnorm:3.9673e-03 L2_fnorm:3.9978e-03 L3_fnorm:3.9978e-03 L4_fnorm:4.0283e-03 L5_fnorm:3.9368e-03 L6_fnorm:3.9368e-03 L7_fnorm:3.9368e-03 L8_fnorm:3.8910e-03 L9_fnorm:3.9062e-03 L10_fnorm:3.8452e-03 L11_fnorm:3.8147e-03 L12_fnorm:3.4332e-03 L1_l1linf:4.1580e-04 L2_l1linf:4.0245e-04 L3_l1linf:4.1580e-04 L4_l1linf:4.5204e-04 L5_l1linf:4.1199e-04 L6_l1linf:3.9291e-04 L7_l1linf:4.4060e-04 L8_l1linf:4.0436e-04 L9_l1linf:4.3106e-04 L10_l1linf:4.0245e-04 L11_l1linf:4.4250e-04 L12_l1linf:2.9945e-04 L1_spectral:8.0315e-05 L2_spectral:7.9968e-05 L3_spectral:7.9070e-05 L4_spectral:7.9481e-05 L5_spectral:7.6969e-05 L6_spectral:7.7424e-05 L7_spectral:7.7123e-05 L8_spectral:7.4887e-05 L9_spectral:7.4062e-05 L10_spectral:7.2101e-05 L11_spectral:6.8660e-05 L12_spectral:6.3359e-05 train_time:420600ms step_avg:42.06ms +[2025-09-11 13:19:27] [Rank 0] PRINT: step:10000/10000 val_loss:5.4590 total_sharp:2.1818e-02 L1_sharp:3.4538e-03 L2_sharp:6.2462e-03 L3_sharp:4.4615e-03 L4_sharp:6.5632e-03 L5_sharp:1.0353e-02 L6_sharp:2.4485e-02 L7_sharp:3.8669e-02 L8_sharp:4.8139e-02 L9_sharp:8.7921e-02 L10_sharp:1.2153e-01 L11_sharp:1.4867e-01 L12_sharp:4.2582e-01 total_fnorm:4.2725e-02 total_l1_linf:1.5188e+01 total_spectral:2.2095e-02 L1_fnorm:3.9673e-03 L2_fnorm:3.9978e-03 L3_fnorm:3.9978e-03 L4_fnorm:4.0283e-03 L5_fnorm:3.9368e-03 L6_fnorm:3.9368e-03 L7_fnorm:3.9368e-03 L8_fnorm:3.8910e-03 L9_fnorm:3.9062e-03 L10_fnorm:3.8452e-03 L11_fnorm:3.8147e-03 L12_fnorm:3.4332e-03 L1_l1linf:4.1580e-04 L2_l1linf:4.0245e-04 L3_l1linf:4.1580e-04 L4_l1linf:4.5204e-04 L5_l1linf:4.1199e-04 
L6_l1linf:3.9291e-04 L7_l1linf:4.4060e-04 L8_l1linf:4.0436e-04 L9_l1linf:4.3106e-04 L10_l1linf:4.0245e-04 L11_l1linf:4.4250e-04 L12_l1linf:2.9945e-04 L1_spectral:8.0315e-05 L2_spectral:7.9968e-05 L3_spectral:7.9070e-05 L4_spectral:7.9481e-05 L5_spectral:7.6969e-05 L6_spectral:7.7424e-05 L7_spectral:7.7123e-05 L8_spectral:7.4887e-05 L9_spectral:7.4062e-05 L10_spectral:7.2101e-05 L11_spectral:6.8660e-05 L12_spectral:6.3359e-05 train_time:420600ms step_avg:42.06ms +[2025-09-11 13:19:27] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 13:19:27 2025 --- +[2025-09-11 13:19:27] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 13:19:27 2025 --- +[2025-09-11 13:19:27] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 13:19:27] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.001_seed_45/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.001_seed_45/config.json new file mode 100644 index 0000000000000000000000000000000000000000..571617f61e4c03d3be2cb927007566bc5dbbb86e --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.001_seed_45/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 45, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.001, + "muon_lr": 0.001, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "24f806e5-3cbe-4bd4-b906-8bdd46616d49", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.001_seed_45/training_log_24f806e5-3cbe-4bd4-b906-8bdd46616d49.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.001_seed_45/training_log_24f806e5-3cbe-4bd4-b906-8bdd46616d49.txt new file mode 100644 index 0000000000000000000000000000000000000000..c97eeebcd43ad882f50f5066eb9b1f94c59004e4 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.001_seed_45/training_log_24f806e5-3cbe-4bd4-b906-8bdd46616d49.txt @@ -0,0 +1,4264 @@ +[2025-09-11 13:47:39] [Rank 0] PRINT: --- Script Start: Thu Sep 11 13:47:39 2025 --- +[2025-09-11 13:47:39] [Rank 0] PRINT: --- Script Start: Thu Sep 11 13:47:39 2025 --- +[2025-09-11 13:47:39] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001, muon_lr=0.001, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 13:47:39] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001, muon_lr=0.001, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 13:47:39] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 13:47:39] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 13:47:39] [Rank 0] PRINT: Using fixed seed: 45 +[2025-09-11 13:47:39] [Rank 0] PRINT: Using fixed seed: 45 +[2025-09-11 13:47:39] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.001_seed_45 +[2025-09-11 13:47:39] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.001_seed_45 +[2025-09-11 13:47:39] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses 
import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert 
header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." 
+ "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + 
train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 13:47:39] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 13:47:39] [Rank 0] PRINT: Constructing model... +[2025-09-11 13:47:39] [Rank 0] PRINT: Constructing model... +[2025-09-11 13:47:40] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 13:47:40] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 13:47:40] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 13:47:40] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 13:47:40] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 13:47:40] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 13:47:40] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 13:47:40] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 13:47:40] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 13:47:40] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 13:47:42] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 13:47:42] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 13:47:42] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 13:47:42] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 13:47:42] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 13:47:42] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 13:47:48] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 13:47:48] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 13:47:48] [Rank 0] PRINT: Starting warmup... +[2025-09-11 13:47:48] [Rank 0] PRINT: Starting warmup... +[2025-09-11 13:48:24] [Rank 0] PRINT: Warmup complete. +[2025-09-11 13:48:24] [Rank 0] PRINT: Warmup complete. +[2025-09-11 13:48:24] [Rank 0] PRINT: Starting training... +[2025-09-11 13:48:24] [Rank 0] PRINT: Starting training... 
+[2025-09-11 13:48:25] [Rank 0] step:21/10000 train_time:1134ms step_avg:54.01ms +[2025-09-11 13:48:25] [Rank 0] step:21/10000 train_time:1134ms step_avg:54.01ms +[2025-09-11 13:48:26] [Rank 0] step:41/10000 train_time:1925ms step_avg:46.95ms +[2025-09-11 13:48:26] [Rank 0] step:41/10000 train_time:1925ms step_avg:46.95ms +[2025-09-11 13:48:27] [Rank 0] step:61/10000 train_time:2655ms step_avg:43.52ms +[2025-09-11 13:48:27] [Rank 0] step:61/10000 train_time:2655ms step_avg:43.52ms +[2025-09-11 13:48:28] [Rank 0] step:81/10000 train_time:3385ms step_avg:41.79ms +[2025-09-11 13:48:28] [Rank 0] step:81/10000 train_time:3385ms step_avg:41.79ms +[2025-09-11 13:48:28] [Rank 0] step:101/10000 train_time:4115ms step_avg:40.74ms +[2025-09-11 13:48:28] [Rank 0] step:101/10000 train_time:4115ms step_avg:40.74ms +[2025-09-11 13:48:29] [Rank 0] step:121/10000 train_time:4845ms step_avg:40.04ms +[2025-09-11 13:48:29] [Rank 0] step:121/10000 train_time:4845ms step_avg:40.04ms +[2025-09-11 13:48:30] [Rank 0] step:141/10000 train_time:5576ms step_avg:39.54ms +[2025-09-11 13:48:30] [Rank 0] step:141/10000 train_time:5576ms step_avg:39.54ms +[2025-09-11 13:48:30] [Rank 0] step:161/10000 train_time:6306ms step_avg:39.17ms +[2025-09-11 13:48:30] [Rank 0] step:161/10000 train_time:6306ms step_avg:39.17ms +[2025-09-11 13:48:31] [Rank 0] step:181/10000 train_time:7036ms step_avg:38.87ms +[2025-09-11 13:48:31] [Rank 0] step:181/10000 train_time:7036ms step_avg:38.87ms +[2025-09-11 13:48:32] [Rank 0] step:201/10000 train_time:7766ms step_avg:38.64ms +[2025-09-11 13:48:32] [Rank 0] step:201/10000 train_time:7766ms step_avg:38.64ms +[2025-09-11 13:48:33] [Rank 0] step:221/10000 train_time:8496ms step_avg:38.44ms +[2025-09-11 13:48:33] [Rank 0] step:221/10000 train_time:8496ms step_avg:38.44ms +[2025-09-11 13:48:33] [Rank 0] step:241/10000 train_time:9226ms step_avg:38.28ms +[2025-09-11 13:48:33] [Rank 0] step:241/10000 train_time:9226ms step_avg:38.28ms +[2025-09-11 13:48:34] [Rank 0] 
step:261/10000 train_time:9956ms step_avg:38.15ms +[2025-09-11 13:48:34] [Rank 0] step:261/10000 train_time:9956ms step_avg:38.15ms +[2025-09-11 13:48:35] [Rank 0] step:281/10000 train_time:10686ms step_avg:38.03ms +[2025-09-11 13:48:35] [Rank 0] step:281/10000 train_time:10686ms step_avg:38.03ms +[2025-09-11 13:48:36] [Rank 0] step:301/10000 train_time:11416ms step_avg:37.93ms +[2025-09-11 13:48:36] [Rank 0] step:301/10000 train_time:11416ms step_avg:37.93ms +[2025-09-11 13:48:36] [Rank 0] step:321/10000 train_time:12146ms step_avg:37.84ms +[2025-09-11 13:48:36] [Rank 0] step:321/10000 train_time:12146ms step_avg:37.84ms +[2025-09-11 13:48:37] [Rank 0] step:341/10000 train_time:12876ms step_avg:37.76ms +[2025-09-11 13:48:37] [Rank 0] step:341/10000 train_time:12876ms step_avg:37.76ms +[2025-09-11 13:48:38] [Rank 0] step:361/10000 train_time:13605ms step_avg:37.69ms +[2025-09-11 13:48:38] [Rank 0] step:361/10000 train_time:13605ms step_avg:37.69ms +[2025-09-11 13:48:38] [Rank 0] step:381/10000 train_time:14335ms step_avg:37.62ms +[2025-09-11 13:48:38] [Rank 0] step:381/10000 train_time:14335ms step_avg:37.62ms +[2025-09-11 13:48:39] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 13:48:39] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 13:48:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 13:48:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 13:49:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 13:49:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 13:49:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:49:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 13:49:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 13:49:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 13:49:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 13:49:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 13:49:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 13:49:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 13:49:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 13:49:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 13:49:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 13:49:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 13:49:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 13:49:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 13:49:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 13:49:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 13:49:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 13:49:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 13:49:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 13:49:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 13:49:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 13:49:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 13:49:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 13:49:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 13:49:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 13:49:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 13:49:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 13:49:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 13:49:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 13:49:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 13:49:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 13:49:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 13:49:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 13:49:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 13:49:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 13:49:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 13:49:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 13:49:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 13:49:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 13:49:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:49:26] [Rank 0] PRINT: step:400/10000 val_loss:7.1753 total_sharp:1.8231e-02 L1_sharp:1.4870e-01 L2_sharp:1.6199e-01 L3_sharp:1.7952e-01 L4_sharp:1.9008e-01 L5_sharp:2.7656e-01 L6_sharp:4.1741e-01 L7_sharp:6.1618e-01 L8_sharp:7.2267e-01 L9_sharp:8.0888e-01 L10_sharp:6.3985e-01 L11_sharp:5.7139e-01 L12_sharp:8.9414e-01 total_fnorm:5.4502e+00 total_l1_linf:1.5296e+04 total_spectral:2.7252e+00 L1_fnorm:1.1910e-01 L2_fnorm:1.1871e-01 L3_fnorm:1.1777e-01 L4_fnorm:1.1689e-01 L5_fnorm:1.1443e-01 L6_fnorm:1.1326e-01 L7_fnorm:1.0692e-01 L8_fnorm:1.0514e-01 L9_fnorm:9.6923e-02 L10_fnorm:8.8190e-02 L11_fnorm:8.3699e-02 L12_fnorm:7.6054e-02 L1_l1linf:4.4739e-02 L2_l1linf:4.4763e-02 L3_l1linf:4.4714e-02 L4_l1linf:4.4549e-02 L5_l1linf:4.4035e-02 L6_l1linf:4.3727e-02 L7_l1linf:4.1609e-02 L8_l1linf:3.7964e-02 L9_l1linf:3.3913e-02 L10_l1linf:3.1797e-02 L11_l1linf:3.0945e-02 L12_l1linf:2.9087e-02 L1_spectral:1.2049e-03 L2_spectral:1.2048e-03 L3_spectral:1.2052e-03 L4_spectral:1.2048e-03 L5_spectral:1.2052e-03 L6_spectral:1.2046e-03 L7_spectral:1.2041e-03 L8_spectral:1.2050e-03 L9_spectral:1.2046e-03 L10_spectral:1.2042e-03 L11_spectral:1.2042e-03 L12_spectral:1.2044e-03 train_time:15044ms step_avg:37.61ms +[2025-09-11 13:49:26] [Rank 0] PRINT: step:400/10000 val_loss:7.1753 total_sharp:1.8231e-02 L1_sharp:1.4870e-01 L2_sharp:1.6199e-01 L3_sharp:1.7952e-01 L4_sharp:1.9008e-01 L5_sharp:2.7656e-01 L6_sharp:4.1741e-01 L7_sharp:6.1618e-01 L8_sharp:7.2267e-01 L9_sharp:8.0888e-01 L10_sharp:6.3985e-01 L11_sharp:5.7139e-01 L12_sharp:8.9414e-01 total_fnorm:5.4502e+00 total_l1_linf:1.5296e+04 total_spectral:2.7252e+00 L1_fnorm:1.1910e-01 L2_fnorm:1.1871e-01 L3_fnorm:1.1777e-01 L4_fnorm:1.1689e-01 L5_fnorm:1.1443e-01 L6_fnorm:1.1326e-01 L7_fnorm:1.0692e-01 L8_fnorm:1.0514e-01 L9_fnorm:9.6923e-02 L10_fnorm:8.8190e-02 L11_fnorm:8.3699e-02 L12_fnorm:7.6054e-02 L1_l1linf:4.4739e-02 L2_l1linf:4.4763e-02 L3_l1linf:4.4714e-02 L4_l1linf:4.4549e-02 L5_l1linf:4.4035e-02 
L6_l1linf:4.3727e-02 L7_l1linf:4.1609e-02 L8_l1linf:3.7964e-02 L9_l1linf:3.3913e-02 L10_l1linf:3.1797e-02 L11_l1linf:3.0945e-02 L12_l1linf:2.9087e-02 L1_spectral:1.2049e-03 L2_spectral:1.2048e-03 L3_spectral:1.2052e-03 L4_spectral:1.2048e-03 L5_spectral:1.2052e-03 L6_spectral:1.2046e-03 L7_spectral:1.2041e-03 L8_spectral:1.2050e-03 L9_spectral:1.2046e-03 L10_spectral:1.2042e-03 L11_spectral:1.2042e-03 L12_spectral:1.2044e-03 train_time:15044ms step_avg:37.61ms +[2025-09-11 13:49:56] [Rank 0] step:401/10000 train_time:45346ms step_avg:113.08ms +[2025-09-11 13:49:56] [Rank 0] step:401/10000 train_time:45346ms step_avg:113.08ms +[2025-09-11 13:49:58] [Rank 0] step:421/10000 train_time:47877ms step_avg:113.72ms +[2025-09-11 13:49:58] [Rank 0] step:421/10000 train_time:47877ms step_avg:113.72ms +[2025-09-11 13:49:59] [Rank 0] step:441/10000 train_time:48520ms step_avg:110.02ms +[2025-09-11 13:49:59] [Rank 0] step:441/10000 train_time:48520ms step_avg:110.02ms +[2025-09-11 13:50:00] [Rank 0] step:461/10000 train_time:49163ms step_avg:106.64ms +[2025-09-11 13:50:00] [Rank 0] step:461/10000 train_time:49163ms step_avg:106.64ms +[2025-09-11 13:50:00] [Rank 0] step:481/10000 train_time:49805ms step_avg:103.54ms +[2025-09-11 13:50:00] [Rank 0] step:481/10000 train_time:49805ms step_avg:103.54ms +[2025-09-11 13:50:01] [Rank 0] step:501/10000 train_time:50512ms step_avg:100.82ms +[2025-09-11 13:50:01] [Rank 0] step:501/10000 train_time:50512ms step_avg:100.82ms +[2025-09-11 13:50:02] [Rank 0] step:521/10000 train_time:51250ms step_avg:98.37ms +[2025-09-11 13:50:02] [Rank 0] step:521/10000 train_time:51250ms step_avg:98.37ms +[2025-09-11 13:50:03] [Rank 0] step:541/10000 train_time:51892ms step_avg:95.92ms +[2025-09-11 13:50:03] [Rank 0] step:541/10000 train_time:51892ms step_avg:95.92ms +[2025-09-11 13:50:03] [Rank 0] step:561/10000 train_time:52534ms step_avg:93.64ms +[2025-09-11 13:50:03] [Rank 0] step:561/10000 train_time:52534ms step_avg:93.64ms +[2025-09-11 13:50:04] [Rank 
0] step:581/10000 train_time:53177ms step_avg:91.53ms +[2025-09-11 13:50:04] [Rank 0] step:581/10000 train_time:53177ms step_avg:91.53ms +[2025-09-11 13:50:04] [Rank 0] step:601/10000 train_time:53818ms step_avg:89.55ms +[2025-09-11 13:50:04] [Rank 0] step:601/10000 train_time:53818ms step_avg:89.55ms +[2025-09-11 13:50:05] [Rank 0] step:621/10000 train_time:54461ms step_avg:87.70ms +[2025-09-11 13:50:05] [Rank 0] step:621/10000 train_time:54461ms step_avg:87.70ms +[2025-09-11 13:50:06] [Rank 0] step:641/10000 train_time:55102ms step_avg:85.96ms +[2025-09-11 13:50:06] [Rank 0] step:641/10000 train_time:55102ms step_avg:85.96ms +[2025-09-11 13:50:06] [Rank 0] step:661/10000 train_time:55745ms step_avg:84.33ms +[2025-09-11 13:50:06] [Rank 0] step:661/10000 train_time:55745ms step_avg:84.33ms +[2025-09-11 13:50:07] [Rank 0] step:681/10000 train_time:56386ms step_avg:82.80ms +[2025-09-11 13:50:07] [Rank 0] step:681/10000 train_time:56386ms step_avg:82.80ms +[2025-09-11 13:50:08] [Rank 0] step:701/10000 train_time:57029ms step_avg:81.35ms +[2025-09-11 13:50:08] [Rank 0] step:701/10000 train_time:57029ms step_avg:81.35ms +[2025-09-11 13:50:08] [Rank 0] step:721/10000 train_time:57670ms step_avg:79.99ms +[2025-09-11 13:50:08] [Rank 0] step:721/10000 train_time:57670ms step_avg:79.99ms +[2025-09-11 13:50:09] [Rank 0] step:741/10000 train_time:58312ms step_avg:78.69ms +[2025-09-11 13:50:09] [Rank 0] step:741/10000 train_time:58312ms step_avg:78.69ms +[2025-09-11 13:50:10] [Rank 0] step:761/10000 train_time:59109ms step_avg:77.67ms +[2025-09-11 13:50:10] [Rank 0] step:761/10000 train_time:59109ms step_avg:77.67ms +[2025-09-11 13:50:11] [Rank 0] step:781/10000 train_time:59896ms step_avg:76.69ms +[2025-09-11 13:50:11] [Rank 0] step:781/10000 train_time:59896ms step_avg:76.69ms +[2025-09-11 13:50:11] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 13:50:11] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 13:50:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 13:50:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 13:50:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 13:50:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 13:50:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:50:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:50:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 13:50:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 13:50:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 13:50:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 13:50:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 13:50:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 13:50:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 13:50:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 13:50:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 13:50:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 13:50:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 13:50:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 13:50:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 13:50:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 13:50:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 13:50:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 13:50:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 13:50:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 13:50:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 13:50:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 13:50:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 13:50:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 13:50:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 13:50:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 13:50:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 13:50:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 13:50:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 13:50:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 13:50:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 13:50:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 13:50:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 13:50:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 13:50:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... 
+[2025-09-11 13:50:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 13:50:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 13:50:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 13:50:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 13:50:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 13:50:56] [Rank 0] PRINT: step:800/10000 val_loss:6.7018 total_sharp:1.3032e-01 L1_sharp:1.9047e-01 L2_sharp:2.0029e-01 L3_sharp:2.3494e-01 L4_sharp:2.8129e-01 L5_sharp:3.7666e-01 L6_sharp:4.7205e-01 L7_sharp:8.4170e-01 L8_sharp:1.4618e+00 L9_sharp:1.9838e+00 L10_sharp:2.0441e+00 L11_sharp:2.4852e+00 L12_sharp:3.1382e+00 total_fnorm:2.8750e+00 total_l1_linf:3.5680e+03 total_spectral:1.4453e+00 L1_fnorm:1.1426e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1523e-01 L4_fnorm:1.1475e-01 L5_fnorm:1.1035e-01 L6_fnorm:1.1328e-01 L7_fnorm:1.1133e-01 L8_fnorm:1.0547e-01 L9_fnorm:1.0498e-01 L10_fnorm:9.5703e-02 L11_fnorm:8.6426e-02 L12_fnorm:6.4453e-02 L1_l1linf:4.0283e-02 L2_l1linf:4.0039e-02 L3_l1linf:3.9795e-02 L4_l1linf:3.9795e-02 L5_l1linf:3.9795e-02 L6_l1linf:4.0283e-02 L7_l1linf:4.0283e-02 L8_l1linf:3.9795e-02 L9_l1linf:3.7598e-02 L10_l1linf:3.3447e-02 L11_l1linf:2.6489e-02 L12_l1linf:2.0386e-02 L1_spectral:1.6064e-03 L2_spectral:1.6040e-03 L3_spectral:1.6123e-03 L4_spectral:1.6174e-03 L5_spectral:1.6105e-03 L6_spectral:1.6139e-03 L7_spectral:1.5948e-03 L8_spectral:1.5651e-03 L9_spectral:1.5578e-03 L10_spectral:1.5117e-03 L11_spectral:1.4909e-03 L12_spectral:1.3784e-03 train_time:60525ms step_avg:75.66ms +[2025-09-11 13:50:56] [Rank 0] PRINT: step:800/10000 val_loss:6.7018 total_sharp:1.3032e-01 L1_sharp:1.9047e-01 L2_sharp:2.0029e-01 L3_sharp:2.3494e-01 L4_sharp:2.8129e-01 L5_sharp:3.7666e-01 L6_sharp:4.7205e-01 L7_sharp:8.4170e-01 L8_sharp:1.4618e+00 
L9_sharp:1.9838e+00 L10_sharp:2.0441e+00 L11_sharp:2.4852e+00 L12_sharp:3.1382e+00 total_fnorm:2.8750e+00 total_l1_linf:3.5680e+03 total_spectral:1.4453e+00 L1_fnorm:1.1426e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1523e-01 L4_fnorm:1.1475e-01 L5_fnorm:1.1035e-01 L6_fnorm:1.1328e-01 L7_fnorm:1.1133e-01 L8_fnorm:1.0547e-01 L9_fnorm:1.0498e-01 L10_fnorm:9.5703e-02 L11_fnorm:8.6426e-02 L12_fnorm:6.4453e-02 L1_l1linf:4.0283e-02 L2_l1linf:4.0039e-02 L3_l1linf:3.9795e-02 L4_l1linf:3.9795e-02 L5_l1linf:3.9795e-02 L6_l1linf:4.0283e-02 L7_l1linf:4.0283e-02 L8_l1linf:3.9795e-02 L9_l1linf:3.7598e-02 L10_l1linf:3.3447e-02 L11_l1linf:2.6489e-02 L12_l1linf:2.0386e-02 L1_spectral:1.6064e-03 L2_spectral:1.6040e-03 L3_spectral:1.6123e-03 L4_spectral:1.6174e-03 L5_spectral:1.6105e-03 L6_spectral:1.6139e-03 L7_spectral:1.5948e-03 L8_spectral:1.5651e-03 L9_spectral:1.5578e-03 L10_spectral:1.5117e-03 L11_spectral:1.4909e-03 L12_spectral:1.3784e-03 train_time:60525ms step_avg:75.66ms +[2025-09-11 13:50:58] [Rank 0] step:801/10000 train_time:62334ms step_avg:77.82ms +[2025-09-11 13:50:58] [Rank 0] step:801/10000 train_time:62334ms step_avg:77.82ms +[2025-09-11 13:50:58] [Rank 0] step:821/10000 train_time:62985ms step_avg:76.72ms +[2025-09-11 13:50:58] [Rank 0] step:821/10000 train_time:62985ms step_avg:76.72ms +[2025-09-11 13:50:59] [Rank 0] step:841/10000 train_time:63633ms step_avg:75.66ms +[2025-09-11 13:50:59] [Rank 0] step:841/10000 train_time:63633ms step_avg:75.66ms +[2025-09-11 13:50:59] [Rank 0] step:861/10000 train_time:64282ms step_avg:74.66ms +[2025-09-11 13:50:59] [Rank 0] step:861/10000 train_time:64282ms step_avg:74.66ms +[2025-09-11 13:51:00] [Rank 0] step:881/10000 train_time:64928ms step_avg:73.70ms +[2025-09-11 13:51:00] [Rank 0] step:881/10000 train_time:64928ms step_avg:73.70ms +[2025-09-11 13:51:01] [Rank 0] step:901/10000 train_time:65574ms step_avg:72.78ms +[2025-09-11 13:51:01] [Rank 0] step:901/10000 train_time:65574ms step_avg:72.78ms +[2025-09-11 13:51:01] [Rank 0] 
step:921/10000 train_time:66221ms step_avg:71.90ms +[2025-09-11 13:51:01] [Rank 0] step:921/10000 train_time:66221ms step_avg:71.90ms +[2025-09-11 13:51:02] [Rank 0] step:941/10000 train_time:66869ms step_avg:71.06ms +[2025-09-11 13:51:02] [Rank 0] step:941/10000 train_time:66869ms step_avg:71.06ms +[2025-09-11 13:51:03] [Rank 0] step:961/10000 train_time:67516ms step_avg:70.26ms +[2025-09-11 13:51:03] [Rank 0] step:961/10000 train_time:67516ms step_avg:70.26ms +[2025-09-11 13:51:03] [Rank 0] step:981/10000 train_time:68163ms step_avg:69.48ms +[2025-09-11 13:51:03] [Rank 0] step:981/10000 train_time:68163ms step_avg:69.48ms +[2025-09-11 13:51:04] [Rank 0] step:1001/10000 train_time:68809ms step_avg:68.74ms +[2025-09-11 13:51:04] [Rank 0] step:1001/10000 train_time:68809ms step_avg:68.74ms +[2025-09-11 13:51:05] [Rank 0] step:1021/10000 train_time:69456ms step_avg:68.03ms +[2025-09-11 13:51:05] [Rank 0] step:1021/10000 train_time:69456ms step_avg:68.03ms +[2025-09-11 13:51:05] [Rank 0] step:1041/10000 train_time:70102ms step_avg:67.34ms +[2025-09-11 13:51:05] [Rank 0] step:1041/10000 train_time:70102ms step_avg:67.34ms +[2025-09-11 13:51:06] [Rank 0] step:1061/10000 train_time:70748ms step_avg:66.68ms +[2025-09-11 13:51:06] [Rank 0] step:1061/10000 train_time:70748ms step_avg:66.68ms +[2025-09-11 13:51:07] [Rank 0] step:1081/10000 train_time:71394ms step_avg:66.04ms +[2025-09-11 13:51:07] [Rank 0] step:1081/10000 train_time:71394ms step_avg:66.04ms +[2025-09-11 13:51:07] [Rank 0] step:1101/10000 train_time:72083ms step_avg:65.47ms +[2025-09-11 13:51:07] [Rank 0] step:1101/10000 train_time:72083ms step_avg:65.47ms +[2025-09-11 13:51:08] [Rank 0] step:1121/10000 train_time:72729ms step_avg:64.88ms +[2025-09-11 13:51:08] [Rank 0] step:1121/10000 train_time:72729ms step_avg:64.88ms +[2025-09-11 13:51:09] [Rank 0] step:1141/10000 train_time:73375ms step_avg:64.31ms +[2025-09-11 13:51:09] [Rank 0] step:1141/10000 train_time:73375ms step_avg:64.31ms +[2025-09-11 13:51:09] 
[Rank 0] step:1161/10000 train_time:74022ms step_avg:63.76ms +[2025-09-11 13:51:09] [Rank 0] step:1161/10000 train_time:74022ms step_avg:63.76ms +[2025-09-11 13:51:10] [Rank 0] step:1181/10000 train_time:74668ms step_avg:63.22ms +[2025-09-11 13:51:10] [Rank 0] step:1181/10000 train_time:74668ms step_avg:63.22ms +[2025-09-11 13:51:10] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 13:51:10] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 13:51:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 13:51:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 13:51:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 13:51:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 13:51:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:51:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:51:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 13:51:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 13:51:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 13:51:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 13:51:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 13:51:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 13:51:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... 
+[2025-09-11 13:51:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 13:51:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 13:51:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 13:51:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 13:51:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 13:51:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 13:51:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 13:51:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 13:51:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 13:51:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 13:51:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 13:51:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 13:51:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 13:51:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 13:51:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 13:51:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 13:51:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 13:51:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 13:51:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 13:51:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... 
+[2025-09-11 13:51:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 13:51:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 13:51:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 13:51:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 13:51:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 13:51:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 13:51:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 13:51:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 13:51:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 13:51:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 13:51:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:51:21] [Rank 0] PRINT: step:1200/10000 val_loss:6.4575 total_sharp:9.1694e-02 L1_sharp:1.3004e-01 L2_sharp:1.3360e-01 L3_sharp:1.3079e-01 L4_sharp:1.5517e-01 L5_sharp:2.1986e-01 L6_sharp:2.7876e-01 L7_sharp:4.7963e-01 L8_sharp:6.5334e-01 L9_sharp:7.1223e-01 L10_sharp:6.9254e-01 L11_sharp:5.7420e-01 L12_sharp:1.3704e+00 total_fnorm:2.3125e+00 total_l1_linf:2.7360e+03 total_spectral:1.1719e+00 L1_fnorm:1.1475e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1377e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1475e-01 L8_fnorm:1.1377e-01 L9_fnorm:1.1523e-01 L10_fnorm:1.1279e-01 L11_fnorm:1.1133e-01 L12_fnorm:8.8867e-02 L1_l1linf:3.6621e-02 L2_l1linf:3.6377e-02 L3_l1linf:3.5889e-02 L4_l1linf:3.6621e-02 L5_l1linf:3.5889e-02 L6_l1linf:3.5889e-02 L7_l1linf:3.6377e-02 L8_l1linf:3.8086e-02 L9_l1linf:3.9307e-02 L10_l1linf:3.9062e-02 L11_l1linf:3.6133e-02 L12_l1linf:2.9175e-02 L1_spectral:1.6027e-03 L2_spectral:1.6299e-03 L3_spectral:1.6069e-03 L4_spectral:1.6061e-03 L5_spectral:1.6027e-03 L6_spectral:1.6227e-03 L7_spectral:1.6170e-03 L8_spectral:1.6054e-03 L9_spectral:1.6048e-03 L10_spectral:1.5673e-03 L11_spectral:1.5907e-03 L12_spectral:1.5078e-03 train_time:75297ms step_avg:62.75ms +[2025-09-11 13:51:21] [Rank 0] PRINT: step:1200/10000 val_loss:6.4575 total_sharp:9.1694e-02 L1_sharp:1.3004e-01 L2_sharp:1.3360e-01 L3_sharp:1.3079e-01 L4_sharp:1.5517e-01 L5_sharp:2.1986e-01 L6_sharp:2.7876e-01 L7_sharp:4.7963e-01 L8_sharp:6.5334e-01 L9_sharp:7.1223e-01 L10_sharp:6.9254e-01 L11_sharp:5.7420e-01 L12_sharp:1.3704e+00 total_fnorm:2.3125e+00 total_l1_linf:2.7360e+03 total_spectral:1.1719e+00 L1_fnorm:1.1475e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1377e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1475e-01 L8_fnorm:1.1377e-01 L9_fnorm:1.1523e-01 L10_fnorm:1.1279e-01 L11_fnorm:1.1133e-01 L12_fnorm:8.8867e-02 L1_l1linf:3.6621e-02 L2_l1linf:3.6377e-02 L3_l1linf:3.5889e-02 L4_l1linf:3.6621e-02 L5_l1linf:3.5889e-02 
L6_l1linf:3.5889e-02 L7_l1linf:3.6377e-02 L8_l1linf:3.8086e-02 L9_l1linf:3.9307e-02 L10_l1linf:3.9062e-02 L11_l1linf:3.6133e-02 L12_l1linf:2.9175e-02 L1_spectral:1.6027e-03 L2_spectral:1.6299e-03 L3_spectral:1.6069e-03 L4_spectral:1.6061e-03 L5_spectral:1.6027e-03 L6_spectral:1.6227e-03 L7_spectral:1.6170e-03 L8_spectral:1.6054e-03 L9_spectral:1.6048e-03 L10_spectral:1.5673e-03 L11_spectral:1.5907e-03 L12_spectral:1.5078e-03 train_time:75297ms step_avg:62.75ms +[2025-09-11 13:51:23] [Rank 0] step:1201/10000 train_time:77096ms step_avg:64.19ms +[2025-09-11 13:51:23] [Rank 0] step:1201/10000 train_time:77096ms step_avg:64.19ms +[2025-09-11 13:51:24] [Rank 0] step:1221/10000 train_time:77746ms step_avg:63.67ms +[2025-09-11 13:51:24] [Rank 0] step:1221/10000 train_time:77746ms step_avg:63.67ms +[2025-09-11 13:51:25] [Rank 0] step:1241/10000 train_time:78393ms step_avg:63.17ms +[2025-09-11 13:51:25] [Rank 0] step:1241/10000 train_time:78393ms step_avg:63.17ms +[2025-09-11 13:51:25] [Rank 0] step:1261/10000 train_time:79040ms step_avg:62.68ms +[2025-09-11 13:51:25] [Rank 0] step:1261/10000 train_time:79040ms step_avg:62.68ms +[2025-09-11 13:51:26] [Rank 0] step:1281/10000 train_time:79686ms step_avg:62.21ms +[2025-09-11 13:51:26] [Rank 0] step:1281/10000 train_time:79686ms step_avg:62.21ms +[2025-09-11 13:51:26] [Rank 0] step:1301/10000 train_time:80333ms step_avg:61.75ms +[2025-09-11 13:51:26] [Rank 0] step:1301/10000 train_time:80333ms step_avg:61.75ms +[2025-09-11 13:51:27] [Rank 0] step:1321/10000 train_time:80981ms step_avg:61.30ms +[2025-09-11 13:51:27] [Rank 0] step:1321/10000 train_time:80981ms step_avg:61.30ms +[2025-09-11 13:51:28] [Rank 0] step:1341/10000 train_time:81628ms step_avg:60.87ms +[2025-09-11 13:51:28] [Rank 0] step:1341/10000 train_time:81628ms step_avg:60.87ms +[2025-09-11 13:51:28] [Rank 0] step:1361/10000 train_time:82274ms step_avg:60.45ms +[2025-09-11 13:51:28] [Rank 0] step:1361/10000 train_time:82274ms step_avg:60.45ms +[2025-09-11 13:51:29] 
[Rank 0] step:1381/10000 train_time:82920ms step_avg:60.04ms +[2025-09-11 13:51:29] [Rank 0] step:1381/10000 train_time:82920ms step_avg:60.04ms +[2025-09-11 13:51:30] [Rank 0] step:1401/10000 train_time:83566ms step_avg:59.65ms +[2025-09-11 13:51:30] [Rank 0] step:1401/10000 train_time:83566ms step_avg:59.65ms +[2025-09-11 13:51:30] [Rank 0] step:1421/10000 train_time:84211ms step_avg:59.26ms +[2025-09-11 13:51:30] [Rank 0] step:1421/10000 train_time:84211ms step_avg:59.26ms +[2025-09-11 13:51:31] [Rank 0] step:1441/10000 train_time:84858ms step_avg:58.89ms +[2025-09-11 13:51:31] [Rank 0] step:1441/10000 train_time:84858ms step_avg:58.89ms +[2025-09-11 13:51:32] [Rank 0] step:1461/10000 train_time:85504ms step_avg:58.52ms +[2025-09-11 13:51:32] [Rank 0] step:1461/10000 train_time:85504ms step_avg:58.52ms +[2025-09-11 13:51:32] [Rank 0] step:1481/10000 train_time:86151ms step_avg:58.17ms +[2025-09-11 13:51:32] [Rank 0] step:1481/10000 train_time:86151ms step_avg:58.17ms +[2025-09-11 13:51:33] [Rank 0] step:1501/10000 train_time:86801ms step_avg:57.83ms +[2025-09-11 13:51:33] [Rank 0] step:1501/10000 train_time:86801ms step_avg:57.83ms +[2025-09-11 13:51:34] [Rank 0] step:1521/10000 train_time:87451ms step_avg:57.50ms +[2025-09-11 13:51:34] [Rank 0] step:1521/10000 train_time:87451ms step_avg:57.50ms +[2025-09-11 13:51:34] [Rank 0] step:1541/10000 train_time:88101ms step_avg:57.17ms +[2025-09-11 13:51:34] [Rank 0] step:1541/10000 train_time:88101ms step_avg:57.17ms +[2025-09-11 13:51:35] [Rank 0] step:1561/10000 train_time:88751ms step_avg:56.86ms +[2025-09-11 13:51:35] [Rank 0] step:1561/10000 train_time:88751ms step_avg:56.86ms +[2025-09-11 13:51:36] [Rank 0] step:1581/10000 train_time:89400ms step_avg:56.55ms +[2025-09-11 13:51:36] [Rank 0] step:1581/10000 train_time:89400ms step_avg:56.55ms +[2025-09-11 13:51:36] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 13:51:36] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 13:51:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 13:51:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 13:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 13:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 13:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 13:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 13:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 13:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 13:51:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 13:51:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 13:51:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 13:51:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 13:51:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 13:51:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 13:51:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 13:51:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 13:51:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 13:51:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 13:51:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 13:51:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 13:51:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 13:51:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 13:51:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 13:51:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 13:51:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 13:51:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 13:51:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 13:51:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 13:51:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 13:51:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 13:51:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 13:51:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 13:51:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 13:51:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 13:51:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 13:51:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 13:51:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 13:51:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 13:51:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 13:51:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 13:51:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 13:51:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 13:51:46] [Rank 0] PRINT: step:1600/10000 val_loss:6.2987 total_sharp:8.8512e-02 L1_sharp:1.0606e-01 L2_sharp:1.0031e-01 L3_sharp:1.0221e-01 L4_sharp:1.2868e-01 L5_sharp:1.6588e-01 L6_sharp:1.7860e-01 L7_sharp:2.5953e-01 L8_sharp:4.1109e-01 L9_sharp:4.2762e-01 L10_sharp:4.8982e-01 L11_sharp:5.8315e-01 L12_sharp:1.3877e+00 total_fnorm:2.0469e+00 total_l1_linf:2.3520e+03 total_spectral:1.0391e+00 L1_fnorm:1.1523e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1621e-01 L10_fnorm:1.1572e-01 L11_fnorm:1.1621e-01 L12_fnorm:9.5215e-02 L1_l1linf:3.4912e-02 L2_l1linf:3.4912e-02 L3_l1linf:3.4668e-02 L4_l1linf:3.4424e-02 L5_l1linf:3.4424e-02 L6_l1linf:3.3691e-02 L7_l1linf:3.4424e-02 L8_l1linf:3.5645e-02 L9_l1linf:3.6865e-02 L10_l1linf:3.7598e-02 L11_l1linf:3.6865e-02 L12_l1linf:2.5146e-02 L1_spectral:1.6097e-03 L2_spectral:1.6234e-03 L3_spectral:1.6302e-03 L4_spectral:1.6129e-03 L5_spectral:1.6157e-03 L6_spectral:1.6195e-03 L7_spectral:1.6136e-03 L8_spectral:1.6042e-03 L9_spectral:1.6129e-03 L10_spectral:1.5916e-03 L11_spectral:1.6041e-03 L12_spectral:1.5543e-03 train_time:90032ms step_avg:56.27ms +[2025-09-11 13:51:46] [Rank 0] PRINT: step:1600/10000 
val_loss:6.2987 total_sharp:8.8512e-02 L1_sharp:1.0606e-01 L2_sharp:1.0031e-01 L3_sharp:1.0221e-01 L4_sharp:1.2868e-01 L5_sharp:1.6588e-01 L6_sharp:1.7860e-01 L7_sharp:2.5953e-01 L8_sharp:4.1109e-01 L9_sharp:4.2762e-01 L10_sharp:4.8982e-01 L11_sharp:5.8315e-01 L12_sharp:1.3877e+00 total_fnorm:2.0469e+00 total_l1_linf:2.3520e+03 total_spectral:1.0391e+00 L1_fnorm:1.1523e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1621e-01 L10_fnorm:1.1572e-01 L11_fnorm:1.1621e-01 L12_fnorm:9.5215e-02 L1_l1linf:3.4912e-02 L2_l1linf:3.4912e-02 L3_l1linf:3.4668e-02 L4_l1linf:3.4424e-02 L5_l1linf:3.4424e-02 L6_l1linf:3.3691e-02 L7_l1linf:3.4424e-02 L8_l1linf:3.5645e-02 L9_l1linf:3.6865e-02 L10_l1linf:3.7598e-02 L11_l1linf:3.6865e-02 L12_l1linf:2.5146e-02 L1_spectral:1.6097e-03 L2_spectral:1.6234e-03 L3_spectral:1.6302e-03 L4_spectral:1.6129e-03 L5_spectral:1.6157e-03 L6_spectral:1.6195e-03 L7_spectral:1.6136e-03 L8_spectral:1.6042e-03 L9_spectral:1.6129e-03 L10_spectral:1.5916e-03 L11_spectral:1.6041e-03 L12_spectral:1.5543e-03 train_time:90032ms step_avg:56.27ms +[2025-09-11 13:51:48] [Rank 0] step:1601/10000 train_time:91504ms step_avg:57.15ms +[2025-09-11 13:51:48] [Rank 0] step:1601/10000 train_time:91504ms step_avg:57.15ms +[2025-09-11 13:51:49] [Rank 0] step:1621/10000 train_time:92149ms step_avg:56.85ms +[2025-09-11 13:51:49] [Rank 0] step:1621/10000 train_time:92149ms step_avg:56.85ms +[2025-09-11 13:51:49] [Rank 0] step:1641/10000 train_time:92800ms step_avg:56.55ms +[2025-09-11 13:51:49] [Rank 0] step:1641/10000 train_time:92800ms step_avg:56.55ms +[2025-09-11 13:51:50] [Rank 0] step:1661/10000 train_time:93451ms step_avg:56.26ms +[2025-09-11 13:51:50] [Rank 0] step:1661/10000 train_time:93451ms step_avg:56.26ms +[2025-09-11 13:51:51] [Rank 0] step:1681/10000 train_time:94101ms step_avg:55.98ms +[2025-09-11 13:51:51] [Rank 0] step:1681/10000 train_time:94101ms 
step_avg:55.98ms +[2025-09-11 13:51:51] [Rank 0] step:1701/10000 train_time:94751ms step_avg:55.70ms +[2025-09-11 13:51:51] [Rank 0] step:1701/10000 train_time:94751ms step_avg:55.70ms +[2025-09-11 13:51:52] [Rank 0] step:1721/10000 train_time:95400ms step_avg:55.43ms +[2025-09-11 13:51:52] [Rank 0] step:1721/10000 train_time:95400ms step_avg:55.43ms +[2025-09-11 13:51:53] [Rank 0] step:1741/10000 train_time:96050ms step_avg:55.17ms +[2025-09-11 13:51:53] [Rank 0] step:1741/10000 train_time:96050ms step_avg:55.17ms +[2025-09-11 13:51:53] [Rank 0] step:1761/10000 train_time:96699ms step_avg:54.91ms +[2025-09-11 13:51:53] [Rank 0] step:1761/10000 train_time:96699ms step_avg:54.91ms +[2025-09-11 13:51:54] [Rank 0] step:1781/10000 train_time:97350ms step_avg:54.66ms +[2025-09-11 13:51:54] [Rank 0] step:1781/10000 train_time:97350ms step_avg:54.66ms +[2025-09-11 13:51:54] [Rank 0] step:1801/10000 train_time:97999ms step_avg:54.41ms +[2025-09-11 13:51:54] [Rank 0] step:1801/10000 train_time:97999ms step_avg:54.41ms +[2025-09-11 13:51:55] [Rank 0] step:1821/10000 train_time:98650ms step_avg:54.17ms +[2025-09-11 13:51:55] [Rank 0] step:1821/10000 train_time:98650ms step_avg:54.17ms +[2025-09-11 13:51:56] [Rank 0] step:1841/10000 train_time:99299ms step_avg:53.94ms +[2025-09-11 13:51:56] [Rank 0] step:1841/10000 train_time:99299ms step_avg:53.94ms +[2025-09-11 13:51:56] [Rank 0] step:1861/10000 train_time:99949ms step_avg:53.71ms +[2025-09-11 13:51:56] [Rank 0] step:1861/10000 train_time:99949ms step_avg:53.71ms +[2025-09-11 13:51:57] [Rank 0] step:1881/10000 train_time:100599ms step_avg:53.48ms +[2025-09-11 13:51:57] [Rank 0] step:1881/10000 train_time:100599ms step_avg:53.48ms +[2025-09-11 13:51:58] [Rank 0] step:1901/10000 train_time:101248ms step_avg:53.26ms +[2025-09-11 13:51:58] [Rank 0] step:1901/10000 train_time:101248ms step_avg:53.26ms +[2025-09-11 13:51:58] [Rank 0] step:1921/10000 train_time:101898ms step_avg:53.04ms +[2025-09-11 13:51:58] [Rank 0] 
step:1921/10000 train_time:101898ms step_avg:53.04ms +[2025-09-11 13:51:59] [Rank 0] step:1941/10000 train_time:102548ms step_avg:52.83ms +[2025-09-11 13:51:59] [Rank 0] step:1941/10000 train_time:102548ms step_avg:52.83ms +[2025-09-11 13:52:00] [Rank 0] step:1961/10000 train_time:103198ms step_avg:52.62ms +[2025-09-11 13:52:00] [Rank 0] step:1961/10000 train_time:103198ms step_avg:52.62ms +[2025-09-11 13:52:00] [Rank 0] step:1981/10000 train_time:103848ms step_avg:52.42ms +[2025-09-11 13:52:00] [Rank 0] step:1981/10000 train_time:103848ms step_avg:52.42ms +[2025-09-11 13:52:01] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 13:52:01] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 13:52:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 13:52:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 13:52:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 13:52:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 13:52:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:52:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:52:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 13:52:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 13:52:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 13:52:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 13:52:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 13:52:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 13:52:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 13:52:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 13:52:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 13:52:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 13:52:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 13:52:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 13:52:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 13:52:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 13:52:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 13:52:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 13:52:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 13:52:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 13:52:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 13:52:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 13:52:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 13:52:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 13:52:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 13:52:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 13:52:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 13:52:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 13:52:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 13:52:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 13:52:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 13:52:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 13:52:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 13:52:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 13:52:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 13:52:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 13:52:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 13:52:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 13:52:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 13:52:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:52:15] [Rank 0] PRINT: step:2000/10000 val_loss:6.1744 total_sharp:1.1167e-01 L1_sharp:7.9485e-02 L2_sharp:6.9300e-02 L3_sharp:6.6251e-02 L4_sharp:8.0623e-02 L5_sharp:1.2573e-01 L6_sharp:1.6323e-01 L7_sharp:3.5278e-01 L8_sharp:4.6816e-01 L9_sharp:5.8439e-01 L10_sharp:7.7268e-01 L11_sharp:7.4531e-01 L12_sharp:2.3413e+00 total_fnorm:1.9844e+00 total_l1_linf:2.2560e+03 total_spectral:1.0078e+00 L1_fnorm:1.1475e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1523e-01 L9_fnorm:1.1670e-01 L10_fnorm:1.1621e-01 L11_fnorm:1.1768e-01 L12_fnorm:9.7168e-02 L1_l1linf:3.4180e-02 L2_l1linf:3.4180e-02 L3_l1linf:3.3936e-02 L4_l1linf:3.3691e-02 L5_l1linf:3.3447e-02 L6_l1linf:3.3447e-02 L7_l1linf:3.3936e-02 L8_l1linf:3.5400e-02 L9_l1linf:3.5889e-02 L10_l1linf:3.7354e-02 L11_l1linf:3.6621e-02 L12_l1linf:2.3315e-02 L1_spectral:1.5969e-03 L2_spectral:1.6191e-03 L3_spectral:1.6208e-03 L4_spectral:1.6300e-03 L5_spectral:1.6149e-03 L6_spectral:1.6201e-03 L7_spectral:1.6128e-03 L8_spectral:1.6152e-03 L9_spectral:1.6210e-03 L10_spectral:1.6033e-03 L11_spectral:1.6165e-03 L12_spectral:1.5738e-03 train_time:104480ms step_avg:52.24ms +[2025-09-11 13:52:15] [Rank 0] PRINT: step:2000/10000 val_loss:6.1744 total_sharp:1.1167e-01 L1_sharp:7.9485e-02 L2_sharp:6.9300e-02 L3_sharp:6.6251e-02 L4_sharp:8.0623e-02 L5_sharp:1.2573e-01 L6_sharp:1.6323e-01 L7_sharp:3.5278e-01 L8_sharp:4.6816e-01 L9_sharp:5.8439e-01 L10_sharp:7.7268e-01 L11_sharp:7.4531e-01 L12_sharp:2.3413e+00 total_fnorm:1.9844e+00 total_l1_linf:2.2560e+03 total_spectral:1.0078e+00 L1_fnorm:1.1475e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1523e-01 L9_fnorm:1.1670e-01 L10_fnorm:1.1621e-01 L11_fnorm:1.1768e-01 L12_fnorm:9.7168e-02 L1_l1linf:3.4180e-02 L2_l1linf:3.4180e-02 L3_l1linf:3.3936e-02 L4_l1linf:3.3691e-02 L5_l1linf:3.3447e-02 
L6_l1linf:3.3447e-02 L7_l1linf:3.3936e-02 L8_l1linf:3.5400e-02 L9_l1linf:3.5889e-02 L10_l1linf:3.7354e-02 L11_l1linf:3.6621e-02 L12_l1linf:2.3315e-02 L1_spectral:1.5969e-03 L2_spectral:1.6191e-03 L3_spectral:1.6208e-03 L4_spectral:1.6300e-03 L5_spectral:1.6149e-03 L6_spectral:1.6201e-03 L7_spectral:1.6128e-03 L8_spectral:1.6152e-03 L9_spectral:1.6210e-03 L10_spectral:1.6033e-03 L11_spectral:1.6165e-03 L12_spectral:1.5738e-03 train_time:104480ms step_avg:52.24ms +[2025-09-11 13:52:17] [Rank 0] step:2001/10000 train_time:105881ms step_avg:52.91ms +[2025-09-11 13:52:17] [Rank 0] step:2001/10000 train_time:105881ms step_avg:52.91ms +[2025-09-11 13:52:17] [Rank 0] step:2021/10000 train_time:106633ms step_avg:52.76ms +[2025-09-11 13:52:17] [Rank 0] step:2021/10000 train_time:106633ms step_avg:52.76ms +[2025-09-11 13:52:18] [Rank 0] step:2041/10000 train_time:107283ms step_avg:52.56ms +[2025-09-11 13:52:18] [Rank 0] step:2041/10000 train_time:107283ms step_avg:52.56ms +[2025-09-11 13:52:19] [Rank 0] step:2061/10000 train_time:107934ms step_avg:52.37ms +[2025-09-11 13:52:19] [Rank 0] step:2061/10000 train_time:107934ms step_avg:52.37ms +[2025-09-11 13:52:20] [Rank 0] step:2081/10000 train_time:108857ms step_avg:52.31ms +[2025-09-11 13:52:20] [Rank 0] step:2081/10000 train_time:108857ms step_avg:52.31ms +[2025-09-11 13:52:20] [Rank 0] step:2101/10000 train_time:109507ms step_avg:52.12ms +[2025-09-11 13:52:20] [Rank 0] step:2101/10000 train_time:109507ms step_avg:52.12ms +[2025-09-11 13:52:21] [Rank 0] step:2121/10000 train_time:110158ms step_avg:51.94ms +[2025-09-11 13:52:21] [Rank 0] step:2121/10000 train_time:110158ms step_avg:51.94ms +[2025-09-11 13:52:22] [Rank 0] step:2141/10000 train_time:110807ms step_avg:51.75ms +[2025-09-11 13:52:22] [Rank 0] step:2141/10000 train_time:110807ms step_avg:51.75ms +[2025-09-11 13:52:22] [Rank 0] step:2161/10000 train_time:111456ms step_avg:51.58ms +[2025-09-11 13:52:22] [Rank 0] step:2161/10000 train_time:111456ms step_avg:51.58ms 
+[2025-09-11 13:52:23] [Rank 0] step:2181/10000 train_time:112106ms step_avg:51.40ms +[2025-09-11 13:52:23] [Rank 0] step:2181/10000 train_time:112106ms step_avg:51.40ms +[2025-09-11 13:52:23] [Rank 0] step:2201/10000 train_time:112755ms step_avg:51.23ms +[2025-09-11 13:52:23] [Rank 0] step:2201/10000 train_time:112755ms step_avg:51.23ms +[2025-09-11 13:52:24] [Rank 0] step:2221/10000 train_time:113403ms step_avg:51.06ms +[2025-09-11 13:52:24] [Rank 0] step:2221/10000 train_time:113403ms step_avg:51.06ms +[2025-09-11 13:52:25] [Rank 0] step:2241/10000 train_time:114065ms step_avg:50.90ms +[2025-09-11 13:52:25] [Rank 0] step:2241/10000 train_time:114065ms step_avg:50.90ms +[2025-09-11 13:52:25] [Rank 0] step:2261/10000 train_time:114727ms step_avg:50.74ms +[2025-09-11 13:52:25] [Rank 0] step:2261/10000 train_time:114727ms step_avg:50.74ms +[2025-09-11 13:52:26] [Rank 0] step:2281/10000 train_time:115390ms step_avg:50.59ms +[2025-09-11 13:52:26] [Rank 0] step:2281/10000 train_time:115390ms step_avg:50.59ms +[2025-09-11 13:52:27] [Rank 0] step:2301/10000 train_time:116053ms step_avg:50.44ms +[2025-09-11 13:52:27] [Rank 0] step:2301/10000 train_time:116053ms step_avg:50.44ms +[2025-09-11 13:52:27] [Rank 0] step:2321/10000 train_time:116716ms step_avg:50.29ms +[2025-09-11 13:52:27] [Rank 0] step:2321/10000 train_time:116716ms step_avg:50.29ms +[2025-09-11 13:52:28] [Rank 0] step:2341/10000 train_time:117378ms step_avg:50.14ms +[2025-09-11 13:52:28] [Rank 0] step:2341/10000 train_time:117378ms step_avg:50.14ms +[2025-09-11 13:52:29] [Rank 0] step:2361/10000 train_time:118040ms step_avg:50.00ms +[2025-09-11 13:52:29] [Rank 0] step:2361/10000 train_time:118040ms step_avg:50.00ms +[2025-09-11 13:52:29] [Rank 0] step:2381/10000 train_time:118702ms step_avg:49.85ms +[2025-09-11 13:52:29] [Rank 0] step:2381/10000 train_time:118702ms step_avg:49.85ms +[2025-09-11 13:52:30] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 13:52:30] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 13:52:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 13:52:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 13:52:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 13:52:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 13:52:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:52:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:52:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 13:52:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 13:52:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 13:52:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 13:52:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 13:52:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 13:52:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 13:52:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 13:52:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 13:52:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 13:52:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 13:52:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 13:52:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 13:52:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 13:52:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 13:52:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 13:52:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 13:52:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 13:52:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 13:52:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 13:52:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 13:52:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 13:52:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 13:52:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 13:52:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 13:52:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 13:52:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 13:52:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 13:52:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 13:52:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 13:52:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 13:52:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 13:52:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 13:52:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 13:52:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 13:52:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 13:52:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 13:52:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 13:52:40] [Rank 0] PRINT: step:2400/10000 val_loss:6.0697 total_sharp:8.8086e-02 L1_sharp:4.4966e-02 L2_sharp:4.3248e-02 L3_sharp:4.3962e-02 L4_sharp:5.2922e-02 L5_sharp:7.1362e-02 L6_sharp:1.1189e-01 L7_sharp:2.2168e-01 L8_sharp:3.4795e-01 L9_sharp:4.6933e-01 L10_sharp:6.0337e-01 L11_sharp:6.2504e-01 L12_sharp:1.1210e+00 total_fnorm:1.8203e+00 total_l1_linf:2.0480e+03 total_spectral:9.2578e-01 L1_fnorm:1.1523e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1475e-01 L8_fnorm:1.1523e-01 L9_fnorm:1.1670e-01 L10_fnorm:1.1523e-01 L11_fnorm:1.1816e-01 L12_fnorm:1.0254e-01 L1_l1linf:3.3691e-02 L2_l1linf:3.3691e-02 L3_l1linf:3.3447e-02 L4_l1linf:3.2959e-02 L5_l1linf:3.2959e-02 L6_l1linf:3.2471e-02 L7_l1linf:3.2715e-02 L8_l1linf:3.3447e-02 L9_l1linf:3.5156e-02 L10_l1linf:3.5645e-02 L11_l1linf:3.6377e-02 L12_l1linf:2.5024e-02 L1_spectral:1.6216e-03 L2_spectral:1.6178e-03 L3_spectral:1.6256e-03 L4_spectral:1.6335e-03 L5_spectral:1.6141e-03 L6_spectral:1.6310e-03 L7_spectral:1.6135e-03 L8_spectral:1.6062e-03 L9_spectral:1.6152e-03 L10_spectral:1.6065e-03 L11_spectral:1.6127e-03 L12_spectral:1.5948e-03 train_time:119346ms step_avg:49.73ms +[2025-09-11 13:52:40] [Rank 0] PRINT: step:2400/10000 
val_loss:6.0697 total_sharp:8.8086e-02 L1_sharp:4.4966e-02 L2_sharp:4.3248e-02 L3_sharp:4.3962e-02 L4_sharp:5.2922e-02 L5_sharp:7.1362e-02 L6_sharp:1.1189e-01 L7_sharp:2.2168e-01 L8_sharp:3.4795e-01 L9_sharp:4.6933e-01 L10_sharp:6.0337e-01 L11_sharp:6.2504e-01 L12_sharp:1.1210e+00 total_fnorm:1.8203e+00 total_l1_linf:2.0480e+03 total_spectral:9.2578e-01 L1_fnorm:1.1523e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1475e-01 L8_fnorm:1.1523e-01 L9_fnorm:1.1670e-01 L10_fnorm:1.1523e-01 L11_fnorm:1.1816e-01 L12_fnorm:1.0254e-01 L1_l1linf:3.3691e-02 L2_l1linf:3.3691e-02 L3_l1linf:3.3447e-02 L4_l1linf:3.2959e-02 L5_l1linf:3.2959e-02 L6_l1linf:3.2471e-02 L7_l1linf:3.2715e-02 L8_l1linf:3.3447e-02 L9_l1linf:3.5156e-02 L10_l1linf:3.5645e-02 L11_l1linf:3.6377e-02 L12_l1linf:2.5024e-02 L1_spectral:1.6216e-03 L2_spectral:1.6178e-03 L3_spectral:1.6256e-03 L4_spectral:1.6335e-03 L5_spectral:1.6141e-03 L6_spectral:1.6310e-03 L7_spectral:1.6135e-03 L8_spectral:1.6062e-03 L9_spectral:1.6152e-03 L10_spectral:1.6065e-03 L11_spectral:1.6127e-03 L12_spectral:1.5948e-03 train_time:119346ms step_avg:49.73ms +[2025-09-11 13:52:42] [Rank 0] step:2401/10000 train_time:120764ms step_avg:50.30ms +[2025-09-11 13:52:42] [Rank 0] step:2401/10000 train_time:120764ms step_avg:50.30ms +[2025-09-11 13:52:43] [Rank 0] step:2421/10000 train_time:121432ms step_avg:50.16ms +[2025-09-11 13:52:43] [Rank 0] step:2421/10000 train_time:121432ms step_avg:50.16ms +[2025-09-11 13:52:43] [Rank 0] step:2441/10000 train_time:122095ms step_avg:50.02ms +[2025-09-11 13:52:43] [Rank 0] step:2441/10000 train_time:122095ms step_avg:50.02ms +[2025-09-11 13:52:44] [Rank 0] step:2461/10000 train_time:122759ms step_avg:49.88ms +[2025-09-11 13:52:44] [Rank 0] step:2461/10000 train_time:122759ms step_avg:49.88ms +[2025-09-11 13:52:45] [Rank 0] step:2481/10000 train_time:123423ms step_avg:49.75ms +[2025-09-11 13:52:45] [Rank 0] step:2481/10000 
train_time:123423ms step_avg:49.75ms +[2025-09-11 13:52:45] [Rank 0] step:2501/10000 train_time:124086ms step_avg:49.61ms +[2025-09-11 13:52:45] [Rank 0] step:2501/10000 train_time:124086ms step_avg:49.61ms +[2025-09-11 13:52:46] [Rank 0] step:2521/10000 train_time:124750ms step_avg:49.48ms +[2025-09-11 13:52:46] [Rank 0] step:2521/10000 train_time:124750ms step_avg:49.48ms +[2025-09-11 13:52:47] [Rank 0] step:2541/10000 train_time:125413ms step_avg:49.36ms +[2025-09-11 13:52:47] [Rank 0] step:2541/10000 train_time:125413ms step_avg:49.36ms +[2025-09-11 13:52:47] [Rank 0] step:2561/10000 train_time:126075ms step_avg:49.23ms +[2025-09-11 13:52:47] [Rank 0] step:2561/10000 train_time:126075ms step_avg:49.23ms +[2025-09-11 13:52:48] [Rank 0] step:2581/10000 train_time:126740ms step_avg:49.10ms +[2025-09-11 13:52:48] [Rank 0] step:2581/10000 train_time:126740ms step_avg:49.10ms +[2025-09-11 13:52:49] [Rank 0] step:2601/10000 train_time:127401ms step_avg:48.98ms +[2025-09-11 13:52:49] [Rank 0] step:2601/10000 train_time:127401ms step_avg:48.98ms +[2025-09-11 13:52:49] [Rank 0] step:2621/10000 train_time:128067ms step_avg:48.86ms +[2025-09-11 13:52:49] [Rank 0] step:2621/10000 train_time:128067ms step_avg:48.86ms +[2025-09-11 13:52:50] [Rank 0] step:2641/10000 train_time:128728ms step_avg:48.74ms +[2025-09-11 13:52:50] [Rank 0] step:2641/10000 train_time:128728ms step_avg:48.74ms +[2025-09-11 13:52:51] [Rank 0] step:2661/10000 train_time:129396ms step_avg:48.63ms +[2025-09-11 13:52:51] [Rank 0] step:2661/10000 train_time:129396ms step_avg:48.63ms +[2025-09-11 13:52:51] [Rank 0] step:2681/10000 train_time:130058ms step_avg:48.51ms +[2025-09-11 13:52:51] [Rank 0] step:2681/10000 train_time:130058ms step_avg:48.51ms +[2025-09-11 13:52:52] [Rank 0] step:2701/10000 train_time:130722ms step_avg:48.40ms +[2025-09-11 13:52:52] [Rank 0] step:2701/10000 train_time:130722ms step_avg:48.40ms +[2025-09-11 13:52:53] [Rank 0] step:2721/10000 train_time:131384ms step_avg:48.29ms 
+[2025-09-11 13:52:53] [Rank 0] step:2721/10000 train_time:131384ms step_avg:48.29ms +[2025-09-11 13:52:53] [Rank 0] step:2741/10000 train_time:132048ms step_avg:48.18ms +[2025-09-11 13:52:53] [Rank 0] step:2741/10000 train_time:132048ms step_avg:48.18ms +[2025-09-11 13:52:54] [Rank 0] step:2761/10000 train_time:132710ms step_avg:48.07ms +[2025-09-11 13:52:54] [Rank 0] step:2761/10000 train_time:132710ms step_avg:48.07ms +[2025-09-11 13:52:55] [Rank 0] step:2781/10000 train_time:133372ms step_avg:47.96ms +[2025-09-11 13:52:55] [Rank 0] step:2781/10000 train_time:133372ms step_avg:47.96ms +[2025-09-11 13:52:55] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 13:52:55] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 13:52:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 13:52:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 13:52:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 13:52:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 13:52:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:52:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:52:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 13:52:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 13:52:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 13:52:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 13:53:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 13:53:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 13:53:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 13:53:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 13:53:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 13:53:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 13:53:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 13:53:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 13:53:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 13:53:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 13:53:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 13:53:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 13:53:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 13:53:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 13:53:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 13:53:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 13:53:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 13:53:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 13:53:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 13:53:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 13:53:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 13:53:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 13:53:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 13:53:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 13:53:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 13:53:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 13:53:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 13:53:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 13:53:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 13:53:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 13:53:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 13:53:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 13:53:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 13:53:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:53:06] [Rank 0] PRINT: step:2800/10000 val_loss:5.9836 total_sharp:1.3589e-01 L1_sharp:4.7744e-02 L2_sharp:4.3272e-02 L3_sharp:3.8842e-02 L4_sharp:5.3436e-02 L5_sharp:7.2934e-02 L6_sharp:1.2918e-01 L7_sharp:2.7155e-01 L8_sharp:3.8558e-01 L9_sharp:5.9270e-01 L10_sharp:8.2093e-01 L11_sharp:9.9210e-01 L12_sharp:1.6928e+00 total_fnorm:1.7266e+00 total_l1_linf:1.9040e+03 total_spectral:8.7500e-01 L1_fnorm:1.1475e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1621e-01 L10_fnorm:1.1572e-01 L11_fnorm:1.1768e-01 L12_fnorm:1.0205e-01 L1_l1linf:3.2471e-02 L2_l1linf:3.2227e-02 L3_l1linf:3.2227e-02 L4_l1linf:3.2227e-02 L5_l1linf:3.2227e-02 L6_l1linf:3.2471e-02 L7_l1linf:3.2471e-02 L8_l1linf:3.2715e-02 L9_l1linf:3.4180e-02 L10_l1linf:3.4912e-02 L11_l1linf:3.5400e-02 L12_l1linf:2.3315e-02 L1_spectral:1.6016e-03 L2_spectral:1.6142e-03 L3_spectral:1.6254e-03 L4_spectral:1.6113e-03 L5_spectral:1.6184e-03 L6_spectral:1.6188e-03 L7_spectral:1.6198e-03 L8_spectral:1.6195e-03 L9_spectral:1.6236e-03 L10_spectral:1.6018e-03 L11_spectral:1.6316e-03 L12_spectral:1.6013e-03 train_time:134018ms step_avg:47.86ms +[2025-09-11 13:53:06] [Rank 0] PRINT: step:2800/10000 val_loss:5.9836 total_sharp:1.3589e-01 L1_sharp:4.7744e-02 L2_sharp:4.3272e-02 L3_sharp:3.8842e-02 L4_sharp:5.3436e-02 L5_sharp:7.2934e-02 L6_sharp:1.2918e-01 L7_sharp:2.7155e-01 L8_sharp:3.8558e-01 L9_sharp:5.9270e-01 L10_sharp:8.2093e-01 L11_sharp:9.9210e-01 L12_sharp:1.6928e+00 total_fnorm:1.7266e+00 total_l1_linf:1.9040e+03 total_spectral:8.7500e-01 L1_fnorm:1.1475e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1621e-01 L10_fnorm:1.1572e-01 L11_fnorm:1.1768e-01 L12_fnorm:1.0205e-01 L1_l1linf:3.2471e-02 L2_l1linf:3.2227e-02 L3_l1linf:3.2227e-02 L4_l1linf:3.2227e-02 L5_l1linf:3.2227e-02 
L6_l1linf:3.2471e-02 L7_l1linf:3.2471e-02 L8_l1linf:3.2715e-02 L9_l1linf:3.4180e-02 L10_l1linf:3.4912e-02 L11_l1linf:3.5400e-02 L12_l1linf:2.3315e-02 L1_spectral:1.6016e-03 L2_spectral:1.6142e-03 L3_spectral:1.6254e-03 L4_spectral:1.6113e-03 L5_spectral:1.6184e-03 L6_spectral:1.6188e-03 L7_spectral:1.6198e-03 L8_spectral:1.6195e-03 L9_spectral:1.6236e-03 L10_spectral:1.6018e-03 L11_spectral:1.6316e-03 L12_spectral:1.6013e-03 train_time:134018ms step_avg:47.86ms +[2025-09-11 13:53:07] [Rank 0] step:2801/10000 train_time:135488ms step_avg:48.37ms +[2025-09-11 13:53:07] [Rank 0] step:2801/10000 train_time:135488ms step_avg:48.37ms +[2025-09-11 13:53:08] [Rank 0] step:2821/10000 train_time:136165ms step_avg:48.27ms +[2025-09-11 13:53:08] [Rank 0] step:2821/10000 train_time:136165ms step_avg:48.27ms +[2025-09-11 13:53:08] [Rank 0] step:2841/10000 train_time:136834ms step_avg:48.16ms +[2025-09-11 13:53:08] [Rank 0] step:2841/10000 train_time:136834ms step_avg:48.16ms +[2025-09-11 13:53:09] [Rank 0] step:2861/10000 train_time:137501ms step_avg:48.06ms +[2025-09-11 13:53:09] [Rank 0] step:2861/10000 train_time:137501ms step_avg:48.06ms +[2025-09-11 13:53:10] [Rank 0] step:2881/10000 train_time:138165ms step_avg:47.96ms +[2025-09-11 13:53:10] [Rank 0] step:2881/10000 train_time:138165ms step_avg:47.96ms +[2025-09-11 13:53:10] [Rank 0] step:2901/10000 train_time:138838ms step_avg:47.86ms +[2025-09-11 13:53:10] [Rank 0] step:2901/10000 train_time:138838ms step_avg:47.86ms +[2025-09-11 13:53:11] [Rank 0] step:2921/10000 train_time:139502ms step_avg:47.76ms +[2025-09-11 13:53:11] [Rank 0] step:2921/10000 train_time:139502ms step_avg:47.76ms +[2025-09-11 13:53:12] [Rank 0] step:2941/10000 train_time:140166ms step_avg:47.66ms +[2025-09-11 13:53:12] [Rank 0] step:2941/10000 train_time:140166ms step_avg:47.66ms +[2025-09-11 13:53:12] [Rank 0] step:2961/10000 train_time:140829ms step_avg:47.56ms +[2025-09-11 13:53:12] [Rank 0] step:2961/10000 train_time:140829ms step_avg:47.56ms 
+[2025-09-11 13:53:13] [Rank 0] step:2981/10000 train_time:141495ms step_avg:47.47ms +[2025-09-11 13:53:13] [Rank 0] step:2981/10000 train_time:141495ms step_avg:47.47ms +[2025-09-11 13:53:14] [Rank 0] step:3001/10000 train_time:142163ms step_avg:47.37ms +[2025-09-11 13:53:14] [Rank 0] step:3001/10000 train_time:142163ms step_avg:47.37ms +[2025-09-11 13:53:14] [Rank 0] step:3021/10000 train_time:142830ms step_avg:47.28ms +[2025-09-11 13:53:14] [Rank 0] step:3021/10000 train_time:142830ms step_avg:47.28ms +[2025-09-11 13:53:15] [Rank 0] step:3041/10000 train_time:143497ms step_avg:47.19ms +[2025-09-11 13:53:15] [Rank 0] step:3041/10000 train_time:143497ms step_avg:47.19ms +[2025-09-11 13:53:16] [Rank 0] step:3061/10000 train_time:144164ms step_avg:47.10ms +[2025-09-11 13:53:16] [Rank 0] step:3061/10000 train_time:144164ms step_avg:47.10ms +[2025-09-11 13:53:16] [Rank 0] step:3081/10000 train_time:144831ms step_avg:47.01ms +[2025-09-11 13:53:16] [Rank 0] step:3081/10000 train_time:144831ms step_avg:47.01ms +[2025-09-11 13:53:17] [Rank 0] step:3101/10000 train_time:145498ms step_avg:46.92ms +[2025-09-11 13:53:17] [Rank 0] step:3101/10000 train_time:145498ms step_avg:46.92ms +[2025-09-11 13:53:18] [Rank 0] step:3121/10000 train_time:146164ms step_avg:46.83ms +[2025-09-11 13:53:18] [Rank 0] step:3121/10000 train_time:146164ms step_avg:46.83ms +[2025-09-11 13:53:18] [Rank 0] step:3141/10000 train_time:146831ms step_avg:46.75ms +[2025-09-11 13:53:18] [Rank 0] step:3141/10000 train_time:146831ms step_avg:46.75ms +[2025-09-11 13:53:19] [Rank 0] step:3161/10000 train_time:147498ms step_avg:46.66ms +[2025-09-11 13:53:19] [Rank 0] step:3161/10000 train_time:147498ms step_avg:46.66ms +[2025-09-11 13:53:20] [Rank 0] step:3181/10000 train_time:148430ms step_avg:46.66ms +[2025-09-11 13:53:20] [Rank 0] step:3181/10000 train_time:148430ms step_avg:46.66ms +[2025-09-11 13:53:21] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 13:53:21] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 13:53:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 13:53:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 13:53:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 13:53:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 13:53:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:53:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:53:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 13:53:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 13:53:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 13:53:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 13:53:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 13:53:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 13:53:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 13:53:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 13:53:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 13:53:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 13:53:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 13:53:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 13:53:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 13:53:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 13:53:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 13:53:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 13:53:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 13:53:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 13:53:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 13:53:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 13:53:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 13:53:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 13:53:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 13:53:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 13:53:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 13:53:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 13:53:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 13:53:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 13:53:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 13:53:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 13:53:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 13:53:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 13:53:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 13:53:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 13:53:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 13:53:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 13:53:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 13:53:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 13:53:31] [Rank 0] PRINT: step:3200/10000 val_loss:5.8979 total_sharp:7.2230e-02 L1_sharp:3.1095e-02 L2_sharp:3.1976e-02 L3_sharp:3.4823e-02 L4_sharp:4.6485e-02 L5_sharp:6.4989e-02 L6_sharp:1.0145e-01 L7_sharp:1.8502e-01 L8_sharp:2.7244e-01 L9_sharp:3.9107e-01 L10_sharp:5.3619e-01 L11_sharp:5.0290e-01 L12_sharp:9.3087e-01 total_fnorm:1.8438e+00 total_l1_linf:2.0400e+03 total_spectral:9.4531e-01 L1_fnorm:1.1523e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1621e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1572e-01 L10_fnorm:1.1572e-01 L11_fnorm:1.1768e-01 L12_fnorm:1.0693e-01 L1_l1linf:3.2471e-02 L2_l1linf:3.1982e-02 L3_l1linf:3.1982e-02 L4_l1linf:3.2471e-02 L5_l1linf:3.2227e-02 L6_l1linf:3.1494e-02 L7_l1linf:3.1128e-02 L8_l1linf:3.1250e-02 L9_l1linf:3.1982e-02 L10_l1linf:3.2959e-02 L11_l1linf:3.4424e-02 L12_l1linf:2.5635e-02 L1_spectral:1.6196e-03 L2_spectral:1.6387e-03 L3_spectral:1.6437e-03 L4_spectral:1.6279e-03 L5_spectral:1.6133e-03 L6_spectral:1.6220e-03 L7_spectral:1.6053e-03 L8_spectral:1.6310e-03 L9_spectral:1.6169e-03 L10_spectral:1.6053e-03 L11_spectral:1.6266e-03 L12_spectral:1.5918e-03 train_time:149078ms step_avg:46.59ms +[2025-09-11 13:53:31] [Rank 0] PRINT: step:3200/10000 
val_loss:5.8979 total_sharp:7.2230e-02 L1_sharp:3.1095e-02 L2_sharp:3.1976e-02 L3_sharp:3.4823e-02 L4_sharp:4.6485e-02 L5_sharp:6.4989e-02 L6_sharp:1.0145e-01 L7_sharp:1.8502e-01 L8_sharp:2.7244e-01 L9_sharp:3.9107e-01 L10_sharp:5.3619e-01 L11_sharp:5.0290e-01 L12_sharp:9.3087e-01 total_fnorm:1.8438e+00 total_l1_linf:2.0400e+03 total_spectral:9.4531e-01 L1_fnorm:1.1523e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1621e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1572e-01 L10_fnorm:1.1572e-01 L11_fnorm:1.1768e-01 L12_fnorm:1.0693e-01 L1_l1linf:3.2471e-02 L2_l1linf:3.1982e-02 L3_l1linf:3.1982e-02 L4_l1linf:3.2471e-02 L5_l1linf:3.2227e-02 L6_l1linf:3.1494e-02 L7_l1linf:3.1128e-02 L8_l1linf:3.1250e-02 L9_l1linf:3.1982e-02 L10_l1linf:3.2959e-02 L11_l1linf:3.4424e-02 L12_l1linf:2.5635e-02 L1_spectral:1.6196e-03 L2_spectral:1.6387e-03 L3_spectral:1.6437e-03 L4_spectral:1.6279e-03 L5_spectral:1.6133e-03 L6_spectral:1.6220e-03 L7_spectral:1.6053e-03 L8_spectral:1.6310e-03 L9_spectral:1.6169e-03 L10_spectral:1.6053e-03 L11_spectral:1.6266e-03 L12_spectral:1.5918e-03 train_time:149078ms step_avg:46.59ms +[2025-09-11 13:53:33] [Rank 0] step:3201/10000 train_time:150861ms step_avg:47.13ms +[2025-09-11 13:53:33] [Rank 0] step:3201/10000 train_time:150861ms step_avg:47.13ms +[2025-09-11 13:53:34] [Rank 0] step:3221/10000 train_time:151533ms step_avg:47.05ms +[2025-09-11 13:53:34] [Rank 0] step:3221/10000 train_time:151533ms step_avg:47.05ms +[2025-09-11 13:53:35] [Rank 0] step:3241/10000 train_time:152201ms step_avg:46.96ms +[2025-09-11 13:53:35] [Rank 0] step:3241/10000 train_time:152201ms step_avg:46.96ms +[2025-09-11 13:53:35] [Rank 0] step:3261/10000 train_time:152868ms step_avg:46.88ms +[2025-09-11 13:53:35] [Rank 0] step:3261/10000 train_time:152868ms step_avg:46.88ms +[2025-09-11 13:53:36] [Rank 0] step:3281/10000 train_time:153533ms step_avg:46.79ms +[2025-09-11 13:53:36] [Rank 0] step:3281/10000 
train_time:153533ms step_avg:46.79ms +[2025-09-11 13:53:37] [Rank 0] step:3301/10000 train_time:154199ms step_avg:46.71ms +[2025-09-11 13:53:37] [Rank 0] step:3301/10000 train_time:154199ms step_avg:46.71ms +[2025-09-11 13:53:37] [Rank 0] step:3321/10000 train_time:154863ms step_avg:46.63ms +[2025-09-11 13:53:37] [Rank 0] step:3321/10000 train_time:154863ms step_avg:46.63ms +[2025-09-11 13:53:38] [Rank 0] step:3341/10000 train_time:155530ms step_avg:46.55ms +[2025-09-11 13:53:38] [Rank 0] step:3341/10000 train_time:155530ms step_avg:46.55ms +[2025-09-11 13:53:39] [Rank 0] step:3361/10000 train_time:156195ms step_avg:46.47ms +[2025-09-11 13:53:39] [Rank 0] step:3361/10000 train_time:156195ms step_avg:46.47ms +[2025-09-11 13:53:39] [Rank 0] step:3381/10000 train_time:156861ms step_avg:46.39ms +[2025-09-11 13:53:39] [Rank 0] step:3381/10000 train_time:156861ms step_avg:46.39ms +[2025-09-11 13:53:40] [Rank 0] step:3401/10000 train_time:157526ms step_avg:46.32ms +[2025-09-11 13:53:40] [Rank 0] step:3401/10000 train_time:157526ms step_avg:46.32ms +[2025-09-11 13:53:41] [Rank 0] step:3421/10000 train_time:158191ms step_avg:46.24ms +[2025-09-11 13:53:41] [Rank 0] step:3421/10000 train_time:158191ms step_avg:46.24ms +[2025-09-11 13:53:41] [Rank 0] step:3441/10000 train_time:158856ms step_avg:46.17ms +[2025-09-11 13:53:41] [Rank 0] step:3441/10000 train_time:158856ms step_avg:46.17ms +[2025-09-11 13:53:42] [Rank 0] step:3461/10000 train_time:159522ms step_avg:46.09ms +[2025-09-11 13:53:42] [Rank 0] step:3461/10000 train_time:159522ms step_avg:46.09ms +[2025-09-11 13:53:43] [Rank 0] step:3481/10000 train_time:160187ms step_avg:46.02ms +[2025-09-11 13:53:43] [Rank 0] step:3481/10000 train_time:160187ms step_avg:46.02ms +[2025-09-11 13:53:43] [Rank 0] step:3501/10000 train_time:160853ms step_avg:45.94ms +[2025-09-11 13:53:43] [Rank 0] step:3501/10000 train_time:160853ms step_avg:45.94ms +[2025-09-11 13:53:44] [Rank 0] step:3521/10000 train_time:161518ms step_avg:45.87ms 
+[2025-09-11 13:53:44] [Rank 0] step:3521/10000 train_time:161518ms step_avg:45.87ms +[2025-09-11 13:53:45] [Rank 0] step:3541/10000 train_time:162183ms step_avg:45.80ms +[2025-09-11 13:53:45] [Rank 0] step:3541/10000 train_time:162183ms step_avg:45.80ms +[2025-09-11 13:53:45] [Rank 0] step:3561/10000 train_time:162848ms step_avg:45.73ms +[2025-09-11 13:53:45] [Rank 0] step:3561/10000 train_time:162848ms step_avg:45.73ms +[2025-09-11 13:53:46] [Rank 0] step:3581/10000 train_time:163515ms step_avg:45.66ms +[2025-09-11 13:53:46] [Rank 0] step:3581/10000 train_time:163515ms step_avg:45.66ms +[2025-09-11 13:53:47] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 13:53:47] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 13:53:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 13:53:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 13:53:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 13:53:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 13:53:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:53:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:53:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 13:53:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 13:53:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 13:53:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 13:53:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 13:53:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 13:53:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 13:53:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 13:53:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 13:53:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 13:53:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 13:53:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 13:53:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 13:53:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 13:53:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 13:53:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 13:53:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 13:53:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 13:53:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 13:53:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 13:53:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 13:53:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 13:53:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 13:53:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 13:53:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 13:53:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 13:53:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 13:53:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 13:53:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 13:53:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 13:53:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 13:53:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 13:53:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 13:53:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 13:53:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 13:53:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 13:53:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 13:53:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:53:57] [Rank 0] PRINT: step:3600/10000 val_loss:5.8347 total_sharp:9.0522e-02 L1_sharp:2.8463e-02 L2_sharp:2.7866e-02 L3_sharp:2.8646e-02 L4_sharp:3.9715e-02 L5_sharp:4.5390e-02 L6_sharp:9.8316e-02 L7_sharp:2.2263e-01 L8_sharp:3.2416e-01 L9_sharp:3.5735e-01 L10_sharp:5.1634e-01 L11_sharp:5.5015e-01 L12_sharp:9.0191e-01 total_fnorm:1.6562e+00 total_l1_linf:1.8400e+03 total_spectral:8.4766e-01 L1_fnorm:1.1523e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1621e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1572e-01 L10_fnorm:1.1523e-01 L11_fnorm:1.1768e-01 L12_fnorm:1.0693e-01 L1_l1linf:3.1982e-02 L2_l1linf:3.1738e-02 L3_l1linf:3.1738e-02 L4_l1linf:3.1738e-02 L5_l1linf:3.1494e-02 L6_l1linf:3.0884e-02 L7_l1linf:3.0640e-02 L8_l1linf:3.0762e-02 L9_l1linf:3.1738e-02 L10_l1linf:3.1982e-02 L11_l1linf:3.3447e-02 L12_l1linf:2.5269e-02 L1_spectral:1.6182e-03 L2_spectral:1.6226e-03 L3_spectral:1.6330e-03 L4_spectral:1.6266e-03 L5_spectral:1.6239e-03 L6_spectral:1.6231e-03 L7_spectral:1.6178e-03 L8_spectral:1.6101e-03 L9_spectral:1.6147e-03 L10_spectral:1.6123e-03 L11_spectral:1.6158e-03 L12_spectral:1.5887e-03 train_time:164161ms step_avg:45.60ms +[2025-09-11 13:53:57] [Rank 0] PRINT: step:3600/10000 val_loss:5.8347 total_sharp:9.0522e-02 L1_sharp:2.8463e-02 L2_sharp:2.7866e-02 L3_sharp:2.8646e-02 L4_sharp:3.9715e-02 L5_sharp:4.5390e-02 L6_sharp:9.8316e-02 L7_sharp:2.2263e-01 L8_sharp:3.2416e-01 L9_sharp:3.5735e-01 L10_sharp:5.1634e-01 L11_sharp:5.5015e-01 L12_sharp:9.0191e-01 total_fnorm:1.6562e+00 total_l1_linf:1.8400e+03 total_spectral:8.4766e-01 L1_fnorm:1.1523e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1621e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1572e-01 L10_fnorm:1.1523e-01 L11_fnorm:1.1768e-01 L12_fnorm:1.0693e-01 L1_l1linf:3.1982e-02 L2_l1linf:3.1738e-02 L3_l1linf:3.1738e-02 L4_l1linf:3.1738e-02 L5_l1linf:3.1494e-02 
L6_l1linf:3.0884e-02 L7_l1linf:3.0640e-02 L8_l1linf:3.0762e-02 L9_l1linf:3.1738e-02 L10_l1linf:3.1982e-02 L11_l1linf:3.3447e-02 L12_l1linf:2.5269e-02 L1_spectral:1.6182e-03 L2_spectral:1.6226e-03 L3_spectral:1.6330e-03 L4_spectral:1.6266e-03 L5_spectral:1.6239e-03 L6_spectral:1.6231e-03 L7_spectral:1.6178e-03 L8_spectral:1.6101e-03 L9_spectral:1.6147e-03 L10_spectral:1.6123e-03 L11_spectral:1.6158e-03 L12_spectral:1.5887e-03 train_time:164161ms step_avg:45.60ms +[2025-09-11 13:53:59] [Rank 0] step:3601/10000 train_time:166153ms step_avg:46.14ms +[2025-09-11 13:53:59] [Rank 0] step:3601/10000 train_time:166153ms step_avg:46.14ms +[2025-09-11 13:54:00] [Rank 0] step:3621/10000 train_time:166847ms step_avg:46.08ms +[2025-09-11 13:54:00] [Rank 0] step:3621/10000 train_time:166847ms step_avg:46.08ms +[2025-09-11 13:54:01] [Rank 0] step:3641/10000 train_time:167514ms step_avg:46.01ms +[2025-09-11 13:54:01] [Rank 0] step:3641/10000 train_time:167514ms step_avg:46.01ms +[2025-09-11 13:54:01] [Rank 0] step:3661/10000 train_time:168180ms step_avg:45.94ms +[2025-09-11 13:54:01] [Rank 0] step:3661/10000 train_time:168180ms step_avg:45.94ms +[2025-09-11 13:54:02] [Rank 0] step:3681/10000 train_time:168848ms step_avg:45.87ms +[2025-09-11 13:54:02] [Rank 0] step:3681/10000 train_time:168848ms step_avg:45.87ms +[2025-09-11 13:54:03] [Rank 0] step:3701/10000 train_time:169514ms step_avg:45.80ms +[2025-09-11 13:54:03] [Rank 0] step:3701/10000 train_time:169514ms step_avg:45.80ms +[2025-09-11 13:54:03] [Rank 0] step:3721/10000 train_time:170190ms step_avg:45.74ms +[2025-09-11 13:54:03] [Rank 0] step:3721/10000 train_time:170190ms step_avg:45.74ms +[2025-09-11 13:54:04] [Rank 0] step:3741/10000 train_time:170866ms step_avg:45.67ms +[2025-09-11 13:54:04] [Rank 0] step:3741/10000 train_time:170866ms step_avg:45.67ms +[2025-09-11 13:54:05] [Rank 0] step:3761/10000 train_time:171543ms step_avg:45.61ms +[2025-09-11 13:54:05] [Rank 0] step:3761/10000 train_time:171543ms step_avg:45.61ms 
+[2025-09-11 13:54:05] [Rank 0] step:3781/10000 train_time:172220ms step_avg:45.55ms +[2025-09-11 13:54:05] [Rank 0] step:3781/10000 train_time:172220ms step_avg:45.55ms +[2025-09-11 13:54:06] [Rank 0] step:3801/10000 train_time:172896ms step_avg:45.49ms +[2025-09-11 13:54:06] [Rank 0] step:3801/10000 train_time:172896ms step_avg:45.49ms +[2025-09-11 13:54:07] [Rank 0] step:3821/10000 train_time:173573ms step_avg:45.43ms +[2025-09-11 13:54:07] [Rank 0] step:3821/10000 train_time:173573ms step_avg:45.43ms +[2025-09-11 13:54:07] [Rank 0] step:3841/10000 train_time:174249ms step_avg:45.37ms +[2025-09-11 13:54:07] [Rank 0] step:3841/10000 train_time:174249ms step_avg:45.37ms +[2025-09-11 13:54:08] [Rank 0] step:3861/10000 train_time:174925ms step_avg:45.31ms +[2025-09-11 13:54:08] [Rank 0] step:3861/10000 train_time:174925ms step_avg:45.31ms +[2025-09-11 13:54:09] [Rank 0] step:3881/10000 train_time:175601ms step_avg:45.25ms +[2025-09-11 13:54:09] [Rank 0] step:3881/10000 train_time:175601ms step_avg:45.25ms +[2025-09-11 13:54:09] [Rank 0] step:3901/10000 train_time:176277ms step_avg:45.19ms +[2025-09-11 13:54:09] [Rank 0] step:3901/10000 train_time:176277ms step_avg:45.19ms +[2025-09-11 13:54:10] [Rank 0] step:3921/10000 train_time:176954ms step_avg:45.13ms +[2025-09-11 13:54:10] [Rank 0] step:3921/10000 train_time:176954ms step_avg:45.13ms +[2025-09-11 13:54:11] [Rank 0] step:3941/10000 train_time:177632ms step_avg:45.07ms +[2025-09-11 13:54:11] [Rank 0] step:3941/10000 train_time:177632ms step_avg:45.07ms +[2025-09-11 13:54:11] [Rank 0] step:3961/10000 train_time:178308ms step_avg:45.02ms +[2025-09-11 13:54:11] [Rank 0] step:3961/10000 train_time:178308ms step_avg:45.02ms +[2025-09-11 13:54:12] [Rank 0] step:3981/10000 train_time:178984ms step_avg:44.96ms +[2025-09-11 13:54:12] [Rank 0] step:3981/10000 train_time:178984ms step_avg:44.96ms +[2025-09-11 13:54:13] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 13:54:13] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 13:54:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 13:54:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 13:54:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 13:54:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 13:54:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:54:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:54:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 13:54:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 13:54:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 13:54:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 13:54:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 13:54:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 13:54:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 13:54:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 13:54:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 13:54:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 13:54:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 13:54:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 13:54:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 13:54:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 13:54:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 13:54:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 13:54:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 13:54:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 13:54:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 13:54:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 13:54:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 13:54:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 13:54:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 13:54:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 13:54:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 13:54:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 13:54:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 13:54:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 13:54:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 13:54:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 13:54:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 13:54:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 13:54:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 13:54:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 13:54:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 13:54:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 13:54:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 13:54:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 13:54:23] [Rank 0] PRINT: step:4000/10000 val_loss:5.7762 total_sharp:1.3261e-01 L1_sharp:3.3695e-02 L2_sharp:3.2123e-02 L3_sharp:3.2706e-02 L4_sharp:4.1525e-02 L5_sharp:6.9071e-02 L6_sharp:1.2872e-01 L7_sharp:2.6257e-01 L8_sharp:4.0941e-01 L9_sharp:6.1412e-01 L10_sharp:9.1172e-01 L11_sharp:1.1816e+00 L12_sharp:1.6673e+00 total_fnorm:1.7500e+00 total_l1_linf:1.8720e+03 total_spectral:8.9844e-01 L1_fnorm:1.1475e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1426e-01 L9_fnorm:1.1621e-01 L10_fnorm:1.1475e-01 L11_fnorm:1.1670e-01 L12_fnorm:1.0254e-01 L1_l1linf:3.1128e-02 L2_l1linf:3.1250e-02 L3_l1linf:3.2227e-02 L4_l1linf:3.1250e-02 L5_l1linf:3.0884e-02 L6_l1linf:3.0518e-02 L7_l1linf:3.0884e-02 L8_l1linf:3.1738e-02 L9_l1linf:3.2959e-02 L10_l1linf:3.3691e-02 L11_l1linf:3.3936e-02 L12_l1linf:2.3315e-02 L1_spectral:1.6080e-03 L2_spectral:1.6273e-03 L3_spectral:1.6287e-03 L4_spectral:1.6200e-03 L5_spectral:1.6235e-03 L6_spectral:1.6168e-03 L7_spectral:1.6125e-03 L8_spectral:1.6152e-03 L9_spectral:1.6109e-03 L10_spectral:1.6097e-03 L11_spectral:1.6201e-03 L12_spectral:1.5975e-03 train_time:179641ms step_avg:44.91ms +[2025-09-11 13:54:23] [Rank 0] PRINT: step:4000/10000 
val_loss:5.7762 total_sharp:1.3261e-01 L1_sharp:3.3695e-02 L2_sharp:3.2123e-02 L3_sharp:3.2706e-02 L4_sharp:4.1525e-02 L5_sharp:6.9071e-02 L6_sharp:1.2872e-01 L7_sharp:2.6257e-01 L8_sharp:4.0941e-01 L9_sharp:6.1412e-01 L10_sharp:9.1172e-01 L11_sharp:1.1816e+00 L12_sharp:1.6673e+00 total_fnorm:1.7500e+00 total_l1_linf:1.8720e+03 total_spectral:8.9844e-01 L1_fnorm:1.1475e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1426e-01 L9_fnorm:1.1621e-01 L10_fnorm:1.1475e-01 L11_fnorm:1.1670e-01 L12_fnorm:1.0254e-01 L1_l1linf:3.1128e-02 L2_l1linf:3.1250e-02 L3_l1linf:3.2227e-02 L4_l1linf:3.1250e-02 L5_l1linf:3.0884e-02 L6_l1linf:3.0518e-02 L7_l1linf:3.0884e-02 L8_l1linf:3.1738e-02 L9_l1linf:3.2959e-02 L10_l1linf:3.3691e-02 L11_l1linf:3.3936e-02 L12_l1linf:2.3315e-02 L1_spectral:1.6080e-03 L2_spectral:1.6273e-03 L3_spectral:1.6287e-03 L4_spectral:1.6200e-03 L5_spectral:1.6235e-03 L6_spectral:1.6168e-03 L7_spectral:1.6125e-03 L8_spectral:1.6152e-03 L9_spectral:1.6109e-03 L10_spectral:1.6097e-03 L11_spectral:1.6201e-03 L12_spectral:1.5975e-03 train_time:179641ms step_avg:44.91ms +[2025-09-11 13:54:25] [Rank 0] step:4001/10000 train_time:181726ms step_avg:45.42ms +[2025-09-11 13:54:25] [Rank 0] step:4001/10000 train_time:181726ms step_avg:45.42ms +[2025-09-11 13:54:26] [Rank 0] step:4021/10000 train_time:182590ms step_avg:45.41ms +[2025-09-11 13:54:26] [Rank 0] step:4021/10000 train_time:182590ms step_avg:45.41ms +[2025-09-11 13:54:27] [Rank 0] step:4041/10000 train_time:183268ms step_avg:45.35ms +[2025-09-11 13:54:27] [Rank 0] step:4041/10000 train_time:183268ms step_avg:45.35ms +[2025-09-11 13:54:28] [Rank 0] step:4061/10000 train_time:183945ms step_avg:45.30ms +[2025-09-11 13:54:28] [Rank 0] step:4061/10000 train_time:183945ms step_avg:45.30ms +[2025-09-11 13:54:28] [Rank 0] step:4081/10000 train_time:184620ms step_avg:45.24ms +[2025-09-11 13:54:28] [Rank 0] step:4081/10000 
train_time:184620ms step_avg:45.24ms +[2025-09-11 13:54:29] [Rank 0] step:4101/10000 train_time:185296ms step_avg:45.18ms +[2025-09-11 13:54:29] [Rank 0] step:4101/10000 train_time:185296ms step_avg:45.18ms +[2025-09-11 13:54:30] [Rank 0] step:4121/10000 train_time:185974ms step_avg:45.13ms +[2025-09-11 13:54:30] [Rank 0] step:4121/10000 train_time:185974ms step_avg:45.13ms +[2025-09-11 13:54:30] [Rank 0] step:4141/10000 train_time:186650ms step_avg:45.07ms +[2025-09-11 13:54:30] [Rank 0] step:4141/10000 train_time:186650ms step_avg:45.07ms +[2025-09-11 13:54:31] [Rank 0] step:4161/10000 train_time:187325ms step_avg:45.02ms +[2025-09-11 13:54:31] [Rank 0] step:4161/10000 train_time:187325ms step_avg:45.02ms +[2025-09-11 13:54:32] [Rank 0] step:4181/10000 train_time:188000ms step_avg:44.97ms +[2025-09-11 13:54:32] [Rank 0] step:4181/10000 train_time:188000ms step_avg:44.97ms +[2025-09-11 13:54:32] [Rank 0] step:4201/10000 train_time:188677ms step_avg:44.91ms +[2025-09-11 13:54:32] [Rank 0] step:4201/10000 train_time:188677ms step_avg:44.91ms +[2025-09-11 13:54:33] [Rank 0] step:4221/10000 train_time:189352ms step_avg:44.86ms +[2025-09-11 13:54:33] [Rank 0] step:4221/10000 train_time:189352ms step_avg:44.86ms +[2025-09-11 13:54:34] [Rank 0] step:4241/10000 train_time:190028ms step_avg:44.81ms +[2025-09-11 13:54:34] [Rank 0] step:4241/10000 train_time:190028ms step_avg:44.81ms +[2025-09-11 13:54:34] [Rank 0] step:4261/10000 train_time:190704ms step_avg:44.76ms +[2025-09-11 13:54:34] [Rank 0] step:4261/10000 train_time:190704ms step_avg:44.76ms +[2025-09-11 13:54:35] [Rank 0] step:4281/10000 train_time:191381ms step_avg:44.70ms +[2025-09-11 13:54:35] [Rank 0] step:4281/10000 train_time:191381ms step_avg:44.70ms +[2025-09-11 13:54:36] [Rank 0] step:4301/10000 train_time:192058ms step_avg:44.65ms +[2025-09-11 13:54:36] [Rank 0] step:4301/10000 train_time:192058ms step_avg:44.65ms +[2025-09-11 13:54:36] [Rank 0] step:4321/10000 train_time:192733ms step_avg:44.60ms 
+[2025-09-11 13:54:36] [Rank 0] step:4321/10000 train_time:192733ms step_avg:44.60ms +[2025-09-11 13:54:37] [Rank 0] step:4341/10000 train_time:193409ms step_avg:44.55ms +[2025-09-11 13:54:37] [Rank 0] step:4341/10000 train_time:193409ms step_avg:44.55ms +[2025-09-11 13:54:38] [Rank 0] step:4361/10000 train_time:194090ms step_avg:44.51ms +[2025-09-11 13:54:38] [Rank 0] step:4361/10000 train_time:194090ms step_avg:44.51ms +[2025-09-11 13:54:38] [Rank 0] step:4381/10000 train_time:194767ms step_avg:44.46ms +[2025-09-11 13:54:38] [Rank 0] step:4381/10000 train_time:194767ms step_avg:44.46ms +[2025-09-11 13:54:39] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 13:54:39] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 13:54:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 13:54:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 13:54:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 13:54:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 13:54:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:54:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:54:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 13:54:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 13:54:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 13:54:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 13:54:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 13:54:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 13:54:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 13:54:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 13:54:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 13:54:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 13:54:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 13:54:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 13:54:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 13:54:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 13:54:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 13:54:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 13:54:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 13:54:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 13:54:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 13:54:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 13:54:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 13:54:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 13:54:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 13:54:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 13:54:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 13:54:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 13:54:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 13:54:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 13:54:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 13:54:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 13:54:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 13:54:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 13:54:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 13:54:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 13:54:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 13:54:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 13:54:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 13:54:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:54:50] [Rank 0] PRINT: step:4400/10000 val_loss:5.7328 total_sharp:7.4602e-02 L1_sharp:2.5159e-02 L2_sharp:2.6385e-02 L3_sharp:2.3418e-02 L4_sharp:3.8789e-02 L5_sharp:4.6803e-02 L6_sharp:7.7598e-02 L7_sharp:1.6607e-01 L8_sharp:2.2537e-01 L9_sharp:3.0414e-01 L10_sharp:4.3925e-01 L11_sharp:4.8559e-01 L12_sharp:8.3161e-01 total_fnorm:1.6172e+00 total_l1_linf:1.7440e+03 total_spectral:8.2812e-01 L1_fnorm:1.1426e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1377e-01 L8_fnorm:1.1426e-01 L9_fnorm:1.1523e-01 L10_fnorm:1.1475e-01 L11_fnorm:1.1670e-01 L12_fnorm:1.0449e-01 L1_l1linf:3.0884e-02 L2_l1linf:3.0762e-02 L3_l1linf:3.1006e-02 L4_l1linf:3.1494e-02 L5_l1linf:3.0640e-02 L6_l1linf:3.0518e-02 L7_l1linf:3.0151e-02 L8_l1linf:3.0640e-02 L9_l1linf:3.0762e-02 L10_l1linf:3.1738e-02 L11_l1linf:3.3203e-02 L12_l1linf:2.2339e-02 L1_spectral:1.6133e-03 L2_spectral:1.6298e-03 L3_spectral:1.6278e-03 L4_spectral:1.6199e-03 L5_spectral:1.6163e-03 L6_spectral:1.6221e-03 L7_spectral:1.6155e-03 L8_spectral:1.6010e-03 L9_spectral:1.6052e-03 L10_spectral:1.5961e-03 L11_spectral:1.6100e-03 L12_spectral:1.6063e-03 train_time:195423ms step_avg:44.41ms +[2025-09-11 13:54:50] [Rank 0] PRINT: step:4400/10000 val_loss:5.7328 total_sharp:7.4602e-02 L1_sharp:2.5159e-02 L2_sharp:2.6385e-02 L3_sharp:2.3418e-02 L4_sharp:3.8789e-02 L5_sharp:4.6803e-02 L6_sharp:7.7598e-02 L7_sharp:1.6607e-01 L8_sharp:2.2537e-01 L9_sharp:3.0414e-01 L10_sharp:4.3925e-01 L11_sharp:4.8559e-01 L12_sharp:8.3161e-01 total_fnorm:1.6172e+00 total_l1_linf:1.7440e+03 total_spectral:8.2812e-01 L1_fnorm:1.1426e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1377e-01 L8_fnorm:1.1426e-01 L9_fnorm:1.1523e-01 L10_fnorm:1.1475e-01 L11_fnorm:1.1670e-01 L12_fnorm:1.0449e-01 L1_l1linf:3.0884e-02 L2_l1linf:3.0762e-02 L3_l1linf:3.1006e-02 L4_l1linf:3.1494e-02 L5_l1linf:3.0640e-02 
L6_l1linf:3.0518e-02 L7_l1linf:3.0151e-02 L8_l1linf:3.0640e-02 L9_l1linf:3.0762e-02 L10_l1linf:3.1738e-02 L11_l1linf:3.3203e-02 L12_l1linf:2.2339e-02 L1_spectral:1.6133e-03 L2_spectral:1.6298e-03 L3_spectral:1.6278e-03 L4_spectral:1.6199e-03 L5_spectral:1.6163e-03 L6_spectral:1.6221e-03 L7_spectral:1.6155e-03 L8_spectral:1.6010e-03 L9_spectral:1.6052e-03 L10_spectral:1.5961e-03 L11_spectral:1.6100e-03 L12_spectral:1.6063e-03 train_time:195423ms step_avg:44.41ms +[2025-09-11 13:54:52] [Rank 0] step:4401/10000 train_time:197552ms step_avg:44.89ms +[2025-09-11 13:54:52] [Rank 0] step:4401/10000 train_time:197552ms step_avg:44.89ms +[2025-09-11 13:54:53] [Rank 0] step:4421/10000 train_time:198254ms step_avg:44.84ms +[2025-09-11 13:54:53] [Rank 0] step:4421/10000 train_time:198254ms step_avg:44.84ms +[2025-09-11 13:54:54] [Rank 0] step:4441/10000 train_time:198932ms step_avg:44.79ms +[2025-09-11 13:54:54] [Rank 0] step:4441/10000 train_time:198932ms step_avg:44.79ms +[2025-09-11 13:54:54] [Rank 0] step:4461/10000 train_time:199612ms step_avg:44.75ms +[2025-09-11 13:54:54] [Rank 0] step:4461/10000 train_time:199612ms step_avg:44.75ms +[2025-09-11 13:54:55] [Rank 0] step:4481/10000 train_time:200290ms step_avg:44.70ms +[2025-09-11 13:54:55] [Rank 0] step:4481/10000 train_time:200290ms step_avg:44.70ms +[2025-09-11 13:54:56] [Rank 0] step:4501/10000 train_time:200971ms step_avg:44.65ms +[2025-09-11 13:54:56] [Rank 0] step:4501/10000 train_time:200971ms step_avg:44.65ms +[2025-09-11 13:54:56] [Rank 0] step:4521/10000 train_time:201650ms step_avg:44.60ms +[2025-09-11 13:54:56] [Rank 0] step:4521/10000 train_time:201650ms step_avg:44.60ms +[2025-09-11 13:54:57] [Rank 0] step:4541/10000 train_time:202329ms step_avg:44.56ms +[2025-09-11 13:54:57] [Rank 0] step:4541/10000 train_time:202329ms step_avg:44.56ms +[2025-09-11 13:54:58] [Rank 0] step:4561/10000 train_time:203008ms step_avg:44.51ms +[2025-09-11 13:54:58] [Rank 0] step:4561/10000 train_time:203008ms step_avg:44.51ms 
+[2025-09-11 13:54:58] [Rank 0] step:4581/10000 train_time:203687ms step_avg:44.46ms +[2025-09-11 13:54:58] [Rank 0] step:4581/10000 train_time:203687ms step_avg:44.46ms +[2025-09-11 13:54:59] [Rank 0] step:4601/10000 train_time:204367ms step_avg:44.42ms +[2025-09-11 13:54:59] [Rank 0] step:4601/10000 train_time:204367ms step_avg:44.42ms +[2025-09-11 13:55:00] [Rank 0] step:4621/10000 train_time:205046ms step_avg:44.37ms +[2025-09-11 13:55:00] [Rank 0] step:4621/10000 train_time:205046ms step_avg:44.37ms +[2025-09-11 13:55:00] [Rank 0] step:4641/10000 train_time:205725ms step_avg:44.33ms +[2025-09-11 13:55:00] [Rank 0] step:4641/10000 train_time:205725ms step_avg:44.33ms +[2025-09-11 13:55:01] [Rank 0] step:4661/10000 train_time:206457ms step_avg:44.29ms +[2025-09-11 13:55:01] [Rank 0] step:4661/10000 train_time:206457ms step_avg:44.29ms +[2025-09-11 13:55:02] [Rank 0] step:4681/10000 train_time:207193ms step_avg:44.26ms +[2025-09-11 13:55:02] [Rank 0] step:4681/10000 train_time:207193ms step_avg:44.26ms +[2025-09-11 13:55:03] [Rank 0] step:4701/10000 train_time:207871ms step_avg:44.22ms +[2025-09-11 13:55:03] [Rank 0] step:4701/10000 train_time:207871ms step_avg:44.22ms +[2025-09-11 13:55:03] [Rank 0] step:4721/10000 train_time:208551ms step_avg:44.18ms +[2025-09-11 13:55:03] [Rank 0] step:4721/10000 train_time:208551ms step_avg:44.18ms +[2025-09-11 13:55:04] [Rank 0] step:4741/10000 train_time:209230ms step_avg:44.13ms +[2025-09-11 13:55:04] [Rank 0] step:4741/10000 train_time:209230ms step_avg:44.13ms +[2025-09-11 13:55:05] [Rank 0] step:4761/10000 train_time:209910ms step_avg:44.09ms +[2025-09-11 13:55:05] [Rank 0] step:4761/10000 train_time:209910ms step_avg:44.09ms +[2025-09-11 13:55:05] [Rank 0] step:4781/10000 train_time:210588ms step_avg:44.05ms +[2025-09-11 13:55:05] [Rank 0] step:4781/10000 train_time:210588ms step_avg:44.05ms +[2025-09-11 13:55:06] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 13:55:06] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 13:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 13:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 13:55:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 13:55:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 13:55:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:55:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:55:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 13:55:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 13:55:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 13:55:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 13:55:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 13:55:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 13:55:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 13:55:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 13:55:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 13:55:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 13:55:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 13:55:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 13:55:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 13:55:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 13:55:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 13:55:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 13:55:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 13:55:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 13:55:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 13:55:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 13:55:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 13:55:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 13:55:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 13:55:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 13:55:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 13:55:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 13:55:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 13:55:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 13:55:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 13:55:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 13:55:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 13:55:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 13:55:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 13:55:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 13:55:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 13:55:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 13:55:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 13:55:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 13:55:17] [Rank 0] PRINT: step:4800/10000 val_loss:5.6897 total_sharp:9.0319e-02 L1_sharp:2.2932e-02 L2_sharp:2.2713e-02 L3_sharp:2.5908e-02 L4_sharp:3.7610e-02 L5_sharp:6.1094e-02 L6_sharp:1.0197e-01 L7_sharp:2.1911e-01 L8_sharp:2.5257e-01 L9_sharp:3.2699e-01 L10_sharp:4.7384e-01 L11_sharp:4.9506e-01 L12_sharp:8.5526e-01 total_fnorm:1.5938e+00 total_l1_linf:1.7280e+03 total_spectral:8.2031e-01 L1_fnorm:1.1475e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1426e-01 L9_fnorm:1.1475e-01 L10_fnorm:1.1475e-01 L11_fnorm:1.1621e-01 L12_fnorm:1.0791e-01 L1_l1linf:3.0396e-02 L2_l1linf:3.0151e-02 L3_l1linf:3.0518e-02 L4_l1linf:3.0396e-02 L5_l1linf:3.0762e-02 L6_l1linf:2.9541e-02 L7_l1linf:2.9419e-02 L8_l1linf:2.9419e-02 L9_l1linf:3.0273e-02 L10_l1linf:3.1006e-02 L11_l1linf:3.2715e-02 L12_l1linf:2.3193e-02 L1_spectral:1.6389e-03 L2_spectral:1.6345e-03 L3_spectral:1.6322e-03 L4_spectral:1.6345e-03 L5_spectral:1.6150e-03 L6_spectral:1.6167e-03 L7_spectral:1.6182e-03 L8_spectral:1.6255e-03 L9_spectral:1.6184e-03 L10_spectral:1.6224e-03 L11_spectral:1.6269e-03 L12_spectral:1.6024e-03 train_time:211246ms step_avg:44.01ms +[2025-09-11 13:55:17] [Rank 0] PRINT: step:4800/10000 
val_loss:5.6897 total_sharp:9.0319e-02 L1_sharp:2.2932e-02 L2_sharp:2.2713e-02 L3_sharp:2.5908e-02 L4_sharp:3.7610e-02 L5_sharp:6.1094e-02 L6_sharp:1.0197e-01 L7_sharp:2.1911e-01 L8_sharp:2.5257e-01 L9_sharp:3.2699e-01 L10_sharp:4.7384e-01 L11_sharp:4.9506e-01 L12_sharp:8.5526e-01 total_fnorm:1.5938e+00 total_l1_linf:1.7280e+03 total_spectral:8.2031e-01 L1_fnorm:1.1475e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1426e-01 L9_fnorm:1.1475e-01 L10_fnorm:1.1475e-01 L11_fnorm:1.1621e-01 L12_fnorm:1.0791e-01 L1_l1linf:3.0396e-02 L2_l1linf:3.0151e-02 L3_l1linf:3.0518e-02 L4_l1linf:3.0396e-02 L5_l1linf:3.0762e-02 L6_l1linf:2.9541e-02 L7_l1linf:2.9419e-02 L8_l1linf:2.9419e-02 L9_l1linf:3.0273e-02 L10_l1linf:3.1006e-02 L11_l1linf:3.2715e-02 L12_l1linf:2.3193e-02 L1_spectral:1.6389e-03 L2_spectral:1.6345e-03 L3_spectral:1.6322e-03 L4_spectral:1.6345e-03 L5_spectral:1.6150e-03 L6_spectral:1.6167e-03 L7_spectral:1.6182e-03 L8_spectral:1.6255e-03 L9_spectral:1.6184e-03 L10_spectral:1.6224e-03 L11_spectral:1.6269e-03 L12_spectral:1.6024e-03 train_time:211246ms step_avg:44.01ms +[2025-09-11 13:55:19] [Rank 0] step:4801/10000 train_time:212995ms step_avg:44.36ms +[2025-09-11 13:55:19] [Rank 0] step:4801/10000 train_time:212995ms step_avg:44.36ms +[2025-09-11 13:55:20] [Rank 0] step:4821/10000 train_time:213682ms step_avg:44.32ms +[2025-09-11 13:55:20] [Rank 0] step:4821/10000 train_time:213682ms step_avg:44.32ms +[2025-09-11 13:55:20] [Rank 0] step:4841/10000 train_time:214362ms step_avg:44.28ms +[2025-09-11 13:55:20] [Rank 0] step:4841/10000 train_time:214362ms step_avg:44.28ms +[2025-09-11 13:55:21] [Rank 0] step:4861/10000 train_time:215041ms step_avg:44.24ms +[2025-09-11 13:55:21] [Rank 0] step:4861/10000 train_time:215041ms step_avg:44.24ms +[2025-09-11 13:55:22] [Rank 0] step:4881/10000 train_time:215721ms step_avg:44.20ms +[2025-09-11 13:55:22] [Rank 0] step:4881/10000 
train_time:215721ms step_avg:44.20ms +[2025-09-11 13:55:22] [Rank 0] step:4901/10000 train_time:216400ms step_avg:44.15ms +[2025-09-11 13:55:22] [Rank 0] step:4901/10000 train_time:216400ms step_avg:44.15ms +[2025-09-11 13:55:23] [Rank 0] step:4921/10000 train_time:217080ms step_avg:44.11ms +[2025-09-11 13:55:23] [Rank 0] step:4921/10000 train_time:217080ms step_avg:44.11ms +[2025-09-11 13:55:24] [Rank 0] step:4941/10000 train_time:217759ms step_avg:44.07ms +[2025-09-11 13:55:24] [Rank 0] step:4941/10000 train_time:217759ms step_avg:44.07ms +[2025-09-11 13:55:25] [Rank 0] step:4961/10000 train_time:218437ms step_avg:44.03ms +[2025-09-11 13:55:25] [Rank 0] step:4961/10000 train_time:218437ms step_avg:44.03ms +[2025-09-11 13:55:25] [Rank 0] step:4981/10000 train_time:219116ms step_avg:43.99ms +[2025-09-11 13:55:25] [Rank 0] step:4981/10000 train_time:219116ms step_avg:43.99ms +[2025-09-11 13:55:26] [Rank 0] step:5001/10000 train_time:219796ms step_avg:43.95ms +[2025-09-11 13:55:26] [Rank 0] step:5001/10000 train_time:219796ms step_avg:43.95ms +[2025-09-11 13:55:27] [Rank 0] step:5021/10000 train_time:220728ms step_avg:43.96ms +[2025-09-11 13:55:27] [Rank 0] step:5021/10000 train_time:220728ms step_avg:43.96ms +[2025-09-11 13:55:27] [Rank 0] step:5041/10000 train_time:221406ms step_avg:43.92ms +[2025-09-11 13:55:27] [Rank 0] step:5041/10000 train_time:221406ms step_avg:43.92ms +[2025-09-11 13:55:28] [Rank 0] step:5061/10000 train_time:222085ms step_avg:43.88ms +[2025-09-11 13:55:28] [Rank 0] step:5061/10000 train_time:222085ms step_avg:43.88ms +[2025-09-11 13:55:29] [Rank 0] step:5081/10000 train_time:223046ms step_avg:43.90ms +[2025-09-11 13:55:29] [Rank 0] step:5081/10000 train_time:223046ms step_avg:43.90ms +[2025-09-11 13:55:30] [Rank 0] step:5101/10000 train_time:223725ms step_avg:43.86ms +[2025-09-11 13:55:30] [Rank 0] step:5101/10000 train_time:223725ms step_avg:43.86ms +[2025-09-11 13:55:30] [Rank 0] step:5121/10000 train_time:224403ms step_avg:43.82ms 
+[2025-09-11 13:55:30] [Rank 0] step:5121/10000 train_time:224403ms step_avg:43.82ms +[2025-09-11 13:55:31] [Rank 0] step:5141/10000 train_time:225083ms step_avg:43.78ms +[2025-09-11 13:55:31] [Rank 0] step:5141/10000 train_time:225083ms step_avg:43.78ms +[2025-09-11 13:55:32] [Rank 0] step:5161/10000 train_time:225762ms step_avg:43.74ms +[2025-09-11 13:55:32] [Rank 0] step:5161/10000 train_time:225762ms step_avg:43.74ms +[2025-09-11 13:55:33] [Rank 0] step:5181/10000 train_time:226442ms step_avg:43.71ms +[2025-09-11 13:55:33] [Rank 0] step:5181/10000 train_time:226442ms step_avg:43.71ms +[2025-09-11 13:55:33] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 13:55:33] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 13:55:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 13:55:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 13:55:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 13:55:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 13:55:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:55:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:55:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 13:55:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 13:55:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 13:55:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 13:55:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 13:55:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 13:55:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 13:55:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 13:55:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 13:55:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 13:55:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 13:55:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 13:55:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 13:55:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 13:55:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 13:55:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 13:55:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 13:55:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 13:55:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 13:55:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 13:55:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 13:55:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 13:55:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 13:55:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 13:55:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 13:55:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 13:55:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 13:55:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 13:55:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 13:55:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 13:55:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 13:55:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 13:55:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 13:55:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 13:55:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 13:55:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 13:55:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 13:55:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:55:44] [Rank 0] PRINT: step:5200/10000 val_loss:5.6540 total_sharp:8.7071e-02 L1_sharp:2.4693e-02 L2_sharp:2.6103e-02 L3_sharp:2.3264e-02 L4_sharp:3.1116e-02 L5_sharp:4.1185e-02 L6_sharp:5.8442e-02 L7_sharp:1.1750e-01 L8_sharp:1.7930e-01 L9_sharp:2.6467e-01 L10_sharp:4.7283e-01 L11_sharp:5.8953e-01 L12_sharp:1.5373e+00 total_fnorm:1.5547e+00 total_l1_linf:1.6560e+03 total_spectral:7.8516e-01 L1_fnorm:1.1523e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1426e-01 L9_fnorm:1.1475e-01 L10_fnorm:1.1426e-01 L11_fnorm:1.1621e-01 L12_fnorm:1.0693e-01 L1_l1linf:3.0518e-02 L2_l1linf:3.0396e-02 L3_l1linf:3.0518e-02 L4_l1linf:3.1006e-02 L5_l1linf:3.0151e-02 L6_l1linf:2.9907e-02 L7_l1linf:2.9297e-02 L8_l1linf:2.9175e-02 L9_l1linf:2.9053e-02 L10_l1linf:3.0273e-02 L11_l1linf:3.1982e-02 L12_l1linf:2.2339e-02 L1_spectral:1.6249e-03 L2_spectral:1.6441e-03 L3_spectral:1.6364e-03 L4_spectral:1.6265e-03 L5_spectral:1.6159e-03 L6_spectral:1.6194e-03 L7_spectral:1.6099e-03 L8_spectral:1.6308e-03 L9_spectral:1.6186e-03 L10_spectral:1.6211e-03 L11_spectral:1.6171e-03 L12_spectral:1.6067e-03 train_time:227106ms step_avg:43.67ms +[2025-09-11 13:55:44] [Rank 0] PRINT: step:5200/10000 val_loss:5.6540 total_sharp:8.7071e-02 L1_sharp:2.4693e-02 L2_sharp:2.6103e-02 L3_sharp:2.3264e-02 L4_sharp:3.1116e-02 L5_sharp:4.1185e-02 L6_sharp:5.8442e-02 L7_sharp:1.1750e-01 L8_sharp:1.7930e-01 L9_sharp:2.6467e-01 L10_sharp:4.7283e-01 L11_sharp:5.8953e-01 L12_sharp:1.5373e+00 total_fnorm:1.5547e+00 total_l1_linf:1.6560e+03 total_spectral:7.8516e-01 L1_fnorm:1.1523e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1426e-01 L9_fnorm:1.1475e-01 L10_fnorm:1.1426e-01 L11_fnorm:1.1621e-01 L12_fnorm:1.0693e-01 L1_l1linf:3.0518e-02 L2_l1linf:3.0396e-02 L3_l1linf:3.0518e-02 L4_l1linf:3.1006e-02 L5_l1linf:3.0151e-02 
L6_l1linf:2.9907e-02 L7_l1linf:2.9297e-02 L8_l1linf:2.9175e-02 L9_l1linf:2.9053e-02 L10_l1linf:3.0273e-02 L11_l1linf:3.1982e-02 L12_l1linf:2.2339e-02 L1_spectral:1.6249e-03 L2_spectral:1.6441e-03 L3_spectral:1.6364e-03 L4_spectral:1.6265e-03 L5_spectral:1.6159e-03 L6_spectral:1.6194e-03 L7_spectral:1.6099e-03 L8_spectral:1.6308e-03 L9_spectral:1.6186e-03 L10_spectral:1.6211e-03 L11_spectral:1.6171e-03 L12_spectral:1.6067e-03 train_time:227106ms step_avg:43.67ms +[2025-09-11 13:55:46] [Rank 0] step:5201/10000 train_time:229004ms step_avg:44.03ms +[2025-09-11 13:55:46] [Rank 0] step:5201/10000 train_time:229004ms step_avg:44.03ms +[2025-09-11 13:55:46] [Rank 0] step:5221/10000 train_time:229717ms step_avg:44.00ms +[2025-09-11 13:55:46] [Rank 0] step:5221/10000 train_time:229717ms step_avg:44.00ms +[2025-09-11 13:55:47] [Rank 0] step:5241/10000 train_time:230406ms step_avg:43.96ms +[2025-09-11 13:55:47] [Rank 0] step:5241/10000 train_time:230406ms step_avg:43.96ms +[2025-09-11 13:55:48] [Rank 0] step:5261/10000 train_time:231095ms step_avg:43.93ms +[2025-09-11 13:55:48] [Rank 0] step:5261/10000 train_time:231095ms step_avg:43.93ms +[2025-09-11 13:55:48] [Rank 0] step:5281/10000 train_time:231784ms step_avg:43.89ms +[2025-09-11 13:55:48] [Rank 0] step:5281/10000 train_time:231784ms step_avg:43.89ms +[2025-09-11 13:55:49] [Rank 0] step:5301/10000 train_time:232472ms step_avg:43.85ms +[2025-09-11 13:55:49] [Rank 0] step:5301/10000 train_time:232472ms step_avg:43.85ms +[2025-09-11 13:55:50] [Rank 0] step:5321/10000 train_time:233161ms step_avg:43.82ms +[2025-09-11 13:55:50] [Rank 0] step:5321/10000 train_time:233161ms step_avg:43.82ms +[2025-09-11 13:55:50] [Rank 0] step:5341/10000 train_time:233850ms step_avg:43.78ms +[2025-09-11 13:55:50] [Rank 0] step:5341/10000 train_time:233850ms step_avg:43.78ms +[2025-09-11 13:55:51] [Rank 0] step:5361/10000 train_time:234541ms step_avg:43.75ms +[2025-09-11 13:55:51] [Rank 0] step:5361/10000 train_time:234541ms step_avg:43.75ms 
+[2025-09-11 13:55:52] [Rank 0] step:5381/10000 train_time:235230ms step_avg:43.71ms +[2025-09-11 13:55:52] [Rank 0] step:5381/10000 train_time:235230ms step_avg:43.71ms +[2025-09-11 13:55:53] [Rank 0] step:5401/10000 train_time:235917ms step_avg:43.68ms +[2025-09-11 13:55:53] [Rank 0] step:5401/10000 train_time:235917ms step_avg:43.68ms +[2025-09-11 13:55:53] [Rank 0] step:5421/10000 train_time:236608ms step_avg:43.65ms +[2025-09-11 13:55:53] [Rank 0] step:5421/10000 train_time:236608ms step_avg:43.65ms +[2025-09-11 13:55:54] [Rank 0] step:5441/10000 train_time:237297ms step_avg:43.61ms +[2025-09-11 13:55:54] [Rank 0] step:5441/10000 train_time:237297ms step_avg:43.61ms +[2025-09-11 13:55:55] [Rank 0] step:5461/10000 train_time:237985ms step_avg:43.58ms +[2025-09-11 13:55:55] [Rank 0] step:5461/10000 train_time:237985ms step_avg:43.58ms +[2025-09-11 13:55:55] [Rank 0] step:5481/10000 train_time:238675ms step_avg:43.55ms +[2025-09-11 13:55:55] [Rank 0] step:5481/10000 train_time:238675ms step_avg:43.55ms +[2025-09-11 13:55:56] [Rank 0] step:5501/10000 train_time:239362ms step_avg:43.51ms +[2025-09-11 13:55:56] [Rank 0] step:5501/10000 train_time:239362ms step_avg:43.51ms +[2025-09-11 13:55:57] [Rank 0] step:5521/10000 train_time:240051ms step_avg:43.48ms +[2025-09-11 13:55:57] [Rank 0] step:5521/10000 train_time:240051ms step_avg:43.48ms +[2025-09-11 13:55:57] [Rank 0] step:5541/10000 train_time:240741ms step_avg:43.45ms +[2025-09-11 13:55:57] [Rank 0] step:5541/10000 train_time:240741ms step_avg:43.45ms +[2025-09-11 13:55:58] [Rank 0] step:5561/10000 train_time:241433ms step_avg:43.42ms +[2025-09-11 13:55:58] [Rank 0] step:5561/10000 train_time:241433ms step_avg:43.42ms +[2025-09-11 13:55:59] [Rank 0] step:5581/10000 train_time:242125ms step_avg:43.38ms +[2025-09-11 13:55:59] [Rank 0] step:5581/10000 train_time:242125ms step_avg:43.38ms +[2025-09-11 13:55:59] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 13:55:59] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 13:56:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 13:56:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 13:56:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 13:56:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 13:56:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:56:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:56:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 13:56:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 13:56:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 13:56:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 13:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 13:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 13:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 13:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 13:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 13:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 13:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 13:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 13:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 13:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 13:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 13:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 13:56:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 13:56:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 13:56:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 13:56:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 13:56:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 13:56:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 13:56:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 13:56:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 13:56:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 13:56:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 13:56:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 13:56:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 13:56:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 13:56:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 13:56:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 13:56:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 13:56:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 13:56:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 13:56:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 13:56:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 13:56:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 13:56:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 13:56:10] [Rank 0] PRINT: step:5600/10000 val_loss:5.6237 total_sharp:6.8578e-02 L1_sharp:1.9199e-02 L2_sharp:2.1675e-02 L3_sharp:2.2304e-02 L4_sharp:2.4282e-02 L5_sharp:3.5209e-02 L6_sharp:5.1714e-02 L7_sharp:1.0011e-01 L8_sharp:1.3507e-01 L9_sharp:2.3634e-01 L10_sharp:4.4083e-01 L11_sharp:5.8058e-01 L12_sharp:9.7752e-01 total_fnorm:1.5391e+00 total_l1_linf:1.6720e+03 total_spectral:7.9688e-01 L1_fnorm:1.1523e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1377e-01 L8_fnorm:1.1377e-01 L9_fnorm:1.1475e-01 L10_fnorm:1.1426e-01 L11_fnorm:1.1572e-01 L12_fnorm:1.0742e-01 L1_l1linf:2.9419e-02 L2_l1linf:3.0151e-02 L3_l1linf:2.9907e-02 L4_l1linf:3.0151e-02 L5_l1linf:3.0029e-02 L6_l1linf:2.9541e-02 L7_l1linf:2.8687e-02 L8_l1linf:2.8809e-02 L9_l1linf:2.9297e-02 L10_l1linf:3.0273e-02 L11_l1linf:3.2715e-02 L12_l1linf:2.2705e-02 L1_spectral:1.6182e-03 L2_spectral:1.6306e-03 L3_spectral:1.6335e-03 L4_spectral:1.6315e-03 L5_spectral:1.6154e-03 L6_spectral:1.6226e-03 L7_spectral:1.6033e-03 L8_spectral:1.6112e-03 L9_spectral:1.6138e-03 L10_spectral:1.6082e-03 L11_spectral:1.6087e-03 L12_spectral:1.5988e-03 train_time:242794ms step_avg:43.36ms +[2025-09-11 13:56:10] [Rank 0] PRINT: step:5600/10000 
val_loss:5.6237 total_sharp:6.8578e-02 L1_sharp:1.9199e-02 L2_sharp:2.1675e-02 L3_sharp:2.2304e-02 L4_sharp:2.4282e-02 L5_sharp:3.5209e-02 L6_sharp:5.1714e-02 L7_sharp:1.0011e-01 L8_sharp:1.3507e-01 L9_sharp:2.3634e-01 L10_sharp:4.4083e-01 L11_sharp:5.8058e-01 L12_sharp:9.7752e-01 total_fnorm:1.5391e+00 total_l1_linf:1.6720e+03 total_spectral:7.9688e-01 L1_fnorm:1.1523e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1377e-01 L8_fnorm:1.1377e-01 L9_fnorm:1.1475e-01 L10_fnorm:1.1426e-01 L11_fnorm:1.1572e-01 L12_fnorm:1.0742e-01 L1_l1linf:2.9419e-02 L2_l1linf:3.0151e-02 L3_l1linf:2.9907e-02 L4_l1linf:3.0151e-02 L5_l1linf:3.0029e-02 L6_l1linf:2.9541e-02 L7_l1linf:2.8687e-02 L8_l1linf:2.8809e-02 L9_l1linf:2.9297e-02 L10_l1linf:3.0273e-02 L11_l1linf:3.2715e-02 L12_l1linf:2.2705e-02 L1_spectral:1.6182e-03 L2_spectral:1.6306e-03 L3_spectral:1.6335e-03 L4_spectral:1.6315e-03 L5_spectral:1.6154e-03 L6_spectral:1.6226e-03 L7_spectral:1.6033e-03 L8_spectral:1.6112e-03 L9_spectral:1.6138e-03 L10_spectral:1.6082e-03 L11_spectral:1.6087e-03 L12_spectral:1.5988e-03 train_time:242794ms step_avg:43.36ms +[2025-09-11 13:56:11] [Rank 0] step:5601/10000 train_time:244770ms step_avg:43.70ms +[2025-09-11 13:56:11] [Rank 0] step:5601/10000 train_time:244770ms step_avg:43.70ms +[2025-09-11 13:56:12] [Rank 0] step:5621/10000 train_time:245464ms step_avg:43.67ms +[2025-09-11 13:56:12] [Rank 0] step:5621/10000 train_time:245464ms step_avg:43.67ms +[2025-09-11 13:56:13] [Rank 0] step:5641/10000 train_time:246152ms step_avg:43.64ms +[2025-09-11 13:56:13] [Rank 0] step:5641/10000 train_time:246152ms step_avg:43.64ms +[2025-09-11 13:56:14] [Rank 0] step:5661/10000 train_time:246840ms step_avg:43.60ms +[2025-09-11 13:56:14] [Rank 0] step:5661/10000 train_time:246840ms step_avg:43.60ms +[2025-09-11 13:56:14] [Rank 0] step:5681/10000 train_time:247529ms step_avg:43.57ms +[2025-09-11 13:56:14] [Rank 0] step:5681/10000 
train_time:247529ms step_avg:43.57ms +[2025-09-11 13:56:15] [Rank 0] step:5701/10000 train_time:248220ms step_avg:43.54ms +[2025-09-11 13:56:15] [Rank 0] step:5701/10000 train_time:248220ms step_avg:43.54ms +[2025-09-11 13:56:16] [Rank 0] step:5721/10000 train_time:248908ms step_avg:43.51ms +[2025-09-11 13:56:16] [Rank 0] step:5721/10000 train_time:248908ms step_avg:43.51ms +[2025-09-11 13:56:16] [Rank 0] step:5741/10000 train_time:249598ms step_avg:43.48ms +[2025-09-11 13:56:16] [Rank 0] step:5741/10000 train_time:249598ms step_avg:43.48ms +[2025-09-11 13:56:17] [Rank 0] step:5761/10000 train_time:250288ms step_avg:43.45ms +[2025-09-11 13:56:17] [Rank 0] step:5761/10000 train_time:250288ms step_avg:43.45ms +[2025-09-11 13:56:18] [Rank 0] step:5781/10000 train_time:250978ms step_avg:43.41ms +[2025-09-11 13:56:18] [Rank 0] step:5781/10000 train_time:250978ms step_avg:43.41ms +[2025-09-11 13:56:18] [Rank 0] step:5801/10000 train_time:251669ms step_avg:43.38ms +[2025-09-11 13:56:18] [Rank 0] step:5801/10000 train_time:251669ms step_avg:43.38ms +[2025-09-11 13:56:19] [Rank 0] step:5821/10000 train_time:252357ms step_avg:43.35ms +[2025-09-11 13:56:19] [Rank 0] step:5821/10000 train_time:252357ms step_avg:43.35ms +[2025-09-11 13:56:20] [Rank 0] step:5841/10000 train_time:253047ms step_avg:43.32ms +[2025-09-11 13:56:20] [Rank 0] step:5841/10000 train_time:253047ms step_avg:43.32ms +[2025-09-11 13:56:20] [Rank 0] step:5861/10000 train_time:253735ms step_avg:43.29ms +[2025-09-11 13:56:20] [Rank 0] step:5861/10000 train_time:253735ms step_avg:43.29ms +[2025-09-11 13:56:21] [Rank 0] step:5881/10000 train_time:254424ms step_avg:43.26ms +[2025-09-11 13:56:21] [Rank 0] step:5881/10000 train_time:254424ms step_avg:43.26ms +[2025-09-11 13:56:22] [Rank 0] step:5901/10000 train_time:255111ms step_avg:43.23ms +[2025-09-11 13:56:22] [Rank 0] step:5901/10000 train_time:255111ms step_avg:43.23ms +[2025-09-11 13:56:23] [Rank 0] step:5921/10000 train_time:255802ms step_avg:43.20ms 
+[2025-09-11 13:56:23] [Rank 0] step:5921/10000 train_time:255802ms step_avg:43.20ms +[2025-09-11 13:56:23] [Rank 0] step:5941/10000 train_time:256492ms step_avg:43.17ms +[2025-09-11 13:56:23] [Rank 0] step:5941/10000 train_time:256492ms step_avg:43.17ms +[2025-09-11 13:56:24] [Rank 0] step:5961/10000 train_time:257182ms step_avg:43.14ms +[2025-09-11 13:56:24] [Rank 0] step:5961/10000 train_time:257182ms step_avg:43.14ms +[2025-09-11 13:56:25] [Rank 0] step:5981/10000 train_time:257872ms step_avg:43.12ms +[2025-09-11 13:56:25] [Rank 0] step:5981/10000 train_time:257872ms step_avg:43.12ms +[2025-09-11 13:56:25] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 13:56:25] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 13:56:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 13:56:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 13:56:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 13:56:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 13:56:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:56:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:56:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 13:56:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 13:56:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 13:56:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 13:56:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 13:56:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 13:56:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 13:56:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 13:56:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 13:56:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 13:56:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 13:56:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 13:56:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 13:56:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 13:56:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 13:56:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 13:56:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 13:56:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 13:56:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 13:56:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 13:56:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 13:56:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 13:56:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 13:56:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 13:56:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 13:56:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 13:56:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 13:56:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 13:56:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 13:56:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 13:56:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 13:56:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 13:56:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 13:56:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 13:56:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 13:56:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 13:56:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 13:56:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:56:35] [Rank 0] PRINT: step:6000/10000 val_loss:5.5910 total_sharp:6.7280e-02 L1_sharp:1.7258e-02 L2_sharp:1.9175e-02 L3_sharp:1.9327e-02 L4_sharp:2.3877e-02 L5_sharp:3.3326e-02 L6_sharp:5.4473e-02 L7_sharp:1.0081e-01 L8_sharp:1.5867e-01 L9_sharp:2.1864e-01 L10_sharp:3.4663e-01 L11_sharp:4.9272e-01 L12_sharp:9.1499e-01 total_fnorm:1.4922e+00 total_l1_linf:1.6080e+03 total_spectral:7.6562e-01 L1_fnorm:1.1475e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1377e-01 L8_fnorm:1.1426e-01 L9_fnorm:1.1426e-01 L10_fnorm:1.1426e-01 L11_fnorm:1.1621e-01 L12_fnorm:1.0889e-01 L1_l1linf:2.9297e-02 L2_l1linf:3.0029e-02 L3_l1linf:2.9907e-02 L4_l1linf:2.9663e-02 L5_l1linf:3.0151e-02 L6_l1linf:2.9175e-02 L7_l1linf:2.8931e-02 L8_l1linf:2.8564e-02 L9_l1linf:2.8687e-02 L10_l1linf:2.8809e-02 L11_l1linf:3.0640e-02 L12_l1linf:2.2949e-02 L1_spectral:1.6291e-03 L2_spectral:1.6272e-03 L3_spectral:1.6285e-03 L4_spectral:1.6312e-03 L5_spectral:1.6269e-03 L6_spectral:1.6191e-03 L7_spectral:1.6251e-03 L8_spectral:1.6172e-03 L9_spectral:1.6155e-03 L10_spectral:1.6093e-03 L11_spectral:1.6140e-03 L12_spectral:1.6050e-03 train_time:258545ms step_avg:43.09ms +[2025-09-11 13:56:35] [Rank 0] PRINT: step:6000/10000 val_loss:5.5910 total_sharp:6.7280e-02 L1_sharp:1.7258e-02 L2_sharp:1.9175e-02 L3_sharp:1.9327e-02 L4_sharp:2.3877e-02 L5_sharp:3.3326e-02 L6_sharp:5.4473e-02 L7_sharp:1.0081e-01 L8_sharp:1.5867e-01 L9_sharp:2.1864e-01 L10_sharp:3.4663e-01 L11_sharp:4.9272e-01 L12_sharp:9.1499e-01 total_fnorm:1.4922e+00 total_l1_linf:1.6080e+03 total_spectral:7.6562e-01 L1_fnorm:1.1475e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1377e-01 L8_fnorm:1.1426e-01 L9_fnorm:1.1426e-01 L10_fnorm:1.1426e-01 L11_fnorm:1.1621e-01 L12_fnorm:1.0889e-01 L1_l1linf:2.9297e-02 L2_l1linf:3.0029e-02 L3_l1linf:2.9907e-02 L4_l1linf:2.9663e-02 L5_l1linf:3.0151e-02 
L6_l1linf:2.9175e-02 L7_l1linf:2.8931e-02 L8_l1linf:2.8564e-02 L9_l1linf:2.8687e-02 L10_l1linf:2.8809e-02 L11_l1linf:3.0640e-02 L12_l1linf:2.2949e-02 L1_spectral:1.6291e-03 L2_spectral:1.6272e-03 L3_spectral:1.6285e-03 L4_spectral:1.6312e-03 L5_spectral:1.6269e-03 L6_spectral:1.6191e-03 L7_spectral:1.6251e-03 L8_spectral:1.6172e-03 L9_spectral:1.6155e-03 L10_spectral:1.6093e-03 L11_spectral:1.6140e-03 L12_spectral:1.6050e-03 train_time:258545ms step_avg:43.09ms +[2025-09-11 13:56:37] [Rank 0] step:6001/10000 train_time:260002ms step_avg:43.33ms +[2025-09-11 13:56:37] [Rank 0] step:6001/10000 train_time:260002ms step_avg:43.33ms +[2025-09-11 13:56:38] [Rank 0] step:6021/10000 train_time:260708ms step_avg:43.30ms +[2025-09-11 13:56:38] [Rank 0] step:6021/10000 train_time:260708ms step_avg:43.30ms +[2025-09-11 13:56:38] [Rank 0] step:6041/10000 train_time:261401ms step_avg:43.27ms +[2025-09-11 13:56:38] [Rank 0] step:6041/10000 train_time:261401ms step_avg:43.27ms +[2025-09-11 13:56:39] [Rank 0] step:6061/10000 train_time:262092ms step_avg:43.24ms +[2025-09-11 13:56:39] [Rank 0] step:6061/10000 train_time:262092ms step_avg:43.24ms +[2025-09-11 13:56:40] [Rank 0] step:6081/10000 train_time:262784ms step_avg:43.21ms +[2025-09-11 13:56:40] [Rank 0] step:6081/10000 train_time:262784ms step_avg:43.21ms +[2025-09-11 13:56:40] [Rank 0] step:6101/10000 train_time:263475ms step_avg:43.19ms +[2025-09-11 13:56:40] [Rank 0] step:6101/10000 train_time:263475ms step_avg:43.19ms +[2025-09-11 13:56:41] [Rank 0] step:6121/10000 train_time:264166ms step_avg:43.16ms +[2025-09-11 13:56:41] [Rank 0] step:6121/10000 train_time:264166ms step_avg:43.16ms +[2025-09-11 13:56:42] [Rank 0] step:6141/10000 train_time:264858ms step_avg:43.13ms +[2025-09-11 13:56:42] [Rank 0] step:6141/10000 train_time:264858ms step_avg:43.13ms +[2025-09-11 13:56:42] [Rank 0] step:6161/10000 train_time:265548ms step_avg:43.10ms +[2025-09-11 13:56:42] [Rank 0] step:6161/10000 train_time:265548ms step_avg:43.10ms 
+[2025-09-11 13:56:43] [Rank 0] step:6181/10000 train_time:266238ms step_avg:43.07ms +[2025-09-11 13:56:43] [Rank 0] step:6181/10000 train_time:266238ms step_avg:43.07ms +[2025-09-11 13:56:44] [Rank 0] step:6201/10000 train_time:266930ms step_avg:43.05ms +[2025-09-11 13:56:44] [Rank 0] step:6201/10000 train_time:266930ms step_avg:43.05ms +[2025-09-11 13:56:44] [Rank 0] step:6221/10000 train_time:267622ms step_avg:43.02ms +[2025-09-11 13:56:44] [Rank 0] step:6221/10000 train_time:267622ms step_avg:43.02ms +[2025-09-11 13:56:45] [Rank 0] step:6241/10000 train_time:268313ms step_avg:42.99ms +[2025-09-11 13:56:45] [Rank 0] step:6241/10000 train_time:268313ms step_avg:42.99ms +[2025-09-11 13:56:46] [Rank 0] step:6261/10000 train_time:269002ms step_avg:42.96ms +[2025-09-11 13:56:46] [Rank 0] step:6261/10000 train_time:269002ms step_avg:42.96ms +[2025-09-11 13:56:47] [Rank 0] step:6281/10000 train_time:269693ms step_avg:42.94ms +[2025-09-11 13:56:47] [Rank 0] step:6281/10000 train_time:269693ms step_avg:42.94ms +[2025-09-11 13:56:47] [Rank 0] step:6301/10000 train_time:270382ms step_avg:42.91ms +[2025-09-11 13:56:47] [Rank 0] step:6301/10000 train_time:270382ms step_avg:42.91ms +[2025-09-11 13:56:48] [Rank 0] step:6321/10000 train_time:271076ms step_avg:42.89ms +[2025-09-11 13:56:48] [Rank 0] step:6321/10000 train_time:271076ms step_avg:42.89ms +[2025-09-11 13:56:49] [Rank 0] step:6341/10000 train_time:271769ms step_avg:42.86ms +[2025-09-11 13:56:49] [Rank 0] step:6341/10000 train_time:271769ms step_avg:42.86ms +[2025-09-11 13:56:49] [Rank 0] step:6361/10000 train_time:272460ms step_avg:42.83ms +[2025-09-11 13:56:49] [Rank 0] step:6361/10000 train_time:272460ms step_avg:42.83ms +[2025-09-11 13:56:50] [Rank 0] step:6381/10000 train_time:273151ms step_avg:42.81ms +[2025-09-11 13:56:50] [Rank 0] step:6381/10000 train_time:273151ms step_avg:42.81ms +[2025-09-11 13:56:51] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 13:56:51] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 13:56:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 13:56:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 13:56:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 13:56:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 13:56:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:56:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:56:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 13:56:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 13:56:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 13:56:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 13:56:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 13:56:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 13:56:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 13:56:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 13:56:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 13:56:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 13:56:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 13:56:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 13:56:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 13:56:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 13:56:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 13:56:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 13:56:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 13:56:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 13:56:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 13:56:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 13:56:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 13:56:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 13:56:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 13:56:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 13:56:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 13:56:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 13:56:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 13:56:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 13:57:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 13:57:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 13:57:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 13:57:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 13:57:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 13:57:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 13:57:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 13:57:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 13:57:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 13:57:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 13:57:01] [Rank 0] PRINT: step:6400/10000 val_loss:5.5627 total_sharp:7.2534e-02 L1_sharp:2.3931e-02 L2_sharp:1.8489e-02 L3_sharp:1.9226e-02 L4_sharp:2.2611e-02 L5_sharp:3.4410e-02 L6_sharp:5.4678e-02 L7_sharp:9.4815e-02 L8_sharp:1.5156e-01 L9_sharp:2.4723e-01 L10_sharp:4.0480e-01 L11_sharp:5.1109e-01 L12_sharp:8.3782e-01 total_fnorm:1.3125e+00 total_l1_linf:1.3280e+03 total_spectral:6.6406e-01 L1_fnorm:1.0107e-01 L2_fnorm:1.0156e-01 L3_fnorm:1.0205e-01 L4_fnorm:1.0107e-01 L5_fnorm:1.0059e-01 L6_fnorm:1.0059e-01 L7_fnorm:1.0059e-01 L8_fnorm:1.0010e-01 L9_fnorm:1.0059e-01 L10_fnorm:1.0059e-01 L11_fnorm:1.0107e-01 L12_fnorm:9.4238e-02 L1_l1linf:2.5024e-02 L2_l1linf:2.5635e-02 L3_l1linf:2.5391e-02 L4_l1linf:2.5391e-02 L5_l1linf:2.5513e-02 L6_l1linf:2.4780e-02 L7_l1linf:2.4902e-02 L8_l1linf:2.4170e-02 L9_l1linf:2.4658e-02 L10_l1linf:2.4902e-02 L11_l1linf:2.6611e-02 L12_l1linf:1.9287e-02 L1_spectral:1.4399e-03 L2_spectral:1.4473e-03 L3_spectral:1.4474e-03 L4_spectral:1.4476e-03 L5_spectral:1.4370e-03 L6_spectral:1.4347e-03 L7_spectral:1.4452e-03 L8_spectral:1.4483e-03 L9_spectral:1.4523e-03 L10_spectral:1.4346e-03 L11_spectral:1.4433e-03 L12_spectral:1.4364e-03 train_time:273821ms step_avg:42.78ms +[2025-09-11 13:57:01] [Rank 0] PRINT: step:6400/10000 
val_loss:5.5627 total_sharp:7.2534e-02 L1_sharp:2.3931e-02 L2_sharp:1.8489e-02 L3_sharp:1.9226e-02 L4_sharp:2.2611e-02 L5_sharp:3.4410e-02 L6_sharp:5.4678e-02 L7_sharp:9.4815e-02 L8_sharp:1.5156e-01 L9_sharp:2.4723e-01 L10_sharp:4.0480e-01 L11_sharp:5.1109e-01 L12_sharp:8.3782e-01 total_fnorm:1.3125e+00 total_l1_linf:1.3280e+03 total_spectral:6.6406e-01 L1_fnorm:1.0107e-01 L2_fnorm:1.0156e-01 L3_fnorm:1.0205e-01 L4_fnorm:1.0107e-01 L5_fnorm:1.0059e-01 L6_fnorm:1.0059e-01 L7_fnorm:1.0059e-01 L8_fnorm:1.0010e-01 L9_fnorm:1.0059e-01 L10_fnorm:1.0059e-01 L11_fnorm:1.0107e-01 L12_fnorm:9.4238e-02 L1_l1linf:2.5024e-02 L2_l1linf:2.5635e-02 L3_l1linf:2.5391e-02 L4_l1linf:2.5391e-02 L5_l1linf:2.5513e-02 L6_l1linf:2.4780e-02 L7_l1linf:2.4902e-02 L8_l1linf:2.4170e-02 L9_l1linf:2.4658e-02 L10_l1linf:2.4902e-02 L11_l1linf:2.6611e-02 L12_l1linf:1.9287e-02 L1_spectral:1.4399e-03 L2_spectral:1.4473e-03 L3_spectral:1.4474e-03 L4_spectral:1.4476e-03 L5_spectral:1.4370e-03 L6_spectral:1.4347e-03 L7_spectral:1.4452e-03 L8_spectral:1.4483e-03 L9_spectral:1.4523e-03 L10_spectral:1.4346e-03 L11_spectral:1.4433e-03 L12_spectral:1.4364e-03 train_time:273821ms step_avg:42.78ms +[2025-09-11 13:57:03] [Rank 0] step:6401/10000 train_time:275280ms step_avg:43.01ms +[2025-09-11 13:57:03] [Rank 0] step:6401/10000 train_time:275280ms step_avg:43.01ms +[2025-09-11 13:57:03] [Rank 0] step:6421/10000 train_time:275981ms step_avg:42.98ms +[2025-09-11 13:57:03] [Rank 0] step:6421/10000 train_time:275981ms step_avg:42.98ms +[2025-09-11 13:57:04] [Rank 0] step:6441/10000 train_time:276672ms step_avg:42.95ms +[2025-09-11 13:57:04] [Rank 0] step:6441/10000 train_time:276672ms step_avg:42.95ms +[2025-09-11 13:57:05] [Rank 0] step:6461/10000 train_time:277365ms step_avg:42.93ms +[2025-09-11 13:57:05] [Rank 0] step:6461/10000 train_time:277365ms step_avg:42.93ms +[2025-09-11 13:57:05] [Rank 0] step:6481/10000 train_time:278059ms step_avg:42.90ms +[2025-09-11 13:57:05] [Rank 0] step:6481/10000 
train_time:278059ms step_avg:42.90ms +[2025-09-11 13:57:06] [Rank 0] step:6501/10000 train_time:278754ms step_avg:42.88ms +[2025-09-11 13:57:06] [Rank 0] step:6501/10000 train_time:278754ms step_avg:42.88ms +[2025-09-11 13:57:07] [Rank 0] step:6521/10000 train_time:279446ms step_avg:42.85ms +[2025-09-11 13:57:07] [Rank 0] step:6521/10000 train_time:279446ms step_avg:42.85ms +[2025-09-11 13:57:07] [Rank 0] step:6541/10000 train_time:280137ms step_avg:42.83ms +[2025-09-11 13:57:07] [Rank 0] step:6541/10000 train_time:280137ms step_avg:42.83ms +[2025-09-11 13:57:08] [Rank 0] step:6561/10000 train_time:280828ms step_avg:42.80ms +[2025-09-11 13:57:08] [Rank 0] step:6561/10000 train_time:280828ms step_avg:42.80ms +[2025-09-11 13:57:09] [Rank 0] step:6581/10000 train_time:281520ms step_avg:42.78ms +[2025-09-11 13:57:09] [Rank 0] step:6581/10000 train_time:281520ms step_avg:42.78ms +[2025-09-11 13:57:09] [Rank 0] step:6601/10000 train_time:282212ms step_avg:42.75ms +[2025-09-11 13:57:09] [Rank 0] step:6601/10000 train_time:282212ms step_avg:42.75ms +[2025-09-11 13:57:10] [Rank 0] step:6621/10000 train_time:282901ms step_avg:42.73ms +[2025-09-11 13:57:10] [Rank 0] step:6621/10000 train_time:282901ms step_avg:42.73ms +[2025-09-11 13:57:11] [Rank 0] step:6641/10000 train_time:283594ms step_avg:42.70ms +[2025-09-11 13:57:11] [Rank 0] step:6641/10000 train_time:283594ms step_avg:42.70ms +[2025-09-11 13:57:12] [Rank 0] step:6661/10000 train_time:284285ms step_avg:42.68ms +[2025-09-11 13:57:12] [Rank 0] step:6661/10000 train_time:284285ms step_avg:42.68ms +[2025-09-11 13:57:12] [Rank 0] step:6681/10000 train_time:284983ms step_avg:42.66ms +[2025-09-11 13:57:12] [Rank 0] step:6681/10000 train_time:284983ms step_avg:42.66ms +[2025-09-11 13:57:13] [Rank 0] step:6701/10000 train_time:285681ms step_avg:42.63ms +[2025-09-11 13:57:13] [Rank 0] step:6701/10000 train_time:285681ms step_avg:42.63ms +[2025-09-11 13:57:14] [Rank 0] step:6721/10000 train_time:286378ms step_avg:42.61ms 
+[2025-09-11 13:57:14] [Rank 0] step:6721/10000 train_time:286378ms step_avg:42.61ms +[2025-09-11 13:57:14] [Rank 0] step:6741/10000 train_time:287076ms step_avg:42.59ms +[2025-09-11 13:57:14] [Rank 0] step:6741/10000 train_time:287076ms step_avg:42.59ms +[2025-09-11 13:57:15] [Rank 0] step:6761/10000 train_time:287772ms step_avg:42.56ms +[2025-09-11 13:57:15] [Rank 0] step:6761/10000 train_time:287772ms step_avg:42.56ms +[2025-09-11 13:57:16] [Rank 0] step:6781/10000 train_time:288470ms step_avg:42.54ms +[2025-09-11 13:57:16] [Rank 0] step:6781/10000 train_time:288470ms step_avg:42.54ms +[2025-09-11 13:57:16] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 13:57:16] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 13:57:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 13:57:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 13:57:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 13:57:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 13:57:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:57:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:57:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 13:57:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 13:57:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 13:57:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 13:57:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 13:57:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 13:57:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 13:57:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 13:57:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 13:57:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 13:57:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 13:57:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 13:57:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 13:57:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 13:57:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 13:57:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 13:57:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 13:57:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 13:57:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 13:57:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 13:57:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 13:57:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 13:57:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 13:57:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 13:57:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 13:57:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 13:57:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 13:57:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 13:57:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 13:57:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 13:57:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 13:57:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 13:57:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 13:57:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 13:57:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 13:57:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 13:57:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 13:57:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:57:27] [Rank 0] PRINT: step:6800/10000 val_loss:5.5374 total_sharp:6.3290e-02 L1_sharp:1.6839e-02 L2_sharp:1.7360e-02 L3_sharp:1.7653e-02 L4_sharp:2.1235e-02 L5_sharp:3.0100e-02 L6_sharp:5.1862e-02 L7_sharp:9.5193e-02 L8_sharp:1.3446e-01 L9_sharp:2.4112e-01 L10_sharp:3.6336e-01 L11_sharp:4.7528e-01 L12_sharp:7.6715e-01 total_fnorm:1.1406e+00 total_l1_linf:1.0960e+03 total_spectral:5.8984e-01 L1_fnorm:8.6426e-02 L2_fnorm:8.7402e-02 L3_fnorm:8.7891e-02 L4_fnorm:8.7402e-02 L5_fnorm:8.6426e-02 L6_fnorm:8.6426e-02 L7_fnorm:8.5449e-02 L8_fnorm:8.5938e-02 L9_fnorm:8.5938e-02 L10_fnorm:8.5938e-02 L11_fnorm:8.6914e-02 L12_fnorm:8.1055e-02 L1_l1linf:2.1118e-02 L2_l1linf:2.0874e-02 L3_l1linf:2.0996e-02 L4_l1linf:2.1118e-02 L5_l1linf:2.0874e-02 L6_l1linf:2.0508e-02 L7_l1linf:2.0508e-02 L8_l1linf:2.0508e-02 L9_l1linf:2.0386e-02 L10_l1linf:2.0386e-02 L11_l1linf:2.1973e-02 L12_l1linf:1.5991e-02 L1_spectral:1.2948e-03 L2_spectral:1.2978e-03 L3_spectral:1.2990e-03 L4_spectral:1.2995e-03 L5_spectral:1.2913e-03 L6_spectral:1.2997e-03 L7_spectral:1.2946e-03 L8_spectral:1.2889e-03 L9_spectral:1.2964e-03 L10_spectral:1.2922e-03 L11_spectral:1.2816e-03 L12_spectral:1.2425e-03 train_time:289147ms step_avg:42.52ms +[2025-09-11 13:57:27] [Rank 0] PRINT: step:6800/10000 val_loss:5.5374 total_sharp:6.3290e-02 L1_sharp:1.6839e-02 L2_sharp:1.7360e-02 L3_sharp:1.7653e-02 L4_sharp:2.1235e-02 L5_sharp:3.0100e-02 L6_sharp:5.1862e-02 L7_sharp:9.5193e-02 L8_sharp:1.3446e-01 L9_sharp:2.4112e-01 L10_sharp:3.6336e-01 L11_sharp:4.7528e-01 L12_sharp:7.6715e-01 total_fnorm:1.1406e+00 total_l1_linf:1.0960e+03 total_spectral:5.8984e-01 L1_fnorm:8.6426e-02 L2_fnorm:8.7402e-02 L3_fnorm:8.7891e-02 L4_fnorm:8.7402e-02 L5_fnorm:8.6426e-02 L6_fnorm:8.6426e-02 L7_fnorm:8.5449e-02 L8_fnorm:8.5938e-02 L9_fnorm:8.5938e-02 L10_fnorm:8.5938e-02 L11_fnorm:8.6914e-02 L12_fnorm:8.1055e-02 L1_l1linf:2.1118e-02 L2_l1linf:2.0874e-02 L3_l1linf:2.0996e-02 L4_l1linf:2.1118e-02 L5_l1linf:2.0874e-02 
L6_l1linf:2.0508e-02 L7_l1linf:2.0508e-02 L8_l1linf:2.0508e-02 L9_l1linf:2.0386e-02 L10_l1linf:2.0386e-02 L11_l1linf:2.1973e-02 L12_l1linf:1.5991e-02 L1_spectral:1.2948e-03 L2_spectral:1.2978e-03 L3_spectral:1.2990e-03 L4_spectral:1.2995e-03 L5_spectral:1.2913e-03 L6_spectral:1.2997e-03 L7_spectral:1.2946e-03 L8_spectral:1.2889e-03 L9_spectral:1.2964e-03 L10_spectral:1.2922e-03 L11_spectral:1.2816e-03 L12_spectral:1.2425e-03 train_time:289147ms step_avg:42.52ms +[2025-09-11 13:57:28] [Rank 0] step:6801/10000 train_time:290618ms step_avg:42.73ms +[2025-09-11 13:57:28] [Rank 0] step:6801/10000 train_time:290618ms step_avg:42.73ms +[2025-09-11 13:57:29] [Rank 0] step:6821/10000 train_time:291340ms step_avg:42.71ms +[2025-09-11 13:57:29] [Rank 0] step:6821/10000 train_time:291340ms step_avg:42.71ms +[2025-09-11 13:57:30] [Rank 0] step:6841/10000 train_time:292042ms step_avg:42.69ms +[2025-09-11 13:57:30] [Rank 0] step:6841/10000 train_time:292042ms step_avg:42.69ms +[2025-09-11 13:57:31] [Rank 0] step:6861/10000 train_time:292741ms step_avg:42.67ms +[2025-09-11 13:57:31] [Rank 0] step:6861/10000 train_time:292741ms step_avg:42.67ms +[2025-09-11 13:57:31] [Rank 0] step:6881/10000 train_time:293443ms step_avg:42.65ms +[2025-09-11 13:57:31] [Rank 0] step:6881/10000 train_time:293443ms step_avg:42.65ms +[2025-09-11 13:57:32] [Rank 0] step:6901/10000 train_time:294142ms step_avg:42.62ms +[2025-09-11 13:57:32] [Rank 0] step:6901/10000 train_time:294142ms step_avg:42.62ms +[2025-09-11 13:57:33] [Rank 0] step:6921/10000 train_time:295150ms step_avg:42.65ms +[2025-09-11 13:57:33] [Rank 0] step:6921/10000 train_time:295150ms step_avg:42.65ms +[2025-09-11 13:57:34] [Rank 0] step:6941/10000 train_time:295852ms step_avg:42.62ms +[2025-09-11 13:57:34] [Rank 0] step:6941/10000 train_time:295852ms step_avg:42.62ms +[2025-09-11 13:57:34] [Rank 0] step:6961/10000 train_time:296553ms step_avg:42.60ms +[2025-09-11 13:57:34] [Rank 0] step:6961/10000 train_time:296553ms step_avg:42.60ms 
+[2025-09-11 13:57:35] [Rank 0] step:6981/10000 train_time:297550ms step_avg:42.62ms +[2025-09-11 13:57:35] [Rank 0] step:6981/10000 train_time:297550ms step_avg:42.62ms +[2025-09-11 13:57:36] [Rank 0] step:7001/10000 train_time:298249ms step_avg:42.60ms +[2025-09-11 13:57:36] [Rank 0] step:7001/10000 train_time:298249ms step_avg:42.60ms +[2025-09-11 13:57:37] [Rank 0] step:7021/10000 train_time:298948ms step_avg:42.58ms +[2025-09-11 13:57:37] [Rank 0] step:7021/10000 train_time:298948ms step_avg:42.58ms +[2025-09-11 13:57:37] [Rank 0] step:7041/10000 train_time:299645ms step_avg:42.56ms +[2025-09-11 13:57:37] [Rank 0] step:7041/10000 train_time:299645ms step_avg:42.56ms +[2025-09-11 13:57:38] [Rank 0] step:7061/10000 train_time:300345ms step_avg:42.54ms +[2025-09-11 13:57:38] [Rank 0] step:7061/10000 train_time:300345ms step_avg:42.54ms +[2025-09-11 13:57:39] [Rank 0] step:7081/10000 train_time:301042ms step_avg:42.51ms +[2025-09-11 13:57:39] [Rank 0] step:7081/10000 train_time:301042ms step_avg:42.51ms +[2025-09-11 13:57:40] [Rank 0] step:7101/10000 train_time:301740ms step_avg:42.49ms +[2025-09-11 13:57:40] [Rank 0] step:7101/10000 train_time:301740ms step_avg:42.49ms +[2025-09-11 13:57:40] [Rank 0] step:7121/10000 train_time:302441ms step_avg:42.47ms +[2025-09-11 13:57:40] [Rank 0] step:7121/10000 train_time:302441ms step_avg:42.47ms +[2025-09-11 13:57:41] [Rank 0] step:7141/10000 train_time:303140ms step_avg:42.45ms +[2025-09-11 13:57:41] [Rank 0] step:7141/10000 train_time:303140ms step_avg:42.45ms +[2025-09-11 13:57:42] [Rank 0] step:7161/10000 train_time:303838ms step_avg:42.43ms +[2025-09-11 13:57:42] [Rank 0] step:7161/10000 train_time:303838ms step_avg:42.43ms +[2025-09-11 13:57:42] [Rank 0] step:7181/10000 train_time:304536ms step_avg:42.41ms +[2025-09-11 13:57:42] [Rank 0] step:7181/10000 train_time:304536ms step_avg:42.41ms +[2025-09-11 13:57:43] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 13:57:43] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 13:57:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 13:57:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 13:57:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 13:57:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 13:57:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:57:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:57:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 13:57:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 13:57:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 13:57:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 13:57:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 13:57:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 13:57:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 13:57:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 13:57:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 13:57:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 13:57:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 13:57:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 13:57:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 13:57:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 13:57:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 13:57:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 13:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 13:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 13:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 13:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 13:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 13:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 13:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 13:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 13:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 13:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 13:57:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 13:57:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 13:57:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 13:57:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 13:57:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 13:57:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 13:57:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 13:57:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 13:57:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 13:57:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 13:57:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 13:57:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 13:57:53] [Rank 0] PRINT: step:7200/10000 val_loss:5.5173 total_sharp:6.2597e-02 L1_sharp:1.2799e-02 L2_sharp:1.1420e-02 L3_sharp:1.3710e-02 L4_sharp:1.7851e-02 L5_sharp:2.4304e-02 L6_sharp:3.6676e-02 L7_sharp:7.2158e-02 L8_sharp:1.1007e-01 L9_sharp:1.8304e-01 L10_sharp:3.3799e-01 L11_sharp:4.6766e-01 L12_sharp:9.6053e-01 total_fnorm:9.4141e-01 total_l1_linf:8.4000e+02 total_spectral:4.7656e-01 L1_fnorm:7.3730e-02 L2_fnorm:7.4219e-02 L3_fnorm:7.4707e-02 L4_fnorm:7.4219e-02 L5_fnorm:7.3242e-02 L6_fnorm:7.3242e-02 L7_fnorm:7.3242e-02 L8_fnorm:7.2754e-02 L9_fnorm:7.2754e-02 L10_fnorm:7.2266e-02 L11_fnorm:7.3730e-02 L12_fnorm:6.7383e-02 L1_l1linf:1.6846e-02 L2_l1linf:1.7334e-02 L3_l1linf:1.7578e-02 L4_l1linf:1.7578e-02 L5_l1linf:1.7456e-02 L6_l1linf:1.6846e-02 L7_l1linf:1.6724e-02 L8_l1linf:1.6357e-02 L9_l1linf:1.6479e-02 L10_l1linf:1.6846e-02 L11_l1linf:1.7700e-02 L12_l1linf:1.2329e-02 L1_spectral:1.1514e-03 L2_spectral:1.1549e-03 L3_spectral:1.1488e-03 L4_spectral:1.1522e-03 L5_spectral:1.1529e-03 L6_spectral:1.1533e-03 L7_spectral:1.1410e-03 L8_spectral:1.1449e-03 L9_spectral:1.1433e-03 L10_spectral:1.1297e-03 L11_spectral:1.1211e-03 L12_spectral:1.0594e-03 train_time:305215ms step_avg:42.39ms +[2025-09-11 13:57:53] [Rank 0] PRINT: step:7200/10000 
val_loss:5.5173 total_sharp:6.2597e-02 L1_sharp:1.2799e-02 L2_sharp:1.1420e-02 L3_sharp:1.3710e-02 L4_sharp:1.7851e-02 L5_sharp:2.4304e-02 L6_sharp:3.6676e-02 L7_sharp:7.2158e-02 L8_sharp:1.1007e-01 L9_sharp:1.8304e-01 L10_sharp:3.3799e-01 L11_sharp:4.6766e-01 L12_sharp:9.6053e-01 total_fnorm:9.4141e-01 total_l1_linf:8.4000e+02 total_spectral:4.7656e-01 L1_fnorm:7.3730e-02 L2_fnorm:7.4219e-02 L3_fnorm:7.4707e-02 L4_fnorm:7.4219e-02 L5_fnorm:7.3242e-02 L6_fnorm:7.3242e-02 L7_fnorm:7.3242e-02 L8_fnorm:7.2754e-02 L9_fnorm:7.2754e-02 L10_fnorm:7.2266e-02 L11_fnorm:7.3730e-02 L12_fnorm:6.7383e-02 L1_l1linf:1.6846e-02 L2_l1linf:1.7334e-02 L3_l1linf:1.7578e-02 L4_l1linf:1.7578e-02 L5_l1linf:1.7456e-02 L6_l1linf:1.6846e-02 L7_l1linf:1.6724e-02 L8_l1linf:1.6357e-02 L9_l1linf:1.6479e-02 L10_l1linf:1.6846e-02 L11_l1linf:1.7700e-02 L12_l1linf:1.2329e-02 L1_spectral:1.1514e-03 L2_spectral:1.1549e-03 L3_spectral:1.1488e-03 L4_spectral:1.1522e-03 L5_spectral:1.1529e-03 L6_spectral:1.1533e-03 L7_spectral:1.1410e-03 L8_spectral:1.1449e-03 L9_spectral:1.1433e-03 L10_spectral:1.1297e-03 L11_spectral:1.1211e-03 L12_spectral:1.0594e-03 train_time:305215ms step_avg:42.39ms +[2025-09-11 13:57:55] [Rank 0] step:7201/10000 train_time:306847ms step_avg:42.61ms +[2025-09-11 13:57:55] [Rank 0] step:7201/10000 train_time:306847ms step_avg:42.61ms +[2025-09-11 13:57:56] [Rank 0] step:7221/10000 train_time:307556ms step_avg:42.59ms +[2025-09-11 13:57:56] [Rank 0] step:7221/10000 train_time:307556ms step_avg:42.59ms +[2025-09-11 13:57:57] [Rank 0] step:7241/10000 train_time:308257ms step_avg:42.57ms +[2025-09-11 13:57:57] [Rank 0] step:7241/10000 train_time:308257ms step_avg:42.57ms +[2025-09-11 13:57:57] [Rank 0] step:7261/10000 train_time:308959ms step_avg:42.55ms +[2025-09-11 13:57:57] [Rank 0] step:7261/10000 train_time:308959ms step_avg:42.55ms +[2025-09-11 13:57:58] [Rank 0] step:7281/10000 train_time:309668ms step_avg:42.53ms +[2025-09-11 13:57:58] [Rank 0] step:7281/10000 
train_time:309668ms step_avg:42.53ms +[2025-09-11 13:57:59] [Rank 0] step:7301/10000 train_time:310368ms step_avg:42.51ms +[2025-09-11 13:57:59] [Rank 0] step:7301/10000 train_time:310368ms step_avg:42.51ms +[2025-09-11 13:57:59] [Rank 0] step:7321/10000 train_time:311069ms step_avg:42.49ms +[2025-09-11 13:57:59] [Rank 0] step:7321/10000 train_time:311069ms step_avg:42.49ms +[2025-09-11 13:58:00] [Rank 0] step:7341/10000 train_time:311770ms step_avg:42.47ms +[2025-09-11 13:58:00] [Rank 0] step:7341/10000 train_time:311770ms step_avg:42.47ms +[2025-09-11 13:58:01] [Rank 0] step:7361/10000 train_time:312470ms step_avg:42.45ms +[2025-09-11 13:58:01] [Rank 0] step:7361/10000 train_time:312470ms step_avg:42.45ms +[2025-09-11 13:58:01] [Rank 0] step:7381/10000 train_time:313172ms step_avg:42.43ms +[2025-09-11 13:58:01] [Rank 0] step:7381/10000 train_time:313172ms step_avg:42.43ms +[2025-09-11 13:58:02] [Rank 0] step:7401/10000 train_time:313871ms step_avg:42.41ms +[2025-09-11 13:58:02] [Rank 0] step:7401/10000 train_time:313871ms step_avg:42.41ms +[2025-09-11 13:58:03] [Rank 0] step:7421/10000 train_time:314571ms step_avg:42.39ms +[2025-09-11 13:58:03] [Rank 0] step:7421/10000 train_time:314571ms step_avg:42.39ms +[2025-09-11 13:58:04] [Rank 0] step:7441/10000 train_time:315273ms step_avg:42.37ms +[2025-09-11 13:58:04] [Rank 0] step:7441/10000 train_time:315273ms step_avg:42.37ms +[2025-09-11 13:58:04] [Rank 0] step:7461/10000 train_time:315973ms step_avg:42.35ms +[2025-09-11 13:58:04] [Rank 0] step:7461/10000 train_time:315973ms step_avg:42.35ms +[2025-09-11 13:58:05] [Rank 0] step:7481/10000 train_time:316676ms step_avg:42.33ms +[2025-09-11 13:58:05] [Rank 0] step:7481/10000 train_time:316676ms step_avg:42.33ms +[2025-09-11 13:58:06] [Rank 0] step:7501/10000 train_time:317376ms step_avg:42.31ms +[2025-09-11 13:58:06] [Rank 0] step:7501/10000 train_time:317376ms step_avg:42.31ms +[2025-09-11 13:58:06] [Rank 0] step:7521/10000 train_time:318078ms step_avg:42.29ms 
+[2025-09-11 13:58:06] [Rank 0] step:7521/10000 train_time:318078ms step_avg:42.29ms +[2025-09-11 13:58:07] [Rank 0] step:7541/10000 train_time:318776ms step_avg:42.27ms +[2025-09-11 13:58:07] [Rank 0] step:7541/10000 train_time:318776ms step_avg:42.27ms +[2025-09-11 13:58:08] [Rank 0] step:7561/10000 train_time:319479ms step_avg:42.25ms +[2025-09-11 13:58:08] [Rank 0] step:7561/10000 train_time:319479ms step_avg:42.25ms +[2025-09-11 13:58:08] [Rank 0] step:7581/10000 train_time:320181ms step_avg:42.23ms +[2025-09-11 13:58:08] [Rank 0] step:7581/10000 train_time:320181ms step_avg:42.23ms +[2025-09-11 13:58:09] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 13:58:09] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 13:58:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 13:58:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 13:58:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 13:58:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 13:58:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:58:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:58:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 13:58:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 13:58:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 13:58:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 13:58:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 13:58:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 13:58:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 13:58:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 13:58:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 13:58:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 13:58:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 13:58:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 13:58:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 13:58:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 13:58:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 13:58:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 13:58:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 13:58:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 13:58:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 13:58:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 13:58:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 13:58:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 13:58:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 13:58:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 13:58:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 13:58:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 13:58:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 13:58:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 13:58:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 13:58:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 13:58:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 13:58:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 13:58:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 13:58:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 13:58:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 13:58:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 13:58:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 13:58:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:58:20] [Rank 0] PRINT: step:7600/10000 val_loss:5.5035 total_sharp:8.2586e-02 L1_sharp:1.2308e-02 L2_sharp:1.4464e-02 L3_sharp:1.4412e-02 L4_sharp:1.3924e-02 L5_sharp:2.3155e-02 L6_sharp:4.0055e-02 L7_sharp:8.5298e-02 L8_sharp:1.2322e-01 L9_sharp:2.1701e-01 L10_sharp:3.7251e-01 L11_sharp:5.2390e-01 L12_sharp:1.2663e+00 total_fnorm:6.8750e-01 total_l1_linf:6.1600e+02 total_spectral:3.5742e-01 L1_fnorm:6.0791e-02 L2_fnorm:6.1279e-02 L3_fnorm:6.1523e-02 L4_fnorm:6.1279e-02 L5_fnorm:6.0791e-02 L6_fnorm:6.0547e-02 L7_fnorm:6.0303e-02 L8_fnorm:6.0303e-02 L9_fnorm:6.0303e-02 L10_fnorm:5.9814e-02 L11_fnorm:6.0303e-02 L12_fnorm:5.4688e-02 L1_l1linf:1.3550e-02 L2_l1linf:1.3672e-02 L3_l1linf:1.3733e-02 L4_l1linf:1.3855e-02 L5_l1linf:1.3611e-02 L6_l1linf:1.3184e-02 L7_l1linf:1.3000e-02 L8_l1linf:1.3062e-02 L9_l1linf:1.3428e-02 L10_l1linf:1.3184e-02 L11_l1linf:1.3855e-02 L12_l1linf:9.8877e-03 L1_spectral:9.9906e-04 L2_spectral:1.0061e-03 L3_spectral:1.0098e-03 L4_spectral:1.0034e-03 L5_spectral:9.9738e-04 L6_spectral:9.9420e-04 L7_spectral:9.9121e-04 L8_spectral:9.9752e-04 L9_spectral:9.9216e-04 L10_spectral:9.8120e-04 L11_spectral:9.5474e-04 L12_spectral:8.8356e-04 train_time:320862ms step_avg:42.22ms +[2025-09-11 13:58:20] [Rank 0] PRINT: step:7600/10000 val_loss:5.5035 total_sharp:8.2586e-02 L1_sharp:1.2308e-02 L2_sharp:1.4464e-02 L3_sharp:1.4412e-02 L4_sharp:1.3924e-02 L5_sharp:2.3155e-02 L6_sharp:4.0055e-02 L7_sharp:8.5298e-02 L8_sharp:1.2322e-01 L9_sharp:2.1701e-01 L10_sharp:3.7251e-01 L11_sharp:5.2390e-01 L12_sharp:1.2663e+00 total_fnorm:6.8750e-01 total_l1_linf:6.1600e+02 total_spectral:3.5742e-01 L1_fnorm:6.0791e-02 L2_fnorm:6.1279e-02 L3_fnorm:6.1523e-02 L4_fnorm:6.1279e-02 L5_fnorm:6.0791e-02 L6_fnorm:6.0547e-02 L7_fnorm:6.0303e-02 L8_fnorm:6.0303e-02 L9_fnorm:6.0303e-02 L10_fnorm:5.9814e-02 L11_fnorm:6.0303e-02 L12_fnorm:5.4688e-02 L1_l1linf:1.3550e-02 L2_l1linf:1.3672e-02 L3_l1linf:1.3733e-02 L4_l1linf:1.3855e-02 L5_l1linf:1.3611e-02 
L6_l1linf:1.3184e-02 L7_l1linf:1.3000e-02 L8_l1linf:1.3062e-02 L9_l1linf:1.3428e-02 L10_l1linf:1.3184e-02 L11_l1linf:1.3855e-02 L12_l1linf:9.8877e-03 L1_spectral:9.9906e-04 L2_spectral:1.0061e-03 L3_spectral:1.0098e-03 L4_spectral:1.0034e-03 L5_spectral:9.9738e-04 L6_spectral:9.9420e-04 L7_spectral:9.9121e-04 L8_spectral:9.9752e-04 L9_spectral:9.9216e-04 L10_spectral:9.8120e-04 L11_spectral:9.5474e-04 L12_spectral:8.8356e-04 train_time:320862ms step_avg:42.22ms +[2025-09-11 13:58:22] [Rank 0] step:7601/10000 train_time:322495ms step_avg:42.43ms +[2025-09-11 13:58:22] [Rank 0] step:7601/10000 train_time:322495ms step_avg:42.43ms +[2025-09-11 13:58:22] [Rank 0] step:7621/10000 train_time:323222ms step_avg:42.41ms +[2025-09-11 13:58:22] [Rank 0] step:7621/10000 train_time:323222ms step_avg:42.41ms +[2025-09-11 13:58:23] [Rank 0] step:7641/10000 train_time:323924ms step_avg:42.39ms +[2025-09-11 13:58:23] [Rank 0] step:7641/10000 train_time:323924ms step_avg:42.39ms +[2025-09-11 13:58:24] [Rank 0] step:7661/10000 train_time:324624ms step_avg:42.37ms +[2025-09-11 13:58:24] [Rank 0] step:7661/10000 train_time:324624ms step_avg:42.37ms +[2025-09-11 13:58:24] [Rank 0] step:7681/10000 train_time:325325ms step_avg:42.35ms +[2025-09-11 13:58:24] [Rank 0] step:7681/10000 train_time:325325ms step_avg:42.35ms +[2025-09-11 13:58:25] [Rank 0] step:7701/10000 train_time:326027ms step_avg:42.34ms +[2025-09-11 13:58:25] [Rank 0] step:7701/10000 train_time:326027ms step_avg:42.34ms +[2025-09-11 13:58:26] [Rank 0] step:7721/10000 train_time:326729ms step_avg:42.32ms +[2025-09-11 13:58:26] [Rank 0] step:7721/10000 train_time:326729ms step_avg:42.32ms +[2025-09-11 13:58:27] [Rank 0] step:7741/10000 train_time:327432ms step_avg:42.30ms +[2025-09-11 13:58:27] [Rank 0] step:7741/10000 train_time:327432ms step_avg:42.30ms +[2025-09-11 13:58:27] [Rank 0] step:7761/10000 train_time:328132ms step_avg:42.28ms +[2025-09-11 13:58:27] [Rank 0] step:7761/10000 train_time:328132ms step_avg:42.28ms 
+[2025-09-11 13:58:28] [Rank 0] step:7781/10000 train_time:328836ms step_avg:42.26ms +[2025-09-11 13:58:28] [Rank 0] step:7781/10000 train_time:328836ms step_avg:42.26ms +[2025-09-11 13:58:29] [Rank 0] step:7801/10000 train_time:329536ms step_avg:42.24ms +[2025-09-11 13:58:29] [Rank 0] step:7801/10000 train_time:329536ms step_avg:42.24ms +[2025-09-11 13:58:29] [Rank 0] step:7821/10000 train_time:330237ms step_avg:42.22ms +[2025-09-11 13:58:29] [Rank 0] step:7821/10000 train_time:330237ms step_avg:42.22ms +[2025-09-11 13:58:30] [Rank 0] step:7841/10000 train_time:330939ms step_avg:42.21ms +[2025-09-11 13:58:30] [Rank 0] step:7841/10000 train_time:330939ms step_avg:42.21ms +[2025-09-11 13:58:31] [Rank 0] step:7861/10000 train_time:331644ms step_avg:42.19ms +[2025-09-11 13:58:31] [Rank 0] step:7861/10000 train_time:331644ms step_avg:42.19ms +[2025-09-11 13:58:32] [Rank 0] step:7881/10000 train_time:332345ms step_avg:42.17ms +[2025-09-11 13:58:32] [Rank 0] step:7881/10000 train_time:332345ms step_avg:42.17ms +[2025-09-11 13:58:32] [Rank 0] step:7901/10000 train_time:333048ms step_avg:42.15ms +[2025-09-11 13:58:32] [Rank 0] step:7901/10000 train_time:333048ms step_avg:42.15ms +[2025-09-11 13:58:33] [Rank 0] step:7921/10000 train_time:333750ms step_avg:42.13ms +[2025-09-11 13:58:33] [Rank 0] step:7921/10000 train_time:333750ms step_avg:42.13ms +[2025-09-11 13:58:34] [Rank 0] step:7941/10000 train_time:334453ms step_avg:42.12ms +[2025-09-11 13:58:34] [Rank 0] step:7941/10000 train_time:334453ms step_avg:42.12ms +[2025-09-11 13:58:34] [Rank 0] step:7961/10000 train_time:335153ms step_avg:42.10ms +[2025-09-11 13:58:34] [Rank 0] step:7961/10000 train_time:335153ms step_avg:42.10ms +[2025-09-11 13:58:35] [Rank 0] step:7981/10000 train_time:335858ms step_avg:42.08ms +[2025-09-11 13:58:35] [Rank 0] step:7981/10000 train_time:335858ms step_avg:42.08ms +[2025-09-11 13:58:36] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 13:58:36] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 13:58:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 13:58:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 13:58:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 13:58:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 13:58:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:58:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:58:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 13:58:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 13:58:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 13:58:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 13:58:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 13:58:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 13:58:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 13:58:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 13:58:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 13:58:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 13:58:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 13:58:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 13:58:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 13:58:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 13:58:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 13:58:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 13:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 13:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 13:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 13:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 13:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 13:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 13:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 13:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 13:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 13:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 13:58:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 13:58:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 13:58:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 13:58:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 13:58:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 13:58:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 13:58:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 13:58:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 13:58:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 13:58:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 13:58:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 13:58:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 13:58:46] [Rank 0] PRINT: step:8000/10000 val_loss:5.4932 total_sharp:7.4858e-02 L1_sharp:1.2173e-02 L2_sharp:1.2288e-02 L3_sharp:8.2724e-03 L4_sharp:1.6698e-02 L5_sharp:2.1476e-02 L6_sharp:4.2311e-02 L7_sharp:9.1832e-02 L8_sharp:1.4485e-01 L9_sharp:2.3795e-01 L10_sharp:3.5442e-01 L11_sharp:5.0243e-01 L12_sharp:7.2168e-01 total_fnorm:5.7812e-01 total_l1_linf:4.6600e+02 total_spectral:2.9102e-01 L1_fnorm:4.8828e-02 L2_fnorm:4.9561e-02 L3_fnorm:4.9805e-02 L4_fnorm:4.9316e-02 L5_fnorm:4.8828e-02 L6_fnorm:4.8828e-02 L7_fnorm:4.8584e-02 L8_fnorm:4.8584e-02 L9_fnorm:4.8828e-02 L10_fnorm:4.8096e-02 L11_fnorm:4.8584e-02 L12_fnorm:4.3945e-02 L1_l1linf:1.0254e-02 L2_l1linf:1.0437e-02 L3_l1linf:1.0254e-02 L4_l1linf:1.0498e-02 L5_l1linf:1.0132e-02 L6_l1linf:1.0132e-02 L7_l1linf:1.0132e-02 L8_l1linf:9.8267e-03 L9_l1linf:1.0071e-02 L10_l1linf:1.0193e-02 L11_l1linf:1.0620e-02 L12_l1linf:7.2937e-03 L1_spectral:8.3935e-04 L2_spectral:8.4012e-04 L3_spectral:8.5180e-04 L4_spectral:8.4485e-04 L5_spectral:8.4071e-04 L6_spectral:8.3652e-04 L7_spectral:8.3021e-04 L8_spectral:8.2494e-04 L9_spectral:8.2726e-04 L10_spectral:8.0063e-04 L11_spectral:7.9083e-04 L12_spectral:7.1543e-04 train_time:336699ms step_avg:42.09ms +[2025-09-11 13:58:46] [Rank 0] PRINT: step:8000/10000 
val_loss:5.4932 total_sharp:7.4858e-02 L1_sharp:1.2173e-02 L2_sharp:1.2288e-02 L3_sharp:8.2724e-03 L4_sharp:1.6698e-02 L5_sharp:2.1476e-02 L6_sharp:4.2311e-02 L7_sharp:9.1832e-02 L8_sharp:1.4485e-01 L9_sharp:2.3795e-01 L10_sharp:3.5442e-01 L11_sharp:5.0243e-01 L12_sharp:7.2168e-01 total_fnorm:5.7812e-01 total_l1_linf:4.6600e+02 total_spectral:2.9102e-01 L1_fnorm:4.8828e-02 L2_fnorm:4.9561e-02 L3_fnorm:4.9805e-02 L4_fnorm:4.9316e-02 L5_fnorm:4.8828e-02 L6_fnorm:4.8828e-02 L7_fnorm:4.8584e-02 L8_fnorm:4.8584e-02 L9_fnorm:4.8828e-02 L10_fnorm:4.8096e-02 L11_fnorm:4.8584e-02 L12_fnorm:4.3945e-02 L1_l1linf:1.0254e-02 L2_l1linf:1.0437e-02 L3_l1linf:1.0254e-02 L4_l1linf:1.0498e-02 L5_l1linf:1.0132e-02 L6_l1linf:1.0132e-02 L7_l1linf:1.0132e-02 L8_l1linf:9.8267e-03 L9_l1linf:1.0071e-02 L10_l1linf:1.0193e-02 L11_l1linf:1.0620e-02 L12_l1linf:7.2937e-03 L1_spectral:8.3935e-04 L2_spectral:8.4012e-04 L3_spectral:8.5180e-04 L4_spectral:8.4485e-04 L5_spectral:8.4071e-04 L6_spectral:8.3652e-04 L7_spectral:8.3021e-04 L8_spectral:8.2494e-04 L9_spectral:8.2726e-04 L10_spectral:8.0063e-04 L11_spectral:7.9083e-04 L12_spectral:7.1543e-04 train_time:336699ms step_avg:42.09ms +[2025-09-11 13:58:48] [Rank 0] step:8001/10000 train_time:338394ms step_avg:42.29ms +[2025-09-11 13:58:48] [Rank 0] step:8001/10000 train_time:338394ms step_avg:42.29ms +[2025-09-11 13:58:49] [Rank 0] step:8021/10000 train_time:339115ms step_avg:42.28ms +[2025-09-11 13:58:49] [Rank 0] step:8021/10000 train_time:339115ms step_avg:42.28ms +[2025-09-11 13:58:49] [Rank 0] step:8041/10000 train_time:339818ms step_avg:42.26ms +[2025-09-11 13:58:49] [Rank 0] step:8041/10000 train_time:339818ms step_avg:42.26ms +[2025-09-11 13:58:50] [Rank 0] step:8061/10000 train_time:340522ms step_avg:42.24ms +[2025-09-11 13:58:50] [Rank 0] step:8061/10000 train_time:340522ms step_avg:42.24ms +[2025-09-11 13:58:51] [Rank 0] step:8081/10000 train_time:341223ms step_avg:42.23ms +[2025-09-11 13:58:51] [Rank 0] step:8081/10000 
train_time:341223ms step_avg:42.23ms +[2025-09-11 13:58:52] [Rank 0] step:8101/10000 train_time:341926ms step_avg:42.21ms +[2025-09-11 13:58:52] [Rank 0] step:8101/10000 train_time:341926ms step_avg:42.21ms +[2025-09-11 13:58:52] [Rank 0] step:8121/10000 train_time:342633ms step_avg:42.19ms +[2025-09-11 13:58:52] [Rank 0] step:8121/10000 train_time:342633ms step_avg:42.19ms +[2025-09-11 13:58:54] [Rank 0] step:8141/10000 train_time:344086ms step_avg:42.27ms +[2025-09-11 13:58:54] [Rank 0] step:8141/10000 train_time:344086ms step_avg:42.27ms +[2025-09-11 13:58:54] [Rank 0] step:8161/10000 train_time:344791ms step_avg:42.25ms +[2025-09-11 13:58:54] [Rank 0] step:8161/10000 train_time:344791ms step_avg:42.25ms +[2025-09-11 13:58:55] [Rank 0] step:8181/10000 train_time:345505ms step_avg:42.23ms +[2025-09-11 13:58:55] [Rank 0] step:8181/10000 train_time:345505ms step_avg:42.23ms +[2025-09-11 13:58:56] [Rank 0] step:8201/10000 train_time:346214ms step_avg:42.22ms +[2025-09-11 13:58:56] [Rank 0] step:8201/10000 train_time:346214ms step_avg:42.22ms +[2025-09-11 13:58:57] [Rank 0] step:8221/10000 train_time:346923ms step_avg:42.20ms +[2025-09-11 13:58:57] [Rank 0] step:8221/10000 train_time:346923ms step_avg:42.20ms +[2025-09-11 13:58:57] [Rank 0] step:8241/10000 train_time:347641ms step_avg:42.18ms +[2025-09-11 13:58:57] [Rank 0] step:8241/10000 train_time:347641ms step_avg:42.18ms +[2025-09-11 13:58:58] [Rank 0] step:8261/10000 train_time:348349ms step_avg:42.17ms +[2025-09-11 13:58:58] [Rank 0] step:8261/10000 train_time:348349ms step_avg:42.17ms +[2025-09-11 13:58:59] [Rank 0] step:8281/10000 train_time:349055ms step_avg:42.15ms +[2025-09-11 13:58:59] [Rank 0] step:8281/10000 train_time:349055ms step_avg:42.15ms +[2025-09-11 13:58:59] [Rank 0] step:8301/10000 train_time:349764ms step_avg:42.14ms +[2025-09-11 13:58:59] [Rank 0] step:8301/10000 train_time:349764ms step_avg:42.14ms +[2025-09-11 13:59:00] [Rank 0] step:8321/10000 train_time:350471ms step_avg:42.12ms 
+[2025-09-11 13:59:00] [Rank 0] step:8321/10000 train_time:350471ms step_avg:42.12ms +[2025-09-11 13:59:01] [Rank 0] step:8341/10000 train_time:351186ms step_avg:42.10ms +[2025-09-11 13:59:01] [Rank 0] step:8341/10000 train_time:351186ms step_avg:42.10ms +[2025-09-11 13:59:02] [Rank 0] step:8361/10000 train_time:351891ms step_avg:42.09ms +[2025-09-11 13:59:02] [Rank 0] step:8361/10000 train_time:351891ms step_avg:42.09ms +[2025-09-11 13:59:02] [Rank 0] step:8381/10000 train_time:352603ms step_avg:42.07ms +[2025-09-11 13:59:02] [Rank 0] step:8381/10000 train_time:352603ms step_avg:42.07ms +[2025-09-11 13:59:03] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 13:59:03] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 13:59:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 13:59:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 13:59:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 13:59:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 13:59:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:59:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:59:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 13:59:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 13:59:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 13:59:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 13:59:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 13:59:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 13:59:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 13:59:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 13:59:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 13:59:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 13:59:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 13:59:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 13:59:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 13:59:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 13:59:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 13:59:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 13:59:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 13:59:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 13:59:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 13:59:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 13:59:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 13:59:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 13:59:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 13:59:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 13:59:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 13:59:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 13:59:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 13:59:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 13:59:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 13:59:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 13:59:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 13:59:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 13:59:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 13:59:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 13:59:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 13:59:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 13:59:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 13:59:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:59:16] [Rank 0] PRINT: step:8400/10000 val_loss:5.4829 total_sharp:6.4289e-02 L1_sharp:9.1274e-03 L2_sharp:1.0139e-02 L3_sharp:8.9501e-03 L4_sharp:1.2838e-02 L5_sharp:1.9608e-02 L6_sharp:3.5046e-02 L7_sharp:6.9999e-02 L8_sharp:9.2825e-02 L9_sharp:1.7134e-01 L10_sharp:2.6700e-01 L11_sharp:3.7977e-01 L12_sharp:8.6329e-01 total_fnorm:4.3555e-01 total_l1_linf:3.1800e+02 total_spectral:2.1973e-01 L1_fnorm:3.8086e-02 L2_fnorm:3.8574e-02 L3_fnorm:3.8574e-02 L4_fnorm:3.8574e-02 L5_fnorm:3.8086e-02 L6_fnorm:3.7842e-02 L7_fnorm:3.7598e-02 L8_fnorm:3.7598e-02 L9_fnorm:3.7598e-02 L10_fnorm:3.7354e-02 L11_fnorm:3.7354e-02 L12_fnorm:3.3691e-02 L1_l1linf:7.5989e-03 L2_l1linf:7.8735e-03 L3_l1linf:7.4768e-03 L4_l1linf:7.4463e-03 L5_l1linf:7.4158e-03 L6_l1linf:7.3547e-03 L7_l1linf:7.1106e-03 L8_l1linf:6.9580e-03 L9_l1linf:6.9580e-03 L10_l1linf:7.1411e-03 L11_l1linf:7.6904e-03 L12_l1linf:5.2185e-03 L1_spectral:6.7613e-04 L2_spectral:6.8064e-04 L3_spectral:6.7940e-04 L4_spectral:6.8013e-04 L5_spectral:6.6865e-04 L6_spectral:6.5981e-04 L7_spectral:6.5834e-04 L8_spectral:6.5503e-04 L9_spectral:6.5691e-04 L10_spectral:6.4239e-04 L11_spectral:6.2673e-04 L12_spectral:5.6742e-04 train_time:353295ms step_avg:42.06ms +[2025-09-11 13:59:16] [Rank 0] PRINT: step:8400/10000 val_loss:5.4829 total_sharp:6.4289e-02 L1_sharp:9.1274e-03 L2_sharp:1.0139e-02 L3_sharp:8.9501e-03 L4_sharp:1.2838e-02 L5_sharp:1.9608e-02 L6_sharp:3.5046e-02 L7_sharp:6.9999e-02 L8_sharp:9.2825e-02 L9_sharp:1.7134e-01 L10_sharp:2.6700e-01 L11_sharp:3.7977e-01 L12_sharp:8.6329e-01 total_fnorm:4.3555e-01 total_l1_linf:3.1800e+02 total_spectral:2.1973e-01 L1_fnorm:3.8086e-02 L2_fnorm:3.8574e-02 L3_fnorm:3.8574e-02 L4_fnorm:3.8574e-02 L5_fnorm:3.8086e-02 L6_fnorm:3.7842e-02 L7_fnorm:3.7598e-02 L8_fnorm:3.7598e-02 L9_fnorm:3.7598e-02 L10_fnorm:3.7354e-02 L11_fnorm:3.7354e-02 L12_fnorm:3.3691e-02 L1_l1linf:7.5989e-03 L2_l1linf:7.8735e-03 L3_l1linf:7.4768e-03 L4_l1linf:7.4463e-03 L5_l1linf:7.4158e-03 
L6_l1linf:7.3547e-03 L7_l1linf:7.1106e-03 L8_l1linf:6.9580e-03 L9_l1linf:6.9580e-03 L10_l1linf:7.1411e-03 L11_l1linf:7.6904e-03 L12_l1linf:5.2185e-03 L1_spectral:6.7613e-04 L2_spectral:6.8064e-04 L3_spectral:6.7940e-04 L4_spectral:6.8013e-04 L5_spectral:6.6865e-04 L6_spectral:6.5981e-04 L7_spectral:6.5834e-04 L8_spectral:6.5503e-04 L9_spectral:6.5691e-04 L10_spectral:6.4239e-04 L11_spectral:6.2673e-04 L12_spectral:5.6742e-04 train_time:353295ms step_avg:42.06ms +[2025-09-11 13:59:18] [Rank 0] step:8401/10000 train_time:354960ms step_avg:42.25ms +[2025-09-11 13:59:18] [Rank 0] step:8401/10000 train_time:354960ms step_avg:42.25ms +[2025-09-11 13:59:19] [Rank 0] step:8421/10000 train_time:355683ms step_avg:42.24ms +[2025-09-11 13:59:19] [Rank 0] step:8421/10000 train_time:355683ms step_avg:42.24ms +[2025-09-11 13:59:19] [Rank 0] step:8441/10000 train_time:356393ms step_avg:42.22ms +[2025-09-11 13:59:19] [Rank 0] step:8441/10000 train_time:356393ms step_avg:42.22ms +[2025-09-11 13:59:20] [Rank 0] step:8461/10000 train_time:357104ms step_avg:42.21ms +[2025-09-11 13:59:20] [Rank 0] step:8461/10000 train_time:357104ms step_avg:42.21ms +[2025-09-11 13:59:21] [Rank 0] step:8481/10000 train_time:357816ms step_avg:42.19ms +[2025-09-11 13:59:21] [Rank 0] step:8481/10000 train_time:357816ms step_avg:42.19ms +[2025-09-11 13:59:22] [Rank 0] step:8501/10000 train_time:358525ms step_avg:42.17ms +[2025-09-11 13:59:22] [Rank 0] step:8501/10000 train_time:358525ms step_avg:42.17ms +[2025-09-11 13:59:22] [Rank 0] step:8521/10000 train_time:359234ms step_avg:42.16ms +[2025-09-11 13:59:22] [Rank 0] step:8521/10000 train_time:359234ms step_avg:42.16ms +[2025-09-11 13:59:23] [Rank 0] step:8541/10000 train_time:359944ms step_avg:42.14ms +[2025-09-11 13:59:23] [Rank 0] step:8541/10000 train_time:359944ms step_avg:42.14ms +[2025-09-11 13:59:24] [Rank 0] step:8561/10000 train_time:360658ms step_avg:42.13ms +[2025-09-11 13:59:24] [Rank 0] step:8561/10000 train_time:360658ms step_avg:42.13ms 
+[2025-09-11 13:59:24] [Rank 0] step:8581/10000 train_time:361372ms step_avg:42.11ms +[2025-09-11 13:59:24] [Rank 0] step:8581/10000 train_time:361372ms step_avg:42.11ms +[2025-09-11 13:59:25] [Rank 0] step:8601/10000 train_time:362083ms step_avg:42.10ms +[2025-09-11 13:59:25] [Rank 0] step:8601/10000 train_time:362083ms step_avg:42.10ms +[2025-09-11 13:59:26] [Rank 0] step:8621/10000 train_time:362792ms step_avg:42.08ms +[2025-09-11 13:59:26] [Rank 0] step:8621/10000 train_time:362792ms step_avg:42.08ms +[2025-09-11 13:59:27] [Rank 0] step:8641/10000 train_time:363500ms step_avg:42.07ms +[2025-09-11 13:59:27] [Rank 0] step:8641/10000 train_time:363500ms step_avg:42.07ms +[2025-09-11 13:59:27] [Rank 0] step:8661/10000 train_time:364210ms step_avg:42.05ms +[2025-09-11 13:59:27] [Rank 0] step:8661/10000 train_time:364210ms step_avg:42.05ms +[2025-09-11 13:59:28] [Rank 0] step:8681/10000 train_time:364920ms step_avg:42.04ms +[2025-09-11 13:59:28] [Rank 0] step:8681/10000 train_time:364920ms step_avg:42.04ms +[2025-09-11 13:59:29] [Rank 0] step:8701/10000 train_time:365629ms step_avg:42.02ms +[2025-09-11 13:59:29] [Rank 0] step:8701/10000 train_time:365629ms step_avg:42.02ms +[2025-09-11 13:59:29] [Rank 0] step:8721/10000 train_time:366340ms step_avg:42.01ms +[2025-09-11 13:59:29] [Rank 0] step:8721/10000 train_time:366340ms step_avg:42.01ms +[2025-09-11 13:59:30] [Rank 0] step:8741/10000 train_time:367047ms step_avg:41.99ms +[2025-09-11 13:59:30] [Rank 0] step:8741/10000 train_time:367047ms step_avg:41.99ms +[2025-09-11 13:59:31] [Rank 0] step:8761/10000 train_time:367759ms step_avg:41.98ms +[2025-09-11 13:59:31] [Rank 0] step:8761/10000 train_time:367759ms step_avg:41.98ms +[2025-09-11 13:59:32] [Rank 0] step:8781/10000 train_time:368466ms step_avg:41.96ms +[2025-09-11 13:59:32] [Rank 0] step:8781/10000 train_time:368466ms step_avg:41.96ms +[2025-09-11 13:59:32] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 13:59:32] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 13:59:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 13:59:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 13:59:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 13:59:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 13:59:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:59:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:59:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 13:59:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 13:59:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 13:59:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 13:59:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 13:59:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 13:59:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 13:59:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 13:59:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 13:59:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 13:59:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 13:59:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 13:59:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 13:59:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 13:59:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 13:59:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 13:59:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 13:59:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 13:59:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 13:59:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 13:59:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 13:59:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 13:59:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 13:59:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 13:59:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 13:59:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 13:59:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 13:59:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 13:59:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 13:59:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 13:59:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 13:59:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 13:59:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 13:59:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 13:59:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 13:59:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 13:59:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 13:59:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 13:59:43] [Rank 0] PRINT: step:8800/10000 val_loss:5.4753 total_sharp:4.4005e-02 L1_sharp:7.0797e-03 L2_sharp:8.4640e-03 L3_sharp:6.0409e-03 L4_sharp:1.1910e-02 L5_sharp:1.5122e-02 L6_sharp:2.5165e-02 L7_sharp:4.7876e-02 L8_sharp:8.2136e-02 L9_sharp:1.4916e-01 L10_sharp:2.0167e-01 L11_sharp:3.2954e-01 L12_sharp:5.9064e-01 total_fnorm:3.1250e-01 total_l1_linf:2.0300e+02 total_spectral:1.5723e-01 L1_fnorm:2.7466e-02 L2_fnorm:2.7588e-02 L3_fnorm:2.7710e-02 L4_fnorm:2.7588e-02 L5_fnorm:2.7344e-02 L6_fnorm:2.7222e-02 L7_fnorm:2.6978e-02 L8_fnorm:2.6855e-02 L9_fnorm:2.6733e-02 L10_fnorm:2.6611e-02 L11_fnorm:2.6733e-02 L12_fnorm:2.4292e-02 L1_l1linf:4.8523e-03 L2_l1linf:4.8523e-03 L3_l1linf:5.0964e-03 L4_l1linf:4.9438e-03 L5_l1linf:4.9133e-03 L6_l1linf:4.7607e-03 L7_l1linf:4.6082e-03 L8_l1linf:4.5776e-03 L9_l1linf:4.6387e-03 L10_l1linf:4.4861e-03 L11_l1linf:4.8218e-03 L12_l1linf:3.5706e-03 L1_spectral:5.0026e-04 L2_spectral:5.0173e-04 L3_spectral:4.9796e-04 L4_spectral:5.0117e-04 L5_spectral:4.9791e-04 L6_spectral:4.8889e-04 L7_spectral:4.8578e-04 L8_spectral:4.8138e-04 L9_spectral:4.8171e-04 L10_spectral:4.6858e-04 L11_spectral:4.5631e-04 L12_spectral:4.1263e-04 train_time:369154ms step_avg:41.95ms +[2025-09-11 13:59:43] [Rank 0] PRINT: step:8800/10000 
val_loss:5.4753 total_sharp:4.4005e-02 L1_sharp:7.0797e-03 L2_sharp:8.4640e-03 L3_sharp:6.0409e-03 L4_sharp:1.1910e-02 L5_sharp:1.5122e-02 L6_sharp:2.5165e-02 L7_sharp:4.7876e-02 L8_sharp:8.2136e-02 L9_sharp:1.4916e-01 L10_sharp:2.0167e-01 L11_sharp:3.2954e-01 L12_sharp:5.9064e-01 total_fnorm:3.1250e-01 total_l1_linf:2.0300e+02 total_spectral:1.5723e-01 L1_fnorm:2.7466e-02 L2_fnorm:2.7588e-02 L3_fnorm:2.7710e-02 L4_fnorm:2.7588e-02 L5_fnorm:2.7344e-02 L6_fnorm:2.7222e-02 L7_fnorm:2.6978e-02 L8_fnorm:2.6855e-02 L9_fnorm:2.6733e-02 L10_fnorm:2.6611e-02 L11_fnorm:2.6733e-02 L12_fnorm:2.4292e-02 L1_l1linf:4.8523e-03 L2_l1linf:4.8523e-03 L3_l1linf:5.0964e-03 L4_l1linf:4.9438e-03 L5_l1linf:4.9133e-03 L6_l1linf:4.7607e-03 L7_l1linf:4.6082e-03 L8_l1linf:4.5776e-03 L9_l1linf:4.6387e-03 L10_l1linf:4.4861e-03 L11_l1linf:4.8218e-03 L12_l1linf:3.5706e-03 L1_spectral:5.0026e-04 L2_spectral:5.0173e-04 L3_spectral:4.9796e-04 L4_spectral:5.0117e-04 L5_spectral:4.9791e-04 L6_spectral:4.8889e-04 L7_spectral:4.8578e-04 L8_spectral:4.8138e-04 L9_spectral:4.8171e-04 L10_spectral:4.6858e-04 L11_spectral:4.5631e-04 L12_spectral:4.1263e-04 train_time:369154ms step_avg:41.95ms +[2025-09-11 13:59:44] [Rank 0] step:8801/10000 train_time:370832ms step_avg:42.14ms +[2025-09-11 13:59:44] [Rank 0] step:8801/10000 train_time:370832ms step_avg:42.14ms +[2025-09-11 13:59:45] [Rank 0] step:8821/10000 train_time:371546ms step_avg:42.12ms +[2025-09-11 13:59:45] [Rank 0] step:8821/10000 train_time:371546ms step_avg:42.12ms +[2025-09-11 13:59:46] [Rank 0] step:8841/10000 train_time:372256ms step_avg:42.11ms +[2025-09-11 13:59:46] [Rank 0] step:8841/10000 train_time:372256ms step_avg:42.11ms +[2025-09-11 13:59:46] [Rank 0] step:8861/10000 train_time:372965ms step_avg:42.09ms +[2025-09-11 13:59:46] [Rank 0] step:8861/10000 train_time:372965ms step_avg:42.09ms +[2025-09-11 13:59:47] [Rank 0] step:8881/10000 train_time:373676ms step_avg:42.08ms +[2025-09-11 13:59:47] [Rank 0] step:8881/10000 
train_time:373676ms step_avg:42.08ms +[2025-09-11 13:59:48] [Rank 0] step:8901/10000 train_time:374388ms step_avg:42.06ms +[2025-09-11 13:59:48] [Rank 0] step:8901/10000 train_time:374388ms step_avg:42.06ms +[2025-09-11 13:59:48] [Rank 0] step:8921/10000 train_time:375094ms step_avg:42.05ms +[2025-09-11 13:59:48] [Rank 0] step:8921/10000 train_time:375094ms step_avg:42.05ms +[2025-09-11 13:59:49] [Rank 0] step:8941/10000 train_time:375808ms step_avg:42.03ms +[2025-09-11 13:59:49] [Rank 0] step:8941/10000 train_time:375808ms step_avg:42.03ms +[2025-09-11 13:59:50] [Rank 0] step:8961/10000 train_time:376526ms step_avg:42.02ms +[2025-09-11 13:59:50] [Rank 0] step:8961/10000 train_time:376526ms step_avg:42.02ms +[2025-09-11 13:59:51] [Rank 0] step:8981/10000 train_time:377239ms step_avg:42.00ms +[2025-09-11 13:59:51] [Rank 0] step:8981/10000 train_time:377239ms step_avg:42.00ms +[2025-09-11 13:59:51] [Rank 0] step:9001/10000 train_time:377944ms step_avg:41.99ms +[2025-09-11 13:59:51] [Rank 0] step:9001/10000 train_time:377944ms step_avg:41.99ms +[2025-09-11 13:59:52] [Rank 0] step:9021/10000 train_time:378656ms step_avg:41.97ms +[2025-09-11 13:59:52] [Rank 0] step:9021/10000 train_time:378656ms step_avg:41.97ms +[2025-09-11 13:59:53] [Rank 0] step:9041/10000 train_time:379368ms step_avg:41.96ms +[2025-09-11 13:59:53] [Rank 0] step:9041/10000 train_time:379368ms step_avg:41.96ms +[2025-09-11 13:59:53] [Rank 0] step:9061/10000 train_time:380077ms step_avg:41.95ms +[2025-09-11 13:59:53] [Rank 0] step:9061/10000 train_time:380077ms step_avg:41.95ms +[2025-09-11 13:59:54] [Rank 0] step:9081/10000 train_time:380789ms step_avg:41.93ms +[2025-09-11 13:59:54] [Rank 0] step:9081/10000 train_time:380789ms step_avg:41.93ms +[2025-09-11 13:59:55] [Rank 0] step:9101/10000 train_time:381502ms step_avg:41.92ms +[2025-09-11 13:59:55] [Rank 0] step:9101/10000 train_time:381502ms step_avg:41.92ms +[2025-09-11 13:59:56] [Rank 0] step:9121/10000 train_time:382216ms step_avg:41.91ms 
+[2025-09-11 13:59:56] [Rank 0] step:9121/10000 train_time:382216ms step_avg:41.91ms +[2025-09-11 13:59:56] [Rank 0] step:9141/10000 train_time:382924ms step_avg:41.89ms +[2025-09-11 13:59:56] [Rank 0] step:9141/10000 train_time:382924ms step_avg:41.89ms +[2025-09-11 13:59:57] [Rank 0] step:9161/10000 train_time:383637ms step_avg:41.88ms +[2025-09-11 13:59:57] [Rank 0] step:9161/10000 train_time:383637ms step_avg:41.88ms +[2025-09-11 13:59:58] [Rank 0] step:9181/10000 train_time:384349ms step_avg:41.86ms +[2025-09-11 13:59:58] [Rank 0] step:9181/10000 train_time:384349ms step_avg:41.86ms +[2025-09-11 13:59:58] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 13:59:58] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 13:59:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 13:59:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 14:00:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 14:00:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 14:00:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:00:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:00:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 14:00:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 14:00:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 14:00:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 14:00:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 14:00:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 14:00:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 14:00:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 14:00:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 14:00:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 14:00:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 14:00:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 14:00:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 14:00:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 14:00:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 14:00:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 14:00:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 14:00:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 14:00:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 14:00:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 14:00:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 14:00:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 14:00:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 14:00:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 14:00:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 14:00:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 14:00:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 14:00:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 14:00:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 14:00:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 14:00:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 14:00:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 14:00:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 14:00:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 14:00:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 14:00:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 14:00:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 14:00:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 14:00:09] [Rank 0] PRINT: step:9200/10000 val_loss:5.4690 total_sharp:5.2596e-02 L1_sharp:7.4614e-03 L2_sharp:8.3003e-03 L3_sharp:5.0933e-03 L4_sharp:9.1405e-03 L5_sharp:1.2205e-02 L6_sharp:3.5915e-02 L7_sharp:6.1287e-02 L8_sharp:8.9163e-02 L9_sharp:1.3321e-01 L10_sharp:1.9744e-01 L11_sharp:2.8461e-01 L12_sharp:6.4821e-01 total_fnorm:1.9141e-01 total_l1_linf:1.1300e+02 total_spectral:9.7168e-02 L1_fnorm:1.8066e-02 L2_fnorm:1.8311e-02 L3_fnorm:1.8311e-02 L4_fnorm:1.8188e-02 L5_fnorm:1.8066e-02 L6_fnorm:1.7944e-02 L7_fnorm:1.7944e-02 L8_fnorm:1.7822e-02 L9_fnorm:1.7944e-02 L10_fnorm:1.7578e-02 L11_fnorm:1.7700e-02 L12_fnorm:1.5747e-02 L1_l1linf:2.8534e-03 L2_l1linf:2.8534e-03 L3_l1linf:2.9907e-03 L4_l1linf:2.8381e-03 L5_l1linf:2.8076e-03 L6_l1linf:2.8076e-03 L7_l1linf:2.7771e-03 L8_l1linf:2.7161e-03 L9_l1linf:2.7771e-03 L10_l1linf:2.7466e-03 L11_l1linf:3.1128e-03 L12_l1linf:1.9684e-03 L1_spectral:3.4183e-04 L2_spectral:3.4481e-04 L3_spectral:3.4728e-04 L4_spectral:3.3875e-04 L5_spectral:3.3388e-04 L6_spectral:3.3080e-04 L7_spectral:3.2978e-04 L8_spectral:3.2716e-04 L9_spectral:3.2889e-04 L10_spectral:3.1508e-04 L11_spectral:3.0481e-04 L12_spectral:2.7882e-04 train_time:385043ms step_avg:41.85ms +[2025-09-11 14:00:09] [Rank 0] PRINT: step:9200/10000 val_loss:5.4690 total_sharp:5.2596e-02 L1_sharp:7.4614e-03 L2_sharp:8.3003e-03 L3_sharp:5.0933e-03 L4_sharp:9.1405e-03 L5_sharp:1.2205e-02 L6_sharp:3.5915e-02 L7_sharp:6.1287e-02 L8_sharp:8.9163e-02 L9_sharp:1.3321e-01 L10_sharp:1.9744e-01 L11_sharp:2.8461e-01 L12_sharp:6.4821e-01 total_fnorm:1.9141e-01 total_l1_linf:1.1300e+02 total_spectral:9.7168e-02 L1_fnorm:1.8066e-02 L2_fnorm:1.8311e-02 L3_fnorm:1.8311e-02 L4_fnorm:1.8188e-02 L5_fnorm:1.8066e-02 L6_fnorm:1.7944e-02 L7_fnorm:1.7944e-02 L8_fnorm:1.7822e-02 L9_fnorm:1.7944e-02 L10_fnorm:1.7578e-02 L11_fnorm:1.7700e-02 L12_fnorm:1.5747e-02 L1_l1linf:2.8534e-03 L2_l1linf:2.8534e-03 L3_l1linf:2.9907e-03 L4_l1linf:2.8381e-03 L5_l1linf:2.8076e-03 
L6_l1linf:2.8076e-03 L7_l1linf:2.7771e-03 L8_l1linf:2.7161e-03 L9_l1linf:2.7771e-03 L10_l1linf:2.7466e-03 L11_l1linf:3.1128e-03 L12_l1linf:1.9684e-03 L1_spectral:3.4183e-04 L2_spectral:3.4481e-04 L3_spectral:3.4728e-04 L4_spectral:3.3875e-04 L5_spectral:3.3388e-04 L6_spectral:3.3080e-04 L7_spectral:3.2978e-04 L8_spectral:3.2716e-04 L9_spectral:3.2889e-04 L10_spectral:3.1508e-04 L11_spectral:3.0481e-04 L12_spectral:2.7882e-04 train_time:385043ms step_avg:41.85ms +[2025-09-11 14:00:11] [Rank 0] step:9201/10000 train_time:386931ms step_avg:42.05ms +[2025-09-11 14:00:11] [Rank 0] step:9201/10000 train_time:386931ms step_avg:42.05ms +[2025-09-11 14:00:12] [Rank 0] step:9221/10000 train_time:387664ms step_avg:42.04ms +[2025-09-11 14:00:12] [Rank 0] step:9221/10000 train_time:387664ms step_avg:42.04ms +[2025-09-11 14:00:12] [Rank 0] step:9241/10000 train_time:388373ms step_avg:42.03ms +[2025-09-11 14:00:12] [Rank 0] step:9241/10000 train_time:388373ms step_avg:42.03ms +[2025-09-11 14:00:13] [Rank 0] step:9261/10000 train_time:389085ms step_avg:42.01ms +[2025-09-11 14:00:13] [Rank 0] step:9261/10000 train_time:389085ms step_avg:42.01ms +[2025-09-11 14:00:14] [Rank 0] step:9281/10000 train_time:389796ms step_avg:42.00ms +[2025-09-11 14:00:14] [Rank 0] step:9281/10000 train_time:389796ms step_avg:42.00ms +[2025-09-11 14:00:14] [Rank 0] step:9301/10000 train_time:390504ms step_avg:41.99ms +[2025-09-11 14:00:14] [Rank 0] step:9301/10000 train_time:390504ms step_avg:41.99ms +[2025-09-11 14:00:15] [Rank 0] step:9321/10000 train_time:391216ms step_avg:41.97ms +[2025-09-11 14:00:15] [Rank 0] step:9321/10000 train_time:391216ms step_avg:41.97ms +[2025-09-11 14:00:16] [Rank 0] step:9341/10000 train_time:391925ms step_avg:41.96ms +[2025-09-11 14:00:16] [Rank 0] step:9341/10000 train_time:391925ms step_avg:41.96ms +[2025-09-11 14:00:17] [Rank 0] step:9361/10000 train_time:392631ms step_avg:41.94ms +[2025-09-11 14:00:17] [Rank 0] step:9361/10000 train_time:392631ms step_avg:41.94ms 
+[2025-09-11 14:00:17] [Rank 0] step:9381/10000 train_time:393341ms step_avg:41.93ms +[2025-09-11 14:00:17] [Rank 0] step:9381/10000 train_time:393341ms step_avg:41.93ms +[2025-09-11 14:00:18] [Rank 0] step:9401/10000 train_time:394053ms step_avg:41.92ms +[2025-09-11 14:00:18] [Rank 0] step:9401/10000 train_time:394053ms step_avg:41.92ms +[2025-09-11 14:00:19] [Rank 0] step:9421/10000 train_time:394765ms step_avg:41.90ms +[2025-09-11 14:00:19] [Rank 0] step:9421/10000 train_time:394765ms step_avg:41.90ms +[2025-09-11 14:00:19] [Rank 0] step:9441/10000 train_time:395478ms step_avg:41.89ms +[2025-09-11 14:00:19] [Rank 0] step:9441/10000 train_time:395478ms step_avg:41.89ms +[2025-09-11 14:00:20] [Rank 0] step:9461/10000 train_time:396188ms step_avg:41.88ms +[2025-09-11 14:00:20] [Rank 0] step:9461/10000 train_time:396188ms step_avg:41.88ms +[2025-09-11 14:00:21] [Rank 0] step:9481/10000 train_time:396900ms step_avg:41.86ms +[2025-09-11 14:00:21] [Rank 0] step:9481/10000 train_time:396900ms step_avg:41.86ms +[2025-09-11 14:00:22] [Rank 0] step:9501/10000 train_time:397612ms step_avg:41.85ms +[2025-09-11 14:00:22] [Rank 0] step:9501/10000 train_time:397612ms step_avg:41.85ms +[2025-09-11 14:00:22] [Rank 0] step:9521/10000 train_time:398326ms step_avg:41.84ms +[2025-09-11 14:00:22] [Rank 0] step:9521/10000 train_time:398326ms step_avg:41.84ms +[2025-09-11 14:00:23] [Rank 0] step:9541/10000 train_time:399034ms step_avg:41.82ms +[2025-09-11 14:00:23] [Rank 0] step:9541/10000 train_time:399034ms step_avg:41.82ms +[2025-09-11 14:00:24] [Rank 0] step:9561/10000 train_time:399746ms step_avg:41.81ms +[2025-09-11 14:00:24] [Rank 0] step:9561/10000 train_time:399746ms step_avg:41.81ms +[2025-09-11 14:00:24] [Rank 0] step:9581/10000 train_time:400458ms step_avg:41.80ms +[2025-09-11 14:00:24] [Rank 0] step:9581/10000 train_time:400458ms step_avg:41.80ms +[2025-09-11 14:00:25] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 14:00:25] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 14:00:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 14:00:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 14:00:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 14:00:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 14:00:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:00:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:00:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 14:00:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 14:00:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 14:00:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 14:00:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 14:00:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 14:00:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 14:00:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 14:00:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 14:00:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 14:00:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 14:00:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 14:00:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 14:00:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 14:00:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 14:00:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 14:00:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 14:00:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 14:00:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 14:00:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 14:00:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 14:00:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 14:00:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 14:00:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 14:00:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 14:00:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 14:00:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 14:00:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 14:00:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 14:00:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 14:00:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 14:00:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 14:00:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 14:00:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 14:00:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 14:00:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 14:00:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 14:00:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 14:00:36] [Rank 0] PRINT: step:9600/10000 val_loss:5.4650 total_sharp:2.5142e-02 L1_sharp:6.1428e-03 L2_sharp:6.8193e-03 L3_sharp:5.6802e-03 L4_sharp:8.9424e-03 L5_sharp:7.8092e-03 L6_sharp:1.8699e-02 L7_sharp:3.1950e-02 L8_sharp:4.3449e-02 L9_sharp:8.2146e-02 L10_sharp:1.3673e-01 L11_sharp:1.9126e-01 L12_sharp:4.6017e-01 total_fnorm:1.1084e-01 total_l1_linf:5.3500e+01 total_spectral:5.7373e-02 L1_fnorm:1.0071e-02 L2_fnorm:1.0193e-02 L3_fnorm:1.0254e-02 L4_fnorm:1.0193e-02 L5_fnorm:1.0071e-02 L6_fnorm:1.0071e-02 L7_fnorm:1.0010e-02 L8_fnorm:9.9487e-03 L9_fnorm:1.0010e-02 L10_fnorm:9.8877e-03 L11_fnorm:9.9487e-03 L12_fnorm:8.7891e-03 L1_l1linf:1.3275e-03 L2_l1linf:1.3428e-03 L3_l1linf:1.3504e-03 L4_l1linf:1.3885e-03 L5_l1linf:1.3580e-03 L6_l1linf:1.3428e-03 L7_l1linf:1.3199e-03 L8_l1linf:1.3046e-03 L9_l1linf:1.3199e-03 L10_l1linf:1.3123e-03 L11_l1linf:1.3962e-03 L12_l1linf:1.0452e-03 L1_spectral:1.9621e-04 L2_spectral:1.9785e-04 L3_spectral:1.9742e-04 L4_spectral:1.9718e-04 L5_spectral:1.9338e-04 L6_spectral:1.8829e-04 L7_spectral:1.8900e-04 L8_spectral:1.8633e-04 L9_spectral:1.8634e-04 L10_spectral:1.8049e-04 L11_spectral:1.7380e-04 L12_spectral:1.5960e-04 train_time:401146ms step_avg:41.79ms +[2025-09-11 14:00:36] [Rank 0] PRINT: step:9600/10000 
val_loss:5.4650 total_sharp:2.5142e-02 L1_sharp:6.1428e-03 L2_sharp:6.8193e-03 L3_sharp:5.6802e-03 L4_sharp:8.9424e-03 L5_sharp:7.8092e-03 L6_sharp:1.8699e-02 L7_sharp:3.1950e-02 L8_sharp:4.3449e-02 L9_sharp:8.2146e-02 L10_sharp:1.3673e-01 L11_sharp:1.9126e-01 L12_sharp:4.6017e-01 total_fnorm:1.1084e-01 total_l1_linf:5.3500e+01 total_spectral:5.7373e-02 L1_fnorm:1.0071e-02 L2_fnorm:1.0193e-02 L3_fnorm:1.0254e-02 L4_fnorm:1.0193e-02 L5_fnorm:1.0071e-02 L6_fnorm:1.0071e-02 L7_fnorm:1.0010e-02 L8_fnorm:9.9487e-03 L9_fnorm:1.0010e-02 L10_fnorm:9.8877e-03 L11_fnorm:9.9487e-03 L12_fnorm:8.7891e-03 L1_l1linf:1.3275e-03 L2_l1linf:1.3428e-03 L3_l1linf:1.3504e-03 L4_l1linf:1.3885e-03 L5_l1linf:1.3580e-03 L6_l1linf:1.3428e-03 L7_l1linf:1.3199e-03 L8_l1linf:1.3046e-03 L9_l1linf:1.3199e-03 L10_l1linf:1.3123e-03 L11_l1linf:1.3962e-03 L12_l1linf:1.0452e-03 L1_spectral:1.9621e-04 L2_spectral:1.9785e-04 L3_spectral:1.9742e-04 L4_spectral:1.9718e-04 L5_spectral:1.9338e-04 L6_spectral:1.8829e-04 L7_spectral:1.8900e-04 L8_spectral:1.8633e-04 L9_spectral:1.8634e-04 L10_spectral:1.8049e-04 L11_spectral:1.7380e-04 L12_spectral:1.5960e-04 train_time:401146ms step_avg:41.79ms +[2025-09-11 14:00:37] [Rank 0] step:9601/10000 train_time:402851ms step_avg:41.96ms +[2025-09-11 14:00:37] [Rank 0] step:9601/10000 train_time:402851ms step_avg:41.96ms +[2025-09-11 14:00:38] [Rank 0] step:9621/10000 train_time:403583ms step_avg:41.95ms +[2025-09-11 14:00:38] [Rank 0] step:9621/10000 train_time:403583ms step_avg:41.95ms +[2025-09-11 14:00:39] [Rank 0] step:9641/10000 train_time:404300ms step_avg:41.94ms +[2025-09-11 14:00:39] [Rank 0] step:9641/10000 train_time:404300ms step_avg:41.94ms +[2025-09-11 14:00:40] [Rank 0] step:9661/10000 train_time:405024ms step_avg:41.92ms +[2025-09-11 14:00:40] [Rank 0] step:9661/10000 train_time:405024ms step_avg:41.92ms +[2025-09-11 14:00:40] [Rank 0] step:9681/10000 train_time:405739ms step_avg:41.91ms +[2025-09-11 14:00:40] [Rank 0] step:9681/10000 
train_time:405739ms step_avg:41.91ms +[2025-09-11 14:00:41] [Rank 0] step:9701/10000 train_time:406456ms step_avg:41.90ms +[2025-09-11 14:00:41] [Rank 0] step:9701/10000 train_time:406456ms step_avg:41.90ms +[2025-09-11 14:00:42] [Rank 0] step:9721/10000 train_time:407177ms step_avg:41.89ms +[2025-09-11 14:00:42] [Rank 0] step:9721/10000 train_time:407177ms step_avg:41.89ms +[2025-09-11 14:00:43] [Rank 0] step:9741/10000 train_time:408143ms step_avg:41.90ms +[2025-09-11 14:00:43] [Rank 0] step:9741/10000 train_time:408143ms step_avg:41.90ms +[2025-09-11 14:00:43] [Rank 0] step:9761/10000 train_time:408862ms step_avg:41.89ms +[2025-09-11 14:00:43] [Rank 0] step:9761/10000 train_time:408862ms step_avg:41.89ms +[2025-09-11 14:00:44] [Rank 0] step:9781/10000 train_time:409578ms step_avg:41.87ms +[2025-09-11 14:00:44] [Rank 0] step:9781/10000 train_time:409578ms step_avg:41.87ms +[2025-09-11 14:00:45] [Rank 0] step:9801/10000 train_time:410552ms step_avg:41.89ms +[2025-09-11 14:00:45] [Rank 0] step:9801/10000 train_time:410552ms step_avg:41.89ms +[2025-09-11 14:00:46] [Rank 0] step:9821/10000 train_time:411271ms step_avg:41.88ms +[2025-09-11 14:00:46] [Rank 0] step:9821/10000 train_time:411271ms step_avg:41.88ms +[2025-09-11 14:00:47] [Rank 0] step:9841/10000 train_time:411992ms step_avg:41.86ms +[2025-09-11 14:00:47] [Rank 0] step:9841/10000 train_time:411992ms step_avg:41.86ms +[2025-09-11 14:00:47] [Rank 0] step:9861/10000 train_time:412709ms step_avg:41.85ms +[2025-09-11 14:00:47] [Rank 0] step:9861/10000 train_time:412709ms step_avg:41.85ms +[2025-09-11 14:00:48] [Rank 0] step:9881/10000 train_time:413427ms step_avg:41.84ms +[2025-09-11 14:00:48] [Rank 0] step:9881/10000 train_time:413427ms step_avg:41.84ms +[2025-09-11 14:00:49] [Rank 0] step:9901/10000 train_time:414142ms step_avg:41.83ms +[2025-09-11 14:00:49] [Rank 0] step:9901/10000 train_time:414142ms step_avg:41.83ms +[2025-09-11 14:00:49] [Rank 0] step:9921/10000 train_time:414860ms step_avg:41.82ms 
+[2025-09-11 14:00:49] [Rank 0] step:9921/10000 train_time:414860ms step_avg:41.82ms +[2025-09-11 14:00:50] [Rank 0] step:9941/10000 train_time:415582ms step_avg:41.80ms +[2025-09-11 14:00:50] [Rank 0] step:9941/10000 train_time:415582ms step_avg:41.80ms +[2025-09-11 14:00:51] [Rank 0] step:9961/10000 train_time:416306ms step_avg:41.79ms +[2025-09-11 14:00:51] [Rank 0] step:9961/10000 train_time:416306ms step_avg:41.79ms +[2025-09-11 14:00:52] [Rank 0] step:9981/10000 train_time:417025ms step_avg:41.78ms +[2025-09-11 14:00:52] [Rank 0] step:9981/10000 train_time:417025ms step_avg:41.78ms +[2025-09-11 14:00:52] [Rank 0] step:10000/10000 train_time:417719ms step_avg:41.77ms +[2025-09-11 14:00:52] [Rank 0] step:10000/10000 train_time:417719ms step_avg:41.77ms +[2025-09-11 14:00:52] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 14:00:52] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 14:00:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 14:00:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 14:00:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 14:00:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 14:00:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:00:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:00:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 14:00:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 14:00:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 14:00:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 14:00:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 14:00:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 14:00:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 14:00:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 14:01:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 14:01:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 14:01:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 14:01:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 14:01:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 14:01:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 14:01:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 14:01:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 14:01:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 14:01:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 14:01:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 14:01:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 14:01:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 14:01:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 14:01:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 14:01:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 14:01:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 14:01:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 14:01:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 14:01:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 14:01:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 14:01:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 14:01:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 14:01:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 14:01:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 14:01:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 14:01:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 14:01:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 14:01:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 14:01:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 14:01:03] [Rank 0] PRINT: step:10000/10000 val_loss:5.4642 total_sharp:1.8502e-02 L1_sharp:4.0572e-03 L2_sharp:4.4126e-03 L3_sharp:3.0364e-03 L4_sharp:6.5450e-03 L5_sharp:1.1071e-02 L6_sharp:1.8087e-02 L7_sharp:3.3638e-02 L8_sharp:3.4843e-02 L9_sharp:6.4786e-02 L10_sharp:9.6140e-02 L11_sharp:1.4023e-01 L12_sharp:4.1018e-01 total_fnorm:4.2725e-02 total_l1_linf:1.5250e+01 total_spectral:2.2095e-02 L1_fnorm:3.9673e-03 L2_fnorm:3.9978e-03 L3_fnorm:3.9978e-03 L4_fnorm:3.9978e-03 L5_fnorm:3.9368e-03 L6_fnorm:3.9368e-03 L7_fnorm:3.9062e-03 L8_fnorm:3.9062e-03 L9_fnorm:3.9368e-03 L10_fnorm:3.8452e-03 L11_fnorm:3.8757e-03 L12_fnorm:3.4180e-03 L1_l1linf:4.1962e-04 L2_l1linf:3.9864e-04 L3_l1linf:4.2915e-04 L4_l1linf:4.1580e-04 L5_l1linf:4.0436e-04 L6_l1linf:4.0436e-04 L7_l1linf:4.2534e-04 L8_l1linf:4.0436e-04 L9_l1linf:4.0245e-04 L10_l1linf:4.0436e-04 L11_l1linf:4.4060e-04 L12_l1linf:3.2043e-04 L1_spectral:7.8960e-05 L2_spectral:8.0229e-05 L3_spectral:8.0485e-05 L4_spectral:7.8540e-05 L5_spectral:7.7987e-05 L6_spectral:7.6996e-05 L7_spectral:7.5168e-05 L8_spectral:7.6038e-05 L9_spectral:7.5482e-05 L10_spectral:7.1587e-05 L11_spectral:7.1166e-05 L12_spectral:6.5257e-05 train_time:417740ms step_avg:41.77ms +[2025-09-11 14:01:03] [Rank 0] PRINT: step:10000/10000 val_loss:5.4642 total_sharp:1.8502e-02 L1_sharp:4.0572e-03 L2_sharp:4.4126e-03 L3_sharp:3.0364e-03 L4_sharp:6.5450e-03 L5_sharp:1.1071e-02 L6_sharp:1.8087e-02 L7_sharp:3.3638e-02 L8_sharp:3.4843e-02 L9_sharp:6.4786e-02 L10_sharp:9.6140e-02 L11_sharp:1.4023e-01 L12_sharp:4.1018e-01 total_fnorm:4.2725e-02 total_l1_linf:1.5250e+01 total_spectral:2.2095e-02 L1_fnorm:3.9673e-03 L2_fnorm:3.9978e-03 L3_fnorm:3.9978e-03 L4_fnorm:3.9978e-03 L5_fnorm:3.9368e-03 L6_fnorm:3.9368e-03 L7_fnorm:3.9062e-03 L8_fnorm:3.9062e-03 L9_fnorm:3.9368e-03 L10_fnorm:3.8452e-03 L11_fnorm:3.8757e-03 L12_fnorm:3.4180e-03 L1_l1linf:4.1962e-04 L2_l1linf:3.9864e-04 L3_l1linf:4.2915e-04 L4_l1linf:4.1580e-04 L5_l1linf:4.0436e-04 
L6_l1linf:4.0436e-04 L7_l1linf:4.2534e-04 L8_l1linf:4.0436e-04 L9_l1linf:4.0245e-04 L10_l1linf:4.0436e-04 L11_l1linf:4.4060e-04 L12_l1linf:3.2043e-04 L1_spectral:7.8960e-05 L2_spectral:8.0229e-05 L3_spectral:8.0485e-05 L4_spectral:7.8540e-05 L5_spectral:7.7987e-05 L6_spectral:7.6996e-05 L7_spectral:7.5168e-05 L8_spectral:7.6038e-05 L9_spectral:7.5482e-05 L10_spectral:7.1587e-05 L11_spectral:7.1166e-05 L12_spectral:6.5257e-05 train_time:417740ms step_avg:41.77ms +[2025-09-11 14:01:03] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 14:01:03 2025 --- +[2025-09-11 14:01:03] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 14:01:03 2025 --- +[2025-09-11 14:01:03] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 14:01:03] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.002_seed_42/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.002_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..3b78fa93cfc25f686b0c5a50641a6209b15419b5 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.002_seed_42/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.001, + "muon_lr": 0.002, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "92e22894-4d6b-415e-915f-c2bacca2c5a2", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.002_seed_42/training_log_92e22894-4d6b-415e-915f-c2bacca2c5a2.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.002_seed_42/training_log_92e22894-4d6b-415e-915f-c2bacca2c5a2.txt new file mode 100644 index 0000000000000000000000000000000000000000..30a43ff15057c0eabda50721adc75c1b44c697da --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.002_seed_42/training_log_92e22894-4d6b-415e-915f-c2bacca2c5a2.txt @@ -0,0 +1,4264 @@ +[2025-09-11 12:51:56] [Rank 0] PRINT: --- Script Start: Thu Sep 11 12:51:56 2025 --- +[2025-09-11 12:51:56] [Rank 0] PRINT: --- Script Start: Thu Sep 11 12:51:56 2025 --- +[2025-09-11 12:51:56] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001, muon_lr=0.002, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 12:51:56] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001, muon_lr=0.002, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 12:51:56] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 12:51:56] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 12:51:56] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-11 12:51:56] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-11 12:51:56] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.002_seed_42 +[2025-09-11 12:51:56] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.002_seed_42 +[2025-09-11 12:51:56] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses 
import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert 
header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." 
+ "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + 
train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 12:51:56] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 12:51:56] [Rank 0] PRINT: Constructing model... +[2025-09-11 12:51:56] [Rank 0] PRINT: Constructing model... +[2025-09-11 12:51:57] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 12:51:57] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 12:51:57] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 12:51:57] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 12:51:57] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 12:51:57] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 12:51:57] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 12:51:57] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 12:51:57] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 12:51:57] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 12:52:00] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 12:52:00] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 12:52:00] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 12:52:00] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 12:52:00] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 12:52:00] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 12:52:05] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 12:52:05] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 12:52:05] [Rank 0] PRINT: Starting warmup... +[2025-09-11 12:52:05] [Rank 0] PRINT: Starting warmup... +[2025-09-11 12:52:43] [Rank 0] PRINT: Warmup complete. +[2025-09-11 12:52:43] [Rank 0] PRINT: Warmup complete. +[2025-09-11 12:52:43] [Rank 0] PRINT: Starting training... +[2025-09-11 12:52:43] [Rank 0] PRINT: Starting training... 
+[2025-09-11 12:52:44] [Rank 0] step:21/10000 train_time:1132ms step_avg:53.89ms +[2025-09-11 12:52:44] [Rank 0] step:21/10000 train_time:1132ms step_avg:53.89ms +[2025-09-11 12:52:44] [Rank 0] step:41/10000 train_time:1862ms step_avg:45.42ms +[2025-09-11 12:52:44] [Rank 0] step:41/10000 train_time:1862ms step_avg:45.42ms +[2025-09-11 12:52:45] [Rank 0] step:61/10000 train_time:2592ms step_avg:42.48ms +[2025-09-11 12:52:45] [Rank 0] step:61/10000 train_time:2592ms step_avg:42.48ms +[2025-09-11 12:52:46] [Rank 0] step:81/10000 train_time:3321ms step_avg:41.00ms +[2025-09-11 12:52:46] [Rank 0] step:81/10000 train_time:3321ms step_avg:41.00ms +[2025-09-11 12:52:47] [Rank 0] step:101/10000 train_time:4050ms step_avg:40.10ms +[2025-09-11 12:52:47] [Rank 0] step:101/10000 train_time:4050ms step_avg:40.10ms +[2025-09-11 12:52:47] [Rank 0] step:121/10000 train_time:4779ms step_avg:39.50ms +[2025-09-11 12:52:47] [Rank 0] step:121/10000 train_time:4779ms step_avg:39.50ms +[2025-09-11 12:52:48] [Rank 0] step:141/10000 train_time:5508ms step_avg:39.06ms +[2025-09-11 12:52:48] [Rank 0] step:141/10000 train_time:5508ms step_avg:39.06ms +[2025-09-11 12:52:49] [Rank 0] step:161/10000 train_time:6237ms step_avg:38.74ms +[2025-09-11 12:52:49] [Rank 0] step:161/10000 train_time:6237ms step_avg:38.74ms +[2025-09-11 12:52:50] [Rank 0] step:181/10000 train_time:6966ms step_avg:38.48ms +[2025-09-11 12:52:50] [Rank 0] step:181/10000 train_time:6966ms step_avg:38.48ms +[2025-09-11 12:52:50] [Rank 0] step:201/10000 train_time:7694ms step_avg:38.28ms +[2025-09-11 12:52:50] [Rank 0] step:201/10000 train_time:7694ms step_avg:38.28ms +[2025-09-11 12:52:51] [Rank 0] step:221/10000 train_time:8424ms step_avg:38.12ms +[2025-09-11 12:52:51] [Rank 0] step:221/10000 train_time:8424ms step_avg:38.12ms +[2025-09-11 12:52:52] [Rank 0] step:241/10000 train_time:9153ms step_avg:37.98ms +[2025-09-11 12:52:52] [Rank 0] step:241/10000 train_time:9153ms step_avg:37.98ms +[2025-09-11 12:52:52] [Rank 0] 
step:261/10000 train_time:9882ms step_avg:37.86ms +[2025-09-11 12:52:52] [Rank 0] step:261/10000 train_time:9882ms step_avg:37.86ms +[2025-09-11 12:52:53] [Rank 0] step:281/10000 train_time:10611ms step_avg:37.76ms +[2025-09-11 12:52:53] [Rank 0] step:281/10000 train_time:10611ms step_avg:37.76ms +[2025-09-11 12:52:54] [Rank 0] step:301/10000 train_time:11340ms step_avg:37.67ms +[2025-09-11 12:52:54] [Rank 0] step:301/10000 train_time:11340ms step_avg:37.67ms +[2025-09-11 12:52:55] [Rank 0] step:321/10000 train_time:12069ms step_avg:37.60ms +[2025-09-11 12:52:55] [Rank 0] step:321/10000 train_time:12069ms step_avg:37.60ms +[2025-09-11 12:52:55] [Rank 0] step:341/10000 train_time:12797ms step_avg:37.53ms +[2025-09-11 12:52:55] [Rank 0] step:341/10000 train_time:12797ms step_avg:37.53ms +[2025-09-11 12:52:56] [Rank 0] step:361/10000 train_time:13526ms step_avg:37.47ms +[2025-09-11 12:52:56] [Rank 0] step:361/10000 train_time:13526ms step_avg:37.47ms +[2025-09-11 12:52:57] [Rank 0] step:381/10000 train_time:14254ms step_avg:37.41ms +[2025-09-11 12:52:57] [Rank 0] step:381/10000 train_time:14254ms step_avg:37.41ms +[2025-09-11 12:52:58] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 12:52:58] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 12:53:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 12:53:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 12:53:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 12:53:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 12:53:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:53:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 12:53:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 12:53:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 12:53:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 12:53:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 12:53:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 12:53:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 12:53:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 12:53:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 12:53:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 12:53:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 12:53:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 12:53:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 12:53:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 12:53:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 12:53:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 12:53:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 12:53:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 12:53:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 12:53:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 12:53:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 12:53:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 12:53:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 12:53:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 12:53:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 12:53:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 12:53:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 12:53:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 12:53:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 12:53:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 12:53:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 12:53:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 12:53:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 12:53:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 12:53:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 12:53:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 12:53:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 12:53:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 12:53:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:53:44] [Rank 0] PRINT: step:400/10000 val_loss:7.1034 total_sharp:3.4969e-02 L1_sharp:8.2724e-02 L2_sharp:8.6997e-02 L3_sharp:9.0726e-02 L4_sharp:9.5897e-02 L5_sharp:1.5421e-01 L6_sharp:3.1769e-01 L7_sharp:4.1059e-01 L8_sharp:2.7837e-01 L9_sharp:4.2784e-01 L10_sharp:5.4213e-01 L11_sharp:8.8741e-01 L12_sharp:7.2923e-01 total_fnorm:5.4992e+00 total_l1_linf:1.6324e+04 total_spectral:2.7496e+00 L1_fnorm:2.4290e-01 L2_fnorm:2.4222e-01 L3_fnorm:2.4145e-01 L4_fnorm:2.4046e-01 L5_fnorm:2.4027e-01 L6_fnorm:2.3012e-01 L7_fnorm:2.1613e-01 L8_fnorm:1.8617e-01 L9_fnorm:1.5602e-01 L10_fnorm:1.3211e-01 L11_fnorm:1.1255e-01 L12_fnorm:1.0556e-01 L1_l1linf:8.3341e-02 L2_l1linf:8.2671e-02 L3_l1linf:8.2421e-02 L4_l1linf:8.3369e-02 L5_l1linf:8.4757e-02 L6_l1linf:8.2350e-02 L7_l1linf:7.2276e-02 L8_l1linf:6.3657e-02 L9_l1linf:5.5407e-02 L10_l1linf:5.1818e-02 L11_l1linf:4.8502e-02 L12_l1linf:4.7514e-02 L1_spectral:2.4106e-03 L2_spectral:2.4098e-03 L3_spectral:2.4089e-03 L4_spectral:2.4125e-03 L5_spectral:2.4091e-03 L6_spectral:2.4122e-03 L7_spectral:2.4106e-03 L8_spectral:2.4082e-03 L9_spectral:2.4076e-03 L10_spectral:2.4064e-03 L11_spectral:2.4059e-03 L12_spectral:2.4050e-03 train_time:14963ms step_avg:37.41ms +[2025-09-11 12:53:44] [Rank 0] PRINT: step:400/10000 val_loss:7.1034 total_sharp:3.4969e-02 L1_sharp:8.2724e-02 L2_sharp:8.6997e-02 L3_sharp:9.0726e-02 L4_sharp:9.5897e-02 L5_sharp:1.5421e-01 L6_sharp:3.1769e-01 L7_sharp:4.1059e-01 L8_sharp:2.7837e-01 L9_sharp:4.2784e-01 L10_sharp:5.4213e-01 L11_sharp:8.8741e-01 L12_sharp:7.2923e-01 total_fnorm:5.4992e+00 total_l1_linf:1.6324e+04 total_spectral:2.7496e+00 L1_fnorm:2.4290e-01 L2_fnorm:2.4222e-01 L3_fnorm:2.4145e-01 L4_fnorm:2.4046e-01 L5_fnorm:2.4027e-01 L6_fnorm:2.3012e-01 L7_fnorm:2.1613e-01 L8_fnorm:1.8617e-01 L9_fnorm:1.5602e-01 L10_fnorm:1.3211e-01 L11_fnorm:1.1255e-01 L12_fnorm:1.0556e-01 L1_l1linf:8.3341e-02 L2_l1linf:8.2671e-02 L3_l1linf:8.2421e-02 L4_l1linf:8.3369e-02 L5_l1linf:8.4757e-02 
L6_l1linf:8.2350e-02 L7_l1linf:7.2276e-02 L8_l1linf:6.3657e-02 L9_l1linf:5.5407e-02 L10_l1linf:5.1818e-02 L11_l1linf:4.8502e-02 L12_l1linf:4.7514e-02 L1_spectral:2.4106e-03 L2_spectral:2.4098e-03 L3_spectral:2.4089e-03 L4_spectral:2.4125e-03 L5_spectral:2.4091e-03 L6_spectral:2.4122e-03 L7_spectral:2.4106e-03 L8_spectral:2.4082e-03 L9_spectral:2.4076e-03 L10_spectral:2.4064e-03 L11_spectral:2.4059e-03 L12_spectral:2.4050e-03 train_time:14963ms step_avg:37.41ms +[2025-09-11 12:54:13] [Rank 0] step:401/10000 train_time:44238ms step_avg:110.32ms +[2025-09-11 12:54:13] [Rank 0] step:401/10000 train_time:44238ms step_avg:110.32ms +[2025-09-11 12:54:16] [Rank 0] step:421/10000 train_time:46630ms step_avg:110.76ms +[2025-09-11 12:54:16] [Rank 0] step:421/10000 train_time:46630ms step_avg:110.76ms +[2025-09-11 12:54:16] [Rank 0] step:441/10000 train_time:47271ms step_avg:107.19ms +[2025-09-11 12:54:16] [Rank 0] step:441/10000 train_time:47271ms step_avg:107.19ms +[2025-09-11 12:54:17] [Rank 0] step:461/10000 train_time:47911ms step_avg:103.93ms +[2025-09-11 12:54:17] [Rank 0] step:461/10000 train_time:47911ms step_avg:103.93ms +[2025-09-11 12:54:18] [Rank 0] step:481/10000 train_time:48859ms step_avg:101.58ms +[2025-09-11 12:54:18] [Rank 0] step:481/10000 train_time:48859ms step_avg:101.58ms +[2025-09-11 12:54:19] [Rank 0] step:501/10000 train_time:49498ms step_avg:98.80ms +[2025-09-11 12:54:19] [Rank 0] step:501/10000 train_time:49498ms step_avg:98.80ms +[2025-09-11 12:54:19] [Rank 0] step:521/10000 train_time:50137ms step_avg:96.23ms +[2025-09-11 12:54:19] [Rank 0] step:521/10000 train_time:50137ms step_avg:96.23ms +[2025-09-11 12:54:20] [Rank 0] step:541/10000 train_time:50777ms step_avg:93.86ms +[2025-09-11 12:54:20] [Rank 0] step:541/10000 train_time:50777ms step_avg:93.86ms +[2025-09-11 12:54:21] [Rank 0] step:561/10000 train_time:51731ms step_avg:92.21ms +[2025-09-11 12:54:21] [Rank 0] step:561/10000 train_time:51731ms step_avg:92.21ms +[2025-09-11 12:54:21] [Rank 
0] step:581/10000 train_time:52371ms step_avg:90.14ms +[2025-09-11 12:54:21] [Rank 0] step:581/10000 train_time:52371ms step_avg:90.14ms +[2025-09-11 12:54:22] [Rank 0] step:601/10000 train_time:53012ms step_avg:88.21ms +[2025-09-11 12:54:22] [Rank 0] step:601/10000 train_time:53012ms step_avg:88.21ms +[2025-09-11 12:54:23] [Rank 0] step:621/10000 train_time:53650ms step_avg:86.39ms +[2025-09-11 12:54:23] [Rank 0] step:621/10000 train_time:53650ms step_avg:86.39ms +[2025-09-11 12:54:23] [Rank 0] step:641/10000 train_time:54288ms step_avg:84.69ms +[2025-09-11 12:54:23] [Rank 0] step:641/10000 train_time:54288ms step_avg:84.69ms +[2025-09-11 12:54:24] [Rank 0] step:661/10000 train_time:54927ms step_avg:83.10ms +[2025-09-11 12:54:24] [Rank 0] step:661/10000 train_time:54927ms step_avg:83.10ms +[2025-09-11 12:54:25] [Rank 0] step:681/10000 train_time:55565ms step_avg:81.59ms +[2025-09-11 12:54:25] [Rank 0] step:681/10000 train_time:55565ms step_avg:81.59ms +[2025-09-11 12:54:25] [Rank 0] step:701/10000 train_time:56203ms step_avg:80.18ms +[2025-09-11 12:54:25] [Rank 0] step:701/10000 train_time:56203ms step_avg:80.18ms +[2025-09-11 12:54:26] [Rank 0] step:721/10000 train_time:56842ms step_avg:78.84ms +[2025-09-11 12:54:26] [Rank 0] step:721/10000 train_time:56842ms step_avg:78.84ms +[2025-09-11 12:54:27] [Rank 0] step:741/10000 train_time:57480ms step_avg:77.57ms +[2025-09-11 12:54:27] [Rank 0] step:741/10000 train_time:57480ms step_avg:77.57ms +[2025-09-11 12:54:27] [Rank 0] step:761/10000 train_time:58124ms step_avg:76.38ms +[2025-09-11 12:54:27] [Rank 0] step:761/10000 train_time:58124ms step_avg:76.38ms +[2025-09-11 12:54:28] [Rank 0] step:781/10000 train_time:58767ms step_avg:75.25ms +[2025-09-11 12:54:28] [Rank 0] step:781/10000 train_time:58767ms step_avg:75.25ms +[2025-09-11 12:54:28] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 12:54:28] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 12:54:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 12:54:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 12:55:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 12:55:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 12:55:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:55:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:55:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 12:55:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 12:55:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 12:55:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 12:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 12:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 12:55:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 12:55:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 12:55:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 12:55:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 12:55:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 12:55:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 12:55:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 12:55:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 12:55:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 12:55:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 12:55:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 12:55:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 12:55:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 12:55:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 12:55:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 12:55:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 12:55:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 12:55:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 12:55:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 12:55:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 12:55:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 12:55:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 12:55:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 12:55:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 12:55:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 12:55:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 12:55:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... 
+[2025-09-11 12:55:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 12:55:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 12:55:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 12:55:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 12:55:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 12:55:15] [Rank 0] PRINT: step:800/10000 val_loss:6.6115 total_sharp:1.5240e-01 L1_sharp:6.7843e-02 L2_sharp:6.9894e-02 L3_sharp:7.8275e-02 L4_sharp:1.1616e-01 L5_sharp:1.8274e-01 L6_sharp:2.9594e-01 L7_sharp:3.1431e-01 L8_sharp:3.4065e-01 L9_sharp:4.6614e-01 L10_sharp:5.1543e-01 L11_sharp:7.1203e-01 L12_sharp:7.9902e-01 total_fnorm:2.9062e+00 total_l1_linf:4.5440e+03 total_spectral:1.4609e+00 L1_fnorm:2.4707e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4609e-01 L4_fnorm:2.4609e-01 L5_fnorm:2.4316e-01 L6_fnorm:2.4512e-01 L7_fnorm:2.4609e-01 L8_fnorm:2.2949e-01 L9_fnorm:2.1484e-01 L10_fnorm:1.8457e-01 L11_fnorm:1.4746e-01 L12_fnorm:1.2256e-01 L1_l1linf:7.9590e-02 L2_l1linf:7.8613e-02 L3_l1linf:7.8125e-02 L4_l1linf:8.0078e-02 L5_l1linf:8.2520e-02 L6_l1linf:8.2520e-02 L7_l1linf:8.0566e-02 L8_l1linf:7.6660e-02 L9_l1linf:6.4941e-02 L10_l1linf:5.4443e-02 L11_l1linf:4.4434e-02 L12_l1linf:3.6621e-02 L1_spectral:3.1503e-03 L2_spectral:3.1170e-03 L3_spectral:3.1432e-03 L4_spectral:3.1261e-03 L5_spectral:3.1127e-03 L6_spectral:3.0716e-03 L7_spectral:3.1135e-03 L8_spectral:3.0625e-03 L9_spectral:3.0410e-03 L10_spectral:2.9262e-03 L11_spectral:2.7595e-03 L12_spectral:2.5481e-03 train_time:59393ms step_avg:74.24ms +[2025-09-11 12:55:15] [Rank 0] PRINT: step:800/10000 val_loss:6.6115 total_sharp:1.5240e-01 L1_sharp:6.7843e-02 L2_sharp:6.9894e-02 L3_sharp:7.8275e-02 L4_sharp:1.1616e-01 L5_sharp:1.8274e-01 L6_sharp:2.9594e-01 L7_sharp:3.1431e-01 L8_sharp:3.4065e-01 
L9_sharp:4.6614e-01 L10_sharp:5.1543e-01 L11_sharp:7.1203e-01 L12_sharp:7.9902e-01 total_fnorm:2.9062e+00 total_l1_linf:4.5440e+03 total_spectral:1.4609e+00 L1_fnorm:2.4707e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4609e-01 L4_fnorm:2.4609e-01 L5_fnorm:2.4316e-01 L6_fnorm:2.4512e-01 L7_fnorm:2.4609e-01 L8_fnorm:2.2949e-01 L9_fnorm:2.1484e-01 L10_fnorm:1.8457e-01 L11_fnorm:1.4746e-01 L12_fnorm:1.2256e-01 L1_l1linf:7.9590e-02 L2_l1linf:7.8613e-02 L3_l1linf:7.8125e-02 L4_l1linf:8.0078e-02 L5_l1linf:8.2520e-02 L6_l1linf:8.2520e-02 L7_l1linf:8.0566e-02 L8_l1linf:7.6660e-02 L9_l1linf:6.4941e-02 L10_l1linf:5.4443e-02 L11_l1linf:4.4434e-02 L12_l1linf:3.6621e-02 L1_spectral:3.1503e-03 L2_spectral:3.1170e-03 L3_spectral:3.1432e-03 L4_spectral:3.1261e-03 L5_spectral:3.1127e-03 L6_spectral:3.0716e-03 L7_spectral:3.1135e-03 L8_spectral:3.0625e-03 L9_spectral:3.0410e-03 L10_spectral:2.9262e-03 L11_spectral:2.7595e-03 L12_spectral:2.5481e-03 train_time:59393ms step_avg:74.24ms +[2025-09-11 12:55:17] [Rank 0] step:801/10000 train_time:61177ms step_avg:76.38ms +[2025-09-11 12:55:17] [Rank 0] step:801/10000 train_time:61177ms step_avg:76.38ms +[2025-09-11 12:55:17] [Rank 0] step:821/10000 train_time:61826ms step_avg:75.31ms +[2025-09-11 12:55:17] [Rank 0] step:821/10000 train_time:61826ms step_avg:75.31ms +[2025-09-11 12:55:18] [Rank 0] step:841/10000 train_time:62472ms step_avg:74.28ms +[2025-09-11 12:55:18] [Rank 0] step:841/10000 train_time:62472ms step_avg:74.28ms +[2025-09-11 12:55:19] [Rank 0] step:861/10000 train_time:63117ms step_avg:73.31ms +[2025-09-11 12:55:19] [Rank 0] step:861/10000 train_time:63117ms step_avg:73.31ms +[2025-09-11 12:55:19] [Rank 0] step:881/10000 train_time:63762ms step_avg:72.37ms +[2025-09-11 12:55:19] [Rank 0] step:881/10000 train_time:63762ms step_avg:72.37ms +[2025-09-11 12:55:20] [Rank 0] step:901/10000 train_time:64407ms step_avg:71.48ms +[2025-09-11 12:55:20] [Rank 0] step:901/10000 train_time:64407ms step_avg:71.48ms +[2025-09-11 12:55:21] [Rank 0] 
step:921/10000 train_time:65051ms step_avg:70.63ms +[2025-09-11 12:55:21] [Rank 0] step:921/10000 train_time:65051ms step_avg:70.63ms +[2025-09-11 12:55:22] [Rank 0] step:941/10000 train_time:65970ms step_avg:70.11ms +[2025-09-11 12:55:22] [Rank 0] step:941/10000 train_time:65970ms step_avg:70.11ms +[2025-09-11 12:55:22] [Rank 0] step:961/10000 train_time:66614ms step_avg:69.32ms +[2025-09-11 12:55:22] [Rank 0] step:961/10000 train_time:66614ms step_avg:69.32ms +[2025-09-11 12:55:23] [Rank 0] step:981/10000 train_time:67259ms step_avg:68.56ms +[2025-09-11 12:55:23] [Rank 0] step:981/10000 train_time:67259ms step_avg:68.56ms +[2025-09-11 12:55:24] [Rank 0] step:1001/10000 train_time:68194ms step_avg:68.13ms +[2025-09-11 12:55:24] [Rank 0] step:1001/10000 train_time:68194ms step_avg:68.13ms +[2025-09-11 12:55:24] [Rank 0] step:1021/10000 train_time:68838ms step_avg:67.42ms +[2025-09-11 12:55:24] [Rank 0] step:1021/10000 train_time:68838ms step_avg:67.42ms +[2025-09-11 12:55:25] [Rank 0] step:1041/10000 train_time:69482ms step_avg:66.75ms +[2025-09-11 12:55:25] [Rank 0] step:1041/10000 train_time:69482ms step_avg:66.75ms +[2025-09-11 12:55:26] [Rank 0] step:1061/10000 train_time:70126ms step_avg:66.09ms +[2025-09-11 12:55:26] [Rank 0] step:1061/10000 train_time:70126ms step_avg:66.09ms +[2025-09-11 12:55:26] [Rank 0] step:1081/10000 train_time:70769ms step_avg:65.47ms +[2025-09-11 12:55:26] [Rank 0] step:1081/10000 train_time:70769ms step_avg:65.47ms +[2025-09-11 12:55:27] [Rank 0] step:1101/10000 train_time:71413ms step_avg:64.86ms +[2025-09-11 12:55:27] [Rank 0] step:1101/10000 train_time:71413ms step_avg:64.86ms +[2025-09-11 12:55:28] [Rank 0] step:1121/10000 train_time:72057ms step_avg:64.28ms +[2025-09-11 12:55:28] [Rank 0] step:1121/10000 train_time:72057ms step_avg:64.28ms +[2025-09-11 12:55:28] [Rank 0] step:1141/10000 train_time:72701ms step_avg:63.72ms +[2025-09-11 12:55:28] [Rank 0] step:1141/10000 train_time:72701ms step_avg:63.72ms +[2025-09-11 12:55:29] 
[Rank 0] step:1161/10000 train_time:73346ms step_avg:63.17ms +[2025-09-11 12:55:29] [Rank 0] step:1161/10000 train_time:73346ms step_avg:63.17ms +[2025-09-11 12:55:30] [Rank 0] step:1181/10000 train_time:73990ms step_avg:62.65ms +[2025-09-11 12:55:30] [Rank 0] step:1181/10000 train_time:73990ms step_avg:62.65ms +[2025-09-11 12:55:30] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 12:55:30] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 12:55:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 12:55:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 12:55:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 12:55:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 12:55:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:55:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:55:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 12:55:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 12:55:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 12:55:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 12:55:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 12:55:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 12:55:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... 
+[2025-09-11 12:55:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 12:55:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 12:55:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 12:55:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 12:55:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 12:55:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 12:55:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 12:55:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 12:55:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 12:55:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 12:55:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 12:55:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 12:55:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 12:55:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 12:55:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 12:55:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 12:55:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 12:55:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 12:55:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 12:55:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... 
+[2025-09-11 12:55:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 12:55:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 12:55:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 12:55:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 12:55:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 12:55:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 12:55:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 12:55:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 12:55:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 12:55:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 12:55:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:55:41] [Rank 0] PRINT: step:1200/10000 val_loss:6.3634 total_sharp:1.0872e-01 L1_sharp:5.6497e-02 L2_sharp:5.6724e-02 L3_sharp:4.6370e-02 L4_sharp:7.6606e-02 L5_sharp:1.0092e-01 L6_sharp:1.5751e-01 L7_sharp:1.6699e-01 L8_sharp:1.5849e-01 L9_sharp:1.6404e-01 L10_sharp:1.5488e-01 L11_sharp:1.8275e-01 L12_sharp:3.2207e-01 total_fnorm:2.3594e+00 total_l1_linf:3.9200e+03 total_spectral:1.1953e+00 L1_fnorm:2.4805e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4805e-01 L7_fnorm:2.5195e-01 L8_fnorm:2.5000e-01 L9_fnorm:2.5000e-01 L10_fnorm:2.4316e-01 L11_fnorm:2.2559e-01 L12_fnorm:1.9043e-01 L1_l1linf:7.5684e-02 L2_l1linf:7.5195e-02 L3_l1linf:7.4219e-02 L4_l1linf:7.4707e-02 L5_l1linf:7.3730e-02 L6_l1linf:7.6660e-02 L7_l1linf:7.8125e-02 L8_l1linf:8.0078e-02 L9_l1linf:8.0078e-02 L10_l1linf:7.8613e-02 L11_l1linf:6.6406e-02 L12_l1linf:4.8584e-02 L1_spectral:3.1393e-03 L2_spectral:3.1375e-03 L3_spectral:3.1255e-03 L4_spectral:3.1225e-03 L5_spectral:3.1179e-03 L6_spectral:3.1065e-03 L7_spectral:3.1072e-03 L8_spectral:3.1067e-03 L9_spectral:3.0804e-03 L10_spectral:3.0975e-03 L11_spectral:3.0729e-03 L12_spectral:2.9591e-03 train_time:74617ms step_avg:62.18ms +[2025-09-11 12:55:41] [Rank 0] PRINT: step:1200/10000 val_loss:6.3634 total_sharp:1.0872e-01 L1_sharp:5.6497e-02 L2_sharp:5.6724e-02 L3_sharp:4.6370e-02 L4_sharp:7.6606e-02 L5_sharp:1.0092e-01 L6_sharp:1.5751e-01 L7_sharp:1.6699e-01 L8_sharp:1.5849e-01 L9_sharp:1.6404e-01 L10_sharp:1.5488e-01 L11_sharp:1.8275e-01 L12_sharp:3.2207e-01 total_fnorm:2.3594e+00 total_l1_linf:3.9200e+03 total_spectral:1.1953e+00 L1_fnorm:2.4805e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4805e-01 L7_fnorm:2.5195e-01 L8_fnorm:2.5000e-01 L9_fnorm:2.5000e-01 L10_fnorm:2.4316e-01 L11_fnorm:2.2559e-01 L12_fnorm:1.9043e-01 L1_l1linf:7.5684e-02 L2_l1linf:7.5195e-02 L3_l1linf:7.4219e-02 L4_l1linf:7.4707e-02 L5_l1linf:7.3730e-02 
L6_l1linf:7.6660e-02 L7_l1linf:7.8125e-02 L8_l1linf:8.0078e-02 L9_l1linf:8.0078e-02 L10_l1linf:7.8613e-02 L11_l1linf:6.6406e-02 L12_l1linf:4.8584e-02 L1_spectral:3.1393e-03 L2_spectral:3.1375e-03 L3_spectral:3.1255e-03 L4_spectral:3.1225e-03 L5_spectral:3.1179e-03 L6_spectral:3.1065e-03 L7_spectral:3.1072e-03 L8_spectral:3.1067e-03 L9_spectral:3.0804e-03 L10_spectral:3.0975e-03 L11_spectral:3.0729e-03 L12_spectral:2.9591e-03 train_time:74617ms step_avg:62.18ms +[2025-09-11 12:55:43] [Rank 0] step:1201/10000 train_time:76405ms step_avg:63.62ms +[2025-09-11 12:55:43] [Rank 0] step:1201/10000 train_time:76405ms step_avg:63.62ms +[2025-09-11 12:55:43] [Rank 0] step:1221/10000 train_time:77054ms step_avg:63.11ms +[2025-09-11 12:55:43] [Rank 0] step:1221/10000 train_time:77054ms step_avg:63.11ms +[2025-09-11 12:55:44] [Rank 0] step:1241/10000 train_time:77700ms step_avg:62.61ms +[2025-09-11 12:55:44] [Rank 0] step:1241/10000 train_time:77700ms step_avg:62.61ms +[2025-09-11 12:55:45] [Rank 0] step:1261/10000 train_time:78345ms step_avg:62.13ms +[2025-09-11 12:55:45] [Rank 0] step:1261/10000 train_time:78345ms step_avg:62.13ms +[2025-09-11 12:55:45] [Rank 0] step:1281/10000 train_time:78990ms step_avg:61.66ms +[2025-09-11 12:55:45] [Rank 0] step:1281/10000 train_time:78990ms step_avg:61.66ms +[2025-09-11 12:55:46] [Rank 0] step:1301/10000 train_time:79635ms step_avg:61.21ms +[2025-09-11 12:55:46] [Rank 0] step:1301/10000 train_time:79635ms step_avg:61.21ms +[2025-09-11 12:55:47] [Rank 0] step:1321/10000 train_time:80280ms step_avg:60.77ms +[2025-09-11 12:55:47] [Rank 0] step:1321/10000 train_time:80280ms step_avg:60.77ms +[2025-09-11 12:55:47] [Rank 0] step:1341/10000 train_time:80924ms step_avg:60.35ms +[2025-09-11 12:55:47] [Rank 0] step:1341/10000 train_time:80924ms step_avg:60.35ms +[2025-09-11 12:55:48] [Rank 0] step:1361/10000 train_time:81569ms step_avg:59.93ms +[2025-09-11 12:55:48] [Rank 0] step:1361/10000 train_time:81569ms step_avg:59.93ms +[2025-09-11 12:55:49] 
[Rank 0] step:1381/10000 train_time:82213ms step_avg:59.53ms +[2025-09-11 12:55:49] [Rank 0] step:1381/10000 train_time:82213ms step_avg:59.53ms +[2025-09-11 12:55:49] [Rank 0] step:1401/10000 train_time:82858ms step_avg:59.14ms +[2025-09-11 12:55:49] [Rank 0] step:1401/10000 train_time:82858ms step_avg:59.14ms +[2025-09-11 12:55:50] [Rank 0] step:1421/10000 train_time:83502ms step_avg:58.76ms +[2025-09-11 12:55:50] [Rank 0] step:1421/10000 train_time:83502ms step_avg:58.76ms +[2025-09-11 12:55:50] [Rank 0] step:1441/10000 train_time:84147ms step_avg:58.39ms +[2025-09-11 12:55:50] [Rank 0] step:1441/10000 train_time:84147ms step_avg:58.39ms +[2025-09-11 12:55:51] [Rank 0] step:1461/10000 train_time:84791ms step_avg:58.04ms +[2025-09-11 12:55:51] [Rank 0] step:1461/10000 train_time:84791ms step_avg:58.04ms +[2025-09-11 12:55:52] [Rank 0] step:1481/10000 train_time:85435ms step_avg:57.69ms +[2025-09-11 12:55:52] [Rank 0] step:1481/10000 train_time:85435ms step_avg:57.69ms +[2025-09-11 12:55:52] [Rank 0] step:1501/10000 train_time:86083ms step_avg:57.35ms +[2025-09-11 12:55:52] [Rank 0] step:1501/10000 train_time:86083ms step_avg:57.35ms +[2025-09-11 12:55:53] [Rank 0] step:1521/10000 train_time:86732ms step_avg:57.02ms +[2025-09-11 12:55:53] [Rank 0] step:1521/10000 train_time:86732ms step_avg:57.02ms +[2025-09-11 12:55:54] [Rank 0] step:1541/10000 train_time:87380ms step_avg:56.70ms +[2025-09-11 12:55:54] [Rank 0] step:1541/10000 train_time:87380ms step_avg:56.70ms +[2025-09-11 12:55:54] [Rank 0] step:1561/10000 train_time:88029ms step_avg:56.39ms +[2025-09-11 12:55:54] [Rank 0] step:1561/10000 train_time:88029ms step_avg:56.39ms +[2025-09-11 12:55:55] [Rank 0] step:1581/10000 train_time:88677ms step_avg:56.09ms +[2025-09-11 12:55:55] [Rank 0] step:1581/10000 train_time:88677ms step_avg:56.09ms +[2025-09-11 12:55:56] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 12:55:56] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 12:55:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 12:55:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 12:55:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 12:55:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 12:55:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:55:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:55:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 12:55:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 12:55:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 12:55:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 12:56:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 12:56:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 12:56:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 12:56:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 12:56:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 12:56:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 12:56:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 12:56:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 12:56:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 12:56:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 12:56:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 12:56:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 12:56:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 12:56:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 12:56:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 12:56:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 12:56:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 12:56:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 12:56:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 12:56:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 12:56:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 12:56:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 12:56:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 12:56:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 12:56:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 12:56:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 12:56:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 12:56:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 12:56:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 12:56:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 12:56:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 12:56:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 12:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 12:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 12:56:06] [Rank 0] PRINT: step:1600/10000 val_loss:6.2116 total_sharp:1.0556e-01 L1_sharp:2.5349e-02 L2_sharp:2.1947e-02 L3_sharp:2.5892e-02 L4_sharp:4.0457e-02 L5_sharp:5.8894e-02 L6_sharp:1.0799e-01 L7_sharp:1.1275e-01 L8_sharp:1.3117e-01 L9_sharp:1.1531e-01 L10_sharp:1.3262e-01 L11_sharp:1.6629e-01 L12_sharp:3.6261e-01 total_fnorm:2.1719e+00 total_l1_linf:3.6160e+03 total_spectral:1.1016e+00 L1_fnorm:2.4902e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4707e-01 L6_fnorm:2.4805e-01 L7_fnorm:2.5000e-01 L8_fnorm:2.4902e-01 L9_fnorm:2.5195e-01 L10_fnorm:2.5000e-01 L11_fnorm:2.3926e-01 L12_fnorm:2.0215e-01 L1_l1linf:7.3242e-02 L2_l1linf:7.4219e-02 L3_l1linf:7.3730e-02 L4_l1linf:7.2754e-02 L5_l1linf:7.2266e-02 L6_l1linf:7.4707e-02 L7_l1linf:7.7148e-02 L8_l1linf:7.7148e-02 L9_l1linf:7.8613e-02 L10_l1linf:7.7637e-02 L11_l1linf:6.8848e-02 L12_l1linf:4.5898e-02 L1_spectral:3.1363e-03 L2_spectral:3.1560e-03 L3_spectral:3.1430e-03 L4_spectral:3.1462e-03 L5_spectral:3.1180e-03 L6_spectral:3.1127e-03 L7_spectral:3.1410e-03 L8_spectral:3.1328e-03 L9_spectral:3.1340e-03 L10_spectral:3.1251e-03 L11_spectral:3.1234e-03 L12_spectral:3.0318e-03 train_time:89308ms step_avg:55.82ms +[2025-09-11 12:56:06] [Rank 0] PRINT: step:1600/10000 
val_loss:6.2116 total_sharp:1.0556e-01 L1_sharp:2.5349e-02 L2_sharp:2.1947e-02 L3_sharp:2.5892e-02 L4_sharp:4.0457e-02 L5_sharp:5.8894e-02 L6_sharp:1.0799e-01 L7_sharp:1.1275e-01 L8_sharp:1.3117e-01 L9_sharp:1.1531e-01 L10_sharp:1.3262e-01 L11_sharp:1.6629e-01 L12_sharp:3.6261e-01 total_fnorm:2.1719e+00 total_l1_linf:3.6160e+03 total_spectral:1.1016e+00 L1_fnorm:2.4902e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4707e-01 L6_fnorm:2.4805e-01 L7_fnorm:2.5000e-01 L8_fnorm:2.4902e-01 L9_fnorm:2.5195e-01 L10_fnorm:2.5000e-01 L11_fnorm:2.3926e-01 L12_fnorm:2.0215e-01 L1_l1linf:7.3242e-02 L2_l1linf:7.4219e-02 L3_l1linf:7.3730e-02 L4_l1linf:7.2754e-02 L5_l1linf:7.2266e-02 L6_l1linf:7.4707e-02 L7_l1linf:7.7148e-02 L8_l1linf:7.7148e-02 L9_l1linf:7.8613e-02 L10_l1linf:7.7637e-02 L11_l1linf:6.8848e-02 L12_l1linf:4.5898e-02 L1_spectral:3.1363e-03 L2_spectral:3.1560e-03 L3_spectral:3.1430e-03 L4_spectral:3.1462e-03 L5_spectral:3.1180e-03 L6_spectral:3.1127e-03 L7_spectral:3.1410e-03 L8_spectral:3.1328e-03 L9_spectral:3.1340e-03 L10_spectral:3.1251e-03 L11_spectral:3.1234e-03 L12_spectral:3.0318e-03 train_time:89308ms step_avg:55.82ms +[2025-09-11 12:56:08] [Rank 0] step:1601/10000 train_time:91181ms step_avg:56.95ms +[2025-09-11 12:56:08] [Rank 0] step:1601/10000 train_time:91181ms step_avg:56.95ms +[2025-09-11 12:56:09] [Rank 0] step:1621/10000 train_time:91834ms step_avg:56.65ms +[2025-09-11 12:56:09] [Rank 0] step:1621/10000 train_time:91834ms step_avg:56.65ms +[2025-09-11 12:56:09] [Rank 0] step:1641/10000 train_time:92484ms step_avg:56.36ms +[2025-09-11 12:56:09] [Rank 0] step:1641/10000 train_time:92484ms step_avg:56.36ms +[2025-09-11 12:56:10] [Rank 0] step:1661/10000 train_time:93133ms step_avg:56.07ms +[2025-09-11 12:56:10] [Rank 0] step:1661/10000 train_time:93133ms step_avg:56.07ms +[2025-09-11 12:56:10] [Rank 0] step:1681/10000 train_time:93783ms step_avg:55.79ms +[2025-09-11 12:56:10] [Rank 0] step:1681/10000 train_time:93783ms 
step_avg:55.79ms +[2025-09-11 12:56:11] [Rank 0] step:1701/10000 train_time:94432ms step_avg:55.52ms +[2025-09-11 12:56:11] [Rank 0] step:1701/10000 train_time:94432ms step_avg:55.52ms +[2025-09-11 12:56:12] [Rank 0] step:1721/10000 train_time:95081ms step_avg:55.25ms +[2025-09-11 12:56:12] [Rank 0] step:1721/10000 train_time:95081ms step_avg:55.25ms +[2025-09-11 12:56:12] [Rank 0] step:1741/10000 train_time:95730ms step_avg:54.99ms +[2025-09-11 12:56:12] [Rank 0] step:1741/10000 train_time:95730ms step_avg:54.99ms +[2025-09-11 12:56:13] [Rank 0] step:1761/10000 train_time:96379ms step_avg:54.73ms +[2025-09-11 12:56:13] [Rank 0] step:1761/10000 train_time:96379ms step_avg:54.73ms +[2025-09-11 12:56:14] [Rank 0] step:1781/10000 train_time:97028ms step_avg:54.48ms +[2025-09-11 12:56:14] [Rank 0] step:1781/10000 train_time:97028ms step_avg:54.48ms +[2025-09-11 12:56:14] [Rank 0] step:1801/10000 train_time:97678ms step_avg:54.24ms +[2025-09-11 12:56:14] [Rank 0] step:1801/10000 train_time:97678ms step_avg:54.24ms +[2025-09-11 12:56:15] [Rank 0] step:1821/10000 train_time:98327ms step_avg:54.00ms +[2025-09-11 12:56:15] [Rank 0] step:1821/10000 train_time:98327ms step_avg:54.00ms +[2025-09-11 12:56:16] [Rank 0] step:1841/10000 train_time:98976ms step_avg:53.76ms +[2025-09-11 12:56:16] [Rank 0] step:1841/10000 train_time:98976ms step_avg:53.76ms +[2025-09-11 12:56:16] [Rank 0] step:1861/10000 train_time:99625ms step_avg:53.53ms +[2025-09-11 12:56:16] [Rank 0] step:1861/10000 train_time:99625ms step_avg:53.53ms +[2025-09-11 12:56:17] [Rank 0] step:1881/10000 train_time:100274ms step_avg:53.31ms +[2025-09-11 12:56:17] [Rank 0] step:1881/10000 train_time:100274ms step_avg:53.31ms +[2025-09-11 12:56:18] [Rank 0] step:1901/10000 train_time:100922ms step_avg:53.09ms +[2025-09-11 12:56:18] [Rank 0] step:1901/10000 train_time:100922ms step_avg:53.09ms +[2025-09-11 12:56:18] [Rank 0] step:1921/10000 train_time:101571ms step_avg:52.87ms +[2025-09-11 12:56:18] [Rank 0] 
step:1921/10000 train_time:101571ms step_avg:52.87ms +[2025-09-11 12:56:19] [Rank 0] step:1941/10000 train_time:102221ms step_avg:52.66ms +[2025-09-11 12:56:19] [Rank 0] step:1941/10000 train_time:102221ms step_avg:52.66ms +[2025-09-11 12:56:20] [Rank 0] step:1961/10000 train_time:102869ms step_avg:52.46ms +[2025-09-11 12:56:20] [Rank 0] step:1961/10000 train_time:102869ms step_avg:52.46ms +[2025-09-11 12:56:20] [Rank 0] step:1981/10000 train_time:103517ms step_avg:52.26ms +[2025-09-11 12:56:20] [Rank 0] step:1981/10000 train_time:103517ms step_avg:52.26ms +[2025-09-11 12:56:21] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 12:56:21] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 12:56:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 12:56:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 12:56:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 12:56:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 12:56:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:56:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:56:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 12:56:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 12:56:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 12:56:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 12:56:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 12:56:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 12:56:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 12:56:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 12:56:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 12:56:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 12:56:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 12:56:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 12:56:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 12:56:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 12:56:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 12:56:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 12:56:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 12:56:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 12:56:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 12:56:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 12:56:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 12:56:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 12:56:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 12:56:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 12:56:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 12:56:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 12:56:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 12:56:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 12:56:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 12:56:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 12:56:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 12:56:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 12:56:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 12:56:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 12:56:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 12:56:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 12:56:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 12:56:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:56:32] [Rank 0] PRINT: step:2000/10000 val_loss:6.0761 total_sharp:8.8291e-02 L1_sharp:1.0803e-02 L2_sharp:1.0429e-02 L3_sharp:1.2059e-02 L4_sharp:1.9887e-02 L5_sharp:3.7606e-02 L6_sharp:6.7710e-02 L7_sharp:9.3785e-02 L8_sharp:1.0384e-01 L9_sharp:1.0091e-01 L10_sharp:1.1370e-01 L11_sharp:1.5115e-01 L12_sharp:4.7380e-01 total_fnorm:2.0781e+00 total_l1_linf:3.5040e+03 total_spectral:1.0547e+00 L1_fnorm:2.4902e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.5000e-01 L8_fnorm:2.4805e-01 L9_fnorm:2.5195e-01 L10_fnorm:2.5391e-01 L11_fnorm:2.4805e-01 L12_fnorm:2.0996e-01 L1_l1linf:7.2754e-02 L2_l1linf:7.2266e-02 L3_l1linf:7.1777e-02 L4_l1linf:7.0801e-02 L5_l1linf:6.9336e-02 L6_l1linf:7.0312e-02 L7_l1linf:7.2754e-02 L8_l1linf:7.3242e-02 L9_l1linf:7.4707e-02 L10_l1linf:7.6660e-02 L11_l1linf:7.1289e-02 L12_l1linf:3.9307e-02 L1_spectral:3.1477e-03 L2_spectral:3.1535e-03 L3_spectral:3.1564e-03 L4_spectral:3.1526e-03 L5_spectral:3.1397e-03 L6_spectral:3.1310e-03 L7_spectral:3.1394e-03 L8_spectral:3.1402e-03 L9_spectral:3.1467e-03 L10_spectral:3.1225e-03 L11_spectral:3.1298e-03 L12_spectral:3.0695e-03 train_time:104149ms step_avg:52.07ms +[2025-09-11 12:56:32] [Rank 0] PRINT: step:2000/10000 val_loss:6.0761 total_sharp:8.8291e-02 L1_sharp:1.0803e-02 L2_sharp:1.0429e-02 L3_sharp:1.2059e-02 L4_sharp:1.9887e-02 L5_sharp:3.7606e-02 L6_sharp:6.7710e-02 L7_sharp:9.3785e-02 L8_sharp:1.0384e-01 L9_sharp:1.0091e-01 L10_sharp:1.1370e-01 L11_sharp:1.5115e-01 L12_sharp:4.7380e-01 total_fnorm:2.0781e+00 total_l1_linf:3.5040e+03 total_spectral:1.0547e+00 L1_fnorm:2.4902e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.5000e-01 L8_fnorm:2.4805e-01 L9_fnorm:2.5195e-01 L10_fnorm:2.5391e-01 L11_fnorm:2.4805e-01 L12_fnorm:2.0996e-01 L1_l1linf:7.2754e-02 L2_l1linf:7.2266e-02 L3_l1linf:7.1777e-02 L4_l1linf:7.0801e-02 L5_l1linf:6.9336e-02 
L6_l1linf:7.0312e-02 L7_l1linf:7.2754e-02 L8_l1linf:7.3242e-02 L9_l1linf:7.4707e-02 L10_l1linf:7.6660e-02 L11_l1linf:7.1289e-02 L12_l1linf:3.9307e-02 L1_spectral:3.1477e-03 L2_spectral:3.1535e-03 L3_spectral:3.1564e-03 L4_spectral:3.1526e-03 L5_spectral:3.1397e-03 L6_spectral:3.1310e-03 L7_spectral:3.1394e-03 L8_spectral:3.1402e-03 L9_spectral:3.1467e-03 L10_spectral:3.1225e-03 L11_spectral:3.1298e-03 L12_spectral:3.0695e-03 train_time:104149ms step_avg:52.07ms +[2025-09-11 12:56:33] [Rank 0] step:2001/10000 train_time:105939ms step_avg:52.94ms +[2025-09-11 12:56:33] [Rank 0] step:2001/10000 train_time:105939ms step_avg:52.94ms +[2025-09-11 12:56:34] [Rank 0] step:2021/10000 train_time:106593ms step_avg:52.74ms +[2025-09-11 12:56:34] [Rank 0] step:2021/10000 train_time:106593ms step_avg:52.74ms +[2025-09-11 12:56:35] [Rank 0] step:2041/10000 train_time:107242ms step_avg:52.54ms +[2025-09-11 12:56:35] [Rank 0] step:2041/10000 train_time:107242ms step_avg:52.54ms +[2025-09-11 12:56:35] [Rank 0] step:2061/10000 train_time:107891ms step_avg:52.35ms +[2025-09-11 12:56:35] [Rank 0] step:2061/10000 train_time:107891ms step_avg:52.35ms +[2025-09-11 12:56:36] [Rank 0] step:2081/10000 train_time:108541ms step_avg:52.16ms +[2025-09-11 12:56:36] [Rank 0] step:2081/10000 train_time:108541ms step_avg:52.16ms +[2025-09-11 12:56:37] [Rank 0] step:2101/10000 train_time:109190ms step_avg:51.97ms +[2025-09-11 12:56:37] [Rank 0] step:2101/10000 train_time:109190ms step_avg:51.97ms +[2025-09-11 12:56:37] [Rank 0] step:2121/10000 train_time:109839ms step_avg:51.79ms +[2025-09-11 12:56:37] [Rank 0] step:2121/10000 train_time:109839ms step_avg:51.79ms +[2025-09-11 12:56:38] [Rank 0] step:2141/10000 train_time:110487ms step_avg:51.61ms +[2025-09-11 12:56:38] [Rank 0] step:2141/10000 train_time:110487ms step_avg:51.61ms +[2025-09-11 12:56:39] [Rank 0] step:2161/10000 train_time:111136ms step_avg:51.43ms +[2025-09-11 12:56:39] [Rank 0] step:2161/10000 train_time:111136ms step_avg:51.43ms 
+[2025-09-11 12:56:39] [Rank 0] step:2181/10000 train_time:111784ms step_avg:51.25ms +[2025-09-11 12:56:39] [Rank 0] step:2181/10000 train_time:111784ms step_avg:51.25ms +[2025-09-11 12:56:40] [Rank 0] step:2201/10000 train_time:112432ms step_avg:51.08ms +[2025-09-11 12:56:40] [Rank 0] step:2201/10000 train_time:112432ms step_avg:51.08ms +[2025-09-11 12:56:41] [Rank 0] step:2221/10000 train_time:113083ms step_avg:50.92ms +[2025-09-11 12:56:41] [Rank 0] step:2221/10000 train_time:113083ms step_avg:50.92ms +[2025-09-11 12:56:41] [Rank 0] step:2241/10000 train_time:113741ms step_avg:50.75ms +[2025-09-11 12:56:41] [Rank 0] step:2241/10000 train_time:113741ms step_avg:50.75ms +[2025-09-11 12:56:42] [Rank 0] step:2261/10000 train_time:114403ms step_avg:50.60ms +[2025-09-11 12:56:42] [Rank 0] step:2261/10000 train_time:114403ms step_avg:50.60ms +[2025-09-11 12:56:43] [Rank 0] step:2281/10000 train_time:115065ms step_avg:50.45ms +[2025-09-11 12:56:43] [Rank 0] step:2281/10000 train_time:115065ms step_avg:50.45ms +[2025-09-11 12:56:43] [Rank 0] step:2301/10000 train_time:115728ms step_avg:50.29ms +[2025-09-11 12:56:43] [Rank 0] step:2301/10000 train_time:115728ms step_avg:50.29ms +[2025-09-11 12:56:44] [Rank 0] step:2321/10000 train_time:116389ms step_avg:50.15ms +[2025-09-11 12:56:44] [Rank 0] step:2321/10000 train_time:116389ms step_avg:50.15ms +[2025-09-11 12:56:45] [Rank 0] step:2341/10000 train_time:117052ms step_avg:50.00ms +[2025-09-11 12:56:45] [Rank 0] step:2341/10000 train_time:117052ms step_avg:50.00ms +[2025-09-11 12:56:45] [Rank 0] step:2361/10000 train_time:117714ms step_avg:49.86ms +[2025-09-11 12:56:45] [Rank 0] step:2361/10000 train_time:117714ms step_avg:49.86ms +[2025-09-11 12:56:46] [Rank 0] step:2381/10000 train_time:118378ms step_avg:49.72ms +[2025-09-11 12:56:46] [Rank 0] step:2381/10000 train_time:118378ms step_avg:49.72ms +[2025-09-11 12:56:46] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 12:56:46] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 12:56:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 12:56:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 12:56:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 12:56:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 12:56:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:56:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:56:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 12:56:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 12:56:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 12:56:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 12:56:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 12:56:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 12:56:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 12:56:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 12:56:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 12:56:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 12:56:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 12:56:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 12:56:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 12:56:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 12:56:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 12:56:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 12:56:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 12:56:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 12:56:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 12:56:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 12:56:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 12:56:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 12:56:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 12:56:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 12:56:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 12:56:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 12:56:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 12:56:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 12:56:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 12:56:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 12:56:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 12:56:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 12:56:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 12:56:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 12:56:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 12:56:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 12:56:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 12:56:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 12:56:57] [Rank 0] PRINT: step:2400/10000 val_loss:5.9419 total_sharp:1.0107e-01 L1_sharp:1.8421e-02 L2_sharp:1.4756e-02 L3_sharp:1.5770e-02 L4_sharp:2.0806e-02 L5_sharp:4.0762e-02 L6_sharp:6.2565e-02 L7_sharp:7.6737e-02 L8_sharp:1.0090e-01 L9_sharp:1.1767e-01 L10_sharp:1.1887e-01 L11_sharp:1.5547e-01 L12_sharp:3.2095e-01 total_fnorm:1.9688e+00 total_l1_linf:3.3440e+03 total_spectral:1.0000e+00 L1_fnorm:2.4902e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4512e-01 L7_fnorm:2.4805e-01 L8_fnorm:2.4609e-01 L9_fnorm:2.5000e-01 L10_fnorm:2.5195e-01 L11_fnorm:2.5000e-01 L12_fnorm:2.2070e-01 L1_l1linf:7.1777e-02 L2_l1linf:7.1777e-02 L3_l1linf:7.0801e-02 L4_l1linf:6.9824e-02 L5_l1linf:6.8359e-02 L6_l1linf:6.7383e-02 L7_l1linf:7.0312e-02 L8_l1linf:7.0312e-02 L9_l1linf:7.1289e-02 L10_l1linf:7.2754e-02 L11_l1linf:7.1777e-02 L12_l1linf:4.2236e-02 L1_spectral:3.1742e-03 L2_spectral:3.1902e-03 L3_spectral:3.1688e-03 L4_spectral:3.1553e-03 L5_spectral:3.1665e-03 L6_spectral:3.1498e-03 L7_spectral:3.1643e-03 L8_spectral:3.1309e-03 L9_spectral:3.1387e-03 L10_spectral:3.1569e-03 L11_spectral:3.1526e-03 L12_spectral:3.1055e-03 train_time:119022ms step_avg:49.59ms +[2025-09-11 12:56:57] [Rank 0] PRINT: step:2400/10000 
val_loss:5.9419 total_sharp:1.0107e-01 L1_sharp:1.8421e-02 L2_sharp:1.4756e-02 L3_sharp:1.5770e-02 L4_sharp:2.0806e-02 L5_sharp:4.0762e-02 L6_sharp:6.2565e-02 L7_sharp:7.6737e-02 L8_sharp:1.0090e-01 L9_sharp:1.1767e-01 L10_sharp:1.1887e-01 L11_sharp:1.5547e-01 L12_sharp:3.2095e-01 total_fnorm:1.9688e+00 total_l1_linf:3.3440e+03 total_spectral:1.0000e+00 L1_fnorm:2.4902e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4512e-01 L7_fnorm:2.4805e-01 L8_fnorm:2.4609e-01 L9_fnorm:2.5000e-01 L10_fnorm:2.5195e-01 L11_fnorm:2.5000e-01 L12_fnorm:2.2070e-01 L1_l1linf:7.1777e-02 L2_l1linf:7.1777e-02 L3_l1linf:7.0801e-02 L4_l1linf:6.9824e-02 L5_l1linf:6.8359e-02 L6_l1linf:6.7383e-02 L7_l1linf:7.0312e-02 L8_l1linf:7.0312e-02 L9_l1linf:7.1289e-02 L10_l1linf:7.2754e-02 L11_l1linf:7.1777e-02 L12_l1linf:4.2236e-02 L1_spectral:3.1742e-03 L2_spectral:3.1902e-03 L3_spectral:3.1688e-03 L4_spectral:3.1553e-03 L5_spectral:3.1665e-03 L6_spectral:3.1498e-03 L7_spectral:3.1643e-03 L8_spectral:3.1309e-03 L9_spectral:3.1387e-03 L10_spectral:3.1569e-03 L11_spectral:3.1526e-03 L12_spectral:3.1055e-03 train_time:119022ms step_avg:49.59ms +[2025-09-11 12:56:59] [Rank 0] step:2401/10000 train_time:120846ms step_avg:50.33ms +[2025-09-11 12:56:59] [Rank 0] step:2401/10000 train_time:120846ms step_avg:50.33ms +[2025-09-11 12:56:59] [Rank 0] step:2421/10000 train_time:121527ms step_avg:50.20ms +[2025-09-11 12:56:59] [Rank 0] step:2421/10000 train_time:121527ms step_avg:50.20ms +[2025-09-11 12:57:00] [Rank 0] step:2441/10000 train_time:122191ms step_avg:50.06ms +[2025-09-11 12:57:00] [Rank 0] step:2441/10000 train_time:122191ms step_avg:50.06ms +[2025-09-11 12:57:01] [Rank 0] step:2461/10000 train_time:122855ms step_avg:49.92ms +[2025-09-11 12:57:01] [Rank 0] step:2461/10000 train_time:122855ms step_avg:49.92ms +[2025-09-11 12:57:01] [Rank 0] step:2481/10000 train_time:123518ms step_avg:49.79ms +[2025-09-11 12:57:01] [Rank 0] step:2481/10000 
train_time:123518ms step_avg:49.79ms +[2025-09-11 12:57:02] [Rank 0] step:2501/10000 train_time:124180ms step_avg:49.65ms +[2025-09-11 12:57:02] [Rank 0] step:2501/10000 train_time:124180ms step_avg:49.65ms +[2025-09-11 12:57:03] [Rank 0] step:2521/10000 train_time:124842ms step_avg:49.52ms +[2025-09-11 12:57:03] [Rank 0] step:2521/10000 train_time:124842ms step_avg:49.52ms +[2025-09-11 12:57:03] [Rank 0] step:2541/10000 train_time:125504ms step_avg:49.39ms +[2025-09-11 12:57:03] [Rank 0] step:2541/10000 train_time:125504ms step_avg:49.39ms +[2025-09-11 12:57:04] [Rank 0] step:2561/10000 train_time:126167ms step_avg:49.26ms +[2025-09-11 12:57:04] [Rank 0] step:2561/10000 train_time:126167ms step_avg:49.26ms +[2025-09-11 12:57:05] [Rank 0] step:2581/10000 train_time:126831ms step_avg:49.14ms +[2025-09-11 12:57:05] [Rank 0] step:2581/10000 train_time:126831ms step_avg:49.14ms +[2025-09-11 12:57:05] [Rank 0] step:2601/10000 train_time:127493ms step_avg:49.02ms +[2025-09-11 12:57:05] [Rank 0] step:2601/10000 train_time:127493ms step_avg:49.02ms +[2025-09-11 12:57:06] [Rank 0] step:2621/10000 train_time:128156ms step_avg:48.90ms +[2025-09-11 12:57:06] [Rank 0] step:2621/10000 train_time:128156ms step_avg:48.90ms +[2025-09-11 12:57:07] [Rank 0] step:2641/10000 train_time:128818ms step_avg:48.78ms +[2025-09-11 12:57:07] [Rank 0] step:2641/10000 train_time:128818ms step_avg:48.78ms +[2025-09-11 12:57:07] [Rank 0] step:2661/10000 train_time:129480ms step_avg:48.66ms +[2025-09-11 12:57:07] [Rank 0] step:2661/10000 train_time:129480ms step_avg:48.66ms +[2025-09-11 12:57:08] [Rank 0] step:2681/10000 train_time:130143ms step_avg:48.54ms +[2025-09-11 12:57:08] [Rank 0] step:2681/10000 train_time:130143ms step_avg:48.54ms +[2025-09-11 12:57:09] [Rank 0] step:2701/10000 train_time:130805ms step_avg:48.43ms +[2025-09-11 12:57:09] [Rank 0] step:2701/10000 train_time:130805ms step_avg:48.43ms +[2025-09-11 12:57:09] [Rank 0] step:2721/10000 train_time:131467ms step_avg:48.32ms 
+[2025-09-11 12:57:09] [Rank 0] step:2721/10000 train_time:131467ms step_avg:48.32ms +[2025-09-11 12:57:10] [Rank 0] step:2741/10000 train_time:132130ms step_avg:48.20ms +[2025-09-11 12:57:10] [Rank 0] step:2741/10000 train_time:132130ms step_avg:48.20ms +[2025-09-11 12:57:11] [Rank 0] step:2761/10000 train_time:132792ms step_avg:48.10ms +[2025-09-11 12:57:11] [Rank 0] step:2761/10000 train_time:132792ms step_avg:48.10ms +[2025-09-11 12:57:11] [Rank 0] step:2781/10000 train_time:133454ms step_avg:47.99ms +[2025-09-11 12:57:11] [Rank 0] step:2781/10000 train_time:133454ms step_avg:47.99ms +[2025-09-11 12:57:12] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 12:57:12] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 12:57:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 12:57:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 12:57:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 12:57:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 12:57:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:57:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:57:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 12:57:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 12:57:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 12:57:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 12:57:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 12:57:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 12:57:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 12:57:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 12:57:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 12:57:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 12:57:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 12:57:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 12:57:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 12:57:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 12:57:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 12:57:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 12:57:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 12:57:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 12:57:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 12:57:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 12:57:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 12:57:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 12:57:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 12:57:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 12:57:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 12:57:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 12:57:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 12:57:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 12:57:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 12:57:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 12:57:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 12:57:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 12:57:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 12:57:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 12:57:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 12:57:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 12:57:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 12:57:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:57:22] [Rank 0] PRINT: step:2800/10000 val_loss:5.8472 total_sharp:1.2202e-01 L1_sharp:1.0563e-02 L2_sharp:9.5198e-03 L3_sharp:1.3002e-02 L4_sharp:2.1065e-02 L5_sharp:2.8890e-02 L6_sharp:5.7604e-02 L7_sharp:7.6809e-02 L8_sharp:1.1262e-01 L9_sharp:1.0746e-01 L10_sharp:1.2907e-01 L11_sharp:1.7242e-01 L12_sharp:5.2172e-01 total_fnorm:1.8906e+00 total_l1_linf:3.1680e+03 total_spectral:9.6094e-01 L1_fnorm:2.4902e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4609e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4512e-01 L7_fnorm:2.4805e-01 L8_fnorm:2.4512e-01 L9_fnorm:2.5000e-01 L10_fnorm:2.5195e-01 L11_fnorm:2.4902e-01 L12_fnorm:2.1387e-01 L1_l1linf:7.0312e-02 L2_l1linf:7.0312e-02 L3_l1linf:7.0312e-02 L4_l1linf:6.7871e-02 L5_l1linf:6.6406e-02 L6_l1linf:6.6895e-02 L7_l1linf:7.0312e-02 L8_l1linf:6.9824e-02 L9_l1linf:7.0801e-02 L10_l1linf:7.2754e-02 L11_l1linf:6.7871e-02 L12_l1linf:3.7842e-02 L1_spectral:3.1925e-03 L2_spectral:3.1841e-03 L3_spectral:3.1860e-03 L4_spectral:3.1519e-03 L5_spectral:3.1746e-03 L6_spectral:3.1562e-03 L7_spectral:3.1552e-03 L8_spectral:3.1349e-03 L9_spectral:3.1572e-03 L10_spectral:3.1607e-03 L11_spectral:3.1405e-03 L12_spectral:3.1530e-03 train_time:134098ms step_avg:47.89ms +[2025-09-11 12:57:22] [Rank 0] PRINT: step:2800/10000 val_loss:5.8472 total_sharp:1.2202e-01 L1_sharp:1.0563e-02 L2_sharp:9.5198e-03 L3_sharp:1.3002e-02 L4_sharp:2.1065e-02 L5_sharp:2.8890e-02 L6_sharp:5.7604e-02 L7_sharp:7.6809e-02 L8_sharp:1.1262e-01 L9_sharp:1.0746e-01 L10_sharp:1.2907e-01 L11_sharp:1.7242e-01 L12_sharp:5.2172e-01 total_fnorm:1.8906e+00 total_l1_linf:3.1680e+03 total_spectral:9.6094e-01 L1_fnorm:2.4902e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4609e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4512e-01 L7_fnorm:2.4805e-01 L8_fnorm:2.4512e-01 L9_fnorm:2.5000e-01 L10_fnorm:2.5195e-01 L11_fnorm:2.4902e-01 L12_fnorm:2.1387e-01 L1_l1linf:7.0312e-02 L2_l1linf:7.0312e-02 L3_l1linf:7.0312e-02 L4_l1linf:6.7871e-02 L5_l1linf:6.6406e-02 
L6_l1linf:6.6895e-02 L7_l1linf:7.0312e-02 L8_l1linf:6.9824e-02 L9_l1linf:7.0801e-02 L10_l1linf:7.2754e-02 L11_l1linf:6.7871e-02 L12_l1linf:3.7842e-02 L1_spectral:3.1925e-03 L2_spectral:3.1841e-03 L3_spectral:3.1860e-03 L4_spectral:3.1519e-03 L5_spectral:3.1746e-03 L6_spectral:3.1562e-03 L7_spectral:3.1552e-03 L8_spectral:3.1349e-03 L9_spectral:3.1572e-03 L10_spectral:3.1607e-03 L11_spectral:3.1405e-03 L12_spectral:3.1530e-03 train_time:134098ms step_avg:47.89ms +[2025-09-11 12:57:24] [Rank 0] step:2801/10000 train_time:135893ms step_avg:48.52ms +[2025-09-11 12:57:24] [Rank 0] step:2801/10000 train_time:135893ms step_avg:48.52ms +[2025-09-11 12:57:24] [Rank 0] step:2821/10000 train_time:136560ms step_avg:48.41ms +[2025-09-11 12:57:24] [Rank 0] step:2821/10000 train_time:136560ms step_avg:48.41ms +[2025-09-11 12:57:25] [Rank 0] step:2841/10000 train_time:137224ms step_avg:48.30ms +[2025-09-11 12:57:25] [Rank 0] step:2841/10000 train_time:137224ms step_avg:48.30ms +[2025-09-11 12:57:26] [Rank 0] step:2861/10000 train_time:137887ms step_avg:48.20ms +[2025-09-11 12:57:26] [Rank 0] step:2861/10000 train_time:137887ms step_avg:48.20ms +[2025-09-11 12:57:26] [Rank 0] step:2881/10000 train_time:138550ms step_avg:48.09ms +[2025-09-11 12:57:26] [Rank 0] step:2881/10000 train_time:138550ms step_avg:48.09ms +[2025-09-11 12:57:27] [Rank 0] step:2901/10000 train_time:139213ms step_avg:47.99ms +[2025-09-11 12:57:27] [Rank 0] step:2901/10000 train_time:139213ms step_avg:47.99ms +[2025-09-11 12:57:28] [Rank 0] step:2921/10000 train_time:140140ms step_avg:47.98ms +[2025-09-11 12:57:28] [Rank 0] step:2921/10000 train_time:140140ms step_avg:47.98ms +[2025-09-11 12:57:29] [Rank 0] step:2941/10000 train_time:140803ms step_avg:47.88ms +[2025-09-11 12:57:29] [Rank 0] step:2941/10000 train_time:140803ms step_avg:47.88ms +[2025-09-11 12:57:29] [Rank 0] step:2961/10000 train_time:141466ms step_avg:47.78ms +[2025-09-11 12:57:29] [Rank 0] step:2961/10000 train_time:141466ms step_avg:47.78ms 
+[2025-09-11 12:57:30] [Rank 0] step:2981/10000 train_time:142441ms step_avg:47.78ms +[2025-09-11 12:57:30] [Rank 0] step:2981/10000 train_time:142441ms step_avg:47.78ms +[2025-09-11 12:57:31] [Rank 0] step:3001/10000 train_time:143107ms step_avg:47.69ms +[2025-09-11 12:57:31] [Rank 0] step:3001/10000 train_time:143107ms step_avg:47.69ms +[2025-09-11 12:57:32] [Rank 0] step:3021/10000 train_time:143772ms step_avg:47.59ms +[2025-09-11 12:57:32] [Rank 0] step:3021/10000 train_time:143772ms step_avg:47.59ms +[2025-09-11 12:57:32] [Rank 0] step:3041/10000 train_time:144437ms step_avg:47.50ms +[2025-09-11 12:57:32] [Rank 0] step:3041/10000 train_time:144437ms step_avg:47.50ms +[2025-09-11 12:57:33] [Rank 0] step:3061/10000 train_time:145103ms step_avg:47.40ms +[2025-09-11 12:57:33] [Rank 0] step:3061/10000 train_time:145103ms step_avg:47.40ms +[2025-09-11 12:57:34] [Rank 0] step:3081/10000 train_time:145769ms step_avg:47.31ms +[2025-09-11 12:57:34] [Rank 0] step:3081/10000 train_time:145769ms step_avg:47.31ms +[2025-09-11 12:57:34] [Rank 0] step:3101/10000 train_time:146434ms step_avg:47.22ms +[2025-09-11 12:57:34] [Rank 0] step:3101/10000 train_time:146434ms step_avg:47.22ms +[2025-09-11 12:57:35] [Rank 0] step:3121/10000 train_time:147100ms step_avg:47.13ms +[2025-09-11 12:57:35] [Rank 0] step:3121/10000 train_time:147100ms step_avg:47.13ms +[2025-09-11 12:57:36] [Rank 0] step:3141/10000 train_time:147766ms step_avg:47.04ms +[2025-09-11 12:57:36] [Rank 0] step:3141/10000 train_time:147766ms step_avg:47.04ms +[2025-09-11 12:57:36] [Rank 0] step:3161/10000 train_time:148431ms step_avg:46.96ms +[2025-09-11 12:57:36] [Rank 0] step:3161/10000 train_time:148431ms step_avg:46.96ms +[2025-09-11 12:57:37] [Rank 0] step:3181/10000 train_time:149097ms step_avg:46.87ms +[2025-09-11 12:57:37] [Rank 0] step:3181/10000 train_time:149097ms step_avg:46.87ms +[2025-09-11 12:57:38] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 12:57:38] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 12:57:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 12:57:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 12:57:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 12:57:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 12:57:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:57:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:57:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 12:57:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 12:57:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 12:57:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 12:57:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 12:57:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 12:57:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 12:57:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 12:57:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 12:57:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 12:57:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 12:57:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 12:57:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 12:57:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 12:57:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 12:57:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 12:57:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 12:57:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 12:57:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 12:57:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 12:57:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 12:57:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 12:57:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 12:57:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 12:57:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 12:57:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 12:57:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 12:57:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 12:57:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 12:57:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 12:57:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 12:57:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 12:57:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 12:57:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 12:57:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 12:57:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 12:57:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 12:57:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 12:57:48] [Rank 0] PRINT: step:3200/10000 val_loss:5.7697 total_sharp:7.8665e-02 L1_sharp:1.5727e-02 L2_sharp:1.2148e-02 L3_sharp:1.4375e-02 L4_sharp:2.0322e-02 L5_sharp:2.7250e-02 L6_sharp:4.4313e-02 L7_sharp:5.3921e-02 L8_sharp:8.9651e-02 L9_sharp:9.4520e-02 L10_sharp:1.0612e-01 L11_sharp:1.2998e-01 L12_sharp:3.5731e-01 total_fnorm:2.0000e+00 total_l1_linf:3.3120e+03 total_spectral:1.0234e+00 L1_fnorm:2.5000e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4512e-01 L6_fnorm:2.4414e-01 L7_fnorm:2.4707e-01 L8_fnorm:2.4414e-01 L9_fnorm:2.4902e-01 L10_fnorm:2.5000e-01 L11_fnorm:2.5195e-01 L12_fnorm:2.2363e-01 L1_l1linf:7.0312e-02 L2_l1linf:6.9824e-02 L3_l1linf:6.8359e-02 L4_l1linf:6.7871e-02 L5_l1linf:6.4941e-02 L6_l1linf:6.4453e-02 L7_l1linf:6.5918e-02 L8_l1linf:6.5430e-02 L9_l1linf:6.6895e-02 L10_l1linf:6.8848e-02 L11_l1linf:6.8848e-02 L12_l1linf:4.0527e-02 L1_spectral:3.2248e-03 L2_spectral:3.2016e-03 L3_spectral:3.2009e-03 L4_spectral:3.2006e-03 L5_spectral:3.1702e-03 L6_spectral:3.1603e-03 L7_spectral:3.1717e-03 L8_spectral:3.1583e-03 L9_spectral:3.1620e-03 L10_spectral:3.1560e-03 L11_spectral:3.1560e-03 L12_spectral:3.1231e-03 train_time:149744ms step_avg:46.79ms +[2025-09-11 12:57:48] [Rank 0] PRINT: step:3200/10000 
val_loss:5.7697 total_sharp:7.8665e-02 L1_sharp:1.5727e-02 L2_sharp:1.2148e-02 L3_sharp:1.4375e-02 L4_sharp:2.0322e-02 L5_sharp:2.7250e-02 L6_sharp:4.4313e-02 L7_sharp:5.3921e-02 L8_sharp:8.9651e-02 L9_sharp:9.4520e-02 L10_sharp:1.0612e-01 L11_sharp:1.2998e-01 L12_sharp:3.5731e-01 total_fnorm:2.0000e+00 total_l1_linf:3.3120e+03 total_spectral:1.0234e+00 L1_fnorm:2.5000e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4512e-01 L6_fnorm:2.4414e-01 L7_fnorm:2.4707e-01 L8_fnorm:2.4414e-01 L9_fnorm:2.4902e-01 L10_fnorm:2.5000e-01 L11_fnorm:2.5195e-01 L12_fnorm:2.2363e-01 L1_l1linf:7.0312e-02 L2_l1linf:6.9824e-02 L3_l1linf:6.8359e-02 L4_l1linf:6.7871e-02 L5_l1linf:6.4941e-02 L6_l1linf:6.4453e-02 L7_l1linf:6.5918e-02 L8_l1linf:6.5430e-02 L9_l1linf:6.6895e-02 L10_l1linf:6.8848e-02 L11_l1linf:6.8848e-02 L12_l1linf:4.0527e-02 L1_spectral:3.2248e-03 L2_spectral:3.2016e-03 L3_spectral:3.2009e-03 L4_spectral:3.2006e-03 L5_spectral:3.1702e-03 L6_spectral:3.1603e-03 L7_spectral:3.1717e-03 L8_spectral:3.1583e-03 L9_spectral:3.1620e-03 L10_spectral:3.1560e-03 L11_spectral:3.1560e-03 L12_spectral:3.1231e-03 train_time:149744ms step_avg:46.79ms +[2025-09-11 12:57:50] [Rank 0] step:3201/10000 train_time:151582ms step_avg:47.35ms +[2025-09-11 12:57:50] [Rank 0] step:3201/10000 train_time:151582ms step_avg:47.35ms +[2025-09-11 12:57:51] [Rank 0] step:3221/10000 train_time:152253ms step_avg:47.27ms +[2025-09-11 12:57:51] [Rank 0] step:3221/10000 train_time:152253ms step_avg:47.27ms +[2025-09-11 12:57:52] [Rank 0] step:3241/10000 train_time:152920ms step_avg:47.18ms +[2025-09-11 12:57:52] [Rank 0] step:3241/10000 train_time:152920ms step_avg:47.18ms +[2025-09-11 12:57:52] [Rank 0] step:3261/10000 train_time:153588ms step_avg:47.10ms +[2025-09-11 12:57:52] [Rank 0] step:3261/10000 train_time:153588ms step_avg:47.10ms +[2025-09-11 12:57:53] [Rank 0] step:3281/10000 train_time:154254ms step_avg:47.01ms +[2025-09-11 12:57:53] [Rank 0] step:3281/10000 
train_time:154254ms step_avg:47.01ms +[2025-09-11 12:57:54] [Rank 0] step:3301/10000 train_time:154921ms step_avg:46.93ms +[2025-09-11 12:57:54] [Rank 0] step:3301/10000 train_time:154921ms step_avg:46.93ms +[2025-09-11 12:57:54] [Rank 0] step:3321/10000 train_time:155587ms step_avg:46.85ms +[2025-09-11 12:57:54] [Rank 0] step:3321/10000 train_time:155587ms step_avg:46.85ms +[2025-09-11 12:57:55] [Rank 0] step:3341/10000 train_time:156254ms step_avg:46.77ms +[2025-09-11 12:57:55] [Rank 0] step:3341/10000 train_time:156254ms step_avg:46.77ms +[2025-09-11 12:57:56] [Rank 0] step:3361/10000 train_time:156921ms step_avg:46.69ms +[2025-09-11 12:57:56] [Rank 0] step:3361/10000 train_time:156921ms step_avg:46.69ms +[2025-09-11 12:57:56] [Rank 0] step:3381/10000 train_time:157588ms step_avg:46.61ms +[2025-09-11 12:57:56] [Rank 0] step:3381/10000 train_time:157588ms step_avg:46.61ms +[2025-09-11 12:57:57] [Rank 0] step:3401/10000 train_time:158254ms step_avg:46.53ms +[2025-09-11 12:57:57] [Rank 0] step:3401/10000 train_time:158254ms step_avg:46.53ms +[2025-09-11 12:57:58] [Rank 0] step:3421/10000 train_time:158920ms step_avg:46.45ms +[2025-09-11 12:57:58] [Rank 0] step:3421/10000 train_time:158920ms step_avg:46.45ms +[2025-09-11 12:57:58] [Rank 0] step:3441/10000 train_time:159585ms step_avg:46.38ms +[2025-09-11 12:57:58] [Rank 0] step:3441/10000 train_time:159585ms step_avg:46.38ms +[2025-09-11 12:57:59] [Rank 0] step:3461/10000 train_time:160252ms step_avg:46.30ms +[2025-09-11 12:57:59] [Rank 0] step:3461/10000 train_time:160252ms step_avg:46.30ms +[2025-09-11 12:58:00] [Rank 0] step:3481/10000 train_time:160919ms step_avg:46.23ms +[2025-09-11 12:58:00] [Rank 0] step:3481/10000 train_time:160919ms step_avg:46.23ms +[2025-09-11 12:58:00] [Rank 0] step:3501/10000 train_time:161586ms step_avg:46.15ms +[2025-09-11 12:58:00] [Rank 0] step:3501/10000 train_time:161586ms step_avg:46.15ms +[2025-09-11 12:58:01] [Rank 0] step:3521/10000 train_time:162252ms step_avg:46.08ms 
+[2025-09-11 12:58:01] [Rank 0] step:3521/10000 train_time:162252ms step_avg:46.08ms +[2025-09-11 12:58:02] [Rank 0] step:3541/10000 train_time:162918ms step_avg:46.01ms +[2025-09-11 12:58:02] [Rank 0] step:3541/10000 train_time:162918ms step_avg:46.01ms +[2025-09-11 12:58:02] [Rank 0] step:3561/10000 train_time:163584ms step_avg:45.94ms +[2025-09-11 12:58:02] [Rank 0] step:3561/10000 train_time:163584ms step_avg:45.94ms +[2025-09-11 12:58:03] [Rank 0] step:3581/10000 train_time:164251ms step_avg:45.87ms +[2025-09-11 12:58:03] [Rank 0] step:3581/10000 train_time:164251ms step_avg:45.87ms +[2025-09-11 12:58:04] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 12:58:04] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 12:58:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 12:58:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 12:58:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 12:58:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 12:58:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:58:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:58:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 12:58:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 12:58:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 12:58:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 12:58:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 12:58:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 12:58:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 12:58:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 12:58:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 12:58:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 12:58:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 12:58:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 12:58:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 12:58:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 12:58:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 12:58:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 12:58:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 12:58:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 12:58:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 12:58:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 12:58:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 12:58:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 12:58:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 12:58:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 12:58:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 12:58:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 12:58:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 12:58:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 12:58:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 12:58:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 12:58:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 12:58:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 12:58:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 12:58:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 12:58:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 12:58:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 12:58:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 12:58:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:58:16] [Rank 0] PRINT: step:3600/10000 val_loss:5.7175 total_sharp:8.6165e-02 L1_sharp:1.0987e-02 L2_sharp:8.2487e-03 L3_sharp:1.0011e-02 L4_sharp:1.2636e-02 L5_sharp:2.2496e-02 L6_sharp:3.6679e-02 L7_sharp:5.7373e-02 L8_sharp:8.4981e-02 L9_sharp:8.8997e-02 L10_sharp:9.7984e-02 L11_sharp:1.3536e-01 L12_sharp:4.0251e-01 total_fnorm:1.8359e+00 total_l1_linf:3.0880e+03 total_spectral:9.3750e-01 L1_fnorm:2.4902e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4805e-01 L4_fnorm:2.4609e-01 L5_fnorm:2.4512e-01 L6_fnorm:2.4316e-01 L7_fnorm:2.4707e-01 L8_fnorm:2.4316e-01 L9_fnorm:2.4805e-01 L10_fnorm:2.5000e-01 L11_fnorm:2.5000e-01 L12_fnorm:2.2168e-01 L1_l1linf:6.8359e-02 L2_l1linf:6.7871e-02 L3_l1linf:6.6895e-02 L4_l1linf:6.6406e-02 L5_l1linf:6.3965e-02 L6_l1linf:6.4941e-02 L7_l1linf:6.5918e-02 L8_l1linf:6.4453e-02 L9_l1linf:6.6406e-02 L10_l1linf:6.6895e-02 L11_l1linf:6.6895e-02 L12_l1linf:3.6865e-02 L1_spectral:3.2100e-03 L2_spectral:3.1937e-03 L3_spectral:3.1928e-03 L4_spectral:3.1858e-03 L5_spectral:3.1677e-03 L6_spectral:3.1537e-03 L7_spectral:3.1800e-03 L8_spectral:3.1348e-03 L9_spectral:3.1828e-03 L10_spectral:3.1780e-03 L11_spectral:3.1585e-03 L12_spectral:3.1457e-03 train_time:164899ms step_avg:45.81ms +[2025-09-11 12:58:16] [Rank 0] PRINT: step:3600/10000 val_loss:5.7175 total_sharp:8.6165e-02 L1_sharp:1.0987e-02 L2_sharp:8.2487e-03 L3_sharp:1.0011e-02 L4_sharp:1.2636e-02 L5_sharp:2.2496e-02 L6_sharp:3.6679e-02 L7_sharp:5.7373e-02 L8_sharp:8.4981e-02 L9_sharp:8.8997e-02 L10_sharp:9.7984e-02 L11_sharp:1.3536e-01 L12_sharp:4.0251e-01 total_fnorm:1.8359e+00 total_l1_linf:3.0880e+03 total_spectral:9.3750e-01 L1_fnorm:2.4902e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4805e-01 L4_fnorm:2.4609e-01 L5_fnorm:2.4512e-01 L6_fnorm:2.4316e-01 L7_fnorm:2.4707e-01 L8_fnorm:2.4316e-01 L9_fnorm:2.4805e-01 L10_fnorm:2.5000e-01 L11_fnorm:2.5000e-01 L12_fnorm:2.2168e-01 L1_l1linf:6.8359e-02 L2_l1linf:6.7871e-02 L3_l1linf:6.6895e-02 L4_l1linf:6.6406e-02 L5_l1linf:6.3965e-02 
L6_l1linf:6.4941e-02 L7_l1linf:6.5918e-02 L8_l1linf:6.4453e-02 L9_l1linf:6.6406e-02 L10_l1linf:6.6895e-02 L11_l1linf:6.6895e-02 L12_l1linf:3.6865e-02 L1_spectral:3.2100e-03 L2_spectral:3.1937e-03 L3_spectral:3.1928e-03 L4_spectral:3.1858e-03 L5_spectral:3.1677e-03 L6_spectral:3.1537e-03 L7_spectral:3.1800e-03 L8_spectral:3.1348e-03 L9_spectral:3.1828e-03 L10_spectral:3.1780e-03 L11_spectral:3.1585e-03 L12_spectral:3.1457e-03 train_time:164899ms step_avg:45.81ms +[2025-09-11 12:58:18] [Rank 0] step:3601/10000 train_time:166661ms step_avg:46.28ms +[2025-09-11 12:58:18] [Rank 0] step:3601/10000 train_time:166661ms step_avg:46.28ms +[2025-09-11 12:58:19] [Rank 0] step:3621/10000 train_time:167331ms step_avg:46.21ms +[2025-09-11 12:58:19] [Rank 0] step:3621/10000 train_time:167331ms step_avg:46.21ms +[2025-09-11 12:58:20] [Rank 0] step:3641/10000 train_time:167997ms step_avg:46.14ms +[2025-09-11 12:58:20] [Rank 0] step:3641/10000 train_time:167997ms step_avg:46.14ms +[2025-09-11 12:58:20] [Rank 0] step:3661/10000 train_time:168663ms step_avg:46.07ms +[2025-09-11 12:58:20] [Rank 0] step:3661/10000 train_time:168663ms step_avg:46.07ms +[2025-09-11 12:58:21] [Rank 0] step:3681/10000 train_time:169329ms step_avg:46.00ms +[2025-09-11 12:58:21] [Rank 0] step:3681/10000 train_time:169329ms step_avg:46.00ms +[2025-09-11 12:58:22] [Rank 0] step:3701/10000 train_time:169995ms step_avg:45.93ms +[2025-09-11 12:58:22] [Rank 0] step:3701/10000 train_time:169995ms step_avg:45.93ms +[2025-09-11 12:58:22] [Rank 0] step:3721/10000 train_time:170670ms step_avg:45.87ms +[2025-09-11 12:58:22] [Rank 0] step:3721/10000 train_time:170670ms step_avg:45.87ms +[2025-09-11 12:58:23] [Rank 0] step:3741/10000 train_time:171346ms step_avg:45.80ms +[2025-09-11 12:58:23] [Rank 0] step:3741/10000 train_time:171346ms step_avg:45.80ms +[2025-09-11 12:58:24] [Rank 0] step:3761/10000 train_time:172022ms step_avg:45.74ms +[2025-09-11 12:58:24] [Rank 0] step:3761/10000 train_time:172022ms step_avg:45.74ms 
+[2025-09-11 12:58:24] [Rank 0] step:3781/10000 train_time:172698ms step_avg:45.68ms +[2025-09-11 12:58:24] [Rank 0] step:3781/10000 train_time:172698ms step_avg:45.68ms +[2025-09-11 12:58:25] [Rank 0] step:3801/10000 train_time:173374ms step_avg:45.61ms +[2025-09-11 12:58:25] [Rank 0] step:3801/10000 train_time:173374ms step_avg:45.61ms +[2025-09-11 12:58:26] [Rank 0] step:3821/10000 train_time:174052ms step_avg:45.55ms +[2025-09-11 12:58:26] [Rank 0] step:3821/10000 train_time:174052ms step_avg:45.55ms +[2025-09-11 12:58:26] [Rank 0] step:3841/10000 train_time:174728ms step_avg:45.49ms +[2025-09-11 12:58:26] [Rank 0] step:3841/10000 train_time:174728ms step_avg:45.49ms +[2025-09-11 12:58:27] [Rank 0] step:3861/10000 train_time:175404ms step_avg:45.43ms +[2025-09-11 12:58:27] [Rank 0] step:3861/10000 train_time:175404ms step_avg:45.43ms +[2025-09-11 12:58:28] [Rank 0] step:3881/10000 train_time:176079ms step_avg:45.37ms +[2025-09-11 12:58:28] [Rank 0] step:3881/10000 train_time:176079ms step_avg:45.37ms +[2025-09-11 12:58:28] [Rank 0] step:3901/10000 train_time:176755ms step_avg:45.31ms +[2025-09-11 12:58:28] [Rank 0] step:3901/10000 train_time:176755ms step_avg:45.31ms +[2025-09-11 12:58:29] [Rank 0] step:3921/10000 train_time:177432ms step_avg:45.25ms +[2025-09-11 12:58:29] [Rank 0] step:3921/10000 train_time:177432ms step_avg:45.25ms +[2025-09-11 12:58:30] [Rank 0] step:3941/10000 train_time:178108ms step_avg:45.19ms +[2025-09-11 12:58:30] [Rank 0] step:3941/10000 train_time:178108ms step_avg:45.19ms +[2025-09-11 12:58:30] [Rank 0] step:3961/10000 train_time:178947ms step_avg:45.18ms +[2025-09-11 12:58:30] [Rank 0] step:3961/10000 train_time:178947ms step_avg:45.18ms +[2025-09-11 12:58:31] [Rank 0] step:3981/10000 train_time:179734ms step_avg:45.15ms +[2025-09-11 12:58:31] [Rank 0] step:3981/10000 train_time:179734ms step_avg:45.15ms +[2025-09-11 12:58:32] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 12:58:32] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 12:58:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 12:58:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 12:58:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 12:58:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 12:58:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:58:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:58:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 12:58:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 12:58:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 12:58:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 12:58:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 12:58:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 12:58:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 12:58:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 12:58:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 12:58:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 12:58:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 12:58:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 12:58:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 12:58:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 12:58:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 12:58:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 12:58:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 12:58:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 12:58:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 12:58:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 12:58:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 12:58:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 12:58:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 12:58:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 12:58:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 12:58:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 12:58:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 12:58:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 12:58:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 12:58:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 12:58:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 12:58:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 12:58:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 12:58:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 12:58:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 12:58:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 12:58:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 12:58:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 12:58:43] [Rank 0] PRINT: step:4000/10000 val_loss:5.6671 total_sharp:8.6688e-02 L1_sharp:1.1401e-02 L2_sharp:9.4588e-03 L3_sharp:1.0096e-02 L4_sharp:1.4414e-02 L5_sharp:2.0041e-02 L6_sharp:3.5475e-02 L7_sharp:5.4024e-02 L8_sharp:8.1360e-02 L9_sharp:1.0275e-01 L10_sharp:1.1126e-01 L11_sharp:1.6265e-01 L12_sharp:4.1754e-01 total_fnorm:2.0156e+00 total_l1_linf:3.1520e+03 total_spectral:1.0234e+00 L1_fnorm:2.4902e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4707e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4219e-01 L7_fnorm:2.4609e-01 L8_fnorm:2.4316e-01 L9_fnorm:2.4805e-01 L10_fnorm:2.5000e-01 L11_fnorm:2.5000e-01 L12_fnorm:2.2168e-01 L1_l1linf:6.7871e-02 L2_l1linf:6.7383e-02 L3_l1linf:6.5430e-02 L4_l1linf:6.4941e-02 L5_l1linf:6.3965e-02 L6_l1linf:6.3477e-02 L7_l1linf:6.4941e-02 L8_l1linf:6.3965e-02 L9_l1linf:6.6406e-02 L10_l1linf:6.6406e-02 L11_l1linf:6.5430e-02 L12_l1linf:3.8330e-02 L1_spectral:3.2073e-03 L2_spectral:3.2027e-03 L3_spectral:3.1914e-03 L4_spectral:3.1909e-03 L5_spectral:3.1771e-03 L6_spectral:3.1659e-03 L7_spectral:3.1803e-03 L8_spectral:3.1392e-03 L9_spectral:3.1773e-03 L10_spectral:3.1706e-03 L11_spectral:3.2004e-03 L12_spectral:3.1547e-03 train_time:180391ms step_avg:45.10ms +[2025-09-11 12:58:43] [Rank 0] PRINT: step:4000/10000 
val_loss:5.6671 total_sharp:8.6688e-02 L1_sharp:1.1401e-02 L2_sharp:9.4588e-03 L3_sharp:1.0096e-02 L4_sharp:1.4414e-02 L5_sharp:2.0041e-02 L6_sharp:3.5475e-02 L7_sharp:5.4024e-02 L8_sharp:8.1360e-02 L9_sharp:1.0275e-01 L10_sharp:1.1126e-01 L11_sharp:1.6265e-01 L12_sharp:4.1754e-01 total_fnorm:2.0156e+00 total_l1_linf:3.1520e+03 total_spectral:1.0234e+00 L1_fnorm:2.4902e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4707e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4219e-01 L7_fnorm:2.4609e-01 L8_fnorm:2.4316e-01 L9_fnorm:2.4805e-01 L10_fnorm:2.5000e-01 L11_fnorm:2.5000e-01 L12_fnorm:2.2168e-01 L1_l1linf:6.7871e-02 L2_l1linf:6.7383e-02 L3_l1linf:6.5430e-02 L4_l1linf:6.4941e-02 L5_l1linf:6.3965e-02 L6_l1linf:6.3477e-02 L7_l1linf:6.4941e-02 L8_l1linf:6.3965e-02 L9_l1linf:6.6406e-02 L10_l1linf:6.6406e-02 L11_l1linf:6.5430e-02 L12_l1linf:3.8330e-02 L1_spectral:3.2073e-03 L2_spectral:3.2027e-03 L3_spectral:3.1914e-03 L4_spectral:3.1909e-03 L5_spectral:3.1771e-03 L6_spectral:3.1659e-03 L7_spectral:3.1803e-03 L8_spectral:3.1392e-03 L9_spectral:3.1773e-03 L10_spectral:3.1706e-03 L11_spectral:3.2004e-03 L12_spectral:3.1547e-03 train_time:180391ms step_avg:45.10ms +[2025-09-11 12:58:44] [Rank 0] step:4001/10000 train_time:182184ms step_avg:45.53ms +[2025-09-11 12:58:44] [Rank 0] step:4001/10000 train_time:182184ms step_avg:45.53ms +[2025-09-11 12:58:45] [Rank 0] step:4021/10000 train_time:182865ms step_avg:45.48ms +[2025-09-11 12:58:45] [Rank 0] step:4021/10000 train_time:182865ms step_avg:45.48ms +[2025-09-11 12:58:46] [Rank 0] step:4041/10000 train_time:183542ms step_avg:45.42ms +[2025-09-11 12:58:46] [Rank 0] step:4041/10000 train_time:183542ms step_avg:45.42ms +[2025-09-11 12:58:46] [Rank 0] step:4061/10000 train_time:184217ms step_avg:45.36ms +[2025-09-11 12:58:46] [Rank 0] step:4061/10000 train_time:184217ms step_avg:45.36ms +[2025-09-11 12:58:47] [Rank 0] step:4081/10000 train_time:184895ms step_avg:45.31ms +[2025-09-11 12:58:47] [Rank 0] step:4081/10000 
train_time:184895ms step_avg:45.31ms +[2025-09-11 12:58:48] [Rank 0] step:4101/10000 train_time:185571ms step_avg:45.25ms +[2025-09-11 12:58:48] [Rank 0] step:4101/10000 train_time:185571ms step_avg:45.25ms +[2025-09-11 12:58:48] [Rank 0] step:4121/10000 train_time:186248ms step_avg:45.19ms +[2025-09-11 12:58:48] [Rank 0] step:4121/10000 train_time:186248ms step_avg:45.19ms +[2025-09-11 12:58:49] [Rank 0] step:4141/10000 train_time:186924ms step_avg:45.14ms +[2025-09-11 12:58:49] [Rank 0] step:4141/10000 train_time:186924ms step_avg:45.14ms +[2025-09-11 12:58:50] [Rank 0] step:4161/10000 train_time:187601ms step_avg:45.09ms +[2025-09-11 12:58:50] [Rank 0] step:4161/10000 train_time:187601ms step_avg:45.09ms +[2025-09-11 12:58:50] [Rank 0] step:4181/10000 train_time:188277ms step_avg:45.03ms +[2025-09-11 12:58:50] [Rank 0] step:4181/10000 train_time:188277ms step_avg:45.03ms +[2025-09-11 12:58:51] [Rank 0] step:4201/10000 train_time:188954ms step_avg:44.98ms +[2025-09-11 12:58:51] [Rank 0] step:4201/10000 train_time:188954ms step_avg:44.98ms +[2025-09-11 12:58:52] [Rank 0] step:4221/10000 train_time:189629ms step_avg:44.93ms +[2025-09-11 12:58:52] [Rank 0] step:4221/10000 train_time:189629ms step_avg:44.93ms +[2025-09-11 12:58:52] [Rank 0] step:4241/10000 train_time:190305ms step_avg:44.87ms +[2025-09-11 12:58:52] [Rank 0] step:4241/10000 train_time:190305ms step_avg:44.87ms +[2025-09-11 12:58:53] [Rank 0] step:4261/10000 train_time:190983ms step_avg:44.82ms +[2025-09-11 12:58:53] [Rank 0] step:4261/10000 train_time:190983ms step_avg:44.82ms +[2025-09-11 12:58:54] [Rank 0] step:4281/10000 train_time:191661ms step_avg:44.77ms +[2025-09-11 12:58:54] [Rank 0] step:4281/10000 train_time:191661ms step_avg:44.77ms +[2025-09-11 12:58:55] [Rank 0] step:4301/10000 train_time:192339ms step_avg:44.72ms +[2025-09-11 12:58:55] [Rank 0] step:4301/10000 train_time:192339ms step_avg:44.72ms +[2025-09-11 12:58:55] [Rank 0] step:4321/10000 train_time:193015ms step_avg:44.67ms 
+[2025-09-11 12:58:55] [Rank 0] step:4321/10000 train_time:193015ms step_avg:44.67ms +[2025-09-11 12:58:56] [Rank 0] step:4341/10000 train_time:193691ms step_avg:44.62ms +[2025-09-11 12:58:56] [Rank 0] step:4341/10000 train_time:193691ms step_avg:44.62ms +[2025-09-11 12:58:57] [Rank 0] step:4361/10000 train_time:194367ms step_avg:44.57ms +[2025-09-11 12:58:57] [Rank 0] step:4361/10000 train_time:194367ms step_avg:44.57ms +[2025-09-11 12:58:57] [Rank 0] step:4381/10000 train_time:195043ms step_avg:44.52ms +[2025-09-11 12:58:57] [Rank 0] step:4381/10000 train_time:195043ms step_avg:44.52ms +[2025-09-11 12:58:58] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 12:58:58] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 12:58:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 12:58:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 12:59:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 12:59:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 12:59:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:59:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:59:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 12:59:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 12:59:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 12:59:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 12:59:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 12:59:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 12:59:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 12:59:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 12:59:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 12:59:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 12:59:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 12:59:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 12:59:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 12:59:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 12:59:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 12:59:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 12:59:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 12:59:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 12:59:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 12:59:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 12:59:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 12:59:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 12:59:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 12:59:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 12:59:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 12:59:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 12:59:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 12:59:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 12:59:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 12:59:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 12:59:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 12:59:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 12:59:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 12:59:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 12:59:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 12:59:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 12:59:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 12:59:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:59:09] [Rank 0] PRINT: step:4400/10000 val_loss:5.6258 total_sharp:7.7871e-02 L1_sharp:1.1103e-02 L2_sharp:7.7341e-03 L3_sharp:7.8308e-03 L4_sharp:1.0595e-02 L5_sharp:2.2564e-02 L6_sharp:3.7236e-02 L7_sharp:5.1634e-02 L8_sharp:7.5470e-02 L9_sharp:7.8678e-02 L10_sharp:8.8651e-02 L11_sharp:1.2297e-01 L12_sharp:2.9104e-01 total_fnorm:1.8281e+00 total_l1_linf:2.9760e+03 total_spectral:9.2969e-01 L1_fnorm:2.4805e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4609e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4316e-01 L6_fnorm:2.4219e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.4219e-01 L9_fnorm:2.4707e-01 L10_fnorm:2.4902e-01 L11_fnorm:2.4902e-01 L12_fnorm:2.2070e-01 L1_l1linf:6.7871e-02 L2_l1linf:6.6895e-02 L3_l1linf:6.4941e-02 L4_l1linf:6.4453e-02 L5_l1linf:6.2988e-02 L6_l1linf:6.2256e-02 L7_l1linf:6.3477e-02 L8_l1linf:6.3965e-02 L9_l1linf:6.3965e-02 L10_l1linf:6.5430e-02 L11_l1linf:6.3965e-02 L12_l1linf:3.6133e-02 L1_spectral:3.2593e-03 L2_spectral:3.2441e-03 L3_spectral:3.2223e-03 L4_spectral:3.2022e-03 L5_spectral:3.1925e-03 L6_spectral:3.1600e-03 L7_spectral:3.2159e-03 L8_spectral:3.1417e-03 L9_spectral:3.1786e-03 L10_spectral:3.1885e-03 L11_spectral:3.1845e-03 L12_spectral:3.1697e-03 train_time:195700ms step_avg:44.48ms +[2025-09-11 12:59:09] [Rank 0] PRINT: step:4400/10000 val_loss:5.6258 total_sharp:7.7871e-02 L1_sharp:1.1103e-02 L2_sharp:7.7341e-03 L3_sharp:7.8308e-03 L4_sharp:1.0595e-02 L5_sharp:2.2564e-02 L6_sharp:3.7236e-02 L7_sharp:5.1634e-02 L8_sharp:7.5470e-02 L9_sharp:7.8678e-02 L10_sharp:8.8651e-02 L11_sharp:1.2297e-01 L12_sharp:2.9104e-01 total_fnorm:1.8281e+00 total_l1_linf:2.9760e+03 total_spectral:9.2969e-01 L1_fnorm:2.4805e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4609e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4316e-01 L6_fnorm:2.4219e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.4219e-01 L9_fnorm:2.4707e-01 L10_fnorm:2.4902e-01 L11_fnorm:2.4902e-01 L12_fnorm:2.2070e-01 L1_l1linf:6.7871e-02 L2_l1linf:6.6895e-02 L3_l1linf:6.4941e-02 L4_l1linf:6.4453e-02 L5_l1linf:6.2988e-02 
L6_l1linf:6.2256e-02 L7_l1linf:6.3477e-02 L8_l1linf:6.3965e-02 L9_l1linf:6.3965e-02 L10_l1linf:6.5430e-02 L11_l1linf:6.3965e-02 L12_l1linf:3.6133e-02 L1_spectral:3.2593e-03 L2_spectral:3.2441e-03 L3_spectral:3.2223e-03 L4_spectral:3.2022e-03 L5_spectral:3.1925e-03 L6_spectral:3.1600e-03 L7_spectral:3.2159e-03 L8_spectral:3.1417e-03 L9_spectral:3.1786e-03 L10_spectral:3.1885e-03 L11_spectral:3.1845e-03 L12_spectral:3.1697e-03 train_time:195700ms step_avg:44.48ms +[2025-09-11 12:59:11] [Rank 0] step:4401/10000 train_time:198304ms step_avg:45.06ms +[2025-09-11 12:59:11] [Rank 0] step:4401/10000 train_time:198304ms step_avg:45.06ms +[2025-09-11 12:59:12] [Rank 0] step:4421/10000 train_time:199283ms step_avg:45.08ms +[2025-09-11 12:59:12] [Rank 0] step:4421/10000 train_time:199283ms step_avg:45.08ms +[2025-09-11 12:59:13] [Rank 0] step:4441/10000 train_time:199960ms step_avg:45.03ms +[2025-09-11 12:59:13] [Rank 0] step:4441/10000 train_time:199960ms step_avg:45.03ms +[2025-09-11 12:59:14] [Rank 0] step:4461/10000 train_time:200639ms step_avg:44.98ms +[2025-09-11 12:59:14] [Rank 0] step:4461/10000 train_time:200639ms step_avg:44.98ms +[2025-09-11 12:59:14] [Rank 0] step:4481/10000 train_time:201324ms step_avg:44.93ms +[2025-09-11 12:59:14] [Rank 0] step:4481/10000 train_time:201324ms step_avg:44.93ms +[2025-09-11 12:59:15] [Rank 0] step:4501/10000 train_time:202003ms step_avg:44.88ms +[2025-09-11 12:59:15] [Rank 0] step:4501/10000 train_time:202003ms step_avg:44.88ms +[2025-09-11 12:59:16] [Rank 0] step:4521/10000 train_time:202682ms step_avg:44.83ms +[2025-09-11 12:59:16] [Rank 0] step:4521/10000 train_time:202682ms step_avg:44.83ms +[2025-09-11 12:59:16] [Rank 0] step:4541/10000 train_time:203361ms step_avg:44.78ms +[2025-09-11 12:59:16] [Rank 0] step:4541/10000 train_time:203361ms step_avg:44.78ms +[2025-09-11 12:59:17] [Rank 0] step:4561/10000 train_time:204039ms step_avg:44.74ms +[2025-09-11 12:59:17] [Rank 0] step:4561/10000 train_time:204039ms step_avg:44.74ms 
+[2025-09-11 12:59:18] [Rank 0] step:4581/10000 train_time:204717ms step_avg:44.69ms +[2025-09-11 12:59:18] [Rank 0] step:4581/10000 train_time:204717ms step_avg:44.69ms +[2025-09-11 12:59:18] [Rank 0] step:4601/10000 train_time:205396ms step_avg:44.64ms +[2025-09-11 12:59:18] [Rank 0] step:4601/10000 train_time:205396ms step_avg:44.64ms +[2025-09-11 12:59:19] [Rank 0] step:4621/10000 train_time:206074ms step_avg:44.60ms +[2025-09-11 12:59:19] [Rank 0] step:4621/10000 train_time:206074ms step_avg:44.60ms +[2025-09-11 12:59:20] [Rank 0] step:4641/10000 train_time:206752ms step_avg:44.55ms +[2025-09-11 12:59:20] [Rank 0] step:4641/10000 train_time:206752ms step_avg:44.55ms +[2025-09-11 12:59:20] [Rank 0] step:4661/10000 train_time:207431ms step_avg:44.50ms +[2025-09-11 12:59:20] [Rank 0] step:4661/10000 train_time:207431ms step_avg:44.50ms +[2025-09-11 12:59:21] [Rank 0] step:4681/10000 train_time:208110ms step_avg:44.46ms +[2025-09-11 12:59:21] [Rank 0] step:4681/10000 train_time:208110ms step_avg:44.46ms +[2025-09-11 12:59:22] [Rank 0] step:4701/10000 train_time:208788ms step_avg:44.41ms +[2025-09-11 12:59:22] [Rank 0] step:4701/10000 train_time:208788ms step_avg:44.41ms +[2025-09-11 12:59:22] [Rank 0] step:4721/10000 train_time:209466ms step_avg:44.37ms +[2025-09-11 12:59:22] [Rank 0] step:4721/10000 train_time:209466ms step_avg:44.37ms +[2025-09-11 12:59:23] [Rank 0] step:4741/10000 train_time:210143ms step_avg:44.32ms +[2025-09-11 12:59:23] [Rank 0] step:4741/10000 train_time:210143ms step_avg:44.32ms +[2025-09-11 12:59:24] [Rank 0] step:4761/10000 train_time:210823ms step_avg:44.28ms +[2025-09-11 12:59:24] [Rank 0] step:4761/10000 train_time:210823ms step_avg:44.28ms +[2025-09-11 12:59:24] [Rank 0] step:4781/10000 train_time:211501ms step_avg:44.24ms +[2025-09-11 12:59:24] [Rank 0] step:4781/10000 train_time:211501ms step_avg:44.24ms +[2025-09-11 12:59:25] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 12:59:25] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 12:59:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 12:59:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 12:59:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 12:59:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 12:59:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:59:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:59:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 12:59:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 12:59:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 12:59:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 12:59:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 12:59:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 12:59:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 12:59:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 12:59:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 12:59:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 12:59:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 12:59:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 12:59:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 12:59:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 12:59:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 12:59:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 12:59:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 12:59:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 12:59:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 12:59:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 12:59:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 12:59:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 12:59:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 12:59:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 12:59:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 12:59:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 12:59:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 12:59:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 12:59:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 12:59:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 12:59:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 12:59:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 12:59:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 12:59:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 12:59:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 12:59:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 12:59:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 12:59:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 12:59:35] [Rank 0] PRINT: step:4800/10000 val_loss:5.5887 total_sharp:7.9785e-02 L1_sharp:9.3517e-03 L2_sharp:5.1425e-03 L3_sharp:1.0231e-02 L4_sharp:1.0954e-02 L5_sharp:1.6563e-02 L6_sharp:2.5709e-02 L7_sharp:3.7373e-02 L8_sharp:7.0165e-02 L9_sharp:8.3171e-02 L10_sharp:8.8814e-02 L11_sharp:1.2429e-01 L12_sharp:3.8851e-01 total_fnorm:1.7734e+00 total_l1_linf:2.9120e+03 total_spectral:9.1016e-01 L1_fnorm:2.4902e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4609e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4219e-01 L7_fnorm:2.4512e-01 L8_fnorm:2.4121e-01 L9_fnorm:2.4609e-01 L10_fnorm:2.4707e-01 L11_fnorm:2.4902e-01 L12_fnorm:2.2363e-01 L1_l1linf:6.6406e-02 L2_l1linf:6.7383e-02 L3_l1linf:6.4453e-02 L4_l1linf:6.4453e-02 L5_l1linf:6.2256e-02 L6_l1linf:6.2256e-02 L7_l1linf:6.1523e-02 L8_l1linf:6.0547e-02 L9_l1linf:6.2256e-02 L10_l1linf:6.3477e-02 L11_l1linf:6.2988e-02 L12_l1linf:3.5156e-02 L1_spectral:3.2442e-03 L2_spectral:3.2318e-03 L3_spectral:3.2198e-03 L4_spectral:3.2075e-03 L5_spectral:3.1957e-03 L6_spectral:3.2053e-03 L7_spectral:3.1945e-03 L8_spectral:3.1588e-03 L9_spectral:3.1824e-03 L10_spectral:3.1995e-03 L11_spectral:3.1826e-03 L12_spectral:3.1763e-03 train_time:212159ms step_avg:44.20ms +[2025-09-11 12:59:35] [Rank 0] PRINT: step:4800/10000 
val_loss:5.5887 total_sharp:7.9785e-02 L1_sharp:9.3517e-03 L2_sharp:5.1425e-03 L3_sharp:1.0231e-02 L4_sharp:1.0954e-02 L5_sharp:1.6563e-02 L6_sharp:2.5709e-02 L7_sharp:3.7373e-02 L8_sharp:7.0165e-02 L9_sharp:8.3171e-02 L10_sharp:8.8814e-02 L11_sharp:1.2429e-01 L12_sharp:3.8851e-01 total_fnorm:1.7734e+00 total_l1_linf:2.9120e+03 total_spectral:9.1016e-01 L1_fnorm:2.4902e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4609e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4219e-01 L7_fnorm:2.4512e-01 L8_fnorm:2.4121e-01 L9_fnorm:2.4609e-01 L10_fnorm:2.4707e-01 L11_fnorm:2.4902e-01 L12_fnorm:2.2363e-01 L1_l1linf:6.6406e-02 L2_l1linf:6.7383e-02 L3_l1linf:6.4453e-02 L4_l1linf:6.4453e-02 L5_l1linf:6.2256e-02 L6_l1linf:6.2256e-02 L7_l1linf:6.1523e-02 L8_l1linf:6.0547e-02 L9_l1linf:6.2256e-02 L10_l1linf:6.3477e-02 L11_l1linf:6.2988e-02 L12_l1linf:3.5156e-02 L1_spectral:3.2442e-03 L2_spectral:3.2318e-03 L3_spectral:3.2198e-03 L4_spectral:3.2075e-03 L5_spectral:3.1957e-03 L6_spectral:3.2053e-03 L7_spectral:3.1945e-03 L8_spectral:3.1588e-03 L9_spectral:3.1824e-03 L10_spectral:3.1995e-03 L11_spectral:3.1826e-03 L12_spectral:3.1763e-03 train_time:212159ms step_avg:44.20ms +[2025-09-11 12:59:37] [Rank 0] step:4801/10000 train_time:214015ms step_avg:44.58ms +[2025-09-11 12:59:37] [Rank 0] step:4801/10000 train_time:214015ms step_avg:44.58ms +[2025-09-11 12:59:38] [Rank 0] step:4821/10000 train_time:214726ms step_avg:44.54ms +[2025-09-11 12:59:38] [Rank 0] step:4821/10000 train_time:214726ms step_avg:44.54ms +[2025-09-11 12:59:38] [Rank 0] step:4841/10000 train_time:215407ms step_avg:44.50ms +[2025-09-11 12:59:38] [Rank 0] step:4841/10000 train_time:215407ms step_avg:44.50ms +[2025-09-11 12:59:39] [Rank 0] step:4861/10000 train_time:216088ms step_avg:44.45ms +[2025-09-11 12:59:39] [Rank 0] step:4861/10000 train_time:216088ms step_avg:44.45ms +[2025-09-11 12:59:40] [Rank 0] step:4881/10000 train_time:216768ms step_avg:44.41ms +[2025-09-11 12:59:40] [Rank 0] step:4881/10000 
train_time:216768ms step_avg:44.41ms +[2025-09-11 12:59:41] [Rank 0] step:4901/10000 train_time:217450ms step_avg:44.37ms +[2025-09-11 12:59:41] [Rank 0] step:4901/10000 train_time:217450ms step_avg:44.37ms +[2025-09-11 12:59:41] [Rank 0] step:4921/10000 train_time:218130ms step_avg:44.33ms +[2025-09-11 12:59:41] [Rank 0] step:4921/10000 train_time:218130ms step_avg:44.33ms +[2025-09-11 12:59:42] [Rank 0] step:4941/10000 train_time:218810ms step_avg:44.28ms +[2025-09-11 12:59:42] [Rank 0] step:4941/10000 train_time:218810ms step_avg:44.28ms +[2025-09-11 12:59:43] [Rank 0] step:4961/10000 train_time:219490ms step_avg:44.24ms +[2025-09-11 12:59:43] [Rank 0] step:4961/10000 train_time:219490ms step_avg:44.24ms +[2025-09-11 12:59:43] [Rank 0] step:4981/10000 train_time:220170ms step_avg:44.20ms +[2025-09-11 12:59:43] [Rank 0] step:4981/10000 train_time:220170ms step_avg:44.20ms +[2025-09-11 12:59:44] [Rank 0] step:5001/10000 train_time:220852ms step_avg:44.16ms +[2025-09-11 12:59:44] [Rank 0] step:5001/10000 train_time:220852ms step_avg:44.16ms +[2025-09-11 12:59:45] [Rank 0] step:5021/10000 train_time:221531ms step_avg:44.12ms +[2025-09-11 12:59:45] [Rank 0] step:5021/10000 train_time:221531ms step_avg:44.12ms +[2025-09-11 12:59:45] [Rank 0] step:5041/10000 train_time:222210ms step_avg:44.08ms +[2025-09-11 12:59:45] [Rank 0] step:5041/10000 train_time:222210ms step_avg:44.08ms +[2025-09-11 12:59:46] [Rank 0] step:5061/10000 train_time:222890ms step_avg:44.04ms +[2025-09-11 12:59:46] [Rank 0] step:5061/10000 train_time:222890ms step_avg:44.04ms +[2025-09-11 12:59:47] [Rank 0] step:5081/10000 train_time:223569ms step_avg:44.00ms +[2025-09-11 12:59:47] [Rank 0] step:5081/10000 train_time:223569ms step_avg:44.00ms +[2025-09-11 12:59:47] [Rank 0] step:5101/10000 train_time:224249ms step_avg:43.96ms +[2025-09-11 12:59:47] [Rank 0] step:5101/10000 train_time:224249ms step_avg:43.96ms +[2025-09-11 12:59:48] [Rank 0] step:5121/10000 train_time:224929ms step_avg:43.92ms 
+[2025-09-11 12:59:48] [Rank 0] step:5121/10000 train_time:224929ms step_avg:43.92ms +[2025-09-11 12:59:49] [Rank 0] step:5141/10000 train_time:225609ms step_avg:43.88ms +[2025-09-11 12:59:49] [Rank 0] step:5141/10000 train_time:225609ms step_avg:43.88ms +[2025-09-11 12:59:49] [Rank 0] step:5161/10000 train_time:226289ms step_avg:43.85ms +[2025-09-11 12:59:49] [Rank 0] step:5161/10000 train_time:226289ms step_avg:43.85ms +[2025-09-11 12:59:50] [Rank 0] step:5181/10000 train_time:226968ms step_avg:43.81ms +[2025-09-11 12:59:50] [Rank 0] step:5181/10000 train_time:226968ms step_avg:43.81ms +[2025-09-11 12:59:51] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 12:59:51] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 12:59:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 12:59:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 12:59:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 12:59:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 12:59:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:59:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:59:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 12:59:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 12:59:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 12:59:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 12:59:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 12:59:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 12:59:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 12:59:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 12:59:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 12:59:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 12:59:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 12:59:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 12:59:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 12:59:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 12:59:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 12:59:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 12:59:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 12:59:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 12:59:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 12:59:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 12:59:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 12:59:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 12:59:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 12:59:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 12:59:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 12:59:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 12:59:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 12:59:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 13:00:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 13:00:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 13:00:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 13:00:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 13:00:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 13:00:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 13:00:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 13:00:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 13:00:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 13:00:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:00:01] [Rank 0] PRINT: step:5200/10000 val_loss:5.5520 total_sharp:1.0130e-01 L1_sharp:1.1234e-02 L2_sharp:1.0196e-02 L3_sharp:1.0419e-02 L4_sharp:1.5374e-02 L5_sharp:2.0965e-02 L6_sharp:3.3312e-02 L7_sharp:4.3484e-02 L8_sharp:7.4944e-02 L9_sharp:8.3088e-02 L10_sharp:9.9992e-02 L11_sharp:1.4656e-01 L12_sharp:4.9711e-01 total_fnorm:1.7109e+00 total_l1_linf:2.8000e+03 total_spectral:8.7500e-01 L1_fnorm:2.4805e-01 L2_fnorm:2.4707e-01 L3_fnorm:2.4609e-01 L4_fnorm:2.4414e-01 L5_fnorm:2.4316e-01 L6_fnorm:2.4121e-01 L7_fnorm:2.4316e-01 L8_fnorm:2.4121e-01 L9_fnorm:2.4512e-01 L10_fnorm:2.4707e-01 L11_fnorm:2.4902e-01 L12_fnorm:2.2461e-01 L1_l1linf:6.6406e-02 L2_l1linf:6.5430e-02 L3_l1linf:6.3965e-02 L4_l1linf:6.2500e-02 L5_l1linf:6.1523e-02 L6_l1linf:6.0303e-02 L7_l1linf:6.0791e-02 L8_l1linf:5.9570e-02 L9_l1linf:6.0303e-02 L10_l1linf:6.1523e-02 L11_l1linf:6.2500e-02 L12_l1linf:3.4912e-02 L1_spectral:3.2381e-03 L2_spectral:3.2291e-03 L3_spectral:3.2073e-03 L4_spectral:3.2284e-03 L5_spectral:3.1981e-03 L6_spectral:3.1946e-03 L7_spectral:3.1940e-03 L8_spectral:3.1756e-03 L9_spectral:3.2105e-03 L10_spectral:3.1945e-03 L11_spectral:3.1931e-03 L12_spectral:3.1883e-03 train_time:227634ms step_avg:43.78ms +[2025-09-11 13:00:01] [Rank 0] PRINT: step:5200/10000 val_loss:5.5520 total_sharp:1.0130e-01 L1_sharp:1.1234e-02 L2_sharp:1.0196e-02 L3_sharp:1.0419e-02 L4_sharp:1.5374e-02 L5_sharp:2.0965e-02 L6_sharp:3.3312e-02 L7_sharp:4.3484e-02 L8_sharp:7.4944e-02 L9_sharp:8.3088e-02 L10_sharp:9.9992e-02 L11_sharp:1.4656e-01 L12_sharp:4.9711e-01 total_fnorm:1.7109e+00 total_l1_linf:2.8000e+03 total_spectral:8.7500e-01 L1_fnorm:2.4805e-01 L2_fnorm:2.4707e-01 L3_fnorm:2.4609e-01 L4_fnorm:2.4414e-01 L5_fnorm:2.4316e-01 L6_fnorm:2.4121e-01 L7_fnorm:2.4316e-01 L8_fnorm:2.4121e-01 L9_fnorm:2.4512e-01 L10_fnorm:2.4707e-01 L11_fnorm:2.4902e-01 L12_fnorm:2.2461e-01 L1_l1linf:6.6406e-02 L2_l1linf:6.5430e-02 L3_l1linf:6.3965e-02 L4_l1linf:6.2500e-02 L5_l1linf:6.1523e-02 
L6_l1linf:6.0303e-02 L7_l1linf:6.0791e-02 L8_l1linf:5.9570e-02 L9_l1linf:6.0303e-02 L10_l1linf:6.1523e-02 L11_l1linf:6.2500e-02 L12_l1linf:3.4912e-02 L1_spectral:3.2381e-03 L2_spectral:3.2291e-03 L3_spectral:3.2073e-03 L4_spectral:3.2284e-03 L5_spectral:3.1981e-03 L6_spectral:3.1946e-03 L7_spectral:3.1940e-03 L8_spectral:3.1756e-03 L9_spectral:3.2105e-03 L10_spectral:3.1945e-03 L11_spectral:3.1931e-03 L12_spectral:3.1883e-03 train_time:227634ms step_avg:43.78ms +[2025-09-11 13:00:03] [Rank 0] step:5201/10000 train_time:229460ms step_avg:44.12ms +[2025-09-11 13:00:03] [Rank 0] step:5201/10000 train_time:229460ms step_avg:44.12ms +[2025-09-11 13:00:04] [Rank 0] step:5221/10000 train_time:230180ms step_avg:44.09ms +[2025-09-11 13:00:04] [Rank 0] step:5221/10000 train_time:230180ms step_avg:44.09ms +[2025-09-11 13:00:04] [Rank 0] step:5241/10000 train_time:230868ms step_avg:44.05ms +[2025-09-11 13:00:04] [Rank 0] step:5241/10000 train_time:230868ms step_avg:44.05ms +[2025-09-11 13:00:05] [Rank 0] step:5261/10000 train_time:231556ms step_avg:44.01ms +[2025-09-11 13:00:05] [Rank 0] step:5261/10000 train_time:231556ms step_avg:44.01ms +[2025-09-11 13:00:06] [Rank 0] step:5281/10000 train_time:232247ms step_avg:43.98ms +[2025-09-11 13:00:06] [Rank 0] step:5281/10000 train_time:232247ms step_avg:43.98ms +[2025-09-11 13:00:06] [Rank 0] step:5301/10000 train_time:232936ms step_avg:43.94ms +[2025-09-11 13:00:06] [Rank 0] step:5301/10000 train_time:232936ms step_avg:43.94ms +[2025-09-11 13:00:07] [Rank 0] step:5321/10000 train_time:233624ms step_avg:43.91ms +[2025-09-11 13:00:07] [Rank 0] step:5321/10000 train_time:233624ms step_avg:43.91ms +[2025-09-11 13:00:08] [Rank 0] step:5341/10000 train_time:234312ms step_avg:43.87ms +[2025-09-11 13:00:08] [Rank 0] step:5341/10000 train_time:234312ms step_avg:43.87ms +[2025-09-11 13:00:08] [Rank 0] step:5361/10000 train_time:235000ms step_avg:43.84ms +[2025-09-11 13:00:08] [Rank 0] step:5361/10000 train_time:235000ms step_avg:43.84ms 
+[2025-09-11 13:00:09] [Rank 0] step:5381/10000 train_time:235689ms step_avg:43.80ms +[2025-09-11 13:00:09] [Rank 0] step:5381/10000 train_time:235689ms step_avg:43.80ms +[2025-09-11 13:00:10] [Rank 0] step:5401/10000 train_time:236375ms step_avg:43.77ms +[2025-09-11 13:00:10] [Rank 0] step:5401/10000 train_time:236375ms step_avg:43.77ms +[2025-09-11 13:00:10] [Rank 0] step:5421/10000 train_time:237065ms step_avg:43.73ms +[2025-09-11 13:00:10] [Rank 0] step:5421/10000 train_time:237065ms step_avg:43.73ms +[2025-09-11 13:00:11] [Rank 0] step:5441/10000 train_time:237755ms step_avg:43.70ms +[2025-09-11 13:00:11] [Rank 0] step:5441/10000 train_time:237755ms step_avg:43.70ms +[2025-09-11 13:00:12] [Rank 0] step:5461/10000 train_time:238444ms step_avg:43.66ms +[2025-09-11 13:00:12] [Rank 0] step:5461/10000 train_time:238444ms step_avg:43.66ms +[2025-09-11 13:00:12] [Rank 0] step:5481/10000 train_time:239133ms step_avg:43.63ms +[2025-09-11 13:00:12] [Rank 0] step:5481/10000 train_time:239133ms step_avg:43.63ms +[2025-09-11 13:00:13] [Rank 0] step:5501/10000 train_time:239821ms step_avg:43.60ms +[2025-09-11 13:00:13] [Rank 0] step:5501/10000 train_time:239821ms step_avg:43.60ms +[2025-09-11 13:00:14] [Rank 0] step:5521/10000 train_time:240509ms step_avg:43.56ms +[2025-09-11 13:00:14] [Rank 0] step:5521/10000 train_time:240509ms step_avg:43.56ms +[2025-09-11 13:00:15] [Rank 0] step:5541/10000 train_time:241200ms step_avg:43.53ms +[2025-09-11 13:00:15] [Rank 0] step:5541/10000 train_time:241200ms step_avg:43.53ms +[2025-09-11 13:00:15] [Rank 0] step:5561/10000 train_time:241890ms step_avg:43.50ms +[2025-09-11 13:00:15] [Rank 0] step:5561/10000 train_time:241890ms step_avg:43.50ms +[2025-09-11 13:00:16] [Rank 0] step:5581/10000 train_time:242579ms step_avg:43.47ms +[2025-09-11 13:00:16] [Rank 0] step:5581/10000 train_time:242579ms step_avg:43.47ms +[2025-09-11 13:00:17] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 13:00:17] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 13:00:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 13:00:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 13:00:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 13:00:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 13:00:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:00:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:00:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 13:00:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 13:00:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 13:00:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 13:00:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 13:00:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 13:00:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 13:00:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 13:00:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 13:00:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 13:00:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 13:00:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 13:00:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 13:00:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 13:00:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 13:00:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 13:00:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 13:00:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 13:00:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 13:00:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 13:00:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 13:00:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 13:00:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 13:00:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 13:00:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 13:00:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 13:00:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 13:00:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 13:00:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 13:00:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 13:00:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 13:00:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 13:00:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 13:00:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 13:00:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 13:00:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 13:00:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 13:00:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 13:00:27] [Rank 0] PRINT: step:5600/10000 val_loss:5.5286 total_sharp:7.5562e-02 L1_sharp:1.1380e-02 L2_sharp:7.3162e-03 L3_sharp:7.8251e-03 L4_sharp:9.4382e-03 L5_sharp:1.5483e-02 L6_sharp:2.5204e-02 L7_sharp:3.6674e-02 L8_sharp:5.9846e-02 L9_sharp:7.6266e-02 L10_sharp:8.6026e-02 L11_sharp:1.2162e-01 L12_sharp:2.6958e-01 total_fnorm:1.7109e+00 total_l1_linf:2.7840e+03 total_spectral:8.7500e-01 L1_fnorm:2.4805e-01 L2_fnorm:2.4707e-01 L3_fnorm:2.4414e-01 L4_fnorm:2.4316e-01 L5_fnorm:2.4316e-01 L6_fnorm:2.4023e-01 L7_fnorm:2.4316e-01 L8_fnorm:2.4023e-01 L9_fnorm:2.4512e-01 L10_fnorm:2.4609e-01 L11_fnorm:2.4805e-01 L12_fnorm:2.2656e-01 L1_l1linf:6.3965e-02 L2_l1linf:6.3477e-02 L3_l1linf:6.2988e-02 L4_l1linf:6.1279e-02 L5_l1linf:6.0059e-02 L6_l1linf:5.9814e-02 L7_l1linf:6.0303e-02 L8_l1linf:5.9082e-02 L9_l1linf:5.9814e-02 L10_l1linf:6.0791e-02 L11_l1linf:6.1523e-02 L12_l1linf:3.4424e-02 L1_spectral:3.2450e-03 L2_spectral:3.2594e-03 L3_spectral:3.2223e-03 L4_spectral:3.2215e-03 L5_spectral:3.2069e-03 L6_spectral:3.1957e-03 L7_spectral:3.1938e-03 L8_spectral:3.1801e-03 L9_spectral:3.2091e-03 L10_spectral:3.2020e-03 L11_spectral:3.1828e-03 L12_spectral:3.1817e-03 train_time:243248ms step_avg:43.44ms +[2025-09-11 13:00:27] [Rank 0] PRINT: step:5600/10000 
val_loss:5.5286 total_sharp:7.5562e-02 L1_sharp:1.1380e-02 L2_sharp:7.3162e-03 L3_sharp:7.8251e-03 L4_sharp:9.4382e-03 L5_sharp:1.5483e-02 L6_sharp:2.5204e-02 L7_sharp:3.6674e-02 L8_sharp:5.9846e-02 L9_sharp:7.6266e-02 L10_sharp:8.6026e-02 L11_sharp:1.2162e-01 L12_sharp:2.6958e-01 total_fnorm:1.7109e+00 total_l1_linf:2.7840e+03 total_spectral:8.7500e-01 L1_fnorm:2.4805e-01 L2_fnorm:2.4707e-01 L3_fnorm:2.4414e-01 L4_fnorm:2.4316e-01 L5_fnorm:2.4316e-01 L6_fnorm:2.4023e-01 L7_fnorm:2.4316e-01 L8_fnorm:2.4023e-01 L9_fnorm:2.4512e-01 L10_fnorm:2.4609e-01 L11_fnorm:2.4805e-01 L12_fnorm:2.2656e-01 L1_l1linf:6.3965e-02 L2_l1linf:6.3477e-02 L3_l1linf:6.2988e-02 L4_l1linf:6.1279e-02 L5_l1linf:6.0059e-02 L6_l1linf:5.9814e-02 L7_l1linf:6.0303e-02 L8_l1linf:5.9082e-02 L9_l1linf:5.9814e-02 L10_l1linf:6.0791e-02 L11_l1linf:6.1523e-02 L12_l1linf:3.4424e-02 L1_spectral:3.2450e-03 L2_spectral:3.2594e-03 L3_spectral:3.2223e-03 L4_spectral:3.2215e-03 L5_spectral:3.2069e-03 L6_spectral:3.1957e-03 L7_spectral:3.1938e-03 L8_spectral:3.1801e-03 L9_spectral:3.2091e-03 L10_spectral:3.2020e-03 L11_spectral:3.1828e-03 L12_spectral:3.1817e-03 train_time:243248ms step_avg:43.44ms +[2025-09-11 13:00:29] [Rank 0] step:5601/10000 train_time:245168ms step_avg:43.77ms +[2025-09-11 13:00:29] [Rank 0] step:5601/10000 train_time:245168ms step_avg:43.77ms +[2025-09-11 13:00:30] [Rank 0] step:5621/10000 train_time:245889ms step_avg:43.74ms +[2025-09-11 13:00:30] [Rank 0] step:5621/10000 train_time:245889ms step_avg:43.74ms +[2025-09-11 13:00:31] [Rank 0] step:5641/10000 train_time:246578ms step_avg:43.71ms +[2025-09-11 13:00:31] [Rank 0] step:5641/10000 train_time:246578ms step_avg:43.71ms +[2025-09-11 13:00:31] [Rank 0] step:5661/10000 train_time:247269ms step_avg:43.68ms +[2025-09-11 13:00:31] [Rank 0] step:5661/10000 train_time:247269ms step_avg:43.68ms +[2025-09-11 13:00:32] [Rank 0] step:5681/10000 train_time:247957ms step_avg:43.65ms +[2025-09-11 13:00:32] [Rank 0] step:5681/10000 
train_time:247957ms step_avg:43.65ms +[2025-09-11 13:00:33] [Rank 0] step:5701/10000 train_time:248649ms step_avg:43.61ms +[2025-09-11 13:00:33] [Rank 0] step:5701/10000 train_time:248649ms step_avg:43.61ms +[2025-09-11 13:00:33] [Rank 0] step:5721/10000 train_time:249338ms step_avg:43.58ms +[2025-09-11 13:00:33] [Rank 0] step:5721/10000 train_time:249338ms step_avg:43.58ms +[2025-09-11 13:00:34] [Rank 0] step:5741/10000 train_time:250029ms step_avg:43.55ms +[2025-09-11 13:00:34] [Rank 0] step:5741/10000 train_time:250029ms step_avg:43.55ms +[2025-09-11 13:00:35] [Rank 0] step:5761/10000 train_time:250719ms step_avg:43.52ms +[2025-09-11 13:00:35] [Rank 0] step:5761/10000 train_time:250719ms step_avg:43.52ms +[2025-09-11 13:00:35] [Rank 0] step:5781/10000 train_time:251410ms step_avg:43.49ms +[2025-09-11 13:00:35] [Rank 0] step:5781/10000 train_time:251410ms step_avg:43.49ms +[2025-09-11 13:00:36] [Rank 0] step:5801/10000 train_time:252102ms step_avg:43.46ms +[2025-09-11 13:00:36] [Rank 0] step:5801/10000 train_time:252102ms step_avg:43.46ms +[2025-09-11 13:00:37] [Rank 0] step:5821/10000 train_time:252945ms step_avg:43.45ms +[2025-09-11 13:00:37] [Rank 0] step:5821/10000 train_time:252945ms step_avg:43.45ms +[2025-09-11 13:00:38] [Rank 0] step:5841/10000 train_time:253758ms step_avg:43.44ms +[2025-09-11 13:00:38] [Rank 0] step:5841/10000 train_time:253758ms step_avg:43.44ms +[2025-09-11 13:00:38] [Rank 0] step:5861/10000 train_time:254448ms step_avg:43.41ms +[2025-09-11 13:00:38] [Rank 0] step:5861/10000 train_time:254448ms step_avg:43.41ms +[2025-09-11 13:00:39] [Rank 0] step:5881/10000 train_time:255137ms step_avg:43.38ms +[2025-09-11 13:00:39] [Rank 0] step:5881/10000 train_time:255137ms step_avg:43.38ms +[2025-09-11 13:00:40] [Rank 0] step:5901/10000 train_time:256123ms step_avg:43.40ms +[2025-09-11 13:00:40] [Rank 0] step:5901/10000 train_time:256123ms step_avg:43.40ms +[2025-09-11 13:00:41] [Rank 0] step:5921/10000 train_time:256816ms step_avg:43.37ms 
+[2025-09-11 13:00:41] [Rank 0] step:5921/10000 train_time:256816ms step_avg:43.37ms +[2025-09-11 13:00:42] [Rank 0] step:5941/10000 train_time:257507ms step_avg:43.34ms +[2025-09-11 13:00:42] [Rank 0] step:5941/10000 train_time:257507ms step_avg:43.34ms +[2025-09-11 13:00:42] [Rank 0] step:5961/10000 train_time:258199ms step_avg:43.31ms +[2025-09-11 13:00:42] [Rank 0] step:5961/10000 train_time:258199ms step_avg:43.31ms +[2025-09-11 13:00:43] [Rank 0] step:5981/10000 train_time:258889ms step_avg:43.29ms +[2025-09-11 13:00:43] [Rank 0] step:5981/10000 train_time:258889ms step_avg:43.29ms +[2025-09-11 13:00:44] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 13:00:44] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 13:00:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 13:00:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 13:00:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 13:00:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 13:00:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:00:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:00:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 13:00:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 13:00:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 13:00:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 13:00:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 13:00:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 13:00:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 13:00:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 13:00:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 13:00:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 13:00:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 13:00:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 13:00:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 13:00:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 13:00:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 13:00:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 13:00:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 13:00:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 13:00:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 13:00:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 13:00:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 13:00:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 13:00:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 13:00:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 13:00:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 13:00:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 13:00:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 13:00:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 13:00:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 13:00:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 13:00:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 13:00:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 13:00:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 13:00:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 13:00:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 13:00:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 13:00:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 13:00:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:00:54] [Rank 0] PRINT: step:6000/10000 val_loss:5.4952 total_sharp:9.3418e-02 L1_sharp:6.6025e-03 L2_sharp:5.3116e-03 L3_sharp:5.7055e-03 L4_sharp:9.7724e-03 L5_sharp:1.1393e-02 L6_sharp:1.9567e-02 L7_sharp:3.4664e-02 L8_sharp:5.7430e-02 L9_sharp:6.8225e-02 L10_sharp:9.1223e-02 L11_sharp:1.4795e-01 L12_sharp:5.0969e-01 total_fnorm:1.6562e+00 total_l1_linf:2.7040e+03 total_spectral:8.4766e-01 L1_fnorm:2.4805e-01 L2_fnorm:2.4707e-01 L3_fnorm:2.4512e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4316e-01 L6_fnorm:2.4023e-01 L7_fnorm:2.4316e-01 L8_fnorm:2.4023e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4609e-01 L11_fnorm:2.4805e-01 L12_fnorm:2.2559e-01 L1_l1linf:6.5430e-02 L2_l1linf:6.4941e-02 L3_l1linf:6.3477e-02 L4_l1linf:6.1523e-02 L5_l1linf:5.9814e-02 L6_l1linf:5.8350e-02 L7_l1linf:5.8594e-02 L8_l1linf:5.8350e-02 L9_l1linf:5.9814e-02 L10_l1linf:6.0547e-02 L11_l1linf:6.0059e-02 L12_l1linf:3.4424e-02 L1_spectral:3.2453e-03 L2_spectral:3.2507e-03 L3_spectral:3.2371e-03 L4_spectral:3.2125e-03 L5_spectral:3.2049e-03 L6_spectral:3.1993e-03 L7_spectral:3.2038e-03 L8_spectral:3.1914e-03 L9_spectral:3.2093e-03 L10_spectral:3.2007e-03 L11_spectral:3.2071e-03 L12_spectral:3.2018e-03 train_time:259562ms step_avg:43.26ms +[2025-09-11 13:00:54] [Rank 0] PRINT: step:6000/10000 val_loss:5.4952 total_sharp:9.3418e-02 L1_sharp:6.6025e-03 L2_sharp:5.3116e-03 L3_sharp:5.7055e-03 L4_sharp:9.7724e-03 L5_sharp:1.1393e-02 L6_sharp:1.9567e-02 L7_sharp:3.4664e-02 L8_sharp:5.7430e-02 L9_sharp:6.8225e-02 L10_sharp:9.1223e-02 L11_sharp:1.4795e-01 L12_sharp:5.0969e-01 total_fnorm:1.6562e+00 total_l1_linf:2.7040e+03 total_spectral:8.4766e-01 L1_fnorm:2.4805e-01 L2_fnorm:2.4707e-01 L3_fnorm:2.4512e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4316e-01 L6_fnorm:2.4023e-01 L7_fnorm:2.4316e-01 L8_fnorm:2.4023e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4609e-01 L11_fnorm:2.4805e-01 L12_fnorm:2.2559e-01 L1_l1linf:6.5430e-02 L2_l1linf:6.4941e-02 L3_l1linf:6.3477e-02 L4_l1linf:6.1523e-02 L5_l1linf:5.9814e-02 
L6_l1linf:5.8350e-02 L7_l1linf:5.8594e-02 L8_l1linf:5.8350e-02 L9_l1linf:5.9814e-02 L10_l1linf:6.0547e-02 L11_l1linf:6.0059e-02 L12_l1linf:3.4424e-02 L1_spectral:3.2453e-03 L2_spectral:3.2507e-03 L3_spectral:3.2371e-03 L4_spectral:3.2125e-03 L5_spectral:3.2049e-03 L6_spectral:3.1993e-03 L7_spectral:3.2038e-03 L8_spectral:3.1914e-03 L9_spectral:3.2093e-03 L10_spectral:3.2007e-03 L11_spectral:3.2071e-03 L12_spectral:3.2018e-03 train_time:259562ms step_avg:43.26ms +[2025-09-11 13:00:56] [Rank 0] step:6001/10000 train_time:261372ms step_avg:43.55ms +[2025-09-11 13:00:56] [Rank 0] step:6001/10000 train_time:261372ms step_avg:43.55ms +[2025-09-11 13:00:57] [Rank 0] step:6021/10000 train_time:262103ms step_avg:43.53ms +[2025-09-11 13:00:57] [Rank 0] step:6021/10000 train_time:262103ms step_avg:43.53ms +[2025-09-11 13:00:58] [Rank 0] step:6041/10000 train_time:262796ms step_avg:43.50ms +[2025-09-11 13:00:58] [Rank 0] step:6041/10000 train_time:262796ms step_avg:43.50ms +[2025-09-11 13:00:58] [Rank 0] step:6061/10000 train_time:263489ms step_avg:43.47ms +[2025-09-11 13:00:58] [Rank 0] step:6061/10000 train_time:263489ms step_avg:43.47ms +[2025-09-11 13:00:59] [Rank 0] step:6081/10000 train_time:264181ms step_avg:43.44ms +[2025-09-11 13:00:59] [Rank 0] step:6081/10000 train_time:264181ms step_avg:43.44ms +[2025-09-11 13:01:00] [Rank 0] step:6101/10000 train_time:264873ms step_avg:43.41ms +[2025-09-11 13:01:00] [Rank 0] step:6101/10000 train_time:264873ms step_avg:43.41ms +[2025-09-11 13:01:00] [Rank 0] step:6121/10000 train_time:265566ms step_avg:43.39ms +[2025-09-11 13:01:00] [Rank 0] step:6121/10000 train_time:265566ms step_avg:43.39ms +[2025-09-11 13:01:01] [Rank 0] step:6141/10000 train_time:266260ms step_avg:43.36ms +[2025-09-11 13:01:01] [Rank 0] step:6141/10000 train_time:266260ms step_avg:43.36ms +[2025-09-11 13:01:02] [Rank 0] step:6161/10000 train_time:266955ms step_avg:43.33ms +[2025-09-11 13:01:02] [Rank 0] step:6161/10000 train_time:266955ms step_avg:43.33ms 
+[2025-09-11 13:01:03] [Rank 0] step:6181/10000 train_time:267646ms step_avg:43.30ms +[2025-09-11 13:01:03] [Rank 0] step:6181/10000 train_time:267646ms step_avg:43.30ms +[2025-09-11 13:01:03] [Rank 0] step:6201/10000 train_time:268340ms step_avg:43.27ms +[2025-09-11 13:01:03] [Rank 0] step:6201/10000 train_time:268340ms step_avg:43.27ms +[2025-09-11 13:01:04] [Rank 0] step:6221/10000 train_time:269033ms step_avg:43.25ms +[2025-09-11 13:01:04] [Rank 0] step:6221/10000 train_time:269033ms step_avg:43.25ms +[2025-09-11 13:01:05] [Rank 0] step:6241/10000 train_time:269726ms step_avg:43.22ms +[2025-09-11 13:01:05] [Rank 0] step:6241/10000 train_time:269726ms step_avg:43.22ms +[2025-09-11 13:01:05] [Rank 0] step:6261/10000 train_time:270416ms step_avg:43.19ms +[2025-09-11 13:01:05] [Rank 0] step:6261/10000 train_time:270416ms step_avg:43.19ms +[2025-09-11 13:01:06] [Rank 0] step:6281/10000 train_time:271110ms step_avg:43.16ms +[2025-09-11 13:01:06] [Rank 0] step:6281/10000 train_time:271110ms step_avg:43.16ms +[2025-09-11 13:01:07] [Rank 0] step:6301/10000 train_time:271800ms step_avg:43.14ms +[2025-09-11 13:01:07] [Rank 0] step:6301/10000 train_time:271800ms step_avg:43.14ms +[2025-09-11 13:01:07] [Rank 0] step:6321/10000 train_time:272495ms step_avg:43.11ms +[2025-09-11 13:01:07] [Rank 0] step:6321/10000 train_time:272495ms step_avg:43.11ms +[2025-09-11 13:01:08] [Rank 0] step:6341/10000 train_time:273187ms step_avg:43.08ms +[2025-09-11 13:01:08] [Rank 0] step:6341/10000 train_time:273187ms step_avg:43.08ms +[2025-09-11 13:01:09] [Rank 0] step:6361/10000 train_time:273879ms step_avg:43.06ms +[2025-09-11 13:01:09] [Rank 0] step:6361/10000 train_time:273879ms step_avg:43.06ms +[2025-09-11 13:01:09] [Rank 0] step:6381/10000 train_time:274570ms step_avg:43.03ms +[2025-09-11 13:01:09] [Rank 0] step:6381/10000 train_time:274570ms step_avg:43.03ms +[2025-09-11 13:01:10] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 13:01:10] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 13:01:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 13:01:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 13:01:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 13:01:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 13:01:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:01:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:01:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 13:01:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 13:01:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 13:01:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 13:01:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 13:01:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 13:01:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 13:01:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 13:01:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 13:01:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 13:01:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 13:01:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 13:01:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 13:01:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 13:01:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 13:01:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 13:01:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 13:01:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 13:01:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 13:01:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 13:01:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 13:01:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 13:01:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 13:01:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 13:01:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 13:01:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 13:01:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 13:01:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 13:01:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 13:01:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 13:01:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 13:01:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 13:01:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 13:01:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 13:01:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 13:01:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 13:01:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 13:01:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 13:01:21] [Rank 0] PRINT: step:6400/10000 val_loss:5.4654 total_sharp:7.5092e-02 L1_sharp:9.5722e-03 L2_sharp:5.0549e-03 L3_sharp:7.3826e-03 L4_sharp:1.0094e-02 L5_sharp:1.0708e-02 L6_sharp:2.2876e-02 L7_sharp:3.5097e-02 L8_sharp:5.5062e-02 L9_sharp:6.5519e-02 L10_sharp:7.6373e-02 L11_sharp:1.0993e-01 L12_sharp:2.4882e-01 total_fnorm:1.4766e+00 total_l1_linf:2.2720e+03 total_spectral:7.4219e-01 L1_fnorm:2.2070e-01 L2_fnorm:2.2070e-01 L3_fnorm:2.1777e-01 L4_fnorm:2.1777e-01 L5_fnorm:2.1680e-01 L6_fnorm:2.1582e-01 L7_fnorm:2.1680e-01 L8_fnorm:2.1387e-01 L9_fnorm:2.1680e-01 L10_fnorm:2.1777e-01 L11_fnorm:2.1973e-01 L12_fnorm:2.0215e-01 L1_l1linf:5.6152e-02 L2_l1linf:5.6152e-02 L3_l1linf:5.3711e-02 L4_l1linf:5.3223e-02 L5_l1linf:5.2490e-02 L6_l1linf:5.0537e-02 L7_l1linf:5.0537e-02 L8_l1linf:4.9316e-02 L9_l1linf:4.9561e-02 L10_l1linf:5.0781e-02 L11_l1linf:5.1270e-02 L12_l1linf:3.0273e-02 L1_spectral:2.9402e-03 L2_spectral:2.9430e-03 L3_spectral:2.9430e-03 L4_spectral:2.9143e-03 L5_spectral:2.9238e-03 L6_spectral:2.9112e-03 L7_spectral:2.9015e-03 L8_spectral:2.9030e-03 L9_spectral:2.9141e-03 L10_spectral:2.9322e-03 L11_spectral:2.9160e-03 L12_spectral:2.9027e-03 train_time:275241ms step_avg:43.01ms +[2025-09-11 13:01:21] [Rank 0] PRINT: step:6400/10000 
val_loss:5.4654 total_sharp:7.5092e-02 L1_sharp:9.5722e-03 L2_sharp:5.0549e-03 L3_sharp:7.3826e-03 L4_sharp:1.0094e-02 L5_sharp:1.0708e-02 L6_sharp:2.2876e-02 L7_sharp:3.5097e-02 L8_sharp:5.5062e-02 L9_sharp:6.5519e-02 L10_sharp:7.6373e-02 L11_sharp:1.0993e-01 L12_sharp:2.4882e-01 total_fnorm:1.4766e+00 total_l1_linf:2.2720e+03 total_spectral:7.4219e-01 L1_fnorm:2.2070e-01 L2_fnorm:2.2070e-01 L3_fnorm:2.1777e-01 L4_fnorm:2.1777e-01 L5_fnorm:2.1680e-01 L6_fnorm:2.1582e-01 L7_fnorm:2.1680e-01 L8_fnorm:2.1387e-01 L9_fnorm:2.1680e-01 L10_fnorm:2.1777e-01 L11_fnorm:2.1973e-01 L12_fnorm:2.0215e-01 L1_l1linf:5.6152e-02 L2_l1linf:5.6152e-02 L3_l1linf:5.3711e-02 L4_l1linf:5.3223e-02 L5_l1linf:5.2490e-02 L6_l1linf:5.0537e-02 L7_l1linf:5.0537e-02 L8_l1linf:4.9316e-02 L9_l1linf:4.9561e-02 L10_l1linf:5.0781e-02 L11_l1linf:5.1270e-02 L12_l1linf:3.0273e-02 L1_spectral:2.9402e-03 L2_spectral:2.9430e-03 L3_spectral:2.9430e-03 L4_spectral:2.9143e-03 L5_spectral:2.9238e-03 L6_spectral:2.9112e-03 L7_spectral:2.9015e-03 L8_spectral:2.9030e-03 L9_spectral:2.9141e-03 L10_spectral:2.9322e-03 L11_spectral:2.9160e-03 L12_spectral:2.9027e-03 train_time:275241ms step_avg:43.01ms +[2025-09-11 13:01:23] [Rank 0] step:6401/10000 train_time:277057ms step_avg:43.28ms +[2025-09-11 13:01:23] [Rank 0] step:6401/10000 train_time:277057ms step_avg:43.28ms +[2025-09-11 13:01:24] [Rank 0] step:6421/10000 train_time:277778ms step_avg:43.26ms +[2025-09-11 13:01:24] [Rank 0] step:6421/10000 train_time:277778ms step_avg:43.26ms +[2025-09-11 13:01:24] [Rank 0] step:6441/10000 train_time:278469ms step_avg:43.23ms +[2025-09-11 13:01:24] [Rank 0] step:6441/10000 train_time:278469ms step_avg:43.23ms +[2025-09-11 13:01:25] [Rank 0] step:6461/10000 train_time:279161ms step_avg:43.21ms +[2025-09-11 13:01:25] [Rank 0] step:6461/10000 train_time:279161ms step_avg:43.21ms +[2025-09-11 13:01:26] [Rank 0] step:6481/10000 train_time:279855ms step_avg:43.18ms +[2025-09-11 13:01:26] [Rank 0] step:6481/10000 
train_time:279855ms step_avg:43.18ms +[2025-09-11 13:01:26] [Rank 0] step:6501/10000 train_time:280548ms step_avg:43.15ms +[2025-09-11 13:01:26] [Rank 0] step:6501/10000 train_time:280548ms step_avg:43.15ms +[2025-09-11 13:01:27] [Rank 0] step:6521/10000 train_time:281240ms step_avg:43.13ms +[2025-09-11 13:01:27] [Rank 0] step:6521/10000 train_time:281240ms step_avg:43.13ms +[2025-09-11 13:01:28] [Rank 0] step:6541/10000 train_time:281930ms step_avg:43.10ms +[2025-09-11 13:01:28] [Rank 0] step:6541/10000 train_time:281930ms step_avg:43.10ms +[2025-09-11 13:01:28] [Rank 0] step:6561/10000 train_time:282622ms step_avg:43.08ms +[2025-09-11 13:01:28] [Rank 0] step:6561/10000 train_time:282622ms step_avg:43.08ms +[2025-09-11 13:01:29] [Rank 0] step:6581/10000 train_time:283315ms step_avg:43.05ms +[2025-09-11 13:01:29] [Rank 0] step:6581/10000 train_time:283315ms step_avg:43.05ms +[2025-09-11 13:01:30] [Rank 0] step:6601/10000 train_time:284007ms step_avg:43.02ms +[2025-09-11 13:01:30] [Rank 0] step:6601/10000 train_time:284007ms step_avg:43.02ms +[2025-09-11 13:01:31] [Rank 0] step:6621/10000 train_time:284697ms step_avg:43.00ms +[2025-09-11 13:01:31] [Rank 0] step:6621/10000 train_time:284697ms step_avg:43.00ms +[2025-09-11 13:01:31] [Rank 0] step:6641/10000 train_time:285389ms step_avg:42.97ms +[2025-09-11 13:01:31] [Rank 0] step:6641/10000 train_time:285389ms step_avg:42.97ms +[2025-09-11 13:01:32] [Rank 0] step:6661/10000 train_time:286082ms step_avg:42.95ms +[2025-09-11 13:01:32] [Rank 0] step:6661/10000 train_time:286082ms step_avg:42.95ms +[2025-09-11 13:01:33] [Rank 0] step:6681/10000 train_time:286780ms step_avg:42.92ms +[2025-09-11 13:01:33] [Rank 0] step:6681/10000 train_time:286780ms step_avg:42.92ms +[2025-09-11 13:01:33] [Rank 0] step:6701/10000 train_time:287478ms step_avg:42.90ms +[2025-09-11 13:01:33] [Rank 0] step:6701/10000 train_time:287478ms step_avg:42.90ms +[2025-09-11 13:01:34] [Rank 0] step:6721/10000 train_time:288176ms step_avg:42.88ms 
+[2025-09-11 13:01:34] [Rank 0] step:6721/10000 train_time:288176ms step_avg:42.88ms +[2025-09-11 13:01:35] [Rank 0] step:6741/10000 train_time:288876ms step_avg:42.85ms +[2025-09-11 13:01:35] [Rank 0] step:6741/10000 train_time:288876ms step_avg:42.85ms +[2025-09-11 13:01:35] [Rank 0] step:6761/10000 train_time:289572ms step_avg:42.83ms +[2025-09-11 13:01:35] [Rank 0] step:6761/10000 train_time:289572ms step_avg:42.83ms +[2025-09-11 13:01:36] [Rank 0] step:6781/10000 train_time:290270ms step_avg:42.81ms +[2025-09-11 13:01:36] [Rank 0] step:6781/10000 train_time:290270ms step_avg:42.81ms +[2025-09-11 13:01:37] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 13:01:37] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 13:01:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 13:01:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 13:01:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 13:01:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 13:01:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:01:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:01:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 13:01:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 13:01:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 13:01:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 13:01:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 13:01:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 13:01:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 13:01:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 13:01:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 13:01:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 13:01:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 13:01:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 13:01:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 13:01:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 13:01:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 13:01:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 13:01:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 13:01:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 13:01:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 13:01:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 13:01:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 13:01:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 13:01:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 13:01:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 13:01:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 13:01:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 13:01:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 13:01:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 13:01:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 13:01:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 13:01:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 13:01:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 13:01:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 13:01:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 13:01:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 13:01:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 13:01:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 13:01:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:01:48] [Rank 0] PRINT: step:6800/10000 val_loss:5.4424 total_sharp:6.4578e-02 L1_sharp:7.8418e-03 L2_sharp:4.7049e-03 L3_sharp:4.5656e-03 L4_sharp:8.7851e-03 L5_sharp:1.2941e-02 L6_sharp:2.0346e-02 L7_sharp:3.1987e-02 L8_sharp:5.6720e-02 L9_sharp:6.7701e-02 L10_sharp:7.2337e-02 L11_sharp:1.0096e-01 L12_sharp:2.2100e-01 total_fnorm:1.3438e+00 total_l1_linf:1.8960e+03 total_spectral:6.5234e-01 L1_fnorm:1.9434e-01 L2_fnorm:1.9238e-01 L3_fnorm:1.9141e-01 L4_fnorm:1.9043e-01 L5_fnorm:1.8945e-01 L6_fnorm:1.8750e-01 L7_fnorm:1.8848e-01 L8_fnorm:1.8555e-01 L9_fnorm:1.8848e-01 L10_fnorm:1.8945e-01 L11_fnorm:1.9141e-01 L12_fnorm:1.7676e-01 L1_l1linf:4.8096e-02 L2_l1linf:4.7607e-02 L3_l1linf:4.5410e-02 L4_l1linf:4.5166e-02 L5_l1linf:4.3213e-02 L6_l1linf:4.1992e-02 L7_l1linf:4.1992e-02 L8_l1linf:4.1016e-02 L9_l1linf:4.0771e-02 L10_l1linf:4.0771e-02 L11_l1linf:4.1504e-02 L12_l1linf:2.5269e-02 L1_spectral:2.6199e-03 L2_spectral:2.6033e-03 L3_spectral:2.6042e-03 L4_spectral:2.6250e-03 L5_spectral:2.6124e-03 L6_spectral:2.5901e-03 L7_spectral:2.6078e-03 L8_spectral:2.5924e-03 L9_spectral:2.6161e-03 L10_spectral:2.6048e-03 L11_spectral:2.5975e-03 L12_spectral:2.5720e-03 train_time:290947ms step_avg:42.79ms +[2025-09-11 13:01:48] [Rank 0] PRINT: step:6800/10000 val_loss:5.4424 total_sharp:6.4578e-02 L1_sharp:7.8418e-03 L2_sharp:4.7049e-03 L3_sharp:4.5656e-03 L4_sharp:8.7851e-03 L5_sharp:1.2941e-02 L6_sharp:2.0346e-02 L7_sharp:3.1987e-02 L8_sharp:5.6720e-02 L9_sharp:6.7701e-02 L10_sharp:7.2337e-02 L11_sharp:1.0096e-01 L12_sharp:2.2100e-01 total_fnorm:1.3438e+00 total_l1_linf:1.8960e+03 total_spectral:6.5234e-01 L1_fnorm:1.9434e-01 L2_fnorm:1.9238e-01 L3_fnorm:1.9141e-01 L4_fnorm:1.9043e-01 L5_fnorm:1.8945e-01 L6_fnorm:1.8750e-01 L7_fnorm:1.8848e-01 L8_fnorm:1.8555e-01 L9_fnorm:1.8848e-01 L10_fnorm:1.8945e-01 L11_fnorm:1.9141e-01 L12_fnorm:1.7676e-01 L1_l1linf:4.8096e-02 L2_l1linf:4.7607e-02 L3_l1linf:4.5410e-02 L4_l1linf:4.5166e-02 L5_l1linf:4.3213e-02 
L6_l1linf:4.1992e-02 L7_l1linf:4.1992e-02 L8_l1linf:4.1016e-02 L9_l1linf:4.0771e-02 L10_l1linf:4.0771e-02 L11_l1linf:4.1504e-02 L12_l1linf:2.5269e-02 L1_spectral:2.6199e-03 L2_spectral:2.6033e-03 L3_spectral:2.6042e-03 L4_spectral:2.6250e-03 L5_spectral:2.6124e-03 L6_spectral:2.5901e-03 L7_spectral:2.6078e-03 L8_spectral:2.5924e-03 L9_spectral:2.6161e-03 L10_spectral:2.6048e-03 L11_spectral:2.5975e-03 L12_spectral:2.5720e-03 train_time:290947ms step_avg:42.79ms +[2025-09-11 13:01:50] [Rank 0] step:6801/10000 train_time:292891ms step_avg:43.07ms +[2025-09-11 13:01:50] [Rank 0] step:6801/10000 train_time:292891ms step_avg:43.07ms +[2025-09-11 13:01:50] [Rank 0] step:6821/10000 train_time:293621ms step_avg:43.05ms +[2025-09-11 13:01:50] [Rank 0] step:6821/10000 train_time:293621ms step_avg:43.05ms +[2025-09-11 13:01:51] [Rank 0] step:6841/10000 train_time:294322ms step_avg:43.02ms +[2025-09-11 13:01:51] [Rank 0] step:6841/10000 train_time:294322ms step_avg:43.02ms +[2025-09-11 13:01:52] [Rank 0] step:6861/10000 train_time:295022ms step_avg:43.00ms +[2025-09-11 13:01:52] [Rank 0] step:6861/10000 train_time:295022ms step_avg:43.00ms +[2025-09-11 13:01:52] [Rank 0] step:6881/10000 train_time:295724ms step_avg:42.98ms +[2025-09-11 13:01:52] [Rank 0] step:6881/10000 train_time:295724ms step_avg:42.98ms +[2025-09-11 13:01:53] [Rank 0] step:6901/10000 train_time:296422ms step_avg:42.95ms +[2025-09-11 13:01:53] [Rank 0] step:6901/10000 train_time:296422ms step_avg:42.95ms +[2025-09-11 13:01:54] [Rank 0] step:6921/10000 train_time:297121ms step_avg:42.93ms +[2025-09-11 13:01:54] [Rank 0] step:6921/10000 train_time:297121ms step_avg:42.93ms +[2025-09-11 13:01:55] [Rank 0] step:6941/10000 train_time:297821ms step_avg:42.91ms +[2025-09-11 13:01:55] [Rank 0] step:6941/10000 train_time:297821ms step_avg:42.91ms +[2025-09-11 13:01:55] [Rank 0] step:6961/10000 train_time:298522ms step_avg:42.88ms +[2025-09-11 13:01:55] [Rank 0] step:6961/10000 train_time:298522ms step_avg:42.88ms 
+[2025-09-11 13:01:56] [Rank 0] step:6981/10000 train_time:299226ms step_avg:42.86ms +[2025-09-11 13:01:56] [Rank 0] step:6981/10000 train_time:299226ms step_avg:42.86ms +[2025-09-11 13:01:57] [Rank 0] step:7001/10000 train_time:299926ms step_avg:42.84ms +[2025-09-11 13:01:57] [Rank 0] step:7001/10000 train_time:299926ms step_avg:42.84ms +[2025-09-11 13:01:57] [Rank 0] step:7021/10000 train_time:300625ms step_avg:42.82ms +[2025-09-11 13:01:57] [Rank 0] step:7021/10000 train_time:300625ms step_avg:42.82ms +[2025-09-11 13:01:58] [Rank 0] step:7041/10000 train_time:301323ms step_avg:42.80ms +[2025-09-11 13:01:58] [Rank 0] step:7041/10000 train_time:301323ms step_avg:42.80ms +[2025-09-11 13:01:59] [Rank 0] step:7061/10000 train_time:302023ms step_avg:42.77ms +[2025-09-11 13:01:59] [Rank 0] step:7061/10000 train_time:302023ms step_avg:42.77ms +[2025-09-11 13:01:59] [Rank 0] step:7081/10000 train_time:302722ms step_avg:42.75ms +[2025-09-11 13:01:59] [Rank 0] step:7081/10000 train_time:302722ms step_avg:42.75ms +[2025-09-11 13:02:00] [Rank 0] step:7101/10000 train_time:303422ms step_avg:42.73ms +[2025-09-11 13:02:00] [Rank 0] step:7101/10000 train_time:303422ms step_avg:42.73ms +[2025-09-11 13:02:01] [Rank 0] step:7121/10000 train_time:304123ms step_avg:42.71ms +[2025-09-11 13:02:01] [Rank 0] step:7121/10000 train_time:304123ms step_avg:42.71ms +[2025-09-11 13:02:02] [Rank 0] step:7141/10000 train_time:304822ms step_avg:42.69ms +[2025-09-11 13:02:02] [Rank 0] step:7141/10000 train_time:304822ms step_avg:42.69ms +[2025-09-11 13:02:02] [Rank 0] step:7161/10000 train_time:305526ms step_avg:42.67ms +[2025-09-11 13:02:02] [Rank 0] step:7161/10000 train_time:305526ms step_avg:42.67ms +[2025-09-11 13:02:03] [Rank 0] step:7181/10000 train_time:306225ms step_avg:42.64ms +[2025-09-11 13:02:03] [Rank 0] step:7181/10000 train_time:306225ms step_avg:42.64ms +[2025-09-11 13:02:04] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 13:02:04] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 13:02:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 13:02:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 13:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 13:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 13:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 13:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 13:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 13:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 13:02:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 13:02:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 13:02:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 13:02:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 13:02:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 13:02:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 13:02:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 13:02:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 13:02:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 13:02:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 13:02:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 13:02:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 13:02:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 13:02:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 13:02:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 13:02:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 13:02:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 13:02:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 13:02:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 13:02:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 13:02:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 13:02:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 13:02:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 13:02:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 13:02:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 13:02:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 13:02:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 13:02:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 13:02:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 13:02:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 13:02:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 13:02:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 13:02:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 13:02:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 13:02:17] [Rank 0] PRINT: step:7200/10000 val_loss:5.4198 total_sharp:6.9728e-02 L1_sharp:8.3214e-03 L2_sharp:4.1725e-03 L3_sharp:5.2838e-03 L4_sharp:7.9903e-03 L5_sharp:1.2694e-02 L6_sharp:1.9973e-02 L7_sharp:3.1109e-02 L8_sharp:5.0711e-02 L9_sharp:5.9238e-02 L10_sharp:6.9644e-02 L11_sharp:1.0014e-01 L12_sharp:2.4673e-01 total_fnorm:1.0547e+00 total_l1_linf:1.4960e+03 total_spectral:5.3516e-01 L1_fnorm:1.6797e-01 L2_fnorm:1.6602e-01 L3_fnorm:1.6504e-01 L4_fnorm:1.6504e-01 L5_fnorm:1.6406e-01 L6_fnorm:1.6211e-01 L7_fnorm:1.6309e-01 L8_fnorm:1.6016e-01 L9_fnorm:1.6309e-01 L10_fnorm:1.6309e-01 L11_fnorm:1.6406e-01 L12_fnorm:1.5137e-01 L1_l1linf:3.9795e-02 L2_l1linf:3.8818e-02 L3_l1linf:3.7109e-02 L4_l1linf:3.6865e-02 L5_l1linf:3.5645e-02 L6_l1linf:3.4912e-02 L7_l1linf:3.4180e-02 L8_l1linf:3.3936e-02 L9_l1linf:3.2715e-02 L10_l1linf:3.3203e-02 L11_l1linf:3.3691e-02 L12_l1linf:2.1484e-02 L1_spectral:2.3116e-03 L2_spectral:2.3258e-03 L3_spectral:2.3090e-03 L4_spectral:2.3265e-03 L5_spectral:2.3059e-03 L6_spectral:2.3114e-03 L7_spectral:2.3030e-03 L8_spectral:2.2712e-03 L9_spectral:2.2948e-03 L10_spectral:2.2925e-03 L11_spectral:2.2852e-03 L12_spectral:2.2918e-03 train_time:306906ms step_avg:42.63ms +[2025-09-11 13:02:17] [Rank 0] PRINT: step:7200/10000 
val_loss:5.4198 total_sharp:6.9728e-02 L1_sharp:8.3214e-03 L2_sharp:4.1725e-03 L3_sharp:5.2838e-03 L4_sharp:7.9903e-03 L5_sharp:1.2694e-02 L6_sharp:1.9973e-02 L7_sharp:3.1109e-02 L8_sharp:5.0711e-02 L9_sharp:5.9238e-02 L10_sharp:6.9644e-02 L11_sharp:1.0014e-01 L12_sharp:2.4673e-01 total_fnorm:1.0547e+00 total_l1_linf:1.4960e+03 total_spectral:5.3516e-01 L1_fnorm:1.6797e-01 L2_fnorm:1.6602e-01 L3_fnorm:1.6504e-01 L4_fnorm:1.6504e-01 L5_fnorm:1.6406e-01 L6_fnorm:1.6211e-01 L7_fnorm:1.6309e-01 L8_fnorm:1.6016e-01 L9_fnorm:1.6309e-01 L10_fnorm:1.6309e-01 L11_fnorm:1.6406e-01 L12_fnorm:1.5137e-01 L1_l1linf:3.9795e-02 L2_l1linf:3.8818e-02 L3_l1linf:3.7109e-02 L4_l1linf:3.6865e-02 L5_l1linf:3.5645e-02 L6_l1linf:3.4912e-02 L7_l1linf:3.4180e-02 L8_l1linf:3.3936e-02 L9_l1linf:3.2715e-02 L10_l1linf:3.3203e-02 L11_l1linf:3.3691e-02 L12_l1linf:2.1484e-02 L1_spectral:2.3116e-03 L2_spectral:2.3258e-03 L3_spectral:2.3090e-03 L4_spectral:2.3265e-03 L5_spectral:2.3059e-03 L6_spectral:2.3114e-03 L7_spectral:2.3030e-03 L8_spectral:2.2712e-03 L9_spectral:2.2948e-03 L10_spectral:2.2925e-03 L11_spectral:2.2852e-03 L12_spectral:2.2918e-03 train_time:306906ms step_avg:42.63ms +[2025-09-11 13:02:19] [Rank 0] step:7201/10000 train_time:308810ms step_avg:42.88ms +[2025-09-11 13:02:19] [Rank 0] step:7201/10000 train_time:308810ms step_avg:42.88ms +[2025-09-11 13:02:20] [Rank 0] step:7221/10000 train_time:309524ms step_avg:42.86ms +[2025-09-11 13:02:20] [Rank 0] step:7221/10000 train_time:309524ms step_avg:42.86ms +[2025-09-11 13:02:20] [Rank 0] step:7241/10000 train_time:310224ms step_avg:42.84ms +[2025-09-11 13:02:20] [Rank 0] step:7241/10000 train_time:310224ms step_avg:42.84ms +[2025-09-11 13:02:21] [Rank 0] step:7261/10000 train_time:310927ms step_avg:42.82ms +[2025-09-11 13:02:21] [Rank 0] step:7261/10000 train_time:310927ms step_avg:42.82ms +[2025-09-11 13:02:22] [Rank 0] step:7281/10000 train_time:311633ms step_avg:42.80ms +[2025-09-11 13:02:22] [Rank 0] step:7281/10000 
train_time:311633ms step_avg:42.80ms +[2025-09-11 13:02:22] [Rank 0] step:7301/10000 train_time:312333ms step_avg:42.78ms +[2025-09-11 13:02:22] [Rank 0] step:7301/10000 train_time:312333ms step_avg:42.78ms +[2025-09-11 13:02:23] [Rank 0] step:7321/10000 train_time:313033ms step_avg:42.76ms +[2025-09-11 13:02:23] [Rank 0] step:7321/10000 train_time:313033ms step_avg:42.76ms +[2025-09-11 13:02:24] [Rank 0] step:7341/10000 train_time:313736ms step_avg:42.74ms +[2025-09-11 13:02:24] [Rank 0] step:7341/10000 train_time:313736ms step_avg:42.74ms +[2025-09-11 13:02:25] [Rank 0] step:7361/10000 train_time:314436ms step_avg:42.72ms +[2025-09-11 13:02:25] [Rank 0] step:7361/10000 train_time:314436ms step_avg:42.72ms +[2025-09-11 13:02:25] [Rank 0] step:7381/10000 train_time:315138ms step_avg:42.70ms +[2025-09-11 13:02:25] [Rank 0] step:7381/10000 train_time:315138ms step_avg:42.70ms +[2025-09-11 13:02:26] [Rank 0] step:7401/10000 train_time:315837ms step_avg:42.67ms +[2025-09-11 13:02:26] [Rank 0] step:7401/10000 train_time:315837ms step_avg:42.67ms +[2025-09-11 13:02:27] [Rank 0] step:7421/10000 train_time:316537ms step_avg:42.65ms +[2025-09-11 13:02:27] [Rank 0] step:7421/10000 train_time:316537ms step_avg:42.65ms +[2025-09-11 13:02:27] [Rank 0] step:7441/10000 train_time:317239ms step_avg:42.63ms +[2025-09-11 13:02:27] [Rank 0] step:7441/10000 train_time:317239ms step_avg:42.63ms +[2025-09-11 13:02:28] [Rank 0] step:7461/10000 train_time:317940ms step_avg:42.61ms +[2025-09-11 13:02:28] [Rank 0] step:7461/10000 train_time:317940ms step_avg:42.61ms +[2025-09-11 13:02:29] [Rank 0] step:7481/10000 train_time:318642ms step_avg:42.59ms +[2025-09-11 13:02:29] [Rank 0] step:7481/10000 train_time:318642ms step_avg:42.59ms +[2025-09-11 13:02:29] [Rank 0] step:7501/10000 train_time:319343ms step_avg:42.57ms +[2025-09-11 13:02:29] [Rank 0] step:7501/10000 train_time:319343ms step_avg:42.57ms +[2025-09-11 13:02:30] [Rank 0] step:7521/10000 train_time:320047ms step_avg:42.55ms 
+[2025-09-11 13:02:30] [Rank 0] step:7521/10000 train_time:320047ms step_avg:42.55ms +[2025-09-11 13:02:31] [Rank 0] step:7541/10000 train_time:320746ms step_avg:42.53ms +[2025-09-11 13:02:31] [Rank 0] step:7541/10000 train_time:320746ms step_avg:42.53ms +[2025-09-11 13:02:32] [Rank 0] step:7561/10000 train_time:321450ms step_avg:42.51ms +[2025-09-11 13:02:32] [Rank 0] step:7561/10000 train_time:321450ms step_avg:42.51ms +[2025-09-11 13:02:32] [Rank 0] step:7581/10000 train_time:322153ms step_avg:42.49ms +[2025-09-11 13:02:32] [Rank 0] step:7581/10000 train_time:322153ms step_avg:42.49ms +[2025-09-11 13:02:33] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 13:02:33] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 13:02:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 13:02:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 13:02:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 13:02:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 13:02:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:02:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:02:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 13:02:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 13:02:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 13:02:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 13:02:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 13:02:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 13:02:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 13:02:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 13:02:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 13:02:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 13:02:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 13:02:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 13:02:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 13:02:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 13:02:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 13:02:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 13:02:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 13:02:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 13:02:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 13:02:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 13:02:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 13:02:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 13:02:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 13:02:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 13:02:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 13:02:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 13:02:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 13:02:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 13:02:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 13:02:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 13:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 13:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 13:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 13:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 13:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 13:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 13:02:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 13:02:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:02:44] [Rank 0] PRINT: step:7600/10000 val_loss:5.4049 total_sharp:7.9158e-02 L1_sharp:7.5530e-03 L2_sharp:4.8886e-03 L3_sharp:6.3115e-03 L4_sharp:8.0134e-03 L5_sharp:1.3445e-02 L6_sharp:2.2197e-02 L7_sharp:2.9342e-02 L8_sharp:4.7682e-02 L9_sharp:6.3168e-02 L10_sharp:6.8963e-02 L11_sharp:9.8652e-02 L12_sharp:2.6953e-01 total_fnorm:8.4766e-01 total_l1_linf:1.1360e+03 total_spectral:4.1602e-01 L1_fnorm:1.4062e-01 L2_fnorm:1.4062e-01 L3_fnorm:1.3867e-01 L4_fnorm:1.3770e-01 L5_fnorm:1.3770e-01 L6_fnorm:1.3672e-01 L7_fnorm:1.3672e-01 L8_fnorm:1.3379e-01 L9_fnorm:1.3574e-01 L10_fnorm:1.3574e-01 L11_fnorm:1.3672e-01 L12_fnorm:1.2598e-01 L1_l1linf:3.1494e-02 L2_l1linf:3.0762e-02 L3_l1linf:2.9663e-02 L4_l1linf:2.9175e-02 L5_l1linf:2.8198e-02 L6_l1linf:2.7222e-02 L7_l1linf:2.7466e-02 L8_l1linf:2.6245e-02 L9_l1linf:2.5879e-02 L10_l1linf:2.6733e-02 L11_l1linf:2.6611e-02 L12_l1linf:1.6968e-02 L1_spectral:2.0282e-03 L2_spectral:2.0240e-03 L3_spectral:2.0377e-03 L4_spectral:2.0129e-03 L5_spectral:2.0063e-03 L6_spectral:2.0064e-03 L7_spectral:1.9911e-03 L8_spectral:1.9429e-03 L9_spectral:1.9652e-03 L10_spectral:1.9625e-03 L11_spectral:1.9642e-03 L12_spectral:1.9547e-03 train_time:322835ms step_avg:42.48ms +[2025-09-11 13:02:44] [Rank 0] PRINT: step:7600/10000 val_loss:5.4049 total_sharp:7.9158e-02 L1_sharp:7.5530e-03 L2_sharp:4.8886e-03 L3_sharp:6.3115e-03 L4_sharp:8.0134e-03 L5_sharp:1.3445e-02 L6_sharp:2.2197e-02 L7_sharp:2.9342e-02 L8_sharp:4.7682e-02 L9_sharp:6.3168e-02 L10_sharp:6.8963e-02 L11_sharp:9.8652e-02 L12_sharp:2.6953e-01 total_fnorm:8.4766e-01 total_l1_linf:1.1360e+03 total_spectral:4.1602e-01 L1_fnorm:1.4062e-01 L2_fnorm:1.4062e-01 L3_fnorm:1.3867e-01 L4_fnorm:1.3770e-01 L5_fnorm:1.3770e-01 L6_fnorm:1.3672e-01 L7_fnorm:1.3672e-01 L8_fnorm:1.3379e-01 L9_fnorm:1.3574e-01 L10_fnorm:1.3574e-01 L11_fnorm:1.3672e-01 L12_fnorm:1.2598e-01 L1_l1linf:3.1494e-02 L2_l1linf:3.0762e-02 L3_l1linf:2.9663e-02 L4_l1linf:2.9175e-02 L5_l1linf:2.8198e-02 
L6_l1linf:2.7222e-02 L7_l1linf:2.7466e-02 L8_l1linf:2.6245e-02 L9_l1linf:2.5879e-02 L10_l1linf:2.6733e-02 L11_l1linf:2.6611e-02 L12_l1linf:1.6968e-02 L1_spectral:2.0282e-03 L2_spectral:2.0240e-03 L3_spectral:2.0377e-03 L4_spectral:2.0129e-03 L5_spectral:2.0063e-03 L6_spectral:2.0064e-03 L7_spectral:1.9911e-03 L8_spectral:1.9429e-03 L9_spectral:1.9652e-03 L10_spectral:1.9625e-03 L11_spectral:1.9642e-03 L12_spectral:1.9547e-03 train_time:322835ms step_avg:42.48ms +[2025-09-11 13:02:46] [Rank 0] step:7601/10000 train_time:324777ms step_avg:42.73ms +[2025-09-11 13:02:46] [Rank 0] step:7601/10000 train_time:324777ms step_avg:42.73ms +[2025-09-11 13:02:47] [Rank 0] step:7621/10000 train_time:325501ms step_avg:42.71ms +[2025-09-11 13:02:47] [Rank 0] step:7621/10000 train_time:325501ms step_avg:42.71ms +[2025-09-11 13:02:48] [Rank 0] step:7641/10000 train_time:326205ms step_avg:42.69ms +[2025-09-11 13:02:48] [Rank 0] step:7641/10000 train_time:326205ms step_avg:42.69ms +[2025-09-11 13:02:48] [Rank 0] step:7661/10000 train_time:326907ms step_avg:42.67ms +[2025-09-11 13:02:48] [Rank 0] step:7661/10000 train_time:326907ms step_avg:42.67ms +[2025-09-11 13:02:49] [Rank 0] step:7681/10000 train_time:327610ms step_avg:42.65ms +[2025-09-11 13:02:49] [Rank 0] step:7681/10000 train_time:327610ms step_avg:42.65ms +[2025-09-11 13:02:50] [Rank 0] step:7701/10000 train_time:328313ms step_avg:42.63ms +[2025-09-11 13:02:50] [Rank 0] step:7701/10000 train_time:328313ms step_avg:42.63ms +[2025-09-11 13:02:50] [Rank 0] step:7721/10000 train_time:329016ms step_avg:42.61ms +[2025-09-11 13:02:50] [Rank 0] step:7721/10000 train_time:329016ms step_avg:42.61ms +[2025-09-11 13:02:51] [Rank 0] step:7741/10000 train_time:329719ms step_avg:42.59ms +[2025-09-11 13:02:51] [Rank 0] step:7741/10000 train_time:329719ms step_avg:42.59ms +[2025-09-11 13:02:52] [Rank 0] step:7761/10000 train_time:330421ms step_avg:42.57ms +[2025-09-11 13:02:52] [Rank 0] step:7761/10000 train_time:330421ms step_avg:42.57ms 
+[2025-09-11 13:02:52] [Rank 0] step:7781/10000 train_time:331125ms step_avg:42.56ms +[2025-09-11 13:02:52] [Rank 0] step:7781/10000 train_time:331125ms step_avg:42.56ms +[2025-09-11 13:02:53] [Rank 0] step:7801/10000 train_time:331826ms step_avg:42.54ms +[2025-09-11 13:02:53] [Rank 0] step:7801/10000 train_time:331826ms step_avg:42.54ms +[2025-09-11 13:02:54] [Rank 0] step:7821/10000 train_time:332528ms step_avg:42.52ms +[2025-09-11 13:02:54] [Rank 0] step:7821/10000 train_time:332528ms step_avg:42.52ms +[2025-09-11 13:02:55] [Rank 0] step:7841/10000 train_time:333231ms step_avg:42.50ms +[2025-09-11 13:02:55] [Rank 0] step:7841/10000 train_time:333231ms step_avg:42.50ms +[2025-09-11 13:02:55] [Rank 0] step:7861/10000 train_time:333936ms step_avg:42.48ms +[2025-09-11 13:02:55] [Rank 0] step:7861/10000 train_time:333936ms step_avg:42.48ms +[2025-09-11 13:02:56] [Rank 0] step:7881/10000 train_time:334638ms step_avg:42.46ms +[2025-09-11 13:02:56] [Rank 0] step:7881/10000 train_time:334638ms step_avg:42.46ms +[2025-09-11 13:02:57] [Rank 0] step:7901/10000 train_time:335342ms step_avg:42.44ms +[2025-09-11 13:02:57] [Rank 0] step:7901/10000 train_time:335342ms step_avg:42.44ms +[2025-09-11 13:02:57] [Rank 0] step:7921/10000 train_time:336045ms step_avg:42.42ms +[2025-09-11 13:02:57] [Rank 0] step:7921/10000 train_time:336045ms step_avg:42.42ms +[2025-09-11 13:02:58] [Rank 0] step:7941/10000 train_time:336749ms step_avg:42.41ms +[2025-09-11 13:02:58] [Rank 0] step:7941/10000 train_time:336749ms step_avg:42.41ms +[2025-09-11 13:02:59] [Rank 0] step:7961/10000 train_time:337450ms step_avg:42.39ms +[2025-09-11 13:02:59] [Rank 0] step:7961/10000 train_time:337450ms step_avg:42.39ms +[2025-09-11 13:03:00] [Rank 0] step:7981/10000 train_time:338157ms step_avg:42.37ms +[2025-09-11 13:03:00] [Rank 0] step:7981/10000 train_time:338157ms step_avg:42.37ms +[2025-09-11 13:03:00] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 13:03:00] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 13:03:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 13:03:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 13:03:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 13:03:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 13:03:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:03:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:03:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 13:03:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 13:03:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 13:03:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 13:03:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 13:03:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 13:03:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 13:03:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 13:03:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 13:03:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 13:03:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 13:03:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 13:03:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 13:03:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 13:03:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 13:03:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 13:03:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 13:03:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 13:03:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 13:03:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 13:03:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 13:03:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 13:03:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 13:03:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 13:03:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 13:03:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 13:03:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 13:03:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 13:03:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 13:03:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 13:03:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 13:03:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 13:03:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 13:03:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 13:03:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 13:03:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 13:03:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 13:03:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 13:03:14] [Rank 0] PRINT: step:8000/10000 val_loss:5.3959 total_sharp:6.4541e-02 L1_sharp:5.3366e-03 L2_sharp:5.0824e-03 L3_sharp:2.6234e-03 L4_sharp:6.1392e-03 L5_sharp:8.5516e-03 L6_sharp:1.6527e-02 L7_sharp:2.4153e-02 L8_sharp:4.0317e-02 L9_sharp:4.8383e-02 L10_sharp:5.6129e-02 L11_sharp:8.8707e-02 L12_sharp:2.1351e-01 total_fnorm:6.7578e-01 total_l1_linf:8.6000e+02 total_spectral:3.4180e-01 L1_fnorm:1.1523e-01 L2_fnorm:1.1426e-01 L3_fnorm:1.1279e-01 L4_fnorm:1.1230e-01 L5_fnorm:1.1182e-01 L6_fnorm:1.1084e-01 L7_fnorm:1.1133e-01 L8_fnorm:1.0889e-01 L9_fnorm:1.1084e-01 L10_fnorm:1.1084e-01 L11_fnorm:1.1084e-01 L12_fnorm:1.0156e-01 L1_l1linf:2.3560e-02 L2_l1linf:2.2949e-02 L3_l1linf:2.2949e-02 L4_l1linf:2.1729e-02 L5_l1linf:2.1118e-02 L6_l1linf:2.1118e-02 L7_l1linf:2.1484e-02 L8_l1linf:2.0264e-02 L9_l1linf:1.9897e-02 L10_l1linf:2.0142e-02 L11_l1linf:2.0142e-02 L12_l1linf:1.3489e-02 L1_spectral:1.7321e-03 L2_spectral:1.7212e-03 L3_spectral:1.7155e-03 L4_spectral:1.7039e-03 L5_spectral:1.6962e-03 L6_spectral:1.6780e-03 L7_spectral:1.6695e-03 L8_spectral:1.6365e-03 L9_spectral:1.6521e-03 L10_spectral:1.6348e-03 L11_spectral:1.6439e-03 L12_spectral:1.6194e-03 train_time:338837ms step_avg:42.35ms +[2025-09-11 13:03:14] [Rank 0] PRINT: step:8000/10000 
val_loss:5.3959 total_sharp:6.4541e-02 L1_sharp:5.3366e-03 L2_sharp:5.0824e-03 L3_sharp:2.6234e-03 L4_sharp:6.1392e-03 L5_sharp:8.5516e-03 L6_sharp:1.6527e-02 L7_sharp:2.4153e-02 L8_sharp:4.0317e-02 L9_sharp:4.8383e-02 L10_sharp:5.6129e-02 L11_sharp:8.8707e-02 L12_sharp:2.1351e-01 total_fnorm:6.7578e-01 total_l1_linf:8.6000e+02 total_spectral:3.4180e-01 L1_fnorm:1.1523e-01 L2_fnorm:1.1426e-01 L3_fnorm:1.1279e-01 L4_fnorm:1.1230e-01 L5_fnorm:1.1182e-01 L6_fnorm:1.1084e-01 L7_fnorm:1.1133e-01 L8_fnorm:1.0889e-01 L9_fnorm:1.1084e-01 L10_fnorm:1.1084e-01 L11_fnorm:1.1084e-01 L12_fnorm:1.0156e-01 L1_l1linf:2.3560e-02 L2_l1linf:2.2949e-02 L3_l1linf:2.2949e-02 L4_l1linf:2.1729e-02 L5_l1linf:2.1118e-02 L6_l1linf:2.1118e-02 L7_l1linf:2.1484e-02 L8_l1linf:2.0264e-02 L9_l1linf:1.9897e-02 L10_l1linf:2.0142e-02 L11_l1linf:2.0142e-02 L12_l1linf:1.3489e-02 L1_spectral:1.7321e-03 L2_spectral:1.7212e-03 L3_spectral:1.7155e-03 L4_spectral:1.7039e-03 L5_spectral:1.6962e-03 L6_spectral:1.6780e-03 L7_spectral:1.6695e-03 L8_spectral:1.6365e-03 L9_spectral:1.6521e-03 L10_spectral:1.6348e-03 L11_spectral:1.6439e-03 L12_spectral:1.6194e-03 train_time:338837ms step_avg:42.35ms +[2025-09-11 13:03:16] [Rank 0] step:8001/10000 train_time:340701ms step_avg:42.58ms +[2025-09-11 13:03:16] [Rank 0] step:8001/10000 train_time:340701ms step_avg:42.58ms +[2025-09-11 13:03:17] [Rank 0] step:8021/10000 train_time:341419ms step_avg:42.57ms +[2025-09-11 13:03:17] [Rank 0] step:8021/10000 train_time:341419ms step_avg:42.57ms +[2025-09-11 13:03:18] [Rank 0] step:8041/10000 train_time:342124ms step_avg:42.55ms +[2025-09-11 13:03:18] [Rank 0] step:8041/10000 train_time:342124ms step_avg:42.55ms +[2025-09-11 13:03:18] [Rank 0] step:8061/10000 train_time:342830ms step_avg:42.53ms +[2025-09-11 13:03:18] [Rank 0] step:8061/10000 train_time:342830ms step_avg:42.53ms +[2025-09-11 13:03:19] [Rank 0] step:8081/10000 train_time:343532ms step_avg:42.51ms +[2025-09-11 13:03:19] [Rank 0] step:8081/10000 
train_time:343532ms step_avg:42.51ms +[2025-09-11 13:03:20] [Rank 0] step:8101/10000 train_time:344236ms step_avg:42.49ms +[2025-09-11 13:03:20] [Rank 0] step:8101/10000 train_time:344236ms step_avg:42.49ms +[2025-09-11 13:03:21] [Rank 0] step:8121/10000 train_time:344943ms step_avg:42.48ms +[2025-09-11 13:03:21] [Rank 0] step:8121/10000 train_time:344943ms step_avg:42.48ms +[2025-09-11 13:03:22] [Rank 0] step:8141/10000 train_time:346378ms step_avg:42.55ms +[2025-09-11 13:03:22] [Rank 0] step:8141/10000 train_time:346378ms step_avg:42.55ms +[2025-09-11 13:03:23] [Rank 0] step:8161/10000 train_time:347086ms step_avg:42.53ms +[2025-09-11 13:03:23] [Rank 0] step:8161/10000 train_time:347086ms step_avg:42.53ms +[2025-09-11 13:03:23] [Rank 0] step:8181/10000 train_time:347801ms step_avg:42.51ms +[2025-09-11 13:03:23] [Rank 0] step:8181/10000 train_time:347801ms step_avg:42.51ms +[2025-09-11 13:03:24] [Rank 0] step:8201/10000 train_time:348513ms step_avg:42.50ms +[2025-09-11 13:03:24] [Rank 0] step:8201/10000 train_time:348513ms step_avg:42.50ms +[2025-09-11 13:03:25] [Rank 0] step:8221/10000 train_time:349224ms step_avg:42.48ms +[2025-09-11 13:03:25] [Rank 0] step:8221/10000 train_time:349224ms step_avg:42.48ms +[2025-09-11 13:03:26] [Rank 0] step:8241/10000 train_time:349942ms step_avg:42.46ms +[2025-09-11 13:03:26] [Rank 0] step:8241/10000 train_time:349942ms step_avg:42.46ms +[2025-09-11 13:03:26] [Rank 0] step:8261/10000 train_time:350651ms step_avg:42.45ms +[2025-09-11 13:03:26] [Rank 0] step:8261/10000 train_time:350651ms step_avg:42.45ms +[2025-09-11 13:03:27] [Rank 0] step:8281/10000 train_time:351358ms step_avg:42.43ms +[2025-09-11 13:03:27] [Rank 0] step:8281/10000 train_time:351358ms step_avg:42.43ms +[2025-09-11 13:03:28] [Rank 0] step:8301/10000 train_time:352068ms step_avg:42.41ms +[2025-09-11 13:03:28] [Rank 0] step:8301/10000 train_time:352068ms step_avg:42.41ms +[2025-09-11 13:03:28] [Rank 0] step:8321/10000 train_time:352779ms step_avg:42.40ms 
+[2025-09-11 13:03:28] [Rank 0] step:8321/10000 train_time:352779ms step_avg:42.40ms +[2025-09-11 13:03:29] [Rank 0] step:8341/10000 train_time:353496ms step_avg:42.38ms +[2025-09-11 13:03:29] [Rank 0] step:8341/10000 train_time:353496ms step_avg:42.38ms +[2025-09-11 13:03:30] [Rank 0] step:8361/10000 train_time:354202ms step_avg:42.36ms +[2025-09-11 13:03:30] [Rank 0] step:8361/10000 train_time:354202ms step_avg:42.36ms +[2025-09-11 13:03:31] [Rank 0] step:8381/10000 train_time:354915ms step_avg:42.35ms +[2025-09-11 13:03:31] [Rank 0] step:8381/10000 train_time:354915ms step_avg:42.35ms +[2025-09-11 13:03:31] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 13:03:31] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 13:03:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 13:03:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 13:03:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 13:03:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 13:03:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:03:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:03:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 13:03:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 13:03:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 13:03:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 13:03:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 13:03:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 13:03:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 13:03:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 13:03:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 13:03:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 13:03:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 13:03:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 13:03:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 13:03:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 13:03:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 13:03:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 13:03:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 13:03:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 13:03:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 13:03:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 13:03:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 13:03:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 13:03:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 13:03:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 13:03:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 13:03:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 13:03:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 13:03:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 13:03:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 13:03:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 13:03:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 13:03:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 13:03:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 13:03:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 13:03:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 13:03:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 13:03:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 13:03:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:03:42] [Rank 0] PRINT: step:8400/10000 val_loss:5.3836 total_sharp:5.4324e-02 L1_sharp:5.4921e-03 L2_sharp:2.5581e-03 L3_sharp:3.2366e-03 L4_sharp:4.0229e-03 L5_sharp:6.6057e-03 L6_sharp:1.4778e-02 L7_sharp:1.6011e-02 L8_sharp:3.3083e-02 L9_sharp:4.1825e-02 L10_sharp:5.1031e-02 L11_sharp:6.7486e-02 L12_sharp:1.8396e-01 total_fnorm:5.0781e-01 total_l1_linf:6.0400e+02 total_spectral:2.5977e-01 L1_fnorm:9.0820e-02 L2_fnorm:8.9844e-02 L3_fnorm:8.8867e-02 L4_fnorm:8.8379e-02 L5_fnorm:8.7891e-02 L6_fnorm:8.6914e-02 L7_fnorm:8.6914e-02 L8_fnorm:8.4961e-02 L9_fnorm:8.5938e-02 L10_fnorm:8.5938e-02 L11_fnorm:8.5938e-02 L12_fnorm:7.9590e-02 L1_l1linf:1.7944e-02 L2_l1linf:1.6846e-02 L3_l1linf:1.6357e-02 L4_l1linf:1.5869e-02 L5_l1linf:1.5991e-02 L6_l1linf:1.5137e-02 L7_l1linf:1.4954e-02 L8_l1linf:1.4343e-02 L9_l1linf:1.3977e-02 L10_l1linf:1.4221e-02 L11_l1linf:1.4404e-02 L12_l1linf:1.0010e-02 L1_spectral:1.4069e-03 L2_spectral:1.3972e-03 L3_spectral:1.3931e-03 L4_spectral:1.3793e-03 L5_spectral:1.3585e-03 L6_spectral:1.3557e-03 L7_spectral:1.3326e-03 L8_spectral:1.2991e-03 L9_spectral:1.3172e-03 L10_spectral:1.3074e-03 L11_spectral:1.2985e-03 L12_spectral:1.2928e-03 train_time:355614ms step_avg:42.33ms +[2025-09-11 13:03:42] [Rank 0] PRINT: step:8400/10000 val_loss:5.3836 total_sharp:5.4324e-02 L1_sharp:5.4921e-03 L2_sharp:2.5581e-03 L3_sharp:3.2366e-03 L4_sharp:4.0229e-03 L5_sharp:6.6057e-03 L6_sharp:1.4778e-02 L7_sharp:1.6011e-02 L8_sharp:3.3083e-02 L9_sharp:4.1825e-02 L10_sharp:5.1031e-02 L11_sharp:6.7486e-02 L12_sharp:1.8396e-01 total_fnorm:5.0781e-01 total_l1_linf:6.0400e+02 total_spectral:2.5977e-01 L1_fnorm:9.0820e-02 L2_fnorm:8.9844e-02 L3_fnorm:8.8867e-02 L4_fnorm:8.8379e-02 L5_fnorm:8.7891e-02 L6_fnorm:8.6914e-02 L7_fnorm:8.6914e-02 L8_fnorm:8.4961e-02 L9_fnorm:8.5938e-02 L10_fnorm:8.5938e-02 L11_fnorm:8.5938e-02 L12_fnorm:7.9590e-02 L1_l1linf:1.7944e-02 L2_l1linf:1.6846e-02 L3_l1linf:1.6357e-02 L4_l1linf:1.5869e-02 L5_l1linf:1.5991e-02 
L6_l1linf:1.5137e-02 L7_l1linf:1.4954e-02 L8_l1linf:1.4343e-02 L9_l1linf:1.3977e-02 L10_l1linf:1.4221e-02 L11_l1linf:1.4404e-02 L12_l1linf:1.0010e-02 L1_spectral:1.4069e-03 L2_spectral:1.3972e-03 L3_spectral:1.3931e-03 L4_spectral:1.3793e-03 L5_spectral:1.3585e-03 L6_spectral:1.3557e-03 L7_spectral:1.3326e-03 L8_spectral:1.2991e-03 L9_spectral:1.3172e-03 L10_spectral:1.3074e-03 L11_spectral:1.2985e-03 L12_spectral:1.2928e-03 train_time:355614ms step_avg:42.33ms +[2025-09-11 13:03:44] [Rank 0] step:8401/10000 train_time:357588ms step_avg:42.56ms +[2025-09-11 13:03:44] [Rank 0] step:8401/10000 train_time:357588ms step_avg:42.56ms +[2025-09-11 13:03:45] [Rank 0] step:8421/10000 train_time:358318ms step_avg:42.55ms +[2025-09-11 13:03:45] [Rank 0] step:8421/10000 train_time:358318ms step_avg:42.55ms +[2025-09-11 13:03:46] [Rank 0] step:8441/10000 train_time:359029ms step_avg:42.53ms +[2025-09-11 13:03:46] [Rank 0] step:8441/10000 train_time:359029ms step_avg:42.53ms +[2025-09-11 13:03:47] [Rank 0] step:8461/10000 train_time:359740ms step_avg:42.52ms +[2025-09-11 13:03:47] [Rank 0] step:8461/10000 train_time:359740ms step_avg:42.52ms +[2025-09-11 13:03:47] [Rank 0] step:8481/10000 train_time:360714ms step_avg:42.53ms +[2025-09-11 13:03:47] [Rank 0] step:8481/10000 train_time:360714ms step_avg:42.53ms +[2025-09-11 13:03:48] [Rank 0] step:8501/10000 train_time:361424ms step_avg:42.52ms +[2025-09-11 13:03:48] [Rank 0] step:8501/10000 train_time:361424ms step_avg:42.52ms +[2025-09-11 13:03:49] [Rank 0] step:8521/10000 train_time:362132ms step_avg:42.50ms +[2025-09-11 13:03:49] [Rank 0] step:8521/10000 train_time:362132ms step_avg:42.50ms +[2025-09-11 13:03:50] [Rank 0] step:8541/10000 train_time:363115ms step_avg:42.51ms +[2025-09-11 13:03:50] [Rank 0] step:8541/10000 train_time:363115ms step_avg:42.51ms +[2025-09-11 13:03:50] [Rank 0] step:8561/10000 train_time:363828ms step_avg:42.50ms +[2025-09-11 13:03:50] [Rank 0] step:8561/10000 train_time:363828ms step_avg:42.50ms 
+[2025-09-11 13:03:51] [Rank 0] step:8581/10000 train_time:364541ms step_avg:42.48ms +[2025-09-11 13:03:51] [Rank 0] step:8581/10000 train_time:364541ms step_avg:42.48ms +[2025-09-11 13:03:52] [Rank 0] step:8601/10000 train_time:365251ms step_avg:42.47ms +[2025-09-11 13:03:52] [Rank 0] step:8601/10000 train_time:365251ms step_avg:42.47ms +[2025-09-11 13:03:53] [Rank 0] step:8621/10000 train_time:365959ms step_avg:42.45ms +[2025-09-11 13:03:53] [Rank 0] step:8621/10000 train_time:365959ms step_avg:42.45ms +[2025-09-11 13:03:53] [Rank 0] step:8641/10000 train_time:366667ms step_avg:42.43ms +[2025-09-11 13:03:53] [Rank 0] step:8641/10000 train_time:366667ms step_avg:42.43ms +[2025-09-11 13:03:54] [Rank 0] step:8661/10000 train_time:367376ms step_avg:42.42ms +[2025-09-11 13:03:54] [Rank 0] step:8661/10000 train_time:367376ms step_avg:42.42ms +[2025-09-11 13:03:55] [Rank 0] step:8681/10000 train_time:368087ms step_avg:42.40ms +[2025-09-11 13:03:55] [Rank 0] step:8681/10000 train_time:368087ms step_avg:42.40ms +[2025-09-11 13:03:55] [Rank 0] step:8701/10000 train_time:368795ms step_avg:42.39ms +[2025-09-11 13:03:55] [Rank 0] step:8701/10000 train_time:368795ms step_avg:42.39ms +[2025-09-11 13:03:56] [Rank 0] step:8721/10000 train_time:369506ms step_avg:42.37ms +[2025-09-11 13:03:56] [Rank 0] step:8721/10000 train_time:369506ms step_avg:42.37ms +[2025-09-11 13:03:57] [Rank 0] step:8741/10000 train_time:370212ms step_avg:42.35ms +[2025-09-11 13:03:57] [Rank 0] step:8741/10000 train_time:370212ms step_avg:42.35ms +[2025-09-11 13:03:58] [Rank 0] step:8761/10000 train_time:370924ms step_avg:42.34ms +[2025-09-11 13:03:58] [Rank 0] step:8761/10000 train_time:370924ms step_avg:42.34ms +[2025-09-11 13:03:58] [Rank 0] step:8781/10000 train_time:371630ms step_avg:42.32ms +[2025-09-11 13:03:58] [Rank 0] step:8781/10000 train_time:371630ms step_avg:42.32ms +[2025-09-11 13:03:59] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 13:03:59] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 13:04:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 13:04:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 13:04:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 13:04:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 13:04:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:04:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:04:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 13:04:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 13:04:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 13:04:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 13:04:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 13:04:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 13:04:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 13:04:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 13:04:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 13:04:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 13:04:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 13:04:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 13:04:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 13:04:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 13:04:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 13:04:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 13:04:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 13:04:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 13:04:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 13:04:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 13:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 13:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 13:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 13:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 13:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 13:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 13:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 13:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 13:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 13:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 13:04:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 13:04:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 13:04:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 13:04:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 13:04:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 13:04:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 13:04:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 13:04:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 13:04:10] [Rank 0] PRINT: step:8800/10000 val_loss:5.3748 total_sharp:4.6492e-02 L1_sharp:1.5787e-03 L2_sharp:3.2578e-03 L3_sharp:2.0917e-03 L4_sharp:6.1939e-03 L5_sharp:5.5661e-03 L6_sharp:1.3890e-02 L7_sharp:2.1034e-02 L8_sharp:2.9416e-02 L9_sharp:3.5187e-02 L10_sharp:4.1832e-02 L11_sharp:6.1845e-02 L12_sharp:1.8044e-01 total_fnorm:3.6719e-01 total_l1_linf:3.8800e+02 total_spectral:1.8652e-01 L1_fnorm:6.5430e-02 L2_fnorm:6.4941e-02 L3_fnorm:6.3965e-02 L4_fnorm:6.3965e-02 L5_fnorm:6.3477e-02 L6_fnorm:6.2988e-02 L7_fnorm:6.2500e-02 L8_fnorm:6.1035e-02 L9_fnorm:6.1768e-02 L10_fnorm:6.1523e-02 L11_fnorm:6.1523e-02 L12_fnorm:5.7129e-02 L1_l1linf:1.1597e-02 L2_l1linf:1.1230e-02 L3_l1linf:1.0986e-02 L4_l1linf:1.0620e-02 L5_l1linf:1.0742e-02 L6_l1linf:1.0010e-02 L7_l1linf:9.5215e-03 L8_l1linf:9.5215e-03 L9_l1linf:9.0942e-03 L10_l1linf:8.9722e-03 L11_l1linf:9.5825e-03 L12_l1linf:6.8665e-03 L1_spectral:1.0592e-03 L2_spectral:1.0460e-03 L3_spectral:1.0425e-03 L4_spectral:1.0249e-03 L5_spectral:1.0041e-03 L6_spectral:1.0001e-03 L7_spectral:9.7253e-04 L8_spectral:9.6114e-04 L9_spectral:9.6774e-04 L10_spectral:9.5079e-04 L11_spectral:9.5730e-04 L12_spectral:9.4859e-04 train_time:372317ms step_avg:42.31ms +[2025-09-11 13:04:10] [Rank 0] PRINT: step:8800/10000 
val_loss:5.3748 total_sharp:4.6492e-02 L1_sharp:1.5787e-03 L2_sharp:3.2578e-03 L3_sharp:2.0917e-03 L4_sharp:6.1939e-03 L5_sharp:5.5661e-03 L6_sharp:1.3890e-02 L7_sharp:2.1034e-02 L8_sharp:2.9416e-02 L9_sharp:3.5187e-02 L10_sharp:4.1832e-02 L11_sharp:6.1845e-02 L12_sharp:1.8044e-01 total_fnorm:3.6719e-01 total_l1_linf:3.8800e+02 total_spectral:1.8652e-01 L1_fnorm:6.5430e-02 L2_fnorm:6.4941e-02 L3_fnorm:6.3965e-02 L4_fnorm:6.3965e-02 L5_fnorm:6.3477e-02 L6_fnorm:6.2988e-02 L7_fnorm:6.2500e-02 L8_fnorm:6.1035e-02 L9_fnorm:6.1768e-02 L10_fnorm:6.1523e-02 L11_fnorm:6.1523e-02 L12_fnorm:5.7129e-02 L1_l1linf:1.1597e-02 L2_l1linf:1.1230e-02 L3_l1linf:1.0986e-02 L4_l1linf:1.0620e-02 L5_l1linf:1.0742e-02 L6_l1linf:1.0010e-02 L7_l1linf:9.5215e-03 L8_l1linf:9.5215e-03 L9_l1linf:9.0942e-03 L10_l1linf:8.9722e-03 L11_l1linf:9.5825e-03 L12_l1linf:6.8665e-03 L1_spectral:1.0592e-03 L2_spectral:1.0460e-03 L3_spectral:1.0425e-03 L4_spectral:1.0249e-03 L5_spectral:1.0041e-03 L6_spectral:1.0001e-03 L7_spectral:9.7253e-04 L8_spectral:9.6114e-04 L9_spectral:9.6774e-04 L10_spectral:9.5079e-04 L11_spectral:9.5730e-04 L12_spectral:9.4859e-04 train_time:372317ms step_avg:42.31ms +[2025-09-11 13:04:13] [Rank 0] step:8801/10000 train_time:374483ms step_avg:42.55ms +[2025-09-11 13:04:13] [Rank 0] step:8801/10000 train_time:374483ms step_avg:42.55ms +[2025-09-11 13:04:13] [Rank 0] step:8821/10000 train_time:375376ms step_avg:42.55ms +[2025-09-11 13:04:13] [Rank 0] step:8821/10000 train_time:375376ms step_avg:42.55ms +[2025-09-11 13:04:14] [Rank 0] step:8841/10000 train_time:376086ms step_avg:42.54ms +[2025-09-11 13:04:14] [Rank 0] step:8841/10000 train_time:376086ms step_avg:42.54ms +[2025-09-11 13:04:15] [Rank 0] step:8861/10000 train_time:376795ms step_avg:42.52ms +[2025-09-11 13:04:15] [Rank 0] step:8861/10000 train_time:376795ms step_avg:42.52ms +[2025-09-11 13:04:16] [Rank 0] step:8881/10000 train_time:377506ms step_avg:42.51ms +[2025-09-11 13:04:16] [Rank 0] step:8881/10000 
train_time:377506ms step_avg:42.51ms +[2025-09-11 13:04:16] [Rank 0] step:8901/10000 train_time:378228ms step_avg:42.49ms +[2025-09-11 13:04:16] [Rank 0] step:8901/10000 train_time:378228ms step_avg:42.49ms +[2025-09-11 13:04:17] [Rank 0] step:8921/10000 train_time:378935ms step_avg:42.48ms +[2025-09-11 13:04:17] [Rank 0] step:8921/10000 train_time:378935ms step_avg:42.48ms +[2025-09-11 13:04:18] [Rank 0] step:8941/10000 train_time:379649ms step_avg:42.46ms +[2025-09-11 13:04:18] [Rank 0] step:8941/10000 train_time:379649ms step_avg:42.46ms +[2025-09-11 13:04:18] [Rank 0] step:8961/10000 train_time:380367ms step_avg:42.45ms +[2025-09-11 13:04:18] [Rank 0] step:8961/10000 train_time:380367ms step_avg:42.45ms +[2025-09-11 13:04:19] [Rank 0] step:8981/10000 train_time:381081ms step_avg:42.43ms +[2025-09-11 13:04:19] [Rank 0] step:8981/10000 train_time:381081ms step_avg:42.43ms +[2025-09-11 13:04:20] [Rank 0] step:9001/10000 train_time:381786ms step_avg:42.42ms +[2025-09-11 13:04:20] [Rank 0] step:9001/10000 train_time:381786ms step_avg:42.42ms +[2025-09-11 13:04:21] [Rank 0] step:9021/10000 train_time:382495ms step_avg:42.40ms +[2025-09-11 13:04:21] [Rank 0] step:9021/10000 train_time:382495ms step_avg:42.40ms +[2025-09-11 13:04:21] [Rank 0] step:9041/10000 train_time:383208ms step_avg:42.39ms +[2025-09-11 13:04:21] [Rank 0] step:9041/10000 train_time:383208ms step_avg:42.39ms +[2025-09-11 13:04:22] [Rank 0] step:9061/10000 train_time:383916ms step_avg:42.37ms +[2025-09-11 13:04:22] [Rank 0] step:9061/10000 train_time:383916ms step_avg:42.37ms +[2025-09-11 13:04:23] [Rank 0] step:9081/10000 train_time:384629ms step_avg:42.36ms +[2025-09-11 13:04:23] [Rank 0] step:9081/10000 train_time:384629ms step_avg:42.36ms +[2025-09-11 13:04:23] [Rank 0] step:9101/10000 train_time:385343ms step_avg:42.34ms +[2025-09-11 13:04:23] [Rank 0] step:9101/10000 train_time:385343ms step_avg:42.34ms +[2025-09-11 13:04:24] [Rank 0] step:9121/10000 train_time:386058ms step_avg:42.33ms 
+[2025-09-11 13:04:24] [Rank 0] step:9121/10000 train_time:386058ms step_avg:42.33ms +[2025-09-11 13:04:25] [Rank 0] step:9141/10000 train_time:386766ms step_avg:42.31ms +[2025-09-11 13:04:25] [Rank 0] step:9141/10000 train_time:386766ms step_avg:42.31ms +[2025-09-11 13:04:26] [Rank 0] step:9161/10000 train_time:387479ms step_avg:42.30ms +[2025-09-11 13:04:26] [Rank 0] step:9161/10000 train_time:387479ms step_avg:42.30ms +[2025-09-11 13:04:26] [Rank 0] step:9181/10000 train_time:388191ms step_avg:42.28ms +[2025-09-11 13:04:26] [Rank 0] step:9181/10000 train_time:388191ms step_avg:42.28ms +[2025-09-11 13:04:27] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 13:04:27] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 13:04:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 13:04:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 13:04:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 13:04:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 13:04:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:04:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:04:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 13:04:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 13:04:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 13:04:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 13:04:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 13:04:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 13:04:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 13:04:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 13:04:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 13:04:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 13:04:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 13:04:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 13:04:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 13:04:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 13:04:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 13:04:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 13:04:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 13:04:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 13:04:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 13:04:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 13:04:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 13:04:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 13:04:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 13:04:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 13:04:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 13:04:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 13:04:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 13:04:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 13:04:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 13:04:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 13:04:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 13:04:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 13:04:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 13:04:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 13:04:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 13:04:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 13:04:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 13:04:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:04:38] [Rank 0] PRINT: step:9200/10000 val_loss:5.3684 total_sharp:4.5356e-02 L1_sharp:3.0344e-03 L2_sharp:2.7067e-03 L3_sharp:1.8487e-03 L4_sharp:3.9192e-03 L5_sharp:4.2599e-03 L6_sharp:8.6640e-03 L7_sharp:1.4733e-02 L8_sharp:2.2828e-02 L9_sharp:2.7458e-02 L10_sharp:3.4590e-02 L11_sharp:4.8601e-02 L12_sharp:2.0361e-01 total_fnorm:2.3340e-01 total_l1_linf:2.2100e+02 total_spectral:1.1768e-01 L1_fnorm:4.3701e-02 L2_fnorm:4.3213e-02 L3_fnorm:4.2725e-02 L4_fnorm:4.2236e-02 L5_fnorm:4.1992e-02 L6_fnorm:4.1992e-02 L7_fnorm:4.1748e-02 L8_fnorm:4.0771e-02 L9_fnorm:4.1260e-02 L10_fnorm:4.1260e-02 L11_fnorm:4.1260e-02 L12_fnorm:3.7354e-02 L1_l1linf:7.0801e-03 L2_l1linf:6.6223e-03 L3_l1linf:6.4697e-03 L4_l1linf:6.3171e-03 L5_l1linf:6.0730e-03 L6_l1linf:5.7983e-03 L7_l1linf:5.8899e-03 L8_l1linf:5.6458e-03 L9_l1linf:5.3406e-03 L10_l1linf:5.3711e-03 L11_l1linf:5.5847e-03 L12_l1linf:3.9368e-03 L1_spectral:7.3398e-04 L2_spectral:7.2278e-04 L3_spectral:7.1785e-04 L4_spectral:7.0254e-04 L5_spectral:6.8773e-04 L6_spectral:6.8721e-04 L7_spectral:6.7304e-04 L8_spectral:6.5979e-04 L9_spectral:6.5447e-04 L10_spectral:6.4834e-04 L11_spectral:6.4891e-04 L12_spectral:6.4461e-04 train_time:388884ms step_avg:42.27ms +[2025-09-11 13:04:38] [Rank 0] PRINT: step:9200/10000 val_loss:5.3684 total_sharp:4.5356e-02 L1_sharp:3.0344e-03 L2_sharp:2.7067e-03 L3_sharp:1.8487e-03 L4_sharp:3.9192e-03 L5_sharp:4.2599e-03 L6_sharp:8.6640e-03 L7_sharp:1.4733e-02 L8_sharp:2.2828e-02 L9_sharp:2.7458e-02 L10_sharp:3.4590e-02 L11_sharp:4.8601e-02 L12_sharp:2.0361e-01 total_fnorm:2.3340e-01 total_l1_linf:2.2100e+02 total_spectral:1.1768e-01 L1_fnorm:4.3701e-02 L2_fnorm:4.3213e-02 L3_fnorm:4.2725e-02 L4_fnorm:4.2236e-02 L5_fnorm:4.1992e-02 L6_fnorm:4.1992e-02 L7_fnorm:4.1748e-02 L8_fnorm:4.0771e-02 L9_fnorm:4.1260e-02 L10_fnorm:4.1260e-02 L11_fnorm:4.1260e-02 L12_fnorm:3.7354e-02 L1_l1linf:7.0801e-03 L2_l1linf:6.6223e-03 L3_l1linf:6.4697e-03 L4_l1linf:6.3171e-03 L5_l1linf:6.0730e-03 
L6_l1linf:5.7983e-03 L7_l1linf:5.8899e-03 L8_l1linf:5.6458e-03 L9_l1linf:5.3406e-03 L10_l1linf:5.3711e-03 L11_l1linf:5.5847e-03 L12_l1linf:3.9368e-03 L1_spectral:7.3398e-04 L2_spectral:7.2278e-04 L3_spectral:7.1785e-04 L4_spectral:7.0254e-04 L5_spectral:6.8773e-04 L6_spectral:6.8721e-04 L7_spectral:6.7304e-04 L8_spectral:6.5979e-04 L9_spectral:6.5447e-04 L10_spectral:6.4834e-04 L11_spectral:6.4891e-04 L12_spectral:6.4461e-04 train_time:388884ms step_avg:42.27ms +[2025-09-11 13:04:40] [Rank 0] step:9201/10000 train_time:390822ms step_avg:42.48ms +[2025-09-11 13:04:40] [Rank 0] step:9201/10000 train_time:390822ms step_avg:42.48ms +[2025-09-11 13:04:41] [Rank 0] step:9221/10000 train_time:391544ms step_avg:42.46ms +[2025-09-11 13:04:41] [Rank 0] step:9221/10000 train_time:391544ms step_avg:42.46ms +[2025-09-11 13:04:41] [Rank 0] step:9241/10000 train_time:392253ms step_avg:42.45ms +[2025-09-11 13:04:41] [Rank 0] step:9241/10000 train_time:392253ms step_avg:42.45ms +[2025-09-11 13:04:42] [Rank 0] step:9261/10000 train_time:392965ms step_avg:42.43ms +[2025-09-11 13:04:42] [Rank 0] step:9261/10000 train_time:392965ms step_avg:42.43ms +[2025-09-11 13:04:43] [Rank 0] step:9281/10000 train_time:393676ms step_avg:42.42ms +[2025-09-11 13:04:43] [Rank 0] step:9281/10000 train_time:393676ms step_avg:42.42ms +[2025-09-11 13:04:43] [Rank 0] step:9301/10000 train_time:394384ms step_avg:42.40ms +[2025-09-11 13:04:43] [Rank 0] step:9301/10000 train_time:394384ms step_avg:42.40ms +[2025-09-11 13:04:44] [Rank 0] step:9321/10000 train_time:395095ms step_avg:42.39ms +[2025-09-11 13:04:44] [Rank 0] step:9321/10000 train_time:395095ms step_avg:42.39ms +[2025-09-11 13:04:45] [Rank 0] step:9341/10000 train_time:395803ms step_avg:42.37ms +[2025-09-11 13:04:45] [Rank 0] step:9341/10000 train_time:395803ms step_avg:42.37ms +[2025-09-11 13:04:46] [Rank 0] step:9361/10000 train_time:396508ms step_avg:42.36ms +[2025-09-11 13:04:46] [Rank 0] step:9361/10000 train_time:396508ms step_avg:42.36ms 
+[2025-09-11 13:04:46] [Rank 0] step:9381/10000 train_time:397218ms step_avg:42.34ms +[2025-09-11 13:04:46] [Rank 0] step:9381/10000 train_time:397218ms step_avg:42.34ms +[2025-09-11 13:04:47] [Rank 0] step:9401/10000 train_time:397929ms step_avg:42.33ms +[2025-09-11 13:04:47] [Rank 0] step:9401/10000 train_time:397929ms step_avg:42.33ms +[2025-09-11 13:04:48] [Rank 0] step:9421/10000 train_time:398641ms step_avg:42.31ms +[2025-09-11 13:04:48] [Rank 0] step:9421/10000 train_time:398641ms step_avg:42.31ms +[2025-09-11 13:04:48] [Rank 0] step:9441/10000 train_time:399354ms step_avg:42.30ms +[2025-09-11 13:04:48] [Rank 0] step:9441/10000 train_time:399354ms step_avg:42.30ms +[2025-09-11 13:04:49] [Rank 0] step:9461/10000 train_time:400064ms step_avg:42.29ms +[2025-09-11 13:04:49] [Rank 0] step:9461/10000 train_time:400064ms step_avg:42.29ms +[2025-09-11 13:04:50] [Rank 0] step:9481/10000 train_time:401065ms step_avg:42.30ms +[2025-09-11 13:04:50] [Rank 0] step:9481/10000 train_time:401065ms step_avg:42.30ms +[2025-09-11 13:04:51] [Rank 0] step:9501/10000 train_time:401776ms step_avg:42.29ms +[2025-09-11 13:04:51] [Rank 0] step:9501/10000 train_time:401776ms step_avg:42.29ms +[2025-09-11 13:04:51] [Rank 0] step:9521/10000 train_time:402490ms step_avg:42.27ms +[2025-09-11 13:04:51] [Rank 0] step:9521/10000 train_time:402490ms step_avg:42.27ms +[2025-09-11 13:04:52] [Rank 0] step:9541/10000 train_time:403199ms step_avg:42.26ms +[2025-09-11 13:04:52] [Rank 0] step:9541/10000 train_time:403199ms step_avg:42.26ms +[2025-09-11 13:04:53] [Rank 0] step:9561/10000 train_time:404147ms step_avg:42.27ms +[2025-09-11 13:04:53] [Rank 0] step:9561/10000 train_time:404147ms step_avg:42.27ms +[2025-09-11 13:04:54] [Rank 0] step:9581/10000 train_time:404859ms step_avg:42.26ms +[2025-09-11 13:04:54] [Rank 0] step:9581/10000 train_time:404859ms step_avg:42.26ms +[2025-09-11 13:04:55] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 13:04:55] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 13:04:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 13:04:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 13:04:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 13:04:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 13:04:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:04:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:04:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 13:04:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 13:04:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 13:04:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 13:05:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 13:05:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 13:05:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 13:05:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 13:05:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 13:05:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 13:05:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 13:05:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 13:05:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 13:05:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 13:05:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 13:05:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 13:05:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 13:05:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 13:05:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 13:05:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 13:05:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 13:05:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 13:05:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 13:05:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 13:05:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 13:05:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 13:05:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 13:05:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 13:05:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 13:05:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 13:05:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 13:05:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 13:05:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 13:05:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 13:05:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 13:05:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 13:05:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 13:05:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 13:05:06] [Rank 0] PRINT: step:9600/10000 val_loss:5.3636 total_sharp:2.5558e-02 L1_sharp:2.0518e-03 L2_sharp:9.1121e-04 L3_sharp:1.3499e-03 L4_sharp:2.2716e-03 L5_sharp:3.8962e-03 L6_sharp:9.0531e-03 L7_sharp:1.3352e-02 L8_sharp:1.9102e-02 L9_sharp:1.8752e-02 L10_sharp:2.4284e-02 L11_sharp:3.5734e-02 L12_sharp:1.2992e-01 total_fnorm:1.3867e-01 total_l1_linf:1.0300e+02 total_spectral:6.8359e-02 L1_fnorm:2.4414e-02 L2_fnorm:2.4048e-02 L3_fnorm:2.3682e-02 L4_fnorm:2.3560e-02 L5_fnorm:2.3438e-02 L6_fnorm:2.3315e-02 L7_fnorm:2.3315e-02 L8_fnorm:2.2705e-02 L9_fnorm:2.3071e-02 L10_fnorm:2.3071e-02 L11_fnorm:2.2827e-02 L12_fnorm:2.0874e-02 L1_l1linf:3.0212e-03 L2_l1linf:2.9297e-03 L3_l1linf:2.9144e-03 L4_l1linf:2.8076e-03 L5_l1linf:3.0060e-03 L6_l1linf:2.8229e-03 L7_l1linf:2.7618e-03 L8_l1linf:2.6550e-03 L9_l1linf:2.7161e-03 L10_l1linf:2.6398e-03 L11_l1linf:2.7313e-03 L12_l1linf:2.0447e-03 L1_spectral:4.2281e-04 L2_spectral:4.2102e-04 L3_spectral:4.1148e-04 L4_spectral:4.0270e-04 L5_spectral:3.9502e-04 L6_spectral:3.9027e-04 L7_spectral:3.8216e-04 L8_spectral:3.7598e-04 L9_spectral:3.7829e-04 L10_spectral:3.6574e-04 L11_spectral:3.6911e-04 L12_spectral:3.6745e-04 train_time:405546ms step_avg:42.24ms +[2025-09-11 13:05:06] [Rank 0] PRINT: step:9600/10000 
val_loss:5.3636 total_sharp:2.5558e-02 L1_sharp:2.0518e-03 L2_sharp:9.1121e-04 L3_sharp:1.3499e-03 L4_sharp:2.2716e-03 L5_sharp:3.8962e-03 L6_sharp:9.0531e-03 L7_sharp:1.3352e-02 L8_sharp:1.9102e-02 L9_sharp:1.8752e-02 L10_sharp:2.4284e-02 L11_sharp:3.5734e-02 L12_sharp:1.2992e-01 total_fnorm:1.3867e-01 total_l1_linf:1.0300e+02 total_spectral:6.8359e-02 L1_fnorm:2.4414e-02 L2_fnorm:2.4048e-02 L3_fnorm:2.3682e-02 L4_fnorm:2.3560e-02 L5_fnorm:2.3438e-02 L6_fnorm:2.3315e-02 L7_fnorm:2.3315e-02 L8_fnorm:2.2705e-02 L9_fnorm:2.3071e-02 L10_fnorm:2.3071e-02 L11_fnorm:2.2827e-02 L12_fnorm:2.0874e-02 L1_l1linf:3.0212e-03 L2_l1linf:2.9297e-03 L3_l1linf:2.9144e-03 L4_l1linf:2.8076e-03 L5_l1linf:3.0060e-03 L6_l1linf:2.8229e-03 L7_l1linf:2.7618e-03 L8_l1linf:2.6550e-03 L9_l1linf:2.7161e-03 L10_l1linf:2.6398e-03 L11_l1linf:2.7313e-03 L12_l1linf:2.0447e-03 L1_spectral:4.2281e-04 L2_spectral:4.2102e-04 L3_spectral:4.1148e-04 L4_spectral:4.0270e-04 L5_spectral:3.9502e-04 L6_spectral:3.9027e-04 L7_spectral:3.8216e-04 L8_spectral:3.7598e-04 L9_spectral:3.7829e-04 L10_spectral:3.6574e-04 L11_spectral:3.6911e-04 L12_spectral:3.6745e-04 train_time:405546ms step_avg:42.24ms +[2025-09-11 13:05:08] [Rank 0] step:9601/10000 train_time:407503ms step_avg:42.44ms +[2025-09-11 13:05:08] [Rank 0] step:9601/10000 train_time:407503ms step_avg:42.44ms +[2025-09-11 13:05:08] [Rank 0] step:9621/10000 train_time:408221ms step_avg:42.43ms +[2025-09-11 13:05:08] [Rank 0] step:9621/10000 train_time:408221ms step_avg:42.43ms +[2025-09-11 13:05:09] [Rank 0] step:9641/10000 train_time:408936ms step_avg:42.42ms +[2025-09-11 13:05:09] [Rank 0] step:9641/10000 train_time:408936ms step_avg:42.42ms +[2025-09-11 13:05:10] [Rank 0] step:9661/10000 train_time:409659ms step_avg:42.40ms +[2025-09-11 13:05:10] [Rank 0] step:9661/10000 train_time:409659ms step_avg:42.40ms +[2025-09-11 13:05:11] [Rank 0] step:9681/10000 train_time:410375ms step_avg:42.39ms +[2025-09-11 13:05:11] [Rank 0] step:9681/10000 
train_time:410375ms step_avg:42.39ms +[2025-09-11 13:05:11] [Rank 0] step:9701/10000 train_time:411092ms step_avg:42.38ms +[2025-09-11 13:05:11] [Rank 0] step:9701/10000 train_time:411092ms step_avg:42.38ms +[2025-09-11 13:05:12] [Rank 0] step:9721/10000 train_time:411813ms step_avg:42.36ms +[2025-09-11 13:05:12] [Rank 0] step:9721/10000 train_time:411813ms step_avg:42.36ms +[2025-09-11 13:05:13] [Rank 0] step:9741/10000 train_time:412531ms step_avg:42.35ms +[2025-09-11 13:05:13] [Rank 0] step:9741/10000 train_time:412531ms step_avg:42.35ms +[2025-09-11 13:05:13] [Rank 0] step:9761/10000 train_time:413249ms step_avg:42.34ms +[2025-09-11 13:05:13] [Rank 0] step:9761/10000 train_time:413249ms step_avg:42.34ms +[2025-09-11 13:05:14] [Rank 0] step:9781/10000 train_time:413964ms step_avg:42.32ms +[2025-09-11 13:05:14] [Rank 0] step:9781/10000 train_time:413964ms step_avg:42.32ms +[2025-09-11 13:05:15] [Rank 0] step:9801/10000 train_time:414689ms step_avg:42.31ms +[2025-09-11 13:05:15] [Rank 0] step:9801/10000 train_time:414689ms step_avg:42.31ms +[2025-09-11 13:05:16] [Rank 0] step:9821/10000 train_time:415408ms step_avg:42.30ms +[2025-09-11 13:05:16] [Rank 0] step:9821/10000 train_time:415408ms step_avg:42.30ms +[2025-09-11 13:05:16] [Rank 0] step:9841/10000 train_time:416130ms step_avg:42.29ms +[2025-09-11 13:05:16] [Rank 0] step:9841/10000 train_time:416130ms step_avg:42.29ms +[2025-09-11 13:05:17] [Rank 0] step:9861/10000 train_time:416846ms step_avg:42.27ms +[2025-09-11 13:05:17] [Rank 0] step:9861/10000 train_time:416846ms step_avg:42.27ms +[2025-09-11 13:05:18] [Rank 0] step:9881/10000 train_time:417564ms step_avg:42.26ms +[2025-09-11 13:05:18] [Rank 0] step:9881/10000 train_time:417564ms step_avg:42.26ms +[2025-09-11 13:05:18] [Rank 0] step:9901/10000 train_time:418278ms step_avg:42.25ms +[2025-09-11 13:05:18] [Rank 0] step:9901/10000 train_time:418278ms step_avg:42.25ms +[2025-09-11 13:05:19] [Rank 0] step:9921/10000 train_time:418994ms step_avg:42.23ms 
+[2025-09-11 13:05:19] [Rank 0] step:9921/10000 train_time:418994ms step_avg:42.23ms +[2025-09-11 13:05:20] [Rank 0] step:9941/10000 train_time:419716ms step_avg:42.22ms +[2025-09-11 13:05:20] [Rank 0] step:9941/10000 train_time:419716ms step_avg:42.22ms +[2025-09-11 13:05:21] [Rank 0] step:9961/10000 train_time:420439ms step_avg:42.21ms +[2025-09-11 13:05:21] [Rank 0] step:9961/10000 train_time:420439ms step_avg:42.21ms +[2025-09-11 13:05:21] [Rank 0] step:9981/10000 train_time:421158ms step_avg:42.20ms +[2025-09-11 13:05:21] [Rank 0] step:9981/10000 train_time:421158ms step_avg:42.20ms +[2025-09-11 13:05:22] [Rank 0] step:10000/10000 train_time:421849ms step_avg:42.18ms +[2025-09-11 13:05:22] [Rank 0] step:10000/10000 train_time:421849ms step_avg:42.18ms +[2025-09-11 13:05:22] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 13:05:22] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 13:05:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 13:05:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 13:05:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 13:05:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 13:05:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:05:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:05:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 13:05:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 13:05:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 13:05:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 13:05:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 13:05:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 13:05:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 13:05:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 13:05:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 13:05:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 13:05:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 13:05:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 13:05:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 13:05:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 13:05:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 13:05:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 13:05:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 13:05:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 13:05:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 13:05:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 13:05:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 13:05:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 13:05:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 13:05:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 13:05:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 13:05:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 13:05:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 13:05:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 13:05:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 13:05:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 13:05:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 13:05:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 13:05:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 13:05:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 13:05:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 13:05:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 13:05:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 13:05:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:05:33] [Rank 0] PRINT: step:10000/10000 val_loss:5.3623 total_sharp:1.7963e-02 L1_sharp:1.0839e-03 L2_sharp:-1.6439e-04 L3_sharp:6.8690e-04 L4_sharp:7.5143e-04 L5_sharp:3.5525e-03 L6_sharp:6.0521e-03 L7_sharp:7.5205e-03 L8_sharp:1.2265e-02 L9_sharp:1.4665e-02 L10_sharp:1.7100e-02 L11_sharp:2.5346e-02 L12_sharp:1.2064e-01 total_fnorm:5.3223e-02 total_l1_linf:2.9500e+01 total_spectral:2.6489e-02 L1_fnorm:9.6436e-03 L2_fnorm:9.5825e-03 L3_fnorm:9.3994e-03 L4_fnorm:9.2773e-03 L5_fnorm:9.2773e-03 L6_fnorm:9.1553e-03 L7_fnorm:9.1553e-03 L8_fnorm:8.9722e-03 L9_fnorm:9.0332e-03 L10_fnorm:9.0332e-03 L11_fnorm:8.9722e-03 L12_fnorm:8.1177e-03 L1_l1linf:1.0452e-03 L2_l1linf:9.6893e-04 L3_l1linf:9.7656e-04 L4_l1linf:9.3460e-04 L5_l1linf:8.9645e-04 L6_l1linf:9.0408e-04 L7_l1linf:8.8882e-04 L8_l1linf:9.5367e-04 L9_l1linf:7.8964e-04 L10_l1linf:8.5831e-04 L11_l1linf:8.0872e-04 L12_l1linf:6.5613e-04 L1_spectral:1.7082e-04 L2_spectral:1.6892e-04 L3_spectral:1.6583e-04 L4_spectral:1.6105e-04 L5_spectral:1.5853e-04 L6_spectral:1.5661e-04 L7_spectral:1.5498e-04 L8_spectral:1.5585e-04 L9_spectral:1.4990e-04 L10_spectral:1.4887e-04 L11_spectral:1.4811e-04 L12_spectral:1.4833e-04 train_time:421870ms step_avg:42.19ms +[2025-09-11 13:05:33] [Rank 0] PRINT: step:10000/10000 val_loss:5.3623 total_sharp:1.7963e-02 L1_sharp:1.0839e-03 L2_sharp:-1.6439e-04 L3_sharp:6.8690e-04 L4_sharp:7.5143e-04 L5_sharp:3.5525e-03 L6_sharp:6.0521e-03 L7_sharp:7.5205e-03 L8_sharp:1.2265e-02 L9_sharp:1.4665e-02 L10_sharp:1.7100e-02 L11_sharp:2.5346e-02 L12_sharp:1.2064e-01 total_fnorm:5.3223e-02 total_l1_linf:2.9500e+01 total_spectral:2.6489e-02 L1_fnorm:9.6436e-03 L2_fnorm:9.5825e-03 L3_fnorm:9.3994e-03 L4_fnorm:9.2773e-03 L5_fnorm:9.2773e-03 L6_fnorm:9.1553e-03 L7_fnorm:9.1553e-03 L8_fnorm:8.9722e-03 L9_fnorm:9.0332e-03 L10_fnorm:9.0332e-03 L11_fnorm:8.9722e-03 L12_fnorm:8.1177e-03 L1_l1linf:1.0452e-03 L2_l1linf:9.6893e-04 L3_l1linf:9.7656e-04 L4_l1linf:9.3460e-04 L5_l1linf:8.9645e-04 
L6_l1linf:9.0408e-04 L7_l1linf:8.8882e-04 L8_l1linf:9.5367e-04 L9_l1linf:7.8964e-04 L10_l1linf:8.5831e-04 L11_l1linf:8.0872e-04 L12_l1linf:6.5613e-04 L1_spectral:1.7082e-04 L2_spectral:1.6892e-04 L3_spectral:1.6583e-04 L4_spectral:1.6105e-04 L5_spectral:1.5853e-04 L6_spectral:1.5661e-04 L7_spectral:1.5498e-04 L8_spectral:1.5585e-04 L9_spectral:1.4990e-04 L10_spectral:1.4887e-04 L11_spectral:1.4811e-04 L12_spectral:1.4833e-04 train_time:421870ms step_avg:42.19ms +[2025-09-11 13:05:33] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 13:05:33 2025 --- +[2025-09-11 13:05:33] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 13:05:33 2025 --- +[2025-09-11 13:05:33] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 13:05:33] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.002_seed_45/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.002_seed_45/config.json new file mode 100644 index 0000000000000000000000000000000000000000..71103d79f6cb07616ad521d9ad1e171e77b4dd11 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.002_seed_45/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 45, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.001, + "muon_lr": 0.002, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "0f5a9c38-f12e-4278-bfdc-4288c430c0b2", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.002_seed_45/training_log_0f5a9c38-f12e-4278-bfdc-4288c430c0b2.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.002_seed_45/training_log_0f5a9c38-f12e-4278-bfdc-4288c430c0b2.txt new file mode 100644 index 0000000000000000000000000000000000000000..e50553105cbb8e3e5644d0656f7a3b6c5c89cd0a --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.002_seed_45/training_log_0f5a9c38-f12e-4278-bfdc-4288c430c0b2.txt @@ -0,0 +1,4264 @@ +[2025-09-11 13:33:49] [Rank 0] PRINT: --- Script Start: Thu Sep 11 13:33:49 2025 --- +[2025-09-11 13:33:49] [Rank 0] PRINT: --- Script Start: Thu Sep 11 13:33:49 2025 --- +[2025-09-11 13:33:50] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001, muon_lr=0.002, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 13:33:50] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001, muon_lr=0.002, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 13:33:50] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 13:33:50] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 13:33:50] [Rank 0] PRINT: Using fixed seed: 45 +[2025-09-11 13:33:50] [Rank 0] PRINT: Using fixed seed: 45 +[2025-09-11 13:33:50] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.002_seed_45 +[2025-09-11 13:33:50] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.002_seed_45 +[2025-09-11 13:33:50] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses 
import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert 
header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." 
+ "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + 
train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
def calculate_l1_to_linf_norm(matrix):
    """
    ℓ1→ℓ∞ operator norm: the maximum row-wise L1 norm of a matrix.
    1-D inputs yield the plain L1 norm; tensors with more than two
    dimensions are flattened to (dim0, -1) before taking row norms.
    """
    abs_entries = torch.abs(matrix)
    if abs_entries.ndim == 1:
        return torch.sum(abs_entries)
    if abs_entries.ndim > 2:
        # Collapse trailing dimensions so "rows" are indexed by dim 0.
        abs_entries = abs_entries.view(abs_entries.shape[0], -1)
    return torch.max(torch.sum(abs_entries, dim=1))
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
def set_seed(seed):
    """Seed every RNG in use (python `random`, numpy, torch, and CUDA if present)."""
    # Feed the same seed to each framework's generator, in the same order as before.
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    # Flushed immediately so every rank reports its seed before any training output.
    print(f"PRINT: Set seed to {seed}", flush=True)
def _load_data_shard(file: Path):
    """Read one tokenized .bin shard into a pinned uint16 CPU tensor."""
    # Header layout: 256 int32 words; [0]=magic, [1]=version, [2]=token count.
    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
    assert header[1] == 1, "unsupported version"
    num_tokens = int(header[2])
    # Pinned host memory so the later host-to-device copy can be asynchronous.
    tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
    with file.open("rb", buffering=0) as f:
        f.seek(256 * 4)  # skip past the header words
        nbytes = f.readinto(tokens.numpy())
    assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
    return tokens
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
@dataclass
class Hyperparameters:
    """Static run configuration: data paths, sequence lengths, schedule, logging."""
    # Attributes are deliberately plain (un-annotated) class attributes so the
    # config dump can enumerate them via __class__.__dict__.
    # data
    train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
    val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin"
    val_tokens = 491520
    train_seq_len = 3*1024    # FlexAttention sequence length
    val_seq_len = 4*4*1024    # FlexAttention sequence length for validation
    # optimization
    num_iterations = 10000
    cooldown_frac = 0.4
    # architecture
    vocab_size = 50257
    # evaluation and logging
    val_loss_every = 400      # large interval: sharpness analysis at each eval is expensive
    save_checkpoint = False
def check_and_delete_txt_files(folder_path):
    """
    Scan the .txt files directly inside *folder_path* for the completion marker
    "10000/10000". If no file contains it, delete every .txt file there.

    Args:
        folder_path (str): Path to the folder to check

    Returns True when the run should proceed (folder missing / not a directory /
    no logs / no completed log found), False when a completed log already exists.
    """
    folder = Path(folder_path)

    # Guard clauses: a missing or non-directory path counts as "no completed run".
    if not folder.exists():
        print(f"Error: Folder '{folder_path}' does not exist.")
        return True
    if not folder.is_dir():
        print(f"Error: '{folder_path}' is not a directory.")
        return True

    # One directory level only — no recursion.
    txt_files = list(folder.glob("*.txt"))
    if not txt_files:
        print("No .txt files found in the folder.")
        return True

    # Look for the completion marker; stop at the first hit.
    found_string = False
    for log_file in txt_files:
        try:
            if "10000/10000" in log_file.read_text(encoding='utf-8'):
                print(f"Found '10000/10000' in: {log_file}")
                found_string = True
                break
        except Exception as e:
            print(f"Error reading {log_file}: {e}")

    if found_string:
        print("String '10000/10000' found. No files will be deleted.")
        return False

    print("String '10000/10000' not found in any .txt file. Deleting all .txt files...")
    for log_file in txt_files:
        try:
            log_file.unlink()
            print(f"Deleted: {log_file}")
        except Exception as e:
            print(f"Error deleting {log_file}: {e}")
    return True
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 13:33:50] [Rank 0] PRINT: Constructing model... +[2025-09-11 13:33:50] [Rank 0] PRINT: Constructing model... +[2025-09-11 13:33:51] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 13:33:51] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 13:33:51] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 13:33:51] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 13:33:51] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 13:33:51] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 13:33:51] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 13:33:51] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 13:33:51] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 13:33:51] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 13:33:53] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 13:33:53] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 13:33:53] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 13:33:53] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 13:33:53] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 13:33:53] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 13:33:58] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 13:33:58] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 13:33:58] [Rank 0] PRINT: Starting warmup... +[2025-09-11 13:33:58] [Rank 0] PRINT: Starting warmup... +[2025-09-11 13:34:37] [Rank 0] PRINT: Warmup complete. +[2025-09-11 13:34:37] [Rank 0] PRINT: Warmup complete. +[2025-09-11 13:34:37] [Rank 0] PRINT: Starting training... +[2025-09-11 13:34:37] [Rank 0] PRINT: Starting training... 
+[2025-09-11 13:34:38] [Rank 0] step:21/10000 train_time:1132ms step_avg:53.93ms +[2025-09-11 13:34:38] [Rank 0] step:21/10000 train_time:1132ms step_avg:53.93ms +[2025-09-11 13:34:38] [Rank 0] step:41/10000 train_time:1862ms step_avg:45.41ms +[2025-09-11 13:34:38] [Rank 0] step:41/10000 train_time:1862ms step_avg:45.41ms +[2025-09-11 13:34:39] [Rank 0] step:61/10000 train_time:2591ms step_avg:42.48ms +[2025-09-11 13:34:39] [Rank 0] step:61/10000 train_time:2591ms step_avg:42.48ms +[2025-09-11 13:34:40] [Rank 0] step:81/10000 train_time:3320ms step_avg:40.99ms +[2025-09-11 13:34:40] [Rank 0] step:81/10000 train_time:3320ms step_avg:40.99ms +[2025-09-11 13:34:41] [Rank 0] step:101/10000 train_time:4048ms step_avg:40.08ms +[2025-09-11 13:34:41] [Rank 0] step:101/10000 train_time:4048ms step_avg:40.08ms +[2025-09-11 13:34:41] [Rank 0] step:121/10000 train_time:4776ms step_avg:39.47ms +[2025-09-11 13:34:41] [Rank 0] step:121/10000 train_time:4776ms step_avg:39.47ms +[2025-09-11 13:34:42] [Rank 0] step:141/10000 train_time:5504ms step_avg:39.04ms +[2025-09-11 13:34:42] [Rank 0] step:141/10000 train_time:5504ms step_avg:39.04ms +[2025-09-11 13:34:43] [Rank 0] step:161/10000 train_time:6232ms step_avg:38.71ms +[2025-09-11 13:34:43] [Rank 0] step:161/10000 train_time:6232ms step_avg:38.71ms +[2025-09-11 13:34:44] [Rank 0] step:181/10000 train_time:6960ms step_avg:38.45ms +[2025-09-11 13:34:44] [Rank 0] step:181/10000 train_time:6960ms step_avg:38.45ms +[2025-09-11 13:34:44] [Rank 0] step:201/10000 train_time:7689ms step_avg:38.25ms +[2025-09-11 13:34:44] [Rank 0] step:201/10000 train_time:7689ms step_avg:38.25ms +[2025-09-11 13:34:45] [Rank 0] step:221/10000 train_time:8416ms step_avg:38.08ms +[2025-09-11 13:34:45] [Rank 0] step:221/10000 train_time:8416ms step_avg:38.08ms +[2025-09-11 13:34:46] [Rank 0] step:241/10000 train_time:9144ms step_avg:37.94ms +[2025-09-11 13:34:46] [Rank 0] step:241/10000 train_time:9144ms step_avg:37.94ms +[2025-09-11 13:34:47] [Rank 0] 
step:261/10000 train_time:9871ms step_avg:37.82ms +[2025-09-11 13:34:47] [Rank 0] step:261/10000 train_time:9871ms step_avg:37.82ms +[2025-09-11 13:34:47] [Rank 0] step:281/10000 train_time:10599ms step_avg:37.72ms +[2025-09-11 13:34:47] [Rank 0] step:281/10000 train_time:10599ms step_avg:37.72ms +[2025-09-11 13:34:48] [Rank 0] step:301/10000 train_time:11327ms step_avg:37.63ms +[2025-09-11 13:34:48] [Rank 0] step:301/10000 train_time:11327ms step_avg:37.63ms +[2025-09-11 13:34:49] [Rank 0] step:321/10000 train_time:12054ms step_avg:37.55ms +[2025-09-11 13:34:49] [Rank 0] step:321/10000 train_time:12054ms step_avg:37.55ms +[2025-09-11 13:34:49] [Rank 0] step:341/10000 train_time:12782ms step_avg:37.48ms +[2025-09-11 13:34:49] [Rank 0] step:341/10000 train_time:12782ms step_avg:37.48ms +[2025-09-11 13:34:50] [Rank 0] step:361/10000 train_time:13509ms step_avg:37.42ms +[2025-09-11 13:34:50] [Rank 0] step:361/10000 train_time:13509ms step_avg:37.42ms +[2025-09-11 13:34:51] [Rank 0] step:381/10000 train_time:14236ms step_avg:37.37ms +[2025-09-11 13:34:51] [Rank 0] step:381/10000 train_time:14236ms step_avg:37.37ms +[2025-09-11 13:34:52] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 13:34:52] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 13:34:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 13:34:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 13:35:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 13:35:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 13:35:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:35:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 13:35:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 13:35:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 13:35:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 13:35:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 13:35:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 13:35:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 13:35:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 13:35:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 13:35:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 13:35:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 13:35:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 13:35:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 13:35:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 13:35:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 13:35:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 13:35:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 13:35:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 13:35:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 13:35:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 13:35:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 13:35:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 13:35:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 13:35:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 13:35:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 13:35:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 13:35:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 13:35:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 13:35:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 13:35:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 13:35:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 13:35:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 13:35:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 13:35:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 13:35:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 13:35:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 13:35:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 13:35:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 13:35:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:35:38] [Rank 0] PRINT: step:400/10000 val_loss:7.0949 total_sharp:2.7055e-02 L1_sharp:5.9394e-02 L2_sharp:7.0340e-02 L3_sharp:6.6979e-02 L4_sharp:7.8498e-02 L5_sharp:8.3466e-02 L6_sharp:1.4002e-01 L7_sharp:3.5567e-01 L8_sharp:3.1907e-01 L9_sharp:5.3666e-01 L10_sharp:5.7816e-01 L11_sharp:7.3780e-01 L12_sharp:1.0849e+00 total_fnorm:5.4316e+00 total_l1_linf:1.6177e+04 total_spectral:2.7158e+00 L1_fnorm:2.4328e-01 L2_fnorm:2.4256e-01 L3_fnorm:2.4156e-01 L4_fnorm:2.4092e-01 L5_fnorm:2.4082e-01 L6_fnorm:2.3991e-01 L7_fnorm:2.1482e-01 L8_fnorm:1.9005e-01 L9_fnorm:1.5918e-01 L10_fnorm:1.3984e-01 L11_fnorm:1.2173e-01 L12_fnorm:1.1049e-01 L1_l1linf:8.2718e-02 L2_l1linf:8.2535e-02 L3_l1linf:8.1967e-02 L4_l1linf:8.2308e-02 L5_l1linf:8.3543e-02 L6_l1linf:8.4066e-02 L7_l1linf:7.5779e-02 L8_l1linf:6.5320e-02 L9_l1linf:5.5829e-02 L10_l1linf:5.2490e-02 L11_l1linf:5.0487e-02 L12_l1linf:4.8286e-02 L1_spectral:2.4097e-03 L2_spectral:2.4094e-03 L3_spectral:2.4108e-03 L4_spectral:2.4095e-03 L5_spectral:2.4096e-03 L6_spectral:2.4097e-03 L7_spectral:2.4093e-03 L8_spectral:2.4083e-03 L9_spectral:2.4074e-03 L10_spectral:2.4061e-03 L11_spectral:2.4058e-03 L12_spectral:2.4049e-03 train_time:14943ms step_avg:37.36ms +[2025-09-11 13:35:38] [Rank 0] PRINT: step:400/10000 val_loss:7.0949 total_sharp:2.7055e-02 L1_sharp:5.9394e-02 L2_sharp:7.0340e-02 L3_sharp:6.6979e-02 L4_sharp:7.8498e-02 L5_sharp:8.3466e-02 L6_sharp:1.4002e-01 L7_sharp:3.5567e-01 L8_sharp:3.1907e-01 L9_sharp:5.3666e-01 L10_sharp:5.7816e-01 L11_sharp:7.3780e-01 L12_sharp:1.0849e+00 total_fnorm:5.4316e+00 total_l1_linf:1.6177e+04 total_spectral:2.7158e+00 L1_fnorm:2.4328e-01 L2_fnorm:2.4256e-01 L3_fnorm:2.4156e-01 L4_fnorm:2.4092e-01 L5_fnorm:2.4082e-01 L6_fnorm:2.3991e-01 L7_fnorm:2.1482e-01 L8_fnorm:1.9005e-01 L9_fnorm:1.5918e-01 L10_fnorm:1.3984e-01 L11_fnorm:1.2173e-01 L12_fnorm:1.1049e-01 L1_l1linf:8.2718e-02 L2_l1linf:8.2535e-02 L3_l1linf:8.1967e-02 L4_l1linf:8.2308e-02 L5_l1linf:8.3543e-02 
L6_l1linf:8.4066e-02 L7_l1linf:7.5779e-02 L8_l1linf:6.5320e-02 L9_l1linf:5.5829e-02 L10_l1linf:5.2490e-02 L11_l1linf:5.0487e-02 L12_l1linf:4.8286e-02 L1_spectral:2.4097e-03 L2_spectral:2.4094e-03 L3_spectral:2.4108e-03 L4_spectral:2.4095e-03 L5_spectral:2.4096e-03 L6_spectral:2.4097e-03 L7_spectral:2.4093e-03 L8_spectral:2.4083e-03 L9_spectral:2.4074e-03 L10_spectral:2.4061e-03 L11_spectral:2.4058e-03 L12_spectral:2.4049e-03 train_time:14943ms step_avg:37.36ms +[2025-09-11 13:36:08] [Rank 0] step:401/10000 train_time:45062ms step_avg:112.37ms +[2025-09-11 13:36:08] [Rank 0] step:401/10000 train_time:45062ms step_avg:112.37ms +[2025-09-11 13:36:11] [Rank 0] step:421/10000 train_time:48143ms step_avg:114.35ms +[2025-09-11 13:36:11] [Rank 0] step:421/10000 train_time:48143ms step_avg:114.35ms +[2025-09-11 13:36:12] [Rank 0] step:441/10000 train_time:48783ms step_avg:110.62ms +[2025-09-11 13:36:12] [Rank 0] step:441/10000 train_time:48783ms step_avg:110.62ms +[2025-09-11 13:36:12] [Rank 0] step:461/10000 train_time:49423ms step_avg:107.21ms +[2025-09-11 13:36:12] [Rank 0] step:461/10000 train_time:49423ms step_avg:107.21ms +[2025-09-11 13:36:13] [Rank 0] step:481/10000 train_time:50063ms step_avg:104.08ms +[2025-09-11 13:36:13] [Rank 0] step:481/10000 train_time:50063ms step_avg:104.08ms +[2025-09-11 13:36:14] [Rank 0] step:501/10000 train_time:50703ms step_avg:101.20ms +[2025-09-11 13:36:14] [Rank 0] step:501/10000 train_time:50703ms step_avg:101.20ms +[2025-09-11 13:36:14] [Rank 0] step:521/10000 train_time:51342ms step_avg:98.55ms +[2025-09-11 13:36:14] [Rank 0] step:521/10000 train_time:51342ms step_avg:98.55ms +[2025-09-11 13:36:15] [Rank 0] step:541/10000 train_time:51982ms step_avg:96.09ms +[2025-09-11 13:36:15] [Rank 0] step:541/10000 train_time:51982ms step_avg:96.09ms +[2025-09-11 13:36:16] [Rank 0] step:561/10000 train_time:52622ms step_avg:93.80ms +[2025-09-11 13:36:16] [Rank 0] step:561/10000 train_time:52622ms step_avg:93.80ms +[2025-09-11 13:36:16] [Rank 
0] step:581/10000 train_time:53261ms step_avg:91.67ms +[2025-09-11 13:36:16] [Rank 0] step:581/10000 train_time:53261ms step_avg:91.67ms +[2025-09-11 13:36:17] [Rank 0] step:601/10000 train_time:53902ms step_avg:89.69ms +[2025-09-11 13:36:17] [Rank 0] step:601/10000 train_time:53902ms step_avg:89.69ms +[2025-09-11 13:36:18] [Rank 0] step:621/10000 train_time:54541ms step_avg:87.83ms +[2025-09-11 13:36:18] [Rank 0] step:621/10000 train_time:54541ms step_avg:87.83ms +[2025-09-11 13:36:18] [Rank 0] step:641/10000 train_time:55180ms step_avg:86.08ms +[2025-09-11 13:36:18] [Rank 0] step:641/10000 train_time:55180ms step_avg:86.08ms +[2025-09-11 13:36:19] [Rank 0] step:661/10000 train_time:55820ms step_avg:84.45ms +[2025-09-11 13:36:19] [Rank 0] step:661/10000 train_time:55820ms step_avg:84.45ms +[2025-09-11 13:36:19] [Rank 0] step:681/10000 train_time:56460ms step_avg:82.91ms +[2025-09-11 13:36:19] [Rank 0] step:681/10000 train_time:56460ms step_avg:82.91ms +[2025-09-11 13:36:20] [Rank 0] step:701/10000 train_time:57099ms step_avg:81.45ms +[2025-09-11 13:36:20] [Rank 0] step:701/10000 train_time:57099ms step_avg:81.45ms +[2025-09-11 13:36:21] [Rank 0] step:721/10000 train_time:57738ms step_avg:80.08ms +[2025-09-11 13:36:21] [Rank 0] step:721/10000 train_time:57738ms step_avg:80.08ms +[2025-09-11 13:36:21] [Rank 0] step:741/10000 train_time:58378ms step_avg:78.78ms +[2025-09-11 13:36:21] [Rank 0] step:741/10000 train_time:58378ms step_avg:78.78ms +[2025-09-11 13:36:22] [Rank 0] step:761/10000 train_time:59022ms step_avg:77.56ms +[2025-09-11 13:36:22] [Rank 0] step:761/10000 train_time:59022ms step_avg:77.56ms +[2025-09-11 13:36:23] [Rank 0] step:781/10000 train_time:59666ms step_avg:76.40ms +[2025-09-11 13:36:23] [Rank 0] step:781/10000 train_time:59666ms step_avg:76.40ms +[2025-09-11 13:36:23] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 13:36:23] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 13:36:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 13:36:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 13:36:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 13:36:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 13:36:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:36:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:36:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 13:36:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 13:36:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 13:36:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 13:37:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 13:37:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 13:37:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 13:37:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 13:37:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 13:37:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 13:37:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 13:37:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 13:37:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 13:37:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 13:37:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 13:37:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 13:37:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 13:37:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 13:37:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 13:37:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 13:37:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 13:37:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 13:37:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 13:37:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 13:37:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 13:37:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 13:37:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 13:37:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 13:37:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 13:37:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 13:37:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 13:37:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 13:37:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... 
+[2025-09-11 13:37:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 13:37:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 13:37:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 13:37:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 13:37:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 13:37:07] [Rank 0] PRINT: step:800/10000 val_loss:6.6099 total_sharp:1.7467e-01 L1_sharp:7.8694e-02 L2_sharp:8.1017e-02 L3_sharp:9.0987e-02 L4_sharp:1.2045e-01 L5_sharp:1.8992e-01 L6_sharp:2.7545e-01 L7_sharp:4.0678e-01 L8_sharp:3.4895e-01 L9_sharp:4.7445e-01 L10_sharp:4.9186e-01 L11_sharp:6.6946e-01 L12_sharp:1.1353e+00 total_fnorm:2.9062e+00 total_l1_linf:4.6080e+03 total_spectral:1.4609e+00 L1_fnorm:2.4707e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4707e-01 L4_fnorm:2.4609e-01 L5_fnorm:2.4512e-01 L6_fnorm:2.5000e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.3047e-01 L9_fnorm:2.1777e-01 L10_fnorm:1.9727e-01 L11_fnorm:1.6504e-01 L12_fnorm:1.2354e-01 L1_l1linf:8.0078e-02 L2_l1linf:7.9102e-02 L3_l1linf:7.8613e-02 L4_l1linf:8.0078e-02 L5_l1linf:8.1055e-02 L6_l1linf:8.3496e-02 L7_l1linf:8.0566e-02 L8_l1linf:7.8125e-02 L9_l1linf:7.0312e-02 L10_l1linf:5.9570e-02 L11_l1linf:4.4922e-02 L12_l1linf:3.7598e-02 L1_spectral:3.1331e-03 L2_spectral:3.1140e-03 L3_spectral:3.1177e-03 L4_spectral:3.1154e-03 L5_spectral:3.1083e-03 L6_spectral:3.1279e-03 L7_spectral:3.0891e-03 L8_spectral:3.0479e-03 L9_spectral:3.0337e-03 L10_spectral:2.9597e-03 L11_spectral:2.8633e-03 L12_spectral:2.6295e-03 train_time:60293ms step_avg:75.37ms +[2025-09-11 13:37:07] [Rank 0] PRINT: step:800/10000 val_loss:6.6099 total_sharp:1.7467e-01 L1_sharp:7.8694e-02 L2_sharp:8.1017e-02 L3_sharp:9.0987e-02 L4_sharp:1.2045e-01 L5_sharp:1.8992e-01 L6_sharp:2.7545e-01 L7_sharp:4.0678e-01 L8_sharp:3.4895e-01 
L9_sharp:4.7445e-01 L10_sharp:4.9186e-01 L11_sharp:6.6946e-01 L12_sharp:1.1353e+00 total_fnorm:2.9062e+00 total_l1_linf:4.6080e+03 total_spectral:1.4609e+00 L1_fnorm:2.4707e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4707e-01 L4_fnorm:2.4609e-01 L5_fnorm:2.4512e-01 L6_fnorm:2.5000e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.3047e-01 L9_fnorm:2.1777e-01 L10_fnorm:1.9727e-01 L11_fnorm:1.6504e-01 L12_fnorm:1.2354e-01 L1_l1linf:8.0078e-02 L2_l1linf:7.9102e-02 L3_l1linf:7.8613e-02 L4_l1linf:8.0078e-02 L5_l1linf:8.1055e-02 L6_l1linf:8.3496e-02 L7_l1linf:8.0566e-02 L8_l1linf:7.8125e-02 L9_l1linf:7.0312e-02 L10_l1linf:5.9570e-02 L11_l1linf:4.4922e-02 L12_l1linf:3.7598e-02 L1_spectral:3.1331e-03 L2_spectral:3.1140e-03 L3_spectral:3.1177e-03 L4_spectral:3.1154e-03 L5_spectral:3.1083e-03 L6_spectral:3.1279e-03 L7_spectral:3.0891e-03 L8_spectral:3.0479e-03 L9_spectral:3.0337e-03 L10_spectral:2.9597e-03 L11_spectral:2.8633e-03 L12_spectral:2.6295e-03 train_time:60293ms step_avg:75.37ms +[2025-09-11 13:37:08] [Rank 0] step:801/10000 train_time:61744ms step_avg:77.08ms +[2025-09-11 13:37:08] [Rank 0] step:801/10000 train_time:61744ms step_avg:77.08ms +[2025-09-11 13:37:09] [Rank 0] step:821/10000 train_time:62384ms step_avg:75.99ms +[2025-09-11 13:37:09] [Rank 0] step:821/10000 train_time:62384ms step_avg:75.99ms +[2025-09-11 13:37:10] [Rank 0] step:841/10000 train_time:63031ms step_avg:74.95ms +[2025-09-11 13:37:10] [Rank 0] step:841/10000 train_time:63031ms step_avg:74.95ms +[2025-09-11 13:37:10] [Rank 0] step:861/10000 train_time:63676ms step_avg:73.96ms +[2025-09-11 13:37:10] [Rank 0] step:861/10000 train_time:63676ms step_avg:73.96ms +[2025-09-11 13:37:11] [Rank 0] step:881/10000 train_time:64323ms step_avg:73.01ms +[2025-09-11 13:37:11] [Rank 0] step:881/10000 train_time:64323ms step_avg:73.01ms +[2025-09-11 13:37:12] [Rank 0] step:901/10000 train_time:64968ms step_avg:72.11ms +[2025-09-11 13:37:12] [Rank 0] step:901/10000 train_time:64968ms step_avg:72.11ms +[2025-09-11 13:37:12] [Rank 0] 
step:921/10000 train_time:65617ms step_avg:71.25ms +[2025-09-11 13:37:12] [Rank 0] step:921/10000 train_time:65617ms step_avg:71.25ms +[2025-09-11 13:37:13] [Rank 0] step:941/10000 train_time:66263ms step_avg:70.42ms +[2025-09-11 13:37:13] [Rank 0] step:941/10000 train_time:66263ms step_avg:70.42ms +[2025-09-11 13:37:14] [Rank 0] step:961/10000 train_time:66910ms step_avg:69.63ms +[2025-09-11 13:37:14] [Rank 0] step:961/10000 train_time:66910ms step_avg:69.63ms +[2025-09-11 13:37:14] [Rank 0] step:981/10000 train_time:67556ms step_avg:68.86ms +[2025-09-11 13:37:14] [Rank 0] step:981/10000 train_time:67556ms step_avg:68.86ms +[2025-09-11 13:37:15] [Rank 0] step:1001/10000 train_time:68202ms step_avg:68.13ms +[2025-09-11 13:37:15] [Rank 0] step:1001/10000 train_time:68202ms step_avg:68.13ms +[2025-09-11 13:37:16] [Rank 0] step:1021/10000 train_time:68849ms step_avg:67.43ms +[2025-09-11 13:37:16] [Rank 0] step:1021/10000 train_time:68849ms step_avg:67.43ms +[2025-09-11 13:37:16] [Rank 0] step:1041/10000 train_time:69494ms step_avg:66.76ms +[2025-09-11 13:37:16] [Rank 0] step:1041/10000 train_time:69494ms step_avg:66.76ms +[2025-09-11 13:37:17] [Rank 0] step:1061/10000 train_time:70139ms step_avg:66.11ms +[2025-09-11 13:37:17] [Rank 0] step:1061/10000 train_time:70139ms step_avg:66.11ms +[2025-09-11 13:37:18] [Rank 0] step:1081/10000 train_time:70786ms step_avg:65.48ms +[2025-09-11 13:37:18] [Rank 0] step:1081/10000 train_time:70786ms step_avg:65.48ms +[2025-09-11 13:37:18] [Rank 0] step:1101/10000 train_time:71430ms step_avg:64.88ms +[2025-09-11 13:37:18] [Rank 0] step:1101/10000 train_time:71430ms step_avg:64.88ms +[2025-09-11 13:37:19] [Rank 0] step:1121/10000 train_time:72075ms step_avg:64.30ms +[2025-09-11 13:37:19] [Rank 0] step:1121/10000 train_time:72075ms step_avg:64.30ms +[2025-09-11 13:37:19] [Rank 0] step:1141/10000 train_time:72727ms step_avg:63.74ms +[2025-09-11 13:37:19] [Rank 0] step:1141/10000 train_time:72727ms step_avg:63.74ms +[2025-09-11 13:37:20] 
[Rank 0] step:1161/10000 train_time:73373ms step_avg:63.20ms +[2025-09-11 13:37:20] [Rank 0] step:1161/10000 train_time:73373ms step_avg:63.20ms +[2025-09-11 13:37:21] [Rank 0] step:1181/10000 train_time:74019ms step_avg:62.67ms +[2025-09-11 13:37:21] [Rank 0] step:1181/10000 train_time:74019ms step_avg:62.67ms +[2025-09-11 13:37:21] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 13:37:21] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 13:37:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 13:37:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 13:37:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 13:37:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 13:37:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:37:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:37:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 13:37:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 13:37:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 13:37:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 13:37:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 13:37:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 13:37:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... 
+[2025-09-11 13:37:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 13:37:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 13:37:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 13:37:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 13:37:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 13:37:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 13:37:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 13:37:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 13:37:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 13:37:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 13:37:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 13:37:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 13:37:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 13:37:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 13:37:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 13:37:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 13:37:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 13:37:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 13:37:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 13:37:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... 
+[2025-09-11 13:37:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 13:37:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 13:37:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 13:37:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 13:37:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 13:37:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 13:37:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 13:37:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 13:37:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 13:37:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 13:37:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:37:32] [Rank 0] PRINT: step:1200/10000 val_loss:6.3598 total_sharp:1.0381e-01 L1_sharp:3.0188e-02 L2_sharp:3.0436e-02 L3_sharp:3.3067e-02 L4_sharp:4.7157e-02 L5_sharp:8.2112e-02 L6_sharp:1.1085e-01 L7_sharp:1.5954e-01 L8_sharp:1.5338e-01 L9_sharp:1.5170e-01 L10_sharp:1.4065e-01 L11_sharp:2.0732e-01 L12_sharp:4.4607e-01 total_fnorm:2.3750e+00 total_l1_linf:3.9520e+03 total_spectral:1.2031e+00 L1_fnorm:2.4805e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4902e-01 L7_fnorm:2.5000e-01 L8_fnorm:2.4707e-01 L9_fnorm:2.5000e-01 L10_fnorm:2.4414e-01 L11_fnorm:2.2949e-01 L12_fnorm:1.8848e-01 L1_l1linf:7.5195e-02 L2_l1linf:7.5195e-02 L3_l1linf:7.4219e-02 L4_l1linf:7.4219e-02 L5_l1linf:7.3730e-02 L6_l1linf:7.6172e-02 L7_l1linf:7.8125e-02 L8_l1linf:8.0566e-02 L9_l1linf:8.1055e-02 L10_l1linf:7.8613e-02 L11_l1linf:6.6895e-02 L12_l1linf:4.5410e-02 L1_spectral:3.1297e-03 L2_spectral:3.1354e-03 L3_spectral:3.1275e-03 L4_spectral:3.1403e-03 L5_spectral:3.1346e-03 L6_spectral:3.1313e-03 L7_spectral:3.1323e-03 L8_spectral:3.1076e-03 L9_spectral:3.1138e-03 L10_spectral:3.1098e-03 L11_spectral:3.0976e-03 L12_spectral:2.9551e-03 train_time:74647ms step_avg:62.21ms +[2025-09-11 13:37:32] [Rank 0] PRINT: step:1200/10000 val_loss:6.3598 total_sharp:1.0381e-01 L1_sharp:3.0188e-02 L2_sharp:3.0436e-02 L3_sharp:3.3067e-02 L4_sharp:4.7157e-02 L5_sharp:8.2112e-02 L6_sharp:1.1085e-01 L7_sharp:1.5954e-01 L8_sharp:1.5338e-01 L9_sharp:1.5170e-01 L10_sharp:1.4065e-01 L11_sharp:2.0732e-01 L12_sharp:4.4607e-01 total_fnorm:2.3750e+00 total_l1_linf:3.9520e+03 total_spectral:1.2031e+00 L1_fnorm:2.4805e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4902e-01 L7_fnorm:2.5000e-01 L8_fnorm:2.4707e-01 L9_fnorm:2.5000e-01 L10_fnorm:2.4414e-01 L11_fnorm:2.2949e-01 L12_fnorm:1.8848e-01 L1_l1linf:7.5195e-02 L2_l1linf:7.5195e-02 L3_l1linf:7.4219e-02 L4_l1linf:7.4219e-02 L5_l1linf:7.3730e-02 
L6_l1linf:7.6172e-02 L7_l1linf:7.8125e-02 L8_l1linf:8.0566e-02 L9_l1linf:8.1055e-02 L10_l1linf:7.8613e-02 L11_l1linf:6.6895e-02 L12_l1linf:4.5410e-02 L1_spectral:3.1297e-03 L2_spectral:3.1354e-03 L3_spectral:3.1275e-03 L4_spectral:3.1403e-03 L5_spectral:3.1346e-03 L6_spectral:3.1313e-03 L7_spectral:3.1323e-03 L8_spectral:3.1076e-03 L9_spectral:3.1138e-03 L10_spectral:3.1098e-03 L11_spectral:3.0976e-03 L12_spectral:2.9551e-03 train_time:74647ms step_avg:62.21ms +[2025-09-11 13:37:34] [Rank 0] step:1201/10000 train_time:76122ms step_avg:63.38ms +[2025-09-11 13:37:34] [Rank 0] step:1201/10000 train_time:76122ms step_avg:63.38ms +[2025-09-11 13:37:34] [Rank 0] step:1221/10000 train_time:76787ms step_avg:62.89ms +[2025-09-11 13:37:34] [Rank 0] step:1221/10000 train_time:76787ms step_avg:62.89ms +[2025-09-11 13:37:35] [Rank 0] step:1241/10000 train_time:77433ms step_avg:62.40ms +[2025-09-11 13:37:35] [Rank 0] step:1241/10000 train_time:77433ms step_avg:62.40ms +[2025-09-11 13:37:36] [Rank 0] step:1261/10000 train_time:78077ms step_avg:61.92ms +[2025-09-11 13:37:36] [Rank 0] step:1261/10000 train_time:78077ms step_avg:61.92ms +[2025-09-11 13:37:36] [Rank 0] step:1281/10000 train_time:78722ms step_avg:61.45ms +[2025-09-11 13:37:36] [Rank 0] step:1281/10000 train_time:78722ms step_avg:61.45ms +[2025-09-11 13:37:37] [Rank 0] step:1301/10000 train_time:79366ms step_avg:61.00ms +[2025-09-11 13:37:37] [Rank 0] step:1301/10000 train_time:79366ms step_avg:61.00ms +[2025-09-11 13:37:38] [Rank 0] step:1321/10000 train_time:80011ms step_avg:60.57ms +[2025-09-11 13:37:38] [Rank 0] step:1321/10000 train_time:80011ms step_avg:60.57ms +[2025-09-11 13:37:38] [Rank 0] step:1341/10000 train_time:80655ms step_avg:60.15ms +[2025-09-11 13:37:38] [Rank 0] step:1341/10000 train_time:80655ms step_avg:60.15ms +[2025-09-11 13:37:39] [Rank 0] step:1361/10000 train_time:81299ms step_avg:59.74ms +[2025-09-11 13:37:39] [Rank 0] step:1361/10000 train_time:81299ms step_avg:59.74ms +[2025-09-11 13:37:40] 
[Rank 0] step:1381/10000 train_time:81943ms step_avg:59.34ms +[2025-09-11 13:37:40] [Rank 0] step:1381/10000 train_time:81943ms step_avg:59.34ms +[2025-09-11 13:37:40] [Rank 0] step:1401/10000 train_time:82587ms step_avg:58.95ms +[2025-09-11 13:37:40] [Rank 0] step:1401/10000 train_time:82587ms step_avg:58.95ms +[2025-09-11 13:37:41] [Rank 0] step:1421/10000 train_time:83231ms step_avg:58.57ms +[2025-09-11 13:37:41] [Rank 0] step:1421/10000 train_time:83231ms step_avg:58.57ms +[2025-09-11 13:37:41] [Rank 0] step:1441/10000 train_time:83875ms step_avg:58.21ms +[2025-09-11 13:37:41] [Rank 0] step:1441/10000 train_time:83875ms step_avg:58.21ms +[2025-09-11 13:37:42] [Rank 0] step:1461/10000 train_time:84520ms step_avg:57.85ms +[2025-09-11 13:37:42] [Rank 0] step:1461/10000 train_time:84520ms step_avg:57.85ms +[2025-09-11 13:37:43] [Rank 0] step:1481/10000 train_time:85164ms step_avg:57.50ms +[2025-09-11 13:37:43] [Rank 0] step:1481/10000 train_time:85164ms step_avg:57.50ms +[2025-09-11 13:37:43] [Rank 0] step:1501/10000 train_time:85812ms step_avg:57.17ms +[2025-09-11 13:37:43] [Rank 0] step:1501/10000 train_time:85812ms step_avg:57.17ms +[2025-09-11 13:37:44] [Rank 0] step:1521/10000 train_time:86461ms step_avg:56.84ms +[2025-09-11 13:37:44] [Rank 0] step:1521/10000 train_time:86461ms step_avg:56.84ms +[2025-09-11 13:37:45] [Rank 0] step:1541/10000 train_time:87109ms step_avg:56.53ms +[2025-09-11 13:37:45] [Rank 0] step:1541/10000 train_time:87109ms step_avg:56.53ms +[2025-09-11 13:37:45] [Rank 0] step:1561/10000 train_time:87756ms step_avg:56.22ms +[2025-09-11 13:37:45] [Rank 0] step:1561/10000 train_time:87756ms step_avg:56.22ms +[2025-09-11 13:37:46] [Rank 0] step:1581/10000 train_time:88404ms step_avg:55.92ms +[2025-09-11 13:37:46] [Rank 0] step:1581/10000 train_time:88404ms step_avg:55.92ms +[2025-09-11 13:37:47] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 13:37:47] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 13:37:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 13:37:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 13:37:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 13:37:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 13:37:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:37:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:37:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 13:37:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 13:37:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 13:37:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 13:37:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 13:37:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 13:37:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 13:37:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 13:37:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 13:37:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 13:37:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 13:37:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 13:37:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 13:37:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 13:37:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 13:37:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 13:37:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 13:37:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 13:37:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 13:37:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 13:37:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 13:37:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 13:37:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 13:37:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 13:37:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 13:37:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 13:37:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 13:37:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 13:37:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 13:37:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 13:37:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 13:37:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 13:37:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 13:37:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 13:37:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 13:37:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 13:37:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 13:37:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 13:37:57] [Rank 0] PRINT: step:1600/10000 val_loss:6.2008 total_sharp:1.0222e-01 L1_sharp:2.3059e-02 L2_sharp:1.8649e-02 L3_sharp:1.9883e-02 L4_sharp:3.2200e-02 L5_sharp:5.0755e-02 L6_sharp:7.0275e-02 L7_sharp:1.0987e-01 L8_sharp:1.3630e-01 L9_sharp:1.3552e-01 L10_sharp:1.3033e-01 L11_sharp:1.7985e-01 L12_sharp:4.5384e-01 total_fnorm:2.1875e+00 total_l1_linf:3.6640e+03 total_spectral:1.1094e+00 L1_fnorm:2.4902e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4902e-01 L7_fnorm:2.5000e-01 L8_fnorm:2.4805e-01 L9_fnorm:2.5195e-01 L10_fnorm:2.5195e-01 L11_fnorm:2.4121e-01 L12_fnorm:2.0410e-01 L1_l1linf:7.3730e-02 L2_l1linf:7.3242e-02 L3_l1linf:7.2754e-02 L4_l1linf:7.1777e-02 L5_l1linf:7.0801e-02 L6_l1linf:7.3730e-02 L7_l1linf:7.5195e-02 L8_l1linf:7.6172e-02 L9_l1linf:7.8125e-02 L10_l1linf:7.8613e-02 L11_l1linf:6.8848e-02 L12_l1linf:4.1748e-02 L1_spectral:3.1346e-03 L2_spectral:3.1466e-03 L3_spectral:3.1346e-03 L4_spectral:3.1405e-03 L5_spectral:3.1401e-03 L6_spectral:3.1314e-03 L7_spectral:3.1573e-03 L8_spectral:3.1012e-03 L9_spectral:3.1349e-03 L10_spectral:3.1308e-03 L11_spectral:3.1195e-03 L12_spectral:3.0407e-03 train_time:89033ms step_avg:55.65ms +[2025-09-11 13:37:57] [Rank 0] PRINT: step:1600/10000 
val_loss:6.2008 total_sharp:1.0222e-01 L1_sharp:2.3059e-02 L2_sharp:1.8649e-02 L3_sharp:1.9883e-02 L4_sharp:3.2200e-02 L5_sharp:5.0755e-02 L6_sharp:7.0275e-02 L7_sharp:1.0987e-01 L8_sharp:1.3630e-01 L9_sharp:1.3552e-01 L10_sharp:1.3033e-01 L11_sharp:1.7985e-01 L12_sharp:4.5384e-01 total_fnorm:2.1875e+00 total_l1_linf:3.6640e+03 total_spectral:1.1094e+00 L1_fnorm:2.4902e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4902e-01 L7_fnorm:2.5000e-01 L8_fnorm:2.4805e-01 L9_fnorm:2.5195e-01 L10_fnorm:2.5195e-01 L11_fnorm:2.4121e-01 L12_fnorm:2.0410e-01 L1_l1linf:7.3730e-02 L2_l1linf:7.3242e-02 L3_l1linf:7.2754e-02 L4_l1linf:7.1777e-02 L5_l1linf:7.0801e-02 L6_l1linf:7.3730e-02 L7_l1linf:7.5195e-02 L8_l1linf:7.6172e-02 L9_l1linf:7.8125e-02 L10_l1linf:7.8613e-02 L11_l1linf:6.8848e-02 L12_l1linf:4.1748e-02 L1_spectral:3.1346e-03 L2_spectral:3.1466e-03 L3_spectral:3.1346e-03 L4_spectral:3.1405e-03 L5_spectral:3.1401e-03 L6_spectral:3.1314e-03 L7_spectral:3.1573e-03 L8_spectral:3.1012e-03 L9_spectral:3.1349e-03 L10_spectral:3.1308e-03 L11_spectral:3.1195e-03 L12_spectral:3.0407e-03 train_time:89033ms step_avg:55.65ms +[2025-09-11 13:37:59] [Rank 0] step:1601/10000 train_time:90489ms step_avg:56.52ms +[2025-09-11 13:37:59] [Rank 0] step:1601/10000 train_time:90489ms step_avg:56.52ms +[2025-09-11 13:37:59] [Rank 0] step:1621/10000 train_time:91136ms step_avg:56.22ms +[2025-09-11 13:37:59] [Rank 0] step:1621/10000 train_time:91136ms step_avg:56.22ms +[2025-09-11 13:38:00] [Rank 0] step:1641/10000 train_time:91787ms step_avg:55.93ms +[2025-09-11 13:38:00] [Rank 0] step:1641/10000 train_time:91787ms step_avg:55.93ms +[2025-09-11 13:38:01] [Rank 0] step:1661/10000 train_time:92437ms step_avg:55.65ms +[2025-09-11 13:38:01] [Rank 0] step:1661/10000 train_time:92437ms step_avg:55.65ms +[2025-09-11 13:38:01] [Rank 0] step:1681/10000 train_time:93087ms step_avg:55.38ms +[2025-09-11 13:38:01] [Rank 0] step:1681/10000 train_time:93087ms 
step_avg:55.38ms +[2025-09-11 13:38:02] [Rank 0] step:1701/10000 train_time:93737ms step_avg:55.11ms +[2025-09-11 13:38:02] [Rank 0] step:1701/10000 train_time:93737ms step_avg:55.11ms +[2025-09-11 13:38:03] [Rank 0] step:1721/10000 train_time:94386ms step_avg:54.84ms +[2025-09-11 13:38:03] [Rank 0] step:1721/10000 train_time:94386ms step_avg:54.84ms +[2025-09-11 13:38:03] [Rank 0] step:1741/10000 train_time:95035ms step_avg:54.59ms +[2025-09-11 13:38:03] [Rank 0] step:1741/10000 train_time:95035ms step_avg:54.59ms +[2025-09-11 13:38:04] [Rank 0] step:1761/10000 train_time:95684ms step_avg:54.33ms +[2025-09-11 13:38:04] [Rank 0] step:1761/10000 train_time:95684ms step_avg:54.33ms +[2025-09-11 13:38:05] [Rank 0] step:1781/10000 train_time:96333ms step_avg:54.09ms +[2025-09-11 13:38:05] [Rank 0] step:1781/10000 train_time:96333ms step_avg:54.09ms +[2025-09-11 13:38:05] [Rank 0] step:1801/10000 train_time:96982ms step_avg:53.85ms +[2025-09-11 13:38:05] [Rank 0] step:1801/10000 train_time:96982ms step_avg:53.85ms +[2025-09-11 13:38:06] [Rank 0] step:1821/10000 train_time:97631ms step_avg:53.61ms +[2025-09-11 13:38:06] [Rank 0] step:1821/10000 train_time:97631ms step_avg:53.61ms +[2025-09-11 13:38:07] [Rank 0] step:1841/10000 train_time:98280ms step_avg:53.38ms +[2025-09-11 13:38:07] [Rank 0] step:1841/10000 train_time:98280ms step_avg:53.38ms +[2025-09-11 13:38:07] [Rank 0] step:1861/10000 train_time:98928ms step_avg:53.16ms +[2025-09-11 13:38:07] [Rank 0] step:1861/10000 train_time:98928ms step_avg:53.16ms +[2025-09-11 13:38:08] [Rank 0] step:1881/10000 train_time:99577ms step_avg:52.94ms +[2025-09-11 13:38:08] [Rank 0] step:1881/10000 train_time:99577ms step_avg:52.94ms +[2025-09-11 13:38:08] [Rank 0] step:1901/10000 train_time:100226ms step_avg:52.72ms +[2025-09-11 13:38:08] [Rank 0] step:1901/10000 train_time:100226ms step_avg:52.72ms +[2025-09-11 13:38:09] [Rank 0] step:1921/10000 train_time:100875ms step_avg:52.51ms +[2025-09-11 13:38:09] [Rank 0] step:1921/10000 
train_time:100875ms step_avg:52.51ms +[2025-09-11 13:38:10] [Rank 0] step:1941/10000 train_time:101526ms step_avg:52.31ms +[2025-09-11 13:38:10] [Rank 0] step:1941/10000 train_time:101526ms step_avg:52.31ms +[2025-09-11 13:38:10] [Rank 0] step:1961/10000 train_time:102174ms step_avg:52.10ms +[2025-09-11 13:38:10] [Rank 0] step:1961/10000 train_time:102174ms step_avg:52.10ms +[2025-09-11 13:38:11] [Rank 0] step:1981/10000 train_time:102824ms step_avg:51.90ms +[2025-09-11 13:38:11] [Rank 0] step:1981/10000 train_time:102824ms step_avg:51.90ms +[2025-09-11 13:38:12] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 13:38:12] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 13:38:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 13:38:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 13:38:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 13:38:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 13:38:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:38:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:38:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 13:38:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 13:38:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 13:38:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 13:38:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 13:38:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 13:38:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 13:38:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 13:38:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 13:38:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 13:38:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 13:38:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 13:38:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 13:38:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 13:38:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 13:38:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 13:38:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 13:38:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 13:38:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 13:38:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 13:38:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 13:38:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 13:38:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 13:38:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 13:38:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 13:38:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 13:38:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 13:38:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 13:38:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 13:38:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 13:38:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 13:38:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 13:38:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 13:38:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 13:38:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 13:38:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 13:38:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 13:38:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:38:22] [Rank 0] PRINT: step:2000/10000 val_loss:6.0549 total_sharp:1.0746e-01 L1_sharp:1.9165e-02 L2_sharp:1.4696e-02 L3_sharp:1.3110e-02 L4_sharp:1.9225e-02 L5_sharp:3.8207e-02 L6_sharp:4.6506e-02 L7_sharp:8.5868e-02 L8_sharp:1.4180e-01 L9_sharp:1.1064e-01 L10_sharp:1.1952e-01 L11_sharp:2.5498e-01 L12_sharp:7.1004e-01 total_fnorm:2.1094e+00 total_l1_linf:3.5360e+03 total_spectral:1.0703e+00 L1_fnorm:2.4902e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.5000e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4805e-01 L7_fnorm:2.5000e-01 L8_fnorm:2.4512e-01 L9_fnorm:2.5195e-01 L10_fnorm:2.5195e-01 L11_fnorm:2.4609e-01 L12_fnorm:2.0801e-01 L1_l1linf:7.3242e-02 L2_l1linf:7.3242e-02 L3_l1linf:7.3242e-02 L4_l1linf:7.0801e-02 L5_l1linf:6.9336e-02 L6_l1linf:7.0312e-02 L7_l1linf:7.4219e-02 L8_l1linf:7.3242e-02 L9_l1linf:7.4707e-02 L10_l1linf:7.6660e-02 L11_l1linf:6.9336e-02 L12_l1linf:3.8330e-02 L1_spectral:3.1656e-03 L2_spectral:3.1796e-03 L3_spectral:3.1764e-03 L4_spectral:3.1658e-03 L5_spectral:3.1632e-03 L6_spectral:3.1687e-03 L7_spectral:3.1308e-03 L8_spectral:3.1046e-03 L9_spectral:3.1267e-03 L10_spectral:3.1391e-03 L11_spectral:3.1390e-03 L12_spectral:3.0828e-03 train_time:103455ms step_avg:51.73ms +[2025-09-11 13:38:22] [Rank 0] PRINT: step:2000/10000 val_loss:6.0549 total_sharp:1.0746e-01 L1_sharp:1.9165e-02 L2_sharp:1.4696e-02 L3_sharp:1.3110e-02 L4_sharp:1.9225e-02 L5_sharp:3.8207e-02 L6_sharp:4.6506e-02 L7_sharp:8.5868e-02 L8_sharp:1.4180e-01 L9_sharp:1.1064e-01 L10_sharp:1.1952e-01 L11_sharp:2.5498e-01 L12_sharp:7.1004e-01 total_fnorm:2.1094e+00 total_l1_linf:3.5360e+03 total_spectral:1.0703e+00 L1_fnorm:2.4902e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.5000e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4805e-01 L7_fnorm:2.5000e-01 L8_fnorm:2.4512e-01 L9_fnorm:2.5195e-01 L10_fnorm:2.5195e-01 L11_fnorm:2.4609e-01 L12_fnorm:2.0801e-01 L1_l1linf:7.3242e-02 L2_l1linf:7.3242e-02 L3_l1linf:7.3242e-02 L4_l1linf:7.0801e-02 L5_l1linf:6.9336e-02 
L6_l1linf:7.0312e-02 L7_l1linf:7.4219e-02 L8_l1linf:7.3242e-02 L9_l1linf:7.4707e-02 L10_l1linf:7.6660e-02 L11_l1linf:6.9336e-02 L12_l1linf:3.8330e-02 L1_spectral:3.1656e-03 L2_spectral:3.1796e-03 L3_spectral:3.1764e-03 L4_spectral:3.1658e-03 L5_spectral:3.1632e-03 L6_spectral:3.1687e-03 L7_spectral:3.1308e-03 L8_spectral:3.1046e-03 L9_spectral:3.1267e-03 L10_spectral:3.1391e-03 L11_spectral:3.1390e-03 L12_spectral:3.0828e-03 train_time:103455ms step_avg:51.73ms +[2025-09-11 13:38:24] [Rank 0] step:2001/10000 train_time:104954ms step_avg:52.45ms +[2025-09-11 13:38:24] [Rank 0] step:2001/10000 train_time:104954ms step_avg:52.45ms +[2025-09-11 13:38:25] [Rank 0] step:2021/10000 train_time:105626ms step_avg:52.26ms +[2025-09-11 13:38:25] [Rank 0] step:2021/10000 train_time:105626ms step_avg:52.26ms +[2025-09-11 13:38:25] [Rank 0] step:2041/10000 train_time:106275ms step_avg:52.07ms +[2025-09-11 13:38:25] [Rank 0] step:2041/10000 train_time:106275ms step_avg:52.07ms +[2025-09-11 13:38:26] [Rank 0] step:2061/10000 train_time:106923ms step_avg:51.88ms +[2025-09-11 13:38:26] [Rank 0] step:2061/10000 train_time:106923ms step_avg:51.88ms +[2025-09-11 13:38:26] [Rank 0] step:2081/10000 train_time:107571ms step_avg:51.69ms +[2025-09-11 13:38:26] [Rank 0] step:2081/10000 train_time:107571ms step_avg:51.69ms +[2025-09-11 13:38:27] [Rank 0] step:2101/10000 train_time:108219ms step_avg:51.51ms +[2025-09-11 13:38:27] [Rank 0] step:2101/10000 train_time:108219ms step_avg:51.51ms +[2025-09-11 13:38:28] [Rank 0] step:2121/10000 train_time:108867ms step_avg:51.33ms +[2025-09-11 13:38:28] [Rank 0] step:2121/10000 train_time:108867ms step_avg:51.33ms +[2025-09-11 13:38:28] [Rank 0] step:2141/10000 train_time:109515ms step_avg:51.15ms +[2025-09-11 13:38:28] [Rank 0] step:2141/10000 train_time:109515ms step_avg:51.15ms +[2025-09-11 13:38:29] [Rank 0] step:2161/10000 train_time:110162ms step_avg:50.98ms +[2025-09-11 13:38:29] [Rank 0] step:2161/10000 train_time:110162ms step_avg:50.98ms 
+[2025-09-11 13:38:30] [Rank 0] step:2181/10000 train_time:110810ms step_avg:50.81ms +[2025-09-11 13:38:30] [Rank 0] step:2181/10000 train_time:110810ms step_avg:50.81ms +[2025-09-11 13:38:30] [Rank 0] step:2201/10000 train_time:111458ms step_avg:50.64ms +[2025-09-11 13:38:30] [Rank 0] step:2201/10000 train_time:111458ms step_avg:50.64ms +[2025-09-11 13:38:31] [Rank 0] step:2221/10000 train_time:112105ms step_avg:50.48ms +[2025-09-11 13:38:31] [Rank 0] step:2221/10000 train_time:112105ms step_avg:50.48ms +[2025-09-11 13:38:32] [Rank 0] step:2241/10000 train_time:112765ms step_avg:50.32ms +[2025-09-11 13:38:32] [Rank 0] step:2241/10000 train_time:112765ms step_avg:50.32ms +[2025-09-11 13:38:32] [Rank 0] step:2261/10000 train_time:113426ms step_avg:50.17ms +[2025-09-11 13:38:32] [Rank 0] step:2261/10000 train_time:113426ms step_avg:50.17ms +[2025-09-11 13:38:33] [Rank 0] step:2281/10000 train_time:114088ms step_avg:50.02ms +[2025-09-11 13:38:33] [Rank 0] step:2281/10000 train_time:114088ms step_avg:50.02ms +[2025-09-11 13:38:34] [Rank 0] step:2301/10000 train_time:114749ms step_avg:49.87ms +[2025-09-11 13:38:34] [Rank 0] step:2301/10000 train_time:114749ms step_avg:49.87ms +[2025-09-11 13:38:35] [Rank 0] step:2321/10000 train_time:115711ms step_avg:49.85ms +[2025-09-11 13:38:35] [Rank 0] step:2321/10000 train_time:115711ms step_avg:49.85ms +[2025-09-11 13:38:35] [Rank 0] step:2341/10000 train_time:116372ms step_avg:49.71ms +[2025-09-11 13:38:35] [Rank 0] step:2341/10000 train_time:116372ms step_avg:49.71ms +[2025-09-11 13:38:36] [Rank 0] step:2361/10000 train_time:117033ms step_avg:49.57ms +[2025-09-11 13:38:36] [Rank 0] step:2361/10000 train_time:117033ms step_avg:49.57ms +[2025-09-11 13:38:37] [Rank 0] step:2381/10000 train_time:117833ms step_avg:49.49ms +[2025-09-11 13:38:37] [Rank 0] step:2381/10000 train_time:117833ms step_avg:49.49ms +[2025-09-11 13:38:38] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 13:38:38] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 13:38:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 13:38:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 13:38:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 13:38:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 13:38:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:38:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:38:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 13:38:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 13:38:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 13:38:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 13:38:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 13:38:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 13:38:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 13:38:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 13:38:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 13:38:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 13:38:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 13:38:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 13:38:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 13:38:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 13:38:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 13:38:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 13:38:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 13:38:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 13:38:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 13:38:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 13:38:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 13:38:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 13:38:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 13:38:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 13:38:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 13:38:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 13:38:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 13:38:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 13:38:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 13:38:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 13:38:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 13:38:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 13:38:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 13:38:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 13:38:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 13:38:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 13:38:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 13:38:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 13:38:48] [Rank 0] PRINT: step:2400/10000 val_loss:5.9261 total_sharp:9.2591e-02 L1_sharp:1.0341e-02 L2_sharp:8.6345e-03 L3_sharp:9.6569e-03 L4_sharp:1.5075e-02 L5_sharp:2.4880e-02 L6_sharp:3.3919e-02 L7_sharp:6.3893e-02 L8_sharp:1.1895e-01 L9_sharp:1.1242e-01 L10_sharp:1.2493e-01 L11_sharp:1.7748e-01 L12_sharp:5.9206e-01 total_fnorm:2.0156e+00 total_l1_linf:3.3760e+03 total_spectral:1.0312e+00 L1_fnorm:2.4902e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4609e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4707e-01 L8_fnorm:2.4414e-01 L9_fnorm:2.5000e-01 L10_fnorm:2.5195e-01 L11_fnorm:2.5000e-01 L12_fnorm:2.1777e-01 L1_l1linf:7.1777e-02 L2_l1linf:7.1777e-02 L3_l1linf:7.1289e-02 L4_l1linf:6.9336e-02 L5_l1linf:6.8359e-02 L6_l1linf:6.8848e-02 L7_l1linf:7.0801e-02 L8_l1linf:6.9824e-02 L9_l1linf:7.0801e-02 L10_l1linf:7.3242e-02 L11_l1linf:6.9824e-02 L12_l1linf:4.1016e-02 L1_spectral:3.1883e-03 L2_spectral:3.1787e-03 L3_spectral:3.1571e-03 L4_spectral:3.1570e-03 L5_spectral:3.1555e-03 L6_spectral:3.1467e-03 L7_spectral:3.1585e-03 L8_spectral:3.1195e-03 L9_spectral:3.1457e-03 L10_spectral:3.1526e-03 L11_spectral:3.1509e-03 L12_spectral:3.1256e-03 train_time:118624ms step_avg:49.43ms +[2025-09-11 13:38:48] [Rank 0] PRINT: step:2400/10000 
val_loss:5.9261 total_sharp:9.2591e-02 L1_sharp:1.0341e-02 L2_sharp:8.6345e-03 L3_sharp:9.6569e-03 L4_sharp:1.5075e-02 L5_sharp:2.4880e-02 L6_sharp:3.3919e-02 L7_sharp:6.3893e-02 L8_sharp:1.1895e-01 L9_sharp:1.1242e-01 L10_sharp:1.2493e-01 L11_sharp:1.7748e-01 L12_sharp:5.9206e-01 total_fnorm:2.0156e+00 total_l1_linf:3.3760e+03 total_spectral:1.0312e+00 L1_fnorm:2.4902e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4609e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4707e-01 L8_fnorm:2.4414e-01 L9_fnorm:2.5000e-01 L10_fnorm:2.5195e-01 L11_fnorm:2.5000e-01 L12_fnorm:2.1777e-01 L1_l1linf:7.1777e-02 L2_l1linf:7.1777e-02 L3_l1linf:7.1289e-02 L4_l1linf:6.9336e-02 L5_l1linf:6.8359e-02 L6_l1linf:6.8848e-02 L7_l1linf:7.0801e-02 L8_l1linf:6.9824e-02 L9_l1linf:7.0801e-02 L10_l1linf:7.3242e-02 L11_l1linf:6.9824e-02 L12_l1linf:4.1016e-02 L1_spectral:3.1883e-03 L2_spectral:3.1787e-03 L3_spectral:3.1571e-03 L4_spectral:3.1570e-03 L5_spectral:3.1555e-03 L6_spectral:3.1467e-03 L7_spectral:3.1585e-03 L8_spectral:3.1195e-03 L9_spectral:3.1457e-03 L10_spectral:3.1526e-03 L11_spectral:3.1509e-03 L12_spectral:3.1256e-03 train_time:118624ms step_avg:49.43ms +[2025-09-11 13:38:50] [Rank 0] step:2401/10000 train_time:120100ms step_avg:50.02ms +[2025-09-11 13:38:50] [Rank 0] step:2401/10000 train_time:120100ms step_avg:50.02ms +[2025-09-11 13:38:51] [Rank 0] step:2421/10000 train_time:120758ms step_avg:49.88ms +[2025-09-11 13:38:51] [Rank 0] step:2421/10000 train_time:120758ms step_avg:49.88ms +[2025-09-11 13:38:51] [Rank 0] step:2441/10000 train_time:121420ms step_avg:49.74ms +[2025-09-11 13:38:51] [Rank 0] step:2441/10000 train_time:121420ms step_avg:49.74ms +[2025-09-11 13:38:52] [Rank 0] step:2461/10000 train_time:122082ms step_avg:49.61ms +[2025-09-11 13:38:52] [Rank 0] step:2461/10000 train_time:122082ms step_avg:49.61ms +[2025-09-11 13:38:53] [Rank 0] step:2481/10000 train_time:122744ms step_avg:49.47ms +[2025-09-11 13:38:53] [Rank 0] step:2481/10000 
train_time:122744ms step_avg:49.47ms +[2025-09-11 13:38:53] [Rank 0] step:2501/10000 train_time:123406ms step_avg:49.34ms +[2025-09-11 13:38:53] [Rank 0] step:2501/10000 train_time:123406ms step_avg:49.34ms +[2025-09-11 13:38:54] [Rank 0] step:2521/10000 train_time:124067ms step_avg:49.21ms +[2025-09-11 13:38:54] [Rank 0] step:2521/10000 train_time:124067ms step_avg:49.21ms +[2025-09-11 13:38:55] [Rank 0] step:2541/10000 train_time:124728ms step_avg:49.09ms +[2025-09-11 13:38:55] [Rank 0] step:2541/10000 train_time:124728ms step_avg:49.09ms +[2025-09-11 13:38:55] [Rank 0] step:2561/10000 train_time:125389ms step_avg:48.96ms +[2025-09-11 13:38:55] [Rank 0] step:2561/10000 train_time:125389ms step_avg:48.96ms +[2025-09-11 13:38:56] [Rank 0] step:2581/10000 train_time:126051ms step_avg:48.84ms +[2025-09-11 13:38:56] [Rank 0] step:2581/10000 train_time:126051ms step_avg:48.84ms +[2025-09-11 13:38:56] [Rank 0] step:2601/10000 train_time:126712ms step_avg:48.72ms +[2025-09-11 13:38:56] [Rank 0] step:2601/10000 train_time:126712ms step_avg:48.72ms +[2025-09-11 13:38:57] [Rank 0] step:2621/10000 train_time:127374ms step_avg:48.60ms +[2025-09-11 13:38:57] [Rank 0] step:2621/10000 train_time:127374ms step_avg:48.60ms +[2025-09-11 13:38:58] [Rank 0] step:2641/10000 train_time:128036ms step_avg:48.48ms +[2025-09-11 13:38:58] [Rank 0] step:2641/10000 train_time:128036ms step_avg:48.48ms +[2025-09-11 13:38:58] [Rank 0] step:2661/10000 train_time:128697ms step_avg:48.36ms +[2025-09-11 13:38:58] [Rank 0] step:2661/10000 train_time:128697ms step_avg:48.36ms +[2025-09-11 13:38:59] [Rank 0] step:2681/10000 train_time:129359ms step_avg:48.25ms +[2025-09-11 13:38:59] [Rank 0] step:2681/10000 train_time:129359ms step_avg:48.25ms +[2025-09-11 13:39:00] [Rank 0] step:2701/10000 train_time:130021ms step_avg:48.14ms +[2025-09-11 13:39:00] [Rank 0] step:2701/10000 train_time:130021ms step_avg:48.14ms +[2025-09-11 13:39:00] [Rank 0] step:2721/10000 train_time:130686ms step_avg:48.03ms 
+[2025-09-11 13:39:00] [Rank 0] step:2721/10000 train_time:130686ms step_avg:48.03ms +[2025-09-11 13:39:01] [Rank 0] step:2741/10000 train_time:131348ms step_avg:47.92ms +[2025-09-11 13:39:01] [Rank 0] step:2741/10000 train_time:131348ms step_avg:47.92ms +[2025-09-11 13:39:02] [Rank 0] step:2761/10000 train_time:132010ms step_avg:47.81ms +[2025-09-11 13:39:02] [Rank 0] step:2761/10000 train_time:132010ms step_avg:47.81ms +[2025-09-11 13:39:02] [Rank 0] step:2781/10000 train_time:132672ms step_avg:47.71ms +[2025-09-11 13:39:02] [Rank 0] step:2781/10000 train_time:132672ms step_avg:47.71ms +[2025-09-11 13:39:03] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 13:39:03] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 13:39:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 13:39:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 13:39:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 13:39:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 13:39:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:39:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:39:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 13:39:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 13:39:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 13:39:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 13:39:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 13:39:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 13:39:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 13:39:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 13:39:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 13:39:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 13:39:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 13:39:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 13:39:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 13:39:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 13:39:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 13:39:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 13:39:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 13:39:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 13:39:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 13:39:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 13:39:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 13:39:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 13:39:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 13:39:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 13:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 13:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 13:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 13:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 13:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 13:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 13:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 13:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 13:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 13:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 13:39:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 13:39:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 13:39:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 13:39:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:39:17] [Rank 0] PRINT: step:2800/10000 val_loss:5.8431 total_sharp:1.1902e-01 L1_sharp:1.4036e-02 L2_sharp:1.1569e-02 L3_sharp:1.4932e-02 L4_sharp:1.7334e-02 L5_sharp:3.0740e-02 L6_sharp:4.0780e-02 L7_sharp:7.5999e-02 L8_sharp:1.0730e-01 L9_sharp:1.0534e-01 L10_sharp:1.3532e-01 L11_sharp:2.0126e-01 L12_sharp:6.0137e-01 total_fnorm:1.8672e+00 total_l1_linf:3.1520e+03 total_spectral:9.4922e-01 L1_fnorm:2.4902e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4512e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4609e-01 L8_fnorm:2.4512e-01 L9_fnorm:2.5000e-01 L10_fnorm:2.5195e-01 L11_fnorm:2.4902e-01 L12_fnorm:2.1289e-01 L1_l1linf:7.0801e-02 L2_l1linf:7.1777e-02 L3_l1linf:6.9336e-02 L4_l1linf:6.8359e-02 L5_l1linf:6.6895e-02 L6_l1linf:6.6895e-02 L7_l1linf:6.8848e-02 L8_l1linf:6.9824e-02 L9_l1linf:7.0312e-02 L10_l1linf:7.2754e-02 L11_l1linf:6.7383e-02 L12_l1linf:3.7842e-02 L1_spectral:3.1904e-03 L2_spectral:3.1793e-03 L3_spectral:3.1886e-03 L4_spectral:3.1663e-03 L5_spectral:3.1455e-03 L6_spectral:3.1530e-03 L7_spectral:3.1527e-03 L8_spectral:3.1245e-03 L9_spectral:3.1469e-03 L10_spectral:3.1550e-03 L11_spectral:3.1508e-03 L12_spectral:3.1183e-03 train_time:133315ms step_avg:47.61ms +[2025-09-11 13:39:17] [Rank 0] PRINT: step:2800/10000 val_loss:5.8431 total_sharp:1.1902e-01 L1_sharp:1.4036e-02 L2_sharp:1.1569e-02 L3_sharp:1.4932e-02 L4_sharp:1.7334e-02 L5_sharp:3.0740e-02 L6_sharp:4.0780e-02 L7_sharp:7.5999e-02 L8_sharp:1.0730e-01 L9_sharp:1.0534e-01 L10_sharp:1.3532e-01 L11_sharp:2.0126e-01 L12_sharp:6.0137e-01 total_fnorm:1.8672e+00 total_l1_linf:3.1520e+03 total_spectral:9.4922e-01 L1_fnorm:2.4902e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4512e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4609e-01 L8_fnorm:2.4512e-01 L9_fnorm:2.5000e-01 L10_fnorm:2.5195e-01 L11_fnorm:2.4902e-01 L12_fnorm:2.1289e-01 L1_l1linf:7.0801e-02 L2_l1linf:7.1777e-02 L3_l1linf:6.9336e-02 L4_l1linf:6.8359e-02 L5_l1linf:6.6895e-02 
L6_l1linf:6.6895e-02 L7_l1linf:6.8848e-02 L8_l1linf:6.9824e-02 L9_l1linf:7.0312e-02 L10_l1linf:7.2754e-02 L11_l1linf:6.7383e-02 L12_l1linf:3.7842e-02 L1_spectral:3.1904e-03 L2_spectral:3.1793e-03 L3_spectral:3.1886e-03 L4_spectral:3.1663e-03 L5_spectral:3.1455e-03 L6_spectral:3.1530e-03 L7_spectral:3.1527e-03 L8_spectral:3.1245e-03 L9_spectral:3.1469e-03 L10_spectral:3.1550e-03 L11_spectral:3.1508e-03 L12_spectral:3.1183e-03 train_time:133315ms step_avg:47.61ms +[2025-09-11 13:39:18] [Rank 0] step:2801/10000 train_time:135089ms step_avg:48.23ms +[2025-09-11 13:39:18] [Rank 0] step:2801/10000 train_time:135089ms step_avg:48.23ms +[2025-09-11 13:39:19] [Rank 0] step:2821/10000 train_time:135755ms step_avg:48.12ms +[2025-09-11 13:39:19] [Rank 0] step:2821/10000 train_time:135755ms step_avg:48.12ms +[2025-09-11 13:39:20] [Rank 0] step:2841/10000 train_time:136419ms step_avg:48.02ms +[2025-09-11 13:39:20] [Rank 0] step:2841/10000 train_time:136419ms step_avg:48.02ms +[2025-09-11 13:39:20] [Rank 0] step:2861/10000 train_time:137082ms step_avg:47.91ms +[2025-09-11 13:39:20] [Rank 0] step:2861/10000 train_time:137082ms step_avg:47.91ms +[2025-09-11 13:39:21] [Rank 0] step:2881/10000 train_time:137746ms step_avg:47.81ms +[2025-09-11 13:39:21] [Rank 0] step:2881/10000 train_time:137746ms step_avg:47.81ms +[2025-09-11 13:39:22] [Rank 0] step:2901/10000 train_time:138409ms step_avg:47.71ms +[2025-09-11 13:39:22] [Rank 0] step:2901/10000 train_time:138409ms step_avg:47.71ms +[2025-09-11 13:39:22] [Rank 0] step:2921/10000 train_time:139071ms step_avg:47.61ms +[2025-09-11 13:39:22] [Rank 0] step:2921/10000 train_time:139071ms step_avg:47.61ms +[2025-09-11 13:39:23] [Rank 0] step:2941/10000 train_time:139734ms step_avg:47.51ms +[2025-09-11 13:39:23] [Rank 0] step:2941/10000 train_time:139734ms step_avg:47.51ms +[2025-09-11 13:39:24] [Rank 0] step:2961/10000 train_time:140397ms step_avg:47.42ms +[2025-09-11 13:39:24] [Rank 0] step:2961/10000 train_time:140397ms step_avg:47.42ms 
+[2025-09-11 13:39:24] [Rank 0] step:2981/10000 train_time:141062ms step_avg:47.32ms +[2025-09-11 13:39:24] [Rank 0] step:2981/10000 train_time:141062ms step_avg:47.32ms +[2025-09-11 13:39:25] [Rank 0] step:3001/10000 train_time:141728ms step_avg:47.23ms +[2025-09-11 13:39:25] [Rank 0] step:3001/10000 train_time:141728ms step_avg:47.23ms +[2025-09-11 13:39:26] [Rank 0] step:3021/10000 train_time:142393ms step_avg:47.13ms +[2025-09-11 13:39:26] [Rank 0] step:3021/10000 train_time:142393ms step_avg:47.13ms +[2025-09-11 13:39:26] [Rank 0] step:3041/10000 train_time:143060ms step_avg:47.04ms +[2025-09-11 13:39:26] [Rank 0] step:3041/10000 train_time:143060ms step_avg:47.04ms +[2025-09-11 13:39:27] [Rank 0] step:3061/10000 train_time:143725ms step_avg:46.95ms +[2025-09-11 13:39:27] [Rank 0] step:3061/10000 train_time:143725ms step_avg:46.95ms +[2025-09-11 13:39:28] [Rank 0] step:3081/10000 train_time:144391ms step_avg:46.87ms +[2025-09-11 13:39:28] [Rank 0] step:3081/10000 train_time:144391ms step_avg:46.87ms +[2025-09-11 13:39:28] [Rank 0] step:3101/10000 train_time:145057ms step_avg:46.78ms +[2025-09-11 13:39:28] [Rank 0] step:3101/10000 train_time:145057ms step_avg:46.78ms +[2025-09-11 13:39:29] [Rank 0] step:3121/10000 train_time:145724ms step_avg:46.69ms +[2025-09-11 13:39:29] [Rank 0] step:3121/10000 train_time:145724ms step_avg:46.69ms +[2025-09-11 13:39:30] [Rank 0] step:3141/10000 train_time:146389ms step_avg:46.61ms +[2025-09-11 13:39:30] [Rank 0] step:3141/10000 train_time:146389ms step_avg:46.61ms +[2025-09-11 13:39:30] [Rank 0] step:3161/10000 train_time:147054ms step_avg:46.52ms +[2025-09-11 13:39:30] [Rank 0] step:3161/10000 train_time:147054ms step_avg:46.52ms +[2025-09-11 13:39:31] [Rank 0] step:3181/10000 train_time:147719ms step_avg:46.44ms +[2025-09-11 13:39:31] [Rank 0] step:3181/10000 train_time:147719ms step_avg:46.44ms +[2025-09-11 13:39:32] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 13:39:32] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 13:39:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 13:39:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 13:39:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 13:39:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 13:39:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:39:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:39:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 13:39:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 13:39:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 13:39:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 13:39:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 13:39:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 13:39:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 13:39:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 13:39:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 13:39:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 13:39:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 13:39:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 13:39:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 13:39:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 13:39:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 13:39:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 13:39:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 13:39:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 13:39:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 13:39:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 13:39:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 13:39:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 13:39:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 13:39:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 13:39:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 13:39:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 13:39:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 13:39:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 13:39:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 13:39:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 13:39:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 13:39:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 13:39:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 13:39:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 13:39:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 13:39:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 13:39:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 13:39:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 13:39:43] [Rank 0] PRINT: step:3200/10000 val_loss:5.7655 total_sharp:7.1666e-02 L1_sharp:1.1486e-02 L2_sharp:1.0329e-02 L3_sharp:1.1761e-02 L4_sharp:1.4696e-02 L5_sharp:2.2417e-02 L6_sharp:3.5344e-02 L7_sharp:6.3136e-02 L8_sharp:1.0405e-01 L9_sharp:8.7810e-02 L10_sharp:9.5544e-02 L11_sharp:1.2928e-01 L12_sharp:3.4541e-01 total_fnorm:1.9922e+00 total_l1_linf:3.2960e+03 total_spectral:1.0156e+00 L1_fnorm:2.5000e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4805e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4512e-01 L7_fnorm:2.4512e-01 L8_fnorm:2.4316e-01 L9_fnorm:2.4805e-01 L10_fnorm:2.5000e-01 L11_fnorm:2.5195e-01 L12_fnorm:2.2461e-01 L1_l1linf:7.0801e-02 L2_l1linf:6.9336e-02 L3_l1linf:6.9336e-02 L4_l1linf:6.8359e-02 L5_l1linf:6.6895e-02 L6_l1linf:6.5918e-02 L7_l1linf:6.5918e-02 L8_l1linf:6.4941e-02 L9_l1linf:6.6895e-02 L10_l1linf:6.8359e-02 L11_l1linf:6.9336e-02 L12_l1linf:4.1016e-02 L1_spectral:3.2059e-03 L2_spectral:3.2063e-03 L3_spectral:3.1766e-03 L4_spectral:3.1910e-03 L5_spectral:3.1722e-03 L6_spectral:3.1856e-03 L7_spectral:3.1545e-03 L8_spectral:3.1367e-03 L9_spectral:3.1636e-03 L10_spectral:3.1629e-03 L11_spectral:3.1500e-03 L12_spectral:3.1504e-03 train_time:148366ms step_avg:46.36ms +[2025-09-11 13:39:43] [Rank 0] PRINT: step:3200/10000 
val_loss:5.7655 total_sharp:7.1666e-02 L1_sharp:1.1486e-02 L2_sharp:1.0329e-02 L3_sharp:1.1761e-02 L4_sharp:1.4696e-02 L5_sharp:2.2417e-02 L6_sharp:3.5344e-02 L7_sharp:6.3136e-02 L8_sharp:1.0405e-01 L9_sharp:8.7810e-02 L10_sharp:9.5544e-02 L11_sharp:1.2928e-01 L12_sharp:3.4541e-01 total_fnorm:1.9922e+00 total_l1_linf:3.2960e+03 total_spectral:1.0156e+00 L1_fnorm:2.5000e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4805e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4512e-01 L7_fnorm:2.4512e-01 L8_fnorm:2.4316e-01 L9_fnorm:2.4805e-01 L10_fnorm:2.5000e-01 L11_fnorm:2.5195e-01 L12_fnorm:2.2461e-01 L1_l1linf:7.0801e-02 L2_l1linf:6.9336e-02 L3_l1linf:6.9336e-02 L4_l1linf:6.8359e-02 L5_l1linf:6.6895e-02 L6_l1linf:6.5918e-02 L7_l1linf:6.5918e-02 L8_l1linf:6.4941e-02 L9_l1linf:6.6895e-02 L10_l1linf:6.8359e-02 L11_l1linf:6.9336e-02 L12_l1linf:4.1016e-02 L1_spectral:3.2059e-03 L2_spectral:3.2063e-03 L3_spectral:3.1766e-03 L4_spectral:3.1910e-03 L5_spectral:3.1722e-03 L6_spectral:3.1856e-03 L7_spectral:3.1545e-03 L8_spectral:3.1367e-03 L9_spectral:3.1636e-03 L10_spectral:3.1629e-03 L11_spectral:3.1500e-03 L12_spectral:3.1504e-03 train_time:148366ms step_avg:46.36ms +[2025-09-11 13:39:44] [Rank 0] step:3201/10000 train_time:150165ms step_avg:46.91ms +[2025-09-11 13:39:44] [Rank 0] step:3201/10000 train_time:150165ms step_avg:46.91ms +[2025-09-11 13:39:45] [Rank 0] step:3221/10000 train_time:150820ms step_avg:46.82ms +[2025-09-11 13:39:45] [Rank 0] step:3221/10000 train_time:150820ms step_avg:46.82ms +[2025-09-11 13:39:46] [Rank 0] step:3241/10000 train_time:151486ms step_avg:46.74ms +[2025-09-11 13:39:46] [Rank 0] step:3241/10000 train_time:151486ms step_avg:46.74ms +[2025-09-11 13:39:46] [Rank 0] step:3261/10000 train_time:152153ms step_avg:46.66ms +[2025-09-11 13:39:46] [Rank 0] step:3261/10000 train_time:152153ms step_avg:46.66ms +[2025-09-11 13:39:47] [Rank 0] step:3281/10000 train_time:152818ms step_avg:46.58ms +[2025-09-11 13:39:47] [Rank 0] step:3281/10000 
train_time:152818ms step_avg:46.58ms +[2025-09-11 13:39:48] [Rank 0] step:3301/10000 train_time:153484ms step_avg:46.50ms +[2025-09-11 13:39:48] [Rank 0] step:3301/10000 train_time:153484ms step_avg:46.50ms +[2025-09-11 13:39:48] [Rank 0] step:3321/10000 train_time:154149ms step_avg:46.42ms +[2025-09-11 13:39:48] [Rank 0] step:3321/10000 train_time:154149ms step_avg:46.42ms +[2025-09-11 13:39:49] [Rank 0] step:3341/10000 train_time:154815ms step_avg:46.34ms +[2025-09-11 13:39:49] [Rank 0] step:3341/10000 train_time:154815ms step_avg:46.34ms +[2025-09-11 13:39:50] [Rank 0] step:3361/10000 train_time:155481ms step_avg:46.26ms +[2025-09-11 13:39:50] [Rank 0] step:3361/10000 train_time:155481ms step_avg:46.26ms +[2025-09-11 13:39:50] [Rank 0] step:3381/10000 train_time:156146ms step_avg:46.18ms +[2025-09-11 13:39:50] [Rank 0] step:3381/10000 train_time:156146ms step_avg:46.18ms +[2025-09-11 13:39:51] [Rank 0] step:3401/10000 train_time:156811ms step_avg:46.11ms +[2025-09-11 13:39:51] [Rank 0] step:3401/10000 train_time:156811ms step_avg:46.11ms +[2025-09-11 13:39:52] [Rank 0] step:3421/10000 train_time:157476ms step_avg:46.03ms +[2025-09-11 13:39:52] [Rank 0] step:3421/10000 train_time:157476ms step_avg:46.03ms +[2025-09-11 13:39:52] [Rank 0] step:3441/10000 train_time:158141ms step_avg:45.96ms +[2025-09-11 13:39:52] [Rank 0] step:3441/10000 train_time:158141ms step_avg:45.96ms +[2025-09-11 13:39:53] [Rank 0] step:3461/10000 train_time:158805ms step_avg:45.88ms +[2025-09-11 13:39:53] [Rank 0] step:3461/10000 train_time:158805ms step_avg:45.88ms +[2025-09-11 13:39:54] [Rank 0] step:3481/10000 train_time:159471ms step_avg:45.81ms +[2025-09-11 13:39:54] [Rank 0] step:3481/10000 train_time:159471ms step_avg:45.81ms +[2025-09-11 13:39:54] [Rank 0] step:3501/10000 train_time:160136ms step_avg:45.74ms +[2025-09-11 13:39:54] [Rank 0] step:3501/10000 train_time:160136ms step_avg:45.74ms +[2025-09-11 13:39:55] [Rank 0] step:3521/10000 train_time:160801ms step_avg:45.67ms 
+[2025-09-11 13:39:55] [Rank 0] step:3521/10000 train_time:160801ms step_avg:45.67ms +[2025-09-11 13:39:56] [Rank 0] step:3541/10000 train_time:161466ms step_avg:45.60ms +[2025-09-11 13:39:56] [Rank 0] step:3541/10000 train_time:161466ms step_avg:45.60ms +[2025-09-11 13:39:56] [Rank 0] step:3561/10000 train_time:162131ms step_avg:45.53ms +[2025-09-11 13:39:56] [Rank 0] step:3561/10000 train_time:162131ms step_avg:45.53ms +[2025-09-11 13:39:57] [Rank 0] step:3581/10000 train_time:162796ms step_avg:45.46ms +[2025-09-11 13:39:57] [Rank 0] step:3581/10000 train_time:162796ms step_avg:45.46ms +[2025-09-11 13:39:58] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 13:39:58] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 13:39:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 13:39:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 13:40:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 13:40:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 13:40:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:40:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:40:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 13:40:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 13:40:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 13:40:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 13:40:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 13:40:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 13:40:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 13:40:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 13:40:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 13:40:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 13:40:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 13:40:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 13:40:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 13:40:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 13:40:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 13:40:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 13:40:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 13:40:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 13:40:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 13:40:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 13:40:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 13:40:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 13:40:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 13:40:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 13:40:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 13:40:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 13:40:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 13:40:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 13:40:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 13:40:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 13:40:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 13:40:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 13:40:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 13:40:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 13:40:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 13:40:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 13:40:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 13:40:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:40:09] [Rank 0] PRINT: step:3600/10000 val_loss:5.7144 total_sharp:9.6883e-02 L1_sharp:1.0258e-02 L2_sharp:8.6561e-03 L3_sharp:1.0280e-02 L4_sharp:1.3048e-02 L5_sharp:2.3460e-02 L6_sharp:3.6206e-02 L7_sharp:6.3880e-02 L8_sharp:9.0013e-02 L9_sharp:9.7704e-02 L10_sharp:1.0666e-01 L11_sharp:1.5534e-01 L12_sharp:4.0534e-01 total_fnorm:1.8516e+00 total_l1_linf:3.0880e+03 total_spectral:9.4141e-01 L1_fnorm:2.4902e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4805e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4512e-01 L6_fnorm:2.4512e-01 L7_fnorm:2.4512e-01 L8_fnorm:2.4316e-01 L9_fnorm:2.4805e-01 L10_fnorm:2.4902e-01 L11_fnorm:2.5195e-01 L12_fnorm:2.2363e-01 L1_l1linf:7.0312e-02 L2_l1linf:6.8848e-02 L3_l1linf:6.8848e-02 L4_l1linf:6.6895e-02 L5_l1linf:6.4941e-02 L6_l1linf:6.4453e-02 L7_l1linf:6.5430e-02 L8_l1linf:6.4941e-02 L9_l1linf:6.5430e-02 L10_l1linf:6.6895e-02 L11_l1linf:6.6406e-02 L12_l1linf:3.7354e-02 L1_spectral:3.1948e-03 L2_spectral:3.2082e-03 L3_spectral:3.1948e-03 L4_spectral:3.1995e-03 L5_spectral:3.1775e-03 L6_spectral:3.1760e-03 L7_spectral:3.1563e-03 L8_spectral:3.1633e-03 L9_spectral:3.1634e-03 L10_spectral:3.1635e-03 L11_spectral:3.1788e-03 L12_spectral:3.1625e-03 train_time:163442ms step_avg:45.40ms +[2025-09-11 13:40:09] [Rank 0] PRINT: step:3600/10000 val_loss:5.7144 total_sharp:9.6883e-02 L1_sharp:1.0258e-02 L2_sharp:8.6561e-03 L3_sharp:1.0280e-02 L4_sharp:1.3048e-02 L5_sharp:2.3460e-02 L6_sharp:3.6206e-02 L7_sharp:6.3880e-02 L8_sharp:9.0013e-02 L9_sharp:9.7704e-02 L10_sharp:1.0666e-01 L11_sharp:1.5534e-01 L12_sharp:4.0534e-01 total_fnorm:1.8516e+00 total_l1_linf:3.0880e+03 total_spectral:9.4141e-01 L1_fnorm:2.4902e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4805e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4512e-01 L6_fnorm:2.4512e-01 L7_fnorm:2.4512e-01 L8_fnorm:2.4316e-01 L9_fnorm:2.4805e-01 L10_fnorm:2.4902e-01 L11_fnorm:2.5195e-01 L12_fnorm:2.2363e-01 L1_l1linf:7.0312e-02 L2_l1linf:6.8848e-02 L3_l1linf:6.8848e-02 L4_l1linf:6.6895e-02 L5_l1linf:6.4941e-02 
L6_l1linf:6.4453e-02 L7_l1linf:6.5430e-02 L8_l1linf:6.4941e-02 L9_l1linf:6.5430e-02 L10_l1linf:6.6895e-02 L11_l1linf:6.6406e-02 L12_l1linf:3.7354e-02 L1_spectral:3.1948e-03 L2_spectral:3.2082e-03 L3_spectral:3.1948e-03 L4_spectral:3.1995e-03 L5_spectral:3.1775e-03 L6_spectral:3.1760e-03 L7_spectral:3.1563e-03 L8_spectral:3.1633e-03 L9_spectral:3.1634e-03 L10_spectral:3.1635e-03 L11_spectral:3.1788e-03 L12_spectral:3.1625e-03 train_time:163442ms step_avg:45.40ms +[2025-09-11 13:40:11] [Rank 0] step:3601/10000 train_time:165573ms step_avg:45.98ms +[2025-09-11 13:40:11] [Rank 0] step:3601/10000 train_time:165573ms step_avg:45.98ms +[2025-09-11 13:40:12] [Rank 0] step:3621/10000 train_time:166256ms step_avg:45.91ms +[2025-09-11 13:40:12] [Rank 0] step:3621/10000 train_time:166256ms step_avg:45.91ms +[2025-09-11 13:40:12] [Rank 0] step:3641/10000 train_time:166922ms step_avg:45.84ms +[2025-09-11 13:40:12] [Rank 0] step:3641/10000 train_time:166922ms step_avg:45.84ms +[2025-09-11 13:40:13] [Rank 0] step:3661/10000 train_time:167587ms step_avg:45.78ms +[2025-09-11 13:40:13] [Rank 0] step:3661/10000 train_time:167587ms step_avg:45.78ms +[2025-09-11 13:40:14] [Rank 0] step:3681/10000 train_time:168252ms step_avg:45.71ms +[2025-09-11 13:40:14] [Rank 0] step:3681/10000 train_time:168252ms step_avg:45.71ms +[2025-09-11 13:40:14] [Rank 0] step:3701/10000 train_time:168917ms step_avg:45.64ms +[2025-09-11 13:40:14] [Rank 0] step:3701/10000 train_time:168917ms step_avg:45.64ms +[2025-09-11 13:40:15] [Rank 0] step:3721/10000 train_time:169593ms step_avg:45.58ms +[2025-09-11 13:40:15] [Rank 0] step:3721/10000 train_time:169593ms step_avg:45.58ms +[2025-09-11 13:40:16] [Rank 0] step:3741/10000 train_time:170269ms step_avg:45.51ms +[2025-09-11 13:40:16] [Rank 0] step:3741/10000 train_time:170269ms step_avg:45.51ms +[2025-09-11 13:40:16] [Rank 0] step:3761/10000 train_time:170946ms step_avg:45.45ms +[2025-09-11 13:40:16] [Rank 0] step:3761/10000 train_time:170946ms step_avg:45.45ms 
+[2025-09-11 13:40:17] [Rank 0] step:3781/10000 train_time:171622ms step_avg:45.39ms +[2025-09-11 13:40:17] [Rank 0] step:3781/10000 train_time:171622ms step_avg:45.39ms +[2025-09-11 13:40:18] [Rank 0] step:3801/10000 train_time:172298ms step_avg:45.33ms +[2025-09-11 13:40:18] [Rank 0] step:3801/10000 train_time:172298ms step_avg:45.33ms +[2025-09-11 13:40:18] [Rank 0] step:3821/10000 train_time:172975ms step_avg:45.27ms +[2025-09-11 13:40:18] [Rank 0] step:3821/10000 train_time:172975ms step_avg:45.27ms +[2025-09-11 13:40:19] [Rank 0] step:3841/10000 train_time:173651ms step_avg:45.21ms +[2025-09-11 13:40:19] [Rank 0] step:3841/10000 train_time:173651ms step_avg:45.21ms +[2025-09-11 13:40:20] [Rank 0] step:3861/10000 train_time:174326ms step_avg:45.15ms +[2025-09-11 13:40:20] [Rank 0] step:3861/10000 train_time:174326ms step_avg:45.15ms +[2025-09-11 13:40:20] [Rank 0] step:3881/10000 train_time:175003ms step_avg:45.09ms +[2025-09-11 13:40:20] [Rank 0] step:3881/10000 train_time:175003ms step_avg:45.09ms +[2025-09-11 13:40:21] [Rank 0] step:3901/10000 train_time:175678ms step_avg:45.03ms +[2025-09-11 13:40:21] [Rank 0] step:3901/10000 train_time:175678ms step_avg:45.03ms +[2025-09-11 13:40:22] [Rank 0] step:3921/10000 train_time:176355ms step_avg:44.98ms +[2025-09-11 13:40:22] [Rank 0] step:3921/10000 train_time:176355ms step_avg:44.98ms +[2025-09-11 13:40:22] [Rank 0] step:3941/10000 train_time:177032ms step_avg:44.92ms +[2025-09-11 13:40:22] [Rank 0] step:3941/10000 train_time:177032ms step_avg:44.92ms +[2025-09-11 13:40:23] [Rank 0] step:3961/10000 train_time:177709ms step_avg:44.86ms +[2025-09-11 13:40:23] [Rank 0] step:3961/10000 train_time:177709ms step_avg:44.86ms +[2025-09-11 13:40:24] [Rank 0] step:3981/10000 train_time:178385ms step_avg:44.81ms +[2025-09-11 13:40:24] [Rank 0] step:3981/10000 train_time:178385ms step_avg:44.81ms +[2025-09-11 13:40:24] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 13:40:24] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 13:40:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 13:40:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 13:40:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 13:40:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 13:40:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:40:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:40:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 13:40:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 13:40:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 13:40:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 13:40:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 13:40:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 13:40:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 13:40:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 13:40:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 13:40:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 13:40:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 13:40:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 13:40:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 13:40:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 13:40:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 13:40:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 13:40:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 13:40:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 13:40:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 13:40:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 13:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 13:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 13:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 13:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 13:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 13:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 13:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 13:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 13:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 13:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 13:40:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 13:40:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 13:40:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 13:40:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 13:40:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 13:40:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 13:40:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 13:40:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 13:40:35] [Rank 0] PRINT: step:4000/10000 val_loss:5.6673 total_sharp:7.9905e-02 L1_sharp:8.1464e-03 L2_sharp:6.4905e-03 L3_sharp:7.3041e-03 L4_sharp:9.1714e-03 L5_sharp:1.7857e-02 L6_sharp:2.8990e-02 L7_sharp:5.0389e-02 L8_sharp:8.9660e-02 L9_sharp:9.1486e-02 L10_sharp:1.1210e-01 L11_sharp:1.5683e-01 L12_sharp:4.1861e-01 total_fnorm:1.9922e+00 total_l1_linf:3.1360e+03 total_spectral:1.0156e+00 L1_fnorm:2.4805e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4707e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4512e-01 L7_fnorm:2.4512e-01 L8_fnorm:2.4121e-01 L9_fnorm:2.4805e-01 L10_fnorm:2.4805e-01 L11_fnorm:2.5000e-01 L12_fnorm:2.1973e-01 L1_l1linf:6.7871e-02 L2_l1linf:6.6895e-02 L3_l1linf:6.5430e-02 L4_l1linf:6.4941e-02 L5_l1linf:6.3477e-02 L6_l1linf:6.3965e-02 L7_l1linf:6.5918e-02 L8_l1linf:6.4453e-02 L9_l1linf:6.4941e-02 L10_l1linf:6.6406e-02 L11_l1linf:6.3965e-02 L12_l1linf:3.6133e-02 L1_spectral:3.2191e-03 L2_spectral:3.2206e-03 L3_spectral:3.2016e-03 L4_spectral:3.1939e-03 L5_spectral:3.2015e-03 L6_spectral:3.1633e-03 L7_spectral:3.1681e-03 L8_spectral:3.1307e-03 L9_spectral:3.1703e-03 L10_spectral:3.1826e-03 L11_spectral:3.1769e-03 L12_spectral:3.1745e-03 train_time:179043ms step_avg:44.76ms +[2025-09-11 13:40:35] [Rank 0] PRINT: step:4000/10000 
val_loss:5.6673 total_sharp:7.9905e-02 L1_sharp:8.1464e-03 L2_sharp:6.4905e-03 L3_sharp:7.3041e-03 L4_sharp:9.1714e-03 L5_sharp:1.7857e-02 L6_sharp:2.8990e-02 L7_sharp:5.0389e-02 L8_sharp:8.9660e-02 L9_sharp:9.1486e-02 L10_sharp:1.1210e-01 L11_sharp:1.5683e-01 L12_sharp:4.1861e-01 total_fnorm:1.9922e+00 total_l1_linf:3.1360e+03 total_spectral:1.0156e+00 L1_fnorm:2.4805e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4707e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4512e-01 L7_fnorm:2.4512e-01 L8_fnorm:2.4121e-01 L9_fnorm:2.4805e-01 L10_fnorm:2.4805e-01 L11_fnorm:2.5000e-01 L12_fnorm:2.1973e-01 L1_l1linf:6.7871e-02 L2_l1linf:6.6895e-02 L3_l1linf:6.5430e-02 L4_l1linf:6.4941e-02 L5_l1linf:6.3477e-02 L6_l1linf:6.3965e-02 L7_l1linf:6.5918e-02 L8_l1linf:6.4453e-02 L9_l1linf:6.4941e-02 L10_l1linf:6.6406e-02 L11_l1linf:6.3965e-02 L12_l1linf:3.6133e-02 L1_spectral:3.2191e-03 L2_spectral:3.2206e-03 L3_spectral:3.2016e-03 L4_spectral:3.1939e-03 L5_spectral:3.2015e-03 L6_spectral:3.1633e-03 L7_spectral:3.1681e-03 L8_spectral:3.1307e-03 L9_spectral:3.1703e-03 L10_spectral:3.1826e-03 L11_spectral:3.1769e-03 L12_spectral:3.1745e-03 train_time:179043ms step_avg:44.76ms +[2025-09-11 13:40:37] [Rank 0] step:4001/10000 train_time:180836ms step_avg:45.20ms +[2025-09-11 13:40:37] [Rank 0] step:4001/10000 train_time:180836ms step_avg:45.20ms +[2025-09-11 13:40:38] [Rank 0] step:4021/10000 train_time:181520ms step_avg:45.14ms +[2025-09-11 13:40:38] [Rank 0] step:4021/10000 train_time:181520ms step_avg:45.14ms +[2025-09-11 13:40:38] [Rank 0] step:4041/10000 train_time:182198ms step_avg:45.09ms +[2025-09-11 13:40:38] [Rank 0] step:4041/10000 train_time:182198ms step_avg:45.09ms +[2025-09-11 13:40:39] [Rank 0] step:4061/10000 train_time:182873ms step_avg:45.03ms +[2025-09-11 13:40:39] [Rank 0] step:4061/10000 train_time:182873ms step_avg:45.03ms +[2025-09-11 13:40:40] [Rank 0] step:4081/10000 train_time:183551ms step_avg:44.98ms +[2025-09-11 13:40:40] [Rank 0] step:4081/10000 
train_time:183551ms step_avg:44.98ms +[2025-09-11 13:40:40] [Rank 0] step:4101/10000 train_time:184226ms step_avg:44.92ms +[2025-09-11 13:40:40] [Rank 0] step:4101/10000 train_time:184226ms step_avg:44.92ms +[2025-09-11 13:40:41] [Rank 0] step:4121/10000 train_time:185189ms step_avg:44.94ms +[2025-09-11 13:40:41] [Rank 0] step:4121/10000 train_time:185189ms step_avg:44.94ms +[2025-09-11 13:40:42] [Rank 0] step:4141/10000 train_time:185864ms step_avg:44.88ms +[2025-09-11 13:40:42] [Rank 0] step:4141/10000 train_time:185864ms step_avg:44.88ms +[2025-09-11 13:40:43] [Rank 0] step:4161/10000 train_time:186540ms step_avg:44.83ms +[2025-09-11 13:40:43] [Rank 0] step:4161/10000 train_time:186540ms step_avg:44.83ms +[2025-09-11 13:40:44] [Rank 0] step:4181/10000 train_time:187512ms step_avg:44.85ms +[2025-09-11 13:40:44] [Rank 0] step:4181/10000 train_time:187512ms step_avg:44.85ms +[2025-09-11 13:40:44] [Rank 0] step:4201/10000 train_time:188189ms step_avg:44.80ms +[2025-09-11 13:40:44] [Rank 0] step:4201/10000 train_time:188189ms step_avg:44.80ms +[2025-09-11 13:40:45] [Rank 0] step:4221/10000 train_time:188866ms step_avg:44.74ms +[2025-09-11 13:40:45] [Rank 0] step:4221/10000 train_time:188866ms step_avg:44.74ms +[2025-09-11 13:40:46] [Rank 0] step:4241/10000 train_time:189542ms step_avg:44.69ms +[2025-09-11 13:40:46] [Rank 0] step:4241/10000 train_time:189542ms step_avg:44.69ms +[2025-09-11 13:40:46] [Rank 0] step:4261/10000 train_time:190218ms step_avg:44.64ms +[2025-09-11 13:40:46] [Rank 0] step:4261/10000 train_time:190218ms step_avg:44.64ms +[2025-09-11 13:40:47] [Rank 0] step:4281/10000 train_time:190894ms step_avg:44.59ms +[2025-09-11 13:40:47] [Rank 0] step:4281/10000 train_time:190894ms step_avg:44.59ms +[2025-09-11 13:40:48] [Rank 0] step:4301/10000 train_time:191571ms step_avg:44.54ms +[2025-09-11 13:40:48] [Rank 0] step:4301/10000 train_time:191571ms step_avg:44.54ms +[2025-09-11 13:40:48] [Rank 0] step:4321/10000 train_time:192247ms step_avg:44.49ms 
+[2025-09-11 13:40:48] [Rank 0] step:4321/10000 train_time:192247ms step_avg:44.49ms +[2025-09-11 13:40:49] [Rank 0] step:4341/10000 train_time:192923ms step_avg:44.44ms +[2025-09-11 13:40:49] [Rank 0] step:4341/10000 train_time:192923ms step_avg:44.44ms +[2025-09-11 13:40:50] [Rank 0] step:4361/10000 train_time:193599ms step_avg:44.39ms +[2025-09-11 13:40:50] [Rank 0] step:4361/10000 train_time:193599ms step_avg:44.39ms +[2025-09-11 13:40:50] [Rank 0] step:4381/10000 train_time:194276ms step_avg:44.35ms +[2025-09-11 13:40:50] [Rank 0] step:4381/10000 train_time:194276ms step_avg:44.35ms +[2025-09-11 13:40:51] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 13:40:51] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 13:40:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 13:40:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 13:40:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 13:40:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 13:40:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:40:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:40:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 13:40:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 13:40:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 13:40:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 13:40:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 13:40:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 13:40:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 13:40:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 13:40:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 13:40:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 13:40:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 13:40:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 13:40:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 13:40:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 13:40:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 13:40:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 13:40:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 13:40:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 13:40:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 13:40:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 13:40:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 13:40:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 13:41:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 13:41:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 13:41:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 13:41:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 13:41:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 13:41:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 13:41:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 13:41:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 13:41:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 13:41:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 13:41:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 13:41:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 13:41:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 13:41:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 13:41:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 13:41:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:41:02] [Rank 0] PRINT: step:4400/10000 val_loss:5.6253 total_sharp:7.4264e-02 L1_sharp:8.2910e-03 L2_sharp:3.8699e-03 L3_sharp:4.0615e-03 L4_sharp:8.4173e-03 L5_sharp:1.3367e-02 L6_sharp:2.5042e-02 L7_sharp:4.8020e-02 L8_sharp:6.8589e-02 L9_sharp:7.2903e-02 L10_sharp:9.0058e-02 L11_sharp:1.2897e-01 L12_sharp:4.1021e-01 total_fnorm:1.7969e+00 total_l1_linf:2.9440e+03 total_spectral:9.1406e-01 L1_fnorm:2.4805e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4609e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4316e-01 L6_fnorm:2.4414e-01 L7_fnorm:2.4316e-01 L8_fnorm:2.4121e-01 L9_fnorm:2.4707e-01 L10_fnorm:2.4805e-01 L11_fnorm:2.5000e-01 L12_fnorm:2.1973e-01 L1_l1linf:6.8359e-02 L2_l1linf:6.6895e-02 L3_l1linf:6.5430e-02 L4_l1linf:6.4453e-02 L5_l1linf:6.2988e-02 L6_l1linf:6.2988e-02 L7_l1linf:6.3477e-02 L8_l1linf:6.3477e-02 L9_l1linf:6.4453e-02 L10_l1linf:6.4941e-02 L11_l1linf:6.3477e-02 L12_l1linf:3.4668e-02 L1_spectral:3.2475e-03 L2_spectral:3.2159e-03 L3_spectral:3.2103e-03 L4_spectral:3.2101e-03 L5_spectral:3.1852e-03 L6_spectral:3.1885e-03 L7_spectral:3.1805e-03 L8_spectral:3.1511e-03 L9_spectral:3.1797e-03 L10_spectral:3.1670e-03 L11_spectral:3.1875e-03 L12_spectral:3.1823e-03 train_time:194932ms step_avg:44.30ms +[2025-09-11 13:41:02] [Rank 0] PRINT: step:4400/10000 val_loss:5.6253 total_sharp:7.4264e-02 L1_sharp:8.2910e-03 L2_sharp:3.8699e-03 L3_sharp:4.0615e-03 L4_sharp:8.4173e-03 L5_sharp:1.3367e-02 L6_sharp:2.5042e-02 L7_sharp:4.8020e-02 L8_sharp:6.8589e-02 L9_sharp:7.2903e-02 L10_sharp:9.0058e-02 L11_sharp:1.2897e-01 L12_sharp:4.1021e-01 total_fnorm:1.7969e+00 total_l1_linf:2.9440e+03 total_spectral:9.1406e-01 L1_fnorm:2.4805e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4609e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4316e-01 L6_fnorm:2.4414e-01 L7_fnorm:2.4316e-01 L8_fnorm:2.4121e-01 L9_fnorm:2.4707e-01 L10_fnorm:2.4805e-01 L11_fnorm:2.5000e-01 L12_fnorm:2.1973e-01 L1_l1linf:6.8359e-02 L2_l1linf:6.6895e-02 L3_l1linf:6.5430e-02 L4_l1linf:6.4453e-02 L5_l1linf:6.2988e-02 
L6_l1linf:6.2988e-02 L7_l1linf:6.3477e-02 L8_l1linf:6.3477e-02 L9_l1linf:6.4453e-02 L10_l1linf:6.4941e-02 L11_l1linf:6.3477e-02 L12_l1linf:3.4668e-02 L1_spectral:3.2475e-03 L2_spectral:3.2159e-03 L3_spectral:3.2103e-03 L4_spectral:3.2101e-03 L5_spectral:3.1852e-03 L6_spectral:3.1885e-03 L7_spectral:3.1805e-03 L8_spectral:3.1511e-03 L9_spectral:3.1797e-03 L10_spectral:3.1670e-03 L11_spectral:3.1875e-03 L12_spectral:3.1823e-03 train_time:194932ms step_avg:44.30ms +[2025-09-11 13:41:04] [Rank 0] step:4401/10000 train_time:196772ms step_avg:44.71ms +[2025-09-11 13:41:04] [Rank 0] step:4401/10000 train_time:196772ms step_avg:44.71ms +[2025-09-11 13:41:04] [Rank 0] step:4421/10000 train_time:197486ms step_avg:44.67ms +[2025-09-11 13:41:04] [Rank 0] step:4421/10000 train_time:197486ms step_avg:44.67ms +[2025-09-11 13:41:05] [Rank 0] step:4441/10000 train_time:198166ms step_avg:44.62ms +[2025-09-11 13:41:05] [Rank 0] step:4441/10000 train_time:198166ms step_avg:44.62ms +[2025-09-11 13:41:06] [Rank 0] step:4461/10000 train_time:198846ms step_avg:44.57ms +[2025-09-11 13:41:06] [Rank 0] step:4461/10000 train_time:198846ms step_avg:44.57ms +[2025-09-11 13:41:06] [Rank 0] step:4481/10000 train_time:199524ms step_avg:44.53ms +[2025-09-11 13:41:06] [Rank 0] step:4481/10000 train_time:199524ms step_avg:44.53ms +[2025-09-11 13:41:07] [Rank 0] step:4501/10000 train_time:200204ms step_avg:44.48ms +[2025-09-11 13:41:07] [Rank 0] step:4501/10000 train_time:200204ms step_avg:44.48ms +[2025-09-11 13:41:08] [Rank 0] step:4521/10000 train_time:200882ms step_avg:44.43ms +[2025-09-11 13:41:08] [Rank 0] step:4521/10000 train_time:200882ms step_avg:44.43ms +[2025-09-11 13:41:08] [Rank 0] step:4541/10000 train_time:201561ms step_avg:44.39ms +[2025-09-11 13:41:08] [Rank 0] step:4541/10000 train_time:201561ms step_avg:44.39ms +[2025-09-11 13:41:09] [Rank 0] step:4561/10000 train_time:202240ms step_avg:44.34ms +[2025-09-11 13:41:09] [Rank 0] step:4561/10000 train_time:202240ms step_avg:44.34ms 
+[2025-09-11 13:41:10] [Rank 0] step:4581/10000 train_time:202918ms step_avg:44.30ms +[2025-09-11 13:41:10] [Rank 0] step:4581/10000 train_time:202918ms step_avg:44.30ms +[2025-09-11 13:41:10] [Rank 0] step:4601/10000 train_time:203596ms step_avg:44.25ms +[2025-09-11 13:41:10] [Rank 0] step:4601/10000 train_time:203596ms step_avg:44.25ms +[2025-09-11 13:41:11] [Rank 0] step:4621/10000 train_time:204275ms step_avg:44.21ms +[2025-09-11 13:41:11] [Rank 0] step:4621/10000 train_time:204275ms step_avg:44.21ms +[2025-09-11 13:41:12] [Rank 0] step:4641/10000 train_time:204952ms step_avg:44.16ms +[2025-09-11 13:41:12] [Rank 0] step:4641/10000 train_time:204952ms step_avg:44.16ms +[2025-09-11 13:41:13] [Rank 0] step:4661/10000 train_time:205631ms step_avg:44.12ms +[2025-09-11 13:41:13] [Rank 0] step:4661/10000 train_time:205631ms step_avg:44.12ms +[2025-09-11 13:41:13] [Rank 0] step:4681/10000 train_time:206310ms step_avg:44.07ms +[2025-09-11 13:41:13] [Rank 0] step:4681/10000 train_time:206310ms step_avg:44.07ms +[2025-09-11 13:41:14] [Rank 0] step:4701/10000 train_time:206988ms step_avg:44.03ms +[2025-09-11 13:41:14] [Rank 0] step:4701/10000 train_time:206988ms step_avg:44.03ms +[2025-09-11 13:41:15] [Rank 0] step:4721/10000 train_time:207666ms step_avg:43.99ms +[2025-09-11 13:41:15] [Rank 0] step:4721/10000 train_time:207666ms step_avg:43.99ms +[2025-09-11 13:41:15] [Rank 0] step:4741/10000 train_time:208345ms step_avg:43.95ms +[2025-09-11 13:41:15] [Rank 0] step:4741/10000 train_time:208345ms step_avg:43.95ms +[2025-09-11 13:41:16] [Rank 0] step:4761/10000 train_time:209025ms step_avg:43.90ms +[2025-09-11 13:41:16] [Rank 0] step:4761/10000 train_time:209025ms step_avg:43.90ms +[2025-09-11 13:41:17] [Rank 0] step:4781/10000 train_time:209704ms step_avg:43.86ms +[2025-09-11 13:41:17] [Rank 0] step:4781/10000 train_time:209704ms step_avg:43.86ms +[2025-09-11 13:41:17] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 13:41:17] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 13:41:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 13:41:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 13:41:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 13:41:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 13:41:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:41:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:41:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 13:41:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 13:41:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 13:41:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 13:41:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 13:41:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 13:41:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 13:41:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 13:41:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 13:41:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 13:41:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 13:41:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 13:41:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 13:41:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 13:41:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 13:41:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 13:41:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 13:41:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 13:41:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 13:41:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 13:41:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 13:41:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 13:41:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 13:41:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 13:41:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 13:41:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 13:41:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 13:41:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 13:41:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 13:41:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 13:41:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 13:41:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 13:41:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 13:41:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 13:41:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 13:41:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 13:41:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 13:41:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 13:41:28] [Rank 0] PRINT: step:4800/10000 val_loss:5.5854 total_sharp:7.1952e-02 L1_sharp:8.0757e-03 L2_sharp:6.5263e-03 L3_sharp:6.8934e-03 L4_sharp:1.0752e-02 L5_sharp:1.6417e-02 L6_sharp:2.3623e-02 L7_sharp:3.9668e-02 L8_sharp:7.3765e-02 L9_sharp:7.0411e-02 L10_sharp:8.7704e-02 L11_sharp:1.2832e-01 L12_sharp:3.6985e-01 total_fnorm:1.7891e+00 total_l1_linf:2.9280e+03 total_spectral:9.1406e-01 L1_fnorm:2.4805e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4707e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4414e-01 L7_fnorm:2.4316e-01 L8_fnorm:2.4121e-01 L9_fnorm:2.4512e-01 L10_fnorm:2.4609e-01 L11_fnorm:2.5000e-01 L12_fnorm:2.2363e-01 L1_l1linf:6.7383e-02 L2_l1linf:6.7383e-02 L3_l1linf:6.5430e-02 L4_l1linf:6.4941e-02 L5_l1linf:6.2988e-02 L6_l1linf:6.0791e-02 L7_l1linf:6.2500e-02 L8_l1linf:6.0303e-02 L9_l1linf:6.1523e-02 L10_l1linf:6.3477e-02 L11_l1linf:6.2988e-02 L12_l1linf:3.4424e-02 L1_spectral:3.2237e-03 L2_spectral:3.2475e-03 L3_spectral:3.2235e-03 L4_spectral:3.2138e-03 L5_spectral:3.2029e-03 L6_spectral:3.2107e-03 L7_spectral:3.1949e-03 L8_spectral:3.1676e-03 L9_spectral:3.1773e-03 L10_spectral:3.2016e-03 L11_spectral:3.1854e-03 L12_spectral:3.1757e-03 train_time:210363ms step_avg:43.83ms +[2025-09-11 13:41:28] [Rank 0] PRINT: step:4800/10000 
val_loss:5.5854 total_sharp:7.1952e-02 L1_sharp:8.0757e-03 L2_sharp:6.5263e-03 L3_sharp:6.8934e-03 L4_sharp:1.0752e-02 L5_sharp:1.6417e-02 L6_sharp:2.3623e-02 L7_sharp:3.9668e-02 L8_sharp:7.3765e-02 L9_sharp:7.0411e-02 L10_sharp:8.7704e-02 L11_sharp:1.2832e-01 L12_sharp:3.6985e-01 total_fnorm:1.7891e+00 total_l1_linf:2.9280e+03 total_spectral:9.1406e-01 L1_fnorm:2.4805e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4707e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4414e-01 L7_fnorm:2.4316e-01 L8_fnorm:2.4121e-01 L9_fnorm:2.4512e-01 L10_fnorm:2.4609e-01 L11_fnorm:2.5000e-01 L12_fnorm:2.2363e-01 L1_l1linf:6.7383e-02 L2_l1linf:6.7383e-02 L3_l1linf:6.5430e-02 L4_l1linf:6.4941e-02 L5_l1linf:6.2988e-02 L6_l1linf:6.0791e-02 L7_l1linf:6.2500e-02 L8_l1linf:6.0303e-02 L9_l1linf:6.1523e-02 L10_l1linf:6.3477e-02 L11_l1linf:6.2988e-02 L12_l1linf:3.4424e-02 L1_spectral:3.2237e-03 L2_spectral:3.2475e-03 L3_spectral:3.2235e-03 L4_spectral:3.2138e-03 L5_spectral:3.2029e-03 L6_spectral:3.2107e-03 L7_spectral:3.1949e-03 L8_spectral:3.1676e-03 L9_spectral:3.1773e-03 L10_spectral:3.2016e-03 L11_spectral:3.1854e-03 L12_spectral:3.1757e-03 train_time:210363ms step_avg:43.83ms +[2025-09-11 13:41:30] [Rank 0] step:4801/10000 train_time:212219ms step_avg:44.20ms +[2025-09-11 13:41:30] [Rank 0] step:4801/10000 train_time:212219ms step_avg:44.20ms +[2025-09-11 13:41:31] [Rank 0] step:4821/10000 train_time:212915ms step_avg:44.16ms +[2025-09-11 13:41:31] [Rank 0] step:4821/10000 train_time:212915ms step_avg:44.16ms +[2025-09-11 13:41:31] [Rank 0] step:4841/10000 train_time:213596ms step_avg:44.12ms +[2025-09-11 13:41:31] [Rank 0] step:4841/10000 train_time:213596ms step_avg:44.12ms +[2025-09-11 13:41:32] [Rank 0] step:4861/10000 train_time:214274ms step_avg:44.08ms +[2025-09-11 13:41:32] [Rank 0] step:4861/10000 train_time:214274ms step_avg:44.08ms +[2025-09-11 13:41:33] [Rank 0] step:4881/10000 train_time:214953ms step_avg:44.04ms +[2025-09-11 13:41:33] [Rank 0] step:4881/10000 
train_time:214953ms step_avg:44.04ms +[2025-09-11 13:41:33] [Rank 0] step:4901/10000 train_time:215633ms step_avg:44.00ms +[2025-09-11 13:41:33] [Rank 0] step:4901/10000 train_time:215633ms step_avg:44.00ms +[2025-09-11 13:41:34] [Rank 0] step:4921/10000 train_time:216312ms step_avg:43.96ms +[2025-09-11 13:41:34] [Rank 0] step:4921/10000 train_time:216312ms step_avg:43.96ms +[2025-09-11 13:41:35] [Rank 0] step:4941/10000 train_time:216991ms step_avg:43.92ms +[2025-09-11 13:41:35] [Rank 0] step:4941/10000 train_time:216991ms step_avg:43.92ms +[2025-09-11 13:41:36] [Rank 0] step:4961/10000 train_time:217670ms step_avg:43.88ms +[2025-09-11 13:41:36] [Rank 0] step:4961/10000 train_time:217670ms step_avg:43.88ms +[2025-09-11 13:41:36] [Rank 0] step:4981/10000 train_time:218349ms step_avg:43.84ms +[2025-09-11 13:41:36] [Rank 0] step:4981/10000 train_time:218349ms step_avg:43.84ms +[2025-09-11 13:41:37] [Rank 0] step:5001/10000 train_time:219029ms step_avg:43.80ms +[2025-09-11 13:41:37] [Rank 0] step:5001/10000 train_time:219029ms step_avg:43.80ms +[2025-09-11 13:41:38] [Rank 0] step:5021/10000 train_time:219707ms step_avg:43.76ms +[2025-09-11 13:41:38] [Rank 0] step:5021/10000 train_time:219707ms step_avg:43.76ms +[2025-09-11 13:41:38] [Rank 0] step:5041/10000 train_time:220385ms step_avg:43.72ms +[2025-09-11 13:41:38] [Rank 0] step:5041/10000 train_time:220385ms step_avg:43.72ms +[2025-09-11 13:41:39] [Rank 0] step:5061/10000 train_time:221064ms step_avg:43.68ms +[2025-09-11 13:41:39] [Rank 0] step:5061/10000 train_time:221064ms step_avg:43.68ms +[2025-09-11 13:41:40] [Rank 0] step:5081/10000 train_time:221741ms step_avg:43.64ms +[2025-09-11 13:41:40] [Rank 0] step:5081/10000 train_time:221741ms step_avg:43.64ms +[2025-09-11 13:41:40] [Rank 0] step:5101/10000 train_time:222421ms step_avg:43.60ms +[2025-09-11 13:41:40] [Rank 0] step:5101/10000 train_time:222421ms step_avg:43.60ms +[2025-09-11 13:41:41] [Rank 0] step:5121/10000 train_time:223099ms step_avg:43.57ms 
+[2025-09-11 13:41:41] [Rank 0] step:5121/10000 train_time:223099ms step_avg:43.57ms +[2025-09-11 13:41:42] [Rank 0] step:5141/10000 train_time:223778ms step_avg:43.53ms +[2025-09-11 13:41:42] [Rank 0] step:5141/10000 train_time:223778ms step_avg:43.53ms +[2025-09-11 13:41:42] [Rank 0] step:5161/10000 train_time:224457ms step_avg:43.49ms +[2025-09-11 13:41:42] [Rank 0] step:5161/10000 train_time:224457ms step_avg:43.49ms +[2025-09-11 13:41:43] [Rank 0] step:5181/10000 train_time:225135ms step_avg:43.45ms +[2025-09-11 13:41:43] [Rank 0] step:5181/10000 train_time:225135ms step_avg:43.45ms +[2025-09-11 13:41:44] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 13:41:44] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 13:41:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 13:41:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 13:41:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 13:41:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 13:41:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:41:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:41:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 13:41:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 13:41:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 13:41:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 13:41:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 13:41:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 13:41:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 13:41:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 13:41:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 13:41:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 13:41:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 13:41:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 13:41:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 13:41:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 13:41:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 13:41:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 13:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 13:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 13:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 13:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 13:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 13:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 13:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 13:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 13:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 13:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 13:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 13:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 13:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 13:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 13:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 13:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 13:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 13:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 13:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 13:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 13:41:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 13:41:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:41:54] [Rank 0] PRINT: step:5200/10000 val_loss:5.5536 total_sharp:9.0840e-02 L1_sharp:1.1108e-02 L2_sharp:6.3586e-03 L3_sharp:9.7425e-03 L4_sharp:1.1752e-02 L5_sharp:1.7672e-02 L6_sharp:2.3905e-02 L7_sharp:4.2611e-02 L8_sharp:7.0428e-02 L9_sharp:7.9361e-02 L10_sharp:1.0557e-01 L11_sharp:1.5238e-01 L12_sharp:3.8195e-01 total_fnorm:1.7266e+00 total_l1_linf:2.8160e+03 total_spectral:8.7891e-01 L1_fnorm:2.4805e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4707e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4316e-01 L6_fnorm:2.4414e-01 L7_fnorm:2.4219e-01 L8_fnorm:2.4023e-01 L9_fnorm:2.4512e-01 L10_fnorm:2.4609e-01 L11_fnorm:2.5000e-01 L12_fnorm:2.2559e-01 L1_l1linf:6.6406e-02 L2_l1linf:6.5918e-02 L3_l1linf:6.4941e-02 L4_l1linf:6.4453e-02 L5_l1linf:6.2012e-02 L6_l1linf:6.0303e-02 L7_l1linf:6.1768e-02 L8_l1linf:5.9814e-02 L9_l1linf:5.9570e-02 L10_l1linf:6.1279e-02 L11_l1linf:6.1523e-02 L12_l1linf:3.5400e-02 L1_spectral:3.2559e-03 L2_spectral:3.2315e-03 L3_spectral:3.2116e-03 L4_spectral:3.2281e-03 L5_spectral:3.2208e-03 L6_spectral:3.1956e-03 L7_spectral:3.1908e-03 L8_spectral:3.1686e-03 L9_spectral:3.1922e-03 L10_spectral:3.2162e-03 L11_spectral:3.1913e-03 L12_spectral:3.1787e-03 train_time:225801ms step_avg:43.42ms +[2025-09-11 13:41:54] [Rank 0] PRINT: step:5200/10000 val_loss:5.5536 total_sharp:9.0840e-02 L1_sharp:1.1108e-02 L2_sharp:6.3586e-03 L3_sharp:9.7425e-03 L4_sharp:1.1752e-02 L5_sharp:1.7672e-02 L6_sharp:2.3905e-02 L7_sharp:4.2611e-02 L8_sharp:7.0428e-02 L9_sharp:7.9361e-02 L10_sharp:1.0557e-01 L11_sharp:1.5238e-01 L12_sharp:3.8195e-01 total_fnorm:1.7266e+00 total_l1_linf:2.8160e+03 total_spectral:8.7891e-01 L1_fnorm:2.4805e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4707e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4316e-01 L6_fnorm:2.4414e-01 L7_fnorm:2.4219e-01 L8_fnorm:2.4023e-01 L9_fnorm:2.4512e-01 L10_fnorm:2.4609e-01 L11_fnorm:2.5000e-01 L12_fnorm:2.2559e-01 L1_l1linf:6.6406e-02 L2_l1linf:6.5918e-02 L3_l1linf:6.4941e-02 L4_l1linf:6.4453e-02 L5_l1linf:6.2012e-02 
L6_l1linf:6.0303e-02 L7_l1linf:6.1768e-02 L8_l1linf:5.9814e-02 L9_l1linf:5.9570e-02 L10_l1linf:6.1279e-02 L11_l1linf:6.1523e-02 L12_l1linf:3.5400e-02 L1_spectral:3.2559e-03 L2_spectral:3.2315e-03 L3_spectral:3.2116e-03 L4_spectral:3.2281e-03 L5_spectral:3.2208e-03 L6_spectral:3.1956e-03 L7_spectral:3.1908e-03 L8_spectral:3.1686e-03 L9_spectral:3.1922e-03 L10_spectral:3.2162e-03 L11_spectral:3.1913e-03 L12_spectral:3.1787e-03 train_time:225801ms step_avg:43.42ms +[2025-09-11 13:41:56] [Rank 0] step:5201/10000 train_time:227655ms step_avg:43.77ms +[2025-09-11 13:41:56] [Rank 0] step:5201/10000 train_time:227655ms step_avg:43.77ms +[2025-09-11 13:41:57] [Rank 0] step:5221/10000 train_time:228369ms step_avg:43.74ms +[2025-09-11 13:41:57] [Rank 0] step:5221/10000 train_time:228369ms step_avg:43.74ms +[2025-09-11 13:41:58] [Rank 0] step:5241/10000 train_time:229056ms step_avg:43.70ms +[2025-09-11 13:41:58] [Rank 0] step:5241/10000 train_time:229056ms step_avg:43.70ms +[2025-09-11 13:41:58] [Rank 0] step:5261/10000 train_time:229745ms step_avg:43.67ms +[2025-09-11 13:41:58] [Rank 0] step:5261/10000 train_time:229745ms step_avg:43.67ms +[2025-09-11 13:41:59] [Rank 0] step:5281/10000 train_time:230433ms step_avg:43.63ms +[2025-09-11 13:41:59] [Rank 0] step:5281/10000 train_time:230433ms step_avg:43.63ms +[2025-09-11 13:42:00] [Rank 0] step:5301/10000 train_time:231121ms step_avg:43.60ms +[2025-09-11 13:42:00] [Rank 0] step:5301/10000 train_time:231121ms step_avg:43.60ms +[2025-09-11 13:42:01] [Rank 0] step:5321/10000 train_time:231809ms step_avg:43.56ms +[2025-09-11 13:42:01] [Rank 0] step:5321/10000 train_time:231809ms step_avg:43.56ms +[2025-09-11 13:42:01] [Rank 0] step:5341/10000 train_time:232497ms step_avg:43.53ms +[2025-09-11 13:42:01] [Rank 0] step:5341/10000 train_time:232497ms step_avg:43.53ms +[2025-09-11 13:42:02] [Rank 0] step:5361/10000 train_time:233189ms step_avg:43.50ms +[2025-09-11 13:42:02] [Rank 0] step:5361/10000 train_time:233189ms step_avg:43.50ms 
+[2025-09-11 13:42:03] [Rank 0] step:5381/10000 train_time:233877ms step_avg:43.46ms +[2025-09-11 13:42:03] [Rank 0] step:5381/10000 train_time:233877ms step_avg:43.46ms +[2025-09-11 13:42:03] [Rank 0] step:5401/10000 train_time:234564ms step_avg:43.43ms +[2025-09-11 13:42:03] [Rank 0] step:5401/10000 train_time:234564ms step_avg:43.43ms +[2025-09-11 13:42:04] [Rank 0] step:5421/10000 train_time:235254ms step_avg:43.40ms +[2025-09-11 13:42:04] [Rank 0] step:5421/10000 train_time:235254ms step_avg:43.40ms +[2025-09-11 13:42:05] [Rank 0] step:5441/10000 train_time:235943ms step_avg:43.36ms +[2025-09-11 13:42:05] [Rank 0] step:5441/10000 train_time:235943ms step_avg:43.36ms +[2025-09-11 13:42:05] [Rank 0] step:5461/10000 train_time:236631ms step_avg:43.33ms +[2025-09-11 13:42:05] [Rank 0] step:5461/10000 train_time:236631ms step_avg:43.33ms +[2025-09-11 13:42:06] [Rank 0] step:5481/10000 train_time:237319ms step_avg:43.30ms +[2025-09-11 13:42:06] [Rank 0] step:5481/10000 train_time:237319ms step_avg:43.30ms +[2025-09-11 13:42:07] [Rank 0] step:5501/10000 train_time:238006ms step_avg:43.27ms +[2025-09-11 13:42:07] [Rank 0] step:5501/10000 train_time:238006ms step_avg:43.27ms +[2025-09-11 13:42:07] [Rank 0] step:5521/10000 train_time:238694ms step_avg:43.23ms +[2025-09-11 13:42:07] [Rank 0] step:5521/10000 train_time:238694ms step_avg:43.23ms +[2025-09-11 13:42:08] [Rank 0] step:5541/10000 train_time:239384ms step_avg:43.20ms +[2025-09-11 13:42:08] [Rank 0] step:5541/10000 train_time:239384ms step_avg:43.20ms +[2025-09-11 13:42:09] [Rank 0] step:5561/10000 train_time:240073ms step_avg:43.17ms +[2025-09-11 13:42:09] [Rank 0] step:5561/10000 train_time:240073ms step_avg:43.17ms +[2025-09-11 13:42:09] [Rank 0] step:5581/10000 train_time:240762ms step_avg:43.14ms +[2025-09-11 13:42:09] [Rank 0] step:5581/10000 train_time:240762ms step_avg:43.14ms +[2025-09-11 13:42:10] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 13:42:10] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 13:42:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 13:42:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 13:42:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 13:42:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 13:42:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:42:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:42:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 13:42:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 13:42:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 13:42:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 13:42:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 13:42:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 13:42:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 13:42:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 13:42:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 13:42:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 13:42:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 13:42:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 13:42:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 13:42:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 13:42:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 13:42:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 13:42:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 13:42:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 13:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 13:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 13:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 13:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 13:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 13:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 13:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 13:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 13:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 13:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 13:42:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 13:42:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 13:42:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 13:42:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 13:42:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 13:42:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 13:42:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 13:42:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 13:42:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 13:42:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 13:42:21] [Rank 0] PRINT: step:5600/10000 val_loss:5.5255 total_sharp:7.0490e-02 L1_sharp:6.2035e-03 L2_sharp:5.1831e-03 L3_sharp:7.2876e-03 L4_sharp:8.1264e-03 L5_sharp:1.0331e-02 L6_sharp:1.8635e-02 L7_sharp:3.0448e-02 L8_sharp:6.0512e-02 L9_sharp:6.9768e-02 L10_sharp:9.1480e-02 L11_sharp:1.3843e-01 L12_sharp:3.0776e-01 total_fnorm:1.7344e+00 total_l1_linf:2.8160e+03 total_spectral:8.8672e-01 L1_fnorm:2.4805e-01 L2_fnorm:2.4707e-01 L3_fnorm:2.4609e-01 L4_fnorm:2.4414e-01 L5_fnorm:2.4316e-01 L6_fnorm:2.4316e-01 L7_fnorm:2.4121e-01 L8_fnorm:2.3926e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4609e-01 L11_fnorm:2.4902e-01 L12_fnorm:2.2656e-01 L1_l1linf:6.5918e-02 L2_l1linf:6.4453e-02 L3_l1linf:6.3965e-02 L4_l1linf:6.2988e-02 L5_l1linf:6.0059e-02 L6_l1linf:6.0059e-02 L7_l1linf:5.9814e-02 L8_l1linf:5.8350e-02 L9_l1linf:6.0059e-02 L10_l1linf:6.0059e-02 L11_l1linf:6.1035e-02 L12_l1linf:3.4180e-02 L1_spectral:3.2367e-03 L2_spectral:3.2279e-03 L3_spectral:3.2157e-03 L4_spectral:3.2203e-03 L5_spectral:3.2098e-03 L6_spectral:3.2138e-03 L7_spectral:3.1978e-03 L8_spectral:3.1819e-03 L9_spectral:3.1977e-03 L10_spectral:3.1937e-03 L11_spectral:3.2008e-03 L12_spectral:3.1859e-03 train_time:241431ms step_avg:43.11ms +[2025-09-11 13:42:21] [Rank 0] PRINT: step:5600/10000 
val_loss:5.5255 total_sharp:7.0490e-02 L1_sharp:6.2035e-03 L2_sharp:5.1831e-03 L3_sharp:7.2876e-03 L4_sharp:8.1264e-03 L5_sharp:1.0331e-02 L6_sharp:1.8635e-02 L7_sharp:3.0448e-02 L8_sharp:6.0512e-02 L9_sharp:6.9768e-02 L10_sharp:9.1480e-02 L11_sharp:1.3843e-01 L12_sharp:3.0776e-01 total_fnorm:1.7344e+00 total_l1_linf:2.8160e+03 total_spectral:8.8672e-01 L1_fnorm:2.4805e-01 L2_fnorm:2.4707e-01 L3_fnorm:2.4609e-01 L4_fnorm:2.4414e-01 L5_fnorm:2.4316e-01 L6_fnorm:2.4316e-01 L7_fnorm:2.4121e-01 L8_fnorm:2.3926e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4609e-01 L11_fnorm:2.4902e-01 L12_fnorm:2.2656e-01 L1_l1linf:6.5918e-02 L2_l1linf:6.4453e-02 L3_l1linf:6.3965e-02 L4_l1linf:6.2988e-02 L5_l1linf:6.0059e-02 L6_l1linf:6.0059e-02 L7_l1linf:5.9814e-02 L8_l1linf:5.8350e-02 L9_l1linf:6.0059e-02 L10_l1linf:6.0059e-02 L11_l1linf:6.1035e-02 L12_l1linf:3.4180e-02 L1_spectral:3.2367e-03 L2_spectral:3.2279e-03 L3_spectral:3.2157e-03 L4_spectral:3.2203e-03 L5_spectral:3.2098e-03 L6_spectral:3.2138e-03 L7_spectral:3.1978e-03 L8_spectral:3.1819e-03 L9_spectral:3.1977e-03 L10_spectral:3.1937e-03 L11_spectral:3.2008e-03 L12_spectral:3.1859e-03 train_time:241431ms step_avg:43.11ms +[2025-09-11 13:42:23] [Rank 0] step:5601/10000 train_time:243297ms step_avg:43.44ms +[2025-09-11 13:42:23] [Rank 0] step:5601/10000 train_time:243297ms step_avg:43.44ms +[2025-09-11 13:42:24] [Rank 0] step:5621/10000 train_time:243999ms step_avg:43.41ms +[2025-09-11 13:42:24] [Rank 0] step:5621/10000 train_time:243999ms step_avg:43.41ms +[2025-09-11 13:42:25] [Rank 0] step:5641/10000 train_time:244687ms step_avg:43.38ms +[2025-09-11 13:42:25] [Rank 0] step:5641/10000 train_time:244687ms step_avg:43.38ms +[2025-09-11 13:42:25] [Rank 0] step:5661/10000 train_time:245374ms step_avg:43.34ms +[2025-09-11 13:42:25] [Rank 0] step:5661/10000 train_time:245374ms step_avg:43.34ms +[2025-09-11 13:42:26] [Rank 0] step:5681/10000 train_time:246063ms step_avg:43.31ms +[2025-09-11 13:42:26] [Rank 0] step:5681/10000 
train_time:246063ms step_avg:43.31ms +[2025-09-11 13:42:27] [Rank 0] step:5701/10000 train_time:246754ms step_avg:43.28ms +[2025-09-11 13:42:27] [Rank 0] step:5701/10000 train_time:246754ms step_avg:43.28ms +[2025-09-11 13:42:27] [Rank 0] step:5721/10000 train_time:247442ms step_avg:43.25ms +[2025-09-11 13:42:27] [Rank 0] step:5721/10000 train_time:247442ms step_avg:43.25ms +[2025-09-11 13:42:28] [Rank 0] step:5741/10000 train_time:248131ms step_avg:43.22ms +[2025-09-11 13:42:28] [Rank 0] step:5741/10000 train_time:248131ms step_avg:43.22ms +[2025-09-11 13:42:29] [Rank 0] step:5761/10000 train_time:248821ms step_avg:43.19ms +[2025-09-11 13:42:29] [Rank 0] step:5761/10000 train_time:248821ms step_avg:43.19ms +[2025-09-11 13:42:29] [Rank 0] step:5781/10000 train_time:249511ms step_avg:43.16ms +[2025-09-11 13:42:29] [Rank 0] step:5781/10000 train_time:249511ms step_avg:43.16ms +[2025-09-11 13:42:30] [Rank 0] step:5801/10000 train_time:250202ms step_avg:43.13ms +[2025-09-11 13:42:30] [Rank 0] step:5801/10000 train_time:250202ms step_avg:43.13ms +[2025-09-11 13:42:31] [Rank 0] step:5821/10000 train_time:250890ms step_avg:43.10ms +[2025-09-11 13:42:31] [Rank 0] step:5821/10000 train_time:250890ms step_avg:43.10ms +[2025-09-11 13:42:31] [Rank 0] step:5841/10000 train_time:251580ms step_avg:43.07ms +[2025-09-11 13:42:31] [Rank 0] step:5841/10000 train_time:251580ms step_avg:43.07ms +[2025-09-11 13:42:32] [Rank 0] step:5861/10000 train_time:252269ms step_avg:43.04ms +[2025-09-11 13:42:32] [Rank 0] step:5861/10000 train_time:252269ms step_avg:43.04ms +[2025-09-11 13:42:33] [Rank 0] step:5881/10000 train_time:252957ms step_avg:43.01ms +[2025-09-11 13:42:33] [Rank 0] step:5881/10000 train_time:252957ms step_avg:43.01ms +[2025-09-11 13:42:34] [Rank 0] step:5901/10000 train_time:253645ms step_avg:42.98ms +[2025-09-11 13:42:34] [Rank 0] step:5901/10000 train_time:253645ms step_avg:42.98ms +[2025-09-11 13:42:34] [Rank 0] step:5921/10000 train_time:254336ms step_avg:42.95ms 
+[2025-09-11 13:42:34] [Rank 0] step:5921/10000 train_time:254336ms step_avg:42.95ms +[2025-09-11 13:42:35] [Rank 0] step:5941/10000 train_time:255026ms step_avg:42.93ms +[2025-09-11 13:42:35] [Rank 0] step:5941/10000 train_time:255026ms step_avg:42.93ms +[2025-09-11 13:42:36] [Rank 0] step:5961/10000 train_time:255717ms step_avg:42.90ms +[2025-09-11 13:42:36] [Rank 0] step:5961/10000 train_time:255717ms step_avg:42.90ms +[2025-09-11 13:42:36] [Rank 0] step:5981/10000 train_time:256406ms step_avg:42.87ms +[2025-09-11 13:42:36] [Rank 0] step:5981/10000 train_time:256406ms step_avg:42.87ms +[2025-09-11 13:42:37] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 13:42:37] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 13:42:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 13:42:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 13:42:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 13:42:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 13:42:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:42:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:42:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 13:42:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 13:42:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 13:42:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 13:42:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 13:42:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 13:42:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 13:42:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 13:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 13:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 13:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 13:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 13:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 13:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 13:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 13:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 13:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 13:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 13:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 13:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 13:42:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 13:42:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 13:42:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 13:42:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 13:42:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 13:42:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 13:42:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 13:42:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 13:42:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 13:42:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 13:42:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 13:42:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 13:42:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 13:42:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 13:42:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 13:42:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 13:42:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 13:42:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:42:48] [Rank 0] PRINT: step:6000/10000 val_loss:5.4933 total_sharp:9.3062e-02 L1_sharp:9.5790e-03 L2_sharp:6.6045e-03 L3_sharp:7.2450e-03 L4_sharp:7.8550e-03 L5_sharp:1.3503e-02 L6_sharp:1.9111e-02 L7_sharp:2.9904e-02 L8_sharp:6.0291e-02 L9_sharp:6.9247e-02 L10_sharp:1.0503e-01 L11_sharp:1.5039e-01 L12_sharp:4.8094e-01 total_fnorm:1.6641e+00 total_l1_linf:2.7200e+03 total_spectral:8.5156e-01 L1_fnorm:2.4805e-01 L2_fnorm:2.4707e-01 L3_fnorm:2.4512e-01 L4_fnorm:2.4414e-01 L5_fnorm:2.4316e-01 L6_fnorm:2.4219e-01 L7_fnorm:2.4121e-01 L8_fnorm:2.4023e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4414e-01 L11_fnorm:2.4805e-01 L12_fnorm:2.2754e-01 L1_l1linf:6.4941e-02 L2_l1linf:6.3965e-02 L3_l1linf:6.3477e-02 L4_l1linf:6.2012e-02 L5_l1linf:5.9814e-02 L6_l1linf:5.9814e-02 L7_l1linf:5.8594e-02 L8_l1linf:5.8594e-02 L9_l1linf:5.8105e-02 L10_l1linf:6.0059e-02 L11_l1linf:6.0547e-02 L12_l1linf:3.4180e-02 L1_spectral:3.2568e-03 L2_spectral:3.2365e-03 L3_spectral:3.2136e-03 L4_spectral:3.2185e-03 L5_spectral:3.2069e-03 L6_spectral:3.2032e-03 L7_spectral:3.2019e-03 L8_spectral:3.1763e-03 L9_spectral:3.2090e-03 L10_spectral:3.2137e-03 L11_spectral:3.2000e-03 L12_spectral:3.2023e-03 train_time:257080ms step_avg:42.85ms +[2025-09-11 13:42:48] [Rank 0] PRINT: step:6000/10000 val_loss:5.4933 total_sharp:9.3062e-02 L1_sharp:9.5790e-03 L2_sharp:6.6045e-03 L3_sharp:7.2450e-03 L4_sharp:7.8550e-03 L5_sharp:1.3503e-02 L6_sharp:1.9111e-02 L7_sharp:2.9904e-02 L8_sharp:6.0291e-02 L9_sharp:6.9247e-02 L10_sharp:1.0503e-01 L11_sharp:1.5039e-01 L12_sharp:4.8094e-01 total_fnorm:1.6641e+00 total_l1_linf:2.7200e+03 total_spectral:8.5156e-01 L1_fnorm:2.4805e-01 L2_fnorm:2.4707e-01 L3_fnorm:2.4512e-01 L4_fnorm:2.4414e-01 L5_fnorm:2.4316e-01 L6_fnorm:2.4219e-01 L7_fnorm:2.4121e-01 L8_fnorm:2.4023e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4414e-01 L11_fnorm:2.4805e-01 L12_fnorm:2.2754e-01 L1_l1linf:6.4941e-02 L2_l1linf:6.3965e-02 L3_l1linf:6.3477e-02 L4_l1linf:6.2012e-02 L5_l1linf:5.9814e-02 
L6_l1linf:5.9814e-02 L7_l1linf:5.8594e-02 L8_l1linf:5.8594e-02 L9_l1linf:5.8105e-02 L10_l1linf:6.0059e-02 L11_l1linf:6.0547e-02 L12_l1linf:3.4180e-02 L1_spectral:3.2568e-03 L2_spectral:3.2365e-03 L3_spectral:3.2136e-03 L4_spectral:3.2185e-03 L5_spectral:3.2069e-03 L6_spectral:3.2032e-03 L7_spectral:3.2019e-03 L8_spectral:3.1763e-03 L9_spectral:3.2090e-03 L10_spectral:3.2137e-03 L11_spectral:3.2000e-03 L12_spectral:3.2023e-03 train_time:257080ms step_avg:42.85ms +[2025-09-11 13:42:50] [Rank 0] step:6001/10000 train_time:258928ms step_avg:43.15ms +[2025-09-11 13:42:50] [Rank 0] step:6001/10000 train_time:258928ms step_avg:43.15ms +[2025-09-11 13:42:51] [Rank 0] step:6021/10000 train_time:259633ms step_avg:43.12ms +[2025-09-11 13:42:51] [Rank 0] step:6021/10000 train_time:259633ms step_avg:43.12ms +[2025-09-11 13:42:51] [Rank 0] step:6041/10000 train_time:260326ms step_avg:43.09ms +[2025-09-11 13:42:51] [Rank 0] step:6041/10000 train_time:260326ms step_avg:43.09ms +[2025-09-11 13:42:52] [Rank 0] step:6061/10000 train_time:261020ms step_avg:43.07ms +[2025-09-11 13:42:52] [Rank 0] step:6061/10000 train_time:261020ms step_avg:43.07ms +[2025-09-11 13:42:53] [Rank 0] step:6081/10000 train_time:261711ms step_avg:43.04ms +[2025-09-11 13:42:53] [Rank 0] step:6081/10000 train_time:261711ms step_avg:43.04ms +[2025-09-11 13:42:53] [Rank 0] step:6101/10000 train_time:262401ms step_avg:43.01ms +[2025-09-11 13:42:53] [Rank 0] step:6101/10000 train_time:262401ms step_avg:43.01ms +[2025-09-11 13:42:54] [Rank 0] step:6121/10000 train_time:263092ms step_avg:42.98ms +[2025-09-11 13:42:54] [Rank 0] step:6121/10000 train_time:263092ms step_avg:42.98ms +[2025-09-11 13:42:55] [Rank 0] step:6141/10000 train_time:263784ms step_avg:42.95ms +[2025-09-11 13:42:55] [Rank 0] step:6141/10000 train_time:263784ms step_avg:42.95ms +[2025-09-11 13:42:55] [Rank 0] step:6161/10000 train_time:264474ms step_avg:42.93ms +[2025-09-11 13:42:55] [Rank 0] step:6161/10000 train_time:264474ms step_avg:42.93ms 
+[2025-09-11 13:42:56] [Rank 0] step:6181/10000 train_time:265164ms step_avg:42.90ms +[2025-09-11 13:42:56] [Rank 0] step:6181/10000 train_time:265164ms step_avg:42.90ms +[2025-09-11 13:42:57] [Rank 0] step:6201/10000 train_time:265855ms step_avg:42.87ms +[2025-09-11 13:42:57] [Rank 0] step:6201/10000 train_time:265855ms step_avg:42.87ms +[2025-09-11 13:42:58] [Rank 0] step:6221/10000 train_time:266547ms step_avg:42.85ms +[2025-09-11 13:42:58] [Rank 0] step:6221/10000 train_time:266547ms step_avg:42.85ms +[2025-09-11 13:42:58] [Rank 0] step:6241/10000 train_time:267239ms step_avg:42.82ms +[2025-09-11 13:42:58] [Rank 0] step:6241/10000 train_time:267239ms step_avg:42.82ms +[2025-09-11 13:42:59] [Rank 0] step:6261/10000 train_time:267928ms step_avg:42.79ms +[2025-09-11 13:42:59] [Rank 0] step:6261/10000 train_time:267928ms step_avg:42.79ms +[2025-09-11 13:43:00] [Rank 0] step:6281/10000 train_time:268619ms step_avg:42.77ms +[2025-09-11 13:43:00] [Rank 0] step:6281/10000 train_time:268619ms step_avg:42.77ms +[2025-09-11 13:43:00] [Rank 0] step:6301/10000 train_time:269307ms step_avg:42.74ms +[2025-09-11 13:43:00] [Rank 0] step:6301/10000 train_time:269307ms step_avg:42.74ms +[2025-09-11 13:43:01] [Rank 0] step:6321/10000 train_time:270001ms step_avg:42.71ms +[2025-09-11 13:43:01] [Rank 0] step:6321/10000 train_time:270001ms step_avg:42.71ms +[2025-09-11 13:43:02] [Rank 0] step:6341/10000 train_time:270694ms step_avg:42.69ms +[2025-09-11 13:43:02] [Rank 0] step:6341/10000 train_time:270694ms step_avg:42.69ms +[2025-09-11 13:43:02] [Rank 0] step:6361/10000 train_time:271388ms step_avg:42.66ms +[2025-09-11 13:43:02] [Rank 0] step:6361/10000 train_time:271388ms step_avg:42.66ms +[2025-09-11 13:43:03] [Rank 0] step:6381/10000 train_time:272079ms step_avg:42.64ms +[2025-09-11 13:43:03] [Rank 0] step:6381/10000 train_time:272079ms step_avg:42.64ms +[2025-09-11 13:43:04] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 13:43:04] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 13:43:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 13:43:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 13:43:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 13:43:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 13:43:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:43:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:43:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 13:43:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 13:43:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 13:43:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 13:43:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 13:43:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 13:43:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 13:43:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 13:43:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 13:43:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 13:43:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 13:43:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 13:43:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 13:43:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 13:43:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 13:43:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 13:43:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 13:43:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 13:43:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 13:43:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 13:43:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 13:43:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 13:43:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 13:43:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 13:43:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 13:43:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 13:43:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 13:43:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 13:43:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 13:43:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 13:43:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 13:43:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 13:43:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 13:43:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 13:43:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 13:43:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 13:43:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 13:43:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 13:43:17] [Rank 0] PRINT: step:6400/10000 val_loss:5.4625 total_sharp:8.2807e-02 L1_sharp:7.6683e-03 L2_sharp:5.3742e-03 L3_sharp:5.9559e-03 L4_sharp:9.3073e-03 L5_sharp:1.4858e-02 L6_sharp:2.1472e-02 L7_sharp:3.2488e-02 L8_sharp:6.4210e-02 L9_sharp:6.9504e-02 L10_sharp:8.8484e-02 L11_sharp:1.2042e-01 L12_sharp:3.4655e-01 total_fnorm:1.5000e+00 total_l1_linf:2.3040e+03 total_spectral:7.5000e-01 L1_fnorm:2.2168e-01 L2_fnorm:2.2070e-01 L3_fnorm:2.1875e-01 L4_fnorm:2.1777e-01 L5_fnorm:2.1680e-01 L6_fnorm:2.1680e-01 L7_fnorm:2.1582e-01 L8_fnorm:2.1289e-01 L9_fnorm:2.1680e-01 L10_fnorm:2.1680e-01 L11_fnorm:2.2070e-01 L12_fnorm:2.0215e-01 L1_l1linf:5.6641e-02 L2_l1linf:5.5908e-02 L3_l1linf:5.4199e-02 L4_l1linf:5.3467e-02 L5_l1linf:5.2002e-02 L6_l1linf:5.0781e-02 L7_l1linf:5.0049e-02 L8_l1linf:4.9316e-02 L9_l1linf:5.0781e-02 L10_l1linf:5.0537e-02 L11_l1linf:5.0537e-02 L12_l1linf:2.9175e-02 L1_spectral:2.9369e-03 L2_spectral:2.9345e-03 L3_spectral:2.9131e-03 L4_spectral:2.9160e-03 L5_spectral:2.9071e-03 L6_spectral:2.9132e-03 L7_spectral:2.9042e-03 L8_spectral:2.8831e-03 L9_spectral:2.9240e-03 L10_spectral:2.9190e-03 L11_spectral:2.9133e-03 L12_spectral:2.9049e-03 train_time:272751ms step_avg:42.62ms +[2025-09-11 13:43:17] [Rank 0] PRINT: step:6400/10000 
val_loss:5.4625 total_sharp:8.2807e-02 L1_sharp:7.6683e-03 L2_sharp:5.3742e-03 L3_sharp:5.9559e-03 L4_sharp:9.3073e-03 L5_sharp:1.4858e-02 L6_sharp:2.1472e-02 L7_sharp:3.2488e-02 L8_sharp:6.4210e-02 L9_sharp:6.9504e-02 L10_sharp:8.8484e-02 L11_sharp:1.2042e-01 L12_sharp:3.4655e-01 total_fnorm:1.5000e+00 total_l1_linf:2.3040e+03 total_spectral:7.5000e-01 L1_fnorm:2.2168e-01 L2_fnorm:2.2070e-01 L3_fnorm:2.1875e-01 L4_fnorm:2.1777e-01 L5_fnorm:2.1680e-01 L6_fnorm:2.1680e-01 L7_fnorm:2.1582e-01 L8_fnorm:2.1289e-01 L9_fnorm:2.1680e-01 L10_fnorm:2.1680e-01 L11_fnorm:2.2070e-01 L12_fnorm:2.0215e-01 L1_l1linf:5.6641e-02 L2_l1linf:5.5908e-02 L3_l1linf:5.4199e-02 L4_l1linf:5.3467e-02 L5_l1linf:5.2002e-02 L6_l1linf:5.0781e-02 L7_l1linf:5.0049e-02 L8_l1linf:4.9316e-02 L9_l1linf:5.0781e-02 L10_l1linf:5.0537e-02 L11_l1linf:5.0537e-02 L12_l1linf:2.9175e-02 L1_spectral:2.9369e-03 L2_spectral:2.9345e-03 L3_spectral:2.9131e-03 L4_spectral:2.9160e-03 L5_spectral:2.9071e-03 L6_spectral:2.9132e-03 L7_spectral:2.9042e-03 L8_spectral:2.8831e-03 L9_spectral:2.9240e-03 L10_spectral:2.9190e-03 L11_spectral:2.9133e-03 L12_spectral:2.9049e-03 train_time:272751ms step_avg:42.62ms +[2025-09-11 13:43:18] [Rank 0] step:6401/10000 train_time:274633ms step_avg:42.90ms +[2025-09-11 13:43:18] [Rank 0] step:6401/10000 train_time:274633ms step_avg:42.90ms +[2025-09-11 13:43:19] [Rank 0] step:6421/10000 train_time:275335ms step_avg:42.88ms +[2025-09-11 13:43:19] [Rank 0] step:6421/10000 train_time:275335ms step_avg:42.88ms +[2025-09-11 13:43:20] [Rank 0] step:6441/10000 train_time:276025ms step_avg:42.85ms +[2025-09-11 13:43:20] [Rank 0] step:6441/10000 train_time:276025ms step_avg:42.85ms +[2025-09-11 13:43:21] [Rank 0] step:6461/10000 train_time:276716ms step_avg:42.83ms +[2025-09-11 13:43:21] [Rank 0] step:6461/10000 train_time:276716ms step_avg:42.83ms +[2025-09-11 13:43:21] [Rank 0] step:6481/10000 train_time:277411ms step_avg:42.80ms +[2025-09-11 13:43:21] [Rank 0] step:6481/10000 
train_time:277411ms step_avg:42.80ms +[2025-09-11 13:43:22] [Rank 0] step:6501/10000 train_time:278104ms step_avg:42.78ms +[2025-09-11 13:43:22] [Rank 0] step:6501/10000 train_time:278104ms step_avg:42.78ms +[2025-09-11 13:43:23] [Rank 0] step:6521/10000 train_time:278795ms step_avg:42.75ms +[2025-09-11 13:43:23] [Rank 0] step:6521/10000 train_time:278795ms step_avg:42.75ms +[2025-09-11 13:43:23] [Rank 0] step:6541/10000 train_time:279485ms step_avg:42.73ms +[2025-09-11 13:43:23] [Rank 0] step:6541/10000 train_time:279485ms step_avg:42.73ms +[2025-09-11 13:43:24] [Rank 0] step:6561/10000 train_time:280174ms step_avg:42.70ms +[2025-09-11 13:43:24] [Rank 0] step:6561/10000 train_time:280174ms step_avg:42.70ms +[2025-09-11 13:43:25] [Rank 0] step:6581/10000 train_time:280866ms step_avg:42.68ms +[2025-09-11 13:43:25] [Rank 0] step:6581/10000 train_time:280866ms step_avg:42.68ms +[2025-09-11 13:43:25] [Rank 0] step:6601/10000 train_time:281557ms step_avg:42.65ms +[2025-09-11 13:43:25] [Rank 0] step:6601/10000 train_time:281557ms step_avg:42.65ms +[2025-09-11 13:43:26] [Rank 0] step:6621/10000 train_time:282247ms step_avg:42.63ms +[2025-09-11 13:43:26] [Rank 0] step:6621/10000 train_time:282247ms step_avg:42.63ms +[2025-09-11 13:43:27] [Rank 0] step:6641/10000 train_time:282938ms step_avg:42.60ms +[2025-09-11 13:43:27] [Rank 0] step:6641/10000 train_time:282938ms step_avg:42.60ms +[2025-09-11 13:43:27] [Rank 0] step:6661/10000 train_time:283630ms step_avg:42.58ms +[2025-09-11 13:43:27] [Rank 0] step:6661/10000 train_time:283630ms step_avg:42.58ms +[2025-09-11 13:43:28] [Rank 0] step:6681/10000 train_time:284327ms step_avg:42.56ms +[2025-09-11 13:43:28] [Rank 0] step:6681/10000 train_time:284327ms step_avg:42.56ms +[2025-09-11 13:43:29] [Rank 0] step:6701/10000 train_time:285025ms step_avg:42.53ms +[2025-09-11 13:43:29] [Rank 0] step:6701/10000 train_time:285025ms step_avg:42.53ms +[2025-09-11 13:43:30] [Rank 0] step:6721/10000 train_time:285725ms step_avg:42.51ms 
+[2025-09-11 13:43:30] [Rank 0] step:6721/10000 train_time:285725ms step_avg:42.51ms +[2025-09-11 13:43:30] [Rank 0] step:6741/10000 train_time:286424ms step_avg:42.49ms +[2025-09-11 13:43:30] [Rank 0] step:6741/10000 train_time:286424ms step_avg:42.49ms +[2025-09-11 13:43:31] [Rank 0] step:6761/10000 train_time:287121ms step_avg:42.47ms +[2025-09-11 13:43:31] [Rank 0] step:6761/10000 train_time:287121ms step_avg:42.47ms +[2025-09-11 13:43:32] [Rank 0] step:6781/10000 train_time:287819ms step_avg:42.44ms +[2025-09-11 13:43:32] [Rank 0] step:6781/10000 train_time:287819ms step_avg:42.44ms +[2025-09-11 13:43:32] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 13:43:32] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 13:43:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 13:43:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 13:43:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 13:43:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 13:43:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:43:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:43:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 13:43:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 13:43:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 13:43:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 13:43:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 13:43:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 13:43:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 13:43:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 13:43:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 13:43:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 13:43:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 13:43:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 13:43:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 13:43:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 13:43:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 13:43:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 13:43:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 13:43:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 13:43:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 13:43:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 13:43:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 13:43:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 13:43:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 13:43:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 13:43:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 13:43:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 13:43:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 13:43:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 13:43:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 13:43:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 13:43:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 13:43:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 13:43:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 13:43:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 13:43:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 13:43:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 13:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 13:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:43:43] [Rank 0] PRINT: step:6800/10000 val_loss:5.4412 total_sharp:6.7813e-02 L1_sharp:6.3881e-03 L2_sharp:5.2194e-03 L3_sharp:2.7808e-03 L4_sharp:7.5913e-03 L5_sharp:1.2219e-02 L6_sharp:1.7841e-02 L7_sharp:2.9085e-02 L8_sharp:5.3876e-02 L9_sharp:6.2435e-02 L10_sharp:8.0732e-02 L11_sharp:1.1197e-01 L12_sharp:2.4923e-01 total_fnorm:1.3594e+00 total_l1_linf:1.9040e+03 total_spectral:6.6016e-01 L1_fnorm:1.9434e-01 L2_fnorm:1.9238e-01 L3_fnorm:1.9141e-01 L4_fnorm:1.9043e-01 L5_fnorm:1.8848e-01 L6_fnorm:1.8848e-01 L7_fnorm:1.8750e-01 L8_fnorm:1.8457e-01 L9_fnorm:1.8848e-01 L10_fnorm:1.8848e-01 L11_fnorm:1.9141e-01 L12_fnorm:1.7676e-01 L1_l1linf:4.8340e-02 L2_l1linf:4.6143e-02 L3_l1linf:4.4922e-02 L4_l1linf:4.3945e-02 L5_l1linf:4.3701e-02 L6_l1linf:4.2236e-02 L7_l1linf:4.1260e-02 L8_l1linf:4.1016e-02 L9_l1linf:4.0527e-02 L10_l1linf:4.1260e-02 L11_l1linf:4.1504e-02 L12_l1linf:2.4780e-02 L1_spectral:2.6100e-03 L2_spectral:2.5863e-03 L3_spectral:2.6209e-03 L4_spectral:2.5922e-03 L5_spectral:2.6079e-03 L6_spectral:2.5911e-03 L7_spectral:2.5819e-03 L8_spectral:2.5734e-03 L9_spectral:2.6131e-03 L10_spectral:2.5988e-03 L11_spectral:2.5964e-03 L12_spectral:2.5809e-03 train_time:288496ms step_avg:42.43ms +[2025-09-11 13:43:43] [Rank 0] PRINT: step:6800/10000 val_loss:5.4412 total_sharp:6.7813e-02 L1_sharp:6.3881e-03 L2_sharp:5.2194e-03 L3_sharp:2.7808e-03 L4_sharp:7.5913e-03 L5_sharp:1.2219e-02 L6_sharp:1.7841e-02 L7_sharp:2.9085e-02 L8_sharp:5.3876e-02 L9_sharp:6.2435e-02 L10_sharp:8.0732e-02 L11_sharp:1.1197e-01 L12_sharp:2.4923e-01 total_fnorm:1.3594e+00 total_l1_linf:1.9040e+03 total_spectral:6.6016e-01 L1_fnorm:1.9434e-01 L2_fnorm:1.9238e-01 L3_fnorm:1.9141e-01 L4_fnorm:1.9043e-01 L5_fnorm:1.8848e-01 L6_fnorm:1.8848e-01 L7_fnorm:1.8750e-01 L8_fnorm:1.8457e-01 L9_fnorm:1.8848e-01 L10_fnorm:1.8848e-01 L11_fnorm:1.9141e-01 L12_fnorm:1.7676e-01 L1_l1linf:4.8340e-02 L2_l1linf:4.6143e-02 L3_l1linf:4.4922e-02 L4_l1linf:4.3945e-02 L5_l1linf:4.3701e-02 
L6_l1linf:4.2236e-02 L7_l1linf:4.1260e-02 L8_l1linf:4.1016e-02 L9_l1linf:4.0527e-02 L10_l1linf:4.1260e-02 L11_l1linf:4.1504e-02 L12_l1linf:2.4780e-02 L1_spectral:2.6100e-03 L2_spectral:2.5863e-03 L3_spectral:2.6209e-03 L4_spectral:2.5922e-03 L5_spectral:2.6079e-03 L6_spectral:2.5911e-03 L7_spectral:2.5819e-03 L8_spectral:2.5734e-03 L9_spectral:2.6131e-03 L10_spectral:2.5988e-03 L11_spectral:2.5964e-03 L12_spectral:2.5809e-03 train_time:288496ms step_avg:42.43ms +[2025-09-11 13:43:45] [Rank 0] step:6801/10000 train_time:290372ms step_avg:42.70ms +[2025-09-11 13:43:45] [Rank 0] step:6801/10000 train_time:290372ms step_avg:42.70ms +[2025-09-11 13:43:46] [Rank 0] step:6821/10000 train_time:291076ms step_avg:42.67ms +[2025-09-11 13:43:46] [Rank 0] step:6821/10000 train_time:291076ms step_avg:42.67ms +[2025-09-11 13:43:47] [Rank 0] step:6841/10000 train_time:291778ms step_avg:42.65ms +[2025-09-11 13:43:47] [Rank 0] step:6841/10000 train_time:291778ms step_avg:42.65ms +[2025-09-11 13:43:47] [Rank 0] step:6861/10000 train_time:292477ms step_avg:42.63ms +[2025-09-11 13:43:47] [Rank 0] step:6861/10000 train_time:292477ms step_avg:42.63ms +[2025-09-11 13:43:48] [Rank 0] step:6881/10000 train_time:293178ms step_avg:42.61ms +[2025-09-11 13:43:48] [Rank 0] step:6881/10000 train_time:293178ms step_avg:42.61ms +[2025-09-11 13:43:49] [Rank 0] step:6901/10000 train_time:293877ms step_avg:42.58ms +[2025-09-11 13:43:49] [Rank 0] step:6901/10000 train_time:293877ms step_avg:42.58ms +[2025-09-11 13:43:49] [Rank 0] step:6921/10000 train_time:294575ms step_avg:42.56ms +[2025-09-11 13:43:49] [Rank 0] step:6921/10000 train_time:294575ms step_avg:42.56ms +[2025-09-11 13:43:51] [Rank 0] step:6941/10000 train_time:295433ms step_avg:42.56ms +[2025-09-11 13:43:51] [Rank 0] step:6941/10000 train_time:295433ms step_avg:42.56ms +[2025-09-11 13:43:51] [Rank 0] step:6961/10000 train_time:296278ms step_avg:42.56ms +[2025-09-11 13:43:51] [Rank 0] step:6961/10000 train_time:296278ms step_avg:42.56ms 
+[2025-09-11 13:43:52] [Rank 0] step:6981/10000 train_time:296979ms step_avg:42.54ms +[2025-09-11 13:43:52] [Rank 0] step:6981/10000 train_time:296979ms step_avg:42.54ms +[2025-09-11 13:43:53] [Rank 0] step:7001/10000 train_time:297678ms step_avg:42.52ms +[2025-09-11 13:43:53] [Rank 0] step:7001/10000 train_time:297678ms step_avg:42.52ms +[2025-09-11 13:43:54] [Rank 0] step:7021/10000 train_time:298670ms step_avg:42.54ms +[2025-09-11 13:43:54] [Rank 0] step:7021/10000 train_time:298670ms step_avg:42.54ms +[2025-09-11 13:43:54] [Rank 0] step:7041/10000 train_time:299367ms step_avg:42.52ms +[2025-09-11 13:43:54] [Rank 0] step:7041/10000 train_time:299367ms step_avg:42.52ms +[2025-09-11 13:43:55] [Rank 0] step:7061/10000 train_time:300067ms step_avg:42.50ms +[2025-09-11 13:43:55] [Rank 0] step:7061/10000 train_time:300067ms step_avg:42.50ms +[2025-09-11 13:43:56] [Rank 0] step:7081/10000 train_time:300765ms step_avg:42.47ms +[2025-09-11 13:43:56] [Rank 0] step:7081/10000 train_time:300765ms step_avg:42.47ms +[2025-09-11 13:43:56] [Rank 0] step:7101/10000 train_time:301464ms step_avg:42.45ms +[2025-09-11 13:43:56] [Rank 0] step:7101/10000 train_time:301464ms step_avg:42.45ms +[2025-09-11 13:43:57] [Rank 0] step:7121/10000 train_time:302164ms step_avg:42.43ms +[2025-09-11 13:43:57] [Rank 0] step:7121/10000 train_time:302164ms step_avg:42.43ms +[2025-09-11 13:43:58] [Rank 0] step:7141/10000 train_time:302864ms step_avg:42.41ms +[2025-09-11 13:43:58] [Rank 0] step:7141/10000 train_time:302864ms step_avg:42.41ms +[2025-09-11 13:43:58] [Rank 0] step:7161/10000 train_time:303563ms step_avg:42.39ms +[2025-09-11 13:43:58] [Rank 0] step:7161/10000 train_time:303563ms step_avg:42.39ms +[2025-09-11 13:43:59] [Rank 0] step:7181/10000 train_time:304261ms step_avg:42.37ms +[2025-09-11 13:43:59] [Rank 0] step:7181/10000 train_time:304261ms step_avg:42.37ms +[2025-09-11 13:44:00] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 13:44:00] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 13:44:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 13:44:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 13:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 13:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 13:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 13:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 13:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 13:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 13:44:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 13:44:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 13:44:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 13:44:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 13:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 13:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 13:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 13:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 13:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 13:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 13:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 13:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 13:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 13:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 13:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 13:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 13:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 13:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 13:44:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 13:44:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 13:44:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 13:44:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 13:44:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 13:44:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 13:44:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 13:44:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 13:44:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 13:44:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 13:44:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 13:44:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 13:44:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 13:44:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 13:44:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 13:44:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 13:44:13] [Rank 0] PRINT: step:7200/10000 val_loss:5.4195 total_sharp:6.8425e-02 L1_sharp:3.1722e-03 L2_sharp:2.6486e-03 L3_sharp:4.3913e-03 L4_sharp:4.1347e-03 L5_sharp:8.9024e-03 L6_sharp:1.5108e-02 L7_sharp:3.2616e-02 L8_sharp:5.1083e-02 L9_sharp:5.6598e-02 L10_sharp:7.3179e-02 L11_sharp:1.0215e-01 L12_sharp:2.8307e-01 total_fnorm:1.0703e+00 total_l1_linf:1.5040e+03 total_spectral:5.3906e-01 L1_fnorm:1.6797e-01 L2_fnorm:1.6602e-01 L3_fnorm:1.6504e-01 L4_fnorm:1.6504e-01 L5_fnorm:1.6406e-01 L6_fnorm:1.6406e-01 L7_fnorm:1.6211e-01 L8_fnorm:1.6016e-01 L9_fnorm:1.6211e-01 L10_fnorm:1.6211e-01 L11_fnorm:1.6504e-01 L12_fnorm:1.5039e-01 L1_l1linf:3.9062e-02 L2_l1linf:3.8574e-02 L3_l1linf:3.7354e-02 L4_l1linf:3.7109e-02 L5_l1linf:3.5645e-02 L6_l1linf:3.5645e-02 L7_l1linf:3.4668e-02 L8_l1linf:3.3691e-02 L9_l1linf:3.3203e-02 L10_l1linf:3.3447e-02 L11_l1linf:3.3936e-02 L12_l1linf:2.0630e-02 L1_spectral:2.3323e-03 L2_spectral:2.3298e-03 L3_spectral:2.3210e-03 L4_spectral:2.3303e-03 L5_spectral:2.3196e-03 L6_spectral:2.3325e-03 L7_spectral:2.3053e-03 L8_spectral:2.2686e-03 L9_spectral:2.2899e-03 L10_spectral:2.2759e-03 L11_spectral:2.2818e-03 L12_spectral:2.2808e-03 train_time:304940ms step_avg:42.35ms +[2025-09-11 13:44:13] [Rank 0] PRINT: step:7200/10000 
val_loss:5.4195 total_sharp:6.8425e-02 L1_sharp:3.1722e-03 L2_sharp:2.6486e-03 L3_sharp:4.3913e-03 L4_sharp:4.1347e-03 L5_sharp:8.9024e-03 L6_sharp:1.5108e-02 L7_sharp:3.2616e-02 L8_sharp:5.1083e-02 L9_sharp:5.6598e-02 L10_sharp:7.3179e-02 L11_sharp:1.0215e-01 L12_sharp:2.8307e-01 total_fnorm:1.0703e+00 total_l1_linf:1.5040e+03 total_spectral:5.3906e-01 L1_fnorm:1.6797e-01 L2_fnorm:1.6602e-01 L3_fnorm:1.6504e-01 L4_fnorm:1.6504e-01 L5_fnorm:1.6406e-01 L6_fnorm:1.6406e-01 L7_fnorm:1.6211e-01 L8_fnorm:1.6016e-01 L9_fnorm:1.6211e-01 L10_fnorm:1.6211e-01 L11_fnorm:1.6504e-01 L12_fnorm:1.5039e-01 L1_l1linf:3.9062e-02 L2_l1linf:3.8574e-02 L3_l1linf:3.7354e-02 L4_l1linf:3.7109e-02 L5_l1linf:3.5645e-02 L6_l1linf:3.5645e-02 L7_l1linf:3.4668e-02 L8_l1linf:3.3691e-02 L9_l1linf:3.3203e-02 L10_l1linf:3.3447e-02 L11_l1linf:3.3936e-02 L12_l1linf:2.0630e-02 L1_spectral:2.3323e-03 L2_spectral:2.3298e-03 L3_spectral:2.3210e-03 L4_spectral:2.3303e-03 L5_spectral:2.3196e-03 L6_spectral:2.3325e-03 L7_spectral:2.3053e-03 L8_spectral:2.2686e-03 L9_spectral:2.2899e-03 L10_spectral:2.2759e-03 L11_spectral:2.2818e-03 L12_spectral:2.2808e-03 train_time:304940ms step_avg:42.35ms +[2025-09-11 13:44:15] [Rank 0] step:7201/10000 train_time:306887ms step_avg:42.62ms +[2025-09-11 13:44:15] [Rank 0] step:7201/10000 train_time:306887ms step_avg:42.62ms +[2025-09-11 13:44:16] [Rank 0] step:7221/10000 train_time:307608ms step_avg:42.60ms +[2025-09-11 13:44:16] [Rank 0] step:7221/10000 train_time:307608ms step_avg:42.60ms +[2025-09-11 13:44:17] [Rank 0] step:7241/10000 train_time:308308ms step_avg:42.58ms +[2025-09-11 13:44:17] [Rank 0] step:7241/10000 train_time:308308ms step_avg:42.58ms +[2025-09-11 13:44:18] [Rank 0] step:7261/10000 train_time:309010ms step_avg:42.56ms +[2025-09-11 13:44:18] [Rank 0] step:7261/10000 train_time:309010ms step_avg:42.56ms +[2025-09-11 13:44:18] [Rank 0] step:7281/10000 train_time:309715ms step_avg:42.54ms +[2025-09-11 13:44:18] [Rank 0] step:7281/10000 
train_time:309715ms step_avg:42.54ms +[2025-09-11 13:44:19] [Rank 0] step:7301/10000 train_time:310415ms step_avg:42.52ms +[2025-09-11 13:44:19] [Rank 0] step:7301/10000 train_time:310415ms step_avg:42.52ms +[2025-09-11 13:44:20] [Rank 0] step:7321/10000 train_time:311115ms step_avg:42.50ms +[2025-09-11 13:44:20] [Rank 0] step:7321/10000 train_time:311115ms step_avg:42.50ms +[2025-09-11 13:44:20] [Rank 0] step:7341/10000 train_time:311816ms step_avg:42.48ms +[2025-09-11 13:44:20] [Rank 0] step:7341/10000 train_time:311816ms step_avg:42.48ms +[2025-09-11 13:44:21] [Rank 0] step:7361/10000 train_time:312517ms step_avg:42.46ms +[2025-09-11 13:44:21] [Rank 0] step:7361/10000 train_time:312517ms step_avg:42.46ms +[2025-09-11 13:44:22] [Rank 0] step:7381/10000 train_time:313219ms step_avg:42.44ms +[2025-09-11 13:44:22] [Rank 0] step:7381/10000 train_time:313219ms step_avg:42.44ms +[2025-09-11 13:44:22] [Rank 0] step:7401/10000 train_time:313917ms step_avg:42.42ms +[2025-09-11 13:44:22] [Rank 0] step:7401/10000 train_time:313917ms step_avg:42.42ms +[2025-09-11 13:44:23] [Rank 0] step:7421/10000 train_time:314617ms step_avg:42.40ms +[2025-09-11 13:44:23] [Rank 0] step:7421/10000 train_time:314617ms step_avg:42.40ms +[2025-09-11 13:44:24] [Rank 0] step:7441/10000 train_time:315317ms step_avg:42.38ms +[2025-09-11 13:44:24] [Rank 0] step:7441/10000 train_time:315317ms step_avg:42.38ms +[2025-09-11 13:44:25] [Rank 0] step:7461/10000 train_time:316018ms step_avg:42.36ms +[2025-09-11 13:44:25] [Rank 0] step:7461/10000 train_time:316018ms step_avg:42.36ms +[2025-09-11 13:44:25] [Rank 0] step:7481/10000 train_time:316720ms step_avg:42.34ms +[2025-09-11 13:44:25] [Rank 0] step:7481/10000 train_time:316720ms step_avg:42.34ms +[2025-09-11 13:44:26] [Rank 0] step:7501/10000 train_time:317424ms step_avg:42.32ms +[2025-09-11 13:44:26] [Rank 0] step:7501/10000 train_time:317424ms step_avg:42.32ms +[2025-09-11 13:44:27] [Rank 0] step:7521/10000 train_time:318126ms step_avg:42.30ms 
+[2025-09-11 13:44:27] [Rank 0] step:7521/10000 train_time:318126ms step_avg:42.30ms +[2025-09-11 13:44:27] [Rank 0] step:7541/10000 train_time:318825ms step_avg:42.28ms +[2025-09-11 13:44:27] [Rank 0] step:7541/10000 train_time:318825ms step_avg:42.28ms +[2025-09-11 13:44:28] [Rank 0] step:7561/10000 train_time:319527ms step_avg:42.26ms +[2025-09-11 13:44:28] [Rank 0] step:7561/10000 train_time:319527ms step_avg:42.26ms +[2025-09-11 13:44:29] [Rank 0] step:7581/10000 train_time:320229ms step_avg:42.24ms +[2025-09-11 13:44:29] [Rank 0] step:7581/10000 train_time:320229ms step_avg:42.24ms +[2025-09-11 13:44:29] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 13:44:29] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 13:44:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 13:44:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 13:44:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 13:44:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 13:44:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:44:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:44:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 13:44:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 13:44:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 13:44:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 13:44:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 13:44:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 13:44:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 13:44:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 13:44:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 13:44:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 13:44:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 13:44:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 13:44:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 13:44:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 13:44:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 13:44:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 13:44:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 13:44:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 13:44:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 13:44:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 13:44:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 13:44:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 13:44:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 13:44:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 13:44:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 13:44:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 13:44:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 13:44:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 13:44:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 13:44:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 13:44:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 13:44:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 13:44:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 13:44:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 13:44:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 13:44:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 13:44:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 13:44:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:44:41] [Rank 0] PRINT: step:7600/10000 val_loss:5.4045 total_sharp:7.8515e-02 L1_sharp:6.4966e-03 L2_sharp:5.4544e-03 L3_sharp:5.4480e-03 L4_sharp:7.7530e-03 L5_sharp:9.8973e-03 L6_sharp:1.7974e-02 L7_sharp:3.1759e-02 L8_sharp:5.7817e-02 L9_sharp:5.6468e-02 L10_sharp:7.6627e-02 L11_sharp:9.7836e-02 L12_sharp:2.1200e-01 total_fnorm:8.5547e-01 total_l1_linf:1.1440e+03 total_spectral:4.1992e-01 L1_fnorm:1.4062e-01 L2_fnorm:1.4062e-01 L3_fnorm:1.3867e-01 L4_fnorm:1.3867e-01 L5_fnorm:1.3770e-01 L6_fnorm:1.3770e-01 L7_fnorm:1.3574e-01 L8_fnorm:1.3379e-01 L9_fnorm:1.3574e-01 L10_fnorm:1.3574e-01 L11_fnorm:1.3770e-01 L12_fnorm:1.2598e-01 L1_l1linf:3.1128e-02 L2_l1linf:3.0762e-02 L3_l1linf:3.0273e-02 L4_l1linf:2.9053e-02 L5_l1linf:2.9053e-02 L6_l1linf:2.8198e-02 L7_l1linf:2.7466e-02 L8_l1linf:2.6489e-02 L9_l1linf:2.6001e-02 L10_l1linf:2.6001e-02 L11_l1linf:2.6489e-02 L12_l1linf:1.7578e-02 L1_spectral:2.0499e-03 L2_spectral:2.0377e-03 L3_spectral:2.0180e-03 L4_spectral:2.0221e-03 L5_spectral:2.0124e-03 L6_spectral:2.0122e-03 L7_spectral:1.9882e-03 L8_spectral:1.9367e-03 L9_spectral:1.9756e-03 L10_spectral:1.9547e-03 L11_spectral:1.9597e-03 L12_spectral:1.9569e-03 train_time:320911ms step_avg:42.23ms +[2025-09-11 13:44:41] [Rank 0] PRINT: step:7600/10000 val_loss:5.4045 total_sharp:7.8515e-02 L1_sharp:6.4966e-03 L2_sharp:5.4544e-03 L3_sharp:5.4480e-03 L4_sharp:7.7530e-03 L5_sharp:9.8973e-03 L6_sharp:1.7974e-02 L7_sharp:3.1759e-02 L8_sharp:5.7817e-02 L9_sharp:5.6468e-02 L10_sharp:7.6627e-02 L11_sharp:9.7836e-02 L12_sharp:2.1200e-01 total_fnorm:8.5547e-01 total_l1_linf:1.1440e+03 total_spectral:4.1992e-01 L1_fnorm:1.4062e-01 L2_fnorm:1.4062e-01 L3_fnorm:1.3867e-01 L4_fnorm:1.3867e-01 L5_fnorm:1.3770e-01 L6_fnorm:1.3770e-01 L7_fnorm:1.3574e-01 L8_fnorm:1.3379e-01 L9_fnorm:1.3574e-01 L10_fnorm:1.3574e-01 L11_fnorm:1.3770e-01 L12_fnorm:1.2598e-01 L1_l1linf:3.1128e-02 L2_l1linf:3.0762e-02 L3_l1linf:3.0273e-02 L4_l1linf:2.9053e-02 L5_l1linf:2.9053e-02 
L6_l1linf:2.8198e-02 L7_l1linf:2.7466e-02 L8_l1linf:2.6489e-02 L9_l1linf:2.6001e-02 L10_l1linf:2.6001e-02 L11_l1linf:2.6489e-02 L12_l1linf:1.7578e-02 L1_spectral:2.0499e-03 L2_spectral:2.0377e-03 L3_spectral:2.0180e-03 L4_spectral:2.0221e-03 L5_spectral:2.0124e-03 L6_spectral:2.0122e-03 L7_spectral:1.9882e-03 L8_spectral:1.9367e-03 L9_spectral:1.9756e-03 L10_spectral:1.9547e-03 L11_spectral:1.9597e-03 L12_spectral:1.9569e-03 train_time:320911ms step_avg:42.23ms +[2025-09-11 13:44:42] [Rank 0] step:7601/10000 train_time:322716ms step_avg:42.46ms +[2025-09-11 13:44:42] [Rank 0] step:7601/10000 train_time:322716ms step_avg:42.46ms +[2025-09-11 13:44:43] [Rank 0] step:7621/10000 train_time:323433ms step_avg:42.44ms +[2025-09-11 13:44:43] [Rank 0] step:7621/10000 train_time:323433ms step_avg:42.44ms +[2025-09-11 13:44:44] [Rank 0] step:7641/10000 train_time:324137ms step_avg:42.42ms +[2025-09-11 13:44:44] [Rank 0] step:7641/10000 train_time:324137ms step_avg:42.42ms +[2025-09-11 13:44:44] [Rank 0] step:7661/10000 train_time:324847ms step_avg:42.40ms +[2025-09-11 13:44:44] [Rank 0] step:7661/10000 train_time:324847ms step_avg:42.40ms +[2025-09-11 13:44:45] [Rank 0] step:7681/10000 train_time:325549ms step_avg:42.38ms +[2025-09-11 13:44:45] [Rank 0] step:7681/10000 train_time:325549ms step_avg:42.38ms +[2025-09-11 13:44:46] [Rank 0] step:7701/10000 train_time:326251ms step_avg:42.36ms +[2025-09-11 13:44:46] [Rank 0] step:7701/10000 train_time:326251ms step_avg:42.36ms +[2025-09-11 13:44:47] [Rank 0] step:7721/10000 train_time:326953ms step_avg:42.35ms +[2025-09-11 13:44:47] [Rank 0] step:7721/10000 train_time:326953ms step_avg:42.35ms +[2025-09-11 13:44:47] [Rank 0] step:7741/10000 train_time:327656ms step_avg:42.33ms +[2025-09-11 13:44:47] [Rank 0] step:7741/10000 train_time:327656ms step_avg:42.33ms +[2025-09-11 13:44:48] [Rank 0] step:7761/10000 train_time:328356ms step_avg:42.31ms +[2025-09-11 13:44:48] [Rank 0] step:7761/10000 train_time:328356ms step_avg:42.31ms 
+[2025-09-11 13:44:49] [Rank 0] step:7781/10000 train_time:329059ms step_avg:42.29ms +[2025-09-11 13:44:49] [Rank 0] step:7781/10000 train_time:329059ms step_avg:42.29ms +[2025-09-11 13:44:49] [Rank 0] step:7801/10000 train_time:329760ms step_avg:42.27ms +[2025-09-11 13:44:49] [Rank 0] step:7801/10000 train_time:329760ms step_avg:42.27ms +[2025-09-11 13:44:50] [Rank 0] step:7821/10000 train_time:330462ms step_avg:42.25ms +[2025-09-11 13:44:50] [Rank 0] step:7821/10000 train_time:330462ms step_avg:42.25ms +[2025-09-11 13:44:51] [Rank 0] step:7841/10000 train_time:331165ms step_avg:42.23ms +[2025-09-11 13:44:51] [Rank 0] step:7841/10000 train_time:331165ms step_avg:42.23ms +[2025-09-11 13:44:51] [Rank 0] step:7861/10000 train_time:331869ms step_avg:42.22ms +[2025-09-11 13:44:51] [Rank 0] step:7861/10000 train_time:331869ms step_avg:42.22ms +[2025-09-11 13:44:52] [Rank 0] step:7881/10000 train_time:332572ms step_avg:42.20ms +[2025-09-11 13:44:52] [Rank 0] step:7881/10000 train_time:332572ms step_avg:42.20ms +[2025-09-11 13:44:53] [Rank 0] step:7901/10000 train_time:333275ms step_avg:42.18ms +[2025-09-11 13:44:53] [Rank 0] step:7901/10000 train_time:333275ms step_avg:42.18ms +[2025-09-11 13:44:54] [Rank 0] step:7921/10000 train_time:334250ms step_avg:42.20ms +[2025-09-11 13:44:54] [Rank 0] step:7921/10000 train_time:334250ms step_avg:42.20ms +[2025-09-11 13:44:55] [Rank 0] step:7941/10000 train_time:334954ms step_avg:42.18ms +[2025-09-11 13:44:55] [Rank 0] step:7941/10000 train_time:334954ms step_avg:42.18ms +[2025-09-11 13:44:55] [Rank 0] step:7961/10000 train_time:335654ms step_avg:42.16ms +[2025-09-11 13:44:55] [Rank 0] step:7961/10000 train_time:335654ms step_avg:42.16ms +[2025-09-11 13:44:56] [Rank 0] step:7981/10000 train_time:336614ms step_avg:42.18ms +[2025-09-11 13:44:56] [Rank 0] step:7981/10000 train_time:336614ms step_avg:42.18ms +[2025-09-11 13:44:57] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 13:44:57] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 13:44:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 13:44:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 13:45:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 13:45:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 13:45:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:45:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:45:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 13:45:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 13:45:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 13:45:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 13:45:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 13:45:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 13:45:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 13:45:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 13:45:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 13:45:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 13:45:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 13:45:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 13:45:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 13:45:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 13:45:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 13:45:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 13:45:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 13:45:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 13:45:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 13:45:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 13:45:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 13:45:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 13:45:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 13:45:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 13:45:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 13:45:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 13:45:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 13:45:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 13:45:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 13:45:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 13:45:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 13:45:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 13:45:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 13:45:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 13:45:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 13:45:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 13:45:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 13:45:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 13:45:08] [Rank 0] PRINT: step:8000/10000 val_loss:5.3927 total_sharp:6.6668e-02 L1_sharp:6.0210e-03 L2_sharp:3.7225e-03 L3_sharp:3.3285e-03 L4_sharp:6.6323e-03 L5_sharp:1.0873e-02 L6_sharp:1.4328e-02 L7_sharp:2.6539e-02 L8_sharp:4.1818e-02 L9_sharp:5.1158e-02 L10_sharp:6.2060e-02 L11_sharp:9.2713e-02 L12_sharp:2.1555e-01 total_fnorm:6.7578e-01 total_l1_linf:8.6400e+02 total_spectral:3.4180e-01 L1_fnorm:1.1523e-01 L2_fnorm:1.1426e-01 L3_fnorm:1.1328e-01 L4_fnorm:1.1279e-01 L5_fnorm:1.1182e-01 L6_fnorm:1.1182e-01 L7_fnorm:1.1035e-01 L8_fnorm:1.0840e-01 L9_fnorm:1.1084e-01 L10_fnorm:1.0986e-01 L11_fnorm:1.1182e-01 L12_fnorm:1.0205e-01 L1_l1linf:2.4048e-02 L2_l1linf:2.3804e-02 L3_l1linf:2.2949e-02 L4_l1linf:2.2827e-02 L5_l1linf:2.1484e-02 L6_l1linf:2.0996e-02 L7_l1linf:2.1118e-02 L8_l1linf:2.0020e-02 L9_l1linf:2.0264e-02 L10_l1linf:1.9653e-02 L11_l1linf:1.9775e-02 L12_l1linf:1.3245e-02 L1_spectral:1.7197e-03 L2_spectral:1.7182e-03 L3_spectral:1.7172e-03 L4_spectral:1.7213e-03 L5_spectral:1.6930e-03 L6_spectral:1.6855e-03 L7_spectral:1.6565e-03 L8_spectral:1.6170e-03 L9_spectral:1.6600e-03 L10_spectral:1.6294e-03 L11_spectral:1.6222e-03 L12_spectral:1.6331e-03 train_time:337295ms step_avg:42.16ms +[2025-09-11 13:45:08] [Rank 0] PRINT: step:8000/10000 
val_loss:5.3927 total_sharp:6.6668e-02 L1_sharp:6.0210e-03 L2_sharp:3.7225e-03 L3_sharp:3.3285e-03 L4_sharp:6.6323e-03 L5_sharp:1.0873e-02 L6_sharp:1.4328e-02 L7_sharp:2.6539e-02 L8_sharp:4.1818e-02 L9_sharp:5.1158e-02 L10_sharp:6.2060e-02 L11_sharp:9.2713e-02 L12_sharp:2.1555e-01 total_fnorm:6.7578e-01 total_l1_linf:8.6400e+02 total_spectral:3.4180e-01 L1_fnorm:1.1523e-01 L2_fnorm:1.1426e-01 L3_fnorm:1.1328e-01 L4_fnorm:1.1279e-01 L5_fnorm:1.1182e-01 L6_fnorm:1.1182e-01 L7_fnorm:1.1035e-01 L8_fnorm:1.0840e-01 L9_fnorm:1.1084e-01 L10_fnorm:1.0986e-01 L11_fnorm:1.1182e-01 L12_fnorm:1.0205e-01 L1_l1linf:2.4048e-02 L2_l1linf:2.3804e-02 L3_l1linf:2.2949e-02 L4_l1linf:2.2827e-02 L5_l1linf:2.1484e-02 L6_l1linf:2.0996e-02 L7_l1linf:2.1118e-02 L8_l1linf:2.0020e-02 L9_l1linf:2.0264e-02 L10_l1linf:1.9653e-02 L11_l1linf:1.9775e-02 L12_l1linf:1.3245e-02 L1_spectral:1.7197e-03 L2_spectral:1.7182e-03 L3_spectral:1.7172e-03 L4_spectral:1.7213e-03 L5_spectral:1.6930e-03 L6_spectral:1.6855e-03 L7_spectral:1.6565e-03 L8_spectral:1.6170e-03 L9_spectral:1.6600e-03 L10_spectral:1.6294e-03 L11_spectral:1.6222e-03 L12_spectral:1.6331e-03 train_time:337295ms step_avg:42.16ms +[2025-09-11 13:45:10] [Rank 0] step:8001/10000 train_time:339271ms step_avg:42.40ms +[2025-09-11 13:45:10] [Rank 0] step:8001/10000 train_time:339271ms step_avg:42.40ms +[2025-09-11 13:45:11] [Rank 0] step:8021/10000 train_time:339991ms step_avg:42.39ms +[2025-09-11 13:45:11] [Rank 0] step:8021/10000 train_time:339991ms step_avg:42.39ms +[2025-09-11 13:45:11] [Rank 0] step:8041/10000 train_time:340695ms step_avg:42.37ms +[2025-09-11 13:45:11] [Rank 0] step:8041/10000 train_time:340695ms step_avg:42.37ms +[2025-09-11 13:45:12] [Rank 0] step:8061/10000 train_time:341400ms step_avg:42.35ms +[2025-09-11 13:45:12] [Rank 0] step:8061/10000 train_time:341400ms step_avg:42.35ms +[2025-09-11 13:45:13] [Rank 0] step:8081/10000 train_time:342101ms step_avg:42.33ms +[2025-09-11 13:45:13] [Rank 0] step:8081/10000 
train_time:342101ms step_avg:42.33ms +[2025-09-11 13:45:13] [Rank 0] step:8101/10000 train_time:342803ms step_avg:42.32ms +[2025-09-11 13:45:13] [Rank 0] step:8101/10000 train_time:342803ms step_avg:42.32ms +[2025-09-11 13:45:14] [Rank 0] step:8121/10000 train_time:343509ms step_avg:42.30ms +[2025-09-11 13:45:14] [Rank 0] step:8121/10000 train_time:343509ms step_avg:42.30ms +[2025-09-11 13:45:16] [Rank 0] step:8141/10000 train_time:344942ms step_avg:42.37ms +[2025-09-11 13:45:16] [Rank 0] step:8141/10000 train_time:344942ms step_avg:42.37ms +[2025-09-11 13:45:16] [Rank 0] step:8161/10000 train_time:345647ms step_avg:42.35ms +[2025-09-11 13:45:16] [Rank 0] step:8161/10000 train_time:345647ms step_avg:42.35ms +[2025-09-11 13:45:17] [Rank 0] step:8181/10000 train_time:346361ms step_avg:42.34ms +[2025-09-11 13:45:17] [Rank 0] step:8181/10000 train_time:346361ms step_avg:42.34ms +[2025-09-11 13:45:18] [Rank 0] step:8201/10000 train_time:347073ms step_avg:42.32ms +[2025-09-11 13:45:18] [Rank 0] step:8201/10000 train_time:347073ms step_avg:42.32ms +[2025-09-11 13:45:18] [Rank 0] step:8221/10000 train_time:347782ms step_avg:42.30ms +[2025-09-11 13:45:18] [Rank 0] step:8221/10000 train_time:347782ms step_avg:42.30ms +[2025-09-11 13:45:19] [Rank 0] step:8241/10000 train_time:348498ms step_avg:42.29ms +[2025-09-11 13:45:19] [Rank 0] step:8241/10000 train_time:348498ms step_avg:42.29ms +[2025-09-11 13:45:20] [Rank 0] step:8261/10000 train_time:349206ms step_avg:42.27ms +[2025-09-11 13:45:20] [Rank 0] step:8261/10000 train_time:349206ms step_avg:42.27ms +[2025-09-11 13:45:21] [Rank 0] step:8281/10000 train_time:349911ms step_avg:42.25ms +[2025-09-11 13:45:21] [Rank 0] step:8281/10000 train_time:349911ms step_avg:42.25ms +[2025-09-11 13:45:21] [Rank 0] step:8301/10000 train_time:350621ms step_avg:42.24ms +[2025-09-11 13:45:21] [Rank 0] step:8301/10000 train_time:350621ms step_avg:42.24ms +[2025-09-11 13:45:22] [Rank 0] step:8321/10000 train_time:351329ms step_avg:42.22ms 
+[2025-09-11 13:45:22] [Rank 0] step:8321/10000 train_time:351329ms step_avg:42.22ms +[2025-09-11 13:45:23] [Rank 0] step:8341/10000 train_time:352044ms step_avg:42.21ms +[2025-09-11 13:45:23] [Rank 0] step:8341/10000 train_time:352044ms step_avg:42.21ms +[2025-09-11 13:45:23] [Rank 0] step:8361/10000 train_time:352748ms step_avg:42.19ms +[2025-09-11 13:45:23] [Rank 0] step:8361/10000 train_time:352748ms step_avg:42.19ms +[2025-09-11 13:45:24] [Rank 0] step:8381/10000 train_time:353459ms step_avg:42.17ms +[2025-09-11 13:45:24] [Rank 0] step:8381/10000 train_time:353459ms step_avg:42.17ms +[2025-09-11 13:45:25] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 13:45:25] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 13:45:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 13:45:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 13:45:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 13:45:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 13:45:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:45:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:45:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 13:45:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 13:45:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 13:45:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 13:45:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 13:45:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 13:45:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 13:45:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 13:45:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 13:45:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 13:45:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 13:45:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 13:45:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 13:45:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 13:45:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 13:45:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 13:45:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 13:45:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 13:45:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 13:45:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 13:45:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 13:45:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 13:45:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 13:45:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 13:45:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 13:45:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 13:45:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 13:45:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 13:45:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 13:45:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 13:45:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 13:45:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 13:45:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 13:45:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 13:45:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 13:45:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 13:45:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 13:45:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:45:36] [Rank 0] PRINT: step:8400/10000 val_loss:5.3806 total_sharp:6.1234e-02 L1_sharp:5.3415e-03 L2_sharp:3.9553e-03 L3_sharp:4.4900e-03 L4_sharp:6.4874e-03 L5_sharp:9.1775e-03 L6_sharp:1.3721e-02 L7_sharp:2.1587e-02 L8_sharp:3.9923e-02 L9_sharp:3.7126e-02 L10_sharp:5.3949e-02 L11_sharp:7.2694e-02 L12_sharp:2.7815e-01 total_fnorm:5.1172e-01 total_l1_linf:6.0000e+02 total_spectral:2.5977e-01 L1_fnorm:9.0332e-02 L2_fnorm:8.9844e-02 L3_fnorm:8.8867e-02 L4_fnorm:8.7891e-02 L5_fnorm:8.7402e-02 L6_fnorm:8.7402e-02 L7_fnorm:8.6426e-02 L8_fnorm:8.4961e-02 L9_fnorm:8.6426e-02 L10_fnorm:8.5449e-02 L11_fnorm:8.6426e-02 L12_fnorm:7.9102e-02 L1_l1linf:1.7822e-02 L2_l1linf:1.6846e-02 L3_l1linf:1.6968e-02 L4_l1linf:1.6235e-02 L5_l1linf:1.5503e-02 L6_l1linf:1.5259e-02 L7_l1linf:1.5015e-02 L8_l1linf:1.4893e-02 L9_l1linf:1.4099e-02 L10_l1linf:1.4282e-02 L11_l1linf:1.4282e-02 L12_l1linf:9.7046e-03 L1_spectral:1.4040e-03 L2_spectral:1.3893e-03 L3_spectral:1.3808e-03 L4_spectral:1.3762e-03 L5_spectral:1.3679e-03 L6_spectral:1.3684e-03 L7_spectral:1.3362e-03 L8_spectral:1.2951e-03 L9_spectral:1.3165e-03 L10_spectral:1.3038e-03 L11_spectral:1.2905e-03 L12_spectral:1.3035e-03 train_time:354151ms step_avg:42.16ms +[2025-09-11 13:45:36] [Rank 0] PRINT: step:8400/10000 val_loss:5.3806 total_sharp:6.1234e-02 L1_sharp:5.3415e-03 L2_sharp:3.9553e-03 L3_sharp:4.4900e-03 L4_sharp:6.4874e-03 L5_sharp:9.1775e-03 L6_sharp:1.3721e-02 L7_sharp:2.1587e-02 L8_sharp:3.9923e-02 L9_sharp:3.7126e-02 L10_sharp:5.3949e-02 L11_sharp:7.2694e-02 L12_sharp:2.7815e-01 total_fnorm:5.1172e-01 total_l1_linf:6.0000e+02 total_spectral:2.5977e-01 L1_fnorm:9.0332e-02 L2_fnorm:8.9844e-02 L3_fnorm:8.8867e-02 L4_fnorm:8.7891e-02 L5_fnorm:8.7402e-02 L6_fnorm:8.7402e-02 L7_fnorm:8.6426e-02 L8_fnorm:8.4961e-02 L9_fnorm:8.6426e-02 L10_fnorm:8.5449e-02 L11_fnorm:8.6426e-02 L12_fnorm:7.9102e-02 L1_l1linf:1.7822e-02 L2_l1linf:1.6846e-02 L3_l1linf:1.6968e-02 L4_l1linf:1.6235e-02 L5_l1linf:1.5503e-02 
L6_l1linf:1.5259e-02 L7_l1linf:1.5015e-02 L8_l1linf:1.4893e-02 L9_l1linf:1.4099e-02 L10_l1linf:1.4282e-02 L11_l1linf:1.4282e-02 L12_l1linf:9.7046e-03 L1_spectral:1.4040e-03 L2_spectral:1.3893e-03 L3_spectral:1.3808e-03 L4_spectral:1.3762e-03 L5_spectral:1.3679e-03 L6_spectral:1.3684e-03 L7_spectral:1.3362e-03 L8_spectral:1.2951e-03 L9_spectral:1.3165e-03 L10_spectral:1.3038e-03 L11_spectral:1.2905e-03 L12_spectral:1.3035e-03 train_time:354151ms step_avg:42.16ms +[2025-09-11 13:45:38] [Rank 0] step:8401/10000 train_time:356055ms step_avg:42.38ms +[2025-09-11 13:45:38] [Rank 0] step:8401/10000 train_time:356055ms step_avg:42.38ms +[2025-09-11 13:45:38] [Rank 0] step:8421/10000 train_time:356773ms step_avg:42.37ms +[2025-09-11 13:45:38] [Rank 0] step:8421/10000 train_time:356773ms step_avg:42.37ms +[2025-09-11 13:45:39] [Rank 0] step:8441/10000 train_time:357489ms step_avg:42.35ms +[2025-09-11 13:45:39] [Rank 0] step:8441/10000 train_time:357489ms step_avg:42.35ms +[2025-09-11 13:45:40] [Rank 0] step:8461/10000 train_time:358201ms step_avg:42.34ms +[2025-09-11 13:45:40] [Rank 0] step:8461/10000 train_time:358201ms step_avg:42.34ms +[2025-09-11 13:45:41] [Rank 0] step:8481/10000 train_time:358912ms step_avg:42.32ms +[2025-09-11 13:45:41] [Rank 0] step:8481/10000 train_time:358912ms step_avg:42.32ms +[2025-09-11 13:45:41] [Rank 0] step:8501/10000 train_time:359621ms step_avg:42.30ms +[2025-09-11 13:45:41] [Rank 0] step:8501/10000 train_time:359621ms step_avg:42.30ms +[2025-09-11 13:45:42] [Rank 0] step:8521/10000 train_time:360331ms step_avg:42.29ms +[2025-09-11 13:45:42] [Rank 0] step:8521/10000 train_time:360331ms step_avg:42.29ms +[2025-09-11 13:45:43] [Rank 0] step:8541/10000 train_time:361041ms step_avg:42.27ms +[2025-09-11 13:45:43] [Rank 0] step:8541/10000 train_time:361041ms step_avg:42.27ms +[2025-09-11 13:45:43] [Rank 0] step:8561/10000 train_time:361754ms step_avg:42.26ms +[2025-09-11 13:45:43] [Rank 0] step:8561/10000 train_time:361754ms step_avg:42.26ms 
+[2025-09-11 13:45:44] [Rank 0] step:8581/10000 train_time:362468ms step_avg:42.24ms +[2025-09-11 13:45:44] [Rank 0] step:8581/10000 train_time:362468ms step_avg:42.24ms +[2025-09-11 13:45:45] [Rank 0] step:8601/10000 train_time:363178ms step_avg:42.23ms +[2025-09-11 13:45:45] [Rank 0] step:8601/10000 train_time:363178ms step_avg:42.23ms +[2025-09-11 13:45:46] [Rank 0] step:8621/10000 train_time:363886ms step_avg:42.21ms +[2025-09-11 13:45:46] [Rank 0] step:8621/10000 train_time:363886ms step_avg:42.21ms +[2025-09-11 13:45:46] [Rank 0] step:8641/10000 train_time:364598ms step_avg:42.19ms +[2025-09-11 13:45:46] [Rank 0] step:8641/10000 train_time:364598ms step_avg:42.19ms +[2025-09-11 13:45:47] [Rank 0] step:8661/10000 train_time:365307ms step_avg:42.18ms +[2025-09-11 13:45:47] [Rank 0] step:8661/10000 train_time:365307ms step_avg:42.18ms +[2025-09-11 13:45:48] [Rank 0] step:8681/10000 train_time:366018ms step_avg:42.16ms +[2025-09-11 13:45:48] [Rank 0] step:8681/10000 train_time:366018ms step_avg:42.16ms +[2025-09-11 13:45:48] [Rank 0] step:8701/10000 train_time:366726ms step_avg:42.15ms +[2025-09-11 13:45:48] [Rank 0] step:8701/10000 train_time:366726ms step_avg:42.15ms +[2025-09-11 13:45:49] [Rank 0] step:8721/10000 train_time:367436ms step_avg:42.13ms +[2025-09-11 13:45:49] [Rank 0] step:8721/10000 train_time:367436ms step_avg:42.13ms +[2025-09-11 13:45:50] [Rank 0] step:8741/10000 train_time:368143ms step_avg:42.12ms +[2025-09-11 13:45:50] [Rank 0] step:8741/10000 train_time:368143ms step_avg:42.12ms +[2025-09-11 13:45:50] [Rank 0] step:8761/10000 train_time:368854ms step_avg:42.10ms +[2025-09-11 13:45:50] [Rank 0] step:8761/10000 train_time:368854ms step_avg:42.10ms +[2025-09-11 13:45:51] [Rank 0] step:8781/10000 train_time:369559ms step_avg:42.09ms +[2025-09-11 13:45:51] [Rank 0] step:8781/10000 train_time:369559ms step_avg:42.09ms +[2025-09-11 13:45:52] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 13:45:52] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 13:45:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 13:45:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 13:45:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 13:45:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 13:45:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:45:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:45:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 13:45:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 13:45:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 13:45:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 13:45:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 13:45:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 13:45:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 13:45:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 13:45:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 13:45:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 13:45:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 13:45:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 13:45:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 13:45:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 13:46:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 13:46:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 13:46:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 13:46:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 13:46:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 13:46:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 13:46:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 13:46:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 13:46:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 13:46:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 13:46:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 13:46:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 13:46:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 13:46:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 13:46:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 13:46:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 13:46:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 13:46:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 13:46:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 13:46:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 13:46:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 13:46:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 13:46:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 13:46:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 13:46:03] [Rank 0] PRINT: step:8800/10000 val_loss:5.3738 total_sharp:5.2666e-02 L1_sharp:6.0543e-03 L2_sharp:2.8546e-03 L3_sharp:2.4196e-03 L4_sharp:5.5762e-03 L5_sharp:8.3274e-03 L6_sharp:1.1023e-02 L7_sharp:1.7849e-02 L8_sharp:3.7569e-02 L9_sharp:4.0105e-02 L10_sharp:5.0081e-02 L11_sharp:7.0984e-02 L12_sharp:1.9612e-01 total_fnorm:3.6719e-01 total_l1_linf:3.8800e+02 total_spectral:1.8652e-01 L1_fnorm:6.5430e-02 L2_fnorm:6.4941e-02 L3_fnorm:6.4453e-02 L4_fnorm:6.3965e-02 L5_fnorm:6.3477e-02 L6_fnorm:6.2988e-02 L7_fnorm:6.2500e-02 L8_fnorm:6.1035e-02 L9_fnorm:6.2012e-02 L10_fnorm:6.1035e-02 L11_fnorm:6.2012e-02 L12_fnorm:5.6885e-02 L1_l1linf:1.1963e-02 L2_l1linf:1.1780e-02 L3_l1linf:1.1047e-02 L4_l1linf:1.0742e-02 L5_l1linf:1.0681e-02 L6_l1linf:9.8877e-03 L7_l1linf:9.6436e-03 L8_l1linf:9.8267e-03 L9_l1linf:9.2773e-03 L10_l1linf:9.0942e-03 L11_l1linf:9.4604e-03 L12_l1linf:6.3782e-03 L1_spectral:1.0515e-03 L2_spectral:1.0410e-03 L3_spectral:1.0416e-03 L4_spectral:1.0261e-03 L5_spectral:1.0182e-03 L6_spectral:1.0027e-03 L7_spectral:9.8518e-04 L8_spectral:9.4913e-04 L9_spectral:9.6428e-04 L10_spectral:9.6142e-04 L11_spectral:9.4517e-04 L12_spectral:9.5076e-04 train_time:370246ms step_avg:42.07ms +[2025-09-11 13:46:03] [Rank 0] PRINT: step:8800/10000 
val_loss:5.3738 total_sharp:5.2666e-02 L1_sharp:6.0543e-03 L2_sharp:2.8546e-03 L3_sharp:2.4196e-03 L4_sharp:5.5762e-03 L5_sharp:8.3274e-03 L6_sharp:1.1023e-02 L7_sharp:1.7849e-02 L8_sharp:3.7569e-02 L9_sharp:4.0105e-02 L10_sharp:5.0081e-02 L11_sharp:7.0984e-02 L12_sharp:1.9612e-01 total_fnorm:3.6719e-01 total_l1_linf:3.8800e+02 total_spectral:1.8652e-01 L1_fnorm:6.5430e-02 L2_fnorm:6.4941e-02 L3_fnorm:6.4453e-02 L4_fnorm:6.3965e-02 L5_fnorm:6.3477e-02 L6_fnorm:6.2988e-02 L7_fnorm:6.2500e-02 L8_fnorm:6.1035e-02 L9_fnorm:6.2012e-02 L10_fnorm:6.1035e-02 L11_fnorm:6.2012e-02 L12_fnorm:5.6885e-02 L1_l1linf:1.1963e-02 L2_l1linf:1.1780e-02 L3_l1linf:1.1047e-02 L4_l1linf:1.0742e-02 L5_l1linf:1.0681e-02 L6_l1linf:9.8877e-03 L7_l1linf:9.6436e-03 L8_l1linf:9.8267e-03 L9_l1linf:9.2773e-03 L10_l1linf:9.0942e-03 L11_l1linf:9.4604e-03 L12_l1linf:6.3782e-03 L1_spectral:1.0515e-03 L2_spectral:1.0410e-03 L3_spectral:1.0416e-03 L4_spectral:1.0261e-03 L5_spectral:1.0182e-03 L6_spectral:1.0027e-03 L7_spectral:9.8518e-04 L8_spectral:9.4913e-04 L9_spectral:9.6428e-04 L10_spectral:9.6142e-04 L11_spectral:9.4517e-04 L12_spectral:9.5076e-04 train_time:370246ms step_avg:42.07ms +[2025-09-11 13:46:05] [Rank 0] step:8801/10000 train_time:372061ms step_avg:42.27ms +[2025-09-11 13:46:05] [Rank 0] step:8801/10000 train_time:372061ms step_avg:42.27ms +[2025-09-11 13:46:05] [Rank 0] step:8821/10000 train_time:372800ms step_avg:42.26ms +[2025-09-11 13:46:05] [Rank 0] step:8821/10000 train_time:372800ms step_avg:42.26ms +[2025-09-11 13:46:06] [Rank 0] step:8841/10000 train_time:373511ms step_avg:42.25ms +[2025-09-11 13:46:06] [Rank 0] step:8841/10000 train_time:373511ms step_avg:42.25ms +[2025-09-11 13:46:07] [Rank 0] step:8861/10000 train_time:374220ms step_avg:42.23ms +[2025-09-11 13:46:07] [Rank 0] step:8861/10000 train_time:374220ms step_avg:42.23ms +[2025-09-11 13:46:07] [Rank 0] step:8881/10000 train_time:374932ms step_avg:42.22ms +[2025-09-11 13:46:07] [Rank 0] step:8881/10000 
train_time:374932ms step_avg:42.22ms +[2025-09-11 13:46:08] [Rank 0] step:8901/10000 train_time:375643ms step_avg:42.20ms +[2025-09-11 13:46:08] [Rank 0] step:8901/10000 train_time:375643ms step_avg:42.20ms +[2025-09-11 13:46:09] [Rank 0] step:8921/10000 train_time:376349ms step_avg:42.19ms +[2025-09-11 13:46:09] [Rank 0] step:8921/10000 train_time:376349ms step_avg:42.19ms +[2025-09-11 13:46:10] [Rank 0] step:8941/10000 train_time:377063ms step_avg:42.17ms +[2025-09-11 13:46:10] [Rank 0] step:8941/10000 train_time:377063ms step_avg:42.17ms +[2025-09-11 13:46:10] [Rank 0] step:8961/10000 train_time:377780ms step_avg:42.16ms +[2025-09-11 13:46:10] [Rank 0] step:8961/10000 train_time:377780ms step_avg:42.16ms +[2025-09-11 13:46:11] [Rank 0] step:8981/10000 train_time:378493ms step_avg:42.14ms +[2025-09-11 13:46:11] [Rank 0] step:8981/10000 train_time:378493ms step_avg:42.14ms +[2025-09-11 13:46:12] [Rank 0] step:9001/10000 train_time:379198ms step_avg:42.13ms +[2025-09-11 13:46:12] [Rank 0] step:9001/10000 train_time:379198ms step_avg:42.13ms +[2025-09-11 13:46:12] [Rank 0] step:9021/10000 train_time:379907ms step_avg:42.11ms +[2025-09-11 13:46:12] [Rank 0] step:9021/10000 train_time:379907ms step_avg:42.11ms +[2025-09-11 13:46:13] [Rank 0] step:9041/10000 train_time:380619ms step_avg:42.10ms +[2025-09-11 13:46:13] [Rank 0] step:9041/10000 train_time:380619ms step_avg:42.10ms +[2025-09-11 13:46:14] [Rank 0] step:9061/10000 train_time:381327ms step_avg:42.08ms +[2025-09-11 13:46:14] [Rank 0] step:9061/10000 train_time:381327ms step_avg:42.08ms +[2025-09-11 13:46:15] [Rank 0] step:9081/10000 train_time:382040ms step_avg:42.07ms +[2025-09-11 13:46:15] [Rank 0] step:9081/10000 train_time:382040ms step_avg:42.07ms +[2025-09-11 13:46:15] [Rank 0] step:9101/10000 train_time:382752ms step_avg:42.06ms +[2025-09-11 13:46:15] [Rank 0] step:9101/10000 train_time:382752ms step_avg:42.06ms +[2025-09-11 13:46:16] [Rank 0] step:9121/10000 train_time:383465ms step_avg:42.04ms 
+[2025-09-11 13:46:16] [Rank 0] step:9121/10000 train_time:383465ms step_avg:42.04ms +[2025-09-11 13:46:17] [Rank 0] step:9141/10000 train_time:384173ms step_avg:42.03ms +[2025-09-11 13:46:17] [Rank 0] step:9141/10000 train_time:384173ms step_avg:42.03ms +[2025-09-11 13:46:17] [Rank 0] step:9161/10000 train_time:384888ms step_avg:42.01ms +[2025-09-11 13:46:17] [Rank 0] step:9161/10000 train_time:384888ms step_avg:42.01ms +[2025-09-11 13:46:18] [Rank 0] step:9181/10000 train_time:385600ms step_avg:42.00ms +[2025-09-11 13:46:18] [Rank 0] step:9181/10000 train_time:385600ms step_avg:42.00ms +[2025-09-11 13:46:19] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 13:46:19] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 13:46:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 13:46:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 13:46:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 13:46:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 13:46:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:46:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:46:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 13:46:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 13:46:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 13:46:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 13:46:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 13:46:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 13:46:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 13:46:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 13:46:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 13:46:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 13:46:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 13:46:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 13:46:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 13:46:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 13:46:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 13:46:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 13:46:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 13:46:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 13:46:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 13:46:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 13:46:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 13:46:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 13:46:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 13:46:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 13:46:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 13:46:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 13:46:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 13:46:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 13:46:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 13:46:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 13:46:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 13:46:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 13:46:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 13:46:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 13:46:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 13:46:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 13:46:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 13:46:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:46:29] [Rank 0] PRINT: step:9200/10000 val_loss:5.3666 total_sharp:4.9413e-02 L1_sharp:4.0894e-03 L2_sharp:2.5640e-03 L3_sharp:3.4429e-03 L4_sharp:4.2692e-03 L5_sharp:4.5958e-03 L6_sharp:9.6112e-03 L7_sharp:1.7531e-02 L8_sharp:2.6839e-02 L9_sharp:2.9672e-02 L10_sharp:4.2756e-02 L11_sharp:5.8683e-02 L12_sharp:2.4025e-01 total_fnorm:2.3340e-01 total_l1_linf:2.2100e+02 total_spectral:1.1768e-01 L1_fnorm:4.3945e-02 L2_fnorm:4.3213e-02 L3_fnorm:4.2969e-02 L4_fnorm:4.2480e-02 L5_fnorm:4.2236e-02 L6_fnorm:4.1992e-02 L7_fnorm:4.1504e-02 L8_fnorm:4.0527e-02 L9_fnorm:4.1260e-02 L10_fnorm:4.0771e-02 L11_fnorm:4.1260e-02 L12_fnorm:3.7354e-02 L1_l1linf:6.8054e-03 L2_l1linf:7.0190e-03 L3_l1linf:6.4697e-03 L4_l1linf:6.4392e-03 L5_l1linf:6.0425e-03 L6_l1linf:5.8289e-03 L7_l1linf:5.8289e-03 L8_l1linf:5.7678e-03 L9_l1linf:5.4626e-03 L10_l1linf:5.5542e-03 L11_l1linf:5.4626e-03 L12_l1linf:4.2114e-03 L1_spectral:7.3307e-04 L2_spectral:7.2137e-04 L3_spectral:7.1553e-04 L4_spectral:7.0795e-04 L5_spectral:6.9263e-04 L6_spectral:6.8700e-04 L7_spectral:6.6789e-04 L8_spectral:6.5117e-04 L9_spectral:6.5130e-04 L10_spectral:6.4624e-04 L11_spectral:6.3820e-04 L12_spectral:6.3567e-04 train_time:386293ms step_avg:41.99ms +[2025-09-11 13:46:29] [Rank 0] PRINT: step:9200/10000 val_loss:5.3666 total_sharp:4.9413e-02 L1_sharp:4.0894e-03 L2_sharp:2.5640e-03 L3_sharp:3.4429e-03 L4_sharp:4.2692e-03 L5_sharp:4.5958e-03 L6_sharp:9.6112e-03 L7_sharp:1.7531e-02 L8_sharp:2.6839e-02 L9_sharp:2.9672e-02 L10_sharp:4.2756e-02 L11_sharp:5.8683e-02 L12_sharp:2.4025e-01 total_fnorm:2.3340e-01 total_l1_linf:2.2100e+02 total_spectral:1.1768e-01 L1_fnorm:4.3945e-02 L2_fnorm:4.3213e-02 L3_fnorm:4.2969e-02 L4_fnorm:4.2480e-02 L5_fnorm:4.2236e-02 L6_fnorm:4.1992e-02 L7_fnorm:4.1504e-02 L8_fnorm:4.0527e-02 L9_fnorm:4.1260e-02 L10_fnorm:4.0771e-02 L11_fnorm:4.1260e-02 L12_fnorm:3.7354e-02 L1_l1linf:6.8054e-03 L2_l1linf:7.0190e-03 L3_l1linf:6.4697e-03 L4_l1linf:6.4392e-03 L5_l1linf:6.0425e-03 
L6_l1linf:5.8289e-03 L7_l1linf:5.8289e-03 L8_l1linf:5.7678e-03 L9_l1linf:5.4626e-03 L10_l1linf:5.5542e-03 L11_l1linf:5.4626e-03 L12_l1linf:4.2114e-03 L1_spectral:7.3307e-04 L2_spectral:7.2137e-04 L3_spectral:7.1553e-04 L4_spectral:7.0795e-04 L5_spectral:6.9263e-04 L6_spectral:6.8700e-04 L7_spectral:6.6789e-04 L8_spectral:6.5117e-04 L9_spectral:6.5130e-04 L10_spectral:6.4624e-04 L11_spectral:6.3820e-04 L12_spectral:6.3567e-04 train_time:386293ms step_avg:41.99ms +[2025-09-11 13:46:31] [Rank 0] step:9201/10000 train_time:388132ms step_avg:42.18ms +[2025-09-11 13:46:31] [Rank 0] step:9201/10000 train_time:388132ms step_avg:42.18ms +[2025-09-11 13:46:32] [Rank 0] step:9221/10000 train_time:388859ms step_avg:42.17ms +[2025-09-11 13:46:32] [Rank 0] step:9221/10000 train_time:388859ms step_avg:42.17ms +[2025-09-11 13:46:33] [Rank 0] step:9241/10000 train_time:389569ms step_avg:42.16ms +[2025-09-11 13:46:33] [Rank 0] step:9241/10000 train_time:389569ms step_avg:42.16ms +[2025-09-11 13:46:33] [Rank 0] step:9261/10000 train_time:390280ms step_avg:42.14ms +[2025-09-11 13:46:33] [Rank 0] step:9261/10000 train_time:390280ms step_avg:42.14ms +[2025-09-11 13:46:34] [Rank 0] step:9281/10000 train_time:390992ms step_avg:42.13ms +[2025-09-11 13:46:34] [Rank 0] step:9281/10000 train_time:390992ms step_avg:42.13ms +[2025-09-11 13:46:35] [Rank 0] step:9301/10000 train_time:391700ms step_avg:42.11ms +[2025-09-11 13:46:35] [Rank 0] step:9301/10000 train_time:391700ms step_avg:42.11ms +[2025-09-11 13:46:36] [Rank 0] step:9321/10000 train_time:392412ms step_avg:42.10ms +[2025-09-11 13:46:36] [Rank 0] step:9321/10000 train_time:392412ms step_avg:42.10ms +[2025-09-11 13:46:36] [Rank 0] step:9341/10000 train_time:393119ms step_avg:42.09ms +[2025-09-11 13:46:36] [Rank 0] step:9341/10000 train_time:393119ms step_avg:42.09ms +[2025-09-11 13:46:37] [Rank 0] step:9361/10000 train_time:393825ms step_avg:42.07ms +[2025-09-11 13:46:37] [Rank 0] step:9361/10000 train_time:393825ms step_avg:42.07ms 
+[2025-09-11 13:46:38] [Rank 0] step:9381/10000 train_time:394535ms step_avg:42.06ms +[2025-09-11 13:46:38] [Rank 0] step:9381/10000 train_time:394535ms step_avg:42.06ms +[2025-09-11 13:46:38] [Rank 0] step:9401/10000 train_time:395247ms step_avg:42.04ms +[2025-09-11 13:46:38] [Rank 0] step:9401/10000 train_time:395247ms step_avg:42.04ms +[2025-09-11 13:46:39] [Rank 0] step:9421/10000 train_time:395959ms step_avg:42.03ms +[2025-09-11 13:46:39] [Rank 0] step:9421/10000 train_time:395959ms step_avg:42.03ms +[2025-09-11 13:46:40] [Rank 0] step:9441/10000 train_time:396672ms step_avg:42.02ms +[2025-09-11 13:46:40] [Rank 0] step:9441/10000 train_time:396672ms step_avg:42.02ms +[2025-09-11 13:46:41] [Rank 0] step:9461/10000 train_time:397381ms step_avg:42.00ms +[2025-09-11 13:46:41] [Rank 0] step:9461/10000 train_time:397381ms step_avg:42.00ms +[2025-09-11 13:46:41] [Rank 0] step:9481/10000 train_time:398093ms step_avg:41.99ms +[2025-09-11 13:46:41] [Rank 0] step:9481/10000 train_time:398093ms step_avg:41.99ms +[2025-09-11 13:46:42] [Rank 0] step:9501/10000 train_time:398805ms step_avg:41.98ms +[2025-09-11 13:46:42] [Rank 0] step:9501/10000 train_time:398805ms step_avg:41.98ms +[2025-09-11 13:46:43] [Rank 0] step:9521/10000 train_time:399519ms step_avg:41.96ms +[2025-09-11 13:46:43] [Rank 0] step:9521/10000 train_time:399519ms step_avg:41.96ms +[2025-09-11 13:46:43] [Rank 0] step:9541/10000 train_time:400228ms step_avg:41.95ms +[2025-09-11 13:46:43] [Rank 0] step:9541/10000 train_time:400228ms step_avg:41.95ms +[2025-09-11 13:46:44] [Rank 0] step:9561/10000 train_time:400939ms step_avg:41.93ms +[2025-09-11 13:46:44] [Rank 0] step:9561/10000 train_time:400939ms step_avg:41.93ms +[2025-09-11 13:46:45] [Rank 0] step:9581/10000 train_time:401651ms step_avg:41.92ms +[2025-09-11 13:46:45] [Rank 0] step:9581/10000 train_time:401651ms step_avg:41.92ms +[2025-09-11 13:46:46] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 13:46:46] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 13:46:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 13:46:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 13:46:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 13:46:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 13:46:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:46:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:46:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 13:46:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 13:46:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 13:46:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 13:46:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 13:46:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 13:46:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 13:46:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 13:46:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 13:46:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 13:46:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 13:46:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 13:46:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 13:46:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 13:46:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 13:46:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 13:46:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 13:46:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 13:46:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 13:46:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 13:46:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 13:46:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 13:46:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 13:46:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 13:46:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 13:46:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 13:46:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 13:46:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 13:46:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 13:46:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 13:46:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 13:46:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 13:46:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 13:46:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 13:46:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 13:46:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 13:46:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 13:46:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 13:46:56] [Rank 0] PRINT: step:9600/10000 val_loss:5.3614 total_sharp:2.7153e-02 L1_sharp:3.2035e-03 L2_sharp:1.8356e-03 L3_sharp:3.0652e-03 L4_sharp:3.8551e-03 L5_sharp:5.3821e-03 L6_sharp:7.4272e-03 L7_sharp:1.1535e-02 L8_sharp:2.1632e-02 L9_sharp:2.2771e-02 L10_sharp:2.9834e-02 L11_sharp:4.0724e-02 L12_sharp:1.3988e-01 total_fnorm:1.3965e-01 total_l1_linf:1.0350e+02 total_spectral:6.8848e-02 L1_fnorm:2.4658e-02 L2_fnorm:2.4292e-02 L3_fnorm:2.4048e-02 L4_fnorm:2.3682e-02 L5_fnorm:2.3682e-02 L6_fnorm:2.3560e-02 L7_fnorm:2.3315e-02 L8_fnorm:2.2583e-02 L9_fnorm:2.3071e-02 L10_fnorm:2.2827e-02 L11_fnorm:2.3071e-02 L12_fnorm:2.0874e-02 L1_l1linf:3.2501e-03 L2_l1linf:3.2349e-03 L3_l1linf:3.1738e-03 L4_l1linf:3.0670e-03 L5_l1linf:2.8534e-03 L6_l1linf:2.8076e-03 L7_l1linf:2.7771e-03 L8_l1linf:2.8076e-03 L9_l1linf:2.6093e-03 L10_l1linf:2.5787e-03 L11_l1linf:2.6245e-03 L12_l1linf:1.9379e-03 L1_spectral:4.2982e-04 L2_spectral:4.2104e-04 L3_spectral:4.1634e-04 L4_spectral:4.0494e-04 L5_spectral:3.9992e-04 L6_spectral:3.9727e-04 L7_spectral:3.8250e-04 L8_spectral:3.7548e-04 L9_spectral:3.7203e-04 L10_spectral:3.6913e-04 L11_spectral:3.6519e-04 L12_spectral:3.6515e-04 train_time:402339ms step_avg:41.91ms +[2025-09-11 13:46:56] [Rank 0] PRINT: step:9600/10000 
val_loss:5.3614 total_sharp:2.7153e-02 L1_sharp:3.2035e-03 L2_sharp:1.8356e-03 L3_sharp:3.0652e-03 L4_sharp:3.8551e-03 L5_sharp:5.3821e-03 L6_sharp:7.4272e-03 L7_sharp:1.1535e-02 L8_sharp:2.1632e-02 L9_sharp:2.2771e-02 L10_sharp:2.9834e-02 L11_sharp:4.0724e-02 L12_sharp:1.3988e-01 total_fnorm:1.3965e-01 total_l1_linf:1.0350e+02 total_spectral:6.8848e-02 L1_fnorm:2.4658e-02 L2_fnorm:2.4292e-02 L3_fnorm:2.4048e-02 L4_fnorm:2.3682e-02 L5_fnorm:2.3682e-02 L6_fnorm:2.3560e-02 L7_fnorm:2.3315e-02 L8_fnorm:2.2583e-02 L9_fnorm:2.3071e-02 L10_fnorm:2.2827e-02 L11_fnorm:2.3071e-02 L12_fnorm:2.0874e-02 L1_l1linf:3.2501e-03 L2_l1linf:3.2349e-03 L3_l1linf:3.1738e-03 L4_l1linf:3.0670e-03 L5_l1linf:2.8534e-03 L6_l1linf:2.8076e-03 L7_l1linf:2.7771e-03 L8_l1linf:2.8076e-03 L9_l1linf:2.6093e-03 L10_l1linf:2.5787e-03 L11_l1linf:2.6245e-03 L12_l1linf:1.9379e-03 L1_spectral:4.2982e-04 L2_spectral:4.2104e-04 L3_spectral:4.1634e-04 L4_spectral:4.0494e-04 L5_spectral:3.9992e-04 L6_spectral:3.9727e-04 L7_spectral:3.8250e-04 L8_spectral:3.7548e-04 L9_spectral:3.7203e-04 L10_spectral:3.6913e-04 L11_spectral:3.6519e-04 L12_spectral:3.6515e-04 train_time:402339ms step_avg:41.91ms +[2025-09-11 13:46:58] [Rank 0] step:9601/10000 train_time:404190ms step_avg:42.10ms +[2025-09-11 13:46:58] [Rank 0] step:9601/10000 train_time:404190ms step_avg:42.10ms +[2025-09-11 13:46:59] [Rank 0] step:9621/10000 train_time:404909ms step_avg:42.09ms +[2025-09-11 13:46:59] [Rank 0] step:9621/10000 train_time:404909ms step_avg:42.09ms +[2025-09-11 13:47:00] [Rank 0] step:9641/10000 train_time:405767ms step_avg:42.09ms +[2025-09-11 13:47:00] [Rank 0] step:9641/10000 train_time:405767ms step_avg:42.09ms +[2025-09-11 13:47:01] [Rank 0] step:9661/10000 train_time:406608ms step_avg:42.09ms +[2025-09-11 13:47:01] [Rank 0] step:9661/10000 train_time:406608ms step_avg:42.09ms +[2025-09-11 13:47:01] [Rank 0] step:9681/10000 train_time:407323ms step_avg:42.07ms +[2025-09-11 13:47:01] [Rank 0] step:9681/10000 
train_time:407323ms step_avg:42.07ms +[2025-09-11 13:47:02] [Rank 0] step:9701/10000 train_time:408038ms step_avg:42.06ms +[2025-09-11 13:47:02] [Rank 0] step:9701/10000 train_time:408038ms step_avg:42.06ms +[2025-09-11 13:47:03] [Rank 0] step:9721/10000 train_time:408986ms step_avg:42.07ms +[2025-09-11 13:47:03] [Rank 0] step:9721/10000 train_time:408986ms step_avg:42.07ms +[2025-09-11 13:47:04] [Rank 0] step:9741/10000 train_time:409704ms step_avg:42.06ms +[2025-09-11 13:47:04] [Rank 0] step:9741/10000 train_time:409704ms step_avg:42.06ms +[2025-09-11 13:47:05] [Rank 0] step:9761/10000 train_time:410420ms step_avg:42.05ms +[2025-09-11 13:47:05] [Rank 0] step:9761/10000 train_time:410420ms step_avg:42.05ms +[2025-09-11 13:47:05] [Rank 0] step:9781/10000 train_time:411138ms step_avg:42.03ms +[2025-09-11 13:47:05] [Rank 0] step:9781/10000 train_time:411138ms step_avg:42.03ms +[2025-09-11 13:47:06] [Rank 0] step:9801/10000 train_time:411859ms step_avg:42.02ms +[2025-09-11 13:47:06] [Rank 0] step:9801/10000 train_time:411859ms step_avg:42.02ms +[2025-09-11 13:47:07] [Rank 0] step:9821/10000 train_time:412577ms step_avg:42.01ms +[2025-09-11 13:47:07] [Rank 0] step:9821/10000 train_time:412577ms step_avg:42.01ms +[2025-09-11 13:47:07] [Rank 0] step:9841/10000 train_time:413297ms step_avg:42.00ms +[2025-09-11 13:47:07] [Rank 0] step:9841/10000 train_time:413297ms step_avg:42.00ms +[2025-09-11 13:47:08] [Rank 0] step:9861/10000 train_time:414013ms step_avg:41.98ms +[2025-09-11 13:47:08] [Rank 0] step:9861/10000 train_time:414013ms step_avg:41.98ms +[2025-09-11 13:47:09] [Rank 0] step:9881/10000 train_time:414729ms step_avg:41.97ms +[2025-09-11 13:47:09] [Rank 0] step:9881/10000 train_time:414729ms step_avg:41.97ms +[2025-09-11 13:47:10] [Rank 0] step:9901/10000 train_time:415442ms step_avg:41.96ms +[2025-09-11 13:47:10] [Rank 0] step:9901/10000 train_time:415442ms step_avg:41.96ms +[2025-09-11 13:47:10] [Rank 0] step:9921/10000 train_time:416158ms step_avg:41.95ms 
+[2025-09-11 13:47:10] [Rank 0] step:9921/10000 train_time:416158ms step_avg:41.95ms +[2025-09-11 13:47:11] [Rank 0] step:9941/10000 train_time:416878ms step_avg:41.94ms +[2025-09-11 13:47:11] [Rank 0] step:9941/10000 train_time:416878ms step_avg:41.94ms +[2025-09-11 13:47:12] [Rank 0] step:9961/10000 train_time:417599ms step_avg:41.92ms +[2025-09-11 13:47:12] [Rank 0] step:9961/10000 train_time:417599ms step_avg:41.92ms +[2025-09-11 13:47:12] [Rank 0] step:9981/10000 train_time:418317ms step_avg:41.91ms +[2025-09-11 13:47:12] [Rank 0] step:9981/10000 train_time:418317ms step_avg:41.91ms +[2025-09-11 13:47:13] [Rank 0] step:10000/10000 train_time:419006ms step_avg:41.90ms +[2025-09-11 13:47:13] [Rank 0] step:10000/10000 train_time:419006ms step_avg:41.90ms +[2025-09-11 13:47:13] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 13:47:13] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 13:47:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 13:47:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 13:47:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 13:47:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 13:47:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:47:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 13:47:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 13:47:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 13:47:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 13:47:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 13:47:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 13:47:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 13:47:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 13:47:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 13:47:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 13:47:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 13:47:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 13:47:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 13:47:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 13:47:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 13:47:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 13:47:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 13:47:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 13:47:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 13:47:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 13:47:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 13:47:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 13:47:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 13:47:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 13:47:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 13:47:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 13:47:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 13:47:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 13:47:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 13:47:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 13:47:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 13:47:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 13:47:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 13:47:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 13:47:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 13:47:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 13:47:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 13:47:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 13:47:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 13:47:24] [Rank 0] PRINT: step:10000/10000 val_loss:5.3596 total_sharp:1.8373e-02 L1_sharp:1.5356e-03 L2_sharp:1.8661e-03 L3_sharp:1.6472e-03 L4_sharp:3.1199e-03 L5_sharp:2.7470e-03 L6_sharp:4.6014e-03 L7_sharp:8.0732e-03 L8_sharp:1.6091e-02 L9_sharp:1.4084e-02 L10_sharp:2.0678e-02 L11_sharp:2.7472e-02 L12_sharp:1.3280e-01 total_fnorm:5.3223e-02 total_l1_linf:2.9500e+01 total_spectral:2.6489e-02 L1_fnorm:9.6436e-03 L2_fnorm:9.5215e-03 L3_fnorm:9.3994e-03 L4_fnorm:9.3384e-03 L5_fnorm:9.2163e-03 L6_fnorm:9.2163e-03 L7_fnorm:9.0942e-03 L8_fnorm:8.9111e-03 L9_fnorm:9.0942e-03 L10_fnorm:8.9722e-03 L11_fnorm:9.0332e-03 L12_fnorm:8.1177e-03 L1_l1linf:1.0147e-03 L2_l1linf:9.6893e-04 L3_l1linf:9.3079e-04 L4_l1linf:8.9264e-04 L5_l1linf:8.7738e-04 L6_l1linf:8.5831e-04 L7_l1linf:8.6212e-04 L8_l1linf:8.8120e-04 L9_l1linf:8.6212e-04 L10_l1linf:8.7738e-04 L11_l1linf:8.1635e-04 L12_l1linf:6.3705e-04 L1_spectral:1.7110e-04 L2_spectral:1.6911e-04 L3_spectral:1.6601e-04 L4_spectral:1.6363e-04 L5_spectral:1.6158e-04 L6_spectral:1.6061e-04 L7_spectral:1.5319e-04 L8_spectral:1.5421e-04 L9_spectral:1.4943e-04 L10_spectral:1.4688e-04 L11_spectral:1.4448e-04 L12_spectral:1.4753e-04 train_time:419027ms step_avg:41.90ms +[2025-09-11 13:47:24] [Rank 0] PRINT: step:10000/10000 val_loss:5.3596 total_sharp:1.8373e-02 L1_sharp:1.5356e-03 L2_sharp:1.8661e-03 L3_sharp:1.6472e-03 L4_sharp:3.1199e-03 L5_sharp:2.7470e-03 L6_sharp:4.6014e-03 L7_sharp:8.0732e-03 L8_sharp:1.6091e-02 L9_sharp:1.4084e-02 L10_sharp:2.0678e-02 L11_sharp:2.7472e-02 L12_sharp:1.3280e-01 total_fnorm:5.3223e-02 total_l1_linf:2.9500e+01 total_spectral:2.6489e-02 L1_fnorm:9.6436e-03 L2_fnorm:9.5215e-03 L3_fnorm:9.3994e-03 L4_fnorm:9.3384e-03 L5_fnorm:9.2163e-03 L6_fnorm:9.2163e-03 L7_fnorm:9.0942e-03 L8_fnorm:8.9111e-03 L9_fnorm:9.0942e-03 L10_fnorm:8.9722e-03 L11_fnorm:9.0332e-03 L12_fnorm:8.1177e-03 L1_l1linf:1.0147e-03 L2_l1linf:9.6893e-04 L3_l1linf:9.3079e-04 L4_l1linf:8.9264e-04 L5_l1linf:8.7738e-04 
L6_l1linf:8.5831e-04 L7_l1linf:8.6212e-04 L8_l1linf:8.8120e-04 L9_l1linf:8.6212e-04 L10_l1linf:8.7738e-04 L11_l1linf:8.1635e-04 L12_l1linf:6.3705e-04 L1_spectral:1.7110e-04 L2_spectral:1.6911e-04 L3_spectral:1.6601e-04 L4_spectral:1.6363e-04 L5_spectral:1.6158e-04 L6_spectral:1.6061e-04 L7_spectral:1.5319e-04 L8_spectral:1.5421e-04 L9_spectral:1.4943e-04 L10_spectral:1.4688e-04 L11_spectral:1.4448e-04 L12_spectral:1.4753e-04 train_time:419027ms step_avg:41.90ms +[2025-09-11 13:47:24] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 13:47:24 2025 --- +[2025-09-11 13:47:24] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 13:47:24 2025 --- +[2025-09-11 13:47:24] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 13:47:24] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.01_seed_44/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.01_seed_44/config.json new file mode 100644 index 0000000000000000000000000000000000000000..e000053926c8b344baad31ad35c91f1d0c9c9577 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.01_seed_44/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 44, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.001, + "muon_lr": 0.01, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "92c7c167-73e8-4ba3-86fc-bff3b5d77c9d", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.01_seed_44/training_log_92c7c167-73e8-4ba3-86fc-bff3b5d77c9d.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.01_seed_44/training_log_92c7c167-73e8-4ba3-86fc-bff3b5d77c9d.txt new file mode 100644 index 0000000000000000000000000000000000000000..0d9272aa2c5f0adb3129d8a32c9e6ef04b2186a7 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.01_seed_44/training_log_92c7c167-73e8-4ba3-86fc-bff3b5d77c9d.txt @@ -0,0 +1,4264 @@ +[2025-09-11 07:24:44] [Rank 0] PRINT: --- Script Start: Thu Sep 11 07:24:44 2025 --- +[2025-09-11 07:24:44] [Rank 0] PRINT: --- Script Start: Thu Sep 11 07:24:44 2025 --- +[2025-09-11 07:24:44] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001, muon_lr=0.01, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 07:24:44] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001, muon_lr=0.01, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 07:24:44] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 07:24:44] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 07:24:44] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 07:24:44] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 07:24:44] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.01_seed_44 +[2025-09-11 07:24:44] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.01_seed_44 +[2025-09-11 07:24:44] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import 
dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, 
"unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." 
+ "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention 
sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 07:24:44] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 07:24:44] [Rank 0] PRINT: Constructing model... +[2025-09-11 07:24:44] [Rank 0] PRINT: Constructing model... +[2025-09-11 07:24:45] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 07:24:45] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 07:24:45] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 07:24:45] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 07:24:45] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 07:24:45] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 07:24:45] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 07:24:45] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 07:24:46] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 07:24:46] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 07:24:48] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 07:24:48] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 07:24:48] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 07:24:48] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 07:24:48] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 07:24:48] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 07:24:54] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 07:24:54] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 07:24:54] [Rank 0] PRINT: Starting warmup... +[2025-09-11 07:24:54] [Rank 0] PRINT: Starting warmup... +[2025-09-11 07:25:31] [Rank 0] PRINT: Warmup complete. +[2025-09-11 07:25:31] [Rank 0] PRINT: Warmup complete. +[2025-09-11 07:25:31] [Rank 0] PRINT: Starting training... +[2025-09-11 07:25:31] [Rank 0] PRINT: Starting training... 
+[2025-09-11 07:25:32] [Rank 0] step:21/10000 train_time:1133ms step_avg:53.97ms +[2025-09-11 07:25:32] [Rank 0] step:21/10000 train_time:1133ms step_avg:53.97ms +[2025-09-11 07:25:33] [Rank 0] step:41/10000 train_time:1863ms step_avg:45.44ms +[2025-09-11 07:25:33] [Rank 0] step:41/10000 train_time:1863ms step_avg:45.44ms +[2025-09-11 07:25:34] [Rank 0] step:61/10000 train_time:2591ms step_avg:42.47ms +[2025-09-11 07:25:34] [Rank 0] step:61/10000 train_time:2591ms step_avg:42.47ms +[2025-09-11 07:25:34] [Rank 0] step:81/10000 train_time:3320ms step_avg:40.99ms +[2025-09-11 07:25:34] [Rank 0] step:81/10000 train_time:3320ms step_avg:40.99ms +[2025-09-11 07:25:35] [Rank 0] step:101/10000 train_time:4048ms step_avg:40.08ms +[2025-09-11 07:25:35] [Rank 0] step:101/10000 train_time:4048ms step_avg:40.08ms +[2025-09-11 07:25:36] [Rank 0] step:121/10000 train_time:4777ms step_avg:39.48ms +[2025-09-11 07:25:36] [Rank 0] step:121/10000 train_time:4777ms step_avg:39.48ms +[2025-09-11 07:25:37] [Rank 0] step:141/10000 train_time:5505ms step_avg:39.04ms +[2025-09-11 07:25:37] [Rank 0] step:141/10000 train_time:5505ms step_avg:39.04ms +[2025-09-11 07:25:37] [Rank 0] step:161/10000 train_time:6234ms step_avg:38.72ms +[2025-09-11 07:25:37] [Rank 0] step:161/10000 train_time:6234ms step_avg:38.72ms +[2025-09-11 07:25:38] [Rank 0] step:181/10000 train_time:6963ms step_avg:38.47ms +[2025-09-11 07:25:38] [Rank 0] step:181/10000 train_time:6963ms step_avg:38.47ms +[2025-09-11 07:25:39] [Rank 0] step:201/10000 train_time:7691ms step_avg:38.27ms +[2025-09-11 07:25:39] [Rank 0] step:201/10000 train_time:7691ms step_avg:38.27ms +[2025-09-11 07:25:40] [Rank 0] step:221/10000 train_time:8420ms step_avg:38.10ms +[2025-09-11 07:25:40] [Rank 0] step:221/10000 train_time:8420ms step_avg:38.10ms +[2025-09-11 07:25:40] [Rank 0] step:241/10000 train_time:9148ms step_avg:37.96ms +[2025-09-11 07:25:40] [Rank 0] step:241/10000 train_time:9148ms step_avg:37.96ms +[2025-09-11 07:25:41] [Rank 0] 
step:261/10000 train_time:9876ms step_avg:37.84ms +[2025-09-11 07:25:41] [Rank 0] step:261/10000 train_time:9876ms step_avg:37.84ms +[2025-09-11 07:25:42] [Rank 0] step:281/10000 train_time:10605ms step_avg:37.74ms +[2025-09-11 07:25:42] [Rank 0] step:281/10000 train_time:10605ms step_avg:37.74ms +[2025-09-11 07:25:42] [Rank 0] step:301/10000 train_time:11333ms step_avg:37.65ms +[2025-09-11 07:25:42] [Rank 0] step:301/10000 train_time:11333ms step_avg:37.65ms +[2025-09-11 07:25:43] [Rank 0] step:321/10000 train_time:12061ms step_avg:37.57ms +[2025-09-11 07:25:43] [Rank 0] step:321/10000 train_time:12061ms step_avg:37.57ms +[2025-09-11 07:25:44] [Rank 0] step:341/10000 train_time:12789ms step_avg:37.50ms +[2025-09-11 07:25:44] [Rank 0] step:341/10000 train_time:12789ms step_avg:37.50ms +[2025-09-11 07:25:45] [Rank 0] step:361/10000 train_time:13517ms step_avg:37.44ms +[2025-09-11 07:25:45] [Rank 0] step:361/10000 train_time:13517ms step_avg:37.44ms +[2025-09-11 07:25:45] [Rank 0] step:381/10000 train_time:14245ms step_avg:37.39ms +[2025-09-11 07:25:45] [Rank 0] step:381/10000 train_time:14245ms step_avg:37.39ms +[2025-09-11 07:25:46] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 07:25:46] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 07:25:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 07:25:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 07:26:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 07:26:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 07:26:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:26:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 07:26:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 07:26:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 07:26:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 07:26:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 07:26:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 07:26:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 07:26:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 07:26:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 07:26:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 07:26:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 07:26:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 07:26:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 07:26:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 07:26:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 07:26:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 07:26:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 07:26:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 07:26:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 07:26:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 07:26:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 07:26:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 07:26:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 07:26:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 07:26:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 07:26:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 07:26:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 07:26:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 07:26:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 07:26:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 07:26:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 07:26:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 07:26:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 07:26:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 07:26:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 07:26:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 07:26:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 07:26:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 07:26:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:26:34] [Rank 0] PRINT: step:400/10000 val_loss:7.1313 total_sharp:4.1964e-02 L1_sharp:2.2625e-02 L2_sharp:1.8589e-02 L3_sharp:2.0824e-02 L4_sharp:1.6547e-02 L5_sharp:3.8513e-02 L6_sharp:2.5691e-02 L7_sharp:2.7664e-02 L8_sharp:2.4290e-02 L9_sharp:2.3725e-02 L10_sharp:1.4521e-02 L11_sharp:1.2389e-02 L12_sharp:1.7661e-02 total_fnorm:6.1976e+00 total_l1_linf:2.3707e+04 total_spectral:3.0990e+00 L1_fnorm:1.2222e+00 L2_fnorm:1.2221e+00 L3_fnorm:1.2196e+00 L4_fnorm:1.1833e+00 L5_fnorm:1.0383e+00 L6_fnorm:9.2309e-01 L7_fnorm:8.0639e-01 L8_fnorm:6.9196e-01 L9_fnorm:6.3123e-01 L10_fnorm:6.1873e-01 L11_fnorm:5.8037e-01 L12_fnorm:5.2326e-01 L1_l1linf:3.9739e-01 L2_l1linf:4.1799e-01 L3_l1linf:4.1850e-01 L4_l1linf:3.8951e-01 L5_l1linf:3.3543e-01 L6_l1linf:2.9470e-01 L7_l1linf:2.6270e-01 L8_l1linf:2.5154e-01 L9_l1linf:2.4102e-01 L10_l1linf:2.4102e-01 L11_l1linf:2.4541e-01 L12_l1linf:2.3571e-01 L1_spectral:1.2038e-02 L2_spectral:1.2043e-02 L3_spectral:1.2046e-02 L4_spectral:1.2049e-02 L5_spectral:1.2039e-02 L6_spectral:1.2032e-02 L7_spectral:1.2023e-02 L8_spectral:1.2023e-02 L9_spectral:1.2021e-02 L10_spectral:1.2018e-02 L11_spectral:1.2016e-02 L12_spectral:1.2020e-02 train_time:14952ms step_avg:37.38ms +[2025-09-11 07:26:34] [Rank 0] PRINT: step:400/10000 val_loss:7.1313 total_sharp:4.1964e-02 L1_sharp:2.2625e-02 L2_sharp:1.8589e-02 L3_sharp:2.0824e-02 L4_sharp:1.6547e-02 L5_sharp:3.8513e-02 L6_sharp:2.5691e-02 L7_sharp:2.7664e-02 L8_sharp:2.4290e-02 L9_sharp:2.3725e-02 L10_sharp:1.4521e-02 L11_sharp:1.2389e-02 L12_sharp:1.7661e-02 total_fnorm:6.1976e+00 total_l1_linf:2.3707e+04 total_spectral:3.0990e+00 L1_fnorm:1.2222e+00 L2_fnorm:1.2221e+00 L3_fnorm:1.2196e+00 L4_fnorm:1.1833e+00 L5_fnorm:1.0383e+00 L6_fnorm:9.2309e-01 L7_fnorm:8.0639e-01 L8_fnorm:6.9196e-01 L9_fnorm:6.3123e-01 L10_fnorm:6.1873e-01 L11_fnorm:5.8037e-01 L12_fnorm:5.2326e-01 L1_l1linf:3.9739e-01 L2_l1linf:4.1799e-01 L3_l1linf:4.1850e-01 L4_l1linf:3.8951e-01 L5_l1linf:3.3543e-01 
L6_l1linf:2.9470e-01 L7_l1linf:2.6270e-01 L8_l1linf:2.5154e-01 L9_l1linf:2.4102e-01 L10_l1linf:2.4102e-01 L11_l1linf:2.4541e-01 L12_l1linf:2.3571e-01 L1_spectral:1.2038e-02 L2_spectral:1.2043e-02 L3_spectral:1.2046e-02 L4_spectral:1.2049e-02 L5_spectral:1.2039e-02 L6_spectral:1.2032e-02 L7_spectral:1.2023e-02 L8_spectral:1.2023e-02 L9_spectral:1.2021e-02 L10_spectral:1.2018e-02 L11_spectral:1.2016e-02 L12_spectral:1.2020e-02 train_time:14952ms step_avg:37.38ms +[2025-09-11 07:27:04] [Rank 0] step:401/10000 train_time:45863ms step_avg:114.37ms +[2025-09-11 07:27:04] [Rank 0] step:401/10000 train_time:45863ms step_avg:114.37ms +[2025-09-11 07:27:06] [Rank 0] step:421/10000 train_time:47686ms step_avg:113.27ms +[2025-09-11 07:27:06] [Rank 0] step:421/10000 train_time:47686ms step_avg:113.27ms +[2025-09-11 07:27:07] [Rank 0] step:441/10000 train_time:48326ms step_avg:109.58ms +[2025-09-11 07:27:07] [Rank 0] step:441/10000 train_time:48326ms step_avg:109.58ms +[2025-09-11 07:27:08] [Rank 0] step:461/10000 train_time:48964ms step_avg:106.21ms +[2025-09-11 07:27:08] [Rank 0] step:461/10000 train_time:48964ms step_avg:106.21ms +[2025-09-11 07:27:08] [Rank 0] step:481/10000 train_time:49604ms step_avg:103.13ms +[2025-09-11 07:27:08] [Rank 0] step:481/10000 train_time:49604ms step_avg:103.13ms +[2025-09-11 07:27:09] [Rank 0] step:501/10000 train_time:50242ms step_avg:100.28ms +[2025-09-11 07:27:09] [Rank 0] step:501/10000 train_time:50242ms step_avg:100.28ms +[2025-09-11 07:27:09] [Rank 0] step:521/10000 train_time:50880ms step_avg:97.66ms +[2025-09-11 07:27:09] [Rank 0] step:521/10000 train_time:50880ms step_avg:97.66ms +[2025-09-11 07:27:10] [Rank 0] step:541/10000 train_time:51518ms step_avg:95.23ms +[2025-09-11 07:27:10] [Rank 0] step:541/10000 train_time:51518ms step_avg:95.23ms +[2025-09-11 07:27:11] [Rank 0] step:561/10000 train_time:52156ms step_avg:92.97ms +[2025-09-11 07:27:11] [Rank 0] step:561/10000 train_time:52156ms step_avg:92.97ms +[2025-09-11 07:27:11] [Rank 
0] step:581/10000 train_time:52794ms step_avg:90.87ms +[2025-09-11 07:27:11] [Rank 0] step:581/10000 train_time:52794ms step_avg:90.87ms +[2025-09-11 07:27:12] [Rank 0] step:601/10000 train_time:53432ms step_avg:88.91ms +[2025-09-11 07:27:12] [Rank 0] step:601/10000 train_time:53432ms step_avg:88.91ms +[2025-09-11 07:27:13] [Rank 0] step:621/10000 train_time:54070ms step_avg:87.07ms +[2025-09-11 07:27:13] [Rank 0] step:621/10000 train_time:54070ms step_avg:87.07ms +[2025-09-11 07:27:13] [Rank 0] step:641/10000 train_time:54707ms step_avg:85.35ms +[2025-09-11 07:27:13] [Rank 0] step:641/10000 train_time:54707ms step_avg:85.35ms +[2025-09-11 07:27:14] [Rank 0] step:661/10000 train_time:55344ms step_avg:83.73ms +[2025-09-11 07:27:14] [Rank 0] step:661/10000 train_time:55344ms step_avg:83.73ms +[2025-09-11 07:27:15] [Rank 0] step:681/10000 train_time:55981ms step_avg:82.20ms +[2025-09-11 07:27:15] [Rank 0] step:681/10000 train_time:55981ms step_avg:82.20ms +[2025-09-11 07:27:15] [Rank 0] step:701/10000 train_time:56618ms step_avg:80.77ms +[2025-09-11 07:27:15] [Rank 0] step:701/10000 train_time:56618ms step_avg:80.77ms +[2025-09-11 07:27:16] [Rank 0] step:721/10000 train_time:57255ms step_avg:79.41ms +[2025-09-11 07:27:16] [Rank 0] step:721/10000 train_time:57255ms step_avg:79.41ms +[2025-09-11 07:27:16] [Rank 0] step:741/10000 train_time:57892ms step_avg:78.13ms +[2025-09-11 07:27:16] [Rank 0] step:741/10000 train_time:57892ms step_avg:78.13ms +[2025-09-11 07:27:17] [Rank 0] step:761/10000 train_time:58534ms step_avg:76.92ms +[2025-09-11 07:27:17] [Rank 0] step:761/10000 train_time:58534ms step_avg:76.92ms +[2025-09-11 07:27:18] [Rank 0] step:781/10000 train_time:59176ms step_avg:75.77ms +[2025-09-11 07:27:18] [Rank 0] step:781/10000 train_time:59176ms step_avg:75.77ms +[2025-09-11 07:27:18] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 07:27:18] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 07:27:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 07:27:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 07:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 07:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 07:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 07:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 07:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 07:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 07:27:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 07:27:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 07:27:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 07:27:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 07:27:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 07:27:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 07:27:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 07:27:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 07:27:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 07:27:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 07:28:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 07:28:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 07:28:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 07:28:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 07:28:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 07:28:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 07:28:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 07:28:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 07:28:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 07:28:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 07:28:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 07:28:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 07:28:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 07:28:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 07:28:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 07:28:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 07:28:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 07:28:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 07:28:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... 
+[2025-09-11 07:28:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 07:28:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 07:28:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 07:28:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 07:28:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 07:28:02] [Rank 0] PRINT: step:800/10000 val_loss:6.6142 total_sharp:7.6439e-02 L1_sharp:2.9431e-02 L2_sharp:1.2638e-02 L3_sharp:1.4093e-02 L4_sharp:1.5369e-02 L5_sharp:2.3191e-02 L6_sharp:1.7523e-02 L7_sharp:1.2209e-02 L8_sharp:1.3236e-02 L9_sharp:1.2575e-02 L10_sharp:1.4350e-02 L11_sharp:1.2882e-02 L12_sharp:1.9901e-02 total_fnorm:4.5625e+00 total_l1_linf:1.2992e+04 total_spectral:2.2812e+00 L1_fnorm:1.2500e+00 L2_fnorm:1.2500e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2188e+00 L5_fnorm:1.1484e+00 L6_fnorm:1.1016e+00 L7_fnorm:1.0312e+00 L8_fnorm:8.9062e-01 L9_fnorm:8.3594e-01 L10_fnorm:7.6172e-01 L11_fnorm:7.2266e-01 L12_fnorm:6.2109e-01 L1_l1linf:3.9258e-01 L2_l1linf:4.0234e-01 L3_l1linf:4.0234e-01 L4_l1linf:3.8086e-01 L5_l1linf:3.4766e-01 L6_l1linf:3.1641e-01 L7_l1linf:2.8906e-01 L8_l1linf:2.6562e-01 L9_l1linf:2.3438e-01 L10_l1linf:2.2363e-01 L11_l1linf:2.1973e-01 L12_l1linf:2.0898e-01 L1_spectral:1.3495e-02 L2_spectral:1.3470e-02 L3_spectral:1.3464e-02 L4_spectral:1.3555e-02 L5_spectral:1.3578e-02 L6_spectral:1.3567e-02 L7_spectral:1.3595e-02 L8_spectral:1.3510e-02 L9_spectral:1.3323e-02 L10_spectral:1.3137e-02 L11_spectral:1.2906e-02 L12_spectral:1.2692e-02 train_time:59801ms step_avg:74.75ms +[2025-09-11 07:28:02] [Rank 0] PRINT: step:800/10000 val_loss:6.6142 total_sharp:7.6439e-02 L1_sharp:2.9431e-02 L2_sharp:1.2638e-02 L3_sharp:1.4093e-02 L4_sharp:1.5369e-02 L5_sharp:2.3191e-02 L6_sharp:1.7523e-02 L7_sharp:1.2209e-02 L8_sharp:1.3236e-02 
L9_sharp:1.2575e-02 L10_sharp:1.4350e-02 L11_sharp:1.2882e-02 L12_sharp:1.9901e-02 total_fnorm:4.5625e+00 total_l1_linf:1.2992e+04 total_spectral:2.2812e+00 L1_fnorm:1.2500e+00 L2_fnorm:1.2500e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2188e+00 L5_fnorm:1.1484e+00 L6_fnorm:1.1016e+00 L7_fnorm:1.0312e+00 L8_fnorm:8.9062e-01 L9_fnorm:8.3594e-01 L10_fnorm:7.6172e-01 L11_fnorm:7.2266e-01 L12_fnorm:6.2109e-01 L1_l1linf:3.9258e-01 L2_l1linf:4.0234e-01 L3_l1linf:4.0234e-01 L4_l1linf:3.8086e-01 L5_l1linf:3.4766e-01 L6_l1linf:3.1641e-01 L7_l1linf:2.8906e-01 L8_l1linf:2.6562e-01 L9_l1linf:2.3438e-01 L10_l1linf:2.2363e-01 L11_l1linf:2.1973e-01 L12_l1linf:2.0898e-01 L1_spectral:1.3495e-02 L2_spectral:1.3470e-02 L3_spectral:1.3464e-02 L4_spectral:1.3555e-02 L5_spectral:1.3578e-02 L6_spectral:1.3567e-02 L7_spectral:1.3595e-02 L8_spectral:1.3510e-02 L9_spectral:1.3323e-02 L10_spectral:1.3137e-02 L11_spectral:1.2906e-02 L12_spectral:1.2692e-02 train_time:59801ms step_avg:74.75ms +[2025-09-11 07:28:03] [Rank 0] step:801/10000 train_time:60899ms step_avg:76.03ms +[2025-09-11 07:28:03] [Rank 0] step:801/10000 train_time:60899ms step_avg:76.03ms +[2025-09-11 07:28:05] [Rank 0] step:821/10000 train_time:62085ms step_avg:75.62ms +[2025-09-11 07:28:05] [Rank 0] step:821/10000 train_time:62085ms step_avg:75.62ms +[2025-09-11 07:28:05] [Rank 0] step:841/10000 train_time:62729ms step_avg:74.59ms +[2025-09-11 07:28:05] [Rank 0] step:841/10000 train_time:62729ms step_avg:74.59ms +[2025-09-11 07:28:06] [Rank 0] step:861/10000 train_time:63374ms step_avg:73.60ms +[2025-09-11 07:28:06] [Rank 0] step:861/10000 train_time:63374ms step_avg:73.60ms +[2025-09-11 07:28:07] [Rank 0] step:881/10000 train_time:64318ms step_avg:73.01ms +[2025-09-11 07:28:07] [Rank 0] step:881/10000 train_time:64318ms step_avg:73.01ms +[2025-09-11 07:28:07] [Rank 0] step:901/10000 train_time:64961ms step_avg:72.10ms +[2025-09-11 07:28:07] [Rank 0] step:901/10000 train_time:64961ms step_avg:72.10ms +[2025-09-11 07:28:08] [Rank 0] 
step:921/10000 train_time:65604ms step_avg:71.23ms +[2025-09-11 07:28:08] [Rank 0] step:921/10000 train_time:65604ms step_avg:71.23ms +[2025-09-11 07:28:09] [Rank 0] step:941/10000 train_time:66248ms step_avg:70.40ms +[2025-09-11 07:28:09] [Rank 0] step:941/10000 train_time:66248ms step_avg:70.40ms +[2025-09-11 07:28:09] [Rank 0] step:961/10000 train_time:66892ms step_avg:69.61ms +[2025-09-11 07:28:09] [Rank 0] step:961/10000 train_time:66892ms step_avg:69.61ms +[2025-09-11 07:28:10] [Rank 0] step:981/10000 train_time:67535ms step_avg:68.84ms +[2025-09-11 07:28:10] [Rank 0] step:981/10000 train_time:67535ms step_avg:68.84ms +[2025-09-11 07:28:11] [Rank 0] step:1001/10000 train_time:68177ms step_avg:68.11ms +[2025-09-11 07:28:11] [Rank 0] step:1001/10000 train_time:68177ms step_avg:68.11ms +[2025-09-11 07:28:11] [Rank 0] step:1021/10000 train_time:68820ms step_avg:67.40ms +[2025-09-11 07:28:11] [Rank 0] step:1021/10000 train_time:68820ms step_avg:67.40ms +[2025-09-11 07:28:12] [Rank 0] step:1041/10000 train_time:69464ms step_avg:66.73ms +[2025-09-11 07:28:12] [Rank 0] step:1041/10000 train_time:69464ms step_avg:66.73ms +[2025-09-11 07:28:13] [Rank 0] step:1061/10000 train_time:70107ms step_avg:66.08ms +[2025-09-11 07:28:13] [Rank 0] step:1061/10000 train_time:70107ms step_avg:66.08ms +[2025-09-11 07:28:13] [Rank 0] step:1081/10000 train_time:70750ms step_avg:65.45ms +[2025-09-11 07:28:13] [Rank 0] step:1081/10000 train_time:70750ms step_avg:65.45ms +[2025-09-11 07:28:14] [Rank 0] step:1101/10000 train_time:71393ms step_avg:64.84ms +[2025-09-11 07:28:14] [Rank 0] step:1101/10000 train_time:71393ms step_avg:64.84ms +[2025-09-11 07:28:15] [Rank 0] step:1121/10000 train_time:72037ms step_avg:64.26ms +[2025-09-11 07:28:15] [Rank 0] step:1121/10000 train_time:72037ms step_avg:64.26ms +[2025-09-11 07:28:15] [Rank 0] step:1141/10000 train_time:72679ms step_avg:63.70ms +[2025-09-11 07:28:15] [Rank 0] step:1141/10000 train_time:72679ms step_avg:63.70ms +[2025-09-11 07:28:16] 
[Rank 0] step:1161/10000 train_time:73322ms step_avg:63.15ms +[2025-09-11 07:28:16] [Rank 0] step:1161/10000 train_time:73322ms step_avg:63.15ms +[2025-09-11 07:28:16] [Rank 0] step:1181/10000 train_time:73965ms step_avg:62.63ms +[2025-09-11 07:28:16] [Rank 0] step:1181/10000 train_time:73965ms step_avg:62.63ms +[2025-09-11 07:28:17] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 07:28:17] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 07:28:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 07:28:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 07:28:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 07:28:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 07:28:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:28:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:28:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 07:28:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 07:28:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 07:28:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 07:28:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 07:28:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 07:28:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... 
+[2025-09-11 07:28:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 07:28:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 07:28:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 07:28:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 07:28:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 07:28:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 07:28:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 07:28:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 07:28:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 07:28:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 07:28:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 07:28:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 07:28:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 07:28:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 07:28:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 07:28:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 07:28:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 07:28:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 07:28:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 07:28:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... 
+[2025-09-11 07:28:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 07:28:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 07:28:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 07:28:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 07:28:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 07:28:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 07:28:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 07:28:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 07:28:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 07:28:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 07:28:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:28:27] [Rank 0] PRINT: step:1200/10000 val_loss:6.3321 total_sharp:5.0018e-02 L1_sharp:2.3266e-02 L2_sharp:6.9832e-03 L3_sharp:8.2531e-03 L4_sharp:1.0555e-02 L5_sharp:1.6273e-02 L6_sharp:9.0108e-03 L7_sharp:5.9937e-03 L8_sharp:7.7784e-03 L9_sharp:5.8703e-03 L10_sharp:7.5989e-03 L11_sharp:7.7897e-03 L12_sharp:1.6196e-02 total_fnorm:4.5938e+00 total_l1_linf:1.3440e+04 total_spectral:2.2969e+00 L1_fnorm:1.2734e+00 L2_fnorm:1.2578e+00 L3_fnorm:1.2578e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2344e+00 L6_fnorm:1.2344e+00 L7_fnorm:1.2188e+00 L8_fnorm:1.1250e+00 L9_fnorm:1.1016e+00 L10_fnorm:9.6875e-01 L11_fnorm:9.8047e-01 L12_fnorm:8.8672e-01 L1_l1linf:3.7891e-01 L2_l1linf:3.6719e-01 L3_l1linf:3.7891e-01 L4_l1linf:3.7891e-01 L5_l1linf:3.7500e-01 L6_l1linf:3.6719e-01 L7_l1linf:3.5938e-01 L8_l1linf:3.3398e-01 L9_l1linf:3.0664e-01 L10_l1linf:2.7930e-01 L11_l1linf:2.6172e-01 L12_l1linf:2.2363e-01 L1_spectral:1.3974e-02 L2_spectral:1.3947e-02 L3_spectral:1.3882e-02 L4_spectral:1.3831e-02 L5_spectral:1.3881e-02 L6_spectral:1.3940e-02 L7_spectral:1.4030e-02 L8_spectral:1.4152e-02 L9_spectral:1.4107e-02 L10_spectral:1.4187e-02 L11_spectral:1.3960e-02 L12_spectral:1.3710e-02 train_time:74590ms step_avg:62.16ms +[2025-09-11 07:28:27] [Rank 0] PRINT: step:1200/10000 val_loss:6.3321 total_sharp:5.0018e-02 L1_sharp:2.3266e-02 L2_sharp:6.9832e-03 L3_sharp:8.2531e-03 L4_sharp:1.0555e-02 L5_sharp:1.6273e-02 L6_sharp:9.0108e-03 L7_sharp:5.9937e-03 L8_sharp:7.7784e-03 L9_sharp:5.8703e-03 L10_sharp:7.5989e-03 L11_sharp:7.7897e-03 L12_sharp:1.6196e-02 total_fnorm:4.5938e+00 total_l1_linf:1.3440e+04 total_spectral:2.2969e+00 L1_fnorm:1.2734e+00 L2_fnorm:1.2578e+00 L3_fnorm:1.2578e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2344e+00 L6_fnorm:1.2344e+00 L7_fnorm:1.2188e+00 L8_fnorm:1.1250e+00 L9_fnorm:1.1016e+00 L10_fnorm:9.6875e-01 L11_fnorm:9.8047e-01 L12_fnorm:8.8672e-01 L1_l1linf:3.7891e-01 L2_l1linf:3.6719e-01 L3_l1linf:3.7891e-01 L4_l1linf:3.7891e-01 L5_l1linf:3.7500e-01 
L6_l1linf:3.6719e-01 L7_l1linf:3.5938e-01 L8_l1linf:3.3398e-01 L9_l1linf:3.0664e-01 L10_l1linf:2.7930e-01 L11_l1linf:2.6172e-01 L12_l1linf:2.2363e-01 L1_spectral:1.3974e-02 L2_spectral:1.3947e-02 L3_spectral:1.3882e-02 L4_spectral:1.3831e-02 L5_spectral:1.3881e-02 L6_spectral:1.3940e-02 L7_spectral:1.4030e-02 L8_spectral:1.4152e-02 L9_spectral:1.4107e-02 L10_spectral:1.4187e-02 L11_spectral:1.3960e-02 L12_spectral:1.3710e-02 train_time:74590ms step_avg:62.16ms +[2025-09-11 07:28:28] [Rank 0] step:1201/10000 train_time:75660ms step_avg:63.00ms +[2025-09-11 07:28:28] [Rank 0] step:1201/10000 train_time:75660ms step_avg:63.00ms +[2025-09-11 07:28:29] [Rank 0] step:1221/10000 train_time:76293ms step_avg:62.48ms +[2025-09-11 07:28:29] [Rank 0] step:1221/10000 train_time:76293ms step_avg:62.48ms +[2025-09-11 07:28:29] [Rank 0] step:1241/10000 train_time:76938ms step_avg:62.00ms +[2025-09-11 07:28:29] [Rank 0] step:1241/10000 train_time:76938ms step_avg:62.00ms +[2025-09-11 07:28:30] [Rank 0] step:1261/10000 train_time:77582ms step_avg:61.52ms +[2025-09-11 07:28:30] [Rank 0] step:1261/10000 train_time:77582ms step_avg:61.52ms +[2025-09-11 07:28:31] [Rank 0] step:1281/10000 train_time:78225ms step_avg:61.07ms +[2025-09-11 07:28:31] [Rank 0] step:1281/10000 train_time:78225ms step_avg:61.07ms +[2025-09-11 07:28:31] [Rank 0] step:1301/10000 train_time:78869ms step_avg:60.62ms +[2025-09-11 07:28:31] [Rank 0] step:1301/10000 train_time:78869ms step_avg:60.62ms +[2025-09-11 07:28:32] [Rank 0] step:1321/10000 train_time:79512ms step_avg:60.19ms +[2025-09-11 07:28:32] [Rank 0] step:1321/10000 train_time:79512ms step_avg:60.19ms +[2025-09-11 07:28:33] [Rank 0] step:1341/10000 train_time:80154ms step_avg:59.77ms +[2025-09-11 07:28:33] [Rank 0] step:1341/10000 train_time:80154ms step_avg:59.77ms +[2025-09-11 07:28:33] [Rank 0] step:1361/10000 train_time:80798ms step_avg:59.37ms +[2025-09-11 07:28:33] [Rank 0] step:1361/10000 train_time:80798ms step_avg:59.37ms +[2025-09-11 07:28:34] 
[Rank 0] step:1381/10000 train_time:81440ms step_avg:58.97ms +[2025-09-11 07:28:34] [Rank 0] step:1381/10000 train_time:81440ms step_avg:58.97ms +[2025-09-11 07:28:35] [Rank 0] step:1401/10000 train_time:82083ms step_avg:58.59ms +[2025-09-11 07:28:35] [Rank 0] step:1401/10000 train_time:82083ms step_avg:58.59ms +[2025-09-11 07:28:35] [Rank 0] step:1421/10000 train_time:82726ms step_avg:58.22ms +[2025-09-11 07:28:35] [Rank 0] step:1421/10000 train_time:82726ms step_avg:58.22ms +[2025-09-11 07:28:36] [Rank 0] step:1441/10000 train_time:83368ms step_avg:57.85ms +[2025-09-11 07:28:36] [Rank 0] step:1441/10000 train_time:83368ms step_avg:57.85ms +[2025-09-11 07:28:37] [Rank 0] step:1461/10000 train_time:84011ms step_avg:57.50ms +[2025-09-11 07:28:37] [Rank 0] step:1461/10000 train_time:84011ms step_avg:57.50ms +[2025-09-11 07:28:37] [Rank 0] step:1481/10000 train_time:84654ms step_avg:57.16ms +[2025-09-11 07:28:37] [Rank 0] step:1481/10000 train_time:84654ms step_avg:57.16ms +[2025-09-11 07:28:38] [Rank 0] step:1501/10000 train_time:85301ms step_avg:56.83ms +[2025-09-11 07:28:38] [Rank 0] step:1501/10000 train_time:85301ms step_avg:56.83ms +[2025-09-11 07:28:39] [Rank 0] step:1521/10000 train_time:85948ms step_avg:56.51ms +[2025-09-11 07:28:39] [Rank 0] step:1521/10000 train_time:85948ms step_avg:56.51ms +[2025-09-11 07:28:39] [Rank 0] step:1541/10000 train_time:86594ms step_avg:56.19ms +[2025-09-11 07:28:39] [Rank 0] step:1541/10000 train_time:86594ms step_avg:56.19ms +[2025-09-11 07:28:40] [Rank 0] step:1561/10000 train_time:87241ms step_avg:55.89ms +[2025-09-11 07:28:40] [Rank 0] step:1561/10000 train_time:87241ms step_avg:55.89ms +[2025-09-11 07:28:40] [Rank 0] step:1581/10000 train_time:87888ms step_avg:55.59ms +[2025-09-11 07:28:40] [Rank 0] step:1581/10000 train_time:87888ms step_avg:55.59ms +[2025-09-11 07:28:41] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 07:28:41] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 07:28:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 07:28:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 07:28:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 07:28:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 07:28:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:28:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:28:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 07:28:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 07:28:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 07:28:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 07:28:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 07:28:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 07:28:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 07:28:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 07:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 07:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 07:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 07:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 07:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 07:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 07:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 07:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 07:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 07:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 07:28:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 07:28:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 07:28:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 07:28:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 07:28:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 07:28:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 07:28:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 07:28:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 07:28:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 07:28:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 07:28:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 07:28:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 07:28:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 07:28:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 07:28:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 07:28:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 07:28:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 07:28:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 07:28:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 07:28:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 07:28:51] [Rank 0] PRINT: step:1600/10000 val_loss:6.1376 total_sharp:3.3965e-02 L1_sharp:9.0032e-03 L2_sharp:2.3849e-03 L3_sharp:4.1018e-03 L4_sharp:5.5497e-03 L5_sharp:1.0033e-02 L6_sharp:5.8745e-03 L7_sharp:4.9606e-03 L8_sharp:7.3869e-03 L9_sharp:5.3062e-03 L10_sharp:5.2533e-03 L11_sharp:5.9746e-03 L12_sharp:1.6196e-02 total_fnorm:4.6250e+00 total_l1_linf:1.3376e+04 total_spectral:2.3125e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2578e+00 L3_fnorm:1.2656e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2422e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.1953e+00 L9_fnorm:1.1953e+00 L10_fnorm:1.1328e+00 L11_fnorm:1.0703e+00 L12_fnorm:9.4141e-01 L1_l1linf:3.7305e-01 L2_l1linf:3.5352e-01 L3_l1linf:3.5547e-01 L4_l1linf:3.6523e-01 L5_l1linf:3.6328e-01 L6_l1linf:3.6133e-01 L7_l1linf:3.5938e-01 L8_l1linf:3.4766e-01 L9_l1linf:3.2422e-01 L10_l1linf:2.9297e-01 L11_l1linf:2.7344e-01 L12_l1linf:2.2363e-01 L1_spectral:1.4483e-02 L2_spectral:1.4370e-02 L3_spectral:1.4250e-02 L4_spectral:1.4131e-02 L5_spectral:1.4103e-02 L6_spectral:1.4091e-02 L7_spectral:1.4113e-02 L8_spectral:1.4355e-02 L9_spectral:1.4484e-02 L10_spectral:1.4525e-02 L11_spectral:1.4525e-02 L12_spectral:1.4086e-02 train_time:88520ms step_avg:55.33ms +[2025-09-11 07:28:51] [Rank 0] PRINT: step:1600/10000 
val_loss:6.1376 total_sharp:3.3965e-02 L1_sharp:9.0032e-03 L2_sharp:2.3849e-03 L3_sharp:4.1018e-03 L4_sharp:5.5497e-03 L5_sharp:1.0033e-02 L6_sharp:5.8745e-03 L7_sharp:4.9606e-03 L8_sharp:7.3869e-03 L9_sharp:5.3062e-03 L10_sharp:5.2533e-03 L11_sharp:5.9746e-03 L12_sharp:1.6196e-02 total_fnorm:4.6250e+00 total_l1_linf:1.3376e+04 total_spectral:2.3125e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2578e+00 L3_fnorm:1.2656e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2422e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.1953e+00 L9_fnorm:1.1953e+00 L10_fnorm:1.1328e+00 L11_fnorm:1.0703e+00 L12_fnorm:9.4141e-01 L1_l1linf:3.7305e-01 L2_l1linf:3.5352e-01 L3_l1linf:3.5547e-01 L4_l1linf:3.6523e-01 L5_l1linf:3.6328e-01 L6_l1linf:3.6133e-01 L7_l1linf:3.5938e-01 L8_l1linf:3.4766e-01 L9_l1linf:3.2422e-01 L10_l1linf:2.9297e-01 L11_l1linf:2.7344e-01 L12_l1linf:2.2363e-01 L1_spectral:1.4483e-02 L2_spectral:1.4370e-02 L3_spectral:1.4250e-02 L4_spectral:1.4131e-02 L5_spectral:1.4103e-02 L6_spectral:1.4091e-02 L7_spectral:1.4113e-02 L8_spectral:1.4355e-02 L9_spectral:1.4484e-02 L10_spectral:1.4525e-02 L11_spectral:1.4525e-02 L12_spectral:1.4086e-02 train_time:88520ms step_avg:55.33ms +[2025-09-11 07:28:52] [Rank 0] step:1601/10000 train_time:89598ms step_avg:55.96ms +[2025-09-11 07:28:52] [Rank 0] step:1601/10000 train_time:89598ms step_avg:55.96ms +[2025-09-11 07:28:53] [Rank 0] step:1621/10000 train_time:90245ms step_avg:55.67ms +[2025-09-11 07:28:53] [Rank 0] step:1621/10000 train_time:90245ms step_avg:55.67ms +[2025-09-11 07:28:53] [Rank 0] step:1641/10000 train_time:90896ms step_avg:55.39ms +[2025-09-11 07:28:53] [Rank 0] step:1641/10000 train_time:90896ms step_avg:55.39ms +[2025-09-11 07:28:54] [Rank 0] step:1661/10000 train_time:91546ms step_avg:55.12ms +[2025-09-11 07:28:54] [Rank 0] step:1661/10000 train_time:91546ms step_avg:55.12ms +[2025-09-11 07:28:55] [Rank 0] step:1681/10000 train_time:92195ms step_avg:54.85ms +[2025-09-11 07:28:55] [Rank 0] step:1681/10000 train_time:92195ms 
step_avg:54.85ms +[2025-09-11 07:28:55] [Rank 0] step:1701/10000 train_time:92847ms step_avg:54.58ms +[2025-09-11 07:28:55] [Rank 0] step:1701/10000 train_time:92847ms step_avg:54.58ms +[2025-09-11 07:28:56] [Rank 0] step:1721/10000 train_time:93499ms step_avg:54.33ms +[2025-09-11 07:28:56] [Rank 0] step:1721/10000 train_time:93499ms step_avg:54.33ms +[2025-09-11 07:28:57] [Rank 0] step:1741/10000 train_time:94148ms step_avg:54.08ms +[2025-09-11 07:28:57] [Rank 0] step:1741/10000 train_time:94148ms step_avg:54.08ms +[2025-09-11 07:28:57] [Rank 0] step:1761/10000 train_time:94797ms step_avg:53.83ms +[2025-09-11 07:28:57] [Rank 0] step:1761/10000 train_time:94797ms step_avg:53.83ms +[2025-09-11 07:28:58] [Rank 0] step:1781/10000 train_time:95446ms step_avg:53.59ms +[2025-09-11 07:28:58] [Rank 0] step:1781/10000 train_time:95446ms step_avg:53.59ms +[2025-09-11 07:28:59] [Rank 0] step:1801/10000 train_time:96095ms step_avg:53.36ms +[2025-09-11 07:28:59] [Rank 0] step:1801/10000 train_time:96095ms step_avg:53.36ms +[2025-09-11 07:28:59] [Rank 0] step:1821/10000 train_time:96744ms step_avg:53.13ms +[2025-09-11 07:28:59] [Rank 0] step:1821/10000 train_time:96744ms step_avg:53.13ms +[2025-09-11 07:29:00] [Rank 0] step:1841/10000 train_time:97393ms step_avg:52.90ms +[2025-09-11 07:29:00] [Rank 0] step:1841/10000 train_time:97393ms step_avg:52.90ms +[2025-09-11 07:29:00] [Rank 0] step:1861/10000 train_time:98042ms step_avg:52.68ms +[2025-09-11 07:29:00] [Rank 0] step:1861/10000 train_time:98042ms step_avg:52.68ms +[2025-09-11 07:29:01] [Rank 0] step:1881/10000 train_time:98691ms step_avg:52.47ms +[2025-09-11 07:29:01] [Rank 0] step:1881/10000 train_time:98691ms step_avg:52.47ms +[2025-09-11 07:29:02] [Rank 0] step:1901/10000 train_time:99340ms step_avg:52.26ms +[2025-09-11 07:29:02] [Rank 0] step:1901/10000 train_time:99340ms step_avg:52.26ms +[2025-09-11 07:29:02] [Rank 0] step:1921/10000 train_time:99990ms step_avg:52.05ms +[2025-09-11 07:29:02] [Rank 0] step:1921/10000 
train_time:99990ms step_avg:52.05ms +[2025-09-11 07:29:03] [Rank 0] step:1941/10000 train_time:100639ms step_avg:51.85ms +[2025-09-11 07:29:03] [Rank 0] step:1941/10000 train_time:100639ms step_avg:51.85ms +[2025-09-11 07:29:04] [Rank 0] step:1961/10000 train_time:101288ms step_avg:51.65ms +[2025-09-11 07:29:04] [Rank 0] step:1961/10000 train_time:101288ms step_avg:51.65ms +[2025-09-11 07:29:04] [Rank 0] step:1981/10000 train_time:101937ms step_avg:51.46ms +[2025-09-11 07:29:04] [Rank 0] step:1981/10000 train_time:101937ms step_avg:51.46ms +[2025-09-11 07:29:05] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 07:29:05] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 07:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 07:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 07:29:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 07:29:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 07:29:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:29:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:29:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 07:29:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 07:29:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 07:29:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 07:29:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 07:29:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 07:29:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 07:29:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 07:29:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 07:29:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 07:29:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 07:29:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 07:29:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 07:29:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 07:29:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 07:29:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 07:29:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 07:29:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 07:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 07:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 07:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 07:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 07:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 07:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 07:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 07:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 07:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 07:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 07:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 07:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 07:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 07:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 07:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 07:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 07:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 07:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 07:29:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 07:29:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:29:15] [Rank 0] PRINT: step:2000/10000 val_loss:6.0009 total_sharp:3.0754e-02 L1_sharp:8.8687e-03 L2_sharp:2.4474e-03 L3_sharp:3.1379e-03 L4_sharp:3.8457e-03 L5_sharp:9.2211e-03 L6_sharp:4.2772e-03 L7_sharp:3.7066e-03 L8_sharp:6.3227e-03 L9_sharp:5.7346e-03 L10_sharp:5.9123e-03 L11_sharp:6.5862e-03 L12_sharp:1.4659e-02 total_fnorm:4.6562e+00 total_l1_linf:1.3248e+04 total_spectral:2.3281e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2500e+00 L3_fnorm:1.2578e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2266e+00 L9_fnorm:1.2344e+00 L10_fnorm:1.1875e+00 L11_fnorm:1.1250e+00 L12_fnorm:9.7266e-01 L1_l1linf:3.6719e-01 L2_l1linf:3.4961e-01 L3_l1linf:3.4375e-01 L4_l1linf:3.4766e-01 L5_l1linf:3.4375e-01 L6_l1linf:3.4961e-01 L7_l1linf:3.4570e-01 L8_l1linf:3.4180e-01 L9_l1linf:3.2422e-01 L10_l1linf:3.0078e-01 L11_l1linf:2.7344e-01 L12_l1linf:2.1875e-01 L1_spectral:1.4818e-02 L2_spectral:1.4739e-02 L3_spectral:1.4565e-02 L4_spectral:1.4544e-02 L5_spectral:1.4599e-02 L6_spectral:1.4416e-02 L7_spectral:1.4341e-02 L8_spectral:1.4439e-02 L9_spectral:1.4573e-02 L10_spectral:1.4744e-02 L11_spectral:1.4722e-02 L12_spectral:1.4265e-02 train_time:102568ms step_avg:51.28ms +[2025-09-11 07:29:15] [Rank 0] PRINT: step:2000/10000 val_loss:6.0009 total_sharp:3.0754e-02 L1_sharp:8.8687e-03 L2_sharp:2.4474e-03 L3_sharp:3.1379e-03 L4_sharp:3.8457e-03 L5_sharp:9.2211e-03 L6_sharp:4.2772e-03 L7_sharp:3.7066e-03 L8_sharp:6.3227e-03 L9_sharp:5.7346e-03 L10_sharp:5.9123e-03 L11_sharp:6.5862e-03 L12_sharp:1.4659e-02 total_fnorm:4.6562e+00 total_l1_linf:1.3248e+04 total_spectral:2.3281e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2500e+00 L3_fnorm:1.2578e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2266e+00 L9_fnorm:1.2344e+00 L10_fnorm:1.1875e+00 L11_fnorm:1.1250e+00 L12_fnorm:9.7266e-01 L1_l1linf:3.6719e-01 L2_l1linf:3.4961e-01 L3_l1linf:3.4375e-01 L4_l1linf:3.4766e-01 L5_l1linf:3.4375e-01 
L6_l1linf:3.4961e-01 L7_l1linf:3.4570e-01 L8_l1linf:3.4180e-01 L9_l1linf:3.2422e-01 L10_l1linf:3.0078e-01 L11_l1linf:2.7344e-01 L12_l1linf:2.1875e-01 L1_spectral:1.4818e-02 L2_spectral:1.4739e-02 L3_spectral:1.4565e-02 L4_spectral:1.4544e-02 L5_spectral:1.4599e-02 L6_spectral:1.4416e-02 L7_spectral:1.4341e-02 L8_spectral:1.4439e-02 L9_spectral:1.4573e-02 L10_spectral:1.4744e-02 L11_spectral:1.4722e-02 L12_spectral:1.4265e-02 train_time:102568ms step_avg:51.28ms +[2025-09-11 07:29:16] [Rank 0] step:2001/10000 train_time:103664ms step_avg:51.81ms +[2025-09-11 07:29:16] [Rank 0] step:2001/10000 train_time:103664ms step_avg:51.81ms +[2025-09-11 07:29:17] [Rank 0] step:2021/10000 train_time:104300ms step_avg:51.61ms +[2025-09-11 07:29:17] [Rank 0] step:2021/10000 train_time:104300ms step_avg:51.61ms +[2025-09-11 07:29:17] [Rank 0] step:2041/10000 train_time:104949ms step_avg:51.42ms +[2025-09-11 07:29:17] [Rank 0] step:2041/10000 train_time:104949ms step_avg:51.42ms +[2025-09-11 07:29:18] [Rank 0] step:2061/10000 train_time:105597ms step_avg:51.24ms +[2025-09-11 07:29:18] [Rank 0] step:2061/10000 train_time:105597ms step_avg:51.24ms +[2025-09-11 07:29:19] [Rank 0] step:2081/10000 train_time:106245ms step_avg:51.05ms +[2025-09-11 07:29:19] [Rank 0] step:2081/10000 train_time:106245ms step_avg:51.05ms +[2025-09-11 07:29:19] [Rank 0] step:2101/10000 train_time:106893ms step_avg:50.88ms +[2025-09-11 07:29:19] [Rank 0] step:2101/10000 train_time:106893ms step_avg:50.88ms +[2025-09-11 07:29:20] [Rank 0] step:2121/10000 train_time:107543ms step_avg:50.70ms +[2025-09-11 07:29:20] [Rank 0] step:2121/10000 train_time:107543ms step_avg:50.70ms +[2025-09-11 07:29:21] [Rank 0] step:2141/10000 train_time:108190ms step_avg:50.53ms +[2025-09-11 07:29:21] [Rank 0] step:2141/10000 train_time:108190ms step_avg:50.53ms +[2025-09-11 07:29:21] [Rank 0] step:2161/10000 train_time:108838ms step_avg:50.36ms +[2025-09-11 07:29:21] [Rank 0] step:2161/10000 train_time:108838ms step_avg:50.36ms 
+[2025-09-11 07:29:22] [Rank 0] step:2181/10000 train_time:109486ms step_avg:50.20ms +[2025-09-11 07:29:22] [Rank 0] step:2181/10000 train_time:109486ms step_avg:50.20ms +[2025-09-11 07:29:23] [Rank 0] step:2201/10000 train_time:110133ms step_avg:50.04ms +[2025-09-11 07:29:23] [Rank 0] step:2201/10000 train_time:110133ms step_avg:50.04ms +[2025-09-11 07:29:23] [Rank 0] step:2221/10000 train_time:110781ms step_avg:49.88ms +[2025-09-11 07:29:23] [Rank 0] step:2221/10000 train_time:110781ms step_avg:49.88ms +[2025-09-11 07:29:24] [Rank 0] step:2241/10000 train_time:111440ms step_avg:49.73ms +[2025-09-11 07:29:24] [Rank 0] step:2241/10000 train_time:111440ms step_avg:49.73ms +[2025-09-11 07:29:25] [Rank 0] step:2261/10000 train_time:112100ms step_avg:49.58ms +[2025-09-11 07:29:25] [Rank 0] step:2261/10000 train_time:112100ms step_avg:49.58ms +[2025-09-11 07:29:25] [Rank 0] step:2281/10000 train_time:112760ms step_avg:49.43ms +[2025-09-11 07:29:25] [Rank 0] step:2281/10000 train_time:112760ms step_avg:49.43ms +[2025-09-11 07:29:26] [Rank 0] step:2301/10000 train_time:113421ms step_avg:49.29ms +[2025-09-11 07:29:26] [Rank 0] step:2301/10000 train_time:113421ms step_avg:49.29ms +[2025-09-11 07:29:27] [Rank 0] step:2321/10000 train_time:114082ms step_avg:49.15ms +[2025-09-11 07:29:27] [Rank 0] step:2321/10000 train_time:114082ms step_avg:49.15ms +[2025-09-11 07:29:27] [Rank 0] step:2341/10000 train_time:114743ms step_avg:49.01ms +[2025-09-11 07:29:27] [Rank 0] step:2341/10000 train_time:114743ms step_avg:49.01ms +[2025-09-11 07:29:28] [Rank 0] step:2361/10000 train_time:115405ms step_avg:48.88ms +[2025-09-11 07:29:28] [Rank 0] step:2361/10000 train_time:115405ms step_avg:48.88ms +[2025-09-11 07:29:29] [Rank 0] step:2381/10000 train_time:116065ms step_avg:48.75ms +[2025-09-11 07:29:29] [Rank 0] step:2381/10000 train_time:116065ms step_avg:48.75ms +[2025-09-11 07:29:29] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 07:29:29] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 07:29:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 07:29:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 07:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 07:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 07:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 07:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 07:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 07:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 07:29:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 07:29:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 07:29:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 07:29:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 07:29:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 07:29:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 07:29:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 07:29:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 07:29:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 07:29:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 07:29:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 07:29:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 07:29:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 07:29:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 07:29:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 07:29:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 07:29:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 07:29:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 07:29:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 07:29:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 07:29:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 07:29:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 07:29:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 07:29:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 07:29:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 07:29:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 07:29:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 07:29:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 07:29:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 07:29:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 07:29:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 07:29:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 07:29:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 07:29:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 07:29:42] [Rank 0] PRINT: step:2400/10000 val_loss:5.8894 total_sharp:2.4013e-02 L1_sharp:4.0620e-03 L2_sharp:1.9584e-03 L3_sharp:2.9407e-03 L4_sharp:2.5937e-03 L5_sharp:5.9910e-03 L6_sharp:4.0324e-03 L7_sharp:2.8946e-03 L8_sharp:5.1598e-03 L9_sharp:4.7004e-03 L10_sharp:4.8770e-03 L11_sharp:4.9759e-03 L12_sharp:1.1014e-02 total_fnorm:4.6562e+00 total_l1_linf:1.2928e+04 total_spectral:2.3281e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2500e+00 L3_fnorm:1.2578e+00 L4_fnorm:1.2656e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2422e+00 L9_fnorm:1.2500e+00 L10_fnorm:1.2188e+00 L11_fnorm:1.1719e+00 L12_fnorm:1.0156e+00 L1_l1linf:3.6133e-01 L2_l1linf:3.3984e-01 L3_l1linf:3.3594e-01 L4_l1linf:3.4570e-01 L5_l1linf:3.3008e-01 L6_l1linf:3.3008e-01 L7_l1linf:3.3594e-01 L8_l1linf:3.3789e-01 L9_l1linf:3.2422e-01 L10_l1linf:3.0859e-01 L11_l1linf:2.7734e-01 L12_l1linf:2.2461e-01 L1_spectral:1.5140e-02 L2_spectral:1.4893e-02 L3_spectral:1.4739e-02 L4_spectral:1.4725e-02 L5_spectral:1.4912e-02 L6_spectral:1.4751e-02 L7_spectral:1.4727e-02 L8_spectral:1.4531e-02 L9_spectral:1.4661e-02 L10_spectral:1.4854e-02 L11_spectral:1.4964e-02 L12_spectral:1.4641e-02 train_time:116707ms step_avg:48.63ms +[2025-09-11 07:29:42] [Rank 0] PRINT: step:2400/10000 
val_loss:5.8894 total_sharp:2.4013e-02 L1_sharp:4.0620e-03 L2_sharp:1.9584e-03 L3_sharp:2.9407e-03 L4_sharp:2.5937e-03 L5_sharp:5.9910e-03 L6_sharp:4.0324e-03 L7_sharp:2.8946e-03 L8_sharp:5.1598e-03 L9_sharp:4.7004e-03 L10_sharp:4.8770e-03 L11_sharp:4.9759e-03 L12_sharp:1.1014e-02 total_fnorm:4.6562e+00 total_l1_linf:1.2928e+04 total_spectral:2.3281e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2500e+00 L3_fnorm:1.2578e+00 L4_fnorm:1.2656e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2422e+00 L9_fnorm:1.2500e+00 L10_fnorm:1.2188e+00 L11_fnorm:1.1719e+00 L12_fnorm:1.0156e+00 L1_l1linf:3.6133e-01 L2_l1linf:3.3984e-01 L3_l1linf:3.3594e-01 L4_l1linf:3.4570e-01 L5_l1linf:3.3008e-01 L6_l1linf:3.3008e-01 L7_l1linf:3.3594e-01 L8_l1linf:3.3789e-01 L9_l1linf:3.2422e-01 L10_l1linf:3.0859e-01 L11_l1linf:2.7734e-01 L12_l1linf:2.2461e-01 L1_spectral:1.5140e-02 L2_spectral:1.4893e-02 L3_spectral:1.4739e-02 L4_spectral:1.4725e-02 L5_spectral:1.4912e-02 L6_spectral:1.4751e-02 L7_spectral:1.4727e-02 L8_spectral:1.4531e-02 L9_spectral:1.4661e-02 L10_spectral:1.4854e-02 L11_spectral:1.4964e-02 L12_spectral:1.4641e-02 train_time:116707ms step_avg:48.63ms +[2025-09-11 07:29:43] [Rank 0] step:2401/10000 train_time:117829ms step_avg:49.08ms +[2025-09-11 07:29:43] [Rank 0] step:2401/10000 train_time:117829ms step_avg:49.08ms +[2025-09-11 07:29:44] [Rank 0] step:2421/10000 train_time:118479ms step_avg:48.94ms +[2025-09-11 07:29:44] [Rank 0] step:2421/10000 train_time:118479ms step_avg:48.94ms +[2025-09-11 07:29:45] [Rank 0] step:2441/10000 train_time:119141ms step_avg:48.81ms +[2025-09-11 07:29:45] [Rank 0] step:2441/10000 train_time:119141ms step_avg:48.81ms +[2025-09-11 07:29:45] [Rank 0] step:2461/10000 train_time:119802ms step_avg:48.68ms +[2025-09-11 07:29:45] [Rank 0] step:2461/10000 train_time:119802ms step_avg:48.68ms +[2025-09-11 07:29:46] [Rank 0] step:2481/10000 train_time:120464ms step_avg:48.55ms +[2025-09-11 07:29:46] [Rank 0] step:2481/10000 
train_time:120464ms step_avg:48.55ms +[2025-09-11 07:29:47] [Rank 0] step:2501/10000 train_time:121125ms step_avg:48.43ms +[2025-09-11 07:29:47] [Rank 0] step:2501/10000 train_time:121125ms step_avg:48.43ms +[2025-09-11 07:29:47] [Rank 0] step:2521/10000 train_time:121787ms step_avg:48.31ms +[2025-09-11 07:29:47] [Rank 0] step:2521/10000 train_time:121787ms step_avg:48.31ms +[2025-09-11 07:29:48] [Rank 0] step:2541/10000 train_time:122448ms step_avg:48.19ms +[2025-09-11 07:29:48] [Rank 0] step:2541/10000 train_time:122448ms step_avg:48.19ms +[2025-09-11 07:29:49] [Rank 0] step:2561/10000 train_time:123109ms step_avg:48.07ms +[2025-09-11 07:29:49] [Rank 0] step:2561/10000 train_time:123109ms step_avg:48.07ms +[2025-09-11 07:29:49] [Rank 0] step:2581/10000 train_time:123771ms step_avg:47.95ms +[2025-09-11 07:29:49] [Rank 0] step:2581/10000 train_time:123771ms step_avg:47.95ms +[2025-09-11 07:29:50] [Rank 0] step:2601/10000 train_time:124432ms step_avg:47.84ms +[2025-09-11 07:29:50] [Rank 0] step:2601/10000 train_time:124432ms step_avg:47.84ms +[2025-09-11 07:29:51] [Rank 0] step:2621/10000 train_time:125093ms step_avg:47.73ms +[2025-09-11 07:29:51] [Rank 0] step:2621/10000 train_time:125093ms step_avg:47.73ms +[2025-09-11 07:29:51] [Rank 0] step:2641/10000 train_time:125754ms step_avg:47.62ms +[2025-09-11 07:29:51] [Rank 0] step:2641/10000 train_time:125754ms step_avg:47.62ms +[2025-09-11 07:29:52] [Rank 0] step:2661/10000 train_time:126415ms step_avg:47.51ms +[2025-09-11 07:29:52] [Rank 0] step:2661/10000 train_time:126415ms step_avg:47.51ms +[2025-09-11 07:29:53] [Rank 0] step:2681/10000 train_time:127077ms step_avg:47.40ms +[2025-09-11 07:29:53] [Rank 0] step:2681/10000 train_time:127077ms step_avg:47.40ms +[2025-09-11 07:29:53] [Rank 0] step:2701/10000 train_time:127738ms step_avg:47.29ms +[2025-09-11 07:29:53] [Rank 0] step:2701/10000 train_time:127738ms step_avg:47.29ms +[2025-09-11 07:29:54] [Rank 0] step:2721/10000 train_time:128399ms step_avg:47.19ms 
+[2025-09-11 07:29:54] [Rank 0] step:2721/10000 train_time:128399ms step_avg:47.19ms +[2025-09-11 07:29:55] [Rank 0] step:2741/10000 train_time:129061ms step_avg:47.09ms +[2025-09-11 07:29:55] [Rank 0] step:2741/10000 train_time:129061ms step_avg:47.09ms +[2025-09-11 07:29:55] [Rank 0] step:2761/10000 train_time:129723ms step_avg:46.98ms +[2025-09-11 07:29:55] [Rank 0] step:2761/10000 train_time:129723ms step_avg:46.98ms +[2025-09-11 07:29:56] [Rank 0] step:2781/10000 train_time:130383ms step_avg:46.88ms +[2025-09-11 07:29:56] [Rank 0] step:2781/10000 train_time:130383ms step_avg:46.88ms +[2025-09-11 07:29:57] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 07:29:57] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 07:29:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 07:29:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 07:29:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 07:29:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 07:29:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:29:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:29:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 07:29:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 07:29:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 07:29:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 07:30:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 07:30:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 07:30:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 07:30:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 07:30:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 07:30:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 07:30:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 07:30:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 07:30:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 07:30:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 07:30:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 07:30:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 07:30:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 07:30:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 07:30:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 07:30:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 07:30:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 07:30:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 07:30:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 07:30:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 07:30:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 07:30:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 07:30:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 07:30:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 07:30:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 07:30:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 07:30:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 07:30:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 07:30:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 07:30:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 07:30:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 07:30:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 07:30:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 07:30:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:30:06] [Rank 0] PRINT: step:2800/10000 val_loss:5.8164 total_sharp:2.9185e-02 L1_sharp:5.8237e-03 L2_sharp:2.6791e-03 L3_sharp:3.4096e-03 L4_sharp:2.7821e-03 L5_sharp:6.8414e-03 L6_sharp:4.4893e-03 L7_sharp:3.9903e-03 L8_sharp:5.2230e-03 L9_sharp:4.5729e-03 L10_sharp:5.0092e-03 L11_sharp:5.6480e-03 L12_sharp:1.3863e-02 total_fnorm:4.6250e+00 total_l1_linf:1.2544e+04 total_spectral:2.3125e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2500e+00 L3_fnorm:1.2578e+00 L4_fnorm:1.2656e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2734e+00 L7_fnorm:1.2734e+00 L8_fnorm:1.2422e+00 L9_fnorm:1.2500e+00 L10_fnorm:1.2188e+00 L11_fnorm:1.1797e+00 L12_fnorm:1.0234e+00 L1_l1linf:3.5742e-01 L2_l1linf:3.3398e-01 L3_l1linf:3.3594e-01 L4_l1linf:3.3008e-01 L5_l1linf:3.2617e-01 L6_l1linf:3.2617e-01 L7_l1linf:3.2617e-01 L8_l1linf:3.3008e-01 L9_l1linf:3.1641e-01 L10_l1linf:2.9688e-01 L11_l1linf:2.6953e-01 L12_l1linf:2.1582e-01 L1_spectral:1.5397e-02 L2_spectral:1.5165e-02 L3_spectral:1.5009e-02 L4_spectral:1.4977e-02 L5_spectral:1.5135e-02 L6_spectral:1.4996e-02 L7_spectral:1.4952e-02 L8_spectral:1.4736e-02 L9_spectral:1.4788e-02 L10_spectral:1.4968e-02 L11_spectral:1.5087e-02 L12_spectral:1.4544e-02 train_time:131027ms step_avg:46.80ms +[2025-09-11 07:30:06] [Rank 0] PRINT: step:2800/10000 val_loss:5.8164 total_sharp:2.9185e-02 L1_sharp:5.8237e-03 L2_sharp:2.6791e-03 L3_sharp:3.4096e-03 L4_sharp:2.7821e-03 L5_sharp:6.8414e-03 L6_sharp:4.4893e-03 L7_sharp:3.9903e-03 L8_sharp:5.2230e-03 L9_sharp:4.5729e-03 L10_sharp:5.0092e-03 L11_sharp:5.6480e-03 L12_sharp:1.3863e-02 total_fnorm:4.6250e+00 total_l1_linf:1.2544e+04 total_spectral:2.3125e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2500e+00 L3_fnorm:1.2578e+00 L4_fnorm:1.2656e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2734e+00 L7_fnorm:1.2734e+00 L8_fnorm:1.2422e+00 L9_fnorm:1.2500e+00 L10_fnorm:1.2188e+00 L11_fnorm:1.1797e+00 L12_fnorm:1.0234e+00 L1_l1linf:3.5742e-01 L2_l1linf:3.3398e-01 L3_l1linf:3.3594e-01 L4_l1linf:3.3008e-01 L5_l1linf:3.2617e-01 
L6_l1linf:3.2617e-01 L7_l1linf:3.2617e-01 L8_l1linf:3.3008e-01 L9_l1linf:3.1641e-01 L10_l1linf:2.9688e-01 L11_l1linf:2.6953e-01 L12_l1linf:2.1582e-01 L1_spectral:1.5397e-02 L2_spectral:1.5165e-02 L3_spectral:1.5009e-02 L4_spectral:1.4977e-02 L5_spectral:1.5135e-02 L6_spectral:1.4996e-02 L7_spectral:1.4952e-02 L8_spectral:1.4736e-02 L9_spectral:1.4788e-02 L10_spectral:1.4968e-02 L11_spectral:1.5087e-02 L12_spectral:1.4544e-02 train_time:131027ms step_avg:46.80ms +[2025-09-11 07:30:08] [Rank 0] step:2801/10000 train_time:132139ms step_avg:47.18ms +[2025-09-11 07:30:08] [Rank 0] step:2801/10000 train_time:132139ms step_avg:47.18ms +[2025-09-11 07:30:08] [Rank 0] step:2821/10000 train_time:132788ms step_avg:47.07ms +[2025-09-11 07:30:08] [Rank 0] step:2821/10000 train_time:132788ms step_avg:47.07ms +[2025-09-11 07:30:09] [Rank 0] step:2841/10000 train_time:133451ms step_avg:46.97ms +[2025-09-11 07:30:09] [Rank 0] step:2841/10000 train_time:133451ms step_avg:46.97ms +[2025-09-11 07:30:10] [Rank 0] step:2861/10000 train_time:134113ms step_avg:46.88ms +[2025-09-11 07:30:10] [Rank 0] step:2861/10000 train_time:134113ms step_avg:46.88ms +[2025-09-11 07:30:11] [Rank 0] step:2881/10000 train_time:135207ms step_avg:46.93ms +[2025-09-11 07:30:11] [Rank 0] step:2881/10000 train_time:135207ms step_avg:46.93ms +[2025-09-11 07:30:11] [Rank 0] step:2901/10000 train_time:135999ms step_avg:46.88ms +[2025-09-11 07:30:11] [Rank 0] step:2901/10000 train_time:135999ms step_avg:46.88ms +[2025-09-11 07:30:12] [Rank 0] step:2921/10000 train_time:136660ms step_avg:46.79ms +[2025-09-11 07:30:12] [Rank 0] step:2921/10000 train_time:136660ms step_avg:46.79ms +[2025-09-11 07:30:13] [Rank 0] step:2941/10000 train_time:137620ms step_avg:46.79ms +[2025-09-11 07:30:13] [Rank 0] step:2941/10000 train_time:137620ms step_avg:46.79ms +[2025-09-11 07:30:14] [Rank 0] step:2961/10000 train_time:138281ms step_avg:46.70ms +[2025-09-11 07:30:14] [Rank 0] step:2961/10000 train_time:138281ms step_avg:46.70ms 
+[2025-09-11 07:30:14] [Rank 0] step:2981/10000 train_time:138945ms step_avg:46.61ms +[2025-09-11 07:30:14] [Rank 0] step:2981/10000 train_time:138945ms step_avg:46.61ms +[2025-09-11 07:30:15] [Rank 0] step:3001/10000 train_time:139608ms step_avg:46.52ms +[2025-09-11 07:30:15] [Rank 0] step:3001/10000 train_time:139608ms step_avg:46.52ms +[2025-09-11 07:30:16] [Rank 0] step:3021/10000 train_time:140272ms step_avg:46.43ms +[2025-09-11 07:30:16] [Rank 0] step:3021/10000 train_time:140272ms step_avg:46.43ms +[2025-09-11 07:30:16] [Rank 0] step:3041/10000 train_time:140936ms step_avg:46.35ms +[2025-09-11 07:30:16] [Rank 0] step:3041/10000 train_time:140936ms step_avg:46.35ms +[2025-09-11 07:30:17] [Rank 0] step:3061/10000 train_time:141600ms step_avg:46.26ms +[2025-09-11 07:30:17] [Rank 0] step:3061/10000 train_time:141600ms step_avg:46.26ms +[2025-09-11 07:30:18] [Rank 0] step:3081/10000 train_time:142263ms step_avg:46.17ms +[2025-09-11 07:30:18] [Rank 0] step:3081/10000 train_time:142263ms step_avg:46.17ms +[2025-09-11 07:30:18] [Rank 0] step:3101/10000 train_time:142927ms step_avg:46.09ms +[2025-09-11 07:30:18] [Rank 0] step:3101/10000 train_time:142927ms step_avg:46.09ms +[2025-09-11 07:30:19] [Rank 0] step:3121/10000 train_time:143591ms step_avg:46.01ms +[2025-09-11 07:30:19] [Rank 0] step:3121/10000 train_time:143591ms step_avg:46.01ms +[2025-09-11 07:30:20] [Rank 0] step:3141/10000 train_time:144255ms step_avg:45.93ms +[2025-09-11 07:30:20] [Rank 0] step:3141/10000 train_time:144255ms step_avg:45.93ms +[2025-09-11 07:30:20] [Rank 0] step:3161/10000 train_time:144919ms step_avg:45.85ms +[2025-09-11 07:30:20] [Rank 0] step:3161/10000 train_time:144919ms step_avg:45.85ms +[2025-09-11 07:30:21] [Rank 0] step:3181/10000 train_time:145584ms step_avg:45.77ms +[2025-09-11 07:30:21] [Rank 0] step:3181/10000 train_time:145584ms step_avg:45.77ms +[2025-09-11 07:30:22] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 07:30:22] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 07:30:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 07:30:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 07:30:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 07:30:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 07:30:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:30:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:30:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 07:30:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 07:30:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 07:30:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 07:30:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 07:30:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 07:30:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 07:30:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 07:30:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 07:30:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 07:30:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 07:30:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 07:30:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 07:30:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 07:30:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 07:30:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 07:30:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 07:30:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 07:30:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 07:30:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 07:30:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 07:30:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 07:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 07:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 07:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 07:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 07:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 07:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 07:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 07:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 07:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 07:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 07:30:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 07:30:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 07:30:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 07:30:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 07:30:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 07:30:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 07:30:32] [Rank 0] PRINT: step:3200/10000 val_loss:5.7346 total_sharp:2.4219e-02 L1_sharp:4.5182e-03 L2_sharp:1.6786e-03 L3_sharp:2.1896e-03 L4_sharp:2.9452e-03 L5_sharp:5.1751e-03 L6_sharp:2.8364e-03 L7_sharp:2.8379e-03 L8_sharp:4.4682e-03 L9_sharp:4.4169e-03 L10_sharp:4.3255e-03 L11_sharp:5.6321e-03 L12_sharp:1.7216e-02 total_fnorm:4.6875e+00 total_l1_linf:1.2544e+04 total_spectral:2.3438e+00 L1_fnorm:1.2891e+00 L2_fnorm:1.2500e+00 L3_fnorm:1.2578e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2812e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2500e+00 L11_fnorm:1.2109e+00 L12_fnorm:1.0625e+00 L1_l1linf:3.5547e-01 L2_l1linf:3.3398e-01 L3_l1linf:3.2031e-01 L4_l1linf:3.3203e-01 L5_l1linf:3.1836e-01 L6_l1linf:3.1445e-01 L7_l1linf:3.1836e-01 L8_l1linf:3.1641e-01 L9_l1linf:3.1445e-01 L10_l1linf:2.9883e-01 L11_l1linf:2.6953e-01 L12_l1linf:2.1191e-01 L1_spectral:1.5571e-02 L2_spectral:1.5302e-02 L3_spectral:1.5180e-02 L4_spectral:1.5171e-02 L5_spectral:1.5304e-02 L6_spectral:1.5169e-02 L7_spectral:1.5231e-02 L8_spectral:1.4917e-02 L9_spectral:1.4889e-02 L10_spectral:1.4991e-02 L11_spectral:1.5190e-02 L12_spectral:1.4898e-02 train_time:146229ms step_avg:45.70ms +[2025-09-11 07:30:32] [Rank 0] PRINT: step:3200/10000 
val_loss:5.7346 total_sharp:2.4219e-02 L1_sharp:4.5182e-03 L2_sharp:1.6786e-03 L3_sharp:2.1896e-03 L4_sharp:2.9452e-03 L5_sharp:5.1751e-03 L6_sharp:2.8364e-03 L7_sharp:2.8379e-03 L8_sharp:4.4682e-03 L9_sharp:4.4169e-03 L10_sharp:4.3255e-03 L11_sharp:5.6321e-03 L12_sharp:1.7216e-02 total_fnorm:4.6875e+00 total_l1_linf:1.2544e+04 total_spectral:2.3438e+00 L1_fnorm:1.2891e+00 L2_fnorm:1.2500e+00 L3_fnorm:1.2578e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2812e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2500e+00 L11_fnorm:1.2109e+00 L12_fnorm:1.0625e+00 L1_l1linf:3.5547e-01 L2_l1linf:3.3398e-01 L3_l1linf:3.2031e-01 L4_l1linf:3.3203e-01 L5_l1linf:3.1836e-01 L6_l1linf:3.1445e-01 L7_l1linf:3.1836e-01 L8_l1linf:3.1641e-01 L9_l1linf:3.1445e-01 L10_l1linf:2.9883e-01 L11_l1linf:2.6953e-01 L12_l1linf:2.1191e-01 L1_spectral:1.5571e-02 L2_spectral:1.5302e-02 L3_spectral:1.5180e-02 L4_spectral:1.5171e-02 L5_spectral:1.5304e-02 L6_spectral:1.5169e-02 L7_spectral:1.5231e-02 L8_spectral:1.4917e-02 L9_spectral:1.4889e-02 L10_spectral:1.4991e-02 L11_spectral:1.5190e-02 L12_spectral:1.4898e-02 train_time:146229ms step_avg:45.70ms +[2025-09-11 07:30:33] [Rank 0] step:3201/10000 train_time:147354ms step_avg:46.03ms +[2025-09-11 07:30:33] [Rank 0] step:3201/10000 train_time:147354ms step_avg:46.03ms +[2025-09-11 07:30:33] [Rank 0] step:3221/10000 train_time:148008ms step_avg:45.95ms +[2025-09-11 07:30:33] [Rank 0] step:3221/10000 train_time:148008ms step_avg:45.95ms +[2025-09-11 07:30:34] [Rank 0] step:3241/10000 train_time:148673ms step_avg:45.87ms +[2025-09-11 07:30:34] [Rank 0] step:3241/10000 train_time:148673ms step_avg:45.87ms +[2025-09-11 07:30:35] [Rank 0] step:3261/10000 train_time:149338ms step_avg:45.80ms +[2025-09-11 07:30:35] [Rank 0] step:3261/10000 train_time:149338ms step_avg:45.80ms +[2025-09-11 07:30:35] [Rank 0] step:3281/10000 train_time:150002ms step_avg:45.72ms +[2025-09-11 07:30:35] [Rank 0] step:3281/10000 
train_time:150002ms step_avg:45.72ms +[2025-09-11 07:30:36] [Rank 0] step:3301/10000 train_time:150666ms step_avg:45.64ms +[2025-09-11 07:30:36] [Rank 0] step:3301/10000 train_time:150666ms step_avg:45.64ms +[2025-09-11 07:30:37] [Rank 0] step:3321/10000 train_time:151330ms step_avg:45.57ms +[2025-09-11 07:30:37] [Rank 0] step:3321/10000 train_time:151330ms step_avg:45.57ms +[2025-09-11 07:30:37] [Rank 0] step:3341/10000 train_time:151994ms step_avg:45.49ms +[2025-09-11 07:30:37] [Rank 0] step:3341/10000 train_time:151994ms step_avg:45.49ms +[2025-09-11 07:30:38] [Rank 0] step:3361/10000 train_time:152659ms step_avg:45.42ms +[2025-09-11 07:30:38] [Rank 0] step:3361/10000 train_time:152659ms step_avg:45.42ms +[2025-09-11 07:30:39] [Rank 0] step:3381/10000 train_time:153323ms step_avg:45.35ms +[2025-09-11 07:30:39] [Rank 0] step:3381/10000 train_time:153323ms step_avg:45.35ms +[2025-09-11 07:30:39] [Rank 0] step:3401/10000 train_time:153986ms step_avg:45.28ms +[2025-09-11 07:30:39] [Rank 0] step:3401/10000 train_time:153986ms step_avg:45.28ms +[2025-09-11 07:30:40] [Rank 0] step:3421/10000 train_time:154649ms step_avg:45.21ms +[2025-09-11 07:30:40] [Rank 0] step:3421/10000 train_time:154649ms step_avg:45.21ms +[2025-09-11 07:30:41] [Rank 0] step:3441/10000 train_time:155313ms step_avg:45.14ms +[2025-09-11 07:30:41] [Rank 0] step:3441/10000 train_time:155313ms step_avg:45.14ms +[2025-09-11 07:30:41] [Rank 0] step:3461/10000 train_time:155975ms step_avg:45.07ms +[2025-09-11 07:30:41] [Rank 0] step:3461/10000 train_time:155975ms step_avg:45.07ms +[2025-09-11 07:30:42] [Rank 0] step:3481/10000 train_time:156639ms step_avg:45.00ms +[2025-09-11 07:30:42] [Rank 0] step:3481/10000 train_time:156639ms step_avg:45.00ms +[2025-09-11 07:30:43] [Rank 0] step:3501/10000 train_time:157302ms step_avg:44.93ms +[2025-09-11 07:30:43] [Rank 0] step:3501/10000 train_time:157302ms step_avg:44.93ms +[2025-09-11 07:30:43] [Rank 0] step:3521/10000 train_time:157966ms step_avg:44.86ms 
+[2025-09-11 07:30:43] [Rank 0] step:3521/10000 train_time:157966ms step_avg:44.86ms +[2025-09-11 07:30:44] [Rank 0] step:3541/10000 train_time:158629ms step_avg:44.80ms +[2025-09-11 07:30:44] [Rank 0] step:3541/10000 train_time:158629ms step_avg:44.80ms +[2025-09-11 07:30:45] [Rank 0] step:3561/10000 train_time:159293ms step_avg:44.73ms +[2025-09-11 07:30:45] [Rank 0] step:3561/10000 train_time:159293ms step_avg:44.73ms +[2025-09-11 07:30:45] [Rank 0] step:3581/10000 train_time:159958ms step_avg:44.67ms +[2025-09-11 07:30:45] [Rank 0] step:3581/10000 train_time:159958ms step_avg:44.67ms +[2025-09-11 07:30:46] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 07:30:46] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 07:30:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 07:30:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 07:30:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 07:30:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 07:30:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:30:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:30:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 07:30:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 07:30:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 07:30:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 07:30:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 07:30:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 07:30:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 07:30:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 07:30:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 07:30:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 07:30:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 07:30:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 07:30:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 07:30:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 07:30:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 07:30:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 07:30:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 07:30:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 07:30:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 07:30:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 07:30:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 07:30:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 07:30:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 07:30:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 07:30:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 07:30:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 07:30:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 07:30:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 07:30:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 07:30:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 07:30:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 07:30:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 07:30:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 07:30:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 07:30:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 07:30:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 07:30:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 07:30:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:30:56] [Rank 0] PRINT: step:3600/10000 val_loss:5.6881 total_sharp:2.1934e-02 L1_sharp:5.6391e-03 L2_sharp:2.4097e-03 L3_sharp:2.8859e-03 L4_sharp:2.2478e-03 L5_sharp:4.0566e-03 L6_sharp:2.8909e-03 L7_sharp:2.8350e-03 L8_sharp:4.1311e-03 L9_sharp:4.1757e-03 L10_sharp:4.3011e-03 L11_sharp:4.4769e-03 L12_sharp:1.0768e-02 total_fnorm:4.6250e+00 total_l1_linf:1.2224e+04 total_spectral:2.3125e+00 L1_fnorm:1.2891e+00 L2_fnorm:1.2578e+00 L3_fnorm:1.2578e+00 L4_fnorm:1.2656e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2734e+00 L7_fnorm:1.2734e+00 L8_fnorm:1.2578e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2500e+00 L11_fnorm:1.2266e+00 L12_fnorm:1.0859e+00 L1_l1linf:3.4961e-01 L2_l1linf:3.2617e-01 L3_l1linf:3.1836e-01 L4_l1linf:3.1641e-01 L5_l1linf:3.0664e-01 L6_l1linf:3.0469e-01 L7_l1linf:3.1055e-01 L8_l1linf:3.0664e-01 L9_l1linf:3.0664e-01 L10_l1linf:2.9688e-01 L11_l1linf:2.6953e-01 L12_l1linf:2.1973e-01 L1_spectral:1.5777e-02 L2_spectral:1.5410e-02 L3_spectral:1.5298e-02 L4_spectral:1.5308e-02 L5_spectral:1.5532e-02 L6_spectral:1.5349e-02 L7_spectral:1.5368e-02 L8_spectral:1.5119e-02 L9_spectral:1.5139e-02 L10_spectral:1.5012e-02 L11_spectral:1.5214e-02 L12_spectral:1.5025e-02 train_time:160603ms step_avg:44.61ms +[2025-09-11 07:30:56] [Rank 0] PRINT: step:3600/10000 val_loss:5.6881 total_sharp:2.1934e-02 L1_sharp:5.6391e-03 L2_sharp:2.4097e-03 L3_sharp:2.8859e-03 L4_sharp:2.2478e-03 L5_sharp:4.0566e-03 L6_sharp:2.8909e-03 L7_sharp:2.8350e-03 L8_sharp:4.1311e-03 L9_sharp:4.1757e-03 L10_sharp:4.3011e-03 L11_sharp:4.4769e-03 L12_sharp:1.0768e-02 total_fnorm:4.6250e+00 total_l1_linf:1.2224e+04 total_spectral:2.3125e+00 L1_fnorm:1.2891e+00 L2_fnorm:1.2578e+00 L3_fnorm:1.2578e+00 L4_fnorm:1.2656e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2734e+00 L7_fnorm:1.2734e+00 L8_fnorm:1.2578e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2500e+00 L11_fnorm:1.2266e+00 L12_fnorm:1.0859e+00 L1_l1linf:3.4961e-01 L2_l1linf:3.2617e-01 L3_l1linf:3.1836e-01 L4_l1linf:3.1641e-01 L5_l1linf:3.0664e-01 
L6_l1linf:3.0469e-01 L7_l1linf:3.1055e-01 L8_l1linf:3.0664e-01 L9_l1linf:3.0664e-01 L10_l1linf:2.9688e-01 L11_l1linf:2.6953e-01 L12_l1linf:2.1973e-01 L1_spectral:1.5777e-02 L2_spectral:1.5410e-02 L3_spectral:1.5298e-02 L4_spectral:1.5308e-02 L5_spectral:1.5532e-02 L6_spectral:1.5349e-02 L7_spectral:1.5368e-02 L8_spectral:1.5119e-02 L9_spectral:1.5139e-02 L10_spectral:1.5012e-02 L11_spectral:1.5214e-02 L12_spectral:1.5025e-02 train_time:160603ms step_avg:44.61ms +[2025-09-11 07:30:57] [Rank 0] step:3601/10000 train_time:161729ms step_avg:44.91ms +[2025-09-11 07:30:57] [Rank 0] step:3601/10000 train_time:161729ms step_avg:44.91ms +[2025-09-11 07:30:57] [Rank 0] step:3621/10000 train_time:162384ms step_avg:44.85ms +[2025-09-11 07:30:57] [Rank 0] step:3621/10000 train_time:162384ms step_avg:44.85ms +[2025-09-11 07:30:58] [Rank 0] step:3641/10000 train_time:163049ms step_avg:44.78ms +[2025-09-11 07:30:58] [Rank 0] step:3641/10000 train_time:163049ms step_avg:44.78ms +[2025-09-11 07:30:59] [Rank 0] step:3661/10000 train_time:163714ms step_avg:44.72ms +[2025-09-11 07:30:59] [Rank 0] step:3661/10000 train_time:163714ms step_avg:44.72ms +[2025-09-11 07:30:59] [Rank 0] step:3681/10000 train_time:164378ms step_avg:44.66ms +[2025-09-11 07:30:59] [Rank 0] step:3681/10000 train_time:164378ms step_avg:44.66ms +[2025-09-11 07:31:00] [Rank 0] step:3701/10000 train_time:165041ms step_avg:44.59ms +[2025-09-11 07:31:00] [Rank 0] step:3701/10000 train_time:165041ms step_avg:44.59ms +[2025-09-11 07:31:01] [Rank 0] step:3721/10000 train_time:165714ms step_avg:44.53ms +[2025-09-11 07:31:01] [Rank 0] step:3721/10000 train_time:165714ms step_avg:44.53ms +[2025-09-11 07:31:01] [Rank 0] step:3741/10000 train_time:166391ms step_avg:44.48ms +[2025-09-11 07:31:01] [Rank 0] step:3741/10000 train_time:166391ms step_avg:44.48ms +[2025-09-11 07:31:02] [Rank 0] step:3761/10000 train_time:167067ms step_avg:44.42ms +[2025-09-11 07:31:02] [Rank 0] step:3761/10000 train_time:167067ms step_avg:44.42ms 
+[2025-09-11 07:31:03] [Rank 0] step:3781/10000 train_time:167742ms step_avg:44.36ms +[2025-09-11 07:31:03] [Rank 0] step:3781/10000 train_time:167742ms step_avg:44.36ms +[2025-09-11 07:31:03] [Rank 0] step:3801/10000 train_time:168418ms step_avg:44.31ms +[2025-09-11 07:31:03] [Rank 0] step:3801/10000 train_time:168418ms step_avg:44.31ms +[2025-09-11 07:31:04] [Rank 0] step:3821/10000 train_time:169095ms step_avg:44.25ms +[2025-09-11 07:31:04] [Rank 0] step:3821/10000 train_time:169095ms step_avg:44.25ms +[2025-09-11 07:31:05] [Rank 0] step:3841/10000 train_time:169770ms step_avg:44.20ms +[2025-09-11 07:31:05] [Rank 0] step:3841/10000 train_time:169770ms step_avg:44.20ms +[2025-09-11 07:31:05] [Rank 0] step:3861/10000 train_time:170444ms step_avg:44.15ms +[2025-09-11 07:31:05] [Rank 0] step:3861/10000 train_time:170444ms step_avg:44.15ms +[2025-09-11 07:31:06] [Rank 0] step:3881/10000 train_time:171119ms step_avg:44.09ms +[2025-09-11 07:31:06] [Rank 0] step:3881/10000 train_time:171119ms step_avg:44.09ms +[2025-09-11 07:31:07] [Rank 0] step:3901/10000 train_time:171793ms step_avg:44.04ms +[2025-09-11 07:31:07] [Rank 0] step:3901/10000 train_time:171793ms step_avg:44.04ms +[2025-09-11 07:31:07] [Rank 0] step:3921/10000 train_time:172468ms step_avg:43.99ms +[2025-09-11 07:31:07] [Rank 0] step:3921/10000 train_time:172468ms step_avg:43.99ms +[2025-09-11 07:31:08] [Rank 0] step:3941/10000 train_time:173143ms step_avg:43.93ms +[2025-09-11 07:31:08] [Rank 0] step:3941/10000 train_time:173143ms step_avg:43.93ms +[2025-09-11 07:31:09] [Rank 0] step:3961/10000 train_time:173818ms step_avg:43.88ms +[2025-09-11 07:31:09] [Rank 0] step:3961/10000 train_time:173818ms step_avg:43.88ms +[2025-09-11 07:31:10] [Rank 0] step:3981/10000 train_time:174492ms step_avg:43.83ms +[2025-09-11 07:31:10] [Rank 0] step:3981/10000 train_time:174492ms step_avg:43.83ms +[2025-09-11 07:31:10] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 07:31:10] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 07:31:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 07:31:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 07:31:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 07:31:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 07:31:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:31:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:31:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 07:31:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 07:31:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 07:31:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 07:31:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 07:31:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 07:31:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 07:31:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 07:31:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 07:31:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 07:31:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 07:31:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 07:31:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 07:31:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 07:31:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 07:31:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 07:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 07:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 07:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 07:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 07:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 07:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 07:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 07:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 07:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 07:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 07:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 07:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 07:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 07:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 07:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 07:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 07:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 07:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 07:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 07:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 07:31:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 07:31:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 07:31:20] [Rank 0] PRINT: step:4000/10000 val_loss:5.6343 total_sharp:2.6749e-02 L1_sharp:5.4835e-03 L2_sharp:1.8315e-03 L3_sharp:2.3757e-03 L4_sharp:2.5324e-03 L5_sharp:5.5443e-03 L6_sharp:3.0344e-03 L7_sharp:2.9368e-03 L8_sharp:4.5257e-03 L9_sharp:4.3373e-03 L10_sharp:4.7765e-03 L11_sharp:5.9098e-03 L12_sharp:1.5652e-02 total_fnorm:4.6562e+00 total_l1_linf:1.2032e+04 total_spectral:2.3281e+00 L1_fnorm:1.2891e+00 L2_fnorm:1.2500e+00 L3_fnorm:1.2578e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2734e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2500e+00 L11_fnorm:1.2188e+00 L12_fnorm:1.0703e+00 L1_l1linf:3.4180e-01 L2_l1linf:3.2422e-01 L3_l1linf:3.1836e-01 L4_l1linf:3.1445e-01 L5_l1linf:3.0273e-01 L6_l1linf:2.9688e-01 L7_l1linf:3.0469e-01 L8_l1linf:3.0664e-01 L9_l1linf:3.0078e-01 L10_l1linf:2.8711e-01 L11_l1linf:2.5781e-01 L12_l1linf:2.0508e-01 L1_spectral:1.5870e-02 L2_spectral:1.5522e-02 L3_spectral:1.5524e-02 L4_spectral:1.5374e-02 L5_spectral:1.5585e-02 L6_spectral:1.5497e-02 L7_spectral:1.5492e-02 L8_spectral:1.5295e-02 L9_spectral:1.5312e-02 L10_spectral:1.5210e-02 L11_spectral:1.5195e-02 L12_spectral:1.4961e-02 train_time:175148ms step_avg:43.79ms +[2025-09-11 07:31:20] [Rank 0] PRINT: step:4000/10000 
val_loss:5.6343 total_sharp:2.6749e-02 L1_sharp:5.4835e-03 L2_sharp:1.8315e-03 L3_sharp:2.3757e-03 L4_sharp:2.5324e-03 L5_sharp:5.5443e-03 L6_sharp:3.0344e-03 L7_sharp:2.9368e-03 L8_sharp:4.5257e-03 L9_sharp:4.3373e-03 L10_sharp:4.7765e-03 L11_sharp:5.9098e-03 L12_sharp:1.5652e-02 total_fnorm:4.6562e+00 total_l1_linf:1.2032e+04 total_spectral:2.3281e+00 L1_fnorm:1.2891e+00 L2_fnorm:1.2500e+00 L3_fnorm:1.2578e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2734e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2500e+00 L11_fnorm:1.2188e+00 L12_fnorm:1.0703e+00 L1_l1linf:3.4180e-01 L2_l1linf:3.2422e-01 L3_l1linf:3.1836e-01 L4_l1linf:3.1445e-01 L5_l1linf:3.0273e-01 L6_l1linf:2.9688e-01 L7_l1linf:3.0469e-01 L8_l1linf:3.0664e-01 L9_l1linf:3.0078e-01 L10_l1linf:2.8711e-01 L11_l1linf:2.5781e-01 L12_l1linf:2.0508e-01 L1_spectral:1.5870e-02 L2_spectral:1.5522e-02 L3_spectral:1.5524e-02 L4_spectral:1.5374e-02 L5_spectral:1.5585e-02 L6_spectral:1.5497e-02 L7_spectral:1.5492e-02 L8_spectral:1.5295e-02 L9_spectral:1.5312e-02 L10_spectral:1.5210e-02 L11_spectral:1.5195e-02 L12_spectral:1.4961e-02 train_time:175148ms step_avg:43.79ms +[2025-09-11 07:31:21] [Rank 0] step:4001/10000 train_time:176263ms step_avg:44.05ms +[2025-09-11 07:31:21] [Rank 0] step:4001/10000 train_time:176263ms step_avg:44.05ms +[2025-09-11 07:31:22] [Rank 0] step:4021/10000 train_time:176944ms step_avg:44.00ms +[2025-09-11 07:31:22] [Rank 0] step:4021/10000 train_time:176944ms step_avg:44.00ms +[2025-09-11 07:31:23] [Rank 0] step:4041/10000 train_time:177621ms step_avg:43.95ms +[2025-09-11 07:31:23] [Rank 0] step:4041/10000 train_time:177621ms step_avg:43.95ms +[2025-09-11 07:31:23] [Rank 0] step:4061/10000 train_time:178296ms step_avg:43.90ms +[2025-09-11 07:31:23] [Rank 0] step:4061/10000 train_time:178296ms step_avg:43.90ms +[2025-09-11 07:31:24] [Rank 0] step:4081/10000 train_time:178972ms step_avg:43.86ms +[2025-09-11 07:31:24] [Rank 0] step:4081/10000 
train_time:178972ms step_avg:43.86ms +[2025-09-11 07:31:25] [Rank 0] step:4101/10000 train_time:179648ms step_avg:43.81ms +[2025-09-11 07:31:25] [Rank 0] step:4101/10000 train_time:179648ms step_avg:43.81ms +[2025-09-11 07:31:25] [Rank 0] step:4121/10000 train_time:180322ms step_avg:43.76ms +[2025-09-11 07:31:25] [Rank 0] step:4121/10000 train_time:180322ms step_avg:43.76ms +[2025-09-11 07:31:26] [Rank 0] step:4141/10000 train_time:180997ms step_avg:43.71ms +[2025-09-11 07:31:26] [Rank 0] step:4141/10000 train_time:180997ms step_avg:43.71ms +[2025-09-11 07:31:27] [Rank 0] step:4161/10000 train_time:181672ms step_avg:43.66ms +[2025-09-11 07:31:27] [Rank 0] step:4161/10000 train_time:181672ms step_avg:43.66ms +[2025-09-11 07:31:27] [Rank 0] step:4181/10000 train_time:182348ms step_avg:43.61ms +[2025-09-11 07:31:27] [Rank 0] step:4181/10000 train_time:182348ms step_avg:43.61ms +[2025-09-11 07:31:28] [Rank 0] step:4201/10000 train_time:183024ms step_avg:43.57ms +[2025-09-11 07:31:28] [Rank 0] step:4201/10000 train_time:183024ms step_avg:43.57ms +[2025-09-11 07:31:29] [Rank 0] step:4221/10000 train_time:183698ms step_avg:43.52ms +[2025-09-11 07:31:29] [Rank 0] step:4221/10000 train_time:183698ms step_avg:43.52ms +[2025-09-11 07:31:29] [Rank 0] step:4241/10000 train_time:184374ms step_avg:43.47ms +[2025-09-11 07:31:29] [Rank 0] step:4241/10000 train_time:184374ms step_avg:43.47ms +[2025-09-11 07:31:30] [Rank 0] step:4261/10000 train_time:185049ms step_avg:43.43ms +[2025-09-11 07:31:30] [Rank 0] step:4261/10000 train_time:185049ms step_avg:43.43ms +[2025-09-11 07:31:31] [Rank 0] step:4281/10000 train_time:185726ms step_avg:43.38ms +[2025-09-11 07:31:31] [Rank 0] step:4281/10000 train_time:185726ms step_avg:43.38ms +[2025-09-11 07:31:31] [Rank 0] step:4301/10000 train_time:186402ms step_avg:43.34ms +[2025-09-11 07:31:31] [Rank 0] step:4301/10000 train_time:186402ms step_avg:43.34ms +[2025-09-11 07:31:32] [Rank 0] step:4321/10000 train_time:187077ms step_avg:43.29ms 
+[2025-09-11 07:31:32] [Rank 0] step:4321/10000 train_time:187077ms step_avg:43.29ms +[2025-09-11 07:31:33] [Rank 0] step:4341/10000 train_time:187751ms step_avg:43.25ms +[2025-09-11 07:31:33] [Rank 0] step:4341/10000 train_time:187751ms step_avg:43.25ms +[2025-09-11 07:31:33] [Rank 0] step:4361/10000 train_time:188426ms step_avg:43.21ms +[2025-09-11 07:31:33] [Rank 0] step:4361/10000 train_time:188426ms step_avg:43.21ms +[2025-09-11 07:31:34] [Rank 0] step:4381/10000 train_time:189102ms step_avg:43.16ms +[2025-09-11 07:31:34] [Rank 0] step:4381/10000 train_time:189102ms step_avg:43.16ms +[2025-09-11 07:31:35] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 07:31:35] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 07:31:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 07:31:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 07:31:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 07:31:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 07:31:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:31:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:31:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 07:31:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 07:31:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 07:31:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 07:31:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 07:31:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 07:31:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 07:31:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 07:31:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 07:31:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 07:31:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 07:31:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 07:31:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 07:31:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 07:31:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 07:31:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 07:31:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 07:31:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 07:31:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 07:31:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 07:31:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 07:31:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 07:31:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 07:31:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 07:31:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 07:31:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 07:31:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 07:31:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 07:31:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 07:31:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 07:31:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 07:31:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 07:31:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 07:31:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 07:31:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 07:31:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 07:31:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 07:31:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:31:45] [Rank 0] PRINT: step:4400/10000 val_loss:5.5942 total_sharp:2.2844e-02 L1_sharp:6.6298e-03 L2_sharp:1.8861e-03 L3_sharp:2.0766e-03 L4_sharp:2.4138e-03 L5_sharp:3.8305e-03 L6_sharp:2.4160e-03 L7_sharp:2.6051e-03 L8_sharp:3.3254e-03 L9_sharp:3.3167e-03 L10_sharp:3.6198e-03 L11_sharp:5.1664e-03 L12_sharp:2.0300e-02 total_fnorm:4.5625e+00 total_l1_linf:1.1712e+04 total_spectral:2.2969e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2500e+00 L3_fnorm:1.2578e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2734e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2500e+00 L11_fnorm:1.2188e+00 L12_fnorm:1.0625e+00 L1_l1linf:3.3789e-01 L2_l1linf:3.1836e-01 L3_l1linf:3.1445e-01 L4_l1linf:3.0859e-01 L5_l1linf:3.0078e-01 L6_l1linf:2.9688e-01 L7_l1linf:2.9688e-01 L8_l1linf:3.0469e-01 L9_l1linf:3.0078e-01 L10_l1linf:2.8516e-01 L11_l1linf:2.5195e-01 L12_l1linf:2.0117e-01 L1_spectral:1.5963e-02 L2_spectral:1.5674e-02 L3_spectral:1.5524e-02 L4_spectral:1.5508e-02 L5_spectral:1.5686e-02 L6_spectral:1.5557e-02 L7_spectral:1.5651e-02 L8_spectral:1.5310e-02 L9_spectral:1.5503e-02 L10_spectral:1.5379e-02 L11_spectral:1.5315e-02 L12_spectral:1.4947e-02 train_time:189758ms step_avg:43.13ms +[2025-09-11 07:31:45] [Rank 0] PRINT: step:4400/10000 val_loss:5.5942 total_sharp:2.2844e-02 L1_sharp:6.6298e-03 L2_sharp:1.8861e-03 L3_sharp:2.0766e-03 L4_sharp:2.4138e-03 L5_sharp:3.8305e-03 L6_sharp:2.4160e-03 L7_sharp:2.6051e-03 L8_sharp:3.3254e-03 L9_sharp:3.3167e-03 L10_sharp:3.6198e-03 L11_sharp:5.1664e-03 L12_sharp:2.0300e-02 total_fnorm:4.5625e+00 total_l1_linf:1.1712e+04 total_spectral:2.2969e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2500e+00 L3_fnorm:1.2578e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2734e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2500e+00 L11_fnorm:1.2188e+00 L12_fnorm:1.0625e+00 L1_l1linf:3.3789e-01 L2_l1linf:3.1836e-01 L3_l1linf:3.1445e-01 L4_l1linf:3.0859e-01 L5_l1linf:3.0078e-01 
L6_l1linf:2.9688e-01 L7_l1linf:2.9688e-01 L8_l1linf:3.0469e-01 L9_l1linf:3.0078e-01 L10_l1linf:2.8516e-01 L11_l1linf:2.5195e-01 L12_l1linf:2.0117e-01 L1_spectral:1.5963e-02 L2_spectral:1.5674e-02 L3_spectral:1.5524e-02 L4_spectral:1.5508e-02 L5_spectral:1.5686e-02 L6_spectral:1.5557e-02 L7_spectral:1.5651e-02 L8_spectral:1.5310e-02 L9_spectral:1.5503e-02 L10_spectral:1.5379e-02 L11_spectral:1.5315e-02 L12_spectral:1.4947e-02 train_time:189758ms step_avg:43.13ms +[2025-09-11 07:31:46] [Rank 0] step:4401/10000 train_time:190874ms step_avg:43.37ms +[2025-09-11 07:31:46] [Rank 0] step:4401/10000 train_time:190874ms step_avg:43.37ms +[2025-09-11 07:31:47] [Rank 0] step:4421/10000 train_time:191540ms step_avg:43.33ms +[2025-09-11 07:31:47] [Rank 0] step:4421/10000 train_time:191540ms step_avg:43.33ms +[2025-09-11 07:31:47] [Rank 0] step:4441/10000 train_time:192217ms step_avg:43.28ms +[2025-09-11 07:31:47] [Rank 0] step:4441/10000 train_time:192217ms step_avg:43.28ms +[2025-09-11 07:31:48] [Rank 0] step:4461/10000 train_time:192899ms step_avg:43.24ms +[2025-09-11 07:31:48] [Rank 0] step:4461/10000 train_time:192899ms step_avg:43.24ms +[2025-09-11 07:31:49] [Rank 0] step:4481/10000 train_time:193578ms step_avg:43.20ms +[2025-09-11 07:31:49] [Rank 0] step:4481/10000 train_time:193578ms step_avg:43.20ms +[2025-09-11 07:31:50] [Rank 0] step:4501/10000 train_time:194257ms step_avg:43.16ms +[2025-09-11 07:31:50] [Rank 0] step:4501/10000 train_time:194257ms step_avg:43.16ms +[2025-09-11 07:31:50] [Rank 0] step:4521/10000 train_time:194935ms step_avg:43.12ms +[2025-09-11 07:31:50] [Rank 0] step:4521/10000 train_time:194935ms step_avg:43.12ms +[2025-09-11 07:31:51] [Rank 0] step:4541/10000 train_time:195614ms step_avg:43.08ms +[2025-09-11 07:31:51] [Rank 0] step:4541/10000 train_time:195614ms step_avg:43.08ms +[2025-09-11 07:31:52] [Rank 0] step:4561/10000 train_time:196292ms step_avg:43.04ms +[2025-09-11 07:31:52] [Rank 0] step:4561/10000 train_time:196292ms step_avg:43.04ms 
+[2025-09-11 07:31:52] [Rank 0] step:4581/10000 train_time:196969ms step_avg:43.00ms +[2025-09-11 07:31:52] [Rank 0] step:4581/10000 train_time:196969ms step_avg:43.00ms +[2025-09-11 07:31:53] [Rank 0] step:4601/10000 train_time:197648ms step_avg:42.96ms +[2025-09-11 07:31:53] [Rank 0] step:4601/10000 train_time:197648ms step_avg:42.96ms +[2025-09-11 07:31:54] [Rank 0] step:4621/10000 train_time:198326ms step_avg:42.92ms +[2025-09-11 07:31:54] [Rank 0] step:4621/10000 train_time:198326ms step_avg:42.92ms +[2025-09-11 07:31:54] [Rank 0] step:4641/10000 train_time:199003ms step_avg:42.88ms +[2025-09-11 07:31:54] [Rank 0] step:4641/10000 train_time:199003ms step_avg:42.88ms +[2025-09-11 07:31:55] [Rank 0] step:4661/10000 train_time:199682ms step_avg:42.84ms +[2025-09-11 07:31:55] [Rank 0] step:4661/10000 train_time:199682ms step_avg:42.84ms +[2025-09-11 07:31:56] [Rank 0] step:4681/10000 train_time:200359ms step_avg:42.80ms +[2025-09-11 07:31:56] [Rank 0] step:4681/10000 train_time:200359ms step_avg:42.80ms +[2025-09-11 07:31:56] [Rank 0] step:4701/10000 train_time:201037ms step_avg:42.76ms +[2025-09-11 07:31:56] [Rank 0] step:4701/10000 train_time:201037ms step_avg:42.76ms +[2025-09-11 07:31:57] [Rank 0] step:4721/10000 train_time:201715ms step_avg:42.73ms +[2025-09-11 07:31:57] [Rank 0] step:4721/10000 train_time:201715ms step_avg:42.73ms +[2025-09-11 07:31:58] [Rank 0] step:4741/10000 train_time:202395ms step_avg:42.69ms +[2025-09-11 07:31:58] [Rank 0] step:4741/10000 train_time:202395ms step_avg:42.69ms +[2025-09-11 07:31:58] [Rank 0] step:4761/10000 train_time:203074ms step_avg:42.65ms +[2025-09-11 07:31:58] [Rank 0] step:4761/10000 train_time:203074ms step_avg:42.65ms +[2025-09-11 07:31:59] [Rank 0] step:4781/10000 train_time:203752ms step_avg:42.62ms +[2025-09-11 07:31:59] [Rank 0] step:4781/10000 train_time:203752ms step_avg:42.62ms +[2025-09-11 07:32:00] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 07:32:00] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 07:32:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 07:32:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 07:32:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 07:32:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 07:32:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:32:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:32:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 07:32:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 07:32:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 07:32:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 07:32:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 07:32:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 07:32:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 07:32:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 07:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 07:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 07:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 07:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 07:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 07:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 07:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 07:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 07:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 07:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 07:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 07:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 07:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 07:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 07:32:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 07:32:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 07:32:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 07:32:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 07:32:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 07:32:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 07:32:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 07:32:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 07:32:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 07:32:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 07:32:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 07:32:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 07:32:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 07:32:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 07:32:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 07:32:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 07:32:10] [Rank 0] PRINT: step:4800/10000 val_loss:5.5518 total_sharp:1.7812e-02 L1_sharp:5.5142e-03 L2_sharp:1.9641e-03 L3_sharp:1.8916e-03 L4_sharp:1.7117e-03 L5_sharp:3.6621e-03 L6_sharp:2.1511e-03 L7_sharp:2.1734e-03 L8_sharp:3.0400e-03 L9_sharp:3.1385e-03 L10_sharp:3.5241e-03 L11_sharp:4.1838e-03 L12_sharp:9.8054e-03 total_fnorm:4.5625e+00 total_l1_linf:1.1648e+04 total_spectral:2.2969e+00 L1_fnorm:1.2891e+00 L2_fnorm:1.2500e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2734e+00 L8_fnorm:1.2578e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2422e+00 L12_fnorm:1.1094e+00 L1_l1linf:3.3398e-01 L2_l1linf:3.2227e-01 L3_l1linf:3.0859e-01 L4_l1linf:3.0273e-01 L5_l1linf:2.9492e-01 L6_l1linf:2.8516e-01 L7_l1linf:2.8906e-01 L8_l1linf:2.8906e-01 L9_l1linf:2.8711e-01 L10_l1linf:2.8516e-01 L11_l1linf:2.5977e-01 L12_l1linf:2.1094e-01 L1_spectral:1.6043e-02 L2_spectral:1.5752e-02 L3_spectral:1.5667e-02 L4_spectral:1.5638e-02 L5_spectral:1.5946e-02 L6_spectral:1.5747e-02 L7_spectral:1.5858e-02 L8_spectral:1.5587e-02 L9_spectral:1.5600e-02 L10_spectral:1.5547e-02 L11_spectral:1.5409e-02 L12_spectral:1.5221e-02 train_time:204410ms step_avg:42.59ms +[2025-09-11 07:32:10] [Rank 0] PRINT: step:4800/10000 
val_loss:5.5518 total_sharp:1.7812e-02 L1_sharp:5.5142e-03 L2_sharp:1.9641e-03 L3_sharp:1.8916e-03 L4_sharp:1.7117e-03 L5_sharp:3.6621e-03 L6_sharp:2.1511e-03 L7_sharp:2.1734e-03 L8_sharp:3.0400e-03 L9_sharp:3.1385e-03 L10_sharp:3.5241e-03 L11_sharp:4.1838e-03 L12_sharp:9.8054e-03 total_fnorm:4.5625e+00 total_l1_linf:1.1648e+04 total_spectral:2.2969e+00 L1_fnorm:1.2891e+00 L2_fnorm:1.2500e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2734e+00 L8_fnorm:1.2578e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2422e+00 L12_fnorm:1.1094e+00 L1_l1linf:3.3398e-01 L2_l1linf:3.2227e-01 L3_l1linf:3.0859e-01 L4_l1linf:3.0273e-01 L5_l1linf:2.9492e-01 L6_l1linf:2.8516e-01 L7_l1linf:2.8906e-01 L8_l1linf:2.8906e-01 L9_l1linf:2.8711e-01 L10_l1linf:2.8516e-01 L11_l1linf:2.5977e-01 L12_l1linf:2.1094e-01 L1_spectral:1.6043e-02 L2_spectral:1.5752e-02 L3_spectral:1.5667e-02 L4_spectral:1.5638e-02 L5_spectral:1.5946e-02 L6_spectral:1.5747e-02 L7_spectral:1.5858e-02 L8_spectral:1.5587e-02 L9_spectral:1.5600e-02 L10_spectral:1.5547e-02 L11_spectral:1.5409e-02 L12_spectral:1.5221e-02 train_time:204410ms step_avg:42.59ms +[2025-09-11 07:32:11] [Rank 0] step:4801/10000 train_time:205543ms step_avg:42.81ms +[2025-09-11 07:32:11] [Rank 0] step:4801/10000 train_time:205543ms step_avg:42.81ms +[2025-09-11 07:32:11] [Rank 0] step:4821/10000 train_time:206212ms step_avg:42.77ms +[2025-09-11 07:32:11] [Rank 0] step:4821/10000 train_time:206212ms step_avg:42.77ms +[2025-09-11 07:32:12] [Rank 0] step:4841/10000 train_time:206892ms step_avg:42.74ms +[2025-09-11 07:32:12] [Rank 0] step:4841/10000 train_time:206892ms step_avg:42.74ms +[2025-09-11 07:32:13] [Rank 0] step:4861/10000 train_time:207570ms step_avg:42.70ms +[2025-09-11 07:32:13] [Rank 0] step:4861/10000 train_time:207570ms step_avg:42.70ms +[2025-09-11 07:32:13] [Rank 0] step:4881/10000 train_time:208249ms step_avg:42.67ms +[2025-09-11 07:32:13] [Rank 0] step:4881/10000 
train_time:208249ms step_avg:42.67ms +[2025-09-11 07:32:14] [Rank 0] step:4901/10000 train_time:208928ms step_avg:42.63ms +[2025-09-11 07:32:14] [Rank 0] step:4901/10000 train_time:208928ms step_avg:42.63ms +[2025-09-11 07:32:15] [Rank 0] step:4921/10000 train_time:209607ms step_avg:42.59ms +[2025-09-11 07:32:15] [Rank 0] step:4921/10000 train_time:209607ms step_avg:42.59ms +[2025-09-11 07:32:16] [Rank 0] step:4941/10000 train_time:210285ms step_avg:42.56ms +[2025-09-11 07:32:16] [Rank 0] step:4941/10000 train_time:210285ms step_avg:42.56ms +[2025-09-11 07:32:16] [Rank 0] step:4961/10000 train_time:210964ms step_avg:42.52ms +[2025-09-11 07:32:16] [Rank 0] step:4961/10000 train_time:210964ms step_avg:42.52ms +[2025-09-11 07:32:17] [Rank 0] step:4981/10000 train_time:212173ms step_avg:42.60ms +[2025-09-11 07:32:17] [Rank 0] step:4981/10000 train_time:212173ms step_avg:42.60ms +[2025-09-11 07:32:18] [Rank 0] step:5001/10000 train_time:212852ms step_avg:42.56ms +[2025-09-11 07:32:18] [Rank 0] step:5001/10000 train_time:212852ms step_avg:42.56ms +[2025-09-11 07:32:19] [Rank 0] step:5021/10000 train_time:213531ms step_avg:42.53ms +[2025-09-11 07:32:19] [Rank 0] step:5021/10000 train_time:213531ms step_avg:42.53ms +[2025-09-11 07:32:20] [Rank 0] step:5041/10000 train_time:214475ms step_avg:42.55ms +[2025-09-11 07:32:20] [Rank 0] step:5041/10000 train_time:214475ms step_avg:42.55ms +[2025-09-11 07:32:20] [Rank 0] step:5061/10000 train_time:215154ms step_avg:42.51ms +[2025-09-11 07:32:20] [Rank 0] step:5061/10000 train_time:215154ms step_avg:42.51ms +[2025-09-11 07:32:21] [Rank 0] step:5081/10000 train_time:215833ms step_avg:42.48ms +[2025-09-11 07:32:21] [Rank 0] step:5081/10000 train_time:215833ms step_avg:42.48ms +[2025-09-11 07:32:22] [Rank 0] step:5101/10000 train_time:216512ms step_avg:42.45ms +[2025-09-11 07:32:22] [Rank 0] step:5101/10000 train_time:216512ms step_avg:42.45ms +[2025-09-11 07:32:22] [Rank 0] step:5121/10000 train_time:217190ms step_avg:42.41ms 
+[2025-09-11 07:32:22] [Rank 0] step:5121/10000 train_time:217190ms step_avg:42.41ms +[2025-09-11 07:32:23] [Rank 0] step:5141/10000 train_time:217870ms step_avg:42.38ms +[2025-09-11 07:32:23] [Rank 0] step:5141/10000 train_time:217870ms step_avg:42.38ms +[2025-09-11 07:32:24] [Rank 0] step:5161/10000 train_time:218548ms step_avg:42.35ms +[2025-09-11 07:32:24] [Rank 0] step:5161/10000 train_time:218548ms step_avg:42.35ms +[2025-09-11 07:32:24] [Rank 0] step:5181/10000 train_time:219226ms step_avg:42.31ms +[2025-09-11 07:32:24] [Rank 0] step:5181/10000 train_time:219226ms step_avg:42.31ms +[2025-09-11 07:32:25] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 07:32:25] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 07:32:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 07:32:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 07:32:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 07:32:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 07:32:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:32:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:32:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 07:32:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 07:32:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 07:32:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 07:32:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 07:32:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 07:32:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 07:32:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 07:32:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 07:32:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 07:32:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 07:32:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 07:32:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 07:32:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 07:32:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 07:32:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 07:32:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 07:32:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 07:32:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 07:32:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 07:32:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 07:32:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 07:32:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 07:32:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 07:32:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 07:32:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 07:32:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 07:32:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 07:32:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 07:32:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 07:32:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 07:32:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 07:32:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 07:32:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 07:32:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 07:32:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 07:32:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 07:32:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:32:35] [Rank 0] PRINT: step:5200/10000 val_loss:5.5195 total_sharp:2.1293e-02 L1_sharp:4.9187e-03 L2_sharp:2.3627e-03 L3_sharp:3.0054e-03 L4_sharp:1.8211e-03 L5_sharp:3.3087e-03 L6_sharp:2.2116e-03 L7_sharp:2.2486e-03 L8_sharp:3.3396e-03 L9_sharp:3.1831e-03 L10_sharp:4.2019e-03 L11_sharp:5.4105e-03 L12_sharp:1.5088e-02 total_fnorm:4.5000e+00 total_l1_linf:1.1392e+04 total_spectral:2.2969e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2422e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2734e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2422e+00 L12_fnorm:1.1172e+00 L1_l1linf:3.3398e-01 L2_l1linf:3.1641e-01 L3_l1linf:3.0078e-01 L4_l1linf:2.9688e-01 L5_l1linf:2.8906e-01 L6_l1linf:2.7734e-01 L7_l1linf:2.8125e-01 L8_l1linf:2.8320e-01 L9_l1linf:2.8125e-01 L10_l1linf:2.8320e-01 L11_l1linf:2.5195e-01 L12_l1linf:2.0898e-01 L1_spectral:1.6145e-02 L2_spectral:1.5764e-02 L3_spectral:1.5745e-02 L4_spectral:1.5686e-02 L5_spectral:1.5886e-02 L6_spectral:1.5898e-02 L7_spectral:1.5917e-02 L8_spectral:1.5656e-02 L9_spectral:1.5670e-02 L10_spectral:1.5617e-02 L11_spectral:1.5557e-02 L12_spectral:1.5241e-02 train_time:219893ms step_avg:42.29ms +[2025-09-11 07:32:35] [Rank 0] PRINT: step:5200/10000 val_loss:5.5195 total_sharp:2.1293e-02 L1_sharp:4.9187e-03 L2_sharp:2.3627e-03 L3_sharp:3.0054e-03 L4_sharp:1.8211e-03 L5_sharp:3.3087e-03 L6_sharp:2.2116e-03 L7_sharp:2.2486e-03 L8_sharp:3.3396e-03 L9_sharp:3.1831e-03 L10_sharp:4.2019e-03 L11_sharp:5.4105e-03 L12_sharp:1.5088e-02 total_fnorm:4.5000e+00 total_l1_linf:1.1392e+04 total_spectral:2.2969e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2422e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2734e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2422e+00 L12_fnorm:1.1172e+00 L1_l1linf:3.3398e-01 L2_l1linf:3.1641e-01 L3_l1linf:3.0078e-01 L4_l1linf:2.9688e-01 L5_l1linf:2.8906e-01 
L6_l1linf:2.7734e-01 L7_l1linf:2.8125e-01 L8_l1linf:2.8320e-01 L9_l1linf:2.8125e-01 L10_l1linf:2.8320e-01 L11_l1linf:2.5195e-01 L12_l1linf:2.0898e-01 L1_spectral:1.6145e-02 L2_spectral:1.5764e-02 L3_spectral:1.5745e-02 L4_spectral:1.5686e-02 L5_spectral:1.5886e-02 L6_spectral:1.5898e-02 L7_spectral:1.5917e-02 L8_spectral:1.5656e-02 L9_spectral:1.5670e-02 L10_spectral:1.5617e-02 L11_spectral:1.5557e-02 L12_spectral:1.5241e-02 train_time:219893ms step_avg:42.29ms +[2025-09-11 07:32:36] [Rank 0] step:5201/10000 train_time:221035ms step_avg:42.50ms +[2025-09-11 07:32:36] [Rank 0] step:5201/10000 train_time:221035ms step_avg:42.50ms +[2025-09-11 07:32:37] [Rank 0] step:5221/10000 train_time:221732ms step_avg:42.47ms +[2025-09-11 07:32:37] [Rank 0] step:5221/10000 train_time:221732ms step_avg:42.47ms +[2025-09-11 07:32:38] [Rank 0] step:5241/10000 train_time:222421ms step_avg:42.44ms +[2025-09-11 07:32:38] [Rank 0] step:5241/10000 train_time:222421ms step_avg:42.44ms +[2025-09-11 07:32:38] [Rank 0] step:5261/10000 train_time:223111ms step_avg:42.41ms +[2025-09-11 07:32:38] [Rank 0] step:5261/10000 train_time:223111ms step_avg:42.41ms +[2025-09-11 07:32:39] [Rank 0] step:5281/10000 train_time:223799ms step_avg:42.38ms +[2025-09-11 07:32:39] [Rank 0] step:5281/10000 train_time:223799ms step_avg:42.38ms +[2025-09-11 07:32:40] [Rank 0] step:5301/10000 train_time:224488ms step_avg:42.35ms +[2025-09-11 07:32:40] [Rank 0] step:5301/10000 train_time:224488ms step_avg:42.35ms +[2025-09-11 07:32:40] [Rank 0] step:5321/10000 train_time:225175ms step_avg:42.32ms +[2025-09-11 07:32:40] [Rank 0] step:5321/10000 train_time:225175ms step_avg:42.32ms +[2025-09-11 07:32:41] [Rank 0] step:5341/10000 train_time:225863ms step_avg:42.29ms +[2025-09-11 07:32:41] [Rank 0] step:5341/10000 train_time:225863ms step_avg:42.29ms +[2025-09-11 07:32:42] [Rank 0] step:5361/10000 train_time:226551ms step_avg:42.26ms +[2025-09-11 07:32:42] [Rank 0] step:5361/10000 train_time:226551ms step_avg:42.26ms 
+[2025-09-11 07:32:42] [Rank 0] step:5381/10000 train_time:227241ms step_avg:42.23ms +[2025-09-11 07:32:42] [Rank 0] step:5381/10000 train_time:227241ms step_avg:42.23ms +[2025-09-11 07:32:43] [Rank 0] step:5401/10000 train_time:227928ms step_avg:42.20ms +[2025-09-11 07:32:43] [Rank 0] step:5401/10000 train_time:227928ms step_avg:42.20ms +[2025-09-11 07:32:44] [Rank 0] step:5421/10000 train_time:228616ms step_avg:42.17ms +[2025-09-11 07:32:44] [Rank 0] step:5421/10000 train_time:228616ms step_avg:42.17ms +[2025-09-11 07:32:44] [Rank 0] step:5441/10000 train_time:229304ms step_avg:42.14ms +[2025-09-11 07:32:44] [Rank 0] step:5441/10000 train_time:229304ms step_avg:42.14ms +[2025-09-11 07:32:45] [Rank 0] step:5461/10000 train_time:229992ms step_avg:42.12ms +[2025-09-11 07:32:45] [Rank 0] step:5461/10000 train_time:229992ms step_avg:42.12ms +[2025-09-11 07:32:46] [Rank 0] step:5481/10000 train_time:230681ms step_avg:42.09ms +[2025-09-11 07:32:46] [Rank 0] step:5481/10000 train_time:230681ms step_avg:42.09ms +[2025-09-11 07:32:47] [Rank 0] step:5501/10000 train_time:231370ms step_avg:42.06ms +[2025-09-11 07:32:47] [Rank 0] step:5501/10000 train_time:231370ms step_avg:42.06ms +[2025-09-11 07:32:47] [Rank 0] step:5521/10000 train_time:232057ms step_avg:42.03ms +[2025-09-11 07:32:47] [Rank 0] step:5521/10000 train_time:232057ms step_avg:42.03ms +[2025-09-11 07:32:48] [Rank 0] step:5541/10000 train_time:232747ms step_avg:42.00ms +[2025-09-11 07:32:48] [Rank 0] step:5541/10000 train_time:232747ms step_avg:42.00ms +[2025-09-11 07:32:49] [Rank 0] step:5561/10000 train_time:233437ms step_avg:41.98ms +[2025-09-11 07:32:49] [Rank 0] step:5561/10000 train_time:233437ms step_avg:41.98ms +[2025-09-11 07:32:49] [Rank 0] step:5581/10000 train_time:234126ms step_avg:41.95ms +[2025-09-11 07:32:49] [Rank 0] step:5581/10000 train_time:234126ms step_avg:41.95ms +[2025-09-11 07:32:50] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 07:32:50] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 07:32:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 07:32:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 07:32:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 07:32:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 07:32:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:32:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:32:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 07:32:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 07:32:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 07:32:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 07:32:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 07:32:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 07:32:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 07:32:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 07:32:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 07:32:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 07:32:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 07:32:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 07:32:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 07:32:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 07:32:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 07:32:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 07:32:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 07:32:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 07:32:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 07:32:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 07:32:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 07:32:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 07:32:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 07:32:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 07:32:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 07:32:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 07:32:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 07:32:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 07:32:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 07:32:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 07:32:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 07:32:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 07:32:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 07:32:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 07:32:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 07:32:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 07:33:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 07:33:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 07:33:00] [Rank 0] PRINT: step:5600/10000 val_loss:5.4955 total_sharp:1.7310e-02 L1_sharp:4.8167e-03 L2_sharp:1.1912e-03 L3_sharp:1.6291e-03 L4_sharp:1.4213e-03 L5_sharp:3.1729e-03 L6_sharp:2.3576e-03 L7_sharp:2.2047e-03 L8_sharp:2.9664e-03 L9_sharp:3.3602e-03 L10_sharp:3.8138e-03 L11_sharp:4.3175e-03 L12_sharp:1.2295e-02 total_fnorm:4.5312e+00 total_l1_linf:1.1328e+04 total_spectral:2.2969e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2422e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2422e+00 L12_fnorm:1.1328e+00 L1_l1linf:3.2617e-01 L2_l1linf:3.1641e-01 L3_l1linf:2.9688e-01 L4_l1linf:2.9492e-01 L5_l1linf:2.8320e-01 L6_l1linf:2.7734e-01 L7_l1linf:2.7734e-01 L8_l1linf:2.7930e-01 L9_l1linf:2.7734e-01 L10_l1linf:2.7539e-01 L11_l1linf:2.5195e-01 L12_l1linf:2.0996e-01 L1_spectral:1.6199e-02 L2_spectral:1.5815e-02 L3_spectral:1.5807e-02 L4_spectral:1.5805e-02 L5_spectral:1.6009e-02 L6_spectral:1.5918e-02 L7_spectral:1.5996e-02 L8_spectral:1.5658e-02 L9_spectral:1.5726e-02 L10_spectral:1.5792e-02 L11_spectral:1.5622e-02 L12_spectral:1.5322e-02 train_time:234794ms step_avg:41.93ms +[2025-09-11 07:33:00] [Rank 0] PRINT: step:5600/10000 
val_loss:5.4955 total_sharp:1.7310e-02 L1_sharp:4.8167e-03 L2_sharp:1.1912e-03 L3_sharp:1.6291e-03 L4_sharp:1.4213e-03 L5_sharp:3.1729e-03 L6_sharp:2.3576e-03 L7_sharp:2.2047e-03 L8_sharp:2.9664e-03 L9_sharp:3.3602e-03 L10_sharp:3.8138e-03 L11_sharp:4.3175e-03 L12_sharp:1.2295e-02 total_fnorm:4.5312e+00 total_l1_linf:1.1328e+04 total_spectral:2.2969e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2422e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2422e+00 L12_fnorm:1.1328e+00 L1_l1linf:3.2617e-01 L2_l1linf:3.1641e-01 L3_l1linf:2.9688e-01 L4_l1linf:2.9492e-01 L5_l1linf:2.8320e-01 L6_l1linf:2.7734e-01 L7_l1linf:2.7734e-01 L8_l1linf:2.7930e-01 L9_l1linf:2.7734e-01 L10_l1linf:2.7539e-01 L11_l1linf:2.5195e-01 L12_l1linf:2.0996e-01 L1_spectral:1.6199e-02 L2_spectral:1.5815e-02 L3_spectral:1.5807e-02 L4_spectral:1.5805e-02 L5_spectral:1.6009e-02 L6_spectral:1.5918e-02 L7_spectral:1.5996e-02 L8_spectral:1.5658e-02 L9_spectral:1.5726e-02 L10_spectral:1.5792e-02 L11_spectral:1.5622e-02 L12_spectral:1.5322e-02 train_time:234794ms step_avg:41.93ms +[2025-09-11 07:33:01] [Rank 0] step:5601/10000 train_time:235935ms step_avg:42.12ms +[2025-09-11 07:33:01] [Rank 0] step:5601/10000 train_time:235935ms step_avg:42.12ms +[2025-09-11 07:33:02] [Rank 0] step:5621/10000 train_time:236614ms step_avg:42.09ms +[2025-09-11 07:33:02] [Rank 0] step:5621/10000 train_time:236614ms step_avg:42.09ms +[2025-09-11 07:33:03] [Rank 0] step:5641/10000 train_time:237302ms step_avg:42.07ms +[2025-09-11 07:33:03] [Rank 0] step:5641/10000 train_time:237302ms step_avg:42.07ms +[2025-09-11 07:33:03] [Rank 0] step:5661/10000 train_time:237991ms step_avg:42.04ms +[2025-09-11 07:33:03] [Rank 0] step:5661/10000 train_time:237991ms step_avg:42.04ms +[2025-09-11 07:33:04] [Rank 0] step:5681/10000 train_time:238680ms step_avg:42.01ms +[2025-09-11 07:33:04] [Rank 0] step:5681/10000 
train_time:238680ms step_avg:42.01ms +[2025-09-11 07:33:05] [Rank 0] step:5701/10000 train_time:239371ms step_avg:41.99ms +[2025-09-11 07:33:05] [Rank 0] step:5701/10000 train_time:239371ms step_avg:41.99ms +[2025-09-11 07:33:05] [Rank 0] step:5721/10000 train_time:240059ms step_avg:41.96ms +[2025-09-11 07:33:05] [Rank 0] step:5721/10000 train_time:240059ms step_avg:41.96ms +[2025-09-11 07:33:06] [Rank 0] step:5741/10000 train_time:240750ms step_avg:41.94ms +[2025-09-11 07:33:06] [Rank 0] step:5741/10000 train_time:240750ms step_avg:41.94ms +[2025-09-11 07:33:07] [Rank 0] step:5761/10000 train_time:241440ms step_avg:41.91ms +[2025-09-11 07:33:07] [Rank 0] step:5761/10000 train_time:241440ms step_avg:41.91ms +[2025-09-11 07:33:07] [Rank 0] step:5781/10000 train_time:242129ms step_avg:41.88ms +[2025-09-11 07:33:07] [Rank 0] step:5781/10000 train_time:242129ms step_avg:41.88ms +[2025-09-11 07:33:08] [Rank 0] step:5801/10000 train_time:242820ms step_avg:41.86ms +[2025-09-11 07:33:08] [Rank 0] step:5801/10000 train_time:242820ms step_avg:41.86ms +[2025-09-11 07:33:09] [Rank 0] step:5821/10000 train_time:243509ms step_avg:41.83ms +[2025-09-11 07:33:09] [Rank 0] step:5821/10000 train_time:243509ms step_avg:41.83ms +[2025-09-11 07:33:09] [Rank 0] step:5841/10000 train_time:244199ms step_avg:41.81ms +[2025-09-11 07:33:09] [Rank 0] step:5841/10000 train_time:244199ms step_avg:41.81ms +[2025-09-11 07:33:10] [Rank 0] step:5861/10000 train_time:244887ms step_avg:41.78ms +[2025-09-11 07:33:10] [Rank 0] step:5861/10000 train_time:244887ms step_avg:41.78ms +[2025-09-11 07:33:11] [Rank 0] step:5881/10000 train_time:245576ms step_avg:41.76ms +[2025-09-11 07:33:11] [Rank 0] step:5881/10000 train_time:245576ms step_avg:41.76ms +[2025-09-11 07:33:12] [Rank 0] step:5901/10000 train_time:246264ms step_avg:41.73ms +[2025-09-11 07:33:12] [Rank 0] step:5901/10000 train_time:246264ms step_avg:41.73ms +[2025-09-11 07:33:12] [Rank 0] step:5921/10000 train_time:246956ms step_avg:41.71ms 
+[2025-09-11 07:33:12] [Rank 0] step:5921/10000 train_time:246956ms step_avg:41.71ms +[2025-09-11 07:33:13] [Rank 0] step:5941/10000 train_time:247646ms step_avg:41.68ms +[2025-09-11 07:33:13] [Rank 0] step:5941/10000 train_time:247646ms step_avg:41.68ms +[2025-09-11 07:33:14] [Rank 0] step:5961/10000 train_time:248336ms step_avg:41.66ms +[2025-09-11 07:33:14] [Rank 0] step:5961/10000 train_time:248336ms step_avg:41.66ms +[2025-09-11 07:33:14] [Rank 0] step:5981/10000 train_time:249027ms step_avg:41.64ms +[2025-09-11 07:33:14] [Rank 0] step:5981/10000 train_time:249027ms step_avg:41.64ms +[2025-09-11 07:33:15] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 07:33:15] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 07:33:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 07:33:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 07:33:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 07:33:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 07:33:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:33:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:33:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 07:33:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 07:33:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 07:33:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 07:33:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 07:33:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 07:33:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 07:33:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 07:33:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 07:33:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 07:33:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 07:33:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 07:33:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 07:33:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 07:33:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 07:33:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 07:33:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 07:33:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 07:33:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 07:33:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 07:33:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 07:33:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 07:33:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 07:33:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 07:33:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 07:33:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 07:33:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 07:33:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 07:33:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 07:33:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 07:33:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 07:33:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 07:33:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 07:33:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 07:33:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 07:33:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 07:33:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 07:33:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:33:25] [Rank 0] PRINT: step:6000/10000 val_loss:5.4551 total_sharp:1.8428e-02 L1_sharp:4.0561e-03 L2_sharp:1.6816e-03 L3_sharp:1.1577e-03 L4_sharp:1.3462e-03 L5_sharp:3.0983e-03 L6_sharp:2.2593e-03 L7_sharp:1.8662e-03 L8_sharp:2.8018e-03 L9_sharp:3.0117e-03 L10_sharp:3.4208e-03 L11_sharp:5.0833e-03 L12_sharp:2.3121e-02 total_fnorm:4.5000e+00 total_l1_linf:1.1136e+04 total_spectral:2.2812e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2500e+00 L12_fnorm:1.1250e+00 L1_l1linf:3.2422e-01 L2_l1linf:3.0859e-01 L3_l1linf:2.9883e-01 L4_l1linf:2.9883e-01 L5_l1linf:2.7930e-01 L6_l1linf:2.6953e-01 L7_l1linf:2.6758e-01 L8_l1linf:2.7344e-01 L9_l1linf:2.6953e-01 L10_l1linf:2.7148e-01 L11_l1linf:2.4707e-01 L12_l1linf:2.0703e-01 L1_spectral:1.6211e-02 L2_spectral:1.5955e-02 L3_spectral:1.5882e-02 L4_spectral:1.5892e-02 L5_spectral:1.5996e-02 L6_spectral:1.5977e-02 L7_spectral:1.6080e-02 L8_spectral:1.5843e-02 L9_spectral:1.5841e-02 L10_spectral:1.5844e-02 L11_spectral:1.5758e-02 L12_spectral:1.5417e-02 train_time:249700ms step_avg:41.62ms +[2025-09-11 07:33:25] [Rank 0] PRINT: step:6000/10000 val_loss:5.4551 total_sharp:1.8428e-02 L1_sharp:4.0561e-03 L2_sharp:1.6816e-03 L3_sharp:1.1577e-03 L4_sharp:1.3462e-03 L5_sharp:3.0983e-03 L6_sharp:2.2593e-03 L7_sharp:1.8662e-03 L8_sharp:2.8018e-03 L9_sharp:3.0117e-03 L10_sharp:3.4208e-03 L11_sharp:5.0833e-03 L12_sharp:2.3121e-02 total_fnorm:4.5000e+00 total_l1_linf:1.1136e+04 total_spectral:2.2812e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2500e+00 L12_fnorm:1.1250e+00 L1_l1linf:3.2422e-01 L2_l1linf:3.0859e-01 L3_l1linf:2.9883e-01 L4_l1linf:2.9883e-01 L5_l1linf:2.7930e-01 
L6_l1linf:2.6953e-01 L7_l1linf:2.6758e-01 L8_l1linf:2.7344e-01 L9_l1linf:2.6953e-01 L10_l1linf:2.7148e-01 L11_l1linf:2.4707e-01 L12_l1linf:2.0703e-01 L1_spectral:1.6211e-02 L2_spectral:1.5955e-02 L3_spectral:1.5882e-02 L4_spectral:1.5892e-02 L5_spectral:1.5996e-02 L6_spectral:1.5977e-02 L7_spectral:1.6080e-02 L8_spectral:1.5843e-02 L9_spectral:1.5841e-02 L10_spectral:1.5844e-02 L11_spectral:1.5758e-02 L12_spectral:1.5417e-02 train_time:249700ms step_avg:41.62ms +[2025-09-11 07:33:26] [Rank 0] step:6001/10000 train_time:250849ms step_avg:41.80ms +[2025-09-11 07:33:26] [Rank 0] step:6001/10000 train_time:250849ms step_avg:41.80ms +[2025-09-11 07:33:27] [Rank 0] step:6021/10000 train_time:251529ms step_avg:41.78ms +[2025-09-11 07:33:27] [Rank 0] step:6021/10000 train_time:251529ms step_avg:41.78ms +[2025-09-11 07:33:28] [Rank 0] step:6041/10000 train_time:252223ms step_avg:41.75ms +[2025-09-11 07:33:28] [Rank 0] step:6041/10000 train_time:252223ms step_avg:41.75ms +[2025-09-11 07:33:28] [Rank 0] step:6061/10000 train_time:252914ms step_avg:41.73ms +[2025-09-11 07:33:28] [Rank 0] step:6061/10000 train_time:252914ms step_avg:41.73ms +[2025-09-11 07:33:29] [Rank 0] step:6081/10000 train_time:253607ms step_avg:41.70ms +[2025-09-11 07:33:29] [Rank 0] step:6081/10000 train_time:253607ms step_avg:41.70ms +[2025-09-11 07:33:30] [Rank 0] step:6101/10000 train_time:254297ms step_avg:41.68ms +[2025-09-11 07:33:30] [Rank 0] step:6101/10000 train_time:254297ms step_avg:41.68ms +[2025-09-11 07:33:30] [Rank 0] step:6121/10000 train_time:254988ms step_avg:41.66ms +[2025-09-11 07:33:30] [Rank 0] step:6121/10000 train_time:254988ms step_avg:41.66ms +[2025-09-11 07:33:31] [Rank 0] step:6141/10000 train_time:255680ms step_avg:41.63ms +[2025-09-11 07:33:31] [Rank 0] step:6141/10000 train_time:255680ms step_avg:41.63ms +[2025-09-11 07:33:32] [Rank 0] step:6161/10000 train_time:256371ms step_avg:41.61ms +[2025-09-11 07:33:32] [Rank 0] step:6161/10000 train_time:256371ms step_avg:41.61ms 
+[2025-09-11 07:33:32] [Rank 0] step:6181/10000 train_time:257061ms step_avg:41.59ms +[2025-09-11 07:33:32] [Rank 0] step:6181/10000 train_time:257061ms step_avg:41.59ms +[2025-09-11 07:33:33] [Rank 0] step:6201/10000 train_time:257752ms step_avg:41.57ms +[2025-09-11 07:33:33] [Rank 0] step:6201/10000 train_time:257752ms step_avg:41.57ms +[2025-09-11 07:33:34] [Rank 0] step:6221/10000 train_time:258444ms step_avg:41.54ms +[2025-09-11 07:33:34] [Rank 0] step:6221/10000 train_time:258444ms step_avg:41.54ms +[2025-09-11 07:33:34] [Rank 0] step:6241/10000 train_time:259135ms step_avg:41.52ms +[2025-09-11 07:33:34] [Rank 0] step:6241/10000 train_time:259135ms step_avg:41.52ms +[2025-09-11 07:33:35] [Rank 0] step:6261/10000 train_time:259825ms step_avg:41.50ms +[2025-09-11 07:33:35] [Rank 0] step:6261/10000 train_time:259825ms step_avg:41.50ms +[2025-09-11 07:33:36] [Rank 0] step:6281/10000 train_time:260516ms step_avg:41.48ms +[2025-09-11 07:33:36] [Rank 0] step:6281/10000 train_time:260516ms step_avg:41.48ms +[2025-09-11 07:33:37] [Rank 0] step:6301/10000 train_time:261205ms step_avg:41.45ms +[2025-09-11 07:33:37] [Rank 0] step:6301/10000 train_time:261205ms step_avg:41.45ms +[2025-09-11 07:33:37] [Rank 0] step:6321/10000 train_time:261898ms step_avg:41.43ms +[2025-09-11 07:33:37] [Rank 0] step:6321/10000 train_time:261898ms step_avg:41.43ms +[2025-09-11 07:33:38] [Rank 0] step:6341/10000 train_time:262589ms step_avg:41.41ms +[2025-09-11 07:33:38] [Rank 0] step:6341/10000 train_time:262589ms step_avg:41.41ms +[2025-09-11 07:33:39] [Rank 0] step:6361/10000 train_time:263281ms step_avg:41.39ms +[2025-09-11 07:33:39] [Rank 0] step:6361/10000 train_time:263281ms step_avg:41.39ms +[2025-09-11 07:33:39] [Rank 0] step:6381/10000 train_time:263971ms step_avg:41.37ms +[2025-09-11 07:33:39] [Rank 0] step:6381/10000 train_time:263971ms step_avg:41.37ms +[2025-09-11 07:33:40] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 07:33:40] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 07:33:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 07:33:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 07:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 07:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 07:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 07:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 07:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 07:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 07:33:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 07:33:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 07:33:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 07:33:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 07:33:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 07:33:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 07:33:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 07:33:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 07:33:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 07:33:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 07:33:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 07:33:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 07:33:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 07:33:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 07:33:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 07:33:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 07:33:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 07:33:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 07:33:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 07:33:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 07:33:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 07:33:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 07:33:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 07:33:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 07:33:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 07:33:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 07:33:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 07:33:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 07:33:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 07:33:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 07:33:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 07:33:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 07:33:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 07:33:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 07:33:50] [Rank 0] PRINT: step:6400/10000 val_loss:5.4340 total_sharp:1.4853e-02 L1_sharp:2.5818e-03 L2_sharp:1.4475e-03 L3_sharp:1.7534e-03 L4_sharp:1.4381e-03 L5_sharp:2.5781e-03 L6_sharp:2.0167e-03 L7_sharp:1.9877e-03 L8_sharp:2.9178e-03 L9_sharp:2.7699e-03 L10_sharp:3.3523e-03 L11_sharp:3.8824e-03 L12_sharp:9.7457e-03 total_fnorm:4.0625e+00 total_l1_linf:9.5360e+03 total_spectral:2.0469e+00 L1_fnorm:1.1562e+00 L2_fnorm:1.1172e+00 L3_fnorm:1.1250e+00 L4_fnorm:1.1328e+00 L5_fnorm:1.1250e+00 L6_fnorm:1.1406e+00 L7_fnorm:1.1406e+00 L8_fnorm:1.1250e+00 L9_fnorm:1.1406e+00 L10_fnorm:1.1406e+00 L11_fnorm:1.1250e+00 L12_fnorm:1.0312e+00 L1_l1linf:2.7734e-01 L2_l1linf:2.6758e-01 L3_l1linf:2.5391e-01 L4_l1linf:2.5195e-01 L5_l1linf:2.4023e-01 L6_l1linf:2.3535e-01 L7_l1linf:2.3145e-01 L8_l1linf:2.3145e-01 L9_l1linf:2.3242e-01 L10_l1linf:2.3535e-01 L11_l1linf:2.1777e-01 L12_l1linf:1.8066e-01 L1_spectral:1.4846e-02 L2_spectral:1.4581e-02 L3_spectral:1.4455e-02 L4_spectral:1.4557e-02 L5_spectral:1.4615e-02 L6_spectral:1.4630e-02 L7_spectral:1.4699e-02 L8_spectral:1.4479e-02 L9_spectral:1.4605e-02 L10_spectral:1.4530e-02 L11_spectral:1.4548e-02 L12_spectral:1.4183e-02 train_time:264640ms step_avg:41.35ms +[2025-09-11 07:33:50] [Rank 0] PRINT: step:6400/10000 
val_loss:5.4340 total_sharp:1.4853e-02 L1_sharp:2.5818e-03 L2_sharp:1.4475e-03 L3_sharp:1.7534e-03 L4_sharp:1.4381e-03 L5_sharp:2.5781e-03 L6_sharp:2.0167e-03 L7_sharp:1.9877e-03 L8_sharp:2.9178e-03 L9_sharp:2.7699e-03 L10_sharp:3.3523e-03 L11_sharp:3.8824e-03 L12_sharp:9.7457e-03 total_fnorm:4.0625e+00 total_l1_linf:9.5360e+03 total_spectral:2.0469e+00 L1_fnorm:1.1562e+00 L2_fnorm:1.1172e+00 L3_fnorm:1.1250e+00 L4_fnorm:1.1328e+00 L5_fnorm:1.1250e+00 L6_fnorm:1.1406e+00 L7_fnorm:1.1406e+00 L8_fnorm:1.1250e+00 L9_fnorm:1.1406e+00 L10_fnorm:1.1406e+00 L11_fnorm:1.1250e+00 L12_fnorm:1.0312e+00 L1_l1linf:2.7734e-01 L2_l1linf:2.6758e-01 L3_l1linf:2.5391e-01 L4_l1linf:2.5195e-01 L5_l1linf:2.4023e-01 L6_l1linf:2.3535e-01 L7_l1linf:2.3145e-01 L8_l1linf:2.3145e-01 L9_l1linf:2.3242e-01 L10_l1linf:2.3535e-01 L11_l1linf:2.1777e-01 L12_l1linf:1.8066e-01 L1_spectral:1.4846e-02 L2_spectral:1.4581e-02 L3_spectral:1.4455e-02 L4_spectral:1.4557e-02 L5_spectral:1.4615e-02 L6_spectral:1.4630e-02 L7_spectral:1.4699e-02 L8_spectral:1.4479e-02 L9_spectral:1.4605e-02 L10_spectral:1.4530e-02 L11_spectral:1.4548e-02 L12_spectral:1.4183e-02 train_time:264640ms step_avg:41.35ms +[2025-09-11 07:33:51] [Rank 0] step:6401/10000 train_time:265802ms step_avg:41.53ms +[2025-09-11 07:33:51] [Rank 0] step:6401/10000 train_time:265802ms step_avg:41.53ms +[2025-09-11 07:33:52] [Rank 0] step:6421/10000 train_time:266547ms step_avg:41.51ms +[2025-09-11 07:33:52] [Rank 0] step:6421/10000 train_time:266547ms step_avg:41.51ms +[2025-09-11 07:33:53] [Rank 0] step:6441/10000 train_time:267238ms step_avg:41.49ms +[2025-09-11 07:33:53] [Rank 0] step:6441/10000 train_time:267238ms step_avg:41.49ms +[2025-09-11 07:33:53] [Rank 0] step:6461/10000 train_time:267929ms step_avg:41.47ms +[2025-09-11 07:33:53] [Rank 0] step:6461/10000 train_time:267929ms step_avg:41.47ms +[2025-09-11 07:33:54] [Rank 0] step:6481/10000 train_time:268624ms step_avg:41.45ms +[2025-09-11 07:33:54] [Rank 0] step:6481/10000 
train_time:268624ms step_avg:41.45ms +[2025-09-11 07:33:55] [Rank 0] step:6501/10000 train_time:269316ms step_avg:41.43ms +[2025-09-11 07:33:55] [Rank 0] step:6501/10000 train_time:269316ms step_avg:41.43ms +[2025-09-11 07:33:55] [Rank 0] step:6521/10000 train_time:270007ms step_avg:41.41ms +[2025-09-11 07:33:55] [Rank 0] step:6521/10000 train_time:270007ms step_avg:41.41ms +[2025-09-11 07:33:56] [Rank 0] step:6541/10000 train_time:270697ms step_avg:41.38ms +[2025-09-11 07:33:56] [Rank 0] step:6541/10000 train_time:270697ms step_avg:41.38ms +[2025-09-11 07:33:57] [Rank 0] step:6561/10000 train_time:271387ms step_avg:41.36ms +[2025-09-11 07:33:57] [Rank 0] step:6561/10000 train_time:271387ms step_avg:41.36ms +[2025-09-11 07:33:57] [Rank 0] step:6581/10000 train_time:272078ms step_avg:41.34ms +[2025-09-11 07:33:57] [Rank 0] step:6581/10000 train_time:272078ms step_avg:41.34ms +[2025-09-11 07:33:58] [Rank 0] step:6601/10000 train_time:272770ms step_avg:41.32ms +[2025-09-11 07:33:58] [Rank 0] step:6601/10000 train_time:272770ms step_avg:41.32ms +[2025-09-11 07:33:59] [Rank 0] step:6621/10000 train_time:273460ms step_avg:41.30ms +[2025-09-11 07:33:59] [Rank 0] step:6621/10000 train_time:273460ms step_avg:41.30ms +[2025-09-11 07:33:59] [Rank 0] step:6641/10000 train_time:274151ms step_avg:41.28ms +[2025-09-11 07:33:59] [Rank 0] step:6641/10000 train_time:274151ms step_avg:41.28ms +[2025-09-11 07:34:00] [Rank 0] step:6661/10000 train_time:274842ms step_avg:41.26ms +[2025-09-11 07:34:00] [Rank 0] step:6661/10000 train_time:274842ms step_avg:41.26ms +[2025-09-11 07:34:01] [Rank 0] step:6681/10000 train_time:275539ms step_avg:41.24ms +[2025-09-11 07:34:01] [Rank 0] step:6681/10000 train_time:275539ms step_avg:41.24ms +[2025-09-11 07:34:02] [Rank 0] step:6701/10000 train_time:276236ms step_avg:41.22ms +[2025-09-11 07:34:02] [Rank 0] step:6701/10000 train_time:276236ms step_avg:41.22ms +[2025-09-11 07:34:02] [Rank 0] step:6721/10000 train_time:276936ms step_avg:41.20ms 
+[2025-09-11 07:34:02] [Rank 0] step:6721/10000 train_time:276936ms step_avg:41.20ms +[2025-09-11 07:34:03] [Rank 0] step:6741/10000 train_time:277635ms step_avg:41.19ms +[2025-09-11 07:34:03] [Rank 0] step:6741/10000 train_time:277635ms step_avg:41.19ms +[2025-09-11 07:34:04] [Rank 0] step:6761/10000 train_time:278333ms step_avg:41.17ms +[2025-09-11 07:34:04] [Rank 0] step:6761/10000 train_time:278333ms step_avg:41.17ms +[2025-09-11 07:34:04] [Rank 0] step:6781/10000 train_time:279030ms step_avg:41.15ms +[2025-09-11 07:34:04] [Rank 0] step:6781/10000 train_time:279030ms step_avg:41.15ms +[2025-09-11 07:34:05] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 07:34:05] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 07:34:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 07:34:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 07:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 07:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 07:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 07:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 07:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 07:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 07:34:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 07:34:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 07:34:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 07:34:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 07:34:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 07:34:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 07:34:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 07:34:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 07:34:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 07:34:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 07:34:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 07:34:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 07:34:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 07:34:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 07:34:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 07:34:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 07:34:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 07:34:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 07:34:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 07:34:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 07:34:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 07:34:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 07:34:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 07:34:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 07:34:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 07:34:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 07:34:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 07:34:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 07:34:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 07:34:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 07:34:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 07:34:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 07:34:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 07:34:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:34:15] [Rank 0] PRINT: step:6800/10000 val_loss:5.4073 total_sharp:1.3089e-02 L1_sharp:2.9098e-03 L2_sharp:1.0397e-03 L3_sharp:1.1534e-03 L4_sharp:1.3448e-03 L5_sharp:2.5902e-03 L6_sharp:1.7696e-03 L7_sharp:1.7221e-03 L8_sharp:2.7086e-03 L9_sharp:2.5577e-03 L10_sharp:2.8714e-03 L11_sharp:3.6165e-03 L12_sharp:8.2817e-03 total_fnorm:3.6406e+00 total_l1_linf:8.0640e+03 total_spectral:1.8125e+00 L1_fnorm:1.0312e+00 L2_fnorm:9.9219e-01 L3_fnorm:9.9609e-01 L4_fnorm:9.9609e-01 L5_fnorm:9.8828e-01 L6_fnorm:1.0000e+00 L7_fnorm:1.0000e+00 L8_fnorm:9.9219e-01 L9_fnorm:1.0000e+00 L10_fnorm:1.0078e+00 L11_fnorm:9.9609e-01 L12_fnorm:9.1797e-01 L1_l1linf:2.3926e-01 L2_l1linf:2.3047e-01 L3_l1linf:2.2070e-01 L4_l1linf:2.1582e-01 L5_l1linf:2.0801e-01 L6_l1linf:1.9922e-01 L7_l1linf:2.0117e-01 L8_l1linf:1.9629e-01 L9_l1linf:1.9629e-01 L10_l1linf:1.9629e-01 L11_l1linf:1.8555e-01 L12_l1linf:1.5820e-01 L1_spectral:1.3443e-02 L2_spectral:1.3125e-02 L3_spectral:1.3105e-02 L4_spectral:1.3247e-02 L5_spectral:1.3114e-02 L6_spectral:1.3211e-02 L7_spectral:1.3236e-02 L8_spectral:1.3173e-02 L9_spectral:1.3211e-02 L10_spectral:1.3234e-02 L11_spectral:1.3254e-02 L12_spectral:1.2855e-02 train_time:279708ms step_avg:41.13ms +[2025-09-11 07:34:15] [Rank 0] PRINT: step:6800/10000 val_loss:5.4073 total_sharp:1.3089e-02 L1_sharp:2.9098e-03 L2_sharp:1.0397e-03 L3_sharp:1.1534e-03 L4_sharp:1.3448e-03 L5_sharp:2.5902e-03 L6_sharp:1.7696e-03 L7_sharp:1.7221e-03 L8_sharp:2.7086e-03 L9_sharp:2.5577e-03 L10_sharp:2.8714e-03 L11_sharp:3.6165e-03 L12_sharp:8.2817e-03 total_fnorm:3.6406e+00 total_l1_linf:8.0640e+03 total_spectral:1.8125e+00 L1_fnorm:1.0312e+00 L2_fnorm:9.9219e-01 L3_fnorm:9.9609e-01 L4_fnorm:9.9609e-01 L5_fnorm:9.8828e-01 L6_fnorm:1.0000e+00 L7_fnorm:1.0000e+00 L8_fnorm:9.9219e-01 L9_fnorm:1.0000e+00 L10_fnorm:1.0078e+00 L11_fnorm:9.9609e-01 L12_fnorm:9.1797e-01 L1_l1linf:2.3926e-01 L2_l1linf:2.3047e-01 L3_l1linf:2.2070e-01 L4_l1linf:2.1582e-01 L5_l1linf:2.0801e-01 
L6_l1linf:1.9922e-01 L7_l1linf:2.0117e-01 L8_l1linf:1.9629e-01 L9_l1linf:1.9629e-01 L10_l1linf:1.9629e-01 L11_l1linf:1.8555e-01 L12_l1linf:1.5820e-01 L1_spectral:1.3443e-02 L2_spectral:1.3125e-02 L3_spectral:1.3105e-02 L4_spectral:1.3247e-02 L5_spectral:1.3114e-02 L6_spectral:1.3211e-02 L7_spectral:1.3236e-02 L8_spectral:1.3173e-02 L9_spectral:1.3211e-02 L10_spectral:1.3234e-02 L11_spectral:1.3254e-02 L12_spectral:1.2855e-02 train_time:279708ms step_avg:41.13ms +[2025-09-11 07:34:16] [Rank 0] step:6801/10000 train_time:280864ms step_avg:41.30ms +[2025-09-11 07:34:16] [Rank 0] step:6801/10000 train_time:280864ms step_avg:41.30ms +[2025-09-11 07:34:17] [Rank 0] step:6821/10000 train_time:281554ms step_avg:41.28ms +[2025-09-11 07:34:17] [Rank 0] step:6821/10000 train_time:281554ms step_avg:41.28ms +[2025-09-11 07:34:17] [Rank 0] step:6841/10000 train_time:282256ms step_avg:41.26ms +[2025-09-11 07:34:17] [Rank 0] step:6841/10000 train_time:282256ms step_avg:41.26ms +[2025-09-11 07:34:18] [Rank 0] step:6861/10000 train_time:282957ms step_avg:41.24ms +[2025-09-11 07:34:18] [Rank 0] step:6861/10000 train_time:282957ms step_avg:41.24ms +[2025-09-11 07:34:19] [Rank 0] step:6881/10000 train_time:283657ms step_avg:41.22ms +[2025-09-11 07:34:19] [Rank 0] step:6881/10000 train_time:283657ms step_avg:41.22ms +[2025-09-11 07:34:19] [Rank 0] step:6901/10000 train_time:284354ms step_avg:41.20ms +[2025-09-11 07:34:19] [Rank 0] step:6901/10000 train_time:284354ms step_avg:41.20ms +[2025-09-11 07:34:20] [Rank 0] step:6921/10000 train_time:285052ms step_avg:41.19ms +[2025-09-11 07:34:20] [Rank 0] step:6921/10000 train_time:285052ms step_avg:41.19ms +[2025-09-11 07:34:21] [Rank 0] step:6941/10000 train_time:285752ms step_avg:41.17ms +[2025-09-11 07:34:21] [Rank 0] step:6941/10000 train_time:285752ms step_avg:41.17ms +[2025-09-11 07:34:22] [Rank 0] step:6961/10000 train_time:286450ms step_avg:41.15ms +[2025-09-11 07:34:22] [Rank 0] step:6961/10000 train_time:286450ms step_avg:41.15ms 
+[2025-09-11 07:34:22] [Rank 0] step:6981/10000 train_time:287151ms step_avg:41.13ms +[2025-09-11 07:34:22] [Rank 0] step:6981/10000 train_time:287151ms step_avg:41.13ms +[2025-09-11 07:34:23] [Rank 0] step:7001/10000 train_time:288144ms step_avg:41.16ms +[2025-09-11 07:34:23] [Rank 0] step:7001/10000 train_time:288144ms step_avg:41.16ms +[2025-09-11 07:34:24] [Rank 0] step:7021/10000 train_time:289134ms step_avg:41.18ms +[2025-09-11 07:34:24] [Rank 0] step:7021/10000 train_time:289134ms step_avg:41.18ms +[2025-09-11 07:34:25] [Rank 0] step:7041/10000 train_time:289832ms step_avg:41.16ms +[2025-09-11 07:34:25] [Rank 0] step:7041/10000 train_time:289832ms step_avg:41.16ms +[2025-09-11 07:34:26] [Rank 0] step:7061/10000 train_time:290809ms step_avg:41.19ms +[2025-09-11 07:34:26] [Rank 0] step:7061/10000 train_time:290809ms step_avg:41.19ms +[2025-09-11 07:34:27] [Rank 0] step:7081/10000 train_time:291508ms step_avg:41.17ms +[2025-09-11 07:34:27] [Rank 0] step:7081/10000 train_time:291508ms step_avg:41.17ms +[2025-09-11 07:34:27] [Rank 0] step:7101/10000 train_time:292206ms step_avg:41.15ms +[2025-09-11 07:34:27] [Rank 0] step:7101/10000 train_time:292206ms step_avg:41.15ms +[2025-09-11 07:34:28] [Rank 0] step:7121/10000 train_time:292907ms step_avg:41.13ms +[2025-09-11 07:34:28] [Rank 0] step:7121/10000 train_time:292907ms step_avg:41.13ms +[2025-09-11 07:34:29] [Rank 0] step:7141/10000 train_time:293605ms step_avg:41.12ms +[2025-09-11 07:34:29] [Rank 0] step:7141/10000 train_time:293605ms step_avg:41.12ms +[2025-09-11 07:34:29] [Rank 0] step:7161/10000 train_time:294307ms step_avg:41.10ms +[2025-09-11 07:34:29] [Rank 0] step:7161/10000 train_time:294307ms step_avg:41.10ms +[2025-09-11 07:34:30] [Rank 0] step:7181/10000 train_time:295004ms step_avg:41.08ms +[2025-09-11 07:34:30] [Rank 0] step:7181/10000 train_time:295004ms step_avg:41.08ms +[2025-09-11 07:34:31] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 07:34:31] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 07:34:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 07:34:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 07:34:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 07:34:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 07:34:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:34:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:34:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 07:34:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 07:34:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 07:34:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 07:34:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 07:34:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 07:34:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 07:34:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 07:34:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 07:34:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 07:34:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 07:34:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 07:34:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 07:34:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 07:34:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 07:34:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 07:34:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 07:34:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 07:34:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 07:34:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 07:34:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 07:34:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 07:34:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 07:34:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 07:34:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 07:34:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 07:34:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 07:34:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 07:34:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 07:34:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 07:34:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 07:34:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 07:34:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 07:34:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 07:34:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 07:34:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 07:34:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 07:34:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 07:34:44] [Rank 0] PRINT: step:7200/10000 val_loss:5.3825 total_sharp:1.3547e-02 L1_sharp:3.7660e-03 L2_sharp:1.0114e-03 L3_sharp:9.2125e-04 L4_sharp:8.0532e-04 L5_sharp:2.5208e-03 L6_sharp:1.6352e-03 L7_sharp:1.5079e-03 L8_sharp:2.3118e-03 L9_sharp:2.4807e-03 L10_sharp:3.0902e-03 L11_sharp:3.8017e-03 L12_sharp:1.2779e-02 total_fnorm:3.1094e+00 total_l1_linf:6.4960e+03 total_spectral:1.5547e+00 L1_fnorm:8.9844e-01 L2_fnorm:8.5547e-01 L3_fnorm:8.6328e-01 L4_fnorm:8.6328e-01 L5_fnorm:8.5156e-01 L6_fnorm:8.6328e-01 L7_fnorm:8.6328e-01 L8_fnorm:8.5156e-01 L9_fnorm:8.6328e-01 L10_fnorm:8.6328e-01 L11_fnorm:8.5938e-01 L12_fnorm:7.8516e-01 L1_l1linf:1.9824e-01 L2_l1linf:1.8750e-01 L3_l1linf:1.8066e-01 L4_l1linf:1.8066e-01 L5_l1linf:1.6602e-01 L6_l1linf:1.6309e-01 L7_l1linf:1.6211e-01 L8_l1linf:1.6211e-01 L9_l1linf:1.6113e-01 L10_l1linf:1.5918e-01 L11_l1linf:1.5137e-01 L12_l1linf:1.2598e-01 L1_spectral:1.1714e-02 L2_spectral:1.1525e-02 L3_spectral:1.1587e-02 L4_spectral:1.1623e-02 L5_spectral:1.1530e-02 L6_spectral:1.1655e-02 L7_spectral:1.1655e-02 L8_spectral:1.1612e-02 L9_spectral:1.1613e-02 L10_spectral:1.1653e-02 L11_spectral:1.1594e-02 L12_spectral:1.1388e-02 train_time:295688ms step_avg:41.07ms +[2025-09-11 07:34:44] [Rank 0] PRINT: step:7200/10000 
val_loss:5.3825 total_sharp:1.3547e-02 L1_sharp:3.7660e-03 L2_sharp:1.0114e-03 L3_sharp:9.2125e-04 L4_sharp:8.0532e-04 L5_sharp:2.5208e-03 L6_sharp:1.6352e-03 L7_sharp:1.5079e-03 L8_sharp:2.3118e-03 L9_sharp:2.4807e-03 L10_sharp:3.0902e-03 L11_sharp:3.8017e-03 L12_sharp:1.2779e-02 total_fnorm:3.1094e+00 total_l1_linf:6.4960e+03 total_spectral:1.5547e+00 L1_fnorm:8.9844e-01 L2_fnorm:8.5547e-01 L3_fnorm:8.6328e-01 L4_fnorm:8.6328e-01 L5_fnorm:8.5156e-01 L6_fnorm:8.6328e-01 L7_fnorm:8.6328e-01 L8_fnorm:8.5156e-01 L9_fnorm:8.6328e-01 L10_fnorm:8.6328e-01 L11_fnorm:8.5938e-01 L12_fnorm:7.8516e-01 L1_l1linf:1.9824e-01 L2_l1linf:1.8750e-01 L3_l1linf:1.8066e-01 L4_l1linf:1.8066e-01 L5_l1linf:1.6602e-01 L6_l1linf:1.6309e-01 L7_l1linf:1.6211e-01 L8_l1linf:1.6211e-01 L9_l1linf:1.6113e-01 L10_l1linf:1.5918e-01 L11_l1linf:1.5137e-01 L12_l1linf:1.2598e-01 L1_spectral:1.1714e-02 L2_spectral:1.1525e-02 L3_spectral:1.1587e-02 L4_spectral:1.1623e-02 L5_spectral:1.1530e-02 L6_spectral:1.1655e-02 L7_spectral:1.1655e-02 L8_spectral:1.1612e-02 L9_spectral:1.1613e-02 L10_spectral:1.1653e-02 L11_spectral:1.1594e-02 L12_spectral:1.1388e-02 train_time:295688ms step_avg:41.07ms +[2025-09-11 07:34:45] [Rank 0] step:7201/10000 train_time:296850ms step_avg:41.22ms +[2025-09-11 07:34:45] [Rank 0] step:7201/10000 train_time:296850ms step_avg:41.22ms +[2025-09-11 07:34:46] [Rank 0] step:7221/10000 train_time:297582ms step_avg:41.21ms +[2025-09-11 07:34:46] [Rank 0] step:7221/10000 train_time:297582ms step_avg:41.21ms +[2025-09-11 07:34:47] [Rank 0] step:7241/10000 train_time:298284ms step_avg:41.19ms +[2025-09-11 07:34:47] [Rank 0] step:7241/10000 train_time:298284ms step_avg:41.19ms +[2025-09-11 07:34:47] [Rank 0] step:7261/10000 train_time:298985ms step_avg:41.18ms +[2025-09-11 07:34:47] [Rank 0] step:7261/10000 train_time:298985ms step_avg:41.18ms +[2025-09-11 07:34:48] [Rank 0] step:7281/10000 train_time:299691ms step_avg:41.16ms +[2025-09-11 07:34:48] [Rank 0] step:7281/10000 
train_time:299691ms step_avg:41.16ms +[2025-09-11 07:34:49] [Rank 0] step:7301/10000 train_time:300389ms step_avg:41.14ms +[2025-09-11 07:34:49] [Rank 0] step:7301/10000 train_time:300389ms step_avg:41.14ms +[2025-09-11 07:34:49] [Rank 0] step:7321/10000 train_time:301089ms step_avg:41.13ms +[2025-09-11 07:34:49] [Rank 0] step:7321/10000 train_time:301089ms step_avg:41.13ms +[2025-09-11 07:34:50] [Rank 0] step:7341/10000 train_time:301790ms step_avg:41.11ms +[2025-09-11 07:34:50] [Rank 0] step:7341/10000 train_time:301790ms step_avg:41.11ms +[2025-09-11 07:34:51] [Rank 0] step:7361/10000 train_time:302490ms step_avg:41.09ms +[2025-09-11 07:34:51] [Rank 0] step:7361/10000 train_time:302490ms step_avg:41.09ms +[2025-09-11 07:34:52] [Rank 0] step:7381/10000 train_time:303191ms step_avg:41.08ms +[2025-09-11 07:34:52] [Rank 0] step:7381/10000 train_time:303191ms step_avg:41.08ms +[2025-09-11 07:34:52] [Rank 0] step:7401/10000 train_time:303892ms step_avg:41.06ms +[2025-09-11 07:34:52] [Rank 0] step:7401/10000 train_time:303892ms step_avg:41.06ms +[2025-09-11 07:34:53] [Rank 0] step:7421/10000 train_time:304591ms step_avg:41.04ms +[2025-09-11 07:34:53] [Rank 0] step:7421/10000 train_time:304591ms step_avg:41.04ms +[2025-09-11 07:34:54] [Rank 0] step:7441/10000 train_time:305293ms step_avg:41.03ms +[2025-09-11 07:34:54] [Rank 0] step:7441/10000 train_time:305293ms step_avg:41.03ms +[2025-09-11 07:34:54] [Rank 0] step:7461/10000 train_time:305994ms step_avg:41.01ms +[2025-09-11 07:34:54] [Rank 0] step:7461/10000 train_time:305994ms step_avg:41.01ms +[2025-09-11 07:34:55] [Rank 0] step:7481/10000 train_time:306697ms step_avg:41.00ms +[2025-09-11 07:34:55] [Rank 0] step:7481/10000 train_time:306697ms step_avg:41.00ms +[2025-09-11 07:34:56] [Rank 0] step:7501/10000 train_time:307399ms step_avg:40.98ms +[2025-09-11 07:34:56] [Rank 0] step:7501/10000 train_time:307399ms step_avg:40.98ms +[2025-09-11 07:34:56] [Rank 0] step:7521/10000 train_time:308101ms step_avg:40.97ms 
+[2025-09-11 07:34:56] [Rank 0] step:7521/10000 train_time:308101ms step_avg:40.97ms +[2025-09-11 07:34:57] [Rank 0] step:7541/10000 train_time:308800ms step_avg:40.95ms +[2025-09-11 07:34:57] [Rank 0] step:7541/10000 train_time:308800ms step_avg:40.95ms +[2025-09-11 07:34:58] [Rank 0] step:7561/10000 train_time:309503ms step_avg:40.93ms +[2025-09-11 07:34:58] [Rank 0] step:7561/10000 train_time:309503ms step_avg:40.93ms +[2025-09-11 07:34:59] [Rank 0] step:7581/10000 train_time:310206ms step_avg:40.92ms +[2025-09-11 07:34:59] [Rank 0] step:7581/10000 train_time:310206ms step_avg:40.92ms +[2025-09-11 07:34:59] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 07:34:59] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 07:35:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 07:35:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 07:35:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 07:35:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 07:35:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:35:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:35:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 07:35:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 07:35:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 07:35:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 07:35:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 07:35:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 07:35:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 07:35:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 07:35:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 07:35:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 07:35:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 07:35:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 07:35:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 07:35:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 07:35:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 07:35:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 07:35:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 07:35:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 07:35:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 07:35:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 07:35:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 07:35:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 07:35:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 07:35:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 07:35:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 07:35:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 07:35:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 07:35:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 07:35:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 07:35:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 07:35:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 07:35:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 07:35:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 07:35:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 07:35:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 07:35:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 07:35:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 07:35:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:35:09] [Rank 0] PRINT: step:7600/10000 val_loss:5.3657 total_sharp:1.3158e-02 L1_sharp:4.1086e-03 L2_sharp:1.7708e-03 L3_sharp:1.6393e-03 L4_sharp:9.7804e-04 L5_sharp:2.6459e-03 L6_sharp:1.8476e-03 L7_sharp:1.7920e-03 L8_sharp:2.6099e-03 L9_sharp:2.5185e-03 L10_sharp:2.8317e-03 L11_sharp:3.4675e-03 L12_sharp:7.5955e-03 total_fnorm:2.6094e+00 total_l1_linf:5.0880e+03 total_spectral:1.2891e+00 L1_fnorm:7.6172e-01 L2_fnorm:7.2266e-01 L3_fnorm:7.2266e-01 L4_fnorm:7.2266e-01 L5_fnorm:7.1094e-01 L6_fnorm:7.1875e-01 L7_fnorm:7.2266e-01 L8_fnorm:7.1094e-01 L9_fnorm:7.2266e-01 L10_fnorm:7.2266e-01 L11_fnorm:7.1875e-01 L12_fnorm:6.6797e-01 L1_l1linf:1.5527e-01 L2_l1linf:1.5039e-01 L3_l1linf:1.4160e-01 L4_l1linf:1.4160e-01 L5_l1linf:1.3086e-01 L6_l1linf:1.2695e-01 L7_l1linf:1.3086e-01 L8_l1linf:1.2793e-01 L9_l1linf:1.2598e-01 L10_l1linf:1.2598e-01 L11_l1linf:1.2158e-01 L12_l1linf:1.1084e-01 L1_spectral:9.9240e-03 L2_spectral:1.0019e-02 L3_spectral:9.9283e-03 L4_spectral:9.9054e-03 L5_spectral:9.8048e-03 L6_spectral:9.8993e-03 L7_spectral:9.9744e-03 L8_spectral:9.8977e-03 L9_spectral:9.8850e-03 L10_spectral:9.8933e-03 L11_spectral:9.9874e-03 L12_spectral:9.7148e-03 train_time:310889ms step_avg:40.91ms +[2025-09-11 07:35:09] [Rank 0] PRINT: step:7600/10000 val_loss:5.3657 total_sharp:1.3158e-02 L1_sharp:4.1086e-03 L2_sharp:1.7708e-03 L3_sharp:1.6393e-03 L4_sharp:9.7804e-04 L5_sharp:2.6459e-03 L6_sharp:1.8476e-03 L7_sharp:1.7920e-03 L8_sharp:2.6099e-03 L9_sharp:2.5185e-03 L10_sharp:2.8317e-03 L11_sharp:3.4675e-03 L12_sharp:7.5955e-03 total_fnorm:2.6094e+00 total_l1_linf:5.0880e+03 total_spectral:1.2891e+00 L1_fnorm:7.6172e-01 L2_fnorm:7.2266e-01 L3_fnorm:7.2266e-01 L4_fnorm:7.2266e-01 L5_fnorm:7.1094e-01 L6_fnorm:7.1875e-01 L7_fnorm:7.2266e-01 L8_fnorm:7.1094e-01 L9_fnorm:7.2266e-01 L10_fnorm:7.2266e-01 L11_fnorm:7.1875e-01 L12_fnorm:6.6797e-01 L1_l1linf:1.5527e-01 L2_l1linf:1.5039e-01 L3_l1linf:1.4160e-01 L4_l1linf:1.4160e-01 L5_l1linf:1.3086e-01 
L6_l1linf:1.2695e-01 L7_l1linf:1.3086e-01 L8_l1linf:1.2793e-01 L9_l1linf:1.2598e-01 L10_l1linf:1.2598e-01 L11_l1linf:1.2158e-01 L12_l1linf:1.1084e-01 L1_spectral:9.9240e-03 L2_spectral:1.0019e-02 L3_spectral:9.9283e-03 L4_spectral:9.9054e-03 L5_spectral:9.8048e-03 L6_spectral:9.8993e-03 L7_spectral:9.9744e-03 L8_spectral:9.8977e-03 L9_spectral:9.8850e-03 L10_spectral:9.8933e-03 L11_spectral:9.9874e-03 L12_spectral:9.7148e-03 train_time:310889ms step_avg:40.91ms +[2025-09-11 07:35:11] [Rank 0] step:7601/10000 train_time:312108ms step_avg:41.06ms +[2025-09-11 07:35:11] [Rank 0] step:7601/10000 train_time:312108ms step_avg:41.06ms +[2025-09-11 07:35:11] [Rank 0] step:7621/10000 train_time:312821ms step_avg:41.05ms +[2025-09-11 07:35:11] [Rank 0] step:7621/10000 train_time:312821ms step_avg:41.05ms +[2025-09-11 07:35:12] [Rank 0] step:7641/10000 train_time:313525ms step_avg:41.03ms +[2025-09-11 07:35:12] [Rank 0] step:7641/10000 train_time:313525ms step_avg:41.03ms +[2025-09-11 07:35:13] [Rank 0] step:7661/10000 train_time:314226ms step_avg:41.02ms +[2025-09-11 07:35:13] [Rank 0] step:7661/10000 train_time:314226ms step_avg:41.02ms +[2025-09-11 07:35:13] [Rank 0] step:7681/10000 train_time:314927ms step_avg:41.00ms +[2025-09-11 07:35:13] [Rank 0] step:7681/10000 train_time:314927ms step_avg:41.00ms +[2025-09-11 07:35:14] [Rank 0] step:7701/10000 train_time:315631ms step_avg:40.99ms +[2025-09-11 07:35:14] [Rank 0] step:7701/10000 train_time:315631ms step_avg:40.99ms +[2025-09-11 07:35:15] [Rank 0] step:7721/10000 train_time:316333ms step_avg:40.97ms +[2025-09-11 07:35:15] [Rank 0] step:7721/10000 train_time:316333ms step_avg:40.97ms +[2025-09-11 07:35:15] [Rank 0] step:7741/10000 train_time:317035ms step_avg:40.96ms +[2025-09-11 07:35:15] [Rank 0] step:7741/10000 train_time:317035ms step_avg:40.96ms +[2025-09-11 07:35:16] [Rank 0] step:7761/10000 train_time:317737ms step_avg:40.94ms +[2025-09-11 07:35:16] [Rank 0] step:7761/10000 train_time:317737ms step_avg:40.94ms 
+[2025-09-11 07:35:17] [Rank 0] step:7781/10000 train_time:318440ms step_avg:40.93ms +[2025-09-11 07:35:17] [Rank 0] step:7781/10000 train_time:318440ms step_avg:40.93ms +[2025-09-11 07:35:18] [Rank 0] step:7801/10000 train_time:319141ms step_avg:40.91ms +[2025-09-11 07:35:18] [Rank 0] step:7801/10000 train_time:319141ms step_avg:40.91ms +[2025-09-11 07:35:18] [Rank 0] step:7821/10000 train_time:319843ms step_avg:40.90ms +[2025-09-11 07:35:18] [Rank 0] step:7821/10000 train_time:319843ms step_avg:40.90ms +[2025-09-11 07:35:19] [Rank 0] step:7841/10000 train_time:320547ms step_avg:40.88ms +[2025-09-11 07:35:19] [Rank 0] step:7841/10000 train_time:320547ms step_avg:40.88ms +[2025-09-11 07:35:20] [Rank 0] step:7861/10000 train_time:321251ms step_avg:40.87ms +[2025-09-11 07:35:20] [Rank 0] step:7861/10000 train_time:321251ms step_avg:40.87ms +[2025-09-11 07:35:20] [Rank 0] step:7881/10000 train_time:321952ms step_avg:40.85ms +[2025-09-11 07:35:20] [Rank 0] step:7881/10000 train_time:321952ms step_avg:40.85ms +[2025-09-11 07:35:21] [Rank 0] step:7901/10000 train_time:322656ms step_avg:40.84ms +[2025-09-11 07:35:21] [Rank 0] step:7901/10000 train_time:322656ms step_avg:40.84ms +[2025-09-11 07:35:22] [Rank 0] step:7921/10000 train_time:323359ms step_avg:40.82ms +[2025-09-11 07:35:22] [Rank 0] step:7921/10000 train_time:323359ms step_avg:40.82ms +[2025-09-11 07:35:23] [Rank 0] step:7941/10000 train_time:324062ms step_avg:40.81ms +[2025-09-11 07:35:23] [Rank 0] step:7941/10000 train_time:324062ms step_avg:40.81ms +[2025-09-11 07:35:23] [Rank 0] step:7961/10000 train_time:324762ms step_avg:40.79ms +[2025-09-11 07:35:23] [Rank 0] step:7961/10000 train_time:324762ms step_avg:40.79ms +[2025-09-11 07:35:24] [Rank 0] step:7981/10000 train_time:325466ms step_avg:40.78ms +[2025-09-11 07:35:24] [Rank 0] step:7981/10000 train_time:325466ms step_avg:40.78ms +[2025-09-11 07:35:25] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 07:35:25] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 07:35:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 07:35:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 07:35:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 07:35:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 07:35:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:35:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:35:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 07:35:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 07:35:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 07:35:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 07:35:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 07:35:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 07:35:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 07:35:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 07:35:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 07:35:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 07:35:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 07:35:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 07:35:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 07:35:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 07:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 07:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 07:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 07:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 07:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 07:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 07:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 07:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 07:35:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 07:35:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 07:35:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 07:35:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 07:35:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 07:35:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 07:35:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 07:35:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 07:35:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 07:35:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 07:35:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 07:35:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 07:35:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 07:35:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 07:35:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 07:35:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 07:35:35] [Rank 0] PRINT: step:8000/10000 val_loss:5.3519 total_sharp:1.2620e-02 L1_sharp:2.9361e-03 L2_sharp:1.5140e-03 L3_sharp:1.1158e-03 L4_sharp:1.1766e-03 L5_sharp:2.0335e-03 L6_sharp:1.5239e-03 L7_sharp:1.4293e-03 L8_sharp:1.9825e-03 L9_sharp:2.1439e-03 L10_sharp:2.7677e-03 L11_sharp:3.3555e-03 L12_sharp:1.0537e-02 total_fnorm:2.1250e+00 total_l1_linf:3.8880e+03 total_spectral:1.0547e+00 L1_fnorm:6.3281e-01 L2_fnorm:5.8984e-01 L3_fnorm:5.9375e-01 L4_fnorm:5.9375e-01 L5_fnorm:5.8203e-01 L6_fnorm:5.8984e-01 L7_fnorm:5.8984e-01 L8_fnorm:5.7812e-01 L9_fnorm:5.8984e-01 L10_fnorm:5.9375e-01 L11_fnorm:5.8984e-01 L12_fnorm:5.4297e-01 L1_l1linf:1.2207e-01 L2_l1linf:1.2012e-01 L3_l1linf:1.1182e-01 L4_l1linf:1.0742e-01 L5_l1linf:1.0156e-01 L6_l1linf:1.0010e-01 L7_l1linf:1.0107e-01 L8_l1linf:9.6680e-02 L9_l1linf:9.7656e-02 L10_l1linf:9.8633e-02 L11_l1linf:9.0820e-02 L12_l1linf:8.3008e-02 L1_spectral:8.3661e-03 L2_spectral:8.3927e-03 L3_spectral:8.3386e-03 L4_spectral:8.2997e-03 L5_spectral:8.3155e-03 L6_spectral:8.3186e-03 L7_spectral:8.3454e-03 L8_spectral:8.3260e-03 L9_spectral:8.3559e-03 L10_spectral:8.3763e-03 L11_spectral:8.3665e-03 L12_spectral:8.1285e-03 train_time:326148ms step_avg:40.77ms +[2025-09-11 07:35:35] [Rank 0] PRINT: step:8000/10000 
val_loss:5.3519 total_sharp:1.2620e-02 L1_sharp:2.9361e-03 L2_sharp:1.5140e-03 L3_sharp:1.1158e-03 L4_sharp:1.1766e-03 L5_sharp:2.0335e-03 L6_sharp:1.5239e-03 L7_sharp:1.4293e-03 L8_sharp:1.9825e-03 L9_sharp:2.1439e-03 L10_sharp:2.7677e-03 L11_sharp:3.3555e-03 L12_sharp:1.0537e-02 total_fnorm:2.1250e+00 total_l1_linf:3.8880e+03 total_spectral:1.0547e+00 L1_fnorm:6.3281e-01 L2_fnorm:5.8984e-01 L3_fnorm:5.9375e-01 L4_fnorm:5.9375e-01 L5_fnorm:5.8203e-01 L6_fnorm:5.8984e-01 L7_fnorm:5.8984e-01 L8_fnorm:5.7812e-01 L9_fnorm:5.8984e-01 L10_fnorm:5.9375e-01 L11_fnorm:5.8984e-01 L12_fnorm:5.4297e-01 L1_l1linf:1.2207e-01 L2_l1linf:1.2012e-01 L3_l1linf:1.1182e-01 L4_l1linf:1.0742e-01 L5_l1linf:1.0156e-01 L6_l1linf:1.0010e-01 L7_l1linf:1.0107e-01 L8_l1linf:9.6680e-02 L9_l1linf:9.7656e-02 L10_l1linf:9.8633e-02 L11_l1linf:9.0820e-02 L12_l1linf:8.3008e-02 L1_spectral:8.3661e-03 L2_spectral:8.3927e-03 L3_spectral:8.3386e-03 L4_spectral:8.2997e-03 L5_spectral:8.3155e-03 L6_spectral:8.3186e-03 L7_spectral:8.3454e-03 L8_spectral:8.3260e-03 L9_spectral:8.3559e-03 L10_spectral:8.3763e-03 L11_spectral:8.3665e-03 L12_spectral:8.1285e-03 train_time:326148ms step_avg:40.77ms +[2025-09-11 07:35:36] [Rank 0] step:8001/10000 train_time:327330ms step_avg:40.91ms +[2025-09-11 07:35:36] [Rank 0] step:8001/10000 train_time:327330ms step_avg:40.91ms +[2025-09-11 07:35:36] [Rank 0] step:8021/10000 train_time:328022ms step_avg:40.90ms +[2025-09-11 07:35:36] [Rank 0] step:8021/10000 train_time:328022ms step_avg:40.90ms +[2025-09-11 07:35:37] [Rank 0] step:8041/10000 train_time:328724ms step_avg:40.88ms +[2025-09-11 07:35:37] [Rank 0] step:8041/10000 train_time:328724ms step_avg:40.88ms +[2025-09-11 07:35:38] [Rank 0] step:8061/10000 train_time:329429ms step_avg:40.87ms +[2025-09-11 07:35:38] [Rank 0] step:8061/10000 train_time:329429ms step_avg:40.87ms +[2025-09-11 07:35:39] [Rank 0] step:8081/10000 train_time:330128ms step_avg:40.85ms +[2025-09-11 07:35:39] [Rank 0] step:8081/10000 
train_time:330128ms step_avg:40.85ms +[2025-09-11 07:35:39] [Rank 0] step:8101/10000 train_time:330827ms step_avg:40.84ms +[2025-09-11 07:35:39] [Rank 0] step:8101/10000 train_time:330827ms step_avg:40.84ms +[2025-09-11 07:35:40] [Rank 0] step:8121/10000 train_time:331532ms step_avg:40.82ms +[2025-09-11 07:35:40] [Rank 0] step:8121/10000 train_time:331532ms step_avg:40.82ms +[2025-09-11 07:35:41] [Rank 0] step:8141/10000 train_time:332974ms step_avg:40.90ms +[2025-09-11 07:35:41] [Rank 0] step:8141/10000 train_time:332974ms step_avg:40.90ms +[2025-09-11 07:35:42] [Rank 0] step:8161/10000 train_time:333678ms step_avg:40.89ms +[2025-09-11 07:35:42] [Rank 0] step:8161/10000 train_time:333678ms step_avg:40.89ms +[2025-09-11 07:35:43] [Rank 0] step:8181/10000 train_time:334392ms step_avg:40.87ms +[2025-09-11 07:35:43] [Rank 0] step:8181/10000 train_time:334392ms step_avg:40.87ms +[2025-09-11 07:35:44] [Rank 0] step:8201/10000 train_time:335100ms step_avg:40.86ms +[2025-09-11 07:35:44] [Rank 0] step:8201/10000 train_time:335100ms step_avg:40.86ms +[2025-09-11 07:35:44] [Rank 0] step:8221/10000 train_time:335808ms step_avg:40.85ms +[2025-09-11 07:35:44] [Rank 0] step:8221/10000 train_time:335808ms step_avg:40.85ms +[2025-09-11 07:35:45] [Rank 0] step:8241/10000 train_time:336525ms step_avg:40.84ms +[2025-09-11 07:35:45] [Rank 0] step:8241/10000 train_time:336525ms step_avg:40.84ms +[2025-09-11 07:35:46] [Rank 0] step:8261/10000 train_time:337232ms step_avg:40.82ms +[2025-09-11 07:35:46] [Rank 0] step:8261/10000 train_time:337232ms step_avg:40.82ms +[2025-09-11 07:35:46] [Rank 0] step:8281/10000 train_time:337936ms step_avg:40.81ms +[2025-09-11 07:35:46] [Rank 0] step:8281/10000 train_time:337936ms step_avg:40.81ms +[2025-09-11 07:35:47] [Rank 0] step:8301/10000 train_time:338643ms step_avg:40.80ms +[2025-09-11 07:35:47] [Rank 0] step:8301/10000 train_time:338643ms step_avg:40.80ms +[2025-09-11 07:35:48] [Rank 0] step:8321/10000 train_time:339349ms step_avg:40.78ms 
+[2025-09-11 07:35:48] [Rank 0] step:8321/10000 train_time:339349ms step_avg:40.78ms +[2025-09-11 07:35:48] [Rank 0] step:8341/10000 train_time:340063ms step_avg:40.77ms +[2025-09-11 07:35:48] [Rank 0] step:8341/10000 train_time:340063ms step_avg:40.77ms +[2025-09-11 07:35:49] [Rank 0] step:8361/10000 train_time:340766ms step_avg:40.76ms +[2025-09-11 07:35:49] [Rank 0] step:8361/10000 train_time:340766ms step_avg:40.76ms +[2025-09-11 07:35:50] [Rank 0] step:8381/10000 train_time:341476ms step_avg:40.74ms +[2025-09-11 07:35:50] [Rank 0] step:8381/10000 train_time:341476ms step_avg:40.74ms +[2025-09-11 07:35:51] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 07:35:51] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 07:35:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 07:35:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 07:35:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 07:35:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 07:35:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:35:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:35:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 07:35:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 07:35:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 07:35:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 07:35:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 07:35:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 07:35:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 07:35:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 07:35:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 07:35:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 07:35:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 07:35:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 07:35:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 07:35:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 07:35:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 07:35:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 07:35:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 07:35:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 07:35:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 07:35:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 07:35:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 07:35:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 07:35:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 07:35:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 07:35:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 07:35:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 07:35:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 07:35:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 07:35:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 07:35:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 07:35:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 07:35:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 07:36:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 07:36:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 07:36:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 07:36:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 07:36:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 07:36:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:36:01] [Rank 0] PRINT: step:8400/10000 val_loss:5.3389 total_sharp:9.9966e-03 L1_sharp:3.9500e-03 L2_sharp:9.8887e-04 L3_sharp:1.3651e-03 L4_sharp:8.6483e-04 L5_sharp:1.5456e-03 L6_sharp:1.2963e-03 L7_sharp:1.4299e-03 L8_sharp:1.6362e-03 L9_sharp:1.7864e-03 L10_sharp:2.1609e-03 L11_sharp:2.6630e-03 L12_sharp:9.1275e-03 total_fnorm:1.6250e+00 total_l1_linf:2.7840e+03 total_spectral:8.2812e-01 L1_fnorm:5.0781e-01 L2_fnorm:4.6484e-01 L3_fnorm:4.6484e-01 L4_fnorm:4.6484e-01 L5_fnorm:4.5703e-01 L6_fnorm:4.5703e-01 L7_fnorm:4.5898e-01 L8_fnorm:4.5117e-01 L9_fnorm:4.5898e-01 L10_fnorm:4.6094e-01 L11_fnorm:4.6094e-01 L12_fnorm:4.2188e-01 L1_l1linf:8.6914e-02 L2_l1linf:8.5449e-02 L3_l1linf:8.1055e-02 L4_l1linf:7.8125e-02 L5_l1linf:7.2266e-02 L6_l1linf:7.0801e-02 L7_l1linf:6.9824e-02 L8_l1linf:7.0312e-02 L9_l1linf:6.9336e-02 L10_l1linf:7.4219e-02 L11_l1linf:6.7383e-02 L12_l1linf:5.9326e-02 L1_spectral:7.1518e-03 L2_spectral:6.8062e-03 L3_spectral:6.7605e-03 L4_spectral:6.6419e-03 L5_spectral:6.7207e-03 L6_spectral:6.6327e-03 L7_spectral:6.6760e-03 L8_spectral:6.6999e-03 L9_spectral:6.6762e-03 L10_spectral:6.6283e-03 L11_spectral:6.6800e-03 L12_spectral:6.5318e-03 train_time:342164ms step_avg:40.73ms +[2025-09-11 07:36:01] [Rank 0] PRINT: step:8400/10000 val_loss:5.3389 total_sharp:9.9966e-03 L1_sharp:3.9500e-03 L2_sharp:9.8887e-04 L3_sharp:1.3651e-03 L4_sharp:8.6483e-04 L5_sharp:1.5456e-03 L6_sharp:1.2963e-03 L7_sharp:1.4299e-03 L8_sharp:1.6362e-03 L9_sharp:1.7864e-03 L10_sharp:2.1609e-03 L11_sharp:2.6630e-03 L12_sharp:9.1275e-03 total_fnorm:1.6250e+00 total_l1_linf:2.7840e+03 total_spectral:8.2812e-01 L1_fnorm:5.0781e-01 L2_fnorm:4.6484e-01 L3_fnorm:4.6484e-01 L4_fnorm:4.6484e-01 L5_fnorm:4.5703e-01 L6_fnorm:4.5703e-01 L7_fnorm:4.5898e-01 L8_fnorm:4.5117e-01 L9_fnorm:4.5898e-01 L10_fnorm:4.6094e-01 L11_fnorm:4.6094e-01 L12_fnorm:4.2188e-01 L1_l1linf:8.6914e-02 L2_l1linf:8.5449e-02 L3_l1linf:8.1055e-02 L4_l1linf:7.8125e-02 L5_l1linf:7.2266e-02 
L6_l1linf:7.0801e-02 L7_l1linf:6.9824e-02 L8_l1linf:7.0312e-02 L9_l1linf:6.9336e-02 L10_l1linf:7.4219e-02 L11_l1linf:6.7383e-02 L12_l1linf:5.9326e-02 L1_spectral:7.1518e-03 L2_spectral:6.8062e-03 L3_spectral:6.7605e-03 L4_spectral:6.6419e-03 L5_spectral:6.7207e-03 L6_spectral:6.6327e-03 L7_spectral:6.6760e-03 L8_spectral:6.6999e-03 L9_spectral:6.6762e-03 L10_spectral:6.6283e-03 L11_spectral:6.6800e-03 L12_spectral:6.5318e-03 train_time:342164ms step_avg:40.73ms +[2025-09-11 07:36:02] [Rank 0] step:8401/10000 train_time:343341ms step_avg:40.87ms +[2025-09-11 07:36:02] [Rank 0] step:8401/10000 train_time:343341ms step_avg:40.87ms +[2025-09-11 07:36:02] [Rank 0] step:8421/10000 train_time:344039ms step_avg:40.85ms +[2025-09-11 07:36:02] [Rank 0] step:8421/10000 train_time:344039ms step_avg:40.85ms +[2025-09-11 07:36:03] [Rank 0] step:8441/10000 train_time:344752ms step_avg:40.84ms +[2025-09-11 07:36:03] [Rank 0] step:8441/10000 train_time:344752ms step_avg:40.84ms +[2025-09-11 07:36:04] [Rank 0] step:8461/10000 train_time:345462ms step_avg:40.83ms +[2025-09-11 07:36:04] [Rank 0] step:8461/10000 train_time:345462ms step_avg:40.83ms +[2025-09-11 07:36:05] [Rank 0] step:8481/10000 train_time:346174ms step_avg:40.82ms +[2025-09-11 07:36:05] [Rank 0] step:8481/10000 train_time:346174ms step_avg:40.82ms +[2025-09-11 07:36:05] [Rank 0] step:8501/10000 train_time:346883ms step_avg:40.80ms +[2025-09-11 07:36:05] [Rank 0] step:8501/10000 train_time:346883ms step_avg:40.80ms +[2025-09-11 07:36:06] [Rank 0] step:8521/10000 train_time:347592ms step_avg:40.79ms +[2025-09-11 07:36:06] [Rank 0] step:8521/10000 train_time:347592ms step_avg:40.79ms +[2025-09-11 07:36:07] [Rank 0] step:8541/10000 train_time:348301ms step_avg:40.78ms +[2025-09-11 07:36:07] [Rank 0] step:8541/10000 train_time:348301ms step_avg:40.78ms +[2025-09-11 07:36:07] [Rank 0] step:8561/10000 train_time:349014ms step_avg:40.77ms +[2025-09-11 07:36:07] [Rank 0] step:8561/10000 train_time:349014ms step_avg:40.77ms 
+[2025-09-11 07:36:08] [Rank 0] step:8581/10000 train_time:349725ms step_avg:40.76ms +[2025-09-11 07:36:08] [Rank 0] step:8581/10000 train_time:349725ms step_avg:40.76ms +[2025-09-11 07:36:09] [Rank 0] step:8601/10000 train_time:350435ms step_avg:40.74ms +[2025-09-11 07:36:09] [Rank 0] step:8601/10000 train_time:350435ms step_avg:40.74ms +[2025-09-11 07:36:10] [Rank 0] step:8621/10000 train_time:351142ms step_avg:40.73ms +[2025-09-11 07:36:10] [Rank 0] step:8621/10000 train_time:351142ms step_avg:40.73ms +[2025-09-11 07:36:10] [Rank 0] step:8641/10000 train_time:351852ms step_avg:40.72ms +[2025-09-11 07:36:10] [Rank 0] step:8641/10000 train_time:351852ms step_avg:40.72ms +[2025-09-11 07:36:11] [Rank 0] step:8661/10000 train_time:352561ms step_avg:40.71ms +[2025-09-11 07:36:11] [Rank 0] step:8661/10000 train_time:352561ms step_avg:40.71ms +[2025-09-11 07:36:12] [Rank 0] step:8681/10000 train_time:353272ms step_avg:40.69ms +[2025-09-11 07:36:12] [Rank 0] step:8681/10000 train_time:353272ms step_avg:40.69ms +[2025-09-11 07:36:12] [Rank 0] step:8701/10000 train_time:353979ms step_avg:40.68ms +[2025-09-11 07:36:12] [Rank 0] step:8701/10000 train_time:353979ms step_avg:40.68ms +[2025-09-11 07:36:13] [Rank 0] step:8721/10000 train_time:354691ms step_avg:40.67ms +[2025-09-11 07:36:13] [Rank 0] step:8721/10000 train_time:354691ms step_avg:40.67ms +[2025-09-11 07:36:14] [Rank 0] step:8741/10000 train_time:355397ms step_avg:40.66ms +[2025-09-11 07:36:14] [Rank 0] step:8741/10000 train_time:355397ms step_avg:40.66ms +[2025-09-11 07:36:14] [Rank 0] step:8761/10000 train_time:356109ms step_avg:40.65ms +[2025-09-11 07:36:14] [Rank 0] step:8761/10000 train_time:356109ms step_avg:40.65ms +[2025-09-11 07:36:15] [Rank 0] step:8781/10000 train_time:356816ms step_avg:40.63ms +[2025-09-11 07:36:15] [Rank 0] step:8781/10000 train_time:356816ms step_avg:40.63ms +[2025-09-11 07:36:16] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 07:36:16] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 07:36:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 07:36:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 07:36:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 07:36:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 07:36:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:36:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:36:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 07:36:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 07:36:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 07:36:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 07:36:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 07:36:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 07:36:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 07:36:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 07:36:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 07:36:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 07:36:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 07:36:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 07:36:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 07:36:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 07:36:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 07:36:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 07:36:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 07:36:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 07:36:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 07:36:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 07:36:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 07:36:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 07:36:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 07:36:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 07:36:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 07:36:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 07:36:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 07:36:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 07:36:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 07:36:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 07:36:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 07:36:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 07:36:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 07:36:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 07:36:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 07:36:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 07:36:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 07:36:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 07:36:26] [Rank 0] PRINT: step:8800/10000 val_loss:5.3304 total_sharp:8.1731e-03 L1_sharp:2.5066e-03 L2_sharp:6.9883e-04 L3_sharp:8.1734e-04 L4_sharp:7.9869e-04 L5_sharp:1.4866e-03 L6_sharp:1.0715e-03 L7_sharp:1.1147e-03 L8_sharp:1.5927e-03 L9_sharp:1.8057e-03 L10_sharp:2.0066e-03 L11_sharp:2.3728e-03 L12_sharp:6.7202e-03 total_fnorm:1.2188e+00 total_l1_linf:1.8400e+03 total_spectral:6.0938e-01 L1_fnorm:3.8867e-01 L2_fnorm:3.4375e-01 L3_fnorm:3.4570e-01 L4_fnorm:3.4180e-01 L5_fnorm:3.3594e-01 L6_fnorm:3.3789e-01 L7_fnorm:3.3789e-01 L8_fnorm:3.3203e-01 L9_fnorm:3.3594e-01 L10_fnorm:3.3984e-01 L11_fnorm:3.3594e-01 L12_fnorm:3.1445e-01 L1_l1linf:6.5430e-02 L2_l1linf:5.7861e-02 L3_l1linf:5.3467e-02 L4_l1linf:5.2979e-02 L5_l1linf:4.9561e-02 L6_l1linf:4.7363e-02 L7_l1linf:4.5898e-02 L8_l1linf:4.4678e-02 L9_l1linf:4.5166e-02 L10_l1linf:4.6387e-02 L11_l1linf:4.5166e-02 L12_l1linf:4.3213e-02 L1_spectral:5.8360e-03 L2_spectral:5.2531e-03 L3_spectral:5.0650e-03 L4_spectral:5.0028e-03 L5_spectral:5.0854e-03 L6_spectral:5.0145e-03 L7_spectral:5.0754e-03 L8_spectral:5.0637e-03 L9_spectral:4.9875e-03 L10_spectral:5.0094e-03 L11_spectral:5.0175e-03 L12_spectral:4.9333e-03 train_time:357502ms step_avg:40.63ms +[2025-09-11 07:36:26] [Rank 0] PRINT: step:8800/10000 
val_loss:5.3304 total_sharp:8.1731e-03 L1_sharp:2.5066e-03 L2_sharp:6.9883e-04 L3_sharp:8.1734e-04 L4_sharp:7.9869e-04 L5_sharp:1.4866e-03 L6_sharp:1.0715e-03 L7_sharp:1.1147e-03 L8_sharp:1.5927e-03 L9_sharp:1.8057e-03 L10_sharp:2.0066e-03 L11_sharp:2.3728e-03 L12_sharp:6.7202e-03 total_fnorm:1.2188e+00 total_l1_linf:1.8400e+03 total_spectral:6.0938e-01 L1_fnorm:3.8867e-01 L2_fnorm:3.4375e-01 L3_fnorm:3.4570e-01 L4_fnorm:3.4180e-01 L5_fnorm:3.3594e-01 L6_fnorm:3.3789e-01 L7_fnorm:3.3789e-01 L8_fnorm:3.3203e-01 L9_fnorm:3.3594e-01 L10_fnorm:3.3984e-01 L11_fnorm:3.3594e-01 L12_fnorm:3.1445e-01 L1_l1linf:6.5430e-02 L2_l1linf:5.7861e-02 L3_l1linf:5.3467e-02 L4_l1linf:5.2979e-02 L5_l1linf:4.9561e-02 L6_l1linf:4.7363e-02 L7_l1linf:4.5898e-02 L8_l1linf:4.4678e-02 L9_l1linf:4.5166e-02 L10_l1linf:4.6387e-02 L11_l1linf:4.5166e-02 L12_l1linf:4.3213e-02 L1_spectral:5.8360e-03 L2_spectral:5.2531e-03 L3_spectral:5.0650e-03 L4_spectral:5.0028e-03 L5_spectral:5.0854e-03 L6_spectral:5.0145e-03 L7_spectral:5.0754e-03 L8_spectral:5.0637e-03 L9_spectral:4.9875e-03 L10_spectral:5.0094e-03 L11_spectral:5.0175e-03 L12_spectral:4.9333e-03 train_time:357502ms step_avg:40.63ms +[2025-09-11 07:36:27] [Rank 0] step:8801/10000 train_time:358706ms step_avg:40.76ms +[2025-09-11 07:36:27] [Rank 0] step:8801/10000 train_time:358706ms step_avg:40.76ms +[2025-09-11 07:36:28] [Rank 0] step:8821/10000 train_time:359431ms step_avg:40.75ms +[2025-09-11 07:36:28] [Rank 0] step:8821/10000 train_time:359431ms step_avg:40.75ms +[2025-09-11 07:36:28] [Rank 0] step:8841/10000 train_time:360145ms step_avg:40.74ms +[2025-09-11 07:36:28] [Rank 0] step:8841/10000 train_time:360145ms step_avg:40.74ms +[2025-09-11 07:36:29] [Rank 0] step:8861/10000 train_time:360855ms step_avg:40.72ms +[2025-09-11 07:36:29] [Rank 0] step:8861/10000 train_time:360855ms step_avg:40.72ms +[2025-09-11 07:36:30] [Rank 0] step:8881/10000 train_time:362096ms step_avg:40.77ms +[2025-09-11 07:36:30] [Rank 0] step:8881/10000 
train_time:362096ms step_avg:40.77ms +[2025-09-11 07:36:31] [Rank 0] step:8901/10000 train_time:362809ms step_avg:40.76ms +[2025-09-11 07:36:31] [Rank 0] step:8901/10000 train_time:362809ms step_avg:40.76ms +[2025-09-11 07:36:32] [Rank 0] step:8921/10000 train_time:363515ms step_avg:40.75ms +[2025-09-11 07:36:32] [Rank 0] step:8921/10000 train_time:363515ms step_avg:40.75ms +[2025-09-11 07:36:33] [Rank 0] step:8941/10000 train_time:364491ms step_avg:40.77ms +[2025-09-11 07:36:33] [Rank 0] step:8941/10000 train_time:364491ms step_avg:40.77ms +[2025-09-11 07:36:33] [Rank 0] step:8961/10000 train_time:365209ms step_avg:40.76ms +[2025-09-11 07:36:33] [Rank 0] step:8961/10000 train_time:365209ms step_avg:40.76ms +[2025-09-11 07:36:34] [Rank 0] step:8981/10000 train_time:365922ms step_avg:40.74ms +[2025-09-11 07:36:34] [Rank 0] step:8981/10000 train_time:365922ms step_avg:40.74ms +[2025-09-11 07:36:35] [Rank 0] step:9001/10000 train_time:366626ms step_avg:40.73ms +[2025-09-11 07:36:35] [Rank 0] step:9001/10000 train_time:366626ms step_avg:40.73ms +[2025-09-11 07:36:35] [Rank 0] step:9021/10000 train_time:367337ms step_avg:40.72ms +[2025-09-11 07:36:35] [Rank 0] step:9021/10000 train_time:367337ms step_avg:40.72ms +[2025-09-11 07:36:36] [Rank 0] step:9041/10000 train_time:368049ms step_avg:40.71ms +[2025-09-11 07:36:36] [Rank 0] step:9041/10000 train_time:368049ms step_avg:40.71ms +[2025-09-11 07:36:37] [Rank 0] step:9061/10000 train_time:368757ms step_avg:40.70ms +[2025-09-11 07:36:37] [Rank 0] step:9061/10000 train_time:368757ms step_avg:40.70ms +[2025-09-11 07:36:38] [Rank 0] step:9081/10000 train_time:369468ms step_avg:40.69ms +[2025-09-11 07:36:38] [Rank 0] step:9081/10000 train_time:369468ms step_avg:40.69ms +[2025-09-11 07:36:38] [Rank 0] step:9101/10000 train_time:370181ms step_avg:40.67ms +[2025-09-11 07:36:38] [Rank 0] step:9101/10000 train_time:370181ms step_avg:40.67ms +[2025-09-11 07:36:39] [Rank 0] step:9121/10000 train_time:370898ms step_avg:40.66ms 
+[2025-09-11 07:36:39] [Rank 0] step:9121/10000 train_time:370898ms step_avg:40.66ms +[2025-09-11 07:36:40] [Rank 0] step:9141/10000 train_time:371607ms step_avg:40.65ms +[2025-09-11 07:36:40] [Rank 0] step:9141/10000 train_time:371607ms step_avg:40.65ms +[2025-09-11 07:36:40] [Rank 0] step:9161/10000 train_time:372319ms step_avg:40.64ms +[2025-09-11 07:36:40] [Rank 0] step:9161/10000 train_time:372319ms step_avg:40.64ms +[2025-09-11 07:36:41] [Rank 0] step:9181/10000 train_time:373033ms step_avg:40.63ms +[2025-09-11 07:36:41] [Rank 0] step:9181/10000 train_time:373033ms step_avg:40.63ms +[2025-09-11 07:36:42] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 07:36:42] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 07:36:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 07:36:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 07:36:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 07:36:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 07:36:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:36:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:36:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 07:36:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 07:36:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 07:36:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 07:36:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 07:36:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 07:36:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 07:36:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 07:36:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 07:36:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 07:36:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 07:36:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 07:36:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 07:36:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 07:36:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 07:36:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 07:36:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 07:36:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 07:36:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 07:36:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 07:36:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 07:36:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 07:36:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 07:36:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 07:36:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 07:36:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 07:36:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 07:36:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 07:36:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 07:36:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 07:36:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 07:36:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 07:36:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 07:36:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 07:36:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 07:36:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 07:36:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 07:36:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:36:52] [Rank 0] PRINT: step:9200/10000 val_loss:5.3167 total_sharp:6.7264e-03 L1_sharp:2.2786e-03 L2_sharp:9.4644e-04 L3_sharp:7.6131e-04 L4_sharp:5.4162e-04 L5_sharp:9.5449e-04 L6_sharp:1.0371e-03 L7_sharp:8.3224e-04 L8_sharp:1.2513e-03 L9_sharp:1.4819e-03 L10_sharp:1.6601e-03 L11_sharp:2.0206e-03 L12_sharp:6.2197e-03 total_fnorm:8.0469e-01 total_l1_linf:1.0640e+03 total_spectral:4.0234e-01 L1_fnorm:2.6758e-01 L2_fnorm:2.2656e-01 L3_fnorm:2.2852e-01 L4_fnorm:2.2559e-01 L5_fnorm:2.2266e-01 L6_fnorm:2.2363e-01 L7_fnorm:2.2363e-01 L8_fnorm:2.1875e-01 L9_fnorm:2.2266e-01 L10_fnorm:2.2363e-01 L11_fnorm:2.2168e-01 L12_fnorm:2.0410e-01 L1_l1linf:4.3945e-02 L2_l1linf:3.4912e-02 L3_l1linf:3.2471e-02 L4_l1linf:3.1494e-02 L5_l1linf:2.8931e-02 L6_l1linf:2.8320e-02 L7_l1linf:2.8442e-02 L8_l1linf:2.7344e-02 L9_l1linf:2.7466e-02 L10_l1linf:2.7466e-02 L11_l1linf:2.6245e-02 L12_l1linf:2.4414e-02 L1_spectral:4.4196e-03 L2_spectral:3.5372e-03 L3_spectral:3.4217e-03 L4_spectral:3.3578e-03 L5_spectral:3.4750e-03 L6_spectral:3.3741e-03 L7_spectral:3.4136e-03 L8_spectral:3.4254e-03 L9_spectral:3.3940e-03 L10_spectral:3.3658e-03 L11_spectral:3.3510e-03 L12_spectral:3.2905e-03 train_time:373727ms step_avg:40.62ms +[2025-09-11 07:36:52] [Rank 0] PRINT: step:9200/10000 val_loss:5.3167 total_sharp:6.7264e-03 L1_sharp:2.2786e-03 L2_sharp:9.4644e-04 L3_sharp:7.6131e-04 L4_sharp:5.4162e-04 L5_sharp:9.5449e-04 L6_sharp:1.0371e-03 L7_sharp:8.3224e-04 L8_sharp:1.2513e-03 L9_sharp:1.4819e-03 L10_sharp:1.6601e-03 L11_sharp:2.0206e-03 L12_sharp:6.2197e-03 total_fnorm:8.0469e-01 total_l1_linf:1.0640e+03 total_spectral:4.0234e-01 L1_fnorm:2.6758e-01 L2_fnorm:2.2656e-01 L3_fnorm:2.2852e-01 L4_fnorm:2.2559e-01 L5_fnorm:2.2266e-01 L6_fnorm:2.2363e-01 L7_fnorm:2.2363e-01 L8_fnorm:2.1875e-01 L9_fnorm:2.2266e-01 L10_fnorm:2.2363e-01 L11_fnorm:2.2168e-01 L12_fnorm:2.0410e-01 L1_l1linf:4.3945e-02 L2_l1linf:3.4912e-02 L3_l1linf:3.2471e-02 L4_l1linf:3.1494e-02 L5_l1linf:2.8931e-02 
L6_l1linf:2.8320e-02 L7_l1linf:2.8442e-02 L8_l1linf:2.7344e-02 L9_l1linf:2.7466e-02 L10_l1linf:2.7466e-02 L11_l1linf:2.6245e-02 L12_l1linf:2.4414e-02 L1_spectral:4.4196e-03 L2_spectral:3.5372e-03 L3_spectral:3.4217e-03 L4_spectral:3.3578e-03 L5_spectral:3.4750e-03 L6_spectral:3.3741e-03 L7_spectral:3.4136e-03 L8_spectral:3.4254e-03 L9_spectral:3.3940e-03 L10_spectral:3.3658e-03 L11_spectral:3.3510e-03 L12_spectral:3.2905e-03 train_time:373727ms step_avg:40.62ms +[2025-09-11 07:36:53] [Rank 0] step:9201/10000 train_time:374945ms step_avg:40.75ms +[2025-09-11 07:36:53] [Rank 0] step:9201/10000 train_time:374945ms step_avg:40.75ms +[2025-09-11 07:36:54] [Rank 0] step:9221/10000 train_time:375661ms step_avg:40.74ms +[2025-09-11 07:36:54] [Rank 0] step:9221/10000 train_time:375661ms step_avg:40.74ms +[2025-09-11 07:36:54] [Rank 0] step:9241/10000 train_time:376371ms step_avg:40.73ms +[2025-09-11 07:36:54] [Rank 0] step:9241/10000 train_time:376371ms step_avg:40.73ms +[2025-09-11 07:36:55] [Rank 0] step:9261/10000 train_time:377083ms step_avg:40.72ms +[2025-09-11 07:36:55] [Rank 0] step:9261/10000 train_time:377083ms step_avg:40.72ms +[2025-09-11 07:36:56] [Rank 0] step:9281/10000 train_time:377795ms step_avg:40.71ms +[2025-09-11 07:36:56] [Rank 0] step:9281/10000 train_time:377795ms step_avg:40.71ms +[2025-09-11 07:36:57] [Rank 0] step:9301/10000 train_time:378504ms step_avg:40.70ms +[2025-09-11 07:36:57] [Rank 0] step:9301/10000 train_time:378504ms step_avg:40.70ms +[2025-09-11 07:36:57] [Rank 0] step:9321/10000 train_time:379216ms step_avg:40.68ms +[2025-09-11 07:36:57] [Rank 0] step:9321/10000 train_time:379216ms step_avg:40.68ms +[2025-09-11 07:36:58] [Rank 0] step:9341/10000 train_time:379922ms step_avg:40.67ms +[2025-09-11 07:36:58] [Rank 0] step:9341/10000 train_time:379922ms step_avg:40.67ms +[2025-09-11 07:36:59] [Rank 0] step:9361/10000 train_time:380631ms step_avg:40.66ms +[2025-09-11 07:36:59] [Rank 0] step:9361/10000 train_time:380631ms step_avg:40.66ms 
+[2025-09-11 07:36:59] [Rank 0] step:9381/10000 train_time:381339ms step_avg:40.65ms +[2025-09-11 07:36:59] [Rank 0] step:9381/10000 train_time:381339ms step_avg:40.65ms +[2025-09-11 07:37:00] [Rank 0] step:9401/10000 train_time:382050ms step_avg:40.64ms +[2025-09-11 07:37:00] [Rank 0] step:9401/10000 train_time:382050ms step_avg:40.64ms +[2025-09-11 07:37:01] [Rank 0] step:9421/10000 train_time:382762ms step_avg:40.63ms +[2025-09-11 07:37:01] [Rank 0] step:9421/10000 train_time:382762ms step_avg:40.63ms +[2025-09-11 07:37:02] [Rank 0] step:9441/10000 train_time:383475ms step_avg:40.62ms +[2025-09-11 07:37:02] [Rank 0] step:9441/10000 train_time:383475ms step_avg:40.62ms +[2025-09-11 07:37:02] [Rank 0] step:9461/10000 train_time:384186ms step_avg:40.61ms +[2025-09-11 07:37:02] [Rank 0] step:9461/10000 train_time:384186ms step_avg:40.61ms +[2025-09-11 07:37:03] [Rank 0] step:9481/10000 train_time:384898ms step_avg:40.60ms +[2025-09-11 07:37:03] [Rank 0] step:9481/10000 train_time:384898ms step_avg:40.60ms +[2025-09-11 07:37:04] [Rank 0] step:9501/10000 train_time:385609ms step_avg:40.59ms +[2025-09-11 07:37:04] [Rank 0] step:9501/10000 train_time:385609ms step_avg:40.59ms +[2025-09-11 07:37:04] [Rank 0] step:9521/10000 train_time:386322ms step_avg:40.58ms +[2025-09-11 07:37:04] [Rank 0] step:9521/10000 train_time:386322ms step_avg:40.58ms +[2025-09-11 07:37:05] [Rank 0] step:9541/10000 train_time:387030ms step_avg:40.56ms +[2025-09-11 07:37:05] [Rank 0] step:9541/10000 train_time:387030ms step_avg:40.56ms +[2025-09-11 07:37:06] [Rank 0] step:9561/10000 train_time:387741ms step_avg:40.55ms +[2025-09-11 07:37:06] [Rank 0] step:9561/10000 train_time:387741ms step_avg:40.55ms +[2025-09-11 07:37:06] [Rank 0] step:9581/10000 train_time:388453ms step_avg:40.54ms +[2025-09-11 07:37:06] [Rank 0] step:9581/10000 train_time:388453ms step_avg:40.54ms +[2025-09-11 07:37:07] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 07:37:07] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 07:37:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 07:37:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 07:37:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 07:37:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 07:37:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:37:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:37:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 07:37:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 07:37:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 07:37:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 07:37:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 07:37:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 07:37:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 07:37:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 07:37:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 07:37:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 07:37:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 07:37:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 07:37:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 07:37:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 07:37:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 07:37:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 07:37:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 07:37:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 07:37:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 07:37:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 07:37:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 07:37:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 07:37:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 07:37:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 07:37:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 07:37:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 07:37:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 07:37:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 07:37:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 07:37:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 07:37:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 07:37:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 07:37:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 07:37:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 07:37:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 07:37:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 07:37:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 07:37:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 07:37:17] [Rank 0] PRINT: step:9600/10000 val_loss:5.3106 total_sharp:4.4126e-03 L1_sharp:5.3706e-04 L2_sharp:6.4746e-04 L3_sharp:6.3895e-04 L4_sharp:4.0296e-04 L5_sharp:7.5899e-04 L6_sharp:6.6281e-04 L7_sharp:6.6740e-04 L8_sharp:9.1207e-04 L9_sharp:1.0282e-03 L10_sharp:1.1857e-03 L11_sharp:1.5424e-03 L12_sharp:5.5268e-03 total_fnorm:4.6094e-01 total_l1_linf:5.0200e+02 total_spectral:2.3047e-01 L1_fnorm:1.5820e-01 L2_fnorm:1.2793e-01 L3_fnorm:1.2988e-01 L4_fnorm:1.2793e-01 L5_fnorm:1.2695e-01 L6_fnorm:1.2598e-01 L7_fnorm:1.2695e-01 L8_fnorm:1.2500e-01 L9_fnorm:1.2695e-01 L10_fnorm:1.2695e-01 L11_fnorm:1.2598e-01 L12_fnorm:1.1523e-01 L1_l1linf:2.5269e-02 L2_l1linf:1.6479e-02 L3_l1linf:1.5320e-02 L4_l1linf:1.6113e-02 L5_l1linf:1.4465e-02 L6_l1linf:1.4038e-02 L7_l1linf:1.3916e-02 L8_l1linf:1.3672e-02 L9_l1linf:1.3733e-02 L10_l1linf:1.3245e-02 L11_l1linf:1.3123e-02 L12_l1linf:1.2268e-02 L1_spectral:2.9071e-03 L2_spectral:2.0756e-03 L3_spectral:1.9859e-03 L4_spectral:1.9431e-03 L5_spectral:2.0256e-03 L6_spectral:1.9595e-03 L7_spectral:1.9783e-03 L8_spectral:2.0212e-03 L9_spectral:1.9960e-03 L10_spectral:1.9571e-03 L11_spectral:1.9605e-03 L12_spectral:1.8917e-03 train_time:389140ms step_avg:40.54ms +[2025-09-11 07:37:17] [Rank 0] PRINT: step:9600/10000 
val_loss:5.3106 total_sharp:4.4126e-03 L1_sharp:5.3706e-04 L2_sharp:6.4746e-04 L3_sharp:6.3895e-04 L4_sharp:4.0296e-04 L5_sharp:7.5899e-04 L6_sharp:6.6281e-04 L7_sharp:6.6740e-04 L8_sharp:9.1207e-04 L9_sharp:1.0282e-03 L10_sharp:1.1857e-03 L11_sharp:1.5424e-03 L12_sharp:5.5268e-03 total_fnorm:4.6094e-01 total_l1_linf:5.0200e+02 total_spectral:2.3047e-01 L1_fnorm:1.5820e-01 L2_fnorm:1.2793e-01 L3_fnorm:1.2988e-01 L4_fnorm:1.2793e-01 L5_fnorm:1.2695e-01 L6_fnorm:1.2598e-01 L7_fnorm:1.2695e-01 L8_fnorm:1.2500e-01 L9_fnorm:1.2695e-01 L10_fnorm:1.2695e-01 L11_fnorm:1.2598e-01 L12_fnorm:1.1523e-01 L1_l1linf:2.5269e-02 L2_l1linf:1.6479e-02 L3_l1linf:1.5320e-02 L4_l1linf:1.6113e-02 L5_l1linf:1.4465e-02 L6_l1linf:1.4038e-02 L7_l1linf:1.3916e-02 L8_l1linf:1.3672e-02 L9_l1linf:1.3733e-02 L10_l1linf:1.3245e-02 L11_l1linf:1.3123e-02 L12_l1linf:1.2268e-02 L1_spectral:2.9071e-03 L2_spectral:2.0756e-03 L3_spectral:1.9859e-03 L4_spectral:1.9431e-03 L5_spectral:2.0256e-03 L6_spectral:1.9595e-03 L7_spectral:1.9783e-03 L8_spectral:2.0212e-03 L9_spectral:1.9960e-03 L10_spectral:1.9571e-03 L11_spectral:1.9605e-03 L12_spectral:1.8917e-03 train_time:389140ms step_avg:40.54ms +[2025-09-11 07:37:18] [Rank 0] step:9601/10000 train_time:390378ms step_avg:40.66ms +[2025-09-11 07:37:18] [Rank 0] step:9601/10000 train_time:390378ms step_avg:40.66ms +[2025-09-11 07:37:19] [Rank 0] step:9621/10000 train_time:391107ms step_avg:40.65ms +[2025-09-11 07:37:19] [Rank 0] step:9621/10000 train_time:391107ms step_avg:40.65ms +[2025-09-11 07:37:20] [Rank 0] step:9641/10000 train_time:391823ms step_avg:40.64ms +[2025-09-11 07:37:20] [Rank 0] step:9641/10000 train_time:391823ms step_avg:40.64ms +[2025-09-11 07:37:21] [Rank 0] step:9661/10000 train_time:392546ms step_avg:40.63ms +[2025-09-11 07:37:21] [Rank 0] step:9661/10000 train_time:392546ms step_avg:40.63ms +[2025-09-11 07:37:21] [Rank 0] step:9681/10000 train_time:393260ms step_avg:40.62ms +[2025-09-11 07:37:21] [Rank 0] step:9681/10000 
train_time:393260ms step_avg:40.62ms +[2025-09-11 07:37:22] [Rank 0] step:9701/10000 train_time:393977ms step_avg:40.61ms +[2025-09-11 07:37:22] [Rank 0] step:9701/10000 train_time:393977ms step_avg:40.61ms +[2025-09-11 07:37:23] [Rank 0] step:9721/10000 train_time:394697ms step_avg:40.60ms +[2025-09-11 07:37:23] [Rank 0] step:9721/10000 train_time:394697ms step_avg:40.60ms +[2025-09-11 07:37:23] [Rank 0] step:9741/10000 train_time:395414ms step_avg:40.59ms +[2025-09-11 07:37:23] [Rank 0] step:9741/10000 train_time:395414ms step_avg:40.59ms +[2025-09-11 07:37:24] [Rank 0] step:9761/10000 train_time:396132ms step_avg:40.58ms +[2025-09-11 07:37:24] [Rank 0] step:9761/10000 train_time:396132ms step_avg:40.58ms +[2025-09-11 07:37:25] [Rank 0] step:9781/10000 train_time:396847ms step_avg:40.57ms +[2025-09-11 07:37:25] [Rank 0] step:9781/10000 train_time:396847ms step_avg:40.57ms +[2025-09-11 07:37:26] [Rank 0] step:9801/10000 train_time:397569ms step_avg:40.56ms +[2025-09-11 07:37:26] [Rank 0] step:9801/10000 train_time:397569ms step_avg:40.56ms +[2025-09-11 07:37:26] [Rank 0] step:9821/10000 train_time:398288ms step_avg:40.55ms +[2025-09-11 07:37:26] [Rank 0] step:9821/10000 train_time:398288ms step_avg:40.55ms +[2025-09-11 07:37:27] [Rank 0] step:9841/10000 train_time:399009ms step_avg:40.55ms +[2025-09-11 07:37:27] [Rank 0] step:9841/10000 train_time:399009ms step_avg:40.55ms +[2025-09-11 07:37:28] [Rank 0] step:9861/10000 train_time:399728ms step_avg:40.54ms +[2025-09-11 07:37:28] [Rank 0] step:9861/10000 train_time:399728ms step_avg:40.54ms +[2025-09-11 07:37:28] [Rank 0] step:9881/10000 train_time:400445ms step_avg:40.53ms +[2025-09-11 07:37:28] [Rank 0] step:9881/10000 train_time:400445ms step_avg:40.53ms +[2025-09-11 07:37:29] [Rank 0] step:9901/10000 train_time:401162ms step_avg:40.52ms +[2025-09-11 07:37:29] [Rank 0] step:9901/10000 train_time:401162ms step_avg:40.52ms +[2025-09-11 07:37:30] [Rank 0] step:9921/10000 train_time:401879ms step_avg:40.51ms 
+[2025-09-11 07:37:30] [Rank 0] step:9921/10000 train_time:401879ms step_avg:40.51ms +[2025-09-11 07:37:31] [Rank 0] step:9941/10000 train_time:402600ms step_avg:40.50ms +[2025-09-11 07:37:31] [Rank 0] step:9941/10000 train_time:402600ms step_avg:40.50ms +[2025-09-11 07:37:31] [Rank 0] step:9961/10000 train_time:403322ms step_avg:40.49ms +[2025-09-11 07:37:31] [Rank 0] step:9961/10000 train_time:403322ms step_avg:40.49ms +[2025-09-11 07:37:32] [Rank 0] step:9981/10000 train_time:404041ms step_avg:40.48ms +[2025-09-11 07:37:32] [Rank 0] step:9981/10000 train_time:404041ms step_avg:40.48ms +[2025-09-11 07:37:33] [Rank 0] step:10000/10000 train_time:405258ms step_avg:40.53ms +[2025-09-11 07:37:33] [Rank 0] step:10000/10000 train_time:405258ms step_avg:40.53ms +[2025-09-11 07:37:33] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 07:37:33] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 07:37:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 07:37:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 07:37:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 07:37:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 07:37:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:37:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:37:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 07:37:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 07:37:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 07:37:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 07:37:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 07:37:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 07:37:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 07:37:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 07:37:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 07:37:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 07:37:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 07:37:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 07:37:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 07:37:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 07:37:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 07:37:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 07:37:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 07:37:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 07:37:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 07:37:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 07:37:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 07:37:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 07:37:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 07:37:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 07:37:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 07:37:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 07:37:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 07:37:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 07:37:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 07:37:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 07:37:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 07:37:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 07:37:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 07:37:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 07:37:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 07:37:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 07:37:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 07:37:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:37:45] [Rank 0] PRINT: step:10000/10000 val_loss:5.3087 total_sharp:2.8673e-03 L1_sharp:1.1688e-03 L2_sharp:4.3740e-04 L3_sharp:3.8519e-04 L4_sharp:3.4110e-04 L5_sharp:5.6711e-04 L6_sharp:5.0097e-04 L7_sharp:5.6844e-04 L8_sharp:6.0056e-04 L9_sharp:7.8859e-04 L10_sharp:7.9550e-04 L11_sharp:1.1670e-03 L12_sharp:4.3504e-03 total_fnorm:1.8066e-01 total_l1_linf:1.4300e+02 total_spectral:8.9355e-02 L1_fnorm:6.4453e-02 L2_fnorm:5.0049e-02 L3_fnorm:5.0049e-02 L4_fnorm:4.9316e-02 L5_fnorm:4.8828e-02 L6_fnorm:4.8828e-02 L7_fnorm:4.8828e-02 L8_fnorm:4.8096e-02 L9_fnorm:4.8584e-02 L10_fnorm:4.9072e-02 L11_fnorm:4.8584e-02 L12_fnorm:4.4922e-02 L1_l1linf:8.8501e-03 L2_l1linf:5.0354e-03 L3_l1linf:5.1270e-03 L4_l1linf:4.5166e-03 L5_l1linf:4.7302e-03 L6_l1linf:4.1809e-03 L7_l1linf:4.2114e-03 L8_l1linf:4.1504e-03 L9_l1linf:4.3945e-03 L10_l1linf:4.2114e-03 L11_l1linf:3.9368e-03 L12_l1linf:3.7994e-03 L1_spectral:1.2817e-03 L2_spectral:8.1459e-04 L3_spectral:7.7839e-04 L4_spectral:7.7456e-04 L5_spectral:8.0238e-04 L6_spectral:7.7528e-04 L7_spectral:7.7945e-04 L8_spectral:7.9504e-04 L9_spectral:7.8302e-04 L10_spectral:7.8238e-04 L11_spectral:7.7986e-04 L12_spectral:7.4870e-04 train_time:405280ms step_avg:40.53ms +[2025-09-11 07:37:45] [Rank 0] PRINT: step:10000/10000 val_loss:5.3087 total_sharp:2.8673e-03 L1_sharp:1.1688e-03 L2_sharp:4.3740e-04 L3_sharp:3.8519e-04 L4_sharp:3.4110e-04 L5_sharp:5.6711e-04 L6_sharp:5.0097e-04 L7_sharp:5.6844e-04 L8_sharp:6.0056e-04 L9_sharp:7.8859e-04 L10_sharp:7.9550e-04 L11_sharp:1.1670e-03 L12_sharp:4.3504e-03 total_fnorm:1.8066e-01 total_l1_linf:1.4300e+02 total_spectral:8.9355e-02 L1_fnorm:6.4453e-02 L2_fnorm:5.0049e-02 L3_fnorm:5.0049e-02 L4_fnorm:4.9316e-02 L5_fnorm:4.8828e-02 L6_fnorm:4.8828e-02 L7_fnorm:4.8828e-02 L8_fnorm:4.8096e-02 L9_fnorm:4.8584e-02 L10_fnorm:4.9072e-02 L11_fnorm:4.8584e-02 L12_fnorm:4.4922e-02 L1_l1linf:8.8501e-03 L2_l1linf:5.0354e-03 L3_l1linf:5.1270e-03 L4_l1linf:4.5166e-03 L5_l1linf:4.7302e-03 
L6_l1linf:4.1809e-03 L7_l1linf:4.2114e-03 L8_l1linf:4.1504e-03 L9_l1linf:4.3945e-03 L10_l1linf:4.2114e-03 L11_l1linf:3.9368e-03 L12_l1linf:3.7994e-03 L1_spectral:1.2817e-03 L2_spectral:8.1459e-04 L3_spectral:7.7839e-04 L4_spectral:7.7456e-04 L5_spectral:8.0238e-04 L6_spectral:7.7528e-04 L7_spectral:7.7945e-04 L8_spectral:7.9504e-04 L9_spectral:7.8302e-04 L10_spectral:7.8238e-04 L11_spectral:7.7986e-04 L12_spectral:7.4870e-04 train_time:405280ms step_avg:40.53ms +[2025-09-11 07:37:45] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 07:37:45 2025 --- +[2025-09-11 07:37:45] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 07:37:45 2025 --- +[2025-09-11 07:37:45] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 07:37:45] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.02_seed_44/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.02_seed_44/config.json new file mode 100644 index 0000000000000000000000000000000000000000..912099c7b46602365493064035841f7c96f8960d --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.02_seed_44/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 44, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.001, + "muon_lr": 0.02, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "af844e7c-932e-48a9-a7eb-5d4632a6e2a4", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.02_seed_44/training_log_af844e7c-932e-48a9-a7eb-5d4632a6e2a4.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.02_seed_44/training_log_af844e7c-932e-48a9-a7eb-5d4632a6e2a4.txt new file mode 100644 index 0000000000000000000000000000000000000000..c07ac8de433ca2af1f810ae9d8d26e48bb1dc902 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.02_seed_44/training_log_af844e7c-932e-48a9-a7eb-5d4632a6e2a4.txt @@ -0,0 +1,4264 @@ +[2025-09-11 08:04:33] [Rank 0] PRINT: --- Script Start: Thu Sep 11 08:04:33 2025 --- +[2025-09-11 08:04:33] [Rank 0] PRINT: --- Script Start: Thu Sep 11 08:04:33 2025 --- +[2025-09-11 08:04:33] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001, muon_lr=0.02, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 08:04:33] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001, muon_lr=0.02, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 08:04:33] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 08:04:33] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 08:04:33] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 08:04:33] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 08:04:33] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.02_seed_44 +[2025-09-11 08:04:33] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.02_seed_44 +[2025-09-11 08:04:33] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import 
dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, 
"unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." 
+ "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention 
sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 08:04:33] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 08:04:33] [Rank 0] PRINT: Constructing model... +[2025-09-11 08:04:33] [Rank 0] PRINT: Constructing model... +[2025-09-11 08:04:34] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 08:04:34] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 08:04:34] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 08:04:34] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 08:04:34] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 08:04:34] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 08:04:34] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 08:04:34] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 08:04:34] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 08:04:34] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 08:04:36] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 08:04:36] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 08:04:36] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 08:04:36] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 08:04:36] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 08:04:36] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 08:04:42] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 08:04:42] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 08:04:42] [Rank 0] PRINT: Starting warmup... +[2025-09-11 08:04:42] [Rank 0] PRINT: Starting warmup... +[2025-09-11 08:05:21] [Rank 0] PRINT: Warmup complete. +[2025-09-11 08:05:21] [Rank 0] PRINT: Warmup complete. +[2025-09-11 08:05:21] [Rank 0] PRINT: Starting training... +[2025-09-11 08:05:21] [Rank 0] PRINT: Starting training... 
+[2025-09-11 08:05:22] [Rank 0] step:21/10000 train_time:1135ms step_avg:54.04ms +[2025-09-11 08:05:22] [Rank 0] step:21/10000 train_time:1135ms step_avg:54.04ms +[2025-09-11 08:05:23] [Rank 0] step:41/10000 train_time:1864ms step_avg:45.46ms +[2025-09-11 08:05:23] [Rank 0] step:41/10000 train_time:1864ms step_avg:45.46ms +[2025-09-11 08:05:24] [Rank 0] step:61/10000 train_time:2593ms step_avg:42.51ms +[2025-09-11 08:05:24] [Rank 0] step:61/10000 train_time:2593ms step_avg:42.51ms +[2025-09-11 08:05:24] [Rank 0] step:81/10000 train_time:3322ms step_avg:41.01ms +[2025-09-11 08:05:24] [Rank 0] step:81/10000 train_time:3322ms step_avg:41.01ms +[2025-09-11 08:05:25] [Rank 0] step:101/10000 train_time:4050ms step_avg:40.10ms +[2025-09-11 08:05:25] [Rank 0] step:101/10000 train_time:4050ms step_avg:40.10ms +[2025-09-11 08:05:26] [Rank 0] step:121/10000 train_time:4778ms step_avg:39.49ms +[2025-09-11 08:05:26] [Rank 0] step:121/10000 train_time:4778ms step_avg:39.49ms +[2025-09-11 08:05:27] [Rank 0] step:141/10000 train_time:5506ms step_avg:39.05ms +[2025-09-11 08:05:27] [Rank 0] step:141/10000 train_time:5506ms step_avg:39.05ms +[2025-09-11 08:05:27] [Rank 0] step:161/10000 train_time:6235ms step_avg:38.73ms +[2025-09-11 08:05:27] [Rank 0] step:161/10000 train_time:6235ms step_avg:38.73ms +[2025-09-11 08:05:28] [Rank 0] step:181/10000 train_time:6963ms step_avg:38.47ms +[2025-09-11 08:05:28] [Rank 0] step:181/10000 train_time:6963ms step_avg:38.47ms +[2025-09-11 08:05:29] [Rank 0] step:201/10000 train_time:7690ms step_avg:38.26ms +[2025-09-11 08:05:29] [Rank 0] step:201/10000 train_time:7690ms step_avg:38.26ms +[2025-09-11 08:05:30] [Rank 0] step:221/10000 train_time:8418ms step_avg:38.09ms +[2025-09-11 08:05:30] [Rank 0] step:221/10000 train_time:8418ms step_avg:38.09ms +[2025-09-11 08:05:30] [Rank 0] step:241/10000 train_time:9146ms step_avg:37.95ms +[2025-09-11 08:05:30] [Rank 0] step:241/10000 train_time:9146ms step_avg:37.95ms +[2025-09-11 08:05:31] [Rank 0] 
step:261/10000 train_time:9873ms step_avg:37.83ms +[2025-09-11 08:05:31] [Rank 0] step:261/10000 train_time:9873ms step_avg:37.83ms +[2025-09-11 08:05:32] [Rank 0] step:281/10000 train_time:10600ms step_avg:37.72ms +[2025-09-11 08:05:32] [Rank 0] step:281/10000 train_time:10600ms step_avg:37.72ms +[2025-09-11 08:05:32] [Rank 0] step:301/10000 train_time:11328ms step_avg:37.63ms +[2025-09-11 08:05:32] [Rank 0] step:301/10000 train_time:11328ms step_avg:37.63ms +[2025-09-11 08:05:33] [Rank 0] step:321/10000 train_time:12055ms step_avg:37.56ms +[2025-09-11 08:05:33] [Rank 0] step:321/10000 train_time:12055ms step_avg:37.56ms +[2025-09-11 08:05:34] [Rank 0] step:341/10000 train_time:12783ms step_avg:37.49ms +[2025-09-11 08:05:34] [Rank 0] step:341/10000 train_time:12783ms step_avg:37.49ms +[2025-09-11 08:05:35] [Rank 0] step:361/10000 train_time:13511ms step_avg:37.43ms +[2025-09-11 08:05:35] [Rank 0] step:361/10000 train_time:13511ms step_avg:37.43ms +[2025-09-11 08:05:35] [Rank 0] step:381/10000 train_time:14237ms step_avg:37.37ms +[2025-09-11 08:05:35] [Rank 0] step:381/10000 train_time:14237ms step_avg:37.37ms +[2025-09-11 08:05:36] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 08:05:36] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 08:05:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 08:05:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 08:06:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 08:06:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 08:06:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:06:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 08:06:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 08:06:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 08:06:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 08:06:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 08:06:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 08:06:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 08:06:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 08:06:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 08:06:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 08:06:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 08:06:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 08:06:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 08:06:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 08:06:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 08:06:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 08:06:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 08:06:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 08:06:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 08:06:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 08:06:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 08:06:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 08:06:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 08:06:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 08:06:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 08:06:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 08:06:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 08:06:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 08:06:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 08:06:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 08:06:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 08:06:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 08:06:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 08:06:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 08:06:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 08:06:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 08:06:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 08:06:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 08:06:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:06:25] [Rank 0] PRINT: step:400/10000 val_loss:7.1734 total_sharp:2.4241e-02 L1_sharp:1.3617e-02 L2_sharp:1.1110e-02 L3_sharp:6.2617e-03 L4_sharp:6.0896e-03 L5_sharp:7.1337e-03 L6_sharp:5.0703e-03 L7_sharp:6.1235e-03 L8_sharp:4.5029e-03 L9_sharp:4.3745e-03 L10_sharp:1.3103e-03 L11_sharp:3.4043e-03 L12_sharp:2.2481e-03 total_fnorm:8.1484e+00 total_l1_linf:3.2839e+04 total_spectral:4.0742e+00 L1_fnorm:2.4664e+00 L2_fnorm:2.4490e+00 L3_fnorm:2.3249e+00 L4_fnorm:2.1684e+00 L5_fnorm:1.8853e+00 L6_fnorm:1.7017e+00 L7_fnorm:1.4650e+00 L8_fnorm:1.3158e+00 L9_fnorm:1.2169e+00 L10_fnorm:1.2272e+00 L11_fnorm:1.1174e+00 L12_fnorm:1.1148e+00 L1_l1linf:8.3173e-01 L2_l1linf:8.1516e-01 L3_l1linf:7.7698e-01 L4_l1linf:6.9901e-01 L5_l1linf:5.8908e-01 L6_l1linf:5.4852e-01 L7_l1linf:5.0031e-01 L8_l1linf:4.7705e-01 L9_l1linf:4.6552e-01 L10_l1linf:4.5559e-01 L11_l1linf:4.6429e-01 L12_l1linf:4.5053e-01 L1_spectral:2.4088e-02 L2_spectral:2.4092e-02 L3_spectral:2.4099e-02 L4_spectral:2.4077e-02 L5_spectral:2.4062e-02 L6_spectral:2.4042e-02 L7_spectral:2.4036e-02 L8_spectral:2.4024e-02 L9_spectral:2.4031e-02 L10_spectral:2.4010e-02 L11_spectral:2.4014e-02 L12_spectral:2.4013e-02 train_time:14945ms step_avg:37.36ms +[2025-09-11 08:06:25] [Rank 0] PRINT: step:400/10000 val_loss:7.1734 total_sharp:2.4241e-02 L1_sharp:1.3617e-02 L2_sharp:1.1110e-02 L3_sharp:6.2617e-03 L4_sharp:6.0896e-03 L5_sharp:7.1337e-03 L6_sharp:5.0703e-03 L7_sharp:6.1235e-03 L8_sharp:4.5029e-03 L9_sharp:4.3745e-03 L10_sharp:1.3103e-03 L11_sharp:3.4043e-03 L12_sharp:2.2481e-03 total_fnorm:8.1484e+00 total_l1_linf:3.2839e+04 total_spectral:4.0742e+00 L1_fnorm:2.4664e+00 L2_fnorm:2.4490e+00 L3_fnorm:2.3249e+00 L4_fnorm:2.1684e+00 L5_fnorm:1.8853e+00 L6_fnorm:1.7017e+00 L7_fnorm:1.4650e+00 L8_fnorm:1.3158e+00 L9_fnorm:1.2169e+00 L10_fnorm:1.2272e+00 L11_fnorm:1.1174e+00 L12_fnorm:1.1148e+00 L1_l1linf:8.3173e-01 L2_l1linf:8.1516e-01 L3_l1linf:7.7698e-01 L4_l1linf:6.9901e-01 L5_l1linf:5.8908e-01 
L6_l1linf:5.4852e-01 L7_l1linf:5.0031e-01 L8_l1linf:4.7705e-01 L9_l1linf:4.6552e-01 L10_l1linf:4.5559e-01 L11_l1linf:4.6429e-01 L12_l1linf:4.5053e-01 L1_spectral:2.4088e-02 L2_spectral:2.4092e-02 L3_spectral:2.4099e-02 L4_spectral:2.4077e-02 L5_spectral:2.4062e-02 L6_spectral:2.4042e-02 L7_spectral:2.4036e-02 L8_spectral:2.4024e-02 L9_spectral:2.4031e-02 L10_spectral:2.4010e-02 L11_spectral:2.4014e-02 L12_spectral:2.4013e-02 train_time:14945ms step_avg:37.36ms +[2025-09-11 08:06:55] [Rank 0] step:401/10000 train_time:44871ms step_avg:111.90ms +[2025-09-11 08:06:55] [Rank 0] step:401/10000 train_time:44871ms step_avg:111.90ms +[2025-09-11 08:06:58] [Rank 0] step:421/10000 train_time:47331ms step_avg:112.43ms +[2025-09-11 08:06:58] [Rank 0] step:421/10000 train_time:47331ms step_avg:112.43ms +[2025-09-11 08:06:58] [Rank 0] step:441/10000 train_time:47970ms step_avg:108.77ms +[2025-09-11 08:06:58] [Rank 0] step:441/10000 train_time:47970ms step_avg:108.77ms +[2025-09-11 08:06:59] [Rank 0] step:461/10000 train_time:48608ms step_avg:105.44ms +[2025-09-11 08:06:59] [Rank 0] step:461/10000 train_time:48608ms step_avg:105.44ms +[2025-09-11 08:07:00] [Rank 0] step:481/10000 train_time:49246ms step_avg:102.38ms +[2025-09-11 08:07:00] [Rank 0] step:481/10000 train_time:49246ms step_avg:102.38ms +[2025-09-11 08:07:00] [Rank 0] step:501/10000 train_time:49883ms step_avg:99.57ms +[2025-09-11 08:07:00] [Rank 0] step:501/10000 train_time:49883ms step_avg:99.57ms +[2025-09-11 08:07:01] [Rank 0] step:521/10000 train_time:50521ms step_avg:96.97ms +[2025-09-11 08:07:01] [Rank 0] step:521/10000 train_time:50521ms step_avg:96.97ms +[2025-09-11 08:07:01] [Rank 0] step:541/10000 train_time:51158ms step_avg:94.56ms +[2025-09-11 08:07:01] [Rank 0] step:541/10000 train_time:51158ms step_avg:94.56ms +[2025-09-11 08:07:02] [Rank 0] step:561/10000 train_time:51796ms step_avg:92.33ms +[2025-09-11 08:07:02] [Rank 0] step:561/10000 train_time:51796ms step_avg:92.33ms +[2025-09-11 08:07:03] [Rank 
0] step:581/10000 train_time:52433ms step_avg:90.25ms +[2025-09-11 08:07:03] [Rank 0] step:581/10000 train_time:52433ms step_avg:90.25ms +[2025-09-11 08:07:03] [Rank 0] step:601/10000 train_time:53071ms step_avg:88.30ms +[2025-09-11 08:07:03] [Rank 0] step:601/10000 train_time:53071ms step_avg:88.30ms +[2025-09-11 08:07:04] [Rank 0] step:621/10000 train_time:54181ms step_avg:87.25ms +[2025-09-11 08:07:04] [Rank 0] step:621/10000 train_time:54181ms step_avg:87.25ms +[2025-09-11 08:07:05] [Rank 0] step:641/10000 train_time:54940ms step_avg:85.71ms +[2025-09-11 08:07:05] [Rank 0] step:641/10000 train_time:54940ms step_avg:85.71ms +[2025-09-11 08:07:06] [Rank 0] step:661/10000 train_time:55577ms step_avg:84.08ms +[2025-09-11 08:07:06] [Rank 0] step:661/10000 train_time:55577ms step_avg:84.08ms +[2025-09-11 08:07:07] [Rank 0] step:681/10000 train_time:56528ms step_avg:83.01ms +[2025-09-11 08:07:07] [Rank 0] step:681/10000 train_time:56528ms step_avg:83.01ms +[2025-09-11 08:07:07] [Rank 0] step:701/10000 train_time:57166ms step_avg:81.55ms +[2025-09-11 08:07:07] [Rank 0] step:701/10000 train_time:57166ms step_avg:81.55ms +[2025-09-11 08:07:08] [Rank 0] step:721/10000 train_time:57802ms step_avg:80.17ms +[2025-09-11 08:07:08] [Rank 0] step:721/10000 train_time:57802ms step_avg:80.17ms +[2025-09-11 08:07:09] [Rank 0] step:741/10000 train_time:58440ms step_avg:78.87ms +[2025-09-11 08:07:09] [Rank 0] step:741/10000 train_time:58440ms step_avg:78.87ms +[2025-09-11 08:07:09] [Rank 0] step:761/10000 train_time:59082ms step_avg:77.64ms +[2025-09-11 08:07:09] [Rank 0] step:761/10000 train_time:59082ms step_avg:77.64ms +[2025-09-11 08:07:10] [Rank 0] step:781/10000 train_time:59724ms step_avg:76.47ms +[2025-09-11 08:07:10] [Rank 0] step:781/10000 train_time:59724ms step_avg:76.47ms +[2025-09-11 08:07:11] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 08:07:11] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 08:07:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 08:07:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 08:07:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 08:07:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 08:07:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:07:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:07:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 08:07:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 08:07:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 08:07:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 08:07:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 08:07:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 08:07:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 08:07:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 08:07:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 08:07:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 08:07:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 08:07:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 08:07:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 08:07:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 08:07:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 08:07:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 08:07:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 08:07:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 08:07:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 08:07:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 08:07:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 08:07:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 08:07:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 08:07:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 08:07:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 08:07:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 08:07:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 08:07:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 08:07:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 08:07:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 08:07:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 08:07:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 08:07:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... 
+[2025-09-11 08:07:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 08:07:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 08:07:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 08:07:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 08:07:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 08:07:56] [Rank 0] PRINT: step:800/10000 val_loss:6.6588 total_sharp:3.2242e-02 L1_sharp:1.5889e-02 L2_sharp:8.7560e-03 L3_sharp:5.5161e-03 L4_sharp:4.5782e-03 L5_sharp:6.0677e-03 L6_sharp:3.2836e-03 L7_sharp:2.7877e-03 L8_sharp:2.5084e-03 L9_sharp:2.6366e-03 L10_sharp:2.5542e-03 L11_sharp:3.2017e-03 L12_sharp:7.6614e-03 total_fnorm:7.4375e+00 total_l1_linf:2.2528e+04 total_spectral:3.7031e+00 L1_fnorm:2.5156e+00 L2_fnorm:2.4844e+00 L3_fnorm:2.3906e+00 L4_fnorm:2.3125e+00 L5_fnorm:2.1094e+00 L6_fnorm:2.0312e+00 L7_fnorm:1.8828e+00 L8_fnorm:1.7266e+00 L9_fnorm:1.5234e+00 L10_fnorm:1.4922e+00 L11_fnorm:1.3359e+00 L12_fnorm:1.1562e+00 L1_l1linf:8.3203e-01 L2_l1linf:7.6562e-01 L3_l1linf:7.4219e-01 L4_l1linf:6.8359e-01 L5_l1linf:6.3281e-01 L6_l1linf:6.0547e-01 L7_l1linf:5.5859e-01 L8_l1linf:5.3125e-01 L9_l1linf:4.8242e-01 L10_l1linf:4.4922e-01 L11_l1linf:4.3359e-01 L12_l1linf:4.3945e-01 L1_spectral:2.6785e-02 L2_spectral:2.6641e-02 L3_spectral:2.6485e-02 L4_spectral:2.6397e-02 L5_spectral:2.6405e-02 L6_spectral:2.6472e-02 L7_spectral:2.6433e-02 L8_spectral:2.6245e-02 L9_spectral:2.6047e-02 L10_spectral:2.5752e-02 L11_spectral:2.5533e-02 L12_spectral:2.5003e-02 train_time:60348ms step_avg:75.43ms +[2025-09-11 08:07:56] [Rank 0] PRINT: step:800/10000 val_loss:6.6588 total_sharp:3.2242e-02 L1_sharp:1.5889e-02 L2_sharp:8.7560e-03 L3_sharp:5.5161e-03 L4_sharp:4.5782e-03 L5_sharp:6.0677e-03 L6_sharp:3.2836e-03 L7_sharp:2.7877e-03 L8_sharp:2.5084e-03 
L9_sharp:2.6366e-03 L10_sharp:2.5542e-03 L11_sharp:3.2017e-03 L12_sharp:7.6614e-03 total_fnorm:7.4375e+00 total_l1_linf:2.2528e+04 total_spectral:3.7031e+00 L1_fnorm:2.5156e+00 L2_fnorm:2.4844e+00 L3_fnorm:2.3906e+00 L4_fnorm:2.3125e+00 L5_fnorm:2.1094e+00 L6_fnorm:2.0312e+00 L7_fnorm:1.8828e+00 L8_fnorm:1.7266e+00 L9_fnorm:1.5234e+00 L10_fnorm:1.4922e+00 L11_fnorm:1.3359e+00 L12_fnorm:1.1562e+00 L1_l1linf:8.3203e-01 L2_l1linf:7.6562e-01 L3_l1linf:7.4219e-01 L4_l1linf:6.8359e-01 L5_l1linf:6.3281e-01 L6_l1linf:6.0547e-01 L7_l1linf:5.5859e-01 L8_l1linf:5.3125e-01 L9_l1linf:4.8242e-01 L10_l1linf:4.4922e-01 L11_l1linf:4.3359e-01 L12_l1linf:4.3945e-01 L1_spectral:2.6785e-02 L2_spectral:2.6641e-02 L3_spectral:2.6485e-02 L4_spectral:2.6397e-02 L5_spectral:2.6405e-02 L6_spectral:2.6472e-02 L7_spectral:2.6433e-02 L8_spectral:2.6245e-02 L9_spectral:2.6047e-02 L10_spectral:2.5752e-02 L11_spectral:2.5533e-02 L12_spectral:2.5003e-02 train_time:60348ms step_avg:75.43ms +[2025-09-11 08:07:58] [Rank 0] step:801/10000 train_time:62188ms step_avg:77.64ms +[2025-09-11 08:07:58] [Rank 0] step:801/10000 train_time:62188ms step_avg:77.64ms +[2025-09-11 08:07:58] [Rank 0] step:821/10000 train_time:62820ms step_avg:76.52ms +[2025-09-11 08:07:58] [Rank 0] step:821/10000 train_time:62820ms step_avg:76.52ms +[2025-09-11 08:07:59] [Rank 0] step:841/10000 train_time:63463ms step_avg:75.46ms +[2025-09-11 08:07:59] [Rank 0] step:841/10000 train_time:63463ms step_avg:75.46ms +[2025-09-11 08:07:59] [Rank 0] step:861/10000 train_time:64106ms step_avg:74.46ms +[2025-09-11 08:07:59] [Rank 0] step:861/10000 train_time:64106ms step_avg:74.46ms +[2025-09-11 08:08:00] [Rank 0] step:881/10000 train_time:64749ms step_avg:73.50ms +[2025-09-11 08:08:00] [Rank 0] step:881/10000 train_time:64749ms step_avg:73.50ms +[2025-09-11 08:08:01] [Rank 0] step:901/10000 train_time:65392ms step_avg:72.58ms +[2025-09-11 08:08:01] [Rank 0] step:901/10000 train_time:65392ms step_avg:72.58ms +[2025-09-11 08:08:01] [Rank 0] 
step:921/10000 train_time:66035ms step_avg:71.70ms +[2025-09-11 08:08:01] [Rank 0] step:921/10000 train_time:66035ms step_avg:71.70ms +[2025-09-11 08:08:02] [Rank 0] step:941/10000 train_time:66678ms step_avg:70.86ms +[2025-09-11 08:08:02] [Rank 0] step:941/10000 train_time:66678ms step_avg:70.86ms +[2025-09-11 08:08:03] [Rank 0] step:961/10000 train_time:67321ms step_avg:70.05ms +[2025-09-11 08:08:03] [Rank 0] step:961/10000 train_time:67321ms step_avg:70.05ms +[2025-09-11 08:08:03] [Rank 0] step:981/10000 train_time:67963ms step_avg:69.28ms +[2025-09-11 08:08:03] [Rank 0] step:981/10000 train_time:67963ms step_avg:69.28ms +[2025-09-11 08:08:04] [Rank 0] step:1001/10000 train_time:68605ms step_avg:68.54ms +[2025-09-11 08:08:04] [Rank 0] step:1001/10000 train_time:68605ms step_avg:68.54ms +[2025-09-11 08:08:05] [Rank 0] step:1021/10000 train_time:69248ms step_avg:67.82ms +[2025-09-11 08:08:05] [Rank 0] step:1021/10000 train_time:69248ms step_avg:67.82ms +[2025-09-11 08:08:05] [Rank 0] step:1041/10000 train_time:69891ms step_avg:67.14ms +[2025-09-11 08:08:05] [Rank 0] step:1041/10000 train_time:69891ms step_avg:67.14ms +[2025-09-11 08:08:06] [Rank 0] step:1061/10000 train_time:70533ms step_avg:66.48ms +[2025-09-11 08:08:06] [Rank 0] step:1061/10000 train_time:70533ms step_avg:66.48ms +[2025-09-11 08:08:07] [Rank 0] step:1081/10000 train_time:71177ms step_avg:65.84ms +[2025-09-11 08:08:07] [Rank 0] step:1081/10000 train_time:71177ms step_avg:65.84ms +[2025-09-11 08:08:08] [Rank 0] step:1101/10000 train_time:72329ms step_avg:65.69ms +[2025-09-11 08:08:08] [Rank 0] step:1101/10000 train_time:72329ms step_avg:65.69ms +[2025-09-11 08:08:08] [Rank 0] step:1121/10000 train_time:73045ms step_avg:65.16ms +[2025-09-11 08:08:08] [Rank 0] step:1121/10000 train_time:73045ms step_avg:65.16ms +[2025-09-11 08:08:09] [Rank 0] step:1141/10000 train_time:73687ms step_avg:64.58ms +[2025-09-11 08:08:09] [Rank 0] step:1141/10000 train_time:73687ms step_avg:64.58ms +[2025-09-11 08:08:10] 
[Rank 0] step:1161/10000 train_time:74611ms step_avg:64.26ms +[2025-09-11 08:08:10] [Rank 0] step:1161/10000 train_time:74611ms step_avg:64.26ms +[2025-09-11 08:08:11] [Rank 0] step:1181/10000 train_time:75253ms step_avg:63.72ms +[2025-09-11 08:08:11] [Rank 0] step:1181/10000 train_time:75253ms step_avg:63.72ms +[2025-09-11 08:08:11] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 08:08:11] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 08:08:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 08:08:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 08:08:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 08:08:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 08:08:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:08:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:08:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 08:08:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 08:08:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 08:08:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 08:08:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 08:08:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 08:08:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... 
+[2025-09-11 08:08:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 08:08:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 08:08:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 08:08:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 08:08:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 08:08:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 08:08:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 08:08:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 08:08:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 08:08:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 08:08:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 08:08:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 08:08:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 08:08:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 08:08:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 08:08:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 08:08:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 08:08:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 08:08:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 08:08:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... 
+[2025-09-11 08:08:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 08:08:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 08:08:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 08:08:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 08:08:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 08:08:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 08:08:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 08:08:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 08:08:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 08:08:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 08:08:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:08:21] [Rank 0] PRINT: step:1200/10000 val_loss:6.3991 total_sharp:1.5785e-02 L1_sharp:7.1969e-03 L2_sharp:3.6368e-03 L3_sharp:2.2032e-03 L4_sharp:2.3419e-03 L5_sharp:4.3279e-03 L6_sharp:1.4570e-03 L7_sharp:1.3869e-03 L8_sharp:1.5716e-03 L9_sharp:1.2124e-03 L10_sharp:1.1399e-03 L11_sharp:1.4381e-03 L12_sharp:4.3674e-03 total_fnorm:8.1875e+00 total_l1_linf:2.4960e+04 total_spectral:4.0625e+00 L1_fnorm:2.5312e+00 L2_fnorm:2.5000e+00 L3_fnorm:2.4688e+00 L4_fnorm:2.4844e+00 L5_fnorm:2.3906e+00 L6_fnorm:2.4062e+00 L7_fnorm:2.2812e+00 L8_fnorm:2.1719e+00 L9_fnorm:2.0625e+00 L10_fnorm:2.0156e+00 L11_fnorm:1.8906e+00 L12_fnorm:1.7188e+00 L1_l1linf:7.7734e-01 L2_l1linf:7.3047e-01 L3_l1linf:7.4219e-01 L4_l1linf:7.5000e-01 L5_l1linf:7.3047e-01 L6_l1linf:7.1094e-01 L7_l1linf:6.9922e-01 L8_l1linf:6.6797e-01 L9_l1linf:6.2109e-01 L10_l1linf:6.1328e-01 L11_l1linf:5.6641e-01 L12_l1linf:4.7461e-01 L1_spectral:2.7942e-02 L2_spectral:2.7471e-02 L3_spectral:2.7517e-02 L4_spectral:2.7297e-02 L5_spectral:2.7299e-02 L6_spectral:2.7315e-02 L7_spectral:2.7488e-02 L8_spectral:2.7579e-02 L9_spectral:2.7707e-02 L10_spectral:2.7569e-02 L11_spectral:2.7501e-02 L12_spectral:2.6806e-02 train_time:75878ms step_avg:63.23ms +[2025-09-11 08:08:21] [Rank 0] PRINT: step:1200/10000 val_loss:6.3991 total_sharp:1.5785e-02 L1_sharp:7.1969e-03 L2_sharp:3.6368e-03 L3_sharp:2.2032e-03 L4_sharp:2.3419e-03 L5_sharp:4.3279e-03 L6_sharp:1.4570e-03 L7_sharp:1.3869e-03 L8_sharp:1.5716e-03 L9_sharp:1.2124e-03 L10_sharp:1.1399e-03 L11_sharp:1.4381e-03 L12_sharp:4.3674e-03 total_fnorm:8.1875e+00 total_l1_linf:2.4960e+04 total_spectral:4.0625e+00 L1_fnorm:2.5312e+00 L2_fnorm:2.5000e+00 L3_fnorm:2.4688e+00 L4_fnorm:2.4844e+00 L5_fnorm:2.3906e+00 L6_fnorm:2.4062e+00 L7_fnorm:2.2812e+00 L8_fnorm:2.1719e+00 L9_fnorm:2.0625e+00 L10_fnorm:2.0156e+00 L11_fnorm:1.8906e+00 L12_fnorm:1.7188e+00 L1_l1linf:7.7734e-01 L2_l1linf:7.3047e-01 L3_l1linf:7.4219e-01 L4_l1linf:7.5000e-01 L5_l1linf:7.3047e-01 
L6_l1linf:7.1094e-01 L7_l1linf:6.9922e-01 L8_l1linf:6.6797e-01 L9_l1linf:6.2109e-01 L10_l1linf:6.1328e-01 L11_l1linf:5.6641e-01 L12_l1linf:4.7461e-01 L1_spectral:2.7942e-02 L2_spectral:2.7471e-02 L3_spectral:2.7517e-02 L4_spectral:2.7297e-02 L5_spectral:2.7299e-02 L6_spectral:2.7315e-02 L7_spectral:2.7488e-02 L8_spectral:2.7579e-02 L9_spectral:2.7707e-02 L10_spectral:2.7569e-02 L11_spectral:2.7501e-02 L12_spectral:2.6806e-02 train_time:75878ms step_avg:63.23ms +[2025-09-11 08:08:23] [Rank 0] step:1201/10000 train_time:77447ms step_avg:64.49ms +[2025-09-11 08:08:23] [Rank 0] step:1201/10000 train_time:77447ms step_avg:64.49ms +[2025-09-11 08:08:24] [Rank 0] step:1221/10000 train_time:78132ms step_avg:63.99ms +[2025-09-11 08:08:24] [Rank 0] step:1221/10000 train_time:78132ms step_avg:63.99ms +[2025-09-11 08:08:24] [Rank 0] step:1241/10000 train_time:78777ms step_avg:63.48ms +[2025-09-11 08:08:24] [Rank 0] step:1241/10000 train_time:78777ms step_avg:63.48ms +[2025-09-11 08:08:25] [Rank 0] step:1261/10000 train_time:79419ms step_avg:62.98ms +[2025-09-11 08:08:25] [Rank 0] step:1261/10000 train_time:79419ms step_avg:62.98ms +[2025-09-11 08:08:26] [Rank 0] step:1281/10000 train_time:80062ms step_avg:62.50ms +[2025-09-11 08:08:26] [Rank 0] step:1281/10000 train_time:80062ms step_avg:62.50ms +[2025-09-11 08:08:26] [Rank 0] step:1301/10000 train_time:80704ms step_avg:62.03ms +[2025-09-11 08:08:26] [Rank 0] step:1301/10000 train_time:80704ms step_avg:62.03ms +[2025-09-11 08:08:27] [Rank 0] step:1321/10000 train_time:81346ms step_avg:61.58ms +[2025-09-11 08:08:27] [Rank 0] step:1321/10000 train_time:81346ms step_avg:61.58ms +[2025-09-11 08:08:27] [Rank 0] step:1341/10000 train_time:81988ms step_avg:61.14ms +[2025-09-11 08:08:27] [Rank 0] step:1341/10000 train_time:81988ms step_avg:61.14ms +[2025-09-11 08:08:28] [Rank 0] step:1361/10000 train_time:82631ms step_avg:60.71ms +[2025-09-11 08:08:28] [Rank 0] step:1361/10000 train_time:82631ms step_avg:60.71ms +[2025-09-11 08:08:29] 
[Rank 0] step:1381/10000 train_time:83273ms step_avg:60.30ms +[2025-09-11 08:08:29] [Rank 0] step:1381/10000 train_time:83273ms step_avg:60.30ms +[2025-09-11 08:08:29] [Rank 0] step:1401/10000 train_time:83915ms step_avg:59.90ms +[2025-09-11 08:08:29] [Rank 0] step:1401/10000 train_time:83915ms step_avg:59.90ms +[2025-09-11 08:08:30] [Rank 0] step:1421/10000 train_time:84556ms step_avg:59.50ms +[2025-09-11 08:08:30] [Rank 0] step:1421/10000 train_time:84556ms step_avg:59.50ms +[2025-09-11 08:08:31] [Rank 0] step:1441/10000 train_time:85198ms step_avg:59.12ms +[2025-09-11 08:08:31] [Rank 0] step:1441/10000 train_time:85198ms step_avg:59.12ms +[2025-09-11 08:08:31] [Rank 0] step:1461/10000 train_time:85839ms step_avg:58.75ms +[2025-09-11 08:08:31] [Rank 0] step:1461/10000 train_time:85839ms step_avg:58.75ms +[2025-09-11 08:08:32] [Rank 0] step:1481/10000 train_time:86481ms step_avg:58.39ms +[2025-09-11 08:08:32] [Rank 0] step:1481/10000 train_time:86481ms step_avg:58.39ms +[2025-09-11 08:08:33] [Rank 0] step:1501/10000 train_time:87126ms step_avg:58.05ms +[2025-09-11 08:08:33] [Rank 0] step:1501/10000 train_time:87126ms step_avg:58.05ms +[2025-09-11 08:08:33] [Rank 0] step:1521/10000 train_time:87773ms step_avg:57.71ms +[2025-09-11 08:08:33] [Rank 0] step:1521/10000 train_time:87773ms step_avg:57.71ms +[2025-09-11 08:08:34] [Rank 0] step:1541/10000 train_time:88419ms step_avg:57.38ms +[2025-09-11 08:08:34] [Rank 0] step:1541/10000 train_time:88419ms step_avg:57.38ms +[2025-09-11 08:08:35] [Rank 0] step:1561/10000 train_time:89065ms step_avg:57.06ms +[2025-09-11 08:08:35] [Rank 0] step:1561/10000 train_time:89065ms step_avg:57.06ms +[2025-09-11 08:08:35] [Rank 0] step:1581/10000 train_time:89712ms step_avg:56.74ms +[2025-09-11 08:08:35] [Rank 0] step:1581/10000 train_time:89712ms step_avg:56.74ms +[2025-09-11 08:08:36] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 08:08:36] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 08:08:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 08:08:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 08:08:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 08:08:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 08:08:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:08:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:08:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 08:08:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 08:08:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 08:08:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 08:08:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 08:08:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 08:08:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 08:08:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 08:08:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 08:08:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 08:08:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 08:08:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 08:08:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 08:08:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 08:08:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 08:08:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 08:08:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 08:08:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 08:08:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 08:08:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 08:08:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 08:08:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 08:08:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 08:08:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 08:08:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 08:08:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 08:08:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 08:08:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 08:08:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 08:08:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 08:08:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 08:08:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 08:08:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 08:08:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 08:08:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 08:08:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 08:08:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 08:08:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 08:08:47] [Rank 0] PRINT: step:1600/10000 val_loss:6.2131 total_sharp:1.0857e-02 L1_sharp:4.8183e-03 L2_sharp:1.9357e-03 L3_sharp:1.5665e-03 L4_sharp:1.0837e-03 L5_sharp:2.6440e-03 L6_sharp:9.2046e-04 L7_sharp:1.0915e-03 L8_sharp:1.4162e-03 L9_sharp:9.9591e-04 L10_sharp:9.6642e-04 L11_sharp:1.4104e-03 L12_sharp:6.4190e-03 total_fnorm:8.3750e+00 total_l1_linf:2.4960e+04 total_spectral:4.1875e+00 L1_fnorm:2.5469e+00 L2_fnorm:2.5156e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.4375e+00 L6_fnorm:2.4688e+00 L7_fnorm:2.4219e+00 L8_fnorm:2.3438e+00 L9_fnorm:2.2812e+00 L10_fnorm:2.2031e+00 L11_fnorm:2.0938e+00 L12_fnorm:1.8047e+00 L1_l1linf:7.4219e-01 L2_l1linf:6.9531e-01 L3_l1linf:7.0703e-01 L4_l1linf:7.0703e-01 L5_l1linf:7.0312e-01 L6_l1linf:7.0312e-01 L7_l1linf:7.0312e-01 L8_l1linf:6.9922e-01 L9_l1linf:6.5625e-01 L10_l1linf:6.3672e-01 L11_l1linf:5.8984e-01 L12_l1linf:4.6680e-01 L1_spectral:2.8928e-02 L2_spectral:2.8355e-02 L3_spectral:2.8109e-02 L4_spectral:2.8187e-02 L5_spectral:2.7824e-02 L6_spectral:2.7780e-02 L7_spectral:2.8028e-02 L8_spectral:2.8246e-02 L9_spectral:2.8407e-02 L10_spectral:2.8503e-02 L11_spectral:2.8285e-02 L12_spectral:2.7523e-02 train_time:90340ms step_avg:56.46ms +[2025-09-11 08:08:47] [Rank 0] PRINT: step:1600/10000 
val_loss:6.2131 total_sharp:1.0857e-02 L1_sharp:4.8183e-03 L2_sharp:1.9357e-03 L3_sharp:1.5665e-03 L4_sharp:1.0837e-03 L5_sharp:2.6440e-03 L6_sharp:9.2046e-04 L7_sharp:1.0915e-03 L8_sharp:1.4162e-03 L9_sharp:9.9591e-04 L10_sharp:9.6642e-04 L11_sharp:1.4104e-03 L12_sharp:6.4190e-03 total_fnorm:8.3750e+00 total_l1_linf:2.4960e+04 total_spectral:4.1875e+00 L1_fnorm:2.5469e+00 L2_fnorm:2.5156e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.4375e+00 L6_fnorm:2.4688e+00 L7_fnorm:2.4219e+00 L8_fnorm:2.3438e+00 L9_fnorm:2.2812e+00 L10_fnorm:2.2031e+00 L11_fnorm:2.0938e+00 L12_fnorm:1.8047e+00 L1_l1linf:7.4219e-01 L2_l1linf:6.9531e-01 L3_l1linf:7.0703e-01 L4_l1linf:7.0703e-01 L5_l1linf:7.0312e-01 L6_l1linf:7.0312e-01 L7_l1linf:7.0312e-01 L8_l1linf:6.9922e-01 L9_l1linf:6.5625e-01 L10_l1linf:6.3672e-01 L11_l1linf:5.8984e-01 L12_l1linf:4.6680e-01 L1_spectral:2.8928e-02 L2_spectral:2.8355e-02 L3_spectral:2.8109e-02 L4_spectral:2.8187e-02 L5_spectral:2.7824e-02 L6_spectral:2.7780e-02 L7_spectral:2.8028e-02 L8_spectral:2.8246e-02 L9_spectral:2.8407e-02 L10_spectral:2.8503e-02 L11_spectral:2.8285e-02 L12_spectral:2.7523e-02 train_time:90340ms step_avg:56.46ms +[2025-09-11 08:08:48] [Rank 0] step:1601/10000 train_time:91920ms step_avg:57.41ms +[2025-09-11 08:08:48] [Rank 0] step:1601/10000 train_time:91920ms step_avg:57.41ms +[2025-09-11 08:08:49] [Rank 0] step:1621/10000 train_time:92574ms step_avg:57.11ms +[2025-09-11 08:08:49] [Rank 0] step:1621/10000 train_time:92574ms step_avg:57.11ms +[2025-09-11 08:08:49] [Rank 0] step:1641/10000 train_time:93228ms step_avg:56.81ms +[2025-09-11 08:08:49] [Rank 0] step:1641/10000 train_time:93228ms step_avg:56.81ms +[2025-09-11 08:08:50] [Rank 0] step:1661/10000 train_time:93875ms step_avg:56.52ms +[2025-09-11 08:08:50] [Rank 0] step:1661/10000 train_time:93875ms step_avg:56.52ms +[2025-09-11 08:08:51] [Rank 0] step:1681/10000 train_time:94522ms step_avg:56.23ms +[2025-09-11 08:08:51] [Rank 0] step:1681/10000 train_time:94522ms 
step_avg:56.23ms +[2025-09-11 08:08:51] [Rank 0] step:1701/10000 train_time:95169ms step_avg:55.95ms +[2025-09-11 08:08:51] [Rank 0] step:1701/10000 train_time:95169ms step_avg:55.95ms +[2025-09-11 08:08:52] [Rank 0] step:1721/10000 train_time:95816ms step_avg:55.67ms +[2025-09-11 08:08:52] [Rank 0] step:1721/10000 train_time:95816ms step_avg:55.67ms +[2025-09-11 08:08:53] [Rank 0] step:1741/10000 train_time:96462ms step_avg:55.41ms +[2025-09-11 08:08:53] [Rank 0] step:1741/10000 train_time:96462ms step_avg:55.41ms +[2025-09-11 08:08:53] [Rank 0] step:1761/10000 train_time:97110ms step_avg:55.14ms +[2025-09-11 08:08:53] [Rank 0] step:1761/10000 train_time:97110ms step_avg:55.14ms +[2025-09-11 08:08:54] [Rank 0] step:1781/10000 train_time:97756ms step_avg:54.89ms +[2025-09-11 08:08:54] [Rank 0] step:1781/10000 train_time:97756ms step_avg:54.89ms +[2025-09-11 08:08:55] [Rank 0] step:1801/10000 train_time:98404ms step_avg:54.64ms +[2025-09-11 08:08:55] [Rank 0] step:1801/10000 train_time:98404ms step_avg:54.64ms +[2025-09-11 08:08:55] [Rank 0] step:1821/10000 train_time:99050ms step_avg:54.39ms +[2025-09-11 08:08:55] [Rank 0] step:1821/10000 train_time:99050ms step_avg:54.39ms +[2025-09-11 08:08:56] [Rank 0] step:1841/10000 train_time:99697ms step_avg:54.15ms +[2025-09-11 08:08:56] [Rank 0] step:1841/10000 train_time:99697ms step_avg:54.15ms +[2025-09-11 08:08:57] [Rank 0] step:1861/10000 train_time:100351ms step_avg:53.92ms +[2025-09-11 08:08:57] [Rank 0] step:1861/10000 train_time:100351ms step_avg:53.92ms +[2025-09-11 08:08:57] [Rank 0] step:1881/10000 train_time:100997ms step_avg:53.69ms +[2025-09-11 08:08:57] [Rank 0] step:1881/10000 train_time:100997ms step_avg:53.69ms +[2025-09-11 08:08:58] [Rank 0] step:1901/10000 train_time:101642ms step_avg:53.47ms +[2025-09-11 08:08:58] [Rank 0] step:1901/10000 train_time:101642ms step_avg:53.47ms +[2025-09-11 08:08:59] [Rank 0] step:1921/10000 train_time:102289ms step_avg:53.25ms +[2025-09-11 08:08:59] [Rank 0] 
step:1921/10000 train_time:102289ms step_avg:53.25ms +[2025-09-11 08:08:59] [Rank 0] step:1941/10000 train_time:102935ms step_avg:53.03ms +[2025-09-11 08:08:59] [Rank 0] step:1941/10000 train_time:102935ms step_avg:53.03ms +[2025-09-11 08:09:00] [Rank 0] step:1961/10000 train_time:103581ms step_avg:52.82ms +[2025-09-11 08:09:00] [Rank 0] step:1961/10000 train_time:103581ms step_avg:52.82ms +[2025-09-11 08:09:00] [Rank 0] step:1981/10000 train_time:104227ms step_avg:52.61ms +[2025-09-11 08:09:00] [Rank 0] step:1981/10000 train_time:104227ms step_avg:52.61ms +[2025-09-11 08:09:01] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 08:09:01] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 08:09:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 08:09:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 08:09:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 08:09:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 08:09:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:09:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:09:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 08:09:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 08:09:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 08:09:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 08:09:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 08:09:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 08:09:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 08:09:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 08:09:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 08:09:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 08:09:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 08:09:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 08:09:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 08:09:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 08:09:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 08:09:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 08:09:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 08:09:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 08:09:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 08:09:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 08:09:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 08:09:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 08:09:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 08:09:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 08:09:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 08:09:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 08:09:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 08:09:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 08:09:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 08:09:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 08:09:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 08:09:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 08:09:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 08:09:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 08:09:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 08:09:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 08:09:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 08:09:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:09:11] [Rank 0] PRINT: step:2000/10000 val_loss:6.0721 total_sharp:9.2602e-03 L1_sharp:3.1370e-03 L2_sharp:1.5798e-03 L3_sharp:1.1903e-03 L4_sharp:9.0802e-04 L5_sharp:2.4962e-03 L6_sharp:8.4835e-04 L7_sharp:8.0794e-04 L8_sharp:1.5011e-03 L9_sharp:1.0174e-03 L10_sharp:9.5341e-04 L11_sharp:1.5184e-03 L12_sharp:4.9859e-03 total_fnorm:8.5625e+00 total_l1_linf:2.4960e+04 total_spectral:4.2812e+00 L1_fnorm:2.5625e+00 L2_fnorm:2.5312e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.4375e+00 L6_fnorm:2.5000e+00 L7_fnorm:2.4844e+00 L8_fnorm:2.4062e+00 L9_fnorm:2.3750e+00 L10_fnorm:2.3438e+00 L11_fnorm:2.2188e+00 L12_fnorm:1.9141e+00 L1_l1linf:7.2656e-01 L2_l1linf:6.7578e-01 L3_l1linf:6.8750e-01 L4_l1linf:6.9531e-01 L5_l1linf:6.7578e-01 L6_l1linf:6.7969e-01 L7_l1linf:6.8750e-01 L8_l1linf:6.9141e-01 L9_l1linf:6.6797e-01 L10_l1linf:6.4062e-01 L11_l1linf:5.8203e-01 L12_l1linf:4.6094e-01 L1_spectral:2.9809e-02 L2_spectral:2.8985e-02 L3_spectral:2.8935e-02 L4_spectral:2.9044e-02 L5_spectral:2.8710e-02 L6_spectral:2.8662e-02 L7_spectral:2.8276e-02 L8_spectral:2.8571e-02 L9_spectral:2.8816e-02 L10_spectral:2.9051e-02 L11_spectral:2.9110e-02 L12_spectral:2.8118e-02 train_time:104855ms step_avg:52.43ms +[2025-09-11 08:09:11] [Rank 0] PRINT: step:2000/10000 val_loss:6.0721 total_sharp:9.2602e-03 L1_sharp:3.1370e-03 L2_sharp:1.5798e-03 L3_sharp:1.1903e-03 L4_sharp:9.0802e-04 L5_sharp:2.4962e-03 L6_sharp:8.4835e-04 L7_sharp:8.0794e-04 L8_sharp:1.5011e-03 L9_sharp:1.0174e-03 L10_sharp:9.5341e-04 L11_sharp:1.5184e-03 L12_sharp:4.9859e-03 total_fnorm:8.5625e+00 total_l1_linf:2.4960e+04 total_spectral:4.2812e+00 L1_fnorm:2.5625e+00 L2_fnorm:2.5312e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.4375e+00 L6_fnorm:2.5000e+00 L7_fnorm:2.4844e+00 L8_fnorm:2.4062e+00 L9_fnorm:2.3750e+00 L10_fnorm:2.3438e+00 L11_fnorm:2.2188e+00 L12_fnorm:1.9141e+00 L1_l1linf:7.2656e-01 L2_l1linf:6.7578e-01 L3_l1linf:6.8750e-01 L4_l1linf:6.9531e-01 L5_l1linf:6.7578e-01 
L6_l1linf:6.7969e-01 L7_l1linf:6.8750e-01 L8_l1linf:6.9141e-01 L9_l1linf:6.6797e-01 L10_l1linf:6.4062e-01 L11_l1linf:5.8203e-01 L12_l1linf:4.6094e-01 L1_spectral:2.9809e-02 L2_spectral:2.8985e-02 L3_spectral:2.8935e-02 L4_spectral:2.9044e-02 L5_spectral:2.8710e-02 L6_spectral:2.8662e-02 L7_spectral:2.8276e-02 L8_spectral:2.8571e-02 L9_spectral:2.8816e-02 L10_spectral:2.9051e-02 L11_spectral:2.9110e-02 L12_spectral:2.8118e-02 train_time:104855ms step_avg:52.43ms +[2025-09-11 08:09:13] [Rank 0] step:2001/10000 train_time:106365ms step_avg:53.16ms +[2025-09-11 08:09:13] [Rank 0] step:2001/10000 train_time:106365ms step_avg:53.16ms +[2025-09-11 08:09:14] [Rank 0] step:2021/10000 train_time:107277ms step_avg:53.08ms +[2025-09-11 08:09:14] [Rank 0] step:2021/10000 train_time:107277ms step_avg:53.08ms +[2025-09-11 08:09:14] [Rank 0] step:2041/10000 train_time:107923ms step_avg:52.88ms +[2025-09-11 08:09:14] [Rank 0] step:2041/10000 train_time:107923ms step_avg:52.88ms +[2025-09-11 08:09:15] [Rank 0] step:2061/10000 train_time:108570ms step_avg:52.68ms +[2025-09-11 08:09:15] [Rank 0] step:2061/10000 train_time:108570ms step_avg:52.68ms +[2025-09-11 08:09:16] [Rank 0] step:2081/10000 train_time:109216ms step_avg:52.48ms +[2025-09-11 08:09:16] [Rank 0] step:2081/10000 train_time:109216ms step_avg:52.48ms +[2025-09-11 08:09:16] [Rank 0] step:2101/10000 train_time:109863ms step_avg:52.29ms +[2025-09-11 08:09:16] [Rank 0] step:2101/10000 train_time:109863ms step_avg:52.29ms +[2025-09-11 08:09:17] [Rank 0] step:2121/10000 train_time:110508ms step_avg:52.10ms +[2025-09-11 08:09:17] [Rank 0] step:2121/10000 train_time:110508ms step_avg:52.10ms +[2025-09-11 08:09:17] [Rank 0] step:2141/10000 train_time:111154ms step_avg:51.92ms +[2025-09-11 08:09:17] [Rank 0] step:2141/10000 train_time:111154ms step_avg:51.92ms +[2025-09-11 08:09:18] [Rank 0] step:2161/10000 train_time:111800ms step_avg:51.74ms +[2025-09-11 08:09:18] [Rank 0] step:2161/10000 train_time:111800ms step_avg:51.74ms 
+[2025-09-11 08:09:19] [Rank 0] step:2181/10000 train_time:112445ms step_avg:51.56ms +[2025-09-11 08:09:19] [Rank 0] step:2181/10000 train_time:112445ms step_avg:51.56ms +[2025-09-11 08:09:19] [Rank 0] step:2201/10000 train_time:113090ms step_avg:51.38ms +[2025-09-11 08:09:19] [Rank 0] step:2201/10000 train_time:113090ms step_avg:51.38ms +[2025-09-11 08:09:20] [Rank 0] step:2221/10000 train_time:113736ms step_avg:51.21ms +[2025-09-11 08:09:20] [Rank 0] step:2221/10000 train_time:113736ms step_avg:51.21ms +[2025-09-11 08:09:21] [Rank 0] step:2241/10000 train_time:114393ms step_avg:51.05ms +[2025-09-11 08:09:21] [Rank 0] step:2241/10000 train_time:114393ms step_avg:51.05ms +[2025-09-11 08:09:21] [Rank 0] step:2261/10000 train_time:115051ms step_avg:50.89ms +[2025-09-11 08:09:21] [Rank 0] step:2261/10000 train_time:115051ms step_avg:50.89ms +[2025-09-11 08:09:22] [Rank 0] step:2281/10000 train_time:115710ms step_avg:50.73ms +[2025-09-11 08:09:22] [Rank 0] step:2281/10000 train_time:115710ms step_avg:50.73ms +[2025-09-11 08:09:23] [Rank 0] step:2301/10000 train_time:116368ms step_avg:50.57ms +[2025-09-11 08:09:23] [Rank 0] step:2301/10000 train_time:116368ms step_avg:50.57ms +[2025-09-11 08:09:23] [Rank 0] step:2321/10000 train_time:117027ms step_avg:50.42ms +[2025-09-11 08:09:23] [Rank 0] step:2321/10000 train_time:117027ms step_avg:50.42ms +[2025-09-11 08:09:24] [Rank 0] step:2341/10000 train_time:117685ms step_avg:50.27ms +[2025-09-11 08:09:24] [Rank 0] step:2341/10000 train_time:117685ms step_avg:50.27ms +[2025-09-11 08:09:25] [Rank 0] step:2361/10000 train_time:118343ms step_avg:50.12ms +[2025-09-11 08:09:25] [Rank 0] step:2361/10000 train_time:118343ms step_avg:50.12ms +[2025-09-11 08:09:25] [Rank 0] step:2381/10000 train_time:119003ms step_avg:49.98ms +[2025-09-11 08:09:25] [Rank 0] step:2381/10000 train_time:119003ms step_avg:49.98ms +[2025-09-11 08:09:26] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 08:09:26] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 08:09:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 08:09:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 08:09:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 08:09:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 08:09:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:09:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:09:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 08:09:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 08:09:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 08:09:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 08:09:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 08:09:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 08:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 08:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 08:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 08:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 08:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 08:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 08:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 08:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 08:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 08:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 08:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 08:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 08:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 08:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 08:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 08:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 08:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 08:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 08:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 08:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 08:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 08:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 08:09:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 08:09:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 08:09:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 08:09:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 08:09:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 08:09:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 08:09:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 08:09:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 08:09:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 08:09:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 08:09:36] [Rank 0] PRINT: step:2400/10000 val_loss:5.9520 total_sharp:8.1975e-03 L1_sharp:2.3377e-03 L2_sharp:1.7793e-03 L3_sharp:1.1004e-03 L4_sharp:6.8127e-04 L5_sharp:1.8871e-03 L6_sharp:6.8701e-04 L7_sharp:6.8902e-04 L8_sharp:1.2738e-03 L9_sharp:9.5663e-04 L10_sharp:9.4071e-04 L11_sharp:1.1790e-03 L12_sharp:3.6500e-03 total_fnorm:8.6875e+00 total_l1_linf:2.4576e+04 total_spectral:4.3125e+00 L1_fnorm:2.5469e+00 L2_fnorm:2.5156e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.4688e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.4844e+00 L8_fnorm:2.4375e+00 L9_fnorm:2.4219e+00 L10_fnorm:2.3906e+00 L11_fnorm:2.3125e+00 L12_fnorm:2.0312e+00 L1_l1linf:7.3047e-01 L2_l1linf:6.6406e-01 L3_l1linf:6.6797e-01 L4_l1linf:6.6406e-01 L5_l1linf:6.5234e-01 L6_l1linf:6.4844e-01 L7_l1linf:6.5625e-01 L8_l1linf:6.6797e-01 L9_l1linf:6.7188e-01 L10_l1linf:6.4844e-01 L11_l1linf:5.9766e-01 L12_l1linf:4.9219e-01 L1_spectral:3.0428e-02 L2_spectral:2.9538e-02 L3_spectral:2.9287e-02 L4_spectral:2.9404e-02 L5_spectral:2.9486e-02 L6_spectral:2.9113e-02 L7_spectral:2.8877e-02 L8_spectral:2.8765e-02 L9_spectral:2.8966e-02 L10_spectral:2.9166e-02 L11_spectral:2.9373e-02 L12_spectral:2.8785e-02 train_time:119641ms step_avg:49.85ms +[2025-09-11 08:09:36] [Rank 0] PRINT: step:2400/10000 
val_loss:5.9520 total_sharp:8.1975e-03 L1_sharp:2.3377e-03 L2_sharp:1.7793e-03 L3_sharp:1.1004e-03 L4_sharp:6.8127e-04 L5_sharp:1.8871e-03 L6_sharp:6.8701e-04 L7_sharp:6.8902e-04 L8_sharp:1.2738e-03 L9_sharp:9.5663e-04 L10_sharp:9.4071e-04 L11_sharp:1.1790e-03 L12_sharp:3.6500e-03 total_fnorm:8.6875e+00 total_l1_linf:2.4576e+04 total_spectral:4.3125e+00 L1_fnorm:2.5469e+00 L2_fnorm:2.5156e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.4688e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.4844e+00 L8_fnorm:2.4375e+00 L9_fnorm:2.4219e+00 L10_fnorm:2.3906e+00 L11_fnorm:2.3125e+00 L12_fnorm:2.0312e+00 L1_l1linf:7.3047e-01 L2_l1linf:6.6406e-01 L3_l1linf:6.6797e-01 L4_l1linf:6.6406e-01 L5_l1linf:6.5234e-01 L6_l1linf:6.4844e-01 L7_l1linf:6.5625e-01 L8_l1linf:6.6797e-01 L9_l1linf:6.7188e-01 L10_l1linf:6.4844e-01 L11_l1linf:5.9766e-01 L12_l1linf:4.9219e-01 L1_spectral:3.0428e-02 L2_spectral:2.9538e-02 L3_spectral:2.9287e-02 L4_spectral:2.9404e-02 L5_spectral:2.9486e-02 L6_spectral:2.9113e-02 L7_spectral:2.8877e-02 L8_spectral:2.8765e-02 L9_spectral:2.8966e-02 L10_spectral:2.9166e-02 L11_spectral:2.9373e-02 L12_spectral:2.8785e-02 train_time:119641ms step_avg:49.85ms +[2025-09-11 08:09:38] [Rank 0] step:2401/10000 train_time:121266ms step_avg:50.51ms +[2025-09-11 08:09:38] [Rank 0] step:2401/10000 train_time:121266ms step_avg:50.51ms +[2025-09-11 08:09:39] [Rank 0] step:2421/10000 train_time:122254ms step_avg:50.50ms +[2025-09-11 08:09:39] [Rank 0] step:2421/10000 train_time:122254ms step_avg:50.50ms +[2025-09-11 08:09:39] [Rank 0] step:2441/10000 train_time:122916ms step_avg:50.35ms +[2025-09-11 08:09:39] [Rank 0] step:2441/10000 train_time:122916ms step_avg:50.35ms +[2025-09-11 08:09:40] [Rank 0] step:2461/10000 train_time:123575ms step_avg:50.21ms +[2025-09-11 08:09:40] [Rank 0] step:2461/10000 train_time:123575ms step_avg:50.21ms +[2025-09-11 08:09:41] [Rank 0] step:2481/10000 train_time:124236ms step_avg:50.07ms +[2025-09-11 08:09:41] [Rank 0] step:2481/10000 
train_time:124236ms step_avg:50.07ms +[2025-09-11 08:09:41] [Rank 0] step:2501/10000 train_time:124896ms step_avg:49.94ms +[2025-09-11 08:09:41] [Rank 0] step:2501/10000 train_time:124896ms step_avg:49.94ms +[2025-09-11 08:09:42] [Rank 0] step:2521/10000 train_time:125556ms step_avg:49.80ms +[2025-09-11 08:09:42] [Rank 0] step:2521/10000 train_time:125556ms step_avg:49.80ms +[2025-09-11 08:09:43] [Rank 0] step:2541/10000 train_time:126215ms step_avg:49.67ms +[2025-09-11 08:09:43] [Rank 0] step:2541/10000 train_time:126215ms step_avg:49.67ms +[2025-09-11 08:09:43] [Rank 0] step:2561/10000 train_time:126874ms step_avg:49.54ms +[2025-09-11 08:09:43] [Rank 0] step:2561/10000 train_time:126874ms step_avg:49.54ms +[2025-09-11 08:09:44] [Rank 0] step:2581/10000 train_time:127534ms step_avg:49.41ms +[2025-09-11 08:09:44] [Rank 0] step:2581/10000 train_time:127534ms step_avg:49.41ms +[2025-09-11 08:09:45] [Rank 0] step:2601/10000 train_time:128193ms step_avg:49.29ms +[2025-09-11 08:09:45] [Rank 0] step:2601/10000 train_time:128193ms step_avg:49.29ms +[2025-09-11 08:09:45] [Rank 0] step:2621/10000 train_time:128852ms step_avg:49.16ms +[2025-09-11 08:09:45] [Rank 0] step:2621/10000 train_time:128852ms step_avg:49.16ms +[2025-09-11 08:09:46] [Rank 0] step:2641/10000 train_time:129517ms step_avg:49.04ms +[2025-09-11 08:09:46] [Rank 0] step:2641/10000 train_time:129517ms step_avg:49.04ms +[2025-09-11 08:09:47] [Rank 0] step:2661/10000 train_time:130177ms step_avg:48.92ms +[2025-09-11 08:09:47] [Rank 0] step:2661/10000 train_time:130177ms step_avg:48.92ms +[2025-09-11 08:09:47] [Rank 0] step:2681/10000 train_time:130836ms step_avg:48.80ms +[2025-09-11 08:09:47] [Rank 0] step:2681/10000 train_time:130836ms step_avg:48.80ms +[2025-09-11 08:09:48] [Rank 0] step:2701/10000 train_time:131495ms step_avg:48.68ms +[2025-09-11 08:09:48] [Rank 0] step:2701/10000 train_time:131495ms step_avg:48.68ms +[2025-09-11 08:09:49] [Rank 0] step:2721/10000 train_time:132156ms step_avg:48.57ms 
+[2025-09-11 08:09:49] [Rank 0] step:2721/10000 train_time:132156ms step_avg:48.57ms +[2025-09-11 08:09:49] [Rank 0] step:2741/10000 train_time:132815ms step_avg:48.46ms +[2025-09-11 08:09:49] [Rank 0] step:2741/10000 train_time:132815ms step_avg:48.46ms +[2025-09-11 08:09:50] [Rank 0] step:2761/10000 train_time:133476ms step_avg:48.34ms +[2025-09-11 08:09:50] [Rank 0] step:2761/10000 train_time:133476ms step_avg:48.34ms +[2025-09-11 08:09:51] [Rank 0] step:2781/10000 train_time:134135ms step_avg:48.23ms +[2025-09-11 08:09:51] [Rank 0] step:2781/10000 train_time:134135ms step_avg:48.23ms +[2025-09-11 08:09:51] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 08:09:51] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 08:09:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 08:09:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 08:09:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 08:09:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 08:09:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:09:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:09:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 08:09:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 08:09:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 08:09:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 08:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 08:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 08:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 08:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 08:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 08:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 08:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 08:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 08:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 08:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 08:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 08:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 08:09:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 08:09:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 08:09:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 08:09:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 08:09:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 08:09:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 08:09:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 08:09:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 08:10:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 08:10:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 08:10:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 08:10:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 08:10:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 08:10:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 08:10:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 08:10:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 08:10:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 08:10:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 08:10:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 08:10:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 08:10:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 08:10:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:10:01] [Rank 0] PRINT: step:2800/10000 val_loss:5.8732 total_sharp:9.4401e-03 L1_sharp:2.4458e-03 L2_sharp:1.8372e-03 L3_sharp:8.5543e-04 L4_sharp:6.0275e-04 L5_sharp:1.8755e-03 L6_sharp:8.2779e-04 L7_sharp:7.9908e-04 L8_sharp:1.2831e-03 L9_sharp:1.0489e-03 L10_sharp:1.1316e-03 L11_sharp:1.6000e-03 L12_sharp:6.4120e-03 total_fnorm:8.5625e+00 total_l1_linf:2.3936e+04 total_spectral:4.3125e+00 L1_fnorm:2.5469e+00 L2_fnorm:2.5312e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5312e+00 L5_fnorm:2.4688e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5000e+00 L8_fnorm:2.4531e+00 L9_fnorm:2.4375e+00 L10_fnorm:2.4219e+00 L11_fnorm:2.3125e+00 L12_fnorm:1.9844e+00 L1_l1linf:7.0312e-01 L2_l1linf:6.5625e-01 L3_l1linf:6.5625e-01 L4_l1linf:6.5234e-01 L5_l1linf:6.4844e-01 L6_l1linf:6.3672e-01 L7_l1linf:6.4062e-01 L8_l1linf:6.5625e-01 L9_l1linf:6.6016e-01 L10_l1linf:6.4453e-01 L11_l1linf:5.7422e-01 L12_l1linf:4.6875e-01 L1_spectral:3.0690e-02 L2_spectral:2.9734e-02 L3_spectral:2.9710e-02 L4_spectral:2.9828e-02 L5_spectral:2.9862e-02 L6_spectral:2.9920e-02 L7_spectral:2.9410e-02 L8_spectral:2.9302e-02 L9_spectral:2.9176e-02 L10_spectral:2.9409e-02 L11_spectral:2.9571e-02 L12_spectral:2.8713e-02 train_time:134776ms step_avg:48.13ms +[2025-09-11 08:10:01] [Rank 0] PRINT: step:2800/10000 val_loss:5.8732 total_sharp:9.4401e-03 L1_sharp:2.4458e-03 L2_sharp:1.8372e-03 L3_sharp:8.5543e-04 L4_sharp:6.0275e-04 L5_sharp:1.8755e-03 L6_sharp:8.2779e-04 L7_sharp:7.9908e-04 L8_sharp:1.2831e-03 L9_sharp:1.0489e-03 L10_sharp:1.1316e-03 L11_sharp:1.6000e-03 L12_sharp:6.4120e-03 total_fnorm:8.5625e+00 total_l1_linf:2.3936e+04 total_spectral:4.3125e+00 L1_fnorm:2.5469e+00 L2_fnorm:2.5312e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5312e+00 L5_fnorm:2.4688e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5000e+00 L8_fnorm:2.4531e+00 L9_fnorm:2.4375e+00 L10_fnorm:2.4219e+00 L11_fnorm:2.3125e+00 L12_fnorm:1.9844e+00 L1_l1linf:7.0312e-01 L2_l1linf:6.5625e-01 L3_l1linf:6.5625e-01 L4_l1linf:6.5234e-01 L5_l1linf:6.4844e-01 
L6_l1linf:6.3672e-01 L7_l1linf:6.4062e-01 L8_l1linf:6.5625e-01 L9_l1linf:6.6016e-01 L10_l1linf:6.4453e-01 L11_l1linf:5.7422e-01 L12_l1linf:4.6875e-01 L1_spectral:3.0690e-02 L2_spectral:2.9734e-02 L3_spectral:2.9710e-02 L4_spectral:2.9828e-02 L5_spectral:2.9862e-02 L6_spectral:2.9920e-02 L7_spectral:2.9410e-02 L8_spectral:2.9302e-02 L9_spectral:2.9176e-02 L10_spectral:2.9409e-02 L11_spectral:2.9571e-02 L12_spectral:2.8713e-02 train_time:134776ms step_avg:48.13ms +[2025-09-11 08:10:03] [Rank 0] step:2801/10000 train_time:136479ms step_avg:48.73ms +[2025-09-11 08:10:03] [Rank 0] step:2801/10000 train_time:136479ms step_avg:48.73ms +[2025-09-11 08:10:04] [Rank 0] step:2821/10000 train_time:137147ms step_avg:48.62ms +[2025-09-11 08:10:04] [Rank 0] step:2821/10000 train_time:137147ms step_avg:48.62ms +[2025-09-11 08:10:04] [Rank 0] step:2841/10000 train_time:137807ms step_avg:48.51ms +[2025-09-11 08:10:04] [Rank 0] step:2841/10000 train_time:137807ms step_avg:48.51ms +[2025-09-11 08:10:05] [Rank 0] step:2861/10000 train_time:138468ms step_avg:48.40ms +[2025-09-11 08:10:05] [Rank 0] step:2861/10000 train_time:138468ms step_avg:48.40ms +[2025-09-11 08:10:06] [Rank 0] step:2881/10000 train_time:139128ms step_avg:48.29ms +[2025-09-11 08:10:06] [Rank 0] step:2881/10000 train_time:139128ms step_avg:48.29ms +[2025-09-11 08:10:06] [Rank 0] step:2901/10000 train_time:139788ms step_avg:48.19ms +[2025-09-11 08:10:06] [Rank 0] step:2901/10000 train_time:139788ms step_avg:48.19ms +[2025-09-11 08:10:07] [Rank 0] step:2921/10000 train_time:140448ms step_avg:48.08ms +[2025-09-11 08:10:07] [Rank 0] step:2921/10000 train_time:140448ms step_avg:48.08ms +[2025-09-11 08:10:08] [Rank 0] step:2941/10000 train_time:141107ms step_avg:47.98ms +[2025-09-11 08:10:08] [Rank 0] step:2941/10000 train_time:141107ms step_avg:47.98ms +[2025-09-11 08:10:08] [Rank 0] step:2961/10000 train_time:141766ms step_avg:47.88ms +[2025-09-11 08:10:08] [Rank 0] step:2961/10000 train_time:141766ms step_avg:47.88ms 
+[2025-09-11 08:10:09] [Rank 0] step:2981/10000 train_time:142428ms step_avg:47.78ms +[2025-09-11 08:10:09] [Rank 0] step:2981/10000 train_time:142428ms step_avg:47.78ms +[2025-09-11 08:10:10] [Rank 0] step:3001/10000 train_time:143090ms step_avg:47.68ms +[2025-09-11 08:10:10] [Rank 0] step:3001/10000 train_time:143090ms step_avg:47.68ms +[2025-09-11 08:10:10] [Rank 0] step:3021/10000 train_time:143752ms step_avg:47.58ms +[2025-09-11 08:10:10] [Rank 0] step:3021/10000 train_time:143752ms step_avg:47.58ms +[2025-09-11 08:10:11] [Rank 0] step:3041/10000 train_time:144415ms step_avg:47.49ms +[2025-09-11 08:10:11] [Rank 0] step:3041/10000 train_time:144415ms step_avg:47.49ms +[2025-09-11 08:10:12] [Rank 0] step:3061/10000 train_time:145077ms step_avg:47.40ms +[2025-09-11 08:10:12] [Rank 0] step:3061/10000 train_time:145077ms step_avg:47.40ms +[2025-09-11 08:10:12] [Rank 0] step:3081/10000 train_time:145739ms step_avg:47.30ms +[2025-09-11 08:10:12] [Rank 0] step:3081/10000 train_time:145739ms step_avg:47.30ms +[2025-09-11 08:10:13] [Rank 0] step:3101/10000 train_time:146401ms step_avg:47.21ms +[2025-09-11 08:10:13] [Rank 0] step:3101/10000 train_time:146401ms step_avg:47.21ms +[2025-09-11 08:10:14] [Rank 0] step:3121/10000 train_time:147337ms step_avg:47.21ms +[2025-09-11 08:10:14] [Rank 0] step:3121/10000 train_time:147337ms step_avg:47.21ms +[2025-09-11 08:10:15] [Rank 0] step:3141/10000 train_time:148254ms step_avg:47.20ms +[2025-09-11 08:10:15] [Rank 0] step:3141/10000 train_time:148254ms step_avg:47.20ms +[2025-09-11 08:10:15] [Rank 0] step:3161/10000 train_time:148915ms step_avg:47.11ms +[2025-09-11 08:10:15] [Rank 0] step:3161/10000 train_time:148915ms step_avg:47.11ms +[2025-09-11 08:10:16] [Rank 0] step:3181/10000 train_time:149877ms step_avg:47.12ms +[2025-09-11 08:10:16] [Rank 0] step:3181/10000 train_time:149877ms step_avg:47.12ms +[2025-09-11 08:10:17] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 08:10:17] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 08:10:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 08:10:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 08:10:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 08:10:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 08:10:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:10:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:10:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 08:10:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 08:10:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 08:10:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 08:10:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 08:10:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 08:10:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 08:10:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 08:10:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 08:10:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 08:10:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 08:10:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 08:10:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 08:10:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 08:10:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 08:10:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 08:10:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 08:10:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 08:10:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 08:10:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 08:10:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 08:10:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 08:10:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 08:10:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 08:10:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 08:10:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 08:10:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 08:10:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 08:10:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 08:10:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 08:10:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 08:10:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 08:10:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 08:10:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 08:10:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 08:10:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 08:10:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 08:10:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 08:10:27] [Rank 0] PRINT: step:3200/10000 val_loss:5.7938 total_sharp:6.8910e-03 L1_sharp:1.8980e-03 L2_sharp:8.0427e-04 L3_sharp:7.6642e-04 L4_sharp:6.2873e-04 L5_sharp:1.5898e-03 L6_sharp:7.9771e-04 L7_sharp:6.8237e-04 L8_sharp:1.0737e-03 L9_sharp:9.7016e-04 L10_sharp:9.0281e-04 L11_sharp:1.1342e-03 L12_sharp:4.3266e-03 total_fnorm:8.7500e+00 total_l1_linf:2.3936e+04 total_spectral:4.3750e+00 L1_fnorm:2.5469e+00 L2_fnorm:2.5312e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.4688e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.4688e+00 L10_fnorm:2.4688e+00 L11_fnorm:2.3906e+00 L12_fnorm:2.1250e+00 L1_l1linf:6.9922e-01 L2_l1linf:6.3672e-01 L3_l1linf:6.3281e-01 L4_l1linf:6.3672e-01 L5_l1linf:6.1328e-01 L6_l1linf:6.1328e-01 L7_l1linf:6.2109e-01 L8_l1linf:6.4062e-01 L9_l1linf:6.4062e-01 L10_l1linf:6.3672e-01 L11_l1linf:5.9766e-01 L12_l1linf:4.7852e-01 L1_spectral:3.1211e-02 L2_spectral:3.0208e-02 L3_spectral:3.0295e-02 L4_spectral:3.0474e-02 L5_spectral:3.0456e-02 L6_spectral:3.0128e-02 L7_spectral:2.9938e-02 L8_spectral:2.9869e-02 L9_spectral:2.9553e-02 L10_spectral:2.9439e-02 L11_spectral:2.9838e-02 L12_spectral:2.9465e-02 train_time:150520ms step_avg:47.04ms +[2025-09-11 08:10:27] [Rank 0] PRINT: step:3200/10000 
val_loss:5.7938 total_sharp:6.8910e-03 L1_sharp:1.8980e-03 L2_sharp:8.0427e-04 L3_sharp:7.6642e-04 L4_sharp:6.2873e-04 L5_sharp:1.5898e-03 L6_sharp:7.9771e-04 L7_sharp:6.8237e-04 L8_sharp:1.0737e-03 L9_sharp:9.7016e-04 L10_sharp:9.0281e-04 L11_sharp:1.1342e-03 L12_sharp:4.3266e-03 total_fnorm:8.7500e+00 total_l1_linf:2.3936e+04 total_spectral:4.3750e+00 L1_fnorm:2.5469e+00 L2_fnorm:2.5312e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.4688e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.4688e+00 L10_fnorm:2.4688e+00 L11_fnorm:2.3906e+00 L12_fnorm:2.1250e+00 L1_l1linf:6.9922e-01 L2_l1linf:6.3672e-01 L3_l1linf:6.3281e-01 L4_l1linf:6.3672e-01 L5_l1linf:6.1328e-01 L6_l1linf:6.1328e-01 L7_l1linf:6.2109e-01 L8_l1linf:6.4062e-01 L9_l1linf:6.4062e-01 L10_l1linf:6.3672e-01 L11_l1linf:5.9766e-01 L12_l1linf:4.7852e-01 L1_spectral:3.1211e-02 L2_spectral:3.0208e-02 L3_spectral:3.0295e-02 L4_spectral:3.0474e-02 L5_spectral:3.0456e-02 L6_spectral:3.0128e-02 L7_spectral:2.9938e-02 L8_spectral:2.9869e-02 L9_spectral:2.9553e-02 L10_spectral:2.9439e-02 L11_spectral:2.9838e-02 L12_spectral:2.9465e-02 train_time:150520ms step_avg:47.04ms +[2025-09-11 08:10:29] [Rank 0] step:3201/10000 train_time:152148ms step_avg:47.53ms +[2025-09-11 08:10:29] [Rank 0] step:3201/10000 train_time:152148ms step_avg:47.53ms +[2025-09-11 08:10:30] [Rank 0] step:3221/10000 train_time:152861ms step_avg:47.46ms +[2025-09-11 08:10:30] [Rank 0] step:3221/10000 train_time:152861ms step_avg:47.46ms +[2025-09-11 08:10:30] [Rank 0] step:3241/10000 train_time:153525ms step_avg:47.37ms +[2025-09-11 08:10:30] [Rank 0] step:3241/10000 train_time:153525ms step_avg:47.37ms +[2025-09-11 08:10:31] [Rank 0] step:3261/10000 train_time:154188ms step_avg:47.28ms +[2025-09-11 08:10:31] [Rank 0] step:3261/10000 train_time:154188ms step_avg:47.28ms +[2025-09-11 08:10:32] [Rank 0] step:3281/10000 train_time:154852ms step_avg:47.20ms +[2025-09-11 08:10:32] [Rank 0] step:3281/10000 
train_time:154852ms step_avg:47.20ms +[2025-09-11 08:10:32] [Rank 0] step:3301/10000 train_time:155515ms step_avg:47.11ms +[2025-09-11 08:10:32] [Rank 0] step:3301/10000 train_time:155515ms step_avg:47.11ms +[2025-09-11 08:10:33] [Rank 0] step:3321/10000 train_time:156178ms step_avg:47.03ms +[2025-09-11 08:10:33] [Rank 0] step:3321/10000 train_time:156178ms step_avg:47.03ms +[2025-09-11 08:10:34] [Rank 0] step:3341/10000 train_time:156840ms step_avg:46.94ms +[2025-09-11 08:10:34] [Rank 0] step:3341/10000 train_time:156840ms step_avg:46.94ms +[2025-09-11 08:10:34] [Rank 0] step:3361/10000 train_time:157503ms step_avg:46.86ms +[2025-09-11 08:10:34] [Rank 0] step:3361/10000 train_time:157503ms step_avg:46.86ms +[2025-09-11 08:10:35] [Rank 0] step:3381/10000 train_time:158166ms step_avg:46.78ms +[2025-09-11 08:10:35] [Rank 0] step:3381/10000 train_time:158166ms step_avg:46.78ms +[2025-09-11 08:10:36] [Rank 0] step:3401/10000 train_time:158829ms step_avg:46.70ms +[2025-09-11 08:10:36] [Rank 0] step:3401/10000 train_time:158829ms step_avg:46.70ms +[2025-09-11 08:10:36] [Rank 0] step:3421/10000 train_time:159495ms step_avg:46.62ms +[2025-09-11 08:10:36] [Rank 0] step:3421/10000 train_time:159495ms step_avg:46.62ms +[2025-09-11 08:10:37] [Rank 0] step:3441/10000 train_time:160158ms step_avg:46.54ms +[2025-09-11 08:10:37] [Rank 0] step:3441/10000 train_time:160158ms step_avg:46.54ms +[2025-09-11 08:10:38] [Rank 0] step:3461/10000 train_time:160820ms step_avg:46.47ms +[2025-09-11 08:10:38] [Rank 0] step:3461/10000 train_time:160820ms step_avg:46.47ms +[2025-09-11 08:10:38] [Rank 0] step:3481/10000 train_time:161483ms step_avg:46.39ms +[2025-09-11 08:10:38] [Rank 0] step:3481/10000 train_time:161483ms step_avg:46.39ms +[2025-09-11 08:10:39] [Rank 0] step:3501/10000 train_time:162146ms step_avg:46.31ms +[2025-09-11 08:10:39] [Rank 0] step:3501/10000 train_time:162146ms step_avg:46.31ms +[2025-09-11 08:10:40] [Rank 0] step:3521/10000 train_time:162808ms step_avg:46.24ms 
+[2025-09-11 08:10:40] [Rank 0] step:3521/10000 train_time:162808ms step_avg:46.24ms +[2025-09-11 08:10:40] [Rank 0] step:3541/10000 train_time:163470ms step_avg:46.16ms +[2025-09-11 08:10:40] [Rank 0] step:3541/10000 train_time:163470ms step_avg:46.16ms +[2025-09-11 08:10:41] [Rank 0] step:3561/10000 train_time:164132ms step_avg:46.09ms +[2025-09-11 08:10:41] [Rank 0] step:3561/10000 train_time:164132ms step_avg:46.09ms +[2025-09-11 08:10:42] [Rank 0] step:3581/10000 train_time:164794ms step_avg:46.02ms +[2025-09-11 08:10:42] [Rank 0] step:3581/10000 train_time:164794ms step_avg:46.02ms +[2025-09-11 08:10:42] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 08:10:42] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 08:10:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 08:10:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 08:10:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 08:10:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 08:10:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:10:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:10:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 08:10:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 08:10:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 08:10:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 08:10:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 08:10:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 08:10:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 08:10:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 08:10:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 08:10:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 08:10:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 08:10:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 08:10:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 08:10:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 08:10:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 08:10:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 08:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 08:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 08:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 08:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 08:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 08:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 08:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 08:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 08:10:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 08:10:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 08:10:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 08:10:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 08:10:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 08:10:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 08:10:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 08:10:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 08:10:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 08:10:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 08:10:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 08:10:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 08:10:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 08:10:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:10:52] [Rank 0] PRINT: step:3600/10000 val_loss:5.7470 total_sharp:6.1423e-03 L1_sharp:1.3362e-03 L2_sharp:1.1659e-03 L3_sharp:6.9030e-04 L4_sharp:6.0177e-04 L5_sharp:1.2461e-03 L6_sharp:5.8334e-04 L7_sharp:6.1888e-04 L8_sharp:9.9772e-04 L9_sharp:8.6564e-04 L10_sharp:9.1062e-04 L11_sharp:1.1353e-03 L12_sharp:3.3161e-03 total_fnorm:8.6875e+00 total_l1_linf:2.3424e+04 total_spectral:4.3750e+00 L1_fnorm:2.5469e+00 L2_fnorm:2.5312e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5312e+00 L5_fnorm:2.4688e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5312e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5000e+00 L10_fnorm:2.4844e+00 L11_fnorm:2.4375e+00 L12_fnorm:2.1719e+00 L1_l1linf:6.9141e-01 L2_l1linf:6.2891e-01 L3_l1linf:6.2891e-01 L4_l1linf:6.2500e-01 L5_l1linf:5.9766e-01 L6_l1linf:5.8984e-01 L7_l1linf:6.0938e-01 L8_l1linf:6.1328e-01 L9_l1linf:6.1719e-01 L10_l1linf:6.3672e-01 L11_l1linf:5.8984e-01 L12_l1linf:4.8438e-01 L1_spectral:3.1451e-02 L2_spectral:3.0417e-02 L3_spectral:3.0427e-02 L4_spectral:3.0835e-02 L5_spectral:3.0673e-02 L6_spectral:3.0511e-02 L7_spectral:3.0469e-02 L8_spectral:3.0086e-02 L9_spectral:3.0052e-02 L10_spectral:2.9815e-02 L11_spectral:2.9852e-02 L12_spectral:2.9693e-02 train_time:165437ms step_avg:45.95ms +[2025-09-11 08:10:52] [Rank 0] PRINT: step:3600/10000 val_loss:5.7470 total_sharp:6.1423e-03 L1_sharp:1.3362e-03 L2_sharp:1.1659e-03 L3_sharp:6.9030e-04 L4_sharp:6.0177e-04 L5_sharp:1.2461e-03 L6_sharp:5.8334e-04 L7_sharp:6.1888e-04 L8_sharp:9.9772e-04 L9_sharp:8.6564e-04 L10_sharp:9.1062e-04 L11_sharp:1.1353e-03 L12_sharp:3.3161e-03 total_fnorm:8.6875e+00 total_l1_linf:2.3424e+04 total_spectral:4.3750e+00 L1_fnorm:2.5469e+00 L2_fnorm:2.5312e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5312e+00 L5_fnorm:2.4688e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5312e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5000e+00 L10_fnorm:2.4844e+00 L11_fnorm:2.4375e+00 L12_fnorm:2.1719e+00 L1_l1linf:6.9141e-01 L2_l1linf:6.2891e-01 L3_l1linf:6.2891e-01 L4_l1linf:6.2500e-01 L5_l1linf:5.9766e-01 
L6_l1linf:5.8984e-01 L7_l1linf:6.0938e-01 L8_l1linf:6.1328e-01 L9_l1linf:6.1719e-01 L10_l1linf:6.3672e-01 L11_l1linf:5.8984e-01 L12_l1linf:4.8438e-01 L1_spectral:3.1451e-02 L2_spectral:3.0417e-02 L3_spectral:3.0427e-02 L4_spectral:3.0835e-02 L5_spectral:3.0673e-02 L6_spectral:3.0511e-02 L7_spectral:3.0469e-02 L8_spectral:3.0086e-02 L9_spectral:3.0052e-02 L10_spectral:2.9815e-02 L11_spectral:2.9852e-02 L12_spectral:2.9693e-02 train_time:165437ms step_avg:45.95ms +[2025-09-11 08:10:54] [Rank 0] step:3601/10000 train_time:166788ms step_avg:46.32ms +[2025-09-11 08:10:54] [Rank 0] step:3601/10000 train_time:166788ms step_avg:46.32ms +[2025-09-11 08:10:54] [Rank 0] step:3621/10000 train_time:167457ms step_avg:46.25ms +[2025-09-11 08:10:54] [Rank 0] step:3621/10000 train_time:167457ms step_avg:46.25ms +[2025-09-11 08:10:55] [Rank 0] step:3641/10000 train_time:168120ms step_avg:46.17ms +[2025-09-11 08:10:55] [Rank 0] step:3641/10000 train_time:168120ms step_avg:46.17ms +[2025-09-11 08:10:56] [Rank 0] step:3661/10000 train_time:168783ms step_avg:46.10ms +[2025-09-11 08:10:56] [Rank 0] step:3661/10000 train_time:168783ms step_avg:46.10ms +[2025-09-11 08:10:56] [Rank 0] step:3681/10000 train_time:169445ms step_avg:46.03ms +[2025-09-11 08:10:56] [Rank 0] step:3681/10000 train_time:169445ms step_avg:46.03ms +[2025-09-11 08:10:57] [Rank 0] step:3701/10000 train_time:170107ms step_avg:45.96ms +[2025-09-11 08:10:57] [Rank 0] step:3701/10000 train_time:170107ms step_avg:45.96ms +[2025-09-11 08:10:58] [Rank 0] step:3721/10000 train_time:170779ms step_avg:45.90ms +[2025-09-11 08:10:58] [Rank 0] step:3721/10000 train_time:170779ms step_avg:45.90ms +[2025-09-11 08:10:58] [Rank 0] step:3741/10000 train_time:171453ms step_avg:45.83ms +[2025-09-11 08:10:58] [Rank 0] step:3741/10000 train_time:171453ms step_avg:45.83ms +[2025-09-11 08:10:59] [Rank 0] step:3761/10000 train_time:172128ms step_avg:45.77ms +[2025-09-11 08:10:59] [Rank 0] step:3761/10000 train_time:172128ms step_avg:45.77ms 
+[2025-09-11 08:11:00] [Rank 0] step:3781/10000 train_time:172801ms step_avg:45.70ms +[2025-09-11 08:11:00] [Rank 0] step:3781/10000 train_time:172801ms step_avg:45.70ms +[2025-09-11 08:11:00] [Rank 0] step:3801/10000 train_time:173476ms step_avg:45.64ms +[2025-09-11 08:11:00] [Rank 0] step:3801/10000 train_time:173476ms step_avg:45.64ms +[2025-09-11 08:11:01] [Rank 0] step:3821/10000 train_time:174150ms step_avg:45.58ms +[2025-09-11 08:11:01] [Rank 0] step:3821/10000 train_time:174150ms step_avg:45.58ms +[2025-09-11 08:11:02] [Rank 0] step:3841/10000 train_time:174824ms step_avg:45.52ms +[2025-09-11 08:11:02] [Rank 0] step:3841/10000 train_time:174824ms step_avg:45.52ms +[2025-09-11 08:11:02] [Rank 0] step:3861/10000 train_time:175498ms step_avg:45.45ms +[2025-09-11 08:11:02] [Rank 0] step:3861/10000 train_time:175498ms step_avg:45.45ms +[2025-09-11 08:11:03] [Rank 0] step:3881/10000 train_time:176171ms step_avg:45.39ms +[2025-09-11 08:11:03] [Rank 0] step:3881/10000 train_time:176171ms step_avg:45.39ms +[2025-09-11 08:11:04] [Rank 0] step:3901/10000 train_time:176844ms step_avg:45.33ms +[2025-09-11 08:11:04] [Rank 0] step:3901/10000 train_time:176844ms step_avg:45.33ms +[2025-09-11 08:11:04] [Rank 0] step:3921/10000 train_time:177517ms step_avg:45.27ms +[2025-09-11 08:11:04] [Rank 0] step:3921/10000 train_time:177517ms step_avg:45.27ms +[2025-09-11 08:11:05] [Rank 0] step:3941/10000 train_time:178190ms step_avg:45.21ms +[2025-09-11 08:11:05] [Rank 0] step:3941/10000 train_time:178190ms step_avg:45.21ms +[2025-09-11 08:11:06] [Rank 0] step:3961/10000 train_time:178864ms step_avg:45.16ms +[2025-09-11 08:11:06] [Rank 0] step:3961/10000 train_time:178864ms step_avg:45.16ms +[2025-09-11 08:11:06] [Rank 0] step:3981/10000 train_time:179536ms step_avg:45.10ms +[2025-09-11 08:11:06] [Rank 0] step:3981/10000 train_time:179536ms step_avg:45.10ms +[2025-09-11 08:11:07] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 08:11:07] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 08:11:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 08:11:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 08:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 08:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 08:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 08:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 08:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 08:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 08:11:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 08:11:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 08:11:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 08:11:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 08:11:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 08:11:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 08:11:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 08:11:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 08:11:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 08:11:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 08:11:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 08:11:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 08:11:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 08:11:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 08:11:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 08:11:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 08:11:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 08:11:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 08:11:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 08:11:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 08:11:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 08:11:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 08:11:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 08:11:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 08:11:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 08:11:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 08:11:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 08:11:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 08:11:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 08:11:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 08:11:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 08:11:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 08:11:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 08:11:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 08:11:17] [Rank 0] PRINT: step:4000/10000 val_loss:5.6924 total_sharp:8.5734e-03 L1_sharp:1.3835e-03 L2_sharp:1.2223e-03 L3_sharp:9.9984e-04 L4_sharp:6.1781e-04 L5_sharp:1.6439e-03 L6_sharp:7.0744e-04 L7_sharp:6.3603e-04 L8_sharp:1.1051e-03 L9_sharp:9.3363e-04 L10_sharp:1.1493e-03 L11_sharp:1.6621e-03 L12_sharp:5.2218e-03 total_fnorm:8.6875e+00 total_l1_linf:2.3040e+04 total_spectral:4.3438e+00 L1_fnorm:2.5312e+00 L2_fnorm:2.5312e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.4531e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4531e+00 L9_fnorm:2.4688e+00 L10_fnorm:2.4688e+00 L11_fnorm:2.3906e+00 L12_fnorm:2.1250e+00 L1_l1linf:6.7578e-01 L2_l1linf:6.3281e-01 L3_l1linf:6.1719e-01 L4_l1linf:6.1719e-01 L5_l1linf:6.0547e-01 L6_l1linf:5.8594e-01 L7_l1linf:5.9375e-01 L8_l1linf:6.0938e-01 L9_l1linf:6.0938e-01 L10_l1linf:6.2109e-01 L11_l1linf:5.6250e-01 L12_l1linf:4.5703e-01 L1_spectral:3.1632e-02 L2_spectral:3.0844e-02 L3_spectral:3.0606e-02 L4_spectral:3.0893e-02 L5_spectral:3.0860e-02 L6_spectral:3.0856e-02 L7_spectral:3.0541e-02 L8_spectral:3.0584e-02 L9_spectral:3.0063e-02 L10_spectral:3.0264e-02 L11_spectral:3.0105e-02 L12_spectral:2.9828e-02 train_time:180191ms step_avg:45.05ms +[2025-09-11 08:11:17] [Rank 0] PRINT: step:4000/10000 
val_loss:5.6924 total_sharp:8.5734e-03 L1_sharp:1.3835e-03 L2_sharp:1.2223e-03 L3_sharp:9.9984e-04 L4_sharp:6.1781e-04 L5_sharp:1.6439e-03 L6_sharp:7.0744e-04 L7_sharp:6.3603e-04 L8_sharp:1.1051e-03 L9_sharp:9.3363e-04 L10_sharp:1.1493e-03 L11_sharp:1.6621e-03 L12_sharp:5.2218e-03 total_fnorm:8.6875e+00 total_l1_linf:2.3040e+04 total_spectral:4.3438e+00 L1_fnorm:2.5312e+00 L2_fnorm:2.5312e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.4531e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4531e+00 L9_fnorm:2.4688e+00 L10_fnorm:2.4688e+00 L11_fnorm:2.3906e+00 L12_fnorm:2.1250e+00 L1_l1linf:6.7578e-01 L2_l1linf:6.3281e-01 L3_l1linf:6.1719e-01 L4_l1linf:6.1719e-01 L5_l1linf:6.0547e-01 L6_l1linf:5.8594e-01 L7_l1linf:5.9375e-01 L8_l1linf:6.0938e-01 L9_l1linf:6.0938e-01 L10_l1linf:6.2109e-01 L11_l1linf:5.6250e-01 L12_l1linf:4.5703e-01 L1_spectral:3.1632e-02 L2_spectral:3.0844e-02 L3_spectral:3.0606e-02 L4_spectral:3.0893e-02 L5_spectral:3.0860e-02 L6_spectral:3.0856e-02 L7_spectral:3.0541e-02 L8_spectral:3.0584e-02 L9_spectral:3.0063e-02 L10_spectral:3.0264e-02 L11_spectral:3.0105e-02 L12_spectral:2.9828e-02 train_time:180191ms step_avg:45.05ms +[2025-09-11 08:11:18] [Rank 0] step:4001/10000 train_time:181428ms step_avg:45.35ms +[2025-09-11 08:11:18] [Rank 0] step:4001/10000 train_time:181428ms step_avg:45.35ms +[2025-09-11 08:11:19] [Rank 0] step:4021/10000 train_time:182127ms step_avg:45.29ms +[2025-09-11 08:11:19] [Rank 0] step:4021/10000 train_time:182127ms step_avg:45.29ms +[2025-09-11 08:11:20] [Rank 0] step:4041/10000 train_time:183072ms step_avg:45.30ms +[2025-09-11 08:11:20] [Rank 0] step:4041/10000 train_time:183072ms step_avg:45.30ms +[2025-09-11 08:11:20] [Rank 0] step:4061/10000 train_time:183745ms step_avg:45.25ms +[2025-09-11 08:11:20] [Rank 0] step:4061/10000 train_time:183745ms step_avg:45.25ms +[2025-09-11 08:11:21] [Rank 0] step:4081/10000 train_time:184420ms step_avg:45.19ms +[2025-09-11 08:11:21] [Rank 0] step:4081/10000 
train_time:184420ms step_avg:45.19ms +[2025-09-11 08:11:22] [Rank 0] step:4101/10000 train_time:185094ms step_avg:45.13ms +[2025-09-11 08:11:22] [Rank 0] step:4101/10000 train_time:185094ms step_avg:45.13ms +[2025-09-11 08:11:22] [Rank 0] step:4121/10000 train_time:185768ms step_avg:45.08ms +[2025-09-11 08:11:22] [Rank 0] step:4121/10000 train_time:185768ms step_avg:45.08ms +[2025-09-11 08:11:23] [Rank 0] step:4141/10000 train_time:186442ms step_avg:45.02ms +[2025-09-11 08:11:23] [Rank 0] step:4141/10000 train_time:186442ms step_avg:45.02ms +[2025-09-11 08:11:24] [Rank 0] step:4161/10000 train_time:187117ms step_avg:44.97ms +[2025-09-11 08:11:24] [Rank 0] step:4161/10000 train_time:187117ms step_avg:44.97ms +[2025-09-11 08:11:24] [Rank 0] step:4181/10000 train_time:187791ms step_avg:44.92ms +[2025-09-11 08:11:24] [Rank 0] step:4181/10000 train_time:187791ms step_avg:44.92ms +[2025-09-11 08:11:25] [Rank 0] step:4201/10000 train_time:188465ms step_avg:44.86ms +[2025-09-11 08:11:25] [Rank 0] step:4201/10000 train_time:188465ms step_avg:44.86ms +[2025-09-11 08:11:26] [Rank 0] step:4221/10000 train_time:189141ms step_avg:44.81ms +[2025-09-11 08:11:26] [Rank 0] step:4221/10000 train_time:189141ms step_avg:44.81ms +[2025-09-11 08:11:26] [Rank 0] step:4241/10000 train_time:189815ms step_avg:44.76ms +[2025-09-11 08:11:26] [Rank 0] step:4241/10000 train_time:189815ms step_avg:44.76ms +[2025-09-11 08:11:27] [Rank 0] step:4261/10000 train_time:190489ms step_avg:44.71ms +[2025-09-11 08:11:27] [Rank 0] step:4261/10000 train_time:190489ms step_avg:44.71ms +[2025-09-11 08:11:28] [Rank 0] step:4281/10000 train_time:191165ms step_avg:44.65ms +[2025-09-11 08:11:28] [Rank 0] step:4281/10000 train_time:191165ms step_avg:44.65ms +[2025-09-11 08:11:28] [Rank 0] step:4301/10000 train_time:191840ms step_avg:44.60ms +[2025-09-11 08:11:28] [Rank 0] step:4301/10000 train_time:191840ms step_avg:44.60ms +[2025-09-11 08:11:29] [Rank 0] step:4321/10000 train_time:192515ms step_avg:44.55ms 
+[2025-09-11 08:11:29] [Rank 0] step:4321/10000 train_time:192515ms step_avg:44.55ms +[2025-09-11 08:11:30] [Rank 0] step:4341/10000 train_time:193188ms step_avg:44.50ms +[2025-09-11 08:11:30] [Rank 0] step:4341/10000 train_time:193188ms step_avg:44.50ms +[2025-09-11 08:11:30] [Rank 0] step:4361/10000 train_time:193862ms step_avg:44.45ms +[2025-09-11 08:11:30] [Rank 0] step:4361/10000 train_time:193862ms step_avg:44.45ms +[2025-09-11 08:11:31] [Rank 0] step:4381/10000 train_time:194537ms step_avg:44.40ms +[2025-09-11 08:11:31] [Rank 0] step:4381/10000 train_time:194537ms step_avg:44.40ms +[2025-09-11 08:11:32] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 08:11:32] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 08:11:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 08:11:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 08:11:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 08:11:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 08:11:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:11:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:11:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 08:11:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 08:11:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 08:11:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 08:11:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 08:11:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 08:11:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 08:11:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 08:11:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 08:11:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 08:11:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 08:11:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 08:11:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 08:11:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 08:11:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 08:11:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 08:11:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 08:11:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 08:11:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 08:11:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 08:11:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 08:11:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 08:11:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 08:11:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 08:11:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 08:11:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 08:11:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 08:11:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 08:11:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 08:11:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 08:11:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 08:11:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 08:11:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 08:11:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 08:11:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 08:11:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 08:11:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 08:11:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:11:44] [Rank 0] PRINT: step:4400/10000 val_loss:5.6521 total_sharp:7.1969e-03 L1_sharp:3.3633e-03 L2_sharp:1.1627e-03 L3_sharp:7.6005e-04 L4_sharp:4.9392e-04 L5_sharp:1.2515e-03 L6_sharp:5.7861e-04 L7_sharp:5.9672e-04 L8_sharp:7.9753e-04 L9_sharp:7.5878e-04 L10_sharp:8.8628e-04 L11_sharp:1.5156e-03 L12_sharp:6.5293e-03 total_fnorm:8.6250e+00 total_l1_linf:2.2528e+04 total_spectral:4.3438e+00 L1_fnorm:2.5312e+00 L2_fnorm:2.5312e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.4688e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5000e+00 L10_fnorm:2.4844e+00 L11_fnorm:2.4219e+00 L12_fnorm:2.1094e+00 L1_l1linf:6.7578e-01 L2_l1linf:6.2500e-01 L3_l1linf:6.0938e-01 L4_l1linf:6.1328e-01 L5_l1linf:5.8203e-01 L6_l1linf:5.7422e-01 L7_l1linf:5.8594e-01 L8_l1linf:6.0156e-01 L9_l1linf:6.0547e-01 L10_l1linf:6.0547e-01 L11_l1linf:5.5469e-01 L12_l1linf:4.2969e-01 L1_spectral:3.1850e-02 L2_spectral:3.0844e-02 L3_spectral:3.0848e-02 L4_spectral:3.1175e-02 L5_spectral:3.0927e-02 L6_spectral:3.0962e-02 L7_spectral:3.0916e-02 L8_spectral:3.0700e-02 L9_spectral:3.0424e-02 L10_spectral:3.0519e-02 L11_spectral:3.0199e-02 L12_spectral:2.9774e-02 train_time:195192ms step_avg:44.36ms +[2025-09-11 08:11:44] [Rank 0] PRINT: step:4400/10000 val_loss:5.6521 total_sharp:7.1969e-03 L1_sharp:3.3633e-03 L2_sharp:1.1627e-03 L3_sharp:7.6005e-04 L4_sharp:4.9392e-04 L5_sharp:1.2515e-03 L6_sharp:5.7861e-04 L7_sharp:5.9672e-04 L8_sharp:7.9753e-04 L9_sharp:7.5878e-04 L10_sharp:8.8628e-04 L11_sharp:1.5156e-03 L12_sharp:6.5293e-03 total_fnorm:8.6250e+00 total_l1_linf:2.2528e+04 total_spectral:4.3438e+00 L1_fnorm:2.5312e+00 L2_fnorm:2.5312e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.4688e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5000e+00 L10_fnorm:2.4844e+00 L11_fnorm:2.4219e+00 L12_fnorm:2.1094e+00 L1_l1linf:6.7578e-01 L2_l1linf:6.2500e-01 L3_l1linf:6.0938e-01 L4_l1linf:6.1328e-01 L5_l1linf:5.8203e-01 
L6_l1linf:5.7422e-01 L7_l1linf:5.8594e-01 L8_l1linf:6.0156e-01 L9_l1linf:6.0547e-01 L10_l1linf:6.0547e-01 L11_l1linf:5.5469e-01 L12_l1linf:4.2969e-01 L1_spectral:3.1850e-02 L2_spectral:3.0844e-02 L3_spectral:3.0848e-02 L4_spectral:3.1175e-02 L5_spectral:3.0927e-02 L6_spectral:3.0962e-02 L7_spectral:3.0916e-02 L8_spectral:3.0700e-02 L9_spectral:3.0424e-02 L10_spectral:3.0519e-02 L11_spectral:3.0199e-02 L12_spectral:2.9774e-02 train_time:195192ms step_avg:44.36ms +[2025-09-11 08:11:45] [Rank 0] step:4401/10000 train_time:196457ms step_avg:44.64ms +[2025-09-11 08:11:45] [Rank 0] step:4401/10000 train_time:196457ms step_avg:44.64ms +[2025-09-11 08:11:46] [Rank 0] step:4421/10000 train_time:197161ms step_avg:44.60ms +[2025-09-11 08:11:46] [Rank 0] step:4421/10000 train_time:197161ms step_avg:44.60ms +[2025-09-11 08:11:47] [Rank 0] step:4441/10000 train_time:197837ms step_avg:44.55ms +[2025-09-11 08:11:47] [Rank 0] step:4441/10000 train_time:197837ms step_avg:44.55ms +[2025-09-11 08:11:47] [Rank 0] step:4461/10000 train_time:198513ms step_avg:44.50ms +[2025-09-11 08:11:47] [Rank 0] step:4461/10000 train_time:198513ms step_avg:44.50ms +[2025-09-11 08:11:48] [Rank 0] step:4481/10000 train_time:199190ms step_avg:44.45ms +[2025-09-11 08:11:48] [Rank 0] step:4481/10000 train_time:199190ms step_avg:44.45ms +[2025-09-11 08:11:49] [Rank 0] step:4501/10000 train_time:199868ms step_avg:44.41ms +[2025-09-11 08:11:49] [Rank 0] step:4501/10000 train_time:199868ms step_avg:44.41ms +[2025-09-11 08:11:49] [Rank 0] step:4521/10000 train_time:200545ms step_avg:44.36ms +[2025-09-11 08:11:49] [Rank 0] step:4521/10000 train_time:200545ms step_avg:44.36ms +[2025-09-11 08:11:50] [Rank 0] step:4541/10000 train_time:201223ms step_avg:44.31ms +[2025-09-11 08:11:50] [Rank 0] step:4541/10000 train_time:201223ms step_avg:44.31ms +[2025-09-11 08:11:51] [Rank 0] step:4561/10000 train_time:201900ms step_avg:44.27ms +[2025-09-11 08:11:51] [Rank 0] step:4561/10000 train_time:201900ms step_avg:44.27ms 
+[2025-09-11 08:11:51] [Rank 0] step:4581/10000 train_time:202577ms step_avg:44.22ms +[2025-09-11 08:11:51] [Rank 0] step:4581/10000 train_time:202577ms step_avg:44.22ms +[2025-09-11 08:11:52] [Rank 0] step:4601/10000 train_time:203254ms step_avg:44.18ms +[2025-09-11 08:11:52] [Rank 0] step:4601/10000 train_time:203254ms step_avg:44.18ms +[2025-09-11 08:11:53] [Rank 0] step:4621/10000 train_time:203931ms step_avg:44.13ms +[2025-09-11 08:11:53] [Rank 0] step:4621/10000 train_time:203931ms step_avg:44.13ms +[2025-09-11 08:11:53] [Rank 0] step:4641/10000 train_time:204607ms step_avg:44.09ms +[2025-09-11 08:11:53] [Rank 0] step:4641/10000 train_time:204607ms step_avg:44.09ms +[2025-09-11 08:11:54] [Rank 0] step:4661/10000 train_time:205284ms step_avg:44.04ms +[2025-09-11 08:11:54] [Rank 0] step:4661/10000 train_time:205284ms step_avg:44.04ms +[2025-09-11 08:11:55] [Rank 0] step:4681/10000 train_time:205961ms step_avg:44.00ms +[2025-09-11 08:11:55] [Rank 0] step:4681/10000 train_time:205961ms step_avg:44.00ms +[2025-09-11 08:11:55] [Rank 0] step:4701/10000 train_time:206638ms step_avg:43.96ms +[2025-09-11 08:11:55] [Rank 0] step:4701/10000 train_time:206638ms step_avg:43.96ms +[2025-09-11 08:11:56] [Rank 0] step:4721/10000 train_time:207315ms step_avg:43.91ms +[2025-09-11 08:11:56] [Rank 0] step:4721/10000 train_time:207315ms step_avg:43.91ms +[2025-09-11 08:11:57] [Rank 0] step:4741/10000 train_time:207991ms step_avg:43.87ms +[2025-09-11 08:11:57] [Rank 0] step:4741/10000 train_time:207991ms step_avg:43.87ms +[2025-09-11 08:11:57] [Rank 0] step:4761/10000 train_time:208667ms step_avg:43.83ms +[2025-09-11 08:11:57] [Rank 0] step:4761/10000 train_time:208667ms step_avg:43.83ms +[2025-09-11 08:11:58] [Rank 0] step:4781/10000 train_time:209343ms step_avg:43.79ms +[2025-09-11 08:11:58] [Rank 0] step:4781/10000 train_time:209343ms step_avg:43.79ms +[2025-09-11 08:11:59] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 08:11:59] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 08:12:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 08:12:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 08:12:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 08:12:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 08:12:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:12:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:12:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 08:12:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 08:12:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 08:12:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 08:12:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 08:12:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 08:12:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 08:12:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 08:12:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 08:12:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 08:12:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 08:12:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 08:12:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 08:12:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 08:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 08:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 08:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 08:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 08:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 08:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 08:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 08:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 08:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 08:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 08:12:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 08:12:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 08:12:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 08:12:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 08:12:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 08:12:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 08:12:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 08:12:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 08:12:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 08:12:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 08:12:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 08:12:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 08:12:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 08:12:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 08:12:08] [Rank 0] PRINT: step:4800/10000 val_loss:5.6109 total_sharp:5.1567e-03 L1_sharp:2.6535e-03 L2_sharp:6.1078e-04 L3_sharp:6.2424e-04 L4_sharp:4.2359e-04 L5_sharp:9.1671e-04 L6_sharp:4.7247e-04 L7_sharp:5.0773e-04 L8_sharp:8.9815e-04 L9_sharp:7.0573e-04 L10_sharp:8.2491e-04 L11_sharp:1.0703e-03 L12_sharp:3.3807e-03 total_fnorm:8.6875e+00 total_l1_linf:2.2400e+04 total_spectral:4.3750e+00 L1_fnorm:2.5469e+00 L2_fnorm:2.5312e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.4688e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5000e+00 L10_fnorm:2.5000e+00 L11_fnorm:2.4531e+00 L12_fnorm:2.2188e+00 L1_l1linf:6.7188e-01 L2_l1linf:6.1328e-01 L3_l1linf:6.0547e-01 L4_l1linf:5.9766e-01 L5_l1linf:5.7812e-01 L6_l1linf:5.5859e-01 L7_l1linf:5.7031e-01 L8_l1linf:5.8203e-01 L9_l1linf:5.7422e-01 L10_l1linf:6.0547e-01 L11_l1linf:5.6641e-01 L12_l1linf:4.6094e-01 L1_spectral:3.1914e-02 L2_spectral:3.1236e-02 L3_spectral:3.1083e-02 L4_spectral:3.1355e-02 L5_spectral:3.1210e-02 L6_spectral:3.1600e-02 L7_spectral:3.1370e-02 L8_spectral:3.1037e-02 L9_spectral:3.0815e-02 L10_spectral:3.0919e-02 L11_spectral:3.0267e-02 L12_spectral:3.0326e-02 train_time:209999ms step_avg:43.75ms +[2025-09-11 08:12:08] [Rank 0] PRINT: step:4800/10000 
val_loss:5.6109 total_sharp:5.1567e-03 L1_sharp:2.6535e-03 L2_sharp:6.1078e-04 L3_sharp:6.2424e-04 L4_sharp:4.2359e-04 L5_sharp:9.1671e-04 L6_sharp:4.7247e-04 L7_sharp:5.0773e-04 L8_sharp:8.9815e-04 L9_sharp:7.0573e-04 L10_sharp:8.2491e-04 L11_sharp:1.0703e-03 L12_sharp:3.3807e-03 total_fnorm:8.6875e+00 total_l1_linf:2.2400e+04 total_spectral:4.3750e+00 L1_fnorm:2.5469e+00 L2_fnorm:2.5312e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.4688e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5000e+00 L10_fnorm:2.5000e+00 L11_fnorm:2.4531e+00 L12_fnorm:2.2188e+00 L1_l1linf:6.7188e-01 L2_l1linf:6.1328e-01 L3_l1linf:6.0547e-01 L4_l1linf:5.9766e-01 L5_l1linf:5.7812e-01 L6_l1linf:5.5859e-01 L7_l1linf:5.7031e-01 L8_l1linf:5.8203e-01 L9_l1linf:5.7422e-01 L10_l1linf:6.0547e-01 L11_l1linf:5.6641e-01 L12_l1linf:4.6094e-01 L1_spectral:3.1914e-02 L2_spectral:3.1236e-02 L3_spectral:3.1083e-02 L4_spectral:3.1355e-02 L5_spectral:3.1210e-02 L6_spectral:3.1600e-02 L7_spectral:3.1370e-02 L8_spectral:3.1037e-02 L9_spectral:3.0815e-02 L10_spectral:3.0919e-02 L11_spectral:3.0267e-02 L12_spectral:3.0326e-02 train_time:209999ms step_avg:43.75ms +[2025-09-11 08:12:10] [Rank 0] step:4801/10000 train_time:211251ms step_avg:44.00ms +[2025-09-11 08:12:10] [Rank 0] step:4801/10000 train_time:211251ms step_avg:44.00ms +[2025-09-11 08:12:10] [Rank 0] step:4821/10000 train_time:211973ms step_avg:43.97ms +[2025-09-11 08:12:10] [Rank 0] step:4821/10000 train_time:211973ms step_avg:43.97ms +[2025-09-11 08:12:11] [Rank 0] step:4841/10000 train_time:212652ms step_avg:43.93ms +[2025-09-11 08:12:11] [Rank 0] step:4841/10000 train_time:212652ms step_avg:43.93ms +[2025-09-11 08:12:12] [Rank 0] step:4861/10000 train_time:213330ms step_avg:43.89ms +[2025-09-11 08:12:12] [Rank 0] step:4861/10000 train_time:213330ms step_avg:43.89ms +[2025-09-11 08:12:12] [Rank 0] step:4881/10000 train_time:214007ms step_avg:43.84ms +[2025-09-11 08:12:12] [Rank 0] step:4881/10000 
train_time:214007ms step_avg:43.84ms +[2025-09-11 08:12:13] [Rank 0] step:4901/10000 train_time:214685ms step_avg:43.80ms +[2025-09-11 08:12:13] [Rank 0] step:4901/10000 train_time:214685ms step_avg:43.80ms +[2025-09-11 08:12:14] [Rank 0] step:4921/10000 train_time:215362ms step_avg:43.76ms +[2025-09-11 08:12:14] [Rank 0] step:4921/10000 train_time:215362ms step_avg:43.76ms +[2025-09-11 08:12:14] [Rank 0] step:4941/10000 train_time:216039ms step_avg:43.72ms +[2025-09-11 08:12:14] [Rank 0] step:4941/10000 train_time:216039ms step_avg:43.72ms +[2025-09-11 08:12:15] [Rank 0] step:4961/10000 train_time:216715ms step_avg:43.68ms +[2025-09-11 08:12:15] [Rank 0] step:4961/10000 train_time:216715ms step_avg:43.68ms +[2025-09-11 08:12:16] [Rank 0] step:4981/10000 train_time:217392ms step_avg:43.64ms +[2025-09-11 08:12:16] [Rank 0] step:4981/10000 train_time:217392ms step_avg:43.64ms +[2025-09-11 08:12:16] [Rank 0] step:5001/10000 train_time:218070ms step_avg:43.61ms +[2025-09-11 08:12:16] [Rank 0] step:5001/10000 train_time:218070ms step_avg:43.61ms +[2025-09-11 08:12:17] [Rank 0] step:5021/10000 train_time:218746ms step_avg:43.57ms +[2025-09-11 08:12:17] [Rank 0] step:5021/10000 train_time:218746ms step_avg:43.57ms +[2025-09-11 08:12:18] [Rank 0] step:5041/10000 train_time:219422ms step_avg:43.53ms +[2025-09-11 08:12:18] [Rank 0] step:5041/10000 train_time:219422ms step_avg:43.53ms +[2025-09-11 08:12:18] [Rank 0] step:5061/10000 train_time:220098ms step_avg:43.49ms +[2025-09-11 08:12:18] [Rank 0] step:5061/10000 train_time:220098ms step_avg:43.49ms +[2025-09-11 08:12:19] [Rank 0] step:5081/10000 train_time:220774ms step_avg:43.45ms +[2025-09-11 08:12:19] [Rank 0] step:5081/10000 train_time:220774ms step_avg:43.45ms +[2025-09-11 08:12:20] [Rank 0] step:5101/10000 train_time:221601ms step_avg:43.44ms +[2025-09-11 08:12:20] [Rank 0] step:5101/10000 train_time:221601ms step_avg:43.44ms +[2025-09-11 08:12:21] [Rank 0] step:5121/10000 train_time:222705ms step_avg:43.49ms 
+[2025-09-11 08:12:21] [Rank 0] step:5121/10000 train_time:222705ms step_avg:43.49ms +[2025-09-11 08:12:22] [Rank 0] step:5141/10000 train_time:223381ms step_avg:43.45ms +[2025-09-11 08:12:22] [Rank 0] step:5141/10000 train_time:223381ms step_avg:43.45ms +[2025-09-11 08:12:23] [Rank 0] step:5161/10000 train_time:224214ms step_avg:43.44ms +[2025-09-11 08:12:23] [Rank 0] step:5161/10000 train_time:224214ms step_avg:43.44ms +[2025-09-11 08:12:23] [Rank 0] step:5181/10000 train_time:225038ms step_avg:43.44ms +[2025-09-11 08:12:23] [Rank 0] step:5181/10000 train_time:225038ms step_avg:43.44ms +[2025-09-11 08:12:24] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 08:12:24] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 08:12:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 08:12:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 08:12:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 08:12:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 08:12:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:12:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:12:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 08:12:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 08:12:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 08:12:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 08:12:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 08:12:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 08:12:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 08:12:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 08:12:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 08:12:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 08:12:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 08:12:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 08:12:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 08:12:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 08:12:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 08:12:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 08:12:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 08:12:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 08:12:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 08:12:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 08:12:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 08:12:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 08:12:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 08:12:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 08:12:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 08:12:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 08:12:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 08:12:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 08:12:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 08:12:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 08:12:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 08:12:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 08:12:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 08:12:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 08:12:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 08:12:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 08:12:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 08:12:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:12:34] [Rank 0] PRINT: step:5200/10000 val_loss:5.5756 total_sharp:5.2312e-03 L1_sharp:2.1315e-03 L2_sharp:7.5120e-04 L3_sharp:3.0867e-04 L4_sharp:4.0710e-04 L5_sharp:8.7771e-04 L6_sharp:4.5251e-04 L7_sharp:4.5619e-04 L8_sharp:7.7955e-04 L9_sharp:7.0932e-04 L10_sharp:8.5049e-04 L11_sharp:1.1758e-03 L12_sharp:4.4273e-03 total_fnorm:8.6875e+00 total_l1_linf:2.2016e+04 total_spectral:4.3750e+00 L1_fnorm:2.5312e+00 L2_fnorm:2.5312e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.4688e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4844e+00 L9_fnorm:2.5000e+00 L10_fnorm:2.5000e+00 L11_fnorm:2.4688e+00 L12_fnorm:2.2500e+00 L1_l1linf:6.6797e-01 L2_l1linf:6.1328e-01 L3_l1linf:5.9766e-01 L4_l1linf:5.8984e-01 L5_l1linf:5.5859e-01 L6_l1linf:5.4688e-01 L7_l1linf:5.5469e-01 L8_l1linf:5.6250e-01 L9_l1linf:5.7812e-01 L10_l1linf:5.8984e-01 L11_l1linf:5.5078e-01 L12_l1linf:4.6680e-01 L1_spectral:3.2174e-02 L2_spectral:3.1317e-02 L3_spectral:3.1212e-02 L4_spectral:3.1764e-02 L5_spectral:3.1510e-02 L6_spectral:3.1476e-02 L7_spectral:3.1446e-02 L8_spectral:3.1273e-02 L9_spectral:3.1068e-02 L10_spectral:3.1170e-02 L11_spectral:3.0727e-02 L12_spectral:3.0517e-02 train_time:225701ms step_avg:43.40ms +[2025-09-11 08:12:34] [Rank 0] PRINT: step:5200/10000 val_loss:5.5756 total_sharp:5.2312e-03 L1_sharp:2.1315e-03 L2_sharp:7.5120e-04 L3_sharp:3.0867e-04 L4_sharp:4.0710e-04 L5_sharp:8.7771e-04 L6_sharp:4.5251e-04 L7_sharp:4.5619e-04 L8_sharp:7.7955e-04 L9_sharp:7.0932e-04 L10_sharp:8.5049e-04 L11_sharp:1.1758e-03 L12_sharp:4.4273e-03 total_fnorm:8.6875e+00 total_l1_linf:2.2016e+04 total_spectral:4.3750e+00 L1_fnorm:2.5312e+00 L2_fnorm:2.5312e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.4688e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4844e+00 L9_fnorm:2.5000e+00 L10_fnorm:2.5000e+00 L11_fnorm:2.4688e+00 L12_fnorm:2.2500e+00 L1_l1linf:6.6797e-01 L2_l1linf:6.1328e-01 L3_l1linf:5.9766e-01 L4_l1linf:5.8984e-01 L5_l1linf:5.5859e-01 
L6_l1linf:5.4688e-01 L7_l1linf:5.5469e-01 L8_l1linf:5.6250e-01 L9_l1linf:5.7812e-01 L10_l1linf:5.8984e-01 L11_l1linf:5.5078e-01 L12_l1linf:4.6680e-01 L1_spectral:3.2174e-02 L2_spectral:3.1317e-02 L3_spectral:3.1212e-02 L4_spectral:3.1764e-02 L5_spectral:3.1510e-02 L6_spectral:3.1476e-02 L7_spectral:3.1446e-02 L8_spectral:3.1273e-02 L9_spectral:3.1068e-02 L10_spectral:3.1170e-02 L11_spectral:3.0727e-02 L12_spectral:3.0517e-02 train_time:225701ms step_avg:43.40ms +[2025-09-11 08:12:35] [Rank 0] step:5201/10000 train_time:226940ms step_avg:43.63ms +[2025-09-11 08:12:35] [Rank 0] step:5201/10000 train_time:226940ms step_avg:43.63ms +[2025-09-11 08:12:36] [Rank 0] step:5221/10000 train_time:227655ms step_avg:43.60ms +[2025-09-11 08:12:36] [Rank 0] step:5221/10000 train_time:227655ms step_avg:43.60ms +[2025-09-11 08:12:37] [Rank 0] step:5241/10000 train_time:228341ms step_avg:43.57ms +[2025-09-11 08:12:37] [Rank 0] step:5241/10000 train_time:228341ms step_avg:43.57ms +[2025-09-11 08:12:37] [Rank 0] step:5261/10000 train_time:229028ms step_avg:43.53ms +[2025-09-11 08:12:37] [Rank 0] step:5261/10000 train_time:229028ms step_avg:43.53ms +[2025-09-11 08:12:38] [Rank 0] step:5281/10000 train_time:229714ms step_avg:43.50ms +[2025-09-11 08:12:38] [Rank 0] step:5281/10000 train_time:229714ms step_avg:43.50ms +[2025-09-11 08:12:39] [Rank 0] step:5301/10000 train_time:230400ms step_avg:43.46ms +[2025-09-11 08:12:39] [Rank 0] step:5301/10000 train_time:230400ms step_avg:43.46ms +[2025-09-11 08:12:39] [Rank 0] step:5321/10000 train_time:231086ms step_avg:43.43ms +[2025-09-11 08:12:39] [Rank 0] step:5321/10000 train_time:231086ms step_avg:43.43ms +[2025-09-11 08:12:40] [Rank 0] step:5341/10000 train_time:231771ms step_avg:43.39ms +[2025-09-11 08:12:40] [Rank 0] step:5341/10000 train_time:231771ms step_avg:43.39ms +[2025-09-11 08:12:41] [Rank 0] step:5361/10000 train_time:232458ms step_avg:43.36ms +[2025-09-11 08:12:41] [Rank 0] step:5361/10000 train_time:232458ms step_avg:43.36ms 
+[2025-09-11 08:12:41] [Rank 0] step:5381/10000 train_time:233144ms step_avg:43.33ms +[2025-09-11 08:12:41] [Rank 0] step:5381/10000 train_time:233144ms step_avg:43.33ms +[2025-09-11 08:12:42] [Rank 0] step:5401/10000 train_time:233829ms step_avg:43.29ms +[2025-09-11 08:12:42] [Rank 0] step:5401/10000 train_time:233829ms step_avg:43.29ms +[2025-09-11 08:12:43] [Rank 0] step:5421/10000 train_time:234516ms step_avg:43.26ms +[2025-09-11 08:12:43] [Rank 0] step:5421/10000 train_time:234516ms step_avg:43.26ms +[2025-09-11 08:12:44] [Rank 0] step:5441/10000 train_time:235202ms step_avg:43.23ms +[2025-09-11 08:12:44] [Rank 0] step:5441/10000 train_time:235202ms step_avg:43.23ms +[2025-09-11 08:12:44] [Rank 0] step:5461/10000 train_time:235888ms step_avg:43.20ms +[2025-09-11 08:12:44] [Rank 0] step:5461/10000 train_time:235888ms step_avg:43.20ms +[2025-09-11 08:12:45] [Rank 0] step:5481/10000 train_time:236574ms step_avg:43.16ms +[2025-09-11 08:12:45] [Rank 0] step:5481/10000 train_time:236574ms step_avg:43.16ms +[2025-09-11 08:12:46] [Rank 0] step:5501/10000 train_time:237260ms step_avg:43.13ms +[2025-09-11 08:12:46] [Rank 0] step:5501/10000 train_time:237260ms step_avg:43.13ms +[2025-09-11 08:12:46] [Rank 0] step:5521/10000 train_time:237945ms step_avg:43.10ms +[2025-09-11 08:12:46] [Rank 0] step:5521/10000 train_time:237945ms step_avg:43.10ms +[2025-09-11 08:12:47] [Rank 0] step:5541/10000 train_time:238633ms step_avg:43.07ms +[2025-09-11 08:12:47] [Rank 0] step:5541/10000 train_time:238633ms step_avg:43.07ms +[2025-09-11 08:12:48] [Rank 0] step:5561/10000 train_time:239321ms step_avg:43.04ms +[2025-09-11 08:12:48] [Rank 0] step:5561/10000 train_time:239321ms step_avg:43.04ms +[2025-09-11 08:12:48] [Rank 0] step:5581/10000 train_time:240008ms step_avg:43.00ms +[2025-09-11 08:12:48] [Rank 0] step:5581/10000 train_time:240008ms step_avg:43.00ms +[2025-09-11 08:12:49] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 08:12:49] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 08:12:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 08:12:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 08:12:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 08:12:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 08:12:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:12:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:12:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 08:12:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 08:12:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 08:12:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 08:12:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 08:12:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 08:12:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 08:12:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 08:12:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 08:12:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 08:12:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 08:12:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 08:12:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 08:12:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 08:12:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 08:12:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 08:12:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 08:12:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 08:12:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 08:12:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 08:12:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 08:12:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 08:12:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 08:12:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 08:12:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 08:12:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 08:12:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 08:12:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 08:12:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 08:12:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 08:12:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 08:12:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 08:12:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 08:12:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 08:12:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 08:12:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 08:12:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 08:12:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 08:12:59] [Rank 0] PRINT: step:5600/10000 val_loss:5.5518 total_sharp:5.0873e-03 L1_sharp:2.3549e-03 L2_sharp:9.0954e-04 L3_sharp:3.9115e-04 L4_sharp:2.7498e-04 L5_sharp:9.0319e-04 L6_sharp:5.2191e-04 L7_sharp:5.3894e-04 L8_sharp:8.2755e-04 L9_sharp:7.3308e-04 L10_sharp:8.8960e-04 L11_sharp:1.0948e-03 L12_sharp:3.0430e-03 total_fnorm:8.6875e+00 total_l1_linf:2.1760e+04 total_spectral:4.3750e+00 L1_fnorm:2.5469e+00 L2_fnorm:2.5156e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.4688e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5000e+00 L10_fnorm:2.5000e+00 L11_fnorm:2.4844e+00 L12_fnorm:2.2656e+00 L1_l1linf:6.6016e-01 L2_l1linf:5.9766e-01 L3_l1linf:5.8984e-01 L4_l1linf:5.9375e-01 L5_l1linf:5.5078e-01 L6_l1linf:5.4688e-01 L7_l1linf:5.4688e-01 L8_l1linf:5.5859e-01 L9_l1linf:5.6250e-01 L10_l1linf:5.7422e-01 L11_l1linf:5.5078e-01 L12_l1linf:4.6289e-01 L1_spectral:3.2337e-02 L2_spectral:3.1449e-02 L3_spectral:3.1370e-02 L4_spectral:3.1880e-02 L5_spectral:3.1661e-02 L6_spectral:3.1659e-02 L7_spectral:3.1749e-02 L8_spectral:3.1397e-02 L9_spectral:3.1275e-02 L10_spectral:3.1553e-02 L11_spectral:3.0963e-02 L12_spectral:3.0595e-02 train_time:240675ms step_avg:42.98ms +[2025-09-11 08:12:59] [Rank 0] PRINT: step:5600/10000 
val_loss:5.5518 total_sharp:5.0873e-03 L1_sharp:2.3549e-03 L2_sharp:9.0954e-04 L3_sharp:3.9115e-04 L4_sharp:2.7498e-04 L5_sharp:9.0319e-04 L6_sharp:5.2191e-04 L7_sharp:5.3894e-04 L8_sharp:8.2755e-04 L9_sharp:7.3308e-04 L10_sharp:8.8960e-04 L11_sharp:1.0948e-03 L12_sharp:3.0430e-03 total_fnorm:8.6875e+00 total_l1_linf:2.1760e+04 total_spectral:4.3750e+00 L1_fnorm:2.5469e+00 L2_fnorm:2.5156e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.4688e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5000e+00 L10_fnorm:2.5000e+00 L11_fnorm:2.4844e+00 L12_fnorm:2.2656e+00 L1_l1linf:6.6016e-01 L2_l1linf:5.9766e-01 L3_l1linf:5.8984e-01 L4_l1linf:5.9375e-01 L5_l1linf:5.5078e-01 L6_l1linf:5.4688e-01 L7_l1linf:5.4688e-01 L8_l1linf:5.5859e-01 L9_l1linf:5.6250e-01 L10_l1linf:5.7422e-01 L11_l1linf:5.5078e-01 L12_l1linf:4.6289e-01 L1_spectral:3.2337e-02 L2_spectral:3.1449e-02 L3_spectral:3.1370e-02 L4_spectral:3.1880e-02 L5_spectral:3.1661e-02 L6_spectral:3.1659e-02 L7_spectral:3.1749e-02 L8_spectral:3.1397e-02 L9_spectral:3.1275e-02 L10_spectral:3.1553e-02 L11_spectral:3.0963e-02 L12_spectral:3.0595e-02 train_time:240675ms step_avg:42.98ms +[2025-09-11 08:13:00] [Rank 0] step:5601/10000 train_time:241946ms step_avg:43.20ms +[2025-09-11 08:13:00] [Rank 0] step:5601/10000 train_time:241946ms step_avg:43.20ms +[2025-09-11 08:13:01] [Rank 0] step:5621/10000 train_time:242676ms step_avg:43.17ms +[2025-09-11 08:13:01] [Rank 0] step:5621/10000 train_time:242676ms step_avg:43.17ms +[2025-09-11 08:13:02] [Rank 0] step:5641/10000 train_time:243362ms step_avg:43.14ms +[2025-09-11 08:13:02] [Rank 0] step:5641/10000 train_time:243362ms step_avg:43.14ms +[2025-09-11 08:13:02] [Rank 0] step:5661/10000 train_time:244049ms step_avg:43.11ms +[2025-09-11 08:13:02] [Rank 0] step:5661/10000 train_time:244049ms step_avg:43.11ms +[2025-09-11 08:13:03] [Rank 0] step:5681/10000 train_time:244736ms step_avg:43.08ms +[2025-09-11 08:13:03] [Rank 0] step:5681/10000 
train_time:244736ms step_avg:43.08ms +[2025-09-11 08:13:04] [Rank 0] step:5701/10000 train_time:245424ms step_avg:43.05ms +[2025-09-11 08:13:04] [Rank 0] step:5701/10000 train_time:245424ms step_avg:43.05ms +[2025-09-11 08:13:04] [Rank 0] step:5721/10000 train_time:246110ms step_avg:43.02ms +[2025-09-11 08:13:04] [Rank 0] step:5721/10000 train_time:246110ms step_avg:43.02ms +[2025-09-11 08:13:05] [Rank 0] step:5741/10000 train_time:246798ms step_avg:42.99ms +[2025-09-11 08:13:05] [Rank 0] step:5741/10000 train_time:246798ms step_avg:42.99ms +[2025-09-11 08:13:06] [Rank 0] step:5761/10000 train_time:247485ms step_avg:42.96ms +[2025-09-11 08:13:06] [Rank 0] step:5761/10000 train_time:247485ms step_avg:42.96ms +[2025-09-11 08:13:07] [Rank 0] step:5781/10000 train_time:248173ms step_avg:42.93ms +[2025-09-11 08:13:07] [Rank 0] step:5781/10000 train_time:248173ms step_avg:42.93ms +[2025-09-11 08:13:07] [Rank 0] step:5801/10000 train_time:248861ms step_avg:42.90ms +[2025-09-11 08:13:07] [Rank 0] step:5801/10000 train_time:248861ms step_avg:42.90ms +[2025-09-11 08:13:08] [Rank 0] step:5821/10000 train_time:249554ms step_avg:42.87ms +[2025-09-11 08:13:08] [Rank 0] step:5821/10000 train_time:249554ms step_avg:42.87ms +[2025-09-11 08:13:09] [Rank 0] step:5841/10000 train_time:250243ms step_avg:42.84ms +[2025-09-11 08:13:09] [Rank 0] step:5841/10000 train_time:250243ms step_avg:42.84ms +[2025-09-11 08:13:09] [Rank 0] step:5861/10000 train_time:250928ms step_avg:42.81ms +[2025-09-11 08:13:09] [Rank 0] step:5861/10000 train_time:250928ms step_avg:42.81ms +[2025-09-11 08:13:10] [Rank 0] step:5881/10000 train_time:251614ms step_avg:42.78ms +[2025-09-11 08:13:10] [Rank 0] step:5881/10000 train_time:251614ms step_avg:42.78ms +[2025-09-11 08:13:11] [Rank 0] step:5901/10000 train_time:252300ms step_avg:42.76ms +[2025-09-11 08:13:11] [Rank 0] step:5901/10000 train_time:252300ms step_avg:42.76ms +[2025-09-11 08:13:11] [Rank 0] step:5921/10000 train_time:252989ms step_avg:42.73ms 
+[2025-09-11 08:13:11] [Rank 0] step:5921/10000 train_time:252989ms step_avg:42.73ms +[2025-09-11 08:13:12] [Rank 0] step:5941/10000 train_time:253677ms step_avg:42.70ms +[2025-09-11 08:13:12] [Rank 0] step:5941/10000 train_time:253677ms step_avg:42.70ms +[2025-09-11 08:13:13] [Rank 0] step:5961/10000 train_time:254364ms step_avg:42.67ms +[2025-09-11 08:13:13] [Rank 0] step:5961/10000 train_time:254364ms step_avg:42.67ms +[2025-09-11 08:13:13] [Rank 0] step:5981/10000 train_time:255052ms step_avg:42.64ms +[2025-09-11 08:13:13] [Rank 0] step:5981/10000 train_time:255052ms step_avg:42.64ms +[2025-09-11 08:13:14] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 08:13:14] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 08:13:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 08:13:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 08:13:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 08:13:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 08:13:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:13:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:13:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 08:13:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 08:13:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 08:13:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 08:13:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 08:13:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 08:13:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 08:13:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 08:13:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 08:13:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 08:13:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 08:13:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 08:13:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 08:13:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 08:13:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 08:13:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 08:13:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 08:13:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 08:13:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 08:13:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 08:13:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 08:13:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 08:13:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 08:13:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 08:13:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 08:13:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 08:13:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 08:13:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 08:13:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 08:13:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 08:13:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 08:13:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 08:13:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 08:13:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 08:13:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 08:13:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 08:13:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 08:13:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:13:24] [Rank 0] PRINT: step:6000/10000 val_loss:5.5149 total_sharp:4.8790e-03 L1_sharp:3.4931e-03 L2_sharp:6.4052e-04 L3_sharp:4.4559e-04 L4_sharp:3.8160e-04 L5_sharp:8.3964e-04 L6_sharp:4.8107e-04 L7_sharp:4.5915e-04 L8_sharp:7.0011e-04 L9_sharp:7.1558e-04 L10_sharp:7.6237e-04 L11_sharp:1.0337e-03 L12_sharp:4.9127e-03 total_fnorm:8.6250e+00 total_l1_linf:2.1504e+04 total_spectral:4.3750e+00 L1_fnorm:2.5469e+00 L2_fnorm:2.5156e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.4688e+00 L6_fnorm:2.5000e+00 L7_fnorm:2.5000e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5000e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5000e+00 L12_fnorm:2.2812e+00 L1_l1linf:6.6016e-01 L2_l1linf:5.9766e-01 L3_l1linf:6.0156e-01 L4_l1linf:5.7812e-01 L5_l1linf:5.4688e-01 L6_l1linf:5.4297e-01 L7_l1linf:5.4297e-01 L8_l1linf:5.5078e-01 L9_l1linf:5.5078e-01 L10_l1linf:5.5078e-01 L11_l1linf:5.5469e-01 L12_l1linf:4.5898e-01 L1_spectral:3.2372e-02 L2_spectral:3.1537e-02 L3_spectral:3.1788e-02 L4_spectral:3.1888e-02 L5_spectral:3.1740e-02 L6_spectral:3.2002e-02 L7_spectral:3.1870e-02 L8_spectral:3.1727e-02 L9_spectral:3.1531e-02 L10_spectral:3.1468e-02 L11_spectral:3.1112e-02 L12_spectral:3.0779e-02 train_time:255722ms step_avg:42.62ms +[2025-09-11 08:13:24] [Rank 0] PRINT: step:6000/10000 val_loss:5.5149 total_sharp:4.8790e-03 L1_sharp:3.4931e-03 L2_sharp:6.4052e-04 L3_sharp:4.4559e-04 L4_sharp:3.8160e-04 L5_sharp:8.3964e-04 L6_sharp:4.8107e-04 L7_sharp:4.5915e-04 L8_sharp:7.0011e-04 L9_sharp:7.1558e-04 L10_sharp:7.6237e-04 L11_sharp:1.0337e-03 L12_sharp:4.9127e-03 total_fnorm:8.6250e+00 total_l1_linf:2.1504e+04 total_spectral:4.3750e+00 L1_fnorm:2.5469e+00 L2_fnorm:2.5156e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.4688e+00 L6_fnorm:2.5000e+00 L7_fnorm:2.5000e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5000e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5000e+00 L12_fnorm:2.2812e+00 L1_l1linf:6.6016e-01 L2_l1linf:5.9766e-01 L3_l1linf:6.0156e-01 L4_l1linf:5.7812e-01 L5_l1linf:5.4688e-01 
L6_l1linf:5.4297e-01 L7_l1linf:5.4297e-01 L8_l1linf:5.5078e-01 L9_l1linf:5.5078e-01 L10_l1linf:5.5078e-01 L11_l1linf:5.5469e-01 L12_l1linf:4.5898e-01 L1_spectral:3.2372e-02 L2_spectral:3.1537e-02 L3_spectral:3.1788e-02 L4_spectral:3.1888e-02 L5_spectral:3.1740e-02 L6_spectral:3.2002e-02 L7_spectral:3.1870e-02 L8_spectral:3.1727e-02 L9_spectral:3.1531e-02 L10_spectral:3.1468e-02 L11_spectral:3.1112e-02 L12_spectral:3.0779e-02 train_time:255722ms step_avg:42.62ms +[2025-09-11 08:13:25] [Rank 0] step:6001/10000 train_time:257003ms step_avg:42.83ms +[2025-09-11 08:13:25] [Rank 0] step:6001/10000 train_time:257003ms step_avg:42.83ms +[2025-09-11 08:13:26] [Rank 0] step:6021/10000 train_time:257968ms step_avg:42.84ms +[2025-09-11 08:13:26] [Rank 0] step:6021/10000 train_time:257968ms step_avg:42.84ms +[2025-09-11 08:13:27] [Rank 0] step:6041/10000 train_time:258661ms step_avg:42.82ms +[2025-09-11 08:13:27] [Rank 0] step:6041/10000 train_time:258661ms step_avg:42.82ms +[2025-09-11 08:13:28] [Rank 0] step:6061/10000 train_time:259349ms step_avg:42.79ms +[2025-09-11 08:13:28] [Rank 0] step:6061/10000 train_time:259349ms step_avg:42.79ms +[2025-09-11 08:13:28] [Rank 0] step:6081/10000 train_time:260040ms step_avg:42.76ms +[2025-09-11 08:13:28] [Rank 0] step:6081/10000 train_time:260040ms step_avg:42.76ms +[2025-09-11 08:13:29] [Rank 0] step:6101/10000 train_time:260727ms step_avg:42.74ms +[2025-09-11 08:13:29] [Rank 0] step:6101/10000 train_time:260727ms step_avg:42.74ms +[2025-09-11 08:13:30] [Rank 0] step:6121/10000 train_time:261416ms step_avg:42.71ms +[2025-09-11 08:13:30] [Rank 0] step:6121/10000 train_time:261416ms step_avg:42.71ms +[2025-09-11 08:13:31] [Rank 0] step:6141/10000 train_time:262104ms step_avg:42.68ms +[2025-09-11 08:13:31] [Rank 0] step:6141/10000 train_time:262104ms step_avg:42.68ms +[2025-09-11 08:13:31] [Rank 0] step:6161/10000 train_time:262794ms step_avg:42.65ms +[2025-09-11 08:13:31] [Rank 0] step:6161/10000 train_time:262794ms step_avg:42.65ms 
+[2025-09-11 08:13:32] [Rank 0] step:6181/10000 train_time:263480ms step_avg:42.63ms +[2025-09-11 08:13:32] [Rank 0] step:6181/10000 train_time:263480ms step_avg:42.63ms +[2025-09-11 08:13:33] [Rank 0] step:6201/10000 train_time:264169ms step_avg:42.60ms +[2025-09-11 08:13:33] [Rank 0] step:6201/10000 train_time:264169ms step_avg:42.60ms +[2025-09-11 08:13:33] [Rank 0] step:6221/10000 train_time:264858ms step_avg:42.57ms +[2025-09-11 08:13:33] [Rank 0] step:6221/10000 train_time:264858ms step_avg:42.57ms +[2025-09-11 08:13:34] [Rank 0] step:6241/10000 train_time:265546ms step_avg:42.55ms +[2025-09-11 08:13:34] [Rank 0] step:6241/10000 train_time:265546ms step_avg:42.55ms +[2025-09-11 08:13:35] [Rank 0] step:6261/10000 train_time:266234ms step_avg:42.52ms +[2025-09-11 08:13:35] [Rank 0] step:6261/10000 train_time:266234ms step_avg:42.52ms +[2025-09-11 08:13:35] [Rank 0] step:6281/10000 train_time:266922ms step_avg:42.50ms +[2025-09-11 08:13:35] [Rank 0] step:6281/10000 train_time:266922ms step_avg:42.50ms +[2025-09-11 08:13:36] [Rank 0] step:6301/10000 train_time:267609ms step_avg:42.47ms +[2025-09-11 08:13:36] [Rank 0] step:6301/10000 train_time:267609ms step_avg:42.47ms +[2025-09-11 08:13:37] [Rank 0] step:6321/10000 train_time:268301ms step_avg:42.45ms +[2025-09-11 08:13:37] [Rank 0] step:6321/10000 train_time:268301ms step_avg:42.45ms +[2025-09-11 08:13:37] [Rank 0] step:6341/10000 train_time:268990ms step_avg:42.42ms +[2025-09-11 08:13:37] [Rank 0] step:6341/10000 train_time:268990ms step_avg:42.42ms +[2025-09-11 08:13:38] [Rank 0] step:6361/10000 train_time:269680ms step_avg:42.40ms +[2025-09-11 08:13:38] [Rank 0] step:6361/10000 train_time:269680ms step_avg:42.40ms +[2025-09-11 08:13:39] [Rank 0] step:6381/10000 train_time:270368ms step_avg:42.37ms +[2025-09-11 08:13:39] [Rank 0] step:6381/10000 train_time:270368ms step_avg:42.37ms +[2025-09-11 08:13:39] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 08:13:39] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 08:13:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 08:13:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 08:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 08:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 08:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 08:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 08:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 08:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 08:13:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 08:13:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 08:13:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 08:13:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 08:13:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 08:13:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 08:13:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 08:13:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 08:13:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 08:13:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 08:13:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 08:13:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 08:13:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 08:13:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 08:13:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 08:13:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 08:13:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 08:13:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 08:13:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 08:13:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 08:13:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 08:13:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 08:13:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 08:13:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 08:13:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 08:13:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 08:13:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 08:13:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 08:13:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 08:13:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 08:13:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 08:13:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 08:13:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 08:13:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 08:13:49] [Rank 0] PRINT: step:6400/10000 val_loss:5.4911 total_sharp:4.5104e-03 L1_sharp:2.1823e-03 L2_sharp:5.5784e-04 L3_sharp:4.5160e-04 L4_sharp:3.5923e-04 L5_sharp:8.0999e-04 L6_sharp:4.6909e-04 L7_sharp:4.6808e-04 L8_sharp:7.3938e-04 L9_sharp:6.5067e-04 L10_sharp:7.1915e-04 L11_sharp:9.4629e-04 L12_sharp:2.8565e-03 total_fnorm:7.8750e+00 total_l1_linf:1.8560e+04 total_spectral:3.9375e+00 L1_fnorm:2.3125e+00 L2_fnorm:2.2812e+00 L3_fnorm:2.2500e+00 L4_fnorm:2.2656e+00 L5_fnorm:2.2188e+00 L6_fnorm:2.2656e+00 L7_fnorm:2.2656e+00 L8_fnorm:2.2188e+00 L9_fnorm:2.2500e+00 L10_fnorm:2.2500e+00 L11_fnorm:2.2500e+00 L12_fnorm:2.0625e+00 L1_l1linf:5.7031e-01 L2_l1linf:5.1562e-01 L3_l1linf:5.1172e-01 L4_l1linf:5.0781e-01 L5_l1linf:4.8047e-01 L6_l1linf:4.6484e-01 L7_l1linf:4.6484e-01 L8_l1linf:4.6875e-01 L9_l1linf:4.7266e-01 L10_l1linf:4.8242e-01 L11_l1linf:4.7852e-01 L12_l1linf:4.2188e-01 L1_spectral:2.9953e-02 L2_spectral:2.8971e-02 L3_spectral:2.8978e-02 L4_spectral:2.9371e-02 L5_spectral:2.8737e-02 L6_spectral:2.9281e-02 L7_spectral:2.9280e-02 L8_spectral:2.9005e-02 L9_spectral:2.8958e-02 L10_spectral:2.9011e-02 L11_spectral:2.8923e-02 L12_spectral:2.8430e-02 train_time:271036ms step_avg:42.35ms +[2025-09-11 08:13:49] [Rank 0] PRINT: step:6400/10000 
val_loss:5.4911 total_sharp:4.5104e-03 L1_sharp:2.1823e-03 L2_sharp:5.5784e-04 L3_sharp:4.5160e-04 L4_sharp:3.5923e-04 L5_sharp:8.0999e-04 L6_sharp:4.6909e-04 L7_sharp:4.6808e-04 L8_sharp:7.3938e-04 L9_sharp:6.5067e-04 L10_sharp:7.1915e-04 L11_sharp:9.4629e-04 L12_sharp:2.8565e-03 total_fnorm:7.8750e+00 total_l1_linf:1.8560e+04 total_spectral:3.9375e+00 L1_fnorm:2.3125e+00 L2_fnorm:2.2812e+00 L3_fnorm:2.2500e+00 L4_fnorm:2.2656e+00 L5_fnorm:2.2188e+00 L6_fnorm:2.2656e+00 L7_fnorm:2.2656e+00 L8_fnorm:2.2188e+00 L9_fnorm:2.2500e+00 L10_fnorm:2.2500e+00 L11_fnorm:2.2500e+00 L12_fnorm:2.0625e+00 L1_l1linf:5.7031e-01 L2_l1linf:5.1562e-01 L3_l1linf:5.1172e-01 L4_l1linf:5.0781e-01 L5_l1linf:4.8047e-01 L6_l1linf:4.6484e-01 L7_l1linf:4.6484e-01 L8_l1linf:4.6875e-01 L9_l1linf:4.7266e-01 L10_l1linf:4.8242e-01 L11_l1linf:4.7852e-01 L12_l1linf:4.2188e-01 L1_spectral:2.9953e-02 L2_spectral:2.8971e-02 L3_spectral:2.8978e-02 L4_spectral:2.9371e-02 L5_spectral:2.8737e-02 L6_spectral:2.9281e-02 L7_spectral:2.9280e-02 L8_spectral:2.9005e-02 L9_spectral:2.8958e-02 L10_spectral:2.9011e-02 L11_spectral:2.8923e-02 L12_spectral:2.8430e-02 train_time:271036ms step_avg:42.35ms +[2025-09-11 08:13:51] [Rank 0] step:6401/10000 train_time:272342ms step_avg:42.55ms +[2025-09-11 08:13:51] [Rank 0] step:6401/10000 train_time:272342ms step_avg:42.55ms +[2025-09-11 08:13:51] [Rank 0] step:6421/10000 train_time:273047ms step_avg:42.52ms +[2025-09-11 08:13:51] [Rank 0] step:6421/10000 train_time:273047ms step_avg:42.52ms +[2025-09-11 08:13:52] [Rank 0] step:6441/10000 train_time:273737ms step_avg:42.50ms +[2025-09-11 08:13:52] [Rank 0] step:6441/10000 train_time:273737ms step_avg:42.50ms +[2025-09-11 08:13:53] [Rank 0] step:6461/10000 train_time:274427ms step_avg:42.47ms +[2025-09-11 08:13:53] [Rank 0] step:6461/10000 train_time:274427ms step_avg:42.47ms +[2025-09-11 08:13:53] [Rank 0] step:6481/10000 train_time:275118ms step_avg:42.45ms +[2025-09-11 08:13:53] [Rank 0] step:6481/10000 
train_time:275118ms step_avg:42.45ms +[2025-09-11 08:13:54] [Rank 0] step:6501/10000 train_time:275810ms step_avg:42.43ms +[2025-09-11 08:13:54] [Rank 0] step:6501/10000 train_time:275810ms step_avg:42.43ms +[2025-09-11 08:13:55] [Rank 0] step:6521/10000 train_time:276501ms step_avg:42.40ms +[2025-09-11 08:13:55] [Rank 0] step:6521/10000 train_time:276501ms step_avg:42.40ms +[2025-09-11 08:13:56] [Rank 0] step:6541/10000 train_time:277189ms step_avg:42.38ms +[2025-09-11 08:13:56] [Rank 0] step:6541/10000 train_time:277189ms step_avg:42.38ms +[2025-09-11 08:13:56] [Rank 0] step:6561/10000 train_time:277879ms step_avg:42.35ms +[2025-09-11 08:13:56] [Rank 0] step:6561/10000 train_time:277879ms step_avg:42.35ms +[2025-09-11 08:13:57] [Rank 0] step:6581/10000 train_time:278569ms step_avg:42.33ms +[2025-09-11 08:13:57] [Rank 0] step:6581/10000 train_time:278569ms step_avg:42.33ms +[2025-09-11 08:13:58] [Rank 0] step:6601/10000 train_time:279259ms step_avg:42.31ms +[2025-09-11 08:13:58] [Rank 0] step:6601/10000 train_time:279259ms step_avg:42.31ms +[2025-09-11 08:13:58] [Rank 0] step:6621/10000 train_time:279947ms step_avg:42.28ms +[2025-09-11 08:13:58] [Rank 0] step:6621/10000 train_time:279947ms step_avg:42.28ms +[2025-09-11 08:13:59] [Rank 0] step:6641/10000 train_time:280636ms step_avg:42.26ms +[2025-09-11 08:13:59] [Rank 0] step:6641/10000 train_time:280636ms step_avg:42.26ms +[2025-09-11 08:14:00] [Rank 0] step:6661/10000 train_time:281326ms step_avg:42.23ms +[2025-09-11 08:14:00] [Rank 0] step:6661/10000 train_time:281326ms step_avg:42.23ms +[2025-09-11 08:14:00] [Rank 0] step:6681/10000 train_time:282022ms step_avg:42.21ms +[2025-09-11 08:14:00] [Rank 0] step:6681/10000 train_time:282022ms step_avg:42.21ms +[2025-09-11 08:14:01] [Rank 0] step:6701/10000 train_time:282717ms step_avg:42.19ms +[2025-09-11 08:14:01] [Rank 0] step:6701/10000 train_time:282717ms step_avg:42.19ms +[2025-09-11 08:14:02] [Rank 0] step:6721/10000 train_time:283415ms step_avg:42.17ms 
+[2025-09-11 08:14:02] [Rank 0] step:6721/10000 train_time:283415ms step_avg:42.17ms +[2025-09-11 08:14:02] [Rank 0] step:6741/10000 train_time:284112ms step_avg:42.15ms +[2025-09-11 08:14:02] [Rank 0] step:6741/10000 train_time:284112ms step_avg:42.15ms +[2025-09-11 08:14:03] [Rank 0] step:6761/10000 train_time:284808ms step_avg:42.13ms +[2025-09-11 08:14:03] [Rank 0] step:6761/10000 train_time:284808ms step_avg:42.13ms +[2025-09-11 08:14:04] [Rank 0] step:6781/10000 train_time:285505ms step_avg:42.10ms +[2025-09-11 08:14:04] [Rank 0] step:6781/10000 train_time:285505ms step_avg:42.10ms +[2025-09-11 08:14:05] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 08:14:05] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 08:14:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 08:14:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 08:14:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 08:14:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 08:14:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:14:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:14:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 08:14:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 08:14:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 08:14:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 08:14:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 08:14:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 08:14:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 08:14:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 08:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 08:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 08:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 08:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 08:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 08:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 08:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 08:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 08:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 08:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 08:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 08:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 08:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 08:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 08:14:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 08:14:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 08:14:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 08:14:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 08:14:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 08:14:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 08:14:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 08:14:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 08:14:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 08:14:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 08:14:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 08:14:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 08:14:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 08:14:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 08:14:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 08:14:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:14:15] [Rank 0] PRINT: step:6800/10000 val_loss:5.4572 total_sharp:3.6317e-03 L1_sharp:4.2148e-03 L2_sharp:5.8738e-04 L3_sharp:4.0154e-04 L4_sharp:2.5192e-04 L5_sharp:7.4359e-04 L6_sharp:3.2411e-04 L7_sharp:3.7513e-04 L8_sharp:5.6998e-04 L9_sharp:5.5047e-04 L10_sharp:6.0251e-04 L11_sharp:9.1833e-04 L12_sharp:2.4121e-03 total_fnorm:7.0000e+00 total_l1_linf:1.5616e+04 total_spectral:3.4844e+00 L1_fnorm:2.0625e+00 L2_fnorm:2.0156e+00 L3_fnorm:1.9844e+00 L4_fnorm:2.0000e+00 L5_fnorm:1.9609e+00 L6_fnorm:2.0000e+00 L7_fnorm:1.9922e+00 L8_fnorm:1.9609e+00 L9_fnorm:1.9844e+00 L10_fnorm:1.9844e+00 L11_fnorm:1.9844e+00 L12_fnorm:1.8438e+00 L1_l1linf:4.9023e-01 L2_l1linf:4.4141e-01 L3_l1linf:4.3945e-01 L4_l1linf:4.3555e-01 L5_l1linf:4.0820e-01 L6_l1linf:3.9453e-01 L7_l1linf:3.9062e-01 L8_l1linf:3.9453e-01 L9_l1linf:3.9844e-01 L10_l1linf:4.0820e-01 L11_l1linf:4.0625e-01 L12_l1linf:3.6133e-01 L1_spectral:2.6672e-02 L2_spectral:2.6283e-02 L3_spectral:2.6392e-02 L4_spectral:2.6347e-02 L5_spectral:2.5728e-02 L6_spectral:2.6408e-02 L7_spectral:2.6305e-02 L8_spectral:2.6135e-02 L9_spectral:2.6207e-02 L10_spectral:2.6335e-02 L11_spectral:2.6270e-02 L12_spectral:2.5738e-02 train_time:286181ms step_avg:42.09ms +[2025-09-11 08:14:15] [Rank 0] PRINT: step:6800/10000 val_loss:5.4572 total_sharp:3.6317e-03 L1_sharp:4.2148e-03 L2_sharp:5.8738e-04 L3_sharp:4.0154e-04 L4_sharp:2.5192e-04 L5_sharp:7.4359e-04 L6_sharp:3.2411e-04 L7_sharp:3.7513e-04 L8_sharp:5.6998e-04 L9_sharp:5.5047e-04 L10_sharp:6.0251e-04 L11_sharp:9.1833e-04 L12_sharp:2.4121e-03 total_fnorm:7.0000e+00 total_l1_linf:1.5616e+04 total_spectral:3.4844e+00 L1_fnorm:2.0625e+00 L2_fnorm:2.0156e+00 L3_fnorm:1.9844e+00 L4_fnorm:2.0000e+00 L5_fnorm:1.9609e+00 L6_fnorm:2.0000e+00 L7_fnorm:1.9922e+00 L8_fnorm:1.9609e+00 L9_fnorm:1.9844e+00 L10_fnorm:1.9844e+00 L11_fnorm:1.9844e+00 L12_fnorm:1.8438e+00 L1_l1linf:4.9023e-01 L2_l1linf:4.4141e-01 L3_l1linf:4.3945e-01 L4_l1linf:4.3555e-01 L5_l1linf:4.0820e-01 
L6_l1linf:3.9453e-01 L7_l1linf:3.9062e-01 L8_l1linf:3.9453e-01 L9_l1linf:3.9844e-01 L10_l1linf:4.0820e-01 L11_l1linf:4.0625e-01 L12_l1linf:3.6133e-01 L1_spectral:2.6672e-02 L2_spectral:2.6283e-02 L3_spectral:2.6392e-02 L4_spectral:2.6347e-02 L5_spectral:2.5728e-02 L6_spectral:2.6408e-02 L7_spectral:2.6305e-02 L8_spectral:2.6135e-02 L9_spectral:2.6207e-02 L10_spectral:2.6335e-02 L11_spectral:2.6270e-02 L12_spectral:2.5738e-02 train_time:286181ms step_avg:42.09ms +[2025-09-11 08:14:16] [Rank 0] step:6801/10000 train_time:287506ms step_avg:42.27ms +[2025-09-11 08:14:16] [Rank 0] step:6801/10000 train_time:287506ms step_avg:42.27ms +[2025-09-11 08:14:17] [Rank 0] step:6821/10000 train_time:288243ms step_avg:42.26ms +[2025-09-11 08:14:17] [Rank 0] step:6821/10000 train_time:288243ms step_avg:42.26ms +[2025-09-11 08:14:17] [Rank 0] step:6841/10000 train_time:288944ms step_avg:42.24ms +[2025-09-11 08:14:17] [Rank 0] step:6841/10000 train_time:288944ms step_avg:42.24ms +[2025-09-11 08:14:18] [Rank 0] step:6861/10000 train_time:289643ms step_avg:42.22ms +[2025-09-11 08:14:18] [Rank 0] step:6861/10000 train_time:289643ms step_avg:42.22ms +[2025-09-11 08:14:19] [Rank 0] step:6881/10000 train_time:290342ms step_avg:42.19ms +[2025-09-11 08:14:19] [Rank 0] step:6881/10000 train_time:290342ms step_avg:42.19ms +[2025-09-11 08:14:19] [Rank 0] step:6901/10000 train_time:291039ms step_avg:42.17ms +[2025-09-11 08:14:19] [Rank 0] step:6901/10000 train_time:291039ms step_avg:42.17ms +[2025-09-11 08:14:20] [Rank 0] step:6921/10000 train_time:291735ms step_avg:42.15ms +[2025-09-11 08:14:20] [Rank 0] step:6921/10000 train_time:291735ms step_avg:42.15ms +[2025-09-11 08:14:21] [Rank 0] step:6941/10000 train_time:292433ms step_avg:42.13ms +[2025-09-11 08:14:21] [Rank 0] step:6941/10000 train_time:292433ms step_avg:42.13ms +[2025-09-11 08:14:22] [Rank 0] step:6961/10000 train_time:293131ms step_avg:42.11ms +[2025-09-11 08:14:22] [Rank 0] step:6961/10000 train_time:293131ms step_avg:42.11ms 
+[2025-09-11 08:14:22] [Rank 0] step:6981/10000 train_time:293831ms step_avg:42.09ms +[2025-09-11 08:14:22] [Rank 0] step:6981/10000 train_time:293831ms step_avg:42.09ms +[2025-09-11 08:14:23] [Rank 0] step:7001/10000 train_time:294529ms step_avg:42.07ms +[2025-09-11 08:14:23] [Rank 0] step:7001/10000 train_time:294529ms step_avg:42.07ms +[2025-09-11 08:14:24] [Rank 0] step:7021/10000 train_time:295232ms step_avg:42.05ms +[2025-09-11 08:14:24] [Rank 0] step:7021/10000 train_time:295232ms step_avg:42.05ms +[2025-09-11 08:14:24] [Rank 0] step:7041/10000 train_time:295928ms step_avg:42.03ms +[2025-09-11 08:14:24] [Rank 0] step:7041/10000 train_time:295928ms step_avg:42.03ms +[2025-09-11 08:14:25] [Rank 0] step:7061/10000 train_time:296627ms step_avg:42.01ms +[2025-09-11 08:14:25] [Rank 0] step:7061/10000 train_time:296627ms step_avg:42.01ms +[2025-09-11 08:14:26] [Rank 0] step:7081/10000 train_time:297325ms step_avg:41.99ms +[2025-09-11 08:14:26] [Rank 0] step:7081/10000 train_time:297325ms step_avg:41.99ms +[2025-09-11 08:14:27] [Rank 0] step:7101/10000 train_time:298302ms step_avg:42.01ms +[2025-09-11 08:14:27] [Rank 0] step:7101/10000 train_time:298302ms step_avg:42.01ms +[2025-09-11 08:14:28] [Rank 0] step:7121/10000 train_time:299290ms step_avg:42.03ms +[2025-09-11 08:14:28] [Rank 0] step:7121/10000 train_time:299290ms step_avg:42.03ms +[2025-09-11 08:14:28] [Rank 0] step:7141/10000 train_time:299987ms step_avg:42.01ms +[2025-09-11 08:14:28] [Rank 0] step:7141/10000 train_time:299987ms step_avg:42.01ms +[2025-09-11 08:14:29] [Rank 0] step:7161/10000 train_time:300960ms step_avg:42.03ms +[2025-09-11 08:14:29] [Rank 0] step:7161/10000 train_time:300960ms step_avg:42.03ms +[2025-09-11 08:14:30] [Rank 0] step:7181/10000 train_time:301656ms step_avg:42.01ms +[2025-09-11 08:14:30] [Rank 0] step:7181/10000 train_time:301656ms step_avg:42.01ms +[2025-09-11 08:14:31] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 08:14:31] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 08:14:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 08:14:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 08:14:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 08:14:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 08:14:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:14:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:14:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 08:14:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 08:14:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 08:14:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 08:14:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 08:14:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 08:14:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 08:14:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 08:14:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 08:14:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 08:14:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 08:14:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 08:14:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 08:14:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 08:14:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 08:14:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 08:14:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 08:14:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 08:14:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 08:14:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 08:14:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 08:14:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 08:14:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 08:14:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 08:14:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 08:14:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 08:14:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 08:14:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 08:14:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 08:14:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 08:14:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 08:14:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 08:14:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 08:14:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 08:14:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 08:14:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 08:14:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 08:14:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 08:14:44] [Rank 0] PRINT: step:7200/10000 val_loss:5.4312 total_sharp:3.6289e-03 L1_sharp:1.2980e-03 L2_sharp:3.7130e-04 L3_sharp:2.8746e-04 L4_sharp:2.7680e-04 L5_sharp:5.7998e-04 L6_sharp:3.6891e-04 L7_sharp:3.6941e-04 L8_sharp:6.0879e-04 L9_sharp:5.4013e-04 L10_sharp:6.8542e-04 L11_sharp:8.9810e-04 L12_sharp:2.7368e-03 total_fnorm:6.0000e+00 total_l1_linf:1.2672e+04 total_spectral:3.0000e+00 L1_fnorm:1.7969e+00 L2_fnorm:1.7500e+00 L3_fnorm:1.7109e+00 L4_fnorm:1.7344e+00 L5_fnorm:1.6875e+00 L6_fnorm:1.7266e+00 L7_fnorm:1.7188e+00 L8_fnorm:1.6797e+00 L9_fnorm:1.7109e+00 L10_fnorm:1.7109e+00 L11_fnorm:1.7109e+00 L12_fnorm:1.6016e+00 L1_l1linf:4.0234e-01 L2_l1linf:3.7891e-01 L3_l1linf:3.6328e-01 L4_l1linf:3.5938e-01 L5_l1linf:3.3008e-01 L6_l1linf:3.2422e-01 L7_l1linf:3.1836e-01 L8_l1linf:3.1445e-01 L9_l1linf:3.1641e-01 L10_l1linf:3.3008e-01 L11_l1linf:3.3008e-01 L12_l1linf:2.9102e-01 L1_spectral:2.3434e-02 L2_spectral:2.3157e-02 L3_spectral:2.2962e-02 L4_spectral:2.3116e-02 L5_spectral:2.2895e-02 L6_spectral:2.3171e-02 L7_spectral:2.3134e-02 L8_spectral:2.3094e-02 L9_spectral:2.3170e-02 L10_spectral:2.3231e-02 L11_spectral:2.3051e-02 L12_spectral:2.2832e-02 train_time:302334ms step_avg:41.99ms +[2025-09-11 08:14:44] [Rank 0] PRINT: step:7200/10000 
val_loss:5.4312 total_sharp:3.6289e-03 L1_sharp:1.2980e-03 L2_sharp:3.7130e-04 L3_sharp:2.8746e-04 L4_sharp:2.7680e-04 L5_sharp:5.7998e-04 L6_sharp:3.6891e-04 L7_sharp:3.6941e-04 L8_sharp:6.0879e-04 L9_sharp:5.4013e-04 L10_sharp:6.8542e-04 L11_sharp:8.9810e-04 L12_sharp:2.7368e-03 total_fnorm:6.0000e+00 total_l1_linf:1.2672e+04 total_spectral:3.0000e+00 L1_fnorm:1.7969e+00 L2_fnorm:1.7500e+00 L3_fnorm:1.7109e+00 L4_fnorm:1.7344e+00 L5_fnorm:1.6875e+00 L6_fnorm:1.7266e+00 L7_fnorm:1.7188e+00 L8_fnorm:1.6797e+00 L9_fnorm:1.7109e+00 L10_fnorm:1.7109e+00 L11_fnorm:1.7109e+00 L12_fnorm:1.6016e+00 L1_l1linf:4.0234e-01 L2_l1linf:3.7891e-01 L3_l1linf:3.6328e-01 L4_l1linf:3.5938e-01 L5_l1linf:3.3008e-01 L6_l1linf:3.2422e-01 L7_l1linf:3.1836e-01 L8_l1linf:3.1445e-01 L9_l1linf:3.1641e-01 L10_l1linf:3.3008e-01 L11_l1linf:3.3008e-01 L12_l1linf:2.9102e-01 L1_spectral:2.3434e-02 L2_spectral:2.3157e-02 L3_spectral:2.2962e-02 L4_spectral:2.3116e-02 L5_spectral:2.2895e-02 L6_spectral:2.3171e-02 L7_spectral:2.3134e-02 L8_spectral:2.3094e-02 L9_spectral:2.3170e-02 L10_spectral:2.3231e-02 L11_spectral:2.3051e-02 L12_spectral:2.2832e-02 train_time:302334ms step_avg:41.99ms +[2025-09-11 08:14:45] [Rank 0] step:7201/10000 train_time:303643ms step_avg:42.17ms +[2025-09-11 08:14:45] [Rank 0] step:7201/10000 train_time:303643ms step_avg:42.17ms +[2025-09-11 08:14:46] [Rank 0] step:7221/10000 train_time:304378ms step_avg:42.15ms +[2025-09-11 08:14:46] [Rank 0] step:7221/10000 train_time:304378ms step_avg:42.15ms +[2025-09-11 08:14:46] [Rank 0] step:7241/10000 train_time:305078ms step_avg:42.13ms +[2025-09-11 08:14:46] [Rank 0] step:7241/10000 train_time:305078ms step_avg:42.13ms +[2025-09-11 08:14:47] [Rank 0] step:7261/10000 train_time:305778ms step_avg:42.11ms +[2025-09-11 08:14:47] [Rank 0] step:7261/10000 train_time:305778ms step_avg:42.11ms +[2025-09-11 08:14:48] [Rank 0] step:7281/10000 train_time:306481ms step_avg:42.09ms +[2025-09-11 08:14:48] [Rank 0] step:7281/10000 
train_time:306481ms step_avg:42.09ms +[2025-09-11 08:14:48] [Rank 0] step:7301/10000 train_time:307178ms step_avg:42.07ms +[2025-09-11 08:14:48] [Rank 0] step:7301/10000 train_time:307178ms step_avg:42.07ms +[2025-09-11 08:14:49] [Rank 0] step:7321/10000 train_time:307877ms step_avg:42.05ms +[2025-09-11 08:14:49] [Rank 0] step:7321/10000 train_time:307877ms step_avg:42.05ms +[2025-09-11 08:14:50] [Rank 0] step:7341/10000 train_time:308577ms step_avg:42.03ms +[2025-09-11 08:14:50] [Rank 0] step:7341/10000 train_time:308577ms step_avg:42.03ms +[2025-09-11 08:14:51] [Rank 0] step:7361/10000 train_time:309275ms step_avg:42.02ms +[2025-09-11 08:14:51] [Rank 0] step:7361/10000 train_time:309275ms step_avg:42.02ms +[2025-09-11 08:14:51] [Rank 0] step:7381/10000 train_time:309975ms step_avg:42.00ms +[2025-09-11 08:14:51] [Rank 0] step:7381/10000 train_time:309975ms step_avg:42.00ms +[2025-09-11 08:14:52] [Rank 0] step:7401/10000 train_time:310675ms step_avg:41.98ms +[2025-09-11 08:14:52] [Rank 0] step:7401/10000 train_time:310675ms step_avg:41.98ms +[2025-09-11 08:14:53] [Rank 0] step:7421/10000 train_time:311372ms step_avg:41.96ms +[2025-09-11 08:14:53] [Rank 0] step:7421/10000 train_time:311372ms step_avg:41.96ms +[2025-09-11 08:14:53] [Rank 0] step:7441/10000 train_time:312071ms step_avg:41.94ms +[2025-09-11 08:14:53] [Rank 0] step:7441/10000 train_time:312071ms step_avg:41.94ms +[2025-09-11 08:14:54] [Rank 0] step:7461/10000 train_time:312773ms step_avg:41.92ms +[2025-09-11 08:14:54] [Rank 0] step:7461/10000 train_time:312773ms step_avg:41.92ms +[2025-09-11 08:14:55] [Rank 0] step:7481/10000 train_time:313476ms step_avg:41.90ms +[2025-09-11 08:14:55] [Rank 0] step:7481/10000 train_time:313476ms step_avg:41.90ms +[2025-09-11 08:14:55] [Rank 0] step:7501/10000 train_time:314175ms step_avg:41.88ms +[2025-09-11 08:14:55] [Rank 0] step:7501/10000 train_time:314175ms step_avg:41.88ms +[2025-09-11 08:14:56] [Rank 0] step:7521/10000 train_time:314875ms step_avg:41.87ms 
+[2025-09-11 08:14:56] [Rank 0] step:7521/10000 train_time:314875ms step_avg:41.87ms +[2025-09-11 08:14:57] [Rank 0] step:7541/10000 train_time:315572ms step_avg:41.85ms +[2025-09-11 08:14:57] [Rank 0] step:7541/10000 train_time:315572ms step_avg:41.85ms +[2025-09-11 08:14:58] [Rank 0] step:7561/10000 train_time:316271ms step_avg:41.83ms +[2025-09-11 08:14:58] [Rank 0] step:7561/10000 train_time:316271ms step_avg:41.83ms +[2025-09-11 08:14:58] [Rank 0] step:7581/10000 train_time:316970ms step_avg:41.81ms +[2025-09-11 08:14:58] [Rank 0] step:7581/10000 train_time:316970ms step_avg:41.81ms +[2025-09-11 08:14:59] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 08:14:59] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 08:15:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 08:15:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 08:15:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 08:15:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 08:15:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:15:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:15:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 08:15:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 08:15:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 08:15:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 08:15:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 08:15:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 08:15:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 08:15:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 08:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 08:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 08:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 08:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 08:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 08:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 08:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 08:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 08:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 08:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 08:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 08:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 08:15:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 08:15:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 08:15:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 08:15:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 08:15:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 08:15:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 08:15:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 08:15:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 08:15:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 08:15:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 08:15:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 08:15:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 08:15:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 08:15:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 08:15:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 08:15:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 08:15:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 08:15:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:15:09] [Rank 0] PRINT: step:7600/10000 val_loss:5.4103 total_sharp:3.7010e-03 L1_sharp:2.1446e-03 L2_sharp:5.3949e-04 L3_sharp:3.4321e-04 L4_sharp:3.3220e-04 L5_sharp:5.6874e-04 L6_sharp:3.8889e-04 L7_sharp:4.2948e-04 L8_sharp:6.0103e-04 L9_sharp:5.5052e-04 L10_sharp:6.4012e-04 L11_sharp:8.8591e-04 L12_sharp:2.2945e-03 total_fnorm:5.0625e+00 total_l1_linf:9.9200e+03 total_spectral:2.5000e+00 L1_fnorm:1.5234e+00 L2_fnorm:1.4688e+00 L3_fnorm:1.4453e+00 L4_fnorm:1.4453e+00 L5_fnorm:1.4062e+00 L6_fnorm:1.4375e+00 L7_fnorm:1.4297e+00 L8_fnorm:1.4062e+00 L9_fnorm:1.4297e+00 L10_fnorm:1.4297e+00 L11_fnorm:1.4375e+00 L12_fnorm:1.3516e+00 L1_l1linf:3.2812e-01 L2_l1linf:2.9102e-01 L3_l1linf:2.9688e-01 L4_l1linf:2.8906e-01 L5_l1linf:2.6367e-01 L6_l1linf:2.5586e-01 L7_l1linf:2.5586e-01 L8_l1linf:2.5195e-01 L9_l1linf:2.6172e-01 L10_l1linf:2.6172e-01 L11_l1linf:2.6562e-01 L12_l1linf:2.3633e-01 L1_spectral:2.0089e-02 L2_spectral:1.9942e-02 L3_spectral:1.9753e-02 L4_spectral:1.9899e-02 L5_spectral:1.9554e-02 L6_spectral:1.9951e-02 L7_spectral:1.9996e-02 L8_spectral:1.9960e-02 L9_spectral:1.9965e-02 L10_spectral:1.9921e-02 L11_spectral:1.9818e-02 L12_spectral:1.9638e-02 train_time:317651ms step_avg:41.80ms +[2025-09-11 08:15:09] [Rank 0] PRINT: step:7600/10000 val_loss:5.4103 total_sharp:3.7010e-03 L1_sharp:2.1446e-03 L2_sharp:5.3949e-04 L3_sharp:3.4321e-04 L4_sharp:3.3220e-04 L5_sharp:5.6874e-04 L6_sharp:3.8889e-04 L7_sharp:4.2948e-04 L8_sharp:6.0103e-04 L9_sharp:5.5052e-04 L10_sharp:6.4012e-04 L11_sharp:8.8591e-04 L12_sharp:2.2945e-03 total_fnorm:5.0625e+00 total_l1_linf:9.9200e+03 total_spectral:2.5000e+00 L1_fnorm:1.5234e+00 L2_fnorm:1.4688e+00 L3_fnorm:1.4453e+00 L4_fnorm:1.4453e+00 L5_fnorm:1.4062e+00 L6_fnorm:1.4375e+00 L7_fnorm:1.4297e+00 L8_fnorm:1.4062e+00 L9_fnorm:1.4297e+00 L10_fnorm:1.4297e+00 L11_fnorm:1.4375e+00 L12_fnorm:1.3516e+00 L1_l1linf:3.2812e-01 L2_l1linf:2.9102e-01 L3_l1linf:2.9688e-01 L4_l1linf:2.8906e-01 L5_l1linf:2.6367e-01 
L6_l1linf:2.5586e-01 L7_l1linf:2.5586e-01 L8_l1linf:2.5195e-01 L9_l1linf:2.6172e-01 L10_l1linf:2.6172e-01 L11_l1linf:2.6562e-01 L12_l1linf:2.3633e-01 L1_spectral:2.0089e-02 L2_spectral:1.9942e-02 L3_spectral:1.9753e-02 L4_spectral:1.9899e-02 L5_spectral:1.9554e-02 L6_spectral:1.9951e-02 L7_spectral:1.9996e-02 L8_spectral:1.9960e-02 L9_spectral:1.9965e-02 L10_spectral:1.9921e-02 L11_spectral:1.9818e-02 L12_spectral:1.9638e-02 train_time:317651ms step_avg:41.80ms +[2025-09-11 08:15:10] [Rank 0] step:7601/10000 train_time:318940ms step_avg:41.96ms +[2025-09-11 08:15:10] [Rank 0] step:7601/10000 train_time:318940ms step_avg:41.96ms +[2025-09-11 08:15:11] [Rank 0] step:7621/10000 train_time:319667ms step_avg:41.95ms +[2025-09-11 08:15:11] [Rank 0] step:7621/10000 train_time:319667ms step_avg:41.95ms +[2025-09-11 08:15:12] [Rank 0] step:7641/10000 train_time:320368ms step_avg:41.93ms +[2025-09-11 08:15:12] [Rank 0] step:7641/10000 train_time:320368ms step_avg:41.93ms +[2025-09-11 08:15:12] [Rank 0] step:7661/10000 train_time:321067ms step_avg:41.91ms +[2025-09-11 08:15:12] [Rank 0] step:7661/10000 train_time:321067ms step_avg:41.91ms +[2025-09-11 08:15:13] [Rank 0] step:7681/10000 train_time:321766ms step_avg:41.89ms +[2025-09-11 08:15:13] [Rank 0] step:7681/10000 train_time:321766ms step_avg:41.89ms +[2025-09-11 08:15:14] [Rank 0] step:7701/10000 train_time:322467ms step_avg:41.87ms +[2025-09-11 08:15:14] [Rank 0] step:7701/10000 train_time:322467ms step_avg:41.87ms +[2025-09-11 08:15:14] [Rank 0] step:7721/10000 train_time:323165ms step_avg:41.86ms +[2025-09-11 08:15:14] [Rank 0] step:7721/10000 train_time:323165ms step_avg:41.86ms +[2025-09-11 08:15:15] [Rank 0] step:7741/10000 train_time:323864ms step_avg:41.84ms +[2025-09-11 08:15:15] [Rank 0] step:7741/10000 train_time:323864ms step_avg:41.84ms +[2025-09-11 08:15:16] [Rank 0] step:7761/10000 train_time:324564ms step_avg:41.82ms +[2025-09-11 08:15:16] [Rank 0] step:7761/10000 train_time:324564ms step_avg:41.82ms 
+[2025-09-11 08:15:16] [Rank 0] step:7781/10000 train_time:325265ms step_avg:41.80ms +[2025-09-11 08:15:16] [Rank 0] step:7781/10000 train_time:325265ms step_avg:41.80ms +[2025-09-11 08:15:17] [Rank 0] step:7801/10000 train_time:325963ms step_avg:41.78ms +[2025-09-11 08:15:17] [Rank 0] step:7801/10000 train_time:325963ms step_avg:41.78ms +[2025-09-11 08:15:18] [Rank 0] step:7821/10000 train_time:326663ms step_avg:41.77ms +[2025-09-11 08:15:18] [Rank 0] step:7821/10000 train_time:326663ms step_avg:41.77ms +[2025-09-11 08:15:19] [Rank 0] step:7841/10000 train_time:327363ms step_avg:41.75ms +[2025-09-11 08:15:19] [Rank 0] step:7841/10000 train_time:327363ms step_avg:41.75ms +[2025-09-11 08:15:19] [Rank 0] step:7861/10000 train_time:328064ms step_avg:41.73ms +[2025-09-11 08:15:19] [Rank 0] step:7861/10000 train_time:328064ms step_avg:41.73ms +[2025-09-11 08:15:20] [Rank 0] step:7881/10000 train_time:328762ms step_avg:41.72ms +[2025-09-11 08:15:20] [Rank 0] step:7881/10000 train_time:328762ms step_avg:41.72ms +[2025-09-11 08:15:21] [Rank 0] step:7901/10000 train_time:329462ms step_avg:41.70ms +[2025-09-11 08:15:21] [Rank 0] step:7901/10000 train_time:329462ms step_avg:41.70ms +[2025-09-11 08:15:21] [Rank 0] step:7921/10000 train_time:330161ms step_avg:41.68ms +[2025-09-11 08:15:21] [Rank 0] step:7921/10000 train_time:330161ms step_avg:41.68ms +[2025-09-11 08:15:22] [Rank 0] step:7941/10000 train_time:330862ms step_avg:41.66ms +[2025-09-11 08:15:22] [Rank 0] step:7941/10000 train_time:330862ms step_avg:41.66ms +[2025-09-11 08:15:23] [Rank 0] step:7961/10000 train_time:331559ms step_avg:41.65ms +[2025-09-11 08:15:23] [Rank 0] step:7961/10000 train_time:331559ms step_avg:41.65ms +[2025-09-11 08:15:23] [Rank 0] step:7981/10000 train_time:332260ms step_avg:41.63ms +[2025-09-11 08:15:23] [Rank 0] step:7981/10000 train_time:332260ms step_avg:41.63ms +[2025-09-11 08:15:24] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 08:15:24] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 08:15:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 08:15:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 08:15:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 08:15:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 08:15:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:15:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:15:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 08:15:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 08:15:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 08:15:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 08:15:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 08:15:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 08:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 08:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 08:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 08:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 08:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 08:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 08:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 08:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 08:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 08:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 08:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 08:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 08:15:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 08:15:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 08:15:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 08:15:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 08:15:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 08:15:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 08:15:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 08:15:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 08:15:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 08:15:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 08:15:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 08:15:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 08:15:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 08:15:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 08:15:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 08:15:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 08:15:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 08:15:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 08:15:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 08:15:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 08:15:34] [Rank 0] PRINT: step:8000/10000 val_loss:5.3972 total_sharp:3.2145e-03 L1_sharp:1.0727e-03 L2_sharp:4.3377e-04 L3_sharp:2.7304e-04 L4_sharp:3.1563e-04 L5_sharp:5.0132e-04 L6_sharp:3.7444e-04 L7_sharp:3.5841e-04 L8_sharp:5.1508e-04 L9_sharp:4.8621e-04 L10_sharp:5.5549e-04 L11_sharp:8.6387e-04 L12_sharp:2.5115e-03 total_fnorm:4.1250e+00 total_l1_linf:7.5840e+03 total_spectral:2.0625e+00 L1_fnorm:1.2734e+00 L2_fnorm:1.2109e+00 L3_fnorm:1.1797e+00 L4_fnorm:1.1797e+00 L5_fnorm:1.1484e+00 L6_fnorm:1.1719e+00 L7_fnorm:1.1719e+00 L8_fnorm:1.1484e+00 L9_fnorm:1.1719e+00 L10_fnorm:1.1719e+00 L11_fnorm:1.1797e+00 L12_fnorm:1.0938e+00 L1_l1linf:2.5000e-01 L2_l1linf:2.2949e-01 L3_l1linf:2.2461e-01 L4_l1linf:2.2363e-01 L5_l1linf:2.0801e-01 L6_l1linf:1.9727e-01 L7_l1linf:1.9727e-01 L8_l1linf:1.9922e-01 L9_l1linf:1.9824e-01 L10_l1linf:2.0020e-01 L11_l1linf:2.0312e-01 L12_l1linf:1.8262e-01 L1_spectral:1.7541e-02 L2_spectral:1.6698e-02 L3_spectral:1.6436e-02 L4_spectral:1.6665e-02 L5_spectral:1.6532e-02 L6_spectral:1.6721e-02 L7_spectral:1.6681e-02 L8_spectral:1.6725e-02 L9_spectral:1.6858e-02 L10_spectral:1.6733e-02 L11_spectral:1.6713e-02 L12_spectral:1.6337e-02 train_time:332939ms step_avg:41.62ms +[2025-09-11 08:15:34] [Rank 0] PRINT: step:8000/10000 
val_loss:5.3972 total_sharp:3.2145e-03 L1_sharp:1.0727e-03 L2_sharp:4.3377e-04 L3_sharp:2.7304e-04 L4_sharp:3.1563e-04 L5_sharp:5.0132e-04 L6_sharp:3.7444e-04 L7_sharp:3.5841e-04 L8_sharp:5.1508e-04 L9_sharp:4.8621e-04 L10_sharp:5.5549e-04 L11_sharp:8.6387e-04 L12_sharp:2.5115e-03 total_fnorm:4.1250e+00 total_l1_linf:7.5840e+03 total_spectral:2.0625e+00 L1_fnorm:1.2734e+00 L2_fnorm:1.2109e+00 L3_fnorm:1.1797e+00 L4_fnorm:1.1797e+00 L5_fnorm:1.1484e+00 L6_fnorm:1.1719e+00 L7_fnorm:1.1719e+00 L8_fnorm:1.1484e+00 L9_fnorm:1.1719e+00 L10_fnorm:1.1719e+00 L11_fnorm:1.1797e+00 L12_fnorm:1.0938e+00 L1_l1linf:2.5000e-01 L2_l1linf:2.2949e-01 L3_l1linf:2.2461e-01 L4_l1linf:2.2363e-01 L5_l1linf:2.0801e-01 L6_l1linf:1.9727e-01 L7_l1linf:1.9727e-01 L8_l1linf:1.9922e-01 L9_l1linf:1.9824e-01 L10_l1linf:2.0020e-01 L11_l1linf:2.0312e-01 L12_l1linf:1.8262e-01 L1_spectral:1.7541e-02 L2_spectral:1.6698e-02 L3_spectral:1.6436e-02 L4_spectral:1.6665e-02 L5_spectral:1.6532e-02 L6_spectral:1.6721e-02 L7_spectral:1.6681e-02 L8_spectral:1.6725e-02 L9_spectral:1.6858e-02 L10_spectral:1.6733e-02 L11_spectral:1.6713e-02 L12_spectral:1.6337e-02 train_time:332939ms step_avg:41.62ms +[2025-09-11 08:15:35] [Rank 0] step:8001/10000 train_time:334276ms step_avg:41.78ms +[2025-09-11 08:15:35] [Rank 0] step:8001/10000 train_time:334276ms step_avg:41.78ms +[2025-09-11 08:15:36] [Rank 0] step:8021/10000 train_time:335012ms step_avg:41.77ms +[2025-09-11 08:15:36] [Rank 0] step:8021/10000 train_time:335012ms step_avg:41.77ms +[2025-09-11 08:15:37] [Rank 0] step:8041/10000 train_time:335713ms step_avg:41.75ms +[2025-09-11 08:15:37] [Rank 0] step:8041/10000 train_time:335713ms step_avg:41.75ms +[2025-09-11 08:15:37] [Rank 0] step:8061/10000 train_time:336416ms step_avg:41.73ms +[2025-09-11 08:15:37] [Rank 0] step:8061/10000 train_time:336416ms step_avg:41.73ms +[2025-09-11 08:15:38] [Rank 0] step:8081/10000 train_time:337114ms step_avg:41.72ms +[2025-09-11 08:15:38] [Rank 0] step:8081/10000 
train_time:337114ms step_avg:41.72ms +[2025-09-11 08:15:39] [Rank 0] step:8101/10000 train_time:337811ms step_avg:41.70ms +[2025-09-11 08:15:39] [Rank 0] step:8101/10000 train_time:337811ms step_avg:41.70ms +[2025-09-11 08:15:40] [Rank 0] step:8121/10000 train_time:338513ms step_avg:41.68ms +[2025-09-11 08:15:40] [Rank 0] step:8121/10000 train_time:338513ms step_avg:41.68ms +[2025-09-11 08:15:41] [Rank 0] step:8141/10000 train_time:339966ms step_avg:41.76ms +[2025-09-11 08:15:41] [Rank 0] step:8141/10000 train_time:339966ms step_avg:41.76ms +[2025-09-11 08:15:42] [Rank 0] step:8161/10000 train_time:340669ms step_avg:41.74ms +[2025-09-11 08:15:42] [Rank 0] step:8161/10000 train_time:340669ms step_avg:41.74ms +[2025-09-11 08:15:42] [Rank 0] step:8181/10000 train_time:341380ms step_avg:41.73ms +[2025-09-11 08:15:42] [Rank 0] step:8181/10000 train_time:341380ms step_avg:41.73ms +[2025-09-11 08:15:43] [Rank 0] step:8201/10000 train_time:342087ms step_avg:41.71ms +[2025-09-11 08:15:43] [Rank 0] step:8201/10000 train_time:342087ms step_avg:41.71ms +[2025-09-11 08:15:44] [Rank 0] step:8221/10000 train_time:342793ms step_avg:41.70ms +[2025-09-11 08:15:44] [Rank 0] step:8221/10000 train_time:342793ms step_avg:41.70ms +[2025-09-11 08:15:45] [Rank 0] step:8241/10000 train_time:343509ms step_avg:41.68ms +[2025-09-11 08:15:45] [Rank 0] step:8241/10000 train_time:343509ms step_avg:41.68ms +[2025-09-11 08:15:45] [Rank 0] step:8261/10000 train_time:344214ms step_avg:41.67ms +[2025-09-11 08:15:45] [Rank 0] step:8261/10000 train_time:344214ms step_avg:41.67ms +[2025-09-11 08:15:46] [Rank 0] step:8281/10000 train_time:344916ms step_avg:41.65ms +[2025-09-11 08:15:46] [Rank 0] step:8281/10000 train_time:344916ms step_avg:41.65ms +[2025-09-11 08:15:47] [Rank 0] step:8301/10000 train_time:345621ms step_avg:41.64ms +[2025-09-11 08:15:47] [Rank 0] step:8301/10000 train_time:345621ms step_avg:41.64ms +[2025-09-11 08:15:47] [Rank 0] step:8321/10000 train_time:346326ms step_avg:41.62ms 
+[2025-09-11 08:15:47] [Rank 0] step:8321/10000 train_time:346326ms step_avg:41.62ms +[2025-09-11 08:15:48] [Rank 0] step:8341/10000 train_time:347038ms step_avg:41.61ms +[2025-09-11 08:15:48] [Rank 0] step:8341/10000 train_time:347038ms step_avg:41.61ms +[2025-09-11 08:15:49] [Rank 0] step:8361/10000 train_time:347740ms step_avg:41.59ms +[2025-09-11 08:15:49] [Rank 0] step:8361/10000 train_time:347740ms step_avg:41.59ms +[2025-09-11 08:15:50] [Rank 0] step:8381/10000 train_time:348447ms step_avg:41.58ms +[2025-09-11 08:15:50] [Rank 0] step:8381/10000 train_time:348447ms step_avg:41.58ms +[2025-09-11 08:15:50] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 08:15:50] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 08:15:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 08:15:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 08:15:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 08:15:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 08:15:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:15:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:15:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 08:15:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 08:15:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 08:15:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 08:15:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 08:15:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 08:15:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 08:15:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 08:15:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 08:15:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 08:15:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 08:15:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 08:15:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 08:15:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 08:15:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 08:15:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 08:15:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 08:15:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 08:15:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 08:15:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 08:15:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 08:15:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 08:15:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 08:15:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 08:15:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 08:15:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 08:15:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 08:15:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 08:15:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 08:15:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 08:15:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 08:15:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 08:15:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 08:15:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 08:16:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 08:16:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 08:16:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 08:16:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:16:00] [Rank 0] PRINT: step:8400/10000 val_loss:5.3777 total_sharp:2.8750e-03 L1_sharp:1.4786e-03 L2_sharp:2.8896e-04 L3_sharp:3.1761e-04 L4_sharp:2.5995e-04 L5_sharp:4.1078e-04 L6_sharp:2.6579e-04 L7_sharp:3.3762e-04 L8_sharp:4.4316e-04 L9_sharp:3.9759e-04 L10_sharp:5.1822e-04 L11_sharp:6.8729e-04 L12_sharp:2.4997e-03 total_fnorm:3.1875e+00 total_l1_linf:5.4080e+03 total_spectral:1.6094e+00 L1_fnorm:1.0078e+00 L2_fnorm:9.5312e-01 L3_fnorm:9.2188e-01 L4_fnorm:9.2578e-01 L5_fnorm:8.9844e-01 L6_fnorm:9.1406e-01 L7_fnorm:9.1406e-01 L8_fnorm:8.9453e-01 L9_fnorm:9.1406e-01 L10_fnorm:9.1016e-01 L11_fnorm:9.2188e-01 L12_fnorm:8.5547e-01 L1_l1linf:1.8750e-01 L2_l1linf:1.6699e-01 L3_l1linf:1.6309e-01 L4_l1linf:1.5918e-01 L5_l1linf:1.4746e-01 L6_l1linf:1.4355e-01 L7_l1linf:1.3867e-01 L8_l1linf:1.4551e-01 L9_l1linf:1.4160e-01 L10_l1linf:1.4648e-01 L11_l1linf:1.4648e-01 L12_l1linf:1.3867e-01 L1_spectral:1.4846e-02 L2_spectral:1.3465e-02 L3_spectral:1.3314e-02 L4_spectral:1.3326e-02 L5_spectral:1.3494e-02 L6_spectral:1.3387e-02 L7_spectral:1.3503e-02 L8_spectral:1.3426e-02 L9_spectral:1.3400e-02 L10_spectral:1.3426e-02 L11_spectral:1.3368e-02 L12_spectral:1.3171e-02 train_time:349135ms step_avg:41.56ms +[2025-09-11 08:16:00] [Rank 0] PRINT: step:8400/10000 val_loss:5.3777 total_sharp:2.8750e-03 L1_sharp:1.4786e-03 L2_sharp:2.8896e-04 L3_sharp:3.1761e-04 L4_sharp:2.5995e-04 L5_sharp:4.1078e-04 L6_sharp:2.6579e-04 L7_sharp:3.3762e-04 L8_sharp:4.4316e-04 L9_sharp:3.9759e-04 L10_sharp:5.1822e-04 L11_sharp:6.8729e-04 L12_sharp:2.4997e-03 total_fnorm:3.1875e+00 total_l1_linf:5.4080e+03 total_spectral:1.6094e+00 L1_fnorm:1.0078e+00 L2_fnorm:9.5312e-01 L3_fnorm:9.2188e-01 L4_fnorm:9.2578e-01 L5_fnorm:8.9844e-01 L6_fnorm:9.1406e-01 L7_fnorm:9.1406e-01 L8_fnorm:8.9453e-01 L9_fnorm:9.1406e-01 L10_fnorm:9.1016e-01 L11_fnorm:9.2188e-01 L12_fnorm:8.5547e-01 L1_l1linf:1.8750e-01 L2_l1linf:1.6699e-01 L3_l1linf:1.6309e-01 L4_l1linf:1.5918e-01 L5_l1linf:1.4746e-01 
L6_l1linf:1.4355e-01 L7_l1linf:1.3867e-01 L8_l1linf:1.4551e-01 L9_l1linf:1.4160e-01 L10_l1linf:1.4648e-01 L11_l1linf:1.4648e-01 L12_l1linf:1.3867e-01 L1_spectral:1.4846e-02 L2_spectral:1.3465e-02 L3_spectral:1.3314e-02 L4_spectral:1.3326e-02 L5_spectral:1.3494e-02 L6_spectral:1.3387e-02 L7_spectral:1.3503e-02 L8_spectral:1.3426e-02 L9_spectral:1.3400e-02 L10_spectral:1.3426e-02 L11_spectral:1.3368e-02 L12_spectral:1.3171e-02 train_time:349135ms step_avg:41.56ms +[2025-09-11 08:16:02] [Rank 0] step:8401/10000 train_time:350443ms step_avg:41.71ms +[2025-09-11 08:16:02] [Rank 0] step:8401/10000 train_time:350443ms step_avg:41.71ms +[2025-09-11 08:16:02] [Rank 0] step:8421/10000 train_time:351167ms step_avg:41.70ms +[2025-09-11 08:16:02] [Rank 0] step:8421/10000 train_time:351167ms step_avg:41.70ms +[2025-09-11 08:16:03] [Rank 0] step:8441/10000 train_time:351877ms step_avg:41.69ms +[2025-09-11 08:16:03] [Rank 0] step:8441/10000 train_time:351877ms step_avg:41.69ms +[2025-09-11 08:16:04] [Rank 0] step:8461/10000 train_time:352585ms step_avg:41.67ms +[2025-09-11 08:16:04] [Rank 0] step:8461/10000 train_time:352585ms step_avg:41.67ms +[2025-09-11 08:16:04] [Rank 0] step:8481/10000 train_time:353294ms step_avg:41.66ms +[2025-09-11 08:16:04] [Rank 0] step:8481/10000 train_time:353294ms step_avg:41.66ms +[2025-09-11 08:16:05] [Rank 0] step:8501/10000 train_time:354002ms step_avg:41.64ms +[2025-09-11 08:16:05] [Rank 0] step:8501/10000 train_time:354002ms step_avg:41.64ms +[2025-09-11 08:16:06] [Rank 0] step:8521/10000 train_time:354709ms step_avg:41.63ms +[2025-09-11 08:16:06] [Rank 0] step:8521/10000 train_time:354709ms step_avg:41.63ms +[2025-09-11 08:16:07] [Rank 0] step:8541/10000 train_time:355414ms step_avg:41.61ms +[2025-09-11 08:16:07] [Rank 0] step:8541/10000 train_time:355414ms step_avg:41.61ms +[2025-09-11 08:16:07] [Rank 0] step:8561/10000 train_time:356126ms step_avg:41.60ms +[2025-09-11 08:16:07] [Rank 0] step:8561/10000 train_time:356126ms step_avg:41.60ms 
+[2025-09-11 08:16:08] [Rank 0] step:8581/10000 train_time:356835ms step_avg:41.58ms +[2025-09-11 08:16:08] [Rank 0] step:8581/10000 train_time:356835ms step_avg:41.58ms +[2025-09-11 08:16:09] [Rank 0] step:8601/10000 train_time:357543ms step_avg:41.57ms +[2025-09-11 08:16:09] [Rank 0] step:8601/10000 train_time:357543ms step_avg:41.57ms +[2025-09-11 08:16:09] [Rank 0] step:8621/10000 train_time:358250ms step_avg:41.56ms +[2025-09-11 08:16:09] [Rank 0] step:8621/10000 train_time:358250ms step_avg:41.56ms +[2025-09-11 08:16:10] [Rank 0] step:8641/10000 train_time:358957ms step_avg:41.54ms +[2025-09-11 08:16:10] [Rank 0] step:8641/10000 train_time:358957ms step_avg:41.54ms +[2025-09-11 08:16:11] [Rank 0] step:8661/10000 train_time:359664ms step_avg:41.53ms +[2025-09-11 08:16:11] [Rank 0] step:8661/10000 train_time:359664ms step_avg:41.53ms +[2025-09-11 08:16:12] [Rank 0] step:8681/10000 train_time:360372ms step_avg:41.51ms +[2025-09-11 08:16:12] [Rank 0] step:8681/10000 train_time:360372ms step_avg:41.51ms +[2025-09-11 08:16:12] [Rank 0] step:8701/10000 train_time:361077ms step_avg:41.50ms +[2025-09-11 08:16:12] [Rank 0] step:8701/10000 train_time:361077ms step_avg:41.50ms +[2025-09-11 08:16:13] [Rank 0] step:8721/10000 train_time:361787ms step_avg:41.48ms +[2025-09-11 08:16:13] [Rank 0] step:8721/10000 train_time:361787ms step_avg:41.48ms +[2025-09-11 08:16:14] [Rank 0] step:8741/10000 train_time:362490ms step_avg:41.47ms +[2025-09-11 08:16:14] [Rank 0] step:8741/10000 train_time:362490ms step_avg:41.47ms +[2025-09-11 08:16:14] [Rank 0] step:8761/10000 train_time:363200ms step_avg:41.46ms +[2025-09-11 08:16:14] [Rank 0] step:8761/10000 train_time:363200ms step_avg:41.46ms +[2025-09-11 08:16:15] [Rank 0] step:8781/10000 train_time:363903ms step_avg:41.44ms +[2025-09-11 08:16:15] [Rank 0] step:8781/10000 train_time:363903ms step_avg:41.44ms +[2025-09-11 08:16:16] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 08:16:16] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 08:16:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 08:16:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 08:16:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 08:16:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 08:16:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:16:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:16:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 08:16:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 08:16:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 08:16:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 08:16:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 08:16:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 08:16:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 08:16:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 08:16:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 08:16:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 08:16:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 08:16:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 08:16:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 08:16:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 08:16:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 08:16:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 08:16:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 08:16:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 08:16:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 08:16:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 08:16:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 08:16:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 08:16:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 08:16:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 08:16:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 08:16:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 08:16:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 08:16:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 08:16:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 08:16:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 08:16:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 08:16:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 08:16:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 08:16:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 08:16:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 08:16:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 08:16:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 08:16:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 08:16:26] [Rank 0] PRINT: step:8800/10000 val_loss:5.3665 total_sharp:1.9771e-03 L1_sharp:1.2506e-03 L2_sharp:3.0905e-04 L3_sharp:2.3151e-04 L4_sharp:2.2320e-04 L5_sharp:2.7645e-04 L6_sharp:1.9447e-04 L7_sharp:2.4090e-04 L8_sharp:3.4927e-04 L9_sharp:3.4725e-04 L10_sharp:4.4478e-04 L11_sharp:6.5849e-04 L12_sharp:1.8445e-03 total_fnorm:2.3750e+00 total_l1_linf:3.6000e+03 total_spectral:1.1875e+00 L1_fnorm:7.6172e-01 L2_fnorm:7.0312e-01 L3_fnorm:6.8750e-01 L4_fnorm:6.8359e-01 L5_fnorm:6.6406e-01 L6_fnorm:6.7578e-01 L7_fnorm:6.7188e-01 L8_fnorm:6.5625e-01 L9_fnorm:6.7188e-01 L10_fnorm:6.6797e-01 L11_fnorm:6.7578e-01 L12_fnorm:6.4062e-01 L1_l1linf:1.2891e-01 L2_l1linf:1.1523e-01 L3_l1linf:1.1084e-01 L4_l1linf:1.0742e-01 L5_l1linf:1.0547e-01 L6_l1linf:9.6191e-02 L7_l1linf:9.4727e-02 L8_l1linf:9.1797e-02 L9_l1linf:9.3750e-02 L10_l1linf:9.7656e-02 L11_l1linf:1.0400e-01 L12_l1linf:9.5703e-02 L1_spectral:1.1789e-02 L2_spectral:1.0236e-02 L3_spectral:9.9823e-03 L4_spectral:9.9914e-03 L5_spectral:1.0201e-02 L6_spectral:1.0056e-02 L7_spectral:1.0119e-02 L8_spectral:1.0208e-02 L9_spectral:1.0159e-02 L10_spectral:1.0117e-02 L11_spectral:1.0077e-02 L12_spectral:9.8463e-03 train_time:364588ms step_avg:41.43ms +[2025-09-11 08:16:26] [Rank 0] PRINT: step:8800/10000 
val_loss:5.3665 total_sharp:1.9771e-03 L1_sharp:1.2506e-03 L2_sharp:3.0905e-04 L3_sharp:2.3151e-04 L4_sharp:2.2320e-04 L5_sharp:2.7645e-04 L6_sharp:1.9447e-04 L7_sharp:2.4090e-04 L8_sharp:3.4927e-04 L9_sharp:3.4725e-04 L10_sharp:4.4478e-04 L11_sharp:6.5849e-04 L12_sharp:1.8445e-03 total_fnorm:2.3750e+00 total_l1_linf:3.6000e+03 total_spectral:1.1875e+00 L1_fnorm:7.6172e-01 L2_fnorm:7.0312e-01 L3_fnorm:6.8750e-01 L4_fnorm:6.8359e-01 L5_fnorm:6.6406e-01 L6_fnorm:6.7578e-01 L7_fnorm:6.7188e-01 L8_fnorm:6.5625e-01 L9_fnorm:6.7188e-01 L10_fnorm:6.6797e-01 L11_fnorm:6.7578e-01 L12_fnorm:6.4062e-01 L1_l1linf:1.2891e-01 L2_l1linf:1.1523e-01 L3_l1linf:1.1084e-01 L4_l1linf:1.0742e-01 L5_l1linf:1.0547e-01 L6_l1linf:9.6191e-02 L7_l1linf:9.4727e-02 L8_l1linf:9.1797e-02 L9_l1linf:9.3750e-02 L10_l1linf:9.7656e-02 L11_l1linf:1.0400e-01 L12_l1linf:9.5703e-02 L1_spectral:1.1789e-02 L2_spectral:1.0236e-02 L3_spectral:9.9823e-03 L4_spectral:9.9914e-03 L5_spectral:1.0201e-02 L6_spectral:1.0056e-02 L7_spectral:1.0119e-02 L8_spectral:1.0208e-02 L9_spectral:1.0159e-02 L10_spectral:1.0117e-02 L11_spectral:1.0077e-02 L12_spectral:9.8463e-03 train_time:364588ms step_avg:41.43ms +[2025-09-11 08:16:27] [Rank 0] step:8801/10000 train_time:365886ms step_avg:41.57ms +[2025-09-11 08:16:27] [Rank 0] step:8801/10000 train_time:365886ms step_avg:41.57ms +[2025-09-11 08:16:28] [Rank 0] step:8821/10000 train_time:366620ms step_avg:41.56ms +[2025-09-11 08:16:28] [Rank 0] step:8821/10000 train_time:366620ms step_avg:41.56ms +[2025-09-11 08:16:28] [Rank 0] step:8841/10000 train_time:367329ms step_avg:41.55ms +[2025-09-11 08:16:28] [Rank 0] step:8841/10000 train_time:367329ms step_avg:41.55ms +[2025-09-11 08:16:29] [Rank 0] step:8861/10000 train_time:368038ms step_avg:41.53ms +[2025-09-11 08:16:29] [Rank 0] step:8861/10000 train_time:368038ms step_avg:41.53ms +[2025-09-11 08:16:30] [Rank 0] step:8881/10000 train_time:368745ms step_avg:41.52ms +[2025-09-11 08:16:30] [Rank 0] step:8881/10000 
train_time:368745ms step_avg:41.52ms +[2025-09-11 08:16:30] [Rank 0] step:8901/10000 train_time:369454ms step_avg:41.51ms +[2025-09-11 08:16:30] [Rank 0] step:8901/10000 train_time:369454ms step_avg:41.51ms +[2025-09-11 08:16:31] [Rank 0] step:8921/10000 train_time:370159ms step_avg:41.49ms +[2025-09-11 08:16:31] [Rank 0] step:8921/10000 train_time:370159ms step_avg:41.49ms +[2025-09-11 08:16:32] [Rank 0] step:8941/10000 train_time:370869ms step_avg:41.48ms +[2025-09-11 08:16:32] [Rank 0] step:8941/10000 train_time:370869ms step_avg:41.48ms +[2025-09-11 08:16:33] [Rank 0] step:8961/10000 train_time:371585ms step_avg:41.47ms +[2025-09-11 08:16:33] [Rank 0] step:8961/10000 train_time:371585ms step_avg:41.47ms +[2025-09-11 08:16:34] [Rank 0] step:8981/10000 train_time:372840ms step_avg:41.51ms +[2025-09-11 08:16:34] [Rank 0] step:8981/10000 train_time:372840ms step_avg:41.51ms +[2025-09-11 08:16:35] [Rank 0] step:9001/10000 train_time:373542ms step_avg:41.50ms +[2025-09-11 08:16:35] [Rank 0] step:9001/10000 train_time:373542ms step_avg:41.50ms +[2025-09-11 08:16:35] [Rank 0] step:9021/10000 train_time:374399ms step_avg:41.50ms +[2025-09-11 08:16:35] [Rank 0] step:9021/10000 train_time:374399ms step_avg:41.50ms +[2025-09-11 08:16:36] [Rank 0] step:9041/10000 train_time:375216ms step_avg:41.50ms +[2025-09-11 08:16:36] [Rank 0] step:9041/10000 train_time:375216ms step_avg:41.50ms +[2025-09-11 08:16:37] [Rank 0] step:9061/10000 train_time:375923ms step_avg:41.49ms +[2025-09-11 08:16:37] [Rank 0] step:9061/10000 train_time:375923ms step_avg:41.49ms +[2025-09-11 08:16:38] [Rank 0] step:9081/10000 train_time:376632ms step_avg:41.47ms +[2025-09-11 08:16:38] [Rank 0] step:9081/10000 train_time:376632ms step_avg:41.47ms +[2025-09-11 08:16:38] [Rank 0] step:9101/10000 train_time:377343ms step_avg:41.46ms +[2025-09-11 08:16:38] [Rank 0] step:9101/10000 train_time:377343ms step_avg:41.46ms +[2025-09-11 08:16:39] [Rank 0] step:9121/10000 train_time:378055ms step_avg:41.45ms 
+[2025-09-11 08:16:39] [Rank 0] step:9121/10000 train_time:378055ms step_avg:41.45ms +[2025-09-11 08:16:40] [Rank 0] step:9141/10000 train_time:378761ms step_avg:41.44ms +[2025-09-11 08:16:40] [Rank 0] step:9141/10000 train_time:378761ms step_avg:41.44ms +[2025-09-11 08:16:41] [Rank 0] step:9161/10000 train_time:379472ms step_avg:41.42ms +[2025-09-11 08:16:41] [Rank 0] step:9161/10000 train_time:379472ms step_avg:41.42ms +[2025-09-11 08:16:41] [Rank 0] step:9181/10000 train_time:380182ms step_avg:41.41ms +[2025-09-11 08:16:41] [Rank 0] step:9181/10000 train_time:380182ms step_avg:41.41ms +[2025-09-11 08:16:42] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 08:16:42] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 08:16:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 08:16:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 08:16:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 08:16:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 08:16:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:16:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:16:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 08:16:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 08:16:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 08:16:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 08:16:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 08:16:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 08:16:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 08:16:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 08:16:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 08:16:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 08:16:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 08:16:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 08:16:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 08:16:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 08:16:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 08:16:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 08:16:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 08:16:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 08:16:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 08:16:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 08:16:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 08:16:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 08:16:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 08:16:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 08:16:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 08:16:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 08:16:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 08:16:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 08:16:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 08:16:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 08:16:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 08:16:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 08:16:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 08:16:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 08:16:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 08:16:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 08:16:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 08:16:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:16:52] [Rank 0] PRINT: step:9200/10000 val_loss:5.3534 total_sharp:1.9192e-03 L1_sharp:1.6774e-03 L2_sharp:2.1977e-04 L3_sharp:1.8743e-04 L4_sharp:1.7217e-04 L5_sharp:3.2191e-04 L6_sharp:1.8963e-04 L7_sharp:2.1247e-04 L8_sharp:3.5047e-04 L9_sharp:3.4036e-04 L10_sharp:3.7503e-04 L11_sharp:5.1386e-04 L12_sharp:1.8084e-03 total_fnorm:1.5625e+00 total_l1_linf:2.0640e+03 total_spectral:7.8516e-01 L1_fnorm:5.1172e-01 L2_fnorm:4.6680e-01 L3_fnorm:4.5117e-01 L4_fnorm:4.5117e-01 L5_fnorm:4.3945e-01 L6_fnorm:4.4336e-01 L7_fnorm:4.4336e-01 L8_fnorm:4.3164e-01 L9_fnorm:4.4141e-01 L10_fnorm:4.3945e-01 L11_fnorm:4.4531e-01 L12_fnorm:4.1797e-01 L1_l1linf:7.6172e-02 L2_l1linf:6.6895e-02 L3_l1linf:6.7383e-02 L4_l1linf:6.4941e-02 L5_l1linf:6.4453e-02 L6_l1linf:5.8838e-02 L7_l1linf:5.5908e-02 L8_l1linf:5.5908e-02 L9_l1linf:5.4688e-02 L10_l1linf:5.8105e-02 L11_l1linf:5.8350e-02 L12_l1linf:5.8838e-02 L1_spectral:8.4064e-03 L2_spectral:6.9031e-03 L3_spectral:6.6508e-03 L4_spectral:6.8175e-03 L5_spectral:7.0591e-03 L6_spectral:6.7674e-03 L7_spectral:6.8196e-03 L8_spectral:6.8573e-03 L9_spectral:6.7789e-03 L10_spectral:6.8368e-03 L11_spectral:6.7986e-03 L12_spectral:6.6598e-03 train_time:380872ms step_avg:41.40ms +[2025-09-11 08:16:52] [Rank 0] PRINT: step:9200/10000 val_loss:5.3534 total_sharp:1.9192e-03 L1_sharp:1.6774e-03 L2_sharp:2.1977e-04 L3_sharp:1.8743e-04 L4_sharp:1.7217e-04 L5_sharp:3.2191e-04 L6_sharp:1.8963e-04 L7_sharp:2.1247e-04 L8_sharp:3.5047e-04 L9_sharp:3.4036e-04 L10_sharp:3.7503e-04 L11_sharp:5.1386e-04 L12_sharp:1.8084e-03 total_fnorm:1.5625e+00 total_l1_linf:2.0640e+03 total_spectral:7.8516e-01 L1_fnorm:5.1172e-01 L2_fnorm:4.6680e-01 L3_fnorm:4.5117e-01 L4_fnorm:4.5117e-01 L5_fnorm:4.3945e-01 L6_fnorm:4.4336e-01 L7_fnorm:4.4336e-01 L8_fnorm:4.3164e-01 L9_fnorm:4.4141e-01 L10_fnorm:4.3945e-01 L11_fnorm:4.4531e-01 L12_fnorm:4.1797e-01 L1_l1linf:7.6172e-02 L2_l1linf:6.6895e-02 L3_l1linf:6.7383e-02 L4_l1linf:6.4941e-02 L5_l1linf:6.4453e-02 
L6_l1linf:5.8838e-02 L7_l1linf:5.5908e-02 L8_l1linf:5.5908e-02 L9_l1linf:5.4688e-02 L10_l1linf:5.8105e-02 L11_l1linf:5.8350e-02 L12_l1linf:5.8838e-02 L1_spectral:8.4064e-03 L2_spectral:6.9031e-03 L3_spectral:6.6508e-03 L4_spectral:6.8175e-03 L5_spectral:7.0591e-03 L6_spectral:6.7674e-03 L7_spectral:6.8196e-03 L8_spectral:6.8573e-03 L9_spectral:6.7789e-03 L10_spectral:6.8368e-03 L11_spectral:6.7986e-03 L12_spectral:6.6598e-03 train_time:380872ms step_avg:41.40ms +[2025-09-11 08:16:53] [Rank 0] step:9201/10000 train_time:382197ms step_avg:41.54ms +[2025-09-11 08:16:53] [Rank 0] step:9201/10000 train_time:382197ms step_avg:41.54ms +[2025-09-11 08:16:54] [Rank 0] step:9221/10000 train_time:382948ms step_avg:41.53ms +[2025-09-11 08:16:54] [Rank 0] step:9221/10000 train_time:382948ms step_avg:41.53ms +[2025-09-11 08:16:55] [Rank 0] step:9241/10000 train_time:383654ms step_avg:41.52ms +[2025-09-11 08:16:55] [Rank 0] step:9241/10000 train_time:383654ms step_avg:41.52ms +[2025-09-11 08:16:55] [Rank 0] step:9261/10000 train_time:384365ms step_avg:41.50ms +[2025-09-11 08:16:55] [Rank 0] step:9261/10000 train_time:384365ms step_avg:41.50ms +[2025-09-11 08:16:56] [Rank 0] step:9281/10000 train_time:385075ms step_avg:41.49ms +[2025-09-11 08:16:56] [Rank 0] step:9281/10000 train_time:385075ms step_avg:41.49ms +[2025-09-11 08:16:57] [Rank 0] step:9301/10000 train_time:385781ms step_avg:41.48ms +[2025-09-11 08:16:57] [Rank 0] step:9301/10000 train_time:385781ms step_avg:41.48ms +[2025-09-11 08:16:58] [Rank 0] step:9321/10000 train_time:386492ms step_avg:41.46ms +[2025-09-11 08:16:58] [Rank 0] step:9321/10000 train_time:386492ms step_avg:41.46ms +[2025-09-11 08:16:58] [Rank 0] step:9341/10000 train_time:387196ms step_avg:41.45ms +[2025-09-11 08:16:58] [Rank 0] step:9341/10000 train_time:387196ms step_avg:41.45ms +[2025-09-11 08:16:59] [Rank 0] step:9361/10000 train_time:387900ms step_avg:41.44ms +[2025-09-11 08:16:59] [Rank 0] step:9361/10000 train_time:387900ms step_avg:41.44ms 
+[2025-09-11 08:17:00] [Rank 0] step:9381/10000 train_time:388605ms step_avg:41.42ms +[2025-09-11 08:17:00] [Rank 0] step:9381/10000 train_time:388605ms step_avg:41.42ms +[2025-09-11 08:17:00] [Rank 0] step:9401/10000 train_time:389314ms step_avg:41.41ms +[2025-09-11 08:17:00] [Rank 0] step:9401/10000 train_time:389314ms step_avg:41.41ms +[2025-09-11 08:17:01] [Rank 0] step:9421/10000 train_time:390024ms step_avg:41.40ms +[2025-09-11 08:17:01] [Rank 0] step:9421/10000 train_time:390024ms step_avg:41.40ms +[2025-09-11 08:17:02] [Rank 0] step:9441/10000 train_time:390736ms step_avg:41.39ms +[2025-09-11 08:17:02] [Rank 0] step:9441/10000 train_time:390736ms step_avg:41.39ms +[2025-09-11 08:17:03] [Rank 0] step:9461/10000 train_time:391445ms step_avg:41.37ms +[2025-09-11 08:17:03] [Rank 0] step:9461/10000 train_time:391445ms step_avg:41.37ms +[2025-09-11 08:17:03] [Rank 0] step:9481/10000 train_time:392156ms step_avg:41.36ms +[2025-09-11 08:17:03] [Rank 0] step:9481/10000 train_time:392156ms step_avg:41.36ms +[2025-09-11 08:17:04] [Rank 0] step:9501/10000 train_time:392866ms step_avg:41.35ms +[2025-09-11 08:17:04] [Rank 0] step:9501/10000 train_time:392866ms step_avg:41.35ms +[2025-09-11 08:17:05] [Rank 0] step:9521/10000 train_time:393576ms step_avg:41.34ms +[2025-09-11 08:17:05] [Rank 0] step:9521/10000 train_time:393576ms step_avg:41.34ms +[2025-09-11 08:17:05] [Rank 0] step:9541/10000 train_time:394282ms step_avg:41.33ms +[2025-09-11 08:17:05] [Rank 0] step:9541/10000 train_time:394282ms step_avg:41.33ms +[2025-09-11 08:17:06] [Rank 0] step:9561/10000 train_time:394990ms step_avg:41.31ms +[2025-09-11 08:17:06] [Rank 0] step:9561/10000 train_time:394990ms step_avg:41.31ms +[2025-09-11 08:17:07] [Rank 0] step:9581/10000 train_time:395700ms step_avg:41.30ms +[2025-09-11 08:17:07] [Rank 0] step:9581/10000 train_time:395700ms step_avg:41.30ms +[2025-09-11 08:17:07] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 08:17:07] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 08:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 08:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 08:17:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 08:17:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 08:17:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:17:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:17:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 08:17:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 08:17:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 08:17:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 08:17:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 08:17:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 08:17:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 08:17:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 08:17:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 08:17:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 08:17:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 08:17:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 08:17:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 08:17:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 08:17:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 08:17:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 08:17:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 08:17:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 08:17:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 08:17:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 08:17:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 08:17:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 08:17:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 08:17:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 08:17:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 08:17:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 08:17:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 08:17:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 08:17:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 08:17:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 08:17:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 08:17:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 08:17:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 08:17:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 08:17:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 08:17:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 08:17:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 08:17:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 08:17:18] [Rank 0] PRINT: step:9600/10000 val_loss:5.3466 total_sharp:1.3626e-03 L1_sharp:8.3573e-04 L2_sharp:1.0921e-04 L3_sharp:1.9485e-04 L4_sharp:1.3543e-04 L5_sharp:2.7551e-04 L6_sharp:1.6768e-04 L7_sharp:2.0326e-04 L8_sharp:2.8940e-04 L9_sharp:2.4431e-04 L10_sharp:2.9256e-04 L11_sharp:3.7026e-04 L12_sharp:1.4777e-03 total_fnorm:8.9062e-01 total_l1_linf:9.7600e+02 total_spectral:4.4727e-01 L1_fnorm:2.9492e-01 L2_fnorm:2.6758e-01 L3_fnorm:2.5781e-01 L4_fnorm:2.5781e-01 L5_fnorm:2.4805e-01 L6_fnorm:2.5195e-01 L7_fnorm:2.5195e-01 L8_fnorm:2.4707e-01 L9_fnorm:2.5195e-01 L10_fnorm:2.5000e-01 L11_fnorm:2.5195e-01 L12_fnorm:2.3535e-01 L1_l1linf:3.4912e-02 L2_l1linf:3.2471e-02 L3_l1linf:3.1738e-02 L4_l1linf:3.1250e-02 L5_l1linf:3.2227e-02 L6_l1linf:2.8809e-02 L7_l1linf:2.7100e-02 L8_l1linf:2.7466e-02 L9_l1linf:2.7710e-02 L10_l1linf:2.8564e-02 L11_l1linf:2.8564e-02 L12_l1linf:2.6855e-02 L1_spectral:5.0690e-03 L2_spectral:4.0300e-03 L3_spectral:3.9464e-03 L4_spectral:3.9367e-03 L5_spectral:4.1125e-03 L6_spectral:3.9541e-03 L7_spectral:4.0021e-03 L8_spectral:4.0420e-03 L9_spectral:3.9689e-03 L10_spectral:3.9354e-03 L11_spectral:3.9432e-03 L12_spectral:3.9088e-03 train_time:396385ms step_avg:41.29ms +[2025-09-11 08:17:18] [Rank 0] PRINT: step:9600/10000 
val_loss:5.3466 total_sharp:1.3626e-03 L1_sharp:8.3573e-04 L2_sharp:1.0921e-04 L3_sharp:1.9485e-04 L4_sharp:1.3543e-04 L5_sharp:2.7551e-04 L6_sharp:1.6768e-04 L7_sharp:2.0326e-04 L8_sharp:2.8940e-04 L9_sharp:2.4431e-04 L10_sharp:2.9256e-04 L11_sharp:3.7026e-04 L12_sharp:1.4777e-03 total_fnorm:8.9062e-01 total_l1_linf:9.7600e+02 total_spectral:4.4727e-01 L1_fnorm:2.9492e-01 L2_fnorm:2.6758e-01 L3_fnorm:2.5781e-01 L4_fnorm:2.5781e-01 L5_fnorm:2.4805e-01 L6_fnorm:2.5195e-01 L7_fnorm:2.5195e-01 L8_fnorm:2.4707e-01 L9_fnorm:2.5195e-01 L10_fnorm:2.5000e-01 L11_fnorm:2.5195e-01 L12_fnorm:2.3535e-01 L1_l1linf:3.4912e-02 L2_l1linf:3.2471e-02 L3_l1linf:3.1738e-02 L4_l1linf:3.1250e-02 L5_l1linf:3.2227e-02 L6_l1linf:2.8809e-02 L7_l1linf:2.7100e-02 L8_l1linf:2.7466e-02 L9_l1linf:2.7710e-02 L10_l1linf:2.8564e-02 L11_l1linf:2.8564e-02 L12_l1linf:2.6855e-02 L1_spectral:5.0690e-03 L2_spectral:4.0300e-03 L3_spectral:3.9464e-03 L4_spectral:3.9367e-03 L5_spectral:4.1125e-03 L6_spectral:3.9541e-03 L7_spectral:4.0021e-03 L8_spectral:4.0420e-03 L9_spectral:3.9689e-03 L10_spectral:3.9354e-03 L11_spectral:3.9432e-03 L12_spectral:3.9088e-03 train_time:396385ms step_avg:41.29ms +[2025-09-11 08:17:19] [Rank 0] step:9601/10000 train_time:397693ms step_avg:41.42ms +[2025-09-11 08:17:19] [Rank 0] step:9601/10000 train_time:397693ms step_avg:41.42ms +[2025-09-11 08:17:20] [Rank 0] step:9621/10000 train_time:398428ms step_avg:41.41ms +[2025-09-11 08:17:20] [Rank 0] step:9621/10000 train_time:398428ms step_avg:41.41ms +[2025-09-11 08:17:20] [Rank 0] step:9641/10000 train_time:399143ms step_avg:41.40ms +[2025-09-11 08:17:20] [Rank 0] step:9641/10000 train_time:399143ms step_avg:41.40ms +[2025-09-11 08:17:21] [Rank 0] step:9661/10000 train_time:399864ms step_avg:41.39ms +[2025-09-11 08:17:21] [Rank 0] step:9661/10000 train_time:399864ms step_avg:41.39ms +[2025-09-11 08:17:22] [Rank 0] step:9681/10000 train_time:400576ms step_avg:41.38ms +[2025-09-11 08:17:22] [Rank 0] step:9681/10000 
train_time:400576ms step_avg:41.38ms +[2025-09-11 08:17:23] [Rank 0] step:9701/10000 train_time:401292ms step_avg:41.37ms +[2025-09-11 08:17:23] [Rank 0] step:9701/10000 train_time:401292ms step_avg:41.37ms +[2025-09-11 08:17:23] [Rank 0] step:9721/10000 train_time:402014ms step_avg:41.36ms +[2025-09-11 08:17:23] [Rank 0] step:9721/10000 train_time:402014ms step_avg:41.36ms +[2025-09-11 08:17:24] [Rank 0] step:9741/10000 train_time:402729ms step_avg:41.34ms +[2025-09-11 08:17:24] [Rank 0] step:9741/10000 train_time:402729ms step_avg:41.34ms +[2025-09-11 08:17:25] [Rank 0] step:9761/10000 train_time:403444ms step_avg:41.33ms +[2025-09-11 08:17:25] [Rank 0] step:9761/10000 train_time:403444ms step_avg:41.33ms +[2025-09-11 08:17:25] [Rank 0] step:9781/10000 train_time:404157ms step_avg:41.32ms +[2025-09-11 08:17:25] [Rank 0] step:9781/10000 train_time:404157ms step_avg:41.32ms +[2025-09-11 08:17:26] [Rank 0] step:9801/10000 train_time:404877ms step_avg:41.31ms +[2025-09-11 08:17:26] [Rank 0] step:9801/10000 train_time:404877ms step_avg:41.31ms +[2025-09-11 08:17:27] [Rank 0] step:9821/10000 train_time:405594ms step_avg:41.30ms +[2025-09-11 08:17:27] [Rank 0] step:9821/10000 train_time:405594ms step_avg:41.30ms +[2025-09-11 08:17:28] [Rank 0] step:9841/10000 train_time:406315ms step_avg:41.29ms +[2025-09-11 08:17:28] [Rank 0] step:9841/10000 train_time:406315ms step_avg:41.29ms +[2025-09-11 08:17:28] [Rank 0] step:9861/10000 train_time:407032ms step_avg:41.28ms +[2025-09-11 08:17:28] [Rank 0] step:9861/10000 train_time:407032ms step_avg:41.28ms +[2025-09-11 08:17:29] [Rank 0] step:9881/10000 train_time:407749ms step_avg:41.27ms +[2025-09-11 08:17:29] [Rank 0] step:9881/10000 train_time:407749ms step_avg:41.27ms +[2025-09-11 08:17:30] [Rank 0] step:9901/10000 train_time:408462ms step_avg:41.25ms +[2025-09-11 08:17:30] [Rank 0] step:9901/10000 train_time:408462ms step_avg:41.25ms +[2025-09-11 08:17:30] [Rank 0] step:9921/10000 train_time:409176ms step_avg:41.24ms 
+[2025-09-11 08:17:30] [Rank 0] step:9921/10000 train_time:409176ms step_avg:41.24ms +[2025-09-11 08:17:31] [Rank 0] step:9941/10000 train_time:409895ms step_avg:41.23ms +[2025-09-11 08:17:31] [Rank 0] step:9941/10000 train_time:409895ms step_avg:41.23ms +[2025-09-11 08:17:32] [Rank 0] step:9961/10000 train_time:410614ms step_avg:41.22ms +[2025-09-11 08:17:32] [Rank 0] step:9961/10000 train_time:410614ms step_avg:41.22ms +[2025-09-11 08:17:33] [Rank 0] step:9981/10000 train_time:411330ms step_avg:41.21ms +[2025-09-11 08:17:33] [Rank 0] step:9981/10000 train_time:411330ms step_avg:41.21ms +[2025-09-11 08:17:33] [Rank 0] step:10000/10000 train_time:412017ms step_avg:41.20ms +[2025-09-11 08:17:33] [Rank 0] step:10000/10000 train_time:412017ms step_avg:41.20ms +[2025-09-11 08:17:33] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 08:17:33] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 08:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 08:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 08:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 08:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 08:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 08:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 08:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 08:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 08:17:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 08:17:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 08:17:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 08:17:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 08:17:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 08:17:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 08:17:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 08:17:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 08:17:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 08:17:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 08:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 08:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 08:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 08:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 08:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 08:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 08:17:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 08:17:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 08:17:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 08:17:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 08:17:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 08:17:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 08:17:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 08:17:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 08:17:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 08:17:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 08:17:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 08:17:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 08:17:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 08:17:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 08:17:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 08:17:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 08:17:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 08:17:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:17:45] [Rank 0] PRINT: step:10000/10000 val_loss:5.3438 total_sharp:8.1626e-04 L1_sharp:3.9362e-04 L2_sharp:8.3881e-05 L3_sharp:1.0645e-04 L4_sharp:1.0801e-04 L5_sharp:1.5253e-04 L6_sharp:9.6005e-05 L7_sharp:1.3117e-04 L8_sharp:1.7759e-04 L9_sharp:1.7796e-04 L10_sharp:2.0547e-04 L11_sharp:2.7939e-04 L12_sharp:1.2847e-03 total_fnorm:3.4766e-01 total_l1_linf:2.7600e+02 total_spectral:1.7285e-01 L1_fnorm:1.1572e-01 L2_fnorm:1.0254e-01 L3_fnorm:9.8633e-02 L4_fnorm:9.8633e-02 L5_fnorm:9.6680e-02 L6_fnorm:9.7656e-02 L7_fnorm:9.7168e-02 L8_fnorm:9.5215e-02 L9_fnorm:9.7168e-02 L10_fnorm:9.7168e-02 L11_fnorm:9.7656e-02 L12_fnorm:9.1309e-02 L1_l1linf:1.0864e-02 L2_l1linf:9.9487e-03 L3_l1linf:9.6436e-03 L4_l1linf:1.0071e-02 L5_l1linf:1.0193e-02 L6_l1linf:8.9111e-03 L7_l1linf:7.9956e-03 L8_l1linf:8.1177e-03 L9_l1linf:8.2397e-03 L10_l1linf:8.2397e-03 L11_l1linf:8.6670e-03 L12_l1linf:8.7891e-03 L1_spectral:2.0618e-03 L2_spectral:1.5789e-03 L3_spectral:1.5294e-03 L4_spectral:1.5579e-03 L5_spectral:1.6780e-03 L6_spectral:1.5686e-03 L7_spectral:1.5704e-03 L8_spectral:1.5953e-03 L9_spectral:1.5746e-03 L10_spectral:1.5828e-03 L11_spectral:1.5819e-03 L12_spectral:1.5573e-03 train_time:412037ms step_avg:41.20ms +[2025-09-11 08:17:45] [Rank 0] PRINT: step:10000/10000 val_loss:5.3438 total_sharp:8.1626e-04 L1_sharp:3.9362e-04 L2_sharp:8.3881e-05 L3_sharp:1.0645e-04 L4_sharp:1.0801e-04 L5_sharp:1.5253e-04 L6_sharp:9.6005e-05 L7_sharp:1.3117e-04 L8_sharp:1.7759e-04 L9_sharp:1.7796e-04 L10_sharp:2.0547e-04 L11_sharp:2.7939e-04 L12_sharp:1.2847e-03 total_fnorm:3.4766e-01 total_l1_linf:2.7600e+02 total_spectral:1.7285e-01 L1_fnorm:1.1572e-01 L2_fnorm:1.0254e-01 L3_fnorm:9.8633e-02 L4_fnorm:9.8633e-02 L5_fnorm:9.6680e-02 L6_fnorm:9.7656e-02 L7_fnorm:9.7168e-02 L8_fnorm:9.5215e-02 L9_fnorm:9.7168e-02 L10_fnorm:9.7168e-02 L11_fnorm:9.7656e-02 L12_fnorm:9.1309e-02 L1_l1linf:1.0864e-02 L2_l1linf:9.9487e-03 L3_l1linf:9.6436e-03 L4_l1linf:1.0071e-02 L5_l1linf:1.0193e-02 
L6_l1linf:8.9111e-03 L7_l1linf:7.9956e-03 L8_l1linf:8.1177e-03 L9_l1linf:8.2397e-03 L10_l1linf:8.2397e-03 L11_l1linf:8.6670e-03 L12_l1linf:8.7891e-03 L1_spectral:2.0618e-03 L2_spectral:1.5789e-03 L3_spectral:1.5294e-03 L4_spectral:1.5579e-03 L5_spectral:1.6780e-03 L6_spectral:1.5686e-03 L7_spectral:1.5704e-03 L8_spectral:1.5953e-03 L9_spectral:1.5746e-03 L10_spectral:1.5828e-03 L11_spectral:1.5819e-03 L12_spectral:1.5573e-03 train_time:412037ms step_avg:41.20ms +[2025-09-11 08:17:45] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 08:17:45 2025 --- +[2025-09-11 08:17:45] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 08:17:45 2025 --- +[2025-09-11 08:17:45] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 08:17:45] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.05_seed_44/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.05_seed_44/config.json new file mode 100644 index 0000000000000000000000000000000000000000..440a89b6bf52694818fc5f67168f1e840e3d987e --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.05_seed_44/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 44, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.001, + "muon_lr": 0.05, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "8214b888-b7e0-41d4-b295-d97c75ddb016", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.05_seed_44/training_log_8214b888-b7e0-41d4-b295-d97c75ddb016.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.05_seed_44/training_log_8214b888-b7e0-41d4-b295-d97c75ddb016.txt new file mode 100644 index 0000000000000000000000000000000000000000..76329520e33c410ee3ec7b501f5bb78131811e79 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.05_seed_44/training_log_8214b888-b7e0-41d4-b295-d97c75ddb016.txt @@ -0,0 +1,4264 @@ +[2025-09-11 07:51:15] [Rank 0] PRINT: --- Script Start: Thu Sep 11 07:51:15 2025 --- +[2025-09-11 07:51:15] [Rank 0] PRINT: --- Script Start: Thu Sep 11 07:51:15 2025 --- +[2025-09-11 07:51:15] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001, muon_lr=0.05, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 07:51:15] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001, muon_lr=0.05, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 07:51:15] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 07:51:15] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 07:51:15] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 07:51:15] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 07:51:15] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.05_seed_44 +[2025-09-11 07:51:15] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.05_seed_44 +[2025-09-11 07:51:15] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import 
dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, 
"unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." 
+ "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention 
sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 07:51:15] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 07:51:15] [Rank 0] PRINT: Constructing model... +[2025-09-11 07:51:15] [Rank 0] PRINT: Constructing model... +[2025-09-11 07:51:16] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 07:51:16] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 07:51:16] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 07:51:16] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 07:51:16] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 07:51:16] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 07:51:16] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 07:51:16] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 07:51:16] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 07:51:16] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 07:51:18] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 07:51:18] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 07:51:18] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 07:51:18] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 07:51:18] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 07:51:18] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 07:51:24] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 07:51:24] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 07:51:24] [Rank 0] PRINT: Starting warmup... +[2025-09-11 07:51:24] [Rank 0] PRINT: Starting warmup... +[2025-09-11 07:52:01] [Rank 0] PRINT: Warmup complete. +[2025-09-11 07:52:01] [Rank 0] PRINT: Warmup complete. +[2025-09-11 07:52:01] [Rank 0] PRINT: Starting training... +[2025-09-11 07:52:01] [Rank 0] PRINT: Starting training... 
+[2025-09-11 07:52:03] [Rank 0] step:21/10000 train_time:1140ms step_avg:54.29ms +[2025-09-11 07:52:03] [Rank 0] step:21/10000 train_time:1140ms step_avg:54.29ms +[2025-09-11 07:52:03] [Rank 0] step:41/10000 train_time:1874ms step_avg:45.71ms +[2025-09-11 07:52:03] [Rank 0] step:41/10000 train_time:1874ms step_avg:45.71ms +[2025-09-11 07:52:04] [Rank 0] step:61/10000 train_time:2607ms step_avg:42.74ms +[2025-09-11 07:52:04] [Rank 0] step:61/10000 train_time:2607ms step_avg:42.74ms +[2025-09-11 07:52:05] [Rank 0] step:81/10000 train_time:3340ms step_avg:41.23ms +[2025-09-11 07:52:05] [Rank 0] step:81/10000 train_time:3340ms step_avg:41.23ms +[2025-09-11 07:52:06] [Rank 0] step:101/10000 train_time:4073ms step_avg:40.33ms +[2025-09-11 07:52:06] [Rank 0] step:101/10000 train_time:4073ms step_avg:40.33ms +[2025-09-11 07:52:06] [Rank 0] step:121/10000 train_time:4806ms step_avg:39.72ms +[2025-09-11 07:52:06] [Rank 0] step:121/10000 train_time:4806ms step_avg:39.72ms +[2025-09-11 07:52:07] [Rank 0] step:141/10000 train_time:5539ms step_avg:39.28ms +[2025-09-11 07:52:07] [Rank 0] step:141/10000 train_time:5539ms step_avg:39.28ms +[2025-09-11 07:52:08] [Rank 0] step:161/10000 train_time:6272ms step_avg:38.96ms +[2025-09-11 07:52:08] [Rank 0] step:161/10000 train_time:6272ms step_avg:38.96ms +[2025-09-11 07:52:08] [Rank 0] step:181/10000 train_time:7004ms step_avg:38.70ms +[2025-09-11 07:52:08] [Rank 0] step:181/10000 train_time:7004ms step_avg:38.70ms +[2025-09-11 07:52:09] [Rank 0] step:201/10000 train_time:7736ms step_avg:38.49ms +[2025-09-11 07:52:09] [Rank 0] step:201/10000 train_time:7736ms step_avg:38.49ms +[2025-09-11 07:52:10] [Rank 0] step:221/10000 train_time:8468ms step_avg:38.32ms +[2025-09-11 07:52:10] [Rank 0] step:221/10000 train_time:8468ms step_avg:38.32ms +[2025-09-11 07:52:11] [Rank 0] step:241/10000 train_time:9201ms step_avg:38.18ms +[2025-09-11 07:52:11] [Rank 0] step:241/10000 train_time:9201ms step_avg:38.18ms +[2025-09-11 07:52:11] [Rank 0] 
step:261/10000 train_time:9933ms step_avg:38.06ms +[2025-09-11 07:52:11] [Rank 0] step:261/10000 train_time:9933ms step_avg:38.06ms +[2025-09-11 07:52:12] [Rank 0] step:281/10000 train_time:10666ms step_avg:37.96ms +[2025-09-11 07:52:12] [Rank 0] step:281/10000 train_time:10666ms step_avg:37.96ms +[2025-09-11 07:52:13] [Rank 0] step:301/10000 train_time:11398ms step_avg:37.87ms +[2025-09-11 07:52:13] [Rank 0] step:301/10000 train_time:11398ms step_avg:37.87ms +[2025-09-11 07:52:14] [Rank 0] step:321/10000 train_time:12130ms step_avg:37.79ms +[2025-09-11 07:52:14] [Rank 0] step:321/10000 train_time:12130ms step_avg:37.79ms +[2025-09-11 07:52:14] [Rank 0] step:341/10000 train_time:12863ms step_avg:37.72ms +[2025-09-11 07:52:14] [Rank 0] step:341/10000 train_time:12863ms step_avg:37.72ms +[2025-09-11 07:52:15] [Rank 0] step:361/10000 train_time:13596ms step_avg:37.66ms +[2025-09-11 07:52:15] [Rank 0] step:361/10000 train_time:13596ms step_avg:37.66ms +[2025-09-11 07:52:16] [Rank 0] step:381/10000 train_time:14328ms step_avg:37.61ms +[2025-09-11 07:52:16] [Rank 0] step:381/10000 train_time:14328ms step_avg:37.61ms +[2025-09-11 07:52:16] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 07:52:16] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 07:52:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 07:52:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 07:52:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 07:52:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 07:52:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:52:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 07:52:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 07:52:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 07:52:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 07:52:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 07:52:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 07:52:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 07:53:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 07:53:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 07:53:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 07:53:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 07:53:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 07:53:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 07:53:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 07:53:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 07:53:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 07:53:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 07:53:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 07:53:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 07:53:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 07:53:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 07:53:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 07:53:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 07:53:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 07:53:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 07:53:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 07:53:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 07:53:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 07:53:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 07:53:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 07:53:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 07:53:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 07:53:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 07:53:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 07:53:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 07:53:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 07:53:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 07:53:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 07:53:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:53:04] [Rank 0] PRINT: step:400/10000 val_loss:7.2400 total_sharp:1.5432e-02 L1_sharp:8.8096e-03 L2_sharp:4.5597e-03 L3_sharp:2.8494e-03 L4_sharp:2.8211e-03 L5_sharp:2.3564e-03 L6_sharp:2.4721e-03 L7_sharp:1.4477e-03 L8_sharp:1.0455e-03 L9_sharp:2.8108e-04 L10_sharp:5.2144e-04 L11_sharp:1.2828e-03 L12_sharp:3.4011e-04 total_fnorm:1.4892e+01 total_l1_linf:5.4061e+04 total_spectral:7.4460e+00 L1_fnorm:6.2312e+00 L2_fnorm:5.7398e+00 L3_fnorm:5.2121e+00 L4_fnorm:4.6912e+00 L5_fnorm:3.9525e+00 L6_fnorm:3.3524e+00 L7_fnorm:3.0225e+00 L8_fnorm:2.8177e+00 L9_fnorm:2.7502e+00 L10_fnorm:2.7682e+00 L11_fnorm:2.5499e+00 L12_fnorm:2.5906e+00 L1_l1linf:2.0399e+00 L2_l1linf:1.8183e+00 L3_l1linf:1.5595e+00 L4_l1linf:1.3840e+00 L5_l1linf:1.1780e+00 L6_l1linf:1.1285e+00 L7_l1linf:1.2424e+00 L8_l1linf:1.1425e+00 L9_l1linf:1.2160e+00 L10_l1linf:1.2177e+00 L11_l1linf:1.1642e+00 L12_l1linf:1.1429e+00 L1_spectral:6.0410e-02 L2_spectral:6.0363e-02 L3_spectral:6.0361e-02 L4_spectral:6.0310e-02 L5_spectral:6.0125e-02 L6_spectral:6.0051e-02 L7_spectral:5.9906e-02 L8_spectral:5.9898e-02 L9_spectral:5.9934e-02 L10_spectral:5.9959e-02 L11_spectral:5.9930e-02 L12_spectral:5.9999e-02 train_time:15040ms step_avg:37.60ms +[2025-09-11 07:53:04] [Rank 0] PRINT: step:400/10000 val_loss:7.2400 total_sharp:1.5432e-02 L1_sharp:8.8096e-03 L2_sharp:4.5597e-03 L3_sharp:2.8494e-03 L4_sharp:2.8211e-03 L5_sharp:2.3564e-03 L6_sharp:2.4721e-03 L7_sharp:1.4477e-03 L8_sharp:1.0455e-03 L9_sharp:2.8108e-04 L10_sharp:5.2144e-04 L11_sharp:1.2828e-03 L12_sharp:3.4011e-04 total_fnorm:1.4892e+01 total_l1_linf:5.4061e+04 total_spectral:7.4460e+00 L1_fnorm:6.2312e+00 L2_fnorm:5.7398e+00 L3_fnorm:5.2121e+00 L4_fnorm:4.6912e+00 L5_fnorm:3.9525e+00 L6_fnorm:3.3524e+00 L7_fnorm:3.0225e+00 L8_fnorm:2.8177e+00 L9_fnorm:2.7502e+00 L10_fnorm:2.7682e+00 L11_fnorm:2.5499e+00 L12_fnorm:2.5906e+00 L1_l1linf:2.0399e+00 L2_l1linf:1.8183e+00 L3_l1linf:1.5595e+00 L4_l1linf:1.3840e+00 L5_l1linf:1.1780e+00 
L6_l1linf:1.1285e+00 L7_l1linf:1.2424e+00 L8_l1linf:1.1425e+00 L9_l1linf:1.2160e+00 L10_l1linf:1.2177e+00 L11_l1linf:1.1642e+00 L12_l1linf:1.1429e+00 L1_spectral:6.0410e-02 L2_spectral:6.0363e-02 L3_spectral:6.0361e-02 L4_spectral:6.0310e-02 L5_spectral:6.0125e-02 L6_spectral:6.0051e-02 L7_spectral:5.9906e-02 L8_spectral:5.9898e-02 L9_spectral:5.9934e-02 L10_spectral:5.9959e-02 L11_spectral:5.9930e-02 L12_spectral:5.9999e-02 train_time:15040ms step_avg:37.60ms +[2025-09-11 07:53:34] [Rank 0] step:401/10000 train_time:45259ms step_avg:112.86ms +[2025-09-11 07:53:34] [Rank 0] step:401/10000 train_time:45259ms step_avg:112.86ms +[2025-09-11 07:53:37] [Rank 0] step:421/10000 train_time:47949ms step_avg:113.89ms +[2025-09-11 07:53:37] [Rank 0] step:421/10000 train_time:47949ms step_avg:113.89ms +[2025-09-11 07:53:38] [Rank 0] step:441/10000 train_time:48590ms step_avg:110.18ms +[2025-09-11 07:53:38] [Rank 0] step:441/10000 train_time:48590ms step_avg:110.18ms +[2025-09-11 07:53:38] [Rank 0] step:461/10000 train_time:49230ms step_avg:106.79ms +[2025-09-11 07:53:38] [Rank 0] step:461/10000 train_time:49230ms step_avg:106.79ms +[2025-09-11 07:53:39] [Rank 0] step:481/10000 train_time:49871ms step_avg:103.68ms +[2025-09-11 07:53:39] [Rank 0] step:481/10000 train_time:49871ms step_avg:103.68ms +[2025-09-11 07:53:40] [Rank 0] step:501/10000 train_time:50511ms step_avg:100.82ms +[2025-09-11 07:53:40] [Rank 0] step:501/10000 train_time:50511ms step_avg:100.82ms +[2025-09-11 07:53:40] [Rank 0] step:521/10000 train_time:51151ms step_avg:98.18ms +[2025-09-11 07:53:40] [Rank 0] step:521/10000 train_time:51151ms step_avg:98.18ms +[2025-09-11 07:53:41] [Rank 0] step:541/10000 train_time:51792ms step_avg:95.73ms +[2025-09-11 07:53:41] [Rank 0] step:541/10000 train_time:51792ms step_avg:95.73ms +[2025-09-11 07:53:41] [Rank 0] step:561/10000 train_time:52432ms step_avg:93.46ms +[2025-09-11 07:53:41] [Rank 0] step:561/10000 train_time:52432ms step_avg:93.46ms +[2025-09-11 07:53:42] [Rank 
0] step:581/10000 train_time:53071ms step_avg:91.34ms +[2025-09-11 07:53:42] [Rank 0] step:581/10000 train_time:53071ms step_avg:91.34ms +[2025-09-11 07:53:43] [Rank 0] step:601/10000 train_time:53712ms step_avg:89.37ms +[2025-09-11 07:53:43] [Rank 0] step:601/10000 train_time:53712ms step_avg:89.37ms +[2025-09-11 07:53:43] [Rank 0] step:621/10000 train_time:54352ms step_avg:87.52ms +[2025-09-11 07:53:43] [Rank 0] step:621/10000 train_time:54352ms step_avg:87.52ms +[2025-09-11 07:53:44] [Rank 0] step:641/10000 train_time:54992ms step_avg:85.79ms +[2025-09-11 07:53:44] [Rank 0] step:641/10000 train_time:54992ms step_avg:85.79ms +[2025-09-11 07:53:45] [Rank 0] step:661/10000 train_time:55632ms step_avg:84.16ms +[2025-09-11 07:53:45] [Rank 0] step:661/10000 train_time:55632ms step_avg:84.16ms +[2025-09-11 07:53:45] [Rank 0] step:681/10000 train_time:56272ms step_avg:82.63ms +[2025-09-11 07:53:45] [Rank 0] step:681/10000 train_time:56272ms step_avg:82.63ms +[2025-09-11 07:53:46] [Rank 0] step:701/10000 train_time:56912ms step_avg:81.19ms +[2025-09-11 07:53:46] [Rank 0] step:701/10000 train_time:56912ms step_avg:81.19ms +[2025-09-11 07:53:47] [Rank 0] step:721/10000 train_time:57551ms step_avg:79.82ms +[2025-09-11 07:53:47] [Rank 0] step:721/10000 train_time:57551ms step_avg:79.82ms +[2025-09-11 07:53:47] [Rank 0] step:741/10000 train_time:58192ms step_avg:78.53ms +[2025-09-11 07:53:47] [Rank 0] step:741/10000 train_time:58192ms step_avg:78.53ms +[2025-09-11 07:53:48] [Rank 0] step:761/10000 train_time:58837ms step_avg:77.32ms +[2025-09-11 07:53:48] [Rank 0] step:761/10000 train_time:58837ms step_avg:77.32ms +[2025-09-11 07:53:49] [Rank 0] step:781/10000 train_time:59482ms step_avg:76.16ms +[2025-09-11 07:53:49] [Rank 0] step:781/10000 train_time:59482ms step_avg:76.16ms +[2025-09-11 07:53:49] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 07:53:49] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 07:53:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 07:53:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 07:54:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 07:54:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 07:54:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:54:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:54:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 07:54:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 07:54:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 07:54:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 07:54:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 07:54:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 07:54:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 07:54:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 07:54:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 07:54:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 07:54:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 07:54:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 07:54:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 07:54:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 07:54:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 07:54:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 07:54:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 07:54:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 07:54:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 07:54:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 07:54:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 07:54:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 07:54:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 07:54:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 07:54:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 07:54:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 07:54:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 07:54:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 07:54:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 07:54:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 07:54:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 07:54:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 07:54:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... 
+[2025-09-11 07:54:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 07:54:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 07:54:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 07:54:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 07:54:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 07:54:35] [Rank 0] PRINT: step:800/10000 val_loss:6.7030 total_sharp:7.1200e-03 L1_sharp:2.6296e-03 L2_sharp:1.8464e-03 L3_sharp:1.5385e-03 L4_sharp:8.9242e-04 L5_sharp:8.2856e-04 L6_sharp:5.8552e-04 L7_sharp:4.5577e-04 L8_sharp:4.8174e-04 L9_sharp:2.2989e-04 L10_sharp:2.8760e-04 L11_sharp:5.4714e-04 L12_sharp:1.0248e-03 total_fnorm:1.6375e+01 total_l1_linf:4.7872e+04 total_spectral:8.1875e+00 L1_fnorm:6.3125e+00 L2_fnorm:5.9062e+00 L3_fnorm:5.7188e+00 L4_fnorm:5.4062e+00 L5_fnorm:5.0000e+00 L6_fnorm:4.5000e+00 L7_fnorm:4.1875e+00 L8_fnorm:3.8594e+00 L9_fnorm:3.6250e+00 L10_fnorm:3.4688e+00 L11_fnorm:3.1250e+00 L12_fnorm:3.1094e+00 L1_l1linf:1.9297e+00 L2_l1linf:1.7812e+00 L3_l1linf:1.6719e+00 L4_l1linf:1.5859e+00 L5_l1linf:1.4375e+00 L6_l1linf:1.2422e+00 L7_l1linf:1.1719e+00 L8_l1linf:1.1484e+00 L9_l1linf:1.1172e+00 L10_l1linf:1.1328e+00 L11_l1linf:1.1250e+00 L12_l1linf:1.1250e+00 L1_spectral:6.7097e-02 L2_spectral:6.5911e-02 L3_spectral:6.5730e-02 L4_spectral:6.5347e-02 L5_spectral:6.5270e-02 L6_spectral:6.5144e-02 L7_spectral:6.4984e-02 L8_spectral:6.4359e-02 L9_spectral:6.4405e-02 L10_spectral:6.3726e-02 L11_spectral:6.3122e-02 L12_spectral:6.3353e-02 train_time:60109ms step_avg:75.14ms +[2025-09-11 07:54:35] [Rank 0] PRINT: step:800/10000 val_loss:6.7030 total_sharp:7.1200e-03 L1_sharp:2.6296e-03 L2_sharp:1.8464e-03 L3_sharp:1.5385e-03 L4_sharp:8.9242e-04 L5_sharp:8.2856e-04 L6_sharp:5.8552e-04 L7_sharp:4.5577e-04 L8_sharp:4.8174e-04 
L9_sharp:2.2989e-04 L10_sharp:2.8760e-04 L11_sharp:5.4714e-04 L12_sharp:1.0248e-03 total_fnorm:1.6375e+01 total_l1_linf:4.7872e+04 total_spectral:8.1875e+00 L1_fnorm:6.3125e+00 L2_fnorm:5.9062e+00 L3_fnorm:5.7188e+00 L4_fnorm:5.4062e+00 L5_fnorm:5.0000e+00 L6_fnorm:4.5000e+00 L7_fnorm:4.1875e+00 L8_fnorm:3.8594e+00 L9_fnorm:3.6250e+00 L10_fnorm:3.4688e+00 L11_fnorm:3.1250e+00 L12_fnorm:3.1094e+00 L1_l1linf:1.9297e+00 L2_l1linf:1.7812e+00 L3_l1linf:1.6719e+00 L4_l1linf:1.5859e+00 L5_l1linf:1.4375e+00 L6_l1linf:1.2422e+00 L7_l1linf:1.1719e+00 L8_l1linf:1.1484e+00 L9_l1linf:1.1172e+00 L10_l1linf:1.1328e+00 L11_l1linf:1.1250e+00 L12_l1linf:1.1250e+00 L1_spectral:6.7097e-02 L2_spectral:6.5911e-02 L3_spectral:6.5730e-02 L4_spectral:6.5347e-02 L5_spectral:6.5270e-02 L6_spectral:6.5144e-02 L7_spectral:6.4984e-02 L8_spectral:6.4359e-02 L9_spectral:6.4405e-02 L10_spectral:6.3726e-02 L11_spectral:6.3122e-02 L12_spectral:6.3353e-02 train_time:60109ms step_avg:75.14ms +[2025-09-11 07:54:37] [Rank 0] step:801/10000 train_time:62085ms step_avg:77.51ms +[2025-09-11 07:54:37] [Rank 0] step:801/10000 train_time:62085ms step_avg:77.51ms +[2025-09-11 07:54:38] [Rank 0] step:821/10000 train_time:62801ms step_avg:76.49ms +[2025-09-11 07:54:38] [Rank 0] step:821/10000 train_time:62801ms step_avg:76.49ms +[2025-09-11 07:54:38] [Rank 0] step:841/10000 train_time:63446ms step_avg:75.44ms +[2025-09-11 07:54:38] [Rank 0] step:841/10000 train_time:63446ms step_avg:75.44ms +[2025-09-11 07:54:39] [Rank 0] step:861/10000 train_time:64092ms step_avg:74.44ms +[2025-09-11 07:54:39] [Rank 0] step:861/10000 train_time:64092ms step_avg:74.44ms +[2025-09-11 07:54:39] [Rank 0] step:881/10000 train_time:64737ms step_avg:73.48ms +[2025-09-11 07:54:39] [Rank 0] step:881/10000 train_time:64737ms step_avg:73.48ms +[2025-09-11 07:54:40] [Rank 0] step:901/10000 train_time:65381ms step_avg:72.57ms +[2025-09-11 07:54:40] [Rank 0] step:901/10000 train_time:65381ms step_avg:72.57ms +[2025-09-11 07:54:41] [Rank 0] 
step:921/10000 train_time:66026ms step_avg:71.69ms +[2025-09-11 07:54:41] [Rank 0] step:921/10000 train_time:66026ms step_avg:71.69ms +[2025-09-11 07:54:41] [Rank 0] step:941/10000 train_time:66671ms step_avg:70.85ms +[2025-09-11 07:54:41] [Rank 0] step:941/10000 train_time:66671ms step_avg:70.85ms +[2025-09-11 07:54:42] [Rank 0] step:961/10000 train_time:67316ms step_avg:70.05ms +[2025-09-11 07:54:42] [Rank 0] step:961/10000 train_time:67316ms step_avg:70.05ms +[2025-09-11 07:54:43] [Rank 0] step:981/10000 train_time:67960ms step_avg:69.28ms +[2025-09-11 07:54:43] [Rank 0] step:981/10000 train_time:67960ms step_avg:69.28ms +[2025-09-11 07:54:43] [Rank 0] step:1001/10000 train_time:68605ms step_avg:68.54ms +[2025-09-11 07:54:43] [Rank 0] step:1001/10000 train_time:68605ms step_avg:68.54ms +[2025-09-11 07:54:44] [Rank 0] step:1021/10000 train_time:69249ms step_avg:67.83ms +[2025-09-11 07:54:44] [Rank 0] step:1021/10000 train_time:69249ms step_avg:67.83ms +[2025-09-11 07:54:45] [Rank 0] step:1041/10000 train_time:69894ms step_avg:67.14ms +[2025-09-11 07:54:45] [Rank 0] step:1041/10000 train_time:69894ms step_avg:67.14ms +[2025-09-11 07:54:45] [Rank 0] step:1061/10000 train_time:70538ms step_avg:66.48ms +[2025-09-11 07:54:45] [Rank 0] step:1061/10000 train_time:70538ms step_avg:66.48ms +[2025-09-11 07:54:46] [Rank 0] step:1081/10000 train_time:71182ms step_avg:65.85ms +[2025-09-11 07:54:46] [Rank 0] step:1081/10000 train_time:71182ms step_avg:65.85ms +[2025-09-11 07:54:47] [Rank 0] step:1101/10000 train_time:71826ms step_avg:65.24ms +[2025-09-11 07:54:47] [Rank 0] step:1101/10000 train_time:71826ms step_avg:65.24ms +[2025-09-11 07:54:47] [Rank 0] step:1121/10000 train_time:72470ms step_avg:64.65ms +[2025-09-11 07:54:47] [Rank 0] step:1121/10000 train_time:72470ms step_avg:64.65ms +[2025-09-11 07:54:48] [Rank 0] step:1141/10000 train_time:73114ms step_avg:64.08ms +[2025-09-11 07:54:48] [Rank 0] step:1141/10000 train_time:73114ms step_avg:64.08ms +[2025-09-11 07:54:49] 
[Rank 0] step:1161/10000 train_time:73758ms step_avg:63.53ms +[2025-09-11 07:54:49] [Rank 0] step:1161/10000 train_time:73758ms step_avg:63.53ms +[2025-09-11 07:54:49] [Rank 0] step:1181/10000 train_time:74402ms step_avg:63.00ms +[2025-09-11 07:54:49] [Rank 0] step:1181/10000 train_time:74402ms step_avg:63.00ms +[2025-09-11 07:54:50] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 07:54:50] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 07:54:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 07:54:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 07:54:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 07:54:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 07:54:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:54:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:54:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 07:54:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 07:54:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 07:54:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 07:54:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 07:54:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 07:54:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... 
+[2025-09-11 07:54:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 07:54:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 07:54:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 07:54:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 07:54:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 07:54:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 07:54:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 07:54:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 07:54:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 07:54:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 07:54:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 07:54:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 07:54:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 07:54:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 07:54:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 07:54:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 07:54:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 07:54:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 07:54:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 07:54:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... 
+[2025-09-11 07:54:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 07:54:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 07:54:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 07:54:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 07:54:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 07:54:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 07:54:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 07:54:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 07:54:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 07:55:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 07:55:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:55:00] [Rank 0] PRINT: step:1200/10000 val_loss:6.4430 total_sharp:2.5737e-03 L1_sharp:7.8219e-04 L2_sharp:7.1677e-04 L3_sharp:5.4463e-04 L4_sharp:3.0891e-04 L5_sharp:4.3909e-04 L6_sharp:2.4337e-04 L7_sharp:2.2926e-04 L8_sharp:3.5072e-04 L9_sharp:1.5565e-04 L10_sharp:1.6718e-04 L11_sharp:2.5358e-04 L12_sharp:5.2307e-04 total_fnorm:1.9000e+01 total_l1_linf:5.5552e+04 total_spectral:9.4375e+00 L1_fnorm:6.3750e+00 L2_fnorm:6.1562e+00 L3_fnorm:6.0625e+00 L4_fnorm:5.9688e+00 L5_fnorm:5.7812e+00 L6_fnorm:5.5938e+00 L7_fnorm:5.3750e+00 L8_fnorm:5.0000e+00 L9_fnorm:4.8750e+00 L10_fnorm:4.6562e+00 L11_fnorm:4.3438e+00 L12_fnorm:4.1562e+00 L1_l1linf:1.8594e+00 L2_l1linf:1.8281e+00 L3_l1linf:1.8125e+00 L4_l1linf:1.7812e+00 L5_l1linf:1.7422e+00 L6_l1linf:1.6406e+00 L7_l1linf:1.5859e+00 L8_l1linf:1.4531e+00 L9_l1linf:1.4609e+00 L10_l1linf:1.3594e+00 L11_l1linf:1.2422e+00 L12_l1linf:1.1328e+00 L1_spectral:6.9886e-02 L2_spectral:6.8081e-02 L3_spectral:6.7987e-02 L4_spectral:6.7970e-02 L5_spectral:6.7544e-02 L6_spectral:6.7770e-02 L7_spectral:6.7999e-02 L8_spectral:6.7936e-02 L9_spectral:6.8166e-02 L10_spectral:6.7619e-02 L11_spectral:6.7275e-02 L12_spectral:6.6731e-02 train_time:75029ms step_avg:62.52ms +[2025-09-11 07:55:00] [Rank 0] PRINT: step:1200/10000 val_loss:6.4430 total_sharp:2.5737e-03 L1_sharp:7.8219e-04 L2_sharp:7.1677e-04 L3_sharp:5.4463e-04 L4_sharp:3.0891e-04 L5_sharp:4.3909e-04 L6_sharp:2.4337e-04 L7_sharp:2.2926e-04 L8_sharp:3.5072e-04 L9_sharp:1.5565e-04 L10_sharp:1.6718e-04 L11_sharp:2.5358e-04 L12_sharp:5.2307e-04 total_fnorm:1.9000e+01 total_l1_linf:5.5552e+04 total_spectral:9.4375e+00 L1_fnorm:6.3750e+00 L2_fnorm:6.1562e+00 L3_fnorm:6.0625e+00 L4_fnorm:5.9688e+00 L5_fnorm:5.7812e+00 L6_fnorm:5.5938e+00 L7_fnorm:5.3750e+00 L8_fnorm:5.0000e+00 L9_fnorm:4.8750e+00 L10_fnorm:4.6562e+00 L11_fnorm:4.3438e+00 L12_fnorm:4.1562e+00 L1_l1linf:1.8594e+00 L2_l1linf:1.8281e+00 L3_l1linf:1.8125e+00 L4_l1linf:1.7812e+00 L5_l1linf:1.7422e+00 
L6_l1linf:1.6406e+00 L7_l1linf:1.5859e+00 L8_l1linf:1.4531e+00 L9_l1linf:1.4609e+00 L10_l1linf:1.3594e+00 L11_l1linf:1.2422e+00 L12_l1linf:1.1328e+00 L1_spectral:6.9886e-02 L2_spectral:6.8081e-02 L3_spectral:6.7987e-02 L4_spectral:6.7970e-02 L5_spectral:6.7544e-02 L6_spectral:6.7770e-02 L7_spectral:6.7999e-02 L8_spectral:6.7936e-02 L9_spectral:6.8166e-02 L10_spectral:6.7619e-02 L11_spectral:6.7275e-02 L12_spectral:6.6731e-02 train_time:75029ms step_avg:62.52ms +[2025-09-11 07:55:02] [Rank 0] step:1201/10000 train_time:76832ms step_avg:63.97ms +[2025-09-11 07:55:02] [Rank 0] step:1201/10000 train_time:76832ms step_avg:63.97ms +[2025-09-11 07:55:03] [Rank 0] step:1221/10000 train_time:77491ms step_avg:63.47ms +[2025-09-11 07:55:03] [Rank 0] step:1221/10000 train_time:77491ms step_avg:63.47ms +[2025-09-11 07:55:03] [Rank 0] step:1241/10000 train_time:78137ms step_avg:62.96ms +[2025-09-11 07:55:03] [Rank 0] step:1241/10000 train_time:78137ms step_avg:62.96ms +[2025-09-11 07:55:04] [Rank 0] step:1261/10000 train_time:78783ms step_avg:62.48ms +[2025-09-11 07:55:04] [Rank 0] step:1261/10000 train_time:78783ms step_avg:62.48ms +[2025-09-11 07:55:04] [Rank 0] step:1281/10000 train_time:79428ms step_avg:62.01ms +[2025-09-11 07:55:04] [Rank 0] step:1281/10000 train_time:79428ms step_avg:62.01ms +[2025-09-11 07:55:05] [Rank 0] step:1301/10000 train_time:80073ms step_avg:61.55ms +[2025-09-11 07:55:05] [Rank 0] step:1301/10000 train_time:80073ms step_avg:61.55ms +[2025-09-11 07:55:06] [Rank 0] step:1321/10000 train_time:80717ms step_avg:61.10ms +[2025-09-11 07:55:06] [Rank 0] step:1321/10000 train_time:80717ms step_avg:61.10ms +[2025-09-11 07:55:06] [Rank 0] step:1341/10000 train_time:81362ms step_avg:60.67ms +[2025-09-11 07:55:06] [Rank 0] step:1341/10000 train_time:81362ms step_avg:60.67ms +[2025-09-11 07:55:07] [Rank 0] step:1361/10000 train_time:82006ms step_avg:60.25ms +[2025-09-11 07:55:07] [Rank 0] step:1361/10000 train_time:82006ms step_avg:60.25ms +[2025-09-11 07:55:08] 
[Rank 0] step:1381/10000 train_time:82650ms step_avg:59.85ms +[2025-09-11 07:55:08] [Rank 0] step:1381/10000 train_time:82650ms step_avg:59.85ms +[2025-09-11 07:55:08] [Rank 0] step:1401/10000 train_time:83294ms step_avg:59.45ms +[2025-09-11 07:55:08] [Rank 0] step:1401/10000 train_time:83294ms step_avg:59.45ms +[2025-09-11 07:55:09] [Rank 0] step:1421/10000 train_time:83938ms step_avg:59.07ms +[2025-09-11 07:55:09] [Rank 0] step:1421/10000 train_time:83938ms step_avg:59.07ms +[2025-09-11 07:55:10] [Rank 0] step:1441/10000 train_time:84583ms step_avg:58.70ms +[2025-09-11 07:55:10] [Rank 0] step:1441/10000 train_time:84583ms step_avg:58.70ms +[2025-09-11 07:55:10] [Rank 0] step:1461/10000 train_time:85226ms step_avg:58.33ms +[2025-09-11 07:55:10] [Rank 0] step:1461/10000 train_time:85226ms step_avg:58.33ms +[2025-09-11 07:55:11] [Rank 0] step:1481/10000 train_time:85870ms step_avg:57.98ms +[2025-09-11 07:55:11] [Rank 0] step:1481/10000 train_time:85870ms step_avg:57.98ms +[2025-09-11 07:55:12] [Rank 0] step:1501/10000 train_time:86518ms step_avg:57.64ms +[2025-09-11 07:55:12] [Rank 0] step:1501/10000 train_time:86518ms step_avg:57.64ms +[2025-09-11 07:55:12] [Rank 0] step:1521/10000 train_time:87167ms step_avg:57.31ms +[2025-09-11 07:55:12] [Rank 0] step:1521/10000 train_time:87167ms step_avg:57.31ms +[2025-09-11 07:55:13] [Rank 0] step:1541/10000 train_time:87816ms step_avg:56.99ms +[2025-09-11 07:55:13] [Rank 0] step:1541/10000 train_time:87816ms step_avg:56.99ms +[2025-09-11 07:55:13] [Rank 0] step:1561/10000 train_time:88463ms step_avg:56.67ms +[2025-09-11 07:55:13] [Rank 0] step:1561/10000 train_time:88463ms step_avg:56.67ms +[2025-09-11 07:55:14] [Rank 0] step:1581/10000 train_time:89111ms step_avg:56.36ms +[2025-09-11 07:55:14] [Rank 0] step:1581/10000 train_time:89111ms step_avg:56.36ms +[2025-09-11 07:55:15] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 07:55:15] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 07:55:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 07:55:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 07:55:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 07:55:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 07:55:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:55:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:55:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 07:55:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 07:55:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 07:55:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 07:55:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 07:55:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 07:55:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 07:55:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 07:55:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 07:55:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 07:55:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 07:55:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 07:55:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 07:55:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 07:55:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 07:55:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 07:55:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 07:55:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 07:55:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 07:55:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 07:55:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 07:55:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 07:55:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 07:55:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 07:55:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 07:55:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 07:55:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 07:55:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 07:55:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 07:55:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 07:55:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 07:55:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 07:55:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 07:55:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 07:55:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 07:55:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 07:55:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 07:55:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 07:55:25] [Rank 0] PRINT: step:1600/10000 val_loss:6.2781 total_sharp:2.5846e-03 L1_sharp:1.5099e-03 L2_sharp:6.4475e-04 L3_sharp:4.2074e-04 L4_sharp:3.0326e-04 L5_sharp:4.2902e-04 L6_sharp:2.1063e-04 L7_sharp:1.8047e-04 L8_sharp:3.7336e-04 L9_sharp:1.7584e-04 L10_sharp:2.0698e-04 L11_sharp:3.0027e-04 L12_sharp:6.5651e-04 total_fnorm:1.9750e+01 total_l1_linf:5.6576e+04 total_spectral:9.8750e+00 L1_fnorm:6.3750e+00 L2_fnorm:6.2188e+00 L3_fnorm:6.1875e+00 L4_fnorm:6.0938e+00 L5_fnorm:5.9062e+00 L6_fnorm:5.9062e+00 L7_fnorm:5.7812e+00 L8_fnorm:5.4062e+00 L9_fnorm:5.3750e+00 L10_fnorm:5.1250e+00 L11_fnorm:4.8125e+00 L12_fnorm:4.4375e+00 L1_l1linf:1.8047e+00 L2_l1linf:1.7578e+00 L3_l1linf:1.7656e+00 L4_l1linf:1.7266e+00 L5_l1linf:1.7344e+00 L6_l1linf:1.6719e+00 L7_l1linf:1.6484e+00 L8_l1linf:1.5469e+00 L9_l1linf:1.5312e+00 L10_l1linf:1.4062e+00 L11_l1linf:1.2734e+00 L12_l1linf:1.1172e+00 L1_spectral:7.2346e-02 L2_spectral:7.0670e-02 L3_spectral:7.0390e-02 L4_spectral:7.0116e-02 L5_spectral:6.9282e-02 L6_spectral:6.9252e-02 L7_spectral:6.9580e-02 L8_spectral:6.9914e-02 L9_spectral:7.0249e-02 L10_spectral:6.9858e-02 L11_spectral:6.9726e-02 L12_spectral:6.8623e-02 train_time:89742ms step_avg:56.09ms +[2025-09-11 07:55:25] [Rank 0] PRINT: step:1600/10000 
val_loss:6.2781 total_sharp:2.5846e-03 L1_sharp:1.5099e-03 L2_sharp:6.4475e-04 L3_sharp:4.2074e-04 L4_sharp:3.0326e-04 L5_sharp:4.2902e-04 L6_sharp:2.1063e-04 L7_sharp:1.8047e-04 L8_sharp:3.7336e-04 L9_sharp:1.7584e-04 L10_sharp:2.0698e-04 L11_sharp:3.0027e-04 L12_sharp:6.5651e-04 total_fnorm:1.9750e+01 total_l1_linf:5.6576e+04 total_spectral:9.8750e+00 L1_fnorm:6.3750e+00 L2_fnorm:6.2188e+00 L3_fnorm:6.1875e+00 L4_fnorm:6.0938e+00 L5_fnorm:5.9062e+00 L6_fnorm:5.9062e+00 L7_fnorm:5.7812e+00 L8_fnorm:5.4062e+00 L9_fnorm:5.3750e+00 L10_fnorm:5.1250e+00 L11_fnorm:4.8125e+00 L12_fnorm:4.4375e+00 L1_l1linf:1.8047e+00 L2_l1linf:1.7578e+00 L3_l1linf:1.7656e+00 L4_l1linf:1.7266e+00 L5_l1linf:1.7344e+00 L6_l1linf:1.6719e+00 L7_l1linf:1.6484e+00 L8_l1linf:1.5469e+00 L9_l1linf:1.5312e+00 L10_l1linf:1.4062e+00 L11_l1linf:1.2734e+00 L12_l1linf:1.1172e+00 L1_spectral:7.2346e-02 L2_spectral:7.0670e-02 L3_spectral:7.0390e-02 L4_spectral:7.0116e-02 L5_spectral:6.9282e-02 L6_spectral:6.9252e-02 L7_spectral:6.9580e-02 L8_spectral:6.9914e-02 L9_spectral:7.0249e-02 L10_spectral:6.9858e-02 L11_spectral:6.9726e-02 L12_spectral:6.8623e-02 train_time:89742ms step_avg:56.09ms +[2025-09-11 07:55:26] [Rank 0] step:1601/10000 train_time:90925ms step_avg:56.79ms +[2025-09-11 07:55:26] [Rank 0] step:1601/10000 train_time:90925ms step_avg:56.79ms +[2025-09-11 07:55:27] [Rank 0] step:1621/10000 train_time:91565ms step_avg:56.49ms +[2025-09-11 07:55:27] [Rank 0] step:1621/10000 train_time:91565ms step_avg:56.49ms +[2025-09-11 07:55:27] [Rank 0] step:1641/10000 train_time:92216ms step_avg:56.19ms +[2025-09-11 07:55:27] [Rank 0] step:1641/10000 train_time:92216ms step_avg:56.19ms +[2025-09-11 07:55:29] [Rank 0] step:1661/10000 train_time:93447ms step_avg:56.26ms +[2025-09-11 07:55:29] [Rank 0] step:1661/10000 train_time:93447ms step_avg:56.26ms +[2025-09-11 07:55:29] [Rank 0] step:1681/10000 train_time:94098ms step_avg:55.98ms +[2025-09-11 07:55:29] [Rank 0] step:1681/10000 train_time:94098ms 
step_avg:55.98ms +[2025-09-11 07:55:30] [Rank 0] step:1701/10000 train_time:94748ms step_avg:55.70ms +[2025-09-11 07:55:30] [Rank 0] step:1701/10000 train_time:94748ms step_avg:55.70ms +[2025-09-11 07:55:31] [Rank 0] step:1721/10000 train_time:95651ms step_avg:55.58ms +[2025-09-11 07:55:31] [Rank 0] step:1721/10000 train_time:95651ms step_avg:55.58ms +[2025-09-11 07:55:31] [Rank 0] step:1741/10000 train_time:96301ms step_avg:55.31ms +[2025-09-11 07:55:31] [Rank 0] step:1741/10000 train_time:96301ms step_avg:55.31ms +[2025-09-11 07:55:32] [Rank 0] step:1761/10000 train_time:96951ms step_avg:55.05ms +[2025-09-11 07:55:32] [Rank 0] step:1761/10000 train_time:96951ms step_avg:55.05ms +[2025-09-11 07:55:33] [Rank 0] step:1781/10000 train_time:97602ms step_avg:54.80ms +[2025-09-11 07:55:33] [Rank 0] step:1781/10000 train_time:97602ms step_avg:54.80ms +[2025-09-11 07:55:33] [Rank 0] step:1801/10000 train_time:98251ms step_avg:54.55ms +[2025-09-11 07:55:33] [Rank 0] step:1801/10000 train_time:98251ms step_avg:54.55ms +[2025-09-11 07:55:34] [Rank 0] step:1821/10000 train_time:98901ms step_avg:54.31ms +[2025-09-11 07:55:34] [Rank 0] step:1821/10000 train_time:98901ms step_avg:54.31ms +[2025-09-11 07:55:35] [Rank 0] step:1841/10000 train_time:99550ms step_avg:54.07ms +[2025-09-11 07:55:35] [Rank 0] step:1841/10000 train_time:99550ms step_avg:54.07ms +[2025-09-11 07:55:35] [Rank 0] step:1861/10000 train_time:100200ms step_avg:53.84ms +[2025-09-11 07:55:35] [Rank 0] step:1861/10000 train_time:100200ms step_avg:53.84ms +[2025-09-11 07:55:36] [Rank 0] step:1881/10000 train_time:100849ms step_avg:53.61ms +[2025-09-11 07:55:36] [Rank 0] step:1881/10000 train_time:100849ms step_avg:53.61ms +[2025-09-11 07:55:37] [Rank 0] step:1901/10000 train_time:101498ms step_avg:53.39ms +[2025-09-11 07:55:37] [Rank 0] step:1901/10000 train_time:101498ms step_avg:53.39ms +[2025-09-11 07:55:37] [Rank 0] step:1921/10000 train_time:102147ms step_avg:53.17ms +[2025-09-11 07:55:37] [Rank 0] 
step:1921/10000 train_time:102147ms step_avg:53.17ms +[2025-09-11 07:55:38] [Rank 0] step:1941/10000 train_time:102796ms step_avg:52.96ms +[2025-09-11 07:55:38] [Rank 0] step:1941/10000 train_time:102796ms step_avg:52.96ms +[2025-09-11 07:55:39] [Rank 0] step:1961/10000 train_time:103446ms step_avg:52.75ms +[2025-09-11 07:55:39] [Rank 0] step:1961/10000 train_time:103446ms step_avg:52.75ms +[2025-09-11 07:55:39] [Rank 0] step:1981/10000 train_time:104094ms step_avg:52.55ms +[2025-09-11 07:55:39] [Rank 0] step:1981/10000 train_time:104094ms step_avg:52.55ms +[2025-09-11 07:55:40] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 07:55:40] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 07:55:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 07:55:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 07:55:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 07:55:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 07:55:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:55:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:55:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 07:55:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 07:55:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 07:55:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 07:55:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 07:55:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 07:55:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 07:55:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 07:55:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 07:55:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 07:55:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 07:55:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 07:55:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 07:55:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 07:55:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 07:55:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 07:55:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 07:55:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 07:55:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 07:55:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 07:55:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 07:55:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 07:55:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 07:55:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 07:55:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 07:55:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 07:55:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 07:55:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 07:55:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 07:55:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 07:55:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 07:55:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 07:55:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 07:55:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 07:55:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 07:55:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 07:55:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 07:55:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:55:50] [Rank 0] PRINT: step:2000/10000 val_loss:6.1270 total_sharp:1.7920e-03 L1_sharp:5.0776e-04 L2_sharp:3.8205e-04 L3_sharp:2.4249e-04 L4_sharp:1.6938e-04 L5_sharp:3.3425e-04 L6_sharp:1.4805e-04 L7_sharp:1.4646e-04 L8_sharp:3.4496e-04 L9_sharp:1.5451e-04 L10_sharp:1.7203e-04 L11_sharp:3.0257e-04 L12_sharp:9.8372e-04 total_fnorm:2.0375e+01 total_l1_linf:5.7088e+04 total_spectral:1.0125e+01 L1_fnorm:6.4062e+00 L2_fnorm:6.2188e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.1875e+00 L5_fnorm:5.9062e+00 L6_fnorm:6.0625e+00 L7_fnorm:5.9688e+00 L8_fnorm:5.6875e+00 L9_fnorm:5.6875e+00 L10_fnorm:5.4688e+00 L11_fnorm:5.1250e+00 L12_fnorm:4.6250e+00 L1_l1linf:1.7656e+00 L2_l1linf:1.7109e+00 L3_l1linf:1.6953e+00 L4_l1linf:1.6719e+00 L5_l1linf:1.6562e+00 L6_l1linf:1.6484e+00 L7_l1linf:1.6328e+00 L8_l1linf:1.5859e+00 L9_l1linf:1.5938e+00 L10_l1linf:1.4531e+00 L11_l1linf:1.3047e+00 L12_l1linf:1.0859e+00 L1_spectral:7.4042e-02 L2_spectral:7.2264e-02 L3_spectral:7.2821e-02 L4_spectral:7.1948e-02 L5_spectral:7.0957e-02 L6_spectral:7.1042e-02 L7_spectral:7.0537e-02 L8_spectral:7.1045e-02 L9_spectral:7.1456e-02 L10_spectral:7.1560e-02 L11_spectral:7.1721e-02 L12_spectral:7.0080e-02 train_time:104726ms step_avg:52.36ms +[2025-09-11 07:55:50] [Rank 0] PRINT: step:2000/10000 val_loss:6.1270 total_sharp:1.7920e-03 L1_sharp:5.0776e-04 L2_sharp:3.8205e-04 L3_sharp:2.4249e-04 L4_sharp:1.6938e-04 L5_sharp:3.3425e-04 L6_sharp:1.4805e-04 L7_sharp:1.4646e-04 L8_sharp:3.4496e-04 L9_sharp:1.5451e-04 L10_sharp:1.7203e-04 L11_sharp:3.0257e-04 L12_sharp:9.8372e-04 total_fnorm:2.0375e+01 total_l1_linf:5.7088e+04 total_spectral:1.0125e+01 L1_fnorm:6.4062e+00 L2_fnorm:6.2188e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.1875e+00 L5_fnorm:5.9062e+00 L6_fnorm:6.0625e+00 L7_fnorm:5.9688e+00 L8_fnorm:5.6875e+00 L9_fnorm:5.6875e+00 L10_fnorm:5.4688e+00 L11_fnorm:5.1250e+00 L12_fnorm:4.6250e+00 L1_l1linf:1.7656e+00 L2_l1linf:1.7109e+00 L3_l1linf:1.6953e+00 L4_l1linf:1.6719e+00 L5_l1linf:1.6562e+00 
L6_l1linf:1.6484e+00 L7_l1linf:1.6328e+00 L8_l1linf:1.5859e+00 L9_l1linf:1.5938e+00 L10_l1linf:1.4531e+00 L11_l1linf:1.3047e+00 L12_l1linf:1.0859e+00 L1_spectral:7.4042e-02 L2_spectral:7.2264e-02 L3_spectral:7.2821e-02 L4_spectral:7.1948e-02 L5_spectral:7.0957e-02 L6_spectral:7.1042e-02 L7_spectral:7.0537e-02 L8_spectral:7.1045e-02 L9_spectral:7.1456e-02 L10_spectral:7.1560e-02 L11_spectral:7.1721e-02 L12_spectral:7.0080e-02 train_time:104726ms step_avg:52.36ms +[2025-09-11 07:55:51] [Rank 0] step:2001/10000 train_time:105894ms step_avg:52.92ms +[2025-09-11 07:55:51] [Rank 0] step:2001/10000 train_time:105894ms step_avg:52.92ms +[2025-09-11 07:55:52] [Rank 0] step:2021/10000 train_time:106532ms step_avg:52.71ms +[2025-09-11 07:55:52] [Rank 0] step:2021/10000 train_time:106532ms step_avg:52.71ms +[2025-09-11 07:55:52] [Rank 0] step:2041/10000 train_time:107182ms step_avg:52.51ms +[2025-09-11 07:55:52] [Rank 0] step:2041/10000 train_time:107182ms step_avg:52.51ms +[2025-09-11 07:55:53] [Rank 0] step:2061/10000 train_time:107832ms step_avg:52.32ms +[2025-09-11 07:55:53] [Rank 0] step:2061/10000 train_time:107832ms step_avg:52.32ms +[2025-09-11 07:55:54] [Rank 0] step:2081/10000 train_time:108482ms step_avg:52.13ms +[2025-09-11 07:55:54] [Rank 0] step:2081/10000 train_time:108482ms step_avg:52.13ms +[2025-09-11 07:55:54] [Rank 0] step:2101/10000 train_time:109131ms step_avg:51.94ms +[2025-09-11 07:55:54] [Rank 0] step:2101/10000 train_time:109131ms step_avg:51.94ms +[2025-09-11 07:55:55] [Rank 0] step:2121/10000 train_time:109781ms step_avg:51.76ms +[2025-09-11 07:55:55] [Rank 0] step:2121/10000 train_time:109781ms step_avg:51.76ms +[2025-09-11 07:55:56] [Rank 0] step:2141/10000 train_time:110430ms step_avg:51.58ms +[2025-09-11 07:55:56] [Rank 0] step:2141/10000 train_time:110430ms step_avg:51.58ms +[2025-09-11 07:55:56] [Rank 0] step:2161/10000 train_time:111079ms step_avg:51.40ms +[2025-09-11 07:55:56] [Rank 0] step:2161/10000 train_time:111079ms step_avg:51.40ms 
+[2025-09-11 07:55:57] [Rank 0] step:2181/10000 train_time:111728ms step_avg:51.23ms +[2025-09-11 07:55:57] [Rank 0] step:2181/10000 train_time:111728ms step_avg:51.23ms +[2025-09-11 07:55:57] [Rank 0] step:2201/10000 train_time:112377ms step_avg:51.06ms +[2025-09-11 07:55:57] [Rank 0] step:2201/10000 train_time:112377ms step_avg:51.06ms +[2025-09-11 07:55:58] [Rank 0] step:2221/10000 train_time:113025ms step_avg:50.89ms +[2025-09-11 07:55:58] [Rank 0] step:2221/10000 train_time:113025ms step_avg:50.89ms +[2025-09-11 07:55:59] [Rank 0] step:2241/10000 train_time:113686ms step_avg:50.73ms +[2025-09-11 07:55:59] [Rank 0] step:2241/10000 train_time:113686ms step_avg:50.73ms +[2025-09-11 07:55:59] [Rank 0] step:2261/10000 train_time:114347ms step_avg:50.57ms +[2025-09-11 07:55:59] [Rank 0] step:2261/10000 train_time:114347ms step_avg:50.57ms +[2025-09-11 07:56:00] [Rank 0] step:2281/10000 train_time:115012ms step_avg:50.42ms +[2025-09-11 07:56:00] [Rank 0] step:2281/10000 train_time:115012ms step_avg:50.42ms +[2025-09-11 07:56:01] [Rank 0] step:2301/10000 train_time:115674ms step_avg:50.27ms +[2025-09-11 07:56:01] [Rank 0] step:2301/10000 train_time:115674ms step_avg:50.27ms +[2025-09-11 07:56:01] [Rank 0] step:2321/10000 train_time:116336ms step_avg:50.12ms +[2025-09-11 07:56:01] [Rank 0] step:2321/10000 train_time:116336ms step_avg:50.12ms +[2025-09-11 07:56:02] [Rank 0] step:2341/10000 train_time:116997ms step_avg:49.98ms +[2025-09-11 07:56:02] [Rank 0] step:2341/10000 train_time:116997ms step_avg:49.98ms +[2025-09-11 07:56:03] [Rank 0] step:2361/10000 train_time:117659ms step_avg:49.83ms +[2025-09-11 07:56:03] [Rank 0] step:2361/10000 train_time:117659ms step_avg:49.83ms +[2025-09-11 07:56:03] [Rank 0] step:2381/10000 train_time:118321ms step_avg:49.69ms +[2025-09-11 07:56:03] [Rank 0] step:2381/10000 train_time:118321ms step_avg:49.69ms +[2025-09-11 07:56:04] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 07:56:04] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 07:56:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 07:56:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 07:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 07:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 07:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 07:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 07:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 07:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 07:56:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 07:56:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 07:56:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 07:56:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 07:56:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 07:56:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 07:56:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 07:56:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 07:56:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 07:56:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 07:56:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 07:56:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 07:56:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 07:56:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 07:56:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 07:56:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 07:56:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 07:56:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 07:56:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 07:56:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 07:56:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 07:56:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 07:56:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 07:56:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 07:56:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 07:56:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 07:56:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 07:56:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 07:56:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 07:56:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 07:56:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 07:56:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 07:56:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 07:56:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 07:56:14] [Rank 0] PRINT: step:2400/10000 val_loss:5.9880 total_sharp:1.8566e-03 L1_sharp:9.9195e-04 L2_sharp:4.1133e-04 L3_sharp:1.8175e-04 L4_sharp:1.3575e-04 L5_sharp:2.6502e-04 L6_sharp:1.2034e-04 L7_sharp:1.3183e-04 L8_sharp:2.6596e-04 L9_sharp:1.4811e-04 L10_sharp:2.0863e-04 L11_sharp:2.5049e-04 L12_sharp:7.2735e-04 total_fnorm:2.0625e+01 total_l1_linf:5.7088e+04 total_spectral:1.0312e+01 L1_fnorm:6.4062e+00 L2_fnorm:6.2500e+00 L3_fnorm:6.2500e+00 L4_fnorm:6.2188e+00 L5_fnorm:5.9688e+00 L6_fnorm:6.1250e+00 L7_fnorm:6.0938e+00 L8_fnorm:5.8438e+00 L9_fnorm:5.9062e+00 L10_fnorm:5.6562e+00 L11_fnorm:5.4062e+00 L12_fnorm:4.9375e+00 L1_l1linf:1.7422e+00 L2_l1linf:1.6953e+00 L3_l1linf:1.6641e+00 L4_l1linf:1.6016e+00 L5_l1linf:1.5859e+00 L6_l1linf:1.5703e+00 L7_l1linf:1.5938e+00 L8_l1linf:1.5859e+00 L9_l1linf:1.5781e+00 L10_l1linf:1.4609e+00 L11_l1linf:1.3828e+00 L12_l1linf:1.1094e+00 L1_spectral:7.5242e-02 L2_spectral:7.3399e-02 L3_spectral:7.4102e-02 L4_spectral:7.3552e-02 L5_spectral:7.2073e-02 L6_spectral:7.2458e-02 L7_spectral:7.2072e-02 L8_spectral:7.1762e-02 L9_spectral:7.2874e-02 L10_spectral:7.2659e-02 L11_spectral:7.3173e-02 L12_spectral:7.1988e-02 train_time:118964ms step_avg:49.57ms +[2025-09-11 07:56:14] [Rank 0] PRINT: step:2400/10000 
val_loss:5.9880 total_sharp:1.8566e-03 L1_sharp:9.9195e-04 L2_sharp:4.1133e-04 L3_sharp:1.8175e-04 L4_sharp:1.3575e-04 L5_sharp:2.6502e-04 L6_sharp:1.2034e-04 L7_sharp:1.3183e-04 L8_sharp:2.6596e-04 L9_sharp:1.4811e-04 L10_sharp:2.0863e-04 L11_sharp:2.5049e-04 L12_sharp:7.2735e-04 total_fnorm:2.0625e+01 total_l1_linf:5.7088e+04 total_spectral:1.0312e+01 L1_fnorm:6.4062e+00 L2_fnorm:6.2500e+00 L3_fnorm:6.2500e+00 L4_fnorm:6.2188e+00 L5_fnorm:5.9688e+00 L6_fnorm:6.1250e+00 L7_fnorm:6.0938e+00 L8_fnorm:5.8438e+00 L9_fnorm:5.9062e+00 L10_fnorm:5.6562e+00 L11_fnorm:5.4062e+00 L12_fnorm:4.9375e+00 L1_l1linf:1.7422e+00 L2_l1linf:1.6953e+00 L3_l1linf:1.6641e+00 L4_l1linf:1.6016e+00 L5_l1linf:1.5859e+00 L6_l1linf:1.5703e+00 L7_l1linf:1.5938e+00 L8_l1linf:1.5859e+00 L9_l1linf:1.5781e+00 L10_l1linf:1.4609e+00 L11_l1linf:1.3828e+00 L12_l1linf:1.1094e+00 L1_spectral:7.5242e-02 L2_spectral:7.3399e-02 L3_spectral:7.4102e-02 L4_spectral:7.3552e-02 L5_spectral:7.2073e-02 L6_spectral:7.2458e-02 L7_spectral:7.2072e-02 L8_spectral:7.1762e-02 L9_spectral:7.2874e-02 L10_spectral:7.2659e-02 L11_spectral:7.3173e-02 L12_spectral:7.1988e-02 train_time:118964ms step_avg:49.57ms +[2025-09-11 07:56:15] [Rank 0] step:2401/10000 train_time:120142ms step_avg:50.04ms +[2025-09-11 07:56:15] [Rank 0] step:2401/10000 train_time:120142ms step_avg:50.04ms +[2025-09-11 07:56:16] [Rank 0] step:2421/10000 train_time:120794ms step_avg:49.89ms +[2025-09-11 07:56:16] [Rank 0] step:2421/10000 train_time:120794ms step_avg:49.89ms +[2025-09-11 07:56:17] [Rank 0] step:2441/10000 train_time:121457ms step_avg:49.76ms +[2025-09-11 07:56:17] [Rank 0] step:2441/10000 train_time:121457ms step_avg:49.76ms +[2025-09-11 07:56:17] [Rank 0] step:2461/10000 train_time:122121ms step_avg:49.62ms +[2025-09-11 07:56:17] [Rank 0] step:2461/10000 train_time:122121ms step_avg:49.62ms +[2025-09-11 07:56:18] [Rank 0] step:2481/10000 train_time:122784ms step_avg:49.49ms +[2025-09-11 07:56:18] [Rank 0] step:2481/10000 
train_time:122784ms step_avg:49.49ms +[2025-09-11 07:56:19] [Rank 0] step:2501/10000 train_time:123447ms step_avg:49.36ms +[2025-09-11 07:56:19] [Rank 0] step:2501/10000 train_time:123447ms step_avg:49.36ms +[2025-09-11 07:56:19] [Rank 0] step:2521/10000 train_time:124109ms step_avg:49.23ms +[2025-09-11 07:56:19] [Rank 0] step:2521/10000 train_time:124109ms step_avg:49.23ms +[2025-09-11 07:56:20] [Rank 0] step:2541/10000 train_time:124772ms step_avg:49.10ms +[2025-09-11 07:56:20] [Rank 0] step:2541/10000 train_time:124772ms step_avg:49.10ms +[2025-09-11 07:56:21] [Rank 0] step:2561/10000 train_time:125434ms step_avg:48.98ms +[2025-09-11 07:56:21] [Rank 0] step:2561/10000 train_time:125434ms step_avg:48.98ms +[2025-09-11 07:56:21] [Rank 0] step:2581/10000 train_time:126096ms step_avg:48.86ms +[2025-09-11 07:56:21] [Rank 0] step:2581/10000 train_time:126096ms step_avg:48.86ms +[2025-09-11 07:56:22] [Rank 0] step:2601/10000 train_time:126758ms step_avg:48.73ms +[2025-09-11 07:56:22] [Rank 0] step:2601/10000 train_time:126758ms step_avg:48.73ms +[2025-09-11 07:56:23] [Rank 0] step:2621/10000 train_time:127420ms step_avg:48.61ms +[2025-09-11 07:56:23] [Rank 0] step:2621/10000 train_time:127420ms step_avg:48.61ms +[2025-09-11 07:56:23] [Rank 0] step:2641/10000 train_time:128082ms step_avg:48.50ms +[2025-09-11 07:56:23] [Rank 0] step:2641/10000 train_time:128082ms step_avg:48.50ms +[2025-09-11 07:56:24] [Rank 0] step:2661/10000 train_time:128744ms step_avg:48.38ms +[2025-09-11 07:56:24] [Rank 0] step:2661/10000 train_time:128744ms step_avg:48.38ms +[2025-09-11 07:56:25] [Rank 0] step:2681/10000 train_time:129406ms step_avg:48.27ms +[2025-09-11 07:56:25] [Rank 0] step:2681/10000 train_time:129406ms step_avg:48.27ms +[2025-09-11 07:56:25] [Rank 0] step:2701/10000 train_time:130069ms step_avg:48.16ms +[2025-09-11 07:56:25] [Rank 0] step:2701/10000 train_time:130069ms step_avg:48.16ms +[2025-09-11 07:56:26] [Rank 0] step:2721/10000 train_time:130731ms step_avg:48.05ms 
+[2025-09-11 07:56:26] [Rank 0] step:2721/10000 train_time:130731ms step_avg:48.05ms +[2025-09-11 07:56:27] [Rank 0] step:2741/10000 train_time:131394ms step_avg:47.94ms +[2025-09-11 07:56:27] [Rank 0] step:2741/10000 train_time:131394ms step_avg:47.94ms +[2025-09-11 07:56:27] [Rank 0] step:2761/10000 train_time:132055ms step_avg:47.83ms +[2025-09-11 07:56:27] [Rank 0] step:2761/10000 train_time:132055ms step_avg:47.83ms +[2025-09-11 07:56:28] [Rank 0] step:2781/10000 train_time:132717ms step_avg:47.72ms +[2025-09-11 07:56:28] [Rank 0] step:2781/10000 train_time:132717ms step_avg:47.72ms +[2025-09-11 07:56:29] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 07:56:29] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 07:56:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 07:56:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 07:56:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 07:56:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 07:56:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:56:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:56:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 07:56:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 07:56:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 07:56:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 07:56:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 07:56:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 07:56:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 07:56:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 07:56:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 07:56:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 07:56:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 07:56:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 07:56:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 07:56:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 07:56:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 07:56:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 07:56:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 07:56:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 07:56:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 07:56:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 07:56:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 07:56:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 07:56:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 07:56:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 07:56:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 07:56:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 07:56:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 07:56:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 07:56:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 07:56:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 07:56:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 07:56:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 07:56:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 07:56:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 07:56:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 07:56:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 07:56:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 07:56:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:56:42] [Rank 0] PRINT: step:2800/10000 val_loss:5.9001 total_sharp:1.8137e-03 L1_sharp:6.4165e-04 L2_sharp:4.3406e-04 L3_sharp:2.3377e-04 L4_sharp:1.3622e-04 L5_sharp:3.5542e-04 L6_sharp:1.8648e-04 L7_sharp:1.2437e-04 L8_sharp:2.8006e-04 L9_sharp:1.4626e-04 L10_sharp:2.0525e-04 L11_sharp:2.9736e-04 L12_sharp:8.3768e-04 total_fnorm:2.0750e+01 total_l1_linf:5.6064e+04 total_spectral:1.0375e+01 L1_fnorm:6.4375e+00 L2_fnorm:6.2500e+00 L3_fnorm:6.2500e+00 L4_fnorm:6.2188e+00 L5_fnorm:6.0312e+00 L6_fnorm:6.1562e+00 L7_fnorm:6.1250e+00 L8_fnorm:5.9062e+00 L9_fnorm:5.9375e+00 L10_fnorm:5.7812e+00 L11_fnorm:5.4688e+00 L12_fnorm:4.8750e+00 L1_l1linf:1.7031e+00 L2_l1linf:1.6562e+00 L3_l1linf:1.6094e+00 L4_l1linf:1.5859e+00 L5_l1linf:1.5547e+00 L6_l1linf:1.5312e+00 L7_l1linf:1.5625e+00 L8_l1linf:1.5703e+00 L9_l1linf:1.5547e+00 L10_l1linf:1.4531e+00 L11_l1linf:1.3516e+00 L12_l1linf:1.0859e+00 L1_spectral:7.6606e-02 L2_spectral:7.4852e-02 L3_spectral:7.4984e-02 L4_spectral:7.4632e-02 L5_spectral:7.3487e-02 L6_spectral:7.3734e-02 L7_spectral:7.3340e-02 L8_spectral:7.2803e-02 L9_spectral:7.3253e-02 L10_spectral:7.3782e-02 L11_spectral:7.4013e-02 L12_spectral:7.2191e-02 train_time:133363ms step_avg:47.63ms +[2025-09-11 07:56:42] [Rank 0] PRINT: step:2800/10000 val_loss:5.9001 total_sharp:1.8137e-03 L1_sharp:6.4165e-04 L2_sharp:4.3406e-04 L3_sharp:2.3377e-04 L4_sharp:1.3622e-04 L5_sharp:3.5542e-04 L6_sharp:1.8648e-04 L7_sharp:1.2437e-04 L8_sharp:2.8006e-04 L9_sharp:1.4626e-04 L10_sharp:2.0525e-04 L11_sharp:2.9736e-04 L12_sharp:8.3768e-04 total_fnorm:2.0750e+01 total_l1_linf:5.6064e+04 total_spectral:1.0375e+01 L1_fnorm:6.4375e+00 L2_fnorm:6.2500e+00 L3_fnorm:6.2500e+00 L4_fnorm:6.2188e+00 L5_fnorm:6.0312e+00 L6_fnorm:6.1562e+00 L7_fnorm:6.1250e+00 L8_fnorm:5.9062e+00 L9_fnorm:5.9375e+00 L10_fnorm:5.7812e+00 L11_fnorm:5.4688e+00 L12_fnorm:4.8750e+00 L1_l1linf:1.7031e+00 L2_l1linf:1.6562e+00 L3_l1linf:1.6094e+00 L4_l1linf:1.5859e+00 L5_l1linf:1.5547e+00 
L6_l1linf:1.5312e+00 L7_l1linf:1.5625e+00 L8_l1linf:1.5703e+00 L9_l1linf:1.5547e+00 L10_l1linf:1.4531e+00 L11_l1linf:1.3516e+00 L12_l1linf:1.0859e+00 L1_spectral:7.6606e-02 L2_spectral:7.4852e-02 L3_spectral:7.4984e-02 L4_spectral:7.4632e-02 L5_spectral:7.3487e-02 L6_spectral:7.3734e-02 L7_spectral:7.3340e-02 L8_spectral:7.2803e-02 L9_spectral:7.3253e-02 L10_spectral:7.3782e-02 L11_spectral:7.4013e-02 L12_spectral:7.2191e-02 train_time:133363ms step_avg:47.63ms +[2025-09-11 07:56:43] [Rank 0] step:2801/10000 train_time:134518ms step_avg:48.03ms +[2025-09-11 07:56:43] [Rank 0] step:2801/10000 train_time:134518ms step_avg:48.03ms +[2025-09-11 07:56:44] [Rank 0] step:2821/10000 train_time:135169ms step_avg:47.92ms +[2025-09-11 07:56:44] [Rank 0] step:2821/10000 train_time:135169ms step_avg:47.92ms +[2025-09-11 07:56:44] [Rank 0] step:2841/10000 train_time:135833ms step_avg:47.81ms +[2025-09-11 07:56:44] [Rank 0] step:2841/10000 train_time:135833ms step_avg:47.81ms +[2025-09-11 07:56:45] [Rank 0] step:2861/10000 train_time:136496ms step_avg:47.71ms +[2025-09-11 07:56:45] [Rank 0] step:2861/10000 train_time:136496ms step_avg:47.71ms +[2025-09-11 07:56:46] [Rank 0] step:2881/10000 train_time:137159ms step_avg:47.61ms +[2025-09-11 07:56:46] [Rank 0] step:2881/10000 train_time:137159ms step_avg:47.61ms +[2025-09-11 07:56:46] [Rank 0] step:2901/10000 train_time:137822ms step_avg:47.51ms +[2025-09-11 07:56:46] [Rank 0] step:2901/10000 train_time:137822ms step_avg:47.51ms +[2025-09-11 07:56:47] [Rank 0] step:2921/10000 train_time:138485ms step_avg:47.41ms +[2025-09-11 07:56:47] [Rank 0] step:2921/10000 train_time:138485ms step_avg:47.41ms +[2025-09-11 07:56:48] [Rank 0] step:2941/10000 train_time:139148ms step_avg:47.31ms +[2025-09-11 07:56:48] [Rank 0] step:2941/10000 train_time:139148ms step_avg:47.31ms +[2025-09-11 07:56:48] [Rank 0] step:2961/10000 train_time:139810ms step_avg:47.22ms +[2025-09-11 07:56:48] [Rank 0] step:2961/10000 train_time:139810ms step_avg:47.22ms 
+[2025-09-11 07:56:49] [Rank 0] step:2981/10000 train_time:140475ms step_avg:47.12ms +[2025-09-11 07:56:49] [Rank 0] step:2981/10000 train_time:140475ms step_avg:47.12ms +[2025-09-11 07:56:50] [Rank 0] step:3001/10000 train_time:141141ms step_avg:47.03ms +[2025-09-11 07:56:50] [Rank 0] step:3001/10000 train_time:141141ms step_avg:47.03ms +[2025-09-11 07:56:50] [Rank 0] step:3021/10000 train_time:141807ms step_avg:46.94ms +[2025-09-11 07:56:50] [Rank 0] step:3021/10000 train_time:141807ms step_avg:46.94ms +[2025-09-11 07:56:51] [Rank 0] step:3041/10000 train_time:142472ms step_avg:46.85ms +[2025-09-11 07:56:51] [Rank 0] step:3041/10000 train_time:142472ms step_avg:46.85ms +[2025-09-11 07:56:52] [Rank 0] step:3061/10000 train_time:143137ms step_avg:46.76ms +[2025-09-11 07:56:52] [Rank 0] step:3061/10000 train_time:143137ms step_avg:46.76ms +[2025-09-11 07:56:52] [Rank 0] step:3081/10000 train_time:143803ms step_avg:46.67ms +[2025-09-11 07:56:52] [Rank 0] step:3081/10000 train_time:143803ms step_avg:46.67ms +[2025-09-11 07:56:53] [Rank 0] step:3101/10000 train_time:144469ms step_avg:46.59ms +[2025-09-11 07:56:53] [Rank 0] step:3101/10000 train_time:144469ms step_avg:46.59ms +[2025-09-11 07:56:54] [Rank 0] step:3121/10000 train_time:145134ms step_avg:46.50ms +[2025-09-11 07:56:54] [Rank 0] step:3121/10000 train_time:145134ms step_avg:46.50ms +[2025-09-11 07:56:54] [Rank 0] step:3141/10000 train_time:145799ms step_avg:46.42ms +[2025-09-11 07:56:54] [Rank 0] step:3141/10000 train_time:145799ms step_avg:46.42ms +[2025-09-11 07:56:55] [Rank 0] step:3161/10000 train_time:146463ms step_avg:46.33ms +[2025-09-11 07:56:55] [Rank 0] step:3161/10000 train_time:146463ms step_avg:46.33ms +[2025-09-11 07:56:56] [Rank 0] step:3181/10000 train_time:147129ms step_avg:46.25ms +[2025-09-11 07:56:56] [Rank 0] step:3181/10000 train_time:147129ms step_avg:46.25ms +[2025-09-11 07:56:56] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 07:56:56] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 07:56:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 07:56:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 07:56:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 07:56:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 07:56:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:56:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:56:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 07:56:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 07:56:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 07:56:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 07:57:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 07:57:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 07:57:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 07:57:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 07:57:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 07:57:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 07:57:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 07:57:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 07:57:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 07:57:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 07:57:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 07:57:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 07:57:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 07:57:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 07:57:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 07:57:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 07:57:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 07:57:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 07:57:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 07:57:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 07:57:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 07:57:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 07:57:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 07:57:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 07:57:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 07:57:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 07:57:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 07:57:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 07:57:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 07:57:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 07:57:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 07:57:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 07:57:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 07:57:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 07:57:06] [Rank 0] PRINT: step:3200/10000 val_loss:5.8145 total_sharp:1.3736e-03 L1_sharp:6.0244e-04 L2_sharp:2.2826e-04 L3_sharp:1.7092e-04 L4_sharp:8.9582e-05 L5_sharp:2.5858e-04 L6_sharp:1.5570e-04 L7_sharp:1.0941e-04 L8_sharp:2.1712e-04 L9_sharp:1.5329e-04 L10_sharp:1.7796e-04 L11_sharp:2.3725e-04 L12_sharp:6.5101e-04 total_fnorm:2.1000e+01 total_l1_linf:5.5808e+04 total_spectral:1.0500e+01 L1_fnorm:6.4375e+00 L2_fnorm:6.2500e+00 L3_fnorm:6.2500e+00 L4_fnorm:6.2188e+00 L5_fnorm:6.0000e+00 L6_fnorm:6.1875e+00 L7_fnorm:6.1562e+00 L8_fnorm:5.9688e+00 L9_fnorm:6.0625e+00 L10_fnorm:5.9062e+00 L11_fnorm:5.6875e+00 L12_fnorm:5.1562e+00 L1_l1linf:1.6875e+00 L2_l1linf:1.5938e+00 L3_l1linf:1.5703e+00 L4_l1linf:1.5312e+00 L5_l1linf:1.5234e+00 L6_l1linf:1.5000e+00 L7_l1linf:1.4922e+00 L8_l1linf:1.5391e+00 L9_l1linf:1.5547e+00 L10_l1linf:1.4844e+00 L11_l1linf:1.3438e+00 L12_l1linf:1.1406e+00 L1_spectral:7.7514e-02 L2_spectral:7.5822e-02 L3_spectral:7.6318e-02 L4_spectral:7.5941e-02 L5_spectral:7.4112e-02 L6_spectral:7.4797e-02 L7_spectral:7.4775e-02 L8_spectral:7.3877e-02 L9_spectral:7.3430e-02 L10_spectral:7.3874e-02 L11_spectral:7.4489e-02 L12_spectral:7.4231e-02 train_time:147775ms step_avg:46.18ms +[2025-09-11 07:57:06] [Rank 0] PRINT: step:3200/10000 
val_loss:5.8145 total_sharp:1.3736e-03 L1_sharp:6.0244e-04 L2_sharp:2.2826e-04 L3_sharp:1.7092e-04 L4_sharp:8.9582e-05 L5_sharp:2.5858e-04 L6_sharp:1.5570e-04 L7_sharp:1.0941e-04 L8_sharp:2.1712e-04 L9_sharp:1.5329e-04 L10_sharp:1.7796e-04 L11_sharp:2.3725e-04 L12_sharp:6.5101e-04 total_fnorm:2.1000e+01 total_l1_linf:5.5808e+04 total_spectral:1.0500e+01 L1_fnorm:6.4375e+00 L2_fnorm:6.2500e+00 L3_fnorm:6.2500e+00 L4_fnorm:6.2188e+00 L5_fnorm:6.0000e+00 L6_fnorm:6.1875e+00 L7_fnorm:6.1562e+00 L8_fnorm:5.9688e+00 L9_fnorm:6.0625e+00 L10_fnorm:5.9062e+00 L11_fnorm:5.6875e+00 L12_fnorm:5.1562e+00 L1_l1linf:1.6875e+00 L2_l1linf:1.5938e+00 L3_l1linf:1.5703e+00 L4_l1linf:1.5312e+00 L5_l1linf:1.5234e+00 L6_l1linf:1.5000e+00 L7_l1linf:1.4922e+00 L8_l1linf:1.5391e+00 L9_l1linf:1.5547e+00 L10_l1linf:1.4844e+00 L11_l1linf:1.3438e+00 L12_l1linf:1.1406e+00 L1_spectral:7.7514e-02 L2_spectral:7.5822e-02 L3_spectral:7.6318e-02 L4_spectral:7.5941e-02 L5_spectral:7.4112e-02 L6_spectral:7.4797e-02 L7_spectral:7.4775e-02 L8_spectral:7.3877e-02 L9_spectral:7.3430e-02 L10_spectral:7.3874e-02 L11_spectral:7.4489e-02 L12_spectral:7.4231e-02 train_time:147775ms step_avg:46.18ms +[2025-09-11 07:57:07] [Rank 0] step:3201/10000 train_time:148977ms step_avg:46.54ms +[2025-09-11 07:57:07] [Rank 0] step:3201/10000 train_time:148977ms step_avg:46.54ms +[2025-09-11 07:57:08] [Rank 0] step:3221/10000 train_time:149633ms step_avg:46.46ms +[2025-09-11 07:57:08] [Rank 0] step:3221/10000 train_time:149633ms step_avg:46.46ms +[2025-09-11 07:57:09] [Rank 0] step:3241/10000 train_time:150301ms step_avg:46.37ms +[2025-09-11 07:57:09] [Rank 0] step:3241/10000 train_time:150301ms step_avg:46.37ms +[2025-09-11 07:57:09] [Rank 0] step:3261/10000 train_time:150969ms step_avg:46.30ms +[2025-09-11 07:57:09] [Rank 0] step:3261/10000 train_time:150969ms step_avg:46.30ms +[2025-09-11 07:57:10] [Rank 0] step:3281/10000 train_time:151636ms step_avg:46.22ms +[2025-09-11 07:57:10] [Rank 0] step:3281/10000 
train_time:151636ms step_avg:46.22ms +[2025-09-11 07:57:11] [Rank 0] step:3301/10000 train_time:152303ms step_avg:46.14ms +[2025-09-11 07:57:11] [Rank 0] step:3301/10000 train_time:152303ms step_avg:46.14ms +[2025-09-11 07:57:11] [Rank 0] step:3321/10000 train_time:152970ms step_avg:46.06ms +[2025-09-11 07:57:11] [Rank 0] step:3321/10000 train_time:152970ms step_avg:46.06ms +[2025-09-11 07:57:12] [Rank 0] step:3341/10000 train_time:153636ms step_avg:45.99ms +[2025-09-11 07:57:12] [Rank 0] step:3341/10000 train_time:153636ms step_avg:45.99ms +[2025-09-11 07:57:13] [Rank 0] step:3361/10000 train_time:154304ms step_avg:45.91ms +[2025-09-11 07:57:13] [Rank 0] step:3361/10000 train_time:154304ms step_avg:45.91ms +[2025-09-11 07:57:13] [Rank 0] step:3381/10000 train_time:154970ms step_avg:45.84ms +[2025-09-11 07:57:13] [Rank 0] step:3381/10000 train_time:154970ms step_avg:45.84ms +[2025-09-11 07:57:14] [Rank 0] step:3401/10000 train_time:155636ms step_avg:45.76ms +[2025-09-11 07:57:14] [Rank 0] step:3401/10000 train_time:155636ms step_avg:45.76ms +[2025-09-11 07:57:15] [Rank 0] step:3421/10000 train_time:156303ms step_avg:45.69ms +[2025-09-11 07:57:15] [Rank 0] step:3421/10000 train_time:156303ms step_avg:45.69ms +[2025-09-11 07:57:15] [Rank 0] step:3441/10000 train_time:156968ms step_avg:45.62ms +[2025-09-11 07:57:15] [Rank 0] step:3441/10000 train_time:156968ms step_avg:45.62ms +[2025-09-11 07:57:16] [Rank 0] step:3461/10000 train_time:157634ms step_avg:45.55ms +[2025-09-11 07:57:16] [Rank 0] step:3461/10000 train_time:157634ms step_avg:45.55ms +[2025-09-11 07:57:17] [Rank 0] step:3481/10000 train_time:158300ms step_avg:45.48ms +[2025-09-11 07:57:17] [Rank 0] step:3481/10000 train_time:158300ms step_avg:45.48ms +[2025-09-11 07:57:17] [Rank 0] step:3501/10000 train_time:158967ms step_avg:45.41ms +[2025-09-11 07:57:17] [Rank 0] step:3501/10000 train_time:158967ms step_avg:45.41ms +[2025-09-11 07:57:18] [Rank 0] step:3521/10000 train_time:159632ms step_avg:45.34ms 
+[2025-09-11 07:57:18] [Rank 0] step:3521/10000 train_time:159632ms step_avg:45.34ms +[2025-09-11 07:57:19] [Rank 0] step:3541/10000 train_time:160298ms step_avg:45.27ms +[2025-09-11 07:57:19] [Rank 0] step:3541/10000 train_time:160298ms step_avg:45.27ms +[2025-09-11 07:57:19] [Rank 0] step:3561/10000 train_time:160964ms step_avg:45.20ms +[2025-09-11 07:57:19] [Rank 0] step:3561/10000 train_time:160964ms step_avg:45.20ms +[2025-09-11 07:57:20] [Rank 0] step:3581/10000 train_time:161629ms step_avg:45.14ms +[2025-09-11 07:57:20] [Rank 0] step:3581/10000 train_time:161629ms step_avg:45.14ms +[2025-09-11 07:57:21] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 07:57:21] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 07:57:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 07:57:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 07:57:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 07:57:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 07:57:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:57:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:57:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 07:57:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 07:57:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 07:57:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 07:57:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 07:57:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 07:57:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 07:57:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 07:57:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 07:57:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 07:57:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 07:57:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 07:57:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 07:57:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 07:57:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 07:57:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 07:57:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 07:57:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 07:57:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 07:57:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 07:57:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 07:57:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 07:57:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 07:57:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 07:57:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 07:57:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 07:57:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 07:57:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 07:57:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 07:57:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 07:57:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 07:57:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 07:57:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 07:57:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 07:57:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 07:57:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 07:57:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 07:57:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:57:31] [Rank 0] PRINT: step:3600/10000 val_loss:5.7592 total_sharp:1.0505e-03 L1_sharp:5.1599e-04 L2_sharp:2.0278e-04 L3_sharp:1.4079e-04 L4_sharp:7.7288e-05 L5_sharp:1.9878e-04 L6_sharp:1.1593e-04 L7_sharp:1.0651e-04 L8_sharp:1.9388e-04 L9_sharp:1.2270e-04 L10_sharp:1.5530e-04 L11_sharp:2.1184e-04 L12_sharp:5.5449e-04 total_fnorm:2.1250e+01 total_l1_linf:5.5296e+04 total_spectral:1.0562e+01 L1_fnorm:6.4375e+00 L2_fnorm:6.2500e+00 L3_fnorm:6.2500e+00 L4_fnorm:6.2188e+00 L5_fnorm:6.0312e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.0938e+00 L10_fnorm:6.0312e+00 L11_fnorm:5.8438e+00 L12_fnorm:5.2812e+00 L1_l1linf:1.6719e+00 L2_l1linf:1.5938e+00 L3_l1linf:1.5391e+00 L4_l1linf:1.5234e+00 L5_l1linf:1.4688e+00 L6_l1linf:1.4531e+00 L7_l1linf:1.5078e+00 L8_l1linf:1.4922e+00 L9_l1linf:1.5000e+00 L10_l1linf:1.4766e+00 L11_l1linf:1.3828e+00 L12_l1linf:1.1094e+00 L1_spectral:7.8038e-02 L2_spectral:7.6214e-02 L3_spectral:7.6683e-02 L4_spectral:7.6643e-02 L5_spectral:7.4789e-02 L6_spectral:7.6086e-02 L7_spectral:7.5554e-02 L8_spectral:7.5180e-02 L9_spectral:7.4208e-02 L10_spectral:7.5011e-02 L11_spectral:7.5469e-02 L12_spectral:7.4969e-02 train_time:162276ms step_avg:45.08ms +[2025-09-11 07:57:31] [Rank 0] PRINT: step:3600/10000 val_loss:5.7592 total_sharp:1.0505e-03 L1_sharp:5.1599e-04 L2_sharp:2.0278e-04 L3_sharp:1.4079e-04 L4_sharp:7.7288e-05 L5_sharp:1.9878e-04 L6_sharp:1.1593e-04 L7_sharp:1.0651e-04 L8_sharp:1.9388e-04 L9_sharp:1.2270e-04 L10_sharp:1.5530e-04 L11_sharp:2.1184e-04 L12_sharp:5.5449e-04 total_fnorm:2.1250e+01 total_l1_linf:5.5296e+04 total_spectral:1.0562e+01 L1_fnorm:6.4375e+00 L2_fnorm:6.2500e+00 L3_fnorm:6.2500e+00 L4_fnorm:6.2188e+00 L5_fnorm:6.0312e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.0938e+00 L10_fnorm:6.0312e+00 L11_fnorm:5.8438e+00 L12_fnorm:5.2812e+00 L1_l1linf:1.6719e+00 L2_l1linf:1.5938e+00 L3_l1linf:1.5391e+00 L4_l1linf:1.5234e+00 L5_l1linf:1.4688e+00 
L6_l1linf:1.4531e+00 L7_l1linf:1.5078e+00 L8_l1linf:1.4922e+00 L9_l1linf:1.5000e+00 L10_l1linf:1.4766e+00 L11_l1linf:1.3828e+00 L12_l1linf:1.1094e+00 L1_spectral:7.8038e-02 L2_spectral:7.6214e-02 L3_spectral:7.6683e-02 L4_spectral:7.6643e-02 L5_spectral:7.4789e-02 L6_spectral:7.6086e-02 L7_spectral:7.5554e-02 L8_spectral:7.5180e-02 L9_spectral:7.4208e-02 L10_spectral:7.5011e-02 L11_spectral:7.5469e-02 L12_spectral:7.4969e-02 train_time:162276ms step_avg:45.08ms +[2025-09-11 07:57:32] [Rank 0] step:3601/10000 train_time:163476ms step_avg:45.40ms +[2025-09-11 07:57:32] [Rank 0] step:3601/10000 train_time:163476ms step_avg:45.40ms +[2025-09-11 07:57:33] [Rank 0] step:3621/10000 train_time:164130ms step_avg:45.33ms +[2025-09-11 07:57:33] [Rank 0] step:3621/10000 train_time:164130ms step_avg:45.33ms +[2025-09-11 07:57:33] [Rank 0] step:3641/10000 train_time:164796ms step_avg:45.26ms +[2025-09-11 07:57:33] [Rank 0] step:3641/10000 train_time:164796ms step_avg:45.26ms +[2025-09-11 07:57:34] [Rank 0] step:3661/10000 train_time:165598ms step_avg:45.23ms +[2025-09-11 07:57:34] [Rank 0] step:3661/10000 train_time:165598ms step_avg:45.23ms +[2025-09-11 07:57:35] [Rank 0] step:3681/10000 train_time:166691ms step_avg:45.28ms +[2025-09-11 07:57:35] [Rank 0] step:3681/10000 train_time:166691ms step_avg:45.28ms +[2025-09-11 07:57:36] [Rank 0] step:3701/10000 train_time:167357ms step_avg:45.22ms +[2025-09-11 07:57:36] [Rank 0] step:3701/10000 train_time:167357ms step_avg:45.22ms +[2025-09-11 07:57:37] [Rank 0] step:3721/10000 train_time:168269ms step_avg:45.22ms +[2025-09-11 07:57:37] [Rank 0] step:3721/10000 train_time:168269ms step_avg:45.22ms +[2025-09-11 07:57:37] [Rank 0] step:3741/10000 train_time:168964ms step_avg:45.17ms +[2025-09-11 07:57:37] [Rank 0] step:3741/10000 train_time:168964ms step_avg:45.17ms +[2025-09-11 07:57:38] [Rank 0] step:3761/10000 train_time:169642ms step_avg:45.11ms +[2025-09-11 07:57:38] [Rank 0] step:3761/10000 train_time:169642ms step_avg:45.11ms 
+[2025-09-11 07:57:39] [Rank 0] step:3781/10000 train_time:170318ms step_avg:45.05ms +[2025-09-11 07:57:39] [Rank 0] step:3781/10000 train_time:170318ms step_avg:45.05ms +[2025-09-11 07:57:39] [Rank 0] step:3801/10000 train_time:170994ms step_avg:44.99ms +[2025-09-11 07:57:39] [Rank 0] step:3801/10000 train_time:170994ms step_avg:44.99ms +[2025-09-11 07:57:40] [Rank 0] step:3821/10000 train_time:171671ms step_avg:44.93ms +[2025-09-11 07:57:40] [Rank 0] step:3821/10000 train_time:171671ms step_avg:44.93ms +[2025-09-11 07:57:41] [Rank 0] step:3841/10000 train_time:172348ms step_avg:44.87ms +[2025-09-11 07:57:41] [Rank 0] step:3841/10000 train_time:172348ms step_avg:44.87ms +[2025-09-11 07:57:42] [Rank 0] step:3861/10000 train_time:173024ms step_avg:44.81ms +[2025-09-11 07:57:42] [Rank 0] step:3861/10000 train_time:173024ms step_avg:44.81ms +[2025-09-11 07:57:42] [Rank 0] step:3881/10000 train_time:173699ms step_avg:44.76ms +[2025-09-11 07:57:42] [Rank 0] step:3881/10000 train_time:173699ms step_avg:44.76ms +[2025-09-11 07:57:43] [Rank 0] step:3901/10000 train_time:174376ms step_avg:44.70ms +[2025-09-11 07:57:43] [Rank 0] step:3901/10000 train_time:174376ms step_avg:44.70ms +[2025-09-11 07:57:44] [Rank 0] step:3921/10000 train_time:175052ms step_avg:44.64ms +[2025-09-11 07:57:44] [Rank 0] step:3921/10000 train_time:175052ms step_avg:44.64ms +[2025-09-11 07:57:44] [Rank 0] step:3941/10000 train_time:175729ms step_avg:44.59ms +[2025-09-11 07:57:44] [Rank 0] step:3941/10000 train_time:175729ms step_avg:44.59ms +[2025-09-11 07:57:45] [Rank 0] step:3961/10000 train_time:176405ms step_avg:44.54ms +[2025-09-11 07:57:45] [Rank 0] step:3961/10000 train_time:176405ms step_avg:44.54ms +[2025-09-11 07:57:46] [Rank 0] step:3981/10000 train_time:177080ms step_avg:44.48ms +[2025-09-11 07:57:46] [Rank 0] step:3981/10000 train_time:177080ms step_avg:44.48ms +[2025-09-11 07:57:46] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 07:57:46] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 07:57:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 07:57:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 07:57:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 07:57:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 07:57:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:57:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:57:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 07:57:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 07:57:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 07:57:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 07:57:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 07:57:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 07:57:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 07:57:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 07:57:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 07:57:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 07:57:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 07:57:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 07:57:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 07:57:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 07:57:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 07:57:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 07:57:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 07:57:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 07:57:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 07:57:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 07:57:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 07:57:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 07:57:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 07:57:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 07:57:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 07:57:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 07:57:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 07:57:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 07:57:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 07:57:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 07:57:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 07:57:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 07:57:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 07:57:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 07:57:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 07:57:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 07:57:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 07:57:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 07:57:56] [Rank 0] PRINT: step:4000/10000 val_loss:5.7070 total_sharp:1.5998e-03 L1_sharp:5.6054e-04 L2_sharp:2.1391e-04 L3_sharp:1.4795e-04 L4_sharp:1.1717e-04 L5_sharp:2.1665e-04 L6_sharp:1.3171e-04 L7_sharp:1.3213e-04 L8_sharp:2.6478e-04 L9_sharp:1.7225e-04 L10_sharp:2.1545e-04 L11_sharp:3.0641e-04 L12_sharp:9.1282e-04 total_fnorm:2.1125e+01 total_l1_linf:5.4016e+04 total_spectral:1.0500e+01 L1_fnorm:6.4062e+00 L2_fnorm:6.2500e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.1875e+00 L5_fnorm:5.9688e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.1875e+00 L8_fnorm:5.9688e+00 L9_fnorm:6.0625e+00 L10_fnorm:5.9688e+00 L11_fnorm:5.7812e+00 L12_fnorm:5.1875e+00 L1_l1linf:1.6562e+00 L2_l1linf:1.5703e+00 L3_l1linf:1.5469e+00 L4_l1linf:1.5000e+00 L5_l1linf:1.4531e+00 L6_l1linf:1.4297e+00 L7_l1linf:1.4531e+00 L8_l1linf:1.4766e+00 L9_l1linf:1.5156e+00 L10_l1linf:1.4453e+00 L11_l1linf:1.3125e+00 L12_l1linf:1.1094e+00 L1_spectral:7.8258e-02 L2_spectral:7.6222e-02 L3_spectral:7.7122e-02 L4_spectral:7.6962e-02 L5_spectral:7.5289e-02 L6_spectral:7.6656e-02 L7_spectral:7.6626e-02 L8_spectral:7.6197e-02 L9_spectral:7.6039e-02 L10_spectral:7.5596e-02 L11_spectral:7.5885e-02 L12_spectral:7.5035e-02 train_time:177737ms step_avg:44.43ms +[2025-09-11 07:57:56] [Rank 0] PRINT: step:4000/10000 
val_loss:5.7070 total_sharp:1.5998e-03 L1_sharp:5.6054e-04 L2_sharp:2.1391e-04 L3_sharp:1.4795e-04 L4_sharp:1.1717e-04 L5_sharp:2.1665e-04 L6_sharp:1.3171e-04 L7_sharp:1.3213e-04 L8_sharp:2.6478e-04 L9_sharp:1.7225e-04 L10_sharp:2.1545e-04 L11_sharp:3.0641e-04 L12_sharp:9.1282e-04 total_fnorm:2.1125e+01 total_l1_linf:5.4016e+04 total_spectral:1.0500e+01 L1_fnorm:6.4062e+00 L2_fnorm:6.2500e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.1875e+00 L5_fnorm:5.9688e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.1875e+00 L8_fnorm:5.9688e+00 L9_fnorm:6.0625e+00 L10_fnorm:5.9688e+00 L11_fnorm:5.7812e+00 L12_fnorm:5.1875e+00 L1_l1linf:1.6562e+00 L2_l1linf:1.5703e+00 L3_l1linf:1.5469e+00 L4_l1linf:1.5000e+00 L5_l1linf:1.4531e+00 L6_l1linf:1.4297e+00 L7_l1linf:1.4531e+00 L8_l1linf:1.4766e+00 L9_l1linf:1.5156e+00 L10_l1linf:1.4453e+00 L11_l1linf:1.3125e+00 L12_l1linf:1.1094e+00 L1_spectral:7.8258e-02 L2_spectral:7.6222e-02 L3_spectral:7.7122e-02 L4_spectral:7.6962e-02 L5_spectral:7.5289e-02 L6_spectral:7.6656e-02 L7_spectral:7.6626e-02 L8_spectral:7.6197e-02 L9_spectral:7.6039e-02 L10_spectral:7.5596e-02 L11_spectral:7.5885e-02 L12_spectral:7.5035e-02 train_time:177737ms step_avg:44.43ms +[2025-09-11 07:57:57] [Rank 0] step:4001/10000 train_time:178939ms step_avg:44.72ms +[2025-09-11 07:57:57] [Rank 0] step:4001/10000 train_time:178939ms step_avg:44.72ms +[2025-09-11 07:57:58] [Rank 0] step:4021/10000 train_time:179631ms step_avg:44.67ms +[2025-09-11 07:57:58] [Rank 0] step:4021/10000 train_time:179631ms step_avg:44.67ms +[2025-09-11 07:57:59] [Rank 0] step:4041/10000 train_time:180309ms step_avg:44.62ms +[2025-09-11 07:57:59] [Rank 0] step:4041/10000 train_time:180309ms step_avg:44.62ms +[2025-09-11 07:57:59] [Rank 0] step:4061/10000 train_time:180984ms step_avg:44.57ms +[2025-09-11 07:57:59] [Rank 0] step:4061/10000 train_time:180984ms step_avg:44.57ms +[2025-09-11 07:58:00] [Rank 0] step:4081/10000 train_time:181661ms step_avg:44.51ms +[2025-09-11 07:58:00] [Rank 0] step:4081/10000 
train_time:181661ms step_avg:44.51ms +[2025-09-11 07:58:01] [Rank 0] step:4101/10000 train_time:182337ms step_avg:44.46ms +[2025-09-11 07:58:01] [Rank 0] step:4101/10000 train_time:182337ms step_avg:44.46ms +[2025-09-11 07:58:02] [Rank 0] step:4121/10000 train_time:183014ms step_avg:44.41ms +[2025-09-11 07:58:02] [Rank 0] step:4121/10000 train_time:183014ms step_avg:44.41ms +[2025-09-11 07:58:02] [Rank 0] step:4141/10000 train_time:183690ms step_avg:44.36ms +[2025-09-11 07:58:02] [Rank 0] step:4141/10000 train_time:183690ms step_avg:44.36ms +[2025-09-11 07:58:03] [Rank 0] step:4161/10000 train_time:184366ms step_avg:44.31ms +[2025-09-11 07:58:03] [Rank 0] step:4161/10000 train_time:184366ms step_avg:44.31ms +[2025-09-11 07:58:04] [Rank 0] step:4181/10000 train_time:185042ms step_avg:44.26ms +[2025-09-11 07:58:04] [Rank 0] step:4181/10000 train_time:185042ms step_avg:44.26ms +[2025-09-11 07:58:04] [Rank 0] step:4201/10000 train_time:185718ms step_avg:44.21ms +[2025-09-11 07:58:04] [Rank 0] step:4201/10000 train_time:185718ms step_avg:44.21ms +[2025-09-11 07:58:05] [Rank 0] step:4221/10000 train_time:186393ms step_avg:44.16ms +[2025-09-11 07:58:05] [Rank 0] step:4221/10000 train_time:186393ms step_avg:44.16ms +[2025-09-11 07:58:06] [Rank 0] step:4241/10000 train_time:187068ms step_avg:44.11ms +[2025-09-11 07:58:06] [Rank 0] step:4241/10000 train_time:187068ms step_avg:44.11ms +[2025-09-11 07:58:06] [Rank 0] step:4261/10000 train_time:187745ms step_avg:44.06ms +[2025-09-11 07:58:06] [Rank 0] step:4261/10000 train_time:187745ms step_avg:44.06ms +[2025-09-11 07:58:07] [Rank 0] step:4281/10000 train_time:188422ms step_avg:44.01ms +[2025-09-11 07:58:07] [Rank 0] step:4281/10000 train_time:188422ms step_avg:44.01ms +[2025-09-11 07:58:08] [Rank 0] step:4301/10000 train_time:189098ms step_avg:43.97ms +[2025-09-11 07:58:08] [Rank 0] step:4301/10000 train_time:189098ms step_avg:43.97ms +[2025-09-11 07:58:08] [Rank 0] step:4321/10000 train_time:189774ms step_avg:43.92ms 
+[2025-09-11 07:58:08] [Rank 0] step:4321/10000 train_time:189774ms step_avg:43.92ms +[2025-09-11 07:58:09] [Rank 0] step:4341/10000 train_time:190449ms step_avg:43.87ms +[2025-09-11 07:58:09] [Rank 0] step:4341/10000 train_time:190449ms step_avg:43.87ms +[2025-09-11 07:58:10] [Rank 0] step:4361/10000 train_time:191125ms step_avg:43.83ms +[2025-09-11 07:58:10] [Rank 0] step:4361/10000 train_time:191125ms step_avg:43.83ms +[2025-09-11 07:58:10] [Rank 0] step:4381/10000 train_time:191802ms step_avg:43.78ms +[2025-09-11 07:58:10] [Rank 0] step:4381/10000 train_time:191802ms step_avg:43.78ms +[2025-09-11 07:58:11] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 07:58:11] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 07:58:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 07:58:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 07:58:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 07:58:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 07:58:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:58:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:58:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 07:58:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 07:58:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 07:58:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 07:58:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 07:58:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 07:58:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 07:58:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 07:58:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 07:58:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 07:58:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 07:58:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 07:58:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 07:58:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 07:58:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 07:58:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 07:58:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 07:58:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 07:58:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 07:58:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 07:58:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 07:58:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 07:58:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 07:58:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 07:58:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 07:58:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 07:58:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 07:58:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 07:58:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 07:58:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 07:58:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 07:58:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 07:58:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 07:58:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 07:58:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 07:58:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 07:58:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 07:58:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:58:21] [Rank 0] PRINT: step:4400/10000 val_loss:5.6640 total_sharp:1.3826e-03 L1_sharp:3.6349e-04 L2_sharp:1.8264e-04 L3_sharp:1.3791e-04 L4_sharp:6.4329e-05 L5_sharp:2.2505e-04 L6_sharp:1.4099e-04 L7_sharp:1.0521e-04 L8_sharp:1.9914e-04 L9_sharp:1.3226e-04 L10_sharp:1.9957e-04 L11_sharp:2.4088e-04 L12_sharp:1.2027e-03 total_fnorm:2.1125e+01 total_l1_linf:5.3504e+04 total_spectral:1.0562e+01 L1_fnorm:6.4062e+00 L2_fnorm:6.2188e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.1875e+00 L5_fnorm:6.0312e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.1562e+00 L10_fnorm:6.0312e+00 L11_fnorm:5.8750e+00 L12_fnorm:5.2188e+00 L1_l1linf:1.6406e+00 L2_l1linf:1.5781e+00 L3_l1linf:1.5312e+00 L4_l1linf:1.4609e+00 L5_l1linf:1.4453e+00 L6_l1linf:1.4297e+00 L7_l1linf:1.4219e+00 L8_l1linf:1.4766e+00 L9_l1linf:1.4844e+00 L10_l1linf:1.4609e+00 L11_l1linf:1.3281e+00 L12_l1linf:1.0078e+00 L1_spectral:7.9079e-02 L2_spectral:7.6608e-02 L3_spectral:7.7461e-02 L4_spectral:7.7472e-02 L5_spectral:7.6085e-02 L6_spectral:7.7384e-02 L7_spectral:7.6935e-02 L8_spectral:7.6504e-02 L9_spectral:7.6298e-02 L10_spectral:7.5460e-02 L11_spectral:7.5990e-02 L12_spectral:7.5110e-02 train_time:192458ms step_avg:43.74ms +[2025-09-11 07:58:21] [Rank 0] PRINT: step:4400/10000 val_loss:5.6640 total_sharp:1.3826e-03 L1_sharp:3.6349e-04 L2_sharp:1.8264e-04 L3_sharp:1.3791e-04 L4_sharp:6.4329e-05 L5_sharp:2.2505e-04 L6_sharp:1.4099e-04 L7_sharp:1.0521e-04 L8_sharp:1.9914e-04 L9_sharp:1.3226e-04 L10_sharp:1.9957e-04 L11_sharp:2.4088e-04 L12_sharp:1.2027e-03 total_fnorm:2.1125e+01 total_l1_linf:5.3504e+04 total_spectral:1.0562e+01 L1_fnorm:6.4062e+00 L2_fnorm:6.2188e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.1875e+00 L5_fnorm:6.0312e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.1562e+00 L10_fnorm:6.0312e+00 L11_fnorm:5.8750e+00 L12_fnorm:5.2188e+00 L1_l1linf:1.6406e+00 L2_l1linf:1.5781e+00 L3_l1linf:1.5312e+00 L4_l1linf:1.4609e+00 L5_l1linf:1.4453e+00 
L6_l1linf:1.4297e+00 L7_l1linf:1.4219e+00 L8_l1linf:1.4766e+00 L9_l1linf:1.4844e+00 L10_l1linf:1.4609e+00 L11_l1linf:1.3281e+00 L12_l1linf:1.0078e+00 L1_spectral:7.9079e-02 L2_spectral:7.6608e-02 L3_spectral:7.7461e-02 L4_spectral:7.7472e-02 L5_spectral:7.6085e-02 L6_spectral:7.7384e-02 L7_spectral:7.6935e-02 L8_spectral:7.6504e-02 L9_spectral:7.6298e-02 L10_spectral:7.5460e-02 L11_spectral:7.5990e-02 L12_spectral:7.5110e-02 train_time:192458ms step_avg:43.74ms +[2025-09-11 07:58:22] [Rank 0] step:4401/10000 train_time:193642ms step_avg:44.00ms +[2025-09-11 07:58:22] [Rank 0] step:4401/10000 train_time:193642ms step_avg:44.00ms +[2025-09-11 07:58:23] [Rank 0] step:4421/10000 train_time:194349ms step_avg:43.96ms +[2025-09-11 07:58:23] [Rank 0] step:4421/10000 train_time:194349ms step_avg:43.96ms +[2025-09-11 07:58:24] [Rank 0] step:4441/10000 train_time:195027ms step_avg:43.92ms +[2025-09-11 07:58:24] [Rank 0] step:4441/10000 train_time:195027ms step_avg:43.92ms +[2025-09-11 07:58:24] [Rank 0] step:4461/10000 train_time:195706ms step_avg:43.87ms +[2025-09-11 07:58:24] [Rank 0] step:4461/10000 train_time:195706ms step_avg:43.87ms +[2025-09-11 07:58:25] [Rank 0] step:4481/10000 train_time:196386ms step_avg:43.83ms +[2025-09-11 07:58:25] [Rank 0] step:4481/10000 train_time:196386ms step_avg:43.83ms +[2025-09-11 07:58:26] [Rank 0] step:4501/10000 train_time:197066ms step_avg:43.78ms +[2025-09-11 07:58:26] [Rank 0] step:4501/10000 train_time:197066ms step_avg:43.78ms +[2025-09-11 07:58:26] [Rank 0] step:4521/10000 train_time:197746ms step_avg:43.74ms +[2025-09-11 07:58:26] [Rank 0] step:4521/10000 train_time:197746ms step_avg:43.74ms +[2025-09-11 07:58:27] [Rank 0] step:4541/10000 train_time:198425ms step_avg:43.70ms +[2025-09-11 07:58:27] [Rank 0] step:4541/10000 train_time:198425ms step_avg:43.70ms +[2025-09-11 07:58:28] [Rank 0] step:4561/10000 train_time:199105ms step_avg:43.65ms +[2025-09-11 07:58:28] [Rank 0] step:4561/10000 train_time:199105ms step_avg:43.65ms 
+[2025-09-11 07:58:28] [Rank 0] step:4581/10000 train_time:199784ms step_avg:43.61ms +[2025-09-11 07:58:28] [Rank 0] step:4581/10000 train_time:199784ms step_avg:43.61ms +[2025-09-11 07:58:29] [Rank 0] step:4601/10000 train_time:200466ms step_avg:43.57ms +[2025-09-11 07:58:29] [Rank 0] step:4601/10000 train_time:200466ms step_avg:43.57ms +[2025-09-11 07:58:30] [Rank 0] step:4621/10000 train_time:201144ms step_avg:43.53ms +[2025-09-11 07:58:30] [Rank 0] step:4621/10000 train_time:201144ms step_avg:43.53ms +[2025-09-11 07:58:30] [Rank 0] step:4641/10000 train_time:201824ms step_avg:43.49ms +[2025-09-11 07:58:30] [Rank 0] step:4641/10000 train_time:201824ms step_avg:43.49ms +[2025-09-11 07:58:31] [Rank 0] step:4661/10000 train_time:202503ms step_avg:43.45ms +[2025-09-11 07:58:31] [Rank 0] step:4661/10000 train_time:202503ms step_avg:43.45ms +[2025-09-11 07:58:32] [Rank 0] step:4681/10000 train_time:203181ms step_avg:43.41ms +[2025-09-11 07:58:32] [Rank 0] step:4681/10000 train_time:203181ms step_avg:43.41ms +[2025-09-11 07:58:32] [Rank 0] step:4701/10000 train_time:203860ms step_avg:43.37ms +[2025-09-11 07:58:32] [Rank 0] step:4701/10000 train_time:203860ms step_avg:43.37ms +[2025-09-11 07:58:33] [Rank 0] step:4721/10000 train_time:204539ms step_avg:43.33ms +[2025-09-11 07:58:33] [Rank 0] step:4721/10000 train_time:204539ms step_avg:43.33ms +[2025-09-11 07:58:34] [Rank 0] step:4741/10000 train_time:205218ms step_avg:43.29ms +[2025-09-11 07:58:34] [Rank 0] step:4741/10000 train_time:205218ms step_avg:43.29ms +[2025-09-11 07:58:34] [Rank 0] step:4761/10000 train_time:205898ms step_avg:43.25ms +[2025-09-11 07:58:34] [Rank 0] step:4761/10000 train_time:205898ms step_avg:43.25ms +[2025-09-11 07:58:35] [Rank 0] step:4781/10000 train_time:206576ms step_avg:43.21ms +[2025-09-11 07:58:35] [Rank 0] step:4781/10000 train_time:206576ms step_avg:43.21ms +[2025-09-11 07:58:36] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 07:58:36] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 07:58:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 07:58:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 07:58:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 07:58:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 07:58:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:58:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:58:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 07:58:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 07:58:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 07:58:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 07:58:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 07:58:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 07:58:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 07:58:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 07:58:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 07:58:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 07:58:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 07:58:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 07:58:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 07:58:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 07:58:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 07:58:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 07:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 07:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 07:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 07:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 07:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 07:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 07:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 07:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 07:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 07:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 07:58:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 07:58:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 07:58:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 07:58:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 07:58:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 07:58:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 07:58:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 07:58:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 07:58:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 07:58:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 07:58:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 07:58:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 07:58:46] [Rank 0] PRINT: step:4800/10000 val_loss:5.6233 total_sharp:1.0685e-03 L1_sharp:2.7194e-04 L2_sharp:1.1621e-04 L3_sharp:1.0772e-04 L4_sharp:6.9137e-05 L5_sharp:1.8093e-04 L6_sharp:1.1223e-04 L7_sharp:1.2039e-04 L8_sharp:1.9568e-04 L9_sharp:1.3280e-04 L10_sharp:1.7015e-04 L11_sharp:2.1461e-04 L12_sharp:6.1246e-04 total_fnorm:2.1250e+01 total_l1_linf:5.2992e+04 total_spectral:1.0625e+01 L1_fnorm:6.4062e+00 L2_fnorm:6.2500e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.1562e+00 L5_fnorm:5.9688e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.1562e+00 L10_fnorm:6.0938e+00 L11_fnorm:5.9688e+00 L12_fnorm:5.4688e+00 L1_l1linf:1.6406e+00 L2_l1linf:1.5547e+00 L3_l1linf:1.4844e+00 L4_l1linf:1.4453e+00 L5_l1linf:1.4141e+00 L6_l1linf:1.3672e+00 L7_l1linf:1.3750e+00 L8_l1linf:1.4219e+00 L9_l1linf:1.4297e+00 L10_l1linf:1.4453e+00 L11_l1linf:1.3516e+00 L12_l1linf:1.0859e+00 L1_spectral:7.9618e-02 L2_spectral:7.7167e-02 L3_spectral:7.7638e-02 L4_spectral:7.7883e-02 L5_spectral:7.6259e-02 L6_spectral:7.7462e-02 L7_spectral:7.7676e-02 L8_spectral:7.6797e-02 L9_spectral:7.6975e-02 L10_spectral:7.6083e-02 L11_spectral:7.6483e-02 L12_spectral:7.6405e-02 train_time:207235ms step_avg:43.17ms +[2025-09-11 07:58:46] [Rank 0] PRINT: step:4800/10000 
val_loss:5.6233 total_sharp:1.0685e-03 L1_sharp:2.7194e-04 L2_sharp:1.1621e-04 L3_sharp:1.0772e-04 L4_sharp:6.9137e-05 L5_sharp:1.8093e-04 L6_sharp:1.1223e-04 L7_sharp:1.2039e-04 L8_sharp:1.9568e-04 L9_sharp:1.3280e-04 L10_sharp:1.7015e-04 L11_sharp:2.1461e-04 L12_sharp:6.1246e-04 total_fnorm:2.1250e+01 total_l1_linf:5.2992e+04 total_spectral:1.0625e+01 L1_fnorm:6.4062e+00 L2_fnorm:6.2500e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.1562e+00 L5_fnorm:5.9688e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.1562e+00 L10_fnorm:6.0938e+00 L11_fnorm:5.9688e+00 L12_fnorm:5.4688e+00 L1_l1linf:1.6406e+00 L2_l1linf:1.5547e+00 L3_l1linf:1.4844e+00 L4_l1linf:1.4453e+00 L5_l1linf:1.4141e+00 L6_l1linf:1.3672e+00 L7_l1linf:1.3750e+00 L8_l1linf:1.4219e+00 L9_l1linf:1.4297e+00 L10_l1linf:1.4453e+00 L11_l1linf:1.3516e+00 L12_l1linf:1.0859e+00 L1_spectral:7.9618e-02 L2_spectral:7.7167e-02 L3_spectral:7.7638e-02 L4_spectral:7.7883e-02 L5_spectral:7.6259e-02 L6_spectral:7.7462e-02 L7_spectral:7.7676e-02 L8_spectral:7.6797e-02 L9_spectral:7.6975e-02 L10_spectral:7.6083e-02 L11_spectral:7.6483e-02 L12_spectral:7.6405e-02 train_time:207235ms step_avg:43.17ms +[2025-09-11 07:58:47] [Rank 0] step:4801/10000 train_time:208391ms step_avg:43.41ms +[2025-09-11 07:58:47] [Rank 0] step:4801/10000 train_time:208391ms step_avg:43.41ms +[2025-09-11 07:58:48] [Rank 0] step:4821/10000 train_time:209110ms step_avg:43.37ms +[2025-09-11 07:58:48] [Rank 0] step:4821/10000 train_time:209110ms step_avg:43.37ms +[2025-09-11 07:58:49] [Rank 0] step:4841/10000 train_time:209791ms step_avg:43.34ms +[2025-09-11 07:58:49] [Rank 0] step:4841/10000 train_time:209791ms step_avg:43.34ms +[2025-09-11 07:58:49] [Rank 0] step:4861/10000 train_time:210470ms step_avg:43.30ms +[2025-09-11 07:58:49] [Rank 0] step:4861/10000 train_time:210470ms step_avg:43.30ms +[2025-09-11 07:58:50] [Rank 0] step:4881/10000 train_time:211150ms step_avg:43.26ms +[2025-09-11 07:58:50] [Rank 0] step:4881/10000 
train_time:211150ms step_avg:43.26ms +[2025-09-11 07:58:51] [Rank 0] step:4901/10000 train_time:211831ms step_avg:43.22ms +[2025-09-11 07:58:51] [Rank 0] step:4901/10000 train_time:211831ms step_avg:43.22ms +[2025-09-11 07:58:51] [Rank 0] step:4921/10000 train_time:212510ms step_avg:43.18ms +[2025-09-11 07:58:51] [Rank 0] step:4921/10000 train_time:212510ms step_avg:43.18ms +[2025-09-11 07:58:52] [Rank 0] step:4941/10000 train_time:213189ms step_avg:43.15ms +[2025-09-11 07:58:52] [Rank 0] step:4941/10000 train_time:213189ms step_avg:43.15ms +[2025-09-11 07:58:53] [Rank 0] step:4961/10000 train_time:213869ms step_avg:43.11ms +[2025-09-11 07:58:53] [Rank 0] step:4961/10000 train_time:213869ms step_avg:43.11ms +[2025-09-11 07:58:53] [Rank 0] step:4981/10000 train_time:214548ms step_avg:43.07ms +[2025-09-11 07:58:53] [Rank 0] step:4981/10000 train_time:214548ms step_avg:43.07ms +[2025-09-11 07:58:54] [Rank 0] step:5001/10000 train_time:215228ms step_avg:43.04ms +[2025-09-11 07:58:54] [Rank 0] step:5001/10000 train_time:215228ms step_avg:43.04ms +[2025-09-11 07:58:55] [Rank 0] step:5021/10000 train_time:215906ms step_avg:43.00ms +[2025-09-11 07:58:55] [Rank 0] step:5021/10000 train_time:215906ms step_avg:43.00ms +[2025-09-11 07:58:55] [Rank 0] step:5041/10000 train_time:216584ms step_avg:42.96ms +[2025-09-11 07:58:55] [Rank 0] step:5041/10000 train_time:216584ms step_avg:42.96ms +[2025-09-11 07:58:56] [Rank 0] step:5061/10000 train_time:217263ms step_avg:42.93ms +[2025-09-11 07:58:56] [Rank 0] step:5061/10000 train_time:217263ms step_avg:42.93ms +[2025-09-11 07:58:57] [Rank 0] step:5081/10000 train_time:217942ms step_avg:42.89ms +[2025-09-11 07:58:57] [Rank 0] step:5081/10000 train_time:217942ms step_avg:42.89ms +[2025-09-11 07:58:58] [Rank 0] step:5101/10000 train_time:218621ms step_avg:42.86ms +[2025-09-11 07:58:58] [Rank 0] step:5101/10000 train_time:218621ms step_avg:42.86ms +[2025-09-11 07:58:58] [Rank 0] step:5121/10000 train_time:219300ms step_avg:42.82ms 
+[2025-09-11 07:58:58] [Rank 0] step:5121/10000 train_time:219300ms step_avg:42.82ms +[2025-09-11 07:58:59] [Rank 0] step:5141/10000 train_time:219978ms step_avg:42.79ms +[2025-09-11 07:58:59] [Rank 0] step:5141/10000 train_time:219978ms step_avg:42.79ms +[2025-09-11 07:59:00] [Rank 0] step:5161/10000 train_time:220657ms step_avg:42.75ms +[2025-09-11 07:59:00] [Rank 0] step:5161/10000 train_time:220657ms step_avg:42.75ms +[2025-09-11 07:59:00] [Rank 0] step:5181/10000 train_time:221336ms step_avg:42.72ms +[2025-09-11 07:59:00] [Rank 0] step:5181/10000 train_time:221336ms step_avg:42.72ms +[2025-09-11 07:59:01] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 07:59:01] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 07:59:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 07:59:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 07:59:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 07:59:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 07:59:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:59:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:59:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 07:59:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 07:59:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 07:59:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 07:59:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 07:59:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 07:59:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 07:59:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 07:59:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 07:59:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 07:59:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 07:59:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 07:59:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 07:59:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 07:59:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 07:59:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 07:59:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 07:59:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 07:59:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 07:59:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 07:59:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 07:59:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 07:59:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 07:59:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 07:59:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 07:59:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 07:59:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 07:59:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 07:59:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 07:59:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 07:59:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 07:59:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 07:59:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 07:59:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 07:59:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 07:59:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 07:59:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 07:59:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:59:11] [Rank 0] PRINT: step:5200/10000 val_loss:5.5846 total_sharp:1.0446e-03 L1_sharp:2.8688e-04 L2_sharp:1.7107e-04 L3_sharp:8.7253e-05 L4_sharp:6.4839e-05 L5_sharp:1.6675e-04 L6_sharp:1.0825e-04 L7_sharp:1.0747e-04 L8_sharp:1.7331e-04 L9_sharp:1.1103e-04 L10_sharp:1.7258e-04 L11_sharp:2.5143e-04 L12_sharp:8.0320e-04 total_fnorm:2.1250e+01 total_l1_linf:5.2480e+04 total_spectral:1.0625e+01 L1_fnorm:6.4062e+00 L2_fnorm:6.2500e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.1875e+00 L5_fnorm:6.0312e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.2500e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.1562e+00 L10_fnorm:6.1250e+00 L11_fnorm:6.0312e+00 L12_fnorm:5.4688e+00 L1_l1linf:1.6328e+00 L2_l1linf:1.5156e+00 L3_l1linf:1.5000e+00 L4_l1linf:1.4453e+00 L5_l1linf:1.3906e+00 L6_l1linf:1.3594e+00 L7_l1linf:1.4062e+00 L8_l1linf:1.3906e+00 L9_l1linf:1.4219e+00 L10_l1linf:1.4062e+00 L11_l1linf:1.3281e+00 L12_l1linf:1.0781e+00 L1_spectral:7.9980e-02 L2_spectral:7.7520e-02 L3_spectral:7.8222e-02 L4_spectral:7.7982e-02 L5_spectral:7.7479e-02 L6_spectral:7.8247e-02 L7_spectral:7.7990e-02 L8_spectral:7.7859e-02 L9_spectral:7.7460e-02 L10_spectral:7.7117e-02 L11_spectral:7.6614e-02 L12_spectral:7.6846e-02 train_time:222002ms step_avg:42.69ms +[2025-09-11 07:59:11] [Rank 0] PRINT: step:5200/10000 val_loss:5.5846 total_sharp:1.0446e-03 L1_sharp:2.8688e-04 L2_sharp:1.7107e-04 L3_sharp:8.7253e-05 L4_sharp:6.4839e-05 L5_sharp:1.6675e-04 L6_sharp:1.0825e-04 L7_sharp:1.0747e-04 L8_sharp:1.7331e-04 L9_sharp:1.1103e-04 L10_sharp:1.7258e-04 L11_sharp:2.5143e-04 L12_sharp:8.0320e-04 total_fnorm:2.1250e+01 total_l1_linf:5.2480e+04 total_spectral:1.0625e+01 L1_fnorm:6.4062e+00 L2_fnorm:6.2500e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.1875e+00 L5_fnorm:6.0312e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.2500e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.1562e+00 L10_fnorm:6.1250e+00 L11_fnorm:6.0312e+00 L12_fnorm:5.4688e+00 L1_l1linf:1.6328e+00 L2_l1linf:1.5156e+00 L3_l1linf:1.5000e+00 L4_l1linf:1.4453e+00 L5_l1linf:1.3906e+00 
L6_l1linf:1.3594e+00 L7_l1linf:1.4062e+00 L8_l1linf:1.3906e+00 L9_l1linf:1.4219e+00 L10_l1linf:1.4062e+00 L11_l1linf:1.3281e+00 L12_l1linf:1.0781e+00 L1_spectral:7.9980e-02 L2_spectral:7.7520e-02 L3_spectral:7.8222e-02 L4_spectral:7.7982e-02 L5_spectral:7.7479e-02 L6_spectral:7.8247e-02 L7_spectral:7.7990e-02 L8_spectral:7.7859e-02 L9_spectral:7.7460e-02 L10_spectral:7.7117e-02 L11_spectral:7.6614e-02 L12_spectral:7.6846e-02 train_time:222002ms step_avg:42.69ms +[2025-09-11 07:59:12] [Rank 0] step:5201/10000 train_time:223186ms step_avg:42.91ms +[2025-09-11 07:59:12] [Rank 0] step:5201/10000 train_time:223186ms step_avg:42.91ms +[2025-09-11 07:59:13] [Rank 0] step:5221/10000 train_time:223879ms step_avg:42.88ms +[2025-09-11 07:59:13] [Rank 0] step:5221/10000 train_time:223879ms step_avg:42.88ms +[2025-09-11 07:59:13] [Rank 0] step:5241/10000 train_time:224569ms step_avg:42.85ms +[2025-09-11 07:59:13] [Rank 0] step:5241/10000 train_time:224569ms step_avg:42.85ms +[2025-09-11 07:59:14] [Rank 0] step:5261/10000 train_time:225258ms step_avg:42.82ms +[2025-09-11 07:59:14] [Rank 0] step:5261/10000 train_time:225258ms step_avg:42.82ms +[2025-09-11 07:59:15] [Rank 0] step:5281/10000 train_time:225948ms step_avg:42.79ms +[2025-09-11 07:59:15] [Rank 0] step:5281/10000 train_time:225948ms step_avg:42.79ms +[2025-09-11 07:59:16] [Rank 0] step:5301/10000 train_time:226637ms step_avg:42.75ms +[2025-09-11 07:59:16] [Rank 0] step:5301/10000 train_time:226637ms step_avg:42.75ms +[2025-09-11 07:59:16] [Rank 0] step:5321/10000 train_time:227325ms step_avg:42.72ms +[2025-09-11 07:59:16] [Rank 0] step:5321/10000 train_time:227325ms step_avg:42.72ms +[2025-09-11 07:59:17] [Rank 0] step:5341/10000 train_time:228014ms step_avg:42.69ms +[2025-09-11 07:59:17] [Rank 0] step:5341/10000 train_time:228014ms step_avg:42.69ms +[2025-09-11 07:59:18] [Rank 0] step:5361/10000 train_time:228702ms step_avg:42.66ms +[2025-09-11 07:59:18] [Rank 0] step:5361/10000 train_time:228702ms step_avg:42.66ms 
+[2025-09-11 07:59:18] [Rank 0] step:5381/10000 train_time:229392ms step_avg:42.63ms +[2025-09-11 07:59:18] [Rank 0] step:5381/10000 train_time:229392ms step_avg:42.63ms +[2025-09-11 07:59:19] [Rank 0] step:5401/10000 train_time:230079ms step_avg:42.60ms +[2025-09-11 07:59:19] [Rank 0] step:5401/10000 train_time:230079ms step_avg:42.60ms +[2025-09-11 07:59:20] [Rank 0] step:5421/10000 train_time:230769ms step_avg:42.57ms +[2025-09-11 07:59:20] [Rank 0] step:5421/10000 train_time:230769ms step_avg:42.57ms +[2025-09-11 07:59:20] [Rank 0] step:5441/10000 train_time:231457ms step_avg:42.54ms +[2025-09-11 07:59:20] [Rank 0] step:5441/10000 train_time:231457ms step_avg:42.54ms +[2025-09-11 07:59:21] [Rank 0] step:5461/10000 train_time:232147ms step_avg:42.51ms +[2025-09-11 07:59:21] [Rank 0] step:5461/10000 train_time:232147ms step_avg:42.51ms +[2025-09-11 07:59:22] [Rank 0] step:5481/10000 train_time:232835ms step_avg:42.48ms +[2025-09-11 07:59:22] [Rank 0] step:5481/10000 train_time:232835ms step_avg:42.48ms +[2025-09-11 07:59:22] [Rank 0] step:5501/10000 train_time:233524ms step_avg:42.45ms +[2025-09-11 07:59:22] [Rank 0] step:5501/10000 train_time:233524ms step_avg:42.45ms +[2025-09-11 07:59:23] [Rank 0] step:5521/10000 train_time:234212ms step_avg:42.42ms +[2025-09-11 07:59:23] [Rank 0] step:5521/10000 train_time:234212ms step_avg:42.42ms +[2025-09-11 07:59:24] [Rank 0] step:5541/10000 train_time:234902ms step_avg:42.39ms +[2025-09-11 07:59:24] [Rank 0] step:5541/10000 train_time:234902ms step_avg:42.39ms +[2025-09-11 07:59:25] [Rank 0] step:5561/10000 train_time:235592ms step_avg:42.37ms +[2025-09-11 07:59:25] [Rank 0] step:5561/10000 train_time:235592ms step_avg:42.37ms +[2025-09-11 07:59:25] [Rank 0] step:5581/10000 train_time:236281ms step_avg:42.34ms +[2025-09-11 07:59:25] [Rank 0] step:5581/10000 train_time:236281ms step_avg:42.34ms +[2025-09-11 07:59:26] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 07:59:26] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 07:59:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 07:59:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 07:59:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 07:59:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 07:59:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:59:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:59:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 07:59:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 07:59:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 07:59:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 07:59:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 07:59:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 07:59:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 07:59:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 07:59:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 07:59:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 07:59:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 07:59:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 07:59:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 07:59:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 07:59:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 07:59:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 07:59:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 07:59:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 07:59:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 07:59:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 07:59:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 07:59:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 07:59:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 07:59:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 07:59:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 07:59:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 07:59:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 07:59:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 07:59:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 07:59:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 07:59:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 07:59:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 07:59:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 07:59:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 07:59:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 07:59:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 07:59:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 07:59:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 07:59:36] [Rank 0] PRINT: step:5600/10000 val_loss:5.5549 total_sharp:9.3559e-04 L1_sharp:6.1200e-04 L2_sharp:1.0510e-04 L3_sharp:1.0354e-04 L4_sharp:6.3428e-05 L5_sharp:1.7007e-04 L6_sharp:9.3564e-05 L7_sharp:9.7232e-05 L8_sharp:1.5556e-04 L9_sharp:1.2158e-04 L10_sharp:1.6593e-04 L11_sharp:2.1505e-04 L12_sharp:5.4690e-04 total_fnorm:2.1375e+01 total_l1_linf:5.1968e+04 total_spectral:1.0625e+01 L1_fnorm:6.3750e+00 L2_fnorm:6.2188e+00 L3_fnorm:6.1562e+00 L4_fnorm:6.1562e+00 L5_fnorm:6.0000e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.1875e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.1562e+00 L10_fnorm:6.1250e+00 L11_fnorm:6.0938e+00 L12_fnorm:5.5625e+00 L1_l1linf:1.6172e+00 L2_l1linf:1.5391e+00 L3_l1linf:1.4844e+00 L4_l1linf:1.4297e+00 L5_l1linf:1.3672e+00 L6_l1linf:1.3516e+00 L7_l1linf:1.3594e+00 L8_l1linf:1.3672e+00 L9_l1linf:1.3750e+00 L10_l1linf:1.3984e+00 L11_l1linf:1.3438e+00 L12_l1linf:1.1094e+00 L1_spectral:7.9874e-02 L2_spectral:7.7919e-02 L3_spectral:7.8255e-02 L4_spectral:7.8114e-02 L5_spectral:7.6925e-02 L6_spectral:7.8072e-02 L7_spectral:7.8273e-02 L8_spectral:7.7436e-02 L9_spectral:7.7360e-02 L10_spectral:7.7336e-02 L11_spectral:7.7049e-02 L12_spectral:7.6893e-02 train_time:236950ms step_avg:42.31ms +[2025-09-11 07:59:36] [Rank 0] PRINT: step:5600/10000 
val_loss:5.5549 total_sharp:9.3559e-04 L1_sharp:6.1200e-04 L2_sharp:1.0510e-04 L3_sharp:1.0354e-04 L4_sharp:6.3428e-05 L5_sharp:1.7007e-04 L6_sharp:9.3564e-05 L7_sharp:9.7232e-05 L8_sharp:1.5556e-04 L9_sharp:1.2158e-04 L10_sharp:1.6593e-04 L11_sharp:2.1505e-04 L12_sharp:5.4690e-04 total_fnorm:2.1375e+01 total_l1_linf:5.1968e+04 total_spectral:1.0625e+01 L1_fnorm:6.3750e+00 L2_fnorm:6.2188e+00 L3_fnorm:6.1562e+00 L4_fnorm:6.1562e+00 L5_fnorm:6.0000e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.1875e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.1562e+00 L10_fnorm:6.1250e+00 L11_fnorm:6.0938e+00 L12_fnorm:5.5625e+00 L1_l1linf:1.6172e+00 L2_l1linf:1.5391e+00 L3_l1linf:1.4844e+00 L4_l1linf:1.4297e+00 L5_l1linf:1.3672e+00 L6_l1linf:1.3516e+00 L7_l1linf:1.3594e+00 L8_l1linf:1.3672e+00 L9_l1linf:1.3750e+00 L10_l1linf:1.3984e+00 L11_l1linf:1.3438e+00 L12_l1linf:1.1094e+00 L1_spectral:7.9874e-02 L2_spectral:7.7919e-02 L3_spectral:7.8255e-02 L4_spectral:7.8114e-02 L5_spectral:7.6925e-02 L6_spectral:7.8072e-02 L7_spectral:7.8273e-02 L8_spectral:7.7436e-02 L9_spectral:7.7360e-02 L10_spectral:7.7336e-02 L11_spectral:7.7049e-02 L12_spectral:7.6893e-02 train_time:236950ms step_avg:42.31ms +[2025-09-11 07:59:37] [Rank 0] step:5601/10000 train_time:238272ms step_avg:42.54ms +[2025-09-11 07:59:37] [Rank 0] step:5601/10000 train_time:238272ms step_avg:42.54ms +[2025-09-11 07:59:38] [Rank 0] step:5621/10000 train_time:238999ms step_avg:42.52ms +[2025-09-11 07:59:38] [Rank 0] step:5621/10000 train_time:238999ms step_avg:42.52ms +[2025-09-11 07:59:38] [Rank 0] step:5641/10000 train_time:239688ms step_avg:42.49ms +[2025-09-11 07:59:38] [Rank 0] step:5641/10000 train_time:239688ms step_avg:42.49ms +[2025-09-11 07:59:39] [Rank 0] step:5661/10000 train_time:240377ms step_avg:42.46ms +[2025-09-11 07:59:39] [Rank 0] step:5661/10000 train_time:240377ms step_avg:42.46ms +[2025-09-11 07:59:40] [Rank 0] step:5681/10000 train_time:241065ms step_avg:42.43ms +[2025-09-11 07:59:40] [Rank 0] step:5681/10000 
train_time:241065ms step_avg:42.43ms +[2025-09-11 07:59:40] [Rank 0] step:5701/10000 train_time:241755ms step_avg:42.41ms +[2025-09-11 07:59:40] [Rank 0] step:5701/10000 train_time:241755ms step_avg:42.41ms +[2025-09-11 07:59:41] [Rank 0] step:5721/10000 train_time:242444ms step_avg:42.38ms +[2025-09-11 07:59:41] [Rank 0] step:5721/10000 train_time:242444ms step_avg:42.38ms +[2025-09-11 07:59:42] [Rank 0] step:5741/10000 train_time:243711ms step_avg:42.45ms +[2025-09-11 07:59:42] [Rank 0] step:5741/10000 train_time:243711ms step_avg:42.45ms +[2025-09-11 07:59:43] [Rank 0] step:5761/10000 train_time:244401ms step_avg:42.42ms +[2025-09-11 07:59:43] [Rank 0] step:5761/10000 train_time:244401ms step_avg:42.42ms +[2025-09-11 07:59:44] [Rank 0] step:5781/10000 train_time:245236ms step_avg:42.42ms +[2025-09-11 07:59:44] [Rank 0] step:5781/10000 train_time:245236ms step_avg:42.42ms +[2025-09-11 07:59:45] [Rank 0] step:5801/10000 train_time:246069ms step_avg:42.42ms +[2025-09-11 07:59:45] [Rank 0] step:5801/10000 train_time:246069ms step_avg:42.42ms +[2025-09-11 07:59:45] [Rank 0] step:5821/10000 train_time:246757ms step_avg:42.39ms +[2025-09-11 07:59:45] [Rank 0] step:5821/10000 train_time:246757ms step_avg:42.39ms +[2025-09-11 07:59:46] [Rank 0] step:5841/10000 train_time:247446ms step_avg:42.36ms +[2025-09-11 07:59:46] [Rank 0] step:5841/10000 train_time:247446ms step_avg:42.36ms +[2025-09-11 07:59:47] [Rank 0] step:5861/10000 train_time:248135ms step_avg:42.34ms +[2025-09-11 07:59:47] [Rank 0] step:5861/10000 train_time:248135ms step_avg:42.34ms +[2025-09-11 07:59:48] [Rank 0] step:5881/10000 train_time:248824ms step_avg:42.31ms +[2025-09-11 07:59:48] [Rank 0] step:5881/10000 train_time:248824ms step_avg:42.31ms +[2025-09-11 07:59:48] [Rank 0] step:5901/10000 train_time:249513ms step_avg:42.28ms +[2025-09-11 07:59:48] [Rank 0] step:5901/10000 train_time:249513ms step_avg:42.28ms +[2025-09-11 07:59:49] [Rank 0] step:5921/10000 train_time:250204ms step_avg:42.26ms 
+[2025-09-11 07:59:49] [Rank 0] step:5921/10000 train_time:250204ms step_avg:42.26ms +[2025-09-11 07:59:50] [Rank 0] step:5941/10000 train_time:250895ms step_avg:42.23ms +[2025-09-11 07:59:50] [Rank 0] step:5941/10000 train_time:250895ms step_avg:42.23ms +[2025-09-11 07:59:50] [Rank 0] step:5961/10000 train_time:251585ms step_avg:42.21ms +[2025-09-11 07:59:50] [Rank 0] step:5961/10000 train_time:251585ms step_avg:42.21ms +[2025-09-11 07:59:51] [Rank 0] step:5981/10000 train_time:252275ms step_avg:42.18ms +[2025-09-11 07:59:51] [Rank 0] step:5981/10000 train_time:252275ms step_avg:42.18ms +[2025-09-11 07:59:52] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 07:59:52] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 07:59:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 07:59:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 07:59:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 07:59:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 07:59:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:59:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:59:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 07:59:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 07:59:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 07:59:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 07:59:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 07:59:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 07:59:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 07:59:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 07:59:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 07:59:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 07:59:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 07:59:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 07:59:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 07:59:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 07:59:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 07:59:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 07:59:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 07:59:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 07:59:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 07:59:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 07:59:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 07:59:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 08:00:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 08:00:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 08:00:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 08:00:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 08:00:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 08:00:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 08:00:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 08:00:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 08:00:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 08:00:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 08:00:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 08:00:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 08:00:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 08:00:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 08:00:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 08:00:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:00:02] [Rank 0] PRINT: step:6000/10000 val_loss:5.5169 total_sharp:9.8711e-04 L1_sharp:1.7146e-04 L2_sharp:1.1048e-04 L3_sharp:9.7773e-05 L4_sharp:5.8582e-05 L5_sharp:1.5109e-04 L6_sharp:8.9194e-05 L7_sharp:9.2007e-05 L8_sharp:1.6978e-04 L9_sharp:1.2763e-04 L10_sharp:1.6615e-04 L11_sharp:2.3471e-04 L12_sharp:9.4478e-04 total_fnorm:2.1250e+01 total_l1_linf:5.1200e+04 total_spectral:1.0625e+01 L1_fnorm:6.3750e+00 L2_fnorm:6.2500e+00 L3_fnorm:6.1875e+00 L4_fnorm:6.1562e+00 L5_fnorm:6.0000e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.1875e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.1562e+00 L10_fnorm:6.1250e+00 L11_fnorm:6.0625e+00 L12_fnorm:5.5625e+00 L1_l1linf:1.6172e+00 L2_l1linf:1.5078e+00 L3_l1linf:1.4844e+00 L4_l1linf:1.4219e+00 L5_l1linf:1.3594e+00 L6_l1linf:1.3125e+00 L7_l1linf:1.3203e+00 L8_l1linf:1.3516e+00 L9_l1linf:1.3594e+00 L10_l1linf:1.3984e+00 L11_l1linf:1.3047e+00 L12_l1linf:1.0703e+00 L1_spectral:8.0045e-02 L2_spectral:7.8317e-02 L3_spectral:7.8798e-02 L4_spectral:7.8761e-02 L5_spectral:7.7324e-02 L6_spectral:7.8804e-02 L7_spectral:7.8488e-02 L8_spectral:7.8175e-02 L9_spectral:7.8335e-02 L10_spectral:7.7782e-02 L11_spectral:7.7423e-02 L12_spectral:7.7382e-02 train_time:252947ms step_avg:42.16ms +[2025-09-11 08:00:02] [Rank 0] PRINT: step:6000/10000 val_loss:5.5169 total_sharp:9.8711e-04 L1_sharp:1.7146e-04 L2_sharp:1.1048e-04 L3_sharp:9.7773e-05 L4_sharp:5.8582e-05 L5_sharp:1.5109e-04 L6_sharp:8.9194e-05 L7_sharp:9.2007e-05 L8_sharp:1.6978e-04 L9_sharp:1.2763e-04 L10_sharp:1.6615e-04 L11_sharp:2.3471e-04 L12_sharp:9.4478e-04 total_fnorm:2.1250e+01 total_l1_linf:5.1200e+04 total_spectral:1.0625e+01 L1_fnorm:6.3750e+00 L2_fnorm:6.2500e+00 L3_fnorm:6.1875e+00 L4_fnorm:6.1562e+00 L5_fnorm:6.0000e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.1875e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.1562e+00 L10_fnorm:6.1250e+00 L11_fnorm:6.0625e+00 L12_fnorm:5.5625e+00 L1_l1linf:1.6172e+00 L2_l1linf:1.5078e+00 L3_l1linf:1.4844e+00 L4_l1linf:1.4219e+00 L5_l1linf:1.3594e+00 
L6_l1linf:1.3125e+00 L7_l1linf:1.3203e+00 L8_l1linf:1.3516e+00 L9_l1linf:1.3594e+00 L10_l1linf:1.3984e+00 L11_l1linf:1.3047e+00 L12_l1linf:1.0703e+00 L1_spectral:8.0045e-02 L2_spectral:7.8317e-02 L3_spectral:7.8798e-02 L4_spectral:7.8761e-02 L5_spectral:7.7324e-02 L6_spectral:7.8804e-02 L7_spectral:7.8488e-02 L8_spectral:7.8175e-02 L9_spectral:7.8335e-02 L10_spectral:7.7782e-02 L11_spectral:7.7423e-02 L12_spectral:7.7382e-02 train_time:252947ms step_avg:42.16ms +[2025-09-11 08:00:03] [Rank 0] step:6001/10000 train_time:254113ms step_avg:42.35ms +[2025-09-11 08:00:03] [Rank 0] step:6001/10000 train_time:254113ms step_avg:42.35ms +[2025-09-11 08:00:04] [Rank 0] step:6021/10000 train_time:254809ms step_avg:42.32ms +[2025-09-11 08:00:04] [Rank 0] step:6021/10000 train_time:254809ms step_avg:42.32ms +[2025-09-11 08:00:04] [Rank 0] step:6041/10000 train_time:255503ms step_avg:42.29ms +[2025-09-11 08:00:04] [Rank 0] step:6041/10000 train_time:255503ms step_avg:42.29ms +[2025-09-11 08:00:05] [Rank 0] step:6061/10000 train_time:256195ms step_avg:42.27ms +[2025-09-11 08:00:05] [Rank 0] step:6061/10000 train_time:256195ms step_avg:42.27ms +[2025-09-11 08:00:06] [Rank 0] step:6081/10000 train_time:256889ms step_avg:42.24ms +[2025-09-11 08:00:06] [Rank 0] step:6081/10000 train_time:256889ms step_avg:42.24ms +[2025-09-11 08:00:06] [Rank 0] step:6101/10000 train_time:257579ms step_avg:42.22ms +[2025-09-11 08:00:06] [Rank 0] step:6101/10000 train_time:257579ms step_avg:42.22ms +[2025-09-11 08:00:07] [Rank 0] step:6121/10000 train_time:258270ms step_avg:42.19ms +[2025-09-11 08:00:07] [Rank 0] step:6121/10000 train_time:258270ms step_avg:42.19ms +[2025-09-11 08:00:08] [Rank 0] step:6141/10000 train_time:258962ms step_avg:42.17ms +[2025-09-11 08:00:08] [Rank 0] step:6141/10000 train_time:258962ms step_avg:42.17ms +[2025-09-11 08:00:08] [Rank 0] step:6161/10000 train_time:259653ms step_avg:42.14ms +[2025-09-11 08:00:08] [Rank 0] step:6161/10000 train_time:259653ms step_avg:42.14ms 
+[2025-09-11 08:00:09] [Rank 0] step:6181/10000 train_time:260342ms step_avg:42.12ms +[2025-09-11 08:00:09] [Rank 0] step:6181/10000 train_time:260342ms step_avg:42.12ms +[2025-09-11 08:00:10] [Rank 0] step:6201/10000 train_time:261033ms step_avg:42.10ms +[2025-09-11 08:00:10] [Rank 0] step:6201/10000 train_time:261033ms step_avg:42.10ms +[2025-09-11 08:00:10] [Rank 0] step:6221/10000 train_time:261724ms step_avg:42.07ms +[2025-09-11 08:00:10] [Rank 0] step:6221/10000 train_time:261724ms step_avg:42.07ms +[2025-09-11 08:00:11] [Rank 0] step:6241/10000 train_time:262416ms step_avg:42.05ms +[2025-09-11 08:00:11] [Rank 0] step:6241/10000 train_time:262416ms step_avg:42.05ms +[2025-09-11 08:00:12] [Rank 0] step:6261/10000 train_time:263105ms step_avg:42.02ms +[2025-09-11 08:00:12] [Rank 0] step:6261/10000 train_time:263105ms step_avg:42.02ms +[2025-09-11 08:00:13] [Rank 0] step:6281/10000 train_time:263796ms step_avg:42.00ms +[2025-09-11 08:00:13] [Rank 0] step:6281/10000 train_time:263796ms step_avg:42.00ms +[2025-09-11 08:00:13] [Rank 0] step:6301/10000 train_time:264485ms step_avg:41.98ms +[2025-09-11 08:00:13] [Rank 0] step:6301/10000 train_time:264485ms step_avg:41.98ms +[2025-09-11 08:00:14] [Rank 0] step:6321/10000 train_time:265178ms step_avg:41.95ms +[2025-09-11 08:00:14] [Rank 0] step:6321/10000 train_time:265178ms step_avg:41.95ms +[2025-09-11 08:00:15] [Rank 0] step:6341/10000 train_time:265870ms step_avg:41.93ms +[2025-09-11 08:00:15] [Rank 0] step:6341/10000 train_time:265870ms step_avg:41.93ms +[2025-09-11 08:00:15] [Rank 0] step:6361/10000 train_time:266562ms step_avg:41.91ms +[2025-09-11 08:00:15] [Rank 0] step:6361/10000 train_time:266562ms step_avg:41.91ms +[2025-09-11 08:00:16] [Rank 0] step:6381/10000 train_time:267253ms step_avg:41.88ms +[2025-09-11 08:00:16] [Rank 0] step:6381/10000 train_time:267253ms step_avg:41.88ms +[2025-09-11 08:00:17] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 08:00:17] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 08:00:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 08:00:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 08:00:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 08:00:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 08:00:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:00:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:00:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 08:00:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 08:00:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 08:00:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 08:00:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 08:00:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 08:00:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 08:00:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 08:00:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 08:00:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 08:00:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 08:00:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 08:00:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 08:00:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 08:00:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 08:00:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 08:00:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 08:00:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 08:00:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 08:00:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 08:00:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 08:00:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 08:00:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 08:00:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 08:00:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 08:00:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 08:00:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 08:00:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 08:00:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 08:00:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 08:00:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 08:00:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 08:00:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 08:00:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 08:00:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 08:00:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 08:00:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 08:00:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 08:00:27] [Rank 0] PRINT: step:6400/10000 val_loss:5.4910 total_sharp:8.8802e-04 L1_sharp:2.7289e-04 L2_sharp:1.1580e-04 L3_sharp:8.1005e-05 L4_sharp:4.9708e-05 L5_sharp:1.3634e-04 L6_sharp:9.1024e-05 L7_sharp:6.8838e-05 L8_sharp:1.5567e-04 L9_sharp:1.2020e-04 L10_sharp:1.5139e-04 L11_sharp:2.1900e-04 L12_sharp:6.0463e-04 total_fnorm:1.9125e+01 total_l1_linf:4.4288e+04 total_spectral:9.6250e+00 L1_fnorm:5.7812e+00 L2_fnorm:5.6250e+00 L3_fnorm:5.5938e+00 L4_fnorm:5.5312e+00 L5_fnorm:5.4375e+00 L6_fnorm:5.5938e+00 L7_fnorm:5.5938e+00 L8_fnorm:5.4688e+00 L9_fnorm:5.5625e+00 L10_fnorm:5.5312e+00 L11_fnorm:5.5000e+00 L12_fnorm:5.0938e+00 L1_l1linf:1.4453e+00 L2_l1linf:1.3359e+00 L3_l1linf:1.3125e+00 L4_l1linf:1.2500e+00 L5_l1linf:1.1797e+00 L6_l1linf:1.1562e+00 L7_l1linf:1.1875e+00 L8_l1linf:1.1719e+00 L9_l1linf:1.1875e+00 L10_l1linf:1.2031e+00 L11_l1linf:1.1719e+00 L12_l1linf:9.7656e-01 L1_spectral:7.3710e-02 L2_spectral:7.1984e-02 L3_spectral:7.2343e-02 L4_spectral:7.2030e-02 L5_spectral:7.1102e-02 L6_spectral:7.2674e-02 L7_spectral:7.2694e-02 L8_spectral:7.2380e-02 L9_spectral:7.2142e-02 L10_spectral:7.1918e-02 L11_spectral:7.1129e-02 L12_spectral:7.0982e-02 train_time:267924ms step_avg:41.86ms +[2025-09-11 08:00:27] [Rank 0] PRINT: step:6400/10000 
val_loss:5.4910 total_sharp:8.8802e-04 L1_sharp:2.7289e-04 L2_sharp:1.1580e-04 L3_sharp:8.1005e-05 L4_sharp:4.9708e-05 L5_sharp:1.3634e-04 L6_sharp:9.1024e-05 L7_sharp:6.8838e-05 L8_sharp:1.5567e-04 L9_sharp:1.2020e-04 L10_sharp:1.5139e-04 L11_sharp:2.1900e-04 L12_sharp:6.0463e-04 total_fnorm:1.9125e+01 total_l1_linf:4.4288e+04 total_spectral:9.6250e+00 L1_fnorm:5.7812e+00 L2_fnorm:5.6250e+00 L3_fnorm:5.5938e+00 L4_fnorm:5.5312e+00 L5_fnorm:5.4375e+00 L6_fnorm:5.5938e+00 L7_fnorm:5.5938e+00 L8_fnorm:5.4688e+00 L9_fnorm:5.5625e+00 L10_fnorm:5.5312e+00 L11_fnorm:5.5000e+00 L12_fnorm:5.0938e+00 L1_l1linf:1.4453e+00 L2_l1linf:1.3359e+00 L3_l1linf:1.3125e+00 L4_l1linf:1.2500e+00 L5_l1linf:1.1797e+00 L6_l1linf:1.1562e+00 L7_l1linf:1.1875e+00 L8_l1linf:1.1719e+00 L9_l1linf:1.1875e+00 L10_l1linf:1.2031e+00 L11_l1linf:1.1719e+00 L12_l1linf:9.7656e-01 L1_spectral:7.3710e-02 L2_spectral:7.1984e-02 L3_spectral:7.2343e-02 L4_spectral:7.2030e-02 L5_spectral:7.1102e-02 L6_spectral:7.2674e-02 L7_spectral:7.2694e-02 L8_spectral:7.2380e-02 L9_spectral:7.2142e-02 L10_spectral:7.1918e-02 L11_spectral:7.1129e-02 L12_spectral:7.0982e-02 train_time:267924ms step_avg:41.86ms +[2025-09-11 08:00:28] [Rank 0] step:6401/10000 train_time:269092ms step_avg:42.04ms +[2025-09-11 08:00:28] [Rank 0] step:6401/10000 train_time:269092ms step_avg:42.04ms +[2025-09-11 08:00:29] [Rank 0] step:6421/10000 train_time:269812ms step_avg:42.02ms +[2025-09-11 08:00:29] [Rank 0] step:6421/10000 train_time:269812ms step_avg:42.02ms +[2025-09-11 08:00:29] [Rank 0] step:6441/10000 train_time:270507ms step_avg:42.00ms +[2025-09-11 08:00:29] [Rank 0] step:6441/10000 train_time:270507ms step_avg:42.00ms +[2025-09-11 08:00:30] [Rank 0] step:6461/10000 train_time:271200ms step_avg:41.97ms +[2025-09-11 08:00:30] [Rank 0] step:6461/10000 train_time:271200ms step_avg:41.97ms +[2025-09-11 08:00:31] [Rank 0] step:6481/10000 train_time:271894ms step_avg:41.95ms +[2025-09-11 08:00:31] [Rank 0] step:6481/10000 
train_time:271894ms step_avg:41.95ms +[2025-09-11 08:00:31] [Rank 0] step:6501/10000 train_time:272588ms step_avg:41.93ms +[2025-09-11 08:00:31] [Rank 0] step:6501/10000 train_time:272588ms step_avg:41.93ms +[2025-09-11 08:00:32] [Rank 0] step:6521/10000 train_time:273281ms step_avg:41.91ms +[2025-09-11 08:00:32] [Rank 0] step:6521/10000 train_time:273281ms step_avg:41.91ms +[2025-09-11 08:00:33] [Rank 0] step:6541/10000 train_time:273974ms step_avg:41.89ms +[2025-09-11 08:00:33] [Rank 0] step:6541/10000 train_time:273974ms step_avg:41.89ms +[2025-09-11 08:00:33] [Rank 0] step:6561/10000 train_time:274667ms step_avg:41.86ms +[2025-09-11 08:00:33] [Rank 0] step:6561/10000 train_time:274667ms step_avg:41.86ms +[2025-09-11 08:00:34] [Rank 0] step:6581/10000 train_time:275360ms step_avg:41.84ms +[2025-09-11 08:00:34] [Rank 0] step:6581/10000 train_time:275360ms step_avg:41.84ms +[2025-09-11 08:00:35] [Rank 0] step:6601/10000 train_time:276051ms step_avg:41.82ms +[2025-09-11 08:00:35] [Rank 0] step:6601/10000 train_time:276051ms step_avg:41.82ms +[2025-09-11 08:00:35] [Rank 0] step:6621/10000 train_time:276742ms step_avg:41.80ms +[2025-09-11 08:00:35] [Rank 0] step:6621/10000 train_time:276742ms step_avg:41.80ms +[2025-09-11 08:00:36] [Rank 0] step:6641/10000 train_time:277434ms step_avg:41.78ms +[2025-09-11 08:00:36] [Rank 0] step:6641/10000 train_time:277434ms step_avg:41.78ms +[2025-09-11 08:00:37] [Rank 0] step:6661/10000 train_time:278127ms step_avg:41.75ms +[2025-09-11 08:00:37] [Rank 0] step:6661/10000 train_time:278127ms step_avg:41.75ms +[2025-09-11 08:00:38] [Rank 0] step:6681/10000 train_time:278826ms step_avg:41.73ms +[2025-09-11 08:00:38] [Rank 0] step:6681/10000 train_time:278826ms step_avg:41.73ms +[2025-09-11 08:00:38] [Rank 0] step:6701/10000 train_time:279524ms step_avg:41.71ms +[2025-09-11 08:00:38] [Rank 0] step:6701/10000 train_time:279524ms step_avg:41.71ms +[2025-09-11 08:00:39] [Rank 0] step:6721/10000 train_time:280224ms step_avg:41.69ms 
+[2025-09-11 08:00:39] [Rank 0] step:6721/10000 train_time:280224ms step_avg:41.69ms +[2025-09-11 08:00:40] [Rank 0] step:6741/10000 train_time:280924ms step_avg:41.67ms +[2025-09-11 08:00:40] [Rank 0] step:6741/10000 train_time:280924ms step_avg:41.67ms +[2025-09-11 08:00:40] [Rank 0] step:6761/10000 train_time:281623ms step_avg:41.65ms +[2025-09-11 08:00:40] [Rank 0] step:6761/10000 train_time:281623ms step_avg:41.65ms +[2025-09-11 08:00:41] [Rank 0] step:6781/10000 train_time:282323ms step_avg:41.63ms +[2025-09-11 08:00:41] [Rank 0] step:6781/10000 train_time:282323ms step_avg:41.63ms +[2025-09-11 08:00:42] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 08:00:42] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 08:00:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 08:00:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 08:00:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 08:00:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 08:00:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:00:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:00:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 08:00:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 08:00:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 08:00:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 08:00:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 08:00:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 08:00:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 08:00:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 08:00:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 08:00:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 08:00:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 08:00:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 08:00:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 08:00:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 08:00:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 08:00:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 08:00:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 08:00:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 08:00:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 08:00:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 08:00:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 08:00:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 08:00:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 08:00:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 08:00:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 08:00:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 08:00:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 08:00:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 08:00:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 08:00:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 08:00:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 08:00:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 08:00:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 08:00:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 08:00:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 08:00:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 08:00:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 08:00:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:00:52] [Rank 0] PRINT: step:6800/10000 val_loss:5.4635 total_sharp:7.4395e-04 L1_sharp:4.3374e-04 L2_sharp:1.1710e-04 L3_sharp:7.7916e-05 L4_sharp:3.1035e-05 L5_sharp:1.3093e-04 L6_sharp:8.2929e-05 L7_sharp:8.3995e-05 L8_sharp:1.2137e-04 L9_sharp:1.2518e-04 L10_sharp:1.4658e-04 L11_sharp:1.9368e-04 L12_sharp:4.8713e-04 total_fnorm:1.6750e+01 total_l1_linf:3.7376e+04 total_spectral:8.5000e+00 L1_fnorm:5.1250e+00 L2_fnorm:4.9688e+00 L3_fnorm:4.9375e+00 L4_fnorm:4.9062e+00 L5_fnorm:4.7812e+00 L6_fnorm:4.9375e+00 L7_fnorm:4.9375e+00 L8_fnorm:4.8125e+00 L9_fnorm:4.9062e+00 L10_fnorm:4.9062e+00 L11_fnorm:4.9062e+00 L12_fnorm:4.5625e+00 L1_l1linf:1.2188e+00 L2_l1linf:1.1328e+00 L3_l1linf:1.1016e+00 L4_l1linf:1.0859e+00 L5_l1linf:1.0000e+00 L6_l1linf:1.0000e+00 L7_l1linf:9.4922e-01 L8_l1linf:9.8438e-01 L9_l1linf:9.9219e-01 L10_l1linf:1.0078e+00 L11_l1linf:9.8828e-01 L12_l1linf:8.3984e-01 L1_spectral:6.6460e-02 L2_spectral:6.4913e-02 L3_spectral:6.5306e-02 L4_spectral:6.5353e-02 L5_spectral:6.4275e-02 L6_spectral:6.5936e-02 L7_spectral:6.5833e-02 L8_spectral:6.5271e-02 L9_spectral:6.5197e-02 L10_spectral:6.5292e-02 L11_spectral:6.4797e-02 L12_spectral:6.3673e-02 train_time:283002ms step_avg:41.62ms +[2025-09-11 08:00:52] [Rank 0] PRINT: step:6800/10000 val_loss:5.4635 total_sharp:7.4395e-04 L1_sharp:4.3374e-04 L2_sharp:1.1710e-04 L3_sharp:7.7916e-05 L4_sharp:3.1035e-05 L5_sharp:1.3093e-04 L6_sharp:8.2929e-05 L7_sharp:8.3995e-05 L8_sharp:1.2137e-04 L9_sharp:1.2518e-04 L10_sharp:1.4658e-04 L11_sharp:1.9368e-04 L12_sharp:4.8713e-04 total_fnorm:1.6750e+01 total_l1_linf:3.7376e+04 total_spectral:8.5000e+00 L1_fnorm:5.1250e+00 L2_fnorm:4.9688e+00 L3_fnorm:4.9375e+00 L4_fnorm:4.9062e+00 L5_fnorm:4.7812e+00 L6_fnorm:4.9375e+00 L7_fnorm:4.9375e+00 L8_fnorm:4.8125e+00 L9_fnorm:4.9062e+00 L10_fnorm:4.9062e+00 L11_fnorm:4.9062e+00 L12_fnorm:4.5625e+00 L1_l1linf:1.2188e+00 L2_l1linf:1.1328e+00 L3_l1linf:1.1016e+00 L4_l1linf:1.0859e+00 L5_l1linf:1.0000e+00 
L6_l1linf:1.0000e+00 L7_l1linf:9.4922e-01 L8_l1linf:9.8438e-01 L9_l1linf:9.9219e-01 L10_l1linf:1.0078e+00 L11_l1linf:9.8828e-01 L12_l1linf:8.3984e-01 L1_spectral:6.6460e-02 L2_spectral:6.4913e-02 L3_spectral:6.5306e-02 L4_spectral:6.5353e-02 L5_spectral:6.4275e-02 L6_spectral:6.5936e-02 L7_spectral:6.5833e-02 L8_spectral:6.5271e-02 L9_spectral:6.5197e-02 L10_spectral:6.5292e-02 L11_spectral:6.4797e-02 L12_spectral:6.3673e-02 train_time:283002ms step_avg:41.62ms +[2025-09-11 08:00:53] [Rank 0] step:6801/10000 train_time:284223ms step_avg:41.79ms +[2025-09-11 08:00:53] [Rank 0] step:6801/10000 train_time:284223ms step_avg:41.79ms +[2025-09-11 08:00:54] [Rank 0] step:6821/10000 train_time:284928ms step_avg:41.77ms +[2025-09-11 08:00:54] [Rank 0] step:6821/10000 train_time:284928ms step_avg:41.77ms +[2025-09-11 08:00:55] [Rank 0] step:6841/10000 train_time:285632ms step_avg:41.75ms +[2025-09-11 08:00:55] [Rank 0] step:6841/10000 train_time:285632ms step_avg:41.75ms +[2025-09-11 08:00:55] [Rank 0] step:6861/10000 train_time:286333ms step_avg:41.73ms +[2025-09-11 08:00:55] [Rank 0] step:6861/10000 train_time:286333ms step_avg:41.73ms +[2025-09-11 08:00:56] [Rank 0] step:6881/10000 train_time:287034ms step_avg:41.71ms +[2025-09-11 08:00:56] [Rank 0] step:6881/10000 train_time:287034ms step_avg:41.71ms +[2025-09-11 08:00:57] [Rank 0] step:6901/10000 train_time:287732ms step_avg:41.69ms +[2025-09-11 08:00:57] [Rank 0] step:6901/10000 train_time:287732ms step_avg:41.69ms +[2025-09-11 08:00:57] [Rank 0] step:6921/10000 train_time:288430ms step_avg:41.67ms +[2025-09-11 08:00:57] [Rank 0] step:6921/10000 train_time:288430ms step_avg:41.67ms +[2025-09-11 08:00:58] [Rank 0] step:6941/10000 train_time:289131ms step_avg:41.66ms +[2025-09-11 08:00:58] [Rank 0] step:6941/10000 train_time:289131ms step_avg:41.66ms +[2025-09-11 08:00:59] [Rank 0] step:6961/10000 train_time:289830ms step_avg:41.64ms +[2025-09-11 08:00:59] [Rank 0] step:6961/10000 train_time:289830ms step_avg:41.64ms 
+[2025-09-11 08:00:59] [Rank 0] step:6981/10000 train_time:290531ms step_avg:41.62ms +[2025-09-11 08:00:59] [Rank 0] step:6981/10000 train_time:290531ms step_avg:41.62ms +[2025-09-11 08:01:00] [Rank 0] step:7001/10000 train_time:291231ms step_avg:41.60ms +[2025-09-11 08:01:00] [Rank 0] step:7001/10000 train_time:291231ms step_avg:41.60ms +[2025-09-11 08:01:01] [Rank 0] step:7021/10000 train_time:291931ms step_avg:41.58ms +[2025-09-11 08:01:01] [Rank 0] step:7021/10000 train_time:291931ms step_avg:41.58ms +[2025-09-11 08:01:02] [Rank 0] step:7041/10000 train_time:292631ms step_avg:41.56ms +[2025-09-11 08:01:02] [Rank 0] step:7041/10000 train_time:292631ms step_avg:41.56ms +[2025-09-11 08:01:02] [Rank 0] step:7061/10000 train_time:293332ms step_avg:41.54ms +[2025-09-11 08:01:02] [Rank 0] step:7061/10000 train_time:293332ms step_avg:41.54ms +[2025-09-11 08:01:03] [Rank 0] step:7081/10000 train_time:294032ms step_avg:41.52ms +[2025-09-11 08:01:03] [Rank 0] step:7081/10000 train_time:294032ms step_avg:41.52ms +[2025-09-11 08:01:04] [Rank 0] step:7101/10000 train_time:294732ms step_avg:41.51ms +[2025-09-11 08:01:04] [Rank 0] step:7101/10000 train_time:294732ms step_avg:41.51ms +[2025-09-11 08:01:04] [Rank 0] step:7121/10000 train_time:295433ms step_avg:41.49ms +[2025-09-11 08:01:04] [Rank 0] step:7121/10000 train_time:295433ms step_avg:41.49ms +[2025-09-11 08:01:05] [Rank 0] step:7141/10000 train_time:296132ms step_avg:41.47ms +[2025-09-11 08:01:05] [Rank 0] step:7141/10000 train_time:296132ms step_avg:41.47ms +[2025-09-11 08:01:06] [Rank 0] step:7161/10000 train_time:296833ms step_avg:41.45ms +[2025-09-11 08:01:06] [Rank 0] step:7161/10000 train_time:296833ms step_avg:41.45ms +[2025-09-11 08:01:06] [Rank 0] step:7181/10000 train_time:297532ms step_avg:41.43ms +[2025-09-11 08:01:06] [Rank 0] step:7181/10000 train_time:297532ms step_avg:41.43ms +[2025-09-11 08:01:07] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 08:01:07] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 08:01:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 08:01:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 08:01:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 08:01:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 08:01:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:01:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:01:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 08:01:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 08:01:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 08:01:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 08:01:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 08:01:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 08:01:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 08:01:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 08:01:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 08:01:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 08:01:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 08:01:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 08:01:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 08:01:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 08:01:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 08:01:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 08:01:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 08:01:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 08:01:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 08:01:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 08:01:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 08:01:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 08:01:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 08:01:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 08:01:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 08:01:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 08:01:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 08:01:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 08:01:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 08:01:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 08:01:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 08:01:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 08:01:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 08:01:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 08:01:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 08:01:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 08:01:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 08:01:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 08:01:17] [Rank 0] PRINT: step:7200/10000 val_loss:5.4343 total_sharp:6.5125e-04 L1_sharp:2.5088e-04 L2_sharp:5.5179e-05 L3_sharp:5.9907e-05 L4_sharp:6.9491e-05 L5_sharp:9.6102e-05 L6_sharp:7.4589e-05 L7_sharp:5.9577e-05 L8_sharp:1.0059e-04 L9_sharp:9.0845e-05 L10_sharp:1.2818e-04 L11_sharp:2.1045e-04 L12_sharp:5.9760e-04 total_fnorm:1.4688e+01 total_l1_linf:3.0592e+04 total_spectral:7.3750e+00 L1_fnorm:4.5000e+00 L2_fnorm:4.3438e+00 L3_fnorm:4.2812e+00 L4_fnorm:4.2812e+00 L5_fnorm:4.1562e+00 L6_fnorm:4.2812e+00 L7_fnorm:4.2812e+00 L8_fnorm:4.1562e+00 L9_fnorm:4.2500e+00 L10_fnorm:4.2500e+00 L11_fnorm:4.2500e+00 L12_fnorm:3.9375e+00 L1_l1linf:1.0391e+00 L2_l1linf:9.4531e-01 L3_l1linf:9.3750e-01 L4_l1linf:9.0625e-01 L5_l1linf:8.4375e-01 L6_l1linf:8.1250e-01 L7_l1linf:7.9688e-01 L8_l1linf:8.0859e-01 L9_l1linf:8.2812e-01 L10_l1linf:8.2422e-01 L11_l1linf:8.1250e-01 L12_l1linf:7.1094e-01 L1_spectral:5.9171e-02 L2_spectral:5.8169e-02 L3_spectral:5.7771e-02 L4_spectral:5.8187e-02 L5_spectral:5.7338e-02 L6_spectral:5.8288e-02 L7_spectral:5.8497e-02 L8_spectral:5.8227e-02 L9_spectral:5.8299e-02 L10_spectral:5.7755e-02 L11_spectral:5.7467e-02 L12_spectral:5.6641e-02 train_time:298211ms step_avg:41.42ms +[2025-09-11 08:01:17] [Rank 0] PRINT: step:7200/10000 
val_loss:5.4343 total_sharp:6.5125e-04 L1_sharp:2.5088e-04 L2_sharp:5.5179e-05 L3_sharp:5.9907e-05 L4_sharp:6.9491e-05 L5_sharp:9.6102e-05 L6_sharp:7.4589e-05 L7_sharp:5.9577e-05 L8_sharp:1.0059e-04 L9_sharp:9.0845e-05 L10_sharp:1.2818e-04 L11_sharp:2.1045e-04 L12_sharp:5.9760e-04 total_fnorm:1.4688e+01 total_l1_linf:3.0592e+04 total_spectral:7.3750e+00 L1_fnorm:4.5000e+00 L2_fnorm:4.3438e+00 L3_fnorm:4.2812e+00 L4_fnorm:4.2812e+00 L5_fnorm:4.1562e+00 L6_fnorm:4.2812e+00 L7_fnorm:4.2812e+00 L8_fnorm:4.1562e+00 L9_fnorm:4.2500e+00 L10_fnorm:4.2500e+00 L11_fnorm:4.2500e+00 L12_fnorm:3.9375e+00 L1_l1linf:1.0391e+00 L2_l1linf:9.4531e-01 L3_l1linf:9.3750e-01 L4_l1linf:9.0625e-01 L5_l1linf:8.4375e-01 L6_l1linf:8.1250e-01 L7_l1linf:7.9688e-01 L8_l1linf:8.0859e-01 L9_l1linf:8.2812e-01 L10_l1linf:8.2422e-01 L11_l1linf:8.1250e-01 L12_l1linf:7.1094e-01 L1_spectral:5.9171e-02 L2_spectral:5.8169e-02 L3_spectral:5.7771e-02 L4_spectral:5.8187e-02 L5_spectral:5.7338e-02 L6_spectral:5.8288e-02 L7_spectral:5.8497e-02 L8_spectral:5.8227e-02 L9_spectral:5.8299e-02 L10_spectral:5.7755e-02 L11_spectral:5.7467e-02 L12_spectral:5.6641e-02 train_time:298211ms step_avg:41.42ms +[2025-09-11 08:01:18] [Rank 0] step:7201/10000 train_time:299401ms step_avg:41.58ms +[2025-09-11 08:01:18] [Rank 0] step:7201/10000 train_time:299401ms step_avg:41.58ms +[2025-09-11 08:01:19] [Rank 0] step:7221/10000 train_time:300121ms step_avg:41.56ms +[2025-09-11 08:01:19] [Rank 0] step:7221/10000 train_time:300121ms step_avg:41.56ms +[2025-09-11 08:01:20] [Rank 0] step:7241/10000 train_time:300822ms step_avg:41.54ms +[2025-09-11 08:01:20] [Rank 0] step:7241/10000 train_time:300822ms step_avg:41.54ms +[2025-09-11 08:01:21] [Rank 0] step:7261/10000 train_time:301524ms step_avg:41.53ms +[2025-09-11 08:01:21] [Rank 0] step:7261/10000 train_time:301524ms step_avg:41.53ms +[2025-09-11 08:01:21] [Rank 0] step:7281/10000 train_time:302230ms step_avg:41.51ms +[2025-09-11 08:01:21] [Rank 0] step:7281/10000 
train_time:302230ms step_avg:41.51ms +[2025-09-11 08:01:22] [Rank 0] step:7301/10000 train_time:302929ms step_avg:41.49ms +[2025-09-11 08:01:22] [Rank 0] step:7301/10000 train_time:302929ms step_avg:41.49ms +[2025-09-11 08:01:23] [Rank 0] step:7321/10000 train_time:303629ms step_avg:41.47ms +[2025-09-11 08:01:23] [Rank 0] step:7321/10000 train_time:303629ms step_avg:41.47ms +[2025-09-11 08:01:23] [Rank 0] step:7341/10000 train_time:304331ms step_avg:41.46ms +[2025-09-11 08:01:23] [Rank 0] step:7341/10000 train_time:304331ms step_avg:41.46ms +[2025-09-11 08:01:24] [Rank 0] step:7361/10000 train_time:305031ms step_avg:41.44ms +[2025-09-11 08:01:24] [Rank 0] step:7361/10000 train_time:305031ms step_avg:41.44ms +[2025-09-11 08:01:25] [Rank 0] step:7381/10000 train_time:305733ms step_avg:41.42ms +[2025-09-11 08:01:25] [Rank 0] step:7381/10000 train_time:305733ms step_avg:41.42ms +[2025-09-11 08:01:25] [Rank 0] step:7401/10000 train_time:306432ms step_avg:41.40ms +[2025-09-11 08:01:25] [Rank 0] step:7401/10000 train_time:306432ms step_avg:41.40ms +[2025-09-11 08:01:26] [Rank 0] step:7421/10000 train_time:307132ms step_avg:41.39ms +[2025-09-11 08:01:26] [Rank 0] step:7421/10000 train_time:307132ms step_avg:41.39ms +[2025-09-11 08:01:27] [Rank 0] step:7441/10000 train_time:307835ms step_avg:41.37ms +[2025-09-11 08:01:27] [Rank 0] step:7441/10000 train_time:307835ms step_avg:41.37ms +[2025-09-11 08:01:28] [Rank 0] step:7461/10000 train_time:308535ms step_avg:41.35ms +[2025-09-11 08:01:28] [Rank 0] step:7461/10000 train_time:308535ms step_avg:41.35ms +[2025-09-11 08:01:28] [Rank 0] step:7481/10000 train_time:309239ms step_avg:41.34ms +[2025-09-11 08:01:28] [Rank 0] step:7481/10000 train_time:309239ms step_avg:41.34ms +[2025-09-11 08:01:29] [Rank 0] step:7501/10000 train_time:309941ms step_avg:41.32ms +[2025-09-11 08:01:29] [Rank 0] step:7501/10000 train_time:309941ms step_avg:41.32ms +[2025-09-11 08:01:30] [Rank 0] step:7521/10000 train_time:310643ms step_avg:41.30ms 
+[2025-09-11 08:01:30] [Rank 0] step:7521/10000 train_time:310643ms step_avg:41.30ms +[2025-09-11 08:01:30] [Rank 0] step:7541/10000 train_time:311342ms step_avg:41.29ms +[2025-09-11 08:01:30] [Rank 0] step:7541/10000 train_time:311342ms step_avg:41.29ms +[2025-09-11 08:01:31] [Rank 0] step:7561/10000 train_time:312045ms step_avg:41.27ms +[2025-09-11 08:01:31] [Rank 0] step:7561/10000 train_time:312045ms step_avg:41.27ms +[2025-09-11 08:01:32] [Rank 0] step:7581/10000 train_time:312746ms step_avg:41.25ms +[2025-09-11 08:01:32] [Rank 0] step:7581/10000 train_time:312746ms step_avg:41.25ms +[2025-09-11 08:01:32] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 08:01:32] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 08:01:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 08:01:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 08:01:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 08:01:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 08:01:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:01:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:01:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 08:01:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 08:01:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 08:01:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 08:01:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 08:01:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 08:01:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 08:01:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 08:01:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 08:01:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 08:01:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 08:01:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 08:01:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 08:01:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 08:01:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 08:01:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 08:01:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 08:01:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 08:01:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 08:01:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 08:01:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 08:01:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 08:01:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 08:01:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 08:01:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 08:01:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 08:01:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 08:01:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 08:01:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 08:01:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 08:01:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 08:01:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 08:01:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 08:01:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 08:01:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 08:01:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 08:01:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 08:01:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:01:44] [Rank 0] PRINT: step:7600/10000 val_loss:5.4192 total_sharp:7.0408e-04 L1_sharp:2.1563e-04 L2_sharp:9.5680e-05 L3_sharp:7.3740e-05 L4_sharp:5.7012e-05 L5_sharp:1.4478e-04 L6_sharp:8.9437e-05 L7_sharp:6.7315e-05 L8_sharp:1.3021e-04 L9_sharp:1.1613e-04 L10_sharp:1.2346e-04 L11_sharp:1.8877e-04 L12_sharp:4.2737e-04 total_fnorm:1.2438e+01 total_l1_linf:2.4192e+04 total_spectral:6.2188e+00 L1_fnorm:3.7969e+00 L2_fnorm:3.6562e+00 L3_fnorm:3.6094e+00 L4_fnorm:3.5938e+00 L5_fnorm:3.4844e+00 L6_fnorm:3.5938e+00 L7_fnorm:3.5938e+00 L8_fnorm:3.4844e+00 L9_fnorm:3.5781e+00 L10_fnorm:3.5938e+00 L11_fnorm:3.5938e+00 L12_fnorm:3.3594e+00 L1_l1linf:8.0859e-01 L2_l1linf:7.6562e-01 L3_l1linf:7.4219e-01 L4_l1linf:7.1875e-01 L5_l1linf:6.6797e-01 L6_l1linf:6.4062e-01 L7_l1linf:6.4062e-01 L8_l1linf:6.6406e-01 L9_l1linf:6.6797e-01 L10_l1linf:6.6797e-01 L11_l1linf:6.5625e-01 L12_l1linf:5.7812e-01 L1_spectral:5.1341e-02 L2_spectral:4.9947e-02 L3_spectral:5.0186e-02 L4_spectral:5.0130e-02 L5_spectral:4.9278e-02 L6_spectral:5.0799e-02 L7_spectral:5.0518e-02 L8_spectral:5.0324e-02 L9_spectral:5.0421e-02 L10_spectral:5.0095e-02 L11_spectral:4.9760e-02 L12_spectral:4.9164e-02 train_time:313429ms step_avg:41.24ms +[2025-09-11 08:01:44] [Rank 0] PRINT: step:7600/10000 val_loss:5.4192 total_sharp:7.0408e-04 L1_sharp:2.1563e-04 L2_sharp:9.5680e-05 L3_sharp:7.3740e-05 L4_sharp:5.7012e-05 L5_sharp:1.4478e-04 L6_sharp:8.9437e-05 L7_sharp:6.7315e-05 L8_sharp:1.3021e-04 L9_sharp:1.1613e-04 L10_sharp:1.2346e-04 L11_sharp:1.8877e-04 L12_sharp:4.2737e-04 total_fnorm:1.2438e+01 total_l1_linf:2.4192e+04 total_spectral:6.2188e+00 L1_fnorm:3.7969e+00 L2_fnorm:3.6562e+00 L3_fnorm:3.6094e+00 L4_fnorm:3.5938e+00 L5_fnorm:3.4844e+00 L6_fnorm:3.5938e+00 L7_fnorm:3.5938e+00 L8_fnorm:3.4844e+00 L9_fnorm:3.5781e+00 L10_fnorm:3.5938e+00 L11_fnorm:3.5938e+00 L12_fnorm:3.3594e+00 L1_l1linf:8.0859e-01 L2_l1linf:7.6562e-01 L3_l1linf:7.4219e-01 L4_l1linf:7.1875e-01 L5_l1linf:6.6797e-01 
L6_l1linf:6.4062e-01 L7_l1linf:6.4062e-01 L8_l1linf:6.6406e-01 L9_l1linf:6.6797e-01 L10_l1linf:6.6797e-01 L11_l1linf:6.5625e-01 L12_l1linf:5.7812e-01 L1_spectral:5.1341e-02 L2_spectral:4.9947e-02 L3_spectral:5.0186e-02 L4_spectral:5.0130e-02 L5_spectral:4.9278e-02 L6_spectral:5.0799e-02 L7_spectral:5.0518e-02 L8_spectral:5.0324e-02 L9_spectral:5.0421e-02 L10_spectral:5.0095e-02 L11_spectral:4.9760e-02 L12_spectral:4.9164e-02 train_time:313429ms step_avg:41.24ms +[2025-09-11 08:01:45] [Rank 0] step:7601/10000 train_time:314627ms step_avg:41.39ms +[2025-09-11 08:01:45] [Rank 0] step:7601/10000 train_time:314627ms step_avg:41.39ms +[2025-09-11 08:01:46] [Rank 0] step:7621/10000 train_time:315354ms step_avg:41.38ms +[2025-09-11 08:01:46] [Rank 0] step:7621/10000 train_time:315354ms step_avg:41.38ms +[2025-09-11 08:01:47] [Rank 0] step:7641/10000 train_time:316058ms step_avg:41.36ms +[2025-09-11 08:01:47] [Rank 0] step:7641/10000 train_time:316058ms step_avg:41.36ms +[2025-09-11 08:01:47] [Rank 0] step:7661/10000 train_time:316759ms step_avg:41.35ms +[2025-09-11 08:01:47] [Rank 0] step:7661/10000 train_time:316759ms step_avg:41.35ms +[2025-09-11 08:01:49] [Rank 0] step:7681/10000 train_time:318017ms step_avg:41.40ms +[2025-09-11 08:01:49] [Rank 0] step:7681/10000 train_time:318017ms step_avg:41.40ms +[2025-09-11 08:01:49] [Rank 0] step:7701/10000 train_time:318721ms step_avg:41.39ms +[2025-09-11 08:01:49] [Rank 0] step:7701/10000 train_time:318721ms step_avg:41.39ms +[2025-09-11 08:01:50] [Rank 0] step:7721/10000 train_time:319422ms step_avg:41.37ms +[2025-09-11 08:01:50] [Rank 0] step:7721/10000 train_time:319422ms step_avg:41.37ms +[2025-09-11 08:01:51] [Rank 0] step:7741/10000 train_time:320395ms step_avg:41.39ms +[2025-09-11 08:01:51] [Rank 0] step:7741/10000 train_time:320395ms step_avg:41.39ms +[2025-09-11 08:01:52] [Rank 0] step:7761/10000 train_time:321097ms step_avg:41.37ms +[2025-09-11 08:01:52] [Rank 0] step:7761/10000 train_time:321097ms step_avg:41.37ms 
+[2025-09-11 08:01:52] [Rank 0] step:7781/10000 train_time:321799ms step_avg:41.36ms +[2025-09-11 08:01:52] [Rank 0] step:7781/10000 train_time:321799ms step_avg:41.36ms +[2025-09-11 08:01:53] [Rank 0] step:7801/10000 train_time:322499ms step_avg:41.34ms +[2025-09-11 08:01:53] [Rank 0] step:7801/10000 train_time:322499ms step_avg:41.34ms +[2025-09-11 08:01:54] [Rank 0] step:7821/10000 train_time:323201ms step_avg:41.32ms +[2025-09-11 08:01:54] [Rank 0] step:7821/10000 train_time:323201ms step_avg:41.32ms +[2025-09-11 08:01:55] [Rank 0] step:7841/10000 train_time:323906ms step_avg:41.31ms +[2025-09-11 08:01:55] [Rank 0] step:7841/10000 train_time:323906ms step_avg:41.31ms +[2025-09-11 08:01:55] [Rank 0] step:7861/10000 train_time:324610ms step_avg:41.29ms +[2025-09-11 08:01:55] [Rank 0] step:7861/10000 train_time:324610ms step_avg:41.29ms +[2025-09-11 08:01:56] [Rank 0] step:7881/10000 train_time:325311ms step_avg:41.28ms +[2025-09-11 08:01:56] [Rank 0] step:7881/10000 train_time:325311ms step_avg:41.28ms +[2025-09-11 08:01:57] [Rank 0] step:7901/10000 train_time:326014ms step_avg:41.26ms +[2025-09-11 08:01:57] [Rank 0] step:7901/10000 train_time:326014ms step_avg:41.26ms +[2025-09-11 08:01:57] [Rank 0] step:7921/10000 train_time:326716ms step_avg:41.25ms +[2025-09-11 08:01:57] [Rank 0] step:7921/10000 train_time:326716ms step_avg:41.25ms +[2025-09-11 08:01:58] [Rank 0] step:7941/10000 train_time:327419ms step_avg:41.23ms +[2025-09-11 08:01:58] [Rank 0] step:7941/10000 train_time:327419ms step_avg:41.23ms +[2025-09-11 08:01:59] [Rank 0] step:7961/10000 train_time:328119ms step_avg:41.22ms +[2025-09-11 08:01:59] [Rank 0] step:7961/10000 train_time:328119ms step_avg:41.22ms +[2025-09-11 08:01:59] [Rank 0] step:7981/10000 train_time:328823ms step_avg:41.20ms +[2025-09-11 08:01:59] [Rank 0] step:7981/10000 train_time:328823ms step_avg:41.20ms +[2025-09-11 08:02:00] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 08:02:00] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 08:02:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 08:02:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 08:02:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 08:02:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 08:02:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:02:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:02:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 08:02:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 08:02:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 08:02:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 08:02:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 08:02:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 08:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 08:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 08:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 08:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 08:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 08:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 08:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 08:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 08:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 08:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 08:02:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 08:02:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 08:02:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 08:02:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 08:02:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 08:02:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 08:02:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 08:02:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 08:02:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 08:02:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 08:02:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 08:02:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 08:02:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 08:02:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 08:02:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 08:02:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 08:02:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 08:02:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 08:02:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 08:02:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 08:02:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 08:02:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 08:02:10] [Rank 0] PRINT: step:8000/10000 val_loss:5.4033 total_sharp:7.0548e-04 L1_sharp:2.9760e-04 L2_sharp:1.2046e-04 L3_sharp:5.7020e-05 L4_sharp:5.7800e-05 L5_sharp:1.1501e-04 L6_sharp:7.2995e-05 L7_sharp:7.3687e-05 L8_sharp:1.0646e-04 L9_sharp:9.9043e-05 L10_sharp:1.3798e-04 L11_sharp:1.8016e-04 L12_sharp:4.5960e-04 total_fnorm:9.9375e+00 total_l1_linf:1.8176e+04 total_spectral:5.0312e+00 L1_fnorm:3.1094e+00 L2_fnorm:2.9844e+00 L3_fnorm:2.9219e+00 L4_fnorm:2.9219e+00 L5_fnorm:2.8281e+00 L6_fnorm:2.9219e+00 L7_fnorm:2.9219e+00 L8_fnorm:2.8281e+00 L9_fnorm:2.9062e+00 L10_fnorm:2.9062e+00 L11_fnorm:2.9062e+00 L12_fnorm:2.7031e+00 L1_l1linf:6.3672e-01 L2_l1linf:5.8594e-01 L3_l1linf:5.7422e-01 L4_l1linf:5.5078e-01 L5_l1linf:5.1172e-01 L6_l1linf:5.0000e-01 L7_l1linf:4.9023e-01 L8_l1linf:5.0391e-01 L9_l1linf:4.9805e-01 L10_l1linf:5.0781e-01 L11_l1linf:5.0781e-01 L12_l1linf:4.5312e-01 L1_spectral:4.2674e-02 L2_spectral:4.1371e-02 L3_spectral:4.1878e-02 L4_spectral:4.1657e-02 L5_spectral:4.0967e-02 L6_spectral:4.2123e-02 L7_spectral:4.2108e-02 L8_spectral:4.1948e-02 L9_spectral:4.1785e-02 L10_spectral:4.1641e-02 L11_spectral:4.1977e-02 L12_spectral:4.0643e-02 train_time:329504ms step_avg:41.19ms +[2025-09-11 08:02:10] [Rank 0] PRINT: step:8000/10000 
val_loss:5.4033 total_sharp:7.0548e-04 L1_sharp:2.9760e-04 L2_sharp:1.2046e-04 L3_sharp:5.7020e-05 L4_sharp:5.7800e-05 L5_sharp:1.1501e-04 L6_sharp:7.2995e-05 L7_sharp:7.3687e-05 L8_sharp:1.0646e-04 L9_sharp:9.9043e-05 L10_sharp:1.3798e-04 L11_sharp:1.8016e-04 L12_sharp:4.5960e-04 total_fnorm:9.9375e+00 total_l1_linf:1.8176e+04 total_spectral:5.0312e+00 L1_fnorm:3.1094e+00 L2_fnorm:2.9844e+00 L3_fnorm:2.9219e+00 L4_fnorm:2.9219e+00 L5_fnorm:2.8281e+00 L6_fnorm:2.9219e+00 L7_fnorm:2.9219e+00 L8_fnorm:2.8281e+00 L9_fnorm:2.9062e+00 L10_fnorm:2.9062e+00 L11_fnorm:2.9062e+00 L12_fnorm:2.7031e+00 L1_l1linf:6.3672e-01 L2_l1linf:5.8594e-01 L3_l1linf:5.7422e-01 L4_l1linf:5.5078e-01 L5_l1linf:5.1172e-01 L6_l1linf:5.0000e-01 L7_l1linf:4.9023e-01 L8_l1linf:5.0391e-01 L9_l1linf:4.9805e-01 L10_l1linf:5.0781e-01 L11_l1linf:5.0781e-01 L12_l1linf:4.5312e-01 L1_spectral:4.2674e-02 L2_spectral:4.1371e-02 L3_spectral:4.1878e-02 L4_spectral:4.1657e-02 L5_spectral:4.0967e-02 L6_spectral:4.2123e-02 L7_spectral:4.2108e-02 L8_spectral:4.1948e-02 L9_spectral:4.1785e-02 L10_spectral:4.1641e-02 L11_spectral:4.1977e-02 L12_spectral:4.0643e-02 train_time:329504ms step_avg:41.19ms +[2025-09-11 08:02:11] [Rank 0] step:8001/10000 train_time:330697ms step_avg:41.33ms +[2025-09-11 08:02:11] [Rank 0] step:8001/10000 train_time:330697ms step_avg:41.33ms +[2025-09-11 08:02:12] [Rank 0] step:8021/10000 train_time:331429ms step_avg:41.32ms +[2025-09-11 08:02:12] [Rank 0] step:8021/10000 train_time:331429ms step_avg:41.32ms +[2025-09-11 08:02:13] [Rank 0] step:8041/10000 train_time:332133ms step_avg:41.30ms +[2025-09-11 08:02:13] [Rank 0] step:8041/10000 train_time:332133ms step_avg:41.30ms +[2025-09-11 08:02:14] [Rank 0] step:8061/10000 train_time:332839ms step_avg:41.29ms +[2025-09-11 08:02:14] [Rank 0] step:8061/10000 train_time:332839ms step_avg:41.29ms +[2025-09-11 08:02:14] [Rank 0] step:8081/10000 train_time:333539ms step_avg:41.27ms +[2025-09-11 08:02:14] [Rank 0] step:8081/10000 
train_time:333539ms step_avg:41.27ms +[2025-09-11 08:02:15] [Rank 0] step:8101/10000 train_time:334240ms step_avg:41.26ms +[2025-09-11 08:02:15] [Rank 0] step:8101/10000 train_time:334240ms step_avg:41.26ms +[2025-09-11 08:02:16] [Rank 0] step:8121/10000 train_time:334946ms step_avg:41.24ms +[2025-09-11 08:02:16] [Rank 0] step:8121/10000 train_time:334946ms step_avg:41.24ms +[2025-09-11 08:02:17] [Rank 0] step:8141/10000 train_time:336401ms step_avg:41.32ms +[2025-09-11 08:02:17] [Rank 0] step:8141/10000 train_time:336401ms step_avg:41.32ms +[2025-09-11 08:02:18] [Rank 0] step:8161/10000 train_time:337108ms step_avg:41.31ms +[2025-09-11 08:02:18] [Rank 0] step:8161/10000 train_time:337108ms step_avg:41.31ms +[2025-09-11 08:02:18] [Rank 0] step:8181/10000 train_time:337823ms step_avg:41.29ms +[2025-09-11 08:02:18] [Rank 0] step:8181/10000 train_time:337823ms step_avg:41.29ms +[2025-09-11 08:02:19] [Rank 0] step:8201/10000 train_time:338533ms step_avg:41.28ms +[2025-09-11 08:02:19] [Rank 0] step:8201/10000 train_time:338533ms step_avg:41.28ms +[2025-09-11 08:02:20] [Rank 0] step:8221/10000 train_time:339243ms step_avg:41.27ms +[2025-09-11 08:02:20] [Rank 0] step:8221/10000 train_time:339243ms step_avg:41.27ms +[2025-09-11 08:02:21] [Rank 0] step:8241/10000 train_time:339962ms step_avg:41.25ms +[2025-09-11 08:02:21] [Rank 0] step:8241/10000 train_time:339962ms step_avg:41.25ms +[2025-09-11 08:02:21] [Rank 0] step:8261/10000 train_time:340671ms step_avg:41.24ms +[2025-09-11 08:02:21] [Rank 0] step:8261/10000 train_time:340671ms step_avg:41.24ms +[2025-09-11 08:02:22] [Rank 0] step:8281/10000 train_time:341377ms step_avg:41.22ms +[2025-09-11 08:02:22] [Rank 0] step:8281/10000 train_time:341377ms step_avg:41.22ms +[2025-09-11 08:02:23] [Rank 0] step:8301/10000 train_time:342086ms step_avg:41.21ms +[2025-09-11 08:02:23] [Rank 0] step:8301/10000 train_time:342086ms step_avg:41.21ms +[2025-09-11 08:02:23] [Rank 0] step:8321/10000 train_time:342795ms step_avg:41.20ms 
+[2025-09-11 08:02:23] [Rank 0] step:8321/10000 train_time:342795ms step_avg:41.20ms +[2025-09-11 08:02:24] [Rank 0] step:8341/10000 train_time:343511ms step_avg:41.18ms +[2025-09-11 08:02:24] [Rank 0] step:8341/10000 train_time:343511ms step_avg:41.18ms +[2025-09-11 08:02:25] [Rank 0] step:8361/10000 train_time:344216ms step_avg:41.17ms +[2025-09-11 08:02:25] [Rank 0] step:8361/10000 train_time:344216ms step_avg:41.17ms +[2025-09-11 08:02:26] [Rank 0] step:8381/10000 train_time:344928ms step_avg:41.16ms +[2025-09-11 08:02:26] [Rank 0] step:8381/10000 train_time:344928ms step_avg:41.16ms +[2025-09-11 08:02:26] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 08:02:26] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 08:02:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 08:02:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 08:02:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 08:02:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 08:02:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:02:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:02:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 08:02:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 08:02:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 08:02:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 08:02:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 08:02:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 08:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 08:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 08:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 08:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 08:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 08:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 08:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 08:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 08:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 08:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 08:02:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 08:02:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 08:02:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 08:02:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 08:02:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 08:02:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 08:02:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 08:02:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 08:02:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 08:02:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 08:02:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 08:02:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 08:02:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 08:02:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 08:02:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 08:02:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 08:02:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 08:02:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 08:02:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 08:02:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 08:02:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 08:02:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:02:36] [Rank 0] PRINT: step:8400/10000 val_loss:5.3860 total_sharp:5.3145e-04 L1_sharp:1.2935e-04 L2_sharp:9.0355e-05 L3_sharp:4.5213e-05 L4_sharp:3.4613e-05 L5_sharp:9.3311e-05 L6_sharp:4.1338e-05 L7_sharp:5.2319e-05 L8_sharp:9.5639e-05 L9_sharp:8.9405e-05 L10_sharp:1.0437e-04 L11_sharp:1.5835e-04 L12_sharp:4.5939e-04 total_fnorm:7.9375e+00 total_l1_linf:1.3056e+04 total_spectral:3.9375e+00 L1_fnorm:2.4219e+00 L2_fnorm:2.3438e+00 L3_fnorm:2.2812e+00 L4_fnorm:2.2812e+00 L5_fnorm:2.2031e+00 L6_fnorm:2.2656e+00 L7_fnorm:2.2656e+00 L8_fnorm:2.2031e+00 L9_fnorm:2.2500e+00 L10_fnorm:2.2656e+00 L11_fnorm:2.2812e+00 L12_fnorm:2.1250e+00 L1_l1linf:4.9023e-01 L2_l1linf:4.3359e-01 L3_l1linf:4.2383e-01 L4_l1linf:4.3750e-01 L5_l1linf:3.8477e-01 L6_l1linf:3.6719e-01 L7_l1linf:3.5938e-01 L8_l1linf:3.6523e-01 L9_l1linf:3.6328e-01 L10_l1linf:3.7500e-01 L11_l1linf:3.7109e-01 L12_l1linf:3.4180e-01 L1_spectral:3.3939e-02 L2_spectral:3.3013e-02 L3_spectral:3.2590e-02 L4_spectral:3.3352e-02 L5_spectral:3.2936e-02 L6_spectral:3.3477e-02 L7_spectral:3.3611e-02 L8_spectral:3.3489e-02 L9_spectral:3.3316e-02 L10_spectral:3.3161e-02 L11_spectral:3.3287e-02 L12_spectral:3.2512e-02 train_time:345618ms step_avg:41.15ms +[2025-09-11 08:02:36] [Rank 0] PRINT: step:8400/10000 val_loss:5.3860 total_sharp:5.3145e-04 L1_sharp:1.2935e-04 L2_sharp:9.0355e-05 L3_sharp:4.5213e-05 L4_sharp:3.4613e-05 L5_sharp:9.3311e-05 L6_sharp:4.1338e-05 L7_sharp:5.2319e-05 L8_sharp:9.5639e-05 L9_sharp:8.9405e-05 L10_sharp:1.0437e-04 L11_sharp:1.5835e-04 L12_sharp:4.5939e-04 total_fnorm:7.9375e+00 total_l1_linf:1.3056e+04 total_spectral:3.9375e+00 L1_fnorm:2.4219e+00 L2_fnorm:2.3438e+00 L3_fnorm:2.2812e+00 L4_fnorm:2.2812e+00 L5_fnorm:2.2031e+00 L6_fnorm:2.2656e+00 L7_fnorm:2.2656e+00 L8_fnorm:2.2031e+00 L9_fnorm:2.2500e+00 L10_fnorm:2.2656e+00 L11_fnorm:2.2812e+00 L12_fnorm:2.1250e+00 L1_l1linf:4.9023e-01 L2_l1linf:4.3359e-01 L3_l1linf:4.2383e-01 L4_l1linf:4.3750e-01 L5_l1linf:3.8477e-01 
L6_l1linf:3.6719e-01 L7_l1linf:3.5938e-01 L8_l1linf:3.6523e-01 L9_l1linf:3.6328e-01 L10_l1linf:3.7500e-01 L11_l1linf:3.7109e-01 L12_l1linf:3.4180e-01 L1_spectral:3.3939e-02 L2_spectral:3.3013e-02 L3_spectral:3.2590e-02 L4_spectral:3.3352e-02 L5_spectral:3.2936e-02 L6_spectral:3.3477e-02 L7_spectral:3.3611e-02 L8_spectral:3.3489e-02 L9_spectral:3.3316e-02 L10_spectral:3.3161e-02 L11_spectral:3.3287e-02 L12_spectral:3.2512e-02 train_time:345618ms step_avg:41.15ms +[2025-09-11 08:02:38] [Rank 0] step:8401/10000 train_time:347227ms step_avg:41.33ms +[2025-09-11 08:02:38] [Rank 0] step:8401/10000 train_time:347227ms step_avg:41.33ms +[2025-09-11 08:02:39] [Rank 0] step:8421/10000 train_time:347960ms step_avg:41.32ms +[2025-09-11 08:02:39] [Rank 0] step:8421/10000 train_time:347960ms step_avg:41.32ms +[2025-09-11 08:02:39] [Rank 0] step:8441/10000 train_time:348672ms step_avg:41.31ms +[2025-09-11 08:02:39] [Rank 0] step:8441/10000 train_time:348672ms step_avg:41.31ms +[2025-09-11 08:02:40] [Rank 0] step:8461/10000 train_time:349383ms step_avg:41.29ms +[2025-09-11 08:02:40] [Rank 0] step:8461/10000 train_time:349383ms step_avg:41.29ms +[2025-09-11 08:02:41] [Rank 0] step:8481/10000 train_time:350096ms step_avg:41.28ms +[2025-09-11 08:02:41] [Rank 0] step:8481/10000 train_time:350096ms step_avg:41.28ms +[2025-09-11 08:02:41] [Rank 0] step:8501/10000 train_time:350806ms step_avg:41.27ms +[2025-09-11 08:02:41] [Rank 0] step:8501/10000 train_time:350806ms step_avg:41.27ms +[2025-09-11 08:02:42] [Rank 0] step:8521/10000 train_time:351515ms step_avg:41.25ms +[2025-09-11 08:02:42] [Rank 0] step:8521/10000 train_time:351515ms step_avg:41.25ms +[2025-09-11 08:02:43] [Rank 0] step:8541/10000 train_time:352223ms step_avg:41.24ms +[2025-09-11 08:02:43] [Rank 0] step:8541/10000 train_time:352223ms step_avg:41.24ms +[2025-09-11 08:02:44] [Rank 0] step:8561/10000 train_time:352938ms step_avg:41.23ms +[2025-09-11 08:02:44] [Rank 0] step:8561/10000 train_time:352938ms step_avg:41.23ms 
+[2025-09-11 08:02:44] [Rank 0] step:8581/10000 train_time:353650ms step_avg:41.21ms +[2025-09-11 08:02:44] [Rank 0] step:8581/10000 train_time:353650ms step_avg:41.21ms +[2025-09-11 08:02:45] [Rank 0] step:8601/10000 train_time:354361ms step_avg:41.20ms +[2025-09-11 08:02:45] [Rank 0] step:8601/10000 train_time:354361ms step_avg:41.20ms +[2025-09-11 08:02:46] [Rank 0] step:8621/10000 train_time:355071ms step_avg:41.19ms +[2025-09-11 08:02:46] [Rank 0] step:8621/10000 train_time:355071ms step_avg:41.19ms +[2025-09-11 08:02:46] [Rank 0] step:8641/10000 train_time:355779ms step_avg:41.17ms +[2025-09-11 08:02:46] [Rank 0] step:8641/10000 train_time:355779ms step_avg:41.17ms +[2025-09-11 08:02:47] [Rank 0] step:8661/10000 train_time:356489ms step_avg:41.16ms +[2025-09-11 08:02:47] [Rank 0] step:8661/10000 train_time:356489ms step_avg:41.16ms +[2025-09-11 08:02:48] [Rank 0] step:8681/10000 train_time:357200ms step_avg:41.15ms +[2025-09-11 08:02:48] [Rank 0] step:8681/10000 train_time:357200ms step_avg:41.15ms +[2025-09-11 08:02:49] [Rank 0] step:8701/10000 train_time:357907ms step_avg:41.13ms +[2025-09-11 08:02:49] [Rank 0] step:8701/10000 train_time:357907ms step_avg:41.13ms +[2025-09-11 08:02:49] [Rank 0] step:8721/10000 train_time:358620ms step_avg:41.12ms +[2025-09-11 08:02:49] [Rank 0] step:8721/10000 train_time:358620ms step_avg:41.12ms +[2025-09-11 08:02:50] [Rank 0] step:8741/10000 train_time:359326ms step_avg:41.11ms +[2025-09-11 08:02:50] [Rank 0] step:8741/10000 train_time:359326ms step_avg:41.11ms +[2025-09-11 08:02:51] [Rank 0] step:8761/10000 train_time:360039ms step_avg:41.10ms +[2025-09-11 08:02:51] [Rank 0] step:8761/10000 train_time:360039ms step_avg:41.10ms +[2025-09-11 08:02:52] [Rank 0] step:8781/10000 train_time:361294ms step_avg:41.14ms +[2025-09-11 08:02:52] [Rank 0] step:8781/10000 train_time:361294ms step_avg:41.14ms +[2025-09-11 08:02:53] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 08:02:53] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 08:02:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 08:02:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 08:02:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 08:02:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 08:02:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:02:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:02:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 08:02:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 08:02:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 08:02:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 08:02:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 08:02:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 08:02:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 08:02:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 08:02:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 08:02:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 08:02:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 08:02:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 08:02:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 08:02:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 08:02:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 08:02:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 08:03:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 08:03:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 08:03:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 08:03:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 08:03:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 08:03:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 08:03:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 08:03:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 08:03:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 08:03:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 08:03:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 08:03:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 08:03:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 08:03:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 08:03:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 08:03:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 08:03:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 08:03:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 08:03:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 08:03:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 08:03:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 08:03:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 08:03:02] [Rank 0] PRINT: step:8800/10000 val_loss:5.3736 total_sharp:4.0584e-04 L1_sharp:1.2474e-04 L2_sharp:7.1847e-05 L3_sharp:3.7247e-05 L4_sharp:3.6250e-05 L5_sharp:7.0584e-05 L6_sharp:4.1529e-05 L7_sharp:4.4065e-05 L8_sharp:6.7876e-05 L9_sharp:6.2341e-05 L10_sharp:8.6404e-05 L11_sharp:1.3070e-04 L12_sharp:3.7419e-04 total_fnorm:5.7812e+00 total_l1_linf:8.5760e+03 total_spectral:2.8750e+00 L1_fnorm:1.7891e+00 L2_fnorm:1.7109e+00 L3_fnorm:1.6797e+00 L4_fnorm:1.6562e+00 L5_fnorm:1.6094e+00 L6_fnorm:1.6562e+00 L7_fnorm:1.6484e+00 L8_fnorm:1.6094e+00 L9_fnorm:1.6406e+00 L10_fnorm:1.6562e+00 L11_fnorm:1.6719e+00 L12_fnorm:1.5781e+00 L1_l1linf:3.2227e-01 L2_l1linf:2.9102e-01 L3_l1linf:2.7734e-01 L4_l1linf:2.6953e-01 L5_l1linf:2.6367e-01 L6_l1linf:2.3926e-01 L7_l1linf:2.4609e-01 L8_l1linf:2.3438e-01 L9_l1linf:2.3535e-01 L10_l1linf:2.5000e-01 L11_l1linf:2.4609e-01 L12_l1linf:2.3633e-01 L1_spectral:2.5468e-02 L2_spectral:2.4593e-02 L3_spectral:2.4679e-02 L4_spectral:2.4731e-02 L5_spectral:2.5203e-02 L6_spectral:2.4941e-02 L7_spectral:2.5056e-02 L8_spectral:2.5031e-02 L9_spectral:2.4983e-02 L10_spectral:2.4777e-02 L11_spectral:2.5158e-02 L12_spectral:2.4778e-02 train_time:361981ms step_avg:41.13ms +[2025-09-11 08:03:02] [Rank 0] PRINT: step:8800/10000 
val_loss:5.3736 total_sharp:4.0584e-04 L1_sharp:1.2474e-04 L2_sharp:7.1847e-05 L3_sharp:3.7247e-05 L4_sharp:3.6250e-05 L5_sharp:7.0584e-05 L6_sharp:4.1529e-05 L7_sharp:4.4065e-05 L8_sharp:6.7876e-05 L9_sharp:6.2341e-05 L10_sharp:8.6404e-05 L11_sharp:1.3070e-04 L12_sharp:3.7419e-04 total_fnorm:5.7812e+00 total_l1_linf:8.5760e+03 total_spectral:2.8750e+00 L1_fnorm:1.7891e+00 L2_fnorm:1.7109e+00 L3_fnorm:1.6797e+00 L4_fnorm:1.6562e+00 L5_fnorm:1.6094e+00 L6_fnorm:1.6562e+00 L7_fnorm:1.6484e+00 L8_fnorm:1.6094e+00 L9_fnorm:1.6406e+00 L10_fnorm:1.6562e+00 L11_fnorm:1.6719e+00 L12_fnorm:1.5781e+00 L1_l1linf:3.2227e-01 L2_l1linf:2.9102e-01 L3_l1linf:2.7734e-01 L4_l1linf:2.6953e-01 L5_l1linf:2.6367e-01 L6_l1linf:2.3926e-01 L7_l1linf:2.4609e-01 L8_l1linf:2.3438e-01 L9_l1linf:2.3535e-01 L10_l1linf:2.5000e-01 L11_l1linf:2.4609e-01 L12_l1linf:2.3633e-01 L1_spectral:2.5468e-02 L2_spectral:2.4593e-02 L3_spectral:2.4679e-02 L4_spectral:2.4731e-02 L5_spectral:2.5203e-02 L6_spectral:2.4941e-02 L7_spectral:2.5056e-02 L8_spectral:2.5031e-02 L9_spectral:2.4983e-02 L10_spectral:2.4777e-02 L11_spectral:2.5158e-02 L12_spectral:2.4778e-02 train_time:361981ms step_avg:41.13ms +[2025-09-11 08:03:03] [Rank 0] step:8801/10000 train_time:363179ms step_avg:41.27ms +[2025-09-11 08:03:03] [Rank 0] step:8801/10000 train_time:363179ms step_avg:41.27ms +[2025-09-11 08:03:04] [Rank 0] step:8821/10000 train_time:363928ms step_avg:41.26ms +[2025-09-11 08:03:04] [Rank 0] step:8821/10000 train_time:363928ms step_avg:41.26ms +[2025-09-11 08:03:05] [Rank 0] step:8841/10000 train_time:364639ms step_avg:41.24ms +[2025-09-11 08:03:05] [Rank 0] step:8841/10000 train_time:364639ms step_avg:41.24ms +[2025-09-11 08:03:06] [Rank 0] step:8861/10000 train_time:365349ms step_avg:41.23ms +[2025-09-11 08:03:06] [Rank 0] step:8861/10000 train_time:365349ms step_avg:41.23ms +[2025-09-11 08:03:06] [Rank 0] step:8881/10000 train_time:366058ms step_avg:41.22ms +[2025-09-11 08:03:06] [Rank 0] step:8881/10000 
train_time:366058ms step_avg:41.22ms +[2025-09-11 08:03:07] [Rank 0] step:8901/10000 train_time:366770ms step_avg:41.21ms +[2025-09-11 08:03:07] [Rank 0] step:8901/10000 train_time:366770ms step_avg:41.21ms +[2025-09-11 08:03:08] [Rank 0] step:8921/10000 train_time:367476ms step_avg:41.19ms +[2025-09-11 08:03:08] [Rank 0] step:8921/10000 train_time:367476ms step_avg:41.19ms +[2025-09-11 08:03:08] [Rank 0] step:8941/10000 train_time:368187ms step_avg:41.18ms +[2025-09-11 08:03:08] [Rank 0] step:8941/10000 train_time:368187ms step_avg:41.18ms +[2025-09-11 08:03:09] [Rank 0] step:8961/10000 train_time:368905ms step_avg:41.17ms +[2025-09-11 08:03:09] [Rank 0] step:8961/10000 train_time:368905ms step_avg:41.17ms +[2025-09-11 08:03:10] [Rank 0] step:8981/10000 train_time:369619ms step_avg:41.16ms +[2025-09-11 08:03:10] [Rank 0] step:8981/10000 train_time:369619ms step_avg:41.16ms +[2025-09-11 08:03:11] [Rank 0] step:9001/10000 train_time:370323ms step_avg:41.14ms +[2025-09-11 08:03:11] [Rank 0] step:9001/10000 train_time:370323ms step_avg:41.14ms +[2025-09-11 08:03:11] [Rank 0] step:9021/10000 train_time:371034ms step_avg:41.13ms +[2025-09-11 08:03:11] [Rank 0] step:9021/10000 train_time:371034ms step_avg:41.13ms +[2025-09-11 08:03:12] [Rank 0] step:9041/10000 train_time:371747ms step_avg:41.12ms +[2025-09-11 08:03:12] [Rank 0] step:9041/10000 train_time:371747ms step_avg:41.12ms +[2025-09-11 08:03:13] [Rank 0] step:9061/10000 train_time:372455ms step_avg:41.11ms +[2025-09-11 08:03:13] [Rank 0] step:9061/10000 train_time:372455ms step_avg:41.11ms +[2025-09-11 08:03:13] [Rank 0] step:9081/10000 train_time:373166ms step_avg:41.09ms +[2025-09-11 08:03:13] [Rank 0] step:9081/10000 train_time:373166ms step_avg:41.09ms +[2025-09-11 08:03:14] [Rank 0] step:9101/10000 train_time:373880ms step_avg:41.08ms +[2025-09-11 08:03:14] [Rank 0] step:9101/10000 train_time:373880ms step_avg:41.08ms +[2025-09-11 08:03:15] [Rank 0] step:9121/10000 train_time:374595ms step_avg:41.07ms 
+[2025-09-11 08:03:15] [Rank 0] step:9121/10000 train_time:374595ms step_avg:41.07ms +[2025-09-11 08:03:16] [Rank 0] step:9141/10000 train_time:375303ms step_avg:41.06ms +[2025-09-11 08:03:16] [Rank 0] step:9141/10000 train_time:375303ms step_avg:41.06ms +[2025-09-11 08:03:16] [Rank 0] step:9161/10000 train_time:376015ms step_avg:41.05ms +[2025-09-11 08:03:16] [Rank 0] step:9161/10000 train_time:376015ms step_avg:41.05ms +[2025-09-11 08:03:17] [Rank 0] step:9181/10000 train_time:376727ms step_avg:41.03ms +[2025-09-11 08:03:17] [Rank 0] step:9181/10000 train_time:376727ms step_avg:41.03ms +[2025-09-11 08:03:18] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 08:03:18] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 08:03:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 08:03:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 08:03:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 08:03:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 08:03:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:03:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:03:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 08:03:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 08:03:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 08:03:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 08:03:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 08:03:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 08:03:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 08:03:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 08:03:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 08:03:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 08:03:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 08:03:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 08:03:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 08:03:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 08:03:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 08:03:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 08:03:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 08:03:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 08:03:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 08:03:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 08:03:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 08:03:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 08:03:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 08:03:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 08:03:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 08:03:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 08:03:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 08:03:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 08:03:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 08:03:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 08:03:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 08:03:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 08:03:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 08:03:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 08:03:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 08:03:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 08:03:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 08:03:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:03:28] [Rank 0] PRINT: step:9200/10000 val_loss:5.3641 total_sharp:3.8394e-04 L1_sharp:1.0508e-04 L2_sharp:3.1150e-05 L3_sharp:4.3612e-05 L4_sharp:2.4458e-05 L5_sharp:6.1447e-05 L6_sharp:3.6148e-05 L7_sharp:3.4066e-05 L8_sharp:7.6223e-05 L9_sharp:6.6213e-05 L10_sharp:7.5032e-05 L11_sharp:1.1922e-04 L12_sharp:4.2776e-04 total_fnorm:3.8438e+00 total_l1_linf:4.9920e+03 total_spectral:1.9219e+00 L1_fnorm:1.2031e+00 L2_fnorm:1.1562e+00 L3_fnorm:1.1172e+00 L4_fnorm:1.1094e+00 L5_fnorm:1.0859e+00 L6_fnorm:1.1016e+00 L7_fnorm:1.1016e+00 L8_fnorm:1.0703e+00 L9_fnorm:1.0938e+00 L10_fnorm:1.0938e+00 L11_fnorm:1.1016e+00 L12_fnorm:1.0312e+00 L1_l1linf:1.9141e-01 L2_l1linf:1.7969e-01 L3_l1linf:1.7480e-01 L4_l1linf:1.6406e-01 L5_l1linf:1.6992e-01 L6_l1linf:1.4941e-01 L7_l1linf:1.4746e-01 L8_l1linf:1.4258e-01 L9_l1linf:1.4355e-01 L10_l1linf:1.5332e-01 L11_l1linf:1.4844e-01 L12_l1linf:1.4355e-01 L1_spectral:1.7640e-02 L2_spectral:1.7006e-02 L3_spectral:1.6800e-02 L4_spectral:1.6900e-02 L5_spectral:1.7443e-02 L6_spectral:1.7161e-02 L7_spectral:1.7085e-02 L8_spectral:1.7264e-02 L9_spectral:1.7092e-02 L10_spectral:1.7161e-02 L11_spectral:1.7112e-02 L12_spectral:1.6959e-02 train_time:377420ms step_avg:41.02ms +[2025-09-11 08:03:28] [Rank 0] PRINT: step:9200/10000 val_loss:5.3641 total_sharp:3.8394e-04 L1_sharp:1.0508e-04 L2_sharp:3.1150e-05 L3_sharp:4.3612e-05 L4_sharp:2.4458e-05 L5_sharp:6.1447e-05 L6_sharp:3.6148e-05 L7_sharp:3.4066e-05 L8_sharp:7.6223e-05 L9_sharp:6.6213e-05 L10_sharp:7.5032e-05 L11_sharp:1.1922e-04 L12_sharp:4.2776e-04 total_fnorm:3.8438e+00 total_l1_linf:4.9920e+03 total_spectral:1.9219e+00 L1_fnorm:1.2031e+00 L2_fnorm:1.1562e+00 L3_fnorm:1.1172e+00 L4_fnorm:1.1094e+00 L5_fnorm:1.0859e+00 L6_fnorm:1.1016e+00 L7_fnorm:1.1016e+00 L8_fnorm:1.0703e+00 L9_fnorm:1.0938e+00 L10_fnorm:1.0938e+00 L11_fnorm:1.1016e+00 L12_fnorm:1.0312e+00 L1_l1linf:1.9141e-01 L2_l1linf:1.7969e-01 L3_l1linf:1.7480e-01 L4_l1linf:1.6406e-01 L5_l1linf:1.6992e-01 
L6_l1linf:1.4941e-01 L7_l1linf:1.4746e-01 L8_l1linf:1.4258e-01 L9_l1linf:1.4355e-01 L10_l1linf:1.5332e-01 L11_l1linf:1.4844e-01 L12_l1linf:1.4355e-01 L1_spectral:1.7640e-02 L2_spectral:1.7006e-02 L3_spectral:1.6800e-02 L4_spectral:1.6900e-02 L5_spectral:1.7443e-02 L6_spectral:1.7161e-02 L7_spectral:1.7085e-02 L8_spectral:1.7264e-02 L9_spectral:1.7092e-02 L10_spectral:1.7161e-02 L11_spectral:1.7112e-02 L12_spectral:1.6959e-02 train_time:377420ms step_avg:41.02ms +[2025-09-11 08:03:29] [Rank 0] step:9201/10000 train_time:378655ms step_avg:41.15ms +[2025-09-11 08:03:29] [Rank 0] step:9201/10000 train_time:378655ms step_avg:41.15ms +[2025-09-11 08:03:30] [Rank 0] step:9221/10000 train_time:379395ms step_avg:41.14ms +[2025-09-11 08:03:30] [Rank 0] step:9221/10000 train_time:379395ms step_avg:41.14ms +[2025-09-11 08:03:30] [Rank 0] step:9241/10000 train_time:380104ms step_avg:41.13ms +[2025-09-11 08:03:30] [Rank 0] step:9241/10000 train_time:380104ms step_avg:41.13ms +[2025-09-11 08:03:31] [Rank 0] step:9261/10000 train_time:380816ms step_avg:41.12ms +[2025-09-11 08:03:31] [Rank 0] step:9261/10000 train_time:380816ms step_avg:41.12ms +[2025-09-11 08:03:32] [Rank 0] step:9281/10000 train_time:381528ms step_avg:41.11ms +[2025-09-11 08:03:32] [Rank 0] step:9281/10000 train_time:381528ms step_avg:41.11ms +[2025-09-11 08:03:32] [Rank 0] step:9301/10000 train_time:382237ms step_avg:41.10ms +[2025-09-11 08:03:32] [Rank 0] step:9301/10000 train_time:382237ms step_avg:41.10ms +[2025-09-11 08:03:33] [Rank 0] step:9321/10000 train_time:382951ms step_avg:41.08ms +[2025-09-11 08:03:33] [Rank 0] step:9321/10000 train_time:382951ms step_avg:41.08ms +[2025-09-11 08:03:34] [Rank 0] step:9341/10000 train_time:383657ms step_avg:41.07ms +[2025-09-11 08:03:34] [Rank 0] step:9341/10000 train_time:383657ms step_avg:41.07ms +[2025-09-11 08:03:35] [Rank 0] step:9361/10000 train_time:384365ms step_avg:41.06ms +[2025-09-11 08:03:35] [Rank 0] step:9361/10000 train_time:384365ms step_avg:41.06ms 
+[2025-09-11 08:03:35] [Rank 0] step:9381/10000 train_time:385073ms step_avg:41.05ms +[2025-09-11 08:03:35] [Rank 0] step:9381/10000 train_time:385073ms step_avg:41.05ms +[2025-09-11 08:03:36] [Rank 0] step:9401/10000 train_time:385785ms step_avg:41.04ms +[2025-09-11 08:03:36] [Rank 0] step:9401/10000 train_time:385785ms step_avg:41.04ms +[2025-09-11 08:03:37] [Rank 0] step:9421/10000 train_time:386496ms step_avg:41.02ms +[2025-09-11 08:03:37] [Rank 0] step:9421/10000 train_time:386496ms step_avg:41.02ms +[2025-09-11 08:03:37] [Rank 0] step:9441/10000 train_time:387210ms step_avg:41.01ms +[2025-09-11 08:03:37] [Rank 0] step:9441/10000 train_time:387210ms step_avg:41.01ms +[2025-09-11 08:03:38] [Rank 0] step:9461/10000 train_time:387920ms step_avg:41.00ms +[2025-09-11 08:03:38] [Rank 0] step:9461/10000 train_time:387920ms step_avg:41.00ms +[2025-09-11 08:03:39] [Rank 0] step:9481/10000 train_time:388632ms step_avg:40.99ms +[2025-09-11 08:03:39] [Rank 0] step:9481/10000 train_time:388632ms step_avg:40.99ms +[2025-09-11 08:03:40] [Rank 0] step:9501/10000 train_time:389344ms step_avg:40.98ms +[2025-09-11 08:03:40] [Rank 0] step:9501/10000 train_time:389344ms step_avg:40.98ms +[2025-09-11 08:03:40] [Rank 0] step:9521/10000 train_time:390056ms step_avg:40.97ms +[2025-09-11 08:03:40] [Rank 0] step:9521/10000 train_time:390056ms step_avg:40.97ms +[2025-09-11 08:03:41] [Rank 0] step:9541/10000 train_time:390764ms step_avg:40.96ms +[2025-09-11 08:03:41] [Rank 0] step:9541/10000 train_time:390764ms step_avg:40.96ms +[2025-09-11 08:03:42] [Rank 0] step:9561/10000 train_time:391474ms step_avg:40.94ms +[2025-09-11 08:03:42] [Rank 0] step:9561/10000 train_time:391474ms step_avg:40.94ms +[2025-09-11 08:03:42] [Rank 0] step:9581/10000 train_time:392186ms step_avg:40.93ms +[2025-09-11 08:03:42] [Rank 0] step:9581/10000 train_time:392186ms step_avg:40.93ms +[2025-09-11 08:03:43] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 08:03:43] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 08:03:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 08:03:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 08:03:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 08:03:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 08:03:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:03:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:03:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 08:03:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 08:03:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 08:03:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 08:03:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 08:03:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 08:03:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 08:03:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 08:03:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 08:03:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 08:03:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 08:03:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 08:03:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 08:03:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 08:03:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 08:03:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 08:03:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 08:03:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 08:03:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 08:03:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 08:03:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 08:03:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 08:03:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 08:03:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 08:03:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 08:03:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 08:03:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 08:03:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 08:03:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 08:03:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 08:03:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 08:03:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 08:03:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 08:03:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 08:03:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 08:03:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 08:03:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 08:03:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 08:03:53] [Rank 0] PRINT: step:9600/10000 val_loss:5.3571 total_sharp:2.6590e-04 L1_sharp:1.0090e-04 L2_sharp:4.0239e-05 L3_sharp:3.8543e-05 L4_sharp:2.1454e-05 L5_sharp:4.7559e-05 L6_sharp:3.1421e-05 L7_sharp:2.8271e-05 L8_sharp:5.4798e-05 L9_sharp:4.6043e-05 L10_sharp:6.1397e-05 L11_sharp:8.9042e-05 L12_sharp:2.9859e-04 total_fnorm:2.1719e+00 total_l1_linf:2.3360e+03 total_spectral:1.0781e+00 L1_fnorm:6.6797e-01 L2_fnorm:6.4844e-01 L3_fnorm:6.2891e-01 L4_fnorm:6.2500e-01 L5_fnorm:6.0938e-01 L6_fnorm:6.2109e-01 L7_fnorm:6.1719e-01 L8_fnorm:6.0938e-01 L9_fnorm:6.1719e-01 L10_fnorm:6.1719e-01 L11_fnorm:6.2500e-01 L12_fnorm:5.8203e-01 L1_l1linf:9.2773e-02 L2_l1linf:8.4473e-02 L3_l1linf:8.4961e-02 L4_l1linf:7.9102e-02 L5_l1linf:8.8867e-02 L6_l1linf:7.2754e-02 L7_l1linf:7.0312e-02 L8_l1linf:7.1777e-02 L9_l1linf:7.0801e-02 L10_l1linf:7.4219e-02 L11_l1linf:7.2266e-02 L12_l1linf:7.1289e-02 L1_spectral:9.8089e-03 L2_spectral:9.6218e-03 L3_spectral:9.6117e-03 L4_spectral:9.7010e-03 L5_spectral:1.0105e-02 L6_spectral:9.7574e-03 L7_spectral:9.8564e-03 L8_spectral:9.9089e-03 L9_spectral:9.6973e-03 L10_spectral:9.7762e-03 L11_spectral:9.7752e-03 L12_spectral:1.0087e-02 train_time:392872ms step_avg:40.92ms +[2025-09-11 08:03:53] [Rank 0] PRINT: step:9600/10000 
val_loss:5.3571 total_sharp:2.6590e-04 L1_sharp:1.0090e-04 L2_sharp:4.0239e-05 L3_sharp:3.8543e-05 L4_sharp:2.1454e-05 L5_sharp:4.7559e-05 L6_sharp:3.1421e-05 L7_sharp:2.8271e-05 L8_sharp:5.4798e-05 L9_sharp:4.6043e-05 L10_sharp:6.1397e-05 L11_sharp:8.9042e-05 L12_sharp:2.9859e-04 total_fnorm:2.1719e+00 total_l1_linf:2.3360e+03 total_spectral:1.0781e+00 L1_fnorm:6.6797e-01 L2_fnorm:6.4844e-01 L3_fnorm:6.2891e-01 L4_fnorm:6.2500e-01 L5_fnorm:6.0938e-01 L6_fnorm:6.2109e-01 L7_fnorm:6.1719e-01 L8_fnorm:6.0938e-01 L9_fnorm:6.1719e-01 L10_fnorm:6.1719e-01 L11_fnorm:6.2500e-01 L12_fnorm:5.8203e-01 L1_l1linf:9.2773e-02 L2_l1linf:8.4473e-02 L3_l1linf:8.4961e-02 L4_l1linf:7.9102e-02 L5_l1linf:8.8867e-02 L6_l1linf:7.2754e-02 L7_l1linf:7.0312e-02 L8_l1linf:7.1777e-02 L9_l1linf:7.0801e-02 L10_l1linf:7.4219e-02 L11_l1linf:7.2266e-02 L12_l1linf:7.1289e-02 L1_spectral:9.8089e-03 L2_spectral:9.6218e-03 L3_spectral:9.6117e-03 L4_spectral:9.7010e-03 L5_spectral:1.0105e-02 L6_spectral:9.7574e-03 L7_spectral:9.8564e-03 L8_spectral:9.9089e-03 L9_spectral:9.6973e-03 L10_spectral:9.7762e-03 L11_spectral:9.7752e-03 L12_spectral:1.0087e-02 train_time:392872ms step_avg:40.92ms +[2025-09-11 08:03:54] [Rank 0] step:9601/10000 train_time:394120ms step_avg:41.05ms +[2025-09-11 08:03:54] [Rank 0] step:9601/10000 train_time:394120ms step_avg:41.05ms +[2025-09-11 08:03:55] [Rank 0] step:9621/10000 train_time:395153ms step_avg:41.07ms +[2025-09-11 08:03:55] [Rank 0] step:9621/10000 train_time:395153ms step_avg:41.07ms +[2025-09-11 08:03:56] [Rank 0] step:9641/10000 train_time:395869ms step_avg:41.06ms +[2025-09-11 08:03:56] [Rank 0] step:9641/10000 train_time:395869ms step_avg:41.06ms +[2025-09-11 08:03:57] [Rank 0] step:9661/10000 train_time:396731ms step_avg:41.07ms +[2025-09-11 08:03:57] [Rank 0] step:9661/10000 train_time:396731ms step_avg:41.07ms +[2025-09-11 08:03:58] [Rank 0] step:9681/10000 train_time:397563ms step_avg:41.07ms +[2025-09-11 08:03:58] [Rank 0] step:9681/10000 
train_time:397563ms step_avg:41.07ms +[2025-09-11 08:03:58] [Rank 0] step:9701/10000 train_time:398281ms step_avg:41.06ms +[2025-09-11 08:03:58] [Rank 0] step:9701/10000 train_time:398281ms step_avg:41.06ms +[2025-09-11 08:03:59] [Rank 0] step:9721/10000 train_time:399002ms step_avg:41.05ms +[2025-09-11 08:03:59] [Rank 0] step:9721/10000 train_time:399002ms step_avg:41.05ms +[2025-09-11 08:04:00] [Rank 0] step:9741/10000 train_time:399721ms step_avg:41.03ms +[2025-09-11 08:04:00] [Rank 0] step:9741/10000 train_time:399721ms step_avg:41.03ms +[2025-09-11 08:04:01] [Rank 0] step:9761/10000 train_time:400438ms step_avg:41.02ms +[2025-09-11 08:04:01] [Rank 0] step:9761/10000 train_time:400438ms step_avg:41.02ms +[2025-09-11 08:04:01] [Rank 0] step:9781/10000 train_time:401155ms step_avg:41.01ms +[2025-09-11 08:04:01] [Rank 0] step:9781/10000 train_time:401155ms step_avg:41.01ms +[2025-09-11 08:04:02] [Rank 0] step:9801/10000 train_time:401876ms step_avg:41.00ms +[2025-09-11 08:04:02] [Rank 0] step:9801/10000 train_time:401876ms step_avg:41.00ms +[2025-09-11 08:04:03] [Rank 0] step:9821/10000 train_time:402596ms step_avg:40.99ms +[2025-09-11 08:04:03] [Rank 0] step:9821/10000 train_time:402596ms step_avg:40.99ms +[2025-09-11 08:04:03] [Rank 0] step:9841/10000 train_time:403317ms step_avg:40.98ms +[2025-09-11 08:04:03] [Rank 0] step:9841/10000 train_time:403317ms step_avg:40.98ms +[2025-09-11 08:04:04] [Rank 0] step:9861/10000 train_time:404036ms step_avg:40.97ms +[2025-09-11 08:04:04] [Rank 0] step:9861/10000 train_time:404036ms step_avg:40.97ms +[2025-09-11 08:04:05] [Rank 0] step:9881/10000 train_time:404754ms step_avg:40.96ms +[2025-09-11 08:04:05] [Rank 0] step:9881/10000 train_time:404754ms step_avg:40.96ms +[2025-09-11 08:04:06] [Rank 0] step:9901/10000 train_time:405469ms step_avg:40.95ms +[2025-09-11 08:04:06] [Rank 0] step:9901/10000 train_time:405469ms step_avg:40.95ms +[2025-09-11 08:04:06] [Rank 0] step:9921/10000 train_time:406185ms step_avg:40.94ms 
+[2025-09-11 08:04:06] [Rank 0] step:9921/10000 train_time:406185ms step_avg:40.94ms +[2025-09-11 08:04:07] [Rank 0] step:9941/10000 train_time:406906ms step_avg:40.93ms +[2025-09-11 08:04:07] [Rank 0] step:9941/10000 train_time:406906ms step_avg:40.93ms +[2025-09-11 08:04:08] [Rank 0] step:9961/10000 train_time:407629ms step_avg:40.92ms +[2025-09-11 08:04:08] [Rank 0] step:9961/10000 train_time:407629ms step_avg:40.92ms +[2025-09-11 08:04:08] [Rank 0] step:9981/10000 train_time:408347ms step_avg:40.91ms +[2025-09-11 08:04:08] [Rank 0] step:9981/10000 train_time:408347ms step_avg:40.91ms +[2025-09-11 08:04:09] [Rank 0] step:10000/10000 train_time:409037ms step_avg:40.90ms +[2025-09-11 08:04:09] [Rank 0] step:10000/10000 train_time:409037ms step_avg:40.90ms +[2025-09-11 08:04:09] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 08:04:09] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 08:04:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 08:04:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 08:04:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 08:04:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 08:04:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:04:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:04:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 08:04:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 08:04:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 08:04:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 08:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 08:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 08:04:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 08:04:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 08:04:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 08:04:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 08:04:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 08:04:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 08:04:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 08:04:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 08:04:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 08:04:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 08:04:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 08:04:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 08:04:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 08:04:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 08:04:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 08:04:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 08:04:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 08:04:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 08:04:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 08:04:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 08:04:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 08:04:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 08:04:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 08:04:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 08:04:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 08:04:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 08:04:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 08:04:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 08:04:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 08:04:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 08:04:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 08:04:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:04:19] [Rank 0] PRINT: step:10000/10000 val_loss:5.3544 total_sharp:1.5982e-04 L1_sharp:6.4073e-05 L2_sharp:2.2863e-05 L3_sharp:2.4651e-05 L4_sharp:1.8254e-05 L5_sharp:4.3376e-05 L6_sharp:2.6404e-05 L7_sharp:2.3311e-05 L8_sharp:3.8198e-05 L9_sharp:3.6921e-05 L10_sharp:4.2867e-05 L11_sharp:6.4812e-05 L12_sharp:2.3982e-04 total_fnorm:8.5156e-01 total_l1_linf:6.6400e+02 total_spectral:4.1992e-01 L1_fnorm:2.6562e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4414e-01 L4_fnorm:2.4316e-01 L5_fnorm:2.3730e-01 L6_fnorm:2.4121e-01 L7_fnorm:2.4023e-01 L8_fnorm:2.3633e-01 L9_fnorm:2.4023e-01 L10_fnorm:2.4121e-01 L11_fnorm:2.4316e-01 L12_fnorm:2.2754e-01 L1_l1linf:2.9053e-02 L2_l1linf:2.6489e-02 L3_l1linf:2.6611e-02 L4_l1linf:2.4292e-02 L5_l1linf:2.6489e-02 L6_l1linf:2.2339e-02 L7_l1linf:2.0630e-02 L8_l1linf:2.1973e-02 L9_l1linf:2.2095e-02 L10_l1linf:2.1973e-02 L11_l1linf:2.1973e-02 L12_l1linf:2.3804e-02 L1_spectral:3.9734e-03 L2_spectral:3.8283e-03 L3_spectral:3.8702e-03 L4_spectral:3.8592e-03 L5_spectral:4.1519e-03 L6_spectral:3.9334e-03 L7_spectral:3.9098e-03 L8_spectral:4.0051e-03 L9_spectral:3.9520e-03 L10_spectral:3.8801e-03 L11_spectral:3.9306e-03 L12_spectral:4.0031e-03 train_time:409055ms step_avg:40.91ms +[2025-09-11 08:04:19] [Rank 0] PRINT: step:10000/10000 val_loss:5.3544 total_sharp:1.5982e-04 L1_sharp:6.4073e-05 L2_sharp:2.2863e-05 L3_sharp:2.4651e-05 L4_sharp:1.8254e-05 L5_sharp:4.3376e-05 L6_sharp:2.6404e-05 L7_sharp:2.3311e-05 L8_sharp:3.8198e-05 L9_sharp:3.6921e-05 L10_sharp:4.2867e-05 L11_sharp:6.4812e-05 L12_sharp:2.3982e-04 total_fnorm:8.5156e-01 total_l1_linf:6.6400e+02 total_spectral:4.1992e-01 L1_fnorm:2.6562e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4414e-01 L4_fnorm:2.4316e-01 L5_fnorm:2.3730e-01 L6_fnorm:2.4121e-01 L7_fnorm:2.4023e-01 L8_fnorm:2.3633e-01 L9_fnorm:2.4023e-01 L10_fnorm:2.4121e-01 L11_fnorm:2.4316e-01 L12_fnorm:2.2754e-01 L1_l1linf:2.9053e-02 L2_l1linf:2.6489e-02 L3_l1linf:2.6611e-02 L4_l1linf:2.4292e-02 L5_l1linf:2.6489e-02 
L6_l1linf:2.2339e-02 L7_l1linf:2.0630e-02 L8_l1linf:2.1973e-02 L9_l1linf:2.2095e-02 L10_l1linf:2.1973e-02 L11_l1linf:2.1973e-02 L12_l1linf:2.3804e-02 L1_spectral:3.9734e-03 L2_spectral:3.8283e-03 L3_spectral:3.8702e-03 L4_spectral:3.8592e-03 L5_spectral:4.1519e-03 L6_spectral:3.9334e-03 L7_spectral:3.9098e-03 L8_spectral:4.0051e-03 L9_spectral:3.9520e-03 L10_spectral:3.8801e-03 L11_spectral:3.9306e-03 L12_spectral:4.0031e-03 train_time:409055ms step_avg:40.91ms +[2025-09-11 08:04:19] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 08:04:19 2025 --- +[2025-09-11 08:04:19] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 08:04:19 2025 --- +[2025-09-11 08:04:19] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 08:04:19] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.1_seed_44/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.1_seed_44/config.json new file mode 100644 index 0000000000000000000000000000000000000000..56b59d226a7881dc415e15061ac08c2a4d2bfe38 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.1_seed_44/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 44, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.001, + "muon_lr": 0.1, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "586c59e2-6b8b-48f7-a0a3-6ebf33809948", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.1_seed_44/training_log_586c59e2-6b8b-48f7-a0a3-6ebf33809948.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.1_seed_44/training_log_586c59e2-6b8b-48f7-a0a3-6ebf33809948.txt new file mode 100644 index 0000000000000000000000000000000000000000..8a8fe7a94a3ffaa2ec8d010bc572236a843ef643 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.1_seed_44/training_log_586c59e2-6b8b-48f7-a0a3-6ebf33809948.txt @@ -0,0 +1,4264 @@ +[2025-09-11 07:37:58] [Rank 0] PRINT: --- Script Start: Thu Sep 11 07:37:58 2025 --- +[2025-09-11 07:37:58] [Rank 0] PRINT: --- Script Start: Thu Sep 11 07:37:58 2025 --- +[2025-09-11 07:37:58] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001, muon_lr=0.1, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 07:37:58] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001, muon_lr=0.1, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 07:37:58] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 07:37:58] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 07:37:58] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 07:37:58] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 07:37:58] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.1_seed_44 +[2025-09-11 07:37:58] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.1_seed_44 +[2025-09-11 07:37:58] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import 
dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, 
"unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." 
+ "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention 
sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 07:37:58] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 07:37:58] [Rank 0] PRINT: Constructing model... +[2025-09-11 07:37:58] [Rank 0] PRINT: Constructing model... +[2025-09-11 07:37:59] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 07:37:59] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 07:37:59] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 07:37:59] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 07:37:59] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 07:37:59] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 07:37:59] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 07:37:59] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 07:37:59] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 07:37:59] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 07:38:02] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 07:38:02] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 07:38:02] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 07:38:02] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 07:38:02] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 07:38:02] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 07:38:08] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 07:38:08] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 07:38:08] [Rank 0] PRINT: Starting warmup... +[2025-09-11 07:38:08] [Rank 0] PRINT: Starting warmup... +[2025-09-11 07:38:46] [Rank 0] PRINT: Warmup complete. +[2025-09-11 07:38:46] [Rank 0] PRINT: Warmup complete. +[2025-09-11 07:38:46] [Rank 0] PRINT: Starting training... +[2025-09-11 07:38:46] [Rank 0] PRINT: Starting training... 
+[2025-09-11 07:38:47] [Rank 0] step:21/10000 train_time:1138ms step_avg:54.17ms +[2025-09-11 07:38:47] [Rank 0] step:21/10000 train_time:1138ms step_avg:54.17ms +[2025-09-11 07:38:48] [Rank 0] step:41/10000 train_time:1869ms step_avg:45.58ms +[2025-09-11 07:38:48] [Rank 0] step:41/10000 train_time:1869ms step_avg:45.58ms +[2025-09-11 07:38:49] [Rank 0] step:61/10000 train_time:2601ms step_avg:42.65ms +[2025-09-11 07:38:49] [Rank 0] step:61/10000 train_time:2601ms step_avg:42.65ms +[2025-09-11 07:38:50] [Rank 0] step:81/10000 train_time:3332ms step_avg:41.14ms +[2025-09-11 07:38:50] [Rank 0] step:81/10000 train_time:3332ms step_avg:41.14ms +[2025-09-11 07:38:50] [Rank 0] step:101/10000 train_time:4062ms step_avg:40.22ms +[2025-09-11 07:38:50] [Rank 0] step:101/10000 train_time:4062ms step_avg:40.22ms +[2025-09-11 07:38:51] [Rank 0] step:121/10000 train_time:4792ms step_avg:39.61ms +[2025-09-11 07:38:51] [Rank 0] step:121/10000 train_time:4792ms step_avg:39.61ms +[2025-09-11 07:38:52] [Rank 0] step:141/10000 train_time:5522ms step_avg:39.17ms +[2025-09-11 07:38:52] [Rank 0] step:141/10000 train_time:5522ms step_avg:39.17ms +[2025-09-11 07:38:53] [Rank 0] step:161/10000 train_time:6253ms step_avg:38.84ms +[2025-09-11 07:38:53] [Rank 0] step:161/10000 train_time:6253ms step_avg:38.84ms +[2025-09-11 07:38:53] [Rank 0] step:181/10000 train_time:6983ms step_avg:38.58ms +[2025-09-11 07:38:53] [Rank 0] step:181/10000 train_time:6983ms step_avg:38.58ms +[2025-09-11 07:38:54] [Rank 0] step:201/10000 train_time:7713ms step_avg:38.37ms +[2025-09-11 07:38:54] [Rank 0] step:201/10000 train_time:7713ms step_avg:38.37ms +[2025-09-11 07:38:55] [Rank 0] step:221/10000 train_time:8442ms step_avg:38.20ms +[2025-09-11 07:38:55] [Rank 0] step:221/10000 train_time:8442ms step_avg:38.20ms +[2025-09-11 07:38:55] [Rank 0] step:241/10000 train_time:9173ms step_avg:38.06ms +[2025-09-11 07:38:55] [Rank 0] step:241/10000 train_time:9173ms step_avg:38.06ms +[2025-09-11 07:38:56] [Rank 0] 
step:261/10000 train_time:9903ms step_avg:37.94ms +[2025-09-11 07:38:56] [Rank 0] step:261/10000 train_time:9903ms step_avg:37.94ms +[2025-09-11 07:38:57] [Rank 0] step:281/10000 train_time:10632ms step_avg:37.84ms +[2025-09-11 07:38:57] [Rank 0] step:281/10000 train_time:10632ms step_avg:37.84ms +[2025-09-11 07:38:58] [Rank 0] step:301/10000 train_time:11363ms step_avg:37.75ms +[2025-09-11 07:38:58] [Rank 0] step:301/10000 train_time:11363ms step_avg:37.75ms +[2025-09-11 07:38:58] [Rank 0] step:321/10000 train_time:12093ms step_avg:37.67ms +[2025-09-11 07:38:58] [Rank 0] step:321/10000 train_time:12093ms step_avg:37.67ms +[2025-09-11 07:38:59] [Rank 0] step:341/10000 train_time:12823ms step_avg:37.60ms +[2025-09-11 07:38:59] [Rank 0] step:341/10000 train_time:12823ms step_avg:37.60ms +[2025-09-11 07:39:00] [Rank 0] step:361/10000 train_time:13553ms step_avg:37.54ms +[2025-09-11 07:39:00] [Rank 0] step:361/10000 train_time:13553ms step_avg:37.54ms +[2025-09-11 07:39:01] [Rank 0] step:381/10000 train_time:14283ms step_avg:37.49ms +[2025-09-11 07:39:01] [Rank 0] step:381/10000 train_time:14283ms step_avg:37.49ms +[2025-09-11 07:39:01] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 07:39:01] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 07:39:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 07:39:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 07:39:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 07:39:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 07:39:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:39:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 07:39:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 07:39:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 07:39:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 07:39:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 07:39:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 07:39:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 07:39:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 07:39:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 07:39:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 07:39:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 07:39:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 07:39:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 07:39:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 07:39:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 07:39:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 07:39:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 07:39:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 07:39:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 07:39:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 07:39:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 07:39:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 07:39:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 07:39:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 07:39:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 07:39:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 07:39:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 07:39:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 07:39:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 07:39:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 07:39:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 07:39:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 07:39:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 07:39:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 07:39:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 07:39:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 07:39:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 07:39:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 07:39:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:39:50] [Rank 0] PRINT: step:400/10000 val_loss:7.2813 total_sharp:4.0448e-03 L1_sharp:1.8228e-03 L2_sharp:1.2780e-03 L3_sharp:1.6465e-03 L4_sharp:1.3908e-03 L5_sharp:5.6212e-04 L6_sharp:3.9836e-04 L7_sharp:1.5827e-04 L8_sharp:9.8878e-05 L9_sharp:1.4110e-05 L10_sharp:-3.7582e-05 L11_sharp:3.7802e-04 L12_sharp:6.5966e-05 total_fnorm:2.4716e+01 total_l1_linf:7.9154e+04 total_spectral:1.2358e+01 L1_fnorm:1.2092e+01 L2_fnorm:1.0187e+01 L3_fnorm:8.2118e+00 L4_fnorm:6.5332e+00 L5_fnorm:5.8801e+00 L6_fnorm:5.4873e+00 L7_fnorm:5.6623e+00 L8_fnorm:5.3739e+00 L9_fnorm:5.1009e+00 L10_fnorm:4.9216e+00 L11_fnorm:4.6519e+00 L12_fnorm:4.9548e+00 L1_l1linf:3.5195e+00 L2_l1linf:3.0747e+00 L3_l1linf:2.6138e+00 L4_l1linf:2.3414e+00 L5_l1linf:2.2214e+00 L6_l1linf:2.2747e+00 L7_l1linf:2.2437e+00 L8_l1linf:2.2771e+00 L9_l1linf:2.2364e+00 L10_l1linf:2.2912e+00 L11_l1linf:2.2991e+00 L12_l1linf:2.2880e+00 L1_spectral:1.2179e-01 L2_spectral:1.2120e-01 L3_spectral:1.2071e-01 L4_spectral:1.2030e-01 L5_spectral:1.2001e-01 L6_spectral:1.1992e-01 L7_spectral:1.1984e-01 L8_spectral:1.1963e-01 L9_spectral:1.1995e-01 L10_spectral:1.1984e-01 L11_spectral:1.1974e-01 L12_spectral:1.1987e-01 train_time:14993ms step_avg:37.48ms +[2025-09-11 07:39:50] [Rank 0] PRINT: step:400/10000 val_loss:7.2813 total_sharp:4.0448e-03 L1_sharp:1.8228e-03 L2_sharp:1.2780e-03 L3_sharp:1.6465e-03 L4_sharp:1.3908e-03 L5_sharp:5.6212e-04 L6_sharp:3.9836e-04 L7_sharp:1.5827e-04 L8_sharp:9.8878e-05 L9_sharp:1.4110e-05 L10_sharp:-3.7582e-05 L11_sharp:3.7802e-04 L12_sharp:6.5966e-05 total_fnorm:2.4716e+01 total_l1_linf:7.9154e+04 total_spectral:1.2358e+01 L1_fnorm:1.2092e+01 L2_fnorm:1.0187e+01 L3_fnorm:8.2118e+00 L4_fnorm:6.5332e+00 L5_fnorm:5.8801e+00 L6_fnorm:5.4873e+00 L7_fnorm:5.6623e+00 L8_fnorm:5.3739e+00 L9_fnorm:5.1009e+00 L10_fnorm:4.9216e+00 L11_fnorm:4.6519e+00 L12_fnorm:4.9548e+00 L1_l1linf:3.5195e+00 L2_l1linf:3.0747e+00 L3_l1linf:2.6138e+00 L4_l1linf:2.3414e+00 L5_l1linf:2.2214e+00 
L6_l1linf:2.2747e+00 L7_l1linf:2.2437e+00 L8_l1linf:2.2771e+00 L9_l1linf:2.2364e+00 L10_l1linf:2.2912e+00 L11_l1linf:2.2991e+00 L12_l1linf:2.2880e+00 L1_spectral:1.2179e-01 L2_spectral:1.2120e-01 L3_spectral:1.2071e-01 L4_spectral:1.2030e-01 L5_spectral:1.2001e-01 L6_spectral:1.1992e-01 L7_spectral:1.1984e-01 L8_spectral:1.1963e-01 L9_spectral:1.1995e-01 L10_spectral:1.1984e-01 L11_spectral:1.1974e-01 L12_spectral:1.1987e-01 train_time:14993ms step_avg:37.48ms +[2025-09-11 07:40:21] [Rank 0] step:401/10000 train_time:46200ms step_avg:115.21ms +[2025-09-11 07:40:21] [Rank 0] step:401/10000 train_time:46200ms step_avg:115.21ms +[2025-09-11 07:40:23] [Rank 0] step:421/10000 train_time:48086ms step_avg:114.22ms +[2025-09-11 07:40:23] [Rank 0] step:421/10000 train_time:48086ms step_avg:114.22ms +[2025-09-11 07:40:24] [Rank 0] step:441/10000 train_time:48726ms step_avg:110.49ms +[2025-09-11 07:40:24] [Rank 0] step:441/10000 train_time:48726ms step_avg:110.49ms +[2025-09-11 07:40:24] [Rank 0] step:461/10000 train_time:49366ms step_avg:107.08ms +[2025-09-11 07:40:24] [Rank 0] step:461/10000 train_time:49366ms step_avg:107.08ms +[2025-09-11 07:40:25] [Rank 0] step:481/10000 train_time:50004ms step_avg:103.96ms +[2025-09-11 07:40:25] [Rank 0] step:481/10000 train_time:50004ms step_avg:103.96ms +[2025-09-11 07:40:26] [Rank 0] step:501/10000 train_time:50643ms step_avg:101.08ms +[2025-09-11 07:40:26] [Rank 0] step:501/10000 train_time:50643ms step_avg:101.08ms +[2025-09-11 07:40:26] [Rank 0] step:521/10000 train_time:51281ms step_avg:98.43ms +[2025-09-11 07:40:26] [Rank 0] step:521/10000 train_time:51281ms step_avg:98.43ms +[2025-09-11 07:40:27] [Rank 0] step:541/10000 train_time:51920ms step_avg:95.97ms +[2025-09-11 07:40:27] [Rank 0] step:541/10000 train_time:51920ms step_avg:95.97ms +[2025-09-11 07:40:28] [Rank 0] step:561/10000 train_time:52558ms step_avg:93.69ms +[2025-09-11 07:40:28] [Rank 0] step:561/10000 train_time:52558ms step_avg:93.69ms +[2025-09-11 07:40:28] [Rank 
0] step:581/10000 train_time:53197ms step_avg:91.56ms +[2025-09-11 07:40:28] [Rank 0] step:581/10000 train_time:53197ms step_avg:91.56ms +[2025-09-11 07:40:29] [Rank 0] step:601/10000 train_time:53835ms step_avg:89.58ms +[2025-09-11 07:40:29] [Rank 0] step:601/10000 train_time:53835ms step_avg:89.58ms +[2025-09-11 07:40:29] [Rank 0] step:621/10000 train_time:54474ms step_avg:87.72ms +[2025-09-11 07:40:29] [Rank 0] step:621/10000 train_time:54474ms step_avg:87.72ms +[2025-09-11 07:40:30] [Rank 0] step:641/10000 train_time:55112ms step_avg:85.98ms +[2025-09-11 07:40:30] [Rank 0] step:641/10000 train_time:55112ms step_avg:85.98ms +[2025-09-11 07:40:31] [Rank 0] step:661/10000 train_time:55750ms step_avg:84.34ms +[2025-09-11 07:40:31] [Rank 0] step:661/10000 train_time:55750ms step_avg:84.34ms +[2025-09-11 07:40:31] [Rank 0] step:681/10000 train_time:56389ms step_avg:82.80ms +[2025-09-11 07:40:31] [Rank 0] step:681/10000 train_time:56389ms step_avg:82.80ms +[2025-09-11 07:40:32] [Rank 0] step:701/10000 train_time:57027ms step_avg:81.35ms +[2025-09-11 07:40:32] [Rank 0] step:701/10000 train_time:57027ms step_avg:81.35ms +[2025-09-11 07:40:33] [Rank 0] step:721/10000 train_time:57665ms step_avg:79.98ms +[2025-09-11 07:40:33] [Rank 0] step:721/10000 train_time:57665ms step_avg:79.98ms +[2025-09-11 07:40:33] [Rank 0] step:741/10000 train_time:58304ms step_avg:78.68ms +[2025-09-11 07:40:33] [Rank 0] step:741/10000 train_time:58304ms step_avg:78.68ms +[2025-09-11 07:40:34] [Rank 0] step:761/10000 train_time:58947ms step_avg:77.46ms +[2025-09-11 07:40:34] [Rank 0] step:761/10000 train_time:58947ms step_avg:77.46ms +[2025-09-11 07:40:35] [Rank 0] step:781/10000 train_time:59590ms step_avg:76.30ms +[2025-09-11 07:40:35] [Rank 0] step:781/10000 train_time:59590ms step_avg:76.30ms +[2025-09-11 07:40:35] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 07:40:35] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 07:40:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 07:40:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 07:41:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 07:41:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 07:41:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:41:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:41:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 07:41:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 07:41:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 07:41:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 07:41:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 07:41:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 07:41:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 07:41:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 07:41:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 07:41:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 07:41:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 07:41:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 07:41:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 07:41:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 07:41:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 07:41:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 07:41:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 07:41:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 07:41:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 07:41:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 07:41:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 07:41:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 07:41:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 07:41:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 07:41:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 07:41:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 07:41:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 07:41:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 07:41:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 07:41:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 07:41:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 07:41:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 07:41:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... 
+[2025-09-11 07:41:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 07:41:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 07:41:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 07:41:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 07:41:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 07:41:21] [Rank 0] PRINT: step:800/10000 val_loss:6.7426 total_sharp:2.4844e-03 L1_sharp:1.3001e-03 L2_sharp:8.0571e-04 L3_sharp:6.3828e-04 L4_sharp:3.3272e-04 L5_sharp:2.5142e-04 L6_sharp:9.2724e-05 L7_sharp:1.2651e-04 L8_sharp:6.3684e-05 L9_sharp:2.8443e-05 L10_sharp:2.7791e-04 L11_sharp:4.2779e-04 L12_sharp:5.2467e-05 total_fnorm:3.0125e+01 total_l1_linf:8.1920e+04 total_spectral:1.4875e+01 L1_fnorm:1.2500e+01 L2_fnorm:1.1375e+01 L3_fnorm:1.0375e+01 L4_fnorm:9.3750e+00 L5_fnorm:8.3750e+00 L6_fnorm:8.0000e+00 L7_fnorm:7.4062e+00 L8_fnorm:7.0938e+00 L9_fnorm:6.8438e+00 L10_fnorm:6.1250e+00 L11_fnorm:6.0000e+00 L12_fnorm:6.1250e+00 L1_l1linf:3.5781e+00 L2_l1linf:3.4219e+00 L3_l1linf:2.8438e+00 L4_l1linf:2.6406e+00 L5_l1linf:2.4531e+00 L6_l1linf:2.2969e+00 L7_l1linf:2.2500e+00 L8_l1linf:2.2188e+00 L9_l1linf:2.2500e+00 L10_l1linf:2.2656e+00 L11_l1linf:2.2812e+00 L12_l1linf:2.2500e+00 L1_spectral:1.3313e-01 L2_spectral:1.3008e-01 L3_spectral:1.2971e-01 L4_spectral:1.2808e-01 L5_spectral:1.2779e-01 L6_spectral:1.2780e-01 L7_spectral:1.2726e-01 L8_spectral:1.2641e-01 L9_spectral:1.2618e-01 L10_spectral:1.2585e-01 L11_spectral:1.2578e-01 L12_spectral:1.2527e-01 train_time:60216ms step_avg:75.27ms +[2025-09-11 07:41:21] [Rank 0] PRINT: step:800/10000 val_loss:6.7426 total_sharp:2.4844e-03 L1_sharp:1.3001e-03 L2_sharp:8.0571e-04 L3_sharp:6.3828e-04 L4_sharp:3.3272e-04 L5_sharp:2.5142e-04 L6_sharp:9.2724e-05 L7_sharp:1.2651e-04 L8_sharp:6.3684e-05 
L9_sharp:2.8443e-05 L10_sharp:2.7791e-04 L11_sharp:4.2779e-04 L12_sharp:5.2467e-05 total_fnorm:3.0125e+01 total_l1_linf:8.1920e+04 total_spectral:1.4875e+01 L1_fnorm:1.2500e+01 L2_fnorm:1.1375e+01 L3_fnorm:1.0375e+01 L4_fnorm:9.3750e+00 L5_fnorm:8.3750e+00 L6_fnorm:8.0000e+00 L7_fnorm:7.4062e+00 L8_fnorm:7.0938e+00 L9_fnorm:6.8438e+00 L10_fnorm:6.1250e+00 L11_fnorm:6.0000e+00 L12_fnorm:6.1250e+00 L1_l1linf:3.5781e+00 L2_l1linf:3.4219e+00 L3_l1linf:2.8438e+00 L4_l1linf:2.6406e+00 L5_l1linf:2.4531e+00 L6_l1linf:2.2969e+00 L7_l1linf:2.2500e+00 L8_l1linf:2.2188e+00 L9_l1linf:2.2500e+00 L10_l1linf:2.2656e+00 L11_l1linf:2.2812e+00 L12_l1linf:2.2500e+00 L1_spectral:1.3313e-01 L2_spectral:1.3008e-01 L3_spectral:1.2971e-01 L4_spectral:1.2808e-01 L5_spectral:1.2779e-01 L6_spectral:1.2780e-01 L7_spectral:1.2726e-01 L8_spectral:1.2641e-01 L9_spectral:1.2618e-01 L10_spectral:1.2585e-01 L11_spectral:1.2578e-01 L12_spectral:1.2527e-01 train_time:60216ms step_avg:75.27ms +[2025-09-11 07:41:22] [Rank 0] step:801/10000 train_time:61278ms step_avg:76.50ms +[2025-09-11 07:41:22] [Rank 0] step:801/10000 train_time:61278ms step_avg:76.50ms +[2025-09-11 07:41:23] [Rank 0] step:821/10000 train_time:61912ms step_avg:75.41ms +[2025-09-11 07:41:23] [Rank 0] step:821/10000 train_time:61912ms step_avg:75.41ms +[2025-09-11 07:41:23] [Rank 0] step:841/10000 train_time:62557ms step_avg:74.38ms +[2025-09-11 07:41:23] [Rank 0] step:841/10000 train_time:62557ms step_avg:74.38ms +[2025-09-11 07:41:24] [Rank 0] step:861/10000 train_time:63204ms step_avg:73.41ms +[2025-09-11 07:41:24] [Rank 0] step:861/10000 train_time:63204ms step_avg:73.41ms +[2025-09-11 07:41:25] [Rank 0] step:881/10000 train_time:63849ms step_avg:72.47ms +[2025-09-11 07:41:25] [Rank 0] step:881/10000 train_time:63849ms step_avg:72.47ms +[2025-09-11 07:41:25] [Rank 0] step:901/10000 train_time:64493ms step_avg:71.58ms +[2025-09-11 07:41:25] [Rank 0] step:901/10000 train_time:64493ms step_avg:71.58ms +[2025-09-11 07:41:26] [Rank 0] 
step:921/10000 train_time:65137ms step_avg:70.72ms +[2025-09-11 07:41:26] [Rank 0] step:921/10000 train_time:65137ms step_avg:70.72ms +[2025-09-11 07:41:27] [Rank 0] step:941/10000 train_time:65782ms step_avg:69.91ms +[2025-09-11 07:41:27] [Rank 0] step:941/10000 train_time:65782ms step_avg:69.91ms +[2025-09-11 07:41:27] [Rank 0] step:961/10000 train_time:66426ms step_avg:69.12ms +[2025-09-11 07:41:27] [Rank 0] step:961/10000 train_time:66426ms step_avg:69.12ms +[2025-09-11 07:41:28] [Rank 0] step:981/10000 train_time:67069ms step_avg:68.37ms +[2025-09-11 07:41:28] [Rank 0] step:981/10000 train_time:67069ms step_avg:68.37ms +[2025-09-11 07:41:29] [Rank 0] step:1001/10000 train_time:67712ms step_avg:67.64ms +[2025-09-11 07:41:29] [Rank 0] step:1001/10000 train_time:67712ms step_avg:67.64ms +[2025-09-11 07:41:29] [Rank 0] step:1021/10000 train_time:68356ms step_avg:66.95ms +[2025-09-11 07:41:29] [Rank 0] step:1021/10000 train_time:68356ms step_avg:66.95ms +[2025-09-11 07:41:30] [Rank 0] step:1041/10000 train_time:69000ms step_avg:66.28ms +[2025-09-11 07:41:30] [Rank 0] step:1041/10000 train_time:69000ms step_avg:66.28ms +[2025-09-11 07:41:31] [Rank 0] step:1061/10000 train_time:69645ms step_avg:65.64ms +[2025-09-11 07:41:31] [Rank 0] step:1061/10000 train_time:69645ms step_avg:65.64ms +[2025-09-11 07:41:31] [Rank 0] step:1081/10000 train_time:70289ms step_avg:65.02ms +[2025-09-11 07:41:31] [Rank 0] step:1081/10000 train_time:70289ms step_avg:65.02ms +[2025-09-11 07:41:32] [Rank 0] step:1101/10000 train_time:70933ms step_avg:64.43ms +[2025-09-11 07:41:32] [Rank 0] step:1101/10000 train_time:70933ms step_avg:64.43ms +[2025-09-11 07:41:32] [Rank 0] step:1121/10000 train_time:71576ms step_avg:63.85ms +[2025-09-11 07:41:32] [Rank 0] step:1121/10000 train_time:71576ms step_avg:63.85ms +[2025-09-11 07:41:33] [Rank 0] step:1141/10000 train_time:72219ms step_avg:63.29ms +[2025-09-11 07:41:33] [Rank 0] step:1141/10000 train_time:72219ms step_avg:63.29ms +[2025-09-11 07:41:34] 
[Rank 0] step:1161/10000 train_time:72862ms step_avg:62.76ms +[2025-09-11 07:41:34] [Rank 0] step:1161/10000 train_time:72862ms step_avg:62.76ms +[2025-09-11 07:41:34] [Rank 0] step:1181/10000 train_time:73507ms step_avg:62.24ms +[2025-09-11 07:41:34] [Rank 0] step:1181/10000 train_time:73507ms step_avg:62.24ms +[2025-09-11 07:41:35] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 07:41:35] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 07:41:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 07:41:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 07:41:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 07:41:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 07:41:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:41:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:41:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 07:41:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 07:41:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 07:41:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 07:41:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 07:41:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 07:41:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... 
+[2025-09-11 07:41:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 07:41:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 07:41:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 07:41:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 07:41:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 07:41:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 07:41:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 07:41:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 07:41:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 07:41:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 07:41:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 07:41:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 07:41:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 07:41:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 07:41:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 07:41:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 07:41:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 07:41:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 07:41:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 07:41:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... 
+[2025-09-11 07:41:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 07:41:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 07:41:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 07:41:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 07:41:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 07:41:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 07:41:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 07:41:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 07:41:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 07:41:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 07:41:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:41:45] [Rank 0] PRINT: step:1200/10000 val_loss:6.4823 total_sharp:8.2844e-04 L1_sharp:3.3273e-04 L2_sharp:3.2093e-04 L3_sharp:2.3323e-04 L4_sharp:1.0316e-04 L5_sharp:8.5268e-05 L6_sharp:5.9270e-05 L7_sharp:6.5329e-05 L8_sharp:8.0918e-05 L9_sharp:1.7920e-05 L10_sharp:7.8784e-05 L11_sharp:2.2750e-04 L12_sharp:1.1864e-04 total_fnorm:3.6000e+01 total_l1_linf:1.0086e+05 total_spectral:1.7875e+01 L1_fnorm:1.2562e+01 L2_fnorm:1.2000e+01 L3_fnorm:1.1625e+01 L4_fnorm:1.1250e+01 L5_fnorm:1.0625e+01 L6_fnorm:1.0375e+01 L7_fnorm:9.9375e+00 L8_fnorm:9.4375e+00 L9_fnorm:9.4375e+00 L10_fnorm:8.6875e+00 L11_fnorm:8.3750e+00 L12_fnorm:8.1250e+00 L1_l1linf:3.5625e+00 L2_l1linf:3.6094e+00 L3_l1linf:3.3594e+00 L4_l1linf:3.2031e+00 L5_l1linf:3.1094e+00 L6_l1linf:3.0469e+00 L7_l1linf:2.8906e+00 L8_l1linf:2.7500e+00 L9_l1linf:2.8281e+00 L10_l1linf:2.2344e+00 L11_l1linf:2.2031e+00 L12_l1linf:2.2656e+00 L1_spectral:1.3946e-01 L2_spectral:1.3459e-01 L3_spectral:1.3436e-01 L4_spectral:1.3314e-01 L5_spectral:1.3375e-01 L6_spectral:1.3455e-01 L7_spectral:1.3411e-01 L8_spectral:1.3293e-01 L9_spectral:1.3285e-01 L10_spectral:1.3191e-01 L11_spectral:1.3161e-01 L12_spectral:1.3300e-01 train_time:74133ms step_avg:61.78ms +[2025-09-11 07:41:45] [Rank 0] PRINT: step:1200/10000 val_loss:6.4823 total_sharp:8.2844e-04 L1_sharp:3.3273e-04 L2_sharp:3.2093e-04 L3_sharp:2.3323e-04 L4_sharp:1.0316e-04 L5_sharp:8.5268e-05 L6_sharp:5.9270e-05 L7_sharp:6.5329e-05 L8_sharp:8.0918e-05 L9_sharp:1.7920e-05 L10_sharp:7.8784e-05 L11_sharp:2.2750e-04 L12_sharp:1.1864e-04 total_fnorm:3.6000e+01 total_l1_linf:1.0086e+05 total_spectral:1.7875e+01 L1_fnorm:1.2562e+01 L2_fnorm:1.2000e+01 L3_fnorm:1.1625e+01 L4_fnorm:1.1250e+01 L5_fnorm:1.0625e+01 L6_fnorm:1.0375e+01 L7_fnorm:9.9375e+00 L8_fnorm:9.4375e+00 L9_fnorm:9.4375e+00 L10_fnorm:8.6875e+00 L11_fnorm:8.3750e+00 L12_fnorm:8.1250e+00 L1_l1linf:3.5625e+00 L2_l1linf:3.6094e+00 L3_l1linf:3.3594e+00 L4_l1linf:3.2031e+00 L5_l1linf:3.1094e+00 
L6_l1linf:3.0469e+00 L7_l1linf:2.8906e+00 L8_l1linf:2.7500e+00 L9_l1linf:2.8281e+00 L10_l1linf:2.2344e+00 L11_l1linf:2.2031e+00 L12_l1linf:2.2656e+00 L1_spectral:1.3946e-01 L2_spectral:1.3459e-01 L3_spectral:1.3436e-01 L4_spectral:1.3314e-01 L5_spectral:1.3375e-01 L6_spectral:1.3455e-01 L7_spectral:1.3411e-01 L8_spectral:1.3293e-01 L9_spectral:1.3285e-01 L10_spectral:1.3191e-01 L11_spectral:1.3161e-01 L12_spectral:1.3300e-01 train_time:74133ms step_avg:61.78ms +[2025-09-11 07:41:46] [Rank 0] step:1201/10000 train_time:75261ms step_avg:62.67ms +[2025-09-11 07:41:46] [Rank 0] step:1201/10000 train_time:75261ms step_avg:62.67ms +[2025-09-11 07:41:47] [Rank 0] step:1221/10000 train_time:76054ms step_avg:62.29ms +[2025-09-11 07:41:47] [Rank 0] step:1221/10000 train_time:76054ms step_avg:62.29ms +[2025-09-11 07:41:48] [Rank 0] step:1241/10000 train_time:76698ms step_avg:61.80ms +[2025-09-11 07:41:48] [Rank 0] step:1241/10000 train_time:76698ms step_avg:61.80ms +[2025-09-11 07:41:49] [Rank 0] step:1261/10000 train_time:77656ms step_avg:61.58ms +[2025-09-11 07:41:49] [Rank 0] step:1261/10000 train_time:77656ms step_avg:61.58ms +[2025-09-11 07:41:49] [Rank 0] step:1281/10000 train_time:78301ms step_avg:61.12ms +[2025-09-11 07:41:49] [Rank 0] step:1281/10000 train_time:78301ms step_avg:61.12ms +[2025-09-11 07:41:50] [Rank 0] step:1301/10000 train_time:78945ms step_avg:60.68ms +[2025-09-11 07:41:50] [Rank 0] step:1301/10000 train_time:78945ms step_avg:60.68ms +[2025-09-11 07:41:51] [Rank 0] step:1321/10000 train_time:79591ms step_avg:60.25ms +[2025-09-11 07:41:51] [Rank 0] step:1321/10000 train_time:79591ms step_avg:60.25ms +[2025-09-11 07:41:51] [Rank 0] step:1341/10000 train_time:80234ms step_avg:59.83ms +[2025-09-11 07:41:51] [Rank 0] step:1341/10000 train_time:80234ms step_avg:59.83ms +[2025-09-11 07:41:52] [Rank 0] step:1361/10000 train_time:80879ms step_avg:59.43ms +[2025-09-11 07:41:52] [Rank 0] step:1361/10000 train_time:80879ms step_avg:59.43ms +[2025-09-11 07:41:53] 
[Rank 0] step:1381/10000 train_time:81522ms step_avg:59.03ms +[2025-09-11 07:41:53] [Rank 0] step:1381/10000 train_time:81522ms step_avg:59.03ms +[2025-09-11 07:41:53] [Rank 0] step:1401/10000 train_time:82166ms step_avg:58.65ms +[2025-09-11 07:41:53] [Rank 0] step:1401/10000 train_time:82166ms step_avg:58.65ms +[2025-09-11 07:41:54] [Rank 0] step:1421/10000 train_time:82810ms step_avg:58.28ms +[2025-09-11 07:41:54] [Rank 0] step:1421/10000 train_time:82810ms step_avg:58.28ms +[2025-09-11 07:41:55] [Rank 0] step:1441/10000 train_time:83453ms step_avg:57.91ms +[2025-09-11 07:41:55] [Rank 0] step:1441/10000 train_time:83453ms step_avg:57.91ms +[2025-09-11 07:41:55] [Rank 0] step:1461/10000 train_time:84096ms step_avg:57.56ms +[2025-09-11 07:41:55] [Rank 0] step:1461/10000 train_time:84096ms step_avg:57.56ms +[2025-09-11 07:41:56] [Rank 0] step:1481/10000 train_time:84740ms step_avg:57.22ms +[2025-09-11 07:41:56] [Rank 0] step:1481/10000 train_time:84740ms step_avg:57.22ms +[2025-09-11 07:41:57] [Rank 0] step:1501/10000 train_time:85387ms step_avg:56.89ms +[2025-09-11 07:41:57] [Rank 0] step:1501/10000 train_time:85387ms step_avg:56.89ms +[2025-09-11 07:41:57] [Rank 0] step:1521/10000 train_time:86035ms step_avg:56.56ms +[2025-09-11 07:41:57] [Rank 0] step:1521/10000 train_time:86035ms step_avg:56.56ms +[2025-09-11 07:41:58] [Rank 0] step:1541/10000 train_time:86682ms step_avg:56.25ms +[2025-09-11 07:41:58] [Rank 0] step:1541/10000 train_time:86682ms step_avg:56.25ms +[2025-09-11 07:41:58] [Rank 0] step:1561/10000 train_time:87330ms step_avg:55.94ms +[2025-09-11 07:41:58] [Rank 0] step:1561/10000 train_time:87330ms step_avg:55.94ms +[2025-09-11 07:41:59] [Rank 0] step:1581/10000 train_time:87977ms step_avg:55.65ms +[2025-09-11 07:41:59] [Rank 0] step:1581/10000 train_time:87977ms step_avg:55.65ms +[2025-09-11 07:42:00] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 07:42:00] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 07:42:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 07:42:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 07:42:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 07:42:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 07:42:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:42:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:42:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 07:42:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 07:42:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 07:42:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 07:42:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 07:42:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 07:42:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 07:42:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 07:42:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 07:42:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 07:42:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 07:42:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 07:42:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 07:42:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 07:42:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 07:42:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 07:42:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 07:42:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 07:42:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 07:42:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 07:42:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 07:42:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 07:42:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 07:42:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 07:42:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 07:42:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 07:42:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 07:42:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 07:42:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 07:42:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 07:42:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 07:42:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 07:42:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 07:42:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 07:42:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 07:42:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 07:42:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 07:42:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 07:42:10] [Rank 0] PRINT: step:1600/10000 val_loss:6.3251 total_sharp:6.3794e-04 L1_sharp:3.5483e-04 L2_sharp:2.1045e-04 L3_sharp:1.3851e-04 L4_sharp:5.9533e-05 L5_sharp:5.7247e-05 L6_sharp:3.7834e-05 L7_sharp:4.2343e-05 L8_sharp:5.2698e-05 L9_sharp:1.2765e-05 L10_sharp:7.4434e-05 L11_sharp:2.0143e-04 L12_sharp:1.8314e-04 total_fnorm:3.7750e+01 total_l1_linf:1.0547e+05 total_spectral:1.9000e+01 L1_fnorm:1.2625e+01 L2_fnorm:1.2188e+01 L3_fnorm:1.2000e+01 L4_fnorm:1.1750e+01 L5_fnorm:1.1250e+01 L6_fnorm:1.1250e+01 L7_fnorm:1.1000e+01 L8_fnorm:1.0438e+01 L9_fnorm:1.0438e+01 L10_fnorm:9.6875e+00 L11_fnorm:9.3125e+00 L12_fnorm:8.6875e+00 L1_l1linf:3.5312e+00 L2_l1linf:3.5625e+00 L3_l1linf:3.3750e+00 L4_l1linf:3.2812e+00 L5_l1linf:3.2656e+00 L6_l1linf:3.1719e+00 L7_l1linf:3.1406e+00 L8_l1linf:2.9844e+00 L9_l1linf:3.0000e+00 L10_l1linf:2.3438e+00 L11_l1linf:2.1250e+00 L12_l1linf:2.1875e+00 L1_spectral:1.4414e-01 L2_spectral:1.3959e-01 L3_spectral:1.3869e-01 L4_spectral:1.3812e-01 L5_spectral:1.3690e-01 L6_spectral:1.3781e-01 L7_spectral:1.3778e-01 L8_spectral:1.3733e-01 L9_spectral:1.3733e-01 L10_spectral:1.3592e-01 L11_spectral:1.3579e-01 L12_spectral:1.3662e-01 train_time:88607ms step_avg:55.38ms +[2025-09-11 07:42:10] [Rank 0] PRINT: step:1600/10000 
val_loss:6.3251 total_sharp:6.3794e-04 L1_sharp:3.5483e-04 L2_sharp:2.1045e-04 L3_sharp:1.3851e-04 L4_sharp:5.9533e-05 L5_sharp:5.7247e-05 L6_sharp:3.7834e-05 L7_sharp:4.2343e-05 L8_sharp:5.2698e-05 L9_sharp:1.2765e-05 L10_sharp:7.4434e-05 L11_sharp:2.0143e-04 L12_sharp:1.8314e-04 total_fnorm:3.7750e+01 total_l1_linf:1.0547e+05 total_spectral:1.9000e+01 L1_fnorm:1.2625e+01 L2_fnorm:1.2188e+01 L3_fnorm:1.2000e+01 L4_fnorm:1.1750e+01 L5_fnorm:1.1250e+01 L6_fnorm:1.1250e+01 L7_fnorm:1.1000e+01 L8_fnorm:1.0438e+01 L9_fnorm:1.0438e+01 L10_fnorm:9.6875e+00 L11_fnorm:9.3125e+00 L12_fnorm:8.6875e+00 L1_l1linf:3.5312e+00 L2_l1linf:3.5625e+00 L3_l1linf:3.3750e+00 L4_l1linf:3.2812e+00 L5_l1linf:3.2656e+00 L6_l1linf:3.1719e+00 L7_l1linf:3.1406e+00 L8_l1linf:2.9844e+00 L9_l1linf:3.0000e+00 L10_l1linf:2.3438e+00 L11_l1linf:2.1250e+00 L12_l1linf:2.1875e+00 L1_spectral:1.4414e-01 L2_spectral:1.3959e-01 L3_spectral:1.3869e-01 L4_spectral:1.3812e-01 L5_spectral:1.3690e-01 L6_spectral:1.3781e-01 L7_spectral:1.3778e-01 L8_spectral:1.3733e-01 L9_spectral:1.3733e-01 L10_spectral:1.3592e-01 L11_spectral:1.3579e-01 L12_spectral:1.3662e-01 train_time:88607ms step_avg:55.38ms +[2025-09-11 07:42:11] [Rank 0] step:1601/10000 train_time:89697ms step_avg:56.03ms +[2025-09-11 07:42:11] [Rank 0] step:1601/10000 train_time:89697ms step_avg:56.03ms +[2025-09-11 07:42:11] [Rank 0] step:1621/10000 train_time:90333ms step_avg:55.73ms +[2025-09-11 07:42:11] [Rank 0] step:1621/10000 train_time:90333ms step_avg:55.73ms +[2025-09-11 07:42:12] [Rank 0] step:1641/10000 train_time:90982ms step_avg:55.44ms +[2025-09-11 07:42:12] [Rank 0] step:1641/10000 train_time:90982ms step_avg:55.44ms +[2025-09-11 07:42:13] [Rank 0] step:1661/10000 train_time:91631ms step_avg:55.17ms +[2025-09-11 07:42:13] [Rank 0] step:1661/10000 train_time:91631ms step_avg:55.17ms +[2025-09-11 07:42:13] [Rank 0] step:1681/10000 train_time:92280ms step_avg:54.90ms +[2025-09-11 07:42:13] [Rank 0] step:1681/10000 train_time:92280ms 
step_avg:54.90ms +[2025-09-11 07:42:14] [Rank 0] step:1701/10000 train_time:92929ms step_avg:54.63ms +[2025-09-11 07:42:14] [Rank 0] step:1701/10000 train_time:92929ms step_avg:54.63ms +[2025-09-11 07:42:15] [Rank 0] step:1721/10000 train_time:93577ms step_avg:54.37ms +[2025-09-11 07:42:15] [Rank 0] step:1721/10000 train_time:93577ms step_avg:54.37ms +[2025-09-11 07:42:15] [Rank 0] step:1741/10000 train_time:94224ms step_avg:54.12ms +[2025-09-11 07:42:15] [Rank 0] step:1741/10000 train_time:94224ms step_avg:54.12ms +[2025-09-11 07:42:16] [Rank 0] step:1761/10000 train_time:94872ms step_avg:53.87ms +[2025-09-11 07:42:16] [Rank 0] step:1761/10000 train_time:94872ms step_avg:53.87ms +[2025-09-11 07:42:17] [Rank 0] step:1781/10000 train_time:95520ms step_avg:53.63ms +[2025-09-11 07:42:17] [Rank 0] step:1781/10000 train_time:95520ms step_avg:53.63ms +[2025-09-11 07:42:17] [Rank 0] step:1801/10000 train_time:96167ms step_avg:53.40ms +[2025-09-11 07:42:17] [Rank 0] step:1801/10000 train_time:96167ms step_avg:53.40ms +[2025-09-11 07:42:18] [Rank 0] step:1821/10000 train_time:96816ms step_avg:53.17ms +[2025-09-11 07:42:18] [Rank 0] step:1821/10000 train_time:96816ms step_avg:53.17ms +[2025-09-11 07:42:19] [Rank 0] step:1841/10000 train_time:97463ms step_avg:52.94ms +[2025-09-11 07:42:19] [Rank 0] step:1841/10000 train_time:97463ms step_avg:52.94ms +[2025-09-11 07:42:19] [Rank 0] step:1861/10000 train_time:98111ms step_avg:52.72ms +[2025-09-11 07:42:19] [Rank 0] step:1861/10000 train_time:98111ms step_avg:52.72ms +[2025-09-11 07:42:20] [Rank 0] step:1881/10000 train_time:98758ms step_avg:52.50ms +[2025-09-11 07:42:20] [Rank 0] step:1881/10000 train_time:98758ms step_avg:52.50ms +[2025-09-11 07:42:20] [Rank 0] step:1901/10000 train_time:99405ms step_avg:52.29ms +[2025-09-11 07:42:20] [Rank 0] step:1901/10000 train_time:99405ms step_avg:52.29ms +[2025-09-11 07:42:21] [Rank 0] step:1921/10000 train_time:100052ms step_avg:52.08ms +[2025-09-11 07:42:21] [Rank 0] step:1921/10000 
train_time:100052ms step_avg:52.08ms +[2025-09-11 07:42:22] [Rank 0] step:1941/10000 train_time:100700ms step_avg:51.88ms +[2025-09-11 07:42:22] [Rank 0] step:1941/10000 train_time:100700ms step_avg:51.88ms +[2025-09-11 07:42:22] [Rank 0] step:1961/10000 train_time:101347ms step_avg:51.68ms +[2025-09-11 07:42:22] [Rank 0] step:1961/10000 train_time:101347ms step_avg:51.68ms +[2025-09-11 07:42:23] [Rank 0] step:1981/10000 train_time:101995ms step_avg:51.49ms +[2025-09-11 07:42:23] [Rank 0] step:1981/10000 train_time:101995ms step_avg:51.49ms +[2025-09-11 07:42:24] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 07:42:24] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 07:42:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 07:42:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 07:42:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 07:42:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 07:42:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:42:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:42:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 07:42:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 07:42:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 07:42:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 07:42:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 07:42:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 07:42:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 07:42:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 07:42:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 07:42:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 07:42:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 07:42:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 07:42:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 07:42:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 07:42:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 07:42:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 07:42:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 07:42:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 07:42:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 07:42:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 07:42:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 07:42:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 07:42:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 07:42:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 07:42:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 07:42:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 07:42:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 07:42:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 07:42:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 07:42:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 07:42:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 07:42:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 07:42:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 07:42:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 07:42:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 07:42:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 07:42:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 07:42:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:42:34] [Rank 0] PRINT: step:2000/10000 val_loss:6.1730 total_sharp:5.6053e-04 L1_sharp:1.9508e-04 L2_sharp:1.4079e-04 L3_sharp:8.9465e-05 L4_sharp:4.2006e-05 L5_sharp:6.7914e-05 L6_sharp:3.4026e-05 L7_sharp:4.3911e-05 L8_sharp:7.5149e-05 L9_sharp:3.5148e-05 L10_sharp:9.9448e-05 L11_sharp:2.2252e-04 L12_sharp:2.3765e-04 total_fnorm:3.9000e+01 total_l1_linf:1.0752e+05 total_spectral:1.9625e+01 L1_fnorm:1.2688e+01 L2_fnorm:1.2312e+01 L3_fnorm:1.2188e+01 L4_fnorm:1.1938e+01 L5_fnorm:1.1375e+01 L6_fnorm:1.1688e+01 L7_fnorm:1.1562e+01 L8_fnorm:1.0938e+01 L9_fnorm:1.1062e+01 L10_fnorm:1.0250e+01 L11_fnorm:1.0125e+01 L12_fnorm:9.1250e+00 L1_l1linf:3.4062e+00 L2_l1linf:3.4531e+00 L3_l1linf:3.2969e+00 L4_l1linf:3.1875e+00 L5_l1linf:3.1719e+00 L6_l1linf:3.1250e+00 L7_l1linf:3.1562e+00 L8_l1linf:3.0938e+00 L9_l1linf:3.0781e+00 L10_l1linf:2.3281e+00 L11_l1linf:2.1250e+00 L12_l1linf:2.2031e+00 L1_spectral:1.4701e-01 L2_spectral:1.4365e-01 L3_spectral:1.4244e-01 L4_spectral:1.4185e-01 L5_spectral:1.3919e-01 L6_spectral:1.3907e-01 L7_spectral:1.3990e-01 L8_spectral:1.4063e-01 L9_spectral:1.4065e-01 L10_spectral:1.3962e-01 L11_spectral:1.3937e-01 L12_spectral:1.4034e-01 train_time:102627ms step_avg:51.31ms +[2025-09-11 07:42:34] [Rank 0] PRINT: step:2000/10000 val_loss:6.1730 total_sharp:5.6053e-04 L1_sharp:1.9508e-04 L2_sharp:1.4079e-04 L3_sharp:8.9465e-05 L4_sharp:4.2006e-05 L5_sharp:6.7914e-05 L6_sharp:3.4026e-05 L7_sharp:4.3911e-05 L8_sharp:7.5149e-05 L9_sharp:3.5148e-05 L10_sharp:9.9448e-05 L11_sharp:2.2252e-04 L12_sharp:2.3765e-04 total_fnorm:3.9000e+01 total_l1_linf:1.0752e+05 total_spectral:1.9625e+01 L1_fnorm:1.2688e+01 L2_fnorm:1.2312e+01 L3_fnorm:1.2188e+01 L4_fnorm:1.1938e+01 L5_fnorm:1.1375e+01 L6_fnorm:1.1688e+01 L7_fnorm:1.1562e+01 L8_fnorm:1.0938e+01 L9_fnorm:1.1062e+01 L10_fnorm:1.0250e+01 L11_fnorm:1.0125e+01 L12_fnorm:9.1250e+00 L1_l1linf:3.4062e+00 L2_l1linf:3.4531e+00 L3_l1linf:3.2969e+00 L4_l1linf:3.1875e+00 L5_l1linf:3.1719e+00 
L6_l1linf:3.1250e+00 L7_l1linf:3.1562e+00 L8_l1linf:3.0938e+00 L9_l1linf:3.0781e+00 L10_l1linf:2.3281e+00 L11_l1linf:2.1250e+00 L12_l1linf:2.2031e+00 L1_spectral:1.4701e-01 L2_spectral:1.4365e-01 L3_spectral:1.4244e-01 L4_spectral:1.4185e-01 L5_spectral:1.3919e-01 L6_spectral:1.3907e-01 L7_spectral:1.3990e-01 L8_spectral:1.4063e-01 L9_spectral:1.4065e-01 L10_spectral:1.3962e-01 L11_spectral:1.3937e-01 L12_spectral:1.4034e-01 train_time:102627ms step_avg:51.31ms +[2025-09-11 07:42:35] [Rank 0] step:2001/10000 train_time:103738ms step_avg:51.84ms +[2025-09-11 07:42:35] [Rank 0] step:2001/10000 train_time:103738ms step_avg:51.84ms +[2025-09-11 07:42:35] [Rank 0] step:2021/10000 train_time:104374ms step_avg:51.64ms +[2025-09-11 07:42:35] [Rank 0] step:2021/10000 train_time:104374ms step_avg:51.64ms +[2025-09-11 07:42:36] [Rank 0] step:2041/10000 train_time:105022ms step_avg:51.46ms +[2025-09-11 07:42:36] [Rank 0] step:2041/10000 train_time:105022ms step_avg:51.46ms +[2025-09-11 07:42:37] [Rank 0] step:2061/10000 train_time:105670ms step_avg:51.27ms +[2025-09-11 07:42:37] [Rank 0] step:2061/10000 train_time:105670ms step_avg:51.27ms +[2025-09-11 07:42:37] [Rank 0] step:2081/10000 train_time:106317ms step_avg:51.09ms +[2025-09-11 07:42:37] [Rank 0] step:2081/10000 train_time:106317ms step_avg:51.09ms +[2025-09-11 07:42:38] [Rank 0] step:2101/10000 train_time:106963ms step_avg:50.91ms +[2025-09-11 07:42:38] [Rank 0] step:2101/10000 train_time:106963ms step_avg:50.91ms +[2025-09-11 07:42:39] [Rank 0] step:2121/10000 train_time:107610ms step_avg:50.74ms +[2025-09-11 07:42:39] [Rank 0] step:2121/10000 train_time:107610ms step_avg:50.74ms +[2025-09-11 07:42:39] [Rank 0] step:2141/10000 train_time:108256ms step_avg:50.56ms +[2025-09-11 07:42:39] [Rank 0] step:2141/10000 train_time:108256ms step_avg:50.56ms +[2025-09-11 07:42:40] [Rank 0] step:2161/10000 train_time:108903ms step_avg:50.39ms +[2025-09-11 07:42:40] [Rank 0] step:2161/10000 train_time:108903ms step_avg:50.39ms 
+[2025-09-11 07:42:41] [Rank 0] step:2181/10000 train_time:109551ms step_avg:50.23ms +[2025-09-11 07:42:41] [Rank 0] step:2181/10000 train_time:109551ms step_avg:50.23ms +[2025-09-11 07:42:41] [Rank 0] step:2201/10000 train_time:110198ms step_avg:50.07ms +[2025-09-11 07:42:41] [Rank 0] step:2201/10000 train_time:110198ms step_avg:50.07ms +[2025-09-11 07:42:42] [Rank 0] step:2221/10000 train_time:110845ms step_avg:49.91ms +[2025-09-11 07:42:42] [Rank 0] step:2221/10000 train_time:110845ms step_avg:49.91ms +[2025-09-11 07:42:42] [Rank 0] step:2241/10000 train_time:111506ms step_avg:49.76ms +[2025-09-11 07:42:42] [Rank 0] step:2241/10000 train_time:111506ms step_avg:49.76ms +[2025-09-11 07:42:43] [Rank 0] step:2261/10000 train_time:112166ms step_avg:49.61ms +[2025-09-11 07:42:43] [Rank 0] step:2261/10000 train_time:112166ms step_avg:49.61ms +[2025-09-11 07:42:44] [Rank 0] step:2281/10000 train_time:112826ms step_avg:49.46ms +[2025-09-11 07:42:44] [Rank 0] step:2281/10000 train_time:112826ms step_avg:49.46ms +[2025-09-11 07:42:44] [Rank 0] step:2301/10000 train_time:113486ms step_avg:49.32ms +[2025-09-11 07:42:44] [Rank 0] step:2301/10000 train_time:113486ms step_avg:49.32ms +[2025-09-11 07:42:45] [Rank 0] step:2321/10000 train_time:114147ms step_avg:49.18ms +[2025-09-11 07:42:45] [Rank 0] step:2321/10000 train_time:114147ms step_avg:49.18ms +[2025-09-11 07:42:46] [Rank 0] step:2341/10000 train_time:114807ms step_avg:49.04ms +[2025-09-11 07:42:46] [Rank 0] step:2341/10000 train_time:114807ms step_avg:49.04ms +[2025-09-11 07:42:46] [Rank 0] step:2361/10000 train_time:115467ms step_avg:48.91ms +[2025-09-11 07:42:46] [Rank 0] step:2361/10000 train_time:115467ms step_avg:48.91ms +[2025-09-11 07:42:47] [Rank 0] step:2381/10000 train_time:116127ms step_avg:48.77ms +[2025-09-11 07:42:47] [Rank 0] step:2381/10000 train_time:116127ms step_avg:48.77ms +[2025-09-11 07:42:48] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 07:42:48] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 07:42:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 07:42:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 07:42:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 07:42:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 07:42:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:42:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:42:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 07:42:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 07:42:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 07:42:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 07:42:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 07:42:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 07:42:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 07:42:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 07:42:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 07:42:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 07:42:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 07:42:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 07:42:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 07:42:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 07:42:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 07:42:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 07:42:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 07:42:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 07:42:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 07:42:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 07:42:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 07:42:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 07:42:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 07:42:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 07:42:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 07:42:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 07:42:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 07:42:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 07:42:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 07:42:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 07:42:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 07:42:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 07:42:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 07:42:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 07:42:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 07:42:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 07:42:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 07:42:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 07:42:57] [Rank 0] PRINT: step:2400/10000 val_loss:6.0351 total_sharp:3.8030e-04 L1_sharp:1.5491e-04 L2_sharp:1.0287e-04 L3_sharp:4.4813e-05 L4_sharp:3.0904e-05 L5_sharp:5.9291e-05 L6_sharp:2.1415e-05 L7_sharp:3.3178e-05 L8_sharp:5.2130e-05 L9_sharp:3.4901e-05 L10_sharp:6.6163e-05 L11_sharp:1.5090e-04 L12_sharp:1.3222e-04 total_fnorm:4.1000e+01 total_l1_linf:1.0854e+05 total_spectral:2.0125e+01 L1_fnorm:1.2750e+01 L2_fnorm:1.2312e+01 L3_fnorm:1.2250e+01 L4_fnorm:1.2062e+01 L5_fnorm:1.1562e+01 L6_fnorm:1.1938e+01 L7_fnorm:1.1875e+01 L8_fnorm:1.1375e+01 L9_fnorm:1.1562e+01 L10_fnorm:1.1188e+01 L11_fnorm:1.0750e+01 L12_fnorm:9.8125e+00 L1_l1linf:3.3125e+00 L2_l1linf:3.3750e+00 L3_l1linf:3.2031e+00 L4_l1linf:3.1094e+00 L5_l1linf:3.0469e+00 L6_l1linf:3.0156e+00 L7_l1linf:3.0625e+00 L8_l1linf:3.0625e+00 L9_l1linf:3.0156e+00 L10_l1linf:2.6875e+00 L11_l1linf:2.2500e+00 L12_l1linf:2.2500e+00 L1_spectral:1.5117e-01 L2_spectral:1.4648e-01 L3_spectral:1.4600e-01 L4_spectral:1.4502e-01 L5_spectral:1.4241e-01 L6_spectral:1.4265e-01 L7_spectral:1.4250e-01 L8_spectral:1.4228e-01 L9_spectral:1.4326e-01 L10_spectral:1.4229e-01 L11_spectral:1.4387e-01 L12_spectral:1.4403e-01 train_time:116769ms step_avg:48.65ms +[2025-09-11 07:42:57] [Rank 0] PRINT: step:2400/10000 
val_loss:6.0351 total_sharp:3.8030e-04 L1_sharp:1.5491e-04 L2_sharp:1.0287e-04 L3_sharp:4.4813e-05 L4_sharp:3.0904e-05 L5_sharp:5.9291e-05 L6_sharp:2.1415e-05 L7_sharp:3.3178e-05 L8_sharp:5.2130e-05 L9_sharp:3.4901e-05 L10_sharp:6.6163e-05 L11_sharp:1.5090e-04 L12_sharp:1.3222e-04 total_fnorm:4.1000e+01 total_l1_linf:1.0854e+05 total_spectral:2.0125e+01 L1_fnorm:1.2750e+01 L2_fnorm:1.2312e+01 L3_fnorm:1.2250e+01 L4_fnorm:1.2062e+01 L5_fnorm:1.1562e+01 L6_fnorm:1.1938e+01 L7_fnorm:1.1875e+01 L8_fnorm:1.1375e+01 L9_fnorm:1.1562e+01 L10_fnorm:1.1188e+01 L11_fnorm:1.0750e+01 L12_fnorm:9.8125e+00 L1_l1linf:3.3125e+00 L2_l1linf:3.3750e+00 L3_l1linf:3.2031e+00 L4_l1linf:3.1094e+00 L5_l1linf:3.0469e+00 L6_l1linf:3.0156e+00 L7_l1linf:3.0625e+00 L8_l1linf:3.0625e+00 L9_l1linf:3.0156e+00 L10_l1linf:2.6875e+00 L11_l1linf:2.2500e+00 L12_l1linf:2.2500e+00 L1_spectral:1.5117e-01 L2_spectral:1.4648e-01 L3_spectral:1.4600e-01 L4_spectral:1.4502e-01 L5_spectral:1.4241e-01 L6_spectral:1.4265e-01 L7_spectral:1.4250e-01 L8_spectral:1.4228e-01 L9_spectral:1.4326e-01 L10_spectral:1.4229e-01 L11_spectral:1.4387e-01 L12_spectral:1.4403e-01 train_time:116769ms step_avg:48.65ms +[2025-09-11 07:42:59] [Rank 0] step:2401/10000 train_time:117931ms step_avg:49.12ms +[2025-09-11 07:42:59] [Rank 0] step:2401/10000 train_time:117931ms step_avg:49.12ms +[2025-09-11 07:42:59] [Rank 0] step:2421/10000 train_time:118599ms step_avg:48.99ms +[2025-09-11 07:42:59] [Rank 0] step:2421/10000 train_time:118599ms step_avg:48.99ms +[2025-09-11 07:43:00] [Rank 0] step:2441/10000 train_time:119263ms step_avg:48.86ms +[2025-09-11 07:43:00] [Rank 0] step:2441/10000 train_time:119263ms step_avg:48.86ms +[2025-09-11 07:43:01] [Rank 0] step:2461/10000 train_time:119927ms step_avg:48.73ms +[2025-09-11 07:43:01] [Rank 0] step:2461/10000 train_time:119927ms step_avg:48.73ms +[2025-09-11 07:43:01] [Rank 0] step:2481/10000 train_time:120590ms step_avg:48.61ms +[2025-09-11 07:43:01] [Rank 0] step:2481/10000 
train_time:120590ms step_avg:48.61ms +[2025-09-11 07:43:02] [Rank 0] step:2501/10000 train_time:121254ms step_avg:48.48ms +[2025-09-11 07:43:02] [Rank 0] step:2501/10000 train_time:121254ms step_avg:48.48ms +[2025-09-11 07:43:02] [Rank 0] step:2521/10000 train_time:121919ms step_avg:48.36ms +[2025-09-11 07:43:02] [Rank 0] step:2521/10000 train_time:121919ms step_avg:48.36ms +[2025-09-11 07:43:03] [Rank 0] step:2541/10000 train_time:122582ms step_avg:48.24ms +[2025-09-11 07:43:03] [Rank 0] step:2541/10000 train_time:122582ms step_avg:48.24ms +[2025-09-11 07:43:04] [Rank 0] step:2561/10000 train_time:123246ms step_avg:48.12ms +[2025-09-11 07:43:04] [Rank 0] step:2561/10000 train_time:123246ms step_avg:48.12ms +[2025-09-11 07:43:04] [Rank 0] step:2581/10000 train_time:123910ms step_avg:48.01ms +[2025-09-11 07:43:04] [Rank 0] step:2581/10000 train_time:123910ms step_avg:48.01ms +[2025-09-11 07:43:05] [Rank 0] step:2601/10000 train_time:124573ms step_avg:47.89ms +[2025-09-11 07:43:05] [Rank 0] step:2601/10000 train_time:124573ms step_avg:47.89ms +[2025-09-11 07:43:06] [Rank 0] step:2621/10000 train_time:125237ms step_avg:47.78ms +[2025-09-11 07:43:06] [Rank 0] step:2621/10000 train_time:125237ms step_avg:47.78ms +[2025-09-11 07:43:06] [Rank 0] step:2641/10000 train_time:125900ms step_avg:47.67ms +[2025-09-11 07:43:06] [Rank 0] step:2641/10000 train_time:125900ms step_avg:47.67ms +[2025-09-11 07:43:07] [Rank 0] step:2661/10000 train_time:126562ms step_avg:47.56ms +[2025-09-11 07:43:07] [Rank 0] step:2661/10000 train_time:126562ms step_avg:47.56ms +[2025-09-11 07:43:08] [Rank 0] step:2681/10000 train_time:127226ms step_avg:47.45ms +[2025-09-11 07:43:08] [Rank 0] step:2681/10000 train_time:127226ms step_avg:47.45ms +[2025-09-11 07:43:08] [Rank 0] step:2701/10000 train_time:127889ms step_avg:47.35ms +[2025-09-11 07:43:08] [Rank 0] step:2701/10000 train_time:127889ms step_avg:47.35ms +[2025-09-11 07:43:09] [Rank 0] step:2721/10000 train_time:128552ms step_avg:47.24ms 
+[2025-09-11 07:43:09] [Rank 0] step:2721/10000 train_time:128552ms step_avg:47.24ms +[2025-09-11 07:43:10] [Rank 0] step:2741/10000 train_time:129216ms step_avg:47.14ms +[2025-09-11 07:43:10] [Rank 0] step:2741/10000 train_time:129216ms step_avg:47.14ms +[2025-09-11 07:43:10] [Rank 0] step:2761/10000 train_time:129879ms step_avg:47.04ms +[2025-09-11 07:43:10] [Rank 0] step:2761/10000 train_time:129879ms step_avg:47.04ms +[2025-09-11 07:43:11] [Rank 0] step:2781/10000 train_time:130544ms step_avg:46.94ms +[2025-09-11 07:43:11] [Rank 0] step:2781/10000 train_time:130544ms step_avg:46.94ms +[2025-09-11 07:43:12] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 07:43:12] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 07:43:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 07:43:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 07:43:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 07:43:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 07:43:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:43:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:43:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 07:43:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 07:43:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 07:43:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 07:43:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 07:43:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 07:43:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 07:43:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 07:43:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 07:43:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 07:43:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 07:43:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 07:43:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 07:43:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 07:43:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 07:43:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 07:43:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 07:43:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 07:43:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 07:43:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 07:43:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 07:43:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 07:43:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 07:43:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 07:43:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 07:43:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 07:43:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 07:43:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 07:43:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 07:43:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 07:43:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 07:43:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 07:43:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 07:43:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 07:43:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 07:43:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 07:43:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 07:43:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:43:21] [Rank 0] PRINT: step:2800/10000 val_loss:5.9426 total_sharp:4.8316e-04 L1_sharp:1.6432e-04 L2_sharp:1.1465e-04 L3_sharp:7.4283e-05 L4_sharp:3.1326e-05 L5_sharp:5.1215e-05 L6_sharp:3.2346e-05 L7_sharp:3.8128e-05 L8_sharp:6.1439e-05 L9_sharp:3.6061e-05 L10_sharp:6.6056e-05 L11_sharp:2.0815e-04 L12_sharp:2.7626e-04 total_fnorm:4.1000e+01 total_l1_linf:1.0701e+05 total_spectral:2.0250e+01 L1_fnorm:1.2750e+01 L2_fnorm:1.2375e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2188e+01 L5_fnorm:1.1625e+01 L6_fnorm:1.2062e+01 L7_fnorm:1.1938e+01 L8_fnorm:1.1438e+01 L9_fnorm:1.1688e+01 L10_fnorm:1.1375e+01 L11_fnorm:1.0812e+01 L12_fnorm:9.5625e+00 L1_l1linf:3.2969e+00 L2_l1linf:3.3281e+00 L3_l1linf:3.1406e+00 L4_l1linf:3.0469e+00 L5_l1linf:3.0156e+00 L6_l1linf:2.9844e+00 L7_l1linf:3.0000e+00 L8_l1linf:3.0625e+00 L9_l1linf:3.0625e+00 L10_l1linf:2.6875e+00 L11_l1linf:2.2344e+00 L12_l1linf:2.1719e+00 L1_spectral:1.5300e-01 L2_spectral:1.4866e-01 L3_spectral:1.4795e-01 L4_spectral:1.4692e-01 L5_spectral:1.4527e-01 L6_spectral:1.4593e-01 L7_spectral:1.4474e-01 L8_spectral:1.4418e-01 L9_spectral:1.4460e-01 L10_spectral:1.4479e-01 L11_spectral:1.4588e-01 L12_spectral:1.4467e-01 train_time:131189ms step_avg:46.85ms +[2025-09-11 07:43:21] [Rank 0] PRINT: step:2800/10000 val_loss:5.9426 total_sharp:4.8316e-04 L1_sharp:1.6432e-04 L2_sharp:1.1465e-04 L3_sharp:7.4283e-05 L4_sharp:3.1326e-05 L5_sharp:5.1215e-05 L6_sharp:3.2346e-05 L7_sharp:3.8128e-05 L8_sharp:6.1439e-05 L9_sharp:3.6061e-05 L10_sharp:6.6056e-05 L11_sharp:2.0815e-04 L12_sharp:2.7626e-04 total_fnorm:4.1000e+01 total_l1_linf:1.0701e+05 total_spectral:2.0250e+01 L1_fnorm:1.2750e+01 L2_fnorm:1.2375e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2188e+01 L5_fnorm:1.1625e+01 L6_fnorm:1.2062e+01 L7_fnorm:1.1938e+01 L8_fnorm:1.1438e+01 L9_fnorm:1.1688e+01 L10_fnorm:1.1375e+01 L11_fnorm:1.0812e+01 L12_fnorm:9.5625e+00 L1_l1linf:3.2969e+00 L2_l1linf:3.3281e+00 L3_l1linf:3.1406e+00 L4_l1linf:3.0469e+00 L5_l1linf:3.0156e+00 
L6_l1linf:2.9844e+00 L7_l1linf:3.0000e+00 L8_l1linf:3.0625e+00 L9_l1linf:3.0625e+00 L10_l1linf:2.6875e+00 L11_l1linf:2.2344e+00 L12_l1linf:2.1719e+00 L1_spectral:1.5300e-01 L2_spectral:1.4866e-01 L3_spectral:1.4795e-01 L4_spectral:1.4692e-01 L5_spectral:1.4527e-01 L6_spectral:1.4593e-01 L7_spectral:1.4474e-01 L8_spectral:1.4418e-01 L9_spectral:1.4460e-01 L10_spectral:1.4479e-01 L11_spectral:1.4588e-01 L12_spectral:1.4467e-01 train_time:131189ms step_avg:46.85ms +[2025-09-11 07:43:22] [Rank 0] step:2801/10000 train_time:132311ms step_avg:47.24ms +[2025-09-11 07:43:22] [Rank 0] step:2801/10000 train_time:132311ms step_avg:47.24ms +[2025-09-11 07:43:23] [Rank 0] step:2821/10000 train_time:132962ms step_avg:47.13ms +[2025-09-11 07:43:23] [Rank 0] step:2821/10000 train_time:132962ms step_avg:47.13ms +[2025-09-11 07:43:24] [Rank 0] step:2841/10000 train_time:133627ms step_avg:47.04ms +[2025-09-11 07:43:24] [Rank 0] step:2841/10000 train_time:133627ms step_avg:47.04ms +[2025-09-11 07:43:24] [Rank 0] step:2861/10000 train_time:134291ms step_avg:46.94ms +[2025-09-11 07:43:24] [Rank 0] step:2861/10000 train_time:134291ms step_avg:46.94ms +[2025-09-11 07:43:25] [Rank 0] step:2881/10000 train_time:134954ms step_avg:46.84ms +[2025-09-11 07:43:25] [Rank 0] step:2881/10000 train_time:134954ms step_avg:46.84ms +[2025-09-11 07:43:26] [Rank 0] step:2901/10000 train_time:135618ms step_avg:46.75ms +[2025-09-11 07:43:26] [Rank 0] step:2901/10000 train_time:135618ms step_avg:46.75ms +[2025-09-11 07:43:26] [Rank 0] step:2921/10000 train_time:136280ms step_avg:46.66ms +[2025-09-11 07:43:26] [Rank 0] step:2921/10000 train_time:136280ms step_avg:46.66ms +[2025-09-11 07:43:27] [Rank 0] step:2941/10000 train_time:136943ms step_avg:46.56ms +[2025-09-11 07:43:27] [Rank 0] step:2941/10000 train_time:136943ms step_avg:46.56ms +[2025-09-11 07:43:28] [Rank 0] step:2961/10000 train_time:137606ms step_avg:46.47ms +[2025-09-11 07:43:28] [Rank 0] step:2961/10000 train_time:137606ms step_avg:46.47ms 
+[2025-09-11 07:43:28] [Rank 0] step:2981/10000 train_time:138271ms step_avg:46.38ms +[2025-09-11 07:43:28] [Rank 0] step:2981/10000 train_time:138271ms step_avg:46.38ms +[2025-09-11 07:43:29] [Rank 0] step:3001/10000 train_time:138936ms step_avg:46.30ms +[2025-09-11 07:43:29] [Rank 0] step:3001/10000 train_time:138936ms step_avg:46.30ms +[2025-09-11 07:43:30] [Rank 0] step:3021/10000 train_time:139601ms step_avg:46.21ms +[2025-09-11 07:43:30] [Rank 0] step:3021/10000 train_time:139601ms step_avg:46.21ms +[2025-09-11 07:43:30] [Rank 0] step:3041/10000 train_time:140266ms step_avg:46.12ms +[2025-09-11 07:43:30] [Rank 0] step:3041/10000 train_time:140266ms step_avg:46.12ms +[2025-09-11 07:43:31] [Rank 0] step:3061/10000 train_time:140931ms step_avg:46.04ms +[2025-09-11 07:43:31] [Rank 0] step:3061/10000 train_time:140931ms step_avg:46.04ms +[2025-09-11 07:43:32] [Rank 0] step:3081/10000 train_time:141596ms step_avg:45.96ms +[2025-09-11 07:43:32] [Rank 0] step:3081/10000 train_time:141596ms step_avg:45.96ms +[2025-09-11 07:43:32] [Rank 0] step:3101/10000 train_time:142261ms step_avg:45.88ms +[2025-09-11 07:43:32] [Rank 0] step:3101/10000 train_time:142261ms step_avg:45.88ms +[2025-09-11 07:43:33] [Rank 0] step:3121/10000 train_time:142926ms step_avg:45.80ms +[2025-09-11 07:43:33] [Rank 0] step:3121/10000 train_time:142926ms step_avg:45.80ms +[2025-09-11 07:43:34] [Rank 0] step:3141/10000 train_time:143590ms step_avg:45.71ms +[2025-09-11 07:43:34] [Rank 0] step:3141/10000 train_time:143590ms step_avg:45.71ms +[2025-09-11 07:43:34] [Rank 0] step:3161/10000 train_time:144255ms step_avg:45.64ms +[2025-09-11 07:43:34] [Rank 0] step:3161/10000 train_time:144255ms step_avg:45.64ms +[2025-09-11 07:43:35] [Rank 0] step:3181/10000 train_time:144920ms step_avg:45.56ms +[2025-09-11 07:43:35] [Rank 0] step:3181/10000 train_time:144920ms step_avg:45.56ms +[2025-09-11 07:43:36] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 07:43:36] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 07:43:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 07:43:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 07:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 07:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 07:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 07:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 07:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 07:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 07:43:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 07:43:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 07:43:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 07:43:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 07:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 07:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 07:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 07:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 07:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 07:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 07:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 07:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 07:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 07:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 07:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 07:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 07:43:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 07:43:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 07:43:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 07:43:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 07:43:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 07:43:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 07:43:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 07:43:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 07:43:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 07:43:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 07:43:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 07:43:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 07:43:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 07:43:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 07:43:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 07:43:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 07:43:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 07:43:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 07:43:46] [Rank 0] PRINT: step:3200/10000 val_loss:5.8517 total_sharp:4.0859e-04 L1_sharp:1.4238e-04 L2_sharp:1.1945e-04 L3_sharp:5.5223e-05 L4_sharp:2.5567e-05 L5_sharp:6.0178e-05 L6_sharp:2.5390e-05 L7_sharp:3.4924e-05 L8_sharp:5.6447e-05 L9_sharp:3.5398e-05 L10_sharp:5.8194e-05 L11_sharp:1.3746e-04 L12_sharp:2.2514e-04 total_fnorm:4.1250e+01 total_l1_linf:1.0701e+05 total_spectral:2.0625e+01 L1_fnorm:1.2750e+01 L2_fnorm:1.2312e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2188e+01 L5_fnorm:1.1688e+01 L6_fnorm:1.2188e+01 L7_fnorm:1.2125e+01 L8_fnorm:1.1688e+01 L9_fnorm:1.1938e+01 L10_fnorm:1.1688e+01 L11_fnorm:1.1375e+01 L12_fnorm:1.0062e+01 L1_l1linf:3.2969e+00 L2_l1linf:3.2188e+00 L3_l1linf:3.0938e+00 L4_l1linf:2.9844e+00 L5_l1linf:2.8906e+00 L6_l1linf:2.9062e+00 L7_l1linf:2.9062e+00 L8_l1linf:3.0000e+00 L9_l1linf:2.9844e+00 L10_l1linf:2.7344e+00 L11_l1linf:2.3281e+00 L12_l1linf:2.2656e+00 L1_spectral:1.5467e-01 L2_spectral:1.5042e-01 L3_spectral:1.5017e-01 L4_spectral:1.4970e-01 L5_spectral:1.4633e-01 L6_spectral:1.4883e-01 L7_spectral:1.4709e-01 L8_spectral:1.4651e-01 L9_spectral:1.4571e-01 L10_spectral:1.4650e-01 L11_spectral:1.4792e-01 L12_spectral:1.4775e-01 train_time:145567ms step_avg:45.49ms +[2025-09-11 07:43:46] [Rank 0] PRINT: step:3200/10000 
val_loss:5.8517 total_sharp:4.0859e-04 L1_sharp:1.4238e-04 L2_sharp:1.1945e-04 L3_sharp:5.5223e-05 L4_sharp:2.5567e-05 L5_sharp:6.0178e-05 L6_sharp:2.5390e-05 L7_sharp:3.4924e-05 L8_sharp:5.6447e-05 L9_sharp:3.5398e-05 L10_sharp:5.8194e-05 L11_sharp:1.3746e-04 L12_sharp:2.2514e-04 total_fnorm:4.1250e+01 total_l1_linf:1.0701e+05 total_spectral:2.0625e+01 L1_fnorm:1.2750e+01 L2_fnorm:1.2312e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2188e+01 L5_fnorm:1.1688e+01 L6_fnorm:1.2188e+01 L7_fnorm:1.2125e+01 L8_fnorm:1.1688e+01 L9_fnorm:1.1938e+01 L10_fnorm:1.1688e+01 L11_fnorm:1.1375e+01 L12_fnorm:1.0062e+01 L1_l1linf:3.2969e+00 L2_l1linf:3.2188e+00 L3_l1linf:3.0938e+00 L4_l1linf:2.9844e+00 L5_l1linf:2.8906e+00 L6_l1linf:2.9062e+00 L7_l1linf:2.9062e+00 L8_l1linf:3.0000e+00 L9_l1linf:2.9844e+00 L10_l1linf:2.7344e+00 L11_l1linf:2.3281e+00 L12_l1linf:2.2656e+00 L1_spectral:1.5467e-01 L2_spectral:1.5042e-01 L3_spectral:1.5017e-01 L4_spectral:1.4970e-01 L5_spectral:1.4633e-01 L6_spectral:1.4883e-01 L7_spectral:1.4709e-01 L8_spectral:1.4651e-01 L9_spectral:1.4571e-01 L10_spectral:1.4650e-01 L11_spectral:1.4792e-01 L12_spectral:1.4775e-01 train_time:145567ms step_avg:45.49ms +[2025-09-11 07:43:47] [Rank 0] step:3201/10000 train_time:146696ms step_avg:45.83ms +[2025-09-11 07:43:47] [Rank 0] step:3201/10000 train_time:146696ms step_avg:45.83ms +[2025-09-11 07:43:48] [Rank 0] step:3221/10000 train_time:147352ms step_avg:45.75ms +[2025-09-11 07:43:48] [Rank 0] step:3221/10000 train_time:147352ms step_avg:45.75ms +[2025-09-11 07:43:48] [Rank 0] step:3241/10000 train_time:148019ms step_avg:45.67ms +[2025-09-11 07:43:48] [Rank 0] step:3241/10000 train_time:148019ms step_avg:45.67ms +[2025-09-11 07:43:49] [Rank 0] step:3261/10000 train_time:148685ms step_avg:45.59ms +[2025-09-11 07:43:49] [Rank 0] step:3261/10000 train_time:148685ms step_avg:45.59ms +[2025-09-11 07:43:50] [Rank 0] step:3281/10000 train_time:149352ms step_avg:45.52ms +[2025-09-11 07:43:50] [Rank 0] step:3281/10000 
train_time:149352ms step_avg:45.52ms +[2025-09-11 07:43:50] [Rank 0] step:3301/10000 train_time:150019ms step_avg:45.45ms +[2025-09-11 07:43:50] [Rank 0] step:3301/10000 train_time:150019ms step_avg:45.45ms +[2025-09-11 07:43:51] [Rank 0] step:3321/10000 train_time:150684ms step_avg:45.37ms +[2025-09-11 07:43:51] [Rank 0] step:3321/10000 train_time:150684ms step_avg:45.37ms +[2025-09-11 07:43:52] [Rank 0] step:3341/10000 train_time:151349ms step_avg:45.30ms +[2025-09-11 07:43:52] [Rank 0] step:3341/10000 train_time:151349ms step_avg:45.30ms +[2025-09-11 07:43:52] [Rank 0] step:3361/10000 train_time:152159ms step_avg:45.27ms +[2025-09-11 07:43:52] [Rank 0] step:3361/10000 train_time:152159ms step_avg:45.27ms +[2025-09-11 07:43:53] [Rank 0] step:3381/10000 train_time:153218ms step_avg:45.32ms +[2025-09-11 07:43:53] [Rank 0] step:3381/10000 train_time:153218ms step_avg:45.32ms +[2025-09-11 07:43:54] [Rank 0] step:3401/10000 train_time:153883ms step_avg:45.25ms +[2025-09-11 07:43:54] [Rank 0] step:3401/10000 train_time:153883ms step_avg:45.25ms +[2025-09-11 07:43:55] [Rank 0] step:3421/10000 train_time:154697ms step_avg:45.22ms +[2025-09-11 07:43:55] [Rank 0] step:3421/10000 train_time:154697ms step_avg:45.22ms +[2025-09-11 07:43:56] [Rank 0] step:3441/10000 train_time:155468ms step_avg:45.18ms +[2025-09-11 07:43:56] [Rank 0] step:3441/10000 train_time:155468ms step_avg:45.18ms +[2025-09-11 07:43:56] [Rank 0] step:3461/10000 train_time:156133ms step_avg:45.11ms +[2025-09-11 07:43:56] [Rank 0] step:3461/10000 train_time:156133ms step_avg:45.11ms +[2025-09-11 07:43:57] [Rank 0] step:3481/10000 train_time:156800ms step_avg:45.04ms +[2025-09-11 07:43:57] [Rank 0] step:3481/10000 train_time:156800ms step_avg:45.04ms +[2025-09-11 07:43:58] [Rank 0] step:3501/10000 train_time:157464ms step_avg:44.98ms +[2025-09-11 07:43:58] [Rank 0] step:3501/10000 train_time:157464ms step_avg:44.98ms +[2025-09-11 07:43:58] [Rank 0] step:3521/10000 train_time:158129ms step_avg:44.91ms 
+[2025-09-11 07:43:58] [Rank 0] step:3521/10000 train_time:158129ms step_avg:44.91ms +[2025-09-11 07:43:59] [Rank 0] step:3541/10000 train_time:158802ms step_avg:44.85ms +[2025-09-11 07:43:59] [Rank 0] step:3541/10000 train_time:158802ms step_avg:44.85ms +[2025-09-11 07:44:00] [Rank 0] step:3561/10000 train_time:159477ms step_avg:44.78ms +[2025-09-11 07:44:00] [Rank 0] step:3561/10000 train_time:159477ms step_avg:44.78ms +[2025-09-11 07:44:00] [Rank 0] step:3581/10000 train_time:160142ms step_avg:44.72ms +[2025-09-11 07:44:00] [Rank 0] step:3581/10000 train_time:160142ms step_avg:44.72ms +[2025-09-11 07:44:01] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 07:44:01] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 07:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 07:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 07:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 07:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 07:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 07:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 07:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 07:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 07:44:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 07:44:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 07:44:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 07:44:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 07:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 07:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 07:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 07:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 07:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 07:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 07:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 07:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 07:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 07:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 07:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 07:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 07:44:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 07:44:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 07:44:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 07:44:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 07:44:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 07:44:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 07:44:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 07:44:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 07:44:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 07:44:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 07:44:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 07:44:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 07:44:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 07:44:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 07:44:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 07:44:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 07:44:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 07:44:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:44:11] [Rank 0] PRINT: step:3600/10000 val_loss:5.7954 total_sharp:3.5407e-04 L1_sharp:1.6030e-04 L2_sharp:8.0644e-05 L3_sharp:5.1187e-05 L4_sharp:2.8673e-05 L5_sharp:4.9374e-05 L6_sharp:2.3039e-05 L7_sharp:3.5866e-05 L8_sharp:4.6785e-05 L9_sharp:3.6127e-05 L10_sharp:5.7801e-05 L11_sharp:1.2982e-04 L12_sharp:2.0490e-04 total_fnorm:4.1500e+01 total_l1_linf:1.0598e+05 total_spectral:2.0750e+01 L1_fnorm:1.2812e+01 L2_fnorm:1.2438e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2188e+01 L5_fnorm:1.1812e+01 L6_fnorm:1.2312e+01 L7_fnorm:1.2250e+01 L8_fnorm:1.1750e+01 L9_fnorm:1.2062e+01 L10_fnorm:1.1938e+01 L11_fnorm:1.1500e+01 L12_fnorm:1.0250e+01 L1_l1linf:3.2188e+00 L2_l1linf:3.2188e+00 L3_l1linf:3.0312e+00 L4_l1linf:2.8906e+00 L5_l1linf:2.8594e+00 L6_l1linf:2.8594e+00 L7_l1linf:2.8594e+00 L8_l1linf:2.9062e+00 L9_l1linf:2.9062e+00 L10_l1linf:2.8125e+00 L11_l1linf:2.3125e+00 L12_l1linf:2.2500e+00 L1_spectral:1.5597e-01 L2_spectral:1.5241e-01 L3_spectral:1.5179e-01 L4_spectral:1.5115e-01 L5_spectral:1.4810e-01 L6_spectral:1.5007e-01 L7_spectral:1.4863e-01 L8_spectral:1.4734e-01 L9_spectral:1.4757e-01 L10_spectral:1.4817e-01 L11_spectral:1.4918e-01 L12_spectral:1.4919e-01 train_time:160788ms step_avg:44.66ms +[2025-09-11 07:44:11] [Rank 0] PRINT: step:3600/10000 val_loss:5.7954 total_sharp:3.5407e-04 L1_sharp:1.6030e-04 L2_sharp:8.0644e-05 L3_sharp:5.1187e-05 L4_sharp:2.8673e-05 L5_sharp:4.9374e-05 L6_sharp:2.3039e-05 L7_sharp:3.5866e-05 L8_sharp:4.6785e-05 L9_sharp:3.6127e-05 L10_sharp:5.7801e-05 L11_sharp:1.2982e-04 L12_sharp:2.0490e-04 total_fnorm:4.1500e+01 total_l1_linf:1.0598e+05 total_spectral:2.0750e+01 L1_fnorm:1.2812e+01 L2_fnorm:1.2438e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2188e+01 L5_fnorm:1.1812e+01 L6_fnorm:1.2312e+01 L7_fnorm:1.2250e+01 L8_fnorm:1.1750e+01 L9_fnorm:1.2062e+01 L10_fnorm:1.1938e+01 L11_fnorm:1.1500e+01 L12_fnorm:1.0250e+01 L1_l1linf:3.2188e+00 L2_l1linf:3.2188e+00 L3_l1linf:3.0312e+00 L4_l1linf:2.8906e+00 L5_l1linf:2.8594e+00 
L6_l1linf:2.8594e+00 L7_l1linf:2.8594e+00 L8_l1linf:2.9062e+00 L9_l1linf:2.9062e+00 L10_l1linf:2.8125e+00 L11_l1linf:2.3125e+00 L12_l1linf:2.2500e+00 L1_spectral:1.5597e-01 L2_spectral:1.5241e-01 L3_spectral:1.5179e-01 L4_spectral:1.5115e-01 L5_spectral:1.4810e-01 L6_spectral:1.5007e-01 L7_spectral:1.4863e-01 L8_spectral:1.4734e-01 L9_spectral:1.4757e-01 L10_spectral:1.4817e-01 L11_spectral:1.4918e-01 L12_spectral:1.4919e-01 train_time:160788ms step_avg:44.66ms +[2025-09-11 07:44:12] [Rank 0] step:3601/10000 train_time:161925ms step_avg:44.97ms +[2025-09-11 07:44:12] [Rank 0] step:3601/10000 train_time:161925ms step_avg:44.97ms +[2025-09-11 07:44:13] [Rank 0] step:3621/10000 train_time:162580ms step_avg:44.90ms +[2025-09-11 07:44:13] [Rank 0] step:3621/10000 train_time:162580ms step_avg:44.90ms +[2025-09-11 07:44:13] [Rank 0] step:3641/10000 train_time:163246ms step_avg:44.84ms +[2025-09-11 07:44:13] [Rank 0] step:3641/10000 train_time:163246ms step_avg:44.84ms +[2025-09-11 07:44:14] [Rank 0] step:3661/10000 train_time:163912ms step_avg:44.77ms +[2025-09-11 07:44:14] [Rank 0] step:3661/10000 train_time:163912ms step_avg:44.77ms +[2025-09-11 07:44:15] [Rank 0] step:3681/10000 train_time:164578ms step_avg:44.71ms +[2025-09-11 07:44:15] [Rank 0] step:3681/10000 train_time:164578ms step_avg:44.71ms +[2025-09-11 07:44:15] [Rank 0] step:3701/10000 train_time:165243ms step_avg:44.65ms +[2025-09-11 07:44:15] [Rank 0] step:3701/10000 train_time:165243ms step_avg:44.65ms +[2025-09-11 07:44:16] [Rank 0] step:3721/10000 train_time:165918ms step_avg:44.59ms +[2025-09-11 07:44:16] [Rank 0] step:3721/10000 train_time:165918ms step_avg:44.59ms +[2025-09-11 07:44:17] [Rank 0] step:3741/10000 train_time:166595ms step_avg:44.53ms +[2025-09-11 07:44:17] [Rank 0] step:3741/10000 train_time:166595ms step_avg:44.53ms +[2025-09-11 07:44:17] [Rank 0] step:3761/10000 train_time:167272ms step_avg:44.48ms +[2025-09-11 07:44:17] [Rank 0] step:3761/10000 train_time:167272ms step_avg:44.48ms 
+[2025-09-11 07:44:18] [Rank 0] step:3781/10000 train_time:167949ms step_avg:44.42ms +[2025-09-11 07:44:18] [Rank 0] step:3781/10000 train_time:167949ms step_avg:44.42ms +[2025-09-11 07:44:19] [Rank 0] step:3801/10000 train_time:168626ms step_avg:44.36ms +[2025-09-11 07:44:19] [Rank 0] step:3801/10000 train_time:168626ms step_avg:44.36ms +[2025-09-11 07:44:19] [Rank 0] step:3821/10000 train_time:169302ms step_avg:44.31ms +[2025-09-11 07:44:19] [Rank 0] step:3821/10000 train_time:169302ms step_avg:44.31ms +[2025-09-11 07:44:20] [Rank 0] step:3841/10000 train_time:169980ms step_avg:44.25ms +[2025-09-11 07:44:20] [Rank 0] step:3841/10000 train_time:169980ms step_avg:44.25ms +[2025-09-11 07:44:21] [Rank 0] step:3861/10000 train_time:170656ms step_avg:44.20ms +[2025-09-11 07:44:21] [Rank 0] step:3861/10000 train_time:170656ms step_avg:44.20ms +[2025-09-11 07:44:21] [Rank 0] step:3881/10000 train_time:171332ms step_avg:44.15ms +[2025-09-11 07:44:21] [Rank 0] step:3881/10000 train_time:171332ms step_avg:44.15ms +[2025-09-11 07:44:22] [Rank 0] step:3901/10000 train_time:172009ms step_avg:44.09ms +[2025-09-11 07:44:22] [Rank 0] step:3901/10000 train_time:172009ms step_avg:44.09ms +[2025-09-11 07:44:23] [Rank 0] step:3921/10000 train_time:172685ms step_avg:44.04ms +[2025-09-11 07:44:23] [Rank 0] step:3921/10000 train_time:172685ms step_avg:44.04ms +[2025-09-11 07:44:23] [Rank 0] step:3941/10000 train_time:173362ms step_avg:43.99ms +[2025-09-11 07:44:23] [Rank 0] step:3941/10000 train_time:173362ms step_avg:43.99ms +[2025-09-11 07:44:24] [Rank 0] step:3961/10000 train_time:174039ms step_avg:43.94ms +[2025-09-11 07:44:24] [Rank 0] step:3961/10000 train_time:174039ms step_avg:43.94ms +[2025-09-11 07:44:25] [Rank 0] step:3981/10000 train_time:174715ms step_avg:43.89ms +[2025-09-11 07:44:25] [Rank 0] step:3981/10000 train_time:174715ms step_avg:43.89ms +[2025-09-11 07:44:25] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 07:44:25] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 07:44:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 07:44:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 07:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 07:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 07:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 07:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 07:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 07:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 07:44:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 07:44:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 07:44:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 07:44:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 07:44:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 07:44:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 07:44:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 07:44:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 07:44:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 07:44:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 07:44:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 07:44:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 07:44:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 07:44:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 07:44:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 07:44:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 07:44:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 07:44:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 07:44:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 07:44:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 07:44:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 07:44:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 07:44:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 07:44:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 07:44:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 07:44:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 07:44:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 07:44:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 07:44:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 07:44:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 07:44:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 07:44:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 07:44:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 07:44:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 07:44:35] [Rank 0] PRINT: step:4000/10000 val_loss:5.7409 total_sharp:4.0064e-04 L1_sharp:1.3462e-04 L2_sharp:7.0957e-05 L3_sharp:3.4803e-05 L4_sharp:2.9266e-05 L5_sharp:5.6516e-05 L6_sharp:2.7597e-05 L7_sharp:2.9992e-05 L8_sharp:5.9110e-05 L9_sharp:4.4152e-05 L10_sharp:6.5323e-05 L11_sharp:1.7198e-04 L12_sharp:2.1945e-04 total_fnorm:4.1500e+01 total_l1_linf:1.0394e+05 total_spectral:2.0625e+01 L1_fnorm:1.2750e+01 L2_fnorm:1.2375e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2188e+01 L5_fnorm:1.1688e+01 L6_fnorm:1.2188e+01 L7_fnorm:1.2188e+01 L8_fnorm:1.1625e+01 L9_fnorm:1.1938e+01 L10_fnorm:1.1938e+01 L11_fnorm:1.1438e+01 L12_fnorm:1.0125e+01 L1_l1linf:3.2188e+00 L2_l1linf:3.2031e+00 L3_l1linf:3.0312e+00 L4_l1linf:2.8594e+00 L5_l1linf:2.8906e+00 L6_l1linf:2.7812e+00 L7_l1linf:2.8906e+00 L8_l1linf:2.8906e+00 L9_l1linf:2.8594e+00 L10_l1linf:2.7500e+00 L11_l1linf:2.2969e+00 L12_l1linf:2.1875e+00 L1_spectral:1.5725e-01 L2_spectral:1.5139e-01 L3_spectral:1.5262e-01 L4_spectral:1.5253e-01 L5_spectral:1.4973e-01 L6_spectral:1.5160e-01 L7_spectral:1.5099e-01 L8_spectral:1.5019e-01 L9_spectral:1.4974e-01 L10_spectral:1.5060e-01 L11_spectral:1.5133e-01 L12_spectral:1.4943e-01 train_time:175373ms step_avg:43.84ms +[2025-09-11 07:44:35] [Rank 0] PRINT: step:4000/10000 
val_loss:5.7409 total_sharp:4.0064e-04 L1_sharp:1.3462e-04 L2_sharp:7.0957e-05 L3_sharp:3.4803e-05 L4_sharp:2.9266e-05 L5_sharp:5.6516e-05 L6_sharp:2.7597e-05 L7_sharp:2.9992e-05 L8_sharp:5.9110e-05 L9_sharp:4.4152e-05 L10_sharp:6.5323e-05 L11_sharp:1.7198e-04 L12_sharp:2.1945e-04 total_fnorm:4.1500e+01 total_l1_linf:1.0394e+05 total_spectral:2.0625e+01 L1_fnorm:1.2750e+01 L2_fnorm:1.2375e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2188e+01 L5_fnorm:1.1688e+01 L6_fnorm:1.2188e+01 L7_fnorm:1.2188e+01 L8_fnorm:1.1625e+01 L9_fnorm:1.1938e+01 L10_fnorm:1.1938e+01 L11_fnorm:1.1438e+01 L12_fnorm:1.0125e+01 L1_l1linf:3.2188e+00 L2_l1linf:3.2031e+00 L3_l1linf:3.0312e+00 L4_l1linf:2.8594e+00 L5_l1linf:2.8906e+00 L6_l1linf:2.7812e+00 L7_l1linf:2.8906e+00 L8_l1linf:2.8906e+00 L9_l1linf:2.8594e+00 L10_l1linf:2.7500e+00 L11_l1linf:2.2969e+00 L12_l1linf:2.1875e+00 L1_spectral:1.5725e-01 L2_spectral:1.5139e-01 L3_spectral:1.5262e-01 L4_spectral:1.5253e-01 L5_spectral:1.4973e-01 L6_spectral:1.5160e-01 L7_spectral:1.5099e-01 L8_spectral:1.5019e-01 L9_spectral:1.4974e-01 L10_spectral:1.5060e-01 L11_spectral:1.5133e-01 L12_spectral:1.4943e-01 train_time:175373ms step_avg:43.84ms +[2025-09-11 07:44:36] [Rank 0] step:4001/10000 train_time:176529ms step_avg:44.12ms +[2025-09-11 07:44:36] [Rank 0] step:4001/10000 train_time:176529ms step_avg:44.12ms +[2025-09-11 07:44:37] [Rank 0] step:4021/10000 train_time:177210ms step_avg:44.07ms +[2025-09-11 07:44:37] [Rank 0] step:4021/10000 train_time:177210ms step_avg:44.07ms +[2025-09-11 07:44:38] [Rank 0] step:4041/10000 train_time:177887ms step_avg:44.02ms +[2025-09-11 07:44:38] [Rank 0] step:4041/10000 train_time:177887ms step_avg:44.02ms +[2025-09-11 07:44:38] [Rank 0] step:4061/10000 train_time:178562ms step_avg:43.97ms +[2025-09-11 07:44:38] [Rank 0] step:4061/10000 train_time:178562ms step_avg:43.97ms +[2025-09-11 07:44:39] [Rank 0] step:4081/10000 train_time:179238ms step_avg:43.92ms +[2025-09-11 07:44:39] [Rank 0] step:4081/10000 
train_time:179238ms step_avg:43.92ms +[2025-09-11 07:44:40] [Rank 0] step:4101/10000 train_time:179914ms step_avg:43.87ms +[2025-09-11 07:44:40] [Rank 0] step:4101/10000 train_time:179914ms step_avg:43.87ms +[2025-09-11 07:44:40] [Rank 0] step:4121/10000 train_time:180590ms step_avg:43.82ms +[2025-09-11 07:44:40] [Rank 0] step:4121/10000 train_time:180590ms step_avg:43.82ms +[2025-09-11 07:44:41] [Rank 0] step:4141/10000 train_time:181265ms step_avg:43.77ms +[2025-09-11 07:44:41] [Rank 0] step:4141/10000 train_time:181265ms step_avg:43.77ms +[2025-09-11 07:44:42] [Rank 0] step:4161/10000 train_time:181940ms step_avg:43.73ms +[2025-09-11 07:44:42] [Rank 0] step:4161/10000 train_time:181940ms step_avg:43.73ms +[2025-09-11 07:44:42] [Rank 0] step:4181/10000 train_time:182616ms step_avg:43.68ms +[2025-09-11 07:44:42] [Rank 0] step:4181/10000 train_time:182616ms step_avg:43.68ms +[2025-09-11 07:44:43] [Rank 0] step:4201/10000 train_time:183294ms step_avg:43.63ms +[2025-09-11 07:44:43] [Rank 0] step:4201/10000 train_time:183294ms step_avg:43.63ms +[2025-09-11 07:44:44] [Rank 0] step:4221/10000 train_time:183969ms step_avg:43.58ms +[2025-09-11 07:44:44] [Rank 0] step:4221/10000 train_time:183969ms step_avg:43.58ms +[2025-09-11 07:44:44] [Rank 0] step:4241/10000 train_time:184644ms step_avg:43.54ms +[2025-09-11 07:44:44] [Rank 0] step:4241/10000 train_time:184644ms step_avg:43.54ms +[2025-09-11 07:44:45] [Rank 0] step:4261/10000 train_time:185320ms step_avg:43.49ms +[2025-09-11 07:44:45] [Rank 0] step:4261/10000 train_time:185320ms step_avg:43.49ms +[2025-09-11 07:44:46] [Rank 0] step:4281/10000 train_time:185995ms step_avg:43.45ms +[2025-09-11 07:44:46] [Rank 0] step:4281/10000 train_time:185995ms step_avg:43.45ms +[2025-09-11 07:44:46] [Rank 0] step:4301/10000 train_time:186671ms step_avg:43.40ms +[2025-09-11 07:44:46] [Rank 0] step:4301/10000 train_time:186671ms step_avg:43.40ms +[2025-09-11 07:44:47] [Rank 0] step:4321/10000 train_time:187344ms step_avg:43.36ms 
+[2025-09-11 07:44:47] [Rank 0] step:4321/10000 train_time:187344ms step_avg:43.36ms +[2025-09-11 07:44:48] [Rank 0] step:4341/10000 train_time:188018ms step_avg:43.31ms +[2025-09-11 07:44:48] [Rank 0] step:4341/10000 train_time:188018ms step_avg:43.31ms +[2025-09-11 07:44:48] [Rank 0] step:4361/10000 train_time:188692ms step_avg:43.27ms +[2025-09-11 07:44:48] [Rank 0] step:4361/10000 train_time:188692ms step_avg:43.27ms +[2025-09-11 07:44:49] [Rank 0] step:4381/10000 train_time:189367ms step_avg:43.22ms +[2025-09-11 07:44:49] [Rank 0] step:4381/10000 train_time:189367ms step_avg:43.22ms +[2025-09-11 07:44:50] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 07:44:50] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 07:44:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 07:44:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 07:44:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 07:44:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 07:44:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:44:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:44:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 07:44:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 07:44:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 07:44:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 07:44:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 07:44:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 07:44:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 07:44:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 07:44:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 07:44:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 07:44:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 07:44:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 07:44:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 07:44:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 07:44:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 07:44:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 07:44:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 07:44:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 07:44:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 07:44:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 07:44:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 07:44:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 07:44:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 07:44:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 07:44:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 07:44:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 07:44:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 07:44:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 07:44:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 07:44:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 07:44:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 07:44:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 07:44:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 07:44:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 07:44:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 07:44:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 07:45:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 07:45:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:45:00] [Rank 0] PRINT: step:4400/10000 val_loss:5.7005 total_sharp:3.1366e-04 L1_sharp:1.7021e-04 L2_sharp:9.4487e-05 L3_sharp:5.8450e-05 L4_sharp:2.6463e-05 L5_sharp:5.2690e-05 L6_sharp:2.7993e-05 L7_sharp:2.4384e-05 L8_sharp:5.2292e-05 L9_sharp:3.7350e-05 L10_sharp:5.8728e-05 L11_sharp:1.3086e-04 L12_sharp:1.8770e-04 total_fnorm:4.1500e+01 total_l1_linf:1.0342e+05 total_spectral:2.0750e+01 L1_fnorm:1.2750e+01 L2_fnorm:1.2375e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2188e+01 L5_fnorm:1.1812e+01 L6_fnorm:1.2312e+01 L7_fnorm:1.2312e+01 L8_fnorm:1.1812e+01 L9_fnorm:1.2188e+01 L10_fnorm:1.2000e+01 L11_fnorm:1.1625e+01 L12_fnorm:1.0125e+01 L1_l1linf:3.2188e+00 L2_l1linf:3.1562e+00 L3_l1linf:2.9844e+00 L4_l1linf:2.8594e+00 L5_l1linf:2.7969e+00 L6_l1linf:2.7188e+00 L7_l1linf:2.8125e+00 L8_l1linf:2.8438e+00 L9_l1linf:2.8594e+00 L10_l1linf:2.7500e+00 L11_l1linf:2.2656e+00 L12_l1linf:2.1250e+00 L1_spectral:1.5735e-01 L2_spectral:1.5240e-01 L3_spectral:1.5336e-01 L4_spectral:1.5318e-01 L5_spectral:1.5025e-01 L6_spectral:1.5310e-01 L7_spectral:1.5244e-01 L8_spectral:1.5059e-01 L9_spectral:1.5122e-01 L10_spectral:1.5248e-01 L11_spectral:1.5238e-01 L12_spectral:1.4888e-01 train_time:190022ms step_avg:43.19ms +[2025-09-11 07:45:00] [Rank 0] PRINT: step:4400/10000 val_loss:5.7005 total_sharp:3.1366e-04 L1_sharp:1.7021e-04 L2_sharp:9.4487e-05 L3_sharp:5.8450e-05 L4_sharp:2.6463e-05 L5_sharp:5.2690e-05 L6_sharp:2.7993e-05 L7_sharp:2.4384e-05 L8_sharp:5.2292e-05 L9_sharp:3.7350e-05 L10_sharp:5.8728e-05 L11_sharp:1.3086e-04 L12_sharp:1.8770e-04 total_fnorm:4.1500e+01 total_l1_linf:1.0342e+05 total_spectral:2.0750e+01 L1_fnorm:1.2750e+01 L2_fnorm:1.2375e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2188e+01 L5_fnorm:1.1812e+01 L6_fnorm:1.2312e+01 L7_fnorm:1.2312e+01 L8_fnorm:1.1812e+01 L9_fnorm:1.2188e+01 L10_fnorm:1.2000e+01 L11_fnorm:1.1625e+01 L12_fnorm:1.0125e+01 L1_l1linf:3.2188e+00 L2_l1linf:3.1562e+00 L3_l1linf:2.9844e+00 L4_l1linf:2.8594e+00 L5_l1linf:2.7969e+00 
L6_l1linf:2.7188e+00 L7_l1linf:2.8125e+00 L8_l1linf:2.8438e+00 L9_l1linf:2.8594e+00 L10_l1linf:2.7500e+00 L11_l1linf:2.2656e+00 L12_l1linf:2.1250e+00 L1_spectral:1.5735e-01 L2_spectral:1.5240e-01 L3_spectral:1.5336e-01 L4_spectral:1.5318e-01 L5_spectral:1.5025e-01 L6_spectral:1.5310e-01 L7_spectral:1.5244e-01 L8_spectral:1.5059e-01 L9_spectral:1.5122e-01 L10_spectral:1.5248e-01 L11_spectral:1.5238e-01 L12_spectral:1.4888e-01 train_time:190022ms step_avg:43.19ms +[2025-09-11 07:45:01] [Rank 0] step:4401/10000 train_time:191154ms step_avg:43.43ms +[2025-09-11 07:45:01] [Rank 0] step:4401/10000 train_time:191154ms step_avg:43.43ms +[2025-09-11 07:45:02] [Rank 0] step:4421/10000 train_time:191944ms step_avg:43.42ms +[2025-09-11 07:45:02] [Rank 0] step:4421/10000 train_time:191944ms step_avg:43.42ms +[2025-09-11 07:45:02] [Rank 0] step:4441/10000 train_time:192684ms step_avg:43.39ms +[2025-09-11 07:45:02] [Rank 0] step:4441/10000 train_time:192684ms step_avg:43.39ms +[2025-09-11 07:45:03] [Rank 0] step:4461/10000 train_time:193362ms step_avg:43.35ms +[2025-09-11 07:45:03] [Rank 0] step:4461/10000 train_time:193362ms step_avg:43.35ms +[2025-09-11 07:45:04] [Rank 0] step:4481/10000 train_time:194040ms step_avg:43.30ms +[2025-09-11 07:45:04] [Rank 0] step:4481/10000 train_time:194040ms step_avg:43.30ms +[2025-09-11 07:45:04] [Rank 0] step:4501/10000 train_time:194720ms step_avg:43.26ms +[2025-09-11 07:45:04] [Rank 0] step:4501/10000 train_time:194720ms step_avg:43.26ms +[2025-09-11 07:45:05] [Rank 0] step:4521/10000 train_time:195398ms step_avg:43.22ms +[2025-09-11 07:45:05] [Rank 0] step:4521/10000 train_time:195398ms step_avg:43.22ms +[2025-09-11 07:45:06] [Rank 0] step:4541/10000 train_time:196077ms step_avg:43.18ms +[2025-09-11 07:45:06] [Rank 0] step:4541/10000 train_time:196077ms step_avg:43.18ms +[2025-09-11 07:45:06] [Rank 0] step:4561/10000 train_time:196755ms step_avg:43.14ms +[2025-09-11 07:45:06] [Rank 0] step:4561/10000 train_time:196755ms step_avg:43.14ms 
+[2025-09-11 07:45:07] [Rank 0] step:4581/10000 train_time:197433ms step_avg:43.10ms +[2025-09-11 07:45:07] [Rank 0] step:4581/10000 train_time:197433ms step_avg:43.10ms +[2025-09-11 07:45:08] [Rank 0] step:4601/10000 train_time:198111ms step_avg:43.06ms +[2025-09-11 07:45:08] [Rank 0] step:4601/10000 train_time:198111ms step_avg:43.06ms +[2025-09-11 07:45:08] [Rank 0] step:4621/10000 train_time:198788ms step_avg:43.02ms +[2025-09-11 07:45:08] [Rank 0] step:4621/10000 train_time:198788ms step_avg:43.02ms +[2025-09-11 07:45:09] [Rank 0] step:4641/10000 train_time:199467ms step_avg:42.98ms +[2025-09-11 07:45:09] [Rank 0] step:4641/10000 train_time:199467ms step_avg:42.98ms +[2025-09-11 07:45:10] [Rank 0] step:4661/10000 train_time:200145ms step_avg:42.94ms +[2025-09-11 07:45:10] [Rank 0] step:4661/10000 train_time:200145ms step_avg:42.94ms +[2025-09-11 07:45:10] [Rank 0] step:4681/10000 train_time:200822ms step_avg:42.90ms +[2025-09-11 07:45:10] [Rank 0] step:4681/10000 train_time:200822ms step_avg:42.90ms +[2025-09-11 07:45:11] [Rank 0] step:4701/10000 train_time:201500ms step_avg:42.86ms +[2025-09-11 07:45:11] [Rank 0] step:4701/10000 train_time:201500ms step_avg:42.86ms +[2025-09-11 07:45:12] [Rank 0] step:4721/10000 train_time:202178ms step_avg:42.83ms +[2025-09-11 07:45:12] [Rank 0] step:4721/10000 train_time:202178ms step_avg:42.83ms +[2025-09-11 07:45:12] [Rank 0] step:4741/10000 train_time:202856ms step_avg:42.79ms +[2025-09-11 07:45:12] [Rank 0] step:4741/10000 train_time:202856ms step_avg:42.79ms +[2025-09-11 07:45:13] [Rank 0] step:4761/10000 train_time:203535ms step_avg:42.75ms +[2025-09-11 07:45:13] [Rank 0] step:4761/10000 train_time:203535ms step_avg:42.75ms +[2025-09-11 07:45:14] [Rank 0] step:4781/10000 train_time:204212ms step_avg:42.71ms +[2025-09-11 07:45:14] [Rank 0] step:4781/10000 train_time:204212ms step_avg:42.71ms +[2025-09-11 07:45:14] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 07:45:14] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 07:45:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 07:45:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 07:45:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 07:45:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 07:45:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:45:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:45:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 07:45:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 07:45:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 07:45:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 07:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 07:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 07:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 07:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 07:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 07:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 07:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 07:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 07:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 07:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 07:45:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 07:45:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 07:45:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 07:45:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 07:45:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 07:45:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 07:45:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 07:45:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 07:45:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 07:45:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 07:45:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 07:45:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 07:45:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 07:45:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 07:45:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 07:45:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 07:45:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 07:45:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 07:45:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 07:45:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 07:45:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 07:45:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 07:45:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 07:45:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 07:45:24] [Rank 0] PRINT: step:4800/10000 val_loss:5.6536 total_sharp:2.1824e-04 L1_sharp:3.1698e-05 L2_sharp:1.9271e-05 L3_sharp:3.5468e-05 L4_sharp:1.2313e-05 L5_sharp:3.5688e-05 L6_sharp:2.1260e-05 L7_sharp:2.2947e-05 L8_sharp:4.9494e-05 L9_sharp:3.7473e-05 L10_sharp:5.5817e-05 L11_sharp:1.0448e-04 L12_sharp:1.4164e-04 total_fnorm:4.2000e+01 total_l1_linf:1.0291e+05 total_spectral:2.1000e+01 L1_fnorm:1.2812e+01 L2_fnorm:1.2438e+01 L3_fnorm:1.2375e+01 L4_fnorm:1.2188e+01 L5_fnorm:1.1812e+01 L6_fnorm:1.2375e+01 L7_fnorm:1.2312e+01 L8_fnorm:1.1812e+01 L9_fnorm:1.2250e+01 L10_fnorm:1.2188e+01 L11_fnorm:1.1938e+01 L12_fnorm:1.0625e+01 L1_l1linf:3.2188e+00 L2_l1linf:3.1094e+00 L3_l1linf:2.9531e+00 L4_l1linf:2.7969e+00 L5_l1linf:2.7812e+00 L6_l1linf:2.7031e+00 L7_l1linf:2.7344e+00 L8_l1linf:2.8125e+00 L9_l1linf:2.7344e+00 L10_l1linf:2.7656e+00 L11_l1linf:2.3594e+00 L12_l1linf:2.2500e+00 L1_spectral:1.5883e-01 L2_spectral:1.5407e-01 L3_spectral:1.5515e-01 L4_spectral:1.5456e-01 L5_spectral:1.5089e-01 L6_spectral:1.5501e-01 L7_spectral:1.5293e-01 L8_spectral:1.5171e-01 L9_spectral:1.5339e-01 L10_spectral:1.5340e-01 L11_spectral:1.5296e-01 L12_spectral:1.5262e-01 train_time:204870ms step_avg:42.68ms +[2025-09-11 07:45:24] [Rank 0] PRINT: step:4800/10000 
val_loss:5.6536 total_sharp:2.1824e-04 L1_sharp:3.1698e-05 L2_sharp:1.9271e-05 L3_sharp:3.5468e-05 L4_sharp:1.2313e-05 L5_sharp:3.5688e-05 L6_sharp:2.1260e-05 L7_sharp:2.2947e-05 L8_sharp:4.9494e-05 L9_sharp:3.7473e-05 L10_sharp:5.5817e-05 L11_sharp:1.0448e-04 L12_sharp:1.4164e-04 total_fnorm:4.2000e+01 total_l1_linf:1.0291e+05 total_spectral:2.1000e+01 L1_fnorm:1.2812e+01 L2_fnorm:1.2438e+01 L3_fnorm:1.2375e+01 L4_fnorm:1.2188e+01 L5_fnorm:1.1812e+01 L6_fnorm:1.2375e+01 L7_fnorm:1.2312e+01 L8_fnorm:1.1812e+01 L9_fnorm:1.2250e+01 L10_fnorm:1.2188e+01 L11_fnorm:1.1938e+01 L12_fnorm:1.0625e+01 L1_l1linf:3.2188e+00 L2_l1linf:3.1094e+00 L3_l1linf:2.9531e+00 L4_l1linf:2.7969e+00 L5_l1linf:2.7812e+00 L6_l1linf:2.7031e+00 L7_l1linf:2.7344e+00 L8_l1linf:2.8125e+00 L9_l1linf:2.7344e+00 L10_l1linf:2.7656e+00 L11_l1linf:2.3594e+00 L12_l1linf:2.2500e+00 L1_spectral:1.5883e-01 L2_spectral:1.5407e-01 L3_spectral:1.5515e-01 L4_spectral:1.5456e-01 L5_spectral:1.5089e-01 L6_spectral:1.5501e-01 L7_spectral:1.5293e-01 L8_spectral:1.5171e-01 L9_spectral:1.5339e-01 L10_spectral:1.5340e-01 L11_spectral:1.5296e-01 L12_spectral:1.5262e-01 train_time:204870ms step_avg:42.68ms +[2025-09-11 07:45:26] [Rank 0] step:4801/10000 train_time:206015ms step_avg:42.91ms +[2025-09-11 07:45:26] [Rank 0] step:4801/10000 train_time:206015ms step_avg:42.91ms +[2025-09-11 07:45:26] [Rank 0] step:4821/10000 train_time:206683ms step_avg:42.87ms +[2025-09-11 07:45:26] [Rank 0] step:4821/10000 train_time:206683ms step_avg:42.87ms +[2025-09-11 07:45:27] [Rank 0] step:4841/10000 train_time:207362ms step_avg:42.83ms +[2025-09-11 07:45:27] [Rank 0] step:4841/10000 train_time:207362ms step_avg:42.83ms +[2025-09-11 07:45:28] [Rank 0] step:4861/10000 train_time:208040ms step_avg:42.80ms +[2025-09-11 07:45:28] [Rank 0] step:4861/10000 train_time:208040ms step_avg:42.80ms +[2025-09-11 07:45:28] [Rank 0] step:4881/10000 train_time:208719ms step_avg:42.76ms +[2025-09-11 07:45:28] [Rank 0] step:4881/10000 
train_time:208719ms step_avg:42.76ms +[2025-09-11 07:45:29] [Rank 0] step:4901/10000 train_time:209399ms step_avg:42.73ms +[2025-09-11 07:45:29] [Rank 0] step:4901/10000 train_time:209399ms step_avg:42.73ms +[2025-09-11 07:45:30] [Rank 0] step:4921/10000 train_time:210077ms step_avg:42.69ms +[2025-09-11 07:45:30] [Rank 0] step:4921/10000 train_time:210077ms step_avg:42.69ms +[2025-09-11 07:45:30] [Rank 0] step:4941/10000 train_time:210755ms step_avg:42.65ms +[2025-09-11 07:45:30] [Rank 0] step:4941/10000 train_time:210755ms step_avg:42.65ms +[2025-09-11 07:45:31] [Rank 0] step:4961/10000 train_time:211432ms step_avg:42.62ms +[2025-09-11 07:45:31] [Rank 0] step:4961/10000 train_time:211432ms step_avg:42.62ms +[2025-09-11 07:45:32] [Rank 0] step:4981/10000 train_time:212110ms step_avg:42.58ms +[2025-09-11 07:45:32] [Rank 0] step:4981/10000 train_time:212110ms step_avg:42.58ms +[2025-09-11 07:45:32] [Rank 0] step:5001/10000 train_time:212789ms step_avg:42.55ms +[2025-09-11 07:45:32] [Rank 0] step:5001/10000 train_time:212789ms step_avg:42.55ms +[2025-09-11 07:45:33] [Rank 0] step:5021/10000 train_time:213466ms step_avg:42.51ms +[2025-09-11 07:45:33] [Rank 0] step:5021/10000 train_time:213466ms step_avg:42.51ms +[2025-09-11 07:45:34] [Rank 0] step:5041/10000 train_time:214143ms step_avg:42.48ms +[2025-09-11 07:45:34] [Rank 0] step:5041/10000 train_time:214143ms step_avg:42.48ms +[2025-09-11 07:45:34] [Rank 0] step:5061/10000 train_time:214822ms step_avg:42.45ms +[2025-09-11 07:45:34] [Rank 0] step:5061/10000 train_time:214822ms step_avg:42.45ms +[2025-09-11 07:45:35] [Rank 0] step:5081/10000 train_time:215499ms step_avg:42.41ms +[2025-09-11 07:45:35] [Rank 0] step:5081/10000 train_time:215499ms step_avg:42.41ms +[2025-09-11 07:45:36] [Rank 0] step:5101/10000 train_time:216177ms step_avg:42.38ms +[2025-09-11 07:45:36] [Rank 0] step:5101/10000 train_time:216177ms step_avg:42.38ms +[2025-09-11 07:45:36] [Rank 0] step:5121/10000 train_time:216854ms step_avg:42.35ms 
+[2025-09-11 07:45:36] [Rank 0] step:5121/10000 train_time:216854ms step_avg:42.35ms +[2025-09-11 07:45:37] [Rank 0] step:5141/10000 train_time:217532ms step_avg:42.31ms +[2025-09-11 07:45:37] [Rank 0] step:5141/10000 train_time:217532ms step_avg:42.31ms +[2025-09-11 07:45:38] [Rank 0] step:5161/10000 train_time:218208ms step_avg:42.28ms +[2025-09-11 07:45:38] [Rank 0] step:5161/10000 train_time:218208ms step_avg:42.28ms +[2025-09-11 07:45:38] [Rank 0] step:5181/10000 train_time:218886ms step_avg:42.25ms +[2025-09-11 07:45:38] [Rank 0] step:5181/10000 train_time:218886ms step_avg:42.25ms +[2025-09-11 07:45:39] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 07:45:39] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 07:45:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 07:45:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 07:45:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 07:45:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 07:45:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:45:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:45:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 07:45:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 07:45:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 07:45:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 07:45:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 07:45:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 07:45:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 07:45:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 07:45:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 07:45:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 07:45:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 07:45:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 07:45:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 07:45:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 07:45:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 07:45:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 07:45:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 07:45:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 07:45:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 07:45:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 07:45:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 07:45:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 07:45:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 07:45:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 07:45:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 07:45:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 07:45:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 07:45:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 07:45:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 07:45:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 07:45:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 07:45:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 07:45:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 07:45:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 07:45:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 07:45:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 07:45:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 07:45:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:45:49] [Rank 0] PRINT: step:5200/10000 val_loss:5.6144 total_sharp:2.9462e-04 L1_sharp:1.2154e-04 L2_sharp:7.1273e-05 L3_sharp:3.7850e-05 L4_sharp:2.0635e-05 L5_sharp:3.4585e-05 L6_sharp:2.1662e-05 L7_sharp:2.3997e-05 L8_sharp:4.5685e-05 L9_sharp:3.8904e-05 L10_sharp:5.0412e-05 L11_sharp:1.2525e-04 L12_sharp:2.0974e-04 total_fnorm:4.1750e+01 total_l1_linf:1.0189e+05 total_spectral:2.1000e+01 L1_fnorm:1.2750e+01 L2_fnorm:1.2375e+01 L3_fnorm:1.2375e+01 L4_fnorm:1.2188e+01 L5_fnorm:1.1938e+01 L6_fnorm:1.2375e+01 L7_fnorm:1.2312e+01 L8_fnorm:1.1812e+01 L9_fnorm:1.2250e+01 L10_fnorm:1.2188e+01 L11_fnorm:1.1938e+01 L12_fnorm:1.0750e+01 L1_l1linf:3.2188e+00 L2_l1linf:3.1250e+00 L3_l1linf:2.9375e+00 L4_l1linf:2.7812e+00 L5_l1linf:2.7188e+00 L6_l1linf:2.6406e+00 L7_l1linf:2.6719e+00 L8_l1linf:2.7031e+00 L9_l1linf:2.7656e+00 L10_l1linf:2.7656e+00 L11_l1linf:2.3125e+00 L12_l1linf:2.2188e+00 L1_spectral:1.5952e-01 L2_spectral:1.5408e-01 L3_spectral:1.5567e-01 L4_spectral:1.5498e-01 L5_spectral:1.5144e-01 L6_spectral:1.5551e-01 L7_spectral:1.5444e-01 L8_spectral:1.5292e-01 L9_spectral:1.5439e-01 L10_spectral:1.5375e-01 L11_spectral:1.5427e-01 L12_spectral:1.5293e-01 train_time:219550ms step_avg:42.22ms +[2025-09-11 07:45:49] [Rank 0] PRINT: step:5200/10000 val_loss:5.6144 total_sharp:2.9462e-04 L1_sharp:1.2154e-04 L2_sharp:7.1273e-05 L3_sharp:3.7850e-05 L4_sharp:2.0635e-05 L5_sharp:3.4585e-05 L6_sharp:2.1662e-05 L7_sharp:2.3997e-05 L8_sharp:4.5685e-05 L9_sharp:3.8904e-05 L10_sharp:5.0412e-05 L11_sharp:1.2525e-04 L12_sharp:2.0974e-04 total_fnorm:4.1750e+01 total_l1_linf:1.0189e+05 total_spectral:2.1000e+01 L1_fnorm:1.2750e+01 L2_fnorm:1.2375e+01 L3_fnorm:1.2375e+01 L4_fnorm:1.2188e+01 L5_fnorm:1.1938e+01 L6_fnorm:1.2375e+01 L7_fnorm:1.2312e+01 L8_fnorm:1.1812e+01 L9_fnorm:1.2250e+01 L10_fnorm:1.2188e+01 L11_fnorm:1.1938e+01 L12_fnorm:1.0750e+01 L1_l1linf:3.2188e+00 L2_l1linf:3.1250e+00 L3_l1linf:2.9375e+00 L4_l1linf:2.7812e+00 L5_l1linf:2.7188e+00 
L6_l1linf:2.6406e+00 L7_l1linf:2.6719e+00 L8_l1linf:2.7031e+00 L9_l1linf:2.7656e+00 L10_l1linf:2.7656e+00 L11_l1linf:2.3125e+00 L12_l1linf:2.2188e+00 L1_spectral:1.5952e-01 L2_spectral:1.5408e-01 L3_spectral:1.5567e-01 L4_spectral:1.5498e-01 L5_spectral:1.5144e-01 L6_spectral:1.5551e-01 L7_spectral:1.5444e-01 L8_spectral:1.5292e-01 L9_spectral:1.5439e-01 L10_spectral:1.5375e-01 L11_spectral:1.5427e-01 L12_spectral:1.5293e-01 train_time:219550ms step_avg:42.22ms +[2025-09-11 07:45:50] [Rank 0] step:5201/10000 train_time:220733ms step_avg:42.44ms +[2025-09-11 07:45:50] [Rank 0] step:5201/10000 train_time:220733ms step_avg:42.44ms +[2025-09-11 07:45:51] [Rank 0] step:5221/10000 train_time:221467ms step_avg:42.42ms +[2025-09-11 07:45:51] [Rank 0] step:5221/10000 train_time:221467ms step_avg:42.42ms +[2025-09-11 07:45:52] [Rank 0] step:5241/10000 train_time:222156ms step_avg:42.39ms +[2025-09-11 07:45:52] [Rank 0] step:5241/10000 train_time:222156ms step_avg:42.39ms +[2025-09-11 07:45:52] [Rank 0] step:5261/10000 train_time:222846ms step_avg:42.36ms +[2025-09-11 07:45:52] [Rank 0] step:5261/10000 train_time:222846ms step_avg:42.36ms +[2025-09-11 07:45:53] [Rank 0] step:5281/10000 train_time:223536ms step_avg:42.33ms +[2025-09-11 07:45:53] [Rank 0] step:5281/10000 train_time:223536ms step_avg:42.33ms +[2025-09-11 07:45:54] [Rank 0] step:5301/10000 train_time:224224ms step_avg:42.30ms +[2025-09-11 07:45:54] [Rank 0] step:5301/10000 train_time:224224ms step_avg:42.30ms +[2025-09-11 07:45:54] [Rank 0] step:5321/10000 train_time:224911ms step_avg:42.27ms +[2025-09-11 07:45:54] [Rank 0] step:5321/10000 train_time:224911ms step_avg:42.27ms +[2025-09-11 07:45:55] [Rank 0] step:5341/10000 train_time:225599ms step_avg:42.24ms +[2025-09-11 07:45:55] [Rank 0] step:5341/10000 train_time:225599ms step_avg:42.24ms +[2025-09-11 07:45:56] [Rank 0] step:5361/10000 train_time:226287ms step_avg:42.21ms +[2025-09-11 07:45:56] [Rank 0] step:5361/10000 train_time:226287ms step_avg:42.21ms 
+[2025-09-11 07:45:56] [Rank 0] step:5381/10000 train_time:226976ms step_avg:42.18ms +[2025-09-11 07:45:56] [Rank 0] step:5381/10000 train_time:226976ms step_avg:42.18ms +[2025-09-11 07:45:57] [Rank 0] step:5401/10000 train_time:227664ms step_avg:42.15ms +[2025-09-11 07:45:57] [Rank 0] step:5401/10000 train_time:227664ms step_avg:42.15ms +[2025-09-11 07:45:58] [Rank 0] step:5421/10000 train_time:228353ms step_avg:42.12ms +[2025-09-11 07:45:58] [Rank 0] step:5421/10000 train_time:228353ms step_avg:42.12ms +[2025-09-11 07:45:58] [Rank 0] step:5441/10000 train_time:229041ms step_avg:42.10ms +[2025-09-11 07:45:58] [Rank 0] step:5441/10000 train_time:229041ms step_avg:42.10ms +[2025-09-11 07:46:00] [Rank 0] step:5461/10000 train_time:230287ms step_avg:42.17ms +[2025-09-11 07:46:00] [Rank 0] step:5461/10000 train_time:230287ms step_avg:42.17ms +[2025-09-11 07:46:00] [Rank 0] step:5481/10000 train_time:230976ms step_avg:42.14ms +[2025-09-11 07:46:00] [Rank 0] step:5481/10000 train_time:230976ms step_avg:42.14ms +[2025-09-11 07:46:01] [Rank 0] step:5501/10000 train_time:231664ms step_avg:42.11ms +[2025-09-11 07:46:01] [Rank 0] step:5501/10000 train_time:231664ms step_avg:42.11ms +[2025-09-11 07:46:02] [Rank 0] step:5521/10000 train_time:232625ms step_avg:42.13ms +[2025-09-11 07:46:02] [Rank 0] step:5521/10000 train_time:232625ms step_avg:42.13ms +[2025-09-11 07:46:03] [Rank 0] step:5541/10000 train_time:233316ms step_avg:42.11ms +[2025-09-11 07:46:03] [Rank 0] step:5541/10000 train_time:233316ms step_avg:42.11ms +[2025-09-11 07:46:03] [Rank 0] step:5561/10000 train_time:234006ms step_avg:42.08ms +[2025-09-11 07:46:03] [Rank 0] step:5561/10000 train_time:234006ms step_avg:42.08ms +[2025-09-11 07:46:04] [Rank 0] step:5581/10000 train_time:234695ms step_avg:42.05ms +[2025-09-11 07:46:04] [Rank 0] step:5581/10000 train_time:234695ms step_avg:42.05ms +[2025-09-11 07:46:05] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 07:46:05] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 07:46:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 07:46:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 07:46:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 07:46:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 07:46:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:46:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:46:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 07:46:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 07:46:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 07:46:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 07:46:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 07:46:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 07:46:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 07:46:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 07:46:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 07:46:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 07:46:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 07:46:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 07:46:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 07:46:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 07:46:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 07:46:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 07:46:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 07:46:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 07:46:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 07:46:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 07:46:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 07:46:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 07:46:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 07:46:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 07:46:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 07:46:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 07:46:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 07:46:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 07:46:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 07:46:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 07:46:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 07:46:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 07:46:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 07:46:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 07:46:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 07:46:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 07:46:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 07:46:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 07:46:15] [Rank 0] PRINT: step:5600/10000 val_loss:5.5903 total_sharp:2.3742e-04 L1_sharp:8.9516e-05 L2_sharp:6.3240e-05 L3_sharp:3.0446e-05 L4_sharp:1.1399e-05 L5_sharp:3.4163e-05 L6_sharp:1.2649e-05 L7_sharp:2.3733e-05 L8_sharp:5.0682e-05 L9_sharp:3.7673e-05 L10_sharp:4.8713e-05 L11_sharp:1.0987e-04 L12_sharp:1.2567e-04 total_fnorm:4.1750e+01 total_l1_linf:1.0086e+05 total_spectral:2.1000e+01 L1_fnorm:1.2688e+01 L2_fnorm:1.2312e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2188e+01 L5_fnorm:1.1812e+01 L6_fnorm:1.2312e+01 L7_fnorm:1.2312e+01 L8_fnorm:1.1812e+01 L9_fnorm:1.2250e+01 L10_fnorm:1.2250e+01 L11_fnorm:1.2000e+01 L12_fnorm:1.0875e+01 L1_l1linf:3.1719e+00 L2_l1linf:3.0938e+00 L3_l1linf:2.9062e+00 L4_l1linf:2.7656e+00 L5_l1linf:2.7188e+00 L6_l1linf:2.6406e+00 L7_l1linf:2.6562e+00 L8_l1linf:2.6719e+00 L9_l1linf:2.7188e+00 L10_l1linf:2.6875e+00 L11_l1linf:2.3906e+00 L12_l1linf:2.2344e+00 L1_spectral:1.5957e-01 L2_spectral:1.5500e-01 L3_spectral:1.5602e-01 L4_spectral:1.5592e-01 L5_spectral:1.5141e-01 L6_spectral:1.5547e-01 L7_spectral:1.5505e-01 L8_spectral:1.5339e-01 L9_spectral:1.5568e-01 L10_spectral:1.5469e-01 L11_spectral:1.5454e-01 L12_spectral:1.5252e-01 train_time:235363ms step_avg:42.03ms +[2025-09-11 07:46:15] [Rank 0] PRINT: step:5600/10000 
val_loss:5.5903 total_sharp:2.3742e-04 L1_sharp:8.9516e-05 L2_sharp:6.3240e-05 L3_sharp:3.0446e-05 L4_sharp:1.1399e-05 L5_sharp:3.4163e-05 L6_sharp:1.2649e-05 L7_sharp:2.3733e-05 L8_sharp:5.0682e-05 L9_sharp:3.7673e-05 L10_sharp:4.8713e-05 L11_sharp:1.0987e-04 L12_sharp:1.2567e-04 total_fnorm:4.1750e+01 total_l1_linf:1.0086e+05 total_spectral:2.1000e+01 L1_fnorm:1.2688e+01 L2_fnorm:1.2312e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2188e+01 L5_fnorm:1.1812e+01 L6_fnorm:1.2312e+01 L7_fnorm:1.2312e+01 L8_fnorm:1.1812e+01 L9_fnorm:1.2250e+01 L10_fnorm:1.2250e+01 L11_fnorm:1.2000e+01 L12_fnorm:1.0875e+01 L1_l1linf:3.1719e+00 L2_l1linf:3.0938e+00 L3_l1linf:2.9062e+00 L4_l1linf:2.7656e+00 L5_l1linf:2.7188e+00 L6_l1linf:2.6406e+00 L7_l1linf:2.6562e+00 L8_l1linf:2.6719e+00 L9_l1linf:2.7188e+00 L10_l1linf:2.6875e+00 L11_l1linf:2.3906e+00 L12_l1linf:2.2344e+00 L1_spectral:1.5957e-01 L2_spectral:1.5500e-01 L3_spectral:1.5602e-01 L4_spectral:1.5592e-01 L5_spectral:1.5141e-01 L6_spectral:1.5547e-01 L7_spectral:1.5505e-01 L8_spectral:1.5339e-01 L9_spectral:1.5568e-01 L10_spectral:1.5469e-01 L11_spectral:1.5454e-01 L12_spectral:1.5252e-01 train_time:235363ms step_avg:42.03ms +[2025-09-11 07:46:16] [Rank 0] step:5601/10000 train_time:236522ms step_avg:42.23ms +[2025-09-11 07:46:16] [Rank 0] step:5601/10000 train_time:236522ms step_avg:42.23ms +[2025-09-11 07:46:17] [Rank 0] step:5621/10000 train_time:237245ms step_avg:42.21ms +[2025-09-11 07:46:17] [Rank 0] step:5621/10000 train_time:237245ms step_avg:42.21ms +[2025-09-11 07:46:17] [Rank 0] step:5641/10000 train_time:237932ms step_avg:42.18ms +[2025-09-11 07:46:17] [Rank 0] step:5641/10000 train_time:237932ms step_avg:42.18ms +[2025-09-11 07:46:18] [Rank 0] step:5661/10000 train_time:238620ms step_avg:42.15ms +[2025-09-11 07:46:18] [Rank 0] step:5661/10000 train_time:238620ms step_avg:42.15ms +[2025-09-11 07:46:19] [Rank 0] step:5681/10000 train_time:239307ms step_avg:42.12ms +[2025-09-11 07:46:19] [Rank 0] step:5681/10000 
train_time:239307ms step_avg:42.12ms +[2025-09-11 07:46:19] [Rank 0] step:5701/10000 train_time:239996ms step_avg:42.10ms +[2025-09-11 07:46:19] [Rank 0] step:5701/10000 train_time:239996ms step_avg:42.10ms +[2025-09-11 07:46:20] [Rank 0] step:5721/10000 train_time:240683ms step_avg:42.07ms +[2025-09-11 07:46:20] [Rank 0] step:5721/10000 train_time:240683ms step_avg:42.07ms +[2025-09-11 07:46:21] [Rank 0] step:5741/10000 train_time:241372ms step_avg:42.04ms +[2025-09-11 07:46:21] [Rank 0] step:5741/10000 train_time:241372ms step_avg:42.04ms +[2025-09-11 07:46:21] [Rank 0] step:5761/10000 train_time:242060ms step_avg:42.02ms +[2025-09-11 07:46:21] [Rank 0] step:5761/10000 train_time:242060ms step_avg:42.02ms +[2025-09-11 07:46:22] [Rank 0] step:5781/10000 train_time:242748ms step_avg:41.99ms +[2025-09-11 07:46:22] [Rank 0] step:5781/10000 train_time:242748ms step_avg:41.99ms +[2025-09-11 07:46:23] [Rank 0] step:5801/10000 train_time:243437ms step_avg:41.96ms +[2025-09-11 07:46:23] [Rank 0] step:5801/10000 train_time:243437ms step_avg:41.96ms +[2025-09-11 07:46:23] [Rank 0] step:5821/10000 train_time:244123ms step_avg:41.94ms +[2025-09-11 07:46:23] [Rank 0] step:5821/10000 train_time:244123ms step_avg:41.94ms +[2025-09-11 07:46:24] [Rank 0] step:5841/10000 train_time:244812ms step_avg:41.91ms +[2025-09-11 07:46:24] [Rank 0] step:5841/10000 train_time:244812ms step_avg:41.91ms +[2025-09-11 07:46:25] [Rank 0] step:5861/10000 train_time:245498ms step_avg:41.89ms +[2025-09-11 07:46:25] [Rank 0] step:5861/10000 train_time:245498ms step_avg:41.89ms +[2025-09-11 07:46:26] [Rank 0] step:5881/10000 train_time:246185ms step_avg:41.86ms +[2025-09-11 07:46:26] [Rank 0] step:5881/10000 train_time:246185ms step_avg:41.86ms +[2025-09-11 07:46:26] [Rank 0] step:5901/10000 train_time:246872ms step_avg:41.84ms +[2025-09-11 07:46:26] [Rank 0] step:5901/10000 train_time:246872ms step_avg:41.84ms +[2025-09-11 07:46:27] [Rank 0] step:5921/10000 train_time:247562ms step_avg:41.81ms 
+[2025-09-11 07:46:27] [Rank 0] step:5921/10000 train_time:247562ms step_avg:41.81ms +[2025-09-11 07:46:28] [Rank 0] step:5941/10000 train_time:248252ms step_avg:41.79ms +[2025-09-11 07:46:28] [Rank 0] step:5941/10000 train_time:248252ms step_avg:41.79ms +[2025-09-11 07:46:28] [Rank 0] step:5961/10000 train_time:248940ms step_avg:41.76ms +[2025-09-11 07:46:28] [Rank 0] step:5961/10000 train_time:248940ms step_avg:41.76ms +[2025-09-11 07:46:29] [Rank 0] step:5981/10000 train_time:249630ms step_avg:41.74ms +[2025-09-11 07:46:29] [Rank 0] step:5981/10000 train_time:249630ms step_avg:41.74ms +[2025-09-11 07:46:30] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 07:46:30] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 07:46:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 07:46:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 07:46:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 07:46:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 07:46:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:46:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:46:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 07:46:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 07:46:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 07:46:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 07:46:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 07:46:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 07:46:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 07:46:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 07:46:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 07:46:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 07:46:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 07:46:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 07:46:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 07:46:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 07:46:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 07:46:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 07:46:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 07:46:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 07:46:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 07:46:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 07:46:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 07:46:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 07:46:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 07:46:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 07:46:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 07:46:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 07:46:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 07:46:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 07:46:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 07:46:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 07:46:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 07:46:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 07:46:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 07:46:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 07:46:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 07:46:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 07:46:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 07:46:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:46:44] [Rank 0] PRINT: step:6000/10000 val_loss:5.5512 total_sharp:2.4730e-04 L1_sharp:9.8905e-05 L2_sharp:4.2553e-05 L3_sharp:4.1297e-05 L4_sharp:1.6143e-05 L5_sharp:3.6994e-05 L6_sharp:2.0361e-05 L7_sharp:2.0223e-05 L8_sharp:4.0598e-05 L9_sharp:4.1809e-05 L10_sharp:4.6488e-05 L11_sharp:1.1756e-04 L12_sharp:1.9709e-04 total_fnorm:4.1750e+01 total_l1_linf:9.9840e+04 total_spectral:2.1000e+01 L1_fnorm:1.2688e+01 L2_fnorm:1.2375e+01 L3_fnorm:1.2375e+01 L4_fnorm:1.2188e+01 L5_fnorm:1.1875e+01 L6_fnorm:1.2375e+01 L7_fnorm:1.2312e+01 L8_fnorm:1.1938e+01 L9_fnorm:1.2312e+01 L10_fnorm:1.2250e+01 L11_fnorm:1.2062e+01 L12_fnorm:1.0938e+01 L1_l1linf:3.2031e+00 L2_l1linf:3.0938e+00 L3_l1linf:2.8750e+00 L4_l1linf:2.7656e+00 L5_l1linf:2.6562e+00 L6_l1linf:2.6094e+00 L7_l1linf:2.6094e+00 L8_l1linf:2.6719e+00 L9_l1linf:2.6562e+00 L10_l1linf:2.7188e+00 L11_l1linf:2.3125e+00 L12_l1linf:2.2188e+00 L1_spectral:1.6011e-01 L2_spectral:1.5544e-01 L3_spectral:1.5687e-01 L4_spectral:1.5514e-01 L5_spectral:1.5338e-01 L6_spectral:1.5671e-01 L7_spectral:1.5664e-01 L8_spectral:1.5490e-01 L9_spectral:1.5620e-01 L10_spectral:1.5505e-01 L11_spectral:1.5540e-01 L12_spectral:1.5396e-01 train_time:250300ms step_avg:41.72ms +[2025-09-11 07:46:44] [Rank 0] PRINT: step:6000/10000 val_loss:5.5512 total_sharp:2.4730e-04 L1_sharp:9.8905e-05 L2_sharp:4.2553e-05 L3_sharp:4.1297e-05 L4_sharp:1.6143e-05 L5_sharp:3.6994e-05 L6_sharp:2.0361e-05 L7_sharp:2.0223e-05 L8_sharp:4.0598e-05 L9_sharp:4.1809e-05 L10_sharp:4.6488e-05 L11_sharp:1.1756e-04 L12_sharp:1.9709e-04 total_fnorm:4.1750e+01 total_l1_linf:9.9840e+04 total_spectral:2.1000e+01 L1_fnorm:1.2688e+01 L2_fnorm:1.2375e+01 L3_fnorm:1.2375e+01 L4_fnorm:1.2188e+01 L5_fnorm:1.1875e+01 L6_fnorm:1.2375e+01 L7_fnorm:1.2312e+01 L8_fnorm:1.1938e+01 L9_fnorm:1.2312e+01 L10_fnorm:1.2250e+01 L11_fnorm:1.2062e+01 L12_fnorm:1.0938e+01 L1_l1linf:3.2031e+00 L2_l1linf:3.0938e+00 L3_l1linf:2.8750e+00 L4_l1linf:2.7656e+00 L5_l1linf:2.6562e+00 
L6_l1linf:2.6094e+00 L7_l1linf:2.6094e+00 L8_l1linf:2.6719e+00 L9_l1linf:2.6562e+00 L10_l1linf:2.7188e+00 L11_l1linf:2.3125e+00 L12_l1linf:2.2188e+00 L1_spectral:1.6011e-01 L2_spectral:1.5544e-01 L3_spectral:1.5687e-01 L4_spectral:1.5514e-01 L5_spectral:1.5338e-01 L6_spectral:1.5671e-01 L7_spectral:1.5664e-01 L8_spectral:1.5490e-01 L9_spectral:1.5620e-01 L10_spectral:1.5505e-01 L11_spectral:1.5540e-01 L12_spectral:1.5396e-01 train_time:250300ms step_avg:41.72ms +[2025-09-11 07:46:45] [Rank 0] step:6001/10000 train_time:251453ms step_avg:41.90ms +[2025-09-11 07:46:45] [Rank 0] step:6001/10000 train_time:251453ms step_avg:41.90ms +[2025-09-11 07:46:45] [Rank 0] step:6021/10000 train_time:252147ms step_avg:41.88ms +[2025-09-11 07:46:45] [Rank 0] step:6021/10000 train_time:252147ms step_avg:41.88ms +[2025-09-11 07:46:46] [Rank 0] step:6041/10000 train_time:252841ms step_avg:41.85ms +[2025-09-11 07:46:46] [Rank 0] step:6041/10000 train_time:252841ms step_avg:41.85ms +[2025-09-11 07:46:47] [Rank 0] step:6061/10000 train_time:253531ms step_avg:41.83ms +[2025-09-11 07:46:47] [Rank 0] step:6061/10000 train_time:253531ms step_avg:41.83ms +[2025-09-11 07:46:48] [Rank 0] step:6081/10000 train_time:254223ms step_avg:41.81ms +[2025-09-11 07:46:48] [Rank 0] step:6081/10000 train_time:254223ms step_avg:41.81ms +[2025-09-11 07:46:48] [Rank 0] step:6101/10000 train_time:254914ms step_avg:41.78ms +[2025-09-11 07:46:48] [Rank 0] step:6101/10000 train_time:254914ms step_avg:41.78ms +[2025-09-11 07:46:49] [Rank 0] step:6121/10000 train_time:255604ms step_avg:41.76ms +[2025-09-11 07:46:49] [Rank 0] step:6121/10000 train_time:255604ms step_avg:41.76ms +[2025-09-11 07:46:50] [Rank 0] step:6141/10000 train_time:256295ms step_avg:41.74ms +[2025-09-11 07:46:50] [Rank 0] step:6141/10000 train_time:256295ms step_avg:41.74ms +[2025-09-11 07:46:50] [Rank 0] step:6161/10000 train_time:256984ms step_avg:41.71ms +[2025-09-11 07:46:50] [Rank 0] step:6161/10000 train_time:256984ms step_avg:41.71ms 
+[2025-09-11 07:46:51] [Rank 0] step:6181/10000 train_time:257672ms step_avg:41.69ms +[2025-09-11 07:46:51] [Rank 0] step:6181/10000 train_time:257672ms step_avg:41.69ms +[2025-09-11 07:46:52] [Rank 0] step:6201/10000 train_time:258363ms step_avg:41.66ms +[2025-09-11 07:46:52] [Rank 0] step:6201/10000 train_time:258363ms step_avg:41.66ms +[2025-09-11 07:46:52] [Rank 0] step:6221/10000 train_time:259053ms step_avg:41.64ms +[2025-09-11 07:46:52] [Rank 0] step:6221/10000 train_time:259053ms step_avg:41.64ms +[2025-09-11 07:46:53] [Rank 0] step:6241/10000 train_time:259743ms step_avg:41.62ms +[2025-09-11 07:46:53] [Rank 0] step:6241/10000 train_time:259743ms step_avg:41.62ms +[2025-09-11 07:46:54] [Rank 0] step:6261/10000 train_time:260480ms step_avg:41.60ms +[2025-09-11 07:46:54] [Rank 0] step:6261/10000 train_time:260480ms step_avg:41.60ms +[2025-09-11 07:46:54] [Rank 0] step:6281/10000 train_time:261210ms step_avg:41.59ms +[2025-09-11 07:46:54] [Rank 0] step:6281/10000 train_time:261210ms step_avg:41.59ms +[2025-09-11 07:46:55] [Rank 0] step:6301/10000 train_time:261915ms step_avg:41.57ms +[2025-09-11 07:46:55] [Rank 0] step:6301/10000 train_time:261915ms step_avg:41.57ms +[2025-09-11 07:46:56] [Rank 0] step:6321/10000 train_time:262651ms step_avg:41.55ms +[2025-09-11 07:46:56] [Rank 0] step:6321/10000 train_time:262651ms step_avg:41.55ms +[2025-09-11 07:46:57] [Rank 0] step:6341/10000 train_time:263381ms step_avg:41.54ms +[2025-09-11 07:46:57] [Rank 0] step:6341/10000 train_time:263381ms step_avg:41.54ms +[2025-09-11 07:46:57] [Rank 0] step:6361/10000 train_time:264082ms step_avg:41.52ms +[2025-09-11 07:46:57] [Rank 0] step:6361/10000 train_time:264082ms step_avg:41.52ms +[2025-09-11 07:46:58] [Rank 0] step:6381/10000 train_time:264788ms step_avg:41.50ms +[2025-09-11 07:46:58] [Rank 0] step:6381/10000 train_time:264788ms step_avg:41.50ms +[2025-09-11 07:46:59] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 07:46:59] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 07:47:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 07:47:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 07:47:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 07:47:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 07:47:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:47:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:47:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 07:47:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 07:47:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 07:47:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 07:47:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 07:47:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 07:47:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 07:47:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 07:47:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 07:47:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 07:47:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 07:47:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 07:47:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 07:47:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 07:47:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 07:47:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 07:47:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 07:47:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 07:47:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 07:47:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 07:47:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 07:47:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 07:47:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 07:47:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 07:47:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 07:47:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 07:47:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 07:47:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 07:47:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 07:47:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 07:47:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 07:47:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 07:47:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 07:47:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 07:47:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 07:47:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 07:47:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 07:47:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 07:47:09] [Rank 0] PRINT: step:6400/10000 val_loss:5.5169 total_sharp:2.0854e-04 L1_sharp:8.5959e-05 L2_sharp:5.9146e-05 L3_sharp:2.6122e-05 L4_sharp:9.5413e-06 L5_sharp:2.9773e-05 L6_sharp:1.7803e-05 L7_sharp:1.8819e-05 L8_sharp:4.0291e-05 L9_sharp:3.8101e-05 L10_sharp:4.2524e-05 L11_sharp:9.9710e-05 L12_sharp:1.1280e-04 total_fnorm:3.8250e+01 total_l1_linf:8.7040e+04 total_spectral:1.9000e+01 L1_fnorm:1.1500e+01 L2_fnorm:1.1188e+01 L3_fnorm:1.1125e+01 L4_fnorm:1.1000e+01 L5_fnorm:1.0750e+01 L6_fnorm:1.1188e+01 L7_fnorm:1.1125e+01 L8_fnorm:1.0750e+01 L9_fnorm:1.1125e+01 L10_fnorm:1.1125e+01 L11_fnorm:1.1000e+01 L12_fnorm:1.0000e+01 L1_l1linf:2.7969e+00 L2_l1linf:2.7344e+00 L3_l1linf:2.5469e+00 L4_l1linf:2.4062e+00 L5_l1linf:2.3438e+00 L6_l1linf:2.3125e+00 L7_l1linf:2.3438e+00 L8_l1linf:2.3438e+00 L9_l1linf:2.3281e+00 L10_l1linf:2.3906e+00 L11_l1linf:2.0781e+00 L12_l1linf:1.9844e+00 L1_spectral:1.4725e-01 L2_spectral:1.4359e-01 L3_spectral:1.4512e-01 L4_spectral:1.4461e-01 L5_spectral:1.4132e-01 L6_spectral:1.4543e-01 L7_spectral:1.4457e-01 L8_spectral:1.4227e-01 L9_spectral:1.4436e-01 L10_spectral:1.4484e-01 L11_spectral:1.4449e-01 L12_spectral:1.4256e-01 train_time:265480ms step_avg:41.48ms +[2025-09-11 07:47:09] [Rank 0] PRINT: step:6400/10000 
val_loss:5.5169 total_sharp:2.0854e-04 L1_sharp:8.5959e-05 L2_sharp:5.9146e-05 L3_sharp:2.6122e-05 L4_sharp:9.5413e-06 L5_sharp:2.9773e-05 L6_sharp:1.7803e-05 L7_sharp:1.8819e-05 L8_sharp:4.0291e-05 L9_sharp:3.8101e-05 L10_sharp:4.2524e-05 L11_sharp:9.9710e-05 L12_sharp:1.1280e-04 total_fnorm:3.8250e+01 total_l1_linf:8.7040e+04 total_spectral:1.9000e+01 L1_fnorm:1.1500e+01 L2_fnorm:1.1188e+01 L3_fnorm:1.1125e+01 L4_fnorm:1.1000e+01 L5_fnorm:1.0750e+01 L6_fnorm:1.1188e+01 L7_fnorm:1.1125e+01 L8_fnorm:1.0750e+01 L9_fnorm:1.1125e+01 L10_fnorm:1.1125e+01 L11_fnorm:1.1000e+01 L12_fnorm:1.0000e+01 L1_l1linf:2.7969e+00 L2_l1linf:2.7344e+00 L3_l1linf:2.5469e+00 L4_l1linf:2.4062e+00 L5_l1linf:2.3438e+00 L6_l1linf:2.3125e+00 L7_l1linf:2.3438e+00 L8_l1linf:2.3438e+00 L9_l1linf:2.3281e+00 L10_l1linf:2.3906e+00 L11_l1linf:2.0781e+00 L12_l1linf:1.9844e+00 L1_spectral:1.4725e-01 L2_spectral:1.4359e-01 L3_spectral:1.4512e-01 L4_spectral:1.4461e-01 L5_spectral:1.4132e-01 L6_spectral:1.4543e-01 L7_spectral:1.4457e-01 L8_spectral:1.4227e-01 L9_spectral:1.4436e-01 L10_spectral:1.4484e-01 L11_spectral:1.4449e-01 L12_spectral:1.4256e-01 train_time:265480ms step_avg:41.48ms +[2025-09-11 07:47:10] [Rank 0] step:6401/10000 train_time:266725ms step_avg:41.67ms +[2025-09-11 07:47:10] [Rank 0] step:6401/10000 train_time:266725ms step_avg:41.67ms +[2025-09-11 07:47:11] [Rank 0] step:6421/10000 train_time:267450ms step_avg:41.65ms +[2025-09-11 07:47:11] [Rank 0] step:6421/10000 train_time:267450ms step_avg:41.65ms +[2025-09-11 07:47:12] [Rank 0] step:6441/10000 train_time:268142ms step_avg:41.63ms +[2025-09-11 07:47:12] [Rank 0] step:6441/10000 train_time:268142ms step_avg:41.63ms +[2025-09-11 07:47:12] [Rank 0] step:6461/10000 train_time:268833ms step_avg:41.61ms +[2025-09-11 07:47:12] [Rank 0] step:6461/10000 train_time:268833ms step_avg:41.61ms +[2025-09-11 07:47:13] [Rank 0] step:6481/10000 train_time:269525ms step_avg:41.59ms +[2025-09-11 07:47:13] [Rank 0] step:6481/10000 
train_time:269525ms step_avg:41.59ms +[2025-09-11 07:47:14] [Rank 0] step:6501/10000 train_time:270217ms step_avg:41.57ms +[2025-09-11 07:47:14] [Rank 0] step:6501/10000 train_time:270217ms step_avg:41.57ms +[2025-09-11 07:47:14] [Rank 0] step:6521/10000 train_time:270908ms step_avg:41.54ms +[2025-09-11 07:47:14] [Rank 0] step:6521/10000 train_time:270908ms step_avg:41.54ms +[2025-09-11 07:47:15] [Rank 0] step:6541/10000 train_time:271597ms step_avg:41.52ms +[2025-09-11 07:47:15] [Rank 0] step:6541/10000 train_time:271597ms step_avg:41.52ms +[2025-09-11 07:47:16] [Rank 0] step:6561/10000 train_time:272289ms step_avg:41.50ms +[2025-09-11 07:47:16] [Rank 0] step:6561/10000 train_time:272289ms step_avg:41.50ms +[2025-09-11 07:47:16] [Rank 0] step:6581/10000 train_time:272981ms step_avg:41.48ms +[2025-09-11 07:47:16] [Rank 0] step:6581/10000 train_time:272981ms step_avg:41.48ms +[2025-09-11 07:47:17] [Rank 0] step:6601/10000 train_time:273672ms step_avg:41.46ms +[2025-09-11 07:47:17] [Rank 0] step:6601/10000 train_time:273672ms step_avg:41.46ms +[2025-09-11 07:47:18] [Rank 0] step:6621/10000 train_time:274362ms step_avg:41.44ms +[2025-09-11 07:47:18] [Rank 0] step:6621/10000 train_time:274362ms step_avg:41.44ms +[2025-09-11 07:47:19] [Rank 0] step:6641/10000 train_time:275054ms step_avg:41.42ms +[2025-09-11 07:47:19] [Rank 0] step:6641/10000 train_time:275054ms step_avg:41.42ms +[2025-09-11 07:47:19] [Rank 0] step:6661/10000 train_time:275745ms step_avg:41.40ms +[2025-09-11 07:47:19] [Rank 0] step:6661/10000 train_time:275745ms step_avg:41.40ms +[2025-09-11 07:47:20] [Rank 0] step:6681/10000 train_time:276443ms step_avg:41.38ms +[2025-09-11 07:47:20] [Rank 0] step:6681/10000 train_time:276443ms step_avg:41.38ms +[2025-09-11 07:47:21] [Rank 0] step:6701/10000 train_time:277140ms step_avg:41.36ms +[2025-09-11 07:47:21] [Rank 0] step:6701/10000 train_time:277140ms step_avg:41.36ms +[2025-09-11 07:47:21] [Rank 0] step:6721/10000 train_time:277839ms step_avg:41.34ms 
+[2025-09-11 07:47:21] [Rank 0] step:6721/10000 train_time:277839ms step_avg:41.34ms +[2025-09-11 07:47:22] [Rank 0] step:6741/10000 train_time:278538ms step_avg:41.32ms +[2025-09-11 07:47:22] [Rank 0] step:6741/10000 train_time:278538ms step_avg:41.32ms +[2025-09-11 07:47:23] [Rank 0] step:6761/10000 train_time:279234ms step_avg:41.30ms +[2025-09-11 07:47:23] [Rank 0] step:6761/10000 train_time:279234ms step_avg:41.30ms +[2025-09-11 07:47:23] [Rank 0] step:6781/10000 train_time:279933ms step_avg:41.28ms +[2025-09-11 07:47:23] [Rank 0] step:6781/10000 train_time:279933ms step_avg:41.28ms +[2025-09-11 07:47:24] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 07:47:24] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 07:47:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 07:47:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 07:47:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 07:47:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 07:47:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:47:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:47:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 07:47:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 07:47:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 07:47:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 07:47:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 07:47:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 07:47:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 07:47:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 07:47:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 07:47:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 07:47:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 07:47:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 07:47:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 07:47:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 07:47:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 07:47:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 07:47:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 07:47:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 07:47:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 07:47:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 07:47:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 07:47:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 07:47:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 07:47:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 07:47:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 07:47:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 07:47:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 07:47:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 07:47:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 07:47:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 07:47:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 07:47:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 07:47:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 07:47:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 07:47:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 07:47:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 07:47:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 07:47:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:47:34] [Rank 0] PRINT: step:6800/10000 val_loss:5.4908 total_sharp:1.7666e-04 L1_sharp:3.2859e-05 L2_sharp:4.7168e-05 L3_sharp:1.6426e-05 L4_sharp:1.2840e-05 L5_sharp:2.7425e-05 L6_sharp:1.6992e-05 L7_sharp:2.4788e-05 L8_sharp:4.0046e-05 L9_sharp:3.4811e-05 L10_sharp:4.5599e-05 L11_sharp:8.4139e-05 L12_sharp:1.1280e-04 total_fnorm:3.3500e+01 total_l1_linf:7.2704e+04 total_spectral:1.6875e+01 L1_fnorm:1.0250e+01 L2_fnorm:9.8750e+00 L3_fnorm:9.8750e+00 L4_fnorm:9.6875e+00 L5_fnorm:9.4375e+00 L6_fnorm:9.8750e+00 L7_fnorm:9.8125e+00 L8_fnorm:9.5000e+00 L9_fnorm:9.8125e+00 L10_fnorm:9.8125e+00 L11_fnorm:9.7500e+00 L12_fnorm:8.8125e+00 L1_l1linf:2.4531e+00 L2_l1linf:2.3125e+00 L3_l1linf:2.2188e+00 L4_l1linf:2.0625e+00 L5_l1linf:2.0312e+00 L6_l1linf:1.9531e+00 L7_l1linf:1.9688e+00 L8_l1linf:1.9219e+00 L9_l1linf:1.9141e+00 L10_l1linf:2.0156e+00 L11_l1linf:1.8203e+00 L12_l1linf:1.7188e+00 L1_spectral:1.3295e-01 L2_spectral:1.2934e-01 L3_spectral:1.3180e-01 L4_spectral:1.3133e-01 L5_spectral:1.2659e-01 L6_spectral:1.3137e-01 L7_spectral:1.3142e-01 L8_spectral:1.2823e-01 L9_spectral:1.3111e-01 L10_spectral:1.3047e-01 L11_spectral:1.3013e-01 L12_spectral:1.2743e-01 train_time:280612ms step_avg:41.27ms +[2025-09-11 07:47:34] [Rank 0] PRINT: step:6800/10000 val_loss:5.4908 total_sharp:1.7666e-04 L1_sharp:3.2859e-05 L2_sharp:4.7168e-05 L3_sharp:1.6426e-05 L4_sharp:1.2840e-05 L5_sharp:2.7425e-05 L6_sharp:1.6992e-05 L7_sharp:2.4788e-05 L8_sharp:4.0046e-05 L9_sharp:3.4811e-05 L10_sharp:4.5599e-05 L11_sharp:8.4139e-05 L12_sharp:1.1280e-04 total_fnorm:3.3500e+01 total_l1_linf:7.2704e+04 total_spectral:1.6875e+01 L1_fnorm:1.0250e+01 L2_fnorm:9.8750e+00 L3_fnorm:9.8750e+00 L4_fnorm:9.6875e+00 L5_fnorm:9.4375e+00 L6_fnorm:9.8750e+00 L7_fnorm:9.8125e+00 L8_fnorm:9.5000e+00 L9_fnorm:9.8125e+00 L10_fnorm:9.8125e+00 L11_fnorm:9.7500e+00 L12_fnorm:8.8125e+00 L1_l1linf:2.4531e+00 L2_l1linf:2.3125e+00 L3_l1linf:2.2188e+00 L4_l1linf:2.0625e+00 L5_l1linf:2.0312e+00 
L6_l1linf:1.9531e+00 L7_l1linf:1.9688e+00 L8_l1linf:1.9219e+00 L9_l1linf:1.9141e+00 L10_l1linf:2.0156e+00 L11_l1linf:1.8203e+00 L12_l1linf:1.7188e+00 L1_spectral:1.3295e-01 L2_spectral:1.2934e-01 L3_spectral:1.3180e-01 L4_spectral:1.3133e-01 L5_spectral:1.2659e-01 L6_spectral:1.3137e-01 L7_spectral:1.3142e-01 L8_spectral:1.2823e-01 L9_spectral:1.3111e-01 L10_spectral:1.3047e-01 L11_spectral:1.3013e-01 L12_spectral:1.2743e-01 train_time:280612ms step_avg:41.27ms +[2025-09-11 07:47:35] [Rank 0] step:6801/10000 train_time:281763ms step_avg:41.43ms +[2025-09-11 07:47:35] [Rank 0] step:6801/10000 train_time:281763ms step_avg:41.43ms +[2025-09-11 07:47:36] [Rank 0] step:6821/10000 train_time:282459ms step_avg:41.41ms +[2025-09-11 07:47:36] [Rank 0] step:6821/10000 train_time:282459ms step_avg:41.41ms +[2025-09-11 07:47:37] [Rank 0] step:6841/10000 train_time:283162ms step_avg:41.39ms +[2025-09-11 07:47:37] [Rank 0] step:6841/10000 train_time:283162ms step_avg:41.39ms +[2025-09-11 07:47:37] [Rank 0] step:6861/10000 train_time:283861ms step_avg:41.37ms +[2025-09-11 07:47:37] [Rank 0] step:6861/10000 train_time:283861ms step_avg:41.37ms +[2025-09-11 07:47:38] [Rank 0] step:6881/10000 train_time:284560ms step_avg:41.35ms +[2025-09-11 07:47:38] [Rank 0] step:6881/10000 train_time:284560ms step_avg:41.35ms +[2025-09-11 07:47:39] [Rank 0] step:6901/10000 train_time:285256ms step_avg:41.34ms +[2025-09-11 07:47:39] [Rank 0] step:6901/10000 train_time:285256ms step_avg:41.34ms +[2025-09-11 07:47:39] [Rank 0] step:6921/10000 train_time:285954ms step_avg:41.32ms +[2025-09-11 07:47:39] [Rank 0] step:6921/10000 train_time:285954ms step_avg:41.32ms +[2025-09-11 07:47:40] [Rank 0] step:6941/10000 train_time:286653ms step_avg:41.30ms +[2025-09-11 07:47:40] [Rank 0] step:6941/10000 train_time:286653ms step_avg:41.30ms +[2025-09-11 07:47:41] [Rank 0] step:6961/10000 train_time:287350ms step_avg:41.28ms +[2025-09-11 07:47:41] [Rank 0] step:6961/10000 train_time:287350ms step_avg:41.28ms 
+[2025-09-11 07:47:42] [Rank 0] step:6981/10000 train_time:288051ms step_avg:41.26ms +[2025-09-11 07:47:42] [Rank 0] step:6981/10000 train_time:288051ms step_avg:41.26ms +[2025-09-11 07:47:42] [Rank 0] step:7001/10000 train_time:288750ms step_avg:41.24ms +[2025-09-11 07:47:42] [Rank 0] step:7001/10000 train_time:288750ms step_avg:41.24ms +[2025-09-11 07:47:43] [Rank 0] step:7021/10000 train_time:289448ms step_avg:41.23ms +[2025-09-11 07:47:43] [Rank 0] step:7021/10000 train_time:289448ms step_avg:41.23ms +[2025-09-11 07:47:44] [Rank 0] step:7041/10000 train_time:290147ms step_avg:41.21ms +[2025-09-11 07:47:44] [Rank 0] step:7041/10000 train_time:290147ms step_avg:41.21ms +[2025-09-11 07:47:44] [Rank 0] step:7061/10000 train_time:290846ms step_avg:41.19ms +[2025-09-11 07:47:44] [Rank 0] step:7061/10000 train_time:290846ms step_avg:41.19ms +[2025-09-11 07:47:45] [Rank 0] step:7081/10000 train_time:291546ms step_avg:41.17ms +[2025-09-11 07:47:45] [Rank 0] step:7081/10000 train_time:291546ms step_avg:41.17ms +[2025-09-11 07:47:46] [Rank 0] step:7101/10000 train_time:292245ms step_avg:41.16ms +[2025-09-11 07:47:46] [Rank 0] step:7101/10000 train_time:292245ms step_avg:41.16ms +[2025-09-11 07:47:46] [Rank 0] step:7121/10000 train_time:292945ms step_avg:41.14ms +[2025-09-11 07:47:46] [Rank 0] step:7121/10000 train_time:292945ms step_avg:41.14ms +[2025-09-11 07:47:47] [Rank 0] step:7141/10000 train_time:293643ms step_avg:41.12ms +[2025-09-11 07:47:47] [Rank 0] step:7141/10000 train_time:293643ms step_avg:41.12ms +[2025-09-11 07:47:48] [Rank 0] step:7161/10000 train_time:294344ms step_avg:41.10ms +[2025-09-11 07:47:48] [Rank 0] step:7161/10000 train_time:294344ms step_avg:41.10ms +[2025-09-11 07:47:49] [Rank 0] step:7181/10000 train_time:295042ms step_avg:41.09ms +[2025-09-11 07:47:49] [Rank 0] step:7181/10000 train_time:295042ms step_avg:41.09ms +[2025-09-11 07:47:49] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 07:47:49] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 07:47:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 07:47:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 07:47:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 07:47:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 07:47:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:47:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:47:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 07:47:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 07:47:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 07:47:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 07:47:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 07:47:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 07:47:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 07:47:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 07:47:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 07:47:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 07:47:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 07:47:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 07:47:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 07:47:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 07:47:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 07:47:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 07:47:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 07:47:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 07:47:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 07:47:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 07:47:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 07:47:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 07:47:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 07:47:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 07:47:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 07:47:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 07:47:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 07:47:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 07:47:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 07:47:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 07:47:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 07:47:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 07:47:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 07:47:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 07:47:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 07:47:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 07:47:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 07:47:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 07:47:59] [Rank 0] PRINT: step:7200/10000 val_loss:5.4684 total_sharp:1.7415e-04 L1_sharp:6.5534e-05 L2_sharp:4.1598e-05 L3_sharp:2.4193e-05 L4_sharp:1.2798e-05 L5_sharp:6.9453e-06 L6_sharp:1.4544e-05 L7_sharp:1.5753e-05 L8_sharp:3.8157e-05 L9_sharp:3.4677e-05 L10_sharp:3.9501e-05 L11_sharp:9.1831e-05 L12_sharp:1.4209e-04 total_fnorm:2.9125e+01 total_l1_linf:6.0160e+04 total_spectral:1.4688e+01 L1_fnorm:8.9375e+00 L2_fnorm:8.6875e+00 L3_fnorm:8.5625e+00 L4_fnorm:8.4375e+00 L5_fnorm:8.2500e+00 L6_fnorm:8.5625e+00 L7_fnorm:8.5625e+00 L8_fnorm:8.2500e+00 L9_fnorm:8.5000e+00 L10_fnorm:8.5625e+00 L11_fnorm:8.5000e+00 L12_fnorm:7.6875e+00 L1_l1linf:2.0469e+00 L2_l1linf:1.9297e+00 L3_l1linf:1.8203e+00 L4_l1linf:1.8047e+00 L5_l1linf:1.7109e+00 L6_l1linf:1.6172e+00 L7_l1linf:1.6172e+00 L8_l1linf:1.5938e+00 L9_l1linf:1.6094e+00 L10_l1linf:1.6875e+00 L11_l1linf:1.5234e+00 L12_l1linf:1.4531e+00 L1_spectral:1.1809e-01 L2_spectral:1.1422e-01 L3_spectral:1.1684e-01 L4_spectral:1.1689e-01 L5_spectral:1.1297e-01 L6_spectral:1.1606e-01 L7_spectral:1.1619e-01 L8_spectral:1.1408e-01 L9_spectral:1.1681e-01 L10_spectral:1.1680e-01 L11_spectral:1.1675e-01 L12_spectral:1.1188e-01 train_time:295722ms step_avg:41.07ms +[2025-09-11 07:47:59] [Rank 0] PRINT: step:7200/10000 
val_loss:5.4684 total_sharp:1.7415e-04 L1_sharp:6.5534e-05 L2_sharp:4.1598e-05 L3_sharp:2.4193e-05 L4_sharp:1.2798e-05 L5_sharp:6.9453e-06 L6_sharp:1.4544e-05 L7_sharp:1.5753e-05 L8_sharp:3.8157e-05 L9_sharp:3.4677e-05 L10_sharp:3.9501e-05 L11_sharp:9.1831e-05 L12_sharp:1.4209e-04 total_fnorm:2.9125e+01 total_l1_linf:6.0160e+04 total_spectral:1.4688e+01 L1_fnorm:8.9375e+00 L2_fnorm:8.6875e+00 L3_fnorm:8.5625e+00 L4_fnorm:8.4375e+00 L5_fnorm:8.2500e+00 L6_fnorm:8.5625e+00 L7_fnorm:8.5625e+00 L8_fnorm:8.2500e+00 L9_fnorm:8.5000e+00 L10_fnorm:8.5625e+00 L11_fnorm:8.5000e+00 L12_fnorm:7.6875e+00 L1_l1linf:2.0469e+00 L2_l1linf:1.9297e+00 L3_l1linf:1.8203e+00 L4_l1linf:1.8047e+00 L5_l1linf:1.7109e+00 L6_l1linf:1.6172e+00 L7_l1linf:1.6172e+00 L8_l1linf:1.5938e+00 L9_l1linf:1.6094e+00 L10_l1linf:1.6875e+00 L11_l1linf:1.5234e+00 L12_l1linf:1.4531e+00 L1_spectral:1.1809e-01 L2_spectral:1.1422e-01 L3_spectral:1.1684e-01 L4_spectral:1.1689e-01 L5_spectral:1.1297e-01 L6_spectral:1.1606e-01 L7_spectral:1.1619e-01 L8_spectral:1.1408e-01 L9_spectral:1.1681e-01 L10_spectral:1.1680e-01 L11_spectral:1.1675e-01 L12_spectral:1.1188e-01 train_time:295722ms step_avg:41.07ms +[2025-09-11 07:48:00] [Rank 0] step:7201/10000 train_time:296963ms step_avg:41.24ms +[2025-09-11 07:48:00] [Rank 0] step:7201/10000 train_time:296963ms step_avg:41.24ms +[2025-09-11 07:48:01] [Rank 0] step:7221/10000 train_time:297693ms step_avg:41.23ms +[2025-09-11 07:48:01] [Rank 0] step:7221/10000 train_time:297693ms step_avg:41.23ms +[2025-09-11 07:48:02] [Rank 0] step:7241/10000 train_time:298394ms step_avg:41.21ms +[2025-09-11 07:48:02] [Rank 0] step:7241/10000 train_time:298394ms step_avg:41.21ms +[2025-09-11 07:48:03] [Rank 0] step:7261/10000 train_time:299096ms step_avg:41.19ms +[2025-09-11 07:48:03] [Rank 0] step:7261/10000 train_time:299096ms step_avg:41.19ms +[2025-09-11 07:48:03] [Rank 0] step:7281/10000 train_time:299802ms step_avg:41.18ms +[2025-09-11 07:48:03] [Rank 0] step:7281/10000 
train_time:299802ms step_avg:41.18ms +[2025-09-11 07:48:04] [Rank 0] step:7301/10000 train_time:300501ms step_avg:41.16ms +[2025-09-11 07:48:04] [Rank 0] step:7301/10000 train_time:300501ms step_avg:41.16ms +[2025-09-11 07:48:05] [Rank 0] step:7321/10000 train_time:301200ms step_avg:41.14ms +[2025-09-11 07:48:05] [Rank 0] step:7321/10000 train_time:301200ms step_avg:41.14ms +[2025-09-11 07:48:06] [Rank 0] step:7341/10000 train_time:302291ms step_avg:41.18ms +[2025-09-11 07:48:06] [Rank 0] step:7341/10000 train_time:302291ms step_avg:41.18ms +[2025-09-11 07:48:07] [Rank 0] step:7361/10000 train_time:303099ms step_avg:41.18ms +[2025-09-11 07:48:07] [Rank 0] step:7361/10000 train_time:303099ms step_avg:41.18ms +[2025-09-11 07:48:07] [Rank 0] step:7381/10000 train_time:303800ms step_avg:41.16ms +[2025-09-11 07:48:07] [Rank 0] step:7381/10000 train_time:303800ms step_avg:41.16ms +[2025-09-11 07:48:08] [Rank 0] step:7401/10000 train_time:304773ms step_avg:41.18ms +[2025-09-11 07:48:08] [Rank 0] step:7401/10000 train_time:304773ms step_avg:41.18ms +[2025-09-11 07:48:09] [Rank 0] step:7421/10000 train_time:305472ms step_avg:41.16ms +[2025-09-11 07:48:09] [Rank 0] step:7421/10000 train_time:305472ms step_avg:41.16ms +[2025-09-11 07:48:10] [Rank 0] step:7441/10000 train_time:306172ms step_avg:41.15ms +[2025-09-11 07:48:10] [Rank 0] step:7441/10000 train_time:306172ms step_avg:41.15ms +[2025-09-11 07:48:10] [Rank 0] step:7461/10000 train_time:306872ms step_avg:41.13ms +[2025-09-11 07:48:10] [Rank 0] step:7461/10000 train_time:306872ms step_avg:41.13ms +[2025-09-11 07:48:11] [Rank 0] step:7481/10000 train_time:307573ms step_avg:41.11ms +[2025-09-11 07:48:11] [Rank 0] step:7481/10000 train_time:307573ms step_avg:41.11ms +[2025-09-11 07:48:12] [Rank 0] step:7501/10000 train_time:308273ms step_avg:41.10ms +[2025-09-11 07:48:12] [Rank 0] step:7501/10000 train_time:308273ms step_avg:41.10ms +[2025-09-11 07:48:12] [Rank 0] step:7521/10000 train_time:308974ms step_avg:41.08ms 
+[2025-09-11 07:48:12] [Rank 0] step:7521/10000 train_time:308974ms step_avg:41.08ms +[2025-09-11 07:48:13] [Rank 0] step:7541/10000 train_time:309672ms step_avg:41.07ms +[2025-09-11 07:48:13] [Rank 0] step:7541/10000 train_time:309672ms step_avg:41.07ms +[2025-09-11 07:48:14] [Rank 0] step:7561/10000 train_time:310373ms step_avg:41.05ms +[2025-09-11 07:48:14] [Rank 0] step:7561/10000 train_time:310373ms step_avg:41.05ms +[2025-09-11 07:48:15] [Rank 0] step:7581/10000 train_time:311073ms step_avg:41.03ms +[2025-09-11 07:48:15] [Rank 0] step:7581/10000 train_time:311073ms step_avg:41.03ms +[2025-09-11 07:48:15] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 07:48:15] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 07:48:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 07:48:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 07:48:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 07:48:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 07:48:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:48:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:48:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 07:48:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 07:48:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 07:48:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 07:48:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 07:48:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 07:48:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 07:48:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 07:48:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 07:48:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 07:48:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 07:48:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 07:48:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 07:48:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 07:48:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 07:48:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 07:48:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 07:48:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 07:48:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 07:48:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 07:48:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 07:48:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 07:48:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 07:48:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 07:48:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 07:48:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 07:48:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 07:48:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 07:48:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 07:48:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 07:48:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 07:48:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 07:48:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 07:48:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 07:48:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 07:48:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 07:48:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 07:48:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:48:25] [Rank 0] PRINT: step:7600/10000 val_loss:5.4479 total_sharp:1.7724e-04 L1_sharp:6.3742e-05 L2_sharp:3.9775e-05 L3_sharp:1.6485e-05 L4_sharp:6.2832e-06 L5_sharp:2.6852e-05 L6_sharp:2.0695e-05 L7_sharp:2.2654e-05 L8_sharp:4.0671e-05 L9_sharp:3.3948e-05 L10_sharp:4.6424e-05 L11_sharp:8.5380e-05 L12_sharp:1.2058e-04 total_fnorm:2.4875e+01 total_l1_linf:4.7616e+04 total_spectral:1.2375e+01 L1_fnorm:7.5625e+00 L2_fnorm:7.2812e+00 L3_fnorm:7.2188e+00 L4_fnorm:7.1250e+00 L5_fnorm:6.9375e+00 L6_fnorm:7.2188e+00 L7_fnorm:7.1875e+00 L8_fnorm:6.9375e+00 L9_fnorm:7.1562e+00 L10_fnorm:7.2188e+00 L11_fnorm:7.1875e+00 L12_fnorm:6.4688e+00 L1_l1linf:1.6172e+00 L2_l1linf:1.5703e+00 L3_l1linf:1.4844e+00 L4_l1linf:1.3906e+00 L5_l1linf:1.3672e+00 L6_l1linf:1.3672e+00 L7_l1linf:1.3125e+00 L8_l1linf:1.2891e+00 L9_l1linf:1.2812e+00 L10_l1linf:1.3828e+00 L11_l1linf:1.2656e+00 L12_l1linf:1.2109e+00 L1_spectral:1.0176e-01 L2_spectral:9.8815e-02 L3_spectral:9.9658e-02 L4_spectral:9.9499e-02 L5_spectral:9.8357e-02 L6_spectral:1.0070e-01 L7_spectral:1.0109e-01 L8_spectral:9.8949e-02 L9_spectral:1.0125e-01 L10_spectral:1.0183e-01 L11_spectral:1.0072e-01 L12_spectral:9.6578e-02 train_time:311755ms step_avg:41.02ms +[2025-09-11 07:48:25] [Rank 0] PRINT: step:7600/10000 val_loss:5.4479 total_sharp:1.7724e-04 L1_sharp:6.3742e-05 L2_sharp:3.9775e-05 L3_sharp:1.6485e-05 L4_sharp:6.2832e-06 L5_sharp:2.6852e-05 L6_sharp:2.0695e-05 L7_sharp:2.2654e-05 L8_sharp:4.0671e-05 L9_sharp:3.3948e-05 L10_sharp:4.6424e-05 L11_sharp:8.5380e-05 L12_sharp:1.2058e-04 total_fnorm:2.4875e+01 total_l1_linf:4.7616e+04 total_spectral:1.2375e+01 L1_fnorm:7.5625e+00 L2_fnorm:7.2812e+00 L3_fnorm:7.2188e+00 L4_fnorm:7.1250e+00 L5_fnorm:6.9375e+00 L6_fnorm:7.2188e+00 L7_fnorm:7.1875e+00 L8_fnorm:6.9375e+00 L9_fnorm:7.1562e+00 L10_fnorm:7.2188e+00 L11_fnorm:7.1875e+00 L12_fnorm:6.4688e+00 L1_l1linf:1.6172e+00 L2_l1linf:1.5703e+00 L3_l1linf:1.4844e+00 L4_l1linf:1.3906e+00 L5_l1linf:1.3672e+00 
L6_l1linf:1.3672e+00 L7_l1linf:1.3125e+00 L8_l1linf:1.2891e+00 L9_l1linf:1.2812e+00 L10_l1linf:1.3828e+00 L11_l1linf:1.2656e+00 L12_l1linf:1.2109e+00 L1_spectral:1.0176e-01 L2_spectral:9.8815e-02 L3_spectral:9.9658e-02 L4_spectral:9.9499e-02 L5_spectral:9.8357e-02 L6_spectral:1.0070e-01 L7_spectral:1.0109e-01 L8_spectral:9.8949e-02 L9_spectral:1.0125e-01 L10_spectral:1.0183e-01 L11_spectral:1.0072e-01 L12_spectral:9.6578e-02 train_time:311755ms step_avg:41.02ms +[2025-09-11 07:48:27] [Rank 0] step:7601/10000 train_time:312945ms step_avg:41.17ms +[2025-09-11 07:48:27] [Rank 0] step:7601/10000 train_time:312945ms step_avg:41.17ms +[2025-09-11 07:48:27] [Rank 0] step:7621/10000 train_time:313673ms step_avg:41.16ms +[2025-09-11 07:48:27] [Rank 0] step:7621/10000 train_time:313673ms step_avg:41.16ms +[2025-09-11 07:48:28] [Rank 0] step:7641/10000 train_time:314376ms step_avg:41.14ms +[2025-09-11 07:48:28] [Rank 0] step:7641/10000 train_time:314376ms step_avg:41.14ms +[2025-09-11 07:48:29] [Rank 0] step:7661/10000 train_time:315078ms step_avg:41.13ms +[2025-09-11 07:48:29] [Rank 0] step:7661/10000 train_time:315078ms step_avg:41.13ms +[2025-09-11 07:48:29] [Rank 0] step:7681/10000 train_time:315779ms step_avg:41.11ms +[2025-09-11 07:48:29] [Rank 0] step:7681/10000 train_time:315779ms step_avg:41.11ms +[2025-09-11 07:48:30] [Rank 0] step:7701/10000 train_time:316482ms step_avg:41.10ms +[2025-09-11 07:48:30] [Rank 0] step:7701/10000 train_time:316482ms step_avg:41.10ms +[2025-09-11 07:48:31] [Rank 0] step:7721/10000 train_time:317182ms step_avg:41.08ms +[2025-09-11 07:48:31] [Rank 0] step:7721/10000 train_time:317182ms step_avg:41.08ms +[2025-09-11 07:48:32] [Rank 0] step:7741/10000 train_time:317884ms step_avg:41.06ms +[2025-09-11 07:48:32] [Rank 0] step:7741/10000 train_time:317884ms step_avg:41.06ms +[2025-09-11 07:48:32] [Rank 0] step:7761/10000 train_time:318585ms step_avg:41.05ms +[2025-09-11 07:48:32] [Rank 0] step:7761/10000 train_time:318585ms step_avg:41.05ms 
+[2025-09-11 07:48:33] [Rank 0] step:7781/10000 train_time:319288ms step_avg:41.03ms +[2025-09-11 07:48:33] [Rank 0] step:7781/10000 train_time:319288ms step_avg:41.03ms +[2025-09-11 07:48:34] [Rank 0] step:7801/10000 train_time:319988ms step_avg:41.02ms +[2025-09-11 07:48:34] [Rank 0] step:7801/10000 train_time:319988ms step_avg:41.02ms +[2025-09-11 07:48:34] [Rank 0] step:7821/10000 train_time:320689ms step_avg:41.00ms +[2025-09-11 07:48:34] [Rank 0] step:7821/10000 train_time:320689ms step_avg:41.00ms +[2025-09-11 07:48:35] [Rank 0] step:7841/10000 train_time:321391ms step_avg:40.99ms +[2025-09-11 07:48:35] [Rank 0] step:7841/10000 train_time:321391ms step_avg:40.99ms +[2025-09-11 07:48:36] [Rank 0] step:7861/10000 train_time:322094ms step_avg:40.97ms +[2025-09-11 07:48:36] [Rank 0] step:7861/10000 train_time:322094ms step_avg:40.97ms +[2025-09-11 07:48:36] [Rank 0] step:7881/10000 train_time:322795ms step_avg:40.96ms +[2025-09-11 07:48:36] [Rank 0] step:7881/10000 train_time:322795ms step_avg:40.96ms +[2025-09-11 07:48:37] [Rank 0] step:7901/10000 train_time:323497ms step_avg:40.94ms +[2025-09-11 07:48:37] [Rank 0] step:7901/10000 train_time:323497ms step_avg:40.94ms +[2025-09-11 07:48:38] [Rank 0] step:7921/10000 train_time:324199ms step_avg:40.93ms +[2025-09-11 07:48:38] [Rank 0] step:7921/10000 train_time:324199ms step_avg:40.93ms +[2025-09-11 07:48:39] [Rank 0] step:7941/10000 train_time:324901ms step_avg:40.91ms +[2025-09-11 07:48:39] [Rank 0] step:7941/10000 train_time:324901ms step_avg:40.91ms +[2025-09-11 07:48:39] [Rank 0] step:7961/10000 train_time:325599ms step_avg:40.90ms +[2025-09-11 07:48:39] [Rank 0] step:7961/10000 train_time:325599ms step_avg:40.90ms +[2025-09-11 07:48:40] [Rank 0] step:7981/10000 train_time:326302ms step_avg:40.88ms +[2025-09-11 07:48:40] [Rank 0] step:7981/10000 train_time:326302ms step_avg:40.88ms +[2025-09-11 07:48:41] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 07:48:41] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 07:48:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 07:48:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 07:48:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 07:48:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 07:48:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:48:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:48:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 07:48:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 07:48:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 07:48:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 07:48:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 07:48:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 07:48:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 07:48:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 07:48:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 07:48:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 07:48:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 07:48:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 07:48:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 07:48:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 07:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 07:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 07:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 07:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 07:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 07:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 07:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 07:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 07:48:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 07:48:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 07:48:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 07:48:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 07:48:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 07:48:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 07:48:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 07:48:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 07:48:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 07:48:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 07:48:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 07:48:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 07:48:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 07:48:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 07:48:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 07:48:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 07:48:51] [Rank 0] PRINT: step:8000/10000 val_loss:5.4353 total_sharp:1.7723e-04 L1_sharp:4.4586e-05 L2_sharp:3.9638e-05 L3_sharp:3.1366e-05 L4_sharp:1.8939e-05 L5_sharp:2.4060e-05 L6_sharp:1.8377e-05 L7_sharp:2.0479e-05 L8_sharp:3.4774e-05 L9_sharp:3.2294e-05 L10_sharp:4.0996e-05 L11_sharp:9.8507e-05 L12_sharp:1.1371e-04 total_fnorm:2.0125e+01 total_l1_linf:3.6096e+04 total_spectral:1.0000e+01 L1_fnorm:6.2188e+00 L2_fnorm:5.9062e+00 L3_fnorm:5.8750e+00 L4_fnorm:5.7812e+00 L5_fnorm:5.6250e+00 L6_fnorm:5.8750e+00 L7_fnorm:5.8125e+00 L8_fnorm:5.6250e+00 L9_fnorm:5.7812e+00 L10_fnorm:5.8438e+00 L11_fnorm:5.8438e+00 L12_fnorm:5.2500e+00 L1_l1linf:1.3281e+00 L2_l1linf:1.2031e+00 L3_l1linf:1.1641e+00 L4_l1linf:1.1094e+00 L5_l1linf:1.0625e+00 L6_l1linf:1.0078e+00 L7_l1linf:1.0312e+00 L8_l1linf:1.0156e+00 L9_l1linf:9.8828e-01 L10_l1linf:1.0781e+00 L11_l1linf:9.8047e-01 L12_l1linf:9.2969e-01 L1_spectral:8.6163e-02 L2_spectral:8.2903e-02 L3_spectral:8.3391e-02 L4_spectral:8.2346e-02 L5_spectral:8.2095e-02 L6_spectral:8.4207e-02 L7_spectral:8.3874e-02 L8_spectral:8.2395e-02 L9_spectral:8.3680e-02 L10_spectral:8.3398e-02 L11_spectral:8.4353e-02 L12_spectral:8.0314e-02 train_time:326982ms step_avg:40.87ms +[2025-09-11 07:48:51] [Rank 0] PRINT: step:8000/10000 
val_loss:5.4353 total_sharp:1.7723e-04 L1_sharp:4.4586e-05 L2_sharp:3.9638e-05 L3_sharp:3.1366e-05 L4_sharp:1.8939e-05 L5_sharp:2.4060e-05 L6_sharp:1.8377e-05 L7_sharp:2.0479e-05 L8_sharp:3.4774e-05 L9_sharp:3.2294e-05 L10_sharp:4.0996e-05 L11_sharp:9.8507e-05 L12_sharp:1.1371e-04 total_fnorm:2.0125e+01 total_l1_linf:3.6096e+04 total_spectral:1.0000e+01 L1_fnorm:6.2188e+00 L2_fnorm:5.9062e+00 L3_fnorm:5.8750e+00 L4_fnorm:5.7812e+00 L5_fnorm:5.6250e+00 L6_fnorm:5.8750e+00 L7_fnorm:5.8125e+00 L8_fnorm:5.6250e+00 L9_fnorm:5.7812e+00 L10_fnorm:5.8438e+00 L11_fnorm:5.8438e+00 L12_fnorm:5.2500e+00 L1_l1linf:1.3281e+00 L2_l1linf:1.2031e+00 L3_l1linf:1.1641e+00 L4_l1linf:1.1094e+00 L5_l1linf:1.0625e+00 L6_l1linf:1.0078e+00 L7_l1linf:1.0312e+00 L8_l1linf:1.0156e+00 L9_l1linf:9.8828e-01 L10_l1linf:1.0781e+00 L11_l1linf:9.8047e-01 L12_l1linf:9.2969e-01 L1_spectral:8.6163e-02 L2_spectral:8.2903e-02 L3_spectral:8.3391e-02 L4_spectral:8.2346e-02 L5_spectral:8.2095e-02 L6_spectral:8.4207e-02 L7_spectral:8.3874e-02 L8_spectral:8.2395e-02 L9_spectral:8.3680e-02 L10_spectral:8.3398e-02 L11_spectral:8.4353e-02 L12_spectral:8.0314e-02 train_time:326982ms step_avg:40.87ms +[2025-09-11 07:48:52] [Rank 0] step:8001/10000 train_time:328155ms step_avg:41.01ms +[2025-09-11 07:48:52] [Rank 0] step:8001/10000 train_time:328155ms step_avg:41.01ms +[2025-09-11 07:48:53] [Rank 0] step:8021/10000 train_time:328895ms step_avg:41.00ms +[2025-09-11 07:48:53] [Rank 0] step:8021/10000 train_time:328895ms step_avg:41.00ms +[2025-09-11 07:48:53] [Rank 0] step:8041/10000 train_time:329597ms step_avg:40.99ms +[2025-09-11 07:48:53] [Rank 0] step:8041/10000 train_time:329597ms step_avg:40.99ms +[2025-09-11 07:48:54] [Rank 0] step:8061/10000 train_time:330301ms step_avg:40.98ms +[2025-09-11 07:48:54] [Rank 0] step:8061/10000 train_time:330301ms step_avg:40.98ms +[2025-09-11 07:48:55] [Rank 0] step:8081/10000 train_time:331001ms step_avg:40.96ms +[2025-09-11 07:48:55] [Rank 0] step:8081/10000 
train_time:331001ms step_avg:40.96ms +[2025-09-11 07:48:56] [Rank 0] step:8101/10000 train_time:331700ms step_avg:40.95ms +[2025-09-11 07:48:56] [Rank 0] step:8101/10000 train_time:331700ms step_avg:40.95ms +[2025-09-11 07:48:56] [Rank 0] step:8121/10000 train_time:332406ms step_avg:40.93ms +[2025-09-11 07:48:56] [Rank 0] step:8121/10000 train_time:332406ms step_avg:40.93ms +[2025-09-11 07:48:58] [Rank 0] step:8141/10000 train_time:333832ms step_avg:41.01ms +[2025-09-11 07:48:58] [Rank 0] step:8141/10000 train_time:333832ms step_avg:41.01ms +[2025-09-11 07:48:58] [Rank 0] step:8161/10000 train_time:334538ms step_avg:40.99ms +[2025-09-11 07:48:58] [Rank 0] step:8161/10000 train_time:334538ms step_avg:40.99ms +[2025-09-11 07:48:59] [Rank 0] step:8181/10000 train_time:335251ms step_avg:40.98ms +[2025-09-11 07:48:59] [Rank 0] step:8181/10000 train_time:335251ms step_avg:40.98ms +[2025-09-11 07:49:00] [Rank 0] step:8201/10000 train_time:335960ms step_avg:40.97ms +[2025-09-11 07:49:00] [Rank 0] step:8201/10000 train_time:335960ms step_avg:40.97ms +[2025-09-11 07:49:01] [Rank 0] step:8221/10000 train_time:336668ms step_avg:40.95ms +[2025-09-11 07:49:01] [Rank 0] step:8221/10000 train_time:336668ms step_avg:40.95ms +[2025-09-11 07:49:01] [Rank 0] step:8241/10000 train_time:337386ms step_avg:40.94ms +[2025-09-11 07:49:01] [Rank 0] step:8241/10000 train_time:337386ms step_avg:40.94ms +[2025-09-11 07:49:02] [Rank 0] step:8261/10000 train_time:338094ms step_avg:40.93ms +[2025-09-11 07:49:02] [Rank 0] step:8261/10000 train_time:338094ms step_avg:40.93ms +[2025-09-11 07:49:03] [Rank 0] step:8281/10000 train_time:338799ms step_avg:40.91ms +[2025-09-11 07:49:03] [Rank 0] step:8281/10000 train_time:338799ms step_avg:40.91ms +[2025-09-11 07:49:03] [Rank 0] step:8301/10000 train_time:339507ms step_avg:40.90ms +[2025-09-11 07:49:03] [Rank 0] step:8301/10000 train_time:339507ms step_avg:40.90ms +[2025-09-11 07:49:04] [Rank 0] step:8321/10000 train_time:340215ms step_avg:40.89ms 
+[2025-09-11 07:49:04] [Rank 0] step:8321/10000 train_time:340215ms step_avg:40.89ms +[2025-09-11 07:49:05] [Rank 0] step:8341/10000 train_time:340929ms step_avg:40.87ms +[2025-09-11 07:49:05] [Rank 0] step:8341/10000 train_time:340929ms step_avg:40.87ms +[2025-09-11 07:49:05] [Rank 0] step:8361/10000 train_time:341633ms step_avg:40.86ms +[2025-09-11 07:49:05] [Rank 0] step:8361/10000 train_time:341633ms step_avg:40.86ms +[2025-09-11 07:49:06] [Rank 0] step:8381/10000 train_time:342343ms step_avg:40.85ms +[2025-09-11 07:49:06] [Rank 0] step:8381/10000 train_time:342343ms step_avg:40.85ms +[2025-09-11 07:49:07] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 07:49:07] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 07:49:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 07:49:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 07:49:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 07:49:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 07:49:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:49:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:49:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 07:49:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 07:49:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 07:49:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 07:49:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 07:49:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 07:49:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 07:49:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 07:49:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 07:49:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 07:49:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 07:49:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 07:49:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 07:49:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 07:49:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 07:49:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 07:49:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 07:49:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 07:49:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 07:49:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 07:49:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 07:49:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 07:49:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 07:49:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 07:49:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 07:49:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 07:49:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 07:49:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 07:49:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 07:49:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 07:49:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 07:49:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 07:49:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 07:49:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 07:49:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 07:49:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 07:49:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 07:49:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:49:17] [Rank 0] PRINT: step:8400/10000 val_loss:5.4142 total_sharp:1.6166e-04 L1_sharp:8.6347e-05 L2_sharp:3.4473e-05 L3_sharp:2.0498e-05 L4_sharp:8.0228e-06 L5_sharp:1.4189e-05 L6_sharp:1.2645e-05 L7_sharp:1.3899e-05 L8_sharp:2.7238e-05 L9_sharp:2.7745e-05 L10_sharp:3.1067e-05 L11_sharp:9.1564e-05 L12_sharp:1.4499e-04 total_fnorm:1.5688e+01 total_l1_linf:2.5984e+04 total_spectral:7.8438e+00 L1_fnorm:4.8750e+00 L2_fnorm:4.6562e+00 L3_fnorm:4.5938e+00 L4_fnorm:4.5000e+00 L5_fnorm:4.4062e+00 L6_fnorm:4.5625e+00 L7_fnorm:4.5312e+00 L8_fnorm:4.3750e+00 L9_fnorm:4.5000e+00 L10_fnorm:4.5625e+00 L11_fnorm:4.5938e+00 L12_fnorm:4.0938e+00 L1_l1linf:9.4922e-01 L2_l1linf:8.9453e-01 L3_l1linf:8.8281e-01 L4_l1linf:8.3203e-01 L5_l1linf:7.8125e-01 L6_l1linf:7.4609e-01 L7_l1linf:7.5391e-01 L8_l1linf:7.4219e-01 L9_l1linf:7.2266e-01 L10_l1linf:7.8516e-01 L11_l1linf:7.5000e-01 L12_l1linf:7.1094e-01 L1_spectral:6.8200e-02 L2_spectral:6.6035e-02 L3_spectral:6.5248e-02 L4_spectral:6.6013e-02 L5_spectral:6.5823e-02 L6_spectral:6.7325e-02 L7_spectral:6.7083e-02 L8_spectral:6.6620e-02 L9_spectral:6.7069e-02 L10_spectral:6.7171e-02 L11_spectral:6.7642e-02 L12_spectral:6.4147e-02 train_time:343033ms step_avg:40.84ms +[2025-09-11 07:49:17] [Rank 0] PRINT: step:8400/10000 val_loss:5.4142 total_sharp:1.6166e-04 L1_sharp:8.6347e-05 L2_sharp:3.4473e-05 L3_sharp:2.0498e-05 L4_sharp:8.0228e-06 L5_sharp:1.4189e-05 L6_sharp:1.2645e-05 L7_sharp:1.3899e-05 L8_sharp:2.7238e-05 L9_sharp:2.7745e-05 L10_sharp:3.1067e-05 L11_sharp:9.1564e-05 L12_sharp:1.4499e-04 total_fnorm:1.5688e+01 total_l1_linf:2.5984e+04 total_spectral:7.8438e+00 L1_fnorm:4.8750e+00 L2_fnorm:4.6562e+00 L3_fnorm:4.5938e+00 L4_fnorm:4.5000e+00 L5_fnorm:4.4062e+00 L6_fnorm:4.5625e+00 L7_fnorm:4.5312e+00 L8_fnorm:4.3750e+00 L9_fnorm:4.5000e+00 L10_fnorm:4.5625e+00 L11_fnorm:4.5938e+00 L12_fnorm:4.0938e+00 L1_l1linf:9.4922e-01 L2_l1linf:8.9453e-01 L3_l1linf:8.8281e-01 L4_l1linf:8.3203e-01 L5_l1linf:7.8125e-01 
L6_l1linf:7.4609e-01 L7_l1linf:7.5391e-01 L8_l1linf:7.4219e-01 L9_l1linf:7.2266e-01 L10_l1linf:7.8516e-01 L11_l1linf:7.5000e-01 L12_l1linf:7.1094e-01 L1_spectral:6.8200e-02 L2_spectral:6.6035e-02 L3_spectral:6.5248e-02 L4_spectral:6.6013e-02 L5_spectral:6.5823e-02 L6_spectral:6.7325e-02 L7_spectral:6.7083e-02 L8_spectral:6.6620e-02 L9_spectral:6.7069e-02 L10_spectral:6.7171e-02 L11_spectral:6.7642e-02 L12_spectral:6.4147e-02 train_time:343033ms step_avg:40.84ms +[2025-09-11 07:49:18] [Rank 0] step:8401/10000 train_time:344217ms step_avg:40.97ms +[2025-09-11 07:49:18] [Rank 0] step:8401/10000 train_time:344217ms step_avg:40.97ms +[2025-09-11 07:49:19] [Rank 0] step:8421/10000 train_time:344959ms step_avg:40.96ms +[2025-09-11 07:49:19] [Rank 0] step:8421/10000 train_time:344959ms step_avg:40.96ms +[2025-09-11 07:49:19] [Rank 0] step:8441/10000 train_time:345670ms step_avg:40.95ms +[2025-09-11 07:49:19] [Rank 0] step:8441/10000 train_time:345670ms step_avg:40.95ms +[2025-09-11 07:49:20] [Rank 0] step:8461/10000 train_time:346380ms step_avg:40.94ms +[2025-09-11 07:49:20] [Rank 0] step:8461/10000 train_time:346380ms step_avg:40.94ms +[2025-09-11 07:49:21] [Rank 0] step:8481/10000 train_time:347091ms step_avg:40.93ms +[2025-09-11 07:49:21] [Rank 0] step:8481/10000 train_time:347091ms step_avg:40.93ms +[2025-09-11 07:49:22] [Rank 0] step:8501/10000 train_time:347798ms step_avg:40.91ms +[2025-09-11 07:49:22] [Rank 0] step:8501/10000 train_time:347798ms step_avg:40.91ms +[2025-09-11 07:49:22] [Rank 0] step:8521/10000 train_time:348506ms step_avg:40.90ms +[2025-09-11 07:49:22] [Rank 0] step:8521/10000 train_time:348506ms step_avg:40.90ms +[2025-09-11 07:49:23] [Rank 0] step:8541/10000 train_time:349213ms step_avg:40.89ms +[2025-09-11 07:49:23] [Rank 0] step:8541/10000 train_time:349213ms step_avg:40.89ms +[2025-09-11 07:49:24] [Rank 0] step:8561/10000 train_time:349926ms step_avg:40.87ms +[2025-09-11 07:49:24] [Rank 0] step:8561/10000 train_time:349926ms step_avg:40.87ms 
+[2025-09-11 07:49:24] [Rank 0] step:8581/10000 train_time:350636ms step_avg:40.86ms +[2025-09-11 07:49:24] [Rank 0] step:8581/10000 train_time:350636ms step_avg:40.86ms +[2025-09-11 07:49:25] [Rank 0] step:8601/10000 train_time:351347ms step_avg:40.85ms +[2025-09-11 07:49:25] [Rank 0] step:8601/10000 train_time:351347ms step_avg:40.85ms +[2025-09-11 07:49:26] [Rank 0] step:8621/10000 train_time:352055ms step_avg:40.84ms +[2025-09-11 07:49:26] [Rank 0] step:8621/10000 train_time:352055ms step_avg:40.84ms +[2025-09-11 07:49:27] [Rank 0] step:8641/10000 train_time:352762ms step_avg:40.82ms +[2025-09-11 07:49:27] [Rank 0] step:8641/10000 train_time:352762ms step_avg:40.82ms +[2025-09-11 07:49:27] [Rank 0] step:8661/10000 train_time:353471ms step_avg:40.81ms +[2025-09-11 07:49:27] [Rank 0] step:8661/10000 train_time:353471ms step_avg:40.81ms +[2025-09-11 07:49:28] [Rank 0] step:8681/10000 train_time:354181ms step_avg:40.80ms +[2025-09-11 07:49:28] [Rank 0] step:8681/10000 train_time:354181ms step_avg:40.80ms +[2025-09-11 07:49:29] [Rank 0] step:8701/10000 train_time:354888ms step_avg:40.79ms +[2025-09-11 07:49:29] [Rank 0] step:8701/10000 train_time:354888ms step_avg:40.79ms +[2025-09-11 07:49:29] [Rank 0] step:8721/10000 train_time:355599ms step_avg:40.78ms +[2025-09-11 07:49:29] [Rank 0] step:8721/10000 train_time:355599ms step_avg:40.78ms +[2025-09-11 07:49:30] [Rank 0] step:8741/10000 train_time:356304ms step_avg:40.76ms +[2025-09-11 07:49:30] [Rank 0] step:8741/10000 train_time:356304ms step_avg:40.76ms +[2025-09-11 07:49:31] [Rank 0] step:8761/10000 train_time:357016ms step_avg:40.75ms +[2025-09-11 07:49:31] [Rank 0] step:8761/10000 train_time:357016ms step_avg:40.75ms +[2025-09-11 07:49:32] [Rank 0] step:8781/10000 train_time:357725ms step_avg:40.74ms +[2025-09-11 07:49:32] [Rank 0] step:8781/10000 train_time:357725ms step_avg:40.74ms +[2025-09-11 07:49:32] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 07:49:32] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 07:49:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 07:49:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 07:49:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 07:49:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 07:49:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:49:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:49:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 07:49:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 07:49:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 07:49:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 07:49:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 07:49:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 07:49:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 07:49:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 07:49:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 07:49:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 07:49:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 07:49:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 07:49:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 07:49:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 07:49:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 07:49:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 07:49:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 07:49:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 07:49:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 07:49:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 07:49:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 07:49:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 07:49:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 07:49:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 07:49:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 07:49:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 07:49:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 07:49:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 07:49:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 07:49:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 07:49:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 07:49:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 07:49:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 07:49:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 07:49:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 07:49:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 07:49:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 07:49:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 07:49:44] [Rank 0] PRINT: step:8800/10000 val_loss:5.4024 total_sharp:1.0986e-04 L1_sharp:3.3588e-05 L2_sharp:3.4603e-05 L3_sharp:1.0924e-05 L4_sharp:9.7174e-06 L5_sharp:1.1844e-05 L6_sharp:1.3872e-05 L7_sharp:1.9313e-05 L8_sharp:2.6372e-05 L9_sharp:2.5439e-05 L10_sharp:3.1552e-05 L11_sharp:6.7000e-05 L12_sharp:8.9621e-05 total_fnorm:1.1438e+01 total_l1_linf:1.7152e+04 total_spectral:5.7500e+00 L1_fnorm:3.5781e+00 L2_fnorm:3.4062e+00 L3_fnorm:3.3594e+00 L4_fnorm:3.3125e+00 L5_fnorm:3.2188e+00 L6_fnorm:3.3281e+00 L7_fnorm:3.3125e+00 L8_fnorm:3.2031e+00 L9_fnorm:3.2812e+00 L10_fnorm:3.3438e+00 L11_fnorm:3.3906e+00 L12_fnorm:3.1094e+00 L1_l1linf:6.5234e-01 L2_l1linf:6.1328e-01 L3_l1linf:5.8594e-01 L4_l1linf:5.6641e-01 L5_l1linf:5.2344e-01 L6_l1linf:5.1172e-01 L7_l1linf:4.9609e-01 L8_l1linf:4.7070e-01 L9_l1linf:4.7852e-01 L10_l1linf:5.2734e-01 L11_l1linf:5.3516e-01 L12_l1linf:5.0391e-01 L1_spectral:5.0778e-02 L2_spectral:4.9747e-02 L3_spectral:4.9262e-02 L4_spectral:4.9381e-02 L5_spectral:5.0000e-02 L6_spectral:5.0431e-02 L7_spectral:5.0157e-02 L8_spectral:4.9619e-02 L9_spectral:5.0780e-02 L10_spectral:5.0209e-02 L11_spectral:5.1833e-02 L12_spectral:5.0077e-02 train_time:358412ms step_avg:40.73ms +[2025-09-11 07:49:44] [Rank 0] PRINT: step:8800/10000 
val_loss:5.4024 total_sharp:1.0986e-04 L1_sharp:3.3588e-05 L2_sharp:3.4603e-05 L3_sharp:1.0924e-05 L4_sharp:9.7174e-06 L5_sharp:1.1844e-05 L6_sharp:1.3872e-05 L7_sharp:1.9313e-05 L8_sharp:2.6372e-05 L9_sharp:2.5439e-05 L10_sharp:3.1552e-05 L11_sharp:6.7000e-05 L12_sharp:8.9621e-05 total_fnorm:1.1438e+01 total_l1_linf:1.7152e+04 total_spectral:5.7500e+00 L1_fnorm:3.5781e+00 L2_fnorm:3.4062e+00 L3_fnorm:3.3594e+00 L4_fnorm:3.3125e+00 L5_fnorm:3.2188e+00 L6_fnorm:3.3281e+00 L7_fnorm:3.3125e+00 L8_fnorm:3.2031e+00 L9_fnorm:3.2812e+00 L10_fnorm:3.3438e+00 L11_fnorm:3.3906e+00 L12_fnorm:3.1094e+00 L1_l1linf:6.5234e-01 L2_l1linf:6.1328e-01 L3_l1linf:5.8594e-01 L4_l1linf:5.6641e-01 L5_l1linf:5.2344e-01 L6_l1linf:5.1172e-01 L7_l1linf:4.9609e-01 L8_l1linf:4.7070e-01 L9_l1linf:4.7852e-01 L10_l1linf:5.2734e-01 L11_l1linf:5.3516e-01 L12_l1linf:5.0391e-01 L1_spectral:5.0778e-02 L2_spectral:4.9747e-02 L3_spectral:4.9262e-02 L4_spectral:4.9381e-02 L5_spectral:5.0000e-02 L6_spectral:5.0431e-02 L7_spectral:5.0157e-02 L8_spectral:4.9619e-02 L9_spectral:5.0780e-02 L10_spectral:5.0209e-02 L11_spectral:5.1833e-02 L12_spectral:5.0077e-02 train_time:358412ms step_avg:40.73ms +[2025-09-11 07:49:45] [Rank 0] step:8801/10000 train_time:359688ms step_avg:40.87ms +[2025-09-11 07:49:45] [Rank 0] step:8801/10000 train_time:359688ms step_avg:40.87ms +[2025-09-11 07:49:46] [Rank 0] step:8821/10000 train_time:360426ms step_avg:40.86ms +[2025-09-11 07:49:46] [Rank 0] step:8821/10000 train_time:360426ms step_avg:40.86ms +[2025-09-11 07:49:47] [Rank 0] step:8841/10000 train_time:361135ms step_avg:40.85ms +[2025-09-11 07:49:47] [Rank 0] step:8841/10000 train_time:361135ms step_avg:40.85ms +[2025-09-11 07:49:47] [Rank 0] step:8861/10000 train_time:361842ms step_avg:40.84ms +[2025-09-11 07:49:47] [Rank 0] step:8861/10000 train_time:361842ms step_avg:40.84ms +[2025-09-11 07:49:48] [Rank 0] step:8881/10000 train_time:362551ms step_avg:40.82ms +[2025-09-11 07:49:48] [Rank 0] step:8881/10000 
train_time:362551ms step_avg:40.82ms +[2025-09-11 07:49:49] [Rank 0] step:8901/10000 train_time:363261ms step_avg:40.81ms +[2025-09-11 07:49:49] [Rank 0] step:8901/10000 train_time:363261ms step_avg:40.81ms +[2025-09-11 07:49:49] [Rank 0] step:8921/10000 train_time:363965ms step_avg:40.80ms +[2025-09-11 07:49:49] [Rank 0] step:8921/10000 train_time:363965ms step_avg:40.80ms +[2025-09-11 07:49:50] [Rank 0] step:8941/10000 train_time:364674ms step_avg:40.79ms +[2025-09-11 07:49:50] [Rank 0] step:8941/10000 train_time:364674ms step_avg:40.79ms +[2025-09-11 07:49:51] [Rank 0] step:8961/10000 train_time:365391ms step_avg:40.78ms +[2025-09-11 07:49:51] [Rank 0] step:8961/10000 train_time:365391ms step_avg:40.78ms +[2025-09-11 07:49:51] [Rank 0] step:8981/10000 train_time:366103ms step_avg:40.76ms +[2025-09-11 07:49:51] [Rank 0] step:8981/10000 train_time:366103ms step_avg:40.76ms +[2025-09-11 07:49:52] [Rank 0] step:9001/10000 train_time:366806ms step_avg:40.75ms +[2025-09-11 07:49:52] [Rank 0] step:9001/10000 train_time:366806ms step_avg:40.75ms +[2025-09-11 07:49:53] [Rank 0] step:9021/10000 train_time:367515ms step_avg:40.74ms +[2025-09-11 07:49:53] [Rank 0] step:9021/10000 train_time:367515ms step_avg:40.74ms +[2025-09-11 07:49:54] [Rank 0] step:9041/10000 train_time:368227ms step_avg:40.73ms +[2025-09-11 07:49:54] [Rank 0] step:9041/10000 train_time:368227ms step_avg:40.73ms +[2025-09-11 07:49:54] [Rank 0] step:9061/10000 train_time:368934ms step_avg:40.72ms +[2025-09-11 07:49:54] [Rank 0] step:9061/10000 train_time:368934ms step_avg:40.72ms +[2025-09-11 07:49:55] [Rank 0] step:9081/10000 train_time:369643ms step_avg:40.71ms +[2025-09-11 07:49:55] [Rank 0] step:9081/10000 train_time:369643ms step_avg:40.71ms +[2025-09-11 07:49:56] [Rank 0] step:9101/10000 train_time:370356ms step_avg:40.69ms +[2025-09-11 07:49:56] [Rank 0] step:9101/10000 train_time:370356ms step_avg:40.69ms +[2025-09-11 07:49:56] [Rank 0] step:9121/10000 train_time:371069ms step_avg:40.68ms 
+[2025-09-11 07:49:56] [Rank 0] step:9121/10000 train_time:371069ms step_avg:40.68ms +[2025-09-11 07:49:57] [Rank 0] step:9141/10000 train_time:371776ms step_avg:40.67ms +[2025-09-11 07:49:57] [Rank 0] step:9141/10000 train_time:371776ms step_avg:40.67ms +[2025-09-11 07:49:58] [Rank 0] step:9161/10000 train_time:372487ms step_avg:40.66ms +[2025-09-11 07:49:58] [Rank 0] step:9161/10000 train_time:372487ms step_avg:40.66ms +[2025-09-11 07:49:59] [Rank 0] step:9181/10000 train_time:373197ms step_avg:40.65ms +[2025-09-11 07:49:59] [Rank 0] step:9181/10000 train_time:373197ms step_avg:40.65ms +[2025-09-11 07:49:59] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 07:49:59] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 07:50:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 07:50:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 07:50:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 07:50:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 07:50:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:50:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:50:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 07:50:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 07:50:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 07:50:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 07:50:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 07:50:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 07:50:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 07:50:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 07:50:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 07:50:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 07:50:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 07:50:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 07:50:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 07:50:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 07:50:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 07:50:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 07:50:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 07:50:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 07:50:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 07:50:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 07:50:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 07:50:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 07:50:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 07:50:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 07:50:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 07:50:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 07:50:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 07:50:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 07:50:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 07:50:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 07:50:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 07:50:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 07:50:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 07:50:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 07:50:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 07:50:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 07:50:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 07:50:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:50:09] [Rank 0] PRINT: step:9200/10000 val_loss:5.3910 total_sharp:1.1906e-04 L1_sharp:4.8042e-05 L2_sharp:2.2199e-05 L3_sharp:9.4505e-06 L4_sharp:4.7499e-06 L5_sharp:1.4461e-05 L6_sharp:7.7930e-06 L7_sharp:9.2856e-06 L8_sharp:2.2430e-05 L9_sharp:2.3242e-05 L10_sharp:2.6956e-05 L11_sharp:6.3538e-05 L12_sharp:1.2931e-04 total_fnorm:7.7188e+00 total_l1_linf:9.9840e+03 total_spectral:3.8438e+00 L1_fnorm:2.4219e+00 L2_fnorm:2.2969e+00 L3_fnorm:2.2344e+00 L4_fnorm:2.2188e+00 L5_fnorm:2.1719e+00 L6_fnorm:2.2344e+00 L7_fnorm:2.2188e+00 L8_fnorm:2.1250e+00 L9_fnorm:2.1875e+00 L10_fnorm:2.2344e+00 L11_fnorm:2.2500e+00 L12_fnorm:2.0000e+00 L1_l1linf:3.8867e-01 L2_l1linf:3.6719e-01 L3_l1linf:3.5156e-01 L4_l1linf:3.4961e-01 L5_l1linf:3.3203e-01 L6_l1linf:3.0469e-01 L7_l1linf:3.0273e-01 L8_l1linf:2.9492e-01 L9_l1linf:2.9297e-01 L10_l1linf:3.4375e-01 L11_l1linf:3.0469e-01 L12_l1linf:2.9883e-01 L1_spectral:3.5173e-02 L2_spectral:3.3775e-02 L3_spectral:3.3285e-02 L4_spectral:3.3594e-02 L5_spectral:3.5001e-02 L6_spectral:3.4127e-02 L7_spectral:3.4199e-02 L8_spectral:3.4192e-02 L9_spectral:3.4533e-02 L10_spectral:3.4298e-02 L11_spectral:3.5772e-02 L12_spectral:3.3657e-02 train_time:373890ms step_avg:40.64ms +[2025-09-11 07:50:09] [Rank 0] PRINT: step:9200/10000 val_loss:5.3910 total_sharp:1.1906e-04 L1_sharp:4.8042e-05 L2_sharp:2.2199e-05 L3_sharp:9.4505e-06 L4_sharp:4.7499e-06 L5_sharp:1.4461e-05 L6_sharp:7.7930e-06 L7_sharp:9.2856e-06 L8_sharp:2.2430e-05 L9_sharp:2.3242e-05 L10_sharp:2.6956e-05 L11_sharp:6.3538e-05 L12_sharp:1.2931e-04 total_fnorm:7.7188e+00 total_l1_linf:9.9840e+03 total_spectral:3.8438e+00 L1_fnorm:2.4219e+00 L2_fnorm:2.2969e+00 L3_fnorm:2.2344e+00 L4_fnorm:2.2188e+00 L5_fnorm:2.1719e+00 L6_fnorm:2.2344e+00 L7_fnorm:2.2188e+00 L8_fnorm:2.1250e+00 L9_fnorm:2.1875e+00 L10_fnorm:2.2344e+00 L11_fnorm:2.2500e+00 L12_fnorm:2.0000e+00 L1_l1linf:3.8867e-01 L2_l1linf:3.6719e-01 L3_l1linf:3.5156e-01 L4_l1linf:3.4961e-01 L5_l1linf:3.3203e-01 
L6_l1linf:3.0469e-01 L7_l1linf:3.0273e-01 L8_l1linf:2.9492e-01 L9_l1linf:2.9297e-01 L10_l1linf:3.4375e-01 L11_l1linf:3.0469e-01 L12_l1linf:2.9883e-01 L1_spectral:3.5173e-02 L2_spectral:3.3775e-02 L3_spectral:3.3285e-02 L4_spectral:3.3594e-02 L5_spectral:3.5001e-02 L6_spectral:3.4127e-02 L7_spectral:3.4199e-02 L8_spectral:3.4192e-02 L9_spectral:3.4533e-02 L10_spectral:3.4298e-02 L11_spectral:3.5772e-02 L12_spectral:3.3657e-02 train_time:373890ms step_avg:40.64ms +[2025-09-11 07:50:10] [Rank 0] step:9201/10000 train_time:375115ms step_avg:40.77ms +[2025-09-11 07:50:10] [Rank 0] step:9201/10000 train_time:375115ms step_avg:40.77ms +[2025-09-11 07:50:11] [Rank 0] step:9221/10000 train_time:375851ms step_avg:40.76ms +[2025-09-11 07:50:11] [Rank 0] step:9221/10000 train_time:375851ms step_avg:40.76ms +[2025-09-11 07:50:12] [Rank 0] step:9241/10000 train_time:377100ms step_avg:40.81ms +[2025-09-11 07:50:12] [Rank 0] step:9241/10000 train_time:377100ms step_avg:40.81ms +[2025-09-11 07:50:13] [Rank 0] step:9261/10000 train_time:377811ms step_avg:40.80ms +[2025-09-11 07:50:13] [Rank 0] step:9261/10000 train_time:377811ms step_avg:40.80ms +[2025-09-11 07:50:14] [Rank 0] step:9281/10000 train_time:378522ms step_avg:40.78ms +[2025-09-11 07:50:14] [Rank 0] step:9281/10000 train_time:378522ms step_avg:40.78ms +[2025-09-11 07:50:15] [Rank 0] step:9301/10000 train_time:379493ms step_avg:40.80ms +[2025-09-11 07:50:15] [Rank 0] step:9301/10000 train_time:379493ms step_avg:40.80ms +[2025-09-11 07:50:16] [Rank 0] step:9321/10000 train_time:380205ms step_avg:40.79ms +[2025-09-11 07:50:16] [Rank 0] step:9321/10000 train_time:380205ms step_avg:40.79ms +[2025-09-11 07:50:16] [Rank 0] step:9341/10000 train_time:380910ms step_avg:40.78ms +[2025-09-11 07:50:16] [Rank 0] step:9341/10000 train_time:380910ms step_avg:40.78ms +[2025-09-11 07:50:17] [Rank 0] step:9361/10000 train_time:381617ms step_avg:40.77ms +[2025-09-11 07:50:17] [Rank 0] step:9361/10000 train_time:381617ms step_avg:40.77ms 
+[2025-09-11 07:50:18] [Rank 0] step:9381/10000 train_time:382324ms step_avg:40.76ms +[2025-09-11 07:50:18] [Rank 0] step:9381/10000 train_time:382324ms step_avg:40.76ms +[2025-09-11 07:50:18] [Rank 0] step:9401/10000 train_time:383034ms step_avg:40.74ms +[2025-09-11 07:50:18] [Rank 0] step:9401/10000 train_time:383034ms step_avg:40.74ms +[2025-09-11 07:50:19] [Rank 0] step:9421/10000 train_time:383743ms step_avg:40.73ms +[2025-09-11 07:50:19] [Rank 0] step:9421/10000 train_time:383743ms step_avg:40.73ms +[2025-09-11 07:50:20] [Rank 0] step:9441/10000 train_time:384456ms step_avg:40.72ms +[2025-09-11 07:50:20] [Rank 0] step:9441/10000 train_time:384456ms step_avg:40.72ms +[2025-09-11 07:50:20] [Rank 0] step:9461/10000 train_time:385165ms step_avg:40.71ms +[2025-09-11 07:50:20] [Rank 0] step:9461/10000 train_time:385165ms step_avg:40.71ms +[2025-09-11 07:50:21] [Rank 0] step:9481/10000 train_time:385875ms step_avg:40.70ms +[2025-09-11 07:50:21] [Rank 0] step:9481/10000 train_time:385875ms step_avg:40.70ms +[2025-09-11 07:50:22] [Rank 0] step:9501/10000 train_time:386585ms step_avg:40.69ms +[2025-09-11 07:50:22] [Rank 0] step:9501/10000 train_time:386585ms step_avg:40.69ms +[2025-09-11 07:50:23] [Rank 0] step:9521/10000 train_time:387297ms step_avg:40.68ms +[2025-09-11 07:50:23] [Rank 0] step:9521/10000 train_time:387297ms step_avg:40.68ms +[2025-09-11 07:50:23] [Rank 0] step:9541/10000 train_time:388003ms step_avg:40.67ms +[2025-09-11 07:50:23] [Rank 0] step:9541/10000 train_time:388003ms step_avg:40.67ms +[2025-09-11 07:50:24] [Rank 0] step:9561/10000 train_time:388712ms step_avg:40.66ms +[2025-09-11 07:50:24] [Rank 0] step:9561/10000 train_time:388712ms step_avg:40.66ms +[2025-09-11 07:50:25] [Rank 0] step:9581/10000 train_time:389423ms step_avg:40.65ms +[2025-09-11 07:50:25] [Rank 0] step:9581/10000 train_time:389423ms step_avg:40.65ms +[2025-09-11 07:50:25] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 07:50:25] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 07:50:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 07:50:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 07:50:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 07:50:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 07:50:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:50:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:50:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 07:50:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 07:50:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 07:50:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 07:50:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 07:50:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 07:50:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 07:50:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 07:50:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 07:50:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 07:50:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 07:50:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 07:50:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 07:50:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 07:50:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 07:50:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 07:50:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 07:50:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 07:50:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 07:50:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 07:50:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 07:50:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 07:50:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 07:50:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 07:50:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 07:50:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 07:50:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 07:50:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 07:50:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 07:50:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 07:50:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 07:50:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 07:50:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 07:50:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 07:50:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 07:50:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 07:50:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 07:50:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 07:50:35] [Rank 0] PRINT: step:9600/10000 val_loss:5.3837 total_sharp:7.8064e-05 L1_sharp:2.8799e-05 L2_sharp:1.4021e-05 L3_sharp:1.0433e-05 L4_sharp:1.3029e-06 L5_sharp:1.0367e-05 L6_sharp:7.3727e-06 L7_sharp:8.6771e-06 L8_sharp:1.8234e-05 L9_sharp:1.6064e-05 L10_sharp:1.9271e-05 L11_sharp:5.2447e-05 L12_sharp:7.7606e-05 total_fnorm:4.3438e+00 total_l1_linf:4.7040e+03 total_spectral:2.1719e+00 L1_fnorm:1.3672e+00 L2_fnorm:1.2969e+00 L3_fnorm:1.2734e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2266e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2109e+00 L9_fnorm:1.2344e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2812e+00 L12_fnorm:1.1484e+00 L1_l1linf:1.9531e-01 L2_l1linf:1.7773e-01 L3_l1linf:1.8652e-01 L4_l1linf:1.6211e-01 L5_l1linf:1.7871e-01 L6_l1linf:1.5820e-01 L7_l1linf:1.5039e-01 L8_l1linf:1.4355e-01 L9_l1linf:1.3770e-01 L10_l1linf:1.6113e-01 L11_l1linf:1.5723e-01 L12_l1linf:1.5039e-01 L1_spectral:2.0311e-02 L2_spectral:1.9269e-02 L3_spectral:1.9389e-02 L4_spectral:1.9148e-02 L5_spectral:2.0265e-02 L6_spectral:1.9777e-02 L7_spectral:1.9685e-02 L8_spectral:1.9863e-02 L9_spectral:1.9942e-02 L10_spectral:1.9717e-02 L11_spectral:2.1057e-02 L12_spectral:1.9800e-02 train_time:390111ms step_avg:40.64ms +[2025-09-11 07:50:35] [Rank 0] PRINT: step:9600/10000 
val_loss:5.3837 total_sharp:7.8064e-05 L1_sharp:2.8799e-05 L2_sharp:1.4021e-05 L3_sharp:1.0433e-05 L4_sharp:1.3029e-06 L5_sharp:1.0367e-05 L6_sharp:7.3727e-06 L7_sharp:8.6771e-06 L8_sharp:1.8234e-05 L9_sharp:1.6064e-05 L10_sharp:1.9271e-05 L11_sharp:5.2447e-05 L12_sharp:7.7606e-05 total_fnorm:4.3438e+00 total_l1_linf:4.7040e+03 total_spectral:2.1719e+00 L1_fnorm:1.3672e+00 L2_fnorm:1.2969e+00 L3_fnorm:1.2734e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2266e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2109e+00 L9_fnorm:1.2344e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2812e+00 L12_fnorm:1.1484e+00 L1_l1linf:1.9531e-01 L2_l1linf:1.7773e-01 L3_l1linf:1.8652e-01 L4_l1linf:1.6211e-01 L5_l1linf:1.7871e-01 L6_l1linf:1.5820e-01 L7_l1linf:1.5039e-01 L8_l1linf:1.4355e-01 L9_l1linf:1.3770e-01 L10_l1linf:1.6113e-01 L11_l1linf:1.5723e-01 L12_l1linf:1.5039e-01 L1_spectral:2.0311e-02 L2_spectral:1.9269e-02 L3_spectral:1.9389e-02 L4_spectral:1.9148e-02 L5_spectral:2.0265e-02 L6_spectral:1.9777e-02 L7_spectral:1.9685e-02 L8_spectral:1.9863e-02 L9_spectral:1.9942e-02 L10_spectral:1.9717e-02 L11_spectral:2.1057e-02 L12_spectral:1.9800e-02 train_time:390111ms step_avg:40.64ms +[2025-09-11 07:50:36] [Rank 0] step:9601/10000 train_time:391305ms step_avg:40.76ms +[2025-09-11 07:50:36] [Rank 0] step:9601/10000 train_time:391305ms step_avg:40.76ms +[2025-09-11 07:50:37] [Rank 0] step:9621/10000 train_time:392016ms step_avg:40.75ms +[2025-09-11 07:50:37] [Rank 0] step:9621/10000 train_time:392016ms step_avg:40.75ms +[2025-09-11 07:50:38] [Rank 0] step:9641/10000 train_time:392730ms step_avg:40.74ms +[2025-09-11 07:50:38] [Rank 0] step:9641/10000 train_time:392730ms step_avg:40.74ms +[2025-09-11 07:50:39] [Rank 0] step:9661/10000 train_time:393452ms step_avg:40.73ms +[2025-09-11 07:50:39] [Rank 0] step:9661/10000 train_time:393452ms step_avg:40.73ms +[2025-09-11 07:50:39] [Rank 0] step:9681/10000 train_time:394166ms step_avg:40.72ms +[2025-09-11 07:50:39] [Rank 0] step:9681/10000 
train_time:394166ms step_avg:40.72ms +[2025-09-11 07:50:40] [Rank 0] step:9701/10000 train_time:394883ms step_avg:40.71ms +[2025-09-11 07:50:40] [Rank 0] step:9701/10000 train_time:394883ms step_avg:40.71ms +[2025-09-11 07:50:41] [Rank 0] step:9721/10000 train_time:395603ms step_avg:40.70ms +[2025-09-11 07:50:41] [Rank 0] step:9721/10000 train_time:395603ms step_avg:40.70ms +[2025-09-11 07:50:41] [Rank 0] step:9741/10000 train_time:396320ms step_avg:40.69ms +[2025-09-11 07:50:41] [Rank 0] step:9741/10000 train_time:396320ms step_avg:40.69ms +[2025-09-11 07:50:42] [Rank 0] step:9761/10000 train_time:397037ms step_avg:40.68ms +[2025-09-11 07:50:42] [Rank 0] step:9761/10000 train_time:397037ms step_avg:40.68ms +[2025-09-11 07:50:43] [Rank 0] step:9781/10000 train_time:397752ms step_avg:40.67ms +[2025-09-11 07:50:43] [Rank 0] step:9781/10000 train_time:397752ms step_avg:40.67ms +[2025-09-11 07:50:44] [Rank 0] step:9801/10000 train_time:398473ms step_avg:40.66ms +[2025-09-11 07:50:44] [Rank 0] step:9801/10000 train_time:398473ms step_avg:40.66ms +[2025-09-11 07:50:44] [Rank 0] step:9821/10000 train_time:399192ms step_avg:40.65ms +[2025-09-11 07:50:44] [Rank 0] step:9821/10000 train_time:399192ms step_avg:40.65ms +[2025-09-11 07:50:45] [Rank 0] step:9841/10000 train_time:399912ms step_avg:40.64ms +[2025-09-11 07:50:45] [Rank 0] step:9841/10000 train_time:399912ms step_avg:40.64ms +[2025-09-11 07:50:46] [Rank 0] step:9861/10000 train_time:400630ms step_avg:40.63ms +[2025-09-11 07:50:46] [Rank 0] step:9861/10000 train_time:400630ms step_avg:40.63ms +[2025-09-11 07:50:46] [Rank 0] step:9881/10000 train_time:401347ms step_avg:40.62ms +[2025-09-11 07:50:46] [Rank 0] step:9881/10000 train_time:401347ms step_avg:40.62ms +[2025-09-11 07:50:47] [Rank 0] step:9901/10000 train_time:402062ms step_avg:40.61ms +[2025-09-11 07:50:47] [Rank 0] step:9901/10000 train_time:402062ms step_avg:40.61ms +[2025-09-11 07:50:48] [Rank 0] step:9921/10000 train_time:402777ms step_avg:40.60ms 
+[2025-09-11 07:50:48] [Rank 0] step:9921/10000 train_time:402777ms step_avg:40.60ms +[2025-09-11 07:50:49] [Rank 0] step:9941/10000 train_time:403499ms step_avg:40.59ms +[2025-09-11 07:50:49] [Rank 0] step:9941/10000 train_time:403499ms step_avg:40.59ms +[2025-09-11 07:50:49] [Rank 0] step:9961/10000 train_time:404220ms step_avg:40.58ms +[2025-09-11 07:50:49] [Rank 0] step:9961/10000 train_time:404220ms step_avg:40.58ms +[2025-09-11 07:50:50] [Rank 0] step:9981/10000 train_time:404937ms step_avg:40.57ms +[2025-09-11 07:50:50] [Rank 0] step:9981/10000 train_time:404937ms step_avg:40.57ms +[2025-09-11 07:50:51] [Rank 0] step:10000/10000 train_time:405626ms step_avg:40.56ms +[2025-09-11 07:50:51] [Rank 0] step:10000/10000 train_time:405626ms step_avg:40.56ms +[2025-09-11 07:50:51] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 07:50:51] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 07:50:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 07:50:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 07:50:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 07:50:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 07:50:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:50:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:50:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 07:50:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 07:50:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 07:50:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 07:50:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 07:50:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 07:50:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 07:50:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 07:50:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 07:50:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 07:50:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 07:50:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 07:50:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 07:50:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 07:50:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 07:50:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 07:50:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 07:50:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 07:50:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 07:50:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 07:50:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 07:50:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 07:50:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 07:50:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 07:50:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 07:50:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 07:50:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 07:50:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 07:50:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 07:50:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 07:51:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 07:51:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 07:51:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 07:51:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 07:51:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 07:51:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 07:51:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 07:51:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:51:01] [Rank 0] PRINT: step:10000/10000 val_loss:5.3812 total_sharp:4.8452e-05 L1_sharp:1.8240e-05 L2_sharp:5.1445e-06 L3_sharp:8.0850e-06 L4_sharp:5.2309e-06 L5_sharp:6.9720e-06 L6_sharp:6.2925e-06 L7_sharp:1.1821e-05 L8_sharp:1.2432e-05 L9_sharp:1.3010e-05 L10_sharp:1.4826e-05 L11_sharp:3.5645e-05 L12_sharp:5.9823e-05 total_fnorm:1.7031e+00 total_l1_linf:1.3360e+03 total_spectral:8.4766e-01 L1_fnorm:5.3125e-01 L2_fnorm:5.0000e-01 L3_fnorm:4.9219e-01 L4_fnorm:4.8828e-01 L5_fnorm:4.7656e-01 L6_fnorm:4.9023e-01 L7_fnorm:4.8438e-01 L8_fnorm:4.7070e-01 L9_fnorm:4.8242e-01 L10_fnorm:4.9219e-01 L11_fnorm:4.9805e-01 L12_fnorm:4.4727e-01 L1_l1linf:5.6641e-02 L2_l1linf:5.3223e-02 L3_l1linf:5.4932e-02 L4_l1linf:4.9072e-02 L5_l1linf:5.5176e-02 L6_l1linf:4.5654e-02 L7_l1linf:4.5410e-02 L8_l1linf:4.8096e-02 L9_l1linf:4.4189e-02 L10_l1linf:4.8828e-02 L11_l1linf:4.8340e-02 L12_l1linf:4.7119e-02 L1_spectral:8.0503e-03 L2_spectral:7.7138e-03 L3_spectral:7.6082e-03 L4_spectral:7.6999e-03 L5_spectral:8.2690e-03 L6_spectral:7.9512e-03 L7_spectral:7.8368e-03 L8_spectral:8.0162e-03 L9_spectral:7.9245e-03 L10_spectral:7.8649e-03 L11_spectral:8.3532e-03 L12_spectral:8.1259e-03 train_time:405647ms step_avg:40.56ms +[2025-09-11 07:51:01] [Rank 0] PRINT: step:10000/10000 val_loss:5.3812 total_sharp:4.8452e-05 L1_sharp:1.8240e-05 L2_sharp:5.1445e-06 L3_sharp:8.0850e-06 L4_sharp:5.2309e-06 L5_sharp:6.9720e-06 L6_sharp:6.2925e-06 L7_sharp:1.1821e-05 L8_sharp:1.2432e-05 L9_sharp:1.3010e-05 L10_sharp:1.4826e-05 L11_sharp:3.5645e-05 L12_sharp:5.9823e-05 total_fnorm:1.7031e+00 total_l1_linf:1.3360e+03 total_spectral:8.4766e-01 L1_fnorm:5.3125e-01 L2_fnorm:5.0000e-01 L3_fnorm:4.9219e-01 L4_fnorm:4.8828e-01 L5_fnorm:4.7656e-01 L6_fnorm:4.9023e-01 L7_fnorm:4.8438e-01 L8_fnorm:4.7070e-01 L9_fnorm:4.8242e-01 L10_fnorm:4.9219e-01 L11_fnorm:4.9805e-01 L12_fnorm:4.4727e-01 L1_l1linf:5.6641e-02 L2_l1linf:5.3223e-02 L3_l1linf:5.4932e-02 L4_l1linf:4.9072e-02 L5_l1linf:5.5176e-02 
L6_l1linf:4.5654e-02 L7_l1linf:4.5410e-02 L8_l1linf:4.8096e-02 L9_l1linf:4.4189e-02 L10_l1linf:4.8828e-02 L11_l1linf:4.8340e-02 L12_l1linf:4.7119e-02 L1_spectral:8.0503e-03 L2_spectral:7.7138e-03 L3_spectral:7.6082e-03 L4_spectral:7.6999e-03 L5_spectral:8.2690e-03 L6_spectral:7.9512e-03 L7_spectral:7.8368e-03 L8_spectral:8.0162e-03 L9_spectral:7.9245e-03 L10_spectral:7.8649e-03 L11_spectral:8.3532e-03 L12_spectral:8.1259e-03 train_time:405647ms step_avg:40.56ms +[2025-09-11 07:51:01] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 07:51:01 2025 --- +[2025-09-11 07:51:01] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 07:51:01 2025 --- +[2025-09-11 07:51:01] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 07:51:01] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.0005_seed_42/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.0005_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..bfff7c7dbcc096f52b48e05af0a1b551b5e3ad2e --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.0005_seed_42/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.002, + "muon_lr": 0.0005, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "bd7c9329-f951-4b08-bd0e-740f6a81a742", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.0005_seed_42/training_log_bd7c9329-f951-4b08-bd0e-740f6a81a742.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.0005_seed_42/training_log_bd7c9329-f951-4b08-bd0e-740f6a81a742.txt new file mode 100644 index 0000000000000000000000000000000000000000..993947423543ace1f49a0e745d8df3ac77e07335 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.0005_seed_42/training_log_bd7c9329-f951-4b08-bd0e-740f6a81a742.txt @@ -0,0 +1,4264 @@ +[2025-09-11 11:56:34] [Rank 0] PRINT: --- Script Start: Thu Sep 11 11:56:34 2025 --- +[2025-09-11 11:56:34] [Rank 0] PRINT: --- Script Start: Thu Sep 11 11:56:34 2025 --- +[2025-09-11 11:56:34] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002, muon_lr=0.0005, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 11:56:34] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002, muon_lr=0.0005, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 11:56:34] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 11:56:34] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 11:56:34] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-11 11:56:34] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-11 11:56:34] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.0005_seed_42 +[2025-09-11 11:56:34] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.0005_seed_42 +[2025-09-11 11:56:34] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from 
dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + 
assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." 
+ "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + 
train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 11:56:34] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 11:56:34] [Rank 0] PRINT: Constructing model... +[2025-09-11 11:56:34] [Rank 0] PRINT: Constructing model... +[2025-09-11 11:56:35] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 11:56:35] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 11:56:35] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 11:56:35] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 11:56:35] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 11:56:35] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 11:56:35] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 11:56:35] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 11:56:35] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 11:56:35] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 11:56:37] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 11:56:37] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 11:56:37] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 11:56:37] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 11:56:37] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 11:56:37] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 11:56:42] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 11:56:42] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 11:56:42] [Rank 0] PRINT: Starting warmup... +[2025-09-11 11:56:42] [Rank 0] PRINT: Starting warmup... +[2025-09-11 11:57:20] [Rank 0] PRINT: Warmup complete. +[2025-09-11 11:57:20] [Rank 0] PRINT: Warmup complete. +[2025-09-11 11:57:20] [Rank 0] PRINT: Starting training... +[2025-09-11 11:57:20] [Rank 0] PRINT: Starting training... 
+[2025-09-11 11:57:22] [Rank 0] step:21/10000 train_time:1539ms step_avg:73.30ms +[2025-09-11 11:57:22] [Rank 0] step:21/10000 train_time:1539ms step_avg:73.30ms +[2025-09-11 11:57:23] [Rank 0] step:41/10000 train_time:2395ms step_avg:58.42ms +[2025-09-11 11:57:23] [Rank 0] step:41/10000 train_time:2395ms step_avg:58.42ms +[2025-09-11 11:57:24] [Rank 0] step:61/10000 train_time:3265ms step_avg:53.53ms +[2025-09-11 11:57:24] [Rank 0] step:61/10000 train_time:3265ms step_avg:53.53ms +[2025-09-11 11:57:24] [Rank 0] step:81/10000 train_time:3996ms step_avg:49.33ms +[2025-09-11 11:57:24] [Rank 0] step:81/10000 train_time:3996ms step_avg:49.33ms +[2025-09-11 11:57:25] [Rank 0] step:101/10000 train_time:4875ms step_avg:48.26ms +[2025-09-11 11:57:25] [Rank 0] step:101/10000 train_time:4875ms step_avg:48.26ms +[2025-09-11 11:57:26] [Rank 0] step:121/10000 train_time:5709ms step_avg:47.18ms +[2025-09-11 11:57:26] [Rank 0] step:121/10000 train_time:5709ms step_avg:47.18ms +[2025-09-11 11:57:27] [Rank 0] step:141/10000 train_time:6441ms step_avg:45.68ms +[2025-09-11 11:57:27] [Rank 0] step:141/10000 train_time:6441ms step_avg:45.68ms +[2025-09-11 11:57:28] [Rank 0] step:161/10000 train_time:7172ms step_avg:44.55ms +[2025-09-11 11:57:28] [Rank 0] step:161/10000 train_time:7172ms step_avg:44.55ms +[2025-09-11 11:57:28] [Rank 0] step:181/10000 train_time:7904ms step_avg:43.67ms +[2025-09-11 11:57:28] [Rank 0] step:181/10000 train_time:7904ms step_avg:43.67ms +[2025-09-11 11:57:29] [Rank 0] step:201/10000 train_time:8636ms step_avg:42.96ms +[2025-09-11 11:57:29] [Rank 0] step:201/10000 train_time:8636ms step_avg:42.96ms +[2025-09-11 11:57:30] [Rank 0] step:221/10000 train_time:9375ms step_avg:42.42ms +[2025-09-11 11:57:30] [Rank 0] step:221/10000 train_time:9375ms step_avg:42.42ms +[2025-09-11 11:57:31] [Rank 0] step:241/10000 train_time:10112ms step_avg:41.96ms +[2025-09-11 11:57:31] [Rank 0] step:241/10000 train_time:10112ms step_avg:41.96ms +[2025-09-11 11:57:31] [Rank 0] 
step:261/10000 train_time:10844ms step_avg:41.55ms +[2025-09-11 11:57:31] [Rank 0] step:261/10000 train_time:10844ms step_avg:41.55ms +[2025-09-11 11:57:32] [Rank 0] step:281/10000 train_time:11575ms step_avg:41.19ms +[2025-09-11 11:57:32] [Rank 0] step:281/10000 train_time:11575ms step_avg:41.19ms +[2025-09-11 11:57:33] [Rank 0] step:301/10000 train_time:12305ms step_avg:40.88ms +[2025-09-11 11:57:33] [Rank 0] step:301/10000 train_time:12305ms step_avg:40.88ms +[2025-09-11 11:57:33] [Rank 0] step:321/10000 train_time:13035ms step_avg:40.61ms +[2025-09-11 11:57:33] [Rank 0] step:321/10000 train_time:13035ms step_avg:40.61ms +[2025-09-11 11:57:34] [Rank 0] step:341/10000 train_time:13766ms step_avg:40.37ms +[2025-09-11 11:57:34] [Rank 0] step:341/10000 train_time:13766ms step_avg:40.37ms +[2025-09-11 11:57:35] [Rank 0] step:361/10000 train_time:14496ms step_avg:40.16ms +[2025-09-11 11:57:35] [Rank 0] step:361/10000 train_time:14496ms step_avg:40.16ms +[2025-09-11 11:57:36] [Rank 0] step:381/10000 train_time:15226ms step_avg:39.96ms +[2025-09-11 11:57:36] [Rank 0] step:381/10000 train_time:15226ms step_avg:39.96ms +[2025-09-11 11:57:36] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 11:57:36] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 11:57:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 11:57:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 11:58:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 11:58:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 11:58:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:58:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 11:58:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 11:58:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 11:58:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 11:58:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 11:58:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 11:58:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 11:58:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 11:58:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 11:58:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 11:58:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 11:58:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 11:58:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 11:58:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 11:58:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 11:58:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 11:58:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 11:58:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 11:58:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 11:58:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 11:58:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 11:58:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 11:58:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 11:58:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 11:58:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 11:58:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 11:58:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 11:58:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 11:58:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 11:58:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 11:58:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 11:58:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 11:58:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 11:58:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 11:58:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 11:58:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 11:58:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 11:58:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:58:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:58:23] [Rank 0] PRINT: step:400/10000 val_loss:6.9683 total_sharp:1.4851e-03 L1_sharp:1.1642e-01 L2_sharp:1.2222e-01 L3_sharp:1.2914e-01 L4_sharp:1.5650e-01 L5_sharp:1.6091e-01 L6_sharp:1.8771e-01 L7_sharp:1.9818e-01 L8_sharp:2.5204e-01 L9_sharp:2.8432e-01 L10_sharp:3.1529e-01 L11_sharp:3.6709e-01 L12_sharp:3.6168e-01 total_fnorm:9.1017e+00 total_l1_linf:2.2984e+04 total_spectral:4.5516e+00 L1_fnorm:5.7361e-02 L2_fnorm:5.7268e-02 L3_fnorm:5.7127e-02 L4_fnorm:5.6301e-02 L5_fnorm:5.6171e-02 L6_fnorm:5.5755e-02 L7_fnorm:5.5316e-02 L8_fnorm:5.4429e-02 L9_fnorm:5.4432e-02 L10_fnorm:5.3483e-02 L11_fnorm:5.2017e-02 L12_fnorm:5.1544e-02 L1_l1linf:2.2595e-02 L2_l1linf:2.2449e-02 L3_l1linf:2.2584e-02 L4_l1linf:2.2409e-02 L5_l1linf:2.2331e-02 L6_l1linf:2.2227e-02 L7_l1linf:2.2155e-02 L8_l1linf:2.1949e-02 L9_l1linf:2.1670e-02 L10_l1linf:2.1602e-02 L11_l1linf:2.1077e-02 L12_l1linf:2.0578e-02 L1_spectral:6.0234e-04 L2_spectral:6.0230e-04 L3_spectral:6.0240e-04 L4_spectral:6.0238e-04 L5_spectral:6.0246e-04 L6_spectral:6.0258e-04 L7_spectral:6.0252e-04 L8_spectral:6.0236e-04 L9_spectral:6.0281e-04 L10_spectral:6.0254e-04 L11_spectral:6.0229e-04 L12_spectral:6.0236e-04 train_time:15936ms step_avg:39.84ms +[2025-09-11 11:58:23] [Rank 0] PRINT: step:400/10000 val_loss:6.9683 total_sharp:1.4851e-03 L1_sharp:1.1642e-01 L2_sharp:1.2222e-01 L3_sharp:1.2914e-01 L4_sharp:1.5650e-01 L5_sharp:1.6091e-01 L6_sharp:1.8771e-01 L7_sharp:1.9818e-01 L8_sharp:2.5204e-01 L9_sharp:2.8432e-01 L10_sharp:3.1529e-01 L11_sharp:3.6709e-01 L12_sharp:3.6168e-01 total_fnorm:9.1017e+00 total_l1_linf:2.2984e+04 total_spectral:4.5516e+00 L1_fnorm:5.7361e-02 L2_fnorm:5.7268e-02 L3_fnorm:5.7127e-02 L4_fnorm:5.6301e-02 L5_fnorm:5.6171e-02 L6_fnorm:5.5755e-02 L7_fnorm:5.5316e-02 L8_fnorm:5.4429e-02 L9_fnorm:5.4432e-02 L10_fnorm:5.3483e-02 L11_fnorm:5.2017e-02 L12_fnorm:5.1544e-02 L1_l1linf:2.2595e-02 L2_l1linf:2.2449e-02 L3_l1linf:2.2584e-02 L4_l1linf:2.2409e-02 L5_l1linf:2.2331e-02 
L6_l1linf:2.2227e-02 L7_l1linf:2.2155e-02 L8_l1linf:2.1949e-02 L9_l1linf:2.1670e-02 L10_l1linf:2.1602e-02 L11_l1linf:2.1077e-02 L12_l1linf:2.0578e-02 L1_spectral:6.0234e-04 L2_spectral:6.0230e-04 L3_spectral:6.0240e-04 L4_spectral:6.0238e-04 L5_spectral:6.0246e-04 L6_spectral:6.0258e-04 L7_spectral:6.0252e-04 L8_spectral:6.0236e-04 L9_spectral:6.0281e-04 L10_spectral:6.0254e-04 L11_spectral:6.0229e-04 L12_spectral:6.0236e-04 train_time:15936ms step_avg:39.84ms +[2025-09-11 11:58:53] [Rank 0] step:401/10000 train_time:46688ms step_avg:116.43ms +[2025-09-11 11:58:53] [Rank 0] step:401/10000 train_time:46688ms step_avg:116.43ms +[2025-09-11 11:58:56] [Rank 0] step:421/10000 train_time:48927ms step_avg:116.22ms +[2025-09-11 11:58:56] [Rank 0] step:421/10000 train_time:48927ms step_avg:116.22ms +[2025-09-11 11:58:56] [Rank 0] step:441/10000 train_time:49569ms step_avg:112.40ms +[2025-09-11 11:58:56] [Rank 0] step:441/10000 train_time:49569ms step_avg:112.40ms +[2025-09-11 11:58:57] [Rank 0] step:461/10000 train_time:50211ms step_avg:108.92ms +[2025-09-11 11:58:57] [Rank 0] step:461/10000 train_time:50211ms step_avg:108.92ms +[2025-09-11 11:58:58] [Rank 0] step:481/10000 train_time:50853ms step_avg:105.72ms +[2025-09-11 11:58:58] [Rank 0] step:481/10000 train_time:50853ms step_avg:105.72ms +[2025-09-11 11:58:58] [Rank 0] step:501/10000 train_time:51494ms step_avg:102.78ms +[2025-09-11 11:58:58] [Rank 0] step:501/10000 train_time:51494ms step_avg:102.78ms +[2025-09-11 11:58:59] [Rank 0] step:521/10000 train_time:52136ms step_avg:100.07ms +[2025-09-11 11:58:59] [Rank 0] step:521/10000 train_time:52136ms step_avg:100.07ms +[2025-09-11 11:58:59] [Rank 0] step:541/10000 train_time:52778ms step_avg:97.56ms +[2025-09-11 11:58:59] [Rank 0] step:541/10000 train_time:52778ms step_avg:97.56ms +[2025-09-11 11:59:00] [Rank 0] step:561/10000 train_time:53418ms step_avg:95.22ms +[2025-09-11 11:59:00] [Rank 0] step:561/10000 train_time:53418ms step_avg:95.22ms +[2025-09-11 11:59:01] 
[Rank 0] step:581/10000 train_time:54060ms step_avg:93.05ms +[2025-09-11 11:59:01] [Rank 0] step:581/10000 train_time:54060ms step_avg:93.05ms +[2025-09-11 11:59:01] [Rank 0] step:601/10000 train_time:54702ms step_avg:91.02ms +[2025-09-11 11:59:01] [Rank 0] step:601/10000 train_time:54702ms step_avg:91.02ms +[2025-09-11 11:59:02] [Rank 0] step:621/10000 train_time:55345ms step_avg:89.12ms +[2025-09-11 11:59:02] [Rank 0] step:621/10000 train_time:55345ms step_avg:89.12ms +[2025-09-11 11:59:03] [Rank 0] step:641/10000 train_time:55986ms step_avg:87.34ms +[2025-09-11 11:59:03] [Rank 0] step:641/10000 train_time:55986ms step_avg:87.34ms +[2025-09-11 11:59:03] [Rank 0] step:661/10000 train_time:56627ms step_avg:85.67ms +[2025-09-11 11:59:03] [Rank 0] step:661/10000 train_time:56627ms step_avg:85.67ms +[2025-09-11 11:59:04] [Rank 0] step:681/10000 train_time:57268ms step_avg:84.09ms +[2025-09-11 11:59:04] [Rank 0] step:681/10000 train_time:57268ms step_avg:84.09ms +[2025-09-11 11:59:05] [Rank 0] step:701/10000 train_time:57909ms step_avg:82.61ms +[2025-09-11 11:59:05] [Rank 0] step:701/10000 train_time:57909ms step_avg:82.61ms +[2025-09-11 11:59:05] [Rank 0] step:721/10000 train_time:58550ms step_avg:81.21ms +[2025-09-11 11:59:05] [Rank 0] step:721/10000 train_time:58550ms step_avg:81.21ms +[2025-09-11 11:59:06] [Rank 0] step:741/10000 train_time:59191ms step_avg:79.88ms +[2025-09-11 11:59:06] [Rank 0] step:741/10000 train_time:59191ms step_avg:79.88ms +[2025-09-11 11:59:06] [Rank 0] step:761/10000 train_time:59837ms step_avg:78.63ms +[2025-09-11 11:59:06] [Rank 0] step:761/10000 train_time:59837ms step_avg:78.63ms +[2025-09-11 11:59:07] [Rank 0] step:781/10000 train_time:60483ms step_avg:77.44ms +[2025-09-11 11:59:07] [Rank 0] step:781/10000 train_time:60483ms step_avg:77.44ms +[2025-09-11 11:59:08] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 11:59:08] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 11:59:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 11:59:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 11:59:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 11:59:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 11:59:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:59:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:59:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 11:59:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 11:59:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 11:59:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 11:59:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 11:59:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 11:59:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 11:59:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 11:59:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 11:59:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 11:59:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 11:59:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 11:59:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 11:59:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 11:59:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 11:59:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 11:59:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 11:59:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 11:59:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 11:59:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 11:59:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 11:59:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 11:59:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 11:59:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 11:59:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 11:59:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 11:59:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 11:59:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 11:59:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 11:59:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 11:59:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... 
+[2025-09-11 11:59:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 11:59:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 11:59:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 11:59:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 11:59:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 11:59:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:59:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:59:51] [Rank 0] PRINT: step:800/10000 val_loss:6.6321 total_sharp:5.1837e-03 L1_sharp:6.6604e-01 L2_sharp:6.1947e-01 L3_sharp:6.4940e-01 L4_sharp:7.1905e-01 L5_sharp:7.7209e-01 L6_sharp:7.0718e-01 L7_sharp:7.6280e-01 L8_sharp:1.0314e+00 L9_sharp:1.1671e+00 L10_sharp:1.1735e+00 L11_sharp:1.2760e+00 L12_sharp:1.3088e+00 total_fnorm:7.0938e+00 total_l1_linf:9.0240e+03 total_spectral:3.5469e+00 L1_fnorm:4.3457e-02 L2_fnorm:4.5410e-02 L3_fnorm:4.5410e-02 L4_fnorm:4.4678e-02 L5_fnorm:4.2725e-02 L6_fnorm:4.3945e-02 L7_fnorm:4.3701e-02 L8_fnorm:4.0039e-02 L9_fnorm:4.1016e-02 L10_fnorm:4.0283e-02 L11_fnorm:3.8086e-02 L12_fnorm:3.5645e-02 L1_l1linf:1.9043e-02 L2_l1linf:1.9043e-02 L3_l1linf:1.9165e-02 L4_l1linf:1.8921e-02 L5_l1linf:1.8921e-02 L6_l1linf:1.8799e-02 L7_l1linf:1.8799e-02 L8_l1linf:1.8066e-02 L9_l1linf:1.7456e-02 L10_l1linf:1.6968e-02 L11_l1linf:1.6846e-02 L12_l1linf:1.5991e-02 L1_spectral:7.0733e-04 L2_spectral:7.0782e-04 L3_spectral:7.0952e-04 L4_spectral:7.0790e-04 L5_spectral:7.1004e-04 L6_spectral:7.1315e-04 L7_spectral:7.1508e-04 L8_spectral:7.1329e-04 L9_spectral:7.1523e-04 L10_spectral:7.1271e-04 L11_spectral:7.0457e-04 L12_spectral:7.0036e-04 train_time:61111ms step_avg:76.39ms +[2025-09-11 11:59:51] [Rank 0] PRINT: step:800/10000 val_loss:6.6321 
total_sharp:5.1837e-03 L1_sharp:6.6604e-01 L2_sharp:6.1947e-01 L3_sharp:6.4940e-01 L4_sharp:7.1905e-01 L5_sharp:7.7209e-01 L6_sharp:7.0718e-01 L7_sharp:7.6280e-01 L8_sharp:1.0314e+00 L9_sharp:1.1671e+00 L10_sharp:1.1735e+00 L11_sharp:1.2760e+00 L12_sharp:1.3088e+00 total_fnorm:7.0938e+00 total_l1_linf:9.0240e+03 total_spectral:3.5469e+00 L1_fnorm:4.3457e-02 L2_fnorm:4.5410e-02 L3_fnorm:4.5410e-02 L4_fnorm:4.4678e-02 L5_fnorm:4.2725e-02 L6_fnorm:4.3945e-02 L7_fnorm:4.3701e-02 L8_fnorm:4.0039e-02 L9_fnorm:4.1016e-02 L10_fnorm:4.0283e-02 L11_fnorm:3.8086e-02 L12_fnorm:3.5645e-02 L1_l1linf:1.9043e-02 L2_l1linf:1.9043e-02 L3_l1linf:1.9165e-02 L4_l1linf:1.8921e-02 L5_l1linf:1.8921e-02 L6_l1linf:1.8799e-02 L7_l1linf:1.8799e-02 L8_l1linf:1.8066e-02 L9_l1linf:1.7456e-02 L10_l1linf:1.6968e-02 L11_l1linf:1.6846e-02 L12_l1linf:1.5991e-02 L1_spectral:7.0733e-04 L2_spectral:7.0782e-04 L3_spectral:7.0952e-04 L4_spectral:7.0790e-04 L5_spectral:7.1004e-04 L6_spectral:7.1315e-04 L7_spectral:7.1508e-04 L8_spectral:7.1329e-04 L9_spectral:7.1523e-04 L10_spectral:7.1271e-04 L11_spectral:7.0457e-04 L12_spectral:7.0036e-04 train_time:61111ms step_avg:76.39ms +[2025-09-11 11:59:53] [Rank 0] step:801/10000 train_time:62737ms step_avg:78.32ms +[2025-09-11 11:59:53] [Rank 0] step:801/10000 train_time:62737ms step_avg:78.32ms +[2025-09-11 11:59:54] [Rank 0] step:821/10000 train_time:63415ms step_avg:77.24ms +[2025-09-11 11:59:54] [Rank 0] step:821/10000 train_time:63415ms step_avg:77.24ms +[2025-09-11 11:59:54] [Rank 0] step:841/10000 train_time:64065ms step_avg:76.18ms +[2025-09-11 11:59:54] [Rank 0] step:841/10000 train_time:64065ms step_avg:76.18ms +[2025-09-11 11:59:55] [Rank 0] step:861/10000 train_time:64714ms step_avg:75.16ms +[2025-09-11 11:59:55] [Rank 0] step:861/10000 train_time:64714ms step_avg:75.16ms +[2025-09-11 11:59:56] [Rank 0] step:881/10000 train_time:65362ms step_avg:74.19ms +[2025-09-11 11:59:56] [Rank 0] step:881/10000 train_time:65362ms step_avg:74.19ms +[2025-09-11 
11:59:56] [Rank 0] step:901/10000 train_time:66012ms step_avg:73.27ms +[2025-09-11 11:59:56] [Rank 0] step:901/10000 train_time:66012ms step_avg:73.27ms +[2025-09-11 11:59:57] [Rank 0] step:921/10000 train_time:66661ms step_avg:72.38ms +[2025-09-11 11:59:57] [Rank 0] step:921/10000 train_time:66661ms step_avg:72.38ms +[2025-09-11 11:59:57] [Rank 0] step:941/10000 train_time:67308ms step_avg:71.53ms +[2025-09-11 11:59:57] [Rank 0] step:941/10000 train_time:67308ms step_avg:71.53ms +[2025-09-11 11:59:58] [Rank 0] step:961/10000 train_time:67958ms step_avg:70.72ms +[2025-09-11 11:59:58] [Rank 0] step:961/10000 train_time:67958ms step_avg:70.72ms +[2025-09-11 11:59:59] [Rank 0] step:981/10000 train_time:68604ms step_avg:69.93ms +[2025-09-11 11:59:59] [Rank 0] step:981/10000 train_time:68604ms step_avg:69.93ms +[2025-09-11 11:59:59] [Rank 0] step:1001/10000 train_time:69251ms step_avg:69.18ms +[2025-09-11 11:59:59] [Rank 0] step:1001/10000 train_time:69251ms step_avg:69.18ms +[2025-09-11 12:00:00] [Rank 0] step:1021/10000 train_time:69899ms step_avg:68.46ms +[2025-09-11 12:00:00] [Rank 0] step:1021/10000 train_time:69899ms step_avg:68.46ms +[2025-09-11 12:00:01] [Rank 0] step:1041/10000 train_time:70546ms step_avg:67.77ms +[2025-09-11 12:00:01] [Rank 0] step:1041/10000 train_time:70546ms step_avg:67.77ms +[2025-09-11 12:00:01] [Rank 0] step:1061/10000 train_time:71195ms step_avg:67.10ms +[2025-09-11 12:00:01] [Rank 0] step:1061/10000 train_time:71195ms step_avg:67.10ms +[2025-09-11 12:00:02] [Rank 0] step:1081/10000 train_time:71844ms step_avg:66.46ms +[2025-09-11 12:00:02] [Rank 0] step:1081/10000 train_time:71844ms step_avg:66.46ms +[2025-09-11 12:00:03] [Rank 0] step:1101/10000 train_time:72492ms step_avg:65.84ms +[2025-09-11 12:00:03] [Rank 0] step:1101/10000 train_time:72492ms step_avg:65.84ms +[2025-09-11 12:00:03] [Rank 0] step:1121/10000 train_time:73140ms step_avg:65.25ms +[2025-09-11 12:00:03] [Rank 0] step:1121/10000 train_time:73140ms step_avg:65.25ms 
+[2025-09-11 12:00:04] [Rank 0] step:1141/10000 train_time:73838ms step_avg:64.71ms +[2025-09-11 12:00:04] [Rank 0] step:1141/10000 train_time:73838ms step_avg:64.71ms +[2025-09-11 12:00:05] [Rank 0] step:1161/10000 train_time:74535ms step_avg:64.20ms +[2025-09-11 12:00:05] [Rank 0] step:1161/10000 train_time:74535ms step_avg:64.20ms +[2025-09-11 12:00:05] [Rank 0] step:1181/10000 train_time:75183ms step_avg:63.66ms +[2025-09-11 12:00:05] [Rank 0] step:1181/10000 train_time:75183ms step_avg:63.66ms +[2025-09-11 12:00:06] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 12:00:06] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 12:00:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 12:00:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 12:00:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 12:00:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 12:00:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:00:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:00:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 12:00:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 12:00:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 12:00:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 12:00:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... 
+[2025-09-11 12:00:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 12:00:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 12:00:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 12:00:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 12:00:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 12:00:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 12:00:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 12:00:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 12:00:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 12:00:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 12:00:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 12:00:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 12:00:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 12:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 12:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 12:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 12:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 12:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 12:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... 
+[2025-09-11 12:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 12:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 12:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 12:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 12:00:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 12:00:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 12:00:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 12:00:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 12:00:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 12:00:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 12:00:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 12:00:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 12:00:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 12:00:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:00:17] [Rank 0] PRINT: step:1200/10000 val_loss:6.4011 total_sharp:5.1469e-03 L1_sharp:4.0746e-01 L2_sharp:3.3177e-01 L3_sharp:3.4864e-01 L4_sharp:3.7090e-01 L5_sharp:5.4845e-01 L6_sharp:5.0257e-01 L7_sharp:5.0194e-01 L8_sharp:7.5732e-01 L9_sharp:7.1902e-01 L10_sharp:7.8847e-01 L11_sharp:6.5522e-01 L12_sharp:8.5469e-01 total_fnorm:5.7812e+00 total_l1_linf:6.4000e+03 total_spectral:2.8906e+00 L1_fnorm:4.8340e-02 L2_fnorm:4.9316e-02 L3_fnorm:4.9072e-02 L4_fnorm:4.9072e-02 L5_fnorm:4.7363e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8096e-02 L8_fnorm:4.5654e-02 L9_fnorm:4.6631e-02 L10_fnorm:4.5166e-02 L11_fnorm:4.4189e-02 L12_fnorm:4.1016e-02 L1_l1linf:1.8311e-02 L2_l1linf:1.8188e-02 L3_l1linf:1.8188e-02 L4_l1linf:1.8188e-02 L5_l1linf:1.8555e-02 L6_l1linf:1.8188e-02 L7_l1linf:1.8311e-02 L8_l1linf:1.7944e-02 L9_l1linf:1.8066e-02 L10_l1linf:1.7822e-02 L11_l1linf:1.7334e-02 L12_l1linf:1.5869e-02 L1_spectral:7.2363e-04 L2_spectral:7.2796e-04 L3_spectral:7.2702e-04 L4_spectral:7.2911e-04 L5_spectral:7.2485e-04 L6_spectral:7.2999e-04 L7_spectral:7.3003e-04 L8_spectral:7.3620e-04 L9_spectral:7.3884e-04 L10_spectral:7.3676e-04 L11_spectral:7.3495e-04 L12_spectral:7.1516e-04 train_time:75814ms step_avg:63.18ms +[2025-09-11 12:00:17] [Rank 0] PRINT: step:1200/10000 val_loss:6.4011 total_sharp:5.1469e-03 L1_sharp:4.0746e-01 L2_sharp:3.3177e-01 L3_sharp:3.4864e-01 L4_sharp:3.7090e-01 L5_sharp:5.4845e-01 L6_sharp:5.0257e-01 L7_sharp:5.0194e-01 L8_sharp:7.5732e-01 L9_sharp:7.1902e-01 L10_sharp:7.8847e-01 L11_sharp:6.5522e-01 L12_sharp:8.5469e-01 total_fnorm:5.7812e+00 total_l1_linf:6.4000e+03 total_spectral:2.8906e+00 L1_fnorm:4.8340e-02 L2_fnorm:4.9316e-02 L3_fnorm:4.9072e-02 L4_fnorm:4.9072e-02 L5_fnorm:4.7363e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8096e-02 L8_fnorm:4.5654e-02 L9_fnorm:4.6631e-02 L10_fnorm:4.5166e-02 L11_fnorm:4.4189e-02 L12_fnorm:4.1016e-02 L1_l1linf:1.8311e-02 L2_l1linf:1.8188e-02 L3_l1linf:1.8188e-02 L4_l1linf:1.8188e-02 L5_l1linf:1.8555e-02 
L6_l1linf:1.8188e-02 L7_l1linf:1.8311e-02 L8_l1linf:1.7944e-02 L9_l1linf:1.8066e-02 L10_l1linf:1.7822e-02 L11_l1linf:1.7334e-02 L12_l1linf:1.5869e-02 L1_spectral:7.2363e-04 L2_spectral:7.2796e-04 L3_spectral:7.2702e-04 L4_spectral:7.2911e-04 L5_spectral:7.2485e-04 L6_spectral:7.2999e-04 L7_spectral:7.3003e-04 L8_spectral:7.3620e-04 L9_spectral:7.3884e-04 L10_spectral:7.3676e-04 L11_spectral:7.3495e-04 L12_spectral:7.1516e-04 train_time:75814ms step_avg:63.18ms +[2025-09-11 12:00:19] [Rank 0] step:1201/10000 train_time:77383ms step_avg:64.43ms +[2025-09-11 12:00:19] [Rank 0] step:1201/10000 train_time:77383ms step_avg:64.43ms +[2025-09-11 12:00:19] [Rank 0] step:1221/10000 train_time:78033ms step_avg:63.91ms +[2025-09-11 12:00:19] [Rank 0] step:1221/10000 train_time:78033ms step_avg:63.91ms +[2025-09-11 12:00:20] [Rank 0] step:1241/10000 train_time:78681ms step_avg:63.40ms +[2025-09-11 12:00:20] [Rank 0] step:1241/10000 train_time:78681ms step_avg:63.40ms +[2025-09-11 12:00:20] [Rank 0] step:1261/10000 train_time:79328ms step_avg:62.91ms +[2025-09-11 12:00:20] [Rank 0] step:1261/10000 train_time:79328ms step_avg:62.91ms +[2025-09-11 12:00:21] [Rank 0] step:1281/10000 train_time:79975ms step_avg:62.43ms +[2025-09-11 12:00:21] [Rank 0] step:1281/10000 train_time:79975ms step_avg:62.43ms +[2025-09-11 12:00:22] [Rank 0] step:1301/10000 train_time:80623ms step_avg:61.97ms +[2025-09-11 12:00:22] [Rank 0] step:1301/10000 train_time:80623ms step_avg:61.97ms +[2025-09-11 12:00:22] [Rank 0] step:1321/10000 train_time:81270ms step_avg:61.52ms +[2025-09-11 12:00:22] [Rank 0] step:1321/10000 train_time:81270ms step_avg:61.52ms +[2025-09-11 12:00:23] [Rank 0] step:1341/10000 train_time:81917ms step_avg:61.09ms +[2025-09-11 12:00:23] [Rank 0] step:1341/10000 train_time:81917ms step_avg:61.09ms +[2025-09-11 12:00:24] [Rank 0] step:1361/10000 train_time:82563ms step_avg:60.66ms +[2025-09-11 12:00:24] [Rank 0] step:1361/10000 train_time:82563ms step_avg:60.66ms +[2025-09-11 12:00:24] 
[Rank 0] step:1381/10000 train_time:83210ms step_avg:60.25ms +[2025-09-11 12:00:24] [Rank 0] step:1381/10000 train_time:83210ms step_avg:60.25ms +[2025-09-11 12:00:25] [Rank 0] step:1401/10000 train_time:83855ms step_avg:59.85ms +[2025-09-11 12:00:25] [Rank 0] step:1401/10000 train_time:83855ms step_avg:59.85ms +[2025-09-11 12:00:26] [Rank 0] step:1421/10000 train_time:84501ms step_avg:59.47ms +[2025-09-11 12:00:26] [Rank 0] step:1421/10000 train_time:84501ms step_avg:59.47ms +[2025-09-11 12:00:26] [Rank 0] step:1441/10000 train_time:85147ms step_avg:59.09ms +[2025-09-11 12:00:26] [Rank 0] step:1441/10000 train_time:85147ms step_avg:59.09ms +[2025-09-11 12:00:27] [Rank 0] step:1461/10000 train_time:85793ms step_avg:58.72ms +[2025-09-11 12:00:27] [Rank 0] step:1461/10000 train_time:85793ms step_avg:58.72ms +[2025-09-11 12:00:28] [Rank 0] step:1481/10000 train_time:86440ms step_avg:58.37ms +[2025-09-11 12:00:28] [Rank 0] step:1481/10000 train_time:86440ms step_avg:58.37ms +[2025-09-11 12:00:28] [Rank 0] step:1501/10000 train_time:87091ms step_avg:58.02ms +[2025-09-11 12:00:28] [Rank 0] step:1501/10000 train_time:87091ms step_avg:58.02ms +[2025-09-11 12:00:29] [Rank 0] step:1521/10000 train_time:87741ms step_avg:57.69ms +[2025-09-11 12:00:29] [Rank 0] step:1521/10000 train_time:87741ms step_avg:57.69ms +[2025-09-11 12:00:30] [Rank 0] step:1541/10000 train_time:88392ms step_avg:57.36ms +[2025-09-11 12:00:30] [Rank 0] step:1541/10000 train_time:88392ms step_avg:57.36ms +[2025-09-11 12:00:30] [Rank 0] step:1561/10000 train_time:89043ms step_avg:57.04ms +[2025-09-11 12:00:30] [Rank 0] step:1561/10000 train_time:89043ms step_avg:57.04ms +[2025-09-11 12:00:31] [Rank 0] step:1581/10000 train_time:89693ms step_avg:56.73ms +[2025-09-11 12:00:31] [Rank 0] step:1581/10000 train_time:89693ms step_avg:56.73ms +[2025-09-11 12:00:31] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 12:00:31] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 12:00:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 12:00:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 12:00:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 12:00:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 12:00:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:00:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:00:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 12:00:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 12:00:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 12:00:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 12:00:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 12:00:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 12:00:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 12:00:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 12:00:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 12:00:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 12:00:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 12:00:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 12:00:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 12:00:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 12:00:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 12:00:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 12:00:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 12:00:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 12:00:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 12:00:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 12:00:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 12:00:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 12:00:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 12:00:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 12:00:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 12:00:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 12:00:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 12:00:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 12:00:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 12:00:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 12:00:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 12:00:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 12:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 12:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 12:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 12:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 12:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 12:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 12:00:41] [Rank 0] PRINT: step:1600/10000 val_loss:6.2246 total_sharp:1.5808e-02 L1_sharp:1.1324e+00 L2_sharp:1.0920e+00 L3_sharp:1.0050e+00 L4_sharp:1.1090e+00 L5_sharp:1.4466e+00 L6_sharp:1.3191e+00 L7_sharp:1.4043e+00 L8_sharp:1.7875e+00 L9_sharp:2.0758e+00 L10_sharp:2.1366e+00 L11_sharp:1.6008e+00 L12_sharp:1.2427e+00 total_fnorm:5.3125e+00 total_l1_linf:5.4720e+03 total_spectral:2.6562e+00 L1_fnorm:4.7607e-02 L2_fnorm:4.9072e-02 L3_fnorm:4.9072e-02 L4_fnorm:4.9072e-02 L5_fnorm:4.6875e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8584e-02 L8_fnorm:4.5654e-02 L9_fnorm:4.7119e-02 L10_fnorm:4.5654e-02 L11_fnorm:4.5410e-02 L12_fnorm:4.1504e-02 L1_l1linf:1.7334e-02 L2_l1linf:1.7090e-02 L3_l1linf:1.7212e-02 L4_l1linf:1.7334e-02 L5_l1linf:1.7212e-02 L6_l1linf:1.7334e-02 L7_l1linf:1.7090e-02 L8_l1linf:1.7090e-02 L9_l1linf:1.7212e-02 L10_l1linf:1.6846e-02 L11_l1linf:1.6479e-02 L12_l1linf:1.4648e-02 L1_spectral:7.2602e-04 L2_spectral:7.4708e-04 L3_spectral:7.4605e-04 L4_spectral:7.4740e-04 L5_spectral:7.2976e-04 L6_spectral:7.4346e-04 L7_spectral:7.4584e-04 L8_spectral:7.4181e-04 L9_spectral:7.4228e-04 L10_spectral:7.3863e-04 L11_spectral:7.3802e-04 L12_spectral:7.1062e-04 train_time:90326ms step_avg:56.45ms +[2025-09-11 12:00:41] [Rank 0] PRINT: step:1600/10000 
val_loss:6.2246 total_sharp:1.5808e-02 L1_sharp:1.1324e+00 L2_sharp:1.0920e+00 L3_sharp:1.0050e+00 L4_sharp:1.1090e+00 L5_sharp:1.4466e+00 L6_sharp:1.3191e+00 L7_sharp:1.4043e+00 L8_sharp:1.7875e+00 L9_sharp:2.0758e+00 L10_sharp:2.1366e+00 L11_sharp:1.6008e+00 L12_sharp:1.2427e+00 total_fnorm:5.3125e+00 total_l1_linf:5.4720e+03 total_spectral:2.6562e+00 L1_fnorm:4.7607e-02 L2_fnorm:4.9072e-02 L3_fnorm:4.9072e-02 L4_fnorm:4.9072e-02 L5_fnorm:4.6875e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8584e-02 L8_fnorm:4.5654e-02 L9_fnorm:4.7119e-02 L10_fnorm:4.5654e-02 L11_fnorm:4.5410e-02 L12_fnorm:4.1504e-02 L1_l1linf:1.7334e-02 L2_l1linf:1.7090e-02 L3_l1linf:1.7212e-02 L4_l1linf:1.7334e-02 L5_l1linf:1.7212e-02 L6_l1linf:1.7334e-02 L7_l1linf:1.7090e-02 L8_l1linf:1.7090e-02 L9_l1linf:1.7212e-02 L10_l1linf:1.6846e-02 L11_l1linf:1.6479e-02 L12_l1linf:1.4648e-02 L1_spectral:7.2602e-04 L2_spectral:7.4708e-04 L3_spectral:7.4605e-04 L4_spectral:7.4740e-04 L5_spectral:7.2976e-04 L6_spectral:7.4346e-04 L7_spectral:7.4584e-04 L8_spectral:7.4181e-04 L9_spectral:7.4228e-04 L10_spectral:7.3863e-04 L11_spectral:7.3802e-04 L12_spectral:7.1062e-04 train_time:90326ms step_avg:56.45ms +[2025-09-11 12:00:43] [Rank 0] step:1601/10000 train_time:91917ms step_avg:57.41ms +[2025-09-11 12:00:43] [Rank 0] step:1601/10000 train_time:91917ms step_avg:57.41ms +[2025-09-11 12:00:44] [Rank 0] step:1621/10000 train_time:92572ms step_avg:57.11ms +[2025-09-11 12:00:44] [Rank 0] step:1621/10000 train_time:92572ms step_avg:57.11ms +[2025-09-11 12:00:44] [Rank 0] step:1641/10000 train_time:93223ms step_avg:56.81ms +[2025-09-11 12:00:44] [Rank 0] step:1641/10000 train_time:93223ms step_avg:56.81ms +[2025-09-11 12:00:45] [Rank 0] step:1661/10000 train_time:93876ms step_avg:56.52ms +[2025-09-11 12:00:45] [Rank 0] step:1661/10000 train_time:93876ms step_avg:56.52ms +[2025-09-11 12:00:46] [Rank 0] step:1681/10000 train_time:94527ms step_avg:56.23ms +[2025-09-11 12:00:46] [Rank 0] step:1681/10000 train_time:94527ms 
step_avg:56.23ms +[2025-09-11 12:00:46] [Rank 0] step:1701/10000 train_time:95177ms step_avg:55.95ms +[2025-09-11 12:00:46] [Rank 0] step:1701/10000 train_time:95177ms step_avg:55.95ms +[2025-09-11 12:00:47] [Rank 0] step:1721/10000 train_time:95828ms step_avg:55.68ms +[2025-09-11 12:00:47] [Rank 0] step:1721/10000 train_time:95828ms step_avg:55.68ms +[2025-09-11 12:00:48] [Rank 0] step:1741/10000 train_time:96478ms step_avg:55.42ms +[2025-09-11 12:00:48] [Rank 0] step:1741/10000 train_time:96478ms step_avg:55.42ms +[2025-09-11 12:00:48] [Rank 0] step:1761/10000 train_time:97130ms step_avg:55.16ms +[2025-09-11 12:00:48] [Rank 0] step:1761/10000 train_time:97130ms step_avg:55.16ms +[2025-09-11 12:00:49] [Rank 0] step:1781/10000 train_time:97780ms step_avg:54.90ms +[2025-09-11 12:00:49] [Rank 0] step:1781/10000 train_time:97780ms step_avg:54.90ms +[2025-09-11 12:00:50] [Rank 0] step:1801/10000 train_time:98431ms step_avg:54.65ms +[2025-09-11 12:00:50] [Rank 0] step:1801/10000 train_time:98431ms step_avg:54.65ms +[2025-09-11 12:00:50] [Rank 0] step:1821/10000 train_time:99083ms step_avg:54.41ms +[2025-09-11 12:00:50] [Rank 0] step:1821/10000 train_time:99083ms step_avg:54.41ms +[2025-09-11 12:00:51] [Rank 0] step:1841/10000 train_time:99733ms step_avg:54.17ms +[2025-09-11 12:00:51] [Rank 0] step:1841/10000 train_time:99733ms step_avg:54.17ms +[2025-09-11 12:00:52] [Rank 0] step:1861/10000 train_time:100384ms step_avg:53.94ms +[2025-09-11 12:00:52] [Rank 0] step:1861/10000 train_time:100384ms step_avg:53.94ms +[2025-09-11 12:00:52] [Rank 0] step:1881/10000 train_time:101035ms step_avg:53.71ms +[2025-09-11 12:00:52] [Rank 0] step:1881/10000 train_time:101035ms step_avg:53.71ms +[2025-09-11 12:00:53] [Rank 0] step:1901/10000 train_time:101685ms step_avg:53.49ms +[2025-09-11 12:00:53] [Rank 0] step:1901/10000 train_time:101685ms step_avg:53.49ms +[2025-09-11 12:00:53] [Rank 0] step:1921/10000 train_time:102336ms step_avg:53.27ms +[2025-09-11 12:00:53] [Rank 0] 
step:1921/10000 train_time:102336ms step_avg:53.27ms +[2025-09-11 12:00:54] [Rank 0] step:1941/10000 train_time:102987ms step_avg:53.06ms +[2025-09-11 12:00:54] [Rank 0] step:1941/10000 train_time:102987ms step_avg:53.06ms +[2025-09-11 12:00:55] [Rank 0] step:1961/10000 train_time:103637ms step_avg:52.85ms +[2025-09-11 12:00:55] [Rank 0] step:1961/10000 train_time:103637ms step_avg:52.85ms +[2025-09-11 12:00:55] [Rank 0] step:1981/10000 train_time:104287ms step_avg:52.64ms +[2025-09-11 12:00:55] [Rank 0] step:1981/10000 train_time:104287ms step_avg:52.64ms +[2025-09-11 12:00:56] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 12:00:56] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 12:00:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 12:00:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 12:00:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 12:00:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 12:00:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:00:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:00:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 12:00:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 12:00:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 12:00:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 12:01:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 12:01:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 12:01:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 12:01:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 12:01:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 12:01:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 12:01:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 12:01:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 12:01:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 12:01:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 12:01:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 12:01:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 12:01:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 12:01:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 12:01:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 12:01:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 12:01:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 12:01:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 12:01:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 12:01:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 12:01:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 12:01:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 12:01:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 12:01:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 12:01:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 12:01:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 12:01:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 12:01:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 12:01:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 12:01:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 12:01:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 12:01:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 12:01:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 12:01:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:01:06] [Rank 0] PRINT: step:2000/10000 val_loss:6.0739 total_sharp:2.4703e-02 L1_sharp:1.0759e+00 L2_sharp:9.5999e-01 L3_sharp:1.0735e+00 L4_sharp:1.1969e+00 L5_sharp:1.5802e+00 L6_sharp:1.6592e+00 L7_sharp:1.9214e+00 L8_sharp:2.8487e+00 L9_sharp:3.6563e+00 L10_sharp:4.9329e+00 L11_sharp:5.3590e+00 L12_sharp:6.4151e+00 total_fnorm:5.2188e+00 total_l1_linf:5.4080e+03 total_spectral:2.6094e+00 L1_fnorm:4.8096e-02 L2_fnorm:4.9072e-02 L3_fnorm:4.9072e-02 L4_fnorm:4.9072e-02 L5_fnorm:4.7607e-02 L6_fnorm:4.8828e-02 L7_fnorm:4.8828e-02 L8_fnorm:4.7119e-02 L9_fnorm:4.8340e-02 L10_fnorm:4.7607e-02 L11_fnorm:4.7119e-02 L12_fnorm:4.1992e-02 L1_l1linf:1.6113e-02 L2_l1linf:1.6113e-02 L3_l1linf:1.6113e-02 L4_l1linf:1.6479e-02 L5_l1linf:1.6357e-02 L6_l1linf:1.6357e-02 L7_l1linf:1.6479e-02 L8_l1linf:1.6602e-02 L9_l1linf:1.6724e-02 L10_l1linf:1.6602e-02 L11_l1linf:1.6113e-02 L12_l1linf:1.4160e-02 L1_spectral:7.4988e-04 L2_spectral:7.6038e-04 L3_spectral:7.5792e-04 L4_spectral:7.5381e-04 L5_spectral:7.4665e-04 L6_spectral:7.6558e-04 L7_spectral:7.6187e-04 L8_spectral:7.4704e-04 L9_spectral:7.5635e-04 L10_spectral:7.5977e-04 L11_spectral:7.4712e-04 L12_spectral:6.9132e-04 train_time:104920ms step_avg:52.46ms +[2025-09-11 12:01:06] [Rank 0] PRINT: step:2000/10000 val_loss:6.0739 total_sharp:2.4703e-02 L1_sharp:1.0759e+00 L2_sharp:9.5999e-01 L3_sharp:1.0735e+00 L4_sharp:1.1969e+00 L5_sharp:1.5802e+00 L6_sharp:1.6592e+00 L7_sharp:1.9214e+00 L8_sharp:2.8487e+00 L9_sharp:3.6563e+00 L10_sharp:4.9329e+00 L11_sharp:5.3590e+00 L12_sharp:6.4151e+00 total_fnorm:5.2188e+00 total_l1_linf:5.4080e+03 total_spectral:2.6094e+00 L1_fnorm:4.8096e-02 L2_fnorm:4.9072e-02 L3_fnorm:4.9072e-02 L4_fnorm:4.9072e-02 L5_fnorm:4.7607e-02 L6_fnorm:4.8828e-02 L7_fnorm:4.8828e-02 L8_fnorm:4.7119e-02 L9_fnorm:4.8340e-02 L10_fnorm:4.7607e-02 L11_fnorm:4.7119e-02 L12_fnorm:4.1992e-02 L1_l1linf:1.6113e-02 L2_l1linf:1.6113e-02 L3_l1linf:1.6113e-02 L4_l1linf:1.6479e-02 L5_l1linf:1.6357e-02 
L6_l1linf:1.6357e-02 L7_l1linf:1.6479e-02 L8_l1linf:1.6602e-02 L9_l1linf:1.6724e-02 L10_l1linf:1.6602e-02 L11_l1linf:1.6113e-02 L12_l1linf:1.4160e-02 L1_spectral:7.4988e-04 L2_spectral:7.6038e-04 L3_spectral:7.5792e-04 L4_spectral:7.5381e-04 L5_spectral:7.4665e-04 L6_spectral:7.6558e-04 L7_spectral:7.6187e-04 L8_spectral:7.4704e-04 L9_spectral:7.5635e-04 L10_spectral:7.5977e-04 L11_spectral:7.4712e-04 L12_spectral:6.9132e-04 train_time:104920ms step_avg:52.46ms +[2025-09-11 12:01:08] [Rank 0] step:2001/10000 train_time:106519ms step_avg:53.23ms +[2025-09-11 12:01:08] [Rank 0] step:2001/10000 train_time:106519ms step_avg:53.23ms +[2025-09-11 12:01:09] [Rank 0] step:2021/10000 train_time:107173ms step_avg:53.03ms +[2025-09-11 12:01:09] [Rank 0] step:2021/10000 train_time:107173ms step_avg:53.03ms +[2025-09-11 12:01:09] [Rank 0] step:2041/10000 train_time:107825ms step_avg:52.83ms +[2025-09-11 12:01:09] [Rank 0] step:2041/10000 train_time:107825ms step_avg:52.83ms +[2025-09-11 12:01:10] [Rank 0] step:2061/10000 train_time:108475ms step_avg:52.63ms +[2025-09-11 12:01:10] [Rank 0] step:2061/10000 train_time:108475ms step_avg:52.63ms +[2025-09-11 12:01:11] [Rank 0] step:2081/10000 train_time:109126ms step_avg:52.44ms +[2025-09-11 12:01:11] [Rank 0] step:2081/10000 train_time:109126ms step_avg:52.44ms +[2025-09-11 12:01:11] [Rank 0] step:2101/10000 train_time:109778ms step_avg:52.25ms +[2025-09-11 12:01:11] [Rank 0] step:2101/10000 train_time:109778ms step_avg:52.25ms +[2025-09-11 12:01:12] [Rank 0] step:2121/10000 train_time:110429ms step_avg:52.06ms +[2025-09-11 12:01:12] [Rank 0] step:2121/10000 train_time:110429ms step_avg:52.06ms +[2025-09-11 12:01:13] [Rank 0] step:2141/10000 train_time:111080ms step_avg:51.88ms +[2025-09-11 12:01:13] [Rank 0] step:2141/10000 train_time:111080ms step_avg:51.88ms +[2025-09-11 12:01:13] [Rank 0] step:2161/10000 train_time:111730ms step_avg:51.70ms +[2025-09-11 12:01:13] [Rank 0] step:2161/10000 train_time:111730ms step_avg:51.70ms 
+[2025-09-11 12:01:14] [Rank 0] step:2181/10000 train_time:112381ms step_avg:51.53ms +[2025-09-11 12:01:14] [Rank 0] step:2181/10000 train_time:112381ms step_avg:51.53ms +[2025-09-11 12:01:15] [Rank 0] step:2201/10000 train_time:113031ms step_avg:51.35ms +[2025-09-11 12:01:15] [Rank 0] step:2201/10000 train_time:113031ms step_avg:51.35ms +[2025-09-11 12:01:15] [Rank 0] step:2221/10000 train_time:113681ms step_avg:51.18ms +[2025-09-11 12:01:15] [Rank 0] step:2221/10000 train_time:113681ms step_avg:51.18ms +[2025-09-11 12:01:16] [Rank 0] step:2241/10000 train_time:114344ms step_avg:51.02ms +[2025-09-11 12:01:16] [Rank 0] step:2241/10000 train_time:114344ms step_avg:51.02ms +[2025-09-11 12:01:17] [Rank 0] step:2261/10000 train_time:115008ms step_avg:50.87ms +[2025-09-11 12:01:17] [Rank 0] step:2261/10000 train_time:115008ms step_avg:50.87ms +[2025-09-11 12:01:17] [Rank 0] step:2281/10000 train_time:115674ms step_avg:50.71ms +[2025-09-11 12:01:17] [Rank 0] step:2281/10000 train_time:115674ms step_avg:50.71ms +[2025-09-11 12:01:18] [Rank 0] step:2301/10000 train_time:116337ms step_avg:50.56ms +[2025-09-11 12:01:18] [Rank 0] step:2301/10000 train_time:116337ms step_avg:50.56ms +[2025-09-11 12:01:19] [Rank 0] step:2321/10000 train_time:117000ms step_avg:50.41ms +[2025-09-11 12:01:19] [Rank 0] step:2321/10000 train_time:117000ms step_avg:50.41ms +[2025-09-11 12:01:19] [Rank 0] step:2341/10000 train_time:117665ms step_avg:50.26ms +[2025-09-11 12:01:19] [Rank 0] step:2341/10000 train_time:117665ms step_avg:50.26ms +[2025-09-11 12:01:20] [Rank 0] step:2361/10000 train_time:118329ms step_avg:50.12ms +[2025-09-11 12:01:20] [Rank 0] step:2361/10000 train_time:118329ms step_avg:50.12ms +[2025-09-11 12:01:21] [Rank 0] step:2381/10000 train_time:118993ms step_avg:49.98ms +[2025-09-11 12:01:21] [Rank 0] step:2381/10000 train_time:118993ms step_avg:49.98ms +[2025-09-11 12:01:21] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 12:01:21] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 12:01:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 12:01:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 12:01:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 12:01:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 12:01:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:01:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:01:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 12:01:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 12:01:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 12:01:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 12:01:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 12:01:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 12:01:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 12:01:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 12:01:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 12:01:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 12:01:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 12:01:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 12:01:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 12:01:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 12:01:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 12:01:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 12:01:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 12:01:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 12:01:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 12:01:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 12:01:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 12:01:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 12:01:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 12:01:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 12:01:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 12:01:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 12:01:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 12:01:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 12:01:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 12:01:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 12:01:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 12:01:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 12:01:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 12:01:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 12:01:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 12:01:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 12:01:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 12:01:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 12:01:32] [Rank 0] PRINT: step:2400/10000 val_loss:5.9462 total_sharp:1.7897e-02 L1_sharp:4.7095e-01 L2_sharp:5.1412e-01 L3_sharp:6.2287e-01 L4_sharp:7.5820e-01 L5_sharp:1.1191e+00 L6_sharp:1.2954e+00 L7_sharp:1.5204e+00 L8_sharp:2.2114e+00 L9_sharp:2.5978e+00 L10_sharp:2.8057e+00 L11_sharp:2.0928e+00 L12_sharp:2.0834e+00 total_fnorm:4.7812e+00 total_l1_linf:4.6400e+03 total_spectral:2.3906e+00 L1_fnorm:4.7852e-02 L2_fnorm:4.8340e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.8340e-02 L5_fnorm:4.7607e-02 L6_fnorm:4.8096e-02 L7_fnorm:4.8096e-02 L8_fnorm:4.7119e-02 L9_fnorm:4.8096e-02 L10_fnorm:4.6875e-02 L11_fnorm:4.7363e-02 L12_fnorm:4.4434e-02 L1_l1linf:1.4648e-02 L2_l1linf:1.4709e-02 L3_l1linf:1.5076e-02 L4_l1linf:1.5198e-02 L5_l1linf:1.5503e-02 L6_l1linf:1.5381e-02 L7_l1linf:1.5381e-02 L8_l1linf:1.5747e-02 L9_l1linf:1.5747e-02 L10_l1linf:1.5869e-02 L11_l1linf:1.5869e-02 L12_l1linf:1.4587e-02 L1_spectral:7.6091e-04 L2_spectral:7.7165e-04 L3_spectral:7.6828e-04 L4_spectral:7.6169e-04 L5_spectral:7.5244e-04 L6_spectral:7.6601e-04 L7_spectral:7.7186e-04 L8_spectral:7.5517e-04 L9_spectral:7.6961e-04 L10_spectral:7.6871e-04 L11_spectral:7.5890e-04 L12_spectral:7.2248e-04 train_time:119638ms step_avg:49.85ms +[2025-09-11 12:01:32] [Rank 0] PRINT: step:2400/10000 
val_loss:5.9462 total_sharp:1.7897e-02 L1_sharp:4.7095e-01 L2_sharp:5.1412e-01 L3_sharp:6.2287e-01 L4_sharp:7.5820e-01 L5_sharp:1.1191e+00 L6_sharp:1.2954e+00 L7_sharp:1.5204e+00 L8_sharp:2.2114e+00 L9_sharp:2.5978e+00 L10_sharp:2.8057e+00 L11_sharp:2.0928e+00 L12_sharp:2.0834e+00 total_fnorm:4.7812e+00 total_l1_linf:4.6400e+03 total_spectral:2.3906e+00 L1_fnorm:4.7852e-02 L2_fnorm:4.8340e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.8340e-02 L5_fnorm:4.7607e-02 L6_fnorm:4.8096e-02 L7_fnorm:4.8096e-02 L8_fnorm:4.7119e-02 L9_fnorm:4.8096e-02 L10_fnorm:4.6875e-02 L11_fnorm:4.7363e-02 L12_fnorm:4.4434e-02 L1_l1linf:1.4648e-02 L2_l1linf:1.4709e-02 L3_l1linf:1.5076e-02 L4_l1linf:1.5198e-02 L5_l1linf:1.5503e-02 L6_l1linf:1.5381e-02 L7_l1linf:1.5381e-02 L8_l1linf:1.5747e-02 L9_l1linf:1.5747e-02 L10_l1linf:1.5869e-02 L11_l1linf:1.5869e-02 L12_l1linf:1.4587e-02 L1_spectral:7.6091e-04 L2_spectral:7.7165e-04 L3_spectral:7.6828e-04 L4_spectral:7.6169e-04 L5_spectral:7.5244e-04 L6_spectral:7.6601e-04 L7_spectral:7.7186e-04 L8_spectral:7.5517e-04 L9_spectral:7.6961e-04 L10_spectral:7.6871e-04 L11_spectral:7.5890e-04 L12_spectral:7.2248e-04 train_time:119638ms step_avg:49.85ms +[2025-09-11 12:01:33] [Rank 0] step:2401/10000 train_time:121252ms step_avg:50.50ms +[2025-09-11 12:01:33] [Rank 0] step:2401/10000 train_time:121252ms step_avg:50.50ms +[2025-09-11 12:01:34] [Rank 0] step:2421/10000 train_time:121921ms step_avg:50.36ms +[2025-09-11 12:01:34] [Rank 0] step:2421/10000 train_time:121921ms step_avg:50.36ms +[2025-09-11 12:01:35] [Rank 0] step:2441/10000 train_time:122587ms step_avg:50.22ms +[2025-09-11 12:01:35] [Rank 0] step:2441/10000 train_time:122587ms step_avg:50.22ms +[2025-09-11 12:01:35] [Rank 0] step:2461/10000 train_time:123253ms step_avg:50.08ms +[2025-09-11 12:01:35] [Rank 0] step:2461/10000 train_time:123253ms step_avg:50.08ms +[2025-09-11 12:01:36] [Rank 0] step:2481/10000 train_time:124193ms step_avg:50.06ms +[2025-09-11 12:01:36] [Rank 0] step:2481/10000 
train_time:124193ms step_avg:50.06ms +[2025-09-11 12:01:37] [Rank 0] step:2501/10000 train_time:124859ms step_avg:49.92ms +[2025-09-11 12:01:37] [Rank 0] step:2501/10000 train_time:124859ms step_avg:49.92ms +[2025-09-11 12:01:37] [Rank 0] step:2521/10000 train_time:125523ms step_avg:49.79ms +[2025-09-11 12:01:37] [Rank 0] step:2521/10000 train_time:125523ms step_avg:49.79ms +[2025-09-11 12:01:38] [Rank 0] step:2541/10000 train_time:126480ms step_avg:49.78ms +[2025-09-11 12:01:38] [Rank 0] step:2541/10000 train_time:126480ms step_avg:49.78ms +[2025-09-11 12:01:39] [Rank 0] step:2561/10000 train_time:127144ms step_avg:49.65ms +[2025-09-11 12:01:39] [Rank 0] step:2561/10000 train_time:127144ms step_avg:49.65ms +[2025-09-11 12:01:40] [Rank 0] step:2581/10000 train_time:127808ms step_avg:49.52ms +[2025-09-11 12:01:40] [Rank 0] step:2581/10000 train_time:127808ms step_avg:49.52ms +[2025-09-11 12:01:40] [Rank 0] step:2601/10000 train_time:128472ms step_avg:49.39ms +[2025-09-11 12:01:40] [Rank 0] step:2601/10000 train_time:128472ms step_avg:49.39ms +[2025-09-11 12:01:41] [Rank 0] step:2621/10000 train_time:129138ms step_avg:49.27ms +[2025-09-11 12:01:41] [Rank 0] step:2621/10000 train_time:129138ms step_avg:49.27ms +[2025-09-11 12:01:42] [Rank 0] step:2641/10000 train_time:129800ms step_avg:49.15ms +[2025-09-11 12:01:42] [Rank 0] step:2641/10000 train_time:129800ms step_avg:49.15ms +[2025-09-11 12:01:42] [Rank 0] step:2661/10000 train_time:130465ms step_avg:49.03ms +[2025-09-11 12:01:42] [Rank 0] step:2661/10000 train_time:130465ms step_avg:49.03ms +[2025-09-11 12:01:43] [Rank 0] step:2681/10000 train_time:131129ms step_avg:48.91ms +[2025-09-11 12:01:43] [Rank 0] step:2681/10000 train_time:131129ms step_avg:48.91ms +[2025-09-11 12:01:44] [Rank 0] step:2701/10000 train_time:131794ms step_avg:48.79ms +[2025-09-11 12:01:44] [Rank 0] step:2701/10000 train_time:131794ms step_avg:48.79ms +[2025-09-11 12:01:44] [Rank 0] step:2721/10000 train_time:132458ms step_avg:48.68ms 
+[2025-09-11 12:01:44] [Rank 0] step:2721/10000 train_time:132458ms step_avg:48.68ms +[2025-09-11 12:01:45] [Rank 0] step:2741/10000 train_time:133126ms step_avg:48.57ms +[2025-09-11 12:01:45] [Rank 0] step:2741/10000 train_time:133126ms step_avg:48.57ms +[2025-09-11 12:01:46] [Rank 0] step:2761/10000 train_time:133790ms step_avg:48.46ms +[2025-09-11 12:01:46] [Rank 0] step:2761/10000 train_time:133790ms step_avg:48.46ms +[2025-09-11 12:01:46] [Rank 0] step:2781/10000 train_time:134454ms step_avg:48.35ms +[2025-09-11 12:01:46] [Rank 0] step:2781/10000 train_time:134454ms step_avg:48.35ms +[2025-09-11 12:01:47] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 12:01:47] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 12:01:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 12:01:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 12:01:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 12:01:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 12:01:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:01:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:01:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 12:01:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 12:01:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 12:01:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 12:01:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 12:01:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 12:01:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 12:01:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 12:01:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 12:01:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 12:01:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 12:01:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 12:01:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 12:01:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 12:01:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 12:01:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 12:01:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 12:01:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 12:01:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 12:01:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 12:01:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 12:01:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 12:01:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 12:01:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 12:01:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 12:01:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 12:01:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 12:01:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 12:01:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 12:01:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 12:01:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 12:01:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 12:01:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 12:01:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 12:01:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 12:01:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 12:01:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 12:01:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:01:57] [Rank 0] PRINT: step:2800/10000 val_loss:5.8448 total_sharp:2.0775e-02 L1_sharp:5.5841e-01 L2_sharp:5.7025e-01 L3_sharp:6.4433e-01 L4_sharp:8.9103e-01 L5_sharp:1.1075e+00 L6_sharp:1.1715e+00 L7_sharp:1.3835e+00 L8_sharp:1.8710e+00 L9_sharp:2.4657e+00 L10_sharp:3.4524e+00 L11_sharp:3.0104e+00 L12_sharp:2.2924e+00 total_fnorm:4.4688e+00 total_l1_linf:4.2240e+03 total_spectral:2.2344e+00 L1_fnorm:4.7607e-02 L2_fnorm:4.8096e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.8096e-02 L5_fnorm:4.7119e-02 L6_fnorm:4.7852e-02 L7_fnorm:4.7852e-02 L8_fnorm:4.6875e-02 L9_fnorm:4.8096e-02 L10_fnorm:4.6875e-02 L11_fnorm:4.7607e-02 L12_fnorm:4.4434e-02 L1_l1linf:1.4160e-02 L2_l1linf:1.4282e-02 L3_l1linf:1.4526e-02 L4_l1linf:1.4893e-02 L5_l1linf:1.4893e-02 L6_l1linf:1.5015e-02 L7_l1linf:1.5381e-02 L8_l1linf:1.5320e-02 L9_l1linf:1.6113e-02 L10_l1linf:1.5991e-02 L11_l1linf:1.5564e-02 L12_l1linf:1.4099e-02 L1_spectral:7.6642e-04 L2_spectral:7.7136e-04 L3_spectral:7.6687e-04 L4_spectral:7.7426e-04 L5_spectral:7.5660e-04 L6_spectral:7.7033e-04 L7_spectral:7.6755e-04 L8_spectral:7.5501e-04 L9_spectral:7.7521e-04 L10_spectral:7.6585e-04 L11_spectral:7.6665e-04 L12_spectral:7.2092e-04 train_time:135100ms step_avg:48.25ms +[2025-09-11 12:01:57] [Rank 0] PRINT: step:2800/10000 val_loss:5.8448 total_sharp:2.0775e-02 L1_sharp:5.5841e-01 L2_sharp:5.7025e-01 L3_sharp:6.4433e-01 L4_sharp:8.9103e-01 L5_sharp:1.1075e+00 L6_sharp:1.1715e+00 L7_sharp:1.3835e+00 L8_sharp:1.8710e+00 L9_sharp:2.4657e+00 L10_sharp:3.4524e+00 L11_sharp:3.0104e+00 L12_sharp:2.2924e+00 total_fnorm:4.4688e+00 total_l1_linf:4.2240e+03 total_spectral:2.2344e+00 L1_fnorm:4.7607e-02 L2_fnorm:4.8096e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.8096e-02 L5_fnorm:4.7119e-02 L6_fnorm:4.7852e-02 L7_fnorm:4.7852e-02 L8_fnorm:4.6875e-02 L9_fnorm:4.8096e-02 L10_fnorm:4.6875e-02 L11_fnorm:4.7607e-02 L12_fnorm:4.4434e-02 L1_l1linf:1.4160e-02 L2_l1linf:1.4282e-02 L3_l1linf:1.4526e-02 L4_l1linf:1.4893e-02 L5_l1linf:1.4893e-02 
L6_l1linf:1.5015e-02 L7_l1linf:1.5381e-02 L8_l1linf:1.5320e-02 L9_l1linf:1.6113e-02 L10_l1linf:1.5991e-02 L11_l1linf:1.5564e-02 L12_l1linf:1.4099e-02 L1_spectral:7.6642e-04 L2_spectral:7.7136e-04 L3_spectral:7.6687e-04 L4_spectral:7.7426e-04 L5_spectral:7.5660e-04 L6_spectral:7.7033e-04 L7_spectral:7.6755e-04 L8_spectral:7.5501e-04 L9_spectral:7.7521e-04 L10_spectral:7.6585e-04 L11_spectral:7.6665e-04 L12_spectral:7.2092e-04 train_time:135100ms step_avg:48.25ms +[2025-09-11 12:01:59] [Rank 0] step:2801/10000 train_time:137048ms step_avg:48.93ms +[2025-09-11 12:01:59] [Rank 0] step:2801/10000 train_time:137048ms step_avg:48.93ms +[2025-09-11 12:02:00] [Rank 0] step:2821/10000 train_time:137720ms step_avg:48.82ms +[2025-09-11 12:02:00] [Rank 0] step:2821/10000 train_time:137720ms step_avg:48.82ms +[2025-09-11 12:02:00] [Rank 0] step:2841/10000 train_time:138387ms step_avg:48.71ms +[2025-09-11 12:02:00] [Rank 0] step:2841/10000 train_time:138387ms step_avg:48.71ms +[2025-09-11 12:02:01] [Rank 0] step:2861/10000 train_time:139054ms step_avg:48.60ms +[2025-09-11 12:02:01] [Rank 0] step:2861/10000 train_time:139054ms step_avg:48.60ms +[2025-09-11 12:02:02] [Rank 0] step:2881/10000 train_time:139724ms step_avg:48.50ms +[2025-09-11 12:02:02] [Rank 0] step:2881/10000 train_time:139724ms step_avg:48.50ms +[2025-09-11 12:02:02] [Rank 0] step:2901/10000 train_time:140391ms step_avg:48.39ms +[2025-09-11 12:02:02] [Rank 0] step:2901/10000 train_time:140391ms step_avg:48.39ms +[2025-09-11 12:02:03] [Rank 0] step:2921/10000 train_time:141058ms step_avg:48.29ms +[2025-09-11 12:02:03] [Rank 0] step:2921/10000 train_time:141058ms step_avg:48.29ms +[2025-09-11 12:02:04] [Rank 0] step:2941/10000 train_time:141725ms step_avg:48.19ms +[2025-09-11 12:02:04] [Rank 0] step:2941/10000 train_time:141725ms step_avg:48.19ms +[2025-09-11 12:02:04] [Rank 0] step:2961/10000 train_time:142391ms step_avg:48.09ms +[2025-09-11 12:02:04] [Rank 0] step:2961/10000 train_time:142391ms step_avg:48.09ms 
+[2025-09-11 12:02:05] [Rank 0] step:2981/10000 train_time:143061ms step_avg:47.99ms +[2025-09-11 12:02:05] [Rank 0] step:2981/10000 train_time:143061ms step_avg:47.99ms +[2025-09-11 12:02:06] [Rank 0] step:3001/10000 train_time:143728ms step_avg:47.89ms +[2025-09-11 12:02:06] [Rank 0] step:3001/10000 train_time:143728ms step_avg:47.89ms +[2025-09-11 12:02:06] [Rank 0] step:3021/10000 train_time:144397ms step_avg:47.80ms +[2025-09-11 12:02:06] [Rank 0] step:3021/10000 train_time:144397ms step_avg:47.80ms +[2025-09-11 12:02:07] [Rank 0] step:3041/10000 train_time:145065ms step_avg:47.70ms +[2025-09-11 12:02:07] [Rank 0] step:3041/10000 train_time:145065ms step_avg:47.70ms +[2025-09-11 12:02:08] [Rank 0] step:3061/10000 train_time:145734ms step_avg:47.61ms +[2025-09-11 12:02:08] [Rank 0] step:3061/10000 train_time:145734ms step_avg:47.61ms +[2025-09-11 12:02:08] [Rank 0] step:3081/10000 train_time:146403ms step_avg:47.52ms +[2025-09-11 12:02:08] [Rank 0] step:3081/10000 train_time:146403ms step_avg:47.52ms +[2025-09-11 12:02:09] [Rank 0] step:3101/10000 train_time:147072ms step_avg:47.43ms +[2025-09-11 12:02:09] [Rank 0] step:3101/10000 train_time:147072ms step_avg:47.43ms +[2025-09-11 12:02:10] [Rank 0] step:3121/10000 train_time:147741ms step_avg:47.34ms +[2025-09-11 12:02:10] [Rank 0] step:3121/10000 train_time:147741ms step_avg:47.34ms +[2025-09-11 12:02:10] [Rank 0] step:3141/10000 train_time:148410ms step_avg:47.25ms +[2025-09-11 12:02:10] [Rank 0] step:3141/10000 train_time:148410ms step_avg:47.25ms +[2025-09-11 12:02:11] [Rank 0] step:3161/10000 train_time:149078ms step_avg:47.16ms +[2025-09-11 12:02:11] [Rank 0] step:3161/10000 train_time:149078ms step_avg:47.16ms +[2025-09-11 12:02:12] [Rank 0] step:3181/10000 train_time:149746ms step_avg:47.08ms +[2025-09-11 12:02:12] [Rank 0] step:3181/10000 train_time:149746ms step_avg:47.08ms +[2025-09-11 12:02:12] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 12:02:12] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 12:02:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 12:02:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 12:02:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 12:02:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 12:02:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:02:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:02:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 12:02:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 12:02:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 12:02:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 12:02:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 12:02:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 12:02:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 12:02:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 12:02:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 12:02:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 12:02:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 12:02:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 12:02:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 12:02:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 12:02:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 12:02:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 12:02:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 12:02:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 12:02:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 12:02:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 12:02:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 12:02:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 12:02:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 12:02:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 12:02:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 12:02:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 12:02:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 12:02:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 12:02:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 12:02:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 12:02:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 12:02:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 12:02:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 12:02:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 12:02:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 12:02:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 12:02:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 12:02:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 12:02:23] [Rank 0] PRINT: step:3200/10000 val_loss:5.7540 total_sharp:1.8907e-02 L1_sharp:4.8684e-01 L2_sharp:5.5097e-01 L3_sharp:6.5893e-01 L4_sharp:8.5392e-01 L5_sharp:1.1591e+00 L6_sharp:1.3678e+00 L7_sharp:1.7015e+00 L8_sharp:2.4646e+00 L9_sharp:3.5532e+00 L10_sharp:3.2595e+00 L11_sharp:2.4716e+00 L12_sharp:3.4153e+00 total_fnorm:5.0312e+00 total_l1_linf:4.9280e+03 total_spectral:2.5156e+00 L1_fnorm:4.7607e-02 L2_fnorm:4.7852e-02 L3_fnorm:4.7852e-02 L4_fnorm:4.7852e-02 L5_fnorm:4.7119e-02 L6_fnorm:4.7607e-02 L7_fnorm:4.7852e-02 L8_fnorm:4.7119e-02 L9_fnorm:4.8096e-02 L10_fnorm:4.7119e-02 L11_fnorm:4.7852e-02 L12_fnorm:4.5898e-02 L1_l1linf:1.3367e-02 L2_l1linf:1.3489e-02 L3_l1linf:1.3611e-02 L4_l1linf:1.3916e-02 L5_l1linf:1.4099e-02 L6_l1linf:1.4587e-02 L7_l1linf:1.4282e-02 L8_l1linf:1.4587e-02 L9_l1linf:1.4771e-02 L10_l1linf:1.4771e-02 L11_l1linf:1.5198e-02 L12_l1linf:1.4099e-02 L1_spectral:7.7323e-04 L2_spectral:7.8354e-04 L3_spectral:7.7951e-04 L4_spectral:7.7265e-04 L5_spectral:7.6796e-04 L6_spectral:7.7845e-04 L7_spectral:7.8286e-04 L8_spectral:7.7426e-04 L9_spectral:7.8400e-04 L10_spectral:7.8093e-04 L11_spectral:7.8285e-04 L12_spectral:7.4808e-04 train_time:150395ms step_avg:47.00ms +[2025-09-11 12:02:23] [Rank 0] PRINT: step:3200/10000 
val_loss:5.7540 total_sharp:1.8907e-02 L1_sharp:4.8684e-01 L2_sharp:5.5097e-01 L3_sharp:6.5893e-01 L4_sharp:8.5392e-01 L5_sharp:1.1591e+00 L6_sharp:1.3678e+00 L7_sharp:1.7015e+00 L8_sharp:2.4646e+00 L9_sharp:3.5532e+00 L10_sharp:3.2595e+00 L11_sharp:2.4716e+00 L12_sharp:3.4153e+00 total_fnorm:5.0312e+00 total_l1_linf:4.9280e+03 total_spectral:2.5156e+00 L1_fnorm:4.7607e-02 L2_fnorm:4.7852e-02 L3_fnorm:4.7852e-02 L4_fnorm:4.7852e-02 L5_fnorm:4.7119e-02 L6_fnorm:4.7607e-02 L7_fnorm:4.7852e-02 L8_fnorm:4.7119e-02 L9_fnorm:4.8096e-02 L10_fnorm:4.7119e-02 L11_fnorm:4.7852e-02 L12_fnorm:4.5898e-02 L1_l1linf:1.3367e-02 L2_l1linf:1.3489e-02 L3_l1linf:1.3611e-02 L4_l1linf:1.3916e-02 L5_l1linf:1.4099e-02 L6_l1linf:1.4587e-02 L7_l1linf:1.4282e-02 L8_l1linf:1.4587e-02 L9_l1linf:1.4771e-02 L10_l1linf:1.4771e-02 L11_l1linf:1.5198e-02 L12_l1linf:1.4099e-02 L1_spectral:7.7323e-04 L2_spectral:7.8354e-04 L3_spectral:7.7951e-04 L4_spectral:7.7265e-04 L5_spectral:7.6796e-04 L6_spectral:7.7845e-04 L7_spectral:7.8286e-04 L8_spectral:7.7426e-04 L9_spectral:7.8400e-04 L10_spectral:7.8093e-04 L11_spectral:7.8285e-04 L12_spectral:7.4808e-04 train_time:150395ms step_avg:47.00ms +[2025-09-11 12:02:25] [Rank 0] step:3201/10000 train_time:152326ms step_avg:47.59ms +[2025-09-11 12:02:25] [Rank 0] step:3201/10000 train_time:152326ms step_avg:47.59ms +[2025-09-11 12:02:25] [Rank 0] step:3221/10000 train_time:153000ms step_avg:47.50ms +[2025-09-11 12:02:25] [Rank 0] step:3221/10000 train_time:153000ms step_avg:47.50ms +[2025-09-11 12:02:26] [Rank 0] step:3241/10000 train_time:153670ms step_avg:47.41ms +[2025-09-11 12:02:26] [Rank 0] step:3241/10000 train_time:153670ms step_avg:47.41ms +[2025-09-11 12:02:27] [Rank 0] step:3261/10000 train_time:154340ms step_avg:47.33ms +[2025-09-11 12:02:27] [Rank 0] step:3261/10000 train_time:154340ms step_avg:47.33ms +[2025-09-11 12:02:27] [Rank 0] step:3281/10000 train_time:155009ms step_avg:47.24ms +[2025-09-11 12:02:27] [Rank 0] step:3281/10000 
train_time:155009ms step_avg:47.24ms +[2025-09-11 12:02:28] [Rank 0] step:3301/10000 train_time:155678ms step_avg:47.16ms +[2025-09-11 12:02:28] [Rank 0] step:3301/10000 train_time:155678ms step_avg:47.16ms +[2025-09-11 12:02:29] [Rank 0] step:3321/10000 train_time:156347ms step_avg:47.08ms +[2025-09-11 12:02:29] [Rank 0] step:3321/10000 train_time:156347ms step_avg:47.08ms +[2025-09-11 12:02:29] [Rank 0] step:3341/10000 train_time:157016ms step_avg:47.00ms +[2025-09-11 12:02:29] [Rank 0] step:3341/10000 train_time:157016ms step_avg:47.00ms +[2025-09-11 12:02:30] [Rank 0] step:3361/10000 train_time:157685ms step_avg:46.92ms +[2025-09-11 12:02:30] [Rank 0] step:3361/10000 train_time:157685ms step_avg:46.92ms +[2025-09-11 12:02:31] [Rank 0] step:3381/10000 train_time:158353ms step_avg:46.84ms +[2025-09-11 12:02:31] [Rank 0] step:3381/10000 train_time:158353ms step_avg:46.84ms +[2025-09-11 12:02:32] [Rank 0] step:3401/10000 train_time:159022ms step_avg:46.76ms +[2025-09-11 12:02:32] [Rank 0] step:3401/10000 train_time:159022ms step_avg:46.76ms +[2025-09-11 12:02:32] [Rank 0] step:3421/10000 train_time:159689ms step_avg:46.68ms +[2025-09-11 12:02:32] [Rank 0] step:3421/10000 train_time:159689ms step_avg:46.68ms +[2025-09-11 12:02:33] [Rank 0] step:3441/10000 train_time:160358ms step_avg:46.60ms +[2025-09-11 12:02:33] [Rank 0] step:3441/10000 train_time:160358ms step_avg:46.60ms +[2025-09-11 12:02:34] [Rank 0] step:3461/10000 train_time:161026ms step_avg:46.53ms +[2025-09-11 12:02:34] [Rank 0] step:3461/10000 train_time:161026ms step_avg:46.53ms +[2025-09-11 12:02:34] [Rank 0] step:3481/10000 train_time:161694ms step_avg:46.45ms +[2025-09-11 12:02:34] [Rank 0] step:3481/10000 train_time:161694ms step_avg:46.45ms +[2025-09-11 12:02:35] [Rank 0] step:3501/10000 train_time:162363ms step_avg:46.38ms +[2025-09-11 12:02:35] [Rank 0] step:3501/10000 train_time:162363ms step_avg:46.38ms +[2025-09-11 12:02:36] [Rank 0] step:3521/10000 train_time:163031ms step_avg:46.30ms 
+[2025-09-11 12:02:36] [Rank 0] step:3521/10000 train_time:163031ms step_avg:46.30ms +[2025-09-11 12:02:36] [Rank 0] step:3541/10000 train_time:163699ms step_avg:46.23ms +[2025-09-11 12:02:36] [Rank 0] step:3541/10000 train_time:163699ms step_avg:46.23ms +[2025-09-11 12:02:37] [Rank 0] step:3561/10000 train_time:164368ms step_avg:46.16ms +[2025-09-11 12:02:37] [Rank 0] step:3561/10000 train_time:164368ms step_avg:46.16ms +[2025-09-11 12:02:38] [Rank 0] step:3581/10000 train_time:165036ms step_avg:46.09ms +[2025-09-11 12:02:38] [Rank 0] step:3581/10000 train_time:165036ms step_avg:46.09ms +[2025-09-11 12:02:38] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 12:02:38] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 12:02:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 12:02:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 12:02:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 12:02:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 12:02:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:02:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:02:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 12:02:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 12:02:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 12:02:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 12:02:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 12:02:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 12:02:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 12:02:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 12:02:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 12:02:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 12:02:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 12:02:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 12:02:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 12:02:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 12:02:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 12:02:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 12:02:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 12:02:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 12:02:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 12:02:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 12:02:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 12:02:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 12:02:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 12:02:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 12:02:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 12:02:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 12:02:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 12:02:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 12:02:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 12:02:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 12:02:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 12:02:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 12:02:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 12:02:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 12:02:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 12:02:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 12:02:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 12:02:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:02:49] [Rank 0] PRINT: step:3600/10000 val_loss:5.6937 total_sharp:1.8997e-02 L1_sharp:2.5712e-01 L2_sharp:3.2783e-01 L3_sharp:4.4795e-01 L4_sharp:6.4978e-01 L5_sharp:1.0051e+00 L6_sharp:1.3312e+00 L7_sharp:1.7564e+00 L8_sharp:2.3597e+00 L9_sharp:3.4730e+00 L10_sharp:3.2831e+00 L11_sharp:2.3562e+00 L12_sharp:2.1210e+00 total_fnorm:4.4375e+00 total_l1_linf:4.0960e+03 total_spectral:2.2188e+00 L1_fnorm:4.7119e-02 L2_fnorm:4.7363e-02 L3_fnorm:4.7607e-02 L4_fnorm:4.7607e-02 L5_fnorm:4.7119e-02 L6_fnorm:4.7607e-02 L7_fnorm:4.7607e-02 L8_fnorm:4.7119e-02 L9_fnorm:4.7607e-02 L10_fnorm:4.6875e-02 L11_fnorm:4.7607e-02 L12_fnorm:4.6631e-02 L1_l1linf:1.3000e-02 L2_l1linf:1.2512e-02 L3_l1linf:1.2695e-02 L4_l1linf:1.3367e-02 L5_l1linf:1.3367e-02 L6_l1linf:1.3489e-02 L7_l1linf:1.3794e-02 L8_l1linf:1.4038e-02 L9_l1linf:1.3977e-02 L10_l1linf:1.4587e-02 L11_l1linf:1.4465e-02 L12_l1linf:1.3977e-02 L1_spectral:7.8405e-04 L2_spectral:7.8021e-04 L3_spectral:7.8304e-04 L4_spectral:7.8389e-04 L5_spectral:7.7508e-04 L6_spectral:7.8699e-04 L7_spectral:7.8960e-04 L8_spectral:7.7897e-04 L9_spectral:7.8824e-04 L10_spectral:7.8365e-04 L11_spectral:7.8892e-04 L12_spectral:7.5199e-04 train_time:165686ms step_avg:46.02ms +[2025-09-11 12:02:49] [Rank 0] PRINT: step:3600/10000 val_loss:5.6937 total_sharp:1.8997e-02 L1_sharp:2.5712e-01 L2_sharp:3.2783e-01 L3_sharp:4.4795e-01 L4_sharp:6.4978e-01 L5_sharp:1.0051e+00 L6_sharp:1.3312e+00 L7_sharp:1.7564e+00 L8_sharp:2.3597e+00 L9_sharp:3.4730e+00 L10_sharp:3.2831e+00 L11_sharp:2.3562e+00 L12_sharp:2.1210e+00 total_fnorm:4.4375e+00 total_l1_linf:4.0960e+03 total_spectral:2.2188e+00 L1_fnorm:4.7119e-02 L2_fnorm:4.7363e-02 L3_fnorm:4.7607e-02 L4_fnorm:4.7607e-02 L5_fnorm:4.7119e-02 L6_fnorm:4.7607e-02 L7_fnorm:4.7607e-02 L8_fnorm:4.7119e-02 L9_fnorm:4.7607e-02 L10_fnorm:4.6875e-02 L11_fnorm:4.7607e-02 L12_fnorm:4.6631e-02 L1_l1linf:1.3000e-02 L2_l1linf:1.2512e-02 L3_l1linf:1.2695e-02 L4_l1linf:1.3367e-02 L5_l1linf:1.3367e-02 
L6_l1linf:1.3489e-02 L7_l1linf:1.3794e-02 L8_l1linf:1.4038e-02 L9_l1linf:1.3977e-02 L10_l1linf:1.4587e-02 L11_l1linf:1.4465e-02 L12_l1linf:1.3977e-02 L1_spectral:7.8405e-04 L2_spectral:7.8021e-04 L3_spectral:7.8304e-04 L4_spectral:7.8389e-04 L5_spectral:7.7508e-04 L6_spectral:7.8699e-04 L7_spectral:7.8960e-04 L8_spectral:7.7897e-04 L9_spectral:7.8824e-04 L10_spectral:7.8365e-04 L11_spectral:7.8892e-04 L12_spectral:7.5199e-04 train_time:165686ms step_avg:46.02ms +[2025-09-11 12:02:51] [Rank 0] step:3601/10000 train_time:167650ms step_avg:46.56ms +[2025-09-11 12:02:51] [Rank 0] step:3601/10000 train_time:167650ms step_avg:46.56ms +[2025-09-11 12:02:52] [Rank 0] step:3621/10000 train_time:168324ms step_avg:46.49ms +[2025-09-11 12:02:52] [Rank 0] step:3621/10000 train_time:168324ms step_avg:46.49ms +[2025-09-11 12:02:52] [Rank 0] step:3641/10000 train_time:168993ms step_avg:46.41ms +[2025-09-11 12:02:52] [Rank 0] step:3641/10000 train_time:168993ms step_avg:46.41ms +[2025-09-11 12:02:53] [Rank 0] step:3661/10000 train_time:169661ms step_avg:46.34ms +[2025-09-11 12:02:53] [Rank 0] step:3661/10000 train_time:169661ms step_avg:46.34ms +[2025-09-11 12:02:54] [Rank 0] step:3681/10000 train_time:170330ms step_avg:46.27ms +[2025-09-11 12:02:54] [Rank 0] step:3681/10000 train_time:170330ms step_avg:46.27ms +[2025-09-11 12:02:54] [Rank 0] step:3701/10000 train_time:170998ms step_avg:46.20ms +[2025-09-11 12:02:54] [Rank 0] step:3701/10000 train_time:170998ms step_avg:46.20ms +[2025-09-11 12:02:55] [Rank 0] step:3721/10000 train_time:171676ms step_avg:46.14ms +[2025-09-11 12:02:55] [Rank 0] step:3721/10000 train_time:171676ms step_avg:46.14ms +[2025-09-11 12:02:56] [Rank 0] step:3741/10000 train_time:172354ms step_avg:46.07ms +[2025-09-11 12:02:56] [Rank 0] step:3741/10000 train_time:172354ms step_avg:46.07ms +[2025-09-11 12:02:56] [Rank 0] step:3761/10000 train_time:173034ms step_avg:46.01ms +[2025-09-11 12:02:56] [Rank 0] step:3761/10000 train_time:173034ms step_avg:46.01ms 
+[2025-09-11 12:02:57] [Rank 0] step:3781/10000 train_time:173713ms step_avg:45.94ms +[2025-09-11 12:02:57] [Rank 0] step:3781/10000 train_time:173713ms step_avg:45.94ms +[2025-09-11 12:02:58] [Rank 0] step:3801/10000 train_time:174392ms step_avg:45.88ms +[2025-09-11 12:02:58] [Rank 0] step:3801/10000 train_time:174392ms step_avg:45.88ms +[2025-09-11 12:02:58] [Rank 0] step:3821/10000 train_time:175071ms step_avg:45.82ms +[2025-09-11 12:02:58] [Rank 0] step:3821/10000 train_time:175071ms step_avg:45.82ms +[2025-09-11 12:02:59] [Rank 0] step:3841/10000 train_time:175750ms step_avg:45.76ms +[2025-09-11 12:02:59] [Rank 0] step:3841/10000 train_time:175750ms step_avg:45.76ms +[2025-09-11 12:03:00] [Rank 0] step:3861/10000 train_time:176429ms step_avg:45.70ms +[2025-09-11 12:03:00] [Rank 0] step:3861/10000 train_time:176429ms step_avg:45.70ms +[2025-09-11 12:03:00] [Rank 0] step:3881/10000 train_time:177107ms step_avg:45.63ms +[2025-09-11 12:03:00] [Rank 0] step:3881/10000 train_time:177107ms step_avg:45.63ms +[2025-09-11 12:03:01] [Rank 0] step:3901/10000 train_time:177786ms step_avg:45.57ms +[2025-09-11 12:03:01] [Rank 0] step:3901/10000 train_time:177786ms step_avg:45.57ms +[2025-09-11 12:03:02] [Rank 0] step:3921/10000 train_time:178466ms step_avg:45.52ms +[2025-09-11 12:03:02] [Rank 0] step:3921/10000 train_time:178466ms step_avg:45.52ms +[2025-09-11 12:03:02] [Rank 0] step:3941/10000 train_time:179150ms step_avg:45.46ms +[2025-09-11 12:03:02] [Rank 0] step:3941/10000 train_time:179150ms step_avg:45.46ms +[2025-09-11 12:03:03] [Rank 0] step:3961/10000 train_time:179829ms step_avg:45.40ms +[2025-09-11 12:03:03] [Rank 0] step:3961/10000 train_time:179829ms step_avg:45.40ms +[2025-09-11 12:03:04] [Rank 0] step:3981/10000 train_time:180508ms step_avg:45.34ms +[2025-09-11 12:03:04] [Rank 0] step:3981/10000 train_time:180508ms step_avg:45.34ms +[2025-09-11 12:03:04] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 12:03:04] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 12:03:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 12:03:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 12:03:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 12:03:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 12:03:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:03:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:03:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 12:03:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 12:03:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 12:03:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 12:03:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 12:03:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 12:03:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 12:03:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 12:03:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 12:03:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 12:03:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 12:03:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 12:03:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 12:03:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 12:03:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 12:03:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 12:03:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 12:03:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 12:03:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 12:03:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 12:03:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 12:03:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 12:03:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 12:03:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 12:03:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 12:03:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 12:03:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 12:03:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 12:03:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 12:03:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 12:03:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 12:03:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 12:03:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 12:03:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 12:03:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 12:03:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 12:03:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 12:03:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 12:03:17] [Rank 0] PRINT: step:4000/10000 val_loss:5.6253 total_sharp:1.5433e-02 L1_sharp:2.3070e-01 L2_sharp:3.1809e-01 L3_sharp:4.2061e-01 L4_sharp:5.9053e-01 L5_sharp:9.0366e-01 L6_sharp:1.0907e+00 L7_sharp:1.5128e+00 L8_sharp:2.2292e+00 L9_sharp:3.5524e+00 L10_sharp:4.4047e+00 L11_sharp:2.9772e+00 L12_sharp:2.3119e+00 total_fnorm:4.9688e+00 total_l1_linf:4.6080e+03 total_spectral:2.4844e+00 L1_fnorm:4.7119e-02 L2_fnorm:4.7363e-02 L3_fnorm:4.7363e-02 L4_fnorm:4.7607e-02 L5_fnorm:4.7119e-02 L6_fnorm:4.7607e-02 L7_fnorm:4.7607e-02 L8_fnorm:4.6875e-02 L9_fnorm:4.7607e-02 L10_fnorm:4.6631e-02 L11_fnorm:4.7363e-02 L12_fnorm:4.5898e-02 L1_l1linf:1.2329e-02 L2_l1linf:1.2573e-02 L3_l1linf:1.2634e-02 L4_l1linf:1.2878e-02 L5_l1linf:1.3184e-02 L6_l1linf:1.3611e-02 L7_l1linf:1.3672e-02 L8_l1linf:1.3855e-02 L9_l1linf:1.4221e-02 L10_l1linf:1.4282e-02 L11_l1linf:1.4648e-02 L12_l1linf:1.3916e-02 L1_spectral:7.8905e-04 L2_spectral:7.8462e-04 L3_spectral:7.8634e-04 L4_spectral:7.8466e-04 L5_spectral:7.8624e-04 L6_spectral:7.8808e-04 L7_spectral:7.8954e-04 L8_spectral:7.7584e-04 L9_spectral:7.8747e-04 L10_spectral:7.8004e-04 L11_spectral:7.7763e-04 L12_spectral:7.4519e-04 train_time:181168ms step_avg:45.29ms +[2025-09-11 12:03:17] [Rank 0] PRINT: step:4000/10000 
val_loss:5.6253 total_sharp:1.5433e-02 L1_sharp:2.3070e-01 L2_sharp:3.1809e-01 L3_sharp:4.2061e-01 L4_sharp:5.9053e-01 L5_sharp:9.0366e-01 L6_sharp:1.0907e+00 L7_sharp:1.5128e+00 L8_sharp:2.2292e+00 L9_sharp:3.5524e+00 L10_sharp:4.4047e+00 L11_sharp:2.9772e+00 L12_sharp:2.3119e+00 total_fnorm:4.9688e+00 total_l1_linf:4.6080e+03 total_spectral:2.4844e+00 L1_fnorm:4.7119e-02 L2_fnorm:4.7363e-02 L3_fnorm:4.7363e-02 L4_fnorm:4.7607e-02 L5_fnorm:4.7119e-02 L6_fnorm:4.7607e-02 L7_fnorm:4.7607e-02 L8_fnorm:4.6875e-02 L9_fnorm:4.7607e-02 L10_fnorm:4.6631e-02 L11_fnorm:4.7363e-02 L12_fnorm:4.5898e-02 L1_l1linf:1.2329e-02 L2_l1linf:1.2573e-02 L3_l1linf:1.2634e-02 L4_l1linf:1.2878e-02 L5_l1linf:1.3184e-02 L6_l1linf:1.3611e-02 L7_l1linf:1.3672e-02 L8_l1linf:1.3855e-02 L9_l1linf:1.4221e-02 L10_l1linf:1.4282e-02 L11_l1linf:1.4648e-02 L12_l1linf:1.3916e-02 L1_spectral:7.8905e-04 L2_spectral:7.8462e-04 L3_spectral:7.8634e-04 L4_spectral:7.8466e-04 L5_spectral:7.8624e-04 L6_spectral:7.8808e-04 L7_spectral:7.8954e-04 L8_spectral:7.7584e-04 L9_spectral:7.8747e-04 L10_spectral:7.8004e-04 L11_spectral:7.7763e-04 L12_spectral:7.4519e-04 train_time:181168ms step_avg:45.29ms +[2025-09-11 12:03:18] [Rank 0] step:4001/10000 train_time:183013ms step_avg:45.74ms +[2025-09-11 12:03:18] [Rank 0] step:4001/10000 train_time:183013ms step_avg:45.74ms +[2025-09-11 12:03:19] [Rank 0] step:4021/10000 train_time:183724ms step_avg:45.69ms +[2025-09-11 12:03:19] [Rank 0] step:4021/10000 train_time:183724ms step_avg:45.69ms +[2025-09-11 12:03:20] [Rank 0] step:4041/10000 train_time:184403ms step_avg:45.63ms +[2025-09-11 12:03:20] [Rank 0] step:4041/10000 train_time:184403ms step_avg:45.63ms +[2025-09-11 12:03:21] [Rank 0] step:4061/10000 train_time:185080ms step_avg:45.58ms +[2025-09-11 12:03:21] [Rank 0] step:4061/10000 train_time:185080ms step_avg:45.58ms +[2025-09-11 12:03:21] [Rank 0] step:4081/10000 train_time:185760ms step_avg:45.52ms +[2025-09-11 12:03:21] [Rank 0] step:4081/10000 
train_time:185760ms step_avg:45.52ms +[2025-09-11 12:03:22] [Rank 0] step:4101/10000 train_time:186438ms step_avg:45.46ms +[2025-09-11 12:03:22] [Rank 0] step:4101/10000 train_time:186438ms step_avg:45.46ms +[2025-09-11 12:03:23] [Rank 0] step:4121/10000 train_time:187116ms step_avg:45.41ms +[2025-09-11 12:03:23] [Rank 0] step:4121/10000 train_time:187116ms step_avg:45.41ms +[2025-09-11 12:03:23] [Rank 0] step:4141/10000 train_time:187795ms step_avg:45.35ms +[2025-09-11 12:03:23] [Rank 0] step:4141/10000 train_time:187795ms step_avg:45.35ms +[2025-09-11 12:03:24] [Rank 0] step:4161/10000 train_time:188475ms step_avg:45.30ms +[2025-09-11 12:03:24] [Rank 0] step:4161/10000 train_time:188475ms step_avg:45.30ms +[2025-09-11 12:03:25] [Rank 0] step:4181/10000 train_time:189154ms step_avg:45.24ms +[2025-09-11 12:03:25] [Rank 0] step:4181/10000 train_time:189154ms step_avg:45.24ms +[2025-09-11 12:03:25] [Rank 0] step:4201/10000 train_time:189833ms step_avg:45.19ms +[2025-09-11 12:03:25] [Rank 0] step:4201/10000 train_time:189833ms step_avg:45.19ms +[2025-09-11 12:03:26] [Rank 0] step:4221/10000 train_time:190512ms step_avg:45.13ms +[2025-09-11 12:03:26] [Rank 0] step:4221/10000 train_time:190512ms step_avg:45.13ms +[2025-09-11 12:03:27] [Rank 0] step:4241/10000 train_time:191191ms step_avg:45.08ms +[2025-09-11 12:03:27] [Rank 0] step:4241/10000 train_time:191191ms step_avg:45.08ms +[2025-09-11 12:03:27] [Rank 0] step:4261/10000 train_time:191870ms step_avg:45.03ms +[2025-09-11 12:03:27] [Rank 0] step:4261/10000 train_time:191870ms step_avg:45.03ms +[2025-09-11 12:03:28] [Rank 0] step:4281/10000 train_time:192550ms step_avg:44.98ms +[2025-09-11 12:03:28] [Rank 0] step:4281/10000 train_time:192550ms step_avg:44.98ms +[2025-09-11 12:03:29] [Rank 0] step:4301/10000 train_time:193231ms step_avg:44.93ms +[2025-09-11 12:03:29] [Rank 0] step:4301/10000 train_time:193231ms step_avg:44.93ms +[2025-09-11 12:03:29] [Rank 0] step:4321/10000 train_time:193910ms step_avg:44.88ms 
+[2025-09-11 12:03:29] [Rank 0] step:4321/10000 train_time:193910ms step_avg:44.88ms +[2025-09-11 12:03:30] [Rank 0] step:4341/10000 train_time:194590ms step_avg:44.83ms +[2025-09-11 12:03:30] [Rank 0] step:4341/10000 train_time:194590ms step_avg:44.83ms +[2025-09-11 12:03:31] [Rank 0] step:4361/10000 train_time:195267ms step_avg:44.78ms +[2025-09-11 12:03:31] [Rank 0] step:4361/10000 train_time:195267ms step_avg:44.78ms +[2025-09-11 12:03:31] [Rank 0] step:4381/10000 train_time:195946ms step_avg:44.73ms +[2025-09-11 12:03:31] [Rank 0] step:4381/10000 train_time:195946ms step_avg:44.73ms +[2025-09-11 12:03:32] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 12:03:32] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 12:03:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 12:03:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 12:03:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 12:03:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 12:03:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:03:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:03:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 12:03:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 12:03:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 12:03:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 12:03:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 12:03:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 12:03:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 12:03:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 12:03:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 12:03:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 12:03:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 12:03:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 12:03:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 12:03:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 12:03:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 12:03:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 12:03:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 12:03:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 12:03:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 12:03:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 12:03:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 12:03:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 12:03:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 12:03:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 12:03:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 12:03:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 12:03:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 12:03:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 12:03:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 12:03:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 12:03:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 12:03:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 12:03:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 12:03:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 12:03:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 12:03:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 12:03:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 12:03:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:03:43] [Rank 0] PRINT: step:4400/10000 val_loss:5.5713 total_sharp:2.0824e-02 L1_sharp:2.7800e-01 L2_sharp:3.4871e-01 L3_sharp:4.3030e-01 L4_sharp:6.0556e-01 L5_sharp:8.3737e-01 L6_sharp:1.0255e+00 L7_sharp:1.5047e+00 L8_sharp:2.1150e+00 L9_sharp:3.7529e+00 L10_sharp:4.8098e+00 L11_sharp:4.4114e+00 L12_sharp:5.4933e+00 total_fnorm:4.5000e+00 total_l1_linf:4.0640e+03 total_spectral:2.2500e+00 L1_fnorm:4.6875e-02 L2_fnorm:4.7119e-02 L3_fnorm:4.7363e-02 L4_fnorm:4.7119e-02 L5_fnorm:4.6875e-02 L6_fnorm:4.7363e-02 L7_fnorm:4.7607e-02 L8_fnorm:4.6875e-02 L9_fnorm:4.7607e-02 L10_fnorm:4.6875e-02 L11_fnorm:4.7363e-02 L12_fnorm:4.3945e-02 L1_l1linf:1.2634e-02 L2_l1linf:1.2451e-02 L3_l1linf:1.2634e-02 L4_l1linf:1.2878e-02 L5_l1linf:1.3123e-02 L6_l1linf:1.3672e-02 L7_l1linf:1.3794e-02 L8_l1linf:1.3855e-02 L9_l1linf:1.4282e-02 L10_l1linf:1.4526e-02 L11_l1linf:1.4832e-02 L12_l1linf:1.2695e-02 L1_spectral:7.7677e-04 L2_spectral:7.8237e-04 L3_spectral:7.8482e-04 L4_spectral:7.8189e-04 L5_spectral:7.7443e-04 L6_spectral:7.8728e-04 L7_spectral:7.8856e-04 L8_spectral:7.7448e-04 L9_spectral:7.8393e-04 L10_spectral:7.8000e-04 L11_spectral:7.7806e-04 L12_spectral:7.1878e-04 train_time:196606ms step_avg:44.68ms +[2025-09-11 12:03:43] [Rank 0] PRINT: step:4400/10000 val_loss:5.5713 total_sharp:2.0824e-02 L1_sharp:2.7800e-01 L2_sharp:3.4871e-01 L3_sharp:4.3030e-01 L4_sharp:6.0556e-01 L5_sharp:8.3737e-01 L6_sharp:1.0255e+00 L7_sharp:1.5047e+00 L8_sharp:2.1150e+00 L9_sharp:3.7529e+00 L10_sharp:4.8098e+00 L11_sharp:4.4114e+00 L12_sharp:5.4933e+00 total_fnorm:4.5000e+00 total_l1_linf:4.0640e+03 total_spectral:2.2500e+00 L1_fnorm:4.6875e-02 L2_fnorm:4.7119e-02 L3_fnorm:4.7363e-02 L4_fnorm:4.7119e-02 L5_fnorm:4.6875e-02 L6_fnorm:4.7363e-02 L7_fnorm:4.7607e-02 L8_fnorm:4.6875e-02 L9_fnorm:4.7607e-02 L10_fnorm:4.6875e-02 L11_fnorm:4.7363e-02 L12_fnorm:4.3945e-02 L1_l1linf:1.2634e-02 L2_l1linf:1.2451e-02 L3_l1linf:1.2634e-02 L4_l1linf:1.2878e-02 L5_l1linf:1.3123e-02 
L6_l1linf:1.3672e-02 L7_l1linf:1.3794e-02 L8_l1linf:1.3855e-02 L9_l1linf:1.4282e-02 L10_l1linf:1.4526e-02 L11_l1linf:1.4832e-02 L12_l1linf:1.2695e-02 L1_spectral:7.7677e-04 L2_spectral:7.8237e-04 L3_spectral:7.8482e-04 L4_spectral:7.8189e-04 L5_spectral:7.7443e-04 L6_spectral:7.8728e-04 L7_spectral:7.8856e-04 L8_spectral:7.7448e-04 L9_spectral:7.8393e-04 L10_spectral:7.8000e-04 L11_spectral:7.7806e-04 L12_spectral:7.1878e-04 train_time:196606ms step_avg:44.68ms +[2025-09-11 12:03:44] [Rank 0] step:4401/10000 train_time:198471ms step_avg:45.10ms +[2025-09-11 12:03:44] [Rank 0] step:4401/10000 train_time:198471ms step_avg:45.10ms +[2025-09-11 12:03:45] [Rank 0] step:4421/10000 train_time:199292ms step_avg:45.08ms +[2025-09-11 12:03:45] [Rank 0] step:4421/10000 train_time:199292ms step_avg:45.08ms +[2025-09-11 12:03:46] [Rank 0] step:4441/10000 train_time:199975ms step_avg:45.03ms +[2025-09-11 12:03:46] [Rank 0] step:4441/10000 train_time:199975ms step_avg:45.03ms +[2025-09-11 12:03:47] [Rank 0] step:4461/10000 train_time:200657ms step_avg:44.98ms +[2025-09-11 12:03:47] [Rank 0] step:4461/10000 train_time:200657ms step_avg:44.98ms +[2025-09-11 12:03:47] [Rank 0] step:4481/10000 train_time:201338ms step_avg:44.93ms +[2025-09-11 12:03:47] [Rank 0] step:4481/10000 train_time:201338ms step_avg:44.93ms +[2025-09-11 12:03:48] [Rank 0] step:4501/10000 train_time:202020ms step_avg:44.88ms +[2025-09-11 12:03:48] [Rank 0] step:4501/10000 train_time:202020ms step_avg:44.88ms +[2025-09-11 12:03:49] [Rank 0] step:4521/10000 train_time:202702ms step_avg:44.84ms +[2025-09-11 12:03:49] [Rank 0] step:4521/10000 train_time:202702ms step_avg:44.84ms +[2025-09-11 12:03:49] [Rank 0] step:4541/10000 train_time:203383ms step_avg:44.79ms +[2025-09-11 12:03:49] [Rank 0] step:4541/10000 train_time:203383ms step_avg:44.79ms +[2025-09-11 12:03:50] [Rank 0] step:4561/10000 train_time:204065ms step_avg:44.74ms +[2025-09-11 12:03:50] [Rank 0] step:4561/10000 train_time:204065ms step_avg:44.74ms 
+[2025-09-11 12:03:51] [Rank 0] step:4581/10000 train_time:204746ms step_avg:44.69ms +[2025-09-11 12:03:51] [Rank 0] step:4581/10000 train_time:204746ms step_avg:44.69ms +[2025-09-11 12:03:51] [Rank 0] step:4601/10000 train_time:205426ms step_avg:44.65ms +[2025-09-11 12:03:51] [Rank 0] step:4601/10000 train_time:205426ms step_avg:44.65ms +[2025-09-11 12:03:52] [Rank 0] step:4621/10000 train_time:206108ms step_avg:44.60ms +[2025-09-11 12:03:52] [Rank 0] step:4621/10000 train_time:206108ms step_avg:44.60ms +[2025-09-11 12:03:53] [Rank 0] step:4641/10000 train_time:206788ms step_avg:44.56ms +[2025-09-11 12:03:53] [Rank 0] step:4641/10000 train_time:206788ms step_avg:44.56ms +[2025-09-11 12:03:53] [Rank 0] step:4661/10000 train_time:207471ms step_avg:44.51ms +[2025-09-11 12:03:53] [Rank 0] step:4661/10000 train_time:207471ms step_avg:44.51ms +[2025-09-11 12:03:54] [Rank 0] step:4681/10000 train_time:208151ms step_avg:44.47ms +[2025-09-11 12:03:54] [Rank 0] step:4681/10000 train_time:208151ms step_avg:44.47ms +[2025-09-11 12:03:55] [Rank 0] step:4701/10000 train_time:208832ms step_avg:44.42ms +[2025-09-11 12:03:55] [Rank 0] step:4701/10000 train_time:208832ms step_avg:44.42ms +[2025-09-11 12:03:55] [Rank 0] step:4721/10000 train_time:209513ms step_avg:44.38ms +[2025-09-11 12:03:55] [Rank 0] step:4721/10000 train_time:209513ms step_avg:44.38ms +[2025-09-11 12:03:56] [Rank 0] step:4741/10000 train_time:210193ms step_avg:44.34ms +[2025-09-11 12:03:56] [Rank 0] step:4741/10000 train_time:210193ms step_avg:44.34ms +[2025-09-11 12:03:57] [Rank 0] step:4761/10000 train_time:210875ms step_avg:44.29ms +[2025-09-11 12:03:57] [Rank 0] step:4761/10000 train_time:210875ms step_avg:44.29ms +[2025-09-11 12:03:58] [Rank 0] step:4781/10000 train_time:211555ms step_avg:44.25ms +[2025-09-11 12:03:58] [Rank 0] step:4781/10000 train_time:211555ms step_avg:44.25ms +[2025-09-11 12:03:58] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 12:03:58] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 12:03:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 12:03:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 12:04:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 12:04:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 12:04:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:04:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:04:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 12:04:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 12:04:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 12:04:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 12:04:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 12:04:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 12:04:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 12:04:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 12:04:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 12:04:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 12:04:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 12:04:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 12:04:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 12:04:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 12:04:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 12:04:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 12:04:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 12:04:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 12:04:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 12:04:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 12:04:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 12:04:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 12:04:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 12:04:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 12:04:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 12:04:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 12:04:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 12:04:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 12:04:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 12:04:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 12:04:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 12:04:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 12:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 12:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 12:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 12:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 12:04:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 12:04:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 12:04:09] [Rank 0] PRINT: step:4800/10000 val_loss:5.5268 total_sharp:1.6466e-02 L1_sharp:1.9393e-01 L2_sharp:2.5584e-01 L3_sharp:3.3142e-01 L4_sharp:4.7714e-01 L5_sharp:6.5041e-01 L6_sharp:9.0544e-01 L7_sharp:1.3299e+00 L8_sharp:1.9703e+00 L9_sharp:3.0911e+00 L10_sharp:4.2101e+00 L11_sharp:2.6781e+00 L12_sharp:2.5204e+00 total_fnorm:4.4688e+00 total_l1_linf:4.0800e+03 total_spectral:2.2344e+00 L1_fnorm:4.6631e-02 L2_fnorm:4.6875e-02 L3_fnorm:4.7119e-02 L4_fnorm:4.7119e-02 L5_fnorm:4.6875e-02 L6_fnorm:4.7119e-02 L7_fnorm:4.7119e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.7119e-02 L10_fnorm:4.6631e-02 L11_fnorm:4.7119e-02 L12_fnorm:4.6143e-02 L1_l1linf:1.1292e-02 L2_l1linf:1.1658e-02 L3_l1linf:1.1963e-02 L4_l1linf:1.2207e-02 L5_l1linf:1.2207e-02 L6_l1linf:1.2939e-02 L7_l1linf:1.3062e-02 L8_l1linf:1.3428e-02 L9_l1linf:1.3916e-02 L10_l1linf:1.3672e-02 L11_l1linf:1.3855e-02 L12_l1linf:1.3794e-02 L1_spectral:7.9014e-04 L2_spectral:7.9654e-04 L3_spectral:7.9335e-04 L4_spectral:7.8889e-04 L5_spectral:7.8330e-04 L6_spectral:7.9473e-04 L7_spectral:7.9359e-04 L8_spectral:7.8212e-04 L9_spectral:7.9280e-04 L10_spectral:7.9106e-04 L11_spectral:7.8992e-04 L12_spectral:7.5358e-04 train_time:212214ms step_avg:44.21ms +[2025-09-11 12:04:09] [Rank 0] PRINT: step:4800/10000 
val_loss:5.5268 total_sharp:1.6466e-02 L1_sharp:1.9393e-01 L2_sharp:2.5584e-01 L3_sharp:3.3142e-01 L4_sharp:4.7714e-01 L5_sharp:6.5041e-01 L6_sharp:9.0544e-01 L7_sharp:1.3299e+00 L8_sharp:1.9703e+00 L9_sharp:3.0911e+00 L10_sharp:4.2101e+00 L11_sharp:2.6781e+00 L12_sharp:2.5204e+00 total_fnorm:4.4688e+00 total_l1_linf:4.0800e+03 total_spectral:2.2344e+00 L1_fnorm:4.6631e-02 L2_fnorm:4.6875e-02 L3_fnorm:4.7119e-02 L4_fnorm:4.7119e-02 L5_fnorm:4.6875e-02 L6_fnorm:4.7119e-02 L7_fnorm:4.7119e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.7119e-02 L10_fnorm:4.6631e-02 L11_fnorm:4.7119e-02 L12_fnorm:4.6143e-02 L1_l1linf:1.1292e-02 L2_l1linf:1.1658e-02 L3_l1linf:1.1963e-02 L4_l1linf:1.2207e-02 L5_l1linf:1.2207e-02 L6_l1linf:1.2939e-02 L7_l1linf:1.3062e-02 L8_l1linf:1.3428e-02 L9_l1linf:1.3916e-02 L10_l1linf:1.3672e-02 L11_l1linf:1.3855e-02 L12_l1linf:1.3794e-02 L1_spectral:7.9014e-04 L2_spectral:7.9654e-04 L3_spectral:7.9335e-04 L4_spectral:7.8889e-04 L5_spectral:7.8330e-04 L6_spectral:7.9473e-04 L7_spectral:7.9359e-04 L8_spectral:7.8212e-04 L9_spectral:7.9280e-04 L10_spectral:7.9106e-04 L11_spectral:7.8992e-04 L12_spectral:7.5358e-04 train_time:212214ms step_avg:44.21ms +[2025-09-11 12:04:11] [Rank 0] step:4801/10000 train_time:214155ms step_avg:44.61ms +[2025-09-11 12:04:11] [Rank 0] step:4801/10000 train_time:214155ms step_avg:44.61ms +[2025-09-11 12:04:11] [Rank 0] step:4821/10000 train_time:215104ms step_avg:44.62ms +[2025-09-11 12:04:11] [Rank 0] step:4821/10000 train_time:215104ms step_avg:44.62ms +[2025-09-11 12:04:12] [Rank 0] step:4841/10000 train_time:215787ms step_avg:44.57ms +[2025-09-11 12:04:12] [Rank 0] step:4841/10000 train_time:215787ms step_avg:44.57ms +[2025-09-11 12:04:13] [Rank 0] step:4861/10000 train_time:216469ms step_avg:44.53ms +[2025-09-11 12:04:13] [Rank 0] step:4861/10000 train_time:216469ms step_avg:44.53ms +[2025-09-11 12:04:14] [Rank 0] step:4881/10000 train_time:217150ms step_avg:44.49ms +[2025-09-11 12:04:14] [Rank 0] step:4881/10000 
train_time:217150ms step_avg:44.49ms +[2025-09-11 12:04:14] [Rank 0] step:4901/10000 train_time:217833ms step_avg:44.45ms +[2025-09-11 12:04:14] [Rank 0] step:4901/10000 train_time:217833ms step_avg:44.45ms +[2025-09-11 12:04:15] [Rank 0] step:4921/10000 train_time:218515ms step_avg:44.40ms +[2025-09-11 12:04:15] [Rank 0] step:4921/10000 train_time:218515ms step_avg:44.40ms +[2025-09-11 12:04:16] [Rank 0] step:4941/10000 train_time:219198ms step_avg:44.36ms +[2025-09-11 12:04:16] [Rank 0] step:4941/10000 train_time:219198ms step_avg:44.36ms +[2025-09-11 12:04:16] [Rank 0] step:4961/10000 train_time:219879ms step_avg:44.32ms +[2025-09-11 12:04:16] [Rank 0] step:4961/10000 train_time:219879ms step_avg:44.32ms +[2025-09-11 12:04:17] [Rank 0] step:4981/10000 train_time:220562ms step_avg:44.28ms +[2025-09-11 12:04:17] [Rank 0] step:4981/10000 train_time:220562ms step_avg:44.28ms +[2025-09-11 12:04:18] [Rank 0] step:5001/10000 train_time:221246ms step_avg:44.24ms +[2025-09-11 12:04:18] [Rank 0] step:5001/10000 train_time:221246ms step_avg:44.24ms +[2025-09-11 12:04:18] [Rank 0] step:5021/10000 train_time:221927ms step_avg:44.20ms +[2025-09-11 12:04:18] [Rank 0] step:5021/10000 train_time:221927ms step_avg:44.20ms +[2025-09-11 12:04:19] [Rank 0] step:5041/10000 train_time:222608ms step_avg:44.16ms +[2025-09-11 12:04:19] [Rank 0] step:5041/10000 train_time:222608ms step_avg:44.16ms +[2025-09-11 12:04:20] [Rank 0] step:5061/10000 train_time:223289ms step_avg:44.12ms +[2025-09-11 12:04:20] [Rank 0] step:5061/10000 train_time:223289ms step_avg:44.12ms +[2025-09-11 12:04:20] [Rank 0] step:5081/10000 train_time:223971ms step_avg:44.08ms +[2025-09-11 12:04:20] [Rank 0] step:5081/10000 train_time:223971ms step_avg:44.08ms +[2025-09-11 12:04:21] [Rank 0] step:5101/10000 train_time:224653ms step_avg:44.04ms +[2025-09-11 12:04:21] [Rank 0] step:5101/10000 train_time:224653ms step_avg:44.04ms +[2025-09-11 12:04:22] [Rank 0] step:5121/10000 train_time:225334ms step_avg:44.00ms 
+[2025-09-11 12:04:22] [Rank 0] step:5121/10000 train_time:225334ms step_avg:44.00ms +[2025-09-11 12:04:22] [Rank 0] step:5141/10000 train_time:226016ms step_avg:43.96ms +[2025-09-11 12:04:22] [Rank 0] step:5141/10000 train_time:226016ms step_avg:43.96ms +[2025-09-11 12:04:23] [Rank 0] step:5161/10000 train_time:226699ms step_avg:43.93ms +[2025-09-11 12:04:23] [Rank 0] step:5161/10000 train_time:226699ms step_avg:43.93ms +[2025-09-11 12:04:24] [Rank 0] step:5181/10000 train_time:227387ms step_avg:43.89ms +[2025-09-11 12:04:24] [Rank 0] step:5181/10000 train_time:227387ms step_avg:43.89ms +[2025-09-11 12:04:24] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 12:04:24] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 12:04:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 12:04:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 12:04:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 12:04:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 12:04:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:04:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:04:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 12:04:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 12:04:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 12:04:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 12:04:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 12:04:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 12:04:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 12:04:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 12:04:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 12:04:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 12:04:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 12:04:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 12:04:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 12:04:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 12:04:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 12:04:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 12:04:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 12:04:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 12:04:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 12:04:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 12:04:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 12:04:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 12:04:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 12:04:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 12:04:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 12:04:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 12:04:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 12:04:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 12:04:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 12:04:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 12:04:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 12:04:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 12:04:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 12:04:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 12:04:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 12:04:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 12:04:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 12:04:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:04:35] [Rank 0] PRINT: step:5200/10000 val_loss:5.4844 total_sharp:2.1825e-02 L1_sharp:2.0093e-01 L2_sharp:2.8057e-01 L3_sharp:3.1796e-01 L4_sharp:4.2628e-01 L5_sharp:6.1509e-01 L6_sharp:8.5917e-01 L7_sharp:1.2341e+00 L8_sharp:1.9449e+00 L9_sharp:2.9627e+00 L10_sharp:4.4864e+00 L11_sharp:4.7399e+00 L12_sharp:8.8318e+00 total_fnorm:4.1875e+00 total_l1_linf:3.6320e+03 total_spectral:2.0938e+00 L1_fnorm:4.6631e-02 L2_fnorm:4.6631e-02 L3_fnorm:4.6875e-02 L4_fnorm:4.6875e-02 L5_fnorm:4.6875e-02 L6_fnorm:4.7119e-02 L7_fnorm:4.7119e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.7119e-02 L10_fnorm:4.6387e-02 L11_fnorm:4.6875e-02 L12_fnorm:4.6143e-02 L1_l1linf:1.1292e-02 L2_l1linf:1.1292e-02 L3_l1linf:1.1780e-02 L4_l1linf:1.2024e-02 L5_l1linf:1.2146e-02 L6_l1linf:1.2756e-02 L7_l1linf:1.2939e-02 L8_l1linf:1.3184e-02 L9_l1linf:1.3550e-02 L10_l1linf:1.3611e-02 L11_l1linf:1.4221e-02 L12_l1linf:1.3123e-02 L1_spectral:7.9548e-04 L2_spectral:7.9524e-04 L3_spectral:7.9645e-04 L4_spectral:7.8838e-04 L5_spectral:7.8842e-04 L6_spectral:7.9340e-04 L7_spectral:7.9215e-04 L8_spectral:7.8791e-04 L9_spectral:7.9300e-04 L10_spectral:7.8351e-04 L11_spectral:7.8694e-04 L12_spectral:7.6297e-04 train_time:228055ms step_avg:43.86ms +[2025-09-11 12:04:35] [Rank 0] PRINT: step:5200/10000 val_loss:5.4844 total_sharp:2.1825e-02 L1_sharp:2.0093e-01 L2_sharp:2.8057e-01 L3_sharp:3.1796e-01 L4_sharp:4.2628e-01 L5_sharp:6.1509e-01 L6_sharp:8.5917e-01 L7_sharp:1.2341e+00 L8_sharp:1.9449e+00 L9_sharp:2.9627e+00 L10_sharp:4.4864e+00 L11_sharp:4.7399e+00 L12_sharp:8.8318e+00 total_fnorm:4.1875e+00 total_l1_linf:3.6320e+03 total_spectral:2.0938e+00 L1_fnorm:4.6631e-02 L2_fnorm:4.6631e-02 L3_fnorm:4.6875e-02 L4_fnorm:4.6875e-02 L5_fnorm:4.6875e-02 L6_fnorm:4.7119e-02 L7_fnorm:4.7119e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.7119e-02 L10_fnorm:4.6387e-02 L11_fnorm:4.6875e-02 L12_fnorm:4.6143e-02 L1_l1linf:1.1292e-02 L2_l1linf:1.1292e-02 L3_l1linf:1.1780e-02 L4_l1linf:1.2024e-02 L5_l1linf:1.2146e-02 
L6_l1linf:1.2756e-02 L7_l1linf:1.2939e-02 L8_l1linf:1.3184e-02 L9_l1linf:1.3550e-02 L10_l1linf:1.3611e-02 L11_l1linf:1.4221e-02 L12_l1linf:1.3123e-02 L1_spectral:7.9548e-04 L2_spectral:7.9524e-04 L3_spectral:7.9645e-04 L4_spectral:7.8838e-04 L5_spectral:7.8842e-04 L6_spectral:7.9340e-04 L7_spectral:7.9215e-04 L8_spectral:7.8791e-04 L9_spectral:7.9300e-04 L10_spectral:7.8351e-04 L11_spectral:7.8694e-04 L12_spectral:7.6297e-04 train_time:228055ms step_avg:43.86ms +[2025-09-11 12:04:36] [Rank 0] step:5201/10000 train_time:229821ms step_avg:44.19ms +[2025-09-11 12:04:36] [Rank 0] step:5201/10000 train_time:229821ms step_avg:44.19ms +[2025-09-11 12:04:37] [Rank 0] step:5221/10000 train_time:230556ms step_avg:44.16ms +[2025-09-11 12:04:37] [Rank 0] step:5221/10000 train_time:230556ms step_avg:44.16ms +[2025-09-11 12:04:38] [Rank 0] step:5241/10000 train_time:231247ms step_avg:44.12ms +[2025-09-11 12:04:38] [Rank 0] step:5241/10000 train_time:231247ms step_avg:44.12ms +[2025-09-11 12:04:39] [Rank 0] step:5261/10000 train_time:231938ms step_avg:44.09ms +[2025-09-11 12:04:39] [Rank 0] step:5261/10000 train_time:231938ms step_avg:44.09ms +[2025-09-11 12:04:39] [Rank 0] step:5281/10000 train_time:232628ms step_avg:44.05ms +[2025-09-11 12:04:39] [Rank 0] step:5281/10000 train_time:232628ms step_avg:44.05ms +[2025-09-11 12:04:40] [Rank 0] step:5301/10000 train_time:233320ms step_avg:44.01ms +[2025-09-11 12:04:40] [Rank 0] step:5301/10000 train_time:233320ms step_avg:44.01ms +[2025-09-11 12:04:41] [Rank 0] step:5321/10000 train_time:234012ms step_avg:43.98ms +[2025-09-11 12:04:41] [Rank 0] step:5321/10000 train_time:234012ms step_avg:43.98ms +[2025-09-11 12:04:41] [Rank 0] step:5341/10000 train_time:234702ms step_avg:43.94ms +[2025-09-11 12:04:41] [Rank 0] step:5341/10000 train_time:234702ms step_avg:43.94ms +[2025-09-11 12:04:42] [Rank 0] step:5361/10000 train_time:235394ms step_avg:43.91ms +[2025-09-11 12:04:42] [Rank 0] step:5361/10000 train_time:235394ms step_avg:43.91ms 
+[2025-09-11 12:04:43] [Rank 0] step:5381/10000 train_time:236086ms step_avg:43.87ms +[2025-09-11 12:04:43] [Rank 0] step:5381/10000 train_time:236086ms step_avg:43.87ms +[2025-09-11 12:04:43] [Rank 0] step:5401/10000 train_time:236775ms step_avg:43.84ms +[2025-09-11 12:04:43] [Rank 0] step:5401/10000 train_time:236775ms step_avg:43.84ms +[2025-09-11 12:04:44] [Rank 0] step:5421/10000 train_time:237467ms step_avg:43.81ms +[2025-09-11 12:04:44] [Rank 0] step:5421/10000 train_time:237467ms step_avg:43.81ms +[2025-09-11 12:04:45] [Rank 0] step:5441/10000 train_time:238159ms step_avg:43.77ms +[2025-09-11 12:04:45] [Rank 0] step:5441/10000 train_time:238159ms step_avg:43.77ms +[2025-09-11 12:04:46] [Rank 0] step:5461/10000 train_time:239091ms step_avg:43.78ms +[2025-09-11 12:04:46] [Rank 0] step:5461/10000 train_time:239091ms step_avg:43.78ms +[2025-09-11 12:04:46] [Rank 0] step:5481/10000 train_time:239784ms step_avg:43.75ms +[2025-09-11 12:04:46] [Rank 0] step:5481/10000 train_time:239784ms step_avg:43.75ms +[2025-09-11 12:04:47] [Rank 0] step:5501/10000 train_time:240474ms step_avg:43.71ms +[2025-09-11 12:04:47] [Rank 0] step:5501/10000 train_time:240474ms step_avg:43.71ms +[2025-09-11 12:04:48] [Rank 0] step:5521/10000 train_time:241418ms step_avg:43.73ms +[2025-09-11 12:04:48] [Rank 0] step:5521/10000 train_time:241418ms step_avg:43.73ms +[2025-09-11 12:04:49] [Rank 0] step:5541/10000 train_time:242110ms step_avg:43.69ms +[2025-09-11 12:04:49] [Rank 0] step:5541/10000 train_time:242110ms step_avg:43.69ms +[2025-09-11 12:04:49] [Rank 0] step:5561/10000 train_time:242803ms step_avg:43.66ms +[2025-09-11 12:04:49] [Rank 0] step:5561/10000 train_time:242803ms step_avg:43.66ms +[2025-09-11 12:04:50] [Rank 0] step:5581/10000 train_time:243496ms step_avg:43.63ms +[2025-09-11 12:04:50] [Rank 0] step:5581/10000 train_time:243496ms step_avg:43.63ms +[2025-09-11 12:04:51] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 12:04:51] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 12:04:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 12:04:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 12:04:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 12:04:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 12:04:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:04:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:04:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 12:04:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 12:04:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 12:04:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 12:04:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 12:04:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 12:04:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 12:04:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 12:04:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 12:04:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 12:04:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 12:04:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 12:04:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 12:04:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 12:04:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 12:04:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 12:04:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 12:04:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 12:04:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 12:04:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 12:04:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 12:04:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 12:04:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 12:04:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 12:05:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 12:05:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 12:05:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 12:05:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 12:05:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 12:05:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 12:05:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 12:05:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 12:05:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 12:05:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 12:05:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 12:05:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 12:05:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 12:05:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 12:05:01] [Rank 0] PRINT: step:5600/10000 val_loss:5.4429 total_sharp:1.4357e-02 L1_sharp:1.1926e-01 L2_sharp:1.5448e-01 L3_sharp:2.0763e-01 L4_sharp:3.2596e-01 L5_sharp:5.2825e-01 L6_sharp:7.1795e-01 L7_sharp:9.9481e-01 L8_sharp:1.6597e+00 L9_sharp:2.8045e+00 L10_sharp:4.1633e+00 L11_sharp:3.1593e+00 L12_sharp:2.2607e+00 total_fnorm:4.2188e+00 total_l1_linf:3.7120e+03 total_spectral:2.1250e+00 L1_fnorm:4.6387e-02 L2_fnorm:4.6631e-02 L3_fnorm:4.6631e-02 L4_fnorm:4.6875e-02 L5_fnorm:4.6631e-02 L6_fnorm:4.6875e-02 L7_fnorm:4.6875e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.7119e-02 L10_fnorm:4.6387e-02 L11_fnorm:4.6875e-02 L12_fnorm:4.6143e-02 L1_l1linf:1.0803e-02 L2_l1linf:1.1169e-02 L3_l1linf:1.1292e-02 L4_l1linf:1.1780e-02 L5_l1linf:1.1963e-02 L6_l1linf:1.2207e-02 L7_l1linf:1.2573e-02 L8_l1linf:1.3000e-02 L9_l1linf:1.3855e-02 L10_l1linf:1.3367e-02 L11_l1linf:1.3733e-02 L12_l1linf:1.3489e-02 L1_spectral:7.9247e-04 L2_spectral:7.9484e-04 L3_spectral:7.9623e-04 L4_spectral:7.9753e-04 L5_spectral:7.8960e-04 L6_spectral:7.9746e-04 L7_spectral:7.9430e-04 L8_spectral:7.9110e-04 L9_spectral:7.9352e-04 L10_spectral:7.8340e-04 L11_spectral:7.8880e-04 L12_spectral:7.6335e-04 train_time:244168ms step_avg:43.60ms +[2025-09-11 12:05:01] [Rank 0] PRINT: step:5600/10000 
val_loss:5.4429 total_sharp:1.4357e-02 L1_sharp:1.1926e-01 L2_sharp:1.5448e-01 L3_sharp:2.0763e-01 L4_sharp:3.2596e-01 L5_sharp:5.2825e-01 L6_sharp:7.1795e-01 L7_sharp:9.9481e-01 L8_sharp:1.6597e+00 L9_sharp:2.8045e+00 L10_sharp:4.1633e+00 L11_sharp:3.1593e+00 L12_sharp:2.2607e+00 total_fnorm:4.2188e+00 total_l1_linf:3.7120e+03 total_spectral:2.1250e+00 L1_fnorm:4.6387e-02 L2_fnorm:4.6631e-02 L3_fnorm:4.6631e-02 L4_fnorm:4.6875e-02 L5_fnorm:4.6631e-02 L6_fnorm:4.6875e-02 L7_fnorm:4.6875e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.7119e-02 L10_fnorm:4.6387e-02 L11_fnorm:4.6875e-02 L12_fnorm:4.6143e-02 L1_l1linf:1.0803e-02 L2_l1linf:1.1169e-02 L3_l1linf:1.1292e-02 L4_l1linf:1.1780e-02 L5_l1linf:1.1963e-02 L6_l1linf:1.2207e-02 L7_l1linf:1.2573e-02 L8_l1linf:1.3000e-02 L9_l1linf:1.3855e-02 L10_l1linf:1.3367e-02 L11_l1linf:1.3733e-02 L12_l1linf:1.3489e-02 L1_spectral:7.9247e-04 L2_spectral:7.9484e-04 L3_spectral:7.9623e-04 L4_spectral:7.9753e-04 L5_spectral:7.8960e-04 L6_spectral:7.9746e-04 L7_spectral:7.9430e-04 L8_spectral:7.9110e-04 L9_spectral:7.9352e-04 L10_spectral:7.8340e-04 L11_spectral:7.8880e-04 L12_spectral:7.6335e-04 train_time:244168ms step_avg:43.60ms +[2025-09-11 12:05:03] [Rank 0] step:5601/10000 train_time:245995ms step_avg:43.92ms +[2025-09-11 12:05:03] [Rank 0] step:5601/10000 train_time:245995ms step_avg:43.92ms +[2025-09-11 12:05:04] [Rank 0] step:5621/10000 train_time:246716ms step_avg:43.89ms +[2025-09-11 12:05:04] [Rank 0] step:5621/10000 train_time:246716ms step_avg:43.89ms +[2025-09-11 12:05:05] [Rank 0] step:5641/10000 train_time:247460ms step_avg:43.87ms +[2025-09-11 12:05:05] [Rank 0] step:5641/10000 train_time:247460ms step_avg:43.87ms +[2025-09-11 12:05:05] [Rank 0] step:5661/10000 train_time:248150ms step_avg:43.84ms +[2025-09-11 12:05:05] [Rank 0] step:5661/10000 train_time:248150ms step_avg:43.84ms +[2025-09-11 12:05:06] [Rank 0] step:5681/10000 train_time:248841ms step_avg:43.80ms +[2025-09-11 12:05:06] [Rank 0] step:5681/10000 
train_time:248841ms step_avg:43.80ms +[2025-09-11 12:05:07] [Rank 0] step:5701/10000 train_time:249534ms step_avg:43.77ms +[2025-09-11 12:05:07] [Rank 0] step:5701/10000 train_time:249534ms step_avg:43.77ms +[2025-09-11 12:05:08] [Rank 0] step:5721/10000 train_time:250222ms step_avg:43.74ms +[2025-09-11 12:05:08] [Rank 0] step:5721/10000 train_time:250222ms step_avg:43.74ms +[2025-09-11 12:05:08] [Rank 0] step:5741/10000 train_time:250914ms step_avg:43.71ms +[2025-09-11 12:05:08] [Rank 0] step:5741/10000 train_time:250914ms step_avg:43.71ms +[2025-09-11 12:05:09] [Rank 0] step:5761/10000 train_time:251605ms step_avg:43.67ms +[2025-09-11 12:05:09] [Rank 0] step:5761/10000 train_time:251605ms step_avg:43.67ms +[2025-09-11 12:05:10] [Rank 0] step:5781/10000 train_time:252297ms step_avg:43.64ms +[2025-09-11 12:05:10] [Rank 0] step:5781/10000 train_time:252297ms step_avg:43.64ms +[2025-09-11 12:05:10] [Rank 0] step:5801/10000 train_time:252989ms step_avg:43.61ms +[2025-09-11 12:05:10] [Rank 0] step:5801/10000 train_time:252989ms step_avg:43.61ms +[2025-09-11 12:05:11] [Rank 0] step:5821/10000 train_time:253679ms step_avg:43.58ms +[2025-09-11 12:05:11] [Rank 0] step:5821/10000 train_time:253679ms step_avg:43.58ms +[2025-09-11 12:05:12] [Rank 0] step:5841/10000 train_time:254372ms step_avg:43.55ms +[2025-09-11 12:05:12] [Rank 0] step:5841/10000 train_time:254372ms step_avg:43.55ms +[2025-09-11 12:05:12] [Rank 0] step:5861/10000 train_time:255062ms step_avg:43.52ms +[2025-09-11 12:05:12] [Rank 0] step:5861/10000 train_time:255062ms step_avg:43.52ms +[2025-09-11 12:05:13] [Rank 0] step:5881/10000 train_time:255753ms step_avg:43.49ms +[2025-09-11 12:05:13] [Rank 0] step:5881/10000 train_time:255753ms step_avg:43.49ms +[2025-09-11 12:05:14] [Rank 0] step:5901/10000 train_time:256442ms step_avg:43.46ms +[2025-09-11 12:05:14] [Rank 0] step:5901/10000 train_time:256442ms step_avg:43.46ms +[2025-09-11 12:05:14] [Rank 0] step:5921/10000 train_time:257135ms step_avg:43.43ms 
+[2025-09-11 12:05:14] [Rank 0] step:5921/10000 train_time:257135ms step_avg:43.43ms +[2025-09-11 12:05:15] [Rank 0] step:5941/10000 train_time:257827ms step_avg:43.40ms +[2025-09-11 12:05:15] [Rank 0] step:5941/10000 train_time:257827ms step_avg:43.40ms +[2025-09-11 12:05:16] [Rank 0] step:5961/10000 train_time:258519ms step_avg:43.37ms +[2025-09-11 12:05:16] [Rank 0] step:5961/10000 train_time:258519ms step_avg:43.37ms +[2025-09-11 12:05:17] [Rank 0] step:5981/10000 train_time:259211ms step_avg:43.34ms +[2025-09-11 12:05:17] [Rank 0] step:5981/10000 train_time:259211ms step_avg:43.34ms +[2025-09-11 12:05:17] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 12:05:17] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 12:05:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 12:05:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 12:05:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 12:05:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 12:05:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:05:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:05:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 12:05:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 12:05:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 12:05:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 12:05:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 12:05:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 12:05:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 12:05:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 12:05:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 12:05:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 12:05:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 12:05:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 12:05:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 12:05:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 12:05:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 12:05:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 12:05:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 12:05:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 12:05:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 12:05:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 12:05:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 12:05:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 12:05:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 12:05:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 12:05:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 12:05:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 12:05:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 12:05:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 12:05:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 12:05:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 12:05:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 12:05:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 12:05:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 12:05:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 12:05:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 12:05:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 12:05:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 12:05:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:05:28] [Rank 0] PRINT: step:6000/10000 val_loss:5.3929 total_sharp:1.3794e-02 L1_sharp:1.1967e-01 L2_sharp:1.7331e-01 L3_sharp:2.2423e-01 L4_sharp:3.2410e-01 L5_sharp:5.0152e-01 L6_sharp:6.8113e-01 L7_sharp:9.4190e-01 L8_sharp:1.3617e+00 L9_sharp:2.3367e+00 L10_sharp:3.7274e+00 L11_sharp:2.9611e+00 L12_sharp:2.6292e+00 total_fnorm:4.1250e+00 total_l1_linf:3.5200e+03 total_spectral:2.0625e+00 L1_fnorm:4.6143e-02 L2_fnorm:4.6631e-02 L3_fnorm:4.6631e-02 L4_fnorm:4.6631e-02 L5_fnorm:4.6631e-02 L6_fnorm:4.6631e-02 L7_fnorm:4.6631e-02 L8_fnorm:4.6387e-02 L9_fnorm:4.6875e-02 L10_fnorm:4.6143e-02 L11_fnorm:4.6631e-02 L12_fnorm:4.6631e-02 L1_l1linf:1.0620e-02 L2_l1linf:1.0803e-02 L3_l1linf:1.1108e-02 L4_l1linf:1.1292e-02 L5_l1linf:1.1719e-02 L6_l1linf:1.2024e-02 L7_l1linf:1.2573e-02 L8_l1linf:1.2756e-02 L9_l1linf:1.2939e-02 L10_l1linf:1.3184e-02 L11_l1linf:1.3184e-02 L12_l1linf:1.3672e-02 L1_spectral:7.9466e-04 L2_spectral:7.9683e-04 L3_spectral:7.9739e-04 L4_spectral:7.9361e-04 L5_spectral:7.9107e-04 L6_spectral:7.9602e-04 L7_spectral:7.9373e-04 L8_spectral:7.9126e-04 L9_spectral:7.9365e-04 L10_spectral:7.8674e-04 L11_spectral:7.9829e-04 L12_spectral:7.7577e-04 train_time:259886ms step_avg:43.31ms +[2025-09-11 12:05:28] [Rank 0] PRINT: step:6000/10000 val_loss:5.3929 total_sharp:1.3794e-02 L1_sharp:1.1967e-01 L2_sharp:1.7331e-01 L3_sharp:2.2423e-01 L4_sharp:3.2410e-01 L5_sharp:5.0152e-01 L6_sharp:6.8113e-01 L7_sharp:9.4190e-01 L8_sharp:1.3617e+00 L9_sharp:2.3367e+00 L10_sharp:3.7274e+00 L11_sharp:2.9611e+00 L12_sharp:2.6292e+00 total_fnorm:4.1250e+00 total_l1_linf:3.5200e+03 total_spectral:2.0625e+00 L1_fnorm:4.6143e-02 L2_fnorm:4.6631e-02 L3_fnorm:4.6631e-02 L4_fnorm:4.6631e-02 L5_fnorm:4.6631e-02 L6_fnorm:4.6631e-02 L7_fnorm:4.6631e-02 L8_fnorm:4.6387e-02 L9_fnorm:4.6875e-02 L10_fnorm:4.6143e-02 L11_fnorm:4.6631e-02 L12_fnorm:4.6631e-02 L1_l1linf:1.0620e-02 L2_l1linf:1.0803e-02 L3_l1linf:1.1108e-02 L4_l1linf:1.1292e-02 L5_l1linf:1.1719e-02 
L6_l1linf:1.2024e-02 L7_l1linf:1.2573e-02 L8_l1linf:1.2756e-02 L9_l1linf:1.2939e-02 L10_l1linf:1.3184e-02 L11_l1linf:1.3184e-02 L12_l1linf:1.3672e-02 L1_spectral:7.9466e-04 L2_spectral:7.9683e-04 L3_spectral:7.9739e-04 L4_spectral:7.9361e-04 L5_spectral:7.9107e-04 L6_spectral:7.9602e-04 L7_spectral:7.9373e-04 L8_spectral:7.9126e-04 L9_spectral:7.9365e-04 L10_spectral:7.8674e-04 L11_spectral:7.9829e-04 L12_spectral:7.7577e-04 train_time:259886ms step_avg:43.31ms +[2025-09-11 12:05:30] [Rank 0] step:6001/10000 train_time:261705ms step_avg:43.61ms +[2025-09-11 12:05:30] [Rank 0] step:6001/10000 train_time:261705ms step_avg:43.61ms +[2025-09-11 12:05:30] [Rank 0] step:6021/10000 train_time:262419ms step_avg:43.58ms +[2025-09-11 12:05:30] [Rank 0] step:6021/10000 train_time:262419ms step_avg:43.58ms +[2025-09-11 12:05:31] [Rank 0] step:6041/10000 train_time:263114ms step_avg:43.55ms +[2025-09-11 12:05:31] [Rank 0] step:6041/10000 train_time:263114ms step_avg:43.55ms +[2025-09-11 12:05:32] [Rank 0] step:6061/10000 train_time:263806ms step_avg:43.53ms +[2025-09-11 12:05:32] [Rank 0] step:6061/10000 train_time:263806ms step_avg:43.53ms +[2025-09-11 12:05:32] [Rank 0] step:6081/10000 train_time:264499ms step_avg:43.50ms +[2025-09-11 12:05:32] [Rank 0] step:6081/10000 train_time:264499ms step_avg:43.50ms +[2025-09-11 12:05:33] [Rank 0] step:6101/10000 train_time:265190ms step_avg:43.47ms +[2025-09-11 12:05:33] [Rank 0] step:6101/10000 train_time:265190ms step_avg:43.47ms +[2025-09-11 12:05:34] [Rank 0] step:6121/10000 train_time:265884ms step_avg:43.44ms +[2025-09-11 12:05:34] [Rank 0] step:6121/10000 train_time:265884ms step_avg:43.44ms +[2025-09-11 12:05:35] [Rank 0] step:6141/10000 train_time:266577ms step_avg:43.41ms +[2025-09-11 12:05:35] [Rank 0] step:6141/10000 train_time:266577ms step_avg:43.41ms +[2025-09-11 12:05:35] [Rank 0] step:6161/10000 train_time:267269ms step_avg:43.38ms +[2025-09-11 12:05:35] [Rank 0] step:6161/10000 train_time:267269ms step_avg:43.38ms 
+[2025-09-11 12:05:36] [Rank 0] step:6181/10000 train_time:267960ms step_avg:43.35ms +[2025-09-11 12:05:36] [Rank 0] step:6181/10000 train_time:267960ms step_avg:43.35ms +[2025-09-11 12:05:37] [Rank 0] step:6201/10000 train_time:268653ms step_avg:43.32ms +[2025-09-11 12:05:37] [Rank 0] step:6201/10000 train_time:268653ms step_avg:43.32ms +[2025-09-11 12:05:37] [Rank 0] step:6221/10000 train_time:269347ms step_avg:43.30ms +[2025-09-11 12:05:37] [Rank 0] step:6221/10000 train_time:269347ms step_avg:43.30ms +[2025-09-11 12:05:38] [Rank 0] step:6241/10000 train_time:270040ms step_avg:43.27ms +[2025-09-11 12:05:38] [Rank 0] step:6241/10000 train_time:270040ms step_avg:43.27ms +[2025-09-11 12:05:39] [Rank 0] step:6261/10000 train_time:270729ms step_avg:43.24ms +[2025-09-11 12:05:39] [Rank 0] step:6261/10000 train_time:270729ms step_avg:43.24ms +[2025-09-11 12:05:39] [Rank 0] step:6281/10000 train_time:271423ms step_avg:43.21ms +[2025-09-11 12:05:39] [Rank 0] step:6281/10000 train_time:271423ms step_avg:43.21ms +[2025-09-11 12:05:40] [Rank 0] step:6301/10000 train_time:272113ms step_avg:43.19ms +[2025-09-11 12:05:40] [Rank 0] step:6301/10000 train_time:272113ms step_avg:43.19ms +[2025-09-11 12:05:41] [Rank 0] step:6321/10000 train_time:272808ms step_avg:43.16ms +[2025-09-11 12:05:41] [Rank 0] step:6321/10000 train_time:272808ms step_avg:43.16ms +[2025-09-11 12:05:41] [Rank 0] step:6341/10000 train_time:273501ms step_avg:43.13ms +[2025-09-11 12:05:41] [Rank 0] step:6341/10000 train_time:273501ms step_avg:43.13ms +[2025-09-11 12:05:42] [Rank 0] step:6361/10000 train_time:274193ms step_avg:43.11ms +[2025-09-11 12:05:42] [Rank 0] step:6361/10000 train_time:274193ms step_avg:43.11ms +[2025-09-11 12:05:43] [Rank 0] step:6381/10000 train_time:274885ms step_avg:43.08ms +[2025-09-11 12:05:43] [Rank 0] step:6381/10000 train_time:274885ms step_avg:43.08ms +[2025-09-11 12:05:44] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 12:05:44] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 12:05:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 12:05:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 12:05:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 12:05:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 12:05:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:05:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:05:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 12:05:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 12:05:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 12:05:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 12:05:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 12:05:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 12:05:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 12:05:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 12:05:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 12:05:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 12:05:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 12:05:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 12:05:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 12:05:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 12:05:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 12:05:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 12:05:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 12:05:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 12:05:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 12:05:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 12:05:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 12:05:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 12:05:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 12:05:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 12:05:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 12:05:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 12:05:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 12:05:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 12:05:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 12:05:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 12:05:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 12:05:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 12:05:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 12:05:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 12:05:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 12:05:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 12:05:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 12:05:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 12:05:54] [Rank 0] PRINT: step:6400/10000 val_loss:5.3567 total_sharp:1.4681e-02 L1_sharp:1.7289e-01 L2_sharp:2.0689e-01 L3_sharp:2.6019e-01 L4_sharp:3.3543e-01 L5_sharp:4.6567e-01 L6_sharp:6.5211e-01 L7_sharp:1.0512e+00 L8_sharp:1.5356e+00 L9_sharp:2.8966e+00 L10_sharp:3.9333e+00 L11_sharp:2.6305e+00 L12_sharp:2.3517e+00 total_fnorm:3.6406e+00 total_l1_linf:2.9440e+03 total_spectral:1.8203e+00 L1_fnorm:4.0527e-02 L2_fnorm:4.0527e-02 L3_fnorm:4.0771e-02 L4_fnorm:4.0771e-02 L5_fnorm:4.0527e-02 L6_fnorm:4.1016e-02 L7_fnorm:4.1016e-02 L8_fnorm:4.0527e-02 L9_fnorm:4.1016e-02 L10_fnorm:4.0283e-02 L11_fnorm:4.0527e-02 L12_fnorm:4.0283e-02 L1_l1linf:9.2773e-03 L2_l1linf:9.5215e-03 L3_l1linf:9.4604e-03 L4_l1linf:9.9487e-03 L5_l1linf:1.0010e-02 L6_l1linf:1.0559e-02 L7_l1linf:1.0803e-02 L8_l1linf:1.1047e-02 L9_l1linf:1.1292e-02 L10_l1linf:1.1414e-02 L11_l1linf:1.1475e-02 L12_l1linf:1.1230e-02 L1_spectral:7.2049e-04 L2_spectral:7.2574e-04 L3_spectral:7.1906e-04 L4_spectral:7.2713e-04 L5_spectral:7.1503e-04 L6_spectral:7.1846e-04 L7_spectral:7.1419e-04 L8_spectral:7.1277e-04 L9_spectral:7.1679e-04 L10_spectral:7.0818e-04 L11_spectral:7.1423e-04 L12_spectral:6.8595e-04 train_time:275557ms step_avg:43.06ms +[2025-09-11 12:05:54] [Rank 0] PRINT: step:6400/10000 
val_loss:5.3567 total_sharp:1.4681e-02 L1_sharp:1.7289e-01 L2_sharp:2.0689e-01 L3_sharp:2.6019e-01 L4_sharp:3.3543e-01 L5_sharp:4.6567e-01 L6_sharp:6.5211e-01 L7_sharp:1.0512e+00 L8_sharp:1.5356e+00 L9_sharp:2.8966e+00 L10_sharp:3.9333e+00 L11_sharp:2.6305e+00 L12_sharp:2.3517e+00 total_fnorm:3.6406e+00 total_l1_linf:2.9440e+03 total_spectral:1.8203e+00 L1_fnorm:4.0527e-02 L2_fnorm:4.0527e-02 L3_fnorm:4.0771e-02 L4_fnorm:4.0771e-02 L5_fnorm:4.0527e-02 L6_fnorm:4.1016e-02 L7_fnorm:4.1016e-02 L8_fnorm:4.0527e-02 L9_fnorm:4.1016e-02 L10_fnorm:4.0283e-02 L11_fnorm:4.0527e-02 L12_fnorm:4.0283e-02 L1_l1linf:9.2773e-03 L2_l1linf:9.5215e-03 L3_l1linf:9.4604e-03 L4_l1linf:9.9487e-03 L5_l1linf:1.0010e-02 L6_l1linf:1.0559e-02 L7_l1linf:1.0803e-02 L8_l1linf:1.1047e-02 L9_l1linf:1.1292e-02 L10_l1linf:1.1414e-02 L11_l1linf:1.1475e-02 L12_l1linf:1.1230e-02 L1_spectral:7.2049e-04 L2_spectral:7.2574e-04 L3_spectral:7.1906e-04 L4_spectral:7.2713e-04 L5_spectral:7.1503e-04 L6_spectral:7.1846e-04 L7_spectral:7.1419e-04 L8_spectral:7.1277e-04 L9_spectral:7.1679e-04 L10_spectral:7.0818e-04 L11_spectral:7.1423e-04 L12_spectral:6.8595e-04 train_time:275557ms step_avg:43.06ms +[2025-09-11 12:05:56] [Rank 0] step:6401/10000 train_time:277362ms step_avg:43.33ms +[2025-09-11 12:05:56] [Rank 0] step:6401/10000 train_time:277362ms step_avg:43.33ms +[2025-09-11 12:05:57] [Rank 0] step:6421/10000 train_time:278087ms step_avg:43.31ms +[2025-09-11 12:05:57] [Rank 0] step:6421/10000 train_time:278087ms step_avg:43.31ms +[2025-09-11 12:05:57] [Rank 0] step:6441/10000 train_time:278780ms step_avg:43.28ms +[2025-09-11 12:05:57] [Rank 0] step:6441/10000 train_time:278780ms step_avg:43.28ms +[2025-09-11 12:05:58] [Rank 0] step:6461/10000 train_time:279473ms step_avg:43.26ms +[2025-09-11 12:05:58] [Rank 0] step:6461/10000 train_time:279473ms step_avg:43.26ms +[2025-09-11 12:05:59] [Rank 0] step:6481/10000 train_time:280168ms step_avg:43.23ms +[2025-09-11 12:05:59] [Rank 0] step:6481/10000 
train_time:280168ms step_avg:43.23ms +[2025-09-11 12:05:59] [Rank 0] step:6501/10000 train_time:280863ms step_avg:43.20ms +[2025-09-11 12:05:59] [Rank 0] step:6501/10000 train_time:280863ms step_avg:43.20ms +[2025-09-11 12:06:00] [Rank 0] step:6521/10000 train_time:281557ms step_avg:43.18ms +[2025-09-11 12:06:00] [Rank 0] step:6521/10000 train_time:281557ms step_avg:43.18ms +[2025-09-11 12:06:01] [Rank 0] step:6541/10000 train_time:282249ms step_avg:43.15ms +[2025-09-11 12:06:01] [Rank 0] step:6541/10000 train_time:282249ms step_avg:43.15ms +[2025-09-11 12:06:01] [Rank 0] step:6561/10000 train_time:282942ms step_avg:43.12ms +[2025-09-11 12:06:01] [Rank 0] step:6561/10000 train_time:282942ms step_avg:43.12ms +[2025-09-11 12:06:02] [Rank 0] step:6581/10000 train_time:283637ms step_avg:43.10ms +[2025-09-11 12:06:02] [Rank 0] step:6581/10000 train_time:283637ms step_avg:43.10ms +[2025-09-11 12:06:03] [Rank 0] step:6601/10000 train_time:284331ms step_avg:43.07ms +[2025-09-11 12:06:03] [Rank 0] step:6601/10000 train_time:284331ms step_avg:43.07ms +[2025-09-11 12:06:04] [Rank 0] step:6621/10000 train_time:285023ms step_avg:43.05ms +[2025-09-11 12:06:04] [Rank 0] step:6621/10000 train_time:285023ms step_avg:43.05ms +[2025-09-11 12:06:04] [Rank 0] step:6641/10000 train_time:285718ms step_avg:43.02ms +[2025-09-11 12:06:04] [Rank 0] step:6641/10000 train_time:285718ms step_avg:43.02ms +[2025-09-11 12:06:05] [Rank 0] step:6661/10000 train_time:286412ms step_avg:43.00ms +[2025-09-11 12:06:05] [Rank 0] step:6661/10000 train_time:286412ms step_avg:43.00ms +[2025-09-11 12:06:06] [Rank 0] step:6681/10000 train_time:287111ms step_avg:42.97ms +[2025-09-11 12:06:06] [Rank 0] step:6681/10000 train_time:287111ms step_avg:42.97ms +[2025-09-11 12:06:06] [Rank 0] step:6701/10000 train_time:287812ms step_avg:42.95ms +[2025-09-11 12:06:06] [Rank 0] step:6701/10000 train_time:287812ms step_avg:42.95ms +[2025-09-11 12:06:07] [Rank 0] step:6721/10000 train_time:288513ms step_avg:42.93ms 
+[2025-09-11 12:06:07] [Rank 0] step:6721/10000 train_time:288513ms step_avg:42.93ms +[2025-09-11 12:06:08] [Rank 0] step:6741/10000 train_time:289213ms step_avg:42.90ms +[2025-09-11 12:06:08] [Rank 0] step:6741/10000 train_time:289213ms step_avg:42.90ms +[2025-09-11 12:06:08] [Rank 0] step:6761/10000 train_time:289911ms step_avg:42.88ms +[2025-09-11 12:06:08] [Rank 0] step:6761/10000 train_time:289911ms step_avg:42.88ms +[2025-09-11 12:06:09] [Rank 0] step:6781/10000 train_time:290612ms step_avg:42.86ms +[2025-09-11 12:06:09] [Rank 0] step:6781/10000 train_time:290612ms step_avg:42.86ms +[2025-09-11 12:06:10] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 12:06:10] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 12:06:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 12:06:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 12:06:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 12:06:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 12:06:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:06:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:06:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 12:06:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 12:06:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 12:06:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 12:06:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 12:06:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 12:06:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 12:06:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 12:06:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 12:06:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 12:06:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 12:06:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 12:06:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 12:06:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 12:06:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 12:06:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 12:06:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 12:06:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 12:06:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 12:06:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 12:06:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 12:06:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 12:06:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 12:06:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 12:06:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 12:06:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 12:06:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 12:06:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 12:06:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 12:06:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 12:06:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 12:06:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 12:06:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 12:06:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 12:06:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 12:06:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 12:06:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 12:06:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:06:20] [Rank 0] PRINT: step:6800/10000 val_loss:5.3191 total_sharp:1.3094e-02 L1_sharp:1.0410e-01 L2_sharp:1.5040e-01 L3_sharp:2.0239e-01 L4_sharp:2.6287e-01 L5_sharp:4.0669e-01 L6_sharp:6.2259e-01 L7_sharp:8.8743e-01 L8_sharp:1.4013e+00 L9_sharp:2.6915e+00 L10_sharp:4.0569e+00 L11_sharp:3.2930e+00 L12_sharp:7.1272e+00 total_fnorm:3.3438e+00 total_l1_linf:2.5760e+03 total_spectral:1.6719e+00 L1_fnorm:3.3936e-02 L2_fnorm:3.4180e-02 L3_fnorm:3.4424e-02 L4_fnorm:3.4668e-02 L5_fnorm:3.4424e-02 L6_fnorm:3.4424e-02 L7_fnorm:3.4424e-02 L8_fnorm:3.4424e-02 L9_fnorm:3.4668e-02 L10_fnorm:3.4180e-02 L11_fnorm:3.4424e-02 L12_fnorm:3.4180e-02 L1_l1linf:6.5002e-03 L2_l1linf:6.8665e-03 L3_l1linf:7.0801e-03 L4_l1linf:7.5073e-03 L5_l1linf:7.4158e-03 L6_l1linf:7.7209e-03 L7_l1linf:7.9956e-03 L8_l1linf:8.1787e-03 L9_l1linf:8.5449e-03 L10_l1linf:8.9111e-03 L11_l1linf:8.9722e-03 L12_l1linf:9.4604e-03 L1_spectral:6.4496e-04 L2_spectral:6.4024e-04 L3_spectral:6.4646e-04 L4_spectral:6.4424e-04 L5_spectral:6.3837e-04 L6_spectral:6.3933e-04 L7_spectral:6.3490e-04 L8_spectral:6.3551e-04 L9_spectral:6.3265e-04 L10_spectral:6.2641e-04 L11_spectral:6.2840e-04 L12_spectral:5.9748e-04 train_time:291295ms step_avg:42.84ms +[2025-09-11 12:06:20] [Rank 0] PRINT: step:6800/10000 val_loss:5.3191 total_sharp:1.3094e-02 L1_sharp:1.0410e-01 L2_sharp:1.5040e-01 L3_sharp:2.0239e-01 L4_sharp:2.6287e-01 L5_sharp:4.0669e-01 L6_sharp:6.2259e-01 L7_sharp:8.8743e-01 L8_sharp:1.4013e+00 L9_sharp:2.6915e+00 L10_sharp:4.0569e+00 L11_sharp:3.2930e+00 L12_sharp:7.1272e+00 total_fnorm:3.3438e+00 total_l1_linf:2.5760e+03 total_spectral:1.6719e+00 L1_fnorm:3.3936e-02 L2_fnorm:3.4180e-02 L3_fnorm:3.4424e-02 L4_fnorm:3.4668e-02 L5_fnorm:3.4424e-02 L6_fnorm:3.4424e-02 L7_fnorm:3.4424e-02 L8_fnorm:3.4424e-02 L9_fnorm:3.4668e-02 L10_fnorm:3.4180e-02 L11_fnorm:3.4424e-02 L12_fnorm:3.4180e-02 L1_l1linf:6.5002e-03 L2_l1linf:6.8665e-03 L3_l1linf:7.0801e-03 L4_l1linf:7.5073e-03 L5_l1linf:7.4158e-03 
L6_l1linf:7.7209e-03 L7_l1linf:7.9956e-03 L8_l1linf:8.1787e-03 L9_l1linf:8.5449e-03 L10_l1linf:8.9111e-03 L11_l1linf:8.9722e-03 L12_l1linf:9.4604e-03 L1_spectral:6.4496e-04 L2_spectral:6.4024e-04 L3_spectral:6.4646e-04 L4_spectral:6.4424e-04 L5_spectral:6.3837e-04 L6_spectral:6.3933e-04 L7_spectral:6.3490e-04 L8_spectral:6.3551e-04 L9_spectral:6.3265e-04 L10_spectral:6.2641e-04 L11_spectral:6.2840e-04 L12_spectral:5.9748e-04 train_time:291295ms step_avg:42.84ms +[2025-09-11 12:06:22] [Rank 0] step:6801/10000 train_time:293106ms step_avg:43.10ms +[2025-09-11 12:06:22] [Rank 0] step:6801/10000 train_time:293106ms step_avg:43.10ms +[2025-09-11 12:06:23] [Rank 0] step:6821/10000 train_time:293819ms step_avg:43.08ms +[2025-09-11 12:06:23] [Rank 0] step:6821/10000 train_time:293819ms step_avg:43.08ms +[2025-09-11 12:06:24] [Rank 0] step:6841/10000 train_time:294526ms step_avg:43.05ms +[2025-09-11 12:06:24] [Rank 0] step:6841/10000 train_time:294526ms step_avg:43.05ms +[2025-09-11 12:06:24] [Rank 0] step:6861/10000 train_time:295228ms step_avg:43.03ms +[2025-09-11 12:06:24] [Rank 0] step:6861/10000 train_time:295228ms step_avg:43.03ms +[2025-09-11 12:06:25] [Rank 0] step:6881/10000 train_time:295931ms step_avg:43.01ms +[2025-09-11 12:06:25] [Rank 0] step:6881/10000 train_time:295931ms step_avg:43.01ms +[2025-09-11 12:06:26] [Rank 0] step:6901/10000 train_time:296632ms step_avg:42.98ms +[2025-09-11 12:06:26] [Rank 0] step:6901/10000 train_time:296632ms step_avg:42.98ms +[2025-09-11 12:06:26] [Rank 0] step:6921/10000 train_time:297332ms step_avg:42.96ms +[2025-09-11 12:06:26] [Rank 0] step:6921/10000 train_time:297332ms step_avg:42.96ms +[2025-09-11 12:06:27] [Rank 0] step:6941/10000 train_time:298033ms step_avg:42.94ms +[2025-09-11 12:06:27] [Rank 0] step:6941/10000 train_time:298033ms step_avg:42.94ms +[2025-09-11 12:06:28] [Rank 0] step:6961/10000 train_time:298736ms step_avg:42.92ms +[2025-09-11 12:06:28] [Rank 0] step:6961/10000 train_time:298736ms step_avg:42.92ms 
+[2025-09-11 12:06:29] [Rank 0] step:6981/10000 train_time:299439ms step_avg:42.89ms +[2025-09-11 12:06:29] [Rank 0] step:6981/10000 train_time:299439ms step_avg:42.89ms +[2025-09-11 12:06:29] [Rank 0] step:7001/10000 train_time:300142ms step_avg:42.87ms +[2025-09-11 12:06:29] [Rank 0] step:7001/10000 train_time:300142ms step_avg:42.87ms +[2025-09-11 12:06:30] [Rank 0] step:7021/10000 train_time:300843ms step_avg:42.85ms +[2025-09-11 12:06:30] [Rank 0] step:7021/10000 train_time:300843ms step_avg:42.85ms +[2025-09-11 12:06:31] [Rank 0] step:7041/10000 train_time:301543ms step_avg:42.83ms +[2025-09-11 12:06:31] [Rank 0] step:7041/10000 train_time:301543ms step_avg:42.83ms +[2025-09-11 12:06:31] [Rank 0] step:7061/10000 train_time:302246ms step_avg:42.81ms +[2025-09-11 12:06:31] [Rank 0] step:7061/10000 train_time:302246ms step_avg:42.81ms +[2025-09-11 12:06:32] [Rank 0] step:7081/10000 train_time:302947ms step_avg:42.78ms +[2025-09-11 12:06:32] [Rank 0] step:7081/10000 train_time:302947ms step_avg:42.78ms +[2025-09-11 12:06:33] [Rank 0] step:7101/10000 train_time:303648ms step_avg:42.76ms +[2025-09-11 12:06:33] [Rank 0] step:7101/10000 train_time:303648ms step_avg:42.76ms +[2025-09-11 12:06:33] [Rank 0] step:7121/10000 train_time:304351ms step_avg:42.74ms +[2025-09-11 12:06:33] [Rank 0] step:7121/10000 train_time:304351ms step_avg:42.74ms +[2025-09-11 12:06:34] [Rank 0] step:7141/10000 train_time:305053ms step_avg:42.72ms +[2025-09-11 12:06:34] [Rank 0] step:7141/10000 train_time:305053ms step_avg:42.72ms +[2025-09-11 12:06:35] [Rank 0] step:7161/10000 train_time:305756ms step_avg:42.70ms +[2025-09-11 12:06:35] [Rank 0] step:7161/10000 train_time:305756ms step_avg:42.70ms +[2025-09-11 12:06:36] [Rank 0] step:7181/10000 train_time:306456ms step_avg:42.68ms +[2025-09-11 12:06:36] [Rank 0] step:7181/10000 train_time:306456ms step_avg:42.68ms +[2025-09-11 12:06:36] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 12:06:36] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 12:06:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 12:06:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 12:06:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 12:06:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 12:06:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:06:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:06:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 12:06:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 12:06:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 12:06:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 12:06:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 12:06:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 12:06:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 12:06:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 12:06:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 12:06:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 12:06:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 12:06:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 12:06:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 12:06:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 12:06:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 12:06:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 12:06:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 12:06:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 12:06:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 12:06:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 12:06:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 12:06:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 12:06:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 12:06:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 12:06:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 12:06:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 12:06:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 12:06:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 12:06:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 12:06:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 12:06:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 12:06:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 12:06:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 12:06:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 12:06:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 12:06:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 12:06:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 12:06:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 12:06:47] [Rank 0] PRINT: step:7200/10000 val_loss:5.2894 total_sharp:1.2168e-02 L1_sharp:9.8424e-02 L2_sharp:1.1942e-01 L3_sharp:1.6852e-01 L4_sharp:2.6023e-01 L5_sharp:3.7647e-01 L6_sharp:6.8897e-01 L7_sharp:9.7462e-01 L8_sharp:1.5028e+00 L9_sharp:2.6064e+00 L10_sharp:3.5611e+00 L11_sharp:2.8651e+00 L12_sharp:3.2324e+00 total_fnorm:2.7031e+00 total_l1_linf:1.8880e+03 total_spectral:1.3516e+00 L1_fnorm:2.8564e-02 L2_fnorm:2.8809e-02 L3_fnorm:2.8809e-02 L4_fnorm:2.9053e-02 L5_fnorm:2.9053e-02 L6_fnorm:2.9053e-02 L7_fnorm:2.9053e-02 L8_fnorm:2.9053e-02 L9_fnorm:2.9175e-02 L10_fnorm:2.8687e-02 L11_fnorm:2.8931e-02 L12_fnorm:2.8442e-02 L1_l1linf:5.1270e-03 L2_l1linf:5.3711e-03 L3_l1linf:5.5237e-03 L4_l1linf:5.8899e-03 L5_l1linf:5.8594e-03 L6_l1linf:6.1646e-03 L7_l1linf:6.5613e-03 L8_l1linf:6.6223e-03 L9_l1linf:6.8970e-03 L10_l1linf:7.1106e-03 L11_l1linf:7.2632e-03 L12_l1linf:7.5073e-03 L1_spectral:5.6131e-04 L2_spectral:5.6060e-04 L3_spectral:5.6330e-04 L4_spectral:5.5942e-04 L5_spectral:5.6226e-04 L6_spectral:5.5976e-04 L7_spectral:5.5002e-04 L8_spectral:5.5452e-04 L9_spectral:5.4768e-04 L10_spectral:5.4255e-04 L11_spectral:5.4000e-04 L12_spectral:5.0080e-04 train_time:307138ms step_avg:42.66ms +[2025-09-11 12:06:47] [Rank 0] PRINT: step:7200/10000 
val_loss:5.2894 total_sharp:1.2168e-02 L1_sharp:9.8424e-02 L2_sharp:1.1942e-01 L3_sharp:1.6852e-01 L4_sharp:2.6023e-01 L5_sharp:3.7647e-01 L6_sharp:6.8897e-01 L7_sharp:9.7462e-01 L8_sharp:1.5028e+00 L9_sharp:2.6064e+00 L10_sharp:3.5611e+00 L11_sharp:2.8651e+00 L12_sharp:3.2324e+00 total_fnorm:2.7031e+00 total_l1_linf:1.8880e+03 total_spectral:1.3516e+00 L1_fnorm:2.8564e-02 L2_fnorm:2.8809e-02 L3_fnorm:2.8809e-02 L4_fnorm:2.9053e-02 L5_fnorm:2.9053e-02 L6_fnorm:2.9053e-02 L7_fnorm:2.9053e-02 L8_fnorm:2.9053e-02 L9_fnorm:2.9175e-02 L10_fnorm:2.8687e-02 L11_fnorm:2.8931e-02 L12_fnorm:2.8442e-02 L1_l1linf:5.1270e-03 L2_l1linf:5.3711e-03 L3_l1linf:5.5237e-03 L4_l1linf:5.8899e-03 L5_l1linf:5.8594e-03 L6_l1linf:6.1646e-03 L7_l1linf:6.5613e-03 L8_l1linf:6.6223e-03 L9_l1linf:6.8970e-03 L10_l1linf:7.1106e-03 L11_l1linf:7.2632e-03 L12_l1linf:7.5073e-03 L1_spectral:5.6131e-04 L2_spectral:5.6060e-04 L3_spectral:5.6330e-04 L4_spectral:5.5942e-04 L5_spectral:5.6226e-04 L6_spectral:5.5976e-04 L7_spectral:5.5002e-04 L8_spectral:5.5452e-04 L9_spectral:5.4768e-04 L10_spectral:5.4255e-04 L11_spectral:5.4000e-04 L12_spectral:5.0080e-04 train_time:307138ms step_avg:42.66ms +[2025-09-11 12:06:49] [Rank 0] step:7201/10000 train_time:308948ms step_avg:42.90ms +[2025-09-11 12:06:49] [Rank 0] step:7201/10000 train_time:308948ms step_avg:42.90ms +[2025-09-11 12:06:49] [Rank 0] step:7221/10000 train_time:309678ms step_avg:42.89ms +[2025-09-11 12:06:49] [Rank 0] step:7221/10000 train_time:309678ms step_avg:42.89ms +[2025-09-11 12:06:50] [Rank 0] step:7241/10000 train_time:310381ms step_avg:42.86ms +[2025-09-11 12:06:50] [Rank 0] step:7241/10000 train_time:310381ms step_avg:42.86ms +[2025-09-11 12:06:51] [Rank 0] step:7261/10000 train_time:311085ms step_avg:42.84ms +[2025-09-11 12:06:51] [Rank 0] step:7261/10000 train_time:311085ms step_avg:42.84ms +[2025-09-11 12:06:51] [Rank 0] step:7281/10000 train_time:311793ms step_avg:42.82ms +[2025-09-11 12:06:51] [Rank 0] step:7281/10000 
train_time:311793ms step_avg:42.82ms +[2025-09-11 12:06:52] [Rank 0] step:7301/10000 train_time:312758ms step_avg:42.84ms +[2025-09-11 12:06:52] [Rank 0] step:7301/10000 train_time:312758ms step_avg:42.84ms +[2025-09-11 12:06:53] [Rank 0] step:7321/10000 train_time:313460ms step_avg:42.82ms +[2025-09-11 12:06:53] [Rank 0] step:7321/10000 train_time:313460ms step_avg:42.82ms +[2025-09-11 12:06:54] [Rank 0] step:7341/10000 train_time:314164ms step_avg:42.80ms +[2025-09-11 12:06:54] [Rank 0] step:7341/10000 train_time:314164ms step_avg:42.80ms +[2025-09-11 12:06:55] [Rank 0] step:7361/10000 train_time:315140ms step_avg:42.81ms +[2025-09-11 12:06:55] [Rank 0] step:7361/10000 train_time:315140ms step_avg:42.81ms +[2025-09-11 12:06:56] [Rank 0] step:7381/10000 train_time:315845ms step_avg:42.79ms +[2025-09-11 12:06:56] [Rank 0] step:7381/10000 train_time:315845ms step_avg:42.79ms +[2025-09-11 12:06:56] [Rank 0] step:7401/10000 train_time:316544ms step_avg:42.77ms +[2025-09-11 12:06:56] [Rank 0] step:7401/10000 train_time:316544ms step_avg:42.77ms +[2025-09-11 12:06:57] [Rank 0] step:7421/10000 train_time:317246ms step_avg:42.75ms +[2025-09-11 12:06:57] [Rank 0] step:7421/10000 train_time:317246ms step_avg:42.75ms +[2025-09-11 12:06:58] [Rank 0] step:7441/10000 train_time:317949ms step_avg:42.73ms +[2025-09-11 12:06:58] [Rank 0] step:7441/10000 train_time:317949ms step_avg:42.73ms +[2025-09-11 12:06:58] [Rank 0] step:7461/10000 train_time:318652ms step_avg:42.71ms +[2025-09-11 12:06:58] [Rank 0] step:7461/10000 train_time:318652ms step_avg:42.71ms +[2025-09-11 12:06:59] [Rank 0] step:7481/10000 train_time:319356ms step_avg:42.69ms +[2025-09-11 12:06:59] [Rank 0] step:7481/10000 train_time:319356ms step_avg:42.69ms +[2025-09-11 12:07:00] [Rank 0] step:7501/10000 train_time:320059ms step_avg:42.67ms +[2025-09-11 12:07:00] [Rank 0] step:7501/10000 train_time:320059ms step_avg:42.67ms +[2025-09-11 12:07:00] [Rank 0] step:7521/10000 train_time:320763ms step_avg:42.65ms 
+[2025-09-11 12:07:00] [Rank 0] step:7521/10000 train_time:320763ms step_avg:42.65ms +[2025-09-11 12:07:01] [Rank 0] step:7541/10000 train_time:321465ms step_avg:42.63ms +[2025-09-11 12:07:01] [Rank 0] step:7541/10000 train_time:321465ms step_avg:42.63ms +[2025-09-11 12:07:02] [Rank 0] step:7561/10000 train_time:322170ms step_avg:42.61ms +[2025-09-11 12:07:02] [Rank 0] step:7561/10000 train_time:322170ms step_avg:42.61ms +[2025-09-11 12:07:03] [Rank 0] step:7581/10000 train_time:322876ms step_avg:42.59ms +[2025-09-11 12:07:03] [Rank 0] step:7581/10000 train_time:322876ms step_avg:42.59ms +[2025-09-11 12:07:03] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 12:07:03] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 12:07:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 12:07:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 12:07:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 12:07:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 12:07:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:07:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:07:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 12:07:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 12:07:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 12:07:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 12:07:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 12:07:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 12:07:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 12:07:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 12:07:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 12:07:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 12:07:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 12:07:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 12:07:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 12:07:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 12:07:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 12:07:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 12:07:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 12:07:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 12:07:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 12:07:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 12:07:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 12:07:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 12:07:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 12:07:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 12:07:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 12:07:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 12:07:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 12:07:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 12:07:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 12:07:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 12:07:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 12:07:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 12:07:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 12:07:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 12:07:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 12:07:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 12:07:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 12:07:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:07:16] [Rank 0] PRINT: step:7600/10000 val_loss:5.2704 total_sharp:1.1436e-02 L1_sharp:7.4880e-02 L2_sharp:1.1106e-01 L3_sharp:1.5080e-01 L4_sharp:2.1193e-01 L5_sharp:3.2498e-01 L6_sharp:5.2227e-01 L7_sharp:8.2385e-01 L8_sharp:1.0761e+00 L9_sharp:2.0157e+00 L10_sharp:3.1624e+00 L11_sharp:2.4317e+00 L12_sharp:2.2362e+00 total_fnorm:1.9609e+00 total_l1_linf:1.2640e+03 total_spectral:9.8047e-01 L1_fnorm:2.3315e-02 L2_fnorm:2.3560e-02 L3_fnorm:2.3560e-02 L4_fnorm:2.3804e-02 L5_fnorm:2.3804e-02 L6_fnorm:2.3804e-02 L7_fnorm:2.3804e-02 L8_fnorm:2.3926e-02 L9_fnorm:2.3926e-02 L10_fnorm:2.3560e-02 L11_fnorm:2.3560e-02 L12_fnorm:2.3071e-02 L1_l1linf:3.9673e-03 L2_l1linf:4.0588e-03 L3_l1linf:4.2419e-03 L4_l1linf:4.3335e-03 L5_l1linf:4.5471e-03 L6_l1linf:4.6997e-03 L7_l1linf:4.8828e-03 L8_l1linf:5.0964e-03 L9_l1linf:5.3406e-03 L10_l1linf:5.4321e-03 L11_l1linf:5.6458e-03 L12_l1linf:6.0120e-03 L1_spectral:4.8248e-04 L2_spectral:4.8376e-04 L3_spectral:4.8297e-04 L4_spectral:4.7951e-04 L5_spectral:4.7977e-04 L6_spectral:4.7635e-04 L7_spectral:4.7073e-04 L8_spectral:4.7627e-04 L9_spectral:4.7170e-04 L10_spectral:4.5725e-04 L11_spectral:4.5784e-04 L12_spectral:4.1816e-04 train_time:323561ms step_avg:42.57ms +[2025-09-11 12:07:16] [Rank 0] PRINT: step:7600/10000 val_loss:5.2704 total_sharp:1.1436e-02 L1_sharp:7.4880e-02 L2_sharp:1.1106e-01 L3_sharp:1.5080e-01 L4_sharp:2.1193e-01 L5_sharp:3.2498e-01 L6_sharp:5.2227e-01 L7_sharp:8.2385e-01 L8_sharp:1.0761e+00 L9_sharp:2.0157e+00 L10_sharp:3.1624e+00 L11_sharp:2.4317e+00 L12_sharp:2.2362e+00 total_fnorm:1.9609e+00 total_l1_linf:1.2640e+03 total_spectral:9.8047e-01 L1_fnorm:2.3315e-02 L2_fnorm:2.3560e-02 L3_fnorm:2.3560e-02 L4_fnorm:2.3804e-02 L5_fnorm:2.3804e-02 L6_fnorm:2.3804e-02 L7_fnorm:2.3804e-02 L8_fnorm:2.3926e-02 L9_fnorm:2.3926e-02 L10_fnorm:2.3560e-02 L11_fnorm:2.3560e-02 L12_fnorm:2.3071e-02 L1_l1linf:3.9673e-03 L2_l1linf:4.0588e-03 L3_l1linf:4.2419e-03 L4_l1linf:4.3335e-03 L5_l1linf:4.5471e-03 
L6_l1linf:4.6997e-03 L7_l1linf:4.8828e-03 L8_l1linf:5.0964e-03 L9_l1linf:5.3406e-03 L10_l1linf:5.4321e-03 L11_l1linf:5.6458e-03 L12_l1linf:6.0120e-03 L1_spectral:4.8248e-04 L2_spectral:4.8376e-04 L3_spectral:4.8297e-04 L4_spectral:4.7951e-04 L5_spectral:4.7977e-04 L6_spectral:4.7635e-04 L7_spectral:4.7073e-04 L8_spectral:4.7627e-04 L9_spectral:4.7170e-04 L10_spectral:4.5725e-04 L11_spectral:4.5784e-04 L12_spectral:4.1816e-04 train_time:323561ms step_avg:42.57ms +[2025-09-11 12:07:18] [Rank 0] step:7601/10000 train_time:325413ms step_avg:42.81ms +[2025-09-11 12:07:18] [Rank 0] step:7601/10000 train_time:325413ms step_avg:42.81ms +[2025-09-11 12:07:19] [Rank 0] step:7621/10000 train_time:326126ms step_avg:42.79ms +[2025-09-11 12:07:19] [Rank 0] step:7621/10000 train_time:326126ms step_avg:42.79ms +[2025-09-11 12:07:20] [Rank 0] step:7641/10000 train_time:326831ms step_avg:42.77ms +[2025-09-11 12:07:20] [Rank 0] step:7641/10000 train_time:326831ms step_avg:42.77ms +[2025-09-11 12:07:20] [Rank 0] step:7661/10000 train_time:327534ms step_avg:42.75ms +[2025-09-11 12:07:20] [Rank 0] step:7661/10000 train_time:327534ms step_avg:42.75ms +[2025-09-11 12:07:21] [Rank 0] step:7681/10000 train_time:328239ms step_avg:42.73ms +[2025-09-11 12:07:21] [Rank 0] step:7681/10000 train_time:328239ms step_avg:42.73ms +[2025-09-11 12:07:22] [Rank 0] step:7701/10000 train_time:328944ms step_avg:42.71ms +[2025-09-11 12:07:22] [Rank 0] step:7701/10000 train_time:328944ms step_avg:42.71ms +[2025-09-11 12:07:23] [Rank 0] step:7721/10000 train_time:329651ms step_avg:42.70ms +[2025-09-11 12:07:23] [Rank 0] step:7721/10000 train_time:329651ms step_avg:42.70ms +[2025-09-11 12:07:23] [Rank 0] step:7741/10000 train_time:330355ms step_avg:42.68ms +[2025-09-11 12:07:23] [Rank 0] step:7741/10000 train_time:330355ms step_avg:42.68ms +[2025-09-11 12:07:24] [Rank 0] step:7761/10000 train_time:331058ms step_avg:42.66ms +[2025-09-11 12:07:24] [Rank 0] step:7761/10000 train_time:331058ms step_avg:42.66ms 
+[2025-09-11 12:07:25] [Rank 0] step:7781/10000 train_time:331765ms step_avg:42.64ms +[2025-09-11 12:07:25] [Rank 0] step:7781/10000 train_time:331765ms step_avg:42.64ms +[2025-09-11 12:07:25] [Rank 0] step:7801/10000 train_time:332468ms step_avg:42.62ms +[2025-09-11 12:07:25] [Rank 0] step:7801/10000 train_time:332468ms step_avg:42.62ms +[2025-09-11 12:07:26] [Rank 0] step:7821/10000 train_time:333172ms step_avg:42.60ms +[2025-09-11 12:07:26] [Rank 0] step:7821/10000 train_time:333172ms step_avg:42.60ms +[2025-09-11 12:07:27] [Rank 0] step:7841/10000 train_time:333877ms step_avg:42.58ms +[2025-09-11 12:07:27] [Rank 0] step:7841/10000 train_time:333877ms step_avg:42.58ms +[2025-09-11 12:07:27] [Rank 0] step:7861/10000 train_time:334584ms step_avg:42.56ms +[2025-09-11 12:07:27] [Rank 0] step:7861/10000 train_time:334584ms step_avg:42.56ms +[2025-09-11 12:07:28] [Rank 0] step:7881/10000 train_time:335288ms step_avg:42.54ms +[2025-09-11 12:07:28] [Rank 0] step:7881/10000 train_time:335288ms step_avg:42.54ms +[2025-09-11 12:07:29] [Rank 0] step:7901/10000 train_time:335994ms step_avg:42.53ms +[2025-09-11 12:07:29] [Rank 0] step:7901/10000 train_time:335994ms step_avg:42.53ms +[2025-09-11 12:07:30] [Rank 0] step:7921/10000 train_time:336698ms step_avg:42.51ms +[2025-09-11 12:07:30] [Rank 0] step:7921/10000 train_time:336698ms step_avg:42.51ms +[2025-09-11 12:07:30] [Rank 0] step:7941/10000 train_time:337404ms step_avg:42.49ms +[2025-09-11 12:07:30] [Rank 0] step:7941/10000 train_time:337404ms step_avg:42.49ms +[2025-09-11 12:07:31] [Rank 0] step:7961/10000 train_time:338107ms step_avg:42.47ms +[2025-09-11 12:07:31] [Rank 0] step:7961/10000 train_time:338107ms step_avg:42.47ms +[2025-09-11 12:07:32] [Rank 0] step:7981/10000 train_time:338814ms step_avg:42.45ms +[2025-09-11 12:07:32] [Rank 0] step:7981/10000 train_time:338814ms step_avg:42.45ms +[2025-09-11 12:07:32] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 12:07:32] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 12:07:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 12:07:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 12:07:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 12:07:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 12:07:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:07:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:07:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 12:07:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 12:07:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 12:07:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 12:07:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 12:07:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 12:07:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 12:07:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 12:07:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 12:07:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 12:07:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 12:07:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 12:07:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 12:07:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 12:07:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 12:07:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 12:07:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 12:07:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 12:07:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 12:07:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 12:07:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 12:07:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 12:07:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 12:07:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 12:07:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 12:07:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 12:07:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 12:07:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 12:07:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 12:07:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 12:07:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 12:07:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 12:07:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 12:07:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 12:07:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 12:07:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 12:07:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 12:07:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 12:07:43] [Rank 0] PRINT: step:8000/10000 val_loss:5.2596 total_sharp:1.3765e-02 L1_sharp:6.5743e-02 L2_sharp:1.1053e-01 L3_sharp:1.4648e-01 L4_sharp:1.9554e-01 L5_sharp:2.9966e-01 L6_sharp:4.5136e-01 L7_sharp:7.6818e-01 L8_sharp:1.3390e+00 L9_sharp:2.3965e+00 L10_sharp:3.7754e+00 L11_sharp:3.6568e+00 L12_sharp:3.7643e+00 total_fnorm:1.5938e+00 total_l1_linf:9.6400e+02 total_spectral:7.9688e-01 L1_fnorm:1.8555e-02 L2_fnorm:1.8677e-02 L3_fnorm:1.8799e-02 L4_fnorm:1.8921e-02 L5_fnorm:1.9043e-02 L6_fnorm:1.9043e-02 L7_fnorm:1.9043e-02 L8_fnorm:1.9165e-02 L9_fnorm:1.9287e-02 L10_fnorm:1.8799e-02 L11_fnorm:1.8799e-02 L12_fnorm:1.8311e-02 L1_l1linf:3.0212e-03 L2_l1linf:3.0365e-03 L3_l1linf:3.1433e-03 L4_l1linf:3.4180e-03 L5_l1linf:3.5095e-03 L6_l1linf:3.6774e-03 L7_l1linf:3.9673e-03 L8_l1linf:4.1504e-03 L9_l1linf:4.3030e-03 L10_l1linf:4.5166e-03 L11_l1linf:4.3640e-03 L12_l1linf:4.2419e-03 L1_spectral:3.9596e-04 L2_spectral:3.9467e-04 L3_spectral:3.9635e-04 L4_spectral:3.9447e-04 L5_spectral:3.9335e-04 L6_spectral:3.8653e-04 L7_spectral:3.8458e-04 L8_spectral:3.8555e-04 L9_spectral:3.7818e-04 L10_spectral:3.6188e-04 L11_spectral:3.6599e-04 L12_spectral:3.3305e-04 train_time:339495ms step_avg:42.44ms +[2025-09-11 12:07:43] [Rank 0] PRINT: step:8000/10000 
val_loss:5.2596 total_sharp:1.3765e-02 L1_sharp:6.5743e-02 L2_sharp:1.1053e-01 L3_sharp:1.4648e-01 L4_sharp:1.9554e-01 L5_sharp:2.9966e-01 L6_sharp:4.5136e-01 L7_sharp:7.6818e-01 L8_sharp:1.3390e+00 L9_sharp:2.3965e+00 L10_sharp:3.7754e+00 L11_sharp:3.6568e+00 L12_sharp:3.7643e+00 total_fnorm:1.5938e+00 total_l1_linf:9.6400e+02 total_spectral:7.9688e-01 L1_fnorm:1.8555e-02 L2_fnorm:1.8677e-02 L3_fnorm:1.8799e-02 L4_fnorm:1.8921e-02 L5_fnorm:1.9043e-02 L6_fnorm:1.9043e-02 L7_fnorm:1.9043e-02 L8_fnorm:1.9165e-02 L9_fnorm:1.9287e-02 L10_fnorm:1.8799e-02 L11_fnorm:1.8799e-02 L12_fnorm:1.8311e-02 L1_l1linf:3.0212e-03 L2_l1linf:3.0365e-03 L3_l1linf:3.1433e-03 L4_l1linf:3.4180e-03 L5_l1linf:3.5095e-03 L6_l1linf:3.6774e-03 L7_l1linf:3.9673e-03 L8_l1linf:4.1504e-03 L9_l1linf:4.3030e-03 L10_l1linf:4.5166e-03 L11_l1linf:4.3640e-03 L12_l1linf:4.2419e-03 L1_spectral:3.9596e-04 L2_spectral:3.9467e-04 L3_spectral:3.9635e-04 L4_spectral:3.9447e-04 L5_spectral:3.9335e-04 L6_spectral:3.8653e-04 L7_spectral:3.8458e-04 L8_spectral:3.8555e-04 L9_spectral:3.7818e-04 L10_spectral:3.6188e-04 L11_spectral:3.6599e-04 L12_spectral:3.3305e-04 train_time:339495ms step_avg:42.44ms +[2025-09-11 12:07:45] [Rank 0] step:8001/10000 train_time:341365ms step_avg:42.67ms +[2025-09-11 12:07:45] [Rank 0] step:8001/10000 train_time:341365ms step_avg:42.67ms +[2025-09-11 12:07:46] [Rank 0] step:8021/10000 train_time:342108ms step_avg:42.65ms +[2025-09-11 12:07:46] [Rank 0] step:8021/10000 train_time:342108ms step_avg:42.65ms +[2025-09-11 12:07:46] [Rank 0] step:8041/10000 train_time:342812ms step_avg:42.63ms +[2025-09-11 12:07:46] [Rank 0] step:8041/10000 train_time:342812ms step_avg:42.63ms +[2025-09-11 12:07:47] [Rank 0] step:8061/10000 train_time:343518ms step_avg:42.61ms +[2025-09-11 12:07:47] [Rank 0] step:8061/10000 train_time:343518ms step_avg:42.61ms +[2025-09-11 12:07:48] [Rank 0] step:8081/10000 train_time:344222ms step_avg:42.60ms +[2025-09-11 12:07:48] [Rank 0] step:8081/10000 
train_time:344222ms step_avg:42.60ms +[2025-09-11 12:07:48] [Rank 0] step:8101/10000 train_time:344924ms step_avg:42.58ms +[2025-09-11 12:07:48] [Rank 0] step:8101/10000 train_time:344924ms step_avg:42.58ms +[2025-09-11 12:07:49] [Rank 0] step:8121/10000 train_time:345632ms step_avg:42.56ms +[2025-09-11 12:07:49] [Rank 0] step:8121/10000 train_time:345632ms step_avg:42.56ms +[2025-09-11 12:07:51] [Rank 0] step:8141/10000 train_time:347076ms step_avg:42.63ms +[2025-09-11 12:07:51] [Rank 0] step:8141/10000 train_time:347076ms step_avg:42.63ms +[2025-09-11 12:07:51] [Rank 0] step:8161/10000 train_time:347783ms step_avg:42.62ms +[2025-09-11 12:07:51] [Rank 0] step:8161/10000 train_time:347783ms step_avg:42.62ms +[2025-09-11 12:07:52] [Rank 0] step:8181/10000 train_time:348498ms step_avg:42.60ms +[2025-09-11 12:07:52] [Rank 0] step:8181/10000 train_time:348498ms step_avg:42.60ms +[2025-09-11 12:07:53] [Rank 0] step:8201/10000 train_time:349209ms step_avg:42.58ms +[2025-09-11 12:07:53] [Rank 0] step:8201/10000 train_time:349209ms step_avg:42.58ms +[2025-09-11 12:07:53] [Rank 0] step:8221/10000 train_time:349920ms step_avg:42.56ms +[2025-09-11 12:07:53] [Rank 0] step:8221/10000 train_time:349920ms step_avg:42.56ms +[2025-09-11 12:07:54] [Rank 0] step:8241/10000 train_time:350638ms step_avg:42.55ms +[2025-09-11 12:07:54] [Rank 0] step:8241/10000 train_time:350638ms step_avg:42.55ms +[2025-09-11 12:07:55] [Rank 0] step:8261/10000 train_time:351627ms step_avg:42.56ms +[2025-09-11 12:07:55] [Rank 0] step:8261/10000 train_time:351627ms step_avg:42.56ms +[2025-09-11 12:07:56] [Rank 0] step:8281/10000 train_time:352333ms step_avg:42.55ms +[2025-09-11 12:07:56] [Rank 0] step:8281/10000 train_time:352333ms step_avg:42.55ms +[2025-09-11 12:07:57] [Rank 0] step:8301/10000 train_time:353043ms step_avg:42.53ms +[2025-09-11 12:07:57] [Rank 0] step:8301/10000 train_time:353043ms step_avg:42.53ms +[2025-09-11 12:07:57] [Rank 0] step:8321/10000 train_time:353898ms step_avg:42.53ms 
+[2025-09-11 12:07:57] [Rank 0] step:8321/10000 train_time:353898ms step_avg:42.53ms +[2025-09-11 12:07:58] [Rank 0] step:8341/10000 train_time:354737ms step_avg:42.53ms +[2025-09-11 12:07:58] [Rank 0] step:8341/10000 train_time:354737ms step_avg:42.53ms +[2025-09-11 12:07:59] [Rank 0] step:8361/10000 train_time:355442ms step_avg:42.51ms +[2025-09-11 12:07:59] [Rank 0] step:8361/10000 train_time:355442ms step_avg:42.51ms +[2025-09-11 12:08:00] [Rank 0] step:8381/10000 train_time:356155ms step_avg:42.50ms +[2025-09-11 12:08:00] [Rank 0] step:8381/10000 train_time:356155ms step_avg:42.50ms +[2025-09-11 12:08:00] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 12:08:00] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 12:08:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 12:08:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 12:08:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 12:08:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 12:08:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:08:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:08:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 12:08:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 12:08:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 12:08:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 12:08:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 12:08:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 12:08:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 12:08:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 12:08:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 12:08:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 12:08:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 12:08:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 12:08:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 12:08:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 12:08:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 12:08:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 12:08:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 12:08:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 12:08:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 12:08:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 12:08:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 12:08:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 12:08:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 12:08:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 12:08:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 12:08:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 12:08:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 12:08:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 12:08:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 12:08:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 12:08:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 12:08:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 12:08:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 12:08:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 12:08:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 12:08:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 12:08:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 12:08:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:08:14] [Rank 0] PRINT: step:8400/10000 val_loss:5.2497 total_sharp:1.0473e-02 L1_sharp:5.8318e-02 L2_sharp:9.1318e-02 L3_sharp:1.2073e-01 L4_sharp:1.6994e-01 L5_sharp:2.8885e-01 L6_sharp:4.1194e-01 L7_sharp:6.1516e-01 L8_sharp:1.0569e+00 L9_sharp:1.8733e+00 L10_sharp:3.1036e+00 L11_sharp:2.5467e+00 L12_sharp:2.3141e+00 total_fnorm:1.1953e+00 total_l1_linf:6.3200e+02 total_spectral:5.9766e-01 L1_fnorm:1.4221e-02 L2_fnorm:1.4282e-02 L3_fnorm:1.4404e-02 L4_fnorm:1.4587e-02 L5_fnorm:1.4648e-02 L6_fnorm:1.4526e-02 L7_fnorm:1.4526e-02 L8_fnorm:1.4771e-02 L9_fnorm:1.4771e-02 L10_fnorm:1.4465e-02 L11_fnorm:1.4465e-02 L12_fnorm:1.4038e-02 L1_l1linf:2.0905e-03 L2_l1linf:2.0752e-03 L3_l1linf:2.2278e-03 L4_l1linf:2.2736e-03 L5_l1linf:2.4261e-03 L6_l1linf:2.4719e-03 L7_l1linf:2.6093e-03 L8_l1linf:2.8076e-03 L9_l1linf:2.8839e-03 L10_l1linf:3.0365e-03 L11_l1linf:3.0975e-03 L12_l1linf:3.1586e-03 L1_spectral:3.1066e-04 L2_spectral:3.1498e-04 L3_spectral:3.1463e-04 L4_spectral:3.1684e-04 L5_spectral:3.1321e-04 L6_spectral:3.0866e-04 L7_spectral:3.0470e-04 L8_spectral:3.0955e-04 L9_spectral:3.0145e-04 L10_spectral:2.8942e-04 L11_spectral:2.9170e-04 L12_spectral:2.5756e-04 train_time:356848ms step_avg:42.48ms +[2025-09-11 12:08:14] [Rank 0] PRINT: step:8400/10000 val_loss:5.2497 total_sharp:1.0473e-02 L1_sharp:5.8318e-02 L2_sharp:9.1318e-02 L3_sharp:1.2073e-01 L4_sharp:1.6994e-01 L5_sharp:2.8885e-01 L6_sharp:4.1194e-01 L7_sharp:6.1516e-01 L8_sharp:1.0569e+00 L9_sharp:1.8733e+00 L10_sharp:3.1036e+00 L11_sharp:2.5467e+00 L12_sharp:2.3141e+00 total_fnorm:1.1953e+00 total_l1_linf:6.3200e+02 total_spectral:5.9766e-01 L1_fnorm:1.4221e-02 L2_fnorm:1.4282e-02 L3_fnorm:1.4404e-02 L4_fnorm:1.4587e-02 L5_fnorm:1.4648e-02 L6_fnorm:1.4526e-02 L7_fnorm:1.4526e-02 L8_fnorm:1.4771e-02 L9_fnorm:1.4771e-02 L10_fnorm:1.4465e-02 L11_fnorm:1.4465e-02 L12_fnorm:1.4038e-02 L1_l1linf:2.0905e-03 L2_l1linf:2.0752e-03 L3_l1linf:2.2278e-03 L4_l1linf:2.2736e-03 L5_l1linf:2.4261e-03 
L6_l1linf:2.4719e-03 L7_l1linf:2.6093e-03 L8_l1linf:2.8076e-03 L9_l1linf:2.8839e-03 L10_l1linf:3.0365e-03 L11_l1linf:3.0975e-03 L12_l1linf:3.1586e-03 L1_spectral:3.1066e-04 L2_spectral:3.1498e-04 L3_spectral:3.1463e-04 L4_spectral:3.1684e-04 L5_spectral:3.1321e-04 L6_spectral:3.0866e-04 L7_spectral:3.0470e-04 L8_spectral:3.0955e-04 L9_spectral:3.0145e-04 L10_spectral:2.8942e-04 L11_spectral:2.9170e-04 L12_spectral:2.5756e-04 train_time:356848ms step_avg:42.48ms +[2025-09-11 12:08:16] [Rank 0] step:8401/10000 train_time:358689ms step_avg:42.70ms +[2025-09-11 12:08:16] [Rank 0] step:8401/10000 train_time:358689ms step_avg:42.70ms +[2025-09-11 12:08:17] [Rank 0] step:8421/10000 train_time:359437ms step_avg:42.68ms +[2025-09-11 12:08:17] [Rank 0] step:8421/10000 train_time:359437ms step_avg:42.68ms +[2025-09-11 12:08:18] [Rank 0] step:8441/10000 train_time:360148ms step_avg:42.67ms +[2025-09-11 12:08:18] [Rank 0] step:8441/10000 train_time:360148ms step_avg:42.67ms +[2025-09-11 12:08:18] [Rank 0] step:8461/10000 train_time:360860ms step_avg:42.65ms +[2025-09-11 12:08:18] [Rank 0] step:8461/10000 train_time:360860ms step_avg:42.65ms +[2025-09-11 12:08:19] [Rank 0] step:8481/10000 train_time:361571ms step_avg:42.63ms +[2025-09-11 12:08:19] [Rank 0] step:8481/10000 train_time:361571ms step_avg:42.63ms +[2025-09-11 12:08:20] [Rank 0] step:8501/10000 train_time:362281ms step_avg:42.62ms +[2025-09-11 12:08:20] [Rank 0] step:8501/10000 train_time:362281ms step_avg:42.62ms +[2025-09-11 12:08:21] [Rank 0] step:8521/10000 train_time:362991ms step_avg:42.60ms +[2025-09-11 12:08:21] [Rank 0] step:8521/10000 train_time:362991ms step_avg:42.60ms +[2025-09-11 12:08:21] [Rank 0] step:8541/10000 train_time:363701ms step_avg:42.58ms +[2025-09-11 12:08:21] [Rank 0] step:8541/10000 train_time:363701ms step_avg:42.58ms +[2025-09-11 12:08:22] [Rank 0] step:8561/10000 train_time:364416ms step_avg:42.57ms +[2025-09-11 12:08:22] [Rank 0] step:8561/10000 train_time:364416ms step_avg:42.57ms 
+[2025-09-11 12:08:23] [Rank 0] step:8581/10000 train_time:365131ms step_avg:42.55ms +[2025-09-11 12:08:23] [Rank 0] step:8581/10000 train_time:365131ms step_avg:42.55ms +[2025-09-11 12:08:23] [Rank 0] step:8601/10000 train_time:365842ms step_avg:42.53ms +[2025-09-11 12:08:23] [Rank 0] step:8601/10000 train_time:365842ms step_avg:42.53ms +[2025-09-11 12:08:24] [Rank 0] step:8621/10000 train_time:366552ms step_avg:42.52ms +[2025-09-11 12:08:24] [Rank 0] step:8621/10000 train_time:366552ms step_avg:42.52ms +[2025-09-11 12:08:25] [Rank 0] step:8641/10000 train_time:367262ms step_avg:42.50ms +[2025-09-11 12:08:25] [Rank 0] step:8641/10000 train_time:367262ms step_avg:42.50ms +[2025-09-11 12:08:26] [Rank 0] step:8661/10000 train_time:367973ms step_avg:42.49ms +[2025-09-11 12:08:26] [Rank 0] step:8661/10000 train_time:367973ms step_avg:42.49ms +[2025-09-11 12:08:26] [Rank 0] step:8681/10000 train_time:368685ms step_avg:42.47ms +[2025-09-11 12:08:26] [Rank 0] step:8681/10000 train_time:368685ms step_avg:42.47ms +[2025-09-11 12:08:27] [Rank 0] step:8701/10000 train_time:369395ms step_avg:42.45ms +[2025-09-11 12:08:27] [Rank 0] step:8701/10000 train_time:369395ms step_avg:42.45ms +[2025-09-11 12:08:28] [Rank 0] step:8721/10000 train_time:370108ms step_avg:42.44ms +[2025-09-11 12:08:28] [Rank 0] step:8721/10000 train_time:370108ms step_avg:42.44ms +[2025-09-11 12:08:28] [Rank 0] step:8741/10000 train_time:370816ms step_avg:42.42ms +[2025-09-11 12:08:28] [Rank 0] step:8741/10000 train_time:370816ms step_avg:42.42ms +[2025-09-11 12:08:29] [Rank 0] step:8761/10000 train_time:371539ms step_avg:42.41ms +[2025-09-11 12:08:29] [Rank 0] step:8761/10000 train_time:371539ms step_avg:42.41ms +[2025-09-11 12:08:30] [Rank 0] step:8781/10000 train_time:372247ms step_avg:42.39ms +[2025-09-11 12:08:30] [Rank 0] step:8781/10000 train_time:372247ms step_avg:42.39ms +[2025-09-11 12:08:31] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 12:08:31] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 12:08:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 12:08:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 12:08:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 12:08:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 12:08:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:08:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:08:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 12:08:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 12:08:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 12:08:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 12:08:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 12:08:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 12:08:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 12:08:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 12:08:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 12:08:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 12:08:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 12:08:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 12:08:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 12:08:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 12:08:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 12:08:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 12:08:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 12:08:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 12:08:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 12:08:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 12:08:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 12:08:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 12:08:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 12:08:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 12:08:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 12:08:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 12:08:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 12:08:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 12:08:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 12:08:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 12:08:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 12:08:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 12:08:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 12:08:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 12:08:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 12:08:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 12:08:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 12:08:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 12:08:41] [Rank 0] PRINT: step:8800/10000 val_loss:5.2409 total_sharp:8.3748e-03 L1_sharp:4.2840e-02 L2_sharp:6.2291e-02 L3_sharp:8.9992e-02 L4_sharp:1.3781e-01 L5_sharp:1.8202e-01 L6_sharp:3.4535e-01 L7_sharp:6.3192e-01 L8_sharp:9.5046e-01 L9_sharp:1.8143e+00 L10_sharp:2.5266e+00 L11_sharp:1.9923e+00 L12_sharp:1.9142e+00 total_fnorm:8.3984e-01 total_l1_linf:3.8800e+02 total_spectral:4.1992e-01 L1_fnorm:1.0132e-02 L2_fnorm:1.0193e-02 L3_fnorm:1.0254e-02 L4_fnorm:1.0437e-02 L5_fnorm:1.0376e-02 L6_fnorm:1.0315e-02 L7_fnorm:1.0315e-02 L8_fnorm:1.0498e-02 L9_fnorm:1.0437e-02 L10_fnorm:1.0315e-02 L11_fnorm:1.0193e-02 L12_fnorm:9.9487e-03 L1_l1linf:1.4572e-03 L2_l1linf:1.4725e-03 L3_l1linf:1.4572e-03 L4_l1linf:1.5106e-03 L5_l1linf:1.5335e-03 L6_l1linf:1.5640e-03 L7_l1linf:1.6251e-03 L8_l1linf:1.7166e-03 L9_l1linf:1.9073e-03 L10_l1linf:1.9302e-03 L11_l1linf:1.9836e-03 L12_l1linf:2.0599e-03 L1_spectral:2.2816e-04 L2_spectral:2.2823e-04 L3_spectral:2.2842e-04 L4_spectral:2.3017e-04 L5_spectral:2.3004e-04 L6_spectral:2.2340e-04 L7_spectral:2.2127e-04 L8_spectral:2.2488e-04 L9_spectral:2.2033e-04 L10_spectral:2.1333e-04 L11_spectral:2.0896e-04 L12_spectral:1.8636e-04 train_time:372936ms step_avg:42.38ms +[2025-09-11 12:08:41] [Rank 0] PRINT: step:8800/10000 
val_loss:5.2409 total_sharp:8.3748e-03 L1_sharp:4.2840e-02 L2_sharp:6.2291e-02 L3_sharp:8.9992e-02 L4_sharp:1.3781e-01 L5_sharp:1.8202e-01 L6_sharp:3.4535e-01 L7_sharp:6.3192e-01 L8_sharp:9.5046e-01 L9_sharp:1.8143e+00 L10_sharp:2.5266e+00 L11_sharp:1.9923e+00 L12_sharp:1.9142e+00 total_fnorm:8.3984e-01 total_l1_linf:3.8800e+02 total_spectral:4.1992e-01 L1_fnorm:1.0132e-02 L2_fnorm:1.0193e-02 L3_fnorm:1.0254e-02 L4_fnorm:1.0437e-02 L5_fnorm:1.0376e-02 L6_fnorm:1.0315e-02 L7_fnorm:1.0315e-02 L8_fnorm:1.0498e-02 L9_fnorm:1.0437e-02 L10_fnorm:1.0315e-02 L11_fnorm:1.0193e-02 L12_fnorm:9.9487e-03 L1_l1linf:1.4572e-03 L2_l1linf:1.4725e-03 L3_l1linf:1.4572e-03 L4_l1linf:1.5106e-03 L5_l1linf:1.5335e-03 L6_l1linf:1.5640e-03 L7_l1linf:1.6251e-03 L8_l1linf:1.7166e-03 L9_l1linf:1.9073e-03 L10_l1linf:1.9302e-03 L11_l1linf:1.9836e-03 L12_l1linf:2.0599e-03 L1_spectral:2.2816e-04 L2_spectral:2.2823e-04 L3_spectral:2.2842e-04 L4_spectral:2.3017e-04 L5_spectral:2.3004e-04 L6_spectral:2.2340e-04 L7_spectral:2.2127e-04 L8_spectral:2.2488e-04 L9_spectral:2.2033e-04 L10_spectral:2.1333e-04 L11_spectral:2.0896e-04 L12_spectral:1.8636e-04 train_time:372936ms step_avg:42.38ms +[2025-09-11 12:08:43] [Rank 0] step:8801/10000 train_time:374850ms step_avg:42.59ms +[2025-09-11 12:08:43] [Rank 0] step:8801/10000 train_time:374850ms step_avg:42.59ms +[2025-09-11 12:08:44] [Rank 0] step:8821/10000 train_time:375571ms step_avg:42.58ms +[2025-09-11 12:08:44] [Rank 0] step:8821/10000 train_time:375571ms step_avg:42.58ms +[2025-09-11 12:08:45] [Rank 0] step:8841/10000 train_time:376282ms step_avg:42.56ms +[2025-09-11 12:08:45] [Rank 0] step:8841/10000 train_time:376282ms step_avg:42.56ms +[2025-09-11 12:08:45] [Rank 0] step:8861/10000 train_time:376991ms step_avg:42.54ms +[2025-09-11 12:08:45] [Rank 0] step:8861/10000 train_time:376991ms step_avg:42.54ms +[2025-09-11 12:08:46] [Rank 0] step:8881/10000 train_time:377702ms step_avg:42.53ms +[2025-09-11 12:08:46] [Rank 0] step:8881/10000 
train_time:377702ms step_avg:42.53ms +[2025-09-11 12:08:47] [Rank 0] step:8901/10000 train_time:378416ms step_avg:42.51ms +[2025-09-11 12:08:47] [Rank 0] step:8901/10000 train_time:378416ms step_avg:42.51ms +[2025-09-11 12:08:48] [Rank 0] step:8921/10000 train_time:379123ms step_avg:42.50ms +[2025-09-11 12:08:48] [Rank 0] step:8921/10000 train_time:379123ms step_avg:42.50ms +[2025-09-11 12:08:48] [Rank 0] step:8941/10000 train_time:379839ms step_avg:42.48ms +[2025-09-11 12:08:48] [Rank 0] step:8941/10000 train_time:379839ms step_avg:42.48ms +[2025-09-11 12:08:49] [Rank 0] step:8961/10000 train_time:380559ms step_avg:42.47ms +[2025-09-11 12:08:49] [Rank 0] step:8961/10000 train_time:380559ms step_avg:42.47ms +[2025-09-11 12:08:50] [Rank 0] step:8981/10000 train_time:381274ms step_avg:42.45ms +[2025-09-11 12:08:50] [Rank 0] step:8981/10000 train_time:381274ms step_avg:42.45ms +[2025-09-11 12:08:50] [Rank 0] step:9001/10000 train_time:381981ms step_avg:42.44ms +[2025-09-11 12:08:50] [Rank 0] step:9001/10000 train_time:381981ms step_avg:42.44ms +[2025-09-11 12:08:51] [Rank 0] step:9021/10000 train_time:382692ms step_avg:42.42ms +[2025-09-11 12:08:51] [Rank 0] step:9021/10000 train_time:382692ms step_avg:42.42ms +[2025-09-11 12:08:52] [Rank 0] step:9041/10000 train_time:383405ms step_avg:42.41ms +[2025-09-11 12:08:52] [Rank 0] step:9041/10000 train_time:383405ms step_avg:42.41ms +[2025-09-11 12:08:53] [Rank 0] step:9061/10000 train_time:384115ms step_avg:42.39ms +[2025-09-11 12:08:53] [Rank 0] step:9061/10000 train_time:384115ms step_avg:42.39ms +[2025-09-11 12:08:53] [Rank 0] step:9081/10000 train_time:384829ms step_avg:42.38ms +[2025-09-11 12:08:53] [Rank 0] step:9081/10000 train_time:384829ms step_avg:42.38ms +[2025-09-11 12:08:54] [Rank 0] step:9101/10000 train_time:385545ms step_avg:42.36ms +[2025-09-11 12:08:54] [Rank 0] step:9101/10000 train_time:385545ms step_avg:42.36ms +[2025-09-11 12:08:55] [Rank 0] step:9121/10000 train_time:386261ms step_avg:42.35ms 
+[2025-09-11 12:08:55] [Rank 0] step:9121/10000 train_time:386261ms step_avg:42.35ms +[2025-09-11 12:08:55] [Rank 0] step:9141/10000 train_time:386971ms step_avg:42.33ms +[2025-09-11 12:08:55] [Rank 0] step:9141/10000 train_time:386971ms step_avg:42.33ms +[2025-09-11 12:08:56] [Rank 0] step:9161/10000 train_time:387687ms step_avg:42.32ms +[2025-09-11 12:08:56] [Rank 0] step:9161/10000 train_time:387687ms step_avg:42.32ms +[2025-09-11 12:08:57] [Rank 0] step:9181/10000 train_time:388401ms step_avg:42.30ms +[2025-09-11 12:08:57] [Rank 0] step:9181/10000 train_time:388401ms step_avg:42.30ms +[2025-09-11 12:08:58] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 12:08:58] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 12:08:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 12:08:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 12:09:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 12:09:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 12:09:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:09:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:09:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 12:09:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 12:09:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 12:09:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 12:09:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 12:09:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 12:09:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 12:09:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 12:09:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 12:09:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 12:09:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 12:09:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 12:09:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 12:09:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 12:09:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 12:09:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 12:09:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 12:09:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 12:09:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 12:09:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 12:09:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 12:09:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 12:09:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 12:09:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 12:09:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 12:09:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 12:09:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 12:09:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 12:09:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 12:09:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 12:09:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 12:09:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 12:09:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 12:09:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 12:09:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 12:09:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 12:09:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 12:09:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:09:09] [Rank 0] PRINT: step:9200/10000 val_loss:5.2346 total_sharp:1.1572e-02 L1_sharp:4.3600e-02 L2_sharp:5.9480e-02 L3_sharp:9.8481e-02 L4_sharp:1.5314e-01 L5_sharp:2.1477e-01 L6_sharp:3.2783e-01 L7_sharp:5.3340e-01 L8_sharp:8.3227e-01 L9_sharp:1.4577e+00 L10_sharp:2.5614e+00 L11_sharp:2.9677e+00 L12_sharp:6.3734e+00 total_fnorm:5.1562e-01 total_l1_linf:2.0800e+02 total_spectral:2.5781e-01 L1_fnorm:6.6528e-03 L2_fnorm:6.7444e-03 L3_fnorm:6.7749e-03 L4_fnorm:6.8359e-03 L5_fnorm:6.8665e-03 L6_fnorm:6.8359e-03 L7_fnorm:6.8359e-03 L8_fnorm:6.8970e-03 L9_fnorm:6.9275e-03 L10_fnorm:6.7749e-03 L11_fnorm:6.7749e-03 L12_fnorm:6.4087e-03 L1_l1linf:8.7738e-04 L2_l1linf:8.7357e-04 L3_l1linf:8.8501e-04 L4_l1linf:8.6212e-04 L5_l1linf:9.8419e-04 L6_l1linf:1.0071e-03 L7_l1linf:9.7656e-04 L8_l1linf:1.0757e-03 L9_l1linf:1.1597e-03 L10_l1linf:1.1749e-03 L11_l1linf:1.2131e-03 L12_l1linf:1.1749e-03 L1_spectral:1.5142e-04 L2_spectral:1.5325e-04 L3_spectral:1.5299e-04 L4_spectral:1.5204e-04 L5_spectral:1.5250e-04 L6_spectral:1.4742e-04 L7_spectral:1.4705e-04 L8_spectral:1.5198e-04 L9_spectral:1.4561e-04 L10_spectral:1.3984e-04 L11_spectral:1.3720e-04 L12_spectral:1.2147e-04 train_time:389096ms step_avg:42.29ms +[2025-09-11 12:09:09] [Rank 0] PRINT: step:9200/10000 val_loss:5.2346 total_sharp:1.1572e-02 L1_sharp:4.3600e-02 L2_sharp:5.9480e-02 L3_sharp:9.8481e-02 L4_sharp:1.5314e-01 L5_sharp:2.1477e-01 L6_sharp:3.2783e-01 L7_sharp:5.3340e-01 L8_sharp:8.3227e-01 L9_sharp:1.4577e+00 L10_sharp:2.5614e+00 L11_sharp:2.9677e+00 L12_sharp:6.3734e+00 total_fnorm:5.1562e-01 total_l1_linf:2.0800e+02 total_spectral:2.5781e-01 L1_fnorm:6.6528e-03 L2_fnorm:6.7444e-03 L3_fnorm:6.7749e-03 L4_fnorm:6.8359e-03 L5_fnorm:6.8665e-03 L6_fnorm:6.8359e-03 L7_fnorm:6.8359e-03 L8_fnorm:6.8970e-03 L9_fnorm:6.9275e-03 L10_fnorm:6.7749e-03 L11_fnorm:6.7749e-03 L12_fnorm:6.4087e-03 L1_l1linf:8.7738e-04 L2_l1linf:8.7357e-04 L3_l1linf:8.8501e-04 L4_l1linf:8.6212e-04 L5_l1linf:9.8419e-04 
L6_l1linf:1.0071e-03 L7_l1linf:9.7656e-04 L8_l1linf:1.0757e-03 L9_l1linf:1.1597e-03 L10_l1linf:1.1749e-03 L11_l1linf:1.2131e-03 L12_l1linf:1.1749e-03 L1_spectral:1.5142e-04 L2_spectral:1.5325e-04 L3_spectral:1.5299e-04 L4_spectral:1.5204e-04 L5_spectral:1.5250e-04 L6_spectral:1.4742e-04 L7_spectral:1.4705e-04 L8_spectral:1.5198e-04 L9_spectral:1.4561e-04 L10_spectral:1.3984e-04 L11_spectral:1.3720e-04 L12_spectral:1.2147e-04 train_time:389096ms step_avg:42.29ms +[2025-09-11 12:09:11] [Rank 0] step:9201/10000 train_time:391965ms step_avg:42.60ms +[2025-09-11 12:09:11] [Rank 0] step:9201/10000 train_time:391965ms step_avg:42.60ms +[2025-09-11 12:09:12] [Rank 0] step:9221/10000 train_time:392696ms step_avg:42.59ms +[2025-09-11 12:09:12] [Rank 0] step:9221/10000 train_time:392696ms step_avg:42.59ms +[2025-09-11 12:09:13] [Rank 0] step:9241/10000 train_time:393406ms step_avg:42.57ms +[2025-09-11 12:09:13] [Rank 0] step:9241/10000 train_time:393406ms step_avg:42.57ms +[2025-09-11 12:09:14] [Rank 0] step:9261/10000 train_time:394120ms step_avg:42.56ms +[2025-09-11 12:09:14] [Rank 0] step:9261/10000 train_time:394120ms step_avg:42.56ms +[2025-09-11 12:09:14] [Rank 0] step:9281/10000 train_time:394833ms step_avg:42.54ms +[2025-09-11 12:09:14] [Rank 0] step:9281/10000 train_time:394833ms step_avg:42.54ms +[2025-09-11 12:09:15] [Rank 0] step:9301/10000 train_time:395542ms step_avg:42.53ms +[2025-09-11 12:09:15] [Rank 0] step:9301/10000 train_time:395542ms step_avg:42.53ms +[2025-09-11 12:09:16] [Rank 0] step:9321/10000 train_time:396255ms step_avg:42.51ms +[2025-09-11 12:09:16] [Rank 0] step:9321/10000 train_time:396255ms step_avg:42.51ms +[2025-09-11 12:09:16] [Rank 0] step:9341/10000 train_time:396965ms step_avg:42.50ms +[2025-09-11 12:09:16] [Rank 0] step:9341/10000 train_time:396965ms step_avg:42.50ms +[2025-09-11 12:09:17] [Rank 0] step:9361/10000 train_time:397671ms step_avg:42.48ms +[2025-09-11 12:09:17] [Rank 0] step:9361/10000 train_time:397671ms step_avg:42.48ms 
+[2025-09-11 12:09:18] [Rank 0] step:9381/10000 train_time:398382ms step_avg:42.47ms +[2025-09-11 12:09:18] [Rank 0] step:9381/10000 train_time:398382ms step_avg:42.47ms +[2025-09-11 12:09:19] [Rank 0] step:9401/10000 train_time:399096ms step_avg:42.45ms +[2025-09-11 12:09:19] [Rank 0] step:9401/10000 train_time:399096ms step_avg:42.45ms +[2025-09-11 12:09:19] [Rank 0] step:9421/10000 train_time:399810ms step_avg:42.44ms +[2025-09-11 12:09:19] [Rank 0] step:9421/10000 train_time:399810ms step_avg:42.44ms +[2025-09-11 12:09:20] [Rank 0] step:9441/10000 train_time:400524ms step_avg:42.42ms +[2025-09-11 12:09:20] [Rank 0] step:9441/10000 train_time:400524ms step_avg:42.42ms +[2025-09-11 12:09:21] [Rank 0] step:9461/10000 train_time:401235ms step_avg:42.41ms +[2025-09-11 12:09:21] [Rank 0] step:9461/10000 train_time:401235ms step_avg:42.41ms +[2025-09-11 12:09:21] [Rank 0] step:9481/10000 train_time:401948ms step_avg:42.40ms +[2025-09-11 12:09:21] [Rank 0] step:9481/10000 train_time:401948ms step_avg:42.40ms +[2025-09-11 12:09:22] [Rank 0] step:9501/10000 train_time:402661ms step_avg:42.38ms +[2025-09-11 12:09:22] [Rank 0] step:9501/10000 train_time:402661ms step_avg:42.38ms +[2025-09-11 12:09:23] [Rank 0] step:9521/10000 train_time:403377ms step_avg:42.37ms +[2025-09-11 12:09:23] [Rank 0] step:9521/10000 train_time:403377ms step_avg:42.37ms +[2025-09-11 12:09:24] [Rank 0] step:9541/10000 train_time:404087ms step_avg:42.35ms +[2025-09-11 12:09:24] [Rank 0] step:9541/10000 train_time:404087ms step_avg:42.35ms +[2025-09-11 12:09:24] [Rank 0] step:9561/10000 train_time:404799ms step_avg:42.34ms +[2025-09-11 12:09:24] [Rank 0] step:9561/10000 train_time:404799ms step_avg:42.34ms +[2025-09-11 12:09:25] [Rank 0] step:9581/10000 train_time:405514ms step_avg:42.32ms +[2025-09-11 12:09:25] [Rank 0] step:9581/10000 train_time:405514ms step_avg:42.32ms +[2025-09-11 12:09:26] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 12:09:26] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 12:09:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 12:09:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 12:09:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 12:09:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 12:09:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:09:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:09:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 12:09:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 12:09:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 12:09:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 12:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 12:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 12:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 12:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 12:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 12:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 12:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 12:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 12:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 12:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 12:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 12:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 12:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 12:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 12:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 12:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 12:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 12:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 12:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 12:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 12:09:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 12:09:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 12:09:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 12:09:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 12:09:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 12:09:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 12:09:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 12:09:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 12:09:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 12:09:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 12:09:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 12:09:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 12:09:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 12:09:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 12:09:37] [Rank 0] PRINT: step:9600/10000 val_loss:5.2312 total_sharp:5.3037e-03 L1_sharp:3.4642e-02 L2_sharp:4.2990e-02 L3_sharp:6.0248e-02 L4_sharp:8.3971e-02 L5_sharp:1.1007e-01 L6_sharp:2.4175e-01 L7_sharp:3.2112e-01 L8_sharp:5.3534e-01 L9_sharp:1.2899e+00 L10_sharp:1.8796e+00 L11_sharp:1.5285e+00 L12_sharp:1.6688e+00 total_fnorm:3.0664e-01 total_l1_linf:1.0150e+02 total_spectral:1.5332e-01 L1_fnorm:3.7079e-03 L2_fnorm:3.7231e-03 L3_fnorm:3.7689e-03 L4_fnorm:3.7994e-03 L5_fnorm:3.8452e-03 L6_fnorm:3.7994e-03 L7_fnorm:3.7994e-03 L8_fnorm:3.8605e-03 L9_fnorm:3.8757e-03 L10_fnorm:3.8147e-03 L11_fnorm:3.7842e-03 L12_fnorm:3.5858e-03 L1_l1linf:4.4060e-04 L2_l1linf:4.2534e-04 L3_l1linf:4.4250e-04 L4_l1linf:4.4250e-04 L5_l1linf:4.7112e-04 L6_l1linf:4.6730e-04 L7_l1linf:4.9210e-04 L8_l1linf:5.1498e-04 L9_l1linf:5.4550e-04 L10_l1linf:5.9128e-04 L11_l1linf:6.0654e-04 L12_l1linf:5.6458e-04 L1_spectral:8.6212e-05 L2_spectral:8.6392e-05 L3_spectral:8.8042e-05 L4_spectral:8.8842e-05 L5_spectral:8.8370e-05 L6_spectral:8.4629e-05 L7_spectral:8.4631e-05 L8_spectral:8.5810e-05 L9_spectral:8.2525e-05 L10_spectral:7.9398e-05 L11_spectral:7.8446e-05 L12_spectral:6.8580e-05 train_time:406203ms step_avg:42.31ms +[2025-09-11 12:09:37] [Rank 0] PRINT: step:9600/10000 
val_loss:5.2312 total_sharp:5.3037e-03 L1_sharp:3.4642e-02 L2_sharp:4.2990e-02 L3_sharp:6.0248e-02 L4_sharp:8.3971e-02 L5_sharp:1.1007e-01 L6_sharp:2.4175e-01 L7_sharp:3.2112e-01 L8_sharp:5.3534e-01 L9_sharp:1.2899e+00 L10_sharp:1.8796e+00 L11_sharp:1.5285e+00 L12_sharp:1.6688e+00 total_fnorm:3.0664e-01 total_l1_linf:1.0150e+02 total_spectral:1.5332e-01 L1_fnorm:3.7079e-03 L2_fnorm:3.7231e-03 L3_fnorm:3.7689e-03 L4_fnorm:3.7994e-03 L5_fnorm:3.8452e-03 L6_fnorm:3.7994e-03 L7_fnorm:3.7994e-03 L8_fnorm:3.8605e-03 L9_fnorm:3.8757e-03 L10_fnorm:3.8147e-03 L11_fnorm:3.7842e-03 L12_fnorm:3.5858e-03 L1_l1linf:4.4060e-04 L2_l1linf:4.2534e-04 L3_l1linf:4.4250e-04 L4_l1linf:4.4250e-04 L5_l1linf:4.7112e-04 L6_l1linf:4.6730e-04 L7_l1linf:4.9210e-04 L8_l1linf:5.1498e-04 L9_l1linf:5.4550e-04 L10_l1linf:5.9128e-04 L11_l1linf:6.0654e-04 L12_l1linf:5.6458e-04 L1_spectral:8.6212e-05 L2_spectral:8.6392e-05 L3_spectral:8.8042e-05 L4_spectral:8.8842e-05 L5_spectral:8.8370e-05 L6_spectral:8.4629e-05 L7_spectral:8.4631e-05 L8_spectral:8.5810e-05 L9_spectral:8.2525e-05 L10_spectral:7.9398e-05 L11_spectral:7.8446e-05 L12_spectral:6.8580e-05 train_time:406203ms step_avg:42.31ms +[2025-09-11 12:09:39] [Rank 0] step:9601/10000 train_time:408216ms step_avg:42.52ms +[2025-09-11 12:09:39] [Rank 0] step:9601/10000 train_time:408216ms step_avg:42.52ms +[2025-09-11 12:09:39] [Rank 0] step:9621/10000 train_time:408947ms step_avg:42.51ms +[2025-09-11 12:09:39] [Rank 0] step:9621/10000 train_time:408947ms step_avg:42.51ms +[2025-09-11 12:09:40] [Rank 0] step:9641/10000 train_time:409664ms step_avg:42.49ms +[2025-09-11 12:09:40] [Rank 0] step:9641/10000 train_time:409664ms step_avg:42.49ms +[2025-09-11 12:09:41] [Rank 0] step:9661/10000 train_time:410388ms step_avg:42.48ms +[2025-09-11 12:09:41] [Rank 0] step:9661/10000 train_time:410388ms step_avg:42.48ms +[2025-09-11 12:09:42] [Rank 0] step:9681/10000 train_time:411104ms step_avg:42.47ms +[2025-09-11 12:09:42] [Rank 0] step:9681/10000 
train_time:411104ms step_avg:42.47ms +[2025-09-11 12:09:42] [Rank 0] step:9701/10000 train_time:411822ms step_avg:42.45ms +[2025-09-11 12:09:42] [Rank 0] step:9701/10000 train_time:411822ms step_avg:42.45ms +[2025-09-11 12:09:43] [Rank 0] step:9721/10000 train_time:412545ms step_avg:42.44ms +[2025-09-11 12:09:43] [Rank 0] step:9721/10000 train_time:412545ms step_avg:42.44ms +[2025-09-11 12:09:44] [Rank 0] step:9741/10000 train_time:413264ms step_avg:42.43ms +[2025-09-11 12:09:44] [Rank 0] step:9741/10000 train_time:413264ms step_avg:42.43ms +[2025-09-11 12:09:45] [Rank 0] step:9761/10000 train_time:413984ms step_avg:42.41ms +[2025-09-11 12:09:45] [Rank 0] step:9761/10000 train_time:413984ms step_avg:42.41ms +[2025-09-11 12:09:45] [Rank 0] step:9781/10000 train_time:414701ms step_avg:42.40ms +[2025-09-11 12:09:45] [Rank 0] step:9781/10000 train_time:414701ms step_avg:42.40ms +[2025-09-11 12:09:46] [Rank 0] step:9801/10000 train_time:415425ms step_avg:42.39ms +[2025-09-11 12:09:46] [Rank 0] step:9801/10000 train_time:415425ms step_avg:42.39ms +[2025-09-11 12:09:47] [Rank 0] step:9821/10000 train_time:416145ms step_avg:42.37ms +[2025-09-11 12:09:47] [Rank 0] step:9821/10000 train_time:416145ms step_avg:42.37ms +[2025-09-11 12:09:47] [Rank 0] step:9841/10000 train_time:416867ms step_avg:42.36ms +[2025-09-11 12:09:47] [Rank 0] step:9841/10000 train_time:416867ms step_avg:42.36ms +[2025-09-11 12:09:48] [Rank 0] step:9861/10000 train_time:417586ms step_avg:42.35ms +[2025-09-11 12:09:48] [Rank 0] step:9861/10000 train_time:417586ms step_avg:42.35ms +[2025-09-11 12:09:49] [Rank 0] step:9881/10000 train_time:418308ms step_avg:42.33ms +[2025-09-11 12:09:49] [Rank 0] step:9881/10000 train_time:418308ms step_avg:42.33ms +[2025-09-11 12:09:50] [Rank 0] step:9901/10000 train_time:419025ms step_avg:42.32ms +[2025-09-11 12:09:50] [Rank 0] step:9901/10000 train_time:419025ms step_avg:42.32ms +[2025-09-11 12:09:50] [Rank 0] step:9921/10000 train_time:419744ms step_avg:42.31ms 
+[2025-09-11 12:09:50] [Rank 0] step:9921/10000 train_time:419744ms step_avg:42.31ms +[2025-09-11 12:09:51] [Rank 0] step:9941/10000 train_time:420467ms step_avg:42.30ms +[2025-09-11 12:09:51] [Rank 0] step:9941/10000 train_time:420467ms step_avg:42.30ms +[2025-09-11 12:09:52] [Rank 0] step:9961/10000 train_time:421192ms step_avg:42.28ms +[2025-09-11 12:09:52] [Rank 0] step:9961/10000 train_time:421192ms step_avg:42.28ms +[2025-09-11 12:09:52] [Rank 0] step:9981/10000 train_time:421913ms step_avg:42.27ms +[2025-09-11 12:09:52] [Rank 0] step:9981/10000 train_time:421913ms step_avg:42.27ms +[2025-09-11 12:09:53] [Rank 0] step:10000/10000 train_time:422605ms step_avg:42.26ms +[2025-09-11 12:09:53] [Rank 0] step:10000/10000 train_time:422605ms step_avg:42.26ms +[2025-09-11 12:09:53] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 12:09:53] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 12:09:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 12:09:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 12:09:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 12:09:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 12:09:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:09:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:09:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 12:09:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 12:09:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 12:09:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 12:10:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 12:10:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 12:10:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 12:10:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 12:10:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 12:10:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 12:10:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 12:10:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 12:10:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 12:10:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 12:10:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 12:10:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 12:10:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 12:10:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 12:10:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 12:10:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 12:10:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 12:10:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 12:10:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 12:10:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 12:10:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 12:10:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 12:10:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 12:10:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 12:10:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 12:10:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 12:10:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 12:10:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 12:10:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 12:10:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 12:10:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 12:10:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 12:10:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 12:10:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:10:04] [Rank 0] PRINT: step:10000/10000 val_loss:5.2312 total_sharp:3.7900e-03 L1_sharp:2.0736e-02 L2_sharp:3.1622e-02 L3_sharp:4.9904e-02 L4_sharp:6.8605e-02 L5_sharp:1.1031e-01 L6_sharp:2.1712e-01 L7_sharp:2.2060e-01 L8_sharp:3.8685e-01 L9_sharp:7.7067e-01 L10_sharp:1.3863e+00 L11_sharp:1.1601e+00 L12_sharp:1.6743e+00 total_fnorm:1.1475e-01 total_l1_linf:2.7750e+01 total_spectral:5.7373e-02 L1_fnorm:1.4343e-03 L2_fnorm:1.4572e-03 L3_fnorm:1.4725e-03 L4_fnorm:1.4877e-03 L5_fnorm:1.4877e-03 L6_fnorm:1.4801e-03 L7_fnorm:1.4877e-03 L8_fnorm:1.5030e-03 L9_fnorm:1.5030e-03 L10_fnorm:1.4877e-03 L11_fnorm:1.4725e-03 L12_fnorm:1.3962e-03 L1_l1linf:1.3924e-04 L2_l1linf:1.3161e-04 L3_l1linf:1.3828e-04 L4_l1linf:1.6117e-04 L5_l1linf:1.3924e-04 L6_l1linf:1.3733e-04 L7_l1linf:1.5926e-04 L8_l1linf:1.4973e-04 L9_l1linf:1.6880e-04 L10_l1linf:1.7738e-04 L11_l1linf:1.7643e-04 L12_l1linf:1.7643e-04 L1_spectral:3.4502e-05 L2_spectral:3.5114e-05 L3_spectral:3.4706e-05 L4_spectral:3.5119e-05 L5_spectral:3.4274e-05 L6_spectral:3.3529e-05 L7_spectral:3.3989e-05 L8_spectral:3.4336e-05 L9_spectral:3.3493e-05 L10_spectral:3.1457e-05 L11_spectral:3.1608e-05 L12_spectral:2.7645e-05 train_time:422626ms step_avg:42.26ms +[2025-09-11 12:10:04] [Rank 0] PRINT: step:10000/10000 val_loss:5.2312 total_sharp:3.7900e-03 L1_sharp:2.0736e-02 L2_sharp:3.1622e-02 L3_sharp:4.9904e-02 L4_sharp:6.8605e-02 L5_sharp:1.1031e-01 L6_sharp:2.1712e-01 L7_sharp:2.2060e-01 L8_sharp:3.8685e-01 L9_sharp:7.7067e-01 L10_sharp:1.3863e+00 L11_sharp:1.1601e+00 L12_sharp:1.6743e+00 total_fnorm:1.1475e-01 total_l1_linf:2.7750e+01 total_spectral:5.7373e-02 L1_fnorm:1.4343e-03 L2_fnorm:1.4572e-03 L3_fnorm:1.4725e-03 L4_fnorm:1.4877e-03 L5_fnorm:1.4877e-03 L6_fnorm:1.4801e-03 L7_fnorm:1.4877e-03 L8_fnorm:1.5030e-03 L9_fnorm:1.5030e-03 L10_fnorm:1.4877e-03 L11_fnorm:1.4725e-03 L12_fnorm:1.3962e-03 L1_l1linf:1.3924e-04 L2_l1linf:1.3161e-04 L3_l1linf:1.3828e-04 L4_l1linf:1.6117e-04 L5_l1linf:1.3924e-04 
L6_l1linf:1.3733e-04 L7_l1linf:1.5926e-04 L8_l1linf:1.4973e-04 L9_l1linf:1.6880e-04 L10_l1linf:1.7738e-04 L11_l1linf:1.7643e-04 L12_l1linf:1.7643e-04 L1_spectral:3.4502e-05 L2_spectral:3.5114e-05 L3_spectral:3.4706e-05 L4_spectral:3.5119e-05 L5_spectral:3.4274e-05 L6_spectral:3.3529e-05 L7_spectral:3.3989e-05 L8_spectral:3.4336e-05 L9_spectral:3.3493e-05 L10_spectral:3.1457e-05 L11_spectral:3.1608e-05 L12_spectral:2.7645e-05 train_time:422626ms step_avg:42.26ms +[2025-09-11 12:10:04] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 12:10:04 2025 --- +[2025-09-11 12:10:04] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 12:10:04 2025 --- +[2025-09-11 12:10:04] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 12:10:04] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.001_seed_42/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.001_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..681e0c61c0ff622bd4b76c44161f473e04f44ab3 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.001_seed_42/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.002, + "muon_lr": 0.001, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "d3022089-cce3-4b89-89e0-a3b91cb980b8", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.001_seed_42/training_log_d3022089-cce3-4b89-89e0-a3b91cb980b8.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.001_seed_42/training_log_d3022089-cce3-4b89-89e0-a3b91cb980b8.txt new file mode 100644 index 0000000000000000000000000000000000000000..9b3e0272099d2cfe9a7b14d4cf1a0932afddb388 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.001_seed_42/training_log_d3022089-cce3-4b89-89e0-a3b91cb980b8.txt @@ -0,0 +1,4264 @@ +[2025-09-11 12:24:17] [Rank 0] PRINT: --- Script Start: Thu Sep 11 12:24:17 2025 --- +[2025-09-11 12:24:17] [Rank 0] PRINT: --- Script Start: Thu Sep 11 12:24:17 2025 --- +[2025-09-11 12:24:17] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002, muon_lr=0.001, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 12:24:17] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002, muon_lr=0.001, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 12:24:17] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 12:24:17] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 12:24:17] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-11 12:24:17] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-11 12:24:17] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.001_seed_42 +[2025-09-11 12:24:17] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.001_seed_42 +[2025-09-11 12:24:17] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses 
import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert 
header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." 
+ "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + 
train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 12:24:17] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 12:24:17] [Rank 0] PRINT: Constructing model... +[2025-09-11 12:24:17] [Rank 0] PRINT: Constructing model... +[2025-09-11 12:24:18] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 12:24:18] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 12:24:18] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 12:24:18] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 12:24:18] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 12:24:18] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 12:24:18] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 12:24:18] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 12:24:18] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 12:24:18] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 12:24:20] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 12:24:20] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 12:24:20] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 12:24:20] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 12:24:20] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 12:24:20] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 12:24:26] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 12:24:26] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 12:24:26] [Rank 0] PRINT: Starting warmup... +[2025-09-11 12:24:26] [Rank 0] PRINT: Starting warmup... +[2025-09-11 12:25:03] [Rank 0] PRINT: Warmup complete. +[2025-09-11 12:25:03] [Rank 0] PRINT: Warmup complete. +[2025-09-11 12:25:03] [Rank 0] PRINT: Starting training... +[2025-09-11 12:25:03] [Rank 0] PRINT: Starting training... 
+[2025-09-11 12:25:04] [Rank 0] step:21/10000 train_time:1165ms step_avg:55.47ms +[2025-09-11 12:25:04] [Rank 0] step:21/10000 train_time:1165ms step_avg:55.47ms +[2025-09-11 12:25:05] [Rank 0] step:41/10000 train_time:1977ms step_avg:48.22ms +[2025-09-11 12:25:05] [Rank 0] step:41/10000 train_time:1977ms step_avg:48.22ms +[2025-09-11 12:25:06] [Rank 0] step:61/10000 train_time:2708ms step_avg:44.40ms +[2025-09-11 12:25:06] [Rank 0] step:61/10000 train_time:2708ms step_avg:44.40ms +[2025-09-11 12:25:07] [Rank 0] step:81/10000 train_time:3439ms step_avg:42.46ms +[2025-09-11 12:25:07] [Rank 0] step:81/10000 train_time:3439ms step_avg:42.46ms +[2025-09-11 12:25:07] [Rank 0] step:101/10000 train_time:4171ms step_avg:41.29ms +[2025-09-11 12:25:07] [Rank 0] step:101/10000 train_time:4171ms step_avg:41.29ms +[2025-09-11 12:25:08] [Rank 0] step:121/10000 train_time:4902ms step_avg:40.51ms +[2025-09-11 12:25:08] [Rank 0] step:121/10000 train_time:4902ms step_avg:40.51ms +[2025-09-11 12:25:09] [Rank 0] step:141/10000 train_time:5632ms step_avg:39.95ms +[2025-09-11 12:25:09] [Rank 0] step:141/10000 train_time:5632ms step_avg:39.95ms +[2025-09-11 12:25:09] [Rank 0] step:161/10000 train_time:6363ms step_avg:39.52ms +[2025-09-11 12:25:09] [Rank 0] step:161/10000 train_time:6363ms step_avg:39.52ms +[2025-09-11 12:25:10] [Rank 0] step:181/10000 train_time:7092ms step_avg:39.18ms +[2025-09-11 12:25:10] [Rank 0] step:181/10000 train_time:7092ms step_avg:39.18ms +[2025-09-11 12:25:11] [Rank 0] step:201/10000 train_time:7823ms step_avg:38.92ms +[2025-09-11 12:25:11] [Rank 0] step:201/10000 train_time:7823ms step_avg:38.92ms +[2025-09-11 12:25:12] [Rank 0] step:221/10000 train_time:8554ms step_avg:38.70ms +[2025-09-11 12:25:12] [Rank 0] step:221/10000 train_time:8554ms step_avg:38.70ms +[2025-09-11 12:25:12] [Rank 0] step:241/10000 train_time:9285ms step_avg:38.53ms +[2025-09-11 12:25:12] [Rank 0] step:241/10000 train_time:9285ms step_avg:38.53ms +[2025-09-11 12:25:13] [Rank 0] 
step:261/10000 train_time:10015ms step_avg:38.37ms +[2025-09-11 12:25:13] [Rank 0] step:261/10000 train_time:10015ms step_avg:38.37ms +[2025-09-11 12:25:14] [Rank 0] step:281/10000 train_time:10745ms step_avg:38.24ms +[2025-09-11 12:25:14] [Rank 0] step:281/10000 train_time:10745ms step_avg:38.24ms +[2025-09-11 12:25:15] [Rank 0] step:301/10000 train_time:11477ms step_avg:38.13ms +[2025-09-11 12:25:15] [Rank 0] step:301/10000 train_time:11477ms step_avg:38.13ms +[2025-09-11 12:25:15] [Rank 0] step:321/10000 train_time:12207ms step_avg:38.03ms +[2025-09-11 12:25:15] [Rank 0] step:321/10000 train_time:12207ms step_avg:38.03ms +[2025-09-11 12:25:16] [Rank 0] step:341/10000 train_time:12938ms step_avg:37.94ms +[2025-09-11 12:25:16] [Rank 0] step:341/10000 train_time:12938ms step_avg:37.94ms +[2025-09-11 12:25:17] [Rank 0] step:361/10000 train_time:13669ms step_avg:37.86ms +[2025-09-11 12:25:17] [Rank 0] step:361/10000 train_time:13669ms step_avg:37.86ms +[2025-09-11 12:25:17] [Rank 0] step:381/10000 train_time:14399ms step_avg:37.79ms +[2025-09-11 12:25:17] [Rank 0] step:381/10000 train_time:14399ms step_avg:37.79ms +[2025-09-11 12:25:18] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 12:25:18] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 12:25:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 12:25:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 12:25:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 12:25:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 12:25:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:25:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 12:25:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 12:25:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 12:25:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 12:25:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 12:26:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 12:26:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 12:26:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 12:26:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 12:26:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 12:26:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 12:26:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 12:26:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 12:26:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 12:26:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 12:26:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 12:26:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 12:26:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 12:26:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 12:26:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 12:26:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 12:26:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 12:26:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 12:26:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 12:26:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 12:26:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 12:26:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 12:26:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 12:26:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 12:26:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 12:26:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 12:26:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 12:26:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 12:26:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 12:26:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 12:26:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 12:26:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 12:26:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 12:26:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:26:05] [Rank 0] PRINT: step:400/10000 val_loss:6.8180 total_sharp:5.5018e-03 L1_sharp:1.4131e-01 L2_sharp:1.2988e-01 L3_sharp:1.3605e-01 L4_sharp:1.6945e-01 L5_sharp:2.1023e-01 L6_sharp:1.8811e-01 L7_sharp:2.4800e-01 L8_sharp:2.6132e-01 L9_sharp:3.8849e-01 L10_sharp:4.6141e-01 L11_sharp:4.0153e-01 L12_sharp:3.4788e-01 total_fnorm:9.2845e+00 total_l1_linf:2.4077e+04 total_spectral:4.6427e+00 L1_fnorm:1.2007e-01 L2_fnorm:1.2011e-01 L3_fnorm:1.1979e-01 L4_fnorm:1.1883e-01 L5_fnorm:1.1848e-01 L6_fnorm:1.1667e-01 L7_fnorm:1.1541e-01 L8_fnorm:1.1287e-01 L9_fnorm:1.1015e-01 L10_fnorm:1.0377e-01 L11_fnorm:9.9322e-02 L12_fnorm:9.6120e-02 L1_l1linf:4.4955e-02 L2_l1linf:4.4813e-02 L3_l1linf:4.4952e-02 L4_l1linf:4.4798e-02 L5_l1linf:4.4551e-02 L6_l1linf:4.3935e-02 L7_l1linf:4.4093e-02 L8_l1linf:4.3004e-02 L9_l1linf:4.1883e-02 L10_l1linf:4.0383e-02 L11_l1linf:3.8339e-02 L12_l1linf:3.6462e-02 L1_spectral:1.2048e-03 L2_spectral:1.2051e-03 L3_spectral:1.2049e-03 L4_spectral:1.2051e-03 L5_spectral:1.2050e-03 L6_spectral:1.2047e-03 L7_spectral:1.2048e-03 L8_spectral:1.2055e-03 L9_spectral:1.2047e-03 L10_spectral:1.2044e-03 L11_spectral:1.2039e-03 L12_spectral:1.2044e-03 train_time:15110ms step_avg:37.77ms +[2025-09-11 12:26:05] [Rank 0] PRINT: step:400/10000 val_loss:6.8180 total_sharp:5.5018e-03 L1_sharp:1.4131e-01 L2_sharp:1.2988e-01 L3_sharp:1.3605e-01 L4_sharp:1.6945e-01 L5_sharp:2.1023e-01 L6_sharp:1.8811e-01 L7_sharp:2.4800e-01 L8_sharp:2.6132e-01 L9_sharp:3.8849e-01 L10_sharp:4.6141e-01 L11_sharp:4.0153e-01 L12_sharp:3.4788e-01 total_fnorm:9.2845e+00 total_l1_linf:2.4077e+04 total_spectral:4.6427e+00 L1_fnorm:1.2007e-01 L2_fnorm:1.2011e-01 L3_fnorm:1.1979e-01 L4_fnorm:1.1883e-01 L5_fnorm:1.1848e-01 L6_fnorm:1.1667e-01 L7_fnorm:1.1541e-01 L8_fnorm:1.1287e-01 L9_fnorm:1.1015e-01 L10_fnorm:1.0377e-01 L11_fnorm:9.9322e-02 L12_fnorm:9.6120e-02 L1_l1linf:4.4955e-02 L2_l1linf:4.4813e-02 L3_l1linf:4.4952e-02 L4_l1linf:4.4798e-02 L5_l1linf:4.4551e-02 
L6_l1linf:4.3935e-02 L7_l1linf:4.4093e-02 L8_l1linf:4.3004e-02 L9_l1linf:4.1883e-02 L10_l1linf:4.0383e-02 L11_l1linf:3.8339e-02 L12_l1linf:3.6462e-02 L1_spectral:1.2048e-03 L2_spectral:1.2051e-03 L3_spectral:1.2049e-03 L4_spectral:1.2051e-03 L5_spectral:1.2050e-03 L6_spectral:1.2047e-03 L7_spectral:1.2048e-03 L8_spectral:1.2055e-03 L9_spectral:1.2047e-03 L10_spectral:1.2044e-03 L11_spectral:1.2039e-03 L12_spectral:1.2044e-03 train_time:15110ms step_avg:37.77ms +[2025-09-11 12:26:35] [Rank 0] step:401/10000 train_time:45038ms step_avg:112.32ms +[2025-09-11 12:26:35] [Rank 0] step:401/10000 train_time:45038ms step_avg:112.32ms +[2025-09-11 12:26:37] [Rank 0] step:421/10000 train_time:47310ms step_avg:112.38ms +[2025-09-11 12:26:37] [Rank 0] step:421/10000 train_time:47310ms step_avg:112.38ms +[2025-09-11 12:26:38] [Rank 0] step:441/10000 train_time:47952ms step_avg:108.73ms +[2025-09-11 12:26:38] [Rank 0] step:441/10000 train_time:47952ms step_avg:108.73ms +[2025-09-11 12:26:38] [Rank 0] step:461/10000 train_time:48594ms step_avg:105.41ms +[2025-09-11 12:26:38] [Rank 0] step:461/10000 train_time:48594ms step_avg:105.41ms +[2025-09-11 12:26:39] [Rank 0] step:481/10000 train_time:49236ms step_avg:102.36ms +[2025-09-11 12:26:39] [Rank 0] step:481/10000 train_time:49236ms step_avg:102.36ms +[2025-09-11 12:26:40] [Rank 0] step:501/10000 train_time:49878ms step_avg:99.56ms +[2025-09-11 12:26:40] [Rank 0] step:501/10000 train_time:49878ms step_avg:99.56ms +[2025-09-11 12:26:40] [Rank 0] step:521/10000 train_time:50519ms step_avg:96.97ms +[2025-09-11 12:26:40] [Rank 0] step:521/10000 train_time:50519ms step_avg:96.97ms +[2025-09-11 12:26:41] [Rank 0] step:541/10000 train_time:51161ms step_avg:94.57ms +[2025-09-11 12:26:41] [Rank 0] step:541/10000 train_time:51161ms step_avg:94.57ms +[2025-09-11 12:26:41] [Rank 0] step:561/10000 train_time:51804ms step_avg:92.34ms +[2025-09-11 12:26:41] [Rank 0] step:561/10000 train_time:51804ms step_avg:92.34ms +[2025-09-11 12:26:42] [Rank 
0] step:581/10000 train_time:52444ms step_avg:90.27ms +[2025-09-11 12:26:42] [Rank 0] step:581/10000 train_time:52444ms step_avg:90.27ms +[2025-09-11 12:26:43] [Rank 0] step:601/10000 train_time:53085ms step_avg:88.33ms +[2025-09-11 12:26:43] [Rank 0] step:601/10000 train_time:53085ms step_avg:88.33ms +[2025-09-11 12:26:43] [Rank 0] step:621/10000 train_time:53725ms step_avg:86.51ms +[2025-09-11 12:26:43] [Rank 0] step:621/10000 train_time:53725ms step_avg:86.51ms +[2025-09-11 12:26:44] [Rank 0] step:641/10000 train_time:54366ms step_avg:84.81ms +[2025-09-11 12:26:44] [Rank 0] step:641/10000 train_time:54366ms step_avg:84.81ms +[2025-09-11 12:26:45] [Rank 0] step:661/10000 train_time:55007ms step_avg:83.22ms +[2025-09-11 12:26:45] [Rank 0] step:661/10000 train_time:55007ms step_avg:83.22ms +[2025-09-11 12:26:45] [Rank 0] step:681/10000 train_time:55647ms step_avg:81.71ms +[2025-09-11 12:26:45] [Rank 0] step:681/10000 train_time:55647ms step_avg:81.71ms +[2025-09-11 12:26:46] [Rank 0] step:701/10000 train_time:56288ms step_avg:80.30ms +[2025-09-11 12:26:46] [Rank 0] step:701/10000 train_time:56288ms step_avg:80.30ms +[2025-09-11 12:26:47] [Rank 0] step:721/10000 train_time:56929ms step_avg:78.96ms +[2025-09-11 12:26:47] [Rank 0] step:721/10000 train_time:56929ms step_avg:78.96ms +[2025-09-11 12:26:47] [Rank 0] step:741/10000 train_time:57569ms step_avg:77.69ms +[2025-09-11 12:26:47] [Rank 0] step:741/10000 train_time:57569ms step_avg:77.69ms +[2025-09-11 12:26:48] [Rank 0] step:761/10000 train_time:58215ms step_avg:76.50ms +[2025-09-11 12:26:48] [Rank 0] step:761/10000 train_time:58215ms step_avg:76.50ms +[2025-09-11 12:26:48] [Rank 0] step:781/10000 train_time:58861ms step_avg:75.37ms +[2025-09-11 12:26:48] [Rank 0] step:781/10000 train_time:58861ms step_avg:75.37ms +[2025-09-11 12:26:49] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 12:26:49] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 12:26:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 12:26:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 12:27:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 12:27:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 12:27:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:27:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:27:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 12:27:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 12:27:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 12:27:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 12:27:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 12:27:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 12:27:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 12:27:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 12:27:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 12:27:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 12:27:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 12:27:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 12:27:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 12:27:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 12:27:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 12:27:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 12:27:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 12:27:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 12:27:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 12:27:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 12:27:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 12:27:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 12:27:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 12:27:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 12:27:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 12:27:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 12:27:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 12:27:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 12:27:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 12:27:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 12:27:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 12:27:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 12:27:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... 
+[2025-09-11 12:27:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 12:27:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 12:27:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 12:27:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 12:27:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 12:27:32] [Rank 0] PRINT: step:800/10000 val_loss:6.3843 total_sharp:2.4835e-02 L1_sharp:2.7039e-01 L2_sharp:2.8568e-01 L3_sharp:2.7245e-01 L4_sharp:3.2273e-01 L5_sharp:3.8515e-01 L6_sharp:4.6942e-01 L7_sharp:5.9419e-01 L8_sharp:1.0640e+00 L9_sharp:1.2490e+00 L10_sharp:1.5653e+00 L11_sharp:1.9305e+00 L12_sharp:2.6150e+00 total_fnorm:6.7188e+00 total_l1_linf:8.8960e+03 total_spectral:3.3750e+00 L1_fnorm:1.1230e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1133e-01 L6_fnorm:1.1328e-01 L7_fnorm:1.1279e-01 L8_fnorm:1.0596e-01 L9_fnorm:1.0889e-01 L10_fnorm:1.0400e-01 L11_fnorm:9.6191e-02 L12_fnorm:8.6914e-02 L1_l1linf:4.1748e-02 L2_l1linf:4.1748e-02 L3_l1linf:4.1748e-02 L4_l1linf:4.1748e-02 L5_l1linf:4.1992e-02 L6_l1linf:4.1504e-02 L7_l1linf:4.1260e-02 L8_l1linf:4.1016e-02 L9_l1linf:3.9551e-02 L10_l1linf:3.7598e-02 L11_l1linf:3.3447e-02 L12_l1linf:2.7222e-02 L1_spectral:1.6076e-03 L2_spectral:1.6074e-03 L3_spectral:1.6020e-03 L4_spectral:1.6112e-03 L5_spectral:1.6088e-03 L6_spectral:1.6171e-03 L7_spectral:1.6112e-03 L8_spectral:1.5812e-03 L9_spectral:1.6016e-03 L10_spectral:1.5467e-03 L11_spectral:1.5228e-03 L12_spectral:1.4942e-03 train_time:59489ms step_avg:74.36ms +[2025-09-11 12:27:32] [Rank 0] PRINT: step:800/10000 val_loss:6.3843 total_sharp:2.4835e-02 L1_sharp:2.7039e-01 L2_sharp:2.8568e-01 L3_sharp:2.7245e-01 L4_sharp:3.2273e-01 L5_sharp:3.8515e-01 L6_sharp:4.6942e-01 L7_sharp:5.9419e-01 L8_sharp:1.0640e+00 
L9_sharp:1.2490e+00 L10_sharp:1.5653e+00 L11_sharp:1.9305e+00 L12_sharp:2.6150e+00 total_fnorm:6.7188e+00 total_l1_linf:8.8960e+03 total_spectral:3.3750e+00 L1_fnorm:1.1230e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1133e-01 L6_fnorm:1.1328e-01 L7_fnorm:1.1279e-01 L8_fnorm:1.0596e-01 L9_fnorm:1.0889e-01 L10_fnorm:1.0400e-01 L11_fnorm:9.6191e-02 L12_fnorm:8.6914e-02 L1_l1linf:4.1748e-02 L2_l1linf:4.1748e-02 L3_l1linf:4.1748e-02 L4_l1linf:4.1748e-02 L5_l1linf:4.1992e-02 L6_l1linf:4.1504e-02 L7_l1linf:4.1260e-02 L8_l1linf:4.1016e-02 L9_l1linf:3.9551e-02 L10_l1linf:3.7598e-02 L11_l1linf:3.3447e-02 L12_l1linf:2.7222e-02 L1_spectral:1.6076e-03 L2_spectral:1.6074e-03 L3_spectral:1.6020e-03 L4_spectral:1.6112e-03 L5_spectral:1.6088e-03 L6_spectral:1.6171e-03 L7_spectral:1.6112e-03 L8_spectral:1.5812e-03 L9_spectral:1.6016e-03 L10_spectral:1.5467e-03 L11_spectral:1.5228e-03 L12_spectral:1.4942e-03 train_time:59489ms step_avg:74.36ms +[2025-09-11 12:27:34] [Rank 0] step:801/10000 train_time:61109ms step_avg:76.29ms +[2025-09-11 12:27:34] [Rank 0] step:801/10000 train_time:61109ms step_avg:76.29ms +[2025-09-11 12:27:34] [Rank 0] step:821/10000 train_time:61742ms step_avg:75.20ms +[2025-09-11 12:27:34] [Rank 0] step:821/10000 train_time:61742ms step_avg:75.20ms +[2025-09-11 12:27:35] [Rank 0] step:841/10000 train_time:62388ms step_avg:74.18ms +[2025-09-11 12:27:35] [Rank 0] step:841/10000 train_time:62388ms step_avg:74.18ms +[2025-09-11 12:27:36] [Rank 0] step:861/10000 train_time:63033ms step_avg:73.21ms +[2025-09-11 12:27:36] [Rank 0] step:861/10000 train_time:63033ms step_avg:73.21ms +[2025-09-11 12:27:36] [Rank 0] step:881/10000 train_time:63679ms step_avg:72.28ms +[2025-09-11 12:27:36] [Rank 0] step:881/10000 train_time:63679ms step_avg:72.28ms +[2025-09-11 12:27:37] [Rank 0] step:901/10000 train_time:64323ms step_avg:71.39ms +[2025-09-11 12:27:37] [Rank 0] step:901/10000 train_time:64323ms step_avg:71.39ms +[2025-09-11 12:27:38] [Rank 0] 
step:921/10000 train_time:64968ms step_avg:70.54ms +[2025-09-11 12:27:38] [Rank 0] step:921/10000 train_time:64968ms step_avg:70.54ms +[2025-09-11 12:27:38] [Rank 0] step:941/10000 train_time:65613ms step_avg:69.73ms +[2025-09-11 12:27:38] [Rank 0] step:941/10000 train_time:65613ms step_avg:69.73ms +[2025-09-11 12:27:39] [Rank 0] step:961/10000 train_time:66258ms step_avg:68.95ms +[2025-09-11 12:27:39] [Rank 0] step:961/10000 train_time:66258ms step_avg:68.95ms +[2025-09-11 12:27:40] [Rank 0] step:981/10000 train_time:66902ms step_avg:68.20ms +[2025-09-11 12:27:40] [Rank 0] step:981/10000 train_time:66902ms step_avg:68.20ms +[2025-09-11 12:27:40] [Rank 0] step:1001/10000 train_time:67547ms step_avg:67.48ms +[2025-09-11 12:27:40] [Rank 0] step:1001/10000 train_time:67547ms step_avg:67.48ms +[2025-09-11 12:27:41] [Rank 0] step:1021/10000 train_time:68191ms step_avg:66.79ms +[2025-09-11 12:27:41] [Rank 0] step:1021/10000 train_time:68191ms step_avg:66.79ms +[2025-09-11 12:27:42] [Rank 0] step:1041/10000 train_time:68836ms step_avg:66.13ms +[2025-09-11 12:27:42] [Rank 0] step:1041/10000 train_time:68836ms step_avg:66.13ms +[2025-09-11 12:27:42] [Rank 0] step:1061/10000 train_time:69481ms step_avg:65.49ms +[2025-09-11 12:27:42] [Rank 0] step:1061/10000 train_time:69481ms step_avg:65.49ms +[2025-09-11 12:27:43] [Rank 0] step:1081/10000 train_time:70125ms step_avg:64.87ms +[2025-09-11 12:27:43] [Rank 0] step:1081/10000 train_time:70125ms step_avg:64.87ms +[2025-09-11 12:27:44] [Rank 0] step:1101/10000 train_time:70770ms step_avg:64.28ms +[2025-09-11 12:27:44] [Rank 0] step:1101/10000 train_time:70770ms step_avg:64.28ms +[2025-09-11 12:27:44] [Rank 0] step:1121/10000 train_time:71415ms step_avg:63.71ms +[2025-09-11 12:27:44] [Rank 0] step:1121/10000 train_time:71415ms step_avg:63.71ms +[2025-09-11 12:27:45] [Rank 0] step:1141/10000 train_time:72059ms step_avg:63.15ms +[2025-09-11 12:27:45] [Rank 0] step:1141/10000 train_time:72059ms step_avg:63.15ms +[2025-09-11 12:27:45] 
[Rank 0] step:1161/10000 train_time:72704ms step_avg:62.62ms +[2025-09-11 12:27:45] [Rank 0] step:1161/10000 train_time:72704ms step_avg:62.62ms +[2025-09-11 12:27:46] [Rank 0] step:1181/10000 train_time:73348ms step_avg:62.11ms +[2025-09-11 12:27:46] [Rank 0] step:1181/10000 train_time:73348ms step_avg:62.11ms +[2025-09-11 12:27:47] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 12:27:47] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 12:27:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 12:27:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 12:27:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 12:27:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 12:27:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:27:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:27:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 12:27:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 12:27:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 12:27:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 12:27:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 12:27:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 12:27:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... 
+[2025-09-11 12:27:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 12:27:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 12:27:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 12:27:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 12:27:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 12:27:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 12:27:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 12:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 12:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 12:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 12:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 12:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 12:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 12:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 12:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 12:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 12:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 12:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 12:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 12:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... 
+[2025-09-11 12:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 12:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 12:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 12:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 12:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 12:27:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 12:27:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 12:27:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 12:27:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 12:27:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 12:27:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:27:58] [Rank 0] PRINT: step:1200/10000 val_loss:6.1177 total_sharp:1.4792e-02 L1_sharp:1.4099e-01 L2_sharp:1.3699e-01 L3_sharp:1.3877e-01 L4_sharp:1.6395e-01 L5_sharp:1.9418e-01 L6_sharp:2.3844e-01 L7_sharp:3.0528e-01 L8_sharp:4.5936e-01 L9_sharp:5.0721e-01 L10_sharp:6.1908e-01 L11_sharp:6.5305e-01 L12_sharp:8.4252e-01 total_fnorm:5.8438e+00 total_l1_linf:7.0720e+03 total_spectral:2.9219e+00 L1_fnorm:1.1523e-01 L2_fnorm:1.1621e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1621e-01 L8_fnorm:1.1328e-01 L9_fnorm:1.1572e-01 L10_fnorm:1.1475e-01 L11_fnorm:1.1230e-01 L12_fnorm:1.0400e-01 L1_l1linf:3.7354e-02 L2_l1linf:3.7598e-02 L3_l1linf:3.7109e-02 L4_l1linf:3.7842e-02 L5_l1linf:3.6865e-02 L6_l1linf:3.7109e-02 L7_l1linf:3.7354e-02 L8_l1linf:3.7598e-02 L9_l1linf:3.8330e-02 L10_l1linf:3.9551e-02 L11_l1linf:3.9062e-02 L12_l1linf:3.2471e-02 L1_spectral:1.5982e-03 L2_spectral:1.6197e-03 L3_spectral:1.6159e-03 L4_spectral:1.6238e-03 L5_spectral:1.6041e-03 L6_spectral:1.6076e-03 L7_spectral:1.6101e-03 L8_spectral:1.5947e-03 L9_spectral:1.6054e-03 L10_spectral:1.5752e-03 L11_spectral:1.5701e-03 L12_spectral:1.5962e-03 train_time:73975ms step_avg:61.65ms +[2025-09-11 12:27:58] [Rank 0] PRINT: step:1200/10000 val_loss:6.1177 total_sharp:1.4792e-02 L1_sharp:1.4099e-01 L2_sharp:1.3699e-01 L3_sharp:1.3877e-01 L4_sharp:1.6395e-01 L5_sharp:1.9418e-01 L6_sharp:2.3844e-01 L7_sharp:3.0528e-01 L8_sharp:4.5936e-01 L9_sharp:5.0721e-01 L10_sharp:6.1908e-01 L11_sharp:6.5305e-01 L12_sharp:8.4252e-01 total_fnorm:5.8438e+00 total_l1_linf:7.0720e+03 total_spectral:2.9219e+00 L1_fnorm:1.1523e-01 L2_fnorm:1.1621e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1621e-01 L8_fnorm:1.1328e-01 L9_fnorm:1.1572e-01 L10_fnorm:1.1475e-01 L11_fnorm:1.1230e-01 L12_fnorm:1.0400e-01 L1_l1linf:3.7354e-02 L2_l1linf:3.7598e-02 L3_l1linf:3.7109e-02 L4_l1linf:3.7842e-02 L5_l1linf:3.6865e-02 
L6_l1linf:3.7109e-02 L7_l1linf:3.7354e-02 L8_l1linf:3.7598e-02 L9_l1linf:3.8330e-02 L10_l1linf:3.9551e-02 L11_l1linf:3.9062e-02 L12_l1linf:3.2471e-02 L1_spectral:1.5982e-03 L2_spectral:1.6197e-03 L3_spectral:1.6159e-03 L4_spectral:1.6238e-03 L5_spectral:1.6041e-03 L6_spectral:1.6076e-03 L7_spectral:1.6101e-03 L8_spectral:1.5947e-03 L9_spectral:1.6054e-03 L10_spectral:1.5752e-03 L11_spectral:1.5701e-03 L12_spectral:1.5962e-03 train_time:73975ms step_avg:61.65ms +[2025-09-11 12:27:59] [Rank 0] step:1201/10000 train_time:75671ms step_avg:63.01ms +[2025-09-11 12:27:59] [Rank 0] step:1201/10000 train_time:75671ms step_avg:63.01ms +[2025-09-11 12:28:00] [Rank 0] step:1221/10000 train_time:76357ms step_avg:62.54ms +[2025-09-11 12:28:00] [Rank 0] step:1221/10000 train_time:76357ms step_avg:62.54ms +[2025-09-11 12:28:01] [Rank 0] step:1241/10000 train_time:77003ms step_avg:62.05ms +[2025-09-11 12:28:01] [Rank 0] step:1241/10000 train_time:77003ms step_avg:62.05ms +[2025-09-11 12:28:01] [Rank 0] step:1261/10000 train_time:77649ms step_avg:61.58ms +[2025-09-11 12:28:01] [Rank 0] step:1261/10000 train_time:77649ms step_avg:61.58ms +[2025-09-11 12:28:02] [Rank 0] step:1281/10000 train_time:78296ms step_avg:61.12ms +[2025-09-11 12:28:02] [Rank 0] step:1281/10000 train_time:78296ms step_avg:61.12ms +[2025-09-11 12:28:03] [Rank 0] step:1301/10000 train_time:78947ms step_avg:60.68ms +[2025-09-11 12:28:03] [Rank 0] step:1301/10000 train_time:78947ms step_avg:60.68ms +[2025-09-11 12:28:03] [Rank 0] step:1321/10000 train_time:79594ms step_avg:60.25ms +[2025-09-11 12:28:03] [Rank 0] step:1321/10000 train_time:79594ms step_avg:60.25ms +[2025-09-11 12:28:04] [Rank 0] step:1341/10000 train_time:80241ms step_avg:59.84ms +[2025-09-11 12:28:04] [Rank 0] step:1341/10000 train_time:80241ms step_avg:59.84ms +[2025-09-11 12:28:04] [Rank 0] step:1361/10000 train_time:80887ms step_avg:59.43ms +[2025-09-11 12:28:04] [Rank 0] step:1361/10000 train_time:80887ms step_avg:59.43ms +[2025-09-11 12:28:05] 
[Rank 0] step:1381/10000 train_time:81532ms step_avg:59.04ms +[2025-09-11 12:28:05] [Rank 0] step:1381/10000 train_time:81532ms step_avg:59.04ms +[2025-09-11 12:28:06] [Rank 0] step:1401/10000 train_time:82177ms step_avg:58.66ms +[2025-09-11 12:28:06] [Rank 0] step:1401/10000 train_time:82177ms step_avg:58.66ms +[2025-09-11 12:28:06] [Rank 0] step:1421/10000 train_time:82823ms step_avg:58.28ms +[2025-09-11 12:28:06] [Rank 0] step:1421/10000 train_time:82823ms step_avg:58.28ms +[2025-09-11 12:28:07] [Rank 0] step:1441/10000 train_time:83468ms step_avg:57.92ms +[2025-09-11 12:28:07] [Rank 0] step:1441/10000 train_time:83468ms step_avg:57.92ms +[2025-09-11 12:28:08] [Rank 0] step:1461/10000 train_time:84113ms step_avg:57.57ms +[2025-09-11 12:28:08] [Rank 0] step:1461/10000 train_time:84113ms step_avg:57.57ms +[2025-09-11 12:28:08] [Rank 0] step:1481/10000 train_time:84757ms step_avg:57.23ms +[2025-09-11 12:28:08] [Rank 0] step:1481/10000 train_time:84757ms step_avg:57.23ms +[2025-09-11 12:28:09] [Rank 0] step:1501/10000 train_time:85406ms step_avg:56.90ms +[2025-09-11 12:28:09] [Rank 0] step:1501/10000 train_time:85406ms step_avg:56.90ms +[2025-09-11 12:28:10] [Rank 0] step:1521/10000 train_time:86056ms step_avg:56.58ms +[2025-09-11 12:28:10] [Rank 0] step:1521/10000 train_time:86056ms step_avg:56.58ms +[2025-09-11 12:28:10] [Rank 0] step:1541/10000 train_time:86704ms step_avg:56.27ms +[2025-09-11 12:28:10] [Rank 0] step:1541/10000 train_time:86704ms step_avg:56.27ms +[2025-09-11 12:28:11] [Rank 0] step:1561/10000 train_time:87354ms step_avg:55.96ms +[2025-09-11 12:28:11] [Rank 0] step:1561/10000 train_time:87354ms step_avg:55.96ms +[2025-09-11 12:28:12] [Rank 0] step:1581/10000 train_time:88003ms step_avg:55.66ms +[2025-09-11 12:28:12] [Rank 0] step:1581/10000 train_time:88003ms step_avg:55.66ms +[2025-09-11 12:28:12] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 12:28:12] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 12:28:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 12:28:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 12:28:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 12:28:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 12:28:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:28:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:28:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 12:28:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 12:28:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 12:28:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 12:28:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 12:28:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 12:28:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 12:28:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 12:28:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 12:28:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 12:28:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 12:28:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 12:28:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 12:28:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 12:28:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 12:28:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 12:28:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 12:28:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 12:28:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 12:28:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 12:28:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 12:28:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 12:28:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 12:28:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 12:28:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 12:28:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 12:28:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 12:28:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 12:28:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 12:28:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 12:28:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 12:28:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 12:28:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 12:28:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 12:28:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 12:28:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 12:28:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 12:28:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 12:28:23] [Rank 0] PRINT: step:1600/10000 val_loss:5.9397 total_sharp:2.0204e-02 L1_sharp:1.6811e-01 L2_sharp:1.6822e-01 L3_sharp:1.7653e-01 L4_sharp:2.0475e-01 L5_sharp:2.7133e-01 L6_sharp:3.4562e-01 L7_sharp:4.6278e-01 L8_sharp:5.0932e-01 L9_sharp:5.6929e-01 L10_sharp:6.5301e-01 L11_sharp:6.6839e-01 L12_sharp:1.0097e+00 total_fnorm:5.3750e+00 total_l1_linf:6.1760e+03 total_spectral:2.7031e+00 L1_fnorm:1.1523e-01 L2_fnorm:1.1621e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1621e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1572e-01 L8_fnorm:1.1426e-01 L9_fnorm:1.1670e-01 L10_fnorm:1.1523e-01 L11_fnorm:1.1523e-01 L12_fnorm:1.0693e-01 L1_l1linf:3.5889e-02 L2_l1linf:3.5889e-02 L3_l1linf:3.5645e-02 L4_l1linf:3.6133e-02 L5_l1linf:3.5400e-02 L6_l1linf:3.5889e-02 L7_l1linf:3.6133e-02 L8_l1linf:3.6133e-02 L9_l1linf:3.6865e-02 L10_l1linf:3.7598e-02 L11_l1linf:3.7842e-02 L12_l1linf:3.0518e-02 L1_spectral:1.6013e-03 L2_spectral:1.6142e-03 L3_spectral:1.6127e-03 L4_spectral:1.6080e-03 L5_spectral:1.6059e-03 L6_spectral:1.6087e-03 L7_spectral:1.6026e-03 L8_spectral:1.6093e-03 L9_spectral:1.6119e-03 L10_spectral:1.5935e-03 L11_spectral:1.5822e-03 L12_spectral:1.5916e-03 train_time:88634ms step_avg:55.40ms +[2025-09-11 12:28:23] [Rank 0] PRINT: step:1600/10000 
val_loss:5.9397 total_sharp:2.0204e-02 L1_sharp:1.6811e-01 L2_sharp:1.6822e-01 L3_sharp:1.7653e-01 L4_sharp:2.0475e-01 L5_sharp:2.7133e-01 L6_sharp:3.4562e-01 L7_sharp:4.6278e-01 L8_sharp:5.0932e-01 L9_sharp:5.6929e-01 L10_sharp:6.5301e-01 L11_sharp:6.6839e-01 L12_sharp:1.0097e+00 total_fnorm:5.3750e+00 total_l1_linf:6.1760e+03 total_spectral:2.7031e+00 L1_fnorm:1.1523e-01 L2_fnorm:1.1621e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1621e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1572e-01 L8_fnorm:1.1426e-01 L9_fnorm:1.1670e-01 L10_fnorm:1.1523e-01 L11_fnorm:1.1523e-01 L12_fnorm:1.0693e-01 L1_l1linf:3.5889e-02 L2_l1linf:3.5889e-02 L3_l1linf:3.5645e-02 L4_l1linf:3.6133e-02 L5_l1linf:3.5400e-02 L6_l1linf:3.5889e-02 L7_l1linf:3.6133e-02 L8_l1linf:3.6133e-02 L9_l1linf:3.6865e-02 L10_l1linf:3.7598e-02 L11_l1linf:3.7842e-02 L12_l1linf:3.0518e-02 L1_spectral:1.6013e-03 L2_spectral:1.6142e-03 L3_spectral:1.6127e-03 L4_spectral:1.6080e-03 L5_spectral:1.6059e-03 L6_spectral:1.6087e-03 L7_spectral:1.6026e-03 L8_spectral:1.6093e-03 L9_spectral:1.6119e-03 L10_spectral:1.5935e-03 L11_spectral:1.5822e-03 L12_spectral:1.5916e-03 train_time:88634ms step_avg:55.40ms +[2025-09-11 12:28:24] [Rank 0] step:1601/10000 train_time:90321ms step_avg:56.42ms +[2025-09-11 12:28:24] [Rank 0] step:1601/10000 train_time:90321ms step_avg:56.42ms +[2025-09-11 12:28:25] [Rank 0] step:1621/10000 train_time:90976ms step_avg:56.12ms +[2025-09-11 12:28:25] [Rank 0] step:1621/10000 train_time:90976ms step_avg:56.12ms +[2025-09-11 12:28:26] [Rank 0] step:1641/10000 train_time:91629ms step_avg:55.84ms +[2025-09-11 12:28:26] [Rank 0] step:1641/10000 train_time:91629ms step_avg:55.84ms +[2025-09-11 12:28:26] [Rank 0] step:1661/10000 train_time:92279ms step_avg:55.56ms +[2025-09-11 12:28:26] [Rank 0] step:1661/10000 train_time:92279ms step_avg:55.56ms +[2025-09-11 12:28:27] [Rank 0] step:1681/10000 train_time:92930ms step_avg:55.28ms +[2025-09-11 12:28:27] [Rank 0] step:1681/10000 train_time:92930ms 
step_avg:55.28ms +[2025-09-11 12:28:27] [Rank 0] step:1701/10000 train_time:93580ms step_avg:55.01ms +[2025-09-11 12:28:27] [Rank 0] step:1701/10000 train_time:93580ms step_avg:55.01ms +[2025-09-11 12:28:28] [Rank 0] step:1721/10000 train_time:94233ms step_avg:54.75ms +[2025-09-11 12:28:28] [Rank 0] step:1721/10000 train_time:94233ms step_avg:54.75ms +[2025-09-11 12:28:29] [Rank 0] step:1741/10000 train_time:94883ms step_avg:54.50ms +[2025-09-11 12:28:29] [Rank 0] step:1741/10000 train_time:94883ms step_avg:54.50ms +[2025-09-11 12:28:29] [Rank 0] step:1761/10000 train_time:95533ms step_avg:54.25ms +[2025-09-11 12:28:29] [Rank 0] step:1761/10000 train_time:95533ms step_avg:54.25ms +[2025-09-11 12:28:30] [Rank 0] step:1781/10000 train_time:96184ms step_avg:54.01ms +[2025-09-11 12:28:30] [Rank 0] step:1781/10000 train_time:96184ms step_avg:54.01ms +[2025-09-11 12:28:31] [Rank 0] step:1801/10000 train_time:96834ms step_avg:53.77ms +[2025-09-11 12:28:31] [Rank 0] step:1801/10000 train_time:96834ms step_avg:53.77ms +[2025-09-11 12:28:31] [Rank 0] step:1821/10000 train_time:97484ms step_avg:53.53ms +[2025-09-11 12:28:31] [Rank 0] step:1821/10000 train_time:97484ms step_avg:53.53ms +[2025-09-11 12:28:32] [Rank 0] step:1841/10000 train_time:98134ms step_avg:53.30ms +[2025-09-11 12:28:32] [Rank 0] step:1841/10000 train_time:98134ms step_avg:53.30ms +[2025-09-11 12:28:33] [Rank 0] step:1861/10000 train_time:98784ms step_avg:53.08ms +[2025-09-11 12:28:33] [Rank 0] step:1861/10000 train_time:98784ms step_avg:53.08ms +[2025-09-11 12:28:33] [Rank 0] step:1881/10000 train_time:99434ms step_avg:52.86ms +[2025-09-11 12:28:33] [Rank 0] step:1881/10000 train_time:99434ms step_avg:52.86ms +[2025-09-11 12:28:34] [Rank 0] step:1901/10000 train_time:100084ms step_avg:52.65ms +[2025-09-11 12:28:34] [Rank 0] step:1901/10000 train_time:100084ms step_avg:52.65ms +[2025-09-11 12:28:35] [Rank 0] step:1921/10000 train_time:100735ms step_avg:52.44ms +[2025-09-11 12:28:35] [Rank 0] step:1921/10000 
train_time:100735ms step_avg:52.44ms +[2025-09-11 12:28:35] [Rank 0] step:1941/10000 train_time:101385ms step_avg:52.23ms +[2025-09-11 12:28:35] [Rank 0] step:1941/10000 train_time:101385ms step_avg:52.23ms +[2025-09-11 12:28:36] [Rank 0] step:1961/10000 train_time:102035ms step_avg:52.03ms +[2025-09-11 12:28:36] [Rank 0] step:1961/10000 train_time:102035ms step_avg:52.03ms +[2025-09-11 12:28:37] [Rank 0] step:1981/10000 train_time:102686ms step_avg:51.84ms +[2025-09-11 12:28:37] [Rank 0] step:1981/10000 train_time:102686ms step_avg:51.84ms +[2025-09-11 12:28:37] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 12:28:37] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 12:28:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 12:28:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 12:28:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 12:28:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 12:28:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:28:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:28:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 12:28:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 12:28:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 12:28:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 12:28:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 12:28:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 12:28:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 12:28:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 12:28:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 12:28:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 12:28:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 12:28:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 12:28:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 12:28:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 12:28:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 12:28:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 12:28:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 12:28:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 12:28:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 12:28:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 12:28:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 12:28:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 12:28:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 12:28:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 12:28:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 12:28:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 12:28:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 12:28:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 12:28:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 12:28:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 12:28:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 12:28:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 12:28:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 12:28:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 12:28:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 12:28:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 12:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 12:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:28:48] [Rank 0] PRINT: step:2000/10000 val_loss:5.7997 total_sharp:1.5895e-02 L1_sharp:7.6488e-02 L2_sharp:8.2155e-02 L3_sharp:8.2194e-02 L4_sharp:9.6742e-02 L5_sharp:1.3063e-01 L6_sharp:1.6234e-01 L7_sharp:2.2719e-01 L8_sharp:3.9429e-01 L9_sharp:5.0843e-01 L10_sharp:6.5990e-01 L11_sharp:7.6658e-01 L12_sharp:1.7557e+00 total_fnorm:5.4375e+00 total_l1_linf:6.2400e+03 total_spectral:2.7188e+00 L1_fnorm:1.1475e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1572e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1621e-01 L10_fnorm:1.1572e-01 L11_fnorm:1.1670e-01 L12_fnorm:1.0791e-01 L1_l1linf:3.4180e-02 L2_l1linf:3.4180e-02 L3_l1linf:3.3691e-02 L4_l1linf:3.4180e-02 L5_l1linf:3.3691e-02 L6_l1linf:3.3691e-02 L7_l1linf:3.3691e-02 L8_l1linf:3.4668e-02 L9_l1linf:3.5645e-02 L10_l1linf:3.6377e-02 L11_l1linf:3.7109e-02 L12_l1linf:2.9541e-02 L1_spectral:1.6011e-03 L2_spectral:1.6243e-03 L3_spectral:1.6320e-03 L4_spectral:1.6248e-03 L5_spectral:1.6204e-03 L6_spectral:1.6106e-03 L7_spectral:1.6279e-03 L8_spectral:1.6055e-03 L9_spectral:1.6012e-03 L10_spectral:1.5903e-03 L11_spectral:1.5977e-03 L12_spectral:1.6014e-03 train_time:103318ms step_avg:51.66ms +[2025-09-11 12:28:48] [Rank 0] PRINT: step:2000/10000 val_loss:5.7997 total_sharp:1.5895e-02 L1_sharp:7.6488e-02 L2_sharp:8.2155e-02 L3_sharp:8.2194e-02 L4_sharp:9.6742e-02 L5_sharp:1.3063e-01 L6_sharp:1.6234e-01 L7_sharp:2.2719e-01 L8_sharp:3.9429e-01 L9_sharp:5.0843e-01 L10_sharp:6.5990e-01 L11_sharp:7.6658e-01 L12_sharp:1.7557e+00 total_fnorm:5.4375e+00 total_l1_linf:6.2400e+03 total_spectral:2.7188e+00 L1_fnorm:1.1475e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1572e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1621e-01 L10_fnorm:1.1572e-01 L11_fnorm:1.1670e-01 L12_fnorm:1.0791e-01 L1_l1linf:3.4180e-02 L2_l1linf:3.4180e-02 L3_l1linf:3.3691e-02 L4_l1linf:3.4180e-02 L5_l1linf:3.3691e-02 
L6_l1linf:3.3691e-02 L7_l1linf:3.3691e-02 L8_l1linf:3.4668e-02 L9_l1linf:3.5645e-02 L10_l1linf:3.6377e-02 L11_l1linf:3.7109e-02 L12_l1linf:2.9541e-02 L1_spectral:1.6011e-03 L2_spectral:1.6243e-03 L3_spectral:1.6320e-03 L4_spectral:1.6248e-03 L5_spectral:1.6204e-03 L6_spectral:1.6106e-03 L7_spectral:1.6279e-03 L8_spectral:1.6055e-03 L9_spectral:1.6012e-03 L10_spectral:1.5903e-03 L11_spectral:1.5977e-03 L12_spectral:1.6014e-03 train_time:103318ms step_avg:51.66ms +[2025-09-11 12:28:49] [Rank 0] step:2001/10000 train_time:104913ms step_avg:52.43ms +[2025-09-11 12:28:49] [Rank 0] step:2001/10000 train_time:104913ms step_avg:52.43ms +[2025-09-11 12:28:50] [Rank 0] step:2021/10000 train_time:105552ms step_avg:52.23ms +[2025-09-11 12:28:50] [Rank 0] step:2021/10000 train_time:105552ms step_avg:52.23ms +[2025-09-11 12:28:51] [Rank 0] step:2041/10000 train_time:106202ms step_avg:52.03ms +[2025-09-11 12:28:51] [Rank 0] step:2041/10000 train_time:106202ms step_avg:52.03ms +[2025-09-11 12:28:51] [Rank 0] step:2061/10000 train_time:106853ms step_avg:51.85ms +[2025-09-11 12:28:51] [Rank 0] step:2061/10000 train_time:106853ms step_avg:51.85ms +[2025-09-11 12:28:52] [Rank 0] step:2081/10000 train_time:107504ms step_avg:51.66ms +[2025-09-11 12:28:52] [Rank 0] step:2081/10000 train_time:107504ms step_avg:51.66ms +[2025-09-11 12:28:53] [Rank 0] step:2101/10000 train_time:108153ms step_avg:51.48ms +[2025-09-11 12:28:53] [Rank 0] step:2101/10000 train_time:108153ms step_avg:51.48ms +[2025-09-11 12:28:53] [Rank 0] step:2121/10000 train_time:108805ms step_avg:51.30ms +[2025-09-11 12:28:53] [Rank 0] step:2121/10000 train_time:108805ms step_avg:51.30ms +[2025-09-11 12:28:54] [Rank 0] step:2141/10000 train_time:109454ms step_avg:51.12ms +[2025-09-11 12:28:54] [Rank 0] step:2141/10000 train_time:109454ms step_avg:51.12ms +[2025-09-11 12:28:55] [Rank 0] step:2161/10000 train_time:110104ms step_avg:50.95ms +[2025-09-11 12:28:55] [Rank 0] step:2161/10000 train_time:110104ms step_avg:50.95ms 
+[2025-09-11 12:28:55] [Rank 0] step:2181/10000 train_time:110754ms step_avg:50.78ms +[2025-09-11 12:28:55] [Rank 0] step:2181/10000 train_time:110754ms step_avg:50.78ms +[2025-09-11 12:28:56] [Rank 0] step:2201/10000 train_time:111403ms step_avg:50.61ms +[2025-09-11 12:28:56] [Rank 0] step:2201/10000 train_time:111403ms step_avg:50.61ms +[2025-09-11 12:28:57] [Rank 0] step:2221/10000 train_time:112051ms step_avg:50.45ms +[2025-09-11 12:28:57] [Rank 0] step:2221/10000 train_time:112051ms step_avg:50.45ms +[2025-09-11 12:28:57] [Rank 0] step:2241/10000 train_time:112713ms step_avg:50.30ms +[2025-09-11 12:28:57] [Rank 0] step:2241/10000 train_time:112713ms step_avg:50.30ms +[2025-09-11 12:28:58] [Rank 0] step:2261/10000 train_time:113376ms step_avg:50.14ms +[2025-09-11 12:28:58] [Rank 0] step:2261/10000 train_time:113376ms step_avg:50.14ms +[2025-09-11 12:28:59] [Rank 0] step:2281/10000 train_time:114040ms step_avg:50.00ms +[2025-09-11 12:28:59] [Rank 0] step:2281/10000 train_time:114040ms step_avg:50.00ms +[2025-09-11 12:28:59] [Rank 0] step:2301/10000 train_time:114703ms step_avg:49.85ms +[2025-09-11 12:28:59] [Rank 0] step:2301/10000 train_time:114703ms step_avg:49.85ms +[2025-09-11 12:29:00] [Rank 0] step:2321/10000 train_time:115673ms step_avg:49.84ms +[2025-09-11 12:29:00] [Rank 0] step:2321/10000 train_time:115673ms step_avg:49.84ms +[2025-09-11 12:29:01] [Rank 0] step:2341/10000 train_time:116336ms step_avg:49.69ms +[2025-09-11 12:29:01] [Rank 0] step:2341/10000 train_time:116336ms step_avg:49.69ms +[2025-09-11 12:29:02] [Rank 0] step:2361/10000 train_time:117000ms step_avg:49.56ms +[2025-09-11 12:29:02] [Rank 0] step:2361/10000 train_time:117000ms step_avg:49.56ms +[2025-09-11 12:29:03] [Rank 0] step:2381/10000 train_time:117983ms step_avg:49.55ms +[2025-09-11 12:29:03] [Rank 0] step:2381/10000 train_time:117983ms step_avg:49.55ms +[2025-09-11 12:29:03] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 12:29:03] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 12:29:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 12:29:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 12:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 12:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 12:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 12:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 12:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 12:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 12:29:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 12:29:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 12:29:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 12:29:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 12:29:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 12:29:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 12:29:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 12:29:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 12:29:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 12:29:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 12:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 12:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 12:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 12:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 12:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 12:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 12:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 12:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 12:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 12:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 12:29:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 12:29:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 12:29:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 12:29:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 12:29:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 12:29:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 12:29:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 12:29:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 12:29:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 12:29:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 12:29:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 12:29:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 12:29:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 12:29:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 12:29:16] [Rank 0] PRINT: step:2400/10000 val_loss:5.6723 total_sharp:1.6579e-02 L1_sharp:5.6301e-02 L2_sharp:6.4589e-02 L3_sharp:7.0405e-02 L4_sharp:7.8965e-02 L5_sharp:1.1004e-01 L6_sharp:1.3123e-01 L7_sharp:1.9779e-01 L8_sharp:3.3024e-01 L9_sharp:3.9742e-01 L10_sharp:5.9752e-01 L11_sharp:7.0690e-01 L12_sharp:1.6316e+00 total_fnorm:4.8125e+00 total_l1_linf:5.3120e+03 total_spectral:2.4219e+00 L1_fnorm:1.1523e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1572e-01 L10_fnorm:1.1572e-01 L11_fnorm:1.1621e-01 L12_fnorm:1.1084e-01 L1_l1linf:3.2959e-02 L2_l1linf:3.2959e-02 L3_l1linf:3.2959e-02 L4_l1linf:3.3936e-02 L5_l1linf:3.3691e-02 L6_l1linf:3.2959e-02 L7_l1linf:3.2959e-02 L8_l1linf:3.3691e-02 L9_l1linf:3.4180e-02 L10_l1linf:3.4424e-02 L11_l1linf:3.5400e-02 L12_l1linf:3.0151e-02 L1_spectral:1.6087e-03 L2_spectral:1.6206e-03 L3_spectral:1.6215e-03 L4_spectral:1.6249e-03 L5_spectral:1.6094e-03 L6_spectral:1.6054e-03 L7_spectral:1.6116e-03 L8_spectral:1.6079e-03 L9_spectral:1.6161e-03 L10_spectral:1.6084e-03 L11_spectral:1.6150e-03 L12_spectral:1.6038e-03 train_time:118627ms step_avg:49.43ms +[2025-09-11 12:29:16] [Rank 0] PRINT: step:2400/10000 
val_loss:5.6723 total_sharp:1.6579e-02 L1_sharp:5.6301e-02 L2_sharp:6.4589e-02 L3_sharp:7.0405e-02 L4_sharp:7.8965e-02 L5_sharp:1.1004e-01 L6_sharp:1.3123e-01 L7_sharp:1.9779e-01 L8_sharp:3.3024e-01 L9_sharp:3.9742e-01 L10_sharp:5.9752e-01 L11_sharp:7.0690e-01 L12_sharp:1.6316e+00 total_fnorm:4.8125e+00 total_l1_linf:5.3120e+03 total_spectral:2.4219e+00 L1_fnorm:1.1523e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1572e-01 L10_fnorm:1.1572e-01 L11_fnorm:1.1621e-01 L12_fnorm:1.1084e-01 L1_l1linf:3.2959e-02 L2_l1linf:3.2959e-02 L3_l1linf:3.2959e-02 L4_l1linf:3.3936e-02 L5_l1linf:3.3691e-02 L6_l1linf:3.2959e-02 L7_l1linf:3.2959e-02 L8_l1linf:3.3691e-02 L9_l1linf:3.4180e-02 L10_l1linf:3.4424e-02 L11_l1linf:3.5400e-02 L12_l1linf:3.0151e-02 L1_spectral:1.6087e-03 L2_spectral:1.6206e-03 L3_spectral:1.6215e-03 L4_spectral:1.6249e-03 L5_spectral:1.6094e-03 L6_spectral:1.6054e-03 L7_spectral:1.6116e-03 L8_spectral:1.6079e-03 L9_spectral:1.6161e-03 L10_spectral:1.6084e-03 L11_spectral:1.6150e-03 L12_spectral:1.6038e-03 train_time:118627ms step_avg:49.43ms +[2025-09-11 12:29:18] [Rank 0] step:2401/10000 train_time:120307ms step_avg:50.11ms +[2025-09-11 12:29:18] [Rank 0] step:2401/10000 train_time:120307ms step_avg:50.11ms +[2025-09-11 12:29:19] [Rank 0] step:2421/10000 train_time:120991ms step_avg:49.98ms +[2025-09-11 12:29:19] [Rank 0] step:2421/10000 train_time:120991ms step_avg:49.98ms +[2025-09-11 12:29:20] [Rank 0] step:2441/10000 train_time:121655ms step_avg:49.84ms +[2025-09-11 12:29:20] [Rank 0] step:2441/10000 train_time:121655ms step_avg:49.84ms +[2025-09-11 12:29:20] [Rank 0] step:2461/10000 train_time:122319ms step_avg:49.70ms +[2025-09-11 12:29:20] [Rank 0] step:2461/10000 train_time:122319ms step_avg:49.70ms +[2025-09-11 12:29:21] [Rank 0] step:2481/10000 train_time:122982ms step_avg:49.57ms +[2025-09-11 12:29:21] [Rank 0] step:2481/10000 
train_time:122982ms step_avg:49.57ms +[2025-09-11 12:29:22] [Rank 0] step:2501/10000 train_time:123645ms step_avg:49.44ms +[2025-09-11 12:29:22] [Rank 0] step:2501/10000 train_time:123645ms step_avg:49.44ms +[2025-09-11 12:29:22] [Rank 0] step:2521/10000 train_time:124309ms step_avg:49.31ms +[2025-09-11 12:29:22] [Rank 0] step:2521/10000 train_time:124309ms step_avg:49.31ms +[2025-09-11 12:29:23] [Rank 0] step:2541/10000 train_time:124972ms step_avg:49.18ms +[2025-09-11 12:29:23] [Rank 0] step:2541/10000 train_time:124972ms step_avg:49.18ms +[2025-09-11 12:29:24] [Rank 0] step:2561/10000 train_time:125634ms step_avg:49.06ms +[2025-09-11 12:29:24] [Rank 0] step:2561/10000 train_time:125634ms step_avg:49.06ms +[2025-09-11 12:29:24] [Rank 0] step:2581/10000 train_time:126297ms step_avg:48.93ms +[2025-09-11 12:29:24] [Rank 0] step:2581/10000 train_time:126297ms step_avg:48.93ms +[2025-09-11 12:29:25] [Rank 0] step:2601/10000 train_time:126960ms step_avg:48.81ms +[2025-09-11 12:29:25] [Rank 0] step:2601/10000 train_time:126960ms step_avg:48.81ms +[2025-09-11 12:29:25] [Rank 0] step:2621/10000 train_time:127624ms step_avg:48.69ms +[2025-09-11 12:29:25] [Rank 0] step:2621/10000 train_time:127624ms step_avg:48.69ms +[2025-09-11 12:29:26] [Rank 0] step:2641/10000 train_time:128289ms step_avg:48.58ms +[2025-09-11 12:29:26] [Rank 0] step:2641/10000 train_time:128289ms step_avg:48.58ms +[2025-09-11 12:29:27] [Rank 0] step:2661/10000 train_time:128953ms step_avg:48.46ms +[2025-09-11 12:29:27] [Rank 0] step:2661/10000 train_time:128953ms step_avg:48.46ms +[2025-09-11 12:29:27] [Rank 0] step:2681/10000 train_time:129616ms step_avg:48.35ms +[2025-09-11 12:29:27] [Rank 0] step:2681/10000 train_time:129616ms step_avg:48.35ms +[2025-09-11 12:29:28] [Rank 0] step:2701/10000 train_time:130278ms step_avg:48.23ms +[2025-09-11 12:29:28] [Rank 0] step:2701/10000 train_time:130278ms step_avg:48.23ms +[2025-09-11 12:29:29] [Rank 0] step:2721/10000 train_time:130942ms step_avg:48.12ms 
+[2025-09-11 12:29:29] [Rank 0] step:2721/10000 train_time:130942ms step_avg:48.12ms +[2025-09-11 12:29:29] [Rank 0] step:2741/10000 train_time:131605ms step_avg:48.01ms +[2025-09-11 12:29:29] [Rank 0] step:2741/10000 train_time:131605ms step_avg:48.01ms +[2025-09-11 12:29:30] [Rank 0] step:2761/10000 train_time:132269ms step_avg:47.91ms +[2025-09-11 12:29:30] [Rank 0] step:2761/10000 train_time:132269ms step_avg:47.91ms +[2025-09-11 12:29:31] [Rank 0] step:2781/10000 train_time:132932ms step_avg:47.80ms +[2025-09-11 12:29:31] [Rank 0] step:2781/10000 train_time:132932ms step_avg:47.80ms +[2025-09-11 12:29:31] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 12:29:31] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 12:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 12:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 12:29:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 12:29:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 12:29:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:29:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:29:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 12:29:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 12:29:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 12:29:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 12:29:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 12:29:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 12:29:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 12:29:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 12:29:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 12:29:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 12:29:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 12:29:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 12:29:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 12:29:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 12:29:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 12:29:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 12:29:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 12:29:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 12:29:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 12:29:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 12:29:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 12:29:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 12:29:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 12:29:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 12:29:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 12:29:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 12:29:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 12:29:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 12:29:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 12:29:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 12:29:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 12:29:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 12:29:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 12:29:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 12:29:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 12:29:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 12:29:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 12:29:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:29:42] [Rank 0] PRINT: step:2800/10000 val_loss:5.5664 total_sharp:2.9989e-02 L1_sharp:6.7674e-02 L2_sharp:7.2662e-02 L3_sharp:8.4415e-02 L4_sharp:1.2549e-01 L5_sharp:1.6115e-01 L6_sharp:2.4895e-01 L7_sharp:3.6221e-01 L8_sharp:5.4266e-01 L9_sharp:7.2255e-01 L10_sharp:9.9689e-01 L11_sharp:1.1240e+00 L12_sharp:1.7227e+00 total_fnorm:4.5000e+00 total_l1_linf:4.8320e+03 total_spectral:2.2656e+00 L1_fnorm:1.1475e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1572e-01 L10_fnorm:1.1523e-01 L11_fnorm:1.1572e-01 L12_fnorm:1.0986e-01 L1_l1linf:3.2227e-02 L2_l1linf:3.2471e-02 L3_l1linf:3.2227e-02 L4_l1linf:3.2227e-02 L5_l1linf:3.1982e-02 L6_l1linf:3.2227e-02 L7_l1linf:3.2471e-02 L8_l1linf:3.3203e-02 L9_l1linf:3.3691e-02 L10_l1linf:3.4180e-02 L11_l1linf:3.5400e-02 L12_l1linf:2.8931e-02 L1_spectral:1.6026e-03 L2_spectral:1.6220e-03 L3_spectral:1.6242e-03 L4_spectral:1.6233e-03 L5_spectral:1.6158e-03 L6_spectral:1.6147e-03 L7_spectral:1.6084e-03 L8_spectral:1.6075e-03 L9_spectral:1.6181e-03 L10_spectral:1.6091e-03 L11_spectral:1.6226e-03 L12_spectral:1.5971e-03 train_time:133576ms step_avg:47.71ms +[2025-09-11 12:29:42] [Rank 0] PRINT: step:2800/10000 val_loss:5.5664 total_sharp:2.9989e-02 L1_sharp:6.7674e-02 L2_sharp:7.2662e-02 L3_sharp:8.4415e-02 L4_sharp:1.2549e-01 L5_sharp:1.6115e-01 L6_sharp:2.4895e-01 L7_sharp:3.6221e-01 L8_sharp:5.4266e-01 L9_sharp:7.2255e-01 L10_sharp:9.9689e-01 L11_sharp:1.1240e+00 L12_sharp:1.7227e+00 total_fnorm:4.5000e+00 total_l1_linf:4.8320e+03 total_spectral:2.2656e+00 L1_fnorm:1.1475e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1572e-01 L10_fnorm:1.1523e-01 L11_fnorm:1.1572e-01 L12_fnorm:1.0986e-01 L1_l1linf:3.2227e-02 L2_l1linf:3.2471e-02 L3_l1linf:3.2227e-02 L4_l1linf:3.2227e-02 L5_l1linf:3.1982e-02 
L6_l1linf:3.2227e-02 L7_l1linf:3.2471e-02 L8_l1linf:3.3203e-02 L9_l1linf:3.3691e-02 L10_l1linf:3.4180e-02 L11_l1linf:3.5400e-02 L12_l1linf:2.8931e-02 L1_spectral:1.6026e-03 L2_spectral:1.6220e-03 L3_spectral:1.6242e-03 L4_spectral:1.6233e-03 L5_spectral:1.6158e-03 L6_spectral:1.6147e-03 L7_spectral:1.6084e-03 L8_spectral:1.6075e-03 L9_spectral:1.6181e-03 L10_spectral:1.6091e-03 L11_spectral:1.6226e-03 L12_spectral:1.5971e-03 train_time:133576ms step_avg:47.71ms +[2025-09-11 12:29:44] [Rank 0] step:2801/10000 train_time:135318ms step_avg:48.31ms +[2025-09-11 12:29:44] [Rank 0] step:2801/10000 train_time:135318ms step_avg:48.31ms +[2025-09-11 12:29:44] [Rank 0] step:2821/10000 train_time:135986ms step_avg:48.20ms +[2025-09-11 12:29:44] [Rank 0] step:2821/10000 train_time:135986ms step_avg:48.20ms +[2025-09-11 12:29:45] [Rank 0] step:2841/10000 train_time:136651ms step_avg:48.10ms +[2025-09-11 12:29:45] [Rank 0] step:2841/10000 train_time:136651ms step_avg:48.10ms +[2025-09-11 12:29:46] [Rank 0] step:2861/10000 train_time:137315ms step_avg:48.00ms +[2025-09-11 12:29:46] [Rank 0] step:2861/10000 train_time:137315ms step_avg:48.00ms +[2025-09-11 12:29:46] [Rank 0] step:2881/10000 train_time:137979ms step_avg:47.89ms +[2025-09-11 12:29:46] [Rank 0] step:2881/10000 train_time:137979ms step_avg:47.89ms +[2025-09-11 12:29:47] [Rank 0] step:2901/10000 train_time:138643ms step_avg:47.79ms +[2025-09-11 12:29:47] [Rank 0] step:2901/10000 train_time:138643ms step_avg:47.79ms +[2025-09-11 12:29:48] [Rank 0] step:2921/10000 train_time:139306ms step_avg:47.69ms +[2025-09-11 12:29:48] [Rank 0] step:2921/10000 train_time:139306ms step_avg:47.69ms +[2025-09-11 12:29:48] [Rank 0] step:2941/10000 train_time:139970ms step_avg:47.59ms +[2025-09-11 12:29:48] [Rank 0] step:2941/10000 train_time:139970ms step_avg:47.59ms +[2025-09-11 12:29:49] [Rank 0] step:2961/10000 train_time:140634ms step_avg:47.50ms +[2025-09-11 12:29:49] [Rank 0] step:2961/10000 train_time:140634ms step_avg:47.50ms 
+[2025-09-11 12:29:50] [Rank 0] step:2981/10000 train_time:141300ms step_avg:47.40ms +[2025-09-11 12:29:50] [Rank 0] step:2981/10000 train_time:141300ms step_avg:47.40ms +[2025-09-11 12:29:50] [Rank 0] step:3001/10000 train_time:141966ms step_avg:47.31ms +[2025-09-11 12:29:50] [Rank 0] step:3001/10000 train_time:141966ms step_avg:47.31ms +[2025-09-11 12:29:51] [Rank 0] step:3021/10000 train_time:142633ms step_avg:47.21ms +[2025-09-11 12:29:51] [Rank 0] step:3021/10000 train_time:142633ms step_avg:47.21ms +[2025-09-11 12:29:52] [Rank 0] step:3041/10000 train_time:143300ms step_avg:47.12ms +[2025-09-11 12:29:52] [Rank 0] step:3041/10000 train_time:143300ms step_avg:47.12ms +[2025-09-11 12:29:52] [Rank 0] step:3061/10000 train_time:143967ms step_avg:47.03ms +[2025-09-11 12:29:52] [Rank 0] step:3061/10000 train_time:143967ms step_avg:47.03ms +[2025-09-11 12:29:53] [Rank 0] step:3081/10000 train_time:144633ms step_avg:46.94ms +[2025-09-11 12:29:53] [Rank 0] step:3081/10000 train_time:144633ms step_avg:46.94ms +[2025-09-11 12:29:54] [Rank 0] step:3101/10000 train_time:145299ms step_avg:46.86ms +[2025-09-11 12:29:54] [Rank 0] step:3101/10000 train_time:145299ms step_avg:46.86ms +[2025-09-11 12:29:54] [Rank 0] step:3121/10000 train_time:145966ms step_avg:46.77ms +[2025-09-11 12:29:54] [Rank 0] step:3121/10000 train_time:145966ms step_avg:46.77ms +[2025-09-11 12:29:55] [Rank 0] step:3141/10000 train_time:146632ms step_avg:46.68ms +[2025-09-11 12:29:55] [Rank 0] step:3141/10000 train_time:146632ms step_avg:46.68ms +[2025-09-11 12:29:56] [Rank 0] step:3161/10000 train_time:147298ms step_avg:46.60ms +[2025-09-11 12:29:56] [Rank 0] step:3161/10000 train_time:147298ms step_avg:46.60ms +[2025-09-11 12:29:56] [Rank 0] step:3181/10000 train_time:147964ms step_avg:46.52ms +[2025-09-11 12:29:56] [Rank 0] step:3181/10000 train_time:147964ms step_avg:46.52ms +[2025-09-11 12:29:57] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 12:29:57] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 12:29:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 12:29:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 12:30:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 12:30:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 12:30:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:30:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:30:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 12:30:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 12:30:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 12:30:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 12:30:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 12:30:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 12:30:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 12:30:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 12:30:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 12:30:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 12:30:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 12:30:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 12:30:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 12:30:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 12:30:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 12:30:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 12:30:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 12:30:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 12:30:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 12:30:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 12:30:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 12:30:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 12:30:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 12:30:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 12:30:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 12:30:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 12:30:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 12:30:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 12:30:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 12:30:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 12:30:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 12:30:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 12:30:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 12:30:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 12:30:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 12:30:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 12:30:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 12:30:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 12:30:08] [Rank 0] PRINT: step:3200/10000 val_loss:5.4680 total_sharp:1.7274e-02 L1_sharp:7.4464e-02 L2_sharp:7.9431e-02 L3_sharp:9.0276e-02 L4_sharp:1.1588e-01 L5_sharp:1.6059e-01 L6_sharp:2.2825e-01 L7_sharp:3.1807e-01 L8_sharp:4.2609e-01 L9_sharp:4.5749e-01 L10_sharp:5.9605e-01 L11_sharp:6.4105e-01 L12_sharp:9.3577e-01 total_fnorm:5.0938e+00 total_l1_linf:5.6640e+03 total_spectral:2.5469e+00 L1_fnorm:1.1572e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1377e-01 L9_fnorm:1.1523e-01 L10_fnorm:1.1523e-01 L11_fnorm:1.1572e-01 L12_fnorm:1.1328e-01 L1_l1linf:3.2471e-02 L2_l1linf:3.1250e-02 L3_l1linf:3.1738e-02 L4_l1linf:3.1494e-02 L5_l1linf:3.1494e-02 L6_l1linf:3.1494e-02 L7_l1linf:3.1738e-02 L8_l1linf:3.1494e-02 L9_l1linf:3.2471e-02 L10_l1linf:3.2715e-02 L11_l1linf:3.3203e-02 L12_l1linf:3.0273e-02 L1_spectral:1.6178e-03 L2_spectral:1.6240e-03 L3_spectral:1.6259e-03 L4_spectral:1.6133e-03 L5_spectral:1.6170e-03 L6_spectral:1.6372e-03 L7_spectral:1.6107e-03 L8_spectral:1.6066e-03 L9_spectral:1.6087e-03 L10_spectral:1.6188e-03 L11_spectral:1.6075e-03 L12_spectral:1.6003e-03 train_time:148612ms step_avg:46.44ms +[2025-09-11 12:30:08] [Rank 0] PRINT: step:3200/10000 
val_loss:5.4680 total_sharp:1.7274e-02 L1_sharp:7.4464e-02 L2_sharp:7.9431e-02 L3_sharp:9.0276e-02 L4_sharp:1.1588e-01 L5_sharp:1.6059e-01 L6_sharp:2.2825e-01 L7_sharp:3.1807e-01 L8_sharp:4.2609e-01 L9_sharp:4.5749e-01 L10_sharp:5.9605e-01 L11_sharp:6.4105e-01 L12_sharp:9.3577e-01 total_fnorm:5.0938e+00 total_l1_linf:5.6640e+03 total_spectral:2.5469e+00 L1_fnorm:1.1572e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1377e-01 L9_fnorm:1.1523e-01 L10_fnorm:1.1523e-01 L11_fnorm:1.1572e-01 L12_fnorm:1.1328e-01 L1_l1linf:3.2471e-02 L2_l1linf:3.1250e-02 L3_l1linf:3.1738e-02 L4_l1linf:3.1494e-02 L5_l1linf:3.1494e-02 L6_l1linf:3.1494e-02 L7_l1linf:3.1738e-02 L8_l1linf:3.1494e-02 L9_l1linf:3.2471e-02 L10_l1linf:3.2715e-02 L11_l1linf:3.3203e-02 L12_l1linf:3.0273e-02 L1_spectral:1.6178e-03 L2_spectral:1.6240e-03 L3_spectral:1.6259e-03 L4_spectral:1.6133e-03 L5_spectral:1.6170e-03 L6_spectral:1.6372e-03 L7_spectral:1.6107e-03 L8_spectral:1.6066e-03 L9_spectral:1.6087e-03 L10_spectral:1.6188e-03 L11_spectral:1.6075e-03 L12_spectral:1.6003e-03 train_time:148612ms step_avg:46.44ms +[2025-09-11 12:30:09] [Rank 0] step:3201/10000 train_time:150285ms step_avg:46.95ms +[2025-09-11 12:30:09] [Rank 0] step:3201/10000 train_time:150285ms step_avg:46.95ms +[2025-09-11 12:30:10] [Rank 0] step:3221/10000 train_time:150964ms step_avg:46.87ms +[2025-09-11 12:30:10] [Rank 0] step:3221/10000 train_time:150964ms step_avg:46.87ms +[2025-09-11 12:30:11] [Rank 0] step:3241/10000 train_time:151630ms step_avg:46.79ms +[2025-09-11 12:30:11] [Rank 0] step:3241/10000 train_time:151630ms step_avg:46.79ms +[2025-09-11 12:30:11] [Rank 0] step:3261/10000 train_time:152297ms step_avg:46.70ms +[2025-09-11 12:30:11] [Rank 0] step:3261/10000 train_time:152297ms step_avg:46.70ms +[2025-09-11 12:30:12] [Rank 0] step:3281/10000 train_time:152963ms step_avg:46.62ms +[2025-09-11 12:30:12] [Rank 0] step:3281/10000 
train_time:152963ms step_avg:46.62ms +[2025-09-11 12:30:13] [Rank 0] step:3301/10000 train_time:153629ms step_avg:46.54ms +[2025-09-11 12:30:13] [Rank 0] step:3301/10000 train_time:153629ms step_avg:46.54ms +[2025-09-11 12:30:13] [Rank 0] step:3321/10000 train_time:154295ms step_avg:46.46ms +[2025-09-11 12:30:13] [Rank 0] step:3321/10000 train_time:154295ms step_avg:46.46ms +[2025-09-11 12:30:14] [Rank 0] step:3341/10000 train_time:154961ms step_avg:46.38ms +[2025-09-11 12:30:14] [Rank 0] step:3341/10000 train_time:154961ms step_avg:46.38ms +[2025-09-11 12:30:15] [Rank 0] step:3361/10000 train_time:155628ms step_avg:46.30ms +[2025-09-11 12:30:15] [Rank 0] step:3361/10000 train_time:155628ms step_avg:46.30ms +[2025-09-11 12:30:15] [Rank 0] step:3381/10000 train_time:156295ms step_avg:46.23ms +[2025-09-11 12:30:15] [Rank 0] step:3381/10000 train_time:156295ms step_avg:46.23ms +[2025-09-11 12:30:16] [Rank 0] step:3401/10000 train_time:156961ms step_avg:46.15ms +[2025-09-11 12:30:16] [Rank 0] step:3401/10000 train_time:156961ms step_avg:46.15ms +[2025-09-11 12:30:17] [Rank 0] step:3421/10000 train_time:157626ms step_avg:46.08ms +[2025-09-11 12:30:17] [Rank 0] step:3421/10000 train_time:157626ms step_avg:46.08ms +[2025-09-11 12:30:17] [Rank 0] step:3441/10000 train_time:158292ms step_avg:46.00ms +[2025-09-11 12:30:17] [Rank 0] step:3441/10000 train_time:158292ms step_avg:46.00ms +[2025-09-11 12:30:18] [Rank 0] step:3461/10000 train_time:158957ms step_avg:45.93ms +[2025-09-11 12:30:18] [Rank 0] step:3461/10000 train_time:158957ms step_avg:45.93ms +[2025-09-11 12:30:19] [Rank 0] step:3481/10000 train_time:159623ms step_avg:45.86ms +[2025-09-11 12:30:19] [Rank 0] step:3481/10000 train_time:159623ms step_avg:45.86ms +[2025-09-11 12:30:19] [Rank 0] step:3501/10000 train_time:160289ms step_avg:45.78ms +[2025-09-11 12:30:19] [Rank 0] step:3501/10000 train_time:160289ms step_avg:45.78ms +[2025-09-11 12:30:20] [Rank 0] step:3521/10000 train_time:160954ms step_avg:45.71ms 
+[2025-09-11 12:30:20] [Rank 0] step:3521/10000 train_time:160954ms step_avg:45.71ms +[2025-09-11 12:30:21] [Rank 0] step:3541/10000 train_time:161620ms step_avg:45.64ms +[2025-09-11 12:30:21] [Rank 0] step:3541/10000 train_time:161620ms step_avg:45.64ms +[2025-09-11 12:30:21] [Rank 0] step:3561/10000 train_time:162286ms step_avg:45.57ms +[2025-09-11 12:30:21] [Rank 0] step:3561/10000 train_time:162286ms step_avg:45.57ms +[2025-09-11 12:30:22] [Rank 0] step:3581/10000 train_time:162951ms step_avg:45.50ms +[2025-09-11 12:30:22] [Rank 0] step:3581/10000 train_time:162951ms step_avg:45.50ms +[2025-09-11 12:30:23] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 12:30:23] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 12:30:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 12:30:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 12:30:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 12:30:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 12:30:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:30:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:30:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 12:30:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 12:30:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 12:30:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 12:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 12:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 12:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 12:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 12:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 12:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 12:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 12:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 12:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 12:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 12:30:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 12:30:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 12:30:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 12:30:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 12:30:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 12:30:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 12:30:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 12:30:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 12:30:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 12:30:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 12:30:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 12:30:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 12:30:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 12:30:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 12:30:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 12:30:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 12:30:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 12:30:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 12:30:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 12:30:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 12:30:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 12:30:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 12:30:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 12:30:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:30:34] [Rank 0] PRINT: step:3600/10000 val_loss:5.4069 total_sharp:1.3623e-02 L1_sharp:3.9528e-02 L2_sharp:4.5680e-02 L3_sharp:4.8464e-02 L4_sharp:6.1902e-02 L5_sharp:7.5717e-02 L6_sharp:1.1812e-01 L7_sharp:1.6236e-01 L8_sharp:2.4512e-01 L9_sharp:3.3983e-01 L10_sharp:5.1714e-01 L11_sharp:5.8489e-01 L12_sharp:1.0385e+00 total_fnorm:4.5000e+00 total_l1_linf:4.8000e+03 total_spectral:2.2500e+00 L1_fnorm:1.1523e-01 L2_fnorm:1.1621e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1426e-01 L9_fnorm:1.1523e-01 L10_fnorm:1.1475e-01 L11_fnorm:1.1572e-01 L12_fnorm:1.1426e-01 L1_l1linf:3.1250e-02 L2_l1linf:3.1494e-02 L3_l1linf:3.1128e-02 L4_l1linf:3.1738e-02 L5_l1linf:3.1006e-02 L6_l1linf:3.0762e-02 L7_l1linf:3.0273e-02 L8_l1linf:3.0640e-02 L9_l1linf:3.0884e-02 L10_l1linf:3.1250e-02 L11_l1linf:3.2227e-02 L12_l1linf:2.9663e-02 L1_spectral:1.6201e-03 L2_spectral:1.6329e-03 L3_spectral:1.6221e-03 L4_spectral:1.6395e-03 L5_spectral:1.6099e-03 L6_spectral:1.6239e-03 L7_spectral:1.6104e-03 L8_spectral:1.6077e-03 L9_spectral:1.6180e-03 L10_spectral:1.6074e-03 L11_spectral:1.6049e-03 L12_spectral:1.6107e-03 train_time:163600ms step_avg:45.44ms +[2025-09-11 12:30:34] [Rank 0] PRINT: step:3600/10000 val_loss:5.4069 total_sharp:1.3623e-02 L1_sharp:3.9528e-02 L2_sharp:4.5680e-02 L3_sharp:4.8464e-02 L4_sharp:6.1902e-02 L5_sharp:7.5717e-02 L6_sharp:1.1812e-01 L7_sharp:1.6236e-01 L8_sharp:2.4512e-01 L9_sharp:3.3983e-01 L10_sharp:5.1714e-01 L11_sharp:5.8489e-01 L12_sharp:1.0385e+00 total_fnorm:4.5000e+00 total_l1_linf:4.8000e+03 total_spectral:2.2500e+00 L1_fnorm:1.1523e-01 L2_fnorm:1.1621e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1426e-01 L9_fnorm:1.1523e-01 L10_fnorm:1.1475e-01 L11_fnorm:1.1572e-01 L12_fnorm:1.1426e-01 L1_l1linf:3.1250e-02 L2_l1linf:3.1494e-02 L3_l1linf:3.1128e-02 L4_l1linf:3.1738e-02 L5_l1linf:3.1006e-02 
L6_l1linf:3.0762e-02 L7_l1linf:3.0273e-02 L8_l1linf:3.0640e-02 L9_l1linf:3.0884e-02 L10_l1linf:3.1250e-02 L11_l1linf:3.2227e-02 L12_l1linf:2.9663e-02 L1_spectral:1.6201e-03 L2_spectral:1.6329e-03 L3_spectral:1.6221e-03 L4_spectral:1.6395e-03 L5_spectral:1.6099e-03 L6_spectral:1.6239e-03 L7_spectral:1.6104e-03 L8_spectral:1.6077e-03 L9_spectral:1.6180e-03 L10_spectral:1.6074e-03 L11_spectral:1.6049e-03 L12_spectral:1.6107e-03 train_time:163600ms step_avg:45.44ms +[2025-09-11 12:30:35] [Rank 0] step:3601/10000 train_time:165318ms step_avg:45.91ms +[2025-09-11 12:30:35] [Rank 0] step:3601/10000 train_time:165318ms step_avg:45.91ms +[2025-09-11 12:30:36] [Rank 0] step:3621/10000 train_time:165988ms step_avg:45.84ms +[2025-09-11 12:30:36] [Rank 0] step:3621/10000 train_time:165988ms step_avg:45.84ms +[2025-09-11 12:30:37] [Rank 0] step:3641/10000 train_time:166654ms step_avg:45.77ms +[2025-09-11 12:30:37] [Rank 0] step:3641/10000 train_time:166654ms step_avg:45.77ms +[2025-09-11 12:30:37] [Rank 0] step:3661/10000 train_time:167321ms step_avg:45.70ms +[2025-09-11 12:30:37] [Rank 0] step:3661/10000 train_time:167321ms step_avg:45.70ms +[2025-09-11 12:30:38] [Rank 0] step:3681/10000 train_time:167988ms step_avg:45.64ms +[2025-09-11 12:30:38] [Rank 0] step:3681/10000 train_time:167988ms step_avg:45.64ms +[2025-09-11 12:30:39] [Rank 0] step:3701/10000 train_time:168654ms step_avg:45.57ms +[2025-09-11 12:30:39] [Rank 0] step:3701/10000 train_time:168654ms step_avg:45.57ms +[2025-09-11 12:30:39] [Rank 0] step:3721/10000 train_time:169330ms step_avg:45.51ms +[2025-09-11 12:30:39] [Rank 0] step:3721/10000 train_time:169330ms step_avg:45.51ms +[2025-09-11 12:30:40] [Rank 0] step:3741/10000 train_time:170006ms step_avg:45.44ms +[2025-09-11 12:30:40] [Rank 0] step:3741/10000 train_time:170006ms step_avg:45.44ms +[2025-09-11 12:30:41] [Rank 0] step:3761/10000 train_time:170683ms step_avg:45.38ms +[2025-09-11 12:30:41] [Rank 0] step:3761/10000 train_time:170683ms step_avg:45.38ms 
+[2025-09-11 12:30:42] [Rank 0] step:3781/10000 train_time:171360ms step_avg:45.32ms +[2025-09-11 12:30:42] [Rank 0] step:3781/10000 train_time:171360ms step_avg:45.32ms +[2025-09-11 12:30:42] [Rank 0] step:3801/10000 train_time:172037ms step_avg:45.26ms +[2025-09-11 12:30:42] [Rank 0] step:3801/10000 train_time:172037ms step_avg:45.26ms +[2025-09-11 12:30:43] [Rank 0] step:3821/10000 train_time:172714ms step_avg:45.20ms +[2025-09-11 12:30:43] [Rank 0] step:3821/10000 train_time:172714ms step_avg:45.20ms +[2025-09-11 12:30:44] [Rank 0] step:3841/10000 train_time:173393ms step_avg:45.14ms +[2025-09-11 12:30:44] [Rank 0] step:3841/10000 train_time:173393ms step_avg:45.14ms +[2025-09-11 12:30:44] [Rank 0] step:3861/10000 train_time:174069ms step_avg:45.08ms +[2025-09-11 12:30:44] [Rank 0] step:3861/10000 train_time:174069ms step_avg:45.08ms +[2025-09-11 12:30:45] [Rank 0] step:3881/10000 train_time:174746ms step_avg:45.03ms +[2025-09-11 12:30:45] [Rank 0] step:3881/10000 train_time:174746ms step_avg:45.03ms +[2025-09-11 12:30:46] [Rank 0] step:3901/10000 train_time:175423ms step_avg:44.97ms +[2025-09-11 12:30:46] [Rank 0] step:3901/10000 train_time:175423ms step_avg:44.97ms +[2025-09-11 12:30:46] [Rank 0] step:3921/10000 train_time:176100ms step_avg:44.91ms +[2025-09-11 12:30:46] [Rank 0] step:3921/10000 train_time:176100ms step_avg:44.91ms +[2025-09-11 12:30:47] [Rank 0] step:3941/10000 train_time:176777ms step_avg:44.86ms +[2025-09-11 12:30:47] [Rank 0] step:3941/10000 train_time:176777ms step_avg:44.86ms +[2025-09-11 12:30:48] [Rank 0] step:3961/10000 train_time:177454ms step_avg:44.80ms +[2025-09-11 12:30:48] [Rank 0] step:3961/10000 train_time:177454ms step_avg:44.80ms +[2025-09-11 12:30:48] [Rank 0] step:3981/10000 train_time:178130ms step_avg:44.75ms +[2025-09-11 12:30:48] [Rank 0] step:3981/10000 train_time:178130ms step_avg:44.75ms +[2025-09-11 12:30:49] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 12:30:49] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 12:30:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 12:30:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 12:30:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 12:30:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 12:30:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:30:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:30:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 12:30:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 12:30:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 12:30:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 12:30:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 12:30:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 12:30:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 12:30:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 12:30:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 12:30:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 12:30:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 12:30:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 12:30:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 12:30:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 12:30:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 12:30:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 12:30:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 12:30:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 12:30:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 12:30:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 12:30:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 12:30:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 12:30:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 12:30:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 12:30:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 12:30:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 12:30:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 12:30:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 12:30:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 12:30:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 12:30:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 12:30:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 12:30:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 12:30:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 12:30:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 12:30:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 12:31:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 12:31:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 12:31:00] [Rank 0] PRINT: step:4000/10000 val_loss:5.3486 total_sharp:1.6944e-02 L1_sharp:4.5904e-02 L2_sharp:5.0774e-02 L3_sharp:5.5717e-02 L4_sharp:7.5047e-02 L5_sharp:1.0398e-01 L6_sharp:1.4039e-01 L7_sharp:2.4762e-01 L8_sharp:4.0846e-01 L9_sharp:6.0464e-01 L10_sharp:9.2576e-01 L11_sharp:1.1712e+00 L12_sharp:1.1800e+00 total_fnorm:5.2812e+00 total_l1_linf:5.5360e+03 total_spectral:2.6406e+00 L1_fnorm:1.1475e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1523e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1475e-01 L8_fnorm:1.1426e-01 L9_fnorm:1.1523e-01 L10_fnorm:1.1475e-01 L11_fnorm:1.1572e-01 L12_fnorm:1.1230e-01 L1_l1linf:3.0518e-02 L2_l1linf:3.0518e-02 L3_l1linf:3.0518e-02 L4_l1linf:3.1006e-02 L5_l1linf:3.0640e-02 L6_l1linf:3.0396e-02 L7_l1linf:3.1494e-02 L8_l1linf:3.1494e-02 L9_l1linf:3.2227e-02 L10_l1linf:3.2471e-02 L11_l1linf:3.3203e-02 L12_l1linf:2.8076e-02 L1_spectral:1.6292e-03 L2_spectral:1.6177e-03 L3_spectral:1.6369e-03 L4_spectral:1.6219e-03 L5_spectral:1.6152e-03 L6_spectral:1.6069e-03 L7_spectral:1.6235e-03 L8_spectral:1.6132e-03 L9_spectral:1.6053e-03 L10_spectral:1.6024e-03 L11_spectral:1.6020e-03 L12_spectral:1.6063e-03 train_time:178788ms step_avg:44.70ms +[2025-09-11 12:31:00] [Rank 0] PRINT: step:4000/10000 
val_loss:5.3486 total_sharp:1.6944e-02 L1_sharp:4.5904e-02 L2_sharp:5.0774e-02 L3_sharp:5.5717e-02 L4_sharp:7.5047e-02 L5_sharp:1.0398e-01 L6_sharp:1.4039e-01 L7_sharp:2.4762e-01 L8_sharp:4.0846e-01 L9_sharp:6.0464e-01 L10_sharp:9.2576e-01 L11_sharp:1.1712e+00 L12_sharp:1.1800e+00 total_fnorm:5.2812e+00 total_l1_linf:5.5360e+03 total_spectral:2.6406e+00 L1_fnorm:1.1475e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1523e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1475e-01 L8_fnorm:1.1426e-01 L9_fnorm:1.1523e-01 L10_fnorm:1.1475e-01 L11_fnorm:1.1572e-01 L12_fnorm:1.1230e-01 L1_l1linf:3.0518e-02 L2_l1linf:3.0518e-02 L3_l1linf:3.0518e-02 L4_l1linf:3.1006e-02 L5_l1linf:3.0640e-02 L6_l1linf:3.0396e-02 L7_l1linf:3.1494e-02 L8_l1linf:3.1494e-02 L9_l1linf:3.2227e-02 L10_l1linf:3.2471e-02 L11_l1linf:3.3203e-02 L12_l1linf:2.8076e-02 L1_spectral:1.6292e-03 L2_spectral:1.6177e-03 L3_spectral:1.6369e-03 L4_spectral:1.6219e-03 L5_spectral:1.6152e-03 L6_spectral:1.6069e-03 L7_spectral:1.6235e-03 L8_spectral:1.6132e-03 L9_spectral:1.6053e-03 L10_spectral:1.6024e-03 L11_spectral:1.6020e-03 L12_spectral:1.6063e-03 train_time:178788ms step_avg:44.70ms +[2025-09-11 12:31:01] [Rank 0] step:4001/10000 train_time:180431ms step_avg:45.10ms +[2025-09-11 12:31:01] [Rank 0] step:4001/10000 train_time:180431ms step_avg:45.10ms +[2025-09-11 12:31:02] [Rank 0] step:4021/10000 train_time:181130ms step_avg:45.05ms +[2025-09-11 12:31:02] [Rank 0] step:4021/10000 train_time:181130ms step_avg:45.05ms +[2025-09-11 12:31:03] [Rank 0] step:4041/10000 train_time:181809ms step_avg:44.99ms +[2025-09-11 12:31:03] [Rank 0] step:4041/10000 train_time:181809ms step_avg:44.99ms +[2025-09-11 12:31:04] [Rank 0] step:4061/10000 train_time:182486ms step_avg:44.94ms +[2025-09-11 12:31:04] [Rank 0] step:4061/10000 train_time:182486ms step_avg:44.94ms +[2025-09-11 12:31:04] [Rank 0] step:4081/10000 train_time:183164ms step_avg:44.88ms +[2025-09-11 12:31:04] [Rank 0] step:4081/10000 
train_time:183164ms step_avg:44.88ms +[2025-09-11 12:31:05] [Rank 0] step:4101/10000 train_time:183842ms step_avg:44.83ms +[2025-09-11 12:31:05] [Rank 0] step:4101/10000 train_time:183842ms step_avg:44.83ms +[2025-09-11 12:31:06] [Rank 0] step:4121/10000 train_time:184521ms step_avg:44.78ms +[2025-09-11 12:31:06] [Rank 0] step:4121/10000 train_time:184521ms step_avg:44.78ms +[2025-09-11 12:31:07] [Rank 0] step:4141/10000 train_time:185504ms step_avg:44.80ms +[2025-09-11 12:31:07] [Rank 0] step:4141/10000 train_time:185504ms step_avg:44.80ms +[2025-09-11 12:31:07] [Rank 0] step:4161/10000 train_time:186182ms step_avg:44.74ms +[2025-09-11 12:31:07] [Rank 0] step:4161/10000 train_time:186182ms step_avg:44.74ms +[2025-09-11 12:31:08] [Rank 0] step:4181/10000 train_time:186860ms step_avg:44.69ms +[2025-09-11 12:31:08] [Rank 0] step:4181/10000 train_time:186860ms step_avg:44.69ms +[2025-09-11 12:31:09] [Rank 0] step:4201/10000 train_time:187822ms step_avg:44.71ms +[2025-09-11 12:31:09] [Rank 0] step:4201/10000 train_time:187822ms step_avg:44.71ms +[2025-09-11 12:31:10] [Rank 0] step:4221/10000 train_time:188499ms step_avg:44.66ms +[2025-09-11 12:31:10] [Rank 0] step:4221/10000 train_time:188499ms step_avg:44.66ms +[2025-09-11 12:31:10] [Rank 0] step:4241/10000 train_time:189177ms step_avg:44.61ms +[2025-09-11 12:31:10] [Rank 0] step:4241/10000 train_time:189177ms step_avg:44.61ms +[2025-09-11 12:31:11] [Rank 0] step:4261/10000 train_time:189856ms step_avg:44.56ms +[2025-09-11 12:31:11] [Rank 0] step:4261/10000 train_time:189856ms step_avg:44.56ms +[2025-09-11 12:31:12] [Rank 0] step:4281/10000 train_time:190535ms step_avg:44.51ms +[2025-09-11 12:31:12] [Rank 0] step:4281/10000 train_time:190535ms step_avg:44.51ms +[2025-09-11 12:31:12] [Rank 0] step:4301/10000 train_time:191213ms step_avg:44.46ms +[2025-09-11 12:31:12] [Rank 0] step:4301/10000 train_time:191213ms step_avg:44.46ms +[2025-09-11 12:31:13] [Rank 0] step:4321/10000 train_time:191890ms step_avg:44.41ms 
+[2025-09-11 12:31:13] [Rank 0] step:4321/10000 train_time:191890ms step_avg:44.41ms +[2025-09-11 12:31:14] [Rank 0] step:4341/10000 train_time:192568ms step_avg:44.36ms +[2025-09-11 12:31:14] [Rank 0] step:4341/10000 train_time:192568ms step_avg:44.36ms +[2025-09-11 12:31:14] [Rank 0] step:4361/10000 train_time:193247ms step_avg:44.31ms +[2025-09-11 12:31:14] [Rank 0] step:4361/10000 train_time:193247ms step_avg:44.31ms +[2025-09-11 12:31:15] [Rank 0] step:4381/10000 train_time:193926ms step_avg:44.27ms +[2025-09-11 12:31:15] [Rank 0] step:4381/10000 train_time:193926ms step_avg:44.27ms +[2025-09-11 12:31:16] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 12:31:16] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 12:31:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 12:31:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 12:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 12:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 12:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 12:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 12:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 12:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 12:31:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 12:31:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 12:31:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 12:31:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 12:31:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 12:31:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 12:31:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 12:31:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 12:31:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 12:31:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 12:31:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 12:31:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 12:31:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 12:31:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 12:31:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 12:31:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 12:31:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 12:31:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 12:31:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 12:31:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 12:31:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 12:31:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 12:31:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 12:31:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 12:31:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 12:31:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 12:31:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 12:31:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 12:31:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 12:31:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 12:31:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 12:31:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 12:31:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 12:31:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:31:26] [Rank 0] PRINT: step:4400/10000 val_loss:5.2960 total_sharp:1.3200e-02 L1_sharp:3.3111e-02 L2_sharp:3.7611e-02 L3_sharp:4.0808e-02 L4_sharp:5.8667e-02 L5_sharp:8.5945e-02 L6_sharp:1.4007e-01 L7_sharp:2.1027e-01 L8_sharp:3.0462e-01 L9_sharp:3.9239e-01 L10_sharp:5.6070e-01 L11_sharp:6.1745e-01 L12_sharp:9.9040e-01 total_fnorm:4.6562e+00 total_l1_linf:4.8640e+03 total_spectral:2.3438e+00 L1_fnorm:1.1475e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1523e-01 L4_fnorm:1.1475e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1377e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1377e-01 L9_fnorm:1.1475e-01 L10_fnorm:1.1523e-01 L11_fnorm:1.1523e-01 L12_fnorm:1.1230e-01 L1_l1linf:3.0151e-02 L2_l1linf:3.0273e-02 L3_l1linf:3.0273e-02 L4_l1linf:3.0151e-02 L5_l1linf:2.9907e-02 L6_l1linf:2.9663e-02 L7_l1linf:2.9785e-02 L8_l1linf:3.0151e-02 L9_l1linf:3.0884e-02 L10_l1linf:3.1250e-02 L11_l1linf:3.3203e-02 L12_l1linf:2.6855e-02 L1_spectral:1.6213e-03 L2_spectral:1.6204e-03 L3_spectral:1.6261e-03 L4_spectral:1.6377e-03 L5_spectral:1.6072e-03 L6_spectral:1.6085e-03 L7_spectral:1.6112e-03 L8_spectral:1.6039e-03 L9_spectral:1.5970e-03 L10_spectral:1.6035e-03 L11_spectral:1.6061e-03 L12_spectral:1.5958e-03 train_time:194584ms step_avg:44.22ms +[2025-09-11 12:31:26] [Rank 0] PRINT: step:4400/10000 val_loss:5.2960 total_sharp:1.3200e-02 L1_sharp:3.3111e-02 L2_sharp:3.7611e-02 L3_sharp:4.0808e-02 L4_sharp:5.8667e-02 L5_sharp:8.5945e-02 L6_sharp:1.4007e-01 L7_sharp:2.1027e-01 L8_sharp:3.0462e-01 L9_sharp:3.9239e-01 L10_sharp:5.6070e-01 L11_sharp:6.1745e-01 L12_sharp:9.9040e-01 total_fnorm:4.6562e+00 total_l1_linf:4.8640e+03 total_spectral:2.3438e+00 L1_fnorm:1.1475e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1523e-01 L4_fnorm:1.1475e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1377e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1377e-01 L9_fnorm:1.1475e-01 L10_fnorm:1.1523e-01 L11_fnorm:1.1523e-01 L12_fnorm:1.1230e-01 L1_l1linf:3.0151e-02 L2_l1linf:3.0273e-02 L3_l1linf:3.0273e-02 L4_l1linf:3.0151e-02 L5_l1linf:2.9907e-02 
L6_l1linf:2.9663e-02 L7_l1linf:2.9785e-02 L8_l1linf:3.0151e-02 L9_l1linf:3.0884e-02 L10_l1linf:3.1250e-02 L11_l1linf:3.3203e-02 L12_l1linf:2.6855e-02 L1_spectral:1.6213e-03 L2_spectral:1.6204e-03 L3_spectral:1.6261e-03 L4_spectral:1.6377e-03 L5_spectral:1.6072e-03 L6_spectral:1.6085e-03 L7_spectral:1.6112e-03 L8_spectral:1.6039e-03 L9_spectral:1.5970e-03 L10_spectral:1.6035e-03 L11_spectral:1.6061e-03 L12_spectral:1.5958e-03 train_time:194584ms step_avg:44.22ms +[2025-09-11 12:31:28] [Rank 0] step:4401/10000 train_time:196338ms step_avg:44.61ms +[2025-09-11 12:31:28] [Rank 0] step:4401/10000 train_time:196338ms step_avg:44.61ms +[2025-09-11 12:31:29] [Rank 0] step:4421/10000 train_time:197033ms step_avg:44.57ms +[2025-09-11 12:31:29] [Rank 0] step:4421/10000 train_time:197033ms step_avg:44.57ms +[2025-09-11 12:31:29] [Rank 0] step:4441/10000 train_time:197712ms step_avg:44.52ms +[2025-09-11 12:31:29] [Rank 0] step:4441/10000 train_time:197712ms step_avg:44.52ms +[2025-09-11 12:31:30] [Rank 0] step:4461/10000 train_time:198393ms step_avg:44.47ms +[2025-09-11 12:31:30] [Rank 0] step:4461/10000 train_time:198393ms step_avg:44.47ms +[2025-09-11 12:31:31] [Rank 0] step:4481/10000 train_time:199072ms step_avg:44.43ms +[2025-09-11 12:31:31] [Rank 0] step:4481/10000 train_time:199072ms step_avg:44.43ms +[2025-09-11 12:31:32] [Rank 0] step:4501/10000 train_time:199752ms step_avg:44.38ms +[2025-09-11 12:31:32] [Rank 0] step:4501/10000 train_time:199752ms step_avg:44.38ms +[2025-09-11 12:31:32] [Rank 0] step:4521/10000 train_time:200432ms step_avg:44.33ms +[2025-09-11 12:31:32] [Rank 0] step:4521/10000 train_time:200432ms step_avg:44.33ms +[2025-09-11 12:31:33] [Rank 0] step:4541/10000 train_time:201112ms step_avg:44.29ms +[2025-09-11 12:31:33] [Rank 0] step:4541/10000 train_time:201112ms step_avg:44.29ms +[2025-09-11 12:31:34] [Rank 0] step:4561/10000 train_time:201791ms step_avg:44.24ms +[2025-09-11 12:31:34] [Rank 0] step:4561/10000 train_time:201791ms step_avg:44.24ms 
+[2025-09-11 12:31:34] [Rank 0] step:4581/10000 train_time:202471ms step_avg:44.20ms +[2025-09-11 12:31:34] [Rank 0] step:4581/10000 train_time:202471ms step_avg:44.20ms +[2025-09-11 12:31:35] [Rank 0] step:4601/10000 train_time:203151ms step_avg:44.15ms +[2025-09-11 12:31:35] [Rank 0] step:4601/10000 train_time:203151ms step_avg:44.15ms +[2025-09-11 12:31:36] [Rank 0] step:4621/10000 train_time:203831ms step_avg:44.11ms +[2025-09-11 12:31:36] [Rank 0] step:4621/10000 train_time:203831ms step_avg:44.11ms +[2025-09-11 12:31:36] [Rank 0] step:4641/10000 train_time:204510ms step_avg:44.07ms +[2025-09-11 12:31:36] [Rank 0] step:4641/10000 train_time:204510ms step_avg:44.07ms +[2025-09-11 12:31:37] [Rank 0] step:4661/10000 train_time:205190ms step_avg:44.02ms +[2025-09-11 12:31:37] [Rank 0] step:4661/10000 train_time:205190ms step_avg:44.02ms +[2025-09-11 12:31:38] [Rank 0] step:4681/10000 train_time:205870ms step_avg:43.98ms +[2025-09-11 12:31:38] [Rank 0] step:4681/10000 train_time:205870ms step_avg:43.98ms +[2025-09-11 12:31:38] [Rank 0] step:4701/10000 train_time:206549ms step_avg:43.94ms +[2025-09-11 12:31:38] [Rank 0] step:4701/10000 train_time:206549ms step_avg:43.94ms +[2025-09-11 12:31:39] [Rank 0] step:4721/10000 train_time:207229ms step_avg:43.90ms +[2025-09-11 12:31:39] [Rank 0] step:4721/10000 train_time:207229ms step_avg:43.90ms +[2025-09-11 12:31:40] [Rank 0] step:4741/10000 train_time:207908ms step_avg:43.85ms +[2025-09-11 12:31:40] [Rank 0] step:4741/10000 train_time:207908ms step_avg:43.85ms +[2025-09-11 12:31:40] [Rank 0] step:4761/10000 train_time:208589ms step_avg:43.81ms +[2025-09-11 12:31:40] [Rank 0] step:4761/10000 train_time:208589ms step_avg:43.81ms +[2025-09-11 12:31:41] [Rank 0] step:4781/10000 train_time:209269ms step_avg:43.77ms +[2025-09-11 12:31:41] [Rank 0] step:4781/10000 train_time:209269ms step_avg:43.77ms +[2025-09-11 12:31:42] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 12:31:42] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 12:31:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 12:31:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 12:31:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 12:31:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 12:31:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:31:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:31:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 12:31:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 12:31:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 12:31:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 12:31:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 12:31:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 12:31:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 12:31:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 12:31:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 12:31:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 12:31:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 12:31:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 12:31:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 12:31:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 12:31:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 12:31:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 12:31:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 12:31:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 12:31:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 12:31:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 12:31:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 12:31:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 12:31:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 12:31:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 12:31:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 12:31:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 12:31:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 12:31:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 12:31:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 12:31:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 12:31:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 12:31:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 12:31:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 12:31:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 12:31:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 12:31:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 12:31:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 12:31:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 12:31:52] [Rank 0] PRINT: step:4800/10000 val_loss:5.2498 total_sharp:1.5882e-02 L1_sharp:3.5049e-02 L2_sharp:3.7410e-02 L3_sharp:3.8056e-02 L4_sharp:4.8793e-02 L5_sharp:6.6493e-02 L6_sharp:1.2474e-01 L7_sharp:2.0945e-01 L8_sharp:3.2478e-01 L9_sharp:4.1253e-01 L10_sharp:5.7571e-01 L11_sharp:8.2776e-01 L12_sharp:1.9332e+00 total_fnorm:4.6562e+00 total_l1_linf:4.9600e+03 total_spectral:2.3438e+00 L1_fnorm:1.1523e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1377e-01 L9_fnorm:1.1426e-01 L10_fnorm:1.1475e-01 L11_fnorm:1.1572e-01 L12_fnorm:1.1230e-01 L1_l1linf:2.9663e-02 L2_l1linf:2.9907e-02 L3_l1linf:3.0518e-02 L4_l1linf:3.0396e-02 L5_l1linf:3.0273e-02 L6_l1linf:2.9419e-02 L7_l1linf:2.9297e-02 L8_l1linf:2.9541e-02 L9_l1linf:3.0151e-02 L10_l1linf:3.0884e-02 L11_l1linf:3.1738e-02 L12_l1linf:2.6245e-02 L1_spectral:1.6276e-03 L2_spectral:1.6274e-03 L3_spectral:1.6328e-03 L4_spectral:1.6360e-03 L5_spectral:1.5992e-03 L6_spectral:1.6242e-03 L7_spectral:1.6182e-03 L8_spectral:1.6114e-03 L9_spectral:1.6143e-03 L10_spectral:1.6187e-03 L11_spectral:1.6136e-03 L12_spectral:1.6213e-03 train_time:209928ms step_avg:43.73ms +[2025-09-11 12:31:52] [Rank 0] PRINT: step:4800/10000 
val_loss:5.2498 total_sharp:1.5882e-02 L1_sharp:3.5049e-02 L2_sharp:3.7410e-02 L3_sharp:3.8056e-02 L4_sharp:4.8793e-02 L5_sharp:6.6493e-02 L6_sharp:1.2474e-01 L7_sharp:2.0945e-01 L8_sharp:3.2478e-01 L9_sharp:4.1253e-01 L10_sharp:5.7571e-01 L11_sharp:8.2776e-01 L12_sharp:1.9332e+00 total_fnorm:4.6562e+00 total_l1_linf:4.9600e+03 total_spectral:2.3438e+00 L1_fnorm:1.1523e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1377e-01 L9_fnorm:1.1426e-01 L10_fnorm:1.1475e-01 L11_fnorm:1.1572e-01 L12_fnorm:1.1230e-01 L1_l1linf:2.9663e-02 L2_l1linf:2.9907e-02 L3_l1linf:3.0518e-02 L4_l1linf:3.0396e-02 L5_l1linf:3.0273e-02 L6_l1linf:2.9419e-02 L7_l1linf:2.9297e-02 L8_l1linf:2.9541e-02 L9_l1linf:3.0151e-02 L10_l1linf:3.0884e-02 L11_l1linf:3.1738e-02 L12_l1linf:2.6245e-02 L1_spectral:1.6276e-03 L2_spectral:1.6274e-03 L3_spectral:1.6328e-03 L4_spectral:1.6360e-03 L5_spectral:1.5992e-03 L6_spectral:1.6242e-03 L7_spectral:1.6182e-03 L8_spectral:1.6114e-03 L9_spectral:1.6143e-03 L10_spectral:1.6187e-03 L11_spectral:1.6136e-03 L12_spectral:1.6213e-03 train_time:209928ms step_avg:43.73ms +[2025-09-11 12:31:54] [Rank 0] step:4801/10000 train_time:211676ms step_avg:44.09ms +[2025-09-11 12:31:54] [Rank 0] step:4801/10000 train_time:211676ms step_avg:44.09ms +[2025-09-11 12:31:55] [Rank 0] step:4821/10000 train_time:212382ms step_avg:44.05ms +[2025-09-11 12:31:55] [Rank 0] step:4821/10000 train_time:212382ms step_avg:44.05ms +[2025-09-11 12:31:56] [Rank 0] step:4841/10000 train_time:213064ms step_avg:44.01ms +[2025-09-11 12:31:56] [Rank 0] step:4841/10000 train_time:213064ms step_avg:44.01ms +[2025-09-11 12:31:56] [Rank 0] step:4861/10000 train_time:213745ms step_avg:43.97ms +[2025-09-11 12:31:56] [Rank 0] step:4861/10000 train_time:213745ms step_avg:43.97ms +[2025-09-11 12:31:57] [Rank 0] step:4881/10000 train_time:214427ms step_avg:43.93ms +[2025-09-11 12:31:57] [Rank 0] step:4881/10000 
train_time:214427ms step_avg:43.93ms +[2025-09-11 12:31:58] [Rank 0] step:4901/10000 train_time:215109ms step_avg:43.89ms +[2025-09-11 12:31:58] [Rank 0] step:4901/10000 train_time:215109ms step_avg:43.89ms +[2025-09-11 12:31:58] [Rank 0] step:4921/10000 train_time:215792ms step_avg:43.85ms +[2025-09-11 12:31:58] [Rank 0] step:4921/10000 train_time:215792ms step_avg:43.85ms +[2025-09-11 12:31:59] [Rank 0] step:4941/10000 train_time:216473ms step_avg:43.81ms +[2025-09-11 12:31:59] [Rank 0] step:4941/10000 train_time:216473ms step_avg:43.81ms +[2025-09-11 12:32:00] [Rank 0] step:4961/10000 train_time:217155ms step_avg:43.77ms +[2025-09-11 12:32:00] [Rank 0] step:4961/10000 train_time:217155ms step_avg:43.77ms +[2025-09-11 12:32:00] [Rank 0] step:4981/10000 train_time:217837ms step_avg:43.73ms +[2025-09-11 12:32:00] [Rank 0] step:4981/10000 train_time:217837ms step_avg:43.73ms +[2025-09-11 12:32:01] [Rank 0] step:5001/10000 train_time:218520ms step_avg:43.70ms +[2025-09-11 12:32:01] [Rank 0] step:5001/10000 train_time:218520ms step_avg:43.70ms +[2025-09-11 12:32:02] [Rank 0] step:5021/10000 train_time:219201ms step_avg:43.66ms +[2025-09-11 12:32:02] [Rank 0] step:5021/10000 train_time:219201ms step_avg:43.66ms +[2025-09-11 12:32:02] [Rank 0] step:5041/10000 train_time:219882ms step_avg:43.62ms +[2025-09-11 12:32:02] [Rank 0] step:5041/10000 train_time:219882ms step_avg:43.62ms +[2025-09-11 12:32:03] [Rank 0] step:5061/10000 train_time:220564ms step_avg:43.58ms +[2025-09-11 12:32:03] [Rank 0] step:5061/10000 train_time:220564ms step_avg:43.58ms +[2025-09-11 12:32:04] [Rank 0] step:5081/10000 train_time:221245ms step_avg:43.54ms +[2025-09-11 12:32:04] [Rank 0] step:5081/10000 train_time:221245ms step_avg:43.54ms +[2025-09-11 12:32:04] [Rank 0] step:5101/10000 train_time:221926ms step_avg:43.51ms +[2025-09-11 12:32:04] [Rank 0] step:5101/10000 train_time:221926ms step_avg:43.51ms +[2025-09-11 12:32:05] [Rank 0] step:5121/10000 train_time:222607ms step_avg:43.47ms 
+[2025-09-11 12:32:05] [Rank 0] step:5121/10000 train_time:222607ms step_avg:43.47ms +[2025-09-11 12:32:06] [Rank 0] step:5141/10000 train_time:223289ms step_avg:43.43ms +[2025-09-11 12:32:06] [Rank 0] step:5141/10000 train_time:223289ms step_avg:43.43ms +[2025-09-11 12:32:07] [Rank 0] step:5161/10000 train_time:223970ms step_avg:43.40ms +[2025-09-11 12:32:07] [Rank 0] step:5161/10000 train_time:223970ms step_avg:43.40ms +[2025-09-11 12:32:07] [Rank 0] step:5181/10000 train_time:224650ms step_avg:43.36ms +[2025-09-11 12:32:07] [Rank 0] step:5181/10000 train_time:224650ms step_avg:43.36ms +[2025-09-11 12:32:08] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 12:32:08] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 12:32:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 12:32:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 12:32:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 12:32:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 12:32:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:32:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:32:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 12:32:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 12:32:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 12:32:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 12:32:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 12:32:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 12:32:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 12:32:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 12:32:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 12:32:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 12:32:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 12:32:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 12:32:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 12:32:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 12:32:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 12:32:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 12:32:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 12:32:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 12:32:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 12:32:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 12:32:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 12:32:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 12:32:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 12:32:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 12:32:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 12:32:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 12:32:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 12:32:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 12:32:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 12:32:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 12:32:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 12:32:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 12:32:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 12:32:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 12:32:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 12:32:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 12:32:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 12:32:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:32:19] [Rank 0] PRINT: step:5200/10000 val_loss:5.2134 total_sharp:1.7494e-02 L1_sharp:2.8945e-02 L2_sharp:3.2463e-02 L3_sharp:3.5967e-02 L4_sharp:5.9493e-02 L5_sharp:8.4475e-02 L6_sharp:1.3254e-01 L7_sharp:2.1826e-01 L8_sharp:3.3872e-01 L9_sharp:3.9635e-01 L10_sharp:5.7947e-01 L11_sharp:6.5241e-01 L12_sharp:1.5853e+00 total_fnorm:4.3125e+00 total_l1_linf:4.3840e+03 total_spectral:2.1562e+00 L1_fnorm:1.1475e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1523e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1377e-01 L9_fnorm:1.1426e-01 L10_fnorm:1.1426e-01 L11_fnorm:1.1475e-01 L12_fnorm:1.1377e-01 L1_l1linf:2.9907e-02 L2_l1linf:2.9541e-02 L3_l1linf:2.9541e-02 L4_l1linf:2.9907e-02 L5_l1linf:2.9541e-02 L6_l1linf:3.0518e-02 L7_l1linf:2.9541e-02 L8_l1linf:2.8931e-02 L9_l1linf:2.9175e-02 L10_l1linf:3.0396e-02 L11_l1linf:3.1250e-02 L12_l1linf:2.7100e-02 L1_spectral:1.6194e-03 L2_spectral:1.6228e-03 L3_spectral:1.6195e-03 L4_spectral:1.6308e-03 L5_spectral:1.6189e-03 L6_spectral:1.6225e-03 L7_spectral:1.6205e-03 L8_spectral:1.6102e-03 L9_spectral:1.6058e-03 L10_spectral:1.6132e-03 L11_spectral:1.6026e-03 L12_spectral:1.6192e-03 train_time:225317ms step_avg:43.33ms +[2025-09-11 12:32:19] [Rank 0] PRINT: step:5200/10000 val_loss:5.2134 total_sharp:1.7494e-02 L1_sharp:2.8945e-02 L2_sharp:3.2463e-02 L3_sharp:3.5967e-02 L4_sharp:5.9493e-02 L5_sharp:8.4475e-02 L6_sharp:1.3254e-01 L7_sharp:2.1826e-01 L8_sharp:3.3872e-01 L9_sharp:3.9635e-01 L10_sharp:5.7947e-01 L11_sharp:6.5241e-01 L12_sharp:1.5853e+00 total_fnorm:4.3125e+00 total_l1_linf:4.3840e+03 total_spectral:2.1562e+00 L1_fnorm:1.1475e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1523e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1377e-01 L9_fnorm:1.1426e-01 L10_fnorm:1.1426e-01 L11_fnorm:1.1475e-01 L12_fnorm:1.1377e-01 L1_l1linf:2.9907e-02 L2_l1linf:2.9541e-02 L3_l1linf:2.9541e-02 L4_l1linf:2.9907e-02 L5_l1linf:2.9541e-02 
L6_l1linf:3.0518e-02 L7_l1linf:2.9541e-02 L8_l1linf:2.8931e-02 L9_l1linf:2.9175e-02 L10_l1linf:3.0396e-02 L11_l1linf:3.1250e-02 L12_l1linf:2.7100e-02 L1_spectral:1.6194e-03 L2_spectral:1.6228e-03 L3_spectral:1.6195e-03 L4_spectral:1.6308e-03 L5_spectral:1.6189e-03 L6_spectral:1.6225e-03 L7_spectral:1.6205e-03 L8_spectral:1.6102e-03 L9_spectral:1.6058e-03 L10_spectral:1.6132e-03 L11_spectral:1.6026e-03 L12_spectral:1.6192e-03 train_time:225317ms step_avg:43.33ms +[2025-09-11 12:32:21] [Rank 0] step:5201/10000 train_time:227100ms step_avg:43.66ms +[2025-09-11 12:32:21] [Rank 0] step:5201/10000 train_time:227100ms step_avg:43.66ms +[2025-09-11 12:32:22] [Rank 0] step:5221/10000 train_time:227829ms step_avg:43.64ms +[2025-09-11 12:32:22] [Rank 0] step:5221/10000 train_time:227829ms step_avg:43.64ms +[2025-09-11 12:32:22] [Rank 0] step:5241/10000 train_time:228519ms step_avg:43.60ms +[2025-09-11 12:32:22] [Rank 0] step:5241/10000 train_time:228519ms step_avg:43.60ms +[2025-09-11 12:32:23] [Rank 0] step:5261/10000 train_time:229208ms step_avg:43.57ms +[2025-09-11 12:32:23] [Rank 0] step:5261/10000 train_time:229208ms step_avg:43.57ms +[2025-09-11 12:32:24] [Rank 0] step:5281/10000 train_time:229897ms step_avg:43.53ms +[2025-09-11 12:32:24] [Rank 0] step:5281/10000 train_time:229897ms step_avg:43.53ms +[2025-09-11 12:32:24] [Rank 0] step:5301/10000 train_time:230585ms step_avg:43.50ms +[2025-09-11 12:32:24] [Rank 0] step:5301/10000 train_time:230585ms step_avg:43.50ms +[2025-09-11 12:32:25] [Rank 0] step:5321/10000 train_time:231274ms step_avg:43.46ms +[2025-09-11 12:32:25] [Rank 0] step:5321/10000 train_time:231274ms step_avg:43.46ms +[2025-09-11 12:32:26] [Rank 0] step:5341/10000 train_time:231962ms step_avg:43.43ms +[2025-09-11 12:32:26] [Rank 0] step:5341/10000 train_time:231962ms step_avg:43.43ms +[2025-09-11 12:32:26] [Rank 0] step:5361/10000 train_time:232652ms step_avg:43.40ms +[2025-09-11 12:32:26] [Rank 0] step:5361/10000 train_time:232652ms step_avg:43.40ms 
+[2025-09-11 12:32:27] [Rank 0] step:5381/10000 train_time:233344ms step_avg:43.36ms +[2025-09-11 12:32:27] [Rank 0] step:5381/10000 train_time:233344ms step_avg:43.36ms +[2025-09-11 12:32:28] [Rank 0] step:5401/10000 train_time:234032ms step_avg:43.33ms +[2025-09-11 12:32:28] [Rank 0] step:5401/10000 train_time:234032ms step_avg:43.33ms +[2025-09-11 12:32:28] [Rank 0] step:5421/10000 train_time:234722ms step_avg:43.30ms +[2025-09-11 12:32:28] [Rank 0] step:5421/10000 train_time:234722ms step_avg:43.30ms +[2025-09-11 12:32:29] [Rank 0] step:5441/10000 train_time:235412ms step_avg:43.27ms +[2025-09-11 12:32:29] [Rank 0] step:5441/10000 train_time:235412ms step_avg:43.27ms +[2025-09-11 12:32:30] [Rank 0] step:5461/10000 train_time:236101ms step_avg:43.23ms +[2025-09-11 12:32:30] [Rank 0] step:5461/10000 train_time:236101ms step_avg:43.23ms +[2025-09-11 12:32:31] [Rank 0] step:5481/10000 train_time:236791ms step_avg:43.20ms +[2025-09-11 12:32:31] [Rank 0] step:5481/10000 train_time:236791ms step_avg:43.20ms +[2025-09-11 12:32:31] [Rank 0] step:5501/10000 train_time:237479ms step_avg:43.17ms +[2025-09-11 12:32:31] [Rank 0] step:5501/10000 train_time:237479ms step_avg:43.17ms +[2025-09-11 12:32:32] [Rank 0] step:5521/10000 train_time:238168ms step_avg:43.14ms +[2025-09-11 12:32:32] [Rank 0] step:5521/10000 train_time:238168ms step_avg:43.14ms +[2025-09-11 12:32:33] [Rank 0] step:5541/10000 train_time:238859ms step_avg:43.11ms +[2025-09-11 12:32:33] [Rank 0] step:5541/10000 train_time:238859ms step_avg:43.11ms +[2025-09-11 12:32:33] [Rank 0] step:5561/10000 train_time:239551ms step_avg:43.08ms +[2025-09-11 12:32:33] [Rank 0] step:5561/10000 train_time:239551ms step_avg:43.08ms +[2025-09-11 12:32:34] [Rank 0] step:5581/10000 train_time:240241ms step_avg:43.05ms +[2025-09-11 12:32:34] [Rank 0] step:5581/10000 train_time:240241ms step_avg:43.05ms +[2025-09-11 12:32:35] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 12:32:35] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 12:32:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 12:32:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 12:32:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 12:32:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 12:32:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:32:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:32:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 12:32:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 12:32:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 12:32:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 12:32:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 12:32:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 12:32:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 12:32:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 12:32:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 12:32:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 12:32:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 12:32:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 12:32:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 12:32:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 12:32:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 12:32:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 12:32:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 12:32:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 12:32:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 12:32:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 12:32:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 12:32:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 12:32:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 12:32:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 12:32:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 12:32:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 12:32:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 12:32:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 12:32:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 12:32:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 12:32:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 12:32:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 12:32:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 12:32:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 12:32:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 12:32:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 12:32:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 12:32:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 12:32:46] [Rank 0] PRINT: step:5600/10000 val_loss:5.1838 total_sharp:1.1361e-02 L1_sharp:2.2832e-02 L2_sharp:2.5303e-02 L3_sharp:2.8770e-02 L4_sharp:3.8125e-02 L5_sharp:5.3460e-02 L6_sharp:8.2608e-02 L7_sharp:1.3023e-01 L8_sharp:2.0689e-01 L9_sharp:2.8516e-01 L10_sharp:4.5583e-01 L11_sharp:5.6782e-01 L12_sharp:9.8375e-01 total_fnorm:4.3438e+00 total_l1_linf:4.4480e+03 total_spectral:2.1875e+00 L1_fnorm:1.1475e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1523e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1377e-01 L9_fnorm:1.1426e-01 L10_fnorm:1.1426e-01 L11_fnorm:1.1475e-01 L12_fnorm:1.1328e-01 L1_l1linf:2.9175e-02 L2_l1linf:2.9297e-02 L3_l1linf:2.9053e-02 L4_l1linf:2.9419e-02 L5_l1linf:2.9175e-02 L6_l1linf:2.8687e-02 L7_l1linf:2.9297e-02 L8_l1linf:2.9297e-02 L9_l1linf:2.8809e-02 L10_l1linf:3.0151e-02 L11_l1linf:3.0273e-02 L12_l1linf:2.7954e-02 L1_spectral:1.6193e-03 L2_spectral:1.6386e-03 L3_spectral:1.6225e-03 L4_spectral:1.6266e-03 L5_spectral:1.6268e-03 L6_spectral:1.6168e-03 L7_spectral:1.6367e-03 L8_spectral:1.6045e-03 L9_spectral:1.6052e-03 L10_spectral:1.6130e-03 L11_spectral:1.6129e-03 L12_spectral:1.6194e-03 train_time:240911ms step_avg:43.02ms +[2025-09-11 12:32:46] [Rank 0] PRINT: step:5600/10000 
val_loss:5.1838 total_sharp:1.1361e-02 L1_sharp:2.2832e-02 L2_sharp:2.5303e-02 L3_sharp:2.8770e-02 L4_sharp:3.8125e-02 L5_sharp:5.3460e-02 L6_sharp:8.2608e-02 L7_sharp:1.3023e-01 L8_sharp:2.0689e-01 L9_sharp:2.8516e-01 L10_sharp:4.5583e-01 L11_sharp:5.6782e-01 L12_sharp:9.8375e-01 total_fnorm:4.3438e+00 total_l1_linf:4.4480e+03 total_spectral:2.1875e+00 L1_fnorm:1.1475e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1523e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1377e-01 L9_fnorm:1.1426e-01 L10_fnorm:1.1426e-01 L11_fnorm:1.1475e-01 L12_fnorm:1.1328e-01 L1_l1linf:2.9175e-02 L2_l1linf:2.9297e-02 L3_l1linf:2.9053e-02 L4_l1linf:2.9419e-02 L5_l1linf:2.9175e-02 L6_l1linf:2.8687e-02 L7_l1linf:2.9297e-02 L8_l1linf:2.9297e-02 L9_l1linf:2.8809e-02 L10_l1linf:3.0151e-02 L11_l1linf:3.0273e-02 L12_l1linf:2.7954e-02 L1_spectral:1.6193e-03 L2_spectral:1.6386e-03 L3_spectral:1.6225e-03 L4_spectral:1.6266e-03 L5_spectral:1.6268e-03 L6_spectral:1.6168e-03 L7_spectral:1.6367e-03 L8_spectral:1.6045e-03 L9_spectral:1.6052e-03 L10_spectral:1.6130e-03 L11_spectral:1.6129e-03 L12_spectral:1.6194e-03 train_time:240911ms step_avg:43.02ms +[2025-09-11 12:32:47] [Rank 0] step:5601/10000 train_time:242697ms step_avg:43.33ms +[2025-09-11 12:32:47] [Rank 0] step:5601/10000 train_time:242697ms step_avg:43.33ms +[2025-09-11 12:32:48] [Rank 0] step:5621/10000 train_time:243409ms step_avg:43.30ms +[2025-09-11 12:32:48] [Rank 0] step:5621/10000 train_time:243409ms step_avg:43.30ms +[2025-09-11 12:32:49] [Rank 0] step:5641/10000 train_time:244100ms step_avg:43.27ms +[2025-09-11 12:32:49] [Rank 0] step:5641/10000 train_time:244100ms step_avg:43.27ms +[2025-09-11 12:32:49] [Rank 0] step:5661/10000 train_time:244790ms step_avg:43.24ms +[2025-09-11 12:32:49] [Rank 0] step:5661/10000 train_time:244790ms step_avg:43.24ms +[2025-09-11 12:32:50] [Rank 0] step:5681/10000 train_time:245480ms step_avg:43.21ms +[2025-09-11 12:32:50] [Rank 0] step:5681/10000 
train_time:245480ms step_avg:43.21ms +[2025-09-11 12:32:51] [Rank 0] step:5701/10000 train_time:246175ms step_avg:43.18ms +[2025-09-11 12:32:51] [Rank 0] step:5701/10000 train_time:246175ms step_avg:43.18ms +[2025-09-11 12:32:52] [Rank 0] step:5721/10000 train_time:246864ms step_avg:43.15ms +[2025-09-11 12:32:52] [Rank 0] step:5721/10000 train_time:246864ms step_avg:43.15ms +[2025-09-11 12:32:52] [Rank 0] step:5741/10000 train_time:247555ms step_avg:43.12ms +[2025-09-11 12:32:52] [Rank 0] step:5741/10000 train_time:247555ms step_avg:43.12ms +[2025-09-11 12:32:53] [Rank 0] step:5761/10000 train_time:248247ms step_avg:43.09ms +[2025-09-11 12:32:53] [Rank 0] step:5761/10000 train_time:248247ms step_avg:43.09ms +[2025-09-11 12:32:54] [Rank 0] step:5781/10000 train_time:248938ms step_avg:43.06ms +[2025-09-11 12:32:54] [Rank 0] step:5781/10000 train_time:248938ms step_avg:43.06ms +[2025-09-11 12:32:54] [Rank 0] step:5801/10000 train_time:249630ms step_avg:43.03ms +[2025-09-11 12:32:54] [Rank 0] step:5801/10000 train_time:249630ms step_avg:43.03ms +[2025-09-11 12:32:55] [Rank 0] step:5821/10000 train_time:250320ms step_avg:43.00ms +[2025-09-11 12:32:55] [Rank 0] step:5821/10000 train_time:250320ms step_avg:43.00ms +[2025-09-11 12:32:56] [Rank 0] step:5841/10000 train_time:251012ms step_avg:42.97ms +[2025-09-11 12:32:56] [Rank 0] step:5841/10000 train_time:251012ms step_avg:42.97ms +[2025-09-11 12:32:56] [Rank 0] step:5861/10000 train_time:251702ms step_avg:42.95ms +[2025-09-11 12:32:56] [Rank 0] step:5861/10000 train_time:251702ms step_avg:42.95ms +[2025-09-11 12:32:57] [Rank 0] step:5881/10000 train_time:252392ms step_avg:42.92ms +[2025-09-11 12:32:57] [Rank 0] step:5881/10000 train_time:252392ms step_avg:42.92ms +[2025-09-11 12:32:58] [Rank 0] step:5901/10000 train_time:253082ms step_avg:42.89ms +[2025-09-11 12:32:58] [Rank 0] step:5901/10000 train_time:253082ms step_avg:42.89ms +[2025-09-11 12:32:58] [Rank 0] step:5921/10000 train_time:253774ms step_avg:42.86ms 
+[2025-09-11 12:32:58] [Rank 0] step:5921/10000 train_time:253774ms step_avg:42.86ms +[2025-09-11 12:32:59] [Rank 0] step:5941/10000 train_time:254466ms step_avg:42.83ms +[2025-09-11 12:32:59] [Rank 0] step:5941/10000 train_time:254466ms step_avg:42.83ms +[2025-09-11 12:33:00] [Rank 0] step:5961/10000 train_time:255158ms step_avg:42.80ms +[2025-09-11 12:33:00] [Rank 0] step:5961/10000 train_time:255158ms step_avg:42.80ms +[2025-09-11 12:33:01] [Rank 0] step:5981/10000 train_time:255850ms step_avg:42.78ms +[2025-09-11 12:33:01] [Rank 0] step:5981/10000 train_time:255850ms step_avg:42.78ms +[2025-09-11 12:33:01] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 12:33:01] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 12:33:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 12:33:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 12:33:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 12:33:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 12:33:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:33:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:33:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 12:33:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 12:33:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 12:33:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 12:33:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 12:33:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 12:33:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 12:33:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 12:33:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 12:33:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 12:33:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 12:33:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 12:33:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 12:33:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 12:33:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 12:33:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 12:33:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 12:33:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 12:33:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 12:33:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 12:33:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 12:33:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 12:33:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 12:33:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 12:33:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 12:33:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 12:33:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 12:33:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 12:33:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 12:33:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 12:33:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 12:33:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 12:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 12:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 12:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 12:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 12:33:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 12:33:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:33:16] [Rank 0] PRINT: step:6000/10000 val_loss:5.1411 total_sharp:1.4918e-02 L1_sharp:2.3334e-02 L2_sharp:2.1603e-02 L3_sharp:2.5534e-02 L4_sharp:3.1408e-02 L5_sharp:4.0624e-02 L6_sharp:7.2783e-02 L7_sharp:1.2562e-01 L8_sharp:1.8824e-01 L9_sharp:3.2032e-01 L10_sharp:5.4229e-01 L11_sharp:7.9479e-01 L12_sharp:2.8166e+00 total_fnorm:4.2812e+00 total_l1_linf:4.2880e+03 total_spectral:2.1562e+00 L1_fnorm:1.1523e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1523e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1328e-01 L9_fnorm:1.1426e-01 L10_fnorm:1.1377e-01 L11_fnorm:1.1475e-01 L12_fnorm:1.1279e-01 L1_l1linf:2.8931e-02 L2_l1linf:2.9297e-02 L3_l1linf:2.9419e-02 L4_l1linf:2.9175e-02 L5_l1linf:2.9419e-02 L6_l1linf:2.8687e-02 L7_l1linf:2.8320e-02 L8_l1linf:2.8931e-02 L9_l1linf:2.8931e-02 L10_l1linf:2.8931e-02 L11_l1linf:3.0396e-02 L12_l1linf:2.5879e-02 L1_spectral:1.6281e-03 L2_spectral:1.6281e-03 L3_spectral:1.6355e-03 L4_spectral:1.6254e-03 L5_spectral:1.6144e-03 L6_spectral:1.6387e-03 L7_spectral:1.6269e-03 L8_spectral:1.6151e-03 L9_spectral:1.6246e-03 L10_spectral:1.6163e-03 L11_spectral:1.5996e-03 L12_spectral:1.6097e-03 train_time:256525ms step_avg:42.75ms +[2025-09-11 12:33:16] [Rank 0] PRINT: step:6000/10000 val_loss:5.1411 total_sharp:1.4918e-02 L1_sharp:2.3334e-02 L2_sharp:2.1603e-02 L3_sharp:2.5534e-02 L4_sharp:3.1408e-02 L5_sharp:4.0624e-02 L6_sharp:7.2783e-02 L7_sharp:1.2562e-01 L8_sharp:1.8824e-01 L9_sharp:3.2032e-01 L10_sharp:5.4229e-01 L11_sharp:7.9479e-01 L12_sharp:2.8166e+00 total_fnorm:4.2812e+00 total_l1_linf:4.2880e+03 total_spectral:2.1562e+00 L1_fnorm:1.1523e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1523e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1328e-01 L9_fnorm:1.1426e-01 L10_fnorm:1.1377e-01 L11_fnorm:1.1475e-01 L12_fnorm:1.1279e-01 L1_l1linf:2.8931e-02 L2_l1linf:2.9297e-02 L3_l1linf:2.9419e-02 L4_l1linf:2.9175e-02 L5_l1linf:2.9419e-02 
L6_l1linf:2.8687e-02 L7_l1linf:2.8320e-02 L8_l1linf:2.8931e-02 L9_l1linf:2.8931e-02 L10_l1linf:2.8931e-02 L11_l1linf:3.0396e-02 L12_l1linf:2.5879e-02 L1_spectral:1.6281e-03 L2_spectral:1.6281e-03 L3_spectral:1.6355e-03 L4_spectral:1.6254e-03 L5_spectral:1.6144e-03 L6_spectral:1.6387e-03 L7_spectral:1.6269e-03 L8_spectral:1.6151e-03 L9_spectral:1.6246e-03 L10_spectral:1.6163e-03 L11_spectral:1.5996e-03 L12_spectral:1.6097e-03 train_time:256525ms step_avg:42.75ms +[2025-09-11 12:33:18] [Rank 0] step:6001/10000 train_time:258478ms step_avg:43.07ms +[2025-09-11 12:33:18] [Rank 0] step:6001/10000 train_time:258478ms step_avg:43.07ms +[2025-09-11 12:33:19] [Rank 0] step:6021/10000 train_time:259194ms step_avg:43.05ms +[2025-09-11 12:33:19] [Rank 0] step:6021/10000 train_time:259194ms step_avg:43.05ms +[2025-09-11 12:33:19] [Rank 0] step:6041/10000 train_time:259889ms step_avg:43.02ms +[2025-09-11 12:33:19] [Rank 0] step:6041/10000 train_time:259889ms step_avg:43.02ms +[2025-09-11 12:33:20] [Rank 0] step:6061/10000 train_time:260582ms step_avg:42.99ms +[2025-09-11 12:33:20] [Rank 0] step:6061/10000 train_time:260582ms step_avg:42.99ms +[2025-09-11 12:33:21] [Rank 0] step:6081/10000 train_time:261275ms step_avg:42.97ms +[2025-09-11 12:33:21] [Rank 0] step:6081/10000 train_time:261275ms step_avg:42.97ms +[2025-09-11 12:33:21] [Rank 0] step:6101/10000 train_time:261966ms step_avg:42.94ms +[2025-09-11 12:33:21] [Rank 0] step:6101/10000 train_time:261966ms step_avg:42.94ms +[2025-09-11 12:33:22] [Rank 0] step:6121/10000 train_time:262659ms step_avg:42.91ms +[2025-09-11 12:33:22] [Rank 0] step:6121/10000 train_time:262659ms step_avg:42.91ms +[2025-09-11 12:33:23] [Rank 0] step:6141/10000 train_time:263352ms step_avg:42.88ms +[2025-09-11 12:33:23] [Rank 0] step:6141/10000 train_time:263352ms step_avg:42.88ms +[2025-09-11 12:33:24] [Rank 0] step:6161/10000 train_time:264043ms step_avg:42.86ms +[2025-09-11 12:33:24] [Rank 0] step:6161/10000 train_time:264043ms step_avg:42.86ms 
+[2025-09-11 12:33:24] [Rank 0] step:6181/10000 train_time:264734ms step_avg:42.83ms +[2025-09-11 12:33:24] [Rank 0] step:6181/10000 train_time:264734ms step_avg:42.83ms +[2025-09-11 12:33:25] [Rank 0] step:6201/10000 train_time:265427ms step_avg:42.80ms +[2025-09-11 12:33:25] [Rank 0] step:6201/10000 train_time:265427ms step_avg:42.80ms +[2025-09-11 12:33:26] [Rank 0] step:6221/10000 train_time:266121ms step_avg:42.78ms +[2025-09-11 12:33:26] [Rank 0] step:6221/10000 train_time:266121ms step_avg:42.78ms +[2025-09-11 12:33:26] [Rank 0] step:6241/10000 train_time:266813ms step_avg:42.75ms +[2025-09-11 12:33:26] [Rank 0] step:6241/10000 train_time:266813ms step_avg:42.75ms +[2025-09-11 12:33:27] [Rank 0] step:6261/10000 train_time:267503ms step_avg:42.73ms +[2025-09-11 12:33:27] [Rank 0] step:6261/10000 train_time:267503ms step_avg:42.73ms +[2025-09-11 12:33:28] [Rank 0] step:6281/10000 train_time:268198ms step_avg:42.70ms +[2025-09-11 12:33:28] [Rank 0] step:6281/10000 train_time:268198ms step_avg:42.70ms +[2025-09-11 12:33:28] [Rank 0] step:6301/10000 train_time:268889ms step_avg:42.67ms +[2025-09-11 12:33:28] [Rank 0] step:6301/10000 train_time:268889ms step_avg:42.67ms +[2025-09-11 12:33:29] [Rank 0] step:6321/10000 train_time:269584ms step_avg:42.65ms +[2025-09-11 12:33:29] [Rank 0] step:6321/10000 train_time:269584ms step_avg:42.65ms +[2025-09-11 12:33:30] [Rank 0] step:6341/10000 train_time:270278ms step_avg:42.62ms +[2025-09-11 12:33:30] [Rank 0] step:6341/10000 train_time:270278ms step_avg:42.62ms +[2025-09-11 12:33:30] [Rank 0] step:6361/10000 train_time:270971ms step_avg:42.60ms +[2025-09-11 12:33:30] [Rank 0] step:6361/10000 train_time:270971ms step_avg:42.60ms +[2025-09-11 12:33:31] [Rank 0] step:6381/10000 train_time:271663ms step_avg:42.57ms +[2025-09-11 12:33:31] [Rank 0] step:6381/10000 train_time:271663ms step_avg:42.57ms +[2025-09-11 12:33:32] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 12:33:32] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 12:33:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 12:33:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 12:33:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 12:33:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 12:33:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:33:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:33:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 12:33:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 12:33:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 12:33:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 12:33:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 12:33:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 12:33:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 12:33:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 12:33:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 12:33:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 12:33:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 12:33:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 12:33:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 12:33:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 12:33:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 12:33:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 12:33:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 12:33:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 12:33:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 12:33:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 12:33:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 12:33:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 12:33:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 12:33:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 12:33:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 12:33:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 12:33:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 12:33:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 12:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 12:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 12:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 12:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 12:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 12:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 12:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 12:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 12:33:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 12:33:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 12:33:43] [Rank 0] PRINT: step:6400/10000 val_loss:5.1134 total_sharp:1.2402e-02 L1_sharp:2.7443e-02 L2_sharp:3.0899e-02 L3_sharp:3.2608e-02 L4_sharp:4.0960e-02 L5_sharp:6.1124e-02 L6_sharp:1.0150e-01 L7_sharp:1.8371e-01 L8_sharp:2.8171e-01 L9_sharp:2.9149e-01 L10_sharp:4.0738e-01 L11_sharp:5.4943e-01 L12_sharp:9.3482e-01 total_fnorm:3.7812e+00 total_l1_linf:3.5680e+03 total_spectral:1.8984e+00 L1_fnorm:1.0059e-01 L2_fnorm:1.0107e-01 L3_fnorm:1.0107e-01 L4_fnorm:1.0107e-01 L5_fnorm:1.0059e-01 L6_fnorm:1.0010e-01 L7_fnorm:1.0010e-01 L8_fnorm:9.9121e-02 L9_fnorm:1.0010e-01 L10_fnorm:1.0059e-01 L11_fnorm:1.0059e-01 L12_fnorm:9.8633e-02 L1_l1linf:2.4536e-02 L2_l1linf:2.5024e-02 L3_l1linf:2.4536e-02 L4_l1linf:2.4780e-02 L5_l1linf:2.4536e-02 L6_l1linf:2.4170e-02 L7_l1linf:2.4658e-02 L8_l1linf:2.4292e-02 L9_l1linf:2.4414e-02 L10_l1linf:2.4780e-02 L11_l1linf:2.5635e-02 L12_l1linf:2.2705e-02 L1_spectral:1.4427e-03 L2_spectral:1.4473e-03 L3_spectral:1.4568e-03 L4_spectral:1.4434e-03 L5_spectral:1.4457e-03 L6_spectral:1.4493e-03 L7_spectral:1.4472e-03 L8_spectral:1.4485e-03 L9_spectral:1.4403e-03 L10_spectral:1.4452e-03 L11_spectral:1.4549e-03 L12_spectral:1.4397e-03 train_time:272336ms step_avg:42.55ms +[2025-09-11 12:33:43] [Rank 0] PRINT: step:6400/10000 
val_loss:5.1134 total_sharp:1.2402e-02 L1_sharp:2.7443e-02 L2_sharp:3.0899e-02 L3_sharp:3.2608e-02 L4_sharp:4.0960e-02 L5_sharp:6.1124e-02 L6_sharp:1.0150e-01 L7_sharp:1.8371e-01 L8_sharp:2.8171e-01 L9_sharp:2.9149e-01 L10_sharp:4.0738e-01 L11_sharp:5.4943e-01 L12_sharp:9.3482e-01 total_fnorm:3.7812e+00 total_l1_linf:3.5680e+03 total_spectral:1.8984e+00 L1_fnorm:1.0059e-01 L2_fnorm:1.0107e-01 L3_fnorm:1.0107e-01 L4_fnorm:1.0107e-01 L5_fnorm:1.0059e-01 L6_fnorm:1.0010e-01 L7_fnorm:1.0010e-01 L8_fnorm:9.9121e-02 L9_fnorm:1.0010e-01 L10_fnorm:1.0059e-01 L11_fnorm:1.0059e-01 L12_fnorm:9.8633e-02 L1_l1linf:2.4536e-02 L2_l1linf:2.5024e-02 L3_l1linf:2.4536e-02 L4_l1linf:2.4780e-02 L5_l1linf:2.4536e-02 L6_l1linf:2.4170e-02 L7_l1linf:2.4658e-02 L8_l1linf:2.4292e-02 L9_l1linf:2.4414e-02 L10_l1linf:2.4780e-02 L11_l1linf:2.5635e-02 L12_l1linf:2.2705e-02 L1_spectral:1.4427e-03 L2_spectral:1.4473e-03 L3_spectral:1.4568e-03 L4_spectral:1.4434e-03 L5_spectral:1.4457e-03 L6_spectral:1.4493e-03 L7_spectral:1.4472e-03 L8_spectral:1.4485e-03 L9_spectral:1.4403e-03 L10_spectral:1.4452e-03 L11_spectral:1.4549e-03 L12_spectral:1.4397e-03 train_time:272336ms step_avg:42.55ms +[2025-09-11 12:33:45] [Rank 0] step:6401/10000 train_time:274255ms step_avg:42.85ms +[2025-09-11 12:33:45] [Rank 0] step:6401/10000 train_time:274255ms step_avg:42.85ms +[2025-09-11 12:33:46] [Rank 0] step:6421/10000 train_time:274972ms step_avg:42.82ms +[2025-09-11 12:33:46] [Rank 0] step:6421/10000 train_time:274972ms step_avg:42.82ms +[2025-09-11 12:33:47] [Rank 0] step:6441/10000 train_time:275666ms step_avg:42.80ms +[2025-09-11 12:33:47] [Rank 0] step:6441/10000 train_time:275666ms step_avg:42.80ms +[2025-09-11 12:33:47] [Rank 0] step:6461/10000 train_time:276361ms step_avg:42.77ms +[2025-09-11 12:33:47] [Rank 0] step:6461/10000 train_time:276361ms step_avg:42.77ms +[2025-09-11 12:33:48] [Rank 0] step:6481/10000 train_time:277056ms step_avg:42.75ms +[2025-09-11 12:33:48] [Rank 0] step:6481/10000 
train_time:277056ms step_avg:42.75ms +[2025-09-11 12:33:49] [Rank 0] step:6501/10000 train_time:277752ms step_avg:42.72ms +[2025-09-11 12:33:49] [Rank 0] step:6501/10000 train_time:277752ms step_avg:42.72ms +[2025-09-11 12:33:49] [Rank 0] step:6521/10000 train_time:278447ms step_avg:42.70ms +[2025-09-11 12:33:49] [Rank 0] step:6521/10000 train_time:278447ms step_avg:42.70ms +[2025-09-11 12:33:50] [Rank 0] step:6541/10000 train_time:279138ms step_avg:42.68ms +[2025-09-11 12:33:50] [Rank 0] step:6541/10000 train_time:279138ms step_avg:42.68ms +[2025-09-11 12:33:51] [Rank 0] step:6561/10000 train_time:279832ms step_avg:42.65ms +[2025-09-11 12:33:51] [Rank 0] step:6561/10000 train_time:279832ms step_avg:42.65ms +[2025-09-11 12:33:51] [Rank 0] step:6581/10000 train_time:280527ms step_avg:42.63ms +[2025-09-11 12:33:51] [Rank 0] step:6581/10000 train_time:280527ms step_avg:42.63ms +[2025-09-11 12:33:52] [Rank 0] step:6601/10000 train_time:281222ms step_avg:42.60ms +[2025-09-11 12:33:52] [Rank 0] step:6601/10000 train_time:281222ms step_avg:42.60ms +[2025-09-11 12:33:53] [Rank 0] step:6621/10000 train_time:281914ms step_avg:42.58ms +[2025-09-11 12:33:53] [Rank 0] step:6621/10000 train_time:281914ms step_avg:42.58ms +[2025-09-11 12:33:54] [Rank 0] step:6641/10000 train_time:282609ms step_avg:42.56ms +[2025-09-11 12:33:54] [Rank 0] step:6641/10000 train_time:282609ms step_avg:42.56ms +[2025-09-11 12:33:54] [Rank 0] step:6661/10000 train_time:283304ms step_avg:42.53ms +[2025-09-11 12:33:54] [Rank 0] step:6661/10000 train_time:283304ms step_avg:42.53ms +[2025-09-11 12:33:55] [Rank 0] step:6681/10000 train_time:284007ms step_avg:42.51ms +[2025-09-11 12:33:55] [Rank 0] step:6681/10000 train_time:284007ms step_avg:42.51ms +[2025-09-11 12:33:56] [Rank 0] step:6701/10000 train_time:284707ms step_avg:42.49ms +[2025-09-11 12:33:56] [Rank 0] step:6701/10000 train_time:284707ms step_avg:42.49ms +[2025-09-11 12:33:56] [Rank 0] step:6721/10000 train_time:285408ms step_avg:42.47ms 
+[2025-09-11 12:33:56] [Rank 0] step:6721/10000 train_time:285408ms step_avg:42.47ms +[2025-09-11 12:33:57] [Rank 0] step:6741/10000 train_time:286109ms step_avg:42.44ms +[2025-09-11 12:33:57] [Rank 0] step:6741/10000 train_time:286109ms step_avg:42.44ms +[2025-09-11 12:33:58] [Rank 0] step:6761/10000 train_time:286808ms step_avg:42.42ms +[2025-09-11 12:33:58] [Rank 0] step:6761/10000 train_time:286808ms step_avg:42.42ms +[2025-09-11 12:33:58] [Rank 0] step:6781/10000 train_time:287509ms step_avg:42.40ms +[2025-09-11 12:33:58] [Rank 0] step:6781/10000 train_time:287509ms step_avg:42.40ms +[2025-09-11 12:33:59] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 12:33:59] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 12:34:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 12:34:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 12:34:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 12:34:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 12:34:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:34:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:34:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 12:34:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 12:34:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 12:34:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 12:34:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 12:34:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 12:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 12:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 12:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 12:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 12:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 12:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 12:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 12:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 12:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 12:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 12:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 12:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 12:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 12:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 12:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 12:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 12:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 12:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 12:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 12:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 12:34:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 12:34:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 12:34:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 12:34:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 12:34:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 12:34:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 12:34:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 12:34:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 12:34:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 12:34:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 12:34:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 12:34:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:34:12] [Rank 0] PRINT: step:6800/10000 val_loss:5.0806 total_sharp:8.6120e-03 L1_sharp:2.3578e-02 L2_sharp:2.4323e-02 L3_sharp:2.5006e-02 L4_sharp:3.0026e-02 L5_sharp:4.6452e-02 L6_sharp:7.1540e-02 L7_sharp:1.0346e-01 L8_sharp:1.5968e-01 L9_sharp:2.6856e-01 L10_sharp:4.5161e-01 L11_sharp:5.5666e-01 L12_sharp:9.9862e-01 total_fnorm:3.4688e+00 total_l1_linf:3.1200e+03 total_spectral:1.7344e+00 L1_fnorm:8.6426e-02 L2_fnorm:8.6914e-02 L3_fnorm:8.7402e-02 L4_fnorm:8.6914e-02 L5_fnorm:8.6426e-02 L6_fnorm:8.5938e-02 L7_fnorm:8.5938e-02 L8_fnorm:8.5449e-02 L9_fnorm:8.5938e-02 L10_fnorm:8.5449e-02 L11_fnorm:8.5938e-02 L12_fnorm:8.4473e-02 L1_l1linf:2.0386e-02 L2_l1linf:2.0630e-02 L3_l1linf:2.0996e-02 L4_l1linf:2.1362e-02 L5_l1linf:2.0996e-02 L6_l1linf:2.0630e-02 L7_l1linf:2.0264e-02 L8_l1linf:2.0142e-02 L9_l1linf:2.0264e-02 L10_l1linf:1.9897e-02 L11_l1linf:2.0874e-02 L12_l1linf:1.8921e-02 L1_spectral:1.2971e-03 L2_spectral:1.2975e-03 L3_spectral:1.3021e-03 L4_spectral:1.2980e-03 L5_spectral:1.2924e-03 L6_spectral:1.2890e-03 L7_spectral:1.2976e-03 L8_spectral:1.2934e-03 L9_spectral:1.2990e-03 L10_spectral:1.3007e-03 L11_spectral:1.2714e-03 L12_spectral:1.2486e-03 train_time:288190ms step_avg:42.38ms +[2025-09-11 12:34:12] [Rank 0] PRINT: step:6800/10000 val_loss:5.0806 total_sharp:8.6120e-03 L1_sharp:2.3578e-02 L2_sharp:2.4323e-02 L3_sharp:2.5006e-02 L4_sharp:3.0026e-02 L5_sharp:4.6452e-02 L6_sharp:7.1540e-02 L7_sharp:1.0346e-01 L8_sharp:1.5968e-01 L9_sharp:2.6856e-01 L10_sharp:4.5161e-01 L11_sharp:5.5666e-01 L12_sharp:9.9862e-01 total_fnorm:3.4688e+00 total_l1_linf:3.1200e+03 total_spectral:1.7344e+00 L1_fnorm:8.6426e-02 L2_fnorm:8.6914e-02 L3_fnorm:8.7402e-02 L4_fnorm:8.6914e-02 L5_fnorm:8.6426e-02 L6_fnorm:8.5938e-02 L7_fnorm:8.5938e-02 L8_fnorm:8.5449e-02 L9_fnorm:8.5938e-02 L10_fnorm:8.5449e-02 L11_fnorm:8.5938e-02 L12_fnorm:8.4473e-02 L1_l1linf:2.0386e-02 L2_l1linf:2.0630e-02 L3_l1linf:2.0996e-02 L4_l1linf:2.1362e-02 L5_l1linf:2.0996e-02 
L6_l1linf:2.0630e-02 L7_l1linf:2.0264e-02 L8_l1linf:2.0142e-02 L9_l1linf:2.0264e-02 L10_l1linf:1.9897e-02 L11_l1linf:2.0874e-02 L12_l1linf:1.8921e-02 L1_spectral:1.2971e-03 L2_spectral:1.2975e-03 L3_spectral:1.3021e-03 L4_spectral:1.2980e-03 L5_spectral:1.2924e-03 L6_spectral:1.2890e-03 L7_spectral:1.2976e-03 L8_spectral:1.2934e-03 L9_spectral:1.2990e-03 L10_spectral:1.3007e-03 L11_spectral:1.2714e-03 L12_spectral:1.2486e-03 train_time:288190ms step_avg:42.38ms +[2025-09-11 12:34:14] [Rank 0] step:6801/10000 train_time:290332ms step_avg:42.69ms +[2025-09-11 12:34:14] [Rank 0] step:6801/10000 train_time:290332ms step_avg:42.69ms +[2025-09-11 12:34:15] [Rank 0] step:6821/10000 train_time:291056ms step_avg:42.67ms +[2025-09-11 12:34:15] [Rank 0] step:6821/10000 train_time:291056ms step_avg:42.67ms +[2025-09-11 12:34:16] [Rank 0] step:6841/10000 train_time:291759ms step_avg:42.65ms +[2025-09-11 12:34:16] [Rank 0] step:6841/10000 train_time:291759ms step_avg:42.65ms +[2025-09-11 12:34:17] [Rank 0] step:6861/10000 train_time:292749ms step_avg:42.67ms +[2025-09-11 12:34:17] [Rank 0] step:6861/10000 train_time:292749ms step_avg:42.67ms +[2025-09-11 12:34:17] [Rank 0] step:6881/10000 train_time:293451ms step_avg:42.65ms +[2025-09-11 12:34:17] [Rank 0] step:6881/10000 train_time:293451ms step_avg:42.65ms +[2025-09-11 12:34:18] [Rank 0] step:6901/10000 train_time:294151ms step_avg:42.62ms +[2025-09-11 12:34:18] [Rank 0] step:6901/10000 train_time:294151ms step_avg:42.62ms +[2025-09-11 12:34:19] [Rank 0] step:6921/10000 train_time:295125ms step_avg:42.64ms +[2025-09-11 12:34:19] [Rank 0] step:6921/10000 train_time:295125ms step_avg:42.64ms +[2025-09-11 12:34:20] [Rank 0] step:6941/10000 train_time:295825ms step_avg:42.62ms +[2025-09-11 12:34:20] [Rank 0] step:6941/10000 train_time:295825ms step_avg:42.62ms +[2025-09-11 12:34:21] [Rank 0] step:6961/10000 train_time:296527ms step_avg:42.60ms +[2025-09-11 12:34:21] [Rank 0] step:6961/10000 train_time:296527ms step_avg:42.60ms 
+[2025-09-11 12:34:21] [Rank 0] step:6981/10000 train_time:297230ms step_avg:42.58ms +[2025-09-11 12:34:21] [Rank 0] step:6981/10000 train_time:297230ms step_avg:42.58ms +[2025-09-11 12:34:22] [Rank 0] step:7001/10000 train_time:297930ms step_avg:42.56ms +[2025-09-11 12:34:22] [Rank 0] step:7001/10000 train_time:297930ms step_avg:42.56ms +[2025-09-11 12:34:23] [Rank 0] step:7021/10000 train_time:298630ms step_avg:42.53ms +[2025-09-11 12:34:23] [Rank 0] step:7021/10000 train_time:298630ms step_avg:42.53ms +[2025-09-11 12:34:23] [Rank 0] step:7041/10000 train_time:299329ms step_avg:42.51ms +[2025-09-11 12:34:23] [Rank 0] step:7041/10000 train_time:299329ms step_avg:42.51ms +[2025-09-11 12:34:24] [Rank 0] step:7061/10000 train_time:300031ms step_avg:42.49ms +[2025-09-11 12:34:24] [Rank 0] step:7061/10000 train_time:300031ms step_avg:42.49ms +[2025-09-11 12:34:25] [Rank 0] step:7081/10000 train_time:300731ms step_avg:42.47ms +[2025-09-11 12:34:25] [Rank 0] step:7081/10000 train_time:300731ms step_avg:42.47ms +[2025-09-11 12:34:25] [Rank 0] step:7101/10000 train_time:301431ms step_avg:42.45ms +[2025-09-11 12:34:25] [Rank 0] step:7101/10000 train_time:301431ms step_avg:42.45ms +[2025-09-11 12:34:26] [Rank 0] step:7121/10000 train_time:302133ms step_avg:42.43ms +[2025-09-11 12:34:26] [Rank 0] step:7121/10000 train_time:302133ms step_avg:42.43ms +[2025-09-11 12:34:27] [Rank 0] step:7141/10000 train_time:302833ms step_avg:42.41ms +[2025-09-11 12:34:27] [Rank 0] step:7141/10000 train_time:302833ms step_avg:42.41ms +[2025-09-11 12:34:28] [Rank 0] step:7161/10000 train_time:303535ms step_avg:42.39ms +[2025-09-11 12:34:28] [Rank 0] step:7161/10000 train_time:303535ms step_avg:42.39ms +[2025-09-11 12:34:28] [Rank 0] step:7181/10000 train_time:304236ms step_avg:42.37ms +[2025-09-11 12:34:28] [Rank 0] step:7181/10000 train_time:304236ms step_avg:42.37ms +[2025-09-11 12:34:29] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 12:34:29] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 12:34:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 12:34:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 12:34:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 12:34:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 12:34:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:34:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:34:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 12:34:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 12:34:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 12:34:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 12:34:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 12:34:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 12:34:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 12:34:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 12:34:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 12:34:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 12:34:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 12:34:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 12:34:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 12:34:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 12:34:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 12:34:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 12:34:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 12:34:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 12:34:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 12:34:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 12:34:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 12:34:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 12:34:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 12:34:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 12:34:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 12:34:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 12:34:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 12:34:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 12:34:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 12:34:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 12:34:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 12:34:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 12:34:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 12:34:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 12:34:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 12:34:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 12:34:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 12:34:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 12:34:40] [Rank 0] PRINT: step:7200/10000 val_loss:5.0516 total_sharp:8.4436e-03 L1_sharp:1.8348e-02 L2_sharp:1.8669e-02 L3_sharp:1.9600e-02 L4_sharp:2.2786e-02 L5_sharp:3.9531e-02 L6_sharp:5.2484e-02 L7_sharp:9.5779e-02 L8_sharp:1.4674e-01 L9_sharp:2.1951e-01 L10_sharp:3.6601e-01 L11_sharp:5.1632e-01 L12_sharp:9.6362e-01 total_fnorm:2.8125e+00 total_l1_linf:2.3200e+03 total_spectral:1.4062e+00 L1_fnorm:7.3730e-02 L2_fnorm:7.3730e-02 L3_fnorm:7.4219e-02 L4_fnorm:7.3730e-02 L5_fnorm:7.3242e-02 L6_fnorm:7.3242e-02 L7_fnorm:7.3242e-02 L8_fnorm:7.2266e-02 L9_fnorm:7.2754e-02 L10_fnorm:7.2266e-02 L11_fnorm:7.2266e-02 L12_fnorm:7.0801e-02 L1_l1linf:1.6724e-02 L2_l1linf:1.6968e-02 L3_l1linf:1.7334e-02 L4_l1linf:1.6968e-02 L5_l1linf:1.7334e-02 L6_l1linf:1.7700e-02 L7_l1linf:1.6479e-02 L8_l1linf:1.6602e-02 L9_l1linf:1.6479e-02 L10_l1linf:1.6724e-02 L11_l1linf:1.6968e-02 L12_l1linf:1.5076e-02 L1_spectral:1.1547e-03 L2_spectral:1.1490e-03 L3_spectral:1.1554e-03 L4_spectral:1.1544e-03 L5_spectral:1.1580e-03 L6_spectral:1.1510e-03 L7_spectral:1.1444e-03 L8_spectral:1.1529e-03 L9_spectral:1.1528e-03 L10_spectral:1.1456e-03 L11_spectral:1.1235e-03 L12_spectral:1.0621e-03 train_time:304917ms step_avg:42.35ms +[2025-09-11 12:34:40] [Rank 0] PRINT: step:7200/10000 
val_loss:5.0516 total_sharp:8.4436e-03 L1_sharp:1.8348e-02 L2_sharp:1.8669e-02 L3_sharp:1.9600e-02 L4_sharp:2.2786e-02 L5_sharp:3.9531e-02 L6_sharp:5.2484e-02 L7_sharp:9.5779e-02 L8_sharp:1.4674e-01 L9_sharp:2.1951e-01 L10_sharp:3.6601e-01 L11_sharp:5.1632e-01 L12_sharp:9.6362e-01 total_fnorm:2.8125e+00 total_l1_linf:2.3200e+03 total_spectral:1.4062e+00 L1_fnorm:7.3730e-02 L2_fnorm:7.3730e-02 L3_fnorm:7.4219e-02 L4_fnorm:7.3730e-02 L5_fnorm:7.3242e-02 L6_fnorm:7.3242e-02 L7_fnorm:7.3242e-02 L8_fnorm:7.2266e-02 L9_fnorm:7.2754e-02 L10_fnorm:7.2266e-02 L11_fnorm:7.2266e-02 L12_fnorm:7.0801e-02 L1_l1linf:1.6724e-02 L2_l1linf:1.6968e-02 L3_l1linf:1.7334e-02 L4_l1linf:1.6968e-02 L5_l1linf:1.7334e-02 L6_l1linf:1.7700e-02 L7_l1linf:1.6479e-02 L8_l1linf:1.6602e-02 L9_l1linf:1.6479e-02 L10_l1linf:1.6724e-02 L11_l1linf:1.6968e-02 L12_l1linf:1.5076e-02 L1_spectral:1.1547e-03 L2_spectral:1.1490e-03 L3_spectral:1.1554e-03 L4_spectral:1.1544e-03 L5_spectral:1.1580e-03 L6_spectral:1.1510e-03 L7_spectral:1.1444e-03 L8_spectral:1.1529e-03 L9_spectral:1.1528e-03 L10_spectral:1.1456e-03 L11_spectral:1.1235e-03 L12_spectral:1.0621e-03 train_time:304917ms step_avg:42.35ms +[2025-09-11 12:34:42] [Rank 0] step:7201/10000 train_time:306886ms step_avg:42.62ms +[2025-09-11 12:34:42] [Rank 0] step:7201/10000 train_time:306886ms step_avg:42.62ms +[2025-09-11 12:34:43] [Rank 0] step:7221/10000 train_time:307618ms step_avg:42.60ms +[2025-09-11 12:34:43] [Rank 0] step:7221/10000 train_time:307618ms step_avg:42.60ms +[2025-09-11 12:34:43] [Rank 0] step:7241/10000 train_time:308319ms step_avg:42.58ms +[2025-09-11 12:34:43] [Rank 0] step:7241/10000 train_time:308319ms step_avg:42.58ms +[2025-09-11 12:34:44] [Rank 0] step:7261/10000 train_time:309023ms step_avg:42.56ms +[2025-09-11 12:34:44] [Rank 0] step:7261/10000 train_time:309023ms step_avg:42.56ms +[2025-09-11 12:34:45] [Rank 0] step:7281/10000 train_time:309731ms step_avg:42.54ms +[2025-09-11 12:34:45] [Rank 0] step:7281/10000 
train_time:309731ms step_avg:42.54ms +[2025-09-11 12:34:45] [Rank 0] step:7301/10000 train_time:310433ms step_avg:42.52ms +[2025-09-11 12:34:45] [Rank 0] step:7301/10000 train_time:310433ms step_avg:42.52ms +[2025-09-11 12:34:46] [Rank 0] step:7321/10000 train_time:311135ms step_avg:42.50ms +[2025-09-11 12:34:46] [Rank 0] step:7321/10000 train_time:311135ms step_avg:42.50ms +[2025-09-11 12:34:47] [Rank 0] step:7341/10000 train_time:311837ms step_avg:42.48ms +[2025-09-11 12:34:47] [Rank 0] step:7341/10000 train_time:311837ms step_avg:42.48ms +[2025-09-11 12:34:47] [Rank 0] step:7361/10000 train_time:312539ms step_avg:42.46ms +[2025-09-11 12:34:47] [Rank 0] step:7361/10000 train_time:312539ms step_avg:42.46ms +[2025-09-11 12:34:48] [Rank 0] step:7381/10000 train_time:313241ms step_avg:42.44ms +[2025-09-11 12:34:48] [Rank 0] step:7381/10000 train_time:313241ms step_avg:42.44ms +[2025-09-11 12:34:49] [Rank 0] step:7401/10000 train_time:313942ms step_avg:42.42ms +[2025-09-11 12:34:49] [Rank 0] step:7401/10000 train_time:313942ms step_avg:42.42ms +[2025-09-11 12:34:50] [Rank 0] step:7421/10000 train_time:314643ms step_avg:42.40ms +[2025-09-11 12:34:50] [Rank 0] step:7421/10000 train_time:314643ms step_avg:42.40ms +[2025-09-11 12:34:50] [Rank 0] step:7441/10000 train_time:315346ms step_avg:42.38ms +[2025-09-11 12:34:50] [Rank 0] step:7441/10000 train_time:315346ms step_avg:42.38ms +[2025-09-11 12:34:51] [Rank 0] step:7461/10000 train_time:316049ms step_avg:42.36ms +[2025-09-11 12:34:51] [Rank 0] step:7461/10000 train_time:316049ms step_avg:42.36ms +[2025-09-11 12:34:52] [Rank 0] step:7481/10000 train_time:316752ms step_avg:42.34ms +[2025-09-11 12:34:52] [Rank 0] step:7481/10000 train_time:316752ms step_avg:42.34ms +[2025-09-11 12:34:52] [Rank 0] step:7501/10000 train_time:317454ms step_avg:42.32ms +[2025-09-11 12:34:52] [Rank 0] step:7501/10000 train_time:317454ms step_avg:42.32ms +[2025-09-11 12:34:53] [Rank 0] step:7521/10000 train_time:318158ms step_avg:42.30ms 
+[2025-09-11 12:34:53] [Rank 0] step:7521/10000 train_time:318158ms step_avg:42.30ms +[2025-09-11 12:34:54] [Rank 0] step:7541/10000 train_time:318859ms step_avg:42.28ms +[2025-09-11 12:34:54] [Rank 0] step:7541/10000 train_time:318859ms step_avg:42.28ms +[2025-09-11 12:34:55] [Rank 0] step:7561/10000 train_time:319564ms step_avg:42.26ms +[2025-09-11 12:34:55] [Rank 0] step:7561/10000 train_time:319564ms step_avg:42.26ms +[2025-09-11 12:34:55] [Rank 0] step:7581/10000 train_time:320267ms step_avg:42.25ms +[2025-09-11 12:34:55] [Rank 0] step:7581/10000 train_time:320267ms step_avg:42.25ms +[2025-09-11 12:34:56] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 12:34:56] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 12:34:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 12:34:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 12:34:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 12:34:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 12:34:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:34:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:34:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 12:34:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 12:34:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 12:34:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 12:35:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 12:35:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 12:35:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 12:35:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 12:35:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 12:35:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 12:35:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 12:35:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 12:35:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 12:35:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 12:35:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 12:35:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 12:35:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 12:35:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 12:35:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 12:35:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 12:35:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 12:35:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 12:35:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 12:35:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 12:35:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 12:35:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 12:35:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 12:35:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 12:35:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 12:35:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 12:35:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 12:35:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 12:35:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 12:35:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 12:35:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 12:35:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 12:35:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 12:35:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:35:07] [Rank 0] PRINT: step:7600/10000 val_loss:5.0320 total_sharp:1.3033e-02 L1_sharp:2.2987e-02 L2_sharp:2.3537e-02 L3_sharp:2.4833e-02 L4_sharp:2.6889e-02 L5_sharp:3.9030e-02 L6_sharp:6.0523e-02 L7_sharp:8.7376e-02 L8_sharp:1.4715e-01 L9_sharp:2.6191e-01 L10_sharp:4.3190e-01 L11_sharp:6.2116e-01 L12_sharp:2.1648e+00 total_fnorm:2.0469e+00 total_l1_linf:1.6080e+03 total_spectral:1.0312e+00 L1_fnorm:6.0791e-02 L2_fnorm:6.1035e-02 L3_fnorm:6.1279e-02 L4_fnorm:6.1035e-02 L5_fnorm:6.0547e-02 L6_fnorm:6.0547e-02 L7_fnorm:6.0303e-02 L8_fnorm:6.0059e-02 L9_fnorm:6.0303e-02 L10_fnorm:5.9814e-02 L11_fnorm:5.9814e-02 L12_fnorm:5.7617e-02 L1_l1linf:1.3306e-02 L2_l1linf:1.3428e-02 L3_l1linf:1.3245e-02 L4_l1linf:1.3367e-02 L5_l1linf:1.3062e-02 L6_l1linf:1.3245e-02 L7_l1linf:1.3062e-02 L8_l1linf:1.3062e-02 L9_l1linf:1.3123e-02 L10_l1linf:1.2939e-02 L11_l1linf:1.3794e-02 L12_l1linf:1.1597e-02 L1_spectral:1.0050e-03 L2_spectral:1.0060e-03 L3_spectral:1.0046e-03 L4_spectral:1.0007e-03 L5_spectral:9.9790e-04 L6_spectral:9.9443e-04 L7_spectral:1.0012e-03 L8_spectral:9.8268e-04 L9_spectral:9.8902e-04 L10_spectral:9.7654e-04 L11_spectral:9.4866e-04 L12_spectral:8.9666e-04 train_time:320950ms step_avg:42.23ms +[2025-09-11 12:35:07] [Rank 0] PRINT: step:7600/10000 val_loss:5.0320 total_sharp:1.3033e-02 L1_sharp:2.2987e-02 L2_sharp:2.3537e-02 L3_sharp:2.4833e-02 L4_sharp:2.6889e-02 L5_sharp:3.9030e-02 L6_sharp:6.0523e-02 L7_sharp:8.7376e-02 L8_sharp:1.4715e-01 L9_sharp:2.6191e-01 L10_sharp:4.3190e-01 L11_sharp:6.2116e-01 L12_sharp:2.1648e+00 total_fnorm:2.0469e+00 total_l1_linf:1.6080e+03 total_spectral:1.0312e+00 L1_fnorm:6.0791e-02 L2_fnorm:6.1035e-02 L3_fnorm:6.1279e-02 L4_fnorm:6.1035e-02 L5_fnorm:6.0547e-02 L6_fnorm:6.0547e-02 L7_fnorm:6.0303e-02 L8_fnorm:6.0059e-02 L9_fnorm:6.0303e-02 L10_fnorm:5.9814e-02 L11_fnorm:5.9814e-02 L12_fnorm:5.7617e-02 L1_l1linf:1.3306e-02 L2_l1linf:1.3428e-02 L3_l1linf:1.3245e-02 L4_l1linf:1.3367e-02 L5_l1linf:1.3062e-02 
L6_l1linf:1.3245e-02 L7_l1linf:1.3062e-02 L8_l1linf:1.3062e-02 L9_l1linf:1.3123e-02 L10_l1linf:1.2939e-02 L11_l1linf:1.3794e-02 L12_l1linf:1.1597e-02 L1_spectral:1.0050e-03 L2_spectral:1.0060e-03 L3_spectral:1.0046e-03 L4_spectral:1.0007e-03 L5_spectral:9.9790e-04 L6_spectral:9.9443e-04 L7_spectral:1.0012e-03 L8_spectral:9.8268e-04 L9_spectral:9.8902e-04 L10_spectral:9.7654e-04 L11_spectral:9.4866e-04 L12_spectral:8.9666e-04 train_time:320950ms step_avg:42.23ms +[2025-09-11 12:35:09] [Rank 0] step:7601/10000 train_time:322939ms step_avg:42.49ms +[2025-09-11 12:35:09] [Rank 0] step:7601/10000 train_time:322939ms step_avg:42.49ms +[2025-09-11 12:35:10] [Rank 0] step:7621/10000 train_time:323718ms step_avg:42.48ms +[2025-09-11 12:35:10] [Rank 0] step:7621/10000 train_time:323718ms step_avg:42.48ms +[2025-09-11 12:35:11] [Rank 0] step:7641/10000 train_time:324423ms step_avg:42.46ms +[2025-09-11 12:35:11] [Rank 0] step:7641/10000 train_time:324423ms step_avg:42.46ms +[2025-09-11 12:35:11] [Rank 0] step:7661/10000 train_time:325125ms step_avg:42.44ms +[2025-09-11 12:35:11] [Rank 0] step:7661/10000 train_time:325125ms step_avg:42.44ms +[2025-09-11 12:35:12] [Rank 0] step:7681/10000 train_time:325829ms step_avg:42.42ms +[2025-09-11 12:35:12] [Rank 0] step:7681/10000 train_time:325829ms step_avg:42.42ms +[2025-09-11 12:35:13] [Rank 0] step:7701/10000 train_time:326533ms step_avg:42.40ms +[2025-09-11 12:35:13] [Rank 0] step:7701/10000 train_time:326533ms step_avg:42.40ms +[2025-09-11 12:35:13] [Rank 0] step:7721/10000 train_time:327237ms step_avg:42.38ms +[2025-09-11 12:35:13] [Rank 0] step:7721/10000 train_time:327237ms step_avg:42.38ms +[2025-09-11 12:35:14] [Rank 0] step:7741/10000 train_time:327941ms step_avg:42.36ms +[2025-09-11 12:35:14] [Rank 0] step:7741/10000 train_time:327941ms step_avg:42.36ms +[2025-09-11 12:35:15] [Rank 0] step:7761/10000 train_time:328644ms step_avg:42.35ms +[2025-09-11 12:35:15] [Rank 0] step:7761/10000 train_time:328644ms step_avg:42.35ms 
+[2025-09-11 12:35:16] [Rank 0] step:7781/10000 train_time:329349ms step_avg:42.33ms +[2025-09-11 12:35:16] [Rank 0] step:7781/10000 train_time:329349ms step_avg:42.33ms +[2025-09-11 12:35:16] [Rank 0] step:7801/10000 train_time:330051ms step_avg:42.31ms +[2025-09-11 12:35:16] [Rank 0] step:7801/10000 train_time:330051ms step_avg:42.31ms +[2025-09-11 12:35:17] [Rank 0] step:7821/10000 train_time:330754ms step_avg:42.29ms +[2025-09-11 12:35:17] [Rank 0] step:7821/10000 train_time:330754ms step_avg:42.29ms +[2025-09-11 12:35:18] [Rank 0] step:7841/10000 train_time:331459ms step_avg:42.27ms +[2025-09-11 12:35:18] [Rank 0] step:7841/10000 train_time:331459ms step_avg:42.27ms +[2025-09-11 12:35:18] [Rank 0] step:7861/10000 train_time:332165ms step_avg:42.25ms +[2025-09-11 12:35:18] [Rank 0] step:7861/10000 train_time:332165ms step_avg:42.25ms +[2025-09-11 12:35:19] [Rank 0] step:7881/10000 train_time:332868ms step_avg:42.24ms +[2025-09-11 12:35:19] [Rank 0] step:7881/10000 train_time:332868ms step_avg:42.24ms +[2025-09-11 12:35:20] [Rank 0] step:7901/10000 train_time:333841ms step_avg:42.25ms +[2025-09-11 12:35:20] [Rank 0] step:7901/10000 train_time:333841ms step_avg:42.25ms +[2025-09-11 12:35:21] [Rank 0] step:7921/10000 train_time:334545ms step_avg:42.24ms +[2025-09-11 12:35:21] [Rank 0] step:7921/10000 train_time:334545ms step_avg:42.24ms +[2025-09-11 12:35:21] [Rank 0] step:7941/10000 train_time:335249ms step_avg:42.22ms +[2025-09-11 12:35:21] [Rank 0] step:7941/10000 train_time:335249ms step_avg:42.22ms +[2025-09-11 12:35:22] [Rank 0] step:7961/10000 train_time:336223ms step_avg:42.23ms +[2025-09-11 12:35:22] [Rank 0] step:7961/10000 train_time:336223ms step_avg:42.23ms +[2025-09-11 12:35:23] [Rank 0] step:7981/10000 train_time:336929ms step_avg:42.22ms +[2025-09-11 12:35:23] [Rank 0] step:7981/10000 train_time:336929ms step_avg:42.22ms +[2025-09-11 12:35:24] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 12:35:24] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 12:35:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 12:35:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 12:35:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 12:35:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 12:35:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:35:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:35:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 12:35:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 12:35:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 12:35:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 12:35:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 12:35:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 12:35:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 12:35:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 12:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 12:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 12:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 12:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 12:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 12:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 12:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 12:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 12:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 12:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 12:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 12:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 12:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 12:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 12:35:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 12:35:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 12:35:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 12:35:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 12:35:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 12:35:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 12:35:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 12:35:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 12:35:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 12:35:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 12:35:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 12:35:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 12:35:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 12:35:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 12:35:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 12:35:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 12:35:35] [Rank 0] PRINT: step:8000/10000 val_loss:5.0226 total_sharp:1.4795e-02 L1_sharp:1.8170e-02 L2_sharp:1.8946e-02 L3_sharp:2.3127e-02 L4_sharp:2.8979e-02 L5_sharp:4.0402e-02 L6_sharp:8.5261e-02 L7_sharp:1.3833e-01 L8_sharp:2.1841e-01 L9_sharp:3.3742e-01 L10_sharp:5.4321e-01 L11_sharp:6.7307e-01 L12_sharp:1.6956e+00 total_fnorm:1.6406e+00 total_l1_linf:1.1920e+03 total_spectral:8.2422e-01 L1_fnorm:4.8828e-02 L2_fnorm:4.9316e-02 L3_fnorm:4.9316e-02 L4_fnorm:4.9072e-02 L5_fnorm:4.8828e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8584e-02 L8_fnorm:4.8096e-02 L9_fnorm:4.8584e-02 L10_fnorm:4.8096e-02 L11_fnorm:4.8096e-02 L12_fnorm:4.5898e-02 L1_l1linf:9.8267e-03 L2_l1linf:1.0010e-02 L3_l1linf:1.0315e-02 L4_l1linf:1.0193e-02 L5_l1linf:1.0071e-02 L6_l1linf:9.7046e-03 L7_l1linf:9.7656e-03 L8_l1linf:9.8267e-03 L9_l1linf:9.9487e-03 L10_l1linf:1.0071e-02 L11_l1linf:1.0437e-02 L12_l1linf:8.7891e-03 L1_spectral:8.4374e-04 L2_spectral:8.4401e-04 L3_spectral:8.4688e-04 L4_spectral:8.4086e-04 L5_spectral:8.3614e-04 L6_spectral:8.2288e-04 L7_spectral:8.3183e-04 L8_spectral:8.2221e-04 L9_spectral:8.2439e-04 L10_spectral:8.0482e-04 L11_spectral:7.8587e-04 L12_spectral:7.2711e-04 train_time:337610ms step_avg:42.20ms +[2025-09-11 12:35:35] [Rank 0] PRINT: step:8000/10000 
val_loss:5.0226 total_sharp:1.4795e-02 L1_sharp:1.8170e-02 L2_sharp:1.8946e-02 L3_sharp:2.3127e-02 L4_sharp:2.8979e-02 L5_sharp:4.0402e-02 L6_sharp:8.5261e-02 L7_sharp:1.3833e-01 L8_sharp:2.1841e-01 L9_sharp:3.3742e-01 L10_sharp:5.4321e-01 L11_sharp:6.7307e-01 L12_sharp:1.6956e+00 total_fnorm:1.6406e+00 total_l1_linf:1.1920e+03 total_spectral:8.2422e-01 L1_fnorm:4.8828e-02 L2_fnorm:4.9316e-02 L3_fnorm:4.9316e-02 L4_fnorm:4.9072e-02 L5_fnorm:4.8828e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8584e-02 L8_fnorm:4.8096e-02 L9_fnorm:4.8584e-02 L10_fnorm:4.8096e-02 L11_fnorm:4.8096e-02 L12_fnorm:4.5898e-02 L1_l1linf:9.8267e-03 L2_l1linf:1.0010e-02 L3_l1linf:1.0315e-02 L4_l1linf:1.0193e-02 L5_l1linf:1.0071e-02 L6_l1linf:9.7046e-03 L7_l1linf:9.7656e-03 L8_l1linf:9.8267e-03 L9_l1linf:9.9487e-03 L10_l1linf:1.0071e-02 L11_l1linf:1.0437e-02 L12_l1linf:8.7891e-03 L1_spectral:8.4374e-04 L2_spectral:8.4401e-04 L3_spectral:8.4688e-04 L4_spectral:8.4086e-04 L5_spectral:8.3614e-04 L6_spectral:8.2288e-04 L7_spectral:8.3183e-04 L8_spectral:8.2221e-04 L9_spectral:8.2439e-04 L10_spectral:8.0482e-04 L11_spectral:7.8587e-04 L12_spectral:7.2711e-04 train_time:337610ms step_avg:42.20ms +[2025-09-11 12:35:37] [Rank 0] step:8001/10000 train_time:339554ms step_avg:42.44ms +[2025-09-11 12:35:37] [Rank 0] step:8001/10000 train_time:339554ms step_avg:42.44ms +[2025-09-11 12:35:38] [Rank 0] step:8021/10000 train_time:340285ms step_avg:42.42ms +[2025-09-11 12:35:38] [Rank 0] step:8021/10000 train_time:340285ms step_avg:42.42ms +[2025-09-11 12:35:38] [Rank 0] step:8041/10000 train_time:340989ms step_avg:42.41ms +[2025-09-11 12:35:38] [Rank 0] step:8041/10000 train_time:340989ms step_avg:42.41ms +[2025-09-11 12:35:39] [Rank 0] step:8061/10000 train_time:341695ms step_avg:42.39ms +[2025-09-11 12:35:39] [Rank 0] step:8061/10000 train_time:341695ms step_avg:42.39ms +[2025-09-11 12:35:40] [Rank 0] step:8081/10000 train_time:342404ms step_avg:42.37ms +[2025-09-11 12:35:40] [Rank 0] step:8081/10000 
train_time:342404ms step_avg:42.37ms +[2025-09-11 12:35:41] [Rank 0] step:8101/10000 train_time:343107ms step_avg:42.35ms +[2025-09-11 12:35:41] [Rank 0] step:8101/10000 train_time:343107ms step_avg:42.35ms +[2025-09-11 12:35:41] [Rank 0] step:8121/10000 train_time:343814ms step_avg:42.34ms +[2025-09-11 12:35:41] [Rank 0] step:8121/10000 train_time:343814ms step_avg:42.34ms +[2025-09-11 12:35:43] [Rank 0] step:8141/10000 train_time:345274ms step_avg:42.41ms +[2025-09-11 12:35:43] [Rank 0] step:8141/10000 train_time:345274ms step_avg:42.41ms +[2025-09-11 12:35:43] [Rank 0] step:8161/10000 train_time:345981ms step_avg:42.39ms +[2025-09-11 12:35:43] [Rank 0] step:8161/10000 train_time:345981ms step_avg:42.39ms +[2025-09-11 12:35:44] [Rank 0] step:8181/10000 train_time:346696ms step_avg:42.38ms +[2025-09-11 12:35:44] [Rank 0] step:8181/10000 train_time:346696ms step_avg:42.38ms +[2025-09-11 12:35:45] [Rank 0] step:8201/10000 train_time:347407ms step_avg:42.36ms +[2025-09-11 12:35:45] [Rank 0] step:8201/10000 train_time:347407ms step_avg:42.36ms +[2025-09-11 12:35:46] [Rank 0] step:8221/10000 train_time:348117ms step_avg:42.34ms +[2025-09-11 12:35:46] [Rank 0] step:8221/10000 train_time:348117ms step_avg:42.34ms +[2025-09-11 12:35:46] [Rank 0] step:8241/10000 train_time:348835ms step_avg:42.33ms +[2025-09-11 12:35:46] [Rank 0] step:8241/10000 train_time:348835ms step_avg:42.33ms +[2025-09-11 12:35:47] [Rank 0] step:8261/10000 train_time:349544ms step_avg:42.31ms +[2025-09-11 12:35:47] [Rank 0] step:8261/10000 train_time:349544ms step_avg:42.31ms +[2025-09-11 12:35:48] [Rank 0] step:8281/10000 train_time:350250ms step_avg:42.30ms +[2025-09-11 12:35:48] [Rank 0] step:8281/10000 train_time:350250ms step_avg:42.30ms +[2025-09-11 12:35:48] [Rank 0] step:8301/10000 train_time:350961ms step_avg:42.28ms +[2025-09-11 12:35:48] [Rank 0] step:8301/10000 train_time:350961ms step_avg:42.28ms +[2025-09-11 12:35:49] [Rank 0] step:8321/10000 train_time:351669ms step_avg:42.26ms 
+[2025-09-11 12:35:49] [Rank 0] step:8321/10000 train_time:351669ms step_avg:42.26ms +[2025-09-11 12:35:50] [Rank 0] step:8341/10000 train_time:352386ms step_avg:42.25ms +[2025-09-11 12:35:50] [Rank 0] step:8341/10000 train_time:352386ms step_avg:42.25ms +[2025-09-11 12:35:50] [Rank 0] step:8361/10000 train_time:353091ms step_avg:42.23ms +[2025-09-11 12:35:50] [Rank 0] step:8361/10000 train_time:353091ms step_avg:42.23ms +[2025-09-11 12:35:51] [Rank 0] step:8381/10000 train_time:353804ms step_avg:42.22ms +[2025-09-11 12:35:51] [Rank 0] step:8381/10000 train_time:353804ms step_avg:42.22ms +[2025-09-11 12:35:52] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 12:35:52] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 12:35:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 12:35:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 12:35:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 12:35:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 12:35:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:35:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:35:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 12:35:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 12:35:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 12:35:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 12:35:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 12:35:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 12:35:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 12:35:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 12:36:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 12:36:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 12:36:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 12:36:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 12:36:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 12:36:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 12:36:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 12:36:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 12:36:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 12:36:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 12:36:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 12:36:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 12:36:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 12:36:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 12:36:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 12:36:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 12:36:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 12:36:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 12:36:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 12:36:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 12:36:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 12:36:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 12:36:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 12:36:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 12:36:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 12:36:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 12:36:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 12:36:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 12:36:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 12:36:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:36:03] [Rank 0] PRINT: step:8400/10000 val_loss:5.0096 total_sharp:1.4042e-02 L1_sharp:2.1678e-02 L2_sharp:1.8488e-02 L3_sharp:1.8256e-02 L4_sharp:3.2090e-02 L5_sharp:3.5782e-02 L6_sharp:6.5729e-02 L7_sharp:1.0860e-01 L8_sharp:1.7598e-01 L9_sharp:2.6557e-01 L10_sharp:4.9731e-01 L11_sharp:6.2330e-01 L12_sharp:1.4455e+00 total_fnorm:1.2344e+00 total_l1_linf:7.9200e+02 total_spectral:6.1719e-01 L1_fnorm:3.8086e-02 L2_fnorm:3.8086e-02 L3_fnorm:3.8330e-02 L4_fnorm:3.8330e-02 L5_fnorm:3.7842e-02 L6_fnorm:3.7842e-02 L7_fnorm:3.7598e-02 L8_fnorm:3.7354e-02 L9_fnorm:3.7598e-02 L10_fnorm:3.7354e-02 L11_fnorm:3.7109e-02 L12_fnorm:3.5400e-02 L1_l1linf:7.2632e-03 L2_l1linf:7.3242e-03 L3_l1linf:7.2021e-03 L4_l1linf:7.2632e-03 L5_l1linf:7.2327e-03 L6_l1linf:7.1716e-03 L7_l1linf:7.1716e-03 L8_l1linf:7.1411e-03 L9_l1linf:7.1411e-03 L10_l1linf:7.3853e-03 L11_l1linf:7.4463e-03 L12_l1linf:6.2866e-03 L1_spectral:6.8310e-04 L2_spectral:6.7698e-04 L3_spectral:6.7670e-04 L4_spectral:6.7600e-04 L5_spectral:6.6688e-04 L6_spectral:6.6280e-04 L7_spectral:6.6109e-04 L8_spectral:6.5478e-04 L9_spectral:6.6211e-04 L10_spectral:6.4190e-04 L11_spectral:6.2607e-04 L12_spectral:5.7254e-04 train_time:354497ms step_avg:42.20ms +[2025-09-11 12:36:03] [Rank 0] PRINT: step:8400/10000 val_loss:5.0096 total_sharp:1.4042e-02 L1_sharp:2.1678e-02 L2_sharp:1.8488e-02 L3_sharp:1.8256e-02 L4_sharp:3.2090e-02 L5_sharp:3.5782e-02 L6_sharp:6.5729e-02 L7_sharp:1.0860e-01 L8_sharp:1.7598e-01 L9_sharp:2.6557e-01 L10_sharp:4.9731e-01 L11_sharp:6.2330e-01 L12_sharp:1.4455e+00 total_fnorm:1.2344e+00 total_l1_linf:7.9200e+02 total_spectral:6.1719e-01 L1_fnorm:3.8086e-02 L2_fnorm:3.8086e-02 L3_fnorm:3.8330e-02 L4_fnorm:3.8330e-02 L5_fnorm:3.7842e-02 L6_fnorm:3.7842e-02 L7_fnorm:3.7598e-02 L8_fnorm:3.7354e-02 L9_fnorm:3.7598e-02 L10_fnorm:3.7354e-02 L11_fnorm:3.7109e-02 L12_fnorm:3.5400e-02 L1_l1linf:7.2632e-03 L2_l1linf:7.3242e-03 L3_l1linf:7.2021e-03 L4_l1linf:7.2632e-03 L5_l1linf:7.2327e-03 
L6_l1linf:7.1716e-03 L7_l1linf:7.1716e-03 L8_l1linf:7.1411e-03 L9_l1linf:7.1411e-03 L10_l1linf:7.3853e-03 L11_l1linf:7.4463e-03 L12_l1linf:6.2866e-03 L1_spectral:6.8310e-04 L2_spectral:6.7698e-04 L3_spectral:6.7670e-04 L4_spectral:6.7600e-04 L5_spectral:6.6688e-04 L6_spectral:6.6280e-04 L7_spectral:6.6109e-04 L8_spectral:6.5478e-04 L9_spectral:6.6211e-04 L10_spectral:6.4190e-04 L11_spectral:6.2607e-04 L12_spectral:5.7254e-04 train_time:354497ms step_avg:42.20ms +[2025-09-11 12:36:05] [Rank 0] step:8401/10000 train_time:356428ms step_avg:42.43ms +[2025-09-11 12:36:05] [Rank 0] step:8401/10000 train_time:356428ms step_avg:42.43ms +[2025-09-11 12:36:06] [Rank 0] step:8421/10000 train_time:357163ms step_avg:42.41ms +[2025-09-11 12:36:06] [Rank 0] step:8421/10000 train_time:357163ms step_avg:42.41ms +[2025-09-11 12:36:07] [Rank 0] step:8441/10000 train_time:357874ms step_avg:42.40ms +[2025-09-11 12:36:07] [Rank 0] step:8441/10000 train_time:357874ms step_avg:42.40ms +[2025-09-11 12:36:07] [Rank 0] step:8461/10000 train_time:358585ms step_avg:42.38ms +[2025-09-11 12:36:07] [Rank 0] step:8461/10000 train_time:358585ms step_avg:42.38ms +[2025-09-11 12:36:08] [Rank 0] step:8481/10000 train_time:359297ms step_avg:42.36ms +[2025-09-11 12:36:08] [Rank 0] step:8481/10000 train_time:359297ms step_avg:42.36ms +[2025-09-11 12:36:09] [Rank 0] step:8501/10000 train_time:360006ms step_avg:42.35ms +[2025-09-11 12:36:09] [Rank 0] step:8501/10000 train_time:360006ms step_avg:42.35ms +[2025-09-11 12:36:09] [Rank 0] step:8521/10000 train_time:360715ms step_avg:42.33ms +[2025-09-11 12:36:09] [Rank 0] step:8521/10000 train_time:360715ms step_avg:42.33ms +[2025-09-11 12:36:10] [Rank 0] step:8541/10000 train_time:361425ms step_avg:42.32ms +[2025-09-11 12:36:10] [Rank 0] step:8541/10000 train_time:361425ms step_avg:42.32ms +[2025-09-11 12:36:11] [Rank 0] step:8561/10000 train_time:362139ms step_avg:42.30ms +[2025-09-11 12:36:11] [Rank 0] step:8561/10000 train_time:362139ms step_avg:42.30ms 
+[2025-09-11 12:36:12] [Rank 0] step:8581/10000 train_time:362853ms step_avg:42.29ms +[2025-09-11 12:36:12] [Rank 0] step:8581/10000 train_time:362853ms step_avg:42.29ms +[2025-09-11 12:36:12] [Rank 0] step:8601/10000 train_time:363564ms step_avg:42.27ms +[2025-09-11 12:36:12] [Rank 0] step:8601/10000 train_time:363564ms step_avg:42.27ms +[2025-09-11 12:36:13] [Rank 0] step:8621/10000 train_time:364273ms step_avg:42.25ms +[2025-09-11 12:36:13] [Rank 0] step:8621/10000 train_time:364273ms step_avg:42.25ms +[2025-09-11 12:36:14] [Rank 0] step:8641/10000 train_time:364983ms step_avg:42.24ms +[2025-09-11 12:36:14] [Rank 0] step:8641/10000 train_time:364983ms step_avg:42.24ms +[2025-09-11 12:36:14] [Rank 0] step:8661/10000 train_time:365693ms step_avg:42.22ms +[2025-09-11 12:36:14] [Rank 0] step:8661/10000 train_time:365693ms step_avg:42.22ms +[2025-09-11 12:36:15] [Rank 0] step:8681/10000 train_time:366405ms step_avg:42.21ms +[2025-09-11 12:36:15] [Rank 0] step:8681/10000 train_time:366405ms step_avg:42.21ms +[2025-09-11 12:36:16] [Rank 0] step:8701/10000 train_time:367114ms step_avg:42.19ms +[2025-09-11 12:36:16] [Rank 0] step:8701/10000 train_time:367114ms step_avg:42.19ms +[2025-09-11 12:36:17] [Rank 0] step:8721/10000 train_time:367826ms step_avg:42.18ms +[2025-09-11 12:36:17] [Rank 0] step:8721/10000 train_time:367826ms step_avg:42.18ms +[2025-09-11 12:36:17] [Rank 0] step:8741/10000 train_time:368532ms step_avg:42.16ms +[2025-09-11 12:36:17] [Rank 0] step:8741/10000 train_time:368532ms step_avg:42.16ms +[2025-09-11 12:36:18] [Rank 0] step:8761/10000 train_time:369246ms step_avg:42.15ms +[2025-09-11 12:36:18] [Rank 0] step:8761/10000 train_time:369246ms step_avg:42.15ms +[2025-09-11 12:36:19] [Rank 0] step:8781/10000 train_time:369953ms step_avg:42.13ms +[2025-09-11 12:36:19] [Rank 0] step:8781/10000 train_time:369953ms step_avg:42.13ms +[2025-09-11 12:36:19] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 12:36:19] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 12:36:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 12:36:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 12:36:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 12:36:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 12:36:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:36:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:36:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 12:36:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 12:36:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 12:36:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 12:36:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 12:36:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 12:36:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 12:36:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 12:36:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 12:36:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 12:36:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 12:36:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 12:36:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 12:36:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 12:36:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 12:36:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 12:36:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 12:36:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 12:36:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 12:36:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 12:36:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 12:36:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 12:36:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 12:36:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 12:36:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 12:36:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 12:36:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 12:36:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 12:36:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 12:36:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 12:36:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 12:36:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 12:36:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 12:36:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 12:36:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 12:36:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 12:36:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 12:36:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 12:36:31] [Rank 0] PRINT: step:8800/10000 val_loss:5.0010 total_sharp:8.2538e-03 L1_sharp:1.2932e-02 L2_sharp:1.4080e-02 L3_sharp:1.8571e-02 L4_sharp:2.0786e-02 L5_sharp:3.4055e-02 L6_sharp:4.7496e-02 L7_sharp:6.3071e-02 L8_sharp:1.0527e-01 L9_sharp:1.9165e-01 L10_sharp:3.1877e-01 L11_sharp:4.0006e-01 L12_sharp:7.4069e-01 total_fnorm:8.7109e-01 total_l1_linf:4.9200e+02 total_spectral:4.3750e-01 L1_fnorm:2.7222e-02 L2_fnorm:2.7344e-02 L3_fnorm:2.7466e-02 L4_fnorm:2.7344e-02 L5_fnorm:2.7100e-02 L6_fnorm:2.7100e-02 L7_fnorm:2.6978e-02 L8_fnorm:2.6733e-02 L9_fnorm:2.6855e-02 L10_fnorm:2.6367e-02 L11_fnorm:2.6489e-02 L12_fnorm:2.5513e-02 L1_l1linf:4.7607e-03 L2_l1linf:4.7607e-03 L3_l1linf:4.8523e-03 L4_l1linf:4.6997e-03 L5_l1linf:4.7302e-03 L6_l1linf:4.6692e-03 L7_l1linf:4.6692e-03 L8_l1linf:4.5471e-03 L9_l1linf:4.6387e-03 L10_l1linf:4.6082e-03 L11_l1linf:4.7607e-03 L12_l1linf:4.5471e-03 L1_spectral:5.0117e-04 L2_spectral:5.0063e-04 L3_spectral:5.1037e-04 L4_spectral:5.0127e-04 L5_spectral:4.9224e-04 L6_spectral:4.9107e-04 L7_spectral:4.8419e-04 L8_spectral:4.7857e-04 L9_spectral:4.8453e-04 L10_spectral:4.6798e-04 L11_spectral:4.5694e-04 L12_spectral:4.1310e-04 train_time:370641ms step_avg:42.12ms +[2025-09-11 12:36:31] [Rank 0] PRINT: step:8800/10000 
val_loss:5.0010 total_sharp:8.2538e-03 L1_sharp:1.2932e-02 L2_sharp:1.4080e-02 L3_sharp:1.8571e-02 L4_sharp:2.0786e-02 L5_sharp:3.4055e-02 L6_sharp:4.7496e-02 L7_sharp:6.3071e-02 L8_sharp:1.0527e-01 L9_sharp:1.9165e-01 L10_sharp:3.1877e-01 L11_sharp:4.0006e-01 L12_sharp:7.4069e-01 total_fnorm:8.7109e-01 total_l1_linf:4.9200e+02 total_spectral:4.3750e-01 L1_fnorm:2.7222e-02 L2_fnorm:2.7344e-02 L3_fnorm:2.7466e-02 L4_fnorm:2.7344e-02 L5_fnorm:2.7100e-02 L6_fnorm:2.7100e-02 L7_fnorm:2.6978e-02 L8_fnorm:2.6733e-02 L9_fnorm:2.6855e-02 L10_fnorm:2.6367e-02 L11_fnorm:2.6489e-02 L12_fnorm:2.5513e-02 L1_l1linf:4.7607e-03 L2_l1linf:4.7607e-03 L3_l1linf:4.8523e-03 L4_l1linf:4.6997e-03 L5_l1linf:4.7302e-03 L6_l1linf:4.6692e-03 L7_l1linf:4.6692e-03 L8_l1linf:4.5471e-03 L9_l1linf:4.6387e-03 L10_l1linf:4.6082e-03 L11_l1linf:4.7607e-03 L12_l1linf:4.5471e-03 L1_spectral:5.0117e-04 L2_spectral:5.0063e-04 L3_spectral:5.1037e-04 L4_spectral:5.0127e-04 L5_spectral:4.9224e-04 L6_spectral:4.9107e-04 L7_spectral:4.8419e-04 L8_spectral:4.7857e-04 L9_spectral:4.8453e-04 L10_spectral:4.6798e-04 L11_spectral:4.5694e-04 L12_spectral:4.1310e-04 train_time:370641ms step_avg:42.12ms +[2025-09-11 12:36:33] [Rank 0] step:8801/10000 train_time:372611ms step_avg:42.34ms +[2025-09-11 12:36:33] [Rank 0] step:8801/10000 train_time:372611ms step_avg:42.34ms +[2025-09-11 12:36:34] [Rank 0] step:8821/10000 train_time:373364ms step_avg:42.33ms +[2025-09-11 12:36:34] [Rank 0] step:8821/10000 train_time:373364ms step_avg:42.33ms +[2025-09-11 12:36:34] [Rank 0] step:8841/10000 train_time:374076ms step_avg:42.31ms +[2025-09-11 12:36:34] [Rank 0] step:8841/10000 train_time:374076ms step_avg:42.31ms +[2025-09-11 12:36:35] [Rank 0] step:8861/10000 train_time:374787ms step_avg:42.30ms +[2025-09-11 12:36:35] [Rank 0] step:8861/10000 train_time:374787ms step_avg:42.30ms +[2025-09-11 12:36:36] [Rank 0] step:8881/10000 train_time:375500ms step_avg:42.28ms +[2025-09-11 12:36:36] [Rank 0] step:8881/10000 
train_time:375500ms step_avg:42.28ms +[2025-09-11 12:36:36] [Rank 0] step:8901/10000 train_time:376213ms step_avg:42.27ms +[2025-09-11 12:36:36] [Rank 0] step:8901/10000 train_time:376213ms step_avg:42.27ms +[2025-09-11 12:36:37] [Rank 0] step:8921/10000 train_time:376921ms step_avg:42.25ms +[2025-09-11 12:36:37] [Rank 0] step:8921/10000 train_time:376921ms step_avg:42.25ms +[2025-09-11 12:36:38] [Rank 0] step:8941/10000 train_time:377636ms step_avg:42.24ms +[2025-09-11 12:36:38] [Rank 0] step:8941/10000 train_time:377636ms step_avg:42.24ms +[2025-09-11 12:36:39] [Rank 0] step:8961/10000 train_time:378356ms step_avg:42.22ms +[2025-09-11 12:36:39] [Rank 0] step:8961/10000 train_time:378356ms step_avg:42.22ms +[2025-09-11 12:36:39] [Rank 0] step:8981/10000 train_time:379071ms step_avg:42.21ms +[2025-09-11 12:36:39] [Rank 0] step:8981/10000 train_time:379071ms step_avg:42.21ms +[2025-09-11 12:36:40] [Rank 0] step:9001/10000 train_time:379778ms step_avg:42.19ms +[2025-09-11 12:36:40] [Rank 0] step:9001/10000 train_time:379778ms step_avg:42.19ms +[2025-09-11 12:36:41] [Rank 0] step:9021/10000 train_time:380489ms step_avg:42.18ms +[2025-09-11 12:36:41] [Rank 0] step:9021/10000 train_time:380489ms step_avg:42.18ms +[2025-09-11 12:36:41] [Rank 0] step:9041/10000 train_time:381203ms step_avg:42.16ms +[2025-09-11 12:36:41] [Rank 0] step:9041/10000 train_time:381203ms step_avg:42.16ms +[2025-09-11 12:36:42] [Rank 0] step:9061/10000 train_time:381913ms step_avg:42.15ms +[2025-09-11 12:36:42] [Rank 0] step:9061/10000 train_time:381913ms step_avg:42.15ms +[2025-09-11 12:36:43] [Rank 0] step:9081/10000 train_time:382627ms step_avg:42.13ms +[2025-09-11 12:36:43] [Rank 0] step:9081/10000 train_time:382627ms step_avg:42.13ms +[2025-09-11 12:36:43] [Rank 0] step:9101/10000 train_time:383342ms step_avg:42.12ms +[2025-09-11 12:36:43] [Rank 0] step:9101/10000 train_time:383342ms step_avg:42.12ms +[2025-09-11 12:36:44] [Rank 0] step:9121/10000 train_time:384058ms step_avg:42.11ms 
+[2025-09-11 12:36:44] [Rank 0] step:9121/10000 train_time:384058ms step_avg:42.11ms +[2025-09-11 12:36:45] [Rank 0] step:9141/10000 train_time:384767ms step_avg:42.09ms +[2025-09-11 12:36:45] [Rank 0] step:9141/10000 train_time:384767ms step_avg:42.09ms +[2025-09-11 12:36:46] [Rank 0] step:9161/10000 train_time:385482ms step_avg:42.08ms +[2025-09-11 12:36:46] [Rank 0] step:9161/10000 train_time:385482ms step_avg:42.08ms +[2025-09-11 12:36:46] [Rank 0] step:9181/10000 train_time:386196ms step_avg:42.06ms +[2025-09-11 12:36:46] [Rank 0] step:9181/10000 train_time:386196ms step_avg:42.06ms +[2025-09-11 12:36:47] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 12:36:47] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 12:36:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 12:36:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 12:36:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 12:36:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 12:36:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:36:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:36:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 12:36:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 12:36:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 12:36:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 12:36:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 12:36:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 12:36:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 12:36:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 12:36:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 12:36:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 12:36:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 12:36:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 12:36:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 12:36:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 12:36:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 12:36:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 12:36:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 12:36:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 12:36:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 12:36:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 12:36:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 12:36:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 12:36:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 12:36:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 12:36:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 12:36:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 12:36:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 12:36:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 12:36:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 12:36:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 12:36:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 12:36:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 12:36:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 12:36:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 12:36:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 12:36:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 12:36:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 12:36:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:36:58] [Rank 0] PRINT: step:9200/10000 val_loss:4.9942 total_sharp:1.1245e-02 L1_sharp:1.1578e-02 L2_sharp:1.1314e-02 L3_sharp:1.5886e-02 L4_sharp:2.3211e-02 L5_sharp:3.0136e-02 L6_sharp:5.1598e-02 L7_sharp:7.3841e-02 L8_sharp:1.4121e-01 L9_sharp:2.0820e-01 L10_sharp:3.0503e-01 L11_sharp:4.1333e-01 L12_sharp:1.1497e+00 total_fnorm:5.3906e-01 total_l1_linf:2.6800e+02 total_spectral:2.7148e-01 L1_fnorm:1.8066e-02 L2_fnorm:1.8066e-02 L3_fnorm:1.8188e-02 L4_fnorm:1.8066e-02 L5_fnorm:1.7944e-02 L6_fnorm:1.7822e-02 L7_fnorm:1.7700e-02 L8_fnorm:1.7700e-02 L9_fnorm:1.7822e-02 L10_fnorm:1.7578e-02 L11_fnorm:1.7578e-02 L12_fnorm:1.6602e-02 L1_l1linf:2.7466e-03 L2_l1linf:2.7313e-03 L3_l1linf:2.8534e-03 L4_l1linf:2.7771e-03 L5_l1linf:2.8229e-03 L6_l1linf:2.7313e-03 L7_l1linf:2.7008e-03 L8_l1linf:2.7161e-03 L9_l1linf:2.7466e-03 L10_l1linf:2.7924e-03 L11_l1linf:2.8687e-03 L12_l1linf:2.4719e-03 L1_spectral:3.4252e-04 L2_spectral:3.4471e-04 L3_spectral:3.4507e-04 L4_spectral:3.3924e-04 L5_spectral:3.3589e-04 L6_spectral:3.3156e-04 L7_spectral:3.2862e-04 L8_spectral:3.2790e-04 L9_spectral:3.2970e-04 L10_spectral:3.1865e-04 L11_spectral:3.0473e-04 L12_spectral:2.8240e-04 train_time:386892ms step_avg:42.05ms +[2025-09-11 12:36:58] [Rank 0] PRINT: step:9200/10000 val_loss:4.9942 total_sharp:1.1245e-02 L1_sharp:1.1578e-02 L2_sharp:1.1314e-02 L3_sharp:1.5886e-02 L4_sharp:2.3211e-02 L5_sharp:3.0136e-02 L6_sharp:5.1598e-02 L7_sharp:7.3841e-02 L8_sharp:1.4121e-01 L9_sharp:2.0820e-01 L10_sharp:3.0503e-01 L11_sharp:4.1333e-01 L12_sharp:1.1497e+00 total_fnorm:5.3906e-01 total_l1_linf:2.6800e+02 total_spectral:2.7148e-01 L1_fnorm:1.8066e-02 L2_fnorm:1.8066e-02 L3_fnorm:1.8188e-02 L4_fnorm:1.8066e-02 L5_fnorm:1.7944e-02 L6_fnorm:1.7822e-02 L7_fnorm:1.7700e-02 L8_fnorm:1.7700e-02 L9_fnorm:1.7822e-02 L10_fnorm:1.7578e-02 L11_fnorm:1.7578e-02 L12_fnorm:1.6602e-02 L1_l1linf:2.7466e-03 L2_l1linf:2.7313e-03 L3_l1linf:2.8534e-03 L4_l1linf:2.7771e-03 L5_l1linf:2.8229e-03 
L6_l1linf:2.7313e-03 L7_l1linf:2.7008e-03 L8_l1linf:2.7161e-03 L9_l1linf:2.7466e-03 L10_l1linf:2.7924e-03 L11_l1linf:2.8687e-03 L12_l1linf:2.4719e-03 L1_spectral:3.4252e-04 L2_spectral:3.4471e-04 L3_spectral:3.4507e-04 L4_spectral:3.3924e-04 L5_spectral:3.3589e-04 L6_spectral:3.3156e-04 L7_spectral:3.2862e-04 L8_spectral:3.2790e-04 L9_spectral:3.2970e-04 L10_spectral:3.1865e-04 L11_spectral:3.0473e-04 L12_spectral:2.8240e-04 train_time:386892ms step_avg:42.05ms +[2025-09-11 12:37:01] [Rank 0] step:9201/10000 train_time:388902ms step_avg:42.27ms +[2025-09-11 12:37:01] [Rank 0] step:9201/10000 train_time:388902ms step_avg:42.27ms +[2025-09-11 12:37:01] [Rank 0] step:9221/10000 train_time:389651ms step_avg:42.26ms +[2025-09-11 12:37:01] [Rank 0] step:9221/10000 train_time:389651ms step_avg:42.26ms +[2025-09-11 12:37:02] [Rank 0] step:9241/10000 train_time:390362ms step_avg:42.24ms +[2025-09-11 12:37:02] [Rank 0] step:9241/10000 train_time:390362ms step_avg:42.24ms +[2025-09-11 12:37:03] [Rank 0] step:9261/10000 train_time:391076ms step_avg:42.23ms +[2025-09-11 12:37:03] [Rank 0] step:9261/10000 train_time:391076ms step_avg:42.23ms +[2025-09-11 12:37:03] [Rank 0] step:9281/10000 train_time:391790ms step_avg:42.21ms +[2025-09-11 12:37:03] [Rank 0] step:9281/10000 train_time:391790ms step_avg:42.21ms +[2025-09-11 12:37:04] [Rank 0] step:9301/10000 train_time:392501ms step_avg:42.20ms +[2025-09-11 12:37:04] [Rank 0] step:9301/10000 train_time:392501ms step_avg:42.20ms +[2025-09-11 12:37:05] [Rank 0] step:9321/10000 train_time:393214ms step_avg:42.19ms +[2025-09-11 12:37:05] [Rank 0] step:9321/10000 train_time:393214ms step_avg:42.19ms +[2025-09-11 12:37:06] [Rank 0] step:9341/10000 train_time:393924ms step_avg:42.17ms +[2025-09-11 12:37:06] [Rank 0] step:9341/10000 train_time:393924ms step_avg:42.17ms +[2025-09-11 12:37:06] [Rank 0] step:9361/10000 train_time:394631ms step_avg:42.16ms +[2025-09-11 12:37:06] [Rank 0] step:9361/10000 train_time:394631ms step_avg:42.16ms 
+[2025-09-11 12:37:07] [Rank 0] step:9381/10000 train_time:395342ms step_avg:42.14ms +[2025-09-11 12:37:07] [Rank 0] step:9381/10000 train_time:395342ms step_avg:42.14ms +[2025-09-11 12:37:08] [Rank 0] step:9401/10000 train_time:396056ms step_avg:42.13ms +[2025-09-11 12:37:08] [Rank 0] step:9401/10000 train_time:396056ms step_avg:42.13ms +[2025-09-11 12:37:08] [Rank 0] step:9421/10000 train_time:396770ms step_avg:42.12ms +[2025-09-11 12:37:08] [Rank 0] step:9421/10000 train_time:396770ms step_avg:42.12ms +[2025-09-11 12:37:09] [Rank 0] step:9441/10000 train_time:397485ms step_avg:42.10ms +[2025-09-11 12:37:09] [Rank 0] step:9441/10000 train_time:397485ms step_avg:42.10ms +[2025-09-11 12:37:10] [Rank 0] step:9461/10000 train_time:398198ms step_avg:42.09ms +[2025-09-11 12:37:10] [Rank 0] step:9461/10000 train_time:398198ms step_avg:42.09ms +[2025-09-11 12:37:11] [Rank 0] step:9481/10000 train_time:398911ms step_avg:42.07ms +[2025-09-11 12:37:11] [Rank 0] step:9481/10000 train_time:398911ms step_avg:42.07ms +[2025-09-11 12:37:11] [Rank 0] step:9501/10000 train_time:399624ms step_avg:42.06ms +[2025-09-11 12:37:11] [Rank 0] step:9501/10000 train_time:399624ms step_avg:42.06ms +[2025-09-11 12:37:12] [Rank 0] step:9521/10000 train_time:400339ms step_avg:42.05ms +[2025-09-11 12:37:12] [Rank 0] step:9521/10000 train_time:400339ms step_avg:42.05ms +[2025-09-11 12:37:13] [Rank 0] step:9541/10000 train_time:401049ms step_avg:42.03ms +[2025-09-11 12:37:13] [Rank 0] step:9541/10000 train_time:401049ms step_avg:42.03ms +[2025-09-11 12:37:13] [Rank 0] step:9561/10000 train_time:401761ms step_avg:42.02ms +[2025-09-11 12:37:13] [Rank 0] step:9561/10000 train_time:401761ms step_avg:42.02ms +[2025-09-11 12:37:14] [Rank 0] step:9581/10000 train_time:402475ms step_avg:42.01ms +[2025-09-11 12:37:14] [Rank 0] step:9581/10000 train_time:402475ms step_avg:42.01ms +[2025-09-11 12:37:15] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 12:37:15] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 12:37:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 12:37:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 12:37:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 12:37:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 12:37:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:37:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:37:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 12:37:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 12:37:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 12:37:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 12:37:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 12:37:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 12:37:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 12:37:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 12:37:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 12:37:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 12:37:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 12:37:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 12:37:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 12:37:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 12:37:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 12:37:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 12:37:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 12:37:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 12:37:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 12:37:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 12:37:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 12:37:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 12:37:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 12:37:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 12:37:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 12:37:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 12:37:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 12:37:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 12:37:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 12:37:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 12:37:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 12:37:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 12:37:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 12:37:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 12:37:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 12:37:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 12:37:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 12:37:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 12:37:26] [Rank 0] PRINT: step:9600/10000 val_loss:4.9892 total_sharp:4.5045e-03 L1_sharp:1.0807e-02 L2_sharp:1.2584e-02 L3_sharp:1.3281e-02 L4_sharp:1.3848e-02 L5_sharp:2.0680e-02 L6_sharp:3.0186e-02 L7_sharp:4.4544e-02 L8_sharp:6.6404e-02 L9_sharp:9.3017e-02 L10_sharp:1.8364e-01 L11_sharp:2.7942e-01 L12_sharp:5.5043e-01 total_fnorm:3.1836e-01 total_l1_linf:1.3000e+02 total_spectral:1.6016e-01 L1_fnorm:1.0132e-02 L2_fnorm:1.0132e-02 L3_fnorm:1.0132e-02 L4_fnorm:1.0132e-02 L5_fnorm:1.0010e-02 L6_fnorm:1.0010e-02 L7_fnorm:9.8877e-03 L8_fnorm:9.8877e-03 L9_fnorm:9.9487e-03 L10_fnorm:9.8877e-03 L11_fnorm:9.8267e-03 L12_fnorm:9.3384e-03 L1_l1linf:1.2970e-03 L2_l1linf:1.2970e-03 L3_l1linf:1.2894e-03 L4_l1linf:1.3199e-03 L5_l1linf:1.2970e-03 L6_l1linf:1.3046e-03 L7_l1linf:1.2970e-03 L8_l1linf:1.3046e-03 L9_l1linf:1.3123e-03 L10_l1linf:1.3657e-03 L11_l1linf:1.3809e-03 L12_l1linf:1.1902e-03 L1_spectral:1.9596e-04 L2_spectral:1.9704e-04 L3_spectral:1.9621e-04 L4_spectral:1.9398e-04 L5_spectral:1.9372e-04 L6_spectral:1.8981e-04 L7_spectral:1.9090e-04 L8_spectral:1.8691e-04 L9_spectral:1.8739e-04 L10_spectral:1.8070e-04 L11_spectral:1.7302e-04 L12_spectral:1.6181e-04 train_time:403164ms step_avg:42.00ms +[2025-09-11 12:37:26] [Rank 0] PRINT: step:9600/10000 
val_loss:4.9892 total_sharp:4.5045e-03 L1_sharp:1.0807e-02 L2_sharp:1.2584e-02 L3_sharp:1.3281e-02 L4_sharp:1.3848e-02 L5_sharp:2.0680e-02 L6_sharp:3.0186e-02 L7_sharp:4.4544e-02 L8_sharp:6.6404e-02 L9_sharp:9.3017e-02 L10_sharp:1.8364e-01 L11_sharp:2.7942e-01 L12_sharp:5.5043e-01 total_fnorm:3.1836e-01 total_l1_linf:1.3000e+02 total_spectral:1.6016e-01 L1_fnorm:1.0132e-02 L2_fnorm:1.0132e-02 L3_fnorm:1.0132e-02 L4_fnorm:1.0132e-02 L5_fnorm:1.0010e-02 L6_fnorm:1.0010e-02 L7_fnorm:9.8877e-03 L8_fnorm:9.8877e-03 L9_fnorm:9.9487e-03 L10_fnorm:9.8877e-03 L11_fnorm:9.8267e-03 L12_fnorm:9.3384e-03 L1_l1linf:1.2970e-03 L2_l1linf:1.2970e-03 L3_l1linf:1.2894e-03 L4_l1linf:1.3199e-03 L5_l1linf:1.2970e-03 L6_l1linf:1.3046e-03 L7_l1linf:1.2970e-03 L8_l1linf:1.3046e-03 L9_l1linf:1.3123e-03 L10_l1linf:1.3657e-03 L11_l1linf:1.3809e-03 L12_l1linf:1.1902e-03 L1_spectral:1.9596e-04 L2_spectral:1.9704e-04 L3_spectral:1.9621e-04 L4_spectral:1.9398e-04 L5_spectral:1.9372e-04 L6_spectral:1.8981e-04 L7_spectral:1.9090e-04 L8_spectral:1.8691e-04 L9_spectral:1.8739e-04 L10_spectral:1.8070e-04 L11_spectral:1.7302e-04 L12_spectral:1.6181e-04 train_time:403164ms step_avg:42.00ms +[2025-09-11 12:37:28] [Rank 0] step:9601/10000 train_time:405150ms step_avg:42.20ms +[2025-09-11 12:37:28] [Rank 0] step:9601/10000 train_time:405150ms step_avg:42.20ms +[2025-09-11 12:37:29] [Rank 0] step:9621/10000 train_time:406123ms step_avg:42.21ms +[2025-09-11 12:37:29] [Rank 0] step:9621/10000 train_time:406123ms step_avg:42.21ms +[2025-09-11 12:37:30] [Rank 0] step:9641/10000 train_time:406840ms step_avg:42.20ms +[2025-09-11 12:37:30] [Rank 0] step:9641/10000 train_time:406840ms step_avg:42.20ms +[2025-09-11 12:37:31] [Rank 0] step:9661/10000 train_time:407564ms step_avg:42.19ms +[2025-09-11 12:37:31] [Rank 0] step:9661/10000 train_time:407564ms step_avg:42.19ms +[2025-09-11 12:37:31] [Rank 0] step:9681/10000 train_time:408282ms step_avg:42.17ms +[2025-09-11 12:37:31] [Rank 0] step:9681/10000 
train_time:408282ms step_avg:42.17ms +[2025-09-11 12:37:32] [Rank 0] step:9701/10000 train_time:408999ms step_avg:42.16ms +[2025-09-11 12:37:32] [Rank 0] step:9701/10000 train_time:408999ms step_avg:42.16ms +[2025-09-11 12:37:33] [Rank 0] step:9721/10000 train_time:409722ms step_avg:42.15ms +[2025-09-11 12:37:33] [Rank 0] step:9721/10000 train_time:409722ms step_avg:42.15ms +[2025-09-11 12:37:33] [Rank 0] step:9741/10000 train_time:410442ms step_avg:42.14ms +[2025-09-11 12:37:33] [Rank 0] step:9741/10000 train_time:410442ms step_avg:42.14ms +[2025-09-11 12:37:34] [Rank 0] step:9761/10000 train_time:411163ms step_avg:42.12ms +[2025-09-11 12:37:34] [Rank 0] step:9761/10000 train_time:411163ms step_avg:42.12ms +[2025-09-11 12:37:35] [Rank 0] step:9781/10000 train_time:411880ms step_avg:42.11ms +[2025-09-11 12:37:35] [Rank 0] step:9781/10000 train_time:411880ms step_avg:42.11ms +[2025-09-11 12:37:36] [Rank 0] step:9801/10000 train_time:412603ms step_avg:42.10ms +[2025-09-11 12:37:36] [Rank 0] step:9801/10000 train_time:412603ms step_avg:42.10ms +[2025-09-11 12:37:36] [Rank 0] step:9821/10000 train_time:413323ms step_avg:42.09ms +[2025-09-11 12:37:36] [Rank 0] step:9821/10000 train_time:413323ms step_avg:42.09ms +[2025-09-11 12:37:37] [Rank 0] step:9841/10000 train_time:414046ms step_avg:42.07ms +[2025-09-11 12:37:37] [Rank 0] step:9841/10000 train_time:414046ms step_avg:42.07ms +[2025-09-11 12:37:38] [Rank 0] step:9861/10000 train_time:414764ms step_avg:42.06ms +[2025-09-11 12:37:38] [Rank 0] step:9861/10000 train_time:414764ms step_avg:42.06ms +[2025-09-11 12:37:38] [Rank 0] step:9881/10000 train_time:415483ms step_avg:42.05ms +[2025-09-11 12:37:38] [Rank 0] step:9881/10000 train_time:415483ms step_avg:42.05ms +[2025-09-11 12:37:39] [Rank 0] step:9901/10000 train_time:416199ms step_avg:42.04ms +[2025-09-11 12:37:39] [Rank 0] step:9901/10000 train_time:416199ms step_avg:42.04ms +[2025-09-11 12:37:40] [Rank 0] step:9921/10000 train_time:416917ms step_avg:42.02ms 
+[2025-09-11 12:37:40] [Rank 0] step:9921/10000 train_time:416917ms step_avg:42.02ms +[2025-09-11 12:37:41] [Rank 0] step:9941/10000 train_time:417640ms step_avg:42.01ms +[2025-09-11 12:37:41] [Rank 0] step:9941/10000 train_time:417640ms step_avg:42.01ms +[2025-09-11 12:37:41] [Rank 0] step:9961/10000 train_time:418365ms step_avg:42.00ms +[2025-09-11 12:37:41] [Rank 0] step:9961/10000 train_time:418365ms step_avg:42.00ms +[2025-09-11 12:37:42] [Rank 0] step:9981/10000 train_time:419086ms step_avg:41.99ms +[2025-09-11 12:37:42] [Rank 0] step:9981/10000 train_time:419086ms step_avg:41.99ms +[2025-09-11 12:37:43] [Rank 0] step:10000/10000 train_time:419779ms step_avg:41.98ms +[2025-09-11 12:37:43] [Rank 0] step:10000/10000 train_time:419779ms step_avg:41.98ms +[2025-09-11 12:37:43] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 12:37:43] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 12:37:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 12:37:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 12:37:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 12:37:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 12:37:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:37:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:37:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 12:37:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 12:37:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 12:37:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 12:37:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 12:37:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 12:37:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 12:37:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 12:37:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 12:37:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 12:37:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 12:37:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 12:37:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 12:37:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 12:37:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 12:37:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 12:37:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 12:37:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 12:37:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 12:37:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 12:37:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 12:37:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 12:37:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 12:37:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 12:37:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 12:37:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 12:37:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 12:37:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 12:37:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 12:37:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 12:37:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 12:37:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 12:37:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 12:37:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 12:37:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 12:37:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 12:37:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 12:37:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:37:54] [Rank 0] PRINT: step:10000/10000 val_loss:4.9876 total_sharp:3.8180e-03 L1_sharp:7.4376e-03 L2_sharp:7.7368e-03 L3_sharp:1.0733e-02 L4_sharp:1.0141e-02 L5_sharp:1.4852e-02 L6_sharp:2.6862e-02 L7_sharp:5.0489e-02 L8_sharp:6.5232e-02 L9_sharp:9.4742e-02 L10_sharp:1.6136e-01 L11_sharp:2.0970e-01 L12_sharp:5.4369e-01 total_fnorm:1.2061e-01 total_l1_linf:3.6000e+01 total_spectral:6.0547e-02 L1_fnorm:3.9062e-03 L2_fnorm:3.9673e-03 L3_fnorm:3.9673e-03 L4_fnorm:3.9368e-03 L5_fnorm:3.9062e-03 L6_fnorm:3.9062e-03 L7_fnorm:3.9062e-03 L8_fnorm:3.8605e-03 L9_fnorm:3.9062e-03 L10_fnorm:3.8452e-03 L11_fnorm:3.8452e-03 L12_fnorm:3.6316e-03 L1_l1linf:4.2534e-04 L2_l1linf:3.8528e-04 L3_l1linf:4.1008e-04 L4_l1linf:4.1580e-04 L5_l1linf:4.1199e-04 L6_l1linf:4.3106e-04 L7_l1linf:3.9482e-04 L8_l1linf:4.1389e-04 L9_l1linf:4.0245e-04 L10_l1linf:4.2725e-04 L11_l1linf:4.0627e-04 L12_l1linf:3.5858e-04 L1_spectral:7.8337e-05 L2_spectral:7.9618e-05 L3_spectral:7.9538e-05 L4_spectral:7.8388e-05 L5_spectral:7.8688e-05 L6_spectral:7.5498e-05 L7_spectral:7.5602e-05 L8_spectral:7.4149e-05 L9_spectral:7.5084e-05 L10_spectral:7.2692e-05 L11_spectral:7.0312e-05 L12_spectral:6.5297e-05 train_time:419800ms step_avg:41.98ms +[2025-09-11 12:37:54] [Rank 0] PRINT: step:10000/10000 val_loss:4.9876 total_sharp:3.8180e-03 L1_sharp:7.4376e-03 L2_sharp:7.7368e-03 L3_sharp:1.0733e-02 L4_sharp:1.0141e-02 L5_sharp:1.4852e-02 L6_sharp:2.6862e-02 L7_sharp:5.0489e-02 L8_sharp:6.5232e-02 L9_sharp:9.4742e-02 L10_sharp:1.6136e-01 L11_sharp:2.0970e-01 L12_sharp:5.4369e-01 total_fnorm:1.2061e-01 total_l1_linf:3.6000e+01 total_spectral:6.0547e-02 L1_fnorm:3.9062e-03 L2_fnorm:3.9673e-03 L3_fnorm:3.9673e-03 L4_fnorm:3.9368e-03 L5_fnorm:3.9062e-03 L6_fnorm:3.9062e-03 L7_fnorm:3.9062e-03 L8_fnorm:3.8605e-03 L9_fnorm:3.9062e-03 L10_fnorm:3.8452e-03 L11_fnorm:3.8452e-03 L12_fnorm:3.6316e-03 L1_l1linf:4.2534e-04 L2_l1linf:3.8528e-04 L3_l1linf:4.1008e-04 L4_l1linf:4.1580e-04 L5_l1linf:4.1199e-04 
L6_l1linf:4.3106e-04 L7_l1linf:3.9482e-04 L8_l1linf:4.1389e-04 L9_l1linf:4.0245e-04 L10_l1linf:4.2725e-04 L11_l1linf:4.0627e-04 L12_l1linf:3.5858e-04 L1_spectral:7.8337e-05 L2_spectral:7.9618e-05 L3_spectral:7.9538e-05 L4_spectral:7.8388e-05 L5_spectral:7.8688e-05 L6_spectral:7.5498e-05 L7_spectral:7.5602e-05 L8_spectral:7.4149e-05 L9_spectral:7.5084e-05 L10_spectral:7.2692e-05 L11_spectral:7.0312e-05 L12_spectral:6.5297e-05 train_time:419800ms step_avg:41.98ms +[2025-09-11 12:37:54] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 12:37:54 2025 --- +[2025-09-11 12:37:54] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 12:37:54 2025 --- +[2025-09-11 12:37:54] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 12:37:54] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.002_seed_42/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.002_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..7c47569de20358d9d56938e9eb2b4b44b8118a59 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.002_seed_42/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.002, + "muon_lr": 0.002, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "02398284-62a6-4a94-b351-aeddcff7f8c3", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.002_seed_42/training_log_02398284-62a6-4a94-b351-aeddcff7f8c3.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.002_seed_42/training_log_02398284-62a6-4a94-b351-aeddcff7f8c3.txt new file mode 100644 index 0000000000000000000000000000000000000000..07e80de321e1fa7bfa6c5c488391a2da9995bf99 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.002_seed_42/training_log_02398284-62a6-4a94-b351-aeddcff7f8c3.txt @@ -0,0 +1,4264 @@ +[2025-09-11 12:10:19] [Rank 0] PRINT: --- Script Start: Thu Sep 11 12:10:19 2025 --- +[2025-09-11 12:10:19] [Rank 0] PRINT: --- Script Start: Thu Sep 11 12:10:19 2025 --- +[2025-09-11 12:10:19] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002, muon_lr=0.002, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 12:10:19] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002, muon_lr=0.002, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 12:10:19] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 12:10:19] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 12:10:19] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-11 12:10:19] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-11 12:10:20] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.002_seed_42 +[2025-09-11 12:10:20] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.002_seed_42 +[2025-09-11 12:10:20] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses 
import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert 
header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." 
+ "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + 
train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 12:10:20] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 12:10:20] [Rank 0] PRINT: Constructing model... +[2025-09-11 12:10:20] [Rank 0] PRINT: Constructing model... +[2025-09-11 12:10:20] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 12:10:20] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 12:10:20] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 12:10:20] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 12:10:20] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 12:10:20] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 12:10:20] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 12:10:20] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 12:10:21] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 12:10:21] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 12:10:23] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 12:10:23] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 12:10:23] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 12:10:23] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 12:10:23] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 12:10:23] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 12:10:29] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 12:10:29] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 12:10:29] [Rank 0] PRINT: Starting warmup... +[2025-09-11 12:10:29] [Rank 0] PRINT: Starting warmup... +[2025-09-11 12:11:06] [Rank 0] PRINT: Warmup complete. +[2025-09-11 12:11:06] [Rank 0] PRINT: Warmup complete. +[2025-09-11 12:11:06] [Rank 0] PRINT: Starting training... +[2025-09-11 12:11:06] [Rank 0] PRINT: Starting training... 
+[2025-09-11 12:11:07] [Rank 0] step:21/10000 train_time:1280ms step_avg:60.95ms +[2025-09-11 12:11:07] [Rank 0] step:21/10000 train_time:1280ms step_avg:60.95ms +[2025-09-11 12:11:08] [Rank 0] step:41/10000 train_time:2130ms step_avg:51.94ms +[2025-09-11 12:11:08] [Rank 0] step:41/10000 train_time:2130ms step_avg:51.94ms +[2025-09-11 12:11:09] [Rank 0] step:61/10000 train_time:2860ms step_avg:46.89ms +[2025-09-11 12:11:09] [Rank 0] step:61/10000 train_time:2860ms step_avg:46.89ms +[2025-09-11 12:11:09] [Rank 0] step:81/10000 train_time:3593ms step_avg:44.35ms +[2025-09-11 12:11:09] [Rank 0] step:81/10000 train_time:3593ms step_avg:44.35ms +[2025-09-11 12:11:10] [Rank 0] step:101/10000 train_time:4324ms step_avg:42.81ms +[2025-09-11 12:11:10] [Rank 0] step:101/10000 train_time:4324ms step_avg:42.81ms +[2025-09-11 12:11:11] [Rank 0] step:121/10000 train_time:5054ms step_avg:41.76ms +[2025-09-11 12:11:11] [Rank 0] step:121/10000 train_time:5054ms step_avg:41.76ms +[2025-09-11 12:11:12] [Rank 0] step:141/10000 train_time:5784ms step_avg:41.02ms +[2025-09-11 12:11:12] [Rank 0] step:141/10000 train_time:5784ms step_avg:41.02ms +[2025-09-11 12:11:12] [Rank 0] step:161/10000 train_time:6515ms step_avg:40.47ms +[2025-09-11 12:11:12] [Rank 0] step:161/10000 train_time:6515ms step_avg:40.47ms +[2025-09-11 12:11:13] [Rank 0] step:181/10000 train_time:7245ms step_avg:40.03ms +[2025-09-11 12:11:13] [Rank 0] step:181/10000 train_time:7245ms step_avg:40.03ms +[2025-09-11 12:11:14] [Rank 0] step:201/10000 train_time:7977ms step_avg:39.69ms +[2025-09-11 12:11:14] [Rank 0] step:201/10000 train_time:7977ms step_avg:39.69ms +[2025-09-11 12:11:15] [Rank 0] step:221/10000 train_time:8708ms step_avg:39.40ms +[2025-09-11 12:11:15] [Rank 0] step:221/10000 train_time:8708ms step_avg:39.40ms +[2025-09-11 12:11:15] [Rank 0] step:241/10000 train_time:9438ms step_avg:39.16ms +[2025-09-11 12:11:15] [Rank 0] step:241/10000 train_time:9438ms step_avg:39.16ms +[2025-09-11 12:11:16] [Rank 0] 
step:261/10000 train_time:10169ms step_avg:38.96ms +[2025-09-11 12:11:16] [Rank 0] step:261/10000 train_time:10169ms step_avg:38.96ms +[2025-09-11 12:11:17] [Rank 0] step:281/10000 train_time:10900ms step_avg:38.79ms +[2025-09-11 12:11:17] [Rank 0] step:281/10000 train_time:10900ms step_avg:38.79ms +[2025-09-11 12:11:17] [Rank 0] step:301/10000 train_time:11630ms step_avg:38.64ms +[2025-09-11 12:11:17] [Rank 0] step:301/10000 train_time:11630ms step_avg:38.64ms +[2025-09-11 12:11:18] [Rank 0] step:321/10000 train_time:12360ms step_avg:38.51ms +[2025-09-11 12:11:18] [Rank 0] step:321/10000 train_time:12360ms step_avg:38.51ms +[2025-09-11 12:11:19] [Rank 0] step:341/10000 train_time:13090ms step_avg:38.39ms +[2025-09-11 12:11:19] [Rank 0] step:341/10000 train_time:13090ms step_avg:38.39ms +[2025-09-11 12:11:20] [Rank 0] step:361/10000 train_time:13821ms step_avg:38.28ms +[2025-09-11 12:11:20] [Rank 0] step:361/10000 train_time:13821ms step_avg:38.28ms +[2025-09-11 12:11:20] [Rank 0] step:381/10000 train_time:14551ms step_avg:38.19ms +[2025-09-11 12:11:20] [Rank 0] step:381/10000 train_time:14551ms step_avg:38.19ms +[2025-09-11 12:11:21] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 12:11:21] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 12:11:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 12:11:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 12:12:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 12:12:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 12:12:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:12:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 12:12:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 12:12:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 12:12:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 12:12:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 12:12:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 12:12:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 12:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 12:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 12:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 12:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 12:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 12:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 12:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 12:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 12:12:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 12:12:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 12:12:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 12:12:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 12:12:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 12:12:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 12:12:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 12:12:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 12:12:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 12:12:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 12:12:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 12:12:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 12:12:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 12:12:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 12:12:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 12:12:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 12:12:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 12:12:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 12:12:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 12:12:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 12:12:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 12:12:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 12:12:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 12:12:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:12:13] [Rank 0] PRINT: step:400/10000 val_loss:6.6852 total_sharp:1.1285e-02 L1_sharp:8.3390e-02 L2_sharp:8.2291e-02 L3_sharp:7.3214e-02 L4_sharp:8.4847e-02 L5_sharp:9.5139e-02 L6_sharp:1.2219e-01 L7_sharp:1.1496e-01 L8_sharp:1.6015e-01 L9_sharp:2.3414e-01 L10_sharp:2.6200e-01 L11_sharp:4.0517e-01 L12_sharp:5.2119e-01 total_fnorm:9.3317e+00 total_l1_linf:2.5114e+04 total_spectral:4.6656e+00 L1_fnorm:2.4342e-01 L2_fnorm:2.4335e-01 L3_fnorm:2.4301e-01 L4_fnorm:2.4217e-01 L5_fnorm:2.4171e-01 L6_fnorm:2.3788e-01 L7_fnorm:2.3936e-01 L8_fnorm:2.3729e-01 L9_fnorm:2.3165e-01 L10_fnorm:2.1863e-01 L11_fnorm:1.9799e-01 L12_fnorm:1.8274e-01 L1_l1linf:8.5872e-02 L2_l1linf:8.4819e-02 L3_l1linf:8.4737e-02 L4_l1linf:8.3911e-02 L5_l1linf:8.4354e-02 L6_l1linf:8.4519e-02 L7_l1linf:8.5419e-02 L8_l1linf:8.5094e-02 L9_l1linf:8.2763e-02 L10_l1linf:7.8281e-02 L11_l1linf:6.9443e-02 L12_l1linf:6.0934e-02 L1_spectral:2.4122e-03 L2_spectral:2.4113e-03 L3_spectral:2.4120e-03 L4_spectral:2.4122e-03 L5_spectral:2.4112e-03 L6_spectral:2.4089e-03 L7_spectral:2.4125e-03 L8_spectral:2.4091e-03 L9_spectral:2.4102e-03 L10_spectral:2.4082e-03 L11_spectral:2.4083e-03 L12_spectral:2.4080e-03 train_time:15261ms step_avg:38.15ms +[2025-09-11 12:12:13] [Rank 0] PRINT: step:400/10000 val_loss:6.6852 total_sharp:1.1285e-02 L1_sharp:8.3390e-02 L2_sharp:8.2291e-02 L3_sharp:7.3214e-02 L4_sharp:8.4847e-02 L5_sharp:9.5139e-02 L6_sharp:1.2219e-01 L7_sharp:1.1496e-01 L8_sharp:1.6015e-01 L9_sharp:2.3414e-01 L10_sharp:2.6200e-01 L11_sharp:4.0517e-01 L12_sharp:5.2119e-01 total_fnorm:9.3317e+00 total_l1_linf:2.5114e+04 total_spectral:4.6656e+00 L1_fnorm:2.4342e-01 L2_fnorm:2.4335e-01 L3_fnorm:2.4301e-01 L4_fnorm:2.4217e-01 L5_fnorm:2.4171e-01 L6_fnorm:2.3788e-01 L7_fnorm:2.3936e-01 L8_fnorm:2.3729e-01 L9_fnorm:2.3165e-01 L10_fnorm:2.1863e-01 L11_fnorm:1.9799e-01 L12_fnorm:1.8274e-01 L1_l1linf:8.5872e-02 L2_l1linf:8.4819e-02 L3_l1linf:8.4737e-02 L4_l1linf:8.3911e-02 L5_l1linf:8.4354e-02 
L6_l1linf:8.4519e-02 L7_l1linf:8.5419e-02 L8_l1linf:8.5094e-02 L9_l1linf:8.2763e-02 L10_l1linf:7.8281e-02 L11_l1linf:6.9443e-02 L12_l1linf:6.0934e-02 L1_spectral:2.4122e-03 L2_spectral:2.4113e-03 L3_spectral:2.4120e-03 L4_spectral:2.4122e-03 L5_spectral:2.4112e-03 L6_spectral:2.4089e-03 L7_spectral:2.4125e-03 L8_spectral:2.4091e-03 L9_spectral:2.4102e-03 L10_spectral:2.4082e-03 L11_spectral:2.4083e-03 L12_spectral:2.4080e-03 train_time:15261ms step_avg:38.15ms +[2025-09-11 12:12:44] [Rank 0] step:401/10000 train_time:46195ms step_avg:115.20ms +[2025-09-11 12:12:44] [Rank 0] step:401/10000 train_time:46195ms step_avg:115.20ms +[2025-09-11 12:12:46] [Rank 0] step:421/10000 train_time:48393ms step_avg:114.95ms +[2025-09-11 12:12:46] [Rank 0] step:421/10000 train_time:48393ms step_avg:114.95ms +[2025-09-11 12:12:47] [Rank 0] step:441/10000 train_time:49035ms step_avg:111.19ms +[2025-09-11 12:12:47] [Rank 0] step:441/10000 train_time:49035ms step_avg:111.19ms +[2025-09-11 12:12:47] [Rank 0] step:461/10000 train_time:49677ms step_avg:107.76ms +[2025-09-11 12:12:47] [Rank 0] step:461/10000 train_time:49677ms step_avg:107.76ms +[2025-09-11 12:12:48] [Rank 0] step:481/10000 train_time:50319ms step_avg:104.61ms +[2025-09-11 12:12:48] [Rank 0] step:481/10000 train_time:50319ms step_avg:104.61ms +[2025-09-11 12:12:49] [Rank 0] step:501/10000 train_time:50960ms step_avg:101.72ms +[2025-09-11 12:12:49] [Rank 0] step:501/10000 train_time:50960ms step_avg:101.72ms +[2025-09-11 12:12:49] [Rank 0] step:521/10000 train_time:51601ms step_avg:99.04ms +[2025-09-11 12:12:49] [Rank 0] step:521/10000 train_time:51601ms step_avg:99.04ms +[2025-09-11 12:12:50] [Rank 0] step:541/10000 train_time:52242ms step_avg:96.57ms +[2025-09-11 12:12:50] [Rank 0] step:541/10000 train_time:52242ms step_avg:96.57ms +[2025-09-11 12:12:50] [Rank 0] step:561/10000 train_time:52884ms step_avg:94.27ms +[2025-09-11 12:12:50] [Rank 0] step:561/10000 train_time:52884ms step_avg:94.27ms +[2025-09-11 12:12:51] [Rank 
0] step:581/10000 train_time:53524ms step_avg:92.12ms +[2025-09-11 12:12:51] [Rank 0] step:581/10000 train_time:53524ms step_avg:92.12ms +[2025-09-11 12:12:52] [Rank 0] step:601/10000 train_time:54167ms step_avg:90.13ms +[2025-09-11 12:12:52] [Rank 0] step:601/10000 train_time:54167ms step_avg:90.13ms +[2025-09-11 12:12:52] [Rank 0] step:621/10000 train_time:54807ms step_avg:88.26ms +[2025-09-11 12:12:52] [Rank 0] step:621/10000 train_time:54807ms step_avg:88.26ms +[2025-09-11 12:12:53] [Rank 0] step:641/10000 train_time:55448ms step_avg:86.50ms +[2025-09-11 12:12:53] [Rank 0] step:641/10000 train_time:55448ms step_avg:86.50ms +[2025-09-11 12:12:54] [Rank 0] step:661/10000 train_time:56089ms step_avg:84.85ms +[2025-09-11 12:12:54] [Rank 0] step:661/10000 train_time:56089ms step_avg:84.85ms +[2025-09-11 12:12:54] [Rank 0] step:681/10000 train_time:56730ms step_avg:83.30ms +[2025-09-11 12:12:54] [Rank 0] step:681/10000 train_time:56730ms step_avg:83.30ms +[2025-09-11 12:12:55] [Rank 0] step:701/10000 train_time:57370ms step_avg:81.84ms +[2025-09-11 12:12:55] [Rank 0] step:701/10000 train_time:57370ms step_avg:81.84ms +[2025-09-11 12:12:56] [Rank 0] step:721/10000 train_time:58011ms step_avg:80.46ms +[2025-09-11 12:12:56] [Rank 0] step:721/10000 train_time:58011ms step_avg:80.46ms +[2025-09-11 12:12:56] [Rank 0] step:741/10000 train_time:58652ms step_avg:79.15ms +[2025-09-11 12:12:56] [Rank 0] step:741/10000 train_time:58652ms step_avg:79.15ms +[2025-09-11 12:12:57] [Rank 0] step:761/10000 train_time:59298ms step_avg:77.92ms +[2025-09-11 12:12:57] [Rank 0] step:761/10000 train_time:59298ms step_avg:77.92ms +[2025-09-11 12:12:58] [Rank 0] step:781/10000 train_time:59944ms step_avg:76.75ms +[2025-09-11 12:12:58] [Rank 0] step:781/10000 train_time:59944ms step_avg:76.75ms +[2025-09-11 12:12:58] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 12:12:58] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 12:13:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 12:13:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 12:13:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 12:13:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 12:13:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:13:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:13:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 12:13:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 12:13:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 12:13:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 12:13:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 12:13:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 12:13:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 12:13:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 12:13:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 12:13:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 12:13:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 12:13:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 12:13:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 12:13:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 12:13:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 12:13:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 12:13:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 12:13:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 12:13:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 12:13:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 12:13:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 12:13:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 12:13:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 12:13:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 12:13:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 12:13:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 12:13:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 12:13:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 12:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 12:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 12:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 12:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 12:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... 
+[2025-09-11 12:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 12:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 12:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 12:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 12:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 12:13:42] [Rank 0] PRINT: step:800/10000 val_loss:6.2357 total_sharp:3.3249e-02 L1_sharp:6.3917e-02 L2_sharp:6.6540e-02 L3_sharp:6.7989e-02 L4_sharp:8.6296e-02 L5_sharp:1.1907e-01 L6_sharp:1.7781e-01 L7_sharp:2.1639e-01 L8_sharp:3.3299e-01 L9_sharp:3.3165e-01 L10_sharp:4.0816e-01 L11_sharp:6.1653e-01 L12_sharp:9.8431e-01 total_fnorm:6.5625e+00 total_l1_linf:9.9200e+03 total_spectral:3.3125e+00 L1_fnorm:2.4805e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4512e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4902e-01 L8_fnorm:2.4023e-01 L9_fnorm:2.4609e-01 L10_fnorm:2.3828e-01 L11_fnorm:2.2070e-01 L12_fnorm:1.8652e-01 L1_l1linf:8.1543e-02 L2_l1linf:8.2031e-02 L3_l1linf:8.2031e-02 L4_l1linf:8.1055e-02 L5_l1linf:8.0566e-02 L6_l1linf:8.2520e-02 L7_l1linf:8.3984e-02 L8_l1linf:8.3984e-02 L9_l1linf:8.2031e-02 L10_l1linf:7.8125e-02 L11_l1linf:6.7383e-02 L12_l1linf:4.2480e-02 L1_spectral:3.1411e-03 L2_spectral:3.1187e-03 L3_spectral:3.1088e-03 L4_spectral:3.1175e-03 L5_spectral:3.1189e-03 L6_spectral:3.1042e-03 L7_spectral:3.0979e-03 L8_spectral:3.0613e-03 L9_spectral:3.0990e-03 L10_spectral:3.0837e-03 L11_spectral:3.0561e-03 L12_spectral:2.9927e-03 train_time:60572ms step_avg:75.72ms +[2025-09-11 12:13:42] [Rank 0] PRINT: step:800/10000 val_loss:6.2357 total_sharp:3.3249e-02 L1_sharp:6.3917e-02 L2_sharp:6.6540e-02 L3_sharp:6.7989e-02 L4_sharp:8.6296e-02 L5_sharp:1.1907e-01 L6_sharp:1.7781e-01 L7_sharp:2.1639e-01 L8_sharp:3.3299e-01 
L9_sharp:3.3165e-01 L10_sharp:4.0816e-01 L11_sharp:6.1653e-01 L12_sharp:9.8431e-01 total_fnorm:6.5625e+00 total_l1_linf:9.9200e+03 total_spectral:3.3125e+00 L1_fnorm:2.4805e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4512e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4902e-01 L8_fnorm:2.4023e-01 L9_fnorm:2.4609e-01 L10_fnorm:2.3828e-01 L11_fnorm:2.2070e-01 L12_fnorm:1.8652e-01 L1_l1linf:8.1543e-02 L2_l1linf:8.2031e-02 L3_l1linf:8.2031e-02 L4_l1linf:8.1055e-02 L5_l1linf:8.0566e-02 L6_l1linf:8.2520e-02 L7_l1linf:8.3984e-02 L8_l1linf:8.3984e-02 L9_l1linf:8.2031e-02 L10_l1linf:7.8125e-02 L11_l1linf:6.7383e-02 L12_l1linf:4.2480e-02 L1_spectral:3.1411e-03 L2_spectral:3.1187e-03 L3_spectral:3.1088e-03 L4_spectral:3.1175e-03 L5_spectral:3.1189e-03 L6_spectral:3.1042e-03 L7_spectral:3.0979e-03 L8_spectral:3.0613e-03 L9_spectral:3.0990e-03 L10_spectral:3.0837e-03 L11_spectral:3.0561e-03 L12_spectral:2.9927e-03 train_time:60572ms step_avg:75.72ms +[2025-09-11 12:13:44] [Rank 0] step:801/10000 train_time:62190ms step_avg:77.64ms +[2025-09-11 12:13:44] [Rank 0] step:801/10000 train_time:62190ms step_avg:77.64ms +[2025-09-11 12:13:44] [Rank 0] step:821/10000 train_time:62839ms step_avg:76.54ms +[2025-09-11 12:13:44] [Rank 0] step:821/10000 train_time:62839ms step_avg:76.54ms +[2025-09-11 12:13:45] [Rank 0] step:841/10000 train_time:63486ms step_avg:75.49ms +[2025-09-11 12:13:45] [Rank 0] step:841/10000 train_time:63486ms step_avg:75.49ms +[2025-09-11 12:13:46] [Rank 0] step:861/10000 train_time:64131ms step_avg:74.48ms +[2025-09-11 12:13:46] [Rank 0] step:861/10000 train_time:64131ms step_avg:74.48ms +[2025-09-11 12:13:46] [Rank 0] step:881/10000 train_time:64777ms step_avg:73.53ms +[2025-09-11 12:13:46] [Rank 0] step:881/10000 train_time:64777ms step_avg:73.53ms +[2025-09-11 12:13:47] [Rank 0] step:901/10000 train_time:65422ms step_avg:72.61ms +[2025-09-11 12:13:47] [Rank 0] step:901/10000 train_time:65422ms step_avg:72.61ms +[2025-09-11 12:13:47] [Rank 0] 
step:921/10000 train_time:66068ms step_avg:71.73ms +[2025-09-11 12:13:47] [Rank 0] step:921/10000 train_time:66068ms step_avg:71.73ms +[2025-09-11 12:13:48] [Rank 0] step:941/10000 train_time:66813ms step_avg:71.00ms +[2025-09-11 12:13:48] [Rank 0] step:941/10000 train_time:66813ms step_avg:71.00ms +[2025-09-11 12:13:49] [Rank 0] step:961/10000 train_time:67459ms step_avg:70.20ms +[2025-09-11 12:13:49] [Rank 0] step:961/10000 train_time:67459ms step_avg:70.20ms +[2025-09-11 12:13:49] [Rank 0] step:981/10000 train_time:68103ms step_avg:69.42ms +[2025-09-11 12:13:49] [Rank 0] step:981/10000 train_time:68103ms step_avg:69.42ms +[2025-09-11 12:13:50] [Rank 0] step:1001/10000 train_time:68748ms step_avg:68.68ms +[2025-09-11 12:13:50] [Rank 0] step:1001/10000 train_time:68748ms step_avg:68.68ms +[2025-09-11 12:13:51] [Rank 0] step:1021/10000 train_time:69394ms step_avg:67.97ms +[2025-09-11 12:13:51] [Rank 0] step:1021/10000 train_time:69394ms step_avg:67.97ms +[2025-09-11 12:13:51] [Rank 0] step:1041/10000 train_time:70040ms step_avg:67.28ms +[2025-09-11 12:13:51] [Rank 0] step:1041/10000 train_time:70040ms step_avg:67.28ms +[2025-09-11 12:13:52] [Rank 0] step:1061/10000 train_time:70686ms step_avg:66.62ms +[2025-09-11 12:13:52] [Rank 0] step:1061/10000 train_time:70686ms step_avg:66.62ms +[2025-09-11 12:13:53] [Rank 0] step:1081/10000 train_time:71331ms step_avg:65.99ms +[2025-09-11 12:13:53] [Rank 0] step:1081/10000 train_time:71331ms step_avg:65.99ms +[2025-09-11 12:13:53] [Rank 0] step:1101/10000 train_time:71978ms step_avg:65.37ms +[2025-09-11 12:13:53] [Rank 0] step:1101/10000 train_time:71978ms step_avg:65.37ms +[2025-09-11 12:13:54] [Rank 0] step:1121/10000 train_time:72623ms step_avg:64.78ms +[2025-09-11 12:13:54] [Rank 0] step:1121/10000 train_time:72623ms step_avg:64.78ms +[2025-09-11 12:13:55] [Rank 0] step:1141/10000 train_time:73267ms step_avg:64.21ms +[2025-09-11 12:13:55] [Rank 0] step:1141/10000 train_time:73267ms step_avg:64.21ms +[2025-09-11 12:13:55] 
[Rank 0] step:1161/10000 train_time:73913ms step_avg:63.66ms +[2025-09-11 12:13:55] [Rank 0] step:1161/10000 train_time:73913ms step_avg:63.66ms +[2025-09-11 12:13:56] [Rank 0] step:1181/10000 train_time:74558ms step_avg:63.13ms +[2025-09-11 12:13:56] [Rank 0] step:1181/10000 train_time:74558ms step_avg:63.13ms +[2025-09-11 12:13:57] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 12:13:57] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 12:13:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 12:13:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 12:13:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 12:13:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 12:13:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:13:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:13:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 12:13:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 12:13:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 12:13:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 12:14:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 12:14:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 12:14:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... 
+[2025-09-11 12:14:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 12:14:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 12:14:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 12:14:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 12:14:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 12:14:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 12:14:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 12:14:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 12:14:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 12:14:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 12:14:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 12:14:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 12:14:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 12:14:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 12:14:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 12:14:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 12:14:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 12:14:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 12:14:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 12:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... 
+[2025-09-11 12:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 12:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 12:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 12:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 12:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 12:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 12:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 12:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 12:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 12:14:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 12:14:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:14:07] [Rank 0] PRINT: step:1200/10000 val_loss:5.9615 total_sharp:1.8987e-02 L1_sharp:3.2440e-02 L2_sharp:3.4410e-02 L3_sharp:3.3484e-02 L4_sharp:4.0549e-02 L5_sharp:4.8745e-02 L6_sharp:7.3554e-02 L7_sharp:9.4997e-02 L8_sharp:1.5352e-01 L9_sharp:1.6907e-01 L10_sharp:1.7911e-01 L11_sharp:2.4624e-01 L12_sharp:4.8096e-01 total_fnorm:5.9062e+00 total_l1_linf:8.5120e+03 total_spectral:2.9844e+00 L1_fnorm:2.4902e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.5000e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4512e-01 L6_fnorm:2.4512e-01 L7_fnorm:2.4805e-01 L8_fnorm:2.4609e-01 L9_fnorm:2.5000e-01 L10_fnorm:2.5195e-01 L11_fnorm:2.4805e-01 L12_fnorm:2.2461e-01 L1_l1linf:7.5684e-02 L2_l1linf:7.5684e-02 L3_l1linf:7.4707e-02 L4_l1linf:7.4707e-02 L5_l1linf:7.4707e-02 L6_l1linf:7.4219e-02 L7_l1linf:7.5195e-02 L8_l1linf:7.7637e-02 L9_l1linf:7.8125e-02 L10_l1linf:8.0078e-02 L11_l1linf:7.7637e-02 L12_l1linf:5.7617e-02 L1_spectral:3.1128e-03 L2_spectral:3.1525e-03 L3_spectral:3.1325e-03 L4_spectral:3.1414e-03 L5_spectral:3.1289e-03 L6_spectral:3.1197e-03 L7_spectral:3.1084e-03 L8_spectral:3.1031e-03 L9_spectral:3.1183e-03 L10_spectral:3.1133e-03 L11_spectral:3.0941e-03 L12_spectral:3.0622e-03 train_time:75185ms step_avg:62.65ms +[2025-09-11 12:14:07] [Rank 0] PRINT: step:1200/10000 val_loss:5.9615 total_sharp:1.8987e-02 L1_sharp:3.2440e-02 L2_sharp:3.4410e-02 L3_sharp:3.3484e-02 L4_sharp:4.0549e-02 L5_sharp:4.8745e-02 L6_sharp:7.3554e-02 L7_sharp:9.4997e-02 L8_sharp:1.5352e-01 L9_sharp:1.6907e-01 L10_sharp:1.7911e-01 L11_sharp:2.4624e-01 L12_sharp:4.8096e-01 total_fnorm:5.9062e+00 total_l1_linf:8.5120e+03 total_spectral:2.9844e+00 L1_fnorm:2.4902e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.5000e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4512e-01 L6_fnorm:2.4512e-01 L7_fnorm:2.4805e-01 L8_fnorm:2.4609e-01 L9_fnorm:2.5000e-01 L10_fnorm:2.5195e-01 L11_fnorm:2.4805e-01 L12_fnorm:2.2461e-01 L1_l1linf:7.5684e-02 L2_l1linf:7.5684e-02 L3_l1linf:7.4707e-02 L4_l1linf:7.4707e-02 L5_l1linf:7.4707e-02 
L6_l1linf:7.4219e-02 L7_l1linf:7.5195e-02 L8_l1linf:7.7637e-02 L9_l1linf:7.8125e-02 L10_l1linf:8.0078e-02 L11_l1linf:7.7637e-02 L12_l1linf:5.7617e-02 L1_spectral:3.1128e-03 L2_spectral:3.1525e-03 L3_spectral:3.1325e-03 L4_spectral:3.1414e-03 L5_spectral:3.1289e-03 L6_spectral:3.1197e-03 L7_spectral:3.1084e-03 L8_spectral:3.1031e-03 L9_spectral:3.1183e-03 L10_spectral:3.1133e-03 L11_spectral:3.0941e-03 L12_spectral:3.0622e-03 train_time:75185ms step_avg:62.65ms +[2025-09-11 12:14:09] [Rank 0] step:1201/10000 train_time:76964ms step_avg:64.08ms +[2025-09-11 12:14:09] [Rank 0] step:1201/10000 train_time:76964ms step_avg:64.08ms +[2025-09-11 12:14:10] [Rank 0] step:1221/10000 train_time:77677ms step_avg:63.62ms +[2025-09-11 12:14:10] [Rank 0] step:1221/10000 train_time:77677ms step_avg:63.62ms +[2025-09-11 12:14:10] [Rank 0] step:1241/10000 train_time:78324ms step_avg:63.11ms +[2025-09-11 12:14:10] [Rank 0] step:1241/10000 train_time:78324ms step_avg:63.11ms +[2025-09-11 12:14:11] [Rank 0] step:1261/10000 train_time:78972ms step_avg:62.63ms +[2025-09-11 12:14:11] [Rank 0] step:1261/10000 train_time:78972ms step_avg:62.63ms +[2025-09-11 12:14:12] [Rank 0] step:1281/10000 train_time:79619ms step_avg:62.15ms +[2025-09-11 12:14:12] [Rank 0] step:1281/10000 train_time:79619ms step_avg:62.15ms +[2025-09-11 12:14:12] [Rank 0] step:1301/10000 train_time:80268ms step_avg:61.70ms +[2025-09-11 12:14:12] [Rank 0] step:1301/10000 train_time:80268ms step_avg:61.70ms +[2025-09-11 12:14:13] [Rank 0] step:1321/10000 train_time:80915ms step_avg:61.25ms +[2025-09-11 12:14:13] [Rank 0] step:1321/10000 train_time:80915ms step_avg:61.25ms +[2025-09-11 12:14:14] [Rank 0] step:1341/10000 train_time:81561ms step_avg:60.82ms +[2025-09-11 12:14:14] [Rank 0] step:1341/10000 train_time:81561ms step_avg:60.82ms +[2025-09-11 12:14:14] [Rank 0] step:1361/10000 train_time:82209ms step_avg:60.40ms +[2025-09-11 12:14:14] [Rank 0] step:1361/10000 train_time:82209ms step_avg:60.40ms +[2025-09-11 12:14:15] 
[Rank 0] step:1381/10000 train_time:83151ms step_avg:60.21ms +[2025-09-11 12:14:15] [Rank 0] step:1381/10000 train_time:83151ms step_avg:60.21ms +[2025-09-11 12:14:16] [Rank 0] step:1401/10000 train_time:83797ms step_avg:59.81ms +[2025-09-11 12:14:16] [Rank 0] step:1401/10000 train_time:83797ms step_avg:59.81ms +[2025-09-11 12:14:16] [Rank 0] step:1421/10000 train_time:84444ms step_avg:59.43ms +[2025-09-11 12:14:16] [Rank 0] step:1421/10000 train_time:84444ms step_avg:59.43ms +[2025-09-11 12:14:17] [Rank 0] step:1441/10000 train_time:85242ms step_avg:59.15ms +[2025-09-11 12:14:17] [Rank 0] step:1441/10000 train_time:85242ms step_avg:59.15ms +[2025-09-11 12:14:18] [Rank 0] step:1461/10000 train_time:86041ms step_avg:58.89ms +[2025-09-11 12:14:18] [Rank 0] step:1461/10000 train_time:86041ms step_avg:58.89ms +[2025-09-11 12:14:19] [Rank 0] step:1481/10000 train_time:86687ms step_avg:58.53ms +[2025-09-11 12:14:19] [Rank 0] step:1481/10000 train_time:86687ms step_avg:58.53ms +[2025-09-11 12:14:19] [Rank 0] step:1501/10000 train_time:87337ms step_avg:58.19ms +[2025-09-11 12:14:19] [Rank 0] step:1501/10000 train_time:87337ms step_avg:58.19ms +[2025-09-11 12:14:20] [Rank 0] step:1521/10000 train_time:87987ms step_avg:57.85ms +[2025-09-11 12:14:20] [Rank 0] step:1521/10000 train_time:87987ms step_avg:57.85ms +[2025-09-11 12:14:21] [Rank 0] step:1541/10000 train_time:88637ms step_avg:57.52ms +[2025-09-11 12:14:21] [Rank 0] step:1541/10000 train_time:88637ms step_avg:57.52ms +[2025-09-11 12:14:21] [Rank 0] step:1561/10000 train_time:89288ms step_avg:57.20ms +[2025-09-11 12:14:21] [Rank 0] step:1561/10000 train_time:89288ms step_avg:57.20ms +[2025-09-11 12:14:22] [Rank 0] step:1581/10000 train_time:89938ms step_avg:56.89ms +[2025-09-11 12:14:22] [Rank 0] step:1581/10000 train_time:89938ms step_avg:56.89ms +[2025-09-11 12:14:23] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 12:14:23] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 12:14:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 12:14:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 12:14:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 12:14:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 12:14:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:14:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:14:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 12:14:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 12:14:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 12:14:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 12:14:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 12:14:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 12:14:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 12:14:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 12:14:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 12:14:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 12:14:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 12:14:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 12:14:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 12:14:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 12:14:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 12:14:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 12:14:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 12:14:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 12:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 12:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 12:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 12:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 12:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 12:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 12:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 12:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 12:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 12:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 12:14:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 12:14:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 12:14:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 12:14:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 12:14:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 12:14:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 12:14:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 12:14:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 12:14:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 12:14:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 12:14:33] [Rank 0] PRINT: step:1600/10000 val_loss:5.7788 total_sharp:2.0125e-02 L1_sharp:2.7317e-02 L2_sharp:2.2904e-02 L3_sharp:2.3484e-02 L4_sharp:3.1793e-02 L5_sharp:4.0136e-02 L6_sharp:6.2037e-02 L7_sharp:7.8562e-02 L8_sharp:1.2584e-01 L9_sharp:1.3155e-01 L10_sharp:1.5875e-01 L11_sharp:2.0774e-01 L12_sharp:5.2966e-01 total_fnorm:5.4062e+00 total_l1_linf:7.4880e+03 total_spectral:2.7344e+00 L1_fnorm:2.5000e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.5000e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4707e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4707e-01 L8_fnorm:2.4707e-01 L9_fnorm:2.5000e-01 L10_fnorm:2.5195e-01 L11_fnorm:2.5195e-01 L12_fnorm:2.2949e-01 L1_l1linf:7.4219e-02 L2_l1linf:7.4219e-02 L3_l1linf:7.3730e-02 L4_l1linf:7.2754e-02 L5_l1linf:7.2266e-02 L6_l1linf:7.1289e-02 L7_l1linf:7.2266e-02 L8_l1linf:7.4707e-02 L9_l1linf:7.5195e-02 L10_l1linf:7.6660e-02 L11_l1linf:7.7148e-02 L12_l1linf:5.4443e-02 L1_spectral:3.1539e-03 L2_spectral:3.1623e-03 L3_spectral:3.1410e-03 L4_spectral:3.1448e-03 L5_spectral:3.1244e-03 L6_spectral:3.1390e-03 L7_spectral:3.1246e-03 L8_spectral:3.1149e-03 L9_spectral:3.1297e-03 L10_spectral:3.1521e-03 L11_spectral:3.1236e-03 L12_spectral:3.1143e-03 train_time:90571ms step_avg:56.61ms +[2025-09-11 12:14:33] [Rank 0] PRINT: step:1600/10000 
val_loss:5.7788 total_sharp:2.0125e-02 L1_sharp:2.7317e-02 L2_sharp:2.2904e-02 L3_sharp:2.3484e-02 L4_sharp:3.1793e-02 L5_sharp:4.0136e-02 L6_sharp:6.2037e-02 L7_sharp:7.8562e-02 L8_sharp:1.2584e-01 L9_sharp:1.3155e-01 L10_sharp:1.5875e-01 L11_sharp:2.0774e-01 L12_sharp:5.2966e-01 total_fnorm:5.4062e+00 total_l1_linf:7.4880e+03 total_spectral:2.7344e+00 L1_fnorm:2.5000e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.5000e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4707e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4707e-01 L8_fnorm:2.4707e-01 L9_fnorm:2.5000e-01 L10_fnorm:2.5195e-01 L11_fnorm:2.5195e-01 L12_fnorm:2.2949e-01 L1_l1linf:7.4219e-02 L2_l1linf:7.4219e-02 L3_l1linf:7.3730e-02 L4_l1linf:7.2754e-02 L5_l1linf:7.2266e-02 L6_l1linf:7.1289e-02 L7_l1linf:7.2266e-02 L8_l1linf:7.4707e-02 L9_l1linf:7.5195e-02 L10_l1linf:7.6660e-02 L11_l1linf:7.7148e-02 L12_l1linf:5.4443e-02 L1_spectral:3.1539e-03 L2_spectral:3.1623e-03 L3_spectral:3.1410e-03 L4_spectral:3.1448e-03 L5_spectral:3.1244e-03 L6_spectral:3.1390e-03 L7_spectral:3.1246e-03 L8_spectral:3.1149e-03 L9_spectral:3.1297e-03 L10_spectral:3.1521e-03 L11_spectral:3.1236e-03 L12_spectral:3.1143e-03 train_time:90571ms step_avg:56.61ms +[2025-09-11 12:14:35] [Rank 0] step:1601/10000 train_time:92070ms step_avg:57.51ms +[2025-09-11 12:14:35] [Rank 0] step:1601/10000 train_time:92070ms step_avg:57.51ms +[2025-09-11 12:14:35] [Rank 0] step:1621/10000 train_time:92727ms step_avg:57.20ms +[2025-09-11 12:14:35] [Rank 0] step:1621/10000 train_time:92727ms step_avg:57.20ms +[2025-09-11 12:14:36] [Rank 0] step:1641/10000 train_time:93378ms step_avg:56.90ms +[2025-09-11 12:14:36] [Rank 0] step:1641/10000 train_time:93378ms step_avg:56.90ms +[2025-09-11 12:14:37] [Rank 0] step:1661/10000 train_time:94029ms step_avg:56.61ms +[2025-09-11 12:14:37] [Rank 0] step:1661/10000 train_time:94029ms step_avg:56.61ms +[2025-09-11 12:14:37] [Rank 0] step:1681/10000 train_time:94679ms step_avg:56.32ms +[2025-09-11 12:14:37] [Rank 0] step:1681/10000 train_time:94679ms 
step_avg:56.32ms +[2025-09-11 12:14:38] [Rank 0] step:1701/10000 train_time:95329ms step_avg:56.04ms +[2025-09-11 12:14:38] [Rank 0] step:1701/10000 train_time:95329ms step_avg:56.04ms +[2025-09-11 12:14:38] [Rank 0] step:1721/10000 train_time:95978ms step_avg:55.77ms +[2025-09-11 12:14:38] [Rank 0] step:1721/10000 train_time:95978ms step_avg:55.77ms +[2025-09-11 12:14:39] [Rank 0] step:1741/10000 train_time:96628ms step_avg:55.50ms +[2025-09-11 12:14:39] [Rank 0] step:1741/10000 train_time:96628ms step_avg:55.50ms +[2025-09-11 12:14:40] [Rank 0] step:1761/10000 train_time:97278ms step_avg:55.24ms +[2025-09-11 12:14:40] [Rank 0] step:1761/10000 train_time:97278ms step_avg:55.24ms +[2025-09-11 12:14:40] [Rank 0] step:1781/10000 train_time:97928ms step_avg:54.98ms +[2025-09-11 12:14:40] [Rank 0] step:1781/10000 train_time:97928ms step_avg:54.98ms +[2025-09-11 12:14:41] [Rank 0] step:1801/10000 train_time:98578ms step_avg:54.73ms +[2025-09-11 12:14:41] [Rank 0] step:1801/10000 train_time:98578ms step_avg:54.73ms +[2025-09-11 12:14:42] [Rank 0] step:1821/10000 train_time:99228ms step_avg:54.49ms +[2025-09-11 12:14:42] [Rank 0] step:1821/10000 train_time:99228ms step_avg:54.49ms +[2025-09-11 12:14:42] [Rank 0] step:1841/10000 train_time:99878ms step_avg:54.25ms +[2025-09-11 12:14:42] [Rank 0] step:1841/10000 train_time:99878ms step_avg:54.25ms +[2025-09-11 12:14:43] [Rank 0] step:1861/10000 train_time:100530ms step_avg:54.02ms +[2025-09-11 12:14:43] [Rank 0] step:1861/10000 train_time:100530ms step_avg:54.02ms +[2025-09-11 12:14:44] [Rank 0] step:1881/10000 train_time:101178ms step_avg:53.79ms +[2025-09-11 12:14:44] [Rank 0] step:1881/10000 train_time:101178ms step_avg:53.79ms +[2025-09-11 12:14:44] [Rank 0] step:1901/10000 train_time:101828ms step_avg:53.57ms +[2025-09-11 12:14:44] [Rank 0] step:1901/10000 train_time:101828ms step_avg:53.57ms +[2025-09-11 12:14:45] [Rank 0] step:1921/10000 train_time:102477ms step_avg:53.35ms +[2025-09-11 12:14:45] [Rank 0] 
step:1921/10000 train_time:102477ms step_avg:53.35ms +[2025-09-11 12:14:46] [Rank 0] step:1941/10000 train_time:103127ms step_avg:53.13ms +[2025-09-11 12:14:46] [Rank 0] step:1941/10000 train_time:103127ms step_avg:53.13ms +[2025-09-11 12:14:46] [Rank 0] step:1961/10000 train_time:103779ms step_avg:52.92ms +[2025-09-11 12:14:46] [Rank 0] step:1961/10000 train_time:103779ms step_avg:52.92ms +[2025-09-11 12:14:47] [Rank 0] step:1981/10000 train_time:104431ms step_avg:52.72ms +[2025-09-11 12:14:47] [Rank 0] step:1981/10000 train_time:104431ms step_avg:52.72ms +[2025-09-11 12:14:48] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 12:14:48] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 12:14:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 12:14:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 12:14:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 12:14:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 12:14:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:14:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:14:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 12:14:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 12:14:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 12:14:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 12:14:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 12:14:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 12:14:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 12:14:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 12:14:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 12:14:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 12:14:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 12:14:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 12:14:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 12:14:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 12:14:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 12:14:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 12:14:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 12:14:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 12:14:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 12:14:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 12:14:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 12:14:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 12:14:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 12:14:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 12:14:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 12:14:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 12:14:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 12:14:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 12:14:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 12:14:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 12:14:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 12:14:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 12:14:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 12:14:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 12:14:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 12:14:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 12:14:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 12:14:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:14:58] [Rank 0] PRINT: step:2000/10000 val_loss:5.6265 total_sharp:1.9252e-02 L1_sharp:1.9192e-02 L2_sharp:1.6096e-02 L3_sharp:1.6402e-02 L4_sharp:1.9166e-02 L5_sharp:2.8416e-02 L6_sharp:4.8524e-02 L7_sharp:5.8517e-02 L8_sharp:9.8207e-02 L9_sharp:1.1477e-01 L10_sharp:1.4934e-01 L11_sharp:2.0449e-01 L12_sharp:1.0238e+00 total_fnorm:5.3750e+00 total_l1_linf:7.5200e+03 total_spectral:2.7344e+00 L1_fnorm:2.5000e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.5000e-01 L4_fnorm:2.4902e-01 L5_fnorm:2.4707e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4707e-01 L8_fnorm:2.4609e-01 L9_fnorm:2.4902e-01 L10_fnorm:2.5000e-01 L11_fnorm:2.5195e-01 L12_fnorm:2.3047e-01 L1_l1linf:7.3242e-02 L2_l1linf:7.2754e-02 L3_l1linf:7.1777e-02 L4_l1linf:7.1777e-02 L5_l1linf:7.0801e-02 L6_l1linf:7.0312e-02 L7_l1linf:7.0801e-02 L8_l1linf:7.0312e-02 L9_l1linf:7.3242e-02 L10_l1linf:7.4707e-02 L11_l1linf:7.5195e-02 L12_l1linf:5.1758e-02 L1_spectral:3.1736e-03 L2_spectral:3.1588e-03 L3_spectral:3.1776e-03 L4_spectral:3.1566e-03 L5_spectral:3.1529e-03 L6_spectral:3.1352e-03 L7_spectral:3.1415e-03 L8_spectral:3.1342e-03 L9_spectral:3.1582e-03 L10_spectral:3.1475e-03 L11_spectral:3.1201e-03 L12_spectral:3.1120e-03 train_time:105063ms step_avg:52.53ms +[2025-09-11 12:14:58] [Rank 0] PRINT: step:2000/10000 val_loss:5.6265 total_sharp:1.9252e-02 L1_sharp:1.9192e-02 L2_sharp:1.6096e-02 L3_sharp:1.6402e-02 L4_sharp:1.9166e-02 L5_sharp:2.8416e-02 L6_sharp:4.8524e-02 L7_sharp:5.8517e-02 L8_sharp:9.8207e-02 L9_sharp:1.1477e-01 L10_sharp:1.4934e-01 L11_sharp:2.0449e-01 L12_sharp:1.0238e+00 total_fnorm:5.3750e+00 total_l1_linf:7.5200e+03 total_spectral:2.7344e+00 L1_fnorm:2.5000e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.5000e-01 L4_fnorm:2.4902e-01 L5_fnorm:2.4707e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4707e-01 L8_fnorm:2.4609e-01 L9_fnorm:2.4902e-01 L10_fnorm:2.5000e-01 L11_fnorm:2.5195e-01 L12_fnorm:2.3047e-01 L1_l1linf:7.3242e-02 L2_l1linf:7.2754e-02 L3_l1linf:7.1777e-02 L4_l1linf:7.1777e-02 L5_l1linf:7.0801e-02 
L6_l1linf:7.0312e-02 L7_l1linf:7.0801e-02 L8_l1linf:7.0312e-02 L9_l1linf:7.3242e-02 L10_l1linf:7.4707e-02 L11_l1linf:7.5195e-02 L12_l1linf:5.1758e-02 L1_spectral:3.1736e-03 L2_spectral:3.1588e-03 L3_spectral:3.1776e-03 L4_spectral:3.1566e-03 L5_spectral:3.1529e-03 L6_spectral:3.1352e-03 L7_spectral:3.1415e-03 L8_spectral:3.1342e-03 L9_spectral:3.1582e-03 L10_spectral:3.1475e-03 L11_spectral:3.1201e-03 L12_spectral:3.1120e-03 train_time:105063ms step_avg:52.53ms +[2025-09-11 12:15:00] [Rank 0] step:2001/10000 train_time:106730ms step_avg:53.34ms +[2025-09-11 12:15:00] [Rank 0] step:2001/10000 train_time:106730ms step_avg:53.34ms +[2025-09-11 12:15:01] [Rank 0] step:2021/10000 train_time:107385ms step_avg:53.13ms +[2025-09-11 12:15:01] [Rank 0] step:2021/10000 train_time:107385ms step_avg:53.13ms +[2025-09-11 12:15:01] [Rank 0] step:2041/10000 train_time:108036ms step_avg:52.93ms +[2025-09-11 12:15:01] [Rank 0] step:2041/10000 train_time:108036ms step_avg:52.93ms +[2025-09-11 12:15:02] [Rank 0] step:2061/10000 train_time:108687ms step_avg:52.74ms +[2025-09-11 12:15:02] [Rank 0] step:2061/10000 train_time:108687ms step_avg:52.74ms +[2025-09-11 12:15:03] [Rank 0] step:2081/10000 train_time:109339ms step_avg:52.54ms +[2025-09-11 12:15:03] [Rank 0] step:2081/10000 train_time:109339ms step_avg:52.54ms +[2025-09-11 12:15:03] [Rank 0] step:2101/10000 train_time:109989ms step_avg:52.35ms +[2025-09-11 12:15:03] [Rank 0] step:2101/10000 train_time:109989ms step_avg:52.35ms +[2025-09-11 12:15:04] [Rank 0] step:2121/10000 train_time:110685ms step_avg:52.19ms +[2025-09-11 12:15:04] [Rank 0] step:2121/10000 train_time:110685ms step_avg:52.19ms +[2025-09-11 12:15:05] [Rank 0] step:2141/10000 train_time:111336ms step_avg:52.00ms +[2025-09-11 12:15:05] [Rank 0] step:2141/10000 train_time:111336ms step_avg:52.00ms +[2025-09-11 12:15:05] [Rank 0] step:2161/10000 train_time:112039ms step_avg:51.85ms +[2025-09-11 12:15:05] [Rank 0] step:2161/10000 train_time:112039ms step_avg:51.85ms 
+[2025-09-11 12:15:06] [Rank 0] step:2181/10000 train_time:112689ms step_avg:51.67ms +[2025-09-11 12:15:06] [Rank 0] step:2181/10000 train_time:112689ms step_avg:51.67ms +[2025-09-11 12:15:07] [Rank 0] step:2201/10000 train_time:113340ms step_avg:51.49ms +[2025-09-11 12:15:07] [Rank 0] step:2201/10000 train_time:113340ms step_avg:51.49ms +[2025-09-11 12:15:07] [Rank 0] step:2221/10000 train_time:113989ms step_avg:51.32ms +[2025-09-11 12:15:07] [Rank 0] step:2221/10000 train_time:113989ms step_avg:51.32ms +[2025-09-11 12:15:08] [Rank 0] step:2241/10000 train_time:114651ms step_avg:51.16ms +[2025-09-11 12:15:08] [Rank 0] step:2241/10000 train_time:114651ms step_avg:51.16ms +[2025-09-11 12:15:09] [Rank 0] step:2261/10000 train_time:115314ms step_avg:51.00ms +[2025-09-11 12:15:09] [Rank 0] step:2261/10000 train_time:115314ms step_avg:51.00ms +[2025-09-11 12:15:09] [Rank 0] step:2281/10000 train_time:115977ms step_avg:50.84ms +[2025-09-11 12:15:09] [Rank 0] step:2281/10000 train_time:115977ms step_avg:50.84ms +[2025-09-11 12:15:10] [Rank 0] step:2301/10000 train_time:116639ms step_avg:50.69ms +[2025-09-11 12:15:10] [Rank 0] step:2301/10000 train_time:116639ms step_avg:50.69ms +[2025-09-11 12:15:11] [Rank 0] step:2321/10000 train_time:117302ms step_avg:50.54ms +[2025-09-11 12:15:11] [Rank 0] step:2321/10000 train_time:117302ms step_avg:50.54ms +[2025-09-11 12:15:11] [Rank 0] step:2341/10000 train_time:117965ms step_avg:50.39ms +[2025-09-11 12:15:11] [Rank 0] step:2341/10000 train_time:117965ms step_avg:50.39ms +[2025-09-11 12:15:12] [Rank 0] step:2361/10000 train_time:118629ms step_avg:50.25ms +[2025-09-11 12:15:12] [Rank 0] step:2361/10000 train_time:118629ms step_avg:50.25ms +[2025-09-11 12:15:13] [Rank 0] step:2381/10000 train_time:119292ms step_avg:50.10ms +[2025-09-11 12:15:13] [Rank 0] step:2381/10000 train_time:119292ms step_avg:50.10ms +[2025-09-11 12:15:13] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 12:15:13] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 12:15:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 12:15:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 12:15:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 12:15:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 12:15:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:15:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:15:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 12:15:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 12:15:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 12:15:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 12:15:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 12:15:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 12:15:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 12:15:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 12:15:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 12:15:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 12:15:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 12:15:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 12:15:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 12:15:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 12:15:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 12:15:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 12:15:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 12:15:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 12:15:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 12:15:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 12:15:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 12:15:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 12:15:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 12:15:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 12:15:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 12:15:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 12:15:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 12:15:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 12:15:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 12:15:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 12:15:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 12:15:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 12:15:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 12:15:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 12:15:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 12:15:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 12:15:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 12:15:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 12:15:24] [Rank 0] PRINT: step:2400/10000 val_loss:5.4996 total_sharp:1.7925e-02 L1_sharp:1.6716e-02 L2_sharp:1.4904e-02 L3_sharp:1.3732e-02 L4_sharp:1.5230e-02 L5_sharp:2.0016e-02 L6_sharp:2.9947e-02 L7_sharp:3.8845e-02 L8_sharp:7.0842e-02 L9_sharp:9.6327e-02 L10_sharp:1.3567e-01 L11_sharp:1.7984e-01 L12_sharp:9.9099e-01 total_fnorm:4.9375e+00 total_l1_linf:6.7840e+03 total_spectral:2.5156e+00 L1_fnorm:2.5000e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.5000e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4707e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4609e-01 L8_fnorm:2.4609e-01 L9_fnorm:2.4805e-01 L10_fnorm:2.4902e-01 L11_fnorm:2.5195e-01 L12_fnorm:2.3633e-01 L1_l1linf:7.2754e-02 L2_l1linf:7.1289e-02 L3_l1linf:7.1289e-02 L4_l1linf:7.0801e-02 L5_l1linf:6.9824e-02 L6_l1linf:6.8359e-02 L7_l1linf:6.8359e-02 L8_l1linf:6.7871e-02 L9_l1linf:6.8848e-02 L10_l1linf:7.0801e-02 L11_l1linf:7.4707e-02 L12_l1linf:5.2002e-02 L1_spectral:3.1941e-03 L2_spectral:3.1743e-03 L3_spectral:3.1818e-03 L4_spectral:3.1773e-03 L5_spectral:3.1763e-03 L6_spectral:3.1447e-03 L7_spectral:3.1484e-03 L8_spectral:3.1398e-03 L9_spectral:3.1377e-03 L10_spectral:3.1537e-03 L11_spectral:3.1416e-03 L12_spectral:3.1090e-03 train_time:119935ms step_avg:49.97ms +[2025-09-11 12:15:24] [Rank 0] PRINT: step:2400/10000 
val_loss:5.4996 total_sharp:1.7925e-02 L1_sharp:1.6716e-02 L2_sharp:1.4904e-02 L3_sharp:1.3732e-02 L4_sharp:1.5230e-02 L5_sharp:2.0016e-02 L6_sharp:2.9947e-02 L7_sharp:3.8845e-02 L8_sharp:7.0842e-02 L9_sharp:9.6327e-02 L10_sharp:1.3567e-01 L11_sharp:1.7984e-01 L12_sharp:9.9099e-01 total_fnorm:4.9375e+00 total_l1_linf:6.7840e+03 total_spectral:2.5156e+00 L1_fnorm:2.5000e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.5000e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4707e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4609e-01 L8_fnorm:2.4609e-01 L9_fnorm:2.4805e-01 L10_fnorm:2.4902e-01 L11_fnorm:2.5195e-01 L12_fnorm:2.3633e-01 L1_l1linf:7.2754e-02 L2_l1linf:7.1289e-02 L3_l1linf:7.1289e-02 L4_l1linf:7.0801e-02 L5_l1linf:6.9824e-02 L6_l1linf:6.8359e-02 L7_l1linf:6.8359e-02 L8_l1linf:6.7871e-02 L9_l1linf:6.8848e-02 L10_l1linf:7.0801e-02 L11_l1linf:7.4707e-02 L12_l1linf:5.2002e-02 L1_spectral:3.1941e-03 L2_spectral:3.1743e-03 L3_spectral:3.1818e-03 L4_spectral:3.1773e-03 L5_spectral:3.1763e-03 L6_spectral:3.1447e-03 L7_spectral:3.1484e-03 L8_spectral:3.1398e-03 L9_spectral:3.1377e-03 L10_spectral:3.1537e-03 L11_spectral:3.1416e-03 L12_spectral:3.1090e-03 train_time:119935ms step_avg:49.97ms +[2025-09-11 12:15:25] [Rank 0] step:2401/10000 train_time:121551ms step_avg:50.63ms +[2025-09-11 12:15:25] [Rank 0] step:2401/10000 train_time:121551ms step_avg:50.63ms +[2025-09-11 12:15:26] [Rank 0] step:2421/10000 train_time:122247ms step_avg:50.49ms +[2025-09-11 12:15:26] [Rank 0] step:2421/10000 train_time:122247ms step_avg:50.49ms +[2025-09-11 12:15:27] [Rank 0] step:2441/10000 train_time:122912ms step_avg:50.35ms +[2025-09-11 12:15:27] [Rank 0] step:2441/10000 train_time:122912ms step_avg:50.35ms +[2025-09-11 12:15:27] [Rank 0] step:2461/10000 train_time:123577ms step_avg:50.21ms +[2025-09-11 12:15:27] [Rank 0] step:2461/10000 train_time:123577ms step_avg:50.21ms +[2025-09-11 12:15:28] [Rank 0] step:2481/10000 train_time:124242ms step_avg:50.08ms +[2025-09-11 12:15:28] [Rank 0] step:2481/10000 
train_time:124242ms step_avg:50.08ms +[2025-09-11 12:15:29] [Rank 0] step:2501/10000 train_time:124905ms step_avg:49.94ms +[2025-09-11 12:15:29] [Rank 0] step:2501/10000 train_time:124905ms step_avg:49.94ms +[2025-09-11 12:15:29] [Rank 0] step:2521/10000 train_time:125570ms step_avg:49.81ms +[2025-09-11 12:15:29] [Rank 0] step:2521/10000 train_time:125570ms step_avg:49.81ms +[2025-09-11 12:15:30] [Rank 0] step:2541/10000 train_time:126233ms step_avg:49.68ms +[2025-09-11 12:15:30] [Rank 0] step:2541/10000 train_time:126233ms step_avg:49.68ms +[2025-09-11 12:15:31] [Rank 0] step:2561/10000 train_time:126897ms step_avg:49.55ms +[2025-09-11 12:15:31] [Rank 0] step:2561/10000 train_time:126897ms step_avg:49.55ms +[2025-09-11 12:15:31] [Rank 0] step:2581/10000 train_time:127562ms step_avg:49.42ms +[2025-09-11 12:15:31] [Rank 0] step:2581/10000 train_time:127562ms step_avg:49.42ms +[2025-09-11 12:15:32] [Rank 0] step:2601/10000 train_time:128226ms step_avg:49.30ms +[2025-09-11 12:15:32] [Rank 0] step:2601/10000 train_time:128226ms step_avg:49.30ms +[2025-09-11 12:15:33] [Rank 0] step:2621/10000 train_time:128890ms step_avg:49.18ms +[2025-09-11 12:15:33] [Rank 0] step:2621/10000 train_time:128890ms step_avg:49.18ms +[2025-09-11 12:15:33] [Rank 0] step:2641/10000 train_time:129554ms step_avg:49.05ms +[2025-09-11 12:15:33] [Rank 0] step:2641/10000 train_time:129554ms step_avg:49.05ms +[2025-09-11 12:15:34] [Rank 0] step:2661/10000 train_time:130218ms step_avg:48.94ms +[2025-09-11 12:15:34] [Rank 0] step:2661/10000 train_time:130218ms step_avg:48.94ms +[2025-09-11 12:15:35] [Rank 0] step:2681/10000 train_time:130881ms step_avg:48.82ms +[2025-09-11 12:15:35] [Rank 0] step:2681/10000 train_time:130881ms step_avg:48.82ms +[2025-09-11 12:15:35] [Rank 0] step:2701/10000 train_time:131547ms step_avg:48.70ms +[2025-09-11 12:15:35] [Rank 0] step:2701/10000 train_time:131547ms step_avg:48.70ms +[2025-09-11 12:15:36] [Rank 0] step:2721/10000 train_time:132211ms step_avg:48.59ms 
+[2025-09-11 12:15:36] [Rank 0] step:2721/10000 train_time:132211ms step_avg:48.59ms +[2025-09-11 12:15:37] [Rank 0] step:2741/10000 train_time:132876ms step_avg:48.48ms +[2025-09-11 12:15:37] [Rank 0] step:2741/10000 train_time:132876ms step_avg:48.48ms +[2025-09-11 12:15:37] [Rank 0] step:2761/10000 train_time:133541ms step_avg:48.37ms +[2025-09-11 12:15:37] [Rank 0] step:2761/10000 train_time:133541ms step_avg:48.37ms +[2025-09-11 12:15:38] [Rank 0] step:2781/10000 train_time:134205ms step_avg:48.26ms +[2025-09-11 12:15:38] [Rank 0] step:2781/10000 train_time:134205ms step_avg:48.26ms +[2025-09-11 12:15:39] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 12:15:39] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 12:15:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 12:15:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 12:15:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 12:15:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 12:15:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:15:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:15:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 12:15:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 12:15:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 12:15:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 12:15:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 12:15:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 12:15:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 12:15:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 12:15:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 12:15:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 12:15:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 12:15:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 12:15:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 12:15:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 12:15:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 12:15:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 12:15:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 12:15:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 12:15:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 12:15:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 12:15:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 12:15:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 12:15:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 12:15:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 12:15:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 12:15:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 12:15:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 12:15:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 12:15:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 12:15:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 12:15:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 12:15:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 12:15:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 12:15:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 12:15:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 12:15:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 12:15:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 12:15:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:15:49] [Rank 0] PRINT: step:2800/10000 val_loss:5.4150 total_sharp:2.3181e-02 L1_sharp:1.7375e-02 L2_sharp:1.4759e-02 L3_sharp:1.5022e-02 L4_sharp:1.5697e-02 L5_sharp:2.5467e-02 L6_sharp:3.2389e-02 L7_sharp:4.7275e-02 L8_sharp:8.5908e-02 L9_sharp:1.0376e-01 L10_sharp:1.5637e-01 L11_sharp:1.9736e-01 L12_sharp:6.1692e-01 total_fnorm:4.5625e+00 total_l1_linf:6.2080e+03 total_spectral:2.3281e+00 L1_fnorm:2.5000e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.5000e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4707e-01 L6_fnorm:2.4512e-01 L7_fnorm:2.4609e-01 L8_fnorm:2.4609e-01 L9_fnorm:2.4805e-01 L10_fnorm:2.4902e-01 L11_fnorm:2.5195e-01 L12_fnorm:2.3535e-01 L1_l1linf:7.0312e-02 L2_l1linf:7.0801e-02 L3_l1linf:7.0312e-02 L4_l1linf:6.9336e-02 L5_l1linf:6.8359e-02 L6_l1linf:6.7383e-02 L7_l1linf:6.6406e-02 L8_l1linf:6.6406e-02 L9_l1linf:6.9824e-02 L10_l1linf:7.0312e-02 L11_l1linf:7.3242e-02 L12_l1linf:4.9805e-02 L1_spectral:3.1864e-03 L2_spectral:3.1804e-03 L3_spectral:3.1820e-03 L4_spectral:3.1737e-03 L5_spectral:3.1738e-03 L6_spectral:3.1758e-03 L7_spectral:3.1475e-03 L8_spectral:3.1376e-03 L9_spectral:3.1510e-03 L10_spectral:3.1455e-03 L11_spectral:3.1452e-03 L12_spectral:3.1414e-03 train_time:134851ms step_avg:48.16ms +[2025-09-11 12:15:49] [Rank 0] PRINT: step:2800/10000 val_loss:5.4150 total_sharp:2.3181e-02 L1_sharp:1.7375e-02 L2_sharp:1.4759e-02 L3_sharp:1.5022e-02 L4_sharp:1.5697e-02 L5_sharp:2.5467e-02 L6_sharp:3.2389e-02 L7_sharp:4.7275e-02 L8_sharp:8.5908e-02 L9_sharp:1.0376e-01 L10_sharp:1.5637e-01 L11_sharp:1.9736e-01 L12_sharp:6.1692e-01 total_fnorm:4.5625e+00 total_l1_linf:6.2080e+03 total_spectral:2.3281e+00 L1_fnorm:2.5000e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.5000e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4707e-01 L6_fnorm:2.4512e-01 L7_fnorm:2.4609e-01 L8_fnorm:2.4609e-01 L9_fnorm:2.4805e-01 L10_fnorm:2.4902e-01 L11_fnorm:2.5195e-01 L12_fnorm:2.3535e-01 L1_l1linf:7.0312e-02 L2_l1linf:7.0801e-02 L3_l1linf:7.0312e-02 L4_l1linf:6.9336e-02 L5_l1linf:6.8359e-02 
L6_l1linf:6.7383e-02 L7_l1linf:6.6406e-02 L8_l1linf:6.6406e-02 L9_l1linf:6.9824e-02 L10_l1linf:7.0312e-02 L11_l1linf:7.3242e-02 L12_l1linf:4.9805e-02 L1_spectral:3.1864e-03 L2_spectral:3.1804e-03 L3_spectral:3.1820e-03 L4_spectral:3.1737e-03 L5_spectral:3.1738e-03 L6_spectral:3.1758e-03 L7_spectral:3.1475e-03 L8_spectral:3.1376e-03 L9_spectral:3.1510e-03 L10_spectral:3.1455e-03 L11_spectral:3.1452e-03 L12_spectral:3.1414e-03 train_time:134851ms step_avg:48.16ms +[2025-09-11 12:15:51] [Rank 0] step:2801/10000 train_time:136495ms step_avg:48.73ms +[2025-09-11 12:15:51] [Rank 0] step:2801/10000 train_time:136495ms step_avg:48.73ms +[2025-09-11 12:15:52] [Rank 0] step:2821/10000 train_time:137187ms step_avg:48.63ms +[2025-09-11 12:15:52] [Rank 0] step:2821/10000 train_time:137187ms step_avg:48.63ms +[2025-09-11 12:15:53] [Rank 0] step:2841/10000 train_time:137851ms step_avg:48.52ms +[2025-09-11 12:15:53] [Rank 0] step:2841/10000 train_time:137851ms step_avg:48.52ms +[2025-09-11 12:15:53] [Rank 0] step:2861/10000 train_time:138516ms step_avg:48.42ms +[2025-09-11 12:15:53] [Rank 0] step:2861/10000 train_time:138516ms step_avg:48.42ms +[2025-09-11 12:15:54] [Rank 0] step:2881/10000 train_time:139180ms step_avg:48.31ms +[2025-09-11 12:15:54] [Rank 0] step:2881/10000 train_time:139180ms step_avg:48.31ms +[2025-09-11 12:15:55] [Rank 0] step:2901/10000 train_time:139844ms step_avg:48.21ms +[2025-09-11 12:15:55] [Rank 0] step:2901/10000 train_time:139844ms step_avg:48.21ms +[2025-09-11 12:15:55] [Rank 0] step:2921/10000 train_time:140508ms step_avg:48.10ms +[2025-09-11 12:15:55] [Rank 0] step:2921/10000 train_time:140508ms step_avg:48.10ms +[2025-09-11 12:15:56] [Rank 0] step:2941/10000 train_time:141172ms step_avg:48.00ms +[2025-09-11 12:15:56] [Rank 0] step:2941/10000 train_time:141172ms step_avg:48.00ms +[2025-09-11 12:15:56] [Rank 0] step:2961/10000 train_time:141835ms step_avg:47.90ms +[2025-09-11 12:15:56] [Rank 0] step:2961/10000 train_time:141835ms step_avg:47.90ms 
+[2025-09-11 12:15:57] [Rank 0] step:2981/10000 train_time:142501ms step_avg:47.80ms +[2025-09-11 12:15:57] [Rank 0] step:2981/10000 train_time:142501ms step_avg:47.80ms +[2025-09-11 12:15:58] [Rank 0] step:3001/10000 train_time:143167ms step_avg:47.71ms +[2025-09-11 12:15:58] [Rank 0] step:3001/10000 train_time:143167ms step_avg:47.71ms +[2025-09-11 12:15:58] [Rank 0] step:3021/10000 train_time:143833ms step_avg:47.61ms +[2025-09-11 12:15:58] [Rank 0] step:3021/10000 train_time:143833ms step_avg:47.61ms +[2025-09-11 12:15:59] [Rank 0] step:3041/10000 train_time:144500ms step_avg:47.52ms +[2025-09-11 12:15:59] [Rank 0] step:3041/10000 train_time:144500ms step_avg:47.52ms +[2025-09-11 12:16:00] [Rank 0] step:3061/10000 train_time:145166ms step_avg:47.42ms +[2025-09-11 12:16:00] [Rank 0] step:3061/10000 train_time:145166ms step_avg:47.42ms +[2025-09-11 12:16:00] [Rank 0] step:3081/10000 train_time:145833ms step_avg:47.33ms +[2025-09-11 12:16:00] [Rank 0] step:3081/10000 train_time:145833ms step_avg:47.33ms +[2025-09-11 12:16:01] [Rank 0] step:3101/10000 train_time:146499ms step_avg:47.24ms +[2025-09-11 12:16:01] [Rank 0] step:3101/10000 train_time:146499ms step_avg:47.24ms +[2025-09-11 12:16:02] [Rank 0] step:3121/10000 train_time:147167ms step_avg:47.15ms +[2025-09-11 12:16:02] [Rank 0] step:3121/10000 train_time:147167ms step_avg:47.15ms +[2025-09-11 12:16:02] [Rank 0] step:3141/10000 train_time:147834ms step_avg:47.07ms +[2025-09-11 12:16:02] [Rank 0] step:3141/10000 train_time:147834ms step_avg:47.07ms +[2025-09-11 12:16:03] [Rank 0] step:3161/10000 train_time:148500ms step_avg:46.98ms +[2025-09-11 12:16:03] [Rank 0] step:3161/10000 train_time:148500ms step_avg:46.98ms +[2025-09-11 12:16:04] [Rank 0] step:3181/10000 train_time:149167ms step_avg:46.89ms +[2025-09-11 12:16:04] [Rank 0] step:3181/10000 train_time:149167ms step_avg:46.89ms +[2025-09-11 12:16:04] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 12:16:04] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 12:16:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 12:16:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 12:16:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 12:16:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 12:16:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:16:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:16:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 12:16:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 12:16:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 12:16:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 12:16:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 12:16:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 12:16:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 12:16:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 12:16:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 12:16:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 12:16:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 12:16:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 12:16:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 12:16:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 12:16:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 12:16:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 12:16:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 12:16:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 12:16:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 12:16:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 12:16:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 12:16:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 12:16:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 12:16:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 12:16:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 12:16:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 12:16:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 12:16:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 12:16:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 12:16:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 12:16:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 12:16:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 12:16:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 12:16:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 12:16:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 12:16:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 12:16:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 12:16:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 12:16:17] [Rank 0] PRINT: step:3200/10000 val_loss:5.3363 total_sharp:1.2780e-02 L1_sharp:1.7642e-02 L2_sharp:1.3716e-02 L3_sharp:1.2874e-02 L4_sharp:1.4297e-02 L5_sharp:2.1777e-02 L6_sharp:2.9648e-02 L7_sharp:3.6830e-02 L8_sharp:6.3001e-02 L9_sharp:7.3705e-02 L10_sharp:1.1875e-01 L11_sharp:1.5161e-01 L12_sharp:4.3514e-01 total_fnorm:5.1875e+00 total_l1_linf:7.1040e+03 total_spectral:2.6250e+00 L1_fnorm:2.5000e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4805e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4512e-01 L8_fnorm:2.4512e-01 L9_fnorm:2.4707e-01 L10_fnorm:2.4707e-01 L11_fnorm:2.5000e-01 L12_fnorm:2.4121e-01 L1_l1linf:7.0312e-02 L2_l1linf:7.1289e-02 L3_l1linf:6.9336e-02 L4_l1linf:6.9336e-02 L5_l1linf:6.7383e-02 L6_l1linf:6.5918e-02 L7_l1linf:6.5430e-02 L8_l1linf:6.4941e-02 L9_l1linf:6.4941e-02 L10_l1linf:6.7383e-02 L11_l1linf:6.9336e-02 L12_l1linf:5.2734e-02 L1_spectral:3.2065e-03 L2_spectral:3.2222e-03 L3_spectral:3.2119e-03 L4_spectral:3.1769e-03 L5_spectral:3.1713e-03 L6_spectral:3.1662e-03 L7_spectral:3.1573e-03 L8_spectral:3.1610e-03 L9_spectral:3.1746e-03 L10_spectral:3.1901e-03 L11_spectral:3.1570e-03 L12_spectral:3.1332e-03 train_time:149815ms step_avg:46.82ms +[2025-09-11 12:16:17] [Rank 0] PRINT: step:3200/10000 
val_loss:5.3363 total_sharp:1.2780e-02 L1_sharp:1.7642e-02 L2_sharp:1.3716e-02 L3_sharp:1.2874e-02 L4_sharp:1.4297e-02 L5_sharp:2.1777e-02 L6_sharp:2.9648e-02 L7_sharp:3.6830e-02 L8_sharp:6.3001e-02 L9_sharp:7.3705e-02 L10_sharp:1.1875e-01 L11_sharp:1.5161e-01 L12_sharp:4.3514e-01 total_fnorm:5.1875e+00 total_l1_linf:7.1040e+03 total_spectral:2.6250e+00 L1_fnorm:2.5000e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4805e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4512e-01 L8_fnorm:2.4512e-01 L9_fnorm:2.4707e-01 L10_fnorm:2.4707e-01 L11_fnorm:2.5000e-01 L12_fnorm:2.4121e-01 L1_l1linf:7.0312e-02 L2_l1linf:7.1289e-02 L3_l1linf:6.9336e-02 L4_l1linf:6.9336e-02 L5_l1linf:6.7383e-02 L6_l1linf:6.5918e-02 L7_l1linf:6.5430e-02 L8_l1linf:6.4941e-02 L9_l1linf:6.4941e-02 L10_l1linf:6.7383e-02 L11_l1linf:6.9336e-02 L12_l1linf:5.2734e-02 L1_spectral:3.2065e-03 L2_spectral:3.2222e-03 L3_spectral:3.2119e-03 L4_spectral:3.1769e-03 L5_spectral:3.1713e-03 L6_spectral:3.1662e-03 L7_spectral:3.1573e-03 L8_spectral:3.1610e-03 L9_spectral:3.1746e-03 L10_spectral:3.1901e-03 L11_spectral:3.1570e-03 L12_spectral:3.1332e-03 train_time:149815ms step_avg:46.82ms +[2025-09-11 12:16:19] [Rank 0] step:3201/10000 train_time:151631ms step_avg:47.37ms +[2025-09-11 12:16:19] [Rank 0] step:3201/10000 train_time:151631ms step_avg:47.37ms +[2025-09-11 12:16:20] [Rank 0] step:3221/10000 train_time:152302ms step_avg:47.28ms +[2025-09-11 12:16:20] [Rank 0] step:3221/10000 train_time:152302ms step_avg:47.28ms +[2025-09-11 12:16:20] [Rank 0] step:3241/10000 train_time:152969ms step_avg:47.20ms +[2025-09-11 12:16:20] [Rank 0] step:3241/10000 train_time:152969ms step_avg:47.20ms +[2025-09-11 12:16:21] [Rank 0] step:3261/10000 train_time:153636ms step_avg:47.11ms +[2025-09-11 12:16:21] [Rank 0] step:3261/10000 train_time:153636ms step_avg:47.11ms +[2025-09-11 12:16:22] [Rank 0] step:3281/10000 train_time:154607ms step_avg:47.12ms +[2025-09-11 12:16:22] [Rank 0] step:3281/10000 
train_time:154607ms step_avg:47.12ms +[2025-09-11 12:16:22] [Rank 0] step:3301/10000 train_time:155274ms step_avg:47.04ms +[2025-09-11 12:16:22] [Rank 0] step:3301/10000 train_time:155274ms step_avg:47.04ms +[2025-09-11 12:16:23] [Rank 0] step:3321/10000 train_time:155939ms step_avg:46.96ms +[2025-09-11 12:16:23] [Rank 0] step:3321/10000 train_time:155939ms step_avg:46.96ms +[2025-09-11 12:16:24] [Rank 0] step:3341/10000 train_time:156908ms step_avg:46.96ms +[2025-09-11 12:16:24] [Rank 0] step:3341/10000 train_time:156908ms step_avg:46.96ms +[2025-09-11 12:16:25] [Rank 0] step:3361/10000 train_time:157575ms step_avg:46.88ms +[2025-09-11 12:16:25] [Rank 0] step:3361/10000 train_time:157575ms step_avg:46.88ms +[2025-09-11 12:16:25] [Rank 0] step:3381/10000 train_time:158241ms step_avg:46.80ms +[2025-09-11 12:16:25] [Rank 0] step:3381/10000 train_time:158241ms step_avg:46.80ms +[2025-09-11 12:16:26] [Rank 0] step:3401/10000 train_time:158907ms step_avg:46.72ms +[2025-09-11 12:16:26] [Rank 0] step:3401/10000 train_time:158907ms step_avg:46.72ms +[2025-09-11 12:16:27] [Rank 0] step:3421/10000 train_time:159573ms step_avg:46.65ms +[2025-09-11 12:16:27] [Rank 0] step:3421/10000 train_time:159573ms step_avg:46.65ms +[2025-09-11 12:16:27] [Rank 0] step:3441/10000 train_time:160238ms step_avg:46.57ms +[2025-09-11 12:16:27] [Rank 0] step:3441/10000 train_time:160238ms step_avg:46.57ms +[2025-09-11 12:16:28] [Rank 0] step:3461/10000 train_time:160904ms step_avg:46.49ms +[2025-09-11 12:16:28] [Rank 0] step:3461/10000 train_time:160904ms step_avg:46.49ms +[2025-09-11 12:16:29] [Rank 0] step:3481/10000 train_time:161577ms step_avg:46.42ms +[2025-09-11 12:16:29] [Rank 0] step:3481/10000 train_time:161577ms step_avg:46.42ms +[2025-09-11 12:16:29] [Rank 0] step:3501/10000 train_time:162243ms step_avg:46.34ms +[2025-09-11 12:16:29] [Rank 0] step:3501/10000 train_time:162243ms step_avg:46.34ms +[2025-09-11 12:16:30] [Rank 0] step:3521/10000 train_time:162908ms step_avg:46.27ms 
+[2025-09-11 12:16:30] [Rank 0] step:3521/10000 train_time:162908ms step_avg:46.27ms +[2025-09-11 12:16:31] [Rank 0] step:3541/10000 train_time:163574ms step_avg:46.19ms +[2025-09-11 12:16:31] [Rank 0] step:3541/10000 train_time:163574ms step_avg:46.19ms +[2025-09-11 12:16:31] [Rank 0] step:3561/10000 train_time:164241ms step_avg:46.12ms +[2025-09-11 12:16:31] [Rank 0] step:3561/10000 train_time:164241ms step_avg:46.12ms +[2025-09-11 12:16:32] [Rank 0] step:3581/10000 train_time:164906ms step_avg:46.05ms +[2025-09-11 12:16:32] [Rank 0] step:3581/10000 train_time:164906ms step_avg:46.05ms +[2025-09-11 12:16:33] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 12:16:33] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 12:16:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 12:16:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 12:16:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 12:16:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 12:16:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:16:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:16:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 12:16:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 12:16:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 12:16:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 12:16:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 12:16:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 12:16:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 12:16:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 12:16:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 12:16:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 12:16:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 12:16:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 12:16:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 12:16:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 12:16:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 12:16:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 12:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 12:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 12:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 12:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 12:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 12:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 12:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 12:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 12:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 12:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 12:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 12:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 12:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 12:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 12:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 12:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 12:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 12:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 12:16:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 12:16:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 12:16:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 12:16:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:16:43] [Rank 0] PRINT: step:3600/10000 val_loss:5.2877 total_sharp:1.3901e-02 L1_sharp:1.2852e-02 L2_sharp:8.6712e-03 L3_sharp:1.0557e-02 L4_sharp:1.2381e-02 L5_sharp:1.7628e-02 L6_sharp:1.7021e-02 L7_sharp:2.9912e-02 L8_sharp:5.1946e-02 L9_sharp:7.0205e-02 L10_sharp:9.7599e-02 L11_sharp:1.5890e-01 L12_sharp:3.9703e-01 total_fnorm:4.5312e+00 total_l1_linf:6.1440e+03 total_spectral:2.3125e+00 L1_fnorm:2.4902e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4512e-01 L8_fnorm:2.4512e-01 L9_fnorm:2.4609e-01 L10_fnorm:2.4707e-01 L11_fnorm:2.5000e-01 L12_fnorm:2.4121e-01 L1_l1linf:6.9336e-02 L2_l1linf:7.0312e-02 L3_l1linf:6.9824e-02 L4_l1linf:6.8359e-02 L5_l1linf:6.7383e-02 L6_l1linf:6.5430e-02 L7_l1linf:6.3965e-02 L8_l1linf:6.3965e-02 L9_l1linf:6.3965e-02 L10_l1linf:6.6895e-02 L11_l1linf:6.9824e-02 L12_l1linf:5.0537e-02 L1_spectral:3.2102e-03 L2_spectral:3.2287e-03 L3_spectral:3.2065e-03 L4_spectral:3.1783e-03 L5_spectral:3.1870e-03 L6_spectral:3.1898e-03 L7_spectral:3.1816e-03 L8_spectral:3.1607e-03 L9_spectral:3.1486e-03 L10_spectral:3.1738e-03 L11_spectral:3.1673e-03 L12_spectral:3.1361e-03 train_time:165554ms step_avg:45.99ms +[2025-09-11 12:16:43] [Rank 0] PRINT: step:3600/10000 val_loss:5.2877 total_sharp:1.3901e-02 L1_sharp:1.2852e-02 L2_sharp:8.6712e-03 L3_sharp:1.0557e-02 L4_sharp:1.2381e-02 L5_sharp:1.7628e-02 L6_sharp:1.7021e-02 L7_sharp:2.9912e-02 L8_sharp:5.1946e-02 L9_sharp:7.0205e-02 L10_sharp:9.7599e-02 L11_sharp:1.5890e-01 L12_sharp:3.9703e-01 total_fnorm:4.5312e+00 total_l1_linf:6.1440e+03 total_spectral:2.3125e+00 L1_fnorm:2.4902e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4512e-01 L8_fnorm:2.4512e-01 L9_fnorm:2.4609e-01 L10_fnorm:2.4707e-01 L11_fnorm:2.5000e-01 L12_fnorm:2.4121e-01 L1_l1linf:6.9336e-02 L2_l1linf:7.0312e-02 L3_l1linf:6.9824e-02 L4_l1linf:6.8359e-02 L5_l1linf:6.7383e-02 
L6_l1linf:6.5430e-02 L7_l1linf:6.3965e-02 L8_l1linf:6.3965e-02 L9_l1linf:6.3965e-02 L10_l1linf:6.6895e-02 L11_l1linf:6.9824e-02 L12_l1linf:5.0537e-02 L1_spectral:3.2102e-03 L2_spectral:3.2287e-03 L3_spectral:3.2065e-03 L4_spectral:3.1783e-03 L5_spectral:3.1870e-03 L6_spectral:3.1898e-03 L7_spectral:3.1816e-03 L8_spectral:3.1607e-03 L9_spectral:3.1486e-03 L10_spectral:3.1738e-03 L11_spectral:3.1673e-03 L12_spectral:3.1361e-03 train_time:165554ms step_avg:45.99ms +[2025-09-11 12:16:45] [Rank 0] step:3601/10000 train_time:167227ms step_avg:46.44ms +[2025-09-11 12:16:45] [Rank 0] step:3601/10000 train_time:167227ms step_avg:46.44ms +[2025-09-11 12:16:46] [Rank 0] step:3621/10000 train_time:167899ms step_avg:46.37ms +[2025-09-11 12:16:46] [Rank 0] step:3621/10000 train_time:167899ms step_avg:46.37ms +[2025-09-11 12:16:46] [Rank 0] step:3641/10000 train_time:168567ms step_avg:46.30ms +[2025-09-11 12:16:46] [Rank 0] step:3641/10000 train_time:168567ms step_avg:46.30ms +[2025-09-11 12:16:47] [Rank 0] step:3661/10000 train_time:169233ms step_avg:46.23ms +[2025-09-11 12:16:47] [Rank 0] step:3661/10000 train_time:169233ms step_avg:46.23ms +[2025-09-11 12:16:48] [Rank 0] step:3681/10000 train_time:169901ms step_avg:46.16ms +[2025-09-11 12:16:48] [Rank 0] step:3681/10000 train_time:169901ms step_avg:46.16ms +[2025-09-11 12:16:48] [Rank 0] step:3701/10000 train_time:170566ms step_avg:46.09ms +[2025-09-11 12:16:48] [Rank 0] step:3701/10000 train_time:170566ms step_avg:46.09ms +[2025-09-11 12:16:49] [Rank 0] step:3721/10000 train_time:171243ms step_avg:46.02ms +[2025-09-11 12:16:49] [Rank 0] step:3721/10000 train_time:171243ms step_avg:46.02ms +[2025-09-11 12:16:50] [Rank 0] step:3741/10000 train_time:171919ms step_avg:45.96ms +[2025-09-11 12:16:50] [Rank 0] step:3741/10000 train_time:171919ms step_avg:45.96ms +[2025-09-11 12:16:50] [Rank 0] step:3761/10000 train_time:172597ms step_avg:45.89ms +[2025-09-11 12:16:50] [Rank 0] step:3761/10000 train_time:172597ms step_avg:45.89ms 
+[2025-09-11 12:16:51] [Rank 0] step:3781/10000 train_time:173273ms step_avg:45.83ms +[2025-09-11 12:16:51] [Rank 0] step:3781/10000 train_time:173273ms step_avg:45.83ms +[2025-09-11 12:16:52] [Rank 0] step:3801/10000 train_time:173951ms step_avg:45.76ms +[2025-09-11 12:16:52] [Rank 0] step:3801/10000 train_time:173951ms step_avg:45.76ms +[2025-09-11 12:16:53] [Rank 0] step:3821/10000 train_time:174628ms step_avg:45.70ms +[2025-09-11 12:16:53] [Rank 0] step:3821/10000 train_time:174628ms step_avg:45.70ms +[2025-09-11 12:16:53] [Rank 0] step:3841/10000 train_time:175305ms step_avg:45.64ms +[2025-09-11 12:16:53] [Rank 0] step:3841/10000 train_time:175305ms step_avg:45.64ms +[2025-09-11 12:16:54] [Rank 0] step:3861/10000 train_time:175982ms step_avg:45.58ms +[2025-09-11 12:16:54] [Rank 0] step:3861/10000 train_time:175982ms step_avg:45.58ms +[2025-09-11 12:16:55] [Rank 0] step:3881/10000 train_time:176660ms step_avg:45.52ms +[2025-09-11 12:16:55] [Rank 0] step:3881/10000 train_time:176660ms step_avg:45.52ms +[2025-09-11 12:16:55] [Rank 0] step:3901/10000 train_time:177336ms step_avg:45.46ms +[2025-09-11 12:16:55] [Rank 0] step:3901/10000 train_time:177336ms step_avg:45.46ms +[2025-09-11 12:16:56] [Rank 0] step:3921/10000 train_time:178013ms step_avg:45.40ms +[2025-09-11 12:16:56] [Rank 0] step:3921/10000 train_time:178013ms step_avg:45.40ms +[2025-09-11 12:16:57] [Rank 0] step:3941/10000 train_time:178690ms step_avg:45.34ms +[2025-09-11 12:16:57] [Rank 0] step:3941/10000 train_time:178690ms step_avg:45.34ms +[2025-09-11 12:16:57] [Rank 0] step:3961/10000 train_time:179367ms step_avg:45.28ms +[2025-09-11 12:16:57] [Rank 0] step:3961/10000 train_time:179367ms step_avg:45.28ms +[2025-09-11 12:16:58] [Rank 0] step:3981/10000 train_time:180044ms step_avg:45.23ms +[2025-09-11 12:16:58] [Rank 0] step:3981/10000 train_time:180044ms step_avg:45.23ms +[2025-09-11 12:16:59] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 12:16:59] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 12:17:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 12:17:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 12:17:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 12:17:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 12:17:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:17:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:17:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 12:17:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 12:17:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 12:17:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 12:17:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 12:17:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 12:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 12:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 12:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 12:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 12:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 12:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 12:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 12:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 12:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 12:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 12:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 12:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 12:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 12:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 12:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 12:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 12:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 12:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 12:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 12:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 12:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 12:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 12:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 12:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 12:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 12:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 12:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 12:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 12:17:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 12:17:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 12:17:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 12:17:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 12:17:10] [Rank 0] PRINT: step:4000/10000 val_loss:5.2363 total_sharp:1.4578e-02 L1_sharp:1.3330e-02 L2_sharp:1.0889e-02 L3_sharp:9.0447e-03 L4_sharp:1.1311e-02 L5_sharp:1.8315e-02 L6_sharp:2.9389e-02 L7_sharp:4.1469e-02 L8_sharp:6.0676e-02 L9_sharp:9.6408e-02 L10_sharp:1.3978e-01 L11_sharp:2.1351e-01 L12_sharp:7.8156e-01 total_fnorm:5.5625e+00 total_l1_linf:7.1360e+03 total_spectral:2.8125e+00 L1_fnorm:2.4902e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4805e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4512e-01 L6_fnorm:2.4414e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.4414e-01 L9_fnorm:2.4512e-01 L10_fnorm:2.4609e-01 L11_fnorm:2.4902e-01 L12_fnorm:2.3730e-01 L1_l1linf:6.7871e-02 L2_l1linf:6.8848e-02 L3_l1linf:6.7383e-02 L4_l1linf:6.4941e-02 L5_l1linf:6.3965e-02 L6_l1linf:6.4453e-02 L7_l1linf:6.3477e-02 L8_l1linf:6.3965e-02 L9_l1linf:6.3965e-02 L10_l1linf:6.6406e-02 L11_l1linf:6.7871e-02 L12_l1linf:4.7119e-02 L1_spectral:3.2140e-03 L2_spectral:3.2247e-03 L3_spectral:3.2227e-03 L4_spectral:3.1919e-03 L5_spectral:3.1830e-03 L6_spectral:3.1718e-03 L7_spectral:3.1863e-03 L8_spectral:3.1806e-03 L9_spectral:3.1733e-03 L10_spectral:3.1754e-03 L11_spectral:3.1948e-03 L12_spectral:3.1462e-03 train_time:180702ms step_avg:45.18ms +[2025-09-11 12:17:10] [Rank 0] PRINT: step:4000/10000 
val_loss:5.2363 total_sharp:1.4578e-02 L1_sharp:1.3330e-02 L2_sharp:1.0889e-02 L3_sharp:9.0447e-03 L4_sharp:1.1311e-02 L5_sharp:1.8315e-02 L6_sharp:2.9389e-02 L7_sharp:4.1469e-02 L8_sharp:6.0676e-02 L9_sharp:9.6408e-02 L10_sharp:1.3978e-01 L11_sharp:2.1351e-01 L12_sharp:7.8156e-01 total_fnorm:5.5625e+00 total_l1_linf:7.1360e+03 total_spectral:2.8125e+00 L1_fnorm:2.4902e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4805e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4512e-01 L6_fnorm:2.4414e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.4414e-01 L9_fnorm:2.4512e-01 L10_fnorm:2.4609e-01 L11_fnorm:2.4902e-01 L12_fnorm:2.3730e-01 L1_l1linf:6.7871e-02 L2_l1linf:6.8848e-02 L3_l1linf:6.7383e-02 L4_l1linf:6.4941e-02 L5_l1linf:6.3965e-02 L6_l1linf:6.4453e-02 L7_l1linf:6.3477e-02 L8_l1linf:6.3965e-02 L9_l1linf:6.3965e-02 L10_l1linf:6.6406e-02 L11_l1linf:6.7871e-02 L12_l1linf:4.7119e-02 L1_spectral:3.2140e-03 L2_spectral:3.2247e-03 L3_spectral:3.2227e-03 L4_spectral:3.1919e-03 L5_spectral:3.1830e-03 L6_spectral:3.1718e-03 L7_spectral:3.1863e-03 L8_spectral:3.1806e-03 L9_spectral:3.1733e-03 L10_spectral:3.1754e-03 L11_spectral:3.1948e-03 L12_spectral:3.1462e-03 train_time:180702ms step_avg:45.18ms +[2025-09-11 12:17:12] [Rank 0] step:4001/10000 train_time:183434ms step_avg:45.85ms +[2025-09-11 12:17:12] [Rank 0] step:4001/10000 train_time:183434ms step_avg:45.85ms +[2025-09-11 12:17:13] [Rank 0] step:4021/10000 train_time:184291ms step_avg:45.83ms +[2025-09-11 12:17:13] [Rank 0] step:4021/10000 train_time:184291ms step_avg:45.83ms +[2025-09-11 12:17:14] [Rank 0] step:4041/10000 train_time:184969ms step_avg:45.77ms +[2025-09-11 12:17:14] [Rank 0] step:4041/10000 train_time:184969ms step_avg:45.77ms +[2025-09-11 12:17:14] [Rank 0] step:4061/10000 train_time:185646ms step_avg:45.71ms +[2025-09-11 12:17:14] [Rank 0] step:4061/10000 train_time:185646ms step_avg:45.71ms +[2025-09-11 12:17:15] [Rank 0] step:4081/10000 train_time:186325ms step_avg:45.66ms +[2025-09-11 12:17:15] [Rank 0] step:4081/10000 
train_time:186325ms step_avg:45.66ms +[2025-09-11 12:17:16] [Rank 0] step:4101/10000 train_time:187002ms step_avg:45.60ms +[2025-09-11 12:17:16] [Rank 0] step:4101/10000 train_time:187002ms step_avg:45.60ms +[2025-09-11 12:17:17] [Rank 0] step:4121/10000 train_time:187681ms step_avg:45.54ms +[2025-09-11 12:17:17] [Rank 0] step:4121/10000 train_time:187681ms step_avg:45.54ms +[2025-09-11 12:17:17] [Rank 0] step:4141/10000 train_time:188358ms step_avg:45.49ms +[2025-09-11 12:17:17] [Rank 0] step:4141/10000 train_time:188358ms step_avg:45.49ms +[2025-09-11 12:17:18] [Rank 0] step:4161/10000 train_time:189036ms step_avg:45.43ms +[2025-09-11 12:17:18] [Rank 0] step:4161/10000 train_time:189036ms step_avg:45.43ms +[2025-09-11 12:17:19] [Rank 0] step:4181/10000 train_time:189713ms step_avg:45.38ms +[2025-09-11 12:17:19] [Rank 0] step:4181/10000 train_time:189713ms step_avg:45.38ms +[2025-09-11 12:17:19] [Rank 0] step:4201/10000 train_time:190391ms step_avg:45.32ms +[2025-09-11 12:17:19] [Rank 0] step:4201/10000 train_time:190391ms step_avg:45.32ms +[2025-09-11 12:17:20] [Rank 0] step:4221/10000 train_time:191068ms step_avg:45.27ms +[2025-09-11 12:17:20] [Rank 0] step:4221/10000 train_time:191068ms step_avg:45.27ms +[2025-09-11 12:17:21] [Rank 0] step:4241/10000 train_time:191746ms step_avg:45.21ms +[2025-09-11 12:17:21] [Rank 0] step:4241/10000 train_time:191746ms step_avg:45.21ms +[2025-09-11 12:17:21] [Rank 0] step:4261/10000 train_time:192423ms step_avg:45.16ms +[2025-09-11 12:17:21] [Rank 0] step:4261/10000 train_time:192423ms step_avg:45.16ms +[2025-09-11 12:17:22] [Rank 0] step:4281/10000 train_time:193102ms step_avg:45.11ms +[2025-09-11 12:17:22] [Rank 0] step:4281/10000 train_time:193102ms step_avg:45.11ms +[2025-09-11 12:17:23] [Rank 0] step:4301/10000 train_time:193780ms step_avg:45.05ms +[2025-09-11 12:17:23] [Rank 0] step:4301/10000 train_time:193780ms step_avg:45.05ms +[2025-09-11 12:17:23] [Rank 0] step:4321/10000 train_time:194457ms step_avg:45.00ms 
+[2025-09-11 12:17:23] [Rank 0] step:4321/10000 train_time:194457ms step_avg:45.00ms +[2025-09-11 12:17:24] [Rank 0] step:4341/10000 train_time:195136ms step_avg:44.95ms +[2025-09-11 12:17:24] [Rank 0] step:4341/10000 train_time:195136ms step_avg:44.95ms +[2025-09-11 12:17:25] [Rank 0] step:4361/10000 train_time:196121ms step_avg:44.97ms +[2025-09-11 12:17:25] [Rank 0] step:4361/10000 train_time:196121ms step_avg:44.97ms +[2025-09-11 12:17:26] [Rank 0] step:4381/10000 train_time:196799ms step_avg:44.92ms +[2025-09-11 12:17:26] [Rank 0] step:4381/10000 train_time:196799ms step_avg:44.92ms +[2025-09-11 12:17:26] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 12:17:26] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 12:17:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 12:17:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 12:17:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 12:17:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 12:17:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:17:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:17:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 12:17:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 12:17:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 12:17:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 12:17:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 12:17:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 12:17:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 12:17:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 12:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 12:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 12:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 12:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 12:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 12:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 12:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 12:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 12:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 12:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 12:17:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 12:17:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 12:17:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 12:17:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 12:17:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 12:17:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 12:17:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 12:17:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 12:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 12:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 12:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 12:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 12:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 12:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 12:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 12:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 12:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 12:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 12:17:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 12:17:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:17:37] [Rank 0] PRINT: step:4400/10000 val_loss:5.1961 total_sharp:1.2339e-02 L1_sharp:6.0365e-03 L2_sharp:6.1777e-03 L3_sharp:5.3884e-03 L4_sharp:6.8320e-03 L5_sharp:1.0198e-02 L6_sharp:2.1136e-02 L7_sharp:2.8865e-02 L8_sharp:4.9381e-02 L9_sharp:7.0107e-02 L10_sharp:9.9775e-02 L11_sharp:1.6040e-01 L12_sharp:3.9194e-01 total_fnorm:4.6875e+00 total_l1_linf:6.1120e+03 total_spectral:2.3750e+00 L1_fnorm:2.4805e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4707e-01 L4_fnorm:2.4609e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4316e-01 L7_fnorm:2.4316e-01 L8_fnorm:2.4316e-01 L9_fnorm:2.4512e-01 L10_fnorm:2.4609e-01 L11_fnorm:2.4902e-01 L12_fnorm:2.3633e-01 L1_l1linf:6.6895e-02 L2_l1linf:6.6406e-02 L3_l1linf:6.5430e-02 L4_l1linf:6.4941e-02 L5_l1linf:6.3965e-02 L6_l1linf:6.2988e-02 L7_l1linf:6.2988e-02 L8_l1linf:6.2988e-02 L9_l1linf:6.2500e-02 L10_l1linf:6.4941e-02 L11_l1linf:6.6895e-02 L12_l1linf:4.6387e-02 L1_spectral:3.2190e-03 L2_spectral:3.2201e-03 L3_spectral:3.2063e-03 L4_spectral:3.1920e-03 L5_spectral:3.1802e-03 L6_spectral:3.1763e-03 L7_spectral:3.1732e-03 L8_spectral:3.1712e-03 L9_spectral:3.1891e-03 L10_spectral:3.1741e-03 L11_spectral:3.1664e-03 L12_spectral:3.1523e-03 train_time:197457ms step_avg:44.88ms +[2025-09-11 12:17:37] [Rank 0] PRINT: step:4400/10000 val_loss:5.1961 total_sharp:1.2339e-02 L1_sharp:6.0365e-03 L2_sharp:6.1777e-03 L3_sharp:5.3884e-03 L4_sharp:6.8320e-03 L5_sharp:1.0198e-02 L6_sharp:2.1136e-02 L7_sharp:2.8865e-02 L8_sharp:4.9381e-02 L9_sharp:7.0107e-02 L10_sharp:9.9775e-02 L11_sharp:1.6040e-01 L12_sharp:3.9194e-01 total_fnorm:4.6875e+00 total_l1_linf:6.1120e+03 total_spectral:2.3750e+00 L1_fnorm:2.4805e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4707e-01 L4_fnorm:2.4609e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4316e-01 L7_fnorm:2.4316e-01 L8_fnorm:2.4316e-01 L9_fnorm:2.4512e-01 L10_fnorm:2.4609e-01 L11_fnorm:2.4902e-01 L12_fnorm:2.3633e-01 L1_l1linf:6.6895e-02 L2_l1linf:6.6406e-02 L3_l1linf:6.5430e-02 L4_l1linf:6.4941e-02 L5_l1linf:6.3965e-02 
L6_l1linf:6.2988e-02 L7_l1linf:6.2988e-02 L8_l1linf:6.2988e-02 L9_l1linf:6.2500e-02 L10_l1linf:6.4941e-02 L11_l1linf:6.6895e-02 L12_l1linf:4.6387e-02 L1_spectral:3.2190e-03 L2_spectral:3.2201e-03 L3_spectral:3.2063e-03 L4_spectral:3.1920e-03 L5_spectral:3.1802e-03 L6_spectral:3.1763e-03 L7_spectral:3.1732e-03 L8_spectral:3.1712e-03 L9_spectral:3.1891e-03 L10_spectral:3.1741e-03 L11_spectral:3.1664e-03 L12_spectral:3.1523e-03 train_time:197457ms step_avg:44.88ms +[2025-09-11 12:17:39] [Rank 0] step:4401/10000 train_time:199166ms step_avg:45.25ms +[2025-09-11 12:17:39] [Rank 0] step:4401/10000 train_time:199166ms step_avg:45.25ms +[2025-09-11 12:17:40] [Rank 0] step:4421/10000 train_time:199876ms step_avg:45.21ms +[2025-09-11 12:17:40] [Rank 0] step:4421/10000 train_time:199876ms step_avg:45.21ms +[2025-09-11 12:17:40] [Rank 0] step:4441/10000 train_time:200555ms step_avg:45.16ms +[2025-09-11 12:17:40] [Rank 0] step:4441/10000 train_time:200555ms step_avg:45.16ms +[2025-09-11 12:17:41] [Rank 0] step:4461/10000 train_time:201235ms step_avg:45.11ms +[2025-09-11 12:17:41] [Rank 0] step:4461/10000 train_time:201235ms step_avg:45.11ms +[2025-09-11 12:17:42] [Rank 0] step:4481/10000 train_time:201915ms step_avg:45.06ms +[2025-09-11 12:17:42] [Rank 0] step:4481/10000 train_time:201915ms step_avg:45.06ms +[2025-09-11 12:17:42] [Rank 0] step:4501/10000 train_time:202606ms step_avg:45.01ms +[2025-09-11 12:17:42] [Rank 0] step:4501/10000 train_time:202606ms step_avg:45.01ms +[2025-09-11 12:17:43] [Rank 0] step:4521/10000 train_time:203287ms step_avg:44.96ms +[2025-09-11 12:17:43] [Rank 0] step:4521/10000 train_time:203287ms step_avg:44.96ms +[2025-09-11 12:17:44] [Rank 0] step:4541/10000 train_time:203967ms step_avg:44.92ms +[2025-09-11 12:17:44] [Rank 0] step:4541/10000 train_time:203967ms step_avg:44.92ms +[2025-09-11 12:17:44] [Rank 0] step:4561/10000 train_time:204647ms step_avg:44.87ms +[2025-09-11 12:17:44] [Rank 0] step:4561/10000 train_time:204647ms step_avg:44.87ms 
+[2025-09-11 12:17:45] [Rank 0] step:4581/10000 train_time:205327ms step_avg:44.82ms +[2025-09-11 12:17:45] [Rank 0] step:4581/10000 train_time:205327ms step_avg:44.82ms +[2025-09-11 12:17:46] [Rank 0] step:4601/10000 train_time:206007ms step_avg:44.77ms +[2025-09-11 12:17:46] [Rank 0] step:4601/10000 train_time:206007ms step_avg:44.77ms +[2025-09-11 12:17:46] [Rank 0] step:4621/10000 train_time:206687ms step_avg:44.73ms +[2025-09-11 12:17:46] [Rank 0] step:4621/10000 train_time:206687ms step_avg:44.73ms +[2025-09-11 12:17:47] [Rank 0] step:4641/10000 train_time:207367ms step_avg:44.68ms +[2025-09-11 12:17:47] [Rank 0] step:4641/10000 train_time:207367ms step_avg:44.68ms +[2025-09-11 12:17:48] [Rank 0] step:4661/10000 train_time:208047ms step_avg:44.64ms +[2025-09-11 12:17:48] [Rank 0] step:4661/10000 train_time:208047ms step_avg:44.64ms +[2025-09-11 12:17:48] [Rank 0] step:4681/10000 train_time:208727ms step_avg:44.59ms +[2025-09-11 12:17:48] [Rank 0] step:4681/10000 train_time:208727ms step_avg:44.59ms +[2025-09-11 12:17:49] [Rank 0] step:4701/10000 train_time:209407ms step_avg:44.55ms +[2025-09-11 12:17:49] [Rank 0] step:4701/10000 train_time:209407ms step_avg:44.55ms +[2025-09-11 12:17:50] [Rank 0] step:4721/10000 train_time:210087ms step_avg:44.50ms +[2025-09-11 12:17:50] [Rank 0] step:4721/10000 train_time:210087ms step_avg:44.50ms +[2025-09-11 12:17:51] [Rank 0] step:4741/10000 train_time:210766ms step_avg:44.46ms +[2025-09-11 12:17:51] [Rank 0] step:4741/10000 train_time:210766ms step_avg:44.46ms +[2025-09-11 12:17:51] [Rank 0] step:4761/10000 train_time:211448ms step_avg:44.41ms +[2025-09-11 12:17:51] [Rank 0] step:4761/10000 train_time:211448ms step_avg:44.41ms +[2025-09-11 12:17:52] [Rank 0] step:4781/10000 train_time:212128ms step_avg:44.37ms +[2025-09-11 12:17:52] [Rank 0] step:4781/10000 train_time:212128ms step_avg:44.37ms +[2025-09-11 12:17:53] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 12:17:53] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 12:17:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 12:17:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 12:17:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 12:17:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 12:17:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:17:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:17:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 12:17:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 12:17:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 12:17:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 12:17:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 12:17:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 12:17:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 12:17:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 12:18:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 12:18:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 12:18:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 12:18:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 12:18:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 12:18:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 12:18:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 12:18:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 12:18:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 12:18:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 12:18:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 12:18:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 12:18:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 12:18:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 12:18:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 12:18:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 12:18:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 12:18:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 12:18:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 12:18:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 12:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 12:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 12:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 12:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 12:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 12:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 12:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 12:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 12:18:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 12:18:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 12:18:03] [Rank 0] PRINT: step:4800/10000 val_loss:5.1510 total_sharp:1.4813e-02 L1_sharp:9.5966e-03 L2_sharp:7.6127e-03 L3_sharp:6.7522e-03 L4_sharp:1.0770e-02 L5_sharp:1.2116e-02 L6_sharp:2.0758e-02 L7_sharp:3.1388e-02 L8_sharp:5.3052e-02 L9_sharp:7.4931e-02 L10_sharp:1.0593e-01 L11_sharp:1.7702e-01 L12_sharp:7.0845e-01 total_fnorm:4.7812e+00 total_l1_linf:6.3680e+03 total_spectral:2.4375e+00 L1_fnorm:2.4902e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4805e-01 L4_fnorm:2.4609e-01 L5_fnorm:2.4512e-01 L6_fnorm:2.4316e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.4316e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4512e-01 L11_fnorm:2.4805e-01 L12_fnorm:2.3828e-01 L1_l1linf:6.6406e-02 L2_l1linf:6.6895e-02 L3_l1linf:6.5430e-02 L4_l1linf:6.4941e-02 L5_l1linf:6.4453e-02 L6_l1linf:6.2012e-02 L7_l1linf:6.1523e-02 L8_l1linf:6.1035e-02 L9_l1linf:6.1523e-02 L10_l1linf:6.2500e-02 L11_l1linf:6.5430e-02 L12_l1linf:4.4922e-02 L1_spectral:3.2418e-03 L2_spectral:3.2236e-03 L3_spectral:3.2232e-03 L4_spectral:3.2095e-03 L5_spectral:3.1952e-03 L6_spectral:3.2085e-03 L7_spectral:3.1841e-03 L8_spectral:3.1959e-03 L9_spectral:3.1836e-03 L10_spectral:3.2083e-03 L11_spectral:3.2066e-03 L12_spectral:3.1536e-03 train_time:212788ms step_avg:44.33ms +[2025-09-11 12:18:03] [Rank 0] PRINT: step:4800/10000 
val_loss:5.1510 total_sharp:1.4813e-02 L1_sharp:9.5966e-03 L2_sharp:7.6127e-03 L3_sharp:6.7522e-03 L4_sharp:1.0770e-02 L5_sharp:1.2116e-02 L6_sharp:2.0758e-02 L7_sharp:3.1388e-02 L8_sharp:5.3052e-02 L9_sharp:7.4931e-02 L10_sharp:1.0593e-01 L11_sharp:1.7702e-01 L12_sharp:7.0845e-01 total_fnorm:4.7812e+00 total_l1_linf:6.3680e+03 total_spectral:2.4375e+00 L1_fnorm:2.4902e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4805e-01 L4_fnorm:2.4609e-01 L5_fnorm:2.4512e-01 L6_fnorm:2.4316e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.4316e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4512e-01 L11_fnorm:2.4805e-01 L12_fnorm:2.3828e-01 L1_l1linf:6.6406e-02 L2_l1linf:6.6895e-02 L3_l1linf:6.5430e-02 L4_l1linf:6.4941e-02 L5_l1linf:6.4453e-02 L6_l1linf:6.2012e-02 L7_l1linf:6.1523e-02 L8_l1linf:6.1035e-02 L9_l1linf:6.1523e-02 L10_l1linf:6.2500e-02 L11_l1linf:6.5430e-02 L12_l1linf:4.4922e-02 L1_spectral:3.2418e-03 L2_spectral:3.2236e-03 L3_spectral:3.2232e-03 L4_spectral:3.2095e-03 L5_spectral:3.1952e-03 L6_spectral:3.2085e-03 L7_spectral:3.1841e-03 L8_spectral:3.1959e-03 L9_spectral:3.1836e-03 L10_spectral:3.2083e-03 L11_spectral:3.2066e-03 L12_spectral:3.1536e-03 train_time:212788ms step_avg:44.33ms +[2025-09-11 12:18:05] [Rank 0] step:4801/10000 train_time:214516ms step_avg:44.68ms +[2025-09-11 12:18:05] [Rank 0] step:4801/10000 train_time:214516ms step_avg:44.68ms +[2025-09-11 12:18:06] [Rank 0] step:4821/10000 train_time:215234ms step_avg:44.65ms +[2025-09-11 12:18:06] [Rank 0] step:4821/10000 train_time:215234ms step_avg:44.65ms +[2025-09-11 12:18:06] [Rank 0] step:4841/10000 train_time:215918ms step_avg:44.60ms +[2025-09-11 12:18:06] [Rank 0] step:4841/10000 train_time:215918ms step_avg:44.60ms +[2025-09-11 12:18:07] [Rank 0] step:4861/10000 train_time:216598ms step_avg:44.56ms +[2025-09-11 12:18:07] [Rank 0] step:4861/10000 train_time:216598ms step_avg:44.56ms +[2025-09-11 12:18:08] [Rank 0] step:4881/10000 train_time:217279ms step_avg:44.52ms +[2025-09-11 12:18:08] [Rank 0] step:4881/10000 
train_time:217279ms step_avg:44.52ms +[2025-09-11 12:18:08] [Rank 0] step:4901/10000 train_time:217961ms step_avg:44.47ms +[2025-09-11 12:18:08] [Rank 0] step:4901/10000 train_time:217961ms step_avg:44.47ms +[2025-09-11 12:18:09] [Rank 0] step:4921/10000 train_time:218642ms step_avg:44.43ms +[2025-09-11 12:18:09] [Rank 0] step:4921/10000 train_time:218642ms step_avg:44.43ms +[2025-09-11 12:18:10] [Rank 0] step:4941/10000 train_time:219322ms step_avg:44.39ms +[2025-09-11 12:18:10] [Rank 0] step:4941/10000 train_time:219322ms step_avg:44.39ms +[2025-09-11 12:18:10] [Rank 0] step:4961/10000 train_time:220003ms step_avg:44.35ms +[2025-09-11 12:18:10] [Rank 0] step:4961/10000 train_time:220003ms step_avg:44.35ms +[2025-09-11 12:18:11] [Rank 0] step:4981/10000 train_time:220684ms step_avg:44.31ms +[2025-09-11 12:18:11] [Rank 0] step:4981/10000 train_time:220684ms step_avg:44.31ms +[2025-09-11 12:18:12] [Rank 0] step:5001/10000 train_time:221366ms step_avg:44.26ms +[2025-09-11 12:18:12] [Rank 0] step:5001/10000 train_time:221366ms step_avg:44.26ms +[2025-09-11 12:18:12] [Rank 0] step:5021/10000 train_time:222047ms step_avg:44.22ms +[2025-09-11 12:18:12] [Rank 0] step:5021/10000 train_time:222047ms step_avg:44.22ms +[2025-09-11 12:18:13] [Rank 0] step:5041/10000 train_time:222732ms step_avg:44.18ms +[2025-09-11 12:18:13] [Rank 0] step:5041/10000 train_time:222732ms step_avg:44.18ms +[2025-09-11 12:18:14] [Rank 0] step:5061/10000 train_time:223413ms step_avg:44.14ms +[2025-09-11 12:18:14] [Rank 0] step:5061/10000 train_time:223413ms step_avg:44.14ms +[2025-09-11 12:18:14] [Rank 0] step:5081/10000 train_time:224092ms step_avg:44.10ms +[2025-09-11 12:18:14] [Rank 0] step:5081/10000 train_time:224092ms step_avg:44.10ms +[2025-09-11 12:18:15] [Rank 0] step:5101/10000 train_time:224774ms step_avg:44.06ms +[2025-09-11 12:18:15] [Rank 0] step:5101/10000 train_time:224774ms step_avg:44.06ms +[2025-09-11 12:18:16] [Rank 0] step:5121/10000 train_time:225455ms step_avg:44.03ms 
+[2025-09-11 12:18:16] [Rank 0] step:5121/10000 train_time:225455ms step_avg:44.03ms +[2025-09-11 12:18:16] [Rank 0] step:5141/10000 train_time:226136ms step_avg:43.99ms +[2025-09-11 12:18:16] [Rank 0] step:5141/10000 train_time:226136ms step_avg:43.99ms +[2025-09-11 12:18:17] [Rank 0] step:5161/10000 train_time:226818ms step_avg:43.95ms +[2025-09-11 12:18:17] [Rank 0] step:5161/10000 train_time:226818ms step_avg:43.95ms +[2025-09-11 12:18:18] [Rank 0] step:5181/10000 train_time:227499ms step_avg:43.91ms +[2025-09-11 12:18:18] [Rank 0] step:5181/10000 train_time:227499ms step_avg:43.91ms +[2025-09-11 12:18:18] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 12:18:18] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 12:18:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 12:18:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 12:18:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 12:18:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 12:18:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:18:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:18:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 12:18:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 12:18:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 12:18:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 12:18:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 12:18:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 12:18:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 12:18:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 12:18:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 12:18:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 12:18:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 12:18:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 12:18:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 12:18:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 12:18:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 12:18:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 12:18:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 12:18:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 12:18:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 12:18:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 12:18:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 12:18:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 12:18:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 12:18:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 12:18:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 12:18:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 12:18:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 12:18:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 12:18:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 12:18:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 12:18:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 12:18:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 12:18:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 12:18:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 12:18:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 12:18:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 12:18:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 12:18:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:18:29] [Rank 0] PRINT: step:5200/10000 val_loss:5.1199 total_sharp:1.4399e-02 L1_sharp:8.4958e-03 L2_sharp:6.9887e-03 L3_sharp:5.7315e-03 L4_sharp:7.1987e-03 L5_sharp:1.0986e-02 L6_sharp:1.4177e-02 L7_sharp:2.1419e-02 L8_sharp:3.4142e-02 L9_sharp:5.6359e-02 L10_sharp:8.3539e-02 L11_sharp:1.4821e-01 L12_sharp:7.5917e-01 total_fnorm:4.4062e+00 total_l1_linf:5.6960e+03 total_spectral:2.2500e+00 L1_fnorm:2.4805e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4707e-01 L4_fnorm:2.4609e-01 L5_fnorm:2.4512e-01 L6_fnorm:2.4316e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.4316e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4512e-01 L11_fnorm:2.4707e-01 L12_fnorm:2.4023e-01 L1_l1linf:6.6406e-02 L2_l1linf:6.6406e-02 L3_l1linf:6.5430e-02 L4_l1linf:6.3965e-02 L5_l1linf:6.2988e-02 L6_l1linf:6.1279e-02 L7_l1linf:6.1279e-02 L8_l1linf:5.9570e-02 L9_l1linf:6.0547e-02 L10_l1linf:6.0303e-02 L11_l1linf:6.3965e-02 L12_l1linf:4.5898e-02 L1_spectral:3.2355e-03 L2_spectral:3.2613e-03 L3_spectral:3.2150e-03 L4_spectral:3.2197e-03 L5_spectral:3.1961e-03 L6_spectral:3.2079e-03 L7_spectral:3.2032e-03 L8_spectral:3.1952e-03 L9_spectral:3.1874e-03 L10_spectral:3.1874e-03 L11_spectral:3.1946e-03 L12_spectral:3.1749e-03 train_time:228166ms step_avg:43.88ms +[2025-09-11 12:18:29] [Rank 0] PRINT: step:5200/10000 val_loss:5.1199 total_sharp:1.4399e-02 L1_sharp:8.4958e-03 L2_sharp:6.9887e-03 L3_sharp:5.7315e-03 L4_sharp:7.1987e-03 L5_sharp:1.0986e-02 L6_sharp:1.4177e-02 L7_sharp:2.1419e-02 L8_sharp:3.4142e-02 L9_sharp:5.6359e-02 L10_sharp:8.3539e-02 L11_sharp:1.4821e-01 L12_sharp:7.5917e-01 total_fnorm:4.4062e+00 total_l1_linf:5.6960e+03 total_spectral:2.2500e+00 L1_fnorm:2.4805e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4707e-01 L4_fnorm:2.4609e-01 L5_fnorm:2.4512e-01 L6_fnorm:2.4316e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.4316e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4512e-01 L11_fnorm:2.4707e-01 L12_fnorm:2.4023e-01 L1_l1linf:6.6406e-02 L2_l1linf:6.6406e-02 L3_l1linf:6.5430e-02 L4_l1linf:6.3965e-02 L5_l1linf:6.2988e-02 
L6_l1linf:6.1279e-02 L7_l1linf:6.1279e-02 L8_l1linf:5.9570e-02 L9_l1linf:6.0547e-02 L10_l1linf:6.0303e-02 L11_l1linf:6.3965e-02 L12_l1linf:4.5898e-02 L1_spectral:3.2355e-03 L2_spectral:3.2613e-03 L3_spectral:3.2150e-03 L4_spectral:3.2197e-03 L5_spectral:3.1961e-03 L6_spectral:3.2079e-03 L7_spectral:3.2032e-03 L8_spectral:3.1952e-03 L9_spectral:3.1874e-03 L10_spectral:3.1874e-03 L11_spectral:3.1946e-03 L12_spectral:3.1749e-03 train_time:228166ms step_avg:43.88ms +[2025-09-11 12:18:31] [Rank 0] step:5201/10000 train_time:229919ms step_avg:44.21ms +[2025-09-11 12:18:31] [Rank 0] step:5201/10000 train_time:229919ms step_avg:44.21ms +[2025-09-11 12:18:32] [Rank 0] step:5221/10000 train_time:230644ms step_avg:44.18ms +[2025-09-11 12:18:32] [Rank 0] step:5221/10000 train_time:230644ms step_avg:44.18ms +[2025-09-11 12:18:32] [Rank 0] step:5241/10000 train_time:231333ms step_avg:44.14ms +[2025-09-11 12:18:32] [Rank 0] step:5241/10000 train_time:231333ms step_avg:44.14ms +[2025-09-11 12:18:33] [Rank 0] step:5261/10000 train_time:232025ms step_avg:44.10ms +[2025-09-11 12:18:33] [Rank 0] step:5261/10000 train_time:232025ms step_avg:44.10ms +[2025-09-11 12:18:34] [Rank 0] step:5281/10000 train_time:232714ms step_avg:44.07ms +[2025-09-11 12:18:34] [Rank 0] step:5281/10000 train_time:232714ms step_avg:44.07ms +[2025-09-11 12:18:34] [Rank 0] step:5301/10000 train_time:233404ms step_avg:44.03ms +[2025-09-11 12:18:34] [Rank 0] step:5301/10000 train_time:233404ms step_avg:44.03ms +[2025-09-11 12:18:35] [Rank 0] step:5321/10000 train_time:234094ms step_avg:43.99ms +[2025-09-11 12:18:35] [Rank 0] step:5321/10000 train_time:234094ms step_avg:43.99ms +[2025-09-11 12:18:36] [Rank 0] step:5341/10000 train_time:234783ms step_avg:43.96ms +[2025-09-11 12:18:36] [Rank 0] step:5341/10000 train_time:234783ms step_avg:43.96ms +[2025-09-11 12:18:37] [Rank 0] step:5361/10000 train_time:235473ms step_avg:43.92ms +[2025-09-11 12:18:37] [Rank 0] step:5361/10000 train_time:235473ms step_avg:43.92ms 
+[2025-09-11 12:18:37] [Rank 0] step:5381/10000 train_time:236166ms step_avg:43.89ms +[2025-09-11 12:18:37] [Rank 0] step:5381/10000 train_time:236166ms step_avg:43.89ms +[2025-09-11 12:18:38] [Rank 0] step:5401/10000 train_time:236852ms step_avg:43.85ms +[2025-09-11 12:18:38] [Rank 0] step:5401/10000 train_time:236852ms step_avg:43.85ms +[2025-09-11 12:18:39] [Rank 0] step:5421/10000 train_time:237543ms step_avg:43.82ms +[2025-09-11 12:18:39] [Rank 0] step:5421/10000 train_time:237543ms step_avg:43.82ms +[2025-09-11 12:18:39] [Rank 0] step:5441/10000 train_time:238233ms step_avg:43.78ms +[2025-09-11 12:18:39] [Rank 0] step:5441/10000 train_time:238233ms step_avg:43.78ms +[2025-09-11 12:18:40] [Rank 0] step:5461/10000 train_time:238923ms step_avg:43.75ms +[2025-09-11 12:18:40] [Rank 0] step:5461/10000 train_time:238923ms step_avg:43.75ms +[2025-09-11 12:18:41] [Rank 0] step:5481/10000 train_time:239613ms step_avg:43.72ms +[2025-09-11 12:18:41] [Rank 0] step:5481/10000 train_time:239613ms step_avg:43.72ms +[2025-09-11 12:18:41] [Rank 0] step:5501/10000 train_time:240302ms step_avg:43.68ms +[2025-09-11 12:18:41] [Rank 0] step:5501/10000 train_time:240302ms step_avg:43.68ms +[2025-09-11 12:18:42] [Rank 0] step:5521/10000 train_time:240991ms step_avg:43.65ms +[2025-09-11 12:18:42] [Rank 0] step:5521/10000 train_time:240991ms step_avg:43.65ms +[2025-09-11 12:18:43] [Rank 0] step:5541/10000 train_time:241683ms step_avg:43.62ms +[2025-09-11 12:18:43] [Rank 0] step:5541/10000 train_time:241683ms step_avg:43.62ms +[2025-09-11 12:18:43] [Rank 0] step:5561/10000 train_time:242374ms step_avg:43.58ms +[2025-09-11 12:18:43] [Rank 0] step:5561/10000 train_time:242374ms step_avg:43.58ms +[2025-09-11 12:18:44] [Rank 0] step:5581/10000 train_time:243064ms step_avg:43.55ms +[2025-09-11 12:18:44] [Rank 0] step:5581/10000 train_time:243064ms step_avg:43.55ms +[2025-09-11 12:18:45] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 12:18:45] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 12:18:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 12:18:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 12:18:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 12:18:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 12:18:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:18:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:18:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 12:18:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 12:18:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 12:18:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 12:18:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 12:18:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 12:18:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 12:18:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 12:18:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 12:18:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 12:18:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 12:18:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 12:18:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 12:18:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 12:18:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 12:18:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 12:18:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 12:18:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 12:18:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 12:18:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 12:18:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 12:18:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 12:18:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 12:18:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 12:18:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 12:18:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 12:18:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 12:18:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 12:18:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 12:18:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 12:18:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 12:18:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 12:18:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 12:18:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 12:18:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 12:18:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 12:18:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 12:18:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 12:18:55] [Rank 0] PRINT: step:5600/10000 val_loss:5.0909 total_sharp:1.2672e-02 L1_sharp:9.5773e-03 L2_sharp:7.6401e-03 L3_sharp:7.6388e-03 L4_sharp:8.0481e-03 L5_sharp:1.1417e-02 L6_sharp:1.7335e-02 L7_sharp:2.3103e-02 L8_sharp:3.9306e-02 L9_sharp:6.6281e-02 L10_sharp:9.2985e-02 L11_sharp:1.3890e-01 L12_sharp:3.4613e-01 total_fnorm:4.4062e+00 total_l1_linf:5.6960e+03 total_spectral:2.2344e+00 L1_fnorm:2.4805e-01 L2_fnorm:2.4609e-01 L3_fnorm:2.4609e-01 L4_fnorm:2.4414e-01 L5_fnorm:2.4316e-01 L6_fnorm:2.4219e-01 L7_fnorm:2.4219e-01 L8_fnorm:2.4219e-01 L9_fnorm:2.4316e-01 L10_fnorm:2.4414e-01 L11_fnorm:2.4609e-01 L12_fnorm:2.3926e-01 L1_l1linf:6.3965e-02 L2_l1linf:6.3965e-02 L3_l1linf:6.2988e-02 L4_l1linf:6.3477e-02 L5_l1linf:6.0547e-02 L6_l1linf:6.0303e-02 L7_l1linf:6.0547e-02 L8_l1linf:5.9570e-02 L9_l1linf:6.1035e-02 L10_l1linf:6.1279e-02 L11_l1linf:6.2500e-02 L12_l1linf:4.5654e-02 L1_spectral:3.2429e-03 L2_spectral:3.2367e-03 L3_spectral:3.2237e-03 L4_spectral:3.2032e-03 L5_spectral:3.2109e-03 L6_spectral:3.2011e-03 L7_spectral:3.1917e-03 L8_spectral:3.1885e-03 L9_spectral:3.1954e-03 L10_spectral:3.2017e-03 L11_spectral:3.1842e-03 L12_spectral:3.1805e-03 train_time:243735ms step_avg:43.52ms +[2025-09-11 12:18:55] [Rank 0] PRINT: step:5600/10000 
val_loss:5.0909 total_sharp:1.2672e-02 L1_sharp:9.5773e-03 L2_sharp:7.6401e-03 L3_sharp:7.6388e-03 L4_sharp:8.0481e-03 L5_sharp:1.1417e-02 L6_sharp:1.7335e-02 L7_sharp:2.3103e-02 L8_sharp:3.9306e-02 L9_sharp:6.6281e-02 L10_sharp:9.2985e-02 L11_sharp:1.3890e-01 L12_sharp:3.4613e-01 total_fnorm:4.4062e+00 total_l1_linf:5.6960e+03 total_spectral:2.2344e+00 L1_fnorm:2.4805e-01 L2_fnorm:2.4609e-01 L3_fnorm:2.4609e-01 L4_fnorm:2.4414e-01 L5_fnorm:2.4316e-01 L6_fnorm:2.4219e-01 L7_fnorm:2.4219e-01 L8_fnorm:2.4219e-01 L9_fnorm:2.4316e-01 L10_fnorm:2.4414e-01 L11_fnorm:2.4609e-01 L12_fnorm:2.3926e-01 L1_l1linf:6.3965e-02 L2_l1linf:6.3965e-02 L3_l1linf:6.2988e-02 L4_l1linf:6.3477e-02 L5_l1linf:6.0547e-02 L6_l1linf:6.0303e-02 L7_l1linf:6.0547e-02 L8_l1linf:5.9570e-02 L9_l1linf:6.1035e-02 L10_l1linf:6.1279e-02 L11_l1linf:6.2500e-02 L12_l1linf:4.5654e-02 L1_spectral:3.2429e-03 L2_spectral:3.2367e-03 L3_spectral:3.2237e-03 L4_spectral:3.2032e-03 L5_spectral:3.2109e-03 L6_spectral:3.2011e-03 L7_spectral:3.1917e-03 L8_spectral:3.1885e-03 L9_spectral:3.1954e-03 L10_spectral:3.2017e-03 L11_spectral:3.1842e-03 L12_spectral:3.1805e-03 train_time:243735ms step_avg:43.52ms +[2025-09-11 12:18:57] [Rank 0] step:5601/10000 train_time:245504ms step_avg:43.83ms +[2025-09-11 12:18:57] [Rank 0] step:5601/10000 train_time:245504ms step_avg:43.83ms +[2025-09-11 12:18:58] [Rank 0] step:5621/10000 train_time:246231ms step_avg:43.81ms +[2025-09-11 12:18:58] [Rank 0] step:5621/10000 train_time:246231ms step_avg:43.81ms +[2025-09-11 12:18:59] [Rank 0] step:5641/10000 train_time:246921ms step_avg:43.77ms +[2025-09-11 12:18:59] [Rank 0] step:5641/10000 train_time:246921ms step_avg:43.77ms +[2025-09-11 12:18:59] [Rank 0] step:5661/10000 train_time:247611ms step_avg:43.74ms +[2025-09-11 12:18:59] [Rank 0] step:5661/10000 train_time:247611ms step_avg:43.74ms +[2025-09-11 12:19:00] [Rank 0] step:5681/10000 train_time:248302ms step_avg:43.71ms +[2025-09-11 12:19:00] [Rank 0] step:5681/10000 
train_time:248302ms step_avg:43.71ms +[2025-09-11 12:19:01] [Rank 0] step:5701/10000 train_time:248995ms step_avg:43.68ms +[2025-09-11 12:19:01] [Rank 0] step:5701/10000 train_time:248995ms step_avg:43.68ms +[2025-09-11 12:19:01] [Rank 0] step:5721/10000 train_time:249685ms step_avg:43.64ms +[2025-09-11 12:19:01] [Rank 0] step:5721/10000 train_time:249685ms step_avg:43.64ms +[2025-09-11 12:19:02] [Rank 0] step:5741/10000 train_time:250375ms step_avg:43.61ms +[2025-09-11 12:19:02] [Rank 0] step:5741/10000 train_time:250375ms step_avg:43.61ms +[2025-09-11 12:19:03] [Rank 0] step:5761/10000 train_time:251066ms step_avg:43.58ms +[2025-09-11 12:19:03] [Rank 0] step:5761/10000 train_time:251066ms step_avg:43.58ms +[2025-09-11 12:19:03] [Rank 0] step:5781/10000 train_time:251758ms step_avg:43.55ms +[2025-09-11 12:19:03] [Rank 0] step:5781/10000 train_time:251758ms step_avg:43.55ms +[2025-09-11 12:19:04] [Rank 0] step:5801/10000 train_time:252451ms step_avg:43.52ms +[2025-09-11 12:19:04] [Rank 0] step:5801/10000 train_time:252451ms step_avg:43.52ms +[2025-09-11 12:19:05] [Rank 0] step:5821/10000 train_time:253141ms step_avg:43.49ms +[2025-09-11 12:19:05] [Rank 0] step:5821/10000 train_time:253141ms step_avg:43.49ms +[2025-09-11 12:19:05] [Rank 0] step:5841/10000 train_time:253832ms step_avg:43.46ms +[2025-09-11 12:19:05] [Rank 0] step:5841/10000 train_time:253832ms step_avg:43.46ms +[2025-09-11 12:19:06] [Rank 0] step:5861/10000 train_time:254522ms step_avg:43.43ms +[2025-09-11 12:19:06] [Rank 0] step:5861/10000 train_time:254522ms step_avg:43.43ms +[2025-09-11 12:19:07] [Rank 0] step:5881/10000 train_time:255212ms step_avg:43.40ms +[2025-09-11 12:19:07] [Rank 0] step:5881/10000 train_time:255212ms step_avg:43.40ms +[2025-09-11 12:19:08] [Rank 0] step:5901/10000 train_time:255902ms step_avg:43.37ms +[2025-09-11 12:19:08] [Rank 0] step:5901/10000 train_time:255902ms step_avg:43.37ms +[2025-09-11 12:19:08] [Rank 0] step:5921/10000 train_time:256593ms step_avg:43.34ms 
+[2025-09-11 12:19:08] [Rank 0] step:5921/10000 train_time:256593ms step_avg:43.34ms +[2025-09-11 12:19:09] [Rank 0] step:5941/10000 train_time:257285ms step_avg:43.31ms +[2025-09-11 12:19:09] [Rank 0] step:5941/10000 train_time:257285ms step_avg:43.31ms +[2025-09-11 12:19:10] [Rank 0] step:5961/10000 train_time:257978ms step_avg:43.28ms +[2025-09-11 12:19:10] [Rank 0] step:5961/10000 train_time:257978ms step_avg:43.28ms +[2025-09-11 12:19:10] [Rank 0] step:5981/10000 train_time:258670ms step_avg:43.25ms +[2025-09-11 12:19:10] [Rank 0] step:5981/10000 train_time:258670ms step_avg:43.25ms +[2025-09-11 12:19:11] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 12:19:11] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 12:19:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 12:19:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 12:19:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 12:19:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 12:19:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:19:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:19:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 12:19:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 12:19:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 12:19:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 12:19:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 12:19:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 12:19:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 12:19:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 12:19:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 12:19:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 12:19:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 12:19:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 12:19:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 12:19:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 12:19:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 12:19:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 12:19:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 12:19:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 12:19:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 12:19:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 12:19:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 12:19:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 12:19:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 12:19:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 12:19:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 12:19:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 12:19:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 12:19:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 12:19:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 12:19:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 12:19:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 12:19:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 12:19:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 12:19:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 12:19:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 12:19:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 12:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 12:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:19:22] [Rank 0] PRINT: step:6000/10000 val_loss:5.0538 total_sharp:1.4059e-02 L1_sharp:7.5639e-03 L2_sharp:5.5014e-03 L3_sharp:6.7918e-03 L4_sharp:5.4395e-03 L5_sharp:9.3667e-03 L6_sharp:1.6059e-02 L7_sharp:2.1947e-02 L8_sharp:4.1838e-02 L9_sharp:6.3986e-02 L10_sharp:8.4762e-02 L11_sharp:1.4116e-01 L12_sharp:7.7427e-01 total_fnorm:4.3438e+00 total_l1_linf:5.5680e+03 total_spectral:2.2188e+00 L1_fnorm:2.4805e-01 L2_fnorm:2.4609e-01 L3_fnorm:2.4512e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4316e-01 L6_fnorm:2.4316e-01 L7_fnorm:2.4219e-01 L8_fnorm:2.4121e-01 L9_fnorm:2.4219e-01 L10_fnorm:2.4316e-01 L11_fnorm:2.4512e-01 L12_fnorm:2.3926e-01 L1_l1linf:6.3965e-02 L2_l1linf:6.3965e-02 L3_l1linf:6.2012e-02 L4_l1linf:6.1523e-02 L5_l1linf:6.1035e-02 L6_l1linf:6.0547e-02 L7_l1linf:5.9082e-02 L8_l1linf:5.8350e-02 L9_l1linf:5.7617e-02 L10_l1linf:5.9326e-02 L11_l1linf:6.2012e-02 L12_l1linf:4.5654e-02 L1_spectral:3.2638e-03 L2_spectral:3.2683e-03 L3_spectral:3.2353e-03 L4_spectral:3.2053e-03 L5_spectral:3.2104e-03 L6_spectral:3.2290e-03 L7_spectral:3.2422e-03 L8_spectral:3.1904e-03 L9_spectral:3.2087e-03 L10_spectral:3.2106e-03 L11_spectral:3.2108e-03 L12_spectral:3.1689e-03 train_time:259345ms step_avg:43.22ms +[2025-09-11 12:19:22] [Rank 0] PRINT: step:6000/10000 val_loss:5.0538 total_sharp:1.4059e-02 L1_sharp:7.5639e-03 L2_sharp:5.5014e-03 L3_sharp:6.7918e-03 L4_sharp:5.4395e-03 L5_sharp:9.3667e-03 L6_sharp:1.6059e-02 L7_sharp:2.1947e-02 L8_sharp:4.1838e-02 L9_sharp:6.3986e-02 L10_sharp:8.4762e-02 L11_sharp:1.4116e-01 L12_sharp:7.7427e-01 total_fnorm:4.3438e+00 total_l1_linf:5.5680e+03 total_spectral:2.2188e+00 L1_fnorm:2.4805e-01 L2_fnorm:2.4609e-01 L3_fnorm:2.4512e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4316e-01 L6_fnorm:2.4316e-01 L7_fnorm:2.4219e-01 L8_fnorm:2.4121e-01 L9_fnorm:2.4219e-01 L10_fnorm:2.4316e-01 L11_fnorm:2.4512e-01 L12_fnorm:2.3926e-01 L1_l1linf:6.3965e-02 L2_l1linf:6.3965e-02 L3_l1linf:6.2012e-02 L4_l1linf:6.1523e-02 L5_l1linf:6.1035e-02 
L6_l1linf:6.0547e-02 L7_l1linf:5.9082e-02 L8_l1linf:5.8350e-02 L9_l1linf:5.7617e-02 L10_l1linf:5.9326e-02 L11_l1linf:6.2012e-02 L12_l1linf:4.5654e-02 L1_spectral:3.2638e-03 L2_spectral:3.2683e-03 L3_spectral:3.2353e-03 L4_spectral:3.2053e-03 L5_spectral:3.2104e-03 L6_spectral:3.2290e-03 L7_spectral:3.2422e-03 L8_spectral:3.1904e-03 L9_spectral:3.2087e-03 L10_spectral:3.2106e-03 L11_spectral:3.2108e-03 L12_spectral:3.1689e-03 train_time:259345ms step_avg:43.22ms +[2025-09-11 12:19:24] [Rank 0] step:6001/10000 train_time:261110ms step_avg:43.51ms +[2025-09-11 12:19:24] [Rank 0] step:6001/10000 train_time:261110ms step_avg:43.51ms +[2025-09-11 12:19:24] [Rank 0] step:6021/10000 train_time:261822ms step_avg:43.48ms +[2025-09-11 12:19:24] [Rank 0] step:6021/10000 train_time:261822ms step_avg:43.48ms +[2025-09-11 12:19:25] [Rank 0] step:6041/10000 train_time:262517ms step_avg:43.46ms +[2025-09-11 12:19:25] [Rank 0] step:6041/10000 train_time:262517ms step_avg:43.46ms +[2025-09-11 12:19:26] [Rank 0] step:6061/10000 train_time:263209ms step_avg:43.43ms +[2025-09-11 12:19:26] [Rank 0] step:6061/10000 train_time:263209ms step_avg:43.43ms +[2025-09-11 12:19:26] [Rank 0] step:6081/10000 train_time:263902ms step_avg:43.40ms +[2025-09-11 12:19:26] [Rank 0] step:6081/10000 train_time:263902ms step_avg:43.40ms +[2025-09-11 12:19:27] [Rank 0] step:6101/10000 train_time:264595ms step_avg:43.37ms +[2025-09-11 12:19:27] [Rank 0] step:6101/10000 train_time:264595ms step_avg:43.37ms +[2025-09-11 12:19:28] [Rank 0] step:6121/10000 train_time:265289ms step_avg:43.34ms +[2025-09-11 12:19:28] [Rank 0] step:6121/10000 train_time:265289ms step_avg:43.34ms +[2025-09-11 12:19:28] [Rank 0] step:6141/10000 train_time:265982ms step_avg:43.31ms +[2025-09-11 12:19:28] [Rank 0] step:6141/10000 train_time:265982ms step_avg:43.31ms +[2025-09-11 12:19:29] [Rank 0] step:6161/10000 train_time:266675ms step_avg:43.28ms +[2025-09-11 12:19:29] [Rank 0] step:6161/10000 train_time:266675ms step_avg:43.28ms 
+[2025-09-11 12:19:30] [Rank 0] step:6181/10000 train_time:267365ms step_avg:43.26ms +[2025-09-11 12:19:30] [Rank 0] step:6181/10000 train_time:267365ms step_avg:43.26ms +[2025-09-11 12:19:31] [Rank 0] step:6201/10000 train_time:268060ms step_avg:43.23ms +[2025-09-11 12:19:31] [Rank 0] step:6201/10000 train_time:268060ms step_avg:43.23ms +[2025-09-11 12:19:31] [Rank 0] step:6221/10000 train_time:269023ms step_avg:43.24ms +[2025-09-11 12:19:31] [Rank 0] step:6221/10000 train_time:269023ms step_avg:43.24ms +[2025-09-11 12:19:32] [Rank 0] step:6241/10000 train_time:269717ms step_avg:43.22ms +[2025-09-11 12:19:32] [Rank 0] step:6241/10000 train_time:269717ms step_avg:43.22ms +[2025-09-11 12:19:33] [Rank 0] step:6261/10000 train_time:270407ms step_avg:43.19ms +[2025-09-11 12:19:33] [Rank 0] step:6261/10000 train_time:270407ms step_avg:43.19ms +[2025-09-11 12:19:34] [Rank 0] step:6281/10000 train_time:271399ms step_avg:43.21ms +[2025-09-11 12:19:34] [Rank 0] step:6281/10000 train_time:271399ms step_avg:43.21ms +[2025-09-11 12:19:35] [Rank 0] step:6301/10000 train_time:272093ms step_avg:43.18ms +[2025-09-11 12:19:35] [Rank 0] step:6301/10000 train_time:272093ms step_avg:43.18ms +[2025-09-11 12:19:35] [Rank 0] step:6321/10000 train_time:272789ms step_avg:43.16ms +[2025-09-11 12:19:35] [Rank 0] step:6321/10000 train_time:272789ms step_avg:43.16ms +[2025-09-11 12:19:36] [Rank 0] step:6341/10000 train_time:273483ms step_avg:43.13ms +[2025-09-11 12:19:36] [Rank 0] step:6341/10000 train_time:273483ms step_avg:43.13ms +[2025-09-11 12:19:37] [Rank 0] step:6361/10000 train_time:274176ms step_avg:43.10ms +[2025-09-11 12:19:37] [Rank 0] step:6361/10000 train_time:274176ms step_avg:43.10ms +[2025-09-11 12:19:37] [Rank 0] step:6381/10000 train_time:274870ms step_avg:43.08ms +[2025-09-11 12:19:37] [Rank 0] step:6381/10000 train_time:274870ms step_avg:43.08ms +[2025-09-11 12:19:38] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 12:19:38] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 12:19:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 12:19:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 12:19:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 12:19:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 12:19:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:19:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:19:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 12:19:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 12:19:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 12:19:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 12:19:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 12:19:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 12:19:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 12:19:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 12:19:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 12:19:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 12:19:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 12:19:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 12:19:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 12:19:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 12:19:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 12:19:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 12:19:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 12:19:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 12:19:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 12:19:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 12:19:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 12:19:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 12:19:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 12:19:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 12:19:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 12:19:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 12:19:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 12:19:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 12:19:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 12:19:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 12:19:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 12:19:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 12:19:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 12:19:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 12:19:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 12:19:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 12:19:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 12:19:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 12:19:49] [Rank 0] PRINT: step:6400/10000 val_loss:5.0225 total_sharp:1.2354e-02 L1_sharp:9.9215e-03 L2_sharp:6.6718e-03 L3_sharp:6.1907e-03 L4_sharp:8.0230e-03 L5_sharp:1.2236e-02 L6_sharp:1.5863e-02 L7_sharp:2.1740e-02 L8_sharp:3.8167e-02 L9_sharp:5.8742e-02 L10_sharp:7.7927e-02 L11_sharp:1.3618e-01 L12_sharp:3.9380e-01 total_fnorm:3.8281e+00 total_l1_linf:4.6400e+03 total_spectral:1.9453e+00 L1_fnorm:2.2168e-01 L2_fnorm:2.1973e-01 L3_fnorm:2.1973e-01 L4_fnorm:2.1777e-01 L5_fnorm:2.1680e-01 L6_fnorm:2.1680e-01 L7_fnorm:2.1582e-01 L8_fnorm:2.1582e-01 L9_fnorm:2.1680e-01 L10_fnorm:2.1680e-01 L11_fnorm:2.1777e-01 L12_fnorm:2.1289e-01 L1_l1linf:5.5908e-02 L2_l1linf:5.5176e-02 L3_l1linf:5.3711e-02 L4_l1linf:5.4443e-02 L5_l1linf:5.2246e-02 L6_l1linf:5.1270e-02 L7_l1linf:5.0293e-02 L8_l1linf:5.1025e-02 L9_l1linf:5.0537e-02 L10_l1linf:5.0537e-02 L11_l1linf:5.3223e-02 L12_l1linf:3.9551e-02 L1_spectral:2.9429e-03 L2_spectral:2.9365e-03 L3_spectral:2.9250e-03 L4_spectral:2.9099e-03 L5_spectral:2.9121e-03 L6_spectral:2.9046e-03 L7_spectral:2.9206e-03 L8_spectral:2.9042e-03 L9_spectral:2.9058e-03 L10_spectral:2.9141e-03 L11_spectral:2.8996e-03 L12_spectral:2.8979e-03 train_time:275543ms step_avg:43.05ms +[2025-09-11 12:19:49] [Rank 0] PRINT: step:6400/10000 
val_loss:5.0225 total_sharp:1.2354e-02 L1_sharp:9.9215e-03 L2_sharp:6.6718e-03 L3_sharp:6.1907e-03 L4_sharp:8.0230e-03 L5_sharp:1.2236e-02 L6_sharp:1.5863e-02 L7_sharp:2.1740e-02 L8_sharp:3.8167e-02 L9_sharp:5.8742e-02 L10_sharp:7.7927e-02 L11_sharp:1.3618e-01 L12_sharp:3.9380e-01 total_fnorm:3.8281e+00 total_l1_linf:4.6400e+03 total_spectral:1.9453e+00 L1_fnorm:2.2168e-01 L2_fnorm:2.1973e-01 L3_fnorm:2.1973e-01 L4_fnorm:2.1777e-01 L5_fnorm:2.1680e-01 L6_fnorm:2.1680e-01 L7_fnorm:2.1582e-01 L8_fnorm:2.1582e-01 L9_fnorm:2.1680e-01 L10_fnorm:2.1680e-01 L11_fnorm:2.1777e-01 L12_fnorm:2.1289e-01 L1_l1linf:5.5908e-02 L2_l1linf:5.5176e-02 L3_l1linf:5.3711e-02 L4_l1linf:5.4443e-02 L5_l1linf:5.2246e-02 L6_l1linf:5.1270e-02 L7_l1linf:5.0293e-02 L8_l1linf:5.1025e-02 L9_l1linf:5.0537e-02 L10_l1linf:5.0537e-02 L11_l1linf:5.3223e-02 L12_l1linf:3.9551e-02 L1_spectral:2.9429e-03 L2_spectral:2.9365e-03 L3_spectral:2.9250e-03 L4_spectral:2.9099e-03 L5_spectral:2.9121e-03 L6_spectral:2.9046e-03 L7_spectral:2.9206e-03 L8_spectral:2.9042e-03 L9_spectral:2.9058e-03 L10_spectral:2.9141e-03 L11_spectral:2.8996e-03 L12_spectral:2.8979e-03 train_time:275543ms step_avg:43.05ms +[2025-09-11 12:19:51] [Rank 0] step:6401/10000 train_time:277517ms step_avg:43.36ms +[2025-09-11 12:19:51] [Rank 0] step:6401/10000 train_time:277517ms step_avg:43.36ms +[2025-09-11 12:19:51] [Rank 0] step:6421/10000 train_time:278245ms step_avg:43.33ms +[2025-09-11 12:19:51] [Rank 0] step:6421/10000 train_time:278245ms step_avg:43.33ms +[2025-09-11 12:19:52] [Rank 0] step:6441/10000 train_time:278948ms step_avg:43.31ms +[2025-09-11 12:19:52] [Rank 0] step:6441/10000 train_time:278948ms step_avg:43.31ms +[2025-09-11 12:19:53] [Rank 0] step:6461/10000 train_time:279641ms step_avg:43.28ms +[2025-09-11 12:19:53] [Rank 0] step:6461/10000 train_time:279641ms step_avg:43.28ms +[2025-09-11 12:19:54] [Rank 0] step:6481/10000 train_time:280335ms step_avg:43.25ms +[2025-09-11 12:19:54] [Rank 0] step:6481/10000 
train_time:280335ms step_avg:43.25ms +[2025-09-11 12:19:54] [Rank 0] step:6501/10000 train_time:281030ms step_avg:43.23ms +[2025-09-11 12:19:54] [Rank 0] step:6501/10000 train_time:281030ms step_avg:43.23ms +[2025-09-11 12:19:55] [Rank 0] step:6521/10000 train_time:281722ms step_avg:43.20ms +[2025-09-11 12:19:55] [Rank 0] step:6521/10000 train_time:281722ms step_avg:43.20ms +[2025-09-11 12:19:56] [Rank 0] step:6541/10000 train_time:282413ms step_avg:43.18ms +[2025-09-11 12:19:56] [Rank 0] step:6541/10000 train_time:282413ms step_avg:43.18ms +[2025-09-11 12:19:56] [Rank 0] step:6561/10000 train_time:283106ms step_avg:43.15ms +[2025-09-11 12:19:56] [Rank 0] step:6561/10000 train_time:283106ms step_avg:43.15ms +[2025-09-11 12:19:57] [Rank 0] step:6581/10000 train_time:283800ms step_avg:43.12ms +[2025-09-11 12:19:57] [Rank 0] step:6581/10000 train_time:283800ms step_avg:43.12ms +[2025-09-11 12:19:58] [Rank 0] step:6601/10000 train_time:284492ms step_avg:43.10ms +[2025-09-11 12:19:58] [Rank 0] step:6601/10000 train_time:284492ms step_avg:43.10ms +[2025-09-11 12:19:58] [Rank 0] step:6621/10000 train_time:285183ms step_avg:43.07ms +[2025-09-11 12:19:58] [Rank 0] step:6621/10000 train_time:285183ms step_avg:43.07ms +[2025-09-11 12:19:59] [Rank 0] step:6641/10000 train_time:285876ms step_avg:43.05ms +[2025-09-11 12:19:59] [Rank 0] step:6641/10000 train_time:285876ms step_avg:43.05ms +[2025-09-11 12:20:00] [Rank 0] step:6661/10000 train_time:286570ms step_avg:43.02ms +[2025-09-11 12:20:00] [Rank 0] step:6661/10000 train_time:286570ms step_avg:43.02ms +[2025-09-11 12:20:01] [Rank 0] step:6681/10000 train_time:287269ms step_avg:43.00ms +[2025-09-11 12:20:01] [Rank 0] step:6681/10000 train_time:287269ms step_avg:43.00ms +[2025-09-11 12:20:01] [Rank 0] step:6701/10000 train_time:287968ms step_avg:42.97ms +[2025-09-11 12:20:01] [Rank 0] step:6701/10000 train_time:287968ms step_avg:42.97ms +[2025-09-11 12:20:02] [Rank 0] step:6721/10000 train_time:288668ms step_avg:42.95ms 
+[2025-09-11 12:20:02] [Rank 0] step:6721/10000 train_time:288668ms step_avg:42.95ms +[2025-09-11 12:20:03] [Rank 0] step:6741/10000 train_time:289367ms step_avg:42.93ms +[2025-09-11 12:20:03] [Rank 0] step:6741/10000 train_time:289367ms step_avg:42.93ms +[2025-09-11 12:20:03] [Rank 0] step:6761/10000 train_time:290065ms step_avg:42.90ms +[2025-09-11 12:20:03] [Rank 0] step:6761/10000 train_time:290065ms step_avg:42.90ms +[2025-09-11 12:20:04] [Rank 0] step:6781/10000 train_time:290813ms step_avg:42.89ms +[2025-09-11 12:20:04] [Rank 0] step:6781/10000 train_time:290813ms step_avg:42.89ms +[2025-09-11 12:20:05] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 12:20:05] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 12:20:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 12:20:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 12:20:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 12:20:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 12:20:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:20:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:20:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 12:20:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 12:20:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 12:20:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 12:20:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 12:20:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 12:20:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 12:20:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 12:20:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 12:20:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 12:20:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 12:20:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 12:20:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 12:20:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 12:20:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 12:20:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 12:20:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 12:20:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 12:20:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 12:20:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 12:20:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 12:20:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 12:20:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 12:20:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 12:20:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 12:20:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 12:20:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 12:20:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 12:20:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 12:20:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 12:20:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 12:20:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 12:20:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 12:20:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 12:20:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 12:20:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 12:20:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 12:20:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:20:17] [Rank 0] PRINT: step:6800/10000 val_loss:4.9916 total_sharp:9.0601e-03 L1_sharp:8.1972e-03 L2_sharp:5.0744e-03 L3_sharp:4.3953e-03 L4_sharp:6.0985e-03 L5_sharp:7.6405e-03 L6_sharp:1.2771e-02 L7_sharp:2.0103e-02 L8_sharp:3.1954e-02 L9_sharp:4.6075e-02 L10_sharp:6.8484e-02 L11_sharp:1.1530e-01 L12_sharp:3.2170e-01 total_fnorm:3.4844e+00 total_l1_linf:4.0000e+03 total_spectral:1.7734e+00 L1_fnorm:1.9434e-01 L2_fnorm:1.9238e-01 L3_fnorm:1.9141e-01 L4_fnorm:1.9043e-01 L5_fnorm:1.8945e-01 L6_fnorm:1.8945e-01 L7_fnorm:1.8945e-01 L8_fnorm:1.8750e-01 L9_fnorm:1.8848e-01 L10_fnorm:1.8848e-01 L11_fnorm:1.8945e-01 L12_fnorm:1.8457e-01 L1_l1linf:4.8340e-02 L2_l1linf:4.7607e-02 L3_l1linf:4.5410e-02 L4_l1linf:4.5410e-02 L5_l1linf:4.4189e-02 L6_l1linf:4.2969e-02 L7_l1linf:4.2236e-02 L8_l1linf:4.1504e-02 L9_l1linf:4.1260e-02 L10_l1linf:4.1748e-02 L11_l1linf:4.2969e-02 L12_l1linf:3.3447e-02 L1_spectral:2.6020e-03 L2_spectral:2.5977e-03 L3_spectral:2.5799e-03 L4_spectral:2.5806e-03 L5_spectral:2.5976e-03 L6_spectral:2.5784e-03 L7_spectral:2.6214e-03 L8_spectral:2.6053e-03 L9_spectral:2.5957e-03 L10_spectral:2.6014e-03 L11_spectral:2.6088e-03 L12_spectral:2.5952e-03 train_time:291543ms step_avg:42.87ms +[2025-09-11 12:20:17] [Rank 0] PRINT: step:6800/10000 val_loss:4.9916 total_sharp:9.0601e-03 L1_sharp:8.1972e-03 L2_sharp:5.0744e-03 L3_sharp:4.3953e-03 L4_sharp:6.0985e-03 L5_sharp:7.6405e-03 L6_sharp:1.2771e-02 L7_sharp:2.0103e-02 L8_sharp:3.1954e-02 L9_sharp:4.6075e-02 L10_sharp:6.8484e-02 L11_sharp:1.1530e-01 L12_sharp:3.2170e-01 total_fnorm:3.4844e+00 total_l1_linf:4.0000e+03 total_spectral:1.7734e+00 L1_fnorm:1.9434e-01 L2_fnorm:1.9238e-01 L3_fnorm:1.9141e-01 L4_fnorm:1.9043e-01 L5_fnorm:1.8945e-01 L6_fnorm:1.8945e-01 L7_fnorm:1.8945e-01 L8_fnorm:1.8750e-01 L9_fnorm:1.8848e-01 L10_fnorm:1.8848e-01 L11_fnorm:1.8945e-01 L12_fnorm:1.8457e-01 L1_l1linf:4.8340e-02 L2_l1linf:4.7607e-02 L3_l1linf:4.5410e-02 L4_l1linf:4.5410e-02 L5_l1linf:4.4189e-02 
L6_l1linf:4.2969e-02 L7_l1linf:4.2236e-02 L8_l1linf:4.1504e-02 L9_l1linf:4.1260e-02 L10_l1linf:4.1748e-02 L11_l1linf:4.2969e-02 L12_l1linf:3.3447e-02 L1_spectral:2.6020e-03 L2_spectral:2.5977e-03 L3_spectral:2.5799e-03 L4_spectral:2.5806e-03 L5_spectral:2.5976e-03 L6_spectral:2.5784e-03 L7_spectral:2.6214e-03 L8_spectral:2.6053e-03 L9_spectral:2.5957e-03 L10_spectral:2.6014e-03 L11_spectral:2.6088e-03 L12_spectral:2.5952e-03 train_time:291543ms step_avg:42.87ms +[2025-09-11 12:20:19] [Rank 0] step:6801/10000 train_time:293565ms step_avg:43.17ms +[2025-09-11 12:20:19] [Rank 0] step:6801/10000 train_time:293565ms step_avg:43.17ms +[2025-09-11 12:20:20] [Rank 0] step:6821/10000 train_time:294290ms step_avg:43.14ms +[2025-09-11 12:20:20] [Rank 0] step:6821/10000 train_time:294290ms step_avg:43.14ms +[2025-09-11 12:20:20] [Rank 0] step:6841/10000 train_time:294992ms step_avg:43.12ms +[2025-09-11 12:20:20] [Rank 0] step:6841/10000 train_time:294992ms step_avg:43.12ms +[2025-09-11 12:20:21] [Rank 0] step:6861/10000 train_time:295692ms step_avg:43.10ms +[2025-09-11 12:20:21] [Rank 0] step:6861/10000 train_time:295692ms step_avg:43.10ms +[2025-09-11 12:20:22] [Rank 0] step:6881/10000 train_time:296394ms step_avg:43.07ms +[2025-09-11 12:20:22] [Rank 0] step:6881/10000 train_time:296394ms step_avg:43.07ms +[2025-09-11 12:20:23] [Rank 0] step:6901/10000 train_time:297094ms step_avg:43.05ms +[2025-09-11 12:20:23] [Rank 0] step:6901/10000 train_time:297094ms step_avg:43.05ms +[2025-09-11 12:20:23] [Rank 0] step:6921/10000 train_time:297792ms step_avg:43.03ms +[2025-09-11 12:20:23] [Rank 0] step:6921/10000 train_time:297792ms step_avg:43.03ms +[2025-09-11 12:20:24] [Rank 0] step:6941/10000 train_time:298492ms step_avg:43.00ms +[2025-09-11 12:20:24] [Rank 0] step:6941/10000 train_time:298492ms step_avg:43.00ms +[2025-09-11 12:20:25] [Rank 0] step:6961/10000 train_time:299194ms step_avg:42.98ms +[2025-09-11 12:20:25] [Rank 0] step:6961/10000 train_time:299194ms step_avg:42.98ms 
+[2025-09-11 12:20:25] [Rank 0] step:6981/10000 train_time:299896ms step_avg:42.96ms +[2025-09-11 12:20:25] [Rank 0] step:6981/10000 train_time:299896ms step_avg:42.96ms +[2025-09-11 12:20:26] [Rank 0] step:7001/10000 train_time:300596ms step_avg:42.94ms +[2025-09-11 12:20:26] [Rank 0] step:7001/10000 train_time:300596ms step_avg:42.94ms +[2025-09-11 12:20:27] [Rank 0] step:7021/10000 train_time:301295ms step_avg:42.91ms +[2025-09-11 12:20:27] [Rank 0] step:7021/10000 train_time:301295ms step_avg:42.91ms +[2025-09-11 12:20:27] [Rank 0] step:7041/10000 train_time:301994ms step_avg:42.89ms +[2025-09-11 12:20:27] [Rank 0] step:7041/10000 train_time:301994ms step_avg:42.89ms +[2025-09-11 12:20:28] [Rank 0] step:7061/10000 train_time:302695ms step_avg:42.87ms +[2025-09-11 12:20:28] [Rank 0] step:7061/10000 train_time:302695ms step_avg:42.87ms +[2025-09-11 12:20:29] [Rank 0] step:7081/10000 train_time:303395ms step_avg:42.85ms +[2025-09-11 12:20:29] [Rank 0] step:7081/10000 train_time:303395ms step_avg:42.85ms +[2025-09-11 12:20:30] [Rank 0] step:7101/10000 train_time:304095ms step_avg:42.82ms +[2025-09-11 12:20:30] [Rank 0] step:7101/10000 train_time:304095ms step_avg:42.82ms +[2025-09-11 12:20:30] [Rank 0] step:7121/10000 train_time:304796ms step_avg:42.80ms +[2025-09-11 12:20:30] [Rank 0] step:7121/10000 train_time:304796ms step_avg:42.80ms +[2025-09-11 12:20:31] [Rank 0] step:7141/10000 train_time:305495ms step_avg:42.78ms +[2025-09-11 12:20:31] [Rank 0] step:7141/10000 train_time:305495ms step_avg:42.78ms +[2025-09-11 12:20:32] [Rank 0] step:7161/10000 train_time:306198ms step_avg:42.76ms +[2025-09-11 12:20:32] [Rank 0] step:7161/10000 train_time:306198ms step_avg:42.76ms +[2025-09-11 12:20:32] [Rank 0] step:7181/10000 train_time:306897ms step_avg:42.74ms +[2025-09-11 12:20:32] [Rank 0] step:7181/10000 train_time:306897ms step_avg:42.74ms +[2025-09-11 12:20:33] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 12:20:33] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 12:20:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 12:20:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 12:20:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 12:20:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 12:20:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:20:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:20:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 12:20:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 12:20:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 12:20:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 12:20:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 12:20:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 12:20:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 12:20:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 12:20:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 12:20:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 12:20:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 12:20:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 12:20:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 12:20:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 12:20:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 12:20:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 12:20:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 12:20:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 12:20:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 12:20:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 12:20:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 12:20:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 12:20:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 12:20:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 12:20:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 12:20:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 12:20:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 12:20:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 12:20:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 12:20:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 12:20:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 12:20:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 12:20:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 12:20:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 12:20:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 12:20:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 12:20:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 12:20:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 12:20:44] [Rank 0] PRINT: step:7200/10000 val_loss:4.9632 total_sharp:8.7913e-03 L1_sharp:6.4583e-03 L2_sharp:5.2442e-03 L3_sharp:2.2575e-03 L4_sharp:4.6755e-03 L5_sharp:4.8574e-03 L6_sharp:9.5055e-03 L7_sharp:1.7067e-02 L8_sharp:2.7084e-02 L9_sharp:4.5911e-02 L10_sharp:7.1873e-02 L11_sharp:1.1248e-01 L12_sharp:3.1958e-01 total_fnorm:2.8281e+00 total_l1_linf:3.0400e+03 total_spectral:1.4453e+00 L1_fnorm:1.6797e-01 L2_fnorm:1.6602e-01 L3_fnorm:1.6602e-01 L4_fnorm:1.6504e-01 L5_fnorm:1.6504e-01 L6_fnorm:1.6406e-01 L7_fnorm:1.6309e-01 L8_fnorm:1.6211e-01 L9_fnorm:1.6211e-01 L10_fnorm:1.6211e-01 L11_fnorm:1.6309e-01 L12_fnorm:1.5820e-01 L1_l1linf:3.9551e-02 L2_l1linf:4.0039e-02 L3_l1linf:3.8330e-02 L4_l1linf:3.8330e-02 L5_l1linf:3.6133e-02 L6_l1linf:3.6621e-02 L7_l1linf:3.5400e-02 L8_l1linf:3.4912e-02 L9_l1linf:3.3447e-02 L10_l1linf:3.3447e-02 L11_l1linf:3.4912e-02 L12_l1linf:2.6978e-02 L1_spectral:2.3102e-03 L2_spectral:2.3230e-03 L3_spectral:2.3367e-03 L4_spectral:2.3252e-03 L5_spectral:2.3123e-03 L6_spectral:2.3309e-03 L7_spectral:2.3036e-03 L8_spectral:2.2961e-03 L9_spectral:2.3053e-03 L10_spectral:2.2807e-03 L11_spectral:2.2767e-03 L12_spectral:2.2858e-03 train_time:307576ms step_avg:42.72ms +[2025-09-11 12:20:44] [Rank 0] PRINT: step:7200/10000 
val_loss:4.9632 total_sharp:8.7913e-03 L1_sharp:6.4583e-03 L2_sharp:5.2442e-03 L3_sharp:2.2575e-03 L4_sharp:4.6755e-03 L5_sharp:4.8574e-03 L6_sharp:9.5055e-03 L7_sharp:1.7067e-02 L8_sharp:2.7084e-02 L9_sharp:4.5911e-02 L10_sharp:7.1873e-02 L11_sharp:1.1248e-01 L12_sharp:3.1958e-01 total_fnorm:2.8281e+00 total_l1_linf:3.0400e+03 total_spectral:1.4453e+00 L1_fnorm:1.6797e-01 L2_fnorm:1.6602e-01 L3_fnorm:1.6602e-01 L4_fnorm:1.6504e-01 L5_fnorm:1.6504e-01 L6_fnorm:1.6406e-01 L7_fnorm:1.6309e-01 L8_fnorm:1.6211e-01 L9_fnorm:1.6211e-01 L10_fnorm:1.6211e-01 L11_fnorm:1.6309e-01 L12_fnorm:1.5820e-01 L1_l1linf:3.9551e-02 L2_l1linf:4.0039e-02 L3_l1linf:3.8330e-02 L4_l1linf:3.8330e-02 L5_l1linf:3.6133e-02 L6_l1linf:3.6621e-02 L7_l1linf:3.5400e-02 L8_l1linf:3.4912e-02 L9_l1linf:3.3447e-02 L10_l1linf:3.3447e-02 L11_l1linf:3.4912e-02 L12_l1linf:2.6978e-02 L1_spectral:2.3102e-03 L2_spectral:2.3230e-03 L3_spectral:2.3367e-03 L4_spectral:2.3252e-03 L5_spectral:2.3123e-03 L6_spectral:2.3309e-03 L7_spectral:2.3036e-03 L8_spectral:2.2961e-03 L9_spectral:2.3053e-03 L10_spectral:2.2807e-03 L11_spectral:2.2767e-03 L12_spectral:2.2858e-03 train_time:307576ms step_avg:42.72ms +[2025-09-11 12:20:46] [Rank 0] step:7201/10000 train_time:309659ms step_avg:43.00ms +[2025-09-11 12:20:46] [Rank 0] step:7201/10000 train_time:309659ms step_avg:43.00ms +[2025-09-11 12:20:47] [Rank 0] step:7221/10000 train_time:310380ms step_avg:42.98ms +[2025-09-11 12:20:47] [Rank 0] step:7221/10000 train_time:310380ms step_avg:42.98ms +[2025-09-11 12:20:48] [Rank 0] step:7241/10000 train_time:311082ms step_avg:42.96ms +[2025-09-11 12:20:48] [Rank 0] step:7241/10000 train_time:311082ms step_avg:42.96ms +[2025-09-11 12:20:48] [Rank 0] step:7261/10000 train_time:311786ms step_avg:42.94ms +[2025-09-11 12:20:48] [Rank 0] step:7261/10000 train_time:311786ms step_avg:42.94ms +[2025-09-11 12:20:49] [Rank 0] step:7281/10000 train_time:312493ms step_avg:42.92ms +[2025-09-11 12:20:49] [Rank 0] step:7281/10000 
train_time:312493ms step_avg:42.92ms +[2025-09-11 12:20:50] [Rank 0] step:7301/10000 train_time:313195ms step_avg:42.90ms +[2025-09-11 12:20:50] [Rank 0] step:7301/10000 train_time:313195ms step_avg:42.90ms +[2025-09-11 12:20:50] [Rank 0] step:7321/10000 train_time:313896ms step_avg:42.88ms +[2025-09-11 12:20:50] [Rank 0] step:7321/10000 train_time:313896ms step_avg:42.88ms +[2025-09-11 12:20:51] [Rank 0] step:7341/10000 train_time:314599ms step_avg:42.86ms +[2025-09-11 12:20:51] [Rank 0] step:7341/10000 train_time:314599ms step_avg:42.86ms +[2025-09-11 12:20:52] [Rank 0] step:7361/10000 train_time:315301ms step_avg:42.83ms +[2025-09-11 12:20:52] [Rank 0] step:7361/10000 train_time:315301ms step_avg:42.83ms +[2025-09-11 12:20:53] [Rank 0] step:7381/10000 train_time:316003ms step_avg:42.81ms +[2025-09-11 12:20:53] [Rank 0] step:7381/10000 train_time:316003ms step_avg:42.81ms +[2025-09-11 12:20:53] [Rank 0] step:7401/10000 train_time:316703ms step_avg:42.79ms +[2025-09-11 12:20:53] [Rank 0] step:7401/10000 train_time:316703ms step_avg:42.79ms +[2025-09-11 12:20:54] [Rank 0] step:7421/10000 train_time:317405ms step_avg:42.77ms +[2025-09-11 12:20:54] [Rank 0] step:7421/10000 train_time:317405ms step_avg:42.77ms +[2025-09-11 12:20:55] [Rank 0] step:7441/10000 train_time:318109ms step_avg:42.75ms +[2025-09-11 12:20:55] [Rank 0] step:7441/10000 train_time:318109ms step_avg:42.75ms +[2025-09-11 12:20:55] [Rank 0] step:7461/10000 train_time:318812ms step_avg:42.73ms +[2025-09-11 12:20:55] [Rank 0] step:7461/10000 train_time:318812ms step_avg:42.73ms +[2025-09-11 12:20:56] [Rank 0] step:7481/10000 train_time:319516ms step_avg:42.71ms +[2025-09-11 12:20:56] [Rank 0] step:7481/10000 train_time:319516ms step_avg:42.71ms +[2025-09-11 12:20:57] [Rank 0] step:7501/10000 train_time:320219ms step_avg:42.69ms +[2025-09-11 12:20:57] [Rank 0] step:7501/10000 train_time:320219ms step_avg:42.69ms +[2025-09-11 12:20:57] [Rank 0] step:7521/10000 train_time:320922ms step_avg:42.67ms 
+[2025-09-11 12:20:57] [Rank 0] step:7521/10000 train_time:320922ms step_avg:42.67ms +[2025-09-11 12:20:58] [Rank 0] step:7541/10000 train_time:321657ms step_avg:42.65ms +[2025-09-11 12:20:58] [Rank 0] step:7541/10000 train_time:321657ms step_avg:42.65ms +[2025-09-11 12:20:59] [Rank 0] step:7561/10000 train_time:322362ms step_avg:42.63ms +[2025-09-11 12:20:59] [Rank 0] step:7561/10000 train_time:322362ms step_avg:42.63ms +[2025-09-11 12:21:00] [Rank 0] step:7581/10000 train_time:323067ms step_avg:42.62ms +[2025-09-11 12:21:00] [Rank 0] step:7581/10000 train_time:323067ms step_avg:42.62ms +[2025-09-11 12:21:00] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 12:21:00] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 12:21:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 12:21:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 12:21:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 12:21:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 12:21:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:21:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:21:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 12:21:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 12:21:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 12:21:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 12:21:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 12:21:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 12:21:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 12:21:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 12:21:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 12:21:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 12:21:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 12:21:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 12:21:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 12:21:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 12:21:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 12:21:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 12:21:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 12:21:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 12:21:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 12:21:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 12:21:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 12:21:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 12:21:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 12:21:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 12:21:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 12:21:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 12:21:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 12:21:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 12:21:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 12:21:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 12:21:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 12:21:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 12:21:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 12:21:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 12:21:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 12:21:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 12:21:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 12:21:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:21:14] [Rank 0] PRINT: step:7600/10000 val_loss:4.9421 total_sharp:1.2574e-02 L1_sharp:6.1400e-03 L2_sharp:3.8870e-03 L3_sharp:3.1544e-03 L4_sharp:4.1376e-03 L5_sharp:8.4788e-03 L6_sharp:1.2820e-02 L7_sharp:1.6248e-02 L8_sharp:2.9252e-02 L9_sharp:4.9907e-02 L10_sharp:6.7619e-02 L11_sharp:1.1289e-01 L12_sharp:4.1034e-01 total_fnorm:2.1094e+00 total_l1_linf:2.2080e+03 total_spectral:1.0859e+00 L1_fnorm:1.4160e-01 L2_fnorm:1.3965e-01 L3_fnorm:1.3965e-01 L4_fnorm:1.3965e-01 L5_fnorm:1.3770e-01 L6_fnorm:1.3770e-01 L7_fnorm:1.3672e-01 L8_fnorm:1.3574e-01 L9_fnorm:1.3672e-01 L10_fnorm:1.3574e-01 L11_fnorm:1.3574e-01 L12_fnorm:1.3184e-01 L1_l1linf:3.2227e-02 L2_l1linf:3.1738e-02 L3_l1linf:3.0518e-02 L4_l1linf:2.9541e-02 L5_l1linf:2.8809e-02 L6_l1linf:2.8320e-02 L7_l1linf:2.8198e-02 L8_l1linf:2.7100e-02 L9_l1linf:2.6001e-02 L10_l1linf:2.6611e-02 L11_l1linf:2.7954e-02 L12_l1linf:2.1729e-02 L1_spectral:2.0203e-03 L2_spectral:2.0407e-03 L3_spectral:2.0203e-03 L4_spectral:2.0353e-03 L5_spectral:2.0170e-03 L6_spectral:2.0166e-03 L7_spectral:1.9998e-03 L8_spectral:1.9891e-03 L9_spectral:1.9990e-03 L10_spectral:1.9633e-03 L11_spectral:1.9551e-03 L12_spectral:1.9611e-03 train_time:323751ms step_avg:42.60ms +[2025-09-11 12:21:14] [Rank 0] PRINT: step:7600/10000 val_loss:4.9421 total_sharp:1.2574e-02 L1_sharp:6.1400e-03 L2_sharp:3.8870e-03 L3_sharp:3.1544e-03 L4_sharp:4.1376e-03 L5_sharp:8.4788e-03 L6_sharp:1.2820e-02 L7_sharp:1.6248e-02 L8_sharp:2.9252e-02 L9_sharp:4.9907e-02 L10_sharp:6.7619e-02 L11_sharp:1.1289e-01 L12_sharp:4.1034e-01 total_fnorm:2.1094e+00 total_l1_linf:2.2080e+03 total_spectral:1.0859e+00 L1_fnorm:1.4160e-01 L2_fnorm:1.3965e-01 L3_fnorm:1.3965e-01 L4_fnorm:1.3965e-01 L5_fnorm:1.3770e-01 L6_fnorm:1.3770e-01 L7_fnorm:1.3672e-01 L8_fnorm:1.3574e-01 L9_fnorm:1.3672e-01 L10_fnorm:1.3574e-01 L11_fnorm:1.3574e-01 L12_fnorm:1.3184e-01 L1_l1linf:3.2227e-02 L2_l1linf:3.1738e-02 L3_l1linf:3.0518e-02 L4_l1linf:2.9541e-02 L5_l1linf:2.8809e-02 
L6_l1linf:2.8320e-02 L7_l1linf:2.8198e-02 L8_l1linf:2.7100e-02 L9_l1linf:2.6001e-02 L10_l1linf:2.6611e-02 L11_l1linf:2.7954e-02 L12_l1linf:2.1729e-02 L1_spectral:2.0203e-03 L2_spectral:2.0407e-03 L3_spectral:2.0203e-03 L4_spectral:2.0353e-03 L5_spectral:2.0170e-03 L6_spectral:2.0166e-03 L7_spectral:1.9998e-03 L8_spectral:1.9891e-03 L9_spectral:1.9990e-03 L10_spectral:1.9633e-03 L11_spectral:1.9551e-03 L12_spectral:1.9611e-03 train_time:323751ms step_avg:42.60ms +[2025-09-11 12:21:16] [Rank 0] step:7601/10000 train_time:325778ms step_avg:42.86ms +[2025-09-11 12:21:16] [Rank 0] step:7601/10000 train_time:325778ms step_avg:42.86ms +[2025-09-11 12:21:17] [Rank 0] step:7621/10000 train_time:326502ms step_avg:42.84ms +[2025-09-11 12:21:17] [Rank 0] step:7621/10000 train_time:326502ms step_avg:42.84ms +[2025-09-11 12:21:17] [Rank 0] step:7641/10000 train_time:327207ms step_avg:42.82ms +[2025-09-11 12:21:17] [Rank 0] step:7641/10000 train_time:327207ms step_avg:42.82ms +[2025-09-11 12:21:18] [Rank 0] step:7661/10000 train_time:327910ms step_avg:42.80ms +[2025-09-11 12:21:18] [Rank 0] step:7661/10000 train_time:327910ms step_avg:42.80ms +[2025-09-11 12:21:19] [Rank 0] step:7681/10000 train_time:328613ms step_avg:42.78ms +[2025-09-11 12:21:19] [Rank 0] step:7681/10000 train_time:328613ms step_avg:42.78ms +[2025-09-11 12:21:19] [Rank 0] step:7701/10000 train_time:329316ms step_avg:42.76ms +[2025-09-11 12:21:19] [Rank 0] step:7701/10000 train_time:329316ms step_avg:42.76ms +[2025-09-11 12:21:20] [Rank 0] step:7721/10000 train_time:330020ms step_avg:42.74ms +[2025-09-11 12:21:20] [Rank 0] step:7721/10000 train_time:330020ms step_avg:42.74ms +[2025-09-11 12:21:21] [Rank 0] step:7741/10000 train_time:330723ms step_avg:42.72ms +[2025-09-11 12:21:21] [Rank 0] step:7741/10000 train_time:330723ms step_avg:42.72ms +[2025-09-11 12:21:21] [Rank 0] step:7761/10000 train_time:331425ms step_avg:42.70ms +[2025-09-11 12:21:21] [Rank 0] step:7761/10000 train_time:331425ms step_avg:42.70ms 
+[2025-09-11 12:21:22] [Rank 0] step:7781/10000 train_time:332130ms step_avg:42.68ms +[2025-09-11 12:21:22] [Rank 0] step:7781/10000 train_time:332130ms step_avg:42.68ms +[2025-09-11 12:21:23] [Rank 0] step:7801/10000 train_time:332829ms step_avg:42.66ms +[2025-09-11 12:21:23] [Rank 0] step:7801/10000 train_time:332829ms step_avg:42.66ms +[2025-09-11 12:21:24] [Rank 0] step:7821/10000 train_time:333532ms step_avg:42.65ms +[2025-09-11 12:21:24] [Rank 0] step:7821/10000 train_time:333532ms step_avg:42.65ms +[2025-09-11 12:21:24] [Rank 0] step:7841/10000 train_time:334237ms step_avg:42.63ms +[2025-09-11 12:21:24] [Rank 0] step:7841/10000 train_time:334237ms step_avg:42.63ms +[2025-09-11 12:21:25] [Rank 0] step:7861/10000 train_time:334943ms step_avg:42.61ms +[2025-09-11 12:21:25] [Rank 0] step:7861/10000 train_time:334943ms step_avg:42.61ms +[2025-09-11 12:21:26] [Rank 0] step:7881/10000 train_time:335647ms step_avg:42.59ms +[2025-09-11 12:21:26] [Rank 0] step:7881/10000 train_time:335647ms step_avg:42.59ms +[2025-09-11 12:21:26] [Rank 0] step:7901/10000 train_time:336351ms step_avg:42.57ms +[2025-09-11 12:21:26] [Rank 0] step:7901/10000 train_time:336351ms step_avg:42.57ms +[2025-09-11 12:21:27] [Rank 0] step:7921/10000 train_time:337054ms step_avg:42.55ms +[2025-09-11 12:21:27] [Rank 0] step:7921/10000 train_time:337054ms step_avg:42.55ms +[2025-09-11 12:21:28] [Rank 0] step:7941/10000 train_time:337758ms step_avg:42.53ms +[2025-09-11 12:21:28] [Rank 0] step:7941/10000 train_time:337758ms step_avg:42.53ms +[2025-09-11 12:21:29] [Rank 0] step:7961/10000 train_time:338459ms step_avg:42.51ms +[2025-09-11 12:21:29] [Rank 0] step:7961/10000 train_time:338459ms step_avg:42.51ms +[2025-09-11 12:21:29] [Rank 0] step:7981/10000 train_time:339165ms step_avg:42.50ms +[2025-09-11 12:21:29] [Rank 0] step:7981/10000 train_time:339165ms step_avg:42.50ms +[2025-09-11 12:21:30] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 12:21:30] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 12:21:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 12:21:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 12:21:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 12:21:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 12:21:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:21:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:21:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 12:21:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 12:21:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 12:21:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 12:21:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 12:21:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 12:21:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 12:21:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 12:21:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 12:21:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 12:21:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 12:21:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 12:21:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 12:21:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 12:21:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 12:21:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 12:21:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 12:21:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 12:21:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 12:21:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 12:21:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 12:21:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 12:21:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 12:21:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 12:21:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 12:21:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 12:21:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 12:21:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 12:21:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 12:21:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 12:21:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 12:21:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 12:21:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 12:21:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 12:21:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 12:21:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 12:21:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 12:21:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 12:21:41] [Rank 0] PRINT: step:8000/10000 val_loss:4.9314 total_sharp:1.2665e-02 L1_sharp:5.2745e-03 L2_sharp:2.9154e-03 L3_sharp:2.4824e-03 L4_sharp:3.5013e-03 L5_sharp:7.4854e-03 L6_sharp:9.5494e-03 L7_sharp:1.7173e-02 L8_sharp:3.1912e-02 L9_sharp:5.1053e-02 L10_sharp:7.0271e-02 L11_sharp:1.1194e-01 L12_sharp:3.0831e-01 total_fnorm:1.6641e+00 total_l1_linf:1.6240e+03 total_spectral:8.5156e-01 L1_fnorm:1.1572e-01 L2_fnorm:1.1426e-01 L3_fnorm:1.1328e-01 L4_fnorm:1.1328e-01 L5_fnorm:1.1230e-01 L6_fnorm:1.1230e-01 L7_fnorm:1.1182e-01 L8_fnorm:1.1084e-01 L9_fnorm:1.1084e-01 L10_fnorm:1.1035e-01 L11_fnorm:1.1084e-01 L12_fnorm:1.0645e-01 L1_l1linf:2.4536e-02 L2_l1linf:2.3560e-02 L3_l1linf:2.2461e-02 L4_l1linf:2.2827e-02 L5_l1linf:2.1851e-02 L6_l1linf:2.1484e-02 L7_l1linf:2.0752e-02 L8_l1linf:2.0508e-02 L9_l1linf:2.0020e-02 L10_l1linf:2.0020e-02 L11_l1linf:2.0874e-02 L12_l1linf:1.6968e-02 L1_spectral:1.7353e-03 L2_spectral:1.7235e-03 L3_spectral:1.7381e-03 L4_spectral:1.7178e-03 L5_spectral:1.6980e-03 L6_spectral:1.7034e-03 L7_spectral:1.6958e-03 L8_spectral:1.6620e-03 L9_spectral:1.6740e-03 L10_spectral:1.6326e-03 L11_spectral:1.6373e-03 L12_spectral:1.6512e-03 train_time:339846ms step_avg:42.48ms +[2025-09-11 12:21:41] [Rank 0] PRINT: step:8000/10000 
val_loss:4.9314 total_sharp:1.2665e-02 L1_sharp:5.2745e-03 L2_sharp:2.9154e-03 L3_sharp:2.4824e-03 L4_sharp:3.5013e-03 L5_sharp:7.4854e-03 L6_sharp:9.5494e-03 L7_sharp:1.7173e-02 L8_sharp:3.1912e-02 L9_sharp:5.1053e-02 L10_sharp:7.0271e-02 L11_sharp:1.1194e-01 L12_sharp:3.0831e-01 total_fnorm:1.6641e+00 total_l1_linf:1.6240e+03 total_spectral:8.5156e-01 L1_fnorm:1.1572e-01 L2_fnorm:1.1426e-01 L3_fnorm:1.1328e-01 L4_fnorm:1.1328e-01 L5_fnorm:1.1230e-01 L6_fnorm:1.1230e-01 L7_fnorm:1.1182e-01 L8_fnorm:1.1084e-01 L9_fnorm:1.1084e-01 L10_fnorm:1.1035e-01 L11_fnorm:1.1084e-01 L12_fnorm:1.0645e-01 L1_l1linf:2.4536e-02 L2_l1linf:2.3560e-02 L3_l1linf:2.2461e-02 L4_l1linf:2.2827e-02 L5_l1linf:2.1851e-02 L6_l1linf:2.1484e-02 L7_l1linf:2.0752e-02 L8_l1linf:2.0508e-02 L9_l1linf:2.0020e-02 L10_l1linf:2.0020e-02 L11_l1linf:2.0874e-02 L12_l1linf:1.6968e-02 L1_spectral:1.7353e-03 L2_spectral:1.7235e-03 L3_spectral:1.7381e-03 L4_spectral:1.7178e-03 L5_spectral:1.6980e-03 L6_spectral:1.7034e-03 L7_spectral:1.6958e-03 L8_spectral:1.6620e-03 L9_spectral:1.6740e-03 L10_spectral:1.6326e-03 L11_spectral:1.6373e-03 L12_spectral:1.6512e-03 train_time:339846ms step_avg:42.48ms +[2025-09-11 12:21:43] [Rank 0] step:8001/10000 train_time:341844ms step_avg:42.73ms +[2025-09-11 12:21:43] [Rank 0] step:8001/10000 train_time:341844ms step_avg:42.73ms +[2025-09-11 12:21:43] [Rank 0] step:8021/10000 train_time:342563ms step_avg:42.71ms +[2025-09-11 12:21:43] [Rank 0] step:8021/10000 train_time:342563ms step_avg:42.71ms +[2025-09-11 12:21:44] [Rank 0] step:8041/10000 train_time:343268ms step_avg:42.69ms +[2025-09-11 12:21:44] [Rank 0] step:8041/10000 train_time:343268ms step_avg:42.69ms +[2025-09-11 12:21:45] [Rank 0] step:8061/10000 train_time:343974ms step_avg:42.67ms +[2025-09-11 12:21:45] [Rank 0] step:8061/10000 train_time:343974ms step_avg:42.67ms +[2025-09-11 12:21:46] [Rank 0] step:8081/10000 train_time:344675ms step_avg:42.65ms +[2025-09-11 12:21:46] [Rank 0] step:8081/10000 
train_time:344675ms step_avg:42.65ms +[2025-09-11 12:21:46] [Rank 0] step:8101/10000 train_time:345377ms step_avg:42.63ms +[2025-09-11 12:21:46] [Rank 0] step:8101/10000 train_time:345377ms step_avg:42.63ms +[2025-09-11 12:21:47] [Rank 0] step:8121/10000 train_time:346084ms step_avg:42.62ms +[2025-09-11 12:21:47] [Rank 0] step:8121/10000 train_time:346084ms step_avg:42.62ms +[2025-09-11 12:21:48] [Rank 0] step:8141/10000 train_time:347540ms step_avg:42.69ms +[2025-09-11 12:21:48] [Rank 0] step:8141/10000 train_time:347540ms step_avg:42.69ms +[2025-09-11 12:21:49] [Rank 0] step:8161/10000 train_time:348247ms step_avg:42.67ms +[2025-09-11 12:21:49] [Rank 0] step:8161/10000 train_time:348247ms step_avg:42.67ms +[2025-09-11 12:21:50] [Rank 0] step:8181/10000 train_time:348960ms step_avg:42.65ms +[2025-09-11 12:21:50] [Rank 0] step:8181/10000 train_time:348960ms step_avg:42.65ms +[2025-09-11 12:21:51] [Rank 0] step:8201/10000 train_time:349671ms step_avg:42.64ms +[2025-09-11 12:21:51] [Rank 0] step:8201/10000 train_time:349671ms step_avg:42.64ms +[2025-09-11 12:21:51] [Rank 0] step:8221/10000 train_time:350381ms step_avg:42.62ms +[2025-09-11 12:21:51] [Rank 0] step:8221/10000 train_time:350381ms step_avg:42.62ms +[2025-09-11 12:21:52] [Rank 0] step:8241/10000 train_time:351098ms step_avg:42.60ms +[2025-09-11 12:21:52] [Rank 0] step:8241/10000 train_time:351098ms step_avg:42.60ms +[2025-09-11 12:21:53] [Rank 0] step:8261/10000 train_time:351807ms step_avg:42.59ms +[2025-09-11 12:21:53] [Rank 0] step:8261/10000 train_time:351807ms step_avg:42.59ms +[2025-09-11 12:21:53] [Rank 0] step:8281/10000 train_time:352512ms step_avg:42.57ms +[2025-09-11 12:21:53] [Rank 0] step:8281/10000 train_time:352512ms step_avg:42.57ms +[2025-09-11 12:21:54] [Rank 0] step:8301/10000 train_time:353221ms step_avg:42.55ms +[2025-09-11 12:21:54] [Rank 0] step:8301/10000 train_time:353221ms step_avg:42.55ms +[2025-09-11 12:21:55] [Rank 0] step:8321/10000 train_time:353929ms step_avg:42.53ms 
+[2025-09-11 12:21:55] [Rank 0] step:8321/10000 train_time:353929ms step_avg:42.53ms +[2025-09-11 12:21:56] [Rank 0] step:8341/10000 train_time:354645ms step_avg:42.52ms +[2025-09-11 12:21:56] [Rank 0] step:8341/10000 train_time:354645ms step_avg:42.52ms +[2025-09-11 12:21:56] [Rank 0] step:8361/10000 train_time:355349ms step_avg:42.50ms +[2025-09-11 12:21:56] [Rank 0] step:8361/10000 train_time:355349ms step_avg:42.50ms +[2025-09-11 12:21:57] [Rank 0] step:8381/10000 train_time:356061ms step_avg:42.48ms +[2025-09-11 12:21:57] [Rank 0] step:8381/10000 train_time:356061ms step_avg:42.48ms +[2025-09-11 12:21:58] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 12:21:58] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 12:21:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 12:21:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 12:22:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 12:22:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 12:22:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:22:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:22:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 12:22:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 12:22:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 12:22:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 12:22:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 12:22:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 12:22:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 12:22:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 12:22:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 12:22:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 12:22:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 12:22:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 12:22:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 12:22:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 12:22:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 12:22:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 12:22:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 12:22:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 12:22:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 12:22:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 12:22:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 12:22:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 12:22:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 12:22:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 12:22:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 12:22:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 12:22:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 12:22:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 12:22:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 12:22:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 12:22:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 12:22:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 12:22:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 12:22:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 12:22:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 12:22:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 12:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 12:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:22:08] [Rank 0] PRINT: step:8400/10000 val_loss:4.9179 total_sharp:1.1265e-02 L1_sharp:5.6818e-03 L2_sharp:4.5494e-03 L3_sharp:3.0079e-03 L4_sharp:3.1255e-03 L5_sharp:6.3532e-03 L6_sharp:7.4965e-03 L7_sharp:1.5209e-02 L8_sharp:2.5975e-02 L9_sharp:4.0718e-02 L10_sharp:5.3540e-02 L11_sharp:9.1548e-02 L12_sharp:2.9650e-01 total_fnorm:1.2500e+00 total_l1_linf:1.1040e+03 total_spectral:6.4062e-01 L1_fnorm:9.1309e-02 L2_fnorm:8.9355e-02 L3_fnorm:8.8867e-02 L4_fnorm:8.8379e-02 L5_fnorm:8.7891e-02 L6_fnorm:8.7891e-02 L7_fnorm:8.7402e-02 L8_fnorm:8.6426e-02 L9_fnorm:8.6426e-02 L10_fnorm:8.5938e-02 L11_fnorm:8.6426e-02 L12_fnorm:8.3008e-02 L1_l1linf:1.7944e-02 L2_l1linf:1.7578e-02 L3_l1linf:1.6846e-02 L4_l1linf:1.5991e-02 L5_l1linf:1.5747e-02 L6_l1linf:1.5869e-02 L7_l1linf:1.5320e-02 L8_l1linf:1.4832e-02 L9_l1linf:1.4404e-02 L10_l1linf:1.4526e-02 L11_l1linf:1.4709e-02 L12_l1linf:1.3062e-02 L1_spectral:1.4018e-03 L2_spectral:1.4050e-03 L3_spectral:1.3929e-03 L4_spectral:1.3847e-03 L5_spectral:1.3705e-03 L6_spectral:1.3829e-03 L7_spectral:1.3639e-03 L8_spectral:1.3354e-03 L9_spectral:1.3438e-03 L10_spectral:1.3115e-03 L11_spectral:1.3008e-03 L12_spectral:1.3174e-03 train_time:356754ms step_avg:42.47ms +[2025-09-11 12:22:08] [Rank 0] PRINT: step:8400/10000 val_loss:4.9179 total_sharp:1.1265e-02 L1_sharp:5.6818e-03 L2_sharp:4.5494e-03 L3_sharp:3.0079e-03 L4_sharp:3.1255e-03 L5_sharp:6.3532e-03 L6_sharp:7.4965e-03 L7_sharp:1.5209e-02 L8_sharp:2.5975e-02 L9_sharp:4.0718e-02 L10_sharp:5.3540e-02 L11_sharp:9.1548e-02 L12_sharp:2.9650e-01 total_fnorm:1.2500e+00 total_l1_linf:1.1040e+03 total_spectral:6.4062e-01 L1_fnorm:9.1309e-02 L2_fnorm:8.9355e-02 L3_fnorm:8.8867e-02 L4_fnorm:8.8379e-02 L5_fnorm:8.7891e-02 L6_fnorm:8.7891e-02 L7_fnorm:8.7402e-02 L8_fnorm:8.6426e-02 L9_fnorm:8.6426e-02 L10_fnorm:8.5938e-02 L11_fnorm:8.6426e-02 L12_fnorm:8.3008e-02 L1_l1linf:1.7944e-02 L2_l1linf:1.7578e-02 L3_l1linf:1.6846e-02 L4_l1linf:1.5991e-02 L5_l1linf:1.5747e-02 
L6_l1linf:1.5869e-02 L7_l1linf:1.5320e-02 L8_l1linf:1.4832e-02 L9_l1linf:1.4404e-02 L10_l1linf:1.4526e-02 L11_l1linf:1.4709e-02 L12_l1linf:1.3062e-02 L1_spectral:1.4018e-03 L2_spectral:1.4050e-03 L3_spectral:1.3929e-03 L4_spectral:1.3847e-03 L5_spectral:1.3705e-03 L6_spectral:1.3829e-03 L7_spectral:1.3639e-03 L8_spectral:1.3354e-03 L9_spectral:1.3438e-03 L10_spectral:1.3115e-03 L11_spectral:1.3008e-03 L12_spectral:1.3174e-03 train_time:356754ms step_avg:42.47ms +[2025-09-11 12:22:10] [Rank 0] step:8401/10000 train_time:358786ms step_avg:42.71ms +[2025-09-11 12:22:10] [Rank 0] step:8401/10000 train_time:358786ms step_avg:42.71ms +[2025-09-11 12:22:11] [Rank 0] step:8421/10000 train_time:359609ms step_avg:42.70ms +[2025-09-11 12:22:11] [Rank 0] step:8421/10000 train_time:359609ms step_avg:42.70ms +[2025-09-11 12:22:12] [Rank 0] step:8441/10000 train_time:360318ms step_avg:42.69ms +[2025-09-11 12:22:12] [Rank 0] step:8441/10000 train_time:360318ms step_avg:42.69ms +[2025-09-11 12:22:13] [Rank 0] step:8461/10000 train_time:361029ms step_avg:42.67ms +[2025-09-11 12:22:13] [Rank 0] step:8461/10000 train_time:361029ms step_avg:42.67ms +[2025-09-11 12:22:13] [Rank 0] step:8481/10000 train_time:361739ms step_avg:42.65ms +[2025-09-11 12:22:13] [Rank 0] step:8481/10000 train_time:361739ms step_avg:42.65ms +[2025-09-11 12:22:14] [Rank 0] step:8501/10000 train_time:362447ms step_avg:42.64ms +[2025-09-11 12:22:14] [Rank 0] step:8501/10000 train_time:362447ms step_avg:42.64ms +[2025-09-11 12:22:15] [Rank 0] step:8521/10000 train_time:363156ms step_avg:42.62ms +[2025-09-11 12:22:15] [Rank 0] step:8521/10000 train_time:363156ms step_avg:42.62ms +[2025-09-11 12:22:15] [Rank 0] step:8541/10000 train_time:363864ms step_avg:42.60ms +[2025-09-11 12:22:15] [Rank 0] step:8541/10000 train_time:363864ms step_avg:42.60ms +[2025-09-11 12:22:16] [Rank 0] step:8561/10000 train_time:364578ms step_avg:42.59ms +[2025-09-11 12:22:16] [Rank 0] step:8561/10000 train_time:364578ms step_avg:42.59ms 
+[2025-09-11 12:22:17] [Rank 0] step:8581/10000 train_time:365290ms step_avg:42.57ms +[2025-09-11 12:22:17] [Rank 0] step:8581/10000 train_time:365290ms step_avg:42.57ms +[2025-09-11 12:22:18] [Rank 0] step:8601/10000 train_time:366001ms step_avg:42.55ms +[2025-09-11 12:22:18] [Rank 0] step:8601/10000 train_time:366001ms step_avg:42.55ms +[2025-09-11 12:22:18] [Rank 0] step:8621/10000 train_time:366708ms step_avg:42.54ms +[2025-09-11 12:22:18] [Rank 0] step:8621/10000 train_time:366708ms step_avg:42.54ms +[2025-09-11 12:22:19] [Rank 0] step:8641/10000 train_time:367416ms step_avg:42.52ms +[2025-09-11 12:22:19] [Rank 0] step:8641/10000 train_time:367416ms step_avg:42.52ms +[2025-09-11 12:22:20] [Rank 0] step:8661/10000 train_time:368125ms step_avg:42.50ms +[2025-09-11 12:22:20] [Rank 0] step:8661/10000 train_time:368125ms step_avg:42.50ms +[2025-09-11 12:22:20] [Rank 0] step:8681/10000 train_time:368835ms step_avg:42.49ms +[2025-09-11 12:22:20] [Rank 0] step:8681/10000 train_time:368835ms step_avg:42.49ms +[2025-09-11 12:22:21] [Rank 0] step:8701/10000 train_time:369543ms step_avg:42.47ms +[2025-09-11 12:22:21] [Rank 0] step:8701/10000 train_time:369543ms step_avg:42.47ms +[2025-09-11 12:22:22] [Rank 0] step:8721/10000 train_time:370254ms step_avg:42.46ms +[2025-09-11 12:22:22] [Rank 0] step:8721/10000 train_time:370254ms step_avg:42.46ms +[2025-09-11 12:22:22] [Rank 0] step:8741/10000 train_time:370958ms step_avg:42.44ms +[2025-09-11 12:22:22] [Rank 0] step:8741/10000 train_time:370958ms step_avg:42.44ms +[2025-09-11 12:22:23] [Rank 0] step:8761/10000 train_time:371669ms step_avg:42.42ms +[2025-09-11 12:22:23] [Rank 0] step:8761/10000 train_time:371669ms step_avg:42.42ms +[2025-09-11 12:22:24] [Rank 0] step:8781/10000 train_time:372374ms step_avg:42.41ms +[2025-09-11 12:22:24] [Rank 0] step:8781/10000 train_time:372374ms step_avg:42.41ms +[2025-09-11 12:22:25] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 12:22:25] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 12:22:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 12:22:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 12:22:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 12:22:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 12:22:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:22:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:22:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 12:22:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 12:22:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 12:22:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 12:22:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 12:22:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 12:22:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 12:22:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 12:22:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 12:22:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 12:22:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 12:22:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 12:22:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 12:22:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 12:22:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 12:22:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 12:22:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 12:22:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 12:22:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 12:22:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 12:22:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 12:22:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 12:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 12:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 12:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 12:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 12:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 12:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 12:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 12:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 12:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 12:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 12:22:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 12:22:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 12:22:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 12:22:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 12:22:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 12:22:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 12:22:36] [Rank 0] PRINT: step:8800/10000 val_loss:4.9057 total_sharp:9.2812e-03 L1_sharp:4.2416e-03 L2_sharp:2.4993e-03 L3_sharp:3.5177e-03 L4_sharp:4.7033e-03 L5_sharp:4.4850e-03 L6_sharp:8.4016e-03 L7_sharp:1.1986e-02 L8_sharp:2.0952e-02 L9_sharp:3.2457e-02 L10_sharp:4.7461e-02 L11_sharp:8.2083e-02 L12_sharp:2.6365e-01 total_fnorm:8.8672e-01 total_l1_linf:7.0000e+02 total_spectral:4.5703e-01 L1_fnorm:6.5918e-02 L2_fnorm:6.4941e-02 L3_fnorm:6.4453e-02 L4_fnorm:6.3965e-02 L5_fnorm:6.3477e-02 L6_fnorm:6.3477e-02 L7_fnorm:6.2988e-02 L8_fnorm:6.2500e-02 L9_fnorm:6.2256e-02 L10_fnorm:6.1768e-02 L11_fnorm:6.1768e-02 L12_fnorm:6.0059e-02 L1_l1linf:1.1719e-02 L2_l1linf:1.1414e-02 L3_l1linf:1.1353e-02 L4_l1linf:1.0986e-02 L5_l1linf:1.0620e-02 L6_l1linf:1.0437e-02 L7_l1linf:9.8267e-03 L8_l1linf:9.5825e-03 L9_l1linf:9.3384e-03 L10_l1linf:9.4604e-03 L11_l1linf:9.6436e-03 L12_l1linf:9.6436e-03 L1_spectral:1.0640e-03 L2_spectral:1.0579e-03 L3_spectral:1.0477e-03 L4_spectral:1.0281e-03 L5_spectral:1.0068e-03 L6_spectral:1.0176e-03 L7_spectral:1.0062e-03 L8_spectral:9.7986e-04 L9_spectral:9.9096e-04 L10_spectral:9.6618e-04 L11_spectral:9.5875e-04 L12_spectral:9.8484e-04 train_time:373061ms step_avg:42.39ms +[2025-09-11 12:22:36] [Rank 0] PRINT: step:8800/10000 
val_loss:4.9057 total_sharp:9.2812e-03 L1_sharp:4.2416e-03 L2_sharp:2.4993e-03 L3_sharp:3.5177e-03 L4_sharp:4.7033e-03 L5_sharp:4.4850e-03 L6_sharp:8.4016e-03 L7_sharp:1.1986e-02 L8_sharp:2.0952e-02 L9_sharp:3.2457e-02 L10_sharp:4.7461e-02 L11_sharp:8.2083e-02 L12_sharp:2.6365e-01 total_fnorm:8.8672e-01 total_l1_linf:7.0000e+02 total_spectral:4.5703e-01 L1_fnorm:6.5918e-02 L2_fnorm:6.4941e-02 L3_fnorm:6.4453e-02 L4_fnorm:6.3965e-02 L5_fnorm:6.3477e-02 L6_fnorm:6.3477e-02 L7_fnorm:6.2988e-02 L8_fnorm:6.2500e-02 L9_fnorm:6.2256e-02 L10_fnorm:6.1768e-02 L11_fnorm:6.1768e-02 L12_fnorm:6.0059e-02 L1_l1linf:1.1719e-02 L2_l1linf:1.1414e-02 L3_l1linf:1.1353e-02 L4_l1linf:1.0986e-02 L5_l1linf:1.0620e-02 L6_l1linf:1.0437e-02 L7_l1linf:9.8267e-03 L8_l1linf:9.5825e-03 L9_l1linf:9.3384e-03 L10_l1linf:9.4604e-03 L11_l1linf:9.6436e-03 L12_l1linf:9.6436e-03 L1_spectral:1.0640e-03 L2_spectral:1.0579e-03 L3_spectral:1.0477e-03 L4_spectral:1.0281e-03 L5_spectral:1.0068e-03 L6_spectral:1.0176e-03 L7_spectral:1.0062e-03 L8_spectral:9.7986e-04 L9_spectral:9.9096e-04 L10_spectral:9.6618e-04 L11_spectral:9.5875e-04 L12_spectral:9.8484e-04 train_time:373061ms step_avg:42.39ms +[2025-09-11 12:22:38] [Rank 0] step:8801/10000 train_time:375004ms step_avg:42.61ms +[2025-09-11 12:22:38] [Rank 0] step:8801/10000 train_time:375004ms step_avg:42.61ms +[2025-09-11 12:22:38] [Rank 0] step:8821/10000 train_time:375750ms step_avg:42.60ms +[2025-09-11 12:22:38] [Rank 0] step:8821/10000 train_time:375750ms step_avg:42.60ms +[2025-09-11 12:22:39] [Rank 0] step:8841/10000 train_time:376460ms step_avg:42.58ms +[2025-09-11 12:22:39] [Rank 0] step:8841/10000 train_time:376460ms step_avg:42.58ms +[2025-09-11 12:22:40] [Rank 0] step:8861/10000 train_time:377169ms step_avg:42.57ms +[2025-09-11 12:22:40] [Rank 0] step:8861/10000 train_time:377169ms step_avg:42.57ms +[2025-09-11 12:22:41] [Rank 0] step:8881/10000 train_time:378119ms step_avg:42.58ms +[2025-09-11 12:22:41] [Rank 0] step:8881/10000 
train_time:378119ms step_avg:42.58ms +[2025-09-11 12:22:41] [Rank 0] step:8901/10000 train_time:378831ms step_avg:42.56ms +[2025-09-11 12:22:41] [Rank 0] step:8901/10000 train_time:378831ms step_avg:42.56ms +[2025-09-11 12:22:42] [Rank 0] step:8921/10000 train_time:379538ms step_avg:42.54ms +[2025-09-11 12:22:42] [Rank 0] step:8921/10000 train_time:379538ms step_avg:42.54ms +[2025-09-11 12:22:43] [Rank 0] step:8941/10000 train_time:380360ms step_avg:42.54ms +[2025-09-11 12:22:43] [Rank 0] step:8941/10000 train_time:380360ms step_avg:42.54ms +[2025-09-11 12:22:44] [Rank 0] step:8961/10000 train_time:381233ms step_avg:42.54ms +[2025-09-11 12:22:44] [Rank 0] step:8961/10000 train_time:381233ms step_avg:42.54ms +[2025-09-11 12:22:44] [Rank 0] step:8981/10000 train_time:381947ms step_avg:42.53ms +[2025-09-11 12:22:44] [Rank 0] step:8981/10000 train_time:381947ms step_avg:42.53ms +[2025-09-11 12:22:45] [Rank 0] step:9001/10000 train_time:382651ms step_avg:42.51ms +[2025-09-11 12:22:45] [Rank 0] step:9001/10000 train_time:382651ms step_avg:42.51ms +[2025-09-11 12:22:46] [Rank 0] step:9021/10000 train_time:383361ms step_avg:42.50ms +[2025-09-11 12:22:46] [Rank 0] step:9021/10000 train_time:383361ms step_avg:42.50ms +[2025-09-11 12:22:47] [Rank 0] step:9041/10000 train_time:384073ms step_avg:42.48ms +[2025-09-11 12:22:47] [Rank 0] step:9041/10000 train_time:384073ms step_avg:42.48ms +[2025-09-11 12:22:47] [Rank 0] step:9061/10000 train_time:384782ms step_avg:42.47ms +[2025-09-11 12:22:47] [Rank 0] step:9061/10000 train_time:384782ms step_avg:42.47ms +[2025-09-11 12:22:48] [Rank 0] step:9081/10000 train_time:385495ms step_avg:42.45ms +[2025-09-11 12:22:48] [Rank 0] step:9081/10000 train_time:385495ms step_avg:42.45ms +[2025-09-11 12:22:49] [Rank 0] step:9101/10000 train_time:386208ms step_avg:42.44ms +[2025-09-11 12:22:49] [Rank 0] step:9101/10000 train_time:386208ms step_avg:42.44ms +[2025-09-11 12:22:49] [Rank 0] step:9121/10000 train_time:386923ms step_avg:42.42ms 
+[2025-09-11 12:22:49] [Rank 0] step:9121/10000 train_time:386923ms step_avg:42.42ms +[2025-09-11 12:22:50] [Rank 0] step:9141/10000 train_time:387631ms step_avg:42.41ms +[2025-09-11 12:22:50] [Rank 0] step:9141/10000 train_time:387631ms step_avg:42.41ms +[2025-09-11 12:22:51] [Rank 0] step:9161/10000 train_time:388346ms step_avg:42.39ms +[2025-09-11 12:22:51] [Rank 0] step:9161/10000 train_time:388346ms step_avg:42.39ms +[2025-09-11 12:22:52] [Rank 0] step:9181/10000 train_time:389059ms step_avg:42.38ms +[2025-09-11 12:22:52] [Rank 0] step:9181/10000 train_time:389059ms step_avg:42.38ms +[2025-09-11 12:22:52] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 12:22:52] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 12:22:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 12:22:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 12:22:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 12:22:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 12:22:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:22:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:22:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 12:22:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 12:22:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 12:22:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 12:22:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 12:22:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 12:22:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 12:22:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 12:23:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 12:23:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 12:23:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 12:23:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 12:23:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 12:23:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 12:23:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 12:23:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 12:23:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 12:23:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 12:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 12:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 12:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 12:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 12:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 12:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 12:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 12:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 12:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 12:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 12:23:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 12:23:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 12:23:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 12:23:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 12:23:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 12:23:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 12:23:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 12:23:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 12:23:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 12:23:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:23:03] [Rank 0] PRINT: step:9200/10000 val_loss:4.8980 total_sharp:1.1261e-02 L1_sharp:5.5202e-03 L2_sharp:3.8176e-03 L3_sharp:3.7576e-03 L4_sharp:3.0479e-03 L5_sharp:4.6369e-03 L6_sharp:4.6271e-03 L7_sharp:1.2582e-02 L8_sharp:1.7332e-02 L9_sharp:2.8209e-02 L10_sharp:4.3080e-02 L11_sharp:6.8941e-02 L12_sharp:4.1328e-01 total_fnorm:5.4297e-01 total_l1_linf:3.8600e+02 total_spectral:2.8125e-01 L1_fnorm:4.4189e-02 L2_fnorm:4.2969e-02 L3_fnorm:4.2725e-02 L4_fnorm:4.2480e-02 L5_fnorm:4.2480e-02 L6_fnorm:4.2236e-02 L7_fnorm:4.1992e-02 L8_fnorm:4.1504e-02 L9_fnorm:4.1504e-02 L10_fnorm:4.1016e-02 L11_fnorm:4.1260e-02 L12_fnorm:3.9795e-02 L1_l1linf:7.0496e-03 L2_l1linf:6.7444e-03 L3_l1linf:6.5613e-03 L4_l1linf:6.3782e-03 L5_l1linf:6.0730e-03 L6_l1linf:6.3477e-03 L7_l1linf:5.9204e-03 L8_l1linf:5.7983e-03 L9_l1linf:5.7373e-03 L10_l1linf:5.7373e-03 L11_l1linf:5.6763e-03 L12_l1linf:5.9814e-03 L1_spectral:7.3710e-04 L2_spectral:7.2465e-04 L3_spectral:7.1658e-04 L4_spectral:7.1638e-04 L5_spectral:7.0530e-04 L6_spectral:6.9865e-04 L7_spectral:6.8957e-04 L8_spectral:6.7423e-04 L9_spectral:6.7581e-04 L10_spectral:6.5331e-04 L11_spectral:6.5214e-04 L12_spectral:6.7142e-04 train_time:389753ms step_avg:42.36ms +[2025-09-11 12:23:03] [Rank 0] PRINT: step:9200/10000 val_loss:4.8980 total_sharp:1.1261e-02 L1_sharp:5.5202e-03 L2_sharp:3.8176e-03 L3_sharp:3.7576e-03 L4_sharp:3.0479e-03 L5_sharp:4.6369e-03 L6_sharp:4.6271e-03 L7_sharp:1.2582e-02 L8_sharp:1.7332e-02 L9_sharp:2.8209e-02 L10_sharp:4.3080e-02 L11_sharp:6.8941e-02 L12_sharp:4.1328e-01 total_fnorm:5.4297e-01 total_l1_linf:3.8600e+02 total_spectral:2.8125e-01 L1_fnorm:4.4189e-02 L2_fnorm:4.2969e-02 L3_fnorm:4.2725e-02 L4_fnorm:4.2480e-02 L5_fnorm:4.2480e-02 L6_fnorm:4.2236e-02 L7_fnorm:4.1992e-02 L8_fnorm:4.1504e-02 L9_fnorm:4.1504e-02 L10_fnorm:4.1016e-02 L11_fnorm:4.1260e-02 L12_fnorm:3.9795e-02 L1_l1linf:7.0496e-03 L2_l1linf:6.7444e-03 L3_l1linf:6.5613e-03 L4_l1linf:6.3782e-03 L5_l1linf:6.0730e-03 
L6_l1linf:6.3477e-03 L7_l1linf:5.9204e-03 L8_l1linf:5.7983e-03 L9_l1linf:5.7373e-03 L10_l1linf:5.7373e-03 L11_l1linf:5.6763e-03 L12_l1linf:5.9814e-03 L1_spectral:7.3710e-04 L2_spectral:7.2465e-04 L3_spectral:7.1658e-04 L4_spectral:7.1638e-04 L5_spectral:7.0530e-04 L6_spectral:6.9865e-04 L7_spectral:6.8957e-04 L8_spectral:6.7423e-04 L9_spectral:6.7581e-04 L10_spectral:6.5331e-04 L11_spectral:6.5214e-04 L12_spectral:6.7142e-04 train_time:389753ms step_avg:42.36ms +[2025-09-11 12:23:05] [Rank 0] step:9201/10000 train_time:391697ms step_avg:42.57ms +[2025-09-11 12:23:05] [Rank 0] step:9201/10000 train_time:391697ms step_avg:42.57ms +[2025-09-11 12:23:06] [Rank 0] step:9221/10000 train_time:392422ms step_avg:42.56ms +[2025-09-11 12:23:06] [Rank 0] step:9221/10000 train_time:392422ms step_avg:42.56ms +[2025-09-11 12:23:06] [Rank 0] step:9241/10000 train_time:393131ms step_avg:42.54ms +[2025-09-11 12:23:06] [Rank 0] step:9241/10000 train_time:393131ms step_avg:42.54ms +[2025-09-11 12:23:07] [Rank 0] step:9261/10000 train_time:393844ms step_avg:42.53ms +[2025-09-11 12:23:07] [Rank 0] step:9261/10000 train_time:393844ms step_avg:42.53ms +[2025-09-11 12:23:08] [Rank 0] step:9281/10000 train_time:394556ms step_avg:42.51ms +[2025-09-11 12:23:08] [Rank 0] step:9281/10000 train_time:394556ms step_avg:42.51ms +[2025-09-11 12:23:09] [Rank 0] step:9301/10000 train_time:395264ms step_avg:42.50ms +[2025-09-11 12:23:09] [Rank 0] step:9301/10000 train_time:395264ms step_avg:42.50ms +[2025-09-11 12:23:09] [Rank 0] step:9321/10000 train_time:395977ms step_avg:42.48ms +[2025-09-11 12:23:09] [Rank 0] step:9321/10000 train_time:395977ms step_avg:42.48ms +[2025-09-11 12:23:10] [Rank 0] step:9341/10000 train_time:396686ms step_avg:42.47ms +[2025-09-11 12:23:10] [Rank 0] step:9341/10000 train_time:396686ms step_avg:42.47ms +[2025-09-11 12:23:11] [Rank 0] step:9361/10000 train_time:397392ms step_avg:42.45ms +[2025-09-11 12:23:11] [Rank 0] step:9361/10000 train_time:397392ms step_avg:42.45ms 
+[2025-09-11 12:23:11] [Rank 0] step:9381/10000 train_time:398102ms step_avg:42.44ms +[2025-09-11 12:23:11] [Rank 0] step:9381/10000 train_time:398102ms step_avg:42.44ms +[2025-09-11 12:23:12] [Rank 0] step:9401/10000 train_time:398815ms step_avg:42.42ms +[2025-09-11 12:23:12] [Rank 0] step:9401/10000 train_time:398815ms step_avg:42.42ms +[2025-09-11 12:23:13] [Rank 0] step:9421/10000 train_time:399528ms step_avg:42.41ms +[2025-09-11 12:23:13] [Rank 0] step:9421/10000 train_time:399528ms step_avg:42.41ms +[2025-09-11 12:23:14] [Rank 0] step:9441/10000 train_time:400241ms step_avg:42.39ms +[2025-09-11 12:23:14] [Rank 0] step:9441/10000 train_time:400241ms step_avg:42.39ms +[2025-09-11 12:23:14] [Rank 0] step:9461/10000 train_time:400952ms step_avg:42.38ms +[2025-09-11 12:23:14] [Rank 0] step:9461/10000 train_time:400952ms step_avg:42.38ms +[2025-09-11 12:23:15] [Rank 0] step:9481/10000 train_time:401665ms step_avg:42.37ms +[2025-09-11 12:23:15] [Rank 0] step:9481/10000 train_time:401665ms step_avg:42.37ms +[2025-09-11 12:23:16] [Rank 0] step:9501/10000 train_time:402379ms step_avg:42.35ms +[2025-09-11 12:23:16] [Rank 0] step:9501/10000 train_time:402379ms step_avg:42.35ms +[2025-09-11 12:23:16] [Rank 0] step:9521/10000 train_time:403094ms step_avg:42.34ms +[2025-09-11 12:23:16] [Rank 0] step:9521/10000 train_time:403094ms step_avg:42.34ms +[2025-09-11 12:23:17] [Rank 0] step:9541/10000 train_time:403803ms step_avg:42.32ms +[2025-09-11 12:23:17] [Rank 0] step:9541/10000 train_time:403803ms step_avg:42.32ms +[2025-09-11 12:23:18] [Rank 0] step:9561/10000 train_time:404514ms step_avg:42.31ms +[2025-09-11 12:23:18] [Rank 0] step:9561/10000 train_time:404514ms step_avg:42.31ms +[2025-09-11 12:23:19] [Rank 0] step:9581/10000 train_time:405227ms step_avg:42.29ms +[2025-09-11 12:23:19] [Rank 0] step:9581/10000 train_time:405227ms step_avg:42.29ms +[2025-09-11 12:23:19] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 12:23:19] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 12:23:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 12:23:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 12:23:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 12:23:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 12:23:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:23:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:23:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 12:23:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 12:23:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 12:23:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 12:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 12:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 12:23:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 12:23:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 12:23:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 12:23:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 12:23:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 12:23:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 12:23:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 12:23:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 12:23:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 12:23:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 12:23:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 12:23:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 12:23:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 12:23:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 12:23:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 12:23:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 12:23:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 12:23:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 12:23:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 12:23:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 12:23:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 12:23:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 12:23:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 12:23:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 12:23:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 12:23:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 12:23:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 12:23:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 12:23:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 12:23:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 12:23:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 12:23:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 12:23:30] [Rank 0] PRINT: step:9600/10000 val_loss:4.8919 total_sharp:5.0121e-03 L1_sharp:3.0658e-03 L2_sharp:2.5545e-03 L3_sharp:1.7661e-03 L4_sharp:3.0550e-03 L5_sharp:3.5804e-03 L6_sharp:4.7076e-03 L7_sharp:7.2609e-03 L8_sharp:1.1366e-02 L9_sharp:2.2226e-02 L10_sharp:2.9019e-02 L11_sharp:4.6491e-02 L12_sharp:1.7697e-01 total_fnorm:3.2422e-01 total_l1_linf:1.8500e+02 total_spectral:1.6602e-01 L1_fnorm:2.4902e-02 L2_fnorm:2.4170e-02 L3_fnorm:2.4048e-02 L4_fnorm:2.3926e-02 L5_fnorm:2.3804e-02 L6_fnorm:2.3682e-02 L7_fnorm:2.3560e-02 L8_fnorm:2.3193e-02 L9_fnorm:2.3315e-02 L10_fnorm:2.3071e-02 L11_fnorm:2.3071e-02 L12_fnorm:2.2217e-02 L1_l1linf:3.2501e-03 L2_l1linf:3.2196e-03 L3_l1linf:3.0060e-03 L4_l1linf:3.1128e-03 L5_l1linf:2.9755e-03 L6_l1linf:2.9297e-03 L7_l1linf:2.8687e-03 L8_l1linf:2.8229e-03 L9_l1linf:2.6550e-03 L10_l1linf:2.5940e-03 L11_l1linf:3.0212e-03 L12_l1linf:2.8534e-03 L1_spectral:4.2968e-04 L2_spectral:4.1842e-04 L3_spectral:4.1922e-04 L4_spectral:4.0945e-04 L5_spectral:4.0466e-04 L6_spectral:4.0293e-04 L7_spectral:3.9565e-04 L8_spectral:3.8846e-04 L9_spectral:3.8943e-04 L10_spectral:3.7148e-04 L11_spectral:3.6850e-04 L12_spectral:3.8931e-04 train_time:405916ms step_avg:42.28ms +[2025-09-11 12:23:30] [Rank 0] PRINT: step:9600/10000 
val_loss:4.8919 total_sharp:5.0121e-03 L1_sharp:3.0658e-03 L2_sharp:2.5545e-03 L3_sharp:1.7661e-03 L4_sharp:3.0550e-03 L5_sharp:3.5804e-03 L6_sharp:4.7076e-03 L7_sharp:7.2609e-03 L8_sharp:1.1366e-02 L9_sharp:2.2226e-02 L10_sharp:2.9019e-02 L11_sharp:4.6491e-02 L12_sharp:1.7697e-01 total_fnorm:3.2422e-01 total_l1_linf:1.8500e+02 total_spectral:1.6602e-01 L1_fnorm:2.4902e-02 L2_fnorm:2.4170e-02 L3_fnorm:2.4048e-02 L4_fnorm:2.3926e-02 L5_fnorm:2.3804e-02 L6_fnorm:2.3682e-02 L7_fnorm:2.3560e-02 L8_fnorm:2.3193e-02 L9_fnorm:2.3315e-02 L10_fnorm:2.3071e-02 L11_fnorm:2.3071e-02 L12_fnorm:2.2217e-02 L1_l1linf:3.2501e-03 L2_l1linf:3.2196e-03 L3_l1linf:3.0060e-03 L4_l1linf:3.1128e-03 L5_l1linf:2.9755e-03 L6_l1linf:2.9297e-03 L7_l1linf:2.8687e-03 L8_l1linf:2.8229e-03 L9_l1linf:2.6550e-03 L10_l1linf:2.5940e-03 L11_l1linf:3.0212e-03 L12_l1linf:2.8534e-03 L1_spectral:4.2968e-04 L2_spectral:4.1842e-04 L3_spectral:4.1922e-04 L4_spectral:4.0945e-04 L5_spectral:4.0466e-04 L6_spectral:4.0293e-04 L7_spectral:3.9565e-04 L8_spectral:3.8846e-04 L9_spectral:3.8943e-04 L10_spectral:3.7148e-04 L11_spectral:3.6850e-04 L12_spectral:3.8931e-04 train_time:405916ms step_avg:42.28ms +[2025-09-11 12:23:32] [Rank 0] step:9601/10000 train_time:407880ms step_avg:42.48ms +[2025-09-11 12:23:32] [Rank 0] step:9601/10000 train_time:407880ms step_avg:42.48ms +[2025-09-11 12:23:33] [Rank 0] step:9621/10000 train_time:408618ms step_avg:42.47ms +[2025-09-11 12:23:33] [Rank 0] step:9621/10000 train_time:408618ms step_avg:42.47ms +[2025-09-11 12:23:34] [Rank 0] step:9641/10000 train_time:409334ms step_avg:42.46ms +[2025-09-11 12:23:34] [Rank 0] step:9641/10000 train_time:409334ms step_avg:42.46ms +[2025-09-11 12:23:34] [Rank 0] step:9661/10000 train_time:410057ms step_avg:42.44ms +[2025-09-11 12:23:34] [Rank 0] step:9661/10000 train_time:410057ms step_avg:42.44ms +[2025-09-11 12:23:35] [Rank 0] step:9681/10000 train_time:410776ms step_avg:42.43ms +[2025-09-11 12:23:35] [Rank 0] step:9681/10000 
train_time:410776ms step_avg:42.43ms +[2025-09-11 12:23:36] [Rank 0] step:9701/10000 train_time:411493ms step_avg:42.42ms +[2025-09-11 12:23:36] [Rank 0] step:9701/10000 train_time:411493ms step_avg:42.42ms +[2025-09-11 12:23:37] [Rank 0] step:9721/10000 train_time:412215ms step_avg:42.40ms +[2025-09-11 12:23:37] [Rank 0] step:9721/10000 train_time:412215ms step_avg:42.40ms +[2025-09-11 12:23:37] [Rank 0] step:9741/10000 train_time:412934ms step_avg:42.39ms +[2025-09-11 12:23:37] [Rank 0] step:9741/10000 train_time:412934ms step_avg:42.39ms +[2025-09-11 12:23:38] [Rank 0] step:9761/10000 train_time:413652ms step_avg:42.38ms +[2025-09-11 12:23:38] [Rank 0] step:9761/10000 train_time:413652ms step_avg:42.38ms +[2025-09-11 12:23:39] [Rank 0] step:9781/10000 train_time:414368ms step_avg:42.36ms +[2025-09-11 12:23:39] [Rank 0] step:9781/10000 train_time:414368ms step_avg:42.36ms +[2025-09-11 12:23:39] [Rank 0] step:9801/10000 train_time:415091ms step_avg:42.35ms +[2025-09-11 12:23:39] [Rank 0] step:9801/10000 train_time:415091ms step_avg:42.35ms +[2025-09-11 12:23:40] [Rank 0] step:9821/10000 train_time:415810ms step_avg:42.34ms +[2025-09-11 12:23:40] [Rank 0] step:9821/10000 train_time:415810ms step_avg:42.34ms +[2025-09-11 12:23:41] [Rank 0] step:9841/10000 train_time:416532ms step_avg:42.33ms +[2025-09-11 12:23:41] [Rank 0] step:9841/10000 train_time:416532ms step_avg:42.33ms +[2025-09-11 12:23:42] [Rank 0] step:9861/10000 train_time:417249ms step_avg:42.31ms +[2025-09-11 12:23:42] [Rank 0] step:9861/10000 train_time:417249ms step_avg:42.31ms +[2025-09-11 12:23:42] [Rank 0] step:9881/10000 train_time:417969ms step_avg:42.30ms +[2025-09-11 12:23:42] [Rank 0] step:9881/10000 train_time:417969ms step_avg:42.30ms +[2025-09-11 12:23:43] [Rank 0] step:9901/10000 train_time:418688ms step_avg:42.29ms +[2025-09-11 12:23:43] [Rank 0] step:9901/10000 train_time:418688ms step_avg:42.29ms +[2025-09-11 12:23:44] [Rank 0] step:9921/10000 train_time:419697ms step_avg:42.30ms 
+[2025-09-11 12:23:44] [Rank 0] step:9921/10000 train_time:419697ms step_avg:42.30ms +[2025-09-11 12:23:45] [Rank 0] step:9941/10000 train_time:420420ms step_avg:42.29ms +[2025-09-11 12:23:45] [Rank 0] step:9941/10000 train_time:420420ms step_avg:42.29ms +[2025-09-11 12:23:45] [Rank 0] step:9961/10000 train_time:421143ms step_avg:42.28ms +[2025-09-11 12:23:45] [Rank 0] step:9961/10000 train_time:421143ms step_avg:42.28ms +[2025-09-11 12:23:46] [Rank 0] step:9981/10000 train_time:422133ms step_avg:42.29ms +[2025-09-11 12:23:46] [Rank 0] step:9981/10000 train_time:422133ms step_avg:42.29ms +[2025-09-11 12:23:47] [Rank 0] step:10000/10000 train_time:422825ms step_avg:42.28ms +[2025-09-11 12:23:47] [Rank 0] step:10000/10000 train_time:422825ms step_avg:42.28ms +[2025-09-11 12:23:47] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 12:23:47] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 12:23:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 12:23:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 12:23:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 12:23:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 12:23:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:23:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 12:23:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 12:23:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 12:23:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 12:23:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 12:23:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 12:23:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 12:23:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 12:23:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 12:23:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 12:23:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 12:23:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 12:23:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 12:23:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 12:23:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 12:23:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 12:23:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 12:23:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 12:23:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 12:23:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 12:23:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 12:23:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 12:23:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 12:23:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 12:23:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 12:23:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 12:23:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 12:23:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 12:23:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 12:23:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 12:23:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 12:23:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 12:23:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 12:23:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 12:23:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 12:23:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 12:23:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 12:23:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 12:23:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 12:23:58] [Rank 0] PRINT: step:10000/10000 val_loss:4.8901 total_sharp:3.6859e-03 L1_sharp:2.4827e-03 L2_sharp:1.3376e-03 L3_sharp:1.3257e-03 L4_sharp:1.5362e-03 L5_sharp:1.8753e-03 L6_sharp:2.5610e-03 L7_sharp:6.2743e-03 L8_sharp:9.7766e-03 L9_sharp:1.5435e-02 L10_sharp:1.9971e-02 L11_sharp:3.4238e-02 L12_sharp:1.5798e-01 total_fnorm:1.2256e-01 total_l1_linf:5.2250e+01 total_spectral:6.3477e-02 L1_fnorm:9.7656e-03 L2_fnorm:9.4604e-03 L3_fnorm:9.3994e-03 L4_fnorm:9.3384e-03 L5_fnorm:9.3384e-03 L6_fnorm:9.3384e-03 L7_fnorm:9.2163e-03 L8_fnorm:9.0942e-03 L9_fnorm:9.1553e-03 L10_fnorm:9.0332e-03 L11_fnorm:9.0332e-03 L12_fnorm:8.6670e-03 L1_l1linf:9.9182e-04 L2_l1linf:9.6512e-04 L3_l1linf:9.4986e-04 L4_l1linf:9.4604e-04 L5_l1linf:9.0027e-04 L6_l1linf:8.6594e-04 L7_l1linf:8.4305e-04 L8_l1linf:8.2397e-04 L9_l1linf:8.1253e-04 L10_l1linf:8.7738e-04 L11_l1linf:8.6975e-04 L12_l1linf:9.5749e-04 L1_spectral:1.7442e-04 L2_spectral:1.6795e-04 L3_spectral:1.6743e-04 L4_spectral:1.6437e-04 L5_spectral:1.6213e-04 L6_spectral:1.6109e-04 L7_spectral:1.6001e-04 L8_spectral:1.5428e-04 L9_spectral:1.5705e-04 L10_spectral:1.5032e-04 L11_spectral:1.5041e-04 L12_spectral:1.5778e-04 train_time:422846ms step_avg:42.28ms +[2025-09-11 12:23:58] [Rank 0] PRINT: step:10000/10000 val_loss:4.8901 total_sharp:3.6859e-03 L1_sharp:2.4827e-03 L2_sharp:1.3376e-03 L3_sharp:1.3257e-03 L4_sharp:1.5362e-03 L5_sharp:1.8753e-03 L6_sharp:2.5610e-03 L7_sharp:6.2743e-03 L8_sharp:9.7766e-03 L9_sharp:1.5435e-02 L10_sharp:1.9971e-02 L11_sharp:3.4238e-02 L12_sharp:1.5798e-01 total_fnorm:1.2256e-01 total_l1_linf:5.2250e+01 total_spectral:6.3477e-02 L1_fnorm:9.7656e-03 L2_fnorm:9.4604e-03 L3_fnorm:9.3994e-03 L4_fnorm:9.3384e-03 L5_fnorm:9.3384e-03 L6_fnorm:9.3384e-03 L7_fnorm:9.2163e-03 L8_fnorm:9.0942e-03 L9_fnorm:9.1553e-03 L10_fnorm:9.0332e-03 L11_fnorm:9.0332e-03 L12_fnorm:8.6670e-03 L1_l1linf:9.9182e-04 L2_l1linf:9.6512e-04 L3_l1linf:9.4986e-04 L4_l1linf:9.4604e-04 L5_l1linf:9.0027e-04 
L6_l1linf:8.6594e-04 L7_l1linf:8.4305e-04 L8_l1linf:8.2397e-04 L9_l1linf:8.1253e-04 L10_l1linf:8.7738e-04 L11_l1linf:8.6975e-04 L12_l1linf:9.5749e-04 L1_spectral:1.7442e-04 L2_spectral:1.6795e-04 L3_spectral:1.6743e-04 L4_spectral:1.6437e-04 L5_spectral:1.6213e-04 L6_spectral:1.6109e-04 L7_spectral:1.6001e-04 L8_spectral:1.5428e-04 L9_spectral:1.5705e-04 L10_spectral:1.5032e-04 L11_spectral:1.5041e-04 L12_spectral:1.5778e-04 train_time:422846ms step_avg:42.28ms +[2025-09-11 12:23:58] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 12:23:58 2025 --- +[2025-09-11 12:23:58] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 12:23:58 2025 --- +[2025-09-11 12:23:58] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 12:23:58] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.01_seed_44/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.01_seed_44/config.json new file mode 100644 index 0000000000000000000000000000000000000000..fd46e4eedef63fce23fa7892437f9d77351119f0 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.01_seed_44/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 44, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.002, + "muon_lr": 0.01, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "99d6117a-44c7-4aea-95cb-1f775290d3ce", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.01_seed_44/training_log_99d6117a-44c7-4aea-95cb-1f775290d3ce.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.01_seed_44/training_log_99d6117a-44c7-4aea-95cb-1f775290d3ce.txt new file mode 100644 index 0000000000000000000000000000000000000000..60a018322bb8c28b9608eee4be37283703c8065b --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.01_seed_44/training_log_99d6117a-44c7-4aea-95cb-1f775290d3ce.txt @@ -0,0 +1,4264 @@ +[2025-09-11 10:58:36] [Rank 0] PRINT: --- Script Start: Thu Sep 11 10:58:36 2025 --- +[2025-09-11 10:58:36] [Rank 0] PRINT: --- Script Start: Thu Sep 11 10:58:36 2025 --- +[2025-09-11 10:58:36] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002, muon_lr=0.01, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 10:58:36] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002, muon_lr=0.01, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 10:58:36] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 10:58:36] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 10:58:36] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 10:58:36] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 10:58:36] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.01_seed_44 +[2025-09-11 10:58:36] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.01_seed_44 +[2025-09-11 10:58:36] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import 
dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, 
"unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." 
+ "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention 
sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 10:58:36] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 10:58:36] [Rank 0] PRINT: Constructing model... +[2025-09-11 10:58:36] [Rank 0] PRINT: Constructing model... +[2025-09-11 10:58:37] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 10:58:37] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 10:58:37] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 10:58:37] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 10:58:37] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 10:58:37] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 10:58:37] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 10:58:37] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 10:58:37] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 10:58:37] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 10:58:40] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 10:58:40] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 10:58:40] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 10:58:40] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 10:58:40] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 10:58:40] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 10:58:46] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 10:58:46] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 10:58:46] [Rank 0] PRINT: Starting warmup... +[2025-09-11 10:58:46] [Rank 0] PRINT: Starting warmup... +[2025-09-11 10:59:22] [Rank 0] PRINT: Warmup complete. +[2025-09-11 10:59:22] [Rank 0] PRINT: Warmup complete. +[2025-09-11 10:59:22] [Rank 0] PRINT: Starting training... +[2025-09-11 10:59:22] [Rank 0] PRINT: Starting training... 
+[2025-09-11 10:59:23] [Rank 0] step:21/10000 train_time:922ms step_avg:43.88ms +[2025-09-11 10:59:23] [Rank 0] step:21/10000 train_time:922ms step_avg:43.88ms +[2025-09-11 10:59:24] [Rank 0] step:41/10000 train_time:1651ms step_avg:40.26ms +[2025-09-11 10:59:24] [Rank 0] step:41/10000 train_time:1651ms step_avg:40.26ms +[2025-09-11 10:59:25] [Rank 0] step:61/10000 train_time:2379ms step_avg:39.00ms +[2025-09-11 10:59:25] [Rank 0] step:61/10000 train_time:2379ms step_avg:39.00ms +[2025-09-11 10:59:26] [Rank 0] step:81/10000 train_time:3107ms step_avg:38.36ms +[2025-09-11 10:59:26] [Rank 0] step:81/10000 train_time:3107ms step_avg:38.36ms +[2025-09-11 10:59:26] [Rank 0] step:101/10000 train_time:3836ms step_avg:37.98ms +[2025-09-11 10:59:26] [Rank 0] step:101/10000 train_time:3836ms step_avg:37.98ms +[2025-09-11 10:59:27] [Rank 0] step:121/10000 train_time:4563ms step_avg:37.71ms +[2025-09-11 10:59:27] [Rank 0] step:121/10000 train_time:4563ms step_avg:37.71ms +[2025-09-11 10:59:28] [Rank 0] step:141/10000 train_time:5293ms step_avg:37.54ms +[2025-09-11 10:59:28] [Rank 0] step:141/10000 train_time:5293ms step_avg:37.54ms +[2025-09-11 10:59:28] [Rank 0] step:161/10000 train_time:6020ms step_avg:37.39ms +[2025-09-11 10:59:28] [Rank 0] step:161/10000 train_time:6020ms step_avg:37.39ms +[2025-09-11 10:59:29] [Rank 0] step:181/10000 train_time:6748ms step_avg:37.28ms +[2025-09-11 10:59:29] [Rank 0] step:181/10000 train_time:6748ms step_avg:37.28ms +[2025-09-11 10:59:30] [Rank 0] step:201/10000 train_time:7476ms step_avg:37.19ms +[2025-09-11 10:59:30] [Rank 0] step:201/10000 train_time:7476ms step_avg:37.19ms +[2025-09-11 10:59:31] [Rank 0] step:221/10000 train_time:8204ms step_avg:37.12ms +[2025-09-11 10:59:31] [Rank 0] step:221/10000 train_time:8204ms step_avg:37.12ms +[2025-09-11 10:59:31] [Rank 0] step:241/10000 train_time:8931ms step_avg:37.06ms +[2025-09-11 10:59:31] [Rank 0] step:241/10000 train_time:8931ms step_avg:37.06ms +[2025-09-11 10:59:32] [Rank 0] 
step:261/10000 train_time:9659ms step_avg:37.01ms +[2025-09-11 10:59:32] [Rank 0] step:261/10000 train_time:9659ms step_avg:37.01ms +[2025-09-11 10:59:33] [Rank 0] step:281/10000 train_time:10386ms step_avg:36.96ms +[2025-09-11 10:59:33] [Rank 0] step:281/10000 train_time:10386ms step_avg:36.96ms +[2025-09-11 10:59:34] [Rank 0] step:301/10000 train_time:11114ms step_avg:36.92ms +[2025-09-11 10:59:34] [Rank 0] step:301/10000 train_time:11114ms step_avg:36.92ms +[2025-09-11 10:59:34] [Rank 0] step:321/10000 train_time:11841ms step_avg:36.89ms +[2025-09-11 10:59:34] [Rank 0] step:321/10000 train_time:11841ms step_avg:36.89ms +[2025-09-11 10:59:35] [Rank 0] step:341/10000 train_time:12569ms step_avg:36.86ms +[2025-09-11 10:59:35] [Rank 0] step:341/10000 train_time:12569ms step_avg:36.86ms +[2025-09-11 10:59:36] [Rank 0] step:361/10000 train_time:13297ms step_avg:36.83ms +[2025-09-11 10:59:36] [Rank 0] step:361/10000 train_time:13297ms step_avg:36.83ms +[2025-09-11 10:59:36] [Rank 0] step:381/10000 train_time:14024ms step_avg:36.81ms +[2025-09-11 10:59:36] [Rank 0] step:381/10000 train_time:14024ms step_avg:36.81ms +[2025-09-11 10:59:37] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 10:59:37] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 10:59:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 10:59:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 11:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 11:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 11:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 11:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 11:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 11:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 11:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 11:00:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 11:00:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 11:00:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 11:00:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 11:00:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 11:00:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 11:00:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 11:00:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 11:00:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 11:00:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 11:00:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 11:00:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 11:00:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 11:00:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 11:00:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 11:00:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 11:00:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 11:00:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 11:00:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 11:00:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 11:00:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 11:00:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 11:00:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 11:00:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 11:00:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 11:00:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 11:00:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 11:00:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 11:00:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 11:00:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 11:00:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 11:00:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 11:00:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:00:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:00:24] [Rank 0] PRINT: step:400/10000 val_loss:6.6268 total_sharp:2.1735e-02 L1_sharp:2.8924e-02 L2_sharp:2.6631e-02 L3_sharp:2.1219e-02 L4_sharp:1.8234e-02 L5_sharp:2.3978e-02 L6_sharp:2.5320e-02 L7_sharp:1.6457e-02 L8_sharp:1.3528e-02 L9_sharp:1.5515e-02 L10_sharp:1.4511e-02 L11_sharp:1.5903e-02 L12_sharp:2.8265e-02 total_fnorm:9.6759e+00 total_l1_linf:3.3464e+04 total_spectral:4.8378e+00 L1_fnorm:1.2250e+00 L2_fnorm:1.2173e+00 L3_fnorm:1.2256e+00 L4_fnorm:1.2237e+00 L5_fnorm:1.1703e+00 L6_fnorm:1.1165e+00 L7_fnorm:1.0404e+00 L8_fnorm:9.7991e-01 L9_fnorm:9.2116e-01 L10_fnorm:8.8250e-01 L11_fnorm:8.3463e-01 L12_fnorm:7.6642e-01 L1_l1linf:3.9714e-01 L2_l1linf:3.9843e-01 L3_l1linf:4.1366e-01 L4_l1linf:4.1694e-01 L5_l1linf:3.9237e-01 L6_l1linf:3.5773e-01 L7_l1linf:3.4415e-01 L8_l1linf:3.2574e-01 L9_l1linf:3.0030e-01 L10_l1linf:2.8954e-01 L11_l1linf:2.8138e-01 L12_l1linf:2.5632e-01 L1_spectral:1.2038e-02 L2_spectral:1.2045e-02 L3_spectral:1.2041e-02 L4_spectral:1.2046e-02 L5_spectral:1.2050e-02 L6_spectral:1.2041e-02 L7_spectral:1.2038e-02 L8_spectral:1.2032e-02 L9_spectral:1.2027e-02 L10_spectral:1.2026e-02 L11_spectral:1.2024e-02 L12_spectral:1.2023e-02 train_time:14732ms step_avg:36.83ms +[2025-09-11 11:00:24] [Rank 0] PRINT: step:400/10000 val_loss:6.6268 total_sharp:2.1735e-02 L1_sharp:2.8924e-02 L2_sharp:2.6631e-02 L3_sharp:2.1219e-02 L4_sharp:1.8234e-02 L5_sharp:2.3978e-02 L6_sharp:2.5320e-02 L7_sharp:1.6457e-02 L8_sharp:1.3528e-02 L9_sharp:1.5515e-02 L10_sharp:1.4511e-02 L11_sharp:1.5903e-02 L12_sharp:2.8265e-02 total_fnorm:9.6759e+00 total_l1_linf:3.3464e+04 total_spectral:4.8378e+00 L1_fnorm:1.2250e+00 L2_fnorm:1.2173e+00 L3_fnorm:1.2256e+00 L4_fnorm:1.2237e+00 L5_fnorm:1.1703e+00 L6_fnorm:1.1165e+00 L7_fnorm:1.0404e+00 L8_fnorm:9.7991e-01 L9_fnorm:9.2116e-01 L10_fnorm:8.8250e-01 L11_fnorm:8.3463e-01 L12_fnorm:7.6642e-01 L1_l1linf:3.9714e-01 L2_l1linf:3.9843e-01 L3_l1linf:4.1366e-01 L4_l1linf:4.1694e-01 L5_l1linf:3.9237e-01 
L6_l1linf:3.5773e-01 L7_l1linf:3.4415e-01 L8_l1linf:3.2574e-01 L9_l1linf:3.0030e-01 L10_l1linf:2.8954e-01 L11_l1linf:2.8138e-01 L12_l1linf:2.5632e-01 L1_spectral:1.2038e-02 L2_spectral:1.2045e-02 L3_spectral:1.2041e-02 L4_spectral:1.2046e-02 L5_spectral:1.2050e-02 L6_spectral:1.2041e-02 L7_spectral:1.2038e-02 L8_spectral:1.2032e-02 L9_spectral:1.2027e-02 L10_spectral:1.2026e-02 L11_spectral:1.2024e-02 L12_spectral:1.2023e-02 train_time:14732ms step_avg:36.83ms +[2025-09-11 11:00:54] [Rank 0] step:401/10000 train_time:45162ms step_avg:112.62ms +[2025-09-11 11:00:54] [Rank 0] step:401/10000 train_time:45162ms step_avg:112.62ms +[2025-09-11 11:00:57] [Rank 0] step:421/10000 train_time:47621ms step_avg:113.11ms +[2025-09-11 11:00:57] [Rank 0] step:421/10000 train_time:47621ms step_avg:113.11ms +[2025-09-11 11:00:57] [Rank 0] step:441/10000 train_time:48261ms step_avg:109.44ms +[2025-09-11 11:00:57] [Rank 0] step:441/10000 train_time:48261ms step_avg:109.44ms +[2025-09-11 11:00:58] [Rank 0] step:461/10000 train_time:48901ms step_avg:106.08ms +[2025-09-11 11:00:58] [Rank 0] step:461/10000 train_time:48901ms step_avg:106.08ms +[2025-09-11 11:00:59] [Rank 0] step:481/10000 train_time:49540ms step_avg:102.99ms +[2025-09-11 11:00:59] [Rank 0] step:481/10000 train_time:49540ms step_avg:102.99ms +[2025-09-11 11:00:59] [Rank 0] step:501/10000 train_time:50179ms step_avg:100.16ms +[2025-09-11 11:00:59] [Rank 0] step:501/10000 train_time:50179ms step_avg:100.16ms +[2025-09-11 11:01:00] [Rank 0] step:521/10000 train_time:51281ms step_avg:98.43ms +[2025-09-11 11:01:00] [Rank 0] step:521/10000 train_time:51281ms step_avg:98.43ms +[2025-09-11 11:01:01] [Rank 0] step:541/10000 train_time:52064ms step_avg:96.24ms +[2025-09-11 11:01:01] [Rank 0] step:541/10000 train_time:52064ms step_avg:96.24ms +[2025-09-11 11:01:02] [Rank 0] step:561/10000 train_time:52703ms step_avg:93.94ms +[2025-09-11 11:01:02] [Rank 0] step:561/10000 train_time:52703ms step_avg:93.94ms +[2025-09-11 11:01:03] [Rank 
0] step:581/10000 train_time:53604ms step_avg:92.26ms +[2025-09-11 11:01:03] [Rank 0] step:581/10000 train_time:53604ms step_avg:92.26ms +[2025-09-11 11:01:03] [Rank 0] step:601/10000 train_time:54243ms step_avg:90.25ms +[2025-09-11 11:01:03] [Rank 0] step:601/10000 train_time:54243ms step_avg:90.25ms +[2025-09-11 11:01:04] [Rank 0] step:621/10000 train_time:54881ms step_avg:88.38ms +[2025-09-11 11:01:04] [Rank 0] step:621/10000 train_time:54881ms step_avg:88.38ms +[2025-09-11 11:01:05] [Rank 0] step:641/10000 train_time:55520ms step_avg:86.62ms +[2025-09-11 11:01:05] [Rank 0] step:641/10000 train_time:55520ms step_avg:86.62ms +[2025-09-11 11:01:05] [Rank 0] step:661/10000 train_time:56159ms step_avg:84.96ms +[2025-09-11 11:01:05] [Rank 0] step:661/10000 train_time:56159ms step_avg:84.96ms +[2025-09-11 11:01:06] [Rank 0] step:681/10000 train_time:56810ms step_avg:83.42ms +[2025-09-11 11:01:06] [Rank 0] step:681/10000 train_time:56810ms step_avg:83.42ms +[2025-09-11 11:01:07] [Rank 0] step:701/10000 train_time:57448ms step_avg:81.95ms +[2025-09-11 11:01:07] [Rank 0] step:701/10000 train_time:57448ms step_avg:81.95ms +[2025-09-11 11:01:07] [Rank 0] step:721/10000 train_time:58087ms step_avg:80.56ms +[2025-09-11 11:01:07] [Rank 0] step:721/10000 train_time:58087ms step_avg:80.56ms +[2025-09-11 11:01:08] [Rank 0] step:741/10000 train_time:58725ms step_avg:79.25ms +[2025-09-11 11:01:08] [Rank 0] step:741/10000 train_time:58725ms step_avg:79.25ms +[2025-09-11 11:01:08] [Rank 0] step:761/10000 train_time:59368ms step_avg:78.01ms +[2025-09-11 11:01:08] [Rank 0] step:761/10000 train_time:59368ms step_avg:78.01ms +[2025-09-11 11:01:09] [Rank 0] step:781/10000 train_time:60012ms step_avg:76.84ms +[2025-09-11 11:01:09] [Rank 0] step:781/10000 train_time:60012ms step_avg:76.84ms +[2025-09-11 11:01:10] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 11:01:10] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 11:01:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 11:01:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 11:01:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 11:01:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 11:01:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:01:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:01:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 11:01:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 11:01:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 11:01:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 11:01:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 11:01:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 11:01:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 11:01:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 11:01:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 11:01:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 11:01:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 11:01:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 11:01:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 11:01:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 11:01:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 11:01:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 11:01:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 11:01:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 11:01:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 11:01:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 11:01:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 11:01:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 11:01:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 11:01:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 11:01:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 11:01:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 11:01:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 11:01:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 11:01:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 11:01:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 11:01:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 11:01:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 11:01:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... 
+[2025-09-11 11:01:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 11:01:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 11:01:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 11:01:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:01:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:01:53] [Rank 0] PRINT: step:800/10000 val_loss:6.1912 total_sharp:2.9714e-02 L1_sharp:2.3457e-02 L2_sharp:1.3869e-02 L3_sharp:1.2165e-02 L4_sharp:1.3747e-02 L5_sharp:1.6906e-02 L6_sharp:1.3829e-02 L7_sharp:1.1068e-02 L8_sharp:1.3396e-02 L9_sharp:1.3569e-02 L10_sharp:1.2303e-02 L11_sharp:1.7197e-02 L12_sharp:3.4534e-02 total_fnorm:7.8125e+00 total_l1_linf:1.9712e+04 total_spectral:3.9062e+00 L1_fnorm:1.2500e+00 L2_fnorm:1.2422e+00 L3_fnorm:1.2578e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2109e+00 L6_fnorm:1.1953e+00 L7_fnorm:1.1562e+00 L8_fnorm:1.0938e+00 L9_fnorm:1.0703e+00 L10_fnorm:1.0078e+00 L11_fnorm:9.2969e-01 L12_fnorm:8.3984e-01 L1_l1linf:3.8867e-01 L2_l1linf:3.9258e-01 L3_l1linf:3.9844e-01 L4_l1linf:4.0039e-01 L5_l1linf:3.8672e-01 L6_l1linf:3.7109e-01 L7_l1linf:3.6914e-01 L8_l1linf:3.5547e-01 L9_l1linf:3.1641e-01 L10_l1linf:2.8906e-01 L11_l1linf:2.4707e-01 L12_l1linf:2.1875e-01 L1_spectral:1.3544e-02 L2_spectral:1.3510e-02 L3_spectral:1.3456e-02 L4_spectral:1.3518e-02 L5_spectral:1.3540e-02 L6_spectral:1.3594e-02 L7_spectral:1.3599e-02 L8_spectral:1.3600e-02 L9_spectral:1.3617e-02 L10_spectral:1.3574e-02 L11_spectral:1.3384e-02 L12_spectral:1.3233e-02 train_time:60637ms step_avg:75.80ms +[2025-09-11 11:01:53] [Rank 0] PRINT: step:800/10000 val_loss:6.1912 total_sharp:2.9714e-02 L1_sharp:2.3457e-02 L2_sharp:1.3869e-02 L3_sharp:1.2165e-02 L4_sharp:1.3747e-02 L5_sharp:1.6906e-02 L6_sharp:1.3829e-02 L7_sharp:1.1068e-02 L8_sharp:1.3396e-02 
L9_sharp:1.3569e-02 L10_sharp:1.2303e-02 L11_sharp:1.7197e-02 L12_sharp:3.4534e-02 total_fnorm:7.8125e+00 total_l1_linf:1.9712e+04 total_spectral:3.9062e+00 L1_fnorm:1.2500e+00 L2_fnorm:1.2422e+00 L3_fnorm:1.2578e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2109e+00 L6_fnorm:1.1953e+00 L7_fnorm:1.1562e+00 L8_fnorm:1.0938e+00 L9_fnorm:1.0703e+00 L10_fnorm:1.0078e+00 L11_fnorm:9.2969e-01 L12_fnorm:8.3984e-01 L1_l1linf:3.8867e-01 L2_l1linf:3.9258e-01 L3_l1linf:3.9844e-01 L4_l1linf:4.0039e-01 L5_l1linf:3.8672e-01 L6_l1linf:3.7109e-01 L7_l1linf:3.6914e-01 L8_l1linf:3.5547e-01 L9_l1linf:3.1641e-01 L10_l1linf:2.8906e-01 L11_l1linf:2.4707e-01 L12_l1linf:2.1875e-01 L1_spectral:1.3544e-02 L2_spectral:1.3510e-02 L3_spectral:1.3456e-02 L4_spectral:1.3518e-02 L5_spectral:1.3540e-02 L6_spectral:1.3594e-02 L7_spectral:1.3599e-02 L8_spectral:1.3600e-02 L9_spectral:1.3617e-02 L10_spectral:1.3574e-02 L11_spectral:1.3384e-02 L12_spectral:1.3233e-02 train_time:60637ms step_avg:75.80ms +[2025-09-11 11:01:55] [Rank 0] step:801/10000 train_time:62452ms step_avg:77.97ms +[2025-09-11 11:01:55] [Rank 0] step:801/10000 train_time:62452ms step_avg:77.97ms +[2025-09-11 11:01:56] [Rank 0] step:821/10000 train_time:63101ms step_avg:76.86ms +[2025-09-11 11:01:56] [Rank 0] step:821/10000 train_time:63101ms step_avg:76.86ms +[2025-09-11 11:01:56] [Rank 0] step:841/10000 train_time:63746ms step_avg:75.80ms +[2025-09-11 11:01:56] [Rank 0] step:841/10000 train_time:63746ms step_avg:75.80ms +[2025-09-11 11:01:57] [Rank 0] step:861/10000 train_time:64390ms step_avg:74.79ms +[2025-09-11 11:01:57] [Rank 0] step:861/10000 train_time:64390ms step_avg:74.79ms +[2025-09-11 11:01:58] [Rank 0] step:881/10000 train_time:65034ms step_avg:73.82ms +[2025-09-11 11:01:58] [Rank 0] step:881/10000 train_time:65034ms step_avg:73.82ms +[2025-09-11 11:01:58] [Rank 0] step:901/10000 train_time:65678ms step_avg:72.89ms +[2025-09-11 11:01:58] [Rank 0] step:901/10000 train_time:65678ms step_avg:72.89ms +[2025-09-11 11:01:59] [Rank 0] 
step:921/10000 train_time:66324ms step_avg:72.01ms +[2025-09-11 11:01:59] [Rank 0] step:921/10000 train_time:66324ms step_avg:72.01ms +[2025-09-11 11:02:00] [Rank 0] step:941/10000 train_time:66967ms step_avg:71.17ms +[2025-09-11 11:02:00] [Rank 0] step:941/10000 train_time:66967ms step_avg:71.17ms +[2025-09-11 11:02:00] [Rank 0] step:961/10000 train_time:67611ms step_avg:70.35ms +[2025-09-11 11:02:00] [Rank 0] step:961/10000 train_time:67611ms step_avg:70.35ms +[2025-09-11 11:02:01] [Rank 0] step:981/10000 train_time:68254ms step_avg:69.58ms +[2025-09-11 11:02:01] [Rank 0] step:981/10000 train_time:68254ms step_avg:69.58ms +[2025-09-11 11:02:02] [Rank 0] step:1001/10000 train_time:68898ms step_avg:68.83ms +[2025-09-11 11:02:02] [Rank 0] step:1001/10000 train_time:68898ms step_avg:68.83ms +[2025-09-11 11:02:02] [Rank 0] step:1021/10000 train_time:69541ms step_avg:68.11ms +[2025-09-11 11:02:02] [Rank 0] step:1021/10000 train_time:69541ms step_avg:68.11ms +[2025-09-11 11:02:03] [Rank 0] step:1041/10000 train_time:70185ms step_avg:67.42ms +[2025-09-11 11:02:03] [Rank 0] step:1041/10000 train_time:70185ms step_avg:67.42ms +[2025-09-11 11:02:04] [Rank 0] step:1061/10000 train_time:71377ms step_avg:67.27ms +[2025-09-11 11:02:04] [Rank 0] step:1061/10000 train_time:71377ms step_avg:67.27ms +[2025-09-11 11:02:05] [Rank 0] step:1081/10000 train_time:72020ms step_avg:66.62ms +[2025-09-11 11:02:05] [Rank 0] step:1081/10000 train_time:72020ms step_avg:66.62ms +[2025-09-11 11:02:05] [Rank 0] step:1101/10000 train_time:72662ms step_avg:66.00ms +[2025-09-11 11:02:05] [Rank 0] step:1101/10000 train_time:72662ms step_avg:66.00ms +[2025-09-11 11:02:06] [Rank 0] step:1121/10000 train_time:73588ms step_avg:65.65ms +[2025-09-11 11:02:06] [Rank 0] step:1121/10000 train_time:73588ms step_avg:65.65ms +[2025-09-11 11:02:07] [Rank 0] step:1141/10000 train_time:74231ms step_avg:65.06ms +[2025-09-11 11:02:07] [Rank 0] step:1141/10000 train_time:74231ms step_avg:65.06ms +[2025-09-11 11:02:08] 
[Rank 0] step:1161/10000 train_time:74874ms step_avg:64.49ms +[2025-09-11 11:02:08] [Rank 0] step:1161/10000 train_time:74874ms step_avg:64.49ms +[2025-09-11 11:02:08] [Rank 0] step:1181/10000 train_time:75518ms step_avg:63.94ms +[2025-09-11 11:02:08] [Rank 0] step:1181/10000 train_time:75518ms step_avg:63.94ms +[2025-09-11 11:02:09] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 11:02:09] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 11:02:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 11:02:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 11:02:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 11:02:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 11:02:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:02:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:02:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 11:02:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 11:02:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 11:02:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 11:02:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 11:02:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 11:02:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... 
+[2025-09-11 11:02:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 11:02:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 11:02:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 11:02:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 11:02:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 11:02:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 11:02:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 11:02:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 11:02:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 11:02:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 11:02:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 11:02:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 11:02:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 11:02:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 11:02:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 11:02:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 11:02:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 11:02:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 11:02:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 11:02:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... 
+[2025-09-11 11:02:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 11:02:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 11:02:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 11:02:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 11:02:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 11:02:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 11:02:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 11:02:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 11:02:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 11:02:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:02:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:02:19] [Rank 0] PRINT: step:1200/10000 val_loss:5.8721 total_sharp:2.8225e-02 L1_sharp:1.6798e-02 L2_sharp:8.3286e-03 L3_sharp:8.1423e-03 L4_sharp:8.2074e-03 L5_sharp:1.0510e-02 L6_sharp:9.0066e-03 L7_sharp:8.5285e-03 L8_sharp:1.2993e-02 L9_sharp:1.0022e-02 L10_sharp:8.1123e-03 L11_sharp:1.0020e-02 L12_sharp:3.5930e-02 total_fnorm:7.2188e+00 total_l1_linf:1.8688e+04 total_spectral:3.6250e+00 L1_fnorm:1.2656e+00 L2_fnorm:1.2422e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2422e+00 L6_fnorm:1.2500e+00 L7_fnorm:1.2344e+00 L8_fnorm:1.2031e+00 L9_fnorm:1.2109e+00 L10_fnorm:1.1875e+00 L11_fnorm:1.1484e+00 L12_fnorm:1.0312e+00 L1_l1linf:3.8086e-01 L2_l1linf:3.6133e-01 L3_l1linf:3.6328e-01 L4_l1linf:3.7891e-01 L5_l1linf:3.7305e-01 L6_l1linf:3.7500e-01 L7_l1linf:3.8281e-01 L8_l1linf:3.7695e-01 L9_l1linf:3.6328e-01 L10_l1linf:3.4375e-01 L11_l1linf:3.0859e-01 L12_l1linf:2.3535e-01 L1_spectral:1.4017e-02 L2_spectral:1.4005e-02 L3_spectral:1.3917e-02 L4_spectral:1.3855e-02 L5_spectral:1.3805e-02 L6_spectral:1.3830e-02 L7_spectral:1.3846e-02 L8_spectral:1.3989e-02 L9_spectral:1.4002e-02 L10_spectral:1.4128e-02 L11_spectral:1.4114e-02 L12_spectral:1.4075e-02 train_time:76143ms step_avg:63.45ms +[2025-09-11 11:02:19] [Rank 0] PRINT: step:1200/10000 val_loss:5.8721 total_sharp:2.8225e-02 L1_sharp:1.6798e-02 L2_sharp:8.3286e-03 L3_sharp:8.1423e-03 L4_sharp:8.2074e-03 L5_sharp:1.0510e-02 L6_sharp:9.0066e-03 L7_sharp:8.5285e-03 L8_sharp:1.2993e-02 L9_sharp:1.0022e-02 L10_sharp:8.1123e-03 L11_sharp:1.0020e-02 L12_sharp:3.5930e-02 total_fnorm:7.2188e+00 total_l1_linf:1.8688e+04 total_spectral:3.6250e+00 L1_fnorm:1.2656e+00 L2_fnorm:1.2422e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2422e+00 L6_fnorm:1.2500e+00 L7_fnorm:1.2344e+00 L8_fnorm:1.2031e+00 L9_fnorm:1.2109e+00 L10_fnorm:1.1875e+00 L11_fnorm:1.1484e+00 L12_fnorm:1.0312e+00 L1_l1linf:3.8086e-01 L2_l1linf:3.6133e-01 L3_l1linf:3.6328e-01 L4_l1linf:3.7891e-01 L5_l1linf:3.7305e-01 
L6_l1linf:3.7500e-01 L7_l1linf:3.8281e-01 L8_l1linf:3.7695e-01 L9_l1linf:3.6328e-01 L10_l1linf:3.4375e-01 L11_l1linf:3.0859e-01 L12_l1linf:2.3535e-01 L1_spectral:1.4017e-02 L2_spectral:1.4005e-02 L3_spectral:1.3917e-02 L4_spectral:1.3855e-02 L5_spectral:1.3805e-02 L6_spectral:1.3830e-02 L7_spectral:1.3846e-02 L8_spectral:1.3989e-02 L9_spectral:1.4002e-02 L10_spectral:1.4128e-02 L11_spectral:1.4114e-02 L12_spectral:1.4075e-02 train_time:76143ms step_avg:63.45ms +[2025-09-11 11:02:21] [Rank 0] step:1201/10000 train_time:77900ms step_avg:64.86ms +[2025-09-11 11:02:21] [Rank 0] step:1201/10000 train_time:77900ms step_avg:64.86ms +[2025-09-11 11:02:21] [Rank 0] step:1221/10000 train_time:78549ms step_avg:64.33ms +[2025-09-11 11:02:21] [Rank 0] step:1221/10000 train_time:78549ms step_avg:64.33ms +[2025-09-11 11:02:22] [Rank 0] step:1241/10000 train_time:79194ms step_avg:63.81ms +[2025-09-11 11:02:22] [Rank 0] step:1241/10000 train_time:79194ms step_avg:63.81ms +[2025-09-11 11:02:23] [Rank 0] step:1261/10000 train_time:79840ms step_avg:63.31ms +[2025-09-11 11:02:23] [Rank 0] step:1261/10000 train_time:79840ms step_avg:63.31ms +[2025-09-11 11:02:23] [Rank 0] step:1281/10000 train_time:80485ms step_avg:62.83ms +[2025-09-11 11:02:23] [Rank 0] step:1281/10000 train_time:80485ms step_avg:62.83ms +[2025-09-11 11:02:24] [Rank 0] step:1301/10000 train_time:81130ms step_avg:62.36ms +[2025-09-11 11:02:24] [Rank 0] step:1301/10000 train_time:81130ms step_avg:62.36ms +[2025-09-11 11:02:25] [Rank 0] step:1321/10000 train_time:81774ms step_avg:61.90ms +[2025-09-11 11:02:25] [Rank 0] step:1321/10000 train_time:81774ms step_avg:61.90ms +[2025-09-11 11:02:25] [Rank 0] step:1341/10000 train_time:82418ms step_avg:61.46ms +[2025-09-11 11:02:25] [Rank 0] step:1341/10000 train_time:82418ms step_avg:61.46ms +[2025-09-11 11:02:26] [Rank 0] step:1361/10000 train_time:83063ms step_avg:61.03ms +[2025-09-11 11:02:26] [Rank 0] step:1361/10000 train_time:83063ms step_avg:61.03ms +[2025-09-11 11:02:27] 
[Rank 0] step:1381/10000 train_time:83707ms step_avg:60.61ms +[2025-09-11 11:02:27] [Rank 0] step:1381/10000 train_time:83707ms step_avg:60.61ms +[2025-09-11 11:02:27] [Rank 0] step:1401/10000 train_time:84352ms step_avg:60.21ms +[2025-09-11 11:02:27] [Rank 0] step:1401/10000 train_time:84352ms step_avg:60.21ms +[2025-09-11 11:02:28] [Rank 0] step:1421/10000 train_time:84995ms step_avg:59.81ms +[2025-09-11 11:02:28] [Rank 0] step:1421/10000 train_time:84995ms step_avg:59.81ms +[2025-09-11 11:02:29] [Rank 0] step:1441/10000 train_time:85639ms step_avg:59.43ms +[2025-09-11 11:02:29] [Rank 0] step:1441/10000 train_time:85639ms step_avg:59.43ms +[2025-09-11 11:02:29] [Rank 0] step:1461/10000 train_time:86284ms step_avg:59.06ms +[2025-09-11 11:02:29] [Rank 0] step:1461/10000 train_time:86284ms step_avg:59.06ms +[2025-09-11 11:02:30] [Rank 0] step:1481/10000 train_time:86928ms step_avg:58.70ms +[2025-09-11 11:02:30] [Rank 0] step:1481/10000 train_time:86928ms step_avg:58.70ms +[2025-09-11 11:02:30] [Rank 0] step:1501/10000 train_time:87584ms step_avg:58.35ms +[2025-09-11 11:02:30] [Rank 0] step:1501/10000 train_time:87584ms step_avg:58.35ms +[2025-09-11 11:02:31] [Rank 0] step:1521/10000 train_time:88232ms step_avg:58.01ms +[2025-09-11 11:02:31] [Rank 0] step:1521/10000 train_time:88232ms step_avg:58.01ms +[2025-09-11 11:02:32] [Rank 0] step:1541/10000 train_time:88880ms step_avg:57.68ms +[2025-09-11 11:02:32] [Rank 0] step:1541/10000 train_time:88880ms step_avg:57.68ms +[2025-09-11 11:02:32] [Rank 0] step:1561/10000 train_time:89529ms step_avg:57.35ms +[2025-09-11 11:02:32] [Rank 0] step:1561/10000 train_time:89529ms step_avg:57.35ms +[2025-09-11 11:02:33] [Rank 0] step:1581/10000 train_time:90177ms step_avg:57.04ms +[2025-09-11 11:02:33] [Rank 0] step:1581/10000 train_time:90177ms step_avg:57.04ms +[2025-09-11 11:02:34] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 11:02:34] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 11:02:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 11:02:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 11:02:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 11:02:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 11:02:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:02:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:02:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 11:02:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 11:02:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 11:02:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 11:02:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 11:02:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 11:02:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 11:02:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 11:02:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 11:02:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 11:02:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 11:02:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 11:02:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 11:02:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 11:02:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 11:02:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 11:02:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 11:02:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 11:02:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 11:02:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 11:02:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 11:02:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 11:02:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 11:02:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 11:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 11:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 11:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 11:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 11:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 11:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 11:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 11:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 11:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 11:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 11:02:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 11:02:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 11:02:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:02:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:02:44] [Rank 0] PRINT: step:1600/10000 val_loss:5.6935 total_sharp:1.6264e-02 L1_sharp:1.1218e-02 L2_sharp:4.2208e-03 L3_sharp:4.0090e-03 L4_sharp:4.8378e-03 L5_sharp:5.4209e-03 L6_sharp:4.2874e-03 L7_sharp:4.5451e-03 L8_sharp:8.7466e-03 L9_sharp:5.7522e-03 L10_sharp:6.7724e-03 L11_sharp:7.9181e-03 L12_sharp:2.1636e-02 total_fnorm:7.1562e+00 total_l1_linf:1.8176e+04 total_spectral:3.5781e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2500e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2656e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2500e+00 L8_fnorm:1.2344e+00 L9_fnorm:1.2500e+00 L10_fnorm:1.2422e+00 L11_fnorm:1.2109e+00 L12_fnorm:1.0938e+00 L1_l1linf:3.7695e-01 L2_l1linf:3.5352e-01 L3_l1linf:3.5352e-01 L4_l1linf:3.5938e-01 L5_l1linf:3.5938e-01 L6_l1linf:3.5742e-01 L7_l1linf:3.6328e-01 L8_l1linf:3.6523e-01 L9_l1linf:3.6133e-01 L10_l1linf:3.4961e-01 L11_l1linf:3.2227e-01 L12_l1linf:2.4902e-01 L1_spectral:1.4633e-02 L2_spectral:1.4425e-02 L3_spectral:1.4333e-02 L4_spectral:1.4261e-02 L5_spectral:1.4179e-02 L6_spectral:1.4172e-02 L7_spectral:1.4123e-02 L8_spectral:1.4131e-02 L9_spectral:1.4171e-02 L10_spectral:1.4317e-02 L11_spectral:1.4398e-02 L12_spectral:1.4429e-02 train_time:90807ms step_avg:56.75ms +[2025-09-11 11:02:44] [Rank 0] PRINT: step:1600/10000 
val_loss:5.6935 total_sharp:1.6264e-02 L1_sharp:1.1218e-02 L2_sharp:4.2208e-03 L3_sharp:4.0090e-03 L4_sharp:4.8378e-03 L5_sharp:5.4209e-03 L6_sharp:4.2874e-03 L7_sharp:4.5451e-03 L8_sharp:8.7466e-03 L9_sharp:5.7522e-03 L10_sharp:6.7724e-03 L11_sharp:7.9181e-03 L12_sharp:2.1636e-02 total_fnorm:7.1562e+00 total_l1_linf:1.8176e+04 total_spectral:3.5781e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2500e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2656e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2500e+00 L8_fnorm:1.2344e+00 L9_fnorm:1.2500e+00 L10_fnorm:1.2422e+00 L11_fnorm:1.2109e+00 L12_fnorm:1.0938e+00 L1_l1linf:3.7695e-01 L2_l1linf:3.5352e-01 L3_l1linf:3.5352e-01 L4_l1linf:3.5938e-01 L5_l1linf:3.5938e-01 L6_l1linf:3.5742e-01 L7_l1linf:3.6328e-01 L8_l1linf:3.6523e-01 L9_l1linf:3.6133e-01 L10_l1linf:3.4961e-01 L11_l1linf:3.2227e-01 L12_l1linf:2.4902e-01 L1_spectral:1.4633e-02 L2_spectral:1.4425e-02 L3_spectral:1.4333e-02 L4_spectral:1.4261e-02 L5_spectral:1.4179e-02 L6_spectral:1.4172e-02 L7_spectral:1.4123e-02 L8_spectral:1.4131e-02 L9_spectral:1.4171e-02 L10_spectral:1.4317e-02 L11_spectral:1.4398e-02 L12_spectral:1.4429e-02 train_time:90807ms step_avg:56.75ms +[2025-09-11 11:02:46] [Rank 0] step:1601/10000 train_time:92106ms step_avg:57.53ms +[2025-09-11 11:02:46] [Rank 0] step:1601/10000 train_time:92106ms step_avg:57.53ms +[2025-09-11 11:02:46] [Rank 0] step:1621/10000 train_time:92759ms step_avg:57.22ms +[2025-09-11 11:02:46] [Rank 0] step:1621/10000 train_time:92759ms step_avg:57.22ms +[2025-09-11 11:02:47] [Rank 0] step:1641/10000 train_time:93407ms step_avg:56.92ms +[2025-09-11 11:02:47] [Rank 0] step:1641/10000 train_time:93407ms step_avg:56.92ms +[2025-09-11 11:02:48] [Rank 0] step:1661/10000 train_time:94056ms step_avg:56.63ms +[2025-09-11 11:02:48] [Rank 0] step:1661/10000 train_time:94056ms step_avg:56.63ms +[2025-09-11 11:02:48] [Rank 0] step:1681/10000 train_time:94704ms step_avg:56.34ms +[2025-09-11 11:02:48] [Rank 0] step:1681/10000 train_time:94704ms 
step_avg:56.34ms +[2025-09-11 11:02:49] [Rank 0] step:1701/10000 train_time:95352ms step_avg:56.06ms +[2025-09-11 11:02:49] [Rank 0] step:1701/10000 train_time:95352ms step_avg:56.06ms +[2025-09-11 11:02:50] [Rank 0] step:1721/10000 train_time:96000ms step_avg:55.78ms +[2025-09-11 11:02:50] [Rank 0] step:1721/10000 train_time:96000ms step_avg:55.78ms +[2025-09-11 11:02:50] [Rank 0] step:1741/10000 train_time:96649ms step_avg:55.51ms +[2025-09-11 11:02:50] [Rank 0] step:1741/10000 train_time:96649ms step_avg:55.51ms +[2025-09-11 11:02:51] [Rank 0] step:1761/10000 train_time:97297ms step_avg:55.25ms +[2025-09-11 11:02:51] [Rank 0] step:1761/10000 train_time:97297ms step_avg:55.25ms +[2025-09-11 11:02:51] [Rank 0] step:1781/10000 train_time:97945ms step_avg:54.99ms +[2025-09-11 11:02:51] [Rank 0] step:1781/10000 train_time:97945ms step_avg:54.99ms +[2025-09-11 11:02:52] [Rank 0] step:1801/10000 train_time:98593ms step_avg:54.74ms +[2025-09-11 11:02:52] [Rank 0] step:1801/10000 train_time:98593ms step_avg:54.74ms +[2025-09-11 11:02:53] [Rank 0] step:1821/10000 train_time:99241ms step_avg:54.50ms +[2025-09-11 11:02:53] [Rank 0] step:1821/10000 train_time:99241ms step_avg:54.50ms +[2025-09-11 11:02:53] [Rank 0] step:1841/10000 train_time:99888ms step_avg:54.26ms +[2025-09-11 11:02:53] [Rank 0] step:1841/10000 train_time:99888ms step_avg:54.26ms +[2025-09-11 11:02:54] [Rank 0] step:1861/10000 train_time:100537ms step_avg:54.02ms +[2025-09-11 11:02:54] [Rank 0] step:1861/10000 train_time:100537ms step_avg:54.02ms +[2025-09-11 11:02:55] [Rank 0] step:1881/10000 train_time:101185ms step_avg:53.79ms +[2025-09-11 11:02:55] [Rank 0] step:1881/10000 train_time:101185ms step_avg:53.79ms +[2025-09-11 11:02:55] [Rank 0] step:1901/10000 train_time:101834ms step_avg:53.57ms +[2025-09-11 11:02:55] [Rank 0] step:1901/10000 train_time:101834ms step_avg:53.57ms +[2025-09-11 11:02:56] [Rank 0] step:1921/10000 train_time:102482ms step_avg:53.35ms +[2025-09-11 11:02:56] [Rank 0] 
step:1921/10000 train_time:102482ms step_avg:53.35ms +[2025-09-11 11:02:57] [Rank 0] step:1941/10000 train_time:103129ms step_avg:53.13ms +[2025-09-11 11:02:57] [Rank 0] step:1941/10000 train_time:103129ms step_avg:53.13ms +[2025-09-11 11:02:57] [Rank 0] step:1961/10000 train_time:103777ms step_avg:52.92ms +[2025-09-11 11:02:57] [Rank 0] step:1961/10000 train_time:103777ms step_avg:52.92ms +[2025-09-11 11:02:58] [Rank 0] step:1981/10000 train_time:104425ms step_avg:52.71ms +[2025-09-11 11:02:58] [Rank 0] step:1981/10000 train_time:104425ms step_avg:52.71ms +[2025-09-11 11:02:59] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 11:02:59] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 11:02:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 11:02:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 11:03:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 11:03:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 11:03:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:03:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:03:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 11:03:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 11:03:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 11:03:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 11:03:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 11:03:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 11:03:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 11:03:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 11:03:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 11:03:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 11:03:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 11:03:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 11:03:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 11:03:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 11:03:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 11:03:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 11:03:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 11:03:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 11:03:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 11:03:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 11:03:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 11:03:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 11:03:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 11:03:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 11:03:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 11:03:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 11:03:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 11:03:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 11:03:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 11:03:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 11:03:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 11:03:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 11:03:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 11:03:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 11:03:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 11:03:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 11:03:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:03:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:03:09] [Rank 0] PRINT: step:2000/10000 val_loss:5.5547 total_sharp:2.0183e-02 L1_sharp:1.0533e-02 L2_sharp:2.3506e-03 L3_sharp:2.6274e-03 L4_sharp:3.6779e-03 L5_sharp:5.6828e-03 L6_sharp:3.6552e-03 L7_sharp:4.4637e-03 L8_sharp:1.0123e-02 L9_sharp:7.5095e-03 L10_sharp:7.3107e-03 L11_sharp:8.7324e-03 L12_sharp:3.8461e-02 total_fnorm:6.8438e+00 total_l1_linf:1.7536e+04 total_spectral:3.4375e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2500e+00 L3_fnorm:1.2578e+00 L4_fnorm:1.2656e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2344e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2344e+00 L12_fnorm:1.1172e+00 L1_l1linf:3.6914e-01 L2_l1linf:3.4766e-01 L3_l1linf:3.3984e-01 L4_l1linf:3.5156e-01 L5_l1linf:3.4180e-01 L6_l1linf:3.4570e-01 L7_l1linf:3.5352e-01 L8_l1linf:3.4766e-01 L9_l1linf:3.5156e-01 L10_l1linf:3.4180e-01 L11_l1linf:3.1445e-01 L12_l1linf:2.4023e-01 L1_spectral:1.4856e-02 L2_spectral:1.4782e-02 L3_spectral:1.4650e-02 L4_spectral:1.4625e-02 L5_spectral:1.4516e-02 L6_spectral:1.4523e-02 L7_spectral:1.4488e-02 L8_spectral:1.4559e-02 L9_spectral:1.4445e-02 L10_spectral:1.4435e-02 L11_spectral:1.4628e-02 L12_spectral:1.4788e-02 train_time:105056ms step_avg:52.53ms +[2025-09-11 11:03:09] [Rank 0] PRINT: step:2000/10000 val_loss:5.5547 total_sharp:2.0183e-02 L1_sharp:1.0533e-02 L2_sharp:2.3506e-03 L3_sharp:2.6274e-03 L4_sharp:3.6779e-03 L5_sharp:5.6828e-03 L6_sharp:3.6552e-03 L7_sharp:4.4637e-03 L8_sharp:1.0123e-02 L9_sharp:7.5095e-03 L10_sharp:7.3107e-03 L11_sharp:8.7324e-03 L12_sharp:3.8461e-02 total_fnorm:6.8438e+00 total_l1_linf:1.7536e+04 total_spectral:3.4375e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2500e+00 L3_fnorm:1.2578e+00 L4_fnorm:1.2656e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2344e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2344e+00 L12_fnorm:1.1172e+00 L1_l1linf:3.6914e-01 L2_l1linf:3.4766e-01 L3_l1linf:3.3984e-01 L4_l1linf:3.5156e-01 L5_l1linf:3.4180e-01 
L6_l1linf:3.4570e-01 L7_l1linf:3.5352e-01 L8_l1linf:3.4766e-01 L9_l1linf:3.5156e-01 L10_l1linf:3.4180e-01 L11_l1linf:3.1445e-01 L12_l1linf:2.4023e-01 L1_spectral:1.4856e-02 L2_spectral:1.4782e-02 L3_spectral:1.4650e-02 L4_spectral:1.4625e-02 L5_spectral:1.4516e-02 L6_spectral:1.4523e-02 L7_spectral:1.4488e-02 L8_spectral:1.4559e-02 L9_spectral:1.4445e-02 L10_spectral:1.4435e-02 L11_spectral:1.4628e-02 L12_spectral:1.4788e-02 train_time:105056ms step_avg:52.53ms +[2025-09-11 11:03:10] [Rank 0] step:2001/10000 train_time:106448ms step_avg:53.20ms +[2025-09-11 11:03:10] [Rank 0] step:2001/10000 train_time:106448ms step_avg:53.20ms +[2025-09-11 11:03:11] [Rank 0] step:2021/10000 train_time:107100ms step_avg:52.99ms +[2025-09-11 11:03:11] [Rank 0] step:2021/10000 train_time:107100ms step_avg:52.99ms +[2025-09-11 11:03:11] [Rank 0] step:2041/10000 train_time:107748ms step_avg:52.79ms +[2025-09-11 11:03:11] [Rank 0] step:2041/10000 train_time:107748ms step_avg:52.79ms +[2025-09-11 11:03:12] [Rank 0] step:2061/10000 train_time:108396ms step_avg:52.59ms +[2025-09-11 11:03:12] [Rank 0] step:2061/10000 train_time:108396ms step_avg:52.59ms +[2025-09-11 11:03:13] [Rank 0] step:2081/10000 train_time:109043ms step_avg:52.40ms +[2025-09-11 11:03:13] [Rank 0] step:2081/10000 train_time:109043ms step_avg:52.40ms +[2025-09-11 11:03:13] [Rank 0] step:2101/10000 train_time:109691ms step_avg:52.21ms +[2025-09-11 11:03:13] [Rank 0] step:2101/10000 train_time:109691ms step_avg:52.21ms +[2025-09-11 11:03:14] [Rank 0] step:2121/10000 train_time:110339ms step_avg:52.02ms +[2025-09-11 11:03:14] [Rank 0] step:2121/10000 train_time:110339ms step_avg:52.02ms +[2025-09-11 11:03:14] [Rank 0] step:2141/10000 train_time:110986ms step_avg:51.84ms +[2025-09-11 11:03:14] [Rank 0] step:2141/10000 train_time:110986ms step_avg:51.84ms +[2025-09-11 11:03:15] [Rank 0] step:2161/10000 train_time:111633ms step_avg:51.66ms +[2025-09-11 11:03:15] [Rank 0] step:2161/10000 train_time:111633ms step_avg:51.66ms 
+[2025-09-11 11:03:16] [Rank 0] step:2181/10000 train_time:112280ms step_avg:51.48ms +[2025-09-11 11:03:16] [Rank 0] step:2181/10000 train_time:112280ms step_avg:51.48ms +[2025-09-11 11:03:16] [Rank 0] step:2201/10000 train_time:112928ms step_avg:51.31ms +[2025-09-11 11:03:16] [Rank 0] step:2201/10000 train_time:112928ms step_avg:51.31ms +[2025-09-11 11:03:17] [Rank 0] step:2221/10000 train_time:113576ms step_avg:51.14ms +[2025-09-11 11:03:17] [Rank 0] step:2221/10000 train_time:113576ms step_avg:51.14ms +[2025-09-11 11:03:18] [Rank 0] step:2241/10000 train_time:114239ms step_avg:50.98ms +[2025-09-11 11:03:18] [Rank 0] step:2241/10000 train_time:114239ms step_avg:50.98ms +[2025-09-11 11:03:18] [Rank 0] step:2261/10000 train_time:114899ms step_avg:50.82ms +[2025-09-11 11:03:18] [Rank 0] step:2261/10000 train_time:114899ms step_avg:50.82ms +[2025-09-11 11:03:19] [Rank 0] step:2281/10000 train_time:115559ms step_avg:50.66ms +[2025-09-11 11:03:19] [Rank 0] step:2281/10000 train_time:115559ms step_avg:50.66ms +[2025-09-11 11:03:20] [Rank 0] step:2301/10000 train_time:116220ms step_avg:50.51ms +[2025-09-11 11:03:20] [Rank 0] step:2301/10000 train_time:116220ms step_avg:50.51ms +[2025-09-11 11:03:20] [Rank 0] step:2321/10000 train_time:116880ms step_avg:50.36ms +[2025-09-11 11:03:20] [Rank 0] step:2321/10000 train_time:116880ms step_avg:50.36ms +[2025-09-11 11:03:21] [Rank 0] step:2341/10000 train_time:117541ms step_avg:50.21ms +[2025-09-11 11:03:21] [Rank 0] step:2341/10000 train_time:117541ms step_avg:50.21ms +[2025-09-11 11:03:22] [Rank 0] step:2361/10000 train_time:118201ms step_avg:50.06ms +[2025-09-11 11:03:22] [Rank 0] step:2361/10000 train_time:118201ms step_avg:50.06ms +[2025-09-11 11:03:22] [Rank 0] step:2381/10000 train_time:118861ms step_avg:49.92ms +[2025-09-11 11:03:22] [Rank 0] step:2381/10000 train_time:118861ms step_avg:49.92ms +[2025-09-11 11:03:23] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 11:03:23] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 11:03:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 11:03:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 11:03:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 11:03:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 11:03:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:03:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:03:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 11:03:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 11:03:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 11:03:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 11:03:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 11:03:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 11:03:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 11:03:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 11:03:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 11:03:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 11:03:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 11:03:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 11:03:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 11:03:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 11:03:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 11:03:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 11:03:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 11:03:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 11:03:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 11:03:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 11:03:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 11:03:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 11:03:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 11:03:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 11:03:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 11:03:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 11:03:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 11:03:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 11:03:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 11:03:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 11:03:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 11:03:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 11:03:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 11:03:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 11:03:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 11:03:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 11:03:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:03:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:03:33] [Rank 0] PRINT: step:2400/10000 val_loss:5.4333 total_sharp:1.7421e-02 L1_sharp:9.4571e-03 L2_sharp:3.1030e-03 L3_sharp:2.0646e-03 L4_sharp:2.7089e-03 L5_sharp:4.1855e-03 L6_sharp:3.0784e-03 L7_sharp:3.0002e-03 L8_sharp:7.9871e-03 L9_sharp:6.3045e-03 L10_sharp:5.7532e-03 L11_sharp:6.9929e-03 L12_sharp:2.7218e-02 total_fnorm:6.5312e+00 total_l1_linf:1.6640e+04 total_spectral:3.2812e+00 L1_fnorm:1.2891e+00 L2_fnorm:1.2422e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2656e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2500e+00 L12_fnorm:1.1562e+00 L1_l1linf:3.6328e-01 L2_l1linf:3.4375e-01 L3_l1linf:3.2812e-01 L4_l1linf:3.3594e-01 L5_l1linf:3.3789e-01 L6_l1linf:3.3398e-01 L7_l1linf:3.3789e-01 L8_l1linf:3.3203e-01 L9_l1linf:3.3203e-01 L10_l1linf:3.3594e-01 L11_l1linf:3.1250e-01 L12_l1linf:2.4707e-01 L1_spectral:1.5159e-02 L2_spectral:1.4980e-02 L3_spectral:1.4917e-02 L4_spectral:1.4760e-02 L5_spectral:1.4729e-02 L6_spectral:1.4816e-02 L7_spectral:1.4789e-02 L8_spectral:1.5029e-02 L9_spectral:1.4720e-02 L10_spectral:1.4685e-02 L11_spectral:1.4796e-02 L12_spectral:1.4998e-02 train_time:119503ms step_avg:49.79ms +[2025-09-11 11:03:33] [Rank 0] PRINT: step:2400/10000 
val_loss:5.4333 total_sharp:1.7421e-02 L1_sharp:9.4571e-03 L2_sharp:3.1030e-03 L3_sharp:2.0646e-03 L4_sharp:2.7089e-03 L5_sharp:4.1855e-03 L6_sharp:3.0784e-03 L7_sharp:3.0002e-03 L8_sharp:7.9871e-03 L9_sharp:6.3045e-03 L10_sharp:5.7532e-03 L11_sharp:6.9929e-03 L12_sharp:2.7218e-02 total_fnorm:6.5312e+00 total_l1_linf:1.6640e+04 total_spectral:3.2812e+00 L1_fnorm:1.2891e+00 L2_fnorm:1.2422e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2656e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2500e+00 L12_fnorm:1.1562e+00 L1_l1linf:3.6328e-01 L2_l1linf:3.4375e-01 L3_l1linf:3.2812e-01 L4_l1linf:3.3594e-01 L5_l1linf:3.3789e-01 L6_l1linf:3.3398e-01 L7_l1linf:3.3789e-01 L8_l1linf:3.3203e-01 L9_l1linf:3.3203e-01 L10_l1linf:3.3594e-01 L11_l1linf:3.1250e-01 L12_l1linf:2.4707e-01 L1_spectral:1.5159e-02 L2_spectral:1.4980e-02 L3_spectral:1.4917e-02 L4_spectral:1.4760e-02 L5_spectral:1.4729e-02 L6_spectral:1.4816e-02 L7_spectral:1.4789e-02 L8_spectral:1.5029e-02 L9_spectral:1.4720e-02 L10_spectral:1.4685e-02 L11_spectral:1.4796e-02 L12_spectral:1.4998e-02 train_time:119503ms step_avg:49.79ms +[2025-09-11 11:03:34] [Rank 0] step:2401/10000 train_time:120879ms step_avg:50.35ms +[2025-09-11 11:03:34] [Rank 0] step:2401/10000 train_time:120879ms step_avg:50.35ms +[2025-09-11 11:03:35] [Rank 0] step:2421/10000 train_time:121544ms step_avg:50.20ms +[2025-09-11 11:03:35] [Rank 0] step:2421/10000 train_time:121544ms step_avg:50.20ms +[2025-09-11 11:03:36] [Rank 0] step:2441/10000 train_time:122206ms step_avg:50.06ms +[2025-09-11 11:03:36] [Rank 0] step:2441/10000 train_time:122206ms step_avg:50.06ms +[2025-09-11 11:03:36] [Rank 0] step:2461/10000 train_time:122868ms step_avg:49.93ms +[2025-09-11 11:03:36] [Rank 0] step:2461/10000 train_time:122868ms step_avg:49.93ms +[2025-09-11 11:03:37] [Rank 0] step:2481/10000 train_time:123528ms step_avg:49.79ms +[2025-09-11 11:03:37] [Rank 0] step:2481/10000 
train_time:123528ms step_avg:49.79ms +[2025-09-11 11:03:38] [Rank 0] step:2501/10000 train_time:124189ms step_avg:49.66ms +[2025-09-11 11:03:38] [Rank 0] step:2501/10000 train_time:124189ms step_avg:49.66ms +[2025-09-11 11:03:38] [Rank 0] step:2521/10000 train_time:124851ms step_avg:49.52ms +[2025-09-11 11:03:38] [Rank 0] step:2521/10000 train_time:124851ms step_avg:49.52ms +[2025-09-11 11:03:39] [Rank 0] step:2541/10000 train_time:125512ms step_avg:49.39ms +[2025-09-11 11:03:39] [Rank 0] step:2541/10000 train_time:125512ms step_avg:49.39ms +[2025-09-11 11:03:40] [Rank 0] step:2561/10000 train_time:126172ms step_avg:49.27ms +[2025-09-11 11:03:40] [Rank 0] step:2561/10000 train_time:126172ms step_avg:49.27ms +[2025-09-11 11:03:40] [Rank 0] step:2581/10000 train_time:126833ms step_avg:49.14ms +[2025-09-11 11:03:40] [Rank 0] step:2581/10000 train_time:126833ms step_avg:49.14ms +[2025-09-11 11:03:41] [Rank 0] step:2601/10000 train_time:127493ms step_avg:49.02ms +[2025-09-11 11:03:41] [Rank 0] step:2601/10000 train_time:127493ms step_avg:49.02ms +[2025-09-11 11:03:42] [Rank 0] step:2621/10000 train_time:128154ms step_avg:48.90ms +[2025-09-11 11:03:42] [Rank 0] step:2621/10000 train_time:128154ms step_avg:48.90ms +[2025-09-11 11:03:42] [Rank 0] step:2641/10000 train_time:128815ms step_avg:48.78ms +[2025-09-11 11:03:42] [Rank 0] step:2641/10000 train_time:128815ms step_avg:48.78ms +[2025-09-11 11:03:43] [Rank 0] step:2661/10000 train_time:129475ms step_avg:48.66ms +[2025-09-11 11:03:43] [Rank 0] step:2661/10000 train_time:129475ms step_avg:48.66ms +[2025-09-11 11:03:44] [Rank 0] step:2681/10000 train_time:130136ms step_avg:48.54ms +[2025-09-11 11:03:44] [Rank 0] step:2681/10000 train_time:130136ms step_avg:48.54ms +[2025-09-11 11:03:44] [Rank 0] step:2701/10000 train_time:130797ms step_avg:48.43ms +[2025-09-11 11:03:44] [Rank 0] step:2701/10000 train_time:130797ms step_avg:48.43ms +[2025-09-11 11:03:45] [Rank 0] step:2721/10000 train_time:131457ms step_avg:48.31ms 
+[2025-09-11 11:03:45] [Rank 0] step:2721/10000 train_time:131457ms step_avg:48.31ms +[2025-09-11 11:03:46] [Rank 0] step:2741/10000 train_time:132118ms step_avg:48.20ms +[2025-09-11 11:03:46] [Rank 0] step:2741/10000 train_time:132118ms step_avg:48.20ms +[2025-09-11 11:03:46] [Rank 0] step:2761/10000 train_time:132779ms step_avg:48.09ms +[2025-09-11 11:03:46] [Rank 0] step:2761/10000 train_time:132779ms step_avg:48.09ms +[2025-09-11 11:03:47] [Rank 0] step:2781/10000 train_time:133439ms step_avg:47.98ms +[2025-09-11 11:03:47] [Rank 0] step:2781/10000 train_time:133439ms step_avg:47.98ms +[2025-09-11 11:03:48] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 11:03:48] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 11:03:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 11:03:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 11:03:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 11:03:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 11:03:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:03:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:03:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 11:03:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 11:03:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 11:03:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 11:03:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 11:03:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 11:03:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 11:03:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 11:03:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 11:03:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 11:03:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 11:03:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 11:03:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 11:03:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 11:03:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 11:03:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 11:03:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 11:03:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 11:03:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 11:03:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 11:03:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 11:03:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 11:03:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 11:03:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 11:03:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 11:03:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 11:03:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 11:03:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 11:03:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 11:03:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 11:03:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 11:03:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 11:03:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 11:03:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 11:03:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 11:03:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 11:03:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:03:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:03:58] [Rank 0] PRINT: step:2800/10000 val_loss:5.3557 total_sharp:1.6459e-02 L1_sharp:5.8071e-03 L2_sharp:2.4542e-03 L3_sharp:2.1098e-03 L4_sharp:3.8062e-03 L5_sharp:3.3512e-03 L6_sharp:3.0141e-03 L7_sharp:2.9394e-03 L8_sharp:6.7844e-03 L9_sharp:5.4379e-03 L10_sharp:5.2268e-03 L11_sharp:7.3785e-03 L12_sharp:1.9427e-02 total_fnorm:6.2812e+00 total_l1_linf:1.6000e+04 total_spectral:3.1719e+00 L1_fnorm:1.2891e+00 L2_fnorm:1.2422e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2656e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2578e+00 L12_fnorm:1.1641e+00 L1_l1linf:3.5938e-01 L2_l1linf:3.4375e-01 L3_l1linf:3.2227e-01 L4_l1linf:3.2812e-01 L5_l1linf:3.2617e-01 L6_l1linf:3.2617e-01 L7_l1linf:3.2617e-01 L8_l1linf:3.2422e-01 L9_l1linf:3.2422e-01 L10_l1linf:3.2617e-01 L11_l1linf:3.1250e-01 L12_l1linf:2.4512e-01 L1_spectral:1.5398e-02 L2_spectral:1.5135e-02 L3_spectral:1.5052e-02 L4_spectral:1.4994e-02 L5_spectral:1.4962e-02 L6_spectral:1.5042e-02 L7_spectral:1.5034e-02 L8_spectral:1.5206e-02 L9_spectral:1.4983e-02 L10_spectral:1.4850e-02 L11_spectral:1.4860e-02 L12_spectral:1.5144e-02 train_time:134082ms step_avg:47.89ms +[2025-09-11 11:03:58] [Rank 0] PRINT: step:2800/10000 val_loss:5.3557 total_sharp:1.6459e-02 L1_sharp:5.8071e-03 L2_sharp:2.4542e-03 L3_sharp:2.1098e-03 L4_sharp:3.8062e-03 L5_sharp:3.3512e-03 L6_sharp:3.0141e-03 L7_sharp:2.9394e-03 L8_sharp:6.7844e-03 L9_sharp:5.4379e-03 L10_sharp:5.2268e-03 L11_sharp:7.3785e-03 L12_sharp:1.9427e-02 total_fnorm:6.2812e+00 total_l1_linf:1.6000e+04 total_spectral:3.1719e+00 L1_fnorm:1.2891e+00 L2_fnorm:1.2422e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2656e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2578e+00 L12_fnorm:1.1641e+00 L1_l1linf:3.5938e-01 L2_l1linf:3.4375e-01 L3_l1linf:3.2227e-01 L4_l1linf:3.2812e-01 L5_l1linf:3.2617e-01 
L6_l1linf:3.2617e-01 L7_l1linf:3.2617e-01 L8_l1linf:3.2422e-01 L9_l1linf:3.2422e-01 L10_l1linf:3.2617e-01 L11_l1linf:3.1250e-01 L12_l1linf:2.4512e-01 L1_spectral:1.5398e-02 L2_spectral:1.5135e-02 L3_spectral:1.5052e-02 L4_spectral:1.4994e-02 L5_spectral:1.4962e-02 L6_spectral:1.5042e-02 L7_spectral:1.5034e-02 L8_spectral:1.5206e-02 L9_spectral:1.4983e-02 L10_spectral:1.4850e-02 L11_spectral:1.4860e-02 L12_spectral:1.5144e-02 train_time:134082ms step_avg:47.89ms +[2025-09-11 11:03:59] [Rank 0] step:2801/10000 train_time:135374ms step_avg:48.33ms +[2025-09-11 11:03:59] [Rank 0] step:2801/10000 train_time:135374ms step_avg:48.33ms +[2025-09-11 11:03:59] [Rank 0] step:2821/10000 train_time:136039ms step_avg:48.22ms +[2025-09-11 11:03:59] [Rank 0] step:2821/10000 train_time:136039ms step_avg:48.22ms +[2025-09-11 11:04:00] [Rank 0] step:2841/10000 train_time:136701ms step_avg:48.12ms +[2025-09-11 11:04:00] [Rank 0] step:2841/10000 train_time:136701ms step_avg:48.12ms +[2025-09-11 11:04:01] [Rank 0] step:2861/10000 train_time:137364ms step_avg:48.01ms +[2025-09-11 11:04:01] [Rank 0] step:2861/10000 train_time:137364ms step_avg:48.01ms +[2025-09-11 11:04:01] [Rank 0] step:2881/10000 train_time:138025ms step_avg:47.91ms +[2025-09-11 11:04:01] [Rank 0] step:2881/10000 train_time:138025ms step_avg:47.91ms +[2025-09-11 11:04:02] [Rank 0] step:2901/10000 train_time:138686ms step_avg:47.81ms +[2025-09-11 11:04:02] [Rank 0] step:2901/10000 train_time:138686ms step_avg:47.81ms +[2025-09-11 11:04:03] [Rank 0] step:2921/10000 train_time:139347ms step_avg:47.71ms +[2025-09-11 11:04:03] [Rank 0] step:2921/10000 train_time:139347ms step_avg:47.71ms +[2025-09-11 11:04:03] [Rank 0] step:2941/10000 train_time:140008ms step_avg:47.61ms +[2025-09-11 11:04:03] [Rank 0] step:2941/10000 train_time:140008ms step_avg:47.61ms +[2025-09-11 11:04:04] [Rank 0] step:2961/10000 train_time:140669ms step_avg:47.51ms +[2025-09-11 11:04:04] [Rank 0] step:2961/10000 train_time:140669ms step_avg:47.51ms 
+[2025-09-11 11:04:05] [Rank 0] step:2981/10000 train_time:141333ms step_avg:47.41ms +[2025-09-11 11:04:05] [Rank 0] step:2981/10000 train_time:141333ms step_avg:47.41ms +[2025-09-11 11:04:05] [Rank 0] step:3001/10000 train_time:141998ms step_avg:47.32ms +[2025-09-11 11:04:05] [Rank 0] step:3001/10000 train_time:141998ms step_avg:47.32ms +[2025-09-11 11:04:06] [Rank 0] step:3021/10000 train_time:142662ms step_avg:47.22ms +[2025-09-11 11:04:06] [Rank 0] step:3021/10000 train_time:142662ms step_avg:47.22ms +[2025-09-11 11:04:07] [Rank 0] step:3041/10000 train_time:143326ms step_avg:47.13ms +[2025-09-11 11:04:07] [Rank 0] step:3041/10000 train_time:143326ms step_avg:47.13ms +[2025-09-11 11:04:07] [Rank 0] step:3061/10000 train_time:143990ms step_avg:47.04ms +[2025-09-11 11:04:07] [Rank 0] step:3061/10000 train_time:143990ms step_avg:47.04ms +[2025-09-11 11:04:08] [Rank 0] step:3081/10000 train_time:144653ms step_avg:46.95ms +[2025-09-11 11:04:08] [Rank 0] step:3081/10000 train_time:144653ms step_avg:46.95ms +[2025-09-11 11:04:09] [Rank 0] step:3101/10000 train_time:145317ms step_avg:46.86ms +[2025-09-11 11:04:09] [Rank 0] step:3101/10000 train_time:145317ms step_avg:46.86ms +[2025-09-11 11:04:10] [Rank 0] step:3121/10000 train_time:146294ms step_avg:46.87ms +[2025-09-11 11:04:10] [Rank 0] step:3121/10000 train_time:146294ms step_avg:46.87ms +[2025-09-11 11:04:11] [Rank 0] step:3141/10000 train_time:147191ms step_avg:46.86ms +[2025-09-11 11:04:11] [Rank 0] step:3141/10000 train_time:147191ms step_avg:46.86ms +[2025-09-11 11:04:11] [Rank 0] step:3161/10000 train_time:147854ms step_avg:46.77ms +[2025-09-11 11:04:11] [Rank 0] step:3161/10000 train_time:147854ms step_avg:46.77ms +[2025-09-11 11:04:12] [Rank 0] step:3181/10000 train_time:148814ms step_avg:46.78ms +[2025-09-11 11:04:12] [Rank 0] step:3181/10000 train_time:148814ms step_avg:46.78ms +[2025-09-11 11:04:13] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 11:04:13] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 11:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 11:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 11:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 11:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 11:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 11:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 11:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 11:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 11:04:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 11:04:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 11:04:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 11:04:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 11:04:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 11:04:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 11:04:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 11:04:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 11:04:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 11:04:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 11:04:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 11:04:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 11:04:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 11:04:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 11:04:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 11:04:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 11:04:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 11:04:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 11:04:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 11:04:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 11:04:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 11:04:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 11:04:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 11:04:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 11:04:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 11:04:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 11:04:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 11:04:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 11:04:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 11:04:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 11:04:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 11:04:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 11:04:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:04:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:04:23] [Rank 0] PRINT: step:3200/10000 val_loss:5.2742 total_sharp:1.4456e-02 L1_sharp:8.4249e-03 L2_sharp:3.5724e-03 L3_sharp:2.6023e-03 L4_sharp:2.3870e-03 L5_sharp:3.5447e-03 L6_sharp:2.8305e-03 L7_sharp:2.5666e-03 L8_sharp:6.5854e-03 L9_sharp:4.8843e-03 L10_sharp:5.0938e-03 L11_sharp:6.8246e-03 L12_sharp:2.6652e-02 total_fnorm:6.7188e+00 total_l1_linf:1.6640e+04 total_spectral:3.3906e+00 L1_fnorm:1.2891e+00 L2_fnorm:1.2422e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2656e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2734e+00 L10_fnorm:1.2734e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.1953e+00 L1_l1linf:3.4961e-01 L2_l1linf:3.4180e-01 L3_l1linf:3.2031e-01 L4_l1linf:3.2031e-01 L5_l1linf:3.1836e-01 L6_l1linf:3.2031e-01 L7_l1linf:3.2227e-01 L8_l1linf:3.1250e-01 L9_l1linf:3.1250e-01 L10_l1linf:3.1250e-01 L11_l1linf:3.0859e-01 L12_l1linf:2.4316e-01 L1_spectral:1.5565e-02 L2_spectral:1.5412e-02 L3_spectral:1.5279e-02 L4_spectral:1.5202e-02 L5_spectral:1.5128e-02 L6_spectral:1.5229e-02 L7_spectral:1.5188e-02 L8_spectral:1.5469e-02 L9_spectral:1.5181e-02 L10_spectral:1.5151e-02 L11_spectral:1.5067e-02 L12_spectral:1.5260e-02 train_time:149458ms step_avg:46.71ms +[2025-09-11 11:04:23] [Rank 0] PRINT: step:3200/10000 
val_loss:5.2742 total_sharp:1.4456e-02 L1_sharp:8.4249e-03 L2_sharp:3.5724e-03 L3_sharp:2.6023e-03 L4_sharp:2.3870e-03 L5_sharp:3.5447e-03 L6_sharp:2.8305e-03 L7_sharp:2.5666e-03 L8_sharp:6.5854e-03 L9_sharp:4.8843e-03 L10_sharp:5.0938e-03 L11_sharp:6.8246e-03 L12_sharp:2.6652e-02 total_fnorm:6.7188e+00 total_l1_linf:1.6640e+04 total_spectral:3.3906e+00 L1_fnorm:1.2891e+00 L2_fnorm:1.2422e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2656e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2734e+00 L10_fnorm:1.2734e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.1953e+00 L1_l1linf:3.4961e-01 L2_l1linf:3.4180e-01 L3_l1linf:3.2031e-01 L4_l1linf:3.2031e-01 L5_l1linf:3.1836e-01 L6_l1linf:3.2031e-01 L7_l1linf:3.2227e-01 L8_l1linf:3.1250e-01 L9_l1linf:3.1250e-01 L10_l1linf:3.1250e-01 L11_l1linf:3.0859e-01 L12_l1linf:2.4316e-01 L1_spectral:1.5565e-02 L2_spectral:1.5412e-02 L3_spectral:1.5279e-02 L4_spectral:1.5202e-02 L5_spectral:1.5128e-02 L6_spectral:1.5229e-02 L7_spectral:1.5188e-02 L8_spectral:1.5469e-02 L9_spectral:1.5181e-02 L10_spectral:1.5151e-02 L11_spectral:1.5067e-02 L12_spectral:1.5260e-02 train_time:149458ms step_avg:46.71ms +[2025-09-11 11:04:24] [Rank 0] step:3201/10000 train_time:150719ms step_avg:47.09ms +[2025-09-11 11:04:24] [Rank 0] step:3201/10000 train_time:150719ms step_avg:47.09ms +[2025-09-11 11:04:25] [Rank 0] step:3221/10000 train_time:151402ms step_avg:47.00ms +[2025-09-11 11:04:25] [Rank 0] step:3221/10000 train_time:151402ms step_avg:47.00ms +[2025-09-11 11:04:25] [Rank 0] step:3241/10000 train_time:152067ms step_avg:46.92ms +[2025-09-11 11:04:25] [Rank 0] step:3241/10000 train_time:152067ms step_avg:46.92ms +[2025-09-11 11:04:26] [Rank 0] step:3261/10000 train_time:152732ms step_avg:46.84ms +[2025-09-11 11:04:26] [Rank 0] step:3261/10000 train_time:152732ms step_avg:46.84ms +[2025-09-11 11:04:27] [Rank 0] step:3281/10000 train_time:153397ms step_avg:46.75ms +[2025-09-11 11:04:27] [Rank 0] step:3281/10000 
train_time:153397ms step_avg:46.75ms +[2025-09-11 11:04:27] [Rank 0] step:3301/10000 train_time:154061ms step_avg:46.67ms +[2025-09-11 11:04:27] [Rank 0] step:3301/10000 train_time:154061ms step_avg:46.67ms +[2025-09-11 11:04:28] [Rank 0] step:3321/10000 train_time:154725ms step_avg:46.59ms +[2025-09-11 11:04:28] [Rank 0] step:3321/10000 train_time:154725ms step_avg:46.59ms +[2025-09-11 11:04:29] [Rank 0] step:3341/10000 train_time:155389ms step_avg:46.51ms +[2025-09-11 11:04:29] [Rank 0] step:3341/10000 train_time:155389ms step_avg:46.51ms +[2025-09-11 11:04:29] [Rank 0] step:3361/10000 train_time:156054ms step_avg:46.43ms +[2025-09-11 11:04:29] [Rank 0] step:3361/10000 train_time:156054ms step_avg:46.43ms +[2025-09-11 11:04:30] [Rank 0] step:3381/10000 train_time:156719ms step_avg:46.35ms +[2025-09-11 11:04:30] [Rank 0] step:3381/10000 train_time:156719ms step_avg:46.35ms +[2025-09-11 11:04:31] [Rank 0] step:3401/10000 train_time:157383ms step_avg:46.28ms +[2025-09-11 11:04:31] [Rank 0] step:3401/10000 train_time:157383ms step_avg:46.28ms +[2025-09-11 11:04:31] [Rank 0] step:3421/10000 train_time:158047ms step_avg:46.20ms +[2025-09-11 11:04:31] [Rank 0] step:3421/10000 train_time:158047ms step_avg:46.20ms +[2025-09-11 11:04:32] [Rank 0] step:3441/10000 train_time:158710ms step_avg:46.12ms +[2025-09-11 11:04:32] [Rank 0] step:3441/10000 train_time:158710ms step_avg:46.12ms +[2025-09-11 11:04:33] [Rank 0] step:3461/10000 train_time:159373ms step_avg:46.05ms +[2025-09-11 11:04:33] [Rank 0] step:3461/10000 train_time:159373ms step_avg:46.05ms +[2025-09-11 11:04:33] [Rank 0] step:3481/10000 train_time:160037ms step_avg:45.97ms +[2025-09-11 11:04:33] [Rank 0] step:3481/10000 train_time:160037ms step_avg:45.97ms +[2025-09-11 11:04:34] [Rank 0] step:3501/10000 train_time:160701ms step_avg:45.90ms +[2025-09-11 11:04:34] [Rank 0] step:3501/10000 train_time:160701ms step_avg:45.90ms +[2025-09-11 11:04:35] [Rank 0] step:3521/10000 train_time:161366ms step_avg:45.83ms 
+[2025-09-11 11:04:35] [Rank 0] step:3521/10000 train_time:161366ms step_avg:45.83ms +[2025-09-11 11:04:35] [Rank 0] step:3541/10000 train_time:162030ms step_avg:45.76ms +[2025-09-11 11:04:35] [Rank 0] step:3541/10000 train_time:162030ms step_avg:45.76ms +[2025-09-11 11:04:36] [Rank 0] step:3561/10000 train_time:162693ms step_avg:45.69ms +[2025-09-11 11:04:36] [Rank 0] step:3561/10000 train_time:162693ms step_avg:45.69ms +[2025-09-11 11:04:37] [Rank 0] step:3581/10000 train_time:163357ms step_avg:45.62ms +[2025-09-11 11:04:37] [Rank 0] step:3581/10000 train_time:163357ms step_avg:45.62ms +[2025-09-11 11:04:37] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 11:04:37] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 11:04:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 11:04:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 11:04:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 11:04:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 11:04:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:04:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:04:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 11:04:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 11:04:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 11:04:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 11:04:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 11:04:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 11:04:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 11:04:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 11:04:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 11:04:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 11:04:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 11:04:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 11:04:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 11:04:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 11:04:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 11:04:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 11:04:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 11:04:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 11:04:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 11:04:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 11:04:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 11:04:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 11:04:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 11:04:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 11:04:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 11:04:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 11:04:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 11:04:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 11:04:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 11:04:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 11:04:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 11:04:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 11:04:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 11:04:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 11:04:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 11:04:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 11:04:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:04:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:04:47] [Rank 0] PRINT: step:3600/10000 val_loss:5.2339 total_sharp:1.1244e-02 L1_sharp:4.8801e-03 L2_sharp:1.7320e-03 L3_sharp:1.2283e-03 L4_sharp:1.7677e-03 L5_sharp:2.7679e-03 L6_sharp:2.3800e-03 L7_sharp:2.0753e-03 L8_sharp:5.2719e-03 L9_sharp:4.5722e-03 L10_sharp:4.7820e-03 L11_sharp:6.0405e-03 L12_sharp:1.6061e-02 total_fnorm:6.2500e+00 total_l1_linf:1.5552e+04 total_spectral:3.1719e+00 L1_fnorm:1.2891e+00 L2_fnorm:1.2500e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2656e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2734e+00 L10_fnorm:1.2734e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2031e+00 L1_l1linf:3.4961e-01 L2_l1linf:3.3398e-01 L3_l1linf:3.0859e-01 L4_l1linf:3.1445e-01 L5_l1linf:3.0859e-01 L6_l1linf:3.0859e-01 L7_l1linf:3.1055e-01 L8_l1linf:3.0469e-01 L9_l1linf:3.0664e-01 L10_l1linf:3.0859e-01 L11_l1linf:2.9883e-01 L12_l1linf:2.4414e-01 L1_spectral:1.5762e-02 L2_spectral:1.5435e-02 L3_spectral:1.5403e-02 L4_spectral:1.5342e-02 L5_spectral:1.5296e-02 L6_spectral:1.5356e-02 L7_spectral:1.5433e-02 L8_spectral:1.5640e-02 L9_spectral:1.5418e-02 L10_spectral:1.5218e-02 L11_spectral:1.5275e-02 L12_spectral:1.5391e-02 train_time:164004ms step_avg:45.56ms +[2025-09-11 11:04:47] [Rank 0] PRINT: step:3600/10000 val_loss:5.2339 total_sharp:1.1244e-02 L1_sharp:4.8801e-03 L2_sharp:1.7320e-03 L3_sharp:1.2283e-03 L4_sharp:1.7677e-03 L5_sharp:2.7679e-03 L6_sharp:2.3800e-03 L7_sharp:2.0753e-03 L8_sharp:5.2719e-03 L9_sharp:4.5722e-03 L10_sharp:4.7820e-03 L11_sharp:6.0405e-03 L12_sharp:1.6061e-02 total_fnorm:6.2500e+00 total_l1_linf:1.5552e+04 total_spectral:3.1719e+00 L1_fnorm:1.2891e+00 L2_fnorm:1.2500e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2656e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2734e+00 L10_fnorm:1.2734e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2031e+00 L1_l1linf:3.4961e-01 L2_l1linf:3.3398e-01 L3_l1linf:3.0859e-01 L4_l1linf:3.1445e-01 L5_l1linf:3.0859e-01 
L6_l1linf:3.0859e-01 L7_l1linf:3.1055e-01 L8_l1linf:3.0469e-01 L9_l1linf:3.0664e-01 L10_l1linf:3.0859e-01 L11_l1linf:2.9883e-01 L12_l1linf:2.4414e-01 L1_spectral:1.5762e-02 L2_spectral:1.5435e-02 L3_spectral:1.5403e-02 L4_spectral:1.5342e-02 L5_spectral:1.5296e-02 L6_spectral:1.5356e-02 L7_spectral:1.5433e-02 L8_spectral:1.5640e-02 L9_spectral:1.5418e-02 L10_spectral:1.5218e-02 L11_spectral:1.5275e-02 L12_spectral:1.5391e-02 train_time:164004ms step_avg:45.56ms +[2025-09-11 11:04:49] [Rank 0] step:3601/10000 train_time:165270ms step_avg:45.90ms +[2025-09-11 11:04:49] [Rank 0] step:3601/10000 train_time:165270ms step_avg:45.90ms +[2025-09-11 11:04:49] [Rank 0] step:3621/10000 train_time:165937ms step_avg:45.83ms +[2025-09-11 11:04:49] [Rank 0] step:3621/10000 train_time:165937ms step_avg:45.83ms +[2025-09-11 11:04:50] [Rank 0] step:3641/10000 train_time:166602ms step_avg:45.76ms +[2025-09-11 11:04:50] [Rank 0] step:3641/10000 train_time:166602ms step_avg:45.76ms +[2025-09-11 11:04:51] [Rank 0] step:3661/10000 train_time:167266ms step_avg:45.69ms +[2025-09-11 11:04:51] [Rank 0] step:3661/10000 train_time:167266ms step_avg:45.69ms +[2025-09-11 11:04:51] [Rank 0] step:3681/10000 train_time:167929ms step_avg:45.62ms +[2025-09-11 11:04:51] [Rank 0] step:3681/10000 train_time:167929ms step_avg:45.62ms +[2025-09-11 11:04:52] [Rank 0] step:3701/10000 train_time:168594ms step_avg:45.55ms +[2025-09-11 11:04:52] [Rank 0] step:3701/10000 train_time:168594ms step_avg:45.55ms +[2025-09-11 11:04:53] [Rank 0] step:3721/10000 train_time:169268ms step_avg:45.49ms +[2025-09-11 11:04:53] [Rank 0] step:3721/10000 train_time:169268ms step_avg:45.49ms +[2025-09-11 11:04:53] [Rank 0] step:3741/10000 train_time:169942ms step_avg:45.43ms +[2025-09-11 11:04:53] [Rank 0] step:3741/10000 train_time:169942ms step_avg:45.43ms +[2025-09-11 11:04:54] [Rank 0] step:3761/10000 train_time:170618ms step_avg:45.36ms +[2025-09-11 11:04:54] [Rank 0] step:3761/10000 train_time:170618ms step_avg:45.36ms 
+[2025-09-11 11:04:55] [Rank 0] step:3781/10000 train_time:171293ms step_avg:45.30ms +[2025-09-11 11:04:55] [Rank 0] step:3781/10000 train_time:171293ms step_avg:45.30ms +[2025-09-11 11:04:55] [Rank 0] step:3801/10000 train_time:171967ms step_avg:45.24ms +[2025-09-11 11:04:55] [Rank 0] step:3801/10000 train_time:171967ms step_avg:45.24ms +[2025-09-11 11:04:56] [Rank 0] step:3821/10000 train_time:172642ms step_avg:45.18ms +[2025-09-11 11:04:56] [Rank 0] step:3821/10000 train_time:172642ms step_avg:45.18ms +[2025-09-11 11:04:57] [Rank 0] step:3841/10000 train_time:173317ms step_avg:45.12ms +[2025-09-11 11:04:57] [Rank 0] step:3841/10000 train_time:173317ms step_avg:45.12ms +[2025-09-11 11:04:57] [Rank 0] step:3861/10000 train_time:173991ms step_avg:45.06ms +[2025-09-11 11:04:57] [Rank 0] step:3861/10000 train_time:173991ms step_avg:45.06ms +[2025-09-11 11:04:58] [Rank 0] step:3881/10000 train_time:174666ms step_avg:45.01ms +[2025-09-11 11:04:58] [Rank 0] step:3881/10000 train_time:174666ms step_avg:45.01ms +[2025-09-11 11:04:59] [Rank 0] step:3901/10000 train_time:175340ms step_avg:44.95ms +[2025-09-11 11:04:59] [Rank 0] step:3901/10000 train_time:175340ms step_avg:44.95ms +[2025-09-11 11:04:59] [Rank 0] step:3921/10000 train_time:176015ms step_avg:44.89ms +[2025-09-11 11:04:59] [Rank 0] step:3921/10000 train_time:176015ms step_avg:44.89ms +[2025-09-11 11:05:00] [Rank 0] step:3941/10000 train_time:176690ms step_avg:44.83ms +[2025-09-11 11:05:00] [Rank 0] step:3941/10000 train_time:176690ms step_avg:44.83ms +[2025-09-11 11:05:01] [Rank 0] step:3961/10000 train_time:177432ms step_avg:44.79ms +[2025-09-11 11:05:01] [Rank 0] step:3961/10000 train_time:177432ms step_avg:44.79ms +[2025-09-11 11:05:02] [Rank 0] step:3981/10000 train_time:178189ms step_avg:44.76ms +[2025-09-11 11:05:02] [Rank 0] step:3981/10000 train_time:178189ms step_avg:44.76ms +[2025-09-11 11:05:02] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 11:05:02] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 11:05:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 11:05:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 11:05:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 11:05:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 11:05:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:05:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:05:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 11:05:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 11:05:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 11:05:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 11:05:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 11:05:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 11:05:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 11:05:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 11:05:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 11:05:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 11:05:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 11:05:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 11:05:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 11:05:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 11:05:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 11:05:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 11:05:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 11:05:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 11:05:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 11:05:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 11:05:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 11:05:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 11:05:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 11:05:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 11:05:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 11:05:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 11:05:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 11:05:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 11:05:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 11:05:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 11:05:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 11:05:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 11:05:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 11:05:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 11:05:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 11:05:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 11:05:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:05:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:05:12] [Rank 0] PRINT: step:4000/10000 val_loss:5.1738 total_sharp:1.1672e-02 L1_sharp:5.3098e-03 L2_sharp:2.1399e-03 L3_sharp:2.3725e-03 L4_sharp:2.0476e-03 L5_sharp:2.4435e-03 L6_sharp:2.2792e-03 L7_sharp:1.9626e-03 L8_sharp:5.5445e-03 L9_sharp:5.2999e-03 L10_sharp:5.4702e-03 L11_sharp:6.7831e-03 L12_sharp:1.7940e-02 total_fnorm:7.0000e+00 total_l1_linf:1.6384e+04 total_spectral:3.5469e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2656e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2422e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.1953e+00 L1_l1linf:3.4570e-01 L2_l1linf:3.3203e-01 L3_l1linf:3.0664e-01 L4_l1linf:3.1250e-01 L5_l1linf:3.0859e-01 L6_l1linf:3.0664e-01 L7_l1linf:3.0664e-01 L8_l1linf:3.0469e-01 L9_l1linf:3.0469e-01 L10_l1linf:3.0859e-01 L11_l1linf:2.9102e-01 L12_l1linf:2.4316e-01 L1_spectral:1.5877e-02 L2_spectral:1.5627e-02 L3_spectral:1.5464e-02 L4_spectral:1.5492e-02 L5_spectral:1.5389e-02 L6_spectral:1.5480e-02 L7_spectral:1.5391e-02 L8_spectral:1.5703e-02 L9_spectral:1.5506e-02 L10_spectral:1.5370e-02 L11_spectral:1.5394e-02 L12_spectral:1.5476e-02 train_time:178912ms step_avg:44.73ms +[2025-09-11 11:05:12] [Rank 0] PRINT: step:4000/10000 
val_loss:5.1738 total_sharp:1.1672e-02 L1_sharp:5.3098e-03 L2_sharp:2.1399e-03 L3_sharp:2.3725e-03 L4_sharp:2.0476e-03 L5_sharp:2.4435e-03 L6_sharp:2.2792e-03 L7_sharp:1.9626e-03 L8_sharp:5.5445e-03 L9_sharp:5.2999e-03 L10_sharp:5.4702e-03 L11_sharp:6.7831e-03 L12_sharp:1.7940e-02 total_fnorm:7.0000e+00 total_l1_linf:1.6384e+04 total_spectral:3.5469e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2656e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2422e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.1953e+00 L1_l1linf:3.4570e-01 L2_l1linf:3.3203e-01 L3_l1linf:3.0664e-01 L4_l1linf:3.1250e-01 L5_l1linf:3.0859e-01 L6_l1linf:3.0664e-01 L7_l1linf:3.0664e-01 L8_l1linf:3.0469e-01 L9_l1linf:3.0469e-01 L10_l1linf:3.0859e-01 L11_l1linf:2.9102e-01 L12_l1linf:2.4316e-01 L1_spectral:1.5877e-02 L2_spectral:1.5627e-02 L3_spectral:1.5464e-02 L4_spectral:1.5492e-02 L5_spectral:1.5389e-02 L6_spectral:1.5480e-02 L7_spectral:1.5391e-02 L8_spectral:1.5703e-02 L9_spectral:1.5506e-02 L10_spectral:1.5370e-02 L11_spectral:1.5394e-02 L12_spectral:1.5476e-02 train_time:178912ms step_avg:44.73ms +[2025-09-11 11:05:14] [Rank 0] step:4001/10000 train_time:180139ms step_avg:45.02ms +[2025-09-11 11:05:14] [Rank 0] step:4001/10000 train_time:180139ms step_avg:45.02ms +[2025-09-11 11:05:14] [Rank 0] step:4021/10000 train_time:180819ms step_avg:44.97ms +[2025-09-11 11:05:14] [Rank 0] step:4021/10000 train_time:180819ms step_avg:44.97ms +[2025-09-11 11:05:15] [Rank 0] step:4041/10000 train_time:181638ms step_avg:44.95ms +[2025-09-11 11:05:15] [Rank 0] step:4041/10000 train_time:181638ms step_avg:44.95ms +[2025-09-11 11:05:16] [Rank 0] step:4061/10000 train_time:182442ms step_avg:44.93ms +[2025-09-11 11:05:16] [Rank 0] step:4061/10000 train_time:182442ms step_avg:44.93ms +[2025-09-11 11:05:17] [Rank 0] step:4081/10000 train_time:183118ms step_avg:44.87ms +[2025-09-11 11:05:17] [Rank 0] step:4081/10000 
train_time:183118ms step_avg:44.87ms +[2025-09-11 11:05:17] [Rank 0] step:4101/10000 train_time:183793ms step_avg:44.82ms +[2025-09-11 11:05:17] [Rank 0] step:4101/10000 train_time:183793ms step_avg:44.82ms +[2025-09-11 11:05:18] [Rank 0] step:4121/10000 train_time:184471ms step_avg:44.76ms +[2025-09-11 11:05:18] [Rank 0] step:4121/10000 train_time:184471ms step_avg:44.76ms +[2025-09-11 11:05:19] [Rank 0] step:4141/10000 train_time:185146ms step_avg:44.71ms +[2025-09-11 11:05:19] [Rank 0] step:4141/10000 train_time:185146ms step_avg:44.71ms +[2025-09-11 11:05:19] [Rank 0] step:4161/10000 train_time:185820ms step_avg:44.66ms +[2025-09-11 11:05:19] [Rank 0] step:4161/10000 train_time:185820ms step_avg:44.66ms +[2025-09-11 11:05:20] [Rank 0] step:4181/10000 train_time:186496ms step_avg:44.61ms +[2025-09-11 11:05:20] [Rank 0] step:4181/10000 train_time:186496ms step_avg:44.61ms +[2025-09-11 11:05:21] [Rank 0] step:4201/10000 train_time:187171ms step_avg:44.55ms +[2025-09-11 11:05:21] [Rank 0] step:4201/10000 train_time:187171ms step_avg:44.55ms +[2025-09-11 11:05:21] [Rank 0] step:4221/10000 train_time:187845ms step_avg:44.50ms +[2025-09-11 11:05:21] [Rank 0] step:4221/10000 train_time:187845ms step_avg:44.50ms +[2025-09-11 11:05:22] [Rank 0] step:4241/10000 train_time:188520ms step_avg:44.45ms +[2025-09-11 11:05:22] [Rank 0] step:4241/10000 train_time:188520ms step_avg:44.45ms +[2025-09-11 11:05:23] [Rank 0] step:4261/10000 train_time:189195ms step_avg:44.40ms +[2025-09-11 11:05:23] [Rank 0] step:4261/10000 train_time:189195ms step_avg:44.40ms +[2025-09-11 11:05:23] [Rank 0] step:4281/10000 train_time:189871ms step_avg:44.35ms +[2025-09-11 11:05:23] [Rank 0] step:4281/10000 train_time:189871ms step_avg:44.35ms +[2025-09-11 11:05:24] [Rank 0] step:4301/10000 train_time:190547ms step_avg:44.30ms +[2025-09-11 11:05:24] [Rank 0] step:4301/10000 train_time:190547ms step_avg:44.30ms +[2025-09-11 11:05:25] [Rank 0] step:4321/10000 train_time:191221ms step_avg:44.25ms 
+[2025-09-11 11:05:25] [Rank 0] step:4321/10000 train_time:191221ms step_avg:44.25ms +[2025-09-11 11:05:25] [Rank 0] step:4341/10000 train_time:191896ms step_avg:44.21ms +[2025-09-11 11:05:25] [Rank 0] step:4341/10000 train_time:191896ms step_avg:44.21ms +[2025-09-11 11:05:26] [Rank 0] step:4361/10000 train_time:192570ms step_avg:44.16ms +[2025-09-11 11:05:26] [Rank 0] step:4361/10000 train_time:192570ms step_avg:44.16ms +[2025-09-11 11:05:27] [Rank 0] step:4381/10000 train_time:193246ms step_avg:44.11ms +[2025-09-11 11:05:27] [Rank 0] step:4381/10000 train_time:193246ms step_avg:44.11ms +[2025-09-11 11:05:27] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 11:05:27] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 11:05:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 11:05:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 11:05:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 11:05:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 11:05:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:05:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:05:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 11:05:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 11:05:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 11:05:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 11:05:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 11:05:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 11:05:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 11:05:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 11:05:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 11:05:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 11:05:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 11:05:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 11:05:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 11:05:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 11:05:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 11:05:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 11:05:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 11:05:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 11:05:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 11:05:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 11:05:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 11:05:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 11:05:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 11:05:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 11:05:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 11:05:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 11:05:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 11:05:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 11:05:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 11:05:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 11:05:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 11:05:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 11:05:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 11:05:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 11:05:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 11:05:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 11:05:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:05:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:05:39] [Rank 0] PRINT: step:4400/10000 val_loss:5.1328 total_sharp:8.4032e-03 L1_sharp:3.3623e-03 L2_sharp:1.8723e-03 L3_sharp:6.9952e-04 L4_sharp:1.1498e-03 L5_sharp:1.5682e-03 L6_sharp:1.9782e-03 L7_sharp:1.8461e-03 L8_sharp:4.1141e-03 L9_sharp:3.6176e-03 L10_sharp:3.9962e-03 L11_sharp:5.0510e-03 L12_sharp:1.7010e-02 total_fnorm:6.3750e+00 total_l1_linf:1.5296e+04 total_spectral:3.2344e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2422e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2422e+00 L9_fnorm:1.2734e+00 L10_fnorm:1.2734e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.1953e+00 L1_l1linf:3.3789e-01 L2_l1linf:3.2227e-01 L3_l1linf:3.0859e-01 L4_l1linf:3.1055e-01 L5_l1linf:3.0664e-01 L6_l1linf:3.0273e-01 L7_l1linf:3.0273e-01 L8_l1linf:2.9883e-01 L9_l1linf:2.9688e-01 L10_l1linf:2.9688e-01 L11_l1linf:2.9492e-01 L12_l1linf:2.3340e-01 L1_spectral:1.5994e-02 L2_spectral:1.5669e-02 L3_spectral:1.5656e-02 L4_spectral:1.5621e-02 L5_spectral:1.5543e-02 L6_spectral:1.5600e-02 L7_spectral:1.5608e-02 L8_spectral:1.5784e-02 L9_spectral:1.5593e-02 L10_spectral:1.5525e-02 L11_spectral:1.5496e-02 L12_spectral:1.5470e-02 train_time:193901ms step_avg:44.07ms +[2025-09-11 11:05:39] [Rank 0] PRINT: step:4400/10000 val_loss:5.1328 total_sharp:8.4032e-03 L1_sharp:3.3623e-03 L2_sharp:1.8723e-03 L3_sharp:6.9952e-04 L4_sharp:1.1498e-03 L5_sharp:1.5682e-03 L6_sharp:1.9782e-03 L7_sharp:1.8461e-03 L8_sharp:4.1141e-03 L9_sharp:3.6176e-03 L10_sharp:3.9962e-03 L11_sharp:5.0510e-03 L12_sharp:1.7010e-02 total_fnorm:6.3750e+00 total_l1_linf:1.5296e+04 total_spectral:3.2344e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2422e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2422e+00 L9_fnorm:1.2734e+00 L10_fnorm:1.2734e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.1953e+00 L1_l1linf:3.3789e-01 L2_l1linf:3.2227e-01 L3_l1linf:3.0859e-01 L4_l1linf:3.1055e-01 L5_l1linf:3.0664e-01 
L6_l1linf:3.0273e-01 L7_l1linf:3.0273e-01 L8_l1linf:2.9883e-01 L9_l1linf:2.9688e-01 L10_l1linf:2.9688e-01 L11_l1linf:2.9492e-01 L12_l1linf:2.3340e-01 L1_spectral:1.5994e-02 L2_spectral:1.5669e-02 L3_spectral:1.5656e-02 L4_spectral:1.5621e-02 L5_spectral:1.5543e-02 L6_spectral:1.5600e-02 L7_spectral:1.5608e-02 L8_spectral:1.5784e-02 L9_spectral:1.5593e-02 L10_spectral:1.5525e-02 L11_spectral:1.5496e-02 L12_spectral:1.5470e-02 train_time:193901ms step_avg:44.07ms +[2025-09-11 11:05:41] [Rank 0] step:4401/10000 train_time:196081ms step_avg:44.55ms +[2025-09-11 11:05:41] [Rank 0] step:4401/10000 train_time:196081ms step_avg:44.55ms +[2025-09-11 11:05:41] [Rank 0] step:4421/10000 train_time:196771ms step_avg:44.51ms +[2025-09-11 11:05:41] [Rank 0] step:4421/10000 train_time:196771ms step_avg:44.51ms +[2025-09-11 11:05:42] [Rank 0] step:4441/10000 train_time:197448ms step_avg:44.46ms +[2025-09-11 11:05:42] [Rank 0] step:4441/10000 train_time:197448ms step_avg:44.46ms +[2025-09-11 11:05:43] [Rank 0] step:4461/10000 train_time:198124ms step_avg:44.41ms +[2025-09-11 11:05:43] [Rank 0] step:4461/10000 train_time:198124ms step_avg:44.41ms +[2025-09-11 11:05:44] [Rank 0] step:4481/10000 train_time:198801ms step_avg:44.37ms +[2025-09-11 11:05:44] [Rank 0] step:4481/10000 train_time:198801ms step_avg:44.37ms +[2025-09-11 11:05:44] [Rank 0] step:4501/10000 train_time:199479ms step_avg:44.32ms +[2025-09-11 11:05:44] [Rank 0] step:4501/10000 train_time:199479ms step_avg:44.32ms +[2025-09-11 11:05:45] [Rank 0] step:4521/10000 train_time:200157ms step_avg:44.27ms +[2025-09-11 11:05:45] [Rank 0] step:4521/10000 train_time:200157ms step_avg:44.27ms +[2025-09-11 11:05:46] [Rank 0] step:4541/10000 train_time:200835ms step_avg:44.23ms +[2025-09-11 11:05:46] [Rank 0] step:4541/10000 train_time:200835ms step_avg:44.23ms +[2025-09-11 11:05:46] [Rank 0] step:4561/10000 train_time:201512ms step_avg:44.18ms +[2025-09-11 11:05:46] [Rank 0] step:4561/10000 train_time:201512ms step_avg:44.18ms 
+[2025-09-11 11:05:47] [Rank 0] step:4581/10000 train_time:202190ms step_avg:44.14ms +[2025-09-11 11:05:47] [Rank 0] step:4581/10000 train_time:202190ms step_avg:44.14ms +[2025-09-11 11:05:48] [Rank 0] step:4601/10000 train_time:202867ms step_avg:44.09ms +[2025-09-11 11:05:48] [Rank 0] step:4601/10000 train_time:202867ms step_avg:44.09ms +[2025-09-11 11:05:48] [Rank 0] step:4621/10000 train_time:203545ms step_avg:44.05ms +[2025-09-11 11:05:48] [Rank 0] step:4621/10000 train_time:203545ms step_avg:44.05ms +[2025-09-11 11:05:49] [Rank 0] step:4641/10000 train_time:204222ms step_avg:44.00ms +[2025-09-11 11:05:49] [Rank 0] step:4641/10000 train_time:204222ms step_avg:44.00ms +[2025-09-11 11:05:50] [Rank 0] step:4661/10000 train_time:204900ms step_avg:43.96ms +[2025-09-11 11:05:50] [Rank 0] step:4661/10000 train_time:204900ms step_avg:43.96ms +[2025-09-11 11:05:50] [Rank 0] step:4681/10000 train_time:205576ms step_avg:43.92ms +[2025-09-11 11:05:50] [Rank 0] step:4681/10000 train_time:205576ms step_avg:43.92ms +[2025-09-11 11:05:51] [Rank 0] step:4701/10000 train_time:206254ms step_avg:43.87ms +[2025-09-11 11:05:51] [Rank 0] step:4701/10000 train_time:206254ms step_avg:43.87ms +[2025-09-11 11:05:52] [Rank 0] step:4721/10000 train_time:206930ms step_avg:43.83ms +[2025-09-11 11:05:52] [Rank 0] step:4721/10000 train_time:206930ms step_avg:43.83ms +[2025-09-11 11:05:52] [Rank 0] step:4741/10000 train_time:207607ms step_avg:43.79ms +[2025-09-11 11:05:52] [Rank 0] step:4741/10000 train_time:207607ms step_avg:43.79ms +[2025-09-11 11:05:53] [Rank 0] step:4761/10000 train_time:208285ms step_avg:43.75ms +[2025-09-11 11:05:53] [Rank 0] step:4761/10000 train_time:208285ms step_avg:43.75ms +[2025-09-11 11:05:54] [Rank 0] step:4781/10000 train_time:208961ms step_avg:43.71ms +[2025-09-11 11:05:54] [Rank 0] step:4781/10000 train_time:208961ms step_avg:43.71ms +[2025-09-11 11:05:54] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 11:05:54] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 11:05:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 11:05:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 11:05:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 11:05:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 11:05:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:05:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:05:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 11:05:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 11:05:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 11:05:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 11:06:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 11:06:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 11:06:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 11:06:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 11:06:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 11:06:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 11:06:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 11:06:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 11:06:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 11:06:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 11:06:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 11:06:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 11:06:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 11:06:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 11:06:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 11:06:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 11:06:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 11:06:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 11:06:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 11:06:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 11:06:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 11:06:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 11:06:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 11:06:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 11:06:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 11:06:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 11:06:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 11:06:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 11:06:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 11:06:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 11:06:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 11:06:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 11:06:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:06:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:06:04] [Rank 0] PRINT: step:4800/10000 val_loss:5.0983 total_sharp:1.0123e-02 L1_sharp:6.4960e-03 L2_sharp:2.1801e-03 L3_sharp:1.1513e-03 L4_sharp:1.4312e-03 L5_sharp:1.8561e-03 L6_sharp:1.7658e-03 L7_sharp:1.4568e-03 L8_sharp:4.2569e-03 L9_sharp:3.9437e-03 L10_sharp:4.1505e-03 L11_sharp:5.5131e-03 L12_sharp:1.4083e-02 total_fnorm:6.4375e+00 total_l1_linf:1.5296e+04 total_spectral:3.2500e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2422e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2422e+00 L9_fnorm:1.2734e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2734e+00 L12_fnorm:1.2188e+00 L1_l1linf:3.3594e-01 L2_l1linf:3.3203e-01 L3_l1linf:3.0273e-01 L4_l1linf:3.0664e-01 L5_l1linf:2.9492e-01 L6_l1linf:2.9492e-01 L7_l1linf:3.0273e-01 L8_l1linf:2.8711e-01 L9_l1linf:2.8516e-01 L10_l1linf:2.8711e-01 L11_l1linf:2.8125e-01 L12_l1linf:2.3633e-01 L1_spectral:1.6026e-02 L2_spectral:1.5805e-02 L3_spectral:1.5738e-02 L4_spectral:1.5699e-02 L5_spectral:1.5665e-02 L6_spectral:1.5758e-02 L7_spectral:1.5735e-02 L8_spectral:1.5945e-02 L9_spectral:1.5856e-02 L10_spectral:1.5745e-02 L11_spectral:1.5612e-02 L12_spectral:1.5617e-02 train_time:209618ms step_avg:43.67ms +[2025-09-11 11:06:04] [Rank 0] PRINT: step:4800/10000 
val_loss:5.0983 total_sharp:1.0123e-02 L1_sharp:6.4960e-03 L2_sharp:2.1801e-03 L3_sharp:1.1513e-03 L4_sharp:1.4312e-03 L5_sharp:1.8561e-03 L6_sharp:1.7658e-03 L7_sharp:1.4568e-03 L8_sharp:4.2569e-03 L9_sharp:3.9437e-03 L10_sharp:4.1505e-03 L11_sharp:5.5131e-03 L12_sharp:1.4083e-02 total_fnorm:6.4375e+00 total_l1_linf:1.5296e+04 total_spectral:3.2500e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2422e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2422e+00 L9_fnorm:1.2734e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2734e+00 L12_fnorm:1.2188e+00 L1_l1linf:3.3594e-01 L2_l1linf:3.3203e-01 L3_l1linf:3.0273e-01 L4_l1linf:3.0664e-01 L5_l1linf:2.9492e-01 L6_l1linf:2.9492e-01 L7_l1linf:3.0273e-01 L8_l1linf:2.8711e-01 L9_l1linf:2.8516e-01 L10_l1linf:2.8711e-01 L11_l1linf:2.8125e-01 L12_l1linf:2.3633e-01 L1_spectral:1.6026e-02 L2_spectral:1.5805e-02 L3_spectral:1.5738e-02 L4_spectral:1.5699e-02 L5_spectral:1.5665e-02 L6_spectral:1.5758e-02 L7_spectral:1.5735e-02 L8_spectral:1.5945e-02 L9_spectral:1.5856e-02 L10_spectral:1.5745e-02 L11_spectral:1.5612e-02 L12_spectral:1.5617e-02 train_time:209618ms step_avg:43.67ms +[2025-09-11 11:06:06] [Rank 0] step:4801/10000 train_time:210901ms step_avg:43.93ms +[2025-09-11 11:06:06] [Rank 0] step:4801/10000 train_time:210901ms step_avg:43.93ms +[2025-09-11 11:06:06] [Rank 0] step:4821/10000 train_time:211591ms step_avg:43.89ms +[2025-09-11 11:06:06] [Rank 0] step:4821/10000 train_time:211591ms step_avg:43.89ms +[2025-09-11 11:06:07] [Rank 0] step:4841/10000 train_time:212271ms step_avg:43.85ms +[2025-09-11 11:06:07] [Rank 0] step:4841/10000 train_time:212271ms step_avg:43.85ms +[2025-09-11 11:06:08] [Rank 0] step:4861/10000 train_time:212949ms step_avg:43.81ms +[2025-09-11 11:06:08] [Rank 0] step:4861/10000 train_time:212949ms step_avg:43.81ms +[2025-09-11 11:06:08] [Rank 0] step:4881/10000 train_time:213627ms step_avg:43.77ms +[2025-09-11 11:06:08] [Rank 0] step:4881/10000 
train_time:213627ms step_avg:43.77ms +[2025-09-11 11:06:09] [Rank 0] step:4901/10000 train_time:214306ms step_avg:43.73ms +[2025-09-11 11:06:09] [Rank 0] step:4901/10000 train_time:214306ms step_avg:43.73ms +[2025-09-11 11:06:10] [Rank 0] step:4921/10000 train_time:214984ms step_avg:43.69ms +[2025-09-11 11:06:10] [Rank 0] step:4921/10000 train_time:214984ms step_avg:43.69ms +[2025-09-11 11:06:10] [Rank 0] step:4941/10000 train_time:215661ms step_avg:43.65ms +[2025-09-11 11:06:10] [Rank 0] step:4941/10000 train_time:215661ms step_avg:43.65ms +[2025-09-11 11:06:11] [Rank 0] step:4961/10000 train_time:216339ms step_avg:43.61ms +[2025-09-11 11:06:11] [Rank 0] step:4961/10000 train_time:216339ms step_avg:43.61ms +[2025-09-11 11:06:12] [Rank 0] step:4981/10000 train_time:217017ms step_avg:43.57ms +[2025-09-11 11:06:12] [Rank 0] step:4981/10000 train_time:217017ms step_avg:43.57ms +[2025-09-11 11:06:12] [Rank 0] step:5001/10000 train_time:217697ms step_avg:43.53ms +[2025-09-11 11:06:12] [Rank 0] step:5001/10000 train_time:217697ms step_avg:43.53ms +[2025-09-11 11:06:13] [Rank 0] step:5021/10000 train_time:218374ms step_avg:43.49ms +[2025-09-11 11:06:13] [Rank 0] step:5021/10000 train_time:218374ms step_avg:43.49ms +[2025-09-11 11:06:14] [Rank 0] step:5041/10000 train_time:219051ms step_avg:43.45ms +[2025-09-11 11:06:14] [Rank 0] step:5041/10000 train_time:219051ms step_avg:43.45ms +[2025-09-11 11:06:14] [Rank 0] step:5061/10000 train_time:219729ms step_avg:43.42ms +[2025-09-11 11:06:14] [Rank 0] step:5061/10000 train_time:219729ms step_avg:43.42ms +[2025-09-11 11:06:15] [Rank 0] step:5081/10000 train_time:220406ms step_avg:43.38ms +[2025-09-11 11:06:15] [Rank 0] step:5081/10000 train_time:220406ms step_avg:43.38ms +[2025-09-11 11:06:16] [Rank 0] step:5101/10000 train_time:221236ms step_avg:43.37ms +[2025-09-11 11:06:16] [Rank 0] step:5101/10000 train_time:221236ms step_avg:43.37ms +[2025-09-11 11:06:17] [Rank 0] step:5121/10000 train_time:222338ms step_avg:43.42ms 
+[2025-09-11 11:06:17] [Rank 0] step:5121/10000 train_time:222338ms step_avg:43.42ms +[2025-09-11 11:06:18] [Rank 0] step:5141/10000 train_time:223016ms step_avg:43.38ms +[2025-09-11 11:06:18] [Rank 0] step:5141/10000 train_time:223016ms step_avg:43.38ms +[2025-09-11 11:06:18] [Rank 0] step:5161/10000 train_time:223848ms step_avg:43.37ms +[2025-09-11 11:06:18] [Rank 0] step:5161/10000 train_time:223848ms step_avg:43.37ms +[2025-09-11 11:06:19] [Rank 0] step:5181/10000 train_time:224635ms step_avg:43.36ms +[2025-09-11 11:06:19] [Rank 0] step:5181/10000 train_time:224635ms step_avg:43.36ms +[2025-09-11 11:06:20] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 11:06:20] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 11:06:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 11:06:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 11:06:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 11:06:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 11:06:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:06:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:06:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 11:06:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 11:06:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 11:06:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 11:06:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 11:06:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 11:06:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 11:06:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 11:06:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 11:06:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 11:06:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 11:06:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 11:06:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 11:06:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 11:06:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 11:06:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 11:06:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 11:06:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 11:06:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 11:06:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 11:06:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 11:06:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 11:06:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 11:06:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 11:06:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 11:06:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 11:06:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 11:06:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 11:06:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 11:06:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 11:06:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 11:06:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 11:06:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 11:06:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 11:06:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 11:06:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 11:06:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:06:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:06:30] [Rank 0] PRINT: step:5200/10000 val_loss:5.0615 total_sharp:1.2186e-02 L1_sharp:5.7681e-03 L2_sharp:2.9107e-03 L3_sharp:1.4755e-03 L4_sharp:1.4246e-03 L5_sharp:1.9158e-03 L6_sharp:1.6955e-03 L7_sharp:1.8239e-03 L8_sharp:4.3162e-03 L9_sharp:4.0092e-03 L10_sharp:4.3768e-03 L11_sharp:6.4263e-03 L12_sharp:2.7588e-02 total_fnorm:6.2188e+00 total_l1_linf:1.4656e+04 total_spectral:3.1406e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2422e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2734e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2188e+00 L1_l1linf:3.3984e-01 L2_l1linf:3.2812e-01 L3_l1linf:3.0078e-01 L4_l1linf:3.0664e-01 L5_l1linf:2.9492e-01 L6_l1linf:2.9492e-01 L7_l1linf:2.9883e-01 L8_l1linf:2.8516e-01 L9_l1linf:2.8320e-01 L10_l1linf:2.7930e-01 L11_l1linf:2.8125e-01 L12_l1linf:2.3535e-01 L1_spectral:1.6227e-02 L2_spectral:1.5783e-02 L3_spectral:1.5792e-02 L4_spectral:1.5782e-02 L5_spectral:1.5739e-02 L6_spectral:1.5773e-02 L7_spectral:1.5784e-02 L8_spectral:1.6048e-02 L9_spectral:1.5838e-02 L10_spectral:1.5892e-02 L11_spectral:1.5801e-02 L12_spectral:1.5635e-02 train_time:225300ms step_avg:43.33ms +[2025-09-11 11:06:30] [Rank 0] PRINT: step:5200/10000 val_loss:5.0615 total_sharp:1.2186e-02 L1_sharp:5.7681e-03 L2_sharp:2.9107e-03 L3_sharp:1.4755e-03 L4_sharp:1.4246e-03 L5_sharp:1.9158e-03 L6_sharp:1.6955e-03 L7_sharp:1.8239e-03 L8_sharp:4.3162e-03 L9_sharp:4.0092e-03 L10_sharp:4.3768e-03 L11_sharp:6.4263e-03 L12_sharp:2.7588e-02 total_fnorm:6.2188e+00 total_l1_linf:1.4656e+04 total_spectral:3.1406e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2422e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2734e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2188e+00 L1_l1linf:3.3984e-01 L2_l1linf:3.2812e-01 L3_l1linf:3.0078e-01 L4_l1linf:3.0664e-01 L5_l1linf:2.9492e-01 
L6_l1linf:2.9492e-01 L7_l1linf:2.9883e-01 L8_l1linf:2.8516e-01 L9_l1linf:2.8320e-01 L10_l1linf:2.7930e-01 L11_l1linf:2.8125e-01 L12_l1linf:2.3535e-01 L1_spectral:1.6227e-02 L2_spectral:1.5783e-02 L3_spectral:1.5792e-02 L4_spectral:1.5782e-02 L5_spectral:1.5739e-02 L6_spectral:1.5773e-02 L7_spectral:1.5784e-02 L8_spectral:1.6048e-02 L9_spectral:1.5838e-02 L10_spectral:1.5892e-02 L11_spectral:1.5801e-02 L12_spectral:1.5635e-02 train_time:225300ms step_avg:43.33ms +[2025-09-11 11:06:31] [Rank 0] step:5201/10000 train_time:226536ms step_avg:43.56ms +[2025-09-11 11:06:31] [Rank 0] step:5201/10000 train_time:226536ms step_avg:43.56ms +[2025-09-11 11:06:32] [Rank 0] step:5221/10000 train_time:227268ms step_avg:43.53ms +[2025-09-11 11:06:32] [Rank 0] step:5221/10000 train_time:227268ms step_avg:43.53ms +[2025-09-11 11:06:33] [Rank 0] step:5241/10000 train_time:227956ms step_avg:43.49ms +[2025-09-11 11:06:33] [Rank 0] step:5241/10000 train_time:227956ms step_avg:43.49ms +[2025-09-11 11:06:33] [Rank 0] step:5261/10000 train_time:228644ms step_avg:43.46ms +[2025-09-11 11:06:33] [Rank 0] step:5261/10000 train_time:228644ms step_avg:43.46ms +[2025-09-11 11:06:34] [Rank 0] step:5281/10000 train_time:229331ms step_avg:43.43ms +[2025-09-11 11:06:34] [Rank 0] step:5281/10000 train_time:229331ms step_avg:43.43ms +[2025-09-11 11:06:35] [Rank 0] step:5301/10000 train_time:230019ms step_avg:43.39ms +[2025-09-11 11:06:35] [Rank 0] step:5301/10000 train_time:230019ms step_avg:43.39ms +[2025-09-11 11:06:35] [Rank 0] step:5321/10000 train_time:230706ms step_avg:43.36ms +[2025-09-11 11:06:35] [Rank 0] step:5321/10000 train_time:230706ms step_avg:43.36ms +[2025-09-11 11:06:36] [Rank 0] step:5341/10000 train_time:231393ms step_avg:43.32ms +[2025-09-11 11:06:36] [Rank 0] step:5341/10000 train_time:231393ms step_avg:43.32ms +[2025-09-11 11:06:37] [Rank 0] step:5361/10000 train_time:232080ms step_avg:43.29ms +[2025-09-11 11:06:37] [Rank 0] step:5361/10000 train_time:232080ms step_avg:43.29ms 
+[2025-09-11 11:06:37] [Rank 0] step:5381/10000 train_time:232768ms step_avg:43.26ms +[2025-09-11 11:06:37] [Rank 0] step:5381/10000 train_time:232768ms step_avg:43.26ms +[2025-09-11 11:06:38] [Rank 0] step:5401/10000 train_time:233454ms step_avg:43.22ms +[2025-09-11 11:06:38] [Rank 0] step:5401/10000 train_time:233454ms step_avg:43.22ms +[2025-09-11 11:06:39] [Rank 0] step:5421/10000 train_time:234143ms step_avg:43.19ms +[2025-09-11 11:06:39] [Rank 0] step:5421/10000 train_time:234143ms step_avg:43.19ms +[2025-09-11 11:06:39] [Rank 0] step:5441/10000 train_time:234830ms step_avg:43.16ms +[2025-09-11 11:06:39] [Rank 0] step:5441/10000 train_time:234830ms step_avg:43.16ms +[2025-09-11 11:06:40] [Rank 0] step:5461/10000 train_time:235517ms step_avg:43.13ms +[2025-09-11 11:06:40] [Rank 0] step:5461/10000 train_time:235517ms step_avg:43.13ms +[2025-09-11 11:06:41] [Rank 0] step:5481/10000 train_time:236204ms step_avg:43.10ms +[2025-09-11 11:06:41] [Rank 0] step:5481/10000 train_time:236204ms step_avg:43.10ms +[2025-09-11 11:06:41] [Rank 0] step:5501/10000 train_time:236891ms step_avg:43.06ms +[2025-09-11 11:06:41] [Rank 0] step:5501/10000 train_time:236891ms step_avg:43.06ms +[2025-09-11 11:06:42] [Rank 0] step:5521/10000 train_time:237578ms step_avg:43.03ms +[2025-09-11 11:06:42] [Rank 0] step:5521/10000 train_time:237578ms step_avg:43.03ms +[2025-09-11 11:06:43] [Rank 0] step:5541/10000 train_time:238267ms step_avg:43.00ms +[2025-09-11 11:06:43] [Rank 0] step:5541/10000 train_time:238267ms step_avg:43.00ms +[2025-09-11 11:06:44] [Rank 0] step:5561/10000 train_time:238956ms step_avg:42.97ms +[2025-09-11 11:06:44] [Rank 0] step:5561/10000 train_time:238956ms step_avg:42.97ms +[2025-09-11 11:06:44] [Rank 0] step:5581/10000 train_time:239644ms step_avg:42.94ms +[2025-09-11 11:06:44] [Rank 0] step:5581/10000 train_time:239644ms step_avg:42.94ms +[2025-09-11 11:06:45] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 11:06:45] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 11:06:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 11:06:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 11:06:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 11:06:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 11:06:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:06:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:06:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 11:06:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 11:06:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 11:06:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 11:06:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 11:06:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 11:06:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 11:06:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 11:06:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 11:06:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 11:06:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 11:06:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 11:06:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 11:06:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 11:06:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 11:06:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 11:06:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 11:06:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 11:06:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 11:06:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 11:06:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 11:06:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 11:06:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 11:06:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 11:06:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 11:06:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 11:06:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 11:06:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 11:06:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 11:06:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 11:06:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 11:06:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 11:06:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 11:06:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 11:06:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 11:06:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 11:06:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:06:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:06:55] [Rank 0] PRINT: step:5600/10000 val_loss:5.0312 total_sharp:1.0277e-02 L1_sharp:5.9560e-03 L2_sharp:1.5691e-03 L3_sharp:1.5995e-03 L4_sharp:1.4837e-03 L5_sharp:2.1175e-03 L6_sharp:1.4833e-03 L7_sharp:1.7331e-03 L8_sharp:3.8765e-03 L9_sharp:4.0055e-03 L10_sharp:4.4106e-03 L11_sharp:5.8429e-03 L12_sharp:1.4603e-02 total_fnorm:6.1562e+00 total_l1_linf:1.4464e+04 total_spectral:3.1094e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2500e+00 L8_fnorm:1.2344e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2266e+00 L1_l1linf:3.3789e-01 L2_l1linf:3.2031e-01 L3_l1linf:3.0273e-01 L4_l1linf:3.0469e-01 L5_l1linf:2.8906e-01 L6_l1linf:2.8906e-01 L7_l1linf:2.9297e-01 L8_l1linf:2.8125e-01 L9_l1linf:2.7734e-01 L10_l1linf:2.7539e-01 L11_l1linf:2.7539e-01 L12_l1linf:2.4121e-01 L1_spectral:1.6188e-02 L2_spectral:1.5953e-02 L3_spectral:1.5878e-02 L4_spectral:1.5895e-02 L5_spectral:1.5825e-02 L6_spectral:1.5878e-02 L7_spectral:1.5858e-02 L8_spectral:1.5946e-02 L9_spectral:1.5946e-02 L10_spectral:1.5878e-02 L11_spectral:1.5847e-02 L12_spectral:1.5714e-02 train_time:240311ms step_avg:42.91ms +[2025-09-11 11:06:55] [Rank 0] PRINT: step:5600/10000 
val_loss:5.0312 total_sharp:1.0277e-02 L1_sharp:5.9560e-03 L2_sharp:1.5691e-03 L3_sharp:1.5995e-03 L4_sharp:1.4837e-03 L5_sharp:2.1175e-03 L6_sharp:1.4833e-03 L7_sharp:1.7331e-03 L8_sharp:3.8765e-03 L9_sharp:4.0055e-03 L10_sharp:4.4106e-03 L11_sharp:5.8429e-03 L12_sharp:1.4603e-02 total_fnorm:6.1562e+00 total_l1_linf:1.4464e+04 total_spectral:3.1094e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2500e+00 L8_fnorm:1.2344e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2266e+00 L1_l1linf:3.3789e-01 L2_l1linf:3.2031e-01 L3_l1linf:3.0273e-01 L4_l1linf:3.0469e-01 L5_l1linf:2.8906e-01 L6_l1linf:2.8906e-01 L7_l1linf:2.9297e-01 L8_l1linf:2.8125e-01 L9_l1linf:2.7734e-01 L10_l1linf:2.7539e-01 L11_l1linf:2.7539e-01 L12_l1linf:2.4121e-01 L1_spectral:1.6188e-02 L2_spectral:1.5953e-02 L3_spectral:1.5878e-02 L4_spectral:1.5895e-02 L5_spectral:1.5825e-02 L6_spectral:1.5878e-02 L7_spectral:1.5858e-02 L8_spectral:1.5946e-02 L9_spectral:1.5946e-02 L10_spectral:1.5878e-02 L11_spectral:1.5847e-02 L12_spectral:1.5714e-02 train_time:240311ms step_avg:42.91ms +[2025-09-11 11:06:56] [Rank 0] step:5601/10000 train_time:241585ms step_avg:43.13ms +[2025-09-11 11:06:56] [Rank 0] step:5601/10000 train_time:241585ms step_avg:43.13ms +[2025-09-11 11:06:57] [Rank 0] step:5621/10000 train_time:242306ms step_avg:43.11ms +[2025-09-11 11:06:57] [Rank 0] step:5621/10000 train_time:242306ms step_avg:43.11ms +[2025-09-11 11:06:58] [Rank 0] step:5641/10000 train_time:242993ms step_avg:43.08ms +[2025-09-11 11:06:58] [Rank 0] step:5641/10000 train_time:242993ms step_avg:43.08ms +[2025-09-11 11:06:58] [Rank 0] step:5661/10000 train_time:243681ms step_avg:43.05ms +[2025-09-11 11:06:58] [Rank 0] step:5661/10000 train_time:243681ms step_avg:43.05ms +[2025-09-11 11:06:59] [Rank 0] step:5681/10000 train_time:244368ms step_avg:43.01ms +[2025-09-11 11:06:59] [Rank 0] step:5681/10000 
train_time:244368ms step_avg:43.01ms +[2025-09-11 11:07:00] [Rank 0] step:5701/10000 train_time:245057ms step_avg:42.98ms +[2025-09-11 11:07:00] [Rank 0] step:5701/10000 train_time:245057ms step_avg:42.98ms +[2025-09-11 11:07:00] [Rank 0] step:5721/10000 train_time:245744ms step_avg:42.95ms +[2025-09-11 11:07:00] [Rank 0] step:5721/10000 train_time:245744ms step_avg:42.95ms +[2025-09-11 11:07:01] [Rank 0] step:5741/10000 train_time:246433ms step_avg:42.93ms +[2025-09-11 11:07:01] [Rank 0] step:5741/10000 train_time:246433ms step_avg:42.93ms +[2025-09-11 11:07:02] [Rank 0] step:5761/10000 train_time:247122ms step_avg:42.90ms +[2025-09-11 11:07:02] [Rank 0] step:5761/10000 train_time:247122ms step_avg:42.90ms +[2025-09-11 11:07:02] [Rank 0] step:5781/10000 train_time:247811ms step_avg:42.87ms +[2025-09-11 11:07:02] [Rank 0] step:5781/10000 train_time:247811ms step_avg:42.87ms +[2025-09-11 11:07:03] [Rank 0] step:5801/10000 train_time:248500ms step_avg:42.84ms +[2025-09-11 11:07:03] [Rank 0] step:5801/10000 train_time:248500ms step_avg:42.84ms +[2025-09-11 11:07:04] [Rank 0] step:5821/10000 train_time:249187ms step_avg:42.81ms +[2025-09-11 11:07:04] [Rank 0] step:5821/10000 train_time:249187ms step_avg:42.81ms +[2025-09-11 11:07:04] [Rank 0] step:5841/10000 train_time:249876ms step_avg:42.78ms +[2025-09-11 11:07:04] [Rank 0] step:5841/10000 train_time:249876ms step_avg:42.78ms +[2025-09-11 11:07:05] [Rank 0] step:5861/10000 train_time:250563ms step_avg:42.75ms +[2025-09-11 11:07:05] [Rank 0] step:5861/10000 train_time:250563ms step_avg:42.75ms +[2025-09-11 11:07:06] [Rank 0] step:5881/10000 train_time:251252ms step_avg:42.72ms +[2025-09-11 11:07:06] [Rank 0] step:5881/10000 train_time:251252ms step_avg:42.72ms +[2025-09-11 11:07:06] [Rank 0] step:5901/10000 train_time:251939ms step_avg:42.69ms +[2025-09-11 11:07:06] [Rank 0] step:5901/10000 train_time:251939ms step_avg:42.69ms +[2025-09-11 11:07:07] [Rank 0] step:5921/10000 train_time:252630ms step_avg:42.67ms 
+[2025-09-11 11:07:07] [Rank 0] step:5921/10000 train_time:252630ms step_avg:42.67ms +[2025-09-11 11:07:08] [Rank 0] step:5941/10000 train_time:253319ms step_avg:42.64ms +[2025-09-11 11:07:08] [Rank 0] step:5941/10000 train_time:253319ms step_avg:42.64ms +[2025-09-11 11:07:09] [Rank 0] step:5961/10000 train_time:254007ms step_avg:42.61ms +[2025-09-11 11:07:09] [Rank 0] step:5961/10000 train_time:254007ms step_avg:42.61ms +[2025-09-11 11:07:09] [Rank 0] step:5981/10000 train_time:254703ms step_avg:42.59ms +[2025-09-11 11:07:09] [Rank 0] step:5981/10000 train_time:254703ms step_avg:42.59ms +[2025-09-11 11:07:10] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 11:07:10] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 11:07:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 11:07:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 11:07:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 11:07:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 11:07:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:07:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:07:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 11:07:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 11:07:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 11:07:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 11:07:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 11:07:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 11:07:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 11:07:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 11:07:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 11:07:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 11:07:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 11:07:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 11:07:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 11:07:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 11:07:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 11:07:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 11:07:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 11:07:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 11:07:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 11:07:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 11:07:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 11:07:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 11:07:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 11:07:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 11:07:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 11:07:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 11:07:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 11:07:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 11:07:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 11:07:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 11:07:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 11:07:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 11:07:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 11:07:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 11:07:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 11:07:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 11:07:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:07:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:07:20] [Rank 0] PRINT: step:6000/10000 val_loss:4.9976 total_sharp:1.2625e-02 L1_sharp:4.7447e-03 L2_sharp:1.4695e-03 L3_sharp:8.9938e-04 L4_sharp:1.8123e-03 L5_sharp:1.9599e-03 L6_sharp:2.1335e-03 L7_sharp:1.5322e-03 L8_sharp:4.0731e-03 L9_sharp:4.1475e-03 L10_sharp:4.5877e-03 L11_sharp:5.7938e-03 L12_sharp:2.7201e-02 total_fnorm:6.0938e+00 total_l1_linf:1.4144e+04 total_spectral:3.0781e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2500e+00 L8_fnorm:1.2344e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2266e+00 L1_l1linf:3.3008e-01 L2_l1linf:3.2227e-01 L3_l1linf:2.9492e-01 L4_l1linf:2.9883e-01 L5_l1linf:2.9297e-01 L6_l1linf:2.8906e-01 L7_l1linf:2.9297e-01 L8_l1linf:2.7734e-01 L9_l1linf:2.7148e-01 L10_l1linf:2.7148e-01 L11_l1linf:2.7148e-01 L12_l1linf:2.3730e-01 L1_spectral:1.6224e-02 L2_spectral:1.5973e-02 L3_spectral:1.5868e-02 L4_spectral:1.5940e-02 L5_spectral:1.5877e-02 L6_spectral:1.5919e-02 L7_spectral:1.5930e-02 L8_spectral:1.6091e-02 L9_spectral:1.5981e-02 L10_spectral:1.6023e-02 L11_spectral:1.5964e-02 L12_spectral:1.5816e-02 train_time:255375ms step_avg:42.56ms +[2025-09-11 11:07:20] [Rank 0] PRINT: step:6000/10000 val_loss:4.9976 total_sharp:1.2625e-02 L1_sharp:4.7447e-03 L2_sharp:1.4695e-03 L3_sharp:8.9938e-04 L4_sharp:1.8123e-03 L5_sharp:1.9599e-03 L6_sharp:2.1335e-03 L7_sharp:1.5322e-03 L8_sharp:4.0731e-03 L9_sharp:4.1475e-03 L10_sharp:4.5877e-03 L11_sharp:5.7938e-03 L12_sharp:2.7201e-02 total_fnorm:6.0938e+00 total_l1_linf:1.4144e+04 total_spectral:3.0781e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2500e+00 L8_fnorm:1.2344e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2266e+00 L1_l1linf:3.3008e-01 L2_l1linf:3.2227e-01 L3_l1linf:2.9492e-01 L4_l1linf:2.9883e-01 L5_l1linf:2.9297e-01 
L6_l1linf:2.8906e-01 L7_l1linf:2.9297e-01 L8_l1linf:2.7734e-01 L9_l1linf:2.7148e-01 L10_l1linf:2.7148e-01 L11_l1linf:2.7148e-01 L12_l1linf:2.3730e-01 L1_spectral:1.6224e-02 L2_spectral:1.5973e-02 L3_spectral:1.5868e-02 L4_spectral:1.5940e-02 L5_spectral:1.5877e-02 L6_spectral:1.5919e-02 L7_spectral:1.5930e-02 L8_spectral:1.6091e-02 L9_spectral:1.5981e-02 L10_spectral:1.6023e-02 L11_spectral:1.5964e-02 L12_spectral:1.5816e-02 train_time:255375ms step_avg:42.56ms +[2025-09-11 11:07:21] [Rank 0] step:6001/10000 train_time:256621ms step_avg:42.76ms +[2025-09-11 11:07:21] [Rank 0] step:6001/10000 train_time:256621ms step_avg:42.76ms +[2025-09-11 11:07:22] [Rank 0] step:6021/10000 train_time:257597ms step_avg:42.78ms +[2025-09-11 11:07:22] [Rank 0] step:6021/10000 train_time:257597ms step_avg:42.78ms +[2025-09-11 11:07:23] [Rank 0] step:6041/10000 train_time:258291ms step_avg:42.76ms +[2025-09-11 11:07:23] [Rank 0] step:6041/10000 train_time:258291ms step_avg:42.76ms +[2025-09-11 11:07:24] [Rank 0] step:6061/10000 train_time:258982ms step_avg:42.73ms +[2025-09-11 11:07:24] [Rank 0] step:6061/10000 train_time:258982ms step_avg:42.73ms +[2025-09-11 11:07:24] [Rank 0] step:6081/10000 train_time:259675ms step_avg:42.70ms +[2025-09-11 11:07:24] [Rank 0] step:6081/10000 train_time:259675ms step_avg:42.70ms +[2025-09-11 11:07:25] [Rank 0] step:6101/10000 train_time:260367ms step_avg:42.68ms +[2025-09-11 11:07:25] [Rank 0] step:6101/10000 train_time:260367ms step_avg:42.68ms +[2025-09-11 11:07:26] [Rank 0] step:6121/10000 train_time:261057ms step_avg:42.65ms +[2025-09-11 11:07:26] [Rank 0] step:6121/10000 train_time:261057ms step_avg:42.65ms +[2025-09-11 11:07:26] [Rank 0] step:6141/10000 train_time:261748ms step_avg:42.62ms +[2025-09-11 11:07:26] [Rank 0] step:6141/10000 train_time:261748ms step_avg:42.62ms +[2025-09-11 11:07:27] [Rank 0] step:6161/10000 train_time:262438ms step_avg:42.60ms +[2025-09-11 11:07:27] [Rank 0] step:6161/10000 train_time:262438ms step_avg:42.60ms 
+[2025-09-11 11:07:28] [Rank 0] step:6181/10000 train_time:263127ms step_avg:42.57ms +[2025-09-11 11:07:28] [Rank 0] step:6181/10000 train_time:263127ms step_avg:42.57ms +[2025-09-11 11:07:28] [Rank 0] step:6201/10000 train_time:263818ms step_avg:42.54ms +[2025-09-11 11:07:28] [Rank 0] step:6201/10000 train_time:263818ms step_avg:42.54ms +[2025-09-11 11:07:29] [Rank 0] step:6221/10000 train_time:264510ms step_avg:42.52ms +[2025-09-11 11:07:29] [Rank 0] step:6221/10000 train_time:264510ms step_avg:42.52ms +[2025-09-11 11:07:30] [Rank 0] step:6241/10000 train_time:265201ms step_avg:42.49ms +[2025-09-11 11:07:30] [Rank 0] step:6241/10000 train_time:265201ms step_avg:42.49ms +[2025-09-11 11:07:30] [Rank 0] step:6261/10000 train_time:265891ms step_avg:42.47ms +[2025-09-11 11:07:30] [Rank 0] step:6261/10000 train_time:265891ms step_avg:42.47ms +[2025-09-11 11:07:31] [Rank 0] step:6281/10000 train_time:266581ms step_avg:42.44ms +[2025-09-11 11:07:31] [Rank 0] step:6281/10000 train_time:266581ms step_avg:42.44ms +[2025-09-11 11:07:32] [Rank 0] step:6301/10000 train_time:267271ms step_avg:42.42ms +[2025-09-11 11:07:32] [Rank 0] step:6301/10000 train_time:267271ms step_avg:42.42ms +[2025-09-11 11:07:33] [Rank 0] step:6321/10000 train_time:267965ms step_avg:42.39ms +[2025-09-11 11:07:33] [Rank 0] step:6321/10000 train_time:267965ms step_avg:42.39ms +[2025-09-11 11:07:33] [Rank 0] step:6341/10000 train_time:268656ms step_avg:42.37ms +[2025-09-11 11:07:33] [Rank 0] step:6341/10000 train_time:268656ms step_avg:42.37ms +[2025-09-11 11:07:34] [Rank 0] step:6361/10000 train_time:269348ms step_avg:42.34ms +[2025-09-11 11:07:34] [Rank 0] step:6361/10000 train_time:269348ms step_avg:42.34ms +[2025-09-11 11:07:35] [Rank 0] step:6381/10000 train_time:270039ms step_avg:42.32ms +[2025-09-11 11:07:35] [Rank 0] step:6381/10000 train_time:270039ms step_avg:42.32ms +[2025-09-11 11:07:35] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 11:07:35] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 11:07:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 11:07:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 11:07:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 11:07:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 11:07:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:07:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:07:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 11:07:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 11:07:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 11:07:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 11:07:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 11:07:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 11:07:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 11:07:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 11:07:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 11:07:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 11:07:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 11:07:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 11:07:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 11:07:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 11:07:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 11:07:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 11:07:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 11:07:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 11:07:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 11:07:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 11:07:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 11:07:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 11:07:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 11:07:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 11:07:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 11:07:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 11:07:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 11:07:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 11:07:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 11:07:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 11:07:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 11:07:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 11:07:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 11:07:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 11:07:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 11:07:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 11:07:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:07:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:07:46] [Rank 0] PRINT: step:6400/10000 val_loss:4.9674 total_sharp:9.1961e-03 L1_sharp:3.5864e-03 L2_sharp:9.3470e-04 L3_sharp:6.2559e-04 L4_sharp:8.9916e-04 L5_sharp:1.3833e-03 L6_sharp:1.4358e-03 L7_sharp:1.4685e-03 L8_sharp:3.3905e-03 L9_sharp:3.6691e-03 L10_sharp:4.4791e-03 L11_sharp:5.5417e-03 L12_sharp:1.2585e-02 total_fnorm:5.4688e+00 total_l1_linf:1.2032e+04 total_spectral:2.7188e+00 L1_fnorm:1.1562e+00 L2_fnorm:1.1172e+00 L3_fnorm:1.1250e+00 L4_fnorm:1.1328e+00 L5_fnorm:1.1250e+00 L6_fnorm:1.1406e+00 L7_fnorm:1.1250e+00 L8_fnorm:1.1094e+00 L9_fnorm:1.1328e+00 L10_fnorm:1.1328e+00 L11_fnorm:1.1406e+00 L12_fnorm:1.1016e+00 L1_l1linf:2.8320e-01 L2_l1linf:2.7734e-01 L3_l1linf:2.5586e-01 L4_l1linf:2.5977e-01 L5_l1linf:2.4902e-01 L6_l1linf:2.4609e-01 L7_l1linf:2.5195e-01 L8_l1linf:2.3633e-01 L9_l1linf:2.3633e-01 L10_l1linf:2.3926e-01 L11_l1linf:2.3242e-01 L12_l1linf:2.0703e-01 L1_spectral:1.4867e-02 L2_spectral:1.4640e-02 L3_spectral:1.4609e-02 L4_spectral:1.4773e-02 L5_spectral:1.4518e-02 L6_spectral:1.4671e-02 L7_spectral:1.4548e-02 L8_spectral:1.4533e-02 L9_spectral:1.4775e-02 L10_spectral:1.4666e-02 L11_spectral:1.4676e-02 L12_spectral:1.4566e-02 train_time:270709ms step_avg:42.30ms +[2025-09-11 11:07:46] [Rank 0] PRINT: step:6400/10000 
val_loss:4.9674 total_sharp:9.1961e-03 L1_sharp:3.5864e-03 L2_sharp:9.3470e-04 L3_sharp:6.2559e-04 L4_sharp:8.9916e-04 L5_sharp:1.3833e-03 L6_sharp:1.4358e-03 L7_sharp:1.4685e-03 L8_sharp:3.3905e-03 L9_sharp:3.6691e-03 L10_sharp:4.4791e-03 L11_sharp:5.5417e-03 L12_sharp:1.2585e-02 total_fnorm:5.4688e+00 total_l1_linf:1.2032e+04 total_spectral:2.7188e+00 L1_fnorm:1.1562e+00 L2_fnorm:1.1172e+00 L3_fnorm:1.1250e+00 L4_fnorm:1.1328e+00 L5_fnorm:1.1250e+00 L6_fnorm:1.1406e+00 L7_fnorm:1.1250e+00 L8_fnorm:1.1094e+00 L9_fnorm:1.1328e+00 L10_fnorm:1.1328e+00 L11_fnorm:1.1406e+00 L12_fnorm:1.1016e+00 L1_l1linf:2.8320e-01 L2_l1linf:2.7734e-01 L3_l1linf:2.5586e-01 L4_l1linf:2.5977e-01 L5_l1linf:2.4902e-01 L6_l1linf:2.4609e-01 L7_l1linf:2.5195e-01 L8_l1linf:2.3633e-01 L9_l1linf:2.3633e-01 L10_l1linf:2.3926e-01 L11_l1linf:2.3242e-01 L12_l1linf:2.0703e-01 L1_spectral:1.4867e-02 L2_spectral:1.4640e-02 L3_spectral:1.4609e-02 L4_spectral:1.4773e-02 L5_spectral:1.4518e-02 L6_spectral:1.4671e-02 L7_spectral:1.4548e-02 L8_spectral:1.4533e-02 L9_spectral:1.4775e-02 L10_spectral:1.4666e-02 L11_spectral:1.4676e-02 L12_spectral:1.4566e-02 train_time:270709ms step_avg:42.30ms +[2025-09-11 11:07:47] [Rank 0] step:6401/10000 train_time:271984ms step_avg:42.49ms +[2025-09-11 11:07:47] [Rank 0] step:6401/10000 train_time:271984ms step_avg:42.49ms +[2025-09-11 11:07:48] [Rank 0] step:6421/10000 train_time:272717ms step_avg:42.47ms +[2025-09-11 11:07:48] [Rank 0] step:6421/10000 train_time:272717ms step_avg:42.47ms +[2025-09-11 11:07:48] [Rank 0] step:6441/10000 train_time:273410ms step_avg:42.45ms +[2025-09-11 11:07:48] [Rank 0] step:6441/10000 train_time:273410ms step_avg:42.45ms +[2025-09-11 11:07:49] [Rank 0] step:6461/10000 train_time:274102ms step_avg:42.42ms +[2025-09-11 11:07:49] [Rank 0] step:6461/10000 train_time:274102ms step_avg:42.42ms +[2025-09-11 11:07:50] [Rank 0] step:6481/10000 train_time:274795ms step_avg:42.40ms +[2025-09-11 11:07:50] [Rank 0] step:6481/10000 
train_time:274795ms step_avg:42.40ms +[2025-09-11 11:07:50] [Rank 0] step:6501/10000 train_time:275488ms step_avg:42.38ms +[2025-09-11 11:07:50] [Rank 0] step:6501/10000 train_time:275488ms step_avg:42.38ms +[2025-09-11 11:07:51] [Rank 0] step:6521/10000 train_time:276180ms step_avg:42.35ms +[2025-09-11 11:07:51] [Rank 0] step:6521/10000 train_time:276180ms step_avg:42.35ms +[2025-09-11 11:07:52] [Rank 0] step:6541/10000 train_time:276870ms step_avg:42.33ms +[2025-09-11 11:07:52] [Rank 0] step:6541/10000 train_time:276870ms step_avg:42.33ms +[2025-09-11 11:07:52] [Rank 0] step:6561/10000 train_time:277561ms step_avg:42.30ms +[2025-09-11 11:07:52] [Rank 0] step:6561/10000 train_time:277561ms step_avg:42.30ms +[2025-09-11 11:07:53] [Rank 0] step:6581/10000 train_time:278253ms step_avg:42.28ms +[2025-09-11 11:07:53] [Rank 0] step:6581/10000 train_time:278253ms step_avg:42.28ms +[2025-09-11 11:07:54] [Rank 0] step:6601/10000 train_time:278944ms step_avg:42.26ms +[2025-09-11 11:07:54] [Rank 0] step:6601/10000 train_time:278944ms step_avg:42.26ms +[2025-09-11 11:07:55] [Rank 0] step:6621/10000 train_time:279634ms step_avg:42.23ms +[2025-09-11 11:07:55] [Rank 0] step:6621/10000 train_time:279634ms step_avg:42.23ms +[2025-09-11 11:07:55] [Rank 0] step:6641/10000 train_time:280325ms step_avg:42.21ms +[2025-09-11 11:07:55] [Rank 0] step:6641/10000 train_time:280325ms step_avg:42.21ms +[2025-09-11 11:07:56] [Rank 0] step:6661/10000 train_time:281017ms step_avg:42.19ms +[2025-09-11 11:07:56] [Rank 0] step:6661/10000 train_time:281017ms step_avg:42.19ms +[2025-09-11 11:07:57] [Rank 0] step:6681/10000 train_time:281716ms step_avg:42.17ms +[2025-09-11 11:07:57] [Rank 0] step:6681/10000 train_time:281716ms step_avg:42.17ms +[2025-09-11 11:07:57] [Rank 0] step:6701/10000 train_time:282413ms step_avg:42.14ms +[2025-09-11 11:07:57] [Rank 0] step:6701/10000 train_time:282413ms step_avg:42.14ms +[2025-09-11 11:07:58] [Rank 0] step:6721/10000 train_time:283111ms step_avg:42.12ms 
+[2025-09-11 11:07:58] [Rank 0] step:6721/10000 train_time:283111ms step_avg:42.12ms +[2025-09-11 11:07:59] [Rank 0] step:6741/10000 train_time:283810ms step_avg:42.10ms +[2025-09-11 11:07:59] [Rank 0] step:6741/10000 train_time:283810ms step_avg:42.10ms +[2025-09-11 11:07:59] [Rank 0] step:6761/10000 train_time:284507ms step_avg:42.08ms +[2025-09-11 11:07:59] [Rank 0] step:6761/10000 train_time:284507ms step_avg:42.08ms +[2025-09-11 11:08:00] [Rank 0] step:6781/10000 train_time:285207ms step_avg:42.06ms +[2025-09-11 11:08:00] [Rank 0] step:6781/10000 train_time:285207ms step_avg:42.06ms +[2025-09-11 11:08:01] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 11:08:01] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 11:08:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 11:08:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 11:08:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 11:08:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 11:08:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:08:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:08:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 11:08:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 11:08:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 11:08:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 11:08:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 11:08:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 11:08:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 11:08:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 11:08:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 11:08:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 11:08:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 11:08:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 11:08:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 11:08:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 11:08:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 11:08:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 11:08:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 11:08:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 11:08:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 11:08:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 11:08:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 11:08:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 11:08:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 11:08:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 11:08:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 11:08:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 11:08:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 11:08:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 11:08:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 11:08:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 11:08:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 11:08:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 11:08:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 11:08:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 11:08:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 11:08:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 11:08:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:08:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:08:11] [Rank 0] PRINT: step:6800/10000 val_loss:4.9293 total_sharp:7.8830e-03 L1_sharp:4.8931e-03 L2_sharp:1.0856e-03 L3_sharp:1.1023e-03 L4_sharp:1.1899e-03 L5_sharp:1.4632e-03 L6_sharp:1.4871e-03 L7_sharp:1.1765e-03 L8_sharp:3.1887e-03 L9_sharp:3.4779e-03 L10_sharp:3.9639e-03 L11_sharp:5.3471e-03 L12_sharp:1.1596e-02 total_fnorm:4.9062e+00 total_l1_linf:1.0240e+04 total_spectral:2.4375e+00 L1_fnorm:1.0312e+00 L2_fnorm:9.8828e-01 L3_fnorm:9.9219e-01 L4_fnorm:1.0000e+00 L5_fnorm:9.9609e-01 L6_fnorm:1.0000e+00 L7_fnorm:9.9609e-01 L8_fnorm:9.7656e-01 L9_fnorm:1.0000e+00 L10_fnorm:1.0000e+00 L11_fnorm:1.0000e+00 L12_fnorm:9.7656e-01 L1_l1linf:2.4219e-01 L2_l1linf:2.3828e-01 L3_l1linf:2.2266e-01 L4_l1linf:2.1875e-01 L5_l1linf:2.1191e-01 L6_l1linf:2.1289e-01 L7_l1linf:2.1387e-01 L8_l1linf:2.0117e-01 L9_l1linf:1.9434e-01 L10_l1linf:1.9727e-01 L11_l1linf:1.9922e-01 L12_l1linf:1.8164e-01 L1_spectral:1.3430e-02 L2_spectral:1.3142e-02 L3_spectral:1.3187e-02 L4_spectral:1.3236e-02 L5_spectral:1.3267e-02 L6_spectral:1.3188e-02 L7_spectral:1.3171e-02 L8_spectral:1.3015e-02 L9_spectral:1.3247e-02 L10_spectral:1.3244e-02 L11_spectral:1.3142e-02 L12_spectral:1.3324e-02 train_time:285885ms step_avg:42.04ms +[2025-09-11 11:08:11] [Rank 0] PRINT: step:6800/10000 val_loss:4.9293 total_sharp:7.8830e-03 L1_sharp:4.8931e-03 L2_sharp:1.0856e-03 L3_sharp:1.1023e-03 L4_sharp:1.1899e-03 L5_sharp:1.4632e-03 L6_sharp:1.4871e-03 L7_sharp:1.1765e-03 L8_sharp:3.1887e-03 L9_sharp:3.4779e-03 L10_sharp:3.9639e-03 L11_sharp:5.3471e-03 L12_sharp:1.1596e-02 total_fnorm:4.9062e+00 total_l1_linf:1.0240e+04 total_spectral:2.4375e+00 L1_fnorm:1.0312e+00 L2_fnorm:9.8828e-01 L3_fnorm:9.9219e-01 L4_fnorm:1.0000e+00 L5_fnorm:9.9609e-01 L6_fnorm:1.0000e+00 L7_fnorm:9.9609e-01 L8_fnorm:9.7656e-01 L9_fnorm:1.0000e+00 L10_fnorm:1.0000e+00 L11_fnorm:1.0000e+00 L12_fnorm:9.7656e-01 L1_l1linf:2.4219e-01 L2_l1linf:2.3828e-01 L3_l1linf:2.2266e-01 L4_l1linf:2.1875e-01 L5_l1linf:2.1191e-01 
L6_l1linf:2.1289e-01 L7_l1linf:2.1387e-01 L8_l1linf:2.0117e-01 L9_l1linf:1.9434e-01 L10_l1linf:1.9727e-01 L11_l1linf:1.9922e-01 L12_l1linf:1.8164e-01 L1_spectral:1.3430e-02 L2_spectral:1.3142e-02 L3_spectral:1.3187e-02 L4_spectral:1.3236e-02 L5_spectral:1.3267e-02 L6_spectral:1.3188e-02 L7_spectral:1.3171e-02 L8_spectral:1.3015e-02 L9_spectral:1.3247e-02 L10_spectral:1.3244e-02 L11_spectral:1.3142e-02 L12_spectral:1.3324e-02 train_time:285885ms step_avg:42.04ms +[2025-09-11 11:08:12] [Rank 0] step:6801/10000 train_time:287191ms step_avg:42.23ms +[2025-09-11 11:08:12] [Rank 0] step:6801/10000 train_time:287191ms step_avg:42.23ms +[2025-09-11 11:08:13] [Rank 0] step:6821/10000 train_time:287909ms step_avg:42.21ms +[2025-09-11 11:08:13] [Rank 0] step:6821/10000 train_time:287909ms step_avg:42.21ms +[2025-09-11 11:08:14] [Rank 0] step:6841/10000 train_time:288611ms step_avg:42.19ms +[2025-09-11 11:08:14] [Rank 0] step:6841/10000 train_time:288611ms step_avg:42.19ms +[2025-09-11 11:08:14] [Rank 0] step:6861/10000 train_time:289310ms step_avg:42.17ms +[2025-09-11 11:08:14] [Rank 0] step:6861/10000 train_time:289310ms step_avg:42.17ms +[2025-09-11 11:08:15] [Rank 0] step:6881/10000 train_time:290009ms step_avg:42.15ms +[2025-09-11 11:08:15] [Rank 0] step:6881/10000 train_time:290009ms step_avg:42.15ms +[2025-09-11 11:08:16] [Rank 0] step:6901/10000 train_time:290706ms step_avg:42.13ms +[2025-09-11 11:08:16] [Rank 0] step:6901/10000 train_time:290706ms step_avg:42.13ms +[2025-09-11 11:08:16] [Rank 0] step:6921/10000 train_time:291403ms step_avg:42.10ms +[2025-09-11 11:08:16] [Rank 0] step:6921/10000 train_time:291403ms step_avg:42.10ms +[2025-09-11 11:08:17] [Rank 0] step:6941/10000 train_time:292102ms step_avg:42.08ms +[2025-09-11 11:08:17] [Rank 0] step:6941/10000 train_time:292102ms step_avg:42.08ms +[2025-09-11 11:08:18] [Rank 0] step:6961/10000 train_time:292800ms step_avg:42.06ms +[2025-09-11 11:08:18] [Rank 0] step:6961/10000 train_time:292800ms step_avg:42.06ms 
+[2025-09-11 11:08:18] [Rank 0] step:6981/10000 train_time:293499ms step_avg:42.04ms +[2025-09-11 11:08:18] [Rank 0] step:6981/10000 train_time:293499ms step_avg:42.04ms +[2025-09-11 11:08:19] [Rank 0] step:7001/10000 train_time:294199ms step_avg:42.02ms +[2025-09-11 11:08:19] [Rank 0] step:7001/10000 train_time:294199ms step_avg:42.02ms +[2025-09-11 11:08:20] [Rank 0] step:7021/10000 train_time:294897ms step_avg:42.00ms +[2025-09-11 11:08:20] [Rank 0] step:7021/10000 train_time:294897ms step_avg:42.00ms +[2025-09-11 11:08:21] [Rank 0] step:7041/10000 train_time:295594ms step_avg:41.98ms +[2025-09-11 11:08:21] [Rank 0] step:7041/10000 train_time:295594ms step_avg:41.98ms +[2025-09-11 11:08:21] [Rank 0] step:7061/10000 train_time:296294ms step_avg:41.96ms +[2025-09-11 11:08:21] [Rank 0] step:7061/10000 train_time:296294ms step_avg:41.96ms +[2025-09-11 11:08:22] [Rank 0] step:7081/10000 train_time:296992ms step_avg:41.94ms +[2025-09-11 11:08:22] [Rank 0] step:7081/10000 train_time:296992ms step_avg:41.94ms +[2025-09-11 11:08:23] [Rank 0] step:7101/10000 train_time:298173ms step_avg:41.99ms +[2025-09-11 11:08:23] [Rank 0] step:7101/10000 train_time:298173ms step_avg:41.99ms +[2025-09-11 11:08:24] [Rank 0] step:7121/10000 train_time:298872ms step_avg:41.97ms +[2025-09-11 11:08:24] [Rank 0] step:7121/10000 train_time:298872ms step_avg:41.97ms +[2025-09-11 11:08:25] [Rank 0] step:7141/10000 train_time:299570ms step_avg:41.95ms +[2025-09-11 11:08:25] [Rank 0] step:7141/10000 train_time:299570ms step_avg:41.95ms +[2025-09-11 11:08:25] [Rank 0] step:7161/10000 train_time:300558ms step_avg:41.97ms +[2025-09-11 11:08:25] [Rank 0] step:7161/10000 train_time:300558ms step_avg:41.97ms +[2025-09-11 11:08:26] [Rank 0] step:7181/10000 train_time:301255ms step_avg:41.95ms +[2025-09-11 11:08:26] [Rank 0] step:7181/10000 train_time:301255ms step_avg:41.95ms +[2025-09-11 11:08:27] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 11:08:27] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 11:08:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 11:08:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 11:08:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 11:08:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 11:08:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:08:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:08:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 11:08:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 11:08:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 11:08:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 11:08:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 11:08:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 11:08:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 11:08:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 11:08:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 11:08:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 11:08:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 11:08:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 11:08:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 11:08:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 11:08:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 11:08:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 11:08:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 11:08:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 11:08:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 11:08:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 11:08:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 11:08:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 11:08:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 11:08:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 11:08:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 11:08:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 11:08:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 11:08:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 11:08:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 11:08:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 11:08:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 11:08:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 11:08:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 11:08:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 11:08:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 11:08:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 11:08:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:08:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:08:37] [Rank 0] PRINT: step:7200/10000 val_loss:4.8970 total_sharp:8.1102e-03 L1_sharp:2.6032e-03 L2_sharp:8.2170e-04 L3_sharp:6.2300e-04 L4_sharp:9.3795e-04 L5_sharp:1.3252e-03 L6_sharp:1.3629e-03 L7_sharp:1.3350e-03 L8_sharp:2.9480e-03 L9_sharp:3.3213e-03 L10_sharp:3.9920e-03 L11_sharp:4.8893e-03 L12_sharp:1.1249e-02 total_fnorm:4.0938e+00 total_l1_linf:8.1280e+03 total_spectral:2.0469e+00 L1_fnorm:8.9844e-01 L2_fnorm:8.5547e-01 L3_fnorm:8.5547e-01 L4_fnorm:8.6719e-01 L5_fnorm:8.5938e-01 L6_fnorm:8.6328e-01 L7_fnorm:8.5938e-01 L8_fnorm:8.3984e-01 L9_fnorm:8.5938e-01 L10_fnorm:8.5938e-01 L11_fnorm:8.6328e-01 L12_fnorm:8.3984e-01 L1_l1linf:1.9434e-01 L2_l1linf:1.9727e-01 L3_l1linf:1.8359e-01 L4_l1linf:1.8457e-01 L5_l1linf:1.7676e-01 L6_l1linf:1.7676e-01 L7_l1linf:1.7383e-01 L8_l1linf:1.6113e-01 L9_l1linf:1.6016e-01 L10_l1linf:1.6016e-01 L11_l1linf:1.5918e-01 L12_l1linf:1.4941e-01 L1_spectral:1.1808e-02 L2_spectral:1.1531e-02 L3_spectral:1.1572e-02 L4_spectral:1.1625e-02 L5_spectral:1.1580e-02 L6_spectral:1.1657e-02 L7_spectral:1.1561e-02 L8_spectral:1.1461e-02 L9_spectral:1.1653e-02 L10_spectral:1.1642e-02 L11_spectral:1.1628e-02 L12_spectral:1.1623e-02 train_time:301933ms step_avg:41.94ms +[2025-09-11 11:08:37] [Rank 0] PRINT: step:7200/10000 
val_loss:4.8970 total_sharp:8.1102e-03 L1_sharp:2.6032e-03 L2_sharp:8.2170e-04 L3_sharp:6.2300e-04 L4_sharp:9.3795e-04 L5_sharp:1.3252e-03 L6_sharp:1.3629e-03 L7_sharp:1.3350e-03 L8_sharp:2.9480e-03 L9_sharp:3.3213e-03 L10_sharp:3.9920e-03 L11_sharp:4.8893e-03 L12_sharp:1.1249e-02 total_fnorm:4.0938e+00 total_l1_linf:8.1280e+03 total_spectral:2.0469e+00 L1_fnorm:8.9844e-01 L2_fnorm:8.5547e-01 L3_fnorm:8.5547e-01 L4_fnorm:8.6719e-01 L5_fnorm:8.5938e-01 L6_fnorm:8.6328e-01 L7_fnorm:8.5938e-01 L8_fnorm:8.3984e-01 L9_fnorm:8.5938e-01 L10_fnorm:8.5938e-01 L11_fnorm:8.6328e-01 L12_fnorm:8.3984e-01 L1_l1linf:1.9434e-01 L2_l1linf:1.9727e-01 L3_l1linf:1.8359e-01 L4_l1linf:1.8457e-01 L5_l1linf:1.7676e-01 L6_l1linf:1.7676e-01 L7_l1linf:1.7383e-01 L8_l1linf:1.6113e-01 L9_l1linf:1.6016e-01 L10_l1linf:1.6016e-01 L11_l1linf:1.5918e-01 L12_l1linf:1.4941e-01 L1_spectral:1.1808e-02 L2_spectral:1.1531e-02 L3_spectral:1.1572e-02 L4_spectral:1.1625e-02 L5_spectral:1.1580e-02 L6_spectral:1.1657e-02 L7_spectral:1.1561e-02 L8_spectral:1.1461e-02 L9_spectral:1.1653e-02 L10_spectral:1.1642e-02 L11_spectral:1.1628e-02 L12_spectral:1.1623e-02 train_time:301933ms step_avg:41.94ms +[2025-09-11 11:08:39] [Rank 0] step:7201/10000 train_time:303569ms step_avg:42.16ms +[2025-09-11 11:08:39] [Rank 0] step:7201/10000 train_time:303569ms step_avg:42.16ms +[2025-09-11 11:08:39] [Rank 0] step:7221/10000 train_time:304348ms step_avg:42.15ms +[2025-09-11 11:08:39] [Rank 0] step:7221/10000 train_time:304348ms step_avg:42.15ms +[2025-09-11 11:08:40] [Rank 0] step:7241/10000 train_time:305048ms step_avg:42.13ms +[2025-09-11 11:08:40] [Rank 0] step:7241/10000 train_time:305048ms step_avg:42.13ms +[2025-09-11 11:08:41] [Rank 0] step:7261/10000 train_time:305750ms step_avg:42.11ms +[2025-09-11 11:08:41] [Rank 0] step:7261/10000 train_time:305750ms step_avg:42.11ms +[2025-09-11 11:08:41] [Rank 0] step:7281/10000 train_time:306454ms step_avg:42.09ms +[2025-09-11 11:08:41] [Rank 0] step:7281/10000 
train_time:306454ms step_avg:42.09ms +[2025-09-11 11:08:42] [Rank 0] step:7301/10000 train_time:307153ms step_avg:42.07ms +[2025-09-11 11:08:42] [Rank 0] step:7301/10000 train_time:307153ms step_avg:42.07ms +[2025-09-11 11:08:43] [Rank 0] step:7321/10000 train_time:307852ms step_avg:42.05ms +[2025-09-11 11:08:43] [Rank 0] step:7321/10000 train_time:307852ms step_avg:42.05ms +[2025-09-11 11:08:44] [Rank 0] step:7341/10000 train_time:308553ms step_avg:42.03ms +[2025-09-11 11:08:44] [Rank 0] step:7341/10000 train_time:308553ms step_avg:42.03ms +[2025-09-11 11:08:44] [Rank 0] step:7361/10000 train_time:309253ms step_avg:42.01ms +[2025-09-11 11:08:44] [Rank 0] step:7361/10000 train_time:309253ms step_avg:42.01ms +[2025-09-11 11:08:45] [Rank 0] step:7381/10000 train_time:309953ms step_avg:41.99ms +[2025-09-11 11:08:45] [Rank 0] step:7381/10000 train_time:309953ms step_avg:41.99ms +[2025-09-11 11:08:46] [Rank 0] step:7401/10000 train_time:310652ms step_avg:41.97ms +[2025-09-11 11:08:46] [Rank 0] step:7401/10000 train_time:310652ms step_avg:41.97ms +[2025-09-11 11:08:46] [Rank 0] step:7421/10000 train_time:311351ms step_avg:41.96ms +[2025-09-11 11:08:46] [Rank 0] step:7421/10000 train_time:311351ms step_avg:41.96ms +[2025-09-11 11:08:47] [Rank 0] step:7441/10000 train_time:312051ms step_avg:41.94ms +[2025-09-11 11:08:47] [Rank 0] step:7441/10000 train_time:312051ms step_avg:41.94ms +[2025-09-11 11:08:48] [Rank 0] step:7461/10000 train_time:312751ms step_avg:41.92ms +[2025-09-11 11:08:48] [Rank 0] step:7461/10000 train_time:312751ms step_avg:41.92ms +[2025-09-11 11:08:49] [Rank 0] step:7481/10000 train_time:313455ms step_avg:41.90ms +[2025-09-11 11:08:49] [Rank 0] step:7481/10000 train_time:313455ms step_avg:41.90ms +[2025-09-11 11:08:49] [Rank 0] step:7501/10000 train_time:314157ms step_avg:41.88ms +[2025-09-11 11:08:49] [Rank 0] step:7501/10000 train_time:314157ms step_avg:41.88ms +[2025-09-11 11:08:50] [Rank 0] step:7521/10000 train_time:314858ms step_avg:41.86ms 
+[2025-09-11 11:08:50] [Rank 0] step:7521/10000 train_time:314858ms step_avg:41.86ms +[2025-09-11 11:08:51] [Rank 0] step:7541/10000 train_time:315556ms step_avg:41.85ms +[2025-09-11 11:08:51] [Rank 0] step:7541/10000 train_time:315556ms step_avg:41.85ms +[2025-09-11 11:08:51] [Rank 0] step:7561/10000 train_time:316258ms step_avg:41.83ms +[2025-09-11 11:08:51] [Rank 0] step:7561/10000 train_time:316258ms step_avg:41.83ms +[2025-09-11 11:08:52] [Rank 0] step:7581/10000 train_time:316959ms step_avg:41.81ms +[2025-09-11 11:08:52] [Rank 0] step:7581/10000 train_time:316959ms step_avg:41.81ms +[2025-09-11 11:08:53] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 11:08:53] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 11:08:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 11:08:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 11:08:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 11:08:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 11:08:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:08:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:08:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 11:08:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 11:08:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 11:08:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 11:08:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 11:08:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 11:08:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 11:08:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 11:09:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 11:09:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 11:09:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 11:09:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 11:09:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 11:09:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 11:09:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 11:09:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 11:09:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 11:09:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 11:09:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 11:09:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 11:09:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 11:09:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 11:09:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 11:09:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 11:09:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 11:09:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 11:09:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 11:09:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 11:09:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 11:09:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 11:09:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 11:09:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 11:09:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 11:09:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 11:09:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 11:09:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 11:09:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:09:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:09:03] [Rank 0] PRINT: step:7600/10000 val_loss:4.8749 total_sharp:9.2400e-03 L1_sharp:3.4221e-03 L2_sharp:9.2039e-04 L3_sharp:9.2450e-04 L4_sharp:6.8777e-04 L5_sharp:1.4116e-03 L6_sharp:1.3404e-03 L7_sharp:1.4779e-03 L8_sharp:3.3717e-03 L9_sharp:3.4748e-03 L10_sharp:3.6444e-03 L11_sharp:5.0245e-03 L12_sharp:1.3297e-02 total_fnorm:3.2969e+00 total_l1_linf:6.2080e+03 total_spectral:1.6250e+00 L1_fnorm:7.5000e-01 L2_fnorm:7.1094e-01 L3_fnorm:7.2266e-01 L4_fnorm:7.2656e-01 L5_fnorm:7.1875e-01 L6_fnorm:7.2266e-01 L7_fnorm:7.1875e-01 L8_fnorm:6.9922e-01 L9_fnorm:7.1875e-01 L10_fnorm:7.1875e-01 L11_fnorm:7.1875e-01 L12_fnorm:7.0312e-01 L1_l1linf:1.5332e-01 L2_l1linf:1.4844e-01 L3_l1linf:1.4453e-01 L4_l1linf:1.4551e-01 L5_l1linf:1.4062e-01 L6_l1linf:1.3867e-01 L7_l1linf:1.4062e-01 L8_l1linf:1.2988e-01 L9_l1linf:1.2500e-01 L10_l1linf:1.2500e-01 L11_l1linf:1.2891e-01 L12_l1linf:1.2061e-01 L1_spectral:9.9621e-03 L2_spectral:9.8954e-03 L3_spectral:9.9005e-03 L4_spectral:1.0005e-02 L5_spectral:9.8781e-03 L6_spectral:9.9434e-03 L7_spectral:9.9368e-03 L8_spectral:9.9124e-03 L9_spectral:9.9327e-03 L10_spectral:9.9733e-03 L11_spectral:9.9255e-03 L12_spectral:9.8920e-03 train_time:317641ms step_avg:41.79ms +[2025-09-11 11:09:03] [Rank 0] PRINT: step:7600/10000 val_loss:4.8749 total_sharp:9.2400e-03 L1_sharp:3.4221e-03 L2_sharp:9.2039e-04 L3_sharp:9.2450e-04 L4_sharp:6.8777e-04 L5_sharp:1.4116e-03 L6_sharp:1.3404e-03 L7_sharp:1.4779e-03 L8_sharp:3.3717e-03 L9_sharp:3.4748e-03 L10_sharp:3.6444e-03 L11_sharp:5.0245e-03 L12_sharp:1.3297e-02 total_fnorm:3.2969e+00 total_l1_linf:6.2080e+03 total_spectral:1.6250e+00 L1_fnorm:7.5000e-01 L2_fnorm:7.1094e-01 L3_fnorm:7.2266e-01 L4_fnorm:7.2656e-01 L5_fnorm:7.1875e-01 L6_fnorm:7.2266e-01 L7_fnorm:7.1875e-01 L8_fnorm:6.9922e-01 L9_fnorm:7.1875e-01 L10_fnorm:7.1875e-01 L11_fnorm:7.1875e-01 L12_fnorm:7.0312e-01 L1_l1linf:1.5332e-01 L2_l1linf:1.4844e-01 L3_l1linf:1.4453e-01 L4_l1linf:1.4551e-01 L5_l1linf:1.4062e-01 
L6_l1linf:1.3867e-01 L7_l1linf:1.4062e-01 L8_l1linf:1.2988e-01 L9_l1linf:1.2500e-01 L10_l1linf:1.2500e-01 L11_l1linf:1.2891e-01 L12_l1linf:1.2061e-01 L1_spectral:9.9621e-03 L2_spectral:9.8954e-03 L3_spectral:9.9005e-03 L4_spectral:1.0005e-02 L5_spectral:9.8781e-03 L6_spectral:9.9434e-03 L7_spectral:9.9368e-03 L8_spectral:9.9124e-03 L9_spectral:9.9327e-03 L10_spectral:9.9733e-03 L11_spectral:9.9255e-03 L12_spectral:9.8920e-03 train_time:317641ms step_avg:41.79ms +[2025-09-11 11:09:04] [Rank 0] step:7601/10000 train_time:318945ms step_avg:41.96ms +[2025-09-11 11:09:04] [Rank 0] step:7601/10000 train_time:318945ms step_avg:41.96ms +[2025-09-11 11:09:05] [Rank 0] step:7621/10000 train_time:319669ms step_avg:41.95ms +[2025-09-11 11:09:05] [Rank 0] step:7621/10000 train_time:319669ms step_avg:41.95ms +[2025-09-11 11:09:05] [Rank 0] step:7641/10000 train_time:320371ms step_avg:41.93ms +[2025-09-11 11:09:05] [Rank 0] step:7641/10000 train_time:320371ms step_avg:41.93ms +[2025-09-11 11:09:06] [Rank 0] step:7661/10000 train_time:321071ms step_avg:41.91ms +[2025-09-11 11:09:06] [Rank 0] step:7661/10000 train_time:321071ms step_avg:41.91ms +[2025-09-11 11:09:07] [Rank 0] step:7681/10000 train_time:321772ms step_avg:41.89ms +[2025-09-11 11:09:07] [Rank 0] step:7681/10000 train_time:321772ms step_avg:41.89ms +[2025-09-11 11:09:08] [Rank 0] step:7701/10000 train_time:322474ms step_avg:41.87ms +[2025-09-11 11:09:08] [Rank 0] step:7701/10000 train_time:322474ms step_avg:41.87ms +[2025-09-11 11:09:08] [Rank 0] step:7721/10000 train_time:323175ms step_avg:41.86ms +[2025-09-11 11:09:08] [Rank 0] step:7721/10000 train_time:323175ms step_avg:41.86ms +[2025-09-11 11:09:09] [Rank 0] step:7741/10000 train_time:323876ms step_avg:41.84ms +[2025-09-11 11:09:09] [Rank 0] step:7741/10000 train_time:323876ms step_avg:41.84ms +[2025-09-11 11:09:10] [Rank 0] step:7761/10000 train_time:324577ms step_avg:41.82ms +[2025-09-11 11:09:10] [Rank 0] step:7761/10000 train_time:324577ms step_avg:41.82ms 
+[2025-09-11 11:09:10] [Rank 0] step:7781/10000 train_time:325280ms step_avg:41.80ms +[2025-09-11 11:09:10] [Rank 0] step:7781/10000 train_time:325280ms step_avg:41.80ms +[2025-09-11 11:09:11] [Rank 0] step:7801/10000 train_time:325979ms step_avg:41.79ms +[2025-09-11 11:09:11] [Rank 0] step:7801/10000 train_time:325979ms step_avg:41.79ms +[2025-09-11 11:09:12] [Rank 0] step:7821/10000 train_time:326681ms step_avg:41.77ms +[2025-09-11 11:09:12] [Rank 0] step:7821/10000 train_time:326681ms step_avg:41.77ms +[2025-09-11 11:09:12] [Rank 0] step:7841/10000 train_time:327383ms step_avg:41.75ms +[2025-09-11 11:09:12] [Rank 0] step:7841/10000 train_time:327383ms step_avg:41.75ms +[2025-09-11 11:09:13] [Rank 0] step:7861/10000 train_time:328086ms step_avg:41.74ms +[2025-09-11 11:09:13] [Rank 0] step:7861/10000 train_time:328086ms step_avg:41.74ms +[2025-09-11 11:09:14] [Rank 0] step:7881/10000 train_time:328789ms step_avg:41.72ms +[2025-09-11 11:09:14] [Rank 0] step:7881/10000 train_time:328789ms step_avg:41.72ms +[2025-09-11 11:09:15] [Rank 0] step:7901/10000 train_time:329492ms step_avg:41.70ms +[2025-09-11 11:09:15] [Rank 0] step:7901/10000 train_time:329492ms step_avg:41.70ms +[2025-09-11 11:09:15] [Rank 0] step:7921/10000 train_time:330193ms step_avg:41.69ms +[2025-09-11 11:09:15] [Rank 0] step:7921/10000 train_time:330193ms step_avg:41.69ms +[2025-09-11 11:09:16] [Rank 0] step:7941/10000 train_time:330895ms step_avg:41.67ms +[2025-09-11 11:09:16] [Rank 0] step:7941/10000 train_time:330895ms step_avg:41.67ms +[2025-09-11 11:09:17] [Rank 0] step:7961/10000 train_time:331595ms step_avg:41.65ms +[2025-09-11 11:09:17] [Rank 0] step:7961/10000 train_time:331595ms step_avg:41.65ms +[2025-09-11 11:09:17] [Rank 0] step:7981/10000 train_time:332297ms step_avg:41.64ms +[2025-09-11 11:09:17] [Rank 0] step:7981/10000 train_time:332297ms step_avg:41.64ms +[2025-09-11 11:09:18] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 11:09:18] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 11:09:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 11:09:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 11:09:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 11:09:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 11:09:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:09:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:09:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 11:09:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 11:09:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 11:09:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 11:09:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 11:09:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 11:09:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 11:09:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 11:09:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 11:09:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 11:09:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 11:09:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 11:09:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 11:09:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 11:09:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 11:09:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 11:09:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 11:09:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 11:09:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 11:09:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 11:09:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 11:09:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 11:09:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 11:09:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 11:09:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 11:09:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 11:09:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 11:09:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 11:09:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 11:09:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 11:09:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 11:09:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 11:09:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 11:09:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 11:09:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 11:09:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 11:09:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:09:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:09:28] [Rank 0] PRINT: step:8000/10000 val_loss:4.8632 total_sharp:8.6709e-03 L1_sharp:3.6073e-03 L2_sharp:1.1140e-03 L3_sharp:1.1490e-03 L4_sharp:8.4171e-04 L5_sharp:1.4368e-03 L6_sharp:1.3730e-03 L7_sharp:1.2589e-03 L8_sharp:2.6939e-03 L9_sharp:2.9645e-03 L10_sharp:3.3243e-03 L11_sharp:4.8849e-03 L12_sharp:1.5651e-02 total_fnorm:2.6875e+00 total_l1_linf:4.7360e+03 total_spectral:1.3203e+00 L1_fnorm:6.3281e-01 L2_fnorm:5.8984e-01 L3_fnorm:5.8984e-01 L4_fnorm:5.9375e-01 L5_fnorm:5.8594e-01 L6_fnorm:5.8984e-01 L7_fnorm:5.8984e-01 L8_fnorm:5.7031e-01 L9_fnorm:5.8594e-01 L10_fnorm:5.8594e-01 L11_fnorm:5.8984e-01 L12_fnorm:5.7422e-01 L1_l1linf:1.2061e-01 L2_l1linf:1.1768e-01 L3_l1linf:1.1279e-01 L4_l1linf:1.1084e-01 L5_l1linf:1.0791e-01 L6_l1linf:1.0596e-01 L7_l1linf:1.0547e-01 L8_l1linf:1.0156e-01 L9_l1linf:9.5703e-02 L10_l1linf:9.6680e-02 L11_l1linf:9.6191e-02 L12_l1linf:9.3750e-02 L1_spectral:8.3222e-03 L2_spectral:8.4284e-03 L3_spectral:8.3947e-03 L4_spectral:8.4713e-03 L5_spectral:8.2787e-03 L6_spectral:8.2880e-03 L7_spectral:8.2829e-03 L8_spectral:8.3236e-03 L9_spectral:8.3991e-03 L10_spectral:8.3706e-03 L11_spectral:8.2876e-03 L12_spectral:8.2653e-03 train_time:332978ms step_avg:41.62ms +[2025-09-11 11:09:28] [Rank 0] PRINT: step:8000/10000 
val_loss:4.8632 total_sharp:8.6709e-03 L1_sharp:3.6073e-03 L2_sharp:1.1140e-03 L3_sharp:1.1490e-03 L4_sharp:8.4171e-04 L5_sharp:1.4368e-03 L6_sharp:1.3730e-03 L7_sharp:1.2589e-03 L8_sharp:2.6939e-03 L9_sharp:2.9645e-03 L10_sharp:3.3243e-03 L11_sharp:4.8849e-03 L12_sharp:1.5651e-02 total_fnorm:2.6875e+00 total_l1_linf:4.7360e+03 total_spectral:1.3203e+00 L1_fnorm:6.3281e-01 L2_fnorm:5.8984e-01 L3_fnorm:5.8984e-01 L4_fnorm:5.9375e-01 L5_fnorm:5.8594e-01 L6_fnorm:5.8984e-01 L7_fnorm:5.8984e-01 L8_fnorm:5.7031e-01 L9_fnorm:5.8594e-01 L10_fnorm:5.8594e-01 L11_fnorm:5.8984e-01 L12_fnorm:5.7422e-01 L1_l1linf:1.2061e-01 L2_l1linf:1.1768e-01 L3_l1linf:1.1279e-01 L4_l1linf:1.1084e-01 L5_l1linf:1.0791e-01 L6_l1linf:1.0596e-01 L7_l1linf:1.0547e-01 L8_l1linf:1.0156e-01 L9_l1linf:9.5703e-02 L10_l1linf:9.6680e-02 L11_l1linf:9.6191e-02 L12_l1linf:9.3750e-02 L1_spectral:8.3222e-03 L2_spectral:8.4284e-03 L3_spectral:8.3947e-03 L4_spectral:8.4713e-03 L5_spectral:8.2787e-03 L6_spectral:8.2880e-03 L7_spectral:8.2829e-03 L8_spectral:8.3236e-03 L9_spectral:8.3991e-03 L10_spectral:8.3706e-03 L11_spectral:8.2876e-03 L12_spectral:8.2653e-03 train_time:332978ms step_avg:41.62ms +[2025-09-11 11:09:29] [Rank 0] step:8001/10000 train_time:334297ms step_avg:41.78ms +[2025-09-11 11:09:29] [Rank 0] step:8001/10000 train_time:334297ms step_avg:41.78ms +[2025-09-11 11:09:30] [Rank 0] step:8021/10000 train_time:335023ms step_avg:41.77ms +[2025-09-11 11:09:30] [Rank 0] step:8021/10000 train_time:335023ms step_avg:41.77ms +[2025-09-11 11:09:31] [Rank 0] step:8041/10000 train_time:335726ms step_avg:41.75ms +[2025-09-11 11:09:31] [Rank 0] step:8041/10000 train_time:335726ms step_avg:41.75ms +[2025-09-11 11:09:32] [Rank 0] step:8061/10000 train_time:336431ms step_avg:41.74ms +[2025-09-11 11:09:32] [Rank 0] step:8061/10000 train_time:336431ms step_avg:41.74ms +[2025-09-11 11:09:32] [Rank 0] step:8081/10000 train_time:337130ms step_avg:41.72ms +[2025-09-11 11:09:32] [Rank 0] step:8081/10000 
train_time:337130ms step_avg:41.72ms +[2025-09-11 11:09:33] [Rank 0] step:8101/10000 train_time:337830ms step_avg:41.70ms +[2025-09-11 11:09:33] [Rank 0] step:8101/10000 train_time:337830ms step_avg:41.70ms +[2025-09-11 11:09:34] [Rank 0] step:8121/10000 train_time:338536ms step_avg:41.69ms +[2025-09-11 11:09:34] [Rank 0] step:8121/10000 train_time:338536ms step_avg:41.69ms +[2025-09-11 11:09:35] [Rank 0] step:8141/10000 train_time:339991ms step_avg:41.76ms +[2025-09-11 11:09:35] [Rank 0] step:8141/10000 train_time:339991ms step_avg:41.76ms +[2025-09-11 11:09:36] [Rank 0] step:8161/10000 train_time:340696ms step_avg:41.75ms +[2025-09-11 11:09:36] [Rank 0] step:8161/10000 train_time:340696ms step_avg:41.75ms +[2025-09-11 11:09:37] [Rank 0] step:8181/10000 train_time:341410ms step_avg:41.73ms +[2025-09-11 11:09:37] [Rank 0] step:8181/10000 train_time:341410ms step_avg:41.73ms +[2025-09-11 11:09:37] [Rank 0] step:8201/10000 train_time:342119ms step_avg:41.72ms +[2025-09-11 11:09:37] [Rank 0] step:8201/10000 train_time:342119ms step_avg:41.72ms +[2025-09-11 11:09:38] [Rank 0] step:8221/10000 train_time:342828ms step_avg:41.70ms +[2025-09-11 11:09:38] [Rank 0] step:8221/10000 train_time:342828ms step_avg:41.70ms +[2025-09-11 11:09:39] [Rank 0] step:8241/10000 train_time:343545ms step_avg:41.69ms +[2025-09-11 11:09:39] [Rank 0] step:8241/10000 train_time:343545ms step_avg:41.69ms +[2025-09-11 11:09:39] [Rank 0] step:8261/10000 train_time:344253ms step_avg:41.67ms +[2025-09-11 11:09:39] [Rank 0] step:8261/10000 train_time:344253ms step_avg:41.67ms +[2025-09-11 11:09:40] [Rank 0] step:8281/10000 train_time:344957ms step_avg:41.66ms +[2025-09-11 11:09:40] [Rank 0] step:8281/10000 train_time:344957ms step_avg:41.66ms +[2025-09-11 11:09:41] [Rank 0] step:8301/10000 train_time:345665ms step_avg:41.64ms +[2025-09-11 11:09:41] [Rank 0] step:8301/10000 train_time:345665ms step_avg:41.64ms +[2025-09-11 11:09:41] [Rank 0] step:8321/10000 train_time:346373ms step_avg:41.63ms 
+[2025-09-11 11:09:41] [Rank 0] step:8321/10000 train_time:346373ms step_avg:41.63ms +[2025-09-11 11:09:42] [Rank 0] step:8341/10000 train_time:347087ms step_avg:41.61ms +[2025-09-11 11:09:42] [Rank 0] step:8341/10000 train_time:347087ms step_avg:41.61ms +[2025-09-11 11:09:43] [Rank 0] step:8361/10000 train_time:347791ms step_avg:41.60ms +[2025-09-11 11:09:43] [Rank 0] step:8361/10000 train_time:347791ms step_avg:41.60ms +[2025-09-11 11:09:44] [Rank 0] step:8381/10000 train_time:348501ms step_avg:41.58ms +[2025-09-11 11:09:44] [Rank 0] step:8381/10000 train_time:348501ms step_avg:41.58ms +[2025-09-11 11:09:44] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 11:09:44] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 11:09:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 11:09:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 11:09:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 11:09:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 11:09:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:09:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:09:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 11:09:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 11:09:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 11:09:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 11:09:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 11:09:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 11:09:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 11:09:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 11:09:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 11:09:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 11:09:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 11:09:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 11:09:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 11:09:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 11:09:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 11:09:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 11:09:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 11:09:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 11:09:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 11:09:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 11:09:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 11:09:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 11:09:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 11:09:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 11:09:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 11:09:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 11:09:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 11:09:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 11:09:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 11:09:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 11:09:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 11:09:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 11:09:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 11:09:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 11:09:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 11:09:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 11:09:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:09:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:09:54] [Rank 0] PRINT: step:8400/10000 val_loss:4.8443 total_sharp:6.9779e-03 L1_sharp:2.4289e-03 L2_sharp:1.2047e-03 L3_sharp:8.1537e-04 L4_sharp:5.8204e-04 L5_sharp:1.4947e-03 L6_sharp:1.0453e-03 L7_sharp:1.1850e-03 L8_sharp:2.3465e-03 L9_sharp:2.5294e-03 L10_sharp:2.8762e-03 L11_sharp:3.5686e-03 L12_sharp:9.3199e-03 total_fnorm:2.0000e+00 total_l1_linf:3.3280e+03 total_spectral:1.0156e+00 L1_fnorm:5.0781e-01 L2_fnorm:4.6289e-01 L3_fnorm:4.6289e-01 L4_fnorm:4.6680e-01 L5_fnorm:4.5898e-01 L6_fnorm:4.6289e-01 L7_fnorm:4.5898e-01 L8_fnorm:4.4531e-01 L9_fnorm:4.5703e-01 L10_fnorm:4.5508e-01 L11_fnorm:4.5703e-01 L12_fnorm:4.4922e-01 L1_l1linf:9.0332e-02 L2_l1linf:8.7891e-02 L3_l1linf:8.2520e-02 L4_l1linf:8.0566e-02 L5_l1linf:7.7637e-02 L6_l1linf:7.9590e-02 L7_l1linf:7.7148e-02 L8_l1linf:7.1777e-02 L9_l1linf:6.7871e-02 L10_l1linf:6.8848e-02 L11_l1linf:7.3730e-02 L12_l1linf:7.3730e-02 L1_spectral:7.1173e-03 L2_spectral:6.8347e-03 L3_spectral:6.7639e-03 L4_spectral:6.7966e-03 L5_spectral:6.6658e-03 L6_spectral:6.6887e-03 L7_spectral:6.6309e-03 L8_spectral:6.7337e-03 L9_spectral:6.7156e-03 L10_spectral:6.6779e-03 L11_spectral:6.6609e-03 L12_spectral:6.6835e-03 train_time:349191ms step_avg:41.57ms +[2025-09-11 11:09:54] [Rank 0] PRINT: step:8400/10000 val_loss:4.8443 total_sharp:6.9779e-03 L1_sharp:2.4289e-03 L2_sharp:1.2047e-03 L3_sharp:8.1537e-04 L4_sharp:5.8204e-04 L5_sharp:1.4947e-03 L6_sharp:1.0453e-03 L7_sharp:1.1850e-03 L8_sharp:2.3465e-03 L9_sharp:2.5294e-03 L10_sharp:2.8762e-03 L11_sharp:3.5686e-03 L12_sharp:9.3199e-03 total_fnorm:2.0000e+00 total_l1_linf:3.3280e+03 total_spectral:1.0156e+00 L1_fnorm:5.0781e-01 L2_fnorm:4.6289e-01 L3_fnorm:4.6289e-01 L4_fnorm:4.6680e-01 L5_fnorm:4.5898e-01 L6_fnorm:4.6289e-01 L7_fnorm:4.5898e-01 L8_fnorm:4.4531e-01 L9_fnorm:4.5703e-01 L10_fnorm:4.5508e-01 L11_fnorm:4.5703e-01 L12_fnorm:4.4922e-01 L1_l1linf:9.0332e-02 L2_l1linf:8.7891e-02 L3_l1linf:8.2520e-02 L4_l1linf:8.0566e-02 L5_l1linf:7.7637e-02 
L6_l1linf:7.9590e-02 L7_l1linf:7.7148e-02 L8_l1linf:7.1777e-02 L9_l1linf:6.7871e-02 L10_l1linf:6.8848e-02 L11_l1linf:7.3730e-02 L12_l1linf:7.3730e-02 L1_spectral:7.1173e-03 L2_spectral:6.8347e-03 L3_spectral:6.7639e-03 L4_spectral:6.7966e-03 L5_spectral:6.6658e-03 L6_spectral:6.6887e-03 L7_spectral:6.6309e-03 L8_spectral:6.7337e-03 L9_spectral:6.7156e-03 L10_spectral:6.6779e-03 L11_spectral:6.6609e-03 L12_spectral:6.6835e-03 train_time:349191ms step_avg:41.57ms +[2025-09-11 11:09:56] [Rank 0] step:8401/10000 train_time:350538ms step_avg:41.73ms +[2025-09-11 11:09:56] [Rank 0] step:8401/10000 train_time:350538ms step_avg:41.73ms +[2025-09-11 11:09:56] [Rank 0] step:8421/10000 train_time:351276ms step_avg:41.71ms +[2025-09-11 11:09:56] [Rank 0] step:8421/10000 train_time:351276ms step_avg:41.71ms +[2025-09-11 11:09:57] [Rank 0] step:8441/10000 train_time:351988ms step_avg:41.70ms +[2025-09-11 11:09:57] [Rank 0] step:8441/10000 train_time:351988ms step_avg:41.70ms +[2025-09-11 11:09:58] [Rank 0] step:8461/10000 train_time:352697ms step_avg:41.69ms +[2025-09-11 11:09:58] [Rank 0] step:8461/10000 train_time:352697ms step_avg:41.69ms +[2025-09-11 11:09:59] [Rank 0] step:8481/10000 train_time:353408ms step_avg:41.67ms +[2025-09-11 11:09:59] [Rank 0] step:8481/10000 train_time:353408ms step_avg:41.67ms +[2025-09-11 11:09:59] [Rank 0] step:8501/10000 train_time:354117ms step_avg:41.66ms +[2025-09-11 11:09:59] [Rank 0] step:8501/10000 train_time:354117ms step_avg:41.66ms +[2025-09-11 11:10:00] [Rank 0] step:8521/10000 train_time:354825ms step_avg:41.64ms +[2025-09-11 11:10:00] [Rank 0] step:8521/10000 train_time:354825ms step_avg:41.64ms +[2025-09-11 11:10:01] [Rank 0] step:8541/10000 train_time:355532ms step_avg:41.63ms +[2025-09-11 11:10:01] [Rank 0] step:8541/10000 train_time:355532ms step_avg:41.63ms +[2025-09-11 11:10:01] [Rank 0] step:8561/10000 train_time:356389ms step_avg:41.63ms +[2025-09-11 11:10:01] [Rank 0] step:8561/10000 train_time:356389ms step_avg:41.63ms 
+[2025-09-11 11:10:02] [Rank 0] step:8581/10000 train_time:357141ms step_avg:41.62ms +[2025-09-11 11:10:02] [Rank 0] step:8581/10000 train_time:357141ms step_avg:41.62ms +[2025-09-11 11:10:03] [Rank 0] step:8601/10000 train_time:357851ms step_avg:41.61ms +[2025-09-11 11:10:03] [Rank 0] step:8601/10000 train_time:357851ms step_avg:41.61ms +[2025-09-11 11:10:04] [Rank 0] step:8621/10000 train_time:358560ms step_avg:41.59ms +[2025-09-11 11:10:04] [Rank 0] step:8621/10000 train_time:358560ms step_avg:41.59ms +[2025-09-11 11:10:04] [Rank 0] step:8641/10000 train_time:359268ms step_avg:41.58ms +[2025-09-11 11:10:04] [Rank 0] step:8641/10000 train_time:359268ms step_avg:41.58ms +[2025-09-11 11:10:05] [Rank 0] step:8661/10000 train_time:359978ms step_avg:41.56ms +[2025-09-11 11:10:05] [Rank 0] step:8661/10000 train_time:359978ms step_avg:41.56ms +[2025-09-11 11:10:06] [Rank 0] step:8681/10000 train_time:360689ms step_avg:41.55ms +[2025-09-11 11:10:06] [Rank 0] step:8681/10000 train_time:360689ms step_avg:41.55ms +[2025-09-11 11:10:06] [Rank 0] step:8701/10000 train_time:361396ms step_avg:41.53ms +[2025-09-11 11:10:06] [Rank 0] step:8701/10000 train_time:361396ms step_avg:41.53ms +[2025-09-11 11:10:07] [Rank 0] step:8721/10000 train_time:362107ms step_avg:41.52ms +[2025-09-11 11:10:07] [Rank 0] step:8721/10000 train_time:362107ms step_avg:41.52ms +[2025-09-11 11:10:08] [Rank 0] step:8741/10000 train_time:362813ms step_avg:41.51ms +[2025-09-11 11:10:08] [Rank 0] step:8741/10000 train_time:362813ms step_avg:41.51ms +[2025-09-11 11:10:09] [Rank 0] step:8761/10000 train_time:363524ms step_avg:41.49ms +[2025-09-11 11:10:09] [Rank 0] step:8761/10000 train_time:363524ms step_avg:41.49ms +[2025-09-11 11:10:09] [Rank 0] step:8781/10000 train_time:364229ms step_avg:41.48ms +[2025-09-11 11:10:09] [Rank 0] step:8781/10000 train_time:364229ms step_avg:41.48ms +[2025-09-11 11:10:10] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 11:10:10] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 11:10:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 11:10:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 11:10:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 11:10:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 11:10:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:10:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:10:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 11:10:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 11:10:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 11:10:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 11:10:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 11:10:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 11:10:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 11:10:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 11:10:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 11:10:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 11:10:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 11:10:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 11:10:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 11:10:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 11:10:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 11:10:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 11:10:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 11:10:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 11:10:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 11:10:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 11:10:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 11:10:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 11:10:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 11:10:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 11:10:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 11:10:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 11:10:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 11:10:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 11:10:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 11:10:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 11:10:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 11:10:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 11:10:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 11:10:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 11:10:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 11:10:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 11:10:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:10:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:10:20] [Rank 0] PRINT: step:8800/10000 val_loss:4.8314 total_sharp:6.8564e-03 L1_sharp:2.0881e-03 L2_sharp:5.4155e-04 L3_sharp:4.8186e-04 L4_sharp:5.7382e-04 L5_sharp:1.1783e-03 L6_sharp:1.1115e-03 L7_sharp:7.6963e-04 L8_sharp:2.0435e-03 L9_sharp:2.3252e-03 L10_sharp:2.5253e-03 L11_sharp:3.9758e-03 L12_sharp:1.1032e-02 total_fnorm:1.4844e+00 total_l1_linf:2.1760e+03 total_spectral:7.3828e-01 L1_fnorm:3.8867e-01 L2_fnorm:3.4375e-01 L3_fnorm:3.4180e-01 L4_fnorm:3.4375e-01 L5_fnorm:3.3984e-01 L6_fnorm:3.4180e-01 L7_fnorm:3.3789e-01 L8_fnorm:3.2812e-01 L9_fnorm:3.3594e-01 L10_fnorm:3.3398e-01 L11_fnorm:3.3594e-01 L12_fnorm:3.3008e-01 L1_l1linf:6.6406e-02 L2_l1linf:5.9326e-02 L3_l1linf:5.5664e-02 L4_l1linf:5.5420e-02 L5_l1linf:5.5420e-02 L6_l1linf:5.1270e-02 L7_l1linf:5.3223e-02 L8_l1linf:5.0293e-02 L9_l1linf:4.6143e-02 L10_l1linf:4.5166e-02 L11_l1linf:4.5410e-02 L12_l1linf:5.0781e-02 L1_spectral:5.8047e-03 L2_spectral:5.2348e-03 L3_spectral:5.2190e-03 L4_spectral:5.0921e-03 L5_spectral:4.9960e-03 L6_spectral:4.9939e-03 L7_spectral:4.9720e-03 L8_spectral:5.1258e-03 L9_spectral:5.0741e-03 L10_spectral:5.0490e-03 L11_spectral:5.0175e-03 L12_spectral:4.9856e-03 train_time:364918ms step_avg:41.47ms +[2025-09-11 11:10:20] [Rank 0] PRINT: step:8800/10000 
val_loss:4.8314 total_sharp:6.8564e-03 L1_sharp:2.0881e-03 L2_sharp:5.4155e-04 L3_sharp:4.8186e-04 L4_sharp:5.7382e-04 L5_sharp:1.1783e-03 L6_sharp:1.1115e-03 L7_sharp:7.6963e-04 L8_sharp:2.0435e-03 L9_sharp:2.3252e-03 L10_sharp:2.5253e-03 L11_sharp:3.9758e-03 L12_sharp:1.1032e-02 total_fnorm:1.4844e+00 total_l1_linf:2.1760e+03 total_spectral:7.3828e-01 L1_fnorm:3.8867e-01 L2_fnorm:3.4375e-01 L3_fnorm:3.4180e-01 L4_fnorm:3.4375e-01 L5_fnorm:3.3984e-01 L6_fnorm:3.4180e-01 L7_fnorm:3.3789e-01 L8_fnorm:3.2812e-01 L9_fnorm:3.3594e-01 L10_fnorm:3.3398e-01 L11_fnorm:3.3594e-01 L12_fnorm:3.3008e-01 L1_l1linf:6.6406e-02 L2_l1linf:5.9326e-02 L3_l1linf:5.5664e-02 L4_l1linf:5.5420e-02 L5_l1linf:5.5420e-02 L6_l1linf:5.1270e-02 L7_l1linf:5.3223e-02 L8_l1linf:5.0293e-02 L9_l1linf:4.6143e-02 L10_l1linf:4.5166e-02 L11_l1linf:4.5410e-02 L12_l1linf:5.0781e-02 L1_spectral:5.8047e-03 L2_spectral:5.2348e-03 L3_spectral:5.2190e-03 L4_spectral:5.0921e-03 L5_spectral:4.9960e-03 L6_spectral:4.9939e-03 L7_spectral:4.9720e-03 L8_spectral:5.1258e-03 L9_spectral:5.0741e-03 L10_spectral:5.0490e-03 L11_spectral:5.0175e-03 L12_spectral:4.9856e-03 train_time:364918ms step_avg:41.47ms +[2025-09-11 11:10:21] [Rank 0] step:8801/10000 train_time:366188ms step_avg:41.61ms +[2025-09-11 11:10:21] [Rank 0] step:8801/10000 train_time:366188ms step_avg:41.61ms +[2025-09-11 11:10:22] [Rank 0] step:8821/10000 train_time:366922ms step_avg:41.60ms +[2025-09-11 11:10:22] [Rank 0] step:8821/10000 train_time:366922ms step_avg:41.60ms +[2025-09-11 11:10:23] [Rank 0] step:8841/10000 train_time:367632ms step_avg:41.58ms +[2025-09-11 11:10:23] [Rank 0] step:8841/10000 train_time:367632ms step_avg:41.58ms +[2025-09-11 11:10:23] [Rank 0] step:8861/10000 train_time:368341ms step_avg:41.57ms +[2025-09-11 11:10:23] [Rank 0] step:8861/10000 train_time:368341ms step_avg:41.57ms +[2025-09-11 11:10:24] [Rank 0] step:8881/10000 train_time:369050ms step_avg:41.56ms +[2025-09-11 11:10:24] [Rank 0] step:8881/10000 
train_time:369050ms step_avg:41.56ms +[2025-09-11 11:10:25] [Rank 0] step:8901/10000 train_time:369762ms step_avg:41.54ms +[2025-09-11 11:10:25] [Rank 0] step:8901/10000 train_time:369762ms step_avg:41.54ms +[2025-09-11 11:10:26] [Rank 0] step:8921/10000 train_time:370467ms step_avg:41.53ms +[2025-09-11 11:10:26] [Rank 0] step:8921/10000 train_time:370467ms step_avg:41.53ms +[2025-09-11 11:10:26] [Rank 0] step:8941/10000 train_time:371178ms step_avg:41.51ms +[2025-09-11 11:10:26] [Rank 0] step:8941/10000 train_time:371178ms step_avg:41.51ms +[2025-09-11 11:10:27] [Rank 0] step:8961/10000 train_time:371896ms step_avg:41.50ms +[2025-09-11 11:10:27] [Rank 0] step:8961/10000 train_time:371896ms step_avg:41.50ms +[2025-09-11 11:10:28] [Rank 0] step:8981/10000 train_time:372609ms step_avg:41.49ms +[2025-09-11 11:10:28] [Rank 0] step:8981/10000 train_time:372609ms step_avg:41.49ms +[2025-09-11 11:10:28] [Rank 0] step:9001/10000 train_time:373313ms step_avg:41.47ms +[2025-09-11 11:10:28] [Rank 0] step:9001/10000 train_time:373313ms step_avg:41.47ms +[2025-09-11 11:10:30] [Rank 0] step:9021/10000 train_time:374567ms step_avg:41.52ms +[2025-09-11 11:10:30] [Rank 0] step:9021/10000 train_time:374567ms step_avg:41.52ms +[2025-09-11 11:10:30] [Rank 0] step:9041/10000 train_time:375279ms step_avg:41.51ms +[2025-09-11 11:10:30] [Rank 0] step:9041/10000 train_time:375279ms step_avg:41.51ms +[2025-09-11 11:10:31] [Rank 0] step:9061/10000 train_time:375987ms step_avg:41.50ms +[2025-09-11 11:10:31] [Rank 0] step:9061/10000 train_time:375987ms step_avg:41.50ms +[2025-09-11 11:10:32] [Rank 0] step:9081/10000 train_time:376970ms step_avg:41.51ms +[2025-09-11 11:10:32] [Rank 0] step:9081/10000 train_time:376970ms step_avg:41.51ms +[2025-09-11 11:10:33] [Rank 0] step:9101/10000 train_time:377683ms step_avg:41.50ms +[2025-09-11 11:10:33] [Rank 0] step:9101/10000 train_time:377683ms step_avg:41.50ms +[2025-09-11 11:10:33] [Rank 0] step:9121/10000 train_time:378397ms step_avg:41.49ms 
+[2025-09-11 11:10:33] [Rank 0] step:9121/10000 train_time:378397ms step_avg:41.49ms +[2025-09-11 11:10:34] [Rank 0] step:9141/10000 train_time:379105ms step_avg:41.47ms +[2025-09-11 11:10:34] [Rank 0] step:9141/10000 train_time:379105ms step_avg:41.47ms +[2025-09-11 11:10:35] [Rank 0] step:9161/10000 train_time:379818ms step_avg:41.46ms +[2025-09-11 11:10:35] [Rank 0] step:9161/10000 train_time:379818ms step_avg:41.46ms +[2025-09-11 11:10:36] [Rank 0] step:9181/10000 train_time:380529ms step_avg:41.45ms +[2025-09-11 11:10:36] [Rank 0] step:9181/10000 train_time:380529ms step_avg:41.45ms +[2025-09-11 11:10:36] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 11:10:36] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 11:10:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 11:10:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 11:10:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 11:10:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 11:10:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:10:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:10:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 11:10:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 11:10:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 11:10:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 11:10:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 11:10:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 11:10:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 11:10:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 11:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 11:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 11:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 11:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 11:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 11:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 11:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 11:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 11:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 11:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 11:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 11:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 11:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 11:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 11:10:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 11:10:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 11:10:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 11:10:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 11:10:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 11:10:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 11:10:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 11:10:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 11:10:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 11:10:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 11:10:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 11:10:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 11:10:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 11:10:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 11:10:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:10:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:10:47] [Rank 0] PRINT: step:9200/10000 val_loss:4.8187 total_sharp:5.6521e-03 L1_sharp:2.5402e-03 L2_sharp:9.1135e-04 L3_sharp:7.6281e-04 L4_sharp:4.1406e-04 L5_sharp:5.7199e-04 L6_sharp:8.9679e-04 L7_sharp:9.8164e-04 L8_sharp:1.8020e-03 L9_sharp:1.8836e-03 L10_sharp:2.0134e-03 L11_sharp:2.9224e-03 L12_sharp:9.8731e-03 total_fnorm:9.4922e-01 total_l1_linf:1.2480e+03 total_spectral:4.7852e-01 L1_fnorm:2.6953e-01 L2_fnorm:2.2559e-01 L3_fnorm:2.2656e-01 L4_fnorm:2.2656e-01 L5_fnorm:2.2363e-01 L6_fnorm:2.2559e-01 L7_fnorm:2.2363e-01 L8_fnorm:2.1680e-01 L9_fnorm:2.2168e-01 L10_fnorm:2.2070e-01 L11_fnorm:2.2168e-01 L12_fnorm:2.1680e-01 L1_l1linf:4.4678e-02 L2_l1linf:3.4668e-02 L3_l1linf:3.2471e-02 L4_l1linf:3.2471e-02 L5_l1linf:3.1494e-02 L6_l1linf:3.0273e-02 L7_l1linf:3.1738e-02 L8_l1linf:2.8320e-02 L9_l1linf:2.7344e-02 L10_l1linf:2.7344e-02 L11_l1linf:2.8198e-02 L12_l1linf:2.9541e-02 L1_spectral:4.4388e-03 L2_spectral:3.5255e-03 L3_spectral:3.4726e-03 L4_spectral:3.4362e-03 L5_spectral:3.3591e-03 L6_spectral:3.3199e-03 L7_spectral:3.3376e-03 L8_spectral:3.4949e-03 L9_spectral:3.3693e-03 L10_spectral:3.3994e-03 L11_spectral:3.3526e-03 L12_spectral:3.3564e-03 train_time:381222ms step_avg:41.44ms +[2025-09-11 11:10:47] [Rank 0] PRINT: step:9200/10000 val_loss:4.8187 total_sharp:5.6521e-03 L1_sharp:2.5402e-03 L2_sharp:9.1135e-04 L3_sharp:7.6281e-04 L4_sharp:4.1406e-04 L5_sharp:5.7199e-04 L6_sharp:8.9679e-04 L7_sharp:9.8164e-04 L8_sharp:1.8020e-03 L9_sharp:1.8836e-03 L10_sharp:2.0134e-03 L11_sharp:2.9224e-03 L12_sharp:9.8731e-03 total_fnorm:9.4922e-01 total_l1_linf:1.2480e+03 total_spectral:4.7852e-01 L1_fnorm:2.6953e-01 L2_fnorm:2.2559e-01 L3_fnorm:2.2656e-01 L4_fnorm:2.2656e-01 L5_fnorm:2.2363e-01 L6_fnorm:2.2559e-01 L7_fnorm:2.2363e-01 L8_fnorm:2.1680e-01 L9_fnorm:2.2168e-01 L10_fnorm:2.2070e-01 L11_fnorm:2.2168e-01 L12_fnorm:2.1680e-01 L1_l1linf:4.4678e-02 L2_l1linf:3.4668e-02 L3_l1linf:3.2471e-02 L4_l1linf:3.2471e-02 L5_l1linf:3.1494e-02 
L6_l1linf:3.0273e-02 L7_l1linf:3.1738e-02 L8_l1linf:2.8320e-02 L9_l1linf:2.7344e-02 L10_l1linf:2.7344e-02 L11_l1linf:2.8198e-02 L12_l1linf:2.9541e-02 L1_spectral:4.4388e-03 L2_spectral:3.5255e-03 L3_spectral:3.4726e-03 L4_spectral:3.4362e-03 L5_spectral:3.3591e-03 L6_spectral:3.3199e-03 L7_spectral:3.3376e-03 L8_spectral:3.4949e-03 L9_spectral:3.3693e-03 L10_spectral:3.3994e-03 L11_spectral:3.3526e-03 L12_spectral:3.3564e-03 train_time:381222ms step_avg:41.44ms +[2025-09-11 11:10:48] [Rank 0] step:9201/10000 train_time:382531ms step_avg:41.57ms +[2025-09-11 11:10:48] [Rank 0] step:9201/10000 train_time:382531ms step_avg:41.57ms +[2025-09-11 11:10:49] [Rank 0] step:9221/10000 train_time:383282ms step_avg:41.57ms +[2025-09-11 11:10:49] [Rank 0] step:9221/10000 train_time:383282ms step_avg:41.57ms +[2025-09-11 11:10:49] [Rank 0] step:9241/10000 train_time:383991ms step_avg:41.55ms +[2025-09-11 11:10:49] [Rank 0] step:9241/10000 train_time:383991ms step_avg:41.55ms +[2025-09-11 11:10:50] [Rank 0] step:9261/10000 train_time:384703ms step_avg:41.54ms +[2025-09-11 11:10:50] [Rank 0] step:9261/10000 train_time:384703ms step_avg:41.54ms +[2025-09-11 11:10:51] [Rank 0] step:9281/10000 train_time:385415ms step_avg:41.53ms +[2025-09-11 11:10:51] [Rank 0] step:9281/10000 train_time:385415ms step_avg:41.53ms +[2025-09-11 11:10:51] [Rank 0] step:9301/10000 train_time:386123ms step_avg:41.51ms +[2025-09-11 11:10:51] [Rank 0] step:9301/10000 train_time:386123ms step_avg:41.51ms +[2025-09-11 11:10:52] [Rank 0] step:9321/10000 train_time:386835ms step_avg:41.50ms +[2025-09-11 11:10:52] [Rank 0] step:9321/10000 train_time:386835ms step_avg:41.50ms +[2025-09-11 11:10:53] [Rank 0] step:9341/10000 train_time:387541ms step_avg:41.49ms +[2025-09-11 11:10:53] [Rank 0] step:9341/10000 train_time:387541ms step_avg:41.49ms +[2025-09-11 11:10:54] [Rank 0] step:9361/10000 train_time:388247ms step_avg:41.47ms +[2025-09-11 11:10:54] [Rank 0] step:9361/10000 train_time:388247ms step_avg:41.47ms 
+[2025-09-11 11:10:54] [Rank 0] step:9381/10000 train_time:388955ms step_avg:41.46ms +[2025-09-11 11:10:54] [Rank 0] step:9381/10000 train_time:388955ms step_avg:41.46ms +[2025-09-11 11:10:55] [Rank 0] step:9401/10000 train_time:389665ms step_avg:41.45ms +[2025-09-11 11:10:55] [Rank 0] step:9401/10000 train_time:389665ms step_avg:41.45ms +[2025-09-11 11:10:56] [Rank 0] step:9421/10000 train_time:390376ms step_avg:41.44ms +[2025-09-11 11:10:56] [Rank 0] step:9421/10000 train_time:390376ms step_avg:41.44ms +[2025-09-11 11:10:56] [Rank 0] step:9441/10000 train_time:391090ms step_avg:41.42ms +[2025-09-11 11:10:56] [Rank 0] step:9441/10000 train_time:391090ms step_avg:41.42ms +[2025-09-11 11:10:57] [Rank 0] step:9461/10000 train_time:391799ms step_avg:41.41ms +[2025-09-11 11:10:57] [Rank 0] step:9461/10000 train_time:391799ms step_avg:41.41ms +[2025-09-11 11:10:58] [Rank 0] step:9481/10000 train_time:392510ms step_avg:41.40ms +[2025-09-11 11:10:58] [Rank 0] step:9481/10000 train_time:392510ms step_avg:41.40ms +[2025-09-11 11:10:59] [Rank 0] step:9501/10000 train_time:393221ms step_avg:41.39ms +[2025-09-11 11:10:59] [Rank 0] step:9501/10000 train_time:393221ms step_avg:41.39ms +[2025-09-11 11:10:59] [Rank 0] step:9521/10000 train_time:393934ms step_avg:41.38ms +[2025-09-11 11:10:59] [Rank 0] step:9521/10000 train_time:393934ms step_avg:41.38ms +[2025-09-11 11:11:00] [Rank 0] step:9541/10000 train_time:394641ms step_avg:41.36ms +[2025-09-11 11:11:00] [Rank 0] step:9541/10000 train_time:394641ms step_avg:41.36ms +[2025-09-11 11:11:01] [Rank 0] step:9561/10000 train_time:395350ms step_avg:41.35ms +[2025-09-11 11:11:01] [Rank 0] step:9561/10000 train_time:395350ms step_avg:41.35ms +[2025-09-11 11:11:01] [Rank 0] step:9581/10000 train_time:396063ms step_avg:41.34ms +[2025-09-11 11:11:01] [Rank 0] step:9581/10000 train_time:396063ms step_avg:41.34ms +[2025-09-11 11:11:02] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 11:11:02] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 11:11:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 11:11:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 11:11:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 11:11:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 11:11:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:11:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:11:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 11:11:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 11:11:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 11:11:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 11:11:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 11:11:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 11:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 11:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 11:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 11:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 11:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 11:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 11:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 11:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 11:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 11:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 11:11:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 11:11:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 11:11:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 11:11:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 11:11:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 11:11:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 11:11:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 11:11:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 11:11:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 11:11:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 11:11:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 11:11:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 11:11:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 11:11:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 11:11:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 11:11:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 11:11:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 11:11:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 11:11:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 11:11:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 11:11:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:11:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:11:12] [Rank 0] PRINT: step:9600/10000 val_loss:4.8107 total_sharp:3.3978e-03 L1_sharp:2.9907e-04 L2_sharp:6.9215e-04 L3_sharp:3.1240e-04 L4_sharp:4.8124e-04 L5_sharp:7.6665e-04 L6_sharp:6.9770e-04 L7_sharp:7.0170e-04 L8_sharp:1.3227e-03 L9_sharp:1.4155e-03 L10_sharp:1.6195e-03 L11_sharp:2.0958e-03 L12_sharp:6.1034e-03 total_fnorm:5.6250e-01 total_l1_linf:5.9600e+02 total_spectral:2.7734e-01 L1_fnorm:1.6113e-01 L2_fnorm:1.2891e-01 L3_fnorm:1.2891e-01 L4_fnorm:1.2988e-01 L5_fnorm:1.2793e-01 L6_fnorm:1.2793e-01 L7_fnorm:1.2695e-01 L8_fnorm:1.2354e-01 L9_fnorm:1.2500e-01 L10_fnorm:1.2500e-01 L11_fnorm:1.2598e-01 L12_fnorm:1.2305e-01 L1_l1linf:2.5146e-02 L2_l1linf:1.7334e-02 L3_l1linf:1.6479e-02 L4_l1linf:1.5869e-02 L5_l1linf:1.5015e-02 L6_l1linf:1.4832e-02 L7_l1linf:1.5015e-02 L8_l1linf:1.4343e-02 L9_l1linf:1.3855e-02 L10_l1linf:1.3367e-02 L11_l1linf:1.3611e-02 L12_l1linf:1.4587e-02 L1_spectral:2.9281e-03 L2_spectral:2.0729e-03 L3_spectral:2.0306e-03 L4_spectral:2.0230e-03 L5_spectral:1.9652e-03 L6_spectral:1.9645e-03 L7_spectral:1.9496e-03 L8_spectral:2.0435e-03 L9_spectral:1.9810e-03 L10_spectral:1.9630e-03 L11_spectral:1.9467e-03 L12_spectral:1.9640e-03 train_time:396750ms step_avg:41.33ms +[2025-09-11 11:11:12] [Rank 0] PRINT: step:9600/10000 
val_loss:4.8107 total_sharp:3.3978e-03 L1_sharp:2.9907e-04 L2_sharp:6.9215e-04 L3_sharp:3.1240e-04 L4_sharp:4.8124e-04 L5_sharp:7.6665e-04 L6_sharp:6.9770e-04 L7_sharp:7.0170e-04 L8_sharp:1.3227e-03 L9_sharp:1.4155e-03 L10_sharp:1.6195e-03 L11_sharp:2.0958e-03 L12_sharp:6.1034e-03 total_fnorm:5.6250e-01 total_l1_linf:5.9600e+02 total_spectral:2.7734e-01 L1_fnorm:1.6113e-01 L2_fnorm:1.2891e-01 L3_fnorm:1.2891e-01 L4_fnorm:1.2988e-01 L5_fnorm:1.2793e-01 L6_fnorm:1.2793e-01 L7_fnorm:1.2695e-01 L8_fnorm:1.2354e-01 L9_fnorm:1.2500e-01 L10_fnorm:1.2500e-01 L11_fnorm:1.2598e-01 L12_fnorm:1.2305e-01 L1_l1linf:2.5146e-02 L2_l1linf:1.7334e-02 L3_l1linf:1.6479e-02 L4_l1linf:1.5869e-02 L5_l1linf:1.5015e-02 L6_l1linf:1.4832e-02 L7_l1linf:1.5015e-02 L8_l1linf:1.4343e-02 L9_l1linf:1.3855e-02 L10_l1linf:1.3367e-02 L11_l1linf:1.3611e-02 L12_l1linf:1.4587e-02 L1_spectral:2.9281e-03 L2_spectral:2.0729e-03 L3_spectral:2.0306e-03 L4_spectral:2.0230e-03 L5_spectral:1.9652e-03 L6_spectral:1.9645e-03 L7_spectral:1.9496e-03 L8_spectral:2.0435e-03 L9_spectral:1.9810e-03 L10_spectral:1.9630e-03 L11_spectral:1.9467e-03 L12_spectral:1.9640e-03 train_time:396750ms step_avg:41.33ms +[2025-09-11 11:11:13] [Rank 0] step:9601/10000 train_time:398047ms step_avg:41.46ms +[2025-09-11 11:11:13] [Rank 0] step:9601/10000 train_time:398047ms step_avg:41.46ms +[2025-09-11 11:11:14] [Rank 0] step:9621/10000 train_time:398792ms step_avg:41.45ms +[2025-09-11 11:11:14] [Rank 0] step:9621/10000 train_time:398792ms step_avg:41.45ms +[2025-09-11 11:11:15] [Rank 0] step:9641/10000 train_time:399507ms step_avg:41.44ms +[2025-09-11 11:11:15] [Rank 0] step:9641/10000 train_time:399507ms step_avg:41.44ms +[2025-09-11 11:11:16] [Rank 0] step:9661/10000 train_time:400230ms step_avg:41.43ms +[2025-09-11 11:11:16] [Rank 0] step:9661/10000 train_time:400230ms step_avg:41.43ms +[2025-09-11 11:11:16] [Rank 0] step:9681/10000 train_time:400945ms step_avg:41.42ms +[2025-09-11 11:11:16] [Rank 0] step:9681/10000 
train_time:400945ms step_avg:41.42ms +[2025-09-11 11:11:17] [Rank 0] step:9701/10000 train_time:401662ms step_avg:41.40ms +[2025-09-11 11:11:17] [Rank 0] step:9701/10000 train_time:401662ms step_avg:41.40ms +[2025-09-11 11:11:18] [Rank 0] step:9721/10000 train_time:402383ms step_avg:41.39ms +[2025-09-11 11:11:18] [Rank 0] step:9721/10000 train_time:402383ms step_avg:41.39ms +[2025-09-11 11:11:19] [Rank 0] step:9741/10000 train_time:403101ms step_avg:41.38ms +[2025-09-11 11:11:19] [Rank 0] step:9741/10000 train_time:403101ms step_avg:41.38ms +[2025-09-11 11:11:19] [Rank 0] step:9761/10000 train_time:403819ms step_avg:41.37ms +[2025-09-11 11:11:19] [Rank 0] step:9761/10000 train_time:403819ms step_avg:41.37ms +[2025-09-11 11:11:20] [Rank 0] step:9781/10000 train_time:404535ms step_avg:41.36ms +[2025-09-11 11:11:20] [Rank 0] step:9781/10000 train_time:404535ms step_avg:41.36ms +[2025-09-11 11:11:21] [Rank 0] step:9801/10000 train_time:405256ms step_avg:41.35ms +[2025-09-11 11:11:21] [Rank 0] step:9801/10000 train_time:405256ms step_avg:41.35ms +[2025-09-11 11:11:21] [Rank 0] step:9821/10000 train_time:405976ms step_avg:41.34ms +[2025-09-11 11:11:21] [Rank 0] step:9821/10000 train_time:405976ms step_avg:41.34ms +[2025-09-11 11:11:22] [Rank 0] step:9841/10000 train_time:406698ms step_avg:41.33ms +[2025-09-11 11:11:22] [Rank 0] step:9841/10000 train_time:406698ms step_avg:41.33ms +[2025-09-11 11:11:23] [Rank 0] step:9861/10000 train_time:407416ms step_avg:41.32ms +[2025-09-11 11:11:23] [Rank 0] step:9861/10000 train_time:407416ms step_avg:41.32ms +[2025-09-11 11:11:24] [Rank 0] step:9881/10000 train_time:408134ms step_avg:41.30ms +[2025-09-11 11:11:24] [Rank 0] step:9881/10000 train_time:408134ms step_avg:41.30ms +[2025-09-11 11:11:24] [Rank 0] step:9901/10000 train_time:408849ms step_avg:41.29ms +[2025-09-11 11:11:24] [Rank 0] step:9901/10000 train_time:408849ms step_avg:41.29ms +[2025-09-11 11:11:25] [Rank 0] step:9921/10000 train_time:409566ms step_avg:41.28ms 
+[2025-09-11 11:11:25] [Rank 0] step:9921/10000 train_time:409566ms step_avg:41.28ms +[2025-09-11 11:11:26] [Rank 0] step:9941/10000 train_time:410288ms step_avg:41.27ms +[2025-09-11 11:11:26] [Rank 0] step:9941/10000 train_time:410288ms step_avg:41.27ms +[2025-09-11 11:11:26] [Rank 0] step:9961/10000 train_time:411010ms step_avg:41.26ms +[2025-09-11 11:11:26] [Rank 0] step:9961/10000 train_time:411010ms step_avg:41.26ms +[2025-09-11 11:11:27] [Rank 0] step:9981/10000 train_time:411729ms step_avg:41.25ms +[2025-09-11 11:11:27] [Rank 0] step:9981/10000 train_time:411729ms step_avg:41.25ms +[2025-09-11 11:11:28] [Rank 0] step:10000/10000 train_time:412418ms step_avg:41.24ms +[2025-09-11 11:11:28] [Rank 0] step:10000/10000 train_time:412418ms step_avg:41.24ms +[2025-09-11 11:11:28] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 11:11:28] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 11:11:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 11:11:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 11:11:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 11:11:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 11:11:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:11:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:11:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 11:11:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 11:11:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 11:11:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 11:11:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 11:11:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 11:11:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 11:11:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 11:11:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 11:11:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 11:11:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 11:11:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 11:11:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 11:11:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 11:11:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 11:11:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 11:11:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 11:11:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 11:11:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 11:11:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 11:11:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 11:11:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 11:11:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 11:11:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 11:11:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 11:11:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 11:11:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 11:11:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 11:11:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 11:11:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 11:11:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 11:11:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 11:11:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 11:11:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 11:11:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 11:11:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 11:11:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:11:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:11:39] [Rank 0] PRINT: step:10000/10000 val_loss:4.8082 total_sharp:2.0138e-03 L1_sharp:3.9570e-05 L2_sharp:2.8278e-04 L3_sharp:2.1675e-04 L4_sharp:5.1897e-05 L5_sharp:4.4246e-04 L6_sharp:4.4740e-04 L7_sharp:4.9704e-04 L8_sharp:1.0366e-03 L9_sharp:1.0161e-03 L10_sharp:1.1010e-03 L11_sharp:1.4974e-03 L12_sharp:4.8245e-03 total_fnorm:2.1387e-01 total_l1_linf:1.6800e+02 total_spectral:1.0693e-01 L1_fnorm:6.5430e-02 L2_fnorm:4.9316e-02 L3_fnorm:4.9805e-02 L4_fnorm:4.9805e-02 L5_fnorm:4.9072e-02 L6_fnorm:4.9316e-02 L7_fnorm:4.8828e-02 L8_fnorm:4.7607e-02 L9_fnorm:4.8584e-02 L10_fnorm:4.8096e-02 L11_fnorm:4.8584e-02 L12_fnorm:4.7607e-02 L1_l1linf:8.6060e-03 L2_l1linf:5.2185e-03 L3_l1linf:4.7913e-03 L4_l1linf:5.0964e-03 L5_l1linf:4.4556e-03 L6_l1linf:4.5166e-03 L7_l1linf:4.5776e-03 L8_l1linf:5.0659e-03 L9_l1linf:3.9673e-03 L10_l1linf:3.9673e-03 L11_l1linf:4.2725e-03 L12_l1linf:4.8523e-03 L1_spectral:1.2930e-03 L2_spectral:8.1766e-04 L3_spectral:8.1113e-04 L4_spectral:7.9775e-04 L5_spectral:7.6757e-04 L6_spectral:7.7121e-04 L7_spectral:7.8693e-04 L8_spectral:8.2271e-04 L9_spectral:7.8878e-04 L10_spectral:7.7843e-04 L11_spectral:7.8374e-04 L12_spectral:7.6665e-04 train_time:412438ms step_avg:41.24ms +[2025-09-11 11:11:39] [Rank 0] PRINT: step:10000/10000 val_loss:4.8082 total_sharp:2.0138e-03 L1_sharp:3.9570e-05 L2_sharp:2.8278e-04 L3_sharp:2.1675e-04 L4_sharp:5.1897e-05 L5_sharp:4.4246e-04 L6_sharp:4.4740e-04 L7_sharp:4.9704e-04 L8_sharp:1.0366e-03 L9_sharp:1.0161e-03 L10_sharp:1.1010e-03 L11_sharp:1.4974e-03 L12_sharp:4.8245e-03 total_fnorm:2.1387e-01 total_l1_linf:1.6800e+02 total_spectral:1.0693e-01 L1_fnorm:6.5430e-02 L2_fnorm:4.9316e-02 L3_fnorm:4.9805e-02 L4_fnorm:4.9805e-02 L5_fnorm:4.9072e-02 L6_fnorm:4.9316e-02 L7_fnorm:4.8828e-02 L8_fnorm:4.7607e-02 L9_fnorm:4.8584e-02 L10_fnorm:4.8096e-02 L11_fnorm:4.8584e-02 L12_fnorm:4.7607e-02 L1_l1linf:8.6060e-03 L2_l1linf:5.2185e-03 L3_l1linf:4.7913e-03 L4_l1linf:5.0964e-03 L5_l1linf:4.4556e-03 
L6_l1linf:4.5166e-03 L7_l1linf:4.5776e-03 L8_l1linf:5.0659e-03 L9_l1linf:3.9673e-03 L10_l1linf:3.9673e-03 L11_l1linf:4.2725e-03 L12_l1linf:4.8523e-03 L1_spectral:1.2930e-03 L2_spectral:8.1766e-04 L3_spectral:8.1113e-04 L4_spectral:7.9775e-04 L5_spectral:7.6757e-04 L6_spectral:7.7121e-04 L7_spectral:7.8693e-04 L8_spectral:8.2271e-04 L9_spectral:7.8878e-04 L10_spectral:7.7843e-04 L11_spectral:7.8374e-04 L12_spectral:7.6665e-04 train_time:412438ms step_avg:41.24ms +[2025-09-11 11:11:39] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 11:11:39 2025 --- +[2025-09-11 11:11:39] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 11:11:39 2025 --- +[2025-09-11 11:11:39] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 11:11:39] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.02_seed_44/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.02_seed_44/config.json new file mode 100644 index 0000000000000000000000000000000000000000..0fa0a1610352894ef780be26d3a6935774b121d0 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.02_seed_44/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 44, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.002, + "muon_lr": 0.02, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "44bd9f0c-4607-473b-a31d-1415570b65a7", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.02_seed_44/training_log_44bd9f0c-4607-473b-a31d-1415570b65a7.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.02_seed_44/training_log_44bd9f0c-4607-473b-a31d-1415570b65a7.txt new file mode 100644 index 0000000000000000000000000000000000000000..55075a24d31e8121f8a24e912e4c24c3a6b5cb24 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.02_seed_44/training_log_44bd9f0c-4607-473b-a31d-1415570b65a7.txt @@ -0,0 +1,4264 @@ +[2025-09-11 11:38:50] [Rank 0] PRINT: --- Script Start: Thu Sep 11 11:38:50 2025 --- +[2025-09-11 11:38:50] [Rank 0] PRINT: --- Script Start: Thu Sep 11 11:38:50 2025 --- +[2025-09-11 11:38:50] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002, muon_lr=0.02, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 11:38:50] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002, muon_lr=0.02, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 11:38:50] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 11:38:50] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 11:38:50] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 11:38:50] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 11:38:50] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.02_seed_44 +[2025-09-11 11:38:50] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.02_seed_44 +[2025-09-11 11:38:50] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import 
dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, 
"unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." 
+ "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention 
sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 11:38:50] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 11:38:50] [Rank 0] PRINT: Constructing model... +[2025-09-11 11:38:50] [Rank 0] PRINT: Constructing model... +[2025-09-11 11:38:51] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 11:38:51] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 11:38:51] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 11:38:51] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 11:38:51] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 11:38:51] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 11:38:51] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 11:38:51] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 11:38:51] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 11:38:51] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 11:38:54] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 11:38:54] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 11:38:54] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 11:38:54] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 11:38:54] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 11:38:54] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 11:38:59] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 11:38:59] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 11:38:59] [Rank 0] PRINT: Starting warmup... +[2025-09-11 11:38:59] [Rank 0] PRINT: Starting warmup... +[2025-09-11 11:39:35] [Rank 0] PRINT: Warmup complete. +[2025-09-11 11:39:35] [Rank 0] PRINT: Warmup complete. +[2025-09-11 11:39:35] [Rank 0] PRINT: Starting training... +[2025-09-11 11:39:35] [Rank 0] PRINT: Starting training... 
+[2025-09-11 11:39:37] [Rank 0] step:21/10000 train_time:1133ms step_avg:53.97ms +[2025-09-11 11:39:37] [Rank 0] step:21/10000 train_time:1133ms step_avg:53.97ms +[2025-09-11 11:39:37] [Rank 0] step:41/10000 train_time:1857ms step_avg:45.29ms +[2025-09-11 11:39:37] [Rank 0] step:41/10000 train_time:1857ms step_avg:45.29ms +[2025-09-11 11:39:38] [Rank 0] step:61/10000 train_time:2581ms step_avg:42.31ms +[2025-09-11 11:39:38] [Rank 0] step:61/10000 train_time:2581ms step_avg:42.31ms +[2025-09-11 11:39:39] [Rank 0] step:81/10000 train_time:3304ms step_avg:40.79ms +[2025-09-11 11:39:39] [Rank 0] step:81/10000 train_time:3304ms step_avg:40.79ms +[2025-09-11 11:39:39] [Rank 0] step:101/10000 train_time:4028ms step_avg:39.88ms +[2025-09-11 11:39:39] [Rank 0] step:101/10000 train_time:4028ms step_avg:39.88ms +[2025-09-11 11:39:40] [Rank 0] step:121/10000 train_time:4751ms step_avg:39.27ms +[2025-09-11 11:39:40] [Rank 0] step:121/10000 train_time:4751ms step_avg:39.27ms +[2025-09-11 11:39:41] [Rank 0] step:141/10000 train_time:5473ms step_avg:38.81ms +[2025-09-11 11:39:41] [Rank 0] step:141/10000 train_time:5473ms step_avg:38.81ms +[2025-09-11 11:39:42] [Rank 0] step:161/10000 train_time:6195ms step_avg:38.48ms +[2025-09-11 11:39:42] [Rank 0] step:161/10000 train_time:6195ms step_avg:38.48ms +[2025-09-11 11:39:42] [Rank 0] step:181/10000 train_time:6917ms step_avg:38.22ms +[2025-09-11 11:39:42] [Rank 0] step:181/10000 train_time:6917ms step_avg:38.22ms +[2025-09-11 11:39:43] [Rank 0] step:201/10000 train_time:7639ms step_avg:38.00ms +[2025-09-11 11:39:43] [Rank 0] step:201/10000 train_time:7639ms step_avg:38.00ms +[2025-09-11 11:39:44] [Rank 0] step:221/10000 train_time:8361ms step_avg:37.83ms +[2025-09-11 11:39:44] [Rank 0] step:221/10000 train_time:8361ms step_avg:37.83ms +[2025-09-11 11:39:44] [Rank 0] step:241/10000 train_time:9083ms step_avg:37.69ms +[2025-09-11 11:39:44] [Rank 0] step:241/10000 train_time:9083ms step_avg:37.69ms +[2025-09-11 11:39:45] [Rank 0] 
step:261/10000 train_time:9805ms step_avg:37.57ms +[2025-09-11 11:39:45] [Rank 0] step:261/10000 train_time:9805ms step_avg:37.57ms +[2025-09-11 11:39:46] [Rank 0] step:281/10000 train_time:10527ms step_avg:37.46ms +[2025-09-11 11:39:46] [Rank 0] step:281/10000 train_time:10527ms step_avg:37.46ms +[2025-09-11 11:39:47] [Rank 0] step:301/10000 train_time:11248ms step_avg:37.37ms +[2025-09-11 11:39:47] [Rank 0] step:301/10000 train_time:11248ms step_avg:37.37ms +[2025-09-11 11:39:47] [Rank 0] step:321/10000 train_time:11970ms step_avg:37.29ms +[2025-09-11 11:39:47] [Rank 0] step:321/10000 train_time:11970ms step_avg:37.29ms +[2025-09-11 11:39:48] [Rank 0] step:341/10000 train_time:12691ms step_avg:37.22ms +[2025-09-11 11:39:48] [Rank 0] step:341/10000 train_time:12691ms step_avg:37.22ms +[2025-09-11 11:39:49] [Rank 0] step:361/10000 train_time:13413ms step_avg:37.16ms +[2025-09-11 11:39:49] [Rank 0] step:361/10000 train_time:13413ms step_avg:37.16ms +[2025-09-11 11:39:50] [Rank 0] step:381/10000 train_time:14135ms step_avg:37.10ms +[2025-09-11 11:39:50] [Rank 0] step:381/10000 train_time:14135ms step_avg:37.10ms +[2025-09-11 11:39:50] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 11:39:50] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 11:39:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 11:39:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 11:40:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 11:40:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 11:40:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:40:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 11:40:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 11:40:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 11:40:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 11:40:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 11:40:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 11:40:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 11:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 11:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 11:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 11:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 11:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 11:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 11:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 11:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 11:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 11:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 11:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 11:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 11:40:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 11:40:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 11:40:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 11:40:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 11:40:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 11:40:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 11:40:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 11:40:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 11:40:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 11:40:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 11:40:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 11:40:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 11:40:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 11:40:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 11:40:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 11:40:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 11:40:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 11:40:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 11:40:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:40:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:40:36] [Rank 0] PRINT: step:400/10000 val_loss:6.6644 total_sharp:2.1750e-02 L1_sharp:2.4963e-02 L2_sharp:1.4130e-02 L3_sharp:9.4318e-03 L4_sharp:1.1085e-02 L5_sharp:8.7875e-03 L6_sharp:5.0644e-03 L7_sharp:3.7452e-03 L8_sharp:2.4633e-03 L9_sharp:3.1311e-03 L10_sharp:3.1959e-03 L11_sharp:2.7200e-03 L12_sharp:7.1010e-03 total_fnorm:1.1479e+01 total_l1_linf:4.4481e+04 total_spectral:5.7394e+00 L1_fnorm:2.4633e+00 L2_fnorm:2.4538e+00 L3_fnorm:2.3991e+00 L4_fnorm:2.2937e+00 L5_fnorm:2.1746e+00 L6_fnorm:2.0867e+00 L7_fnorm:1.9411e+00 L8_fnorm:1.8661e+00 L9_fnorm:1.7029e+00 L10_fnorm:1.6858e+00 L11_fnorm:1.6231e+00 L12_fnorm:1.4990e+00 L1_l1linf:8.2510e-01 L2_l1linf:8.1394e-01 L3_l1linf:7.9482e-01 L4_l1linf:7.3719e-01 L5_l1linf:6.9493e-01 L6_l1linf:6.8030e-01 L7_l1linf:6.3433e-01 L8_l1linf:6.1647e-01 L9_l1linf:5.8819e-01 L10_l1linf:5.8189e-01 L11_l1linf:5.5942e-01 L12_l1linf:5.0465e-01 L1_spectral:2.4081e-02 L2_spectral:2.4100e-02 L3_spectral:2.4100e-02 L4_spectral:2.4089e-02 L5_spectral:2.4091e-02 L6_spectral:2.4085e-02 L7_spectral:2.4061e-02 L8_spectral:2.4055e-02 L9_spectral:2.4038e-02 L10_spectral:2.4043e-02 L11_spectral:2.4035e-02 L12_spectral:2.4023e-02 train_time:14836ms step_avg:37.09ms +[2025-09-11 11:40:36] [Rank 0] PRINT: step:400/10000 val_loss:6.6644 total_sharp:2.1750e-02 L1_sharp:2.4963e-02 L2_sharp:1.4130e-02 L3_sharp:9.4318e-03 L4_sharp:1.1085e-02 L5_sharp:8.7875e-03 L6_sharp:5.0644e-03 L7_sharp:3.7452e-03 L8_sharp:2.4633e-03 L9_sharp:3.1311e-03 L10_sharp:3.1959e-03 L11_sharp:2.7200e-03 L12_sharp:7.1010e-03 total_fnorm:1.1479e+01 total_l1_linf:4.4481e+04 total_spectral:5.7394e+00 L1_fnorm:2.4633e+00 L2_fnorm:2.4538e+00 L3_fnorm:2.3991e+00 L4_fnorm:2.2937e+00 L5_fnorm:2.1746e+00 L6_fnorm:2.0867e+00 L7_fnorm:1.9411e+00 L8_fnorm:1.8661e+00 L9_fnorm:1.7029e+00 L10_fnorm:1.6858e+00 L11_fnorm:1.6231e+00 L12_fnorm:1.4990e+00 L1_l1linf:8.2510e-01 L2_l1linf:8.1394e-01 L3_l1linf:7.9482e-01 L4_l1linf:7.3719e-01 L5_l1linf:6.9493e-01 
L6_l1linf:6.8030e-01 L7_l1linf:6.3433e-01 L8_l1linf:6.1647e-01 L9_l1linf:5.8819e-01 L10_l1linf:5.8189e-01 L11_l1linf:5.5942e-01 L12_l1linf:5.0465e-01 L1_spectral:2.4081e-02 L2_spectral:2.4100e-02 L3_spectral:2.4100e-02 L4_spectral:2.4089e-02 L5_spectral:2.4091e-02 L6_spectral:2.4085e-02 L7_spectral:2.4061e-02 L8_spectral:2.4055e-02 L9_spectral:2.4038e-02 L10_spectral:2.4043e-02 L11_spectral:2.4035e-02 L12_spectral:2.4023e-02 train_time:14836ms step_avg:37.09ms +[2025-09-11 11:41:05] [Rank 0] step:401/10000 train_time:44144ms step_avg:110.08ms +[2025-09-11 11:41:05] [Rank 0] step:401/10000 train_time:44144ms step_avg:110.08ms +[2025-09-11 11:41:07] [Rank 0] step:421/10000 train_time:45977ms step_avg:109.21ms +[2025-09-11 11:41:07] [Rank 0] step:421/10000 train_time:45977ms step_avg:109.21ms +[2025-09-11 11:41:08] [Rank 0] step:441/10000 train_time:46611ms step_avg:105.69ms +[2025-09-11 11:41:08] [Rank 0] step:441/10000 train_time:46611ms step_avg:105.69ms +[2025-09-11 11:41:09] [Rank 0] step:461/10000 train_time:47245ms step_avg:102.48ms +[2025-09-11 11:41:09] [Rank 0] step:461/10000 train_time:47245ms step_avg:102.48ms +[2025-09-11 11:41:09] [Rank 0] step:481/10000 train_time:47878ms step_avg:99.54ms +[2025-09-11 11:41:09] [Rank 0] step:481/10000 train_time:47878ms step_avg:99.54ms +[2025-09-11 11:41:10] [Rank 0] step:501/10000 train_time:48512ms step_avg:96.83ms +[2025-09-11 11:41:10] [Rank 0] step:501/10000 train_time:48512ms step_avg:96.83ms +[2025-09-11 11:41:10] [Rank 0] step:521/10000 train_time:49145ms step_avg:94.33ms +[2025-09-11 11:41:10] [Rank 0] step:521/10000 train_time:49145ms step_avg:94.33ms +[2025-09-11 11:41:11] [Rank 0] step:541/10000 train_time:49778ms step_avg:92.01ms +[2025-09-11 11:41:11] [Rank 0] step:541/10000 train_time:49778ms step_avg:92.01ms +[2025-09-11 11:41:12] [Rank 0] step:561/10000 train_time:50411ms step_avg:89.86ms +[2025-09-11 11:41:12] [Rank 0] step:561/10000 train_time:50411ms step_avg:89.86ms +[2025-09-11 11:41:12] [Rank 0] 
step:581/10000 train_time:51043ms step_avg:87.85ms +[2025-09-11 11:41:12] [Rank 0] step:581/10000 train_time:51043ms step_avg:87.85ms +[2025-09-11 11:41:13] [Rank 0] step:601/10000 train_time:51676ms step_avg:85.98ms +[2025-09-11 11:41:13] [Rank 0] step:601/10000 train_time:51676ms step_avg:85.98ms +[2025-09-11 11:41:14] [Rank 0] step:621/10000 train_time:52308ms step_avg:84.23ms +[2025-09-11 11:41:14] [Rank 0] step:621/10000 train_time:52308ms step_avg:84.23ms +[2025-09-11 11:41:14] [Rank 0] step:641/10000 train_time:52941ms step_avg:82.59ms +[2025-09-11 11:41:14] [Rank 0] step:641/10000 train_time:52941ms step_avg:82.59ms +[2025-09-11 11:41:15] [Rank 0] step:661/10000 train_time:53574ms step_avg:81.05ms +[2025-09-11 11:41:15] [Rank 0] step:661/10000 train_time:53574ms step_avg:81.05ms +[2025-09-11 11:41:15] [Rank 0] step:681/10000 train_time:54205ms step_avg:79.60ms +[2025-09-11 11:41:15] [Rank 0] step:681/10000 train_time:54205ms step_avg:79.60ms +[2025-09-11 11:41:16] [Rank 0] step:701/10000 train_time:54838ms step_avg:78.23ms +[2025-09-11 11:41:16] [Rank 0] step:701/10000 train_time:54838ms step_avg:78.23ms +[2025-09-11 11:41:17] [Rank 0] step:721/10000 train_time:55470ms step_avg:76.94ms +[2025-09-11 11:41:17] [Rank 0] step:721/10000 train_time:55470ms step_avg:76.94ms +[2025-09-11 11:41:17] [Rank 0] step:741/10000 train_time:56103ms step_avg:75.71ms +[2025-09-11 11:41:17] [Rank 0] step:741/10000 train_time:56103ms step_avg:75.71ms +[2025-09-11 11:41:18] [Rank 0] step:761/10000 train_time:56741ms step_avg:74.56ms +[2025-09-11 11:41:18] [Rank 0] step:761/10000 train_time:56741ms step_avg:74.56ms +[2025-09-11 11:41:19] [Rank 0] step:781/10000 train_time:57378ms step_avg:73.47ms +[2025-09-11 11:41:19] [Rank 0] step:781/10000 train_time:57378ms step_avg:73.47ms +[2025-09-11 11:41:19] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 11:41:19] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 11:41:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 11:41:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 11:41:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 11:41:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 11:41:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:41:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:41:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 11:41:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 11:41:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 11:41:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 11:41:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 11:41:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 11:41:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 11:41:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 11:41:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 11:41:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 11:41:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 11:41:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 11:41:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 11:41:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 11:42:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 11:42:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 11:42:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 11:42:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 11:42:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 11:42:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 11:42:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 11:42:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 11:42:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 11:42:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 11:42:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 11:42:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 11:42:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 11:42:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 11:42:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 11:42:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 11:42:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 11:42:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 11:42:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... 
+[2025-09-11 11:42:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 11:42:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 11:42:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 11:42:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:42:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:42:02] [Rank 0] PRINT: step:800/10000 val_loss:6.2535 total_sharp:1.7314e-02 L1_sharp:1.3018e-02 L2_sharp:5.3995e-03 L3_sharp:4.7179e-03 L4_sharp:4.2998e-03 L5_sharp:3.9635e-03 L6_sharp:2.2457e-03 L7_sharp:2.2767e-03 L8_sharp:1.8026e-03 L9_sharp:2.3874e-03 L10_sharp:2.7489e-03 L11_sharp:3.4302e-03 L12_sharp:9.3882e-03 total_fnorm:1.0188e+01 total_l1_linf:3.0976e+04 total_spectral:5.0938e+00 L1_fnorm:2.5000e+00 L2_fnorm:2.4844e+00 L3_fnorm:2.4688e+00 L4_fnorm:2.4219e+00 L5_fnorm:2.3438e+00 L6_fnorm:2.3125e+00 L7_fnorm:2.2188e+00 L8_fnorm:2.0625e+00 L9_fnorm:2.0469e+00 L10_fnorm:1.9688e+00 L11_fnorm:1.8516e+00 L12_fnorm:1.6641e+00 L1_l1linf:8.0859e-01 L2_l1linf:7.5391e-01 L3_l1linf:7.6172e-01 L4_l1linf:7.4219e-01 L5_l1linf:7.3438e-01 L6_l1linf:7.2266e-01 L7_l1linf:6.9922e-01 L8_l1linf:6.9531e-01 L9_l1linf:6.3281e-01 L10_l1linf:5.7422e-01 L11_l1linf:5.1562e-01 L12_l1linf:4.5117e-01 L1_spectral:2.6870e-02 L2_spectral:2.6796e-02 L3_spectral:2.6630e-02 L4_spectral:2.6490e-02 L5_spectral:2.6581e-02 L6_spectral:2.6610e-02 L7_spectral:2.6610e-02 L8_spectral:2.6635e-02 L9_spectral:2.6600e-02 L10_spectral:2.6562e-02 L11_spectral:2.6523e-02 L12_spectral:2.6026e-02 train_time:57998ms step_avg:72.50ms +[2025-09-11 11:42:02] [Rank 0] PRINT: step:800/10000 val_loss:6.2535 total_sharp:1.7314e-02 L1_sharp:1.3018e-02 L2_sharp:5.3995e-03 L3_sharp:4.7179e-03 L4_sharp:4.2998e-03 L5_sharp:3.9635e-03 L6_sharp:2.2457e-03 L7_sharp:2.2767e-03 L8_sharp:1.8026e-03 
L9_sharp:2.3874e-03 L10_sharp:2.7489e-03 L11_sharp:3.4302e-03 L12_sharp:9.3882e-03 total_fnorm:1.0188e+01 total_l1_linf:3.0976e+04 total_spectral:5.0938e+00 L1_fnorm:2.5000e+00 L2_fnorm:2.4844e+00 L3_fnorm:2.4688e+00 L4_fnorm:2.4219e+00 L5_fnorm:2.3438e+00 L6_fnorm:2.3125e+00 L7_fnorm:2.2188e+00 L8_fnorm:2.0625e+00 L9_fnorm:2.0469e+00 L10_fnorm:1.9688e+00 L11_fnorm:1.8516e+00 L12_fnorm:1.6641e+00 L1_l1linf:8.0859e-01 L2_l1linf:7.5391e-01 L3_l1linf:7.6172e-01 L4_l1linf:7.4219e-01 L5_l1linf:7.3438e-01 L6_l1linf:7.2266e-01 L7_l1linf:6.9922e-01 L8_l1linf:6.9531e-01 L9_l1linf:6.3281e-01 L10_l1linf:5.7422e-01 L11_l1linf:5.1562e-01 L12_l1linf:4.5117e-01 L1_spectral:2.6870e-02 L2_spectral:2.6796e-02 L3_spectral:2.6630e-02 L4_spectral:2.6490e-02 L5_spectral:2.6581e-02 L6_spectral:2.6610e-02 L7_spectral:2.6610e-02 L8_spectral:2.6635e-02 L9_spectral:2.6600e-02 L10_spectral:2.6562e-02 L11_spectral:2.6523e-02 L12_spectral:2.6026e-02 train_time:57998ms step_avg:72.50ms +[2025-09-11 11:42:03] [Rank 0] step:801/10000 train_time:59089ms step_avg:73.77ms +[2025-09-11 11:42:03] [Rank 0] step:801/10000 train_time:59089ms step_avg:73.77ms +[2025-09-11 11:42:04] [Rank 0] step:821/10000 train_time:59716ms step_avg:72.74ms +[2025-09-11 11:42:04] [Rank 0] step:821/10000 train_time:59716ms step_avg:72.74ms +[2025-09-11 11:42:05] [Rank 0] step:841/10000 train_time:60357ms step_avg:71.77ms +[2025-09-11 11:42:05] [Rank 0] step:841/10000 train_time:60357ms step_avg:71.77ms +[2025-09-11 11:42:05] [Rank 0] step:861/10000 train_time:60996ms step_avg:70.84ms +[2025-09-11 11:42:05] [Rank 0] step:861/10000 train_time:60996ms step_avg:70.84ms +[2025-09-11 11:42:06] [Rank 0] step:881/10000 train_time:61809ms step_avg:70.16ms +[2025-09-11 11:42:06] [Rank 0] step:881/10000 train_time:61809ms step_avg:70.16ms +[2025-09-11 11:42:07] [Rank 0] step:901/10000 train_time:62820ms step_avg:69.72ms +[2025-09-11 11:42:07] [Rank 0] step:901/10000 train_time:62820ms step_avg:69.72ms +[2025-09-11 11:42:08] [Rank 0] 
step:921/10000 train_time:63458ms step_avg:68.90ms +[2025-09-11 11:42:08] [Rank 0] step:921/10000 train_time:63458ms step_avg:68.90ms +[2025-09-11 11:42:08] [Rank 0] step:941/10000 train_time:64097ms step_avg:68.12ms +[2025-09-11 11:42:08] [Rank 0] step:941/10000 train_time:64097ms step_avg:68.12ms +[2025-09-11 11:42:09] [Rank 0] step:961/10000 train_time:64994ms step_avg:67.63ms +[2025-09-11 11:42:09] [Rank 0] step:961/10000 train_time:64994ms step_avg:67.63ms +[2025-09-11 11:42:10] [Rank 0] step:981/10000 train_time:65632ms step_avg:66.90ms +[2025-09-11 11:42:10] [Rank 0] step:981/10000 train_time:65632ms step_avg:66.90ms +[2025-09-11 11:42:11] [Rank 0] step:1001/10000 train_time:66271ms step_avg:66.20ms +[2025-09-11 11:42:11] [Rank 0] step:1001/10000 train_time:66271ms step_avg:66.20ms +[2025-09-11 11:42:11] [Rank 0] step:1021/10000 train_time:66909ms step_avg:65.53ms +[2025-09-11 11:42:11] [Rank 0] step:1021/10000 train_time:66909ms step_avg:65.53ms +[2025-09-11 11:42:12] [Rank 0] step:1041/10000 train_time:67547ms step_avg:64.89ms +[2025-09-11 11:42:12] [Rank 0] step:1041/10000 train_time:67547ms step_avg:64.89ms +[2025-09-11 11:42:13] [Rank 0] step:1061/10000 train_time:68186ms step_avg:64.27ms +[2025-09-11 11:42:13] [Rank 0] step:1061/10000 train_time:68186ms step_avg:64.27ms +[2025-09-11 11:42:13] [Rank 0] step:1081/10000 train_time:68824ms step_avg:63.67ms +[2025-09-11 11:42:13] [Rank 0] step:1081/10000 train_time:68824ms step_avg:63.67ms +[2025-09-11 11:42:14] [Rank 0] step:1101/10000 train_time:69463ms step_avg:63.09ms +[2025-09-11 11:42:14] [Rank 0] step:1101/10000 train_time:69463ms step_avg:63.09ms +[2025-09-11 11:42:14] [Rank 0] step:1121/10000 train_time:70101ms step_avg:62.53ms +[2025-09-11 11:42:14] [Rank 0] step:1121/10000 train_time:70101ms step_avg:62.53ms +[2025-09-11 11:42:15] [Rank 0] step:1141/10000 train_time:70740ms step_avg:62.00ms +[2025-09-11 11:42:15] [Rank 0] step:1141/10000 train_time:70740ms step_avg:62.00ms +[2025-09-11 11:42:16] 
[Rank 0] step:1161/10000 train_time:71378ms step_avg:61.48ms +[2025-09-11 11:42:16] [Rank 0] step:1161/10000 train_time:71378ms step_avg:61.48ms +[2025-09-11 11:42:16] [Rank 0] step:1181/10000 train_time:72016ms step_avg:60.98ms +[2025-09-11 11:42:16] [Rank 0] step:1181/10000 train_time:72016ms step_avg:60.98ms +[2025-09-11 11:42:17] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 11:42:17] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 11:42:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 11:42:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 11:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 11:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 11:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 11:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 11:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 11:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 11:42:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 11:42:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 11:42:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... 
+[2025-09-11 11:42:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 11:42:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 11:42:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 11:42:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 11:42:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 11:42:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 11:42:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 11:42:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 11:42:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 11:42:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 11:42:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 11:42:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 11:42:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 11:42:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 11:42:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 11:42:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 11:42:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 11:42:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 11:42:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 11:42:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... 
+[2025-09-11 11:42:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 11:42:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 11:42:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 11:42:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 11:42:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 11:42:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 11:42:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 11:42:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 11:42:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 11:42:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:42:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:42:27] [Rank 0] PRINT: step:1200/10000 val_loss:5.9328 total_sharp:1.3180e-02 L1_sharp:7.0680e-03 L2_sharp:2.5300e-03 L3_sharp:2.2079e-03 L4_sharp:2.3930e-03 L5_sharp:3.1381e-03 L6_sharp:1.7344e-03 L7_sharp:1.5830e-03 L8_sharp:1.7289e-03 L9_sharp:1.9008e-03 L10_sharp:1.7117e-03 L11_sharp:1.8896e-03 L12_sharp:8.1329e-03 total_fnorm:1.0188e+01 total_l1_linf:3.0976e+04 total_spectral:5.0625e+00 L1_fnorm:2.5312e+00 L2_fnorm:2.4844e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.4688e+00 L5_fnorm:2.4219e+00 L6_fnorm:2.4688e+00 L7_fnorm:2.4375e+00 L8_fnorm:2.3438e+00 L9_fnorm:2.3438e+00 L10_fnorm:2.3281e+00 L11_fnorm:2.2500e+00 L12_fnorm:2.0312e+00 L1_l1linf:7.5000e-01 L2_l1linf:7.1875e-01 L3_l1linf:7.3438e-01 L4_l1linf:7.3047e-01 L5_l1linf:7.2656e-01 L6_l1linf:7.3438e-01 L7_l1linf:7.3828e-01 L8_l1linf:7.3828e-01 L9_l1linf:7.0703e-01 L10_l1linf:6.8750e-01 L11_l1linf:6.4062e-01 L12_l1linf:5.0391e-01 L1_spectral:2.7958e-02 L2_spectral:2.7616e-02 L3_spectral:2.7551e-02 L4_spectral:2.7463e-02 L5_spectral:2.7468e-02 L6_spectral:2.7303e-02 L7_spectral:2.7290e-02 L8_spectral:2.7435e-02 L9_spectral:2.7529e-02 L10_spectral:2.7666e-02 L11_spectral:2.7687e-02 L12_spectral:2.7543e-02 train_time:72637ms step_avg:60.53ms +[2025-09-11 11:42:27] [Rank 0] PRINT: step:1200/10000 val_loss:5.9328 total_sharp:1.3180e-02 L1_sharp:7.0680e-03 L2_sharp:2.5300e-03 L3_sharp:2.2079e-03 L4_sharp:2.3930e-03 L5_sharp:3.1381e-03 L6_sharp:1.7344e-03 L7_sharp:1.5830e-03 L8_sharp:1.7289e-03 L9_sharp:1.9008e-03 L10_sharp:1.7117e-03 L11_sharp:1.8896e-03 L12_sharp:8.1329e-03 total_fnorm:1.0188e+01 total_l1_linf:3.0976e+04 total_spectral:5.0625e+00 L1_fnorm:2.5312e+00 L2_fnorm:2.4844e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.4688e+00 L5_fnorm:2.4219e+00 L6_fnorm:2.4688e+00 L7_fnorm:2.4375e+00 L8_fnorm:2.3438e+00 L9_fnorm:2.3438e+00 L10_fnorm:2.3281e+00 L11_fnorm:2.2500e+00 L12_fnorm:2.0312e+00 L1_l1linf:7.5000e-01 L2_l1linf:7.1875e-01 L3_l1linf:7.3438e-01 L4_l1linf:7.3047e-01 L5_l1linf:7.2656e-01 
L6_l1linf:7.3438e-01 L7_l1linf:7.3828e-01 L8_l1linf:7.3828e-01 L9_l1linf:7.0703e-01 L10_l1linf:6.8750e-01 L11_l1linf:6.4062e-01 L12_l1linf:5.0391e-01 L1_spectral:2.7958e-02 L2_spectral:2.7616e-02 L3_spectral:2.7551e-02 L4_spectral:2.7463e-02 L5_spectral:2.7468e-02 L6_spectral:2.7303e-02 L7_spectral:2.7290e-02 L8_spectral:2.7435e-02 L9_spectral:2.7529e-02 L10_spectral:2.7666e-02 L11_spectral:2.7687e-02 L12_spectral:2.7543e-02 train_time:72637ms step_avg:60.53ms +[2025-09-11 11:42:28] [Rank 0] step:1201/10000 train_time:73742ms step_avg:61.40ms +[2025-09-11 11:42:28] [Rank 0] step:1201/10000 train_time:73742ms step_avg:61.40ms +[2025-09-11 11:42:28] [Rank 0] step:1221/10000 train_time:74372ms step_avg:60.91ms +[2025-09-11 11:42:28] [Rank 0] step:1221/10000 train_time:74372ms step_avg:60.91ms +[2025-09-11 11:42:29] [Rank 0] step:1241/10000 train_time:75011ms step_avg:60.44ms +[2025-09-11 11:42:29] [Rank 0] step:1241/10000 train_time:75011ms step_avg:60.44ms +[2025-09-11 11:42:30] [Rank 0] step:1261/10000 train_time:75649ms step_avg:59.99ms +[2025-09-11 11:42:30] [Rank 0] step:1261/10000 train_time:75649ms step_avg:59.99ms +[2025-09-11 11:42:30] [Rank 0] step:1281/10000 train_time:76288ms step_avg:59.55ms +[2025-09-11 11:42:30] [Rank 0] step:1281/10000 train_time:76288ms step_avg:59.55ms +[2025-09-11 11:42:31] [Rank 0] step:1301/10000 train_time:76927ms step_avg:59.13ms +[2025-09-11 11:42:31] [Rank 0] step:1301/10000 train_time:76927ms step_avg:59.13ms +[2025-09-11 11:42:32] [Rank 0] step:1321/10000 train_time:77565ms step_avg:58.72ms +[2025-09-11 11:42:32] [Rank 0] step:1321/10000 train_time:77565ms step_avg:58.72ms +[2025-09-11 11:42:32] [Rank 0] step:1341/10000 train_time:78203ms step_avg:58.32ms +[2025-09-11 11:42:32] [Rank 0] step:1341/10000 train_time:78203ms step_avg:58.32ms +[2025-09-11 11:42:33] [Rank 0] step:1361/10000 train_time:78841ms step_avg:57.93ms +[2025-09-11 11:42:33] [Rank 0] step:1361/10000 train_time:78841ms step_avg:57.93ms +[2025-09-11 11:42:34] 
[Rank 0] step:1381/10000 train_time:79479ms step_avg:57.55ms +[2025-09-11 11:42:34] [Rank 0] step:1381/10000 train_time:79479ms step_avg:57.55ms +[2025-09-11 11:42:34] [Rank 0] step:1401/10000 train_time:80117ms step_avg:57.19ms +[2025-09-11 11:42:34] [Rank 0] step:1401/10000 train_time:80117ms step_avg:57.19ms +[2025-09-11 11:42:35] [Rank 0] step:1421/10000 train_time:80755ms step_avg:56.83ms +[2025-09-11 11:42:35] [Rank 0] step:1421/10000 train_time:80755ms step_avg:56.83ms +[2025-09-11 11:42:36] [Rank 0] step:1441/10000 train_time:81393ms step_avg:56.48ms +[2025-09-11 11:42:36] [Rank 0] step:1441/10000 train_time:81393ms step_avg:56.48ms +[2025-09-11 11:42:36] [Rank 0] step:1461/10000 train_time:82031ms step_avg:56.15ms +[2025-09-11 11:42:36] [Rank 0] step:1461/10000 train_time:82031ms step_avg:56.15ms +[2025-09-11 11:42:37] [Rank 0] step:1481/10000 train_time:82669ms step_avg:55.82ms +[2025-09-11 11:42:37] [Rank 0] step:1481/10000 train_time:82669ms step_avg:55.82ms +[2025-09-11 11:42:37] [Rank 0] step:1501/10000 train_time:83311ms step_avg:55.50ms +[2025-09-11 11:42:37] [Rank 0] step:1501/10000 train_time:83311ms step_avg:55.50ms +[2025-09-11 11:42:38] [Rank 0] step:1521/10000 train_time:83953ms step_avg:55.20ms +[2025-09-11 11:42:38] [Rank 0] step:1521/10000 train_time:83953ms step_avg:55.20ms +[2025-09-11 11:42:39] [Rank 0] step:1541/10000 train_time:84595ms step_avg:54.90ms +[2025-09-11 11:42:39] [Rank 0] step:1541/10000 train_time:84595ms step_avg:54.90ms +[2025-09-11 11:42:39] [Rank 0] step:1561/10000 train_time:85237ms step_avg:54.60ms +[2025-09-11 11:42:39] [Rank 0] step:1561/10000 train_time:85237ms step_avg:54.60ms +[2025-09-11 11:42:40] [Rank 0] step:1581/10000 train_time:85879ms step_avg:54.32ms +[2025-09-11 11:42:40] [Rank 0] step:1581/10000 train_time:85879ms step_avg:54.32ms +[2025-09-11 11:42:41] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 11:42:41] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 11:42:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 11:42:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 11:42:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 11:42:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 11:42:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:42:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:42:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 11:42:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 11:42:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 11:42:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 11:42:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 11:42:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 11:42:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 11:42:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 11:42:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 11:42:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 11:42:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 11:42:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 11:42:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 11:42:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 11:42:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 11:42:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 11:42:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 11:42:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 11:42:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 11:42:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 11:42:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 11:42:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 11:42:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 11:42:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 11:42:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 11:42:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 11:42:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 11:42:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 11:42:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 11:42:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 11:42:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 11:42:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 11:42:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 11:42:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 11:42:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 11:42:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 11:42:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:42:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:42:50] [Rank 0] PRINT: step:1600/10000 val_loss:5.7394 total_sharp:1.0094e-02 L1_sharp:6.8712e-03 L2_sharp:1.8429e-03 L3_sharp:1.2342e-03 L4_sharp:1.4141e-03 L5_sharp:2.5482e-03 L6_sharp:1.3519e-03 L7_sharp:1.0735e-03 L8_sharp:1.6121e-03 L9_sharp:1.3118e-03 L10_sharp:1.6073e-03 L11_sharp:1.5217e-03 L12_sharp:9.5031e-03 total_fnorm:1.0250e+01 total_l1_linf:3.0336e+04 total_spectral:5.0938e+00 L1_fnorm:2.5469e+00 L2_fnorm:2.5156e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.4531e+00 L6_fnorm:2.5000e+00 L7_fnorm:2.5000e+00 L8_fnorm:2.3906e+00 L9_fnorm:2.4531e+00 L10_fnorm:2.4375e+00 L11_fnorm:2.3750e+00 L12_fnorm:2.1406e+00 L1_l1linf:7.4609e-01 L2_l1linf:6.7578e-01 L3_l1linf:7.0312e-01 L4_l1linf:6.9922e-01 L5_l1linf:6.8750e-01 L6_l1linf:6.9141e-01 L7_l1linf:6.9531e-01 L8_l1linf:7.1094e-01 L9_l1linf:7.0312e-01 L10_l1linf:7.0312e-01 L11_l1linf:6.6016e-01 L12_l1linf:4.9414e-01 L1_spectral:2.9081e-02 L2_spectral:2.8393e-02 L3_spectral:2.8411e-02 L4_spectral:2.8595e-02 L5_spectral:2.8665e-02 L6_spectral:2.8157e-02 L7_spectral:2.8051e-02 L8_spectral:2.7806e-02 L9_spectral:2.7898e-02 L10_spectral:2.8205e-02 L11_spectral:2.8374e-02 L12_spectral:2.8340e-02 train_time:86503ms step_avg:54.06ms +[2025-09-11 11:42:50] [Rank 0] PRINT: step:1600/10000 
val_loss:5.7394 total_sharp:1.0094e-02 L1_sharp:6.8712e-03 L2_sharp:1.8429e-03 L3_sharp:1.2342e-03 L4_sharp:1.4141e-03 L5_sharp:2.5482e-03 L6_sharp:1.3519e-03 L7_sharp:1.0735e-03 L8_sharp:1.6121e-03 L9_sharp:1.3118e-03 L10_sharp:1.6073e-03 L11_sharp:1.5217e-03 L12_sharp:9.5031e-03 total_fnorm:1.0250e+01 total_l1_linf:3.0336e+04 total_spectral:5.0938e+00 L1_fnorm:2.5469e+00 L2_fnorm:2.5156e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.4531e+00 L6_fnorm:2.5000e+00 L7_fnorm:2.5000e+00 L8_fnorm:2.3906e+00 L9_fnorm:2.4531e+00 L10_fnorm:2.4375e+00 L11_fnorm:2.3750e+00 L12_fnorm:2.1406e+00 L1_l1linf:7.4609e-01 L2_l1linf:6.7578e-01 L3_l1linf:7.0312e-01 L4_l1linf:6.9922e-01 L5_l1linf:6.8750e-01 L6_l1linf:6.9141e-01 L7_l1linf:6.9531e-01 L8_l1linf:7.1094e-01 L9_l1linf:7.0312e-01 L10_l1linf:7.0312e-01 L11_l1linf:6.6016e-01 L12_l1linf:4.9414e-01 L1_spectral:2.9081e-02 L2_spectral:2.8393e-02 L3_spectral:2.8411e-02 L4_spectral:2.8595e-02 L5_spectral:2.8665e-02 L6_spectral:2.8157e-02 L7_spectral:2.8051e-02 L8_spectral:2.7806e-02 L9_spectral:2.7898e-02 L10_spectral:2.8205e-02 L11_spectral:2.8374e-02 L12_spectral:2.8340e-02 train_time:86503ms step_avg:54.06ms +[2025-09-11 11:42:51] [Rank 0] step:1601/10000 train_time:87620ms step_avg:54.73ms +[2025-09-11 11:42:51] [Rank 0] step:1601/10000 train_time:87620ms step_avg:54.73ms +[2025-09-11 11:42:52] [Rank 0] step:1621/10000 train_time:88253ms step_avg:54.44ms +[2025-09-11 11:42:52] [Rank 0] step:1621/10000 train_time:88253ms step_avg:54.44ms +[2025-09-11 11:42:53] [Rank 0] step:1641/10000 train_time:88896ms step_avg:54.17ms +[2025-09-11 11:42:53] [Rank 0] step:1641/10000 train_time:88896ms step_avg:54.17ms +[2025-09-11 11:42:53] [Rank 0] step:1661/10000 train_time:89539ms step_avg:53.91ms +[2025-09-11 11:42:53] [Rank 0] step:1661/10000 train_time:89539ms step_avg:53.91ms +[2025-09-11 11:42:54] [Rank 0] step:1681/10000 train_time:90183ms step_avg:53.65ms +[2025-09-11 11:42:54] [Rank 0] step:1681/10000 train_time:90183ms 
step_avg:53.65ms +[2025-09-11 11:42:55] [Rank 0] step:1701/10000 train_time:90825ms step_avg:53.40ms +[2025-09-11 11:42:55] [Rank 0] step:1701/10000 train_time:90825ms step_avg:53.40ms +[2025-09-11 11:42:55] [Rank 0] step:1721/10000 train_time:91470ms step_avg:53.15ms +[2025-09-11 11:42:55] [Rank 0] step:1721/10000 train_time:91470ms step_avg:53.15ms +[2025-09-11 11:42:56] [Rank 0] step:1741/10000 train_time:92113ms step_avg:52.91ms +[2025-09-11 11:42:56] [Rank 0] step:1741/10000 train_time:92113ms step_avg:52.91ms +[2025-09-11 11:42:57] [Rank 0] step:1761/10000 train_time:92756ms step_avg:52.67ms +[2025-09-11 11:42:57] [Rank 0] step:1761/10000 train_time:92756ms step_avg:52.67ms +[2025-09-11 11:42:57] [Rank 0] step:1781/10000 train_time:93399ms step_avg:52.44ms +[2025-09-11 11:42:57] [Rank 0] step:1781/10000 train_time:93399ms step_avg:52.44ms +[2025-09-11 11:42:58] [Rank 0] step:1801/10000 train_time:94041ms step_avg:52.22ms +[2025-09-11 11:42:58] [Rank 0] step:1801/10000 train_time:94041ms step_avg:52.22ms +[2025-09-11 11:42:59] [Rank 0] step:1821/10000 train_time:94684ms step_avg:52.00ms +[2025-09-11 11:42:59] [Rank 0] step:1821/10000 train_time:94684ms step_avg:52.00ms +[2025-09-11 11:42:59] [Rank 0] step:1841/10000 train_time:95326ms step_avg:51.78ms +[2025-09-11 11:42:59] [Rank 0] step:1841/10000 train_time:95326ms step_avg:51.78ms +[2025-09-11 11:43:00] [Rank 0] step:1861/10000 train_time:95969ms step_avg:51.57ms +[2025-09-11 11:43:00] [Rank 0] step:1861/10000 train_time:95969ms step_avg:51.57ms +[2025-09-11 11:43:00] [Rank 0] step:1881/10000 train_time:96612ms step_avg:51.36ms +[2025-09-11 11:43:00] [Rank 0] step:1881/10000 train_time:96612ms step_avg:51.36ms +[2025-09-11 11:43:01] [Rank 0] step:1901/10000 train_time:97254ms step_avg:51.16ms +[2025-09-11 11:43:01] [Rank 0] step:1901/10000 train_time:97254ms step_avg:51.16ms +[2025-09-11 11:43:02] [Rank 0] step:1921/10000 train_time:97897ms step_avg:50.96ms +[2025-09-11 11:43:02] [Rank 0] step:1921/10000 
train_time:97897ms step_avg:50.96ms +[2025-09-11 11:43:02] [Rank 0] step:1941/10000 train_time:98540ms step_avg:50.77ms +[2025-09-11 11:43:02] [Rank 0] step:1941/10000 train_time:98540ms step_avg:50.77ms +[2025-09-11 11:43:03] [Rank 0] step:1961/10000 train_time:99182ms step_avg:50.58ms +[2025-09-11 11:43:03] [Rank 0] step:1961/10000 train_time:99182ms step_avg:50.58ms +[2025-09-11 11:43:04] [Rank 0] step:1981/10000 train_time:99825ms step_avg:50.39ms +[2025-09-11 11:43:04] [Rank 0] step:1981/10000 train_time:99825ms step_avg:50.39ms +[2025-09-11 11:43:04] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 11:43:04] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 11:43:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 11:43:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 11:43:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 11:43:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 11:43:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:43:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:43:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 11:43:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 11:43:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 11:43:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 11:43:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 11:43:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 11:43:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 11:43:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 11:43:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 11:43:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 11:43:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 11:43:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 11:43:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 11:43:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 11:43:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 11:43:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 11:43:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 11:43:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 11:43:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 11:43:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 11:43:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 11:43:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 11:43:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 11:43:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 11:43:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 11:43:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 11:43:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 11:43:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 11:43:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 11:43:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 11:43:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 11:43:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 11:43:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 11:43:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 11:43:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 11:43:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 11:43:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:43:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:43:14] [Rank 0] PRINT: step:2000/10000 val_loss:5.6034 total_sharp:8.3247e-03 L1_sharp:3.8354e-03 L2_sharp:8.8036e-04 L3_sharp:1.0366e-03 L4_sharp:1.2288e-03 L5_sharp:1.8447e-03 L6_sharp:9.3954e-04 L7_sharp:1.0001e-03 L8_sharp:1.6365e-03 L9_sharp:1.3391e-03 L10_sharp:1.6097e-03 L11_sharp:1.5983e-03 L12_sharp:8.8891e-03 total_fnorm:1.0125e+01 total_l1_linf:2.9696e+04 total_spectral:5.0625e+00 L1_fnorm:2.5625e+00 L2_fnorm:2.5156e+00 L3_fnorm:2.5156e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.4688e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5000e+00 L8_fnorm:2.4219e+00 L9_fnorm:2.5000e+00 L10_fnorm:2.4688e+00 L11_fnorm:2.4375e+00 L12_fnorm:2.2188e+00 L1_l1linf:7.3828e-01 L2_l1linf:6.6406e-01 L3_l1linf:6.9141e-01 L4_l1linf:6.8359e-01 L5_l1linf:6.7188e-01 L6_l1linf:6.5625e-01 L7_l1linf:6.7188e-01 L8_l1linf:6.7969e-01 L9_l1linf:6.9141e-01 L10_l1linf:6.7969e-01 L11_l1linf:6.5625e-01 L12_l1linf:5.0000e-01 L1_spectral:2.9688e-02 L2_spectral:2.9240e-02 L3_spectral:2.8900e-02 L4_spectral:2.8994e-02 L5_spectral:2.9416e-02 L6_spectral:2.8916e-02 L7_spectral:2.8724e-02 L8_spectral:2.8426e-02 L9_spectral:2.8448e-02 L10_spectral:2.8468e-02 L11_spectral:2.8722e-02 L12_spectral:2.8933e-02 train_time:100450ms step_avg:50.23ms +[2025-09-11 11:43:14] [Rank 0] PRINT: step:2000/10000 val_loss:5.6034 total_sharp:8.3247e-03 L1_sharp:3.8354e-03 L2_sharp:8.8036e-04 L3_sharp:1.0366e-03 L4_sharp:1.2288e-03 L5_sharp:1.8447e-03 L6_sharp:9.3954e-04 L7_sharp:1.0001e-03 L8_sharp:1.6365e-03 L9_sharp:1.3391e-03 L10_sharp:1.6097e-03 L11_sharp:1.5983e-03 L12_sharp:8.8891e-03 total_fnorm:1.0125e+01 total_l1_linf:2.9696e+04 total_spectral:5.0625e+00 L1_fnorm:2.5625e+00 L2_fnorm:2.5156e+00 L3_fnorm:2.5156e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.4688e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5000e+00 L8_fnorm:2.4219e+00 L9_fnorm:2.5000e+00 L10_fnorm:2.4688e+00 L11_fnorm:2.4375e+00 L12_fnorm:2.2188e+00 L1_l1linf:7.3828e-01 L2_l1linf:6.6406e-01 L3_l1linf:6.9141e-01 L4_l1linf:6.8359e-01 L5_l1linf:6.7188e-01 
L6_l1linf:6.5625e-01 L7_l1linf:6.7188e-01 L8_l1linf:6.7969e-01 L9_l1linf:6.9141e-01 L10_l1linf:6.7969e-01 L11_l1linf:6.5625e-01 L12_l1linf:5.0000e-01 L1_spectral:2.9688e-02 L2_spectral:2.9240e-02 L3_spectral:2.8900e-02 L4_spectral:2.8994e-02 L5_spectral:2.9416e-02 L6_spectral:2.8916e-02 L7_spectral:2.8724e-02 L8_spectral:2.8426e-02 L9_spectral:2.8448e-02 L10_spectral:2.8468e-02 L11_spectral:2.8722e-02 L12_spectral:2.8933e-02 train_time:100450ms step_avg:50.23ms +[2025-09-11 11:43:15] [Rank 0] step:2001/10000 train_time:101582ms step_avg:50.77ms +[2025-09-11 11:43:15] [Rank 0] step:2001/10000 train_time:101582ms step_avg:50.77ms +[2025-09-11 11:43:16] [Rank 0] step:2021/10000 train_time:102218ms step_avg:50.58ms +[2025-09-11 11:43:16] [Rank 0] step:2021/10000 train_time:102218ms step_avg:50.58ms +[2025-09-11 11:43:17] [Rank 0] step:2041/10000 train_time:102861ms step_avg:50.40ms +[2025-09-11 11:43:17] [Rank 0] step:2041/10000 train_time:102861ms step_avg:50.40ms +[2025-09-11 11:43:17] [Rank 0] step:2061/10000 train_time:103504ms step_avg:50.22ms +[2025-09-11 11:43:17] [Rank 0] step:2061/10000 train_time:103504ms step_avg:50.22ms +[2025-09-11 11:43:18] [Rank 0] step:2081/10000 train_time:104147ms step_avg:50.05ms +[2025-09-11 11:43:18] [Rank 0] step:2081/10000 train_time:104147ms step_avg:50.05ms +[2025-09-11 11:43:19] [Rank 0] step:2101/10000 train_time:104789ms step_avg:49.88ms +[2025-09-11 11:43:19] [Rank 0] step:2101/10000 train_time:104789ms step_avg:49.88ms +[2025-09-11 11:43:19] [Rank 0] step:2121/10000 train_time:105432ms step_avg:49.71ms +[2025-09-11 11:43:19] [Rank 0] step:2121/10000 train_time:105432ms step_avg:49.71ms +[2025-09-11 11:43:20] [Rank 0] step:2141/10000 train_time:106074ms step_avg:49.54ms +[2025-09-11 11:43:20] [Rank 0] step:2141/10000 train_time:106074ms step_avg:49.54ms +[2025-09-11 11:43:20] [Rank 0] step:2161/10000 train_time:106716ms step_avg:49.38ms +[2025-09-11 11:43:20] [Rank 0] step:2161/10000 train_time:106716ms step_avg:49.38ms 
+[2025-09-11 11:43:21] [Rank 0] step:2181/10000 train_time:107359ms step_avg:49.22ms +[2025-09-11 11:43:21] [Rank 0] step:2181/10000 train_time:107359ms step_avg:49.22ms +[2025-09-11 11:43:22] [Rank 0] step:2201/10000 train_time:108001ms step_avg:49.07ms +[2025-09-11 11:43:22] [Rank 0] step:2201/10000 train_time:108001ms step_avg:49.07ms +[2025-09-11 11:43:22] [Rank 0] step:2221/10000 train_time:108643ms step_avg:48.92ms +[2025-09-11 11:43:22] [Rank 0] step:2221/10000 train_time:108643ms step_avg:48.92ms +[2025-09-11 11:43:23] [Rank 0] step:2241/10000 train_time:109298ms step_avg:48.77ms +[2025-09-11 11:43:23] [Rank 0] step:2241/10000 train_time:109298ms step_avg:48.77ms +[2025-09-11 11:43:24] [Rank 0] step:2261/10000 train_time:109953ms step_avg:48.63ms +[2025-09-11 11:43:24] [Rank 0] step:2261/10000 train_time:109953ms step_avg:48.63ms +[2025-09-11 11:43:24] [Rank 0] step:2281/10000 train_time:110609ms step_avg:48.49ms +[2025-09-11 11:43:24] [Rank 0] step:2281/10000 train_time:110609ms step_avg:48.49ms +[2025-09-11 11:43:25] [Rank 0] step:2301/10000 train_time:111266ms step_avg:48.36ms +[2025-09-11 11:43:25] [Rank 0] step:2301/10000 train_time:111266ms step_avg:48.36ms +[2025-09-11 11:43:26] [Rank 0] step:2321/10000 train_time:111922ms step_avg:48.22ms +[2025-09-11 11:43:26] [Rank 0] step:2321/10000 train_time:111922ms step_avg:48.22ms +[2025-09-11 11:43:26] [Rank 0] step:2341/10000 train_time:112578ms step_avg:48.09ms +[2025-09-11 11:43:26] [Rank 0] step:2341/10000 train_time:112578ms step_avg:48.09ms +[2025-09-11 11:43:27] [Rank 0] step:2361/10000 train_time:113234ms step_avg:47.96ms +[2025-09-11 11:43:27] [Rank 0] step:2361/10000 train_time:113234ms step_avg:47.96ms +[2025-09-11 11:43:28] [Rank 0] step:2381/10000 train_time:113889ms step_avg:47.83ms +[2025-09-11 11:43:28] [Rank 0] step:2381/10000 train_time:113889ms step_avg:47.83ms +[2025-09-11 11:43:28] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 11:43:28] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 11:43:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 11:43:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 11:43:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 11:43:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 11:43:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:43:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:43:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 11:43:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 11:43:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 11:43:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 11:43:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 11:43:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 11:43:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 11:43:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 11:43:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 11:43:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 11:43:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 11:43:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 11:43:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 11:43:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 11:43:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 11:43:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 11:43:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 11:43:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 11:43:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 11:43:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 11:43:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 11:43:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 11:43:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 11:43:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 11:43:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 11:43:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 11:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 11:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 11:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 11:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 11:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 11:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 11:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 11:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 11:43:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 11:43:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 11:43:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:43:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:43:40] [Rank 0] PRINT: step:2400/10000 val_loss:5.4745 total_sharp:7.1772e-03 L1_sharp:3.3562e-03 L2_sharp:1.2393e-03 L3_sharp:8.6971e-04 L4_sharp:9.8988e-04 L5_sharp:1.4115e-03 L6_sharp:8.2394e-04 L7_sharp:7.9901e-04 L8_sharp:1.3942e-03 L9_sharp:1.0704e-03 L10_sharp:1.4776e-03 L11_sharp:1.6241e-03 L12_sharp:7.1046e-03 total_fnorm:9.9375e+00 total_l1_linf:2.8672e+04 total_spectral:5.0000e+00 L1_fnorm:2.5469e+00 L2_fnorm:2.5156e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5312e+00 L8_fnorm:2.4375e+00 L9_fnorm:2.5000e+00 L10_fnorm:2.5000e+00 L11_fnorm:2.4688e+00 L12_fnorm:2.2969e+00 L1_l1linf:7.2266e-01 L2_l1linf:6.5625e-01 L3_l1linf:6.6797e-01 L4_l1linf:6.5234e-01 L5_l1linf:6.3281e-01 L6_l1linf:6.4062e-01 L7_l1linf:6.4453e-01 L8_l1linf:6.5625e-01 L9_l1linf:6.6797e-01 L10_l1linf:6.6797e-01 L11_l1linf:6.5625e-01 L12_l1linf:5.3516e-01 L1_spectral:3.0164e-02 L2_spectral:2.9903e-02 L3_spectral:2.9468e-02 L4_spectral:2.9532e-02 L5_spectral:3.0055e-02 L6_spectral:2.9665e-02 L7_spectral:2.9502e-02 L8_spectral:2.9088e-02 L9_spectral:2.9150e-02 L10_spectral:2.9141e-02 L11_spectral:2.9071e-02 L12_spectral:2.9401e-02 train_time:114526ms step_avg:47.72ms +[2025-09-11 11:43:40] [Rank 0] PRINT: step:2400/10000 
val_loss:5.4745 total_sharp:7.1772e-03 L1_sharp:3.3562e-03 L2_sharp:1.2393e-03 L3_sharp:8.6971e-04 L4_sharp:9.8988e-04 L5_sharp:1.4115e-03 L6_sharp:8.2394e-04 L7_sharp:7.9901e-04 L8_sharp:1.3942e-03 L9_sharp:1.0704e-03 L10_sharp:1.4776e-03 L11_sharp:1.6241e-03 L12_sharp:7.1046e-03 total_fnorm:9.9375e+00 total_l1_linf:2.8672e+04 total_spectral:5.0000e+00 L1_fnorm:2.5469e+00 L2_fnorm:2.5156e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5312e+00 L8_fnorm:2.4375e+00 L9_fnorm:2.5000e+00 L10_fnorm:2.5000e+00 L11_fnorm:2.4688e+00 L12_fnorm:2.2969e+00 L1_l1linf:7.2266e-01 L2_l1linf:6.5625e-01 L3_l1linf:6.6797e-01 L4_l1linf:6.5234e-01 L5_l1linf:6.3281e-01 L6_l1linf:6.4062e-01 L7_l1linf:6.4453e-01 L8_l1linf:6.5625e-01 L9_l1linf:6.6797e-01 L10_l1linf:6.6797e-01 L11_l1linf:6.5625e-01 L12_l1linf:5.3516e-01 L1_spectral:3.0164e-02 L2_spectral:2.9903e-02 L3_spectral:2.9468e-02 L4_spectral:2.9532e-02 L5_spectral:3.0055e-02 L6_spectral:2.9665e-02 L7_spectral:2.9502e-02 L8_spectral:2.9088e-02 L9_spectral:2.9150e-02 L10_spectral:2.9141e-02 L11_spectral:2.9071e-02 L12_spectral:2.9401e-02 train_time:114526ms step_avg:47.72ms +[2025-09-11 11:43:42] [Rank 0] step:2401/10000 train_time:115782ms step_avg:48.22ms +[2025-09-11 11:43:42] [Rank 0] step:2401/10000 train_time:115782ms step_avg:48.22ms +[2025-09-11 11:43:42] [Rank 0] step:2421/10000 train_time:116493ms step_avg:48.12ms +[2025-09-11 11:43:42] [Rank 0] step:2421/10000 train_time:116493ms step_avg:48.12ms +[2025-09-11 11:43:43] [Rank 0] step:2441/10000 train_time:117149ms step_avg:47.99ms +[2025-09-11 11:43:43] [Rank 0] step:2441/10000 train_time:117149ms step_avg:47.99ms +[2025-09-11 11:43:44] [Rank 0] step:2461/10000 train_time:117805ms step_avg:47.87ms +[2025-09-11 11:43:44] [Rank 0] step:2461/10000 train_time:117805ms step_avg:47.87ms +[2025-09-11 11:43:44] [Rank 0] step:2481/10000 train_time:118461ms step_avg:47.75ms +[2025-09-11 11:43:44] [Rank 0] step:2481/10000 
train_time:118461ms step_avg:47.75ms +[2025-09-11 11:43:45] [Rank 0] step:2501/10000 train_time:119117ms step_avg:47.63ms +[2025-09-11 11:43:45] [Rank 0] step:2501/10000 train_time:119117ms step_avg:47.63ms +[2025-09-11 11:43:46] [Rank 0] step:2521/10000 train_time:119772ms step_avg:47.51ms +[2025-09-11 11:43:46] [Rank 0] step:2521/10000 train_time:119772ms step_avg:47.51ms +[2025-09-11 11:43:46] [Rank 0] step:2541/10000 train_time:120428ms step_avg:47.39ms +[2025-09-11 11:43:46] [Rank 0] step:2541/10000 train_time:120428ms step_avg:47.39ms +[2025-09-11 11:43:47] [Rank 0] step:2561/10000 train_time:121084ms step_avg:47.28ms +[2025-09-11 11:43:47] [Rank 0] step:2561/10000 train_time:121084ms step_avg:47.28ms +[2025-09-11 11:43:48] [Rank 0] step:2581/10000 train_time:121739ms step_avg:47.17ms +[2025-09-11 11:43:48] [Rank 0] step:2581/10000 train_time:121739ms step_avg:47.17ms +[2025-09-11 11:43:48] [Rank 0] step:2601/10000 train_time:122394ms step_avg:47.06ms +[2025-09-11 11:43:48] [Rank 0] step:2601/10000 train_time:122394ms step_avg:47.06ms +[2025-09-11 11:43:49] [Rank 0] step:2621/10000 train_time:123049ms step_avg:46.95ms +[2025-09-11 11:43:49] [Rank 0] step:2621/10000 train_time:123049ms step_avg:46.95ms +[2025-09-11 11:43:50] [Rank 0] step:2641/10000 train_time:123704ms step_avg:46.84ms +[2025-09-11 11:43:50] [Rank 0] step:2641/10000 train_time:123704ms step_avg:46.84ms +[2025-09-11 11:43:50] [Rank 0] step:2661/10000 train_time:124359ms step_avg:46.73ms +[2025-09-11 11:43:50] [Rank 0] step:2661/10000 train_time:124359ms step_avg:46.73ms +[2025-09-11 11:43:51] [Rank 0] step:2681/10000 train_time:125014ms step_avg:46.63ms +[2025-09-11 11:43:51] [Rank 0] step:2681/10000 train_time:125014ms step_avg:46.63ms +[2025-09-11 11:43:52] [Rank 0] step:2701/10000 train_time:125670ms step_avg:46.53ms +[2025-09-11 11:43:52] [Rank 0] step:2701/10000 train_time:125670ms step_avg:46.53ms +[2025-09-11 11:43:52] [Rank 0] step:2721/10000 train_time:126326ms step_avg:46.43ms 
+[2025-09-11 11:43:52] [Rank 0] step:2721/10000 train_time:126326ms step_avg:46.43ms +[2025-09-11 11:43:53] [Rank 0] step:2741/10000 train_time:126982ms step_avg:46.33ms +[2025-09-11 11:43:53] [Rank 0] step:2741/10000 train_time:126982ms step_avg:46.33ms +[2025-09-11 11:43:54] [Rank 0] step:2761/10000 train_time:127638ms step_avg:46.23ms +[2025-09-11 11:43:54] [Rank 0] step:2761/10000 train_time:127638ms step_avg:46.23ms +[2025-09-11 11:43:54] [Rank 0] step:2781/10000 train_time:128293ms step_avg:46.13ms +[2025-09-11 11:43:54] [Rank 0] step:2781/10000 train_time:128293ms step_avg:46.13ms +[2025-09-11 11:43:55] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 11:43:55] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 11:43:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 11:43:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 11:43:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 11:43:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 11:43:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:43:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:43:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 11:43:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 11:43:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 11:43:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 11:44:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 11:44:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 11:44:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 11:44:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 11:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 11:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 11:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 11:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 11:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 11:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 11:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 11:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 11:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 11:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 11:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 11:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 11:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 11:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 11:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 11:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 11:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 11:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 11:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 11:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 11:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 11:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 11:44:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 11:44:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 11:44:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 11:44:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 11:44:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 11:44:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 11:44:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:44:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:44:05] [Rank 0] PRINT: step:2800/10000 val_loss:5.3981 total_sharp:7.4346e-03 L1_sharp:1.4696e-03 L2_sharp:1.2728e-03 L3_sharp:7.3005e-04 L4_sharp:5.9738e-04 L5_sharp:1.3750e-03 L6_sharp:8.0603e-04 L7_sharp:7.9086e-04 L8_sharp:1.5862e-03 L9_sharp:1.0547e-03 L10_sharp:1.3344e-03 L11_sharp:1.6108e-03 L12_sharp:5.4201e-03 total_fnorm:9.8125e+00 total_l1_linf:2.7904e+04 total_spectral:4.9375e+00 L1_fnorm:2.5625e+00 L2_fnorm:2.5000e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5312e+00 L8_fnorm:2.4375e+00 L9_fnorm:2.5000e+00 L10_fnorm:2.5000e+00 L11_fnorm:2.4844e+00 L12_fnorm:2.3125e+00 L1_l1linf:7.2266e-01 L2_l1linf:6.4062e-01 L3_l1linf:6.5234e-01 L4_l1linf:6.4844e-01 L5_l1linf:6.3281e-01 L6_l1linf:6.2500e-01 L7_l1linf:6.2891e-01 L8_l1linf:6.4062e-01 L9_l1linf:6.4844e-01 L10_l1linf:6.4844e-01 L11_l1linf:6.4453e-01 L12_l1linf:5.1953e-01 L1_spectral:3.0703e-02 L2_spectral:2.9865e-02 L3_spectral:2.9985e-02 L4_spectral:3.0209e-02 L5_spectral:3.0369e-02 L6_spectral:3.0127e-02 L7_spectral:3.0114e-02 L8_spectral:2.9586e-02 L9_spectral:2.9565e-02 L10_spectral:2.9482e-02 L11_spectral:2.9466e-02 L12_spectral:2.9626e-02 train_time:128929ms step_avg:46.05ms +[2025-09-11 11:44:05] [Rank 0] PRINT: step:2800/10000 val_loss:5.3981 total_sharp:7.4346e-03 L1_sharp:1.4696e-03 L2_sharp:1.2728e-03 L3_sharp:7.3005e-04 L4_sharp:5.9738e-04 L5_sharp:1.3750e-03 L6_sharp:8.0603e-04 L7_sharp:7.9086e-04 L8_sharp:1.5862e-03 L9_sharp:1.0547e-03 L10_sharp:1.3344e-03 L11_sharp:1.6108e-03 L12_sharp:5.4201e-03 total_fnorm:9.8125e+00 total_l1_linf:2.7904e+04 total_spectral:4.9375e+00 L1_fnorm:2.5625e+00 L2_fnorm:2.5000e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5312e+00 L8_fnorm:2.4375e+00 L9_fnorm:2.5000e+00 L10_fnorm:2.5000e+00 L11_fnorm:2.4844e+00 L12_fnorm:2.3125e+00 L1_l1linf:7.2266e-01 L2_l1linf:6.4062e-01 L3_l1linf:6.5234e-01 L4_l1linf:6.4844e-01 L5_l1linf:6.3281e-01 
L6_l1linf:6.2500e-01 L7_l1linf:6.2891e-01 L8_l1linf:6.4062e-01 L9_l1linf:6.4844e-01 L10_l1linf:6.4844e-01 L11_l1linf:6.4453e-01 L12_l1linf:5.1953e-01 L1_spectral:3.0703e-02 L2_spectral:2.9865e-02 L3_spectral:2.9985e-02 L4_spectral:3.0209e-02 L5_spectral:3.0369e-02 L6_spectral:3.0127e-02 L7_spectral:3.0114e-02 L8_spectral:2.9586e-02 L9_spectral:2.9565e-02 L10_spectral:2.9482e-02 L11_spectral:2.9466e-02 L12_spectral:2.9626e-02 train_time:128929ms step_avg:46.05ms +[2025-09-11 11:44:06] [Rank 0] step:2801/10000 train_time:130078ms step_avg:46.44ms +[2025-09-11 11:44:06] [Rank 0] step:2801/10000 train_time:130078ms step_avg:46.44ms +[2025-09-11 11:44:06] [Rank 0] step:2821/10000 train_time:130739ms step_avg:46.34ms +[2025-09-11 11:44:06] [Rank 0] step:2821/10000 train_time:130739ms step_avg:46.34ms +[2025-09-11 11:44:07] [Rank 0] step:2841/10000 train_time:131396ms step_avg:46.25ms +[2025-09-11 11:44:07] [Rank 0] step:2841/10000 train_time:131396ms step_avg:46.25ms +[2025-09-11 11:44:08] [Rank 0] step:2861/10000 train_time:132053ms step_avg:46.16ms +[2025-09-11 11:44:08] [Rank 0] step:2861/10000 train_time:132053ms step_avg:46.16ms +[2025-09-11 11:44:08] [Rank 0] step:2881/10000 train_time:132709ms step_avg:46.06ms +[2025-09-11 11:44:08] [Rank 0] step:2881/10000 train_time:132709ms step_avg:46.06ms +[2025-09-11 11:44:09] [Rank 0] step:2901/10000 train_time:133365ms step_avg:45.97ms +[2025-09-11 11:44:09] [Rank 0] step:2901/10000 train_time:133365ms step_avg:45.97ms +[2025-09-11 11:44:10] [Rank 0] step:2921/10000 train_time:134022ms step_avg:45.88ms +[2025-09-11 11:44:10] [Rank 0] step:2921/10000 train_time:134022ms step_avg:45.88ms +[2025-09-11 11:44:10] [Rank 0] step:2941/10000 train_time:134678ms step_avg:45.79ms +[2025-09-11 11:44:10] [Rank 0] step:2941/10000 train_time:134678ms step_avg:45.79ms +[2025-09-11 11:44:11] [Rank 0] step:2961/10000 train_time:135334ms step_avg:45.71ms +[2025-09-11 11:44:11] [Rank 0] step:2961/10000 train_time:135334ms step_avg:45.71ms 
+[2025-09-11 11:44:12] [Rank 0] step:2981/10000 train_time:135993ms step_avg:45.62ms +[2025-09-11 11:44:12] [Rank 0] step:2981/10000 train_time:135993ms step_avg:45.62ms +[2025-09-11 11:44:13] [Rank 0] step:3001/10000 train_time:136801ms step_avg:45.59ms +[2025-09-11 11:44:13] [Rank 0] step:3001/10000 train_time:136801ms step_avg:45.59ms +[2025-09-11 11:44:14] [Rank 0] step:3021/10000 train_time:137874ms step_avg:45.64ms +[2025-09-11 11:44:14] [Rank 0] step:3021/10000 train_time:137874ms step_avg:45.64ms +[2025-09-11 11:44:14] [Rank 0] step:3041/10000 train_time:138534ms step_avg:45.56ms +[2025-09-11 11:44:14] [Rank 0] step:3041/10000 train_time:138534ms step_avg:45.56ms +[2025-09-11 11:44:15] [Rank 0] step:3061/10000 train_time:139341ms step_avg:45.52ms +[2025-09-11 11:44:15] [Rank 0] step:3061/10000 train_time:139341ms step_avg:45.52ms +[2025-09-11 11:44:16] [Rank 0] step:3081/10000 train_time:140161ms step_avg:45.49ms +[2025-09-11 11:44:16] [Rank 0] step:3081/10000 train_time:140161ms step_avg:45.49ms +[2025-09-11 11:44:17] [Rank 0] step:3101/10000 train_time:140820ms step_avg:45.41ms +[2025-09-11 11:44:17] [Rank 0] step:3101/10000 train_time:140820ms step_avg:45.41ms +[2025-09-11 11:44:17] [Rank 0] step:3121/10000 train_time:141479ms step_avg:45.33ms +[2025-09-11 11:44:17] [Rank 0] step:3121/10000 train_time:141479ms step_avg:45.33ms +[2025-09-11 11:44:18] [Rank 0] step:3141/10000 train_time:142137ms step_avg:45.25ms +[2025-09-11 11:44:18] [Rank 0] step:3141/10000 train_time:142137ms step_avg:45.25ms +[2025-09-11 11:44:19] [Rank 0] step:3161/10000 train_time:142796ms step_avg:45.17ms +[2025-09-11 11:44:19] [Rank 0] step:3161/10000 train_time:142796ms step_avg:45.17ms +[2025-09-11 11:44:19] [Rank 0] step:3181/10000 train_time:143455ms step_avg:45.10ms +[2025-09-11 11:44:19] [Rank 0] step:3181/10000 train_time:143455ms step_avg:45.10ms +[2025-09-11 11:44:20] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 11:44:20] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 11:44:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 11:44:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 11:44:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 11:44:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 11:44:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:44:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:44:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 11:44:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 11:44:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 11:44:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 11:44:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 11:44:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 11:44:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 11:44:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 11:44:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 11:44:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 11:44:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 11:44:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 11:44:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 11:44:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 11:44:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 11:44:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 11:44:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 11:44:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 11:44:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 11:44:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 11:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 11:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 11:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 11:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 11:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 11:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 11:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 11:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 11:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 11:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 11:44:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 11:44:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 11:44:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 11:44:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 11:44:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 11:44:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 11:44:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:44:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:44:30] [Rank 0] PRINT: step:3200/10000 val_loss:5.3152 total_sharp:7.8899e-03 L1_sharp:4.3538e-03 L2_sharp:1.5968e-03 L3_sharp:6.4578e-04 L4_sharp:8.8664e-04 L5_sharp:1.3916e-03 L6_sharp:8.9203e-04 L7_sharp:7.2244e-04 L8_sharp:1.2160e-03 L9_sharp:1.1130e-03 L10_sharp:1.4206e-03 L11_sharp:1.5909e-03 L12_sharp:6.0313e-03 total_fnorm:1.0062e+01 total_l1_linf:2.8288e+04 total_spectral:5.0938e+00 L1_fnorm:2.5625e+00 L2_fnorm:2.5156e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5312e+00 L8_fnorm:2.4531e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5000e+00 L12_fnorm:2.3750e+00 L1_l1linf:6.9922e-01 L2_l1linf:6.3281e-01 L3_l1linf:6.4062e-01 L4_l1linf:6.3281e-01 L5_l1linf:6.0938e-01 L6_l1linf:6.0547e-01 L7_l1linf:6.1328e-01 L8_l1linf:6.1719e-01 L9_l1linf:6.2500e-01 L10_l1linf:6.2500e-01 L11_l1linf:6.3281e-01 L12_l1linf:5.3125e-01 L1_spectral:3.1205e-02 L2_spectral:3.0421e-02 L3_spectral:3.0316e-02 L4_spectral:3.0403e-02 L5_spectral:3.0639e-02 L6_spectral:3.0494e-02 L7_spectral:3.0584e-02 L8_spectral:2.9892e-02 L9_spectral:3.0149e-02 L10_spectral:3.0107e-02 L11_spectral:3.0006e-02 L12_spectral:2.9980e-02 train_time:144095ms step_avg:45.03ms +[2025-09-11 11:44:30] [Rank 0] PRINT: step:3200/10000 
val_loss:5.3152 total_sharp:7.8899e-03 L1_sharp:4.3538e-03 L2_sharp:1.5968e-03 L3_sharp:6.4578e-04 L4_sharp:8.8664e-04 L5_sharp:1.3916e-03 L6_sharp:8.9203e-04 L7_sharp:7.2244e-04 L8_sharp:1.2160e-03 L9_sharp:1.1130e-03 L10_sharp:1.4206e-03 L11_sharp:1.5909e-03 L12_sharp:6.0313e-03 total_fnorm:1.0062e+01 total_l1_linf:2.8288e+04 total_spectral:5.0938e+00 L1_fnorm:2.5625e+00 L2_fnorm:2.5156e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5312e+00 L8_fnorm:2.4531e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5000e+00 L12_fnorm:2.3750e+00 L1_l1linf:6.9922e-01 L2_l1linf:6.3281e-01 L3_l1linf:6.4062e-01 L4_l1linf:6.3281e-01 L5_l1linf:6.0938e-01 L6_l1linf:6.0547e-01 L7_l1linf:6.1328e-01 L8_l1linf:6.1719e-01 L9_l1linf:6.2500e-01 L10_l1linf:6.2500e-01 L11_l1linf:6.3281e-01 L12_l1linf:5.3125e-01 L1_spectral:3.1205e-02 L2_spectral:3.0421e-02 L3_spectral:3.0316e-02 L4_spectral:3.0403e-02 L5_spectral:3.0639e-02 L6_spectral:3.0494e-02 L7_spectral:3.0584e-02 L8_spectral:2.9892e-02 L9_spectral:3.0149e-02 L10_spectral:3.0107e-02 L11_spectral:3.0006e-02 L12_spectral:2.9980e-02 train_time:144095ms step_avg:45.03ms +[2025-09-11 11:44:31] [Rank 0] step:3201/10000 train_time:145214ms step_avg:45.37ms +[2025-09-11 11:44:31] [Rank 0] step:3201/10000 train_time:145214ms step_avg:45.37ms +[2025-09-11 11:44:31] [Rank 0] step:3221/10000 train_time:145863ms step_avg:45.29ms +[2025-09-11 11:44:31] [Rank 0] step:3221/10000 train_time:145863ms step_avg:45.29ms +[2025-09-11 11:44:32] [Rank 0] step:3241/10000 train_time:146524ms step_avg:45.21ms +[2025-09-11 11:44:32] [Rank 0] step:3241/10000 train_time:146524ms step_avg:45.21ms +[2025-09-11 11:44:33] [Rank 0] step:3261/10000 train_time:147185ms step_avg:45.13ms +[2025-09-11 11:44:33] [Rank 0] step:3261/10000 train_time:147185ms step_avg:45.13ms +[2025-09-11 11:44:33] [Rank 0] step:3281/10000 train_time:147845ms step_avg:45.06ms +[2025-09-11 11:44:33] [Rank 0] step:3281/10000 
train_time:147845ms step_avg:45.06ms +[2025-09-11 11:44:34] [Rank 0] step:3301/10000 train_time:148505ms step_avg:44.99ms +[2025-09-11 11:44:34] [Rank 0] step:3301/10000 train_time:148505ms step_avg:44.99ms +[2025-09-11 11:44:35] [Rank 0] step:3321/10000 train_time:149165ms step_avg:44.92ms +[2025-09-11 11:44:35] [Rank 0] step:3321/10000 train_time:149165ms step_avg:44.92ms +[2025-09-11 11:44:35] [Rank 0] step:3341/10000 train_time:149826ms step_avg:44.84ms +[2025-09-11 11:44:35] [Rank 0] step:3341/10000 train_time:149826ms step_avg:44.84ms +[2025-09-11 11:44:36] [Rank 0] step:3361/10000 train_time:150486ms step_avg:44.77ms +[2025-09-11 11:44:36] [Rank 0] step:3361/10000 train_time:150486ms step_avg:44.77ms +[2025-09-11 11:44:37] [Rank 0] step:3381/10000 train_time:151146ms step_avg:44.70ms +[2025-09-11 11:44:37] [Rank 0] step:3381/10000 train_time:151146ms step_avg:44.70ms +[2025-09-11 11:44:37] [Rank 0] step:3401/10000 train_time:151805ms step_avg:44.64ms +[2025-09-11 11:44:37] [Rank 0] step:3401/10000 train_time:151805ms step_avg:44.64ms +[2025-09-11 11:44:38] [Rank 0] step:3421/10000 train_time:152465ms step_avg:44.57ms +[2025-09-11 11:44:38] [Rank 0] step:3421/10000 train_time:152465ms step_avg:44.57ms +[2025-09-11 11:44:39] [Rank 0] step:3441/10000 train_time:153124ms step_avg:44.50ms +[2025-09-11 11:44:39] [Rank 0] step:3441/10000 train_time:153124ms step_avg:44.50ms +[2025-09-11 11:44:39] [Rank 0] step:3461/10000 train_time:153785ms step_avg:44.43ms +[2025-09-11 11:44:39] [Rank 0] step:3461/10000 train_time:153785ms step_avg:44.43ms +[2025-09-11 11:44:40] [Rank 0] step:3481/10000 train_time:154446ms step_avg:44.37ms +[2025-09-11 11:44:40] [Rank 0] step:3481/10000 train_time:154446ms step_avg:44.37ms +[2025-09-11 11:44:41] [Rank 0] step:3501/10000 train_time:155106ms step_avg:44.30ms +[2025-09-11 11:44:41] [Rank 0] step:3501/10000 train_time:155106ms step_avg:44.30ms +[2025-09-11 11:44:41] [Rank 0] step:3521/10000 train_time:155766ms step_avg:44.24ms 
+[2025-09-11 11:44:41] [Rank 0] step:3521/10000 train_time:155766ms step_avg:44.24ms +[2025-09-11 11:44:42] [Rank 0] step:3541/10000 train_time:156427ms step_avg:44.18ms +[2025-09-11 11:44:42] [Rank 0] step:3541/10000 train_time:156427ms step_avg:44.18ms +[2025-09-11 11:44:43] [Rank 0] step:3561/10000 train_time:157087ms step_avg:44.11ms +[2025-09-11 11:44:43] [Rank 0] step:3561/10000 train_time:157087ms step_avg:44.11ms +[2025-09-11 11:44:43] [Rank 0] step:3581/10000 train_time:157748ms step_avg:44.05ms +[2025-09-11 11:44:43] [Rank 0] step:3581/10000 train_time:157748ms step_avg:44.05ms +[2025-09-11 11:44:44] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 11:44:44] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 11:44:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 11:44:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 11:44:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 11:44:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 11:44:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:44:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:44:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 11:44:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 11:44:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 11:44:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 11:44:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 11:44:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 11:44:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 11:44:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 11:44:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 11:44:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 11:44:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 11:44:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 11:44:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 11:44:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 11:44:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 11:44:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 11:44:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 11:44:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 11:44:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 11:44:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 11:44:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 11:44:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 11:44:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 11:44:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 11:44:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 11:44:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 11:44:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 11:44:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 11:44:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 11:44:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 11:44:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 11:44:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 11:44:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 11:44:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 11:44:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 11:44:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 11:44:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:44:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:44:54] [Rank 0] PRINT: step:3600/10000 val_loss:5.2669 total_sharp:5.6834e-03 L1_sharp:3.0798e-03 L2_sharp:1.0392e-03 L3_sharp:3.3293e-04 L4_sharp:3.5455e-04 L5_sharp:1.1193e-03 L6_sharp:7.8043e-04 L7_sharp:7.9506e-04 L8_sharp:1.1812e-03 L9_sharp:9.1390e-04 L10_sharp:1.2420e-03 L11_sharp:1.2955e-03 L12_sharp:4.1825e-03 total_fnorm:9.6875e+00 total_l1_linf:2.6880e+04 total_spectral:4.9062e+00 L1_fnorm:2.5469e+00 L2_fnorm:2.5156e+00 L3_fnorm:2.5156e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5312e+00 L8_fnorm:2.4531e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.3906e+00 L1_l1linf:6.9531e-01 L2_l1linf:6.3281e-01 L3_l1linf:6.3672e-01 L4_l1linf:6.1719e-01 L5_l1linf:6.0156e-01 L6_l1linf:5.9375e-01 L7_l1linf:5.9766e-01 L8_l1linf:6.0156e-01 L9_l1linf:6.1328e-01 L10_l1linf:6.1328e-01 L11_l1linf:6.2109e-01 L12_l1linf:5.2734e-01 L1_spectral:3.1386e-02 L2_spectral:3.0802e-02 L3_spectral:3.0622e-02 L4_spectral:3.0684e-02 L5_spectral:3.1272e-02 L6_spectral:3.0884e-02 L7_spectral:3.1142e-02 L8_spectral:3.0269e-02 L9_spectral:3.0430e-02 L10_spectral:3.0426e-02 L11_spectral:3.0447e-02 L12_spectral:3.0171e-02 train_time:158389ms step_avg:44.00ms +[2025-09-11 11:44:54] [Rank 0] PRINT: step:3600/10000 val_loss:5.2669 total_sharp:5.6834e-03 L1_sharp:3.0798e-03 L2_sharp:1.0392e-03 L3_sharp:3.3293e-04 L4_sharp:3.5455e-04 L5_sharp:1.1193e-03 L6_sharp:7.8043e-04 L7_sharp:7.9506e-04 L8_sharp:1.1812e-03 L9_sharp:9.1390e-04 L10_sharp:1.2420e-03 L11_sharp:1.2955e-03 L12_sharp:4.1825e-03 total_fnorm:9.6875e+00 total_l1_linf:2.6880e+04 total_spectral:4.9062e+00 L1_fnorm:2.5469e+00 L2_fnorm:2.5156e+00 L3_fnorm:2.5156e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5312e+00 L8_fnorm:2.4531e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.3906e+00 L1_l1linf:6.9531e-01 L2_l1linf:6.3281e-01 L3_l1linf:6.3672e-01 L4_l1linf:6.1719e-01 L5_l1linf:6.0156e-01 
L6_l1linf:5.9375e-01 L7_l1linf:5.9766e-01 L8_l1linf:6.0156e-01 L9_l1linf:6.1328e-01 L10_l1linf:6.1328e-01 L11_l1linf:6.2109e-01 L12_l1linf:5.2734e-01 L1_spectral:3.1386e-02 L2_spectral:3.0802e-02 L3_spectral:3.0622e-02 L4_spectral:3.0684e-02 L5_spectral:3.1272e-02 L6_spectral:3.0884e-02 L7_spectral:3.1142e-02 L8_spectral:3.0269e-02 L9_spectral:3.0430e-02 L10_spectral:3.0426e-02 L11_spectral:3.0447e-02 L12_spectral:3.0171e-02 train_time:158389ms step_avg:44.00ms +[2025-09-11 11:44:55] [Rank 0] step:3601/10000 train_time:159537ms step_avg:44.30ms +[2025-09-11 11:44:55] [Rank 0] step:3601/10000 train_time:159537ms step_avg:44.30ms +[2025-09-11 11:44:55] [Rank 0] step:3621/10000 train_time:160198ms step_avg:44.24ms +[2025-09-11 11:44:55] [Rank 0] step:3621/10000 train_time:160198ms step_avg:44.24ms +[2025-09-11 11:44:56] [Rank 0] step:3641/10000 train_time:160858ms step_avg:44.18ms +[2025-09-11 11:44:56] [Rank 0] step:3641/10000 train_time:160858ms step_avg:44.18ms +[2025-09-11 11:44:57] [Rank 0] step:3661/10000 train_time:161517ms step_avg:44.12ms +[2025-09-11 11:44:57] [Rank 0] step:3661/10000 train_time:161517ms step_avg:44.12ms +[2025-09-11 11:44:57] [Rank 0] step:3681/10000 train_time:162176ms step_avg:44.06ms +[2025-09-11 11:44:57] [Rank 0] step:3681/10000 train_time:162176ms step_avg:44.06ms +[2025-09-11 11:44:58] [Rank 0] step:3701/10000 train_time:162835ms step_avg:44.00ms +[2025-09-11 11:44:58] [Rank 0] step:3701/10000 train_time:162835ms step_avg:44.00ms +[2025-09-11 11:44:59] [Rank 0] step:3721/10000 train_time:163504ms step_avg:43.94ms +[2025-09-11 11:44:59] [Rank 0] step:3721/10000 train_time:163504ms step_avg:43.94ms +[2025-09-11 11:44:59] [Rank 0] step:3741/10000 train_time:164174ms step_avg:43.89ms +[2025-09-11 11:44:59] [Rank 0] step:3741/10000 train_time:164174ms step_avg:43.89ms +[2025-09-11 11:45:00] [Rank 0] step:3761/10000 train_time:164845ms step_avg:43.83ms +[2025-09-11 11:45:00] [Rank 0] step:3761/10000 train_time:164845ms step_avg:43.83ms 
+[2025-09-11 11:45:01] [Rank 0] step:3781/10000 train_time:165515ms step_avg:43.78ms +[2025-09-11 11:45:01] [Rank 0] step:3781/10000 train_time:165515ms step_avg:43.78ms +[2025-09-11 11:45:02] [Rank 0] step:3801/10000 train_time:166336ms step_avg:43.76ms +[2025-09-11 11:45:02] [Rank 0] step:3801/10000 train_time:166336ms step_avg:43.76ms +[2025-09-11 11:45:02] [Rank 0] step:3821/10000 train_time:167101ms step_avg:43.73ms +[2025-09-11 11:45:02] [Rank 0] step:3821/10000 train_time:167101ms step_avg:43.73ms +[2025-09-11 11:45:03] [Rank 0] step:3841/10000 train_time:167772ms step_avg:43.68ms +[2025-09-11 11:45:03] [Rank 0] step:3841/10000 train_time:167772ms step_avg:43.68ms +[2025-09-11 11:45:04] [Rank 0] step:3861/10000 train_time:168442ms step_avg:43.63ms +[2025-09-11 11:45:04] [Rank 0] step:3861/10000 train_time:168442ms step_avg:43.63ms +[2025-09-11 11:45:04] [Rank 0] step:3881/10000 train_time:169111ms step_avg:43.57ms +[2025-09-11 11:45:04] [Rank 0] step:3881/10000 train_time:169111ms step_avg:43.57ms +[2025-09-11 11:45:05] [Rank 0] step:3901/10000 train_time:169781ms step_avg:43.52ms +[2025-09-11 11:45:05] [Rank 0] step:3901/10000 train_time:169781ms step_avg:43.52ms +[2025-09-11 11:45:06] [Rank 0] step:3921/10000 train_time:170452ms step_avg:43.47ms +[2025-09-11 11:45:06] [Rank 0] step:3921/10000 train_time:170452ms step_avg:43.47ms +[2025-09-11 11:45:06] [Rank 0] step:3941/10000 train_time:171123ms step_avg:43.42ms +[2025-09-11 11:45:06] [Rank 0] step:3941/10000 train_time:171123ms step_avg:43.42ms +[2025-09-11 11:45:07] [Rank 0] step:3961/10000 train_time:171793ms step_avg:43.37ms +[2025-09-11 11:45:07] [Rank 0] step:3961/10000 train_time:171793ms step_avg:43.37ms +[2025-09-11 11:45:08] [Rank 0] step:3981/10000 train_time:172463ms step_avg:43.32ms +[2025-09-11 11:45:08] [Rank 0] step:3981/10000 train_time:172463ms step_avg:43.32ms +[2025-09-11 11:45:08] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 11:45:08] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 11:45:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 11:45:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 11:45:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 11:45:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 11:45:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:45:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:45:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 11:45:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 11:45:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 11:45:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 11:45:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 11:45:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 11:45:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 11:45:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 11:45:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 11:45:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 11:45:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 11:45:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 11:45:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 11:45:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 11:45:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 11:45:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 11:45:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 11:45:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 11:45:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 11:45:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 11:45:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 11:45:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 11:45:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 11:45:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 11:45:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 11:45:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 11:45:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 11:45:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 11:45:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 11:45:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 11:45:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 11:45:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 11:45:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 11:45:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 11:45:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 11:45:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 11:45:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:45:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:45:18] [Rank 0] PRINT: step:4000/10000 val_loss:5.2105 total_sharp:6.2194e-03 L1_sharp:3.3367e-03 L2_sharp:6.8522e-04 L3_sharp:6.0526e-04 L4_sharp:4.8172e-04 L5_sharp:1.0138e-03 L6_sharp:6.1631e-04 L7_sharp:8.1282e-04 L8_sharp:1.3861e-03 L9_sharp:1.0665e-03 L10_sharp:1.3316e-03 L11_sharp:1.5396e-03 L12_sharp:4.5926e-03 total_fnorm:1.0000e+01 total_l1_linf:2.7264e+04 total_spectral:5.0625e+00 L1_fnorm:2.5469e+00 L2_fnorm:2.5000e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5312e+00 L8_fnorm:2.4375e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.3906e+00 L1_l1linf:6.7969e-01 L2_l1linf:6.2109e-01 L3_l1linf:6.2500e-01 L4_l1linf:6.2109e-01 L5_l1linf:6.0547e-01 L6_l1linf:5.8984e-01 L7_l1linf:5.9375e-01 L8_l1linf:6.0156e-01 L9_l1linf:6.0547e-01 L10_l1linf:6.1328e-01 L11_l1linf:6.1719e-01 L12_l1linf:5.2734e-01 L1_spectral:3.1687e-02 L2_spectral:3.0793e-02 L3_spectral:3.0825e-02 L4_spectral:3.0871e-02 L5_spectral:3.1047e-02 L6_spectral:3.1171e-02 L7_spectral:3.1074e-02 L8_spectral:3.0523e-02 L9_spectral:3.0593e-02 L10_spectral:3.0626e-02 L11_spectral:3.0685e-02 L12_spectral:3.0409e-02 train_time:173114ms step_avg:43.28ms +[2025-09-11 11:45:18] [Rank 0] PRINT: step:4000/10000 
val_loss:5.2105 total_sharp:6.2194e-03 L1_sharp:3.3367e-03 L2_sharp:6.8522e-04 L3_sharp:6.0526e-04 L4_sharp:4.8172e-04 L5_sharp:1.0138e-03 L6_sharp:6.1631e-04 L7_sharp:8.1282e-04 L8_sharp:1.3861e-03 L9_sharp:1.0665e-03 L10_sharp:1.3316e-03 L11_sharp:1.5396e-03 L12_sharp:4.5926e-03 total_fnorm:1.0000e+01 total_l1_linf:2.7264e+04 total_spectral:5.0625e+00 L1_fnorm:2.5469e+00 L2_fnorm:2.5000e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5312e+00 L8_fnorm:2.4375e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.3906e+00 L1_l1linf:6.7969e-01 L2_l1linf:6.2109e-01 L3_l1linf:6.2500e-01 L4_l1linf:6.2109e-01 L5_l1linf:6.0547e-01 L6_l1linf:5.8984e-01 L7_l1linf:5.9375e-01 L8_l1linf:6.0156e-01 L9_l1linf:6.0547e-01 L10_l1linf:6.1328e-01 L11_l1linf:6.1719e-01 L12_l1linf:5.2734e-01 L1_spectral:3.1687e-02 L2_spectral:3.0793e-02 L3_spectral:3.0825e-02 L4_spectral:3.0871e-02 L5_spectral:3.1047e-02 L6_spectral:3.1171e-02 L7_spectral:3.1074e-02 L8_spectral:3.0523e-02 L9_spectral:3.0593e-02 L10_spectral:3.0626e-02 L11_spectral:3.0685e-02 L12_spectral:3.0409e-02 train_time:173114ms step_avg:43.28ms +[2025-09-11 11:45:19] [Rank 0] step:4001/10000 train_time:174253ms step_avg:43.55ms +[2025-09-11 11:45:19] [Rank 0] step:4001/10000 train_time:174253ms step_avg:43.55ms +[2025-09-11 11:45:20] [Rank 0] step:4021/10000 train_time:174913ms step_avg:43.50ms +[2025-09-11 11:45:20] [Rank 0] step:4021/10000 train_time:174913ms step_avg:43.50ms +[2025-09-11 11:45:21] [Rank 0] step:4041/10000 train_time:175584ms step_avg:43.45ms +[2025-09-11 11:45:21] [Rank 0] step:4041/10000 train_time:175584ms step_avg:43.45ms +[2025-09-11 11:45:21] [Rank 0] step:4061/10000 train_time:176254ms step_avg:43.40ms +[2025-09-11 11:45:21] [Rank 0] step:4061/10000 train_time:176254ms step_avg:43.40ms +[2025-09-11 11:45:22] [Rank 0] step:4081/10000 train_time:176924ms step_avg:43.35ms +[2025-09-11 11:45:22] [Rank 0] step:4081/10000 
train_time:176924ms step_avg:43.35ms +[2025-09-11 11:45:23] [Rank 0] step:4101/10000 train_time:177594ms step_avg:43.30ms +[2025-09-11 11:45:23] [Rank 0] step:4101/10000 train_time:177594ms step_avg:43.30ms +[2025-09-11 11:45:23] [Rank 0] step:4121/10000 train_time:178263ms step_avg:43.26ms +[2025-09-11 11:45:23] [Rank 0] step:4121/10000 train_time:178263ms step_avg:43.26ms +[2025-09-11 11:45:24] [Rank 0] step:4141/10000 train_time:178933ms step_avg:43.21ms +[2025-09-11 11:45:24] [Rank 0] step:4141/10000 train_time:178933ms step_avg:43.21ms +[2025-09-11 11:45:25] [Rank 0] step:4161/10000 train_time:179602ms step_avg:43.16ms +[2025-09-11 11:45:25] [Rank 0] step:4161/10000 train_time:179602ms step_avg:43.16ms +[2025-09-11 11:45:25] [Rank 0] step:4181/10000 train_time:180272ms step_avg:43.12ms +[2025-09-11 11:45:25] [Rank 0] step:4181/10000 train_time:180272ms step_avg:43.12ms +[2025-09-11 11:45:26] [Rank 0] step:4201/10000 train_time:180943ms step_avg:43.07ms +[2025-09-11 11:45:26] [Rank 0] step:4201/10000 train_time:180943ms step_avg:43.07ms +[2025-09-11 11:45:27] [Rank 0] step:4221/10000 train_time:181612ms step_avg:43.03ms +[2025-09-11 11:45:27] [Rank 0] step:4221/10000 train_time:181612ms step_avg:43.03ms +[2025-09-11 11:45:28] [Rank 0] step:4241/10000 train_time:182282ms step_avg:42.98ms +[2025-09-11 11:45:28] [Rank 0] step:4241/10000 train_time:182282ms step_avg:42.98ms +[2025-09-11 11:45:28] [Rank 0] step:4261/10000 train_time:182952ms step_avg:42.94ms +[2025-09-11 11:45:28] [Rank 0] step:4261/10000 train_time:182952ms step_avg:42.94ms +[2025-09-11 11:45:29] [Rank 0] step:4281/10000 train_time:183623ms step_avg:42.89ms +[2025-09-11 11:45:29] [Rank 0] step:4281/10000 train_time:183623ms step_avg:42.89ms +[2025-09-11 11:45:30] [Rank 0] step:4301/10000 train_time:184295ms step_avg:42.85ms +[2025-09-11 11:45:30] [Rank 0] step:4301/10000 train_time:184295ms step_avg:42.85ms +[2025-09-11 11:45:30] [Rank 0] step:4321/10000 train_time:184964ms step_avg:42.81ms 
+[2025-09-11 11:45:30] [Rank 0] step:4321/10000 train_time:184964ms step_avg:42.81ms +[2025-09-11 11:45:31] [Rank 0] step:4341/10000 train_time:185634ms step_avg:42.76ms +[2025-09-11 11:45:31] [Rank 0] step:4341/10000 train_time:185634ms step_avg:42.76ms +[2025-09-11 11:45:32] [Rank 0] step:4361/10000 train_time:186303ms step_avg:42.72ms +[2025-09-11 11:45:32] [Rank 0] step:4361/10000 train_time:186303ms step_avg:42.72ms +[2025-09-11 11:45:32] [Rank 0] step:4381/10000 train_time:186974ms step_avg:42.68ms +[2025-09-11 11:45:32] [Rank 0] step:4381/10000 train_time:186974ms step_avg:42.68ms +[2025-09-11 11:45:33] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 11:45:33] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 11:45:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 11:45:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 11:45:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 11:45:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 11:45:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:45:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:45:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 11:45:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 11:45:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 11:45:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 11:45:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 11:45:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 11:45:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 11:45:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 11:45:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 11:45:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 11:45:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 11:45:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 11:45:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 11:45:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 11:45:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 11:45:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 11:45:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 11:45:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 11:45:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 11:45:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 11:45:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 11:45:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 11:45:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 11:45:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 11:45:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 11:45:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 11:45:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 11:45:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 11:45:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 11:45:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 11:45:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 11:45:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 11:45:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 11:45:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 11:45:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 11:45:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 11:45:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:45:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:45:44] [Rank 0] PRINT: step:4400/10000 val_loss:5.1718 total_sharp:5.3642e-03 L1_sharp:2.9336e-03 L2_sharp:6.9765e-04 L3_sharp:3.0958e-04 L4_sharp:6.1240e-04 L5_sharp:1.1519e-03 L6_sharp:6.2343e-04 L7_sharp:5.4784e-04 L8_sharp:1.0223e-03 L9_sharp:8.2737e-04 L10_sharp:1.1701e-03 L11_sharp:1.3524e-03 L12_sharp:5.7690e-03 total_fnorm:9.8750e+00 total_l1_linf:2.6496e+04 total_spectral:4.9688e+00 L1_fnorm:2.5312e+00 L2_fnorm:2.5000e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5312e+00 L8_fnorm:2.4375e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.3906e+00 L1_l1linf:6.6797e-01 L2_l1linf:6.1328e-01 L3_l1linf:6.1719e-01 L4_l1linf:6.0938e-01 L5_l1linf:5.8203e-01 L6_l1linf:5.8984e-01 L7_l1linf:5.8594e-01 L8_l1linf:5.8203e-01 L9_l1linf:5.9766e-01 L10_l1linf:5.9766e-01 L11_l1linf:6.0156e-01 L12_l1linf:5.0391e-01 L1_spectral:3.1792e-02 L2_spectral:3.1043e-02 L3_spectral:3.1151e-02 L4_spectral:3.1076e-02 L5_spectral:3.1115e-02 L6_spectral:3.1327e-02 L7_spectral:3.1382e-02 L8_spectral:3.0897e-02 L9_spectral:3.0981e-02 L10_spectral:3.0921e-02 L11_spectral:3.1021e-02 L12_spectral:3.0789e-02 train_time:187625ms step_avg:42.64ms +[2025-09-11 11:45:44] [Rank 0] PRINT: step:4400/10000 val_loss:5.1718 total_sharp:5.3642e-03 L1_sharp:2.9336e-03 L2_sharp:6.9765e-04 L3_sharp:3.0958e-04 L4_sharp:6.1240e-04 L5_sharp:1.1519e-03 L6_sharp:6.2343e-04 L7_sharp:5.4784e-04 L8_sharp:1.0223e-03 L9_sharp:8.2737e-04 L10_sharp:1.1701e-03 L11_sharp:1.3524e-03 L12_sharp:5.7690e-03 total_fnorm:9.8750e+00 total_l1_linf:2.6496e+04 total_spectral:4.9688e+00 L1_fnorm:2.5312e+00 L2_fnorm:2.5000e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5312e+00 L8_fnorm:2.4375e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.3906e+00 L1_l1linf:6.6797e-01 L2_l1linf:6.1328e-01 L3_l1linf:6.1719e-01 L4_l1linf:6.0938e-01 L5_l1linf:5.8203e-01 
L6_l1linf:5.8984e-01 L7_l1linf:5.8594e-01 L8_l1linf:5.8203e-01 L9_l1linf:5.9766e-01 L10_l1linf:5.9766e-01 L11_l1linf:6.0156e-01 L12_l1linf:5.0391e-01 L1_spectral:3.1792e-02 L2_spectral:3.1043e-02 L3_spectral:3.1151e-02 L4_spectral:3.1076e-02 L5_spectral:3.1115e-02 L6_spectral:3.1327e-02 L7_spectral:3.1382e-02 L8_spectral:3.0897e-02 L9_spectral:3.0981e-02 L10_spectral:3.0921e-02 L11_spectral:3.1021e-02 L12_spectral:3.0789e-02 train_time:187625ms step_avg:42.64ms +[2025-09-11 11:45:45] [Rank 0] step:4401/10000 train_time:188776ms step_avg:42.89ms +[2025-09-11 11:45:45] [Rank 0] step:4401/10000 train_time:188776ms step_avg:42.89ms +[2025-09-11 11:45:46] [Rank 0] step:4421/10000 train_time:189451ms step_avg:42.85ms +[2025-09-11 11:45:46] [Rank 0] step:4421/10000 train_time:189451ms step_avg:42.85ms +[2025-09-11 11:45:47] [Rank 0] step:4441/10000 train_time:190122ms step_avg:42.81ms +[2025-09-11 11:45:47] [Rank 0] step:4441/10000 train_time:190122ms step_avg:42.81ms +[2025-09-11 11:45:47] [Rank 0] step:4461/10000 train_time:190795ms step_avg:42.77ms +[2025-09-11 11:45:47] [Rank 0] step:4461/10000 train_time:190795ms step_avg:42.77ms +[2025-09-11 11:45:48] [Rank 0] step:4481/10000 train_time:191467ms step_avg:42.73ms +[2025-09-11 11:45:48] [Rank 0] step:4481/10000 train_time:191467ms step_avg:42.73ms +[2025-09-11 11:45:49] [Rank 0] step:4501/10000 train_time:192140ms step_avg:42.69ms +[2025-09-11 11:45:49] [Rank 0] step:4501/10000 train_time:192140ms step_avg:42.69ms +[2025-09-11 11:45:49] [Rank 0] step:4521/10000 train_time:192813ms step_avg:42.65ms +[2025-09-11 11:45:49] [Rank 0] step:4521/10000 train_time:192813ms step_avg:42.65ms +[2025-09-11 11:45:50] [Rank 0] step:4541/10000 train_time:193486ms step_avg:42.61ms +[2025-09-11 11:45:50] [Rank 0] step:4541/10000 train_time:193486ms step_avg:42.61ms +[2025-09-11 11:45:51] [Rank 0] step:4561/10000 train_time:194158ms step_avg:42.57ms +[2025-09-11 11:45:51] [Rank 0] step:4561/10000 train_time:194158ms step_avg:42.57ms 
+[2025-09-11 11:45:51] [Rank 0] step:4581/10000 train_time:194839ms step_avg:42.53ms +[2025-09-11 11:45:51] [Rank 0] step:4581/10000 train_time:194839ms step_avg:42.53ms +[2025-09-11 11:45:52] [Rank 0] step:4601/10000 train_time:195511ms step_avg:42.49ms +[2025-09-11 11:45:52] [Rank 0] step:4601/10000 train_time:195511ms step_avg:42.49ms +[2025-09-11 11:45:53] [Rank 0] step:4621/10000 train_time:196183ms step_avg:42.45ms +[2025-09-11 11:45:53] [Rank 0] step:4621/10000 train_time:196183ms step_avg:42.45ms +[2025-09-11 11:45:53] [Rank 0] step:4641/10000 train_time:196869ms step_avg:42.42ms +[2025-09-11 11:45:53] [Rank 0] step:4641/10000 train_time:196869ms step_avg:42.42ms +[2025-09-11 11:45:54] [Rank 0] step:4661/10000 train_time:197541ms step_avg:42.38ms +[2025-09-11 11:45:54] [Rank 0] step:4661/10000 train_time:197541ms step_avg:42.38ms +[2025-09-11 11:45:55] [Rank 0] step:4681/10000 train_time:198213ms step_avg:42.34ms +[2025-09-11 11:45:55] [Rank 0] step:4681/10000 train_time:198213ms step_avg:42.34ms +[2025-09-11 11:45:55] [Rank 0] step:4701/10000 train_time:198886ms step_avg:42.31ms +[2025-09-11 11:45:55] [Rank 0] step:4701/10000 train_time:198886ms step_avg:42.31ms +[2025-09-11 11:45:56] [Rank 0] step:4721/10000 train_time:199560ms step_avg:42.27ms +[2025-09-11 11:45:56] [Rank 0] step:4721/10000 train_time:199560ms step_avg:42.27ms +[2025-09-11 11:45:57] [Rank 0] step:4741/10000 train_time:200232ms step_avg:42.23ms +[2025-09-11 11:45:57] [Rank 0] step:4741/10000 train_time:200232ms step_avg:42.23ms +[2025-09-11 11:45:58] [Rank 0] step:4761/10000 train_time:200905ms step_avg:42.20ms +[2025-09-11 11:45:58] [Rank 0] step:4761/10000 train_time:200905ms step_avg:42.20ms +[2025-09-11 11:45:58] [Rank 0] step:4781/10000 train_time:201576ms step_avg:42.16ms +[2025-09-11 11:45:58] [Rank 0] step:4781/10000 train_time:201576ms step_avg:42.16ms +[2025-09-11 11:45:59] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 11:45:59] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 11:46:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 11:46:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 11:46:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 11:46:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 11:46:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:46:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:46:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 11:46:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 11:46:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 11:46:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 11:46:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 11:46:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 11:46:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 11:46:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 11:46:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 11:46:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 11:46:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 11:46:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 11:46:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 11:46:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 11:46:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 11:46:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 11:46:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 11:46:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 11:46:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 11:46:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 11:46:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 11:46:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 11:46:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 11:46:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 11:46:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 11:46:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 11:46:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 11:46:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 11:46:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 11:46:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 11:46:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 11:46:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 11:46:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 11:46:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 11:46:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 11:46:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 11:46:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:46:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:46:09] [Rank 0] PRINT: step:4800/10000 val_loss:5.1253 total_sharp:4.0354e-03 L1_sharp:2.4575e-03 L2_sharp:3.8043e-04 L3_sharp:4.5310e-04 L4_sharp:3.5316e-04 L5_sharp:8.1471e-04 L6_sharp:4.7090e-04 L7_sharp:4.8290e-04 L8_sharp:8.9356e-04 L9_sharp:8.0698e-04 L10_sharp:1.0302e-03 L11_sharp:1.2071e-03 L12_sharp:3.3861e-03 total_fnorm:9.8125e+00 total_l1_linf:2.6240e+04 total_spectral:4.9688e+00 L1_fnorm:2.5469e+00 L2_fnorm:2.5156e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4375e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.4375e+00 L1_l1linf:6.7578e-01 L2_l1linf:6.2891e-01 L3_l1linf:6.1328e-01 L4_l1linf:5.9766e-01 L5_l1linf:5.8594e-01 L6_l1linf:5.7812e-01 L7_l1linf:5.7812e-01 L8_l1linf:5.7422e-01 L9_l1linf:5.8594e-01 L10_l1linf:5.8594e-01 L11_l1linf:5.8594e-01 L12_l1linf:5.3516e-01 L1_spectral:3.2107e-02 L2_spectral:3.1269e-02 L3_spectral:3.1228e-02 L4_spectral:3.1444e-02 L5_spectral:3.1464e-02 L6_spectral:3.1708e-02 L7_spectral:3.1815e-02 L8_spectral:3.0911e-02 L9_spectral:3.1413e-02 L10_spectral:3.1254e-02 L11_spectral:3.1158e-02 L12_spectral:3.1100e-02 train_time:202229ms step_avg:42.13ms +[2025-09-11 11:46:09] [Rank 0] PRINT: step:4800/10000 
val_loss:5.1253 total_sharp:4.0354e-03 L1_sharp:2.4575e-03 L2_sharp:3.8043e-04 L3_sharp:4.5310e-04 L4_sharp:3.5316e-04 L5_sharp:8.1471e-04 L6_sharp:4.7090e-04 L7_sharp:4.8290e-04 L8_sharp:8.9356e-04 L9_sharp:8.0698e-04 L10_sharp:1.0302e-03 L11_sharp:1.2071e-03 L12_sharp:3.3861e-03 total_fnorm:9.8125e+00 total_l1_linf:2.6240e+04 total_spectral:4.9688e+00 L1_fnorm:2.5469e+00 L2_fnorm:2.5156e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4375e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.4375e+00 L1_l1linf:6.7578e-01 L2_l1linf:6.2891e-01 L3_l1linf:6.1328e-01 L4_l1linf:5.9766e-01 L5_l1linf:5.8594e-01 L6_l1linf:5.7812e-01 L7_l1linf:5.7812e-01 L8_l1linf:5.7422e-01 L9_l1linf:5.8594e-01 L10_l1linf:5.8594e-01 L11_l1linf:5.8594e-01 L12_l1linf:5.3516e-01 L1_spectral:3.2107e-02 L2_spectral:3.1269e-02 L3_spectral:3.1228e-02 L4_spectral:3.1444e-02 L5_spectral:3.1464e-02 L6_spectral:3.1708e-02 L7_spectral:3.1815e-02 L8_spectral:3.0911e-02 L9_spectral:3.1413e-02 L10_spectral:3.1254e-02 L11_spectral:3.1158e-02 L12_spectral:3.1100e-02 train_time:202229ms step_avg:42.13ms +[2025-09-11 11:46:10] [Rank 0] step:4801/10000 train_time:203402ms step_avg:42.37ms +[2025-09-11 11:46:10] [Rank 0] step:4801/10000 train_time:203402ms step_avg:42.37ms +[2025-09-11 11:46:11] [Rank 0] step:4821/10000 train_time:204085ms step_avg:42.33ms +[2025-09-11 11:46:11] [Rank 0] step:4821/10000 train_time:204085ms step_avg:42.33ms +[2025-09-11 11:46:11] [Rank 0] step:4841/10000 train_time:204760ms step_avg:42.30ms +[2025-09-11 11:46:11] [Rank 0] step:4841/10000 train_time:204760ms step_avg:42.30ms +[2025-09-11 11:46:12] [Rank 0] step:4861/10000 train_time:205433ms step_avg:42.26ms +[2025-09-11 11:46:12] [Rank 0] step:4861/10000 train_time:205433ms step_avg:42.26ms +[2025-09-11 11:46:13] [Rank 0] step:4881/10000 train_time:206106ms step_avg:42.23ms +[2025-09-11 11:46:13] [Rank 0] step:4881/10000 
train_time:206106ms step_avg:42.23ms +[2025-09-11 11:46:13] [Rank 0] step:4901/10000 train_time:206780ms step_avg:42.19ms +[2025-09-11 11:46:13] [Rank 0] step:4901/10000 train_time:206780ms step_avg:42.19ms +[2025-09-11 11:46:14] [Rank 0] step:4921/10000 train_time:207454ms step_avg:42.16ms +[2025-09-11 11:46:14] [Rank 0] step:4921/10000 train_time:207454ms step_avg:42.16ms +[2025-09-11 11:46:15] [Rank 0] step:4941/10000 train_time:208127ms step_avg:42.12ms +[2025-09-11 11:46:15] [Rank 0] step:4941/10000 train_time:208127ms step_avg:42.12ms +[2025-09-11 11:46:15] [Rank 0] step:4961/10000 train_time:208799ms step_avg:42.09ms +[2025-09-11 11:46:15] [Rank 0] step:4961/10000 train_time:208799ms step_avg:42.09ms +[2025-09-11 11:46:16] [Rank 0] step:4981/10000 train_time:209472ms step_avg:42.05ms +[2025-09-11 11:46:16] [Rank 0] step:4981/10000 train_time:209472ms step_avg:42.05ms +[2025-09-11 11:46:17] [Rank 0] step:5001/10000 train_time:210146ms step_avg:42.02ms +[2025-09-11 11:46:17] [Rank 0] step:5001/10000 train_time:210146ms step_avg:42.02ms +[2025-09-11 11:46:17] [Rank 0] step:5021/10000 train_time:210818ms step_avg:41.99ms +[2025-09-11 11:46:17] [Rank 0] step:5021/10000 train_time:210818ms step_avg:41.99ms +[2025-09-11 11:46:18] [Rank 0] step:5041/10000 train_time:211490ms step_avg:41.95ms +[2025-09-11 11:46:18] [Rank 0] step:5041/10000 train_time:211490ms step_avg:41.95ms +[2025-09-11 11:46:19] [Rank 0] step:5061/10000 train_time:212337ms step_avg:41.96ms +[2025-09-11 11:46:19] [Rank 0] step:5061/10000 train_time:212337ms step_avg:41.96ms +[2025-09-11 11:46:20] [Rank 0] step:5081/10000 train_time:213447ms step_avg:42.01ms +[2025-09-11 11:46:20] [Rank 0] step:5081/10000 train_time:213447ms step_avg:42.01ms +[2025-09-11 11:46:21] [Rank 0] step:5101/10000 train_time:214120ms step_avg:41.98ms +[2025-09-11 11:46:21] [Rank 0] step:5101/10000 train_time:214120ms step_avg:41.98ms +[2025-09-11 11:46:21] [Rank 0] step:5121/10000 train_time:214932ms step_avg:41.97ms 
+[2025-09-11 11:46:21] [Rank 0] step:5121/10000 train_time:214932ms step_avg:41.97ms +[2025-09-11 11:46:22] [Rank 0] step:5141/10000 train_time:215739ms step_avg:41.96ms +[2025-09-11 11:46:22] [Rank 0] step:5141/10000 train_time:215739ms step_avg:41.96ms +[2025-09-11 11:46:23] [Rank 0] step:5161/10000 train_time:216412ms step_avg:41.93ms +[2025-09-11 11:46:23] [Rank 0] step:5161/10000 train_time:216412ms step_avg:41.93ms +[2025-09-11 11:46:24] [Rank 0] step:5181/10000 train_time:217084ms step_avg:41.90ms +[2025-09-11 11:46:24] [Rank 0] step:5181/10000 train_time:217084ms step_avg:41.90ms +[2025-09-11 11:46:24] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 11:46:24] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 11:46:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 11:46:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 11:46:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 11:46:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 11:46:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:46:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:46:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 11:46:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 11:46:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 11:46:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 11:46:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 11:46:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 11:46:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 11:46:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 11:46:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 11:46:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 11:46:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 11:46:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 11:46:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 11:46:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 11:46:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 11:46:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 11:46:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 11:46:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 11:46:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 11:46:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 11:46:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 11:46:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 11:46:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 11:46:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 11:46:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 11:46:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 11:46:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 11:46:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 11:46:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 11:46:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 11:46:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 11:46:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 11:46:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 11:46:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 11:46:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 11:46:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 11:46:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:46:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:46:34] [Rank 0] PRINT: step:5200/10000 val_loss:5.0934 total_sharp:4.4940e-03 L1_sharp:3.1515e-03 L2_sharp:5.9225e-04 L3_sharp:5.4233e-04 L4_sharp:3.3153e-04 L5_sharp:6.6550e-04 L6_sharp:4.9762e-04 L7_sharp:5.2034e-04 L8_sharp:8.7781e-04 L9_sharp:8.1543e-04 L10_sharp:1.1282e-03 L11_sharp:1.2735e-03 L12_sharp:6.6750e-03 total_fnorm:9.6250e+00 total_l1_linf:2.5344e+04 total_spectral:4.8750e+00 L1_fnorm:2.5312e+00 L2_fnorm:2.5156e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5312e+00 L8_fnorm:2.4531e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.4375e+00 L1_l1linf:6.6797e-01 L2_l1linf:6.0938e-01 L3_l1linf:6.0938e-01 L4_l1linf:6.0547e-01 L5_l1linf:5.8594e-01 L6_l1linf:5.7031e-01 L7_l1linf:5.6641e-01 L8_l1linf:5.6250e-01 L9_l1linf:5.7422e-01 L10_l1linf:5.6641e-01 L11_l1linf:5.7812e-01 L12_l1linf:5.1562e-01 L1_spectral:3.2106e-02 L2_spectral:3.1684e-02 L3_spectral:3.1441e-02 L4_spectral:3.1632e-02 L5_spectral:3.1926e-02 L6_spectral:3.1827e-02 L7_spectral:3.1884e-02 L8_spectral:3.1259e-02 L9_spectral:3.1425e-02 L10_spectral:3.1493e-02 L11_spectral:3.1353e-02 L12_spectral:3.1286e-02 train_time:217744ms step_avg:41.87ms +[2025-09-11 11:46:34] [Rank 0] PRINT: step:5200/10000 val_loss:5.0934 total_sharp:4.4940e-03 L1_sharp:3.1515e-03 L2_sharp:5.9225e-04 L3_sharp:5.4233e-04 L4_sharp:3.3153e-04 L5_sharp:6.6550e-04 L6_sharp:4.9762e-04 L7_sharp:5.2034e-04 L8_sharp:8.7781e-04 L9_sharp:8.1543e-04 L10_sharp:1.1282e-03 L11_sharp:1.2735e-03 L12_sharp:6.6750e-03 total_fnorm:9.6250e+00 total_l1_linf:2.5344e+04 total_spectral:4.8750e+00 L1_fnorm:2.5312e+00 L2_fnorm:2.5156e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5312e+00 L8_fnorm:2.4531e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.4375e+00 L1_l1linf:6.6797e-01 L2_l1linf:6.0938e-01 L3_l1linf:6.0938e-01 L4_l1linf:6.0547e-01 L5_l1linf:5.8594e-01 
L6_l1linf:5.7031e-01 L7_l1linf:5.6641e-01 L8_l1linf:5.6250e-01 L9_l1linf:5.7422e-01 L10_l1linf:5.6641e-01 L11_l1linf:5.7812e-01 L12_l1linf:5.1562e-01 L1_spectral:3.2106e-02 L2_spectral:3.1684e-02 L3_spectral:3.1441e-02 L4_spectral:3.1632e-02 L5_spectral:3.1926e-02 L6_spectral:3.1827e-02 L7_spectral:3.1884e-02 L8_spectral:3.1259e-02 L9_spectral:3.1425e-02 L10_spectral:3.1493e-02 L11_spectral:3.1353e-02 L12_spectral:3.1286e-02 train_time:217744ms step_avg:41.87ms +[2025-09-11 11:46:35] [Rank 0] step:5201/10000 train_time:218891ms step_avg:42.09ms +[2025-09-11 11:46:35] [Rank 0] step:5201/10000 train_time:218891ms step_avg:42.09ms +[2025-09-11 11:46:36] [Rank 0] step:5221/10000 train_time:219574ms step_avg:42.06ms +[2025-09-11 11:46:36] [Rank 0] step:5221/10000 train_time:219574ms step_avg:42.06ms +[2025-09-11 11:46:37] [Rank 0] step:5241/10000 train_time:220257ms step_avg:42.03ms +[2025-09-11 11:46:37] [Rank 0] step:5241/10000 train_time:220257ms step_avg:42.03ms +[2025-09-11 11:46:37] [Rank 0] step:5261/10000 train_time:220940ms step_avg:42.00ms +[2025-09-11 11:46:37] [Rank 0] step:5261/10000 train_time:220940ms step_avg:42.00ms +[2025-09-11 11:46:38] [Rank 0] step:5281/10000 train_time:221622ms step_avg:41.97ms +[2025-09-11 11:46:38] [Rank 0] step:5281/10000 train_time:221622ms step_avg:41.97ms +[2025-09-11 11:46:39] [Rank 0] step:5301/10000 train_time:222304ms step_avg:41.94ms +[2025-09-11 11:46:39] [Rank 0] step:5301/10000 train_time:222304ms step_avg:41.94ms +[2025-09-11 11:46:39] [Rank 0] step:5321/10000 train_time:222986ms step_avg:41.91ms +[2025-09-11 11:46:39] [Rank 0] step:5321/10000 train_time:222986ms step_avg:41.91ms +[2025-09-11 11:46:40] [Rank 0] step:5341/10000 train_time:223667ms step_avg:41.88ms +[2025-09-11 11:46:40] [Rank 0] step:5341/10000 train_time:223667ms step_avg:41.88ms +[2025-09-11 11:46:41] [Rank 0] step:5361/10000 train_time:224350ms step_avg:41.85ms +[2025-09-11 11:46:41] [Rank 0] step:5361/10000 train_time:224350ms step_avg:41.85ms 
+[2025-09-11 11:46:41] [Rank 0] step:5381/10000 train_time:225034ms step_avg:41.82ms +[2025-09-11 11:46:41] [Rank 0] step:5381/10000 train_time:225034ms step_avg:41.82ms +[2025-09-11 11:46:42] [Rank 0] step:5401/10000 train_time:225715ms step_avg:41.79ms +[2025-09-11 11:46:42] [Rank 0] step:5401/10000 train_time:225715ms step_avg:41.79ms +[2025-09-11 11:46:43] [Rank 0] step:5421/10000 train_time:226398ms step_avg:41.76ms +[2025-09-11 11:46:43] [Rank 0] step:5421/10000 train_time:226398ms step_avg:41.76ms +[2025-09-11 11:46:44] [Rank 0] step:5441/10000 train_time:227080ms step_avg:41.73ms +[2025-09-11 11:46:44] [Rank 0] step:5441/10000 train_time:227080ms step_avg:41.73ms +[2025-09-11 11:46:44] [Rank 0] step:5461/10000 train_time:227763ms step_avg:41.71ms +[2025-09-11 11:46:44] [Rank 0] step:5461/10000 train_time:227763ms step_avg:41.71ms +[2025-09-11 11:46:45] [Rank 0] step:5481/10000 train_time:228445ms step_avg:41.68ms +[2025-09-11 11:46:45] [Rank 0] step:5481/10000 train_time:228445ms step_avg:41.68ms +[2025-09-11 11:46:46] [Rank 0] step:5501/10000 train_time:229127ms step_avg:41.65ms +[2025-09-11 11:46:46] [Rank 0] step:5501/10000 train_time:229127ms step_avg:41.65ms +[2025-09-11 11:46:46] [Rank 0] step:5521/10000 train_time:229808ms step_avg:41.62ms +[2025-09-11 11:46:46] [Rank 0] step:5521/10000 train_time:229808ms step_avg:41.62ms +[2025-09-11 11:46:47] [Rank 0] step:5541/10000 train_time:230493ms step_avg:41.60ms +[2025-09-11 11:46:47] [Rank 0] step:5541/10000 train_time:230493ms step_avg:41.60ms +[2025-09-11 11:46:48] [Rank 0] step:5561/10000 train_time:231177ms step_avg:41.57ms +[2025-09-11 11:46:48] [Rank 0] step:5561/10000 train_time:231177ms step_avg:41.57ms +[2025-09-11 11:46:48] [Rank 0] step:5581/10000 train_time:231860ms step_avg:41.54ms +[2025-09-11 11:46:48] [Rank 0] step:5581/10000 train_time:231860ms step_avg:41.54ms +[2025-09-11 11:46:49] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 11:46:49] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 11:46:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 11:46:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 11:46:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 11:46:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 11:46:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:46:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:46:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 11:46:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 11:46:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 11:46:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 11:46:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 11:46:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 11:46:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 11:46:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 11:46:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 11:46:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 11:46:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 11:46:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 11:46:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 11:46:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 11:46:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 11:46:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 11:46:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 11:46:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 11:46:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 11:46:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 11:46:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 11:46:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 11:46:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 11:46:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 11:46:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 11:46:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 11:46:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 11:46:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 11:46:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 11:46:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 11:46:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 11:46:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 11:46:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 11:46:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 11:46:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 11:46:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 11:46:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:46:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:46:59] [Rank 0] PRINT: step:5600/10000 val_loss:5.0648 total_sharp:4.7690e-03 L1_sharp:2.7343e-03 L2_sharp:8.3152e-04 L3_sharp:4.4653e-04 L4_sharp:4.4667e-04 L5_sharp:7.0411e-04 L6_sharp:5.0286e-04 L7_sharp:5.3355e-04 L8_sharp:1.0144e-03 L9_sharp:8.5079e-04 L10_sharp:1.1406e-03 L11_sharp:1.2779e-03 L12_sharp:4.3334e-03 total_fnorm:9.5625e+00 total_l1_linf:2.5088e+04 total_spectral:4.8438e+00 L1_fnorm:2.5312e+00 L2_fnorm:2.5156e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.4688e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5312e+00 L8_fnorm:2.4375e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.4375e+00 L1_l1linf:6.6406e-01 L2_l1linf:5.9766e-01 L3_l1linf:6.0547e-01 L4_l1linf:5.9766e-01 L5_l1linf:5.7422e-01 L6_l1linf:5.6641e-01 L7_l1linf:5.6250e-01 L8_l1linf:5.5859e-01 L9_l1linf:5.5469e-01 L10_l1linf:5.6641e-01 L11_l1linf:5.8203e-01 L12_l1linf:5.1562e-01 L1_spectral:3.2235e-02 L2_spectral:3.1525e-02 L3_spectral:3.1635e-02 L4_spectral:3.1561e-02 L5_spectral:3.1689e-02 L6_spectral:3.1952e-02 L7_spectral:3.1969e-02 L8_spectral:3.1201e-02 L9_spectral:3.1718e-02 L10_spectral:3.1639e-02 L11_spectral:3.1516e-02 L12_spectral:3.1472e-02 train_time:232522ms step_avg:41.52ms +[2025-09-11 11:46:59] [Rank 0] PRINT: step:5600/10000 
val_loss:5.0648 total_sharp:4.7690e-03 L1_sharp:2.7343e-03 L2_sharp:8.3152e-04 L3_sharp:4.4653e-04 L4_sharp:4.4667e-04 L5_sharp:7.0411e-04 L6_sharp:5.0286e-04 L7_sharp:5.3355e-04 L8_sharp:1.0144e-03 L9_sharp:8.5079e-04 L10_sharp:1.1406e-03 L11_sharp:1.2779e-03 L12_sharp:4.3334e-03 total_fnorm:9.5625e+00 total_l1_linf:2.5088e+04 total_spectral:4.8438e+00 L1_fnorm:2.5312e+00 L2_fnorm:2.5156e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.4688e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5312e+00 L8_fnorm:2.4375e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.4375e+00 L1_l1linf:6.6406e-01 L2_l1linf:5.9766e-01 L3_l1linf:6.0547e-01 L4_l1linf:5.9766e-01 L5_l1linf:5.7422e-01 L6_l1linf:5.6641e-01 L7_l1linf:5.6250e-01 L8_l1linf:5.5859e-01 L9_l1linf:5.5469e-01 L10_l1linf:5.6641e-01 L11_l1linf:5.8203e-01 L12_l1linf:5.1562e-01 L1_spectral:3.2235e-02 L2_spectral:3.1525e-02 L3_spectral:3.1635e-02 L4_spectral:3.1561e-02 L5_spectral:3.1689e-02 L6_spectral:3.1952e-02 L7_spectral:3.1969e-02 L8_spectral:3.1201e-02 L9_spectral:3.1718e-02 L10_spectral:3.1639e-02 L11_spectral:3.1516e-02 L12_spectral:3.1472e-02 train_time:232522ms step_avg:41.52ms +[2025-09-11 11:47:00] [Rank 0] step:5601/10000 train_time:233679ms step_avg:41.72ms +[2025-09-11 11:47:00] [Rank 0] step:5601/10000 train_time:233679ms step_avg:41.72ms +[2025-09-11 11:47:01] [Rank 0] step:5621/10000 train_time:234395ms step_avg:41.70ms +[2025-09-11 11:47:01] [Rank 0] step:5621/10000 train_time:234395ms step_avg:41.70ms +[2025-09-11 11:47:01] [Rank 0] step:5641/10000 train_time:235078ms step_avg:41.67ms +[2025-09-11 11:47:01] [Rank 0] step:5641/10000 train_time:235078ms step_avg:41.67ms +[2025-09-11 11:47:02] [Rank 0] step:5661/10000 train_time:235760ms step_avg:41.65ms +[2025-09-11 11:47:02] [Rank 0] step:5661/10000 train_time:235760ms step_avg:41.65ms +[2025-09-11 11:47:03] [Rank 0] step:5681/10000 train_time:236443ms step_avg:41.62ms +[2025-09-11 11:47:03] [Rank 0] step:5681/10000 
train_time:236443ms step_avg:41.62ms +[2025-09-11 11:47:03] [Rank 0] step:5701/10000 train_time:237127ms step_avg:41.59ms +[2025-09-11 11:47:03] [Rank 0] step:5701/10000 train_time:237127ms step_avg:41.59ms +[2025-09-11 11:47:04] [Rank 0] step:5721/10000 train_time:237808ms step_avg:41.57ms +[2025-09-11 11:47:04] [Rank 0] step:5721/10000 train_time:237808ms step_avg:41.57ms +[2025-09-11 11:47:05] [Rank 0] step:5741/10000 train_time:238492ms step_avg:41.54ms +[2025-09-11 11:47:05] [Rank 0] step:5741/10000 train_time:238492ms step_avg:41.54ms +[2025-09-11 11:47:06] [Rank 0] step:5761/10000 train_time:239176ms step_avg:41.52ms +[2025-09-11 11:47:06] [Rank 0] step:5761/10000 train_time:239176ms step_avg:41.52ms +[2025-09-11 11:47:06] [Rank 0] step:5781/10000 train_time:239859ms step_avg:41.49ms +[2025-09-11 11:47:06] [Rank 0] step:5781/10000 train_time:239859ms step_avg:41.49ms +[2025-09-11 11:47:07] [Rank 0] step:5801/10000 train_time:240543ms step_avg:41.47ms +[2025-09-11 11:47:07] [Rank 0] step:5801/10000 train_time:240543ms step_avg:41.47ms +[2025-09-11 11:47:08] [Rank 0] step:5821/10000 train_time:241224ms step_avg:41.44ms +[2025-09-11 11:47:08] [Rank 0] step:5821/10000 train_time:241224ms step_avg:41.44ms +[2025-09-11 11:47:08] [Rank 0] step:5841/10000 train_time:241907ms step_avg:41.42ms +[2025-09-11 11:47:08] [Rank 0] step:5841/10000 train_time:241907ms step_avg:41.42ms +[2025-09-11 11:47:09] [Rank 0] step:5861/10000 train_time:242588ms step_avg:41.39ms +[2025-09-11 11:47:09] [Rank 0] step:5861/10000 train_time:242588ms step_avg:41.39ms +[2025-09-11 11:47:10] [Rank 0] step:5881/10000 train_time:243271ms step_avg:41.37ms +[2025-09-11 11:47:10] [Rank 0] step:5881/10000 train_time:243271ms step_avg:41.37ms +[2025-09-11 11:47:10] [Rank 0] step:5901/10000 train_time:243953ms step_avg:41.34ms +[2025-09-11 11:47:10] [Rank 0] step:5901/10000 train_time:243953ms step_avg:41.34ms +[2025-09-11 11:47:11] [Rank 0] step:5921/10000 train_time:244638ms step_avg:41.32ms 
+[2025-09-11 11:47:11] [Rank 0] step:5921/10000 train_time:244638ms step_avg:41.32ms +[2025-09-11 11:47:12] [Rank 0] step:5941/10000 train_time:245322ms step_avg:41.29ms +[2025-09-11 11:47:12] [Rank 0] step:5941/10000 train_time:245322ms step_avg:41.29ms +[2025-09-11 11:47:12] [Rank 0] step:5961/10000 train_time:246005ms step_avg:41.27ms +[2025-09-11 11:47:12] [Rank 0] step:5961/10000 train_time:246005ms step_avg:41.27ms +[2025-09-11 11:47:13] [Rank 0] step:5981/10000 train_time:246689ms step_avg:41.25ms +[2025-09-11 11:47:13] [Rank 0] step:5981/10000 train_time:246689ms step_avg:41.25ms +[2025-09-11 11:47:14] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 11:47:14] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 11:47:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 11:47:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 11:47:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 11:47:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 11:47:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:47:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:47:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 11:47:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 11:47:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 11:47:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 11:47:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 11:47:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 11:47:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 11:47:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 11:47:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 11:47:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 11:47:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 11:47:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 11:47:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 11:47:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 11:47:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 11:47:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 11:47:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 11:47:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 11:47:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 11:47:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 11:47:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 11:47:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 11:47:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 11:47:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 11:47:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 11:47:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 11:47:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 11:47:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 11:47:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 11:47:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 11:47:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 11:47:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 11:47:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 11:47:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 11:47:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 11:47:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 11:47:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:47:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:47:24] [Rank 0] PRINT: step:6000/10000 val_loss:5.0269 total_sharp:5.6582e-03 L1_sharp:2.9308e-03 L2_sharp:7.5705e-04 L3_sharp:5.7626e-04 L4_sharp:4.0856e-04 L5_sharp:8.1159e-04 L6_sharp:5.4678e-04 L7_sharp:5.9585e-04 L8_sharp:9.6449e-04 L9_sharp:7.8909e-04 L10_sharp:1.1752e-03 L11_sharp:1.3416e-03 L12_sharp:6.7128e-03 total_fnorm:9.5625e+00 total_l1_linf:2.4576e+04 total_spectral:4.8438e+00 L1_fnorm:2.5312e+00 L2_fnorm:2.5000e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5312e+00 L8_fnorm:2.4375e+00 L9_fnorm:2.5000e+00 L10_fnorm:2.5000e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.4531e+00 L1_l1linf:6.4844e-01 L2_l1linf:6.0547e-01 L3_l1linf:5.9375e-01 L4_l1linf:5.8984e-01 L5_l1linf:5.8203e-01 L6_l1linf:5.6250e-01 L7_l1linf:5.5859e-01 L8_l1linf:5.4688e-01 L9_l1linf:5.4297e-01 L10_l1linf:5.5078e-01 L11_l1linf:5.7031e-01 L12_l1linf:5.1172e-01 L1_spectral:3.2433e-02 L2_spectral:3.1823e-02 L3_spectral:3.1697e-02 L4_spectral:3.1865e-02 L5_spectral:3.1749e-02 L6_spectral:3.2127e-02 L7_spectral:3.2139e-02 L8_spectral:3.1610e-02 L9_spectral:3.1760e-02 L10_spectral:3.1593e-02 L11_spectral:3.1586e-02 L12_spectral:3.1666e-02 train_time:247354ms step_avg:41.23ms +[2025-09-11 11:47:24] [Rank 0] PRINT: step:6000/10000 val_loss:5.0269 total_sharp:5.6582e-03 L1_sharp:2.9308e-03 L2_sharp:7.5705e-04 L3_sharp:5.7626e-04 L4_sharp:4.0856e-04 L5_sharp:8.1159e-04 L6_sharp:5.4678e-04 L7_sharp:5.9585e-04 L8_sharp:9.6449e-04 L9_sharp:7.8909e-04 L10_sharp:1.1752e-03 L11_sharp:1.3416e-03 L12_sharp:6.7128e-03 total_fnorm:9.5625e+00 total_l1_linf:2.4576e+04 total_spectral:4.8438e+00 L1_fnorm:2.5312e+00 L2_fnorm:2.5000e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5312e+00 L8_fnorm:2.4375e+00 L9_fnorm:2.5000e+00 L10_fnorm:2.5000e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.4531e+00 L1_l1linf:6.4844e-01 L2_l1linf:6.0547e-01 L3_l1linf:5.9375e-01 L4_l1linf:5.8984e-01 L5_l1linf:5.8203e-01 
L6_l1linf:5.6250e-01 L7_l1linf:5.5859e-01 L8_l1linf:5.4688e-01 L9_l1linf:5.4297e-01 L10_l1linf:5.5078e-01 L11_l1linf:5.7031e-01 L12_l1linf:5.1172e-01 L1_spectral:3.2433e-02 L2_spectral:3.1823e-02 L3_spectral:3.1697e-02 L4_spectral:3.1865e-02 L5_spectral:3.1749e-02 L6_spectral:3.2127e-02 L7_spectral:3.2139e-02 L8_spectral:3.1610e-02 L9_spectral:3.1760e-02 L10_spectral:3.1593e-02 L11_spectral:3.1586e-02 L12_spectral:3.1666e-02 train_time:247354ms step_avg:41.23ms +[2025-09-11 11:47:25] [Rank 0] step:6001/10000 train_time:248515ms step_avg:41.41ms +[2025-09-11 11:47:25] [Rank 0] step:6001/10000 train_time:248515ms step_avg:41.41ms +[2025-09-11 11:47:26] [Rank 0] step:6021/10000 train_time:249228ms step_avg:41.39ms +[2025-09-11 11:47:26] [Rank 0] step:6021/10000 train_time:249228ms step_avg:41.39ms +[2025-09-11 11:47:26] [Rank 0] step:6041/10000 train_time:249916ms step_avg:41.37ms +[2025-09-11 11:47:26] [Rank 0] step:6041/10000 train_time:249916ms step_avg:41.37ms +[2025-09-11 11:47:27] [Rank 0] step:6061/10000 train_time:250601ms step_avg:41.35ms +[2025-09-11 11:47:27] [Rank 0] step:6061/10000 train_time:250601ms step_avg:41.35ms +[2025-09-11 11:47:28] [Rank 0] step:6081/10000 train_time:251288ms step_avg:41.32ms +[2025-09-11 11:47:28] [Rank 0] step:6081/10000 train_time:251288ms step_avg:41.32ms +[2025-09-11 11:47:28] [Rank 0] step:6101/10000 train_time:251972ms step_avg:41.30ms +[2025-09-11 11:47:28] [Rank 0] step:6101/10000 train_time:251972ms step_avg:41.30ms +[2025-09-11 11:47:29] [Rank 0] step:6121/10000 train_time:252658ms step_avg:41.28ms +[2025-09-11 11:47:29] [Rank 0] step:6121/10000 train_time:252658ms step_avg:41.28ms +[2025-09-11 11:47:30] [Rank 0] step:6141/10000 train_time:253343ms step_avg:41.25ms +[2025-09-11 11:47:30] [Rank 0] step:6141/10000 train_time:253343ms step_avg:41.25ms +[2025-09-11 11:47:30] [Rank 0] step:6161/10000 train_time:254028ms step_avg:41.23ms +[2025-09-11 11:47:30] [Rank 0] step:6161/10000 train_time:254028ms step_avg:41.23ms 
+[2025-09-11 11:47:31] [Rank 0] step:6181/10000 train_time:254710ms step_avg:41.21ms +[2025-09-11 11:47:31] [Rank 0] step:6181/10000 train_time:254710ms step_avg:41.21ms +[2025-09-11 11:47:32] [Rank 0] step:6201/10000 train_time:255396ms step_avg:41.19ms +[2025-09-11 11:47:32] [Rank 0] step:6201/10000 train_time:255396ms step_avg:41.19ms +[2025-09-11 11:47:32] [Rank 0] step:6221/10000 train_time:256082ms step_avg:41.16ms +[2025-09-11 11:47:32] [Rank 0] step:6221/10000 train_time:256082ms step_avg:41.16ms +[2025-09-11 11:47:33] [Rank 0] step:6241/10000 train_time:256767ms step_avg:41.14ms +[2025-09-11 11:47:33] [Rank 0] step:6241/10000 train_time:256767ms step_avg:41.14ms +[2025-09-11 11:47:34] [Rank 0] step:6261/10000 train_time:257450ms step_avg:41.12ms +[2025-09-11 11:47:34] [Rank 0] step:6261/10000 train_time:257450ms step_avg:41.12ms +[2025-09-11 11:47:35] [Rank 0] step:6281/10000 train_time:258135ms step_avg:41.10ms +[2025-09-11 11:47:35] [Rank 0] step:6281/10000 train_time:258135ms step_avg:41.10ms +[2025-09-11 11:47:35] [Rank 0] step:6301/10000 train_time:258819ms step_avg:41.08ms +[2025-09-11 11:47:35] [Rank 0] step:6301/10000 train_time:258819ms step_avg:41.08ms +[2025-09-11 11:47:36] [Rank 0] step:6321/10000 train_time:259506ms step_avg:41.05ms +[2025-09-11 11:47:36] [Rank 0] step:6321/10000 train_time:259506ms step_avg:41.05ms +[2025-09-11 11:47:37] [Rank 0] step:6341/10000 train_time:260191ms step_avg:41.03ms +[2025-09-11 11:47:37] [Rank 0] step:6341/10000 train_time:260191ms step_avg:41.03ms +[2025-09-11 11:47:37] [Rank 0] step:6361/10000 train_time:260876ms step_avg:41.01ms +[2025-09-11 11:47:37] [Rank 0] step:6361/10000 train_time:260876ms step_avg:41.01ms +[2025-09-11 11:47:38] [Rank 0] step:6381/10000 train_time:261561ms step_avg:40.99ms +[2025-09-11 11:47:38] [Rank 0] step:6381/10000 train_time:261561ms step_avg:40.99ms +[2025-09-11 11:47:39] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 11:47:39] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 11:47:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 11:47:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 11:47:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 11:47:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 11:47:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:47:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:47:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 11:47:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 11:47:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 11:47:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 11:47:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 11:47:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 11:47:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 11:47:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 11:47:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 11:47:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 11:47:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 11:47:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 11:47:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 11:47:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 11:47:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 11:47:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 11:47:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 11:47:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 11:47:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 11:47:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 11:47:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 11:47:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 11:47:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 11:47:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 11:47:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 11:47:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 11:47:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 11:47:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 11:47:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 11:47:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 11:47:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 11:47:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 11:47:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 11:47:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 11:47:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 11:47:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 11:47:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:47:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:47:49] [Rank 0] PRINT: step:6400/10000 val_loss:4.9934 total_sharp:4.7717e-03 L1_sharp:2.2113e-03 L2_sharp:5.5238e-04 L3_sharp:4.3859e-04 L4_sharp:4.1033e-04 L5_sharp:5.4797e-04 L6_sharp:4.4093e-04 L7_sharp:5.5491e-04 L8_sharp:9.6378e-04 L9_sharp:7.8425e-04 L10_sharp:1.1338e-03 L11_sharp:1.2500e-03 L12_sharp:3.7595e-03 total_fnorm:8.6250e+00 total_l1_linf:2.0992e+04 total_spectral:4.3125e+00 L1_fnorm:2.2812e+00 L2_fnorm:2.2500e+00 L3_fnorm:2.2500e+00 L4_fnorm:2.2500e+00 L5_fnorm:2.2344e+00 L6_fnorm:2.2656e+00 L7_fnorm:2.2656e+00 L8_fnorm:2.1875e+00 L9_fnorm:2.2500e+00 L10_fnorm:2.2500e+00 L11_fnorm:2.2656e+00 L12_fnorm:2.2031e+00 L1_l1linf:5.7422e-01 L2_l1linf:5.2734e-01 L3_l1linf:5.2344e-01 L4_l1linf:5.1953e-01 L5_l1linf:4.9414e-01 L6_l1linf:4.8438e-01 L7_l1linf:4.8828e-01 L8_l1linf:4.7656e-01 L9_l1linf:4.6680e-01 L10_l1linf:4.7656e-01 L11_l1linf:4.8633e-01 L12_l1linf:4.5312e-01 L1_spectral:2.9560e-02 L2_spectral:2.9178e-02 L3_spectral:2.9046e-02 L4_spectral:2.9016e-02 L5_spectral:2.9249e-02 L6_spectral:2.9505e-02 L7_spectral:2.9323e-02 L8_spectral:2.8627e-02 L9_spectral:2.9140e-02 L10_spectral:2.9381e-02 L11_spectral:2.9147e-02 L12_spectral:2.9180e-02 train_time:262225ms step_avg:40.97ms +[2025-09-11 11:47:49] [Rank 0] PRINT: step:6400/10000 
val_loss:4.9934 total_sharp:4.7717e-03 L1_sharp:2.2113e-03 L2_sharp:5.5238e-04 L3_sharp:4.3859e-04 L4_sharp:4.1033e-04 L5_sharp:5.4797e-04 L6_sharp:4.4093e-04 L7_sharp:5.5491e-04 L8_sharp:9.6378e-04 L9_sharp:7.8425e-04 L10_sharp:1.1338e-03 L11_sharp:1.2500e-03 L12_sharp:3.7595e-03 total_fnorm:8.6250e+00 total_l1_linf:2.0992e+04 total_spectral:4.3125e+00 L1_fnorm:2.2812e+00 L2_fnorm:2.2500e+00 L3_fnorm:2.2500e+00 L4_fnorm:2.2500e+00 L5_fnorm:2.2344e+00 L6_fnorm:2.2656e+00 L7_fnorm:2.2656e+00 L8_fnorm:2.1875e+00 L9_fnorm:2.2500e+00 L10_fnorm:2.2500e+00 L11_fnorm:2.2656e+00 L12_fnorm:2.2031e+00 L1_l1linf:5.7422e-01 L2_l1linf:5.2734e-01 L3_l1linf:5.2344e-01 L4_l1linf:5.1953e-01 L5_l1linf:4.9414e-01 L6_l1linf:4.8438e-01 L7_l1linf:4.8828e-01 L8_l1linf:4.7656e-01 L9_l1linf:4.6680e-01 L10_l1linf:4.7656e-01 L11_l1linf:4.8633e-01 L12_l1linf:4.5312e-01 L1_spectral:2.9560e-02 L2_spectral:2.9178e-02 L3_spectral:2.9046e-02 L4_spectral:2.9016e-02 L5_spectral:2.9249e-02 L6_spectral:2.9505e-02 L7_spectral:2.9323e-02 L8_spectral:2.8627e-02 L9_spectral:2.9140e-02 L10_spectral:2.9381e-02 L11_spectral:2.9147e-02 L12_spectral:2.9180e-02 train_time:262225ms step_avg:40.97ms +[2025-09-11 11:47:50] [Rank 0] step:6401/10000 train_time:263398ms step_avg:41.15ms +[2025-09-11 11:47:50] [Rank 0] step:6401/10000 train_time:263398ms step_avg:41.15ms +[2025-09-11 11:47:50] [Rank 0] step:6421/10000 train_time:264107ms step_avg:41.13ms +[2025-09-11 11:47:50] [Rank 0] step:6421/10000 train_time:264107ms step_avg:41.13ms +[2025-09-11 11:47:51] [Rank 0] step:6441/10000 train_time:264793ms step_avg:41.11ms +[2025-09-11 11:47:51] [Rank 0] step:6441/10000 train_time:264793ms step_avg:41.11ms +[2025-09-11 11:47:52] [Rank 0] step:6461/10000 train_time:265479ms step_avg:41.09ms +[2025-09-11 11:47:52] [Rank 0] step:6461/10000 train_time:265479ms step_avg:41.09ms +[2025-09-11 11:47:52] [Rank 0] step:6481/10000 train_time:266166ms step_avg:41.07ms +[2025-09-11 11:47:52] [Rank 0] step:6481/10000 
train_time:266166ms step_avg:41.07ms +[2025-09-11 11:47:53] [Rank 0] step:6501/10000 train_time:266853ms step_avg:41.05ms +[2025-09-11 11:47:53] [Rank 0] step:6501/10000 train_time:266853ms step_avg:41.05ms +[2025-09-11 11:47:54] [Rank 0] step:6521/10000 train_time:267539ms step_avg:41.03ms +[2025-09-11 11:47:54] [Rank 0] step:6521/10000 train_time:267539ms step_avg:41.03ms +[2025-09-11 11:47:55] [Rank 0] step:6541/10000 train_time:268223ms step_avg:41.01ms +[2025-09-11 11:47:55] [Rank 0] step:6541/10000 train_time:268223ms step_avg:41.01ms +[2025-09-11 11:47:55] [Rank 0] step:6561/10000 train_time:268908ms step_avg:40.99ms +[2025-09-11 11:47:55] [Rank 0] step:6561/10000 train_time:268908ms step_avg:40.99ms +[2025-09-11 11:47:56] [Rank 0] step:6581/10000 train_time:269594ms step_avg:40.97ms +[2025-09-11 11:47:56] [Rank 0] step:6581/10000 train_time:269594ms step_avg:40.97ms +[2025-09-11 11:47:57] [Rank 0] step:6601/10000 train_time:270279ms step_avg:40.95ms +[2025-09-11 11:47:57] [Rank 0] step:6601/10000 train_time:270279ms step_avg:40.95ms +[2025-09-11 11:47:57] [Rank 0] step:6621/10000 train_time:270963ms step_avg:40.92ms +[2025-09-11 11:47:57] [Rank 0] step:6621/10000 train_time:270963ms step_avg:40.92ms +[2025-09-11 11:47:58] [Rank 0] step:6641/10000 train_time:271648ms step_avg:40.90ms +[2025-09-11 11:47:58] [Rank 0] step:6641/10000 train_time:271648ms step_avg:40.90ms +[2025-09-11 11:47:59] [Rank 0] step:6661/10000 train_time:272333ms step_avg:40.88ms +[2025-09-11 11:47:59] [Rank 0] step:6661/10000 train_time:272333ms step_avg:40.88ms +[2025-09-11 11:47:59] [Rank 0] step:6681/10000 train_time:273025ms step_avg:40.87ms +[2025-09-11 11:47:59] [Rank 0] step:6681/10000 train_time:273025ms step_avg:40.87ms +[2025-09-11 11:48:00] [Rank 0] step:6701/10000 train_time:273717ms step_avg:40.85ms +[2025-09-11 11:48:00] [Rank 0] step:6701/10000 train_time:273717ms step_avg:40.85ms +[2025-09-11 11:48:01] [Rank 0] step:6721/10000 train_time:274410ms step_avg:40.83ms 
+[2025-09-11 11:48:01] [Rank 0] step:6721/10000 train_time:274410ms step_avg:40.83ms +[2025-09-11 11:48:01] [Rank 0] step:6741/10000 train_time:275103ms step_avg:40.81ms +[2025-09-11 11:48:01] [Rank 0] step:6741/10000 train_time:275103ms step_avg:40.81ms +[2025-09-11 11:48:02] [Rank 0] step:6761/10000 train_time:275799ms step_avg:40.79ms +[2025-09-11 11:48:02] [Rank 0] step:6761/10000 train_time:275799ms step_avg:40.79ms +[2025-09-11 11:48:03] [Rank 0] step:6781/10000 train_time:276492ms step_avg:40.77ms +[2025-09-11 11:48:03] [Rank 0] step:6781/10000 train_time:276492ms step_avg:40.77ms +[2025-09-11 11:48:03] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 11:48:03] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 11:48:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 11:48:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 11:48:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 11:48:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 11:48:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:48:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:48:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 11:48:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 11:48:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 11:48:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 11:48:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 11:48:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 11:48:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 11:48:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 11:48:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 11:48:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 11:48:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 11:48:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 11:48:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 11:48:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 11:48:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 11:48:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 11:48:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 11:48:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 11:48:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 11:48:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 11:48:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 11:48:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 11:48:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 11:48:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 11:48:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 11:48:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 11:48:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 11:48:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 11:48:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 11:48:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 11:48:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 11:48:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 11:48:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 11:48:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 11:48:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 11:48:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 11:48:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:48:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:48:13] [Rank 0] PRINT: step:6800/10000 val_loss:4.9549 total_sharp:3.8595e-03 L1_sharp:1.9301e-03 L2_sharp:5.4945e-04 L3_sharp:3.1057e-04 L4_sharp:3.2227e-04 L5_sharp:6.1149e-04 L6_sharp:4.4498e-04 L7_sharp:4.1319e-04 L8_sharp:8.7108e-04 L9_sharp:7.9207e-04 L10_sharp:9.9878e-04 L11_sharp:1.1105e-03 L12_sharp:3.7603e-03 total_fnorm:7.7500e+00 total_l1_linf:1.7920e+04 total_spectral:3.8594e+00 L1_fnorm:2.0312e+00 L2_fnorm:2.0000e+00 L3_fnorm:1.9844e+00 L4_fnorm:2.0000e+00 L5_fnorm:1.9688e+00 L6_fnorm:2.0000e+00 L7_fnorm:2.0000e+00 L8_fnorm:1.9375e+00 L9_fnorm:1.9922e+00 L10_fnorm:1.9844e+00 L11_fnorm:2.0000e+00 L12_fnorm:1.9609e+00 L1_l1linf:4.8828e-01 L2_l1linf:4.4922e-01 L3_l1linf:4.4336e-01 L4_l1linf:4.3945e-01 L5_l1linf:4.2383e-01 L6_l1linf:4.1992e-01 L7_l1linf:4.1992e-01 L8_l1linf:4.0039e-01 L9_l1linf:3.8867e-01 L10_l1linf:3.9648e-01 L11_l1linf:4.0625e-01 L12_l1linf:3.9844e-01 L1_spectral:2.6318e-02 L2_spectral:2.6063e-02 L3_spectral:2.6059e-02 L4_spectral:2.6171e-02 L5_spectral:2.5930e-02 L6_spectral:2.6538e-02 L7_spectral:2.6521e-02 L8_spectral:2.6019e-02 L9_spectral:2.6385e-02 L10_spectral:2.6527e-02 L11_spectral:2.6373e-02 L12_spectral:2.6309e-02 train_time:277164ms step_avg:40.76ms +[2025-09-11 11:48:13] [Rank 0] PRINT: step:6800/10000 val_loss:4.9549 total_sharp:3.8595e-03 L1_sharp:1.9301e-03 L2_sharp:5.4945e-04 L3_sharp:3.1057e-04 L4_sharp:3.2227e-04 L5_sharp:6.1149e-04 L6_sharp:4.4498e-04 L7_sharp:4.1319e-04 L8_sharp:8.7108e-04 L9_sharp:7.9207e-04 L10_sharp:9.9878e-04 L11_sharp:1.1105e-03 L12_sharp:3.7603e-03 total_fnorm:7.7500e+00 total_l1_linf:1.7920e+04 total_spectral:3.8594e+00 L1_fnorm:2.0312e+00 L2_fnorm:2.0000e+00 L3_fnorm:1.9844e+00 L4_fnorm:2.0000e+00 L5_fnorm:1.9688e+00 L6_fnorm:2.0000e+00 L7_fnorm:2.0000e+00 L8_fnorm:1.9375e+00 L9_fnorm:1.9922e+00 L10_fnorm:1.9844e+00 L11_fnorm:2.0000e+00 L12_fnorm:1.9609e+00 L1_l1linf:4.8828e-01 L2_l1linf:4.4922e-01 L3_l1linf:4.4336e-01 L4_l1linf:4.3945e-01 L5_l1linf:4.2383e-01 
L6_l1linf:4.1992e-01 L7_l1linf:4.1992e-01 L8_l1linf:4.0039e-01 L9_l1linf:3.8867e-01 L10_l1linf:3.9648e-01 L11_l1linf:4.0625e-01 L12_l1linf:3.9844e-01 L1_spectral:2.6318e-02 L2_spectral:2.6063e-02 L3_spectral:2.6059e-02 L4_spectral:2.6171e-02 L5_spectral:2.5930e-02 L6_spectral:2.6538e-02 L7_spectral:2.6521e-02 L8_spectral:2.6019e-02 L9_spectral:2.6385e-02 L10_spectral:2.6527e-02 L11_spectral:2.6373e-02 L12_spectral:2.6309e-02 train_time:277164ms step_avg:40.76ms +[2025-09-11 11:48:15] [Rank 0] step:6801/10000 train_time:278314ms step_avg:40.92ms +[2025-09-11 11:48:15] [Rank 0] step:6801/10000 train_time:278314ms step_avg:40.92ms +[2025-09-11 11:48:15] [Rank 0] step:6821/10000 train_time:279039ms step_avg:40.91ms +[2025-09-11 11:48:15] [Rank 0] step:6821/10000 train_time:279039ms step_avg:40.91ms +[2025-09-11 11:48:16] [Rank 0] step:6841/10000 train_time:279736ms step_avg:40.89ms +[2025-09-11 11:48:16] [Rank 0] step:6841/10000 train_time:279736ms step_avg:40.89ms +[2025-09-11 11:48:17] [Rank 0] step:6861/10000 train_time:280430ms step_avg:40.87ms +[2025-09-11 11:48:17] [Rank 0] step:6861/10000 train_time:280430ms step_avg:40.87ms +[2025-09-11 11:48:17] [Rank 0] step:6881/10000 train_time:281124ms step_avg:40.86ms +[2025-09-11 11:48:17] [Rank 0] step:6881/10000 train_time:281124ms step_avg:40.86ms +[2025-09-11 11:48:18] [Rank 0] step:6901/10000 train_time:281816ms step_avg:40.84ms +[2025-09-11 11:48:18] [Rank 0] step:6901/10000 train_time:281816ms step_avg:40.84ms +[2025-09-11 11:48:19] [Rank 0] step:6921/10000 train_time:282507ms step_avg:40.82ms +[2025-09-11 11:48:19] [Rank 0] step:6921/10000 train_time:282507ms step_avg:40.82ms +[2025-09-11 11:48:19] [Rank 0] step:6941/10000 train_time:283201ms step_avg:40.80ms +[2025-09-11 11:48:19] [Rank 0] step:6941/10000 train_time:283201ms step_avg:40.80ms +[2025-09-11 11:48:20] [Rank 0] step:6961/10000 train_time:283894ms step_avg:40.78ms +[2025-09-11 11:48:20] [Rank 0] step:6961/10000 train_time:283894ms step_avg:40.78ms 
+[2025-09-11 11:48:21] [Rank 0] step:6981/10000 train_time:284589ms step_avg:40.77ms +[2025-09-11 11:48:21] [Rank 0] step:6981/10000 train_time:284589ms step_avg:40.77ms +[2025-09-11 11:48:22] [Rank 0] step:7001/10000 train_time:285282ms step_avg:40.75ms +[2025-09-11 11:48:22] [Rank 0] step:7001/10000 train_time:285282ms step_avg:40.75ms +[2025-09-11 11:48:22] [Rank 0] step:7021/10000 train_time:285975ms step_avg:40.73ms +[2025-09-11 11:48:22] [Rank 0] step:7021/10000 train_time:285975ms step_avg:40.73ms +[2025-09-11 11:48:23] [Rank 0] step:7041/10000 train_time:286667ms step_avg:40.71ms +[2025-09-11 11:48:23] [Rank 0] step:7041/10000 train_time:286667ms step_avg:40.71ms +[2025-09-11 11:48:24] [Rank 0] step:7061/10000 train_time:287362ms step_avg:40.70ms +[2025-09-11 11:48:24] [Rank 0] step:7061/10000 train_time:287362ms step_avg:40.70ms +[2025-09-11 11:48:24] [Rank 0] step:7081/10000 train_time:288054ms step_avg:40.68ms +[2025-09-11 11:48:24] [Rank 0] step:7081/10000 train_time:288054ms step_avg:40.68ms +[2025-09-11 11:48:25] [Rank 0] step:7101/10000 train_time:288747ms step_avg:40.66ms +[2025-09-11 11:48:25] [Rank 0] step:7101/10000 train_time:288747ms step_avg:40.66ms +[2025-09-11 11:48:26] [Rank 0] step:7121/10000 train_time:289970ms step_avg:40.72ms +[2025-09-11 11:48:26] [Rank 0] step:7121/10000 train_time:289970ms step_avg:40.72ms +[2025-09-11 11:48:27] [Rank 0] step:7141/10000 train_time:290663ms step_avg:40.70ms +[2025-09-11 11:48:27] [Rank 0] step:7141/10000 train_time:290663ms step_avg:40.70ms +[2025-09-11 11:48:28] [Rank 0] step:7161/10000 train_time:291389ms step_avg:40.69ms +[2025-09-11 11:48:28] [Rank 0] step:7161/10000 train_time:291389ms step_avg:40.69ms +[2025-09-11 11:48:29] [Rank 0] step:7181/10000 train_time:292380ms step_avg:40.72ms +[2025-09-11 11:48:29] [Rank 0] step:7181/10000 train_time:292380ms step_avg:40.72ms +[2025-09-11 11:48:29] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 11:48:29] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 11:48:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 11:48:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 11:48:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 11:48:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 11:48:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:48:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:48:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 11:48:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 11:48:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 11:48:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 11:48:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 11:48:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 11:48:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 11:48:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 11:48:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 11:48:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 11:48:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 11:48:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 11:48:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 11:48:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 11:48:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 11:48:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 11:48:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 11:48:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 11:48:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 11:48:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 11:48:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 11:48:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 11:48:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 11:48:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 11:48:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 11:48:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 11:48:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 11:48:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 11:48:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 11:48:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 11:48:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 11:48:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 11:48:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 11:48:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 11:48:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 11:48:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 11:48:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:48:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:48:43] [Rank 0] PRINT: step:7200/10000 val_loss:4.9186 total_sharp:3.7950e-03 L1_sharp:1.6291e-03 L2_sharp:2.6856e-04 L3_sharp:2.3636e-04 L4_sharp:3.8985e-04 L5_sharp:5.4107e-04 L6_sharp:4.2669e-04 L7_sharp:4.9339e-04 L8_sharp:7.5007e-04 L9_sharp:6.6588e-04 L10_sharp:9.9169e-04 L11_sharp:1.2230e-03 L12_sharp:4.4419e-03 total_fnorm:6.5312e+00 total_l1_linf:1.4336e+04 total_spectral:3.2812e+00 L1_fnorm:1.7578e+00 L2_fnorm:1.7344e+00 L3_fnorm:1.7266e+00 L4_fnorm:1.7188e+00 L5_fnorm:1.6953e+00 L6_fnorm:1.7266e+00 L7_fnorm:1.7266e+00 L8_fnorm:1.6641e+00 L9_fnorm:1.7109e+00 L10_fnorm:1.7031e+00 L11_fnorm:1.7188e+00 L12_fnorm:1.6797e+00 L1_l1linf:3.9453e-01 L2_l1linf:3.7695e-01 L3_l1linf:3.6328e-01 L4_l1linf:3.7500e-01 L5_l1linf:3.5156e-01 L6_l1linf:3.4766e-01 L7_l1linf:3.4180e-01 L8_l1linf:3.2617e-01 L9_l1linf:3.2617e-01 L10_l1linf:3.2617e-01 L11_l1linf:3.4961e-01 L12_l1linf:3.3008e-01 L1_spectral:2.3254e-02 L2_spectral:2.2849e-02 L3_spectral:2.3161e-02 L4_spectral:2.3185e-02 L5_spectral:2.2858e-02 L6_spectral:2.3281e-02 L7_spectral:2.3379e-02 L8_spectral:2.2941e-02 L9_spectral:2.3288e-02 L10_spectral:2.3326e-02 L11_spectral:2.3265e-02 L12_spectral:2.3173e-02 train_time:293053ms step_avg:40.70ms +[2025-09-11 11:48:43] [Rank 0] PRINT: step:7200/10000 
val_loss:4.9186 total_sharp:3.7950e-03 L1_sharp:1.6291e-03 L2_sharp:2.6856e-04 L3_sharp:2.3636e-04 L4_sharp:3.8985e-04 L5_sharp:5.4107e-04 L6_sharp:4.2669e-04 L7_sharp:4.9339e-04 L8_sharp:7.5007e-04 L9_sharp:6.6588e-04 L10_sharp:9.9169e-04 L11_sharp:1.2230e-03 L12_sharp:4.4419e-03 total_fnorm:6.5312e+00 total_l1_linf:1.4336e+04 total_spectral:3.2812e+00 L1_fnorm:1.7578e+00 L2_fnorm:1.7344e+00 L3_fnorm:1.7266e+00 L4_fnorm:1.7188e+00 L5_fnorm:1.6953e+00 L6_fnorm:1.7266e+00 L7_fnorm:1.7266e+00 L8_fnorm:1.6641e+00 L9_fnorm:1.7109e+00 L10_fnorm:1.7031e+00 L11_fnorm:1.7188e+00 L12_fnorm:1.6797e+00 L1_l1linf:3.9453e-01 L2_l1linf:3.7695e-01 L3_l1linf:3.6328e-01 L4_l1linf:3.7500e-01 L5_l1linf:3.5156e-01 L6_l1linf:3.4766e-01 L7_l1linf:3.4180e-01 L8_l1linf:3.2617e-01 L9_l1linf:3.2617e-01 L10_l1linf:3.2617e-01 L11_l1linf:3.4961e-01 L12_l1linf:3.3008e-01 L1_spectral:2.3254e-02 L2_spectral:2.2849e-02 L3_spectral:2.3161e-02 L4_spectral:2.3185e-02 L5_spectral:2.2858e-02 L6_spectral:2.3281e-02 L7_spectral:2.3379e-02 L8_spectral:2.2941e-02 L9_spectral:2.3288e-02 L10_spectral:2.3326e-02 L11_spectral:2.3265e-02 L12_spectral:2.3173e-02 train_time:293053ms step_avg:40.70ms +[2025-09-11 11:48:44] [Rank 0] step:7201/10000 train_time:294201ms step_avg:40.86ms +[2025-09-11 11:48:44] [Rank 0] step:7201/10000 train_time:294201ms step_avg:40.86ms +[2025-09-11 11:48:44] [Rank 0] step:7221/10000 train_time:294900ms step_avg:40.84ms +[2025-09-11 11:48:44] [Rank 0] step:7221/10000 train_time:294900ms step_avg:40.84ms +[2025-09-11 11:48:45] [Rank 0] step:7241/10000 train_time:295595ms step_avg:40.82ms +[2025-09-11 11:48:45] [Rank 0] step:7241/10000 train_time:295595ms step_avg:40.82ms +[2025-09-11 11:48:46] [Rank 0] step:7261/10000 train_time:296290ms step_avg:40.81ms +[2025-09-11 11:48:46] [Rank 0] step:7261/10000 train_time:296290ms step_avg:40.81ms +[2025-09-11 11:48:46] [Rank 0] step:7281/10000 train_time:296989ms step_avg:40.79ms +[2025-09-11 11:48:46] [Rank 0] step:7281/10000 
train_time:296989ms step_avg:40.79ms +[2025-09-11 11:48:47] [Rank 0] step:7301/10000 train_time:297681ms step_avg:40.77ms +[2025-09-11 11:48:47] [Rank 0] step:7301/10000 train_time:297681ms step_avg:40.77ms +[2025-09-11 11:48:48] [Rank 0] step:7321/10000 train_time:298374ms step_avg:40.76ms +[2025-09-11 11:48:48] [Rank 0] step:7321/10000 train_time:298374ms step_avg:40.76ms +[2025-09-11 11:48:49] [Rank 0] step:7341/10000 train_time:299069ms step_avg:40.74ms +[2025-09-11 11:48:49] [Rank 0] step:7341/10000 train_time:299069ms step_avg:40.74ms +[2025-09-11 11:48:49] [Rank 0] step:7361/10000 train_time:299762ms step_avg:40.72ms +[2025-09-11 11:48:49] [Rank 0] step:7361/10000 train_time:299762ms step_avg:40.72ms +[2025-09-11 11:48:50] [Rank 0] step:7381/10000 train_time:300457ms step_avg:40.71ms +[2025-09-11 11:48:50] [Rank 0] step:7381/10000 train_time:300457ms step_avg:40.71ms +[2025-09-11 11:48:51] [Rank 0] step:7401/10000 train_time:301150ms step_avg:40.69ms +[2025-09-11 11:48:51] [Rank 0] step:7401/10000 train_time:301150ms step_avg:40.69ms +[2025-09-11 11:48:51] [Rank 0] step:7421/10000 train_time:301843ms step_avg:40.67ms +[2025-09-11 11:48:51] [Rank 0] step:7421/10000 train_time:301843ms step_avg:40.67ms +[2025-09-11 11:48:52] [Rank 0] step:7441/10000 train_time:302538ms step_avg:40.66ms +[2025-09-11 11:48:52] [Rank 0] step:7441/10000 train_time:302538ms step_avg:40.66ms +[2025-09-11 11:48:53] [Rank 0] step:7461/10000 train_time:303232ms step_avg:40.64ms +[2025-09-11 11:48:53] [Rank 0] step:7461/10000 train_time:303232ms step_avg:40.64ms +[2025-09-11 11:48:53] [Rank 0] step:7481/10000 train_time:303929ms step_avg:40.63ms +[2025-09-11 11:48:53] [Rank 0] step:7481/10000 train_time:303929ms step_avg:40.63ms +[2025-09-11 11:48:54] [Rank 0] step:7501/10000 train_time:304625ms step_avg:40.61ms +[2025-09-11 11:48:54] [Rank 0] step:7501/10000 train_time:304625ms step_avg:40.61ms +[2025-09-11 11:48:55] [Rank 0] step:7521/10000 train_time:305320ms step_avg:40.60ms 
+[2025-09-11 11:48:55] [Rank 0] step:7521/10000 train_time:305320ms step_avg:40.60ms +[2025-09-11 11:48:55] [Rank 0] step:7541/10000 train_time:306013ms step_avg:40.58ms +[2025-09-11 11:48:55] [Rank 0] step:7541/10000 train_time:306013ms step_avg:40.58ms +[2025-09-11 11:48:56] [Rank 0] step:7561/10000 train_time:306709ms step_avg:40.56ms +[2025-09-11 11:48:56] [Rank 0] step:7561/10000 train_time:306709ms step_avg:40.56ms +[2025-09-11 11:48:57] [Rank 0] step:7581/10000 train_time:307405ms step_avg:40.55ms +[2025-09-11 11:48:57] [Rank 0] step:7581/10000 train_time:307405ms step_avg:40.55ms +[2025-09-11 11:48:58] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 11:48:58] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 11:48:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 11:48:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 11:49:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 11:49:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 11:49:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:49:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:49:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 11:49:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 11:49:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 11:49:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 11:49:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 11:49:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 11:49:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 11:49:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 11:49:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 11:49:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 11:49:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 11:49:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 11:49:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 11:49:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 11:49:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 11:49:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 11:49:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 11:49:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 11:49:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 11:49:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 11:49:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 11:49:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 11:49:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 11:49:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 11:49:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 11:49:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 11:49:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 11:49:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 11:49:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 11:49:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 11:49:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 11:49:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 11:49:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 11:49:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 11:49:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 11:49:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 11:49:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:49:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:49:08] [Rank 0] PRINT: step:7600/10000 val_loss:4.8961 total_sharp:3.5727e-03 L1_sharp:1.4150e-03 L2_sharp:5.7792e-04 L3_sharp:3.1090e-04 L4_sharp:2.7577e-04 L5_sharp:5.6406e-04 L6_sharp:3.8771e-04 L7_sharp:4.8566e-04 L8_sharp:7.1970e-04 L9_sharp:6.3369e-04 L10_sharp:9.4831e-04 L11_sharp:1.1319e-03 L12_sharp:2.7436e-03 total_fnorm:5.4375e+00 total_l1_linf:1.1136e+04 total_spectral:2.7031e+00 L1_fnorm:1.4844e+00 L2_fnorm:1.4531e+00 L3_fnorm:1.4453e+00 L4_fnorm:1.4453e+00 L5_fnorm:1.4141e+00 L6_fnorm:1.4297e+00 L7_fnorm:1.4375e+00 L8_fnorm:1.3828e+00 L9_fnorm:1.4375e+00 L10_fnorm:1.4297e+00 L11_fnorm:1.4453e+00 L12_fnorm:1.4141e+00 L1_l1linf:3.1445e-01 L2_l1linf:2.9102e-01 L3_l1linf:3.0078e-01 L4_l1linf:2.9297e-01 L5_l1linf:2.8320e-01 L6_l1linf:2.7930e-01 L7_l1linf:2.7539e-01 L8_l1linf:2.5586e-01 L9_l1linf:2.5391e-01 L10_l1linf:2.5977e-01 L11_l1linf:2.7148e-01 L12_l1linf:2.7148e-01 L1_spectral:1.9742e-02 L2_spectral:1.9662e-02 L3_spectral:1.9707e-02 L4_spectral:1.9720e-02 L5_spectral:1.9577e-02 L6_spectral:1.9888e-02 L7_spectral:1.9929e-02 L8_spectral:1.9700e-02 L9_spectral:1.9872e-02 L10_spectral:2.0000e-02 L11_spectral:1.9888e-02 L12_spectral:1.9977e-02 train_time:308087ms step_avg:40.54ms +[2025-09-11 11:49:08] [Rank 0] PRINT: step:7600/10000 val_loss:4.8961 total_sharp:3.5727e-03 L1_sharp:1.4150e-03 L2_sharp:5.7792e-04 L3_sharp:3.1090e-04 L4_sharp:2.7577e-04 L5_sharp:5.6406e-04 L6_sharp:3.8771e-04 L7_sharp:4.8566e-04 L8_sharp:7.1970e-04 L9_sharp:6.3369e-04 L10_sharp:9.4831e-04 L11_sharp:1.1319e-03 L12_sharp:2.7436e-03 total_fnorm:5.4375e+00 total_l1_linf:1.1136e+04 total_spectral:2.7031e+00 L1_fnorm:1.4844e+00 L2_fnorm:1.4531e+00 L3_fnorm:1.4453e+00 L4_fnorm:1.4453e+00 L5_fnorm:1.4141e+00 L6_fnorm:1.4297e+00 L7_fnorm:1.4375e+00 L8_fnorm:1.3828e+00 L9_fnorm:1.4375e+00 L10_fnorm:1.4297e+00 L11_fnorm:1.4453e+00 L12_fnorm:1.4141e+00 L1_l1linf:3.1445e-01 L2_l1linf:2.9102e-01 L3_l1linf:3.0078e-01 L4_l1linf:2.9297e-01 L5_l1linf:2.8320e-01 
L6_l1linf:2.7930e-01 L7_l1linf:2.7539e-01 L8_l1linf:2.5586e-01 L9_l1linf:2.5391e-01 L10_l1linf:2.5977e-01 L11_l1linf:2.7148e-01 L12_l1linf:2.7148e-01 L1_spectral:1.9742e-02 L2_spectral:1.9662e-02 L3_spectral:1.9707e-02 L4_spectral:1.9720e-02 L5_spectral:1.9577e-02 L6_spectral:1.9888e-02 L7_spectral:1.9929e-02 L8_spectral:1.9700e-02 L9_spectral:1.9872e-02 L10_spectral:2.0000e-02 L11_spectral:1.9888e-02 L12_spectral:1.9977e-02 train_time:308087ms step_avg:40.54ms +[2025-09-11 11:49:09] [Rank 0] step:7601/10000 train_time:309256ms step_avg:40.69ms +[2025-09-11 11:49:09] [Rank 0] step:7601/10000 train_time:309256ms step_avg:40.69ms +[2025-09-11 11:49:09] [Rank 0] step:7621/10000 train_time:309972ms step_avg:40.67ms +[2025-09-11 11:49:09] [Rank 0] step:7621/10000 train_time:309972ms step_avg:40.67ms +[2025-09-11 11:49:10] [Rank 0] step:7641/10000 train_time:310669ms step_avg:40.66ms +[2025-09-11 11:49:10] [Rank 0] step:7641/10000 train_time:310669ms step_avg:40.66ms +[2025-09-11 11:49:11] [Rank 0] step:7661/10000 train_time:311364ms step_avg:40.64ms +[2025-09-11 11:49:11] [Rank 0] step:7661/10000 train_time:311364ms step_avg:40.64ms +[2025-09-11 11:49:12] [Rank 0] step:7681/10000 train_time:312059ms step_avg:40.63ms +[2025-09-11 11:49:12] [Rank 0] step:7681/10000 train_time:312059ms step_avg:40.63ms +[2025-09-11 11:49:12] [Rank 0] step:7701/10000 train_time:312756ms step_avg:40.61ms +[2025-09-11 11:49:12] [Rank 0] step:7701/10000 train_time:312756ms step_avg:40.61ms +[2025-09-11 11:49:13] [Rank 0] step:7721/10000 train_time:313450ms step_avg:40.60ms +[2025-09-11 11:49:13] [Rank 0] step:7721/10000 train_time:313450ms step_avg:40.60ms +[2025-09-11 11:49:14] [Rank 0] step:7741/10000 train_time:314144ms step_avg:40.58ms +[2025-09-11 11:49:14] [Rank 0] step:7741/10000 train_time:314144ms step_avg:40.58ms +[2025-09-11 11:49:14] [Rank 0] step:7761/10000 train_time:314839ms step_avg:40.57ms +[2025-09-11 11:49:14] [Rank 0] step:7761/10000 train_time:314839ms step_avg:40.57ms 
+[2025-09-11 11:49:15] [Rank 0] step:7781/10000 train_time:315535ms step_avg:40.55ms +[2025-09-11 11:49:15] [Rank 0] step:7781/10000 train_time:315535ms step_avg:40.55ms +[2025-09-11 11:49:16] [Rank 0] step:7801/10000 train_time:316229ms step_avg:40.54ms +[2025-09-11 11:49:16] [Rank 0] step:7801/10000 train_time:316229ms step_avg:40.54ms +[2025-09-11 11:49:16] [Rank 0] step:7821/10000 train_time:316924ms step_avg:40.52ms +[2025-09-11 11:49:16] [Rank 0] step:7821/10000 train_time:316924ms step_avg:40.52ms +[2025-09-11 11:49:17] [Rank 0] step:7841/10000 train_time:317622ms step_avg:40.51ms +[2025-09-11 11:49:17] [Rank 0] step:7841/10000 train_time:317622ms step_avg:40.51ms +[2025-09-11 11:49:18] [Rank 0] step:7861/10000 train_time:318319ms step_avg:40.49ms +[2025-09-11 11:49:18] [Rank 0] step:7861/10000 train_time:318319ms step_avg:40.49ms +[2025-09-11 11:49:19] [Rank 0] step:7881/10000 train_time:319015ms step_avg:40.48ms +[2025-09-11 11:49:19] [Rank 0] step:7881/10000 train_time:319015ms step_avg:40.48ms +[2025-09-11 11:49:19] [Rank 0] step:7901/10000 train_time:319711ms step_avg:40.46ms +[2025-09-11 11:49:19] [Rank 0] step:7901/10000 train_time:319711ms step_avg:40.46ms +[2025-09-11 11:49:20] [Rank 0] step:7921/10000 train_time:320407ms step_avg:40.45ms +[2025-09-11 11:49:20] [Rank 0] step:7921/10000 train_time:320407ms step_avg:40.45ms +[2025-09-11 11:49:21] [Rank 0] step:7941/10000 train_time:321104ms step_avg:40.44ms +[2025-09-11 11:49:21] [Rank 0] step:7941/10000 train_time:321104ms step_avg:40.44ms +[2025-09-11 11:49:21] [Rank 0] step:7961/10000 train_time:321798ms step_avg:40.42ms +[2025-09-11 11:49:21] [Rank 0] step:7961/10000 train_time:321798ms step_avg:40.42ms +[2025-09-11 11:49:22] [Rank 0] step:7981/10000 train_time:322496ms step_avg:40.41ms +[2025-09-11 11:49:22] [Rank 0] step:7981/10000 train_time:322496ms step_avg:40.41ms +[2025-09-11 11:49:23] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 11:49:23] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 11:49:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 11:49:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 11:49:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 11:49:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 11:49:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:49:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:49:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 11:49:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 11:49:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 11:49:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 11:49:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 11:49:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 11:49:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 11:49:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 11:49:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 11:49:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 11:49:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 11:49:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 11:49:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 11:49:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 11:49:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 11:49:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 11:49:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 11:49:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 11:49:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 11:49:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 11:49:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 11:49:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 11:49:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 11:49:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 11:49:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 11:49:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 11:49:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 11:49:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 11:49:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 11:49:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 11:49:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 11:49:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 11:49:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 11:49:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 11:49:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 11:49:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 11:49:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:49:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:49:32] [Rank 0] PRINT: step:8000/10000 val_loss:4.8776 total_sharp:3.4956e-03 L1_sharp:1.1085e-03 L2_sharp:5.3350e-04 L3_sharp:4.1360e-04 L4_sharp:3.0802e-04 L5_sharp:5.0077e-04 L6_sharp:5.0314e-04 L7_sharp:4.2566e-04 L8_sharp:6.3050e-04 L9_sharp:6.3107e-04 L10_sharp:9.1846e-04 L11_sharp:1.1165e-03 L12_sharp:3.0805e-03 total_fnorm:4.4375e+00 total_l1_linf:8.5120e+03 total_spectral:2.2031e+00 L1_fnorm:1.2578e+00 L2_fnorm:1.1953e+00 L3_fnorm:1.1797e+00 L4_fnorm:1.1797e+00 L5_fnorm:1.1641e+00 L6_fnorm:1.1719e+00 L7_fnorm:1.1797e+00 L8_fnorm:1.1328e+00 L9_fnorm:1.1719e+00 L10_fnorm:1.1562e+00 L11_fnorm:1.1797e+00 L12_fnorm:1.1562e+00 L1_l1linf:2.5195e-01 L2_l1linf:2.2754e-01 L3_l1linf:2.3047e-01 L4_l1linf:2.3047e-01 L5_l1linf:2.1875e-01 L6_l1linf:2.1387e-01 L7_l1linf:2.1387e-01 L8_l1linf:2.0020e-01 L9_l1linf:1.9629e-01 L10_l1linf:2.0410e-01 L11_l1linf:2.0410e-01 L12_l1linf:2.1094e-01 L1_spectral:1.6788e-02 L2_spectral:1.6525e-02 L3_spectral:1.6576e-02 L4_spectral:1.6573e-02 L5_spectral:1.6494e-02 L6_spectral:1.6508e-02 L7_spectral:1.6679e-02 L8_spectral:1.6553e-02 L9_spectral:1.6756e-02 L10_spectral:1.6641e-02 L11_spectral:1.6652e-02 L12_spectral:1.6606e-02 train_time:323170ms step_avg:40.40ms +[2025-09-11 11:49:32] [Rank 0] PRINT: step:8000/10000 
val_loss:4.8776 total_sharp:3.4956e-03 L1_sharp:1.1085e-03 L2_sharp:5.3350e-04 L3_sharp:4.1360e-04 L4_sharp:3.0802e-04 L5_sharp:5.0077e-04 L6_sharp:5.0314e-04 L7_sharp:4.2566e-04 L8_sharp:6.3050e-04 L9_sharp:6.3107e-04 L10_sharp:9.1846e-04 L11_sharp:1.1165e-03 L12_sharp:3.0805e-03 total_fnorm:4.4375e+00 total_l1_linf:8.5120e+03 total_spectral:2.2031e+00 L1_fnorm:1.2578e+00 L2_fnorm:1.1953e+00 L3_fnorm:1.1797e+00 L4_fnorm:1.1797e+00 L5_fnorm:1.1641e+00 L6_fnorm:1.1719e+00 L7_fnorm:1.1797e+00 L8_fnorm:1.1328e+00 L9_fnorm:1.1719e+00 L10_fnorm:1.1562e+00 L11_fnorm:1.1797e+00 L12_fnorm:1.1562e+00 L1_l1linf:2.5195e-01 L2_l1linf:2.2754e-01 L3_l1linf:2.3047e-01 L4_l1linf:2.3047e-01 L5_l1linf:2.1875e-01 L6_l1linf:2.1387e-01 L7_l1linf:2.1387e-01 L8_l1linf:2.0020e-01 L9_l1linf:1.9629e-01 L10_l1linf:2.0410e-01 L11_l1linf:2.0410e-01 L12_l1linf:2.1094e-01 L1_spectral:1.6788e-02 L2_spectral:1.6525e-02 L3_spectral:1.6576e-02 L4_spectral:1.6573e-02 L5_spectral:1.6494e-02 L6_spectral:1.6508e-02 L7_spectral:1.6679e-02 L8_spectral:1.6553e-02 L9_spectral:1.6756e-02 L10_spectral:1.6641e-02 L11_spectral:1.6652e-02 L12_spectral:1.6606e-02 train_time:323170ms step_avg:40.40ms +[2025-09-11 11:49:34] [Rank 0] step:8001/10000 train_time:324344ms step_avg:40.54ms +[2025-09-11 11:49:34] [Rank 0] step:8001/10000 train_time:324344ms step_avg:40.54ms +[2025-09-11 11:49:34] [Rank 0] step:8021/10000 train_time:325046ms step_avg:40.52ms +[2025-09-11 11:49:34] [Rank 0] step:8021/10000 train_time:325046ms step_avg:40.52ms +[2025-09-11 11:49:35] [Rank 0] step:8041/10000 train_time:325744ms step_avg:40.51ms +[2025-09-11 11:49:35] [Rank 0] step:8041/10000 train_time:325744ms step_avg:40.51ms +[2025-09-11 11:49:36] [Rank 0] step:8061/10000 train_time:326442ms step_avg:40.50ms +[2025-09-11 11:49:36] [Rank 0] step:8061/10000 train_time:326442ms step_avg:40.50ms +[2025-09-11 11:49:36] [Rank 0] step:8081/10000 train_time:327137ms step_avg:40.48ms +[2025-09-11 11:49:36] [Rank 0] step:8081/10000 
train_time:327137ms step_avg:40.48ms +[2025-09-11 11:49:37] [Rank 0] step:8101/10000 train_time:327831ms step_avg:40.47ms +[2025-09-11 11:49:37] [Rank 0] step:8101/10000 train_time:327831ms step_avg:40.47ms +[2025-09-11 11:49:38] [Rank 0] step:8121/10000 train_time:328531ms step_avg:40.45ms +[2025-09-11 11:49:38] [Rank 0] step:8121/10000 train_time:328531ms step_avg:40.45ms +[2025-09-11 11:49:39] [Rank 0] step:8141/10000 train_time:329976ms step_avg:40.53ms +[2025-09-11 11:49:39] [Rank 0] step:8141/10000 train_time:329976ms step_avg:40.53ms +[2025-09-11 11:49:40] [Rank 0] step:8161/10000 train_time:330676ms step_avg:40.52ms +[2025-09-11 11:49:40] [Rank 0] step:8161/10000 train_time:330676ms step_avg:40.52ms +[2025-09-11 11:49:41] [Rank 0] step:8181/10000 train_time:331384ms step_avg:40.51ms +[2025-09-11 11:49:41] [Rank 0] step:8181/10000 train_time:331384ms step_avg:40.51ms +[2025-09-11 11:49:41] [Rank 0] step:8201/10000 train_time:332088ms step_avg:40.49ms +[2025-09-11 11:49:41] [Rank 0] step:8201/10000 train_time:332088ms step_avg:40.49ms +[2025-09-11 11:49:42] [Rank 0] step:8221/10000 train_time:332790ms step_avg:40.48ms +[2025-09-11 11:49:42] [Rank 0] step:8221/10000 train_time:332790ms step_avg:40.48ms +[2025-09-11 11:49:43] [Rank 0] step:8241/10000 train_time:333502ms step_avg:40.47ms +[2025-09-11 11:49:43] [Rank 0] step:8241/10000 train_time:333502ms step_avg:40.47ms +[2025-09-11 11:49:43] [Rank 0] step:8261/10000 train_time:334204ms step_avg:40.46ms +[2025-09-11 11:49:43] [Rank 0] step:8261/10000 train_time:334204ms step_avg:40.46ms +[2025-09-11 11:49:44] [Rank 0] step:8281/10000 train_time:334904ms step_avg:40.44ms +[2025-09-11 11:49:44] [Rank 0] step:8281/10000 train_time:334904ms step_avg:40.44ms +[2025-09-11 11:49:45] [Rank 0] step:8301/10000 train_time:335606ms step_avg:40.43ms +[2025-09-11 11:49:45] [Rank 0] step:8301/10000 train_time:335606ms step_avg:40.43ms +[2025-09-11 11:49:45] [Rank 0] step:8321/10000 train_time:336308ms step_avg:40.42ms 
+[2025-09-11 11:49:45] [Rank 0] step:8321/10000 train_time:336308ms step_avg:40.42ms +[2025-09-11 11:49:46] [Rank 0] step:8341/10000 train_time:337016ms step_avg:40.40ms +[2025-09-11 11:49:46] [Rank 0] step:8341/10000 train_time:337016ms step_avg:40.40ms +[2025-09-11 11:49:47] [Rank 0] step:8361/10000 train_time:337715ms step_avg:40.39ms +[2025-09-11 11:49:47] [Rank 0] step:8361/10000 train_time:337715ms step_avg:40.39ms +[2025-09-11 11:49:48] [Rank 0] step:8381/10000 train_time:338420ms step_avg:40.38ms +[2025-09-11 11:49:48] [Rank 0] step:8381/10000 train_time:338420ms step_avg:40.38ms +[2025-09-11 11:49:48] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 11:49:48] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 11:49:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 11:49:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 11:49:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 11:49:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 11:49:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:49:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:49:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 11:49:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 11:49:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 11:49:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 11:49:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 11:49:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 11:49:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 11:49:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 11:49:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 11:49:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 11:49:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 11:49:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 11:49:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 11:49:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 11:49:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 11:49:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 11:49:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 11:49:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 11:49:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 11:49:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 11:49:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 11:49:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 11:49:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 11:49:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 11:49:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 11:49:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 11:49:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 11:49:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 11:49:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 11:49:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 11:49:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 11:49:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 11:49:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 11:49:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 11:49:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 11:49:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 11:49:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:49:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:49:58] [Rank 0] PRINT: step:8400/10000 val_loss:4.8583 total_sharp:2.9260e-03 L1_sharp:6.6368e-04 L2_sharp:3.4619e-04 L3_sharp:3.6131e-04 L4_sharp:2.5961e-04 L5_sharp:4.9309e-04 L6_sharp:3.5072e-04 L7_sharp:3.8472e-04 L8_sharp:5.3137e-04 L9_sharp:5.6706e-04 L10_sharp:7.4191e-04 L11_sharp:8.4633e-04 L12_sharp:2.6507e-03 total_fnorm:3.4062e+00 total_l1_linf:6.0480e+03 total_spectral:1.7188e+00 L1_fnorm:1.0156e+00 L2_fnorm:9.4531e-01 L3_fnorm:9.2578e-01 L4_fnorm:9.2188e-01 L5_fnorm:9.1406e-01 L6_fnorm:9.1797e-01 L7_fnorm:9.1797e-01 L8_fnorm:8.8672e-01 L9_fnorm:9.1406e-01 L10_fnorm:9.0234e-01 L11_fnorm:9.1406e-01 L12_fnorm:9.0234e-01 L1_l1linf:1.8066e-01 L2_l1linf:1.6992e-01 L3_l1linf:1.6504e-01 L4_l1linf:1.6602e-01 L5_l1linf:1.6016e-01 L6_l1linf:1.5332e-01 L7_l1linf:1.5527e-01 L8_l1linf:1.4551e-01 L9_l1linf:1.3965e-01 L10_l1linf:1.4258e-01 L11_l1linf:1.4941e-01 L12_l1linf:1.5625e-01 L1_spectral:1.3919e-02 L2_spectral:1.3407e-02 L3_spectral:1.3249e-02 L4_spectral:1.3132e-02 L5_spectral:1.3430e-02 L6_spectral:1.3350e-02 L7_spectral:1.3360e-02 L8_spectral:1.3392e-02 L9_spectral:1.3447e-02 L10_spectral:1.3404e-02 L11_spectral:1.3410e-02 L12_spectral:1.3369e-02 train_time:339104ms step_avg:40.37ms +[2025-09-11 11:49:58] [Rank 0] PRINT: step:8400/10000 val_loss:4.8583 total_sharp:2.9260e-03 L1_sharp:6.6368e-04 L2_sharp:3.4619e-04 L3_sharp:3.6131e-04 L4_sharp:2.5961e-04 L5_sharp:4.9309e-04 L6_sharp:3.5072e-04 L7_sharp:3.8472e-04 L8_sharp:5.3137e-04 L9_sharp:5.6706e-04 L10_sharp:7.4191e-04 L11_sharp:8.4633e-04 L12_sharp:2.6507e-03 total_fnorm:3.4062e+00 total_l1_linf:6.0480e+03 total_spectral:1.7188e+00 L1_fnorm:1.0156e+00 L2_fnorm:9.4531e-01 L3_fnorm:9.2578e-01 L4_fnorm:9.2188e-01 L5_fnorm:9.1406e-01 L6_fnorm:9.1797e-01 L7_fnorm:9.1797e-01 L8_fnorm:8.8672e-01 L9_fnorm:9.1406e-01 L10_fnorm:9.0234e-01 L11_fnorm:9.1406e-01 L12_fnorm:9.0234e-01 L1_l1linf:1.8066e-01 L2_l1linf:1.6992e-01 L3_l1linf:1.6504e-01 L4_l1linf:1.6602e-01 L5_l1linf:1.6016e-01 
L6_l1linf:1.5332e-01 L7_l1linf:1.5527e-01 L8_l1linf:1.4551e-01 L9_l1linf:1.3965e-01 L10_l1linf:1.4258e-01 L11_l1linf:1.4941e-01 L12_l1linf:1.5625e-01 L1_spectral:1.3919e-02 L2_spectral:1.3407e-02 L3_spectral:1.3249e-02 L4_spectral:1.3132e-02 L5_spectral:1.3430e-02 L6_spectral:1.3350e-02 L7_spectral:1.3360e-02 L8_spectral:1.3392e-02 L9_spectral:1.3447e-02 L10_spectral:1.3404e-02 L11_spectral:1.3410e-02 L12_spectral:1.3369e-02 train_time:339104ms step_avg:40.37ms +[2025-09-11 11:49:59] [Rank 0] step:8401/10000 train_time:340298ms step_avg:40.51ms +[2025-09-11 11:49:59] [Rank 0] step:8401/10000 train_time:340298ms step_avg:40.51ms +[2025-09-11 11:50:00] [Rank 0] step:8421/10000 train_time:341020ms step_avg:40.50ms +[2025-09-11 11:50:00] [Rank 0] step:8421/10000 train_time:341020ms step_avg:40.50ms +[2025-09-11 11:50:01] [Rank 0] step:8441/10000 train_time:341760ms step_avg:40.49ms +[2025-09-11 11:50:01] [Rank 0] step:8441/10000 train_time:341760ms step_avg:40.49ms +[2025-09-11 11:50:02] [Rank 0] step:8461/10000 train_time:342545ms step_avg:40.49ms +[2025-09-11 11:50:02] [Rank 0] step:8461/10000 train_time:342545ms step_avg:40.49ms +[2025-09-11 11:50:02] [Rank 0] step:8481/10000 train_time:343323ms step_avg:40.48ms +[2025-09-11 11:50:02] [Rank 0] step:8481/10000 train_time:343323ms step_avg:40.48ms +[2025-09-11 11:50:03] [Rank 0] step:8501/10000 train_time:344026ms step_avg:40.47ms +[2025-09-11 11:50:03] [Rank 0] step:8501/10000 train_time:344026ms step_avg:40.47ms +[2025-09-11 11:50:04] [Rank 0] step:8521/10000 train_time:344728ms step_avg:40.46ms +[2025-09-11 11:50:04] [Rank 0] step:8521/10000 train_time:344728ms step_avg:40.46ms +[2025-09-11 11:50:05] [Rank 0] step:8541/10000 train_time:345430ms step_avg:40.44ms +[2025-09-11 11:50:05] [Rank 0] step:8541/10000 train_time:345430ms step_avg:40.44ms +[2025-09-11 11:50:05] [Rank 0] step:8561/10000 train_time:346138ms step_avg:40.43ms +[2025-09-11 11:50:05] [Rank 0] step:8561/10000 train_time:346138ms step_avg:40.43ms 
+[2025-09-11 11:50:06] [Rank 0] step:8581/10000 train_time:346843ms step_avg:40.42ms +[2025-09-11 11:50:06] [Rank 0] step:8581/10000 train_time:346843ms step_avg:40.42ms +[2025-09-11 11:50:07] [Rank 0] step:8601/10000 train_time:347546ms step_avg:40.41ms +[2025-09-11 11:50:07] [Rank 0] step:8601/10000 train_time:347546ms step_avg:40.41ms +[2025-09-11 11:50:07] [Rank 0] step:8621/10000 train_time:348248ms step_avg:40.40ms +[2025-09-11 11:50:07] [Rank 0] step:8621/10000 train_time:348248ms step_avg:40.40ms +[2025-09-11 11:50:08] [Rank 0] step:8641/10000 train_time:348949ms step_avg:40.38ms +[2025-09-11 11:50:08] [Rank 0] step:8641/10000 train_time:348949ms step_avg:40.38ms +[2025-09-11 11:50:09] [Rank 0] step:8661/10000 train_time:349652ms step_avg:40.37ms +[2025-09-11 11:50:09] [Rank 0] step:8661/10000 train_time:349652ms step_avg:40.37ms +[2025-09-11 11:50:09] [Rank 0] step:8681/10000 train_time:350356ms step_avg:40.36ms +[2025-09-11 11:50:09] [Rank 0] step:8681/10000 train_time:350356ms step_avg:40.36ms +[2025-09-11 11:50:10] [Rank 0] step:8701/10000 train_time:351057ms step_avg:40.35ms +[2025-09-11 11:50:10] [Rank 0] step:8701/10000 train_time:351057ms step_avg:40.35ms +[2025-09-11 11:50:11] [Rank 0] step:8721/10000 train_time:351764ms step_avg:40.34ms +[2025-09-11 11:50:11] [Rank 0] step:8721/10000 train_time:351764ms step_avg:40.34ms +[2025-09-11 11:50:12] [Rank 0] step:8741/10000 train_time:352463ms step_avg:40.32ms +[2025-09-11 11:50:12] [Rank 0] step:8741/10000 train_time:352463ms step_avg:40.32ms +[2025-09-11 11:50:12] [Rank 0] step:8761/10000 train_time:353168ms step_avg:40.31ms +[2025-09-11 11:50:12] [Rank 0] step:8761/10000 train_time:353168ms step_avg:40.31ms +[2025-09-11 11:50:13] [Rank 0] step:8781/10000 train_time:353868ms step_avg:40.30ms +[2025-09-11 11:50:13] [Rank 0] step:8781/10000 train_time:353868ms step_avg:40.30ms +[2025-09-11 11:50:14] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 11:50:14] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 11:50:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 11:50:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 11:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 11:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 11:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 11:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 11:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 11:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 11:50:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 11:50:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 11:50:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 11:50:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 11:50:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 11:50:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 11:50:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 11:50:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 11:50:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 11:50:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 11:50:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 11:50:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 11:50:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 11:50:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 11:50:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 11:50:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 11:50:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 11:50:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 11:50:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 11:50:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 11:50:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 11:50:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 11:50:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 11:50:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 11:50:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 11:50:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 11:50:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 11:50:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 11:50:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 11:50:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 11:50:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 11:50:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 11:50:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:50:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:50:24] [Rank 0] PRINT: step:8800/10000 val_loss:4.8450 total_sharp:2.3120e-03 L1_sharp:7.5004e-04 L2_sharp:3.0896e-04 L3_sharp:2.7829e-04 L4_sharp:1.8980e-04 L5_sharp:3.9001e-04 L6_sharp:2.8341e-04 L7_sharp:3.3013e-04 L8_sharp:5.2454e-04 L9_sharp:4.7303e-04 L10_sharp:6.8325e-04 L11_sharp:8.2147e-04 L12_sharp:2.4548e-03 total_fnorm:2.5156e+00 total_l1_linf:3.9840e+03 total_spectral:1.2656e+00 L1_fnorm:7.7734e-01 L2_fnorm:6.9531e-01 L3_fnorm:6.8359e-01 L4_fnorm:6.7969e-01 L5_fnorm:6.7188e-01 L6_fnorm:6.7578e-01 L7_fnorm:6.7578e-01 L8_fnorm:6.4844e-01 L9_fnorm:6.7188e-01 L10_fnorm:6.6406e-01 L11_fnorm:6.7188e-01 L12_fnorm:6.6406e-01 L1_l1linf:1.4258e-01 L2_l1linf:1.1719e-01 L3_l1linf:1.1816e-01 L4_l1linf:1.1182e-01 L5_l1linf:1.0645e-01 L6_l1linf:1.0400e-01 L7_l1linf:1.0400e-01 L8_l1linf:9.7168e-02 L9_l1linf:9.2773e-02 L10_l1linf:9.7168e-02 L11_l1linf:9.8633e-02 L12_l1linf:1.1816e-01 L1_spectral:1.1444e-02 L2_spectral:1.0184e-02 L3_spectral:9.9495e-03 L4_spectral:9.8788e-03 L5_spectral:1.0129e-02 L6_spectral:1.0054e-02 L7_spectral:9.9935e-03 L8_spectral:1.0114e-02 L9_spectral:1.0084e-02 L10_spectral:1.0032e-02 L11_spectral:1.0076e-02 L12_spectral:1.0013e-02 train_time:354548ms step_avg:40.29ms +[2025-09-11 11:50:24] [Rank 0] PRINT: step:8800/10000 
val_loss:4.8450 total_sharp:2.3120e-03 L1_sharp:7.5004e-04 L2_sharp:3.0896e-04 L3_sharp:2.7829e-04 L4_sharp:1.8980e-04 L5_sharp:3.9001e-04 L6_sharp:2.8341e-04 L7_sharp:3.3013e-04 L8_sharp:5.2454e-04 L9_sharp:4.7303e-04 L10_sharp:6.8325e-04 L11_sharp:8.2147e-04 L12_sharp:2.4548e-03 total_fnorm:2.5156e+00 total_l1_linf:3.9840e+03 total_spectral:1.2656e+00 L1_fnorm:7.7734e-01 L2_fnorm:6.9531e-01 L3_fnorm:6.8359e-01 L4_fnorm:6.7969e-01 L5_fnorm:6.7188e-01 L6_fnorm:6.7578e-01 L7_fnorm:6.7578e-01 L8_fnorm:6.4844e-01 L9_fnorm:6.7188e-01 L10_fnorm:6.6406e-01 L11_fnorm:6.7188e-01 L12_fnorm:6.6406e-01 L1_l1linf:1.4258e-01 L2_l1linf:1.1719e-01 L3_l1linf:1.1816e-01 L4_l1linf:1.1182e-01 L5_l1linf:1.0645e-01 L6_l1linf:1.0400e-01 L7_l1linf:1.0400e-01 L8_l1linf:9.7168e-02 L9_l1linf:9.2773e-02 L10_l1linf:9.7168e-02 L11_l1linf:9.8633e-02 L12_l1linf:1.1816e-01 L1_spectral:1.1444e-02 L2_spectral:1.0184e-02 L3_spectral:9.9495e-03 L4_spectral:9.8788e-03 L5_spectral:1.0129e-02 L6_spectral:1.0054e-02 L7_spectral:9.9935e-03 L8_spectral:1.0114e-02 L9_spectral:1.0084e-02 L10_spectral:1.0032e-02 L11_spectral:1.0076e-02 L12_spectral:1.0013e-02 train_time:354548ms step_avg:40.29ms +[2025-09-11 11:50:25] [Rank 0] step:8801/10000 train_time:355743ms step_avg:40.42ms +[2025-09-11 11:50:25] [Rank 0] step:8801/10000 train_time:355743ms step_avg:40.42ms +[2025-09-11 11:50:25] [Rank 0] step:8821/10000 train_time:356471ms step_avg:40.41ms +[2025-09-11 11:50:25] [Rank 0] step:8821/10000 train_time:356471ms step_avg:40.41ms +[2025-09-11 11:50:26] [Rank 0] step:8841/10000 train_time:357176ms step_avg:40.40ms +[2025-09-11 11:50:26] [Rank 0] step:8841/10000 train_time:357176ms step_avg:40.40ms +[2025-09-11 11:50:27] [Rank 0] step:8861/10000 train_time:357880ms step_avg:40.39ms +[2025-09-11 11:50:27] [Rank 0] step:8861/10000 train_time:357880ms step_avg:40.39ms +[2025-09-11 11:50:28] [Rank 0] step:8881/10000 train_time:358583ms step_avg:40.38ms +[2025-09-11 11:50:28] [Rank 0] step:8881/10000 
train_time:358583ms step_avg:40.38ms +[2025-09-11 11:50:28] [Rank 0] step:8901/10000 train_time:359288ms step_avg:40.36ms +[2025-09-11 11:50:28] [Rank 0] step:8901/10000 train_time:359288ms step_avg:40.36ms +[2025-09-11 11:50:29] [Rank 0] step:8921/10000 train_time:359989ms step_avg:40.35ms +[2025-09-11 11:50:29] [Rank 0] step:8921/10000 train_time:359989ms step_avg:40.35ms +[2025-09-11 11:50:30] [Rank 0] step:8941/10000 train_time:360694ms step_avg:40.34ms +[2025-09-11 11:50:30] [Rank 0] step:8941/10000 train_time:360694ms step_avg:40.34ms +[2025-09-11 11:50:30] [Rank 0] step:8961/10000 train_time:361405ms step_avg:40.33ms +[2025-09-11 11:50:30] [Rank 0] step:8961/10000 train_time:361405ms step_avg:40.33ms +[2025-09-11 11:50:31] [Rank 0] step:8981/10000 train_time:362112ms step_avg:40.32ms +[2025-09-11 11:50:31] [Rank 0] step:8981/10000 train_time:362112ms step_avg:40.32ms +[2025-09-11 11:50:32] [Rank 0] step:9001/10000 train_time:363095ms step_avg:40.34ms +[2025-09-11 11:50:32] [Rank 0] step:9001/10000 train_time:363095ms step_avg:40.34ms +[2025-09-11 11:50:33] [Rank 0] step:9021/10000 train_time:364090ms step_avg:40.36ms +[2025-09-11 11:50:33] [Rank 0] step:9021/10000 train_time:364090ms step_avg:40.36ms +[2025-09-11 11:50:34] [Rank 0] step:9041/10000 train_time:364797ms step_avg:40.35ms +[2025-09-11 11:50:34] [Rank 0] step:9041/10000 train_time:364797ms step_avg:40.35ms +[2025-09-11 11:50:35] [Rank 0] step:9061/10000 train_time:365764ms step_avg:40.37ms +[2025-09-11 11:50:35] [Rank 0] step:9061/10000 train_time:365764ms step_avg:40.37ms +[2025-09-11 11:50:35] [Rank 0] step:9081/10000 train_time:366469ms step_avg:40.36ms +[2025-09-11 11:50:35] [Rank 0] step:9081/10000 train_time:366469ms step_avg:40.36ms +[2025-09-11 11:50:36] [Rank 0] step:9101/10000 train_time:367176ms step_avg:40.34ms +[2025-09-11 11:50:36] [Rank 0] step:9101/10000 train_time:367176ms step_avg:40.34ms +[2025-09-11 11:50:37] [Rank 0] step:9121/10000 train_time:367884ms step_avg:40.33ms 
+[2025-09-11 11:50:37] [Rank 0] step:9121/10000 train_time:367884ms step_avg:40.33ms +[2025-09-11 11:50:38] [Rank 0] step:9141/10000 train_time:368585ms step_avg:40.32ms +[2025-09-11 11:50:38] [Rank 0] step:9141/10000 train_time:368585ms step_avg:40.32ms +[2025-09-11 11:50:38] [Rank 0] step:9161/10000 train_time:369292ms step_avg:40.31ms +[2025-09-11 11:50:38] [Rank 0] step:9161/10000 train_time:369292ms step_avg:40.31ms +[2025-09-11 11:50:39] [Rank 0] step:9181/10000 train_time:369997ms step_avg:40.30ms +[2025-09-11 11:50:39] [Rank 0] step:9181/10000 train_time:369997ms step_avg:40.30ms +[2025-09-11 11:50:40] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 11:50:40] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 11:50:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 11:50:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 11:50:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 11:50:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 11:50:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:50:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:50:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 11:50:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 11:50:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 11:50:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 11:50:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 11:50:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 11:50:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 11:50:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 11:50:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 11:50:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 11:50:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 11:50:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 11:50:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 11:50:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 11:50:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 11:50:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 11:50:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 11:50:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 11:50:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 11:50:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 11:50:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 11:50:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 11:50:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 11:50:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 11:50:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 11:50:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 11:50:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 11:50:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 11:50:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 11:50:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 11:50:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 11:50:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 11:50:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 11:50:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 11:50:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 11:50:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 11:50:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:50:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:50:50] [Rank 0] PRINT: step:9200/10000 val_loss:4.8301 total_sharp:2.2368e-03 L1_sharp:2.0537e-03 L2_sharp:2.3032e-04 L3_sharp:2.0588e-04 L4_sharp:1.8096e-04 L5_sharp:4.5895e-04 L6_sharp:3.1385e-04 L7_sharp:2.9312e-04 L8_sharp:4.1867e-04 L9_sharp:3.9923e-04 L10_sharp:5.5039e-04 L11_sharp:6.8551e-04 L12_sharp:2.3422e-03 total_fnorm:1.6562e+00 total_l1_linf:2.3040e+03 total_spectral:8.3203e-01 L1_fnorm:5.4688e-01 L2_fnorm:4.6094e-01 L3_fnorm:4.5117e-01 L4_fnorm:4.5117e-01 L5_fnorm:4.4141e-01 L6_fnorm:4.4531e-01 L7_fnorm:4.4727e-01 L8_fnorm:4.3164e-01 L9_fnorm:4.4141e-01 L10_fnorm:4.3945e-01 L11_fnorm:4.4336e-01 L12_fnorm:4.3945e-01 L1_l1linf:9.9121e-02 L2_l1linf:6.7383e-02 L3_l1linf:6.8359e-02 L4_l1linf:6.7383e-02 L5_l1linf:6.3477e-02 L6_l1linf:6.1768e-02 L7_l1linf:6.2500e-02 L8_l1linf:5.7373e-02 L9_l1linf:5.4932e-02 L10_l1linf:5.5908e-02 L11_l1linf:5.7861e-02 L12_l1linf:6.7383e-02 L1_spectral:8.7860e-03 L2_spectral:6.8086e-03 L3_spectral:6.7255e-03 L4_spectral:6.6975e-03 L5_spectral:6.9228e-03 L6_spectral:6.7361e-03 L7_spectral:6.7933e-03 L8_spectral:6.9076e-03 L9_spectral:6.8330e-03 L10_spectral:6.7428e-03 L11_spectral:6.8151e-03 L12_spectral:6.7376e-03 train_time:370684ms step_avg:40.29ms +[2025-09-11 11:50:50] [Rank 0] PRINT: step:9200/10000 val_loss:4.8301 total_sharp:2.2368e-03 L1_sharp:2.0537e-03 L2_sharp:2.3032e-04 L3_sharp:2.0588e-04 L4_sharp:1.8096e-04 L5_sharp:4.5895e-04 L6_sharp:3.1385e-04 L7_sharp:2.9312e-04 L8_sharp:4.1867e-04 L9_sharp:3.9923e-04 L10_sharp:5.5039e-04 L11_sharp:6.8551e-04 L12_sharp:2.3422e-03 total_fnorm:1.6562e+00 total_l1_linf:2.3040e+03 total_spectral:8.3203e-01 L1_fnorm:5.4688e-01 L2_fnorm:4.6094e-01 L3_fnorm:4.5117e-01 L4_fnorm:4.5117e-01 L5_fnorm:4.4141e-01 L6_fnorm:4.4531e-01 L7_fnorm:4.4727e-01 L8_fnorm:4.3164e-01 L9_fnorm:4.4141e-01 L10_fnorm:4.3945e-01 L11_fnorm:4.4336e-01 L12_fnorm:4.3945e-01 L1_l1linf:9.9121e-02 L2_l1linf:6.7383e-02 L3_l1linf:6.8359e-02 L4_l1linf:6.7383e-02 L5_l1linf:6.3477e-02 
L6_l1linf:6.1768e-02 L7_l1linf:6.2500e-02 L8_l1linf:5.7373e-02 L9_l1linf:5.4932e-02 L10_l1linf:5.5908e-02 L11_l1linf:5.7861e-02 L12_l1linf:6.7383e-02 L1_spectral:8.7860e-03 L2_spectral:6.8086e-03 L3_spectral:6.7255e-03 L4_spectral:6.6975e-03 L5_spectral:6.9228e-03 L6_spectral:6.7361e-03 L7_spectral:6.7933e-03 L8_spectral:6.9076e-03 L9_spectral:6.8330e-03 L10_spectral:6.7428e-03 L11_spectral:6.8151e-03 L12_spectral:6.7376e-03 train_time:370684ms step_avg:40.29ms +[2025-09-11 11:50:51] [Rank 0] step:9201/10000 train_time:371877ms step_avg:40.42ms +[2025-09-11 11:50:51] [Rank 0] step:9201/10000 train_time:371877ms step_avg:40.42ms +[2025-09-11 11:50:51] [Rank 0] step:9221/10000 train_time:372604ms step_avg:40.41ms +[2025-09-11 11:50:51] [Rank 0] step:9221/10000 train_time:372604ms step_avg:40.41ms +[2025-09-11 11:50:52] [Rank 0] step:9241/10000 train_time:373309ms step_avg:40.40ms +[2025-09-11 11:50:52] [Rank 0] step:9241/10000 train_time:373309ms step_avg:40.40ms +[2025-09-11 11:50:53] [Rank 0] step:9261/10000 train_time:374015ms step_avg:40.39ms +[2025-09-11 11:50:53] [Rank 0] step:9261/10000 train_time:374015ms step_avg:40.39ms +[2025-09-11 11:50:54] [Rank 0] step:9281/10000 train_time:374722ms step_avg:40.38ms +[2025-09-11 11:50:54] [Rank 0] step:9281/10000 train_time:374722ms step_avg:40.38ms +[2025-09-11 11:50:54] [Rank 0] step:9301/10000 train_time:375424ms step_avg:40.36ms +[2025-09-11 11:50:54] [Rank 0] step:9301/10000 train_time:375424ms step_avg:40.36ms +[2025-09-11 11:50:55] [Rank 0] step:9321/10000 train_time:376130ms step_avg:40.35ms +[2025-09-11 11:50:55] [Rank 0] step:9321/10000 train_time:376130ms step_avg:40.35ms +[2025-09-11 11:50:56] [Rank 0] step:9341/10000 train_time:376830ms step_avg:40.34ms +[2025-09-11 11:50:56] [Rank 0] step:9341/10000 train_time:376830ms step_avg:40.34ms +[2025-09-11 11:50:56] [Rank 0] step:9361/10000 train_time:377532ms step_avg:40.33ms +[2025-09-11 11:50:56] [Rank 0] step:9361/10000 train_time:377532ms step_avg:40.33ms 
+[2025-09-11 11:50:57] [Rank 0] step:9381/10000 train_time:378234ms step_avg:40.32ms +[2025-09-11 11:50:57] [Rank 0] step:9381/10000 train_time:378234ms step_avg:40.32ms +[2025-09-11 11:50:58] [Rank 0] step:9401/10000 train_time:378939ms step_avg:40.31ms +[2025-09-11 11:50:58] [Rank 0] step:9401/10000 train_time:378939ms step_avg:40.31ms +[2025-09-11 11:50:59] [Rank 0] step:9421/10000 train_time:379645ms step_avg:40.30ms +[2025-09-11 11:50:59] [Rank 0] step:9421/10000 train_time:379645ms step_avg:40.30ms +[2025-09-11 11:50:59] [Rank 0] step:9441/10000 train_time:380352ms step_avg:40.29ms +[2025-09-11 11:50:59] [Rank 0] step:9441/10000 train_time:380352ms step_avg:40.29ms +[2025-09-11 11:51:00] [Rank 0] step:9461/10000 train_time:381057ms step_avg:40.28ms +[2025-09-11 11:51:00] [Rank 0] step:9461/10000 train_time:381057ms step_avg:40.28ms +[2025-09-11 11:51:01] [Rank 0] step:9481/10000 train_time:381762ms step_avg:40.27ms +[2025-09-11 11:51:01] [Rank 0] step:9481/10000 train_time:381762ms step_avg:40.27ms +[2025-09-11 11:51:01] [Rank 0] step:9501/10000 train_time:382467ms step_avg:40.26ms +[2025-09-11 11:51:01] [Rank 0] step:9501/10000 train_time:382467ms step_avg:40.26ms +[2025-09-11 11:51:02] [Rank 0] step:9521/10000 train_time:383174ms step_avg:40.25ms +[2025-09-11 11:51:02] [Rank 0] step:9521/10000 train_time:383174ms step_avg:40.25ms +[2025-09-11 11:51:03] [Rank 0] step:9541/10000 train_time:383875ms step_avg:40.23ms +[2025-09-11 11:51:03] [Rank 0] step:9541/10000 train_time:383875ms step_avg:40.23ms +[2025-09-11 11:51:03] [Rank 0] step:9561/10000 train_time:384580ms step_avg:40.22ms +[2025-09-11 11:51:03] [Rank 0] step:9561/10000 train_time:384580ms step_avg:40.22ms +[2025-09-11 11:51:04] [Rank 0] step:9581/10000 train_time:385285ms step_avg:40.21ms +[2025-09-11 11:51:04] [Rank 0] step:9581/10000 train_time:385285ms step_avg:40.21ms +[2025-09-11 11:51:05] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 11:51:05] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 11:51:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 11:51:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 11:51:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 11:51:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 11:51:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:51:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:51:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 11:51:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 11:51:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 11:51:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 11:51:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 11:51:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 11:51:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 11:51:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 11:51:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 11:51:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 11:51:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 11:51:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 11:51:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 11:51:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 11:51:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 11:51:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 11:51:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 11:51:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 11:51:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 11:51:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 11:51:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 11:51:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 11:51:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 11:51:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 11:51:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 11:51:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 11:51:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 11:51:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 11:51:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 11:51:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 11:51:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 11:51:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 11:51:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 11:51:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 11:51:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 11:51:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 11:51:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:51:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:51:15] [Rank 0] PRINT: step:9600/10000 val_loss:4.8220 total_sharp:1.2737e-03 L1_sharp:2.6392e-04 L2_sharp:1.4833e-04 L3_sharp:1.7398e-04 L4_sharp:1.3903e-04 L5_sharp:3.0364e-04 L6_sharp:2.1605e-04 L7_sharp:2.0964e-04 L8_sharp:3.0753e-04 L9_sharp:3.0876e-04 L10_sharp:3.7813e-04 L11_sharp:4.8037e-04 L12_sharp:1.5636e-03 total_fnorm:9.6875e-01 total_l1_linf:1.1040e+03 total_spectral:4.8047e-01 L1_fnorm:3.3008e-01 L2_fnorm:2.6562e-01 L3_fnorm:2.5781e-01 L4_fnorm:2.5586e-01 L5_fnorm:2.5391e-01 L6_fnorm:2.5391e-01 L7_fnorm:2.5391e-01 L8_fnorm:2.4512e-01 L9_fnorm:2.5000e-01 L10_fnorm:2.4902e-01 L11_fnorm:2.5195e-01 L12_fnorm:2.4805e-01 L1_l1linf:5.6396e-02 L2_l1linf:3.2959e-02 L3_l1linf:3.2715e-02 L4_l1linf:3.2959e-02 L5_l1linf:3.3691e-02 L6_l1linf:3.0151e-02 L7_l1linf:2.8931e-02 L8_l1linf:2.9663e-02 L9_l1linf:2.9053e-02 L10_l1linf:2.7954e-02 L11_l1linf:2.8564e-02 L12_l1linf:3.2227e-02 L1_spectral:6.0224e-03 L2_spectral:3.9991e-03 L3_spectral:3.9343e-03 L4_spectral:3.8878e-03 L5_spectral:4.0956e-03 L6_spectral:3.9393e-03 L7_spectral:3.9774e-03 L8_spectral:4.0619e-03 L9_spectral:3.9435e-03 L10_spectral:3.9307e-03 L11_spectral:3.8987e-03 L12_spectral:3.9073e-03 train_time:385966ms step_avg:40.20ms +[2025-09-11 11:51:15] [Rank 0] PRINT: step:9600/10000 
val_loss:4.8220 total_sharp:1.2737e-03 L1_sharp:2.6392e-04 L2_sharp:1.4833e-04 L3_sharp:1.7398e-04 L4_sharp:1.3903e-04 L5_sharp:3.0364e-04 L6_sharp:2.1605e-04 L7_sharp:2.0964e-04 L8_sharp:3.0753e-04 L9_sharp:3.0876e-04 L10_sharp:3.7813e-04 L11_sharp:4.8037e-04 L12_sharp:1.5636e-03 total_fnorm:9.6875e-01 total_l1_linf:1.1040e+03 total_spectral:4.8047e-01 L1_fnorm:3.3008e-01 L2_fnorm:2.6562e-01 L3_fnorm:2.5781e-01 L4_fnorm:2.5586e-01 L5_fnorm:2.5391e-01 L6_fnorm:2.5391e-01 L7_fnorm:2.5391e-01 L8_fnorm:2.4512e-01 L9_fnorm:2.5000e-01 L10_fnorm:2.4902e-01 L11_fnorm:2.5195e-01 L12_fnorm:2.4805e-01 L1_l1linf:5.6396e-02 L2_l1linf:3.2959e-02 L3_l1linf:3.2715e-02 L4_l1linf:3.2959e-02 L5_l1linf:3.3691e-02 L6_l1linf:3.0151e-02 L7_l1linf:2.8931e-02 L8_l1linf:2.9663e-02 L9_l1linf:2.9053e-02 L10_l1linf:2.7954e-02 L11_l1linf:2.8564e-02 L12_l1linf:3.2227e-02 L1_spectral:6.0224e-03 L2_spectral:3.9991e-03 L3_spectral:3.9343e-03 L4_spectral:3.8878e-03 L5_spectral:4.0956e-03 L6_spectral:3.9393e-03 L7_spectral:3.9774e-03 L8_spectral:4.0619e-03 L9_spectral:3.9435e-03 L10_spectral:3.9307e-03 L11_spectral:3.8987e-03 L12_spectral:3.9073e-03 train_time:385966ms step_avg:40.20ms +[2025-09-11 11:51:16] [Rank 0] step:9601/10000 train_time:387174ms step_avg:40.33ms +[2025-09-11 11:51:16] [Rank 0] step:9601/10000 train_time:387174ms step_avg:40.33ms +[2025-09-11 11:51:17] [Rank 0] step:9621/10000 train_time:387916ms step_avg:40.32ms +[2025-09-11 11:51:17] [Rank 0] step:9621/10000 train_time:387916ms step_avg:40.32ms +[2025-09-11 11:51:17] [Rank 0] step:9641/10000 train_time:388626ms step_avg:40.31ms +[2025-09-11 11:51:17] [Rank 0] step:9641/10000 train_time:388626ms step_avg:40.31ms +[2025-09-11 11:51:18] [Rank 0] step:9661/10000 train_time:389343ms step_avg:40.30ms +[2025-09-11 11:51:18] [Rank 0] step:9661/10000 train_time:389343ms step_avg:40.30ms +[2025-09-11 11:51:19] [Rank 0] step:9681/10000 train_time:390052ms step_avg:40.29ms +[2025-09-11 11:51:19] [Rank 0] step:9681/10000 
train_time:390052ms step_avg:40.29ms +[2025-09-11 11:51:20] [Rank 0] step:9701/10000 train_time:390762ms step_avg:40.28ms +[2025-09-11 11:51:20] [Rank 0] step:9701/10000 train_time:390762ms step_avg:40.28ms +[2025-09-11 11:51:20] [Rank 0] step:9721/10000 train_time:391478ms step_avg:40.27ms +[2025-09-11 11:51:20] [Rank 0] step:9721/10000 train_time:391478ms step_avg:40.27ms +[2025-09-11 11:51:21] [Rank 0] step:9741/10000 train_time:392190ms step_avg:40.26ms +[2025-09-11 11:51:21] [Rank 0] step:9741/10000 train_time:392190ms step_avg:40.26ms +[2025-09-11 11:51:22] [Rank 0] step:9761/10000 train_time:392901ms step_avg:40.25ms +[2025-09-11 11:51:22] [Rank 0] step:9761/10000 train_time:392901ms step_avg:40.25ms +[2025-09-11 11:51:22] [Rank 0] step:9781/10000 train_time:393611ms step_avg:40.24ms +[2025-09-11 11:51:22] [Rank 0] step:9781/10000 train_time:393611ms step_avg:40.24ms +[2025-09-11 11:51:23] [Rank 0] step:9801/10000 train_time:394326ms step_avg:40.23ms +[2025-09-11 11:51:23] [Rank 0] step:9801/10000 train_time:394326ms step_avg:40.23ms +[2025-09-11 11:51:24] [Rank 0] step:9821/10000 train_time:395040ms step_avg:40.22ms +[2025-09-11 11:51:24] [Rank 0] step:9821/10000 train_time:395040ms step_avg:40.22ms +[2025-09-11 11:51:25] [Rank 0] step:9841/10000 train_time:395756ms step_avg:40.21ms +[2025-09-11 11:51:25] [Rank 0] step:9841/10000 train_time:395756ms step_avg:40.21ms +[2025-09-11 11:51:25] [Rank 0] step:9861/10000 train_time:396468ms step_avg:40.21ms +[2025-09-11 11:51:25] [Rank 0] step:9861/10000 train_time:396468ms step_avg:40.21ms +[2025-09-11 11:51:26] [Rank 0] step:9881/10000 train_time:397180ms step_avg:40.20ms +[2025-09-11 11:51:26] [Rank 0] step:9881/10000 train_time:397180ms step_avg:40.20ms +[2025-09-11 11:51:27] [Rank 0] step:9901/10000 train_time:397889ms step_avg:40.19ms +[2025-09-11 11:51:27] [Rank 0] step:9901/10000 train_time:397889ms step_avg:40.19ms +[2025-09-11 11:51:27] [Rank 0] step:9921/10000 train_time:398599ms step_avg:40.18ms 
+[2025-09-11 11:51:27] [Rank 0] step:9921/10000 train_time:398599ms step_avg:40.18ms +[2025-09-11 11:51:28] [Rank 0] step:9941/10000 train_time:399315ms step_avg:40.17ms +[2025-09-11 11:51:28] [Rank 0] step:9941/10000 train_time:399315ms step_avg:40.17ms +[2025-09-11 11:51:29] [Rank 0] step:9961/10000 train_time:400031ms step_avg:40.16ms +[2025-09-11 11:51:29] [Rank 0] step:9961/10000 train_time:400031ms step_avg:40.16ms +[2025-09-11 11:51:30] [Rank 0] step:9981/10000 train_time:400743ms step_avg:40.15ms +[2025-09-11 11:51:30] [Rank 0] step:9981/10000 train_time:400743ms step_avg:40.15ms +[2025-09-11 11:51:30] [Rank 0] step:10000/10000 train_time:401427ms step_avg:40.14ms +[2025-09-11 11:51:30] [Rank 0] step:10000/10000 train_time:401427ms step_avg:40.14ms +[2025-09-11 11:51:30] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 11:51:30] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 11:51:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 11:51:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 11:51:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 11:51:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 11:51:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:51:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:51:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 11:51:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 11:51:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 11:51:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 11:51:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 11:51:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 11:51:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 11:51:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 11:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 11:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 11:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 11:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 11:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 11:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 11:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 11:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 11:51:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 11:51:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 11:51:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 11:51:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 11:51:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 11:51:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 11:51:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 11:51:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 11:51:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 11:51:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 11:51:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 11:51:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 11:51:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 11:51:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 11:51:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 11:51:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 11:51:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 11:51:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 11:51:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 11:51:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 11:51:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:51:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:51:44] [Rank 0] PRINT: step:10000/10000 val_loss:4.8192 total_sharp:6.7014e-04 L1_sharp:-3.5456e-04 L2_sharp:7.9603e-05 L3_sharp:9.2272e-05 L4_sharp:9.1986e-05 L5_sharp:2.1233e-04 L6_sharp:1.6188e-04 L7_sharp:1.5084e-04 L8_sharp:2.0053e-04 L9_sharp:2.3182e-04 L10_sharp:2.8301e-04 L11_sharp:3.3713e-04 L12_sharp:1.3583e-03 total_fnorm:3.7305e-01 total_l1_linf:3.1200e+02 total_spectral:1.8555e-01 L1_fnorm:1.3770e-01 L2_fnorm:1.0156e-01 L3_fnorm:9.8633e-02 L4_fnorm:9.8633e-02 L5_fnorm:9.6191e-02 L6_fnorm:9.8145e-02 L7_fnorm:9.8145e-02 L8_fnorm:9.4238e-02 L9_fnorm:9.7168e-02 L10_fnorm:9.6191e-02 L11_fnorm:9.7168e-02 L12_fnorm:9.6191e-02 L1_l1linf:2.0386e-02 L2_l1linf:9.6436e-03 L3_l1linf:1.0132e-02 L4_l1linf:1.0132e-02 L5_l1linf:1.0254e-02 L6_l1linf:9.8267e-03 L7_l1linf:1.0071e-02 L8_l1linf:8.6670e-03 L9_l1linf:8.0566e-03 L10_l1linf:8.2397e-03 L11_l1linf:9.2773e-03 L12_l1linf:9.8877e-03 L1_spectral:2.8180e-03 L2_spectral:1.5833e-03 L3_spectral:1.5422e-03 L4_spectral:1.5416e-03 L5_spectral:1.6162e-03 L6_spectral:1.5490e-03 L7_spectral:1.5364e-03 L8_spectral:1.6180e-03 L9_spectral:1.5712e-03 L10_spectral:1.5527e-03 L11_spectral:1.5570e-03 L12_spectral:1.5443e-03 train_time:401446ms step_avg:40.14ms +[2025-09-11 11:51:44] [Rank 0] PRINT: step:10000/10000 val_loss:4.8192 total_sharp:6.7014e-04 L1_sharp:-3.5456e-04 L2_sharp:7.9603e-05 L3_sharp:9.2272e-05 L4_sharp:9.1986e-05 L5_sharp:2.1233e-04 L6_sharp:1.6188e-04 L7_sharp:1.5084e-04 L8_sharp:2.0053e-04 L9_sharp:2.3182e-04 L10_sharp:2.8301e-04 L11_sharp:3.3713e-04 L12_sharp:1.3583e-03 total_fnorm:3.7305e-01 total_l1_linf:3.1200e+02 total_spectral:1.8555e-01 L1_fnorm:1.3770e-01 L2_fnorm:1.0156e-01 L3_fnorm:9.8633e-02 L4_fnorm:9.8633e-02 L5_fnorm:9.6191e-02 L6_fnorm:9.8145e-02 L7_fnorm:9.8145e-02 L8_fnorm:9.4238e-02 L9_fnorm:9.7168e-02 L10_fnorm:9.6191e-02 L11_fnorm:9.7168e-02 L12_fnorm:9.6191e-02 L1_l1linf:2.0386e-02 L2_l1linf:9.6436e-03 L3_l1linf:1.0132e-02 L4_l1linf:1.0132e-02 L5_l1linf:1.0254e-02 
L6_l1linf:9.8267e-03 L7_l1linf:1.0071e-02 L8_l1linf:8.6670e-03 L9_l1linf:8.0566e-03 L10_l1linf:8.2397e-03 L11_l1linf:9.2773e-03 L12_l1linf:9.8877e-03 L1_spectral:2.8180e-03 L2_spectral:1.5833e-03 L3_spectral:1.5422e-03 L4_spectral:1.5416e-03 L5_spectral:1.6162e-03 L6_spectral:1.5490e-03 L7_spectral:1.5364e-03 L8_spectral:1.6180e-03 L9_spectral:1.5712e-03 L10_spectral:1.5527e-03 L11_spectral:1.5570e-03 L12_spectral:1.5443e-03 train_time:401446ms step_avg:40.14ms +[2025-09-11 11:51:44] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 11:51:44 2025 --- +[2025-09-11 11:51:44] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 11:51:44 2025 --- +[2025-09-11 11:51:44] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 11:51:44] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.05_seed_44/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.05_seed_44/config.json new file mode 100644 index 0000000000000000000000000000000000000000..a129e1f3b16c3b1b6991971b66e9a0f28f0bd74e --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.05_seed_44/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 44, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.002, + "muon_lr": 0.05, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "e89055ef-8506-4fb4-a60c-18ad9b29a00f", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.05_seed_44/training_log_e89055ef-8506-4fb4-a60c-18ad9b29a00f.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.05_seed_44/training_log_e89055ef-8506-4fb4-a60c-18ad9b29a00f.txt new file mode 100644 index 0000000000000000000000000000000000000000..efcd88e448b7aee629b46d8785a88e3d09a6b973 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.05_seed_44/training_log_e89055ef-8506-4fb4-a60c-18ad9b29a00f.txt @@ -0,0 +1,4264 @@ +[2025-09-11 11:25:31] [Rank 0] PRINT: --- Script Start: Thu Sep 11 11:25:31 2025 --- +[2025-09-11 11:25:31] [Rank 0] PRINT: --- Script Start: Thu Sep 11 11:25:31 2025 --- +[2025-09-11 11:25:31] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002, muon_lr=0.05, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 11:25:31] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002, muon_lr=0.05, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 11:25:31] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 11:25:31] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 11:25:31] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 11:25:31] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 11:25:31] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.05_seed_44 +[2025-09-11 11:25:31] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.05_seed_44 +[2025-09-11 11:25:31] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import 
dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, 
"unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." 
+ "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention 
sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 11:25:31] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 11:25:31] [Rank 0] PRINT: Constructing model... +[2025-09-11 11:25:31] [Rank 0] PRINT: Constructing model... +[2025-09-11 11:25:32] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 11:25:32] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 11:25:32] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 11:25:32] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 11:25:32] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 11:25:32] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 11:25:32] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 11:25:32] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 11:25:32] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 11:25:32] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 11:25:34] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 11:25:34] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 11:25:34] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 11:25:34] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 11:25:34] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 11:25:34] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 11:25:40] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 11:25:40] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 11:25:40] [Rank 0] PRINT: Starting warmup... +[2025-09-11 11:25:40] [Rank 0] PRINT: Starting warmup... +[2025-09-11 11:26:22] [Rank 0] PRINT: Warmup complete. +[2025-09-11 11:26:22] [Rank 0] PRINT: Warmup complete. +[2025-09-11 11:26:22] [Rank 0] PRINT: Starting training... +[2025-09-11 11:26:22] [Rank 0] PRINT: Starting training... 
+[2025-09-11 11:26:23] [Rank 0] step:21/10000 train_time:1131ms step_avg:53.84ms +[2025-09-11 11:26:23] [Rank 0] step:21/10000 train_time:1131ms step_avg:53.84ms +[2025-09-11 11:26:24] [Rank 0] step:41/10000 train_time:1855ms step_avg:45.24ms +[2025-09-11 11:26:24] [Rank 0] step:41/10000 train_time:1855ms step_avg:45.24ms +[2025-09-11 11:26:25] [Rank 0] step:61/10000 train_time:2579ms step_avg:42.28ms +[2025-09-11 11:26:25] [Rank 0] step:61/10000 train_time:2579ms step_avg:42.28ms +[2025-09-11 11:26:26] [Rank 0] step:81/10000 train_time:3303ms step_avg:40.77ms +[2025-09-11 11:26:26] [Rank 0] step:81/10000 train_time:3303ms step_avg:40.77ms +[2025-09-11 11:26:26] [Rank 0] step:101/10000 train_time:4026ms step_avg:39.86ms +[2025-09-11 11:26:26] [Rank 0] step:101/10000 train_time:4026ms step_avg:39.86ms +[2025-09-11 11:26:27] [Rank 0] step:121/10000 train_time:4749ms step_avg:39.25ms +[2025-09-11 11:26:27] [Rank 0] step:121/10000 train_time:4749ms step_avg:39.25ms +[2025-09-11 11:26:28] [Rank 0] step:141/10000 train_time:5472ms step_avg:38.81ms +[2025-09-11 11:26:28] [Rank 0] step:141/10000 train_time:5472ms step_avg:38.81ms +[2025-09-11 11:26:29] [Rank 0] step:161/10000 train_time:6195ms step_avg:38.48ms +[2025-09-11 11:26:29] [Rank 0] step:161/10000 train_time:6195ms step_avg:38.48ms +[2025-09-11 11:26:29] [Rank 0] step:181/10000 train_time:6918ms step_avg:38.22ms +[2025-09-11 11:26:29] [Rank 0] step:181/10000 train_time:6918ms step_avg:38.22ms +[2025-09-11 11:26:30] [Rank 0] step:201/10000 train_time:7642ms step_avg:38.02ms +[2025-09-11 11:26:30] [Rank 0] step:201/10000 train_time:7642ms step_avg:38.02ms +[2025-09-11 11:26:31] [Rank 0] step:221/10000 train_time:8365ms step_avg:37.85ms +[2025-09-11 11:26:31] [Rank 0] step:221/10000 train_time:8365ms step_avg:37.85ms +[2025-09-11 11:26:31] [Rank 0] step:241/10000 train_time:9088ms step_avg:37.71ms +[2025-09-11 11:26:31] [Rank 0] step:241/10000 train_time:9088ms step_avg:37.71ms +[2025-09-11 11:26:32] [Rank 0] 
step:261/10000 train_time:9812ms step_avg:37.59ms +[2025-09-11 11:26:32] [Rank 0] step:261/10000 train_time:9812ms step_avg:37.59ms +[2025-09-11 11:26:33] [Rank 0] step:281/10000 train_time:10535ms step_avg:37.49ms +[2025-09-11 11:26:33] [Rank 0] step:281/10000 train_time:10535ms step_avg:37.49ms +[2025-09-11 11:26:34] [Rank 0] step:301/10000 train_time:11258ms step_avg:37.40ms +[2025-09-11 11:26:34] [Rank 0] step:301/10000 train_time:11258ms step_avg:37.40ms +[2025-09-11 11:26:34] [Rank 0] step:321/10000 train_time:11981ms step_avg:37.32ms +[2025-09-11 11:26:34] [Rank 0] step:321/10000 train_time:11981ms step_avg:37.32ms +[2025-09-11 11:26:35] [Rank 0] step:341/10000 train_time:12703ms step_avg:37.25ms +[2025-09-11 11:26:35] [Rank 0] step:341/10000 train_time:12703ms step_avg:37.25ms +[2025-09-11 11:26:36] [Rank 0] step:361/10000 train_time:13426ms step_avg:37.19ms +[2025-09-11 11:26:36] [Rank 0] step:361/10000 train_time:13426ms step_avg:37.19ms +[2025-09-11 11:26:37] [Rank 0] step:381/10000 train_time:14156ms step_avg:37.16ms +[2025-09-11 11:26:37] [Rank 0] step:381/10000 train_time:14156ms step_avg:37.16ms +[2025-09-11 11:26:37] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 11:26:37] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 11:26:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 11:26:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 11:27:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 11:27:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 11:27:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:27:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 11:27:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 11:27:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 11:27:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 11:27:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 11:27:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 11:27:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 11:27:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 11:27:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 11:27:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 11:27:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 11:27:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 11:27:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 11:27:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 11:27:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 11:27:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 11:27:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 11:27:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 11:27:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 11:27:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 11:27:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 11:27:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 11:27:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 11:27:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 11:27:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 11:27:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 11:27:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 11:27:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 11:27:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 11:27:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 11:27:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 11:27:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 11:27:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 11:27:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 11:27:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 11:27:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 11:27:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 11:27:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:27:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:27:23] [Rank 0] PRINT: step:400/10000 val_loss:6.7441 total_sharp:7.0208e-03 L1_sharp:6.1601e-03 L2_sharp:2.7482e-03 L3_sharp:2.6624e-03 L4_sharp:1.6548e-03 L5_sharp:1.0700e-03 L6_sharp:4.8442e-04 L7_sharp:4.3106e-04 L8_sharp:2.9022e-04 L9_sharp:1.9814e-04 L10_sharp:2.8055e-04 L11_sharp:3.7106e-04 L12_sharp:6.2633e-04 total_fnorm:1.9195e+01 total_l1_linf:7.4876e+04 total_spectral:9.5975e+00 L1_fnorm:6.1974e+00 L2_fnorm:5.9527e+00 L3_fnorm:5.7580e+00 L4_fnorm:5.5006e+00 L5_fnorm:5.1771e+00 L6_fnorm:4.8165e+00 L7_fnorm:4.5094e+00 L8_fnorm:4.3632e+00 L9_fnorm:3.9979e+00 L10_fnorm:4.0302e+00 L11_fnorm:3.7917e+00 L12_fnorm:3.7180e+00 L1_l1linf:2.0113e+00 L2_l1linf:1.9126e+00 L3_l1linf:1.8417e+00 L4_l1linf:1.7556e+00 L5_l1linf:1.7069e+00 L6_l1linf:1.5922e+00 L7_l1linf:1.5072e+00 L8_l1linf:1.4970e+00 L9_l1linf:1.3889e+00 L10_l1linf:1.3985e+00 L11_l1linf:1.2835e+00 L12_l1linf:1.1837e+00 L1_spectral:6.0385e-02 L2_spectral:6.0421e-02 L3_spectral:6.0344e-02 L4_spectral:6.0350e-02 L5_spectral:6.0233e-02 L6_spectral:6.0202e-02 L7_spectral:6.0175e-02 L8_spectral:6.0166e-02 L9_spectral:6.0049e-02 L10_spectral:6.0046e-02 L11_spectral:5.9990e-02 L12_spectral:6.0015e-02 train_time:14859ms step_avg:37.15ms +[2025-09-11 11:27:23] [Rank 0] PRINT: step:400/10000 val_loss:6.7441 total_sharp:7.0208e-03 L1_sharp:6.1601e-03 L2_sharp:2.7482e-03 L3_sharp:2.6624e-03 L4_sharp:1.6548e-03 L5_sharp:1.0700e-03 L6_sharp:4.8442e-04 L7_sharp:4.3106e-04 L8_sharp:2.9022e-04 L9_sharp:1.9814e-04 L10_sharp:2.8055e-04 L11_sharp:3.7106e-04 L12_sharp:6.2633e-04 total_fnorm:1.9195e+01 total_l1_linf:7.4876e+04 total_spectral:9.5975e+00 L1_fnorm:6.1974e+00 L2_fnorm:5.9527e+00 L3_fnorm:5.7580e+00 L4_fnorm:5.5006e+00 L5_fnorm:5.1771e+00 L6_fnorm:4.8165e+00 L7_fnorm:4.5094e+00 L8_fnorm:4.3632e+00 L9_fnorm:3.9979e+00 L10_fnorm:4.0302e+00 L11_fnorm:3.7917e+00 L12_fnorm:3.7180e+00 L1_l1linf:2.0113e+00 L2_l1linf:1.9126e+00 L3_l1linf:1.8417e+00 L4_l1linf:1.7556e+00 L5_l1linf:1.7069e+00 
L6_l1linf:1.5922e+00 L7_l1linf:1.5072e+00 L8_l1linf:1.4970e+00 L9_l1linf:1.3889e+00 L10_l1linf:1.3985e+00 L11_l1linf:1.2835e+00 L12_l1linf:1.1837e+00 L1_spectral:6.0385e-02 L2_spectral:6.0421e-02 L3_spectral:6.0344e-02 L4_spectral:6.0350e-02 L5_spectral:6.0233e-02 L6_spectral:6.0202e-02 L7_spectral:6.0175e-02 L8_spectral:6.0166e-02 L9_spectral:6.0049e-02 L10_spectral:6.0046e-02 L11_spectral:5.9990e-02 L12_spectral:6.0015e-02 train_time:14859ms step_avg:37.15ms +[2025-09-11 11:27:53] [Rank 0] step:401/10000 train_time:44943ms step_avg:112.08ms +[2025-09-11 11:27:53] [Rank 0] step:401/10000 train_time:44943ms step_avg:112.08ms +[2025-09-11 11:27:55] [Rank 0] step:421/10000 train_time:46869ms step_avg:111.33ms +[2025-09-11 11:27:55] [Rank 0] step:421/10000 train_time:46869ms step_avg:111.33ms +[2025-09-11 11:27:56] [Rank 0] step:441/10000 train_time:47505ms step_avg:107.72ms +[2025-09-11 11:27:56] [Rank 0] step:441/10000 train_time:47505ms step_avg:107.72ms +[2025-09-11 11:27:57] [Rank 0] step:461/10000 train_time:48141ms step_avg:104.43ms +[2025-09-11 11:27:57] [Rank 0] step:461/10000 train_time:48141ms step_avg:104.43ms +[2025-09-11 11:27:57] [Rank 0] step:481/10000 train_time:48778ms step_avg:101.41ms +[2025-09-11 11:27:57] [Rank 0] step:481/10000 train_time:48778ms step_avg:101.41ms +[2025-09-11 11:27:58] [Rank 0] step:501/10000 train_time:49414ms step_avg:98.63ms +[2025-09-11 11:27:58] [Rank 0] step:501/10000 train_time:49414ms step_avg:98.63ms +[2025-09-11 11:27:59] [Rank 0] step:521/10000 train_time:50050ms step_avg:96.07ms +[2025-09-11 11:27:59] [Rank 0] step:521/10000 train_time:50050ms step_avg:96.07ms +[2025-09-11 11:27:59] [Rank 0] step:541/10000 train_time:50685ms step_avg:93.69ms +[2025-09-11 11:27:59] [Rank 0] step:541/10000 train_time:50685ms step_avg:93.69ms +[2025-09-11 11:28:00] [Rank 0] step:561/10000 train_time:51320ms step_avg:91.48ms +[2025-09-11 11:28:00] [Rank 0] step:561/10000 train_time:51320ms step_avg:91.48ms +[2025-09-11 11:28:01] [Rank 
0] step:581/10000 train_time:51955ms step_avg:89.42ms +[2025-09-11 11:28:01] [Rank 0] step:581/10000 train_time:51955ms step_avg:89.42ms +[2025-09-11 11:28:01] [Rank 0] step:601/10000 train_time:52590ms step_avg:87.50ms +[2025-09-11 11:28:01] [Rank 0] step:601/10000 train_time:52590ms step_avg:87.50ms +[2025-09-11 11:28:02] [Rank 0] step:621/10000 train_time:53224ms step_avg:85.71ms +[2025-09-11 11:28:02] [Rank 0] step:621/10000 train_time:53224ms step_avg:85.71ms +[2025-09-11 11:28:02] [Rank 0] step:641/10000 train_time:53859ms step_avg:84.02ms +[2025-09-11 11:28:02] [Rank 0] step:641/10000 train_time:53859ms step_avg:84.02ms +[2025-09-11 11:28:03] [Rank 0] step:661/10000 train_time:54494ms step_avg:82.44ms +[2025-09-11 11:28:03] [Rank 0] step:661/10000 train_time:54494ms step_avg:82.44ms +[2025-09-11 11:28:04] [Rank 0] step:681/10000 train_time:55129ms step_avg:80.95ms +[2025-09-11 11:28:04] [Rank 0] step:681/10000 train_time:55129ms step_avg:80.95ms +[2025-09-11 11:28:04] [Rank 0] step:701/10000 train_time:55763ms step_avg:79.55ms +[2025-09-11 11:28:04] [Rank 0] step:701/10000 train_time:55763ms step_avg:79.55ms +[2025-09-11 11:28:05] [Rank 0] step:721/10000 train_time:56398ms step_avg:78.22ms +[2025-09-11 11:28:05] [Rank 0] step:721/10000 train_time:56398ms step_avg:78.22ms +[2025-09-11 11:28:06] [Rank 0] step:741/10000 train_time:57032ms step_avg:76.97ms +[2025-09-11 11:28:06] [Rank 0] step:741/10000 train_time:57032ms step_avg:76.97ms +[2025-09-11 11:28:06] [Rank 0] step:761/10000 train_time:57671ms step_avg:75.78ms +[2025-09-11 11:28:06] [Rank 0] step:761/10000 train_time:57671ms step_avg:75.78ms +[2025-09-11 11:28:07] [Rank 0] step:781/10000 train_time:58310ms step_avg:74.66ms +[2025-09-11 11:28:07] [Rank 0] step:781/10000 train_time:58310ms step_avg:74.66ms +[2025-09-11 11:28:07] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 11:28:07] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 11:28:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 11:28:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 11:28:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 11:28:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 11:28:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:28:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:28:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 11:28:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 11:28:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 11:28:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 11:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 11:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 11:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 11:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 11:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 11:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 11:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 11:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 11:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 11:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 11:28:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 11:28:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 11:28:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 11:28:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 11:28:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 11:28:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 11:28:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 11:28:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 11:28:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 11:28:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 11:28:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 11:28:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 11:28:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 11:28:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 11:28:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 11:28:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 11:28:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 11:28:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 11:28:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... 
+[2025-09-11 11:28:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 11:28:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 11:28:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 11:28:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:28:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:28:51] [Rank 0] PRINT: step:800/10000 val_loss:6.2959 total_sharp:6.2408e-03 L1_sharp:3.8455e-03 L2_sharp:1.8863e-03 L3_sharp:1.3113e-03 L4_sharp:8.3350e-04 L5_sharp:5.7019e-04 L6_sharp:4.7019e-04 L7_sharp:4.3701e-04 L8_sharp:5.0197e-04 L9_sharp:5.2065e-04 L10_sharp:4.6110e-04 L11_sharp:7.1866e-04 L12_sharp:1.6170e-03 total_fnorm:1.9750e+01 total_l1_linf:6.2976e+04 total_spectral:9.8125e+00 L1_fnorm:6.3125e+00 L2_fnorm:6.0312e+00 L3_fnorm:5.9375e+00 L4_fnorm:5.8438e+00 L5_fnorm:5.6875e+00 L6_fnorm:5.5000e+00 L7_fnorm:5.2500e+00 L8_fnorm:5.0312e+00 L9_fnorm:4.7812e+00 L10_fnorm:4.5625e+00 L11_fnorm:4.3125e+00 L12_fnorm:4.0312e+00 L1_l1linf:2.0000e+00 L2_l1linf:1.8594e+00 L3_l1linf:1.8125e+00 L4_l1linf:1.8047e+00 L5_l1linf:1.7969e+00 L6_l1linf:1.7656e+00 L7_l1linf:1.6875e+00 L8_l1linf:1.6250e+00 L9_l1linf:1.5078e+00 L10_l1linf:1.4375e+00 L11_l1linf:1.1953e+00 L12_l1linf:1.1094e+00 L1_spectral:6.7232e-02 L2_spectral:6.6158e-02 L3_spectral:6.6040e-02 L4_spectral:6.5911e-02 L5_spectral:6.5563e-02 L6_spectral:6.5651e-02 L7_spectral:6.5535e-02 L8_spectral:6.5584e-02 L9_spectral:6.5409e-02 L10_spectral:6.5126e-02 L11_spectral:6.4960e-02 L12_spectral:6.4383e-02 train_time:58933ms step_avg:73.67ms +[2025-09-11 11:28:51] [Rank 0] PRINT: step:800/10000 val_loss:6.2959 total_sharp:6.2408e-03 L1_sharp:3.8455e-03 L2_sharp:1.8863e-03 L3_sharp:1.3113e-03 L4_sharp:8.3350e-04 L5_sharp:5.7019e-04 L6_sharp:4.7019e-04 L7_sharp:4.3701e-04 L8_sharp:5.0197e-04 
L9_sharp:5.2065e-04 L10_sharp:4.6110e-04 L11_sharp:7.1866e-04 L12_sharp:1.6170e-03 total_fnorm:1.9750e+01 total_l1_linf:6.2976e+04 total_spectral:9.8125e+00 L1_fnorm:6.3125e+00 L2_fnorm:6.0312e+00 L3_fnorm:5.9375e+00 L4_fnorm:5.8438e+00 L5_fnorm:5.6875e+00 L6_fnorm:5.5000e+00 L7_fnorm:5.2500e+00 L8_fnorm:5.0312e+00 L9_fnorm:4.7812e+00 L10_fnorm:4.5625e+00 L11_fnorm:4.3125e+00 L12_fnorm:4.0312e+00 L1_l1linf:2.0000e+00 L2_l1linf:1.8594e+00 L3_l1linf:1.8125e+00 L4_l1linf:1.8047e+00 L5_l1linf:1.7969e+00 L6_l1linf:1.7656e+00 L7_l1linf:1.6875e+00 L8_l1linf:1.6250e+00 L9_l1linf:1.5078e+00 L10_l1linf:1.4375e+00 L11_l1linf:1.1953e+00 L12_l1linf:1.1094e+00 L1_spectral:6.7232e-02 L2_spectral:6.6158e-02 L3_spectral:6.6040e-02 L4_spectral:6.5911e-02 L5_spectral:6.5563e-02 L6_spectral:6.5651e-02 L7_spectral:6.5535e-02 L8_spectral:6.5584e-02 L9_spectral:6.5409e-02 L10_spectral:6.5126e-02 L11_spectral:6.4960e-02 L12_spectral:6.4383e-02 train_time:58933ms step_avg:73.67ms +[2025-09-11 11:28:53] [Rank 0] step:801/10000 train_time:60086ms step_avg:75.01ms +[2025-09-11 11:28:53] [Rank 0] step:801/10000 train_time:60086ms step_avg:75.01ms +[2025-09-11 11:28:53] [Rank 0] step:821/10000 train_time:60729ms step_avg:73.97ms +[2025-09-11 11:28:53] [Rank 0] step:821/10000 train_time:60729ms step_avg:73.97ms +[2025-09-11 11:28:54] [Rank 0] step:841/10000 train_time:61370ms step_avg:72.97ms +[2025-09-11 11:28:54] [Rank 0] step:841/10000 train_time:61370ms step_avg:72.97ms +[2025-09-11 11:28:54] [Rank 0] step:861/10000 train_time:62012ms step_avg:72.02ms +[2025-09-11 11:28:54] [Rank 0] step:861/10000 train_time:62012ms step_avg:72.02ms +[2025-09-11 11:28:55] [Rank 0] step:881/10000 train_time:62652ms step_avg:71.11ms +[2025-09-11 11:28:55] [Rank 0] step:881/10000 train_time:62652ms step_avg:71.11ms +[2025-09-11 11:28:56] [Rank 0] step:901/10000 train_time:63292ms step_avg:70.25ms +[2025-09-11 11:28:56] [Rank 0] step:901/10000 train_time:63292ms step_avg:70.25ms +[2025-09-11 11:28:56] [Rank 0] 
step:921/10000 train_time:63932ms step_avg:69.42ms +[2025-09-11 11:28:56] [Rank 0] step:921/10000 train_time:63932ms step_avg:69.42ms +[2025-09-11 11:28:57] [Rank 0] step:941/10000 train_time:64572ms step_avg:68.62ms +[2025-09-11 11:28:57] [Rank 0] step:941/10000 train_time:64572ms step_avg:68.62ms +[2025-09-11 11:28:58] [Rank 0] step:961/10000 train_time:65212ms step_avg:67.86ms +[2025-09-11 11:28:58] [Rank 0] step:961/10000 train_time:65212ms step_avg:67.86ms +[2025-09-11 11:28:58] [Rank 0] step:981/10000 train_time:65852ms step_avg:67.13ms +[2025-09-11 11:28:58] [Rank 0] step:981/10000 train_time:65852ms step_avg:67.13ms +[2025-09-11 11:28:59] [Rank 0] step:1001/10000 train_time:66493ms step_avg:66.43ms +[2025-09-11 11:28:59] [Rank 0] step:1001/10000 train_time:66493ms step_avg:66.43ms +[2025-09-11 11:29:00] [Rank 0] step:1021/10000 train_time:67131ms step_avg:65.75ms +[2025-09-11 11:29:00] [Rank 0] step:1021/10000 train_time:67131ms step_avg:65.75ms +[2025-09-11 11:29:00] [Rank 0] step:1041/10000 train_time:67770ms step_avg:65.10ms +[2025-09-11 11:29:00] [Rank 0] step:1041/10000 train_time:67770ms step_avg:65.10ms +[2025-09-11 11:29:01] [Rank 0] step:1061/10000 train_time:68409ms step_avg:64.48ms +[2025-09-11 11:29:01] [Rank 0] step:1061/10000 train_time:68409ms step_avg:64.48ms +[2025-09-11 11:29:02] [Rank 0] step:1081/10000 train_time:69049ms step_avg:63.87ms +[2025-09-11 11:29:02] [Rank 0] step:1081/10000 train_time:69049ms step_avg:63.87ms +[2025-09-11 11:29:02] [Rank 0] step:1101/10000 train_time:69688ms step_avg:63.29ms +[2025-09-11 11:29:02] [Rank 0] step:1101/10000 train_time:69688ms step_avg:63.29ms +[2025-09-11 11:29:03] [Rank 0] step:1121/10000 train_time:70326ms step_avg:62.74ms +[2025-09-11 11:29:03] [Rank 0] step:1121/10000 train_time:70326ms step_avg:62.74ms +[2025-09-11 11:29:03] [Rank 0] step:1141/10000 train_time:70965ms step_avg:62.20ms +[2025-09-11 11:29:03] [Rank 0] step:1141/10000 train_time:70965ms step_avg:62.20ms +[2025-09-11 11:29:04] 
[Rank 0] step:1161/10000 train_time:71604ms step_avg:61.67ms +[2025-09-11 11:29:04] [Rank 0] step:1161/10000 train_time:71604ms step_avg:61.67ms +[2025-09-11 11:29:05] [Rank 0] step:1181/10000 train_time:72242ms step_avg:61.17ms +[2025-09-11 11:29:05] [Rank 0] step:1181/10000 train_time:72242ms step_avg:61.17ms +[2025-09-11 11:29:05] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 11:29:05] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 11:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 11:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 11:29:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 11:29:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 11:29:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:29:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:29:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 11:29:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 11:29:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 11:29:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 11:29:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 11:29:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 11:29:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... 
+[2025-09-11 11:29:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 11:29:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 11:29:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 11:29:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 11:29:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 11:29:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 11:29:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 11:29:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 11:29:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 11:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 11:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 11:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 11:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 11:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 11:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 11:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 11:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 11:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 11:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 11:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... 
+[2025-09-11 11:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 11:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 11:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 11:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 11:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 11:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 11:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 11:29:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 11:29:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 11:29:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:29:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:29:15] [Rank 0] PRINT: step:1200/10000 val_loss:5.9901 total_sharp:3.3785e-03 L1_sharp:2.4671e-03 L2_sharp:8.4976e-04 L3_sharp:5.6711e-04 L4_sharp:3.9015e-04 L5_sharp:3.8475e-04 L6_sharp:2.9303e-04 L7_sharp:1.9834e-04 L8_sharp:3.2581e-04 L9_sharp:2.7078e-04 L10_sharp:2.8762e-04 L11_sharp:4.8053e-04 L12_sharp:1.1512e-03 total_fnorm:2.1000e+01 total_l1_linf:6.6048e+04 total_spectral:1.0500e+01 L1_fnorm:6.3125e+00 L2_fnorm:6.1875e+00 L3_fnorm:6.1250e+00 L4_fnorm:6.0938e+00 L5_fnorm:5.9688e+00 L6_fnorm:5.9688e+00 L7_fnorm:5.8438e+00 L8_fnorm:5.6875e+00 L9_fnorm:5.5625e+00 L10_fnorm:5.5000e+00 L11_fnorm:5.2812e+00 L12_fnorm:4.9375e+00 L1_l1linf:1.8828e+00 L2_l1linf:1.7812e+00 L3_l1linf:1.7969e+00 L4_l1linf:1.7812e+00 L5_l1linf:1.7812e+00 L6_l1linf:1.7812e+00 L7_l1linf:1.7734e+00 L8_l1linf:1.7734e+00 L9_l1linf:1.7422e+00 L10_l1linf:1.7031e+00 L11_l1linf:1.5312e+00 L12_l1linf:1.2734e+00 L1_spectral:6.9764e-02 L2_spectral:6.8575e-02 L3_spectral:6.8781e-02 L4_spectral:6.8364e-02 L5_spectral:6.8236e-02 L6_spectral:6.7862e-02 L7_spectral:6.7736e-02 L8_spectral:6.7708e-02 L9_spectral:6.7865e-02 L10_spectral:6.8140e-02 L11_spectral:6.8213e-02 L12_spectral:6.7816e-02 train_time:72864ms step_avg:60.72ms +[2025-09-11 11:29:15] [Rank 0] PRINT: step:1200/10000 val_loss:5.9901 total_sharp:3.3785e-03 L1_sharp:2.4671e-03 L2_sharp:8.4976e-04 L3_sharp:5.6711e-04 L4_sharp:3.9015e-04 L5_sharp:3.8475e-04 L6_sharp:2.9303e-04 L7_sharp:1.9834e-04 L8_sharp:3.2581e-04 L9_sharp:2.7078e-04 L10_sharp:2.8762e-04 L11_sharp:4.8053e-04 L12_sharp:1.1512e-03 total_fnorm:2.1000e+01 total_l1_linf:6.6048e+04 total_spectral:1.0500e+01 L1_fnorm:6.3125e+00 L2_fnorm:6.1875e+00 L3_fnorm:6.1250e+00 L4_fnorm:6.0938e+00 L5_fnorm:5.9688e+00 L6_fnorm:5.9688e+00 L7_fnorm:5.8438e+00 L8_fnorm:5.6875e+00 L9_fnorm:5.5625e+00 L10_fnorm:5.5000e+00 L11_fnorm:5.2812e+00 L12_fnorm:4.9375e+00 L1_l1linf:1.8828e+00 L2_l1linf:1.7812e+00 L3_l1linf:1.7969e+00 L4_l1linf:1.7812e+00 L5_l1linf:1.7812e+00 
L6_l1linf:1.7812e+00 L7_l1linf:1.7734e+00 L8_l1linf:1.7734e+00 L9_l1linf:1.7422e+00 L10_l1linf:1.7031e+00 L11_l1linf:1.5312e+00 L12_l1linf:1.2734e+00 L1_spectral:6.9764e-02 L2_spectral:6.8575e-02 L3_spectral:6.8781e-02 L4_spectral:6.8364e-02 L5_spectral:6.8236e-02 L6_spectral:6.7862e-02 L7_spectral:6.7736e-02 L8_spectral:6.7708e-02 L9_spectral:6.7865e-02 L10_spectral:6.8140e-02 L11_spectral:6.8213e-02 L12_spectral:6.7816e-02 train_time:72864ms step_avg:60.72ms +[2025-09-11 11:29:16] [Rank 0] step:1201/10000 train_time:74045ms step_avg:61.65ms +[2025-09-11 11:29:16] [Rank 0] step:1201/10000 train_time:74045ms step_avg:61.65ms +[2025-09-11 11:29:17] [Rank 0] step:1221/10000 train_time:74688ms step_avg:61.17ms +[2025-09-11 11:29:17] [Rank 0] step:1221/10000 train_time:74688ms step_avg:61.17ms +[2025-09-11 11:29:18] [Rank 0] step:1241/10000 train_time:75329ms step_avg:60.70ms +[2025-09-11 11:29:18] [Rank 0] step:1241/10000 train_time:75329ms step_avg:60.70ms +[2025-09-11 11:29:18] [Rank 0] step:1261/10000 train_time:75969ms step_avg:60.25ms +[2025-09-11 11:29:18] [Rank 0] step:1261/10000 train_time:75969ms step_avg:60.25ms +[2025-09-11 11:29:19] [Rank 0] step:1281/10000 train_time:76609ms step_avg:59.80ms +[2025-09-11 11:29:19] [Rank 0] step:1281/10000 train_time:76609ms step_avg:59.80ms +[2025-09-11 11:29:20] [Rank 0] step:1301/10000 train_time:77249ms step_avg:59.38ms +[2025-09-11 11:29:20] [Rank 0] step:1301/10000 train_time:77249ms step_avg:59.38ms +[2025-09-11 11:29:20] [Rank 0] step:1321/10000 train_time:77888ms step_avg:58.96ms +[2025-09-11 11:29:20] [Rank 0] step:1321/10000 train_time:77888ms step_avg:58.96ms +[2025-09-11 11:29:21] [Rank 0] step:1341/10000 train_time:78528ms step_avg:58.56ms +[2025-09-11 11:29:21] [Rank 0] step:1341/10000 train_time:78528ms step_avg:58.56ms +[2025-09-11 11:29:22] [Rank 0] step:1361/10000 train_time:79167ms step_avg:58.17ms +[2025-09-11 11:29:22] [Rank 0] step:1361/10000 train_time:79167ms step_avg:58.17ms +[2025-09-11 11:29:22] 
[Rank 0] step:1381/10000 train_time:79807ms step_avg:57.79ms +[2025-09-11 11:29:22] [Rank 0] step:1381/10000 train_time:79807ms step_avg:57.79ms +[2025-09-11 11:29:23] [Rank 0] step:1401/10000 train_time:80447ms step_avg:57.42ms +[2025-09-11 11:29:23] [Rank 0] step:1401/10000 train_time:80447ms step_avg:57.42ms +[2025-09-11 11:29:23] [Rank 0] step:1421/10000 train_time:81086ms step_avg:57.06ms +[2025-09-11 11:29:23] [Rank 0] step:1421/10000 train_time:81086ms step_avg:57.06ms +[2025-09-11 11:29:24] [Rank 0] step:1441/10000 train_time:81733ms step_avg:56.72ms +[2025-09-11 11:29:24] [Rank 0] step:1441/10000 train_time:81733ms step_avg:56.72ms +[2025-09-11 11:29:25] [Rank 0] step:1461/10000 train_time:82372ms step_avg:56.38ms +[2025-09-11 11:29:25] [Rank 0] step:1461/10000 train_time:82372ms step_avg:56.38ms +[2025-09-11 11:29:25] [Rank 0] step:1481/10000 train_time:83011ms step_avg:56.05ms +[2025-09-11 11:29:25] [Rank 0] step:1481/10000 train_time:83011ms step_avg:56.05ms +[2025-09-11 11:29:26] [Rank 0] step:1501/10000 train_time:83655ms step_avg:55.73ms +[2025-09-11 11:29:26] [Rank 0] step:1501/10000 train_time:83655ms step_avg:55.73ms +[2025-09-11 11:29:27] [Rank 0] step:1521/10000 train_time:84299ms step_avg:55.42ms +[2025-09-11 11:29:27] [Rank 0] step:1521/10000 train_time:84299ms step_avg:55.42ms +[2025-09-11 11:29:28] [Rank 0] step:1541/10000 train_time:85204ms step_avg:55.29ms +[2025-09-11 11:29:28] [Rank 0] step:1541/10000 train_time:85204ms step_avg:55.29ms +[2025-09-11 11:29:28] [Rank 0] step:1561/10000 train_time:86107ms step_avg:55.16ms +[2025-09-11 11:29:28] [Rank 0] step:1561/10000 train_time:86107ms step_avg:55.16ms +[2025-09-11 11:29:29] [Rank 0] step:1581/10000 train_time:86751ms step_avg:54.87ms +[2025-09-11 11:29:29] [Rank 0] step:1581/10000 train_time:86751ms step_avg:54.87ms +[2025-09-11 11:29:30] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 11:29:30] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 11:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 11:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 11:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 11:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 11:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 11:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 11:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 11:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 11:29:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 11:29:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 11:29:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 11:29:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 11:29:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 11:29:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 11:29:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 11:29:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 11:29:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 11:29:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 11:29:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 11:29:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 11:29:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 11:29:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 11:29:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 11:29:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 11:29:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 11:29:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 11:29:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 11:29:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 11:29:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 11:29:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 11:29:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 11:29:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 11:29:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 11:29:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 11:29:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 11:29:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 11:29:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 11:29:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 11:29:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 11:29:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 11:29:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:29:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:29:43] [Rank 0] PRINT: step:1600/10000 val_loss:5.7874 total_sharp:2.2422e-03 L1_sharp:1.3124e-03 L2_sharp:5.0614e-04 L3_sharp:2.2105e-04 L4_sharp:1.7818e-04 L5_sharp:2.1385e-04 L6_sharp:2.0072e-04 L7_sharp:1.5678e-04 L8_sharp:2.8711e-04 L9_sharp:2.4921e-04 L10_sharp:2.2390e-04 L11_sharp:3.1137e-04 L12_sharp:1.3226e-03 total_fnorm:2.1625e+01 total_l1_linf:6.5536e+04 total_spectral:1.0750e+01 L1_fnorm:6.3750e+00 L2_fnorm:6.2188e+00 L3_fnorm:6.1875e+00 L4_fnorm:6.1875e+00 L5_fnorm:6.0000e+00 L6_fnorm:6.0938e+00 L7_fnorm:6.0000e+00 L8_fnorm:5.8438e+00 L9_fnorm:5.9062e+00 L10_fnorm:5.8750e+00 L11_fnorm:5.7188e+00 L12_fnorm:5.2500e+00 L1_l1linf:1.8047e+00 L2_l1linf:1.7188e+00 L3_l1linf:1.7422e+00 L4_l1linf:1.7109e+00 L5_l1linf:1.6797e+00 L6_l1linf:1.6953e+00 L7_l1linf:1.7031e+00 L8_l1linf:1.7344e+00 L9_l1linf:1.7188e+00 L10_l1linf:1.7500e+00 L11_l1linf:1.6172e+00 L12_l1linf:1.3047e+00 L1_spectral:7.2536e-02 L2_spectral:7.0600e-02 L3_spectral:7.1167e-02 L4_spectral:7.1172e-02 L5_spectral:7.0648e-02 L6_spectral:6.9942e-02 L7_spectral:6.9991e-02 L8_spectral:6.9765e-02 L9_spectral:6.9284e-02 L10_spectral:6.9055e-02 L11_spectral:6.9555e-02 L12_spectral:6.9975e-02 train_time:87654ms step_avg:54.78ms +[2025-09-11 11:29:43] [Rank 0] PRINT: step:1600/10000 
val_loss:5.7874 total_sharp:2.2422e-03 L1_sharp:1.3124e-03 L2_sharp:5.0614e-04 L3_sharp:2.2105e-04 L4_sharp:1.7818e-04 L5_sharp:2.1385e-04 L6_sharp:2.0072e-04 L7_sharp:1.5678e-04 L8_sharp:2.8711e-04 L9_sharp:2.4921e-04 L10_sharp:2.2390e-04 L11_sharp:3.1137e-04 L12_sharp:1.3226e-03 total_fnorm:2.1625e+01 total_l1_linf:6.5536e+04 total_spectral:1.0750e+01 L1_fnorm:6.3750e+00 L2_fnorm:6.2188e+00 L3_fnorm:6.1875e+00 L4_fnorm:6.1875e+00 L5_fnorm:6.0000e+00 L6_fnorm:6.0938e+00 L7_fnorm:6.0000e+00 L8_fnorm:5.8438e+00 L9_fnorm:5.9062e+00 L10_fnorm:5.8750e+00 L11_fnorm:5.7188e+00 L12_fnorm:5.2500e+00 L1_l1linf:1.8047e+00 L2_l1linf:1.7188e+00 L3_l1linf:1.7422e+00 L4_l1linf:1.7109e+00 L5_l1linf:1.6797e+00 L6_l1linf:1.6953e+00 L7_l1linf:1.7031e+00 L8_l1linf:1.7344e+00 L9_l1linf:1.7188e+00 L10_l1linf:1.7500e+00 L11_l1linf:1.6172e+00 L12_l1linf:1.3047e+00 L1_spectral:7.2536e-02 L2_spectral:7.0600e-02 L3_spectral:7.1167e-02 L4_spectral:7.1172e-02 L5_spectral:7.0648e-02 L6_spectral:6.9942e-02 L7_spectral:6.9991e-02 L8_spectral:6.9765e-02 L9_spectral:6.9284e-02 L10_spectral:6.9055e-02 L11_spectral:6.9555e-02 L12_spectral:6.9975e-02 train_time:87654ms step_avg:54.78ms +[2025-09-11 11:29:44] [Rank 0] step:1601/10000 train_time:88818ms step_avg:55.48ms +[2025-09-11 11:29:44] [Rank 0] step:1601/10000 train_time:88818ms step_avg:55.48ms +[2025-09-11 11:29:45] [Rank 0] step:1621/10000 train_time:89466ms step_avg:55.19ms +[2025-09-11 11:29:45] [Rank 0] step:1621/10000 train_time:89466ms step_avg:55.19ms +[2025-09-11 11:29:46] [Rank 0] step:1641/10000 train_time:90111ms step_avg:54.91ms +[2025-09-11 11:29:46] [Rank 0] step:1641/10000 train_time:90111ms step_avg:54.91ms +[2025-09-11 11:29:46] [Rank 0] step:1661/10000 train_time:90755ms step_avg:54.64ms +[2025-09-11 11:29:46] [Rank 0] step:1661/10000 train_time:90755ms step_avg:54.64ms +[2025-09-11 11:29:47] [Rank 0] step:1681/10000 train_time:91400ms step_avg:54.37ms +[2025-09-11 11:29:47] [Rank 0] step:1681/10000 train_time:91400ms 
step_avg:54.37ms +[2025-09-11 11:29:48] [Rank 0] step:1701/10000 train_time:92044ms step_avg:54.11ms +[2025-09-11 11:29:48] [Rank 0] step:1701/10000 train_time:92044ms step_avg:54.11ms +[2025-09-11 11:29:48] [Rank 0] step:1721/10000 train_time:92688ms step_avg:53.86ms +[2025-09-11 11:29:48] [Rank 0] step:1721/10000 train_time:92688ms step_avg:53.86ms +[2025-09-11 11:29:49] [Rank 0] step:1741/10000 train_time:93332ms step_avg:53.61ms +[2025-09-11 11:29:49] [Rank 0] step:1741/10000 train_time:93332ms step_avg:53.61ms +[2025-09-11 11:29:50] [Rank 0] step:1761/10000 train_time:93977ms step_avg:53.37ms +[2025-09-11 11:29:50] [Rank 0] step:1761/10000 train_time:93977ms step_avg:53.37ms +[2025-09-11 11:29:50] [Rank 0] step:1781/10000 train_time:94621ms step_avg:53.13ms +[2025-09-11 11:29:50] [Rank 0] step:1781/10000 train_time:94621ms step_avg:53.13ms +[2025-09-11 11:29:51] [Rank 0] step:1801/10000 train_time:95265ms step_avg:52.90ms +[2025-09-11 11:29:51] [Rank 0] step:1801/10000 train_time:95265ms step_avg:52.90ms +[2025-09-11 11:29:52] [Rank 0] step:1821/10000 train_time:95909ms step_avg:52.67ms +[2025-09-11 11:29:52] [Rank 0] step:1821/10000 train_time:95909ms step_avg:52.67ms +[2025-09-11 11:29:52] [Rank 0] step:1841/10000 train_time:96552ms step_avg:52.45ms +[2025-09-11 11:29:52] [Rank 0] step:1841/10000 train_time:96552ms step_avg:52.45ms +[2025-09-11 11:29:53] [Rank 0] step:1861/10000 train_time:97196ms step_avg:52.23ms +[2025-09-11 11:29:53] [Rank 0] step:1861/10000 train_time:97196ms step_avg:52.23ms +[2025-09-11 11:29:53] [Rank 0] step:1881/10000 train_time:97840ms step_avg:52.02ms +[2025-09-11 11:29:53] [Rank 0] step:1881/10000 train_time:97840ms step_avg:52.02ms +[2025-09-11 11:29:54] [Rank 0] step:1901/10000 train_time:98485ms step_avg:51.81ms +[2025-09-11 11:29:54] [Rank 0] step:1901/10000 train_time:98485ms step_avg:51.81ms +[2025-09-11 11:29:55] [Rank 0] step:1921/10000 train_time:99130ms step_avg:51.60ms +[2025-09-11 11:29:55] [Rank 0] step:1921/10000 
train_time:99130ms step_avg:51.60ms +[2025-09-11 11:29:55] [Rank 0] step:1941/10000 train_time:99774ms step_avg:51.40ms +[2025-09-11 11:29:55] [Rank 0] step:1941/10000 train_time:99774ms step_avg:51.40ms +[2025-09-11 11:29:56] [Rank 0] step:1961/10000 train_time:100417ms step_avg:51.21ms +[2025-09-11 11:29:56] [Rank 0] step:1961/10000 train_time:100417ms step_avg:51.21ms +[2025-09-11 11:29:57] [Rank 0] step:1981/10000 train_time:101061ms step_avg:51.02ms +[2025-09-11 11:29:57] [Rank 0] step:1981/10000 train_time:101061ms step_avg:51.02ms +[2025-09-11 11:29:57] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 11:29:57] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 11:29:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 11:29:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 11:30:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 11:30:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 11:30:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:30:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:30:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 11:30:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 11:30:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 11:30:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 11:30:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 11:30:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 11:30:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 11:30:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 11:30:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 11:30:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 11:30:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 11:30:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 11:30:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 11:30:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 11:30:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 11:30:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 11:30:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 11:30:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 11:30:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 11:30:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 11:30:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 11:30:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 11:30:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 11:30:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 11:30:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 11:30:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 11:30:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 11:30:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 11:30:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 11:30:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 11:30:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 11:30:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 11:30:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 11:30:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 11:30:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 11:30:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 11:30:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:30:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:30:07] [Rank 0] PRINT: step:2000/10000 val_loss:5.6308 total_sharp:1.9319e-03 L1_sharp:9.9486e-04 L2_sharp:4.1101e-04 L3_sharp:2.3641e-04 L4_sharp:1.9555e-04 L5_sharp:2.8990e-04 L6_sharp:1.5817e-04 L7_sharp:1.0841e-04 L8_sharp:2.9477e-04 L9_sharp:2.4761e-04 L10_sharp:1.9508e-04 L11_sharp:3.9120e-04 L12_sharp:1.5427e-03 total_fnorm:2.1750e+01 total_l1_linf:6.4768e+04 total_spectral:1.0812e+01 L1_fnorm:6.4062e+00 L2_fnorm:6.2500e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.2188e+00 L5_fnorm:6.0625e+00 L6_fnorm:6.1875e+00 L7_fnorm:6.0938e+00 L8_fnorm:5.9375e+00 L9_fnorm:6.0312e+00 L10_fnorm:6.0312e+00 L11_fnorm:5.9062e+00 L12_fnorm:5.4375e+00 L1_l1linf:1.7812e+00 L2_l1linf:1.7109e+00 L3_l1linf:1.6953e+00 L4_l1linf:1.6406e+00 L5_l1linf:1.6250e+00 L6_l1linf:1.6328e+00 L7_l1linf:1.6406e+00 L8_l1linf:1.6875e+00 L9_l1linf:1.6797e+00 L10_l1linf:1.7188e+00 L11_l1linf:1.6406e+00 L12_l1linf:1.3203e+00 L1_spectral:7.4143e-02 L2_spectral:7.2238e-02 L3_spectral:7.3418e-02 L4_spectral:7.2789e-02 L5_spectral:7.3302e-02 L6_spectral:7.1976e-02 L7_spectral:7.1386e-02 L8_spectral:7.1335e-02 L9_spectral:7.0880e-02 L10_spectral:7.0451e-02 L11_spectral:7.1032e-02 L12_spectral:7.1702e-02 train_time:101687ms step_avg:50.84ms +[2025-09-11 11:30:07] [Rank 0] PRINT: step:2000/10000 val_loss:5.6308 total_sharp:1.9319e-03 L1_sharp:9.9486e-04 L2_sharp:4.1101e-04 L3_sharp:2.3641e-04 L4_sharp:1.9555e-04 L5_sharp:2.8990e-04 L6_sharp:1.5817e-04 L7_sharp:1.0841e-04 L8_sharp:2.9477e-04 L9_sharp:2.4761e-04 L10_sharp:1.9508e-04 L11_sharp:3.9120e-04 L12_sharp:1.5427e-03 total_fnorm:2.1750e+01 total_l1_linf:6.4768e+04 total_spectral:1.0812e+01 L1_fnorm:6.4062e+00 L2_fnorm:6.2500e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.2188e+00 L5_fnorm:6.0625e+00 L6_fnorm:6.1875e+00 L7_fnorm:6.0938e+00 L8_fnorm:5.9375e+00 L9_fnorm:6.0312e+00 L10_fnorm:6.0312e+00 L11_fnorm:5.9062e+00 L12_fnorm:5.4375e+00 L1_l1linf:1.7812e+00 L2_l1linf:1.7109e+00 L3_l1linf:1.6953e+00 L4_l1linf:1.6406e+00 L5_l1linf:1.6250e+00 
L6_l1linf:1.6328e+00 L7_l1linf:1.6406e+00 L8_l1linf:1.6875e+00 L9_l1linf:1.6797e+00 L10_l1linf:1.7188e+00 L11_l1linf:1.6406e+00 L12_l1linf:1.3203e+00 L1_spectral:7.4143e-02 L2_spectral:7.2238e-02 L3_spectral:7.3418e-02 L4_spectral:7.2789e-02 L5_spectral:7.3302e-02 L6_spectral:7.1976e-02 L7_spectral:7.1386e-02 L8_spectral:7.1335e-02 L9_spectral:7.0880e-02 L10_spectral:7.0451e-02 L11_spectral:7.1032e-02 L12_spectral:7.1702e-02 train_time:101687ms step_avg:50.84ms +[2025-09-11 11:30:08] [Rank 0] step:2001/10000 train_time:102892ms step_avg:51.42ms +[2025-09-11 11:30:08] [Rank 0] step:2001/10000 train_time:102892ms step_avg:51.42ms +[2025-09-11 11:30:09] [Rank 0] step:2021/10000 train_time:103540ms step_avg:51.23ms +[2025-09-11 11:30:09] [Rank 0] step:2021/10000 train_time:103540ms step_avg:51.23ms +[2025-09-11 11:30:10] [Rank 0] step:2041/10000 train_time:104185ms step_avg:51.05ms +[2025-09-11 11:30:10] [Rank 0] step:2041/10000 train_time:104185ms step_avg:51.05ms +[2025-09-11 11:30:10] [Rank 0] step:2061/10000 train_time:104830ms step_avg:50.86ms +[2025-09-11 11:30:10] [Rank 0] step:2061/10000 train_time:104830ms step_avg:50.86ms +[2025-09-11 11:30:11] [Rank 0] step:2081/10000 train_time:105475ms step_avg:50.68ms +[2025-09-11 11:30:11] [Rank 0] step:2081/10000 train_time:105475ms step_avg:50.68ms +[2025-09-11 11:30:12] [Rank 0] step:2101/10000 train_time:106119ms step_avg:50.51ms +[2025-09-11 11:30:12] [Rank 0] step:2101/10000 train_time:106119ms step_avg:50.51ms +[2025-09-11 11:30:12] [Rank 0] step:2121/10000 train_time:106763ms step_avg:50.34ms +[2025-09-11 11:30:12] [Rank 0] step:2121/10000 train_time:106763ms step_avg:50.34ms +[2025-09-11 11:30:13] [Rank 0] step:2141/10000 train_time:107407ms step_avg:50.17ms +[2025-09-11 11:30:13] [Rank 0] step:2141/10000 train_time:107407ms step_avg:50.17ms +[2025-09-11 11:30:14] [Rank 0] step:2161/10000 train_time:108052ms step_avg:50.00ms +[2025-09-11 11:30:14] [Rank 0] step:2161/10000 train_time:108052ms step_avg:50.00ms 
+[2025-09-11 11:30:14] [Rank 0] step:2181/10000 train_time:108694ms step_avg:49.84ms +[2025-09-11 11:30:14] [Rank 0] step:2181/10000 train_time:108694ms step_avg:49.84ms +[2025-09-11 11:30:15] [Rank 0] step:2201/10000 train_time:109339ms step_avg:49.68ms +[2025-09-11 11:30:15] [Rank 0] step:2201/10000 train_time:109339ms step_avg:49.68ms +[2025-09-11 11:30:15] [Rank 0] step:2221/10000 train_time:109982ms step_avg:49.52ms +[2025-09-11 11:30:15] [Rank 0] step:2221/10000 train_time:109982ms step_avg:49.52ms +[2025-09-11 11:30:16] [Rank 0] step:2241/10000 train_time:110638ms step_avg:49.37ms +[2025-09-11 11:30:16] [Rank 0] step:2241/10000 train_time:110638ms step_avg:49.37ms +[2025-09-11 11:30:17] [Rank 0] step:2261/10000 train_time:111294ms step_avg:49.22ms +[2025-09-11 11:30:17] [Rank 0] step:2261/10000 train_time:111294ms step_avg:49.22ms +[2025-09-11 11:30:17] [Rank 0] step:2281/10000 train_time:111950ms step_avg:49.08ms +[2025-09-11 11:30:17] [Rank 0] step:2281/10000 train_time:111950ms step_avg:49.08ms +[2025-09-11 11:30:18] [Rank 0] step:2301/10000 train_time:112607ms step_avg:48.94ms +[2025-09-11 11:30:18] [Rank 0] step:2301/10000 train_time:112607ms step_avg:48.94ms +[2025-09-11 11:30:19] [Rank 0] step:2321/10000 train_time:113264ms step_avg:48.80ms +[2025-09-11 11:30:19] [Rank 0] step:2321/10000 train_time:113264ms step_avg:48.80ms +[2025-09-11 11:30:19] [Rank 0] step:2341/10000 train_time:113921ms step_avg:48.66ms +[2025-09-11 11:30:19] [Rank 0] step:2341/10000 train_time:113921ms step_avg:48.66ms +[2025-09-11 11:30:20] [Rank 0] step:2361/10000 train_time:114578ms step_avg:48.53ms +[2025-09-11 11:30:20] [Rank 0] step:2361/10000 train_time:114578ms step_avg:48.53ms +[2025-09-11 11:30:21] [Rank 0] step:2381/10000 train_time:115235ms step_avg:48.40ms +[2025-09-11 11:30:21] [Rank 0] step:2381/10000 train_time:115235ms step_avg:48.40ms +[2025-09-11 11:30:21] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 11:30:21] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 11:30:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 11:30:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 11:30:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 11:30:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 11:30:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:30:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:30:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 11:30:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 11:30:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 11:30:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 11:30:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 11:30:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 11:30:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 11:30:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 11:30:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 11:30:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 11:30:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 11:30:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 11:30:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 11:30:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 11:30:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 11:30:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 11:30:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 11:30:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 11:30:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 11:30:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 11:30:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 11:30:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 11:30:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 11:30:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 11:30:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 11:30:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 11:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 11:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 11:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 11:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 11:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 11:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 11:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 11:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 11:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 11:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 11:30:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:30:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:30:31] [Rank 0] PRINT: step:2400/10000 val_loss:5.4958 total_sharp:1.5854e-03 L1_sharp:8.5475e-04 L2_sharp:2.7820e-04 L3_sharp:1.4149e-04 L4_sharp:1.1407e-04 L5_sharp:1.7393e-04 L6_sharp:1.5710e-04 L7_sharp:1.1953e-04 L8_sharp:2.6537e-04 L9_sharp:2.2521e-04 L10_sharp:2.1515e-04 L11_sharp:3.4964e-04 L12_sharp:1.0412e-03 total_fnorm:2.1875e+01 total_l1_linf:6.3488e+04 total_spectral:1.0938e+01 L1_fnorm:6.4375e+00 L2_fnorm:6.2500e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.2500e+00 L5_fnorm:6.0938e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.1562e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.0938e+00 L10_fnorm:6.0938e+00 L11_fnorm:6.0312e+00 L12_fnorm:5.6562e+00 L1_l1linf:1.7578e+00 L2_l1linf:1.6406e+00 L3_l1linf:1.6328e+00 L4_l1linf:1.6094e+00 L5_l1linf:1.5625e+00 L6_l1linf:1.5703e+00 L7_l1linf:1.5703e+00 L8_l1linf:1.6328e+00 L9_l1linf:1.6328e+00 L10_l1linf:1.6562e+00 L11_l1linf:1.6406e+00 L12_l1linf:1.3984e+00 L1_spectral:7.5205e-02 L2_spectral:7.3472e-02 L3_spectral:7.4042e-02 L4_spectral:7.4586e-02 L5_spectral:7.4542e-02 L6_spectral:7.3615e-02 L7_spectral:7.3212e-02 L8_spectral:7.2900e-02 L9_spectral:7.2885e-02 L10_spectral:7.1988e-02 L11_spectral:7.1609e-02 L12_spectral:7.2806e-02 train_time:115873ms step_avg:48.28ms +[2025-09-11 11:30:31] [Rank 0] PRINT: step:2400/10000 
val_loss:5.4958 total_sharp:1.5854e-03 L1_sharp:8.5475e-04 L2_sharp:2.7820e-04 L3_sharp:1.4149e-04 L4_sharp:1.1407e-04 L5_sharp:1.7393e-04 L6_sharp:1.5710e-04 L7_sharp:1.1953e-04 L8_sharp:2.6537e-04 L9_sharp:2.2521e-04 L10_sharp:2.1515e-04 L11_sharp:3.4964e-04 L12_sharp:1.0412e-03 total_fnorm:2.1875e+01 total_l1_linf:6.3488e+04 total_spectral:1.0938e+01 L1_fnorm:6.4375e+00 L2_fnorm:6.2500e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.2500e+00 L5_fnorm:6.0938e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.1562e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.0938e+00 L10_fnorm:6.0938e+00 L11_fnorm:6.0312e+00 L12_fnorm:5.6562e+00 L1_l1linf:1.7578e+00 L2_l1linf:1.6406e+00 L3_l1linf:1.6328e+00 L4_l1linf:1.6094e+00 L5_l1linf:1.5625e+00 L6_l1linf:1.5703e+00 L7_l1linf:1.5703e+00 L8_l1linf:1.6328e+00 L9_l1linf:1.6328e+00 L10_l1linf:1.6562e+00 L11_l1linf:1.6406e+00 L12_l1linf:1.3984e+00 L1_spectral:7.5205e-02 L2_spectral:7.3472e-02 L3_spectral:7.4042e-02 L4_spectral:7.4586e-02 L5_spectral:7.4542e-02 L6_spectral:7.3615e-02 L7_spectral:7.3212e-02 L8_spectral:7.2900e-02 L9_spectral:7.2885e-02 L10_spectral:7.1988e-02 L11_spectral:7.1609e-02 L12_spectral:7.2806e-02 train_time:115873ms step_avg:48.28ms +[2025-09-11 11:30:32] [Rank 0] step:2401/10000 train_time:117088ms step_avg:48.77ms +[2025-09-11 11:30:32] [Rank 0] step:2401/10000 train_time:117088ms step_avg:48.77ms +[2025-09-11 11:30:33] [Rank 0] step:2421/10000 train_time:118061ms step_avg:48.77ms +[2025-09-11 11:30:33] [Rank 0] step:2421/10000 train_time:118061ms step_avg:48.77ms +[2025-09-11 11:30:34] [Rank 0] step:2441/10000 train_time:118720ms step_avg:48.64ms +[2025-09-11 11:30:34] [Rank 0] step:2441/10000 train_time:118720ms step_avg:48.64ms +[2025-09-11 11:30:35] [Rank 0] step:2461/10000 train_time:119378ms step_avg:48.51ms +[2025-09-11 11:30:35] [Rank 0] step:2461/10000 train_time:119378ms step_avg:48.51ms +[2025-09-11 11:30:35] [Rank 0] step:2481/10000 train_time:120036ms step_avg:48.38ms +[2025-09-11 11:30:35] [Rank 0] step:2481/10000 
train_time:120036ms step_avg:48.38ms +[2025-09-11 11:30:36] [Rank 0] step:2501/10000 train_time:120694ms step_avg:48.26ms +[2025-09-11 11:30:36] [Rank 0] step:2501/10000 train_time:120694ms step_avg:48.26ms +[2025-09-11 11:30:37] [Rank 0] step:2521/10000 train_time:121352ms step_avg:48.14ms +[2025-09-11 11:30:37] [Rank 0] step:2521/10000 train_time:121352ms step_avg:48.14ms +[2025-09-11 11:30:37] [Rank 0] step:2541/10000 train_time:122009ms step_avg:48.02ms +[2025-09-11 11:30:37] [Rank 0] step:2541/10000 train_time:122009ms step_avg:48.02ms +[2025-09-11 11:30:38] [Rank 0] step:2561/10000 train_time:122667ms step_avg:47.90ms +[2025-09-11 11:30:38] [Rank 0] step:2561/10000 train_time:122667ms step_avg:47.90ms +[2025-09-11 11:30:39] [Rank 0] step:2581/10000 train_time:123323ms step_avg:47.78ms +[2025-09-11 11:30:39] [Rank 0] step:2581/10000 train_time:123323ms step_avg:47.78ms +[2025-09-11 11:30:39] [Rank 0] step:2601/10000 train_time:123981ms step_avg:47.67ms +[2025-09-11 11:30:39] [Rank 0] step:2601/10000 train_time:123981ms step_avg:47.67ms +[2025-09-11 11:30:40] [Rank 0] step:2621/10000 train_time:124638ms step_avg:47.55ms +[2025-09-11 11:30:40] [Rank 0] step:2621/10000 train_time:124638ms step_avg:47.55ms +[2025-09-11 11:30:41] [Rank 0] step:2641/10000 train_time:125295ms step_avg:47.44ms +[2025-09-11 11:30:41] [Rank 0] step:2641/10000 train_time:125295ms step_avg:47.44ms +[2025-09-11 11:30:41] [Rank 0] step:2661/10000 train_time:125953ms step_avg:47.33ms +[2025-09-11 11:30:41] [Rank 0] step:2661/10000 train_time:125953ms step_avg:47.33ms +[2025-09-11 11:30:42] [Rank 0] step:2681/10000 train_time:126610ms step_avg:47.22ms +[2025-09-11 11:30:42] [Rank 0] step:2681/10000 train_time:126610ms step_avg:47.22ms +[2025-09-11 11:30:43] [Rank 0] step:2701/10000 train_time:127268ms step_avg:47.12ms +[2025-09-11 11:30:43] [Rank 0] step:2701/10000 train_time:127268ms step_avg:47.12ms +[2025-09-11 11:30:43] [Rank 0] step:2721/10000 train_time:127925ms step_avg:47.01ms 
+[2025-09-11 11:30:43] [Rank 0] step:2721/10000 train_time:127925ms step_avg:47.01ms +[2025-09-11 11:30:44] [Rank 0] step:2741/10000 train_time:128583ms step_avg:46.91ms +[2025-09-11 11:30:44] [Rank 0] step:2741/10000 train_time:128583ms step_avg:46.91ms +[2025-09-11 11:30:45] [Rank 0] step:2761/10000 train_time:129240ms step_avg:46.81ms +[2025-09-11 11:30:45] [Rank 0] step:2761/10000 train_time:129240ms step_avg:46.81ms +[2025-09-11 11:30:45] [Rank 0] step:2781/10000 train_time:129899ms step_avg:46.71ms +[2025-09-11 11:30:45] [Rank 0] step:2781/10000 train_time:129899ms step_avg:46.71ms +[2025-09-11 11:30:46] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 11:30:46] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 11:30:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 11:30:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 11:30:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 11:30:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 11:30:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:30:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:30:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 11:30:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 11:30:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 11:30:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 11:30:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 11:30:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 11:30:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 11:30:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 11:30:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 11:30:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 11:30:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 11:30:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 11:30:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 11:30:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 11:30:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 11:30:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 11:30:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 11:30:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 11:30:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 11:30:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 11:30:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 11:30:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 11:30:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 11:30:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 11:30:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 11:30:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 11:30:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 11:30:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 11:30:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 11:30:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 11:30:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 11:30:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 11:30:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 11:30:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 11:30:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 11:30:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 11:30:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:30:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:30:55] [Rank 0] PRINT: step:2800/10000 val_loss:5.4152 total_sharp:1.5249e-03 L1_sharp:6.0910e-04 L2_sharp:2.8685e-04 L3_sharp:1.8051e-04 L4_sharp:8.0890e-05 L5_sharp:1.6005e-04 L6_sharp:1.2828e-04 L7_sharp:9.6415e-05 L8_sharp:2.2097e-04 L9_sharp:2.3396e-04 L10_sharp:1.9881e-04 L11_sharp:3.2177e-04 L12_sharp:9.8829e-04 total_fnorm:2.1875e+01 total_l1_linf:6.1952e+04 total_spectral:1.0938e+01 L1_fnorm:6.4375e+00 L2_fnorm:6.2500e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.2500e+00 L5_fnorm:6.1562e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.1250e+00 L10_fnorm:6.1250e+00 L11_fnorm:6.0938e+00 L12_fnorm:5.7188e+00 L1_l1linf:1.7344e+00 L2_l1linf:1.6406e+00 L3_l1linf:1.6172e+00 L4_l1linf:1.5859e+00 L5_l1linf:1.5625e+00 L6_l1linf:1.5391e+00 L7_l1linf:1.5312e+00 L8_l1linf:1.5938e+00 L9_l1linf:1.6016e+00 L10_l1linf:1.6250e+00 L11_l1linf:1.6016e+00 L12_l1linf:1.3438e+00 L1_spectral:7.6793e-02 L2_spectral:7.4541e-02 L3_spectral:7.5314e-02 L4_spectral:7.5540e-02 L5_spectral:7.5290e-02 L6_spectral:7.4691e-02 L7_spectral:7.4274e-02 L8_spectral:7.3839e-02 L9_spectral:7.3658e-02 L10_spectral:7.3743e-02 L11_spectral:7.3401e-02 L12_spectral:7.3825e-02 train_time:130537ms step_avg:46.62ms +[2025-09-11 11:30:55] [Rank 0] PRINT: step:2800/10000 val_loss:5.4152 total_sharp:1.5249e-03 L1_sharp:6.0910e-04 L2_sharp:2.8685e-04 L3_sharp:1.8051e-04 L4_sharp:8.0890e-05 L5_sharp:1.6005e-04 L6_sharp:1.2828e-04 L7_sharp:9.6415e-05 L8_sharp:2.2097e-04 L9_sharp:2.3396e-04 L10_sharp:1.9881e-04 L11_sharp:3.2177e-04 L12_sharp:9.8829e-04 total_fnorm:2.1875e+01 total_l1_linf:6.1952e+04 total_spectral:1.0938e+01 L1_fnorm:6.4375e+00 L2_fnorm:6.2500e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.2500e+00 L5_fnorm:6.1562e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.1250e+00 L10_fnorm:6.1250e+00 L11_fnorm:6.0938e+00 L12_fnorm:5.7188e+00 L1_l1linf:1.7344e+00 L2_l1linf:1.6406e+00 L3_l1linf:1.6172e+00 L4_l1linf:1.5859e+00 L5_l1linf:1.5625e+00 
L6_l1linf:1.5391e+00 L7_l1linf:1.5312e+00 L8_l1linf:1.5938e+00 L9_l1linf:1.6016e+00 L10_l1linf:1.6250e+00 L11_l1linf:1.6016e+00 L12_l1linf:1.3438e+00 L1_spectral:7.6793e-02 L2_spectral:7.4541e-02 L3_spectral:7.5314e-02 L4_spectral:7.5540e-02 L5_spectral:7.5290e-02 L6_spectral:7.4691e-02 L7_spectral:7.4274e-02 L8_spectral:7.3839e-02 L9_spectral:7.3658e-02 L10_spectral:7.3743e-02 L11_spectral:7.3401e-02 L12_spectral:7.3825e-02 train_time:130537ms step_avg:46.62ms +[2025-09-11 11:30:57] [Rank 0] step:2801/10000 train_time:131754ms step_avg:47.04ms +[2025-09-11 11:30:57] [Rank 0] step:2801/10000 train_time:131754ms step_avg:47.04ms +[2025-09-11 11:30:57] [Rank 0] step:2821/10000 train_time:132415ms step_avg:46.94ms +[2025-09-11 11:30:57] [Rank 0] step:2821/10000 train_time:132415ms step_avg:46.94ms +[2025-09-11 11:30:58] [Rank 0] step:2841/10000 train_time:133074ms step_avg:46.84ms +[2025-09-11 11:30:58] [Rank 0] step:2841/10000 train_time:133074ms step_avg:46.84ms +[2025-09-11 11:30:59] [Rank 0] step:2861/10000 train_time:133732ms step_avg:46.74ms +[2025-09-11 11:30:59] [Rank 0] step:2861/10000 train_time:133732ms step_avg:46.74ms +[2025-09-11 11:30:59] [Rank 0] step:2881/10000 train_time:134391ms step_avg:46.65ms +[2025-09-11 11:30:59] [Rank 0] step:2881/10000 train_time:134391ms step_avg:46.65ms +[2025-09-11 11:31:00] [Rank 0] step:2901/10000 train_time:135048ms step_avg:46.55ms +[2025-09-11 11:31:00] [Rank 0] step:2901/10000 train_time:135048ms step_avg:46.55ms +[2025-09-11 11:31:01] [Rank 0] step:2921/10000 train_time:135705ms step_avg:46.46ms +[2025-09-11 11:31:01] [Rank 0] step:2921/10000 train_time:135705ms step_avg:46.46ms +[2025-09-11 11:31:01] [Rank 0] step:2941/10000 train_time:136363ms step_avg:46.37ms +[2025-09-11 11:31:01] [Rank 0] step:2941/10000 train_time:136363ms step_avg:46.37ms +[2025-09-11 11:31:02] [Rank 0] step:2961/10000 train_time:137021ms step_avg:46.28ms +[2025-09-11 11:31:02] [Rank 0] step:2961/10000 train_time:137021ms step_avg:46.28ms 
+[2025-09-11 11:31:03] [Rank 0] step:2981/10000 train_time:137681ms step_avg:46.19ms +[2025-09-11 11:31:03] [Rank 0] step:2981/10000 train_time:137681ms step_avg:46.19ms +[2025-09-11 11:31:03] [Rank 0] step:3001/10000 train_time:138341ms step_avg:46.10ms +[2025-09-11 11:31:03] [Rank 0] step:3001/10000 train_time:138341ms step_avg:46.10ms +[2025-09-11 11:31:04] [Rank 0] step:3021/10000 train_time:139002ms step_avg:46.01ms +[2025-09-11 11:31:04] [Rank 0] step:3021/10000 train_time:139002ms step_avg:46.01ms +[2025-09-11 11:31:05] [Rank 0] step:3041/10000 train_time:139662ms step_avg:45.93ms +[2025-09-11 11:31:05] [Rank 0] step:3041/10000 train_time:139662ms step_avg:45.93ms +[2025-09-11 11:31:05] [Rank 0] step:3061/10000 train_time:140322ms step_avg:45.84ms +[2025-09-11 11:31:05] [Rank 0] step:3061/10000 train_time:140322ms step_avg:45.84ms +[2025-09-11 11:31:06] [Rank 0] step:3081/10000 train_time:140983ms step_avg:45.76ms +[2025-09-11 11:31:06] [Rank 0] step:3081/10000 train_time:140983ms step_avg:45.76ms +[2025-09-11 11:31:07] [Rank 0] step:3101/10000 train_time:141643ms step_avg:45.68ms +[2025-09-11 11:31:07] [Rank 0] step:3101/10000 train_time:141643ms step_avg:45.68ms +[2025-09-11 11:31:07] [Rank 0] step:3121/10000 train_time:142303ms step_avg:45.60ms +[2025-09-11 11:31:07] [Rank 0] step:3121/10000 train_time:142303ms step_avg:45.60ms +[2025-09-11 11:31:08] [Rank 0] step:3141/10000 train_time:142963ms step_avg:45.52ms +[2025-09-11 11:31:08] [Rank 0] step:3141/10000 train_time:142963ms step_avg:45.52ms +[2025-09-11 11:31:09] [Rank 0] step:3161/10000 train_time:143623ms step_avg:45.44ms +[2025-09-11 11:31:09] [Rank 0] step:3161/10000 train_time:143623ms step_avg:45.44ms +[2025-09-11 11:31:09] [Rank 0] step:3181/10000 train_time:144283ms step_avg:45.36ms +[2025-09-11 11:31:09] [Rank 0] step:3181/10000 train_time:144283ms step_avg:45.36ms +[2025-09-11 11:31:10] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 11:31:10] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 11:31:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 11:31:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 11:31:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 11:31:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 11:31:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:31:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:31:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 11:31:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 11:31:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 11:31:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 11:31:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 11:31:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 11:31:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 11:31:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 11:31:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 11:31:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 11:31:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 11:31:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 11:31:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 11:31:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 11:31:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 11:31:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 11:31:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 11:31:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 11:31:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 11:31:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 11:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 11:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 11:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 11:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 11:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 11:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 11:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 11:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 11:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 11:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 11:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 11:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 11:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 11:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 11:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 11:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 11:31:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:31:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:31:20] [Rank 0] PRINT: step:3200/10000 val_loss:5.3264 total_sharp:1.3752e-03 L1_sharp:6.3185e-04 L2_sharp:3.2570e-04 L3_sharp:1.9769e-04 L4_sharp:9.8529e-05 L5_sharp:1.7875e-04 L6_sharp:1.3683e-04 L7_sharp:9.0955e-05 L8_sharp:2.3525e-04 L9_sharp:1.9938e-04 L10_sharp:1.8712e-04 L11_sharp:2.7750e-04 L12_sharp:8.6772e-04 total_fnorm:2.2125e+01 total_l1_linf:6.2464e+04 total_spectral:1.1062e+01 L1_fnorm:6.4375e+00 L2_fnorm:6.2500e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.2500e+00 L5_fnorm:6.1562e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.0625e+00 L9_fnorm:6.1875e+00 L10_fnorm:6.1875e+00 L11_fnorm:6.1562e+00 L12_fnorm:5.8750e+00 L1_l1linf:1.7188e+00 L2_l1linf:1.6016e+00 L3_l1linf:1.5703e+00 L4_l1linf:1.5547e+00 L5_l1linf:1.5391e+00 L6_l1linf:1.5000e+00 L7_l1linf:1.4922e+00 L8_l1linf:1.5312e+00 L9_l1linf:1.5469e+00 L10_l1linf:1.5625e+00 L11_l1linf:1.5938e+00 L12_l1linf:1.3828e+00 L1_spectral:7.7230e-02 L2_spectral:7.5215e-02 L3_spectral:7.5985e-02 L4_spectral:7.6434e-02 L5_spectral:7.6098e-02 L6_spectral:7.5919e-02 L7_spectral:7.5738e-02 L8_spectral:7.4577e-02 L9_spectral:7.4971e-02 L10_spectral:7.5192e-02 L11_spectral:7.4458e-02 L12_spectral:7.4501e-02 train_time:144925ms step_avg:45.29ms +[2025-09-11 11:31:20] [Rank 0] PRINT: step:3200/10000 
val_loss:5.3264 total_sharp:1.3752e-03 L1_sharp:6.3185e-04 L2_sharp:3.2570e-04 L3_sharp:1.9769e-04 L4_sharp:9.8529e-05 L5_sharp:1.7875e-04 L6_sharp:1.3683e-04 L7_sharp:9.0955e-05 L8_sharp:2.3525e-04 L9_sharp:1.9938e-04 L10_sharp:1.8712e-04 L11_sharp:2.7750e-04 L12_sharp:8.6772e-04 total_fnorm:2.2125e+01 total_l1_linf:6.2464e+04 total_spectral:1.1062e+01 L1_fnorm:6.4375e+00 L2_fnorm:6.2500e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.2500e+00 L5_fnorm:6.1562e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.0625e+00 L9_fnorm:6.1875e+00 L10_fnorm:6.1875e+00 L11_fnorm:6.1562e+00 L12_fnorm:5.8750e+00 L1_l1linf:1.7188e+00 L2_l1linf:1.6016e+00 L3_l1linf:1.5703e+00 L4_l1linf:1.5547e+00 L5_l1linf:1.5391e+00 L6_l1linf:1.5000e+00 L7_l1linf:1.4922e+00 L8_l1linf:1.5312e+00 L9_l1linf:1.5469e+00 L10_l1linf:1.5625e+00 L11_l1linf:1.5938e+00 L12_l1linf:1.3828e+00 L1_spectral:7.7230e-02 L2_spectral:7.5215e-02 L3_spectral:7.5985e-02 L4_spectral:7.6434e-02 L5_spectral:7.6098e-02 L6_spectral:7.5919e-02 L7_spectral:7.5738e-02 L8_spectral:7.4577e-02 L9_spectral:7.4971e-02 L10_spectral:7.5192e-02 L11_spectral:7.4458e-02 L12_spectral:7.4501e-02 train_time:144925ms step_avg:45.29ms +[2025-09-11 11:31:21] [Rank 0] step:3201/10000 train_time:146180ms step_avg:45.67ms +[2025-09-11 11:31:21] [Rank 0] step:3201/10000 train_time:146180ms step_avg:45.67ms +[2025-09-11 11:31:22] [Rank 0] step:3221/10000 train_time:146845ms step_avg:45.59ms +[2025-09-11 11:31:22] [Rank 0] step:3221/10000 train_time:146845ms step_avg:45.59ms +[2025-09-11 11:31:22] [Rank 0] step:3241/10000 train_time:147507ms step_avg:45.51ms +[2025-09-11 11:31:22] [Rank 0] step:3241/10000 train_time:147507ms step_avg:45.51ms +[2025-09-11 11:31:23] [Rank 0] step:3261/10000 train_time:148168ms step_avg:45.44ms +[2025-09-11 11:31:23] [Rank 0] step:3261/10000 train_time:148168ms step_avg:45.44ms +[2025-09-11 11:31:24] [Rank 0] step:3281/10000 train_time:148829ms step_avg:45.36ms +[2025-09-11 11:31:24] [Rank 0] step:3281/10000 
train_time:148829ms step_avg:45.36ms +[2025-09-11 11:31:24] [Rank 0] step:3301/10000 train_time:149490ms step_avg:45.29ms +[2025-09-11 11:31:24] [Rank 0] step:3301/10000 train_time:149490ms step_avg:45.29ms +[2025-09-11 11:31:25] [Rank 0] step:3321/10000 train_time:150150ms step_avg:45.21ms +[2025-09-11 11:31:25] [Rank 0] step:3321/10000 train_time:150150ms step_avg:45.21ms +[2025-09-11 11:31:26] [Rank 0] step:3341/10000 train_time:150811ms step_avg:45.14ms +[2025-09-11 11:31:26] [Rank 0] step:3341/10000 train_time:150811ms step_avg:45.14ms +[2025-09-11 11:31:26] [Rank 0] step:3361/10000 train_time:151474ms step_avg:45.07ms +[2025-09-11 11:31:26] [Rank 0] step:3361/10000 train_time:151474ms step_avg:45.07ms +[2025-09-11 11:31:27] [Rank 0] step:3381/10000 train_time:152134ms step_avg:45.00ms +[2025-09-11 11:31:27] [Rank 0] step:3381/10000 train_time:152134ms step_avg:45.00ms +[2025-09-11 11:31:28] [Rank 0] step:3401/10000 train_time:152795ms step_avg:44.93ms +[2025-09-11 11:31:28] [Rank 0] step:3401/10000 train_time:152795ms step_avg:44.93ms +[2025-09-11 11:31:28] [Rank 0] step:3421/10000 train_time:153455ms step_avg:44.86ms +[2025-09-11 11:31:28] [Rank 0] step:3421/10000 train_time:153455ms step_avg:44.86ms +[2025-09-11 11:31:29] [Rank 0] step:3441/10000 train_time:154115ms step_avg:44.79ms +[2025-09-11 11:31:29] [Rank 0] step:3441/10000 train_time:154115ms step_avg:44.79ms +[2025-09-11 11:31:30] [Rank 0] step:3461/10000 train_time:154776ms step_avg:44.72ms +[2025-09-11 11:31:30] [Rank 0] step:3461/10000 train_time:154776ms step_avg:44.72ms +[2025-09-11 11:31:30] [Rank 0] step:3481/10000 train_time:155436ms step_avg:44.65ms +[2025-09-11 11:31:30] [Rank 0] step:3481/10000 train_time:155436ms step_avg:44.65ms +[2025-09-11 11:31:31] [Rank 0] step:3501/10000 train_time:156096ms step_avg:44.59ms +[2025-09-11 11:31:31] [Rank 0] step:3501/10000 train_time:156096ms step_avg:44.59ms +[2025-09-11 11:31:32] [Rank 0] step:3521/10000 train_time:156756ms step_avg:44.52ms 
+[2025-09-11 11:31:32] [Rank 0] step:3521/10000 train_time:156756ms step_avg:44.52ms +[2025-09-11 11:31:32] [Rank 0] step:3541/10000 train_time:157417ms step_avg:44.46ms +[2025-09-11 11:31:32] [Rank 0] step:3541/10000 train_time:157417ms step_avg:44.46ms +[2025-09-11 11:31:33] [Rank 0] step:3561/10000 train_time:158078ms step_avg:44.39ms +[2025-09-11 11:31:33] [Rank 0] step:3561/10000 train_time:158078ms step_avg:44.39ms +[2025-09-11 11:31:34] [Rank 0] step:3581/10000 train_time:159032ms step_avg:44.41ms +[2025-09-11 11:31:34] [Rank 0] step:3581/10000 train_time:159032ms step_avg:44.41ms +[2025-09-11 11:31:35] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 11:31:35] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 11:31:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 11:31:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 11:31:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 11:31:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 11:31:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:31:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:31:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 11:31:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 11:31:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 11:31:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 11:31:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 11:31:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 11:31:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 11:31:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 11:31:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 11:31:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 11:31:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 11:31:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 11:31:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 11:31:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 11:31:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 11:31:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 11:31:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 11:31:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 11:31:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 11:31:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 11:31:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 11:31:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 11:31:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 11:31:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 11:31:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 11:31:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 11:31:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 11:31:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 11:31:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 11:31:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 11:31:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 11:31:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 11:31:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 11:31:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 11:31:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 11:31:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 11:31:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:31:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:31:45] [Rank 0] PRINT: step:3600/10000 val_loss:5.2756 total_sharp:1.2873e-03 L1_sharp:4.1350e-04 L2_sharp:1.9648e-04 L3_sharp:1.6362e-04 L4_sharp:6.1910e-05 L5_sharp:1.7250e-04 L6_sharp:1.2699e-04 L7_sharp:1.0724e-04 L8_sharp:2.5720e-04 L9_sharp:2.1161e-04 L10_sharp:2.0045e-04 L11_sharp:3.1372e-04 L12_sharp:1.1438e-03 total_fnorm:2.2000e+01 total_l1_linf:6.0416e+04 total_spectral:1.1000e+01 L1_fnorm:6.4375e+00 L2_fnorm:6.2500e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.2500e+00 L5_fnorm:6.1562e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2500e+00 L8_fnorm:6.0938e+00 L9_fnorm:6.2188e+00 L10_fnorm:6.2188e+00 L11_fnorm:6.2188e+00 L12_fnorm:5.9062e+00 L1_l1linf:1.7344e+00 L2_l1linf:1.5938e+00 L3_l1linf:1.5547e+00 L4_l1linf:1.5469e+00 L5_l1linf:1.5078e+00 L6_l1linf:1.4844e+00 L7_l1linf:1.4922e+00 L8_l1linf:1.5000e+00 L9_l1linf:1.4922e+00 L10_l1linf:1.5391e+00 L11_l1linf:1.5625e+00 L12_l1linf:1.3672e+00 L1_spectral:7.8251e-02 L2_spectral:7.5857e-02 L3_spectral:7.6427e-02 L4_spectral:7.7305e-02 L5_spectral:7.6789e-02 L6_spectral:7.7250e-02 L7_spectral:7.6749e-02 L8_spectral:7.5320e-02 L9_spectral:7.5535e-02 L10_spectral:7.5844e-02 L11_spectral:7.5471e-02 L12_spectral:7.5266e-02 train_time:159938ms step_avg:44.43ms +[2025-09-11 11:31:45] [Rank 0] PRINT: step:3600/10000 val_loss:5.2756 total_sharp:1.2873e-03 L1_sharp:4.1350e-04 L2_sharp:1.9648e-04 L3_sharp:1.6362e-04 L4_sharp:6.1910e-05 L5_sharp:1.7250e-04 L6_sharp:1.2699e-04 L7_sharp:1.0724e-04 L8_sharp:2.5720e-04 L9_sharp:2.1161e-04 L10_sharp:2.0045e-04 L11_sharp:3.1372e-04 L12_sharp:1.1438e-03 total_fnorm:2.2000e+01 total_l1_linf:6.0416e+04 total_spectral:1.1000e+01 L1_fnorm:6.4375e+00 L2_fnorm:6.2500e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.2500e+00 L5_fnorm:6.1562e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2500e+00 L8_fnorm:6.0938e+00 L9_fnorm:6.2188e+00 L10_fnorm:6.2188e+00 L11_fnorm:6.2188e+00 L12_fnorm:5.9062e+00 L1_l1linf:1.7344e+00 L2_l1linf:1.5938e+00 L3_l1linf:1.5547e+00 L4_l1linf:1.5469e+00 L5_l1linf:1.5078e+00 
L6_l1linf:1.4844e+00 L7_l1linf:1.4922e+00 L8_l1linf:1.5000e+00 L9_l1linf:1.4922e+00 L10_l1linf:1.5391e+00 L11_l1linf:1.5625e+00 L12_l1linf:1.3672e+00 L1_spectral:7.8251e-02 L2_spectral:7.5857e-02 L3_spectral:7.6427e-02 L4_spectral:7.7305e-02 L5_spectral:7.6789e-02 L6_spectral:7.7250e-02 L7_spectral:7.6749e-02 L8_spectral:7.5320e-02 L9_spectral:7.5535e-02 L10_spectral:7.5844e-02 L11_spectral:7.5471e-02 L12_spectral:7.5266e-02 train_time:159938ms step_avg:44.43ms +[2025-09-11 11:31:46] [Rank 0] step:3601/10000 train_time:161311ms step_avg:44.80ms +[2025-09-11 11:31:46] [Rank 0] step:3601/10000 train_time:161311ms step_avg:44.80ms +[2025-09-11 11:31:47] [Rank 0] step:3621/10000 train_time:161977ms step_avg:44.73ms +[2025-09-11 11:31:47] [Rank 0] step:3621/10000 train_time:161977ms step_avg:44.73ms +[2025-09-11 11:31:48] [Rank 0] step:3641/10000 train_time:162638ms step_avg:44.67ms +[2025-09-11 11:31:48] [Rank 0] step:3641/10000 train_time:162638ms step_avg:44.67ms +[2025-09-11 11:31:48] [Rank 0] step:3661/10000 train_time:163299ms step_avg:44.61ms +[2025-09-11 11:31:48] [Rank 0] step:3661/10000 train_time:163299ms step_avg:44.61ms +[2025-09-11 11:31:49] [Rank 0] step:3681/10000 train_time:163960ms step_avg:44.54ms +[2025-09-11 11:31:49] [Rank 0] step:3681/10000 train_time:163960ms step_avg:44.54ms +[2025-09-11 11:31:50] [Rank 0] step:3701/10000 train_time:164620ms step_avg:44.48ms +[2025-09-11 11:31:50] [Rank 0] step:3701/10000 train_time:164620ms step_avg:44.48ms +[2025-09-11 11:31:50] [Rank 0] step:3721/10000 train_time:165291ms step_avg:44.42ms +[2025-09-11 11:31:50] [Rank 0] step:3721/10000 train_time:165291ms step_avg:44.42ms +[2025-09-11 11:31:51] [Rank 0] step:3741/10000 train_time:165963ms step_avg:44.36ms +[2025-09-11 11:31:51] [Rank 0] step:3741/10000 train_time:165963ms step_avg:44.36ms +[2025-09-11 11:31:52] [Rank 0] step:3761/10000 train_time:166635ms step_avg:44.31ms +[2025-09-11 11:31:52] [Rank 0] step:3761/10000 train_time:166635ms step_avg:44.31ms 
+[2025-09-11 11:31:52] [Rank 0] step:3781/10000 train_time:167307ms step_avg:44.25ms +[2025-09-11 11:31:52] [Rank 0] step:3781/10000 train_time:167307ms step_avg:44.25ms +[2025-09-11 11:31:53] [Rank 0] step:3801/10000 train_time:167979ms step_avg:44.19ms +[2025-09-11 11:31:53] [Rank 0] step:3801/10000 train_time:167979ms step_avg:44.19ms +[2025-09-11 11:31:54] [Rank 0] step:3821/10000 train_time:168652ms step_avg:44.14ms +[2025-09-11 11:31:54] [Rank 0] step:3821/10000 train_time:168652ms step_avg:44.14ms +[2025-09-11 11:31:54] [Rank 0] step:3841/10000 train_time:169324ms step_avg:44.08ms +[2025-09-11 11:31:54] [Rank 0] step:3841/10000 train_time:169324ms step_avg:44.08ms +[2025-09-11 11:31:55] [Rank 0] step:3861/10000 train_time:169996ms step_avg:44.03ms +[2025-09-11 11:31:55] [Rank 0] step:3861/10000 train_time:169996ms step_avg:44.03ms +[2025-09-11 11:31:56] [Rank 0] step:3881/10000 train_time:170667ms step_avg:43.97ms +[2025-09-11 11:31:56] [Rank 0] step:3881/10000 train_time:170667ms step_avg:43.97ms +[2025-09-11 11:31:56] [Rank 0] step:3901/10000 train_time:171338ms step_avg:43.92ms +[2025-09-11 11:31:56] [Rank 0] step:3901/10000 train_time:171338ms step_avg:43.92ms +[2025-09-11 11:31:57] [Rank 0] step:3921/10000 train_time:172010ms step_avg:43.87ms +[2025-09-11 11:31:57] [Rank 0] step:3921/10000 train_time:172010ms step_avg:43.87ms +[2025-09-11 11:31:58] [Rank 0] step:3941/10000 train_time:172682ms step_avg:43.82ms +[2025-09-11 11:31:58] [Rank 0] step:3941/10000 train_time:172682ms step_avg:43.82ms +[2025-09-11 11:31:58] [Rank 0] step:3961/10000 train_time:173353ms step_avg:43.77ms +[2025-09-11 11:31:58] [Rank 0] step:3961/10000 train_time:173353ms step_avg:43.77ms +[2025-09-11 11:31:59] [Rank 0] step:3981/10000 train_time:174025ms step_avg:43.71ms +[2025-09-11 11:31:59] [Rank 0] step:3981/10000 train_time:174025ms step_avg:43.71ms +[2025-09-11 11:32:00] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 11:32:00] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 11:32:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 11:32:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 11:32:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 11:32:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 11:32:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:32:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:32:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 11:32:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 11:32:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 11:32:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 11:32:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 11:32:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 11:32:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 11:32:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 11:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 11:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 11:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 11:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 11:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 11:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 11:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 11:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 11:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 11:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 11:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 11:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 11:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 11:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 11:32:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 11:32:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 11:32:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 11:32:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 11:32:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 11:32:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 11:32:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 11:32:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 11:32:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 11:32:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 11:32:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 11:32:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 11:32:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 11:32:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 11:32:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:32:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:32:10] [Rank 0] PRINT: step:4000/10000 val_loss:5.2180 total_sharp:1.6515e-03 L1_sharp:3.6507e-04 L2_sharp:2.7105e-04 L3_sharp:1.5293e-04 L4_sharp:1.2143e-04 L5_sharp:1.7252e-04 L6_sharp:1.8553e-04 L7_sharp:1.0405e-04 L8_sharp:2.8740e-04 L9_sharp:2.5715e-04 L10_sharp:2.2630e-04 L11_sharp:3.3230e-04 L12_sharp:9.9318e-04 total_fnorm:2.1875e+01 total_l1_linf:6.0160e+04 total_spectral:1.1000e+01 L1_fnorm:6.4062e+00 L2_fnorm:6.2188e+00 L3_fnorm:6.1875e+00 L4_fnorm:6.2188e+00 L5_fnorm:6.0938e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.1562e+00 L10_fnorm:6.1562e+00 L11_fnorm:6.2188e+00 L12_fnorm:5.9062e+00 L1_l1linf:1.7031e+00 L2_l1linf:1.6016e+00 L3_l1linf:1.5391e+00 L4_l1linf:1.5469e+00 L5_l1linf:1.4844e+00 L6_l1linf:1.4766e+00 L7_l1linf:1.4688e+00 L8_l1linf:1.4844e+00 L9_l1linf:1.5078e+00 L10_l1linf:1.5078e+00 L11_l1linf:1.5547e+00 L12_l1linf:1.3516e+00 L1_spectral:7.8702e-02 L2_spectral:7.6185e-02 L3_spectral:7.6970e-02 L4_spectral:7.7388e-02 L5_spectral:7.6569e-02 L6_spectral:7.7243e-02 L7_spectral:7.6955e-02 L8_spectral:7.6125e-02 L9_spectral:7.6287e-02 L10_spectral:7.6213e-02 L11_spectral:7.6179e-02 L12_spectral:7.5901e-02 train_time:174677ms step_avg:43.67ms +[2025-09-11 11:32:10] [Rank 0] PRINT: step:4000/10000 
val_loss:5.2180 total_sharp:1.6515e-03 L1_sharp:3.6507e-04 L2_sharp:2.7105e-04 L3_sharp:1.5293e-04 L4_sharp:1.2143e-04 L5_sharp:1.7252e-04 L6_sharp:1.8553e-04 L7_sharp:1.0405e-04 L8_sharp:2.8740e-04 L9_sharp:2.5715e-04 L10_sharp:2.2630e-04 L11_sharp:3.3230e-04 L12_sharp:9.9318e-04 total_fnorm:2.1875e+01 total_l1_linf:6.0160e+04 total_spectral:1.1000e+01 L1_fnorm:6.4062e+00 L2_fnorm:6.2188e+00 L3_fnorm:6.1875e+00 L4_fnorm:6.2188e+00 L5_fnorm:6.0938e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.1562e+00 L10_fnorm:6.1562e+00 L11_fnorm:6.2188e+00 L12_fnorm:5.9062e+00 L1_l1linf:1.7031e+00 L2_l1linf:1.6016e+00 L3_l1linf:1.5391e+00 L4_l1linf:1.5469e+00 L5_l1linf:1.4844e+00 L6_l1linf:1.4766e+00 L7_l1linf:1.4688e+00 L8_l1linf:1.4844e+00 L9_l1linf:1.5078e+00 L10_l1linf:1.5078e+00 L11_l1linf:1.5547e+00 L12_l1linf:1.3516e+00 L1_spectral:7.8702e-02 L2_spectral:7.6185e-02 L3_spectral:7.6970e-02 L4_spectral:7.7388e-02 L5_spectral:7.6569e-02 L6_spectral:7.7243e-02 L7_spectral:7.6955e-02 L8_spectral:7.6125e-02 L9_spectral:7.6287e-02 L10_spectral:7.6213e-02 L11_spectral:7.6179e-02 L12_spectral:7.5901e-02 train_time:174677ms step_avg:43.67ms +[2025-09-11 11:32:11] [Rank 0] step:4001/10000 train_time:175928ms step_avg:43.97ms +[2025-09-11 11:32:11] [Rank 0] step:4001/10000 train_time:175928ms step_avg:43.97ms +[2025-09-11 11:32:12] [Rank 0] step:4021/10000 train_time:176628ms step_avg:43.93ms +[2025-09-11 11:32:12] [Rank 0] step:4021/10000 train_time:176628ms step_avg:43.93ms +[2025-09-11 11:32:12] [Rank 0] step:4041/10000 train_time:177301ms step_avg:43.88ms +[2025-09-11 11:32:12] [Rank 0] step:4041/10000 train_time:177301ms step_avg:43.88ms +[2025-09-11 11:32:13] [Rank 0] step:4061/10000 train_time:177972ms step_avg:43.82ms +[2025-09-11 11:32:13] [Rank 0] step:4061/10000 train_time:177972ms step_avg:43.82ms +[2025-09-11 11:32:14] [Rank 0] step:4081/10000 train_time:178643ms step_avg:43.77ms +[2025-09-11 11:32:14] [Rank 0] step:4081/10000 
train_time:178643ms step_avg:43.77ms +[2025-09-11 11:32:14] [Rank 0] step:4101/10000 train_time:179315ms step_avg:43.72ms +[2025-09-11 11:32:14] [Rank 0] step:4101/10000 train_time:179315ms step_avg:43.72ms +[2025-09-11 11:32:15] [Rank 0] step:4121/10000 train_time:179985ms step_avg:43.68ms +[2025-09-11 11:32:15] [Rank 0] step:4121/10000 train_time:179985ms step_avg:43.68ms +[2025-09-11 11:32:16] [Rank 0] step:4141/10000 train_time:180656ms step_avg:43.63ms +[2025-09-11 11:32:16] [Rank 0] step:4141/10000 train_time:180656ms step_avg:43.63ms +[2025-09-11 11:32:16] [Rank 0] step:4161/10000 train_time:181327ms step_avg:43.58ms +[2025-09-11 11:32:16] [Rank 0] step:4161/10000 train_time:181327ms step_avg:43.58ms +[2025-09-11 11:32:17] [Rank 0] step:4181/10000 train_time:181999ms step_avg:43.53ms +[2025-09-11 11:32:17] [Rank 0] step:4181/10000 train_time:181999ms step_avg:43.53ms +[2025-09-11 11:32:18] [Rank 0] step:4201/10000 train_time:182670ms step_avg:43.48ms +[2025-09-11 11:32:18] [Rank 0] step:4201/10000 train_time:182670ms step_avg:43.48ms +[2025-09-11 11:32:18] [Rank 0] step:4221/10000 train_time:183340ms step_avg:43.44ms +[2025-09-11 11:32:18] [Rank 0] step:4221/10000 train_time:183340ms step_avg:43.44ms +[2025-09-11 11:32:19] [Rank 0] step:4241/10000 train_time:184011ms step_avg:43.39ms +[2025-09-11 11:32:19] [Rank 0] step:4241/10000 train_time:184011ms step_avg:43.39ms +[2025-09-11 11:32:20] [Rank 0] step:4261/10000 train_time:184683ms step_avg:43.34ms +[2025-09-11 11:32:20] [Rank 0] step:4261/10000 train_time:184683ms step_avg:43.34ms +[2025-09-11 11:32:20] [Rank 0] step:4281/10000 train_time:185355ms step_avg:43.30ms +[2025-09-11 11:32:20] [Rank 0] step:4281/10000 train_time:185355ms step_avg:43.30ms +[2025-09-11 11:32:21] [Rank 0] step:4301/10000 train_time:186028ms step_avg:43.25ms +[2025-09-11 11:32:21] [Rank 0] step:4301/10000 train_time:186028ms step_avg:43.25ms +[2025-09-11 11:32:22] [Rank 0] step:4321/10000 train_time:186699ms step_avg:43.21ms 
+[2025-09-11 11:32:22] [Rank 0] step:4321/10000 train_time:186699ms step_avg:43.21ms +[2025-09-11 11:32:22] [Rank 0] step:4341/10000 train_time:187370ms step_avg:43.16ms +[2025-09-11 11:32:22] [Rank 0] step:4341/10000 train_time:187370ms step_avg:43.16ms +[2025-09-11 11:32:23] [Rank 0] step:4361/10000 train_time:188041ms step_avg:43.12ms +[2025-09-11 11:32:23] [Rank 0] step:4361/10000 train_time:188041ms step_avg:43.12ms +[2025-09-11 11:32:24] [Rank 0] step:4381/10000 train_time:188713ms step_avg:43.08ms +[2025-09-11 11:32:24] [Rank 0] step:4381/10000 train_time:188713ms step_avg:43.08ms +[2025-09-11 11:32:24] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 11:32:24] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 11:32:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 11:32:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 11:32:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 11:32:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 11:32:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:32:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:32:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 11:32:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 11:32:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 11:32:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 11:32:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 11:32:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 11:32:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 11:32:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 11:32:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 11:32:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 11:32:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 11:32:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 11:32:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 11:32:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 11:32:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 11:32:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 11:32:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 11:32:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 11:32:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 11:32:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 11:32:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 11:32:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 11:32:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 11:32:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 11:32:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 11:32:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 11:32:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 11:32:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 11:32:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 11:32:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 11:32:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 11:32:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 11:32:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 11:32:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 11:32:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 11:32:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 11:32:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:32:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:32:34] [Rank 0] PRINT: step:4400/10000 val_loss:5.1763 total_sharp:1.0754e-03 L1_sharp:4.3400e-04 L2_sharp:1.5399e-04 L3_sharp:9.1401e-05 L4_sharp:6.9817e-05 L5_sharp:1.1679e-04 L6_sharp:1.1988e-04 L7_sharp:7.9000e-05 L8_sharp:2.2552e-04 L9_sharp:1.8845e-04 L10_sharp:2.0231e-04 L11_sharp:2.5718e-04 L12_sharp:1.0888e-03 total_fnorm:2.1875e+01 total_l1_linf:5.8880e+04 total_spectral:1.1000e+01 L1_fnorm:6.4375e+00 L2_fnorm:6.2188e+00 L3_fnorm:6.1875e+00 L4_fnorm:6.2188e+00 L5_fnorm:6.0938e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.0625e+00 L9_fnorm:6.2188e+00 L10_fnorm:6.2188e+00 L11_fnorm:6.2188e+00 L12_fnorm:5.9062e+00 L1_l1linf:1.6953e+00 L2_l1linf:1.5547e+00 L3_l1linf:1.5234e+00 L4_l1linf:1.4922e+00 L5_l1linf:1.4766e+00 L6_l1linf:1.4531e+00 L7_l1linf:1.4453e+00 L8_l1linf:1.4609e+00 L9_l1linf:1.4766e+00 L10_l1linf:1.5078e+00 L11_l1linf:1.5391e+00 L12_l1linf:1.3281e+00 L1_spectral:7.9051e-02 L2_spectral:7.6618e-02 L3_spectral:7.7117e-02 L4_spectral:7.7711e-02 L5_spectral:7.7669e-02 L6_spectral:7.7766e-02 L7_spectral:7.7628e-02 L8_spectral:7.6678e-02 L9_spectral:7.6557e-02 L10_spectral:7.6805e-02 L11_spectral:7.7112e-02 L12_spectral:7.6619e-02 train_time:189365ms step_avg:43.04ms +[2025-09-11 11:32:34] [Rank 0] PRINT: step:4400/10000 val_loss:5.1763 total_sharp:1.0754e-03 L1_sharp:4.3400e-04 L2_sharp:1.5399e-04 L3_sharp:9.1401e-05 L4_sharp:6.9817e-05 L5_sharp:1.1679e-04 L6_sharp:1.1988e-04 L7_sharp:7.9000e-05 L8_sharp:2.2552e-04 L9_sharp:1.8845e-04 L10_sharp:2.0231e-04 L11_sharp:2.5718e-04 L12_sharp:1.0888e-03 total_fnorm:2.1875e+01 total_l1_linf:5.8880e+04 total_spectral:1.1000e+01 L1_fnorm:6.4375e+00 L2_fnorm:6.2188e+00 L3_fnorm:6.1875e+00 L4_fnorm:6.2188e+00 L5_fnorm:6.0938e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.0625e+00 L9_fnorm:6.2188e+00 L10_fnorm:6.2188e+00 L11_fnorm:6.2188e+00 L12_fnorm:5.9062e+00 L1_l1linf:1.6953e+00 L2_l1linf:1.5547e+00 L3_l1linf:1.5234e+00 L4_l1linf:1.4922e+00 L5_l1linf:1.4766e+00 
L6_l1linf:1.4531e+00 L7_l1linf:1.4453e+00 L8_l1linf:1.4609e+00 L9_l1linf:1.4766e+00 L10_l1linf:1.5078e+00 L11_l1linf:1.5391e+00 L12_l1linf:1.3281e+00 L1_spectral:7.9051e-02 L2_spectral:7.6618e-02 L3_spectral:7.7117e-02 L4_spectral:7.7711e-02 L5_spectral:7.7669e-02 L6_spectral:7.7766e-02 L7_spectral:7.7628e-02 L8_spectral:7.6678e-02 L9_spectral:7.6557e-02 L10_spectral:7.6805e-02 L11_spectral:7.7112e-02 L12_spectral:7.6619e-02 train_time:189365ms step_avg:43.04ms +[2025-09-11 11:32:36] [Rank 0] step:4401/10000 train_time:190715ms step_avg:43.33ms +[2025-09-11 11:32:36] [Rank 0] step:4401/10000 train_time:190715ms step_avg:43.33ms +[2025-09-11 11:32:36] [Rank 0] step:4421/10000 train_time:191406ms step_avg:43.29ms +[2025-09-11 11:32:36] [Rank 0] step:4421/10000 train_time:191406ms step_avg:43.29ms +[2025-09-11 11:32:37] [Rank 0] step:4441/10000 train_time:192443ms step_avg:43.33ms +[2025-09-11 11:32:37] [Rank 0] step:4441/10000 train_time:192443ms step_avg:43.33ms +[2025-09-11 11:32:38] [Rank 0] step:4461/10000 train_time:193254ms step_avg:43.32ms +[2025-09-11 11:32:38] [Rank 0] step:4461/10000 train_time:193254ms step_avg:43.32ms +[2025-09-11 11:32:39] [Rank 0] step:4481/10000 train_time:193929ms step_avg:43.28ms +[2025-09-11 11:32:39] [Rank 0] step:4481/10000 train_time:193929ms step_avg:43.28ms +[2025-09-11 11:32:40] [Rank 0] step:4501/10000 train_time:194898ms step_avg:43.30ms +[2025-09-11 11:32:40] [Rank 0] step:4501/10000 train_time:194898ms step_avg:43.30ms +[2025-09-11 11:32:40] [Rank 0] step:4521/10000 train_time:195573ms step_avg:43.26ms +[2025-09-11 11:32:40] [Rank 0] step:4521/10000 train_time:195573ms step_avg:43.26ms +[2025-09-11 11:32:41] [Rank 0] step:4541/10000 train_time:196248ms step_avg:43.22ms +[2025-09-11 11:32:41] [Rank 0] step:4541/10000 train_time:196248ms step_avg:43.22ms +[2025-09-11 11:32:42] [Rank 0] step:4561/10000 train_time:196922ms step_avg:43.18ms +[2025-09-11 11:32:42] [Rank 0] step:4561/10000 train_time:196922ms step_avg:43.18ms 
+[2025-09-11 11:32:42] [Rank 0] step:4581/10000 train_time:197596ms step_avg:43.13ms +[2025-09-11 11:32:42] [Rank 0] step:4581/10000 train_time:197596ms step_avg:43.13ms +[2025-09-11 11:32:43] [Rank 0] step:4601/10000 train_time:198270ms step_avg:43.09ms +[2025-09-11 11:32:43] [Rank 0] step:4601/10000 train_time:198270ms step_avg:43.09ms +[2025-09-11 11:32:44] [Rank 0] step:4621/10000 train_time:198945ms step_avg:43.05ms +[2025-09-11 11:32:44] [Rank 0] step:4621/10000 train_time:198945ms step_avg:43.05ms +[2025-09-11 11:32:44] [Rank 0] step:4641/10000 train_time:199619ms step_avg:43.01ms +[2025-09-11 11:32:44] [Rank 0] step:4641/10000 train_time:199619ms step_avg:43.01ms +[2025-09-11 11:32:45] [Rank 0] step:4661/10000 train_time:200294ms step_avg:42.97ms +[2025-09-11 11:32:45] [Rank 0] step:4661/10000 train_time:200294ms step_avg:42.97ms +[2025-09-11 11:32:46] [Rank 0] step:4681/10000 train_time:200968ms step_avg:42.93ms +[2025-09-11 11:32:46] [Rank 0] step:4681/10000 train_time:200968ms step_avg:42.93ms +[2025-09-11 11:32:46] [Rank 0] step:4701/10000 train_time:201642ms step_avg:42.89ms +[2025-09-11 11:32:46] [Rank 0] step:4701/10000 train_time:201642ms step_avg:42.89ms +[2025-09-11 11:32:47] [Rank 0] step:4721/10000 train_time:202317ms step_avg:42.85ms +[2025-09-11 11:32:47] [Rank 0] step:4721/10000 train_time:202317ms step_avg:42.85ms +[2025-09-11 11:32:48] [Rank 0] step:4741/10000 train_time:202991ms step_avg:42.82ms +[2025-09-11 11:32:48] [Rank 0] step:4741/10000 train_time:202991ms step_avg:42.82ms +[2025-09-11 11:32:49] [Rank 0] step:4761/10000 train_time:203666ms step_avg:42.78ms +[2025-09-11 11:32:49] [Rank 0] step:4761/10000 train_time:203666ms step_avg:42.78ms +[2025-09-11 11:32:49] [Rank 0] step:4781/10000 train_time:204340ms step_avg:42.74ms +[2025-09-11 11:32:49] [Rank 0] step:4781/10000 train_time:204340ms step_avg:42.74ms +[2025-09-11 11:32:50] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 11:32:50] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 11:32:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 11:32:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 11:32:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 11:32:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 11:32:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:32:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:32:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 11:32:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 11:32:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 11:32:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 11:32:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 11:32:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 11:32:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 11:32:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 11:32:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 11:32:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 11:32:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 11:32:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 11:32:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 11:32:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 11:32:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 11:32:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 11:32:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 11:32:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 11:32:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 11:32:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 11:32:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 11:32:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 11:32:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 11:32:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 11:32:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 11:32:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 11:32:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 11:32:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 11:32:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 11:32:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 11:32:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 11:32:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 11:32:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 11:32:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 11:32:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 11:32:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 11:33:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:33:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:33:00] [Rank 0] PRINT: step:4800/10000 val_loss:5.1349 total_sharp:1.0415e-03 L1_sharp:3.9712e-04 L2_sharp:1.5731e-04 L3_sharp:1.2714e-04 L4_sharp:4.6957e-05 L5_sharp:1.1254e-04 L6_sharp:1.0261e-04 L7_sharp:6.2383e-05 L8_sharp:2.0745e-04 L9_sharp:2.0265e-04 L10_sharp:1.7302e-04 L11_sharp:2.6862e-04 L12_sharp:7.3437e-04 total_fnorm:2.1875e+01 total_l1_linf:5.8624e+04 total_spectral:1.1000e+01 L1_fnorm:6.4375e+00 L2_fnorm:6.2188e+00 L3_fnorm:6.1562e+00 L4_fnorm:6.2188e+00 L5_fnorm:6.1250e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.0625e+00 L9_fnorm:6.1875e+00 L10_fnorm:6.1875e+00 L11_fnorm:6.2188e+00 L12_fnorm:6.0000e+00 L1_l1linf:1.6641e+00 L2_l1linf:1.5391e+00 L3_l1linf:1.5156e+00 L4_l1linf:1.5156e+00 L5_l1linf:1.4688e+00 L6_l1linf:1.4609e+00 L7_l1linf:1.4453e+00 L8_l1linf:1.4062e+00 L9_l1linf:1.4375e+00 L10_l1linf:1.4766e+00 L11_l1linf:1.5078e+00 L12_l1linf:1.3594e+00 L1_spectral:7.9341e-02 L2_spectral:7.7223e-02 L3_spectral:7.7476e-02 L4_spectral:7.8203e-02 L5_spectral:7.7740e-02 L6_spectral:7.8137e-02 L7_spectral:7.7963e-02 L8_spectral:7.7044e-02 L9_spectral:7.7373e-02 L10_spectral:7.7886e-02 L11_spectral:7.7396e-02 L12_spectral:7.7138e-02 train_time:204994ms step_avg:42.71ms +[2025-09-11 11:33:00] [Rank 0] PRINT: step:4800/10000 
val_loss:5.1349 total_sharp:1.0415e-03 L1_sharp:3.9712e-04 L2_sharp:1.5731e-04 L3_sharp:1.2714e-04 L4_sharp:4.6957e-05 L5_sharp:1.1254e-04 L6_sharp:1.0261e-04 L7_sharp:6.2383e-05 L8_sharp:2.0745e-04 L9_sharp:2.0265e-04 L10_sharp:1.7302e-04 L11_sharp:2.6862e-04 L12_sharp:7.3437e-04 total_fnorm:2.1875e+01 total_l1_linf:5.8624e+04 total_spectral:1.1000e+01 L1_fnorm:6.4375e+00 L2_fnorm:6.2188e+00 L3_fnorm:6.1562e+00 L4_fnorm:6.2188e+00 L5_fnorm:6.1250e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.0625e+00 L9_fnorm:6.1875e+00 L10_fnorm:6.1875e+00 L11_fnorm:6.2188e+00 L12_fnorm:6.0000e+00 L1_l1linf:1.6641e+00 L2_l1linf:1.5391e+00 L3_l1linf:1.5156e+00 L4_l1linf:1.5156e+00 L5_l1linf:1.4688e+00 L6_l1linf:1.4609e+00 L7_l1linf:1.4453e+00 L8_l1linf:1.4062e+00 L9_l1linf:1.4375e+00 L10_l1linf:1.4766e+00 L11_l1linf:1.5078e+00 L12_l1linf:1.3594e+00 L1_spectral:7.9341e-02 L2_spectral:7.7223e-02 L3_spectral:7.7476e-02 L4_spectral:7.8203e-02 L5_spectral:7.7740e-02 L6_spectral:7.8137e-02 L7_spectral:7.7963e-02 L8_spectral:7.7044e-02 L9_spectral:7.7373e-02 L10_spectral:7.7886e-02 L11_spectral:7.7396e-02 L12_spectral:7.7138e-02 train_time:204994ms step_avg:42.71ms +[2025-09-11 11:33:01] [Rank 0] step:4801/10000 train_time:206280ms step_avg:42.97ms +[2025-09-11 11:33:01] [Rank 0] step:4801/10000 train_time:206280ms step_avg:42.97ms +[2025-09-11 11:33:02] [Rank 0] step:4821/10000 train_time:206985ms step_avg:42.93ms +[2025-09-11 11:33:02] [Rank 0] step:4821/10000 train_time:206985ms step_avg:42.93ms +[2025-09-11 11:33:03] [Rank 0] step:4841/10000 train_time:207661ms step_avg:42.90ms +[2025-09-11 11:33:03] [Rank 0] step:4841/10000 train_time:207661ms step_avg:42.90ms +[2025-09-11 11:33:03] [Rank 0] step:4861/10000 train_time:208336ms step_avg:42.86ms +[2025-09-11 11:33:03] [Rank 0] step:4861/10000 train_time:208336ms step_avg:42.86ms +[2025-09-11 11:33:04] [Rank 0] step:4881/10000 train_time:209011ms step_avg:42.82ms +[2025-09-11 11:33:04] [Rank 0] step:4881/10000 
train_time:209011ms step_avg:42.82ms +[2025-09-11 11:33:05] [Rank 0] step:4901/10000 train_time:209687ms step_avg:42.78ms +[2025-09-11 11:33:05] [Rank 0] step:4901/10000 train_time:209687ms step_avg:42.78ms +[2025-09-11 11:33:05] [Rank 0] step:4921/10000 train_time:210363ms step_avg:42.75ms +[2025-09-11 11:33:05] [Rank 0] step:4921/10000 train_time:210363ms step_avg:42.75ms +[2025-09-11 11:33:06] [Rank 0] step:4941/10000 train_time:211038ms step_avg:42.71ms +[2025-09-11 11:33:06] [Rank 0] step:4941/10000 train_time:211038ms step_avg:42.71ms +[2025-09-11 11:33:07] [Rank 0] step:4961/10000 train_time:211713ms step_avg:42.68ms +[2025-09-11 11:33:07] [Rank 0] step:4961/10000 train_time:211713ms step_avg:42.68ms +[2025-09-11 11:33:07] [Rank 0] step:4981/10000 train_time:212388ms step_avg:42.64ms +[2025-09-11 11:33:07] [Rank 0] step:4981/10000 train_time:212388ms step_avg:42.64ms +[2025-09-11 11:33:08] [Rank 0] step:5001/10000 train_time:213064ms step_avg:42.60ms +[2025-09-11 11:33:08] [Rank 0] step:5001/10000 train_time:213064ms step_avg:42.60ms +[2025-09-11 11:33:09] [Rank 0] step:5021/10000 train_time:213738ms step_avg:42.57ms +[2025-09-11 11:33:09] [Rank 0] step:5021/10000 train_time:213738ms step_avg:42.57ms +[2025-09-11 11:33:09] [Rank 0] step:5041/10000 train_time:214411ms step_avg:42.53ms +[2025-09-11 11:33:09] [Rank 0] step:5041/10000 train_time:214411ms step_avg:42.53ms +[2025-09-11 11:33:10] [Rank 0] step:5061/10000 train_time:215086ms step_avg:42.50ms +[2025-09-11 11:33:10] [Rank 0] step:5061/10000 train_time:215086ms step_avg:42.50ms +[2025-09-11 11:33:11] [Rank 0] step:5081/10000 train_time:215760ms step_avg:42.46ms +[2025-09-11 11:33:11] [Rank 0] step:5081/10000 train_time:215760ms step_avg:42.46ms +[2025-09-11 11:33:11] [Rank 0] step:5101/10000 train_time:216434ms step_avg:42.43ms +[2025-09-11 11:33:11] [Rank 0] step:5101/10000 train_time:216434ms step_avg:42.43ms +[2025-09-11 11:33:12] [Rank 0] step:5121/10000 train_time:217108ms step_avg:42.40ms 
+[2025-09-11 11:33:12] [Rank 0] step:5121/10000 train_time:217108ms step_avg:42.40ms +[2025-09-11 11:33:13] [Rank 0] step:5141/10000 train_time:217783ms step_avg:42.36ms +[2025-09-11 11:33:13] [Rank 0] step:5141/10000 train_time:217783ms step_avg:42.36ms +[2025-09-11 11:33:13] [Rank 0] step:5161/10000 train_time:218458ms step_avg:42.33ms +[2025-09-11 11:33:13] [Rank 0] step:5161/10000 train_time:218458ms step_avg:42.33ms +[2025-09-11 11:33:14] [Rank 0] step:5181/10000 train_time:219132ms step_avg:42.30ms +[2025-09-11 11:33:14] [Rank 0] step:5181/10000 train_time:219132ms step_avg:42.30ms +[2025-09-11 11:33:15] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 11:33:15] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 11:33:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 11:33:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 11:33:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 11:33:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 11:33:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:33:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:33:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 11:33:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 11:33:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 11:33:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 11:33:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 11:33:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 11:33:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 11:33:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 11:33:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 11:33:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 11:33:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 11:33:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 11:33:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 11:33:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 11:33:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 11:33:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 11:33:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 11:33:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 11:33:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 11:33:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 11:33:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 11:33:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 11:33:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 11:33:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 11:33:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 11:33:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 11:33:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 11:33:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 11:33:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 11:33:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 11:33:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 11:33:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 11:33:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 11:33:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 11:33:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 11:33:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 11:33:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:33:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:33:25] [Rank 0] PRINT: step:5200/10000 val_loss:5.1028 total_sharp:1.1640e-03 L1_sharp:4.4670e-04 L2_sharp:2.2525e-04 L3_sharp:1.0767e-04 L4_sharp:7.3814e-05 L5_sharp:1.1516e-04 L6_sharp:1.1501e-04 L7_sharp:6.6603e-05 L8_sharp:1.9965e-04 L9_sharp:2.0185e-04 L10_sharp:1.8284e-04 L11_sharp:2.9377e-04 L12_sharp:1.3839e-03 total_fnorm:2.1750e+01 total_l1_linf:5.7088e+04 total_spectral:1.0938e+01 L1_fnorm:6.4375e+00 L2_fnorm:6.2188e+00 L3_fnorm:6.1875e+00 L4_fnorm:6.2188e+00 L5_fnorm:6.1250e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.1562e+00 L10_fnorm:6.1875e+00 L11_fnorm:6.2500e+00 L12_fnorm:6.0312e+00 L1_l1linf:1.6562e+00 L2_l1linf:1.5234e+00 L3_l1linf:1.5469e+00 L4_l1linf:1.5234e+00 L5_l1linf:1.4453e+00 L6_l1linf:1.4297e+00 L7_l1linf:1.4297e+00 L8_l1linf:1.3984e+00 L9_l1linf:1.3906e+00 L10_l1linf:1.4219e+00 L11_l1linf:1.4844e+00 L12_l1linf:1.3359e+00 L1_spectral:7.9654e-02 L2_spectral:7.7577e-02 L3_spectral:7.8144e-02 L4_spectral:7.8638e-02 L5_spectral:7.7856e-02 L6_spectral:7.8626e-02 L7_spectral:7.8220e-02 L8_spectral:7.7584e-02 L9_spectral:7.7787e-02 L10_spectral:7.8047e-02 L11_spectral:7.8202e-02 L12_spectral:7.7791e-02 train_time:219794ms step_avg:42.27ms +[2025-09-11 11:33:25] [Rank 0] PRINT: step:5200/10000 val_loss:5.1028 total_sharp:1.1640e-03 L1_sharp:4.4670e-04 L2_sharp:2.2525e-04 L3_sharp:1.0767e-04 L4_sharp:7.3814e-05 L5_sharp:1.1516e-04 L6_sharp:1.1501e-04 L7_sharp:6.6603e-05 L8_sharp:1.9965e-04 L9_sharp:2.0185e-04 L10_sharp:1.8284e-04 L11_sharp:2.9377e-04 L12_sharp:1.3839e-03 total_fnorm:2.1750e+01 total_l1_linf:5.7088e+04 total_spectral:1.0938e+01 L1_fnorm:6.4375e+00 L2_fnorm:6.2188e+00 L3_fnorm:6.1875e+00 L4_fnorm:6.2188e+00 L5_fnorm:6.1250e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.1562e+00 L10_fnorm:6.1875e+00 L11_fnorm:6.2500e+00 L12_fnorm:6.0312e+00 L1_l1linf:1.6562e+00 L2_l1linf:1.5234e+00 L3_l1linf:1.5469e+00 L4_l1linf:1.5234e+00 L5_l1linf:1.4453e+00 
L6_l1linf:1.4297e+00 L7_l1linf:1.4297e+00 L8_l1linf:1.3984e+00 L9_l1linf:1.3906e+00 L10_l1linf:1.4219e+00 L11_l1linf:1.4844e+00 L12_l1linf:1.3359e+00 L1_spectral:7.9654e-02 L2_spectral:7.7577e-02 L3_spectral:7.8144e-02 L4_spectral:7.8638e-02 L5_spectral:7.7856e-02 L6_spectral:7.8626e-02 L7_spectral:7.8220e-02 L8_spectral:7.7584e-02 L9_spectral:7.7787e-02 L10_spectral:7.8047e-02 L11_spectral:7.8202e-02 L12_spectral:7.7791e-02 train_time:219794ms step_avg:42.27ms +[2025-09-11 11:33:26] [Rank 0] step:5201/10000 train_time:221059ms step_avg:42.50ms +[2025-09-11 11:33:26] [Rank 0] step:5201/10000 train_time:221059ms step_avg:42.50ms +[2025-09-11 11:33:27] [Rank 0] step:5221/10000 train_time:221771ms step_avg:42.48ms +[2025-09-11 11:33:27] [Rank 0] step:5221/10000 train_time:221771ms step_avg:42.48ms +[2025-09-11 11:33:27] [Rank 0] step:5241/10000 train_time:222455ms step_avg:42.45ms +[2025-09-11 11:33:27] [Rank 0] step:5241/10000 train_time:222455ms step_avg:42.45ms +[2025-09-11 11:33:28] [Rank 0] step:5261/10000 train_time:223140ms step_avg:42.41ms +[2025-09-11 11:33:28] [Rank 0] step:5261/10000 train_time:223140ms step_avg:42.41ms +[2025-09-11 11:33:29] [Rank 0] step:5281/10000 train_time:223823ms step_avg:42.38ms +[2025-09-11 11:33:29] [Rank 0] step:5281/10000 train_time:223823ms step_avg:42.38ms +[2025-09-11 11:33:29] [Rank 0] step:5301/10000 train_time:224508ms step_avg:42.35ms +[2025-09-11 11:33:29] [Rank 0] step:5301/10000 train_time:224508ms step_avg:42.35ms +[2025-09-11 11:33:30] [Rank 0] step:5321/10000 train_time:225191ms step_avg:42.32ms +[2025-09-11 11:33:30] [Rank 0] step:5321/10000 train_time:225191ms step_avg:42.32ms +[2025-09-11 11:33:31] [Rank 0] step:5341/10000 train_time:225874ms step_avg:42.29ms +[2025-09-11 11:33:31] [Rank 0] step:5341/10000 train_time:225874ms step_avg:42.29ms +[2025-09-11 11:33:32] [Rank 0] step:5361/10000 train_time:226558ms step_avg:42.26ms +[2025-09-11 11:33:32] [Rank 0] step:5361/10000 train_time:226558ms step_avg:42.26ms 
+[2025-09-11 11:33:32] [Rank 0] step:5381/10000 train_time:227242ms step_avg:42.23ms +[2025-09-11 11:33:32] [Rank 0] step:5381/10000 train_time:227242ms step_avg:42.23ms +[2025-09-11 11:33:33] [Rank 0] step:5401/10000 train_time:227925ms step_avg:42.20ms +[2025-09-11 11:33:33] [Rank 0] step:5401/10000 train_time:227925ms step_avg:42.20ms +[2025-09-11 11:33:34] [Rank 0] step:5421/10000 train_time:228610ms step_avg:42.17ms +[2025-09-11 11:33:34] [Rank 0] step:5421/10000 train_time:228610ms step_avg:42.17ms +[2025-09-11 11:33:34] [Rank 0] step:5441/10000 train_time:229293ms step_avg:42.14ms +[2025-09-11 11:33:34] [Rank 0] step:5441/10000 train_time:229293ms step_avg:42.14ms +[2025-09-11 11:33:35] [Rank 0] step:5461/10000 train_time:229977ms step_avg:42.11ms +[2025-09-11 11:33:35] [Rank 0] step:5461/10000 train_time:229977ms step_avg:42.11ms +[2025-09-11 11:33:36] [Rank 0] step:5481/10000 train_time:230661ms step_avg:42.08ms +[2025-09-11 11:33:36] [Rank 0] step:5481/10000 train_time:230661ms step_avg:42.08ms +[2025-09-11 11:33:36] [Rank 0] step:5501/10000 train_time:231345ms step_avg:42.06ms +[2025-09-11 11:33:36] [Rank 0] step:5501/10000 train_time:231345ms step_avg:42.06ms +[2025-09-11 11:33:37] [Rank 0] step:5521/10000 train_time:232028ms step_avg:42.03ms +[2025-09-11 11:33:37] [Rank 0] step:5521/10000 train_time:232028ms step_avg:42.03ms +[2025-09-11 11:33:38] [Rank 0] step:5541/10000 train_time:232714ms step_avg:42.00ms +[2025-09-11 11:33:38] [Rank 0] step:5541/10000 train_time:232714ms step_avg:42.00ms +[2025-09-11 11:33:38] [Rank 0] step:5561/10000 train_time:233400ms step_avg:41.97ms +[2025-09-11 11:33:38] [Rank 0] step:5561/10000 train_time:233400ms step_avg:41.97ms +[2025-09-11 11:33:39] [Rank 0] step:5581/10000 train_time:234085ms step_avg:41.94ms +[2025-09-11 11:33:39] [Rank 0] step:5581/10000 train_time:234085ms step_avg:41.94ms +[2025-09-11 11:33:40] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 11:33:40] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 11:33:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 11:33:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 11:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 11:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 11:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 11:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 11:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 11:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 11:33:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 11:33:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 11:33:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 11:33:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 11:33:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 11:33:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 11:33:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 11:33:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 11:33:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 11:33:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 11:33:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 11:33:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 11:33:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 11:33:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 11:33:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 11:33:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 11:33:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 11:33:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 11:33:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 11:33:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 11:33:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 11:33:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 11:33:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 11:33:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 11:33:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 11:33:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 11:33:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 11:33:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 11:33:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 11:33:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 11:33:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 11:33:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 11:33:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:33:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:33:49] [Rank 0] PRINT: step:5600/10000 val_loss:5.0648 total_sharp:9.0840e-04 L1_sharp:4.3734e-04 L2_sharp:2.0621e-04 L3_sharp:8.0668e-05 L4_sharp:6.0361e-05 L5_sharp:1.3459e-04 L6_sharp:9.3272e-05 L7_sharp:6.9260e-05 L8_sharp:1.8618e-04 L9_sharp:1.7989e-04 L10_sharp:1.7615e-04 L11_sharp:2.6398e-04 L12_sharp:6.7454e-04 total_fnorm:2.1625e+01 total_l1_linf:5.6576e+04 total_spectral:1.0938e+01 L1_fnorm:6.3750e+00 L2_fnorm:6.1875e+00 L3_fnorm:6.1562e+00 L4_fnorm:6.2188e+00 L5_fnorm:6.0938e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.1562e+00 L10_fnorm:6.1875e+00 L11_fnorm:6.2500e+00 L12_fnorm:6.0938e+00 L1_l1linf:1.6719e+00 L2_l1linf:1.5312e+00 L3_l1linf:1.5078e+00 L4_l1linf:1.4922e+00 L5_l1linf:1.4453e+00 L6_l1linf:1.4297e+00 L7_l1linf:1.3984e+00 L8_l1linf:1.3906e+00 L9_l1linf:1.3906e+00 L10_l1linf:1.4219e+00 L11_l1linf:1.4688e+00 L12_l1linf:1.3281e+00 L1_spectral:7.9488e-02 L2_spectral:7.7684e-02 L3_spectral:7.8165e-02 L4_spectral:7.8497e-02 L5_spectral:7.7829e-02 L6_spectral:7.8707e-02 L7_spectral:7.8598e-02 L8_spectral:7.7773e-02 L9_spectral:7.8661e-02 L10_spectral:7.8098e-02 L11_spectral:7.8318e-02 L12_spectral:7.7967e-02 train_time:234749ms step_avg:41.92ms +[2025-09-11 11:33:49] [Rank 0] PRINT: step:5600/10000 
val_loss:5.0648 total_sharp:9.0840e-04 L1_sharp:4.3734e-04 L2_sharp:2.0621e-04 L3_sharp:8.0668e-05 L4_sharp:6.0361e-05 L5_sharp:1.3459e-04 L6_sharp:9.3272e-05 L7_sharp:6.9260e-05 L8_sharp:1.8618e-04 L9_sharp:1.7989e-04 L10_sharp:1.7615e-04 L11_sharp:2.6398e-04 L12_sharp:6.7454e-04 total_fnorm:2.1625e+01 total_l1_linf:5.6576e+04 total_spectral:1.0938e+01 L1_fnorm:6.3750e+00 L2_fnorm:6.1875e+00 L3_fnorm:6.1562e+00 L4_fnorm:6.2188e+00 L5_fnorm:6.0938e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.1562e+00 L10_fnorm:6.1875e+00 L11_fnorm:6.2500e+00 L12_fnorm:6.0938e+00 L1_l1linf:1.6719e+00 L2_l1linf:1.5312e+00 L3_l1linf:1.5078e+00 L4_l1linf:1.4922e+00 L5_l1linf:1.4453e+00 L6_l1linf:1.4297e+00 L7_l1linf:1.3984e+00 L8_l1linf:1.3906e+00 L9_l1linf:1.3906e+00 L10_l1linf:1.4219e+00 L11_l1linf:1.4688e+00 L12_l1linf:1.3281e+00 L1_spectral:7.9488e-02 L2_spectral:7.7684e-02 L3_spectral:7.8165e-02 L4_spectral:7.8497e-02 L5_spectral:7.7829e-02 L6_spectral:7.8707e-02 L7_spectral:7.8598e-02 L8_spectral:7.7773e-02 L9_spectral:7.8661e-02 L10_spectral:7.8098e-02 L11_spectral:7.8318e-02 L12_spectral:7.7967e-02 train_time:234749ms step_avg:41.92ms +[2025-09-11 11:33:51] [Rank 0] step:5601/10000 train_time:235915ms step_avg:42.12ms +[2025-09-11 11:33:51] [Rank 0] step:5601/10000 train_time:235915ms step_avg:42.12ms +[2025-09-11 11:33:51] [Rank 0] step:5621/10000 train_time:236604ms step_avg:42.09ms +[2025-09-11 11:33:51] [Rank 0] step:5621/10000 train_time:236604ms step_avg:42.09ms +[2025-09-11 11:33:52] [Rank 0] step:5641/10000 train_time:237288ms step_avg:42.06ms +[2025-09-11 11:33:52] [Rank 0] step:5641/10000 train_time:237288ms step_avg:42.06ms +[2025-09-11 11:33:53] [Rank 0] step:5661/10000 train_time:237971ms step_avg:42.04ms +[2025-09-11 11:33:53] [Rank 0] step:5661/10000 train_time:237971ms step_avg:42.04ms +[2025-09-11 11:33:53] [Rank 0] step:5681/10000 train_time:238656ms step_avg:42.01ms +[2025-09-11 11:33:53] [Rank 0] step:5681/10000 
train_time:238656ms step_avg:42.01ms +[2025-09-11 11:33:54] [Rank 0] step:5701/10000 train_time:239341ms step_avg:41.98ms +[2025-09-11 11:33:54] [Rank 0] step:5701/10000 train_time:239341ms step_avg:41.98ms +[2025-09-11 11:33:55] [Rank 0] step:5721/10000 train_time:240024ms step_avg:41.95ms +[2025-09-11 11:33:55] [Rank 0] step:5721/10000 train_time:240024ms step_avg:41.95ms +[2025-09-11 11:33:55] [Rank 0] step:5741/10000 train_time:240710ms step_avg:41.93ms +[2025-09-11 11:33:55] [Rank 0] step:5741/10000 train_time:240710ms step_avg:41.93ms +[2025-09-11 11:33:56] [Rank 0] step:5761/10000 train_time:241395ms step_avg:41.90ms +[2025-09-11 11:33:56] [Rank 0] step:5761/10000 train_time:241395ms step_avg:41.90ms +[2025-09-11 11:33:57] [Rank 0] step:5781/10000 train_time:242079ms step_avg:41.88ms +[2025-09-11 11:33:57] [Rank 0] step:5781/10000 train_time:242079ms step_avg:41.88ms +[2025-09-11 11:33:57] [Rank 0] step:5801/10000 train_time:242767ms step_avg:41.85ms +[2025-09-11 11:33:57] [Rank 0] step:5801/10000 train_time:242767ms step_avg:41.85ms +[2025-09-11 11:33:58] [Rank 0] step:5821/10000 train_time:243450ms step_avg:41.82ms +[2025-09-11 11:33:58] [Rank 0] step:5821/10000 train_time:243450ms step_avg:41.82ms +[2025-09-11 11:33:59] [Rank 0] step:5841/10000 train_time:244134ms step_avg:41.80ms +[2025-09-11 11:33:59] [Rank 0] step:5841/10000 train_time:244134ms step_avg:41.80ms +[2025-09-11 11:33:59] [Rank 0] step:5861/10000 train_time:244817ms step_avg:41.77ms +[2025-09-11 11:33:59] [Rank 0] step:5861/10000 train_time:244817ms step_avg:41.77ms +[2025-09-11 11:34:00] [Rank 0] step:5881/10000 train_time:245502ms step_avg:41.74ms +[2025-09-11 11:34:00] [Rank 0] step:5881/10000 train_time:245502ms step_avg:41.74ms +[2025-09-11 11:34:01] [Rank 0] step:5901/10000 train_time:246186ms step_avg:41.72ms +[2025-09-11 11:34:01] [Rank 0] step:5901/10000 train_time:246186ms step_avg:41.72ms +[2025-09-11 11:34:02] [Rank 0] step:5921/10000 train_time:246873ms step_avg:41.69ms 
+[2025-09-11 11:34:02] [Rank 0] step:5921/10000 train_time:246873ms step_avg:41.69ms +[2025-09-11 11:34:02] [Rank 0] step:5941/10000 train_time:247559ms step_avg:41.67ms +[2025-09-11 11:34:02] [Rank 0] step:5941/10000 train_time:247559ms step_avg:41.67ms +[2025-09-11 11:34:03] [Rank 0] step:5961/10000 train_time:248244ms step_avg:41.64ms +[2025-09-11 11:34:03] [Rank 0] step:5961/10000 train_time:248244ms step_avg:41.64ms +[2025-09-11 11:34:04] [Rank 0] step:5981/10000 train_time:248930ms step_avg:41.62ms +[2025-09-11 11:34:04] [Rank 0] step:5981/10000 train_time:248930ms step_avg:41.62ms +[2025-09-11 11:34:04] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 11:34:04] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 11:34:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 11:34:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 11:34:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 11:34:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 11:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 11:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 11:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 11:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 11:34:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 11:34:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 11:34:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 11:34:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 11:34:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 11:34:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 11:34:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 11:34:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 11:34:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 11:34:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 11:34:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 11:34:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 11:34:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 11:34:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 11:34:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 11:34:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 11:34:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 11:34:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 11:34:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 11:34:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 11:34:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 11:34:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 11:34:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 11:34:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 11:34:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 11:34:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 11:34:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 11:34:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 11:34:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 11:34:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 11:34:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 11:34:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 11:34:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:34:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:34:14] [Rank 0] PRINT: step:6000/10000 val_loss:5.0271 total_sharp:9.5221e-04 L1_sharp:2.7817e-04 L2_sharp:1.4802e-04 L3_sharp:1.0269e-04 L4_sharp:5.2683e-05 L5_sharp:1.0056e-04 L6_sharp:9.3337e-05 L7_sharp:7.9346e-05 L8_sharp:1.8300e-04 L9_sharp:1.8776e-04 L10_sharp:1.8501e-04 L11_sharp:2.7024e-04 L12_sharp:9.2332e-04 total_fnorm:2.1625e+01 total_l1_linf:5.5552e+04 total_spectral:1.0875e+01 L1_fnorm:6.3750e+00 L2_fnorm:6.1562e+00 L3_fnorm:6.1562e+00 L4_fnorm:6.1875e+00 L5_fnorm:6.0938e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.1562e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.1562e+00 L10_fnorm:6.1562e+00 L11_fnorm:6.2188e+00 L12_fnorm:6.0625e+00 L1_l1linf:1.6328e+00 L2_l1linf:1.5156e+00 L3_l1linf:1.5000e+00 L4_l1linf:1.4766e+00 L5_l1linf:1.4531e+00 L6_l1linf:1.4297e+00 L7_l1linf:1.3906e+00 L8_l1linf:1.3906e+00 L9_l1linf:1.3516e+00 L10_l1linf:1.3984e+00 L11_l1linf:1.4453e+00 L12_l1linf:1.3438e+00 L1_spectral:7.9952e-02 L2_spectral:7.7998e-02 L3_spectral:7.8207e-02 L4_spectral:7.8793e-02 L5_spectral:7.8043e-02 L6_spectral:7.8822e-02 L7_spectral:7.9370e-02 L8_spectral:7.8124e-02 L9_spectral:7.8733e-02 L10_spectral:7.8410e-02 L11_spectral:7.8369e-02 L12_spectral:7.8028e-02 train_time:249597ms step_avg:41.60ms +[2025-09-11 11:34:14] [Rank 0] PRINT: step:6000/10000 val_loss:5.0271 total_sharp:9.5221e-04 L1_sharp:2.7817e-04 L2_sharp:1.4802e-04 L3_sharp:1.0269e-04 L4_sharp:5.2683e-05 L5_sharp:1.0056e-04 L6_sharp:9.3337e-05 L7_sharp:7.9346e-05 L8_sharp:1.8300e-04 L9_sharp:1.8776e-04 L10_sharp:1.8501e-04 L11_sharp:2.7024e-04 L12_sharp:9.2332e-04 total_fnorm:2.1625e+01 total_l1_linf:5.5552e+04 total_spectral:1.0875e+01 L1_fnorm:6.3750e+00 L2_fnorm:6.1562e+00 L3_fnorm:6.1562e+00 L4_fnorm:6.1875e+00 L5_fnorm:6.0938e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.1562e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.1562e+00 L10_fnorm:6.1562e+00 L11_fnorm:6.2188e+00 L12_fnorm:6.0625e+00 L1_l1linf:1.6328e+00 L2_l1linf:1.5156e+00 L3_l1linf:1.5000e+00 L4_l1linf:1.4766e+00 L5_l1linf:1.4531e+00 
L6_l1linf:1.4297e+00 L7_l1linf:1.3906e+00 L8_l1linf:1.3906e+00 L9_l1linf:1.3516e+00 L10_l1linf:1.3984e+00 L11_l1linf:1.4453e+00 L12_l1linf:1.3438e+00 L1_spectral:7.9952e-02 L2_spectral:7.7998e-02 L3_spectral:7.8207e-02 L4_spectral:7.8793e-02 L5_spectral:7.8043e-02 L6_spectral:7.8822e-02 L7_spectral:7.9370e-02 L8_spectral:7.8124e-02 L9_spectral:7.8733e-02 L10_spectral:7.8410e-02 L11_spectral:7.8369e-02 L12_spectral:7.8028e-02 train_time:249597ms step_avg:41.60ms +[2025-09-11 11:34:15] [Rank 0] step:6001/10000 train_time:250782ms step_avg:41.79ms +[2025-09-11 11:34:15] [Rank 0] step:6001/10000 train_time:250782ms step_avg:41.79ms +[2025-09-11 11:34:16] [Rank 0] step:6021/10000 train_time:251458ms step_avg:41.76ms +[2025-09-11 11:34:16] [Rank 0] step:6021/10000 train_time:251458ms step_avg:41.76ms +[2025-09-11 11:34:17] [Rank 0] step:6041/10000 train_time:252148ms step_avg:41.74ms +[2025-09-11 11:34:17] [Rank 0] step:6041/10000 train_time:252148ms step_avg:41.74ms +[2025-09-11 11:34:17] [Rank 0] step:6061/10000 train_time:252836ms step_avg:41.72ms +[2025-09-11 11:34:17] [Rank 0] step:6061/10000 train_time:252836ms step_avg:41.72ms +[2025-09-11 11:34:18] [Rank 0] step:6081/10000 train_time:253525ms step_avg:41.69ms +[2025-09-11 11:34:18] [Rank 0] step:6081/10000 train_time:253525ms step_avg:41.69ms +[2025-09-11 11:34:19] [Rank 0] step:6101/10000 train_time:254212ms step_avg:41.67ms +[2025-09-11 11:34:19] [Rank 0] step:6101/10000 train_time:254212ms step_avg:41.67ms +[2025-09-11 11:34:20] [Rank 0] step:6121/10000 train_time:254899ms step_avg:41.64ms +[2025-09-11 11:34:20] [Rank 0] step:6121/10000 train_time:254899ms step_avg:41.64ms +[2025-09-11 11:34:20] [Rank 0] step:6141/10000 train_time:255586ms step_avg:41.62ms +[2025-09-11 11:34:20] [Rank 0] step:6141/10000 train_time:255586ms step_avg:41.62ms +[2025-09-11 11:34:21] [Rank 0] step:6161/10000 train_time:256274ms step_avg:41.60ms +[2025-09-11 11:34:21] [Rank 0] step:6161/10000 train_time:256274ms step_avg:41.60ms 
+[2025-09-11 11:34:22] [Rank 0] step:6181/10000 train_time:256959ms step_avg:41.57ms +[2025-09-11 11:34:22] [Rank 0] step:6181/10000 train_time:256959ms step_avg:41.57ms +[2025-09-11 11:34:22] [Rank 0] step:6201/10000 train_time:257647ms step_avg:41.55ms +[2025-09-11 11:34:22] [Rank 0] step:6201/10000 train_time:257647ms step_avg:41.55ms +[2025-09-11 11:34:23] [Rank 0] step:6221/10000 train_time:258334ms step_avg:41.53ms +[2025-09-11 11:34:23] [Rank 0] step:6221/10000 train_time:258334ms step_avg:41.53ms +[2025-09-11 11:34:24] [Rank 0] step:6241/10000 train_time:259021ms step_avg:41.50ms +[2025-09-11 11:34:24] [Rank 0] step:6241/10000 train_time:259021ms step_avg:41.50ms +[2025-09-11 11:34:24] [Rank 0] step:6261/10000 train_time:259707ms step_avg:41.48ms +[2025-09-11 11:34:24] [Rank 0] step:6261/10000 train_time:259707ms step_avg:41.48ms +[2025-09-11 11:34:25] [Rank 0] step:6281/10000 train_time:260395ms step_avg:41.46ms +[2025-09-11 11:34:25] [Rank 0] step:6281/10000 train_time:260395ms step_avg:41.46ms +[2025-09-11 11:34:26] [Rank 0] step:6301/10000 train_time:261081ms step_avg:41.43ms +[2025-09-11 11:34:26] [Rank 0] step:6301/10000 train_time:261081ms step_avg:41.43ms +[2025-09-11 11:34:26] [Rank 0] step:6321/10000 train_time:261770ms step_avg:41.41ms +[2025-09-11 11:34:26] [Rank 0] step:6321/10000 train_time:261770ms step_avg:41.41ms +[2025-09-11 11:34:27] [Rank 0] step:6341/10000 train_time:262457ms step_avg:41.39ms +[2025-09-11 11:34:27] [Rank 0] step:6341/10000 train_time:262457ms step_avg:41.39ms +[2025-09-11 11:34:28] [Rank 0] step:6361/10000 train_time:263145ms step_avg:41.37ms +[2025-09-11 11:34:28] [Rank 0] step:6361/10000 train_time:263145ms step_avg:41.37ms +[2025-09-11 11:34:28] [Rank 0] step:6381/10000 train_time:263832ms step_avg:41.35ms +[2025-09-11 11:34:28] [Rank 0] step:6381/10000 train_time:263832ms step_avg:41.35ms +[2025-09-11 11:34:29] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 11:34:29] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 11:34:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 11:34:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 11:34:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 11:34:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 11:34:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:34:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:34:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 11:34:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 11:34:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 11:34:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 11:34:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 11:34:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 11:34:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 11:34:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 11:34:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 11:34:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 11:34:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 11:34:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 11:34:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 11:34:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 11:34:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 11:34:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 11:34:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 11:34:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 11:34:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 11:34:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 11:34:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 11:34:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 11:34:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 11:34:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 11:34:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 11:34:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 11:34:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 11:34:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 11:34:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 11:34:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 11:34:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 11:34:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 11:34:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 11:34:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 11:34:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 11:34:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 11:34:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:34:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:34:42] [Rank 0] PRINT: step:6400/10000 val_loss:4.9931 total_sharp:8.5520e-04 L1_sharp:3.4716e-04 L2_sharp:1.4717e-04 L3_sharp:9.0058e-05 L4_sharp:6.4511e-05 L5_sharp:1.0190e-04 L6_sharp:8.3887e-05 L7_sharp:7.3906e-05 L8_sharp:1.6256e-04 L9_sharp:1.6285e-04 L10_sharp:1.6395e-04 L11_sharp:2.6119e-04 L12_sharp:7.1882e-04 total_fnorm:1.9750e+01 total_l1_linf:4.8128e+04 total_spectral:9.8750e+00 L1_fnorm:5.7812e+00 L2_fnorm:5.6250e+00 L3_fnorm:5.5938e+00 L4_fnorm:5.6250e+00 L5_fnorm:5.5000e+00 L6_fnorm:5.6250e+00 L7_fnorm:5.5938e+00 L8_fnorm:5.4375e+00 L9_fnorm:5.5625e+00 L10_fnorm:5.5625e+00 L11_fnorm:5.6250e+00 L12_fnorm:5.5000e+00 L1_l1linf:1.4531e+00 L2_l1linf:1.3516e+00 L3_l1linf:1.3203e+00 L4_l1linf:1.3047e+00 L5_l1linf:1.2656e+00 L6_l1linf:1.2578e+00 L7_l1linf:1.2344e+00 L8_l1linf:1.1875e+00 L9_l1linf:1.1875e+00 L10_l1linf:1.2188e+00 L11_l1linf:1.2500e+00 L12_l1linf:1.1797e+00 L1_spectral:7.3718e-02 L2_spectral:7.2176e-02 L3_spectral:7.1934e-02 L4_spectral:7.3131e-02 L5_spectral:7.1626e-02 L6_spectral:7.2855e-02 L7_spectral:7.2564e-02 L8_spectral:7.2275e-02 L9_spectral:7.2869e-02 L10_spectral:7.2533e-02 L11_spectral:7.2844e-02 L12_spectral:7.2683e-02 train_time:264498ms step_avg:41.33ms +[2025-09-11 11:34:42] [Rank 0] PRINT: step:6400/10000 
val_loss:4.9931 total_sharp:8.5520e-04 L1_sharp:3.4716e-04 L2_sharp:1.4717e-04 L3_sharp:9.0058e-05 L4_sharp:6.4511e-05 L5_sharp:1.0190e-04 L6_sharp:8.3887e-05 L7_sharp:7.3906e-05 L8_sharp:1.6256e-04 L9_sharp:1.6285e-04 L10_sharp:1.6395e-04 L11_sharp:2.6119e-04 L12_sharp:7.1882e-04 total_fnorm:1.9750e+01 total_l1_linf:4.8128e+04 total_spectral:9.8750e+00 L1_fnorm:5.7812e+00 L2_fnorm:5.6250e+00 L3_fnorm:5.5938e+00 L4_fnorm:5.6250e+00 L5_fnorm:5.5000e+00 L6_fnorm:5.6250e+00 L7_fnorm:5.5938e+00 L8_fnorm:5.4375e+00 L9_fnorm:5.5625e+00 L10_fnorm:5.5625e+00 L11_fnorm:5.6250e+00 L12_fnorm:5.5000e+00 L1_l1linf:1.4531e+00 L2_l1linf:1.3516e+00 L3_l1linf:1.3203e+00 L4_l1linf:1.3047e+00 L5_l1linf:1.2656e+00 L6_l1linf:1.2578e+00 L7_l1linf:1.2344e+00 L8_l1linf:1.1875e+00 L9_l1linf:1.1875e+00 L10_l1linf:1.2188e+00 L11_l1linf:1.2500e+00 L12_l1linf:1.1797e+00 L1_spectral:7.3718e-02 L2_spectral:7.2176e-02 L3_spectral:7.1934e-02 L4_spectral:7.3131e-02 L5_spectral:7.1626e-02 L6_spectral:7.2855e-02 L7_spectral:7.2564e-02 L8_spectral:7.2275e-02 L9_spectral:7.2869e-02 L10_spectral:7.2533e-02 L11_spectral:7.2844e-02 L12_spectral:7.2683e-02 train_time:264498ms step_avg:41.33ms +[2025-09-11 11:34:43] [Rank 0] step:6401/10000 train_time:265697ms step_avg:41.51ms +[2025-09-11 11:34:43] [Rank 0] step:6401/10000 train_time:265697ms step_avg:41.51ms +[2025-09-11 11:34:45] [Rank 0] step:6421/10000 train_time:266943ms step_avg:41.57ms +[2025-09-11 11:34:45] [Rank 0] step:6421/10000 train_time:266943ms step_avg:41.57ms +[2025-09-11 11:34:45] [Rank 0] step:6441/10000 train_time:267631ms step_avg:41.55ms +[2025-09-11 11:34:45] [Rank 0] step:6441/10000 train_time:267631ms step_avg:41.55ms +[2025-09-11 11:34:46] [Rank 0] step:6461/10000 train_time:268319ms step_avg:41.53ms +[2025-09-11 11:34:46] [Rank 0] step:6461/10000 train_time:268319ms step_avg:41.53ms +[2025-09-11 11:34:47] [Rank 0] step:6481/10000 train_time:269275ms step_avg:41.55ms +[2025-09-11 11:34:47] [Rank 0] step:6481/10000 
train_time:269275ms step_avg:41.55ms +[2025-09-11 11:34:48] [Rank 0] step:6501/10000 train_time:269964ms step_avg:41.53ms +[2025-09-11 11:34:48] [Rank 0] step:6501/10000 train_time:269964ms step_avg:41.53ms +[2025-09-11 11:34:48] [Rank 0] step:6521/10000 train_time:270651ms step_avg:41.50ms +[2025-09-11 11:34:48] [Rank 0] step:6521/10000 train_time:270651ms step_avg:41.50ms +[2025-09-11 11:34:49] [Rank 0] step:6541/10000 train_time:271336ms step_avg:41.48ms +[2025-09-11 11:34:49] [Rank 0] step:6541/10000 train_time:271336ms step_avg:41.48ms +[2025-09-11 11:34:50] [Rank 0] step:6561/10000 train_time:272023ms step_avg:41.46ms +[2025-09-11 11:34:50] [Rank 0] step:6561/10000 train_time:272023ms step_avg:41.46ms +[2025-09-11 11:34:50] [Rank 0] step:6581/10000 train_time:272710ms step_avg:41.44ms +[2025-09-11 11:34:50] [Rank 0] step:6581/10000 train_time:272710ms step_avg:41.44ms +[2025-09-11 11:34:51] [Rank 0] step:6601/10000 train_time:273397ms step_avg:41.42ms +[2025-09-11 11:34:51] [Rank 0] step:6601/10000 train_time:273397ms step_avg:41.42ms +[2025-09-11 11:34:52] [Rank 0] step:6621/10000 train_time:274083ms step_avg:41.40ms +[2025-09-11 11:34:52] [Rank 0] step:6621/10000 train_time:274083ms step_avg:41.40ms +[2025-09-11 11:34:53] [Rank 0] step:6641/10000 train_time:274769ms step_avg:41.37ms +[2025-09-11 11:34:53] [Rank 0] step:6641/10000 train_time:274769ms step_avg:41.37ms +[2025-09-11 11:34:53] [Rank 0] step:6661/10000 train_time:275457ms step_avg:41.35ms +[2025-09-11 11:34:53] [Rank 0] step:6661/10000 train_time:275457ms step_avg:41.35ms +[2025-09-11 11:34:54] [Rank 0] step:6681/10000 train_time:276151ms step_avg:41.33ms +[2025-09-11 11:34:54] [Rank 0] step:6681/10000 train_time:276151ms step_avg:41.33ms +[2025-09-11 11:34:55] [Rank 0] step:6701/10000 train_time:276844ms step_avg:41.31ms +[2025-09-11 11:34:55] [Rank 0] step:6701/10000 train_time:276844ms step_avg:41.31ms +[2025-09-11 11:34:55] [Rank 0] step:6721/10000 train_time:277538ms step_avg:41.29ms 
+[2025-09-11 11:34:55] [Rank 0] step:6721/10000 train_time:277538ms step_avg:41.29ms +[2025-09-11 11:34:56] [Rank 0] step:6741/10000 train_time:278233ms step_avg:41.27ms +[2025-09-11 11:34:56] [Rank 0] step:6741/10000 train_time:278233ms step_avg:41.27ms +[2025-09-11 11:34:57] [Rank 0] step:6761/10000 train_time:278926ms step_avg:41.26ms +[2025-09-11 11:34:57] [Rank 0] step:6761/10000 train_time:278926ms step_avg:41.26ms +[2025-09-11 11:34:57] [Rank 0] step:6781/10000 train_time:279621ms step_avg:41.24ms +[2025-09-11 11:34:57] [Rank 0] step:6781/10000 train_time:279621ms step_avg:41.24ms +[2025-09-11 11:34:58] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 11:34:58] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 11:34:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 11:34:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 11:35:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 11:35:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 11:35:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:35:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:35:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 11:35:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 11:35:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 11:35:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 11:35:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 11:35:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 11:35:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 11:35:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 11:35:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 11:35:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 11:35:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 11:35:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 11:35:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 11:35:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 11:35:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 11:35:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 11:35:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 11:35:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 11:35:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 11:35:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 11:35:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 11:35:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 11:35:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 11:35:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 11:35:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 11:35:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 11:35:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 11:35:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 11:35:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 11:35:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 11:35:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 11:35:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 11:35:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 11:35:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 11:35:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 11:35:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 11:35:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:35:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:35:08] [Rank 0] PRINT: step:6800/10000 val_loss:4.9564 total_sharp:7.4422e-04 L1_sharp:3.5793e-04 L2_sharp:8.8070e-05 L3_sharp:1.0320e-04 L4_sharp:3.7729e-05 L5_sharp:7.9023e-05 L6_sharp:9.7481e-05 L7_sharp:9.0888e-05 L8_sharp:1.7751e-04 L9_sharp:1.4957e-04 L10_sharp:1.5295e-04 L11_sharp:2.2491e-04 L12_sharp:6.5865e-04 total_fnorm:1.7250e+01 total_l1_linf:4.0448e+04 total_spectral:8.6875e+00 L1_fnorm:5.1562e+00 L2_fnorm:4.9375e+00 L3_fnorm:4.9062e+00 L4_fnorm:4.9375e+00 L5_fnorm:4.8438e+00 L6_fnorm:4.9375e+00 L7_fnorm:4.9375e+00 L8_fnorm:4.7812e+00 L9_fnorm:4.9062e+00 L10_fnorm:4.9062e+00 L11_fnorm:4.9375e+00 L12_fnorm:4.8750e+00 L1_l1linf:1.2812e+00 L2_l1linf:1.1484e+00 L3_l1linf:1.1328e+00 L4_l1linf:1.1172e+00 L5_l1linf:1.0938e+00 L6_l1linf:1.0703e+00 L7_l1linf:1.0469e+00 L8_l1linf:1.0000e+00 L9_l1linf:1.0078e+00 L10_l1linf:9.7656e-01 L11_l1linf:1.0391e+00 L12_l1linf:1.0391e+00 L1_spectral:6.6728e-02 L2_spectral:6.5042e-02 L3_spectral:6.4923e-02 L4_spectral:6.5987e-02 L5_spectral:6.4598e-02 L6_spectral:6.5634e-02 L7_spectral:6.5658e-02 L8_spectral:6.5307e-02 L9_spectral:6.5736e-02 L10_spectral:6.5744e-02 L11_spectral:6.5705e-02 L12_spectral:6.5734e-02 train_time:280296ms step_avg:41.22ms +[2025-09-11 11:35:08] [Rank 0] PRINT: step:6800/10000 val_loss:4.9564 total_sharp:7.4422e-04 L1_sharp:3.5793e-04 L2_sharp:8.8070e-05 L3_sharp:1.0320e-04 L4_sharp:3.7729e-05 L5_sharp:7.9023e-05 L6_sharp:9.7481e-05 L7_sharp:9.0888e-05 L8_sharp:1.7751e-04 L9_sharp:1.4957e-04 L10_sharp:1.5295e-04 L11_sharp:2.2491e-04 L12_sharp:6.5865e-04 total_fnorm:1.7250e+01 total_l1_linf:4.0448e+04 total_spectral:8.6875e+00 L1_fnorm:5.1562e+00 L2_fnorm:4.9375e+00 L3_fnorm:4.9062e+00 L4_fnorm:4.9375e+00 L5_fnorm:4.8438e+00 L6_fnorm:4.9375e+00 L7_fnorm:4.9375e+00 L8_fnorm:4.7812e+00 L9_fnorm:4.9062e+00 L10_fnorm:4.9062e+00 L11_fnorm:4.9375e+00 L12_fnorm:4.8750e+00 L1_l1linf:1.2812e+00 L2_l1linf:1.1484e+00 L3_l1linf:1.1328e+00 L4_l1linf:1.1172e+00 L5_l1linf:1.0938e+00 
L6_l1linf:1.0703e+00 L7_l1linf:1.0469e+00 L8_l1linf:1.0000e+00 L9_l1linf:1.0078e+00 L10_l1linf:9.7656e-01 L11_l1linf:1.0391e+00 L12_l1linf:1.0391e+00 L1_spectral:6.6728e-02 L2_spectral:6.5042e-02 L3_spectral:6.4923e-02 L4_spectral:6.5987e-02 L5_spectral:6.4598e-02 L6_spectral:6.5634e-02 L7_spectral:6.5658e-02 L8_spectral:6.5307e-02 L9_spectral:6.5736e-02 L10_spectral:6.5744e-02 L11_spectral:6.5705e-02 L12_spectral:6.5734e-02 train_time:280296ms step_avg:41.22ms +[2025-09-11 11:35:09] [Rank 0] step:6801/10000 train_time:281481ms step_avg:41.39ms +[2025-09-11 11:35:09] [Rank 0] step:6801/10000 train_time:281481ms step_avg:41.39ms +[2025-09-11 11:35:10] [Rank 0] step:6821/10000 train_time:282169ms step_avg:41.37ms +[2025-09-11 11:35:10] [Rank 0] step:6821/10000 train_time:282169ms step_avg:41.37ms +[2025-09-11 11:35:11] [Rank 0] step:6841/10000 train_time:282866ms step_avg:41.35ms +[2025-09-11 11:35:11] [Rank 0] step:6841/10000 train_time:282866ms step_avg:41.35ms +[2025-09-11 11:35:11] [Rank 0] step:6861/10000 train_time:283562ms step_avg:41.33ms +[2025-09-11 11:35:11] [Rank 0] step:6861/10000 train_time:283562ms step_avg:41.33ms +[2025-09-11 11:35:12] [Rank 0] step:6881/10000 train_time:284258ms step_avg:41.31ms +[2025-09-11 11:35:12] [Rank 0] step:6881/10000 train_time:284258ms step_avg:41.31ms +[2025-09-11 11:35:13] [Rank 0] step:6901/10000 train_time:284951ms step_avg:41.29ms +[2025-09-11 11:35:13] [Rank 0] step:6901/10000 train_time:284951ms step_avg:41.29ms +[2025-09-11 11:35:13] [Rank 0] step:6921/10000 train_time:285645ms step_avg:41.27ms +[2025-09-11 11:35:13] [Rank 0] step:6921/10000 train_time:285645ms step_avg:41.27ms +[2025-09-11 11:35:14] [Rank 0] step:6941/10000 train_time:286340ms step_avg:41.25ms +[2025-09-11 11:35:14] [Rank 0] step:6941/10000 train_time:286340ms step_avg:41.25ms +[2025-09-11 11:35:15] [Rank 0] step:6961/10000 train_time:287034ms step_avg:41.23ms +[2025-09-11 11:35:15] [Rank 0] step:6961/10000 train_time:287034ms step_avg:41.23ms 
+[2025-09-11 11:35:15] [Rank 0] step:6981/10000 train_time:287730ms step_avg:41.22ms +[2025-09-11 11:35:15] [Rank 0] step:6981/10000 train_time:287730ms step_avg:41.22ms +[2025-09-11 11:35:16] [Rank 0] step:7001/10000 train_time:288425ms step_avg:41.20ms +[2025-09-11 11:35:16] [Rank 0] step:7001/10000 train_time:288425ms step_avg:41.20ms +[2025-09-11 11:35:17] [Rank 0] step:7021/10000 train_time:289119ms step_avg:41.18ms +[2025-09-11 11:35:17] [Rank 0] step:7021/10000 train_time:289119ms step_avg:41.18ms +[2025-09-11 11:35:18] [Rank 0] step:7041/10000 train_time:289813ms step_avg:41.16ms +[2025-09-11 11:35:18] [Rank 0] step:7041/10000 train_time:289813ms step_avg:41.16ms +[2025-09-11 11:35:18] [Rank 0] step:7061/10000 train_time:290509ms step_avg:41.14ms +[2025-09-11 11:35:18] [Rank 0] step:7061/10000 train_time:290509ms step_avg:41.14ms +[2025-09-11 11:35:19] [Rank 0] step:7081/10000 train_time:291204ms step_avg:41.12ms +[2025-09-11 11:35:19] [Rank 0] step:7081/10000 train_time:291204ms step_avg:41.12ms +[2025-09-11 11:35:20] [Rank 0] step:7101/10000 train_time:291898ms step_avg:41.11ms +[2025-09-11 11:35:20] [Rank 0] step:7101/10000 train_time:291898ms step_avg:41.11ms +[2025-09-11 11:35:20] [Rank 0] step:7121/10000 train_time:292594ms step_avg:41.09ms +[2025-09-11 11:35:20] [Rank 0] step:7121/10000 train_time:292594ms step_avg:41.09ms +[2025-09-11 11:35:21] [Rank 0] step:7141/10000 train_time:293289ms step_avg:41.07ms +[2025-09-11 11:35:21] [Rank 0] step:7141/10000 train_time:293289ms step_avg:41.07ms +[2025-09-11 11:35:22] [Rank 0] step:7161/10000 train_time:293985ms step_avg:41.05ms +[2025-09-11 11:35:22] [Rank 0] step:7161/10000 train_time:293985ms step_avg:41.05ms +[2025-09-11 11:35:22] [Rank 0] step:7181/10000 train_time:294678ms step_avg:41.04ms +[2025-09-11 11:35:22] [Rank 0] step:7181/10000 train_time:294678ms step_avg:41.04ms +[2025-09-11 11:35:23] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 11:35:23] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 11:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 11:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 11:35:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 11:35:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 11:35:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:35:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:35:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 11:35:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 11:35:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 11:35:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 11:35:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 11:35:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 11:35:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 11:35:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 11:35:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 11:35:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 11:35:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 11:35:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 11:35:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 11:35:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 11:35:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 11:35:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 11:35:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 11:35:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 11:35:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 11:35:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 11:35:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 11:35:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 11:35:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 11:35:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 11:35:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 11:35:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 11:35:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 11:35:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 11:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 11:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 11:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 11:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 11:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 11:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 11:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 11:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 11:35:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:35:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:35:33] [Rank 0] PRINT: step:7200/10000 val_loss:4.9234 total_sharp:6.7809e-04 L1_sharp:3.2663e-04 L2_sharp:1.0860e-04 L3_sharp:8.3108e-05 L4_sharp:1.5464e-05 L5_sharp:1.0321e-04 L6_sharp:7.0756e-05 L7_sharp:7.0044e-05 L8_sharp:1.3123e-04 L9_sharp:1.2729e-04 L10_sharp:1.3687e-04 L11_sharp:2.2535e-04 L12_sharp:6.2408e-04 total_fnorm:1.5062e+01 total_l1_linf:3.3024e+04 total_spectral:7.5312e+00 L1_fnorm:4.5000e+00 L2_fnorm:4.3125e+00 L3_fnorm:4.2812e+00 L4_fnorm:4.3125e+00 L5_fnorm:4.1875e+00 L6_fnorm:4.3125e+00 L7_fnorm:4.2812e+00 L8_fnorm:4.1562e+00 L9_fnorm:4.2500e+00 L10_fnorm:4.2500e+00 L11_fnorm:4.2812e+00 L12_fnorm:4.2188e+00 L1_l1linf:1.0547e+00 L2_l1linf:9.5312e-01 L3_l1linf:9.4922e-01 L4_l1linf:9.3750e-01 L5_l1linf:9.2969e-01 L6_l1linf:9.1797e-01 L7_l1linf:8.7500e-01 L8_l1linf:8.5156e-01 L9_l1linf:8.2812e-01 L10_l1linf:8.1641e-01 L11_l1linf:8.8672e-01 L12_l1linf:9.1406e-01 L1_spectral:5.9240e-02 L2_spectral:5.7742e-02 L3_spectral:5.7743e-02 L4_spectral:5.8081e-02 L5_spectral:5.7359e-02 L6_spectral:5.8708e-02 L7_spectral:5.8563e-02 L8_spectral:5.7796e-02 L9_spectral:5.8352e-02 L10_spectral:5.8468e-02 L11_spectral:5.8513e-02 L12_spectral:5.7943e-02 train_time:295352ms step_avg:41.02ms +[2025-09-11 11:35:33] [Rank 0] PRINT: step:7200/10000 
val_loss:4.9234 total_sharp:6.7809e-04 L1_sharp:3.2663e-04 L2_sharp:1.0860e-04 L3_sharp:8.3108e-05 L4_sharp:1.5464e-05 L5_sharp:1.0321e-04 L6_sharp:7.0756e-05 L7_sharp:7.0044e-05 L8_sharp:1.3123e-04 L9_sharp:1.2729e-04 L10_sharp:1.3687e-04 L11_sharp:2.2535e-04 L12_sharp:6.2408e-04 total_fnorm:1.5062e+01 total_l1_linf:3.3024e+04 total_spectral:7.5312e+00 L1_fnorm:4.5000e+00 L2_fnorm:4.3125e+00 L3_fnorm:4.2812e+00 L4_fnorm:4.3125e+00 L5_fnorm:4.1875e+00 L6_fnorm:4.3125e+00 L7_fnorm:4.2812e+00 L8_fnorm:4.1562e+00 L9_fnorm:4.2500e+00 L10_fnorm:4.2500e+00 L11_fnorm:4.2812e+00 L12_fnorm:4.2188e+00 L1_l1linf:1.0547e+00 L2_l1linf:9.5312e-01 L3_l1linf:9.4922e-01 L4_l1linf:9.3750e-01 L5_l1linf:9.2969e-01 L6_l1linf:9.1797e-01 L7_l1linf:8.7500e-01 L8_l1linf:8.5156e-01 L9_l1linf:8.2812e-01 L10_l1linf:8.1641e-01 L11_l1linf:8.8672e-01 L12_l1linf:9.1406e-01 L1_spectral:5.9240e-02 L2_spectral:5.7742e-02 L3_spectral:5.7743e-02 L4_spectral:5.8081e-02 L5_spectral:5.7359e-02 L6_spectral:5.8708e-02 L7_spectral:5.8563e-02 L8_spectral:5.7796e-02 L9_spectral:5.8352e-02 L10_spectral:5.8468e-02 L11_spectral:5.8513e-02 L12_spectral:5.7943e-02 train_time:295352ms step_avg:41.02ms +[2025-09-11 11:35:34] [Rank 0] step:7201/10000 train_time:296542ms step_avg:41.18ms +[2025-09-11 11:35:34] [Rank 0] step:7201/10000 train_time:296542ms step_avg:41.18ms +[2025-09-11 11:35:35] [Rank 0] step:7221/10000 train_time:297258ms step_avg:41.17ms +[2025-09-11 11:35:35] [Rank 0] step:7221/10000 train_time:297258ms step_avg:41.17ms +[2025-09-11 11:35:35] [Rank 0] step:7241/10000 train_time:297954ms step_avg:41.15ms +[2025-09-11 11:35:35] [Rank 0] step:7241/10000 train_time:297954ms step_avg:41.15ms +[2025-09-11 11:35:36] [Rank 0] step:7261/10000 train_time:298651ms step_avg:41.13ms +[2025-09-11 11:35:36] [Rank 0] step:7261/10000 train_time:298651ms step_avg:41.13ms +[2025-09-11 11:35:37] [Rank 0] step:7281/10000 train_time:299352ms step_avg:41.11ms +[2025-09-11 11:35:37] [Rank 0] step:7281/10000 
train_time:299352ms step_avg:41.11ms +[2025-09-11 11:35:38] [Rank 0] step:7301/10000 train_time:300045ms step_avg:41.10ms +[2025-09-11 11:35:38] [Rank 0] step:7301/10000 train_time:300045ms step_avg:41.10ms +[2025-09-11 11:35:38] [Rank 0] step:7321/10000 train_time:300740ms step_avg:41.08ms +[2025-09-11 11:35:38] [Rank 0] step:7321/10000 train_time:300740ms step_avg:41.08ms +[2025-09-11 11:35:39] [Rank 0] step:7341/10000 train_time:301437ms step_avg:41.06ms +[2025-09-11 11:35:39] [Rank 0] step:7341/10000 train_time:301437ms step_avg:41.06ms +[2025-09-11 11:35:40] [Rank 0] step:7361/10000 train_time:302132ms step_avg:41.04ms +[2025-09-11 11:35:40] [Rank 0] step:7361/10000 train_time:302132ms step_avg:41.04ms +[2025-09-11 11:35:40] [Rank 0] step:7381/10000 train_time:302828ms step_avg:41.03ms +[2025-09-11 11:35:40] [Rank 0] step:7381/10000 train_time:302828ms step_avg:41.03ms +[2025-09-11 11:35:41] [Rank 0] step:7401/10000 train_time:303523ms step_avg:41.01ms +[2025-09-11 11:35:41] [Rank 0] step:7401/10000 train_time:303523ms step_avg:41.01ms +[2025-09-11 11:35:42] [Rank 0] step:7421/10000 train_time:304218ms step_avg:40.99ms +[2025-09-11 11:35:42] [Rank 0] step:7421/10000 train_time:304218ms step_avg:40.99ms +[2025-09-11 11:35:42] [Rank 0] step:7441/10000 train_time:304914ms step_avg:40.98ms +[2025-09-11 11:35:42] [Rank 0] step:7441/10000 train_time:304914ms step_avg:40.98ms +[2025-09-11 11:35:43] [Rank 0] step:7461/10000 train_time:305609ms step_avg:40.96ms +[2025-09-11 11:35:43] [Rank 0] step:7461/10000 train_time:305609ms step_avg:40.96ms +[2025-09-11 11:35:44] [Rank 0] step:7481/10000 train_time:306308ms step_avg:40.94ms +[2025-09-11 11:35:44] [Rank 0] step:7481/10000 train_time:306308ms step_avg:40.94ms +[2025-09-11 11:35:45] [Rank 0] step:7501/10000 train_time:307005ms step_avg:40.93ms +[2025-09-11 11:35:45] [Rank 0] step:7501/10000 train_time:307005ms step_avg:40.93ms +[2025-09-11 11:35:45] [Rank 0] step:7521/10000 train_time:307701ms step_avg:40.91ms 
+[2025-09-11 11:35:45] [Rank 0] step:7521/10000 train_time:307701ms step_avg:40.91ms +[2025-09-11 11:35:46] [Rank 0] step:7541/10000 train_time:308396ms step_avg:40.90ms +[2025-09-11 11:35:46] [Rank 0] step:7541/10000 train_time:308396ms step_avg:40.90ms +[2025-09-11 11:35:47] [Rank 0] step:7561/10000 train_time:309093ms step_avg:40.88ms +[2025-09-11 11:35:47] [Rank 0] step:7561/10000 train_time:309093ms step_avg:40.88ms +[2025-09-11 11:35:48] [Rank 0] step:7581/10000 train_time:310338ms step_avg:40.94ms +[2025-09-11 11:35:48] [Rank 0] step:7581/10000 train_time:310338ms step_avg:40.94ms +[2025-09-11 11:35:49] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 11:35:49] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 11:35:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 11:35:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 11:35:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 11:35:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 11:35:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:35:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:35:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 11:35:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 11:35:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 11:35:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 11:35:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 11:35:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 11:35:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 11:35:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 11:35:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 11:35:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 11:35:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 11:35:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 11:35:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 11:35:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 11:35:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 11:35:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 11:35:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 11:35:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 11:35:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 11:35:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 11:35:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 11:35:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 11:35:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 11:35:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 11:35:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 11:35:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 11:35:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 11:35:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 11:35:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 11:35:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 11:35:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 11:35:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 11:35:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 11:35:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 11:35:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 11:35:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 11:35:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:35:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:35:58] [Rank 0] PRINT: step:7600/10000 val_loss:4.8981 total_sharp:6.7736e-04 L1_sharp:2.0768e-04 L2_sharp:1.3236e-04 L3_sharp:8.6938e-05 L4_sharp:4.5268e-05 L5_sharp:8.0058e-05 L6_sharp:6.4216e-05 L7_sharp:5.4006e-05 L8_sharp:1.4040e-04 L9_sharp:1.2462e-04 L10_sharp:1.2998e-04 L11_sharp:2.1750e-04 L12_sharp:5.9213e-04 total_fnorm:1.2562e+01 total_l1_linf:2.5984e+04 total_spectral:6.3438e+00 L1_fnorm:3.7969e+00 L2_fnorm:3.6719e+00 L3_fnorm:3.6094e+00 L4_fnorm:3.6406e+00 L5_fnorm:3.5469e+00 L6_fnorm:3.6250e+00 L7_fnorm:3.6094e+00 L8_fnorm:3.4844e+00 L9_fnorm:3.5781e+00 L10_fnorm:3.5781e+00 L11_fnorm:3.6094e+00 L12_fnorm:3.5625e+00 L1_l1linf:8.4766e-01 L2_l1linf:7.6953e-01 L3_l1linf:7.7734e-01 L4_l1linf:7.5781e-01 L5_l1linf:7.3047e-01 L6_l1linf:7.4609e-01 L7_l1linf:7.1484e-01 L8_l1linf:6.8750e-01 L9_l1linf:6.5625e-01 L10_l1linf:6.6406e-01 L11_l1linf:6.8750e-01 L12_l1linf:7.5000e-01 L1_spectral:5.1376e-02 L2_spectral:4.9788e-02 L3_spectral:4.9959e-02 L4_spectral:5.0316e-02 L5_spectral:4.9444e-02 L6_spectral:5.0502e-02 L7_spectral:5.0915e-02 L8_spectral:5.0238e-02 L9_spectral:5.0353e-02 L10_spectral:5.0365e-02 L11_spectral:5.0567e-02 L12_spectral:5.0484e-02 train_time:311015ms step_avg:40.92ms +[2025-09-11 11:35:58] [Rank 0] PRINT: step:7600/10000 val_loss:4.8981 total_sharp:6.7736e-04 L1_sharp:2.0768e-04 L2_sharp:1.3236e-04 L3_sharp:8.6938e-05 L4_sharp:4.5268e-05 L5_sharp:8.0058e-05 L6_sharp:6.4216e-05 L7_sharp:5.4006e-05 L8_sharp:1.4040e-04 L9_sharp:1.2462e-04 L10_sharp:1.2998e-04 L11_sharp:2.1750e-04 L12_sharp:5.9213e-04 total_fnorm:1.2562e+01 total_l1_linf:2.5984e+04 total_spectral:6.3438e+00 L1_fnorm:3.7969e+00 L2_fnorm:3.6719e+00 L3_fnorm:3.6094e+00 L4_fnorm:3.6406e+00 L5_fnorm:3.5469e+00 L6_fnorm:3.6250e+00 L7_fnorm:3.6094e+00 L8_fnorm:3.4844e+00 L9_fnorm:3.5781e+00 L10_fnorm:3.5781e+00 L11_fnorm:3.6094e+00 L12_fnorm:3.5625e+00 L1_l1linf:8.4766e-01 L2_l1linf:7.6953e-01 L3_l1linf:7.7734e-01 L4_l1linf:7.5781e-01 L5_l1linf:7.3047e-01 
L6_l1linf:7.4609e-01 L7_l1linf:7.1484e-01 L8_l1linf:6.8750e-01 L9_l1linf:6.5625e-01 L10_l1linf:6.6406e-01 L11_l1linf:6.8750e-01 L12_l1linf:7.5000e-01 L1_spectral:5.1376e-02 L2_spectral:4.9788e-02 L3_spectral:4.9959e-02 L4_spectral:5.0316e-02 L5_spectral:4.9444e-02 L6_spectral:5.0502e-02 L7_spectral:5.0915e-02 L8_spectral:5.0238e-02 L9_spectral:5.0353e-02 L10_spectral:5.0365e-02 L11_spectral:5.0567e-02 L12_spectral:5.0484e-02 train_time:311015ms step_avg:40.92ms +[2025-09-11 11:35:59] [Rank 0] step:7601/10000 train_time:312219ms step_avg:41.08ms +[2025-09-11 11:35:59] [Rank 0] step:7601/10000 train_time:312219ms step_avg:41.08ms +[2025-09-11 11:36:00] [Rank 0] step:7621/10000 train_time:312917ms step_avg:41.06ms +[2025-09-11 11:36:00] [Rank 0] step:7621/10000 train_time:312917ms step_avg:41.06ms +[2025-09-11 11:36:01] [Rank 0] step:7641/10000 train_time:313615ms step_avg:41.04ms +[2025-09-11 11:36:01] [Rank 0] step:7641/10000 train_time:313615ms step_avg:41.04ms +[2025-09-11 11:36:01] [Rank 0] step:7661/10000 train_time:314311ms step_avg:41.03ms +[2025-09-11 11:36:01] [Rank 0] step:7661/10000 train_time:314311ms step_avg:41.03ms +[2025-09-11 11:36:02] [Rank 0] step:7681/10000 train_time:315009ms step_avg:41.01ms +[2025-09-11 11:36:02] [Rank 0] step:7681/10000 train_time:315009ms step_avg:41.01ms +[2025-09-11 11:36:03] [Rank 0] step:7701/10000 train_time:315708ms step_avg:41.00ms +[2025-09-11 11:36:03] [Rank 0] step:7701/10000 train_time:315708ms step_avg:41.00ms +[2025-09-11 11:36:04] [Rank 0] step:7721/10000 train_time:316405ms step_avg:40.98ms +[2025-09-11 11:36:04] [Rank 0] step:7721/10000 train_time:316405ms step_avg:40.98ms +[2025-09-11 11:36:04] [Rank 0] step:7741/10000 train_time:317102ms step_avg:40.96ms +[2025-09-11 11:36:04] [Rank 0] step:7741/10000 train_time:317102ms step_avg:40.96ms +[2025-09-11 11:36:05] [Rank 0] step:7761/10000 train_time:317799ms step_avg:40.95ms +[2025-09-11 11:36:05] [Rank 0] step:7761/10000 train_time:317799ms step_avg:40.95ms 
+[2025-09-11 11:36:06] [Rank 0] step:7781/10000 train_time:318497ms step_avg:40.93ms +[2025-09-11 11:36:06] [Rank 0] step:7781/10000 train_time:318497ms step_avg:40.93ms +[2025-09-11 11:36:06] [Rank 0] step:7801/10000 train_time:319193ms step_avg:40.92ms +[2025-09-11 11:36:06] [Rank 0] step:7801/10000 train_time:319193ms step_avg:40.92ms +[2025-09-11 11:36:07] [Rank 0] step:7821/10000 train_time:319890ms step_avg:40.90ms +[2025-09-11 11:36:07] [Rank 0] step:7821/10000 train_time:319890ms step_avg:40.90ms +[2025-09-11 11:36:08] [Rank 0] step:7841/10000 train_time:320589ms step_avg:40.89ms +[2025-09-11 11:36:08] [Rank 0] step:7841/10000 train_time:320589ms step_avg:40.89ms +[2025-09-11 11:36:08] [Rank 0] step:7861/10000 train_time:321287ms step_avg:40.87ms +[2025-09-11 11:36:08] [Rank 0] step:7861/10000 train_time:321287ms step_avg:40.87ms +[2025-09-11 11:36:09] [Rank 0] step:7881/10000 train_time:321984ms step_avg:40.86ms +[2025-09-11 11:36:09] [Rank 0] step:7881/10000 train_time:321984ms step_avg:40.86ms +[2025-09-11 11:36:10] [Rank 0] step:7901/10000 train_time:322682ms step_avg:40.84ms +[2025-09-11 11:36:10] [Rank 0] step:7901/10000 train_time:322682ms step_avg:40.84ms +[2025-09-11 11:36:10] [Rank 0] step:7921/10000 train_time:323379ms step_avg:40.83ms +[2025-09-11 11:36:10] [Rank 0] step:7921/10000 train_time:323379ms step_avg:40.83ms +[2025-09-11 11:36:11] [Rank 0] step:7941/10000 train_time:324077ms step_avg:40.81ms +[2025-09-11 11:36:11] [Rank 0] step:7941/10000 train_time:324077ms step_avg:40.81ms +[2025-09-11 11:36:12] [Rank 0] step:7961/10000 train_time:324773ms step_avg:40.80ms +[2025-09-11 11:36:12] [Rank 0] step:7961/10000 train_time:324773ms step_avg:40.80ms +[2025-09-11 11:36:13] [Rank 0] step:7981/10000 train_time:325472ms step_avg:40.78ms +[2025-09-11 11:36:13] [Rank 0] step:7981/10000 train_time:325472ms step_avg:40.78ms +[2025-09-11 11:36:13] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 11:36:13] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 11:36:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 11:36:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 11:36:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 11:36:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 11:36:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:36:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:36:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 11:36:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 11:36:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 11:36:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 11:36:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 11:36:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 11:36:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 11:36:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 11:36:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 11:36:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 11:36:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 11:36:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 11:36:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 11:36:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 11:36:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 11:36:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 11:36:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 11:36:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 11:36:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 11:36:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 11:36:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 11:36:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 11:36:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 11:36:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 11:36:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 11:36:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 11:36:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 11:36:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 11:36:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 11:36:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 11:36:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 11:36:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 11:36:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 11:36:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 11:36:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 11:36:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 11:36:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:36:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:36:23] [Rank 0] PRINT: step:8000/10000 val_loss:4.8815 total_sharp:7.1755e-04 L1_sharp:1.8361e-04 L2_sharp:1.2279e-04 L3_sharp:7.4164e-05 L4_sharp:6.8999e-05 L5_sharp:8.3977e-05 L6_sharp:9.4382e-05 L7_sharp:6.8416e-05 L8_sharp:1.4123e-04 L9_sharp:1.3325e-04 L10_sharp:1.5680e-04 L11_sharp:2.2753e-04 L12_sharp:5.7592e-04 total_fnorm:1.0250e+01 total_l1_linf:1.9584e+04 total_spectral:5.1562e+00 L1_fnorm:3.1094e+00 L2_fnorm:2.9688e+00 L3_fnorm:2.9375e+00 L4_fnorm:2.9531e+00 L5_fnorm:2.8906e+00 L6_fnorm:2.9375e+00 L7_fnorm:2.9375e+00 L8_fnorm:2.8125e+00 L9_fnorm:2.8906e+00 L10_fnorm:2.8906e+00 L11_fnorm:2.9375e+00 L12_fnorm:2.8750e+00 L1_l1linf:6.5234e-01 L2_l1linf:6.0547e-01 L3_l1linf:6.0156e-01 L4_l1linf:5.9375e-01 L5_l1linf:5.6250e-01 L6_l1linf:5.5469e-01 L7_l1linf:5.3906e-01 L8_l1linf:5.2344e-01 L9_l1linf:5.0781e-01 L10_l1linf:5.1953e-01 L11_l1linf:5.3906e-01 L12_l1linf:5.7812e-01 L1_spectral:4.2953e-02 L2_spectral:4.1388e-02 L3_spectral:4.1911e-02 L4_spectral:4.1974e-02 L5_spectral:4.1617e-02 L6_spectral:4.1729e-02 L7_spectral:4.2026e-02 L8_spectral:4.1953e-02 L9_spectral:4.1885e-02 L10_spectral:4.2008e-02 L11_spectral:4.2125e-02 L12_spectral:4.1859e-02 train_time:326148ms step_avg:40.77ms +[2025-09-11 11:36:23] [Rank 0] PRINT: step:8000/10000 
val_loss:4.8815 total_sharp:7.1755e-04 L1_sharp:1.8361e-04 L2_sharp:1.2279e-04 L3_sharp:7.4164e-05 L4_sharp:6.8999e-05 L5_sharp:8.3977e-05 L6_sharp:9.4382e-05 L7_sharp:6.8416e-05 L8_sharp:1.4123e-04 L9_sharp:1.3325e-04 L10_sharp:1.5680e-04 L11_sharp:2.2753e-04 L12_sharp:5.7592e-04 total_fnorm:1.0250e+01 total_l1_linf:1.9584e+04 total_spectral:5.1562e+00 L1_fnorm:3.1094e+00 L2_fnorm:2.9688e+00 L3_fnorm:2.9375e+00 L4_fnorm:2.9531e+00 L5_fnorm:2.8906e+00 L6_fnorm:2.9375e+00 L7_fnorm:2.9375e+00 L8_fnorm:2.8125e+00 L9_fnorm:2.8906e+00 L10_fnorm:2.8906e+00 L11_fnorm:2.9375e+00 L12_fnorm:2.8750e+00 L1_l1linf:6.5234e-01 L2_l1linf:6.0547e-01 L3_l1linf:6.0156e-01 L4_l1linf:5.9375e-01 L5_l1linf:5.6250e-01 L6_l1linf:5.5469e-01 L7_l1linf:5.3906e-01 L8_l1linf:5.2344e-01 L9_l1linf:5.0781e-01 L10_l1linf:5.1953e-01 L11_l1linf:5.3906e-01 L12_l1linf:5.7812e-01 L1_spectral:4.2953e-02 L2_spectral:4.1388e-02 L3_spectral:4.1911e-02 L4_spectral:4.1974e-02 L5_spectral:4.1617e-02 L6_spectral:4.1729e-02 L7_spectral:4.2026e-02 L8_spectral:4.1953e-02 L9_spectral:4.1885e-02 L10_spectral:4.2008e-02 L11_spectral:4.2125e-02 L12_spectral:4.1859e-02 train_time:326148ms step_avg:40.77ms +[2025-09-11 11:36:24] [Rank 0] step:8001/10000 train_time:327350ms step_avg:40.91ms +[2025-09-11 11:36:24] [Rank 0] step:8001/10000 train_time:327350ms step_avg:40.91ms +[2025-09-11 11:36:25] [Rank 0] step:8021/10000 train_time:328037ms step_avg:40.90ms +[2025-09-11 11:36:25] [Rank 0] step:8021/10000 train_time:328037ms step_avg:40.90ms +[2025-09-11 11:36:26] [Rank 0] step:8041/10000 train_time:328735ms step_avg:40.88ms +[2025-09-11 11:36:26] [Rank 0] step:8041/10000 train_time:328735ms step_avg:40.88ms +[2025-09-11 11:36:27] [Rank 0] step:8061/10000 train_time:329437ms step_avg:40.87ms +[2025-09-11 11:36:27] [Rank 0] step:8061/10000 train_time:329437ms step_avg:40.87ms +[2025-09-11 11:36:27] [Rank 0] step:8081/10000 train_time:330132ms step_avg:40.85ms +[2025-09-11 11:36:27] [Rank 0] step:8081/10000 
train_time:330132ms step_avg:40.85ms +[2025-09-11 11:36:28] [Rank 0] step:8101/10000 train_time:330828ms step_avg:40.84ms +[2025-09-11 11:36:28] [Rank 0] step:8101/10000 train_time:330828ms step_avg:40.84ms +[2025-09-11 11:36:29] [Rank 0] step:8121/10000 train_time:331529ms step_avg:40.82ms +[2025-09-11 11:36:29] [Rank 0] step:8121/10000 train_time:331529ms step_avg:40.82ms +[2025-09-11 11:36:30] [Rank 0] step:8141/10000 train_time:332948ms step_avg:40.90ms +[2025-09-11 11:36:30] [Rank 0] step:8141/10000 train_time:332948ms step_avg:40.90ms +[2025-09-11 11:36:31] [Rank 0] step:8161/10000 train_time:333649ms step_avg:40.88ms +[2025-09-11 11:36:31] [Rank 0] step:8161/10000 train_time:333649ms step_avg:40.88ms +[2025-09-11 11:36:31] [Rank 0] step:8181/10000 train_time:334358ms step_avg:40.87ms +[2025-09-11 11:36:31] [Rank 0] step:8181/10000 train_time:334358ms step_avg:40.87ms +[2025-09-11 11:36:32] [Rank 0] step:8201/10000 train_time:335063ms step_avg:40.86ms +[2025-09-11 11:36:32] [Rank 0] step:8201/10000 train_time:335063ms step_avg:40.86ms +[2025-09-11 11:36:33] [Rank 0] step:8221/10000 train_time:335768ms step_avg:40.84ms +[2025-09-11 11:36:33] [Rank 0] step:8221/10000 train_time:335768ms step_avg:40.84ms +[2025-09-11 11:36:34] [Rank 0] step:8241/10000 train_time:336481ms step_avg:40.83ms +[2025-09-11 11:36:34] [Rank 0] step:8241/10000 train_time:336481ms step_avg:40.83ms +[2025-09-11 11:36:34] [Rank 0] step:8261/10000 train_time:337184ms step_avg:40.82ms +[2025-09-11 11:36:34] [Rank 0] step:8261/10000 train_time:337184ms step_avg:40.82ms +[2025-09-11 11:36:35] [Rank 0] step:8281/10000 train_time:337885ms step_avg:40.80ms +[2025-09-11 11:36:35] [Rank 0] step:8281/10000 train_time:337885ms step_avg:40.80ms +[2025-09-11 11:36:36] [Rank 0] step:8301/10000 train_time:338589ms step_avg:40.79ms +[2025-09-11 11:36:36] [Rank 0] step:8301/10000 train_time:338589ms step_avg:40.79ms +[2025-09-11 11:36:36] [Rank 0] step:8321/10000 train_time:339293ms step_avg:40.78ms 
+[2025-09-11 11:36:36] [Rank 0] step:8321/10000 train_time:339293ms step_avg:40.78ms +[2025-09-11 11:36:37] [Rank 0] step:8341/10000 train_time:340003ms step_avg:40.76ms +[2025-09-11 11:36:37] [Rank 0] step:8341/10000 train_time:340003ms step_avg:40.76ms +[2025-09-11 11:36:38] [Rank 0] step:8361/10000 train_time:340702ms step_avg:40.75ms +[2025-09-11 11:36:38] [Rank 0] step:8361/10000 train_time:340702ms step_avg:40.75ms +[2025-09-11 11:36:39] [Rank 0] step:8381/10000 train_time:341409ms step_avg:40.74ms +[2025-09-11 11:36:39] [Rank 0] step:8381/10000 train_time:341409ms step_avg:40.74ms +[2025-09-11 11:36:39] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 11:36:39] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 11:36:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 11:36:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 11:36:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 11:36:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 11:36:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:36:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:36:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 11:36:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 11:36:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 11:36:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 11:36:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 11:36:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 11:36:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 11:36:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 11:36:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 11:36:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 11:36:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 11:36:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 11:36:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 11:36:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 11:36:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 11:36:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 11:36:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 11:36:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 11:36:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 11:36:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 11:36:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 11:36:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 11:36:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 11:36:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 11:36:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 11:36:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 11:36:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 11:36:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 11:36:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 11:36:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 11:36:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 11:36:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 11:36:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 11:36:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 11:36:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 11:36:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 11:36:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:36:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:36:49] [Rank 0] PRINT: step:8400/10000 val_loss:4.8622 total_sharp:5.5058e-04 L1_sharp:2.9698e-04 L2_sharp:1.1272e-04 L3_sharp:4.0987e-05 L4_sharp:4.9553e-05 L5_sharp:7.2742e-05 L6_sharp:7.4249e-05 L7_sharp:4.5513e-05 L8_sharp:1.2255e-04 L9_sharp:1.0591e-04 L10_sharp:1.0907e-04 L11_sharp:1.7304e-04 L12_sharp:5.0034e-04 total_fnorm:8.0625e+00 total_l1_linf:1.4016e+04 total_spectral:4.0000e+00 L1_fnorm:2.4375e+00 L2_fnorm:2.3125e+00 L3_fnorm:2.2969e+00 L4_fnorm:2.2969e+00 L5_fnorm:2.2188e+00 L6_fnorm:2.2812e+00 L7_fnorm:2.2812e+00 L8_fnorm:2.2031e+00 L9_fnorm:2.2344e+00 L10_fnorm:2.2500e+00 L11_fnorm:2.2656e+00 L12_fnorm:2.2500e+00 L1_l1linf:4.9219e-01 L2_l1linf:4.4922e-01 L3_l1linf:4.3750e-01 L4_l1linf:4.3945e-01 L5_l1linf:4.2188e-01 L6_l1linf:4.2383e-01 L7_l1linf:3.9453e-01 L8_l1linf:3.7500e-01 L9_l1linf:3.7891e-01 L10_l1linf:3.6328e-01 L11_l1linf:3.8672e-01 L12_l1linf:4.3945e-01 L1_spectral:3.4249e-02 L2_spectral:3.2719e-02 L3_spectral:3.2473e-02 L4_spectral:3.3034e-02 L5_spectral:3.2919e-02 L6_spectral:3.3361e-02 L7_spectral:3.3428e-02 L8_spectral:3.3766e-02 L9_spectral:3.3354e-02 L10_spectral:3.3211e-02 L11_spectral:3.3328e-02 L12_spectral:3.3381e-02 train_time:342095ms step_avg:40.73ms +[2025-09-11 11:36:49] [Rank 0] PRINT: step:8400/10000 val_loss:4.8622 total_sharp:5.5058e-04 L1_sharp:2.9698e-04 L2_sharp:1.1272e-04 L3_sharp:4.0987e-05 L4_sharp:4.9553e-05 L5_sharp:7.2742e-05 L6_sharp:7.4249e-05 L7_sharp:4.5513e-05 L8_sharp:1.2255e-04 L9_sharp:1.0591e-04 L10_sharp:1.0907e-04 L11_sharp:1.7304e-04 L12_sharp:5.0034e-04 total_fnorm:8.0625e+00 total_l1_linf:1.4016e+04 total_spectral:4.0000e+00 L1_fnorm:2.4375e+00 L2_fnorm:2.3125e+00 L3_fnorm:2.2969e+00 L4_fnorm:2.2969e+00 L5_fnorm:2.2188e+00 L6_fnorm:2.2812e+00 L7_fnorm:2.2812e+00 L8_fnorm:2.2031e+00 L9_fnorm:2.2344e+00 L10_fnorm:2.2500e+00 L11_fnorm:2.2656e+00 L12_fnorm:2.2500e+00 L1_l1linf:4.9219e-01 L2_l1linf:4.4922e-01 L3_l1linf:4.3750e-01 L4_l1linf:4.3945e-01 L5_l1linf:4.2188e-01 
L6_l1linf:4.2383e-01 L7_l1linf:3.9453e-01 L8_l1linf:3.7500e-01 L9_l1linf:3.7891e-01 L10_l1linf:3.6328e-01 L11_l1linf:3.8672e-01 L12_l1linf:4.3945e-01 L1_spectral:3.4249e-02 L2_spectral:3.2719e-02 L3_spectral:3.2473e-02 L4_spectral:3.3034e-02 L5_spectral:3.2919e-02 L6_spectral:3.3361e-02 L7_spectral:3.3428e-02 L8_spectral:3.3766e-02 L9_spectral:3.3354e-02 L10_spectral:3.3211e-02 L11_spectral:3.3328e-02 L12_spectral:3.3381e-02 train_time:342095ms step_avg:40.73ms +[2025-09-11 11:36:51] [Rank 0] step:8401/10000 train_time:343294ms step_avg:40.86ms +[2025-09-11 11:36:51] [Rank 0] step:8401/10000 train_time:343294ms step_avg:40.86ms +[2025-09-11 11:36:51] [Rank 0] step:8421/10000 train_time:344178ms step_avg:40.87ms +[2025-09-11 11:36:51] [Rank 0] step:8421/10000 train_time:344178ms step_avg:40.87ms +[2025-09-11 11:36:52] [Rank 0] step:8441/10000 train_time:344886ms step_avg:40.86ms +[2025-09-11 11:36:52] [Rank 0] step:8441/10000 train_time:344886ms step_avg:40.86ms +[2025-09-11 11:36:53] [Rank 0] step:8461/10000 train_time:345867ms step_avg:40.88ms +[2025-09-11 11:36:53] [Rank 0] step:8461/10000 train_time:345867ms step_avg:40.88ms +[2025-09-11 11:36:54] [Rank 0] step:8481/10000 train_time:346574ms step_avg:40.86ms +[2025-09-11 11:36:54] [Rank 0] step:8481/10000 train_time:346574ms step_avg:40.86ms +[2025-09-11 11:36:54] [Rank 0] step:8501/10000 train_time:347279ms step_avg:40.85ms +[2025-09-11 11:36:54] [Rank 0] step:8501/10000 train_time:347279ms step_avg:40.85ms +[2025-09-11 11:36:55] [Rank 0] step:8521/10000 train_time:347984ms step_avg:40.84ms +[2025-09-11 11:36:55] [Rank 0] step:8521/10000 train_time:347984ms step_avg:40.84ms +[2025-09-11 11:36:56] [Rank 0] step:8541/10000 train_time:348688ms step_avg:40.83ms +[2025-09-11 11:36:56] [Rank 0] step:8541/10000 train_time:348688ms step_avg:40.83ms +[2025-09-11 11:36:57] [Rank 0] step:8561/10000 train_time:349398ms step_avg:40.81ms +[2025-09-11 11:36:57] [Rank 0] step:8561/10000 train_time:349398ms step_avg:40.81ms 
+[2025-09-11 11:36:57] [Rank 0] step:8581/10000 train_time:350106ms step_avg:40.80ms +[2025-09-11 11:36:57] [Rank 0] step:8581/10000 train_time:350106ms step_avg:40.80ms +[2025-09-11 11:36:58] [Rank 0] step:8601/10000 train_time:350812ms step_avg:40.79ms +[2025-09-11 11:36:58] [Rank 0] step:8601/10000 train_time:350812ms step_avg:40.79ms +[2025-09-11 11:36:59] [Rank 0] step:8621/10000 train_time:351517ms step_avg:40.77ms +[2025-09-11 11:36:59] [Rank 0] step:8621/10000 train_time:351517ms step_avg:40.77ms +[2025-09-11 11:36:59] [Rank 0] step:8641/10000 train_time:352221ms step_avg:40.76ms +[2025-09-11 11:36:59] [Rank 0] step:8641/10000 train_time:352221ms step_avg:40.76ms +[2025-09-11 11:37:00] [Rank 0] step:8661/10000 train_time:352927ms step_avg:40.75ms +[2025-09-11 11:37:00] [Rank 0] step:8661/10000 train_time:352927ms step_avg:40.75ms +[2025-09-11 11:37:01] [Rank 0] step:8681/10000 train_time:353634ms step_avg:40.74ms +[2025-09-11 11:37:01] [Rank 0] step:8681/10000 train_time:353634ms step_avg:40.74ms +[2025-09-11 11:37:02] [Rank 0] step:8701/10000 train_time:354338ms step_avg:40.72ms +[2025-09-11 11:37:02] [Rank 0] step:8701/10000 train_time:354338ms step_avg:40.72ms +[2025-09-11 11:37:02] [Rank 0] step:8721/10000 train_time:355047ms step_avg:40.71ms +[2025-09-11 11:37:02] [Rank 0] step:8721/10000 train_time:355047ms step_avg:40.71ms +[2025-09-11 11:37:03] [Rank 0] step:8741/10000 train_time:355750ms step_avg:40.70ms +[2025-09-11 11:37:03] [Rank 0] step:8741/10000 train_time:355750ms step_avg:40.70ms +[2025-09-11 11:37:04] [Rank 0] step:8761/10000 train_time:356459ms step_avg:40.69ms +[2025-09-11 11:37:04] [Rank 0] step:8761/10000 train_time:356459ms step_avg:40.69ms +[2025-09-11 11:37:04] [Rank 0] step:8781/10000 train_time:357162ms step_avg:40.67ms +[2025-09-11 11:37:04] [Rank 0] step:8781/10000 train_time:357162ms step_avg:40.67ms +[2025-09-11 11:37:05] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 11:37:05] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 11:37:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 11:37:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 11:37:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 11:37:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 11:37:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:37:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:37:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 11:37:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 11:37:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 11:37:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 11:37:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 11:37:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 11:37:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 11:37:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 11:37:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 11:37:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 11:37:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 11:37:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 11:37:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 11:37:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 11:37:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 11:37:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 11:37:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 11:37:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 11:37:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 11:37:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 11:37:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 11:37:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 11:37:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 11:37:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 11:37:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 11:37:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 11:37:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 11:37:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 11:37:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 11:37:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 11:37:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 11:37:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 11:37:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 11:37:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 11:37:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 11:37:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 11:37:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:37:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:37:15] [Rank 0] PRINT: step:8800/10000 val_loss:4.8479 total_sharp:4.5631e-04 L1_sharp:1.5506e-04 L2_sharp:8.6982e-05 L3_sharp:4.5296e-05 L4_sharp:2.7546e-05 L5_sharp:5.7073e-05 L6_sharp:5.6976e-05 L7_sharp:5.1808e-05 L8_sharp:1.0348e-04 L9_sharp:9.1580e-05 L10_sharp:9.8883e-05 L11_sharp:1.7222e-04 L12_sharp:4.5062e-04 total_fnorm:5.8438e+00 total_l1_linf:9.1520e+03 total_spectral:2.9219e+00 L1_fnorm:1.7969e+00 L2_fnorm:1.7031e+00 L3_fnorm:1.6797e+00 L4_fnorm:1.6797e+00 L5_fnorm:1.6328e+00 L6_fnorm:1.6641e+00 L7_fnorm:1.6562e+00 L8_fnorm:1.6094e+00 L9_fnorm:1.6328e+00 L10_fnorm:1.6328e+00 L11_fnorm:1.6406e+00 L12_fnorm:1.6484e+00 L1_l1linf:3.3008e-01 L2_l1linf:2.9883e-01 L3_l1linf:3.0078e-01 L4_l1linf:3.0078e-01 L5_l1linf:2.7344e-01 L6_l1linf:2.7539e-01 L7_l1linf:2.6562e-01 L8_l1linf:2.5977e-01 L9_l1linf:2.3633e-01 L10_l1linf:2.4414e-01 L11_l1linf:2.5195e-01 L12_l1linf:3.0664e-01 L1_spectral:2.5512e-02 L2_spectral:2.4615e-02 L3_spectral:2.4327e-02 L4_spectral:2.4762e-02 L5_spectral:2.5207e-02 L6_spectral:2.4794e-02 L7_spectral:2.5068e-02 L8_spectral:2.5449e-02 L9_spectral:2.4962e-02 L10_spectral:2.4981e-02 L11_spectral:2.5127e-02 L12_spectral:2.4908e-02 train_time:357846ms step_avg:40.66ms +[2025-09-11 11:37:15] [Rank 0] PRINT: step:8800/10000 
val_loss:4.8479 total_sharp:4.5631e-04 L1_sharp:1.5506e-04 L2_sharp:8.6982e-05 L3_sharp:4.5296e-05 L4_sharp:2.7546e-05 L5_sharp:5.7073e-05 L6_sharp:5.6976e-05 L7_sharp:5.1808e-05 L8_sharp:1.0348e-04 L9_sharp:9.1580e-05 L10_sharp:9.8883e-05 L11_sharp:1.7222e-04 L12_sharp:4.5062e-04 total_fnorm:5.8438e+00 total_l1_linf:9.1520e+03 total_spectral:2.9219e+00 L1_fnorm:1.7969e+00 L2_fnorm:1.7031e+00 L3_fnorm:1.6797e+00 L4_fnorm:1.6797e+00 L5_fnorm:1.6328e+00 L6_fnorm:1.6641e+00 L7_fnorm:1.6562e+00 L8_fnorm:1.6094e+00 L9_fnorm:1.6328e+00 L10_fnorm:1.6328e+00 L11_fnorm:1.6406e+00 L12_fnorm:1.6484e+00 L1_l1linf:3.3008e-01 L2_l1linf:2.9883e-01 L3_l1linf:3.0078e-01 L4_l1linf:3.0078e-01 L5_l1linf:2.7344e-01 L6_l1linf:2.7539e-01 L7_l1linf:2.6562e-01 L8_l1linf:2.5977e-01 L9_l1linf:2.3633e-01 L10_l1linf:2.4414e-01 L11_l1linf:2.5195e-01 L12_l1linf:3.0664e-01 L1_spectral:2.5512e-02 L2_spectral:2.4615e-02 L3_spectral:2.4327e-02 L4_spectral:2.4762e-02 L5_spectral:2.5207e-02 L6_spectral:2.4794e-02 L7_spectral:2.5068e-02 L8_spectral:2.5449e-02 L9_spectral:2.4962e-02 L10_spectral:2.4981e-02 L11_spectral:2.5127e-02 L12_spectral:2.4908e-02 train_time:357846ms step_avg:40.66ms +[2025-09-11 11:37:16] [Rank 0] step:8801/10000 train_time:359033ms step_avg:40.79ms +[2025-09-11 11:37:16] [Rank 0] step:8801/10000 train_time:359033ms step_avg:40.79ms +[2025-09-11 11:37:17] [Rank 0] step:8821/10000 train_time:359744ms step_avg:40.78ms +[2025-09-11 11:37:17] [Rank 0] step:8821/10000 train_time:359744ms step_avg:40.78ms +[2025-09-11 11:37:18] [Rank 0] step:8841/10000 train_time:360450ms step_avg:40.77ms +[2025-09-11 11:37:18] [Rank 0] step:8841/10000 train_time:360450ms step_avg:40.77ms +[2025-09-11 11:37:18] [Rank 0] step:8861/10000 train_time:361155ms step_avg:40.76ms +[2025-09-11 11:37:18] [Rank 0] step:8861/10000 train_time:361155ms step_avg:40.76ms +[2025-09-11 11:37:19] [Rank 0] step:8881/10000 train_time:361859ms step_avg:40.75ms +[2025-09-11 11:37:19] [Rank 0] step:8881/10000 
train_time:361859ms step_avg:40.75ms +[2025-09-11 11:37:20] [Rank 0] step:8901/10000 train_time:362566ms step_avg:40.73ms +[2025-09-11 11:37:20] [Rank 0] step:8901/10000 train_time:362566ms step_avg:40.73ms +[2025-09-11 11:37:20] [Rank 0] step:8921/10000 train_time:363267ms step_avg:40.72ms +[2025-09-11 11:37:20] [Rank 0] step:8921/10000 train_time:363267ms step_avg:40.72ms +[2025-09-11 11:37:21] [Rank 0] step:8941/10000 train_time:363974ms step_avg:40.71ms +[2025-09-11 11:37:21] [Rank 0] step:8941/10000 train_time:363974ms step_avg:40.71ms +[2025-09-11 11:37:22] [Rank 0] step:8961/10000 train_time:364687ms step_avg:40.70ms +[2025-09-11 11:37:22] [Rank 0] step:8961/10000 train_time:364687ms step_avg:40.70ms +[2025-09-11 11:37:23] [Rank 0] step:8981/10000 train_time:365394ms step_avg:40.69ms +[2025-09-11 11:37:23] [Rank 0] step:8981/10000 train_time:365394ms step_avg:40.69ms +[2025-09-11 11:37:23] [Rank 0] step:9001/10000 train_time:366094ms step_avg:40.67ms +[2025-09-11 11:37:23] [Rank 0] step:9001/10000 train_time:366094ms step_avg:40.67ms +[2025-09-11 11:37:24] [Rank 0] step:9021/10000 train_time:366801ms step_avg:40.66ms +[2025-09-11 11:37:24] [Rank 0] step:9021/10000 train_time:366801ms step_avg:40.66ms +[2025-09-11 11:37:25] [Rank 0] step:9041/10000 train_time:367508ms step_avg:40.65ms +[2025-09-11 11:37:25] [Rank 0] step:9041/10000 train_time:367508ms step_avg:40.65ms +[2025-09-11 11:37:25] [Rank 0] step:9061/10000 train_time:368213ms step_avg:40.64ms +[2025-09-11 11:37:25] [Rank 0] step:9061/10000 train_time:368213ms step_avg:40.64ms +[2025-09-11 11:37:26] [Rank 0] step:9081/10000 train_time:368919ms step_avg:40.63ms +[2025-09-11 11:37:26] [Rank 0] step:9081/10000 train_time:368919ms step_avg:40.63ms +[2025-09-11 11:37:27] [Rank 0] step:9101/10000 train_time:369627ms step_avg:40.61ms +[2025-09-11 11:37:27] [Rank 0] step:9101/10000 train_time:369627ms step_avg:40.61ms +[2025-09-11 11:37:27] [Rank 0] step:9121/10000 train_time:370337ms step_avg:40.60ms 
+[2025-09-11 11:37:27] [Rank 0] step:9121/10000 train_time:370337ms step_avg:40.60ms +[2025-09-11 11:37:28] [Rank 0] step:9141/10000 train_time:371041ms step_avg:40.59ms +[2025-09-11 11:37:28] [Rank 0] step:9141/10000 train_time:371041ms step_avg:40.59ms +[2025-09-11 11:37:29] [Rank 0] step:9161/10000 train_time:371748ms step_avg:40.58ms +[2025-09-11 11:37:29] [Rank 0] step:9161/10000 train_time:371748ms step_avg:40.58ms +[2025-09-11 11:37:30] [Rank 0] step:9181/10000 train_time:372454ms step_avg:40.57ms +[2025-09-11 11:37:30] [Rank 0] step:9181/10000 train_time:372454ms step_avg:40.57ms +[2025-09-11 11:37:30] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 11:37:30] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 11:37:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 11:37:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 11:37:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 11:37:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 11:37:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:37:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:37:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 11:37:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 11:37:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 11:37:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 11:37:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 11:37:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 11:37:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 11:37:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 11:37:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 11:37:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 11:37:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 11:37:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 11:37:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 11:37:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 11:37:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 11:37:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 11:37:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 11:37:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 11:37:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 11:37:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 11:37:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 11:37:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 11:37:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 11:37:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 11:37:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 11:37:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 11:37:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 11:37:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 11:37:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 11:37:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 11:37:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 11:37:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 11:37:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 11:37:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 11:37:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 11:37:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 11:37:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:37:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:37:44] [Rank 0] PRINT: step:9200/10000 val_loss:4.8345 total_sharp:4.3624e-04 L1_sharp:1.6913e-04 L2_sharp:7.9874e-05 L3_sharp:2.1957e-05 L4_sharp:2.4959e-05 L5_sharp:4.8644e-05 L6_sharp:5.0577e-05 L7_sharp:3.8844e-05 L8_sharp:9.7265e-05 L9_sharp:8.3949e-05 L10_sharp:9.9027e-05 L11_sharp:1.3307e-04 L12_sharp:4.6131e-04 total_fnorm:3.9062e+00 total_l1_linf:5.3440e+03 total_spectral:1.9531e+00 L1_fnorm:1.2109e+00 L2_fnorm:1.1406e+00 L3_fnorm:1.1094e+00 L4_fnorm:1.1250e+00 L5_fnorm:1.0938e+00 L6_fnorm:1.1172e+00 L7_fnorm:1.1172e+00 L8_fnorm:1.0781e+00 L9_fnorm:1.0859e+00 L10_fnorm:1.0859e+00 L11_fnorm:1.1016e+00 L12_fnorm:1.0938e+00 L1_l1linf:2.0117e-01 L2_l1linf:1.8555e-01 L3_l1linf:1.7676e-01 L4_l1linf:1.7676e-01 L5_l1linf:1.6895e-01 L6_l1linf:1.6895e-01 L7_l1linf:1.5820e-01 L8_l1linf:1.6211e-01 L9_l1linf:1.4160e-01 L10_l1linf:1.4844e-01 L11_l1linf:1.5137e-01 L12_l1linf:1.8555e-01 L1_spectral:1.7669e-02 L2_spectral:1.6748e-02 L3_spectral:1.6494e-02 L4_spectral:1.6832e-02 L5_spectral:1.7416e-02 L6_spectral:1.7026e-02 L7_spectral:1.7195e-02 L8_spectral:1.7725e-02 L9_spectral:1.7237e-02 L10_spectral:1.7051e-02 L11_spectral:1.7141e-02 L12_spectral:1.7121e-02 train_time:373143ms step_avg:40.56ms +[2025-09-11 11:37:44] [Rank 0] PRINT: step:9200/10000 val_loss:4.8345 total_sharp:4.3624e-04 L1_sharp:1.6913e-04 L2_sharp:7.9874e-05 L3_sharp:2.1957e-05 L4_sharp:2.4959e-05 L5_sharp:4.8644e-05 L6_sharp:5.0577e-05 L7_sharp:3.8844e-05 L8_sharp:9.7265e-05 L9_sharp:8.3949e-05 L10_sharp:9.9027e-05 L11_sharp:1.3307e-04 L12_sharp:4.6131e-04 total_fnorm:3.9062e+00 total_l1_linf:5.3440e+03 total_spectral:1.9531e+00 L1_fnorm:1.2109e+00 L2_fnorm:1.1406e+00 L3_fnorm:1.1094e+00 L4_fnorm:1.1250e+00 L5_fnorm:1.0938e+00 L6_fnorm:1.1172e+00 L7_fnorm:1.1172e+00 L8_fnorm:1.0781e+00 L9_fnorm:1.0859e+00 L10_fnorm:1.0859e+00 L11_fnorm:1.1016e+00 L12_fnorm:1.0938e+00 L1_l1linf:2.0117e-01 L2_l1linf:1.8555e-01 L3_l1linf:1.7676e-01 L4_l1linf:1.7676e-01 L5_l1linf:1.6895e-01 
L6_l1linf:1.6895e-01 L7_l1linf:1.5820e-01 L8_l1linf:1.6211e-01 L9_l1linf:1.4160e-01 L10_l1linf:1.4844e-01 L11_l1linf:1.5137e-01 L12_l1linf:1.8555e-01 L1_spectral:1.7669e-02 L2_spectral:1.6748e-02 L3_spectral:1.6494e-02 L4_spectral:1.6832e-02 L5_spectral:1.7416e-02 L6_spectral:1.7026e-02 L7_spectral:1.7195e-02 L8_spectral:1.7725e-02 L9_spectral:1.7237e-02 L10_spectral:1.7051e-02 L11_spectral:1.7141e-02 L12_spectral:1.7121e-02 train_time:373143ms step_avg:40.56ms +[2025-09-11 11:37:45] [Rank 0] step:9201/10000 train_time:374351ms step_avg:40.69ms +[2025-09-11 11:37:45] [Rank 0] step:9201/10000 train_time:374351ms step_avg:40.69ms +[2025-09-11 11:37:46] [Rank 0] step:9221/10000 train_time:375090ms step_avg:40.68ms +[2025-09-11 11:37:46] [Rank 0] step:9221/10000 train_time:375090ms step_avg:40.68ms +[2025-09-11 11:37:46] [Rank 0] step:9241/10000 train_time:375794ms step_avg:40.67ms +[2025-09-11 11:37:46] [Rank 0] step:9241/10000 train_time:375794ms step_avg:40.67ms +[2025-09-11 11:37:47] [Rank 0] step:9261/10000 train_time:376501ms step_avg:40.65ms +[2025-09-11 11:37:47] [Rank 0] step:9261/10000 train_time:376501ms step_avg:40.65ms +[2025-09-11 11:37:48] [Rank 0] step:9281/10000 train_time:377209ms step_avg:40.64ms +[2025-09-11 11:37:48] [Rank 0] step:9281/10000 train_time:377209ms step_avg:40.64ms +[2025-09-11 11:37:48] [Rank 0] step:9301/10000 train_time:377913ms step_avg:40.63ms +[2025-09-11 11:37:48] [Rank 0] step:9301/10000 train_time:377913ms step_avg:40.63ms +[2025-09-11 11:37:49] [Rank 0] step:9321/10000 train_time:378621ms step_avg:40.62ms +[2025-09-11 11:37:49] [Rank 0] step:9321/10000 train_time:378621ms step_avg:40.62ms +[2025-09-11 11:37:50] [Rank 0] step:9341/10000 train_time:379323ms step_avg:40.61ms +[2025-09-11 11:37:50] [Rank 0] step:9341/10000 train_time:379323ms step_avg:40.61ms +[2025-09-11 11:37:51] [Rank 0] step:9361/10000 train_time:380025ms step_avg:40.60ms +[2025-09-11 11:37:51] [Rank 0] step:9361/10000 train_time:380025ms step_avg:40.60ms 
+[2025-09-11 11:37:51] [Rank 0] step:9381/10000 train_time:380728ms step_avg:40.59ms +[2025-09-11 11:37:51] [Rank 0] step:9381/10000 train_time:380728ms step_avg:40.59ms +[2025-09-11 11:37:52] [Rank 0] step:9401/10000 train_time:381434ms step_avg:40.57ms +[2025-09-11 11:37:52] [Rank 0] step:9401/10000 train_time:381434ms step_avg:40.57ms +[2025-09-11 11:37:53] [Rank 0] step:9421/10000 train_time:382141ms step_avg:40.56ms +[2025-09-11 11:37:53] [Rank 0] step:9421/10000 train_time:382141ms step_avg:40.56ms +[2025-09-11 11:37:54] [Rank 0] step:9441/10000 train_time:383179ms step_avg:40.59ms +[2025-09-11 11:37:54] [Rank 0] step:9441/10000 train_time:383179ms step_avg:40.59ms +[2025-09-11 11:37:55] [Rank 0] step:9461/10000 train_time:384065ms step_avg:40.59ms +[2025-09-11 11:37:55] [Rank 0] step:9461/10000 train_time:384065ms step_avg:40.59ms +[2025-09-11 11:37:55] [Rank 0] step:9481/10000 train_time:384772ms step_avg:40.58ms +[2025-09-11 11:37:55] [Rank 0] step:9481/10000 train_time:384772ms step_avg:40.58ms +[2025-09-11 11:37:56] [Rank 0] step:9501/10000 train_time:385762ms step_avg:40.60ms +[2025-09-11 11:37:56] [Rank 0] step:9501/10000 train_time:385762ms step_avg:40.60ms +[2025-09-11 11:37:57] [Rank 0] step:9521/10000 train_time:386470ms step_avg:40.59ms +[2025-09-11 11:37:57] [Rank 0] step:9521/10000 train_time:386470ms step_avg:40.59ms +[2025-09-11 11:37:58] [Rank 0] step:9541/10000 train_time:387173ms step_avg:40.58ms +[2025-09-11 11:37:58] [Rank 0] step:9541/10000 train_time:387173ms step_avg:40.58ms +[2025-09-11 11:37:58] [Rank 0] step:9561/10000 train_time:387878ms step_avg:40.57ms +[2025-09-11 11:37:58] [Rank 0] step:9561/10000 train_time:387878ms step_avg:40.57ms +[2025-09-11 11:37:59] [Rank 0] step:9581/10000 train_time:388585ms step_avg:40.56ms +[2025-09-11 11:37:59] [Rank 0] step:9581/10000 train_time:388585ms step_avg:40.56ms +[2025-09-11 11:38:00] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 11:38:00] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 11:38:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 11:38:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 11:38:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 11:38:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 11:38:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:38:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:38:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 11:38:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 11:38:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 11:38:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 11:38:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 11:38:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 11:38:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 11:38:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 11:38:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 11:38:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 11:38:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 11:38:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 11:38:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 11:38:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 11:38:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 11:38:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 11:38:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 11:38:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 11:38:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 11:38:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 11:38:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 11:38:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 11:38:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 11:38:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 11:38:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 11:38:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 11:38:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 11:38:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 11:38:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 11:38:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 11:38:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 11:38:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 11:38:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 11:38:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 11:38:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 11:38:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 11:38:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:38:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:38:10] [Rank 0] PRINT: step:9600/10000 val_loss:4.8257 total_sharp:2.4541e-04 L1_sharp:1.1822e-04 L2_sharp:6.3021e-05 L3_sharp:3.4698e-05 L4_sharp:2.8626e-05 L5_sharp:3.7836e-05 L6_sharp:4.3372e-05 L7_sharp:3.8666e-05 L8_sharp:6.8939e-05 L9_sharp:5.6488e-05 L10_sharp:6.4457e-05 L11_sharp:1.0072e-04 L12_sharp:3.1565e-04 total_fnorm:2.1875e+00 total_l1_linf:2.4960e+03 total_spectral:1.1016e+00 L1_fnorm:6.7578e-01 L2_fnorm:6.4453e-01 L3_fnorm:6.3281e-01 L4_fnorm:6.3281e-01 L5_fnorm:6.2109e-01 L6_fnorm:6.2500e-01 L7_fnorm:6.2500e-01 L8_fnorm:6.0547e-01 L9_fnorm:6.1719e-01 L10_fnorm:6.1328e-01 L11_fnorm:6.2500e-01 L12_fnorm:6.2109e-01 L1_l1linf:9.5215e-02 L2_l1linf:8.6426e-02 L3_l1linf:8.5449e-02 L4_l1linf:8.6426e-02 L5_l1linf:8.6426e-02 L6_l1linf:7.8125e-02 L7_l1linf:7.8613e-02 L8_l1linf:8.6426e-02 L9_l1linf:7.6172e-02 L10_l1linf:7.4219e-02 L11_l1linf:7.4219e-02 L12_l1linf:9.1797e-02 L1_spectral:1.0051e-02 L2_spectral:9.5427e-03 L3_spectral:9.5495e-03 L4_spectral:9.7340e-03 L5_spectral:1.0111e-02 L6_spectral:9.6915e-03 L7_spectral:9.8228e-03 L8_spectral:1.0242e-02 L9_spectral:9.8594e-03 L10_spectral:9.7422e-03 L11_spectral:9.8144e-03 L12_spectral:9.8508e-03 train_time:389268ms step_avg:40.55ms +[2025-09-11 11:38:10] [Rank 0] PRINT: step:9600/10000 
val_loss:4.8257 total_sharp:2.4541e-04 L1_sharp:1.1822e-04 L2_sharp:6.3021e-05 L3_sharp:3.4698e-05 L4_sharp:2.8626e-05 L5_sharp:3.7836e-05 L6_sharp:4.3372e-05 L7_sharp:3.8666e-05 L8_sharp:6.8939e-05 L9_sharp:5.6488e-05 L10_sharp:6.4457e-05 L11_sharp:1.0072e-04 L12_sharp:3.1565e-04 total_fnorm:2.1875e+00 total_l1_linf:2.4960e+03 total_spectral:1.1016e+00 L1_fnorm:6.7578e-01 L2_fnorm:6.4453e-01 L3_fnorm:6.3281e-01 L4_fnorm:6.3281e-01 L5_fnorm:6.2109e-01 L6_fnorm:6.2500e-01 L7_fnorm:6.2500e-01 L8_fnorm:6.0547e-01 L9_fnorm:6.1719e-01 L10_fnorm:6.1328e-01 L11_fnorm:6.2500e-01 L12_fnorm:6.2109e-01 L1_l1linf:9.5215e-02 L2_l1linf:8.6426e-02 L3_l1linf:8.5449e-02 L4_l1linf:8.6426e-02 L5_l1linf:8.6426e-02 L6_l1linf:7.8125e-02 L7_l1linf:7.8613e-02 L8_l1linf:8.6426e-02 L9_l1linf:7.6172e-02 L10_l1linf:7.4219e-02 L11_l1linf:7.4219e-02 L12_l1linf:9.1797e-02 L1_spectral:1.0051e-02 L2_spectral:9.5427e-03 L3_spectral:9.5495e-03 L4_spectral:9.7340e-03 L5_spectral:1.0111e-02 L6_spectral:9.6915e-03 L7_spectral:9.8228e-03 L8_spectral:1.0242e-02 L9_spectral:9.8594e-03 L10_spectral:9.7422e-03 L11_spectral:9.8144e-03 L12_spectral:9.8508e-03 train_time:389268ms step_avg:40.55ms +[2025-09-11 11:38:11] [Rank 0] step:9601/10000 train_time:390488ms step_avg:40.67ms +[2025-09-11 11:38:11] [Rank 0] step:9601/10000 train_time:390488ms step_avg:40.67ms +[2025-09-11 11:38:12] [Rank 0] step:9621/10000 train_time:391218ms step_avg:40.66ms +[2025-09-11 11:38:12] [Rank 0] step:9621/10000 train_time:391218ms step_avg:40.66ms +[2025-09-11 11:38:12] [Rank 0] step:9641/10000 train_time:391929ms step_avg:40.65ms +[2025-09-11 11:38:12] [Rank 0] step:9641/10000 train_time:391929ms step_avg:40.65ms +[2025-09-11 11:38:13] [Rank 0] step:9661/10000 train_time:392648ms step_avg:40.64ms +[2025-09-11 11:38:13] [Rank 0] step:9661/10000 train_time:392648ms step_avg:40.64ms +[2025-09-11 11:38:14] [Rank 0] step:9681/10000 train_time:393358ms step_avg:40.63ms +[2025-09-11 11:38:14] [Rank 0] step:9681/10000 
train_time:393358ms step_avg:40.63ms +[2025-09-11 11:38:15] [Rank 0] step:9701/10000 train_time:394071ms step_avg:40.62ms +[2025-09-11 11:38:15] [Rank 0] step:9701/10000 train_time:394071ms step_avg:40.62ms +[2025-09-11 11:38:15] [Rank 0] step:9721/10000 train_time:394787ms step_avg:40.61ms +[2025-09-11 11:38:15] [Rank 0] step:9721/10000 train_time:394787ms step_avg:40.61ms +[2025-09-11 11:38:16] [Rank 0] step:9741/10000 train_time:395501ms step_avg:40.60ms +[2025-09-11 11:38:16] [Rank 0] step:9741/10000 train_time:395501ms step_avg:40.60ms +[2025-09-11 11:38:17] [Rank 0] step:9761/10000 train_time:396213ms step_avg:40.59ms +[2025-09-11 11:38:17] [Rank 0] step:9761/10000 train_time:396213ms step_avg:40.59ms +[2025-09-11 11:38:17] [Rank 0] step:9781/10000 train_time:396925ms step_avg:40.58ms +[2025-09-11 11:38:17] [Rank 0] step:9781/10000 train_time:396925ms step_avg:40.58ms +[2025-09-11 11:38:18] [Rank 0] step:9801/10000 train_time:397642ms step_avg:40.57ms +[2025-09-11 11:38:18] [Rank 0] step:9801/10000 train_time:397642ms step_avg:40.57ms +[2025-09-11 11:38:19] [Rank 0] step:9821/10000 train_time:398356ms step_avg:40.56ms +[2025-09-11 11:38:19] [Rank 0] step:9821/10000 train_time:398356ms step_avg:40.56ms +[2025-09-11 11:38:20] [Rank 0] step:9841/10000 train_time:399073ms step_avg:40.55ms +[2025-09-11 11:38:20] [Rank 0] step:9841/10000 train_time:399073ms step_avg:40.55ms +[2025-09-11 11:38:20] [Rank 0] step:9861/10000 train_time:399786ms step_avg:40.54ms +[2025-09-11 11:38:20] [Rank 0] step:9861/10000 train_time:399786ms step_avg:40.54ms +[2025-09-11 11:38:21] [Rank 0] step:9881/10000 train_time:400501ms step_avg:40.53ms +[2025-09-11 11:38:21] [Rank 0] step:9881/10000 train_time:400501ms step_avg:40.53ms +[2025-09-11 11:38:22] [Rank 0] step:9901/10000 train_time:401211ms step_avg:40.52ms +[2025-09-11 11:38:22] [Rank 0] step:9901/10000 train_time:401211ms step_avg:40.52ms +[2025-09-11 11:38:22] [Rank 0] step:9921/10000 train_time:401923ms step_avg:40.51ms 
+[2025-09-11 11:38:22] [Rank 0] step:9921/10000 train_time:401923ms step_avg:40.51ms +[2025-09-11 11:38:23] [Rank 0] step:9941/10000 train_time:402640ms step_avg:40.50ms +[2025-09-11 11:38:23] [Rank 0] step:9941/10000 train_time:402640ms step_avg:40.50ms +[2025-09-11 11:38:24] [Rank 0] step:9961/10000 train_time:403357ms step_avg:40.49ms +[2025-09-11 11:38:24] [Rank 0] step:9961/10000 train_time:403357ms step_avg:40.49ms +[2025-09-11 11:38:25] [Rank 0] step:9981/10000 train_time:404071ms step_avg:40.48ms +[2025-09-11 11:38:25] [Rank 0] step:9981/10000 train_time:404071ms step_avg:40.48ms +[2025-09-11 11:38:25] [Rank 0] step:10000/10000 train_time:404756ms step_avg:40.48ms +[2025-09-11 11:38:25] [Rank 0] step:10000/10000 train_time:404756ms step_avg:40.48ms +[2025-09-11 11:38:25] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 11:38:25] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 11:38:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 11:38:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 11:38:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 11:38:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 11:38:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:38:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:38:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 11:38:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 11:38:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 11:38:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 11:38:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 11:38:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 11:38:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 11:38:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 11:38:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 11:38:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 11:38:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 11:38:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 11:38:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 11:38:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 11:38:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 11:38:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 11:38:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 11:38:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 11:38:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 11:38:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 11:38:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 11:38:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 11:38:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 11:38:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 11:38:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 11:38:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 11:38:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 11:38:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 11:38:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 11:38:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 11:38:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 11:38:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 11:38:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 11:38:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 11:38:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 11:38:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 11:38:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:38:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:38:35] [Rank 0] PRINT: step:10000/10000 val_loss:4.8231 total_sharp:1.7038e-04 L1_sharp:8.6601e-05 L2_sharp:4.1287e-05 L3_sharp:1.6632e-05 L4_sharp:1.3571e-05 L5_sharp:3.9222e-05 L6_sharp:3.2313e-05 L7_sharp:2.5535e-05 L8_sharp:5.1027e-05 L9_sharp:5.1085e-05 L10_sharp:5.0544e-05 L11_sharp:7.2190e-05 L12_sharp:2.6628e-04 total_fnorm:8.6719e-01 total_l1_linf:7.1200e+02 total_spectral:4.2773e-01 L1_fnorm:2.6562e-01 L2_fnorm:2.5195e-01 L3_fnorm:2.4512e-01 L4_fnorm:2.4609e-01 L5_fnorm:2.3926e-01 L6_fnorm:2.4414e-01 L7_fnorm:2.4316e-01 L8_fnorm:2.3633e-01 L9_fnorm:2.3926e-01 L10_fnorm:2.3828e-01 L11_fnorm:2.4219e-01 L12_fnorm:2.4121e-01 L1_l1linf:2.9175e-02 L2_l1linf:2.6367e-02 L3_l1linf:2.6245e-02 L4_l1linf:2.7588e-02 L5_l1linf:2.7344e-02 L6_l1linf:2.4780e-02 L7_l1linf:2.3682e-02 L8_l1linf:2.5146e-02 L9_l1linf:2.1973e-02 L10_l1linf:2.2949e-02 L11_l1linf:2.2705e-02 L12_l1linf:3.0518e-02 L1_spectral:4.0499e-03 L2_spectral:3.8485e-03 L3_spectral:3.7704e-03 L4_spectral:3.8529e-03 L5_spectral:4.1234e-03 L6_spectral:3.8176e-03 L7_spectral:3.9317e-03 L8_spectral:4.0669e-03 L9_spectral:3.9534e-03 L10_spectral:3.8754e-03 L11_spectral:3.9853e-03 L12_spectral:3.9735e-03 train_time:404775ms step_avg:40.48ms +[2025-09-11 11:38:35] [Rank 0] PRINT: step:10000/10000 val_loss:4.8231 total_sharp:1.7038e-04 L1_sharp:8.6601e-05 L2_sharp:4.1287e-05 L3_sharp:1.6632e-05 L4_sharp:1.3571e-05 L5_sharp:3.9222e-05 L6_sharp:3.2313e-05 L7_sharp:2.5535e-05 L8_sharp:5.1027e-05 L9_sharp:5.1085e-05 L10_sharp:5.0544e-05 L11_sharp:7.2190e-05 L12_sharp:2.6628e-04 total_fnorm:8.6719e-01 total_l1_linf:7.1200e+02 total_spectral:4.2773e-01 L1_fnorm:2.6562e-01 L2_fnorm:2.5195e-01 L3_fnorm:2.4512e-01 L4_fnorm:2.4609e-01 L5_fnorm:2.3926e-01 L6_fnorm:2.4414e-01 L7_fnorm:2.4316e-01 L8_fnorm:2.3633e-01 L9_fnorm:2.3926e-01 L10_fnorm:2.3828e-01 L11_fnorm:2.4219e-01 L12_fnorm:2.4121e-01 L1_l1linf:2.9175e-02 L2_l1linf:2.6367e-02 L3_l1linf:2.6245e-02 L4_l1linf:2.7588e-02 L5_l1linf:2.7344e-02 
L6_l1linf:2.4780e-02 L7_l1linf:2.3682e-02 L8_l1linf:2.5146e-02 L9_l1linf:2.1973e-02 L10_l1linf:2.2949e-02 L11_l1linf:2.2705e-02 L12_l1linf:3.0518e-02 L1_spectral:4.0499e-03 L2_spectral:3.8485e-03 L3_spectral:3.7704e-03 L4_spectral:3.8529e-03 L5_spectral:4.1234e-03 L6_spectral:3.8176e-03 L7_spectral:3.9317e-03 L8_spectral:4.0669e-03 L9_spectral:3.9534e-03 L10_spectral:3.8754e-03 L11_spectral:3.9853e-03 L12_spectral:3.9735e-03 train_time:404775ms step_avg:40.48ms +[2025-09-11 11:38:35] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 11:38:35 2025 --- +[2025-09-11 11:38:35] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 11:38:35 2025 --- +[2025-09-11 11:38:35] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 11:38:35] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.1_seed_44/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.1_seed_44/config.json new file mode 100644 index 0000000000000000000000000000000000000000..775c11adec54ae7c7efd3c6e55b7a17ddb9f5d55 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.1_seed_44/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 44, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.002, + "muon_lr": 0.1, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "888b304f-20a3-43b8-90a0-b0a38d25e473", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.1_seed_44/training_log_888b304f-20a3-43b8-90a0-b0a38d25e473.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.1_seed_44/training_log_888b304f-20a3-43b8-90a0-b0a38d25e473.txt new file mode 100644 index 0000000000000000000000000000000000000000..25c1bb260d9d62094cb71c4a5756f9835efc6ca5 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.1_seed_44/training_log_888b304f-20a3-43b8-90a0-b0a38d25e473.txt @@ -0,0 +1,4264 @@ +[2025-09-11 11:11:53] [Rank 0] PRINT: --- Script Start: Thu Sep 11 11:11:53 2025 --- +[2025-09-11 11:11:53] [Rank 0] PRINT: --- Script Start: Thu Sep 11 11:11:53 2025 --- +[2025-09-11 11:11:53] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002, muon_lr=0.1, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 11:11:53] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002, muon_lr=0.1, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 11:11:53] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 11:11:53] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 11:11:53] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 11:11:53] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 11:11:53] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.1_seed_44 +[2025-09-11 11:11:53] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.1_seed_44 +[2025-09-11 11:11:53] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import 
dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, 
"unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." 
+ "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention 
sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
    Args:
        folder_path (str): Path to the folder to check

    Returns True when the run should proceed (folder missing / no completed
    run found -- stale logs get deleted), False when a finished run exists.
    """
    folder = Path(folder_path)

    # Check if folder exists
    if not folder.exists():
        print(f"Error: Folder '{folder_path}' does not exist.")
        return True

    # Check if it's actually a directory
    if not folder.is_dir():
        print(f"Error: '{folder_path}' is not a directory.")
        return True

    # Get all .txt files in the folder (one layer only)
    txt_files = list(folder.glob("*.txt"))

    if not txt_files:
        print("No .txt files found in the folder.")
        return True

    # Check if any file contains "10000/10000"
    # ("10000/10000" is the final step marker logged by a completed run).
    found_string = False

    for file_path in txt_files:
        try:
            content = file_path.read_text(encoding='utf-8')
            if "10000/10000" in content:
                found_string = True
                print(f"Found '10000/10000' in: {file_path}")
                break # No need to check other files
        except Exception as e:
            # Best-effort: an unreadable file is treated as "not completed".
            print(f"Error reading {file_path}: {e}")

    # If string not found in any file, delete all .txt files
    if not found_string:
        print("String '10000/10000' not found in any .txt file. Deleting all .txt files...")
        for file_path in txt_files:
            try:
                file_path.unlink()
                print(f"Deleted: {file_path}")
            except Exception as e:
                print(f"Error deleting {file_path}: {e}")
    else:
        print("String '10000/10000' found. No files will be deleted.")
    return not found_string

logfile = None
run_dir_path_str = None

base_log_dir = Path(exp_args.base_dir)

# One directory per (mode, parameterization, lrs, seed) combination.
run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
run_dir_path = base_log_dir / run_folder_name
# Skip the whole run if this configuration already finished (run_flag False).
run_flag = check_and_delete_txt_files(run_dir_path)

if run_flag:

    if master_process:
        # Set seed again specifically for master process for operations like dir creation, config saving
        set_seed(exp_args.seed)

        # Construct folder name based on config and seed
        # NOTE(review): run_folder_name / run_dir_path are recomputed here with
        # the identical f-string as above -- harmless but redundant.
        run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
        run_dir_path = base_log_dir / run_folder_name
        run_dir_path.mkdir(parents=True, exist_ok=True)
        run_dir_path_str = str(run_dir_path)

        run_uuid = uuid.uuid4()
        logfile = run_dir_path / f"training_log_{run_uuid}.txt"
        print(f"Logging to: {logfile}")

        # Save configuration (CLI args + the class-level Hyperparameters fields).
        config_to_save = {
            "cli_args": vars(exp_args),
            "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
            "run_uuid_for_log": str(run_uuid),
            "script_code_logged_at_start": True
        }
        config_file_path = run_dir_path / "config.json"
        with open(config_file_path, "w") as f:
            json.dump(config_to_save, f, indent=4)
        print(f"Saved configuration to: {config_file_path}")

    def print0(s, console=False):
        """Master-only logging: timestamped line to logfile, optional stdout echo.
        Messages prefixed "PRINT:" are always echoed (without the prefix)."""
        if master_process:
            # Add timestamp and rank for better log readability
            timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
            log_message = f"[{timestamp}] [Rank {rank}] {s}"

            # Print to console if requested or if it's a specific "PRINT:" message
            if console or s.startswith("PRINT:"):
                actual_s = s[6:] if s.startswith("PRINT:") else s
                print(actual_s) # Print to stdout for master process

            if logfile:
                with open(logfile,
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
                    attn_k_params.append(block_module.attn.k_w)
                else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True)
                if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
                else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True)
                attn_o_params.append(block_module.attn.c_proj.weight)
            if block_module.mlp is not None:
                mlp_fc_params.append(block_module.mlp.c_fc.weight)
                mlp_proj_params.append(block_module.mlp.c_proj.weight)

        # Combine into logical groups for experiments
        attn_qk_group = attn_q_params + attn_k_params
        attn_vo_group = attn_v_params + attn_o_params
        all_attn_matrices = attn_qk_group + attn_vo_group
        mlp_w1_group = mlp_fc_params    # W_1 (c_fc)
        mlp_w2_group = mlp_proj_params  # W_2 (c_proj)
        all_mlp_matrices = mlp_fc_params + mlp_proj_params

        # Scalar parameters (all others not explicitly grouped as matrices)
        # NOTE: set membership on Parameters uses identity, which is exactly
        # what we want here.
        matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
        scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
        for p_scalar in scalar_params: # Sanity check
            if p_scalar.ndim >=2:
                print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)


        # Determine parameter distribution based on optimizer_mode.
        # Muon gets muon_params_target_list; Adam always gets head/embed/scalars
        # plus whatever matrices the mode assigns it.
        muon_params_target_list = []
        adam_matrix_target_list = [] # Matrices that Adam will handle specifically
        adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned)
        muon_lr = exp_args.muon_lr

        current_optimizer_mode = exp_args.optimizer_mode
        print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)

        if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params"
            print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
            muon_params_target_list = all_attn_matrices + all_mlp_matrices
            # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
        elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP
            print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = attn_qk_group
            adam_matrix_target_list = attn_vo_group + all_mlp_matrices
        elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
            print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = attn_vo_group
            adam_matrix_target_list = attn_qk_group + all_mlp_matrices
        elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
            print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = all_attn_matrices
            adam_matrix_target_list = all_mlp_matrices
        elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
            print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = all_mlp_matrices
            adam_matrix_target_list = all_attn_matrices
        elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
            print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = []
            adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
        elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
            print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = mlp_w2_group
            adam_matrix_target_list = all_attn_matrices + mlp_w1_group
        elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
            print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = attn_vo_group + all_mlp_matrices
            adam_matrix_target_list = attn_qk_group
        elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
            print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = attn_vo_group + mlp_w2_group
            adam_matrix_target_list = attn_qk_group + mlp_w1_group
        else:
            raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")

        # Adam optimizer setup
        adam_param_groups_config = [
            #dict(params=head_params, lr=0.22),
            #dict(params=embed_params, lr=0.6),
            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
            dict(params=head_params, lr=adam_matrix_lr),
            dict(params=embed_params, lr=adam_matrix_lr),
            dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam
        ]
        # Add matrices specifically assigned to Adam for this experiment mode
        if adam_matrix_target_list:
            # Ensure adam_matrix_target_list is flat and contains Parameters
            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
            if flat_adam_matrices: # Only add group if there are params
                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))

        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)
        optimizers = [optimizer1] # Start with Adam

        # Muon optimizer setup
        if muon_params_target_list:
            # Ensure muon_params_target_list is flat, unique, and contains Parameters
            # (dedup by id() since the mode lists can overlap).
            flat_unique_muon_params = []
            seen_muon_ids = set()
            for sublist_or_p in muon_params_target_list:
                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
                    if p is not None and id(p) not in seen_muon_ids:
                        flat_unique_muon_params.append(p)
                        seen_muon_ids.add(id(p))

            if flat_unique_muon_params: # Only create Muon if it has parameters
                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
                optimizers.append(optimizer2)
            else:
                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
                optimizer2 = None # Explicitly set to None if not created
        else:
            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
            optimizer2 = None # Explicitly set to None

        print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
        if optimizer2:
            print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
        # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
    elif exp_args.model_parameterization == "whole":
        # Original modded-nanogpt grouping: Muon on all hidden matrices,
        # Adam (with per-group LRs) on head/embeds/scalars.
        hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
        embed_params = [p for n, p in model.named_parameters() if "embed" in n]
        scalar_params = [p for p in model.parameters() if p.ndim < 2]
        head_params = [model.lm_head.weight]

        # init the optimizer(s)
        adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
        # small adam epsilon by @YouJiacheng.
        # this is an alternate method of fixing the world_size dependence
        # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
        optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
        optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
        optimizers = [optimizer1, optimizer2]

    # Remember each group's base LR so get_lr() can scale it multiplicatively.
    for opt in optimizers:
        for group in opt.param_groups:
            group["initial_lr"] = group["lr"]

    # learning rate schedule: stable then decay (KEEP AS IS, but check assert)
    def get_lr(step: int):
        """LR multiplier: 1.0 for the first (1 - cooldown_frac) of training,
        then a linear ramp from 1.0 down to 0.1 over the final cooldown_frac."""
        x = step / args.num_iterations # progress in training
        # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations
        # --- MODIFICATION: Adjust assert for LR schedule ---
        if not (0 <= x <= 1): # Allow x=1 for the last step
            x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations
            # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log

        if x < 1 - args.cooldown_frac:
            return 1.0
        else:
            # Ensure cooldown_frac is not zero to avoid division by zero
            w = (1 - x) / max(args.cooldown_frac, 1e-9)
            return w * 1.0 + (1 - w) * 0.1

    # attention window size schedule (KEEP AS IS)
    def next_multiple_of_n(v: float | int, *, n: int):
        # Smallest multiple of n that is >= v.
        return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
    @lru_cache(1)
    def get_window_size_blocks_helper(window_size: int):
        # Cached (size 1) because consecutive steps usually share a window size.
        return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
    def get_window_size_blocks(step: int):
        """Attention window (in 128-token blocks) grows linearly with progress,
        from 128 tokens up to 1728."""
        x = step / args.num_iterations # progress in training
        # --- MODIFICATION: Adjust assert for window size schedule ---
        if not (0 <= x <= 1):
            x = min(max(x, 0.0), 1.0) # Clamp x

        # Ensure window_size is at least 128
        window_size = max(128, next_multiple_of_n(1728 * x, n=128))
        return get_window_size_blocks_helper(window_size)




    @torch.no_grad()
    def calculate_validation_loss(model_to_eval,
                                  val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks):
        """Helper function to calculate validation loss on a subset of the validation set.
        Averages per-step losses on this rank, then all-reduce-averages across ranks.
        Restores train() mode before returning."""
        model_to_eval.eval()
        val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size)
        val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed

        val_loss_sum = torch.zeros(1, device=device)
        actual_val_steps = 0

        for val_i in range(val_num_steps):
            try:
                inputs, targets = next(val_loader)
                loss_val = model_to_eval(inputs, targets, window_size_blocks)
                val_loss_sum += loss_val
                actual_val_steps += 1
            except StopIteration:
                break

        if actual_val_steps > 0:
            val_loss_avg = val_loss_sum / actual_val_steps
        else:
            # NOTE(review): a rank with no data contributes NaN, which the
            # AVG all_reduce below propagates to every rank's logged value.
            val_loss_avg = torch.tensor(float('nan'), device=device)

        del val_loader
        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)

        model_to_eval.train()
        return val_loss_avg.item()

    # Helper functions for additional norms
    def calculate_l1_to_linf_norm(matrix):
        """
        Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix.
        For vectors, returns the L1 norm.

        NOTE(review): max row-L1 is the ℓ∞→ℓ∞ induced operator norm; the
        ℓ1→ℓ∞ induced norm would be the maximum absolute entry. The code
        computes max row-L1 -- confirm which metric was intended (only the
        metric's label is affected, downstream use is consistent).
        """
        if matrix.ndim == 1:
            return torch.sum(torch.abs(matrix))
        elif matrix.ndim == 2:
            # Each row's L1 norm, then take maximum
            row_l1_norms = torch.sum(torch.abs(matrix), dim=1)
            return torch.max(row_l1_norms)
        else:
            # For higher-dimensional tensors, flatten to 2D
            matrix_2d = matrix.view(matrix.shape[0], -1)
            row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1)
            return torch.max(row_l1_norms)

    def calculate_spectral_norm(matrix):
        """
        Calculate the spectral norm (largest singular value) of a matrix.
        For vectors, returns the L2 norm.
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
        # Cleanup ---
        # NOTE(review): model.bfloat16() casts *every* parameter to bfloat16,
        # whereas at construction only nn.Embedding modules were bfloat16 --
        # so after the first sharpness pass the whole model is bfloat16.
        # Confirm this is intended.
        model.bfloat16() # Switch back
        del update_direction_v, update_direction_v_fp32, grads_hvp
        del params_before_step
        gc.collect()
        torch.cuda.empty_cache()

        # NOTE(review): clearing opt.state discards Adam moments / Muon momentum
        # at every sharpness evaluation, effectively restarting the optimizers
        # after each validation. Presumably done to free memory -- confirm.
        for opt in optimizers:
            opt.state.clear()

        print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True)
        return analysis_results

    def format_comprehensive_results(results):
        """
        Format the comprehensive analysis results for logging.
        Produces one space-separated "key:value" string; missing keys are
        simply omitted. Layer keys follow the layer_{i}_* naming from the
        sharpness analysis, i in 1..12.
        """
        log_parts = []

        # Total sharpness
        if 'total_sharpness' in results:
            log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}")

        # Layer-wise sharpness
        layer_sharpness = []
        for i in range(1, 13): # Layers 1-12
            layer_key = f"layer_{i}_sharpness"
            if layer_key in results:
                layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}")

        if layer_sharpness:
            log_parts.append(" ".join(layer_sharpness))

        # Total update norms (Frobenius, Max-of-Max, Spectral)
        total_norms = []
        if 'total_update_fnorm' in results:
            total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}")
        if 'total_l1_linf_norm' in results:
            total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}")
        if 'total_spectral_norm' in results:
            total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}")

        if total_norms:
            log_parts.append(" ".join(total_norms))

        # Layer-wise update norms (Frobenius)
        layer_fnorms = []
        for i in range(1, 13): # Layers 1-12
            layer_key = f"layer_{i}_update_fnorm"
            if layer_key in results:
                layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}")

        if layer_fnorms:
            log_parts.append(" ".join(layer_fnorms))

        # Layer-wise update norms (Max-of-Max)
        layer_l1_linf = []
        for i in range(1, 13): # Layers 1-12
            layer_key = f"layer_{i}_max_l1_linf_norm"
            if layer_key in results:
                layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}")

        if layer_l1_linf:
            log_parts.append(" ".join(layer_l1_linf))

        # Layer-wise update norms (Spectral)
        layer_spectral = []
        for i in range(1, 13): # Layers 1-12
            layer_key = f"layer_{i}_max_spectral_norm"
            if layer_key in results:
                layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}")

        if layer_spectral:
            log_parts.append(" ".join(layer_spectral))

        return " ".join(log_parts)





    print0("PRINT: Compiling model with TorchInductor...", console=True)
    # Use 'model' for compilation, not 'model_compiled' before it's defined
    model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
    print0("PRINT: Model compilation complete.", console=True)

    ########################################
    #            Warmup kernels            #
    ########################################
    print0("PRINT: Starting warmup...", console=True)
    warmup_steps = 10
    # Snapshot model + optimizer state so warmup leaves training untouched.
    initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled
                         optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers])
    for i in range(warmup_steps):
        # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose
        inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
        loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled
        loss.backward()
        for param in model_compiled.parameters(): # Use model_compiled
            if param.grad is not None:
                dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
        for opt in optimizers:
            opt.step()
        model_compiled.zero_grad(set_to_none=True) # Use model_compiled
    model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled
    for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
        opt.load_state_dict(opt_state)
    del initial_state
    print0("PRINT: Warmup complete.", console=True)
    torch.cuda.synchronize()


    ########################################
    #        Training and validation       #
    ########################################
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 11:11:53] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 11:11:53] [Rank 0] PRINT: Constructing model... +[2025-09-11 11:11:53] [Rank 0] PRINT: Constructing model... +[2025-09-11 11:11:54] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 11:11:54] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 11:11:54] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 11:11:54] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 11:11:54] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 11:11:54] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 11:11:54] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 11:11:54] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 11:11:54] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 11:11:54] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 11:11:57] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 11:11:57] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 11:11:57] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 11:11:57] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 11:11:57] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 11:11:57] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 11:12:03] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 11:12:03] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 11:12:03] [Rank 0] PRINT: Starting warmup... +[2025-09-11 11:12:03] [Rank 0] PRINT: Starting warmup... +[2025-09-11 11:12:40] [Rank 0] PRINT: Warmup complete. +[2025-09-11 11:12:40] [Rank 0] PRINT: Warmup complete. +[2025-09-11 11:12:40] [Rank 0] PRINT: Starting training... +[2025-09-11 11:12:40] [Rank 0] PRINT: Starting training... 
+[2025-09-11 11:12:41] [Rank 0] step:21/10000 train_time:1214ms step_avg:57.83ms +[2025-09-11 11:12:41] [Rank 0] step:21/10000 train_time:1214ms step_avg:57.83ms +[2025-09-11 11:12:42] [Rank 0] step:41/10000 train_time:1944ms step_avg:47.42ms +[2025-09-11 11:12:42] [Rank 0] step:41/10000 train_time:1944ms step_avg:47.42ms +[2025-09-11 11:12:43] [Rank 0] step:61/10000 train_time:2673ms step_avg:43.82ms +[2025-09-11 11:12:43] [Rank 0] step:61/10000 train_time:2673ms step_avg:43.82ms +[2025-09-11 11:12:43] [Rank 0] step:81/10000 train_time:3402ms step_avg:42.00ms +[2025-09-11 11:12:43] [Rank 0] step:81/10000 train_time:3402ms step_avg:42.00ms +[2025-09-11 11:12:44] [Rank 0] step:101/10000 train_time:4130ms step_avg:40.89ms +[2025-09-11 11:12:44] [Rank 0] step:101/10000 train_time:4130ms step_avg:40.89ms +[2025-09-11 11:12:45] [Rank 0] step:121/10000 train_time:4859ms step_avg:40.16ms +[2025-09-11 11:12:45] [Rank 0] step:121/10000 train_time:4859ms step_avg:40.16ms +[2025-09-11 11:12:46] [Rank 0] step:141/10000 train_time:5588ms step_avg:39.63ms +[2025-09-11 11:12:46] [Rank 0] step:141/10000 train_time:5588ms step_avg:39.63ms +[2025-09-11 11:12:46] [Rank 0] step:161/10000 train_time:6316ms step_avg:39.23ms +[2025-09-11 11:12:46] [Rank 0] step:161/10000 train_time:6316ms step_avg:39.23ms +[2025-09-11 11:12:47] [Rank 0] step:181/10000 train_time:7045ms step_avg:38.92ms +[2025-09-11 11:12:47] [Rank 0] step:181/10000 train_time:7045ms step_avg:38.92ms +[2025-09-11 11:12:48] [Rank 0] step:201/10000 train_time:7775ms step_avg:38.68ms +[2025-09-11 11:12:48] [Rank 0] step:201/10000 train_time:7775ms step_avg:38.68ms +[2025-09-11 11:12:49] [Rank 0] step:221/10000 train_time:8503ms step_avg:38.48ms +[2025-09-11 11:12:49] [Rank 0] step:221/10000 train_time:8503ms step_avg:38.48ms +[2025-09-11 11:12:49] [Rank 0] step:241/10000 train_time:9240ms step_avg:38.34ms +[2025-09-11 11:12:49] [Rank 0] step:241/10000 train_time:9240ms step_avg:38.34ms +[2025-09-11 11:12:50] [Rank 0] 
step:261/10000 train_time:9969ms step_avg:38.19ms +[2025-09-11 11:12:50] [Rank 0] step:261/10000 train_time:9969ms step_avg:38.19ms +[2025-09-11 11:12:51] [Rank 0] step:281/10000 train_time:10697ms step_avg:38.07ms +[2025-09-11 11:12:51] [Rank 0] step:281/10000 train_time:10697ms step_avg:38.07ms +[2025-09-11 11:12:51] [Rank 0] step:301/10000 train_time:11426ms step_avg:37.96ms +[2025-09-11 11:12:51] [Rank 0] step:301/10000 train_time:11426ms step_avg:37.96ms +[2025-09-11 11:12:52] [Rank 0] step:321/10000 train_time:12155ms step_avg:37.86ms +[2025-09-11 11:12:52] [Rank 0] step:321/10000 train_time:12155ms step_avg:37.86ms +[2025-09-11 11:12:53] [Rank 0] step:341/10000 train_time:12883ms step_avg:37.78ms +[2025-09-11 11:12:53] [Rank 0] step:341/10000 train_time:12883ms step_avg:37.78ms +[2025-09-11 11:12:54] [Rank 0] step:361/10000 train_time:13611ms step_avg:37.70ms +[2025-09-11 11:12:54] [Rank 0] step:361/10000 train_time:13611ms step_avg:37.70ms +[2025-09-11 11:12:54] [Rank 0] step:381/10000 train_time:14341ms step_avg:37.64ms +[2025-09-11 11:12:54] [Rank 0] step:381/10000 train_time:14341ms step_avg:37.64ms +[2025-09-11 11:12:55] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 11:12:55] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 11:12:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 11:12:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 11:13:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 11:13:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 11:13:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:13:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 11:13:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 11:13:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 11:13:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 11:13:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 11:13:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 11:13:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 11:13:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 11:13:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 11:13:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 11:13:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 11:13:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 11:13:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 11:13:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 11:13:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 11:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 11:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 11:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 11:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 11:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 11:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 11:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 11:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 11:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 11:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 11:13:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 11:13:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 11:13:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 11:13:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 11:13:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 11:13:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 11:13:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 11:13:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 11:13:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 11:13:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 11:13:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 11:13:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 11:13:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:13:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:13:45] [Rank 0] PRINT: step:400/10000 val_loss:6.7620 total_sharp:2.6015e-03 L1_sharp:1.3451e-03 L2_sharp:1.0832e-03 L3_sharp:8.9224e-04 L4_sharp:4.3942e-04 L5_sharp:2.9588e-04 L6_sharp:1.1530e-04 L7_sharp:7.6071e-05 L8_sharp:6.8622e-05 L9_sharp:4.8790e-05 L10_sharp:3.5413e-06 L11_sharp:2.3612e-05 L12_sharp:1.1744e-04 total_fnorm:3.3760e+01 total_l1_linf:1.1806e+05 total_spectral:1.6880e+01 L1_fnorm:1.2305e+01 L2_fnorm:1.1680e+01 L3_fnorm:1.0856e+01 L4_fnorm:1.0220e+01 L5_fnorm:9.5586e+00 L6_fnorm:8.9761e+00 L7_fnorm:8.6884e+00 L8_fnorm:8.1639e+00 L9_fnorm:8.2065e+00 L10_fnorm:7.7749e+00 L11_fnorm:7.4055e+00 L12_fnorm:7.1480e+00 L1_l1linf:3.7834e+00 L2_l1linf:3.6746e+00 L3_l1linf:3.3330e+00 L4_l1linf:3.1226e+00 L5_l1linf:2.9213e+00 L6_l1linf:2.7498e+00 L7_l1linf:2.6108e+00 L8_l1linf:2.4829e+00 L9_l1linf:2.5594e+00 L10_l1linf:2.3333e+00 L11_l1linf:2.2125e+00 L12_l1linf:2.2930e+00 L1_spectral:1.2177e-01 L2_spectral:1.2128e-01 L3_spectral:1.2117e-01 L4_spectral:1.2125e-01 L5_spectral:1.2102e-01 L6_spectral:1.2110e-01 L7_spectral:1.2061e-01 L8_spectral:1.2054e-01 L9_spectral:1.2043e-01 L10_spectral:1.2047e-01 L11_spectral:1.2024e-01 L12_spectral:1.2025e-01 train_time:15048ms step_avg:37.62ms +[2025-09-11 11:13:45] [Rank 0] PRINT: step:400/10000 val_loss:6.7620 total_sharp:2.6015e-03 L1_sharp:1.3451e-03 L2_sharp:1.0832e-03 L3_sharp:8.9224e-04 L4_sharp:4.3942e-04 L5_sharp:2.9588e-04 L6_sharp:1.1530e-04 L7_sharp:7.6071e-05 L8_sharp:6.8622e-05 L9_sharp:4.8790e-05 L10_sharp:3.5413e-06 L11_sharp:2.3612e-05 L12_sharp:1.1744e-04 total_fnorm:3.3760e+01 total_l1_linf:1.1806e+05 total_spectral:1.6880e+01 L1_fnorm:1.2305e+01 L2_fnorm:1.1680e+01 L3_fnorm:1.0856e+01 L4_fnorm:1.0220e+01 L5_fnorm:9.5586e+00 L6_fnorm:8.9761e+00 L7_fnorm:8.6884e+00 L8_fnorm:8.1639e+00 L9_fnorm:8.2065e+00 L10_fnorm:7.7749e+00 L11_fnorm:7.4055e+00 L12_fnorm:7.1480e+00 L1_l1linf:3.7834e+00 L2_l1linf:3.6746e+00 L3_l1linf:3.3330e+00 L4_l1linf:3.1226e+00 L5_l1linf:2.9213e+00 
L6_l1linf:2.7498e+00 L7_l1linf:2.6108e+00 L8_l1linf:2.4829e+00 L9_l1linf:2.5594e+00 L10_l1linf:2.3333e+00 L11_l1linf:2.2125e+00 L12_l1linf:2.2930e+00 L1_spectral:1.2177e-01 L2_spectral:1.2128e-01 L3_spectral:1.2117e-01 L4_spectral:1.2125e-01 L5_spectral:1.2102e-01 L6_spectral:1.2110e-01 L7_spectral:1.2061e-01 L8_spectral:1.2054e-01 L9_spectral:1.2043e-01 L10_spectral:1.2047e-01 L11_spectral:1.2024e-01 L12_spectral:1.2025e-01 train_time:15048ms step_avg:37.62ms +[2025-09-11 11:14:16] [Rank 0] step:401/10000 train_time:46439ms step_avg:115.81ms +[2025-09-11 11:14:16] [Rank 0] step:401/10000 train_time:46439ms step_avg:115.81ms +[2025-09-11 11:14:19] [Rank 0] step:421/10000 train_time:49172ms step_avg:116.80ms +[2025-09-11 11:14:19] [Rank 0] step:421/10000 train_time:49172ms step_avg:116.80ms +[2025-09-11 11:14:20] [Rank 0] step:441/10000 train_time:49814ms step_avg:112.96ms +[2025-09-11 11:14:20] [Rank 0] step:441/10000 train_time:49814ms step_avg:112.96ms +[2025-09-11 11:14:20] [Rank 0] step:461/10000 train_time:50454ms step_avg:109.44ms +[2025-09-11 11:14:20] [Rank 0] step:461/10000 train_time:50454ms step_avg:109.44ms +[2025-09-11 11:14:21] [Rank 0] step:481/10000 train_time:51094ms step_avg:106.22ms +[2025-09-11 11:14:21] [Rank 0] step:481/10000 train_time:51094ms step_avg:106.22ms +[2025-09-11 11:14:22] [Rank 0] step:501/10000 train_time:51734ms step_avg:103.26ms +[2025-09-11 11:14:22] [Rank 0] step:501/10000 train_time:51734ms step_avg:103.26ms +[2025-09-11 11:14:22] [Rank 0] step:521/10000 train_time:52376ms step_avg:100.53ms +[2025-09-11 11:14:22] [Rank 0] step:521/10000 train_time:52376ms step_avg:100.53ms +[2025-09-11 11:14:23] [Rank 0] step:541/10000 train_time:53015ms step_avg:97.99ms +[2025-09-11 11:14:23] [Rank 0] step:541/10000 train_time:53015ms step_avg:97.99ms +[2025-09-11 11:14:24] [Rank 0] step:561/10000 train_time:53655ms step_avg:95.64ms +[2025-09-11 11:14:24] [Rank 0] step:561/10000 train_time:53655ms step_avg:95.64ms +[2025-09-11 11:14:24] 
[Rank 0] step:581/10000 train_time:54294ms step_avg:93.45ms +[2025-09-11 11:14:24] [Rank 0] step:581/10000 train_time:54294ms step_avg:93.45ms +[2025-09-11 11:14:25] [Rank 0] step:601/10000 train_time:54933ms step_avg:91.40ms +[2025-09-11 11:14:25] [Rank 0] step:601/10000 train_time:54933ms step_avg:91.40ms +[2025-09-11 11:14:26] [Rank 0] step:621/10000 train_time:55573ms step_avg:89.49ms +[2025-09-11 11:14:26] [Rank 0] step:621/10000 train_time:55573ms step_avg:89.49ms +[2025-09-11 11:14:26] [Rank 0] step:641/10000 train_time:56213ms step_avg:87.70ms +[2025-09-11 11:14:26] [Rank 0] step:641/10000 train_time:56213ms step_avg:87.70ms +[2025-09-11 11:14:27] [Rank 0] step:661/10000 train_time:56852ms step_avg:86.01ms +[2025-09-11 11:14:27] [Rank 0] step:661/10000 train_time:56852ms step_avg:86.01ms +[2025-09-11 11:14:28] [Rank 0] step:681/10000 train_time:57491ms step_avg:84.42ms +[2025-09-11 11:14:28] [Rank 0] step:681/10000 train_time:57491ms step_avg:84.42ms +[2025-09-11 11:14:28] [Rank 0] step:701/10000 train_time:58129ms step_avg:82.92ms +[2025-09-11 11:14:28] [Rank 0] step:701/10000 train_time:58129ms step_avg:82.92ms +[2025-09-11 11:14:29] [Rank 0] step:721/10000 train_time:58768ms step_avg:81.51ms +[2025-09-11 11:14:29] [Rank 0] step:721/10000 train_time:58768ms step_avg:81.51ms +[2025-09-11 11:14:29] [Rank 0] step:741/10000 train_time:59407ms step_avg:80.17ms +[2025-09-11 11:14:29] [Rank 0] step:741/10000 train_time:59407ms step_avg:80.17ms +[2025-09-11 11:14:30] [Rank 0] step:761/10000 train_time:60050ms step_avg:78.91ms +[2025-09-11 11:14:30] [Rank 0] step:761/10000 train_time:60050ms step_avg:78.91ms +[2025-09-11 11:14:31] [Rank 0] step:781/10000 train_time:60695ms step_avg:77.71ms +[2025-09-11 11:14:31] [Rank 0] step:781/10000 train_time:60695ms step_avg:77.71ms +[2025-09-11 11:14:31] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 11:14:31] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 11:14:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 11:14:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 11:15:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 11:15:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 11:15:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:15:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:15:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 11:15:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 11:15:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 11:15:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 11:15:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 11:15:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 11:15:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 11:15:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 11:15:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 11:15:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 11:15:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 11:15:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 11:15:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 11:15:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 11:15:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 11:15:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 11:15:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 11:15:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 11:15:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 11:15:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 11:15:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 11:15:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 11:15:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 11:15:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 11:15:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 11:15:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 11:15:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 11:15:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 11:15:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 11:15:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 11:15:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... 
+[2025-09-11 11:15:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 11:15:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 11:15:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 11:15:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 11:15:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 11:15:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:15:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:15:16] [Rank 0] PRINT: step:800/10000 val_loss:6.3265 total_sharp:1.8616e-03 L1_sharp:2.2568e-03 L2_sharp:4.9989e-04 L3_sharp:3.5395e-04 L4_sharp:2.3926e-04 L5_sharp:1.3928e-04 L6_sharp:8.0345e-05 L7_sharp:5.9285e-05 L8_sharp:1.1461e-04 L9_sharp:7.7128e-05 L10_sharp:1.5099e-04 L11_sharp:1.0608e-04 L12_sharp:6.1960e-04 total_fnorm:3.7000e+01 total_l1_linf:1.1110e+05 total_spectral:1.8250e+01 L1_fnorm:1.2562e+01 L2_fnorm:1.1938e+01 L3_fnorm:1.1562e+01 L4_fnorm:1.1250e+01 L5_fnorm:1.0688e+01 L6_fnorm:1.0500e+01 L7_fnorm:1.0312e+01 L8_fnorm:9.6875e+00 L9_fnorm:9.6875e+00 L10_fnorm:8.5000e+00 L11_fnorm:8.9375e+00 L12_fnorm:7.6562e+00 L1_l1linf:3.7344e+00 L2_l1linf:3.6875e+00 L3_l1linf:3.3906e+00 L4_l1linf:3.3906e+00 L5_l1linf:3.3438e+00 L6_l1linf:3.1719e+00 L7_l1linf:3.0781e+00 L8_l1linf:2.9219e+00 L9_l1linf:2.9688e+00 L10_l1linf:2.2812e+00 L11_l1linf:2.2656e+00 L12_l1linf:2.2031e+00 L1_spectral:1.3337e-01 L2_spectral:1.3117e-01 L3_spectral:1.3165e-01 L4_spectral:1.3078e-01 L5_spectral:1.3024e-01 L6_spectral:1.3056e-01 L7_spectral:1.3053e-01 L8_spectral:1.3105e-01 L9_spectral:1.3010e-01 L10_spectral:1.2876e-01 L11_spectral:1.2991e-01 L12_spectral:1.2898e-01 train_time:61322ms step_avg:76.65ms +[2025-09-11 11:15:16] [Rank 0] PRINT: step:800/10000 val_loss:6.3265 
total_sharp:1.8616e-03 L1_sharp:2.2568e-03 L2_sharp:4.9989e-04 L3_sharp:3.5395e-04 L4_sharp:2.3926e-04 L5_sharp:1.3928e-04 L6_sharp:8.0345e-05 L7_sharp:5.9285e-05 L8_sharp:1.1461e-04 L9_sharp:7.7128e-05 L10_sharp:1.5099e-04 L11_sharp:1.0608e-04 L12_sharp:6.1960e-04 total_fnorm:3.7000e+01 total_l1_linf:1.1110e+05 total_spectral:1.8250e+01 L1_fnorm:1.2562e+01 L2_fnorm:1.1938e+01 L3_fnorm:1.1562e+01 L4_fnorm:1.1250e+01 L5_fnorm:1.0688e+01 L6_fnorm:1.0500e+01 L7_fnorm:1.0312e+01 L8_fnorm:9.6875e+00 L9_fnorm:9.6875e+00 L10_fnorm:8.5000e+00 L11_fnorm:8.9375e+00 L12_fnorm:7.6562e+00 L1_l1linf:3.7344e+00 L2_l1linf:3.6875e+00 L3_l1linf:3.3906e+00 L4_l1linf:3.3906e+00 L5_l1linf:3.3438e+00 L6_l1linf:3.1719e+00 L7_l1linf:3.0781e+00 L8_l1linf:2.9219e+00 L9_l1linf:2.9688e+00 L10_l1linf:2.2812e+00 L11_l1linf:2.2656e+00 L12_l1linf:2.2031e+00 L1_spectral:1.3337e-01 L2_spectral:1.3117e-01 L3_spectral:1.3165e-01 L4_spectral:1.3078e-01 L5_spectral:1.3024e-01 L6_spectral:1.3056e-01 L7_spectral:1.3053e-01 L8_spectral:1.3105e-01 L9_spectral:1.3010e-01 L10_spectral:1.2876e-01 L11_spectral:1.2991e-01 L12_spectral:1.2898e-01 train_time:61322ms step_avg:76.65ms +[2025-09-11 11:15:17] [Rank 0] step:801/10000 train_time:63092ms step_avg:78.77ms +[2025-09-11 11:15:17] [Rank 0] step:801/10000 train_time:63092ms step_avg:78.77ms +[2025-09-11 11:15:18] [Rank 0] step:821/10000 train_time:63740ms step_avg:77.64ms +[2025-09-11 11:15:18] [Rank 0] step:821/10000 train_time:63740ms step_avg:77.64ms +[2025-09-11 11:15:19] [Rank 0] step:841/10000 train_time:64384ms step_avg:76.56ms +[2025-09-11 11:15:19] [Rank 0] step:841/10000 train_time:64384ms step_avg:76.56ms +[2025-09-11 11:15:19] [Rank 0] step:861/10000 train_time:65029ms step_avg:75.53ms +[2025-09-11 11:15:19] [Rank 0] step:861/10000 train_time:65029ms step_avg:75.53ms +[2025-09-11 11:15:20] [Rank 0] step:881/10000 train_time:65673ms step_avg:74.54ms +[2025-09-11 11:15:20] [Rank 0] step:881/10000 train_time:65673ms step_avg:74.54ms +[2025-09-11 
11:15:21] [Rank 0] step:901/10000 train_time:66317ms step_avg:73.60ms +[2025-09-11 11:15:21] [Rank 0] step:901/10000 train_time:66317ms step_avg:73.60ms +[2025-09-11 11:15:21] [Rank 0] step:921/10000 train_time:66960ms step_avg:72.70ms +[2025-09-11 11:15:21] [Rank 0] step:921/10000 train_time:66960ms step_avg:72.70ms +[2025-09-11 11:15:22] [Rank 0] step:941/10000 train_time:67604ms step_avg:71.84ms +[2025-09-11 11:15:22] [Rank 0] step:941/10000 train_time:67604ms step_avg:71.84ms +[2025-09-11 11:15:23] [Rank 0] step:961/10000 train_time:68247ms step_avg:71.02ms +[2025-09-11 11:15:23] [Rank 0] step:961/10000 train_time:68247ms step_avg:71.02ms +[2025-09-11 11:15:23] [Rank 0] step:981/10000 train_time:68891ms step_avg:70.23ms +[2025-09-11 11:15:23] [Rank 0] step:981/10000 train_time:68891ms step_avg:70.23ms +[2025-09-11 11:15:24] [Rank 0] step:1001/10000 train_time:69534ms step_avg:69.46ms +[2025-09-11 11:15:24] [Rank 0] step:1001/10000 train_time:69534ms step_avg:69.46ms +[2025-09-11 11:15:25] [Rank 0] step:1021/10000 train_time:70177ms step_avg:68.73ms +[2025-09-11 11:15:25] [Rank 0] step:1021/10000 train_time:70177ms step_avg:68.73ms +[2025-09-11 11:15:25] [Rank 0] step:1041/10000 train_time:70820ms step_avg:68.03ms +[2025-09-11 11:15:25] [Rank 0] step:1041/10000 train_time:70820ms step_avg:68.03ms +[2025-09-11 11:15:26] [Rank 0] step:1061/10000 train_time:71463ms step_avg:67.35ms +[2025-09-11 11:15:26] [Rank 0] step:1061/10000 train_time:71463ms step_avg:67.35ms +[2025-09-11 11:15:27] [Rank 0] step:1081/10000 train_time:72106ms step_avg:66.70ms +[2025-09-11 11:15:27] [Rank 0] step:1081/10000 train_time:72106ms step_avg:66.70ms +[2025-09-11 11:15:27] [Rank 0] step:1101/10000 train_time:72748ms step_avg:66.07ms +[2025-09-11 11:15:27] [Rank 0] step:1101/10000 train_time:72748ms step_avg:66.07ms +[2025-09-11 11:15:28] [Rank 0] step:1121/10000 train_time:73390ms step_avg:65.47ms +[2025-09-11 11:15:28] [Rank 0] step:1121/10000 train_time:73390ms step_avg:65.47ms 
+[2025-09-11 11:15:28] [Rank 0] step:1141/10000 train_time:74033ms step_avg:64.88ms +[2025-09-11 11:15:28] [Rank 0] step:1141/10000 train_time:74033ms step_avg:64.88ms +[2025-09-11 11:15:29] [Rank 0] step:1161/10000 train_time:74676ms step_avg:64.32ms +[2025-09-11 11:15:29] [Rank 0] step:1161/10000 train_time:74676ms step_avg:64.32ms +[2025-09-11 11:15:30] [Rank 0] step:1181/10000 train_time:75319ms step_avg:63.78ms +[2025-09-11 11:15:30] [Rank 0] step:1181/10000 train_time:75319ms step_avg:63.78ms +[2025-09-11 11:15:30] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 11:15:30] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 11:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 11:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 11:15:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 11:15:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 11:15:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:15:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:15:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 11:15:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 11:15:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 11:15:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 11:15:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... 
+[2025-09-11 11:15:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 11:15:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 11:15:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 11:15:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 11:15:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 11:15:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 11:15:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 11:15:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 11:15:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 11:15:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 11:15:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 11:15:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 11:15:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 11:15:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 11:15:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 11:15:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 11:15:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 11:15:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 11:15:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... 
+[2025-09-11 11:15:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 11:15:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 11:15:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 11:15:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 11:15:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 11:15:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 11:15:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 11:15:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 11:15:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 11:15:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 11:15:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 11:15:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 11:15:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:15:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:15:44] [Rank 0] PRINT: step:1200/10000 val_loss:6.0287 total_sharp:9.2400e-04 L1_sharp:4.4000e-04 L2_sharp:2.9915e-04 L3_sharp:2.5554e-04 L4_sharp:1.1342e-04 L5_sharp:1.2505e-04 L6_sharp:5.0073e-05 L7_sharp:4.0128e-05 L8_sharp:6.0876e-05 L9_sharp:4.6323e-05 L10_sharp:9.7201e-05 L11_sharp:9.0805e-05 L12_sharp:4.2571e-04 total_fnorm:3.9750e+01 total_l1_linf:1.1981e+05 total_spectral:2.0000e+01 L1_fnorm:1.2625e+01 L2_fnorm:1.2188e+01 L3_fnorm:1.2062e+01 L4_fnorm:1.1938e+01 L5_fnorm:1.1562e+01 L6_fnorm:1.1562e+01 L7_fnorm:1.1562e+01 L8_fnorm:1.1062e+01 L9_fnorm:1.1062e+01 L10_fnorm:1.0625e+01 L11_fnorm:1.0500e+01 L12_fnorm:9.5625e+00 L1_l1linf:3.5469e+00 L2_l1linf:3.6094e+00 L3_l1linf:3.4375e+00 L4_l1linf:3.3906e+00 L5_l1linf:3.4062e+00 L6_l1linf:3.2969e+00 L7_l1linf:3.3281e+00 L8_l1linf:3.2031e+00 L9_l1linf:3.2344e+00 L10_l1linf:3.0156e+00 L11_l1linf:2.7344e+00 L12_l1linf:2.1875e+00 L1_spectral:1.3996e-01 L2_spectral:1.3758e-01 L3_spectral:1.3702e-01 L4_spectral:1.3596e-01 L5_spectral:1.3641e-01 L6_spectral:1.3519e-01 L7_spectral:1.3437e-01 L8_spectral:1.3477e-01 L9_spectral:1.3527e-01 L10_spectral:1.3485e-01 L11_spectral:1.3480e-01 L12_spectral:1.3505e-01 train_time:75944ms step_avg:63.29ms +[2025-09-11 11:15:44] [Rank 0] PRINT: step:1200/10000 val_loss:6.0287 total_sharp:9.2400e-04 L1_sharp:4.4000e-04 L2_sharp:2.9915e-04 L3_sharp:2.5554e-04 L4_sharp:1.1342e-04 L5_sharp:1.2505e-04 L6_sharp:5.0073e-05 L7_sharp:4.0128e-05 L8_sharp:6.0876e-05 L9_sharp:4.6323e-05 L10_sharp:9.7201e-05 L11_sharp:9.0805e-05 L12_sharp:4.2571e-04 total_fnorm:3.9750e+01 total_l1_linf:1.1981e+05 total_spectral:2.0000e+01 L1_fnorm:1.2625e+01 L2_fnorm:1.2188e+01 L3_fnorm:1.2062e+01 L4_fnorm:1.1938e+01 L5_fnorm:1.1562e+01 L6_fnorm:1.1562e+01 L7_fnorm:1.1562e+01 L8_fnorm:1.1062e+01 L9_fnorm:1.1062e+01 L10_fnorm:1.0625e+01 L11_fnorm:1.0500e+01 L12_fnorm:9.5625e+00 L1_l1linf:3.5469e+00 L2_l1linf:3.6094e+00 L3_l1linf:3.4375e+00 L4_l1linf:3.3906e+00 L5_l1linf:3.4062e+00 
L6_l1linf:3.2969e+00 L7_l1linf:3.3281e+00 L8_l1linf:3.2031e+00 L9_l1linf:3.2344e+00 L10_l1linf:3.0156e+00 L11_l1linf:2.7344e+00 L12_l1linf:2.1875e+00 L1_spectral:1.3996e-01 L2_spectral:1.3758e-01 L3_spectral:1.3702e-01 L4_spectral:1.3596e-01 L5_spectral:1.3641e-01 L6_spectral:1.3519e-01 L7_spectral:1.3437e-01 L8_spectral:1.3477e-01 L9_spectral:1.3527e-01 L10_spectral:1.3485e-01 L11_spectral:1.3480e-01 L12_spectral:1.3505e-01 train_time:75944ms step_avg:63.29ms +[2025-09-11 11:15:46] [Rank 0] step:1201/10000 train_time:77615ms step_avg:64.63ms +[2025-09-11 11:15:46] [Rank 0] step:1201/10000 train_time:77615ms step_avg:64.63ms +[2025-09-11 11:15:47] [Rank 0] step:1221/10000 train_time:78532ms step_avg:64.32ms +[2025-09-11 11:15:47] [Rank 0] step:1221/10000 train_time:78532ms step_avg:64.32ms +[2025-09-11 11:15:47] [Rank 0] step:1241/10000 train_time:79178ms step_avg:63.80ms +[2025-09-11 11:15:47] [Rank 0] step:1241/10000 train_time:79178ms step_avg:63.80ms +[2025-09-11 11:15:48] [Rank 0] step:1261/10000 train_time:79957ms step_avg:63.41ms +[2025-09-11 11:15:48] [Rank 0] step:1261/10000 train_time:79957ms step_avg:63.41ms +[2025-09-11 11:15:49] [Rank 0] step:1281/10000 train_time:80726ms step_avg:63.02ms +[2025-09-11 11:15:49] [Rank 0] step:1281/10000 train_time:80726ms step_avg:63.02ms +[2025-09-11 11:15:49] [Rank 0] step:1301/10000 train_time:81369ms step_avg:62.54ms +[2025-09-11 11:15:49] [Rank 0] step:1301/10000 train_time:81369ms step_avg:62.54ms +[2025-09-11 11:15:50] [Rank 0] step:1321/10000 train_time:82013ms step_avg:62.08ms +[2025-09-11 11:15:50] [Rank 0] step:1321/10000 train_time:82013ms step_avg:62.08ms +[2025-09-11 11:15:51] [Rank 0] step:1341/10000 train_time:82656ms step_avg:61.64ms +[2025-09-11 11:15:51] [Rank 0] step:1341/10000 train_time:82656ms step_avg:61.64ms +[2025-09-11 11:15:51] [Rank 0] step:1361/10000 train_time:83299ms step_avg:61.20ms +[2025-09-11 11:15:51] [Rank 0] step:1361/10000 train_time:83299ms step_avg:61.20ms +[2025-09-11 11:15:52] 
[Rank 0] step:1381/10000 train_time:83942ms step_avg:60.78ms +[2025-09-11 11:15:52] [Rank 0] step:1381/10000 train_time:83942ms step_avg:60.78ms +[2025-09-11 11:15:53] [Rank 0] step:1401/10000 train_time:84585ms step_avg:60.37ms +[2025-09-11 11:15:53] [Rank 0] step:1401/10000 train_time:84585ms step_avg:60.37ms +[2025-09-11 11:15:53] [Rank 0] step:1421/10000 train_time:85228ms step_avg:59.98ms +[2025-09-11 11:15:53] [Rank 0] step:1421/10000 train_time:85228ms step_avg:59.98ms +[2025-09-11 11:15:54] [Rank 0] step:1441/10000 train_time:85871ms step_avg:59.59ms +[2025-09-11 11:15:54] [Rank 0] step:1441/10000 train_time:85871ms step_avg:59.59ms +[2025-09-11 11:15:55] [Rank 0] step:1461/10000 train_time:86515ms step_avg:59.22ms +[2025-09-11 11:15:55] [Rank 0] step:1461/10000 train_time:86515ms step_avg:59.22ms +[2025-09-11 11:15:55] [Rank 0] step:1481/10000 train_time:87158ms step_avg:58.85ms +[2025-09-11 11:15:55] [Rank 0] step:1481/10000 train_time:87158ms step_avg:58.85ms +[2025-09-11 11:15:56] [Rank 0] step:1501/10000 train_time:87806ms step_avg:58.50ms +[2025-09-11 11:15:56] [Rank 0] step:1501/10000 train_time:87806ms step_avg:58.50ms +[2025-09-11 11:15:57] [Rank 0] step:1521/10000 train_time:88453ms step_avg:58.15ms +[2025-09-11 11:15:57] [Rank 0] step:1521/10000 train_time:88453ms step_avg:58.15ms +[2025-09-11 11:15:57] [Rank 0] step:1541/10000 train_time:89100ms step_avg:57.82ms +[2025-09-11 11:15:57] [Rank 0] step:1541/10000 train_time:89100ms step_avg:57.82ms +[2025-09-11 11:15:58] [Rank 0] step:1561/10000 train_time:89747ms step_avg:57.49ms +[2025-09-11 11:15:58] [Rank 0] step:1561/10000 train_time:89747ms step_avg:57.49ms +[2025-09-11 11:15:58] [Rank 0] step:1581/10000 train_time:90394ms step_avg:57.18ms +[2025-09-11 11:15:58] [Rank 0] step:1581/10000 train_time:90394ms step_avg:57.18ms +[2025-09-11 11:15:59] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 11:15:59] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 11:16:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 11:16:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 11:16:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 11:16:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 11:16:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:16:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:16:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 11:16:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 11:16:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 11:16:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 11:16:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 11:16:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 11:16:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 11:16:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 11:16:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 11:16:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 11:16:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 11:16:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 11:16:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 11:16:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 11:16:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 11:16:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 11:16:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 11:16:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 11:16:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 11:16:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 11:16:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 11:16:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 11:16:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 11:16:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 11:16:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 11:16:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 11:16:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 11:16:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 11:16:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 11:16:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 11:16:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 11:16:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 11:16:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 11:16:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 11:16:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 11:16:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 11:16:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:16:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:16:09] [Rank 0] PRINT: step:1600/10000 val_loss:5.8391 total_sharp:6.7968e-04 L1_sharp:2.9826e-04 L2_sharp:2.3418e-04 L3_sharp:9.8512e-05 L4_sharp:8.0390e-05 L5_sharp:8.8819e-05 L6_sharp:2.7695e-05 L7_sharp:2.4178e-05 L8_sharp:6.2037e-05 L9_sharp:4.6890e-05 L10_sharp:7.1254e-05 L11_sharp:8.1539e-05 L12_sharp:6.1691e-04 total_fnorm:4.1250e+01 total_l1_linf:1.1981e+05 total_spectral:2.0500e+01 L1_fnorm:1.2625e+01 L2_fnorm:1.2312e+01 L3_fnorm:1.2250e+01 L4_fnorm:1.2188e+01 L5_fnorm:1.1688e+01 L6_fnorm:1.1812e+01 L7_fnorm:1.1875e+01 L8_fnorm:1.1438e+01 L9_fnorm:1.1562e+01 L10_fnorm:1.1375e+01 L11_fnorm:1.1188e+01 L12_fnorm:1.0125e+01 L1_l1linf:3.4844e+00 L2_l1linf:3.5312e+00 L3_l1linf:3.3438e+00 L4_l1linf:3.2969e+00 L5_l1linf:3.2500e+00 L6_l1linf:3.1875e+00 L7_l1linf:3.1875e+00 L8_l1linf:3.1562e+00 L9_l1linf:3.2188e+00 L10_l1linf:3.1250e+00 L11_l1linf:2.7969e+00 L12_l1linf:2.1719e+00 L1_spectral:1.4444e-01 L2_spectral:1.4202e-01 L3_spectral:1.4231e-01 L4_spectral:1.4078e-01 L5_spectral:1.3886e-01 L6_spectral:1.3984e-01 L7_spectral:1.3901e-01 L8_spectral:1.3881e-01 L9_spectral:1.3784e-01 L10_spectral:1.3832e-01 L11_spectral:1.3860e-01 L12_spectral:1.3924e-01 train_time:91024ms step_avg:56.89ms +[2025-09-11 11:16:09] [Rank 0] PRINT: step:1600/10000 
val_loss:5.8391 total_sharp:6.7968e-04 L1_sharp:2.9826e-04 L2_sharp:2.3418e-04 L3_sharp:9.8512e-05 L4_sharp:8.0390e-05 L5_sharp:8.8819e-05 L6_sharp:2.7695e-05 L7_sharp:2.4178e-05 L8_sharp:6.2037e-05 L9_sharp:4.6890e-05 L10_sharp:7.1254e-05 L11_sharp:8.1539e-05 L12_sharp:6.1691e-04 total_fnorm:4.1250e+01 total_l1_linf:1.1981e+05 total_spectral:2.0500e+01 L1_fnorm:1.2625e+01 L2_fnorm:1.2312e+01 L3_fnorm:1.2250e+01 L4_fnorm:1.2188e+01 L5_fnorm:1.1688e+01 L6_fnorm:1.1812e+01 L7_fnorm:1.1875e+01 L8_fnorm:1.1438e+01 L9_fnorm:1.1562e+01 L10_fnorm:1.1375e+01 L11_fnorm:1.1188e+01 L12_fnorm:1.0125e+01 L1_l1linf:3.4844e+00 L2_l1linf:3.5312e+00 L3_l1linf:3.3438e+00 L4_l1linf:3.2969e+00 L5_l1linf:3.2500e+00 L6_l1linf:3.1875e+00 L7_l1linf:3.1875e+00 L8_l1linf:3.1562e+00 L9_l1linf:3.2188e+00 L10_l1linf:3.1250e+00 L11_l1linf:2.7969e+00 L12_l1linf:2.1719e+00 L1_spectral:1.4444e-01 L2_spectral:1.4202e-01 L3_spectral:1.4231e-01 L4_spectral:1.4078e-01 L5_spectral:1.3886e-01 L6_spectral:1.3984e-01 L7_spectral:1.3901e-01 L8_spectral:1.3881e-01 L9_spectral:1.3784e-01 L10_spectral:1.3832e-01 L11_spectral:1.3860e-01 L12_spectral:1.3924e-01 train_time:91024ms step_avg:56.89ms +[2025-09-11 11:16:11] [Rank 0] step:1601/10000 train_time:92741ms step_avg:57.93ms +[2025-09-11 11:16:11] [Rank 0] step:1601/10000 train_time:92741ms step_avg:57.93ms +[2025-09-11 11:16:12] [Rank 0] step:1621/10000 train_time:93394ms step_avg:57.62ms +[2025-09-11 11:16:12] [Rank 0] step:1621/10000 train_time:93394ms step_avg:57.62ms +[2025-09-11 11:16:12] [Rank 0] step:1641/10000 train_time:94044ms step_avg:57.31ms +[2025-09-11 11:16:12] [Rank 0] step:1641/10000 train_time:94044ms step_avg:57.31ms +[2025-09-11 11:16:13] [Rank 0] step:1661/10000 train_time:94692ms step_avg:57.01ms +[2025-09-11 11:16:13] [Rank 0] step:1661/10000 train_time:94692ms step_avg:57.01ms +[2025-09-11 11:16:14] [Rank 0] step:1681/10000 train_time:95339ms step_avg:56.72ms +[2025-09-11 11:16:14] [Rank 0] step:1681/10000 train_time:95339ms 
step_avg:56.72ms +[2025-09-11 11:16:14] [Rank 0] step:1701/10000 train_time:95988ms step_avg:56.43ms +[2025-09-11 11:16:14] [Rank 0] step:1701/10000 train_time:95988ms step_avg:56.43ms +[2025-09-11 11:16:15] [Rank 0] step:1721/10000 train_time:96636ms step_avg:56.15ms +[2025-09-11 11:16:15] [Rank 0] step:1721/10000 train_time:96636ms step_avg:56.15ms +[2025-09-11 11:16:16] [Rank 0] step:1741/10000 train_time:97284ms step_avg:55.88ms +[2025-09-11 11:16:16] [Rank 0] step:1741/10000 train_time:97284ms step_avg:55.88ms +[2025-09-11 11:16:16] [Rank 0] step:1761/10000 train_time:97931ms step_avg:55.61ms +[2025-09-11 11:16:16] [Rank 0] step:1761/10000 train_time:97931ms step_avg:55.61ms +[2025-09-11 11:16:17] [Rank 0] step:1781/10000 train_time:98578ms step_avg:55.35ms +[2025-09-11 11:16:17] [Rank 0] step:1781/10000 train_time:98578ms step_avg:55.35ms +[2025-09-11 11:16:18] [Rank 0] step:1801/10000 train_time:99226ms step_avg:55.09ms +[2025-09-11 11:16:18] [Rank 0] step:1801/10000 train_time:99226ms step_avg:55.09ms +[2025-09-11 11:16:18] [Rank 0] step:1821/10000 train_time:99874ms step_avg:54.85ms +[2025-09-11 11:16:18] [Rank 0] step:1821/10000 train_time:99874ms step_avg:54.85ms +[2025-09-11 11:16:19] [Rank 0] step:1841/10000 train_time:100521ms step_avg:54.60ms +[2025-09-11 11:16:19] [Rank 0] step:1841/10000 train_time:100521ms step_avg:54.60ms +[2025-09-11 11:16:20] [Rank 0] step:1861/10000 train_time:101169ms step_avg:54.36ms +[2025-09-11 11:16:20] [Rank 0] step:1861/10000 train_time:101169ms step_avg:54.36ms +[2025-09-11 11:16:20] [Rank 0] step:1881/10000 train_time:101816ms step_avg:54.13ms +[2025-09-11 11:16:20] [Rank 0] step:1881/10000 train_time:101816ms step_avg:54.13ms +[2025-09-11 11:16:21] [Rank 0] step:1901/10000 train_time:102464ms step_avg:53.90ms +[2025-09-11 11:16:21] [Rank 0] step:1901/10000 train_time:102464ms step_avg:53.90ms +[2025-09-11 11:16:22] [Rank 0] step:1921/10000 train_time:103111ms step_avg:53.68ms +[2025-09-11 11:16:22] [Rank 0] 
step:1921/10000 train_time:103111ms step_avg:53.68ms +[2025-09-11 11:16:22] [Rank 0] step:1941/10000 train_time:103758ms step_avg:53.46ms +[2025-09-11 11:16:22] [Rank 0] step:1941/10000 train_time:103758ms step_avg:53.46ms +[2025-09-11 11:16:23] [Rank 0] step:1961/10000 train_time:104406ms step_avg:53.24ms +[2025-09-11 11:16:23] [Rank 0] step:1961/10000 train_time:104406ms step_avg:53.24ms +[2025-09-11 11:16:23] [Rank 0] step:1981/10000 train_time:105054ms step_avg:53.03ms +[2025-09-11 11:16:23] [Rank 0] step:1981/10000 train_time:105054ms step_avg:53.03ms +[2025-09-11 11:16:24] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 11:16:24] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 11:16:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 11:16:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 11:16:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 11:16:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 11:16:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:16:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:16:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 11:16:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 11:16:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 11:16:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 11:16:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 11:16:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 11:16:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 11:16:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 11:16:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 11:16:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 11:16:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 11:16:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 11:16:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 11:16:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 11:16:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 11:16:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 11:16:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 11:16:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 11:16:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 11:16:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 11:16:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 11:16:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 11:16:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 11:16:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 11:16:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 11:16:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 11:16:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 11:16:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 11:16:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 11:16:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 11:16:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 11:16:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 11:16:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 11:16:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 11:16:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 11:16:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 11:16:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:16:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:16:34] [Rank 0] PRINT: step:2000/10000 val_loss:5.6803 total_sharp:7.1238e-04 L1_sharp:2.9953e-04 L2_sharp:2.2606e-04 L3_sharp:7.4092e-05 L4_sharp:8.2704e-05 L5_sharp:9.5835e-05 L6_sharp:4.3861e-05 L7_sharp:3.2008e-05 L8_sharp:7.0600e-05 L9_sharp:5.3846e-05 L10_sharp:9.2791e-05 L11_sharp:9.3604e-05 L12_sharp:6.4288e-04 total_fnorm:4.1750e+01 total_l1_linf:1.1930e+05 total_spectral:2.0875e+01 L1_fnorm:1.2750e+01 L2_fnorm:1.2312e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2312e+01 L5_fnorm:1.1750e+01 L6_fnorm:1.2062e+01 L7_fnorm:1.2062e+01 L8_fnorm:1.1688e+01 L9_fnorm:1.1875e+01 L10_fnorm:1.1812e+01 L11_fnorm:1.1562e+01 L12_fnorm:1.0562e+01 L1_l1linf:3.4219e+00 L2_l1linf:3.4375e+00 L3_l1linf:3.2500e+00 L4_l1linf:3.2344e+00 L5_l1linf:3.1875e+00 L6_l1linf:3.1250e+00 L7_l1linf:3.1094e+00 L8_l1linf:3.0781e+00 L9_l1linf:3.1719e+00 L10_l1linf:3.1719e+00 L11_l1linf:2.8438e+00 L12_l1linf:2.1875e+00 L1_spectral:1.4764e-01 L2_spectral:1.4503e-01 L3_spectral:1.4544e-01 L4_spectral:1.4524e-01 L5_spectral:1.4212e-01 L6_spectral:1.4281e-01 L7_spectral:1.4249e-01 L8_spectral:1.4235e-01 L9_spectral:1.4070e-01 L10_spectral:1.4137e-01 L11_spectral:1.4106e-01 L12_spectral:1.4232e-01 train_time:105684ms step_avg:52.84ms +[2025-09-11 11:16:34] [Rank 0] PRINT: step:2000/10000 val_loss:5.6803 total_sharp:7.1238e-04 L1_sharp:2.9953e-04 L2_sharp:2.2606e-04 L3_sharp:7.4092e-05 L4_sharp:8.2704e-05 L5_sharp:9.5835e-05 L6_sharp:4.3861e-05 L7_sharp:3.2008e-05 L8_sharp:7.0600e-05 L9_sharp:5.3846e-05 L10_sharp:9.2791e-05 L11_sharp:9.3604e-05 L12_sharp:6.4288e-04 total_fnorm:4.1750e+01 total_l1_linf:1.1930e+05 total_spectral:2.0875e+01 L1_fnorm:1.2750e+01 L2_fnorm:1.2312e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2312e+01 L5_fnorm:1.1750e+01 L6_fnorm:1.2062e+01 L7_fnorm:1.2062e+01 L8_fnorm:1.1688e+01 L9_fnorm:1.1875e+01 L10_fnorm:1.1812e+01 L11_fnorm:1.1562e+01 L12_fnorm:1.0562e+01 L1_l1linf:3.4219e+00 L2_l1linf:3.4375e+00 L3_l1linf:3.2500e+00 L4_l1linf:3.2344e+00 L5_l1linf:3.1875e+00 
L6_l1linf:3.1250e+00 L7_l1linf:3.1094e+00 L8_l1linf:3.0781e+00 L9_l1linf:3.1719e+00 L10_l1linf:3.1719e+00 L11_l1linf:2.8438e+00 L12_l1linf:2.1875e+00 L1_spectral:1.4764e-01 L2_spectral:1.4503e-01 L3_spectral:1.4544e-01 L4_spectral:1.4524e-01 L5_spectral:1.4212e-01 L6_spectral:1.4281e-01 L7_spectral:1.4249e-01 L8_spectral:1.4235e-01 L9_spectral:1.4070e-01 L10_spectral:1.4137e-01 L11_spectral:1.4106e-01 L12_spectral:1.4232e-01 train_time:105684ms step_avg:52.84ms +[2025-09-11 11:16:36] [Rank 0] step:2001/10000 train_time:107316ms step_avg:53.63ms +[2025-09-11 11:16:36] [Rank 0] step:2001/10000 train_time:107316ms step_avg:53.63ms +[2025-09-11 11:16:37] [Rank 0] step:2021/10000 train_time:107993ms step_avg:53.44ms +[2025-09-11 11:16:37] [Rank 0] step:2021/10000 train_time:107993ms step_avg:53.44ms +[2025-09-11 11:16:37] [Rank 0] step:2041/10000 train_time:108642ms step_avg:53.23ms +[2025-09-11 11:16:37] [Rank 0] step:2041/10000 train_time:108642ms step_avg:53.23ms +[2025-09-11 11:16:38] [Rank 0] step:2061/10000 train_time:109291ms step_avg:53.03ms +[2025-09-11 11:16:38] [Rank 0] step:2061/10000 train_time:109291ms step_avg:53.03ms +[2025-09-11 11:16:39] [Rank 0] step:2081/10000 train_time:109939ms step_avg:52.83ms +[2025-09-11 11:16:39] [Rank 0] step:2081/10000 train_time:109939ms step_avg:52.83ms +[2025-09-11 11:16:39] [Rank 0] step:2101/10000 train_time:110587ms step_avg:52.64ms +[2025-09-11 11:16:39] [Rank 0] step:2101/10000 train_time:110587ms step_avg:52.64ms +[2025-09-11 11:16:40] [Rank 0] step:2121/10000 train_time:111235ms step_avg:52.44ms +[2025-09-11 11:16:40] [Rank 0] step:2121/10000 train_time:111235ms step_avg:52.44ms +[2025-09-11 11:16:41] [Rank 0] step:2141/10000 train_time:111882ms step_avg:52.26ms +[2025-09-11 11:16:41] [Rank 0] step:2141/10000 train_time:111882ms step_avg:52.26ms +[2025-09-11 11:16:41] [Rank 0] step:2161/10000 train_time:112530ms step_avg:52.07ms +[2025-09-11 11:16:41] [Rank 0] step:2161/10000 train_time:112530ms step_avg:52.07ms 
+[2025-09-11 11:16:42] [Rank 0] step:2181/10000 train_time:113177ms step_avg:51.89ms +[2025-09-11 11:16:42] [Rank 0] step:2181/10000 train_time:113177ms step_avg:51.89ms +[2025-09-11 11:16:43] [Rank 0] step:2201/10000 train_time:113825ms step_avg:51.72ms +[2025-09-11 11:16:43] [Rank 0] step:2201/10000 train_time:113825ms step_avg:51.72ms +[2025-09-11 11:16:43] [Rank 0] step:2221/10000 train_time:114472ms step_avg:51.54ms +[2025-09-11 11:16:43] [Rank 0] step:2221/10000 train_time:114472ms step_avg:51.54ms +[2025-09-11 11:16:44] [Rank 0] step:2241/10000 train_time:115131ms step_avg:51.37ms +[2025-09-11 11:16:44] [Rank 0] step:2241/10000 train_time:115131ms step_avg:51.37ms +[2025-09-11 11:16:45] [Rank 0] step:2261/10000 train_time:115792ms step_avg:51.21ms +[2025-09-11 11:16:45] [Rank 0] step:2261/10000 train_time:115792ms step_avg:51.21ms +[2025-09-11 11:16:45] [Rank 0] step:2281/10000 train_time:116453ms step_avg:51.05ms +[2025-09-11 11:16:45] [Rank 0] step:2281/10000 train_time:116453ms step_avg:51.05ms +[2025-09-11 11:16:46] [Rank 0] step:2301/10000 train_time:117114ms step_avg:50.90ms +[2025-09-11 11:16:46] [Rank 0] step:2301/10000 train_time:117114ms step_avg:50.90ms +[2025-09-11 11:16:47] [Rank 0] step:2321/10000 train_time:117775ms step_avg:50.74ms +[2025-09-11 11:16:47] [Rank 0] step:2321/10000 train_time:117775ms step_avg:50.74ms +[2025-09-11 11:16:47] [Rank 0] step:2341/10000 train_time:118435ms step_avg:50.59ms +[2025-09-11 11:16:47] [Rank 0] step:2341/10000 train_time:118435ms step_avg:50.59ms +[2025-09-11 11:16:48] [Rank 0] step:2361/10000 train_time:119096ms step_avg:50.44ms +[2025-09-11 11:16:48] [Rank 0] step:2361/10000 train_time:119096ms step_avg:50.44ms +[2025-09-11 11:16:49] [Rank 0] step:2381/10000 train_time:119900ms step_avg:50.36ms +[2025-09-11 11:16:49] [Rank 0] step:2381/10000 train_time:119900ms step_avg:50.36ms +[2025-09-11 11:16:50] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 11:16:50] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 11:16:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 11:16:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 11:16:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 11:16:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 11:16:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:16:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:16:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 11:16:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 11:16:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 11:16:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 11:16:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 11:16:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 11:16:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 11:16:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 11:16:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 11:16:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 11:16:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 11:16:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 11:16:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 11:16:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 11:16:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 11:16:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 11:16:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 11:16:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 11:16:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 11:16:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 11:16:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 11:16:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 11:16:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 11:16:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 11:16:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 11:16:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 11:16:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 11:16:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 11:16:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 11:16:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 11:16:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 11:16:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 11:16:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 11:16:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 11:16:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 11:16:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 11:17:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:17:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:17:00] [Rank 0] PRINT: step:2400/10000 val_loss:5.5470 total_sharp:4.4429e-04 L1_sharp:2.0875e-04 L2_sharp:9.9881e-05 L3_sharp:4.3036e-05 L4_sharp:3.8226e-05 L5_sharp:7.2067e-05 L6_sharp:3.4412e-05 L7_sharp:3.0640e-05 L8_sharp:5.5287e-05 L9_sharp:4.9141e-05 L10_sharp:7.0203e-05 L11_sharp:7.4526e-05 L12_sharp:3.5338e-04 total_fnorm:4.2250e+01 total_l1_linf:1.1776e+05 total_spectral:2.1125e+01 L1_fnorm:1.2750e+01 L2_fnorm:1.2375e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2375e+01 L5_fnorm:1.1938e+01 L6_fnorm:1.2188e+01 L7_fnorm:1.2250e+01 L8_fnorm:1.1812e+01 L9_fnorm:1.2062e+01 L10_fnorm:1.2000e+01 L11_fnorm:1.1812e+01 L12_fnorm:1.1000e+01 L1_l1linf:3.4531e+00 L2_l1linf:3.3281e+00 L3_l1linf:3.1719e+00 L4_l1linf:3.1406e+00 L5_l1linf:3.0781e+00 L6_l1linf:2.9844e+00 L7_l1linf:3.0312e+00 L8_l1linf:2.9844e+00 L9_l1linf:3.0781e+00 L10_l1linf:3.1250e+00 L11_l1linf:2.8906e+00 L12_l1linf:2.3125e+00 L1_spectral:1.5021e-01 L2_spectral:1.4768e-01 L3_spectral:1.4816e-01 L4_spectral:1.4840e-01 L5_spectral:1.4583e-01 L6_spectral:1.4702e-01 L7_spectral:1.4594e-01 L8_spectral:1.4443e-01 L9_spectral:1.4437e-01 L10_spectral:1.4374e-01 L11_spectral:1.4387e-01 L12_spectral:1.4599e-01 train_time:120953ms step_avg:50.40ms +[2025-09-11 11:17:00] [Rank 0] PRINT: step:2400/10000 
val_loss:5.5470 total_sharp:4.4429e-04 L1_sharp:2.0875e-04 L2_sharp:9.9881e-05 L3_sharp:4.3036e-05 L4_sharp:3.8226e-05 L5_sharp:7.2067e-05 L6_sharp:3.4412e-05 L7_sharp:3.0640e-05 L8_sharp:5.5287e-05 L9_sharp:4.9141e-05 L10_sharp:7.0203e-05 L11_sharp:7.4526e-05 L12_sharp:3.5338e-04 total_fnorm:4.2250e+01 total_l1_linf:1.1776e+05 total_spectral:2.1125e+01 L1_fnorm:1.2750e+01 L2_fnorm:1.2375e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2375e+01 L5_fnorm:1.1938e+01 L6_fnorm:1.2188e+01 L7_fnorm:1.2250e+01 L8_fnorm:1.1812e+01 L9_fnorm:1.2062e+01 L10_fnorm:1.2000e+01 L11_fnorm:1.1812e+01 L12_fnorm:1.1000e+01 L1_l1linf:3.4531e+00 L2_l1linf:3.3281e+00 L3_l1linf:3.1719e+00 L4_l1linf:3.1406e+00 L5_l1linf:3.0781e+00 L6_l1linf:2.9844e+00 L7_l1linf:3.0312e+00 L8_l1linf:2.9844e+00 L9_l1linf:3.0781e+00 L10_l1linf:3.1250e+00 L11_l1linf:2.8906e+00 L12_l1linf:2.3125e+00 L1_spectral:1.5021e-01 L2_spectral:1.4768e-01 L3_spectral:1.4816e-01 L4_spectral:1.4840e-01 L5_spectral:1.4583e-01 L6_spectral:1.4702e-01 L7_spectral:1.4594e-01 L8_spectral:1.4443e-01 L9_spectral:1.4437e-01 L10_spectral:1.4374e-01 L11_spectral:1.4387e-01 L12_spectral:1.4599e-01 train_time:120953ms step_avg:50.40ms +[2025-09-11 11:17:01] [Rank 0] step:2401/10000 train_time:122684ms step_avg:51.10ms +[2025-09-11 11:17:01] [Rank 0] step:2401/10000 train_time:122684ms step_avg:51.10ms +[2025-09-11 11:17:02] [Rank 0] step:2421/10000 train_time:123350ms step_avg:50.95ms +[2025-09-11 11:17:02] [Rank 0] step:2421/10000 train_time:123350ms step_avg:50.95ms +[2025-09-11 11:17:03] [Rank 0] step:2441/10000 train_time:124013ms step_avg:50.80ms +[2025-09-11 11:17:03] [Rank 0] step:2441/10000 train_time:124013ms step_avg:50.80ms +[2025-09-11 11:17:03] [Rank 0] step:2461/10000 train_time:124675ms step_avg:50.66ms +[2025-09-11 11:17:03] [Rank 0] step:2461/10000 train_time:124675ms step_avg:50.66ms +[2025-09-11 11:17:04] [Rank 0] step:2481/10000 train_time:125348ms step_avg:50.52ms +[2025-09-11 11:17:04] [Rank 0] step:2481/10000 
train_time:125348ms step_avg:50.52ms +[2025-09-11 11:17:05] [Rank 0] step:2501/10000 train_time:126009ms step_avg:50.38ms +[2025-09-11 11:17:05] [Rank 0] step:2501/10000 train_time:126009ms step_avg:50.38ms +[2025-09-11 11:17:05] [Rank 0] step:2521/10000 train_time:126671ms step_avg:50.25ms +[2025-09-11 11:17:05] [Rank 0] step:2521/10000 train_time:126671ms step_avg:50.25ms +[2025-09-11 11:17:06] [Rank 0] step:2541/10000 train_time:127332ms step_avg:50.11ms +[2025-09-11 11:17:06] [Rank 0] step:2541/10000 train_time:127332ms step_avg:50.11ms +[2025-09-11 11:17:07] [Rank 0] step:2561/10000 train_time:127994ms step_avg:49.98ms +[2025-09-11 11:17:07] [Rank 0] step:2561/10000 train_time:127994ms step_avg:49.98ms +[2025-09-11 11:17:07] [Rank 0] step:2581/10000 train_time:128654ms step_avg:49.85ms +[2025-09-11 11:17:07] [Rank 0] step:2581/10000 train_time:128654ms step_avg:49.85ms +[2025-09-11 11:17:08] [Rank 0] step:2601/10000 train_time:129315ms step_avg:49.72ms +[2025-09-11 11:17:08] [Rank 0] step:2601/10000 train_time:129315ms step_avg:49.72ms +[2025-09-11 11:17:09] [Rank 0] step:2621/10000 train_time:129977ms step_avg:49.59ms +[2025-09-11 11:17:09] [Rank 0] step:2621/10000 train_time:129977ms step_avg:49.59ms +[2025-09-11 11:17:09] [Rank 0] step:2641/10000 train_time:130637ms step_avg:49.47ms +[2025-09-11 11:17:09] [Rank 0] step:2641/10000 train_time:130637ms step_avg:49.47ms +[2025-09-11 11:17:10] [Rank 0] step:2661/10000 train_time:131298ms step_avg:49.34ms +[2025-09-11 11:17:10] [Rank 0] step:2661/10000 train_time:131298ms step_avg:49.34ms +[2025-09-11 11:17:11] [Rank 0] step:2681/10000 train_time:131959ms step_avg:49.22ms +[2025-09-11 11:17:11] [Rank 0] step:2681/10000 train_time:131959ms step_avg:49.22ms +[2025-09-11 11:17:11] [Rank 0] step:2701/10000 train_time:132620ms step_avg:49.10ms +[2025-09-11 11:17:11] [Rank 0] step:2701/10000 train_time:132620ms step_avg:49.10ms +[2025-09-11 11:17:12] [Rank 0] step:2721/10000 train_time:133281ms step_avg:48.98ms 
+[2025-09-11 11:17:12] [Rank 0] step:2721/10000 train_time:133281ms step_avg:48.98ms +[2025-09-11 11:17:13] [Rank 0] step:2741/10000 train_time:133943ms step_avg:48.87ms +[2025-09-11 11:17:13] [Rank 0] step:2741/10000 train_time:133943ms step_avg:48.87ms +[2025-09-11 11:17:13] [Rank 0] step:2761/10000 train_time:134604ms step_avg:48.75ms +[2025-09-11 11:17:13] [Rank 0] step:2761/10000 train_time:134604ms step_avg:48.75ms +[2025-09-11 11:17:14] [Rank 0] step:2781/10000 train_time:135265ms step_avg:48.64ms +[2025-09-11 11:17:14] [Rank 0] step:2781/10000 train_time:135265ms step_avg:48.64ms +[2025-09-11 11:17:15] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 11:17:15] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 11:17:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 11:17:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 11:17:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 11:17:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 11:17:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:17:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:17:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 11:17:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 11:17:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 11:17:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 11:17:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 11:17:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 11:17:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 11:17:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 11:17:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 11:17:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 11:17:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 11:17:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 11:17:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 11:17:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 11:17:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 11:17:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 11:17:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 11:17:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 11:17:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 11:17:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 11:17:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 11:17:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 11:17:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 11:17:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 11:17:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 11:17:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 11:17:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 11:17:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 11:17:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 11:17:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 11:17:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 11:17:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 11:17:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 11:17:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 11:17:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 11:17:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 11:17:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:17:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:17:25] [Rank 0] PRINT: step:2800/10000 val_loss:5.4653 total_sharp:5.2026e-04 L1_sharp:1.8757e-04 L2_sharp:1.2334e-04 L3_sharp:4.5924e-05 L4_sharp:4.4151e-05 L5_sharp:7.1705e-05 L6_sharp:3.2839e-05 L7_sharp:3.7066e-05 L8_sharp:5.8448e-05 L9_sharp:5.5190e-05 L10_sharp:6.6162e-05 L11_sharp:9.6340e-05 L12_sharp:4.8798e-04 total_fnorm:4.2250e+01 total_l1_linf:1.1520e+05 total_spectral:2.1125e+01 L1_fnorm:1.2812e+01 L2_fnorm:1.2375e+01 L3_fnorm:1.2375e+01 L4_fnorm:1.2375e+01 L5_fnorm:1.2000e+01 L6_fnorm:1.2250e+01 L7_fnorm:1.2250e+01 L8_fnorm:1.1812e+01 L9_fnorm:1.2188e+01 L10_fnorm:1.2062e+01 L11_fnorm:1.1875e+01 L12_fnorm:1.1000e+01 L1_l1linf:3.3750e+00 L2_l1linf:3.3125e+00 L3_l1linf:3.1250e+00 L4_l1linf:3.0625e+00 L5_l1linf:3.0000e+00 L6_l1linf:2.9375e+00 L7_l1linf:2.9062e+00 L8_l1linf:2.9375e+00 L9_l1linf:3.0000e+00 L10_l1linf:3.0312e+00 L11_l1linf:2.8438e+00 L12_l1linf:2.2656e+00 L1_spectral:1.5294e-01 L2_spectral:1.5012e-01 L3_spectral:1.5038e-01 L4_spectral:1.5009e-01 L5_spectral:1.4772e-01 L6_spectral:1.4872e-01 L7_spectral:1.4831e-01 L8_spectral:1.4675e-01 L9_spectral:1.4628e-01 L10_spectral:1.4656e-01 L11_spectral:1.4649e-01 L12_spectral:1.4757e-01 train_time:135909ms step_avg:48.54ms +[2025-09-11 11:17:25] [Rank 0] PRINT: step:2800/10000 val_loss:5.4653 total_sharp:5.2026e-04 L1_sharp:1.8757e-04 L2_sharp:1.2334e-04 L3_sharp:4.5924e-05 L4_sharp:4.4151e-05 L5_sharp:7.1705e-05 L6_sharp:3.2839e-05 L7_sharp:3.7066e-05 L8_sharp:5.8448e-05 L9_sharp:5.5190e-05 L10_sharp:6.6162e-05 L11_sharp:9.6340e-05 L12_sharp:4.8798e-04 total_fnorm:4.2250e+01 total_l1_linf:1.1520e+05 total_spectral:2.1125e+01 L1_fnorm:1.2812e+01 L2_fnorm:1.2375e+01 L3_fnorm:1.2375e+01 L4_fnorm:1.2375e+01 L5_fnorm:1.2000e+01 L6_fnorm:1.2250e+01 L7_fnorm:1.2250e+01 L8_fnorm:1.1812e+01 L9_fnorm:1.2188e+01 L10_fnorm:1.2062e+01 L11_fnorm:1.1875e+01 L12_fnorm:1.1000e+01 L1_l1linf:3.3750e+00 L2_l1linf:3.3125e+00 L3_l1linf:3.1250e+00 L4_l1linf:3.0625e+00 L5_l1linf:3.0000e+00 
L6_l1linf:2.9375e+00 L7_l1linf:2.9062e+00 L8_l1linf:2.9375e+00 L9_l1linf:3.0000e+00 L10_l1linf:3.0312e+00 L11_l1linf:2.8438e+00 L12_l1linf:2.2656e+00 L1_spectral:1.5294e-01 L2_spectral:1.5012e-01 L3_spectral:1.5038e-01 L4_spectral:1.5009e-01 L5_spectral:1.4772e-01 L6_spectral:1.4872e-01 L7_spectral:1.4831e-01 L8_spectral:1.4675e-01 L9_spectral:1.4628e-01 L10_spectral:1.4656e-01 L11_spectral:1.4649e-01 L12_spectral:1.4757e-01 train_time:135909ms step_avg:48.54ms +[2025-09-11 11:17:26] [Rank 0] step:2801/10000 train_time:137554ms step_avg:49.11ms +[2025-09-11 11:17:26] [Rank 0] step:2801/10000 train_time:137554ms step_avg:49.11ms +[2025-09-11 11:17:27] [Rank 0] step:2821/10000 train_time:138235ms step_avg:49.00ms +[2025-09-11 11:17:27] [Rank 0] step:2821/10000 train_time:138235ms step_avg:49.00ms +[2025-09-11 11:17:28] [Rank 0] step:2841/10000 train_time:138897ms step_avg:48.89ms +[2025-09-11 11:17:28] [Rank 0] step:2841/10000 train_time:138897ms step_avg:48.89ms +[2025-09-11 11:17:28] [Rank 0] step:2861/10000 train_time:139559ms step_avg:48.78ms +[2025-09-11 11:17:28] [Rank 0] step:2861/10000 train_time:139559ms step_avg:48.78ms +[2025-09-11 11:17:29] [Rank 0] step:2881/10000 train_time:140220ms step_avg:48.67ms +[2025-09-11 11:17:29] [Rank 0] step:2881/10000 train_time:140220ms step_avg:48.67ms +[2025-09-11 11:17:30] [Rank 0] step:2901/10000 train_time:140881ms step_avg:48.56ms +[2025-09-11 11:17:30] [Rank 0] step:2901/10000 train_time:140881ms step_avg:48.56ms +[2025-09-11 11:17:30] [Rank 0] step:2921/10000 train_time:141541ms step_avg:48.46ms +[2025-09-11 11:17:30] [Rank 0] step:2921/10000 train_time:141541ms step_avg:48.46ms +[2025-09-11 11:17:31] [Rank 0] step:2941/10000 train_time:142202ms step_avg:48.35ms +[2025-09-11 11:17:31] [Rank 0] step:2941/10000 train_time:142202ms step_avg:48.35ms +[2025-09-11 11:17:32] [Rank 0] step:2961/10000 train_time:142862ms step_avg:48.25ms +[2025-09-11 11:17:32] [Rank 0] step:2961/10000 train_time:142862ms step_avg:48.25ms 
+[2025-09-11 11:17:32] [Rank 0] step:2981/10000 train_time:143525ms step_avg:48.15ms +[2025-09-11 11:17:32] [Rank 0] step:2981/10000 train_time:143525ms step_avg:48.15ms +[2025-09-11 11:17:33] [Rank 0] step:3001/10000 train_time:144190ms step_avg:48.05ms +[2025-09-11 11:17:33] [Rank 0] step:3001/10000 train_time:144190ms step_avg:48.05ms +[2025-09-11 11:17:34] [Rank 0] step:3021/10000 train_time:144853ms step_avg:47.95ms +[2025-09-11 11:17:34] [Rank 0] step:3021/10000 train_time:144853ms step_avg:47.95ms +[2025-09-11 11:17:34] [Rank 0] step:3041/10000 train_time:145517ms step_avg:47.85ms +[2025-09-11 11:17:34] [Rank 0] step:3041/10000 train_time:145517ms step_avg:47.85ms +[2025-09-11 11:17:35] [Rank 0] step:3061/10000 train_time:146190ms step_avg:47.76ms +[2025-09-11 11:17:35] [Rank 0] step:3061/10000 train_time:146190ms step_avg:47.76ms +[2025-09-11 11:17:36] [Rank 0] step:3081/10000 train_time:146854ms step_avg:47.66ms +[2025-09-11 11:17:36] [Rank 0] step:3081/10000 train_time:146854ms step_avg:47.66ms +[2025-09-11 11:17:36] [Rank 0] step:3101/10000 train_time:147517ms step_avg:47.57ms +[2025-09-11 11:17:36] [Rank 0] step:3101/10000 train_time:147517ms step_avg:47.57ms +[2025-09-11 11:17:37] [Rank 0] step:3121/10000 train_time:148180ms step_avg:47.48ms +[2025-09-11 11:17:37] [Rank 0] step:3121/10000 train_time:148180ms step_avg:47.48ms +[2025-09-11 11:17:38] [Rank 0] step:3141/10000 train_time:148844ms step_avg:47.39ms +[2025-09-11 11:17:38] [Rank 0] step:3141/10000 train_time:148844ms step_avg:47.39ms +[2025-09-11 11:17:38] [Rank 0] step:3161/10000 train_time:149507ms step_avg:47.30ms +[2025-09-11 11:17:38] [Rank 0] step:3161/10000 train_time:149507ms step_avg:47.30ms +[2025-09-11 11:17:39] [Rank 0] step:3181/10000 train_time:150171ms step_avg:47.21ms +[2025-09-11 11:17:39] [Rank 0] step:3181/10000 train_time:150171ms step_avg:47.21ms +[2025-09-11 11:17:40] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 11:17:40] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 11:17:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 11:17:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 11:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 11:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 11:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 11:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 11:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 11:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 11:17:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 11:17:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 11:17:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 11:17:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 11:17:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 11:17:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 11:17:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 11:17:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 11:17:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 11:17:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 11:17:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 11:17:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 11:17:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 11:17:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 11:17:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 11:17:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 11:17:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 11:17:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 11:17:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 11:17:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 11:17:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 11:17:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 11:17:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 11:17:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 11:17:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 11:17:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 11:17:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 11:17:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 11:17:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 11:17:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 11:17:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 11:17:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 11:17:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:17:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:17:50] [Rank 0] PRINT: step:3200/10000 val_loss:5.3771 total_sharp:4.3546e-04 L1_sharp:2.1319e-04 L2_sharp:1.2723e-04 L3_sharp:3.3345e-05 L4_sharp:4.2030e-05 L5_sharp:6.5949e-05 L6_sharp:3.6675e-05 L7_sharp:3.2995e-05 L8_sharp:6.3344e-05 L9_sharp:4.4684e-05 L10_sharp:7.6231e-05 L11_sharp:7.7523e-05 L12_sharp:3.7856e-04 total_fnorm:4.2750e+01 total_l1_linf:1.1520e+05 total_spectral:2.1375e+01 L1_fnorm:1.2812e+01 L2_fnorm:1.2375e+01 L3_fnorm:1.2438e+01 L4_fnorm:1.2438e+01 L5_fnorm:1.2062e+01 L6_fnorm:1.2312e+01 L7_fnorm:1.2375e+01 L8_fnorm:1.1938e+01 L9_fnorm:1.2312e+01 L10_fnorm:1.2250e+01 L11_fnorm:1.2188e+01 L12_fnorm:1.1438e+01 L1_l1linf:3.4062e+00 L2_l1linf:3.2500e+00 L3_l1linf:3.0938e+00 L4_l1linf:3.0156e+00 L5_l1linf:2.9844e+00 L6_l1linf:2.8594e+00 L7_l1linf:2.9219e+00 L8_l1linf:2.8281e+00 L9_l1linf:2.9219e+00 L10_l1linf:2.9531e+00 L11_l1linf:2.8438e+00 L12_l1linf:2.3125e+00 L1_spectral:1.5460e-01 L2_spectral:1.5106e-01 L3_spectral:1.5265e-01 L4_spectral:1.5356e-01 L5_spectral:1.4842e-01 L6_spectral:1.5104e-01 L7_spectral:1.5033e-01 L8_spectral:1.4967e-01 L9_spectral:1.4940e-01 L10_spectral:1.4862e-01 L11_spectral:1.4898e-01 L12_spectral:1.4885e-01 train_time:150815ms step_avg:47.13ms +[2025-09-11 11:17:50] [Rank 0] PRINT: step:3200/10000 
val_loss:5.3771 total_sharp:4.3546e-04 L1_sharp:2.1319e-04 L2_sharp:1.2723e-04 L3_sharp:3.3345e-05 L4_sharp:4.2030e-05 L5_sharp:6.5949e-05 L6_sharp:3.6675e-05 L7_sharp:3.2995e-05 L8_sharp:6.3344e-05 L9_sharp:4.4684e-05 L10_sharp:7.6231e-05 L11_sharp:7.7523e-05 L12_sharp:3.7856e-04 total_fnorm:4.2750e+01 total_l1_linf:1.1520e+05 total_spectral:2.1375e+01 L1_fnorm:1.2812e+01 L2_fnorm:1.2375e+01 L3_fnorm:1.2438e+01 L4_fnorm:1.2438e+01 L5_fnorm:1.2062e+01 L6_fnorm:1.2312e+01 L7_fnorm:1.2375e+01 L8_fnorm:1.1938e+01 L9_fnorm:1.2312e+01 L10_fnorm:1.2250e+01 L11_fnorm:1.2188e+01 L12_fnorm:1.1438e+01 L1_l1linf:3.4062e+00 L2_l1linf:3.2500e+00 L3_l1linf:3.0938e+00 L4_l1linf:3.0156e+00 L5_l1linf:2.9844e+00 L6_l1linf:2.8594e+00 L7_l1linf:2.9219e+00 L8_l1linf:2.8281e+00 L9_l1linf:2.9219e+00 L10_l1linf:2.9531e+00 L11_l1linf:2.8438e+00 L12_l1linf:2.3125e+00 L1_spectral:1.5460e-01 L2_spectral:1.5106e-01 L3_spectral:1.5265e-01 L4_spectral:1.5356e-01 L5_spectral:1.4842e-01 L6_spectral:1.5104e-01 L7_spectral:1.5033e-01 L8_spectral:1.4967e-01 L9_spectral:1.4940e-01 L10_spectral:1.4862e-01 L11_spectral:1.4898e-01 L12_spectral:1.4885e-01 train_time:150815ms step_avg:47.13ms +[2025-09-11 11:17:51] [Rank 0] step:3201/10000 train_time:152535ms step_avg:47.65ms +[2025-09-11 11:17:51] [Rank 0] step:3201/10000 train_time:152535ms step_avg:47.65ms +[2025-09-11 11:17:53] [Rank 0] step:3221/10000 train_time:153773ms step_avg:47.74ms +[2025-09-11 11:17:53] [Rank 0] step:3221/10000 train_time:153773ms step_avg:47.74ms +[2025-09-11 11:17:53] [Rank 0] step:3241/10000 train_time:154438ms step_avg:47.65ms +[2025-09-11 11:17:53] [Rank 0] step:3241/10000 train_time:154438ms step_avg:47.65ms +[2025-09-11 11:17:54] [Rank 0] step:3261/10000 train_time:155104ms step_avg:47.56ms +[2025-09-11 11:17:54] [Rank 0] step:3261/10000 train_time:155104ms step_avg:47.56ms +[2025-09-11 11:17:55] [Rank 0] step:3281/10000 train_time:156065ms step_avg:47.57ms +[2025-09-11 11:17:55] [Rank 0] step:3281/10000 
train_time:156065ms step_avg:47.57ms +[2025-09-11 11:17:56] [Rank 0] step:3301/10000 train_time:156729ms step_avg:47.48ms +[2025-09-11 11:17:56] [Rank 0] step:3301/10000 train_time:156729ms step_avg:47.48ms +[2025-09-11 11:17:56] [Rank 0] step:3321/10000 train_time:157393ms step_avg:47.39ms +[2025-09-11 11:17:56] [Rank 0] step:3321/10000 train_time:157393ms step_avg:47.39ms +[2025-09-11 11:17:57] [Rank 0] step:3341/10000 train_time:158058ms step_avg:47.31ms +[2025-09-11 11:17:57] [Rank 0] step:3341/10000 train_time:158058ms step_avg:47.31ms +[2025-09-11 11:17:58] [Rank 0] step:3361/10000 train_time:158722ms step_avg:47.22ms +[2025-09-11 11:17:58] [Rank 0] step:3361/10000 train_time:158722ms step_avg:47.22ms +[2025-09-11 11:17:58] [Rank 0] step:3381/10000 train_time:159386ms step_avg:47.14ms +[2025-09-11 11:17:58] [Rank 0] step:3381/10000 train_time:159386ms step_avg:47.14ms +[2025-09-11 11:17:59] [Rank 0] step:3401/10000 train_time:160050ms step_avg:47.06ms +[2025-09-11 11:17:59] [Rank 0] step:3401/10000 train_time:160050ms step_avg:47.06ms +[2025-09-11 11:18:00] [Rank 0] step:3421/10000 train_time:160714ms step_avg:46.98ms +[2025-09-11 11:18:00] [Rank 0] step:3421/10000 train_time:160714ms step_avg:46.98ms +[2025-09-11 11:18:00] [Rank 0] step:3441/10000 train_time:161378ms step_avg:46.90ms +[2025-09-11 11:18:00] [Rank 0] step:3441/10000 train_time:161378ms step_avg:46.90ms +[2025-09-11 11:18:01] [Rank 0] step:3461/10000 train_time:162041ms step_avg:46.82ms +[2025-09-11 11:18:01] [Rank 0] step:3461/10000 train_time:162041ms step_avg:46.82ms +[2025-09-11 11:18:02] [Rank 0] step:3481/10000 train_time:162705ms step_avg:46.74ms +[2025-09-11 11:18:02] [Rank 0] step:3481/10000 train_time:162705ms step_avg:46.74ms +[2025-09-11 11:18:02] [Rank 0] step:3501/10000 train_time:163368ms step_avg:46.66ms +[2025-09-11 11:18:02] [Rank 0] step:3501/10000 train_time:163368ms step_avg:46.66ms +[2025-09-11 11:18:03] [Rank 0] step:3521/10000 train_time:164033ms step_avg:46.59ms 
+[2025-09-11 11:18:03] [Rank 0] step:3521/10000 train_time:164033ms step_avg:46.59ms +[2025-09-11 11:18:04] [Rank 0] step:3541/10000 train_time:164697ms step_avg:46.51ms +[2025-09-11 11:18:04] [Rank 0] step:3541/10000 train_time:164697ms step_avg:46.51ms +[2025-09-11 11:18:04] [Rank 0] step:3561/10000 train_time:165360ms step_avg:46.44ms +[2025-09-11 11:18:04] [Rank 0] step:3561/10000 train_time:165360ms step_avg:46.44ms +[2025-09-11 11:18:05] [Rank 0] step:3581/10000 train_time:166024ms step_avg:46.36ms +[2025-09-11 11:18:05] [Rank 0] step:3581/10000 train_time:166024ms step_avg:46.36ms +[2025-09-11 11:18:06] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 11:18:06] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 11:18:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 11:18:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 11:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 11:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 11:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 11:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 11:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 11:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 11:18:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 11:18:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 11:18:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 11:18:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 11:18:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 11:18:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 11:18:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 11:18:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 11:18:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 11:18:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 11:18:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 11:18:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 11:18:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 11:18:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 11:18:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 11:18:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 11:18:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 11:18:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 11:18:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 11:18:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 11:18:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 11:18:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 11:18:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 11:18:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 11:18:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 11:18:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 11:18:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 11:18:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 11:18:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 11:18:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 11:18:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 11:18:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 11:18:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:18:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:18:16] [Rank 0] PRINT: step:3600/10000 val_loss:5.3225 total_sharp:3.4556e-04 L1_sharp:2.0326e-04 L2_sharp:8.8146e-05 L3_sharp:4.5224e-05 L4_sharp:3.6135e-05 L5_sharp:5.8170e-05 L6_sharp:3.1861e-05 L7_sharp:2.5082e-05 L8_sharp:4.4614e-05 L9_sharp:4.1408e-05 L10_sharp:6.2429e-05 L11_sharp:6.4102e-05 L12_sharp:3.0723e-04 total_fnorm:4.3000e+01 total_l1_linf:1.1315e+05 total_spectral:2.1375e+01 L1_fnorm:1.2875e+01 L2_fnorm:1.2438e+01 L3_fnorm:1.2438e+01 L4_fnorm:1.2438e+01 L5_fnorm:1.2062e+01 L6_fnorm:1.2312e+01 L7_fnorm:1.2438e+01 L8_fnorm:1.2000e+01 L9_fnorm:1.2375e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2250e+01 L12_fnorm:1.1562e+01 L1_l1linf:3.3750e+00 L2_l1linf:3.2344e+00 L3_l1linf:3.0469e+00 L4_l1linf:3.0000e+00 L5_l1linf:2.9219e+00 L6_l1linf:2.8125e+00 L7_l1linf:2.7969e+00 L8_l1linf:2.7812e+00 L9_l1linf:2.8594e+00 L10_l1linf:2.9062e+00 L11_l1linf:2.8125e+00 L12_l1linf:2.3125e+00 L1_spectral:1.5685e-01 L2_spectral:1.5368e-01 L3_spectral:1.5319e-01 L4_spectral:1.5477e-01 L5_spectral:1.5066e-01 L6_spectral:1.5172e-01 L7_spectral:1.5194e-01 L8_spectral:1.5005e-01 L9_spectral:1.5124e-01 L10_spectral:1.5098e-01 L11_spectral:1.4958e-01 L12_spectral:1.5075e-01 train_time:166669ms step_avg:46.30ms +[2025-09-11 11:18:16] [Rank 0] PRINT: step:3600/10000 val_loss:5.3225 total_sharp:3.4556e-04 L1_sharp:2.0326e-04 L2_sharp:8.8146e-05 L3_sharp:4.5224e-05 L4_sharp:3.6135e-05 L5_sharp:5.8170e-05 L6_sharp:3.1861e-05 L7_sharp:2.5082e-05 L8_sharp:4.4614e-05 L9_sharp:4.1408e-05 L10_sharp:6.2429e-05 L11_sharp:6.4102e-05 L12_sharp:3.0723e-04 total_fnorm:4.3000e+01 total_l1_linf:1.1315e+05 total_spectral:2.1375e+01 L1_fnorm:1.2875e+01 L2_fnorm:1.2438e+01 L3_fnorm:1.2438e+01 L4_fnorm:1.2438e+01 L5_fnorm:1.2062e+01 L6_fnorm:1.2312e+01 L7_fnorm:1.2438e+01 L8_fnorm:1.2000e+01 L9_fnorm:1.2375e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2250e+01 L12_fnorm:1.1562e+01 L1_l1linf:3.3750e+00 L2_l1linf:3.2344e+00 L3_l1linf:3.0469e+00 L4_l1linf:3.0000e+00 L5_l1linf:2.9219e+00 
L6_l1linf:2.8125e+00 L7_l1linf:2.7969e+00 L8_l1linf:2.7812e+00 L9_l1linf:2.8594e+00 L10_l1linf:2.9062e+00 L11_l1linf:2.8125e+00 L12_l1linf:2.3125e+00 L1_spectral:1.5685e-01 L2_spectral:1.5368e-01 L3_spectral:1.5319e-01 L4_spectral:1.5477e-01 L5_spectral:1.5066e-01 L6_spectral:1.5172e-01 L7_spectral:1.5194e-01 L8_spectral:1.5005e-01 L9_spectral:1.5124e-01 L10_spectral:1.5098e-01 L11_spectral:1.4958e-01 L12_spectral:1.5075e-01 train_time:166669ms step_avg:46.30ms +[2025-09-11 11:18:18] [Rank 0] step:3601/10000 train_time:168321ms step_avg:46.74ms +[2025-09-11 11:18:18] [Rank 0] step:3601/10000 train_time:168321ms step_avg:46.74ms +[2025-09-11 11:18:18] [Rank 0] step:3621/10000 train_time:168989ms step_avg:46.67ms +[2025-09-11 11:18:18] [Rank 0] step:3621/10000 train_time:168989ms step_avg:46.67ms +[2025-09-11 11:18:19] [Rank 0] step:3641/10000 train_time:169655ms step_avg:46.60ms +[2025-09-11 11:18:19] [Rank 0] step:3641/10000 train_time:169655ms step_avg:46.60ms +[2025-09-11 11:18:20] [Rank 0] step:3661/10000 train_time:170318ms step_avg:46.52ms +[2025-09-11 11:18:20] [Rank 0] step:3661/10000 train_time:170318ms step_avg:46.52ms +[2025-09-11 11:18:20] [Rank 0] step:3681/10000 train_time:170982ms step_avg:46.45ms +[2025-09-11 11:18:20] [Rank 0] step:3681/10000 train_time:170982ms step_avg:46.45ms +[2025-09-11 11:18:21] [Rank 0] step:3701/10000 train_time:171644ms step_avg:46.38ms +[2025-09-11 11:18:21] [Rank 0] step:3701/10000 train_time:171644ms step_avg:46.38ms +[2025-09-11 11:18:22] [Rank 0] step:3721/10000 train_time:172318ms step_avg:46.31ms +[2025-09-11 11:18:22] [Rank 0] step:3721/10000 train_time:172318ms step_avg:46.31ms +[2025-09-11 11:18:22] [Rank 0] step:3741/10000 train_time:172992ms step_avg:46.24ms +[2025-09-11 11:18:22] [Rank 0] step:3741/10000 train_time:172992ms step_avg:46.24ms +[2025-09-11 11:18:23] [Rank 0] step:3761/10000 train_time:173668ms step_avg:46.18ms +[2025-09-11 11:18:23] [Rank 0] step:3761/10000 train_time:173668ms step_avg:46.18ms 
+[2025-09-11 11:18:24] [Rank 0] step:3781/10000 train_time:174342ms step_avg:46.11ms +[2025-09-11 11:18:24] [Rank 0] step:3781/10000 train_time:174342ms step_avg:46.11ms +[2025-09-11 11:18:24] [Rank 0] step:3801/10000 train_time:175017ms step_avg:46.04ms +[2025-09-11 11:18:24] [Rank 0] step:3801/10000 train_time:175017ms step_avg:46.04ms +[2025-09-11 11:18:25] [Rank 0] step:3821/10000 train_time:175692ms step_avg:45.98ms +[2025-09-11 11:18:25] [Rank 0] step:3821/10000 train_time:175692ms step_avg:45.98ms +[2025-09-11 11:18:26] [Rank 0] step:3841/10000 train_time:176367ms step_avg:45.92ms +[2025-09-11 11:18:26] [Rank 0] step:3841/10000 train_time:176367ms step_avg:45.92ms +[2025-09-11 11:18:26] [Rank 0] step:3861/10000 train_time:177041ms step_avg:45.85ms +[2025-09-11 11:18:26] [Rank 0] step:3861/10000 train_time:177041ms step_avg:45.85ms +[2025-09-11 11:18:27] [Rank 0] step:3881/10000 train_time:177715ms step_avg:45.79ms +[2025-09-11 11:18:27] [Rank 0] step:3881/10000 train_time:177715ms step_avg:45.79ms +[2025-09-11 11:18:28] [Rank 0] step:3901/10000 train_time:178389ms step_avg:45.73ms +[2025-09-11 11:18:28] [Rank 0] step:3901/10000 train_time:178389ms step_avg:45.73ms +[2025-09-11 11:18:28] [Rank 0] step:3921/10000 train_time:179064ms step_avg:45.67ms +[2025-09-11 11:18:28] [Rank 0] step:3921/10000 train_time:179064ms step_avg:45.67ms +[2025-09-11 11:18:29] [Rank 0] step:3941/10000 train_time:179739ms step_avg:45.61ms +[2025-09-11 11:18:29] [Rank 0] step:3941/10000 train_time:179739ms step_avg:45.61ms +[2025-09-11 11:18:30] [Rank 0] step:3961/10000 train_time:180414ms step_avg:45.55ms +[2025-09-11 11:18:30] [Rank 0] step:3961/10000 train_time:180414ms step_avg:45.55ms +[2025-09-11 11:18:30] [Rank 0] step:3981/10000 train_time:181088ms step_avg:45.49ms +[2025-09-11 11:18:30] [Rank 0] step:3981/10000 train_time:181088ms step_avg:45.49ms +[2025-09-11 11:18:31] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 11:18:31] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 11:18:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 11:18:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 11:18:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 11:18:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 11:18:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:18:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:18:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 11:18:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 11:18:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 11:18:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 11:18:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 11:18:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 11:18:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 11:18:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 11:18:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 11:18:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 11:18:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 11:18:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 11:18:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 11:18:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 11:18:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 11:18:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 11:18:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 11:18:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 11:18:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 11:18:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 11:18:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 11:18:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 11:18:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 11:18:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 11:18:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 11:18:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 11:18:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 11:18:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 11:18:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 11:18:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 11:18:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 11:18:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 11:18:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 11:18:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 11:18:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 11:18:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 11:18:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:18:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:18:44] [Rank 0] PRINT: step:4000/10000 val_loss:5.2678 total_sharp:4.4924e-04 L1_sharp:1.5211e-04 L2_sharp:6.4954e-05 L3_sharp:2.5983e-05 L4_sharp:3.4360e-05 L5_sharp:6.0033e-05 L6_sharp:3.6083e-05 L7_sharp:2.8139e-05 L8_sharp:5.2822e-05 L9_sharp:5.1389e-05 L10_sharp:7.5665e-05 L11_sharp:7.9175e-05 L12_sharp:3.6026e-04 total_fnorm:4.2500e+01 total_l1_linf:1.1110e+05 total_spectral:2.1250e+01 L1_fnorm:1.2750e+01 L2_fnorm:1.2312e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2375e+01 L5_fnorm:1.1875e+01 L6_fnorm:1.2250e+01 L7_fnorm:1.2312e+01 L8_fnorm:1.1875e+01 L9_fnorm:1.2250e+01 L10_fnorm:1.2250e+01 L11_fnorm:1.2188e+01 L12_fnorm:1.1500e+01 L1_l1linf:3.2812e+00 L2_l1linf:3.2188e+00 L3_l1linf:3.0312e+00 L4_l1linf:3.0000e+00 L5_l1linf:2.8438e+00 L6_l1linf:2.8594e+00 L7_l1linf:2.8125e+00 L8_l1linf:2.7656e+00 L9_l1linf:2.8125e+00 L10_l1linf:2.8750e+00 L11_l1linf:2.8281e+00 L12_l1linf:2.3125e+00 L1_spectral:1.5864e-01 L2_spectral:1.5211e-01 L3_spectral:1.5328e-01 L4_spectral:1.5333e-01 L5_spectral:1.5108e-01 L6_spectral:1.5360e-01 L7_spectral:1.5307e-01 L8_spectral:1.5185e-01 L9_spectral:1.5205e-01 L10_spectral:1.5372e-01 L11_spectral:1.5171e-01 L12_spectral:1.5233e-01 train_time:181745ms step_avg:45.44ms +[2025-09-11 11:18:44] [Rank 0] PRINT: step:4000/10000 
val_loss:5.2678 total_sharp:4.4924e-04 L1_sharp:1.5211e-04 L2_sharp:6.4954e-05 L3_sharp:2.5983e-05 L4_sharp:3.4360e-05 L5_sharp:6.0033e-05 L6_sharp:3.6083e-05 L7_sharp:2.8139e-05 L8_sharp:5.2822e-05 L9_sharp:5.1389e-05 L10_sharp:7.5665e-05 L11_sharp:7.9175e-05 L12_sharp:3.6026e-04 total_fnorm:4.2500e+01 total_l1_linf:1.1110e+05 total_spectral:2.1250e+01 L1_fnorm:1.2750e+01 L2_fnorm:1.2312e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2375e+01 L5_fnorm:1.1875e+01 L6_fnorm:1.2250e+01 L7_fnorm:1.2312e+01 L8_fnorm:1.1875e+01 L9_fnorm:1.2250e+01 L10_fnorm:1.2250e+01 L11_fnorm:1.2188e+01 L12_fnorm:1.1500e+01 L1_l1linf:3.2812e+00 L2_l1linf:3.2188e+00 L3_l1linf:3.0312e+00 L4_l1linf:3.0000e+00 L5_l1linf:2.8438e+00 L6_l1linf:2.8594e+00 L7_l1linf:2.8125e+00 L8_l1linf:2.7656e+00 L9_l1linf:2.8125e+00 L10_l1linf:2.8750e+00 L11_l1linf:2.8281e+00 L12_l1linf:2.3125e+00 L1_spectral:1.5864e-01 L2_spectral:1.5211e-01 L3_spectral:1.5328e-01 L4_spectral:1.5333e-01 L5_spectral:1.5108e-01 L6_spectral:1.5360e-01 L7_spectral:1.5307e-01 L8_spectral:1.5185e-01 L9_spectral:1.5205e-01 L10_spectral:1.5372e-01 L11_spectral:1.5171e-01 L12_spectral:1.5233e-01 train_time:181745ms step_avg:45.44ms +[2025-09-11 11:18:46] [Rank 0] step:4001/10000 train_time:183365ms step_avg:45.83ms +[2025-09-11 11:18:46] [Rank 0] step:4001/10000 train_time:183365ms step_avg:45.83ms +[2025-09-11 11:18:46] [Rank 0] step:4021/10000 train_time:184044ms step_avg:45.77ms +[2025-09-11 11:18:46] [Rank 0] step:4021/10000 train_time:184044ms step_avg:45.77ms +[2025-09-11 11:18:47] [Rank 0] step:4041/10000 train_time:184720ms step_avg:45.71ms +[2025-09-11 11:18:47] [Rank 0] step:4041/10000 train_time:184720ms step_avg:45.71ms +[2025-09-11 11:18:48] [Rank 0] step:4061/10000 train_time:185395ms step_avg:45.65ms +[2025-09-11 11:18:48] [Rank 0] step:4061/10000 train_time:185395ms step_avg:45.65ms +[2025-09-11 11:18:48] [Rank 0] step:4081/10000 train_time:186070ms step_avg:45.59ms +[2025-09-11 11:18:48] [Rank 0] step:4081/10000 
train_time:186070ms step_avg:45.59ms +[2025-09-11 11:18:49] [Rank 0] step:4101/10000 train_time:186745ms step_avg:45.54ms +[2025-09-11 11:18:49] [Rank 0] step:4101/10000 train_time:186745ms step_avg:45.54ms +[2025-09-11 11:18:50] [Rank 0] step:4121/10000 train_time:187421ms step_avg:45.48ms +[2025-09-11 11:18:50] [Rank 0] step:4121/10000 train_time:187421ms step_avg:45.48ms +[2025-09-11 11:18:50] [Rank 0] step:4141/10000 train_time:188097ms step_avg:45.42ms +[2025-09-11 11:18:50] [Rank 0] step:4141/10000 train_time:188097ms step_avg:45.42ms +[2025-09-11 11:18:51] [Rank 0] step:4161/10000 train_time:188771ms step_avg:45.37ms +[2025-09-11 11:18:51] [Rank 0] step:4161/10000 train_time:188771ms step_avg:45.37ms +[2025-09-11 11:18:52] [Rank 0] step:4181/10000 train_time:189445ms step_avg:45.31ms +[2025-09-11 11:18:52] [Rank 0] step:4181/10000 train_time:189445ms step_avg:45.31ms +[2025-09-11 11:18:52] [Rank 0] step:4201/10000 train_time:190122ms step_avg:45.26ms +[2025-09-11 11:18:52] [Rank 0] step:4201/10000 train_time:190122ms step_avg:45.26ms +[2025-09-11 11:18:53] [Rank 0] step:4221/10000 train_time:190796ms step_avg:45.20ms +[2025-09-11 11:18:53] [Rank 0] step:4221/10000 train_time:190796ms step_avg:45.20ms +[2025-09-11 11:18:54] [Rank 0] step:4241/10000 train_time:191470ms step_avg:45.15ms +[2025-09-11 11:18:54] [Rank 0] step:4241/10000 train_time:191470ms step_avg:45.15ms +[2025-09-11 11:18:54] [Rank 0] step:4261/10000 train_time:192145ms step_avg:45.09ms +[2025-09-11 11:18:54] [Rank 0] step:4261/10000 train_time:192145ms step_avg:45.09ms +[2025-09-11 11:18:56] [Rank 0] step:4281/10000 train_time:193252ms step_avg:45.14ms +[2025-09-11 11:18:56] [Rank 0] step:4281/10000 train_time:193252ms step_avg:45.14ms +[2025-09-11 11:18:56] [Rank 0] step:4301/10000 train_time:194094ms step_avg:45.13ms +[2025-09-11 11:18:56] [Rank 0] step:4301/10000 train_time:194094ms step_avg:45.13ms +[2025-09-11 11:18:57] [Rank 0] step:4321/10000 train_time:194768ms step_avg:45.07ms 
+[2025-09-11 11:18:57] [Rank 0] step:4321/10000 train_time:194768ms step_avg:45.07ms +[2025-09-11 11:18:58] [Rank 0] step:4341/10000 train_time:195690ms step_avg:45.08ms +[2025-09-11 11:18:58] [Rank 0] step:4341/10000 train_time:195690ms step_avg:45.08ms +[2025-09-11 11:18:59] [Rank 0] step:4361/10000 train_time:196364ms step_avg:45.03ms +[2025-09-11 11:18:59] [Rank 0] step:4361/10000 train_time:196364ms step_avg:45.03ms +[2025-09-11 11:18:59] [Rank 0] step:4381/10000 train_time:197039ms step_avg:44.98ms +[2025-09-11 11:18:59] [Rank 0] step:4381/10000 train_time:197039ms step_avg:44.98ms +[2025-09-11 11:19:00] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 11:19:00] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 11:19:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 11:19:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 11:19:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 11:19:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 11:19:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:19:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:19:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 11:19:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 11:19:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 11:19:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 11:19:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 11:19:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 11:19:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 11:19:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 11:19:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 11:19:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 11:19:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 11:19:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 11:19:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 11:19:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 11:19:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 11:19:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 11:19:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 11:19:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 11:19:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 11:19:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 11:19:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 11:19:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 11:19:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 11:19:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 11:19:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 11:19:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 11:19:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 11:19:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 11:19:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 11:19:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 11:19:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 11:19:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 11:19:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 11:19:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 11:19:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 11:19:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 11:19:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:19:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:19:10] [Rank 0] PRINT: step:4400/10000 val_loss:5.2290 total_sharp:3.7805e-04 L1_sharp:1.7979e-04 L2_sharp:5.3647e-05 L3_sharp:4.0596e-05 L4_sharp:2.7270e-05 L5_sharp:5.0467e-05 L6_sharp:2.9704e-05 L7_sharp:2.2452e-05 L8_sharp:5.0299e-05 L9_sharp:4.6977e-05 L10_sharp:6.5017e-05 L11_sharp:6.9667e-05 L12_sharp:4.0701e-04 total_fnorm:4.2500e+01 total_l1_linf:1.0957e+05 total_spectral:2.1375e+01 L1_fnorm:1.2812e+01 L2_fnorm:1.2312e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2438e+01 L5_fnorm:1.2000e+01 L6_fnorm:1.2312e+01 L7_fnorm:1.2375e+01 L8_fnorm:1.1938e+01 L9_fnorm:1.2312e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2188e+01 L12_fnorm:1.1500e+01 L1_l1linf:3.2656e+00 L2_l1linf:3.2500e+00 L3_l1linf:2.9688e+00 L4_l1linf:2.9062e+00 L5_l1linf:2.8281e+00 L6_l1linf:2.8125e+00 L7_l1linf:2.7500e+00 L8_l1linf:2.7656e+00 L9_l1linf:2.8125e+00 L10_l1linf:2.8438e+00 L11_l1linf:2.7500e+00 L12_l1linf:2.2656e+00 L1_spectral:1.5826e-01 L2_spectral:1.5413e-01 L3_spectral:1.5496e-01 L4_spectral:1.5507e-01 L5_spectral:1.5231e-01 L6_spectral:1.5551e-01 L7_spectral:1.5403e-01 L8_spectral:1.5265e-01 L9_spectral:1.5407e-01 L10_spectral:1.5429e-01 L11_spectral:1.5298e-01 L12_spectral:1.5250e-01 train_time:197694ms step_avg:44.93ms +[2025-09-11 11:19:10] [Rank 0] PRINT: step:4400/10000 val_loss:5.2290 total_sharp:3.7805e-04 L1_sharp:1.7979e-04 L2_sharp:5.3647e-05 L3_sharp:4.0596e-05 L4_sharp:2.7270e-05 L5_sharp:5.0467e-05 L6_sharp:2.9704e-05 L7_sharp:2.2452e-05 L8_sharp:5.0299e-05 L9_sharp:4.6977e-05 L10_sharp:6.5017e-05 L11_sharp:6.9667e-05 L12_sharp:4.0701e-04 total_fnorm:4.2500e+01 total_l1_linf:1.0957e+05 total_spectral:2.1375e+01 L1_fnorm:1.2812e+01 L2_fnorm:1.2312e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2438e+01 L5_fnorm:1.2000e+01 L6_fnorm:1.2312e+01 L7_fnorm:1.2375e+01 L8_fnorm:1.1938e+01 L9_fnorm:1.2312e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2188e+01 L12_fnorm:1.1500e+01 L1_l1linf:3.2656e+00 L2_l1linf:3.2500e+00 L3_l1linf:2.9688e+00 L4_l1linf:2.9062e+00 L5_l1linf:2.8281e+00 
L6_l1linf:2.8125e+00 L7_l1linf:2.7500e+00 L8_l1linf:2.7656e+00 L9_l1linf:2.8125e+00 L10_l1linf:2.8438e+00 L11_l1linf:2.7500e+00 L12_l1linf:2.2656e+00 L1_spectral:1.5826e-01 L2_spectral:1.5413e-01 L3_spectral:1.5496e-01 L4_spectral:1.5507e-01 L5_spectral:1.5231e-01 L6_spectral:1.5551e-01 L7_spectral:1.5403e-01 L8_spectral:1.5265e-01 L9_spectral:1.5407e-01 L10_spectral:1.5429e-01 L11_spectral:1.5298e-01 L12_spectral:1.5250e-01 train_time:197694ms step_avg:44.93ms +[2025-09-11 11:19:12] [Rank 0] step:4401/10000 train_time:199410ms step_avg:45.31ms +[2025-09-11 11:19:12] [Rank 0] step:4401/10000 train_time:199410ms step_avg:45.31ms +[2025-09-11 11:19:13] [Rank 0] step:4421/10000 train_time:200112ms step_avg:45.26ms +[2025-09-11 11:19:13] [Rank 0] step:4421/10000 train_time:200112ms step_avg:45.26ms +[2025-09-11 11:19:14] [Rank 0] step:4441/10000 train_time:200790ms step_avg:45.21ms +[2025-09-11 11:19:14] [Rank 0] step:4441/10000 train_time:200790ms step_avg:45.21ms +[2025-09-11 11:19:14] [Rank 0] step:4461/10000 train_time:201469ms step_avg:45.16ms +[2025-09-11 11:19:14] [Rank 0] step:4461/10000 train_time:201469ms step_avg:45.16ms +[2025-09-11 11:19:15] [Rank 0] step:4481/10000 train_time:202149ms step_avg:45.11ms +[2025-09-11 11:19:15] [Rank 0] step:4481/10000 train_time:202149ms step_avg:45.11ms +[2025-09-11 11:19:16] [Rank 0] step:4501/10000 train_time:202830ms step_avg:45.06ms +[2025-09-11 11:19:16] [Rank 0] step:4501/10000 train_time:202830ms step_avg:45.06ms +[2025-09-11 11:19:16] [Rank 0] step:4521/10000 train_time:203510ms step_avg:45.01ms +[2025-09-11 11:19:16] [Rank 0] step:4521/10000 train_time:203510ms step_avg:45.01ms +[2025-09-11 11:19:17] [Rank 0] step:4541/10000 train_time:204189ms step_avg:44.97ms +[2025-09-11 11:19:17] [Rank 0] step:4541/10000 train_time:204189ms step_avg:44.97ms +[2025-09-11 11:19:18] [Rank 0] step:4561/10000 train_time:204869ms step_avg:44.92ms +[2025-09-11 11:19:18] [Rank 0] step:4561/10000 train_time:204869ms step_avg:44.92ms 
+[2025-09-11 11:19:18] [Rank 0] step:4581/10000 train_time:205548ms step_avg:44.87ms +[2025-09-11 11:19:18] [Rank 0] step:4581/10000 train_time:205548ms step_avg:44.87ms +[2025-09-11 11:19:19] [Rank 0] step:4601/10000 train_time:206227ms step_avg:44.82ms +[2025-09-11 11:19:19] [Rank 0] step:4601/10000 train_time:206227ms step_avg:44.82ms +[2025-09-11 11:19:20] [Rank 0] step:4621/10000 train_time:206905ms step_avg:44.78ms +[2025-09-11 11:19:20] [Rank 0] step:4621/10000 train_time:206905ms step_avg:44.78ms +[2025-09-11 11:19:20] [Rank 0] step:4641/10000 train_time:207584ms step_avg:44.73ms +[2025-09-11 11:19:20] [Rank 0] step:4641/10000 train_time:207584ms step_avg:44.73ms +[2025-09-11 11:19:21] [Rank 0] step:4661/10000 train_time:208263ms step_avg:44.68ms +[2025-09-11 11:19:21] [Rank 0] step:4661/10000 train_time:208263ms step_avg:44.68ms +[2025-09-11 11:19:22] [Rank 0] step:4681/10000 train_time:208942ms step_avg:44.64ms +[2025-09-11 11:19:22] [Rank 0] step:4681/10000 train_time:208942ms step_avg:44.64ms +[2025-09-11 11:19:22] [Rank 0] step:4701/10000 train_time:209620ms step_avg:44.59ms +[2025-09-11 11:19:22] [Rank 0] step:4701/10000 train_time:209620ms step_avg:44.59ms +[2025-09-11 11:19:23] [Rank 0] step:4721/10000 train_time:210299ms step_avg:44.55ms +[2025-09-11 11:19:23] [Rank 0] step:4721/10000 train_time:210299ms step_avg:44.55ms +[2025-09-11 11:19:24] [Rank 0] step:4741/10000 train_time:210977ms step_avg:44.50ms +[2025-09-11 11:19:24] [Rank 0] step:4741/10000 train_time:210977ms step_avg:44.50ms +[2025-09-11 11:19:24] [Rank 0] step:4761/10000 train_time:211657ms step_avg:44.46ms +[2025-09-11 11:19:24] [Rank 0] step:4761/10000 train_time:211657ms step_avg:44.46ms +[2025-09-11 11:19:25] [Rank 0] step:4781/10000 train_time:212334ms step_avg:44.41ms +[2025-09-11 11:19:25] [Rank 0] step:4781/10000 train_time:212334ms step_avg:44.41ms +[2025-09-11 11:19:26] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 11:19:26] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 11:19:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 11:19:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 11:19:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 11:19:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 11:19:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:19:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:19:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 11:19:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 11:19:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 11:19:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 11:19:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 11:19:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 11:19:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 11:19:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 11:19:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 11:19:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 11:19:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 11:19:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 11:19:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 11:19:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 11:19:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 11:19:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 11:19:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 11:19:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 11:19:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 11:19:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 11:19:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 11:19:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 11:19:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 11:19:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 11:19:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 11:19:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 11:19:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 11:19:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 11:19:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 11:19:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 11:19:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 11:19:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 11:19:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 11:19:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 11:19:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 11:19:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 11:19:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:19:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:19:36] [Rank 0] PRINT: step:4800/10000 val_loss:5.1857 total_sharp:2.7859e-04 L1_sharp:1.3424e-04 L2_sharp:5.7926e-05 L3_sharp:2.0494e-05 L4_sharp:2.3078e-05 L5_sharp:5.1813e-05 L6_sharp:2.9258e-05 L7_sharp:1.8852e-05 L8_sharp:4.3948e-05 L9_sharp:3.7388e-05 L10_sharp:5.4879e-05 L11_sharp:6.4647e-05 L12_sharp:2.7108e-04 total_fnorm:4.2750e+01 total_l1_linf:1.0906e+05 total_spectral:2.1375e+01 L1_fnorm:1.2812e+01 L2_fnorm:1.2438e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2438e+01 L5_fnorm:1.2062e+01 L6_fnorm:1.2312e+01 L7_fnorm:1.2375e+01 L8_fnorm:1.1938e+01 L9_fnorm:1.2312e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2312e+01 L12_fnorm:1.1750e+01 L1_l1linf:3.3281e+00 L2_l1linf:3.1875e+00 L3_l1linf:3.0312e+00 L4_l1linf:2.9375e+00 L5_l1linf:2.8125e+00 L6_l1linf:2.7812e+00 L7_l1linf:2.7344e+00 L8_l1linf:2.6719e+00 L9_l1linf:2.7188e+00 L10_l1linf:2.7969e+00 L11_l1linf:2.7500e+00 L12_l1linf:2.3594e+00 L1_spectral:1.5930e-01 L2_spectral:1.5427e-01 L3_spectral:1.5578e-01 L4_spectral:1.5687e-01 L5_spectral:1.5278e-01 L6_spectral:1.5584e-01 L7_spectral:1.5614e-01 L8_spectral:1.5543e-01 L9_spectral:1.5458e-01 L10_spectral:1.5489e-01 L11_spectral:1.5504e-01 L12_spectral:1.5503e-01 train_time:212993ms step_avg:44.37ms +[2025-09-11 11:19:36] [Rank 0] PRINT: step:4800/10000 
val_loss:5.1857 total_sharp:2.7859e-04 L1_sharp:1.3424e-04 L2_sharp:5.7926e-05 L3_sharp:2.0494e-05 L4_sharp:2.3078e-05 L5_sharp:5.1813e-05 L6_sharp:2.9258e-05 L7_sharp:1.8852e-05 L8_sharp:4.3948e-05 L9_sharp:3.7388e-05 L10_sharp:5.4879e-05 L11_sharp:6.4647e-05 L12_sharp:2.7108e-04 total_fnorm:4.2750e+01 total_l1_linf:1.0906e+05 total_spectral:2.1375e+01 L1_fnorm:1.2812e+01 L2_fnorm:1.2438e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2438e+01 L5_fnorm:1.2062e+01 L6_fnorm:1.2312e+01 L7_fnorm:1.2375e+01 L8_fnorm:1.1938e+01 L9_fnorm:1.2312e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2312e+01 L12_fnorm:1.1750e+01 L1_l1linf:3.3281e+00 L2_l1linf:3.1875e+00 L3_l1linf:3.0312e+00 L4_l1linf:2.9375e+00 L5_l1linf:2.8125e+00 L6_l1linf:2.7812e+00 L7_l1linf:2.7344e+00 L8_l1linf:2.6719e+00 L9_l1linf:2.7188e+00 L10_l1linf:2.7969e+00 L11_l1linf:2.7500e+00 L12_l1linf:2.3594e+00 L1_spectral:1.5930e-01 L2_spectral:1.5427e-01 L3_spectral:1.5578e-01 L4_spectral:1.5687e-01 L5_spectral:1.5278e-01 L6_spectral:1.5584e-01 L7_spectral:1.5614e-01 L8_spectral:1.5543e-01 L9_spectral:1.5458e-01 L10_spectral:1.5489e-01 L11_spectral:1.5504e-01 L12_spectral:1.5503e-01 train_time:212993ms step_avg:44.37ms +[2025-09-11 11:19:38] [Rank 0] step:4801/10000 train_time:214819ms step_avg:44.74ms +[2025-09-11 11:19:38] [Rank 0] step:4801/10000 train_time:214819ms step_avg:44.74ms +[2025-09-11 11:19:39] [Rank 0] step:4821/10000 train_time:215688ms step_avg:44.74ms +[2025-09-11 11:19:39] [Rank 0] step:4821/10000 train_time:215688ms step_avg:44.74ms +[2025-09-11 11:19:39] [Rank 0] step:4841/10000 train_time:216368ms step_avg:44.69ms +[2025-09-11 11:19:39] [Rank 0] step:4841/10000 train_time:216368ms step_avg:44.69ms +[2025-09-11 11:19:40] [Rank 0] step:4861/10000 train_time:217046ms step_avg:44.65ms +[2025-09-11 11:19:40] [Rank 0] step:4861/10000 train_time:217046ms step_avg:44.65ms +[2025-09-11 11:19:41] [Rank 0] step:4881/10000 train_time:217730ms step_avg:44.61ms +[2025-09-11 11:19:41] [Rank 0] step:4881/10000 
train_time:217730ms step_avg:44.61ms +[2025-09-11 11:19:42] [Rank 0] step:4901/10000 train_time:218411ms step_avg:44.56ms +[2025-09-11 11:19:42] [Rank 0] step:4901/10000 train_time:218411ms step_avg:44.56ms +[2025-09-11 11:19:42] [Rank 0] step:4921/10000 train_time:219090ms step_avg:44.52ms +[2025-09-11 11:19:42] [Rank 0] step:4921/10000 train_time:219090ms step_avg:44.52ms +[2025-09-11 11:19:43] [Rank 0] step:4941/10000 train_time:219769ms step_avg:44.48ms +[2025-09-11 11:19:43] [Rank 0] step:4941/10000 train_time:219769ms step_avg:44.48ms +[2025-09-11 11:19:44] [Rank 0] step:4961/10000 train_time:220447ms step_avg:44.44ms +[2025-09-11 11:19:44] [Rank 0] step:4961/10000 train_time:220447ms step_avg:44.44ms +[2025-09-11 11:19:44] [Rank 0] step:4981/10000 train_time:221126ms step_avg:44.39ms +[2025-09-11 11:19:44] [Rank 0] step:4981/10000 train_time:221126ms step_avg:44.39ms +[2025-09-11 11:19:45] [Rank 0] step:5001/10000 train_time:221805ms step_avg:44.35ms +[2025-09-11 11:19:45] [Rank 0] step:5001/10000 train_time:221805ms step_avg:44.35ms +[2025-09-11 11:19:46] [Rank 0] step:5021/10000 train_time:222482ms step_avg:44.31ms +[2025-09-11 11:19:46] [Rank 0] step:5021/10000 train_time:222482ms step_avg:44.31ms +[2025-09-11 11:19:46] [Rank 0] step:5041/10000 train_time:223160ms step_avg:44.27ms +[2025-09-11 11:19:46] [Rank 0] step:5041/10000 train_time:223160ms step_avg:44.27ms +[2025-09-11 11:19:47] [Rank 0] step:5061/10000 train_time:223838ms step_avg:44.23ms +[2025-09-11 11:19:47] [Rank 0] step:5061/10000 train_time:223838ms step_avg:44.23ms +[2025-09-11 11:19:48] [Rank 0] step:5081/10000 train_time:224516ms step_avg:44.19ms +[2025-09-11 11:19:48] [Rank 0] step:5081/10000 train_time:224516ms step_avg:44.19ms +[2025-09-11 11:19:48] [Rank 0] step:5101/10000 train_time:225194ms step_avg:44.15ms +[2025-09-11 11:19:48] [Rank 0] step:5101/10000 train_time:225194ms step_avg:44.15ms +[2025-09-11 11:19:49] [Rank 0] step:5121/10000 train_time:225872ms step_avg:44.11ms 
+[2025-09-11 11:19:49] [Rank 0] step:5121/10000 train_time:225872ms step_avg:44.11ms +[2025-09-11 11:19:50] [Rank 0] step:5141/10000 train_time:226550ms step_avg:44.07ms +[2025-09-11 11:19:50] [Rank 0] step:5141/10000 train_time:226550ms step_avg:44.07ms +[2025-09-11 11:19:50] [Rank 0] step:5161/10000 train_time:227229ms step_avg:44.03ms +[2025-09-11 11:19:50] [Rank 0] step:5161/10000 train_time:227229ms step_avg:44.03ms +[2025-09-11 11:19:51] [Rank 0] step:5181/10000 train_time:227908ms step_avg:43.99ms +[2025-09-11 11:19:51] [Rank 0] step:5181/10000 train_time:227908ms step_avg:43.99ms +[2025-09-11 11:19:52] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 11:19:52] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 11:19:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 11:19:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 11:19:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 11:19:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 11:19:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:19:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:19:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 11:19:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 11:19:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 11:19:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 11:19:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 11:19:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 11:19:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 11:19:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 11:19:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 11:19:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 11:19:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 11:19:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 11:19:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 11:19:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 11:19:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 11:19:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 11:19:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 11:19:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 11:20:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 11:20:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 11:20:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 11:20:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 11:20:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 11:20:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 11:20:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 11:20:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 11:20:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 11:20:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 11:20:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 11:20:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 11:20:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 11:20:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 11:20:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 11:20:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 11:20:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 11:20:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 11:20:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:20:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:20:02] [Rank 0] PRINT: step:5200/10000 val_loss:5.1494 total_sharp:3.2628e-04 L1_sharp:1.4671e-04 L2_sharp:5.7318e-05 L3_sharp:2.2789e-05 L4_sharp:3.0207e-05 L5_sharp:5.2107e-05 L6_sharp:2.8679e-05 L7_sharp:3.0426e-05 L8_sharp:3.8115e-05 L9_sharp:4.1381e-05 L10_sharp:6.9718e-05 L11_sharp:7.9103e-05 L12_sharp:3.4447e-04 total_fnorm:4.2750e+01 total_l1_linf:1.0752e+05 total_spectral:2.1375e+01 L1_fnorm:1.2750e+01 L2_fnorm:1.2375e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2438e+01 L5_fnorm:1.2062e+01 L6_fnorm:1.2312e+01 L7_fnorm:1.2375e+01 L8_fnorm:1.2062e+01 L9_fnorm:1.2312e+01 L10_fnorm:1.2375e+01 L11_fnorm:1.2312e+01 L12_fnorm:1.1812e+01 L1_l1linf:3.3125e+00 L2_l1linf:3.1562e+00 L3_l1linf:3.0000e+00 L4_l1linf:2.9844e+00 L5_l1linf:2.8125e+00 L6_l1linf:2.7344e+00 L7_l1linf:2.7188e+00 L8_l1linf:2.6719e+00 L9_l1linf:2.7188e+00 L10_l1linf:2.7656e+00 L11_l1linf:2.7500e+00 L12_l1linf:2.3125e+00 L1_spectral:1.5928e-01 L2_spectral:1.5596e-01 L3_spectral:1.5587e-01 L4_spectral:1.5708e-01 L5_spectral:1.5521e-01 L6_spectral:1.5677e-01 L7_spectral:1.5641e-01 L8_spectral:1.5492e-01 L9_spectral:1.5637e-01 L10_spectral:1.5599e-01 L11_spectral:1.5477e-01 L12_spectral:1.5457e-01 train_time:228573ms step_avg:43.96ms +[2025-09-11 11:20:02] [Rank 0] PRINT: step:5200/10000 val_loss:5.1494 total_sharp:3.2628e-04 L1_sharp:1.4671e-04 L2_sharp:5.7318e-05 L3_sharp:2.2789e-05 L4_sharp:3.0207e-05 L5_sharp:5.2107e-05 L6_sharp:2.8679e-05 L7_sharp:3.0426e-05 L8_sharp:3.8115e-05 L9_sharp:4.1381e-05 L10_sharp:6.9718e-05 L11_sharp:7.9103e-05 L12_sharp:3.4447e-04 total_fnorm:4.2750e+01 total_l1_linf:1.0752e+05 total_spectral:2.1375e+01 L1_fnorm:1.2750e+01 L2_fnorm:1.2375e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2438e+01 L5_fnorm:1.2062e+01 L6_fnorm:1.2312e+01 L7_fnorm:1.2375e+01 L8_fnorm:1.2062e+01 L9_fnorm:1.2312e+01 L10_fnorm:1.2375e+01 L11_fnorm:1.2312e+01 L12_fnorm:1.1812e+01 L1_l1linf:3.3125e+00 L2_l1linf:3.1562e+00 L3_l1linf:3.0000e+00 L4_l1linf:2.9844e+00 L5_l1linf:2.8125e+00 
L6_l1linf:2.7344e+00 L7_l1linf:2.7188e+00 L8_l1linf:2.6719e+00 L9_l1linf:2.7188e+00 L10_l1linf:2.7656e+00 L11_l1linf:2.7500e+00 L12_l1linf:2.3125e+00 L1_spectral:1.5928e-01 L2_spectral:1.5596e-01 L3_spectral:1.5587e-01 L4_spectral:1.5708e-01 L5_spectral:1.5521e-01 L6_spectral:1.5677e-01 L7_spectral:1.5641e-01 L8_spectral:1.5492e-01 L9_spectral:1.5637e-01 L10_spectral:1.5599e-01 L11_spectral:1.5477e-01 L12_spectral:1.5457e-01 train_time:228573ms step_avg:43.96ms +[2025-09-11 11:20:04] [Rank 0] step:5201/10000 train_time:230305ms step_avg:44.28ms +[2025-09-11 11:20:04] [Rank 0] step:5201/10000 train_time:230305ms step_avg:44.28ms +[2025-09-11 11:20:04] [Rank 0] step:5221/10000 train_time:231016ms step_avg:44.25ms +[2025-09-11 11:20:04] [Rank 0] step:5221/10000 train_time:231016ms step_avg:44.25ms +[2025-09-11 11:20:05] [Rank 0] step:5241/10000 train_time:231704ms step_avg:44.21ms +[2025-09-11 11:20:05] [Rank 0] step:5241/10000 train_time:231704ms step_avg:44.21ms +[2025-09-11 11:20:06] [Rank 0] step:5261/10000 train_time:232392ms step_avg:44.17ms +[2025-09-11 11:20:06] [Rank 0] step:5261/10000 train_time:232392ms step_avg:44.17ms +[2025-09-11 11:20:06] [Rank 0] step:5281/10000 train_time:233080ms step_avg:44.14ms +[2025-09-11 11:20:06] [Rank 0] step:5281/10000 train_time:233080ms step_avg:44.14ms +[2025-09-11 11:20:07] [Rank 0] step:5301/10000 train_time:233767ms step_avg:44.10ms +[2025-09-11 11:20:07] [Rank 0] step:5301/10000 train_time:233767ms step_avg:44.10ms +[2025-09-11 11:20:08] [Rank 0] step:5321/10000 train_time:234454ms step_avg:44.06ms +[2025-09-11 11:20:08] [Rank 0] step:5321/10000 train_time:234454ms step_avg:44.06ms +[2025-09-11 11:20:09] [Rank 0] step:5341/10000 train_time:235140ms step_avg:44.03ms +[2025-09-11 11:20:09] [Rank 0] step:5341/10000 train_time:235140ms step_avg:44.03ms +[2025-09-11 11:20:09] [Rank 0] step:5361/10000 train_time:235828ms step_avg:43.99ms +[2025-09-11 11:20:09] [Rank 0] step:5361/10000 train_time:235828ms step_avg:43.99ms 
+[2025-09-11 11:20:10] [Rank 0] step:5381/10000 train_time:236516ms step_avg:43.95ms +[2025-09-11 11:20:10] [Rank 0] step:5381/10000 train_time:236516ms step_avg:43.95ms +[2025-09-11 11:20:11] [Rank 0] step:5401/10000 train_time:237202ms step_avg:43.92ms +[2025-09-11 11:20:11] [Rank 0] step:5401/10000 train_time:237202ms step_avg:43.92ms +[2025-09-11 11:20:11] [Rank 0] step:5421/10000 train_time:237890ms step_avg:43.88ms +[2025-09-11 11:20:11] [Rank 0] step:5421/10000 train_time:237890ms step_avg:43.88ms +[2025-09-11 11:20:12] [Rank 0] step:5441/10000 train_time:238577ms step_avg:43.85ms +[2025-09-11 11:20:12] [Rank 0] step:5441/10000 train_time:238577ms step_avg:43.85ms +[2025-09-11 11:20:13] [Rank 0] step:5461/10000 train_time:239266ms step_avg:43.81ms +[2025-09-11 11:20:13] [Rank 0] step:5461/10000 train_time:239266ms step_avg:43.81ms +[2025-09-11 11:20:13] [Rank 0] step:5481/10000 train_time:239953ms step_avg:43.78ms +[2025-09-11 11:20:13] [Rank 0] step:5481/10000 train_time:239953ms step_avg:43.78ms +[2025-09-11 11:20:14] [Rank 0] step:5501/10000 train_time:240641ms step_avg:43.74ms +[2025-09-11 11:20:14] [Rank 0] step:5501/10000 train_time:240641ms step_avg:43.74ms +[2025-09-11 11:20:15] [Rank 0] step:5521/10000 train_time:241327ms step_avg:43.71ms +[2025-09-11 11:20:15] [Rank 0] step:5521/10000 train_time:241327ms step_avg:43.71ms +[2025-09-11 11:20:15] [Rank 0] step:5541/10000 train_time:242016ms step_avg:43.68ms +[2025-09-11 11:20:15] [Rank 0] step:5541/10000 train_time:242016ms step_avg:43.68ms +[2025-09-11 11:20:16] [Rank 0] step:5561/10000 train_time:242706ms step_avg:43.64ms +[2025-09-11 11:20:16] [Rank 0] step:5561/10000 train_time:242706ms step_avg:43.64ms +[2025-09-11 11:20:17] [Rank 0] step:5581/10000 train_time:243395ms step_avg:43.61ms +[2025-09-11 11:20:17] [Rank 0] step:5581/10000 train_time:243395ms step_avg:43.61ms +[2025-09-11 11:20:17] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 11:20:17] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 11:20:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 11:20:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 11:20:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 11:20:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 11:20:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:20:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:20:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 11:20:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 11:20:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 11:20:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 11:20:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 11:20:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 11:20:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 11:20:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 11:20:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 11:20:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 11:20:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 11:20:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 11:20:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 11:20:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 11:20:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 11:20:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 11:20:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 11:20:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 11:20:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 11:20:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 11:20:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 11:20:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 11:20:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 11:20:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 11:20:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 11:20:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 11:20:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 11:20:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 11:20:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 11:20:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 11:20:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 11:20:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 11:20:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 11:20:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 11:20:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 11:20:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 11:20:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:20:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:20:28] [Rank 0] PRINT: step:5600/10000 val_loss:5.1151 total_sharp:2.9663e-04 L1_sharp:1.1818e-04 L2_sharp:5.8479e-05 L3_sharp:2.1873e-05 L4_sharp:2.6665e-05 L5_sharp:3.7442e-05 L6_sharp:2.5190e-05 L7_sharp:2.1751e-05 L8_sharp:4.1502e-05 L9_sharp:3.9274e-05 L10_sharp:6.5074e-05 L11_sharp:6.9050e-05 L12_sharp:2.6227e-04 total_fnorm:4.2250e+01 total_l1_linf:1.0598e+05 total_spectral:2.1250e+01 L1_fnorm:1.2688e+01 L2_fnorm:1.2312e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2375e+01 L5_fnorm:1.1938e+01 L6_fnorm:1.2250e+01 L7_fnorm:1.2312e+01 L8_fnorm:1.1938e+01 L9_fnorm:1.2312e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2312e+01 L12_fnorm:1.1875e+01 L1_l1linf:3.2344e+00 L2_l1linf:3.1406e+00 L3_l1linf:2.9688e+00 L4_l1linf:2.8438e+00 L5_l1linf:2.7812e+00 L6_l1linf:2.7500e+00 L7_l1linf:2.6875e+00 L8_l1linf:2.6406e+00 L9_l1linf:2.6562e+00 L10_l1linf:2.7344e+00 L11_l1linf:2.6875e+00 L12_l1linf:2.3438e+00 L1_spectral:1.5913e-01 L2_spectral:1.5513e-01 L3_spectral:1.5582e-01 L4_spectral:1.5641e-01 L5_spectral:1.5344e-01 L6_spectral:1.5622e-01 L7_spectral:1.5670e-01 L8_spectral:1.5593e-01 L9_spectral:1.5610e-01 L10_spectral:1.5642e-01 L11_spectral:1.5548e-01 L12_spectral:1.5558e-01 train_time:244062ms step_avg:43.58ms +[2025-09-11 11:20:28] [Rank 0] PRINT: step:5600/10000 
val_loss:5.1151 total_sharp:2.9663e-04 L1_sharp:1.1818e-04 L2_sharp:5.8479e-05 L3_sharp:2.1873e-05 L4_sharp:2.6665e-05 L5_sharp:3.7442e-05 L6_sharp:2.5190e-05 L7_sharp:2.1751e-05 L8_sharp:4.1502e-05 L9_sharp:3.9274e-05 L10_sharp:6.5074e-05 L11_sharp:6.9050e-05 L12_sharp:2.6227e-04 total_fnorm:4.2250e+01 total_l1_linf:1.0598e+05 total_spectral:2.1250e+01 L1_fnorm:1.2688e+01 L2_fnorm:1.2312e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2375e+01 L5_fnorm:1.1938e+01 L6_fnorm:1.2250e+01 L7_fnorm:1.2312e+01 L8_fnorm:1.1938e+01 L9_fnorm:1.2312e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2312e+01 L12_fnorm:1.1875e+01 L1_l1linf:3.2344e+00 L2_l1linf:3.1406e+00 L3_l1linf:2.9688e+00 L4_l1linf:2.8438e+00 L5_l1linf:2.7812e+00 L6_l1linf:2.7500e+00 L7_l1linf:2.6875e+00 L8_l1linf:2.6406e+00 L9_l1linf:2.6562e+00 L10_l1linf:2.7344e+00 L11_l1linf:2.6875e+00 L12_l1linf:2.3438e+00 L1_spectral:1.5913e-01 L2_spectral:1.5513e-01 L3_spectral:1.5582e-01 L4_spectral:1.5641e-01 L5_spectral:1.5344e-01 L6_spectral:1.5622e-01 L7_spectral:1.5670e-01 L8_spectral:1.5593e-01 L9_spectral:1.5610e-01 L10_spectral:1.5642e-01 L11_spectral:1.5548e-01 L12_spectral:1.5558e-01 train_time:244062ms step_avg:43.58ms +[2025-09-11 11:20:30] [Rank 0] step:5601/10000 train_time:245924ms step_avg:43.91ms +[2025-09-11 11:20:30] [Rank 0] step:5601/10000 train_time:245924ms step_avg:43.91ms +[2025-09-11 11:20:31] [Rank 0] step:5621/10000 train_time:246652ms step_avg:43.88ms +[2025-09-11 11:20:31] [Rank 0] step:5621/10000 train_time:246652ms step_avg:43.88ms +[2025-09-11 11:20:31] [Rank 0] step:5641/10000 train_time:247339ms step_avg:43.85ms +[2025-09-11 11:20:31] [Rank 0] step:5641/10000 train_time:247339ms step_avg:43.85ms +[2025-09-11 11:20:32] [Rank 0] step:5661/10000 train_time:248027ms step_avg:43.81ms +[2025-09-11 11:20:32] [Rank 0] step:5661/10000 train_time:248027ms step_avg:43.81ms +[2025-09-11 11:20:33] [Rank 0] step:5681/10000 train_time:248715ms step_avg:43.78ms +[2025-09-11 11:20:33] [Rank 0] step:5681/10000 
train_time:248715ms step_avg:43.78ms +[2025-09-11 11:20:33] [Rank 0] step:5701/10000 train_time:249404ms step_avg:43.75ms +[2025-09-11 11:20:33] [Rank 0] step:5701/10000 train_time:249404ms step_avg:43.75ms +[2025-09-11 11:20:34] [Rank 0] step:5721/10000 train_time:250093ms step_avg:43.71ms +[2025-09-11 11:20:34] [Rank 0] step:5721/10000 train_time:250093ms step_avg:43.71ms +[2025-09-11 11:20:35] [Rank 0] step:5741/10000 train_time:250783ms step_avg:43.68ms +[2025-09-11 11:20:35] [Rank 0] step:5741/10000 train_time:250783ms step_avg:43.68ms +[2025-09-11 11:20:36] [Rank 0] step:5761/10000 train_time:251471ms step_avg:43.65ms +[2025-09-11 11:20:36] [Rank 0] step:5761/10000 train_time:251471ms step_avg:43.65ms +[2025-09-11 11:20:36] [Rank 0] step:5781/10000 train_time:252159ms step_avg:43.62ms +[2025-09-11 11:20:36] [Rank 0] step:5781/10000 train_time:252159ms step_avg:43.62ms +[2025-09-11 11:20:37] [Rank 0] step:5801/10000 train_time:252848ms step_avg:43.59ms +[2025-09-11 11:20:37] [Rank 0] step:5801/10000 train_time:252848ms step_avg:43.59ms +[2025-09-11 11:20:38] [Rank 0] step:5821/10000 train_time:253535ms step_avg:43.56ms +[2025-09-11 11:20:38] [Rank 0] step:5821/10000 train_time:253535ms step_avg:43.56ms +[2025-09-11 11:20:38] [Rank 0] step:5841/10000 train_time:254223ms step_avg:43.52ms +[2025-09-11 11:20:38] [Rank 0] step:5841/10000 train_time:254223ms step_avg:43.52ms +[2025-09-11 11:20:39] [Rank 0] step:5861/10000 train_time:254911ms step_avg:43.49ms +[2025-09-11 11:20:39] [Rank 0] step:5861/10000 train_time:254911ms step_avg:43.49ms +[2025-09-11 11:20:40] [Rank 0] step:5881/10000 train_time:255598ms step_avg:43.46ms +[2025-09-11 11:20:40] [Rank 0] step:5881/10000 train_time:255598ms step_avg:43.46ms +[2025-09-11 11:20:40] [Rank 0] step:5901/10000 train_time:256285ms step_avg:43.43ms +[2025-09-11 11:20:40] [Rank 0] step:5901/10000 train_time:256285ms step_avg:43.43ms +[2025-09-11 11:20:41] [Rank 0] step:5921/10000 train_time:256976ms step_avg:43.40ms 
+[2025-09-11 11:20:41] [Rank 0] step:5921/10000 train_time:256976ms step_avg:43.40ms +[2025-09-11 11:20:42] [Rank 0] step:5941/10000 train_time:257666ms step_avg:43.37ms +[2025-09-11 11:20:42] [Rank 0] step:5941/10000 train_time:257666ms step_avg:43.37ms +[2025-09-11 11:20:42] [Rank 0] step:5961/10000 train_time:258354ms step_avg:43.34ms +[2025-09-11 11:20:42] [Rank 0] step:5961/10000 train_time:258354ms step_avg:43.34ms +[2025-09-11 11:20:43] [Rank 0] step:5981/10000 train_time:259043ms step_avg:43.31ms +[2025-09-11 11:20:43] [Rank 0] step:5981/10000 train_time:259043ms step_avg:43.31ms +[2025-09-11 11:20:44] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 11:20:44] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 11:20:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 11:20:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 11:20:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 11:20:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 11:20:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:20:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:20:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 11:20:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 11:20:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 11:20:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 11:20:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 11:20:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 11:20:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 11:20:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 11:20:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 11:20:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 11:20:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 11:20:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 11:20:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 11:20:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 11:20:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 11:20:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 11:20:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 11:20:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 11:20:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 11:20:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 11:20:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 11:20:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 11:20:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 11:20:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 11:20:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 11:20:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 11:20:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 11:20:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 11:20:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 11:20:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 11:20:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 11:20:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 11:20:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 11:20:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 11:20:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 11:20:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 11:20:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:20:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:20:54] [Rank 0] PRINT: step:6000/10000 val_loss:5.0765 total_sharp:2.7889e-04 L1_sharp:8.6734e-05 L2_sharp:3.4471e-05 L3_sharp:1.2719e-05 L4_sharp:2.3373e-05 L5_sharp:3.7628e-05 L6_sharp:2.7687e-05 L7_sharp:1.9554e-05 L8_sharp:4.2545e-05 L9_sharp:4.3091e-05 L10_sharp:6.1818e-05 L11_sharp:6.9704e-05 L12_sharp:4.7185e-04 total_fnorm:4.2500e+01 total_l1_linf:1.0496e+05 total_spectral:2.1375e+01 L1_fnorm:1.2750e+01 L2_fnorm:1.2312e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2438e+01 L5_fnorm:1.2000e+01 L6_fnorm:1.2250e+01 L7_fnorm:1.2312e+01 L8_fnorm:1.2000e+01 L9_fnorm:1.2312e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2312e+01 L12_fnorm:1.1938e+01 L1_l1linf:3.2500e+00 L2_l1linf:3.1250e+00 L3_l1linf:2.9531e+00 L4_l1linf:2.9531e+00 L5_l1linf:2.7500e+00 L6_l1linf:2.7188e+00 L7_l1linf:2.7500e+00 L8_l1linf:2.6094e+00 L9_l1linf:2.6562e+00 L10_l1linf:2.7031e+00 L11_l1linf:2.6719e+00 L12_l1linf:2.3281e+00 L1_spectral:1.6013e-01 L2_spectral:1.5541e-01 L3_spectral:1.5669e-01 L4_spectral:1.5747e-01 L5_spectral:1.5528e-01 L6_spectral:1.5701e-01 L7_spectral:1.5756e-01 L8_spectral:1.5628e-01 L9_spectral:1.5698e-01 L10_spectral:1.5706e-01 L11_spectral:1.5646e-01 L12_spectral:1.5668e-01 train_time:259714ms step_avg:43.29ms +[2025-09-11 11:20:54] [Rank 0] PRINT: step:6000/10000 val_loss:5.0765 total_sharp:2.7889e-04 L1_sharp:8.6734e-05 L2_sharp:3.4471e-05 L3_sharp:1.2719e-05 L4_sharp:2.3373e-05 L5_sharp:3.7628e-05 L6_sharp:2.7687e-05 L7_sharp:1.9554e-05 L8_sharp:4.2545e-05 L9_sharp:4.3091e-05 L10_sharp:6.1818e-05 L11_sharp:6.9704e-05 L12_sharp:4.7185e-04 total_fnorm:4.2500e+01 total_l1_linf:1.0496e+05 total_spectral:2.1375e+01 L1_fnorm:1.2750e+01 L2_fnorm:1.2312e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2438e+01 L5_fnorm:1.2000e+01 L6_fnorm:1.2250e+01 L7_fnorm:1.2312e+01 L8_fnorm:1.2000e+01 L9_fnorm:1.2312e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2312e+01 L12_fnorm:1.1938e+01 L1_l1linf:3.2500e+00 L2_l1linf:3.1250e+00 L3_l1linf:2.9531e+00 L4_l1linf:2.9531e+00 L5_l1linf:2.7500e+00 
L6_l1linf:2.7188e+00 L7_l1linf:2.7500e+00 L8_l1linf:2.6094e+00 L9_l1linf:2.6562e+00 L10_l1linf:2.7031e+00 L11_l1linf:2.6719e+00 L12_l1linf:2.3281e+00 L1_spectral:1.6013e-01 L2_spectral:1.5541e-01 L3_spectral:1.5669e-01 L4_spectral:1.5747e-01 L5_spectral:1.5528e-01 L6_spectral:1.5701e-01 L7_spectral:1.5756e-01 L8_spectral:1.5628e-01 L9_spectral:1.5698e-01 L10_spectral:1.5706e-01 L11_spectral:1.5646e-01 L12_spectral:1.5668e-01 train_time:259714ms step_avg:43.29ms +[2025-09-11 11:20:56] [Rank 0] step:6001/10000 train_time:261563ms step_avg:43.59ms +[2025-09-11 11:20:56] [Rank 0] step:6001/10000 train_time:261563ms step_avg:43.59ms +[2025-09-11 11:20:57] [Rank 0] step:6021/10000 train_time:262278ms step_avg:43.56ms +[2025-09-11 11:20:57] [Rank 0] step:6021/10000 train_time:262278ms step_avg:43.56ms +[2025-09-11 11:20:57] [Rank 0] step:6041/10000 train_time:262972ms step_avg:43.53ms +[2025-09-11 11:20:57] [Rank 0] step:6041/10000 train_time:262972ms step_avg:43.53ms +[2025-09-11 11:20:58] [Rank 0] step:6061/10000 train_time:263663ms step_avg:43.50ms +[2025-09-11 11:20:58] [Rank 0] step:6061/10000 train_time:263663ms step_avg:43.50ms +[2025-09-11 11:20:59] [Rank 0] step:6081/10000 train_time:264355ms step_avg:43.47ms +[2025-09-11 11:20:59] [Rank 0] step:6081/10000 train_time:264355ms step_avg:43.47ms +[2025-09-11 11:20:59] [Rank 0] step:6101/10000 train_time:265046ms step_avg:43.44ms +[2025-09-11 11:20:59] [Rank 0] step:6101/10000 train_time:265046ms step_avg:43.44ms +[2025-09-11 11:21:00] [Rank 0] step:6121/10000 train_time:265737ms step_avg:43.41ms +[2025-09-11 11:21:00] [Rank 0] step:6121/10000 train_time:265737ms step_avg:43.41ms +[2025-09-11 11:21:01] [Rank 0] step:6141/10000 train_time:266428ms step_avg:43.39ms +[2025-09-11 11:21:01] [Rank 0] step:6141/10000 train_time:266428ms step_avg:43.39ms +[2025-09-11 11:21:02] [Rank 0] step:6161/10000 train_time:267481ms step_avg:43.42ms +[2025-09-11 11:21:02] [Rank 0] step:6161/10000 train_time:267481ms step_avg:43.42ms 
+[2025-09-11 11:21:03] [Rank 0] step:6181/10000 train_time:268316ms step_avg:43.41ms +[2025-09-11 11:21:03] [Rank 0] step:6181/10000 train_time:268316ms step_avg:43.41ms +[2025-09-11 11:21:03] [Rank 0] step:6201/10000 train_time:269009ms step_avg:43.38ms +[2025-09-11 11:21:03] [Rank 0] step:6201/10000 train_time:269009ms step_avg:43.38ms +[2025-09-11 11:21:04] [Rank 0] step:6221/10000 train_time:269968ms step_avg:43.40ms +[2025-09-11 11:21:04] [Rank 0] step:6221/10000 train_time:269968ms step_avg:43.40ms +[2025-09-11 11:21:05] [Rank 0] step:6241/10000 train_time:270659ms step_avg:43.37ms +[2025-09-11 11:21:05] [Rank 0] step:6241/10000 train_time:270659ms step_avg:43.37ms +[2025-09-11 11:21:06] [Rank 0] step:6261/10000 train_time:271349ms step_avg:43.34ms +[2025-09-11 11:21:06] [Rank 0] step:6261/10000 train_time:271349ms step_avg:43.34ms +[2025-09-11 11:21:06] [Rank 0] step:6281/10000 train_time:272040ms step_avg:43.31ms +[2025-09-11 11:21:06] [Rank 0] step:6281/10000 train_time:272040ms step_avg:43.31ms +[2025-09-11 11:21:07] [Rank 0] step:6301/10000 train_time:272730ms step_avg:43.28ms +[2025-09-11 11:21:07] [Rank 0] step:6301/10000 train_time:272730ms step_avg:43.28ms +[2025-09-11 11:21:08] [Rank 0] step:6321/10000 train_time:273423ms step_avg:43.26ms +[2025-09-11 11:21:08] [Rank 0] step:6321/10000 train_time:273423ms step_avg:43.26ms +[2025-09-11 11:21:09] [Rank 0] step:6341/10000 train_time:274216ms step_avg:43.24ms +[2025-09-11 11:21:09] [Rank 0] step:6341/10000 train_time:274216ms step_avg:43.24ms +[2025-09-11 11:21:09] [Rank 0] step:6361/10000 train_time:274907ms step_avg:43.22ms +[2025-09-11 11:21:09] [Rank 0] step:6361/10000 train_time:274907ms step_avg:43.22ms +[2025-09-11 11:21:10] [Rank 0] step:6381/10000 train_time:275598ms step_avg:43.19ms +[2025-09-11 11:21:10] [Rank 0] step:6381/10000 train_time:275598ms step_avg:43.19ms +[2025-09-11 11:21:11] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 11:21:11] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 11:21:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 11:21:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 11:21:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 11:21:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 11:21:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:21:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:21:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 11:21:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 11:21:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 11:21:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 11:21:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 11:21:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 11:21:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 11:21:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 11:21:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 11:21:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 11:21:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 11:21:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 11:21:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 11:21:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 11:21:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 11:21:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 11:21:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 11:21:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 11:21:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 11:21:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 11:21:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 11:21:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 11:21:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 11:21:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 11:21:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 11:21:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 11:21:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 11:21:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 11:21:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 11:21:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 11:21:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 11:21:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 11:21:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 11:21:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 11:21:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 11:21:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 11:21:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:21:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:21:21] [Rank 0] PRINT: step:6400/10000 val_loss:5.0466 total_sharp:2.4034e-04 L1_sharp:8.1045e-05 L2_sharp:6.9812e-05 L3_sharp:2.8423e-05 L4_sharp:2.0493e-05 L5_sharp:4.3235e-05 L6_sharp:2.5993e-05 L7_sharp:1.9655e-05 L8_sharp:3.9780e-05 L9_sharp:4.1240e-05 L10_sharp:5.6103e-05 L11_sharp:6.1670e-05 L12_sharp:2.4621e-04 total_fnorm:3.8750e+01 total_l1_linf:9.1136e+04 total_spectral:1.9375e+01 L1_fnorm:1.1500e+01 L2_fnorm:1.1188e+01 L3_fnorm:1.1125e+01 L4_fnorm:1.1188e+01 L5_fnorm:1.0812e+01 L6_fnorm:1.1062e+01 L7_fnorm:1.1125e+01 L8_fnorm:1.0875e+01 L9_fnorm:1.1188e+01 L10_fnorm:1.1125e+01 L11_fnorm:1.1125e+01 L12_fnorm:1.0750e+01 L1_l1linf:2.8281e+00 L2_l1linf:2.7031e+00 L3_l1linf:2.5938e+00 L4_l1linf:2.5156e+00 L5_l1linf:2.4062e+00 L6_l1linf:2.3750e+00 L7_l1linf:2.3594e+00 L8_l1linf:2.2656e+00 L9_l1linf:2.3438e+00 L10_l1linf:2.3438e+00 L11_l1linf:2.3438e+00 L12_l1linf:2.0938e+00 L1_spectral:1.4688e-01 L2_spectral:1.4345e-01 L3_spectral:1.4479e-01 L4_spectral:1.4567e-01 L5_spectral:1.4233e-01 L6_spectral:1.4483e-01 L7_spectral:1.4535e-01 L8_spectral:1.4401e-01 L9_spectral:1.4464e-01 L10_spectral:1.4502e-01 L11_spectral:1.4609e-01 L12_spectral:1.4438e-01 train_time:276268ms step_avg:43.17ms +[2025-09-11 11:21:21] [Rank 0] PRINT: step:6400/10000 
val_loss:5.0466 total_sharp:2.4034e-04 L1_sharp:8.1045e-05 L2_sharp:6.9812e-05 L3_sharp:2.8423e-05 L4_sharp:2.0493e-05 L5_sharp:4.3235e-05 L6_sharp:2.5993e-05 L7_sharp:1.9655e-05 L8_sharp:3.9780e-05 L9_sharp:4.1240e-05 L10_sharp:5.6103e-05 L11_sharp:6.1670e-05 L12_sharp:2.4621e-04 total_fnorm:3.8750e+01 total_l1_linf:9.1136e+04 total_spectral:1.9375e+01 L1_fnorm:1.1500e+01 L2_fnorm:1.1188e+01 L3_fnorm:1.1125e+01 L4_fnorm:1.1188e+01 L5_fnorm:1.0812e+01 L6_fnorm:1.1062e+01 L7_fnorm:1.1125e+01 L8_fnorm:1.0875e+01 L9_fnorm:1.1188e+01 L10_fnorm:1.1125e+01 L11_fnorm:1.1125e+01 L12_fnorm:1.0750e+01 L1_l1linf:2.8281e+00 L2_l1linf:2.7031e+00 L3_l1linf:2.5938e+00 L4_l1linf:2.5156e+00 L5_l1linf:2.4062e+00 L6_l1linf:2.3750e+00 L7_l1linf:2.3594e+00 L8_l1linf:2.2656e+00 L9_l1linf:2.3438e+00 L10_l1linf:2.3438e+00 L11_l1linf:2.3438e+00 L12_l1linf:2.0938e+00 L1_spectral:1.4688e-01 L2_spectral:1.4345e-01 L3_spectral:1.4479e-01 L4_spectral:1.4567e-01 L5_spectral:1.4233e-01 L6_spectral:1.4483e-01 L7_spectral:1.4535e-01 L8_spectral:1.4401e-01 L9_spectral:1.4464e-01 L10_spectral:1.4502e-01 L11_spectral:1.4609e-01 L12_spectral:1.4438e-01 train_time:276268ms step_avg:43.17ms +[2025-09-11 11:21:23] [Rank 0] step:6401/10000 train_time:278160ms step_avg:43.46ms +[2025-09-11 11:21:23] [Rank 0] step:6401/10000 train_time:278160ms step_avg:43.46ms +[2025-09-11 11:21:24] [Rank 0] step:6421/10000 train_time:278868ms step_avg:43.43ms +[2025-09-11 11:21:24] [Rank 0] step:6421/10000 train_time:278868ms step_avg:43.43ms +[2025-09-11 11:21:24] [Rank 0] step:6441/10000 train_time:279561ms step_avg:43.40ms +[2025-09-11 11:21:24] [Rank 0] step:6441/10000 train_time:279561ms step_avg:43.40ms +[2025-09-11 11:21:25] [Rank 0] step:6461/10000 train_time:280253ms step_avg:43.38ms +[2025-09-11 11:21:25] [Rank 0] step:6461/10000 train_time:280253ms step_avg:43.38ms +[2025-09-11 11:21:26] [Rank 0] step:6481/10000 train_time:280946ms step_avg:43.35ms +[2025-09-11 11:21:26] [Rank 0] step:6481/10000 
train_time:280946ms step_avg:43.35ms +[2025-09-11 11:21:26] [Rank 0] step:6501/10000 train_time:281640ms step_avg:43.32ms +[2025-09-11 11:21:26] [Rank 0] step:6501/10000 train_time:281640ms step_avg:43.32ms +[2025-09-11 11:21:27] [Rank 0] step:6521/10000 train_time:282332ms step_avg:43.30ms +[2025-09-11 11:21:27] [Rank 0] step:6521/10000 train_time:282332ms step_avg:43.30ms +[2025-09-11 11:21:28] [Rank 0] step:6541/10000 train_time:283024ms step_avg:43.27ms +[2025-09-11 11:21:28] [Rank 0] step:6541/10000 train_time:283024ms step_avg:43.27ms +[2025-09-11 11:21:29] [Rank 0] step:6561/10000 train_time:283715ms step_avg:43.24ms +[2025-09-11 11:21:29] [Rank 0] step:6561/10000 train_time:283715ms step_avg:43.24ms +[2025-09-11 11:21:29] [Rank 0] step:6581/10000 train_time:284407ms step_avg:43.22ms +[2025-09-11 11:21:29] [Rank 0] step:6581/10000 train_time:284407ms step_avg:43.22ms +[2025-09-11 11:21:30] [Rank 0] step:6601/10000 train_time:285098ms step_avg:43.19ms +[2025-09-11 11:21:30] [Rank 0] step:6601/10000 train_time:285098ms step_avg:43.19ms +[2025-09-11 11:21:31] [Rank 0] step:6621/10000 train_time:285787ms step_avg:43.16ms +[2025-09-11 11:21:31] [Rank 0] step:6621/10000 train_time:285787ms step_avg:43.16ms +[2025-09-11 11:21:31] [Rank 0] step:6641/10000 train_time:286479ms step_avg:43.14ms +[2025-09-11 11:21:31] [Rank 0] step:6641/10000 train_time:286479ms step_avg:43.14ms +[2025-09-11 11:21:32] [Rank 0] step:6661/10000 train_time:287172ms step_avg:43.11ms +[2025-09-11 11:21:32] [Rank 0] step:6661/10000 train_time:287172ms step_avg:43.11ms +[2025-09-11 11:21:33] [Rank 0] step:6681/10000 train_time:287870ms step_avg:43.09ms +[2025-09-11 11:21:33] [Rank 0] step:6681/10000 train_time:287870ms step_avg:43.09ms +[2025-09-11 11:21:33] [Rank 0] step:6701/10000 train_time:288568ms step_avg:43.06ms +[2025-09-11 11:21:33] [Rank 0] step:6701/10000 train_time:288568ms step_avg:43.06ms +[2025-09-11 11:21:34] [Rank 0] step:6721/10000 train_time:289266ms step_avg:43.04ms 
+[2025-09-11 11:21:34] [Rank 0] step:6721/10000 train_time:289266ms step_avg:43.04ms +[2025-09-11 11:21:35] [Rank 0] step:6741/10000 train_time:289966ms step_avg:43.02ms +[2025-09-11 11:21:35] [Rank 0] step:6741/10000 train_time:289966ms step_avg:43.02ms +[2025-09-11 11:21:35] [Rank 0] step:6761/10000 train_time:290663ms step_avg:42.99ms +[2025-09-11 11:21:35] [Rank 0] step:6761/10000 train_time:290663ms step_avg:42.99ms +[2025-09-11 11:21:36] [Rank 0] step:6781/10000 train_time:291362ms step_avg:42.97ms +[2025-09-11 11:21:36] [Rank 0] step:6781/10000 train_time:291362ms step_avg:42.97ms +[2025-09-11 11:21:37] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 11:21:37] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 11:21:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 11:21:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 11:21:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 11:21:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 11:21:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:21:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:21:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 11:21:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 11:21:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 11:21:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 11:21:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 11:21:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 11:21:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 11:21:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 11:21:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 11:21:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 11:21:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 11:21:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 11:21:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 11:21:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 11:21:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 11:21:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 11:21:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 11:21:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 11:21:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 11:21:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 11:21:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 11:21:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 11:21:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 11:21:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 11:21:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 11:21:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 11:21:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 11:21:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 11:21:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 11:21:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 11:21:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 11:21:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 11:21:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 11:21:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 11:21:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 11:21:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 11:21:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:21:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:21:47] [Rank 0] PRINT: step:6800/10000 val_loss:5.0086 total_sharp:2.1537e-04 L1_sharp:8.3902e-05 L2_sharp:4.3354e-05 L3_sharp:2.0351e-05 L4_sharp:1.9436e-05 L5_sharp:3.5077e-05 L6_sharp:1.9748e-05 L7_sharp:1.5726e-05 L8_sharp:3.9442e-05 L9_sharp:4.9544e-05 L10_sharp:5.8816e-05 L11_sharp:6.0841e-05 L12_sharp:2.1133e-04 total_fnorm:3.3500e+01 total_l1_linf:7.6288e+04 total_spectral:1.7000e+01 L1_fnorm:1.0250e+01 L2_fnorm:9.8750e+00 L3_fnorm:9.8125e+00 L4_fnorm:9.8750e+00 L5_fnorm:9.5000e+00 L6_fnorm:9.8125e+00 L7_fnorm:9.8125e+00 L8_fnorm:9.5625e+00 L9_fnorm:9.8125e+00 L10_fnorm:9.8125e+00 L11_fnorm:9.8125e+00 L12_fnorm:9.5625e+00 L1_l1linf:2.5000e+00 L2_l1linf:2.3281e+00 L3_l1linf:2.2344e+00 L4_l1linf:2.1875e+00 L5_l1linf:2.1406e+00 L6_l1linf:2.0625e+00 L7_l1linf:2.0312e+00 L8_l1linf:1.9062e+00 L9_l1linf:1.9453e+00 L10_l1linf:2.0000e+00 L11_l1linf:1.9922e+00 L12_l1linf:1.8125e+00 L1_spectral:1.3238e-01 L2_spectral:1.3004e-01 L3_spectral:1.3022e-01 L4_spectral:1.3091e-01 L5_spectral:1.2892e-01 L6_spectral:1.3118e-01 L7_spectral:1.3149e-01 L8_spectral:1.2983e-01 L9_spectral:1.3205e-01 L10_spectral:1.3154e-01 L11_spectral:1.3090e-01 L12_spectral:1.3175e-01 train_time:292041ms step_avg:42.95ms +[2025-09-11 11:21:47] [Rank 0] PRINT: step:6800/10000 val_loss:5.0086 total_sharp:2.1537e-04 L1_sharp:8.3902e-05 L2_sharp:4.3354e-05 L3_sharp:2.0351e-05 L4_sharp:1.9436e-05 L5_sharp:3.5077e-05 L6_sharp:1.9748e-05 L7_sharp:1.5726e-05 L8_sharp:3.9442e-05 L9_sharp:4.9544e-05 L10_sharp:5.8816e-05 L11_sharp:6.0841e-05 L12_sharp:2.1133e-04 total_fnorm:3.3500e+01 total_l1_linf:7.6288e+04 total_spectral:1.7000e+01 L1_fnorm:1.0250e+01 L2_fnorm:9.8750e+00 L3_fnorm:9.8125e+00 L4_fnorm:9.8750e+00 L5_fnorm:9.5000e+00 L6_fnorm:9.8125e+00 L7_fnorm:9.8125e+00 L8_fnorm:9.5625e+00 L9_fnorm:9.8125e+00 L10_fnorm:9.8125e+00 L11_fnorm:9.8125e+00 L12_fnorm:9.5625e+00 L1_l1linf:2.5000e+00 L2_l1linf:2.3281e+00 L3_l1linf:2.2344e+00 L4_l1linf:2.1875e+00 L5_l1linf:2.1406e+00 
L6_l1linf:2.0625e+00 L7_l1linf:2.0312e+00 L8_l1linf:1.9062e+00 L9_l1linf:1.9453e+00 L10_l1linf:2.0000e+00 L11_l1linf:1.9922e+00 L12_l1linf:1.8125e+00 L1_spectral:1.3238e-01 L2_spectral:1.3004e-01 L3_spectral:1.3022e-01 L4_spectral:1.3091e-01 L5_spectral:1.2892e-01 L6_spectral:1.3118e-01 L7_spectral:1.3149e-01 L8_spectral:1.2983e-01 L9_spectral:1.3205e-01 L10_spectral:1.3154e-01 L11_spectral:1.3090e-01 L12_spectral:1.3175e-01 train_time:292041ms step_avg:42.95ms +[2025-09-11 11:21:49] [Rank 0] step:6801/10000 train_time:293892ms step_avg:43.21ms +[2025-09-11 11:21:49] [Rank 0] step:6801/10000 train_time:293892ms step_avg:43.21ms +[2025-09-11 11:21:50] [Rank 0] step:6821/10000 train_time:294605ms step_avg:43.19ms +[2025-09-11 11:21:50] [Rank 0] step:6821/10000 train_time:294605ms step_avg:43.19ms +[2025-09-11 11:21:51] [Rank 0] step:6841/10000 train_time:295307ms step_avg:43.17ms +[2025-09-11 11:21:51] [Rank 0] step:6841/10000 train_time:295307ms step_avg:43.17ms +[2025-09-11 11:21:51] [Rank 0] step:6861/10000 train_time:296007ms step_avg:43.14ms +[2025-09-11 11:21:51] [Rank 0] step:6861/10000 train_time:296007ms step_avg:43.14ms +[2025-09-11 11:21:52] [Rank 0] step:6881/10000 train_time:296706ms step_avg:43.12ms +[2025-09-11 11:21:52] [Rank 0] step:6881/10000 train_time:296706ms step_avg:43.12ms +[2025-09-11 11:21:53] [Rank 0] step:6901/10000 train_time:297403ms step_avg:43.10ms +[2025-09-11 11:21:53] [Rank 0] step:6901/10000 train_time:297403ms step_avg:43.10ms +[2025-09-11 11:21:54] [Rank 0] step:6921/10000 train_time:298099ms step_avg:43.07ms +[2025-09-11 11:21:54] [Rank 0] step:6921/10000 train_time:298099ms step_avg:43.07ms +[2025-09-11 11:21:54] [Rank 0] step:6941/10000 train_time:298798ms step_avg:43.05ms +[2025-09-11 11:21:54] [Rank 0] step:6941/10000 train_time:298798ms step_avg:43.05ms +[2025-09-11 11:21:55] [Rank 0] step:6961/10000 train_time:299496ms step_avg:43.02ms +[2025-09-11 11:21:55] [Rank 0] step:6961/10000 train_time:299496ms step_avg:43.02ms 
+[2025-09-11 11:21:56] [Rank 0] step:6981/10000 train_time:300195ms step_avg:43.00ms +[2025-09-11 11:21:56] [Rank 0] step:6981/10000 train_time:300195ms step_avg:43.00ms +[2025-09-11 11:21:56] [Rank 0] step:7001/10000 train_time:300893ms step_avg:42.98ms +[2025-09-11 11:21:56] [Rank 0] step:7001/10000 train_time:300893ms step_avg:42.98ms +[2025-09-11 11:21:57] [Rank 0] step:7021/10000 train_time:301591ms step_avg:42.96ms +[2025-09-11 11:21:57] [Rank 0] step:7021/10000 train_time:301591ms step_avg:42.96ms +[2025-09-11 11:21:58] [Rank 0] step:7041/10000 train_time:302288ms step_avg:42.93ms +[2025-09-11 11:21:58] [Rank 0] step:7041/10000 train_time:302288ms step_avg:42.93ms +[2025-09-11 11:21:58] [Rank 0] step:7061/10000 train_time:302987ms step_avg:42.91ms +[2025-09-11 11:21:58] [Rank 0] step:7061/10000 train_time:302987ms step_avg:42.91ms +[2025-09-11 11:21:59] [Rank 0] step:7081/10000 train_time:303685ms step_avg:42.89ms +[2025-09-11 11:21:59] [Rank 0] step:7081/10000 train_time:303685ms step_avg:42.89ms +[2025-09-11 11:22:00] [Rank 0] step:7101/10000 train_time:304384ms step_avg:42.86ms +[2025-09-11 11:22:00] [Rank 0] step:7101/10000 train_time:304384ms step_avg:42.86ms +[2025-09-11 11:22:01] [Rank 0] step:7121/10000 train_time:305082ms step_avg:42.84ms +[2025-09-11 11:22:01] [Rank 0] step:7121/10000 train_time:305082ms step_avg:42.84ms +[2025-09-11 11:22:01] [Rank 0] step:7141/10000 train_time:305785ms step_avg:42.82ms +[2025-09-11 11:22:01] [Rank 0] step:7141/10000 train_time:305785ms step_avg:42.82ms +[2025-09-11 11:22:02] [Rank 0] step:7161/10000 train_time:306486ms step_avg:42.80ms +[2025-09-11 11:22:02] [Rank 0] step:7161/10000 train_time:306486ms step_avg:42.80ms +[2025-09-11 11:22:03] [Rank 0] step:7181/10000 train_time:307183ms step_avg:42.78ms +[2025-09-11 11:22:03] [Rank 0] step:7181/10000 train_time:307183ms step_avg:42.78ms +[2025-09-11 11:22:03] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 11:22:03] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 11:22:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 11:22:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 11:22:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 11:22:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 11:22:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:22:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:22:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 11:22:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 11:22:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 11:22:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 11:22:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 11:22:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 11:22:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 11:22:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 11:22:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 11:22:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 11:22:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 11:22:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 11:22:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 11:22:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 11:22:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 11:22:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 11:22:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 11:22:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 11:22:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 11:22:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 11:22:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 11:22:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 11:22:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 11:22:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 11:22:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 11:22:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 11:22:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 11:22:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 11:22:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 11:22:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 11:22:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 11:22:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 11:22:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 11:22:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 11:22:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 11:22:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 11:22:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:22:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:22:14] [Rank 0] PRINT: step:7200/10000 val_loss:4.9735 total_sharp:1.9737e-04 L1_sharp:6.9897e-05 L2_sharp:2.7316e-05 L3_sharp:1.3689e-05 L4_sharp:1.8140e-05 L5_sharp:3.1928e-05 L6_sharp:2.4099e-05 L7_sharp:1.9187e-05 L8_sharp:4.1088e-05 L9_sharp:4.0394e-05 L10_sharp:4.8169e-05 L11_sharp:5.6647e-05 L12_sharp:2.5258e-04 total_fnorm:2.9375e+01 total_l1_linf:6.2720e+04 total_spectral:1.4812e+01 L1_fnorm:8.9375e+00 L2_fnorm:8.6250e+00 L3_fnorm:8.5000e+00 L4_fnorm:8.5625e+00 L5_fnorm:8.2500e+00 L6_fnorm:8.5000e+00 L7_fnorm:8.5000e+00 L8_fnorm:8.2500e+00 L9_fnorm:8.5000e+00 L10_fnorm:8.5000e+00 L11_fnorm:8.5000e+00 L12_fnorm:8.3125e+00 L1_l1linf:2.1094e+00 L2_l1linf:2.0312e+00 L3_l1linf:1.9062e+00 L4_l1linf:1.9453e+00 L5_l1linf:1.7500e+00 L6_l1linf:1.7344e+00 L7_l1linf:1.7188e+00 L8_l1linf:1.5859e+00 L9_l1linf:1.6250e+00 L10_l1linf:1.6250e+00 L11_l1linf:1.6562e+00 L12_l1linf:1.5547e+00 L1_spectral:1.1849e-01 L2_spectral:1.1514e-01 L3_spectral:1.1553e-01 L4_spectral:1.1634e-01 L5_spectral:1.1417e-01 L6_spectral:1.1676e-01 L7_spectral:1.1651e-01 L8_spectral:1.1521e-01 L9_spectral:1.1702e-01 L10_spectral:1.1730e-01 L11_spectral:1.1590e-01 L12_spectral:1.1644e-01 train_time:307861ms step_avg:42.76ms +[2025-09-11 11:22:14] [Rank 0] PRINT: step:7200/10000 
val_loss:4.9735 total_sharp:1.9737e-04 L1_sharp:6.9897e-05 L2_sharp:2.7316e-05 L3_sharp:1.3689e-05 L4_sharp:1.8140e-05 L5_sharp:3.1928e-05 L6_sharp:2.4099e-05 L7_sharp:1.9187e-05 L8_sharp:4.1088e-05 L9_sharp:4.0394e-05 L10_sharp:4.8169e-05 L11_sharp:5.6647e-05 L12_sharp:2.5258e-04 total_fnorm:2.9375e+01 total_l1_linf:6.2720e+04 total_spectral:1.4812e+01 L1_fnorm:8.9375e+00 L2_fnorm:8.6250e+00 L3_fnorm:8.5000e+00 L4_fnorm:8.5625e+00 L5_fnorm:8.2500e+00 L6_fnorm:8.5000e+00 L7_fnorm:8.5000e+00 L8_fnorm:8.2500e+00 L9_fnorm:8.5000e+00 L10_fnorm:8.5000e+00 L11_fnorm:8.5000e+00 L12_fnorm:8.3125e+00 L1_l1linf:2.1094e+00 L2_l1linf:2.0312e+00 L3_l1linf:1.9062e+00 L4_l1linf:1.9453e+00 L5_l1linf:1.7500e+00 L6_l1linf:1.7344e+00 L7_l1linf:1.7188e+00 L8_l1linf:1.5859e+00 L9_l1linf:1.6250e+00 L10_l1linf:1.6250e+00 L11_l1linf:1.6562e+00 L12_l1linf:1.5547e+00 L1_spectral:1.1849e-01 L2_spectral:1.1514e-01 L3_spectral:1.1553e-01 L4_spectral:1.1634e-01 L5_spectral:1.1417e-01 L6_spectral:1.1676e-01 L7_spectral:1.1651e-01 L8_spectral:1.1521e-01 L9_spectral:1.1702e-01 L10_spectral:1.1730e-01 L11_spectral:1.1590e-01 L12_spectral:1.1644e-01 train_time:307861ms step_avg:42.76ms +[2025-09-11 11:22:16] [Rank 0] step:7201/10000 train_time:309660ms step_avg:43.00ms +[2025-09-11 11:22:16] [Rank 0] step:7201/10000 train_time:309660ms step_avg:43.00ms +[2025-09-11 11:22:16] [Rank 0] step:7221/10000 train_time:310375ms step_avg:42.98ms +[2025-09-11 11:22:16] [Rank 0] step:7221/10000 train_time:310375ms step_avg:42.98ms +[2025-09-11 11:22:17] [Rank 0] step:7241/10000 train_time:311076ms step_avg:42.96ms +[2025-09-11 11:22:17] [Rank 0] step:7241/10000 train_time:311076ms step_avg:42.96ms +[2025-09-11 11:22:18] [Rank 0] step:7261/10000 train_time:311776ms step_avg:42.94ms +[2025-09-11 11:22:18] [Rank 0] step:7261/10000 train_time:311776ms step_avg:42.94ms +[2025-09-11 11:22:19] [Rank 0] step:7281/10000 train_time:312480ms step_avg:42.92ms +[2025-09-11 11:22:19] [Rank 0] step:7281/10000 
train_time:312480ms step_avg:42.92ms +[2025-09-11 11:22:19] [Rank 0] step:7301/10000 train_time:313177ms step_avg:42.90ms +[2025-09-11 11:22:19] [Rank 0] step:7301/10000 train_time:313177ms step_avg:42.90ms +[2025-09-11 11:22:20] [Rank 0] step:7321/10000 train_time:313876ms step_avg:42.87ms +[2025-09-11 11:22:20] [Rank 0] step:7321/10000 train_time:313876ms step_avg:42.87ms +[2025-09-11 11:22:21] [Rank 0] step:7341/10000 train_time:314575ms step_avg:42.85ms +[2025-09-11 11:22:21] [Rank 0] step:7341/10000 train_time:314575ms step_avg:42.85ms +[2025-09-11 11:22:21] [Rank 0] step:7361/10000 train_time:315275ms step_avg:42.83ms +[2025-09-11 11:22:21] [Rank 0] step:7361/10000 train_time:315275ms step_avg:42.83ms +[2025-09-11 11:22:22] [Rank 0] step:7381/10000 train_time:315974ms step_avg:42.81ms +[2025-09-11 11:22:22] [Rank 0] step:7381/10000 train_time:315974ms step_avg:42.81ms +[2025-09-11 11:22:23] [Rank 0] step:7401/10000 train_time:316671ms step_avg:42.79ms +[2025-09-11 11:22:23] [Rank 0] step:7401/10000 train_time:316671ms step_avg:42.79ms +[2025-09-11 11:22:23] [Rank 0] step:7421/10000 train_time:317370ms step_avg:42.77ms +[2025-09-11 11:22:23] [Rank 0] step:7421/10000 train_time:317370ms step_avg:42.77ms +[2025-09-11 11:22:24] [Rank 0] step:7441/10000 train_time:318069ms step_avg:42.75ms +[2025-09-11 11:22:24] [Rank 0] step:7441/10000 train_time:318069ms step_avg:42.75ms +[2025-09-11 11:22:25] [Rank 0] step:7461/10000 train_time:318769ms step_avg:42.72ms +[2025-09-11 11:22:25] [Rank 0] step:7461/10000 train_time:318769ms step_avg:42.72ms +[2025-09-11 11:22:26] [Rank 0] step:7481/10000 train_time:319471ms step_avg:42.70ms +[2025-09-11 11:22:26] [Rank 0] step:7481/10000 train_time:319471ms step_avg:42.70ms +[2025-09-11 11:22:26] [Rank 0] step:7501/10000 train_time:320170ms step_avg:42.68ms +[2025-09-11 11:22:26] [Rank 0] step:7501/10000 train_time:320170ms step_avg:42.68ms +[2025-09-11 11:22:27] [Rank 0] step:7521/10000 train_time:320870ms step_avg:42.66ms 
+[2025-09-11 11:22:27] [Rank 0] step:7521/10000 train_time:320870ms step_avg:42.66ms +[2025-09-11 11:22:28] [Rank 0] step:7541/10000 train_time:321567ms step_avg:42.64ms +[2025-09-11 11:22:28] [Rank 0] step:7541/10000 train_time:321567ms step_avg:42.64ms +[2025-09-11 11:22:28] [Rank 0] step:7561/10000 train_time:322268ms step_avg:42.62ms +[2025-09-11 11:22:28] [Rank 0] step:7561/10000 train_time:322268ms step_avg:42.62ms +[2025-09-11 11:22:29] [Rank 0] step:7581/10000 train_time:322967ms step_avg:42.60ms +[2025-09-11 11:22:29] [Rank 0] step:7581/10000 train_time:322967ms step_avg:42.60ms +[2025-09-11 11:22:30] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 11:22:30] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 11:22:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 11:22:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 11:22:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 11:22:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 11:22:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:22:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:22:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 11:22:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 11:22:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 11:22:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 11:22:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 11:22:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 11:22:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 11:22:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 11:22:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 11:22:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 11:22:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 11:22:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 11:22:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 11:22:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 11:22:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 11:22:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 11:22:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 11:22:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 11:22:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 11:22:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 11:22:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 11:22:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 11:22:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 11:22:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 11:22:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 11:22:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 11:22:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 11:22:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 11:22:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 11:22:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 11:22:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 11:22:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 11:22:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 11:22:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 11:22:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 11:22:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 11:22:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:22:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:22:43] [Rank 0] PRINT: step:7600/10000 val_loss:4.9481 total_sharp:2.0570e-04 L1_sharp:8.7714e-05 L2_sharp:5.0663e-05 L3_sharp:9.4955e-06 L4_sharp:2.0261e-05 L5_sharp:2.5888e-05 L6_sharp:2.4130e-05 L7_sharp:2.3402e-05 L8_sharp:3.4796e-05 L9_sharp:3.7274e-05 L10_sharp:5.1676e-05 L11_sharp:5.6007e-05 L12_sharp:1.9869e-04 total_fnorm:2.5000e+01 total_l1_linf:4.9408e+04 total_spectral:1.2438e+01 L1_fnorm:7.5625e+00 L2_fnorm:7.2812e+00 L3_fnorm:7.2188e+00 L4_fnorm:7.2500e+00 L5_fnorm:6.9375e+00 L6_fnorm:7.1562e+00 L7_fnorm:7.1562e+00 L8_fnorm:6.9688e+00 L9_fnorm:7.1562e+00 L10_fnorm:7.1875e+00 L11_fnorm:7.1562e+00 L12_fnorm:7.0312e+00 L1_l1linf:1.6797e+00 L2_l1linf:1.5938e+00 L3_l1linf:1.5469e+00 L4_l1linf:1.4922e+00 L5_l1linf:1.4375e+00 L6_l1linf:1.4141e+00 L7_l1linf:1.3516e+00 L8_l1linf:1.3047e+00 L9_l1linf:1.3203e+00 L10_l1linf:1.3281e+00 L11_l1linf:1.3359e+00 L12_l1linf:1.3125e+00 L1_spectral:1.0188e-01 L2_spectral:9.9957e-02 L3_spectral:1.0035e-01 L4_spectral:9.9986e-02 L5_spectral:9.8078e-02 L6_spectral:1.0132e-01 L7_spectral:1.0111e-01 L8_spectral:9.9990e-02 L9_spectral:1.0153e-01 L10_spectral:1.0158e-01 L11_spectral:1.0133e-01 L12_spectral:1.0081e-01 train_time:323648ms step_avg:42.59ms +[2025-09-11 11:22:43] [Rank 0] PRINT: step:7600/10000 val_loss:4.9481 total_sharp:2.0570e-04 L1_sharp:8.7714e-05 L2_sharp:5.0663e-05 L3_sharp:9.4955e-06 L4_sharp:2.0261e-05 L5_sharp:2.5888e-05 L6_sharp:2.4130e-05 L7_sharp:2.3402e-05 L8_sharp:3.4796e-05 L9_sharp:3.7274e-05 L10_sharp:5.1676e-05 L11_sharp:5.6007e-05 L12_sharp:1.9869e-04 total_fnorm:2.5000e+01 total_l1_linf:4.9408e+04 total_spectral:1.2438e+01 L1_fnorm:7.5625e+00 L2_fnorm:7.2812e+00 L3_fnorm:7.2188e+00 L4_fnorm:7.2500e+00 L5_fnorm:6.9375e+00 L6_fnorm:7.1562e+00 L7_fnorm:7.1562e+00 L8_fnorm:6.9688e+00 L9_fnorm:7.1562e+00 L10_fnorm:7.1875e+00 L11_fnorm:7.1562e+00 L12_fnorm:7.0312e+00 L1_l1linf:1.6797e+00 L2_l1linf:1.5938e+00 L3_l1linf:1.5469e+00 L4_l1linf:1.4922e+00 L5_l1linf:1.4375e+00 
L6_l1linf:1.4141e+00 L7_l1linf:1.3516e+00 L8_l1linf:1.3047e+00 L9_l1linf:1.3203e+00 L10_l1linf:1.3281e+00 L11_l1linf:1.3359e+00 L12_l1linf:1.3125e+00 L1_spectral:1.0188e-01 L2_spectral:9.9957e-02 L3_spectral:1.0035e-01 L4_spectral:9.9986e-02 L5_spectral:9.8078e-02 L6_spectral:1.0132e-01 L7_spectral:1.0111e-01 L8_spectral:9.9990e-02 L9_spectral:1.0153e-01 L10_spectral:1.0158e-01 L11_spectral:1.0133e-01 L12_spectral:1.0081e-01 train_time:323648ms step_avg:42.59ms +[2025-09-11 11:22:45] [Rank 0] step:7601/10000 train_time:325023ms step_avg:42.76ms +[2025-09-11 11:22:45] [Rank 0] step:7601/10000 train_time:325023ms step_avg:42.76ms +[2025-09-11 11:22:45] [Rank 0] step:7621/10000 train_time:325743ms step_avg:42.74ms +[2025-09-11 11:22:45] [Rank 0] step:7621/10000 train_time:325743ms step_avg:42.74ms +[2025-09-11 11:22:46] [Rank 0] step:7641/10000 train_time:326445ms step_avg:42.72ms +[2025-09-11 11:22:46] [Rank 0] step:7641/10000 train_time:326445ms step_avg:42.72ms +[2025-09-11 11:22:47] [Rank 0] step:7661/10000 train_time:327145ms step_avg:42.70ms +[2025-09-11 11:22:47] [Rank 0] step:7661/10000 train_time:327145ms step_avg:42.70ms +[2025-09-11 11:22:47] [Rank 0] step:7681/10000 train_time:327846ms step_avg:42.68ms +[2025-09-11 11:22:47] [Rank 0] step:7681/10000 train_time:327846ms step_avg:42.68ms +[2025-09-11 11:22:48] [Rank 0] step:7701/10000 train_time:328549ms step_avg:42.66ms +[2025-09-11 11:22:48] [Rank 0] step:7701/10000 train_time:328549ms step_avg:42.66ms +[2025-09-11 11:22:49] [Rank 0] step:7721/10000 train_time:329249ms step_avg:42.64ms +[2025-09-11 11:22:49] [Rank 0] step:7721/10000 train_time:329249ms step_avg:42.64ms +[2025-09-11 11:22:50] [Rank 0] step:7741/10000 train_time:329949ms step_avg:42.62ms +[2025-09-11 11:22:50] [Rank 0] step:7741/10000 train_time:329949ms step_avg:42.62ms +[2025-09-11 11:22:50] [Rank 0] step:7761/10000 train_time:330651ms step_avg:42.60ms +[2025-09-11 11:22:50] [Rank 0] step:7761/10000 train_time:330651ms step_avg:42.60ms 
+[2025-09-11 11:22:51] [Rank 0] step:7781/10000 train_time:331353ms step_avg:42.58ms +[2025-09-11 11:22:51] [Rank 0] step:7781/10000 train_time:331353ms step_avg:42.58ms +[2025-09-11 11:22:52] [Rank 0] step:7801/10000 train_time:332053ms step_avg:42.57ms +[2025-09-11 11:22:52] [Rank 0] step:7801/10000 train_time:332053ms step_avg:42.57ms +[2025-09-11 11:22:52] [Rank 0] step:7821/10000 train_time:332754ms step_avg:42.55ms +[2025-09-11 11:22:52] [Rank 0] step:7821/10000 train_time:332754ms step_avg:42.55ms +[2025-09-11 11:22:53] [Rank 0] step:7841/10000 train_time:333457ms step_avg:42.53ms +[2025-09-11 11:22:53] [Rank 0] step:7841/10000 train_time:333457ms step_avg:42.53ms +[2025-09-11 11:22:54] [Rank 0] step:7861/10000 train_time:334159ms step_avg:42.51ms +[2025-09-11 11:22:54] [Rank 0] step:7861/10000 train_time:334159ms step_avg:42.51ms +[2025-09-11 11:22:54] [Rank 0] step:7881/10000 train_time:334859ms step_avg:42.49ms +[2025-09-11 11:22:54] [Rank 0] step:7881/10000 train_time:334859ms step_avg:42.49ms +[2025-09-11 11:22:55] [Rank 0] step:7901/10000 train_time:335560ms step_avg:42.47ms +[2025-09-11 11:22:55] [Rank 0] step:7901/10000 train_time:335560ms step_avg:42.47ms +[2025-09-11 11:22:56] [Rank 0] step:7921/10000 train_time:336262ms step_avg:42.45ms +[2025-09-11 11:22:56] [Rank 0] step:7921/10000 train_time:336262ms step_avg:42.45ms +[2025-09-11 11:22:57] [Rank 0] step:7941/10000 train_time:336964ms step_avg:42.43ms +[2025-09-11 11:22:57] [Rank 0] step:7941/10000 train_time:336964ms step_avg:42.43ms +[2025-09-11 11:22:57] [Rank 0] step:7961/10000 train_time:337664ms step_avg:42.41ms +[2025-09-11 11:22:57] [Rank 0] step:7961/10000 train_time:337664ms step_avg:42.41ms +[2025-09-11 11:22:58] [Rank 0] step:7981/10000 train_time:338366ms step_avg:42.40ms +[2025-09-11 11:22:58] [Rank 0] step:7981/10000 train_time:338366ms step_avg:42.40ms +[2025-09-11 11:22:59] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 11:22:59] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 11:22:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 11:22:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 11:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 11:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 11:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 11:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 11:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 11:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 11:23:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 11:23:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 11:23:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 11:23:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 11:23:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 11:23:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 11:23:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 11:23:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 11:23:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 11:23:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 11:23:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 11:23:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 11:23:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 11:23:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 11:23:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 11:23:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 11:23:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 11:23:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 11:23:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 11:23:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 11:23:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 11:23:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 11:23:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 11:23:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 11:23:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 11:23:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 11:23:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 11:23:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 11:23:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 11:23:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 11:23:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 11:23:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 11:23:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:23:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:23:09] [Rank 0] PRINT: step:8000/10000 val_loss:4.9296 total_sharp:1.9483e-04 L1_sharp:6.4668e-05 L2_sharp:4.0063e-05 L3_sharp:2.3544e-05 L4_sharp:1.7375e-05 L5_sharp:3.2438e-05 L6_sharp:2.1499e-05 L7_sharp:1.9976e-05 L8_sharp:3.9560e-05 L9_sharp:3.1190e-05 L10_sharp:5.3358e-05 L11_sharp:6.0485e-05 L12_sharp:1.9915e-04 total_fnorm:2.0125e+01 total_l1_linf:3.7376e+04 total_spectral:1.0125e+01 L1_fnorm:6.2188e+00 L2_fnorm:5.9375e+00 L3_fnorm:5.8750e+00 L4_fnorm:5.8750e+00 L5_fnorm:5.6250e+00 L6_fnorm:5.8125e+00 L7_fnorm:5.8125e+00 L8_fnorm:5.6250e+00 L9_fnorm:5.7812e+00 L10_fnorm:5.8125e+00 L11_fnorm:5.8125e+00 L12_fnorm:5.6875e+00 L1_l1linf:1.3203e+00 L2_l1linf:1.2344e+00 L3_l1linf:1.1875e+00 L4_l1linf:1.1719e+00 L5_l1linf:1.0938e+00 L6_l1linf:1.0781e+00 L7_l1linf:1.0781e+00 L8_l1linf:1.0156e+00 L9_l1linf:1.0000e+00 L10_l1linf:1.0156e+00 L11_l1linf:1.0312e+00 L12_l1linf:1.0078e+00 L1_spectral:8.5835e-02 L2_spectral:8.2410e-02 L3_spectral:8.3272e-02 L4_spectral:8.3976e-02 L5_spectral:8.1488e-02 L6_spectral:8.4289e-02 L7_spectral:8.4158e-02 L8_spectral:8.2798e-02 L9_spectral:8.4235e-02 L10_spectral:8.3621e-02 L11_spectral:8.3589e-02 L12_spectral:8.4373e-02 train_time:339046ms step_avg:42.38ms +[2025-09-11 11:23:09] [Rank 0] PRINT: step:8000/10000 
val_loss:4.9296 total_sharp:1.9483e-04 L1_sharp:6.4668e-05 L2_sharp:4.0063e-05 L3_sharp:2.3544e-05 L4_sharp:1.7375e-05 L5_sharp:3.2438e-05 L6_sharp:2.1499e-05 L7_sharp:1.9976e-05 L8_sharp:3.9560e-05 L9_sharp:3.1190e-05 L10_sharp:5.3358e-05 L11_sharp:6.0485e-05 L12_sharp:1.9915e-04 total_fnorm:2.0125e+01 total_l1_linf:3.7376e+04 total_spectral:1.0125e+01 L1_fnorm:6.2188e+00 L2_fnorm:5.9375e+00 L3_fnorm:5.8750e+00 L4_fnorm:5.8750e+00 L5_fnorm:5.6250e+00 L6_fnorm:5.8125e+00 L7_fnorm:5.8125e+00 L8_fnorm:5.6250e+00 L9_fnorm:5.7812e+00 L10_fnorm:5.8125e+00 L11_fnorm:5.8125e+00 L12_fnorm:5.6875e+00 L1_l1linf:1.3203e+00 L2_l1linf:1.2344e+00 L3_l1linf:1.1875e+00 L4_l1linf:1.1719e+00 L5_l1linf:1.0938e+00 L6_l1linf:1.0781e+00 L7_l1linf:1.0781e+00 L8_l1linf:1.0156e+00 L9_l1linf:1.0000e+00 L10_l1linf:1.0156e+00 L11_l1linf:1.0312e+00 L12_l1linf:1.0078e+00 L1_spectral:8.5835e-02 L2_spectral:8.2410e-02 L3_spectral:8.3272e-02 L4_spectral:8.3976e-02 L5_spectral:8.1488e-02 L6_spectral:8.4289e-02 L7_spectral:8.4158e-02 L8_spectral:8.2798e-02 L9_spectral:8.4235e-02 L10_spectral:8.3621e-02 L11_spectral:8.3589e-02 L12_spectral:8.4373e-02 train_time:339046ms step_avg:42.38ms +[2025-09-11 11:23:10] [Rank 0] step:8001/10000 train_time:340311ms step_avg:42.53ms +[2025-09-11 11:23:10] [Rank 0] step:8001/10000 train_time:340311ms step_avg:42.53ms +[2025-09-11 11:23:11] [Rank 0] step:8021/10000 train_time:341290ms step_avg:42.55ms +[2025-09-11 11:23:11] [Rank 0] step:8021/10000 train_time:341290ms step_avg:42.55ms +[2025-09-11 11:23:12] [Rank 0] step:8041/10000 train_time:341992ms step_avg:42.53ms +[2025-09-11 11:23:12] [Rank 0] step:8041/10000 train_time:341992ms step_avg:42.53ms +[2025-09-11 11:23:12] [Rank 0] step:8061/10000 train_time:342697ms step_avg:42.51ms +[2025-09-11 11:23:12] [Rank 0] step:8061/10000 train_time:342697ms step_avg:42.51ms +[2025-09-11 11:23:13] [Rank 0] step:8081/10000 train_time:343396ms step_avg:42.49ms +[2025-09-11 11:23:13] [Rank 0] step:8081/10000 
train_time:343396ms step_avg:42.49ms +[2025-09-11 11:23:14] [Rank 0] step:8101/10000 train_time:344096ms step_avg:42.48ms +[2025-09-11 11:23:14] [Rank 0] step:8101/10000 train_time:344096ms step_avg:42.48ms +[2025-09-11 11:23:14] [Rank 0] step:8121/10000 train_time:344800ms step_avg:42.46ms +[2025-09-11 11:23:14] [Rank 0] step:8121/10000 train_time:344800ms step_avg:42.46ms +[2025-09-11 11:23:15] [Rank 0] step:8141/10000 train_time:345823ms step_avg:42.48ms +[2025-09-11 11:23:15] [Rank 0] step:8141/10000 train_time:345823ms step_avg:42.48ms +[2025-09-11 11:23:16] [Rank 0] step:8161/10000 train_time:346527ms step_avg:42.46ms +[2025-09-11 11:23:16] [Rank 0] step:8161/10000 train_time:346527ms step_avg:42.46ms +[2025-09-11 11:23:17] [Rank 0] step:8181/10000 train_time:347240ms step_avg:42.44ms +[2025-09-11 11:23:17] [Rank 0] step:8181/10000 train_time:347240ms step_avg:42.44ms +[2025-09-11 11:23:18] [Rank 0] step:8201/10000 train_time:347949ms step_avg:42.43ms +[2025-09-11 11:23:18] [Rank 0] step:8201/10000 train_time:347949ms step_avg:42.43ms +[2025-09-11 11:23:18] [Rank 0] step:8221/10000 train_time:348656ms step_avg:42.41ms +[2025-09-11 11:23:18] [Rank 0] step:8221/10000 train_time:348656ms step_avg:42.41ms +[2025-09-11 11:23:19] [Rank 0] step:8241/10000 train_time:349375ms step_avg:42.39ms +[2025-09-11 11:23:19] [Rank 0] step:8241/10000 train_time:349375ms step_avg:42.39ms +[2025-09-11 11:23:20] [Rank 0] step:8261/10000 train_time:350082ms step_avg:42.38ms +[2025-09-11 11:23:20] [Rank 0] step:8261/10000 train_time:350082ms step_avg:42.38ms +[2025-09-11 11:23:20] [Rank 0] step:8281/10000 train_time:350787ms step_avg:42.36ms +[2025-09-11 11:23:20] [Rank 0] step:8281/10000 train_time:350787ms step_avg:42.36ms +[2025-09-11 11:23:21] [Rank 0] step:8301/10000 train_time:351494ms step_avg:42.34ms +[2025-09-11 11:23:21] [Rank 0] step:8301/10000 train_time:351494ms step_avg:42.34ms +[2025-09-11 11:23:22] [Rank 0] step:8321/10000 train_time:352202ms step_avg:42.33ms 
+[2025-09-11 11:23:22] [Rank 0] step:8321/10000 train_time:352202ms step_avg:42.33ms +[2025-09-11 11:23:23] [Rank 0] step:8341/10000 train_time:352917ms step_avg:42.31ms +[2025-09-11 11:23:23] [Rank 0] step:8341/10000 train_time:352917ms step_avg:42.31ms +[2025-09-11 11:23:23] [Rank 0] step:8361/10000 train_time:353620ms step_avg:42.29ms +[2025-09-11 11:23:23] [Rank 0] step:8361/10000 train_time:353620ms step_avg:42.29ms +[2025-09-11 11:23:24] [Rank 0] step:8381/10000 train_time:354331ms step_avg:42.28ms +[2025-09-11 11:23:24] [Rank 0] step:8381/10000 train_time:354331ms step_avg:42.28ms +[2025-09-11 11:23:25] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 11:23:25] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 11:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 11:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 11:23:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 11:23:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 11:23:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:23:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:23:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 11:23:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 11:23:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 11:23:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 11:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 11:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 11:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 11:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 11:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 11:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 11:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 11:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 11:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 11:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 11:23:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 11:23:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 11:23:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 11:23:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 11:23:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 11:23:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 11:23:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 11:23:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 11:23:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 11:23:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 11:23:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 11:23:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 11:23:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 11:23:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 11:23:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 11:23:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 11:23:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 11:23:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 11:23:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 11:23:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 11:23:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 11:23:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 11:23:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:23:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:23:34] [Rank 0] PRINT: step:8400/10000 val_loss:4.9090 total_sharp:1.5365e-04 L1_sharp:5.2086e-05 L2_sharp:2.5896e-05 L3_sharp:9.7908e-06 L4_sharp:1.8530e-05 L5_sharp:2.8385e-05 L6_sharp:1.6435e-05 L7_sharp:1.6451e-05 L8_sharp:3.5501e-05 L9_sharp:2.9594e-05 L10_sharp:4.0710e-05 L11_sharp:4.5663e-05 L12_sharp:1.8724e-04 total_fnorm:1.5875e+01 total_l1_linf:2.6880e+04 total_spectral:7.9062e+00 L1_fnorm:4.8750e+00 L2_fnorm:4.6250e+00 L3_fnorm:4.5938e+00 L4_fnorm:4.5938e+00 L5_fnorm:4.4062e+00 L6_fnorm:4.5000e+00 L7_fnorm:4.5312e+00 L8_fnorm:4.3750e+00 L9_fnorm:4.5000e+00 L10_fnorm:4.5000e+00 L11_fnorm:4.5312e+00 L12_fnorm:4.5000e+00 L1_l1linf:9.7656e-01 L2_l1linf:9.4141e-01 L3_l1linf:8.7500e-01 L4_l1linf:8.5156e-01 L5_l1linf:8.2812e-01 L6_l1linf:7.9297e-01 L7_l1linf:7.8906e-01 L8_l1linf:7.5000e-01 L9_l1linf:7.3828e-01 L10_l1linf:7.7734e-01 L11_l1linf:7.3828e-01 L12_l1linf:7.9688e-01 L1_spectral:6.8477e-02 L2_spectral:6.5994e-02 L3_spectral:6.5937e-02 L4_spectral:6.6260e-02 L5_spectral:6.5904e-02 L6_spectral:6.6713e-02 L7_spectral:6.7305e-02 L8_spectral:6.6635e-02 L9_spectral:6.7015e-02 L10_spectral:6.6568e-02 L11_spectral:6.6283e-02 L12_spectral:6.7827e-02 train_time:355021ms step_avg:42.26ms +[2025-09-11 11:23:34] [Rank 0] PRINT: step:8400/10000 val_loss:4.9090 total_sharp:1.5365e-04 L1_sharp:5.2086e-05 L2_sharp:2.5896e-05 L3_sharp:9.7908e-06 L4_sharp:1.8530e-05 L5_sharp:2.8385e-05 L6_sharp:1.6435e-05 L7_sharp:1.6451e-05 L8_sharp:3.5501e-05 L9_sharp:2.9594e-05 L10_sharp:4.0710e-05 L11_sharp:4.5663e-05 L12_sharp:1.8724e-04 total_fnorm:1.5875e+01 total_l1_linf:2.6880e+04 total_spectral:7.9062e+00 L1_fnorm:4.8750e+00 L2_fnorm:4.6250e+00 L3_fnorm:4.5938e+00 L4_fnorm:4.5938e+00 L5_fnorm:4.4062e+00 L6_fnorm:4.5000e+00 L7_fnorm:4.5312e+00 L8_fnorm:4.3750e+00 L9_fnorm:4.5000e+00 L10_fnorm:4.5000e+00 L11_fnorm:4.5312e+00 L12_fnorm:4.5000e+00 L1_l1linf:9.7656e-01 L2_l1linf:9.4141e-01 L3_l1linf:8.7500e-01 L4_l1linf:8.5156e-01 L5_l1linf:8.2812e-01 
L6_l1linf:7.9297e-01 L7_l1linf:7.8906e-01 L8_l1linf:7.5000e-01 L9_l1linf:7.3828e-01 L10_l1linf:7.7734e-01 L11_l1linf:7.3828e-01 L12_l1linf:7.9688e-01 L1_spectral:6.8477e-02 L2_spectral:6.5994e-02 L3_spectral:6.5937e-02 L4_spectral:6.6260e-02 L5_spectral:6.5904e-02 L6_spectral:6.6713e-02 L7_spectral:6.7305e-02 L8_spectral:6.6635e-02 L9_spectral:6.7015e-02 L10_spectral:6.6568e-02 L11_spectral:6.6283e-02 L12_spectral:6.7827e-02 train_time:355021ms step_avg:42.26ms +[2025-09-11 11:23:36] [Rank 0] step:8401/10000 train_time:356223ms step_avg:42.40ms +[2025-09-11 11:23:36] [Rank 0] step:8401/10000 train_time:356223ms step_avg:42.40ms +[2025-09-11 11:23:36] [Rank 0] step:8421/10000 train_time:356959ms step_avg:42.39ms +[2025-09-11 11:23:36] [Rank 0] step:8421/10000 train_time:356959ms step_avg:42.39ms +[2025-09-11 11:23:37] [Rank 0] step:8441/10000 train_time:357670ms step_avg:42.37ms +[2025-09-11 11:23:37] [Rank 0] step:8441/10000 train_time:357670ms step_avg:42.37ms +[2025-09-11 11:23:38] [Rank 0] step:8461/10000 train_time:358378ms step_avg:42.36ms +[2025-09-11 11:23:38] [Rank 0] step:8461/10000 train_time:358378ms step_avg:42.36ms +[2025-09-11 11:23:39] [Rank 0] step:8481/10000 train_time:359089ms step_avg:42.34ms +[2025-09-11 11:23:39] [Rank 0] step:8481/10000 train_time:359089ms step_avg:42.34ms +[2025-09-11 11:23:39] [Rank 0] step:8501/10000 train_time:359797ms step_avg:42.32ms +[2025-09-11 11:23:39] [Rank 0] step:8501/10000 train_time:359797ms step_avg:42.32ms +[2025-09-11 11:23:40] [Rank 0] step:8521/10000 train_time:360504ms step_avg:42.31ms +[2025-09-11 11:23:40] [Rank 0] step:8521/10000 train_time:360504ms step_avg:42.31ms +[2025-09-11 11:23:41] [Rank 0] step:8541/10000 train_time:361211ms step_avg:42.29ms +[2025-09-11 11:23:41] [Rank 0] step:8541/10000 train_time:361211ms step_avg:42.29ms +[2025-09-11 11:23:41] [Rank 0] step:8561/10000 train_time:361924ms step_avg:42.28ms +[2025-09-11 11:23:41] [Rank 0] step:8561/10000 train_time:361924ms step_avg:42.28ms 
+[2025-09-11 11:23:42] [Rank 0] step:8581/10000 train_time:362634ms step_avg:42.26ms +[2025-09-11 11:23:42] [Rank 0] step:8581/10000 train_time:362634ms step_avg:42.26ms +[2025-09-11 11:23:43] [Rank 0] step:8601/10000 train_time:363344ms step_avg:42.24ms +[2025-09-11 11:23:43] [Rank 0] step:8601/10000 train_time:363344ms step_avg:42.24ms +[2025-09-11 11:23:44] [Rank 0] step:8621/10000 train_time:364051ms step_avg:42.23ms +[2025-09-11 11:23:44] [Rank 0] step:8621/10000 train_time:364051ms step_avg:42.23ms +[2025-09-11 11:23:44] [Rank 0] step:8641/10000 train_time:364758ms step_avg:42.21ms +[2025-09-11 11:23:44] [Rank 0] step:8641/10000 train_time:364758ms step_avg:42.21ms +[2025-09-11 11:23:45] [Rank 0] step:8661/10000 train_time:365468ms step_avg:42.20ms +[2025-09-11 11:23:45] [Rank 0] step:8661/10000 train_time:365468ms step_avg:42.20ms +[2025-09-11 11:23:46] [Rank 0] step:8681/10000 train_time:366178ms step_avg:42.18ms +[2025-09-11 11:23:46] [Rank 0] step:8681/10000 train_time:366178ms step_avg:42.18ms +[2025-09-11 11:23:46] [Rank 0] step:8701/10000 train_time:366885ms step_avg:42.17ms +[2025-09-11 11:23:46] [Rank 0] step:8701/10000 train_time:366885ms step_avg:42.17ms +[2025-09-11 11:23:47] [Rank 0] step:8721/10000 train_time:367596ms step_avg:42.15ms +[2025-09-11 11:23:47] [Rank 0] step:8721/10000 train_time:367596ms step_avg:42.15ms +[2025-09-11 11:23:48] [Rank 0] step:8741/10000 train_time:368302ms step_avg:42.13ms +[2025-09-11 11:23:48] [Rank 0] step:8741/10000 train_time:368302ms step_avg:42.13ms +[2025-09-11 11:23:48] [Rank 0] step:8761/10000 train_time:369014ms step_avg:42.12ms +[2025-09-11 11:23:48] [Rank 0] step:8761/10000 train_time:369014ms step_avg:42.12ms +[2025-09-11 11:23:49] [Rank 0] step:8781/10000 train_time:369720ms step_avg:42.10ms +[2025-09-11 11:23:49] [Rank 0] step:8781/10000 train_time:369720ms step_avg:42.10ms +[2025-09-11 11:23:50] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 11:23:50] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 11:23:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 11:23:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 11:23:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 11:23:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 11:23:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:23:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:23:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 11:23:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 11:23:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 11:23:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 11:23:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 11:23:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 11:23:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 11:23:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 11:23:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 11:23:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 11:23:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 11:23:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 11:23:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 11:23:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 11:23:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 11:23:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 11:23:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 11:23:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 11:23:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 11:23:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 11:23:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 11:23:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 11:23:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 11:23:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 11:23:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 11:23:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 11:23:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 11:23:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 11:23:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 11:23:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 11:23:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 11:23:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 11:23:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 11:23:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 11:23:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 11:23:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 11:24:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:24:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:24:00] [Rank 0] PRINT: step:8800/10000 val_loss:4.8932 total_sharp:1.4154e-04 L1_sharp:4.7094e-05 L2_sharp:2.2489e-05 L3_sharp:1.5473e-05 L4_sharp:1.4822e-05 L5_sharp:1.8802e-05 L6_sharp:1.3282e-05 L7_sharp:1.1573e-05 L8_sharp:2.5389e-05 L9_sharp:2.5895e-05 L10_sharp:3.8530e-05 L11_sharp:5.3584e-05 L12_sharp:2.2069e-04 total_fnorm:1.1562e+01 total_l1_linf:1.7664e+04 total_spectral:5.7812e+00 L1_fnorm:3.5938e+00 L2_fnorm:3.4219e+00 L3_fnorm:3.3594e+00 L4_fnorm:3.3438e+00 L5_fnorm:3.2500e+00 L6_fnorm:3.3125e+00 L7_fnorm:3.2969e+00 L8_fnorm:3.2031e+00 L9_fnorm:3.2656e+00 L10_fnorm:3.2812e+00 L11_fnorm:3.2969e+00 L12_fnorm:3.3125e+00 L1_l1linf:6.6797e-01 L2_l1linf:6.2891e-01 L3_l1linf:6.0156e-01 L4_l1linf:5.8203e-01 L5_l1linf:5.5469e-01 L6_l1linf:5.3516e-01 L7_l1linf:5.3125e-01 L8_l1linf:4.9219e-01 L9_l1linf:4.8242e-01 L10_l1linf:4.8633e-01 L11_l1linf:4.9805e-01 L12_l1linf:5.7031e-01 L1_spectral:5.1355e-02 L2_spectral:4.9093e-02 L3_spectral:4.9418e-02 L4_spectral:4.9132e-02 L5_spectral:4.9914e-02 L6_spectral:5.2403e-02 L7_spectral:4.9693e-02 L8_spectral:5.0013e-02 L9_spectral:4.9695e-02 L10_spectral:4.9621e-02 L11_spectral:4.9590e-02 L12_spectral:5.2162e-02 train_time:370407ms step_avg:42.09ms +[2025-09-11 11:24:00] [Rank 0] PRINT: step:8800/10000 
val_loss:4.8932 total_sharp:1.4154e-04 L1_sharp:4.7094e-05 L2_sharp:2.2489e-05 L3_sharp:1.5473e-05 L4_sharp:1.4822e-05 L5_sharp:1.8802e-05 L6_sharp:1.3282e-05 L7_sharp:1.1573e-05 L8_sharp:2.5389e-05 L9_sharp:2.5895e-05 L10_sharp:3.8530e-05 L11_sharp:5.3584e-05 L12_sharp:2.2069e-04 total_fnorm:1.1562e+01 total_l1_linf:1.7664e+04 total_spectral:5.7812e+00 L1_fnorm:3.5938e+00 L2_fnorm:3.4219e+00 L3_fnorm:3.3594e+00 L4_fnorm:3.3438e+00 L5_fnorm:3.2500e+00 L6_fnorm:3.3125e+00 L7_fnorm:3.2969e+00 L8_fnorm:3.2031e+00 L9_fnorm:3.2656e+00 L10_fnorm:3.2812e+00 L11_fnorm:3.2969e+00 L12_fnorm:3.3125e+00 L1_l1linf:6.6797e-01 L2_l1linf:6.2891e-01 L3_l1linf:6.0156e-01 L4_l1linf:5.8203e-01 L5_l1linf:5.5469e-01 L6_l1linf:5.3516e-01 L7_l1linf:5.3125e-01 L8_l1linf:4.9219e-01 L9_l1linf:4.8242e-01 L10_l1linf:4.8633e-01 L11_l1linf:4.9805e-01 L12_l1linf:5.7031e-01 L1_spectral:5.1355e-02 L2_spectral:4.9093e-02 L3_spectral:4.9418e-02 L4_spectral:4.9132e-02 L5_spectral:4.9914e-02 L6_spectral:5.2403e-02 L7_spectral:4.9693e-02 L8_spectral:5.0013e-02 L9_spectral:4.9695e-02 L10_spectral:4.9621e-02 L11_spectral:4.9590e-02 L12_spectral:5.2162e-02 train_time:370407ms step_avg:42.09ms +[2025-09-11 11:24:01] [Rank 0] step:8801/10000 train_time:371661ms step_avg:42.23ms +[2025-09-11 11:24:01] [Rank 0] step:8801/10000 train_time:371661ms step_avg:42.23ms +[2025-09-11 11:24:02] [Rank 0] step:8821/10000 train_time:372394ms step_avg:42.22ms +[2025-09-11 11:24:02] [Rank 0] step:8821/10000 train_time:372394ms step_avg:42.22ms +[2025-09-11 11:24:03] [Rank 0] step:8841/10000 train_time:373104ms step_avg:42.20ms +[2025-09-11 11:24:03] [Rank 0] step:8841/10000 train_time:373104ms step_avg:42.20ms +[2025-09-11 11:24:03] [Rank 0] step:8861/10000 train_time:373814ms step_avg:42.19ms +[2025-09-11 11:24:03] [Rank 0] step:8861/10000 train_time:373814ms step_avg:42.19ms +[2025-09-11 11:24:04] [Rank 0] step:8881/10000 train_time:374522ms step_avg:42.17ms +[2025-09-11 11:24:04] [Rank 0] step:8881/10000 
train_time:374522ms step_avg:42.17ms +[2025-09-11 11:24:05] [Rank 0] step:8901/10000 train_time:375234ms step_avg:42.16ms +[2025-09-11 11:24:05] [Rank 0] step:8901/10000 train_time:375234ms step_avg:42.16ms +[2025-09-11 11:24:05] [Rank 0] step:8921/10000 train_time:375939ms step_avg:42.14ms +[2025-09-11 11:24:05] [Rank 0] step:8921/10000 train_time:375939ms step_avg:42.14ms +[2025-09-11 11:24:06] [Rank 0] step:8941/10000 train_time:376649ms step_avg:42.13ms +[2025-09-11 11:24:06] [Rank 0] step:8941/10000 train_time:376649ms step_avg:42.13ms +[2025-09-11 11:24:07] [Rank 0] step:8961/10000 train_time:377366ms step_avg:42.11ms +[2025-09-11 11:24:07] [Rank 0] step:8961/10000 train_time:377366ms step_avg:42.11ms +[2025-09-11 11:24:07] [Rank 0] step:8981/10000 train_time:378079ms step_avg:42.10ms +[2025-09-11 11:24:07] [Rank 0] step:8981/10000 train_time:378079ms step_avg:42.10ms +[2025-09-11 11:24:08] [Rank 0] step:9001/10000 train_time:378783ms step_avg:42.08ms +[2025-09-11 11:24:08] [Rank 0] step:9001/10000 train_time:378783ms step_avg:42.08ms +[2025-09-11 11:24:09] [Rank 0] step:9021/10000 train_time:379494ms step_avg:42.07ms +[2025-09-11 11:24:09] [Rank 0] step:9021/10000 train_time:379494ms step_avg:42.07ms +[2025-09-11 11:24:10] [Rank 0] step:9041/10000 train_time:380206ms step_avg:42.05ms +[2025-09-11 11:24:10] [Rank 0] step:9041/10000 train_time:380206ms step_avg:42.05ms +[2025-09-11 11:24:10] [Rank 0] step:9061/10000 train_time:380913ms step_avg:42.04ms +[2025-09-11 11:24:10] [Rank 0] step:9061/10000 train_time:380913ms step_avg:42.04ms +[2025-09-11 11:24:11] [Rank 0] step:9081/10000 train_time:381903ms step_avg:42.06ms +[2025-09-11 11:24:11] [Rank 0] step:9081/10000 train_time:381903ms step_avg:42.06ms +[2025-09-11 11:24:12] [Rank 0] step:9101/10000 train_time:382837ms step_avg:42.07ms +[2025-09-11 11:24:12] [Rank 0] step:9101/10000 train_time:382837ms step_avg:42.07ms +[2025-09-11 11:24:13] [Rank 0] step:9121/10000 train_time:383550ms step_avg:42.05ms 
+[2025-09-11 11:24:13] [Rank 0] step:9121/10000 train_time:383550ms step_avg:42.05ms +[2025-09-11 11:24:14] [Rank 0] step:9141/10000 train_time:384557ms step_avg:42.07ms +[2025-09-11 11:24:14] [Rank 0] step:9141/10000 train_time:384557ms step_avg:42.07ms +[2025-09-11 11:24:15] [Rank 0] step:9161/10000 train_time:385269ms step_avg:42.06ms +[2025-09-11 11:24:15] [Rank 0] step:9161/10000 train_time:385269ms step_avg:42.06ms +[2025-09-11 11:24:15] [Rank 0] step:9181/10000 train_time:385981ms step_avg:42.04ms +[2025-09-11 11:24:15] [Rank 0] step:9181/10000 train_time:385981ms step_avg:42.04ms +[2025-09-11 11:24:16] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 11:24:16] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 11:24:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 11:24:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 11:24:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 11:24:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 11:24:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:24:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:24:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 11:24:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 11:24:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 11:24:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 11:24:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 11:24:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 11:24:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 11:24:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 11:24:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 11:24:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 11:24:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 11:24:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 11:24:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 11:24:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 11:24:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 11:24:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 11:24:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 11:24:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 11:24:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 11:24:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 11:24:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 11:24:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 11:24:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 11:24:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 11:24:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 11:24:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 11:24:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 11:24:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 11:24:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 11:24:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 11:24:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 11:24:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 11:24:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 11:24:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 11:24:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 11:24:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 11:24:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:24:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:24:26] [Rank 0] PRINT: step:9200/10000 val_loss:4.8780 total_sharp:1.2293e-04 L1_sharp:3.3589e-05 L2_sharp:1.1389e-05 L3_sharp:1.6470e-05 L4_sharp:1.1134e-05 L5_sharp:2.0551e-05 L6_sharp:1.4627e-05 L7_sharp:1.1451e-05 L8_sharp:2.8517e-05 L9_sharp:2.2284e-05 L10_sharp:3.1573e-05 L11_sharp:4.4739e-05 L12_sharp:1.5911e-04 total_fnorm:7.7812e+00 total_l1_linf:1.0304e+04 total_spectral:3.8594e+00 L1_fnorm:2.4375e+00 L2_fnorm:2.2656e+00 L3_fnorm:2.2344e+00 L4_fnorm:2.2500e+00 L5_fnorm:2.1719e+00 L6_fnorm:2.2188e+00 L7_fnorm:2.2188e+00 L8_fnorm:2.1406e+00 L9_fnorm:2.1875e+00 L10_fnorm:2.1875e+00 L11_fnorm:2.2031e+00 L12_fnorm:2.2031e+00 L1_l1linf:4.1016e-01 L2_l1linf:3.7305e-01 L3_l1linf:3.7305e-01 L4_l1linf:3.4766e-01 L5_l1linf:3.3594e-01 L6_l1linf:3.2812e-01 L7_l1linf:3.1836e-01 L8_l1linf:3.0469e-01 L9_l1linf:2.9297e-01 L10_l1linf:3.0078e-01 L11_l1linf:3.1445e-01 L12_l1linf:3.5156e-01 L1_spectral:3.5483e-02 L2_spectral:3.3657e-02 L3_spectral:3.3280e-02 L4_spectral:3.3843e-02 L5_spectral:3.4705e-02 L6_spectral:3.4207e-02 L7_spectral:3.3979e-02 L8_spectral:3.4576e-02 L9_spectral:3.4529e-02 L10_spectral:3.4147e-02 L11_spectral:3.3965e-02 L12_spectral:3.6225e-02 train_time:386674ms step_avg:42.03ms +[2025-09-11 11:24:26] [Rank 0] PRINT: step:9200/10000 val_loss:4.8780 total_sharp:1.2293e-04 L1_sharp:3.3589e-05 L2_sharp:1.1389e-05 L3_sharp:1.6470e-05 L4_sharp:1.1134e-05 L5_sharp:2.0551e-05 L6_sharp:1.4627e-05 L7_sharp:1.1451e-05 L8_sharp:2.8517e-05 L9_sharp:2.2284e-05 L10_sharp:3.1573e-05 L11_sharp:4.4739e-05 L12_sharp:1.5911e-04 total_fnorm:7.7812e+00 total_l1_linf:1.0304e+04 total_spectral:3.8594e+00 L1_fnorm:2.4375e+00 L2_fnorm:2.2656e+00 L3_fnorm:2.2344e+00 L4_fnorm:2.2500e+00 L5_fnorm:2.1719e+00 L6_fnorm:2.2188e+00 L7_fnorm:2.2188e+00 L8_fnorm:2.1406e+00 L9_fnorm:2.1875e+00 L10_fnorm:2.1875e+00 L11_fnorm:2.2031e+00 L12_fnorm:2.2031e+00 L1_l1linf:4.1016e-01 L2_l1linf:3.7305e-01 L3_l1linf:3.7305e-01 L4_l1linf:3.4766e-01 L5_l1linf:3.3594e-01 
L6_l1linf:3.2812e-01 L7_l1linf:3.1836e-01 L8_l1linf:3.0469e-01 L9_l1linf:2.9297e-01 L10_l1linf:3.0078e-01 L11_l1linf:3.1445e-01 L12_l1linf:3.5156e-01 L1_spectral:3.5483e-02 L2_spectral:3.3657e-02 L3_spectral:3.3280e-02 L4_spectral:3.3843e-02 L5_spectral:3.4705e-02 L6_spectral:3.4207e-02 L7_spectral:3.3979e-02 L8_spectral:3.4576e-02 L9_spectral:3.4529e-02 L10_spectral:3.4147e-02 L11_spectral:3.3965e-02 L12_spectral:3.6225e-02 train_time:386674ms step_avg:42.03ms +[2025-09-11 11:24:27] [Rank 0] step:9201/10000 train_time:387882ms step_avg:42.16ms +[2025-09-11 11:24:27] [Rank 0] step:9201/10000 train_time:387882ms step_avg:42.16ms +[2025-09-11 11:24:28] [Rank 0] step:9221/10000 train_time:388602ms step_avg:42.14ms +[2025-09-11 11:24:28] [Rank 0] step:9221/10000 train_time:388602ms step_avg:42.14ms +[2025-09-11 11:24:29] [Rank 0] step:9241/10000 train_time:389311ms step_avg:42.13ms +[2025-09-11 11:24:29] [Rank 0] step:9241/10000 train_time:389311ms step_avg:42.13ms +[2025-09-11 11:24:29] [Rank 0] step:9261/10000 train_time:390023ms step_avg:42.11ms +[2025-09-11 11:24:29] [Rank 0] step:9261/10000 train_time:390023ms step_avg:42.11ms +[2025-09-11 11:24:30] [Rank 0] step:9281/10000 train_time:390735ms step_avg:42.10ms +[2025-09-11 11:24:30] [Rank 0] step:9281/10000 train_time:390735ms step_avg:42.10ms +[2025-09-11 11:24:31] [Rank 0] step:9301/10000 train_time:391444ms step_avg:42.09ms +[2025-09-11 11:24:31] [Rank 0] step:9301/10000 train_time:391444ms step_avg:42.09ms +[2025-09-11 11:24:32] [Rank 0] step:9321/10000 train_time:392157ms step_avg:42.07ms +[2025-09-11 11:24:32] [Rank 0] step:9321/10000 train_time:392157ms step_avg:42.07ms +[2025-09-11 11:24:32] [Rank 0] step:9341/10000 train_time:392863ms step_avg:42.06ms +[2025-09-11 11:24:32] [Rank 0] step:9341/10000 train_time:392863ms step_avg:42.06ms +[2025-09-11 11:24:33] [Rank 0] step:9361/10000 train_time:393569ms step_avg:42.04ms +[2025-09-11 11:24:33] [Rank 0] step:9361/10000 train_time:393569ms step_avg:42.04ms 
+[2025-09-11 11:24:34] [Rank 0] step:9381/10000 train_time:394277ms step_avg:42.03ms +[2025-09-11 11:24:34] [Rank 0] step:9381/10000 train_time:394277ms step_avg:42.03ms +[2025-09-11 11:24:34] [Rank 0] step:9401/10000 train_time:394989ms step_avg:42.02ms +[2025-09-11 11:24:34] [Rank 0] step:9401/10000 train_time:394989ms step_avg:42.02ms +[2025-09-11 11:24:35] [Rank 0] step:9421/10000 train_time:395700ms step_avg:42.00ms +[2025-09-11 11:24:35] [Rank 0] step:9421/10000 train_time:395700ms step_avg:42.00ms +[2025-09-11 11:24:36] [Rank 0] step:9441/10000 train_time:396413ms step_avg:41.99ms +[2025-09-11 11:24:36] [Rank 0] step:9441/10000 train_time:396413ms step_avg:41.99ms +[2025-09-11 11:24:37] [Rank 0] step:9461/10000 train_time:397123ms step_avg:41.97ms +[2025-09-11 11:24:37] [Rank 0] step:9461/10000 train_time:397123ms step_avg:41.97ms +[2025-09-11 11:24:37] [Rank 0] step:9481/10000 train_time:397834ms step_avg:41.96ms +[2025-09-11 11:24:37] [Rank 0] step:9481/10000 train_time:397834ms step_avg:41.96ms +[2025-09-11 11:24:38] [Rank 0] step:9501/10000 train_time:398545ms step_avg:41.95ms +[2025-09-11 11:24:38] [Rank 0] step:9501/10000 train_time:398545ms step_avg:41.95ms +[2025-09-11 11:24:39] [Rank 0] step:9521/10000 train_time:399258ms step_avg:41.93ms +[2025-09-11 11:24:39] [Rank 0] step:9521/10000 train_time:399258ms step_avg:41.93ms +[2025-09-11 11:24:39] [Rank 0] step:9541/10000 train_time:399965ms step_avg:41.92ms +[2025-09-11 11:24:39] [Rank 0] step:9541/10000 train_time:399965ms step_avg:41.92ms +[2025-09-11 11:24:40] [Rank 0] step:9561/10000 train_time:400674ms step_avg:41.91ms +[2025-09-11 11:24:40] [Rank 0] step:9561/10000 train_time:400674ms step_avg:41.91ms +[2025-09-11 11:24:41] [Rank 0] step:9581/10000 train_time:401386ms step_avg:41.89ms +[2025-09-11 11:24:41] [Rank 0] step:9581/10000 train_time:401386ms step_avg:41.89ms +[2025-09-11 11:24:41] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 11:24:41] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 11:24:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 11:24:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 11:24:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 11:24:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 11:24:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:24:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:24:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 11:24:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 11:24:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 11:24:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 11:24:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 11:24:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 11:24:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 11:24:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 11:24:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 11:24:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 11:24:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 11:24:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 11:24:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 11:24:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 11:24:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 11:24:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 11:24:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 11:24:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 11:24:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 11:24:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 11:24:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 11:24:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 11:24:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 11:24:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 11:24:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 11:24:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 11:24:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 11:24:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 11:24:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 11:24:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 11:24:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 11:24:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 11:24:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 11:24:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 11:24:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 11:24:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 11:24:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:24:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:24:51] [Rank 0] PRINT: step:9600/10000 val_loss:4.8686 total_sharp:7.7823e-05 L1_sharp:2.0841e-05 L2_sharp:1.3548e-05 L3_sharp:1.1635e-05 L4_sharp:8.8989e-06 L5_sharp:1.2479e-05 L6_sharp:1.2775e-05 L7_sharp:1.2468e-05 L8_sharp:2.0351e-05 L9_sharp:1.7274e-05 L10_sharp:2.5202e-05 L11_sharp:3.2203e-05 L12_sharp:1.2060e-04 total_fnorm:4.3750e+00 total_l1_linf:4.8640e+03 total_spectral:2.1875e+00 L1_fnorm:1.3672e+00 L2_fnorm:1.2969e+00 L3_fnorm:1.2656e+00 L4_fnorm:1.2656e+00 L5_fnorm:1.2266e+00 L6_fnorm:1.2500e+00 L7_fnorm:1.2500e+00 L8_fnorm:1.2109e+00 L9_fnorm:1.2344e+00 L10_fnorm:1.2344e+00 L11_fnorm:1.2500e+00 L12_fnorm:1.2422e+00 L1_l1linf:1.9434e-01 L2_l1linf:1.8164e-01 L3_l1linf:1.7871e-01 L4_l1linf:1.7676e-01 L5_l1linf:1.7285e-01 L6_l1linf:1.5820e-01 L7_l1linf:1.6211e-01 L8_l1linf:1.5332e-01 L9_l1linf:1.3965e-01 L10_l1linf:1.4355e-01 L11_l1linf:1.5137e-01 L12_l1linf:1.7090e-01 L1_spectral:2.0209e-02 L2_spectral:1.9367e-02 L3_spectral:1.9195e-02 L4_spectral:1.9289e-02 L5_spectral:2.0468e-02 L6_spectral:1.9727e-02 L7_spectral:1.9608e-02 L8_spectral:1.9901e-02 L9_spectral:1.9573e-02 L10_spectral:1.9619e-02 L11_spectral:1.9604e-02 L12_spectral:2.1284e-02 train_time:402073ms step_avg:41.88ms +[2025-09-11 11:24:51] [Rank 0] PRINT: step:9600/10000 
val_loss:4.8686 total_sharp:7.7823e-05 L1_sharp:2.0841e-05 L2_sharp:1.3548e-05 L3_sharp:1.1635e-05 L4_sharp:8.8989e-06 L5_sharp:1.2479e-05 L6_sharp:1.2775e-05 L7_sharp:1.2468e-05 L8_sharp:2.0351e-05 L9_sharp:1.7274e-05 L10_sharp:2.5202e-05 L11_sharp:3.2203e-05 L12_sharp:1.2060e-04 total_fnorm:4.3750e+00 total_l1_linf:4.8640e+03 total_spectral:2.1875e+00 L1_fnorm:1.3672e+00 L2_fnorm:1.2969e+00 L3_fnorm:1.2656e+00 L4_fnorm:1.2656e+00 L5_fnorm:1.2266e+00 L6_fnorm:1.2500e+00 L7_fnorm:1.2500e+00 L8_fnorm:1.2109e+00 L9_fnorm:1.2344e+00 L10_fnorm:1.2344e+00 L11_fnorm:1.2500e+00 L12_fnorm:1.2422e+00 L1_l1linf:1.9434e-01 L2_l1linf:1.8164e-01 L3_l1linf:1.7871e-01 L4_l1linf:1.7676e-01 L5_l1linf:1.7285e-01 L6_l1linf:1.5820e-01 L7_l1linf:1.6211e-01 L8_l1linf:1.5332e-01 L9_l1linf:1.3965e-01 L10_l1linf:1.4355e-01 L11_l1linf:1.5137e-01 L12_l1linf:1.7090e-01 L1_spectral:2.0209e-02 L2_spectral:1.9367e-02 L3_spectral:1.9195e-02 L4_spectral:1.9289e-02 L5_spectral:2.0468e-02 L6_spectral:1.9727e-02 L7_spectral:1.9608e-02 L8_spectral:1.9901e-02 L9_spectral:1.9573e-02 L10_spectral:1.9619e-02 L11_spectral:1.9604e-02 L12_spectral:2.1284e-02 train_time:402073ms step_avg:41.88ms +[2025-09-11 11:24:53] [Rank 0] step:9601/10000 train_time:403280ms step_avg:42.00ms +[2025-09-11 11:24:53] [Rank 0] step:9601/10000 train_time:403280ms step_avg:42.00ms +[2025-09-11 11:24:53] [Rank 0] step:9621/10000 train_time:404030ms step_avg:41.99ms +[2025-09-11 11:24:53] [Rank 0] step:9621/10000 train_time:404030ms step_avg:41.99ms +[2025-09-11 11:24:54] [Rank 0] step:9641/10000 train_time:404746ms step_avg:41.98ms +[2025-09-11 11:24:54] [Rank 0] step:9641/10000 train_time:404746ms step_avg:41.98ms +[2025-09-11 11:24:55] [Rank 0] step:9661/10000 train_time:405470ms step_avg:41.97ms +[2025-09-11 11:24:55] [Rank 0] step:9661/10000 train_time:405470ms step_avg:41.97ms +[2025-09-11 11:24:56] [Rank 0] step:9681/10000 train_time:406185ms step_avg:41.96ms +[2025-09-11 11:24:56] [Rank 0] step:9681/10000 
train_time:406185ms step_avg:41.96ms +[2025-09-11 11:24:56] [Rank 0] step:9701/10000 train_time:406902ms step_avg:41.94ms +[2025-09-11 11:24:56] [Rank 0] step:9701/10000 train_time:406902ms step_avg:41.94ms +[2025-09-11 11:24:57] [Rank 0] step:9721/10000 train_time:407623ms step_avg:41.93ms +[2025-09-11 11:24:57] [Rank 0] step:9721/10000 train_time:407623ms step_avg:41.93ms +[2025-09-11 11:24:58] [Rank 0] step:9741/10000 train_time:408341ms step_avg:41.92ms +[2025-09-11 11:24:58] [Rank 0] step:9741/10000 train_time:408341ms step_avg:41.92ms +[2025-09-11 11:24:58] [Rank 0] step:9761/10000 train_time:409058ms step_avg:41.91ms +[2025-09-11 11:24:58] [Rank 0] step:9761/10000 train_time:409058ms step_avg:41.91ms +[2025-09-11 11:24:59] [Rank 0] step:9781/10000 train_time:409774ms step_avg:41.89ms +[2025-09-11 11:24:59] [Rank 0] step:9781/10000 train_time:409774ms step_avg:41.89ms +[2025-09-11 11:25:00] [Rank 0] step:9801/10000 train_time:410495ms step_avg:41.88ms +[2025-09-11 11:25:00] [Rank 0] step:9801/10000 train_time:410495ms step_avg:41.88ms +[2025-09-11 11:25:01] [Rank 0] step:9821/10000 train_time:411217ms step_avg:41.87ms +[2025-09-11 11:25:01] [Rank 0] step:9821/10000 train_time:411217ms step_avg:41.87ms +[2025-09-11 11:25:01] [Rank 0] step:9841/10000 train_time:412093ms step_avg:41.88ms +[2025-09-11 11:25:01] [Rank 0] step:9841/10000 train_time:412093ms step_avg:41.88ms +[2025-09-11 11:25:02] [Rank 0] step:9861/10000 train_time:412854ms step_avg:41.87ms +[2025-09-11 11:25:02] [Rank 0] step:9861/10000 train_time:412854ms step_avg:41.87ms +[2025-09-11 11:25:03] [Rank 0] step:9881/10000 train_time:413572ms step_avg:41.86ms +[2025-09-11 11:25:03] [Rank 0] step:9881/10000 train_time:413572ms step_avg:41.86ms +[2025-09-11 11:25:04] [Rank 0] step:9901/10000 train_time:414286ms step_avg:41.84ms +[2025-09-11 11:25:04] [Rank 0] step:9901/10000 train_time:414286ms step_avg:41.84ms +[2025-09-11 11:25:04] [Rank 0] step:9921/10000 train_time:415003ms step_avg:41.83ms 
+[2025-09-11 11:25:04] [Rank 0] step:9921/10000 train_time:415003ms step_avg:41.83ms +[2025-09-11 11:25:05] [Rank 0] step:9941/10000 train_time:415725ms step_avg:41.82ms +[2025-09-11 11:25:05] [Rank 0] step:9941/10000 train_time:415725ms step_avg:41.82ms +[2025-09-11 11:25:06] [Rank 0] step:9961/10000 train_time:416446ms step_avg:41.81ms +[2025-09-11 11:25:06] [Rank 0] step:9961/10000 train_time:416446ms step_avg:41.81ms +[2025-09-11 11:25:07] [Rank 0] step:9981/10000 train_time:417165ms step_avg:41.80ms +[2025-09-11 11:25:07] [Rank 0] step:9981/10000 train_time:417165ms step_avg:41.80ms +[2025-09-11 11:25:07] [Rank 0] step:10000/10000 train_time:417854ms step_avg:41.79ms +[2025-09-11 11:25:07] [Rank 0] step:10000/10000 train_time:417854ms step_avg:41.79ms +[2025-09-11 11:25:07] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 11:25:07] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 11:25:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 11:25:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 11:25:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 11:25:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 11:25:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:25:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:25:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 11:25:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 11:25:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 11:25:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 11:25:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 11:25:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 11:25:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 11:25:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 11:25:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 11:25:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 11:25:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 11:25:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 11:25:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 11:25:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 11:25:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 11:25:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 11:25:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 11:25:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 11:25:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 11:25:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 11:25:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 11:25:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 11:25:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 11:25:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 11:25:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 11:25:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 11:25:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 11:25:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 11:25:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 11:25:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 11:25:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 11:25:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 11:25:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 11:25:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 11:25:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 11:25:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 11:25:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:25:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:25:17] [Rank 0] PRINT: step:10000/10000 val_loss:4.8657 total_sharp:5.5155e-05 L1_sharp:2.3324e-05 L2_sharp:9.9451e-06 L3_sharp:5.8139e-06 L4_sharp:6.7657e-06 L5_sharp:1.1971e-05 L6_sharp:8.5972e-06 L7_sharp:8.2169e-06 L8_sharp:1.6806e-05 L9_sharp:1.6370e-05 L10_sharp:1.7343e-05 L11_sharp:2.2838e-05 L12_sharp:1.0379e-04 total_fnorm:1.7031e+00 total_l1_linf:1.3760e+03 total_spectral:8.4766e-01 L1_fnorm:5.3516e-01 L2_fnorm:4.9609e-01 L3_fnorm:4.9023e-01 L4_fnorm:4.9219e-01 L5_fnorm:4.7461e-01 L6_fnorm:4.8242e-01 L7_fnorm:4.8242e-01 L8_fnorm:4.7070e-01 L9_fnorm:4.7852e-01 L10_fnorm:4.7656e-01 L11_fnorm:4.8438e-01 L12_fnorm:4.8438e-01 L1_l1linf:6.2988e-02 L2_l1linf:5.3955e-02 L3_l1linf:5.4688e-02 L4_l1linf:5.4199e-02 L5_l1linf:5.5420e-02 L6_l1linf:5.2002e-02 L7_l1linf:4.8340e-02 L8_l1linf:5.2979e-02 L9_l1linf:4.5166e-02 L10_l1linf:4.4922e-02 L11_l1linf:4.6875e-02 L12_l1linf:5.6885e-02 L1_spectral:8.0691e-03 L2_spectral:7.6210e-03 L3_spectral:7.5006e-03 L4_spectral:7.7225e-03 L5_spectral:8.1885e-03 L6_spectral:7.7458e-03 L7_spectral:7.7513e-03 L8_spectral:8.0731e-03 L9_spectral:7.8826e-03 L10_spectral:7.7799e-03 L11_spectral:7.7695e-03 L12_spectral:8.5880e-03 train_time:417875ms step_avg:41.79ms +[2025-09-11 11:25:17] [Rank 0] PRINT: step:10000/10000 val_loss:4.8657 total_sharp:5.5155e-05 L1_sharp:2.3324e-05 L2_sharp:9.9451e-06 L3_sharp:5.8139e-06 L4_sharp:6.7657e-06 L5_sharp:1.1971e-05 L6_sharp:8.5972e-06 L7_sharp:8.2169e-06 L8_sharp:1.6806e-05 L9_sharp:1.6370e-05 L10_sharp:1.7343e-05 L11_sharp:2.2838e-05 L12_sharp:1.0379e-04 total_fnorm:1.7031e+00 total_l1_linf:1.3760e+03 total_spectral:8.4766e-01 L1_fnorm:5.3516e-01 L2_fnorm:4.9609e-01 L3_fnorm:4.9023e-01 L4_fnorm:4.9219e-01 L5_fnorm:4.7461e-01 L6_fnorm:4.8242e-01 L7_fnorm:4.8242e-01 L8_fnorm:4.7070e-01 L9_fnorm:4.7852e-01 L10_fnorm:4.7656e-01 L11_fnorm:4.8438e-01 L12_fnorm:4.8438e-01 L1_l1linf:6.2988e-02 L2_l1linf:5.3955e-02 L3_l1linf:5.4688e-02 L4_l1linf:5.4199e-02 L5_l1linf:5.5420e-02 
L6_l1linf:5.2002e-02 L7_l1linf:4.8340e-02 L8_l1linf:5.2979e-02 L9_l1linf:4.5166e-02 L10_l1linf:4.4922e-02 L11_l1linf:4.6875e-02 L12_l1linf:5.6885e-02 L1_spectral:8.0691e-03 L2_spectral:7.6210e-03 L3_spectral:7.5006e-03 L4_spectral:7.7225e-03 L5_spectral:8.1885e-03 L6_spectral:7.7458e-03 L7_spectral:7.7513e-03 L8_spectral:8.0731e-03 L9_spectral:7.8826e-03 L10_spectral:7.7799e-03 L11_spectral:7.7695e-03 L12_spectral:8.5880e-03 train_time:417875ms step_avg:41.79ms +[2025-09-11 11:25:17] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 11:25:17 2025 --- +[2025-09-11 11:25:17] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 11:25:17 2025 --- +[2025-09-11 11:25:17] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 11:25:17] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.0005_seed_42/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.0005_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..54010444bbf1569767ed5204ba7b64d17e2b620c --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.0005_seed_42/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.005, + "muon_lr": 0.0005, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "75516b8c-735e-4dc3-b9b2-7418d1ab54d3", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.0005_seed_42/training_log_75516b8c-735e-4dc3-b9b2-7418d1ab54d3.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.0005_seed_42/training_log_75516b8c-735e-4dc3-b9b2-7418d1ab54d3.txt new file mode 100644 index 0000000000000000000000000000000000000000..99caf7146bcfe7ac195108dcd351c21c9b0a1cea --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.0005_seed_42/training_log_75516b8c-735e-4dc3-b9b2-7418d1ab54d3.txt @@ -0,0 +1,4264 @@ +[2025-09-11 11:14:42] [Rank 0] PRINT: --- Script Start: Thu Sep 11 11:14:42 2025 --- +[2025-09-11 11:14:42] [Rank 0] PRINT: --- Script Start: Thu Sep 11 11:14:42 2025 --- +[2025-09-11 11:14:42] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005, muon_lr=0.0005, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 11:14:42] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005, muon_lr=0.0005, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 11:14:42] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 11:14:42] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 11:14:42] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-11 11:14:42] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-11 11:14:42] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.0005_seed_42 +[2025-09-11 11:14:42] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.0005_seed_42 +[2025-09-11 11:14:42] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from 
dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + 
assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." 
+ "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + 
train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 11:14:42] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 11:14:42] [Rank 0] PRINT: Constructing model... +[2025-09-11 11:14:42] [Rank 0] PRINT: Constructing model... +[2025-09-11 11:14:43] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 11:14:43] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 11:14:43] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 11:14:43] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 11:14:43] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 11:14:43] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 11:14:43] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 11:14:43] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 11:14:43] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 11:14:43] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 11:14:45] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 11:14:45] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 11:14:45] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 11:14:45] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 11:14:45] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 11:14:45] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 11:14:50] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 11:14:50] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 11:14:50] [Rank 0] PRINT: Starting warmup... +[2025-09-11 11:14:50] [Rank 0] PRINT: Starting warmup... +[2025-09-11 11:15:28] [Rank 0] PRINT: Warmup complete. +[2025-09-11 11:15:28] [Rank 0] PRINT: Warmup complete. +[2025-09-11 11:15:28] [Rank 0] PRINT: Starting training... +[2025-09-11 11:15:28] [Rank 0] PRINT: Starting training... 
+[2025-09-11 11:15:29] [Rank 0] step:21/10000 train_time:1133ms step_avg:53.97ms +[2025-09-11 11:15:29] [Rank 0] step:21/10000 train_time:1133ms step_avg:53.97ms +[2025-09-11 11:15:29] [Rank 0] step:41/10000 train_time:1863ms step_avg:45.43ms +[2025-09-11 11:15:29] [Rank 0] step:41/10000 train_time:1863ms step_avg:45.43ms +[2025-09-11 11:15:30] [Rank 0] step:61/10000 train_time:2591ms step_avg:42.48ms +[2025-09-11 11:15:30] [Rank 0] step:61/10000 train_time:2591ms step_avg:42.48ms +[2025-09-11 11:15:31] [Rank 0] step:81/10000 train_time:3321ms step_avg:40.99ms +[2025-09-11 11:15:31] [Rank 0] step:81/10000 train_time:3321ms step_avg:40.99ms +[2025-09-11 11:15:32] [Rank 0] step:101/10000 train_time:4049ms step_avg:40.08ms +[2025-09-11 11:15:32] [Rank 0] step:101/10000 train_time:4049ms step_avg:40.08ms +[2025-09-11 11:15:32] [Rank 0] step:121/10000 train_time:4778ms step_avg:39.49ms +[2025-09-11 11:15:32] [Rank 0] step:121/10000 train_time:4778ms step_avg:39.49ms +[2025-09-11 11:15:33] [Rank 0] step:141/10000 train_time:5506ms step_avg:39.05ms +[2025-09-11 11:15:33] [Rank 0] step:141/10000 train_time:5506ms step_avg:39.05ms +[2025-09-11 11:15:34] [Rank 0] step:161/10000 train_time:6235ms step_avg:38.73ms +[2025-09-11 11:15:34] [Rank 0] step:161/10000 train_time:6235ms step_avg:38.73ms +[2025-09-11 11:15:35] [Rank 0] step:181/10000 train_time:6963ms step_avg:38.47ms +[2025-09-11 11:15:35] [Rank 0] step:181/10000 train_time:6963ms step_avg:38.47ms +[2025-09-11 11:15:35] [Rank 0] step:201/10000 train_time:7691ms step_avg:38.27ms +[2025-09-11 11:15:35] [Rank 0] step:201/10000 train_time:7691ms step_avg:38.27ms +[2025-09-11 11:15:36] [Rank 0] step:221/10000 train_time:8421ms step_avg:38.10ms +[2025-09-11 11:15:36] [Rank 0] step:221/10000 train_time:8421ms step_avg:38.10ms +[2025-09-11 11:15:37] [Rank 0] step:241/10000 train_time:9150ms step_avg:37.97ms +[2025-09-11 11:15:37] [Rank 0] step:241/10000 train_time:9150ms step_avg:37.97ms +[2025-09-11 11:15:37] [Rank 0] 
step:261/10000 train_time:9878ms step_avg:37.85ms +[2025-09-11 11:15:37] [Rank 0] step:261/10000 train_time:9878ms step_avg:37.85ms +[2025-09-11 11:15:38] [Rank 0] step:281/10000 train_time:10607ms step_avg:37.75ms +[2025-09-11 11:15:38] [Rank 0] step:281/10000 train_time:10607ms step_avg:37.75ms +[2025-09-11 11:15:39] [Rank 0] step:301/10000 train_time:11335ms step_avg:37.66ms +[2025-09-11 11:15:39] [Rank 0] step:301/10000 train_time:11335ms step_avg:37.66ms +[2025-09-11 11:15:40] [Rank 0] step:321/10000 train_time:12064ms step_avg:37.58ms +[2025-09-11 11:15:40] [Rank 0] step:321/10000 train_time:12064ms step_avg:37.58ms +[2025-09-11 11:15:40] [Rank 0] step:341/10000 train_time:12792ms step_avg:37.51ms +[2025-09-11 11:15:40] [Rank 0] step:341/10000 train_time:12792ms step_avg:37.51ms +[2025-09-11 11:15:41] [Rank 0] step:361/10000 train_time:13519ms step_avg:37.45ms +[2025-09-11 11:15:41] [Rank 0] step:361/10000 train_time:13519ms step_avg:37.45ms +[2025-09-11 11:15:42] [Rank 0] step:381/10000 train_time:14247ms step_avg:37.39ms +[2025-09-11 11:15:42] [Rank 0] step:381/10000 train_time:14247ms step_avg:37.39ms +[2025-09-11 11:15:43] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 11:15:43] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 11:15:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 11:15:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 11:16:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 11:16:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 11:16:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:16:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 11:16:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 11:16:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 11:16:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 11:16:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 11:16:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 11:16:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 11:16:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 11:16:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 11:16:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 11:16:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 11:16:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 11:16:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 11:16:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 11:16:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 11:16:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 11:16:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 11:16:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 11:16:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 11:16:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 11:16:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 11:16:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 11:16:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 11:16:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 11:16:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 11:16:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 11:16:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 11:16:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 11:16:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 11:16:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 11:16:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 11:16:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 11:16:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 11:16:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 11:16:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 11:16:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 11:16:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 11:16:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:16:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:16:31] [Rank 0] PRINT: step:400/10000 val_loss:6.6468 total_sharp:6.5201e-04 L1_sharp:4.9120e-02 L2_sharp:6.6457e-02 L3_sharp:7.4421e-02 L4_sharp:1.4420e-01 L5_sharp:1.4386e-01 L6_sharp:1.9557e-01 L7_sharp:2.0605e-01 L8_sharp:2.2563e-01 L9_sharp:3.0357e-01 L10_sharp:4.5053e-01 L11_sharp:4.7713e-01 L12_sharp:5.8665e-01 total_fnorm:2.0596e+01 total_l1_linf:4.9007e+04 total_spectral:1.0300e+01 L1_fnorm:5.7993e-02 L2_fnorm:5.7941e-02 L3_fnorm:5.7785e-02 L4_fnorm:5.6901e-02 L5_fnorm:5.6901e-02 L6_fnorm:5.6827e-02 L7_fnorm:5.6434e-02 L8_fnorm:5.5228e-02 L9_fnorm:5.4958e-02 L10_fnorm:5.4716e-02 L11_fnorm:5.4386e-02 L12_fnorm:5.3772e-02 L1_l1linf:2.2591e-02 L2_l1linf:2.2402e-02 L3_l1linf:2.2372e-02 L4_l1linf:2.2347e-02 L5_l1linf:2.2296e-02 L6_l1linf:2.2351e-02 L7_l1linf:2.2237e-02 L8_l1linf:2.2028e-02 L9_l1linf:2.1961e-02 L10_l1linf:2.1866e-02 L11_l1linf:2.1968e-02 L12_l1linf:2.1471e-02 L1_spectral:6.0255e-04 L2_spectral:6.0233e-04 L3_spectral:6.0265e-04 L4_spectral:6.0258e-04 L5_spectral:6.0253e-04 L6_spectral:6.0243e-04 L7_spectral:6.0261e-04 L8_spectral:6.0247e-04 L9_spectral:6.0285e-04 L10_spectral:6.0250e-04 L11_spectral:6.0248e-04 L12_spectral:6.0251e-04 train_time:14955ms step_avg:37.39ms +[2025-09-11 11:16:31] [Rank 0] PRINT: step:400/10000 val_loss:6.6468 total_sharp:6.5201e-04 L1_sharp:4.9120e-02 L2_sharp:6.6457e-02 L3_sharp:7.4421e-02 L4_sharp:1.4420e-01 L5_sharp:1.4386e-01 L6_sharp:1.9557e-01 L7_sharp:2.0605e-01 L8_sharp:2.2563e-01 L9_sharp:3.0357e-01 L10_sharp:4.5053e-01 L11_sharp:4.7713e-01 L12_sharp:5.8665e-01 total_fnorm:2.0596e+01 total_l1_linf:4.9007e+04 total_spectral:1.0300e+01 L1_fnorm:5.7993e-02 L2_fnorm:5.7941e-02 L3_fnorm:5.7785e-02 L4_fnorm:5.6901e-02 L5_fnorm:5.6901e-02 L6_fnorm:5.6827e-02 L7_fnorm:5.6434e-02 L8_fnorm:5.5228e-02 L9_fnorm:5.4958e-02 L10_fnorm:5.4716e-02 L11_fnorm:5.4386e-02 L12_fnorm:5.3772e-02 L1_l1linf:2.2591e-02 L2_l1linf:2.2402e-02 L3_l1linf:2.2372e-02 L4_l1linf:2.2347e-02 L5_l1linf:2.2296e-02 
L6_l1linf:2.2351e-02 L7_l1linf:2.2237e-02 L8_l1linf:2.2028e-02 L9_l1linf:2.1961e-02 L10_l1linf:2.1866e-02 L11_l1linf:2.1968e-02 L12_l1linf:2.1471e-02 L1_spectral:6.0255e-04 L2_spectral:6.0233e-04 L3_spectral:6.0265e-04 L4_spectral:6.0258e-04 L5_spectral:6.0253e-04 L6_spectral:6.0243e-04 L7_spectral:6.0261e-04 L8_spectral:6.0247e-04 L9_spectral:6.0285e-04 L10_spectral:6.0250e-04 L11_spectral:6.0248e-04 L12_spectral:6.0251e-04 train_time:14955ms step_avg:37.39ms +[2025-09-11 11:17:01] [Rank 0] step:401/10000 train_time:45061ms step_avg:112.37ms +[2025-09-11 11:17:01] [Rank 0] step:401/10000 train_time:45061ms step_avg:112.37ms +[2025-09-11 11:17:03] [Rank 0] step:421/10000 train_time:47366ms step_avg:112.51ms +[2025-09-11 11:17:03] [Rank 0] step:421/10000 train_time:47366ms step_avg:112.51ms +[2025-09-11 11:17:04] [Rank 0] step:441/10000 train_time:48006ms step_avg:108.86ms +[2025-09-11 11:17:04] [Rank 0] step:441/10000 train_time:48006ms step_avg:108.86ms +[2025-09-11 11:17:04] [Rank 0] step:461/10000 train_time:48647ms step_avg:105.52ms +[2025-09-11 11:17:04] [Rank 0] step:461/10000 train_time:48647ms step_avg:105.52ms +[2025-09-11 11:17:05] [Rank 0] step:481/10000 train_time:49287ms step_avg:102.47ms +[2025-09-11 11:17:05] [Rank 0] step:481/10000 train_time:49287ms step_avg:102.47ms +[2025-09-11 11:17:06] [Rank 0] step:501/10000 train_time:49927ms step_avg:99.65ms +[2025-09-11 11:17:06] [Rank 0] step:501/10000 train_time:49927ms step_avg:99.65ms +[2025-09-11 11:17:06] [Rank 0] step:521/10000 train_time:50567ms step_avg:97.06ms +[2025-09-11 11:17:06] [Rank 0] step:521/10000 train_time:50567ms step_avg:97.06ms +[2025-09-11 11:17:07] [Rank 0] step:541/10000 train_time:51207ms step_avg:94.65ms +[2025-09-11 11:17:07] [Rank 0] step:541/10000 train_time:51207ms step_avg:94.65ms +[2025-09-11 11:17:08] [Rank 0] step:561/10000 train_time:51846ms step_avg:92.42ms +[2025-09-11 11:17:08] [Rank 0] step:561/10000 train_time:51846ms step_avg:92.42ms +[2025-09-11 11:17:08] [Rank 
0] step:581/10000 train_time:52485ms step_avg:90.34ms +[2025-09-11 11:17:08] [Rank 0] step:581/10000 train_time:52485ms step_avg:90.34ms +[2025-09-11 11:17:09] [Rank 0] step:601/10000 train_time:53124ms step_avg:88.39ms +[2025-09-11 11:17:09] [Rank 0] step:601/10000 train_time:53124ms step_avg:88.39ms +[2025-09-11 11:17:09] [Rank 0] step:621/10000 train_time:53765ms step_avg:86.58ms +[2025-09-11 11:17:09] [Rank 0] step:621/10000 train_time:53765ms step_avg:86.58ms +[2025-09-11 11:17:10] [Rank 0] step:641/10000 train_time:54404ms step_avg:84.87ms +[2025-09-11 11:17:10] [Rank 0] step:641/10000 train_time:54404ms step_avg:84.87ms +[2025-09-11 11:17:11] [Rank 0] step:661/10000 train_time:55044ms step_avg:83.27ms +[2025-09-11 11:17:11] [Rank 0] step:661/10000 train_time:55044ms step_avg:83.27ms +[2025-09-11 11:17:11] [Rank 0] step:681/10000 train_time:55683ms step_avg:81.77ms +[2025-09-11 11:17:11] [Rank 0] step:681/10000 train_time:55683ms step_avg:81.77ms +[2025-09-11 11:17:12] [Rank 0] step:701/10000 train_time:56328ms step_avg:80.35ms +[2025-09-11 11:17:12] [Rank 0] step:701/10000 train_time:56328ms step_avg:80.35ms +[2025-09-11 11:17:13] [Rank 0] step:721/10000 train_time:56968ms step_avg:79.01ms +[2025-09-11 11:17:13] [Rank 0] step:721/10000 train_time:56968ms step_avg:79.01ms +[2025-09-11 11:17:13] [Rank 0] step:741/10000 train_time:57607ms step_avg:77.74ms +[2025-09-11 11:17:13] [Rank 0] step:741/10000 train_time:57607ms step_avg:77.74ms +[2025-09-11 11:17:14] [Rank 0] step:761/10000 train_time:58251ms step_avg:76.55ms +[2025-09-11 11:17:14] [Rank 0] step:761/10000 train_time:58251ms step_avg:76.55ms +[2025-09-11 11:17:15] [Rank 0] step:781/10000 train_time:58896ms step_avg:75.41ms +[2025-09-11 11:17:15] [Rank 0] step:781/10000 train_time:58896ms step_avg:75.41ms +[2025-09-11 11:17:15] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 11:17:15] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 11:17:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 11:17:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 11:17:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 11:17:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 11:17:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:17:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:17:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 11:17:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 11:17:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 11:17:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 11:17:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 11:17:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 11:17:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 11:17:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 11:17:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 11:17:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 11:17:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 11:17:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 11:17:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 11:17:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 11:17:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 11:17:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 11:17:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 11:17:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 11:17:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 11:17:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 11:17:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 11:17:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 11:17:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 11:17:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 11:17:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 11:17:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 11:17:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 11:17:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 11:17:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 11:17:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 11:17:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 11:17:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 11:17:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... 
+[2025-09-11 11:17:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 11:17:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 11:17:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 11:18:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:18:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:18:00] [Rank 0] PRINT: step:800/10000 val_loss:6.3012 total_sharp:1.4403e-03 L1_sharp:4.9081e-01 L2_sharp:4.3148e-01 L3_sharp:4.7758e-01 L4_sharp:5.9983e-01 L5_sharp:7.1314e-01 L6_sharp:7.2083e-01 L7_sharp:8.1955e-01 L8_sharp:1.1942e+00 L9_sharp:1.2532e+00 L10_sharp:1.3958e+00 L11_sharp:1.4259e+00 L12_sharp:1.5747e+00 total_fnorm:1.8500e+01 total_l1_linf:2.8672e+04 total_spectral:9.2500e+00 L1_fnorm:4.4922e-02 L2_fnorm:4.6631e-02 L3_fnorm:4.6387e-02 L4_fnorm:4.5898e-02 L5_fnorm:4.3945e-02 L6_fnorm:4.5410e-02 L7_fnorm:4.5166e-02 L8_fnorm:4.1504e-02 L9_fnorm:4.3457e-02 L10_fnorm:4.2969e-02 L11_fnorm:4.1992e-02 L12_fnorm:4.0039e-02 L1_l1linf:2.0142e-02 L2_l1linf:2.0020e-02 L3_l1linf:2.0142e-02 L4_l1linf:1.9897e-02 L5_l1linf:1.9897e-02 L6_l1linf:1.9775e-02 L7_l1linf:1.9775e-02 L8_l1linf:1.9531e-02 L9_l1linf:1.9409e-02 L10_l1linf:1.9165e-02 L11_l1linf:1.8921e-02 L12_l1linf:1.8921e-02 L1_spectral:7.0908e-04 L2_spectral:7.0819e-04 L3_spectral:7.0708e-04 L4_spectral:7.0731e-04 L5_spectral:7.0705e-04 L6_spectral:7.0984e-04 L7_spectral:7.0731e-04 L8_spectral:7.0646e-04 L9_spectral:7.0215e-04 L10_spectral:7.0208e-04 L11_spectral:6.9692e-04 L12_spectral:6.9700e-04 train_time:59522ms step_avg:74.40ms +[2025-09-11 11:18:00] [Rank 0] PRINT: step:800/10000 val_loss:6.3012 total_sharp:1.4403e-03 L1_sharp:4.9081e-01 L2_sharp:4.3148e-01 L3_sharp:4.7758e-01 L4_sharp:5.9983e-01 L5_sharp:7.1314e-01 L6_sharp:7.2083e-01 L7_sharp:8.1955e-01 L8_sharp:1.1942e+00 
L9_sharp:1.2532e+00 L10_sharp:1.3958e+00 L11_sharp:1.4259e+00 L12_sharp:1.5747e+00 total_fnorm:1.8500e+01 total_l1_linf:2.8672e+04 total_spectral:9.2500e+00 L1_fnorm:4.4922e-02 L2_fnorm:4.6631e-02 L3_fnorm:4.6387e-02 L4_fnorm:4.5898e-02 L5_fnorm:4.3945e-02 L6_fnorm:4.5410e-02 L7_fnorm:4.5166e-02 L8_fnorm:4.1504e-02 L9_fnorm:4.3457e-02 L10_fnorm:4.2969e-02 L11_fnorm:4.1992e-02 L12_fnorm:4.0039e-02 L1_l1linf:2.0142e-02 L2_l1linf:2.0020e-02 L3_l1linf:2.0142e-02 L4_l1linf:1.9897e-02 L5_l1linf:1.9897e-02 L6_l1linf:1.9775e-02 L7_l1linf:1.9775e-02 L8_l1linf:1.9531e-02 L9_l1linf:1.9409e-02 L10_l1linf:1.9165e-02 L11_l1linf:1.8921e-02 L12_l1linf:1.8921e-02 L1_spectral:7.0908e-04 L2_spectral:7.0819e-04 L3_spectral:7.0708e-04 L4_spectral:7.0731e-04 L5_spectral:7.0705e-04 L6_spectral:7.0984e-04 L7_spectral:7.0731e-04 L8_spectral:7.0646e-04 L9_spectral:7.0215e-04 L10_spectral:7.0208e-04 L11_spectral:6.9692e-04 L12_spectral:6.9700e-04 train_time:59522ms step_avg:74.40ms +[2025-09-11 11:18:01] [Rank 0] step:801/10000 train_time:60995ms step_avg:76.15ms +[2025-09-11 11:18:01] [Rank 0] step:801/10000 train_time:60995ms step_avg:76.15ms +[2025-09-11 11:18:02] [Rank 0] step:821/10000 train_time:61631ms step_avg:75.07ms +[2025-09-11 11:18:02] [Rank 0] step:821/10000 train_time:61631ms step_avg:75.07ms +[2025-09-11 11:18:02] [Rank 0] step:841/10000 train_time:62278ms step_avg:74.05ms +[2025-09-11 11:18:02] [Rank 0] step:841/10000 train_time:62278ms step_avg:74.05ms +[2025-09-11 11:18:03] [Rank 0] step:861/10000 train_time:62925ms step_avg:73.08ms +[2025-09-11 11:18:03] [Rank 0] step:861/10000 train_time:62925ms step_avg:73.08ms +[2025-09-11 11:18:04] [Rank 0] step:881/10000 train_time:63572ms step_avg:72.16ms +[2025-09-11 11:18:04] [Rank 0] step:881/10000 train_time:63572ms step_avg:72.16ms +[2025-09-11 11:18:04] [Rank 0] step:901/10000 train_time:64217ms step_avg:71.27ms +[2025-09-11 11:18:04] [Rank 0] step:901/10000 train_time:64217ms step_avg:71.27ms +[2025-09-11 11:18:05] [Rank 0] 
step:921/10000 train_time:64863ms step_avg:70.43ms +[2025-09-11 11:18:05] [Rank 0] step:921/10000 train_time:64863ms step_avg:70.43ms +[2025-09-11 11:18:06] [Rank 0] step:941/10000 train_time:65509ms step_avg:69.62ms +[2025-09-11 11:18:06] [Rank 0] step:941/10000 train_time:65509ms step_avg:69.62ms +[2025-09-11 11:18:06] [Rank 0] step:961/10000 train_time:66185ms step_avg:68.87ms +[2025-09-11 11:18:06] [Rank 0] step:961/10000 train_time:66185ms step_avg:68.87ms +[2025-09-11 11:18:07] [Rank 0] step:981/10000 train_time:66832ms step_avg:68.13ms +[2025-09-11 11:18:07] [Rank 0] step:981/10000 train_time:66832ms step_avg:68.13ms +[2025-09-11 11:18:08] [Rank 0] step:1001/10000 train_time:67477ms step_avg:67.41ms +[2025-09-11 11:18:08] [Rank 0] step:1001/10000 train_time:67477ms step_avg:67.41ms +[2025-09-11 11:18:08] [Rank 0] step:1021/10000 train_time:68123ms step_avg:66.72ms +[2025-09-11 11:18:08] [Rank 0] step:1021/10000 train_time:68123ms step_avg:66.72ms +[2025-09-11 11:18:09] [Rank 0] step:1041/10000 train_time:68768ms step_avg:66.06ms +[2025-09-11 11:18:09] [Rank 0] step:1041/10000 train_time:68768ms step_avg:66.06ms +[2025-09-11 11:18:10] [Rank 0] step:1061/10000 train_time:69413ms step_avg:65.42ms +[2025-09-11 11:18:10] [Rank 0] step:1061/10000 train_time:69413ms step_avg:65.42ms +[2025-09-11 11:18:10] [Rank 0] step:1081/10000 train_time:70061ms step_avg:64.81ms +[2025-09-11 11:18:10] [Rank 0] step:1081/10000 train_time:70061ms step_avg:64.81ms +[2025-09-11 11:18:11] [Rank 0] step:1101/10000 train_time:70707ms step_avg:64.22ms +[2025-09-11 11:18:11] [Rank 0] step:1101/10000 train_time:70707ms step_avg:64.22ms +[2025-09-11 11:18:11] [Rank 0] step:1121/10000 train_time:71353ms step_avg:63.65ms +[2025-09-11 11:18:11] [Rank 0] step:1121/10000 train_time:71353ms step_avg:63.65ms +[2025-09-11 11:18:12] [Rank 0] step:1141/10000 train_time:71999ms step_avg:63.10ms +[2025-09-11 11:18:12] [Rank 0] step:1141/10000 train_time:71999ms step_avg:63.10ms +[2025-09-11 11:18:13] 
[Rank 0] step:1161/10000 train_time:72644ms step_avg:62.57ms +[2025-09-11 11:18:13] [Rank 0] step:1161/10000 train_time:72644ms step_avg:62.57ms +[2025-09-11 11:18:13] [Rank 0] step:1181/10000 train_time:73290ms step_avg:62.06ms +[2025-09-11 11:18:13] [Rank 0] step:1181/10000 train_time:73290ms step_avg:62.06ms +[2025-09-11 11:18:14] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 11:18:14] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 11:18:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 11:18:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 11:18:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 11:18:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 11:18:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:18:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:18:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 11:18:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 11:18:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 11:18:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 11:18:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 11:18:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 11:18:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... 
+[2025-09-11 11:18:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 11:18:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 11:18:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 11:18:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 11:18:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 11:18:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 11:18:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 11:18:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 11:18:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 11:18:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 11:18:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 11:18:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 11:18:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 11:18:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 11:18:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 11:18:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 11:18:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 11:18:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 11:18:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 11:18:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... 
+[2025-09-11 11:18:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 11:18:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 11:18:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 11:18:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 11:18:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 11:18:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 11:18:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 11:18:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 11:18:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 11:18:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:18:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:18:24] [Rank 0] PRINT: step:1200/10000 val_loss:6.0340 total_sharp:8.5572e-04 L1_sharp:3.3118e-01 L2_sharp:3.1321e-01 L3_sharp:3.1518e-01 L4_sharp:3.3222e-01 L5_sharp:4.5606e-01 L6_sharp:3.6873e-01 L7_sharp:3.3987e-01 L8_sharp:3.9845e-01 L9_sharp:4.0786e-01 L10_sharp:5.5745e-01 L11_sharp:7.2852e-01 L12_sharp:7.7684e-01 total_fnorm:1.8125e+01 total_l1_linf:2.6112e+04 total_spectral:9.0625e+00 L1_fnorm:4.7852e-02 L2_fnorm:4.9072e-02 L3_fnorm:4.8828e-02 L4_fnorm:4.8828e-02 L5_fnorm:4.7119e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8584e-02 L8_fnorm:4.6387e-02 L9_fnorm:4.7852e-02 L10_fnorm:4.7119e-02 L11_fnorm:4.6631e-02 L12_fnorm:4.4434e-02 L1_l1linf:1.8799e-02 L2_l1linf:1.8799e-02 L3_l1linf:1.8677e-02 L4_l1linf:1.8677e-02 L5_l1linf:1.8799e-02 L6_l1linf:1.8921e-02 L7_l1linf:1.8799e-02 L8_l1linf:1.8677e-02 L9_l1linf:1.8799e-02 L10_l1linf:1.8799e-02 L11_l1linf:1.8921e-02 L12_l1linf:1.8677e-02 L1_spectral:7.2618e-04 L2_spectral:7.3071e-04 L3_spectral:7.2648e-04 L4_spectral:7.2949e-04 L5_spectral:7.2518e-04 L6_spectral:7.3803e-04 L7_spectral:7.3812e-04 L8_spectral:7.2924e-04 L9_spectral:7.2848e-04 L10_spectral:7.2934e-04 L11_spectral:7.2206e-04 L12_spectral:7.1581e-04 train_time:73918ms step_avg:61.60ms +[2025-09-11 11:18:24] [Rank 0] PRINT: step:1200/10000 val_loss:6.0340 total_sharp:8.5572e-04 L1_sharp:3.3118e-01 L2_sharp:3.1321e-01 L3_sharp:3.1518e-01 L4_sharp:3.3222e-01 L5_sharp:4.5606e-01 L6_sharp:3.6873e-01 L7_sharp:3.3987e-01 L8_sharp:3.9845e-01 L9_sharp:4.0786e-01 L10_sharp:5.5745e-01 L11_sharp:7.2852e-01 L12_sharp:7.7684e-01 total_fnorm:1.8125e+01 total_l1_linf:2.6112e+04 total_spectral:9.0625e+00 L1_fnorm:4.7852e-02 L2_fnorm:4.9072e-02 L3_fnorm:4.8828e-02 L4_fnorm:4.8828e-02 L5_fnorm:4.7119e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8584e-02 L8_fnorm:4.6387e-02 L9_fnorm:4.7852e-02 L10_fnorm:4.7119e-02 L11_fnorm:4.6631e-02 L12_fnorm:4.4434e-02 L1_l1linf:1.8799e-02 L2_l1linf:1.8799e-02 L3_l1linf:1.8677e-02 L4_l1linf:1.8677e-02 L5_l1linf:1.8799e-02 
L6_l1linf:1.8921e-02 L7_l1linf:1.8799e-02 L8_l1linf:1.8677e-02 L9_l1linf:1.8799e-02 L10_l1linf:1.8799e-02 L11_l1linf:1.8921e-02 L12_l1linf:1.8677e-02 L1_spectral:7.2618e-04 L2_spectral:7.3071e-04 L3_spectral:7.2648e-04 L4_spectral:7.2949e-04 L5_spectral:7.2518e-04 L6_spectral:7.3803e-04 L7_spectral:7.3812e-04 L8_spectral:7.2924e-04 L9_spectral:7.2848e-04 L10_spectral:7.2934e-04 L11_spectral:7.2206e-04 L12_spectral:7.1581e-04 train_time:73918ms step_avg:61.60ms +[2025-09-11 11:18:25] [Rank 0] step:1201/10000 train_time:75399ms step_avg:62.78ms +[2025-09-11 11:18:25] [Rank 0] step:1201/10000 train_time:75399ms step_avg:62.78ms +[2025-09-11 11:18:26] [Rank 0] step:1221/10000 train_time:76033ms step_avg:62.27ms +[2025-09-11 11:18:26] [Rank 0] step:1221/10000 train_time:76033ms step_avg:62.27ms +[2025-09-11 11:18:27] [Rank 0] step:1241/10000 train_time:76678ms step_avg:61.79ms +[2025-09-11 11:18:27] [Rank 0] step:1241/10000 train_time:76678ms step_avg:61.79ms +[2025-09-11 11:18:27] [Rank 0] step:1261/10000 train_time:77324ms step_avg:61.32ms +[2025-09-11 11:18:27] [Rank 0] step:1261/10000 train_time:77324ms step_avg:61.32ms +[2025-09-11 11:18:28] [Rank 0] step:1281/10000 train_time:77970ms step_avg:60.87ms +[2025-09-11 11:18:28] [Rank 0] step:1281/10000 train_time:77970ms step_avg:60.87ms +[2025-09-11 11:18:29] [Rank 0] step:1301/10000 train_time:78616ms step_avg:60.43ms +[2025-09-11 11:18:29] [Rank 0] step:1301/10000 train_time:78616ms step_avg:60.43ms +[2025-09-11 11:18:29] [Rank 0] step:1321/10000 train_time:79262ms step_avg:60.00ms +[2025-09-11 11:18:29] [Rank 0] step:1321/10000 train_time:79262ms step_avg:60.00ms +[2025-09-11 11:18:30] [Rank 0] step:1341/10000 train_time:79908ms step_avg:59.59ms +[2025-09-11 11:18:30] [Rank 0] step:1341/10000 train_time:79908ms step_avg:59.59ms +[2025-09-11 11:18:31] [Rank 0] step:1361/10000 train_time:80555ms step_avg:59.19ms +[2025-09-11 11:18:31] [Rank 0] step:1361/10000 train_time:80555ms step_avg:59.19ms +[2025-09-11 11:18:31] 
[Rank 0] step:1381/10000 train_time:81200ms step_avg:58.80ms +[2025-09-11 11:18:31] [Rank 0] step:1381/10000 train_time:81200ms step_avg:58.80ms +[2025-09-11 11:18:32] [Rank 0] step:1401/10000 train_time:81845ms step_avg:58.42ms +[2025-09-11 11:18:32] [Rank 0] step:1401/10000 train_time:81845ms step_avg:58.42ms +[2025-09-11 11:18:33] [Rank 0] step:1421/10000 train_time:82490ms step_avg:58.05ms +[2025-09-11 11:18:33] [Rank 0] step:1421/10000 train_time:82490ms step_avg:58.05ms +[2025-09-11 11:18:33] [Rank 0] step:1441/10000 train_time:83135ms step_avg:57.69ms +[2025-09-11 11:18:33] [Rank 0] step:1441/10000 train_time:83135ms step_avg:57.69ms +[2025-09-11 11:18:34] [Rank 0] step:1461/10000 train_time:83781ms step_avg:57.34ms +[2025-09-11 11:18:34] [Rank 0] step:1461/10000 train_time:83781ms step_avg:57.34ms +[2025-09-11 11:18:35] [Rank 0] step:1481/10000 train_time:84426ms step_avg:57.01ms +[2025-09-11 11:18:35] [Rank 0] step:1481/10000 train_time:84426ms step_avg:57.01ms +[2025-09-11 11:18:35] [Rank 0] step:1501/10000 train_time:85075ms step_avg:56.68ms +[2025-09-11 11:18:35] [Rank 0] step:1501/10000 train_time:85075ms step_avg:56.68ms +[2025-09-11 11:18:36] [Rank 0] step:1521/10000 train_time:85724ms step_avg:56.36ms +[2025-09-11 11:18:36] [Rank 0] step:1521/10000 train_time:85724ms step_avg:56.36ms +[2025-09-11 11:18:36] [Rank 0] step:1541/10000 train_time:86373ms step_avg:56.05ms +[2025-09-11 11:18:36] [Rank 0] step:1541/10000 train_time:86373ms step_avg:56.05ms +[2025-09-11 11:18:37] [Rank 0] step:1561/10000 train_time:87022ms step_avg:55.75ms +[2025-09-11 11:18:37] [Rank 0] step:1561/10000 train_time:87022ms step_avg:55.75ms +[2025-09-11 11:18:38] [Rank 0] step:1581/10000 train_time:87671ms step_avg:55.45ms +[2025-09-11 11:18:38] [Rank 0] step:1581/10000 train_time:87671ms step_avg:55.45ms +[2025-09-11 11:18:38] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 11:18:38] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 11:18:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 11:18:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 11:18:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 11:18:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 11:18:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:18:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:18:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 11:18:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 11:18:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 11:18:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 11:18:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 11:18:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 11:18:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 11:18:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 11:18:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 11:18:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 11:18:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 11:18:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 11:18:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 11:18:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 11:18:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 11:18:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 11:18:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 11:18:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 11:18:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 11:18:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 11:18:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 11:18:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 11:18:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 11:18:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 11:18:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 11:18:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 11:18:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 11:18:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 11:18:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 11:18:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 11:18:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 11:18:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 11:18:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 11:18:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 11:18:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 11:18:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 11:18:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:18:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:18:49] [Rank 0] PRINT: step:1600/10000 val_loss:5.8403 total_sharp:1.5458e-03 L1_sharp:8.5146e-01 L2_sharp:8.0069e-01 L3_sharp:8.2612e-01 L4_sharp:9.6332e-01 L5_sharp:1.0538e+00 L6_sharp:9.8512e-01 L7_sharp:9.8561e-01 L8_sharp:1.1130e+00 L9_sharp:8.4739e-01 L10_sharp:7.9192e-01 L11_sharp:7.7936e-01 L12_sharp:1.1264e+00 total_fnorm:1.7375e+01 total_l1_linf:2.3552e+04 total_spectral:8.6875e+00 L1_fnorm:4.7119e-02 L2_fnorm:4.8828e-02 L3_fnorm:4.8828e-02 L4_fnorm:4.8584e-02 L5_fnorm:4.6631e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8828e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.8584e-02 L10_fnorm:4.7852e-02 L11_fnorm:4.7607e-02 L12_fnorm:4.4922e-02 L1_l1linf:1.7578e-02 L2_l1linf:1.7456e-02 L3_l1linf:1.7334e-02 L4_l1linf:1.7456e-02 L5_l1linf:1.7334e-02 L6_l1linf:1.7578e-02 L7_l1linf:1.7700e-02 L8_l1linf:1.8188e-02 L9_l1linf:1.7700e-02 L10_l1linf:1.7944e-02 L11_l1linf:1.7944e-02 L12_l1linf:1.7822e-02 L1_spectral:7.2665e-04 L2_spectral:7.4727e-04 L3_spectral:7.4756e-04 L4_spectral:7.4807e-04 L5_spectral:7.3276e-04 L6_spectral:7.6011e-04 L7_spectral:7.5687e-04 L8_spectral:7.3230e-04 L9_spectral:7.4853e-04 L10_spectral:7.5533e-04 L11_spectral:7.4356e-04 L12_spectral:7.1969e-04 train_time:88302ms step_avg:55.19ms +[2025-09-11 11:18:49] [Rank 0] PRINT: step:1600/10000 
val_loss:5.8403 total_sharp:1.5458e-03 L1_sharp:8.5146e-01 L2_sharp:8.0069e-01 L3_sharp:8.2612e-01 L4_sharp:9.6332e-01 L5_sharp:1.0538e+00 L6_sharp:9.8512e-01 L7_sharp:9.8561e-01 L8_sharp:1.1130e+00 L9_sharp:8.4739e-01 L10_sharp:7.9192e-01 L11_sharp:7.7936e-01 L12_sharp:1.1264e+00 total_fnorm:1.7375e+01 total_l1_linf:2.3552e+04 total_spectral:8.6875e+00 L1_fnorm:4.7119e-02 L2_fnorm:4.8828e-02 L3_fnorm:4.8828e-02 L4_fnorm:4.8584e-02 L5_fnorm:4.6631e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8828e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.8584e-02 L10_fnorm:4.7852e-02 L11_fnorm:4.7607e-02 L12_fnorm:4.4922e-02 L1_l1linf:1.7578e-02 L2_l1linf:1.7456e-02 L3_l1linf:1.7334e-02 L4_l1linf:1.7456e-02 L5_l1linf:1.7334e-02 L6_l1linf:1.7578e-02 L7_l1linf:1.7700e-02 L8_l1linf:1.8188e-02 L9_l1linf:1.7700e-02 L10_l1linf:1.7944e-02 L11_l1linf:1.7944e-02 L12_l1linf:1.7822e-02 L1_spectral:7.2665e-04 L2_spectral:7.4727e-04 L3_spectral:7.4756e-04 L4_spectral:7.4807e-04 L5_spectral:7.3276e-04 L6_spectral:7.6011e-04 L7_spectral:7.5687e-04 L8_spectral:7.3230e-04 L9_spectral:7.4853e-04 L10_spectral:7.5533e-04 L11_spectral:7.4356e-04 L12_spectral:7.1969e-04 train_time:88302ms step_avg:55.19ms +[2025-09-11 11:18:50] [Rank 0] step:1601/10000 train_time:89795ms step_avg:56.09ms +[2025-09-11 11:18:50] [Rank 0] step:1601/10000 train_time:89795ms step_avg:56.09ms +[2025-09-11 11:18:51] [Rank 0] step:1621/10000 train_time:90435ms step_avg:55.79ms +[2025-09-11 11:18:51] [Rank 0] step:1621/10000 train_time:90435ms step_avg:55.79ms +[2025-09-11 11:18:52] [Rank 0] step:1641/10000 train_time:91086ms step_avg:55.51ms +[2025-09-11 11:18:52] [Rank 0] step:1641/10000 train_time:91086ms step_avg:55.51ms +[2025-09-11 11:18:52] [Rank 0] step:1661/10000 train_time:91737ms step_avg:55.23ms +[2025-09-11 11:18:52] [Rank 0] step:1661/10000 train_time:91737ms step_avg:55.23ms +[2025-09-11 11:18:53] [Rank 0] step:1681/10000 train_time:92389ms step_avg:54.96ms +[2025-09-11 11:18:53] [Rank 0] step:1681/10000 train_time:92389ms 
step_avg:54.96ms +[2025-09-11 11:18:54] [Rank 0] step:1701/10000 train_time:93040ms step_avg:54.70ms +[2025-09-11 11:18:54] [Rank 0] step:1701/10000 train_time:93040ms step_avg:54.70ms +[2025-09-11 11:18:54] [Rank 0] step:1721/10000 train_time:93690ms step_avg:54.44ms +[2025-09-11 11:18:54] [Rank 0] step:1721/10000 train_time:93690ms step_avg:54.44ms +[2025-09-11 11:18:55] [Rank 0] step:1741/10000 train_time:94340ms step_avg:54.19ms +[2025-09-11 11:18:55] [Rank 0] step:1741/10000 train_time:94340ms step_avg:54.19ms +[2025-09-11 11:18:56] [Rank 0] step:1761/10000 train_time:94991ms step_avg:53.94ms +[2025-09-11 11:18:56] [Rank 0] step:1761/10000 train_time:94991ms step_avg:53.94ms +[2025-09-11 11:18:56] [Rank 0] step:1781/10000 train_time:95642ms step_avg:53.70ms +[2025-09-11 11:18:56] [Rank 0] step:1781/10000 train_time:95642ms step_avg:53.70ms +[2025-09-11 11:18:57] [Rank 0] step:1801/10000 train_time:96292ms step_avg:53.47ms +[2025-09-11 11:18:57] [Rank 0] step:1801/10000 train_time:96292ms step_avg:53.47ms +[2025-09-11 11:18:58] [Rank 0] step:1821/10000 train_time:96942ms step_avg:53.24ms +[2025-09-11 11:18:58] [Rank 0] step:1821/10000 train_time:96942ms step_avg:53.24ms +[2025-09-11 11:18:58] [Rank 0] step:1841/10000 train_time:97593ms step_avg:53.01ms +[2025-09-11 11:18:58] [Rank 0] step:1841/10000 train_time:97593ms step_avg:53.01ms +[2025-09-11 11:18:59] [Rank 0] step:1861/10000 train_time:98244ms step_avg:52.79ms +[2025-09-11 11:18:59] [Rank 0] step:1861/10000 train_time:98244ms step_avg:52.79ms +[2025-09-11 11:19:00] [Rank 0] step:1881/10000 train_time:98895ms step_avg:52.58ms +[2025-09-11 11:19:00] [Rank 0] step:1881/10000 train_time:98895ms step_avg:52.58ms +[2025-09-11 11:19:00] [Rank 0] step:1901/10000 train_time:99545ms step_avg:52.36ms +[2025-09-11 11:19:00] [Rank 0] step:1901/10000 train_time:99545ms step_avg:52.36ms +[2025-09-11 11:19:01] [Rank 0] step:1921/10000 train_time:100198ms step_avg:52.16ms +[2025-09-11 11:19:01] [Rank 0] step:1921/10000 
train_time:100198ms step_avg:52.16ms +[2025-09-11 11:19:02] [Rank 0] step:1941/10000 train_time:100849ms step_avg:51.96ms +[2025-09-11 11:19:02] [Rank 0] step:1941/10000 train_time:100849ms step_avg:51.96ms +[2025-09-11 11:19:02] [Rank 0] step:1961/10000 train_time:101501ms step_avg:51.76ms +[2025-09-11 11:19:02] [Rank 0] step:1961/10000 train_time:101501ms step_avg:51.76ms +[2025-09-11 11:19:03] [Rank 0] step:1981/10000 train_time:102153ms step_avg:51.57ms +[2025-09-11 11:19:03] [Rank 0] step:1981/10000 train_time:102153ms step_avg:51.57ms +[2025-09-11 11:19:03] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 11:19:03] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 11:19:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 11:19:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 11:19:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 11:19:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 11:19:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:19:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:19:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 11:19:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 11:19:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 11:19:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 11:19:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 11:19:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 11:19:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 11:19:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 11:19:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 11:19:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 11:19:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 11:19:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 11:19:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 11:19:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 11:19:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 11:19:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 11:19:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 11:19:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 11:19:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 11:19:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 11:19:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 11:19:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 11:19:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 11:19:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 11:19:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 11:19:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 11:19:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 11:19:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 11:19:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 11:19:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 11:19:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 11:19:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 11:19:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 11:19:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 11:19:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 11:19:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 11:19:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:19:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:19:17] [Rank 0] PRINT: step:2000/10000 val_loss:5.6819 total_sharp:2.2527e-03 L1_sharp:7.6869e-01 L2_sharp:7.1287e-01 L3_sharp:8.1884e-01 L4_sharp:8.9957e-01 L5_sharp:1.0151e+00 L6_sharp:1.0623e+00 L7_sharp:1.1128e+00 L8_sharp:1.4133e+00 L9_sharp:2.2267e+00 L10_sharp:3.5471e+00 L11_sharp:4.5055e+00 L12_sharp:5.6064e+00 total_fnorm:1.6750e+01 total_l1_linf:2.3296e+04 total_spectral:8.3750e+00 L1_fnorm:4.8096e-02 L2_fnorm:4.9072e-02 L3_fnorm:4.9072e-02 L4_fnorm:4.9072e-02 L5_fnorm:4.8096e-02 L6_fnorm:4.9072e-02 L7_fnorm:4.9072e-02 L8_fnorm:4.7852e-02 L9_fnorm:4.9072e-02 L10_fnorm:4.8584e-02 L11_fnorm:4.8340e-02 L12_fnorm:4.4922e-02 L1_l1linf:1.6479e-02 L2_l1linf:1.6479e-02 L3_l1linf:1.6479e-02 L4_l1linf:1.6846e-02 L5_l1linf:1.6724e-02 L6_l1linf:1.6968e-02 L7_l1linf:1.6724e-02 L8_l1linf:1.7334e-02 L9_l1linf:1.7578e-02 L10_l1linf:1.7212e-02 L11_l1linf:1.7456e-02 L12_l1linf:1.6602e-02 L1_spectral:7.4784e-04 L2_spectral:7.6054e-04 L3_spectral:7.6229e-04 L4_spectral:7.6448e-04 L5_spectral:7.5551e-04 L6_spectral:7.7334e-04 L7_spectral:7.7243e-04 L8_spectral:7.5690e-04 L9_spectral:7.7403e-04 L10_spectral:7.6838e-04 L11_spectral:7.6168e-04 L12_spectral:7.1507e-04 train_time:102785ms step_avg:51.39ms +[2025-09-11 11:19:17] [Rank 0] PRINT: step:2000/10000 val_loss:5.6819 total_sharp:2.2527e-03 L1_sharp:7.6869e-01 L2_sharp:7.1287e-01 L3_sharp:8.1884e-01 L4_sharp:8.9957e-01 L5_sharp:1.0151e+00 L6_sharp:1.0623e+00 L7_sharp:1.1128e+00 L8_sharp:1.4133e+00 L9_sharp:2.2267e+00 L10_sharp:3.5471e+00 L11_sharp:4.5055e+00 L12_sharp:5.6064e+00 total_fnorm:1.6750e+01 total_l1_linf:2.3296e+04 total_spectral:8.3750e+00 L1_fnorm:4.8096e-02 L2_fnorm:4.9072e-02 L3_fnorm:4.9072e-02 L4_fnorm:4.9072e-02 L5_fnorm:4.8096e-02 L6_fnorm:4.9072e-02 L7_fnorm:4.9072e-02 L8_fnorm:4.7852e-02 L9_fnorm:4.9072e-02 L10_fnorm:4.8584e-02 L11_fnorm:4.8340e-02 L12_fnorm:4.4922e-02 L1_l1linf:1.6479e-02 L2_l1linf:1.6479e-02 L3_l1linf:1.6479e-02 L4_l1linf:1.6846e-02 L5_l1linf:1.6724e-02 
L6_l1linf:1.6968e-02 L7_l1linf:1.6724e-02 L8_l1linf:1.7334e-02 L9_l1linf:1.7578e-02 L10_l1linf:1.7212e-02 L11_l1linf:1.7456e-02 L12_l1linf:1.6602e-02 L1_spectral:7.4784e-04 L2_spectral:7.6054e-04 L3_spectral:7.6229e-04 L4_spectral:7.6448e-04 L5_spectral:7.5551e-04 L6_spectral:7.7334e-04 L7_spectral:7.7243e-04 L8_spectral:7.5690e-04 L9_spectral:7.7403e-04 L10_spectral:7.6838e-04 L11_spectral:7.6168e-04 L12_spectral:7.1507e-04 train_time:102785ms step_avg:51.39ms +[2025-09-11 11:19:19] [Rank 0] step:2001/10000 train_time:104633ms step_avg:52.29ms +[2025-09-11 11:19:19] [Rank 0] step:2001/10000 train_time:104633ms step_avg:52.29ms +[2025-09-11 11:19:19] [Rank 0] step:2021/10000 train_time:105287ms step_avg:52.10ms +[2025-09-11 11:19:19] [Rank 0] step:2021/10000 train_time:105287ms step_avg:52.10ms +[2025-09-11 11:19:20] [Rank 0] step:2041/10000 train_time:105938ms step_avg:51.91ms +[2025-09-11 11:19:20] [Rank 0] step:2041/10000 train_time:105938ms step_avg:51.91ms +[2025-09-11 11:19:20] [Rank 0] step:2061/10000 train_time:106589ms step_avg:51.72ms +[2025-09-11 11:19:20] [Rank 0] step:2061/10000 train_time:106589ms step_avg:51.72ms +[2025-09-11 11:19:21] [Rank 0] step:2081/10000 train_time:107242ms step_avg:51.53ms +[2025-09-11 11:19:21] [Rank 0] step:2081/10000 train_time:107242ms step_avg:51.53ms +[2025-09-11 11:19:22] [Rank 0] step:2101/10000 train_time:107892ms step_avg:51.35ms +[2025-09-11 11:19:22] [Rank 0] step:2101/10000 train_time:107892ms step_avg:51.35ms +[2025-09-11 11:19:22] [Rank 0] step:2121/10000 train_time:108542ms step_avg:51.18ms +[2025-09-11 11:19:22] [Rank 0] step:2121/10000 train_time:108542ms step_avg:51.18ms +[2025-09-11 11:19:23] [Rank 0] step:2141/10000 train_time:109194ms step_avg:51.00ms +[2025-09-11 11:19:23] [Rank 0] step:2141/10000 train_time:109194ms step_avg:51.00ms +[2025-09-11 11:19:24] [Rank 0] step:2161/10000 train_time:109844ms step_avg:50.83ms +[2025-09-11 11:19:24] [Rank 0] step:2161/10000 train_time:109844ms step_avg:50.83ms 
+[2025-09-11 11:19:24] [Rank 0] step:2181/10000 train_time:110497ms step_avg:50.66ms +[2025-09-11 11:19:24] [Rank 0] step:2181/10000 train_time:110497ms step_avg:50.66ms +[2025-09-11 11:19:25] [Rank 0] step:2201/10000 train_time:111148ms step_avg:50.50ms +[2025-09-11 11:19:25] [Rank 0] step:2201/10000 train_time:111148ms step_avg:50.50ms +[2025-09-11 11:19:26] [Rank 0] step:2221/10000 train_time:112099ms step_avg:50.47ms +[2025-09-11 11:19:26] [Rank 0] step:2221/10000 train_time:112099ms step_avg:50.47ms +[2025-09-11 11:19:27] [Rank 0] step:2241/10000 train_time:112761ms step_avg:50.32ms +[2025-09-11 11:19:27] [Rank 0] step:2241/10000 train_time:112761ms step_avg:50.32ms +[2025-09-11 11:19:27] [Rank 0] step:2261/10000 train_time:113425ms step_avg:50.17ms +[2025-09-11 11:19:27] [Rank 0] step:2261/10000 train_time:113425ms step_avg:50.17ms +[2025-09-11 11:19:28] [Rank 0] step:2281/10000 train_time:114239ms step_avg:50.08ms +[2025-09-11 11:19:28] [Rank 0] step:2281/10000 train_time:114239ms step_avg:50.08ms +[2025-09-11 11:19:29] [Rank 0] step:2301/10000 train_time:115017ms step_avg:49.99ms +[2025-09-11 11:19:29] [Rank 0] step:2301/10000 train_time:115017ms step_avg:49.99ms +[2025-09-11 11:19:30] [Rank 0] step:2321/10000 train_time:115682ms step_avg:49.84ms +[2025-09-11 11:19:30] [Rank 0] step:2321/10000 train_time:115682ms step_avg:49.84ms +[2025-09-11 11:19:30] [Rank 0] step:2341/10000 train_time:116346ms step_avg:49.70ms +[2025-09-11 11:19:30] [Rank 0] step:2341/10000 train_time:116346ms step_avg:49.70ms +[2025-09-11 11:19:31] [Rank 0] step:2361/10000 train_time:117010ms step_avg:49.56ms +[2025-09-11 11:19:31] [Rank 0] step:2361/10000 train_time:117010ms step_avg:49.56ms +[2025-09-11 11:19:32] [Rank 0] step:2381/10000 train_time:117672ms step_avg:49.42ms +[2025-09-11 11:19:32] [Rank 0] step:2381/10000 train_time:117672ms step_avg:49.42ms +[2025-09-11 11:19:32] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 11:19:32] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 11:19:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 11:19:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 11:19:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 11:19:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 11:19:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:19:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:19:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 11:19:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 11:19:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 11:19:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 11:19:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 11:19:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 11:19:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 11:19:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 11:19:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 11:19:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 11:19:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 11:19:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 11:19:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 11:19:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 11:19:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 11:19:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 11:19:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 11:19:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 11:19:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 11:19:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 11:19:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 11:19:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 11:19:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 11:19:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 11:19:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 11:19:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 11:19:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 11:19:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 11:19:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 11:19:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 11:19:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 11:19:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 11:19:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 11:19:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 11:19:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 11:19:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 11:19:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:19:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:19:43] [Rank 0] PRINT: step:2400/10000 val_loss:5.5484 total_sharp:2.1065e-03 L1_sharp:7.3913e-01 L2_sharp:8.1144e-01 L3_sharp:9.1397e-01 L4_sharp:1.0767e+00 L5_sharp:1.1965e+00 L6_sharp:1.3675e+00 L7_sharp:1.3441e+00 L8_sharp:1.4201e+00 L9_sharp:1.1514e+00 L10_sharp:9.4664e-01 L11_sharp:9.0579e-01 L12_sharp:1.2386e+00 total_fnorm:1.5375e+01 total_l1_linf:1.9968e+04 total_spectral:7.6562e+00 L1_fnorm:4.7607e-02 L2_fnorm:4.8096e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.8096e-02 L5_fnorm:4.7607e-02 L6_fnorm:4.8340e-02 L7_fnorm:4.8584e-02 L8_fnorm:4.7852e-02 L9_fnorm:4.8584e-02 L10_fnorm:4.7852e-02 L11_fnorm:4.8096e-02 L12_fnorm:4.6631e-02 L1_l1linf:1.5625e-02 L2_l1linf:1.5869e-02 L3_l1linf:1.5747e-02 L4_l1linf:1.5869e-02 L5_l1linf:1.5869e-02 L6_l1linf:1.6113e-02 L7_l1linf:1.6113e-02 L8_l1linf:1.6479e-02 L9_l1linf:1.6357e-02 L10_l1linf:1.6479e-02 L11_l1linf:1.6968e-02 L12_l1linf:1.6235e-02 L1_spectral:7.4836e-04 L2_spectral:7.5085e-04 L3_spectral:7.5376e-04 L4_spectral:7.5655e-04 L5_spectral:7.6164e-04 L6_spectral:7.7340e-04 L7_spectral:7.6994e-04 L8_spectral:7.6698e-04 L9_spectral:7.7499e-04 L10_spectral:7.7540e-04 L11_spectral:7.6919e-04 L12_spectral:7.4581e-04 train_time:118317ms step_avg:49.30ms +[2025-09-11 11:19:43] [Rank 0] PRINT: step:2400/10000 
val_loss:5.5484 total_sharp:2.1065e-03 L1_sharp:7.3913e-01 L2_sharp:8.1144e-01 L3_sharp:9.1397e-01 L4_sharp:1.0767e+00 L5_sharp:1.1965e+00 L6_sharp:1.3675e+00 L7_sharp:1.3441e+00 L8_sharp:1.4201e+00 L9_sharp:1.1514e+00 L10_sharp:9.4664e-01 L11_sharp:9.0579e-01 L12_sharp:1.2386e+00 total_fnorm:1.5375e+01 total_l1_linf:1.9968e+04 total_spectral:7.6562e+00 L1_fnorm:4.7607e-02 L2_fnorm:4.8096e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.8096e-02 L5_fnorm:4.7607e-02 L6_fnorm:4.8340e-02 L7_fnorm:4.8584e-02 L8_fnorm:4.7852e-02 L9_fnorm:4.8584e-02 L10_fnorm:4.7852e-02 L11_fnorm:4.8096e-02 L12_fnorm:4.6631e-02 L1_l1linf:1.5625e-02 L2_l1linf:1.5869e-02 L3_l1linf:1.5747e-02 L4_l1linf:1.5869e-02 L5_l1linf:1.5869e-02 L6_l1linf:1.6113e-02 L7_l1linf:1.6113e-02 L8_l1linf:1.6479e-02 L9_l1linf:1.6357e-02 L10_l1linf:1.6479e-02 L11_l1linf:1.6968e-02 L12_l1linf:1.6235e-02 L1_spectral:7.4836e-04 L2_spectral:7.5085e-04 L3_spectral:7.5376e-04 L4_spectral:7.5655e-04 L5_spectral:7.6164e-04 L6_spectral:7.7340e-04 L7_spectral:7.6994e-04 L8_spectral:7.6698e-04 L9_spectral:7.7499e-04 L10_spectral:7.7540e-04 L11_spectral:7.6919e-04 L12_spectral:7.4581e-04 train_time:118317ms step_avg:49.30ms +[2025-09-11 11:19:45] [Rank 0] step:2401/10000 train_time:120104ms step_avg:50.02ms +[2025-09-11 11:19:45] [Rank 0] step:2401/10000 train_time:120104ms step_avg:50.02ms +[2025-09-11 11:19:46] [Rank 0] step:2421/10000 train_time:120785ms step_avg:49.89ms +[2025-09-11 11:19:46] [Rank 0] step:2421/10000 train_time:120785ms step_avg:49.89ms +[2025-09-11 11:19:46] [Rank 0] step:2441/10000 train_time:121451ms step_avg:49.75ms +[2025-09-11 11:19:46] [Rank 0] step:2441/10000 train_time:121451ms step_avg:49.75ms +[2025-09-11 11:19:47] [Rank 0] step:2461/10000 train_time:122117ms step_avg:49.62ms +[2025-09-11 11:19:47] [Rank 0] step:2461/10000 train_time:122117ms step_avg:49.62ms +[2025-09-11 11:19:48] [Rank 0] step:2481/10000 train_time:122782ms step_avg:49.49ms +[2025-09-11 11:19:48] [Rank 0] step:2481/10000 
train_time:122782ms step_avg:49.49ms +[2025-09-11 11:19:48] [Rank 0] step:2501/10000 train_time:123446ms step_avg:49.36ms +[2025-09-11 11:19:48] [Rank 0] step:2501/10000 train_time:123446ms step_avg:49.36ms +[2025-09-11 11:19:49] [Rank 0] step:2521/10000 train_time:124111ms step_avg:49.23ms +[2025-09-11 11:19:49] [Rank 0] step:2521/10000 train_time:124111ms step_avg:49.23ms +[2025-09-11 11:19:50] [Rank 0] step:2541/10000 train_time:124775ms step_avg:49.10ms +[2025-09-11 11:19:50] [Rank 0] step:2541/10000 train_time:124775ms step_avg:49.10ms +[2025-09-11 11:19:50] [Rank 0] step:2561/10000 train_time:125441ms step_avg:48.98ms +[2025-09-11 11:19:50] [Rank 0] step:2561/10000 train_time:125441ms step_avg:48.98ms +[2025-09-11 11:19:51] [Rank 0] step:2581/10000 train_time:126105ms step_avg:48.86ms +[2025-09-11 11:19:51] [Rank 0] step:2581/10000 train_time:126105ms step_avg:48.86ms +[2025-09-11 11:19:52] [Rank 0] step:2601/10000 train_time:126771ms step_avg:48.74ms +[2025-09-11 11:19:52] [Rank 0] step:2601/10000 train_time:126771ms step_avg:48.74ms +[2025-09-11 11:19:52] [Rank 0] step:2621/10000 train_time:127435ms step_avg:48.62ms +[2025-09-11 11:19:52] [Rank 0] step:2621/10000 train_time:127435ms step_avg:48.62ms +[2025-09-11 11:19:53] [Rank 0] step:2641/10000 train_time:128098ms step_avg:48.50ms +[2025-09-11 11:19:53] [Rank 0] step:2641/10000 train_time:128098ms step_avg:48.50ms +[2025-09-11 11:19:54] [Rank 0] step:2661/10000 train_time:128768ms step_avg:48.39ms +[2025-09-11 11:19:54] [Rank 0] step:2661/10000 train_time:128768ms step_avg:48.39ms +[2025-09-11 11:19:54] [Rank 0] step:2681/10000 train_time:129432ms step_avg:48.28ms +[2025-09-11 11:19:54] [Rank 0] step:2681/10000 train_time:129432ms step_avg:48.28ms +[2025-09-11 11:19:55] [Rank 0] step:2701/10000 train_time:130097ms step_avg:48.17ms +[2025-09-11 11:19:55] [Rank 0] step:2701/10000 train_time:130097ms step_avg:48.17ms +[2025-09-11 11:19:56] [Rank 0] step:2721/10000 train_time:130762ms step_avg:48.06ms 
+[2025-09-11 11:19:56] [Rank 0] step:2721/10000 train_time:130762ms step_avg:48.06ms +[2025-09-11 11:19:56] [Rank 0] step:2741/10000 train_time:131425ms step_avg:47.95ms +[2025-09-11 11:19:56] [Rank 0] step:2741/10000 train_time:131425ms step_avg:47.95ms +[2025-09-11 11:19:57] [Rank 0] step:2761/10000 train_time:132091ms step_avg:47.84ms +[2025-09-11 11:19:57] [Rank 0] step:2761/10000 train_time:132091ms step_avg:47.84ms +[2025-09-11 11:19:58] [Rank 0] step:2781/10000 train_time:132755ms step_avg:47.74ms +[2025-09-11 11:19:58] [Rank 0] step:2781/10000 train_time:132755ms step_avg:47.74ms +[2025-09-11 11:19:58] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 11:19:58] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 11:19:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 11:19:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 11:20:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 11:20:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 11:20:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:20:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:20:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 11:20:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 11:20:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 11:20:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 11:20:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 11:20:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 11:20:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 11:20:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 11:20:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 11:20:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 11:20:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 11:20:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 11:20:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 11:20:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 11:20:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 11:20:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 11:20:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 11:20:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 11:20:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 11:20:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 11:20:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 11:20:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 11:20:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 11:20:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 11:20:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 11:20:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 11:20:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 11:20:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 11:20:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 11:20:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 11:20:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 11:20:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 11:20:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 11:20:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 11:20:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 11:20:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 11:20:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:20:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:20:09] [Rank 0] PRINT: step:2800/10000 val_loss:5.4401 total_sharp:2.6221e-03 L1_sharp:5.7823e-01 L2_sharp:6.7930e-01 L3_sharp:7.3713e-01 L4_sharp:9.3177e-01 L5_sharp:1.1505e+00 L6_sharp:1.4144e+00 L7_sharp:1.4559e+00 L8_sharp:1.5500e+00 L9_sharp:1.6725e+00 L10_sharp:1.6759e+00 L11_sharp:2.1363e+00 L12_sharp:3.2870e+00 total_fnorm:1.4375e+01 total_l1_linf:1.8304e+04 total_spectral:7.1875e+00 L1_fnorm:4.7363e-02 L2_fnorm:4.8096e-02 L3_fnorm:4.7852e-02 L4_fnorm:4.8096e-02 L5_fnorm:4.7363e-02 L6_fnorm:4.8340e-02 L7_fnorm:4.8584e-02 L8_fnorm:4.7852e-02 L9_fnorm:4.8584e-02 L10_fnorm:4.7607e-02 L11_fnorm:4.8096e-02 L12_fnorm:4.6143e-02 L1_l1linf:1.4893e-02 L2_l1linf:1.4954e-02 L3_l1linf:1.5076e-02 L4_l1linf:1.5320e-02 L5_l1linf:1.5320e-02 L6_l1linf:1.5503e-02 L7_l1linf:1.5991e-02 L8_l1linf:1.5991e-02 L9_l1linf:1.6113e-02 L10_l1linf:1.6235e-02 L11_l1linf:1.6724e-02 L12_l1linf:1.5869e-02 L1_spectral:7.5033e-04 L2_spectral:7.6171e-04 L3_spectral:7.5390e-04 L4_spectral:7.5830e-04 L5_spectral:7.5395e-04 L6_spectral:7.7064e-04 L7_spectral:7.7956e-04 L8_spectral:7.6989e-04 L9_spectral:7.7936e-04 L10_spectral:7.7509e-04 L11_spectral:7.7429e-04 L12_spectral:7.4073e-04 train_time:133401ms step_avg:47.64ms +[2025-09-11 11:20:09] [Rank 0] PRINT: step:2800/10000 val_loss:5.4401 total_sharp:2.6221e-03 L1_sharp:5.7823e-01 L2_sharp:6.7930e-01 L3_sharp:7.3713e-01 L4_sharp:9.3177e-01 L5_sharp:1.1505e+00 L6_sharp:1.4144e+00 L7_sharp:1.4559e+00 L8_sharp:1.5500e+00 L9_sharp:1.6725e+00 L10_sharp:1.6759e+00 L11_sharp:2.1363e+00 L12_sharp:3.2870e+00 total_fnorm:1.4375e+01 total_l1_linf:1.8304e+04 total_spectral:7.1875e+00 L1_fnorm:4.7363e-02 L2_fnorm:4.8096e-02 L3_fnorm:4.7852e-02 L4_fnorm:4.8096e-02 L5_fnorm:4.7363e-02 L6_fnorm:4.8340e-02 L7_fnorm:4.8584e-02 L8_fnorm:4.7852e-02 L9_fnorm:4.8584e-02 L10_fnorm:4.7607e-02 L11_fnorm:4.8096e-02 L12_fnorm:4.6143e-02 L1_l1linf:1.4893e-02 L2_l1linf:1.4954e-02 L3_l1linf:1.5076e-02 L4_l1linf:1.5320e-02 L5_l1linf:1.5320e-02 
L6_l1linf:1.5503e-02 L7_l1linf:1.5991e-02 L8_l1linf:1.5991e-02 L9_l1linf:1.6113e-02 L10_l1linf:1.6235e-02 L11_l1linf:1.6724e-02 L12_l1linf:1.5869e-02 L1_spectral:7.5033e-04 L2_spectral:7.6171e-04 L3_spectral:7.5390e-04 L4_spectral:7.5830e-04 L5_spectral:7.5395e-04 L6_spectral:7.7064e-04 L7_spectral:7.7956e-04 L8_spectral:7.6989e-04 L9_spectral:7.7936e-04 L10_spectral:7.7509e-04 L11_spectral:7.7429e-04 L12_spectral:7.4073e-04 train_time:133401ms step_avg:47.64ms +[2025-09-11 11:20:11] [Rank 0] step:2801/10000 train_time:136008ms step_avg:48.56ms +[2025-09-11 11:20:11] [Rank 0] step:2801/10000 train_time:136008ms step_avg:48.56ms +[2025-09-11 11:20:12] [Rank 0] step:2821/10000 train_time:136692ms step_avg:48.45ms +[2025-09-11 11:20:12] [Rank 0] step:2821/10000 train_time:136692ms step_avg:48.45ms +[2025-09-11 11:20:13] [Rank 0] step:2841/10000 train_time:137357ms step_avg:48.35ms +[2025-09-11 11:20:13] [Rank 0] step:2841/10000 train_time:137357ms step_avg:48.35ms +[2025-09-11 11:20:13] [Rank 0] step:2861/10000 train_time:138021ms step_avg:48.24ms +[2025-09-11 11:20:13] [Rank 0] step:2861/10000 train_time:138021ms step_avg:48.24ms +[2025-09-11 11:20:14] [Rank 0] step:2881/10000 train_time:138686ms step_avg:48.14ms +[2025-09-11 11:20:14] [Rank 0] step:2881/10000 train_time:138686ms step_avg:48.14ms +[2025-09-11 11:20:15] [Rank 0] step:2901/10000 train_time:139349ms step_avg:48.03ms +[2025-09-11 11:20:15] [Rank 0] step:2901/10000 train_time:139349ms step_avg:48.03ms +[2025-09-11 11:20:15] [Rank 0] step:2921/10000 train_time:140012ms step_avg:47.93ms +[2025-09-11 11:20:15] [Rank 0] step:2921/10000 train_time:140012ms step_avg:47.93ms +[2025-09-11 11:20:16] [Rank 0] step:2941/10000 train_time:140678ms step_avg:47.83ms +[2025-09-11 11:20:16] [Rank 0] step:2941/10000 train_time:140678ms step_avg:47.83ms +[2025-09-11 11:20:17] [Rank 0] step:2961/10000 train_time:141341ms step_avg:47.73ms +[2025-09-11 11:20:17] [Rank 0] step:2961/10000 train_time:141341ms step_avg:47.73ms 
+[2025-09-11 11:20:17] [Rank 0] step:2981/10000 train_time:142008ms step_avg:47.64ms +[2025-09-11 11:20:17] [Rank 0] step:2981/10000 train_time:142008ms step_avg:47.64ms +[2025-09-11 11:20:18] [Rank 0] step:3001/10000 train_time:142674ms step_avg:47.54ms +[2025-09-11 11:20:18] [Rank 0] step:3001/10000 train_time:142674ms step_avg:47.54ms +[2025-09-11 11:20:19] [Rank 0] step:3021/10000 train_time:143340ms step_avg:47.45ms +[2025-09-11 11:20:19] [Rank 0] step:3021/10000 train_time:143340ms step_avg:47.45ms +[2025-09-11 11:20:19] [Rank 0] step:3041/10000 train_time:144006ms step_avg:47.35ms +[2025-09-11 11:20:19] [Rank 0] step:3041/10000 train_time:144006ms step_avg:47.35ms +[2025-09-11 11:20:20] [Rank 0] step:3061/10000 train_time:144672ms step_avg:47.26ms +[2025-09-11 11:20:20] [Rank 0] step:3061/10000 train_time:144672ms step_avg:47.26ms +[2025-09-11 11:20:21] [Rank 0] step:3081/10000 train_time:145338ms step_avg:47.17ms +[2025-09-11 11:20:21] [Rank 0] step:3081/10000 train_time:145338ms step_avg:47.17ms +[2025-09-11 11:20:21] [Rank 0] step:3101/10000 train_time:146004ms step_avg:47.08ms +[2025-09-11 11:20:21] [Rank 0] step:3101/10000 train_time:146004ms step_avg:47.08ms +[2025-09-11 11:20:22] [Rank 0] step:3121/10000 train_time:146670ms step_avg:46.99ms +[2025-09-11 11:20:22] [Rank 0] step:3121/10000 train_time:146670ms step_avg:46.99ms +[2025-09-11 11:20:23] [Rank 0] step:3141/10000 train_time:147336ms step_avg:46.91ms +[2025-09-11 11:20:23] [Rank 0] step:3141/10000 train_time:147336ms step_avg:46.91ms +[2025-09-11 11:20:23] [Rank 0] step:3161/10000 train_time:148002ms step_avg:46.82ms +[2025-09-11 11:20:23] [Rank 0] step:3161/10000 train_time:148002ms step_avg:46.82ms +[2025-09-11 11:20:24] [Rank 0] step:3181/10000 train_time:148668ms step_avg:46.74ms +[2025-09-11 11:20:24] [Rank 0] step:3181/10000 train_time:148668ms step_avg:46.74ms +[2025-09-11 11:20:25] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 11:20:25] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 11:20:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 11:20:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 11:20:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 11:20:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 11:20:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:20:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:20:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 11:20:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 11:20:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 11:20:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 11:20:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 11:20:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 11:20:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 11:20:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 11:20:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 11:20:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 11:20:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 11:20:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 11:20:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 11:20:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 11:20:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 11:20:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 11:20:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 11:20:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 11:20:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 11:20:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 11:20:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 11:20:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 11:20:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 11:20:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 11:20:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 11:20:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 11:20:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 11:20:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 11:20:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 11:20:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 11:20:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 11:20:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 11:20:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 11:20:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 11:20:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 11:20:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 11:20:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:20:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:20:36] [Rank 0] PRINT: step:3200/10000 val_loss:5.3516 total_sharp:1.4829e-03 L1_sharp:4.4443e-01 L2_sharp:5.1871e-01 L3_sharp:5.8494e-01 L4_sharp:8.2903e-01 L5_sharp:1.0214e+00 L6_sharp:1.3682e+00 L7_sharp:1.4765e+00 L8_sharp:1.6049e+00 L9_sharp:1.4748e+00 L10_sharp:1.3473e+00 L11_sharp:9.9275e-01 L12_sharp:2.2401e+00 total_fnorm:1.6750e+01 total_l1_linf:2.2528e+04 total_spectral:8.3750e+00 L1_fnorm:4.7852e-02 L2_fnorm:4.8096e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.8340e-02 L5_fnorm:4.7852e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8584e-02 L8_fnorm:4.7852e-02 L9_fnorm:4.8096e-02 L10_fnorm:4.7607e-02 L11_fnorm:4.8096e-02 L12_fnorm:4.7119e-02 L1_l1linf:1.4038e-02 L2_l1linf:1.4099e-02 L3_l1linf:1.4221e-02 L4_l1linf:1.4893e-02 L5_l1linf:1.4771e-02 L6_l1linf:1.5137e-02 L7_l1linf:1.5503e-02 L8_l1linf:1.5259e-02 L9_l1linf:1.5259e-02 L10_l1linf:1.5137e-02 L11_l1linf:1.5869e-02 L12_l1linf:1.5503e-02 L1_spectral:7.6857e-04 L2_spectral:7.7380e-04 L3_spectral:7.7454e-04 L4_spectral:7.8009e-04 L5_spectral:7.7302e-04 L6_spectral:7.8470e-04 L7_spectral:7.8443e-04 L8_spectral:7.8079e-04 L9_spectral:7.8663e-04 L10_spectral:7.8941e-04 L11_spectral:7.8455e-04 L12_spectral:7.6075e-04 train_time:149315ms step_avg:46.66ms +[2025-09-11 11:20:36] [Rank 0] PRINT: step:3200/10000 
val_loss:5.3516 total_sharp:1.4829e-03 L1_sharp:4.4443e-01 L2_sharp:5.1871e-01 L3_sharp:5.8494e-01 L4_sharp:8.2903e-01 L5_sharp:1.0214e+00 L6_sharp:1.3682e+00 L7_sharp:1.4765e+00 L8_sharp:1.6049e+00 L9_sharp:1.4748e+00 L10_sharp:1.3473e+00 L11_sharp:9.9275e-01 L12_sharp:2.2401e+00 total_fnorm:1.6750e+01 total_l1_linf:2.2528e+04 total_spectral:8.3750e+00 L1_fnorm:4.7852e-02 L2_fnorm:4.8096e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.8340e-02 L5_fnorm:4.7852e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8584e-02 L8_fnorm:4.7852e-02 L9_fnorm:4.8096e-02 L10_fnorm:4.7607e-02 L11_fnorm:4.8096e-02 L12_fnorm:4.7119e-02 L1_l1linf:1.4038e-02 L2_l1linf:1.4099e-02 L3_l1linf:1.4221e-02 L4_l1linf:1.4893e-02 L5_l1linf:1.4771e-02 L6_l1linf:1.5137e-02 L7_l1linf:1.5503e-02 L8_l1linf:1.5259e-02 L9_l1linf:1.5259e-02 L10_l1linf:1.5137e-02 L11_l1linf:1.5869e-02 L12_l1linf:1.5503e-02 L1_spectral:7.6857e-04 L2_spectral:7.7380e-04 L3_spectral:7.7454e-04 L4_spectral:7.8009e-04 L5_spectral:7.7302e-04 L6_spectral:7.8470e-04 L7_spectral:7.8443e-04 L8_spectral:7.8079e-04 L9_spectral:7.8663e-04 L10_spectral:7.8941e-04 L11_spectral:7.8455e-04 L12_spectral:7.6075e-04 train_time:149315ms step_avg:46.66ms +[2025-09-11 11:20:38] [Rank 0] step:3201/10000 train_time:151361ms step_avg:47.29ms +[2025-09-11 11:20:38] [Rank 0] step:3201/10000 train_time:151361ms step_avg:47.29ms +[2025-09-11 11:20:39] [Rank 0] step:3221/10000 train_time:152057ms step_avg:47.21ms +[2025-09-11 11:20:39] [Rank 0] step:3221/10000 train_time:152057ms step_avg:47.21ms +[2025-09-11 11:20:39] [Rank 0] step:3241/10000 train_time:152725ms step_avg:47.12ms +[2025-09-11 11:20:39] [Rank 0] step:3241/10000 train_time:152725ms step_avg:47.12ms +[2025-09-11 11:20:40] [Rank 0] step:3261/10000 train_time:153394ms step_avg:47.04ms +[2025-09-11 11:20:40] [Rank 0] step:3261/10000 train_time:153394ms step_avg:47.04ms +[2025-09-11 11:20:41] [Rank 0] step:3281/10000 train_time:154062ms step_avg:46.96ms +[2025-09-11 11:20:41] [Rank 0] step:3281/10000 
train_time:154062ms step_avg:46.96ms +[2025-09-11 11:20:41] [Rank 0] step:3301/10000 train_time:154729ms step_avg:46.87ms +[2025-09-11 11:20:41] [Rank 0] step:3301/10000 train_time:154729ms step_avg:46.87ms +[2025-09-11 11:20:42] [Rank 0] step:3321/10000 train_time:155396ms step_avg:46.79ms +[2025-09-11 11:20:42] [Rank 0] step:3321/10000 train_time:155396ms step_avg:46.79ms +[2025-09-11 11:20:43] [Rank 0] step:3341/10000 train_time:156064ms step_avg:46.71ms +[2025-09-11 11:20:43] [Rank 0] step:3341/10000 train_time:156064ms step_avg:46.71ms +[2025-09-11 11:20:43] [Rank 0] step:3361/10000 train_time:156732ms step_avg:46.63ms +[2025-09-11 11:20:43] [Rank 0] step:3361/10000 train_time:156732ms step_avg:46.63ms +[2025-09-11 11:20:44] [Rank 0] step:3381/10000 train_time:157400ms step_avg:46.55ms +[2025-09-11 11:20:44] [Rank 0] step:3381/10000 train_time:157400ms step_avg:46.55ms +[2025-09-11 11:20:45] [Rank 0] step:3401/10000 train_time:158068ms step_avg:46.48ms +[2025-09-11 11:20:45] [Rank 0] step:3401/10000 train_time:158068ms step_avg:46.48ms +[2025-09-11 11:20:45] [Rank 0] step:3421/10000 train_time:158735ms step_avg:46.40ms +[2025-09-11 11:20:45] [Rank 0] step:3421/10000 train_time:158735ms step_avg:46.40ms +[2025-09-11 11:20:46] [Rank 0] step:3441/10000 train_time:159402ms step_avg:46.32ms +[2025-09-11 11:20:46] [Rank 0] step:3441/10000 train_time:159402ms step_avg:46.32ms +[2025-09-11 11:20:47] [Rank 0] step:3461/10000 train_time:160069ms step_avg:46.25ms +[2025-09-11 11:20:47] [Rank 0] step:3461/10000 train_time:160069ms step_avg:46.25ms +[2025-09-11 11:20:47] [Rank 0] step:3481/10000 train_time:160736ms step_avg:46.18ms +[2025-09-11 11:20:47] [Rank 0] step:3481/10000 train_time:160736ms step_avg:46.18ms +[2025-09-11 11:20:48] [Rank 0] step:3501/10000 train_time:161403ms step_avg:46.10ms +[2025-09-11 11:20:48] [Rank 0] step:3501/10000 train_time:161403ms step_avg:46.10ms +[2025-09-11 11:20:49] [Rank 0] step:3521/10000 train_time:162070ms step_avg:46.03ms 
+[2025-09-11 11:20:49] [Rank 0] step:3521/10000 train_time:162070ms step_avg:46.03ms +[2025-09-11 11:20:49] [Rank 0] step:3541/10000 train_time:162736ms step_avg:45.96ms +[2025-09-11 11:20:49] [Rank 0] step:3541/10000 train_time:162736ms step_avg:45.96ms +[2025-09-11 11:20:50] [Rank 0] step:3561/10000 train_time:163404ms step_avg:45.89ms +[2025-09-11 11:20:50] [Rank 0] step:3561/10000 train_time:163404ms step_avg:45.89ms +[2025-09-11 11:20:51] [Rank 0] step:3581/10000 train_time:164072ms step_avg:45.82ms +[2025-09-11 11:20:51] [Rank 0] step:3581/10000 train_time:164072ms step_avg:45.82ms +[2025-09-11 11:20:51] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 11:20:51] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 11:20:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 11:20:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 11:20:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 11:20:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 11:20:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:20:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:20:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 11:20:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 11:20:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 11:20:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 11:20:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 11:20:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 11:20:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 11:20:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 11:20:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 11:20:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 11:20:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 11:20:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 11:20:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 11:20:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 11:20:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 11:20:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 11:20:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 11:20:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 11:21:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 11:21:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 11:21:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 11:21:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 11:21:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 11:21:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 11:21:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 11:21:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 11:21:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 11:21:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 11:21:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 11:21:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 11:21:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 11:21:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 11:21:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 11:21:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 11:21:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 11:21:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 11:21:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:21:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:21:02] [Rank 0] PRINT: step:3600/10000 val_loss:5.2911 total_sharp:1.6223e-03 L1_sharp:3.8639e-01 L2_sharp:4.6532e-01 L3_sharp:5.8354e-01 L4_sharp:7.8948e-01 L5_sharp:9.8364e-01 L6_sharp:1.1824e+00 L7_sharp:1.2186e+00 L8_sharp:9.7433e-01 L9_sharp:1.0585e+00 L10_sharp:1.2412e+00 L11_sharp:1.0510e+00 L12_sharp:2.2330e+00 total_fnorm:1.4562e+01 total_l1_linf:1.8432e+04 total_spectral:7.2812e+00 L1_fnorm:4.7607e-02 L2_fnorm:4.8096e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.8096e-02 L5_fnorm:4.7852e-02 L6_fnorm:4.8340e-02 L7_fnorm:4.8340e-02 L8_fnorm:4.7607e-02 L9_fnorm:4.7852e-02 L10_fnorm:4.7607e-02 L11_fnorm:4.7852e-02 L12_fnorm:4.7119e-02 L1_l1linf:1.3428e-02 L2_l1linf:1.3428e-02 L3_l1linf:1.3733e-02 L4_l1linf:1.3916e-02 L5_l1linf:1.4160e-02 L6_l1linf:1.4587e-02 L7_l1linf:1.4221e-02 L8_l1linf:1.4099e-02 L9_l1linf:1.4587e-02 L10_l1linf:1.4343e-02 L11_l1linf:1.5137e-02 L12_l1linf:1.5381e-02 L1_spectral:7.7653e-04 L2_spectral:7.8151e-04 L3_spectral:7.8125e-04 L4_spectral:7.8426e-04 L5_spectral:7.8832e-04 L6_spectral:7.9324e-04 L7_spectral:7.8979e-04 L8_spectral:7.9689e-04 L9_spectral:7.9466e-04 L10_spectral:7.9585e-04 L11_spectral:7.8962e-04 L12_spectral:7.6755e-04 train_time:164720ms step_avg:45.76ms +[2025-09-11 11:21:02] [Rank 0] PRINT: step:3600/10000 val_loss:5.2911 total_sharp:1.6223e-03 L1_sharp:3.8639e-01 L2_sharp:4.6532e-01 L3_sharp:5.8354e-01 L4_sharp:7.8948e-01 L5_sharp:9.8364e-01 L6_sharp:1.1824e+00 L7_sharp:1.2186e+00 L8_sharp:9.7433e-01 L9_sharp:1.0585e+00 L10_sharp:1.2412e+00 L11_sharp:1.0510e+00 L12_sharp:2.2330e+00 total_fnorm:1.4562e+01 total_l1_linf:1.8432e+04 total_spectral:7.2812e+00 L1_fnorm:4.7607e-02 L2_fnorm:4.8096e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.8096e-02 L5_fnorm:4.7852e-02 L6_fnorm:4.8340e-02 L7_fnorm:4.8340e-02 L8_fnorm:4.7607e-02 L9_fnorm:4.7852e-02 L10_fnorm:4.7607e-02 L11_fnorm:4.7852e-02 L12_fnorm:4.7119e-02 L1_l1linf:1.3428e-02 L2_l1linf:1.3428e-02 L3_l1linf:1.3733e-02 L4_l1linf:1.3916e-02 L5_l1linf:1.4160e-02 
L6_l1linf:1.4587e-02 L7_l1linf:1.4221e-02 L8_l1linf:1.4099e-02 L9_l1linf:1.4587e-02 L10_l1linf:1.4343e-02 L11_l1linf:1.5137e-02 L12_l1linf:1.5381e-02 L1_spectral:7.7653e-04 L2_spectral:7.8151e-04 L3_spectral:7.8125e-04 L4_spectral:7.8426e-04 L5_spectral:7.8832e-04 L6_spectral:7.9324e-04 L7_spectral:7.8979e-04 L8_spectral:7.9689e-04 L9_spectral:7.9466e-04 L10_spectral:7.9585e-04 L11_spectral:7.8962e-04 L12_spectral:7.6755e-04 train_time:164720ms step_avg:45.76ms +[2025-09-11 11:21:04] [Rank 0] step:3601/10000 train_time:166747ms step_avg:46.31ms +[2025-09-11 11:21:04] [Rank 0] step:3601/10000 train_time:166747ms step_avg:46.31ms +[2025-09-11 11:21:05] [Rank 0] step:3621/10000 train_time:167432ms step_avg:46.24ms +[2025-09-11 11:21:05] [Rank 0] step:3621/10000 train_time:167432ms step_avg:46.24ms +[2025-09-11 11:21:06] [Rank 0] step:3641/10000 train_time:168098ms step_avg:46.17ms +[2025-09-11 11:21:06] [Rank 0] step:3641/10000 train_time:168098ms step_avg:46.17ms +[2025-09-11 11:21:06] [Rank 0] step:3661/10000 train_time:168808ms step_avg:46.11ms +[2025-09-11 11:21:06] [Rank 0] step:3661/10000 train_time:168808ms step_avg:46.11ms +[2025-09-11 11:21:07] [Rank 0] step:3681/10000 train_time:169474ms step_avg:46.04ms +[2025-09-11 11:21:07] [Rank 0] step:3681/10000 train_time:169474ms step_avg:46.04ms +[2025-09-11 11:21:08] [Rank 0] step:3701/10000 train_time:170140ms step_avg:45.97ms +[2025-09-11 11:21:08] [Rank 0] step:3701/10000 train_time:170140ms step_avg:45.97ms +[2025-09-11 11:21:08] [Rank 0] step:3721/10000 train_time:170815ms step_avg:45.91ms +[2025-09-11 11:21:08] [Rank 0] step:3721/10000 train_time:170815ms step_avg:45.91ms +[2025-09-11 11:21:09] [Rank 0] step:3741/10000 train_time:171492ms step_avg:45.84ms +[2025-09-11 11:21:09] [Rank 0] step:3741/10000 train_time:171492ms step_avg:45.84ms +[2025-09-11 11:21:10] [Rank 0] step:3761/10000 train_time:172169ms step_avg:45.78ms +[2025-09-11 11:21:10] [Rank 0] step:3761/10000 train_time:172169ms step_avg:45.78ms 
+[2025-09-11 11:21:10] [Rank 0] step:3781/10000 train_time:172846ms step_avg:45.71ms +[2025-09-11 11:21:10] [Rank 0] step:3781/10000 train_time:172846ms step_avg:45.71ms +[2025-09-11 11:21:11] [Rank 0] step:3801/10000 train_time:173524ms step_avg:45.65ms +[2025-09-11 11:21:11] [Rank 0] step:3801/10000 train_time:173524ms step_avg:45.65ms +[2025-09-11 11:21:12] [Rank 0] step:3821/10000 train_time:174202ms step_avg:45.59ms +[2025-09-11 11:21:12] [Rank 0] step:3821/10000 train_time:174202ms step_avg:45.59ms +[2025-09-11 11:21:12] [Rank 0] step:3841/10000 train_time:174878ms step_avg:45.53ms +[2025-09-11 11:21:12] [Rank 0] step:3841/10000 train_time:174878ms step_avg:45.53ms +[2025-09-11 11:21:13] [Rank 0] step:3861/10000 train_time:175556ms step_avg:45.47ms +[2025-09-11 11:21:13] [Rank 0] step:3861/10000 train_time:175556ms step_avg:45.47ms +[2025-09-11 11:21:14] [Rank 0] step:3881/10000 train_time:176232ms step_avg:45.41ms +[2025-09-11 11:21:14] [Rank 0] step:3881/10000 train_time:176232ms step_avg:45.41ms +[2025-09-11 11:21:15] [Rank 0] step:3901/10000 train_time:176909ms step_avg:45.35ms +[2025-09-11 11:21:15] [Rank 0] step:3901/10000 train_time:176909ms step_avg:45.35ms +[2025-09-11 11:21:15] [Rank 0] step:3921/10000 train_time:177587ms step_avg:45.29ms +[2025-09-11 11:21:15] [Rank 0] step:3921/10000 train_time:177587ms step_avg:45.29ms +[2025-09-11 11:21:16] [Rank 0] step:3941/10000 train_time:178264ms step_avg:45.23ms +[2025-09-11 11:21:16] [Rank 0] step:3941/10000 train_time:178264ms step_avg:45.23ms +[2025-09-11 11:21:17] [Rank 0] step:3961/10000 train_time:178942ms step_avg:45.18ms +[2025-09-11 11:21:17] [Rank 0] step:3961/10000 train_time:178942ms step_avg:45.18ms +[2025-09-11 11:21:17] [Rank 0] step:3981/10000 train_time:179619ms step_avg:45.12ms +[2025-09-11 11:21:17] [Rank 0] step:3981/10000 train_time:179619ms step_avg:45.12ms +[2025-09-11 11:21:18] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 11:21:18] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 11:21:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 11:21:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 11:21:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 11:21:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 11:21:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:21:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:21:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 11:21:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 11:21:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 11:21:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 11:21:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 11:21:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 11:21:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 11:21:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 11:21:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 11:21:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 11:21:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 11:21:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 11:21:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 11:21:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 11:21:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 11:21:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 11:21:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 11:21:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 11:21:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 11:21:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 11:21:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 11:21:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 11:21:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 11:21:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 11:21:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 11:21:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 11:21:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 11:21:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 11:21:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 11:21:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 11:21:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 11:21:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 11:21:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 11:21:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 11:21:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 11:21:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 11:21:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:21:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:21:29] [Rank 0] PRINT: step:4000/10000 val_loss:5.2161 total_sharp:1.4236e-03 L1_sharp:3.6079e-01 L2_sharp:4.0369e-01 L3_sharp:4.9229e-01 L4_sharp:6.6771e-01 L5_sharp:8.4193e-01 L6_sharp:1.2987e+00 L7_sharp:1.4463e+00 L8_sharp:1.6455e+00 L9_sharp:2.1677e+00 L10_sharp:2.1946e+00 L11_sharp:1.9440e+00 L12_sharp:2.5009e+00 total_fnorm:1.7625e+01 total_l1_linf:2.2400e+04 total_spectral:8.8125e+00 L1_fnorm:4.7119e-02 L2_fnorm:4.7119e-02 L3_fnorm:4.7119e-02 L4_fnorm:4.7363e-02 L5_fnorm:4.7363e-02 L6_fnorm:4.8096e-02 L7_fnorm:4.8096e-02 L8_fnorm:4.7363e-02 L9_fnorm:4.7852e-02 L10_fnorm:4.7119e-02 L11_fnorm:4.7607e-02 L12_fnorm:4.6631e-02 L1_l1linf:1.3672e-02 L2_l1linf:1.3550e-02 L3_l1linf:1.3855e-02 L4_l1linf:1.3916e-02 L5_l1linf:1.4099e-02 L6_l1linf:1.4465e-02 L7_l1linf:1.4709e-02 L8_l1linf:1.4954e-02 L9_l1linf:1.4648e-02 L10_l1linf:1.4648e-02 L11_l1linf:1.5076e-02 L12_l1linf:1.5198e-02 L1_spectral:7.5701e-04 L2_spectral:7.6263e-04 L3_spectral:7.6369e-04 L4_spectral:7.6064e-04 L5_spectral:7.7354e-04 L6_spectral:7.8759e-04 L7_spectral:7.9240e-04 L8_spectral:7.7845e-04 L9_spectral:7.8808e-04 L10_spectral:7.9123e-04 L11_spectral:7.8664e-04 L12_spectral:7.6016e-04 train_time:180276ms step_avg:45.07ms +[2025-09-11 11:21:29] [Rank 0] PRINT: step:4000/10000 
val_loss:5.2161 total_sharp:1.4236e-03 L1_sharp:3.6079e-01 L2_sharp:4.0369e-01 L3_sharp:4.9229e-01 L4_sharp:6.6771e-01 L5_sharp:8.4193e-01 L6_sharp:1.2987e+00 L7_sharp:1.4463e+00 L8_sharp:1.6455e+00 L9_sharp:2.1677e+00 L10_sharp:2.1946e+00 L11_sharp:1.9440e+00 L12_sharp:2.5009e+00 total_fnorm:1.7625e+01 total_l1_linf:2.2400e+04 total_spectral:8.8125e+00 L1_fnorm:4.7119e-02 L2_fnorm:4.7119e-02 L3_fnorm:4.7119e-02 L4_fnorm:4.7363e-02 L5_fnorm:4.7363e-02 L6_fnorm:4.8096e-02 L7_fnorm:4.8096e-02 L8_fnorm:4.7363e-02 L9_fnorm:4.7852e-02 L10_fnorm:4.7119e-02 L11_fnorm:4.7607e-02 L12_fnorm:4.6631e-02 L1_l1linf:1.3672e-02 L2_l1linf:1.3550e-02 L3_l1linf:1.3855e-02 L4_l1linf:1.3916e-02 L5_l1linf:1.4099e-02 L6_l1linf:1.4465e-02 L7_l1linf:1.4709e-02 L8_l1linf:1.4954e-02 L9_l1linf:1.4648e-02 L10_l1linf:1.4648e-02 L11_l1linf:1.5076e-02 L12_l1linf:1.5198e-02 L1_spectral:7.5701e-04 L2_spectral:7.6263e-04 L3_spectral:7.6369e-04 L4_spectral:7.6064e-04 L5_spectral:7.7354e-04 L6_spectral:7.8759e-04 L7_spectral:7.9240e-04 L8_spectral:7.7845e-04 L9_spectral:7.8808e-04 L10_spectral:7.9123e-04 L11_spectral:7.8664e-04 L12_spectral:7.6016e-04 train_time:180276ms step_avg:45.07ms +[2025-09-11 11:21:31] [Rank 0] step:4001/10000 train_time:182207ms step_avg:45.54ms +[2025-09-11 11:21:31] [Rank 0] step:4001/10000 train_time:182207ms step_avg:45.54ms +[2025-09-11 11:21:32] [Rank 0] step:4021/10000 train_time:183060ms step_avg:45.53ms +[2025-09-11 11:21:32] [Rank 0] step:4021/10000 train_time:183060ms step_avg:45.53ms +[2025-09-11 11:21:33] [Rank 0] step:4041/10000 train_time:183858ms step_avg:45.50ms +[2025-09-11 11:21:33] [Rank 0] step:4041/10000 train_time:183858ms step_avg:45.50ms +[2025-09-11 11:21:33] [Rank 0] step:4061/10000 train_time:184533ms step_avg:45.44ms +[2025-09-11 11:21:33] [Rank 0] step:4061/10000 train_time:184533ms step_avg:45.44ms +[2025-09-11 11:21:34] [Rank 0] step:4081/10000 train_time:185209ms step_avg:45.38ms +[2025-09-11 11:21:34] [Rank 0] step:4081/10000 
train_time:185209ms step_avg:45.38ms +[2025-09-11 11:21:35] [Rank 0] step:4101/10000 train_time:186192ms step_avg:45.40ms +[2025-09-11 11:21:35] [Rank 0] step:4101/10000 train_time:186192ms step_avg:45.40ms +[2025-09-11 11:21:36] [Rank 0] step:4121/10000 train_time:186870ms step_avg:45.35ms +[2025-09-11 11:21:36] [Rank 0] step:4121/10000 train_time:186870ms step_avg:45.35ms +[2025-09-11 11:21:37] [Rank 0] step:4141/10000 train_time:187546ms step_avg:45.29ms +[2025-09-11 11:21:37] [Rank 0] step:4141/10000 train_time:187546ms step_avg:45.29ms +[2025-09-11 11:21:37] [Rank 0] step:4161/10000 train_time:188221ms step_avg:45.23ms +[2025-09-11 11:21:37] [Rank 0] step:4161/10000 train_time:188221ms step_avg:45.23ms +[2025-09-11 11:21:38] [Rank 0] step:4181/10000 train_time:188898ms step_avg:45.18ms +[2025-09-11 11:21:38] [Rank 0] step:4181/10000 train_time:188898ms step_avg:45.18ms +[2025-09-11 11:21:39] [Rank 0] step:4201/10000 train_time:189574ms step_avg:45.13ms +[2025-09-11 11:21:39] [Rank 0] step:4201/10000 train_time:189574ms step_avg:45.13ms +[2025-09-11 11:21:39] [Rank 0] step:4221/10000 train_time:190250ms step_avg:45.07ms +[2025-09-11 11:21:39] [Rank 0] step:4221/10000 train_time:190250ms step_avg:45.07ms +[2025-09-11 11:21:40] [Rank 0] step:4241/10000 train_time:190925ms step_avg:45.02ms +[2025-09-11 11:21:40] [Rank 0] step:4241/10000 train_time:190925ms step_avg:45.02ms +[2025-09-11 11:21:41] [Rank 0] step:4261/10000 train_time:191602ms step_avg:44.97ms +[2025-09-11 11:21:41] [Rank 0] step:4261/10000 train_time:191602ms step_avg:44.97ms +[2025-09-11 11:21:41] [Rank 0] step:4281/10000 train_time:192280ms step_avg:44.91ms +[2025-09-11 11:21:41] [Rank 0] step:4281/10000 train_time:192280ms step_avg:44.91ms +[2025-09-11 11:21:42] [Rank 0] step:4301/10000 train_time:192956ms step_avg:44.86ms +[2025-09-11 11:21:42] [Rank 0] step:4301/10000 train_time:192956ms step_avg:44.86ms +[2025-09-11 11:21:43] [Rank 0] step:4321/10000 train_time:193632ms step_avg:44.81ms 
+[2025-09-11 11:21:43] [Rank 0] step:4321/10000 train_time:193632ms step_avg:44.81ms +[2025-09-11 11:21:43] [Rank 0] step:4341/10000 train_time:194308ms step_avg:44.76ms +[2025-09-11 11:21:43] [Rank 0] step:4341/10000 train_time:194308ms step_avg:44.76ms +[2025-09-11 11:21:44] [Rank 0] step:4361/10000 train_time:194984ms step_avg:44.71ms +[2025-09-11 11:21:44] [Rank 0] step:4361/10000 train_time:194984ms step_avg:44.71ms +[2025-09-11 11:21:45] [Rank 0] step:4381/10000 train_time:195660ms step_avg:44.66ms +[2025-09-11 11:21:45] [Rank 0] step:4381/10000 train_time:195660ms step_avg:44.66ms +[2025-09-11 11:21:45] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 11:21:45] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 11:21:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 11:21:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 11:21:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 11:21:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 11:21:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:21:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:21:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 11:21:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 11:21:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 11:21:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 11:21:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 11:21:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 11:21:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 11:21:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 11:21:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 11:21:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 11:21:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 11:21:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 11:21:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 11:21:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 11:21:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 11:21:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 11:21:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 11:21:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 11:21:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 11:21:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 11:21:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 11:21:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 11:21:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 11:21:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 11:21:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 11:21:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 11:21:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 11:21:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 11:21:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 11:21:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 11:21:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 11:21:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 11:21:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 11:21:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 11:21:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 11:21:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 11:21:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:21:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:21:56] [Rank 0] PRINT: step:4400/10000 val_loss:5.1666 total_sharp:2.0098e-03 L1_sharp:2.9766e-01 L2_sharp:3.7048e-01 L3_sharp:5.2168e-01 L4_sharp:7.5239e-01 L5_sharp:9.2292e-01 L6_sharp:1.2437e+00 L7_sharp:1.5403e+00 L8_sharp:1.6245e+00 L9_sharp:1.9668e+00 L10_sharp:2.2968e+00 L11_sharp:4.3540e+00 L12_sharp:7.9042e+00 total_fnorm:1.5375e+01 total_l1_linf:1.8816e+04 total_spectral:7.6875e+00 L1_fnorm:4.6875e-02 L2_fnorm:4.7119e-02 L3_fnorm:4.7363e-02 L4_fnorm:4.7607e-02 L5_fnorm:4.7607e-02 L6_fnorm:4.8096e-02 L7_fnorm:4.8096e-02 L8_fnorm:4.7363e-02 L9_fnorm:4.7852e-02 L10_fnorm:4.7607e-02 L11_fnorm:4.7607e-02 L12_fnorm:4.5898e-02 L1_l1linf:1.3123e-02 L2_l1linf:1.3245e-02 L3_l1linf:1.3550e-02 L4_l1linf:1.3855e-02 L5_l1linf:1.3855e-02 L6_l1linf:1.4282e-02 L7_l1linf:1.4587e-02 L8_l1linf:1.4343e-02 L9_l1linf:1.4526e-02 L10_l1linf:1.4648e-02 L11_l1linf:1.5198e-02 L12_l1linf:1.4099e-02 L1_spectral:7.6042e-04 L2_spectral:7.6963e-04 L3_spectral:7.7560e-04 L4_spectral:7.7791e-04 L5_spectral:7.6699e-04 L6_spectral:7.8634e-04 L7_spectral:7.9251e-04 L8_spectral:7.8467e-04 L9_spectral:7.9314e-04 L10_spectral:7.8596e-04 L11_spectral:7.8530e-04 L12_spectral:7.4892e-04 train_time:196317ms step_avg:44.62ms +[2025-09-11 11:21:56] [Rank 0] PRINT: step:4400/10000 val_loss:5.1666 total_sharp:2.0098e-03 L1_sharp:2.9766e-01 L2_sharp:3.7048e-01 L3_sharp:5.2168e-01 L4_sharp:7.5239e-01 L5_sharp:9.2292e-01 L6_sharp:1.2437e+00 L7_sharp:1.5403e+00 L8_sharp:1.6245e+00 L9_sharp:1.9668e+00 L10_sharp:2.2968e+00 L11_sharp:4.3540e+00 L12_sharp:7.9042e+00 total_fnorm:1.5375e+01 total_l1_linf:1.8816e+04 total_spectral:7.6875e+00 L1_fnorm:4.6875e-02 L2_fnorm:4.7119e-02 L3_fnorm:4.7363e-02 L4_fnorm:4.7607e-02 L5_fnorm:4.7607e-02 L6_fnorm:4.8096e-02 L7_fnorm:4.8096e-02 L8_fnorm:4.7363e-02 L9_fnorm:4.7852e-02 L10_fnorm:4.7607e-02 L11_fnorm:4.7607e-02 L12_fnorm:4.5898e-02 L1_l1linf:1.3123e-02 L2_l1linf:1.3245e-02 L3_l1linf:1.3550e-02 L4_l1linf:1.3855e-02 L5_l1linf:1.3855e-02 
L6_l1linf:1.4282e-02 L7_l1linf:1.4587e-02 L8_l1linf:1.4343e-02 L9_l1linf:1.4526e-02 L10_l1linf:1.4648e-02 L11_l1linf:1.5198e-02 L12_l1linf:1.4099e-02 L1_spectral:7.6042e-04 L2_spectral:7.6963e-04 L3_spectral:7.7560e-04 L4_spectral:7.7791e-04 L5_spectral:7.6699e-04 L6_spectral:7.8634e-04 L7_spectral:7.9251e-04 L8_spectral:7.8467e-04 L9_spectral:7.9314e-04 L10_spectral:7.8596e-04 L11_spectral:7.8530e-04 L12_spectral:7.4892e-04 train_time:196317ms step_avg:44.62ms +[2025-09-11 11:21:58] [Rank 0] step:4401/10000 train_time:198353ms step_avg:45.07ms +[2025-09-11 11:21:58] [Rank 0] step:4401/10000 train_time:198353ms step_avg:45.07ms +[2025-09-11 11:21:59] [Rank 0] step:4421/10000 train_time:199037ms step_avg:45.02ms +[2025-09-11 11:21:59] [Rank 0] step:4421/10000 train_time:199037ms step_avg:45.02ms +[2025-09-11 11:22:00] [Rank 0] step:4441/10000 train_time:199715ms step_avg:44.97ms +[2025-09-11 11:22:00] [Rank 0] step:4441/10000 train_time:199715ms step_avg:44.97ms +[2025-09-11 11:22:00] [Rank 0] step:4461/10000 train_time:200395ms step_avg:44.92ms +[2025-09-11 11:22:00] [Rank 0] step:4461/10000 train_time:200395ms step_avg:44.92ms +[2025-09-11 11:22:01] [Rank 0] step:4481/10000 train_time:201075ms step_avg:44.87ms +[2025-09-11 11:22:01] [Rank 0] step:4481/10000 train_time:201075ms step_avg:44.87ms +[2025-09-11 11:22:02] [Rank 0] step:4501/10000 train_time:201756ms step_avg:44.82ms +[2025-09-11 11:22:02] [Rank 0] step:4501/10000 train_time:201756ms step_avg:44.82ms +[2025-09-11 11:22:02] [Rank 0] step:4521/10000 train_time:202435ms step_avg:44.78ms +[2025-09-11 11:22:02] [Rank 0] step:4521/10000 train_time:202435ms step_avg:44.78ms +[2025-09-11 11:22:03] [Rank 0] step:4541/10000 train_time:203115ms step_avg:44.73ms +[2025-09-11 11:22:03] [Rank 0] step:4541/10000 train_time:203115ms step_avg:44.73ms +[2025-09-11 11:22:04] [Rank 0] step:4561/10000 train_time:203794ms step_avg:44.68ms +[2025-09-11 11:22:04] [Rank 0] step:4561/10000 train_time:203794ms step_avg:44.68ms 
+[2025-09-11 11:22:05] [Rank 0] step:4581/10000 train_time:204473ms step_avg:44.63ms +[2025-09-11 11:22:05] [Rank 0] step:4581/10000 train_time:204473ms step_avg:44.63ms +[2025-09-11 11:22:05] [Rank 0] step:4601/10000 train_time:205151ms step_avg:44.59ms +[2025-09-11 11:22:05] [Rank 0] step:4601/10000 train_time:205151ms step_avg:44.59ms +[2025-09-11 11:22:06] [Rank 0] step:4621/10000 train_time:205832ms step_avg:44.54ms +[2025-09-11 11:22:06] [Rank 0] step:4621/10000 train_time:205832ms step_avg:44.54ms +[2025-09-11 11:22:07] [Rank 0] step:4641/10000 train_time:206512ms step_avg:44.50ms +[2025-09-11 11:22:07] [Rank 0] step:4641/10000 train_time:206512ms step_avg:44.50ms +[2025-09-11 11:22:07] [Rank 0] step:4661/10000 train_time:207192ms step_avg:44.45ms +[2025-09-11 11:22:07] [Rank 0] step:4661/10000 train_time:207192ms step_avg:44.45ms +[2025-09-11 11:22:08] [Rank 0] step:4681/10000 train_time:207872ms step_avg:44.41ms +[2025-09-11 11:22:08] [Rank 0] step:4681/10000 train_time:207872ms step_avg:44.41ms +[2025-09-11 11:22:09] [Rank 0] step:4701/10000 train_time:208550ms step_avg:44.36ms +[2025-09-11 11:22:09] [Rank 0] step:4701/10000 train_time:208550ms step_avg:44.36ms +[2025-09-11 11:22:09] [Rank 0] step:4721/10000 train_time:209229ms step_avg:44.32ms +[2025-09-11 11:22:09] [Rank 0] step:4721/10000 train_time:209229ms step_avg:44.32ms +[2025-09-11 11:22:10] [Rank 0] step:4741/10000 train_time:209907ms step_avg:44.27ms +[2025-09-11 11:22:10] [Rank 0] step:4741/10000 train_time:209907ms step_avg:44.27ms +[2025-09-11 11:22:11] [Rank 0] step:4761/10000 train_time:210587ms step_avg:44.23ms +[2025-09-11 11:22:11] [Rank 0] step:4761/10000 train_time:210587ms step_avg:44.23ms +[2025-09-11 11:22:11] [Rank 0] step:4781/10000 train_time:211265ms step_avg:44.19ms +[2025-09-11 11:22:11] [Rank 0] step:4781/10000 train_time:211265ms step_avg:44.19ms +[2025-09-11 11:22:12] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 11:22:12] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 11:22:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 11:22:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 11:22:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 11:22:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 11:22:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:22:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:22:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 11:22:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 11:22:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 11:22:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 11:22:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 11:22:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 11:22:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 11:22:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 11:22:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 11:22:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 11:22:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 11:22:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 11:22:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 11:22:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 11:22:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 11:22:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 11:22:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 11:22:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 11:22:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 11:22:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 11:22:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 11:22:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 11:22:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 11:22:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 11:22:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 11:22:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 11:22:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 11:22:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 11:22:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 11:22:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 11:22:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 11:22:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 11:22:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 11:22:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 11:22:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 11:22:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 11:22:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:22:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:22:23] [Rank 0] PRINT: step:4800/10000 val_loss:5.1206 total_sharp:1.4033e-03 L1_sharp:2.1531e-01 L2_sharp:2.9182e-01 L3_sharp:3.6515e-01 L4_sharp:5.4874e-01 L5_sharp:7.2928e-01 L6_sharp:9.6132e-01 L7_sharp:1.1632e+00 L8_sharp:1.1507e+00 L9_sharp:1.3891e+00 L10_sharp:1.5870e+00 L11_sharp:1.6422e+00 L12_sharp:5.1107e+00 total_fnorm:1.5375e+01 total_l1_linf:1.9328e+04 total_spectral:7.6875e+00 L1_fnorm:4.7119e-02 L2_fnorm:4.7119e-02 L3_fnorm:4.7119e-02 L4_fnorm:4.7607e-02 L5_fnorm:4.7363e-02 L6_fnorm:4.7607e-02 L7_fnorm:4.7607e-02 L8_fnorm:4.7119e-02 L9_fnorm:4.7607e-02 L10_fnorm:4.7119e-02 L11_fnorm:4.7119e-02 L12_fnorm:4.6631e-02 L1_l1linf:1.2146e-02 L2_l1linf:1.2207e-02 L3_l1linf:1.2695e-02 L4_l1linf:1.2756e-02 L5_l1linf:1.3184e-02 L6_l1linf:1.3184e-02 L7_l1linf:1.3245e-02 L8_l1linf:1.3367e-02 L9_l1linf:1.3733e-02 L10_l1linf:1.3672e-02 L11_l1linf:1.3977e-02 L12_l1linf:1.4404e-02 L1_spectral:7.8807e-04 L2_spectral:7.9235e-04 L3_spectral:7.9280e-04 L4_spectral:7.8912e-04 L5_spectral:7.8962e-04 L6_spectral:7.9629e-04 L7_spectral:8.0028e-04 L8_spectral:7.9141e-04 L9_spectral:7.9748e-04 L10_spectral:7.9538e-04 L11_spectral:7.9225e-04 L12_spectral:7.6739e-04 train_time:211923ms step_avg:44.15ms +[2025-09-11 11:22:23] [Rank 0] PRINT: step:4800/10000 
val_loss:5.1206 total_sharp:1.4033e-03 L1_sharp:2.1531e-01 L2_sharp:2.9182e-01 L3_sharp:3.6515e-01 L4_sharp:5.4874e-01 L5_sharp:7.2928e-01 L6_sharp:9.6132e-01 L7_sharp:1.1632e+00 L8_sharp:1.1507e+00 L9_sharp:1.3891e+00 L10_sharp:1.5870e+00 L11_sharp:1.6422e+00 L12_sharp:5.1107e+00 total_fnorm:1.5375e+01 total_l1_linf:1.9328e+04 total_spectral:7.6875e+00 L1_fnorm:4.7119e-02 L2_fnorm:4.7119e-02 L3_fnorm:4.7119e-02 L4_fnorm:4.7607e-02 L5_fnorm:4.7363e-02 L6_fnorm:4.7607e-02 L7_fnorm:4.7607e-02 L8_fnorm:4.7119e-02 L9_fnorm:4.7607e-02 L10_fnorm:4.7119e-02 L11_fnorm:4.7119e-02 L12_fnorm:4.6631e-02 L1_l1linf:1.2146e-02 L2_l1linf:1.2207e-02 L3_l1linf:1.2695e-02 L4_l1linf:1.2756e-02 L5_l1linf:1.3184e-02 L6_l1linf:1.3184e-02 L7_l1linf:1.3245e-02 L8_l1linf:1.3367e-02 L9_l1linf:1.3733e-02 L10_l1linf:1.3672e-02 L11_l1linf:1.3977e-02 L12_l1linf:1.4404e-02 L1_spectral:7.8807e-04 L2_spectral:7.9235e-04 L3_spectral:7.9280e-04 L4_spectral:7.8912e-04 L5_spectral:7.8962e-04 L6_spectral:7.9629e-04 L7_spectral:8.0028e-04 L8_spectral:7.9141e-04 L9_spectral:7.9748e-04 L10_spectral:7.9538e-04 L11_spectral:7.9225e-04 L12_spectral:7.6739e-04 train_time:211923ms step_avg:44.15ms +[2025-09-11 11:22:25] [Rank 0] step:4801/10000 train_time:213897ms step_avg:44.55ms +[2025-09-11 11:22:25] [Rank 0] step:4801/10000 train_time:213897ms step_avg:44.55ms +[2025-09-11 11:22:26] [Rank 0] step:4821/10000 train_time:214589ms step_avg:44.51ms +[2025-09-11 11:22:26] [Rank 0] step:4821/10000 train_time:214589ms step_avg:44.51ms +[2025-09-11 11:22:26] [Rank 0] step:4841/10000 train_time:215268ms step_avg:44.47ms +[2025-09-11 11:22:26] [Rank 0] step:4841/10000 train_time:215268ms step_avg:44.47ms +[2025-09-11 11:22:27] [Rank 0] step:4861/10000 train_time:215948ms step_avg:44.42ms +[2025-09-11 11:22:27] [Rank 0] step:4861/10000 train_time:215948ms step_avg:44.42ms +[2025-09-11 11:22:28] [Rank 0] step:4881/10000 train_time:216626ms step_avg:44.38ms +[2025-09-11 11:22:28] [Rank 0] step:4881/10000 
train_time:216626ms step_avg:44.38ms +[2025-09-11 11:22:28] [Rank 0] step:4901/10000 train_time:217307ms step_avg:44.34ms +[2025-09-11 11:22:28] [Rank 0] step:4901/10000 train_time:217307ms step_avg:44.34ms +[2025-09-11 11:22:29] [Rank 0] step:4921/10000 train_time:217986ms step_avg:44.30ms +[2025-09-11 11:22:29] [Rank 0] step:4921/10000 train_time:217986ms step_avg:44.30ms +[2025-09-11 11:22:30] [Rank 0] step:4941/10000 train_time:218665ms step_avg:44.26ms +[2025-09-11 11:22:30] [Rank 0] step:4941/10000 train_time:218665ms step_avg:44.26ms +[2025-09-11 11:22:31] [Rank 0] step:4961/10000 train_time:219344ms step_avg:44.21ms +[2025-09-11 11:22:31] [Rank 0] step:4961/10000 train_time:219344ms step_avg:44.21ms +[2025-09-11 11:22:31] [Rank 0] step:4981/10000 train_time:220023ms step_avg:44.17ms +[2025-09-11 11:22:31] [Rank 0] step:4981/10000 train_time:220023ms step_avg:44.17ms +[2025-09-11 11:22:32] [Rank 0] step:5001/10000 train_time:220703ms step_avg:44.13ms +[2025-09-11 11:22:32] [Rank 0] step:5001/10000 train_time:220703ms step_avg:44.13ms +[2025-09-11 11:22:33] [Rank 0] step:5021/10000 train_time:221381ms step_avg:44.09ms +[2025-09-11 11:22:33] [Rank 0] step:5021/10000 train_time:221381ms step_avg:44.09ms +[2025-09-11 11:22:33] [Rank 0] step:5041/10000 train_time:222059ms step_avg:44.05ms +[2025-09-11 11:22:33] [Rank 0] step:5041/10000 train_time:222059ms step_avg:44.05ms +[2025-09-11 11:22:34] [Rank 0] step:5061/10000 train_time:222738ms step_avg:44.01ms +[2025-09-11 11:22:34] [Rank 0] step:5061/10000 train_time:222738ms step_avg:44.01ms +[2025-09-11 11:22:35] [Rank 0] step:5081/10000 train_time:223417ms step_avg:43.97ms +[2025-09-11 11:22:35] [Rank 0] step:5081/10000 train_time:223417ms step_avg:43.97ms +[2025-09-11 11:22:36] [Rank 0] step:5101/10000 train_time:224392ms step_avg:43.99ms +[2025-09-11 11:22:36] [Rank 0] step:5101/10000 train_time:224392ms step_avg:43.99ms +[2025-09-11 11:22:36] [Rank 0] step:5121/10000 train_time:225071ms step_avg:43.95ms 
+[2025-09-11 11:22:36] [Rank 0] step:5121/10000 train_time:225071ms step_avg:43.95ms +[2025-09-11 11:22:37] [Rank 0] step:5141/10000 train_time:225750ms step_avg:43.91ms +[2025-09-11 11:22:37] [Rank 0] step:5141/10000 train_time:225750ms step_avg:43.91ms +[2025-09-11 11:22:38] [Rank 0] step:5161/10000 train_time:226428ms step_avg:43.87ms +[2025-09-11 11:22:38] [Rank 0] step:5161/10000 train_time:226428ms step_avg:43.87ms +[2025-09-11 11:22:39] [Rank 0] step:5181/10000 train_time:227371ms step_avg:43.89ms +[2025-09-11 11:22:39] [Rank 0] step:5181/10000 train_time:227371ms step_avg:43.89ms +[2025-09-11 11:22:39] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 11:22:39] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 11:22:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 11:22:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 11:22:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 11:22:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 11:22:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:22:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:22:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 11:22:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 11:22:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 11:22:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 11:22:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 11:22:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 11:22:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 11:22:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 11:22:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 11:22:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 11:22:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 11:22:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 11:22:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 11:22:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 11:22:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 11:22:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 11:22:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 11:22:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 11:22:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 11:22:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 11:22:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 11:22:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 11:22:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 11:22:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 11:22:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 11:22:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 11:22:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 11:22:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 11:22:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 11:22:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 11:22:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 11:22:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 11:22:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 11:22:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 11:22:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 11:22:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 11:22:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:22:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:22:51] [Rank 0] PRINT: step:5200/10000 val_loss:5.0782 total_sharp:1.9014e-03 L1_sharp:2.1930e-01 L2_sharp:2.9807e-01 L3_sharp:3.6841e-01 L4_sharp:5.3985e-01 L5_sharp:6.7300e-01 L6_sharp:1.1010e+00 L7_sharp:1.3098e+00 L8_sharp:1.4943e+00 L9_sharp:1.6884e+00 L10_sharp:2.0399e+00 L11_sharp:1.9683e+00 L12_sharp:5.3497e+00 total_fnorm:1.4125e+01 total_l1_linf:1.6768e+04 total_spectral:7.0625e+00 L1_fnorm:4.6875e-02 L2_fnorm:4.7119e-02 L3_fnorm:4.7119e-02 L4_fnorm:4.7363e-02 L5_fnorm:4.7363e-02 L6_fnorm:4.7607e-02 L7_fnorm:4.7607e-02 L8_fnorm:4.7119e-02 L9_fnorm:4.7607e-02 L10_fnorm:4.6631e-02 L11_fnorm:4.7119e-02 L12_fnorm:4.6875e-02 L1_l1linf:1.1780e-02 L2_l1linf:1.2207e-02 L3_l1linf:1.2390e-02 L4_l1linf:1.2573e-02 L5_l1linf:1.3184e-02 L6_l1linf:1.3489e-02 L7_l1linf:1.3245e-02 L8_l1linf:1.3184e-02 L9_l1linf:1.3245e-02 L10_l1linf:1.3428e-02 L11_l1linf:1.3611e-02 L12_l1linf:1.4343e-02 L1_spectral:7.8663e-04 L2_spectral:7.8602e-04 L3_spectral:7.8605e-04 L4_spectral:7.9035e-04 L5_spectral:7.8829e-04 L6_spectral:7.9217e-04 L7_spectral:7.9944e-04 L8_spectral:7.9696e-04 L9_spectral:8.0166e-04 L10_spectral:7.9282e-04 L11_spectral:7.9334e-04 L12_spectral:7.8185e-04 train_time:228036ms step_avg:43.85ms +[2025-09-11 11:22:51] [Rank 0] PRINT: step:5200/10000 val_loss:5.0782 total_sharp:1.9014e-03 L1_sharp:2.1930e-01 L2_sharp:2.9807e-01 L3_sharp:3.6841e-01 L4_sharp:5.3985e-01 L5_sharp:6.7300e-01 L6_sharp:1.1010e+00 L7_sharp:1.3098e+00 L8_sharp:1.4943e+00 L9_sharp:1.6884e+00 L10_sharp:2.0399e+00 L11_sharp:1.9683e+00 L12_sharp:5.3497e+00 total_fnorm:1.4125e+01 total_l1_linf:1.6768e+04 total_spectral:7.0625e+00 L1_fnorm:4.6875e-02 L2_fnorm:4.7119e-02 L3_fnorm:4.7119e-02 L4_fnorm:4.7363e-02 L5_fnorm:4.7363e-02 L6_fnorm:4.7607e-02 L7_fnorm:4.7607e-02 L8_fnorm:4.7119e-02 L9_fnorm:4.7607e-02 L10_fnorm:4.6631e-02 L11_fnorm:4.7119e-02 L12_fnorm:4.6875e-02 L1_l1linf:1.1780e-02 L2_l1linf:1.2207e-02 L3_l1linf:1.2390e-02 L4_l1linf:1.2573e-02 L5_l1linf:1.3184e-02 
L6_l1linf:1.3489e-02 L7_l1linf:1.3245e-02 L8_l1linf:1.3184e-02 L9_l1linf:1.3245e-02 L10_l1linf:1.3428e-02 L11_l1linf:1.3611e-02 L12_l1linf:1.4343e-02 L1_spectral:7.8663e-04 L2_spectral:7.8602e-04 L3_spectral:7.8605e-04 L4_spectral:7.9035e-04 L5_spectral:7.8829e-04 L6_spectral:7.9217e-04 L7_spectral:7.9944e-04 L8_spectral:7.9696e-04 L9_spectral:8.0166e-04 L10_spectral:7.9282e-04 L11_spectral:7.9334e-04 L12_spectral:7.8185e-04 train_time:228036ms step_avg:43.85ms +[2025-09-11 11:22:53] [Rank 0] step:5201/10000 train_time:230128ms step_avg:44.25ms +[2025-09-11 11:22:53] [Rank 0] step:5201/10000 train_time:230128ms step_avg:44.25ms +[2025-09-11 11:22:53] [Rank 0] step:5221/10000 train_time:230831ms step_avg:44.21ms +[2025-09-11 11:22:53] [Rank 0] step:5221/10000 train_time:230831ms step_avg:44.21ms +[2025-09-11 11:22:54] [Rank 0] step:5241/10000 train_time:231519ms step_avg:44.17ms +[2025-09-11 11:22:54] [Rank 0] step:5241/10000 train_time:231519ms step_avg:44.17ms +[2025-09-11 11:22:55] [Rank 0] step:5261/10000 train_time:232207ms step_avg:44.14ms +[2025-09-11 11:22:55] [Rank 0] step:5261/10000 train_time:232207ms step_avg:44.14ms +[2025-09-11 11:22:55] [Rank 0] step:5281/10000 train_time:232898ms step_avg:44.10ms +[2025-09-11 11:22:55] [Rank 0] step:5281/10000 train_time:232898ms step_avg:44.10ms +[2025-09-11 11:22:56] [Rank 0] step:5301/10000 train_time:233587ms step_avg:44.06ms +[2025-09-11 11:22:56] [Rank 0] step:5301/10000 train_time:233587ms step_avg:44.06ms +[2025-09-11 11:22:57] [Rank 0] step:5321/10000 train_time:234275ms step_avg:44.03ms +[2025-09-11 11:22:57] [Rank 0] step:5321/10000 train_time:234275ms step_avg:44.03ms +[2025-09-11 11:22:58] [Rank 0] step:5341/10000 train_time:234962ms step_avg:43.99ms +[2025-09-11 11:22:58] [Rank 0] step:5341/10000 train_time:234962ms step_avg:43.99ms +[2025-09-11 11:22:58] [Rank 0] step:5361/10000 train_time:235651ms step_avg:43.96ms +[2025-09-11 11:22:58] [Rank 0] step:5361/10000 train_time:235651ms step_avg:43.96ms 
+[2025-09-11 11:22:59] [Rank 0] step:5381/10000 train_time:236341ms step_avg:43.92ms +[2025-09-11 11:22:59] [Rank 0] step:5381/10000 train_time:236341ms step_avg:43.92ms +[2025-09-11 11:23:00] [Rank 0] step:5401/10000 train_time:237028ms step_avg:43.89ms +[2025-09-11 11:23:00] [Rank 0] step:5401/10000 train_time:237028ms step_avg:43.89ms +[2025-09-11 11:23:00] [Rank 0] step:5421/10000 train_time:237717ms step_avg:43.85ms +[2025-09-11 11:23:00] [Rank 0] step:5421/10000 train_time:237717ms step_avg:43.85ms +[2025-09-11 11:23:01] [Rank 0] step:5441/10000 train_time:238406ms step_avg:43.82ms +[2025-09-11 11:23:01] [Rank 0] step:5441/10000 train_time:238406ms step_avg:43.82ms +[2025-09-11 11:23:02] [Rank 0] step:5461/10000 train_time:239095ms step_avg:43.78ms +[2025-09-11 11:23:02] [Rank 0] step:5461/10000 train_time:239095ms step_avg:43.78ms +[2025-09-11 11:23:02] [Rank 0] step:5481/10000 train_time:239785ms step_avg:43.75ms +[2025-09-11 11:23:02] [Rank 0] step:5481/10000 train_time:239785ms step_avg:43.75ms +[2025-09-11 11:23:03] [Rank 0] step:5501/10000 train_time:240472ms step_avg:43.71ms +[2025-09-11 11:23:03] [Rank 0] step:5501/10000 train_time:240472ms step_avg:43.71ms +[2025-09-11 11:23:04] [Rank 0] step:5521/10000 train_time:241161ms step_avg:43.68ms +[2025-09-11 11:23:04] [Rank 0] step:5521/10000 train_time:241161ms step_avg:43.68ms +[2025-09-11 11:23:04] [Rank 0] step:5541/10000 train_time:241852ms step_avg:43.65ms +[2025-09-11 11:23:04] [Rank 0] step:5541/10000 train_time:241852ms step_avg:43.65ms +[2025-09-11 11:23:05] [Rank 0] step:5561/10000 train_time:242543ms step_avg:43.61ms +[2025-09-11 11:23:05] [Rank 0] step:5561/10000 train_time:242543ms step_avg:43.61ms +[2025-09-11 11:23:06] [Rank 0] step:5581/10000 train_time:243232ms step_avg:43.58ms +[2025-09-11 11:23:06] [Rank 0] step:5581/10000 train_time:243232ms step_avg:43.58ms +[2025-09-11 11:23:06] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 11:23:06] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 11:23:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 11:23:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 11:23:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 11:23:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 11:23:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:23:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:23:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 11:23:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 11:23:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 11:23:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 11:23:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 11:23:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 11:23:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 11:23:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 11:23:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 11:23:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 11:23:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 11:23:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 11:23:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 11:23:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 11:23:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 11:23:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 11:23:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 11:23:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 11:23:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 11:23:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 11:23:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 11:23:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 11:23:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 11:23:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 11:23:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 11:23:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 11:23:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 11:23:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 11:23:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 11:23:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 11:23:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 11:23:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 11:23:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 11:23:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 11:23:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 11:23:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 11:23:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:23:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:23:18] [Rank 0] PRINT: step:5600/10000 val_loss:5.0277 total_sharp:1.5012e-03 L1_sharp:1.7360e-01 L2_sharp:2.2843e-01 L3_sharp:3.3013e-01 L4_sharp:4.7242e-01 L5_sharp:6.0207e-01 L6_sharp:1.0480e+00 L7_sharp:1.2395e+00 L8_sharp:1.4500e+00 L9_sharp:1.6290e+00 L10_sharp:1.6240e+00 L11_sharp:1.5450e+00 L12_sharp:1.8722e+00 total_fnorm:1.4000e+01 total_l1_linf:1.6896e+04 total_spectral:7.0000e+00 L1_fnorm:4.6631e-02 L2_fnorm:4.7119e-02 L3_fnorm:4.7119e-02 L4_fnorm:4.7119e-02 L5_fnorm:4.7363e-02 L6_fnorm:4.7607e-02 L7_fnorm:4.7607e-02 L8_fnorm:4.6875e-02 L9_fnorm:4.7119e-02 L10_fnorm:4.6875e-02 L11_fnorm:4.6875e-02 L12_fnorm:4.6631e-02 L1_l1linf:1.1475e-02 L2_l1linf:1.1963e-02 L3_l1linf:1.2207e-02 L4_l1linf:1.2634e-02 L5_l1linf:1.2817e-02 L6_l1linf:1.3489e-02 L7_l1linf:1.3367e-02 L8_l1linf:1.3428e-02 L9_l1linf:1.3733e-02 L10_l1linf:1.3489e-02 L11_l1linf:1.3550e-02 L12_l1linf:1.4099e-02 L1_spectral:7.8323e-04 L2_spectral:7.9182e-04 L3_spectral:7.8934e-04 L4_spectral:7.9181e-04 L5_spectral:7.8949e-04 L6_spectral:7.9688e-04 L7_spectral:8.0091e-04 L8_spectral:7.8888e-04 L9_spectral:7.9626e-04 L10_spectral:7.9684e-04 L11_spectral:7.9878e-04 L12_spectral:7.7665e-04 train_time:243902ms step_avg:43.55ms +[2025-09-11 11:23:18] [Rank 0] PRINT: step:5600/10000 
val_loss:5.0277 total_sharp:1.5012e-03 L1_sharp:1.7360e-01 L2_sharp:2.2843e-01 L3_sharp:3.3013e-01 L4_sharp:4.7242e-01 L5_sharp:6.0207e-01 L6_sharp:1.0480e+00 L7_sharp:1.2395e+00 L8_sharp:1.4500e+00 L9_sharp:1.6290e+00 L10_sharp:1.6240e+00 L11_sharp:1.5450e+00 L12_sharp:1.8722e+00 total_fnorm:1.4000e+01 total_l1_linf:1.6896e+04 total_spectral:7.0000e+00 L1_fnorm:4.6631e-02 L2_fnorm:4.7119e-02 L3_fnorm:4.7119e-02 L4_fnorm:4.7119e-02 L5_fnorm:4.7363e-02 L6_fnorm:4.7607e-02 L7_fnorm:4.7607e-02 L8_fnorm:4.6875e-02 L9_fnorm:4.7119e-02 L10_fnorm:4.6875e-02 L11_fnorm:4.6875e-02 L12_fnorm:4.6631e-02 L1_l1linf:1.1475e-02 L2_l1linf:1.1963e-02 L3_l1linf:1.2207e-02 L4_l1linf:1.2634e-02 L5_l1linf:1.2817e-02 L6_l1linf:1.3489e-02 L7_l1linf:1.3367e-02 L8_l1linf:1.3428e-02 L9_l1linf:1.3733e-02 L10_l1linf:1.3489e-02 L11_l1linf:1.3550e-02 L12_l1linf:1.4099e-02 L1_spectral:7.8323e-04 L2_spectral:7.9182e-04 L3_spectral:7.8934e-04 L4_spectral:7.9181e-04 L5_spectral:7.8949e-04 L6_spectral:7.9688e-04 L7_spectral:8.0091e-04 L8_spectral:7.8888e-04 L9_spectral:7.9626e-04 L10_spectral:7.9684e-04 L11_spectral:7.9878e-04 L12_spectral:7.7665e-04 train_time:243902ms step_avg:43.55ms +[2025-09-11 11:23:20] [Rank 0] step:5601/10000 train_time:245893ms step_avg:43.90ms +[2025-09-11 11:23:20] [Rank 0] step:5601/10000 train_time:245893ms step_avg:43.90ms +[2025-09-11 11:23:20] [Rank 0] step:5621/10000 train_time:246597ms step_avg:43.87ms +[2025-09-11 11:23:20] [Rank 0] step:5621/10000 train_time:246597ms step_avg:43.87ms +[2025-09-11 11:23:21] [Rank 0] step:5641/10000 train_time:247286ms step_avg:43.84ms +[2025-09-11 11:23:21] [Rank 0] step:5641/10000 train_time:247286ms step_avg:43.84ms +[2025-09-11 11:23:22] [Rank 0] step:5661/10000 train_time:247974ms step_avg:43.80ms +[2025-09-11 11:23:22] [Rank 0] step:5661/10000 train_time:247974ms step_avg:43.80ms +[2025-09-11 11:23:22] [Rank 0] step:5681/10000 train_time:248663ms step_avg:43.77ms +[2025-09-11 11:23:22] [Rank 0] step:5681/10000 
train_time:248663ms step_avg:43.77ms +[2025-09-11 11:23:23] [Rank 0] step:5701/10000 train_time:249354ms step_avg:43.74ms +[2025-09-11 11:23:23] [Rank 0] step:5701/10000 train_time:249354ms step_avg:43.74ms +[2025-09-11 11:23:24] [Rank 0] step:5721/10000 train_time:250042ms step_avg:43.71ms +[2025-09-11 11:23:24] [Rank 0] step:5721/10000 train_time:250042ms step_avg:43.71ms +[2025-09-11 11:23:24] [Rank 0] step:5741/10000 train_time:250731ms step_avg:43.67ms +[2025-09-11 11:23:24] [Rank 0] step:5741/10000 train_time:250731ms step_avg:43.67ms +[2025-09-11 11:23:25] [Rank 0] step:5761/10000 train_time:251420ms step_avg:43.64ms +[2025-09-11 11:23:25] [Rank 0] step:5761/10000 train_time:251420ms step_avg:43.64ms +[2025-09-11 11:23:26] [Rank 0] step:5781/10000 train_time:252110ms step_avg:43.61ms +[2025-09-11 11:23:26] [Rank 0] step:5781/10000 train_time:252110ms step_avg:43.61ms +[2025-09-11 11:23:26] [Rank 0] step:5801/10000 train_time:252801ms step_avg:43.58ms +[2025-09-11 11:23:26] [Rank 0] step:5801/10000 train_time:252801ms step_avg:43.58ms +[2025-09-11 11:23:27] [Rank 0] step:5821/10000 train_time:253489ms step_avg:43.55ms +[2025-09-11 11:23:27] [Rank 0] step:5821/10000 train_time:253489ms step_avg:43.55ms +[2025-09-11 11:23:28] [Rank 0] step:5841/10000 train_time:254180ms step_avg:43.52ms +[2025-09-11 11:23:28] [Rank 0] step:5841/10000 train_time:254180ms step_avg:43.52ms +[2025-09-11 11:23:29] [Rank 0] step:5861/10000 train_time:254868ms step_avg:43.49ms +[2025-09-11 11:23:29] [Rank 0] step:5861/10000 train_time:254868ms step_avg:43.49ms +[2025-09-11 11:23:29] [Rank 0] step:5881/10000 train_time:255557ms step_avg:43.45ms +[2025-09-11 11:23:29] [Rank 0] step:5881/10000 train_time:255557ms step_avg:43.45ms +[2025-09-11 11:23:30] [Rank 0] step:5901/10000 train_time:256245ms step_avg:43.42ms +[2025-09-11 11:23:30] [Rank 0] step:5901/10000 train_time:256245ms step_avg:43.42ms +[2025-09-11 11:23:31] [Rank 0] step:5921/10000 train_time:256936ms step_avg:43.39ms 
+[2025-09-11 11:23:31] [Rank 0] step:5921/10000 train_time:256936ms step_avg:43.39ms +[2025-09-11 11:23:31] [Rank 0] step:5941/10000 train_time:257628ms step_avg:43.36ms +[2025-09-11 11:23:31] [Rank 0] step:5941/10000 train_time:257628ms step_avg:43.36ms +[2025-09-11 11:23:32] [Rank 0] step:5961/10000 train_time:258318ms step_avg:43.33ms +[2025-09-11 11:23:32] [Rank 0] step:5961/10000 train_time:258318ms step_avg:43.33ms +[2025-09-11 11:23:33] [Rank 0] step:5981/10000 train_time:259008ms step_avg:43.31ms +[2025-09-11 11:23:33] [Rank 0] step:5981/10000 train_time:259008ms step_avg:43.31ms +[2025-09-11 11:23:33] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 11:23:33] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 11:23:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 11:23:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 11:23:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 11:23:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 11:23:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:23:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:23:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 11:23:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 11:23:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 11:23:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 11:23:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 11:23:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 11:23:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 11:23:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 11:23:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 11:23:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 11:23:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 11:23:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 11:23:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 11:23:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 11:23:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 11:23:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 11:23:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 11:23:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 11:23:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 11:23:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 11:23:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 11:23:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 11:23:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 11:23:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 11:23:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 11:23:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 11:23:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 11:23:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 11:23:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 11:23:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 11:23:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 11:23:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 11:23:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 11:23:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 11:23:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 11:23:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 11:23:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:23:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:23:45] [Rank 0] PRINT: step:6000/10000 val_loss:4.9767 total_sharp:1.3382e-03 L1_sharp:1.6907e-01 L2_sharp:2.2981e-01 L3_sharp:3.0333e-01 L4_sharp:4.2321e-01 L5_sharp:5.4660e-01 L6_sharp:1.0408e+00 L7_sharp:1.1728e+00 L8_sharp:1.4406e+00 L9_sharp:1.8588e+00 L10_sharp:1.9392e+00 L11_sharp:1.5985e+00 L12_sharp:3.5437e+00 total_fnorm:1.4562e+01 total_l1_linf:1.7152e+04 total_spectral:7.2812e+00 L1_fnorm:4.6631e-02 L2_fnorm:4.6875e-02 L3_fnorm:4.6875e-02 L4_fnorm:4.7119e-02 L5_fnorm:4.7119e-02 L6_fnorm:4.7363e-02 L7_fnorm:4.7607e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.7119e-02 L10_fnorm:4.6631e-02 L11_fnorm:4.6631e-02 L12_fnorm:4.6631e-02 L1_l1linf:1.1475e-02 L2_l1linf:1.1414e-02 L3_l1linf:1.1963e-02 L4_l1linf:1.2085e-02 L5_l1linf:1.2695e-02 L6_l1linf:1.3184e-02 L7_l1linf:1.2817e-02 L8_l1linf:1.2817e-02 L9_l1linf:1.2878e-02 L10_l1linf:1.3000e-02 L11_l1linf:1.3245e-02 L12_l1linf:1.4404e-02 L1_spectral:7.9037e-04 L2_spectral:7.8809e-04 L3_spectral:7.8883e-04 L4_spectral:7.9399e-04 L5_spectral:7.8818e-04 L6_spectral:7.9251e-04 L7_spectral:8.0594e-04 L8_spectral:7.9633e-04 L9_spectral:8.0087e-04 L10_spectral:7.9859e-04 L11_spectral:8.0663e-04 L12_spectral:7.8143e-04 train_time:259681ms step_avg:43.28ms +[2025-09-11 11:23:45] [Rank 0] PRINT: step:6000/10000 val_loss:4.9767 total_sharp:1.3382e-03 L1_sharp:1.6907e-01 L2_sharp:2.2981e-01 L3_sharp:3.0333e-01 L4_sharp:4.2321e-01 L5_sharp:5.4660e-01 L6_sharp:1.0408e+00 L7_sharp:1.1728e+00 L8_sharp:1.4406e+00 L9_sharp:1.8588e+00 L10_sharp:1.9392e+00 L11_sharp:1.5985e+00 L12_sharp:3.5437e+00 total_fnorm:1.4562e+01 total_l1_linf:1.7152e+04 total_spectral:7.2812e+00 L1_fnorm:4.6631e-02 L2_fnorm:4.6875e-02 L3_fnorm:4.6875e-02 L4_fnorm:4.7119e-02 L5_fnorm:4.7119e-02 L6_fnorm:4.7363e-02 L7_fnorm:4.7607e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.7119e-02 L10_fnorm:4.6631e-02 L11_fnorm:4.6631e-02 L12_fnorm:4.6631e-02 L1_l1linf:1.1475e-02 L2_l1linf:1.1414e-02 L3_l1linf:1.1963e-02 L4_l1linf:1.2085e-02 L5_l1linf:1.2695e-02 
L6_l1linf:1.3184e-02 L7_l1linf:1.2817e-02 L8_l1linf:1.2817e-02 L9_l1linf:1.2878e-02 L10_l1linf:1.3000e-02 L11_l1linf:1.3245e-02 L12_l1linf:1.4404e-02 L1_spectral:7.9037e-04 L2_spectral:7.8809e-04 L3_spectral:7.8883e-04 L4_spectral:7.9399e-04 L5_spectral:7.8818e-04 L6_spectral:7.9251e-04 L7_spectral:8.0594e-04 L8_spectral:7.9633e-04 L9_spectral:8.0087e-04 L10_spectral:7.9859e-04 L11_spectral:8.0663e-04 L12_spectral:7.8143e-04 train_time:259681ms step_avg:43.28ms +[2025-09-11 11:23:47] [Rank 0] step:6001/10000 train_time:261807ms step_avg:43.63ms +[2025-09-11 11:23:47] [Rank 0] step:6001/10000 train_time:261807ms step_avg:43.63ms +[2025-09-11 11:23:48] [Rank 0] step:6021/10000 train_time:262523ms step_avg:43.60ms +[2025-09-11 11:23:48] [Rank 0] step:6021/10000 train_time:262523ms step_avg:43.60ms +[2025-09-11 11:23:48] [Rank 0] step:6041/10000 train_time:263217ms step_avg:43.57ms +[2025-09-11 11:23:48] [Rank 0] step:6041/10000 train_time:263217ms step_avg:43.57ms +[2025-09-11 11:23:49] [Rank 0] step:6061/10000 train_time:263908ms step_avg:43.54ms +[2025-09-11 11:23:49] [Rank 0] step:6061/10000 train_time:263908ms step_avg:43.54ms +[2025-09-11 11:23:50] [Rank 0] step:6081/10000 train_time:264600ms step_avg:43.51ms +[2025-09-11 11:23:50] [Rank 0] step:6081/10000 train_time:264600ms step_avg:43.51ms +[2025-09-11 11:23:50] [Rank 0] step:6101/10000 train_time:265292ms step_avg:43.48ms +[2025-09-11 11:23:50] [Rank 0] step:6101/10000 train_time:265292ms step_avg:43.48ms +[2025-09-11 11:23:51] [Rank 0] step:6121/10000 train_time:265984ms step_avg:43.45ms +[2025-09-11 11:23:51] [Rank 0] step:6121/10000 train_time:265984ms step_avg:43.45ms +[2025-09-11 11:23:52] [Rank 0] step:6141/10000 train_time:266676ms step_avg:43.43ms +[2025-09-11 11:23:52] [Rank 0] step:6141/10000 train_time:266676ms step_avg:43.43ms +[2025-09-11 11:23:52] [Rank 0] step:6161/10000 train_time:267366ms step_avg:43.40ms +[2025-09-11 11:23:52] [Rank 0] step:6161/10000 train_time:267366ms step_avg:43.40ms 
+[2025-09-11 11:23:53] [Rank 0] step:6181/10000 train_time:268057ms step_avg:43.37ms +[2025-09-11 11:23:53] [Rank 0] step:6181/10000 train_time:268057ms step_avg:43.37ms +[2025-09-11 11:23:54] [Rank 0] step:6201/10000 train_time:268749ms step_avg:43.34ms +[2025-09-11 11:23:54] [Rank 0] step:6201/10000 train_time:268749ms step_avg:43.34ms +[2025-09-11 11:23:54] [Rank 0] step:6221/10000 train_time:269442ms step_avg:43.31ms +[2025-09-11 11:23:54] [Rank 0] step:6221/10000 train_time:269442ms step_avg:43.31ms +[2025-09-11 11:23:55] [Rank 0] step:6241/10000 train_time:270133ms step_avg:43.28ms +[2025-09-11 11:23:55] [Rank 0] step:6241/10000 train_time:270133ms step_avg:43.28ms +[2025-09-11 11:23:56] [Rank 0] step:6261/10000 train_time:270824ms step_avg:43.26ms +[2025-09-11 11:23:56] [Rank 0] step:6261/10000 train_time:270824ms step_avg:43.26ms +[2025-09-11 11:23:57] [Rank 0] step:6281/10000 train_time:271516ms step_avg:43.23ms +[2025-09-11 11:23:57] [Rank 0] step:6281/10000 train_time:271516ms step_avg:43.23ms +[2025-09-11 11:23:57] [Rank 0] step:6301/10000 train_time:272207ms step_avg:43.20ms +[2025-09-11 11:23:57] [Rank 0] step:6301/10000 train_time:272207ms step_avg:43.20ms +[2025-09-11 11:23:58] [Rank 0] step:6321/10000 train_time:272901ms step_avg:43.17ms +[2025-09-11 11:23:58] [Rank 0] step:6321/10000 train_time:272901ms step_avg:43.17ms +[2025-09-11 11:23:59] [Rank 0] step:6341/10000 train_time:273593ms step_avg:43.15ms +[2025-09-11 11:23:59] [Rank 0] step:6341/10000 train_time:273593ms step_avg:43.15ms +[2025-09-11 11:23:59] [Rank 0] step:6361/10000 train_time:274285ms step_avg:43.12ms +[2025-09-11 11:23:59] [Rank 0] step:6361/10000 train_time:274285ms step_avg:43.12ms +[2025-09-11 11:24:00] [Rank 0] step:6381/10000 train_time:274977ms step_avg:43.09ms +[2025-09-11 11:24:00] [Rank 0] step:6381/10000 train_time:274977ms step_avg:43.09ms +[2025-09-11 11:24:01] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 11:24:01] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 11:24:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 11:24:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 11:24:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 11:24:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 11:24:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:24:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:24:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 11:24:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 11:24:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 11:24:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 11:24:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 11:24:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 11:24:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 11:24:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 11:24:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 11:24:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 11:24:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 11:24:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 11:24:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 11:24:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 11:24:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 11:24:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 11:24:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 11:24:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 11:24:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 11:24:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 11:24:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 11:24:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 11:24:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 11:24:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 11:24:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 11:24:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 11:24:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 11:24:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 11:24:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 11:24:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 11:24:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 11:24:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 11:24:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 11:24:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 11:24:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 11:24:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 11:24:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:24:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:24:17] [Rank 0] PRINT: step:6400/10000 val_loss:4.9399 total_sharp:1.4128e-03 L1_sharp:1.6629e-01 L2_sharp:2.4471e-01 L3_sharp:3.0646e-01 L4_sharp:4.6659e-01 L5_sharp:6.1209e-01 L6_sharp:1.0304e+00 L7_sharp:1.1596e+00 L8_sharp:1.3614e+00 L9_sharp:1.5232e+00 L10_sharp:1.7691e+00 L11_sharp:1.6259e+00 L12_sharp:2.2893e+00 total_fnorm:1.2375e+01 total_l1_linf:1.4144e+04 total_spectral:6.1875e+00 L1_fnorm:4.0527e-02 L2_fnorm:4.0771e-02 L3_fnorm:4.1016e-02 L4_fnorm:4.1260e-02 L5_fnorm:4.1260e-02 L6_fnorm:4.1504e-02 L7_fnorm:4.1260e-02 L8_fnorm:4.1016e-02 L9_fnorm:4.1260e-02 L10_fnorm:4.0527e-02 L11_fnorm:4.0527e-02 L12_fnorm:4.0527e-02 L1_l1linf:9.6436e-03 L2_l1linf:9.7046e-03 L3_l1linf:1.0437e-02 L4_l1linf:1.0437e-02 L5_l1linf:1.0864e-02 L6_l1linf:1.0925e-02 L7_l1linf:1.0986e-02 L8_l1linf:1.0925e-02 L9_l1linf:1.1169e-02 L10_l1linf:1.1230e-02 L11_l1linf:1.1108e-02 L12_l1linf:1.1841e-02 L1_spectral:7.0514e-04 L2_spectral:7.0598e-04 L3_spectral:7.1603e-04 L4_spectral:7.1280e-04 L5_spectral:7.1167e-04 L6_spectral:7.1240e-04 L7_spectral:7.2474e-04 L8_spectral:7.0933e-04 L9_spectral:7.1819e-04 L10_spectral:7.1136e-04 L11_spectral:7.1126e-04 L12_spectral:6.8325e-04 train_time:275649ms step_avg:43.07ms +[2025-09-11 11:24:17] [Rank 0] PRINT: step:6400/10000 
val_loss:4.9399 total_sharp:1.4128e-03 L1_sharp:1.6629e-01 L2_sharp:2.4471e-01 L3_sharp:3.0646e-01 L4_sharp:4.6659e-01 L5_sharp:6.1209e-01 L6_sharp:1.0304e+00 L7_sharp:1.1596e+00 L8_sharp:1.3614e+00 L9_sharp:1.5232e+00 L10_sharp:1.7691e+00 L11_sharp:1.6259e+00 L12_sharp:2.2893e+00 total_fnorm:1.2375e+01 total_l1_linf:1.4144e+04 total_spectral:6.1875e+00 L1_fnorm:4.0527e-02 L2_fnorm:4.0771e-02 L3_fnorm:4.1016e-02 L4_fnorm:4.1260e-02 L5_fnorm:4.1260e-02 L6_fnorm:4.1504e-02 L7_fnorm:4.1260e-02 L8_fnorm:4.1016e-02 L9_fnorm:4.1260e-02 L10_fnorm:4.0527e-02 L11_fnorm:4.0527e-02 L12_fnorm:4.0527e-02 L1_l1linf:9.6436e-03 L2_l1linf:9.7046e-03 L3_l1linf:1.0437e-02 L4_l1linf:1.0437e-02 L5_l1linf:1.0864e-02 L6_l1linf:1.0925e-02 L7_l1linf:1.0986e-02 L8_l1linf:1.0925e-02 L9_l1linf:1.1169e-02 L10_l1linf:1.1230e-02 L11_l1linf:1.1108e-02 L12_l1linf:1.1841e-02 L1_spectral:7.0514e-04 L2_spectral:7.0598e-04 L3_spectral:7.1603e-04 L4_spectral:7.1280e-04 L5_spectral:7.1167e-04 L6_spectral:7.1240e-04 L7_spectral:7.2474e-04 L8_spectral:7.0933e-04 L9_spectral:7.1819e-04 L10_spectral:7.1136e-04 L11_spectral:7.1126e-04 L12_spectral:6.8325e-04 train_time:275649ms step_avg:43.07ms +[2025-09-11 11:24:18] [Rank 0] step:6401/10000 train_time:277483ms step_avg:43.35ms +[2025-09-11 11:24:18] [Rank 0] step:6401/10000 train_time:277483ms step_avg:43.35ms +[2025-09-11 11:24:19] [Rank 0] step:6421/10000 train_time:278214ms step_avg:43.33ms +[2025-09-11 11:24:19] [Rank 0] step:6421/10000 train_time:278214ms step_avg:43.33ms +[2025-09-11 11:24:20] [Rank 0] step:6441/10000 train_time:278906ms step_avg:43.30ms +[2025-09-11 11:24:20] [Rank 0] step:6441/10000 train_time:278906ms step_avg:43.30ms +[2025-09-11 11:24:20] [Rank 0] step:6461/10000 train_time:279600ms step_avg:43.28ms +[2025-09-11 11:24:20] [Rank 0] step:6461/10000 train_time:279600ms step_avg:43.28ms +[2025-09-11 11:24:21] [Rank 0] step:6481/10000 train_time:280295ms step_avg:43.25ms +[2025-09-11 11:24:21] [Rank 0] step:6481/10000 
train_time:280295ms step_avg:43.25ms +[2025-09-11 11:24:22] [Rank 0] step:6501/10000 train_time:280990ms step_avg:43.22ms +[2025-09-11 11:24:22] [Rank 0] step:6501/10000 train_time:280990ms step_avg:43.22ms +[2025-09-11 11:24:23] [Rank 0] step:6521/10000 train_time:281683ms step_avg:43.20ms +[2025-09-11 11:24:23] [Rank 0] step:6521/10000 train_time:281683ms step_avg:43.20ms +[2025-09-11 11:24:23] [Rank 0] step:6541/10000 train_time:282374ms step_avg:43.17ms +[2025-09-11 11:24:23] [Rank 0] step:6541/10000 train_time:282374ms step_avg:43.17ms +[2025-09-11 11:24:24] [Rank 0] step:6561/10000 train_time:283067ms step_avg:43.14ms +[2025-09-11 11:24:24] [Rank 0] step:6561/10000 train_time:283067ms step_avg:43.14ms +[2025-09-11 11:24:25] [Rank 0] step:6581/10000 train_time:283760ms step_avg:43.12ms +[2025-09-11 11:24:25] [Rank 0] step:6581/10000 train_time:283760ms step_avg:43.12ms +[2025-09-11 11:24:25] [Rank 0] step:6601/10000 train_time:284453ms step_avg:43.09ms +[2025-09-11 11:24:25] [Rank 0] step:6601/10000 train_time:284453ms step_avg:43.09ms +[2025-09-11 11:24:26] [Rank 0] step:6621/10000 train_time:285144ms step_avg:43.07ms +[2025-09-11 11:24:26] [Rank 0] step:6621/10000 train_time:285144ms step_avg:43.07ms +[2025-09-11 11:24:27] [Rank 0] step:6641/10000 train_time:285838ms step_avg:43.04ms +[2025-09-11 11:24:27] [Rank 0] step:6641/10000 train_time:285838ms step_avg:43.04ms +[2025-09-11 11:24:27] [Rank 0] step:6661/10000 train_time:286531ms step_avg:43.02ms +[2025-09-11 11:24:27] [Rank 0] step:6661/10000 train_time:286531ms step_avg:43.02ms +[2025-09-11 11:24:28] [Rank 0] step:6681/10000 train_time:287231ms step_avg:42.99ms +[2025-09-11 11:24:28] [Rank 0] step:6681/10000 train_time:287231ms step_avg:42.99ms +[2025-09-11 11:24:29] [Rank 0] step:6701/10000 train_time:287930ms step_avg:42.97ms +[2025-09-11 11:24:29] [Rank 0] step:6701/10000 train_time:287930ms step_avg:42.97ms +[2025-09-11 11:24:30] [Rank 0] step:6721/10000 train_time:288629ms step_avg:42.94ms 
+[2025-09-11 11:24:30] [Rank 0] step:6721/10000 train_time:288629ms step_avg:42.94ms +[2025-09-11 11:24:30] [Rank 0] step:6741/10000 train_time:289329ms step_avg:42.92ms +[2025-09-11 11:24:30] [Rank 0] step:6741/10000 train_time:289329ms step_avg:42.92ms +[2025-09-11 11:24:31] [Rank 0] step:6761/10000 train_time:290029ms step_avg:42.90ms +[2025-09-11 11:24:31] [Rank 0] step:6761/10000 train_time:290029ms step_avg:42.90ms +[2025-09-11 11:24:32] [Rank 0] step:6781/10000 train_time:290729ms step_avg:42.87ms +[2025-09-11 11:24:32] [Rank 0] step:6781/10000 train_time:290729ms step_avg:42.87ms +[2025-09-11 11:24:32] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 11:24:32] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 11:24:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 11:24:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 11:24:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 11:24:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 11:24:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:24:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:24:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 11:24:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 11:24:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 11:24:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 11:24:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 11:24:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 11:24:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 11:24:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 11:24:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 11:24:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 11:24:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 11:24:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 11:24:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 11:24:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 11:24:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 11:24:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 11:24:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 11:24:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 11:24:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 11:24:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 11:24:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 11:24:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 11:24:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 11:24:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 11:24:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 11:24:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 11:24:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 11:24:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 11:24:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 11:24:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 11:24:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 11:24:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 11:24:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 11:24:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 11:24:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 11:24:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 11:24:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:24:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:24:44] [Rank 0] PRINT: step:6800/10000 val_loss:4.9050 total_sharp:1.0990e-03 L1_sharp:1.0832e-01 L2_sharp:1.4861e-01 L3_sharp:2.2138e-01 L4_sharp:3.7138e-01 L5_sharp:4.9803e-01 L6_sharp:7.7866e-01 L7_sharp:1.0746e+00 L8_sharp:1.3052e+00 L9_sharp:1.6407e+00 L10_sharp:1.8514e+00 L11_sharp:2.0660e+00 L12_sharp:7.9606e+00 total_fnorm:1.1688e+01 total_l1_linf:1.3056e+04 total_spectral:5.8125e+00 L1_fnorm:3.4180e-02 L2_fnorm:3.4424e-02 L3_fnorm:3.4424e-02 L4_fnorm:3.4668e-02 L5_fnorm:3.4912e-02 L6_fnorm:3.5156e-02 L7_fnorm:3.4912e-02 L8_fnorm:3.4668e-02 L9_fnorm:3.4668e-02 L10_fnorm:3.4180e-02 L11_fnorm:3.3936e-02 L12_fnorm:3.3936e-02 L1_l1linf:7.0496e-03 L2_l1linf:7.3242e-03 L3_l1linf:7.6294e-03 L4_l1linf:7.9346e-03 L5_l1linf:8.1177e-03 L6_l1linf:8.4229e-03 L7_l1linf:8.6670e-03 L8_l1linf:8.6060e-03 L9_l1linf:8.7280e-03 L10_l1linf:8.7891e-03 L11_l1linf:8.7891e-03 L12_l1linf:9.7046e-03 L1_spectral:6.3755e-04 L2_spectral:6.3859e-04 L3_spectral:6.3250e-04 L4_spectral:6.3141e-04 L5_spectral:6.3538e-04 L6_spectral:6.3260e-04 L7_spectral:6.2876e-04 L8_spectral:6.3124e-04 L9_spectral:6.3046e-04 L10_spectral:6.2741e-04 L11_spectral:6.2208e-04 L12_spectral:5.9321e-04 train_time:291409ms step_avg:42.85ms +[2025-09-11 11:24:44] [Rank 0] PRINT: step:6800/10000 val_loss:4.9050 total_sharp:1.0990e-03 L1_sharp:1.0832e-01 L2_sharp:1.4861e-01 L3_sharp:2.2138e-01 L4_sharp:3.7138e-01 L5_sharp:4.9803e-01 L6_sharp:7.7866e-01 L7_sharp:1.0746e+00 L8_sharp:1.3052e+00 L9_sharp:1.6407e+00 L10_sharp:1.8514e+00 L11_sharp:2.0660e+00 L12_sharp:7.9606e+00 total_fnorm:1.1688e+01 total_l1_linf:1.3056e+04 total_spectral:5.8125e+00 L1_fnorm:3.4180e-02 L2_fnorm:3.4424e-02 L3_fnorm:3.4424e-02 L4_fnorm:3.4668e-02 L5_fnorm:3.4912e-02 L6_fnorm:3.5156e-02 L7_fnorm:3.4912e-02 L8_fnorm:3.4668e-02 L9_fnorm:3.4668e-02 L10_fnorm:3.4180e-02 L11_fnorm:3.3936e-02 L12_fnorm:3.3936e-02 L1_l1linf:7.0496e-03 L2_l1linf:7.3242e-03 L3_l1linf:7.6294e-03 L4_l1linf:7.9346e-03 L5_l1linf:8.1177e-03 
L6_l1linf:8.4229e-03 L7_l1linf:8.6670e-03 L8_l1linf:8.6060e-03 L9_l1linf:8.7280e-03 L10_l1linf:8.7891e-03 L11_l1linf:8.7891e-03 L12_l1linf:9.7046e-03 L1_spectral:6.3755e-04 L2_spectral:6.3859e-04 L3_spectral:6.3250e-04 L4_spectral:6.3141e-04 L5_spectral:6.3538e-04 L6_spectral:6.3260e-04 L7_spectral:6.2876e-04 L8_spectral:6.3124e-04 L9_spectral:6.3046e-04 L10_spectral:6.2741e-04 L11_spectral:6.2208e-04 L12_spectral:5.9321e-04 train_time:291409ms step_avg:42.85ms +[2025-09-11 11:24:45] [Rank 0] step:6801/10000 train_time:293258ms step_avg:43.12ms +[2025-09-11 11:24:45] [Rank 0] step:6801/10000 train_time:293258ms step_avg:43.12ms +[2025-09-11 11:24:46] [Rank 0] step:6821/10000 train_time:293980ms step_avg:43.10ms +[2025-09-11 11:24:46] [Rank 0] step:6821/10000 train_time:293980ms step_avg:43.10ms +[2025-09-11 11:24:47] [Rank 0] step:6841/10000 train_time:294682ms step_avg:43.08ms +[2025-09-11 11:24:47] [Rank 0] step:6841/10000 train_time:294682ms step_avg:43.08ms +[2025-09-11 11:24:48] [Rank 0] step:6861/10000 train_time:295382ms step_avg:43.05ms +[2025-09-11 11:24:48] [Rank 0] step:6861/10000 train_time:295382ms step_avg:43.05ms +[2025-09-11 11:24:48] [Rank 0] step:6881/10000 train_time:296083ms step_avg:43.03ms +[2025-09-11 11:24:48] [Rank 0] step:6881/10000 train_time:296083ms step_avg:43.03ms +[2025-09-11 11:24:49] [Rank 0] step:6901/10000 train_time:296782ms step_avg:43.01ms +[2025-09-11 11:24:49] [Rank 0] step:6901/10000 train_time:296782ms step_avg:43.01ms +[2025-09-11 11:24:50] [Rank 0] step:6921/10000 train_time:297481ms step_avg:42.98ms +[2025-09-11 11:24:50] [Rank 0] step:6921/10000 train_time:297481ms step_avg:42.98ms +[2025-09-11 11:24:50] [Rank 0] step:6941/10000 train_time:298180ms step_avg:42.96ms +[2025-09-11 11:24:50] [Rank 0] step:6941/10000 train_time:298180ms step_avg:42.96ms +[2025-09-11 11:24:51] [Rank 0] step:6961/10000 train_time:298881ms step_avg:42.94ms +[2025-09-11 11:24:51] [Rank 0] step:6961/10000 train_time:298881ms step_avg:42.94ms 
+[2025-09-11 11:24:52] [Rank 0] step:6981/10000 train_time:299583ms step_avg:42.91ms +[2025-09-11 11:24:52] [Rank 0] step:6981/10000 train_time:299583ms step_avg:42.91ms +[2025-09-11 11:24:53] [Rank 0] step:7001/10000 train_time:300283ms step_avg:42.89ms +[2025-09-11 11:24:53] [Rank 0] step:7001/10000 train_time:300283ms step_avg:42.89ms +[2025-09-11 11:24:53] [Rank 0] step:7021/10000 train_time:300981ms step_avg:42.87ms +[2025-09-11 11:24:53] [Rank 0] step:7021/10000 train_time:300981ms step_avg:42.87ms +[2025-09-11 11:24:54] [Rank 0] step:7041/10000 train_time:301679ms step_avg:42.85ms +[2025-09-11 11:24:54] [Rank 0] step:7041/10000 train_time:301679ms step_avg:42.85ms +[2025-09-11 11:24:55] [Rank 0] step:7061/10000 train_time:302380ms step_avg:42.82ms +[2025-09-11 11:24:55] [Rank 0] step:7061/10000 train_time:302380ms step_avg:42.82ms +[2025-09-11 11:24:55] [Rank 0] step:7081/10000 train_time:303079ms step_avg:42.80ms +[2025-09-11 11:24:55] [Rank 0] step:7081/10000 train_time:303079ms step_avg:42.80ms +[2025-09-11 11:24:56] [Rank 0] step:7101/10000 train_time:303780ms step_avg:42.78ms +[2025-09-11 11:24:56] [Rank 0] step:7101/10000 train_time:303780ms step_avg:42.78ms +[2025-09-11 11:24:57] [Rank 0] step:7121/10000 train_time:304481ms step_avg:42.76ms +[2025-09-11 11:24:57] [Rank 0] step:7121/10000 train_time:304481ms step_avg:42.76ms +[2025-09-11 11:24:57] [Rank 0] step:7141/10000 train_time:305180ms step_avg:42.74ms +[2025-09-11 11:24:57] [Rank 0] step:7141/10000 train_time:305180ms step_avg:42.74ms +[2025-09-11 11:24:58] [Rank 0] step:7161/10000 train_time:305880ms step_avg:42.71ms +[2025-09-11 11:24:58] [Rank 0] step:7161/10000 train_time:305880ms step_avg:42.71ms +[2025-09-11 11:24:59] [Rank 0] step:7181/10000 train_time:306578ms step_avg:42.69ms +[2025-09-11 11:24:59] [Rank 0] step:7181/10000 train_time:306578ms step_avg:42.69ms +[2025-09-11 11:24:59] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 11:24:59] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 11:25:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 11:25:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 11:25:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 11:25:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 11:25:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:25:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:25:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 11:25:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 11:25:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 11:25:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 11:25:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 11:25:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 11:25:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 11:25:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 11:25:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 11:25:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 11:25:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 11:25:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 11:25:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 11:25:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 11:25:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 11:25:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 11:25:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 11:25:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 11:25:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 11:25:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 11:25:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 11:25:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 11:25:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 11:25:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 11:25:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 11:25:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 11:25:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 11:25:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 11:25:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 11:25:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 11:25:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 11:25:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 11:25:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 11:25:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 11:25:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 11:25:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 11:25:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:25:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:25:11] [Rank 0] PRINT: step:7200/10000 val_loss:4.8751 total_sharp:9.3779e-04 L1_sharp:1.1587e-01 L2_sharp:1.5969e-01 L3_sharp:2.5711e-01 L4_sharp:3.5230e-01 L5_sharp:4.7862e-01 L6_sharp:9.0388e-01 L7_sharp:1.1185e+00 L8_sharp:1.3764e+00 L9_sharp:1.5003e+00 L10_sharp:1.6033e+00 L11_sharp:1.7207e+00 L12_sharp:2.3514e+00 total_fnorm:9.8750e+00 total_l1_linf:1.0368e+04 total_spectral:4.9375e+00 L1_fnorm:2.8564e-02 L2_fnorm:2.8687e-02 L3_fnorm:2.8931e-02 L4_fnorm:2.9175e-02 L5_fnorm:2.9419e-02 L6_fnorm:2.9419e-02 L7_fnorm:2.9419e-02 L8_fnorm:2.9053e-02 L9_fnorm:2.9053e-02 L10_fnorm:2.8687e-02 L11_fnorm:2.8564e-02 L12_fnorm:2.8320e-02 L1_l1linf:5.5237e-03 L2_l1linf:5.7983e-03 L3_l1linf:5.9814e-03 L4_l1linf:6.2866e-03 L5_l1linf:6.5002e-03 L6_l1linf:6.8359e-03 L7_l1linf:6.9885e-03 L8_l1linf:6.7749e-03 L9_l1linf:7.0496e-03 L10_l1linf:6.9580e-03 L11_l1linf:6.9885e-03 L12_l1linf:7.7515e-03 L1_spectral:5.5040e-04 L2_spectral:5.4677e-04 L3_spectral:5.4715e-04 L4_spectral:5.4537e-04 L5_spectral:5.4528e-04 L6_spectral:5.4347e-04 L7_spectral:5.4333e-04 L8_spectral:5.4697e-04 L9_spectral:5.4548e-04 L10_spectral:5.3734e-04 L11_spectral:5.3653e-04 L12_spectral:5.0809e-04 train_time:307258ms step_avg:42.67ms +[2025-09-11 11:25:11] [Rank 0] PRINT: step:7200/10000 
val_loss:4.8751 total_sharp:9.3779e-04 L1_sharp:1.1587e-01 L2_sharp:1.5969e-01 L3_sharp:2.5711e-01 L4_sharp:3.5230e-01 L5_sharp:4.7862e-01 L6_sharp:9.0388e-01 L7_sharp:1.1185e+00 L8_sharp:1.3764e+00 L9_sharp:1.5003e+00 L10_sharp:1.6033e+00 L11_sharp:1.7207e+00 L12_sharp:2.3514e+00 total_fnorm:9.8750e+00 total_l1_linf:1.0368e+04 total_spectral:4.9375e+00 L1_fnorm:2.8564e-02 L2_fnorm:2.8687e-02 L3_fnorm:2.8931e-02 L4_fnorm:2.9175e-02 L5_fnorm:2.9419e-02 L6_fnorm:2.9419e-02 L7_fnorm:2.9419e-02 L8_fnorm:2.9053e-02 L9_fnorm:2.9053e-02 L10_fnorm:2.8687e-02 L11_fnorm:2.8564e-02 L12_fnorm:2.8320e-02 L1_l1linf:5.5237e-03 L2_l1linf:5.7983e-03 L3_l1linf:5.9814e-03 L4_l1linf:6.2866e-03 L5_l1linf:6.5002e-03 L6_l1linf:6.8359e-03 L7_l1linf:6.9885e-03 L8_l1linf:6.7749e-03 L9_l1linf:7.0496e-03 L10_l1linf:6.9580e-03 L11_l1linf:6.9885e-03 L12_l1linf:7.7515e-03 L1_spectral:5.5040e-04 L2_spectral:5.4677e-04 L3_spectral:5.4715e-04 L4_spectral:5.4537e-04 L5_spectral:5.4528e-04 L6_spectral:5.4347e-04 L7_spectral:5.4333e-04 L8_spectral:5.4697e-04 L9_spectral:5.4548e-04 L10_spectral:5.3734e-04 L11_spectral:5.3653e-04 L12_spectral:5.0809e-04 train_time:307258ms step_avg:42.67ms +[2025-09-11 11:25:13] [Rank 0] step:7201/10000 train_time:309705ms step_avg:43.01ms +[2025-09-11 11:25:13] [Rank 0] step:7201/10000 train_time:309705ms step_avg:43.01ms +[2025-09-11 11:25:14] [Rank 0] step:7221/10000 train_time:310517ms step_avg:43.00ms +[2025-09-11 11:25:14] [Rank 0] step:7221/10000 train_time:310517ms step_avg:43.00ms +[2025-09-11 11:25:15] [Rank 0] step:7241/10000 train_time:311218ms step_avg:42.98ms +[2025-09-11 11:25:15] [Rank 0] step:7241/10000 train_time:311218ms step_avg:42.98ms +[2025-09-11 11:25:15] [Rank 0] step:7261/10000 train_time:311920ms step_avg:42.96ms +[2025-09-11 11:25:15] [Rank 0] step:7261/10000 train_time:311920ms step_avg:42.96ms +[2025-09-11 11:25:16] [Rank 0] step:7281/10000 train_time:312625ms step_avg:42.94ms +[2025-09-11 11:25:16] [Rank 0] step:7281/10000 
train_time:312625ms step_avg:42.94ms +[2025-09-11 11:25:17] [Rank 0] step:7301/10000 train_time:313325ms step_avg:42.92ms +[2025-09-11 11:25:17] [Rank 0] step:7301/10000 train_time:313325ms step_avg:42.92ms +[2025-09-11 11:25:18] [Rank 0] step:7321/10000 train_time:314025ms step_avg:42.89ms +[2025-09-11 11:25:18] [Rank 0] step:7321/10000 train_time:314025ms step_avg:42.89ms +[2025-09-11 11:25:18] [Rank 0] step:7341/10000 train_time:314726ms step_avg:42.87ms +[2025-09-11 11:25:18] [Rank 0] step:7341/10000 train_time:314726ms step_avg:42.87ms +[2025-09-11 11:25:19] [Rank 0] step:7361/10000 train_time:315426ms step_avg:42.85ms +[2025-09-11 11:25:19] [Rank 0] step:7361/10000 train_time:315426ms step_avg:42.85ms +[2025-09-11 11:25:20] [Rank 0] step:7381/10000 train_time:316128ms step_avg:42.83ms +[2025-09-11 11:25:20] [Rank 0] step:7381/10000 train_time:316128ms step_avg:42.83ms +[2025-09-11 11:25:20] [Rank 0] step:7401/10000 train_time:316826ms step_avg:42.81ms +[2025-09-11 11:25:20] [Rank 0] step:7401/10000 train_time:316826ms step_avg:42.81ms +[2025-09-11 11:25:21] [Rank 0] step:7421/10000 train_time:317527ms step_avg:42.79ms +[2025-09-11 11:25:21] [Rank 0] step:7421/10000 train_time:317527ms step_avg:42.79ms +[2025-09-11 11:25:22] [Rank 0] step:7441/10000 train_time:318228ms step_avg:42.77ms +[2025-09-11 11:25:22] [Rank 0] step:7441/10000 train_time:318228ms step_avg:42.77ms +[2025-09-11 11:25:22] [Rank 0] step:7461/10000 train_time:318929ms step_avg:42.75ms +[2025-09-11 11:25:22] [Rank 0] step:7461/10000 train_time:318929ms step_avg:42.75ms +[2025-09-11 11:25:23] [Rank 0] step:7481/10000 train_time:319629ms step_avg:42.73ms +[2025-09-11 11:25:23] [Rank 0] step:7481/10000 train_time:319629ms step_avg:42.73ms +[2025-09-11 11:25:24] [Rank 0] step:7501/10000 train_time:320330ms step_avg:42.71ms +[2025-09-11 11:25:24] [Rank 0] step:7501/10000 train_time:320330ms step_avg:42.71ms +[2025-09-11 11:25:25] [Rank 0] step:7521/10000 train_time:321032ms step_avg:42.68ms 
+[2025-09-11 11:25:25] [Rank 0] step:7521/10000 train_time:321032ms step_avg:42.68ms +[2025-09-11 11:25:25] [Rank 0] step:7541/10000 train_time:321732ms step_avg:42.66ms +[2025-09-11 11:25:25] [Rank 0] step:7541/10000 train_time:321732ms step_avg:42.66ms +[2025-09-11 11:25:26] [Rank 0] step:7561/10000 train_time:322435ms step_avg:42.64ms +[2025-09-11 11:25:26] [Rank 0] step:7561/10000 train_time:322435ms step_avg:42.64ms +[2025-09-11 11:25:27] [Rank 0] step:7581/10000 train_time:323136ms step_avg:42.62ms +[2025-09-11 11:25:27] [Rank 0] step:7581/10000 train_time:323136ms step_avg:42.62ms +[2025-09-11 11:25:27] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 11:25:27] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 11:25:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 11:25:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 11:25:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 11:25:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 11:25:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:25:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:25:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 11:25:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 11:25:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 11:25:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 11:25:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 11:25:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 11:25:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 11:25:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 11:25:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 11:25:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 11:25:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 11:25:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 11:25:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 11:25:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 11:25:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 11:25:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 11:25:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 11:25:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 11:25:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 11:25:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 11:25:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 11:25:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 11:25:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 11:25:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 11:25:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 11:25:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 11:25:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 11:25:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 11:25:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 11:25:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 11:25:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 11:25:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 11:25:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 11:25:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 11:25:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 11:25:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 11:25:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:25:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:25:39] [Rank 0] PRINT: step:7600/10000 val_loss:4.8507 total_sharp:8.2201e-04 L1_sharp:9.3841e-02 L2_sharp:1.2847e-01 L3_sharp:1.9914e-01 L4_sharp:2.6740e-01 L5_sharp:3.7292e-01 L6_sharp:6.8654e-01 L7_sharp:9.3859e-01 L8_sharp:1.1158e+00 L9_sharp:1.3304e+00 L10_sharp:1.4759e+00 L11_sharp:1.3616e+00 L12_sharp:1.6649e+00 total_fnorm:7.7188e+00 total_l1_linf:7.2640e+03 total_spectral:3.8594e+00 L1_fnorm:2.3193e-02 L2_fnorm:2.3438e-02 L3_fnorm:2.3560e-02 L4_fnorm:2.3804e-02 L5_fnorm:2.3926e-02 L6_fnorm:2.4048e-02 L7_fnorm:2.3926e-02 L8_fnorm:2.3682e-02 L9_fnorm:2.3804e-02 L10_fnorm:2.3438e-02 L11_fnorm:2.3071e-02 L12_fnorm:2.2949e-02 L1_l1linf:3.9368e-03 L2_l1linf:4.3030e-03 L3_l1linf:4.5471e-03 L4_l1linf:4.8828e-03 L5_l1linf:5.0049e-03 L6_l1linf:5.1880e-03 L7_l1linf:5.4016e-03 L8_l1linf:5.2490e-03 L9_l1linf:5.4321e-03 L10_l1linf:5.4321e-03 L11_l1linf:5.4626e-03 L12_l1linf:5.9814e-03 L1_spectral:4.7276e-04 L2_spectral:4.6842e-04 L3_spectral:4.6694e-04 L4_spectral:4.6422e-04 L5_spectral:4.6206e-04 L6_spectral:4.6305e-04 L7_spectral:4.6246e-04 L8_spectral:4.6095e-04 L9_spectral:4.6247e-04 L10_spectral:4.5577e-04 L11_spectral:4.4485e-04 L12_spectral:4.2218e-04 train_time:323818ms step_avg:42.61ms +[2025-09-11 11:25:39] [Rank 0] PRINT: step:7600/10000 val_loss:4.8507 total_sharp:8.2201e-04 L1_sharp:9.3841e-02 L2_sharp:1.2847e-01 L3_sharp:1.9914e-01 L4_sharp:2.6740e-01 L5_sharp:3.7292e-01 L6_sharp:6.8654e-01 L7_sharp:9.3859e-01 L8_sharp:1.1158e+00 L9_sharp:1.3304e+00 L10_sharp:1.4759e+00 L11_sharp:1.3616e+00 L12_sharp:1.6649e+00 total_fnorm:7.7188e+00 total_l1_linf:7.2640e+03 total_spectral:3.8594e+00 L1_fnorm:2.3193e-02 L2_fnorm:2.3438e-02 L3_fnorm:2.3560e-02 L4_fnorm:2.3804e-02 L5_fnorm:2.3926e-02 L6_fnorm:2.4048e-02 L7_fnorm:2.3926e-02 L8_fnorm:2.3682e-02 L9_fnorm:2.3804e-02 L10_fnorm:2.3438e-02 L11_fnorm:2.3071e-02 L12_fnorm:2.2949e-02 L1_l1linf:3.9368e-03 L2_l1linf:4.3030e-03 L3_l1linf:4.5471e-03 L4_l1linf:4.8828e-03 L5_l1linf:5.0049e-03 
L6_l1linf:5.1880e-03 L7_l1linf:5.4016e-03 L8_l1linf:5.2490e-03 L9_l1linf:5.4321e-03 L10_l1linf:5.4321e-03 L11_l1linf:5.4626e-03 L12_l1linf:5.9814e-03 L1_spectral:4.7276e-04 L2_spectral:4.6842e-04 L3_spectral:4.6694e-04 L4_spectral:4.6422e-04 L5_spectral:4.6206e-04 L6_spectral:4.6305e-04 L7_spectral:4.6246e-04 L8_spectral:4.6095e-04 L9_spectral:4.6247e-04 L10_spectral:4.5577e-04 L11_spectral:4.4485e-04 L12_spectral:4.2218e-04 train_time:323818ms step_avg:42.61ms +[2025-09-11 11:25:41] [Rank 0] step:7601/10000 train_time:325727ms step_avg:42.85ms +[2025-09-11 11:25:41] [Rank 0] step:7601/10000 train_time:325727ms step_avg:42.85ms +[2025-09-11 11:25:41] [Rank 0] step:7621/10000 train_time:326465ms step_avg:42.84ms +[2025-09-11 11:25:41] [Rank 0] step:7621/10000 train_time:326465ms step_avg:42.84ms +[2025-09-11 11:25:42] [Rank 0] step:7641/10000 train_time:327167ms step_avg:42.82ms +[2025-09-11 11:25:42] [Rank 0] step:7641/10000 train_time:327167ms step_avg:42.82ms +[2025-09-11 11:25:43] [Rank 0] step:7661/10000 train_time:327866ms step_avg:42.80ms +[2025-09-11 11:25:43] [Rank 0] step:7661/10000 train_time:327866ms step_avg:42.80ms +[2025-09-11 11:25:43] [Rank 0] step:7681/10000 train_time:328567ms step_avg:42.78ms +[2025-09-11 11:25:43] [Rank 0] step:7681/10000 train_time:328567ms step_avg:42.78ms +[2025-09-11 11:25:44] [Rank 0] step:7701/10000 train_time:329268ms step_avg:42.76ms +[2025-09-11 11:25:44] [Rank 0] step:7701/10000 train_time:329268ms step_avg:42.76ms +[2025-09-11 11:25:45] [Rank 0] step:7721/10000 train_time:330139ms step_avg:42.76ms +[2025-09-11 11:25:45] [Rank 0] step:7721/10000 train_time:330139ms step_avg:42.76ms +[2025-09-11 11:25:46] [Rank 0] step:7741/10000 train_time:330907ms step_avg:42.75ms +[2025-09-11 11:25:46] [Rank 0] step:7741/10000 train_time:330907ms step_avg:42.75ms +[2025-09-11 11:25:46] [Rank 0] step:7761/10000 train_time:331607ms step_avg:42.73ms +[2025-09-11 11:25:46] [Rank 0] step:7761/10000 train_time:331607ms step_avg:42.73ms 
+[2025-09-11 11:25:47] [Rank 0] step:7781/10000 train_time:332311ms step_avg:42.71ms +[2025-09-11 11:25:47] [Rank 0] step:7781/10000 train_time:332311ms step_avg:42.71ms +[2025-09-11 11:25:48] [Rank 0] step:7801/10000 train_time:333281ms step_avg:42.72ms +[2025-09-11 11:25:48] [Rank 0] step:7801/10000 train_time:333281ms step_avg:42.72ms +[2025-09-11 11:25:49] [Rank 0] step:7821/10000 train_time:333982ms step_avg:42.70ms +[2025-09-11 11:25:49] [Rank 0] step:7821/10000 train_time:333982ms step_avg:42.70ms +[2025-09-11 11:25:50] [Rank 0] step:7841/10000 train_time:334684ms step_avg:42.68ms +[2025-09-11 11:25:50] [Rank 0] step:7841/10000 train_time:334684ms step_avg:42.68ms +[2025-09-11 11:25:50] [Rank 0] step:7861/10000 train_time:335388ms step_avg:42.66ms +[2025-09-11 11:25:50] [Rank 0] step:7861/10000 train_time:335388ms step_avg:42.66ms +[2025-09-11 11:25:51] [Rank 0] step:7881/10000 train_time:336090ms step_avg:42.65ms +[2025-09-11 11:25:51] [Rank 0] step:7881/10000 train_time:336090ms step_avg:42.65ms +[2025-09-11 11:25:52] [Rank 0] step:7901/10000 train_time:336794ms step_avg:42.63ms +[2025-09-11 11:25:52] [Rank 0] step:7901/10000 train_time:336794ms step_avg:42.63ms +[2025-09-11 11:25:52] [Rank 0] step:7921/10000 train_time:337493ms step_avg:42.61ms +[2025-09-11 11:25:52] [Rank 0] step:7921/10000 train_time:337493ms step_avg:42.61ms +[2025-09-11 11:25:53] [Rank 0] step:7941/10000 train_time:338195ms step_avg:42.59ms +[2025-09-11 11:25:53] [Rank 0] step:7941/10000 train_time:338195ms step_avg:42.59ms +[2025-09-11 11:25:54] [Rank 0] step:7961/10000 train_time:338895ms step_avg:42.57ms +[2025-09-11 11:25:54] [Rank 0] step:7961/10000 train_time:338895ms step_avg:42.57ms +[2025-09-11 11:25:54] [Rank 0] step:7981/10000 train_time:339599ms step_avg:42.55ms +[2025-09-11 11:25:54] [Rank 0] step:7981/10000 train_time:339599ms step_avg:42.55ms +[2025-09-11 11:25:55] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 11:25:55] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 11:25:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 11:25:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 11:25:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 11:25:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 11:25:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:25:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:25:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 11:25:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 11:25:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 11:25:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 11:26:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 11:26:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 11:26:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 11:26:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 11:26:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 11:26:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 11:26:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 11:26:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 11:26:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 11:26:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 11:26:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 11:26:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 11:26:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 11:26:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 11:26:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 11:26:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 11:26:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 11:26:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 11:26:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 11:26:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 11:26:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 11:26:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 11:26:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 11:26:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 11:26:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 11:26:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 11:26:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 11:26:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 11:26:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 11:26:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 11:26:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 11:26:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 11:26:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:26:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:26:07] [Rank 0] PRINT: step:8000/10000 val_loss:4.8361 total_sharp:7.6563e-04 L1_sharp:7.1686e-02 L2_sharp:1.0697e-01 L3_sharp:1.4787e-01 L4_sharp:2.3370e-01 L5_sharp:3.6879e-01 L6_sharp:5.9203e-01 L7_sharp:8.7792e-01 L8_sharp:1.2429e+00 L9_sharp:1.8574e+00 L10_sharp:1.8044e+00 L11_sharp:1.5631e+00 L12_sharp:1.8398e+00 total_fnorm:6.3750e+00 total_l1_linf:5.6320e+03 total_spectral:3.1875e+00 L1_fnorm:1.8433e-02 L2_fnorm:1.8555e-02 L3_fnorm:1.8677e-02 L4_fnorm:1.8921e-02 L5_fnorm:1.9043e-02 L6_fnorm:1.9165e-02 L7_fnorm:1.9165e-02 L8_fnorm:1.9043e-02 L9_fnorm:1.9165e-02 L10_fnorm:1.8677e-02 L11_fnorm:1.8433e-02 L12_fnorm:1.8066e-02 L1_l1linf:3.0212e-03 L2_l1linf:3.2806e-03 L3_l1linf:3.4027e-03 L4_l1linf:3.5248e-03 L5_l1linf:3.6774e-03 L6_l1linf:3.8605e-03 L7_l1linf:3.9978e-03 L8_l1linf:4.1199e-03 L9_l1linf:4.0894e-03 L10_l1linf:4.1809e-03 L11_l1linf:4.0894e-03 L12_l1linf:4.3945e-03 L1_spectral:3.8295e-04 L2_spectral:3.8392e-04 L3_spectral:3.8027e-04 L4_spectral:3.7695e-04 L5_spectral:3.7809e-04 L6_spectral:3.7715e-04 L7_spectral:3.7370e-04 L8_spectral:3.7548e-04 L9_spectral:3.7363e-04 L10_spectral:3.6804e-04 L11_spectral:3.6001e-04 L12_spectral:3.3841e-04 train_time:340278ms step_avg:42.53ms +[2025-09-11 11:26:07] [Rank 0] PRINT: step:8000/10000 
val_loss:4.8361 total_sharp:7.6563e-04 L1_sharp:7.1686e-02 L2_sharp:1.0697e-01 L3_sharp:1.4787e-01 L4_sharp:2.3370e-01 L5_sharp:3.6879e-01 L6_sharp:5.9203e-01 L7_sharp:8.7792e-01 L8_sharp:1.2429e+00 L9_sharp:1.8574e+00 L10_sharp:1.8044e+00 L11_sharp:1.5631e+00 L12_sharp:1.8398e+00 total_fnorm:6.3750e+00 total_l1_linf:5.6320e+03 total_spectral:3.1875e+00 L1_fnorm:1.8433e-02 L2_fnorm:1.8555e-02 L3_fnorm:1.8677e-02 L4_fnorm:1.8921e-02 L5_fnorm:1.9043e-02 L6_fnorm:1.9165e-02 L7_fnorm:1.9165e-02 L8_fnorm:1.9043e-02 L9_fnorm:1.9165e-02 L10_fnorm:1.8677e-02 L11_fnorm:1.8433e-02 L12_fnorm:1.8066e-02 L1_l1linf:3.0212e-03 L2_l1linf:3.2806e-03 L3_l1linf:3.4027e-03 L4_l1linf:3.5248e-03 L5_l1linf:3.6774e-03 L6_l1linf:3.8605e-03 L7_l1linf:3.9978e-03 L8_l1linf:4.1199e-03 L9_l1linf:4.0894e-03 L10_l1linf:4.1809e-03 L11_l1linf:4.0894e-03 L12_l1linf:4.3945e-03 L1_spectral:3.8295e-04 L2_spectral:3.8392e-04 L3_spectral:3.8027e-04 L4_spectral:3.7695e-04 L5_spectral:3.7809e-04 L6_spectral:3.7715e-04 L7_spectral:3.7370e-04 L8_spectral:3.7548e-04 L9_spectral:3.7363e-04 L10_spectral:3.6804e-04 L11_spectral:3.6001e-04 L12_spectral:3.3841e-04 train_time:340278ms step_avg:42.53ms +[2025-09-11 11:26:08] [Rank 0] step:8001/10000 train_time:342179ms step_avg:42.77ms +[2025-09-11 11:26:08] [Rank 0] step:8001/10000 train_time:342179ms step_avg:42.77ms +[2025-09-11 11:26:09] [Rank 0] step:8021/10000 train_time:342921ms step_avg:42.75ms +[2025-09-11 11:26:09] [Rank 0] step:8021/10000 train_time:342921ms step_avg:42.75ms +[2025-09-11 11:26:10] [Rank 0] step:8041/10000 train_time:343624ms step_avg:42.73ms +[2025-09-11 11:26:10] [Rank 0] step:8041/10000 train_time:343624ms step_avg:42.73ms +[2025-09-11 11:26:11] [Rank 0] step:8061/10000 train_time:344333ms step_avg:42.72ms +[2025-09-11 11:26:11] [Rank 0] step:8061/10000 train_time:344333ms step_avg:42.72ms +[2025-09-11 11:26:11] [Rank 0] step:8081/10000 train_time:345034ms step_avg:42.70ms +[2025-09-11 11:26:11] [Rank 0] step:8081/10000 
train_time:345034ms step_avg:42.70ms +[2025-09-11 11:26:12] [Rank 0] step:8101/10000 train_time:345735ms step_avg:42.68ms +[2025-09-11 11:26:12] [Rank 0] step:8101/10000 train_time:345735ms step_avg:42.68ms +[2025-09-11 11:26:13] [Rank 0] step:8121/10000 train_time:346443ms step_avg:42.66ms +[2025-09-11 11:26:13] [Rank 0] step:8121/10000 train_time:346443ms step_avg:42.66ms +[2025-09-11 11:26:14] [Rank 0] step:8141/10000 train_time:347896ms step_avg:42.73ms +[2025-09-11 11:26:14] [Rank 0] step:8141/10000 train_time:347896ms step_avg:42.73ms +[2025-09-11 11:26:15] [Rank 0] step:8161/10000 train_time:348602ms step_avg:42.72ms +[2025-09-11 11:26:15] [Rank 0] step:8161/10000 train_time:348602ms step_avg:42.72ms +[2025-09-11 11:26:16] [Rank 0] step:8181/10000 train_time:349315ms step_avg:42.70ms +[2025-09-11 11:26:16] [Rank 0] step:8181/10000 train_time:349315ms step_avg:42.70ms +[2025-09-11 11:26:16] [Rank 0] step:8201/10000 train_time:350025ms step_avg:42.68ms +[2025-09-11 11:26:16] [Rank 0] step:8201/10000 train_time:350025ms step_avg:42.68ms +[2025-09-11 11:26:17] [Rank 0] step:8221/10000 train_time:350735ms step_avg:42.66ms +[2025-09-11 11:26:17] [Rank 0] step:8221/10000 train_time:350735ms step_avg:42.66ms +[2025-09-11 11:26:18] [Rank 0] step:8241/10000 train_time:351452ms step_avg:42.65ms +[2025-09-11 11:26:18] [Rank 0] step:8241/10000 train_time:351452ms step_avg:42.65ms +[2025-09-11 11:26:18] [Rank 0] step:8261/10000 train_time:352160ms step_avg:42.63ms +[2025-09-11 11:26:18] [Rank 0] step:8261/10000 train_time:352160ms step_avg:42.63ms +[2025-09-11 11:26:19] [Rank 0] step:8281/10000 train_time:352866ms step_avg:42.61ms +[2025-09-11 11:26:19] [Rank 0] step:8281/10000 train_time:352866ms step_avg:42.61ms +[2025-09-11 11:26:20] [Rank 0] step:8301/10000 train_time:353576ms step_avg:42.59ms +[2025-09-11 11:26:20] [Rank 0] step:8301/10000 train_time:353576ms step_avg:42.59ms +[2025-09-11 11:26:21] [Rank 0] step:8321/10000 train_time:354284ms step_avg:42.58ms 
+[2025-09-11 11:26:21] [Rank 0] step:8321/10000 train_time:354284ms step_avg:42.58ms +[2025-09-11 11:26:21] [Rank 0] step:8341/10000 train_time:355000ms step_avg:42.56ms +[2025-09-11 11:26:21] [Rank 0] step:8341/10000 train_time:355000ms step_avg:42.56ms +[2025-09-11 11:26:22] [Rank 0] step:8361/10000 train_time:355705ms step_avg:42.54ms +[2025-09-11 11:26:22] [Rank 0] step:8361/10000 train_time:355705ms step_avg:42.54ms +[2025-09-11 11:26:23] [Rank 0] step:8381/10000 train_time:356418ms step_avg:42.53ms +[2025-09-11 11:26:23] [Rank 0] step:8381/10000 train_time:356418ms step_avg:42.53ms +[2025-09-11 11:26:23] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 11:26:23] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 11:26:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 11:26:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 11:26:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 11:26:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 11:26:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:26:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:26:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 11:26:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 11:26:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 11:26:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 11:26:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 11:26:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 11:26:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 11:26:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 11:26:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 11:26:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 11:26:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 11:26:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 11:26:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 11:26:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 11:26:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 11:26:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 11:26:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 11:26:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 11:26:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 11:26:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 11:26:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 11:26:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 11:26:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 11:26:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 11:26:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 11:26:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 11:26:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 11:26:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 11:26:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 11:26:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 11:26:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 11:26:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 11:26:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 11:26:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 11:26:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 11:26:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 11:26:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:26:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:26:34] [Rank 0] PRINT: step:8400/10000 val_loss:4.8230 total_sharp:8.5631e-04 L1_sharp:7.2561e-02 L2_sharp:1.1413e-01 L3_sharp:1.6393e-01 L4_sharp:2.7562e-01 L5_sharp:3.7614e-01 L6_sharp:7.1671e-01 L7_sharp:8.7662e-01 L8_sharp:1.1552e+00 L9_sharp:1.5167e+00 L10_sharp:1.9949e+00 L11_sharp:1.4453e+00 L12_sharp:1.8168e+00 total_fnorm:4.5312e+00 total_l1_linf:3.5520e+03 total_spectral:2.2656e+00 L1_fnorm:1.4099e-02 L2_fnorm:1.4221e-02 L3_fnorm:1.4282e-02 L4_fnorm:1.4404e-02 L5_fnorm:1.4587e-02 L6_fnorm:1.4709e-02 L7_fnorm:1.4709e-02 L8_fnorm:1.4587e-02 L9_fnorm:1.4648e-02 L10_fnorm:1.4343e-02 L11_fnorm:1.4099e-02 L12_fnorm:1.3855e-02 L1_l1linf:2.1210e-03 L2_l1linf:2.1362e-03 L3_l1linf:2.3193e-03 L4_l1linf:2.5177e-03 L5_l1linf:2.6398e-03 L6_l1linf:2.7618e-03 L7_l1linf:2.9144e-03 L8_l1linf:2.8839e-03 L9_l1linf:3.0212e-03 L10_l1linf:3.1281e-03 L11_l1linf:2.9297e-03 L12_l1linf:3.3417e-03 L1_spectral:2.9974e-04 L2_spectral:2.9979e-04 L3_spectral:2.9578e-04 L4_spectral:2.9466e-04 L5_spectral:2.9742e-04 L6_spectral:2.9478e-04 L7_spectral:2.9672e-04 L8_spectral:2.9845e-04 L9_spectral:2.9528e-04 L10_spectral:2.8870e-04 L11_spectral:2.8508e-04 L12_spectral:2.6412e-04 train_time:357110ms step_avg:42.51ms +[2025-09-11 11:26:34] [Rank 0] PRINT: step:8400/10000 val_loss:4.8230 total_sharp:8.5631e-04 L1_sharp:7.2561e-02 L2_sharp:1.1413e-01 L3_sharp:1.6393e-01 L4_sharp:2.7562e-01 L5_sharp:3.7614e-01 L6_sharp:7.1671e-01 L7_sharp:8.7662e-01 L8_sharp:1.1552e+00 L9_sharp:1.5167e+00 L10_sharp:1.9949e+00 L11_sharp:1.4453e+00 L12_sharp:1.8168e+00 total_fnorm:4.5312e+00 total_l1_linf:3.5520e+03 total_spectral:2.2656e+00 L1_fnorm:1.4099e-02 L2_fnorm:1.4221e-02 L3_fnorm:1.4282e-02 L4_fnorm:1.4404e-02 L5_fnorm:1.4587e-02 L6_fnorm:1.4709e-02 L7_fnorm:1.4709e-02 L8_fnorm:1.4587e-02 L9_fnorm:1.4648e-02 L10_fnorm:1.4343e-02 L11_fnorm:1.4099e-02 L12_fnorm:1.3855e-02 L1_l1linf:2.1210e-03 L2_l1linf:2.1362e-03 L3_l1linf:2.3193e-03 L4_l1linf:2.5177e-03 L5_l1linf:2.6398e-03 
L6_l1linf:2.7618e-03 L7_l1linf:2.9144e-03 L8_l1linf:2.8839e-03 L9_l1linf:3.0212e-03 L10_l1linf:3.1281e-03 L11_l1linf:2.9297e-03 L12_l1linf:3.3417e-03 L1_spectral:2.9974e-04 L2_spectral:2.9979e-04 L3_spectral:2.9578e-04 L4_spectral:2.9466e-04 L5_spectral:2.9742e-04 L6_spectral:2.9478e-04 L7_spectral:2.9672e-04 L8_spectral:2.9845e-04 L9_spectral:2.9528e-04 L10_spectral:2.8870e-04 L11_spectral:2.8508e-04 L12_spectral:2.6412e-04 train_time:357110ms step_avg:42.51ms +[2025-09-11 11:26:36] [Rank 0] step:8401/10000 train_time:359021ms step_avg:42.74ms +[2025-09-11 11:26:36] [Rank 0] step:8401/10000 train_time:359021ms step_avg:42.74ms +[2025-09-11 11:26:37] [Rank 0] step:8421/10000 train_time:359764ms step_avg:42.72ms +[2025-09-11 11:26:37] [Rank 0] step:8421/10000 train_time:359764ms step_avg:42.72ms +[2025-09-11 11:26:38] [Rank 0] step:8441/10000 train_time:360474ms step_avg:42.71ms +[2025-09-11 11:26:38] [Rank 0] step:8441/10000 train_time:360474ms step_avg:42.71ms +[2025-09-11 11:26:39] [Rank 0] step:8461/10000 train_time:361184ms step_avg:42.69ms +[2025-09-11 11:26:39] [Rank 0] step:8461/10000 train_time:361184ms step_avg:42.69ms +[2025-09-11 11:26:39] [Rank 0] step:8481/10000 train_time:361894ms step_avg:42.67ms +[2025-09-11 11:26:39] [Rank 0] step:8481/10000 train_time:361894ms step_avg:42.67ms +[2025-09-11 11:26:40] [Rank 0] step:8501/10000 train_time:362602ms step_avg:42.65ms +[2025-09-11 11:26:40] [Rank 0] step:8501/10000 train_time:362602ms step_avg:42.65ms +[2025-09-11 11:26:41] [Rank 0] step:8521/10000 train_time:363310ms step_avg:42.64ms +[2025-09-11 11:26:41] [Rank 0] step:8521/10000 train_time:363310ms step_avg:42.64ms +[2025-09-11 11:26:41] [Rank 0] step:8541/10000 train_time:364019ms step_avg:42.62ms +[2025-09-11 11:26:41] [Rank 0] step:8541/10000 train_time:364019ms step_avg:42.62ms +[2025-09-11 11:26:42] [Rank 0] step:8561/10000 train_time:364733ms step_avg:42.60ms +[2025-09-11 11:26:42] [Rank 0] step:8561/10000 train_time:364733ms step_avg:42.60ms 
+[2025-09-11 11:26:43] [Rank 0] step:8581/10000 train_time:365450ms step_avg:42.59ms +[2025-09-11 11:26:43] [Rank 0] step:8581/10000 train_time:365450ms step_avg:42.59ms +[2025-09-11 11:26:44] [Rank 0] step:8601/10000 train_time:366160ms step_avg:42.57ms +[2025-09-11 11:26:44] [Rank 0] step:8601/10000 train_time:366160ms step_avg:42.57ms +[2025-09-11 11:26:44] [Rank 0] step:8621/10000 train_time:366868ms step_avg:42.56ms +[2025-09-11 11:26:44] [Rank 0] step:8621/10000 train_time:366868ms step_avg:42.56ms +[2025-09-11 11:26:45] [Rank 0] step:8641/10000 train_time:367577ms step_avg:42.54ms +[2025-09-11 11:26:45] [Rank 0] step:8641/10000 train_time:367577ms step_avg:42.54ms +[2025-09-11 11:26:46] [Rank 0] step:8661/10000 train_time:368286ms step_avg:42.52ms +[2025-09-11 11:26:46] [Rank 0] step:8661/10000 train_time:368286ms step_avg:42.52ms +[2025-09-11 11:26:46] [Rank 0] step:8681/10000 train_time:368996ms step_avg:42.51ms +[2025-09-11 11:26:46] [Rank 0] step:8681/10000 train_time:368996ms step_avg:42.51ms +[2025-09-11 11:26:47] [Rank 0] step:8701/10000 train_time:369704ms step_avg:42.49ms +[2025-09-11 11:26:47] [Rank 0] step:8701/10000 train_time:369704ms step_avg:42.49ms +[2025-09-11 11:26:48] [Rank 0] step:8721/10000 train_time:370415ms step_avg:42.47ms +[2025-09-11 11:26:48] [Rank 0] step:8721/10000 train_time:370415ms step_avg:42.47ms +[2025-09-11 11:26:49] [Rank 0] step:8741/10000 train_time:371388ms step_avg:42.49ms +[2025-09-11 11:26:49] [Rank 0] step:8741/10000 train_time:371388ms step_avg:42.49ms +[2025-09-11 11:26:49] [Rank 0] step:8761/10000 train_time:372100ms step_avg:42.47ms +[2025-09-11 11:26:49] [Rank 0] step:8761/10000 train_time:372100ms step_avg:42.47ms +[2025-09-11 11:26:50] [Rank 0] step:8781/10000 train_time:372806ms step_avg:42.46ms +[2025-09-11 11:26:50] [Rank 0] step:8781/10000 train_time:372806ms step_avg:42.46ms +[2025-09-11 11:26:51] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 11:26:51] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 11:26:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 11:26:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 11:26:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 11:26:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 11:26:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:26:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:26:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 11:26:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 11:26:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 11:26:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 11:26:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 11:26:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 11:26:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 11:26:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 11:26:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 11:26:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 11:26:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 11:26:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 11:26:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 11:26:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 11:26:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 11:26:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 11:26:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 11:26:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 11:27:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 11:27:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 11:27:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 11:27:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 11:27:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 11:27:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 11:27:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 11:27:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 11:27:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 11:27:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 11:27:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 11:27:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 11:27:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 11:27:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 11:27:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 11:27:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 11:27:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 11:27:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 11:27:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:27:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:27:02] [Rank 0] PRINT: step:8800/10000 val_loss:4.8184 total_sharp:6.4337e-04 L1_sharp:5.6636e-02 L2_sharp:1.0113e-01 L3_sharp:1.3332e-01 L4_sharp:2.0686e-01 L5_sharp:3.3144e-01 L6_sharp:5.9876e-01 L7_sharp:8.0852e-01 L8_sharp:9.4988e-01 L9_sharp:1.3141e+00 L10_sharp:1.3295e+00 L11_sharp:1.0192e+00 L12_sharp:1.5440e+00 total_fnorm:3.2500e+00 total_l1_linf:2.2560e+03 total_spectral:1.6250e+00 L1_fnorm:1.0010e-02 L2_fnorm:1.0071e-02 L3_fnorm:1.0132e-02 L4_fnorm:1.0193e-02 L5_fnorm:1.0376e-02 L6_fnorm:1.0376e-02 L7_fnorm:1.0315e-02 L8_fnorm:1.0376e-02 L9_fnorm:1.0376e-02 L10_fnorm:1.0193e-02 L11_fnorm:9.8877e-03 L12_fnorm:9.7656e-03 L1_l1linf:1.4038e-03 L2_l1linf:1.3657e-03 L3_l1linf:1.4801e-03 L4_l1linf:1.5106e-03 L5_l1linf:1.6708e-03 L6_l1linf:1.7929e-03 L7_l1linf:1.8005e-03 L8_l1linf:1.8463e-03 L9_l1linf:1.9531e-03 L10_l1linf:1.8311e-03 L11_l1linf:1.8234e-03 L12_l1linf:2.2736e-03 L1_spectral:2.2165e-04 L2_spectral:2.1929e-04 L3_spectral:2.2028e-04 L4_spectral:2.1344e-04 L5_spectral:2.1673e-04 L6_spectral:2.1359e-04 L7_spectral:2.1364e-04 L8_spectral:2.1685e-04 L9_spectral:2.1279e-04 L10_spectral:2.0739e-04 L11_spectral:2.0388e-04 L12_spectral:1.8711e-04 train_time:373747ms step_avg:42.47ms +[2025-09-11 11:27:02] [Rank 0] PRINT: step:8800/10000 
val_loss:4.8184 total_sharp:6.4337e-04 L1_sharp:5.6636e-02 L2_sharp:1.0113e-01 L3_sharp:1.3332e-01 L4_sharp:2.0686e-01 L5_sharp:3.3144e-01 L6_sharp:5.9876e-01 L7_sharp:8.0852e-01 L8_sharp:9.4988e-01 L9_sharp:1.3141e+00 L10_sharp:1.3295e+00 L11_sharp:1.0192e+00 L12_sharp:1.5440e+00 total_fnorm:3.2500e+00 total_l1_linf:2.2560e+03 total_spectral:1.6250e+00 L1_fnorm:1.0010e-02 L2_fnorm:1.0071e-02 L3_fnorm:1.0132e-02 L4_fnorm:1.0193e-02 L5_fnorm:1.0376e-02 L6_fnorm:1.0376e-02 L7_fnorm:1.0315e-02 L8_fnorm:1.0376e-02 L9_fnorm:1.0376e-02 L10_fnorm:1.0193e-02 L11_fnorm:9.8877e-03 L12_fnorm:9.7656e-03 L1_l1linf:1.4038e-03 L2_l1linf:1.3657e-03 L3_l1linf:1.4801e-03 L4_l1linf:1.5106e-03 L5_l1linf:1.6708e-03 L6_l1linf:1.7929e-03 L7_l1linf:1.8005e-03 L8_l1linf:1.8463e-03 L9_l1linf:1.9531e-03 L10_l1linf:1.8311e-03 L11_l1linf:1.8234e-03 L12_l1linf:2.2736e-03 L1_spectral:2.2165e-04 L2_spectral:2.1929e-04 L3_spectral:2.2028e-04 L4_spectral:2.1344e-04 L5_spectral:2.1673e-04 L6_spectral:2.1359e-04 L7_spectral:2.1364e-04 L8_spectral:2.1685e-04 L9_spectral:2.1279e-04 L10_spectral:2.0739e-04 L11_spectral:2.0388e-04 L12_spectral:1.8711e-04 train_time:373747ms step_avg:42.47ms +[2025-09-11 11:27:04] [Rank 0] step:8801/10000 train_time:375716ms step_avg:42.69ms +[2025-09-11 11:27:04] [Rank 0] step:8801/10000 train_time:375716ms step_avg:42.69ms +[2025-09-11 11:27:05] [Rank 0] step:8821/10000 train_time:376461ms step_avg:42.68ms +[2025-09-11 11:27:05] [Rank 0] step:8821/10000 train_time:376461ms step_avg:42.68ms +[2025-09-11 11:27:06] [Rank 0] step:8841/10000 train_time:377171ms step_avg:42.66ms +[2025-09-11 11:27:06] [Rank 0] step:8841/10000 train_time:377171ms step_avg:42.66ms +[2025-09-11 11:27:06] [Rank 0] step:8861/10000 train_time:377880ms step_avg:42.65ms +[2025-09-11 11:27:06] [Rank 0] step:8861/10000 train_time:377880ms step_avg:42.65ms +[2025-09-11 11:27:07] [Rank 0] step:8881/10000 train_time:378590ms step_avg:42.63ms +[2025-09-11 11:27:07] [Rank 0] step:8881/10000 
train_time:378590ms step_avg:42.63ms +[2025-09-11 11:27:08] [Rank 0] step:8901/10000 train_time:379303ms step_avg:42.61ms +[2025-09-11 11:27:08] [Rank 0] step:8901/10000 train_time:379303ms step_avg:42.61ms +[2025-09-11 11:27:09] [Rank 0] step:8921/10000 train_time:380009ms step_avg:42.60ms +[2025-09-11 11:27:09] [Rank 0] step:8921/10000 train_time:380009ms step_avg:42.60ms +[2025-09-11 11:27:09] [Rank 0] step:8941/10000 train_time:380721ms step_avg:42.58ms +[2025-09-11 11:27:09] [Rank 0] step:8941/10000 train_time:380721ms step_avg:42.58ms +[2025-09-11 11:27:10] [Rank 0] step:8961/10000 train_time:381438ms step_avg:42.57ms +[2025-09-11 11:27:10] [Rank 0] step:8961/10000 train_time:381438ms step_avg:42.57ms +[2025-09-11 11:27:11] [Rank 0] step:8981/10000 train_time:382151ms step_avg:42.55ms +[2025-09-11 11:27:11] [Rank 0] step:8981/10000 train_time:382151ms step_avg:42.55ms +[2025-09-11 11:27:11] [Rank 0] step:9001/10000 train_time:382855ms step_avg:42.53ms +[2025-09-11 11:27:11] [Rank 0] step:9001/10000 train_time:382855ms step_avg:42.53ms +[2025-09-11 11:27:12] [Rank 0] step:9021/10000 train_time:383564ms step_avg:42.52ms +[2025-09-11 11:27:12] [Rank 0] step:9021/10000 train_time:383564ms step_avg:42.52ms +[2025-09-11 11:27:13] [Rank 0] step:9041/10000 train_time:384275ms step_avg:42.50ms +[2025-09-11 11:27:13] [Rank 0] step:9041/10000 train_time:384275ms step_avg:42.50ms +[2025-09-11 11:27:14] [Rank 0] step:9061/10000 train_time:384983ms step_avg:42.49ms +[2025-09-11 11:27:14] [Rank 0] step:9061/10000 train_time:384983ms step_avg:42.49ms +[2025-09-11 11:27:14] [Rank 0] step:9081/10000 train_time:385695ms step_avg:42.47ms +[2025-09-11 11:27:14] [Rank 0] step:9081/10000 train_time:385695ms step_avg:42.47ms +[2025-09-11 11:27:15] [Rank 0] step:9101/10000 train_time:386408ms step_avg:42.46ms +[2025-09-11 11:27:15] [Rank 0] step:9101/10000 train_time:386408ms step_avg:42.46ms +[2025-09-11 11:27:16] [Rank 0] step:9121/10000 train_time:387121ms step_avg:42.44ms 
+[2025-09-11 11:27:16] [Rank 0] step:9121/10000 train_time:387121ms step_avg:42.44ms +[2025-09-11 11:27:16] [Rank 0] step:9141/10000 train_time:387829ms step_avg:42.43ms +[2025-09-11 11:27:16] [Rank 0] step:9141/10000 train_time:387829ms step_avg:42.43ms +[2025-09-11 11:27:17] [Rank 0] step:9161/10000 train_time:388541ms step_avg:42.41ms +[2025-09-11 11:27:17] [Rank 0] step:9161/10000 train_time:388541ms step_avg:42.41ms +[2025-09-11 11:27:18] [Rank 0] step:9181/10000 train_time:389253ms step_avg:42.40ms +[2025-09-11 11:27:18] [Rank 0] step:9181/10000 train_time:389253ms step_avg:42.40ms +[2025-09-11 11:27:19] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 11:27:19] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 11:27:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 11:27:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 11:27:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 11:27:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 11:27:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:27:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:27:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 11:27:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 11:27:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 11:27:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 11:27:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 11:27:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 11:27:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 11:27:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 11:27:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 11:27:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 11:27:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 11:27:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 11:27:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 11:27:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 11:27:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 11:27:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 11:27:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 11:27:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 11:27:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 11:27:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 11:27:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 11:27:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 11:27:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 11:27:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 11:27:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 11:27:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 11:27:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 11:27:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 11:27:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 11:27:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 11:27:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 11:27:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 11:27:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 11:27:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 11:27:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 11:27:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 11:27:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:27:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:27:30] [Rank 0] PRINT: step:9200/10000 val_loss:4.8088 total_sharp:7.0390e-04 L1_sharp:4.6065e-02 L2_sharp:8.0346e-02 L3_sharp:1.1389e-01 L4_sharp:1.8224e-01 L5_sharp:3.2069e-01 L6_sharp:5.2454e-01 L7_sharp:6.9701e-01 L8_sharp:9.0601e-01 L9_sharp:1.2740e+00 L10_sharp:1.7948e+00 L11_sharp:1.4440e+00 L12_sharp:3.7614e+00 total_fnorm:2.1562e+00 total_l1_linf:1.2640e+03 total_spectral:1.0703e+00 L1_fnorm:6.5002e-03 L2_fnorm:6.5918e-03 L3_fnorm:6.5918e-03 L4_fnorm:6.6833e-03 L5_fnorm:6.8359e-03 L6_fnorm:6.8665e-03 L7_fnorm:6.8665e-03 L8_fnorm:6.8665e-03 L9_fnorm:6.8665e-03 L10_fnorm:6.7444e-03 L11_fnorm:6.5918e-03 L12_fnorm:6.3782e-03 L1_l1linf:7.6294e-04 L2_l1linf:8.2397e-04 L3_l1linf:8.9645e-04 L4_l1linf:9.4604e-04 L5_l1linf:1.0223e-03 L6_l1linf:1.1215e-03 L7_l1linf:1.1063e-03 L8_l1linf:1.1520e-03 L9_l1linf:1.2054e-03 L10_l1linf:1.1826e-03 L11_l1linf:1.1597e-03 L12_l1linf:1.1902e-03 L1_spectral:1.4362e-04 L2_spectral:1.4105e-04 L3_spectral:1.3874e-04 L4_spectral:1.3813e-04 L5_spectral:1.4115e-04 L6_spectral:1.3936e-04 L7_spectral:1.3977e-04 L8_spectral:1.4384e-04 L9_spectral:1.4168e-04 L10_spectral:1.3582e-04 L11_spectral:1.3574e-04 L12_spectral:1.2207e-04 train_time:389947ms step_avg:42.39ms +[2025-09-11 11:27:30] [Rank 0] PRINT: step:9200/10000 val_loss:4.8088 total_sharp:7.0390e-04 L1_sharp:4.6065e-02 L2_sharp:8.0346e-02 L3_sharp:1.1389e-01 L4_sharp:1.8224e-01 L5_sharp:3.2069e-01 L6_sharp:5.2454e-01 L7_sharp:6.9701e-01 L8_sharp:9.0601e-01 L9_sharp:1.2740e+00 L10_sharp:1.7948e+00 L11_sharp:1.4440e+00 L12_sharp:3.7614e+00 total_fnorm:2.1562e+00 total_l1_linf:1.2640e+03 total_spectral:1.0703e+00 L1_fnorm:6.5002e-03 L2_fnorm:6.5918e-03 L3_fnorm:6.5918e-03 L4_fnorm:6.6833e-03 L5_fnorm:6.8359e-03 L6_fnorm:6.8665e-03 L7_fnorm:6.8665e-03 L8_fnorm:6.8665e-03 L9_fnorm:6.8665e-03 L10_fnorm:6.7444e-03 L11_fnorm:6.5918e-03 L12_fnorm:6.3782e-03 L1_l1linf:7.6294e-04 L2_l1linf:8.2397e-04 L3_l1linf:8.9645e-04 L4_l1linf:9.4604e-04 L5_l1linf:1.0223e-03 
L6_l1linf:1.1215e-03 L7_l1linf:1.1063e-03 L8_l1linf:1.1520e-03 L9_l1linf:1.2054e-03 L10_l1linf:1.1826e-03 L11_l1linf:1.1597e-03 L12_l1linf:1.1902e-03 L1_spectral:1.4362e-04 L2_spectral:1.4105e-04 L3_spectral:1.3874e-04 L4_spectral:1.3813e-04 L5_spectral:1.4115e-04 L6_spectral:1.3936e-04 L7_spectral:1.3977e-04 L8_spectral:1.4384e-04 L9_spectral:1.4168e-04 L10_spectral:1.3582e-04 L11_spectral:1.3574e-04 L12_spectral:1.2207e-04 train_time:389947ms step_avg:42.39ms +[2025-09-11 11:27:32] [Rank 0] step:9201/10000 train_time:392203ms step_avg:42.63ms +[2025-09-11 11:27:32] [Rank 0] step:9201/10000 train_time:392203ms step_avg:42.63ms +[2025-09-11 11:27:33] [Rank 0] step:9221/10000 train_time:392944ms step_avg:42.61ms +[2025-09-11 11:27:33] [Rank 0] step:9221/10000 train_time:392944ms step_avg:42.61ms +[2025-09-11 11:27:34] [Rank 0] step:9241/10000 train_time:393652ms step_avg:42.60ms +[2025-09-11 11:27:34] [Rank 0] step:9241/10000 train_time:393652ms step_avg:42.60ms +[2025-09-11 11:27:34] [Rank 0] step:9261/10000 train_time:394363ms step_avg:42.58ms +[2025-09-11 11:27:34] [Rank 0] step:9261/10000 train_time:394363ms step_avg:42.58ms +[2025-09-11 11:27:35] [Rank 0] step:9281/10000 train_time:395074ms step_avg:42.57ms +[2025-09-11 11:27:35] [Rank 0] step:9281/10000 train_time:395074ms step_avg:42.57ms +[2025-09-11 11:27:36] [Rank 0] step:9301/10000 train_time:395781ms step_avg:42.55ms +[2025-09-11 11:27:36] [Rank 0] step:9301/10000 train_time:395781ms step_avg:42.55ms +[2025-09-11 11:27:37] [Rank 0] step:9321/10000 train_time:396492ms step_avg:42.54ms +[2025-09-11 11:27:37] [Rank 0] step:9321/10000 train_time:396492ms step_avg:42.54ms +[2025-09-11 11:27:37] [Rank 0] step:9341/10000 train_time:397198ms step_avg:42.52ms +[2025-09-11 11:27:37] [Rank 0] step:9341/10000 train_time:397198ms step_avg:42.52ms +[2025-09-11 11:27:38] [Rank 0] step:9361/10000 train_time:397903ms step_avg:42.51ms +[2025-09-11 11:27:38] [Rank 0] step:9361/10000 train_time:397903ms step_avg:42.51ms 
+[2025-09-11 11:27:39] [Rank 0] step:9381/10000 train_time:398613ms step_avg:42.49ms +[2025-09-11 11:27:39] [Rank 0] step:9381/10000 train_time:398613ms step_avg:42.49ms +[2025-09-11 11:27:39] [Rank 0] step:9401/10000 train_time:399324ms step_avg:42.48ms +[2025-09-11 11:27:39] [Rank 0] step:9401/10000 train_time:399324ms step_avg:42.48ms +[2025-09-11 11:27:40] [Rank 0] step:9421/10000 train_time:400036ms step_avg:42.46ms +[2025-09-11 11:27:40] [Rank 0] step:9421/10000 train_time:400036ms step_avg:42.46ms +[2025-09-11 11:27:41] [Rank 0] step:9441/10000 train_time:400750ms step_avg:42.45ms +[2025-09-11 11:27:41] [Rank 0] step:9441/10000 train_time:400750ms step_avg:42.45ms +[2025-09-11 11:27:41] [Rank 0] step:9461/10000 train_time:401459ms step_avg:42.43ms +[2025-09-11 11:27:41] [Rank 0] step:9461/10000 train_time:401459ms step_avg:42.43ms +[2025-09-11 11:27:42] [Rank 0] step:9481/10000 train_time:402170ms step_avg:42.42ms +[2025-09-11 11:27:42] [Rank 0] step:9481/10000 train_time:402170ms step_avg:42.42ms +[2025-09-11 11:27:43] [Rank 0] step:9501/10000 train_time:402882ms step_avg:42.40ms +[2025-09-11 11:27:43] [Rank 0] step:9501/10000 train_time:402882ms step_avg:42.40ms +[2025-09-11 11:27:44] [Rank 0] step:9521/10000 train_time:403595ms step_avg:42.39ms +[2025-09-11 11:27:44] [Rank 0] step:9521/10000 train_time:403595ms step_avg:42.39ms +[2025-09-11 11:27:44] [Rank 0] step:9541/10000 train_time:404303ms step_avg:42.38ms +[2025-09-11 11:27:44] [Rank 0] step:9541/10000 train_time:404303ms step_avg:42.38ms +[2025-09-11 11:27:45] [Rank 0] step:9561/10000 train_time:405015ms step_avg:42.36ms +[2025-09-11 11:27:45] [Rank 0] step:9561/10000 train_time:405015ms step_avg:42.36ms +[2025-09-11 11:27:46] [Rank 0] step:9581/10000 train_time:405726ms step_avg:42.35ms +[2025-09-11 11:27:46] [Rank 0] step:9581/10000 train_time:405726ms step_avg:42.35ms +[2025-09-11 11:27:46] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 11:27:46] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 11:27:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 11:27:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 11:27:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 11:27:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 11:27:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:27:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:27:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 11:27:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 11:27:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 11:27:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 11:27:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 11:27:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 11:27:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 11:27:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 11:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 11:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 11:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 11:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 11:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 11:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 11:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 11:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 11:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 11:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 11:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 11:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 11:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 11:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 11:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 11:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 11:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 11:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 11:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 11:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 11:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 11:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 11:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 11:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 11:27:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 11:27:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 11:27:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 11:27:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 11:27:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:27:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:27:58] [Rank 0] PRINT: step:9600/10000 val_loss:4.8049 total_sharp:3.7491e-04 L1_sharp:3.1654e-02 L2_sharp:5.7314e-02 L3_sharp:7.5768e-02 L4_sharp:1.3290e-01 L5_sharp:1.9857e-01 L6_sharp:3.5529e-01 L7_sharp:4.8324e-01 L8_sharp:6.3857e-01 L9_sharp:9.8318e-01 L10_sharp:1.1715e+00 L11_sharp:9.4979e-01 L12_sharp:1.0675e+00 total_fnorm:1.1953e+00 total_l1_linf:6.0000e+02 total_spectral:6.0156e-01 L1_fnorm:3.6469e-03 L2_fnorm:3.6774e-03 L3_fnorm:3.6926e-03 L4_fnorm:3.7537e-03 L5_fnorm:3.8147e-03 L6_fnorm:3.8147e-03 L7_fnorm:3.8452e-03 L8_fnorm:3.8300e-03 L9_fnorm:3.8452e-03 L10_fnorm:3.7842e-03 L11_fnorm:3.6926e-03 L12_fnorm:3.5400e-03 L1_l1linf:4.0245e-04 L2_l1linf:3.8147e-04 L3_l1linf:4.2343e-04 L4_l1linf:4.5967e-04 L5_l1linf:4.6730e-04 L6_l1linf:5.7220e-04 L7_l1linf:5.2643e-04 L8_l1linf:5.4932e-04 L9_l1linf:5.7220e-04 L10_l1linf:5.9128e-04 L11_l1linf:5.5313e-04 L12_l1linf:5.9128e-04 L1_spectral:8.2816e-05 L2_spectral:8.2190e-05 L3_spectral:8.1835e-05 L4_spectral:7.9566e-05 L5_spectral:8.0902e-05 L6_spectral:8.0527e-05 L7_spectral:8.0838e-05 L8_spectral:8.1815e-05 L9_spectral:7.9765e-05 L10_spectral:7.8481e-05 L11_spectral:7.5312e-05 L12_spectral:6.7052e-05 train_time:406414ms step_avg:42.33ms +[2025-09-11 11:27:58] [Rank 0] PRINT: step:9600/10000 
val_loss:4.8049 total_sharp:3.7491e-04 L1_sharp:3.1654e-02 L2_sharp:5.7314e-02 L3_sharp:7.5768e-02 L4_sharp:1.3290e-01 L5_sharp:1.9857e-01 L6_sharp:3.5529e-01 L7_sharp:4.8324e-01 L8_sharp:6.3857e-01 L9_sharp:9.8318e-01 L10_sharp:1.1715e+00 L11_sharp:9.4979e-01 L12_sharp:1.0675e+00 total_fnorm:1.1953e+00 total_l1_linf:6.0000e+02 total_spectral:6.0156e-01 L1_fnorm:3.6469e-03 L2_fnorm:3.6774e-03 L3_fnorm:3.6926e-03 L4_fnorm:3.7537e-03 L5_fnorm:3.8147e-03 L6_fnorm:3.8147e-03 L7_fnorm:3.8452e-03 L8_fnorm:3.8300e-03 L9_fnorm:3.8452e-03 L10_fnorm:3.7842e-03 L11_fnorm:3.6926e-03 L12_fnorm:3.5400e-03 L1_l1linf:4.0245e-04 L2_l1linf:3.8147e-04 L3_l1linf:4.2343e-04 L4_l1linf:4.5967e-04 L5_l1linf:4.6730e-04 L6_l1linf:5.7220e-04 L7_l1linf:5.2643e-04 L8_l1linf:5.4932e-04 L9_l1linf:5.7220e-04 L10_l1linf:5.9128e-04 L11_l1linf:5.5313e-04 L12_l1linf:5.9128e-04 L1_spectral:8.2816e-05 L2_spectral:8.2190e-05 L3_spectral:8.1835e-05 L4_spectral:7.9566e-05 L5_spectral:8.0902e-05 L6_spectral:8.0527e-05 L7_spectral:8.0838e-05 L8_spectral:8.1815e-05 L9_spectral:7.9765e-05 L10_spectral:7.8481e-05 L11_spectral:7.5312e-05 L12_spectral:6.7052e-05 train_time:406414ms step_avg:42.33ms +[2025-09-11 11:28:00] [Rank 0] step:9601/10000 train_time:408665ms step_avg:42.56ms +[2025-09-11 11:28:00] [Rank 0] step:9601/10000 train_time:408665ms step_avg:42.56ms +[2025-09-11 11:28:01] [Rank 0] step:9621/10000 train_time:409406ms step_avg:42.55ms +[2025-09-11 11:28:01] [Rank 0] step:9621/10000 train_time:409406ms step_avg:42.55ms +[2025-09-11 11:28:02] [Rank 0] step:9641/10000 train_time:410122ms step_avg:42.54ms +[2025-09-11 11:28:02] [Rank 0] step:9641/10000 train_time:410122ms step_avg:42.54ms +[2025-09-11 11:28:02] [Rank 0] step:9661/10000 train_time:410846ms step_avg:42.53ms +[2025-09-11 11:28:02] [Rank 0] step:9661/10000 train_time:410846ms step_avg:42.53ms +[2025-09-11 11:28:03] [Rank 0] step:9681/10000 train_time:411562ms step_avg:42.51ms +[2025-09-11 11:28:03] [Rank 0] step:9681/10000 
train_time:411562ms step_avg:42.51ms +[2025-09-11 11:28:04] [Rank 0] step:9701/10000 train_time:412279ms step_avg:42.50ms +[2025-09-11 11:28:04] [Rank 0] step:9701/10000 train_time:412279ms step_avg:42.50ms +[2025-09-11 11:28:05] [Rank 0] step:9721/10000 train_time:413000ms step_avg:42.49ms +[2025-09-11 11:28:05] [Rank 0] step:9721/10000 train_time:413000ms step_avg:42.49ms +[2025-09-11 11:28:05] [Rank 0] step:9741/10000 train_time:413719ms step_avg:42.47ms +[2025-09-11 11:28:05] [Rank 0] step:9741/10000 train_time:413719ms step_avg:42.47ms +[2025-09-11 11:28:06] [Rank 0] step:9761/10000 train_time:414437ms step_avg:42.46ms +[2025-09-11 11:28:06] [Rank 0] step:9761/10000 train_time:414437ms step_avg:42.46ms +[2025-09-11 11:28:07] [Rank 0] step:9781/10000 train_time:415153ms step_avg:42.44ms +[2025-09-11 11:28:07] [Rank 0] step:9781/10000 train_time:415153ms step_avg:42.44ms +[2025-09-11 11:28:07] [Rank 0] step:9801/10000 train_time:415876ms step_avg:42.43ms +[2025-09-11 11:28:07] [Rank 0] step:9801/10000 train_time:415876ms step_avg:42.43ms +[2025-09-11 11:28:08] [Rank 0] step:9821/10000 train_time:416595ms step_avg:42.42ms +[2025-09-11 11:28:08] [Rank 0] step:9821/10000 train_time:416595ms step_avg:42.42ms +[2025-09-11 11:28:09] [Rank 0] step:9841/10000 train_time:417316ms step_avg:42.41ms +[2025-09-11 11:28:09] [Rank 0] step:9841/10000 train_time:417316ms step_avg:42.41ms +[2025-09-11 11:28:10] [Rank 0] step:9861/10000 train_time:418033ms step_avg:42.39ms +[2025-09-11 11:28:10] [Rank 0] step:9861/10000 train_time:418033ms step_avg:42.39ms +[2025-09-11 11:28:10] [Rank 0] step:9881/10000 train_time:418751ms step_avg:42.38ms +[2025-09-11 11:28:10] [Rank 0] step:9881/10000 train_time:418751ms step_avg:42.38ms +[2025-09-11 11:28:11] [Rank 0] step:9901/10000 train_time:419467ms step_avg:42.37ms +[2025-09-11 11:28:11] [Rank 0] step:9901/10000 train_time:419467ms step_avg:42.37ms +[2025-09-11 11:28:12] [Rank 0] step:9921/10000 train_time:420185ms step_avg:42.35ms 
+[2025-09-11 11:28:12] [Rank 0] step:9921/10000 train_time:420185ms step_avg:42.35ms +[2025-09-11 11:28:12] [Rank 0] step:9941/10000 train_time:420907ms step_avg:42.34ms +[2025-09-11 11:28:12] [Rank 0] step:9941/10000 train_time:420907ms step_avg:42.34ms +[2025-09-11 11:28:13] [Rank 0] step:9961/10000 train_time:421630ms step_avg:42.33ms +[2025-09-11 11:28:13] [Rank 0] step:9961/10000 train_time:421630ms step_avg:42.33ms +[2025-09-11 11:28:14] [Rank 0] step:9981/10000 train_time:422349ms step_avg:42.32ms +[2025-09-11 11:28:14] [Rank 0] step:9981/10000 train_time:422349ms step_avg:42.32ms +[2025-09-11 11:28:15] [Rank 0] step:10000/10000 train_time:423042ms step_avg:42.30ms +[2025-09-11 11:28:15] [Rank 0] step:10000/10000 train_time:423042ms step_avg:42.30ms +[2025-09-11 11:28:15] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 11:28:15] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 11:28:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 11:28:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 11:28:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 11:28:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 11:28:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:28:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:28:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 11:28:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 11:28:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 11:28:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 11:28:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 11:28:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 11:28:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 11:28:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 11:28:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 11:28:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 11:28:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 11:28:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 11:28:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 11:28:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 11:28:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 11:28:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 11:28:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 11:28:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 11:28:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 11:28:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 11:28:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 11:28:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 11:28:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 11:28:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 11:28:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 11:28:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 11:28:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 11:28:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 11:28:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 11:28:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 11:28:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 11:28:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 11:28:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 11:28:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 11:28:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 11:28:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 11:28:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:28:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:28:26] [Rank 0] PRINT: step:10000/10000 val_loss:4.8038 total_sharp:2.7318e-04 L1_sharp:2.2232e-02 L2_sharp:4.3351e-02 L3_sharp:6.3577e-02 L4_sharp:9.0574e-02 L5_sharp:1.3032e-01 L6_sharp:2.6040e-01 L7_sharp:3.7080e-01 L8_sharp:5.2243e-01 L9_sharp:7.3280e-01 L10_sharp:1.0871e+00 L11_sharp:7.6526e-01 L12_sharp:1.8825e+00 total_fnorm:4.5508e-01 total_l1_linf:1.6500e+02 total_spectral:2.2754e-01 L1_fnorm:1.4191e-03 L2_fnorm:1.4191e-03 L3_fnorm:1.4267e-03 L4_fnorm:1.4496e-03 L5_fnorm:1.4725e-03 L6_fnorm:1.4877e-03 L7_fnorm:1.4877e-03 L8_fnorm:1.4877e-03 L9_fnorm:1.4954e-03 L10_fnorm:1.4572e-03 L11_fnorm:1.4267e-03 L12_fnorm:1.3809e-03 L1_l1linf:1.2684e-04 L2_l1linf:1.3733e-04 L3_l1linf:1.3733e-04 L4_l1linf:1.3161e-04 L5_l1linf:1.5450e-04 L6_l1linf:1.6785e-04 L7_l1linf:1.6689e-04 L8_l1linf:1.6594e-04 L9_l1linf:1.6880e-04 L10_l1linf:1.8215e-04 L11_l1linf:1.6308e-04 L12_l1linf:2.0599e-04 L1_spectral:3.2893e-05 L2_spectral:3.2436e-05 L3_spectral:3.2283e-05 L4_spectral:3.2672e-05 L5_spectral:3.2913e-05 L6_spectral:3.1967e-05 L7_spectral:3.2399e-05 L8_spectral:3.2393e-05 L9_spectral:3.1927e-05 L10_spectral:3.1332e-05 L11_spectral:3.0950e-05 L12_spectral:2.7695e-05 train_time:423062ms step_avg:42.31ms +[2025-09-11 11:28:26] [Rank 0] PRINT: step:10000/10000 val_loss:4.8038 total_sharp:2.7318e-04 L1_sharp:2.2232e-02 L2_sharp:4.3351e-02 L3_sharp:6.3577e-02 L4_sharp:9.0574e-02 L5_sharp:1.3032e-01 L6_sharp:2.6040e-01 L7_sharp:3.7080e-01 L8_sharp:5.2243e-01 L9_sharp:7.3280e-01 L10_sharp:1.0871e+00 L11_sharp:7.6526e-01 L12_sharp:1.8825e+00 total_fnorm:4.5508e-01 total_l1_linf:1.6500e+02 total_spectral:2.2754e-01 L1_fnorm:1.4191e-03 L2_fnorm:1.4191e-03 L3_fnorm:1.4267e-03 L4_fnorm:1.4496e-03 L5_fnorm:1.4725e-03 L6_fnorm:1.4877e-03 L7_fnorm:1.4877e-03 L8_fnorm:1.4877e-03 L9_fnorm:1.4954e-03 L10_fnorm:1.4572e-03 L11_fnorm:1.4267e-03 L12_fnorm:1.3809e-03 L1_l1linf:1.2684e-04 L2_l1linf:1.3733e-04 L3_l1linf:1.3733e-04 L4_l1linf:1.3161e-04 L5_l1linf:1.5450e-04 
L6_l1linf:1.6785e-04 L7_l1linf:1.6689e-04 L8_l1linf:1.6594e-04 L9_l1linf:1.6880e-04 L10_l1linf:1.8215e-04 L11_l1linf:1.6308e-04 L12_l1linf:2.0599e-04 L1_spectral:3.2893e-05 L2_spectral:3.2436e-05 L3_spectral:3.2283e-05 L4_spectral:3.2672e-05 L5_spectral:3.2913e-05 L6_spectral:3.1967e-05 L7_spectral:3.2399e-05 L8_spectral:3.2393e-05 L9_spectral:3.1927e-05 L10_spectral:3.1332e-05 L11_spectral:3.0950e-05 L12_spectral:2.7695e-05 train_time:423062ms step_avg:42.31ms +[2025-09-11 11:28:26] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 11:28:26 2025 --- +[2025-09-11 11:28:26] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 11:28:26 2025 --- +[2025-09-11 11:28:26] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 11:28:26] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.001_seed_42/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.001_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..6a3898d5a2e0a9fd87fbd082841231167198bc5f --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.001_seed_42/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.005, + "muon_lr": 0.001, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "7063183c-b09c-4b42-8af6-35b7383a2b98", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.001_seed_42/training_log_7063183c-b09c-4b42-8af6-35b7383a2b98.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.001_seed_42/training_log_7063183c-b09c-4b42-8af6-35b7383a2b98.txt new file mode 100644 index 0000000000000000000000000000000000000000..a5653519679ee7ed1dde3a66ab99b209d268ca1a --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.001_seed_42/training_log_7063183c-b09c-4b42-8af6-35b7383a2b98.txt @@ -0,0 +1,4264 @@ +[2025-09-11 11:42:15] [Rank 0] PRINT: --- Script Start: Thu Sep 11 11:42:15 2025 --- +[2025-09-11 11:42:15] [Rank 0] PRINT: --- Script Start: Thu Sep 11 11:42:15 2025 --- +[2025-09-11 11:42:15] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005, muon_lr=0.001, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 11:42:15] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005, muon_lr=0.001, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 11:42:15] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 11:42:15] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 11:42:15] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-11 11:42:15] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-11 11:42:15] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.001_seed_42 +[2025-09-11 11:42:15] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.001_seed_42 +[2025-09-11 11:42:15] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses 
import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert 
header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." 
+ "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + 
train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 11:42:15] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 11:42:15] [Rank 0] PRINT: Constructing model... +[2025-09-11 11:42:15] [Rank 0] PRINT: Constructing model... +[2025-09-11 11:42:16] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 11:42:16] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 11:42:16] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 11:42:16] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 11:42:16] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 11:42:16] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 11:42:16] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 11:42:16] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 11:42:16] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 11:42:16] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 11:42:18] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 11:42:18] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 11:42:18] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 11:42:18] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 11:42:18] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 11:42:18] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 11:42:24] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 11:42:24] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 11:42:24] [Rank 0] PRINT: Starting warmup... +[2025-09-11 11:42:24] [Rank 0] PRINT: Starting warmup... +[2025-09-11 11:43:01] [Rank 0] PRINT: Warmup complete. +[2025-09-11 11:43:01] [Rank 0] PRINT: Warmup complete. +[2025-09-11 11:43:01] [Rank 0] PRINT: Starting training... +[2025-09-11 11:43:01] [Rank 0] PRINT: Starting training... 
+[2025-09-11 11:43:02] [Rank 0] step:21/10000 train_time:1138ms step_avg:54.18ms +[2025-09-11 11:43:02] [Rank 0] step:21/10000 train_time:1138ms step_avg:54.18ms +[2025-09-11 11:43:03] [Rank 0] step:41/10000 train_time:1867ms step_avg:45.54ms +[2025-09-11 11:43:03] [Rank 0] step:41/10000 train_time:1867ms step_avg:45.54ms +[2025-09-11 11:43:03] [Rank 0] step:61/10000 train_time:2596ms step_avg:42.55ms +[2025-09-11 11:43:03] [Rank 0] step:61/10000 train_time:2596ms step_avg:42.55ms +[2025-09-11 11:43:04] [Rank 0] step:81/10000 train_time:3324ms step_avg:41.04ms +[2025-09-11 11:43:04] [Rank 0] step:81/10000 train_time:3324ms step_avg:41.04ms +[2025-09-11 11:43:05] [Rank 0] step:101/10000 train_time:4053ms step_avg:40.13ms +[2025-09-11 11:43:05] [Rank 0] step:101/10000 train_time:4053ms step_avg:40.13ms +[2025-09-11 11:43:06] [Rank 0] step:121/10000 train_time:4781ms step_avg:39.51ms +[2025-09-11 11:43:06] [Rank 0] step:121/10000 train_time:4781ms step_avg:39.51ms +[2025-09-11 11:43:06] [Rank 0] step:141/10000 train_time:5510ms step_avg:39.08ms +[2025-09-11 11:43:06] [Rank 0] step:141/10000 train_time:5510ms step_avg:39.08ms +[2025-09-11 11:43:07] [Rank 0] step:161/10000 train_time:6239ms step_avg:38.75ms +[2025-09-11 11:43:07] [Rank 0] step:161/10000 train_time:6239ms step_avg:38.75ms +[2025-09-11 11:43:08] [Rank 0] step:181/10000 train_time:6967ms step_avg:38.49ms +[2025-09-11 11:43:08] [Rank 0] step:181/10000 train_time:6967ms step_avg:38.49ms +[2025-09-11 11:43:08] [Rank 0] step:201/10000 train_time:7696ms step_avg:38.29ms +[2025-09-11 11:43:08] [Rank 0] step:201/10000 train_time:7696ms step_avg:38.29ms +[2025-09-11 11:43:09] [Rank 0] step:221/10000 train_time:8424ms step_avg:38.12ms +[2025-09-11 11:43:09] [Rank 0] step:221/10000 train_time:8424ms step_avg:38.12ms +[2025-09-11 11:43:10] [Rank 0] step:241/10000 train_time:9152ms step_avg:37.98ms +[2025-09-11 11:43:10] [Rank 0] step:241/10000 train_time:9152ms step_avg:37.98ms +[2025-09-11 11:43:11] [Rank 0] 
step:261/10000 train_time:9880ms step_avg:37.86ms +[2025-09-11 11:43:11] [Rank 0] step:261/10000 train_time:9880ms step_avg:37.86ms +[2025-09-11 11:43:11] [Rank 0] step:281/10000 train_time:10609ms step_avg:37.75ms +[2025-09-11 11:43:11] [Rank 0] step:281/10000 train_time:10609ms step_avg:37.75ms +[2025-09-11 11:43:12] [Rank 0] step:301/10000 train_time:11337ms step_avg:37.67ms +[2025-09-11 11:43:12] [Rank 0] step:301/10000 train_time:11337ms step_avg:37.67ms +[2025-09-11 11:43:13] [Rank 0] step:321/10000 train_time:12066ms step_avg:37.59ms +[2025-09-11 11:43:13] [Rank 0] step:321/10000 train_time:12066ms step_avg:37.59ms +[2025-09-11 11:43:14] [Rank 0] step:341/10000 train_time:12794ms step_avg:37.52ms +[2025-09-11 11:43:14] [Rank 0] step:341/10000 train_time:12794ms step_avg:37.52ms +[2025-09-11 11:43:14] [Rank 0] step:361/10000 train_time:13523ms step_avg:37.46ms +[2025-09-11 11:43:14] [Rank 0] step:361/10000 train_time:13523ms step_avg:37.46ms +[2025-09-11 11:43:15] [Rank 0] step:381/10000 train_time:14252ms step_avg:37.41ms +[2025-09-11 11:43:15] [Rank 0] step:381/10000 train_time:14252ms step_avg:37.41ms +[2025-09-11 11:43:16] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 11:43:16] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 11:43:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 11:43:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 11:43:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 11:43:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 11:43:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:43:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 11:43:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 11:43:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 11:43:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 11:43:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 11:43:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 11:43:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 11:43:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 11:43:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 11:43:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 11:43:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 11:43:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 11:43:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 11:43:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 11:43:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 11:44:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 11:44:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 11:44:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 11:44:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 11:44:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 11:44:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 11:44:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 11:44:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 11:44:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 11:44:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 11:44:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 11:44:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 11:44:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 11:44:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 11:44:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 11:44:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 11:44:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 11:44:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 11:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 11:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 11:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 11:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 11:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:44:03] [Rank 0] PRINT: step:400/10000 val_loss:6.4997 total_sharp:1.9910e-03 L1_sharp:2.3182e-01 L2_sharp:2.3145e-01 L3_sharp:2.1961e-01 L4_sharp:2.4079e-01 L5_sharp:2.5748e-01 L6_sharp:2.5553e-01 L7_sharp:2.5948e-01 L8_sharp:2.6501e-01 L9_sharp:3.0145e-01 L10_sharp:3.9877e-01 L11_sharp:3.6942e-01 L12_sharp:5.4444e-01 total_fnorm:2.0914e+01 total_l1_linf:5.0232e+04 total_spectral:1.0459e+01 L1_fnorm:1.1925e-01 L2_fnorm:1.1871e-01 L3_fnorm:1.1850e-01 L4_fnorm:1.1705e-01 L5_fnorm:1.1643e-01 L6_fnorm:1.1455e-01 L7_fnorm:1.1447e-01 L8_fnorm:1.1276e-01 L9_fnorm:1.1209e-01 L10_fnorm:1.0877e-01 L11_fnorm:1.0754e-01 L12_fnorm:1.0739e-01 L1_l1linf:4.5186e-02 L2_l1linf:4.5402e-02 L3_l1linf:4.4836e-02 L4_l1linf:4.4663e-02 L5_l1linf:4.4409e-02 L6_l1linf:4.4218e-02 L7_l1linf:4.3585e-02 L8_l1linf:4.3384e-02 L9_l1linf:4.2703e-02 L10_l1linf:4.2227e-02 L11_l1linf:4.1321e-02 L12_l1linf:4.0476e-02 L1_spectral:1.2048e-03 L2_spectral:1.2051e-03 L3_spectral:1.2048e-03 L4_spectral:1.2047e-03 L5_spectral:1.2048e-03 L6_spectral:1.2053e-03 L7_spectral:1.2049e-03 L8_spectral:1.2057e-03 L9_spectral:1.2050e-03 L10_spectral:1.2055e-03 L11_spectral:1.2050e-03 L12_spectral:1.2045e-03 train_time:14960ms step_avg:37.40ms +[2025-09-11 11:44:03] [Rank 0] PRINT: step:400/10000 val_loss:6.4997 total_sharp:1.9910e-03 L1_sharp:2.3182e-01 L2_sharp:2.3145e-01 L3_sharp:2.1961e-01 L4_sharp:2.4079e-01 L5_sharp:2.5748e-01 L6_sharp:2.5553e-01 L7_sharp:2.5948e-01 L8_sharp:2.6501e-01 L9_sharp:3.0145e-01 L10_sharp:3.9877e-01 L11_sharp:3.6942e-01 L12_sharp:5.4444e-01 total_fnorm:2.0914e+01 total_l1_linf:5.0232e+04 total_spectral:1.0459e+01 L1_fnorm:1.1925e-01 L2_fnorm:1.1871e-01 L3_fnorm:1.1850e-01 L4_fnorm:1.1705e-01 L5_fnorm:1.1643e-01 L6_fnorm:1.1455e-01 L7_fnorm:1.1447e-01 L8_fnorm:1.1276e-01 L9_fnorm:1.1209e-01 L10_fnorm:1.0877e-01 L11_fnorm:1.0754e-01 L12_fnorm:1.0739e-01 L1_l1linf:4.5186e-02 L2_l1linf:4.5402e-02 L3_l1linf:4.4836e-02 L4_l1linf:4.4663e-02 L5_l1linf:4.4409e-02 
L6_l1linf:4.4218e-02 L7_l1linf:4.3585e-02 L8_l1linf:4.3384e-02 L9_l1linf:4.2703e-02 L10_l1linf:4.2227e-02 L11_l1linf:4.1321e-02 L12_l1linf:4.0476e-02 L1_spectral:1.2048e-03 L2_spectral:1.2051e-03 L3_spectral:1.2048e-03 L4_spectral:1.2047e-03 L5_spectral:1.2048e-03 L6_spectral:1.2053e-03 L7_spectral:1.2049e-03 L8_spectral:1.2057e-03 L9_spectral:1.2050e-03 L10_spectral:1.2055e-03 L11_spectral:1.2050e-03 L12_spectral:1.2045e-03 train_time:14960ms step_avg:37.40ms +[2025-09-11 11:44:37] [Rank 0] step:401/10000 train_time:48685ms step_avg:121.41ms +[2025-09-11 11:44:37] [Rank 0] step:401/10000 train_time:48685ms step_avg:121.41ms +[2025-09-11 11:44:39] [Rank 0] step:421/10000 train_time:50830ms step_avg:120.74ms +[2025-09-11 11:44:39] [Rank 0] step:421/10000 train_time:50830ms step_avg:120.74ms +[2025-09-11 11:44:39] [Rank 0] step:441/10000 train_time:51471ms step_avg:116.71ms +[2025-09-11 11:44:39] [Rank 0] step:441/10000 train_time:51471ms step_avg:116.71ms +[2025-09-11 11:44:40] [Rank 0] step:461/10000 train_time:52111ms step_avg:113.04ms +[2025-09-11 11:44:40] [Rank 0] step:461/10000 train_time:52111ms step_avg:113.04ms +[2025-09-11 11:44:41] [Rank 0] step:481/10000 train_time:52751ms step_avg:109.67ms +[2025-09-11 11:44:41] [Rank 0] step:481/10000 train_time:52751ms step_avg:109.67ms +[2025-09-11 11:44:41] [Rank 0] step:501/10000 train_time:53390ms step_avg:106.57ms +[2025-09-11 11:44:41] [Rank 0] step:501/10000 train_time:53390ms step_avg:106.57ms +[2025-09-11 11:44:42] [Rank 0] step:521/10000 train_time:54030ms step_avg:103.70ms +[2025-09-11 11:44:42] [Rank 0] step:521/10000 train_time:54030ms step_avg:103.70ms +[2025-09-11 11:44:43] [Rank 0] step:541/10000 train_time:54669ms step_avg:101.05ms +[2025-09-11 11:44:43] [Rank 0] step:541/10000 train_time:54669ms step_avg:101.05ms +[2025-09-11 11:44:44] [Rank 0] step:561/10000 train_time:55637ms step_avg:99.17ms +[2025-09-11 11:44:44] [Rank 0] step:561/10000 train_time:55637ms step_avg:99.17ms +[2025-09-11 11:44:44] 
[Rank 0] step:581/10000 train_time:56276ms step_avg:96.86ms +[2025-09-11 11:44:44] [Rank 0] step:581/10000 train_time:56276ms step_avg:96.86ms +[2025-09-11 11:44:45] [Rank 0] step:601/10000 train_time:56917ms step_avg:94.70ms +[2025-09-11 11:44:45] [Rank 0] step:601/10000 train_time:56917ms step_avg:94.70ms +[2025-09-11 11:44:45] [Rank 0] step:621/10000 train_time:57556ms step_avg:92.68ms +[2025-09-11 11:44:45] [Rank 0] step:621/10000 train_time:57556ms step_avg:92.68ms +[2025-09-11 11:44:46] [Rank 0] step:641/10000 train_time:58457ms step_avg:91.20ms +[2025-09-11 11:44:46] [Rank 0] step:641/10000 train_time:58457ms step_avg:91.20ms +[2025-09-11 11:44:47] [Rank 0] step:661/10000 train_time:59096ms step_avg:89.40ms +[2025-09-11 11:44:47] [Rank 0] step:661/10000 train_time:59096ms step_avg:89.40ms +[2025-09-11 11:44:48] [Rank 0] step:681/10000 train_time:59736ms step_avg:87.72ms +[2025-09-11 11:44:48] [Rank 0] step:681/10000 train_time:59736ms step_avg:87.72ms +[2025-09-11 11:44:48] [Rank 0] step:701/10000 train_time:60375ms step_avg:86.13ms +[2025-09-11 11:44:48] [Rank 0] step:701/10000 train_time:60375ms step_avg:86.13ms +[2025-09-11 11:44:49] [Rank 0] step:721/10000 train_time:61015ms step_avg:84.63ms +[2025-09-11 11:44:49] [Rank 0] step:721/10000 train_time:61015ms step_avg:84.63ms +[2025-09-11 11:44:50] [Rank 0] step:741/10000 train_time:61655ms step_avg:83.20ms +[2025-09-11 11:44:50] [Rank 0] step:741/10000 train_time:61655ms step_avg:83.20ms +[2025-09-11 11:44:50] [Rank 0] step:761/10000 train_time:62299ms step_avg:81.86ms +[2025-09-11 11:44:50] [Rank 0] step:761/10000 train_time:62299ms step_avg:81.86ms +[2025-09-11 11:44:51] [Rank 0] step:781/10000 train_time:62944ms step_avg:80.59ms +[2025-09-11 11:44:51] [Rank 0] step:781/10000 train_time:62944ms step_avg:80.59ms +[2025-09-11 11:44:51] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 11:44:51] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 11:44:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 11:44:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 11:45:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 11:45:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 11:45:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:45:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:45:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 11:45:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 11:45:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 11:45:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 11:45:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 11:45:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 11:45:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 11:45:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 11:45:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 11:45:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 11:45:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 11:45:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 11:45:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 11:45:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 11:45:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 11:45:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 11:45:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 11:45:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 11:45:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 11:45:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 11:45:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 11:45:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 11:45:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 11:45:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 11:45:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 11:45:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 11:45:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 11:45:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 11:45:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 11:45:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 11:45:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... 
+[2025-09-11 11:45:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 11:45:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 11:45:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 11:45:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 11:45:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 11:45:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:45:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:45:37] [Rank 0] PRINT: step:800/10000 val_loss:6.0523 total_sharp:3.5776e-03 L1_sharp:3.3222e-01 L2_sharp:3.0891e-01 L3_sharp:3.1622e-01 L4_sharp:3.3704e-01 L5_sharp:3.9542e-01 L6_sharp:4.2501e-01 L7_sharp:4.8025e-01 L8_sharp:7.0247e-01 L9_sharp:9.0734e-01 L10_sharp:1.2167e+00 L11_sharp:1.5655e+00 L12_sharp:1.5267e+00 total_fnorm:1.8500e+01 total_l1_linf:2.9568e+04 total_spectral:9.3125e+00 L1_fnorm:1.0938e-01 L2_fnorm:1.1377e-01 L3_fnorm:1.1426e-01 L4_fnorm:1.1328e-01 L5_fnorm:1.0986e-01 L6_fnorm:1.1230e-01 L7_fnorm:1.1230e-01 L8_fnorm:1.0693e-01 L9_fnorm:1.0889e-01 L10_fnorm:1.0596e-01 L11_fnorm:1.0254e-01 L12_fnorm:9.4238e-02 L1_l1linf:4.1992e-02 L2_l1linf:4.2236e-02 L3_l1linf:4.1992e-02 L4_l1linf:4.1992e-02 L5_l1linf:4.1748e-02 L6_l1linf:4.1504e-02 L7_l1linf:4.1260e-02 L8_l1linf:4.1016e-02 L9_l1linf:4.0527e-02 L10_l1linf:3.9062e-02 L11_l1linf:3.7354e-02 L12_l1linf:3.2959e-02 L1_spectral:1.6003e-03 L2_spectral:1.6096e-03 L3_spectral:1.6027e-03 L4_spectral:1.5934e-03 L5_spectral:1.6049e-03 L6_spectral:1.6078e-03 L7_spectral:1.5907e-03 L8_spectral:1.5871e-03 L9_spectral:1.5755e-03 L10_spectral:1.5581e-03 L11_spectral:1.5469e-03 L12_spectral:1.5293e-03 train_time:63571ms step_avg:79.46ms +[2025-09-11 11:45:37] [Rank 0] PRINT: step:800/10000 val_loss:6.0523 
total_sharp:3.5776e-03 L1_sharp:3.3222e-01 L2_sharp:3.0891e-01 L3_sharp:3.1622e-01 L4_sharp:3.3704e-01 L5_sharp:3.9542e-01 L6_sharp:4.2501e-01 L7_sharp:4.8025e-01 L8_sharp:7.0247e-01 L9_sharp:9.0734e-01 L10_sharp:1.2167e+00 L11_sharp:1.5655e+00 L12_sharp:1.5267e+00 total_fnorm:1.8500e+01 total_l1_linf:2.9568e+04 total_spectral:9.3125e+00 L1_fnorm:1.0938e-01 L2_fnorm:1.1377e-01 L3_fnorm:1.1426e-01 L4_fnorm:1.1328e-01 L5_fnorm:1.0986e-01 L6_fnorm:1.1230e-01 L7_fnorm:1.1230e-01 L8_fnorm:1.0693e-01 L9_fnorm:1.0889e-01 L10_fnorm:1.0596e-01 L11_fnorm:1.0254e-01 L12_fnorm:9.4238e-02 L1_l1linf:4.1992e-02 L2_l1linf:4.2236e-02 L3_l1linf:4.1992e-02 L4_l1linf:4.1992e-02 L5_l1linf:4.1748e-02 L6_l1linf:4.1504e-02 L7_l1linf:4.1260e-02 L8_l1linf:4.1016e-02 L9_l1linf:4.0527e-02 L10_l1linf:3.9062e-02 L11_l1linf:3.7354e-02 L12_l1linf:3.2959e-02 L1_spectral:1.6003e-03 L2_spectral:1.6096e-03 L3_spectral:1.6027e-03 L4_spectral:1.5934e-03 L5_spectral:1.6049e-03 L6_spectral:1.6078e-03 L7_spectral:1.5907e-03 L8_spectral:1.5871e-03 L9_spectral:1.5755e-03 L10_spectral:1.5581e-03 L11_spectral:1.5469e-03 L12_spectral:1.5293e-03 train_time:63571ms step_avg:79.46ms +[2025-09-11 11:45:38] [Rank 0] step:801/10000 train_time:65385ms step_avg:81.63ms +[2025-09-11 11:45:38] [Rank 0] step:801/10000 train_time:65385ms step_avg:81.63ms +[2025-09-11 11:45:39] [Rank 0] step:821/10000 train_time:66034ms step_avg:80.43ms +[2025-09-11 11:45:39] [Rank 0] step:821/10000 train_time:66034ms step_avg:80.43ms +[2025-09-11 11:45:40] [Rank 0] step:841/10000 train_time:66680ms step_avg:79.29ms +[2025-09-11 11:45:40] [Rank 0] step:841/10000 train_time:66680ms step_avg:79.29ms +[2025-09-11 11:45:40] [Rank 0] step:861/10000 train_time:67325ms step_avg:78.19ms +[2025-09-11 11:45:40] [Rank 0] step:861/10000 train_time:67325ms step_avg:78.19ms +[2025-09-11 11:45:41] [Rank 0] step:881/10000 train_time:67970ms step_avg:77.15ms +[2025-09-11 11:45:41] [Rank 0] step:881/10000 train_time:67970ms step_avg:77.15ms +[2025-09-11 
11:45:42] [Rank 0] step:901/10000 train_time:68614ms step_avg:76.15ms +[2025-09-11 11:45:42] [Rank 0] step:901/10000 train_time:68614ms step_avg:76.15ms +[2025-09-11 11:45:42] [Rank 0] step:921/10000 train_time:69258ms step_avg:75.20ms +[2025-09-11 11:45:42] [Rank 0] step:921/10000 train_time:69258ms step_avg:75.20ms +[2025-09-11 11:45:43] [Rank 0] step:941/10000 train_time:69903ms step_avg:74.29ms +[2025-09-11 11:45:43] [Rank 0] step:941/10000 train_time:69903ms step_avg:74.29ms +[2025-09-11 11:45:44] [Rank 0] step:961/10000 train_time:70547ms step_avg:73.41ms +[2025-09-11 11:45:44] [Rank 0] step:961/10000 train_time:70547ms step_avg:73.41ms +[2025-09-11 11:45:44] [Rank 0] step:981/10000 train_time:71191ms step_avg:72.57ms +[2025-09-11 11:45:44] [Rank 0] step:981/10000 train_time:71191ms step_avg:72.57ms +[2025-09-11 11:45:45] [Rank 0] step:1001/10000 train_time:71835ms step_avg:71.76ms +[2025-09-11 11:45:45] [Rank 0] step:1001/10000 train_time:71835ms step_avg:71.76ms +[2025-09-11 11:45:46] [Rank 0] step:1021/10000 train_time:72480ms step_avg:70.99ms +[2025-09-11 11:45:46] [Rank 0] step:1021/10000 train_time:72480ms step_avg:70.99ms +[2025-09-11 11:45:46] [Rank 0] step:1041/10000 train_time:73125ms step_avg:70.24ms +[2025-09-11 11:45:46] [Rank 0] step:1041/10000 train_time:73125ms step_avg:70.24ms +[2025-09-11 11:45:47] [Rank 0] step:1061/10000 train_time:74045ms step_avg:69.79ms +[2025-09-11 11:45:47] [Rank 0] step:1061/10000 train_time:74045ms step_avg:69.79ms +[2025-09-11 11:45:48] [Rank 0] step:1081/10000 train_time:74689ms step_avg:69.09ms +[2025-09-11 11:45:48] [Rank 0] step:1081/10000 train_time:74689ms step_avg:69.09ms +[2025-09-11 11:45:48] [Rank 0] step:1101/10000 train_time:75333ms step_avg:68.42ms +[2025-09-11 11:45:48] [Rank 0] step:1101/10000 train_time:75333ms step_avg:68.42ms +[2025-09-11 11:45:49] [Rank 0] step:1121/10000 train_time:76284ms step_avg:68.05ms +[2025-09-11 11:45:49] [Rank 0] step:1121/10000 train_time:76284ms step_avg:68.05ms 
+[2025-09-11 11:45:50] [Rank 0] step:1141/10000 train_time:76928ms step_avg:67.42ms +[2025-09-11 11:45:50] [Rank 0] step:1141/10000 train_time:76928ms step_avg:67.42ms +[2025-09-11 11:45:51] [Rank 0] step:1161/10000 train_time:77572ms step_avg:66.81ms +[2025-09-11 11:45:51] [Rank 0] step:1161/10000 train_time:77572ms step_avg:66.81ms +[2025-09-11 11:45:51] [Rank 0] step:1181/10000 train_time:78216ms step_avg:66.23ms +[2025-09-11 11:45:51] [Rank 0] step:1181/10000 train_time:78216ms step_avg:66.23ms +[2025-09-11 11:45:52] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 11:45:52] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 11:45:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 11:45:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 11:45:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 11:45:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 11:45:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:45:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:45:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 11:45:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 11:45:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 11:45:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 11:45:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... 
+[2025-09-11 11:45:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 11:45:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 11:45:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 11:45:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 11:45:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 11:45:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 11:45:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 11:45:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 11:45:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 11:46:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 11:46:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 11:46:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 11:46:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 11:46:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 11:46:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 11:46:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 11:46:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 11:46:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 11:46:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... 
+[2025-09-11 11:46:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 11:46:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 11:46:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 11:46:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 11:46:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 11:46:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 11:46:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 11:46:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 11:46:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 11:46:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 11:46:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 11:46:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 11:46:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:46:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:46:02] [Rank 0] PRINT: step:1200/10000 val_loss:5.7486 total_sharp:2.7167e-03 L1_sharp:3.0829e-01 L2_sharp:2.9416e-01 L3_sharp:2.9742e-01 L4_sharp:3.4933e-01 L5_sharp:4.2129e-01 L6_sharp:4.2460e-01 L7_sharp:4.3714e-01 L8_sharp:5.8095e-01 L9_sharp:4.1111e-01 L10_sharp:4.1870e-01 L11_sharp:4.8631e-01 L12_sharp:6.7058e-01 total_fnorm:1.8500e+01 total_l1_linf:2.7776e+04 total_spectral:9.2500e+00 L1_fnorm:1.1523e-01 L2_fnorm:1.1719e-01 L3_fnorm:1.1670e-01 L4_fnorm:1.1621e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1572e-01 L7_fnorm:1.1572e-01 L8_fnorm:1.1377e-01 L9_fnorm:1.1572e-01 L10_fnorm:1.1426e-01 L11_fnorm:1.1475e-01 L12_fnorm:1.0986e-01 L1_l1linf:4.0283e-02 L2_l1linf:4.0527e-02 L3_l1linf:4.0283e-02 L4_l1linf:3.9795e-02 L5_l1linf:3.9795e-02 L6_l1linf:3.9307e-02 L7_l1linf:3.9307e-02 L8_l1linf:3.9795e-02 L9_l1linf:4.0283e-02 L10_l1linf:4.0039e-02 L11_l1linf:4.0283e-02 L12_l1linf:3.7842e-02 L1_spectral:1.5981e-03 L2_spectral:1.6220e-03 L3_spectral:1.6185e-03 L4_spectral:1.6095e-03 L5_spectral:1.6029e-03 L6_spectral:1.6055e-03 L7_spectral:1.6128e-03 L8_spectral:1.5999e-03 L9_spectral:1.5972e-03 L10_spectral:1.5650e-03 L11_spectral:1.5806e-03 L12_spectral:1.5809e-03 train_time:78843ms step_avg:65.70ms +[2025-09-11 11:46:02] [Rank 0] PRINT: step:1200/10000 val_loss:5.7486 total_sharp:2.7167e-03 L1_sharp:3.0829e-01 L2_sharp:2.9416e-01 L3_sharp:2.9742e-01 L4_sharp:3.4933e-01 L5_sharp:4.2129e-01 L6_sharp:4.2460e-01 L7_sharp:4.3714e-01 L8_sharp:5.8095e-01 L9_sharp:4.1111e-01 L10_sharp:4.1870e-01 L11_sharp:4.8631e-01 L12_sharp:6.7058e-01 total_fnorm:1.8500e+01 total_l1_linf:2.7776e+04 total_spectral:9.2500e+00 L1_fnorm:1.1523e-01 L2_fnorm:1.1719e-01 L3_fnorm:1.1670e-01 L4_fnorm:1.1621e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1572e-01 L7_fnorm:1.1572e-01 L8_fnorm:1.1377e-01 L9_fnorm:1.1572e-01 L10_fnorm:1.1426e-01 L11_fnorm:1.1475e-01 L12_fnorm:1.0986e-01 L1_l1linf:4.0283e-02 L2_l1linf:4.0527e-02 L3_l1linf:4.0283e-02 L4_l1linf:3.9795e-02 L5_l1linf:3.9795e-02 
L6_l1linf:3.9307e-02 L7_l1linf:3.9307e-02 L8_l1linf:3.9795e-02 L9_l1linf:4.0283e-02 L10_l1linf:4.0039e-02 L11_l1linf:4.0283e-02 L12_l1linf:3.7842e-02 L1_spectral:1.5981e-03 L2_spectral:1.6220e-03 L3_spectral:1.6185e-03 L4_spectral:1.6095e-03 L5_spectral:1.6029e-03 L6_spectral:1.6055e-03 L7_spectral:1.6128e-03 L8_spectral:1.5999e-03 L9_spectral:1.5972e-03 L10_spectral:1.5650e-03 L11_spectral:1.5806e-03 L12_spectral:1.5809e-03 train_time:78843ms step_avg:65.70ms +[2025-09-11 11:46:04] [Rank 0] step:1201/10000 train_time:80675ms step_avg:67.17ms +[2025-09-11 11:46:04] [Rank 0] step:1201/10000 train_time:80675ms step_avg:67.17ms +[2025-09-11 11:46:05] [Rank 0] step:1221/10000 train_time:81339ms step_avg:66.62ms +[2025-09-11 11:46:05] [Rank 0] step:1221/10000 train_time:81339ms step_avg:66.62ms +[2025-09-11 11:46:06] [Rank 0] step:1241/10000 train_time:81985ms step_avg:66.06ms +[2025-09-11 11:46:06] [Rank 0] step:1241/10000 train_time:81985ms step_avg:66.06ms +[2025-09-11 11:46:06] [Rank 0] step:1261/10000 train_time:82630ms step_avg:65.53ms +[2025-09-11 11:46:06] [Rank 0] step:1261/10000 train_time:82630ms step_avg:65.53ms +[2025-09-11 11:46:07] [Rank 0] step:1281/10000 train_time:83274ms step_avg:65.01ms +[2025-09-11 11:46:07] [Rank 0] step:1281/10000 train_time:83274ms step_avg:65.01ms +[2025-09-11 11:46:08] [Rank 0] step:1301/10000 train_time:83919ms step_avg:64.50ms +[2025-09-11 11:46:08] [Rank 0] step:1301/10000 train_time:83919ms step_avg:64.50ms +[2025-09-11 11:46:08] [Rank 0] step:1321/10000 train_time:84563ms step_avg:64.01ms +[2025-09-11 11:46:08] [Rank 0] step:1321/10000 train_time:84563ms step_avg:64.01ms +[2025-09-11 11:46:09] [Rank 0] step:1341/10000 train_time:85207ms step_avg:63.54ms +[2025-09-11 11:46:09] [Rank 0] step:1341/10000 train_time:85207ms step_avg:63.54ms +[2025-09-11 11:46:09] [Rank 0] step:1361/10000 train_time:85852ms step_avg:63.08ms +[2025-09-11 11:46:09] [Rank 0] step:1361/10000 train_time:85852ms step_avg:63.08ms +[2025-09-11 11:46:10] 
[Rank 0] step:1381/10000 train_time:86495ms step_avg:62.63ms +[2025-09-11 11:46:10] [Rank 0] step:1381/10000 train_time:86495ms step_avg:62.63ms +[2025-09-11 11:46:11] [Rank 0] step:1401/10000 train_time:87139ms step_avg:62.20ms +[2025-09-11 11:46:11] [Rank 0] step:1401/10000 train_time:87139ms step_avg:62.20ms +[2025-09-11 11:46:11] [Rank 0] step:1421/10000 train_time:87784ms step_avg:61.78ms +[2025-09-11 11:46:11] [Rank 0] step:1421/10000 train_time:87784ms step_avg:61.78ms +[2025-09-11 11:46:12] [Rank 0] step:1441/10000 train_time:88428ms step_avg:61.37ms +[2025-09-11 11:46:12] [Rank 0] step:1441/10000 train_time:88428ms step_avg:61.37ms +[2025-09-11 11:46:13] [Rank 0] step:1461/10000 train_time:89072ms step_avg:60.97ms +[2025-09-11 11:46:13] [Rank 0] step:1461/10000 train_time:89072ms step_avg:60.97ms +[2025-09-11 11:46:13] [Rank 0] step:1481/10000 train_time:89716ms step_avg:60.58ms +[2025-09-11 11:46:13] [Rank 0] step:1481/10000 train_time:89716ms step_avg:60.58ms +[2025-09-11 11:46:14] [Rank 0] step:1501/10000 train_time:90364ms step_avg:60.20ms +[2025-09-11 11:46:14] [Rank 0] step:1501/10000 train_time:90364ms step_avg:60.20ms +[2025-09-11 11:46:15] [Rank 0] step:1521/10000 train_time:91011ms step_avg:59.84ms +[2025-09-11 11:46:15] [Rank 0] step:1521/10000 train_time:91011ms step_avg:59.84ms +[2025-09-11 11:46:15] [Rank 0] step:1541/10000 train_time:91660ms step_avg:59.48ms +[2025-09-11 11:46:15] [Rank 0] step:1541/10000 train_time:91660ms step_avg:59.48ms +[2025-09-11 11:46:16] [Rank 0] step:1561/10000 train_time:92307ms step_avg:59.13ms +[2025-09-11 11:46:16] [Rank 0] step:1561/10000 train_time:92307ms step_avg:59.13ms +[2025-09-11 11:46:17] [Rank 0] step:1581/10000 train_time:92956ms step_avg:58.80ms +[2025-09-11 11:46:17] [Rank 0] step:1581/10000 train_time:92956ms step_avg:58.80ms +[2025-09-11 11:46:17] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 11:46:17] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 11:46:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 11:46:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 11:46:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 11:46:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 11:46:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:46:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:46:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 11:46:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 11:46:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 11:46:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 11:46:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 11:46:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 11:46:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 11:46:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 11:46:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 11:46:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 11:46:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 11:46:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 11:46:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 11:46:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 11:46:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 11:46:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 11:46:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 11:46:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 11:46:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 11:46:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 11:46:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 11:46:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 11:46:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 11:46:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 11:46:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 11:46:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 11:46:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 11:46:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 11:46:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 11:46:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 11:46:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 11:46:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 11:46:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 11:46:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 11:46:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 11:46:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 11:46:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:46:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:46:28] [Rank 0] PRINT: step:1600/10000 val_loss:5.5598 total_sharp:2.2385e-03 L1_sharp:1.9298e-01 L2_sharp:1.7891e-01 L3_sharp:1.9741e-01 L4_sharp:2.3862e-01 L5_sharp:3.1137e-01 L6_sharp:3.0637e-01 L7_sharp:3.1238e-01 L8_sharp:3.3288e-01 L9_sharp:3.0924e-01 L10_sharp:4.1770e-01 L11_sharp:4.7572e-01 L12_sharp:1.0146e+00 total_fnorm:1.7375e+01 total_l1_linf:2.4704e+04 total_spectral:8.6875e+00 L1_fnorm:1.1572e-01 L2_fnorm:1.1670e-01 L3_fnorm:1.1670e-01 L4_fnorm:1.1670e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1621e-01 L7_fnorm:1.1621e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1621e-01 L10_fnorm:1.1572e-01 L11_fnorm:1.1670e-01 L12_fnorm:1.1182e-01 L1_l1linf:3.8086e-02 L2_l1linf:3.7354e-02 L3_l1linf:3.7354e-02 L4_l1linf:3.7109e-02 L5_l1linf:3.6865e-02 L6_l1linf:3.7109e-02 L7_l1linf:3.7354e-02 L8_l1linf:3.6621e-02 L9_l1linf:3.6865e-02 L10_l1linf:3.7842e-02 L11_l1linf:3.8574e-02 L12_l1linf:3.6133e-02 L1_spectral:1.6044e-03 L2_spectral:1.6150e-03 L3_spectral:1.6201e-03 L4_spectral:1.6169e-03 L5_spectral:1.6070e-03 L6_spectral:1.6129e-03 L7_spectral:1.6156e-03 L8_spectral:1.6100e-03 L9_spectral:1.6175e-03 L10_spectral:1.5963e-03 L11_spectral:1.6107e-03 L12_spectral:1.5859e-03 train_time:93586ms step_avg:58.49ms +[2025-09-11 11:46:28] [Rank 0] PRINT: step:1600/10000 
val_loss:5.5598 total_sharp:2.2385e-03 L1_sharp:1.9298e-01 L2_sharp:1.7891e-01 L3_sharp:1.9741e-01 L4_sharp:2.3862e-01 L5_sharp:3.1137e-01 L6_sharp:3.0637e-01 L7_sharp:3.1238e-01 L8_sharp:3.3288e-01 L9_sharp:3.0924e-01 L10_sharp:4.1770e-01 L11_sharp:4.7572e-01 L12_sharp:1.0146e+00 total_fnorm:1.7375e+01 total_l1_linf:2.4704e+04 total_spectral:8.6875e+00 L1_fnorm:1.1572e-01 L2_fnorm:1.1670e-01 L3_fnorm:1.1670e-01 L4_fnorm:1.1670e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1621e-01 L7_fnorm:1.1621e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1621e-01 L10_fnorm:1.1572e-01 L11_fnorm:1.1670e-01 L12_fnorm:1.1182e-01 L1_l1linf:3.8086e-02 L2_l1linf:3.7354e-02 L3_l1linf:3.7354e-02 L4_l1linf:3.7109e-02 L5_l1linf:3.6865e-02 L6_l1linf:3.7109e-02 L7_l1linf:3.7354e-02 L8_l1linf:3.6621e-02 L9_l1linf:3.6865e-02 L10_l1linf:3.7842e-02 L11_l1linf:3.8574e-02 L12_l1linf:3.6133e-02 L1_spectral:1.6044e-03 L2_spectral:1.6150e-03 L3_spectral:1.6201e-03 L4_spectral:1.6169e-03 L5_spectral:1.6070e-03 L6_spectral:1.6129e-03 L7_spectral:1.6156e-03 L8_spectral:1.6100e-03 L9_spectral:1.6175e-03 L10_spectral:1.5963e-03 L11_spectral:1.6107e-03 L12_spectral:1.5859e-03 train_time:93586ms step_avg:58.49ms +[2025-09-11 11:46:30] [Rank 0] step:1601/10000 train_time:95310ms step_avg:59.53ms +[2025-09-11 11:46:30] [Rank 0] step:1601/10000 train_time:95310ms step_avg:59.53ms +[2025-09-11 11:46:30] [Rank 0] step:1621/10000 train_time:95963ms step_avg:59.20ms +[2025-09-11 11:46:30] [Rank 0] step:1621/10000 train_time:95963ms step_avg:59.20ms +[2025-09-11 11:46:31] [Rank 0] step:1641/10000 train_time:96612ms step_avg:58.87ms +[2025-09-11 11:46:31] [Rank 0] step:1641/10000 train_time:96612ms step_avg:58.87ms +[2025-09-11 11:46:31] [Rank 0] step:1661/10000 train_time:97261ms step_avg:58.56ms +[2025-09-11 11:46:31] [Rank 0] step:1661/10000 train_time:97261ms step_avg:58.56ms +[2025-09-11 11:46:32] [Rank 0] step:1681/10000 train_time:97910ms step_avg:58.24ms +[2025-09-11 11:46:32] [Rank 0] step:1681/10000 train_time:97910ms 
step_avg:58.24ms +[2025-09-11 11:46:33] [Rank 0] step:1701/10000 train_time:98559ms step_avg:57.94ms +[2025-09-11 11:46:33] [Rank 0] step:1701/10000 train_time:98559ms step_avg:57.94ms +[2025-09-11 11:46:33] [Rank 0] step:1721/10000 train_time:99207ms step_avg:57.65ms +[2025-09-11 11:46:33] [Rank 0] step:1721/10000 train_time:99207ms step_avg:57.65ms +[2025-09-11 11:46:34] [Rank 0] step:1741/10000 train_time:99856ms step_avg:57.36ms +[2025-09-11 11:46:34] [Rank 0] step:1741/10000 train_time:99856ms step_avg:57.36ms +[2025-09-11 11:46:35] [Rank 0] step:1761/10000 train_time:100504ms step_avg:57.07ms +[2025-09-11 11:46:35] [Rank 0] step:1761/10000 train_time:100504ms step_avg:57.07ms +[2025-09-11 11:46:35] [Rank 0] step:1781/10000 train_time:101152ms step_avg:56.80ms +[2025-09-11 11:46:35] [Rank 0] step:1781/10000 train_time:101152ms step_avg:56.80ms +[2025-09-11 11:46:36] [Rank 0] step:1801/10000 train_time:101801ms step_avg:56.52ms +[2025-09-11 11:46:36] [Rank 0] step:1801/10000 train_time:101801ms step_avg:56.52ms +[2025-09-11 11:46:37] [Rank 0] step:1821/10000 train_time:102450ms step_avg:56.26ms +[2025-09-11 11:46:37] [Rank 0] step:1821/10000 train_time:102450ms step_avg:56.26ms +[2025-09-11 11:46:37] [Rank 0] step:1841/10000 train_time:103099ms step_avg:56.00ms +[2025-09-11 11:46:37] [Rank 0] step:1841/10000 train_time:103099ms step_avg:56.00ms +[2025-09-11 11:46:38] [Rank 0] step:1861/10000 train_time:103747ms step_avg:55.75ms +[2025-09-11 11:46:38] [Rank 0] step:1861/10000 train_time:103747ms step_avg:55.75ms +[2025-09-11 11:46:39] [Rank 0] step:1881/10000 train_time:104396ms step_avg:55.50ms +[2025-09-11 11:46:39] [Rank 0] step:1881/10000 train_time:104396ms step_avg:55.50ms +[2025-09-11 11:46:39] [Rank 0] step:1901/10000 train_time:105044ms step_avg:55.26ms +[2025-09-11 11:46:39] [Rank 0] step:1901/10000 train_time:105044ms step_avg:55.26ms +[2025-09-11 11:46:40] [Rank 0] step:1921/10000 train_time:105693ms step_avg:55.02ms +[2025-09-11 11:46:40] [Rank 0] 
step:1921/10000 train_time:105693ms step_avg:55.02ms +[2025-09-11 11:46:41] [Rank 0] step:1941/10000 train_time:106341ms step_avg:54.79ms +[2025-09-11 11:46:41] [Rank 0] step:1941/10000 train_time:106341ms step_avg:54.79ms +[2025-09-11 11:46:41] [Rank 0] step:1961/10000 train_time:106989ms step_avg:54.56ms +[2025-09-11 11:46:41] [Rank 0] step:1961/10000 train_time:106989ms step_avg:54.56ms +[2025-09-11 11:46:42] [Rank 0] step:1981/10000 train_time:107639ms step_avg:54.34ms +[2025-09-11 11:46:42] [Rank 0] step:1981/10000 train_time:107639ms step_avg:54.34ms +[2025-09-11 11:46:42] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 11:46:42] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 11:46:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 11:46:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 11:46:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 11:46:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 11:46:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:46:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:46:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 11:46:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 11:46:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 11:46:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 11:46:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 11:46:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 11:46:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 11:46:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 11:46:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 11:46:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 11:46:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 11:46:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 11:46:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 11:46:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 11:46:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 11:46:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 11:46:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 11:46:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 11:46:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 11:46:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 11:46:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 11:46:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 11:46:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 11:46:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 11:46:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 11:46:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 11:46:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 11:46:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 11:46:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 11:46:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 11:46:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 11:46:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 11:46:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 11:46:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 11:46:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 11:46:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 11:46:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:46:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:46:53] [Rank 0] PRINT: step:2000/10000 val_loss:5.4075 total_sharp:2.2824e-03 L1_sharp:9.3965e-02 L2_sharp:9.4995e-02 L3_sharp:1.0091e-01 L4_sharp:1.3091e-01 L5_sharp:1.9064e-01 L6_sharp:2.1093e-01 L7_sharp:2.3762e-01 L8_sharp:3.2572e-01 L9_sharp:3.3974e-01 L10_sharp:5.2752e-01 L11_sharp:8.3707e-01 L12_sharp:3.0996e+00 total_fnorm:1.6875e+01 total_l1_linf:2.4320e+04 total_spectral:8.4375e+00 L1_fnorm:1.1572e-01 L2_fnorm:1.1670e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1621e-01 L7_fnorm:1.1621e-01 L8_fnorm:1.1523e-01 L9_fnorm:1.1621e-01 L10_fnorm:1.1572e-01 L11_fnorm:1.1719e-01 L12_fnorm:1.1279e-01 L1_l1linf:3.5156e-02 L2_l1linf:3.5156e-02 L3_l1linf:3.4912e-02 L4_l1linf:3.5400e-02 L5_l1linf:3.5156e-02 L6_l1linf:3.4912e-02 L7_l1linf:3.5156e-02 L8_l1linf:3.5645e-02 L9_l1linf:3.5400e-02 L10_l1linf:3.5400e-02 L11_l1linf:3.7598e-02 L12_l1linf:3.3691e-02 L1_spectral:1.5996e-03 L2_spectral:1.6082e-03 L3_spectral:1.6168e-03 L4_spectral:1.6022e-03 L5_spectral:1.5951e-03 L6_spectral:1.6138e-03 L7_spectral:1.6043e-03 L8_spectral:1.6087e-03 L9_spectral:1.6111e-03 L10_spectral:1.5953e-03 L11_spectral:1.6178e-03 L12_spectral:1.6060e-03 train_time:108269ms step_avg:54.13ms +[2025-09-11 11:46:53] [Rank 0] PRINT: step:2000/10000 val_loss:5.4075 total_sharp:2.2824e-03 L1_sharp:9.3965e-02 L2_sharp:9.4995e-02 L3_sharp:1.0091e-01 L4_sharp:1.3091e-01 L5_sharp:1.9064e-01 L6_sharp:2.1093e-01 L7_sharp:2.3762e-01 L8_sharp:3.2572e-01 L9_sharp:3.3974e-01 L10_sharp:5.2752e-01 L11_sharp:8.3707e-01 L12_sharp:3.0996e+00 total_fnorm:1.6875e+01 total_l1_linf:2.4320e+04 total_spectral:8.4375e+00 L1_fnorm:1.1572e-01 L2_fnorm:1.1670e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1621e-01 L7_fnorm:1.1621e-01 L8_fnorm:1.1523e-01 L9_fnorm:1.1621e-01 L10_fnorm:1.1572e-01 L11_fnorm:1.1719e-01 L12_fnorm:1.1279e-01 L1_l1linf:3.5156e-02 L2_l1linf:3.5156e-02 L3_l1linf:3.4912e-02 L4_l1linf:3.5400e-02 L5_l1linf:3.5156e-02 
L6_l1linf:3.4912e-02 L7_l1linf:3.5156e-02 L8_l1linf:3.5645e-02 L9_l1linf:3.5400e-02 L10_l1linf:3.5400e-02 L11_l1linf:3.7598e-02 L12_l1linf:3.3691e-02 L1_spectral:1.5996e-03 L2_spectral:1.6082e-03 L3_spectral:1.6168e-03 L4_spectral:1.6022e-03 L5_spectral:1.5951e-03 L6_spectral:1.6138e-03 L7_spectral:1.6043e-03 L8_spectral:1.6087e-03 L9_spectral:1.6111e-03 L10_spectral:1.5953e-03 L11_spectral:1.6178e-03 L12_spectral:1.6060e-03 train_time:108269ms step_avg:54.13ms +[2025-09-11 11:46:55] [Rank 0] step:2001/10000 train_time:110125ms step_avg:55.04ms +[2025-09-11 11:46:55] [Rank 0] step:2001/10000 train_time:110125ms step_avg:55.04ms +[2025-09-11 11:46:56] [Rank 0] step:2021/10000 train_time:110792ms step_avg:54.82ms +[2025-09-11 11:46:56] [Rank 0] step:2021/10000 train_time:110792ms step_avg:54.82ms +[2025-09-11 11:46:56] [Rank 0] step:2041/10000 train_time:111442ms step_avg:54.60ms +[2025-09-11 11:46:56] [Rank 0] step:2041/10000 train_time:111442ms step_avg:54.60ms +[2025-09-11 11:46:57] [Rank 0] step:2061/10000 train_time:112090ms step_avg:54.39ms +[2025-09-11 11:46:57] [Rank 0] step:2061/10000 train_time:112090ms step_avg:54.39ms +[2025-09-11 11:46:58] [Rank 0] step:2081/10000 train_time:112738ms step_avg:54.17ms +[2025-09-11 11:46:58] [Rank 0] step:2081/10000 train_time:112738ms step_avg:54.17ms +[2025-09-11 11:46:58] [Rank 0] step:2101/10000 train_time:113386ms step_avg:53.97ms +[2025-09-11 11:46:58] [Rank 0] step:2101/10000 train_time:113386ms step_avg:53.97ms +[2025-09-11 11:46:59] [Rank 0] step:2121/10000 train_time:114033ms step_avg:53.76ms +[2025-09-11 11:46:59] [Rank 0] step:2121/10000 train_time:114033ms step_avg:53.76ms +[2025-09-11 11:47:00] [Rank 0] step:2141/10000 train_time:114681ms step_avg:53.56ms +[2025-09-11 11:47:00] [Rank 0] step:2141/10000 train_time:114681ms step_avg:53.56ms +[2025-09-11 11:47:00] [Rank 0] step:2161/10000 train_time:115329ms step_avg:53.37ms +[2025-09-11 11:47:00] [Rank 0] step:2161/10000 train_time:115329ms step_avg:53.37ms 
+[2025-09-11 11:47:01] [Rank 0] step:2181/10000 train_time:115976ms step_avg:53.18ms +[2025-09-11 11:47:01] [Rank 0] step:2181/10000 train_time:115976ms step_avg:53.18ms +[2025-09-11 11:47:01] [Rank 0] step:2201/10000 train_time:116624ms step_avg:52.99ms +[2025-09-11 11:47:01] [Rank 0] step:2201/10000 train_time:116624ms step_avg:52.99ms +[2025-09-11 11:47:02] [Rank 0] step:2221/10000 train_time:117271ms step_avg:52.80ms +[2025-09-11 11:47:02] [Rank 0] step:2221/10000 train_time:117271ms step_avg:52.80ms +[2025-09-11 11:47:03] [Rank 0] step:2241/10000 train_time:117931ms step_avg:52.62ms +[2025-09-11 11:47:03] [Rank 0] step:2241/10000 train_time:117931ms step_avg:52.62ms +[2025-09-11 11:47:03] [Rank 0] step:2261/10000 train_time:118592ms step_avg:52.45ms +[2025-09-11 11:47:03] [Rank 0] step:2261/10000 train_time:118592ms step_avg:52.45ms +[2025-09-11 11:47:04] [Rank 0] step:2281/10000 train_time:119253ms step_avg:52.28ms +[2025-09-11 11:47:04] [Rank 0] step:2281/10000 train_time:119253ms step_avg:52.28ms +[2025-09-11 11:47:05] [Rank 0] step:2301/10000 train_time:119914ms step_avg:52.11ms +[2025-09-11 11:47:05] [Rank 0] step:2301/10000 train_time:119914ms step_avg:52.11ms +[2025-09-11 11:47:05] [Rank 0] step:2321/10000 train_time:120575ms step_avg:51.95ms +[2025-09-11 11:47:05] [Rank 0] step:2321/10000 train_time:120575ms step_avg:51.95ms +[2025-09-11 11:47:06] [Rank 0] step:2341/10000 train_time:121236ms step_avg:51.79ms +[2025-09-11 11:47:06] [Rank 0] step:2341/10000 train_time:121236ms step_avg:51.79ms +[2025-09-11 11:47:07] [Rank 0] step:2361/10000 train_time:121898ms step_avg:51.63ms +[2025-09-11 11:47:07] [Rank 0] step:2361/10000 train_time:121898ms step_avg:51.63ms +[2025-09-11 11:47:07] [Rank 0] step:2381/10000 train_time:122558ms step_avg:51.47ms +[2025-09-11 11:47:07] [Rank 0] step:2381/10000 train_time:122558ms step_avg:51.47ms +[2025-09-11 11:47:08] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 11:47:08] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 11:47:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 11:47:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 11:47:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 11:47:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 11:47:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:47:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:47:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 11:47:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 11:47:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 11:47:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 11:47:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 11:47:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 11:47:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 11:47:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 11:47:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 11:47:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 11:47:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 11:47:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 11:47:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 11:47:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 11:47:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 11:47:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 11:47:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 11:47:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 11:47:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 11:47:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 11:47:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 11:47:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 11:47:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 11:47:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 11:47:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 11:47:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 11:47:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 11:47:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 11:47:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 11:47:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 11:47:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 11:47:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 11:47:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 11:47:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 11:47:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 11:47:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 11:47:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:47:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:47:19] [Rank 0] PRINT: step:2400/10000 val_loss:5.2753 total_sharp:2.5938e-03 L1_sharp:8.4150e-02 L2_sharp:1.0020e-01 L3_sharp:1.1521e-01 L4_sharp:1.7400e-01 L5_sharp:2.0760e-01 L6_sharp:2.4571e-01 L7_sharp:3.1007e-01 L8_sharp:3.2129e-01 L9_sharp:3.7800e-01 L10_sharp:4.6369e-01 L11_sharp:7.0839e-01 L12_sharp:2.4607e+00 total_fnorm:1.5562e+01 total_l1_linf:2.1120e+04 total_spectral:7.7812e+00 L1_fnorm:1.1572e-01 L2_fnorm:1.1621e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1572e-01 L7_fnorm:1.1621e-01 L8_fnorm:1.1523e-01 L9_fnorm:1.1572e-01 L10_fnorm:1.1572e-01 L11_fnorm:1.1621e-01 L12_fnorm:1.1475e-01 L1_l1linf:3.4424e-02 L2_l1linf:3.4180e-02 L3_l1linf:3.4180e-02 L4_l1linf:3.3936e-02 L5_l1linf:3.3936e-02 L6_l1linf:3.3936e-02 L7_l1linf:3.4180e-02 L8_l1linf:3.3936e-02 L9_l1linf:3.4424e-02 L10_l1linf:3.4668e-02 L11_l1linf:3.5645e-02 L12_l1linf:3.3691e-02 L1_spectral:1.6211e-03 L2_spectral:1.6143e-03 L3_spectral:1.6097e-03 L4_spectral:1.6058e-03 L5_spectral:1.6118e-03 L6_spectral:1.6222e-03 L7_spectral:1.6063e-03 L8_spectral:1.5980e-03 L9_spectral:1.6133e-03 L10_spectral:1.6019e-03 L11_spectral:1.6092e-03 L12_spectral:1.6098e-03 train_time:123200ms step_avg:51.33ms +[2025-09-11 11:47:19] [Rank 0] PRINT: step:2400/10000 
val_loss:5.2753 total_sharp:2.5938e-03 L1_sharp:8.4150e-02 L2_sharp:1.0020e-01 L3_sharp:1.1521e-01 L4_sharp:1.7400e-01 L5_sharp:2.0760e-01 L6_sharp:2.4571e-01 L7_sharp:3.1007e-01 L8_sharp:3.2129e-01 L9_sharp:3.7800e-01 L10_sharp:4.6369e-01 L11_sharp:7.0839e-01 L12_sharp:2.4607e+00 total_fnorm:1.5562e+01 total_l1_linf:2.1120e+04 total_spectral:7.7812e+00 L1_fnorm:1.1572e-01 L2_fnorm:1.1621e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1572e-01 L7_fnorm:1.1621e-01 L8_fnorm:1.1523e-01 L9_fnorm:1.1572e-01 L10_fnorm:1.1572e-01 L11_fnorm:1.1621e-01 L12_fnorm:1.1475e-01 L1_l1linf:3.4424e-02 L2_l1linf:3.4180e-02 L3_l1linf:3.4180e-02 L4_l1linf:3.3936e-02 L5_l1linf:3.3936e-02 L6_l1linf:3.3936e-02 L7_l1linf:3.4180e-02 L8_l1linf:3.3936e-02 L9_l1linf:3.4424e-02 L10_l1linf:3.4668e-02 L11_l1linf:3.5645e-02 L12_l1linf:3.3691e-02 L1_spectral:1.6211e-03 L2_spectral:1.6143e-03 L3_spectral:1.6097e-03 L4_spectral:1.6058e-03 L5_spectral:1.6118e-03 L6_spectral:1.6222e-03 L7_spectral:1.6063e-03 L8_spectral:1.5980e-03 L9_spectral:1.6133e-03 L10_spectral:1.6019e-03 L11_spectral:1.6092e-03 L12_spectral:1.6098e-03 train_time:123200ms step_avg:51.33ms +[2025-09-11 11:47:21] [Rank 0] step:2401/10000 train_time:125081ms step_avg:52.10ms +[2025-09-11 11:47:21] [Rank 0] step:2401/10000 train_time:125081ms step_avg:52.10ms +[2025-09-11 11:47:21] [Rank 0] step:2421/10000 train_time:125761ms step_avg:51.95ms +[2025-09-11 11:47:21] [Rank 0] step:2421/10000 train_time:125761ms step_avg:51.95ms +[2025-09-11 11:47:22] [Rank 0] step:2441/10000 train_time:126424ms step_avg:51.79ms +[2025-09-11 11:47:22] [Rank 0] step:2441/10000 train_time:126424ms step_avg:51.79ms +[2025-09-11 11:47:23] [Rank 0] step:2461/10000 train_time:127088ms step_avg:51.64ms +[2025-09-11 11:47:23] [Rank 0] step:2461/10000 train_time:127088ms step_avg:51.64ms +[2025-09-11 11:47:23] [Rank 0] step:2481/10000 train_time:127751ms step_avg:51.49ms +[2025-09-11 11:47:23] [Rank 0] step:2481/10000 
train_time:127751ms step_avg:51.49ms +[2025-09-11 11:47:24] [Rank 0] step:2501/10000 train_time:128413ms step_avg:51.34ms +[2025-09-11 11:47:24] [Rank 0] step:2501/10000 train_time:128413ms step_avg:51.34ms +[2025-09-11 11:47:25] [Rank 0] step:2521/10000 train_time:129075ms step_avg:51.20ms +[2025-09-11 11:47:25] [Rank 0] step:2521/10000 train_time:129075ms step_avg:51.20ms +[2025-09-11 11:47:25] [Rank 0] step:2541/10000 train_time:129737ms step_avg:51.06ms +[2025-09-11 11:47:25] [Rank 0] step:2541/10000 train_time:129737ms step_avg:51.06ms +[2025-09-11 11:47:26] [Rank 0] step:2561/10000 train_time:130400ms step_avg:50.92ms +[2025-09-11 11:47:26] [Rank 0] step:2561/10000 train_time:130400ms step_avg:50.92ms +[2025-09-11 11:47:27] [Rank 0] step:2581/10000 train_time:131062ms step_avg:50.78ms +[2025-09-11 11:47:27] [Rank 0] step:2581/10000 train_time:131062ms step_avg:50.78ms +[2025-09-11 11:47:27] [Rank 0] step:2601/10000 train_time:131724ms step_avg:50.64ms +[2025-09-11 11:47:27] [Rank 0] step:2601/10000 train_time:131724ms step_avg:50.64ms +[2025-09-11 11:47:28] [Rank 0] step:2621/10000 train_time:132386ms step_avg:50.51ms +[2025-09-11 11:47:28] [Rank 0] step:2621/10000 train_time:132386ms step_avg:50.51ms +[2025-09-11 11:47:29] [Rank 0] step:2641/10000 train_time:133048ms step_avg:50.38ms +[2025-09-11 11:47:29] [Rank 0] step:2641/10000 train_time:133048ms step_avg:50.38ms +[2025-09-11 11:47:29] [Rank 0] step:2661/10000 train_time:133711ms step_avg:50.25ms +[2025-09-11 11:47:29] [Rank 0] step:2661/10000 train_time:133711ms step_avg:50.25ms +[2025-09-11 11:47:30] [Rank 0] step:2681/10000 train_time:134373ms step_avg:50.12ms +[2025-09-11 11:47:30] [Rank 0] step:2681/10000 train_time:134373ms step_avg:50.12ms +[2025-09-11 11:47:31] [Rank 0] step:2701/10000 train_time:135035ms step_avg:49.99ms +[2025-09-11 11:47:31] [Rank 0] step:2701/10000 train_time:135035ms step_avg:49.99ms +[2025-09-11 11:47:31] [Rank 0] step:2721/10000 train_time:135697ms step_avg:49.87ms 
+[2025-09-11 11:47:31] [Rank 0] step:2721/10000 train_time:135697ms step_avg:49.87ms +[2025-09-11 11:47:32] [Rank 0] step:2741/10000 train_time:136359ms step_avg:49.75ms +[2025-09-11 11:47:32] [Rank 0] step:2741/10000 train_time:136359ms step_avg:49.75ms +[2025-09-11 11:47:33] [Rank 0] step:2761/10000 train_time:137021ms step_avg:49.63ms +[2025-09-11 11:47:33] [Rank 0] step:2761/10000 train_time:137021ms step_avg:49.63ms +[2025-09-11 11:47:33] [Rank 0] step:2781/10000 train_time:137683ms step_avg:49.51ms +[2025-09-11 11:47:33] [Rank 0] step:2781/10000 train_time:137683ms step_avg:49.51ms +[2025-09-11 11:47:34] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 11:47:34] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 11:47:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 11:47:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 11:47:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 11:47:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 11:47:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:47:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:47:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 11:47:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 11:47:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 11:47:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 11:47:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 11:47:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 11:47:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 11:47:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 11:47:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 11:47:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 11:47:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 11:47:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 11:47:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 11:47:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 11:47:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 11:47:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 11:47:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 11:47:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 11:47:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 11:47:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 11:47:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 11:47:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 11:47:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 11:47:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 11:47:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 11:47:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 11:47:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 11:47:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 11:47:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 11:47:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 11:47:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 11:47:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 11:47:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 11:47:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 11:47:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 11:47:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 11:47:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:47:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:47:45] [Rank 0] PRINT: step:2800/10000 val_loss:5.1664 total_sharp:2.6605e-03 L1_sharp:7.7158e-02 L2_sharp:8.9497e-02 L3_sharp:1.0709e-01 L4_sharp:1.4034e-01 L5_sharp:1.8486e-01 L6_sharp:2.3165e-01 L7_sharp:2.8625e-01 L8_sharp:3.2472e-01 L9_sharp:3.3489e-01 L10_sharp:4.1880e-01 L11_sharp:5.0301e-01 L12_sharp:1.4613e+00 total_fnorm:1.4438e+01 total_l1_linf:1.9072e+04 total_spectral:7.2188e+00 L1_fnorm:1.1523e-01 L2_fnorm:1.1621e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1572e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1572e-01 L10_fnorm:1.1523e-01 L11_fnorm:1.1572e-01 L12_fnorm:1.1475e-01 L1_l1linf:3.3447e-02 L2_l1linf:3.3203e-02 L3_l1linf:3.2959e-02 L4_l1linf:3.4180e-02 L5_l1linf:3.3203e-02 L6_l1linf:3.3447e-02 L7_l1linf:3.3203e-02 L8_l1linf:3.4180e-02 L9_l1linf:3.3203e-02 L10_l1linf:3.3447e-02 L11_l1linf:3.4180e-02 L12_l1linf:3.2959e-02 L1_spectral:1.5967e-03 L2_spectral:1.6068e-03 L3_spectral:1.6192e-03 L4_spectral:1.6092e-03 L5_spectral:1.6041e-03 L6_spectral:1.6045e-03 L7_spectral:1.6045e-03 L8_spectral:1.6115e-03 L9_spectral:1.6042e-03 L10_spectral:1.6009e-03 L11_spectral:1.6100e-03 L12_spectral:1.6030e-03 train_time:138326ms step_avg:49.40ms +[2025-09-11 11:47:45] [Rank 0] PRINT: step:2800/10000 val_loss:5.1664 total_sharp:2.6605e-03 L1_sharp:7.7158e-02 L2_sharp:8.9497e-02 L3_sharp:1.0709e-01 L4_sharp:1.4034e-01 L5_sharp:1.8486e-01 L6_sharp:2.3165e-01 L7_sharp:2.8625e-01 L8_sharp:3.2472e-01 L9_sharp:3.3489e-01 L10_sharp:4.1880e-01 L11_sharp:5.0301e-01 L12_sharp:1.4613e+00 total_fnorm:1.4438e+01 total_l1_linf:1.9072e+04 total_spectral:7.2188e+00 L1_fnorm:1.1523e-01 L2_fnorm:1.1621e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1572e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1572e-01 L10_fnorm:1.1523e-01 L11_fnorm:1.1572e-01 L12_fnorm:1.1475e-01 L1_l1linf:3.3447e-02 L2_l1linf:3.3203e-02 L3_l1linf:3.2959e-02 L4_l1linf:3.4180e-02 L5_l1linf:3.3203e-02 
L6_l1linf:3.3447e-02 L7_l1linf:3.3203e-02 L8_l1linf:3.4180e-02 L9_l1linf:3.3203e-02 L10_l1linf:3.3447e-02 L11_l1linf:3.4180e-02 L12_l1linf:3.2959e-02 L1_spectral:1.5967e-03 L2_spectral:1.6068e-03 L3_spectral:1.6192e-03 L4_spectral:1.6092e-03 L5_spectral:1.6041e-03 L6_spectral:1.6045e-03 L7_spectral:1.6045e-03 L8_spectral:1.6115e-03 L9_spectral:1.6042e-03 L10_spectral:1.6009e-03 L11_spectral:1.6100e-03 L12_spectral:1.6030e-03 train_time:138326ms step_avg:49.40ms +[2025-09-11 11:47:47] [Rank 0] step:2801/10000 train_time:140168ms step_avg:50.04ms +[2025-09-11 11:47:47] [Rank 0] step:2801/10000 train_time:140168ms step_avg:50.04ms +[2025-09-11 11:47:47] [Rank 0] step:2821/10000 train_time:140835ms step_avg:49.92ms +[2025-09-11 11:47:47] [Rank 0] step:2821/10000 train_time:140835ms step_avg:49.92ms +[2025-09-11 11:47:48] [Rank 0] step:2841/10000 train_time:141499ms step_avg:49.81ms +[2025-09-11 11:47:48] [Rank 0] step:2841/10000 train_time:141499ms step_avg:49.81ms +[2025-09-11 11:47:49] [Rank 0] step:2861/10000 train_time:142162ms step_avg:49.69ms +[2025-09-11 11:47:49] [Rank 0] step:2861/10000 train_time:142162ms step_avg:49.69ms +[2025-09-11 11:47:49] [Rank 0] step:2881/10000 train_time:142825ms step_avg:49.57ms +[2025-09-11 11:47:49] [Rank 0] step:2881/10000 train_time:142825ms step_avg:49.57ms +[2025-09-11 11:47:50] [Rank 0] step:2901/10000 train_time:143488ms step_avg:49.46ms +[2025-09-11 11:47:50] [Rank 0] step:2901/10000 train_time:143488ms step_avg:49.46ms +[2025-09-11 11:47:51] [Rank 0] step:2921/10000 train_time:144150ms step_avg:49.35ms +[2025-09-11 11:47:51] [Rank 0] step:2921/10000 train_time:144150ms step_avg:49.35ms +[2025-09-11 11:47:51] [Rank 0] step:2941/10000 train_time:144812ms step_avg:49.24ms +[2025-09-11 11:47:51] [Rank 0] step:2941/10000 train_time:144812ms step_avg:49.24ms +[2025-09-11 11:47:52] [Rank 0] step:2961/10000 train_time:145474ms step_avg:49.13ms +[2025-09-11 11:47:52] [Rank 0] step:2961/10000 train_time:145474ms step_avg:49.13ms 
+[2025-09-11 11:47:53] [Rank 0] step:2981/10000 train_time:146139ms step_avg:49.02ms +[2025-09-11 11:47:53] [Rank 0] step:2981/10000 train_time:146139ms step_avg:49.02ms +[2025-09-11 11:47:54] [Rank 0] step:3001/10000 train_time:147110ms step_avg:49.02ms +[2025-09-11 11:47:54] [Rank 0] step:3001/10000 train_time:147110ms step_avg:49.02ms +[2025-09-11 11:47:54] [Rank 0] step:3021/10000 train_time:147775ms step_avg:48.92ms +[2025-09-11 11:47:54] [Rank 0] step:3021/10000 train_time:147775ms step_avg:48.92ms +[2025-09-11 11:47:55] [Rank 0] step:3041/10000 train_time:148440ms step_avg:48.81ms +[2025-09-11 11:47:55] [Rank 0] step:3041/10000 train_time:148440ms step_avg:48.81ms +[2025-09-11 11:47:56] [Rank 0] step:3061/10000 train_time:149373ms step_avg:48.80ms +[2025-09-11 11:47:56] [Rank 0] step:3061/10000 train_time:149373ms step_avg:48.80ms +[2025-09-11 11:47:57] [Rank 0] step:3081/10000 train_time:150038ms step_avg:48.70ms +[2025-09-11 11:47:57] [Rank 0] step:3081/10000 train_time:150038ms step_avg:48.70ms +[2025-09-11 11:47:57] [Rank 0] step:3101/10000 train_time:150703ms step_avg:48.60ms +[2025-09-11 11:47:57] [Rank 0] step:3101/10000 train_time:150703ms step_avg:48.60ms +[2025-09-11 11:47:58] [Rank 0] step:3121/10000 train_time:151368ms step_avg:48.50ms +[2025-09-11 11:47:58] [Rank 0] step:3121/10000 train_time:151368ms step_avg:48.50ms +[2025-09-11 11:47:59] [Rank 0] step:3141/10000 train_time:152034ms step_avg:48.40ms +[2025-09-11 11:47:59] [Rank 0] step:3141/10000 train_time:152034ms step_avg:48.40ms +[2025-09-11 11:47:59] [Rank 0] step:3161/10000 train_time:152699ms step_avg:48.31ms +[2025-09-11 11:47:59] [Rank 0] step:3161/10000 train_time:152699ms step_avg:48.31ms +[2025-09-11 11:48:00] [Rank 0] step:3181/10000 train_time:153364ms step_avg:48.21ms +[2025-09-11 11:48:00] [Rank 0] step:3181/10000 train_time:153364ms step_avg:48.21ms +[2025-09-11 11:48:01] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 11:48:01] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 11:48:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 11:48:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 11:48:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 11:48:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 11:48:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:48:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:48:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 11:48:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 11:48:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 11:48:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 11:48:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 11:48:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 11:48:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 11:48:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 11:48:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 11:48:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 11:48:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 11:48:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 11:48:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 11:48:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 11:48:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 11:48:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 11:48:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 11:48:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 11:48:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 11:48:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 11:48:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 11:48:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 11:48:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 11:48:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 11:48:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 11:48:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 11:48:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 11:48:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 11:48:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 11:48:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 11:48:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 11:48:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 11:48:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 11:48:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 11:48:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 11:48:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 11:48:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:48:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:48:15] [Rank 0] PRINT: step:3200/10000 val_loss:5.0622 total_sharp:1.6056e-03 L1_sharp:7.7748e-02 L2_sharp:8.7110e-02 L3_sharp:9.4511e-02 L4_sharp:1.2584e-01 L5_sharp:1.6609e-01 L6_sharp:2.1389e-01 L7_sharp:2.6692e-01 L8_sharp:3.0245e-01 L9_sharp:3.3038e-01 L10_sharp:4.4777e-01 L11_sharp:4.7418e-01 L12_sharp:1.1124e+00 total_fnorm:1.7000e+01 total_l1_linf:2.3680e+04 total_spectral:8.5000e+00 L1_fnorm:1.1572e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1475e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1523e-01 L10_fnorm:1.1475e-01 L11_fnorm:1.1523e-01 L12_fnorm:1.1572e-01 L1_l1linf:3.1738e-02 L2_l1linf:3.1738e-02 L3_l1linf:3.1494e-02 L4_l1linf:3.1738e-02 L5_l1linf:3.1494e-02 L6_l1linf:3.1738e-02 L7_l1linf:3.1738e-02 L8_l1linf:3.1738e-02 L9_l1linf:3.1738e-02 L10_l1linf:3.1982e-02 L11_l1linf:3.2715e-02 L12_l1linf:3.3203e-02 L1_spectral:1.6264e-03 L2_spectral:1.6075e-03 L3_spectral:1.6095e-03 L4_spectral:1.6143e-03 L5_spectral:1.6003e-03 L6_spectral:1.6082e-03 L7_spectral:1.6160e-03 L8_spectral:1.6075e-03 L9_spectral:1.6047e-03 L10_spectral:1.6090e-03 L11_spectral:1.6172e-03 L12_spectral:1.6185e-03 train_time:154011ms step_avg:48.13ms +[2025-09-11 11:48:15] [Rank 0] PRINT: step:3200/10000 
val_loss:5.0622 total_sharp:1.6056e-03 L1_sharp:7.7748e-02 L2_sharp:8.7110e-02 L3_sharp:9.4511e-02 L4_sharp:1.2584e-01 L5_sharp:1.6609e-01 L6_sharp:2.1389e-01 L7_sharp:2.6692e-01 L8_sharp:3.0245e-01 L9_sharp:3.3038e-01 L10_sharp:4.4777e-01 L11_sharp:4.7418e-01 L12_sharp:1.1124e+00 total_fnorm:1.7000e+01 total_l1_linf:2.3680e+04 total_spectral:8.5000e+00 L1_fnorm:1.1572e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1475e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1523e-01 L10_fnorm:1.1475e-01 L11_fnorm:1.1523e-01 L12_fnorm:1.1572e-01 L1_l1linf:3.1738e-02 L2_l1linf:3.1738e-02 L3_l1linf:3.1494e-02 L4_l1linf:3.1738e-02 L5_l1linf:3.1494e-02 L6_l1linf:3.1738e-02 L7_l1linf:3.1738e-02 L8_l1linf:3.1738e-02 L9_l1linf:3.1738e-02 L10_l1linf:3.1982e-02 L11_l1linf:3.2715e-02 L12_l1linf:3.3203e-02 L1_spectral:1.6264e-03 L2_spectral:1.6075e-03 L3_spectral:1.6095e-03 L4_spectral:1.6143e-03 L5_spectral:1.6003e-03 L6_spectral:1.6082e-03 L7_spectral:1.6160e-03 L8_spectral:1.6075e-03 L9_spectral:1.6047e-03 L10_spectral:1.6090e-03 L11_spectral:1.6172e-03 L12_spectral:1.6185e-03 train_time:154011ms step_avg:48.13ms +[2025-09-11 11:48:17] [Rank 0] step:3201/10000 train_time:155834ms step_avg:48.68ms +[2025-09-11 11:48:17] [Rank 0] step:3201/10000 train_time:155834ms step_avg:48.68ms +[2025-09-11 11:48:18] [Rank 0] step:3221/10000 train_time:156532ms step_avg:48.60ms +[2025-09-11 11:48:18] [Rank 0] step:3221/10000 train_time:156532ms step_avg:48.60ms +[2025-09-11 11:48:19] [Rank 0] step:3241/10000 train_time:157199ms step_avg:48.50ms +[2025-09-11 11:48:19] [Rank 0] step:3241/10000 train_time:157199ms step_avg:48.50ms +[2025-09-11 11:48:19] [Rank 0] step:3261/10000 train_time:157865ms step_avg:48.41ms +[2025-09-11 11:48:19] [Rank 0] step:3261/10000 train_time:157865ms step_avg:48.41ms +[2025-09-11 11:48:20] [Rank 0] step:3281/10000 train_time:158530ms step_avg:48.32ms +[2025-09-11 11:48:20] [Rank 0] step:3281/10000 
train_time:158530ms step_avg:48.32ms +[2025-09-11 11:48:21] [Rank 0] step:3301/10000 train_time:159197ms step_avg:48.23ms +[2025-09-11 11:48:21] [Rank 0] step:3301/10000 train_time:159197ms step_avg:48.23ms +[2025-09-11 11:48:21] [Rank 0] step:3321/10000 train_time:159862ms step_avg:48.14ms +[2025-09-11 11:48:21] [Rank 0] step:3321/10000 train_time:159862ms step_avg:48.14ms +[2025-09-11 11:48:22] [Rank 0] step:3341/10000 train_time:160527ms step_avg:48.05ms +[2025-09-11 11:48:22] [Rank 0] step:3341/10000 train_time:160527ms step_avg:48.05ms +[2025-09-11 11:48:23] [Rank 0] step:3361/10000 train_time:161193ms step_avg:47.96ms +[2025-09-11 11:48:23] [Rank 0] step:3361/10000 train_time:161193ms step_avg:47.96ms +[2025-09-11 11:48:23] [Rank 0] step:3381/10000 train_time:161859ms step_avg:47.87ms +[2025-09-11 11:48:23] [Rank 0] step:3381/10000 train_time:161859ms step_avg:47.87ms +[2025-09-11 11:48:24] [Rank 0] step:3401/10000 train_time:162525ms step_avg:47.79ms +[2025-09-11 11:48:24] [Rank 0] step:3401/10000 train_time:162525ms step_avg:47.79ms +[2025-09-11 11:48:25] [Rank 0] step:3421/10000 train_time:163190ms step_avg:47.70ms +[2025-09-11 11:48:25] [Rank 0] step:3421/10000 train_time:163190ms step_avg:47.70ms +[2025-09-11 11:48:25] [Rank 0] step:3441/10000 train_time:163855ms step_avg:47.62ms +[2025-09-11 11:48:25] [Rank 0] step:3441/10000 train_time:163855ms step_avg:47.62ms +[2025-09-11 11:48:26] [Rank 0] step:3461/10000 train_time:164520ms step_avg:47.54ms +[2025-09-11 11:48:26] [Rank 0] step:3461/10000 train_time:164520ms step_avg:47.54ms +[2025-09-11 11:48:27] [Rank 0] step:3481/10000 train_time:165186ms step_avg:47.45ms +[2025-09-11 11:48:27] [Rank 0] step:3481/10000 train_time:165186ms step_avg:47.45ms +[2025-09-11 11:48:27] [Rank 0] step:3501/10000 train_time:165852ms step_avg:47.37ms +[2025-09-11 11:48:27] [Rank 0] step:3501/10000 train_time:165852ms step_avg:47.37ms +[2025-09-11 11:48:28] [Rank 0] step:3521/10000 train_time:166517ms step_avg:47.29ms 
+[2025-09-11 11:48:28] [Rank 0] step:3521/10000 train_time:166517ms step_avg:47.29ms +[2025-09-11 11:48:29] [Rank 0] step:3541/10000 train_time:167182ms step_avg:47.21ms +[2025-09-11 11:48:29] [Rank 0] step:3541/10000 train_time:167182ms step_avg:47.21ms +[2025-09-11 11:48:29] [Rank 0] step:3561/10000 train_time:167847ms step_avg:47.13ms +[2025-09-11 11:48:29] [Rank 0] step:3561/10000 train_time:167847ms step_avg:47.13ms +[2025-09-11 11:48:30] [Rank 0] step:3581/10000 train_time:168513ms step_avg:47.06ms +[2025-09-11 11:48:30] [Rank 0] step:3581/10000 train_time:168513ms step_avg:47.06ms +[2025-09-11 11:48:30] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 11:48:30] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 11:48:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 11:48:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 11:48:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 11:48:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 11:48:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:48:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:48:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 11:48:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 11:48:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 11:48:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 11:48:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 11:48:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 11:48:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 11:48:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 11:48:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 11:48:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 11:48:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 11:48:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 11:48:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 11:48:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 11:48:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 11:48:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 11:48:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 11:48:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 11:48:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 11:48:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 11:48:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 11:48:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 11:48:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 11:48:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 11:48:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 11:48:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 11:48:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 11:48:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 11:48:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 11:48:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 11:48:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 11:48:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 11:48:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 11:48:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 11:48:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 11:48:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 11:48:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:48:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:48:41] [Rank 0] PRINT: step:3600/10000 val_loss:4.9896 total_sharp:1.7409e-03 L1_sharp:7.2225e-02 L2_sharp:8.2019e-02 L3_sharp:9.1101e-02 L4_sharp:1.0880e-01 L5_sharp:1.2191e-01 L6_sharp:1.5555e-01 L7_sharp:1.9265e-01 L8_sharp:2.3460e-01 L9_sharp:2.9902e-01 L10_sharp:3.8339e-01 L11_sharp:4.6207e-01 L12_sharp:1.0534e+00 total_fnorm:1.4875e+01 total_l1_linf:1.9584e+04 total_spectral:7.4688e+00 L1_fnorm:1.1572e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1523e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1426e-01 L9_fnorm:1.1475e-01 L10_fnorm:1.1475e-01 L11_fnorm:1.1523e-01 L12_fnorm:1.1572e-01 L1_l1linf:3.0640e-02 L2_l1linf:3.1006e-02 L3_l1linf:3.0518e-02 L4_l1linf:3.0396e-02 L5_l1linf:3.1006e-02 L6_l1linf:3.0518e-02 L7_l1linf:3.1128e-02 L8_l1linf:3.0884e-02 L9_l1linf:3.0884e-02 L10_l1linf:3.1006e-02 L11_l1linf:3.2227e-02 L12_l1linf:3.2715e-02 L1_spectral:1.6062e-03 L2_spectral:1.6186e-03 L3_spectral:1.6185e-03 L4_spectral:1.6142e-03 L5_spectral:1.6300e-03 L6_spectral:1.6147e-03 L7_spectral:1.6248e-03 L8_spectral:1.6155e-03 L9_spectral:1.6095e-03 L10_spectral:1.6122e-03 L11_spectral:1.6152e-03 L12_spectral:1.6303e-03 train_time:169160ms step_avg:46.99ms +[2025-09-11 11:48:41] [Rank 0] PRINT: step:3600/10000 val_loss:4.9896 total_sharp:1.7409e-03 L1_sharp:7.2225e-02 L2_sharp:8.2019e-02 L3_sharp:9.1101e-02 L4_sharp:1.0880e-01 L5_sharp:1.2191e-01 L6_sharp:1.5555e-01 L7_sharp:1.9265e-01 L8_sharp:2.3460e-01 L9_sharp:2.9902e-01 L10_sharp:3.8339e-01 L11_sharp:4.6207e-01 L12_sharp:1.0534e+00 total_fnorm:1.4875e+01 total_l1_linf:1.9584e+04 total_spectral:7.4688e+00 L1_fnorm:1.1572e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1523e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1426e-01 L9_fnorm:1.1475e-01 L10_fnorm:1.1475e-01 L11_fnorm:1.1523e-01 L12_fnorm:1.1572e-01 L1_l1linf:3.0640e-02 L2_l1linf:3.1006e-02 L3_l1linf:3.0518e-02 L4_l1linf:3.0396e-02 L5_l1linf:3.1006e-02 
L6_l1linf:3.0518e-02 L7_l1linf:3.1128e-02 L8_l1linf:3.0884e-02 L9_l1linf:3.0884e-02 L10_l1linf:3.1006e-02 L11_l1linf:3.2227e-02 L12_l1linf:3.2715e-02 L1_spectral:1.6062e-03 L2_spectral:1.6186e-03 L3_spectral:1.6185e-03 L4_spectral:1.6142e-03 L5_spectral:1.6300e-03 L6_spectral:1.6147e-03 L7_spectral:1.6248e-03 L8_spectral:1.6155e-03 L9_spectral:1.6095e-03 L10_spectral:1.6122e-03 L11_spectral:1.6152e-03 L12_spectral:1.6303e-03 train_time:169160ms step_avg:46.99ms +[2025-09-11 11:48:42] [Rank 0] step:3601/10000 train_time:170713ms step_avg:47.41ms +[2025-09-11 11:48:42] [Rank 0] step:3601/10000 train_time:170713ms step_avg:47.41ms +[2025-09-11 11:48:43] [Rank 0] step:3621/10000 train_time:171408ms step_avg:47.34ms +[2025-09-11 11:48:43] [Rank 0] step:3621/10000 train_time:171408ms step_avg:47.34ms +[2025-09-11 11:48:44] [Rank 0] step:3641/10000 train_time:172074ms step_avg:47.26ms +[2025-09-11 11:48:44] [Rank 0] step:3641/10000 train_time:172074ms step_avg:47.26ms +[2025-09-11 11:48:44] [Rank 0] step:3661/10000 train_time:172739ms step_avg:47.18ms +[2025-09-11 11:48:44] [Rank 0] step:3661/10000 train_time:172739ms step_avg:47.18ms +[2025-09-11 11:48:45] [Rank 0] step:3681/10000 train_time:173404ms step_avg:47.11ms +[2025-09-11 11:48:45] [Rank 0] step:3681/10000 train_time:173404ms step_avg:47.11ms +[2025-09-11 11:48:46] [Rank 0] step:3701/10000 train_time:174068ms step_avg:47.03ms +[2025-09-11 11:48:46] [Rank 0] step:3701/10000 train_time:174068ms step_avg:47.03ms +[2025-09-11 11:48:46] [Rank 0] step:3721/10000 train_time:174742ms step_avg:46.96ms +[2025-09-11 11:48:46] [Rank 0] step:3721/10000 train_time:174742ms step_avg:46.96ms +[2025-09-11 11:48:47] [Rank 0] step:3741/10000 train_time:175418ms step_avg:46.89ms +[2025-09-11 11:48:47] [Rank 0] step:3741/10000 train_time:175418ms step_avg:46.89ms +[2025-09-11 11:48:48] [Rank 0] step:3761/10000 train_time:176094ms step_avg:46.82ms +[2025-09-11 11:48:48] [Rank 0] step:3761/10000 train_time:176094ms step_avg:46.82ms 
+[2025-09-11 11:48:48] [Rank 0] step:3781/10000 train_time:176769ms step_avg:46.75ms +[2025-09-11 11:48:48] [Rank 0] step:3781/10000 train_time:176769ms step_avg:46.75ms +[2025-09-11 11:48:49] [Rank 0] step:3801/10000 train_time:177445ms step_avg:46.68ms +[2025-09-11 11:48:49] [Rank 0] step:3801/10000 train_time:177445ms step_avg:46.68ms +[2025-09-11 11:48:50] [Rank 0] step:3821/10000 train_time:178122ms step_avg:46.62ms +[2025-09-11 11:48:50] [Rank 0] step:3821/10000 train_time:178122ms step_avg:46.62ms +[2025-09-11 11:48:50] [Rank 0] step:3841/10000 train_time:178798ms step_avg:46.55ms +[2025-09-11 11:48:50] [Rank 0] step:3841/10000 train_time:178798ms step_avg:46.55ms +[2025-09-11 11:48:51] [Rank 0] step:3861/10000 train_time:179473ms step_avg:46.48ms +[2025-09-11 11:48:51] [Rank 0] step:3861/10000 train_time:179473ms step_avg:46.48ms +[2025-09-11 11:48:52] [Rank 0] step:3881/10000 train_time:180148ms step_avg:46.42ms +[2025-09-11 11:48:52] [Rank 0] step:3881/10000 train_time:180148ms step_avg:46.42ms +[2025-09-11 11:48:52] [Rank 0] step:3901/10000 train_time:180824ms step_avg:46.35ms +[2025-09-11 11:48:52] [Rank 0] step:3901/10000 train_time:180824ms step_avg:46.35ms +[2025-09-11 11:48:53] [Rank 0] step:3921/10000 train_time:181499ms step_avg:46.29ms +[2025-09-11 11:48:53] [Rank 0] step:3921/10000 train_time:181499ms step_avg:46.29ms +[2025-09-11 11:48:54] [Rank 0] step:3941/10000 train_time:182175ms step_avg:46.23ms +[2025-09-11 11:48:54] [Rank 0] step:3941/10000 train_time:182175ms step_avg:46.23ms +[2025-09-11 11:48:55] [Rank 0] step:3961/10000 train_time:182851ms step_avg:46.16ms +[2025-09-11 11:48:55] [Rank 0] step:3961/10000 train_time:182851ms step_avg:46.16ms +[2025-09-11 11:48:55] [Rank 0] step:3981/10000 train_time:183527ms step_avg:46.10ms +[2025-09-11 11:48:55] [Rank 0] step:3981/10000 train_time:183527ms step_avg:46.10ms +[2025-09-11 11:48:56] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 11:48:56] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 11:48:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 11:48:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 11:48:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 11:48:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 11:48:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:48:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:48:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 11:48:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 11:48:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 11:48:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 11:49:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 11:49:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 11:49:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 11:49:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 11:49:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 11:49:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 11:49:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 11:49:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 11:49:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 11:49:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 11:49:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 11:49:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 11:49:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 11:49:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 11:49:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 11:49:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 11:49:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 11:49:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 11:49:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 11:49:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 11:49:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 11:49:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 11:49:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 11:49:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 11:49:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 11:49:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 11:49:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 11:49:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 11:49:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 11:49:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 11:49:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 11:49:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 11:49:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:49:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:49:07] [Rank 0] PRINT: step:4000/10000 val_loss:4.9176 total_sharp:1.7377e-03 L1_sharp:5.1791e-02 L2_sharp:5.4534e-02 L3_sharp:6.5825e-02 L4_sharp:8.8832e-02 L5_sharp:1.2592e-01 L6_sharp:1.5892e-01 L7_sharp:2.3267e-01 L8_sharp:3.5102e-01 L9_sharp:4.4386e-01 L10_sharp:6.4787e-01 L11_sharp:6.9891e-01 L12_sharp:2.4814e+00 total_fnorm:1.8000e+01 total_l1_linf:2.3552e+04 total_spectral:9.0000e+00 L1_fnorm:1.1523e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1475e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1475e-01 L8_fnorm:1.1426e-01 L9_fnorm:1.1523e-01 L10_fnorm:1.1475e-01 L11_fnorm:1.1523e-01 L12_fnorm:1.1475e-01 L1_l1linf:3.2227e-02 L2_l1linf:3.1494e-02 L3_l1linf:3.1250e-02 L4_l1linf:3.1250e-02 L5_l1linf:3.1250e-02 L6_l1linf:3.1494e-02 L7_l1linf:3.2227e-02 L8_l1linf:3.1738e-02 L9_l1linf:3.1738e-02 L10_l1linf:3.1738e-02 L11_l1linf:3.1982e-02 L12_l1linf:3.1494e-02 L1_spectral:1.6046e-03 L2_spectral:1.6074e-03 L3_spectral:1.6205e-03 L4_spectral:1.6044e-03 L5_spectral:1.6051e-03 L6_spectral:1.6128e-03 L7_spectral:1.6134e-03 L8_spectral:1.6050e-03 L9_spectral:1.6068e-03 L10_spectral:1.6131e-03 L11_spectral:1.6121e-03 L12_spectral:1.6147e-03 train_time:184183ms step_avg:46.05ms +[2025-09-11 11:49:07] [Rank 0] PRINT: step:4000/10000 
val_loss:4.9176 total_sharp:1.7377e-03 L1_sharp:5.1791e-02 L2_sharp:5.4534e-02 L3_sharp:6.5825e-02 L4_sharp:8.8832e-02 L5_sharp:1.2592e-01 L6_sharp:1.5892e-01 L7_sharp:2.3267e-01 L8_sharp:3.5102e-01 L9_sharp:4.4386e-01 L10_sharp:6.4787e-01 L11_sharp:6.9891e-01 L12_sharp:2.4814e+00 total_fnorm:1.8000e+01 total_l1_linf:2.3552e+04 total_spectral:9.0000e+00 L1_fnorm:1.1523e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1475e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1475e-01 L8_fnorm:1.1426e-01 L9_fnorm:1.1523e-01 L10_fnorm:1.1475e-01 L11_fnorm:1.1523e-01 L12_fnorm:1.1475e-01 L1_l1linf:3.2227e-02 L2_l1linf:3.1494e-02 L3_l1linf:3.1250e-02 L4_l1linf:3.1250e-02 L5_l1linf:3.1250e-02 L6_l1linf:3.1494e-02 L7_l1linf:3.2227e-02 L8_l1linf:3.1738e-02 L9_l1linf:3.1738e-02 L10_l1linf:3.1738e-02 L11_l1linf:3.1982e-02 L12_l1linf:3.1494e-02 L1_spectral:1.6046e-03 L2_spectral:1.6074e-03 L3_spectral:1.6205e-03 L4_spectral:1.6044e-03 L5_spectral:1.6051e-03 L6_spectral:1.6128e-03 L7_spectral:1.6134e-03 L8_spectral:1.6050e-03 L9_spectral:1.6068e-03 L10_spectral:1.6131e-03 L11_spectral:1.6121e-03 L12_spectral:1.6147e-03 train_time:184183ms step_avg:46.05ms +[2025-09-11 11:49:08] [Rank 0] step:4001/10000 train_time:186141ms step_avg:46.52ms +[2025-09-11 11:49:08] [Rank 0] step:4001/10000 train_time:186141ms step_avg:46.52ms +[2025-09-11 11:49:09] [Rank 0] step:4021/10000 train_time:186821ms step_avg:46.46ms +[2025-09-11 11:49:09] [Rank 0] step:4021/10000 train_time:186821ms step_avg:46.46ms +[2025-09-11 11:49:10] [Rank 0] step:4041/10000 train_time:187499ms step_avg:46.40ms +[2025-09-11 11:49:10] [Rank 0] step:4041/10000 train_time:187499ms step_avg:46.40ms +[2025-09-11 11:49:11] [Rank 0] step:4061/10000 train_time:188174ms step_avg:46.34ms +[2025-09-11 11:49:11] [Rank 0] step:4061/10000 train_time:188174ms step_avg:46.34ms +[2025-09-11 11:49:11] [Rank 0] step:4081/10000 train_time:188851ms step_avg:46.28ms +[2025-09-11 11:49:11] [Rank 0] step:4081/10000 
train_time:188851ms step_avg:46.28ms +[2025-09-11 11:49:12] [Rank 0] step:4101/10000 train_time:189526ms step_avg:46.21ms +[2025-09-11 11:49:12] [Rank 0] step:4101/10000 train_time:189526ms step_avg:46.21ms +[2025-09-11 11:49:13] [Rank 0] step:4121/10000 train_time:190203ms step_avg:46.15ms +[2025-09-11 11:49:13] [Rank 0] step:4121/10000 train_time:190203ms step_avg:46.15ms +[2025-09-11 11:49:13] [Rank 0] step:4141/10000 train_time:190878ms step_avg:46.09ms +[2025-09-11 11:49:13] [Rank 0] step:4141/10000 train_time:190878ms step_avg:46.09ms +[2025-09-11 11:49:14] [Rank 0] step:4161/10000 train_time:191554ms step_avg:46.04ms +[2025-09-11 11:49:14] [Rank 0] step:4161/10000 train_time:191554ms step_avg:46.04ms +[2025-09-11 11:49:15] [Rank 0] step:4181/10000 train_time:192230ms step_avg:45.98ms +[2025-09-11 11:49:15] [Rank 0] step:4181/10000 train_time:192230ms step_avg:45.98ms +[2025-09-11 11:49:15] [Rank 0] step:4201/10000 train_time:192906ms step_avg:45.92ms +[2025-09-11 11:49:15] [Rank 0] step:4201/10000 train_time:192906ms step_avg:45.92ms +[2025-09-11 11:49:16] [Rank 0] step:4221/10000 train_time:193582ms step_avg:45.86ms +[2025-09-11 11:49:16] [Rank 0] step:4221/10000 train_time:193582ms step_avg:45.86ms +[2025-09-11 11:49:17] [Rank 0] step:4241/10000 train_time:194258ms step_avg:45.80ms +[2025-09-11 11:49:17] [Rank 0] step:4241/10000 train_time:194258ms step_avg:45.80ms +[2025-09-11 11:49:17] [Rank 0] step:4261/10000 train_time:194935ms step_avg:45.75ms +[2025-09-11 11:49:17] [Rank 0] step:4261/10000 train_time:194935ms step_avg:45.75ms +[2025-09-11 11:49:18] [Rank 0] step:4281/10000 train_time:195612ms step_avg:45.69ms +[2025-09-11 11:49:18] [Rank 0] step:4281/10000 train_time:195612ms step_avg:45.69ms +[2025-09-11 11:49:19] [Rank 0] step:4301/10000 train_time:196289ms step_avg:45.64ms +[2025-09-11 11:49:19] [Rank 0] step:4301/10000 train_time:196289ms step_avg:45.64ms +[2025-09-11 11:49:19] [Rank 0] step:4321/10000 train_time:196965ms step_avg:45.58ms 
+[2025-09-11 11:49:19] [Rank 0] step:4321/10000 train_time:196965ms step_avg:45.58ms +[2025-09-11 11:49:20] [Rank 0] step:4341/10000 train_time:197641ms step_avg:45.53ms +[2025-09-11 11:49:20] [Rank 0] step:4341/10000 train_time:197641ms step_avg:45.53ms +[2025-09-11 11:49:21] [Rank 0] step:4361/10000 train_time:198316ms step_avg:45.47ms +[2025-09-11 11:49:21] [Rank 0] step:4361/10000 train_time:198316ms step_avg:45.47ms +[2025-09-11 11:49:21] [Rank 0] step:4381/10000 train_time:198993ms step_avg:45.42ms +[2025-09-11 11:49:21] [Rank 0] step:4381/10000 train_time:198993ms step_avg:45.42ms +[2025-09-11 11:49:22] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 11:49:22] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 11:49:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 11:49:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 11:49:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 11:49:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 11:49:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:49:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:49:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 11:49:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 11:49:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 11:49:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 11:49:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 11:49:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 11:49:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 11:49:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 11:49:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 11:49:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 11:49:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 11:49:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 11:49:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 11:49:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 11:49:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 11:49:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 11:49:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 11:49:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 11:49:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 11:49:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 11:49:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 11:49:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 11:49:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 11:49:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 11:49:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 11:49:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 11:49:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 11:49:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 11:49:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 11:49:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 11:49:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 11:49:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 11:49:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 11:49:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 11:49:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 11:49:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 11:49:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:49:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:49:33] [Rank 0] PRINT: step:4400/10000 val_loss:4.8701 total_sharp:1.5075e-03 L1_sharp:4.0017e-02 L2_sharp:5.3023e-02 L3_sharp:6.0525e-02 L4_sharp:7.8494e-02 L5_sharp:1.1439e-01 L6_sharp:1.4871e-01 L7_sharp:1.9004e-01 L8_sharp:2.1908e-01 L9_sharp:2.7735e-01 L10_sharp:3.8355e-01 L11_sharp:4.9167e-01 L12_sharp:2.2168e+00 total_fnorm:1.5625e+01 total_l1_linf:1.9968e+04 total_spectral:7.8125e+00 L1_fnorm:1.1426e-01 L2_fnorm:1.1426e-01 L3_fnorm:1.1475e-01 L4_fnorm:1.1426e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1377e-01 L9_fnorm:1.1475e-01 L10_fnorm:1.1475e-01 L11_fnorm:1.1426e-01 L12_fnorm:1.1426e-01 L1_l1linf:3.0151e-02 L2_l1linf:3.0396e-02 L3_l1linf:3.0396e-02 L4_l1linf:2.9663e-02 L5_l1linf:3.0151e-02 L6_l1linf:2.9907e-02 L7_l1linf:3.0273e-02 L8_l1linf:3.0029e-02 L9_l1linf:3.0884e-02 L10_l1linf:3.1250e-02 L11_l1linf:3.1494e-02 L12_l1linf:3.0273e-02 L1_spectral:1.5925e-03 L2_spectral:1.6224e-03 L3_spectral:1.5997e-03 L4_spectral:1.6135e-03 L5_spectral:1.6081e-03 L6_spectral:1.6094e-03 L7_spectral:1.6283e-03 L8_spectral:1.6107e-03 L9_spectral:1.6026e-03 L10_spectral:1.6090e-03 L11_spectral:1.6020e-03 L12_spectral:1.6130e-03 train_time:199650ms step_avg:45.37ms +[2025-09-11 11:49:33] [Rank 0] PRINT: step:4400/10000 val_loss:4.8701 total_sharp:1.5075e-03 L1_sharp:4.0017e-02 L2_sharp:5.3023e-02 L3_sharp:6.0525e-02 L4_sharp:7.8494e-02 L5_sharp:1.1439e-01 L6_sharp:1.4871e-01 L7_sharp:1.9004e-01 L8_sharp:2.1908e-01 L9_sharp:2.7735e-01 L10_sharp:3.8355e-01 L11_sharp:4.9167e-01 L12_sharp:2.2168e+00 total_fnorm:1.5625e+01 total_l1_linf:1.9968e+04 total_spectral:7.8125e+00 L1_fnorm:1.1426e-01 L2_fnorm:1.1426e-01 L3_fnorm:1.1475e-01 L4_fnorm:1.1426e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1377e-01 L9_fnorm:1.1475e-01 L10_fnorm:1.1475e-01 L11_fnorm:1.1426e-01 L12_fnorm:1.1426e-01 L1_l1linf:3.0151e-02 L2_l1linf:3.0396e-02 L3_l1linf:3.0396e-02 L4_l1linf:2.9663e-02 L5_l1linf:3.0151e-02 
L6_l1linf:2.9907e-02 L7_l1linf:3.0273e-02 L8_l1linf:3.0029e-02 L9_l1linf:3.0884e-02 L10_l1linf:3.1250e-02 L11_l1linf:3.1494e-02 L12_l1linf:3.0273e-02 L1_spectral:1.5925e-03 L2_spectral:1.6224e-03 L3_spectral:1.5997e-03 L4_spectral:1.6135e-03 L5_spectral:1.6081e-03 L6_spectral:1.6094e-03 L7_spectral:1.6283e-03 L8_spectral:1.6107e-03 L9_spectral:1.6026e-03 L10_spectral:1.6090e-03 L11_spectral:1.6020e-03 L12_spectral:1.6130e-03 train_time:199650ms step_avg:45.37ms +[2025-09-11 11:49:35] [Rank 0] step:4401/10000 train_time:201642ms step_avg:45.82ms +[2025-09-11 11:49:35] [Rank 0] step:4401/10000 train_time:201642ms step_avg:45.82ms +[2025-09-11 11:49:36] [Rank 0] step:4421/10000 train_time:202345ms step_avg:45.77ms +[2025-09-11 11:49:36] [Rank 0] step:4421/10000 train_time:202345ms step_avg:45.77ms +[2025-09-11 11:49:36] [Rank 0] step:4441/10000 train_time:203023ms step_avg:45.72ms +[2025-09-11 11:49:36] [Rank 0] step:4441/10000 train_time:203023ms step_avg:45.72ms +[2025-09-11 11:49:37] [Rank 0] step:4461/10000 train_time:203701ms step_avg:45.66ms +[2025-09-11 11:49:37] [Rank 0] step:4461/10000 train_time:203701ms step_avg:45.66ms +[2025-09-11 11:49:38] [Rank 0] step:4481/10000 train_time:204379ms step_avg:45.61ms +[2025-09-11 11:49:38] [Rank 0] step:4481/10000 train_time:204379ms step_avg:45.61ms +[2025-09-11 11:49:38] [Rank 0] step:4501/10000 train_time:205060ms step_avg:45.56ms +[2025-09-11 11:49:38] [Rank 0] step:4501/10000 train_time:205060ms step_avg:45.56ms +[2025-09-11 11:49:39] [Rank 0] step:4521/10000 train_time:205738ms step_avg:45.51ms +[2025-09-11 11:49:39] [Rank 0] step:4521/10000 train_time:205738ms step_avg:45.51ms +[2025-09-11 11:49:40] [Rank 0] step:4541/10000 train_time:206418ms step_avg:45.46ms +[2025-09-11 11:49:40] [Rank 0] step:4541/10000 train_time:206418ms step_avg:45.46ms +[2025-09-11 11:49:40] [Rank 0] step:4561/10000 train_time:207095ms step_avg:45.41ms +[2025-09-11 11:49:40] [Rank 0] step:4561/10000 train_time:207095ms step_avg:45.41ms 
+[2025-09-11 11:49:41] [Rank 0] step:4581/10000 train_time:207774ms step_avg:45.36ms +[2025-09-11 11:49:41] [Rank 0] step:4581/10000 train_time:207774ms step_avg:45.36ms +[2025-09-11 11:49:42] [Rank 0] step:4601/10000 train_time:208454ms step_avg:45.31ms +[2025-09-11 11:49:42] [Rank 0] step:4601/10000 train_time:208454ms step_avg:45.31ms +[2025-09-11 11:49:42] [Rank 0] step:4621/10000 train_time:209132ms step_avg:45.26ms +[2025-09-11 11:49:42] [Rank 0] step:4621/10000 train_time:209132ms step_avg:45.26ms +[2025-09-11 11:49:43] [Rank 0] step:4641/10000 train_time:209810ms step_avg:45.21ms +[2025-09-11 11:49:43] [Rank 0] step:4641/10000 train_time:209810ms step_avg:45.21ms +[2025-09-11 11:49:44] [Rank 0] step:4661/10000 train_time:210490ms step_avg:45.16ms +[2025-09-11 11:49:44] [Rank 0] step:4661/10000 train_time:210490ms step_avg:45.16ms +[2025-09-11 11:49:44] [Rank 0] step:4681/10000 train_time:211168ms step_avg:45.11ms +[2025-09-11 11:49:44] [Rank 0] step:4681/10000 train_time:211168ms step_avg:45.11ms +[2025-09-11 11:49:45] [Rank 0] step:4701/10000 train_time:211846ms step_avg:45.06ms +[2025-09-11 11:49:45] [Rank 0] step:4701/10000 train_time:211846ms step_avg:45.06ms +[2025-09-11 11:49:46] [Rank 0] step:4721/10000 train_time:212525ms step_avg:45.02ms +[2025-09-11 11:49:46] [Rank 0] step:4721/10000 train_time:212525ms step_avg:45.02ms +[2025-09-11 11:49:46] [Rank 0] step:4741/10000 train_time:213203ms step_avg:44.97ms +[2025-09-11 11:49:46] [Rank 0] step:4741/10000 train_time:213203ms step_avg:44.97ms +[2025-09-11 11:49:47] [Rank 0] step:4761/10000 train_time:213883ms step_avg:44.92ms +[2025-09-11 11:49:47] [Rank 0] step:4761/10000 train_time:213883ms step_avg:44.92ms +[2025-09-11 11:49:48] [Rank 0] step:4781/10000 train_time:214562ms step_avg:44.88ms +[2025-09-11 11:49:48] [Rank 0] step:4781/10000 train_time:214562ms step_avg:44.88ms +[2025-09-11 11:49:48] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 11:49:48] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 11:49:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 11:49:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 11:49:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 11:49:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 11:49:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:49:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:49:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 11:49:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 11:49:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 11:49:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 11:49:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 11:49:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 11:49:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 11:49:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 11:49:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 11:49:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 11:49:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 11:49:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 11:49:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 11:49:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 11:49:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 11:49:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 11:49:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 11:49:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 11:49:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 11:49:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 11:49:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 11:49:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 11:49:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 11:49:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 11:49:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 11:49:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 11:49:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 11:49:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 11:49:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 11:49:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 11:49:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 11:49:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 11:49:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 11:49:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 11:49:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 11:49:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 11:49:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:49:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:49:59] [Rank 0] PRINT: step:4800/10000 val_loss:4.8183 total_sharp:1.5400e-03 L1_sharp:4.2967e-02 L2_sharp:5.1227e-02 L3_sharp:5.8728e-02 L4_sharp:7.3046e-02 L5_sharp:1.0447e-01 L6_sharp:1.4407e-01 L7_sharp:1.9368e-01 L8_sharp:2.4568e-01 L9_sharp:2.8246e-01 L10_sharp:3.9323e-01 L11_sharp:4.3919e-01 L12_sharp:1.5722e+00 total_fnorm:1.5688e+01 total_l1_linf:2.0608e+04 total_spectral:7.8438e+00 L1_fnorm:1.1523e-01 L2_fnorm:1.1475e-01 L3_fnorm:1.1475e-01 L4_fnorm:1.1426e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1377e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1328e-01 L9_fnorm:1.1426e-01 L10_fnorm:1.1426e-01 L11_fnorm:1.1475e-01 L12_fnorm:1.1475e-01 L1_l1linf:2.9297e-02 L2_l1linf:2.9053e-02 L3_l1linf:2.9175e-02 L4_l1linf:2.9541e-02 L5_l1linf:2.9053e-02 L6_l1linf:2.9785e-02 L7_l1linf:2.9175e-02 L8_l1linf:3.0273e-02 L9_l1linf:3.0029e-02 L10_l1linf:3.0029e-02 L11_l1linf:3.0762e-02 L12_l1linf:3.1494e-02 L1_spectral:1.6141e-03 L2_spectral:1.6172e-03 L3_spectral:1.6221e-03 L4_spectral:1.6152e-03 L5_spectral:1.6148e-03 L6_spectral:1.6075e-03 L7_spectral:1.6132e-03 L8_spectral:1.6070e-03 L9_spectral:1.6087e-03 L10_spectral:1.6188e-03 L11_spectral:1.6083e-03 L12_spectral:1.6189e-03 train_time:215219ms step_avg:44.84ms +[2025-09-11 11:49:59] [Rank 0] PRINT: step:4800/10000 
val_loss:4.8183 total_sharp:1.5400e-03 L1_sharp:4.2967e-02 L2_sharp:5.1227e-02 L3_sharp:5.8728e-02 L4_sharp:7.3046e-02 L5_sharp:1.0447e-01 L6_sharp:1.4407e-01 L7_sharp:1.9368e-01 L8_sharp:2.4568e-01 L9_sharp:2.8246e-01 L10_sharp:3.9323e-01 L11_sharp:4.3919e-01 L12_sharp:1.5722e+00 total_fnorm:1.5688e+01 total_l1_linf:2.0608e+04 total_spectral:7.8438e+00 L1_fnorm:1.1523e-01 L2_fnorm:1.1475e-01 L3_fnorm:1.1475e-01 L4_fnorm:1.1426e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1377e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1328e-01 L9_fnorm:1.1426e-01 L10_fnorm:1.1426e-01 L11_fnorm:1.1475e-01 L12_fnorm:1.1475e-01 L1_l1linf:2.9297e-02 L2_l1linf:2.9053e-02 L3_l1linf:2.9175e-02 L4_l1linf:2.9541e-02 L5_l1linf:2.9053e-02 L6_l1linf:2.9785e-02 L7_l1linf:2.9175e-02 L8_l1linf:3.0273e-02 L9_l1linf:3.0029e-02 L10_l1linf:3.0029e-02 L11_l1linf:3.0762e-02 L12_l1linf:3.1494e-02 L1_spectral:1.6141e-03 L2_spectral:1.6172e-03 L3_spectral:1.6221e-03 L4_spectral:1.6152e-03 L5_spectral:1.6148e-03 L6_spectral:1.6075e-03 L7_spectral:1.6132e-03 L8_spectral:1.6070e-03 L9_spectral:1.6087e-03 L10_spectral:1.6188e-03 L11_spectral:1.6083e-03 L12_spectral:1.6189e-03 train_time:215219ms step_avg:44.84ms +[2025-09-11 11:50:01] [Rank 0] step:4801/10000 train_time:217227ms step_avg:45.25ms +[2025-09-11 11:50:01] [Rank 0] step:4801/10000 train_time:217227ms step_avg:45.25ms +[2025-09-11 11:50:02] [Rank 0] step:4821/10000 train_time:218183ms step_avg:45.26ms +[2025-09-11 11:50:02] [Rank 0] step:4821/10000 train_time:218183ms step_avg:45.26ms +[2025-09-11 11:50:03] [Rank 0] step:4841/10000 train_time:218910ms step_avg:45.22ms +[2025-09-11 11:50:03] [Rank 0] step:4841/10000 train_time:218910ms step_avg:45.22ms +[2025-09-11 11:50:04] [Rank 0] step:4861/10000 train_time:219592ms step_avg:45.17ms +[2025-09-11 11:50:04] [Rank 0] step:4861/10000 train_time:219592ms step_avg:45.17ms +[2025-09-11 11:50:04] [Rank 0] step:4881/10000 train_time:220329ms step_avg:45.14ms +[2025-09-11 11:50:04] [Rank 0] step:4881/10000 
train_time:220329ms step_avg:45.14ms +[2025-09-11 11:50:05] [Rank 0] step:4901/10000 train_time:221009ms step_avg:45.09ms +[2025-09-11 11:50:05] [Rank 0] step:4901/10000 train_time:221009ms step_avg:45.09ms +[2025-09-11 11:50:06] [Rank 0] step:4921/10000 train_time:221689ms step_avg:45.05ms +[2025-09-11 11:50:06] [Rank 0] step:4921/10000 train_time:221689ms step_avg:45.05ms +[2025-09-11 11:50:06] [Rank 0] step:4941/10000 train_time:222368ms step_avg:45.00ms +[2025-09-11 11:50:06] [Rank 0] step:4941/10000 train_time:222368ms step_avg:45.00ms +[2025-09-11 11:50:07] [Rank 0] step:4961/10000 train_time:223047ms step_avg:44.96ms +[2025-09-11 11:50:07] [Rank 0] step:4961/10000 train_time:223047ms step_avg:44.96ms +[2025-09-11 11:50:08] [Rank 0] step:4981/10000 train_time:223727ms step_avg:44.92ms +[2025-09-11 11:50:08] [Rank 0] step:4981/10000 train_time:223727ms step_avg:44.92ms +[2025-09-11 11:50:08] [Rank 0] step:5001/10000 train_time:224407ms step_avg:44.87ms +[2025-09-11 11:50:08] [Rank 0] step:5001/10000 train_time:224407ms step_avg:44.87ms +[2025-09-11 11:50:09] [Rank 0] step:5021/10000 train_time:225085ms step_avg:44.83ms +[2025-09-11 11:50:09] [Rank 0] step:5021/10000 train_time:225085ms step_avg:44.83ms +[2025-09-11 11:50:10] [Rank 0] step:5041/10000 train_time:225763ms step_avg:44.79ms +[2025-09-11 11:50:10] [Rank 0] step:5041/10000 train_time:225763ms step_avg:44.79ms +[2025-09-11 11:50:10] [Rank 0] step:5061/10000 train_time:226442ms step_avg:44.74ms +[2025-09-11 11:50:10] [Rank 0] step:5061/10000 train_time:226442ms step_avg:44.74ms +[2025-09-11 11:50:11] [Rank 0] step:5081/10000 train_time:227121ms step_avg:44.70ms +[2025-09-11 11:50:11] [Rank 0] step:5081/10000 train_time:227121ms step_avg:44.70ms +[2025-09-11 11:50:12] [Rank 0] step:5101/10000 train_time:227799ms step_avg:44.66ms +[2025-09-11 11:50:12] [Rank 0] step:5101/10000 train_time:227799ms step_avg:44.66ms +[2025-09-11 11:50:12] [Rank 0] step:5121/10000 train_time:228479ms step_avg:44.62ms 
+[2025-09-11 11:50:12] [Rank 0] step:5121/10000 train_time:228479ms step_avg:44.62ms +[2025-09-11 11:50:13] [Rank 0] step:5141/10000 train_time:229158ms step_avg:44.57ms +[2025-09-11 11:50:13] [Rank 0] step:5141/10000 train_time:229158ms step_avg:44.57ms +[2025-09-11 11:50:14] [Rank 0] step:5161/10000 train_time:229837ms step_avg:44.53ms +[2025-09-11 11:50:14] [Rank 0] step:5161/10000 train_time:229837ms step_avg:44.53ms +[2025-09-11 11:50:15] [Rank 0] step:5181/10000 train_time:230516ms step_avg:44.49ms +[2025-09-11 11:50:15] [Rank 0] step:5181/10000 train_time:230516ms step_avg:44.49ms +[2025-09-11 11:50:15] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 11:50:15] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 11:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 11:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 11:50:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 11:50:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 11:50:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:50:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:50:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 11:50:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 11:50:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 11:50:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 11:50:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 11:50:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 11:50:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 11:50:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 11:50:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 11:50:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 11:50:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 11:50:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 11:50:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 11:50:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 11:50:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 11:50:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 11:50:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 11:50:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 11:50:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 11:50:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 11:50:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 11:50:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 11:50:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 11:50:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 11:50:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 11:50:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 11:50:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 11:50:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 11:50:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 11:50:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 11:50:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 11:50:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 11:50:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 11:50:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 11:50:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 11:50:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 11:50:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:50:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:50:26] [Rank 0] PRINT: step:5200/10000 val_loss:4.7771 total_sharp:1.8037e-03 L1_sharp:4.2824e-02 L2_sharp:5.1459e-02 L3_sharp:6.3314e-02 L4_sharp:7.6117e-02 L5_sharp:9.6691e-02 L6_sharp:1.3848e-01 L7_sharp:1.8376e-01 L8_sharp:2.2302e-01 L9_sharp:2.8674e-01 L10_sharp:3.9669e-01 L11_sharp:5.0849e-01 L12_sharp:1.4650e+00 total_fnorm:1.4438e+01 total_l1_linf:1.7920e+04 total_spectral:7.2188e+00 L1_fnorm:1.1475e-01 L2_fnorm:1.1475e-01 L3_fnorm:1.1475e-01 L4_fnorm:1.1426e-01 L5_fnorm:1.1377e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1377e-01 L9_fnorm:1.1426e-01 L10_fnorm:1.1426e-01 L11_fnorm:1.1426e-01 L12_fnorm:1.1475e-01 L1_l1linf:2.8809e-02 L2_l1linf:2.8687e-02 L3_l1linf:2.9053e-02 L4_l1linf:2.8687e-02 L5_l1linf:2.9175e-02 L6_l1linf:2.8931e-02 L7_l1linf:2.8809e-02 L8_l1linf:2.8687e-02 L9_l1linf:2.9053e-02 L10_l1linf:2.9419e-02 L11_l1linf:2.9907e-02 L12_l1linf:3.0151e-02 L1_spectral:1.6060e-03 L2_spectral:1.6203e-03 L3_spectral:1.6251e-03 L4_spectral:1.6084e-03 L5_spectral:1.5981e-03 L6_spectral:1.6060e-03 L7_spectral:1.6155e-03 L8_spectral:1.6009e-03 L9_spectral:1.6159e-03 L10_spectral:1.6066e-03 L11_spectral:1.6082e-03 L12_spectral:1.6054e-03 train_time:231181ms step_avg:44.46ms +[2025-09-11 11:50:26] [Rank 0] PRINT: step:5200/10000 val_loss:4.7771 total_sharp:1.8037e-03 L1_sharp:4.2824e-02 L2_sharp:5.1459e-02 L3_sharp:6.3314e-02 L4_sharp:7.6117e-02 L5_sharp:9.6691e-02 L6_sharp:1.3848e-01 L7_sharp:1.8376e-01 L8_sharp:2.2302e-01 L9_sharp:2.8674e-01 L10_sharp:3.9669e-01 L11_sharp:5.0849e-01 L12_sharp:1.4650e+00 total_fnorm:1.4438e+01 total_l1_linf:1.7920e+04 total_spectral:7.2188e+00 L1_fnorm:1.1475e-01 L2_fnorm:1.1475e-01 L3_fnorm:1.1475e-01 L4_fnorm:1.1426e-01 L5_fnorm:1.1377e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1377e-01 L9_fnorm:1.1426e-01 L10_fnorm:1.1426e-01 L11_fnorm:1.1426e-01 L12_fnorm:1.1475e-01 L1_l1linf:2.8809e-02 L2_l1linf:2.8687e-02 L3_l1linf:2.9053e-02 L4_l1linf:2.8687e-02 L5_l1linf:2.9175e-02 
L6_l1linf:2.8931e-02 L7_l1linf:2.8809e-02 L8_l1linf:2.8687e-02 L9_l1linf:2.9053e-02 L10_l1linf:2.9419e-02 L11_l1linf:2.9907e-02 L12_l1linf:3.0151e-02 L1_spectral:1.6060e-03 L2_spectral:1.6203e-03 L3_spectral:1.6251e-03 L4_spectral:1.6084e-03 L5_spectral:1.5981e-03 L6_spectral:1.6060e-03 L7_spectral:1.6155e-03 L8_spectral:1.6009e-03 L9_spectral:1.6159e-03 L10_spectral:1.6066e-03 L11_spectral:1.6082e-03 L12_spectral:1.6054e-03 train_time:231181ms step_avg:44.46ms +[2025-09-11 11:50:28] [Rank 0] step:5201/10000 train_time:233185ms step_avg:44.83ms +[2025-09-11 11:50:28] [Rank 0] step:5201/10000 train_time:233185ms step_avg:44.83ms +[2025-09-11 11:50:29] [Rank 0] step:5221/10000 train_time:233889ms step_avg:44.80ms +[2025-09-11 11:50:29] [Rank 0] step:5221/10000 train_time:233889ms step_avg:44.80ms +[2025-09-11 11:50:29] [Rank 0] step:5241/10000 train_time:234577ms step_avg:44.76ms +[2025-09-11 11:50:29] [Rank 0] step:5241/10000 train_time:234577ms step_avg:44.76ms +[2025-09-11 11:50:30] [Rank 0] step:5261/10000 train_time:235266ms step_avg:44.72ms +[2025-09-11 11:50:30] [Rank 0] step:5261/10000 train_time:235266ms step_avg:44.72ms +[2025-09-11 11:50:31] [Rank 0] step:5281/10000 train_time:235955ms step_avg:44.68ms +[2025-09-11 11:50:31] [Rank 0] step:5281/10000 train_time:235955ms step_avg:44.68ms +[2025-09-11 11:50:31] [Rank 0] step:5301/10000 train_time:236642ms step_avg:44.64ms +[2025-09-11 11:50:31] [Rank 0] step:5301/10000 train_time:236642ms step_avg:44.64ms +[2025-09-11 11:50:32] [Rank 0] step:5321/10000 train_time:237331ms step_avg:44.60ms +[2025-09-11 11:50:32] [Rank 0] step:5321/10000 train_time:237331ms step_avg:44.60ms +[2025-09-11 11:50:33] [Rank 0] step:5341/10000 train_time:238019ms step_avg:44.56ms +[2025-09-11 11:50:33] [Rank 0] step:5341/10000 train_time:238019ms step_avg:44.56ms +[2025-09-11 11:50:33] [Rank 0] step:5361/10000 train_time:238707ms step_avg:44.53ms +[2025-09-11 11:50:33] [Rank 0] step:5361/10000 train_time:238707ms step_avg:44.53ms 
+[2025-09-11 11:50:34] [Rank 0] step:5381/10000 train_time:239396ms step_avg:44.49ms +[2025-09-11 11:50:34] [Rank 0] step:5381/10000 train_time:239396ms step_avg:44.49ms +[2025-09-11 11:50:35] [Rank 0] step:5401/10000 train_time:240083ms step_avg:44.45ms +[2025-09-11 11:50:35] [Rank 0] step:5401/10000 train_time:240083ms step_avg:44.45ms +[2025-09-11 11:50:35] [Rank 0] step:5421/10000 train_time:240772ms step_avg:44.41ms +[2025-09-11 11:50:35] [Rank 0] step:5421/10000 train_time:240772ms step_avg:44.41ms +[2025-09-11 11:50:36] [Rank 0] step:5441/10000 train_time:241461ms step_avg:44.38ms +[2025-09-11 11:50:36] [Rank 0] step:5441/10000 train_time:241461ms step_avg:44.38ms +[2025-09-11 11:50:37] [Rank 0] step:5461/10000 train_time:242150ms step_avg:44.34ms +[2025-09-11 11:50:37] [Rank 0] step:5461/10000 train_time:242150ms step_avg:44.34ms +[2025-09-11 11:50:38] [Rank 0] step:5481/10000 train_time:242839ms step_avg:44.31ms +[2025-09-11 11:50:38] [Rank 0] step:5481/10000 train_time:242839ms step_avg:44.31ms +[2025-09-11 11:50:38] [Rank 0] step:5501/10000 train_time:243527ms step_avg:44.27ms +[2025-09-11 11:50:38] [Rank 0] step:5501/10000 train_time:243527ms step_avg:44.27ms +[2025-09-11 11:50:39] [Rank 0] step:5521/10000 train_time:244216ms step_avg:44.23ms +[2025-09-11 11:50:39] [Rank 0] step:5521/10000 train_time:244216ms step_avg:44.23ms +[2025-09-11 11:50:40] [Rank 0] step:5541/10000 train_time:244906ms step_avg:44.20ms +[2025-09-11 11:50:40] [Rank 0] step:5541/10000 train_time:244906ms step_avg:44.20ms +[2025-09-11 11:50:40] [Rank 0] step:5561/10000 train_time:245596ms step_avg:44.16ms +[2025-09-11 11:50:40] [Rank 0] step:5561/10000 train_time:245596ms step_avg:44.16ms +[2025-09-11 11:50:41] [Rank 0] step:5581/10000 train_time:246287ms step_avg:44.13ms +[2025-09-11 11:50:41] [Rank 0] step:5581/10000 train_time:246287ms step_avg:44.13ms +[2025-09-11 11:50:42] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 11:50:42] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 11:50:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 11:50:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 11:50:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 11:50:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 11:50:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:50:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:50:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 11:50:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 11:50:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 11:50:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 11:50:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 11:50:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 11:50:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 11:50:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 11:50:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 11:50:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 11:50:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 11:50:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 11:50:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 11:50:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 11:50:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 11:50:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 11:50:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 11:50:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 11:50:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 11:50:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 11:50:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 11:50:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 11:50:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 11:50:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 11:50:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 11:50:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 11:50:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 11:50:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 11:50:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 11:50:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 11:50:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 11:50:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 11:50:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 11:50:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 11:50:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 11:50:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 11:50:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:50:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:50:52] [Rank 0] PRINT: step:5600/10000 val_loss:4.7382 total_sharp:1.3720e-03 L1_sharp:3.4403e-02 L2_sharp:3.6982e-02 L3_sharp:4.6524e-02 L4_sharp:6.2006e-02 L5_sharp:8.6592e-02 L6_sharp:1.2466e-01 L7_sharp:1.7680e-01 L8_sharp:2.0145e-01 L9_sharp:2.3180e-01 L10_sharp:3.4724e-01 L11_sharp:3.6987e-01 L12_sharp:9.9750e-01 total_fnorm:1.4250e+01 total_l1_linf:1.8048e+04 total_spectral:7.1250e+00 L1_fnorm:1.1475e-01 L2_fnorm:1.1475e-01 L3_fnorm:1.1475e-01 L4_fnorm:1.1377e-01 L5_fnorm:1.1377e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1328e-01 L9_fnorm:1.1426e-01 L10_fnorm:1.1328e-01 L11_fnorm:1.1377e-01 L12_fnorm:1.1426e-01 L1_l1linf:2.8320e-02 L2_l1linf:2.8931e-02 L3_l1linf:2.8320e-02 L4_l1linf:2.8442e-02 L5_l1linf:2.9297e-02 L6_l1linf:2.9053e-02 L7_l1linf:2.9175e-02 L8_l1linf:2.8931e-02 L9_l1linf:2.9419e-02 L10_l1linf:2.9297e-02 L11_l1linf:3.0029e-02 L12_l1linf:3.0396e-02 L1_spectral:1.6173e-03 L2_spectral:1.6130e-03 L3_spectral:1.6229e-03 L4_spectral:1.6133e-03 L5_spectral:1.6151e-03 L6_spectral:1.6107e-03 L7_spectral:1.6235e-03 L8_spectral:1.6020e-03 L9_spectral:1.6134e-03 L10_spectral:1.6087e-03 L11_spectral:1.6180e-03 L12_spectral:1.6026e-03 train_time:246956ms step_avg:44.10ms +[2025-09-11 11:50:52] [Rank 0] PRINT: step:5600/10000 
val_loss:4.7382 total_sharp:1.3720e-03 L1_sharp:3.4403e-02 L2_sharp:3.6982e-02 L3_sharp:4.6524e-02 L4_sharp:6.2006e-02 L5_sharp:8.6592e-02 L6_sharp:1.2466e-01 L7_sharp:1.7680e-01 L8_sharp:2.0145e-01 L9_sharp:2.3180e-01 L10_sharp:3.4724e-01 L11_sharp:3.6987e-01 L12_sharp:9.9750e-01 total_fnorm:1.4250e+01 total_l1_linf:1.8048e+04 total_spectral:7.1250e+00 L1_fnorm:1.1475e-01 L2_fnorm:1.1475e-01 L3_fnorm:1.1475e-01 L4_fnorm:1.1377e-01 L5_fnorm:1.1377e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1328e-01 L9_fnorm:1.1426e-01 L10_fnorm:1.1328e-01 L11_fnorm:1.1377e-01 L12_fnorm:1.1426e-01 L1_l1linf:2.8320e-02 L2_l1linf:2.8931e-02 L3_l1linf:2.8320e-02 L4_l1linf:2.8442e-02 L5_l1linf:2.9297e-02 L6_l1linf:2.9053e-02 L7_l1linf:2.9175e-02 L8_l1linf:2.8931e-02 L9_l1linf:2.9419e-02 L10_l1linf:2.9297e-02 L11_l1linf:3.0029e-02 L12_l1linf:3.0396e-02 L1_spectral:1.6173e-03 L2_spectral:1.6130e-03 L3_spectral:1.6229e-03 L4_spectral:1.6133e-03 L5_spectral:1.6151e-03 L6_spectral:1.6107e-03 L7_spectral:1.6235e-03 L8_spectral:1.6020e-03 L9_spectral:1.6134e-03 L10_spectral:1.6087e-03 L11_spectral:1.6180e-03 L12_spectral:1.6026e-03 train_time:246956ms step_avg:44.10ms +[2025-09-11 11:50:54] [Rank 0] step:5601/10000 train_time:248970ms step_avg:44.45ms +[2025-09-11 11:50:54] [Rank 0] step:5601/10000 train_time:248970ms step_avg:44.45ms +[2025-09-11 11:50:55] [Rank 0] step:5621/10000 train_time:249685ms step_avg:44.42ms +[2025-09-11 11:50:55] [Rank 0] step:5621/10000 train_time:249685ms step_avg:44.42ms +[2025-09-11 11:50:56] [Rank 0] step:5641/10000 train_time:250375ms step_avg:44.38ms +[2025-09-11 11:50:56] [Rank 0] step:5641/10000 train_time:250375ms step_avg:44.38ms +[2025-09-11 11:50:56] [Rank 0] step:5661/10000 train_time:251064ms step_avg:44.35ms +[2025-09-11 11:50:56] [Rank 0] step:5661/10000 train_time:251064ms step_avg:44.35ms +[2025-09-11 11:50:57] [Rank 0] step:5681/10000 train_time:251753ms step_avg:44.31ms +[2025-09-11 11:50:57] [Rank 0] step:5681/10000 
train_time:251753ms step_avg:44.31ms +[2025-09-11 11:50:58] [Rank 0] step:5701/10000 train_time:252456ms step_avg:44.28ms +[2025-09-11 11:50:58] [Rank 0] step:5701/10000 train_time:252456ms step_avg:44.28ms +[2025-09-11 11:50:58] [Rank 0] step:5721/10000 train_time:253144ms step_avg:44.25ms +[2025-09-11 11:50:58] [Rank 0] step:5721/10000 train_time:253144ms step_avg:44.25ms +[2025-09-11 11:50:59] [Rank 0] step:5741/10000 train_time:253833ms step_avg:44.21ms +[2025-09-11 11:50:59] [Rank 0] step:5741/10000 train_time:253833ms step_avg:44.21ms +[2025-09-11 11:51:00] [Rank 0] step:5761/10000 train_time:254525ms step_avg:44.18ms +[2025-09-11 11:51:00] [Rank 0] step:5761/10000 train_time:254525ms step_avg:44.18ms +[2025-09-11 11:51:00] [Rank 0] step:5781/10000 train_time:255214ms step_avg:44.15ms +[2025-09-11 11:51:00] [Rank 0] step:5781/10000 train_time:255214ms step_avg:44.15ms +[2025-09-11 11:51:01] [Rank 0] step:5801/10000 train_time:255905ms step_avg:44.11ms +[2025-09-11 11:51:01] [Rank 0] step:5801/10000 train_time:255905ms step_avg:44.11ms +[2025-09-11 11:51:02] [Rank 0] step:5821/10000 train_time:256594ms step_avg:44.08ms +[2025-09-11 11:51:02] [Rank 0] step:5821/10000 train_time:256594ms step_avg:44.08ms +[2025-09-11 11:51:03] [Rank 0] step:5841/10000 train_time:257586ms step_avg:44.10ms +[2025-09-11 11:51:03] [Rank 0] step:5841/10000 train_time:257586ms step_avg:44.10ms +[2025-09-11 11:51:04] [Rank 0] step:5861/10000 train_time:258274ms step_avg:44.07ms +[2025-09-11 11:51:04] [Rank 0] step:5861/10000 train_time:258274ms step_avg:44.07ms +[2025-09-11 11:51:04] [Rank 0] step:5881/10000 train_time:258964ms step_avg:44.03ms +[2025-09-11 11:51:04] [Rank 0] step:5881/10000 train_time:258964ms step_avg:44.03ms +[2025-09-11 11:51:05] [Rank 0] step:5901/10000 train_time:259799ms step_avg:44.03ms +[2025-09-11 11:51:05] [Rank 0] step:5901/10000 train_time:259799ms step_avg:44.03ms +[2025-09-11 11:51:06] [Rank 0] step:5921/10000 train_time:260598ms step_avg:44.01ms 
+[2025-09-11 11:51:06] [Rank 0] step:5921/10000 train_time:260598ms step_avg:44.01ms +[2025-09-11 11:51:07] [Rank 0] step:5941/10000 train_time:261289ms step_avg:43.98ms +[2025-09-11 11:51:07] [Rank 0] step:5941/10000 train_time:261289ms step_avg:43.98ms +[2025-09-11 11:51:07] [Rank 0] step:5961/10000 train_time:261979ms step_avg:43.95ms +[2025-09-11 11:51:07] [Rank 0] step:5961/10000 train_time:261979ms step_avg:43.95ms +[2025-09-11 11:51:08] [Rank 0] step:5981/10000 train_time:262669ms step_avg:43.92ms +[2025-09-11 11:51:08] [Rank 0] step:5981/10000 train_time:262669ms step_avg:43.92ms +[2025-09-11 11:51:09] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 11:51:09] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 11:51:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 11:51:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 11:51:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 11:51:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 11:51:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:51:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:51:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 11:51:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 11:51:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 11:51:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 11:51:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 11:51:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 11:51:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 11:51:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 11:51:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 11:51:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 11:51:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 11:51:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 11:51:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 11:51:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 11:51:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 11:51:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 11:51:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 11:51:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 11:51:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 11:51:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 11:51:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 11:51:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 11:51:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 11:51:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 11:51:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 11:51:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 11:51:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 11:51:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 11:51:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 11:51:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 11:51:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 11:51:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 11:51:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 11:51:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 11:51:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 11:51:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 11:51:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:51:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:51:20] [Rank 0] PRINT: step:6000/10000 val_loss:4.7005 total_sharp:1.0614e-03 L1_sharp:2.4484e-02 L2_sharp:2.1902e-02 L3_sharp:2.9715e-02 L4_sharp:4.2605e-02 L5_sharp:5.8660e-02 L6_sharp:9.4362e-02 L7_sharp:1.2920e-01 L8_sharp:1.9094e-01 L9_sharp:2.3611e-01 L10_sharp:3.2055e-01 L11_sharp:3.9292e-01 L12_sharp:1.0084e+00 total_fnorm:1.4938e+01 total_l1_linf:1.8688e+04 total_spectral:7.5000e+00 L1_fnorm:1.1426e-01 L2_fnorm:1.1475e-01 L3_fnorm:1.1475e-01 L4_fnorm:1.1426e-01 L5_fnorm:1.1377e-01 L6_fnorm:1.1377e-01 L7_fnorm:1.1377e-01 L8_fnorm:1.1328e-01 L9_fnorm:1.1426e-01 L10_fnorm:1.1328e-01 L11_fnorm:1.1328e-01 L12_fnorm:1.1426e-01 L1_l1linf:2.8198e-02 L2_l1linf:2.7954e-02 L3_l1linf:2.8564e-02 L4_l1linf:2.7954e-02 L5_l1linf:2.8076e-02 L6_l1linf:2.8198e-02 L7_l1linf:2.7954e-02 L8_l1linf:2.8198e-02 L9_l1linf:2.9053e-02 L10_l1linf:2.8198e-02 L11_l1linf:2.8564e-02 L12_l1linf:3.0151e-02 L1_spectral:1.6127e-03 L2_spectral:1.6321e-03 L3_spectral:1.6150e-03 L4_spectral:1.6163e-03 L5_spectral:1.6052e-03 L6_spectral:1.6000e-03 L7_spectral:1.6047e-03 L8_spectral:1.6092e-03 L9_spectral:1.6097e-03 L10_spectral:1.6054e-03 L11_spectral:1.6153e-03 L12_spectral:1.6040e-03 train_time:263343ms step_avg:43.89ms +[2025-09-11 11:51:20] [Rank 0] PRINT: step:6000/10000 val_loss:4.7005 total_sharp:1.0614e-03 L1_sharp:2.4484e-02 L2_sharp:2.1902e-02 L3_sharp:2.9715e-02 L4_sharp:4.2605e-02 L5_sharp:5.8660e-02 L6_sharp:9.4362e-02 L7_sharp:1.2920e-01 L8_sharp:1.9094e-01 L9_sharp:2.3611e-01 L10_sharp:3.2055e-01 L11_sharp:3.9292e-01 L12_sharp:1.0084e+00 total_fnorm:1.4938e+01 total_l1_linf:1.8688e+04 total_spectral:7.5000e+00 L1_fnorm:1.1426e-01 L2_fnorm:1.1475e-01 L3_fnorm:1.1475e-01 L4_fnorm:1.1426e-01 L5_fnorm:1.1377e-01 L6_fnorm:1.1377e-01 L7_fnorm:1.1377e-01 L8_fnorm:1.1328e-01 L9_fnorm:1.1426e-01 L10_fnorm:1.1328e-01 L11_fnorm:1.1328e-01 L12_fnorm:1.1426e-01 L1_l1linf:2.8198e-02 L2_l1linf:2.7954e-02 L3_l1linf:2.8564e-02 L4_l1linf:2.7954e-02 L5_l1linf:2.8076e-02 
L6_l1linf:2.8198e-02 L7_l1linf:2.7954e-02 L8_l1linf:2.8198e-02 L9_l1linf:2.9053e-02 L10_l1linf:2.8198e-02 L11_l1linf:2.8564e-02 L12_l1linf:3.0151e-02 L1_spectral:1.6127e-03 L2_spectral:1.6321e-03 L3_spectral:1.6150e-03 L4_spectral:1.6163e-03 L5_spectral:1.6052e-03 L6_spectral:1.6000e-03 L7_spectral:1.6047e-03 L8_spectral:1.6092e-03 L9_spectral:1.6097e-03 L10_spectral:1.6054e-03 L11_spectral:1.6153e-03 L12_spectral:1.6040e-03 train_time:263343ms step_avg:43.89ms +[2025-09-11 11:51:22] [Rank 0] step:6001/10000 train_time:265373ms step_avg:44.22ms +[2025-09-11 11:51:22] [Rank 0] step:6001/10000 train_time:265373ms step_avg:44.22ms +[2025-09-11 11:51:23] [Rank 0] step:6021/10000 train_time:266085ms step_avg:44.19ms +[2025-09-11 11:51:23] [Rank 0] step:6021/10000 train_time:266085ms step_avg:44.19ms +[2025-09-11 11:51:23] [Rank 0] step:6041/10000 train_time:266779ms step_avg:44.16ms +[2025-09-11 11:51:23] [Rank 0] step:6041/10000 train_time:266779ms step_avg:44.16ms +[2025-09-11 11:51:24] [Rank 0] step:6061/10000 train_time:267469ms step_avg:44.13ms +[2025-09-11 11:51:24] [Rank 0] step:6061/10000 train_time:267469ms step_avg:44.13ms +[2025-09-11 11:51:25] [Rank 0] step:6081/10000 train_time:268162ms step_avg:44.10ms +[2025-09-11 11:51:25] [Rank 0] step:6081/10000 train_time:268162ms step_avg:44.10ms +[2025-09-11 11:51:25] [Rank 0] step:6101/10000 train_time:268853ms step_avg:44.07ms +[2025-09-11 11:51:25] [Rank 0] step:6101/10000 train_time:268853ms step_avg:44.07ms +[2025-09-11 11:51:26] [Rank 0] step:6121/10000 train_time:269547ms step_avg:44.04ms +[2025-09-11 11:51:26] [Rank 0] step:6121/10000 train_time:269547ms step_avg:44.04ms +[2025-09-11 11:51:27] [Rank 0] step:6141/10000 train_time:270239ms step_avg:44.01ms +[2025-09-11 11:51:27] [Rank 0] step:6141/10000 train_time:270239ms step_avg:44.01ms +[2025-09-11 11:51:27] [Rank 0] step:6161/10000 train_time:270930ms step_avg:43.97ms +[2025-09-11 11:51:27] [Rank 0] step:6161/10000 train_time:270930ms step_avg:43.97ms 
+[2025-09-11 11:51:28] [Rank 0] step:6181/10000 train_time:271620ms step_avg:43.94ms +[2025-09-11 11:51:28] [Rank 0] step:6181/10000 train_time:271620ms step_avg:43.94ms +[2025-09-11 11:51:29] [Rank 0] step:6201/10000 train_time:272312ms step_avg:43.91ms +[2025-09-11 11:51:29] [Rank 0] step:6201/10000 train_time:272312ms step_avg:43.91ms +[2025-09-11 11:51:29] [Rank 0] step:6221/10000 train_time:273005ms step_avg:43.88ms +[2025-09-11 11:51:29] [Rank 0] step:6221/10000 train_time:273005ms step_avg:43.88ms +[2025-09-11 11:51:30] [Rank 0] step:6241/10000 train_time:273697ms step_avg:43.85ms +[2025-09-11 11:51:30] [Rank 0] step:6241/10000 train_time:273697ms step_avg:43.85ms +[2025-09-11 11:51:31] [Rank 0] step:6261/10000 train_time:274388ms step_avg:43.82ms +[2025-09-11 11:51:31] [Rank 0] step:6261/10000 train_time:274388ms step_avg:43.82ms +[2025-09-11 11:51:32] [Rank 0] step:6281/10000 train_time:275080ms step_avg:43.80ms +[2025-09-11 11:51:32] [Rank 0] step:6281/10000 train_time:275080ms step_avg:43.80ms +[2025-09-11 11:51:32] [Rank 0] step:6301/10000 train_time:275771ms step_avg:43.77ms +[2025-09-11 11:51:32] [Rank 0] step:6301/10000 train_time:275771ms step_avg:43.77ms +[2025-09-11 11:51:33] [Rank 0] step:6321/10000 train_time:276464ms step_avg:43.74ms +[2025-09-11 11:51:33] [Rank 0] step:6321/10000 train_time:276464ms step_avg:43.74ms +[2025-09-11 11:51:34] [Rank 0] step:6341/10000 train_time:277157ms step_avg:43.71ms +[2025-09-11 11:51:34] [Rank 0] step:6341/10000 train_time:277157ms step_avg:43.71ms +[2025-09-11 11:51:34] [Rank 0] step:6361/10000 train_time:277848ms step_avg:43.68ms +[2025-09-11 11:51:34] [Rank 0] step:6361/10000 train_time:277848ms step_avg:43.68ms +[2025-09-11 11:51:35] [Rank 0] step:6381/10000 train_time:278539ms step_avg:43.65ms +[2025-09-11 11:51:35] [Rank 0] step:6381/10000 train_time:278539ms step_avg:43.65ms +[2025-09-11 11:51:36] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 11:51:36] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 11:51:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 11:51:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 11:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 11:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 11:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 11:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 11:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 11:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 11:51:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 11:51:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 11:51:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 11:51:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 11:51:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 11:51:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 11:51:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 11:51:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 11:51:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 11:51:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 11:51:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 11:51:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 11:51:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 11:51:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 11:51:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 11:51:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 11:51:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 11:51:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 11:51:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 11:51:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 11:51:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 11:51:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 11:51:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 11:51:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 11:51:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 11:51:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 11:51:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 11:51:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 11:51:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 11:51:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 11:51:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 11:51:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 11:51:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:51:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:51:47] [Rank 0] PRINT: step:6400/10000 val_loss:4.6662 total_sharp:1.4126e-03 L1_sharp:3.0921e-02 L2_sharp:3.3739e-02 L3_sharp:3.5453e-02 L4_sharp:4.8125e-02 L5_sharp:7.0310e-02 L6_sharp:1.0031e-01 L7_sharp:1.3327e-01 L8_sharp:1.8226e-01 L9_sharp:2.4070e-01 L10_sharp:3.4743e-01 L11_sharp:4.7818e-01 L12_sharp:1.4338e+00 total_fnorm:1.2562e+01 total_l1_linf:1.4976e+04 total_spectral:6.2812e+00 L1_fnorm:1.0010e-01 L2_fnorm:1.0059e-01 L3_fnorm:1.0010e-01 L4_fnorm:1.0010e-01 L5_fnorm:9.9609e-02 L6_fnorm:9.9609e-02 L7_fnorm:1.0010e-01 L8_fnorm:9.9609e-02 L9_fnorm:9.9609e-02 L10_fnorm:9.9609e-02 L11_fnorm:9.9609e-02 L12_fnorm:9.9609e-02 L1_l1linf:2.3682e-02 L2_l1linf:2.3804e-02 L3_l1linf:2.3926e-02 L4_l1linf:2.3926e-02 L5_l1linf:2.4048e-02 L6_l1linf:2.3804e-02 L7_l1linf:2.4048e-02 L8_l1linf:2.4048e-02 L9_l1linf:2.4048e-02 L10_l1linf:2.4658e-02 L11_l1linf:2.5269e-02 L12_l1linf:2.5879e-02 L1_spectral:1.4501e-03 L2_spectral:1.4481e-03 L3_spectral:1.4458e-03 L4_spectral:1.4528e-03 L5_spectral:1.4543e-03 L6_spectral:1.4423e-03 L7_spectral:1.4498e-03 L8_spectral:1.4539e-03 L9_spectral:1.4493e-03 L10_spectral:1.4386e-03 L11_spectral:1.4474e-03 L12_spectral:1.4344e-03 train_time:279211ms step_avg:43.63ms +[2025-09-11 11:51:47] [Rank 0] PRINT: step:6400/10000 
val_loss:4.6662 total_sharp:1.4126e-03 L1_sharp:3.0921e-02 L2_sharp:3.3739e-02 L3_sharp:3.5453e-02 L4_sharp:4.8125e-02 L5_sharp:7.0310e-02 L6_sharp:1.0031e-01 L7_sharp:1.3327e-01 L8_sharp:1.8226e-01 L9_sharp:2.4070e-01 L10_sharp:3.4743e-01 L11_sharp:4.7818e-01 L12_sharp:1.4338e+00 total_fnorm:1.2562e+01 total_l1_linf:1.4976e+04 total_spectral:6.2812e+00 L1_fnorm:1.0010e-01 L2_fnorm:1.0059e-01 L3_fnorm:1.0010e-01 L4_fnorm:1.0010e-01 L5_fnorm:9.9609e-02 L6_fnorm:9.9609e-02 L7_fnorm:1.0010e-01 L8_fnorm:9.9609e-02 L9_fnorm:9.9609e-02 L10_fnorm:9.9609e-02 L11_fnorm:9.9609e-02 L12_fnorm:9.9609e-02 L1_l1linf:2.3682e-02 L2_l1linf:2.3804e-02 L3_l1linf:2.3926e-02 L4_l1linf:2.3926e-02 L5_l1linf:2.4048e-02 L6_l1linf:2.3804e-02 L7_l1linf:2.4048e-02 L8_l1linf:2.4048e-02 L9_l1linf:2.4048e-02 L10_l1linf:2.4658e-02 L11_l1linf:2.5269e-02 L12_l1linf:2.5879e-02 L1_spectral:1.4501e-03 L2_spectral:1.4481e-03 L3_spectral:1.4458e-03 L4_spectral:1.4528e-03 L5_spectral:1.4543e-03 L6_spectral:1.4423e-03 L7_spectral:1.4498e-03 L8_spectral:1.4539e-03 L9_spectral:1.4493e-03 L10_spectral:1.4386e-03 L11_spectral:1.4474e-03 L12_spectral:1.4344e-03 train_time:279211ms step_avg:43.63ms +[2025-09-11 11:51:49] [Rank 0] step:6401/10000 train_time:281283ms step_avg:43.94ms +[2025-09-11 11:51:49] [Rank 0] step:6401/10000 train_time:281283ms step_avg:43.94ms +[2025-09-11 11:51:49] [Rank 0] step:6421/10000 train_time:282006ms step_avg:43.92ms +[2025-09-11 11:51:49] [Rank 0] step:6421/10000 train_time:282006ms step_avg:43.92ms +[2025-09-11 11:51:50] [Rank 0] step:6441/10000 train_time:282698ms step_avg:43.89ms +[2025-09-11 11:51:50] [Rank 0] step:6441/10000 train_time:282698ms step_avg:43.89ms +[2025-09-11 11:51:51] [Rank 0] step:6461/10000 train_time:283391ms step_avg:43.86ms +[2025-09-11 11:51:51] [Rank 0] step:6461/10000 train_time:283391ms step_avg:43.86ms +[2025-09-11 11:51:52] [Rank 0] step:6481/10000 train_time:284084ms step_avg:43.83ms +[2025-09-11 11:51:52] [Rank 0] step:6481/10000 
train_time:284084ms step_avg:43.83ms +[2025-09-11 11:51:52] [Rank 0] step:6501/10000 train_time:284779ms step_avg:43.81ms +[2025-09-11 11:51:52] [Rank 0] step:6501/10000 train_time:284779ms step_avg:43.81ms +[2025-09-11 11:51:53] [Rank 0] step:6521/10000 train_time:285472ms step_avg:43.78ms +[2025-09-11 11:51:53] [Rank 0] step:6521/10000 train_time:285472ms step_avg:43.78ms +[2025-09-11 11:51:54] [Rank 0] step:6541/10000 train_time:286162ms step_avg:43.75ms +[2025-09-11 11:51:54] [Rank 0] step:6541/10000 train_time:286162ms step_avg:43.75ms +[2025-09-11 11:51:54] [Rank 0] step:6561/10000 train_time:286853ms step_avg:43.72ms +[2025-09-11 11:51:54] [Rank 0] step:6561/10000 train_time:286853ms step_avg:43.72ms +[2025-09-11 11:51:55] [Rank 0] step:6581/10000 train_time:287545ms step_avg:43.69ms +[2025-09-11 11:51:55] [Rank 0] step:6581/10000 train_time:287545ms step_avg:43.69ms +[2025-09-11 11:51:56] [Rank 0] step:6601/10000 train_time:288237ms step_avg:43.67ms +[2025-09-11 11:51:56] [Rank 0] step:6601/10000 train_time:288237ms step_avg:43.67ms +[2025-09-11 11:51:56] [Rank 0] step:6621/10000 train_time:288927ms step_avg:43.64ms +[2025-09-11 11:51:56] [Rank 0] step:6621/10000 train_time:288927ms step_avg:43.64ms +[2025-09-11 11:51:57] [Rank 0] step:6641/10000 train_time:289621ms step_avg:43.61ms +[2025-09-11 11:51:57] [Rank 0] step:6641/10000 train_time:289621ms step_avg:43.61ms +[2025-09-11 11:51:58] [Rank 0] step:6661/10000 train_time:290314ms step_avg:43.58ms +[2025-09-11 11:51:58] [Rank 0] step:6661/10000 train_time:290314ms step_avg:43.58ms +[2025-09-11 11:51:58] [Rank 0] step:6681/10000 train_time:291013ms step_avg:43.56ms +[2025-09-11 11:51:58] [Rank 0] step:6681/10000 train_time:291013ms step_avg:43.56ms +[2025-09-11 11:51:59] [Rank 0] step:6701/10000 train_time:291712ms step_avg:43.53ms +[2025-09-11 11:51:59] [Rank 0] step:6701/10000 train_time:291712ms step_avg:43.53ms +[2025-09-11 11:52:00] [Rank 0] step:6721/10000 train_time:292412ms step_avg:43.51ms 
+[2025-09-11 11:52:00] [Rank 0] step:6721/10000 train_time:292412ms step_avg:43.51ms +[2025-09-11 11:52:01] [Rank 0] step:6741/10000 train_time:293111ms step_avg:43.48ms +[2025-09-11 11:52:01] [Rank 0] step:6741/10000 train_time:293111ms step_avg:43.48ms +[2025-09-11 11:52:01] [Rank 0] step:6761/10000 train_time:293809ms step_avg:43.46ms +[2025-09-11 11:52:01] [Rank 0] step:6761/10000 train_time:293809ms step_avg:43.46ms +[2025-09-11 11:52:02] [Rank 0] step:6781/10000 train_time:294508ms step_avg:43.43ms +[2025-09-11 11:52:02] [Rank 0] step:6781/10000 train_time:294508ms step_avg:43.43ms +[2025-09-11 11:52:03] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 11:52:03] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 11:52:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 11:52:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 11:52:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 11:52:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 11:52:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:52:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:52:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 11:52:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 11:52:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 11:52:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 11:52:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 11:52:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 11:52:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 11:52:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 11:52:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 11:52:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 11:52:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 11:52:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 11:52:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 11:52:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 11:52:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 11:52:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 11:52:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 11:52:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 11:52:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 11:52:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 11:52:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 11:52:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 11:52:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 11:52:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 11:52:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 11:52:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 11:52:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 11:52:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 11:52:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 11:52:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 11:52:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 11:52:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 11:52:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 11:52:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 11:52:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 11:52:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 11:52:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:52:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:52:17] [Rank 0] PRINT: step:6800/10000 val_loss:4.6356 total_sharp:9.1509e-04 L1_sharp:2.6643e-02 L2_sharp:3.1221e-02 L3_sharp:3.9703e-02 L4_sharp:5.5849e-02 L5_sharp:7.3358e-02 L6_sharp:1.1711e-01 L7_sharp:1.6271e-01 L8_sharp:1.8286e-01 L9_sharp:2.1439e-01 L10_sharp:3.1775e-01 L11_sharp:3.6846e-01 L12_sharp:8.5547e-01 total_fnorm:1.2000e+01 total_l1_linf:1.4080e+04 total_spectral:6.0000e+00 L1_fnorm:8.5938e-02 L2_fnorm:8.5938e-02 L3_fnorm:8.5938e-02 L4_fnorm:8.5449e-02 L5_fnorm:8.5449e-02 L6_fnorm:8.5449e-02 L7_fnorm:8.5449e-02 L8_fnorm:8.4961e-02 L9_fnorm:8.5449e-02 L10_fnorm:8.4961e-02 L11_fnorm:8.4473e-02 L12_fnorm:8.4473e-02 L1_l1linf:1.9165e-02 L2_l1linf:1.9409e-02 L3_l1linf:1.9775e-02 L4_l1linf:1.9653e-02 L5_l1linf:1.9653e-02 L6_l1linf:2.0020e-02 L7_l1linf:2.0142e-02 L8_l1linf:1.9897e-02 L9_l1linf:1.9653e-02 L10_l1linf:2.0142e-02 L11_l1linf:2.0020e-02 L12_l1linf:2.1240e-02 L1_spectral:1.2979e-03 L2_spectral:1.3013e-03 L3_spectral:1.2960e-03 L4_spectral:1.2939e-03 L5_spectral:1.2969e-03 L6_spectral:1.2979e-03 L7_spectral:1.2954e-03 L8_spectral:1.2927e-03 L9_spectral:1.3030e-03 L10_spectral:1.2956e-03 L11_spectral:1.2656e-03 L12_spectral:1.2548e-03 train_time:295186ms step_avg:43.41ms +[2025-09-11 11:52:17] [Rank 0] PRINT: step:6800/10000 val_loss:4.6356 total_sharp:9.1509e-04 L1_sharp:2.6643e-02 L2_sharp:3.1221e-02 L3_sharp:3.9703e-02 L4_sharp:5.5849e-02 L5_sharp:7.3358e-02 L6_sharp:1.1711e-01 L7_sharp:1.6271e-01 L8_sharp:1.8286e-01 L9_sharp:2.1439e-01 L10_sharp:3.1775e-01 L11_sharp:3.6846e-01 L12_sharp:8.5547e-01 total_fnorm:1.2000e+01 total_l1_linf:1.4080e+04 total_spectral:6.0000e+00 L1_fnorm:8.5938e-02 L2_fnorm:8.5938e-02 L3_fnorm:8.5938e-02 L4_fnorm:8.5449e-02 L5_fnorm:8.5449e-02 L6_fnorm:8.5449e-02 L7_fnorm:8.5449e-02 L8_fnorm:8.4961e-02 L9_fnorm:8.5449e-02 L10_fnorm:8.4961e-02 L11_fnorm:8.4473e-02 L12_fnorm:8.4473e-02 L1_l1linf:1.9165e-02 L2_l1linf:1.9409e-02 L3_l1linf:1.9775e-02 L4_l1linf:1.9653e-02 L5_l1linf:1.9653e-02 
L6_l1linf:2.0020e-02 L7_l1linf:2.0142e-02 L8_l1linf:1.9897e-02 L9_l1linf:1.9653e-02 L10_l1linf:2.0142e-02 L11_l1linf:2.0020e-02 L12_l1linf:2.1240e-02 L1_spectral:1.2979e-03 L2_spectral:1.3013e-03 L3_spectral:1.2960e-03 L4_spectral:1.2939e-03 L5_spectral:1.2969e-03 L6_spectral:1.2979e-03 L7_spectral:1.2954e-03 L8_spectral:1.2927e-03 L9_spectral:1.3030e-03 L10_spectral:1.2956e-03 L11_spectral:1.2656e-03 L12_spectral:1.2548e-03 train_time:295186ms step_avg:43.41ms +[2025-09-11 11:52:19] [Rank 0] step:6801/10000 train_time:297128ms step_avg:43.69ms +[2025-09-11 11:52:19] [Rank 0] step:6801/10000 train_time:297128ms step_avg:43.69ms +[2025-09-11 11:52:20] [Rank 0] step:6821/10000 train_time:297838ms step_avg:43.66ms +[2025-09-11 11:52:20] [Rank 0] step:6821/10000 train_time:297838ms step_avg:43.66ms +[2025-09-11 11:52:20] [Rank 0] step:6841/10000 train_time:298541ms step_avg:43.64ms +[2025-09-11 11:52:20] [Rank 0] step:6841/10000 train_time:298541ms step_avg:43.64ms +[2025-09-11 11:52:21] [Rank 0] step:6861/10000 train_time:299243ms step_avg:43.62ms +[2025-09-11 11:52:21] [Rank 0] step:6861/10000 train_time:299243ms step_avg:43.62ms +[2025-09-11 11:52:22] [Rank 0] step:6881/10000 train_time:299945ms step_avg:43.59ms +[2025-09-11 11:52:22] [Rank 0] step:6881/10000 train_time:299945ms step_avg:43.59ms +[2025-09-11 11:52:22] [Rank 0] step:6901/10000 train_time:300643ms step_avg:43.57ms +[2025-09-11 11:52:22] [Rank 0] step:6901/10000 train_time:300643ms step_avg:43.57ms +[2025-09-11 11:52:23] [Rank 0] step:6921/10000 train_time:301342ms step_avg:43.54ms +[2025-09-11 11:52:23] [Rank 0] step:6921/10000 train_time:301342ms step_avg:43.54ms +[2025-09-11 11:52:24] [Rank 0] step:6941/10000 train_time:302042ms step_avg:43.52ms +[2025-09-11 11:52:24] [Rank 0] step:6941/10000 train_time:302042ms step_avg:43.52ms +[2025-09-11 11:52:24] [Rank 0] step:6961/10000 train_time:302742ms step_avg:43.49ms +[2025-09-11 11:52:24] [Rank 0] step:6961/10000 train_time:302742ms step_avg:43.49ms 
+[2025-09-11 11:52:25] [Rank 0] step:6981/10000 train_time:303443ms step_avg:43.47ms +[2025-09-11 11:52:25] [Rank 0] step:6981/10000 train_time:303443ms step_avg:43.47ms +[2025-09-11 11:52:26] [Rank 0] step:7001/10000 train_time:304143ms step_avg:43.44ms +[2025-09-11 11:52:26] [Rank 0] step:7001/10000 train_time:304143ms step_avg:43.44ms +[2025-09-11 11:52:27] [Rank 0] step:7021/10000 train_time:304842ms step_avg:43.42ms +[2025-09-11 11:52:27] [Rank 0] step:7021/10000 train_time:304842ms step_avg:43.42ms +[2025-09-11 11:52:27] [Rank 0] step:7041/10000 train_time:305541ms step_avg:43.39ms +[2025-09-11 11:52:27] [Rank 0] step:7041/10000 train_time:305541ms step_avg:43.39ms +[2025-09-11 11:52:28] [Rank 0] step:7061/10000 train_time:306241ms step_avg:43.37ms +[2025-09-11 11:52:28] [Rank 0] step:7061/10000 train_time:306241ms step_avg:43.37ms +[2025-09-11 11:52:29] [Rank 0] step:7081/10000 train_time:306940ms step_avg:43.35ms +[2025-09-11 11:52:29] [Rank 0] step:7081/10000 train_time:306940ms step_avg:43.35ms +[2025-09-11 11:52:29] [Rank 0] step:7101/10000 train_time:307640ms step_avg:43.32ms +[2025-09-11 11:52:29] [Rank 0] step:7101/10000 train_time:307640ms step_avg:43.32ms +[2025-09-11 11:52:30] [Rank 0] step:7121/10000 train_time:308341ms step_avg:43.30ms +[2025-09-11 11:52:30] [Rank 0] step:7121/10000 train_time:308341ms step_avg:43.30ms +[2025-09-11 11:52:31] [Rank 0] step:7141/10000 train_time:309041ms step_avg:43.28ms +[2025-09-11 11:52:31] [Rank 0] step:7141/10000 train_time:309041ms step_avg:43.28ms +[2025-09-11 11:52:31] [Rank 0] step:7161/10000 train_time:309742ms step_avg:43.25ms +[2025-09-11 11:52:31] [Rank 0] step:7161/10000 train_time:309742ms step_avg:43.25ms +[2025-09-11 11:52:32] [Rank 0] step:7181/10000 train_time:310441ms step_avg:43.23ms +[2025-09-11 11:52:32] [Rank 0] step:7181/10000 train_time:310441ms step_avg:43.23ms +[2025-09-11 11:52:33] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 11:52:33] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 11:52:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 11:52:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 11:52:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 11:52:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 11:52:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:52:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:52:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 11:52:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 11:52:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 11:52:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 11:52:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 11:52:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 11:52:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 11:52:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 11:52:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 11:52:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 11:52:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 11:52:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 11:52:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 11:52:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 11:52:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 11:52:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 11:52:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 11:52:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 11:52:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 11:52:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 11:52:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 11:52:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 11:52:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 11:52:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 11:52:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 11:52:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 11:52:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 11:52:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 11:52:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 11:52:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 11:52:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 11:52:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 11:52:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 11:52:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 11:52:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 11:52:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 11:52:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:52:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:52:44] [Rank 0] PRINT: step:7200/10000 val_loss:4.6061 total_sharp:9.8331e-04 L1_sharp:2.7466e-02 L2_sharp:3.3300e-02 L3_sharp:4.5432e-02 L4_sharp:5.7839e-02 L5_sharp:8.1048e-02 L6_sharp:1.1206e-01 L7_sharp:1.4449e-01 L8_sharp:1.6512e-01 L9_sharp:2.3214e-01 L10_sharp:3.1871e-01 L11_sharp:3.7597e-01 L12_sharp:1.0997e+00 total_fnorm:9.9375e+00 total_l1_linf:1.0752e+04 total_spectral:4.9688e+00 L1_fnorm:7.2754e-02 L2_fnorm:7.2754e-02 L3_fnorm:7.2754e-02 L4_fnorm:7.2754e-02 L5_fnorm:7.2266e-02 L6_fnorm:7.2266e-02 L7_fnorm:7.2266e-02 L8_fnorm:7.2266e-02 L9_fnorm:7.2266e-02 L10_fnorm:7.2266e-02 L11_fnorm:7.1777e-02 L12_fnorm:7.1289e-02 L1_l1linf:1.6113e-02 L2_l1linf:1.6235e-02 L3_l1linf:1.6113e-02 L4_l1linf:1.5991e-02 L5_l1linf:1.6235e-02 L6_l1linf:1.6235e-02 L7_l1linf:1.6602e-02 L8_l1linf:1.5991e-02 L9_l1linf:1.6479e-02 L10_l1linf:1.6357e-02 L11_l1linf:1.6113e-02 L12_l1linf:1.7212e-02 L1_spectral:1.1514e-03 L2_spectral:1.1609e-03 L3_spectral:1.1472e-03 L4_spectral:1.1466e-03 L5_spectral:1.1485e-03 L6_spectral:1.1456e-03 L7_spectral:1.1486e-03 L8_spectral:1.1311e-03 L9_spectral:1.1358e-03 L10_spectral:1.1256e-03 L11_spectral:1.1166e-03 L12_spectral:1.0869e-03 train_time:311121ms step_avg:43.21ms +[2025-09-11 11:52:44] [Rank 0] PRINT: step:7200/10000 
val_loss:4.6061 total_sharp:9.8331e-04 L1_sharp:2.7466e-02 L2_sharp:3.3300e-02 L3_sharp:4.5432e-02 L4_sharp:5.7839e-02 L5_sharp:8.1048e-02 L6_sharp:1.1206e-01 L7_sharp:1.4449e-01 L8_sharp:1.6512e-01 L9_sharp:2.3214e-01 L10_sharp:3.1871e-01 L11_sharp:3.7597e-01 L12_sharp:1.0997e+00 total_fnorm:9.9375e+00 total_l1_linf:1.0752e+04 total_spectral:4.9688e+00 L1_fnorm:7.2754e-02 L2_fnorm:7.2754e-02 L3_fnorm:7.2754e-02 L4_fnorm:7.2754e-02 L5_fnorm:7.2266e-02 L6_fnorm:7.2266e-02 L7_fnorm:7.2266e-02 L8_fnorm:7.2266e-02 L9_fnorm:7.2266e-02 L10_fnorm:7.2266e-02 L11_fnorm:7.1777e-02 L12_fnorm:7.1289e-02 L1_l1linf:1.6113e-02 L2_l1linf:1.6235e-02 L3_l1linf:1.6113e-02 L4_l1linf:1.5991e-02 L5_l1linf:1.6235e-02 L6_l1linf:1.6235e-02 L7_l1linf:1.6602e-02 L8_l1linf:1.5991e-02 L9_l1linf:1.6479e-02 L10_l1linf:1.6357e-02 L11_l1linf:1.6113e-02 L12_l1linf:1.7212e-02 L1_spectral:1.1514e-03 L2_spectral:1.1609e-03 L3_spectral:1.1472e-03 L4_spectral:1.1466e-03 L5_spectral:1.1485e-03 L6_spectral:1.1456e-03 L7_spectral:1.1486e-03 L8_spectral:1.1311e-03 L9_spectral:1.1358e-03 L10_spectral:1.1256e-03 L11_spectral:1.1166e-03 L12_spectral:1.0869e-03 train_time:311121ms step_avg:43.21ms +[2025-09-11 11:52:46] [Rank 0] step:7201/10000 train_time:313029ms step_avg:43.47ms +[2025-09-11 11:52:46] [Rank 0] step:7201/10000 train_time:313029ms step_avg:43.47ms +[2025-09-11 11:52:47] [Rank 0] step:7221/10000 train_time:313758ms step_avg:43.45ms +[2025-09-11 11:52:47] [Rank 0] step:7221/10000 train_time:313758ms step_avg:43.45ms +[2025-09-11 11:52:48] [Rank 0] step:7241/10000 train_time:314459ms step_avg:43.43ms +[2025-09-11 11:52:48] [Rank 0] step:7241/10000 train_time:314459ms step_avg:43.43ms +[2025-09-11 11:52:48] [Rank 0] step:7261/10000 train_time:315164ms step_avg:43.41ms +[2025-09-11 11:52:48] [Rank 0] step:7261/10000 train_time:315164ms step_avg:43.41ms +[2025-09-11 11:52:49] [Rank 0] step:7281/10000 train_time:315873ms step_avg:43.38ms +[2025-09-11 11:52:49] [Rank 0] step:7281/10000 
train_time:315873ms step_avg:43.38ms +[2025-09-11 11:52:50] [Rank 0] step:7301/10000 train_time:316575ms step_avg:43.36ms +[2025-09-11 11:52:50] [Rank 0] step:7301/10000 train_time:316575ms step_avg:43.36ms +[2025-09-11 11:52:50] [Rank 0] step:7321/10000 train_time:317279ms step_avg:43.34ms +[2025-09-11 11:52:50] [Rank 0] step:7321/10000 train_time:317279ms step_avg:43.34ms +[2025-09-11 11:52:51] [Rank 0] step:7341/10000 train_time:317985ms step_avg:43.32ms +[2025-09-11 11:52:51] [Rank 0] step:7341/10000 train_time:317985ms step_avg:43.32ms +[2025-09-11 11:52:52] [Rank 0] step:7361/10000 train_time:318688ms step_avg:43.29ms +[2025-09-11 11:52:52] [Rank 0] step:7361/10000 train_time:318688ms step_avg:43.29ms +[2025-09-11 11:52:52] [Rank 0] step:7381/10000 train_time:319392ms step_avg:43.27ms +[2025-09-11 11:52:52] [Rank 0] step:7381/10000 train_time:319392ms step_avg:43.27ms +[2025-09-11 11:52:53] [Rank 0] step:7401/10000 train_time:320091ms step_avg:43.25ms +[2025-09-11 11:52:53] [Rank 0] step:7401/10000 train_time:320091ms step_avg:43.25ms +[2025-09-11 11:52:54] [Rank 0] step:7421/10000 train_time:320792ms step_avg:43.23ms +[2025-09-11 11:52:54] [Rank 0] step:7421/10000 train_time:320792ms step_avg:43.23ms +[2025-09-11 11:52:55] [Rank 0] step:7441/10000 train_time:321494ms step_avg:43.21ms +[2025-09-11 11:52:55] [Rank 0] step:7441/10000 train_time:321494ms step_avg:43.21ms +[2025-09-11 11:52:55] [Rank 0] step:7461/10000 train_time:322195ms step_avg:43.18ms +[2025-09-11 11:52:55] [Rank 0] step:7461/10000 train_time:322195ms step_avg:43.18ms +[2025-09-11 11:52:56] [Rank 0] step:7481/10000 train_time:322898ms step_avg:43.16ms +[2025-09-11 11:52:56] [Rank 0] step:7481/10000 train_time:322898ms step_avg:43.16ms +[2025-09-11 11:52:57] [Rank 0] step:7501/10000 train_time:323599ms step_avg:43.14ms +[2025-09-11 11:52:57] [Rank 0] step:7501/10000 train_time:323599ms step_avg:43.14ms +[2025-09-11 11:52:57] [Rank 0] step:7521/10000 train_time:324302ms step_avg:43.12ms 
+[2025-09-11 11:52:57] [Rank 0] step:7521/10000 train_time:324302ms step_avg:43.12ms +[2025-09-11 11:52:58] [Rank 0] step:7541/10000 train_time:325001ms step_avg:43.10ms +[2025-09-11 11:52:58] [Rank 0] step:7541/10000 train_time:325001ms step_avg:43.10ms +[2025-09-11 11:52:59] [Rank 0] step:7561/10000 train_time:325705ms step_avg:43.08ms +[2025-09-11 11:52:59] [Rank 0] step:7561/10000 train_time:325705ms step_avg:43.08ms +[2025-09-11 11:53:00] [Rank 0] step:7581/10000 train_time:326408ms step_avg:43.06ms +[2025-09-11 11:53:00] [Rank 0] step:7581/10000 train_time:326408ms step_avg:43.06ms +[2025-09-11 11:53:00] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 11:53:00] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 11:53:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 11:53:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 11:53:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 11:53:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 11:53:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:53:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:53:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 11:53:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 11:53:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 11:53:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 11:53:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 11:53:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 11:53:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 11:53:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 11:53:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 11:53:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 11:53:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 11:53:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 11:53:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 11:53:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 11:53:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 11:53:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 11:53:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 11:53:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 11:53:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 11:53:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 11:53:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 11:53:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 11:53:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 11:53:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 11:53:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 11:53:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 11:53:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 11:53:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 11:53:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 11:53:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 11:53:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 11:53:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 11:53:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 11:53:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 11:53:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 11:53:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 11:53:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:53:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:53:15] [Rank 0] PRINT: step:7600/10000 val_loss:4.5839 total_sharp:9.1184e-04 L1_sharp:1.9740e-02 L2_sharp:2.4891e-02 L3_sharp:2.4943e-02 L4_sharp:4.5194e-02 L5_sharp:6.1968e-02 L6_sharp:8.4226e-02 L7_sharp:1.2321e-01 L8_sharp:1.5335e-01 L9_sharp:2.2072e-01 L10_sharp:2.8068e-01 L11_sharp:3.5365e-01 L12_sharp:9.2014e-01 total_fnorm:7.8125e+00 total_l1_linf:7.6800e+03 total_spectral:3.9062e+00 L1_fnorm:6.0059e-02 L2_fnorm:5.9814e-02 L3_fnorm:6.0059e-02 L4_fnorm:5.9814e-02 L5_fnorm:5.9570e-02 L6_fnorm:5.9814e-02 L7_fnorm:5.9814e-02 L8_fnorm:5.9326e-02 L9_fnorm:5.9814e-02 L10_fnorm:5.9326e-02 L11_fnorm:5.9082e-02 L12_fnorm:5.8105e-02 L1_l1linf:1.2451e-02 L2_l1linf:1.2451e-02 L3_l1linf:1.2573e-02 L4_l1linf:1.2573e-02 L5_l1linf:1.2451e-02 L6_l1linf:1.2390e-02 L7_l1linf:1.2634e-02 L8_l1linf:1.2695e-02 L9_l1linf:1.2817e-02 L10_l1linf:1.2878e-02 L11_l1linf:1.2756e-02 L12_l1linf:1.3184e-02 L1_spectral:9.9899e-04 L2_spectral:9.9856e-04 L3_spectral:1.0071e-03 L4_spectral:9.8160e-04 L5_spectral:9.8289e-04 L6_spectral:9.9169e-04 L7_spectral:9.8485e-04 L8_spectral:9.6880e-04 L9_spectral:9.7910e-04 L10_spectral:9.6001e-04 L11_spectral:9.5330e-04 L12_spectral:9.0130e-04 train_time:327090ms step_avg:43.04ms +[2025-09-11 11:53:15] [Rank 0] PRINT: step:7600/10000 val_loss:4.5839 total_sharp:9.1184e-04 L1_sharp:1.9740e-02 L2_sharp:2.4891e-02 L3_sharp:2.4943e-02 L4_sharp:4.5194e-02 L5_sharp:6.1968e-02 L6_sharp:8.4226e-02 L7_sharp:1.2321e-01 L8_sharp:1.5335e-01 L9_sharp:2.2072e-01 L10_sharp:2.8068e-01 L11_sharp:3.5365e-01 L12_sharp:9.2014e-01 total_fnorm:7.8125e+00 total_l1_linf:7.6800e+03 total_spectral:3.9062e+00 L1_fnorm:6.0059e-02 L2_fnorm:5.9814e-02 L3_fnorm:6.0059e-02 L4_fnorm:5.9814e-02 L5_fnorm:5.9570e-02 L6_fnorm:5.9814e-02 L7_fnorm:5.9814e-02 L8_fnorm:5.9326e-02 L9_fnorm:5.9814e-02 L10_fnorm:5.9326e-02 L11_fnorm:5.9082e-02 L12_fnorm:5.8105e-02 L1_l1linf:1.2451e-02 L2_l1linf:1.2451e-02 L3_l1linf:1.2573e-02 L4_l1linf:1.2573e-02 L5_l1linf:1.2451e-02 
L6_l1linf:1.2390e-02 L7_l1linf:1.2634e-02 L8_l1linf:1.2695e-02 L9_l1linf:1.2817e-02 L10_l1linf:1.2878e-02 L11_l1linf:1.2756e-02 L12_l1linf:1.3184e-02 L1_spectral:9.9899e-04 L2_spectral:9.9856e-04 L3_spectral:1.0071e-03 L4_spectral:9.8160e-04 L5_spectral:9.8289e-04 L6_spectral:9.9169e-04 L7_spectral:9.8485e-04 L8_spectral:9.6880e-04 L9_spectral:9.7910e-04 L10_spectral:9.6001e-04 L11_spectral:9.5330e-04 L12_spectral:9.0130e-04 train_time:327090ms step_avg:43.04ms +[2025-09-11 11:53:17] [Rank 0] step:7601/10000 train_time:329189ms step_avg:43.31ms +[2025-09-11 11:53:17] [Rank 0] step:7601/10000 train_time:329189ms step_avg:43.31ms +[2025-09-11 11:53:18] [Rank 0] step:7621/10000 train_time:329922ms step_avg:43.29ms +[2025-09-11 11:53:18] [Rank 0] step:7621/10000 train_time:329922ms step_avg:43.29ms +[2025-09-11 11:53:19] [Rank 0] step:7641/10000 train_time:330626ms step_avg:43.27ms +[2025-09-11 11:53:19] [Rank 0] step:7641/10000 train_time:330626ms step_avg:43.27ms +[2025-09-11 11:53:19] [Rank 0] step:7661/10000 train_time:331327ms step_avg:43.25ms +[2025-09-11 11:53:19] [Rank 0] step:7661/10000 train_time:331327ms step_avg:43.25ms +[2025-09-11 11:53:20] [Rank 0] step:7681/10000 train_time:332029ms step_avg:43.23ms +[2025-09-11 11:53:20] [Rank 0] step:7681/10000 train_time:332029ms step_avg:43.23ms +[2025-09-11 11:53:21] [Rank 0] step:7701/10000 train_time:332731ms step_avg:43.21ms +[2025-09-11 11:53:21] [Rank 0] step:7701/10000 train_time:332731ms step_avg:43.21ms +[2025-09-11 11:53:21] [Rank 0] step:7721/10000 train_time:333434ms step_avg:43.19ms +[2025-09-11 11:53:21] [Rank 0] step:7721/10000 train_time:333434ms step_avg:43.19ms +[2025-09-11 11:53:22] [Rank 0] step:7741/10000 train_time:334137ms step_avg:43.16ms +[2025-09-11 11:53:22] [Rank 0] step:7741/10000 train_time:334137ms step_avg:43.16ms +[2025-09-11 11:53:23] [Rank 0] step:7761/10000 train_time:334838ms step_avg:43.14ms +[2025-09-11 11:53:23] [Rank 0] step:7761/10000 train_time:334838ms step_avg:43.14ms 
+[2025-09-11 11:53:24] [Rank 0] step:7781/10000 train_time:335541ms step_avg:43.12ms +[2025-09-11 11:53:24] [Rank 0] step:7781/10000 train_time:335541ms step_avg:43.12ms +[2025-09-11 11:53:24] [Rank 0] step:7801/10000 train_time:336242ms step_avg:43.10ms +[2025-09-11 11:53:24] [Rank 0] step:7801/10000 train_time:336242ms step_avg:43.10ms +[2025-09-11 11:53:25] [Rank 0] step:7821/10000 train_time:336943ms step_avg:43.08ms +[2025-09-11 11:53:25] [Rank 0] step:7821/10000 train_time:336943ms step_avg:43.08ms +[2025-09-11 11:53:26] [Rank 0] step:7841/10000 train_time:337646ms step_avg:43.06ms +[2025-09-11 11:53:26] [Rank 0] step:7841/10000 train_time:337646ms step_avg:43.06ms +[2025-09-11 11:53:26] [Rank 0] step:7861/10000 train_time:338351ms step_avg:43.04ms +[2025-09-11 11:53:26] [Rank 0] step:7861/10000 train_time:338351ms step_avg:43.04ms +[2025-09-11 11:53:27] [Rank 0] step:7881/10000 train_time:339053ms step_avg:43.02ms +[2025-09-11 11:53:27] [Rank 0] step:7881/10000 train_time:339053ms step_avg:43.02ms +[2025-09-11 11:53:28] [Rank 0] step:7901/10000 train_time:339757ms step_avg:43.00ms +[2025-09-11 11:53:28] [Rank 0] step:7901/10000 train_time:339757ms step_avg:43.00ms +[2025-09-11 11:53:29] [Rank 0] step:7921/10000 train_time:340460ms step_avg:42.98ms +[2025-09-11 11:53:29] [Rank 0] step:7921/10000 train_time:340460ms step_avg:42.98ms +[2025-09-11 11:53:29] [Rank 0] step:7941/10000 train_time:341165ms step_avg:42.96ms +[2025-09-11 11:53:29] [Rank 0] step:7941/10000 train_time:341165ms step_avg:42.96ms +[2025-09-11 11:53:30] [Rank 0] step:7961/10000 train_time:341865ms step_avg:42.94ms +[2025-09-11 11:53:30] [Rank 0] step:7961/10000 train_time:341865ms step_avg:42.94ms +[2025-09-11 11:53:31] [Rank 0] step:7981/10000 train_time:342570ms step_avg:42.92ms +[2025-09-11 11:53:31] [Rank 0] step:7981/10000 train_time:342570ms step_avg:42.92ms +[2025-09-11 11:53:31] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 11:53:31] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 11:53:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 11:53:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 11:53:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 11:53:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 11:53:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:53:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:53:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 11:53:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 11:53:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 11:53:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 11:53:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 11:53:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 11:53:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 11:53:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 11:53:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 11:53:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 11:53:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 11:53:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 11:53:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 11:53:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 11:53:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 11:53:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 11:53:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 11:53:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 11:53:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 11:53:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 11:53:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 11:53:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 11:53:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 11:53:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 11:53:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 11:53:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 11:53:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 11:53:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 11:53:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 11:53:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 11:53:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 11:53:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 11:53:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 11:53:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 11:53:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 11:53:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 11:53:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:53:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:53:43] [Rank 0] PRINT: step:8000/10000 val_loss:4.5710 total_sharp:9.6566e-04 L1_sharp:2.7679e-02 L2_sharp:2.8492e-02 L3_sharp:3.4184e-02 L4_sharp:4.9196e-02 L5_sharp:7.2348e-02 L6_sharp:1.0285e-01 L7_sharp:1.3542e-01 L8_sharp:1.7208e-01 L9_sharp:2.1430e-01 L10_sharp:3.2420e-01 L11_sharp:3.9759e-01 L12_sharp:2.2425e+00 total_fnorm:6.4688e+00 total_l1_linf:5.9520e+03 total_spectral:3.2344e+00 L1_fnorm:4.8096e-02 L2_fnorm:4.8096e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.8096e-02 L5_fnorm:4.7852e-02 L6_fnorm:4.8340e-02 L7_fnorm:4.8340e-02 L8_fnorm:4.7852e-02 L9_fnorm:4.8096e-02 L10_fnorm:4.7607e-02 L11_fnorm:4.7363e-02 L12_fnorm:4.6143e-02 L1_l1linf:9.3384e-03 L2_l1linf:9.2773e-03 L3_l1linf:9.5825e-03 L4_l1linf:9.7046e-03 L5_l1linf:9.4604e-03 L6_l1linf:9.8267e-03 L7_l1linf:9.6436e-03 L8_l1linf:9.7046e-03 L9_l1linf:9.7656e-03 L10_l1linf:9.9487e-03 L11_l1linf:1.0010e-02 L12_l1linf:1.0010e-02 L1_spectral:8.2990e-04 L2_spectral:8.3151e-04 L3_spectral:8.3521e-04 L4_spectral:8.2659e-04 L5_spectral:8.2025e-04 L6_spectral:8.2436e-04 L7_spectral:8.1727e-04 L8_spectral:8.1443e-04 L9_spectral:8.1662e-04 L10_spectral:7.9397e-04 L11_spectral:7.9064e-04 L12_spectral:7.3810e-04 train_time:343250ms step_avg:42.91ms +[2025-09-11 11:53:43] [Rank 0] PRINT: step:8000/10000 
val_loss:4.5710 total_sharp:9.6566e-04 L1_sharp:2.7679e-02 L2_sharp:2.8492e-02 L3_sharp:3.4184e-02 L4_sharp:4.9196e-02 L5_sharp:7.2348e-02 L6_sharp:1.0285e-01 L7_sharp:1.3542e-01 L8_sharp:1.7208e-01 L9_sharp:2.1430e-01 L10_sharp:3.2420e-01 L11_sharp:3.9759e-01 L12_sharp:2.2425e+00 total_fnorm:6.4688e+00 total_l1_linf:5.9520e+03 total_spectral:3.2344e+00 L1_fnorm:4.8096e-02 L2_fnorm:4.8096e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.8096e-02 L5_fnorm:4.7852e-02 L6_fnorm:4.8340e-02 L7_fnorm:4.8340e-02 L8_fnorm:4.7852e-02 L9_fnorm:4.8096e-02 L10_fnorm:4.7607e-02 L11_fnorm:4.7363e-02 L12_fnorm:4.6143e-02 L1_l1linf:9.3384e-03 L2_l1linf:9.2773e-03 L3_l1linf:9.5825e-03 L4_l1linf:9.7046e-03 L5_l1linf:9.4604e-03 L6_l1linf:9.8267e-03 L7_l1linf:9.6436e-03 L8_l1linf:9.7046e-03 L9_l1linf:9.7656e-03 L10_l1linf:9.9487e-03 L11_l1linf:1.0010e-02 L12_l1linf:1.0010e-02 L1_spectral:8.2990e-04 L2_spectral:8.3151e-04 L3_spectral:8.3521e-04 L4_spectral:8.2659e-04 L5_spectral:8.2025e-04 L6_spectral:8.2436e-04 L7_spectral:8.1727e-04 L8_spectral:8.1443e-04 L9_spectral:8.1662e-04 L10_spectral:7.9397e-04 L11_spectral:7.9064e-04 L12_spectral:7.3810e-04 train_time:343250ms step_avg:42.91ms +[2025-09-11 11:53:45] [Rank 0] step:8001/10000 train_time:345405ms step_avg:43.17ms +[2025-09-11 11:53:45] [Rank 0] step:8001/10000 train_time:345405ms step_avg:43.17ms +[2025-09-11 11:53:46] [Rank 0] step:8021/10000 train_time:346129ms step_avg:43.15ms +[2025-09-11 11:53:46] [Rank 0] step:8021/10000 train_time:346129ms step_avg:43.15ms +[2025-09-11 11:53:46] [Rank 0] step:8041/10000 train_time:346832ms step_avg:43.13ms +[2025-09-11 11:53:46] [Rank 0] step:8041/10000 train_time:346832ms step_avg:43.13ms +[2025-09-11 11:53:47] [Rank 0] step:8061/10000 train_time:347537ms step_avg:43.11ms +[2025-09-11 11:53:47] [Rank 0] step:8061/10000 train_time:347537ms step_avg:43.11ms +[2025-09-11 11:53:48] [Rank 0] step:8081/10000 train_time:348237ms step_avg:43.09ms +[2025-09-11 11:53:48] [Rank 0] step:8081/10000 
train_time:348237ms step_avg:43.09ms +[2025-09-11 11:53:49] [Rank 0] step:8101/10000 train_time:348938ms step_avg:43.07ms +[2025-09-11 11:53:49] [Rank 0] step:8101/10000 train_time:348938ms step_avg:43.07ms +[2025-09-11 11:53:49] [Rank 0] step:8121/10000 train_time:349644ms step_avg:43.05ms +[2025-09-11 11:53:49] [Rank 0] step:8121/10000 train_time:349644ms step_avg:43.05ms +[2025-09-11 11:53:51] [Rank 0] step:8141/10000 train_time:351100ms step_avg:43.13ms +[2025-09-11 11:53:51] [Rank 0] step:8141/10000 train_time:351100ms step_avg:43.13ms +[2025-09-11 11:53:51] [Rank 0] step:8161/10000 train_time:351806ms step_avg:43.11ms +[2025-09-11 11:53:51] [Rank 0] step:8161/10000 train_time:351806ms step_avg:43.11ms +[2025-09-11 11:53:52] [Rank 0] step:8181/10000 train_time:352519ms step_avg:43.09ms +[2025-09-11 11:53:52] [Rank 0] step:8181/10000 train_time:352519ms step_avg:43.09ms +[2025-09-11 11:53:53] [Rank 0] step:8201/10000 train_time:353230ms step_avg:43.07ms +[2025-09-11 11:53:53] [Rank 0] step:8201/10000 train_time:353230ms step_avg:43.07ms +[2025-09-11 11:53:54] [Rank 0] step:8221/10000 train_time:353938ms step_avg:43.05ms +[2025-09-11 11:53:54] [Rank 0] step:8221/10000 train_time:353938ms step_avg:43.05ms +[2025-09-11 11:53:54] [Rank 0] step:8241/10000 train_time:354655ms step_avg:43.04ms +[2025-09-11 11:53:54] [Rank 0] step:8241/10000 train_time:354655ms step_avg:43.04ms +[2025-09-11 11:53:55] [Rank 0] step:8261/10000 train_time:355363ms step_avg:43.02ms +[2025-09-11 11:53:55] [Rank 0] step:8261/10000 train_time:355363ms step_avg:43.02ms +[2025-09-11 11:53:56] [Rank 0] step:8281/10000 train_time:356069ms step_avg:43.00ms +[2025-09-11 11:53:56] [Rank 0] step:8281/10000 train_time:356069ms step_avg:43.00ms +[2025-09-11 11:53:56] [Rank 0] step:8301/10000 train_time:356778ms step_avg:42.98ms +[2025-09-11 11:53:56] [Rank 0] step:8301/10000 train_time:356778ms step_avg:42.98ms +[2025-09-11 11:53:57] [Rank 0] step:8321/10000 train_time:357486ms step_avg:42.96ms 
+[2025-09-11 11:53:57] [Rank 0] step:8321/10000 train_time:357486ms step_avg:42.96ms +[2025-09-11 11:53:58] [Rank 0] step:8341/10000 train_time:358201ms step_avg:42.94ms +[2025-09-11 11:53:58] [Rank 0] step:8341/10000 train_time:358201ms step_avg:42.94ms +[2025-09-11 11:53:59] [Rank 0] step:8361/10000 train_time:358905ms step_avg:42.93ms +[2025-09-11 11:53:59] [Rank 0] step:8361/10000 train_time:358905ms step_avg:42.93ms +[2025-09-11 11:53:59] [Rank 0] step:8381/10000 train_time:359615ms step_avg:42.91ms +[2025-09-11 11:53:59] [Rank 0] step:8381/10000 train_time:359615ms step_avg:42.91ms +[2025-09-11 11:54:00] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 11:54:00] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 11:54:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 11:54:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 11:54:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 11:54:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 11:54:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:54:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:54:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 11:54:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 11:54:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 11:54:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 11:54:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 11:54:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 11:54:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 11:54:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 11:54:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 11:54:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 11:54:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 11:54:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 11:54:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 11:54:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 11:54:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 11:54:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 11:54:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 11:54:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 11:54:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 11:54:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 11:54:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 11:54:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 11:54:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 11:54:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 11:54:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 11:54:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 11:54:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 11:54:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 11:54:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 11:54:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 11:54:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 11:54:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 11:54:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 11:54:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 11:54:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 11:54:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 11:54:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:54:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:54:14] [Rank 0] PRINT: step:8400/10000 val_loss:4.5556 total_sharp:9.9599e-04 L1_sharp:1.8541e-02 L2_sharp:2.0982e-02 L3_sharp:2.7053e-02 L4_sharp:4.4871e-02 L5_sharp:6.0938e-02 L6_sharp:8.5152e-02 L7_sharp:1.2940e-01 L8_sharp:1.7159e-01 L9_sharp:2.3188e-01 L10_sharp:3.7384e-01 L11_sharp:3.3137e-01 L12_sharp:7.8997e-01 total_fnorm:4.5312e+00 total_l1_linf:3.6960e+03 total_spectral:2.2656e+00 L1_fnorm:3.7354e-02 L2_fnorm:3.7354e-02 L3_fnorm:3.7354e-02 L4_fnorm:3.7354e-02 L5_fnorm:3.7109e-02 L6_fnorm:3.7354e-02 L7_fnorm:3.7354e-02 L8_fnorm:3.7109e-02 L9_fnorm:3.7354e-02 L10_fnorm:3.6865e-02 L11_fnorm:3.6621e-02 L12_fnorm:3.5645e-02 L1_l1linf:6.8970e-03 L2_l1linf:6.7444e-03 L3_l1linf:6.7444e-03 L4_l1linf:7.0496e-03 L5_l1linf:6.8359e-03 L6_l1linf:6.9275e-03 L7_l1linf:7.0801e-03 L8_l1linf:7.0496e-03 L9_l1linf:6.9275e-03 L10_l1linf:7.2021e-03 L11_l1linf:7.0190e-03 L12_l1linf:7.0801e-03 L1_spectral:6.7003e-04 L2_spectral:6.6384e-04 L3_spectral:6.6317e-04 L4_spectral:6.6152e-04 L5_spectral:6.5818e-04 L6_spectral:6.6054e-04 L7_spectral:6.5849e-04 L8_spectral:6.4523e-04 L9_spectral:6.5475e-04 L10_spectral:6.3245e-04 L11_spectral:6.2423e-04 L12_spectral:5.8263e-04 train_time:360307ms step_avg:42.89ms +[2025-09-11 11:54:14] [Rank 0] PRINT: step:8400/10000 val_loss:4.5556 total_sharp:9.9599e-04 L1_sharp:1.8541e-02 L2_sharp:2.0982e-02 L3_sharp:2.7053e-02 L4_sharp:4.4871e-02 L5_sharp:6.0938e-02 L6_sharp:8.5152e-02 L7_sharp:1.2940e-01 L8_sharp:1.7159e-01 L9_sharp:2.3188e-01 L10_sharp:3.7384e-01 L11_sharp:3.3137e-01 L12_sharp:7.8997e-01 total_fnorm:4.5312e+00 total_l1_linf:3.6960e+03 total_spectral:2.2656e+00 L1_fnorm:3.7354e-02 L2_fnorm:3.7354e-02 L3_fnorm:3.7354e-02 L4_fnorm:3.7354e-02 L5_fnorm:3.7109e-02 L6_fnorm:3.7354e-02 L7_fnorm:3.7354e-02 L8_fnorm:3.7109e-02 L9_fnorm:3.7354e-02 L10_fnorm:3.6865e-02 L11_fnorm:3.6621e-02 L12_fnorm:3.5645e-02 L1_l1linf:6.8970e-03 L2_l1linf:6.7444e-03 L3_l1linf:6.7444e-03 L4_l1linf:7.0496e-03 L5_l1linf:6.8359e-03 
L6_l1linf:6.9275e-03 L7_l1linf:7.0801e-03 L8_l1linf:7.0496e-03 L9_l1linf:6.9275e-03 L10_l1linf:7.2021e-03 L11_l1linf:7.0190e-03 L12_l1linf:7.0801e-03 L1_spectral:6.7003e-04 L2_spectral:6.6384e-04 L3_spectral:6.6317e-04 L4_spectral:6.6152e-04 L5_spectral:6.5818e-04 L6_spectral:6.6054e-04 L7_spectral:6.5849e-04 L8_spectral:6.4523e-04 L9_spectral:6.5475e-04 L10_spectral:6.3245e-04 L11_spectral:6.2423e-04 L12_spectral:5.8263e-04 train_time:360307ms step_avg:42.89ms +[2025-09-11 11:54:16] [Rank 0] step:8401/10000 train_time:362375ms step_avg:43.13ms +[2025-09-11 11:54:16] [Rank 0] step:8401/10000 train_time:362375ms step_avg:43.13ms +[2025-09-11 11:54:17] [Rank 0] step:8421/10000 train_time:363104ms step_avg:43.12ms +[2025-09-11 11:54:17] [Rank 0] step:8421/10000 train_time:363104ms step_avg:43.12ms +[2025-09-11 11:54:18] [Rank 0] step:8441/10000 train_time:363815ms step_avg:43.10ms +[2025-09-11 11:54:18] [Rank 0] step:8441/10000 train_time:363815ms step_avg:43.10ms +[2025-09-11 11:54:18] [Rank 0] step:8461/10000 train_time:364526ms step_avg:43.08ms +[2025-09-11 11:54:18] [Rank 0] step:8461/10000 train_time:364526ms step_avg:43.08ms +[2025-09-11 11:54:19] [Rank 0] step:8481/10000 train_time:365237ms step_avg:43.07ms +[2025-09-11 11:54:19] [Rank 0] step:8481/10000 train_time:365237ms step_avg:43.07ms +[2025-09-11 11:54:20] [Rank 0] step:8501/10000 train_time:365946ms step_avg:43.05ms +[2025-09-11 11:54:20] [Rank 0] step:8501/10000 train_time:365946ms step_avg:43.05ms +[2025-09-11 11:54:21] [Rank 0] step:8521/10000 train_time:366654ms step_avg:43.03ms +[2025-09-11 11:54:21] [Rank 0] step:8521/10000 train_time:366654ms step_avg:43.03ms +[2025-09-11 11:54:21] [Rank 0] step:8541/10000 train_time:367363ms step_avg:43.01ms +[2025-09-11 11:54:21] [Rank 0] step:8541/10000 train_time:367363ms step_avg:43.01ms +[2025-09-11 11:54:22] [Rank 0] step:8561/10000 train_time:368077ms step_avg:42.99ms +[2025-09-11 11:54:22] [Rank 0] step:8561/10000 train_time:368077ms step_avg:42.99ms 
+[2025-09-11 11:54:23] [Rank 0] step:8581/10000 train_time:368791ms step_avg:42.98ms +[2025-09-11 11:54:23] [Rank 0] step:8581/10000 train_time:368791ms step_avg:42.98ms +[2025-09-11 11:54:23] [Rank 0] step:8601/10000 train_time:369501ms step_avg:42.96ms +[2025-09-11 11:54:23] [Rank 0] step:8601/10000 train_time:369501ms step_avg:42.96ms +[2025-09-11 11:54:24] [Rank 0] step:8621/10000 train_time:370209ms step_avg:42.94ms +[2025-09-11 11:54:24] [Rank 0] step:8621/10000 train_time:370209ms step_avg:42.94ms +[2025-09-11 11:54:25] [Rank 0] step:8641/10000 train_time:370918ms step_avg:42.93ms +[2025-09-11 11:54:25] [Rank 0] step:8641/10000 train_time:370918ms step_avg:42.93ms +[2025-09-11 11:54:26] [Rank 0] step:8661/10000 train_time:371628ms step_avg:42.91ms +[2025-09-11 11:54:26] [Rank 0] step:8661/10000 train_time:371628ms step_avg:42.91ms +[2025-09-11 11:54:26] [Rank 0] step:8681/10000 train_time:372339ms step_avg:42.89ms +[2025-09-11 11:54:26] [Rank 0] step:8681/10000 train_time:372339ms step_avg:42.89ms +[2025-09-11 11:54:27] [Rank 0] step:8701/10000 train_time:373048ms step_avg:42.87ms +[2025-09-11 11:54:27] [Rank 0] step:8701/10000 train_time:373048ms step_avg:42.87ms +[2025-09-11 11:54:28] [Rank 0] step:8721/10000 train_time:373760ms step_avg:42.86ms +[2025-09-11 11:54:28] [Rank 0] step:8721/10000 train_time:373760ms step_avg:42.86ms +[2025-09-11 11:54:28] [Rank 0] step:8741/10000 train_time:374466ms step_avg:42.84ms +[2025-09-11 11:54:28] [Rank 0] step:8741/10000 train_time:374466ms step_avg:42.84ms +[2025-09-11 11:54:29] [Rank 0] step:8761/10000 train_time:375177ms step_avg:42.82ms +[2025-09-11 11:54:29] [Rank 0] step:8761/10000 train_time:375177ms step_avg:42.82ms +[2025-09-11 11:54:30] [Rank 0] step:8781/10000 train_time:375884ms step_avg:42.81ms +[2025-09-11 11:54:30] [Rank 0] step:8781/10000 train_time:375884ms step_avg:42.81ms +[2025-09-11 11:54:30] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 11:54:30] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 11:54:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 11:54:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 11:54:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 11:54:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 11:54:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:54:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:54:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 11:54:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 11:54:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 11:54:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 11:54:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 11:54:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 11:54:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 11:54:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 11:54:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 11:54:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 11:54:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 11:54:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 11:54:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 11:54:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 11:54:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 11:54:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 11:54:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 11:54:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 11:54:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 11:54:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 11:54:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 11:54:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 11:54:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 11:54:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 11:54:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 11:54:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 11:54:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 11:54:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 11:54:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 11:54:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 11:54:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 11:54:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 11:54:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 11:54:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 11:54:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 11:54:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 11:54:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:54:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:54:42] [Rank 0] PRINT: step:8800/10000 val_loss:4.5504 total_sharp:7.2229e-04 L1_sharp:1.3061e-02 L2_sharp:1.8609e-02 L3_sharp:2.0633e-02 L4_sharp:3.5009e-02 L5_sharp:5.0224e-02 L6_sharp:7.1284e-02 L7_sharp:1.0304e-01 L8_sharp:1.2725e-01 L9_sharp:1.7594e-01 L10_sharp:2.5778e-01 L11_sharp:2.9179e-01 L12_sharp:9.7233e-01 total_fnorm:3.2969e+00 total_l1_linf:2.3840e+03 total_spectral:1.6484e+00 L1_fnorm:2.6611e-02 L2_fnorm:2.6611e-02 L3_fnorm:2.6611e-02 L4_fnorm:2.6611e-02 L5_fnorm:2.6611e-02 L6_fnorm:2.6733e-02 L7_fnorm:2.6611e-02 L8_fnorm:2.6489e-02 L9_fnorm:2.6611e-02 L10_fnorm:2.6367e-02 L11_fnorm:2.6001e-02 L12_fnorm:2.5513e-02 L1_l1linf:4.1809e-03 L2_l1linf:4.3030e-03 L3_l1linf:4.3030e-03 L4_l1linf:4.4861e-03 L5_l1linf:4.2725e-03 L6_l1linf:4.4861e-03 L7_l1linf:4.3640e-03 L8_l1linf:4.7302e-03 L9_l1linf:4.4861e-03 L10_l1linf:4.6387e-03 L11_l1linf:4.4861e-03 L12_l1linf:4.9438e-03 L1_spectral:4.8855e-04 L2_spectral:4.8815e-04 L3_spectral:4.8339e-04 L4_spectral:4.8638e-04 L5_spectral:4.8428e-04 L6_spectral:4.8613e-04 L7_spectral:4.8058e-04 L8_spectral:4.6891e-04 L9_spectral:4.7788e-04 L10_spectral:4.6088e-04 L11_spectral:4.5289e-04 L12_spectral:4.2395e-04 train_time:376572ms step_avg:42.79ms +[2025-09-11 11:54:42] [Rank 0] PRINT: step:8800/10000 
val_loss:4.5504 total_sharp:7.2229e-04 L1_sharp:1.3061e-02 L2_sharp:1.8609e-02 L3_sharp:2.0633e-02 L4_sharp:3.5009e-02 L5_sharp:5.0224e-02 L6_sharp:7.1284e-02 L7_sharp:1.0304e-01 L8_sharp:1.2725e-01 L9_sharp:1.7594e-01 L10_sharp:2.5778e-01 L11_sharp:2.9179e-01 L12_sharp:9.7233e-01 total_fnorm:3.2969e+00 total_l1_linf:2.3840e+03 total_spectral:1.6484e+00 L1_fnorm:2.6611e-02 L2_fnorm:2.6611e-02 L3_fnorm:2.6611e-02 L4_fnorm:2.6611e-02 L5_fnorm:2.6611e-02 L6_fnorm:2.6733e-02 L7_fnorm:2.6611e-02 L8_fnorm:2.6489e-02 L9_fnorm:2.6611e-02 L10_fnorm:2.6367e-02 L11_fnorm:2.6001e-02 L12_fnorm:2.5513e-02 L1_l1linf:4.1809e-03 L2_l1linf:4.3030e-03 L3_l1linf:4.3030e-03 L4_l1linf:4.4861e-03 L5_l1linf:4.2725e-03 L6_l1linf:4.4861e-03 L7_l1linf:4.3640e-03 L8_l1linf:4.7302e-03 L9_l1linf:4.4861e-03 L10_l1linf:4.6387e-03 L11_l1linf:4.4861e-03 L12_l1linf:4.9438e-03 L1_spectral:4.8855e-04 L2_spectral:4.8815e-04 L3_spectral:4.8339e-04 L4_spectral:4.8638e-04 L5_spectral:4.8428e-04 L6_spectral:4.8613e-04 L7_spectral:4.8058e-04 L8_spectral:4.6891e-04 L9_spectral:4.7788e-04 L10_spectral:4.6088e-04 L11_spectral:4.5289e-04 L12_spectral:4.2395e-04 train_time:376572ms step_avg:42.79ms +[2025-09-11 11:54:44] [Rank 0] step:8801/10000 train_time:378785ms step_avg:43.04ms +[2025-09-11 11:54:44] [Rank 0] step:8801/10000 train_time:378785ms step_avg:43.04ms +[2025-09-11 11:54:45] [Rank 0] step:8821/10000 train_time:379529ms step_avg:43.03ms +[2025-09-11 11:54:45] [Rank 0] step:8821/10000 train_time:379529ms step_avg:43.03ms +[2025-09-11 11:54:45] [Rank 0] step:8841/10000 train_time:380240ms step_avg:43.01ms +[2025-09-11 11:54:45] [Rank 0] step:8841/10000 train_time:380240ms step_avg:43.01ms +[2025-09-11 11:54:46] [Rank 0] step:8861/10000 train_time:380949ms step_avg:42.99ms +[2025-09-11 11:54:46] [Rank 0] step:8861/10000 train_time:380949ms step_avg:42.99ms +[2025-09-11 11:54:47] [Rank 0] step:8881/10000 train_time:381659ms step_avg:42.97ms +[2025-09-11 11:54:47] [Rank 0] step:8881/10000 
train_time:381659ms step_avg:42.97ms +[2025-09-11 11:54:47] [Rank 0] step:8901/10000 train_time:382371ms step_avg:42.96ms +[2025-09-11 11:54:47] [Rank 0] step:8901/10000 train_time:382371ms step_avg:42.96ms +[2025-09-11 11:54:48] [Rank 0] step:8921/10000 train_time:383077ms step_avg:42.94ms +[2025-09-11 11:54:48] [Rank 0] step:8921/10000 train_time:383077ms step_avg:42.94ms +[2025-09-11 11:54:49] [Rank 0] step:8941/10000 train_time:383791ms step_avg:42.92ms +[2025-09-11 11:54:49] [Rank 0] step:8941/10000 train_time:383791ms step_avg:42.92ms +[2025-09-11 11:54:50] [Rank 0] step:8961/10000 train_time:384508ms step_avg:42.91ms +[2025-09-11 11:54:50] [Rank 0] step:8961/10000 train_time:384508ms step_avg:42.91ms +[2025-09-11 11:54:50] [Rank 0] step:8981/10000 train_time:385222ms step_avg:42.89ms +[2025-09-11 11:54:50] [Rank 0] step:8981/10000 train_time:385222ms step_avg:42.89ms +[2025-09-11 11:54:51] [Rank 0] step:9001/10000 train_time:385928ms step_avg:42.88ms +[2025-09-11 11:54:51] [Rank 0] step:9001/10000 train_time:385928ms step_avg:42.88ms +[2025-09-11 11:54:52] [Rank 0] step:9021/10000 train_time:386638ms step_avg:42.86ms +[2025-09-11 11:54:52] [Rank 0] step:9021/10000 train_time:386638ms step_avg:42.86ms +[2025-09-11 11:54:52] [Rank 0] step:9041/10000 train_time:387351ms step_avg:42.84ms +[2025-09-11 11:54:52] [Rank 0] step:9041/10000 train_time:387351ms step_avg:42.84ms +[2025-09-11 11:54:53] [Rank 0] step:9061/10000 train_time:388060ms step_avg:42.83ms +[2025-09-11 11:54:53] [Rank 0] step:9061/10000 train_time:388060ms step_avg:42.83ms +[2025-09-11 11:54:54] [Rank 0] step:9081/10000 train_time:388773ms step_avg:42.81ms +[2025-09-11 11:54:54] [Rank 0] step:9081/10000 train_time:388773ms step_avg:42.81ms +[2025-09-11 11:54:55] [Rank 0] step:9101/10000 train_time:389487ms step_avg:42.80ms +[2025-09-11 11:54:55] [Rank 0] step:9101/10000 train_time:389487ms step_avg:42.80ms +[2025-09-11 11:54:55] [Rank 0] step:9121/10000 train_time:390202ms step_avg:42.78ms 
+[2025-09-11 11:54:55] [Rank 0] step:9121/10000 train_time:390202ms step_avg:42.78ms +[2025-09-11 11:54:56] [Rank 0] step:9141/10000 train_time:390910ms step_avg:42.76ms +[2025-09-11 11:54:56] [Rank 0] step:9141/10000 train_time:390910ms step_avg:42.76ms +[2025-09-11 11:54:57] [Rank 0] step:9161/10000 train_time:391623ms step_avg:42.75ms +[2025-09-11 11:54:57] [Rank 0] step:9161/10000 train_time:391623ms step_avg:42.75ms +[2025-09-11 11:54:57] [Rank 0] step:9181/10000 train_time:392336ms step_avg:42.73ms +[2025-09-11 11:54:57] [Rank 0] step:9181/10000 train_time:392336ms step_avg:42.73ms +[2025-09-11 11:54:58] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 11:54:58] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 11:54:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 11:54:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 11:55:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 11:55:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 11:55:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:55:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:55:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 11:55:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 11:55:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 11:55:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 11:55:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 11:55:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 11:55:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 11:55:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 11:55:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 11:55:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 11:55:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 11:55:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 11:55:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 11:55:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 11:55:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 11:55:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 11:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 11:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 11:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 11:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 11:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 11:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 11:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 11:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 11:55:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 11:55:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 11:55:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 11:55:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 11:55:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 11:55:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 11:55:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 11:55:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 11:55:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 11:55:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 11:55:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 11:55:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 11:55:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:55:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:55:10] [Rank 0] PRINT: step:9200/10000 val_loss:4.5394 total_sharp:8.2222e-04 L1_sharp:1.0098e-02 L2_sharp:1.4580e-02 L3_sharp:2.0340e-02 L4_sharp:3.1197e-02 L5_sharp:4.0973e-02 L6_sharp:7.0326e-02 L7_sharp:1.0208e-01 L8_sharp:1.4638e-01 L9_sharp:1.9593e-01 L10_sharp:3.0329e-01 L11_sharp:3.3638e-01 L12_sharp:1.2264e+00 total_fnorm:2.1719e+00 total_l1_linf:1.3440e+03 total_spectral:1.0938e+00 L1_fnorm:1.7700e-02 L2_fnorm:1.7700e-02 L3_fnorm:1.7700e-02 L4_fnorm:1.7578e-02 L5_fnorm:1.7578e-02 L6_fnorm:1.7578e-02 L7_fnorm:1.7700e-02 L8_fnorm:1.7578e-02 L9_fnorm:1.7578e-02 L10_fnorm:1.7456e-02 L11_fnorm:1.7456e-02 L12_fnorm:1.6846e-02 L1_l1linf:2.5177e-03 L2_l1linf:2.5482e-03 L3_l1linf:2.5330e-03 L4_l1linf:2.5330e-03 L5_l1linf:2.5940e-03 L6_l1linf:2.6245e-03 L7_l1linf:2.7161e-03 L8_l1linf:2.8381e-03 L9_l1linf:2.7466e-03 L10_l1linf:2.7008e-03 L11_l1linf:3.0365e-03 L12_l1linf:2.7618e-03 L1_spectral:3.3637e-04 L2_spectral:3.3231e-04 L3_spectral:3.3380e-04 L4_spectral:3.2819e-04 L5_spectral:3.2880e-04 L6_spectral:3.2854e-04 L7_spectral:3.2399e-04 L8_spectral:3.1874e-04 L9_spectral:3.1993e-04 L10_spectral:3.1071e-04 L11_spectral:3.0436e-04 L12_spectral:2.8645e-04 train_time:393029ms step_avg:42.72ms +[2025-09-11 11:55:10] [Rank 0] PRINT: step:9200/10000 val_loss:4.5394 total_sharp:8.2222e-04 L1_sharp:1.0098e-02 L2_sharp:1.4580e-02 L3_sharp:2.0340e-02 L4_sharp:3.1197e-02 L5_sharp:4.0973e-02 L6_sharp:7.0326e-02 L7_sharp:1.0208e-01 L8_sharp:1.4638e-01 L9_sharp:1.9593e-01 L10_sharp:3.0329e-01 L11_sharp:3.3638e-01 L12_sharp:1.2264e+00 total_fnorm:2.1719e+00 total_l1_linf:1.3440e+03 total_spectral:1.0938e+00 L1_fnorm:1.7700e-02 L2_fnorm:1.7700e-02 L3_fnorm:1.7700e-02 L4_fnorm:1.7578e-02 L5_fnorm:1.7578e-02 L6_fnorm:1.7578e-02 L7_fnorm:1.7700e-02 L8_fnorm:1.7578e-02 L9_fnorm:1.7578e-02 L10_fnorm:1.7456e-02 L11_fnorm:1.7456e-02 L12_fnorm:1.6846e-02 L1_l1linf:2.5177e-03 L2_l1linf:2.5482e-03 L3_l1linf:2.5330e-03 L4_l1linf:2.5330e-03 L5_l1linf:2.5940e-03 
L6_l1linf:2.6245e-03 L7_l1linf:2.7161e-03 L8_l1linf:2.8381e-03 L9_l1linf:2.7466e-03 L10_l1linf:2.7008e-03 L11_l1linf:3.0365e-03 L12_l1linf:2.7618e-03 L1_spectral:3.3637e-04 L2_spectral:3.3231e-04 L3_spectral:3.3380e-04 L4_spectral:3.2819e-04 L5_spectral:3.2880e-04 L6_spectral:3.2854e-04 L7_spectral:3.2399e-04 L8_spectral:3.1874e-04 L9_spectral:3.1993e-04 L10_spectral:3.1071e-04 L11_spectral:3.0436e-04 L12_spectral:2.8645e-04 train_time:393029ms step_avg:42.72ms +[2025-09-11 11:55:13] [Rank 0] step:9201/10000 train_time:395890ms step_avg:43.03ms +[2025-09-11 11:55:13] [Rank 0] step:9201/10000 train_time:395890ms step_avg:43.03ms +[2025-09-11 11:55:14] [Rank 0] step:9221/10000 train_time:396800ms step_avg:43.03ms +[2025-09-11 11:55:14] [Rank 0] step:9221/10000 train_time:396800ms step_avg:43.03ms +[2025-09-11 11:55:14] [Rank 0] step:9241/10000 train_time:397509ms step_avg:43.02ms +[2025-09-11 11:55:14] [Rank 0] step:9241/10000 train_time:397509ms step_avg:43.02ms +[2025-09-11 11:55:15] [Rank 0] step:9261/10000 train_time:398222ms step_avg:43.00ms +[2025-09-11 11:55:15] [Rank 0] step:9261/10000 train_time:398222ms step_avg:43.00ms +[2025-09-11 11:55:16] [Rank 0] step:9281/10000 train_time:398934ms step_avg:42.98ms +[2025-09-11 11:55:16] [Rank 0] step:9281/10000 train_time:398934ms step_avg:42.98ms +[2025-09-11 11:55:17] [Rank 0] step:9301/10000 train_time:399927ms step_avg:43.00ms +[2025-09-11 11:55:17] [Rank 0] step:9301/10000 train_time:399927ms step_avg:43.00ms +[2025-09-11 11:55:18] [Rank 0] step:9321/10000 train_time:400639ms step_avg:42.98ms +[2025-09-11 11:55:18] [Rank 0] step:9321/10000 train_time:400639ms step_avg:42.98ms +[2025-09-11 11:55:18] [Rank 0] step:9341/10000 train_time:401347ms step_avg:42.97ms +[2025-09-11 11:55:18] [Rank 0] step:9341/10000 train_time:401347ms step_avg:42.97ms +[2025-09-11 11:55:19] [Rank 0] step:9361/10000 train_time:402308ms step_avg:42.98ms +[2025-09-11 11:55:19] [Rank 0] step:9361/10000 train_time:402308ms step_avg:42.98ms 
+[2025-09-11 11:55:20] [Rank 0] step:9381/10000 train_time:403018ms step_avg:42.96ms +[2025-09-11 11:55:20] [Rank 0] step:9381/10000 train_time:403018ms step_avg:42.96ms +[2025-09-11 11:55:21] [Rank 0] step:9401/10000 train_time:403730ms step_avg:42.95ms +[2025-09-11 11:55:21] [Rank 0] step:9401/10000 train_time:403730ms step_avg:42.95ms +[2025-09-11 11:55:21] [Rank 0] step:9421/10000 train_time:404442ms step_avg:42.93ms +[2025-09-11 11:55:21] [Rank 0] step:9421/10000 train_time:404442ms step_avg:42.93ms +[2025-09-11 11:55:22] [Rank 0] step:9441/10000 train_time:405155ms step_avg:42.91ms +[2025-09-11 11:55:22] [Rank 0] step:9441/10000 train_time:405155ms step_avg:42.91ms +[2025-09-11 11:55:23] [Rank 0] step:9461/10000 train_time:405866ms step_avg:42.90ms +[2025-09-11 11:55:23] [Rank 0] step:9461/10000 train_time:405866ms step_avg:42.90ms +[2025-09-11 11:55:23] [Rank 0] step:9481/10000 train_time:406577ms step_avg:42.88ms +[2025-09-11 11:55:23] [Rank 0] step:9481/10000 train_time:406577ms step_avg:42.88ms +[2025-09-11 11:55:24] [Rank 0] step:9501/10000 train_time:407289ms step_avg:42.87ms +[2025-09-11 11:55:24] [Rank 0] step:9501/10000 train_time:407289ms step_avg:42.87ms +[2025-09-11 11:55:25] [Rank 0] step:9521/10000 train_time:408005ms step_avg:42.85ms +[2025-09-11 11:55:25] [Rank 0] step:9521/10000 train_time:408005ms step_avg:42.85ms +[2025-09-11 11:55:26] [Rank 0] step:9541/10000 train_time:408714ms step_avg:42.84ms +[2025-09-11 11:55:26] [Rank 0] step:9541/10000 train_time:408714ms step_avg:42.84ms +[2025-09-11 11:55:26] [Rank 0] step:9561/10000 train_time:409425ms step_avg:42.82ms +[2025-09-11 11:55:26] [Rank 0] step:9561/10000 train_time:409425ms step_avg:42.82ms +[2025-09-11 11:55:27] [Rank 0] step:9581/10000 train_time:410138ms step_avg:42.81ms +[2025-09-11 11:55:27] [Rank 0] step:9581/10000 train_time:410138ms step_avg:42.81ms +[2025-09-11 11:55:28] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 11:55:28] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 11:55:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 11:55:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 11:55:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 11:55:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 11:55:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:55:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:55:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 11:55:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 11:55:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 11:55:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 11:55:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 11:55:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 11:55:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 11:55:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 11:55:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 11:55:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 11:55:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 11:55:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 11:55:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 11:55:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 11:55:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 11:55:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 11:55:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 11:55:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 11:55:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 11:55:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 11:55:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 11:55:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 11:55:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 11:55:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 11:55:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 11:55:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 11:55:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 11:55:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 11:55:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 11:55:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 11:55:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 11:55:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 11:55:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 11:55:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 11:55:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 11:55:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 11:55:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:55:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:55:39] [Rank 0] PRINT: step:9600/10000 val_loss:4.5348 total_sharp:4.1849e-04 L1_sharp:1.1289e-02 L2_sharp:1.1045e-02 L3_sharp:1.5053e-02 L4_sharp:2.4420e-02 L5_sharp:3.4554e-02 L6_sharp:5.1144e-02 L7_sharp:7.2187e-02 L8_sharp:8.9872e-02 L9_sharp:1.1906e-01 L10_sharp:1.7783e-01 L11_sharp:2.0020e-01 L12_sharp:5.9047e-01 total_fnorm:1.2188e+00 total_l1_linf:6.3600e+02 total_spectral:6.0938e-01 L1_fnorm:9.8267e-03 L2_fnorm:9.8267e-03 L3_fnorm:9.8267e-03 L4_fnorm:9.8267e-03 L5_fnorm:9.8267e-03 L6_fnorm:9.8877e-03 L7_fnorm:9.8877e-03 L8_fnorm:9.8267e-03 L9_fnorm:9.8877e-03 L10_fnorm:9.7656e-03 L11_fnorm:9.7046e-03 L12_fnorm:9.3384e-03 L1_l1linf:1.1826e-03 L2_l1linf:1.2054e-03 L3_l1linf:1.1902e-03 L4_l1linf:1.2360e-03 L5_l1linf:1.2207e-03 L6_l1linf:1.2665e-03 L7_l1linf:1.2512e-03 L8_l1linf:1.3199e-03 L9_l1linf:1.2894e-03 L10_l1linf:1.3199e-03 L11_l1linf:1.3504e-03 L12_l1linf:1.3199e-03 L1_spectral:1.9500e-04 L2_spectral:1.8990e-04 L3_spectral:1.8771e-04 L4_spectral:1.8835e-04 L5_spectral:1.8607e-04 L6_spectral:1.8482e-04 L7_spectral:1.8611e-04 L8_spectral:1.8214e-04 L9_spectral:1.8447e-04 L10_spectral:1.7738e-04 L11_spectral:1.7413e-04 L12_spectral:1.6190e-04 train_time:410826ms step_avg:42.79ms +[2025-09-11 11:55:39] [Rank 0] PRINT: step:9600/10000 
val_loss:4.5348 total_sharp:4.1849e-04 L1_sharp:1.1289e-02 L2_sharp:1.1045e-02 L3_sharp:1.5053e-02 L4_sharp:2.4420e-02 L5_sharp:3.4554e-02 L6_sharp:5.1144e-02 L7_sharp:7.2187e-02 L8_sharp:8.9872e-02 L9_sharp:1.1906e-01 L10_sharp:1.7783e-01 L11_sharp:2.0020e-01 L12_sharp:5.9047e-01 total_fnorm:1.2188e+00 total_l1_linf:6.3600e+02 total_spectral:6.0938e-01 L1_fnorm:9.8267e-03 L2_fnorm:9.8267e-03 L3_fnorm:9.8267e-03 L4_fnorm:9.8267e-03 L5_fnorm:9.8267e-03 L6_fnorm:9.8877e-03 L7_fnorm:9.8877e-03 L8_fnorm:9.8267e-03 L9_fnorm:9.8877e-03 L10_fnorm:9.7656e-03 L11_fnorm:9.7046e-03 L12_fnorm:9.3384e-03 L1_l1linf:1.1826e-03 L2_l1linf:1.2054e-03 L3_l1linf:1.1902e-03 L4_l1linf:1.2360e-03 L5_l1linf:1.2207e-03 L6_l1linf:1.2665e-03 L7_l1linf:1.2512e-03 L8_l1linf:1.3199e-03 L9_l1linf:1.2894e-03 L10_l1linf:1.3199e-03 L11_l1linf:1.3504e-03 L12_l1linf:1.3199e-03 L1_spectral:1.9500e-04 L2_spectral:1.8990e-04 L3_spectral:1.8771e-04 L4_spectral:1.8835e-04 L5_spectral:1.8607e-04 L6_spectral:1.8482e-04 L7_spectral:1.8611e-04 L8_spectral:1.8214e-04 L9_spectral:1.8447e-04 L10_spectral:1.7738e-04 L11_spectral:1.7413e-04 L12_spectral:1.6190e-04 train_time:410826ms step_avg:42.79ms +[2025-09-11 11:55:41] [Rank 0] step:9601/10000 train_time:412875ms step_avg:43.00ms +[2025-09-11 11:55:41] [Rank 0] step:9601/10000 train_time:412875ms step_avg:43.00ms +[2025-09-11 11:55:42] [Rank 0] step:9621/10000 train_time:413602ms step_avg:42.99ms +[2025-09-11 11:55:42] [Rank 0] step:9621/10000 train_time:413602ms step_avg:42.99ms +[2025-09-11 11:55:43] [Rank 0] step:9641/10000 train_time:414318ms step_avg:42.97ms +[2025-09-11 11:55:43] [Rank 0] step:9641/10000 train_time:414318ms step_avg:42.97ms +[2025-09-11 11:55:43] [Rank 0] step:9661/10000 train_time:415040ms step_avg:42.96ms +[2025-09-11 11:55:43] [Rank 0] step:9661/10000 train_time:415040ms step_avg:42.96ms +[2025-09-11 11:55:44] [Rank 0] step:9681/10000 train_time:415756ms step_avg:42.95ms +[2025-09-11 11:55:44] [Rank 0] step:9681/10000 
train_time:415756ms step_avg:42.95ms +[2025-09-11 11:55:45] [Rank 0] step:9701/10000 train_time:416472ms step_avg:42.93ms +[2025-09-11 11:55:45] [Rank 0] step:9701/10000 train_time:416472ms step_avg:42.93ms +[2025-09-11 11:55:45] [Rank 0] step:9721/10000 train_time:417194ms step_avg:42.92ms +[2025-09-11 11:55:45] [Rank 0] step:9721/10000 train_time:417194ms step_avg:42.92ms +[2025-09-11 11:55:46] [Rank 0] step:9741/10000 train_time:417912ms step_avg:42.90ms +[2025-09-11 11:55:46] [Rank 0] step:9741/10000 train_time:417912ms step_avg:42.90ms +[2025-09-11 11:55:47] [Rank 0] step:9761/10000 train_time:418629ms step_avg:42.89ms +[2025-09-11 11:55:47] [Rank 0] step:9761/10000 train_time:418629ms step_avg:42.89ms +[2025-09-11 11:55:48] [Rank 0] step:9781/10000 train_time:419344ms step_avg:42.87ms +[2025-09-11 11:55:48] [Rank 0] step:9781/10000 train_time:419344ms step_avg:42.87ms +[2025-09-11 11:55:48] [Rank 0] step:9801/10000 train_time:420066ms step_avg:42.86ms +[2025-09-11 11:55:48] [Rank 0] step:9801/10000 train_time:420066ms step_avg:42.86ms +[2025-09-11 11:55:49] [Rank 0] step:9821/10000 train_time:420784ms step_avg:42.85ms +[2025-09-11 11:55:49] [Rank 0] step:9821/10000 train_time:420784ms step_avg:42.85ms +[2025-09-11 11:55:50] [Rank 0] step:9841/10000 train_time:421505ms step_avg:42.83ms +[2025-09-11 11:55:50] [Rank 0] step:9841/10000 train_time:421505ms step_avg:42.83ms +[2025-09-11 11:55:51] [Rank 0] step:9861/10000 train_time:422221ms step_avg:42.82ms +[2025-09-11 11:55:51] [Rank 0] step:9861/10000 train_time:422221ms step_avg:42.82ms +[2025-09-11 11:55:51] [Rank 0] step:9881/10000 train_time:422939ms step_avg:42.80ms +[2025-09-11 11:55:51] [Rank 0] step:9881/10000 train_time:422939ms step_avg:42.80ms +[2025-09-11 11:55:52] [Rank 0] step:9901/10000 train_time:423653ms step_avg:42.79ms +[2025-09-11 11:55:52] [Rank 0] step:9901/10000 train_time:423653ms step_avg:42.79ms +[2025-09-11 11:55:53] [Rank 0] step:9921/10000 train_time:424370ms step_avg:42.77ms 
+[2025-09-11 11:55:53] [Rank 0] step:9921/10000 train_time:424370ms step_avg:42.77ms +[2025-09-11 11:55:53] [Rank 0] step:9941/10000 train_time:425092ms step_avg:42.76ms +[2025-09-11 11:55:53] [Rank 0] step:9941/10000 train_time:425092ms step_avg:42.76ms +[2025-09-11 11:55:54] [Rank 0] step:9961/10000 train_time:425814ms step_avg:42.75ms +[2025-09-11 11:55:54] [Rank 0] step:9961/10000 train_time:425814ms step_avg:42.75ms +[2025-09-11 11:55:55] [Rank 0] step:9981/10000 train_time:426533ms step_avg:42.73ms +[2025-09-11 11:55:55] [Rank 0] step:9981/10000 train_time:426533ms step_avg:42.73ms +[2025-09-11 11:55:56] [Rank 0] step:10000/10000 train_time:427224ms step_avg:42.72ms +[2025-09-11 11:55:56] [Rank 0] step:10000/10000 train_time:427224ms step_avg:42.72ms +[2025-09-11 11:55:56] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 11:55:56] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 11:55:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 11:55:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 11:55:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 11:55:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 11:55:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:55:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:55:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 11:55:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 11:55:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 11:55:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 11:56:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 11:56:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 11:56:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 11:56:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 11:56:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 11:56:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 11:56:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 11:56:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 11:56:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 11:56:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 11:56:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 11:56:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 11:56:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 11:56:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 11:56:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 11:56:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 11:56:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 11:56:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 11:56:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 11:56:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 11:56:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 11:56:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 11:56:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 11:56:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 11:56:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 11:56:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 11:56:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 11:56:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 11:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 11:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 11:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 11:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 11:56:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:56:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:56:07] [Rank 0] PRINT: step:10000/10000 val_loss:4.5330 total_sharp:3.2156e-04 L1_sharp:8.5207e-03 L2_sharp:1.0559e-02 L3_sharp:1.1096e-02 L4_sharp:1.9874e-02 L5_sharp:2.1427e-02 L6_sharp:3.9339e-02 L7_sharp:5.4001e-02 L8_sharp:7.4296e-02 L9_sharp:9.8611e-02 L10_sharp:1.3500e-01 L11_sharp:1.5314e-01 L12_sharp:6.4202e-01 total_fnorm:4.5703e-01 total_l1_linf:1.7400e+02 total_spectral:2.2949e-01 L1_fnorm:3.8452e-03 L2_fnorm:3.8300e-03 L3_fnorm:3.8452e-03 L4_fnorm:3.8452e-03 L5_fnorm:3.8452e-03 L6_fnorm:3.8757e-03 L7_fnorm:3.8452e-03 L8_fnorm:3.8147e-03 L9_fnorm:3.8452e-03 L10_fnorm:3.8300e-03 L11_fnorm:3.7994e-03 L12_fnorm:3.6774e-03 L1_l1linf:3.7575e-04 L2_l1linf:3.8719e-04 L3_l1linf:4.0627e-04 L4_l1linf:3.8147e-04 L5_l1linf:3.9482e-04 L6_l1linf:3.8338e-04 L7_l1linf:4.0054e-04 L8_l1linf:3.8910e-04 L9_l1linf:3.9291e-04 L10_l1linf:4.2343e-04 L11_l1linf:4.2152e-04 L12_l1linf:4.1962e-04 L1_spectral:7.7384e-05 L2_spectral:7.6176e-05 L3_spectral:7.5905e-05 L4_spectral:7.5249e-05 L5_spectral:7.5681e-05 L6_spectral:7.5287e-05 L7_spectral:7.5071e-05 L8_spectral:7.3283e-05 L9_spectral:7.3231e-05 L10_spectral:7.1653e-05 L11_spectral:7.0118e-05 L12_spectral:6.7172e-05 train_time:427243ms step_avg:42.72ms +[2025-09-11 11:56:07] [Rank 0] PRINT: step:10000/10000 val_loss:4.5330 total_sharp:3.2156e-04 L1_sharp:8.5207e-03 L2_sharp:1.0559e-02 L3_sharp:1.1096e-02 L4_sharp:1.9874e-02 L5_sharp:2.1427e-02 L6_sharp:3.9339e-02 L7_sharp:5.4001e-02 L8_sharp:7.4296e-02 L9_sharp:9.8611e-02 L10_sharp:1.3500e-01 L11_sharp:1.5314e-01 L12_sharp:6.4202e-01 total_fnorm:4.5703e-01 total_l1_linf:1.7400e+02 total_spectral:2.2949e-01 L1_fnorm:3.8452e-03 L2_fnorm:3.8300e-03 L3_fnorm:3.8452e-03 L4_fnorm:3.8452e-03 L5_fnorm:3.8452e-03 L6_fnorm:3.8757e-03 L7_fnorm:3.8452e-03 L8_fnorm:3.8147e-03 L9_fnorm:3.8452e-03 L10_fnorm:3.8300e-03 L11_fnorm:3.7994e-03 L12_fnorm:3.6774e-03 L1_l1linf:3.7575e-04 L2_l1linf:3.8719e-04 L3_l1linf:4.0627e-04 L4_l1linf:3.8147e-04 L5_l1linf:3.9482e-04 
L6_l1linf:3.8338e-04 L7_l1linf:4.0054e-04 L8_l1linf:3.8910e-04 L9_l1linf:3.9291e-04 L10_l1linf:4.2343e-04 L11_l1linf:4.2152e-04 L12_l1linf:4.1962e-04 L1_spectral:7.7384e-05 L2_spectral:7.6176e-05 L3_spectral:7.5905e-05 L4_spectral:7.5249e-05 L5_spectral:7.5681e-05 L6_spectral:7.5287e-05 L7_spectral:7.5071e-05 L8_spectral:7.3283e-05 L9_spectral:7.3231e-05 L10_spectral:7.1653e-05 L11_spectral:7.0118e-05 L12_spectral:6.7172e-05 train_time:427243ms step_avg:42.72ms +[2025-09-11 11:56:07] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 11:56:07 2025 --- +[2025-09-11 11:56:07] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 11:56:07 2025 --- +[2025-09-11 11:56:07] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 11:56:07] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.002_seed_42/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.002_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..cca2d6717c1f73f0febe94a89cdfd242e0896b48 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.002_seed_42/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.005, + "muon_lr": 0.002, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "665bdf13-c773-43a5-b8c2-4b7c276c4f27", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.002_seed_42/training_log_665bdf13-c773-43a5-b8c2-4b7c276c4f27.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.002_seed_42/training_log_665bdf13-c773-43a5-b8c2-4b7c276c4f27.txt new file mode 100644 index 0000000000000000000000000000000000000000..0f16447cc2afdc516b16bdd5813ed90735a0e43d --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.002_seed_42/training_log_665bdf13-c773-43a5-b8c2-4b7c276c4f27.txt @@ -0,0 +1,4264 @@ +[2025-09-11 11:28:42] [Rank 0] PRINT: --- Script Start: Thu Sep 11 11:28:42 2025 --- +[2025-09-11 11:28:42] [Rank 0] PRINT: --- Script Start: Thu Sep 11 11:28:42 2025 --- +[2025-09-11 11:28:42] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005, muon_lr=0.002, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 11:28:42] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005, muon_lr=0.002, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 11:28:42] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 11:28:42] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 11:28:42] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-11 11:28:42] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-11 11:28:42] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.002_seed_42 +[2025-09-11 11:28:42] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.002_seed_42 +[2025-09-11 11:28:42] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses 
import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert 
header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." 
+ "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + 
train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 11:28:42] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 11:28:42] [Rank 0] PRINT: Constructing model... +[2025-09-11 11:28:42] [Rank 0] PRINT: Constructing model... +[2025-09-11 11:28:43] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 11:28:43] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 11:28:43] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 11:28:43] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 11:28:43] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 11:28:43] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 11:28:43] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 11:28:43] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 11:28:43] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 11:28:43] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 11:28:45] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 11:28:45] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 11:28:45] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 11:28:45] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 11:28:46] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 11:28:46] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 11:28:51] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 11:28:51] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 11:28:51] [Rank 0] PRINT: Starting warmup... +[2025-09-11 11:28:51] [Rank 0] PRINT: Starting warmup... +[2025-09-11 11:29:30] [Rank 0] PRINT: Warmup complete. +[2025-09-11 11:29:30] [Rank 0] PRINT: Warmup complete. +[2025-09-11 11:29:30] [Rank 0] PRINT: Starting training... +[2025-09-11 11:29:30] [Rank 0] PRINT: Starting training... 
+[2025-09-11 11:29:31] [Rank 0] step:21/10000 train_time:941ms step_avg:44.80ms +[2025-09-11 11:29:31] [Rank 0] step:21/10000 train_time:941ms step_avg:44.80ms +[2025-09-11 11:29:32] [Rank 0] step:41/10000 train_time:1668ms step_avg:40.68ms +[2025-09-11 11:29:32] [Rank 0] step:41/10000 train_time:1668ms step_avg:40.68ms +[2025-09-11 11:29:33] [Rank 0] step:61/10000 train_time:2395ms step_avg:39.26ms +[2025-09-11 11:29:33] [Rank 0] step:61/10000 train_time:2395ms step_avg:39.26ms +[2025-09-11 11:29:33] [Rank 0] step:81/10000 train_time:3121ms step_avg:38.53ms +[2025-09-11 11:29:33] [Rank 0] step:81/10000 train_time:3121ms step_avg:38.53ms +[2025-09-11 11:29:34] [Rank 0] step:101/10000 train_time:3847ms step_avg:38.09ms +[2025-09-11 11:29:34] [Rank 0] step:101/10000 train_time:3847ms step_avg:38.09ms +[2025-09-11 11:29:35] [Rank 0] step:121/10000 train_time:4574ms step_avg:37.80ms +[2025-09-11 11:29:35] [Rank 0] step:121/10000 train_time:4574ms step_avg:37.80ms +[2025-09-11 11:29:36] [Rank 0] step:141/10000 train_time:5300ms step_avg:37.59ms +[2025-09-11 11:29:36] [Rank 0] step:141/10000 train_time:5300ms step_avg:37.59ms +[2025-09-11 11:29:36] [Rank 0] step:161/10000 train_time:6026ms step_avg:37.43ms +[2025-09-11 11:29:36] [Rank 0] step:161/10000 train_time:6026ms step_avg:37.43ms +[2025-09-11 11:29:37] [Rank 0] step:181/10000 train_time:6753ms step_avg:37.31ms +[2025-09-11 11:29:37] [Rank 0] step:181/10000 train_time:6753ms step_avg:37.31ms +[2025-09-11 11:29:38] [Rank 0] step:201/10000 train_time:7479ms step_avg:37.21ms +[2025-09-11 11:29:38] [Rank 0] step:201/10000 train_time:7479ms step_avg:37.21ms +[2025-09-11 11:29:38] [Rank 0] step:221/10000 train_time:8205ms step_avg:37.13ms +[2025-09-11 11:29:38] [Rank 0] step:221/10000 train_time:8205ms step_avg:37.13ms +[2025-09-11 11:29:39] [Rank 0] step:241/10000 train_time:8931ms step_avg:37.06ms +[2025-09-11 11:29:39] [Rank 0] step:241/10000 train_time:8931ms step_avg:37.06ms +[2025-09-11 11:29:40] [Rank 0] 
step:261/10000 train_time:9656ms step_avg:37.00ms +[2025-09-11 11:29:40] [Rank 0] step:261/10000 train_time:9656ms step_avg:37.00ms +[2025-09-11 11:29:41] [Rank 0] step:281/10000 train_time:10381ms step_avg:36.94ms +[2025-09-11 11:29:41] [Rank 0] step:281/10000 train_time:10381ms step_avg:36.94ms +[2025-09-11 11:29:41] [Rank 0] step:301/10000 train_time:11108ms step_avg:36.90ms +[2025-09-11 11:29:41] [Rank 0] step:301/10000 train_time:11108ms step_avg:36.90ms +[2025-09-11 11:29:42] [Rank 0] step:321/10000 train_time:11833ms step_avg:36.86ms +[2025-09-11 11:29:42] [Rank 0] step:321/10000 train_time:11833ms step_avg:36.86ms +[2025-09-11 11:29:43] [Rank 0] step:341/10000 train_time:12558ms step_avg:36.83ms +[2025-09-11 11:29:43] [Rank 0] step:341/10000 train_time:12558ms step_avg:36.83ms +[2025-09-11 11:29:44] [Rank 0] step:361/10000 train_time:13283ms step_avg:36.79ms +[2025-09-11 11:29:44] [Rank 0] step:361/10000 train_time:13283ms step_avg:36.79ms +[2025-09-11 11:29:44] [Rank 0] step:381/10000 train_time:14008ms step_avg:36.77ms +[2025-09-11 11:29:44] [Rank 0] step:381/10000 train_time:14008ms step_avg:36.77ms +[2025-09-11 11:29:45] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 11:29:45] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 11:29:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 11:29:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 11:30:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 11:30:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 11:30:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:30:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 11:30:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 11:30:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 11:30:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 11:30:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 11:30:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 11:30:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 11:30:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 11:30:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 11:30:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 11:30:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 11:30:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 11:30:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 11:30:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 11:30:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 11:30:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 11:30:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 11:30:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 11:30:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 11:30:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 11:30:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 11:30:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 11:30:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 11:30:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 11:30:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 11:30:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 11:30:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 11:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 11:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 11:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 11:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 11:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 11:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 11:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 11:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 11:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 11:30:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 11:30:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:30:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:30:31] [Rank 0] PRINT: step:400/10000 val_loss:6.3383 total_sharp:3.8035e-03 L1_sharp:8.5670e-02 L2_sharp:7.7672e-02 L3_sharp:7.0731e-02 L4_sharp:8.2741e-02 L5_sharp:9.4738e-02 L6_sharp:1.1214e-01 L7_sharp:1.3873e-01 L8_sharp:1.8979e-01 L9_sharp:2.2998e-01 L10_sharp:2.6533e-01 L11_sharp:3.2107e-01 L12_sharp:4.9829e-01 total_fnorm:2.0673e+01 total_l1_linf:5.1046e+04 total_spectral:1.0337e+01 L1_fnorm:2.4434e-01 L2_fnorm:2.4422e-01 L3_fnorm:2.4379e-01 L4_fnorm:2.4276e-01 L5_fnorm:2.4154e-01 L6_fnorm:2.3916e-01 L7_fnorm:2.3940e-01 L8_fnorm:2.3688e-01 L9_fnorm:2.3576e-01 L10_fnorm:2.2760e-01 L11_fnorm:2.2071e-01 L12_fnorm:2.1378e-01 L1_l1linf:8.9434e-02 L2_l1linf:8.8747e-02 L3_l1linf:8.8153e-02 L4_l1linf:8.7865e-02 L5_l1linf:8.7976e-02 L6_l1linf:8.7553e-02 L7_l1linf:8.7598e-02 L8_l1linf:8.7800e-02 L9_l1linf:8.6196e-02 L10_l1linf:8.4941e-02 L11_l1linf:8.0752e-02 L12_l1linf:7.3039e-02 L1_spectral:2.4097e-03 L2_spectral:2.4098e-03 L3_spectral:2.4089e-03 L4_spectral:2.4100e-03 L5_spectral:2.4095e-03 L6_spectral:2.4100e-03 L7_spectral:2.4106e-03 L8_spectral:2.4117e-03 L9_spectral:2.4091e-03 L10_spectral:2.4095e-03 L11_spectral:2.4086e-03 L12_spectral:2.4089e-03 train_time:14713ms step_avg:36.78ms +[2025-09-11 11:30:31] [Rank 0] PRINT: step:400/10000 val_loss:6.3383 total_sharp:3.8035e-03 L1_sharp:8.5670e-02 L2_sharp:7.7672e-02 L3_sharp:7.0731e-02 L4_sharp:8.2741e-02 L5_sharp:9.4738e-02 L6_sharp:1.1214e-01 L7_sharp:1.3873e-01 L8_sharp:1.8979e-01 L9_sharp:2.2998e-01 L10_sharp:2.6533e-01 L11_sharp:3.2107e-01 L12_sharp:4.9829e-01 total_fnorm:2.0673e+01 total_l1_linf:5.1046e+04 total_spectral:1.0337e+01 L1_fnorm:2.4434e-01 L2_fnorm:2.4422e-01 L3_fnorm:2.4379e-01 L4_fnorm:2.4276e-01 L5_fnorm:2.4154e-01 L6_fnorm:2.3916e-01 L7_fnorm:2.3940e-01 L8_fnorm:2.3688e-01 L9_fnorm:2.3576e-01 L10_fnorm:2.2760e-01 L11_fnorm:2.2071e-01 L12_fnorm:2.1378e-01 L1_l1linf:8.9434e-02 L2_l1linf:8.8747e-02 L3_l1linf:8.8153e-02 L4_l1linf:8.7865e-02 L5_l1linf:8.7976e-02 
L6_l1linf:8.7553e-02 L7_l1linf:8.7598e-02 L8_l1linf:8.7800e-02 L9_l1linf:8.6196e-02 L10_l1linf:8.4941e-02 L11_l1linf:8.0752e-02 L12_l1linf:7.3039e-02 L1_spectral:2.4097e-03 L2_spectral:2.4098e-03 L3_spectral:2.4089e-03 L4_spectral:2.4100e-03 L5_spectral:2.4095e-03 L6_spectral:2.4100e-03 L7_spectral:2.4106e-03 L8_spectral:2.4117e-03 L9_spectral:2.4091e-03 L10_spectral:2.4095e-03 L11_spectral:2.4086e-03 L12_spectral:2.4089e-03 train_time:14713ms step_avg:36.78ms +[2025-09-11 11:31:01] [Rank 0] step:401/10000 train_time:43976ms step_avg:109.66ms +[2025-09-11 11:31:01] [Rank 0] step:401/10000 train_time:43976ms step_avg:109.66ms +[2025-09-11 11:31:02] [Rank 0] step:421/10000 train_time:45883ms step_avg:108.99ms +[2025-09-11 11:31:02] [Rank 0] step:421/10000 train_time:45883ms step_avg:108.99ms +[2025-09-11 11:31:03] [Rank 0] step:441/10000 train_time:46524ms step_avg:105.50ms +[2025-09-11 11:31:03] [Rank 0] step:441/10000 train_time:46524ms step_avg:105.50ms +[2025-09-11 11:31:04] [Rank 0] step:461/10000 train_time:47420ms step_avg:102.86ms +[2025-09-11 11:31:04] [Rank 0] step:461/10000 train_time:47420ms step_avg:102.86ms +[2025-09-11 11:31:05] [Rank 0] step:481/10000 train_time:48060ms step_avg:99.92ms +[2025-09-11 11:31:05] [Rank 0] step:481/10000 train_time:48060ms step_avg:99.92ms +[2025-09-11 11:31:05] [Rank 0] step:501/10000 train_time:48700ms step_avg:97.20ms +[2025-09-11 11:31:05] [Rank 0] step:501/10000 train_time:48700ms step_avg:97.20ms +[2025-09-11 11:31:06] [Rank 0] step:521/10000 train_time:49340ms step_avg:94.70ms +[2025-09-11 11:31:06] [Rank 0] step:521/10000 train_time:49340ms step_avg:94.70ms +[2025-09-11 11:31:07] [Rank 0] step:541/10000 train_time:49979ms step_avg:92.38ms +[2025-09-11 11:31:07] [Rank 0] step:541/10000 train_time:49979ms step_avg:92.38ms +[2025-09-11 11:31:07] [Rank 0] step:561/10000 train_time:50617ms step_avg:90.23ms +[2025-09-11 11:31:07] [Rank 0] step:561/10000 train_time:50617ms step_avg:90.23ms +[2025-09-11 11:31:08] [Rank 0] 
step:581/10000 train_time:51256ms step_avg:88.22ms +[2025-09-11 11:31:08] [Rank 0] step:581/10000 train_time:51256ms step_avg:88.22ms +[2025-09-11 11:31:08] [Rank 0] step:601/10000 train_time:51895ms step_avg:86.35ms +[2025-09-11 11:31:08] [Rank 0] step:601/10000 train_time:51895ms step_avg:86.35ms +[2025-09-11 11:31:09] [Rank 0] step:621/10000 train_time:52534ms step_avg:84.60ms +[2025-09-11 11:31:09] [Rank 0] step:621/10000 train_time:52534ms step_avg:84.60ms +[2025-09-11 11:31:10] [Rank 0] step:641/10000 train_time:53174ms step_avg:82.95ms +[2025-09-11 11:31:10] [Rank 0] step:641/10000 train_time:53174ms step_avg:82.95ms +[2025-09-11 11:31:10] [Rank 0] step:661/10000 train_time:53814ms step_avg:81.41ms +[2025-09-11 11:31:10] [Rank 0] step:661/10000 train_time:53814ms step_avg:81.41ms +[2025-09-11 11:31:11] [Rank 0] step:681/10000 train_time:54453ms step_avg:79.96ms +[2025-09-11 11:31:11] [Rank 0] step:681/10000 train_time:54453ms step_avg:79.96ms +[2025-09-11 11:31:12] [Rank 0] step:701/10000 train_time:55092ms step_avg:78.59ms +[2025-09-11 11:31:12] [Rank 0] step:701/10000 train_time:55092ms step_avg:78.59ms +[2025-09-11 11:31:12] [Rank 0] step:721/10000 train_time:55730ms step_avg:77.30ms +[2025-09-11 11:31:12] [Rank 0] step:721/10000 train_time:55730ms step_avg:77.30ms +[2025-09-11 11:31:13] [Rank 0] step:741/10000 train_time:56370ms step_avg:76.07ms +[2025-09-11 11:31:13] [Rank 0] step:741/10000 train_time:56370ms step_avg:76.07ms +[2025-09-11 11:31:14] [Rank 0] step:761/10000 train_time:57013ms step_avg:74.92ms +[2025-09-11 11:31:14] [Rank 0] step:761/10000 train_time:57013ms step_avg:74.92ms +[2025-09-11 11:31:14] [Rank 0] step:781/10000 train_time:57658ms step_avg:73.83ms +[2025-09-11 11:31:14] [Rank 0] step:781/10000 train_time:57658ms step_avg:73.83ms +[2025-09-11 11:31:15] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 11:31:15] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 11:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 11:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 11:31:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 11:31:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 11:31:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:31:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:31:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 11:31:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 11:31:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 11:31:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 11:31:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 11:31:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 11:31:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 11:31:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 11:31:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 11:31:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 11:31:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 11:31:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 11:31:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 11:31:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 11:31:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 11:31:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 11:31:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 11:31:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 11:31:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 11:31:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 11:31:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 11:31:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 11:31:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 11:31:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 11:31:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 11:31:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 11:31:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 11:31:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 11:31:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 11:31:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 11:31:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 11:31:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 11:31:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... 
+[2025-09-11 11:31:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 11:31:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 11:31:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 11:31:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:31:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:31:57] [Rank 0] PRINT: step:800/10000 val_loss:5.8819 total_sharp:4.5653e-03 L1_sharp:8.3646e-02 L2_sharp:8.3707e-02 L3_sharp:8.4090e-02 L4_sharp:9.5874e-02 L5_sharp:1.1094e-01 L6_sharp:1.1879e-01 L7_sharp:1.2916e-01 L8_sharp:1.8234e-01 L9_sharp:2.0340e-01 L10_sharp:3.1553e-01 L11_sharp:4.8648e-01 L12_sharp:7.4525e-01 total_fnorm:1.8125e+01 total_l1_linf:3.0592e+04 total_spectral:9.1250e+00 L1_fnorm:2.4707e-01 L2_fnorm:2.5195e-01 L3_fnorm:2.5000e-01 L4_fnorm:2.5000e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4902e-01 L8_fnorm:2.4316e-01 L9_fnorm:2.4707e-01 L10_fnorm:2.4121e-01 L11_fnorm:2.3340e-01 L12_fnorm:2.1387e-01 L1_l1linf:8.8379e-02 L2_l1linf:8.6914e-02 L3_l1linf:8.6426e-02 L4_l1linf:8.6426e-02 L5_l1linf:8.5938e-02 L6_l1linf:8.6914e-02 L7_l1linf:8.5449e-02 L8_l1linf:8.5449e-02 L9_l1linf:8.4961e-02 L10_l1linf:8.3496e-02 L11_l1linf:7.6660e-02 L12_l1linf:5.8350e-02 L1_spectral:3.1152e-03 L2_spectral:3.1081e-03 L3_spectral:3.1173e-03 L4_spectral:3.1204e-03 L5_spectral:3.1078e-03 L6_spectral:3.1308e-03 L7_spectral:3.0964e-03 L8_spectral:3.0682e-03 L9_spectral:3.0854e-03 L10_spectral:3.0526e-03 L11_spectral:3.0529e-03 L12_spectral:3.0588e-03 train_time:58285ms step_avg:72.86ms +[2025-09-11 11:31:57] [Rank 0] PRINT: step:800/10000 val_loss:5.8819 total_sharp:4.5653e-03 L1_sharp:8.3646e-02 L2_sharp:8.3707e-02 L3_sharp:8.4090e-02 L4_sharp:9.5874e-02 L5_sharp:1.1094e-01 L6_sharp:1.1879e-01 L7_sharp:1.2916e-01 L8_sharp:1.8234e-01 
L9_sharp:2.0340e-01 L10_sharp:3.1553e-01 L11_sharp:4.8648e-01 L12_sharp:7.4525e-01 total_fnorm:1.8125e+01 total_l1_linf:3.0592e+04 total_spectral:9.1250e+00 L1_fnorm:2.4707e-01 L2_fnorm:2.5195e-01 L3_fnorm:2.5000e-01 L4_fnorm:2.5000e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4902e-01 L8_fnorm:2.4316e-01 L9_fnorm:2.4707e-01 L10_fnorm:2.4121e-01 L11_fnorm:2.3340e-01 L12_fnorm:2.1387e-01 L1_l1linf:8.8379e-02 L2_l1linf:8.6914e-02 L3_l1linf:8.6426e-02 L4_l1linf:8.6426e-02 L5_l1linf:8.5938e-02 L6_l1linf:8.6914e-02 L7_l1linf:8.5449e-02 L8_l1linf:8.5449e-02 L9_l1linf:8.4961e-02 L10_l1linf:8.3496e-02 L11_l1linf:7.6660e-02 L12_l1linf:5.8350e-02 L1_spectral:3.1152e-03 L2_spectral:3.1081e-03 L3_spectral:3.1173e-03 L4_spectral:3.1204e-03 L5_spectral:3.1078e-03 L6_spectral:3.1308e-03 L7_spectral:3.0964e-03 L8_spectral:3.0682e-03 L9_spectral:3.0854e-03 L10_spectral:3.0526e-03 L11_spectral:3.0529e-03 L12_spectral:3.0588e-03 train_time:58285ms step_avg:72.86ms +[2025-09-11 11:31:58] [Rank 0] step:801/10000 train_time:59465ms step_avg:74.24ms +[2025-09-11 11:31:58] [Rank 0] step:801/10000 train_time:59465ms step_avg:74.24ms +[2025-09-11 11:31:59] [Rank 0] step:821/10000 train_time:60113ms step_avg:73.22ms +[2025-09-11 11:31:59] [Rank 0] step:821/10000 train_time:60113ms step_avg:73.22ms +[2025-09-11 11:32:00] [Rank 0] step:841/10000 train_time:60761ms step_avg:72.25ms +[2025-09-11 11:32:00] [Rank 0] step:841/10000 train_time:60761ms step_avg:72.25ms +[2025-09-11 11:32:00] [Rank 0] step:861/10000 train_time:61407ms step_avg:71.32ms +[2025-09-11 11:32:00] [Rank 0] step:861/10000 train_time:61407ms step_avg:71.32ms +[2025-09-11 11:32:01] [Rank 0] step:881/10000 train_time:62052ms step_avg:70.43ms +[2025-09-11 11:32:01] [Rank 0] step:881/10000 train_time:62052ms step_avg:70.43ms +[2025-09-11 11:32:01] [Rank 0] step:901/10000 train_time:62698ms step_avg:69.59ms +[2025-09-11 11:32:01] [Rank 0] step:901/10000 train_time:62698ms step_avg:69.59ms +[2025-09-11 11:32:02] [Rank 0] 
step:921/10000 train_time:63345ms step_avg:68.78ms +[2025-09-11 11:32:02] [Rank 0] step:921/10000 train_time:63345ms step_avg:68.78ms +[2025-09-11 11:32:03] [Rank 0] step:941/10000 train_time:63990ms step_avg:68.00ms +[2025-09-11 11:32:03] [Rank 0] step:941/10000 train_time:63990ms step_avg:68.00ms +[2025-09-11 11:32:03] [Rank 0] step:961/10000 train_time:64636ms step_avg:67.26ms +[2025-09-11 11:32:03] [Rank 0] step:961/10000 train_time:64636ms step_avg:67.26ms +[2025-09-11 11:32:04] [Rank 0] step:981/10000 train_time:65588ms step_avg:66.86ms +[2025-09-11 11:32:04] [Rank 0] step:981/10000 train_time:65588ms step_avg:66.86ms +[2025-09-11 11:32:05] [Rank 0] step:1001/10000 train_time:66233ms step_avg:66.17ms +[2025-09-11 11:32:05] [Rank 0] step:1001/10000 train_time:66233ms step_avg:66.17ms +[2025-09-11 11:32:06] [Rank 0] step:1021/10000 train_time:66886ms step_avg:65.51ms +[2025-09-11 11:32:06] [Rank 0] step:1021/10000 train_time:66886ms step_avg:65.51ms +[2025-09-11 11:32:06] [Rank 0] step:1041/10000 train_time:67531ms step_avg:64.87ms +[2025-09-11 11:32:06] [Rank 0] step:1041/10000 train_time:67531ms step_avg:64.87ms +[2025-09-11 11:32:07] [Rank 0] step:1061/10000 train_time:68485ms step_avg:64.55ms +[2025-09-11 11:32:07] [Rank 0] step:1061/10000 train_time:68485ms step_avg:64.55ms +[2025-09-11 11:32:08] [Rank 0] step:1081/10000 train_time:69130ms step_avg:63.95ms +[2025-09-11 11:32:08] [Rank 0] step:1081/10000 train_time:69130ms step_avg:63.95ms +[2025-09-11 11:32:09] [Rank 0] step:1101/10000 train_time:69775ms step_avg:63.37ms +[2025-09-11 11:32:09] [Rank 0] step:1101/10000 train_time:69775ms step_avg:63.37ms +[2025-09-11 11:32:09] [Rank 0] step:1121/10000 train_time:70419ms step_avg:62.82ms +[2025-09-11 11:32:09] [Rank 0] step:1121/10000 train_time:70419ms step_avg:62.82ms +[2025-09-11 11:32:10] [Rank 0] step:1141/10000 train_time:71065ms step_avg:62.28ms +[2025-09-11 11:32:10] [Rank 0] step:1141/10000 train_time:71065ms step_avg:62.28ms +[2025-09-11 11:32:10] 
[Rank 0] step:1161/10000 train_time:71710ms step_avg:61.77ms +[2025-09-11 11:32:10] [Rank 0] step:1161/10000 train_time:71710ms step_avg:61.77ms +[2025-09-11 11:32:11] [Rank 0] step:1181/10000 train_time:72354ms step_avg:61.27ms +[2025-09-11 11:32:11] [Rank 0] step:1181/10000 train_time:72354ms step_avg:61.27ms +[2025-09-11 11:32:12] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 11:32:12] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 11:32:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 11:32:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 11:32:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 11:32:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 11:32:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:32:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:32:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 11:32:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 11:32:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 11:32:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 11:32:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 11:32:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 11:32:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... 
+[2025-09-11 11:32:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 11:32:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 11:32:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 11:32:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 11:32:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 11:32:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 11:32:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 11:32:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 11:32:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 11:32:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 11:32:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 11:32:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 11:32:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 11:32:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 11:32:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 11:32:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 11:32:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 11:32:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 11:32:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 11:32:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... 
+[2025-09-11 11:32:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 11:32:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 11:32:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 11:32:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 11:32:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 11:32:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 11:32:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 11:32:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 11:32:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 11:32:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:32:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:32:22] [Rank 0] PRINT: step:1200/10000 val_loss:5.5918 total_sharp:3.2743e-03 L1_sharp:5.5704e-02 L2_sharp:5.2631e-02 L3_sharp:5.3971e-02 L4_sharp:5.9685e-02 L5_sharp:6.9164e-02 L6_sharp:7.9900e-02 L7_sharp:9.4513e-02 L8_sharp:1.3314e-01 L9_sharp:1.4469e-01 L10_sharp:1.9456e-01 L11_sharp:2.7082e-01 L12_sharp:6.7801e-01 total_fnorm:1.8625e+01 total_l1_linf:2.9824e+04 total_spectral:9.3125e+00 L1_fnorm:2.5195e-01 L2_fnorm:2.5195e-01 L3_fnorm:2.5000e-01 L4_fnorm:2.4902e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4805e-01 L7_fnorm:2.4805e-01 L8_fnorm:2.4707e-01 L9_fnorm:2.4902e-01 L10_fnorm:2.5000e-01 L11_fnorm:2.5000e-01 L12_fnorm:2.4121e-01 L1_l1linf:7.9590e-02 L2_l1linf:7.8613e-02 L3_l1linf:7.6660e-02 L4_l1linf:7.8125e-02 L5_l1linf:7.6172e-02 L6_l1linf:7.7637e-02 L7_l1linf:7.7637e-02 L8_l1linf:7.7637e-02 L9_l1linf:7.9102e-02 L10_l1linf:8.0078e-02 L11_l1linf:8.1543e-02 L12_l1linf:7.1289e-02 L1_spectral:3.1287e-03 L2_spectral:3.1358e-03 L3_spectral:3.1361e-03 L4_spectral:3.1193e-03 L5_spectral:3.1094e-03 L6_spectral:3.1037e-03 L7_spectral:3.1262e-03 L8_spectral:3.0936e-03 L9_spectral:3.0830e-03 L10_spectral:3.1060e-03 L11_spectral:3.0813e-03 L12_spectral:3.1074e-03 train_time:72981ms step_avg:60.82ms +[2025-09-11 11:32:22] [Rank 0] PRINT: step:1200/10000 val_loss:5.5918 total_sharp:3.2743e-03 L1_sharp:5.5704e-02 L2_sharp:5.2631e-02 L3_sharp:5.3971e-02 L4_sharp:5.9685e-02 L5_sharp:6.9164e-02 L6_sharp:7.9900e-02 L7_sharp:9.4513e-02 L8_sharp:1.3314e-01 L9_sharp:1.4469e-01 L10_sharp:1.9456e-01 L11_sharp:2.7082e-01 L12_sharp:6.7801e-01 total_fnorm:1.8625e+01 total_l1_linf:2.9824e+04 total_spectral:9.3125e+00 L1_fnorm:2.5195e-01 L2_fnorm:2.5195e-01 L3_fnorm:2.5000e-01 L4_fnorm:2.4902e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4805e-01 L7_fnorm:2.4805e-01 L8_fnorm:2.4707e-01 L9_fnorm:2.4902e-01 L10_fnorm:2.5000e-01 L11_fnorm:2.5000e-01 L12_fnorm:2.4121e-01 L1_l1linf:7.9590e-02 L2_l1linf:7.8613e-02 L3_l1linf:7.6660e-02 L4_l1linf:7.8125e-02 L5_l1linf:7.6172e-02 
L6_l1linf:7.7637e-02 L7_l1linf:7.7637e-02 L8_l1linf:7.7637e-02 L9_l1linf:7.9102e-02 L10_l1linf:8.0078e-02 L11_l1linf:8.1543e-02 L12_l1linf:7.1289e-02 L1_spectral:3.1287e-03 L2_spectral:3.1358e-03 L3_spectral:3.1361e-03 L4_spectral:3.1193e-03 L5_spectral:3.1094e-03 L6_spectral:3.1037e-03 L7_spectral:3.1262e-03 L8_spectral:3.0936e-03 L9_spectral:3.0830e-03 L10_spectral:3.1060e-03 L11_spectral:3.0813e-03 L12_spectral:3.1074e-03 train_time:72981ms step_avg:60.82ms +[2025-09-11 11:32:23] [Rank 0] step:1201/10000 train_time:74160ms step_avg:61.75ms +[2025-09-11 11:32:23] [Rank 0] step:1201/10000 train_time:74160ms step_avg:61.75ms +[2025-09-11 11:32:24] [Rank 0] step:1221/10000 train_time:74810ms step_avg:61.27ms +[2025-09-11 11:32:24] [Rank 0] step:1221/10000 train_time:74810ms step_avg:61.27ms +[2025-09-11 11:32:24] [Rank 0] step:1241/10000 train_time:75459ms step_avg:60.81ms +[2025-09-11 11:32:24] [Rank 0] step:1241/10000 train_time:75459ms step_avg:60.81ms +[2025-09-11 11:32:25] [Rank 0] step:1261/10000 train_time:76104ms step_avg:60.35ms +[2025-09-11 11:32:25] [Rank 0] step:1261/10000 train_time:76104ms step_avg:60.35ms +[2025-09-11 11:32:26] [Rank 0] step:1281/10000 train_time:76749ms step_avg:59.91ms +[2025-09-11 11:32:26] [Rank 0] step:1281/10000 train_time:76749ms step_avg:59.91ms +[2025-09-11 11:32:26] [Rank 0] step:1301/10000 train_time:77392ms step_avg:59.49ms +[2025-09-11 11:32:26] [Rank 0] step:1301/10000 train_time:77392ms step_avg:59.49ms +[2025-09-11 11:32:27] [Rank 0] step:1321/10000 train_time:78036ms step_avg:59.07ms +[2025-09-11 11:32:27] [Rank 0] step:1321/10000 train_time:78036ms step_avg:59.07ms +[2025-09-11 11:32:28] [Rank 0] step:1341/10000 train_time:78680ms step_avg:58.67ms +[2025-09-11 11:32:28] [Rank 0] step:1341/10000 train_time:78680ms step_avg:58.67ms +[2025-09-11 11:32:28] [Rank 0] step:1361/10000 train_time:79324ms step_avg:58.28ms +[2025-09-11 11:32:28] [Rank 0] step:1361/10000 train_time:79324ms step_avg:58.28ms +[2025-09-11 11:32:29] 
[Rank 0] step:1381/10000 train_time:79968ms step_avg:57.91ms +[2025-09-11 11:32:29] [Rank 0] step:1381/10000 train_time:79968ms step_avg:57.91ms +[2025-09-11 11:32:29] [Rank 0] step:1401/10000 train_time:80612ms step_avg:57.54ms +[2025-09-11 11:32:29] [Rank 0] step:1401/10000 train_time:80612ms step_avg:57.54ms +[2025-09-11 11:32:30] [Rank 0] step:1421/10000 train_time:81256ms step_avg:57.18ms +[2025-09-11 11:32:30] [Rank 0] step:1421/10000 train_time:81256ms step_avg:57.18ms +[2025-09-11 11:32:31] [Rank 0] step:1441/10000 train_time:81900ms step_avg:56.84ms +[2025-09-11 11:32:31] [Rank 0] step:1441/10000 train_time:81900ms step_avg:56.84ms +[2025-09-11 11:32:31] [Rank 0] step:1461/10000 train_time:82544ms step_avg:56.50ms +[2025-09-11 11:32:31] [Rank 0] step:1461/10000 train_time:82544ms step_avg:56.50ms +[2025-09-11 11:32:32] [Rank 0] step:1481/10000 train_time:83188ms step_avg:56.17ms +[2025-09-11 11:32:32] [Rank 0] step:1481/10000 train_time:83188ms step_avg:56.17ms +[2025-09-11 11:32:33] [Rank 0] step:1501/10000 train_time:83836ms step_avg:55.85ms +[2025-09-11 11:32:33] [Rank 0] step:1501/10000 train_time:83836ms step_avg:55.85ms +[2025-09-11 11:32:33] [Rank 0] step:1521/10000 train_time:84485ms step_avg:55.55ms +[2025-09-11 11:32:33] [Rank 0] step:1521/10000 train_time:84485ms step_avg:55.55ms +[2025-09-11 11:32:34] [Rank 0] step:1541/10000 train_time:85133ms step_avg:55.25ms +[2025-09-11 11:32:34] [Rank 0] step:1541/10000 train_time:85133ms step_avg:55.25ms +[2025-09-11 11:32:35] [Rank 0] step:1561/10000 train_time:85781ms step_avg:54.95ms +[2025-09-11 11:32:35] [Rank 0] step:1561/10000 train_time:85781ms step_avg:54.95ms +[2025-09-11 11:32:35] [Rank 0] step:1581/10000 train_time:86430ms step_avg:54.67ms +[2025-09-11 11:32:35] [Rank 0] step:1581/10000 train_time:86430ms step_avg:54.67ms +[2025-09-11 11:32:36] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 11:32:36] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 11:32:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 11:32:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 11:32:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 11:32:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 11:32:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:32:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:32:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 11:32:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 11:32:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 11:32:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 11:32:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 11:32:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 11:32:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 11:32:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 11:32:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 11:32:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 11:32:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 11:32:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 11:32:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 11:32:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 11:32:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 11:32:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 11:32:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 11:32:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 11:32:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 11:32:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 11:32:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 11:32:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 11:32:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 11:32:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 11:32:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 11:32:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 11:32:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 11:32:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 11:32:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 11:32:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 11:32:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 11:32:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 11:32:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 11:32:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 11:32:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 11:32:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 11:32:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:32:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:32:46] [Rank 0] PRINT: step:1600/10000 val_loss:5.4199 total_sharp:2.4758e-03 L1_sharp:4.4298e-02 L2_sharp:3.7877e-02 L3_sharp:3.6575e-02 L4_sharp:3.9899e-02 L5_sharp:4.0493e-02 L6_sharp:4.9383e-02 L7_sharp:6.4574e-02 L8_sharp:9.6684e-02 L9_sharp:1.0142e-01 L10_sharp:1.4225e-01 L11_sharp:2.0341e-01 L12_sharp:5.5665e-01 total_fnorm:1.7500e+01 total_l1_linf:2.6368e+04 total_spectral:8.8125e+00 L1_fnorm:2.5000e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.5000e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4707e-01 L8_fnorm:2.4609e-01 L9_fnorm:2.4902e-01 L10_fnorm:2.5000e-01 L11_fnorm:2.5195e-01 L12_fnorm:2.4512e-01 L1_l1linf:7.5195e-02 L2_l1linf:7.4219e-02 L3_l1linf:7.3730e-02 L4_l1linf:7.3242e-02 L5_l1linf:7.3730e-02 L6_l1linf:7.3730e-02 L7_l1linf:7.3730e-02 L8_l1linf:7.4219e-02 L9_l1linf:7.4219e-02 L10_l1linf:7.5195e-02 L11_l1linf:7.8613e-02 L12_l1linf:6.8848e-02 L1_spectral:3.1244e-03 L2_spectral:3.1589e-03 L3_spectral:3.1413e-03 L4_spectral:3.1254e-03 L5_spectral:3.1301e-03 L6_spectral:3.1269e-03 L7_spectral:3.1425e-03 L8_spectral:3.1328e-03 L9_spectral:3.1332e-03 L10_spectral:3.1210e-03 L11_spectral:3.1181e-03 L12_spectral:3.0978e-03 train_time:87060ms step_avg:54.41ms +[2025-09-11 11:32:46] [Rank 0] PRINT: step:1600/10000 
val_loss:5.4199 total_sharp:2.4758e-03 L1_sharp:4.4298e-02 L2_sharp:3.7877e-02 L3_sharp:3.6575e-02 L4_sharp:3.9899e-02 L5_sharp:4.0493e-02 L6_sharp:4.9383e-02 L7_sharp:6.4574e-02 L8_sharp:9.6684e-02 L9_sharp:1.0142e-01 L10_sharp:1.4225e-01 L11_sharp:2.0341e-01 L12_sharp:5.5665e-01 total_fnorm:1.7500e+01 total_l1_linf:2.6368e+04 total_spectral:8.8125e+00 L1_fnorm:2.5000e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.5000e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4707e-01 L8_fnorm:2.4609e-01 L9_fnorm:2.4902e-01 L10_fnorm:2.5000e-01 L11_fnorm:2.5195e-01 L12_fnorm:2.4512e-01 L1_l1linf:7.5195e-02 L2_l1linf:7.4219e-02 L3_l1linf:7.3730e-02 L4_l1linf:7.3242e-02 L5_l1linf:7.3730e-02 L6_l1linf:7.3730e-02 L7_l1linf:7.3730e-02 L8_l1linf:7.4219e-02 L9_l1linf:7.4219e-02 L10_l1linf:7.5195e-02 L11_l1linf:7.8613e-02 L12_l1linf:6.8848e-02 L1_spectral:3.1244e-03 L2_spectral:3.1589e-03 L3_spectral:3.1413e-03 L4_spectral:3.1254e-03 L5_spectral:3.1301e-03 L6_spectral:3.1269e-03 L7_spectral:3.1425e-03 L8_spectral:3.1328e-03 L9_spectral:3.1332e-03 L10_spectral:3.1210e-03 L11_spectral:3.1181e-03 L12_spectral:3.0978e-03 train_time:87060ms step_avg:54.41ms +[2025-09-11 11:32:47] [Rank 0] step:1601/10000 train_time:88310ms step_avg:55.16ms +[2025-09-11 11:32:47] [Rank 0] step:1601/10000 train_time:88310ms step_avg:55.16ms +[2025-09-11 11:32:48] [Rank 0] step:1621/10000 train_time:88948ms step_avg:54.87ms +[2025-09-11 11:32:48] [Rank 0] step:1621/10000 train_time:88948ms step_avg:54.87ms +[2025-09-11 11:32:48] [Rank 0] step:1641/10000 train_time:89598ms step_avg:54.60ms +[2025-09-11 11:32:48] [Rank 0] step:1641/10000 train_time:89598ms step_avg:54.60ms +[2025-09-11 11:32:49] [Rank 0] step:1661/10000 train_time:90254ms step_avg:54.34ms +[2025-09-11 11:32:49] [Rank 0] step:1661/10000 train_time:90254ms step_avg:54.34ms +[2025-09-11 11:32:50] [Rank 0] step:1681/10000 train_time:90903ms step_avg:54.08ms +[2025-09-11 11:32:50] [Rank 0] step:1681/10000 train_time:90903ms 
step_avg:54.08ms +[2025-09-11 11:32:50] [Rank 0] step:1701/10000 train_time:91555ms step_avg:53.82ms +[2025-09-11 11:32:50] [Rank 0] step:1701/10000 train_time:91555ms step_avg:53.82ms +[2025-09-11 11:32:51] [Rank 0] step:1721/10000 train_time:92205ms step_avg:53.58ms +[2025-09-11 11:32:51] [Rank 0] step:1721/10000 train_time:92205ms step_avg:53.58ms +[2025-09-11 11:32:52] [Rank 0] step:1741/10000 train_time:92854ms step_avg:53.33ms +[2025-09-11 11:32:52] [Rank 0] step:1741/10000 train_time:92854ms step_avg:53.33ms +[2025-09-11 11:32:52] [Rank 0] step:1761/10000 train_time:93503ms step_avg:53.10ms +[2025-09-11 11:32:52] [Rank 0] step:1761/10000 train_time:93503ms step_avg:53.10ms +[2025-09-11 11:32:53] [Rank 0] step:1781/10000 train_time:94152ms step_avg:52.86ms +[2025-09-11 11:32:53] [Rank 0] step:1781/10000 train_time:94152ms step_avg:52.86ms +[2025-09-11 11:32:53] [Rank 0] step:1801/10000 train_time:94801ms step_avg:52.64ms +[2025-09-11 11:32:53] [Rank 0] step:1801/10000 train_time:94801ms step_avg:52.64ms +[2025-09-11 11:32:54] [Rank 0] step:1821/10000 train_time:95450ms step_avg:52.42ms +[2025-09-11 11:32:54] [Rank 0] step:1821/10000 train_time:95450ms step_avg:52.42ms +[2025-09-11 11:32:55] [Rank 0] step:1841/10000 train_time:96099ms step_avg:52.20ms +[2025-09-11 11:32:55] [Rank 0] step:1841/10000 train_time:96099ms step_avg:52.20ms +[2025-09-11 11:32:55] [Rank 0] step:1861/10000 train_time:96748ms step_avg:51.99ms +[2025-09-11 11:32:55] [Rank 0] step:1861/10000 train_time:96748ms step_avg:51.99ms +[2025-09-11 11:32:56] [Rank 0] step:1881/10000 train_time:97397ms step_avg:51.78ms +[2025-09-11 11:32:56] [Rank 0] step:1881/10000 train_time:97397ms step_avg:51.78ms +[2025-09-11 11:32:57] [Rank 0] step:1901/10000 train_time:98046ms step_avg:51.58ms +[2025-09-11 11:32:57] [Rank 0] step:1901/10000 train_time:98046ms step_avg:51.58ms +[2025-09-11 11:32:57] [Rank 0] step:1921/10000 train_time:98695ms step_avg:51.38ms +[2025-09-11 11:32:57] [Rank 0] step:1921/10000 
train_time:98695ms step_avg:51.38ms +[2025-09-11 11:32:58] [Rank 0] step:1941/10000 train_time:99343ms step_avg:51.18ms +[2025-09-11 11:32:58] [Rank 0] step:1941/10000 train_time:99343ms step_avg:51.18ms +[2025-09-11 11:32:59] [Rank 0] step:1961/10000 train_time:99992ms step_avg:50.99ms +[2025-09-11 11:32:59] [Rank 0] step:1961/10000 train_time:99992ms step_avg:50.99ms +[2025-09-11 11:32:59] [Rank 0] step:1981/10000 train_time:100640ms step_avg:50.80ms +[2025-09-11 11:32:59] [Rank 0] step:1981/10000 train_time:100640ms step_avg:50.80ms +[2025-09-11 11:33:00] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 11:33:00] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 11:33:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 11:33:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 11:33:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 11:33:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 11:33:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:33:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:33:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 11:33:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 11:33:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 11:33:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 11:33:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 11:33:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 11:33:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 11:33:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 11:33:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 11:33:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 11:33:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 11:33:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 11:33:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 11:33:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 11:33:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 11:33:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 11:33:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 11:33:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 11:33:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 11:33:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 11:33:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 11:33:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 11:33:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 11:33:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 11:33:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 11:33:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 11:33:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 11:33:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 11:33:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 11:33:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 11:33:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 11:33:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 11:33:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 11:33:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 11:33:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 11:33:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 11:33:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:33:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:33:11] [Rank 0] PRINT: step:2000/10000 val_loss:5.2534 total_sharp:2.3472e-03 L1_sharp:1.5997e-02 L2_sharp:1.1604e-02 L3_sharp:1.3880e-02 L4_sharp:1.2552e-02 L5_sharp:2.4185e-02 L6_sharp:2.7357e-02 L7_sharp:3.7483e-02 L8_sharp:7.7424e-02 L9_sharp:8.9897e-02 L10_sharp:1.4505e-01 L11_sharp:2.0299e-01 L12_sharp:1.5532e+00 total_fnorm:1.7000e+01 total_l1_linf:2.5984e+04 total_spectral:8.5625e+00 L1_fnorm:2.5000e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.5000e-01 L4_fnorm:2.4902e-01 L5_fnorm:2.4805e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4707e-01 L8_fnorm:2.4609e-01 L9_fnorm:2.4805e-01 L10_fnorm:2.4805e-01 L11_fnorm:2.5000e-01 L12_fnorm:2.4609e-01 L1_l1linf:7.2266e-02 L2_l1linf:7.1777e-02 L3_l1linf:7.1777e-02 L4_l1linf:7.0801e-02 L5_l1linf:7.0801e-02 L6_l1linf:7.0801e-02 L7_l1linf:7.1289e-02 L8_l1linf:7.0801e-02 L9_l1linf:7.1777e-02 L10_l1linf:7.2754e-02 L11_l1linf:7.6172e-02 L12_l1linf:6.4453e-02 L1_spectral:3.1430e-03 L2_spectral:3.1570e-03 L3_spectral:3.1546e-03 L4_spectral:3.1646e-03 L5_spectral:3.1500e-03 L6_spectral:3.1477e-03 L7_spectral:3.1458e-03 L8_spectral:3.1285e-03 L9_spectral:3.1367e-03 L10_spectral:3.1229e-03 L11_spectral:3.1498e-03 L12_spectral:3.1174e-03 train_time:101271ms step_avg:50.64ms +[2025-09-11 11:33:11] [Rank 0] PRINT: step:2000/10000 val_loss:5.2534 total_sharp:2.3472e-03 L1_sharp:1.5997e-02 L2_sharp:1.1604e-02 L3_sharp:1.3880e-02 L4_sharp:1.2552e-02 L5_sharp:2.4185e-02 L6_sharp:2.7357e-02 L7_sharp:3.7483e-02 L8_sharp:7.7424e-02 L9_sharp:8.9897e-02 L10_sharp:1.4505e-01 L11_sharp:2.0299e-01 L12_sharp:1.5532e+00 total_fnorm:1.7000e+01 total_l1_linf:2.5984e+04 total_spectral:8.5625e+00 L1_fnorm:2.5000e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.5000e-01 L4_fnorm:2.4902e-01 L5_fnorm:2.4805e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4707e-01 L8_fnorm:2.4609e-01 L9_fnorm:2.4805e-01 L10_fnorm:2.4805e-01 L11_fnorm:2.5000e-01 L12_fnorm:2.4609e-01 L1_l1linf:7.2266e-02 L2_l1linf:7.1777e-02 L3_l1linf:7.1777e-02 L4_l1linf:7.0801e-02 L5_l1linf:7.0801e-02 
L6_l1linf:7.0801e-02 L7_l1linf:7.1289e-02 L8_l1linf:7.0801e-02 L9_l1linf:7.1777e-02 L10_l1linf:7.2754e-02 L11_l1linf:7.6172e-02 L12_l1linf:6.4453e-02 L1_spectral:3.1430e-03 L2_spectral:3.1570e-03 L3_spectral:3.1546e-03 L4_spectral:3.1646e-03 L5_spectral:3.1500e-03 L6_spectral:3.1477e-03 L7_spectral:3.1458e-03 L8_spectral:3.1285e-03 L9_spectral:3.1367e-03 L10_spectral:3.1229e-03 L11_spectral:3.1498e-03 L12_spectral:3.1174e-03 train_time:101271ms step_avg:50.64ms +[2025-09-11 11:33:13] [Rank 0] step:2001/10000 train_time:103547ms step_avg:51.75ms +[2025-09-11 11:33:13] [Rank 0] step:2001/10000 train_time:103547ms step_avg:51.75ms +[2025-09-11 11:33:13] [Rank 0] step:2021/10000 train_time:104212ms step_avg:51.56ms +[2025-09-11 11:33:13] [Rank 0] step:2021/10000 train_time:104212ms step_avg:51.56ms +[2025-09-11 11:33:14] [Rank 0] step:2041/10000 train_time:104862ms step_avg:51.38ms +[2025-09-11 11:33:14] [Rank 0] step:2041/10000 train_time:104862ms step_avg:51.38ms +[2025-09-11 11:33:15] [Rank 0] step:2061/10000 train_time:105512ms step_avg:51.19ms +[2025-09-11 11:33:15] [Rank 0] step:2061/10000 train_time:105512ms step_avg:51.19ms +[2025-09-11 11:33:15] [Rank 0] step:2081/10000 train_time:106162ms step_avg:51.01ms +[2025-09-11 11:33:15] [Rank 0] step:2081/10000 train_time:106162ms step_avg:51.01ms +[2025-09-11 11:33:16] [Rank 0] step:2101/10000 train_time:106811ms step_avg:50.84ms +[2025-09-11 11:33:16] [Rank 0] step:2101/10000 train_time:106811ms step_avg:50.84ms +[2025-09-11 11:33:17] [Rank 0] step:2121/10000 train_time:107461ms step_avg:50.67ms +[2025-09-11 11:33:17] [Rank 0] step:2121/10000 train_time:107461ms step_avg:50.67ms +[2025-09-11 11:33:17] [Rank 0] step:2141/10000 train_time:108110ms step_avg:50.49ms +[2025-09-11 11:33:17] [Rank 0] step:2141/10000 train_time:108110ms step_avg:50.49ms +[2025-09-11 11:33:18] [Rank 0] step:2161/10000 train_time:108759ms step_avg:50.33ms +[2025-09-11 11:33:18] [Rank 0] step:2161/10000 train_time:108759ms step_avg:50.33ms 
+[2025-09-11 11:33:19] [Rank 0] step:2181/10000 train_time:109408ms step_avg:50.16ms +[2025-09-11 11:33:19] [Rank 0] step:2181/10000 train_time:109408ms step_avg:50.16ms +[2025-09-11 11:33:19] [Rank 0] step:2201/10000 train_time:110057ms step_avg:50.00ms +[2025-09-11 11:33:19] [Rank 0] step:2201/10000 train_time:110057ms step_avg:50.00ms +[2025-09-11 11:33:20] [Rank 0] step:2221/10000 train_time:110707ms step_avg:49.85ms +[2025-09-11 11:33:20] [Rank 0] step:2221/10000 train_time:110707ms step_avg:49.85ms +[2025-09-11 11:33:21] [Rank 0] step:2241/10000 train_time:111368ms step_avg:49.70ms +[2025-09-11 11:33:21] [Rank 0] step:2241/10000 train_time:111368ms step_avg:49.70ms +[2025-09-11 11:33:21] [Rank 0] step:2261/10000 train_time:112031ms step_avg:49.55ms +[2025-09-11 11:33:21] [Rank 0] step:2261/10000 train_time:112031ms step_avg:49.55ms +[2025-09-11 11:33:22] [Rank 0] step:2281/10000 train_time:112693ms step_avg:49.41ms +[2025-09-11 11:33:22] [Rank 0] step:2281/10000 train_time:112693ms step_avg:49.41ms +[2025-09-11 11:33:23] [Rank 0] step:2301/10000 train_time:113356ms step_avg:49.26ms +[2025-09-11 11:33:23] [Rank 0] step:2301/10000 train_time:113356ms step_avg:49.26ms +[2025-09-11 11:33:23] [Rank 0] step:2321/10000 train_time:114018ms step_avg:49.12ms +[2025-09-11 11:33:23] [Rank 0] step:2321/10000 train_time:114018ms step_avg:49.12ms +[2025-09-11 11:33:24] [Rank 0] step:2341/10000 train_time:114681ms step_avg:48.99ms +[2025-09-11 11:33:24] [Rank 0] step:2341/10000 train_time:114681ms step_avg:48.99ms +[2025-09-11 11:33:25] [Rank 0] step:2361/10000 train_time:115344ms step_avg:48.85ms +[2025-09-11 11:33:25] [Rank 0] step:2361/10000 train_time:115344ms step_avg:48.85ms +[2025-09-11 11:33:25] [Rank 0] step:2381/10000 train_time:116006ms step_avg:48.72ms +[2025-09-11 11:33:25] [Rank 0] step:2381/10000 train_time:116006ms step_avg:48.72ms +[2025-09-11 11:33:26] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 11:33:26] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 11:33:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 11:33:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 11:33:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 11:33:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 11:33:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:33:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:33:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 11:33:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 11:33:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 11:33:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 11:33:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 11:33:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 11:33:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 11:33:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 11:33:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 11:33:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 11:33:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 11:33:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 11:33:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 11:33:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 11:33:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 11:33:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 11:33:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 11:33:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 11:33:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 11:33:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 11:33:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 11:33:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 11:33:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 11:33:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 11:33:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 11:33:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 11:33:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 11:33:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 11:33:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 11:33:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 11:33:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 11:33:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 11:33:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 11:33:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 11:33:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 11:33:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 11:33:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:33:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:33:36] [Rank 0] PRINT: step:2400/10000 val_loss:5.1058 total_sharp:2.0903e-03 L1_sharp:1.8408e-02 L2_sharp:1.3297e-02 L3_sharp:1.2440e-02 L4_sharp:1.5125e-02 L5_sharp:2.2578e-02 L6_sharp:2.5972e-02 L7_sharp:3.4914e-02 L8_sharp:6.5452e-02 L9_sharp:8.1306e-02 L10_sharp:1.2146e-01 L11_sharp:1.4679e-01 L12_sharp:7.7769e-01 total_fnorm:1.5875e+01 total_l1_linf:2.3040e+04 total_spectral:7.9688e+00 L1_fnorm:2.5000e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4707e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4609e-01 L8_fnorm:2.4609e-01 L9_fnorm:2.4707e-01 L10_fnorm:2.4805e-01 L11_fnorm:2.4902e-01 L12_fnorm:2.5000e-01 L1_l1linf:7.0312e-02 L2_l1linf:7.0312e-02 L3_l1linf:6.9824e-02 L4_l1linf:7.0312e-02 L5_l1linf:7.0312e-02 L6_l1linf:6.8848e-02 L7_l1linf:6.8848e-02 L8_l1linf:6.8848e-02 L9_l1linf:6.9336e-02 L10_l1linf:7.0312e-02 L11_l1linf:7.1289e-02 L12_l1linf:6.8359e-02 L1_spectral:3.1627e-03 L2_spectral:3.1715e-03 L3_spectral:3.1484e-03 L4_spectral:3.1679e-03 L5_spectral:3.1572e-03 L6_spectral:3.1509e-03 L7_spectral:3.1392e-03 L8_spectral:3.1404e-03 L9_spectral:3.1535e-03 L10_spectral:3.1723e-03 L11_spectral:3.1577e-03 L12_spectral:3.1488e-03 train_time:116650ms step_avg:48.60ms +[2025-09-11 11:33:36] [Rank 0] PRINT: step:2400/10000 
val_loss:5.1058 total_sharp:2.0903e-03 L1_sharp:1.8408e-02 L2_sharp:1.3297e-02 L3_sharp:1.2440e-02 L4_sharp:1.5125e-02 L5_sharp:2.2578e-02 L6_sharp:2.5972e-02 L7_sharp:3.4914e-02 L8_sharp:6.5452e-02 L9_sharp:8.1306e-02 L10_sharp:1.2146e-01 L11_sharp:1.4679e-01 L12_sharp:7.7769e-01 total_fnorm:1.5875e+01 total_l1_linf:2.3040e+04 total_spectral:7.9688e+00 L1_fnorm:2.5000e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4707e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4609e-01 L8_fnorm:2.4609e-01 L9_fnorm:2.4707e-01 L10_fnorm:2.4805e-01 L11_fnorm:2.4902e-01 L12_fnorm:2.5000e-01 L1_l1linf:7.0312e-02 L2_l1linf:7.0312e-02 L3_l1linf:6.9824e-02 L4_l1linf:7.0312e-02 L5_l1linf:7.0312e-02 L6_l1linf:6.8848e-02 L7_l1linf:6.8848e-02 L8_l1linf:6.8848e-02 L9_l1linf:6.9336e-02 L10_l1linf:7.0312e-02 L11_l1linf:7.1289e-02 L12_l1linf:6.8359e-02 L1_spectral:3.1627e-03 L2_spectral:3.1715e-03 L3_spectral:3.1484e-03 L4_spectral:3.1679e-03 L5_spectral:3.1572e-03 L6_spectral:3.1509e-03 L7_spectral:3.1392e-03 L8_spectral:3.1404e-03 L9_spectral:3.1535e-03 L10_spectral:3.1723e-03 L11_spectral:3.1577e-03 L12_spectral:3.1488e-03 train_time:116650ms step_avg:48.60ms +[2025-09-11 11:33:37] [Rank 0] step:2401/10000 train_time:117910ms step_avg:49.11ms +[2025-09-11 11:33:37] [Rank 0] step:2401/10000 train_time:117910ms step_avg:49.11ms +[2025-09-11 11:33:38] [Rank 0] step:2421/10000 train_time:118566ms step_avg:48.97ms +[2025-09-11 11:33:38] [Rank 0] step:2421/10000 train_time:118566ms step_avg:48.97ms +[2025-09-11 11:33:38] [Rank 0] step:2441/10000 train_time:119230ms step_avg:48.84ms +[2025-09-11 11:33:38] [Rank 0] step:2441/10000 train_time:119230ms step_avg:48.84ms +[2025-09-11 11:33:39] [Rank 0] step:2461/10000 train_time:119894ms step_avg:48.72ms +[2025-09-11 11:33:39] [Rank 0] step:2461/10000 train_time:119894ms step_avg:48.72ms +[2025-09-11 11:33:40] [Rank 0] step:2481/10000 train_time:120557ms step_avg:48.59ms +[2025-09-11 11:33:40] [Rank 0] step:2481/10000 
train_time:120557ms step_avg:48.59ms +[2025-09-11 11:33:40] [Rank 0] step:2501/10000 train_time:121220ms step_avg:48.47ms +[2025-09-11 11:33:40] [Rank 0] step:2501/10000 train_time:121220ms step_avg:48.47ms +[2025-09-11 11:33:41] [Rank 0] step:2521/10000 train_time:121883ms step_avg:48.35ms +[2025-09-11 11:33:41] [Rank 0] step:2521/10000 train_time:121883ms step_avg:48.35ms +[2025-09-11 11:33:42] [Rank 0] step:2541/10000 train_time:122545ms step_avg:48.23ms +[2025-09-11 11:33:42] [Rank 0] step:2541/10000 train_time:122545ms step_avg:48.23ms +[2025-09-11 11:33:42] [Rank 0] step:2561/10000 train_time:123207ms step_avg:48.11ms +[2025-09-11 11:33:42] [Rank 0] step:2561/10000 train_time:123207ms step_avg:48.11ms +[2025-09-11 11:33:43] [Rank 0] step:2581/10000 train_time:123870ms step_avg:47.99ms +[2025-09-11 11:33:43] [Rank 0] step:2581/10000 train_time:123870ms step_avg:47.99ms +[2025-09-11 11:33:44] [Rank 0] step:2601/10000 train_time:124533ms step_avg:47.88ms +[2025-09-11 11:33:44] [Rank 0] step:2601/10000 train_time:124533ms step_avg:47.88ms +[2025-09-11 11:33:44] [Rank 0] step:2621/10000 train_time:125196ms step_avg:47.77ms +[2025-09-11 11:33:44] [Rank 0] step:2621/10000 train_time:125196ms step_avg:47.77ms +[2025-09-11 11:33:45] [Rank 0] step:2641/10000 train_time:125858ms step_avg:47.66ms +[2025-09-11 11:33:45] [Rank 0] step:2641/10000 train_time:125858ms step_avg:47.66ms +[2025-09-11 11:33:46] [Rank 0] step:2661/10000 train_time:126521ms step_avg:47.55ms +[2025-09-11 11:33:46] [Rank 0] step:2661/10000 train_time:126521ms step_avg:47.55ms +[2025-09-11 11:33:46] [Rank 0] step:2681/10000 train_time:127183ms step_avg:47.44ms +[2025-09-11 11:33:46] [Rank 0] step:2681/10000 train_time:127183ms step_avg:47.44ms +[2025-09-11 11:33:47] [Rank 0] step:2701/10000 train_time:127846ms step_avg:47.33ms +[2025-09-11 11:33:47] [Rank 0] step:2701/10000 train_time:127846ms step_avg:47.33ms +[2025-09-11 11:33:48] [Rank 0] step:2721/10000 train_time:128509ms step_avg:47.23ms 
+[2025-09-11 11:33:48] [Rank 0] step:2721/10000 train_time:128509ms step_avg:47.23ms +[2025-09-11 11:33:48] [Rank 0] step:2741/10000 train_time:129171ms step_avg:47.13ms +[2025-09-11 11:33:48] [Rank 0] step:2741/10000 train_time:129171ms step_avg:47.13ms +[2025-09-11 11:33:49] [Rank 0] step:2761/10000 train_time:129834ms step_avg:47.02ms +[2025-09-11 11:33:49] [Rank 0] step:2761/10000 train_time:129834ms step_avg:47.02ms +[2025-09-11 11:33:50] [Rank 0] step:2781/10000 train_time:130496ms step_avg:46.92ms +[2025-09-11 11:33:50] [Rank 0] step:2781/10000 train_time:130496ms step_avg:46.92ms +[2025-09-11 11:33:50] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 11:33:50] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 11:33:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 11:33:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 11:33:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 11:33:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 11:33:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:33:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:33:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 11:33:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 11:33:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 11:33:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 11:33:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 11:33:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 11:33:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 11:33:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 11:33:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 11:33:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 11:33:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 11:33:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 11:33:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 11:33:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 11:33:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 11:33:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 11:33:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 11:33:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 11:33:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 11:33:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 11:33:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 11:33:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 11:33:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 11:33:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 11:33:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 11:33:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 11:33:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 11:33:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 11:33:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 11:33:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 11:33:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 11:33:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 11:33:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 11:33:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 11:34:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 11:34:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 11:34:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:34:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:34:00] [Rank 0] PRINT: step:2800/10000 val_loss:5.0100 total_sharp:2.3403e-03 L1_sharp:1.1786e-02 L2_sharp:1.1652e-02 L3_sharp:9.2650e-03 L4_sharp:9.9140e-03 L5_sharp:1.9591e-02 L6_sharp:2.3343e-02 L7_sharp:3.6860e-02 L8_sharp:6.1806e-02 L9_sharp:7.7154e-02 L10_sharp:1.1964e-01 L11_sharp:1.6084e-01 L12_sharp:5.6349e-01 total_fnorm:1.4812e+01 total_l1_linf:2.1120e+04 total_spectral:7.4375e+00 L1_fnorm:2.4902e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4512e-01 L8_fnorm:2.4512e-01 L9_fnorm:2.4707e-01 L10_fnorm:2.4707e-01 L11_fnorm:2.4902e-01 L12_fnorm:2.4902e-01 L1_l1linf:6.9336e-02 L2_l1linf:6.9336e-02 L3_l1linf:6.9824e-02 L4_l1linf:6.8359e-02 L5_l1linf:6.8848e-02 L6_l1linf:6.6895e-02 L7_l1linf:6.8359e-02 L8_l1linf:6.7383e-02 L9_l1linf:6.7871e-02 L10_l1linf:6.8848e-02 L11_l1linf:7.0312e-02 L12_l1linf:6.5918e-02 L1_spectral:3.1959e-03 L2_spectral:3.1893e-03 L3_spectral:3.1687e-03 L4_spectral:3.1760e-03 L5_spectral:3.1639e-03 L6_spectral:3.1617e-03 L7_spectral:3.1468e-03 L8_spectral:3.1415e-03 L9_spectral:3.1633e-03 L10_spectral:3.1632e-03 L11_spectral:3.1692e-03 L12_spectral:3.1365e-03 train_time:131143ms step_avg:46.84ms +[2025-09-11 11:34:00] [Rank 0] PRINT: step:2800/10000 val_loss:5.0100 total_sharp:2.3403e-03 L1_sharp:1.1786e-02 L2_sharp:1.1652e-02 L3_sharp:9.2650e-03 L4_sharp:9.9140e-03 L5_sharp:1.9591e-02 L6_sharp:2.3343e-02 L7_sharp:3.6860e-02 L8_sharp:6.1806e-02 L9_sharp:7.7154e-02 L10_sharp:1.1964e-01 L11_sharp:1.6084e-01 L12_sharp:5.6349e-01 total_fnorm:1.4812e+01 total_l1_linf:2.1120e+04 total_spectral:7.4375e+00 L1_fnorm:2.4902e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4512e-01 L8_fnorm:2.4512e-01 L9_fnorm:2.4707e-01 L10_fnorm:2.4707e-01 L11_fnorm:2.4902e-01 L12_fnorm:2.4902e-01 L1_l1linf:6.9336e-02 L2_l1linf:6.9336e-02 L3_l1linf:6.9824e-02 L4_l1linf:6.8359e-02 L5_l1linf:6.8848e-02 
L6_l1linf:6.6895e-02 L7_l1linf:6.8359e-02 L8_l1linf:6.7383e-02 L9_l1linf:6.7871e-02 L10_l1linf:6.8848e-02 L11_l1linf:7.0312e-02 L12_l1linf:6.5918e-02 L1_spectral:3.1959e-03 L2_spectral:3.1893e-03 L3_spectral:3.1687e-03 L4_spectral:3.1760e-03 L5_spectral:3.1639e-03 L6_spectral:3.1617e-03 L7_spectral:3.1468e-03 L8_spectral:3.1415e-03 L9_spectral:3.1633e-03 L10_spectral:3.1632e-03 L11_spectral:3.1692e-03 L12_spectral:3.1365e-03 train_time:131143ms step_avg:46.84ms +[2025-09-11 11:34:02] [Rank 0] step:2801/10000 train_time:132420ms step_avg:47.28ms +[2025-09-11 11:34:02] [Rank 0] step:2801/10000 train_time:132420ms step_avg:47.28ms +[2025-09-11 11:34:02] [Rank 0] step:2821/10000 train_time:133089ms step_avg:47.18ms +[2025-09-11 11:34:02] [Rank 0] step:2821/10000 train_time:133089ms step_avg:47.18ms +[2025-09-11 11:34:03] [Rank 0] step:2841/10000 train_time:133754ms step_avg:47.08ms +[2025-09-11 11:34:03] [Rank 0] step:2841/10000 train_time:133754ms step_avg:47.08ms +[2025-09-11 11:34:04] [Rank 0] step:2861/10000 train_time:134418ms step_avg:46.98ms +[2025-09-11 11:34:04] [Rank 0] step:2861/10000 train_time:134418ms step_avg:46.98ms +[2025-09-11 11:34:04] [Rank 0] step:2881/10000 train_time:135081ms step_avg:46.89ms +[2025-09-11 11:34:04] [Rank 0] step:2881/10000 train_time:135081ms step_avg:46.89ms +[2025-09-11 11:34:05] [Rank 0] step:2901/10000 train_time:135746ms step_avg:46.79ms +[2025-09-11 11:34:05] [Rank 0] step:2901/10000 train_time:135746ms step_avg:46.79ms +[2025-09-11 11:34:06] [Rank 0] step:2921/10000 train_time:136410ms step_avg:46.70ms +[2025-09-11 11:34:06] [Rank 0] step:2921/10000 train_time:136410ms step_avg:46.70ms +[2025-09-11 11:34:06] [Rank 0] step:2941/10000 train_time:137075ms step_avg:46.61ms +[2025-09-11 11:34:06] [Rank 0] step:2941/10000 train_time:137075ms step_avg:46.61ms +[2025-09-11 11:34:07] [Rank 0] step:2961/10000 train_time:137739ms step_avg:46.52ms +[2025-09-11 11:34:07] [Rank 0] step:2961/10000 train_time:137739ms step_avg:46.52ms 
+[2025-09-11 11:34:08] [Rank 0] step:2981/10000 train_time:138405ms step_avg:46.43ms +[2025-09-11 11:34:08] [Rank 0] step:2981/10000 train_time:138405ms step_avg:46.43ms +[2025-09-11 11:34:08] [Rank 0] step:3001/10000 train_time:139072ms step_avg:46.34ms +[2025-09-11 11:34:08] [Rank 0] step:3001/10000 train_time:139072ms step_avg:46.34ms +[2025-09-11 11:34:09] [Rank 0] step:3021/10000 train_time:139739ms step_avg:46.26ms +[2025-09-11 11:34:09] [Rank 0] step:3021/10000 train_time:139739ms step_avg:46.26ms +[2025-09-11 11:34:10] [Rank 0] step:3041/10000 train_time:140405ms step_avg:46.17ms +[2025-09-11 11:34:10] [Rank 0] step:3041/10000 train_time:140405ms step_avg:46.17ms +[2025-09-11 11:34:10] [Rank 0] step:3061/10000 train_time:141072ms step_avg:46.09ms +[2025-09-11 11:34:10] [Rank 0] step:3061/10000 train_time:141072ms step_avg:46.09ms +[2025-09-11 11:34:11] [Rank 0] step:3081/10000 train_time:142036ms step_avg:46.10ms +[2025-09-11 11:34:11] [Rank 0] step:3081/10000 train_time:142036ms step_avg:46.10ms +[2025-09-11 11:34:12] [Rank 0] step:3101/10000 train_time:142702ms step_avg:46.02ms +[2025-09-11 11:34:12] [Rank 0] step:3101/10000 train_time:142702ms step_avg:46.02ms +[2025-09-11 11:34:13] [Rank 0] step:3121/10000 train_time:143369ms step_avg:45.94ms +[2025-09-11 11:34:13] [Rank 0] step:3121/10000 train_time:143369ms step_avg:45.94ms +[2025-09-11 11:34:13] [Rank 0] step:3141/10000 train_time:144035ms step_avg:45.86ms +[2025-09-11 11:34:13] [Rank 0] step:3141/10000 train_time:144035ms step_avg:45.86ms +[2025-09-11 11:34:14] [Rank 0] step:3161/10000 train_time:144979ms step_avg:45.86ms +[2025-09-11 11:34:14] [Rank 0] step:3161/10000 train_time:144979ms step_avg:45.86ms +[2025-09-11 11:34:15] [Rank 0] step:3181/10000 train_time:145646ms step_avg:45.79ms +[2025-09-11 11:34:15] [Rank 0] step:3181/10000 train_time:145646ms step_avg:45.79ms +[2025-09-11 11:34:16] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 11:34:16] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 11:34:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 11:34:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 11:34:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 11:34:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 11:34:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:34:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:34:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 11:34:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 11:34:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 11:34:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 11:34:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 11:34:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 11:34:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 11:34:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 11:34:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 11:34:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 11:34:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 11:34:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 11:34:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 11:34:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 11:34:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 11:34:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 11:34:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 11:34:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 11:34:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 11:34:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 11:34:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 11:34:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 11:34:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 11:34:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 11:34:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 11:34:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 11:34:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 11:34:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 11:34:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 11:34:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 11:34:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 11:34:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 11:34:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 11:34:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 11:34:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 11:34:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 11:34:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:34:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:34:26] [Rank 0] PRINT: step:3200/10000 val_loss:4.9256 total_sharp:1.4855e-03 L1_sharp:1.4550e-02 L2_sharp:1.2184e-02 L3_sharp:1.0460e-02 L4_sharp:1.3893e-02 L5_sharp:1.7811e-02 L6_sharp:1.9478e-02 L7_sharp:3.3105e-02 L8_sharp:5.1963e-02 L9_sharp:7.7998e-02 L10_sharp:1.1805e-01 L11_sharp:1.6417e-01 L12_sharp:5.3812e-01 total_fnorm:1.7250e+01 total_l1_linf:2.5472e+04 total_spectral:8.6250e+00 L1_fnorm:2.4902e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4707e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4609e-01 L8_fnorm:2.4512e-01 L9_fnorm:2.4609e-01 L10_fnorm:2.4512e-01 L11_fnorm:2.4805e-01 L12_fnorm:2.5000e-01 L1_l1linf:6.8359e-02 L2_l1linf:6.9336e-02 L3_l1linf:6.8359e-02 L4_l1linf:6.6895e-02 L5_l1linf:6.7871e-02 L6_l1linf:6.5918e-02 L7_l1linf:6.5918e-02 L8_l1linf:6.5918e-02 L9_l1linf:6.4453e-02 L10_l1linf:6.5430e-02 L11_l1linf:6.8359e-02 L12_l1linf:6.5918e-02 L1_spectral:3.2124e-03 L2_spectral:3.1994e-03 L3_spectral:3.1874e-03 L4_spectral:3.1705e-03 L5_spectral:3.1672e-03 L6_spectral:3.1701e-03 L7_spectral:3.1860e-03 L8_spectral:3.1583e-03 L9_spectral:3.1647e-03 L10_spectral:3.1692e-03 L11_spectral:3.1637e-03 L12_spectral:3.1854e-03 train_time:146294ms step_avg:45.72ms +[2025-09-11 11:34:26] [Rank 0] PRINT: step:3200/10000 
val_loss:4.9256 total_sharp:1.4855e-03 L1_sharp:1.4550e-02 L2_sharp:1.2184e-02 L3_sharp:1.0460e-02 L4_sharp:1.3893e-02 L5_sharp:1.7811e-02 L6_sharp:1.9478e-02 L7_sharp:3.3105e-02 L8_sharp:5.1963e-02 L9_sharp:7.7998e-02 L10_sharp:1.1805e-01 L11_sharp:1.6417e-01 L12_sharp:5.3812e-01 total_fnorm:1.7250e+01 total_l1_linf:2.5472e+04 total_spectral:8.6250e+00 L1_fnorm:2.4902e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4707e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4609e-01 L8_fnorm:2.4512e-01 L9_fnorm:2.4609e-01 L10_fnorm:2.4512e-01 L11_fnorm:2.4805e-01 L12_fnorm:2.5000e-01 L1_l1linf:6.8359e-02 L2_l1linf:6.9336e-02 L3_l1linf:6.8359e-02 L4_l1linf:6.6895e-02 L5_l1linf:6.7871e-02 L6_l1linf:6.5918e-02 L7_l1linf:6.5918e-02 L8_l1linf:6.5918e-02 L9_l1linf:6.4453e-02 L10_l1linf:6.5430e-02 L11_l1linf:6.8359e-02 L12_l1linf:6.5918e-02 L1_spectral:3.2124e-03 L2_spectral:3.1994e-03 L3_spectral:3.1874e-03 L4_spectral:3.1705e-03 L5_spectral:3.1672e-03 L6_spectral:3.1701e-03 L7_spectral:3.1860e-03 L8_spectral:3.1583e-03 L9_spectral:3.1647e-03 L10_spectral:3.1692e-03 L11_spectral:3.1637e-03 L12_spectral:3.1854e-03 train_time:146294ms step_avg:45.72ms +[2025-09-11 11:34:27] [Rank 0] step:3201/10000 train_time:147585ms step_avg:46.11ms +[2025-09-11 11:34:27] [Rank 0] step:3201/10000 train_time:147585ms step_avg:46.11ms +[2025-09-11 11:34:28] [Rank 0] step:3221/10000 train_time:148261ms step_avg:46.03ms +[2025-09-11 11:34:28] [Rank 0] step:3221/10000 train_time:148261ms step_avg:46.03ms +[2025-09-11 11:34:28] [Rank 0] step:3241/10000 train_time:148928ms step_avg:45.95ms +[2025-09-11 11:34:28] [Rank 0] step:3241/10000 train_time:148928ms step_avg:45.95ms +[2025-09-11 11:34:29] [Rank 0] step:3261/10000 train_time:149595ms step_avg:45.87ms +[2025-09-11 11:34:29] [Rank 0] step:3261/10000 train_time:149595ms step_avg:45.87ms +[2025-09-11 11:34:30] [Rank 0] step:3281/10000 train_time:150262ms step_avg:45.80ms +[2025-09-11 11:34:30] [Rank 0] step:3281/10000 
train_time:150262ms step_avg:45.80ms +[2025-09-11 11:34:30] [Rank 0] step:3301/10000 train_time:150928ms step_avg:45.72ms +[2025-09-11 11:34:30] [Rank 0] step:3301/10000 train_time:150928ms step_avg:45.72ms +[2025-09-11 11:34:31] [Rank 0] step:3321/10000 train_time:151594ms step_avg:45.65ms +[2025-09-11 11:34:31] [Rank 0] step:3321/10000 train_time:151594ms step_avg:45.65ms +[2025-09-11 11:34:32] [Rank 0] step:3341/10000 train_time:152263ms step_avg:45.57ms +[2025-09-11 11:34:32] [Rank 0] step:3341/10000 train_time:152263ms step_avg:45.57ms +[2025-09-11 11:34:32] [Rank 0] step:3361/10000 train_time:152931ms step_avg:45.50ms +[2025-09-11 11:34:32] [Rank 0] step:3361/10000 train_time:152931ms step_avg:45.50ms +[2025-09-11 11:34:33] [Rank 0] step:3381/10000 train_time:153598ms step_avg:45.43ms +[2025-09-11 11:34:33] [Rank 0] step:3381/10000 train_time:153598ms step_avg:45.43ms +[2025-09-11 11:34:34] [Rank 0] step:3401/10000 train_time:154265ms step_avg:45.36ms +[2025-09-11 11:34:34] [Rank 0] step:3401/10000 train_time:154265ms step_avg:45.36ms +[2025-09-11 11:34:34] [Rank 0] step:3421/10000 train_time:154932ms step_avg:45.29ms +[2025-09-11 11:34:34] [Rank 0] step:3421/10000 train_time:154932ms step_avg:45.29ms +[2025-09-11 11:34:35] [Rank 0] step:3441/10000 train_time:155598ms step_avg:45.22ms +[2025-09-11 11:34:35] [Rank 0] step:3441/10000 train_time:155598ms step_avg:45.22ms +[2025-09-11 11:34:36] [Rank 0] step:3461/10000 train_time:156264ms step_avg:45.15ms +[2025-09-11 11:34:36] [Rank 0] step:3461/10000 train_time:156264ms step_avg:45.15ms +[2025-09-11 11:34:36] [Rank 0] step:3481/10000 train_time:156933ms step_avg:45.08ms +[2025-09-11 11:34:36] [Rank 0] step:3481/10000 train_time:156933ms step_avg:45.08ms +[2025-09-11 11:34:37] [Rank 0] step:3501/10000 train_time:157600ms step_avg:45.02ms +[2025-09-11 11:34:37] [Rank 0] step:3501/10000 train_time:157600ms step_avg:45.02ms +[2025-09-11 11:34:38] [Rank 0] step:3521/10000 train_time:158266ms step_avg:44.95ms 
+[2025-09-11 11:34:38] [Rank 0] step:3521/10000 train_time:158266ms step_avg:44.95ms +[2025-09-11 11:34:38] [Rank 0] step:3541/10000 train_time:158932ms step_avg:44.88ms +[2025-09-11 11:34:38] [Rank 0] step:3541/10000 train_time:158932ms step_avg:44.88ms +[2025-09-11 11:34:39] [Rank 0] step:3561/10000 train_time:159598ms step_avg:44.82ms +[2025-09-11 11:34:39] [Rank 0] step:3561/10000 train_time:159598ms step_avg:44.82ms +[2025-09-11 11:34:40] [Rank 0] step:3581/10000 train_time:160265ms step_avg:44.75ms +[2025-09-11 11:34:40] [Rank 0] step:3581/10000 train_time:160265ms step_avg:44.75ms +[2025-09-11 11:34:40] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 11:34:40] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 11:34:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 11:34:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 11:34:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 11:34:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 11:34:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:34:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:34:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 11:34:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 11:34:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 11:34:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 11:34:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 11:34:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 11:34:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 11:34:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 11:34:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 11:34:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 11:34:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 11:34:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 11:34:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 11:34:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 11:34:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 11:34:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 11:34:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 11:34:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 11:34:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 11:34:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 11:34:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 11:34:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 11:34:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 11:34:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 11:34:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 11:34:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 11:34:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 11:34:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 11:34:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 11:34:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 11:34:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 11:34:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 11:34:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 11:34:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 11:34:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 11:34:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 11:34:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:34:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:34:50] [Rank 0] PRINT: step:3600/10000 val_loss:4.8704 total_sharp:1.6762e-03 L1_sharp:1.3932e-02 L2_sharp:8.7979e-03 L3_sharp:6.8634e-03 L4_sharp:7.8462e-03 L5_sharp:1.1489e-02 L6_sharp:1.6792e-02 L7_sharp:2.6049e-02 L8_sharp:4.5182e-02 L9_sharp:5.9990e-02 L10_sharp:8.8377e-02 L11_sharp:1.2969e-01 L12_sharp:6.7501e-01 total_fnorm:1.4812e+01 total_l1_linf:2.0864e+04 total_spectral:7.4375e+00 L1_fnorm:2.4902e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4512e-01 L8_fnorm:2.4414e-01 L9_fnorm:2.4512e-01 L10_fnorm:2.4512e-01 L11_fnorm:2.4805e-01 L12_fnorm:2.4902e-01 L1_l1linf:6.6406e-02 L2_l1linf:6.6895e-02 L3_l1linf:6.7383e-02 L4_l1linf:6.6406e-02 L5_l1linf:6.5918e-02 L6_l1linf:6.5430e-02 L7_l1linf:6.3965e-02 L8_l1linf:6.4941e-02 L9_l1linf:6.3965e-02 L10_l1linf:6.4941e-02 L11_l1linf:6.6406e-02 L12_l1linf:6.4453e-02 L1_spectral:3.1921e-03 L2_spectral:3.2034e-03 L3_spectral:3.1936e-03 L4_spectral:3.2021e-03 L5_spectral:3.1842e-03 L6_spectral:3.1714e-03 L7_spectral:3.1832e-03 L8_spectral:3.1689e-03 L9_spectral:3.1854e-03 L10_spectral:3.1717e-03 L11_spectral:3.1756e-03 L12_spectral:3.1649e-03 train_time:160913ms step_avg:44.70ms +[2025-09-11 11:34:50] [Rank 0] PRINT: step:3600/10000 val_loss:4.8704 total_sharp:1.6762e-03 L1_sharp:1.3932e-02 L2_sharp:8.7979e-03 L3_sharp:6.8634e-03 L4_sharp:7.8462e-03 L5_sharp:1.1489e-02 L6_sharp:1.6792e-02 L7_sharp:2.6049e-02 L8_sharp:4.5182e-02 L9_sharp:5.9990e-02 L10_sharp:8.8377e-02 L11_sharp:1.2969e-01 L12_sharp:6.7501e-01 total_fnorm:1.4812e+01 total_l1_linf:2.0864e+04 total_spectral:7.4375e+00 L1_fnorm:2.4902e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4512e-01 L8_fnorm:2.4414e-01 L9_fnorm:2.4512e-01 L10_fnorm:2.4512e-01 L11_fnorm:2.4805e-01 L12_fnorm:2.4902e-01 L1_l1linf:6.6406e-02 L2_l1linf:6.6895e-02 L3_l1linf:6.7383e-02 L4_l1linf:6.6406e-02 L5_l1linf:6.5918e-02 
L6_l1linf:6.5430e-02 L7_l1linf:6.3965e-02 L8_l1linf:6.4941e-02 L9_l1linf:6.3965e-02 L10_l1linf:6.4941e-02 L11_l1linf:6.6406e-02 L12_l1linf:6.4453e-02 L1_spectral:3.1921e-03 L2_spectral:3.2034e-03 L3_spectral:3.1936e-03 L4_spectral:3.2021e-03 L5_spectral:3.1842e-03 L6_spectral:3.1714e-03 L7_spectral:3.1832e-03 L8_spectral:3.1689e-03 L9_spectral:3.1854e-03 L10_spectral:3.1717e-03 L11_spectral:3.1756e-03 L12_spectral:3.1649e-03 train_time:160913ms step_avg:44.70ms +[2025-09-11 11:34:52] [Rank 0] step:3601/10000 train_time:162210ms step_avg:45.05ms +[2025-09-11 11:34:52] [Rank 0] step:3601/10000 train_time:162210ms step_avg:45.05ms +[2025-09-11 11:34:52] [Rank 0] step:3621/10000 train_time:162886ms step_avg:44.98ms +[2025-09-11 11:34:52] [Rank 0] step:3621/10000 train_time:162886ms step_avg:44.98ms +[2025-09-11 11:34:53] [Rank 0] step:3641/10000 train_time:163553ms step_avg:44.92ms +[2025-09-11 11:34:53] [Rank 0] step:3641/10000 train_time:163553ms step_avg:44.92ms +[2025-09-11 11:34:54] [Rank 0] step:3661/10000 train_time:164220ms step_avg:44.86ms +[2025-09-11 11:34:54] [Rank 0] step:3661/10000 train_time:164220ms step_avg:44.86ms +[2025-09-11 11:34:54] [Rank 0] step:3681/10000 train_time:164886ms step_avg:44.79ms +[2025-09-11 11:34:54] [Rank 0] step:3681/10000 train_time:164886ms step_avg:44.79ms +[2025-09-11 11:34:55] [Rank 0] step:3701/10000 train_time:165552ms step_avg:44.73ms +[2025-09-11 11:34:55] [Rank 0] step:3701/10000 train_time:165552ms step_avg:44.73ms +[2025-09-11 11:34:56] [Rank 0] step:3721/10000 train_time:166228ms step_avg:44.67ms +[2025-09-11 11:34:56] [Rank 0] step:3721/10000 train_time:166228ms step_avg:44.67ms +[2025-09-11 11:34:56] [Rank 0] step:3741/10000 train_time:166904ms step_avg:44.61ms +[2025-09-11 11:34:56] [Rank 0] step:3741/10000 train_time:166904ms step_avg:44.61ms +[2025-09-11 11:34:57] [Rank 0] step:3761/10000 train_time:167581ms step_avg:44.56ms +[2025-09-11 11:34:57] [Rank 0] step:3761/10000 train_time:167581ms step_avg:44.56ms 
+[2025-09-11 11:34:58] [Rank 0] step:3781/10000 train_time:168257ms step_avg:44.50ms +[2025-09-11 11:34:58] [Rank 0] step:3781/10000 train_time:168257ms step_avg:44.50ms +[2025-09-11 11:34:58] [Rank 0] step:3801/10000 train_time:168934ms step_avg:44.44ms +[2025-09-11 11:34:58] [Rank 0] step:3801/10000 train_time:168934ms step_avg:44.44ms +[2025-09-11 11:34:59] [Rank 0] step:3821/10000 train_time:169612ms step_avg:44.39ms +[2025-09-11 11:34:59] [Rank 0] step:3821/10000 train_time:169612ms step_avg:44.39ms +[2025-09-11 11:35:00] [Rank 0] step:3841/10000 train_time:170288ms step_avg:44.33ms +[2025-09-11 11:35:00] [Rank 0] step:3841/10000 train_time:170288ms step_avg:44.33ms +[2025-09-11 11:35:00] [Rank 0] step:3861/10000 train_time:170965ms step_avg:44.28ms +[2025-09-11 11:35:00] [Rank 0] step:3861/10000 train_time:170965ms step_avg:44.28ms +[2025-09-11 11:35:01] [Rank 0] step:3881/10000 train_time:171642ms step_avg:44.23ms +[2025-09-11 11:35:01] [Rank 0] step:3881/10000 train_time:171642ms step_avg:44.23ms +[2025-09-11 11:35:02] [Rank 0] step:3901/10000 train_time:172318ms step_avg:44.17ms +[2025-09-11 11:35:02] [Rank 0] step:3901/10000 train_time:172318ms step_avg:44.17ms +[2025-09-11 11:35:02] [Rank 0] step:3921/10000 train_time:173059ms step_avg:44.14ms +[2025-09-11 11:35:02] [Rank 0] step:3921/10000 train_time:173059ms step_avg:44.14ms +[2025-09-11 11:35:03] [Rank 0] step:3941/10000 train_time:173736ms step_avg:44.08ms +[2025-09-11 11:35:03] [Rank 0] step:3941/10000 train_time:173736ms step_avg:44.08ms +[2025-09-11 11:35:04] [Rank 0] step:3961/10000 train_time:174478ms step_avg:44.05ms +[2025-09-11 11:35:04] [Rank 0] step:3961/10000 train_time:174478ms step_avg:44.05ms +[2025-09-11 11:35:05] [Rank 0] step:3981/10000 train_time:175157ms step_avg:44.00ms +[2025-09-11 11:35:05] [Rank 0] step:3981/10000 train_time:175157ms step_avg:44.00ms +[2025-09-11 11:35:05] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 11:35:05] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 11:35:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 11:35:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 11:35:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 11:35:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 11:35:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:35:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:35:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 11:35:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 11:35:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 11:35:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 11:35:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 11:35:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 11:35:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 11:35:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 11:35:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 11:35:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 11:35:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 11:35:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 11:35:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 11:35:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 11:35:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 11:35:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 11:35:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 11:35:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 11:35:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 11:35:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 11:35:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 11:35:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 11:35:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 11:35:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 11:35:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 11:35:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 11:35:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 11:35:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 11:35:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 11:35:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 11:35:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 11:35:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 11:35:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 11:35:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 11:35:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 11:35:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 11:35:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:35:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:35:16] [Rank 0] PRINT: step:4000/10000 val_loss:4.8066 total_sharp:1.5275e-03 L1_sharp:1.5089e-02 L2_sharp:1.2703e-02 L3_sharp:8.8053e-03 L4_sharp:1.0827e-02 L5_sharp:1.9988e-02 L6_sharp:2.0626e-02 L7_sharp:3.5136e-02 L8_sharp:5.4833e-02 L9_sharp:7.3401e-02 L10_sharp:1.1203e-01 L11_sharp:1.7530e-01 L12_sharp:1.1047e+00 total_fnorm:1.8875e+01 total_l1_linf:2.6240e+04 total_spectral:9.4375e+00 L1_fnorm:2.4805e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4707e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4414e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.4316e-01 L9_fnorm:2.4512e-01 L10_fnorm:2.4512e-01 L11_fnorm:2.4707e-01 L12_fnorm:2.4707e-01 L1_l1linf:6.5430e-02 L2_l1linf:6.6406e-02 L3_l1linf:6.4941e-02 L4_l1linf:6.4941e-02 L5_l1linf:6.5430e-02 L6_l1linf:6.4453e-02 L7_l1linf:6.4453e-02 L8_l1linf:6.2988e-02 L9_l1linf:6.3965e-02 L10_l1linf:6.4941e-02 L11_l1linf:6.7383e-02 L12_l1linf:6.0791e-02 L1_spectral:3.2180e-03 L2_spectral:3.2125e-03 L3_spectral:3.2049e-03 L4_spectral:3.1995e-03 L5_spectral:3.1852e-03 L6_spectral:3.1791e-03 L7_spectral:3.1839e-03 L8_spectral:3.1814e-03 L9_spectral:3.1688e-03 L10_spectral:3.1695e-03 L11_spectral:3.1621e-03 L12_spectral:3.1632e-03 train_time:175815ms step_avg:43.95ms +[2025-09-11 11:35:16] [Rank 0] PRINT: step:4000/10000 
val_loss:4.8066 total_sharp:1.5275e-03 L1_sharp:1.5089e-02 L2_sharp:1.2703e-02 L3_sharp:8.8053e-03 L4_sharp:1.0827e-02 L5_sharp:1.9988e-02 L6_sharp:2.0626e-02 L7_sharp:3.5136e-02 L8_sharp:5.4833e-02 L9_sharp:7.3401e-02 L10_sharp:1.1203e-01 L11_sharp:1.7530e-01 L12_sharp:1.1047e+00 total_fnorm:1.8875e+01 total_l1_linf:2.6240e+04 total_spectral:9.4375e+00 L1_fnorm:2.4805e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4707e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4414e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.4316e-01 L9_fnorm:2.4512e-01 L10_fnorm:2.4512e-01 L11_fnorm:2.4707e-01 L12_fnorm:2.4707e-01 L1_l1linf:6.5430e-02 L2_l1linf:6.6406e-02 L3_l1linf:6.4941e-02 L4_l1linf:6.4941e-02 L5_l1linf:6.5430e-02 L6_l1linf:6.4453e-02 L7_l1linf:6.4453e-02 L8_l1linf:6.2988e-02 L9_l1linf:6.3965e-02 L10_l1linf:6.4941e-02 L11_l1linf:6.7383e-02 L12_l1linf:6.0791e-02 L1_spectral:3.2180e-03 L2_spectral:3.2125e-03 L3_spectral:3.2049e-03 L4_spectral:3.1995e-03 L5_spectral:3.1852e-03 L6_spectral:3.1791e-03 L7_spectral:3.1839e-03 L8_spectral:3.1814e-03 L9_spectral:3.1688e-03 L10_spectral:3.1695e-03 L11_spectral:3.1621e-03 L12_spectral:3.1632e-03 train_time:175815ms step_avg:43.95ms +[2025-09-11 11:35:18] [Rank 0] step:4001/10000 train_time:177132ms step_avg:44.27ms +[2025-09-11 11:35:18] [Rank 0] step:4001/10000 train_time:177132ms step_avg:44.27ms +[2025-09-11 11:35:18] [Rank 0] step:4021/10000 train_time:177817ms step_avg:44.22ms +[2025-09-11 11:35:18] [Rank 0] step:4021/10000 train_time:177817ms step_avg:44.22ms +[2025-09-11 11:35:19] [Rank 0] step:4041/10000 train_time:178494ms step_avg:44.17ms +[2025-09-11 11:35:19] [Rank 0] step:4041/10000 train_time:178494ms step_avg:44.17ms +[2025-09-11 11:35:20] [Rank 0] step:4061/10000 train_time:179169ms step_avg:44.12ms +[2025-09-11 11:35:20] [Rank 0] step:4061/10000 train_time:179169ms step_avg:44.12ms +[2025-09-11 11:35:20] [Rank 0] step:4081/10000 train_time:179846ms step_avg:44.07ms +[2025-09-11 11:35:20] [Rank 0] step:4081/10000 
train_time:179846ms step_avg:44.07ms +[2025-09-11 11:35:21] [Rank 0] step:4101/10000 train_time:180522ms step_avg:44.02ms +[2025-09-11 11:35:21] [Rank 0] step:4101/10000 train_time:180522ms step_avg:44.02ms +[2025-09-11 11:35:22] [Rank 0] step:4121/10000 train_time:181200ms step_avg:43.97ms +[2025-09-11 11:35:22] [Rank 0] step:4121/10000 train_time:181200ms step_avg:43.97ms +[2025-09-11 11:35:22] [Rank 0] step:4141/10000 train_time:181876ms step_avg:43.92ms +[2025-09-11 11:35:22] [Rank 0] step:4141/10000 train_time:181876ms step_avg:43.92ms +[2025-09-11 11:35:23] [Rank 0] step:4161/10000 train_time:182552ms step_avg:43.87ms +[2025-09-11 11:35:23] [Rank 0] step:4161/10000 train_time:182552ms step_avg:43.87ms +[2025-09-11 11:35:24] [Rank 0] step:4181/10000 train_time:183229ms step_avg:43.82ms +[2025-09-11 11:35:24] [Rank 0] step:4181/10000 train_time:183229ms step_avg:43.82ms +[2025-09-11 11:35:25] [Rank 0] step:4201/10000 train_time:183906ms step_avg:43.78ms +[2025-09-11 11:35:25] [Rank 0] step:4201/10000 train_time:183906ms step_avg:43.78ms +[2025-09-11 11:35:25] [Rank 0] step:4221/10000 train_time:184584ms step_avg:43.73ms +[2025-09-11 11:35:25] [Rank 0] step:4221/10000 train_time:184584ms step_avg:43.73ms +[2025-09-11 11:35:26] [Rank 0] step:4241/10000 train_time:185261ms step_avg:43.68ms +[2025-09-11 11:35:26] [Rank 0] step:4241/10000 train_time:185261ms step_avg:43.68ms +[2025-09-11 11:35:27] [Rank 0] step:4261/10000 train_time:185937ms step_avg:43.64ms +[2025-09-11 11:35:27] [Rank 0] step:4261/10000 train_time:185937ms step_avg:43.64ms +[2025-09-11 11:35:27] [Rank 0] step:4281/10000 train_time:186615ms step_avg:43.59ms +[2025-09-11 11:35:27] [Rank 0] step:4281/10000 train_time:186615ms step_avg:43.59ms +[2025-09-11 11:35:28] [Rank 0] step:4301/10000 train_time:187292ms step_avg:43.55ms +[2025-09-11 11:35:28] [Rank 0] step:4301/10000 train_time:187292ms step_avg:43.55ms +[2025-09-11 11:35:29] [Rank 0] step:4321/10000 train_time:187968ms step_avg:43.50ms 
+[2025-09-11 11:35:29] [Rank 0] step:4321/10000 train_time:187968ms step_avg:43.50ms +[2025-09-11 11:35:29] [Rank 0] step:4341/10000 train_time:188644ms step_avg:43.46ms +[2025-09-11 11:35:29] [Rank 0] step:4341/10000 train_time:188644ms step_avg:43.46ms +[2025-09-11 11:35:30] [Rank 0] step:4361/10000 train_time:189319ms step_avg:43.41ms +[2025-09-11 11:35:30] [Rank 0] step:4361/10000 train_time:189319ms step_avg:43.41ms +[2025-09-11 11:35:31] [Rank 0] step:4381/10000 train_time:189996ms step_avg:43.37ms +[2025-09-11 11:35:31] [Rank 0] step:4381/10000 train_time:189996ms step_avg:43.37ms +[2025-09-11 11:35:31] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 11:35:31] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 11:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 11:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 11:35:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 11:35:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 11:35:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:35:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:35:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 11:35:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 11:35:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 11:35:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 11:35:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 11:35:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 11:35:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 11:35:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 11:35:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 11:35:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 11:35:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 11:35:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 11:35:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 11:35:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 11:35:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 11:35:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 11:35:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 11:35:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 11:35:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 11:35:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 11:35:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 11:35:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 11:35:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 11:35:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 11:35:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 11:35:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 11:35:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 11:35:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 11:35:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 11:35:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 11:35:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 11:35:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 11:35:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 11:35:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 11:35:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 11:35:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 11:35:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:35:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:35:41] [Rank 0] PRINT: step:4400/10000 val_loss:4.7653 total_sharp:1.5336e-03 L1_sharp:8.3054e-03 L2_sharp:7.1141e-03 L3_sharp:7.3228e-03 L4_sharp:5.5923e-03 L5_sharp:1.2356e-02 L6_sharp:1.6479e-02 L7_sharp:2.0753e-02 L8_sharp:4.2074e-02 L9_sharp:5.7179e-02 L10_sharp:9.3442e-02 L11_sharp:1.2788e-01 L12_sharp:9.9794e-01 total_fnorm:1.5500e+01 total_l1_linf:2.1376e+04 total_spectral:7.7812e+00 L1_fnorm:2.4805e-01 L2_fnorm:2.4707e-01 L3_fnorm:2.4609e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4414e-01 L7_fnorm:2.4316e-01 L8_fnorm:2.4219e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4512e-01 L11_fnorm:2.4609e-01 L12_fnorm:2.4609e-01 L1_l1linf:6.5430e-02 L2_l1linf:6.4941e-02 L3_l1linf:6.3965e-02 L4_l1linf:6.4453e-02 L5_l1linf:6.2988e-02 L6_l1linf:6.3477e-02 L7_l1linf:6.2012e-02 L8_l1linf:6.2500e-02 L9_l1linf:6.2012e-02 L10_l1linf:6.4941e-02 L11_l1linf:6.5430e-02 L12_l1linf:5.7861e-02 L1_spectral:3.2103e-03 L2_spectral:3.2120e-03 L3_spectral:3.1962e-03 L4_spectral:3.1957e-03 L5_spectral:3.1785e-03 L6_spectral:3.2200e-03 L7_spectral:3.1825e-03 L8_spectral:3.1671e-03 L9_spectral:3.1895e-03 L10_spectral:3.1644e-03 L11_spectral:3.1896e-03 L12_spectral:3.1627e-03 train_time:190652ms step_avg:43.33ms +[2025-09-11 11:35:41] [Rank 0] PRINT: step:4400/10000 val_loss:4.7653 total_sharp:1.5336e-03 L1_sharp:8.3054e-03 L2_sharp:7.1141e-03 L3_sharp:7.3228e-03 L4_sharp:5.5923e-03 L5_sharp:1.2356e-02 L6_sharp:1.6479e-02 L7_sharp:2.0753e-02 L8_sharp:4.2074e-02 L9_sharp:5.7179e-02 L10_sharp:9.3442e-02 L11_sharp:1.2788e-01 L12_sharp:9.9794e-01 total_fnorm:1.5500e+01 total_l1_linf:2.1376e+04 total_spectral:7.7812e+00 L1_fnorm:2.4805e-01 L2_fnorm:2.4707e-01 L3_fnorm:2.4609e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4414e-01 L7_fnorm:2.4316e-01 L8_fnorm:2.4219e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4512e-01 L11_fnorm:2.4609e-01 L12_fnorm:2.4609e-01 L1_l1linf:6.5430e-02 L2_l1linf:6.4941e-02 L3_l1linf:6.3965e-02 L4_l1linf:6.4453e-02 L5_l1linf:6.2988e-02 
L6_l1linf:6.3477e-02 L7_l1linf:6.2012e-02 L8_l1linf:6.2500e-02 L9_l1linf:6.2012e-02 L10_l1linf:6.4941e-02 L11_l1linf:6.5430e-02 L12_l1linf:5.7861e-02 L1_spectral:3.2103e-03 L2_spectral:3.2120e-03 L3_spectral:3.1962e-03 L4_spectral:3.1957e-03 L5_spectral:3.1785e-03 L6_spectral:3.2200e-03 L7_spectral:3.1825e-03 L8_spectral:3.1671e-03 L9_spectral:3.1895e-03 L10_spectral:3.1644e-03 L11_spectral:3.1896e-03 L12_spectral:3.1627e-03 train_time:190652ms step_avg:43.33ms +[2025-09-11 11:35:43] [Rank 0] step:4401/10000 train_time:191965ms step_avg:43.62ms +[2025-09-11 11:35:43] [Rank 0] step:4401/10000 train_time:191965ms step_avg:43.62ms +[2025-09-11 11:35:43] [Rank 0] step:4421/10000 train_time:192650ms step_avg:43.58ms +[2025-09-11 11:35:43] [Rank 0] step:4421/10000 train_time:192650ms step_avg:43.58ms +[2025-09-11 11:35:44] [Rank 0] step:4441/10000 train_time:193327ms step_avg:43.53ms +[2025-09-11 11:35:44] [Rank 0] step:4441/10000 train_time:193327ms step_avg:43.53ms +[2025-09-11 11:35:45] [Rank 0] step:4461/10000 train_time:194007ms step_avg:43.49ms +[2025-09-11 11:35:45] [Rank 0] step:4461/10000 train_time:194007ms step_avg:43.49ms +[2025-09-11 11:35:45] [Rank 0] step:4481/10000 train_time:194685ms step_avg:43.45ms +[2025-09-11 11:35:45] [Rank 0] step:4481/10000 train_time:194685ms step_avg:43.45ms +[2025-09-11 11:35:46] [Rank 0] step:4501/10000 train_time:195364ms step_avg:43.40ms +[2025-09-11 11:35:46] [Rank 0] step:4501/10000 train_time:195364ms step_avg:43.40ms +[2025-09-11 11:35:47] [Rank 0] step:4521/10000 train_time:196044ms step_avg:43.36ms +[2025-09-11 11:35:47] [Rank 0] step:4521/10000 train_time:196044ms step_avg:43.36ms +[2025-09-11 11:35:47] [Rank 0] step:4541/10000 train_time:196723ms step_avg:43.32ms +[2025-09-11 11:35:47] [Rank 0] step:4541/10000 train_time:196723ms step_avg:43.32ms +[2025-09-11 11:35:48] [Rank 0] step:4561/10000 train_time:197401ms step_avg:43.28ms +[2025-09-11 11:35:48] [Rank 0] step:4561/10000 train_time:197401ms step_avg:43.28ms 
+[2025-09-11 11:35:49] [Rank 0] step:4581/10000 train_time:198079ms step_avg:43.24ms +[2025-09-11 11:35:49] [Rank 0] step:4581/10000 train_time:198079ms step_avg:43.24ms +[2025-09-11 11:35:49] [Rank 0] step:4601/10000 train_time:198757ms step_avg:43.20ms +[2025-09-11 11:35:49] [Rank 0] step:4601/10000 train_time:198757ms step_avg:43.20ms +[2025-09-11 11:35:50] [Rank 0] step:4621/10000 train_time:199436ms step_avg:43.16ms +[2025-09-11 11:35:50] [Rank 0] step:4621/10000 train_time:199436ms step_avg:43.16ms +[2025-09-11 11:35:51] [Rank 0] step:4641/10000 train_time:200115ms step_avg:43.12ms +[2025-09-11 11:35:51] [Rank 0] step:4641/10000 train_time:200115ms step_avg:43.12ms +[2025-09-11 11:35:52] [Rank 0] step:4661/10000 train_time:200795ms step_avg:43.08ms +[2025-09-11 11:35:52] [Rank 0] step:4661/10000 train_time:200795ms step_avg:43.08ms +[2025-09-11 11:35:52] [Rank 0] step:4681/10000 train_time:201474ms step_avg:43.04ms +[2025-09-11 11:35:52] [Rank 0] step:4681/10000 train_time:201474ms step_avg:43.04ms +[2025-09-11 11:35:53] [Rank 0] step:4701/10000 train_time:202152ms step_avg:43.00ms +[2025-09-11 11:35:53] [Rank 0] step:4701/10000 train_time:202152ms step_avg:43.00ms +[2025-09-11 11:35:54] [Rank 0] step:4721/10000 train_time:202830ms step_avg:42.96ms +[2025-09-11 11:35:54] [Rank 0] step:4721/10000 train_time:202830ms step_avg:42.96ms +[2025-09-11 11:35:54] [Rank 0] step:4741/10000 train_time:203509ms step_avg:42.93ms +[2025-09-11 11:35:54] [Rank 0] step:4741/10000 train_time:203509ms step_avg:42.93ms +[2025-09-11 11:35:55] [Rank 0] step:4761/10000 train_time:204189ms step_avg:42.89ms +[2025-09-11 11:35:55] [Rank 0] step:4761/10000 train_time:204189ms step_avg:42.89ms +[2025-09-11 11:35:56] [Rank 0] step:4781/10000 train_time:204867ms step_avg:42.85ms +[2025-09-11 11:35:56] [Rank 0] step:4781/10000 train_time:204867ms step_avg:42.85ms +[2025-09-11 11:35:56] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 11:35:56] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 11:35:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 11:35:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 11:35:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 11:35:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 11:35:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:35:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:35:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 11:35:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 11:35:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 11:35:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 11:36:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 11:36:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 11:36:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 11:36:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 11:36:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 11:36:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 11:36:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 11:36:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 11:36:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 11:36:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 11:36:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 11:36:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 11:36:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 11:36:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 11:36:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 11:36:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 11:36:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 11:36:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 11:36:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 11:36:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 11:36:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 11:36:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 11:36:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 11:36:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 11:36:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 11:36:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 11:36:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 11:36:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 11:36:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 11:36:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 11:36:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 11:36:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 11:36:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:36:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:36:06] [Rank 0] PRINT: step:4800/10000 val_loss:4.7219 total_sharp:1.3511e-03 L1_sharp:9.7468e-03 L2_sharp:6.1320e-03 L3_sharp:9.5937e-03 L4_sharp:6.8356e-03 L5_sharp:1.2501e-02 L6_sharp:1.5644e-02 L7_sharp:2.2343e-02 L8_sharp:3.8640e-02 L9_sharp:5.7376e-02 L10_sharp:8.1230e-02 L11_sharp:1.1841e-01 L12_sharp:6.6110e-01 total_fnorm:1.5875e+01 total_l1_linf:2.2144e+04 total_spectral:7.9688e+00 L1_fnorm:2.4902e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4609e-01 L4_fnorm:2.4609e-01 L5_fnorm:2.4512e-01 L6_fnorm:2.4414e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.4316e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4414e-01 L11_fnorm:2.4512e-01 L12_fnorm:2.4609e-01 L1_l1linf:6.4941e-02 L2_l1linf:6.5430e-02 L3_l1linf:6.2988e-02 L4_l1linf:6.3965e-02 L5_l1linf:6.3477e-02 L6_l1linf:6.2500e-02 L7_l1linf:6.2012e-02 L8_l1linf:6.0547e-02 L9_l1linf:6.0303e-02 L10_l1linf:6.1279e-02 L11_l1linf:6.2988e-02 L12_l1linf:5.9326e-02 L1_spectral:3.2289e-03 L2_spectral:3.2205e-03 L3_spectral:3.1994e-03 L4_spectral:3.2070e-03 L5_spectral:3.1933e-03 L6_spectral:3.1935e-03 L7_spectral:3.1862e-03 L8_spectral:3.1817e-03 L9_spectral:3.1863e-03 L10_spectral:3.1950e-03 L11_spectral:3.1906e-03 L12_spectral:3.1807e-03 train_time:205529ms step_avg:42.82ms +[2025-09-11 11:36:06] [Rank 0] PRINT: step:4800/10000 
val_loss:4.7219 total_sharp:1.3511e-03 L1_sharp:9.7468e-03 L2_sharp:6.1320e-03 L3_sharp:9.5937e-03 L4_sharp:6.8356e-03 L5_sharp:1.2501e-02 L6_sharp:1.5644e-02 L7_sharp:2.2343e-02 L8_sharp:3.8640e-02 L9_sharp:5.7376e-02 L10_sharp:8.1230e-02 L11_sharp:1.1841e-01 L12_sharp:6.6110e-01 total_fnorm:1.5875e+01 total_l1_linf:2.2144e+04 total_spectral:7.9688e+00 L1_fnorm:2.4902e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4609e-01 L4_fnorm:2.4609e-01 L5_fnorm:2.4512e-01 L6_fnorm:2.4414e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.4316e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4414e-01 L11_fnorm:2.4512e-01 L12_fnorm:2.4609e-01 L1_l1linf:6.4941e-02 L2_l1linf:6.5430e-02 L3_l1linf:6.2988e-02 L4_l1linf:6.3965e-02 L5_l1linf:6.3477e-02 L6_l1linf:6.2500e-02 L7_l1linf:6.2012e-02 L8_l1linf:6.0547e-02 L9_l1linf:6.0303e-02 L10_l1linf:6.1279e-02 L11_l1linf:6.2988e-02 L12_l1linf:5.9326e-02 L1_spectral:3.2289e-03 L2_spectral:3.2205e-03 L3_spectral:3.1994e-03 L4_spectral:3.2070e-03 L5_spectral:3.1933e-03 L6_spectral:3.1935e-03 L7_spectral:3.1862e-03 L8_spectral:3.1817e-03 L9_spectral:3.1863e-03 L10_spectral:3.1950e-03 L11_spectral:3.1906e-03 L12_spectral:3.1807e-03 train_time:205529ms step_avg:42.82ms +[2025-09-11 11:36:08] [Rank 0] step:4801/10000 train_time:206844ms step_avg:43.08ms +[2025-09-11 11:36:08] [Rank 0] step:4801/10000 train_time:206844ms step_avg:43.08ms +[2025-09-11 11:36:08] [Rank 0] step:4821/10000 train_time:207545ms step_avg:43.05ms +[2025-09-11 11:36:08] [Rank 0] step:4821/10000 train_time:207545ms step_avg:43.05ms +[2025-09-11 11:36:09] [Rank 0] step:4841/10000 train_time:208226ms step_avg:43.01ms +[2025-09-11 11:36:09] [Rank 0] step:4841/10000 train_time:208226ms step_avg:43.01ms +[2025-09-11 11:36:10] [Rank 0] step:4861/10000 train_time:208906ms step_avg:42.98ms +[2025-09-11 11:36:10] [Rank 0] step:4861/10000 train_time:208906ms step_avg:42.98ms +[2025-09-11 11:36:10] [Rank 0] step:4881/10000 train_time:209586ms step_avg:42.94ms +[2025-09-11 11:36:10] [Rank 0] step:4881/10000 
train_time:209586ms step_avg:42.94ms +[2025-09-11 11:36:11] [Rank 0] step:4901/10000 train_time:210267ms step_avg:42.90ms +[2025-09-11 11:36:11] [Rank 0] step:4901/10000 train_time:210267ms step_avg:42.90ms +[2025-09-11 11:36:12] [Rank 0] step:4921/10000 train_time:210947ms step_avg:42.87ms +[2025-09-11 11:36:12] [Rank 0] step:4921/10000 train_time:210947ms step_avg:42.87ms +[2025-09-11 11:36:13] [Rank 0] step:4941/10000 train_time:211627ms step_avg:42.83ms +[2025-09-11 11:36:13] [Rank 0] step:4941/10000 train_time:211627ms step_avg:42.83ms +[2025-09-11 11:36:13] [Rank 0] step:4961/10000 train_time:212309ms step_avg:42.80ms +[2025-09-11 11:36:13] [Rank 0] step:4961/10000 train_time:212309ms step_avg:42.80ms +[2025-09-11 11:36:14] [Rank 0] step:4981/10000 train_time:212990ms step_avg:42.76ms +[2025-09-11 11:36:14] [Rank 0] step:4981/10000 train_time:212990ms step_avg:42.76ms +[2025-09-11 11:36:15] [Rank 0] step:5001/10000 train_time:213671ms step_avg:42.73ms +[2025-09-11 11:36:15] [Rank 0] step:5001/10000 train_time:213671ms step_avg:42.73ms +[2025-09-11 11:36:15] [Rank 0] step:5021/10000 train_time:214351ms step_avg:42.69ms +[2025-09-11 11:36:15] [Rank 0] step:5021/10000 train_time:214351ms step_avg:42.69ms +[2025-09-11 11:36:16] [Rank 0] step:5041/10000 train_time:215030ms step_avg:42.66ms +[2025-09-11 11:36:16] [Rank 0] step:5041/10000 train_time:215030ms step_avg:42.66ms +[2025-09-11 11:36:17] [Rank 0] step:5061/10000 train_time:215709ms step_avg:42.62ms +[2025-09-11 11:36:17] [Rank 0] step:5061/10000 train_time:215709ms step_avg:42.62ms +[2025-09-11 11:36:17] [Rank 0] step:5081/10000 train_time:216388ms step_avg:42.59ms +[2025-09-11 11:36:17] [Rank 0] step:5081/10000 train_time:216388ms step_avg:42.59ms +[2025-09-11 11:36:18] [Rank 0] step:5101/10000 train_time:217365ms step_avg:42.61ms +[2025-09-11 11:36:18] [Rank 0] step:5101/10000 train_time:217365ms step_avg:42.61ms +[2025-09-11 11:36:19] [Rank 0] step:5121/10000 train_time:218045ms step_avg:42.58ms 
+[2025-09-11 11:36:19] [Rank 0] step:5121/10000 train_time:218045ms step_avg:42.58ms +[2025-09-11 11:36:20] [Rank 0] step:5141/10000 train_time:218725ms step_avg:42.55ms +[2025-09-11 11:36:20] [Rank 0] step:5141/10000 train_time:218725ms step_avg:42.55ms +[2025-09-11 11:36:21] [Rank 0] step:5161/10000 train_time:219674ms step_avg:42.56ms +[2025-09-11 11:36:21] [Rank 0] step:5161/10000 train_time:219674ms step_avg:42.56ms +[2025-09-11 11:36:21] [Rank 0] step:5181/10000 train_time:220353ms step_avg:42.53ms +[2025-09-11 11:36:21] [Rank 0] step:5181/10000 train_time:220353ms step_avg:42.53ms +[2025-09-11 11:36:22] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 11:36:22] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 11:36:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 11:36:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 11:36:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 11:36:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 11:36:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:36:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:36:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 11:36:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 11:36:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 11:36:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 11:36:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 11:36:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 11:36:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 11:36:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 11:36:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 11:36:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 11:36:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 11:36:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 11:36:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 11:36:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 11:36:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 11:36:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 11:36:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 11:36:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 11:36:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 11:36:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 11:36:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 11:36:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 11:36:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 11:36:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 11:36:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 11:36:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 11:36:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 11:36:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 11:36:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 11:36:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 11:36:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 11:36:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 11:36:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 11:36:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 11:36:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 11:36:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 11:36:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:36:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:36:32] [Rank 0] PRINT: step:5200/10000 val_loss:4.6846 total_sharp:2.3351e-03 L1_sharp:1.1055e-02 L2_sharp:5.4489e-03 L3_sharp:5.8525e-03 L4_sharp:5.7240e-03 L5_sharp:1.4349e-02 L6_sharp:1.2158e-02 L7_sharp:2.5879e-02 L8_sharp:4.2370e-02 L9_sharp:6.6171e-02 L10_sharp:1.0245e-01 L11_sharp:1.5576e-01 L12_sharp:1.6141e+00 total_fnorm:1.4625e+01 total_l1_linf:1.9456e+04 total_spectral:7.3125e+00 L1_fnorm:2.4805e-01 L2_fnorm:2.4707e-01 L3_fnorm:2.4609e-01 L4_fnorm:2.4609e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4414e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.4316e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4316e-01 L11_fnorm:2.4414e-01 L12_fnorm:2.4512e-01 L1_l1linf:6.4453e-02 L2_l1linf:6.4453e-02 L3_l1linf:6.2988e-02 L4_l1linf:6.3477e-02 L5_l1linf:6.2256e-02 L6_l1linf:6.1279e-02 L7_l1linf:6.1279e-02 L8_l1linf:5.9814e-02 L9_l1linf:5.9814e-02 L10_l1linf:6.0791e-02 L11_l1linf:6.2256e-02 L12_l1linf:5.8594e-02 L1_spectral:3.2570e-03 L2_spectral:3.2379e-03 L3_spectral:3.2083e-03 L4_spectral:3.2006e-03 L5_spectral:3.2233e-03 L6_spectral:3.2095e-03 L7_spectral:3.2143e-03 L8_spectral:3.1825e-03 L9_spectral:3.1966e-03 L10_spectral:3.1874e-03 L11_spectral:3.2170e-03 L12_spectral:3.2225e-03 train_time:221020ms step_avg:42.50ms +[2025-09-11 11:36:32] [Rank 0] PRINT: step:5200/10000 val_loss:4.6846 total_sharp:2.3351e-03 L1_sharp:1.1055e-02 L2_sharp:5.4489e-03 L3_sharp:5.8525e-03 L4_sharp:5.7240e-03 L5_sharp:1.4349e-02 L6_sharp:1.2158e-02 L7_sharp:2.5879e-02 L8_sharp:4.2370e-02 L9_sharp:6.6171e-02 L10_sharp:1.0245e-01 L11_sharp:1.5576e-01 L12_sharp:1.6141e+00 total_fnorm:1.4625e+01 total_l1_linf:1.9456e+04 total_spectral:7.3125e+00 L1_fnorm:2.4805e-01 L2_fnorm:2.4707e-01 L3_fnorm:2.4609e-01 L4_fnorm:2.4609e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4414e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.4316e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4316e-01 L11_fnorm:2.4414e-01 L12_fnorm:2.4512e-01 L1_l1linf:6.4453e-02 L2_l1linf:6.4453e-02 L3_l1linf:6.2988e-02 L4_l1linf:6.3477e-02 L5_l1linf:6.2256e-02 
L6_l1linf:6.1279e-02 L7_l1linf:6.1279e-02 L8_l1linf:5.9814e-02 L9_l1linf:5.9814e-02 L10_l1linf:6.0791e-02 L11_l1linf:6.2256e-02 L12_l1linf:5.8594e-02 L1_spectral:3.2570e-03 L2_spectral:3.2379e-03 L3_spectral:3.2083e-03 L4_spectral:3.2006e-03 L5_spectral:3.2233e-03 L6_spectral:3.2095e-03 L7_spectral:3.2143e-03 L8_spectral:3.1825e-03 L9_spectral:3.1966e-03 L10_spectral:3.1874e-03 L11_spectral:3.2170e-03 L12_spectral:3.2225e-03 train_time:221020ms step_avg:42.50ms +[2025-09-11 11:36:33] [Rank 0] step:5201/10000 train_time:222341ms step_avg:42.75ms +[2025-09-11 11:36:33] [Rank 0] step:5201/10000 train_time:222341ms step_avg:42.75ms +[2025-09-11 11:36:34] [Rank 0] step:5221/10000 train_time:223038ms step_avg:42.72ms +[2025-09-11 11:36:34] [Rank 0] step:5221/10000 train_time:223038ms step_avg:42.72ms +[2025-09-11 11:36:35] [Rank 0] step:5241/10000 train_time:223727ms step_avg:42.69ms +[2025-09-11 11:36:35] [Rank 0] step:5241/10000 train_time:223727ms step_avg:42.69ms +[2025-09-11 11:36:35] [Rank 0] step:5261/10000 train_time:224416ms step_avg:42.66ms +[2025-09-11 11:36:35] [Rank 0] step:5261/10000 train_time:224416ms step_avg:42.66ms +[2025-09-11 11:36:36] [Rank 0] step:5281/10000 train_time:225106ms step_avg:42.63ms +[2025-09-11 11:36:36] [Rank 0] step:5281/10000 train_time:225106ms step_avg:42.63ms +[2025-09-11 11:36:37] [Rank 0] step:5301/10000 train_time:225795ms step_avg:42.59ms +[2025-09-11 11:36:37] [Rank 0] step:5301/10000 train_time:225795ms step_avg:42.59ms +[2025-09-11 11:36:38] [Rank 0] step:5321/10000 train_time:226494ms step_avg:42.57ms +[2025-09-11 11:36:38] [Rank 0] step:5321/10000 train_time:226494ms step_avg:42.57ms +[2025-09-11 11:36:38] [Rank 0] step:5341/10000 train_time:227183ms step_avg:42.54ms +[2025-09-11 11:36:38] [Rank 0] step:5341/10000 train_time:227183ms step_avg:42.54ms +[2025-09-11 11:36:39] [Rank 0] step:5361/10000 train_time:227873ms step_avg:42.51ms +[2025-09-11 11:36:39] [Rank 0] step:5361/10000 train_time:227873ms step_avg:42.51ms 
+[2025-09-11 11:36:40] [Rank 0] step:5381/10000 train_time:228563ms step_avg:42.48ms +[2025-09-11 11:36:40] [Rank 0] step:5381/10000 train_time:228563ms step_avg:42.48ms +[2025-09-11 11:36:40] [Rank 0] step:5401/10000 train_time:229251ms step_avg:42.45ms +[2025-09-11 11:36:40] [Rank 0] step:5401/10000 train_time:229251ms step_avg:42.45ms +[2025-09-11 11:36:41] [Rank 0] step:5421/10000 train_time:229943ms step_avg:42.42ms +[2025-09-11 11:36:41] [Rank 0] step:5421/10000 train_time:229943ms step_avg:42.42ms +[2025-09-11 11:36:42] [Rank 0] step:5441/10000 train_time:230634ms step_avg:42.39ms +[2025-09-11 11:36:42] [Rank 0] step:5441/10000 train_time:230634ms step_avg:42.39ms +[2025-09-11 11:36:42] [Rank 0] step:5461/10000 train_time:231323ms step_avg:42.36ms +[2025-09-11 11:36:42] [Rank 0] step:5461/10000 train_time:231323ms step_avg:42.36ms +[2025-09-11 11:36:43] [Rank 0] step:5481/10000 train_time:232012ms step_avg:42.33ms +[2025-09-11 11:36:43] [Rank 0] step:5481/10000 train_time:232012ms step_avg:42.33ms +[2025-09-11 11:36:44] [Rank 0] step:5501/10000 train_time:232700ms step_avg:42.30ms +[2025-09-11 11:36:44] [Rank 0] step:5501/10000 train_time:232700ms step_avg:42.30ms +[2025-09-11 11:36:44] [Rank 0] step:5521/10000 train_time:233392ms step_avg:42.27ms +[2025-09-11 11:36:44] [Rank 0] step:5521/10000 train_time:233392ms step_avg:42.27ms +[2025-09-11 11:36:45] [Rank 0] step:5541/10000 train_time:234082ms step_avg:42.25ms +[2025-09-11 11:36:45] [Rank 0] step:5541/10000 train_time:234082ms step_avg:42.25ms +[2025-09-11 11:36:46] [Rank 0] step:5561/10000 train_time:234774ms step_avg:42.22ms +[2025-09-11 11:36:46] [Rank 0] step:5561/10000 train_time:234774ms step_avg:42.22ms +[2025-09-11 11:36:46] [Rank 0] step:5581/10000 train_time:235464ms step_avg:42.19ms +[2025-09-11 11:36:46] [Rank 0] step:5581/10000 train_time:235464ms step_avg:42.19ms +[2025-09-11 11:36:47] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 11:36:47] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 11:36:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 11:36:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 11:36:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 11:36:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 11:36:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:36:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:36:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 11:36:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 11:36:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 11:36:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 11:36:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 11:36:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 11:36:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 11:36:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 11:36:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 11:36:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 11:36:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 11:36:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 11:36:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 11:36:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 11:36:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 11:36:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 11:36:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 11:36:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 11:36:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 11:36:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 11:36:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 11:36:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 11:36:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 11:36:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 11:36:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 11:36:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 11:36:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 11:36:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 11:36:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 11:36:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 11:36:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 11:36:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 11:36:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 11:36:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 11:36:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 11:36:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 11:36:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:36:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:36:57] [Rank 0] PRINT: step:5600/10000 val_loss:4.6505 total_sharp:1.4447e-03 L1_sharp:8.7221e-03 L2_sharp:4.9997e-03 L3_sharp:6.5814e-03 L4_sharp:4.9690e-03 L5_sharp:1.1204e-02 L6_sharp:1.3325e-02 L7_sharp:2.3100e-02 L8_sharp:3.9775e-02 L9_sharp:5.6874e-02 L10_sharp:7.9264e-02 L11_sharp:1.1313e-01 L12_sharp:4.4569e-01 total_fnorm:1.4375e+01 total_l1_linf:1.9456e+04 total_spectral:7.1875e+00 L1_fnorm:2.4707e-01 L2_fnorm:2.4609e-01 L3_fnorm:2.4512e-01 L4_fnorm:2.4414e-01 L5_fnorm:2.4316e-01 L6_fnorm:2.4219e-01 L7_fnorm:2.4316e-01 L8_fnorm:2.4121e-01 L9_fnorm:2.4316e-01 L10_fnorm:2.4316e-01 L11_fnorm:2.4316e-01 L12_fnorm:2.4512e-01 L1_l1linf:6.2988e-02 L2_l1linf:6.2500e-02 L3_l1linf:6.0791e-02 L4_l1linf:6.1523e-02 L5_l1linf:6.1035e-02 L6_l1linf:6.0303e-02 L7_l1linf:5.9814e-02 L8_l1linf:5.9082e-02 L9_l1linf:5.9326e-02 L10_l1linf:6.0547e-02 L11_l1linf:6.1523e-02 L12_l1linf:6.0303e-02 L1_spectral:3.2275e-03 L2_spectral:3.2469e-03 L3_spectral:3.2261e-03 L4_spectral:3.2253e-03 L5_spectral:3.1978e-03 L6_spectral:3.2143e-03 L7_spectral:3.2017e-03 L8_spectral:3.1929e-03 L9_spectral:3.2061e-03 L10_spectral:3.2097e-03 L11_spectral:3.1965e-03 L12_spectral:3.1850e-03 train_time:236135ms step_avg:42.17ms +[2025-09-11 11:36:57] [Rank 0] PRINT: step:5600/10000 
val_loss:4.6505 total_sharp:1.4447e-03 L1_sharp:8.7221e-03 L2_sharp:4.9997e-03 L3_sharp:6.5814e-03 L4_sharp:4.9690e-03 L5_sharp:1.1204e-02 L6_sharp:1.3325e-02 L7_sharp:2.3100e-02 L8_sharp:3.9775e-02 L9_sharp:5.6874e-02 L10_sharp:7.9264e-02 L11_sharp:1.1313e-01 L12_sharp:4.4569e-01 total_fnorm:1.4375e+01 total_l1_linf:1.9456e+04 total_spectral:7.1875e+00 L1_fnorm:2.4707e-01 L2_fnorm:2.4609e-01 L3_fnorm:2.4512e-01 L4_fnorm:2.4414e-01 L5_fnorm:2.4316e-01 L6_fnorm:2.4219e-01 L7_fnorm:2.4316e-01 L8_fnorm:2.4121e-01 L9_fnorm:2.4316e-01 L10_fnorm:2.4316e-01 L11_fnorm:2.4316e-01 L12_fnorm:2.4512e-01 L1_l1linf:6.2988e-02 L2_l1linf:6.2500e-02 L3_l1linf:6.0791e-02 L4_l1linf:6.1523e-02 L5_l1linf:6.1035e-02 L6_l1linf:6.0303e-02 L7_l1linf:5.9814e-02 L8_l1linf:5.9082e-02 L9_l1linf:5.9326e-02 L10_l1linf:6.0547e-02 L11_l1linf:6.1523e-02 L12_l1linf:6.0303e-02 L1_spectral:3.2275e-03 L2_spectral:3.2469e-03 L3_spectral:3.2261e-03 L4_spectral:3.2253e-03 L5_spectral:3.1978e-03 L6_spectral:3.2143e-03 L7_spectral:3.2017e-03 L8_spectral:3.1929e-03 L9_spectral:3.2061e-03 L10_spectral:3.2097e-03 L11_spectral:3.1965e-03 L12_spectral:3.1850e-03 train_time:236135ms step_avg:42.17ms +[2025-09-11 11:36:58] [Rank 0] step:5601/10000 train_time:237461ms step_avg:42.40ms +[2025-09-11 11:36:58] [Rank 0] step:5601/10000 train_time:237461ms step_avg:42.40ms +[2025-09-11 11:36:59] [Rank 0] step:5621/10000 train_time:238163ms step_avg:42.37ms +[2025-09-11 11:36:59] [Rank 0] step:5621/10000 train_time:238163ms step_avg:42.37ms +[2025-09-11 11:37:00] [Rank 0] step:5641/10000 train_time:238851ms step_avg:42.34ms +[2025-09-11 11:37:00] [Rank 0] step:5641/10000 train_time:238851ms step_avg:42.34ms +[2025-09-11 11:37:00] [Rank 0] step:5661/10000 train_time:239540ms step_avg:42.31ms +[2025-09-11 11:37:00] [Rank 0] step:5661/10000 train_time:239540ms step_avg:42.31ms +[2025-09-11 11:37:01] [Rank 0] step:5681/10000 train_time:240229ms step_avg:42.29ms +[2025-09-11 11:37:01] [Rank 0] step:5681/10000 
train_time:240229ms step_avg:42.29ms +[2025-09-11 11:37:02] [Rank 0] step:5701/10000 train_time:240924ms step_avg:42.26ms +[2025-09-11 11:37:02] [Rank 0] step:5701/10000 train_time:240924ms step_avg:42.26ms +[2025-09-11 11:37:02] [Rank 0] step:5721/10000 train_time:241613ms step_avg:42.23ms +[2025-09-11 11:37:02] [Rank 0] step:5721/10000 train_time:241613ms step_avg:42.23ms +[2025-09-11 11:37:03] [Rank 0] step:5741/10000 train_time:242303ms step_avg:42.21ms +[2025-09-11 11:37:03] [Rank 0] step:5741/10000 train_time:242303ms step_avg:42.21ms +[2025-09-11 11:37:04] [Rank 0] step:5761/10000 train_time:242993ms step_avg:42.18ms +[2025-09-11 11:37:04] [Rank 0] step:5761/10000 train_time:242993ms step_avg:42.18ms +[2025-09-11 11:37:05] [Rank 0] step:5781/10000 train_time:243684ms step_avg:42.15ms +[2025-09-11 11:37:05] [Rank 0] step:5781/10000 train_time:243684ms step_avg:42.15ms +[2025-09-11 11:37:05] [Rank 0] step:5801/10000 train_time:244375ms step_avg:42.13ms +[2025-09-11 11:37:05] [Rank 0] step:5801/10000 train_time:244375ms step_avg:42.13ms +[2025-09-11 11:37:06] [Rank 0] step:5821/10000 train_time:245064ms step_avg:42.10ms +[2025-09-11 11:37:06] [Rank 0] step:5821/10000 train_time:245064ms step_avg:42.10ms +[2025-09-11 11:37:07] [Rank 0] step:5841/10000 train_time:245755ms step_avg:42.07ms +[2025-09-11 11:37:07] [Rank 0] step:5841/10000 train_time:245755ms step_avg:42.07ms +[2025-09-11 11:37:07] [Rank 0] step:5861/10000 train_time:246445ms step_avg:42.05ms +[2025-09-11 11:37:07] [Rank 0] step:5861/10000 train_time:246445ms step_avg:42.05ms +[2025-09-11 11:37:08] [Rank 0] step:5881/10000 train_time:247135ms step_avg:42.02ms +[2025-09-11 11:37:08] [Rank 0] step:5881/10000 train_time:247135ms step_avg:42.02ms +[2025-09-11 11:37:09] [Rank 0] step:5901/10000 train_time:247824ms step_avg:42.00ms +[2025-09-11 11:37:09] [Rank 0] step:5901/10000 train_time:247824ms step_avg:42.00ms +[2025-09-11 11:37:09] [Rank 0] step:5921/10000 train_time:248515ms step_avg:41.97ms 
+[2025-09-11 11:37:09] [Rank 0] step:5921/10000 train_time:248515ms step_avg:41.97ms +[2025-09-11 11:37:10] [Rank 0] step:5941/10000 train_time:249207ms step_avg:41.95ms +[2025-09-11 11:37:10] [Rank 0] step:5941/10000 train_time:249207ms step_avg:41.95ms +[2025-09-11 11:37:11] [Rank 0] step:5961/10000 train_time:249897ms step_avg:41.92ms +[2025-09-11 11:37:11] [Rank 0] step:5961/10000 train_time:249897ms step_avg:41.92ms +[2025-09-11 11:37:11] [Rank 0] step:5981/10000 train_time:250587ms step_avg:41.90ms +[2025-09-11 11:37:11] [Rank 0] step:5981/10000 train_time:250587ms step_avg:41.90ms +[2025-09-11 11:37:12] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 11:37:12] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 11:37:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 11:37:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 11:37:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 11:37:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 11:37:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:37:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:37:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 11:37:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 11:37:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 11:37:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 11:37:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 11:37:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 11:37:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 11:37:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 11:37:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 11:37:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 11:37:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 11:37:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 11:37:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 11:37:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 11:37:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 11:37:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 11:37:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 11:37:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 11:37:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 11:37:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 11:37:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 11:37:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 11:37:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 11:37:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 11:37:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 11:37:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 11:37:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 11:37:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 11:37:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 11:37:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 11:37:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 11:37:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 11:37:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 11:37:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 11:37:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 11:37:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 11:37:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:37:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:37:23] [Rank 0] PRINT: step:6000/10000 val_loss:4.6134 total_sharp:1.1555e-03 L1_sharp:5.8875e-03 L2_sharp:3.8127e-03 L3_sharp:3.4446e-03 L4_sharp:4.3824e-03 L5_sharp:9.1460e-03 L6_sharp:1.1475e-02 L7_sharp:1.5638e-02 L8_sharp:3.1950e-02 L9_sharp:4.6934e-02 L10_sharp:7.1762e-02 L11_sharp:1.1472e-01 L12_sharp:4.5844e-01 total_fnorm:1.5062e+01 total_l1_linf:1.9840e+04 total_spectral:7.5312e+00 L1_fnorm:2.4805e-01 L2_fnorm:2.4707e-01 L3_fnorm:2.4609e-01 L4_fnorm:2.4609e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4316e-01 L7_fnorm:2.4316e-01 L8_fnorm:2.4219e-01 L9_fnorm:2.4219e-01 L10_fnorm:2.4219e-01 L11_fnorm:2.4219e-01 L12_fnorm:2.4512e-01 L1_l1linf:6.3477e-02 L2_l1linf:6.3477e-02 L3_l1linf:6.0547e-02 L4_l1linf:6.1768e-02 L5_l1linf:6.0303e-02 L6_l1linf:6.0791e-02 L7_l1linf:5.9814e-02 L8_l1linf:5.9326e-02 L9_l1linf:5.8594e-02 L10_l1linf:5.7861e-02 L11_l1linf:5.8838e-02 L12_l1linf:5.9326e-02 L1_spectral:3.2469e-03 L2_spectral:3.2125e-03 L3_spectral:3.2347e-03 L4_spectral:3.2060e-03 L5_spectral:3.2051e-03 L6_spectral:3.2138e-03 L7_spectral:3.2002e-03 L8_spectral:3.2094e-03 L9_spectral:3.2042e-03 L10_spectral:3.2100e-03 L11_spectral:3.2062e-03 L12_spectral:3.1941e-03 train_time:251261ms step_avg:41.88ms +[2025-09-11 11:37:23] [Rank 0] PRINT: step:6000/10000 val_loss:4.6134 total_sharp:1.1555e-03 L1_sharp:5.8875e-03 L2_sharp:3.8127e-03 L3_sharp:3.4446e-03 L4_sharp:4.3824e-03 L5_sharp:9.1460e-03 L6_sharp:1.1475e-02 L7_sharp:1.5638e-02 L8_sharp:3.1950e-02 L9_sharp:4.6934e-02 L10_sharp:7.1762e-02 L11_sharp:1.1472e-01 L12_sharp:4.5844e-01 total_fnorm:1.5062e+01 total_l1_linf:1.9840e+04 total_spectral:7.5312e+00 L1_fnorm:2.4805e-01 L2_fnorm:2.4707e-01 L3_fnorm:2.4609e-01 L4_fnorm:2.4609e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4316e-01 L7_fnorm:2.4316e-01 L8_fnorm:2.4219e-01 L9_fnorm:2.4219e-01 L10_fnorm:2.4219e-01 L11_fnorm:2.4219e-01 L12_fnorm:2.4512e-01 L1_l1linf:6.3477e-02 L2_l1linf:6.3477e-02 L3_l1linf:6.0547e-02 L4_l1linf:6.1768e-02 L5_l1linf:6.0303e-02 
L6_l1linf:6.0791e-02 L7_l1linf:5.9814e-02 L8_l1linf:5.9326e-02 L9_l1linf:5.8594e-02 L10_l1linf:5.7861e-02 L11_l1linf:5.8838e-02 L12_l1linf:5.9326e-02 L1_spectral:3.2469e-03 L2_spectral:3.2125e-03 L3_spectral:3.2347e-03 L4_spectral:3.2060e-03 L5_spectral:3.2051e-03 L6_spectral:3.2138e-03 L7_spectral:3.2002e-03 L8_spectral:3.2094e-03 L9_spectral:3.2042e-03 L10_spectral:3.2100e-03 L11_spectral:3.2062e-03 L12_spectral:3.1941e-03 train_time:251261ms step_avg:41.88ms +[2025-09-11 11:37:25] [Rank 0] step:6001/10000 train_time:253485ms step_avg:42.24ms +[2025-09-11 11:37:25] [Rank 0] step:6001/10000 train_time:253485ms step_avg:42.24ms +[2025-09-11 11:37:26] [Rank 0] step:6021/10000 train_time:254209ms step_avg:42.22ms +[2025-09-11 11:37:26] [Rank 0] step:6021/10000 train_time:254209ms step_avg:42.22ms +[2025-09-11 11:37:27] [Rank 0] step:6041/10000 train_time:254902ms step_avg:42.20ms +[2025-09-11 11:37:27] [Rank 0] step:6041/10000 train_time:254902ms step_avg:42.20ms +[2025-09-11 11:37:27] [Rank 0] step:6061/10000 train_time:255592ms step_avg:42.17ms +[2025-09-11 11:37:27] [Rank 0] step:6061/10000 train_time:255592ms step_avg:42.17ms +[2025-09-11 11:37:28] [Rank 0] step:6081/10000 train_time:256284ms step_avg:42.15ms +[2025-09-11 11:37:28] [Rank 0] step:6081/10000 train_time:256284ms step_avg:42.15ms +[2025-09-11 11:37:29] [Rank 0] step:6101/10000 train_time:256974ms step_avg:42.12ms +[2025-09-11 11:37:29] [Rank 0] step:6101/10000 train_time:256974ms step_avg:42.12ms +[2025-09-11 11:37:29] [Rank 0] step:6121/10000 train_time:257665ms step_avg:42.10ms +[2025-09-11 11:37:29] [Rank 0] step:6121/10000 train_time:257665ms step_avg:42.10ms +[2025-09-11 11:37:30] [Rank 0] step:6141/10000 train_time:258357ms step_avg:42.07ms +[2025-09-11 11:37:30] [Rank 0] step:6141/10000 train_time:258357ms step_avg:42.07ms +[2025-09-11 11:37:31] [Rank 0] step:6161/10000 train_time:259047ms step_avg:42.05ms +[2025-09-11 11:37:31] [Rank 0] step:6161/10000 train_time:259047ms step_avg:42.05ms 
+[2025-09-11 11:37:32] [Rank 0] step:6181/10000 train_time:259735ms step_avg:42.02ms +[2025-09-11 11:37:32] [Rank 0] step:6181/10000 train_time:259735ms step_avg:42.02ms +[2025-09-11 11:37:32] [Rank 0] step:6201/10000 train_time:260427ms step_avg:42.00ms +[2025-09-11 11:37:32] [Rank 0] step:6201/10000 train_time:260427ms step_avg:42.00ms +[2025-09-11 11:37:33] [Rank 0] step:6221/10000 train_time:261119ms step_avg:41.97ms +[2025-09-11 11:37:33] [Rank 0] step:6221/10000 train_time:261119ms step_avg:41.97ms +[2025-09-11 11:37:34] [Rank 0] step:6241/10000 train_time:261809ms step_avg:41.95ms +[2025-09-11 11:37:34] [Rank 0] step:6241/10000 train_time:261809ms step_avg:41.95ms +[2025-09-11 11:37:34] [Rank 0] step:6261/10000 train_time:262498ms step_avg:41.93ms +[2025-09-11 11:37:34] [Rank 0] step:6261/10000 train_time:262498ms step_avg:41.93ms +[2025-09-11 11:37:35] [Rank 0] step:6281/10000 train_time:263189ms step_avg:41.90ms +[2025-09-11 11:37:35] [Rank 0] step:6281/10000 train_time:263189ms step_avg:41.90ms +[2025-09-11 11:37:36] [Rank 0] step:6301/10000 train_time:263877ms step_avg:41.88ms +[2025-09-11 11:37:36] [Rank 0] step:6301/10000 train_time:263877ms step_avg:41.88ms +[2025-09-11 11:37:36] [Rank 0] step:6321/10000 train_time:264571ms step_avg:41.86ms +[2025-09-11 11:37:36] [Rank 0] step:6321/10000 train_time:264571ms step_avg:41.86ms +[2025-09-11 11:37:37] [Rank 0] step:6341/10000 train_time:265263ms step_avg:41.83ms +[2025-09-11 11:37:37] [Rank 0] step:6341/10000 train_time:265263ms step_avg:41.83ms +[2025-09-11 11:37:38] [Rank 0] step:6361/10000 train_time:265954ms step_avg:41.81ms +[2025-09-11 11:37:38] [Rank 0] step:6361/10000 train_time:265954ms step_avg:41.81ms +[2025-09-11 11:37:38] [Rank 0] step:6381/10000 train_time:266645ms step_avg:41.79ms +[2025-09-11 11:37:38] [Rank 0] step:6381/10000 train_time:266645ms step_avg:41.79ms +[2025-09-11 11:37:39] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 11:37:39] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 11:37:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 11:37:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 11:37:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 11:37:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 11:37:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:37:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:37:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 11:37:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 11:37:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 11:37:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 11:37:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 11:37:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 11:37:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 11:37:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 11:37:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 11:37:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 11:37:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 11:37:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 11:37:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 11:37:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 11:37:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 11:37:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 11:37:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 11:37:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 11:37:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 11:37:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 11:37:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 11:37:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 11:37:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 11:37:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 11:37:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 11:37:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 11:37:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 11:37:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 11:37:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 11:37:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 11:37:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 11:37:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 11:37:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 11:37:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 11:37:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 11:37:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 11:37:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:37:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:37:50] [Rank 0] PRINT: step:6400/10000 val_loss:4.5815 total_sharp:1.7951e-03 L1_sharp:6.2209e-03 L2_sharp:4.4117e-03 L3_sharp:4.2195e-03 L4_sharp:3.0700e-03 L5_sharp:9.6886e-03 L6_sharp:9.1932e-03 L7_sharp:1.8505e-02 L8_sharp:3.1674e-02 L9_sharp:4.8842e-02 L10_sharp:7.0446e-02 L11_sharp:1.2709e-01 L12_sharp:1.3184e+00 total_fnorm:1.2688e+01 total_l1_linf:1.6128e+04 total_spectral:6.3438e+00 L1_fnorm:2.2070e-01 L2_fnorm:2.1973e-01 L3_fnorm:2.1777e-01 L4_fnorm:2.1777e-01 L5_fnorm:2.1680e-01 L6_fnorm:2.1680e-01 L7_fnorm:2.1680e-01 L8_fnorm:2.1582e-01 L9_fnorm:2.1680e-01 L10_fnorm:2.1582e-01 L11_fnorm:2.1582e-01 L12_fnorm:2.1680e-01 L1_l1linf:5.4932e-02 L2_l1linf:5.3467e-02 L3_l1linf:5.2002e-02 L4_l1linf:5.2246e-02 L5_l1linf:5.1758e-02 L6_l1linf:5.0781e-02 L7_l1linf:5.0537e-02 L8_l1linf:5.1514e-02 L9_l1linf:5.0049e-02 L10_l1linf:5.0293e-02 L11_l1linf:5.1270e-02 L12_l1linf:4.9316e-02 L1_spectral:2.9363e-03 L2_spectral:2.9226e-03 L3_spectral:2.9158e-03 L4_spectral:2.9098e-03 L5_spectral:2.8975e-03 L6_spectral:2.9003e-03 L7_spectral:2.9149e-03 L8_spectral:2.9183e-03 L9_spectral:2.9109e-03 L10_spectral:2.9332e-03 L11_spectral:2.9058e-03 L12_spectral:2.9006e-03 train_time:267316ms step_avg:41.77ms +[2025-09-11 11:37:50] [Rank 0] PRINT: step:6400/10000 
val_loss:4.5815 total_sharp:1.7951e-03 L1_sharp:6.2209e-03 L2_sharp:4.4117e-03 L3_sharp:4.2195e-03 L4_sharp:3.0700e-03 L5_sharp:9.6886e-03 L6_sharp:9.1932e-03 L7_sharp:1.8505e-02 L8_sharp:3.1674e-02 L9_sharp:4.8842e-02 L10_sharp:7.0446e-02 L11_sharp:1.2709e-01 L12_sharp:1.3184e+00 total_fnorm:1.2688e+01 total_l1_linf:1.6128e+04 total_spectral:6.3438e+00 L1_fnorm:2.2070e-01 L2_fnorm:2.1973e-01 L3_fnorm:2.1777e-01 L4_fnorm:2.1777e-01 L5_fnorm:2.1680e-01 L6_fnorm:2.1680e-01 L7_fnorm:2.1680e-01 L8_fnorm:2.1582e-01 L9_fnorm:2.1680e-01 L10_fnorm:2.1582e-01 L11_fnorm:2.1582e-01 L12_fnorm:2.1680e-01 L1_l1linf:5.4932e-02 L2_l1linf:5.3467e-02 L3_l1linf:5.2002e-02 L4_l1linf:5.2246e-02 L5_l1linf:5.1758e-02 L6_l1linf:5.0781e-02 L7_l1linf:5.0537e-02 L8_l1linf:5.1514e-02 L9_l1linf:5.0049e-02 L10_l1linf:5.0293e-02 L11_l1linf:5.1270e-02 L12_l1linf:4.9316e-02 L1_spectral:2.9363e-03 L2_spectral:2.9226e-03 L3_spectral:2.9158e-03 L4_spectral:2.9098e-03 L5_spectral:2.8975e-03 L6_spectral:2.9003e-03 L7_spectral:2.9149e-03 L8_spectral:2.9183e-03 L9_spectral:2.9109e-03 L10_spectral:2.9332e-03 L11_spectral:2.9058e-03 L12_spectral:2.9006e-03 train_time:267316ms step_avg:41.77ms +[2025-09-11 11:37:52] [Rank 0] step:6401/10000 train_time:269535ms step_avg:42.11ms +[2025-09-11 11:37:52] [Rank 0] step:6401/10000 train_time:269535ms step_avg:42.11ms +[2025-09-11 11:37:53] [Rank 0] step:6421/10000 train_time:270240ms step_avg:42.09ms +[2025-09-11 11:37:53] [Rank 0] step:6421/10000 train_time:270240ms step_avg:42.09ms +[2025-09-11 11:37:54] [Rank 0] step:6441/10000 train_time:270931ms step_avg:42.06ms +[2025-09-11 11:37:54] [Rank 0] step:6441/10000 train_time:270931ms step_avg:42.06ms +[2025-09-11 11:37:54] [Rank 0] step:6461/10000 train_time:271624ms step_avg:42.04ms +[2025-09-11 11:37:54] [Rank 0] step:6461/10000 train_time:271624ms step_avg:42.04ms +[2025-09-11 11:37:55] [Rank 0] step:6481/10000 train_time:272317ms step_avg:42.02ms +[2025-09-11 11:37:55] [Rank 0] step:6481/10000 
train_time:272317ms step_avg:42.02ms +[2025-09-11 11:37:56] [Rank 0] step:6501/10000 train_time:273010ms step_avg:42.00ms +[2025-09-11 11:37:56] [Rank 0] step:6501/10000 train_time:273010ms step_avg:42.00ms +[2025-09-11 11:37:57] [Rank 0] step:6521/10000 train_time:273702ms step_avg:41.97ms +[2025-09-11 11:37:57] [Rank 0] step:6521/10000 train_time:273702ms step_avg:41.97ms +[2025-09-11 11:37:57] [Rank 0] step:6541/10000 train_time:274391ms step_avg:41.95ms +[2025-09-11 11:37:57] [Rank 0] step:6541/10000 train_time:274391ms step_avg:41.95ms +[2025-09-11 11:37:58] [Rank 0] step:6561/10000 train_time:275083ms step_avg:41.93ms +[2025-09-11 11:37:58] [Rank 0] step:6561/10000 train_time:275083ms step_avg:41.93ms +[2025-09-11 11:37:59] [Rank 0] step:6581/10000 train_time:275775ms step_avg:41.90ms +[2025-09-11 11:37:59] [Rank 0] step:6581/10000 train_time:275775ms step_avg:41.90ms +[2025-09-11 11:37:59] [Rank 0] step:6601/10000 train_time:276467ms step_avg:41.88ms +[2025-09-11 11:37:59] [Rank 0] step:6601/10000 train_time:276467ms step_avg:41.88ms +[2025-09-11 11:38:00] [Rank 0] step:6621/10000 train_time:277157ms step_avg:41.86ms +[2025-09-11 11:38:00] [Rank 0] step:6621/10000 train_time:277157ms step_avg:41.86ms +[2025-09-11 11:38:01] [Rank 0] step:6641/10000 train_time:277850ms step_avg:41.84ms +[2025-09-11 11:38:01] [Rank 0] step:6641/10000 train_time:277850ms step_avg:41.84ms +[2025-09-11 11:38:01] [Rank 0] step:6661/10000 train_time:278543ms step_avg:41.82ms +[2025-09-11 11:38:01] [Rank 0] step:6661/10000 train_time:278543ms step_avg:41.82ms +[2025-09-11 11:38:02] [Rank 0] step:6681/10000 train_time:279242ms step_avg:41.80ms +[2025-09-11 11:38:02] [Rank 0] step:6681/10000 train_time:279242ms step_avg:41.80ms +[2025-09-11 11:38:03] [Rank 0] step:6701/10000 train_time:279941ms step_avg:41.78ms +[2025-09-11 11:38:03] [Rank 0] step:6701/10000 train_time:279941ms step_avg:41.78ms +[2025-09-11 11:38:03] [Rank 0] step:6721/10000 train_time:280640ms step_avg:41.76ms 
+[2025-09-11 11:38:03] [Rank 0] step:6721/10000 train_time:280640ms step_avg:41.76ms +[2025-09-11 11:38:04] [Rank 0] step:6741/10000 train_time:281338ms step_avg:41.74ms +[2025-09-11 11:38:04] [Rank 0] step:6741/10000 train_time:281338ms step_avg:41.74ms +[2025-09-11 11:38:05] [Rank 0] step:6761/10000 train_time:282035ms step_avg:41.71ms +[2025-09-11 11:38:05] [Rank 0] step:6761/10000 train_time:282035ms step_avg:41.71ms +[2025-09-11 11:38:06] [Rank 0] step:6781/10000 train_time:282733ms step_avg:41.69ms +[2025-09-11 11:38:06] [Rank 0] step:6781/10000 train_time:282733ms step_avg:41.69ms +[2025-09-11 11:38:06] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 11:38:06] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 11:38:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 11:38:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 11:38:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 11:38:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 11:38:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:38:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:38:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 11:38:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 11:38:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 11:38:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 11:38:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 11:38:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 11:38:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 11:38:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 11:38:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 11:38:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 11:38:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 11:38:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 11:38:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 11:38:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 11:38:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 11:38:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 11:38:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 11:38:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 11:38:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 11:38:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 11:38:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 11:38:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 11:38:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 11:38:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 11:38:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 11:38:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 11:38:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 11:38:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 11:38:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 11:38:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 11:38:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 11:38:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 11:38:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 11:38:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 11:38:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 11:38:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 11:38:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:38:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:38:17] [Rank 0] PRINT: step:6800/10000 val_loss:4.5531 total_sharp:1.0071e-03 L1_sharp:7.7186e-03 L2_sharp:4.6079e-03 L3_sharp:6.2464e-03 L4_sharp:5.7798e-03 L5_sharp:1.0546e-02 L6_sharp:1.1621e-02 L7_sharp:1.8992e-02 L8_sharp:3.6267e-02 L9_sharp:5.2653e-02 L10_sharp:7.0126e-02 L11_sharp:1.0472e-01 L12_sharp:3.5276e-01 total_fnorm:1.1938e+01 total_l1_linf:1.4784e+04 total_spectral:5.9688e+00 L1_fnorm:1.9238e-01 L2_fnorm:1.9141e-01 L3_fnorm:1.9043e-01 L4_fnorm:1.9043e-01 L5_fnorm:1.8848e-01 L6_fnorm:1.8848e-01 L7_fnorm:1.8848e-01 L8_fnorm:1.8750e-01 L9_fnorm:1.8848e-01 L10_fnorm:1.8750e-01 L11_fnorm:1.8652e-01 L12_fnorm:1.8750e-01 L1_l1linf:4.5166e-02 L2_l1linf:4.5166e-02 L3_l1linf:4.3945e-02 L4_l1linf:4.4434e-02 L5_l1linf:4.3213e-02 L6_l1linf:4.2969e-02 L7_l1linf:4.2480e-02 L8_l1linf:4.1504e-02 L9_l1linf:4.1992e-02 L10_l1linf:4.1748e-02 L11_l1linf:4.1748e-02 L12_l1linf:4.2725e-02 L1_spectral:2.5930e-03 L2_spectral:2.5806e-03 L3_spectral:2.5869e-03 L4_spectral:2.5861e-03 L5_spectral:2.5990e-03 L6_spectral:2.6036e-03 L7_spectral:2.5951e-03 L8_spectral:2.5907e-03 L9_spectral:2.6186e-03 L10_spectral:2.5915e-03 L11_spectral:2.6016e-03 L12_spectral:2.5878e-03 train_time:283410ms step_avg:41.68ms +[2025-09-11 11:38:17] [Rank 0] PRINT: step:6800/10000 val_loss:4.5531 total_sharp:1.0071e-03 L1_sharp:7.7186e-03 L2_sharp:4.6079e-03 L3_sharp:6.2464e-03 L4_sharp:5.7798e-03 L5_sharp:1.0546e-02 L6_sharp:1.1621e-02 L7_sharp:1.8992e-02 L8_sharp:3.6267e-02 L9_sharp:5.2653e-02 L10_sharp:7.0126e-02 L11_sharp:1.0472e-01 L12_sharp:3.5276e-01 total_fnorm:1.1938e+01 total_l1_linf:1.4784e+04 total_spectral:5.9688e+00 L1_fnorm:1.9238e-01 L2_fnorm:1.9141e-01 L3_fnorm:1.9043e-01 L4_fnorm:1.9043e-01 L5_fnorm:1.8848e-01 L6_fnorm:1.8848e-01 L7_fnorm:1.8848e-01 L8_fnorm:1.8750e-01 L9_fnorm:1.8848e-01 L10_fnorm:1.8750e-01 L11_fnorm:1.8652e-01 L12_fnorm:1.8750e-01 L1_l1linf:4.5166e-02 L2_l1linf:4.5166e-02 L3_l1linf:4.3945e-02 L4_l1linf:4.4434e-02 L5_l1linf:4.3213e-02 
L6_l1linf:4.2969e-02 L7_l1linf:4.2480e-02 L8_l1linf:4.1504e-02 L9_l1linf:4.1992e-02 L10_l1linf:4.1748e-02 L11_l1linf:4.1748e-02 L12_l1linf:4.2725e-02 L1_spectral:2.5930e-03 L2_spectral:2.5806e-03 L3_spectral:2.5869e-03 L4_spectral:2.5861e-03 L5_spectral:2.5990e-03 L6_spectral:2.6036e-03 L7_spectral:2.5951e-03 L8_spectral:2.5907e-03 L9_spectral:2.6186e-03 L10_spectral:2.5915e-03 L11_spectral:2.6016e-03 L12_spectral:2.5878e-03 train_time:283410ms step_avg:41.68ms +[2025-09-11 11:38:19] [Rank 0] step:6801/10000 train_time:285474ms step_avg:41.98ms +[2025-09-11 11:38:19] [Rank 0] step:6801/10000 train_time:285474ms step_avg:41.98ms +[2025-09-11 11:38:20] [Rank 0] step:6821/10000 train_time:286204ms step_avg:41.96ms +[2025-09-11 11:38:20] [Rank 0] step:6821/10000 train_time:286204ms step_avg:41.96ms +[2025-09-11 11:38:21] [Rank 0] step:6841/10000 train_time:286906ms step_avg:41.94ms +[2025-09-11 11:38:21] [Rank 0] step:6841/10000 train_time:286906ms step_avg:41.94ms +[2025-09-11 11:38:21] [Rank 0] step:6861/10000 train_time:287606ms step_avg:41.92ms +[2025-09-11 11:38:21] [Rank 0] step:6861/10000 train_time:287606ms step_avg:41.92ms +[2025-09-11 11:38:22] [Rank 0] step:6881/10000 train_time:288308ms step_avg:41.90ms +[2025-09-11 11:38:22] [Rank 0] step:6881/10000 train_time:288308ms step_avg:41.90ms +[2025-09-11 11:38:23] [Rank 0] step:6901/10000 train_time:289007ms step_avg:41.88ms +[2025-09-11 11:38:23] [Rank 0] step:6901/10000 train_time:289007ms step_avg:41.88ms +[2025-09-11 11:38:23] [Rank 0] step:6921/10000 train_time:289706ms step_avg:41.86ms +[2025-09-11 11:38:23] [Rank 0] step:6921/10000 train_time:289706ms step_avg:41.86ms +[2025-09-11 11:38:24] [Rank 0] step:6941/10000 train_time:290668ms step_avg:41.88ms +[2025-09-11 11:38:24] [Rank 0] step:6941/10000 train_time:290668ms step_avg:41.88ms +[2025-09-11 11:38:25] [Rank 0] step:6961/10000 train_time:291369ms step_avg:41.86ms +[2025-09-11 11:38:25] [Rank 0] step:6961/10000 train_time:291369ms step_avg:41.86ms 
+[2025-09-11 11:38:26] [Rank 0] step:6981/10000 train_time:292071ms step_avg:41.84ms +[2025-09-11 11:38:26] [Rank 0] step:6981/10000 train_time:292071ms step_avg:41.84ms +[2025-09-11 11:38:27] [Rank 0] step:7001/10000 train_time:293058ms step_avg:41.86ms +[2025-09-11 11:38:27] [Rank 0] step:7001/10000 train_time:293058ms step_avg:41.86ms +[2025-09-11 11:38:27] [Rank 0] step:7021/10000 train_time:293758ms step_avg:41.84ms +[2025-09-11 11:38:27] [Rank 0] step:7021/10000 train_time:293758ms step_avg:41.84ms +[2025-09-11 11:38:28] [Rank 0] step:7041/10000 train_time:294455ms step_avg:41.82ms +[2025-09-11 11:38:28] [Rank 0] step:7041/10000 train_time:294455ms step_avg:41.82ms +[2025-09-11 11:38:29] [Rank 0] step:7061/10000 train_time:295155ms step_avg:41.80ms +[2025-09-11 11:38:29] [Rank 0] step:7061/10000 train_time:295155ms step_avg:41.80ms +[2025-09-11 11:38:30] [Rank 0] step:7081/10000 train_time:295854ms step_avg:41.78ms +[2025-09-11 11:38:30] [Rank 0] step:7081/10000 train_time:295854ms step_avg:41.78ms +[2025-09-11 11:38:30] [Rank 0] step:7101/10000 train_time:296554ms step_avg:41.76ms +[2025-09-11 11:38:30] [Rank 0] step:7101/10000 train_time:296554ms step_avg:41.76ms +[2025-09-11 11:38:31] [Rank 0] step:7121/10000 train_time:297255ms step_avg:41.74ms +[2025-09-11 11:38:31] [Rank 0] step:7121/10000 train_time:297255ms step_avg:41.74ms +[2025-09-11 11:38:32] [Rank 0] step:7141/10000 train_time:297954ms step_avg:41.72ms +[2025-09-11 11:38:32] [Rank 0] step:7141/10000 train_time:297954ms step_avg:41.72ms +[2025-09-11 11:38:32] [Rank 0] step:7161/10000 train_time:298655ms step_avg:41.71ms +[2025-09-11 11:38:32] [Rank 0] step:7161/10000 train_time:298655ms step_avg:41.71ms +[2025-09-11 11:38:33] [Rank 0] step:7181/10000 train_time:299353ms step_avg:41.69ms +[2025-09-11 11:38:33] [Rank 0] step:7181/10000 train_time:299353ms step_avg:41.69ms +[2025-09-11 11:38:34] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 11:38:34] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 11:38:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 11:38:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 11:38:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 11:38:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 11:38:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:38:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:38:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 11:38:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 11:38:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 11:38:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 11:38:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 11:38:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 11:38:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 11:38:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 11:38:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 11:38:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 11:38:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 11:38:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 11:38:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 11:38:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 11:38:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 11:38:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 11:38:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 11:38:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 11:38:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 11:38:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 11:38:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 11:38:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 11:38:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 11:38:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 11:38:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 11:38:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 11:38:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 11:38:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 11:38:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 11:38:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 11:38:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 11:38:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 11:38:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 11:38:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 11:38:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 11:38:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 11:38:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:38:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:38:45] [Rank 0] PRINT: step:7200/10000 val_loss:4.5203 total_sharp:1.1182e-03 L1_sharp:6.2694e-03 L2_sharp:2.4308e-03 L3_sharp:5.9636e-03 L4_sharp:3.4268e-03 L5_sharp:7.2657e-03 L6_sharp:1.2309e-02 L7_sharp:1.7882e-02 L8_sharp:3.0203e-02 L9_sharp:4.6705e-02 L10_sharp:6.0504e-02 L11_sharp:9.4760e-02 L12_sharp:8.5298e-01 total_fnorm:1.0000e+01 total_l1_linf:1.1584e+04 total_spectral:5.0000e+00 L1_fnorm:1.6797e-01 L2_fnorm:1.6504e-01 L3_fnorm:1.6406e-01 L4_fnorm:1.6406e-01 L5_fnorm:1.6309e-01 L6_fnorm:1.6309e-01 L7_fnorm:1.6309e-01 L8_fnorm:1.6211e-01 L9_fnorm:1.6211e-01 L10_fnorm:1.6211e-01 L11_fnorm:1.6113e-01 L12_fnorm:1.6113e-01 L1_l1linf:3.8330e-02 L2_l1linf:3.6621e-02 L3_l1linf:3.5400e-02 L4_l1linf:3.6377e-02 L5_l1linf:3.5645e-02 L6_l1linf:3.4912e-02 L7_l1linf:3.5156e-02 L8_l1linf:3.4424e-02 L9_l1linf:3.4180e-02 L10_l1linf:3.3691e-02 L11_l1linf:3.3447e-02 L12_l1linf:3.4180e-02 L1_spectral:2.3184e-03 L2_spectral:2.3247e-03 L3_spectral:2.3163e-03 L4_spectral:2.3254e-03 L5_spectral:2.3221e-03 L6_spectral:2.3266e-03 L7_spectral:2.3116e-03 L8_spectral:2.2835e-03 L9_spectral:2.3303e-03 L10_spectral:2.2972e-03 L11_spectral:2.2730e-03 L12_spectral:2.2988e-03 train_time:300033ms step_avg:41.67ms +[2025-09-11 11:38:45] [Rank 0] PRINT: step:7200/10000 
val_loss:4.5203 total_sharp:1.1182e-03 L1_sharp:6.2694e-03 L2_sharp:2.4308e-03 L3_sharp:5.9636e-03 L4_sharp:3.4268e-03 L5_sharp:7.2657e-03 L6_sharp:1.2309e-02 L7_sharp:1.7882e-02 L8_sharp:3.0203e-02 L9_sharp:4.6705e-02 L10_sharp:6.0504e-02 L11_sharp:9.4760e-02 L12_sharp:8.5298e-01 total_fnorm:1.0000e+01 total_l1_linf:1.1584e+04 total_spectral:5.0000e+00 L1_fnorm:1.6797e-01 L2_fnorm:1.6504e-01 L3_fnorm:1.6406e-01 L4_fnorm:1.6406e-01 L5_fnorm:1.6309e-01 L6_fnorm:1.6309e-01 L7_fnorm:1.6309e-01 L8_fnorm:1.6211e-01 L9_fnorm:1.6211e-01 L10_fnorm:1.6211e-01 L11_fnorm:1.6113e-01 L12_fnorm:1.6113e-01 L1_l1linf:3.8330e-02 L2_l1linf:3.6621e-02 L3_l1linf:3.5400e-02 L4_l1linf:3.6377e-02 L5_l1linf:3.5645e-02 L6_l1linf:3.4912e-02 L7_l1linf:3.5156e-02 L8_l1linf:3.4424e-02 L9_l1linf:3.4180e-02 L10_l1linf:3.3691e-02 L11_l1linf:3.3447e-02 L12_l1linf:3.4180e-02 L1_spectral:2.3184e-03 L2_spectral:2.3247e-03 L3_spectral:2.3163e-03 L4_spectral:2.3254e-03 L5_spectral:2.3221e-03 L6_spectral:2.3266e-03 L7_spectral:2.3116e-03 L8_spectral:2.2835e-03 L9_spectral:2.3303e-03 L10_spectral:2.2972e-03 L11_spectral:2.2730e-03 L12_spectral:2.2988e-03 train_time:300033ms step_avg:41.67ms +[2025-09-11 11:38:47] [Rank 0] step:7201/10000 train_time:302227ms step_avg:41.97ms +[2025-09-11 11:38:47] [Rank 0] step:7201/10000 train_time:302227ms step_avg:41.97ms +[2025-09-11 11:38:48] [Rank 0] step:7221/10000 train_time:302969ms step_avg:41.96ms +[2025-09-11 11:38:48] [Rank 0] step:7221/10000 train_time:302969ms step_avg:41.96ms +[2025-09-11 11:38:48] [Rank 0] step:7241/10000 train_time:303669ms step_avg:41.94ms +[2025-09-11 11:38:48] [Rank 0] step:7241/10000 train_time:303669ms step_avg:41.94ms +[2025-09-11 11:38:49] [Rank 0] step:7261/10000 train_time:304371ms step_avg:41.92ms +[2025-09-11 11:38:49] [Rank 0] step:7261/10000 train_time:304371ms step_avg:41.92ms +[2025-09-11 11:38:50] [Rank 0] step:7281/10000 train_time:305077ms step_avg:41.90ms +[2025-09-11 11:38:50] [Rank 0] step:7281/10000 
train_time:305077ms step_avg:41.90ms +[2025-09-11 11:38:50] [Rank 0] step:7301/10000 train_time:305776ms step_avg:41.88ms +[2025-09-11 11:38:50] [Rank 0] step:7301/10000 train_time:305776ms step_avg:41.88ms +[2025-09-11 11:38:51] [Rank 0] step:7321/10000 train_time:306476ms step_avg:41.86ms +[2025-09-11 11:38:51] [Rank 0] step:7321/10000 train_time:306476ms step_avg:41.86ms +[2025-09-11 11:38:52] [Rank 0] step:7341/10000 train_time:307176ms step_avg:41.84ms +[2025-09-11 11:38:52] [Rank 0] step:7341/10000 train_time:307176ms step_avg:41.84ms +[2025-09-11 11:38:53] [Rank 0] step:7361/10000 train_time:307876ms step_avg:41.83ms +[2025-09-11 11:38:53] [Rank 0] step:7361/10000 train_time:307876ms step_avg:41.83ms +[2025-09-11 11:38:53] [Rank 0] step:7381/10000 train_time:308576ms step_avg:41.81ms +[2025-09-11 11:38:53] [Rank 0] step:7381/10000 train_time:308576ms step_avg:41.81ms +[2025-09-11 11:38:54] [Rank 0] step:7401/10000 train_time:309275ms step_avg:41.79ms +[2025-09-11 11:38:54] [Rank 0] step:7401/10000 train_time:309275ms step_avg:41.79ms +[2025-09-11 11:38:55] [Rank 0] step:7421/10000 train_time:309976ms step_avg:41.77ms +[2025-09-11 11:38:55] [Rank 0] step:7421/10000 train_time:309976ms step_avg:41.77ms +[2025-09-11 11:38:55] [Rank 0] step:7441/10000 train_time:310678ms step_avg:41.75ms +[2025-09-11 11:38:55] [Rank 0] step:7441/10000 train_time:310678ms step_avg:41.75ms +[2025-09-11 11:38:56] [Rank 0] step:7461/10000 train_time:311378ms step_avg:41.73ms +[2025-09-11 11:38:56] [Rank 0] step:7461/10000 train_time:311378ms step_avg:41.73ms +[2025-09-11 11:38:57] [Rank 0] step:7481/10000 train_time:312079ms step_avg:41.72ms +[2025-09-11 11:38:57] [Rank 0] step:7481/10000 train_time:312079ms step_avg:41.72ms +[2025-09-11 11:38:57] [Rank 0] step:7501/10000 train_time:312780ms step_avg:41.70ms +[2025-09-11 11:38:57] [Rank 0] step:7501/10000 train_time:312780ms step_avg:41.70ms +[2025-09-11 11:38:58] [Rank 0] step:7521/10000 train_time:313482ms step_avg:41.68ms 
+[2025-09-11 11:38:58] [Rank 0] step:7521/10000 train_time:313482ms step_avg:41.68ms +[2025-09-11 11:38:59] [Rank 0] step:7541/10000 train_time:314180ms step_avg:41.66ms +[2025-09-11 11:38:59] [Rank 0] step:7541/10000 train_time:314180ms step_avg:41.66ms +[2025-09-11 11:39:00] [Rank 0] step:7561/10000 train_time:314882ms step_avg:41.65ms +[2025-09-11 11:39:00] [Rank 0] step:7561/10000 train_time:314882ms step_avg:41.65ms +[2025-09-11 11:39:00] [Rank 0] step:7581/10000 train_time:315584ms step_avg:41.63ms +[2025-09-11 11:39:00] [Rank 0] step:7581/10000 train_time:315584ms step_avg:41.63ms +[2025-09-11 11:39:01] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 11:39:01] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 11:39:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 11:39:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 11:39:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 11:39:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 11:39:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:39:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:39:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 11:39:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 11:39:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 11:39:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 11:39:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 11:39:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 11:39:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 11:39:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 11:39:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 11:39:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 11:39:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 11:39:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 11:39:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 11:39:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 11:39:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 11:39:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 11:39:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 11:39:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 11:39:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 11:39:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 11:39:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 11:39:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 11:39:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 11:39:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 11:39:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 11:39:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 11:39:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 11:39:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 11:39:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 11:39:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 11:39:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 11:39:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 11:39:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 11:39:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 11:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 11:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 11:39:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:39:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:39:16] [Rank 0] PRINT: step:7600/10000 val_loss:4.4928 total_sharp:9.6140e-04 L1_sharp:6.3581e-03 L2_sharp:3.6244e-03 L3_sharp:5.8260e-03 L4_sharp:4.0877e-03 L5_sharp:8.0482e-03 L6_sharp:8.4829e-03 L7_sharp:1.6701e-02 L8_sharp:3.2016e-02 L9_sharp:3.9847e-02 L10_sharp:5.8864e-02 L11_sharp:8.5084e-02 L12_sharp:3.4362e-01 total_fnorm:7.8750e+00 total_l1_linf:8.3200e+03 total_spectral:3.9531e+00 L1_fnorm:1.4062e-01 L2_fnorm:1.3867e-01 L3_fnorm:1.3867e-01 L4_fnorm:1.3867e-01 L5_fnorm:1.3672e-01 L6_fnorm:1.3672e-01 L7_fnorm:1.3672e-01 L8_fnorm:1.3574e-01 L9_fnorm:1.3574e-01 L10_fnorm:1.3574e-01 L11_fnorm:1.3477e-01 L12_fnorm:1.3379e-01 L1_l1linf:3.0396e-02 L2_l1linf:3.0151e-02 L3_l1linf:2.8687e-02 L4_l1linf:2.9053e-02 L5_l1linf:2.8687e-02 L6_l1linf:2.8076e-02 L7_l1linf:2.7466e-02 L8_l1linf:2.6733e-02 L9_l1linf:2.6978e-02 L10_l1linf:2.6978e-02 L11_l1linf:2.6489e-02 L12_l1linf:2.8076e-02 L1_spectral:2.0304e-03 L2_spectral:2.0205e-03 L3_spectral:2.0335e-03 L4_spectral:2.0205e-03 L5_spectral:2.0169e-03 L6_spectral:2.0010e-03 L7_spectral:2.0230e-03 L8_spectral:1.9899e-03 L9_spectral:2.0034e-03 L10_spectral:1.9882e-03 L11_spectral:1.9553e-03 L12_spectral:1.9732e-03 train_time:316266ms step_avg:41.61ms +[2025-09-11 11:39:16] [Rank 0] PRINT: step:7600/10000 val_loss:4.4928 total_sharp:9.6140e-04 L1_sharp:6.3581e-03 L2_sharp:3.6244e-03 L3_sharp:5.8260e-03 L4_sharp:4.0877e-03 L5_sharp:8.0482e-03 L6_sharp:8.4829e-03 L7_sharp:1.6701e-02 L8_sharp:3.2016e-02 L9_sharp:3.9847e-02 L10_sharp:5.8864e-02 L11_sharp:8.5084e-02 L12_sharp:3.4362e-01 total_fnorm:7.8750e+00 total_l1_linf:8.3200e+03 total_spectral:3.9531e+00 L1_fnorm:1.4062e-01 L2_fnorm:1.3867e-01 L3_fnorm:1.3867e-01 L4_fnorm:1.3867e-01 L5_fnorm:1.3672e-01 L6_fnorm:1.3672e-01 L7_fnorm:1.3672e-01 L8_fnorm:1.3574e-01 L9_fnorm:1.3574e-01 L10_fnorm:1.3574e-01 L11_fnorm:1.3477e-01 L12_fnorm:1.3379e-01 L1_l1linf:3.0396e-02 L2_l1linf:3.0151e-02 L3_l1linf:2.8687e-02 L4_l1linf:2.9053e-02 L5_l1linf:2.8687e-02 
L6_l1linf:2.8076e-02 L7_l1linf:2.7466e-02 L8_l1linf:2.6733e-02 L9_l1linf:2.6978e-02 L10_l1linf:2.6978e-02 L11_l1linf:2.6489e-02 L12_l1linf:2.8076e-02 L1_spectral:2.0304e-03 L2_spectral:2.0205e-03 L3_spectral:2.0335e-03 L4_spectral:2.0205e-03 L5_spectral:2.0169e-03 L6_spectral:2.0010e-03 L7_spectral:2.0230e-03 L8_spectral:1.9899e-03 L9_spectral:2.0034e-03 L10_spectral:1.9882e-03 L11_spectral:1.9553e-03 L12_spectral:1.9732e-03 train_time:316266ms step_avg:41.61ms +[2025-09-11 11:39:17] [Rank 0] step:7601/10000 train_time:318140ms step_avg:41.86ms +[2025-09-11 11:39:17] [Rank 0] step:7601/10000 train_time:318140ms step_avg:41.86ms +[2025-09-11 11:39:18] [Rank 0] step:7621/10000 train_time:318857ms step_avg:41.84ms +[2025-09-11 11:39:18] [Rank 0] step:7621/10000 train_time:318857ms step_avg:41.84ms +[2025-09-11 11:39:19] [Rank 0] step:7641/10000 train_time:319560ms step_avg:41.82ms +[2025-09-11 11:39:19] [Rank 0] step:7641/10000 train_time:319560ms step_avg:41.82ms +[2025-09-11 11:39:20] [Rank 0] step:7661/10000 train_time:320261ms step_avg:41.80ms +[2025-09-11 11:39:20] [Rank 0] step:7661/10000 train_time:320261ms step_avg:41.80ms +[2025-09-11 11:39:20] [Rank 0] step:7681/10000 train_time:320962ms step_avg:41.79ms +[2025-09-11 11:39:20] [Rank 0] step:7681/10000 train_time:320962ms step_avg:41.79ms +[2025-09-11 11:39:21] [Rank 0] step:7701/10000 train_time:321664ms step_avg:41.77ms +[2025-09-11 11:39:21] [Rank 0] step:7701/10000 train_time:321664ms step_avg:41.77ms +[2025-09-11 11:39:22] [Rank 0] step:7721/10000 train_time:322366ms step_avg:41.75ms +[2025-09-11 11:39:22] [Rank 0] step:7721/10000 train_time:322366ms step_avg:41.75ms +[2025-09-11 11:39:22] [Rank 0] step:7741/10000 train_time:323068ms step_avg:41.73ms +[2025-09-11 11:39:22] [Rank 0] step:7741/10000 train_time:323068ms step_avg:41.73ms +[2025-09-11 11:39:23] [Rank 0] step:7761/10000 train_time:323769ms step_avg:41.72ms +[2025-09-11 11:39:23] [Rank 0] step:7761/10000 train_time:323769ms step_avg:41.72ms 
+[2025-09-11 11:39:24] [Rank 0] step:7781/10000 train_time:324472ms step_avg:41.70ms +[2025-09-11 11:39:24] [Rank 0] step:7781/10000 train_time:324472ms step_avg:41.70ms +[2025-09-11 11:39:24] [Rank 0] step:7801/10000 train_time:325172ms step_avg:41.68ms +[2025-09-11 11:39:24] [Rank 0] step:7801/10000 train_time:325172ms step_avg:41.68ms +[2025-09-11 11:39:25] [Rank 0] step:7821/10000 train_time:325874ms step_avg:41.67ms +[2025-09-11 11:39:25] [Rank 0] step:7821/10000 train_time:325874ms step_avg:41.67ms +[2025-09-11 11:39:26] [Rank 0] step:7841/10000 train_time:326577ms step_avg:41.65ms +[2025-09-11 11:39:26] [Rank 0] step:7841/10000 train_time:326577ms step_avg:41.65ms +[2025-09-11 11:39:27] [Rank 0] step:7861/10000 train_time:327281ms step_avg:41.63ms +[2025-09-11 11:39:27] [Rank 0] step:7861/10000 train_time:327281ms step_avg:41.63ms +[2025-09-11 11:39:28] [Rank 0] step:7881/10000 train_time:328248ms step_avg:41.65ms +[2025-09-11 11:39:28] [Rank 0] step:7881/10000 train_time:328248ms step_avg:41.65ms +[2025-09-11 11:39:28] [Rank 0] step:7901/10000 train_time:328949ms step_avg:41.63ms +[2025-09-11 11:39:28] [Rank 0] step:7901/10000 train_time:328949ms step_avg:41.63ms +[2025-09-11 11:39:29] [Rank 0] step:7921/10000 train_time:329650ms step_avg:41.62ms +[2025-09-11 11:39:29] [Rank 0] step:7921/10000 train_time:329650ms step_avg:41.62ms +[2025-09-11 11:39:30] [Rank 0] step:7941/10000 train_time:330464ms step_avg:41.61ms +[2025-09-11 11:39:30] [Rank 0] step:7941/10000 train_time:330464ms step_avg:41.61ms +[2025-09-11 11:39:31] [Rank 0] step:7961/10000 train_time:331320ms step_avg:41.62ms +[2025-09-11 11:39:31] [Rank 0] step:7961/10000 train_time:331320ms step_avg:41.62ms +[2025-09-11 11:39:31] [Rank 0] step:7981/10000 train_time:332024ms step_avg:41.60ms +[2025-09-11 11:39:31] [Rank 0] step:7981/10000 train_time:332024ms step_avg:41.60ms +[2025-09-11 11:39:32] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 11:39:32] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 11:39:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 11:39:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 11:39:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 11:39:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 11:39:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:39:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:39:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 11:39:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 11:39:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 11:39:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 11:39:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 11:39:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 11:39:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 11:39:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 11:39:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 11:39:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 11:39:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 11:39:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 11:39:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 11:39:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 11:39:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 11:39:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 11:39:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 11:39:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 11:39:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 11:39:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 11:39:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 11:39:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 11:39:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 11:39:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 11:39:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 11:39:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 11:39:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 11:39:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 11:39:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 11:39:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 11:39:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 11:39:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 11:39:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 11:39:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 11:39:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 11:39:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 11:39:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:39:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:39:42] [Rank 0] PRINT: step:8000/10000 val_loss:4.4786 total_sharp:9.6690e-04 L1_sharp:3.5878e-03 L2_sharp:2.9996e-03 L3_sharp:3.9252e-03 L4_sharp:2.4410e-03 L5_sharp:8.7489e-03 L6_sharp:1.1590e-02 L7_sharp:1.9518e-02 L8_sharp:2.8929e-02 L9_sharp:4.0671e-02 L10_sharp:5.7069e-02 L11_sharp:8.7127e-02 L12_sharp:4.0227e-01 total_fnorm:6.4688e+00 total_l1_linf:6.3360e+03 total_spectral:3.2344e+00 L1_fnorm:1.1523e-01 L2_fnorm:1.1377e-01 L3_fnorm:1.1230e-01 L4_fnorm:1.1230e-01 L5_fnorm:1.1182e-01 L6_fnorm:1.1182e-01 L7_fnorm:1.1182e-01 L8_fnorm:1.1084e-01 L9_fnorm:1.1084e-01 L10_fnorm:1.1035e-01 L11_fnorm:1.0986e-01 L12_fnorm:1.0840e-01 L1_l1linf:2.3682e-02 L2_l1linf:2.2461e-02 L3_l1linf:2.1606e-02 L4_l1linf:2.1851e-02 L5_l1linf:2.1484e-02 L6_l1linf:2.1484e-02 L7_l1linf:2.0752e-02 L8_l1linf:2.0508e-02 L9_l1linf:2.0142e-02 L10_l1linf:2.0142e-02 L11_l1linf:2.0264e-02 L12_l1linf:2.0264e-02 L1_spectral:1.7075e-03 L2_spectral:1.7169e-03 L3_spectral:1.7219e-03 L4_spectral:1.6989e-03 L5_spectral:1.6909e-03 L6_spectral:1.6896e-03 L7_spectral:1.6956e-03 L8_spectral:1.6619e-03 L9_spectral:1.6765e-03 L10_spectral:1.6484e-03 L11_spectral:1.6199e-03 L12_spectral:1.6657e-03 train_time:332703ms step_avg:41.59ms +[2025-09-11 11:39:42] [Rank 0] PRINT: step:8000/10000 
val_loss:4.4786 total_sharp:9.6690e-04 L1_sharp:3.5878e-03 L2_sharp:2.9996e-03 L3_sharp:3.9252e-03 L4_sharp:2.4410e-03 L5_sharp:8.7489e-03 L6_sharp:1.1590e-02 L7_sharp:1.9518e-02 L8_sharp:2.8929e-02 L9_sharp:4.0671e-02 L10_sharp:5.7069e-02 L11_sharp:8.7127e-02 L12_sharp:4.0227e-01 total_fnorm:6.4688e+00 total_l1_linf:6.3360e+03 total_spectral:3.2344e+00 L1_fnorm:1.1523e-01 L2_fnorm:1.1377e-01 L3_fnorm:1.1230e-01 L4_fnorm:1.1230e-01 L5_fnorm:1.1182e-01 L6_fnorm:1.1182e-01 L7_fnorm:1.1182e-01 L8_fnorm:1.1084e-01 L9_fnorm:1.1084e-01 L10_fnorm:1.1035e-01 L11_fnorm:1.0986e-01 L12_fnorm:1.0840e-01 L1_l1linf:2.3682e-02 L2_l1linf:2.2461e-02 L3_l1linf:2.1606e-02 L4_l1linf:2.1851e-02 L5_l1linf:2.1484e-02 L6_l1linf:2.1484e-02 L7_l1linf:2.0752e-02 L8_l1linf:2.0508e-02 L9_l1linf:2.0142e-02 L10_l1linf:2.0142e-02 L11_l1linf:2.0264e-02 L12_l1linf:2.0264e-02 L1_spectral:1.7075e-03 L2_spectral:1.7169e-03 L3_spectral:1.7219e-03 L4_spectral:1.6989e-03 L5_spectral:1.6909e-03 L6_spectral:1.6896e-03 L7_spectral:1.6956e-03 L8_spectral:1.6619e-03 L9_spectral:1.6765e-03 L10_spectral:1.6484e-03 L11_spectral:1.6199e-03 L12_spectral:1.6657e-03 train_time:332703ms step_avg:41.59ms +[2025-09-11 11:39:44] [Rank 0] step:8001/10000 train_time:334085ms step_avg:41.76ms +[2025-09-11 11:39:44] [Rank 0] step:8001/10000 train_time:334085ms step_avg:41.76ms +[2025-09-11 11:39:44] [Rank 0] step:8021/10000 train_time:334814ms step_avg:41.74ms +[2025-09-11 11:39:44] [Rank 0] step:8021/10000 train_time:334814ms step_avg:41.74ms +[2025-09-11 11:39:45] [Rank 0] step:8041/10000 train_time:335515ms step_avg:41.73ms +[2025-09-11 11:39:45] [Rank 0] step:8041/10000 train_time:335515ms step_avg:41.73ms +[2025-09-11 11:39:46] [Rank 0] step:8061/10000 train_time:336219ms step_avg:41.71ms +[2025-09-11 11:39:46] [Rank 0] step:8061/10000 train_time:336219ms step_avg:41.71ms +[2025-09-11 11:39:46] [Rank 0] step:8081/10000 train_time:336918ms step_avg:41.69ms +[2025-09-11 11:39:46] [Rank 0] step:8081/10000 
train_time:336918ms step_avg:41.69ms +[2025-09-11 11:39:47] [Rank 0] step:8101/10000 train_time:337618ms step_avg:41.68ms +[2025-09-11 11:39:47] [Rank 0] step:8101/10000 train_time:337618ms step_avg:41.68ms +[2025-09-11 11:39:48] [Rank 0] step:8121/10000 train_time:338324ms step_avg:41.66ms +[2025-09-11 11:39:48] [Rank 0] step:8121/10000 train_time:338324ms step_avg:41.66ms +[2025-09-11 11:39:49] [Rank 0] step:8141/10000 train_time:339750ms step_avg:41.73ms +[2025-09-11 11:39:49] [Rank 0] step:8141/10000 train_time:339750ms step_avg:41.73ms +[2025-09-11 11:39:50] [Rank 0] step:8161/10000 train_time:340455ms step_avg:41.72ms +[2025-09-11 11:39:50] [Rank 0] step:8161/10000 train_time:340455ms step_avg:41.72ms +[2025-09-11 11:39:51] [Rank 0] step:8181/10000 train_time:341168ms step_avg:41.70ms +[2025-09-11 11:39:51] [Rank 0] step:8181/10000 train_time:341168ms step_avg:41.70ms +[2025-09-11 11:39:51] [Rank 0] step:8201/10000 train_time:341877ms step_avg:41.69ms +[2025-09-11 11:39:51] [Rank 0] step:8201/10000 train_time:341877ms step_avg:41.69ms +[2025-09-11 11:39:52] [Rank 0] step:8221/10000 train_time:342585ms step_avg:41.67ms +[2025-09-11 11:39:52] [Rank 0] step:8221/10000 train_time:342585ms step_avg:41.67ms +[2025-09-11 11:39:53] [Rank 0] step:8241/10000 train_time:343300ms step_avg:41.66ms +[2025-09-11 11:39:53] [Rank 0] step:8241/10000 train_time:343300ms step_avg:41.66ms +[2025-09-11 11:39:53] [Rank 0] step:8261/10000 train_time:344006ms step_avg:41.64ms +[2025-09-11 11:39:53] [Rank 0] step:8261/10000 train_time:344006ms step_avg:41.64ms +[2025-09-11 11:39:54] [Rank 0] step:8281/10000 train_time:344710ms step_avg:41.63ms +[2025-09-11 11:39:54] [Rank 0] step:8281/10000 train_time:344710ms step_avg:41.63ms +[2025-09-11 11:39:55] [Rank 0] step:8301/10000 train_time:345419ms step_avg:41.61ms +[2025-09-11 11:39:55] [Rank 0] step:8301/10000 train_time:345419ms step_avg:41.61ms +[2025-09-11 11:39:56] [Rank 0] step:8321/10000 train_time:346125ms step_avg:41.60ms 
+[2025-09-11 11:39:56] [Rank 0] step:8321/10000 train_time:346125ms step_avg:41.60ms +[2025-09-11 11:39:56] [Rank 0] step:8341/10000 train_time:346838ms step_avg:41.58ms +[2025-09-11 11:39:56] [Rank 0] step:8341/10000 train_time:346838ms step_avg:41.58ms +[2025-09-11 11:39:57] [Rank 0] step:8361/10000 train_time:347541ms step_avg:41.57ms +[2025-09-11 11:39:57] [Rank 0] step:8361/10000 train_time:347541ms step_avg:41.57ms +[2025-09-11 11:39:58] [Rank 0] step:8381/10000 train_time:348250ms step_avg:41.55ms +[2025-09-11 11:39:58] [Rank 0] step:8381/10000 train_time:348250ms step_avg:41.55ms +[2025-09-11 11:39:58] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 11:39:58] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 11:39:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 11:39:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 11:40:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 11:40:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 11:40:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:40:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:40:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 11:40:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 11:40:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 11:40:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 11:40:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 11:40:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 11:40:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 11:40:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 11:40:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 11:40:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 11:40:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 11:40:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 11:40:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 11:40:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 11:40:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 11:40:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 11:40:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 11:40:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 11:40:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 11:40:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 11:40:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 11:40:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 11:40:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 11:40:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 11:40:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 11:40:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 11:40:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 11:40:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 11:40:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 11:40:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 11:40:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 11:40:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 11:40:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 11:40:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 11:40:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 11:40:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 11:40:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:40:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:40:09] [Rank 0] PRINT: step:8400/10000 val_loss:4.4625 total_sharp:9.3852e-04 L1_sharp:6.0464e-03 L2_sharp:2.6859e-03 L3_sharp:4.6343e-03 L4_sharp:4.3273e-03 L5_sharp:6.4579e-03 L6_sharp:9.8304e-03 L7_sharp:1.5265e-02 L8_sharp:2.6370e-02 L9_sharp:3.4494e-02 L10_sharp:4.8593e-02 L11_sharp:7.7128e-02 L12_sharp:3.4277e-01 total_fnorm:4.5938e+00 total_l1_linf:4.0640e+03 total_spectral:2.2969e+00 L1_fnorm:9.0332e-02 L2_fnorm:8.8867e-02 L3_fnorm:8.8379e-02 L4_fnorm:8.7891e-02 L5_fnorm:8.7891e-02 L6_fnorm:8.7402e-02 L7_fnorm:8.6914e-02 L8_fnorm:8.6426e-02 L9_fnorm:8.6426e-02 L10_fnorm:8.5938e-02 L11_fnorm:8.5449e-02 L12_fnorm:8.3984e-02 L1_l1linf:1.7090e-02 L2_l1linf:1.6479e-02 L3_l1linf:1.5991e-02 L4_l1linf:1.5991e-02 L5_l1linf:1.5869e-02 L6_l1linf:1.5015e-02 L7_l1linf:1.4954e-02 L8_l1linf:1.4526e-02 L9_l1linf:1.4221e-02 L10_l1linf:1.4648e-02 L11_l1linf:1.4404e-02 L12_l1linf:1.5381e-02 L1_spectral:1.3937e-03 L2_spectral:1.3931e-03 L3_spectral:1.3808e-03 L4_spectral:1.3759e-03 L5_spectral:1.3697e-03 L6_spectral:1.3716e-03 L7_spectral:1.3676e-03 L8_spectral:1.3323e-03 L9_spectral:1.3565e-03 L10_spectral:1.3243e-03 L11_spectral:1.2982e-03 L12_spectral:1.3405e-03 train_time:348940ms step_avg:41.54ms +[2025-09-11 11:40:09] [Rank 0] PRINT: step:8400/10000 val_loss:4.4625 total_sharp:9.3852e-04 L1_sharp:6.0464e-03 L2_sharp:2.6859e-03 L3_sharp:4.6343e-03 L4_sharp:4.3273e-03 L5_sharp:6.4579e-03 L6_sharp:9.8304e-03 L7_sharp:1.5265e-02 L8_sharp:2.6370e-02 L9_sharp:3.4494e-02 L10_sharp:4.8593e-02 L11_sharp:7.7128e-02 L12_sharp:3.4277e-01 total_fnorm:4.5938e+00 total_l1_linf:4.0640e+03 total_spectral:2.2969e+00 L1_fnorm:9.0332e-02 L2_fnorm:8.8867e-02 L3_fnorm:8.8379e-02 L4_fnorm:8.7891e-02 L5_fnorm:8.7891e-02 L6_fnorm:8.7402e-02 L7_fnorm:8.6914e-02 L8_fnorm:8.6426e-02 L9_fnorm:8.6426e-02 L10_fnorm:8.5938e-02 L11_fnorm:8.5449e-02 L12_fnorm:8.3984e-02 L1_l1linf:1.7090e-02 L2_l1linf:1.6479e-02 L3_l1linf:1.5991e-02 L4_l1linf:1.5991e-02 L5_l1linf:1.5869e-02 
L6_l1linf:1.5015e-02 L7_l1linf:1.4954e-02 L8_l1linf:1.4526e-02 L9_l1linf:1.4221e-02 L10_l1linf:1.4648e-02 L11_l1linf:1.4404e-02 L12_l1linf:1.5381e-02 L1_spectral:1.3937e-03 L2_spectral:1.3931e-03 L3_spectral:1.3808e-03 L4_spectral:1.3759e-03 L5_spectral:1.3697e-03 L6_spectral:1.3716e-03 L7_spectral:1.3676e-03 L8_spectral:1.3323e-03 L9_spectral:1.3565e-03 L10_spectral:1.3243e-03 L11_spectral:1.2982e-03 L12_spectral:1.3405e-03 train_time:348940ms step_avg:41.54ms +[2025-09-11 11:40:10] [Rank 0] step:8401/10000 train_time:350448ms step_avg:41.71ms +[2025-09-11 11:40:10] [Rank 0] step:8401/10000 train_time:350448ms step_avg:41.71ms +[2025-09-11 11:40:11] [Rank 0] step:8421/10000 train_time:351453ms step_avg:41.74ms +[2025-09-11 11:40:11] [Rank 0] step:8421/10000 train_time:351453ms step_avg:41.74ms +[2025-09-11 11:40:12] [Rank 0] step:8441/10000 train_time:352162ms step_avg:41.72ms +[2025-09-11 11:40:12] [Rank 0] step:8441/10000 train_time:352162ms step_avg:41.72ms +[2025-09-11 11:40:13] [Rank 0] step:8461/10000 train_time:352870ms step_avg:41.71ms +[2025-09-11 11:40:13] [Rank 0] step:8461/10000 train_time:352870ms step_avg:41.71ms +[2025-09-11 11:40:13] [Rank 0] step:8481/10000 train_time:353580ms step_avg:41.69ms +[2025-09-11 11:40:13] [Rank 0] step:8481/10000 train_time:353580ms step_avg:41.69ms +[2025-09-11 11:40:14] [Rank 0] step:8501/10000 train_time:354287ms step_avg:41.68ms +[2025-09-11 11:40:14] [Rank 0] step:8501/10000 train_time:354287ms step_avg:41.68ms +[2025-09-11 11:40:15] [Rank 0] step:8521/10000 train_time:354995ms step_avg:41.66ms +[2025-09-11 11:40:15] [Rank 0] step:8521/10000 train_time:354995ms step_avg:41.66ms +[2025-09-11 11:40:15] [Rank 0] step:8541/10000 train_time:355702ms step_avg:41.65ms +[2025-09-11 11:40:15] [Rank 0] step:8541/10000 train_time:355702ms step_avg:41.65ms +[2025-09-11 11:40:16] [Rank 0] step:8561/10000 train_time:356414ms step_avg:41.63ms +[2025-09-11 11:40:16] [Rank 0] step:8561/10000 train_time:356414ms step_avg:41.63ms 
+[2025-09-11 11:40:17] [Rank 0] step:8581/10000 train_time:357126ms step_avg:41.62ms +[2025-09-11 11:40:17] [Rank 0] step:8581/10000 train_time:357126ms step_avg:41.62ms +[2025-09-11 11:40:17] [Rank 0] step:8601/10000 train_time:357835ms step_avg:41.60ms +[2025-09-11 11:40:17] [Rank 0] step:8601/10000 train_time:357835ms step_avg:41.60ms +[2025-09-11 11:40:18] [Rank 0] step:8621/10000 train_time:358541ms step_avg:41.59ms +[2025-09-11 11:40:18] [Rank 0] step:8621/10000 train_time:358541ms step_avg:41.59ms +[2025-09-11 11:40:19] [Rank 0] step:8641/10000 train_time:359249ms step_avg:41.57ms +[2025-09-11 11:40:19] [Rank 0] step:8641/10000 train_time:359249ms step_avg:41.57ms +[2025-09-11 11:40:20] [Rank 0] step:8661/10000 train_time:359957ms step_avg:41.56ms +[2025-09-11 11:40:20] [Rank 0] step:8661/10000 train_time:359957ms step_avg:41.56ms +[2025-09-11 11:40:20] [Rank 0] step:8681/10000 train_time:360667ms step_avg:41.55ms +[2025-09-11 11:40:20] [Rank 0] step:8681/10000 train_time:360667ms step_avg:41.55ms +[2025-09-11 11:40:21] [Rank 0] step:8701/10000 train_time:361374ms step_avg:41.53ms +[2025-09-11 11:40:21] [Rank 0] step:8701/10000 train_time:361374ms step_avg:41.53ms +[2025-09-11 11:40:22] [Rank 0] step:8721/10000 train_time:362084ms step_avg:41.52ms +[2025-09-11 11:40:22] [Rank 0] step:8721/10000 train_time:362084ms step_avg:41.52ms +[2025-09-11 11:40:22] [Rank 0] step:8741/10000 train_time:362787ms step_avg:41.50ms +[2025-09-11 11:40:22] [Rank 0] step:8741/10000 train_time:362787ms step_avg:41.50ms +[2025-09-11 11:40:23] [Rank 0] step:8761/10000 train_time:363497ms step_avg:41.49ms +[2025-09-11 11:40:23] [Rank 0] step:8761/10000 train_time:363497ms step_avg:41.49ms +[2025-09-11 11:40:24] [Rank 0] step:8781/10000 train_time:364202ms step_avg:41.48ms +[2025-09-11 11:40:24] [Rank 0] step:8781/10000 train_time:364202ms step_avg:41.48ms +[2025-09-11 11:40:25] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 11:40:25] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 11:40:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 11:40:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 11:40:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 11:40:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 11:40:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:40:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:40:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 11:40:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 11:40:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 11:40:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 11:40:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 11:40:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 11:40:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 11:40:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 11:40:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 11:40:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 11:40:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 11:40:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 11:40:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 11:40:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 11:40:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 11:40:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 11:40:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 11:40:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 11:40:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 11:40:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 11:40:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 11:40:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 11:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 11:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 11:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 11:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 11:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 11:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 11:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 11:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 11:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 11:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 11:40:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 11:40:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 11:40:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 11:40:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 11:40:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:40:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:40:35] [Rank 0] PRINT: step:8800/10000 val_loss:4.4550 total_sharp:8.8433e-04 L1_sharp:5.6434e-03 L2_sharp:2.9035e-03 L3_sharp:2.6290e-03 L4_sharp:2.3175e-03 L5_sharp:6.7563e-03 L6_sharp:8.3938e-03 L7_sharp:1.1074e-02 L8_sharp:2.3181e-02 L9_sharp:3.0967e-02 L10_sharp:4.3150e-02 L11_sharp:6.6670e-02 L12_sharp:4.2829e-01 total_fnorm:3.3125e+00 total_l1_linf:2.6080e+03 total_spectral:1.6562e+00 L1_fnorm:6.5430e-02 L2_fnorm:6.3965e-02 L3_fnorm:6.3477e-02 L4_fnorm:6.3477e-02 L5_fnorm:6.2988e-02 L6_fnorm:6.2988e-02 L7_fnorm:6.2988e-02 L8_fnorm:6.2256e-02 L9_fnorm:6.2256e-02 L10_fnorm:6.2012e-02 L11_fnorm:6.1279e-02 L12_fnorm:6.0547e-02 L1_l1linf:1.1353e-02 L2_l1linf:1.0925e-02 L3_l1linf:1.0193e-02 L4_l1linf:1.0437e-02 L5_l1linf:1.0315e-02 L6_l1linf:1.0071e-02 L7_l1linf:9.7656e-03 L8_l1linf:9.7656e-03 L9_l1linf:9.4604e-03 L10_l1linf:9.3384e-03 L11_l1linf:9.3384e-03 L12_l1linf:1.0071e-02 L1_spectral:1.0484e-03 L2_spectral:1.0322e-03 L3_spectral:1.0212e-03 L4_spectral:1.0219e-03 L5_spectral:1.0133e-03 L6_spectral:1.0228e-03 L7_spectral:1.0090e-03 L8_spectral:9.8824e-04 L9_spectral:9.9722e-04 L10_spectral:9.7841e-04 L11_spectral:9.6069e-04 L12_spectral:9.8386e-04 train_time:364887ms step_avg:41.46ms +[2025-09-11 11:40:35] [Rank 0] PRINT: step:8800/10000 
val_loss:4.4550 total_sharp:8.8433e-04 L1_sharp:5.6434e-03 L2_sharp:2.9035e-03 L3_sharp:2.6290e-03 L4_sharp:2.3175e-03 L5_sharp:6.7563e-03 L6_sharp:8.3938e-03 L7_sharp:1.1074e-02 L8_sharp:2.3181e-02 L9_sharp:3.0967e-02 L10_sharp:4.3150e-02 L11_sharp:6.6670e-02 L12_sharp:4.2829e-01 total_fnorm:3.3125e+00 total_l1_linf:2.6080e+03 total_spectral:1.6562e+00 L1_fnorm:6.5430e-02 L2_fnorm:6.3965e-02 L3_fnorm:6.3477e-02 L4_fnorm:6.3477e-02 L5_fnorm:6.2988e-02 L6_fnorm:6.2988e-02 L7_fnorm:6.2988e-02 L8_fnorm:6.2256e-02 L9_fnorm:6.2256e-02 L10_fnorm:6.2012e-02 L11_fnorm:6.1279e-02 L12_fnorm:6.0547e-02 L1_l1linf:1.1353e-02 L2_l1linf:1.0925e-02 L3_l1linf:1.0193e-02 L4_l1linf:1.0437e-02 L5_l1linf:1.0315e-02 L6_l1linf:1.0071e-02 L7_l1linf:9.7656e-03 L8_l1linf:9.7656e-03 L9_l1linf:9.4604e-03 L10_l1linf:9.3384e-03 L11_l1linf:9.3384e-03 L12_l1linf:1.0071e-02 L1_spectral:1.0484e-03 L2_spectral:1.0322e-03 L3_spectral:1.0212e-03 L4_spectral:1.0219e-03 L5_spectral:1.0133e-03 L6_spectral:1.0228e-03 L7_spectral:1.0090e-03 L8_spectral:9.8824e-04 L9_spectral:9.9722e-04 L10_spectral:9.7841e-04 L11_spectral:9.6069e-04 L12_spectral:9.8386e-04 train_time:364887ms step_avg:41.46ms +[2025-09-11 11:40:36] [Rank 0] step:8801/10000 train_time:366224ms step_avg:41.61ms +[2025-09-11 11:40:36] [Rank 0] step:8801/10000 train_time:366224ms step_avg:41.61ms +[2025-09-11 11:40:37] [Rank 0] step:8821/10000 train_time:366973ms step_avg:41.60ms +[2025-09-11 11:40:37] [Rank 0] step:8821/10000 train_time:366973ms step_avg:41.60ms +[2025-09-11 11:40:37] [Rank 0] step:8841/10000 train_time:367682ms step_avg:41.59ms +[2025-09-11 11:40:37] [Rank 0] step:8841/10000 train_time:367682ms step_avg:41.59ms +[2025-09-11 11:40:38] [Rank 0] step:8861/10000 train_time:368390ms step_avg:41.57ms +[2025-09-11 11:40:38] [Rank 0] step:8861/10000 train_time:368390ms step_avg:41.57ms +[2025-09-11 11:40:39] [Rank 0] step:8881/10000 train_time:369099ms step_avg:41.56ms +[2025-09-11 11:40:39] [Rank 0] step:8881/10000 
train_time:369099ms step_avg:41.56ms +[2025-09-11 11:40:39] [Rank 0] step:8901/10000 train_time:369812ms step_avg:41.55ms +[2025-09-11 11:40:39] [Rank 0] step:8901/10000 train_time:369812ms step_avg:41.55ms +[2025-09-11 11:40:40] [Rank 0] step:8921/10000 train_time:370517ms step_avg:41.53ms +[2025-09-11 11:40:40] [Rank 0] step:8921/10000 train_time:370517ms step_avg:41.53ms +[2025-09-11 11:40:41] [Rank 0] step:8941/10000 train_time:371229ms step_avg:41.52ms +[2025-09-11 11:40:41] [Rank 0] step:8941/10000 train_time:371229ms step_avg:41.52ms +[2025-09-11 11:40:42] [Rank 0] step:8961/10000 train_time:371946ms step_avg:41.51ms +[2025-09-11 11:40:42] [Rank 0] step:8961/10000 train_time:371946ms step_avg:41.51ms +[2025-09-11 11:40:42] [Rank 0] step:8981/10000 train_time:372658ms step_avg:41.49ms +[2025-09-11 11:40:42] [Rank 0] step:8981/10000 train_time:372658ms step_avg:41.49ms +[2025-09-11 11:40:43] [Rank 0] step:9001/10000 train_time:373362ms step_avg:41.48ms +[2025-09-11 11:40:43] [Rank 0] step:9001/10000 train_time:373362ms step_avg:41.48ms +[2025-09-11 11:40:44] [Rank 0] step:9021/10000 train_time:374071ms step_avg:41.47ms +[2025-09-11 11:40:44] [Rank 0] step:9021/10000 train_time:374071ms step_avg:41.47ms +[2025-09-11 11:40:44] [Rank 0] step:9041/10000 train_time:374782ms step_avg:41.45ms +[2025-09-11 11:40:44] [Rank 0] step:9041/10000 train_time:374782ms step_avg:41.45ms +[2025-09-11 11:40:45] [Rank 0] step:9061/10000 train_time:375489ms step_avg:41.44ms +[2025-09-11 11:40:45] [Rank 0] step:9061/10000 train_time:375489ms step_avg:41.44ms +[2025-09-11 11:40:46] [Rank 0] step:9081/10000 train_time:376200ms step_avg:41.43ms +[2025-09-11 11:40:46] [Rank 0] step:9081/10000 train_time:376200ms step_avg:41.43ms +[2025-09-11 11:40:47] [Rank 0] step:9101/10000 train_time:376913ms step_avg:41.41ms +[2025-09-11 11:40:47] [Rank 0] step:9101/10000 train_time:376913ms step_avg:41.41ms +[2025-09-11 11:40:47] [Rank 0] step:9121/10000 train_time:377627ms step_avg:41.40ms 
+[2025-09-11 11:40:47] [Rank 0] step:9121/10000 train_time:377627ms step_avg:41.40ms +[2025-09-11 11:40:48] [Rank 0] step:9141/10000 train_time:378333ms step_avg:41.39ms +[2025-09-11 11:40:48] [Rank 0] step:9141/10000 train_time:378333ms step_avg:41.39ms +[2025-09-11 11:40:49] [Rank 0] step:9161/10000 train_time:379046ms step_avg:41.38ms +[2025-09-11 11:40:49] [Rank 0] step:9161/10000 train_time:379046ms step_avg:41.38ms +[2025-09-11 11:40:49] [Rank 0] step:9181/10000 train_time:379763ms step_avg:41.36ms +[2025-09-11 11:40:49] [Rank 0] step:9181/10000 train_time:379763ms step_avg:41.36ms +[2025-09-11 11:40:50] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 11:40:50] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 11:40:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 11:40:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 11:40:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 11:40:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 11:40:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:40:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:40:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 11:40:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 11:40:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 11:40:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 11:40:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 11:40:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 11:40:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 11:40:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 11:40:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 11:40:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 11:40:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 11:40:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 11:40:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 11:40:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 11:40:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 11:40:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 11:40:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 11:40:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 11:40:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 11:40:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 11:40:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 11:40:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 11:40:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 11:40:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 11:40:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 11:40:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 11:40:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 11:40:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 11:40:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 11:40:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 11:40:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 11:40:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 11:40:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 11:40:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 11:41:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 11:41:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 11:41:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:41:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:41:00] [Rank 0] PRINT: step:9200/10000 val_loss:4.4433 total_sharp:8.5316e-04 L1_sharp:3.9621e-03 L2_sharp:2.0737e-03 L3_sharp:3.4455e-03 L4_sharp:2.6016e-03 L5_sharp:4.6104e-03 L6_sharp:7.5877e-03 L7_sharp:1.1570e-02 L8_sharp:1.9916e-02 L9_sharp:2.9327e-02 L10_sharp:3.6620e-02 L11_sharp:5.8931e-02 L12_sharp:5.3684e-01 total_fnorm:2.1875e+00 total_l1_linf:1.4640e+03 total_spectral:1.1016e+00 L1_fnorm:4.3457e-02 L2_fnorm:4.2480e-02 L3_fnorm:4.2236e-02 L4_fnorm:4.2236e-02 L5_fnorm:4.1992e-02 L6_fnorm:4.1992e-02 L7_fnorm:4.1748e-02 L8_fnorm:4.1260e-02 L9_fnorm:4.1504e-02 L10_fnorm:4.1260e-02 L11_fnorm:4.1016e-02 L12_fnorm:4.0283e-02 L1_l1linf:6.4697e-03 L2_l1linf:6.2561e-03 L3_l1linf:5.9814e-03 L4_l1linf:6.1035e-03 L5_l1linf:5.9814e-03 L6_l1linf:5.7983e-03 L7_l1linf:5.7068e-03 L8_l1linf:5.6763e-03 L9_l1linf:5.5237e-03 L10_l1linf:5.6152e-03 L11_l1linf:5.6152e-03 L12_l1linf:5.9509e-03 L1_spectral:7.2170e-04 L2_spectral:7.0986e-04 L3_spectral:7.0230e-04 L4_spectral:6.9637e-04 L5_spectral:6.9753e-04 L6_spectral:6.9613e-04 L7_spectral:6.9805e-04 L8_spectral:6.7155e-04 L9_spectral:6.8614e-04 L10_spectral:6.6505e-04 L11_spectral:6.4991e-04 L12_spectral:6.7580e-04 train_time:380457ms step_avg:41.35ms +[2025-09-11 11:41:00] [Rank 0] PRINT: step:9200/10000 val_loss:4.4433 total_sharp:8.5316e-04 L1_sharp:3.9621e-03 L2_sharp:2.0737e-03 L3_sharp:3.4455e-03 L4_sharp:2.6016e-03 L5_sharp:4.6104e-03 L6_sharp:7.5877e-03 L7_sharp:1.1570e-02 L8_sharp:1.9916e-02 L9_sharp:2.9327e-02 L10_sharp:3.6620e-02 L11_sharp:5.8931e-02 L12_sharp:5.3684e-01 total_fnorm:2.1875e+00 total_l1_linf:1.4640e+03 total_spectral:1.1016e+00 L1_fnorm:4.3457e-02 L2_fnorm:4.2480e-02 L3_fnorm:4.2236e-02 L4_fnorm:4.2236e-02 L5_fnorm:4.1992e-02 L6_fnorm:4.1992e-02 L7_fnorm:4.1748e-02 L8_fnorm:4.1260e-02 L9_fnorm:4.1504e-02 L10_fnorm:4.1260e-02 L11_fnorm:4.1016e-02 L12_fnorm:4.0283e-02 L1_l1linf:6.4697e-03 L2_l1linf:6.2561e-03 L3_l1linf:5.9814e-03 L4_l1linf:6.1035e-03 L5_l1linf:5.9814e-03 
L6_l1linf:5.7983e-03 L7_l1linf:5.7068e-03 L8_l1linf:5.6763e-03 L9_l1linf:5.5237e-03 L10_l1linf:5.6152e-03 L11_l1linf:5.6152e-03 L12_l1linf:5.9509e-03 L1_spectral:7.2170e-04 L2_spectral:7.0986e-04 L3_spectral:7.0230e-04 L4_spectral:6.9637e-04 L5_spectral:6.9753e-04 L6_spectral:6.9613e-04 L7_spectral:6.9805e-04 L8_spectral:6.7155e-04 L9_spectral:6.8614e-04 L10_spectral:6.6505e-04 L11_spectral:6.4991e-04 L12_spectral:6.7580e-04 train_time:380457ms step_avg:41.35ms +[2025-09-11 11:41:02] [Rank 0] step:9201/10000 train_time:381822ms step_avg:41.50ms +[2025-09-11 11:41:02] [Rank 0] step:9201/10000 train_time:381822ms step_avg:41.50ms +[2025-09-11 11:41:02] [Rank 0] step:9221/10000 train_time:382559ms step_avg:41.49ms +[2025-09-11 11:41:02] [Rank 0] step:9221/10000 train_time:382559ms step_avg:41.49ms +[2025-09-11 11:41:03] [Rank 0] step:9241/10000 train_time:383267ms step_avg:41.47ms +[2025-09-11 11:41:03] [Rank 0] step:9241/10000 train_time:383267ms step_avg:41.47ms +[2025-09-11 11:41:04] [Rank 0] step:9261/10000 train_time:383978ms step_avg:41.46ms +[2025-09-11 11:41:04] [Rank 0] step:9261/10000 train_time:383978ms step_avg:41.46ms +[2025-09-11 11:41:05] [Rank 0] step:9281/10000 train_time:384689ms step_avg:41.45ms +[2025-09-11 11:41:05] [Rank 0] step:9281/10000 train_time:384689ms step_avg:41.45ms +[2025-09-11 11:41:05] [Rank 0] step:9301/10000 train_time:385395ms step_avg:41.44ms +[2025-09-11 11:41:05] [Rank 0] step:9301/10000 train_time:385395ms step_avg:41.44ms +[2025-09-11 11:41:06] [Rank 0] step:9321/10000 train_time:386107ms step_avg:41.42ms +[2025-09-11 11:41:06] [Rank 0] step:9321/10000 train_time:386107ms step_avg:41.42ms +[2025-09-11 11:41:07] [Rank 0] step:9341/10000 train_time:386814ms step_avg:41.41ms +[2025-09-11 11:41:07] [Rank 0] step:9341/10000 train_time:386814ms step_avg:41.41ms +[2025-09-11 11:41:07] [Rank 0] step:9361/10000 train_time:387519ms step_avg:41.40ms +[2025-09-11 11:41:07] [Rank 0] step:9361/10000 train_time:387519ms step_avg:41.40ms 
+[2025-09-11 11:41:08] [Rank 0] step:9381/10000 train_time:388227ms step_avg:41.38ms +[2025-09-11 11:41:08] [Rank 0] step:9381/10000 train_time:388227ms step_avg:41.38ms +[2025-09-11 11:41:09] [Rank 0] step:9401/10000 train_time:388939ms step_avg:41.37ms +[2025-09-11 11:41:09] [Rank 0] step:9401/10000 train_time:388939ms step_avg:41.37ms +[2025-09-11 11:41:10] [Rank 0] step:9421/10000 train_time:389650ms step_avg:41.36ms +[2025-09-11 11:41:10] [Rank 0] step:9421/10000 train_time:389650ms step_avg:41.36ms +[2025-09-11 11:41:10] [Rank 0] step:9441/10000 train_time:390363ms step_avg:41.35ms +[2025-09-11 11:41:10] [Rank 0] step:9441/10000 train_time:390363ms step_avg:41.35ms +[2025-09-11 11:41:11] [Rank 0] step:9461/10000 train_time:391072ms step_avg:41.34ms +[2025-09-11 11:41:11] [Rank 0] step:9461/10000 train_time:391072ms step_avg:41.34ms +[2025-09-11 11:41:12] [Rank 0] step:9481/10000 train_time:391782ms step_avg:41.32ms +[2025-09-11 11:41:12] [Rank 0] step:9481/10000 train_time:391782ms step_avg:41.32ms +[2025-09-11 11:41:12] [Rank 0] step:9501/10000 train_time:392493ms step_avg:41.31ms +[2025-09-11 11:41:12] [Rank 0] step:9501/10000 train_time:392493ms step_avg:41.31ms +[2025-09-11 11:41:13] [Rank 0] step:9521/10000 train_time:393206ms step_avg:41.30ms +[2025-09-11 11:41:13] [Rank 0] step:9521/10000 train_time:393206ms step_avg:41.30ms +[2025-09-11 11:41:14] [Rank 0] step:9541/10000 train_time:393913ms step_avg:41.29ms +[2025-09-11 11:41:14] [Rank 0] step:9541/10000 train_time:393913ms step_avg:41.29ms +[2025-09-11 11:41:15] [Rank 0] step:9561/10000 train_time:394622ms step_avg:41.27ms +[2025-09-11 11:41:15] [Rank 0] step:9561/10000 train_time:394622ms step_avg:41.27ms +[2025-09-11 11:41:15] [Rank 0] step:9581/10000 train_time:395333ms step_avg:41.26ms +[2025-09-11 11:41:15] [Rank 0] step:9581/10000 train_time:395333ms step_avg:41.26ms +[2025-09-11 11:41:16] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 11:41:16] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 11:41:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 11:41:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 11:41:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 11:41:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 11:41:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:41:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:41:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 11:41:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 11:41:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 11:41:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 11:41:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 11:41:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 11:41:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 11:41:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 11:41:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 11:41:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 11:41:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 11:41:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 11:41:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 11:41:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 11:41:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 11:41:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 11:41:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 11:41:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 11:41:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 11:41:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 11:41:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 11:41:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 11:41:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 11:41:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 11:41:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 11:41:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 11:41:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 11:41:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 11:41:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 11:41:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 11:41:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 11:41:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 11:41:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 11:41:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 11:41:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 11:41:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 11:41:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:41:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:41:27] [Rank 0] PRINT: step:9600/10000 val_loss:4.4373 total_sharp:4.4005e-04 L1_sharp:4.1771e-03 L2_sharp:2.6019e-03 L3_sharp:3.2201e-03 L4_sharp:1.3167e-03 L5_sharp:2.1277e-03 L6_sharp:5.1009e-03 L7_sharp:8.7921e-03 L8_sharp:1.4403e-02 L9_sharp:1.9606e-02 L10_sharp:2.8722e-02 L11_sharp:4.1411e-02 L12_sharp:2.1223e-01 total_fnorm:1.2344e+00 total_l1_linf:7.0000e+02 total_spectral:6.1719e-01 L1_fnorm:2.4658e-02 L2_fnorm:2.3926e-02 L3_fnorm:2.3804e-02 L4_fnorm:2.3682e-02 L5_fnorm:2.3560e-02 L6_fnorm:2.3560e-02 L7_fnorm:2.3438e-02 L8_fnorm:2.3193e-02 L9_fnorm:2.3315e-02 L10_fnorm:2.3071e-02 L11_fnorm:2.2949e-02 L12_fnorm:2.2583e-02 L1_l1linf:3.1281e-03 L2_l1linf:3.0212e-03 L3_l1linf:2.9907e-03 L4_l1linf:2.8992e-03 L5_l1linf:2.9297e-03 L6_l1linf:2.7924e-03 L7_l1linf:2.9907e-03 L8_l1linf:2.7466e-03 L9_l1linf:2.6245e-03 L10_l1linf:2.6550e-03 L11_l1linf:2.7008e-03 L12_l1linf:2.7924e-03 L1_spectral:4.2525e-04 L2_spectral:4.1331e-04 L3_spectral:4.0822e-04 L4_spectral:4.0190e-04 L5_spectral:3.9806e-04 L6_spectral:4.0024e-04 L7_spectral:3.9796e-04 L8_spectral:3.8015e-04 L9_spectral:3.9068e-04 L10_spectral:3.7686e-04 L11_spectral:3.7436e-04 L12_spectral:3.8933e-04 train_time:396020ms step_avg:41.25ms +[2025-09-11 11:41:27] [Rank 0] PRINT: step:9600/10000 
val_loss:4.4373 total_sharp:4.4005e-04 L1_sharp:4.1771e-03 L2_sharp:2.6019e-03 L3_sharp:3.2201e-03 L4_sharp:1.3167e-03 L5_sharp:2.1277e-03 L6_sharp:5.1009e-03 L7_sharp:8.7921e-03 L8_sharp:1.4403e-02 L9_sharp:1.9606e-02 L10_sharp:2.8722e-02 L11_sharp:4.1411e-02 L12_sharp:2.1223e-01 total_fnorm:1.2344e+00 total_l1_linf:7.0000e+02 total_spectral:6.1719e-01 L1_fnorm:2.4658e-02 L2_fnorm:2.3926e-02 L3_fnorm:2.3804e-02 L4_fnorm:2.3682e-02 L5_fnorm:2.3560e-02 L6_fnorm:2.3560e-02 L7_fnorm:2.3438e-02 L8_fnorm:2.3193e-02 L9_fnorm:2.3315e-02 L10_fnorm:2.3071e-02 L11_fnorm:2.2949e-02 L12_fnorm:2.2583e-02 L1_l1linf:3.1281e-03 L2_l1linf:3.0212e-03 L3_l1linf:2.9907e-03 L4_l1linf:2.8992e-03 L5_l1linf:2.9297e-03 L6_l1linf:2.7924e-03 L7_l1linf:2.9907e-03 L8_l1linf:2.7466e-03 L9_l1linf:2.6245e-03 L10_l1linf:2.6550e-03 L11_l1linf:2.7008e-03 L12_l1linf:2.7924e-03 L1_spectral:4.2525e-04 L2_spectral:4.1331e-04 L3_spectral:4.0822e-04 L4_spectral:4.0190e-04 L5_spectral:3.9806e-04 L6_spectral:4.0024e-04 L7_spectral:3.9796e-04 L8_spectral:3.8015e-04 L9_spectral:3.9068e-04 L10_spectral:3.7686e-04 L11_spectral:3.7436e-04 L12_spectral:3.8933e-04 train_time:396020ms step_avg:41.25ms +[2025-09-11 11:41:29] [Rank 0] step:9601/10000 train_time:398236ms step_avg:41.48ms +[2025-09-11 11:41:29] [Rank 0] step:9601/10000 train_time:398236ms step_avg:41.48ms +[2025-09-11 11:41:30] [Rank 0] step:9621/10000 train_time:398966ms step_avg:41.47ms +[2025-09-11 11:41:30] [Rank 0] step:9621/10000 train_time:398966ms step_avg:41.47ms +[2025-09-11 11:41:31] [Rank 0] step:9641/10000 train_time:399680ms step_avg:41.46ms +[2025-09-11 11:41:31] [Rank 0] step:9641/10000 train_time:399680ms step_avg:41.46ms +[2025-09-11 11:41:31] [Rank 0] step:9661/10000 train_time:400402ms step_avg:41.45ms +[2025-09-11 11:41:31] [Rank 0] step:9661/10000 train_time:400402ms step_avg:41.45ms +[2025-09-11 11:41:32] [Rank 0] step:9681/10000 train_time:401117ms step_avg:41.43ms +[2025-09-11 11:41:32] [Rank 0] step:9681/10000 
train_time:401117ms step_avg:41.43ms +[2025-09-11 11:41:33] [Rank 0] step:9701/10000 train_time:401832ms step_avg:41.42ms +[2025-09-11 11:41:33] [Rank 0] step:9701/10000 train_time:401832ms step_avg:41.42ms +[2025-09-11 11:41:34] [Rank 0] step:9721/10000 train_time:402553ms step_avg:41.41ms +[2025-09-11 11:41:34] [Rank 0] step:9721/10000 train_time:402553ms step_avg:41.41ms +[2025-09-11 11:41:34] [Rank 0] step:9741/10000 train_time:403530ms step_avg:41.43ms +[2025-09-11 11:41:34] [Rank 0] step:9741/10000 train_time:403530ms step_avg:41.43ms +[2025-09-11 11:41:35] [Rank 0] step:9761/10000 train_time:404248ms step_avg:41.41ms +[2025-09-11 11:41:35] [Rank 0] step:9761/10000 train_time:404248ms step_avg:41.41ms +[2025-09-11 11:41:36] [Rank 0] step:9781/10000 train_time:404964ms step_avg:41.40ms +[2025-09-11 11:41:36] [Rank 0] step:9781/10000 train_time:404964ms step_avg:41.40ms +[2025-09-11 11:41:37] [Rank 0] step:9801/10000 train_time:405961ms step_avg:41.42ms +[2025-09-11 11:41:37] [Rank 0] step:9801/10000 train_time:405961ms step_avg:41.42ms +[2025-09-11 11:41:38] [Rank 0] step:9821/10000 train_time:406680ms step_avg:41.41ms +[2025-09-11 11:41:38] [Rank 0] step:9821/10000 train_time:406680ms step_avg:41.41ms +[2025-09-11 11:41:38] [Rank 0] step:9841/10000 train_time:407401ms step_avg:41.40ms +[2025-09-11 11:41:38] [Rank 0] step:9841/10000 train_time:407401ms step_avg:41.40ms +[2025-09-11 11:41:39] [Rank 0] step:9861/10000 train_time:408118ms step_avg:41.39ms +[2025-09-11 11:41:39] [Rank 0] step:9861/10000 train_time:408118ms step_avg:41.39ms +[2025-09-11 11:41:40] [Rank 0] step:9881/10000 train_time:408834ms step_avg:41.38ms +[2025-09-11 11:41:40] [Rank 0] step:9881/10000 train_time:408834ms step_avg:41.38ms +[2025-09-11 11:41:40] [Rank 0] step:9901/10000 train_time:409549ms step_avg:41.36ms +[2025-09-11 11:41:40] [Rank 0] step:9901/10000 train_time:409549ms step_avg:41.36ms +[2025-09-11 11:41:41] [Rank 0] step:9921/10000 train_time:410265ms step_avg:41.35ms 
+[2025-09-11 11:41:41] [Rank 0] step:9921/10000 train_time:410265ms step_avg:41.35ms +[2025-09-11 11:41:42] [Rank 0] step:9941/10000 train_time:410986ms step_avg:41.34ms +[2025-09-11 11:41:42] [Rank 0] step:9941/10000 train_time:410986ms step_avg:41.34ms +[2025-09-11 11:41:43] [Rank 0] step:9961/10000 train_time:411709ms step_avg:41.33ms +[2025-09-11 11:41:43] [Rank 0] step:9961/10000 train_time:411709ms step_avg:41.33ms +[2025-09-11 11:41:43] [Rank 0] step:9981/10000 train_time:412427ms step_avg:41.32ms +[2025-09-11 11:41:43] [Rank 0] step:9981/10000 train_time:412427ms step_avg:41.32ms +[2025-09-11 11:41:44] [Rank 0] step:10000/10000 train_time:413118ms step_avg:41.31ms +[2025-09-11 11:41:44] [Rank 0] step:10000/10000 train_time:413118ms step_avg:41.31ms +[2025-09-11 11:41:44] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 11:41:44] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 11:41:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 11:41:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 11:41:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 11:41:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 11:41:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:41:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:41:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 11:41:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 11:41:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 11:41:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 11:41:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 11:41:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 11:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 11:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 11:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 11:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 11:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 11:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 11:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 11:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 11:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 11:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 11:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 11:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 11:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 11:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 11:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 11:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 11:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 11:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 11:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 11:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 11:41:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 11:41:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 11:41:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 11:41:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 11:41:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 11:41:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 11:41:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 11:41:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 11:41:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 11:41:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 11:41:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:41:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:41:55] [Rank 0] PRINT: step:10000/10000 val_loss:4.4352 total_sharp:3.0855e-04 L1_sharp:3.4234e-03 L2_sharp:9.5123e-04 L3_sharp:6.1723e-04 L4_sharp:3.2626e-03 L5_sharp:4.5142e-03 L6_sharp:4.6526e-03 L7_sharp:8.2444e-03 L8_sharp:1.1696e-02 L9_sharp:1.4790e-02 L10_sharp:2.1170e-02 L11_sharp:3.0155e-02 L12_sharp:1.6274e-01 total_fnorm:4.5898e-01 total_l1_linf:1.9100e+02 total_spectral:2.3047e-01 L1_fnorm:9.6436e-03 L2_fnorm:9.3994e-03 L3_fnorm:9.3384e-03 L4_fnorm:9.2773e-03 L5_fnorm:9.2163e-03 L6_fnorm:9.2163e-03 L7_fnorm:9.2163e-03 L8_fnorm:9.0942e-03 L9_fnorm:9.1553e-03 L10_fnorm:9.0332e-03 L11_fnorm:8.9722e-03 L12_fnorm:8.8501e-03 L1_l1linf:9.7656e-04 L2_l1linf:8.9264e-04 L3_l1linf:9.1553e-04 L4_l1linf:9.0790e-04 L5_l1linf:8.5831e-04 L6_l1linf:9.0408e-04 L7_l1linf:8.2779e-04 L8_l1linf:8.1253e-04 L9_l1linf:8.3542e-04 L10_l1linf:8.2016e-04 L11_l1linf:8.3160e-04 L12_l1linf:8.8120e-04 L1_spectral:1.6998e-04 L2_spectral:1.6577e-04 L3_spectral:1.6416e-04 L4_spectral:1.6173e-04 L5_spectral:1.6215e-04 L6_spectral:1.5935e-04 L7_spectral:1.6033e-04 L8_spectral:1.5487e-04 L9_spectral:1.5945e-04 L10_spectral:1.5188e-04 L11_spectral:1.5062e-04 L12_spectral:1.5738e-04 train_time:413139ms step_avg:41.31ms +[2025-09-11 11:41:55] [Rank 0] PRINT: step:10000/10000 val_loss:4.4352 total_sharp:3.0855e-04 L1_sharp:3.4234e-03 L2_sharp:9.5123e-04 L3_sharp:6.1723e-04 L4_sharp:3.2626e-03 L5_sharp:4.5142e-03 L6_sharp:4.6526e-03 L7_sharp:8.2444e-03 L8_sharp:1.1696e-02 L9_sharp:1.4790e-02 L10_sharp:2.1170e-02 L11_sharp:3.0155e-02 L12_sharp:1.6274e-01 total_fnorm:4.5898e-01 total_l1_linf:1.9100e+02 total_spectral:2.3047e-01 L1_fnorm:9.6436e-03 L2_fnorm:9.3994e-03 L3_fnorm:9.3384e-03 L4_fnorm:9.2773e-03 L5_fnorm:9.2163e-03 L6_fnorm:9.2163e-03 L7_fnorm:9.2163e-03 L8_fnorm:9.0942e-03 L9_fnorm:9.1553e-03 L10_fnorm:9.0332e-03 L11_fnorm:8.9722e-03 L12_fnorm:8.8501e-03 L1_l1linf:9.7656e-04 L2_l1linf:8.9264e-04 L3_l1linf:9.1553e-04 L4_l1linf:9.0790e-04 L5_l1linf:8.5831e-04 
L6_l1linf:9.0408e-04 L7_l1linf:8.2779e-04 L8_l1linf:8.1253e-04 L9_l1linf:8.3542e-04 L10_l1linf:8.2016e-04 L11_l1linf:8.3160e-04 L12_l1linf:8.8120e-04 L1_spectral:1.6998e-04 L2_spectral:1.6577e-04 L3_spectral:1.6416e-04 L4_spectral:1.6173e-04 L5_spectral:1.6215e-04 L6_spectral:1.5935e-04 L7_spectral:1.6033e-04 L8_spectral:1.5487e-04 L9_spectral:1.5945e-04 L10_spectral:1.5188e-04 L11_spectral:1.5062e-04 L12_spectral:1.5738e-04 train_time:413139ms step_avg:41.31ms +[2025-09-11 11:41:55] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 11:41:55 2025 --- +[2025-09-11 11:41:55] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 11:41:55 2025 --- +[2025-09-11 11:41:55] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 11:41:55] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.01_seed_44/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.01_seed_44/config.json new file mode 100644 index 0000000000000000000000000000000000000000..6ce503ebc9fed38c61f7f655cf68f8629a53efc8 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.01_seed_44/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 44, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.005, + "muon_lr": 0.01, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "5bbce0aa-b752-4b4c-b6eb-1fe4fa9fd923", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.01_seed_44/training_log_5bbce0aa-b752-4b4c-b6eb-1fe4fa9fd923.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.01_seed_44/training_log_5bbce0aa-b752-4b4c-b6eb-1fe4fa9fd923.txt new file mode 100644 index 0000000000000000000000000000000000000000..b9358e45b16a4a07ed8ea8879dbf3f9f6526d355 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.01_seed_44/training_log_5bbce0aa-b752-4b4c-b6eb-1fe4fa9fd923.txt @@ -0,0 +1,4264 @@ +[2025-09-11 10:05:34] [Rank 0] PRINT: --- Script Start: Thu Sep 11 10:05:34 2025 --- +[2025-09-11 10:05:34] [Rank 0] PRINT: --- Script Start: Thu Sep 11 10:05:34 2025 --- +[2025-09-11 10:05:34] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005, muon_lr=0.01, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 10:05:34] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005, muon_lr=0.01, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 10:05:34] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 10:05:34] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 10:05:34] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 10:05:34] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 10:05:34] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.01_seed_44 +[2025-09-11 10:05:34] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.01_seed_44 +[2025-09-11 10:05:34] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import 
dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, 
"unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." 
+ "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention 
sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 10:05:34] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 10:05:34] [Rank 0] PRINT: Constructing model... +[2025-09-11 10:05:34] [Rank 0] PRINT: Constructing model... +[2025-09-11 10:05:35] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 10:05:35] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 10:05:35] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 10:05:35] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 10:05:35] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 10:05:35] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 10:05:35] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 10:05:35] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 10:05:35] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 10:05:35] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 10:05:37] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 10:05:37] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 10:05:37] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 10:05:37] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 10:05:37] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 10:05:37] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 10:05:43] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 10:05:43] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 10:05:43] [Rank 0] PRINT: Starting warmup... +[2025-09-11 10:05:43] [Rank 0] PRINT: Starting warmup... +[2025-09-11 10:06:19] [Rank 0] PRINT: Warmup complete. +[2025-09-11 10:06:19] [Rank 0] PRINT: Warmup complete. +[2025-09-11 10:06:19] [Rank 0] PRINT: Starting training... +[2025-09-11 10:06:19] [Rank 0] PRINT: Starting training... 
+[2025-09-11 10:06:20] [Rank 0] step:21/10000 train_time:1130ms step_avg:53.81ms +[2025-09-11 10:06:20] [Rank 0] step:21/10000 train_time:1130ms step_avg:53.81ms +[2025-09-11 10:06:21] [Rank 0] step:41/10000 train_time:1854ms step_avg:45.22ms +[2025-09-11 10:06:21] [Rank 0] step:41/10000 train_time:1854ms step_avg:45.22ms +[2025-09-11 10:06:22] [Rank 0] step:61/10000 train_time:2578ms step_avg:42.27ms +[2025-09-11 10:06:22] [Rank 0] step:61/10000 train_time:2578ms step_avg:42.27ms +[2025-09-11 10:06:22] [Rank 0] step:81/10000 train_time:3302ms step_avg:40.76ms +[2025-09-11 10:06:22] [Rank 0] step:81/10000 train_time:3302ms step_avg:40.76ms +[2025-09-11 10:06:23] [Rank 0] step:101/10000 train_time:4025ms step_avg:39.85ms +[2025-09-11 10:06:23] [Rank 0] step:101/10000 train_time:4025ms step_avg:39.85ms +[2025-09-11 10:06:24] [Rank 0] step:121/10000 train_time:4750ms step_avg:39.26ms +[2025-09-11 10:06:24] [Rank 0] step:121/10000 train_time:4750ms step_avg:39.26ms +[2025-09-11 10:06:25] [Rank 0] step:141/10000 train_time:5473ms step_avg:38.82ms +[2025-09-11 10:06:25] [Rank 0] step:141/10000 train_time:5473ms step_avg:38.82ms +[2025-09-11 10:06:25] [Rank 0] step:161/10000 train_time:6196ms step_avg:38.48ms +[2025-09-11 10:06:25] [Rank 0] step:161/10000 train_time:6196ms step_avg:38.48ms +[2025-09-11 10:06:26] [Rank 0] step:181/10000 train_time:6918ms step_avg:38.22ms +[2025-09-11 10:06:26] [Rank 0] step:181/10000 train_time:6918ms step_avg:38.22ms +[2025-09-11 10:06:27] [Rank 0] step:201/10000 train_time:7642ms step_avg:38.02ms +[2025-09-11 10:06:27] [Rank 0] step:201/10000 train_time:7642ms step_avg:38.02ms +[2025-09-11 10:06:27] [Rank 0] step:221/10000 train_time:8367ms step_avg:37.86ms +[2025-09-11 10:06:27] [Rank 0] step:221/10000 train_time:8367ms step_avg:37.86ms +[2025-09-11 10:06:28] [Rank 0] step:241/10000 train_time:9090ms step_avg:37.72ms +[2025-09-11 10:06:28] [Rank 0] step:241/10000 train_time:9090ms step_avg:37.72ms +[2025-09-11 10:06:29] [Rank 0] 
step:261/10000 train_time:9813ms step_avg:37.60ms +[2025-09-11 10:06:29] [Rank 0] step:261/10000 train_time:9813ms step_avg:37.60ms +[2025-09-11 10:06:30] [Rank 0] step:281/10000 train_time:10537ms step_avg:37.50ms +[2025-09-11 10:06:30] [Rank 0] step:281/10000 train_time:10537ms step_avg:37.50ms +[2025-09-11 10:06:30] [Rank 0] step:301/10000 train_time:11260ms step_avg:37.41ms +[2025-09-11 10:06:30] [Rank 0] step:301/10000 train_time:11260ms step_avg:37.41ms +[2025-09-11 10:06:31] [Rank 0] step:321/10000 train_time:11982ms step_avg:37.33ms +[2025-09-11 10:06:31] [Rank 0] step:321/10000 train_time:11982ms step_avg:37.33ms +[2025-09-11 10:06:32] [Rank 0] step:341/10000 train_time:12706ms step_avg:37.26ms +[2025-09-11 10:06:32] [Rank 0] step:341/10000 train_time:12706ms step_avg:37.26ms +[2025-09-11 10:06:33] [Rank 0] step:361/10000 train_time:13429ms step_avg:37.20ms +[2025-09-11 10:06:33] [Rank 0] step:361/10000 train_time:13429ms step_avg:37.20ms +[2025-09-11 10:06:33] [Rank 0] step:381/10000 train_time:14152ms step_avg:37.14ms +[2025-09-11 10:06:33] [Rank 0] step:381/10000 train_time:14152ms step_avg:37.14ms +[2025-09-11 10:06:34] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 10:06:34] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 10:06:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 10:06:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 10:07:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 10:07:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 10:07:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:07:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 10:07:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 10:07:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 10:07:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 10:07:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 10:07:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 10:07:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 10:07:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 10:07:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 10:07:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 10:07:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 10:07:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 10:07:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 10:07:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 10:07:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 10:07:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 10:07:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 10:07:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 10:07:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 10:07:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 10:07:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 10:07:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 10:07:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 10:07:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 10:07:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 10:07:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 10:07:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 10:07:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 10:07:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 10:07:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 10:07:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 10:07:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 10:07:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 10:07:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 10:07:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 10:07:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 10:07:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 10:07:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:07:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:07:21] [Rank 0] PRINT: step:400/10000 val_loss:6.2530 total_sharp:6.1002e-03 L1_sharp:3.5314e-02 L2_sharp:2.0011e-02 L3_sharp:1.7873e-02 L4_sharp:1.7750e-02 L5_sharp:1.8292e-02 L6_sharp:1.5656e-02 L7_sharp:1.5446e-02 L8_sharp:1.2433e-02 L9_sharp:1.5401e-02 L10_sharp:1.6077e-02 L11_sharp:1.6593e-02 L12_sharp:2.9503e-02 total_fnorm:2.0525e+01 total_l1_linf:6.0188e+04 total_spectral:1.0262e+01 L1_fnorm:1.2217e+00 L2_fnorm:1.2161e+00 L3_fnorm:1.2186e+00 L4_fnorm:1.2258e+00 L5_fnorm:1.2133e+00 L6_fnorm:1.1928e+00 L7_fnorm:1.1716e+00 L8_fnorm:1.1404e+00 L9_fnorm:1.1092e+00 L10_fnorm:1.0699e+00 L11_fnorm:1.0344e+00 L12_fnorm:9.6776e-01 L1_l1linf:3.9772e-01 L2_l1linf:4.0347e-01 L3_l1linf:4.0756e-01 L4_l1linf:4.1862e-01 L5_l1linf:4.1464e-01 L6_l1linf:4.1226e-01 L7_l1linf:4.1065e-01 L8_l1linf:4.0183e-01 L9_l1linf:3.8497e-01 L10_l1linf:3.6209e-01 L11_l1linf:3.3314e-01 L12_l1linf:2.9987e-01 L1_spectral:1.2041e-02 L2_spectral:1.2046e-02 L3_spectral:1.2045e-02 L4_spectral:1.2041e-02 L5_spectral:1.2054e-02 L6_spectral:1.2047e-02 L7_spectral:1.2040e-02 L8_spectral:1.2036e-02 L9_spectral:1.2040e-02 L10_spectral:1.2039e-02 L11_spectral:1.2035e-02 L12_spectral:1.2031e-02 train_time:14855ms step_avg:37.14ms +[2025-09-11 10:07:21] [Rank 0] PRINT: step:400/10000 val_loss:6.2530 total_sharp:6.1002e-03 L1_sharp:3.5314e-02 L2_sharp:2.0011e-02 L3_sharp:1.7873e-02 L4_sharp:1.7750e-02 L5_sharp:1.8292e-02 L6_sharp:1.5656e-02 L7_sharp:1.5446e-02 L8_sharp:1.2433e-02 L9_sharp:1.5401e-02 L10_sharp:1.6077e-02 L11_sharp:1.6593e-02 L12_sharp:2.9503e-02 total_fnorm:2.0525e+01 total_l1_linf:6.0188e+04 total_spectral:1.0262e+01 L1_fnorm:1.2217e+00 L2_fnorm:1.2161e+00 L3_fnorm:1.2186e+00 L4_fnorm:1.2258e+00 L5_fnorm:1.2133e+00 L6_fnorm:1.1928e+00 L7_fnorm:1.1716e+00 L8_fnorm:1.1404e+00 L9_fnorm:1.1092e+00 L10_fnorm:1.0699e+00 L11_fnorm:1.0344e+00 L12_fnorm:9.6776e-01 L1_l1linf:3.9772e-01 L2_l1linf:4.0347e-01 L3_l1linf:4.0756e-01 L4_l1linf:4.1862e-01 L5_l1linf:4.1464e-01 
L6_l1linf:4.1226e-01 L7_l1linf:4.1065e-01 L8_l1linf:4.0183e-01 L9_l1linf:3.8497e-01 L10_l1linf:3.6209e-01 L11_l1linf:3.3314e-01 L12_l1linf:2.9987e-01 L1_spectral:1.2041e-02 L2_spectral:1.2046e-02 L3_spectral:1.2045e-02 L4_spectral:1.2041e-02 L5_spectral:1.2054e-02 L6_spectral:1.2047e-02 L7_spectral:1.2040e-02 L8_spectral:1.2036e-02 L9_spectral:1.2040e-02 L10_spectral:1.2039e-02 L11_spectral:1.2035e-02 L12_spectral:1.2031e-02 train_time:14855ms step_avg:37.14ms +[2025-09-11 10:07:52] [Rank 0] step:401/10000 train_time:46151ms step_avg:115.09ms +[2025-09-11 10:07:52] [Rank 0] step:401/10000 train_time:46151ms step_avg:115.09ms +[2025-09-11 10:07:54] [Rank 0] step:421/10000 train_time:48100ms step_avg:114.25ms +[2025-09-11 10:07:54] [Rank 0] step:421/10000 train_time:48100ms step_avg:114.25ms +[2025-09-11 10:07:55] [Rank 0] step:441/10000 train_time:48736ms step_avg:110.51ms +[2025-09-11 10:07:55] [Rank 0] step:441/10000 train_time:48736ms step_avg:110.51ms +[2025-09-11 10:07:56] [Rank 0] step:461/10000 train_time:49371ms step_avg:107.10ms +[2025-09-11 10:07:56] [Rank 0] step:461/10000 train_time:49371ms step_avg:107.10ms +[2025-09-11 10:07:56] [Rank 0] step:481/10000 train_time:50006ms step_avg:103.96ms +[2025-09-11 10:07:56] [Rank 0] step:481/10000 train_time:50006ms step_avg:103.96ms +[2025-09-11 10:07:57] [Rank 0] step:501/10000 train_time:50641ms step_avg:101.08ms +[2025-09-11 10:07:57] [Rank 0] step:501/10000 train_time:50641ms step_avg:101.08ms +[2025-09-11 10:07:57] [Rank 0] step:521/10000 train_time:51275ms step_avg:98.42ms +[2025-09-11 10:07:57] [Rank 0] step:521/10000 train_time:51275ms step_avg:98.42ms +[2025-09-11 10:07:58] [Rank 0] step:541/10000 train_time:51910ms step_avg:95.95ms +[2025-09-11 10:07:58] [Rank 0] step:541/10000 train_time:51910ms step_avg:95.95ms +[2025-09-11 10:07:59] [Rank 0] step:561/10000 train_time:52545ms step_avg:93.66ms +[2025-09-11 10:07:59] [Rank 0] step:561/10000 train_time:52545ms step_avg:93.66ms +[2025-09-11 10:07:59] [Rank 
0] step:581/10000 train_time:53182ms step_avg:91.54ms +[2025-09-11 10:07:59] [Rank 0] step:581/10000 train_time:53182ms step_avg:91.54ms +[2025-09-11 10:08:00] [Rank 0] step:601/10000 train_time:53816ms step_avg:89.54ms +[2025-09-11 10:08:00] [Rank 0] step:601/10000 train_time:53816ms step_avg:89.54ms +[2025-09-11 10:08:01] [Rank 0] step:621/10000 train_time:54450ms step_avg:87.68ms +[2025-09-11 10:08:01] [Rank 0] step:621/10000 train_time:54450ms step_avg:87.68ms +[2025-09-11 10:08:01] [Rank 0] step:641/10000 train_time:55084ms step_avg:85.93ms +[2025-09-11 10:08:01] [Rank 0] step:641/10000 train_time:55084ms step_avg:85.93ms +[2025-09-11 10:08:02] [Rank 0] step:661/10000 train_time:55718ms step_avg:84.29ms +[2025-09-11 10:08:02] [Rank 0] step:661/10000 train_time:55718ms step_avg:84.29ms +[2025-09-11 10:08:03] [Rank 0] step:681/10000 train_time:56352ms step_avg:82.75ms +[2025-09-11 10:08:03] [Rank 0] step:681/10000 train_time:56352ms step_avg:82.75ms +[2025-09-11 10:08:03] [Rank 0] step:701/10000 train_time:56985ms step_avg:81.29ms +[2025-09-11 10:08:03] [Rank 0] step:701/10000 train_time:56985ms step_avg:81.29ms +[2025-09-11 10:08:04] [Rank 0] step:721/10000 train_time:57619ms step_avg:79.91ms +[2025-09-11 10:08:04] [Rank 0] step:721/10000 train_time:57619ms step_avg:79.91ms +[2025-09-11 10:08:04] [Rank 0] step:741/10000 train_time:58252ms step_avg:78.61ms +[2025-09-11 10:08:04] [Rank 0] step:741/10000 train_time:58252ms step_avg:78.61ms +[2025-09-11 10:08:05] [Rank 0] step:761/10000 train_time:58891ms step_avg:77.39ms +[2025-09-11 10:08:05] [Rank 0] step:761/10000 train_time:58891ms step_avg:77.39ms +[2025-09-11 10:08:06] [Rank 0] step:781/10000 train_time:59529ms step_avg:76.22ms +[2025-09-11 10:08:06] [Rank 0] step:781/10000 train_time:59529ms step_avg:76.22ms +[2025-09-11 10:08:06] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 10:08:06] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 10:08:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 10:08:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 10:08:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 10:08:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 10:08:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:08:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:08:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 10:08:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 10:08:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 10:08:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 10:08:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 10:08:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 10:08:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 10:08:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 10:08:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 10:08:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 10:08:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 10:08:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 10:08:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 10:08:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 10:08:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 10:08:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 10:08:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 10:08:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 10:08:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 10:08:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 10:08:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 10:08:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 10:08:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 10:08:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 10:08:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 10:08:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 10:08:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 10:08:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 10:08:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 10:08:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 10:08:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 10:08:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 10:08:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... 
+[2025-09-11 10:08:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 10:08:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 10:08:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 10:08:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:08:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:08:50] [Rank 0] PRINT: step:800/10000 val_loss:5.8235 total_sharp:5.5826e-03 L1_sharp:1.7705e-02 L2_sharp:9.4901e-03 L3_sharp:6.4335e-03 L4_sharp:8.1926e-03 L5_sharp:9.8865e-03 L6_sharp:1.0509e-02 L7_sharp:7.7866e-03 L8_sharp:1.1326e-02 L9_sharp:1.1808e-02 L10_sharp:1.3927e-02 L11_sharp:2.1714e-02 L12_sharp:4.0657e-02 total_fnorm:1.9000e+01 total_l1_linf:4.1984e+04 total_spectral:9.5000e+00 L1_fnorm:1.2500e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2344e+00 L6_fnorm:1.2344e+00 L7_fnorm:1.2344e+00 L8_fnorm:1.1797e+00 L9_fnorm:1.1953e+00 L10_fnorm:1.1641e+00 L11_fnorm:1.1172e+00 L12_fnorm:9.7656e-01 L1_l1linf:3.8672e-01 L2_l1linf:3.8281e-01 L3_l1linf:3.8477e-01 L4_l1linf:3.9648e-01 L5_l1linf:4.0039e-01 L6_l1linf:3.9648e-01 L7_l1linf:4.0234e-01 L8_l1linf:4.0234e-01 L9_l1linf:3.8477e-01 L10_l1linf:3.6328e-01 L11_l1linf:3.1250e-01 L12_l1linf:2.3438e-01 L1_spectral:1.3550e-02 L2_spectral:1.3613e-02 L3_spectral:1.3510e-02 L4_spectral:1.3483e-02 L5_spectral:1.3487e-02 L6_spectral:1.3505e-02 L7_spectral:1.3571e-02 L8_spectral:1.3522e-02 L9_spectral:1.3602e-02 L10_spectral:1.3588e-02 L11_spectral:1.3575e-02 L12_spectral:1.3424e-02 train_time:60150ms step_avg:75.19ms +[2025-09-11 10:08:50] [Rank 0] PRINT: step:800/10000 val_loss:5.8235 total_sharp:5.5826e-03 L1_sharp:1.7705e-02 L2_sharp:9.4901e-03 L3_sharp:6.4335e-03 L4_sharp:8.1926e-03 L5_sharp:9.8865e-03 L6_sharp:1.0509e-02 L7_sharp:7.7866e-03 L8_sharp:1.1326e-02 
L9_sharp:1.1808e-02 L10_sharp:1.3927e-02 L11_sharp:2.1714e-02 L12_sharp:4.0657e-02 total_fnorm:1.9000e+01 total_l1_linf:4.1984e+04 total_spectral:9.5000e+00 L1_fnorm:1.2500e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2344e+00 L6_fnorm:1.2344e+00 L7_fnorm:1.2344e+00 L8_fnorm:1.1797e+00 L9_fnorm:1.1953e+00 L10_fnorm:1.1641e+00 L11_fnorm:1.1172e+00 L12_fnorm:9.7656e-01 L1_l1linf:3.8672e-01 L2_l1linf:3.8281e-01 L3_l1linf:3.8477e-01 L4_l1linf:3.9648e-01 L5_l1linf:4.0039e-01 L6_l1linf:3.9648e-01 L7_l1linf:4.0234e-01 L8_l1linf:4.0234e-01 L9_l1linf:3.8477e-01 L10_l1linf:3.6328e-01 L11_l1linf:3.1250e-01 L12_l1linf:2.3438e-01 L1_spectral:1.3550e-02 L2_spectral:1.3613e-02 L3_spectral:1.3510e-02 L4_spectral:1.3483e-02 L5_spectral:1.3487e-02 L6_spectral:1.3505e-02 L7_spectral:1.3571e-02 L8_spectral:1.3522e-02 L9_spectral:1.3602e-02 L10_spectral:1.3588e-02 L11_spectral:1.3575e-02 L12_spectral:1.3424e-02 train_time:60150ms step_avg:75.19ms +[2025-09-11 10:08:51] [Rank 0] step:801/10000 train_time:61345ms step_avg:76.59ms +[2025-09-11 10:08:51] [Rank 0] step:801/10000 train_time:61345ms step_avg:76.59ms +[2025-09-11 10:08:52] [Rank 0] step:821/10000 train_time:61988ms step_avg:75.50ms +[2025-09-11 10:08:52] [Rank 0] step:821/10000 train_time:61988ms step_avg:75.50ms +[2025-09-11 10:08:52] [Rank 0] step:841/10000 train_time:62628ms step_avg:74.47ms +[2025-09-11 10:08:52] [Rank 0] step:841/10000 train_time:62628ms step_avg:74.47ms +[2025-09-11 10:08:53] [Rank 0] step:861/10000 train_time:63269ms step_avg:73.48ms +[2025-09-11 10:08:53] [Rank 0] step:861/10000 train_time:63269ms step_avg:73.48ms +[2025-09-11 10:08:54] [Rank 0] step:881/10000 train_time:63909ms step_avg:72.54ms +[2025-09-11 10:08:54] [Rank 0] step:881/10000 train_time:63909ms step_avg:72.54ms +[2025-09-11 10:08:54] [Rank 0] step:901/10000 train_time:64548ms step_avg:71.64ms +[2025-09-11 10:08:54] [Rank 0] step:901/10000 train_time:64548ms step_avg:71.64ms +[2025-09-11 10:08:55] [Rank 0] 
step:921/10000 train_time:65187ms step_avg:70.78ms +[2025-09-11 10:08:55] [Rank 0] step:921/10000 train_time:65187ms step_avg:70.78ms +[2025-09-11 10:08:56] [Rank 0] step:941/10000 train_time:65825ms step_avg:69.95ms +[2025-09-11 10:08:56] [Rank 0] step:941/10000 train_time:65825ms step_avg:69.95ms +[2025-09-11 10:08:56] [Rank 0] step:961/10000 train_time:66464ms step_avg:69.16ms +[2025-09-11 10:08:56] [Rank 0] step:961/10000 train_time:66464ms step_avg:69.16ms +[2025-09-11 10:08:57] [Rank 0] step:981/10000 train_time:67103ms step_avg:68.40ms +[2025-09-11 10:08:57] [Rank 0] step:981/10000 train_time:67103ms step_avg:68.40ms +[2025-09-11 10:08:57] [Rank 0] step:1001/10000 train_time:67742ms step_avg:67.67ms +[2025-09-11 10:08:57] [Rank 0] step:1001/10000 train_time:67742ms step_avg:67.67ms +[2025-09-11 10:08:58] [Rank 0] step:1021/10000 train_time:68380ms step_avg:66.97ms +[2025-09-11 10:08:58] [Rank 0] step:1021/10000 train_time:68380ms step_avg:66.97ms +[2025-09-11 10:08:59] [Rank 0] step:1041/10000 train_time:69019ms step_avg:66.30ms +[2025-09-11 10:08:59] [Rank 0] step:1041/10000 train_time:69019ms step_avg:66.30ms +[2025-09-11 10:08:59] [Rank 0] step:1061/10000 train_time:69658ms step_avg:65.65ms +[2025-09-11 10:08:59] [Rank 0] step:1061/10000 train_time:69658ms step_avg:65.65ms +[2025-09-11 10:09:00] [Rank 0] step:1081/10000 train_time:70297ms step_avg:65.03ms +[2025-09-11 10:09:00] [Rank 0] step:1081/10000 train_time:70297ms step_avg:65.03ms +[2025-09-11 10:09:01] [Rank 0] step:1101/10000 train_time:70936ms step_avg:64.43ms +[2025-09-11 10:09:01] [Rank 0] step:1101/10000 train_time:70936ms step_avg:64.43ms +[2025-09-11 10:09:01] [Rank 0] step:1121/10000 train_time:71574ms step_avg:63.85ms +[2025-09-11 10:09:01] [Rank 0] step:1121/10000 train_time:71574ms step_avg:63.85ms +[2025-09-11 10:09:02] [Rank 0] step:1141/10000 train_time:72215ms step_avg:63.29ms +[2025-09-11 10:09:02] [Rank 0] step:1141/10000 train_time:72215ms step_avg:63.29ms +[2025-09-11 10:09:03] 
[Rank 0] step:1161/10000 train_time:72854ms step_avg:62.75ms +[2025-09-11 10:09:03] [Rank 0] step:1161/10000 train_time:72854ms step_avg:62.75ms +[2025-09-11 10:09:03] [Rank 0] step:1181/10000 train_time:73492ms step_avg:62.23ms +[2025-09-11 10:09:03] [Rank 0] step:1181/10000 train_time:73492ms step_avg:62.23ms +[2025-09-11 10:09:04] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 10:09:04] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 10:09:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 10:09:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 10:09:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 10:09:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 10:09:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:09:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:09:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 10:09:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 10:09:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 10:09:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 10:09:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 10:09:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 10:09:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... 
+[2025-09-11 10:09:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 10:09:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 10:09:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 10:09:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 10:09:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 10:09:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 10:09:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 10:09:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 10:09:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 10:09:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 10:09:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 10:09:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 10:09:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 10:09:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 10:09:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 10:09:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 10:09:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 10:09:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 10:09:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 10:09:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... 
+[2025-09-11 10:09:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 10:09:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 10:09:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 10:09:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 10:09:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 10:09:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 10:09:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 10:09:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 10:09:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 10:09:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:09:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:09:14] [Rank 0] PRINT: step:1200/10000 val_loss:5.4846 total_sharp:3.9363e-03 L1_sharp:2.2486e-02 L2_sharp:6.9369e-03 L3_sharp:4.5504e-03 L4_sharp:5.4491e-03 L5_sharp:4.9730e-03 L6_sharp:4.2570e-03 L7_sharp:5.0854e-03 L8_sharp:1.1487e-02 L9_sharp:8.1252e-03 L10_sharp:1.0564e-02 L11_sharp:1.3718e-02 L12_sharp:3.8385e-02 total_fnorm:1.9375e+01 total_l1_linf:4.0960e+04 total_spectral:9.6250e+00 L1_fnorm:1.2656e+00 L2_fnorm:1.2422e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2422e+00 L6_fnorm:1.2500e+00 L7_fnorm:1.2500e+00 L8_fnorm:1.2266e+00 L9_fnorm:1.2500e+00 L10_fnorm:1.2422e+00 L11_fnorm:1.2344e+00 L12_fnorm:1.1484e+00 L1_l1linf:3.7305e-01 L2_l1linf:3.6523e-01 L3_l1linf:3.5742e-01 L4_l1linf:3.6719e-01 L5_l1linf:3.6719e-01 L6_l1linf:3.6914e-01 L7_l1linf:3.7500e-01 L8_l1linf:3.8086e-01 L9_l1linf:3.7891e-01 L10_l1linf:3.8281e-01 L11_l1linf:3.6133e-01 L12_l1linf:2.8711e-01 L1_spectral:1.4102e-02 L2_spectral:1.4078e-02 L3_spectral:1.3958e-02 L4_spectral:1.3927e-02 L5_spectral:1.3898e-02 L6_spectral:1.3824e-02 L7_spectral:1.3887e-02 L8_spectral:1.3923e-02 L9_spectral:1.3823e-02 L10_spectral:1.3867e-02 L11_spectral:1.3945e-02 L12_spectral:1.4122e-02 train_time:74113ms step_avg:61.76ms +[2025-09-11 10:09:14] [Rank 0] PRINT: step:1200/10000 val_loss:5.4846 total_sharp:3.9363e-03 L1_sharp:2.2486e-02 L2_sharp:6.9369e-03 L3_sharp:4.5504e-03 L4_sharp:5.4491e-03 L5_sharp:4.9730e-03 L6_sharp:4.2570e-03 L7_sharp:5.0854e-03 L8_sharp:1.1487e-02 L9_sharp:8.1252e-03 L10_sharp:1.0564e-02 L11_sharp:1.3718e-02 L12_sharp:3.8385e-02 total_fnorm:1.9375e+01 total_l1_linf:4.0960e+04 total_spectral:9.6250e+00 L1_fnorm:1.2656e+00 L2_fnorm:1.2422e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2422e+00 L6_fnorm:1.2500e+00 L7_fnorm:1.2500e+00 L8_fnorm:1.2266e+00 L9_fnorm:1.2500e+00 L10_fnorm:1.2422e+00 L11_fnorm:1.2344e+00 L12_fnorm:1.1484e+00 L1_l1linf:3.7305e-01 L2_l1linf:3.6523e-01 L3_l1linf:3.5742e-01 L4_l1linf:3.6719e-01 L5_l1linf:3.6719e-01 
L6_l1linf:3.6914e-01 L7_l1linf:3.7500e-01 L8_l1linf:3.8086e-01 L9_l1linf:3.7891e-01 L10_l1linf:3.8281e-01 L11_l1linf:3.6133e-01 L12_l1linf:2.8711e-01 L1_spectral:1.4102e-02 L2_spectral:1.4078e-02 L3_spectral:1.3958e-02 L4_spectral:1.3927e-02 L5_spectral:1.3898e-02 L6_spectral:1.3824e-02 L7_spectral:1.3887e-02 L8_spectral:1.3923e-02 L9_spectral:1.3823e-02 L10_spectral:1.3867e-02 L11_spectral:1.3945e-02 L12_spectral:1.4122e-02 train_time:74113ms step_avg:61.76ms +[2025-09-11 10:09:15] [Rank 0] step:1201/10000 train_time:75276ms step_avg:62.68ms +[2025-09-11 10:09:15] [Rank 0] step:1201/10000 train_time:75276ms step_avg:62.68ms +[2025-09-11 10:09:16] [Rank 0] step:1221/10000 train_time:75904ms step_avg:62.17ms +[2025-09-11 10:09:16] [Rank 0] step:1221/10000 train_time:75904ms step_avg:62.17ms +[2025-09-11 10:09:16] [Rank 0] step:1241/10000 train_time:76545ms step_avg:61.68ms +[2025-09-11 10:09:16] [Rank 0] step:1241/10000 train_time:76545ms step_avg:61.68ms +[2025-09-11 10:09:17] [Rank 0] step:1261/10000 train_time:77185ms step_avg:61.21ms +[2025-09-11 10:09:17] [Rank 0] step:1261/10000 train_time:77185ms step_avg:61.21ms +[2025-09-11 10:09:17] [Rank 0] step:1281/10000 train_time:77825ms step_avg:60.75ms +[2025-09-11 10:09:17] [Rank 0] step:1281/10000 train_time:77825ms step_avg:60.75ms +[2025-09-11 10:09:18] [Rank 0] step:1301/10000 train_time:78464ms step_avg:60.31ms +[2025-09-11 10:09:18] [Rank 0] step:1301/10000 train_time:78464ms step_avg:60.31ms +[2025-09-11 10:09:19] [Rank 0] step:1321/10000 train_time:79103ms step_avg:59.88ms +[2025-09-11 10:09:19] [Rank 0] step:1321/10000 train_time:79103ms step_avg:59.88ms +[2025-09-11 10:09:19] [Rank 0] step:1341/10000 train_time:79742ms step_avg:59.46ms +[2025-09-11 10:09:19] [Rank 0] step:1341/10000 train_time:79742ms step_avg:59.46ms +[2025-09-11 10:09:20] [Rank 0] step:1361/10000 train_time:80381ms step_avg:59.06ms +[2025-09-11 10:09:20] [Rank 0] step:1361/10000 train_time:80381ms step_avg:59.06ms +[2025-09-11 10:09:21] 
[Rank 0] step:1381/10000 train_time:81020ms step_avg:58.67ms +[2025-09-11 10:09:21] [Rank 0] step:1381/10000 train_time:81020ms step_avg:58.67ms +[2025-09-11 10:09:22] [Rank 0] step:1401/10000 train_time:82268ms step_avg:58.72ms +[2025-09-11 10:09:22] [Rank 0] step:1401/10000 train_time:82268ms step_avg:58.72ms +[2025-09-11 10:09:23] [Rank 0] step:1421/10000 train_time:82908ms step_avg:58.35ms +[2025-09-11 10:09:23] [Rank 0] step:1421/10000 train_time:82908ms step_avg:58.35ms +[2025-09-11 10:09:23] [Rank 0] step:1441/10000 train_time:83547ms step_avg:57.98ms +[2025-09-11 10:09:23] [Rank 0] step:1441/10000 train_time:83547ms step_avg:57.98ms +[2025-09-11 10:09:24] [Rank 0] step:1461/10000 train_time:84478ms step_avg:57.82ms +[2025-09-11 10:09:24] [Rank 0] step:1461/10000 train_time:84478ms step_avg:57.82ms +[2025-09-11 10:09:25] [Rank 0] step:1481/10000 train_time:85117ms step_avg:57.47ms +[2025-09-11 10:09:25] [Rank 0] step:1481/10000 train_time:85117ms step_avg:57.47ms +[2025-09-11 10:09:25] [Rank 0] step:1501/10000 train_time:85760ms step_avg:57.14ms +[2025-09-11 10:09:25] [Rank 0] step:1501/10000 train_time:85760ms step_avg:57.14ms +[2025-09-11 10:09:26] [Rank 0] step:1521/10000 train_time:86403ms step_avg:56.81ms +[2025-09-11 10:09:26] [Rank 0] step:1521/10000 train_time:86403ms step_avg:56.81ms +[2025-09-11 10:09:27] [Rank 0] step:1541/10000 train_time:87046ms step_avg:56.49ms +[2025-09-11 10:09:27] [Rank 0] step:1541/10000 train_time:87046ms step_avg:56.49ms +[2025-09-11 10:09:27] [Rank 0] step:1561/10000 train_time:87690ms step_avg:56.18ms +[2025-09-11 10:09:27] [Rank 0] step:1561/10000 train_time:87690ms step_avg:56.18ms +[2025-09-11 10:09:28] [Rank 0] step:1581/10000 train_time:88332ms step_avg:55.87ms +[2025-09-11 10:09:28] [Rank 0] step:1581/10000 train_time:88332ms step_avg:55.87ms +[2025-09-11 10:09:29] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 10:09:29] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 10:09:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 10:09:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 10:09:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 10:09:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 10:09:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:09:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:09:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 10:09:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 10:09:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 10:09:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 10:09:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 10:09:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 10:09:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 10:09:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 10:09:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 10:09:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 10:09:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 10:09:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 10:09:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 10:09:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 10:09:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 10:09:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 10:09:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 10:09:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 10:09:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 10:09:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 10:09:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 10:09:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 10:09:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 10:09:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 10:09:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 10:09:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 10:09:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 10:09:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 10:09:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 10:09:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 10:09:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 10:09:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 10:09:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 10:09:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 10:09:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 10:09:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 10:09:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:09:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:09:41] [Rank 0] PRINT: step:1600/10000 val_loss:5.3134 total_sharp:2.8336e-03 L1_sharp:1.3681e-02 L2_sharp:5.1803e-03 L3_sharp:2.2241e-03 L4_sharp:3.4483e-03 L5_sharp:4.4352e-03 L6_sharp:4.0023e-03 L7_sharp:4.3756e-03 L8_sharp:8.9784e-03 L9_sharp:6.2172e-03 L10_sharp:6.3179e-03 L11_sharp:8.4598e-03 L12_sharp:2.6570e-02 total_fnorm:1.8500e+01 total_l1_linf:3.7888e+04 total_spectral:9.1875e+00 L1_fnorm:1.2734e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2344e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2578e+00 L12_fnorm:1.1875e+00 L1_l1linf:3.7109e-01 L2_l1linf:3.5938e-01 L3_l1linf:3.4766e-01 L4_l1linf:3.4961e-01 L5_l1linf:3.5156e-01 L6_l1linf:3.5547e-01 L7_l1linf:3.5938e-01 L8_l1linf:3.6133e-01 L9_l1linf:3.6133e-01 L10_l1linf:3.6523e-01 L11_l1linf:3.5938e-01 L12_l1linf:2.8906e-01 L1_spectral:1.4496e-02 L2_spectral:1.4400e-02 L3_spectral:1.4354e-02 L4_spectral:1.4336e-02 L5_spectral:1.4178e-02 L6_spectral:1.4253e-02 L7_spectral:1.4236e-02 L8_spectral:1.4513e-02 L9_spectral:1.4222e-02 L10_spectral:1.4187e-02 L11_spectral:1.4186e-02 L12_spectral:1.4357e-02 train_time:88958ms step_avg:55.60ms +[2025-09-11 10:09:41] [Rank 0] PRINT: step:1600/10000 
val_loss:5.3134 total_sharp:2.8336e-03 L1_sharp:1.3681e-02 L2_sharp:5.1803e-03 L3_sharp:2.2241e-03 L4_sharp:3.4483e-03 L5_sharp:4.4352e-03 L6_sharp:4.0023e-03 L7_sharp:4.3756e-03 L8_sharp:8.9784e-03 L9_sharp:6.2172e-03 L10_sharp:6.3179e-03 L11_sharp:8.4598e-03 L12_sharp:2.6570e-02 total_fnorm:1.8500e+01 total_l1_linf:3.7888e+04 total_spectral:9.1875e+00 L1_fnorm:1.2734e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2344e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2578e+00 L12_fnorm:1.1875e+00 L1_l1linf:3.7109e-01 L2_l1linf:3.5938e-01 L3_l1linf:3.4766e-01 L4_l1linf:3.4961e-01 L5_l1linf:3.5156e-01 L6_l1linf:3.5547e-01 L7_l1linf:3.5938e-01 L8_l1linf:3.6133e-01 L9_l1linf:3.6133e-01 L10_l1linf:3.6523e-01 L11_l1linf:3.5938e-01 L12_l1linf:2.8906e-01 L1_spectral:1.4496e-02 L2_spectral:1.4400e-02 L3_spectral:1.4354e-02 L4_spectral:1.4336e-02 L5_spectral:1.4178e-02 L6_spectral:1.4253e-02 L7_spectral:1.4236e-02 L8_spectral:1.4513e-02 L9_spectral:1.4222e-02 L10_spectral:1.4187e-02 L11_spectral:1.4186e-02 L12_spectral:1.4357e-02 train_time:88958ms step_avg:55.60ms +[2025-09-11 10:09:43] [Rank 0] step:1601/10000 train_time:90182ms step_avg:56.33ms +[2025-09-11 10:09:43] [Rank 0] step:1601/10000 train_time:90182ms step_avg:56.33ms +[2025-09-11 10:09:43] [Rank 0] step:1621/10000 train_time:90849ms step_avg:56.05ms +[2025-09-11 10:09:43] [Rank 0] step:1621/10000 train_time:90849ms step_avg:56.05ms +[2025-09-11 10:09:44] [Rank 0] step:1641/10000 train_time:91493ms step_avg:55.75ms +[2025-09-11 10:09:44] [Rank 0] step:1641/10000 train_time:91493ms step_avg:55.75ms +[2025-09-11 10:09:44] [Rank 0] step:1661/10000 train_time:92137ms step_avg:55.47ms +[2025-09-11 10:09:44] [Rank 0] step:1661/10000 train_time:92137ms step_avg:55.47ms +[2025-09-11 10:09:45] [Rank 0] step:1681/10000 train_time:92781ms step_avg:55.19ms +[2025-09-11 10:09:45] [Rank 0] step:1681/10000 train_time:92781ms 
step_avg:55.19ms +[2025-09-11 10:09:46] [Rank 0] step:1701/10000 train_time:93425ms step_avg:54.92ms +[2025-09-11 10:09:46] [Rank 0] step:1701/10000 train_time:93425ms step_avg:54.92ms +[2025-09-11 10:09:46] [Rank 0] step:1721/10000 train_time:94069ms step_avg:54.66ms +[2025-09-11 10:09:46] [Rank 0] step:1721/10000 train_time:94069ms step_avg:54.66ms +[2025-09-11 10:09:47] [Rank 0] step:1741/10000 train_time:94712ms step_avg:54.40ms +[2025-09-11 10:09:47] [Rank 0] step:1741/10000 train_time:94712ms step_avg:54.40ms +[2025-09-11 10:09:48] [Rank 0] step:1761/10000 train_time:95356ms step_avg:54.15ms +[2025-09-11 10:09:48] [Rank 0] step:1761/10000 train_time:95356ms step_avg:54.15ms +[2025-09-11 10:09:48] [Rank 0] step:1781/10000 train_time:95999ms step_avg:53.90ms +[2025-09-11 10:09:48] [Rank 0] step:1781/10000 train_time:95999ms step_avg:53.90ms +[2025-09-11 10:09:49] [Rank 0] step:1801/10000 train_time:96642ms step_avg:53.66ms +[2025-09-11 10:09:49] [Rank 0] step:1801/10000 train_time:96642ms step_avg:53.66ms +[2025-09-11 10:09:50] [Rank 0] step:1821/10000 train_time:97286ms step_avg:53.42ms +[2025-09-11 10:09:50] [Rank 0] step:1821/10000 train_time:97286ms step_avg:53.42ms +[2025-09-11 10:09:50] [Rank 0] step:1841/10000 train_time:97929ms step_avg:53.19ms +[2025-09-11 10:09:50] [Rank 0] step:1841/10000 train_time:97929ms step_avg:53.19ms +[2025-09-11 10:09:51] [Rank 0] step:1861/10000 train_time:98572ms step_avg:52.97ms +[2025-09-11 10:09:51] [Rank 0] step:1861/10000 train_time:98572ms step_avg:52.97ms +[2025-09-11 10:09:52] [Rank 0] step:1881/10000 train_time:99216ms step_avg:52.75ms +[2025-09-11 10:09:52] [Rank 0] step:1881/10000 train_time:99216ms step_avg:52.75ms +[2025-09-11 10:09:52] [Rank 0] step:1901/10000 train_time:99859ms step_avg:52.53ms +[2025-09-11 10:09:52] [Rank 0] step:1901/10000 train_time:99859ms step_avg:52.53ms +[2025-09-11 10:09:53] [Rank 0] step:1921/10000 train_time:100503ms step_avg:52.32ms +[2025-09-11 10:09:53] [Rank 0] step:1921/10000 
train_time:100503ms step_avg:52.32ms +[2025-09-11 10:09:53] [Rank 0] step:1941/10000 train_time:101147ms step_avg:52.11ms +[2025-09-11 10:09:53] [Rank 0] step:1941/10000 train_time:101147ms step_avg:52.11ms +[2025-09-11 10:09:54] [Rank 0] step:1961/10000 train_time:101790ms step_avg:51.91ms +[2025-09-11 10:09:54] [Rank 0] step:1961/10000 train_time:101790ms step_avg:51.91ms +[2025-09-11 10:09:55] [Rank 0] step:1981/10000 train_time:102434ms step_avg:51.71ms +[2025-09-11 10:09:55] [Rank 0] step:1981/10000 train_time:102434ms step_avg:51.71ms +[2025-09-11 10:09:55] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 10:09:55] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 10:09:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 10:09:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 10:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 10:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 10:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 10:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 10:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 10:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 10:10:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 10:10:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 10:10:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 10:10:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 10:10:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 10:10:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 10:10:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 10:10:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 10:10:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 10:10:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 10:10:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 10:10:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 10:10:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 10:10:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 10:10:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 10:10:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 10:10:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 10:10:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 10:10:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 10:10:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 10:10:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 10:10:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 10:10:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 10:10:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 10:10:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 10:10:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 10:10:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 10:10:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 10:10:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 10:10:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 10:10:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 10:10:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 10:10:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:10:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:10:05] [Rank 0] PRINT: step:2000/10000 val_loss:5.1786 total_sharp:2.5534e-03 L1_sharp:1.0830e-02 L2_sharp:2.1450e-03 L3_sharp:2.1296e-03 L4_sharp:3.3468e-03 L5_sharp:3.6210e-03 L6_sharp:2.8536e-03 L7_sharp:2.1641e-03 L8_sharp:6.9592e-03 L9_sharp:5.3936e-03 L10_sharp:5.8427e-03 L11_sharp:8.1120e-03 L12_sharp:4.2397e-02 total_fnorm:1.8125e+01 total_l1_linf:3.7376e+04 total_spectral:8.9375e+00 L1_fnorm:1.2734e+00 L2_fnorm:1.2422e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2344e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2734e+00 L12_fnorm:1.2109e+00 L1_l1linf:3.6328e-01 L2_l1linf:3.4961e-01 L3_l1linf:3.3789e-01 L4_l1linf:3.4180e-01 L5_l1linf:3.4180e-01 L6_l1linf:3.4766e-01 L7_l1linf:3.4766e-01 L8_l1linf:3.4766e-01 L9_l1linf:3.4961e-01 L10_l1linf:3.4961e-01 L11_l1linf:3.5156e-01 L12_l1linf:2.8125e-01 L1_spectral:1.4990e-02 L2_spectral:1.4732e-02 L3_spectral:1.4661e-02 L4_spectral:1.4562e-02 L5_spectral:1.4517e-02 L6_spectral:1.4561e-02 L7_spectral:1.4617e-02 L8_spectral:1.4935e-02 L9_spectral:1.4572e-02 L10_spectral:1.4500e-02 L11_spectral:1.4565e-02 L12_spectral:1.4625e-02 train_time:103059ms step_avg:51.53ms +[2025-09-11 10:10:05] [Rank 0] PRINT: step:2000/10000 val_loss:5.1786 total_sharp:2.5534e-03 L1_sharp:1.0830e-02 L2_sharp:2.1450e-03 L3_sharp:2.1296e-03 L4_sharp:3.3468e-03 L5_sharp:3.6210e-03 L6_sharp:2.8536e-03 L7_sharp:2.1641e-03 L8_sharp:6.9592e-03 L9_sharp:5.3936e-03 L10_sharp:5.8427e-03 L11_sharp:8.1120e-03 L12_sharp:4.2397e-02 total_fnorm:1.8125e+01 total_l1_linf:3.7376e+04 total_spectral:8.9375e+00 L1_fnorm:1.2734e+00 L2_fnorm:1.2422e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2344e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2734e+00 L12_fnorm:1.2109e+00 L1_l1linf:3.6328e-01 L2_l1linf:3.4961e-01 L3_l1linf:3.3789e-01 L4_l1linf:3.4180e-01 L5_l1linf:3.4180e-01 
L6_l1linf:3.4766e-01 L7_l1linf:3.4766e-01 L8_l1linf:3.4766e-01 L9_l1linf:3.4961e-01 L10_l1linf:3.4961e-01 L11_l1linf:3.5156e-01 L12_l1linf:2.8125e-01 L1_spectral:1.4990e-02 L2_spectral:1.4732e-02 L3_spectral:1.4661e-02 L4_spectral:1.4562e-02 L5_spectral:1.4517e-02 L6_spectral:1.4561e-02 L7_spectral:1.4617e-02 L8_spectral:1.4935e-02 L9_spectral:1.4572e-02 L10_spectral:1.4500e-02 L11_spectral:1.4565e-02 L12_spectral:1.4625e-02 train_time:103059ms step_avg:51.53ms +[2025-09-11 10:10:06] [Rank 0] step:2001/10000 train_time:104228ms step_avg:52.09ms +[2025-09-11 10:10:06] [Rank 0] step:2001/10000 train_time:104228ms step_avg:52.09ms +[2025-09-11 10:10:07] [Rank 0] step:2021/10000 train_time:104886ms step_avg:51.90ms +[2025-09-11 10:10:07] [Rank 0] step:2021/10000 train_time:104886ms step_avg:51.90ms +[2025-09-11 10:10:08] [Rank 0] step:2041/10000 train_time:105530ms step_avg:51.71ms +[2025-09-11 10:10:08] [Rank 0] step:2041/10000 train_time:105530ms step_avg:51.71ms +[2025-09-11 10:10:08] [Rank 0] step:2061/10000 train_time:106174ms step_avg:51.52ms +[2025-09-11 10:10:08] [Rank 0] step:2061/10000 train_time:106174ms step_avg:51.52ms +[2025-09-11 10:10:09] [Rank 0] step:2081/10000 train_time:106817ms step_avg:51.33ms +[2025-09-11 10:10:09] [Rank 0] step:2081/10000 train_time:106817ms step_avg:51.33ms +[2025-09-11 10:10:10] [Rank 0] step:2101/10000 train_time:107468ms step_avg:51.15ms +[2025-09-11 10:10:10] [Rank 0] step:2101/10000 train_time:107468ms step_avg:51.15ms +[2025-09-11 10:10:10] [Rank 0] step:2121/10000 train_time:108112ms step_avg:50.97ms +[2025-09-11 10:10:10] [Rank 0] step:2121/10000 train_time:108112ms step_avg:50.97ms +[2025-09-11 10:10:11] [Rank 0] step:2141/10000 train_time:108755ms step_avg:50.80ms +[2025-09-11 10:10:11] [Rank 0] step:2141/10000 train_time:108755ms step_avg:50.80ms +[2025-09-11 10:10:12] [Rank 0] step:2161/10000 train_time:109398ms step_avg:50.62ms +[2025-09-11 10:10:12] [Rank 0] step:2161/10000 train_time:109398ms step_avg:50.62ms 
+[2025-09-11 10:10:12] [Rank 0] step:2181/10000 train_time:110041ms step_avg:50.45ms +[2025-09-11 10:10:12] [Rank 0] step:2181/10000 train_time:110041ms step_avg:50.45ms +[2025-09-11 10:10:13] [Rank 0] step:2201/10000 train_time:110685ms step_avg:50.29ms +[2025-09-11 10:10:13] [Rank 0] step:2201/10000 train_time:110685ms step_avg:50.29ms +[2025-09-11 10:10:13] [Rank 0] step:2221/10000 train_time:111328ms step_avg:50.13ms +[2025-09-11 10:10:13] [Rank 0] step:2221/10000 train_time:111328ms step_avg:50.13ms +[2025-09-11 10:10:14] [Rank 0] step:2241/10000 train_time:111983ms step_avg:49.97ms +[2025-09-11 10:10:14] [Rank 0] step:2241/10000 train_time:111983ms step_avg:49.97ms +[2025-09-11 10:10:15] [Rank 0] step:2261/10000 train_time:112640ms step_avg:49.82ms +[2025-09-11 10:10:15] [Rank 0] step:2261/10000 train_time:112640ms step_avg:49.82ms +[2025-09-11 10:10:15] [Rank 0] step:2281/10000 train_time:113295ms step_avg:49.67ms +[2025-09-11 10:10:15] [Rank 0] step:2281/10000 train_time:113295ms step_avg:49.67ms +[2025-09-11 10:10:16] [Rank 0] step:2301/10000 train_time:113951ms step_avg:49.52ms +[2025-09-11 10:10:16] [Rank 0] step:2301/10000 train_time:113951ms step_avg:49.52ms +[2025-09-11 10:10:17] [Rank 0] step:2321/10000 train_time:114607ms step_avg:49.38ms +[2025-09-11 10:10:17] [Rank 0] step:2321/10000 train_time:114607ms step_avg:49.38ms +[2025-09-11 10:10:17] [Rank 0] step:2341/10000 train_time:115264ms step_avg:49.24ms +[2025-09-11 10:10:17] [Rank 0] step:2341/10000 train_time:115264ms step_avg:49.24ms +[2025-09-11 10:10:18] [Rank 0] step:2361/10000 train_time:115921ms step_avg:49.10ms +[2025-09-11 10:10:18] [Rank 0] step:2361/10000 train_time:115921ms step_avg:49.10ms +[2025-09-11 10:10:19] [Rank 0] step:2381/10000 train_time:116576ms step_avg:48.96ms +[2025-09-11 10:10:19] [Rank 0] step:2381/10000 train_time:116576ms step_avg:48.96ms +[2025-09-11 10:10:19] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 10:10:19] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 10:10:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 10:10:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 10:10:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 10:10:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 10:10:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:10:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:10:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 10:10:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 10:10:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 10:10:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 10:10:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 10:10:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 10:10:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 10:10:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 10:10:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 10:10:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 10:10:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 10:10:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 10:10:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 10:10:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 10:10:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 10:10:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 10:10:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 10:10:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 10:10:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 10:10:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 10:10:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 10:10:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 10:10:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 10:10:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 10:10:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 10:10:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 10:10:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 10:10:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 10:10:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 10:10:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 10:10:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 10:10:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 10:10:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 10:10:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 10:10:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 10:10:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 10:10:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:10:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:10:29] [Rank 0] PRINT: step:2400/10000 val_loss:5.0551 total_sharp:2.9486e-03 L1_sharp:1.3781e-02 L2_sharp:3.0011e-03 L3_sharp:1.4811e-03 L4_sharp:2.4841e-03 L5_sharp:2.8549e-03 L6_sharp:2.5168e-03 L7_sharp:2.0006e-03 L8_sharp:7.1447e-03 L9_sharp:5.9987e-03 L10_sharp:5.7574e-03 L11_sharp:7.7231e-03 L12_sharp:4.0223e-02 total_fnorm:1.7125e+01 total_l1_linf:3.4304e+04 total_spectral:8.4375e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2422e+00 L9_fnorm:1.2734e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2812e+00 L12_fnorm:1.2344e+00 L1_l1linf:3.6328e-01 L2_l1linf:3.4961e-01 L3_l1linf:3.3594e-01 L4_l1linf:3.3594e-01 L5_l1linf:3.3398e-01 L6_l1linf:3.3398e-01 L7_l1linf:3.4180e-01 L8_l1linf:3.3594e-01 L9_l1linf:3.3984e-01 L10_l1linf:3.4766e-01 L11_l1linf:3.4180e-01 L12_l1linf:2.8711e-01 L1_spectral:1.5106e-02 L2_spectral:1.4948e-02 L3_spectral:1.4896e-02 L4_spectral:1.4888e-02 L5_spectral:1.4764e-02 L6_spectral:1.4838e-02 L7_spectral:1.4831e-02 L8_spectral:1.5103e-02 L9_spectral:1.4843e-02 L10_spectral:1.4827e-02 L11_spectral:1.4833e-02 L12_spectral:1.4868e-02 train_time:117213ms step_avg:48.84ms +[2025-09-11 10:10:29] [Rank 0] PRINT: step:2400/10000 
val_loss:5.0551 total_sharp:2.9486e-03 L1_sharp:1.3781e-02 L2_sharp:3.0011e-03 L3_sharp:1.4811e-03 L4_sharp:2.4841e-03 L5_sharp:2.8549e-03 L6_sharp:2.5168e-03 L7_sharp:2.0006e-03 L8_sharp:7.1447e-03 L9_sharp:5.9987e-03 L10_sharp:5.7574e-03 L11_sharp:7.7231e-03 L12_sharp:4.0223e-02 total_fnorm:1.7125e+01 total_l1_linf:3.4304e+04 total_spectral:8.4375e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2422e+00 L9_fnorm:1.2734e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2812e+00 L12_fnorm:1.2344e+00 L1_l1linf:3.6328e-01 L2_l1linf:3.4961e-01 L3_l1linf:3.3594e-01 L4_l1linf:3.3594e-01 L5_l1linf:3.3398e-01 L6_l1linf:3.3398e-01 L7_l1linf:3.4180e-01 L8_l1linf:3.3594e-01 L9_l1linf:3.3984e-01 L10_l1linf:3.4766e-01 L11_l1linf:3.4180e-01 L12_l1linf:2.8711e-01 L1_spectral:1.5106e-02 L2_spectral:1.4948e-02 L3_spectral:1.4896e-02 L4_spectral:1.4888e-02 L5_spectral:1.4764e-02 L6_spectral:1.4838e-02 L7_spectral:1.4831e-02 L8_spectral:1.5103e-02 L9_spectral:1.4843e-02 L10_spectral:1.4827e-02 L11_spectral:1.4833e-02 L12_spectral:1.4868e-02 train_time:117213ms step_avg:48.84ms +[2025-09-11 10:10:30] [Rank 0] step:2401/10000 train_time:118378ms step_avg:49.30ms +[2025-09-11 10:10:30] [Rank 0] step:2401/10000 train_time:118378ms step_avg:49.30ms +[2025-09-11 10:10:31] [Rank 0] step:2421/10000 train_time:119024ms step_avg:49.16ms +[2025-09-11 10:10:31] [Rank 0] step:2421/10000 train_time:119024ms step_avg:49.16ms +[2025-09-11 10:10:32] [Rank 0] step:2441/10000 train_time:119683ms step_avg:49.03ms +[2025-09-11 10:10:32] [Rank 0] step:2441/10000 train_time:119683ms step_avg:49.03ms +[2025-09-11 10:10:32] [Rank 0] step:2461/10000 train_time:120341ms step_avg:48.90ms +[2025-09-11 10:10:32] [Rank 0] step:2461/10000 train_time:120341ms step_avg:48.90ms +[2025-09-11 10:10:33] [Rank 0] step:2481/10000 train_time:120999ms step_avg:48.77ms +[2025-09-11 10:10:33] [Rank 0] step:2481/10000 
train_time:120999ms step_avg:48.77ms +[2025-09-11 10:10:34] [Rank 0] step:2501/10000 train_time:121657ms step_avg:48.64ms +[2025-09-11 10:10:34] [Rank 0] step:2501/10000 train_time:121657ms step_avg:48.64ms +[2025-09-11 10:10:34] [Rank 0] step:2521/10000 train_time:122317ms step_avg:48.52ms +[2025-09-11 10:10:34] [Rank 0] step:2521/10000 train_time:122317ms step_avg:48.52ms +[2025-09-11 10:10:35] [Rank 0] step:2541/10000 train_time:122974ms step_avg:48.40ms +[2025-09-11 10:10:35] [Rank 0] step:2541/10000 train_time:122974ms step_avg:48.40ms +[2025-09-11 10:10:36] [Rank 0] step:2561/10000 train_time:123631ms step_avg:48.27ms +[2025-09-11 10:10:36] [Rank 0] step:2561/10000 train_time:123631ms step_avg:48.27ms +[2025-09-11 10:10:36] [Rank 0] step:2581/10000 train_time:124289ms step_avg:48.16ms +[2025-09-11 10:10:36] [Rank 0] step:2581/10000 train_time:124289ms step_avg:48.16ms +[2025-09-11 10:10:37] [Rank 0] step:2601/10000 train_time:124947ms step_avg:48.04ms +[2025-09-11 10:10:37] [Rank 0] step:2601/10000 train_time:124947ms step_avg:48.04ms +[2025-09-11 10:10:38] [Rank 0] step:2621/10000 train_time:125604ms step_avg:47.92ms +[2025-09-11 10:10:38] [Rank 0] step:2621/10000 train_time:125604ms step_avg:47.92ms +[2025-09-11 10:10:38] [Rank 0] step:2641/10000 train_time:126262ms step_avg:47.81ms +[2025-09-11 10:10:38] [Rank 0] step:2641/10000 train_time:126262ms step_avg:47.81ms +[2025-09-11 10:10:39] [Rank 0] step:2661/10000 train_time:126920ms step_avg:47.70ms +[2025-09-11 10:10:39] [Rank 0] step:2661/10000 train_time:126920ms step_avg:47.70ms +[2025-09-11 10:10:40] [Rank 0] step:2681/10000 train_time:127577ms step_avg:47.59ms +[2025-09-11 10:10:40] [Rank 0] step:2681/10000 train_time:127577ms step_avg:47.59ms +[2025-09-11 10:10:40] [Rank 0] step:2701/10000 train_time:128236ms step_avg:47.48ms +[2025-09-11 10:10:40] [Rank 0] step:2701/10000 train_time:128236ms step_avg:47.48ms +[2025-09-11 10:10:41] [Rank 0] step:2721/10000 train_time:128893ms step_avg:47.37ms 
+[2025-09-11 10:10:41] [Rank 0] step:2721/10000 train_time:128893ms step_avg:47.37ms +[2025-09-11 10:10:42] [Rank 0] step:2741/10000 train_time:129551ms step_avg:47.26ms +[2025-09-11 10:10:42] [Rank 0] step:2741/10000 train_time:129551ms step_avg:47.26ms +[2025-09-11 10:10:42] [Rank 0] step:2761/10000 train_time:130209ms step_avg:47.16ms +[2025-09-11 10:10:42] [Rank 0] step:2761/10000 train_time:130209ms step_avg:47.16ms +[2025-09-11 10:10:43] [Rank 0] step:2781/10000 train_time:130866ms step_avg:47.06ms +[2025-09-11 10:10:43] [Rank 0] step:2781/10000 train_time:130866ms step_avg:47.06ms +[2025-09-11 10:10:44] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 10:10:44] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 10:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 10:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 10:10:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 10:10:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 10:10:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:10:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:10:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 10:10:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 10:10:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 10:10:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 10:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 10:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 10:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 10:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 10:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 10:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 10:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 10:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 10:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 10:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 10:10:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 10:10:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 10:10:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 10:10:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 10:10:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 10:10:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 10:10:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 10:10:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 10:10:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 10:10:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 10:10:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 10:10:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 10:10:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 10:10:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 10:10:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 10:10:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 10:10:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 10:10:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 10:10:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 10:10:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 10:10:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 10:10:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 10:10:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:10:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:10:53] [Rank 0] PRINT: step:2800/10000 val_loss:4.9791 total_sharp:3.4023e-03 L1_sharp:8.4398e-03 L2_sharp:2.7313e-03 L3_sharp:1.6241e-03 L4_sharp:1.4318e-03 L5_sharp:2.8410e-03 L6_sharp:2.7896e-03 L7_sharp:2.6295e-03 L8_sharp:7.0669e-03 L9_sharp:5.3943e-03 L10_sharp:6.5557e-03 L11_sharp:8.2854e-03 L12_sharp:3.3349e-02 total_fnorm:1.6000e+01 total_l1_linf:3.1872e+04 total_spectral:7.8750e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2266e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2422e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2734e+00 L12_fnorm:1.2266e+00 L1_l1linf:3.5352e-01 L2_l1linf:3.4180e-01 L3_l1linf:3.2617e-01 L4_l1linf:3.2422e-01 L5_l1linf:3.2812e-01 L6_l1linf:3.2422e-01 L7_l1linf:3.3203e-01 L8_l1linf:3.2422e-01 L9_l1linf:3.3008e-01 L10_l1linf:3.3789e-01 L11_l1linf:3.3984e-01 L12_l1linf:2.8516e-01 L1_spectral:1.5442e-02 L2_spectral:1.5156e-02 L3_spectral:1.5087e-02 L4_spectral:1.5038e-02 L5_spectral:1.5053e-02 L6_spectral:1.4947e-02 L7_spectral:1.4970e-02 L8_spectral:1.5388e-02 L9_spectral:1.5070e-02 L10_spectral:1.5127e-02 L11_spectral:1.5022e-02 L12_spectral:1.4905e-02 train_time:131506ms step_avg:46.97ms +[2025-09-11 10:10:53] [Rank 0] PRINT: step:2800/10000 val_loss:4.9791 total_sharp:3.4023e-03 L1_sharp:8.4398e-03 L2_sharp:2.7313e-03 L3_sharp:1.6241e-03 L4_sharp:1.4318e-03 L5_sharp:2.8410e-03 L6_sharp:2.7896e-03 L7_sharp:2.6295e-03 L8_sharp:7.0669e-03 L9_sharp:5.3943e-03 L10_sharp:6.5557e-03 L11_sharp:8.2854e-03 L12_sharp:3.3349e-02 total_fnorm:1.6000e+01 total_l1_linf:3.1872e+04 total_spectral:7.8750e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2266e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2422e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2734e+00 L12_fnorm:1.2266e+00 L1_l1linf:3.5352e-01 L2_l1linf:3.4180e-01 L3_l1linf:3.2617e-01 L4_l1linf:3.2422e-01 L5_l1linf:3.2812e-01 
L6_l1linf:3.2422e-01 L7_l1linf:3.3203e-01 L8_l1linf:3.2422e-01 L9_l1linf:3.3008e-01 L10_l1linf:3.3789e-01 L11_l1linf:3.3984e-01 L12_l1linf:2.8516e-01 L1_spectral:1.5442e-02 L2_spectral:1.5156e-02 L3_spectral:1.5087e-02 L4_spectral:1.5038e-02 L5_spectral:1.5053e-02 L6_spectral:1.4947e-02 L7_spectral:1.4970e-02 L8_spectral:1.5388e-02 L9_spectral:1.5070e-02 L10_spectral:1.5127e-02 L11_spectral:1.5022e-02 L12_spectral:1.4905e-02 train_time:131506ms step_avg:46.97ms +[2025-09-11 10:10:55] [Rank 0] step:2801/10000 train_time:132743ms step_avg:47.39ms +[2025-09-11 10:10:55] [Rank 0] step:2801/10000 train_time:132743ms step_avg:47.39ms +[2025-09-11 10:10:55] [Rank 0] step:2821/10000 train_time:133429ms step_avg:47.30ms +[2025-09-11 10:10:55] [Rank 0] step:2821/10000 train_time:133429ms step_avg:47.30ms +[2025-09-11 10:10:56] [Rank 0] step:2841/10000 train_time:134088ms step_avg:47.20ms +[2025-09-11 10:10:56] [Rank 0] step:2841/10000 train_time:134088ms step_avg:47.20ms +[2025-09-11 10:10:57] [Rank 0] step:2861/10000 train_time:134747ms step_avg:47.10ms +[2025-09-11 10:10:57] [Rank 0] step:2861/10000 train_time:134747ms step_avg:47.10ms +[2025-09-11 10:10:57] [Rank 0] step:2881/10000 train_time:135405ms step_avg:47.00ms +[2025-09-11 10:10:57] [Rank 0] step:2881/10000 train_time:135405ms step_avg:47.00ms +[2025-09-11 10:10:58] [Rank 0] step:2901/10000 train_time:136063ms step_avg:46.90ms +[2025-09-11 10:10:58] [Rank 0] step:2901/10000 train_time:136063ms step_avg:46.90ms +[2025-09-11 10:10:59] [Rank 0] step:2921/10000 train_time:136721ms step_avg:46.81ms +[2025-09-11 10:10:59] [Rank 0] step:2921/10000 train_time:136721ms step_avg:46.81ms +[2025-09-11 10:10:59] [Rank 0] step:2941/10000 train_time:137380ms step_avg:46.71ms +[2025-09-11 10:10:59] [Rank 0] step:2941/10000 train_time:137380ms step_avg:46.71ms +[2025-09-11 10:11:00] [Rank 0] step:2961/10000 train_time:138038ms step_avg:46.62ms +[2025-09-11 10:11:00] [Rank 0] step:2961/10000 train_time:138038ms step_avg:46.62ms 
+[2025-09-11 10:11:01] [Rank 0] step:2981/10000 train_time:138698ms step_avg:46.53ms +[2025-09-11 10:11:01] [Rank 0] step:2981/10000 train_time:138698ms step_avg:46.53ms +[2025-09-11 10:11:01] [Rank 0] step:3001/10000 train_time:139359ms step_avg:46.44ms +[2025-09-11 10:11:01] [Rank 0] step:3001/10000 train_time:139359ms step_avg:46.44ms +[2025-09-11 10:11:02] [Rank 0] step:3021/10000 train_time:140020ms step_avg:46.35ms +[2025-09-11 10:11:02] [Rank 0] step:3021/10000 train_time:140020ms step_avg:46.35ms +[2025-09-11 10:11:03] [Rank 0] step:3041/10000 train_time:140681ms step_avg:46.26ms +[2025-09-11 10:11:03] [Rank 0] step:3041/10000 train_time:140681ms step_avg:46.26ms +[2025-09-11 10:11:03] [Rank 0] step:3061/10000 train_time:141342ms step_avg:46.18ms +[2025-09-11 10:11:03] [Rank 0] step:3061/10000 train_time:141342ms step_avg:46.18ms +[2025-09-11 10:11:04] [Rank 0] step:3081/10000 train_time:142004ms step_avg:46.09ms +[2025-09-11 10:11:04] [Rank 0] step:3081/10000 train_time:142004ms step_avg:46.09ms +[2025-09-11 10:11:05] [Rank 0] step:3101/10000 train_time:142665ms step_avg:46.01ms +[2025-09-11 10:11:05] [Rank 0] step:3101/10000 train_time:142665ms step_avg:46.01ms +[2025-09-11 10:11:05] [Rank 0] step:3121/10000 train_time:143327ms step_avg:45.92ms +[2025-09-11 10:11:05] [Rank 0] step:3121/10000 train_time:143327ms step_avg:45.92ms +[2025-09-11 10:11:06] [Rank 0] step:3141/10000 train_time:143988ms step_avg:45.84ms +[2025-09-11 10:11:06] [Rank 0] step:3141/10000 train_time:143988ms step_avg:45.84ms +[2025-09-11 10:11:07] [Rank 0] step:3161/10000 train_time:144648ms step_avg:45.76ms +[2025-09-11 10:11:07] [Rank 0] step:3161/10000 train_time:144648ms step_avg:45.76ms +[2025-09-11 10:11:07] [Rank 0] step:3181/10000 train_time:145309ms step_avg:45.68ms +[2025-09-11 10:11:07] [Rank 0] step:3181/10000 train_time:145309ms step_avg:45.68ms +[2025-09-11 10:11:08] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 10:11:08] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 10:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 10:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 10:11:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 10:11:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 10:11:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:11:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:11:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 10:11:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 10:11:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 10:11:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 10:11:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 10:11:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 10:11:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 10:11:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 10:11:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 10:11:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 10:11:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 10:11:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 10:11:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 10:11:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 10:11:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 10:11:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 10:11:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 10:11:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 10:11:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 10:11:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 10:11:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 10:11:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 10:11:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 10:11:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 10:11:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 10:11:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 10:11:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 10:11:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 10:11:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 10:11:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 10:11:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 10:11:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 10:11:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 10:11:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 10:11:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 10:11:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 10:11:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:11:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:11:18] [Rank 0] PRINT: step:3200/10000 val_loss:4.8990 total_sharp:2.0723e-03 L1_sharp:7.8822e-03 L2_sharp:1.2925e-03 L3_sharp:1.1206e-03 L4_sharp:1.7770e-03 L5_sharp:3.0511e-03 L6_sharp:2.4579e-03 L7_sharp:2.2369e-03 L8_sharp:6.6877e-03 L9_sharp:4.9225e-03 L10_sharp:5.7222e-03 L11_sharp:7.0034e-03 L12_sharp:2.6426e-02 total_fnorm:1.8250e+01 total_l1_linf:3.5584e+04 total_spectral:9.0000e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2422e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2812e+00 L12_fnorm:1.2500e+00 L1_l1linf:3.5352e-01 L2_l1linf:3.3984e-01 L3_l1linf:3.2422e-01 L4_l1linf:3.2031e-01 L5_l1linf:3.1641e-01 L6_l1linf:3.2227e-01 L7_l1linf:3.2812e-01 L8_l1linf:3.1836e-01 L9_l1linf:3.1250e-01 L10_l1linf:3.1641e-01 L11_l1linf:3.2422e-01 L12_l1linf:2.8711e-01 L1_spectral:1.5542e-02 L2_spectral:1.5277e-02 L3_spectral:1.5240e-02 L4_spectral:1.5258e-02 L5_spectral:1.5174e-02 L6_spectral:1.5211e-02 L7_spectral:1.5226e-02 L8_spectral:1.5574e-02 L9_spectral:1.5234e-02 L10_spectral:1.5298e-02 L11_spectral:1.5270e-02 L12_spectral:1.5163e-02 train_time:145950ms step_avg:45.61ms +[2025-09-11 10:11:18] [Rank 0] PRINT: step:3200/10000 
val_loss:4.8990 total_sharp:2.0723e-03 L1_sharp:7.8822e-03 L2_sharp:1.2925e-03 L3_sharp:1.1206e-03 L4_sharp:1.7770e-03 L5_sharp:3.0511e-03 L6_sharp:2.4579e-03 L7_sharp:2.2369e-03 L8_sharp:6.6877e-03 L9_sharp:4.9225e-03 L10_sharp:5.7222e-03 L11_sharp:7.0034e-03 L12_sharp:2.6426e-02 total_fnorm:1.8250e+01 total_l1_linf:3.5584e+04 total_spectral:9.0000e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2422e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2812e+00 L12_fnorm:1.2500e+00 L1_l1linf:3.5352e-01 L2_l1linf:3.3984e-01 L3_l1linf:3.2422e-01 L4_l1linf:3.2031e-01 L5_l1linf:3.1641e-01 L6_l1linf:3.2227e-01 L7_l1linf:3.2812e-01 L8_l1linf:3.1836e-01 L9_l1linf:3.1250e-01 L10_l1linf:3.1641e-01 L11_l1linf:3.2422e-01 L12_l1linf:2.8711e-01 L1_spectral:1.5542e-02 L2_spectral:1.5277e-02 L3_spectral:1.5240e-02 L4_spectral:1.5258e-02 L5_spectral:1.5174e-02 L6_spectral:1.5211e-02 L7_spectral:1.5226e-02 L8_spectral:1.5574e-02 L9_spectral:1.5234e-02 L10_spectral:1.5298e-02 L11_spectral:1.5270e-02 L12_spectral:1.5163e-02 train_time:145950ms step_avg:45.61ms +[2025-09-11 10:11:19] [Rank 0] step:3201/10000 train_time:147126ms step_avg:45.96ms +[2025-09-11 10:11:19] [Rank 0] step:3201/10000 train_time:147126ms step_avg:45.96ms +[2025-09-11 10:11:20] [Rank 0] step:3221/10000 train_time:147829ms step_avg:45.90ms +[2025-09-11 10:11:20] [Rank 0] step:3221/10000 train_time:147829ms step_avg:45.90ms +[2025-09-11 10:11:20] [Rank 0] step:3241/10000 train_time:148491ms step_avg:45.82ms +[2025-09-11 10:11:20] [Rank 0] step:3241/10000 train_time:148491ms step_avg:45.82ms +[2025-09-11 10:11:21] [Rank 0] step:3261/10000 train_time:149153ms step_avg:45.74ms +[2025-09-11 10:11:21] [Rank 0] step:3261/10000 train_time:149153ms step_avg:45.74ms +[2025-09-11 10:11:22] [Rank 0] step:3281/10000 train_time:149814ms step_avg:45.66ms +[2025-09-11 10:11:22] [Rank 0] step:3281/10000 
train_time:149814ms step_avg:45.66ms +[2025-09-11 10:11:22] [Rank 0] step:3301/10000 train_time:150476ms step_avg:45.58ms +[2025-09-11 10:11:22] [Rank 0] step:3301/10000 train_time:150476ms step_avg:45.58ms +[2025-09-11 10:11:23] [Rank 0] step:3321/10000 train_time:151136ms step_avg:45.51ms +[2025-09-11 10:11:23] [Rank 0] step:3321/10000 train_time:151136ms step_avg:45.51ms +[2025-09-11 10:11:24] [Rank 0] step:3341/10000 train_time:151797ms step_avg:45.43ms +[2025-09-11 10:11:24] [Rank 0] step:3341/10000 train_time:151797ms step_avg:45.43ms +[2025-09-11 10:11:24] [Rank 0] step:3361/10000 train_time:152459ms step_avg:45.36ms +[2025-09-11 10:11:24] [Rank 0] step:3361/10000 train_time:152459ms step_avg:45.36ms +[2025-09-11 10:11:25] [Rank 0] step:3381/10000 train_time:153120ms step_avg:45.29ms +[2025-09-11 10:11:25] [Rank 0] step:3381/10000 train_time:153120ms step_avg:45.29ms +[2025-09-11 10:11:26] [Rank 0] step:3401/10000 train_time:153781ms step_avg:45.22ms +[2025-09-11 10:11:26] [Rank 0] step:3401/10000 train_time:153781ms step_avg:45.22ms +[2025-09-11 10:11:26] [Rank 0] step:3421/10000 train_time:154441ms step_avg:45.14ms +[2025-09-11 10:11:26] [Rank 0] step:3421/10000 train_time:154441ms step_avg:45.14ms +[2025-09-11 10:11:27] [Rank 0] step:3441/10000 train_time:155104ms step_avg:45.08ms +[2025-09-11 10:11:27] [Rank 0] step:3441/10000 train_time:155104ms step_avg:45.08ms +[2025-09-11 10:11:28] [Rank 0] step:3461/10000 train_time:156334ms step_avg:45.17ms +[2025-09-11 10:11:28] [Rank 0] step:3461/10000 train_time:156334ms step_avg:45.17ms +[2025-09-11 10:11:29] [Rank 0] step:3481/10000 train_time:156998ms step_avg:45.10ms +[2025-09-11 10:11:29] [Rank 0] step:3481/10000 train_time:156998ms step_avg:45.10ms +[2025-09-11 10:11:30] [Rank 0] step:3501/10000 train_time:157659ms step_avg:45.03ms +[2025-09-11 10:11:30] [Rank 0] step:3501/10000 train_time:157659ms step_avg:45.03ms +[2025-09-11 10:11:30] [Rank 0] step:3521/10000 train_time:158621ms step_avg:45.05ms 
+[2025-09-11 10:11:30] [Rank 0] step:3521/10000 train_time:158621ms step_avg:45.05ms +[2025-09-11 10:11:31] [Rank 0] step:3541/10000 train_time:159283ms step_avg:44.98ms +[2025-09-11 10:11:31] [Rank 0] step:3541/10000 train_time:159283ms step_avg:44.98ms +[2025-09-11 10:11:32] [Rank 0] step:3561/10000 train_time:159943ms step_avg:44.92ms +[2025-09-11 10:11:32] [Rank 0] step:3561/10000 train_time:159943ms step_avg:44.92ms +[2025-09-11 10:11:32] [Rank 0] step:3581/10000 train_time:160604ms step_avg:44.85ms +[2025-09-11 10:11:32] [Rank 0] step:3581/10000 train_time:160604ms step_avg:44.85ms +[2025-09-11 10:11:33] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 10:11:33] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 10:11:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 10:11:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 10:11:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 10:11:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 10:11:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:11:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:11:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 10:11:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 10:11:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 10:11:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 10:11:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 10:11:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 10:11:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 10:11:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 10:11:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 10:11:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 10:11:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 10:11:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 10:11:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 10:11:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 10:11:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 10:11:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 10:11:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 10:11:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 10:11:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 10:11:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 10:11:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 10:11:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 10:11:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 10:11:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 10:11:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 10:11:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 10:11:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 10:11:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 10:11:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 10:11:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 10:11:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 10:11:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 10:11:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 10:11:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 10:11:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 10:11:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 10:11:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:11:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:11:44] [Rank 0] PRINT: step:3600/10000 val_loss:4.8494 total_sharp:2.3933e-03 L1_sharp:7.7764e-03 L2_sharp:1.8996e-03 L3_sharp:1.4200e-03 L4_sharp:9.4624e-04 L5_sharp:2.1108e-03 L6_sharp:1.7680e-03 L7_sharp:2.6300e-03 L8_sharp:5.7804e-03 L9_sharp:4.3334e-03 L10_sharp:5.0688e-03 L11_sharp:6.9251e-03 L12_sharp:1.7246e-02 total_fnorm:1.6125e+01 total_l1_linf:3.0976e+04 total_spectral:7.8438e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2422e+00 L9_fnorm:1.2734e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2812e+00 L12_fnorm:1.2578e+00 L1_l1linf:3.4375e-01 L2_l1linf:3.3398e-01 L3_l1linf:3.1836e-01 L4_l1linf:3.1641e-01 L5_l1linf:3.1641e-01 L6_l1linf:3.1055e-01 L7_l1linf:3.1836e-01 L8_l1linf:3.1250e-01 L9_l1linf:3.1445e-01 L10_l1linf:3.0859e-01 L11_l1linf:3.2031e-01 L12_l1linf:2.8906e-01 L1_spectral:1.5793e-02 L2_spectral:1.5400e-02 L3_spectral:1.5385e-02 L4_spectral:1.5370e-02 L5_spectral:1.5342e-02 L6_spectral:1.5390e-02 L7_spectral:1.5336e-02 L8_spectral:1.5602e-02 L9_spectral:1.5476e-02 L10_spectral:1.5542e-02 L11_spectral:1.5466e-02 L12_spectral:1.5375e-02 train_time:161245ms step_avg:44.79ms +[2025-09-11 10:11:44] [Rank 0] PRINT: step:3600/10000 val_loss:4.8494 total_sharp:2.3933e-03 L1_sharp:7.7764e-03 L2_sharp:1.8996e-03 L3_sharp:1.4200e-03 L4_sharp:9.4624e-04 L5_sharp:2.1108e-03 L6_sharp:1.7680e-03 L7_sharp:2.6300e-03 L8_sharp:5.7804e-03 L9_sharp:4.3334e-03 L10_sharp:5.0688e-03 L11_sharp:6.9251e-03 L12_sharp:1.7246e-02 total_fnorm:1.6125e+01 total_l1_linf:3.0976e+04 total_spectral:7.8438e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2422e+00 L9_fnorm:1.2734e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2812e+00 L12_fnorm:1.2578e+00 L1_l1linf:3.4375e-01 L2_l1linf:3.3398e-01 L3_l1linf:3.1836e-01 L4_l1linf:3.1641e-01 L5_l1linf:3.1641e-01 
L6_l1linf:3.1055e-01 L7_l1linf:3.1836e-01 L8_l1linf:3.1250e-01 L9_l1linf:3.1445e-01 L10_l1linf:3.0859e-01 L11_l1linf:3.2031e-01 L12_l1linf:2.8906e-01 L1_spectral:1.5793e-02 L2_spectral:1.5400e-02 L3_spectral:1.5385e-02 L4_spectral:1.5370e-02 L5_spectral:1.5342e-02 L6_spectral:1.5390e-02 L7_spectral:1.5336e-02 L8_spectral:1.5602e-02 L9_spectral:1.5476e-02 L10_spectral:1.5542e-02 L11_spectral:1.5466e-02 L12_spectral:1.5375e-02 train_time:161245ms step_avg:44.79ms +[2025-09-11 10:11:46] [Rank 0] step:3601/10000 train_time:162510ms step_avg:45.13ms +[2025-09-11 10:11:46] [Rank 0] step:3601/10000 train_time:162510ms step_avg:45.13ms +[2025-09-11 10:11:46] [Rank 0] step:3621/10000 train_time:163193ms step_avg:45.07ms +[2025-09-11 10:11:46] [Rank 0] step:3621/10000 train_time:163193ms step_avg:45.07ms +[2025-09-11 10:11:47] [Rank 0] step:3641/10000 train_time:163853ms step_avg:45.00ms +[2025-09-11 10:11:47] [Rank 0] step:3641/10000 train_time:163853ms step_avg:45.00ms +[2025-09-11 10:11:48] [Rank 0] step:3661/10000 train_time:164514ms step_avg:44.94ms +[2025-09-11 10:11:48] [Rank 0] step:3661/10000 train_time:164514ms step_avg:44.94ms +[2025-09-11 10:11:48] [Rank 0] step:3681/10000 train_time:165174ms step_avg:44.87ms +[2025-09-11 10:11:48] [Rank 0] step:3681/10000 train_time:165174ms step_avg:44.87ms +[2025-09-11 10:11:49] [Rank 0] step:3701/10000 train_time:165833ms step_avg:44.81ms +[2025-09-11 10:11:49] [Rank 0] step:3701/10000 train_time:165833ms step_avg:44.81ms +[2025-09-11 10:11:50] [Rank 0] step:3721/10000 train_time:166502ms step_avg:44.75ms +[2025-09-11 10:11:50] [Rank 0] step:3721/10000 train_time:166502ms step_avg:44.75ms +[2025-09-11 10:11:50] [Rank 0] step:3741/10000 train_time:167174ms step_avg:44.69ms +[2025-09-11 10:11:50] [Rank 0] step:3741/10000 train_time:167174ms step_avg:44.69ms +[2025-09-11 10:11:51] [Rank 0] step:3761/10000 train_time:167846ms step_avg:44.63ms +[2025-09-11 10:11:51] [Rank 0] step:3761/10000 train_time:167846ms step_avg:44.63ms 
+[2025-09-11 10:11:52] [Rank 0] step:3781/10000 train_time:168517ms step_avg:44.57ms +[2025-09-11 10:11:52] [Rank 0] step:3781/10000 train_time:168517ms step_avg:44.57ms +[2025-09-11 10:11:52] [Rank 0] step:3801/10000 train_time:169188ms step_avg:44.51ms +[2025-09-11 10:11:52] [Rank 0] step:3801/10000 train_time:169188ms step_avg:44.51ms +[2025-09-11 10:11:53] [Rank 0] step:3821/10000 train_time:169859ms step_avg:44.45ms +[2025-09-11 10:11:53] [Rank 0] step:3821/10000 train_time:169859ms step_avg:44.45ms +[2025-09-11 10:11:54] [Rank 0] step:3841/10000 train_time:170530ms step_avg:44.40ms +[2025-09-11 10:11:54] [Rank 0] step:3841/10000 train_time:170530ms step_avg:44.40ms +[2025-09-11 10:11:54] [Rank 0] step:3861/10000 train_time:171201ms step_avg:44.34ms +[2025-09-11 10:11:54] [Rank 0] step:3861/10000 train_time:171201ms step_avg:44.34ms +[2025-09-11 10:11:55] [Rank 0] step:3881/10000 train_time:171872ms step_avg:44.29ms +[2025-09-11 10:11:55] [Rank 0] step:3881/10000 train_time:171872ms step_avg:44.29ms +[2025-09-11 10:11:56] [Rank 0] step:3901/10000 train_time:172542ms step_avg:44.23ms +[2025-09-11 10:11:56] [Rank 0] step:3901/10000 train_time:172542ms step_avg:44.23ms +[2025-09-11 10:11:56] [Rank 0] step:3921/10000 train_time:173219ms step_avg:44.18ms +[2025-09-11 10:11:56] [Rank 0] step:3921/10000 train_time:173219ms step_avg:44.18ms +[2025-09-11 10:11:57] [Rank 0] step:3941/10000 train_time:173890ms step_avg:44.12ms +[2025-09-11 10:11:57] [Rank 0] step:3941/10000 train_time:173890ms step_avg:44.12ms +[2025-09-11 10:11:58] [Rank 0] step:3961/10000 train_time:174560ms step_avg:44.07ms +[2025-09-11 10:11:58] [Rank 0] step:3961/10000 train_time:174560ms step_avg:44.07ms +[2025-09-11 10:11:58] [Rank 0] step:3981/10000 train_time:175230ms step_avg:44.02ms +[2025-09-11 10:11:58] [Rank 0] step:3981/10000 train_time:175230ms step_avg:44.02ms +[2025-09-11 10:11:59] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 10:11:59] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 10:12:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 10:12:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 10:12:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 10:12:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 10:12:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:12:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:12:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 10:12:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 10:12:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 10:12:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 10:12:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 10:12:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 10:12:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 10:12:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 10:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 10:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 10:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 10:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 10:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 10:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 10:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 10:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 10:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 10:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 10:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 10:12:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 10:12:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 10:12:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 10:12:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 10:12:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 10:12:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 10:12:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 10:12:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 10:12:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 10:12:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 10:12:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 10:12:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 10:12:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 10:12:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 10:12:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 10:12:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 10:12:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 10:12:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:12:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:12:09] [Rank 0] PRINT: step:4000/10000 val_loss:4.7914 total_sharp:2.3252e-03 L1_sharp:1.0962e-02 L2_sharp:1.6429e-03 L3_sharp:9.5648e-04 L4_sharp:1.5796e-03 L5_sharp:1.8881e-03 L6_sharp:2.1175e-03 L7_sharp:2.1555e-03 L8_sharp:5.7582e-03 L9_sharp:4.7473e-03 L10_sharp:5.5602e-03 L11_sharp:7.4123e-03 L12_sharp:4.6125e-02 total_fnorm:1.9000e+01 total_l1_linf:3.4816e+04 total_spectral:9.3125e+00 L1_fnorm:1.2656e+00 L2_fnorm:1.2188e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2344e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2812e+00 L12_fnorm:1.2422e+00 L1_l1linf:3.3789e-01 L2_l1linf:3.2812e-01 L3_l1linf:3.1836e-01 L4_l1linf:3.1250e-01 L5_l1linf:3.1250e-01 L6_l1linf:3.0859e-01 L7_l1linf:3.1250e-01 L8_l1linf:3.0469e-01 L9_l1linf:3.0664e-01 L10_l1linf:3.1055e-01 L11_l1linf:3.1836e-01 L12_l1linf:2.6562e-01 L1_spectral:1.5809e-02 L2_spectral:1.5535e-02 L3_spectral:1.5538e-02 L4_spectral:1.5529e-02 L5_spectral:1.5433e-02 L6_spectral:1.5475e-02 L7_spectral:1.5486e-02 L8_spectral:1.5697e-02 L9_spectral:1.5518e-02 L10_spectral:1.5565e-02 L11_spectral:1.5683e-02 L12_spectral:1.5436e-02 train_time:175882ms step_avg:43.97ms +[2025-09-11 10:12:09] [Rank 0] PRINT: step:4000/10000 
val_loss:4.7914 total_sharp:2.3252e-03 L1_sharp:1.0962e-02 L2_sharp:1.6429e-03 L3_sharp:9.5648e-04 L4_sharp:1.5796e-03 L5_sharp:1.8881e-03 L6_sharp:2.1175e-03 L7_sharp:2.1555e-03 L8_sharp:5.7582e-03 L9_sharp:4.7473e-03 L10_sharp:5.5602e-03 L11_sharp:7.4123e-03 L12_sharp:4.6125e-02 total_fnorm:1.9000e+01 total_l1_linf:3.4816e+04 total_spectral:9.3125e+00 L1_fnorm:1.2656e+00 L2_fnorm:1.2188e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2344e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2812e+00 L12_fnorm:1.2422e+00 L1_l1linf:3.3789e-01 L2_l1linf:3.2812e-01 L3_l1linf:3.1836e-01 L4_l1linf:3.1250e-01 L5_l1linf:3.1250e-01 L6_l1linf:3.0859e-01 L7_l1linf:3.1250e-01 L8_l1linf:3.0469e-01 L9_l1linf:3.0664e-01 L10_l1linf:3.1055e-01 L11_l1linf:3.1836e-01 L12_l1linf:2.6562e-01 L1_spectral:1.5809e-02 L2_spectral:1.5535e-02 L3_spectral:1.5538e-02 L4_spectral:1.5529e-02 L5_spectral:1.5433e-02 L6_spectral:1.5475e-02 L7_spectral:1.5486e-02 L8_spectral:1.5697e-02 L9_spectral:1.5518e-02 L10_spectral:1.5565e-02 L11_spectral:1.5683e-02 L12_spectral:1.5436e-02 train_time:175882ms step_avg:43.97ms +[2025-09-11 10:12:10] [Rank 0] step:4001/10000 train_time:177128ms step_avg:44.27ms +[2025-09-11 10:12:10] [Rank 0] step:4001/10000 train_time:177128ms step_avg:44.27ms +[2025-09-11 10:12:11] [Rank 0] step:4021/10000 train_time:177803ms step_avg:44.22ms +[2025-09-11 10:12:11] [Rank 0] step:4021/10000 train_time:177803ms step_avg:44.22ms +[2025-09-11 10:12:11] [Rank 0] step:4041/10000 train_time:178474ms step_avg:44.17ms +[2025-09-11 10:12:11] [Rank 0] step:4041/10000 train_time:178474ms step_avg:44.17ms +[2025-09-11 10:12:12] [Rank 0] step:4061/10000 train_time:179143ms step_avg:44.11ms +[2025-09-11 10:12:12] [Rank 0] step:4061/10000 train_time:179143ms step_avg:44.11ms +[2025-09-11 10:12:13] [Rank 0] step:4081/10000 train_time:179812ms step_avg:44.06ms +[2025-09-11 10:12:13] [Rank 0] step:4081/10000 
train_time:179812ms step_avg:44.06ms +[2025-09-11 10:12:13] [Rank 0] step:4101/10000 train_time:180482ms step_avg:44.01ms +[2025-09-11 10:12:13] [Rank 0] step:4101/10000 train_time:180482ms step_avg:44.01ms +[2025-09-11 10:12:14] [Rank 0] step:4121/10000 train_time:181152ms step_avg:43.96ms +[2025-09-11 10:12:14] [Rank 0] step:4121/10000 train_time:181152ms step_avg:43.96ms +[2025-09-11 10:12:15] [Rank 0] step:4141/10000 train_time:181821ms step_avg:43.91ms +[2025-09-11 10:12:15] [Rank 0] step:4141/10000 train_time:181821ms step_avg:43.91ms +[2025-09-11 10:12:15] [Rank 0] step:4161/10000 train_time:182491ms step_avg:43.86ms +[2025-09-11 10:12:15] [Rank 0] step:4161/10000 train_time:182491ms step_avg:43.86ms +[2025-09-11 10:12:16] [Rank 0] step:4181/10000 train_time:183160ms step_avg:43.81ms +[2025-09-11 10:12:16] [Rank 0] step:4181/10000 train_time:183160ms step_avg:43.81ms +[2025-09-11 10:12:17] [Rank 0] step:4201/10000 train_time:183830ms step_avg:43.76ms +[2025-09-11 10:12:17] [Rank 0] step:4201/10000 train_time:183830ms step_avg:43.76ms +[2025-09-11 10:12:17] [Rank 0] step:4221/10000 train_time:184498ms step_avg:43.71ms +[2025-09-11 10:12:17] [Rank 0] step:4221/10000 train_time:184498ms step_avg:43.71ms +[2025-09-11 10:12:18] [Rank 0] step:4241/10000 train_time:185167ms step_avg:43.66ms +[2025-09-11 10:12:18] [Rank 0] step:4241/10000 train_time:185167ms step_avg:43.66ms +[2025-09-11 10:12:19] [Rank 0] step:4261/10000 train_time:185836ms step_avg:43.61ms +[2025-09-11 10:12:19] [Rank 0] step:4261/10000 train_time:185836ms step_avg:43.61ms +[2025-09-11 10:12:19] [Rank 0] step:4281/10000 train_time:186507ms step_avg:43.57ms +[2025-09-11 10:12:19] [Rank 0] step:4281/10000 train_time:186507ms step_avg:43.57ms +[2025-09-11 10:12:20] [Rank 0] step:4301/10000 train_time:187177ms step_avg:43.52ms +[2025-09-11 10:12:20] [Rank 0] step:4301/10000 train_time:187177ms step_avg:43.52ms +[2025-09-11 10:12:21] [Rank 0] step:4321/10000 train_time:187846ms step_avg:43.47ms 
+[2025-09-11 10:12:21] [Rank 0] step:4321/10000 train_time:187846ms step_avg:43.47ms +[2025-09-11 10:12:21] [Rank 0] step:4341/10000 train_time:188515ms step_avg:43.43ms +[2025-09-11 10:12:21] [Rank 0] step:4341/10000 train_time:188515ms step_avg:43.43ms +[2025-09-11 10:12:22] [Rank 0] step:4361/10000 train_time:189184ms step_avg:43.38ms +[2025-09-11 10:12:22] [Rank 0] step:4361/10000 train_time:189184ms step_avg:43.38ms +[2025-09-11 10:12:23] [Rank 0] step:4381/10000 train_time:189854ms step_avg:43.34ms +[2025-09-11 10:12:23] [Rank 0] step:4381/10000 train_time:189854ms step_avg:43.34ms +[2025-09-11 10:12:23] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 10:12:23] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 10:12:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 10:12:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 10:12:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 10:12:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 10:12:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:12:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:12:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 10:12:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 10:12:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 10:12:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 10:12:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 10:12:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 10:12:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 10:12:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 10:12:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 10:12:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 10:12:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 10:12:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 10:12:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 10:12:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 10:12:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 10:12:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 10:12:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 10:12:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 10:12:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 10:12:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 10:12:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 10:12:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 10:12:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 10:12:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 10:12:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 10:12:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 10:12:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 10:12:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 10:12:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 10:12:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 10:12:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 10:12:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 10:12:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 10:12:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 10:12:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 10:12:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 10:12:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:12:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:12:33] [Rank 0] PRINT: step:4400/10000 val_loss:4.7539 total_sharp:1.6578e-03 L1_sharp:4.4729e-03 L2_sharp:1.4505e-03 L3_sharp:3.1249e-04 L4_sharp:7.3319e-04 L5_sharp:1.0806e-03 L6_sharp:1.0290e-03 L7_sharp:1.6439e-03 L8_sharp:4.5038e-03 L9_sharp:3.5711e-03 L10_sharp:4.2288e-03 L11_sharp:6.3948e-03 L12_sharp:1.8745e-02 total_fnorm:1.6875e+01 total_l1_linf:3.0976e+04 total_spectral:8.1875e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2188e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2422e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2344e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2812e+00 L12_fnorm:1.2500e+00 L1_l1linf:3.3203e-01 L2_l1linf:3.2422e-01 L3_l1linf:3.1055e-01 L4_l1linf:3.0664e-01 L5_l1linf:3.0078e-01 L6_l1linf:3.0859e-01 L7_l1linf:3.0859e-01 L8_l1linf:3.0469e-01 L9_l1linf:2.9883e-01 L10_l1linf:3.0664e-01 L11_l1linf:3.0664e-01 L12_l1linf:2.6953e-01 L1_spectral:1.6031e-02 L2_spectral:1.5740e-02 L3_spectral:1.5609e-02 L4_spectral:1.5594e-02 L5_spectral:1.5463e-02 L6_spectral:1.5574e-02 L7_spectral:1.5660e-02 L8_spectral:1.5762e-02 L9_spectral:1.5627e-02 L10_spectral:1.5744e-02 L11_spectral:1.5697e-02 L12_spectral:1.5568e-02 train_time:190504ms step_avg:43.30ms +[2025-09-11 10:12:33] [Rank 0] PRINT: step:4400/10000 val_loss:4.7539 total_sharp:1.6578e-03 L1_sharp:4.4729e-03 L2_sharp:1.4505e-03 L3_sharp:3.1249e-04 L4_sharp:7.3319e-04 L5_sharp:1.0806e-03 L6_sharp:1.0290e-03 L7_sharp:1.6439e-03 L8_sharp:4.5038e-03 L9_sharp:3.5711e-03 L10_sharp:4.2288e-03 L11_sharp:6.3948e-03 L12_sharp:1.8745e-02 total_fnorm:1.6875e+01 total_l1_linf:3.0976e+04 total_spectral:8.1875e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2188e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2422e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2344e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2812e+00 L12_fnorm:1.2500e+00 L1_l1linf:3.3203e-01 L2_l1linf:3.2422e-01 L3_l1linf:3.1055e-01 L4_l1linf:3.0664e-01 L5_l1linf:3.0078e-01 
L6_l1linf:3.0859e-01 L7_l1linf:3.0859e-01 L8_l1linf:3.0469e-01 L9_l1linf:2.9883e-01 L10_l1linf:3.0664e-01 L11_l1linf:3.0664e-01 L12_l1linf:2.6953e-01 L1_spectral:1.6031e-02 L2_spectral:1.5740e-02 L3_spectral:1.5609e-02 L4_spectral:1.5594e-02 L5_spectral:1.5463e-02 L6_spectral:1.5574e-02 L7_spectral:1.5660e-02 L8_spectral:1.5762e-02 L9_spectral:1.5627e-02 L10_spectral:1.5744e-02 L11_spectral:1.5697e-02 L12_spectral:1.5568e-02 train_time:190504ms step_avg:43.30ms +[2025-09-11 10:12:35] [Rank 0] step:4401/10000 train_time:191729ms step_avg:43.56ms +[2025-09-11 10:12:35] [Rank 0] step:4401/10000 train_time:191729ms step_avg:43.56ms +[2025-09-11 10:12:35] [Rank 0] step:4421/10000 train_time:192425ms step_avg:43.53ms +[2025-09-11 10:12:35] [Rank 0] step:4421/10000 train_time:192425ms step_avg:43.53ms +[2025-09-11 10:12:36] [Rank 0] step:4441/10000 train_time:193097ms step_avg:43.48ms +[2025-09-11 10:12:36] [Rank 0] step:4441/10000 train_time:193097ms step_avg:43.48ms +[2025-09-11 10:12:37] [Rank 0] step:4461/10000 train_time:193770ms step_avg:43.44ms +[2025-09-11 10:12:37] [Rank 0] step:4461/10000 train_time:193770ms step_avg:43.44ms +[2025-09-11 10:12:37] [Rank 0] step:4481/10000 train_time:194443ms step_avg:43.39ms +[2025-09-11 10:12:37] [Rank 0] step:4481/10000 train_time:194443ms step_avg:43.39ms +[2025-09-11 10:12:38] [Rank 0] step:4501/10000 train_time:195117ms step_avg:43.35ms +[2025-09-11 10:12:38] [Rank 0] step:4501/10000 train_time:195117ms step_avg:43.35ms +[2025-09-11 10:12:39] [Rank 0] step:4521/10000 train_time:195790ms step_avg:43.31ms +[2025-09-11 10:12:39] [Rank 0] step:4521/10000 train_time:195790ms step_avg:43.31ms +[2025-09-11 10:12:39] [Rank 0] step:4541/10000 train_time:196465ms step_avg:43.26ms +[2025-09-11 10:12:39] [Rank 0] step:4541/10000 train_time:196465ms step_avg:43.26ms +[2025-09-11 10:12:40] [Rank 0] step:4561/10000 train_time:197138ms step_avg:43.22ms +[2025-09-11 10:12:40] [Rank 0] step:4561/10000 train_time:197138ms step_avg:43.22ms 
+[2025-09-11 10:12:41] [Rank 0] step:4581/10000 train_time:197812ms step_avg:43.18ms +[2025-09-11 10:12:41] [Rank 0] step:4581/10000 train_time:197812ms step_avg:43.18ms +[2025-09-11 10:12:41] [Rank 0] step:4601/10000 train_time:198485ms step_avg:43.14ms +[2025-09-11 10:12:41] [Rank 0] step:4601/10000 train_time:198485ms step_avg:43.14ms +[2025-09-11 10:12:42] [Rank 0] step:4621/10000 train_time:199159ms step_avg:43.10ms +[2025-09-11 10:12:42] [Rank 0] step:4621/10000 train_time:199159ms step_avg:43.10ms +[2025-09-11 10:12:43] [Rank 0] step:4641/10000 train_time:199831ms step_avg:43.06ms +[2025-09-11 10:12:43] [Rank 0] step:4641/10000 train_time:199831ms step_avg:43.06ms +[2025-09-11 10:12:43] [Rank 0] step:4661/10000 train_time:200504ms step_avg:43.02ms +[2025-09-11 10:12:43] [Rank 0] step:4661/10000 train_time:200504ms step_avg:43.02ms +[2025-09-11 10:12:44] [Rank 0] step:4681/10000 train_time:201178ms step_avg:42.98ms +[2025-09-11 10:12:44] [Rank 0] step:4681/10000 train_time:201178ms step_avg:42.98ms +[2025-09-11 10:12:45] [Rank 0] step:4701/10000 train_time:201851ms step_avg:42.94ms +[2025-09-11 10:12:45] [Rank 0] step:4701/10000 train_time:201851ms step_avg:42.94ms +[2025-09-11 10:12:45] [Rank 0] step:4721/10000 train_time:202524ms step_avg:42.90ms +[2025-09-11 10:12:45] [Rank 0] step:4721/10000 train_time:202524ms step_avg:42.90ms +[2025-09-11 10:12:46] [Rank 0] step:4741/10000 train_time:203196ms step_avg:42.86ms +[2025-09-11 10:12:46] [Rank 0] step:4741/10000 train_time:203196ms step_avg:42.86ms +[2025-09-11 10:12:47] [Rank 0] step:4761/10000 train_time:203870ms step_avg:42.82ms +[2025-09-11 10:12:47] [Rank 0] step:4761/10000 train_time:203870ms step_avg:42.82ms +[2025-09-11 10:12:47] [Rank 0] step:4781/10000 train_time:204542ms step_avg:42.78ms +[2025-09-11 10:12:47] [Rank 0] step:4781/10000 train_time:204542ms step_avg:42.78ms +[2025-09-11 10:12:48] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 10:12:48] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 10:12:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 10:12:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 10:12:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 10:12:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 10:12:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:12:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:12:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 10:12:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 10:12:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 10:12:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 10:12:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 10:12:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 10:12:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 10:12:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 10:12:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 10:12:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 10:12:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 10:12:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 10:12:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 10:12:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 10:12:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 10:12:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 10:12:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 10:12:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 10:12:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 10:12:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 10:12:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 10:12:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 10:12:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 10:12:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 10:12:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 10:12:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 10:12:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 10:12:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 10:12:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 10:12:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 10:12:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 10:12:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 10:12:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 10:12:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 10:12:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 10:12:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 10:12:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:12:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:12:58] [Rank 0] PRINT: step:4800/10000 val_loss:4.7077 total_sharp:1.8136e-03 L1_sharp:3.6522e-03 L2_sharp:2.0882e-03 L3_sharp:7.3052e-04 L4_sharp:8.3997e-04 L5_sharp:1.1104e-03 L6_sharp:1.4944e-03 L7_sharp:1.1864e-03 L8_sharp:4.3474e-03 L9_sharp:3.8175e-03 L10_sharp:4.0916e-03 L11_sharp:5.8808e-03 L12_sharp:2.2609e-02 total_fnorm:1.6750e+01 total_l1_linf:3.1104e+04 total_spectral:8.1875e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2344e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2734e+00 L12_fnorm:1.2500e+00 L1_l1linf:3.3398e-01 L2_l1linf:3.3398e-01 L3_l1linf:3.0859e-01 L4_l1linf:3.0664e-01 L5_l1linf:3.1055e-01 L6_l1linf:3.0664e-01 L7_l1linf:3.0859e-01 L8_l1linf:2.9688e-01 L9_l1linf:2.9297e-01 L10_l1linf:2.9297e-01 L11_l1linf:2.9883e-01 L12_l1linf:2.7344e-01 L1_spectral:1.6060e-02 L2_spectral:1.5725e-02 L3_spectral:1.5720e-02 L4_spectral:1.5652e-02 L5_spectral:1.5669e-02 L6_spectral:1.5630e-02 L7_spectral:1.5667e-02 L8_spectral:1.5815e-02 L9_spectral:1.5787e-02 L10_spectral:1.5932e-02 L11_spectral:1.5861e-02 L12_spectral:1.5617e-02 train_time:205195ms step_avg:42.75ms +[2025-09-11 10:12:58] [Rank 0] PRINT: step:4800/10000 
val_loss:4.7077 total_sharp:1.8136e-03 L1_sharp:3.6522e-03 L2_sharp:2.0882e-03 L3_sharp:7.3052e-04 L4_sharp:8.3997e-04 L5_sharp:1.1104e-03 L6_sharp:1.4944e-03 L7_sharp:1.1864e-03 L8_sharp:4.3474e-03 L9_sharp:3.8175e-03 L10_sharp:4.0916e-03 L11_sharp:5.8808e-03 L12_sharp:2.2609e-02 total_fnorm:1.6750e+01 total_l1_linf:3.1104e+04 total_spectral:8.1875e+00 L1_fnorm:1.2812e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2344e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2734e+00 L12_fnorm:1.2500e+00 L1_l1linf:3.3398e-01 L2_l1linf:3.3398e-01 L3_l1linf:3.0859e-01 L4_l1linf:3.0664e-01 L5_l1linf:3.1055e-01 L6_l1linf:3.0664e-01 L7_l1linf:3.0859e-01 L8_l1linf:2.9688e-01 L9_l1linf:2.9297e-01 L10_l1linf:2.9297e-01 L11_l1linf:2.9883e-01 L12_l1linf:2.7344e-01 L1_spectral:1.6060e-02 L2_spectral:1.5725e-02 L3_spectral:1.5720e-02 L4_spectral:1.5652e-02 L5_spectral:1.5669e-02 L6_spectral:1.5630e-02 L7_spectral:1.5667e-02 L8_spectral:1.5815e-02 L9_spectral:1.5787e-02 L10_spectral:1.5932e-02 L11_spectral:1.5861e-02 L12_spectral:1.5617e-02 train_time:205195ms step_avg:42.75ms +[2025-09-11 10:12:59] [Rank 0] step:4801/10000 train_time:206415ms step_avg:42.99ms +[2025-09-11 10:12:59] [Rank 0] step:4801/10000 train_time:206415ms step_avg:42.99ms +[2025-09-11 10:13:00] [Rank 0] step:4821/10000 train_time:207120ms step_avg:42.96ms +[2025-09-11 10:13:00] [Rank 0] step:4821/10000 train_time:207120ms step_avg:42.96ms +[2025-09-11 10:13:01] [Rank 0] step:4841/10000 train_time:207794ms step_avg:42.92ms +[2025-09-11 10:13:01] [Rank 0] step:4841/10000 train_time:207794ms step_avg:42.92ms +[2025-09-11 10:13:01] [Rank 0] step:4861/10000 train_time:208467ms step_avg:42.89ms +[2025-09-11 10:13:01] [Rank 0] step:4861/10000 train_time:208467ms step_avg:42.89ms +[2025-09-11 10:13:02] [Rank 0] step:4881/10000 train_time:209142ms step_avg:42.85ms +[2025-09-11 10:13:02] [Rank 0] step:4881/10000 
train_time:209142ms step_avg:42.85ms +[2025-09-11 10:13:03] [Rank 0] step:4901/10000 train_time:209817ms step_avg:42.81ms +[2025-09-11 10:13:03] [Rank 0] step:4901/10000 train_time:209817ms step_avg:42.81ms +[2025-09-11 10:13:03] [Rank 0] step:4921/10000 train_time:210490ms step_avg:42.77ms +[2025-09-11 10:13:03] [Rank 0] step:4921/10000 train_time:210490ms step_avg:42.77ms +[2025-09-11 10:13:04] [Rank 0] step:4941/10000 train_time:211163ms step_avg:42.74ms +[2025-09-11 10:13:04] [Rank 0] step:4941/10000 train_time:211163ms step_avg:42.74ms +[2025-09-11 10:13:05] [Rank 0] step:4961/10000 train_time:211836ms step_avg:42.70ms +[2025-09-11 10:13:05] [Rank 0] step:4961/10000 train_time:211836ms step_avg:42.70ms +[2025-09-11 10:13:05] [Rank 0] step:4981/10000 train_time:212509ms step_avg:42.66ms +[2025-09-11 10:13:05] [Rank 0] step:4981/10000 train_time:212509ms step_avg:42.66ms +[2025-09-11 10:13:06] [Rank 0] step:5001/10000 train_time:213183ms step_avg:42.63ms +[2025-09-11 10:13:06] [Rank 0] step:5001/10000 train_time:213183ms step_avg:42.63ms +[2025-09-11 10:13:07] [Rank 0] step:5021/10000 train_time:213855ms step_avg:42.59ms +[2025-09-11 10:13:07] [Rank 0] step:5021/10000 train_time:213855ms step_avg:42.59ms +[2025-09-11 10:13:07] [Rank 0] step:5041/10000 train_time:214527ms step_avg:42.56ms +[2025-09-11 10:13:07] [Rank 0] step:5041/10000 train_time:214527ms step_avg:42.56ms +[2025-09-11 10:13:08] [Rank 0] step:5061/10000 train_time:215200ms step_avg:42.52ms +[2025-09-11 10:13:08] [Rank 0] step:5061/10000 train_time:215200ms step_avg:42.52ms +[2025-09-11 10:13:09] [Rank 0] step:5081/10000 train_time:215872ms step_avg:42.49ms +[2025-09-11 10:13:09] [Rank 0] step:5081/10000 train_time:215872ms step_avg:42.49ms +[2025-09-11 10:13:09] [Rank 0] step:5101/10000 train_time:216545ms step_avg:42.45ms +[2025-09-11 10:13:09] [Rank 0] step:5101/10000 train_time:216545ms step_avg:42.45ms +[2025-09-11 10:13:10] [Rank 0] step:5121/10000 train_time:217218ms step_avg:42.42ms 
+[2025-09-11 10:13:10] [Rank 0] step:5121/10000 train_time:217218ms step_avg:42.42ms +[2025-09-11 10:13:11] [Rank 0] step:5141/10000 train_time:217891ms step_avg:42.38ms +[2025-09-11 10:13:11] [Rank 0] step:5141/10000 train_time:217891ms step_avg:42.38ms +[2025-09-11 10:13:12] [Rank 0] step:5161/10000 train_time:218564ms step_avg:42.35ms +[2025-09-11 10:13:12] [Rank 0] step:5161/10000 train_time:218564ms step_avg:42.35ms +[2025-09-11 10:13:12] [Rank 0] step:5181/10000 train_time:219236ms step_avg:42.32ms +[2025-09-11 10:13:12] [Rank 0] step:5181/10000 train_time:219236ms step_avg:42.32ms +[2025-09-11 10:13:13] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 10:13:13] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 10:13:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 10:13:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 10:13:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 10:13:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 10:13:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:13:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:13:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 10:13:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 10:13:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 10:13:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 10:13:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 10:13:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 10:13:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 10:13:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 10:13:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 10:13:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 10:13:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 10:13:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 10:13:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 10:13:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 10:13:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 10:13:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 10:13:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 10:13:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 10:13:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 10:13:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 10:13:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 10:13:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 10:13:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 10:13:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 10:13:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 10:13:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 10:13:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 10:13:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 10:13:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 10:13:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 10:13:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 10:13:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 10:13:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 10:13:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 10:13:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 10:13:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 10:13:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:13:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:13:23] [Rank 0] PRINT: step:5200/10000 val_loss:4.6712 total_sharp:2.4618e-03 L1_sharp:7.7186e-03 L2_sharp:1.2988e-03 L3_sharp:8.2942e-04 L4_sharp:9.5144e-04 L5_sharp:1.1624e-03 L6_sharp:1.8133e-03 L7_sharp:1.7926e-03 L8_sharp:4.6695e-03 L9_sharp:4.5849e-03 L10_sharp:5.1008e-03 L11_sharp:7.5754e-03 L12_sharp:4.1393e-02 total_fnorm:1.5938e+01 total_l1_linf:2.8672e+04 total_spectral:7.7500e+00 L1_fnorm:1.2734e+00 L2_fnorm:1.2266e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2344e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2734e+00 L12_fnorm:1.2578e+00 L1_l1linf:3.3398e-01 L2_l1linf:3.2227e-01 L3_l1linf:3.0859e-01 L4_l1linf:3.0273e-01 L5_l1linf:3.0469e-01 L6_l1linf:3.0078e-01 L7_l1linf:3.0664e-01 L8_l1linf:2.9688e-01 L9_l1linf:2.8516e-01 L10_l1linf:2.8711e-01 L11_l1linf:2.9688e-01 L12_l1linf:2.7539e-01 L1_spectral:1.6069e-02 L2_spectral:1.5833e-02 L3_spectral:1.5708e-02 L4_spectral:1.5729e-02 L5_spectral:1.5685e-02 L6_spectral:1.5860e-02 L7_spectral:1.5811e-02 L8_spectral:1.5913e-02 L9_spectral:1.5942e-02 L10_spectral:1.5990e-02 L11_spectral:1.5995e-02 L12_spectral:1.5743e-02 train_time:219896ms step_avg:42.29ms +[2025-09-11 10:13:23] [Rank 0] PRINT: step:5200/10000 val_loss:4.6712 total_sharp:2.4618e-03 L1_sharp:7.7186e-03 L2_sharp:1.2988e-03 L3_sharp:8.2942e-04 L4_sharp:9.5144e-04 L5_sharp:1.1624e-03 L6_sharp:1.8133e-03 L7_sharp:1.7926e-03 L8_sharp:4.6695e-03 L9_sharp:4.5849e-03 L10_sharp:5.1008e-03 L11_sharp:7.5754e-03 L12_sharp:4.1393e-02 total_fnorm:1.5938e+01 total_l1_linf:2.8672e+04 total_spectral:7.7500e+00 L1_fnorm:1.2734e+00 L2_fnorm:1.2266e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2344e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2734e+00 L12_fnorm:1.2578e+00 L1_l1linf:3.3398e-01 L2_l1linf:3.2227e-01 L3_l1linf:3.0859e-01 L4_l1linf:3.0273e-01 L5_l1linf:3.0469e-01 
L6_l1linf:3.0078e-01 L7_l1linf:3.0664e-01 L8_l1linf:2.9688e-01 L9_l1linf:2.8516e-01 L10_l1linf:2.8711e-01 L11_l1linf:2.9688e-01 L12_l1linf:2.7539e-01 L1_spectral:1.6069e-02 L2_spectral:1.5833e-02 L3_spectral:1.5708e-02 L4_spectral:1.5729e-02 L5_spectral:1.5685e-02 L6_spectral:1.5860e-02 L7_spectral:1.5811e-02 L8_spectral:1.5913e-02 L9_spectral:1.5942e-02 L10_spectral:1.5990e-02 L11_spectral:1.5995e-02 L12_spectral:1.5743e-02 train_time:219896ms step_avg:42.29ms +[2025-09-11 10:13:24] [Rank 0] step:5201/10000 train_time:221123ms step_avg:42.52ms +[2025-09-11 10:13:24] [Rank 0] step:5201/10000 train_time:221123ms step_avg:42.52ms +[2025-09-11 10:13:25] [Rank 0] step:5221/10000 train_time:221839ms step_avg:42.49ms +[2025-09-11 10:13:25] [Rank 0] step:5221/10000 train_time:221839ms step_avg:42.49ms +[2025-09-11 10:13:25] [Rank 0] step:5241/10000 train_time:222521ms step_avg:42.46ms +[2025-09-11 10:13:25] [Rank 0] step:5241/10000 train_time:222521ms step_avg:42.46ms +[2025-09-11 10:13:26] [Rank 0] step:5261/10000 train_time:223204ms step_avg:42.43ms +[2025-09-11 10:13:26] [Rank 0] step:5261/10000 train_time:223204ms step_avg:42.43ms +[2025-09-11 10:13:27] [Rank 0] step:5281/10000 train_time:223887ms step_avg:42.39ms +[2025-09-11 10:13:27] [Rank 0] step:5281/10000 train_time:223887ms step_avg:42.39ms +[2025-09-11 10:13:28] [Rank 0] step:5301/10000 train_time:224570ms step_avg:42.36ms +[2025-09-11 10:13:28] [Rank 0] step:5301/10000 train_time:224570ms step_avg:42.36ms +[2025-09-11 10:13:28] [Rank 0] step:5321/10000 train_time:225253ms step_avg:42.33ms +[2025-09-11 10:13:28] [Rank 0] step:5321/10000 train_time:225253ms step_avg:42.33ms +[2025-09-11 10:13:29] [Rank 0] step:5341/10000 train_time:225935ms step_avg:42.30ms +[2025-09-11 10:13:29] [Rank 0] step:5341/10000 train_time:225935ms step_avg:42.30ms +[2025-09-11 10:13:30] [Rank 0] step:5361/10000 train_time:226618ms step_avg:42.27ms +[2025-09-11 10:13:30] [Rank 0] step:5361/10000 train_time:226618ms step_avg:42.27ms 
+[2025-09-11 10:13:30] [Rank 0] step:5381/10000 train_time:227301ms step_avg:42.24ms +[2025-09-11 10:13:30] [Rank 0] step:5381/10000 train_time:227301ms step_avg:42.24ms +[2025-09-11 10:13:31] [Rank 0] step:5401/10000 train_time:227982ms step_avg:42.21ms +[2025-09-11 10:13:31] [Rank 0] step:5401/10000 train_time:227982ms step_avg:42.21ms +[2025-09-11 10:13:32] [Rank 0] step:5421/10000 train_time:228666ms step_avg:42.18ms +[2025-09-11 10:13:32] [Rank 0] step:5421/10000 train_time:228666ms step_avg:42.18ms +[2025-09-11 10:13:32] [Rank 0] step:5441/10000 train_time:229350ms step_avg:42.15ms +[2025-09-11 10:13:32] [Rank 0] step:5441/10000 train_time:229350ms step_avg:42.15ms +[2025-09-11 10:13:33] [Rank 0] step:5461/10000 train_time:230033ms step_avg:42.12ms +[2025-09-11 10:13:33] [Rank 0] step:5461/10000 train_time:230033ms step_avg:42.12ms +[2025-09-11 10:13:34] [Rank 0] step:5481/10000 train_time:230996ms step_avg:42.14ms +[2025-09-11 10:13:34] [Rank 0] step:5481/10000 train_time:230996ms step_avg:42.14ms +[2025-09-11 10:13:35] [Rank 0] step:5501/10000 train_time:231949ms step_avg:42.16ms +[2025-09-11 10:13:35] [Rank 0] step:5501/10000 train_time:231949ms step_avg:42.16ms +[2025-09-11 10:13:36] [Rank 0] step:5521/10000 train_time:232630ms step_avg:42.14ms +[2025-09-11 10:13:36] [Rank 0] step:5521/10000 train_time:232630ms step_avg:42.14ms +[2025-09-11 10:13:37] [Rank 0] step:5541/10000 train_time:233577ms step_avg:42.15ms +[2025-09-11 10:13:37] [Rank 0] step:5541/10000 train_time:233577ms step_avg:42.15ms +[2025-09-11 10:13:37] [Rank 0] step:5561/10000 train_time:234261ms step_avg:42.13ms +[2025-09-11 10:13:37] [Rank 0] step:5561/10000 train_time:234261ms step_avg:42.13ms +[2025-09-11 10:13:38] [Rank 0] step:5581/10000 train_time:234944ms step_avg:42.10ms +[2025-09-11 10:13:38] [Rank 0] step:5581/10000 train_time:234944ms step_avg:42.10ms +[2025-09-11 10:13:39] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 10:13:39] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 10:13:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 10:13:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 10:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 10:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 10:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 10:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 10:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 10:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 10:13:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 10:13:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 10:13:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 10:13:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 10:13:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 10:13:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 10:13:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 10:13:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 10:13:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 10:13:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 10:13:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 10:13:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 10:13:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 10:13:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 10:13:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 10:13:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 10:13:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 10:13:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 10:13:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 10:13:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 10:13:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 10:13:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 10:13:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 10:13:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 10:13:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 10:13:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 10:13:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 10:13:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 10:13:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 10:13:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 10:13:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 10:13:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 10:13:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:13:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:13:49] [Rank 0] PRINT: step:5600/10000 val_loss:4.6405 total_sharp:2.0002e-03 L1_sharp:6.0796e-03 L2_sharp:1.3643e-03 L3_sharp:1.3528e-03 L4_sharp:1.0079e-03 L5_sharp:1.6340e-03 L6_sharp:1.3585e-03 L7_sharp:1.3695e-03 L8_sharp:4.0587e-03 L9_sharp:3.5428e-03 L10_sharp:4.2896e-03 L11_sharp:6.0959e-03 L12_sharp:1.7268e-02 total_fnorm:1.5562e+01 total_l1_linf:2.8416e+04 total_spectral:7.5625e+00 L1_fnorm:1.2656e+00 L2_fnorm:1.2266e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2422e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2344e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2578e+00 L1_l1linf:3.2227e-01 L2_l1linf:3.1836e-01 L3_l1linf:3.0078e-01 L4_l1linf:3.0078e-01 L5_l1linf:2.9883e-01 L6_l1linf:3.0078e-01 L7_l1linf:3.0469e-01 L8_l1linf:2.9297e-01 L9_l1linf:2.8711e-01 L10_l1linf:2.8906e-01 L11_l1linf:2.8711e-01 L12_l1linf:2.7344e-01 L1_spectral:1.6167e-02 L2_spectral:1.5978e-02 L3_spectral:1.5884e-02 L4_spectral:1.5856e-02 L5_spectral:1.5785e-02 L6_spectral:1.5871e-02 L7_spectral:1.5846e-02 L8_spectral:1.5854e-02 L9_spectral:1.5991e-02 L10_spectral:1.6047e-02 L11_spectral:1.6079e-02 L12_spectral:1.5832e-02 train_time:235607ms step_avg:42.07ms +[2025-09-11 10:13:49] [Rank 0] PRINT: step:5600/10000 
val_loss:4.6405 total_sharp:2.0002e-03 L1_sharp:6.0796e-03 L2_sharp:1.3643e-03 L3_sharp:1.3528e-03 L4_sharp:1.0079e-03 L5_sharp:1.6340e-03 L6_sharp:1.3585e-03 L7_sharp:1.3695e-03 L8_sharp:4.0587e-03 L9_sharp:3.5428e-03 L10_sharp:4.2896e-03 L11_sharp:6.0959e-03 L12_sharp:1.7268e-02 total_fnorm:1.5562e+01 total_l1_linf:2.8416e+04 total_spectral:7.5625e+00 L1_fnorm:1.2656e+00 L2_fnorm:1.2266e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2422e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2344e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2578e+00 L1_l1linf:3.2227e-01 L2_l1linf:3.1836e-01 L3_l1linf:3.0078e-01 L4_l1linf:3.0078e-01 L5_l1linf:2.9883e-01 L6_l1linf:3.0078e-01 L7_l1linf:3.0469e-01 L8_l1linf:2.9297e-01 L9_l1linf:2.8711e-01 L10_l1linf:2.8906e-01 L11_l1linf:2.8711e-01 L12_l1linf:2.7344e-01 L1_spectral:1.6167e-02 L2_spectral:1.5978e-02 L3_spectral:1.5884e-02 L4_spectral:1.5856e-02 L5_spectral:1.5785e-02 L6_spectral:1.5871e-02 L7_spectral:1.5846e-02 L8_spectral:1.5854e-02 L9_spectral:1.5991e-02 L10_spectral:1.6047e-02 L11_spectral:1.6079e-02 L12_spectral:1.5832e-02 train_time:235607ms step_avg:42.07ms +[2025-09-11 10:13:50] [Rank 0] step:5601/10000 train_time:236832ms step_avg:42.28ms +[2025-09-11 10:13:50] [Rank 0] step:5601/10000 train_time:236832ms step_avg:42.28ms +[2025-09-11 10:13:50] [Rank 0] step:5621/10000 train_time:237532ms step_avg:42.26ms +[2025-09-11 10:13:50] [Rank 0] step:5621/10000 train_time:237532ms step_avg:42.26ms +[2025-09-11 10:13:51] [Rank 0] step:5641/10000 train_time:238215ms step_avg:42.23ms +[2025-09-11 10:13:51] [Rank 0] step:5641/10000 train_time:238215ms step_avg:42.23ms +[2025-09-11 10:13:52] [Rank 0] step:5661/10000 train_time:238899ms step_avg:42.20ms +[2025-09-11 10:13:52] [Rank 0] step:5661/10000 train_time:238899ms step_avg:42.20ms +[2025-09-11 10:13:53] [Rank 0] step:5681/10000 train_time:239582ms step_avg:42.17ms +[2025-09-11 10:13:53] [Rank 0] step:5681/10000 
train_time:239582ms step_avg:42.17ms +[2025-09-11 10:13:53] [Rank 0] step:5701/10000 train_time:240267ms step_avg:42.14ms +[2025-09-11 10:13:53] [Rank 0] step:5701/10000 train_time:240267ms step_avg:42.14ms +[2025-09-11 10:13:54] [Rank 0] step:5721/10000 train_time:240950ms step_avg:42.12ms +[2025-09-11 10:13:54] [Rank 0] step:5721/10000 train_time:240950ms step_avg:42.12ms +[2025-09-11 10:13:55] [Rank 0] step:5741/10000 train_time:241635ms step_avg:42.09ms +[2025-09-11 10:13:55] [Rank 0] step:5741/10000 train_time:241635ms step_avg:42.09ms +[2025-09-11 10:13:55] [Rank 0] step:5761/10000 train_time:242320ms step_avg:42.06ms +[2025-09-11 10:13:55] [Rank 0] step:5761/10000 train_time:242320ms step_avg:42.06ms +[2025-09-11 10:13:56] [Rank 0] step:5781/10000 train_time:243004ms step_avg:42.03ms +[2025-09-11 10:13:56] [Rank 0] step:5781/10000 train_time:243004ms step_avg:42.03ms +[2025-09-11 10:13:57] [Rank 0] step:5801/10000 train_time:243693ms step_avg:42.01ms +[2025-09-11 10:13:57] [Rank 0] step:5801/10000 train_time:243693ms step_avg:42.01ms +[2025-09-11 10:13:57] [Rank 0] step:5821/10000 train_time:244376ms step_avg:41.98ms +[2025-09-11 10:13:57] [Rank 0] step:5821/10000 train_time:244376ms step_avg:41.98ms +[2025-09-11 10:13:58] [Rank 0] step:5841/10000 train_time:245061ms step_avg:41.96ms +[2025-09-11 10:13:58] [Rank 0] step:5841/10000 train_time:245061ms step_avg:41.96ms +[2025-09-11 10:13:59] [Rank 0] step:5861/10000 train_time:245745ms step_avg:41.93ms +[2025-09-11 10:13:59] [Rank 0] step:5861/10000 train_time:245745ms step_avg:41.93ms +[2025-09-11 10:13:59] [Rank 0] step:5881/10000 train_time:246428ms step_avg:41.90ms +[2025-09-11 10:13:59] [Rank 0] step:5881/10000 train_time:246428ms step_avg:41.90ms +[2025-09-11 10:14:00] [Rank 0] step:5901/10000 train_time:247112ms step_avg:41.88ms +[2025-09-11 10:14:00] [Rank 0] step:5901/10000 train_time:247112ms step_avg:41.88ms +[2025-09-11 10:14:01] [Rank 0] step:5921/10000 train_time:247797ms step_avg:41.85ms 
+[2025-09-11 10:14:01] [Rank 0] step:5921/10000 train_time:247797ms step_avg:41.85ms +[2025-09-11 10:14:01] [Rank 0] step:5941/10000 train_time:248482ms step_avg:41.82ms +[2025-09-11 10:14:01] [Rank 0] step:5941/10000 train_time:248482ms step_avg:41.82ms +[2025-09-11 10:14:02] [Rank 0] step:5961/10000 train_time:249167ms step_avg:41.80ms +[2025-09-11 10:14:02] [Rank 0] step:5961/10000 train_time:249167ms step_avg:41.80ms +[2025-09-11 10:14:03] [Rank 0] step:5981/10000 train_time:249860ms step_avg:41.78ms +[2025-09-11 10:14:03] [Rank 0] step:5981/10000 train_time:249860ms step_avg:41.78ms +[2025-09-11 10:14:03] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 10:14:03] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 10:14:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 10:14:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 10:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 10:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 10:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 10:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 10:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 10:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 10:14:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 10:14:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 10:14:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 10:14:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 10:14:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 10:14:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 10:14:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 10:14:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 10:14:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 10:14:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 10:14:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 10:14:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 10:14:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 10:14:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 10:14:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 10:14:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 10:14:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 10:14:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 10:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 10:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 10:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 10:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 10:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 10:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 10:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 10:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 10:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 10:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 10:14:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 10:14:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 10:14:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 10:14:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 10:14:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:14:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:14:14] [Rank 0] PRINT: step:6000/10000 val_loss:4.5976 total_sharp:1.5551e-03 L1_sharp:6.2152e-03 L2_sharp:1.2233e-03 L3_sharp:6.7287e-04 L4_sharp:1.1069e-03 L5_sharp:1.3096e-03 L6_sharp:1.2457e-03 L7_sharp:1.2493e-03 L8_sharp:3.5653e-03 L9_sharp:3.0255e-03 L10_sharp:3.6097e-03 L11_sharp:5.3084e-03 L12_sharp:1.3324e-02 total_fnorm:1.6125e+01 total_l1_linf:2.8544e+04 total_spectral:7.8750e+00 L1_fnorm:1.2656e+00 L2_fnorm:1.2266e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2344e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2578e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2578e+00 L1_l1linf:3.2617e-01 L2_l1linf:3.2227e-01 L3_l1linf:3.0078e-01 L4_l1linf:2.9688e-01 L5_l1linf:3.0078e-01 L6_l1linf:3.0273e-01 L7_l1linf:2.9883e-01 L8_l1linf:2.8711e-01 L9_l1linf:2.8125e-01 L10_l1linf:2.7539e-01 L11_l1linf:2.8320e-01 L12_l1linf:2.7734e-01 L1_spectral:1.6254e-02 L2_spectral:1.5937e-02 L3_spectral:1.5845e-02 L4_spectral:1.5956e-02 L5_spectral:1.5856e-02 L6_spectral:1.5970e-02 L7_spectral:1.5942e-02 L8_spectral:1.6128e-02 L9_spectral:1.5991e-02 L10_spectral:1.6170e-02 L11_spectral:1.6112e-02 L12_spectral:1.5824e-02 train_time:250526ms step_avg:41.75ms +[2025-09-11 10:14:14] [Rank 0] PRINT: step:6000/10000 val_loss:4.5976 total_sharp:1.5551e-03 L1_sharp:6.2152e-03 L2_sharp:1.2233e-03 L3_sharp:6.7287e-04 L4_sharp:1.1069e-03 L5_sharp:1.3096e-03 L6_sharp:1.2457e-03 L7_sharp:1.2493e-03 L8_sharp:3.5653e-03 L9_sharp:3.0255e-03 L10_sharp:3.6097e-03 L11_sharp:5.3084e-03 L12_sharp:1.3324e-02 total_fnorm:1.6125e+01 total_l1_linf:2.8544e+04 total_spectral:7.8750e+00 L1_fnorm:1.2656e+00 L2_fnorm:1.2266e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2344e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2578e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2578e+00 L1_l1linf:3.2617e-01 L2_l1linf:3.2227e-01 L3_l1linf:3.0078e-01 L4_l1linf:2.9688e-01 L5_l1linf:3.0078e-01 
L6_l1linf:3.0273e-01 L7_l1linf:2.9883e-01 L8_l1linf:2.8711e-01 L9_l1linf:2.8125e-01 L10_l1linf:2.7539e-01 L11_l1linf:2.8320e-01 L12_l1linf:2.7734e-01 L1_spectral:1.6254e-02 L2_spectral:1.5937e-02 L3_spectral:1.5845e-02 L4_spectral:1.5956e-02 L5_spectral:1.5856e-02 L6_spectral:1.5970e-02 L7_spectral:1.5942e-02 L8_spectral:1.6128e-02 L9_spectral:1.5991e-02 L10_spectral:1.6170e-02 L11_spectral:1.6112e-02 L12_spectral:1.5824e-02 train_time:250526ms step_avg:41.75ms +[2025-09-11 10:14:15] [Rank 0] step:6001/10000 train_time:251751ms step_avg:41.95ms +[2025-09-11 10:14:15] [Rank 0] step:6001/10000 train_time:251751ms step_avg:41.95ms +[2025-09-11 10:14:16] [Rank 0] step:6021/10000 train_time:252474ms step_avg:41.93ms +[2025-09-11 10:14:16] [Rank 0] step:6021/10000 train_time:252474ms step_avg:41.93ms +[2025-09-11 10:14:16] [Rank 0] step:6041/10000 train_time:253162ms step_avg:41.91ms +[2025-09-11 10:14:16] [Rank 0] step:6041/10000 train_time:253162ms step_avg:41.91ms +[2025-09-11 10:14:17] [Rank 0] step:6061/10000 train_time:253848ms step_avg:41.88ms +[2025-09-11 10:14:17] [Rank 0] step:6061/10000 train_time:253848ms step_avg:41.88ms +[2025-09-11 10:14:18] [Rank 0] step:6081/10000 train_time:254536ms step_avg:41.86ms +[2025-09-11 10:14:18] [Rank 0] step:6081/10000 train_time:254536ms step_avg:41.86ms +[2025-09-11 10:14:18] [Rank 0] step:6101/10000 train_time:255222ms step_avg:41.83ms +[2025-09-11 10:14:18] [Rank 0] step:6101/10000 train_time:255222ms step_avg:41.83ms +[2025-09-11 10:14:19] [Rank 0] step:6121/10000 train_time:255908ms step_avg:41.81ms +[2025-09-11 10:14:19] [Rank 0] step:6121/10000 train_time:255908ms step_avg:41.81ms +[2025-09-11 10:14:20] [Rank 0] step:6141/10000 train_time:256594ms step_avg:41.78ms +[2025-09-11 10:14:20] [Rank 0] step:6141/10000 train_time:256594ms step_avg:41.78ms +[2025-09-11 10:14:20] [Rank 0] step:6161/10000 train_time:257279ms step_avg:41.76ms +[2025-09-11 10:14:20] [Rank 0] step:6161/10000 train_time:257279ms step_avg:41.76ms 
+[2025-09-11 10:14:21] [Rank 0] step:6181/10000 train_time:257963ms step_avg:41.73ms +[2025-09-11 10:14:21] [Rank 0] step:6181/10000 train_time:257963ms step_avg:41.73ms +[2025-09-11 10:14:22] [Rank 0] step:6201/10000 train_time:258649ms step_avg:41.71ms +[2025-09-11 10:14:22] [Rank 0] step:6201/10000 train_time:258649ms step_avg:41.71ms +[2025-09-11 10:14:22] [Rank 0] step:6221/10000 train_time:259335ms step_avg:41.69ms +[2025-09-11 10:14:22] [Rank 0] step:6221/10000 train_time:259335ms step_avg:41.69ms +[2025-09-11 10:14:23] [Rank 0] step:6241/10000 train_time:260020ms step_avg:41.66ms +[2025-09-11 10:14:23] [Rank 0] step:6241/10000 train_time:260020ms step_avg:41.66ms +[2025-09-11 10:14:24] [Rank 0] step:6261/10000 train_time:260704ms step_avg:41.64ms +[2025-09-11 10:14:24] [Rank 0] step:6261/10000 train_time:260704ms step_avg:41.64ms +[2025-09-11 10:14:24] [Rank 0] step:6281/10000 train_time:261390ms step_avg:41.62ms +[2025-09-11 10:14:24] [Rank 0] step:6281/10000 train_time:261390ms step_avg:41.62ms +[2025-09-11 10:14:25] [Rank 0] step:6301/10000 train_time:262075ms step_avg:41.59ms +[2025-09-11 10:14:25] [Rank 0] step:6301/10000 train_time:262075ms step_avg:41.59ms +[2025-09-11 10:14:26] [Rank 0] step:6321/10000 train_time:262763ms step_avg:41.57ms +[2025-09-11 10:14:26] [Rank 0] step:6321/10000 train_time:262763ms step_avg:41.57ms +[2025-09-11 10:14:27] [Rank 0] step:6341/10000 train_time:263450ms step_avg:41.55ms +[2025-09-11 10:14:27] [Rank 0] step:6341/10000 train_time:263450ms step_avg:41.55ms +[2025-09-11 10:14:27] [Rank 0] step:6361/10000 train_time:264137ms step_avg:41.52ms +[2025-09-11 10:14:27] [Rank 0] step:6361/10000 train_time:264137ms step_avg:41.52ms +[2025-09-11 10:14:28] [Rank 0] step:6381/10000 train_time:264823ms step_avg:41.50ms +[2025-09-11 10:14:28] [Rank 0] step:6381/10000 train_time:264823ms step_avg:41.50ms +[2025-09-11 10:14:29] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 10:14:29] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 10:14:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 10:14:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 10:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 10:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 10:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 10:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 10:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 10:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 10:14:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 10:14:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 10:14:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 10:14:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 10:14:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 10:14:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 10:14:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 10:14:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 10:14:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 10:14:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 10:14:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 10:14:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 10:14:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 10:14:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 10:14:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 10:14:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 10:14:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 10:14:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 10:14:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 10:14:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 10:14:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 10:14:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 10:14:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 10:14:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 10:14:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 10:14:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 10:14:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 10:14:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 10:14:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 10:14:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 10:14:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 10:14:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 10:14:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:14:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:14:41] [Rank 0] PRINT: step:6400/10000 val_loss:4.5640 total_sharp:1.9670e-03 L1_sharp:4.3297e-03 L2_sharp:1.2068e-03 L3_sharp:7.2276e-04 L4_sharp:1.6499e-03 L5_sharp:1.2375e-03 L6_sharp:1.4283e-03 L7_sharp:1.1932e-03 L8_sharp:3.5741e-03 L9_sharp:3.2018e-03 L10_sharp:4.1000e-03 L11_sharp:5.2387e-03 L12_sharp:1.8287e-02 total_fnorm:1.2562e+01 total_l1_linf:2.3424e+04 total_spectral:6.5938e+00 L1_fnorm:1.1484e+00 L2_fnorm:1.1016e+00 L3_fnorm:1.1094e+00 L4_fnorm:1.1328e+00 L5_fnorm:1.1250e+00 L6_fnorm:1.1328e+00 L7_fnorm:1.1328e+00 L8_fnorm:1.1016e+00 L9_fnorm:1.1328e+00 L10_fnorm:1.1328e+00 L11_fnorm:1.1406e+00 L12_fnorm:1.1250e+00 L1_l1linf:2.8125e-01 L2_l1linf:2.7539e-01 L3_l1linf:2.5977e-01 L4_l1linf:2.6172e-01 L5_l1linf:2.5586e-01 L6_l1linf:2.5977e-01 L7_l1linf:2.5977e-01 L8_l1linf:2.5391e-01 L9_l1linf:2.4219e-01 L10_l1linf:2.3730e-01 L11_l1linf:2.4512e-01 L12_l1linf:2.4414e-01 L1_spectral:1.4902e-02 L2_spectral:1.4512e-02 L3_spectral:1.4562e-02 L4_spectral:1.4600e-02 L5_spectral:1.4562e-02 L6_spectral:1.4605e-02 L7_spectral:1.4584e-02 L8_spectral:1.4577e-02 L9_spectral:1.4720e-02 L10_spectral:1.4696e-02 L11_spectral:1.4690e-02 L12_spectral:1.4638e-02 train_time:265487ms step_avg:41.48ms +[2025-09-11 10:14:41] [Rank 0] PRINT: step:6400/10000 
val_loss:4.5640 total_sharp:1.9670e-03 L1_sharp:4.3297e-03 L2_sharp:1.2068e-03 L3_sharp:7.2276e-04 L4_sharp:1.6499e-03 L5_sharp:1.2375e-03 L6_sharp:1.4283e-03 L7_sharp:1.1932e-03 L8_sharp:3.5741e-03 L9_sharp:3.2018e-03 L10_sharp:4.1000e-03 L11_sharp:5.2387e-03 L12_sharp:1.8287e-02 total_fnorm:1.2562e+01 total_l1_linf:2.3424e+04 total_spectral:6.5938e+00 L1_fnorm:1.1484e+00 L2_fnorm:1.1016e+00 L3_fnorm:1.1094e+00 L4_fnorm:1.1328e+00 L5_fnorm:1.1250e+00 L6_fnorm:1.1328e+00 L7_fnorm:1.1328e+00 L8_fnorm:1.1016e+00 L9_fnorm:1.1328e+00 L10_fnorm:1.1328e+00 L11_fnorm:1.1406e+00 L12_fnorm:1.1250e+00 L1_l1linf:2.8125e-01 L2_l1linf:2.7539e-01 L3_l1linf:2.5977e-01 L4_l1linf:2.6172e-01 L5_l1linf:2.5586e-01 L6_l1linf:2.5977e-01 L7_l1linf:2.5977e-01 L8_l1linf:2.5391e-01 L9_l1linf:2.4219e-01 L10_l1linf:2.3730e-01 L11_l1linf:2.4512e-01 L12_l1linf:2.4414e-01 L1_spectral:1.4902e-02 L2_spectral:1.4512e-02 L3_spectral:1.4562e-02 L4_spectral:1.4600e-02 L5_spectral:1.4562e-02 L6_spectral:1.4605e-02 L7_spectral:1.4584e-02 L8_spectral:1.4577e-02 L9_spectral:1.4720e-02 L10_spectral:1.4696e-02 L11_spectral:1.4690e-02 L12_spectral:1.4638e-02 train_time:265487ms step_avg:41.48ms +[2025-09-11 10:14:42] [Rank 0] step:6401/10000 train_time:267034ms step_avg:41.72ms +[2025-09-11 10:14:42] [Rank 0] step:6401/10000 train_time:267034ms step_avg:41.72ms +[2025-09-11 10:14:43] [Rank 0] step:6421/10000 train_time:267744ms step_avg:41.70ms +[2025-09-11 10:14:43] [Rank 0] step:6421/10000 train_time:267744ms step_avg:41.70ms +[2025-09-11 10:14:44] [Rank 0] step:6441/10000 train_time:268431ms step_avg:41.68ms +[2025-09-11 10:14:44] [Rank 0] step:6441/10000 train_time:268431ms step_avg:41.68ms +[2025-09-11 10:14:45] [Rank 0] step:6461/10000 train_time:269118ms step_avg:41.65ms +[2025-09-11 10:14:45] [Rank 0] step:6461/10000 train_time:269118ms step_avg:41.65ms +[2025-09-11 10:14:45] [Rank 0] step:6481/10000 train_time:269806ms step_avg:41.63ms +[2025-09-11 10:14:45] [Rank 0] step:6481/10000 
train_time:269806ms step_avg:41.63ms +[2025-09-11 10:14:46] [Rank 0] step:6501/10000 train_time:270496ms step_avg:41.61ms +[2025-09-11 10:14:46] [Rank 0] step:6501/10000 train_time:270496ms step_avg:41.61ms +[2025-09-11 10:14:47] [Rank 0] step:6521/10000 train_time:271184ms step_avg:41.59ms +[2025-09-11 10:14:47] [Rank 0] step:6521/10000 train_time:271184ms step_avg:41.59ms +[2025-09-11 10:14:47] [Rank 0] step:6541/10000 train_time:271870ms step_avg:41.56ms +[2025-09-11 10:14:47] [Rank 0] step:6541/10000 train_time:271870ms step_avg:41.56ms +[2025-09-11 10:14:48] [Rank 0] step:6561/10000 train_time:272556ms step_avg:41.54ms +[2025-09-11 10:14:48] [Rank 0] step:6561/10000 train_time:272556ms step_avg:41.54ms +[2025-09-11 10:14:49] [Rank 0] step:6581/10000 train_time:273243ms step_avg:41.52ms +[2025-09-11 10:14:49] [Rank 0] step:6581/10000 train_time:273243ms step_avg:41.52ms +[2025-09-11 10:14:49] [Rank 0] step:6601/10000 train_time:273930ms step_avg:41.50ms +[2025-09-11 10:14:49] [Rank 0] step:6601/10000 train_time:273930ms step_avg:41.50ms +[2025-09-11 10:14:50] [Rank 0] step:6621/10000 train_time:274616ms step_avg:41.48ms +[2025-09-11 10:14:50] [Rank 0] step:6621/10000 train_time:274616ms step_avg:41.48ms +[2025-09-11 10:14:51] [Rank 0] step:6641/10000 train_time:275303ms step_avg:41.46ms +[2025-09-11 10:14:51] [Rank 0] step:6641/10000 train_time:275303ms step_avg:41.46ms +[2025-09-11 10:14:51] [Rank 0] step:6661/10000 train_time:275991ms step_avg:41.43ms +[2025-09-11 10:14:51] [Rank 0] step:6661/10000 train_time:275991ms step_avg:41.43ms +[2025-09-11 10:14:52] [Rank 0] step:6681/10000 train_time:276684ms step_avg:41.41ms +[2025-09-11 10:14:52] [Rank 0] step:6681/10000 train_time:276684ms step_avg:41.41ms +[2025-09-11 10:14:53] [Rank 0] step:6701/10000 train_time:277380ms step_avg:41.39ms +[2025-09-11 10:14:53] [Rank 0] step:6701/10000 train_time:277380ms step_avg:41.39ms +[2025-09-11 10:14:53] [Rank 0] step:6721/10000 train_time:278075ms step_avg:41.37ms 
+[2025-09-11 10:14:53] [Rank 0] step:6721/10000 train_time:278075ms step_avg:41.37ms +[2025-09-11 10:14:54] [Rank 0] step:6741/10000 train_time:278770ms step_avg:41.35ms +[2025-09-11 10:14:54] [Rank 0] step:6741/10000 train_time:278770ms step_avg:41.35ms +[2025-09-11 10:14:55] [Rank 0] step:6761/10000 train_time:279463ms step_avg:41.33ms +[2025-09-11 10:14:55] [Rank 0] step:6761/10000 train_time:279463ms step_avg:41.33ms +[2025-09-11 10:14:56] [Rank 0] step:6781/10000 train_time:280159ms step_avg:41.32ms +[2025-09-11 10:14:56] [Rank 0] step:6781/10000 train_time:280159ms step_avg:41.32ms +[2025-09-11 10:14:56] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 10:14:56] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 10:14:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 10:14:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 10:14:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 10:14:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 10:14:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:14:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:14:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 10:14:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 10:14:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 10:14:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 10:15:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 10:15:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 10:15:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 10:15:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 10:15:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 10:15:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 10:15:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 10:15:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 10:15:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 10:15:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 10:15:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 10:15:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 10:15:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 10:15:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 10:15:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 10:15:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 10:15:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 10:15:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 10:15:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 10:15:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 10:15:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 10:15:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 10:15:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 10:15:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 10:15:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 10:15:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 10:15:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 10:15:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 10:15:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 10:15:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 10:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 10:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 10:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:15:06] [Rank 0] PRINT: step:6800/10000 val_loss:4.5300 total_sharp:1.5493e-03 L1_sharp:4.7016e-03 L2_sharp:1.4979e-03 L3_sharp:4.3748e-04 L4_sharp:7.6881e-04 L5_sharp:1.1471e-03 L6_sharp:1.2287e-03 L7_sharp:1.2165e-03 L8_sharp:3.5908e-03 L9_sharp:3.4216e-03 L10_sharp:3.9963e-03 L11_sharp:5.4134e-03 L12_sharp:1.7213e-02 total_fnorm:1.1750e+01 total_l1_linf:2.0992e+04 total_spectral:6.1250e+00 L1_fnorm:1.0234e+00 L2_fnorm:9.7266e-01 L3_fnorm:9.8438e-01 L4_fnorm:1.0000e+00 L5_fnorm:9.9609e-01 L6_fnorm:1.0000e+00 L7_fnorm:1.0000e+00 L8_fnorm:9.7266e-01 L9_fnorm:1.0000e+00 L10_fnorm:9.9609e-01 L11_fnorm:1.0000e+00 L12_fnorm:9.9609e-01 L1_l1linf:2.3535e-01 L2_l1linf:2.3340e-01 L3_l1linf:2.2363e-01 L4_l1linf:2.2363e-01 L5_l1linf:2.2656e-01 L6_l1linf:2.2266e-01 L7_l1linf:2.2559e-01 L8_l1linf:2.0996e-01 L9_l1linf:2.0703e-01 L10_l1linf:2.0020e-01 L11_l1linf:2.0410e-01 L12_l1linf:2.1289e-01 L1_spectral:1.3435e-02 L2_spectral:1.3094e-02 L3_spectral:1.3147e-02 L4_spectral:1.3173e-02 L5_spectral:1.3107e-02 L6_spectral:1.3169e-02 L7_spectral:1.3170e-02 L8_spectral:1.2984e-02 L9_spectral:1.3243e-02 L10_spectral:1.3271e-02 L11_spectral:1.3183e-02 L12_spectral:1.3211e-02 train_time:280833ms step_avg:41.30ms +[2025-09-11 10:15:06] [Rank 0] PRINT: step:6800/10000 val_loss:4.5300 total_sharp:1.5493e-03 L1_sharp:4.7016e-03 L2_sharp:1.4979e-03 L3_sharp:4.3748e-04 L4_sharp:7.6881e-04 L5_sharp:1.1471e-03 L6_sharp:1.2287e-03 L7_sharp:1.2165e-03 L8_sharp:3.5908e-03 L9_sharp:3.4216e-03 L10_sharp:3.9963e-03 L11_sharp:5.4134e-03 L12_sharp:1.7213e-02 total_fnorm:1.1750e+01 total_l1_linf:2.0992e+04 total_spectral:6.1250e+00 L1_fnorm:1.0234e+00 L2_fnorm:9.7266e-01 L3_fnorm:9.8438e-01 L4_fnorm:1.0000e+00 L5_fnorm:9.9609e-01 L6_fnorm:1.0000e+00 L7_fnorm:1.0000e+00 L8_fnorm:9.7266e-01 L9_fnorm:1.0000e+00 L10_fnorm:9.9609e-01 L11_fnorm:1.0000e+00 L12_fnorm:9.9609e-01 L1_l1linf:2.3535e-01 L2_l1linf:2.3340e-01 L3_l1linf:2.2363e-01 L4_l1linf:2.2363e-01 L5_l1linf:2.2656e-01 
L6_l1linf:2.2266e-01 L7_l1linf:2.2559e-01 L8_l1linf:2.0996e-01 L9_l1linf:2.0703e-01 L10_l1linf:2.0020e-01 L11_l1linf:2.0410e-01 L12_l1linf:2.1289e-01 L1_spectral:1.3435e-02 L2_spectral:1.3094e-02 L3_spectral:1.3147e-02 L4_spectral:1.3173e-02 L5_spectral:1.3107e-02 L6_spectral:1.3169e-02 L7_spectral:1.3170e-02 L8_spectral:1.2984e-02 L9_spectral:1.3243e-02 L10_spectral:1.3271e-02 L11_spectral:1.3183e-02 L12_spectral:1.3211e-02 train_time:280833ms step_avg:41.30ms +[2025-09-11 10:15:08] [Rank 0] step:6801/10000 train_time:282089ms step_avg:41.48ms +[2025-09-11 10:15:08] [Rank 0] step:6801/10000 train_time:282089ms step_avg:41.48ms +[2025-09-11 10:15:08] [Rank 0] step:6821/10000 train_time:282823ms step_avg:41.46ms +[2025-09-11 10:15:08] [Rank 0] step:6821/10000 train_time:282823ms step_avg:41.46ms +[2025-09-11 10:15:09] [Rank 0] step:6841/10000 train_time:283520ms step_avg:41.44ms +[2025-09-11 10:15:09] [Rank 0] step:6841/10000 train_time:283520ms step_avg:41.44ms +[2025-09-11 10:15:10] [Rank 0] step:6861/10000 train_time:284215ms step_avg:41.42ms +[2025-09-11 10:15:10] [Rank 0] step:6861/10000 train_time:284215ms step_avg:41.42ms +[2025-09-11 10:15:10] [Rank 0] step:6881/10000 train_time:284909ms step_avg:41.41ms +[2025-09-11 10:15:10] [Rank 0] step:6881/10000 train_time:284909ms step_avg:41.41ms +[2025-09-11 10:15:11] [Rank 0] step:6901/10000 train_time:285601ms step_avg:41.39ms +[2025-09-11 10:15:11] [Rank 0] step:6901/10000 train_time:285601ms step_avg:41.39ms +[2025-09-11 10:15:12] [Rank 0] step:6921/10000 train_time:286295ms step_avg:41.37ms +[2025-09-11 10:15:12] [Rank 0] step:6921/10000 train_time:286295ms step_avg:41.37ms +[2025-09-11 10:15:13] [Rank 0] step:6941/10000 train_time:286989ms step_avg:41.35ms +[2025-09-11 10:15:13] [Rank 0] step:6941/10000 train_time:286989ms step_avg:41.35ms +[2025-09-11 10:15:13] [Rank 0] step:6961/10000 train_time:287682ms step_avg:41.33ms +[2025-09-11 10:15:13] [Rank 0] step:6961/10000 train_time:287682ms step_avg:41.33ms 
+[2025-09-11 10:15:14] [Rank 0] step:6981/10000 train_time:288377ms step_avg:41.31ms +[2025-09-11 10:15:14] [Rank 0] step:6981/10000 train_time:288377ms step_avg:41.31ms +[2025-09-11 10:15:15] [Rank 0] step:7001/10000 train_time:289071ms step_avg:41.29ms +[2025-09-11 10:15:15] [Rank 0] step:7001/10000 train_time:289071ms step_avg:41.29ms +[2025-09-11 10:15:15] [Rank 0] step:7021/10000 train_time:289765ms step_avg:41.27ms +[2025-09-11 10:15:15] [Rank 0] step:7021/10000 train_time:289765ms step_avg:41.27ms +[2025-09-11 10:15:16] [Rank 0] step:7041/10000 train_time:290457ms step_avg:41.25ms +[2025-09-11 10:15:16] [Rank 0] step:7041/10000 train_time:290457ms step_avg:41.25ms +[2025-09-11 10:15:17] [Rank 0] step:7061/10000 train_time:291153ms step_avg:41.23ms +[2025-09-11 10:15:17] [Rank 0] step:7061/10000 train_time:291153ms step_avg:41.23ms +[2025-09-11 10:15:17] [Rank 0] step:7081/10000 train_time:291846ms step_avg:41.22ms +[2025-09-11 10:15:17] [Rank 0] step:7081/10000 train_time:291846ms step_avg:41.22ms +[2025-09-11 10:15:18] [Rank 0] step:7101/10000 train_time:292540ms step_avg:41.20ms +[2025-09-11 10:15:18] [Rank 0] step:7101/10000 train_time:292540ms step_avg:41.20ms +[2025-09-11 10:15:19] [Rank 0] step:7121/10000 train_time:293234ms step_avg:41.18ms +[2025-09-11 10:15:19] [Rank 0] step:7121/10000 train_time:293234ms step_avg:41.18ms +[2025-09-11 10:15:19] [Rank 0] step:7141/10000 train_time:293927ms step_avg:41.16ms +[2025-09-11 10:15:19] [Rank 0] step:7141/10000 train_time:293927ms step_avg:41.16ms +[2025-09-11 10:15:20] [Rank 0] step:7161/10000 train_time:294622ms step_avg:41.14ms +[2025-09-11 10:15:20] [Rank 0] step:7161/10000 train_time:294622ms step_avg:41.14ms +[2025-09-11 10:15:21] [Rank 0] step:7181/10000 train_time:295315ms step_avg:41.12ms +[2025-09-11 10:15:21] [Rank 0] step:7181/10000 train_time:295315ms step_avg:41.12ms +[2025-09-11 10:15:22] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 10:15:22] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 10:15:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 10:15:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 10:15:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 10:15:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 10:15:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:15:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:15:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 10:15:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 10:15:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 10:15:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 10:15:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 10:15:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 10:15:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 10:15:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 10:15:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 10:15:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 10:15:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 10:15:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 10:15:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 10:15:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 10:15:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 10:15:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 10:15:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 10:15:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 10:15:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 10:15:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 10:15:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 10:15:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 10:15:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 10:15:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 10:15:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 10:15:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 10:15:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 10:15:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 10:15:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 10:15:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 10:15:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 10:15:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 10:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 10:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 10:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 10:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 10:15:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:15:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:15:32] [Rank 0] PRINT: step:7200/10000 val_loss:4.4918 total_sharp:1.3666e-03 L1_sharp:4.3567e-03 L2_sharp:1.3963e-03 L3_sharp:6.4044e-04 L4_sharp:2.4229e-04 L5_sharp:8.4927e-04 L6_sharp:1.0798e-03 L7_sharp:1.2532e-03 L8_sharp:3.2410e-03 L9_sharp:2.8089e-03 L10_sharp:3.5036e-03 L11_sharp:4.9962e-03 L12_sharp:1.7677e-02 total_fnorm:1.0250e+01 total_l1_linf:1.6512e+04 total_spectral:5.1562e+00 L1_fnorm:8.8672e-01 L2_fnorm:8.4375e-01 L3_fnorm:8.4766e-01 L4_fnorm:8.5938e-01 L5_fnorm:8.5547e-01 L6_fnorm:8.6328e-01 L7_fnorm:8.6328e-01 L8_fnorm:8.4375e-01 L9_fnorm:8.5938e-01 L10_fnorm:8.5547e-01 L11_fnorm:8.5938e-01 L12_fnorm:8.5547e-01 L1_l1linf:1.9824e-01 L2_l1linf:1.9531e-01 L3_l1linf:1.8359e-01 L4_l1linf:1.8652e-01 L5_l1linf:1.8262e-01 L6_l1linf:1.8066e-01 L7_l1linf:1.8262e-01 L8_l1linf:1.7578e-01 L9_l1linf:1.6602e-01 L10_l1linf:1.6309e-01 L11_l1linf:1.6602e-01 L12_l1linf:1.7480e-01 L1_spectral:1.1634e-02 L2_spectral:1.1512e-02 L3_spectral:1.1494e-02 L4_spectral:1.1628e-02 L5_spectral:1.1628e-02 L6_spectral:1.1634e-02 L7_spectral:1.1603e-02 L8_spectral:1.1549e-02 L9_spectral:1.1646e-02 L10_spectral:1.1699e-02 L11_spectral:1.1645e-02 L12_spectral:1.1616e-02 train_time:295988ms step_avg:41.11ms +[2025-09-11 10:15:32] [Rank 0] PRINT: step:7200/10000 
val_loss:4.4918 total_sharp:1.3666e-03 L1_sharp:4.3567e-03 L2_sharp:1.3963e-03 L3_sharp:6.4044e-04 L4_sharp:2.4229e-04 L5_sharp:8.4927e-04 L6_sharp:1.0798e-03 L7_sharp:1.2532e-03 L8_sharp:3.2410e-03 L9_sharp:2.8089e-03 L10_sharp:3.5036e-03 L11_sharp:4.9962e-03 L12_sharp:1.7677e-02 total_fnorm:1.0250e+01 total_l1_linf:1.6512e+04 total_spectral:5.1562e+00 L1_fnorm:8.8672e-01 L2_fnorm:8.4375e-01 L3_fnorm:8.4766e-01 L4_fnorm:8.5938e-01 L5_fnorm:8.5547e-01 L6_fnorm:8.6328e-01 L7_fnorm:8.6328e-01 L8_fnorm:8.4375e-01 L9_fnorm:8.5938e-01 L10_fnorm:8.5547e-01 L11_fnorm:8.5938e-01 L12_fnorm:8.5547e-01 L1_l1linf:1.9824e-01 L2_l1linf:1.9531e-01 L3_l1linf:1.8359e-01 L4_l1linf:1.8652e-01 L5_l1linf:1.8262e-01 L6_l1linf:1.8066e-01 L7_l1linf:1.8262e-01 L8_l1linf:1.7578e-01 L9_l1linf:1.6602e-01 L10_l1linf:1.6309e-01 L11_l1linf:1.6602e-01 L12_l1linf:1.7480e-01 L1_spectral:1.1634e-02 L2_spectral:1.1512e-02 L3_spectral:1.1494e-02 L4_spectral:1.1628e-02 L5_spectral:1.1628e-02 L6_spectral:1.1634e-02 L7_spectral:1.1603e-02 L8_spectral:1.1549e-02 L9_spectral:1.1646e-02 L10_spectral:1.1699e-02 L11_spectral:1.1645e-02 L12_spectral:1.1616e-02 train_time:295988ms step_avg:41.11ms +[2025-09-11 10:15:33] [Rank 0] step:7201/10000 train_time:297251ms step_avg:41.28ms +[2025-09-11 10:15:33] [Rank 0] step:7201/10000 train_time:297251ms step_avg:41.28ms +[2025-09-11 10:15:34] [Rank 0] step:7221/10000 train_time:297983ms step_avg:41.27ms +[2025-09-11 10:15:34] [Rank 0] step:7221/10000 train_time:297983ms step_avg:41.27ms +[2025-09-11 10:15:34] [Rank 0] step:7241/10000 train_time:298679ms step_avg:41.25ms +[2025-09-11 10:15:34] [Rank 0] step:7241/10000 train_time:298679ms step_avg:41.25ms +[2025-09-11 10:15:35] [Rank 0] step:7261/10000 train_time:299375ms step_avg:41.23ms +[2025-09-11 10:15:35] [Rank 0] step:7261/10000 train_time:299375ms step_avg:41.23ms +[2025-09-11 10:15:36] [Rank 0] step:7281/10000 train_time:300074ms step_avg:41.21ms +[2025-09-11 10:15:36] [Rank 0] step:7281/10000 
train_time:300074ms step_avg:41.21ms +[2025-09-11 10:15:36] [Rank 0] step:7301/10000 train_time:300767ms step_avg:41.20ms +[2025-09-11 10:15:36] [Rank 0] step:7301/10000 train_time:300767ms step_avg:41.20ms +[2025-09-11 10:15:37] [Rank 0] step:7321/10000 train_time:301461ms step_avg:41.18ms +[2025-09-11 10:15:37] [Rank 0] step:7321/10000 train_time:301461ms step_avg:41.18ms +[2025-09-11 10:15:38] [Rank 0] step:7341/10000 train_time:302157ms step_avg:41.16ms +[2025-09-11 10:15:38] [Rank 0] step:7341/10000 train_time:302157ms step_avg:41.16ms +[2025-09-11 10:15:38] [Rank 0] step:7361/10000 train_time:302851ms step_avg:41.14ms +[2025-09-11 10:15:38] [Rank 0] step:7361/10000 train_time:302851ms step_avg:41.14ms +[2025-09-11 10:15:39] [Rank 0] step:7381/10000 train_time:303546ms step_avg:41.13ms +[2025-09-11 10:15:39] [Rank 0] step:7381/10000 train_time:303546ms step_avg:41.13ms +[2025-09-11 10:15:40] [Rank 0] step:7401/10000 train_time:304240ms step_avg:41.11ms +[2025-09-11 10:15:40] [Rank 0] step:7401/10000 train_time:304240ms step_avg:41.11ms +[2025-09-11 10:15:41] [Rank 0] step:7421/10000 train_time:304933ms step_avg:41.09ms +[2025-09-11 10:15:41] [Rank 0] step:7421/10000 train_time:304933ms step_avg:41.09ms +[2025-09-11 10:15:42] [Rank 0] step:7441/10000 train_time:306157ms step_avg:41.14ms +[2025-09-11 10:15:42] [Rank 0] step:7441/10000 train_time:306157ms step_avg:41.14ms +[2025-09-11 10:15:42] [Rank 0] step:7461/10000 train_time:306852ms step_avg:41.13ms +[2025-09-11 10:15:42] [Rank 0] step:7461/10000 train_time:306852ms step_avg:41.13ms +[2025-09-11 10:15:43] [Rank 0] step:7481/10000 train_time:307603ms step_avg:41.12ms +[2025-09-11 10:15:43] [Rank 0] step:7481/10000 train_time:307603ms step_avg:41.12ms +[2025-09-11 10:15:44] [Rank 0] step:7501/10000 train_time:308481ms step_avg:41.13ms +[2025-09-11 10:15:44] [Rank 0] step:7501/10000 train_time:308481ms step_avg:41.13ms +[2025-09-11 10:15:45] [Rank 0] step:7521/10000 train_time:309177ms step_avg:41.11ms 
+[2025-09-11 10:15:45] [Rank 0] step:7521/10000 train_time:309177ms step_avg:41.11ms +[2025-09-11 10:15:45] [Rank 0] step:7541/10000 train_time:309870ms step_avg:41.09ms +[2025-09-11 10:15:45] [Rank 0] step:7541/10000 train_time:309870ms step_avg:41.09ms +[2025-09-11 10:15:46] [Rank 0] step:7561/10000 train_time:310568ms step_avg:41.07ms +[2025-09-11 10:15:46] [Rank 0] step:7561/10000 train_time:310568ms step_avg:41.07ms +[2025-09-11 10:15:47] [Rank 0] step:7581/10000 train_time:311264ms step_avg:41.06ms +[2025-09-11 10:15:47] [Rank 0] step:7581/10000 train_time:311264ms step_avg:41.06ms +[2025-09-11 10:15:47] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 10:15:47] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 10:15:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 10:15:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 10:15:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 10:15:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 10:15:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:15:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:15:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 10:15:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 10:15:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 10:15:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 10:15:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 10:15:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 10:15:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 10:15:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 10:15:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 10:15:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 10:15:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 10:15:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 10:15:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 10:15:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 10:15:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 10:15:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 10:15:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 10:15:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 10:15:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 10:15:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 10:15:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 10:15:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 10:15:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 10:15:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 10:15:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 10:15:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 10:15:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 10:15:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 10:15:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 10:15:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 10:15:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 10:15:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 10:15:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 10:15:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 10:15:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 10:15:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 10:15:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:15:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:15:58] [Rank 0] PRINT: step:7600/10000 val_loss:4.4634 total_sharp:1.4818e-03 L1_sharp:3.7620e-03 L2_sharp:1.4207e-03 L3_sharp:9.0509e-04 L4_sharp:1.5438e-03 L5_sharp:1.0919e-03 L6_sharp:1.1005e-03 L7_sharp:8.0687e-04 L8_sharp:3.0990e-03 L9_sharp:2.6901e-03 L10_sharp:3.5521e-03 L11_sharp:4.7810e-03 L12_sharp:1.3023e-02 total_fnorm:8.0625e+00 total_l1_linf:1.2416e+04 total_spectral:4.1562e+00 L1_fnorm:7.5000e-01 L2_fnorm:7.0703e-01 L3_fnorm:7.1094e-01 L4_fnorm:7.2266e-01 L5_fnorm:7.1875e-01 L6_fnorm:7.2266e-01 L7_fnorm:7.2266e-01 L8_fnorm:6.9922e-01 L9_fnorm:7.1484e-01 L10_fnorm:7.1094e-01 L11_fnorm:7.1484e-01 L12_fnorm:7.1484e-01 L1_l1linf:1.5430e-01 L2_l1linf:1.5723e-01 L3_l1linf:1.4551e-01 L4_l1linf:1.4258e-01 L5_l1linf:1.4453e-01 L6_l1linf:1.4453e-01 L7_l1linf:1.4746e-01 L8_l1linf:1.3574e-01 L9_l1linf:1.3184e-01 L10_l1linf:1.3086e-01 L11_l1linf:1.3379e-01 L12_l1linf:1.4355e-01 L1_spectral:9.8988e-03 L2_spectral:9.7575e-03 L3_spectral:9.9452e-03 L4_spectral:1.0024e-02 L5_spectral:9.9379e-03 L6_spectral:9.9599e-03 L7_spectral:9.8926e-03 L8_spectral:9.8026e-03 L9_spectral:9.9384e-03 L10_spectral:1.0002e-02 L11_spectral:9.9495e-03 L12_spectral:1.0013e-02 train_time:311940ms step_avg:41.04ms +[2025-09-11 10:15:58] [Rank 0] PRINT: step:7600/10000 val_loss:4.4634 total_sharp:1.4818e-03 L1_sharp:3.7620e-03 L2_sharp:1.4207e-03 L3_sharp:9.0509e-04 L4_sharp:1.5438e-03 L5_sharp:1.0919e-03 L6_sharp:1.1005e-03 L7_sharp:8.0687e-04 L8_sharp:3.0990e-03 L9_sharp:2.6901e-03 L10_sharp:3.5521e-03 L11_sharp:4.7810e-03 L12_sharp:1.3023e-02 total_fnorm:8.0625e+00 total_l1_linf:1.2416e+04 total_spectral:4.1562e+00 L1_fnorm:7.5000e-01 L2_fnorm:7.0703e-01 L3_fnorm:7.1094e-01 L4_fnorm:7.2266e-01 L5_fnorm:7.1875e-01 L6_fnorm:7.2266e-01 L7_fnorm:7.2266e-01 L8_fnorm:6.9922e-01 L9_fnorm:7.1484e-01 L10_fnorm:7.1094e-01 L11_fnorm:7.1484e-01 L12_fnorm:7.1484e-01 L1_l1linf:1.5430e-01 L2_l1linf:1.5723e-01 L3_l1linf:1.4551e-01 L4_l1linf:1.4258e-01 L5_l1linf:1.4453e-01 
L6_l1linf:1.4453e-01 L7_l1linf:1.4746e-01 L8_l1linf:1.3574e-01 L9_l1linf:1.3184e-01 L10_l1linf:1.3086e-01 L11_l1linf:1.3379e-01 L12_l1linf:1.4355e-01 L1_spectral:9.8988e-03 L2_spectral:9.7575e-03 L3_spectral:9.9452e-03 L4_spectral:1.0024e-02 L5_spectral:9.9379e-03 L6_spectral:9.9599e-03 L7_spectral:9.8926e-03 L8_spectral:9.8026e-03 L9_spectral:9.9384e-03 L10_spectral:1.0002e-02 L11_spectral:9.9495e-03 L12_spectral:1.0013e-02 train_time:311940ms step_avg:41.04ms +[2025-09-11 10:15:59] [Rank 0] step:7601/10000 train_time:313190ms step_avg:41.20ms +[2025-09-11 10:15:59] [Rank 0] step:7601/10000 train_time:313190ms step_avg:41.20ms +[2025-09-11 10:16:00] [Rank 0] step:7621/10000 train_time:313909ms step_avg:41.19ms +[2025-09-11 10:16:00] [Rank 0] step:7621/10000 train_time:313909ms step_avg:41.19ms +[2025-09-11 10:16:00] [Rank 0] step:7641/10000 train_time:314606ms step_avg:41.17ms +[2025-09-11 10:16:00] [Rank 0] step:7641/10000 train_time:314606ms step_avg:41.17ms +[2025-09-11 10:16:01] [Rank 0] step:7661/10000 train_time:315301ms step_avg:41.16ms +[2025-09-11 10:16:01] [Rank 0] step:7661/10000 train_time:315301ms step_avg:41.16ms +[2025-09-11 10:16:02] [Rank 0] step:7681/10000 train_time:315997ms step_avg:41.14ms +[2025-09-11 10:16:02] [Rank 0] step:7681/10000 train_time:315997ms step_avg:41.14ms +[2025-09-11 10:16:02] [Rank 0] step:7701/10000 train_time:316697ms step_avg:41.12ms +[2025-09-11 10:16:02] [Rank 0] step:7701/10000 train_time:316697ms step_avg:41.12ms +[2025-09-11 10:16:03] [Rank 0] step:7721/10000 train_time:317392ms step_avg:41.11ms +[2025-09-11 10:16:03] [Rank 0] step:7721/10000 train_time:317392ms step_avg:41.11ms +[2025-09-11 10:16:04] [Rank 0] step:7741/10000 train_time:318089ms step_avg:41.09ms +[2025-09-11 10:16:04] [Rank 0] step:7741/10000 train_time:318089ms step_avg:41.09ms +[2025-09-11 10:16:04] [Rank 0] step:7761/10000 train_time:318784ms step_avg:41.08ms +[2025-09-11 10:16:04] [Rank 0] step:7761/10000 train_time:318784ms step_avg:41.08ms 
+[2025-09-11 10:16:05] [Rank 0] step:7781/10000 train_time:319481ms step_avg:41.06ms +[2025-09-11 10:16:05] [Rank 0] step:7781/10000 train_time:319481ms step_avg:41.06ms +[2025-09-11 10:16:06] [Rank 0] step:7801/10000 train_time:320175ms step_avg:41.04ms +[2025-09-11 10:16:06] [Rank 0] step:7801/10000 train_time:320175ms step_avg:41.04ms +[2025-09-11 10:16:07] [Rank 0] step:7821/10000 train_time:320872ms step_avg:41.03ms +[2025-09-11 10:16:07] [Rank 0] step:7821/10000 train_time:320872ms step_avg:41.03ms +[2025-09-11 10:16:07] [Rank 0] step:7841/10000 train_time:321570ms step_avg:41.01ms +[2025-09-11 10:16:07] [Rank 0] step:7841/10000 train_time:321570ms step_avg:41.01ms +[2025-09-11 10:16:08] [Rank 0] step:7861/10000 train_time:322268ms step_avg:41.00ms +[2025-09-11 10:16:08] [Rank 0] step:7861/10000 train_time:322268ms step_avg:41.00ms +[2025-09-11 10:16:09] [Rank 0] step:7881/10000 train_time:322963ms step_avg:40.98ms +[2025-09-11 10:16:09] [Rank 0] step:7881/10000 train_time:322963ms step_avg:40.98ms +[2025-09-11 10:16:09] [Rank 0] step:7901/10000 train_time:323660ms step_avg:40.96ms +[2025-09-11 10:16:09] [Rank 0] step:7901/10000 train_time:323660ms step_avg:40.96ms +[2025-09-11 10:16:10] [Rank 0] step:7921/10000 train_time:324356ms step_avg:40.95ms +[2025-09-11 10:16:10] [Rank 0] step:7921/10000 train_time:324356ms step_avg:40.95ms +[2025-09-11 10:16:11] [Rank 0] step:7941/10000 train_time:325053ms step_avg:40.93ms +[2025-09-11 10:16:11] [Rank 0] step:7941/10000 train_time:325053ms step_avg:40.93ms +[2025-09-11 10:16:11] [Rank 0] step:7961/10000 train_time:325748ms step_avg:40.92ms +[2025-09-11 10:16:11] [Rank 0] step:7961/10000 train_time:325748ms step_avg:40.92ms +[2025-09-11 10:16:12] [Rank 0] step:7981/10000 train_time:326446ms step_avg:40.90ms +[2025-09-11 10:16:12] [Rank 0] step:7981/10000 train_time:326446ms step_avg:40.90ms +[2025-09-11 10:16:13] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 10:16:13] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 10:16:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 10:16:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 10:16:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 10:16:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 10:16:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:16:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:16:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 10:16:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 10:16:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 10:16:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 10:16:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 10:16:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 10:16:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 10:16:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 10:16:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 10:16:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 10:16:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 10:16:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 10:16:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 10:16:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 10:16:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 10:16:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 10:16:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 10:16:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 10:16:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 10:16:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 10:16:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 10:16:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 10:16:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 10:16:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 10:16:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 10:16:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 10:16:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 10:16:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 10:16:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 10:16:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 10:16:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 10:16:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 10:16:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 10:16:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 10:16:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 10:16:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 10:16:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:16:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:16:23] [Rank 0] PRINT: step:8000/10000 val_loss:4.4427 total_sharp:1.4816e-03 L1_sharp:4.7952e-03 L2_sharp:7.7360e-04 L3_sharp:4.7159e-04 L4_sharp:1.1141e-03 L5_sharp:1.2568e-03 L6_sharp:1.0924e-03 L7_sharp:1.2774e-03 L8_sharp:3.1121e-03 L9_sharp:2.6650e-03 L10_sharp:3.2400e-03 L11_sharp:4.7856e-03 L12_sharp:1.5574e-02 total_fnorm:6.5000e+00 total_l1_linf:9.4720e+03 total_spectral:3.3750e+00 L1_fnorm:6.2500e-01 L2_fnorm:5.7812e-01 L3_fnorm:5.7812e-01 L4_fnorm:5.9375e-01 L5_fnorm:5.8984e-01 L6_fnorm:5.8984e-01 L7_fnorm:5.8984e-01 L8_fnorm:5.7031e-01 L9_fnorm:5.8203e-01 L10_fnorm:5.8203e-01 L11_fnorm:5.8203e-01 L12_fnorm:5.8594e-01 L1_l1linf:1.1816e-01 L2_l1linf:1.2305e-01 L3_l1linf:1.1572e-01 L4_l1linf:1.1523e-01 L5_l1linf:1.1279e-01 L6_l1linf:1.1279e-01 L7_l1linf:1.1377e-01 L8_l1linf:1.0596e-01 L9_l1linf:9.9121e-02 L10_l1linf:9.9121e-02 L11_l1linf:1.0303e-01 L12_l1linf:1.1182e-01 L1_spectral:8.4019e-03 L2_spectral:8.2139e-03 L3_spectral:8.3553e-03 L4_spectral:8.4423e-03 L5_spectral:8.3387e-03 L6_spectral:8.3451e-03 L7_spectral:8.2588e-03 L8_spectral:8.3260e-03 L9_spectral:8.3596e-03 L10_spectral:8.3681e-03 L11_spectral:8.3303e-03 L12_spectral:8.3449e-03 train_time:327121ms step_avg:40.89ms +[2025-09-11 10:16:23] [Rank 0] PRINT: step:8000/10000 
val_loss:4.4427 total_sharp:1.4816e-03 L1_sharp:4.7952e-03 L2_sharp:7.7360e-04 L3_sharp:4.7159e-04 L4_sharp:1.1141e-03 L5_sharp:1.2568e-03 L6_sharp:1.0924e-03 L7_sharp:1.2774e-03 L8_sharp:3.1121e-03 L9_sharp:2.6650e-03 L10_sharp:3.2400e-03 L11_sharp:4.7856e-03 L12_sharp:1.5574e-02 total_fnorm:6.5000e+00 total_l1_linf:9.4720e+03 total_spectral:3.3750e+00 L1_fnorm:6.2500e-01 L2_fnorm:5.7812e-01 L3_fnorm:5.7812e-01 L4_fnorm:5.9375e-01 L5_fnorm:5.8984e-01 L6_fnorm:5.8984e-01 L7_fnorm:5.8984e-01 L8_fnorm:5.7031e-01 L9_fnorm:5.8203e-01 L10_fnorm:5.8203e-01 L11_fnorm:5.8203e-01 L12_fnorm:5.8594e-01 L1_l1linf:1.1816e-01 L2_l1linf:1.2305e-01 L3_l1linf:1.1572e-01 L4_l1linf:1.1523e-01 L5_l1linf:1.1279e-01 L6_l1linf:1.1279e-01 L7_l1linf:1.1377e-01 L8_l1linf:1.0596e-01 L9_l1linf:9.9121e-02 L10_l1linf:9.9121e-02 L11_l1linf:1.0303e-01 L12_l1linf:1.1182e-01 L1_spectral:8.4019e-03 L2_spectral:8.2139e-03 L3_spectral:8.3553e-03 L4_spectral:8.4423e-03 L5_spectral:8.3387e-03 L6_spectral:8.3451e-03 L7_spectral:8.2588e-03 L8_spectral:8.3260e-03 L9_spectral:8.3596e-03 L10_spectral:8.3681e-03 L11_spectral:8.3303e-03 L12_spectral:8.3449e-03 train_time:327121ms step_avg:40.89ms +[2025-09-11 10:16:24] [Rank 0] step:8001/10000 train_time:328391ms step_avg:41.04ms +[2025-09-11 10:16:24] [Rank 0] step:8001/10000 train_time:328391ms step_avg:41.04ms +[2025-09-11 10:16:25] [Rank 0] step:8021/10000 train_time:329101ms step_avg:41.03ms +[2025-09-11 10:16:25] [Rank 0] step:8021/10000 train_time:329101ms step_avg:41.03ms +[2025-09-11 10:16:25] [Rank 0] step:8041/10000 train_time:329799ms step_avg:41.01ms +[2025-09-11 10:16:25] [Rank 0] step:8041/10000 train_time:329799ms step_avg:41.01ms +[2025-09-11 10:16:26] [Rank 0] step:8061/10000 train_time:330498ms step_avg:41.00ms +[2025-09-11 10:16:26] [Rank 0] step:8061/10000 train_time:330498ms step_avg:41.00ms +[2025-09-11 10:16:27] [Rank 0] step:8081/10000 train_time:331193ms step_avg:40.98ms +[2025-09-11 10:16:27] [Rank 0] step:8081/10000 
train_time:331193ms step_avg:40.98ms +[2025-09-11 10:16:28] [Rank 0] step:8101/10000 train_time:331887ms step_avg:40.97ms +[2025-09-11 10:16:28] [Rank 0] step:8101/10000 train_time:331887ms step_avg:40.97ms +[2025-09-11 10:16:28] [Rank 0] step:8121/10000 train_time:332587ms step_avg:40.95ms +[2025-09-11 10:16:28] [Rank 0] step:8121/10000 train_time:332587ms step_avg:40.95ms +[2025-09-11 10:16:30] [Rank 0] step:8141/10000 train_time:334023ms step_avg:41.03ms +[2025-09-11 10:16:30] [Rank 0] step:8141/10000 train_time:334023ms step_avg:41.03ms +[2025-09-11 10:16:30] [Rank 0] step:8161/10000 train_time:334722ms step_avg:41.01ms +[2025-09-11 10:16:30] [Rank 0] step:8161/10000 train_time:334722ms step_avg:41.01ms +[2025-09-11 10:16:31] [Rank 0] step:8181/10000 train_time:335430ms step_avg:41.00ms +[2025-09-11 10:16:31] [Rank 0] step:8181/10000 train_time:335430ms step_avg:41.00ms +[2025-09-11 10:16:32] [Rank 0] step:8201/10000 train_time:336134ms step_avg:40.99ms +[2025-09-11 10:16:32] [Rank 0] step:8201/10000 train_time:336134ms step_avg:40.99ms +[2025-09-11 10:16:32] [Rank 0] step:8221/10000 train_time:336838ms step_avg:40.97ms +[2025-09-11 10:16:32] [Rank 0] step:8221/10000 train_time:336838ms step_avg:40.97ms +[2025-09-11 10:16:33] [Rank 0] step:8241/10000 train_time:337550ms step_avg:40.96ms +[2025-09-11 10:16:33] [Rank 0] step:8241/10000 train_time:337550ms step_avg:40.96ms +[2025-09-11 10:16:34] [Rank 0] step:8261/10000 train_time:338253ms step_avg:40.95ms +[2025-09-11 10:16:34] [Rank 0] step:8261/10000 train_time:338253ms step_avg:40.95ms +[2025-09-11 10:16:35] [Rank 0] step:8281/10000 train_time:338953ms step_avg:40.93ms +[2025-09-11 10:16:35] [Rank 0] step:8281/10000 train_time:338953ms step_avg:40.93ms +[2025-09-11 10:16:35] [Rank 0] step:8301/10000 train_time:339655ms step_avg:40.92ms +[2025-09-11 10:16:35] [Rank 0] step:8301/10000 train_time:339655ms step_avg:40.92ms +[2025-09-11 10:16:36] [Rank 0] step:8321/10000 train_time:340356ms step_avg:40.90ms 
+[2025-09-11 10:16:36] [Rank 0] step:8321/10000 train_time:340356ms step_avg:40.90ms +[2025-09-11 10:16:37] [Rank 0] step:8341/10000 train_time:341066ms step_avg:40.89ms +[2025-09-11 10:16:37] [Rank 0] step:8341/10000 train_time:341066ms step_avg:40.89ms +[2025-09-11 10:16:37] [Rank 0] step:8361/10000 train_time:341764ms step_avg:40.88ms +[2025-09-11 10:16:37] [Rank 0] step:8361/10000 train_time:341764ms step_avg:40.88ms +[2025-09-11 10:16:38] [Rank 0] step:8381/10000 train_time:342469ms step_avg:40.86ms +[2025-09-11 10:16:38] [Rank 0] step:8381/10000 train_time:342469ms step_avg:40.86ms +[2025-09-11 10:16:39] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 10:16:39] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 10:16:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 10:16:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 10:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 10:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 10:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 10:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 10:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 10:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 10:16:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 10:16:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 10:16:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 10:16:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 10:16:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 10:16:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 10:16:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 10:16:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 10:16:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 10:16:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 10:16:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 10:16:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 10:16:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 10:16:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 10:16:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 10:16:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 10:16:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 10:16:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 10:16:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 10:16:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 10:16:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 10:16:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 10:16:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 10:16:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 10:16:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 10:16:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 10:16:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 10:16:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 10:16:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 10:16:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 10:16:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 10:16:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 10:16:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:16:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:16:49] [Rank 0] PRINT: step:8400/10000 val_loss:4.4207 total_sharp:1.3684e-03 L1_sharp:4.4271e-03 L2_sharp:1.5863e-03 L3_sharp:3.6383e-04 L4_sharp:9.3966e-04 L5_sharp:7.1714e-04 L6_sharp:9.9628e-04 L7_sharp:9.0235e-04 L8_sharp:2.1774e-03 L9_sharp:2.2355e-03 L10_sharp:2.6876e-03 L11_sharp:3.5382e-03 L12_sharp:1.1584e-02 total_fnorm:4.9375e+00 total_l1_linf:6.2400e+03 total_spectral:2.4062e+00 L1_fnorm:5.0000e-01 L2_fnorm:4.5703e-01 L3_fnorm:4.5703e-01 L4_fnorm:4.6484e-01 L5_fnorm:4.6094e-01 L6_fnorm:4.6289e-01 L7_fnorm:4.6094e-01 L8_fnorm:4.4531e-01 L9_fnorm:4.5508e-01 L10_fnorm:4.5117e-01 L11_fnorm:4.5312e-01 L12_fnorm:4.5703e-01 L1_l1linf:8.8867e-02 L2_l1linf:8.7402e-02 L3_l1linf:8.4961e-02 L4_l1linf:8.1055e-02 L5_l1linf:8.4473e-02 L6_l1linf:8.1543e-02 L7_l1linf:8.1055e-02 L8_l1linf:7.5195e-02 L9_l1linf:7.2754e-02 L10_l1linf:6.8359e-02 L11_l1linf:7.4219e-02 L12_l1linf:8.1543e-02 L1_spectral:7.1497e-03 L2_spectral:6.5335e-03 L3_spectral:6.7608e-03 L4_spectral:6.8410e-03 L5_spectral:6.6425e-03 L6_spectral:6.6894e-03 L7_spectral:6.6225e-03 L8_spectral:6.6693e-03 L9_spectral:6.6792e-03 L10_spectral:6.6804e-03 L11_spectral:6.6736e-03 L12_spectral:6.6697e-03 train_time:343154ms step_avg:40.85ms +[2025-09-11 10:16:49] [Rank 0] PRINT: step:8400/10000 val_loss:4.4207 total_sharp:1.3684e-03 L1_sharp:4.4271e-03 L2_sharp:1.5863e-03 L3_sharp:3.6383e-04 L4_sharp:9.3966e-04 L5_sharp:7.1714e-04 L6_sharp:9.9628e-04 L7_sharp:9.0235e-04 L8_sharp:2.1774e-03 L9_sharp:2.2355e-03 L10_sharp:2.6876e-03 L11_sharp:3.5382e-03 L12_sharp:1.1584e-02 total_fnorm:4.9375e+00 total_l1_linf:6.2400e+03 total_spectral:2.4062e+00 L1_fnorm:5.0000e-01 L2_fnorm:4.5703e-01 L3_fnorm:4.5703e-01 L4_fnorm:4.6484e-01 L5_fnorm:4.6094e-01 L6_fnorm:4.6289e-01 L7_fnorm:4.6094e-01 L8_fnorm:4.4531e-01 L9_fnorm:4.5508e-01 L10_fnorm:4.5117e-01 L11_fnorm:4.5312e-01 L12_fnorm:4.5703e-01 L1_l1linf:8.8867e-02 L2_l1linf:8.7402e-02 L3_l1linf:8.4961e-02 L4_l1linf:8.1055e-02 L5_l1linf:8.4473e-02 
L6_l1linf:8.1543e-02 L7_l1linf:8.1055e-02 L8_l1linf:7.5195e-02 L9_l1linf:7.2754e-02 L10_l1linf:6.8359e-02 L11_l1linf:7.4219e-02 L12_l1linf:8.1543e-02 L1_spectral:7.1497e-03 L2_spectral:6.5335e-03 L3_spectral:6.7608e-03 L4_spectral:6.8410e-03 L5_spectral:6.6425e-03 L6_spectral:6.6894e-03 L7_spectral:6.6225e-03 L8_spectral:6.6693e-03 L9_spectral:6.6792e-03 L10_spectral:6.6804e-03 L11_spectral:6.6736e-03 L12_spectral:6.6697e-03 train_time:343154ms step_avg:40.85ms +[2025-09-11 10:16:50] [Rank 0] step:8401/10000 train_time:344416ms step_avg:41.00ms +[2025-09-11 10:16:50] [Rank 0] step:8401/10000 train_time:344416ms step_avg:41.00ms +[2025-09-11 10:16:51] [Rank 0] step:8421/10000 train_time:345137ms step_avg:40.99ms +[2025-09-11 10:16:51] [Rank 0] step:8421/10000 train_time:345137ms step_avg:40.99ms +[2025-09-11 10:16:52] [Rank 0] step:8441/10000 train_time:345843ms step_avg:40.97ms +[2025-09-11 10:16:52] [Rank 0] step:8441/10000 train_time:345843ms step_avg:40.97ms +[2025-09-11 10:16:52] [Rank 0] step:8461/10000 train_time:346548ms step_avg:40.96ms +[2025-09-11 10:16:52] [Rank 0] step:8461/10000 train_time:346548ms step_avg:40.96ms +[2025-09-11 10:16:53] [Rank 0] step:8481/10000 train_time:347253ms step_avg:40.94ms +[2025-09-11 10:16:53] [Rank 0] step:8481/10000 train_time:347253ms step_avg:40.94ms +[2025-09-11 10:16:54] [Rank 0] step:8501/10000 train_time:347956ms step_avg:40.93ms +[2025-09-11 10:16:54] [Rank 0] step:8501/10000 train_time:347956ms step_avg:40.93ms +[2025-09-11 10:16:54] [Rank 0] step:8521/10000 train_time:348659ms step_avg:40.92ms +[2025-09-11 10:16:54] [Rank 0] step:8521/10000 train_time:348659ms step_avg:40.92ms +[2025-09-11 10:16:55] [Rank 0] step:8541/10000 train_time:349361ms step_avg:40.90ms +[2025-09-11 10:16:55] [Rank 0] step:8541/10000 train_time:349361ms step_avg:40.90ms +[2025-09-11 10:16:56] [Rank 0] step:8561/10000 train_time:350070ms step_avg:40.89ms +[2025-09-11 10:16:56] [Rank 0] step:8561/10000 train_time:350070ms step_avg:40.89ms 
+[2025-09-11 10:16:57] [Rank 0] step:8581/10000 train_time:350775ms step_avg:40.88ms +[2025-09-11 10:16:57] [Rank 0] step:8581/10000 train_time:350775ms step_avg:40.88ms +[2025-09-11 10:16:57] [Rank 0] step:8601/10000 train_time:351479ms step_avg:40.86ms +[2025-09-11 10:16:57] [Rank 0] step:8601/10000 train_time:351479ms step_avg:40.86ms +[2025-09-11 10:16:58] [Rank 0] step:8621/10000 train_time:352182ms step_avg:40.85ms +[2025-09-11 10:16:58] [Rank 0] step:8621/10000 train_time:352182ms step_avg:40.85ms +[2025-09-11 10:16:59] [Rank 0] step:8641/10000 train_time:352884ms step_avg:40.84ms +[2025-09-11 10:16:59] [Rank 0] step:8641/10000 train_time:352884ms step_avg:40.84ms +[2025-09-11 10:16:59] [Rank 0] step:8661/10000 train_time:353588ms step_avg:40.83ms +[2025-09-11 10:16:59] [Rank 0] step:8661/10000 train_time:353588ms step_avg:40.83ms +[2025-09-11 10:17:00] [Rank 0] step:8681/10000 train_time:354292ms step_avg:40.81ms +[2025-09-11 10:17:00] [Rank 0] step:8681/10000 train_time:354292ms step_avg:40.81ms +[2025-09-11 10:17:01] [Rank 0] step:8701/10000 train_time:354995ms step_avg:40.80ms +[2025-09-11 10:17:01] [Rank 0] step:8701/10000 train_time:354995ms step_avg:40.80ms +[2025-09-11 10:17:01] [Rank 0] step:8721/10000 train_time:355701ms step_avg:40.79ms +[2025-09-11 10:17:01] [Rank 0] step:8721/10000 train_time:355701ms step_avg:40.79ms +[2025-09-11 10:17:02] [Rank 0] step:8741/10000 train_time:356401ms step_avg:40.77ms +[2025-09-11 10:17:02] [Rank 0] step:8741/10000 train_time:356401ms step_avg:40.77ms +[2025-09-11 10:17:03] [Rank 0] step:8761/10000 train_time:357109ms step_avg:40.76ms +[2025-09-11 10:17:03] [Rank 0] step:8761/10000 train_time:357109ms step_avg:40.76ms +[2025-09-11 10:17:04] [Rank 0] step:8781/10000 train_time:357809ms step_avg:40.75ms +[2025-09-11 10:17:04] [Rank 0] step:8781/10000 train_time:357809ms step_avg:40.75ms +[2025-09-11 10:17:04] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 10:17:04] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 10:17:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 10:17:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 10:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 10:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 10:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 10:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 10:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 10:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 10:17:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 10:17:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 10:17:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 10:17:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 10:17:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 10:17:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 10:17:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 10:17:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 10:17:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 10:17:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 10:17:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 10:17:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 10:17:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 10:17:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 10:17:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 10:17:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 10:17:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 10:17:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 10:17:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 10:17:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 10:17:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 10:17:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 10:17:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 10:17:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 10:17:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 10:17:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 10:17:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 10:17:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 10:17:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 10:17:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 10:17:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 10:17:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 10:17:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:17:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:17:14] [Rank 0] PRINT: step:8800/10000 val_loss:4.4109 total_sharp:1.1772e-03 L1_sharp:4.8783e-03 L2_sharp:7.8421e-04 L3_sharp:1.6053e-04 L4_sharp:1.0259e-03 L5_sharp:7.7718e-04 L6_sharp:6.6594e-04 L7_sharp:8.3076e-04 L8_sharp:1.9772e-03 L9_sharp:1.7756e-03 L10_sharp:2.2339e-03 L11_sharp:3.2532e-03 L12_sharp:1.0223e-02 total_fnorm:3.5781e+00 total_l1_linf:4.0960e+03 total_spectral:1.7578e+00 L1_fnorm:3.8281e-01 L2_fnorm:3.3789e-01 L3_fnorm:3.3789e-01 L4_fnorm:3.4570e-01 L5_fnorm:3.4180e-01 L6_fnorm:3.4180e-01 L7_fnorm:3.4180e-01 L8_fnorm:3.3008e-01 L9_fnorm:3.3398e-01 L10_fnorm:3.3008e-01 L11_fnorm:3.3203e-01 L12_fnorm:3.3398e-01 L1_l1linf:6.5430e-02 L2_l1linf:5.9326e-02 L3_l1linf:5.5176e-02 L4_l1linf:5.5908e-02 L5_l1linf:5.6152e-02 L6_l1linf:5.4199e-02 L7_l1linf:5.4443e-02 L8_l1linf:5.0781e-02 L9_l1linf:4.8096e-02 L10_l1linf:4.6631e-02 L11_l1linf:4.7119e-02 L12_l1linf:5.6641e-02 L1_spectral:5.8273e-03 L2_spectral:4.9954e-03 L3_spectral:5.1207e-03 L4_spectral:5.2040e-03 L5_spectral:5.0174e-03 L6_spectral:5.0370e-03 L7_spectral:4.9862e-03 L8_spectral:5.0823e-03 L9_spectral:4.9862e-03 L10_spectral:5.0609e-03 L11_spectral:5.0339e-03 L12_spectral:5.0532e-03 train_time:358490ms step_avg:40.74ms +[2025-09-11 10:17:14] [Rank 0] PRINT: step:8800/10000 
val_loss:4.4109 total_sharp:1.1772e-03 L1_sharp:4.8783e-03 L2_sharp:7.8421e-04 L3_sharp:1.6053e-04 L4_sharp:1.0259e-03 L5_sharp:7.7718e-04 L6_sharp:6.6594e-04 L7_sharp:8.3076e-04 L8_sharp:1.9772e-03 L9_sharp:1.7756e-03 L10_sharp:2.2339e-03 L11_sharp:3.2532e-03 L12_sharp:1.0223e-02 total_fnorm:3.5781e+00 total_l1_linf:4.0960e+03 total_spectral:1.7578e+00 L1_fnorm:3.8281e-01 L2_fnorm:3.3789e-01 L3_fnorm:3.3789e-01 L4_fnorm:3.4570e-01 L5_fnorm:3.4180e-01 L6_fnorm:3.4180e-01 L7_fnorm:3.4180e-01 L8_fnorm:3.3008e-01 L9_fnorm:3.3398e-01 L10_fnorm:3.3008e-01 L11_fnorm:3.3203e-01 L12_fnorm:3.3398e-01 L1_l1linf:6.5430e-02 L2_l1linf:5.9326e-02 L3_l1linf:5.5176e-02 L4_l1linf:5.5908e-02 L5_l1linf:5.6152e-02 L6_l1linf:5.4199e-02 L7_l1linf:5.4443e-02 L8_l1linf:5.0781e-02 L9_l1linf:4.8096e-02 L10_l1linf:4.6631e-02 L11_l1linf:4.7119e-02 L12_l1linf:5.6641e-02 L1_spectral:5.8273e-03 L2_spectral:4.9954e-03 L3_spectral:5.1207e-03 L4_spectral:5.2040e-03 L5_spectral:5.0174e-03 L6_spectral:5.0370e-03 L7_spectral:4.9862e-03 L8_spectral:5.0823e-03 L9_spectral:4.9862e-03 L10_spectral:5.0609e-03 L11_spectral:5.0339e-03 L12_spectral:5.0532e-03 train_time:358490ms step_avg:40.74ms +[2025-09-11 10:17:15] [Rank 0] step:8801/10000 train_time:359766ms step_avg:40.88ms +[2025-09-11 10:17:15] [Rank 0] step:8801/10000 train_time:359766ms step_avg:40.88ms +[2025-09-11 10:17:16] [Rank 0] step:8821/10000 train_time:360505ms step_avg:40.87ms +[2025-09-11 10:17:16] [Rank 0] step:8821/10000 train_time:360505ms step_avg:40.87ms +[2025-09-11 10:17:17] [Rank 0] step:8841/10000 train_time:361210ms step_avg:40.86ms +[2025-09-11 10:17:17] [Rank 0] step:8841/10000 train_time:361210ms step_avg:40.86ms +[2025-09-11 10:17:18] [Rank 0] step:8861/10000 train_time:361914ms step_avg:40.84ms +[2025-09-11 10:17:18] [Rank 0] step:8861/10000 train_time:361914ms step_avg:40.84ms +[2025-09-11 10:17:18] [Rank 0] step:8881/10000 train_time:362618ms step_avg:40.83ms +[2025-09-11 10:17:18] [Rank 0] step:8881/10000 
train_time:362618ms step_avg:40.83ms +[2025-09-11 10:17:19] [Rank 0] step:8901/10000 train_time:363323ms step_avg:40.82ms +[2025-09-11 10:17:19] [Rank 0] step:8901/10000 train_time:363323ms step_avg:40.82ms +[2025-09-11 10:17:20] [Rank 0] step:8921/10000 train_time:364022ms step_avg:40.81ms +[2025-09-11 10:17:20] [Rank 0] step:8921/10000 train_time:364022ms step_avg:40.81ms +[2025-09-11 10:17:20] [Rank 0] step:8941/10000 train_time:364727ms step_avg:40.79ms +[2025-09-11 10:17:20] [Rank 0] step:8941/10000 train_time:364727ms step_avg:40.79ms +[2025-09-11 10:17:21] [Rank 0] step:8961/10000 train_time:365440ms step_avg:40.78ms +[2025-09-11 10:17:21] [Rank 0] step:8961/10000 train_time:365440ms step_avg:40.78ms +[2025-09-11 10:17:22] [Rank 0] step:8981/10000 train_time:366148ms step_avg:40.77ms +[2025-09-11 10:17:22] [Rank 0] step:8981/10000 train_time:366148ms step_avg:40.77ms +[2025-09-11 10:17:23] [Rank 0] step:9001/10000 train_time:366846ms step_avg:40.76ms +[2025-09-11 10:17:23] [Rank 0] step:9001/10000 train_time:366846ms step_avg:40.76ms +[2025-09-11 10:17:23] [Rank 0] step:9021/10000 train_time:367551ms step_avg:40.74ms +[2025-09-11 10:17:23] [Rank 0] step:9021/10000 train_time:367551ms step_avg:40.74ms +[2025-09-11 10:17:24] [Rank 0] step:9041/10000 train_time:368258ms step_avg:40.73ms +[2025-09-11 10:17:24] [Rank 0] step:9041/10000 train_time:368258ms step_avg:40.73ms +[2025-09-11 10:17:25] [Rank 0] step:9061/10000 train_time:368960ms step_avg:40.72ms +[2025-09-11 10:17:25] [Rank 0] step:9061/10000 train_time:368960ms step_avg:40.72ms +[2025-09-11 10:17:25] [Rank 0] step:9081/10000 train_time:369665ms step_avg:40.71ms +[2025-09-11 10:17:25] [Rank 0] step:9081/10000 train_time:369665ms step_avg:40.71ms +[2025-09-11 10:17:26] [Rank 0] step:9101/10000 train_time:370373ms step_avg:40.70ms +[2025-09-11 10:17:26] [Rank 0] step:9101/10000 train_time:370373ms step_avg:40.70ms +[2025-09-11 10:17:27] [Rank 0] step:9121/10000 train_time:371081ms step_avg:40.68ms 
+[2025-09-11 10:17:27] [Rank 0] step:9121/10000 train_time:371081ms step_avg:40.68ms +[2025-09-11 10:17:27] [Rank 0] step:9141/10000 train_time:371784ms step_avg:40.67ms +[2025-09-11 10:17:27] [Rank 0] step:9141/10000 train_time:371784ms step_avg:40.67ms +[2025-09-11 10:17:28] [Rank 0] step:9161/10000 train_time:372490ms step_avg:40.66ms +[2025-09-11 10:17:28] [Rank 0] step:9161/10000 train_time:372490ms step_avg:40.66ms +[2025-09-11 10:17:29] [Rank 0] step:9181/10000 train_time:373196ms step_avg:40.65ms +[2025-09-11 10:17:29] [Rank 0] step:9181/10000 train_time:373196ms step_avg:40.65ms +[2025-09-11 10:17:30] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 10:17:30] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 10:17:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 10:17:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 10:17:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 10:17:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 10:17:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:17:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:17:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 10:17:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 10:17:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 10:17:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 10:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 10:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 10:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 10:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 10:17:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 10:17:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 10:17:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 10:17:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 10:17:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 10:17:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 10:17:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 10:17:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 10:17:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 10:17:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 10:17:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 10:17:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 10:17:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 10:17:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 10:17:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 10:17:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 10:17:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 10:17:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 10:17:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 10:17:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 10:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 10:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 10:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 10:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 10:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 10:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 10:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 10:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 10:17:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:17:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:17:43] [Rank 0] PRINT: step:9200/10000 val_loss:4.3948 total_sharp:1.1771e-03 L1_sharp:3.8031e-03 L2_sharp:3.8371e-04 L3_sharp:2.4577e-04 L4_sharp:5.5094e-04 L5_sharp:7.8136e-04 L6_sharp:1.1086e-03 L7_sharp:6.4044e-04 L8_sharp:1.7866e-03 L9_sharp:1.5148e-03 L10_sharp:1.9884e-03 L11_sharp:3.2533e-03 L12_sharp:1.5484e-02 total_fnorm:2.3281e+00 total_l1_linf:2.3040e+03 total_spectral:1.1484e+00 L1_fnorm:2.6172e-01 L2_fnorm:2.2168e-01 L3_fnorm:2.2168e-01 L4_fnorm:2.2754e-01 L5_fnorm:2.2363e-01 L6_fnorm:2.2559e-01 L7_fnorm:2.2461e-01 L8_fnorm:2.1777e-01 L9_fnorm:2.2168e-01 L10_fnorm:2.1973e-01 L11_fnorm:2.1973e-01 L12_fnorm:2.2070e-01 L1_l1linf:4.1748e-02 L2_l1linf:3.5156e-02 L3_l1linf:3.3691e-02 L4_l1linf:3.2471e-02 L5_l1linf:3.2471e-02 L6_l1linf:3.1494e-02 L7_l1linf:3.2959e-02 L8_l1linf:3.0518e-02 L9_l1linf:2.8320e-02 L10_l1linf:2.7710e-02 L11_l1linf:2.8076e-02 L12_l1linf:3.4180e-02 L1_spectral:4.4054e-03 L2_spectral:3.3322e-03 L3_spectral:3.4614e-03 L4_spectral:3.4923e-03 L5_spectral:3.3913e-03 L6_spectral:3.4178e-03 L7_spectral:3.3390e-03 L8_spectral:3.4587e-03 L9_spectral:3.3984e-03 L10_spectral:3.3834e-03 L11_spectral:3.3951e-03 L12_spectral:3.4246e-03 train_time:373883ms step_avg:40.64ms +[2025-09-11 10:17:43] [Rank 0] PRINT: step:9200/10000 val_loss:4.3948 total_sharp:1.1771e-03 L1_sharp:3.8031e-03 L2_sharp:3.8371e-04 L3_sharp:2.4577e-04 L4_sharp:5.5094e-04 L5_sharp:7.8136e-04 L6_sharp:1.1086e-03 L7_sharp:6.4044e-04 L8_sharp:1.7866e-03 L9_sharp:1.5148e-03 L10_sharp:1.9884e-03 L11_sharp:3.2533e-03 L12_sharp:1.5484e-02 total_fnorm:2.3281e+00 total_l1_linf:2.3040e+03 total_spectral:1.1484e+00 L1_fnorm:2.6172e-01 L2_fnorm:2.2168e-01 L3_fnorm:2.2168e-01 L4_fnorm:2.2754e-01 L5_fnorm:2.2363e-01 L6_fnorm:2.2559e-01 L7_fnorm:2.2461e-01 L8_fnorm:2.1777e-01 L9_fnorm:2.2168e-01 L10_fnorm:2.1973e-01 L11_fnorm:2.1973e-01 L12_fnorm:2.2070e-01 L1_l1linf:4.1748e-02 L2_l1linf:3.5156e-02 L3_l1linf:3.3691e-02 L4_l1linf:3.2471e-02 L5_l1linf:3.2471e-02 
L6_l1linf:3.1494e-02 L7_l1linf:3.2959e-02 L8_l1linf:3.0518e-02 L9_l1linf:2.8320e-02 L10_l1linf:2.7710e-02 L11_l1linf:2.8076e-02 L12_l1linf:3.4180e-02 L1_spectral:4.4054e-03 L2_spectral:3.3322e-03 L3_spectral:3.4614e-03 L4_spectral:3.4923e-03 L5_spectral:3.3913e-03 L6_spectral:3.4178e-03 L7_spectral:3.3390e-03 L8_spectral:3.4587e-03 L9_spectral:3.3984e-03 L10_spectral:3.3834e-03 L11_spectral:3.3951e-03 L12_spectral:3.4246e-03 train_time:373883ms step_avg:40.64ms +[2025-09-11 10:17:45] [Rank 0] step:9201/10000 train_time:375158ms step_avg:40.77ms +[2025-09-11 10:17:45] [Rank 0] step:9201/10000 train_time:375158ms step_avg:40.77ms +[2025-09-11 10:17:45] [Rank 0] step:9221/10000 train_time:375887ms step_avg:40.76ms +[2025-09-11 10:17:45] [Rank 0] step:9221/10000 train_time:375887ms step_avg:40.76ms +[2025-09-11 10:17:46] [Rank 0] step:9241/10000 train_time:376589ms step_avg:40.75ms +[2025-09-11 10:17:46] [Rank 0] step:9241/10000 train_time:376589ms step_avg:40.75ms +[2025-09-11 10:17:47] [Rank 0] step:9261/10000 train_time:377297ms step_avg:40.74ms +[2025-09-11 10:17:47] [Rank 0] step:9261/10000 train_time:377297ms step_avg:40.74ms +[2025-09-11 10:17:48] [Rank 0] step:9281/10000 train_time:378567ms step_avg:40.79ms +[2025-09-11 10:17:48] [Rank 0] step:9281/10000 train_time:378567ms step_avg:40.79ms +[2025-09-11 10:17:49] [Rank 0] step:9301/10000 train_time:379269ms step_avg:40.78ms +[2025-09-11 10:17:49] [Rank 0] step:9301/10000 train_time:379269ms step_avg:40.78ms +[2025-09-11 10:17:49] [Rank 0] step:9321/10000 train_time:379976ms step_avg:40.77ms +[2025-09-11 10:17:49] [Rank 0] step:9321/10000 train_time:379976ms step_avg:40.77ms +[2025-09-11 10:17:50] [Rank 0] step:9341/10000 train_time:380949ms step_avg:40.78ms +[2025-09-11 10:17:50] [Rank 0] step:9341/10000 train_time:380949ms step_avg:40.78ms +[2025-09-11 10:17:51] [Rank 0] step:9361/10000 train_time:381649ms step_avg:40.77ms +[2025-09-11 10:17:51] [Rank 0] step:9361/10000 train_time:381649ms step_avg:40.77ms 
+[2025-09-11 10:17:52] [Rank 0] step:9381/10000 train_time:382350ms step_avg:40.76ms +[2025-09-11 10:17:52] [Rank 0] step:9381/10000 train_time:382350ms step_avg:40.76ms +[2025-09-11 10:17:52] [Rank 0] step:9401/10000 train_time:383056ms step_avg:40.75ms +[2025-09-11 10:17:52] [Rank 0] step:9401/10000 train_time:383056ms step_avg:40.75ms +[2025-09-11 10:17:53] [Rank 0] step:9421/10000 train_time:383761ms step_avg:40.73ms +[2025-09-11 10:17:53] [Rank 0] step:9421/10000 train_time:383761ms step_avg:40.73ms +[2025-09-11 10:17:54] [Rank 0] step:9441/10000 train_time:384469ms step_avg:40.72ms +[2025-09-11 10:17:54] [Rank 0] step:9441/10000 train_time:384469ms step_avg:40.72ms +[2025-09-11 10:17:55] [Rank 0] step:9461/10000 train_time:385173ms step_avg:40.71ms +[2025-09-11 10:17:55] [Rank 0] step:9461/10000 train_time:385173ms step_avg:40.71ms +[2025-09-11 10:17:55] [Rank 0] step:9481/10000 train_time:385879ms step_avg:40.70ms +[2025-09-11 10:17:55] [Rank 0] step:9481/10000 train_time:385879ms step_avg:40.70ms +[2025-09-11 10:17:56] [Rank 0] step:9501/10000 train_time:386585ms step_avg:40.69ms +[2025-09-11 10:17:56] [Rank 0] step:9501/10000 train_time:386585ms step_avg:40.69ms +[2025-09-11 10:17:57] [Rank 0] step:9521/10000 train_time:387291ms step_avg:40.68ms +[2025-09-11 10:17:57] [Rank 0] step:9521/10000 train_time:387291ms step_avg:40.68ms +[2025-09-11 10:17:57] [Rank 0] step:9541/10000 train_time:387999ms step_avg:40.67ms +[2025-09-11 10:17:57] [Rank 0] step:9541/10000 train_time:387999ms step_avg:40.67ms +[2025-09-11 10:17:58] [Rank 0] step:9561/10000 train_time:388703ms step_avg:40.66ms +[2025-09-11 10:17:58] [Rank 0] step:9561/10000 train_time:388703ms step_avg:40.66ms +[2025-09-11 10:17:59] [Rank 0] step:9581/10000 train_time:389409ms step_avg:40.64ms +[2025-09-11 10:17:59] [Rank 0] step:9581/10000 train_time:389409ms step_avg:40.64ms +[2025-09-11 10:17:59] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 10:17:59] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 10:18:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 10:18:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 10:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 10:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 10:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 10:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 10:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 10:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 10:18:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 10:18:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 10:18:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 10:18:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 10:18:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 10:18:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 10:18:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 10:18:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 10:18:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 10:18:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 10:18:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 10:18:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 10:18:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 10:18:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 10:18:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 10:18:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 10:18:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 10:18:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 10:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 10:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 10:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 10:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 10:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 10:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 10:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 10:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 10:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 10:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 10:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 10:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 10:18:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 10:18:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 10:18:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:18:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:18:09] [Rank 0] PRINT: step:9600/10000 val_loss:4.3866 total_sharp:6.6922e-04 L1_sharp:1.3215e-03 L2_sharp:5.9708e-04 L3_sharp:2.2141e-04 L4_sharp:3.4906e-04 L5_sharp:4.5504e-04 L6_sharp:5.2554e-04 L7_sharp:5.9569e-04 L8_sharp:1.3984e-03 L9_sharp:1.1623e-03 L10_sharp:1.3897e-03 L11_sharp:2.1042e-03 L12_sharp:6.7796e-03 total_fnorm:1.2656e+00 total_l1_linf:1.1040e+03 total_spectral:6.4844e-01 L1_fnorm:1.5527e-01 L2_fnorm:1.2598e-01 L3_fnorm:1.2598e-01 L4_fnorm:1.2988e-01 L5_fnorm:1.2695e-01 L6_fnorm:1.2793e-01 L7_fnorm:1.2695e-01 L8_fnorm:1.2354e-01 L9_fnorm:1.2598e-01 L10_fnorm:1.2451e-01 L11_fnorm:1.2451e-01 L12_fnorm:1.2451e-01 L1_l1linf:2.2583e-02 L2_l1linf:1.6724e-02 L3_l1linf:1.6357e-02 L4_l1linf:1.6479e-02 L5_l1linf:1.6113e-02 L6_l1linf:1.5381e-02 L7_l1linf:1.5259e-02 L8_l1linf:1.4954e-02 L9_l1linf:1.3550e-02 L10_l1linf:1.4160e-02 L11_l1linf:1.5259e-02 L12_l1linf:1.7334e-02 L1_spectral:2.8379e-03 L2_spectral:1.9587e-03 L3_spectral:2.0119e-03 L4_spectral:2.0716e-03 L5_spectral:1.9782e-03 L6_spectral:1.9926e-03 L7_spectral:1.9208e-03 L8_spectral:2.0334e-03 L9_spectral:1.9758e-03 L10_spectral:1.9898e-03 L11_spectral:1.9579e-03 L12_spectral:1.9785e-03 train_time:390090ms step_avg:40.63ms +[2025-09-11 10:18:09] [Rank 0] PRINT: step:9600/10000 
val_loss:4.3866 total_sharp:6.6922e-04 L1_sharp:1.3215e-03 L2_sharp:5.9708e-04 L3_sharp:2.2141e-04 L4_sharp:3.4906e-04 L5_sharp:4.5504e-04 L6_sharp:5.2554e-04 L7_sharp:5.9569e-04 L8_sharp:1.3984e-03 L9_sharp:1.1623e-03 L10_sharp:1.3897e-03 L11_sharp:2.1042e-03 L12_sharp:6.7796e-03 total_fnorm:1.2656e+00 total_l1_linf:1.1040e+03 total_spectral:6.4844e-01 L1_fnorm:1.5527e-01 L2_fnorm:1.2598e-01 L3_fnorm:1.2598e-01 L4_fnorm:1.2988e-01 L5_fnorm:1.2695e-01 L6_fnorm:1.2793e-01 L7_fnorm:1.2695e-01 L8_fnorm:1.2354e-01 L9_fnorm:1.2598e-01 L10_fnorm:1.2451e-01 L11_fnorm:1.2451e-01 L12_fnorm:1.2451e-01 L1_l1linf:2.2583e-02 L2_l1linf:1.6724e-02 L3_l1linf:1.6357e-02 L4_l1linf:1.6479e-02 L5_l1linf:1.6113e-02 L6_l1linf:1.5381e-02 L7_l1linf:1.5259e-02 L8_l1linf:1.4954e-02 L9_l1linf:1.3550e-02 L10_l1linf:1.4160e-02 L11_l1linf:1.5259e-02 L12_l1linf:1.7334e-02 L1_spectral:2.8379e-03 L2_spectral:1.9587e-03 L3_spectral:2.0119e-03 L4_spectral:2.0716e-03 L5_spectral:1.9782e-03 L6_spectral:1.9926e-03 L7_spectral:1.9208e-03 L8_spectral:2.0334e-03 L9_spectral:1.9758e-03 L10_spectral:1.9898e-03 L11_spectral:1.9579e-03 L12_spectral:1.9785e-03 train_time:390090ms step_avg:40.63ms +[2025-09-11 10:18:11] [Rank 0] step:9601/10000 train_time:391381ms step_avg:40.76ms +[2025-09-11 10:18:11] [Rank 0] step:9601/10000 train_time:391381ms step_avg:40.76ms +[2025-09-11 10:18:11] [Rank 0] step:9621/10000 train_time:392097ms step_avg:40.75ms +[2025-09-11 10:18:11] [Rank 0] step:9621/10000 train_time:392097ms step_avg:40.75ms +[2025-09-11 10:18:12] [Rank 0] step:9641/10000 train_time:392807ms step_avg:40.74ms +[2025-09-11 10:18:12] [Rank 0] step:9641/10000 train_time:392807ms step_avg:40.74ms +[2025-09-11 10:18:13] [Rank 0] step:9661/10000 train_time:393525ms step_avg:40.73ms +[2025-09-11 10:18:13] [Rank 0] step:9661/10000 train_time:393525ms step_avg:40.73ms +[2025-09-11 10:18:14] [Rank 0] step:9681/10000 train_time:394235ms step_avg:40.72ms +[2025-09-11 10:18:14] [Rank 0] step:9681/10000 
train_time:394235ms step_avg:40.72ms +[2025-09-11 10:18:14] [Rank 0] step:9701/10000 train_time:394947ms step_avg:40.71ms +[2025-09-11 10:18:14] [Rank 0] step:9701/10000 train_time:394947ms step_avg:40.71ms +[2025-09-11 10:18:15] [Rank 0] step:9721/10000 train_time:395662ms step_avg:40.70ms +[2025-09-11 10:18:15] [Rank 0] step:9721/10000 train_time:395662ms step_avg:40.70ms +[2025-09-11 10:18:16] [Rank 0] step:9741/10000 train_time:396375ms step_avg:40.69ms +[2025-09-11 10:18:16] [Rank 0] step:9741/10000 train_time:396375ms step_avg:40.69ms +[2025-09-11 10:18:16] [Rank 0] step:9761/10000 train_time:397086ms step_avg:40.68ms +[2025-09-11 10:18:16] [Rank 0] step:9761/10000 train_time:397086ms step_avg:40.68ms +[2025-09-11 10:18:17] [Rank 0] step:9781/10000 train_time:397796ms step_avg:40.67ms +[2025-09-11 10:18:17] [Rank 0] step:9781/10000 train_time:397796ms step_avg:40.67ms +[2025-09-11 10:18:18] [Rank 0] step:9801/10000 train_time:398512ms step_avg:40.66ms +[2025-09-11 10:18:18] [Rank 0] step:9801/10000 train_time:398512ms step_avg:40.66ms +[2025-09-11 10:18:19] [Rank 0] step:9821/10000 train_time:399227ms step_avg:40.65ms +[2025-09-11 10:18:19] [Rank 0] step:9821/10000 train_time:399227ms step_avg:40.65ms +[2025-09-11 10:18:19] [Rank 0] step:9841/10000 train_time:399943ms step_avg:40.64ms +[2025-09-11 10:18:19] [Rank 0] step:9841/10000 train_time:399943ms step_avg:40.64ms +[2025-09-11 10:18:20] [Rank 0] step:9861/10000 train_time:400656ms step_avg:40.63ms +[2025-09-11 10:18:20] [Rank 0] step:9861/10000 train_time:400656ms step_avg:40.63ms +[2025-09-11 10:18:21] [Rank 0] step:9881/10000 train_time:401369ms step_avg:40.62ms +[2025-09-11 10:18:21] [Rank 0] step:9881/10000 train_time:401369ms step_avg:40.62ms +[2025-09-11 10:18:21] [Rank 0] step:9901/10000 train_time:402078ms step_avg:40.61ms +[2025-09-11 10:18:21] [Rank 0] step:9901/10000 train_time:402078ms step_avg:40.61ms +[2025-09-11 10:18:22] [Rank 0] step:9921/10000 train_time:402789ms step_avg:40.60ms 
+[2025-09-11 10:18:22] [Rank 0] step:9921/10000 train_time:402789ms step_avg:40.60ms +[2025-09-11 10:18:23] [Rank 0] step:9941/10000 train_time:403505ms step_avg:40.59ms +[2025-09-11 10:18:23] [Rank 0] step:9941/10000 train_time:403505ms step_avg:40.59ms +[2025-09-11 10:18:24] [Rank 0] step:9961/10000 train_time:404221ms step_avg:40.58ms +[2025-09-11 10:18:24] [Rank 0] step:9961/10000 train_time:404221ms step_avg:40.58ms +[2025-09-11 10:18:24] [Rank 0] step:9981/10000 train_time:404934ms step_avg:40.57ms +[2025-09-11 10:18:24] [Rank 0] step:9981/10000 train_time:404934ms step_avg:40.57ms +[2025-09-11 10:18:25] [Rank 0] step:10000/10000 train_time:405618ms step_avg:40.56ms +[2025-09-11 10:18:25] [Rank 0] step:10000/10000 train_time:405618ms step_avg:40.56ms +[2025-09-11 10:18:25] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 10:18:25] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 10:18:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 10:18:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 10:18:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 10:18:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 10:18:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:18:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:18:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 10:18:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 10:18:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 10:18:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 10:18:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 10:18:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 10:18:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 10:18:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 10:18:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 10:18:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 10:18:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 10:18:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 10:18:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 10:18:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 10:18:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 10:18:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 10:18:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 10:18:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 10:18:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 10:18:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 10:18:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 10:18:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 10:18:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 10:18:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 10:18:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 10:18:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 10:18:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 10:18:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 10:18:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 10:18:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 10:18:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 10:18:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 10:18:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 10:18:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 10:18:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 10:18:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 10:18:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:18:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:18:35] [Rank 0] PRINT: step:10000/10000 val_loss:4.3838 total_sharp:5.0702e-04 L1_sharp:2.2693e-03 L2_sharp:2.2820e-04 L3_sharp:1.7312e-04 L4_sharp:2.2896e-04 L5_sharp:4.9421e-04 L6_sharp:5.4440e-04 L7_sharp:4.3141e-04 L8_sharp:1.1153e-03 L9_sharp:8.4393e-04 L10_sharp:1.0423e-03 L11_sharp:1.5629e-03 L12_sharp:6.0826e-03 total_fnorm:4.6289e-01 total_l1_linf:3.0600e+02 total_spectral:2.4512e-01 L1_fnorm:6.2988e-02 L2_fnorm:4.8340e-02 L3_fnorm:4.8340e-02 L4_fnorm:5.0049e-02 L5_fnorm:4.9072e-02 L6_fnorm:4.9316e-02 L7_fnorm:4.9316e-02 L8_fnorm:4.7852e-02 L9_fnorm:4.8340e-02 L10_fnorm:4.8096e-02 L11_fnorm:4.8096e-02 L12_fnorm:4.8584e-02 L1_l1linf:7.5684e-03 L2_l1linf:5.1880e-03 L3_l1linf:5.1575e-03 L4_l1linf:4.6082e-03 L5_l1linf:4.6997e-03 L6_l1linf:4.6997e-03 L7_l1linf:4.8218e-03 L8_l1linf:4.7607e-03 L9_l1linf:4.2419e-03 L10_l1linf:4.3030e-03 L11_l1linf:4.5166e-03 L12_l1linf:5.2490e-03 L1_spectral:1.2435e-03 L2_spectral:7.5672e-04 L3_spectral:7.9501e-04 L4_spectral:8.1008e-04 L5_spectral:7.9343e-04 L6_spectral:7.8534e-04 L7_spectral:7.7264e-04 L8_spectral:8.1993e-04 L9_spectral:7.8640e-04 L10_spectral:7.9767e-04 L11_spectral:7.8170e-04 L12_spectral:7.8718e-04 train_time:405638ms step_avg:40.56ms +[2025-09-11 10:18:35] [Rank 0] PRINT: step:10000/10000 val_loss:4.3838 total_sharp:5.0702e-04 L1_sharp:2.2693e-03 L2_sharp:2.2820e-04 L3_sharp:1.7312e-04 L4_sharp:2.2896e-04 L5_sharp:4.9421e-04 L6_sharp:5.4440e-04 L7_sharp:4.3141e-04 L8_sharp:1.1153e-03 L9_sharp:8.4393e-04 L10_sharp:1.0423e-03 L11_sharp:1.5629e-03 L12_sharp:6.0826e-03 total_fnorm:4.6289e-01 total_l1_linf:3.0600e+02 total_spectral:2.4512e-01 L1_fnorm:6.2988e-02 L2_fnorm:4.8340e-02 L3_fnorm:4.8340e-02 L4_fnorm:5.0049e-02 L5_fnorm:4.9072e-02 L6_fnorm:4.9316e-02 L7_fnorm:4.9316e-02 L8_fnorm:4.7852e-02 L9_fnorm:4.8340e-02 L10_fnorm:4.8096e-02 L11_fnorm:4.8096e-02 L12_fnorm:4.8584e-02 L1_l1linf:7.5684e-03 L2_l1linf:5.1880e-03 L3_l1linf:5.1575e-03 L4_l1linf:4.6082e-03 L5_l1linf:4.6997e-03 
L6_l1linf:4.6997e-03 L7_l1linf:4.8218e-03 L8_l1linf:4.7607e-03 L9_l1linf:4.2419e-03 L10_l1linf:4.3030e-03 L11_l1linf:4.5166e-03 L12_l1linf:5.2490e-03 L1_spectral:1.2435e-03 L2_spectral:7.5672e-04 L3_spectral:7.9501e-04 L4_spectral:8.1008e-04 L5_spectral:7.9343e-04 L6_spectral:7.8534e-04 L7_spectral:7.7264e-04 L8_spectral:8.1993e-04 L9_spectral:7.8640e-04 L10_spectral:7.9767e-04 L11_spectral:7.8170e-04 L12_spectral:7.8718e-04 train_time:405638ms step_avg:40.56ms +[2025-09-11 10:18:35] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 10:18:35 2025 --- +[2025-09-11 10:18:35] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 10:18:35 2025 --- +[2025-09-11 10:18:35] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 10:18:35] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.02_seed_44/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.02_seed_44/config.json new file mode 100644 index 0000000000000000000000000000000000000000..e67db847abc4d66cb3dcee0dd5e2ec53ab0cc07a --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.02_seed_44/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 44, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.005, + "muon_lr": 0.02, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "9d82661b-ea83-4315-a290-ff7e391f4f9f", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.02_seed_44/training_log_9d82661b-ea83-4315-a290-ff7e391f4f9f.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.02_seed_44/training_log_9d82661b-ea83-4315-a290-ff7e391f4f9f.txt new file mode 100644 index 0000000000000000000000000000000000000000..828b6df1dc5d5f4dfa67b854fefb23d7c0599ec0 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.02_seed_44/training_log_9d82661b-ea83-4315-a290-ff7e391f4f9f.txt @@ -0,0 +1,4264 @@ +[2025-09-11 10:45:16] [Rank 0] PRINT: --- Script Start: Thu Sep 11 10:45:16 2025 --- +[2025-09-11 10:45:16] [Rank 0] PRINT: --- Script Start: Thu Sep 11 10:45:16 2025 --- +[2025-09-11 10:45:16] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005, muon_lr=0.02, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 10:45:16] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005, muon_lr=0.02, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 10:45:16] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 10:45:16] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 10:45:16] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 10:45:16] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 10:45:16] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.02_seed_44 +[2025-09-11 10:45:16] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.02_seed_44 +[2025-09-11 10:45:16] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import 
dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, 
"unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." 
+ "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention 
sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 10:45:16] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 10:45:16] [Rank 0] PRINT: Constructing model... +[2025-09-11 10:45:16] [Rank 0] PRINT: Constructing model... +[2025-09-11 10:45:17] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 10:45:17] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 10:45:17] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 10:45:17] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 10:45:17] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 10:45:17] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 10:45:17] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 10:45:17] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 10:45:17] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 10:45:17] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 10:45:19] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 10:45:19] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 10:45:19] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 10:45:19] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 10:45:19] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 10:45:19] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 10:45:24] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 10:45:24] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 10:45:24] [Rank 0] PRINT: Starting warmup... +[2025-09-11 10:45:24] [Rank 0] PRINT: Starting warmup... +[2025-09-11 10:46:01] [Rank 0] PRINT: Warmup complete. +[2025-09-11 10:46:01] [Rank 0] PRINT: Warmup complete. +[2025-09-11 10:46:01] [Rank 0] PRINT: Starting training... +[2025-09-11 10:46:01] [Rank 0] PRINT: Starting training... 
+[2025-09-11 10:46:02] [Rank 0] step:21/10000 train_time:1136ms step_avg:54.09ms +[2025-09-11 10:46:02] [Rank 0] step:21/10000 train_time:1136ms step_avg:54.09ms +[2025-09-11 10:46:03] [Rank 0] step:41/10000 train_time:1868ms step_avg:45.56ms +[2025-09-11 10:46:03] [Rank 0] step:41/10000 train_time:1868ms step_avg:45.56ms +[2025-09-11 10:46:04] [Rank 0] step:61/10000 train_time:2599ms step_avg:42.60ms +[2025-09-11 10:46:04] [Rank 0] step:61/10000 train_time:2599ms step_avg:42.60ms +[2025-09-11 10:46:05] [Rank 0] step:81/10000 train_time:3330ms step_avg:41.11ms +[2025-09-11 10:46:05] [Rank 0] step:81/10000 train_time:3330ms step_avg:41.11ms +[2025-09-11 10:46:05] [Rank 0] step:101/10000 train_time:4060ms step_avg:40.20ms +[2025-09-11 10:46:05] [Rank 0] step:101/10000 train_time:4060ms step_avg:40.20ms +[2025-09-11 10:46:06] [Rank 0] step:121/10000 train_time:4791ms step_avg:39.59ms +[2025-09-11 10:46:06] [Rank 0] step:121/10000 train_time:4791ms step_avg:39.59ms +[2025-09-11 10:46:07] [Rank 0] step:141/10000 train_time:5520ms step_avg:39.15ms +[2025-09-11 10:46:07] [Rank 0] step:141/10000 train_time:5520ms step_avg:39.15ms +[2025-09-11 10:46:08] [Rank 0] step:161/10000 train_time:6250ms step_avg:38.82ms +[2025-09-11 10:46:08] [Rank 0] step:161/10000 train_time:6250ms step_avg:38.82ms +[2025-09-11 10:46:08] [Rank 0] step:181/10000 train_time:6979ms step_avg:38.56ms +[2025-09-11 10:46:08] [Rank 0] step:181/10000 train_time:6979ms step_avg:38.56ms +[2025-09-11 10:46:09] [Rank 0] step:201/10000 train_time:7710ms step_avg:38.36ms +[2025-09-11 10:46:09] [Rank 0] step:201/10000 train_time:7710ms step_avg:38.36ms +[2025-09-11 10:46:10] [Rank 0] step:221/10000 train_time:8439ms step_avg:38.19ms +[2025-09-11 10:46:10] [Rank 0] step:221/10000 train_time:8439ms step_avg:38.19ms +[2025-09-11 10:46:10] [Rank 0] step:241/10000 train_time:9170ms step_avg:38.05ms +[2025-09-11 10:46:10] [Rank 0] step:241/10000 train_time:9170ms step_avg:38.05ms +[2025-09-11 10:46:11] [Rank 0] 
step:261/10000 train_time:9899ms step_avg:37.93ms +[2025-09-11 10:46:11] [Rank 0] step:261/10000 train_time:9899ms step_avg:37.93ms +[2025-09-11 10:46:12] [Rank 0] step:281/10000 train_time:10630ms step_avg:37.83ms +[2025-09-11 10:46:12] [Rank 0] step:281/10000 train_time:10630ms step_avg:37.83ms +[2025-09-11 10:46:13] [Rank 0] step:301/10000 train_time:11359ms step_avg:37.74ms +[2025-09-11 10:46:13] [Rank 0] step:301/10000 train_time:11359ms step_avg:37.74ms +[2025-09-11 10:46:13] [Rank 0] step:321/10000 train_time:12089ms step_avg:37.66ms +[2025-09-11 10:46:13] [Rank 0] step:321/10000 train_time:12089ms step_avg:37.66ms +[2025-09-11 10:46:14] [Rank 0] step:341/10000 train_time:12818ms step_avg:37.59ms +[2025-09-11 10:46:14] [Rank 0] step:341/10000 train_time:12818ms step_avg:37.59ms +[2025-09-11 10:46:15] [Rank 0] step:361/10000 train_time:14049ms step_avg:38.92ms +[2025-09-11 10:46:15] [Rank 0] step:361/10000 train_time:14049ms step_avg:38.92ms +[2025-09-11 10:46:16] [Rank 0] step:381/10000 train_time:14779ms step_avg:38.79ms +[2025-09-11 10:46:16] [Rank 0] step:381/10000 train_time:14779ms step_avg:38.79ms +[2025-09-11 10:46:17] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 10:46:17] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 10:46:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 10:46:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 10:46:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 10:46:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 10:46:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:46:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 10:46:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 10:46:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 10:46:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 10:46:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 10:47:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 10:47:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 10:47:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 10:47:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 10:47:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 10:47:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 10:47:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 10:47:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 10:47:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 10:47:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 10:47:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 10:47:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 10:47:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 10:47:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 10:47:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 10:47:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 10:47:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 10:47:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 10:47:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 10:47:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 10:47:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 10:47:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 10:47:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 10:47:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 10:47:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 10:47:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 10:47:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 10:47:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 10:47:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 10:47:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 10:47:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 10:47:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 10:47:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:47:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:47:05] [Rank 0] PRINT: step:400/10000 val_loss:6.2951 total_sharp:7.6808e-03 L1_sharp:2.6184e-02 L2_sharp:1.3636e-02 L3_sharp:7.2017e-03 L4_sharp:9.9726e-03 L5_sharp:6.3851e-03 L6_sharp:4.5870e-03 L7_sharp:3.7291e-03 L8_sharp:2.9710e-03 L9_sharp:4.0372e-03 L10_sharp:3.5536e-03 L11_sharp:4.0771e-03 L12_sharp:9.3092e-03 total_fnorm:2.1493e+01 total_l1_linf:7.2153e+04 total_spectral:1.0746e+01 L1_fnorm:2.4581e+00 L2_fnorm:2.4498e+00 L3_fnorm:2.4473e+00 L4_fnorm:2.3890e+00 L5_fnorm:2.3311e+00 L6_fnorm:2.2777e+00 L7_fnorm:2.2301e+00 L8_fnorm:2.1969e+00 L9_fnorm:2.1043e+00 L10_fnorm:2.0606e+00 L11_fnorm:1.9728e+00 L12_fnorm:1.8776e+00 L1_l1linf:7.9952e-01 L2_l1linf:7.9951e-01 L3_l1linf:7.9668e-01 L4_l1linf:7.8560e-01 L5_l1linf:7.6816e-01 L6_l1linf:7.5506e-01 L7_l1linf:7.5128e-01 L8_l1linf:7.4362e-01 L9_l1linf:6.9764e-01 L10_l1linf:6.7179e-01 L11_l1linf:6.1625e-01 L12_l1linf:5.3118e-01 L1_spectral:2.4081e-02 L2_spectral:2.4101e-02 L3_spectral:2.4100e-02 L4_spectral:2.4080e-02 L5_spectral:2.4089e-02 L6_spectral:2.4075e-02 L7_spectral:2.4076e-02 L8_spectral:2.4066e-02 L9_spectral:2.4071e-02 L10_spectral:2.4072e-02 L11_spectral:2.4067e-02 L12_spectral:2.4047e-02 train_time:15628ms step_avg:39.07ms +[2025-09-11 10:47:05] [Rank 0] PRINT: step:400/10000 val_loss:6.2951 total_sharp:7.6808e-03 L1_sharp:2.6184e-02 L2_sharp:1.3636e-02 L3_sharp:7.2017e-03 L4_sharp:9.9726e-03 L5_sharp:6.3851e-03 L6_sharp:4.5870e-03 L7_sharp:3.7291e-03 L8_sharp:2.9710e-03 L9_sharp:4.0372e-03 L10_sharp:3.5536e-03 L11_sharp:4.0771e-03 L12_sharp:9.3092e-03 total_fnorm:2.1493e+01 total_l1_linf:7.2153e+04 total_spectral:1.0746e+01 L1_fnorm:2.4581e+00 L2_fnorm:2.4498e+00 L3_fnorm:2.4473e+00 L4_fnorm:2.3890e+00 L5_fnorm:2.3311e+00 L6_fnorm:2.2777e+00 L7_fnorm:2.2301e+00 L8_fnorm:2.1969e+00 L9_fnorm:2.1043e+00 L10_fnorm:2.0606e+00 L11_fnorm:1.9728e+00 L12_fnorm:1.8776e+00 L1_l1linf:7.9952e-01 L2_l1linf:7.9951e-01 L3_l1linf:7.9668e-01 L4_l1linf:7.8560e-01 L5_l1linf:7.6816e-01 
L6_l1linf:7.5506e-01 L7_l1linf:7.5128e-01 L8_l1linf:7.4362e-01 L9_l1linf:6.9764e-01 L10_l1linf:6.7179e-01 L11_l1linf:6.1625e-01 L12_l1linf:5.3118e-01 L1_spectral:2.4081e-02 L2_spectral:2.4101e-02 L3_spectral:2.4100e-02 L4_spectral:2.4080e-02 L5_spectral:2.4089e-02 L6_spectral:2.4075e-02 L7_spectral:2.4076e-02 L8_spectral:2.4066e-02 L9_spectral:2.4071e-02 L10_spectral:2.4072e-02 L11_spectral:2.4067e-02 L12_spectral:2.4047e-02 train_time:15628ms step_avg:39.07ms +[2025-09-11 10:47:34] [Rank 0] step:401/10000 train_time:45347ms step_avg:113.08ms +[2025-09-11 10:47:34] [Rank 0] step:401/10000 train_time:45347ms step_avg:113.08ms +[2025-09-11 10:47:36] [Rank 0] step:421/10000 train_time:47273ms step_avg:112.29ms +[2025-09-11 10:47:36] [Rank 0] step:421/10000 train_time:47273ms step_avg:112.29ms +[2025-09-11 10:47:37] [Rank 0] step:441/10000 train_time:47912ms step_avg:108.64ms +[2025-09-11 10:47:37] [Rank 0] step:441/10000 train_time:47912ms step_avg:108.64ms +[2025-09-11 10:47:38] [Rank 0] step:461/10000 train_time:48551ms step_avg:105.32ms +[2025-09-11 10:47:38] [Rank 0] step:461/10000 train_time:48551ms step_avg:105.32ms +[2025-09-11 10:47:38] [Rank 0] step:481/10000 train_time:49189ms step_avg:102.26ms +[2025-09-11 10:47:38] [Rank 0] step:481/10000 train_time:49189ms step_avg:102.26ms +[2025-09-11 10:47:39] [Rank 0] step:501/10000 train_time:49828ms step_avg:99.46ms +[2025-09-11 10:47:39] [Rank 0] step:501/10000 train_time:49828ms step_avg:99.46ms +[2025-09-11 10:47:39] [Rank 0] step:521/10000 train_time:50465ms step_avg:96.86ms +[2025-09-11 10:47:39] [Rank 0] step:521/10000 train_time:50465ms step_avg:96.86ms +[2025-09-11 10:47:40] [Rank 0] step:541/10000 train_time:51103ms step_avg:94.46ms +[2025-09-11 10:47:40] [Rank 0] step:541/10000 train_time:51103ms step_avg:94.46ms +[2025-09-11 10:47:41] [Rank 0] step:561/10000 train_time:51742ms step_avg:92.23ms +[2025-09-11 10:47:41] [Rank 0] step:561/10000 train_time:51742ms step_avg:92.23ms +[2025-09-11 10:47:41] [Rank 
0] step:581/10000 train_time:52380ms step_avg:90.15ms +[2025-09-11 10:47:41] [Rank 0] step:581/10000 train_time:52380ms step_avg:90.15ms +[2025-09-11 10:47:42] [Rank 0] step:601/10000 train_time:53017ms step_avg:88.22ms +[2025-09-11 10:47:42] [Rank 0] step:601/10000 train_time:53017ms step_avg:88.22ms +[2025-09-11 10:47:43] [Rank 0] step:621/10000 train_time:53655ms step_avg:86.40ms +[2025-09-11 10:47:43] [Rank 0] step:621/10000 train_time:53655ms step_avg:86.40ms +[2025-09-11 10:47:43] [Rank 0] step:641/10000 train_time:54293ms step_avg:84.70ms +[2025-09-11 10:47:43] [Rank 0] step:641/10000 train_time:54293ms step_avg:84.70ms +[2025-09-11 10:47:44] [Rank 0] step:661/10000 train_time:54931ms step_avg:83.10ms +[2025-09-11 10:47:44] [Rank 0] step:661/10000 train_time:54931ms step_avg:83.10ms +[2025-09-11 10:47:45] [Rank 0] step:681/10000 train_time:55568ms step_avg:81.60ms +[2025-09-11 10:47:45] [Rank 0] step:681/10000 train_time:55568ms step_avg:81.60ms +[2025-09-11 10:47:45] [Rank 0] step:701/10000 train_time:56206ms step_avg:80.18ms +[2025-09-11 10:47:45] [Rank 0] step:701/10000 train_time:56206ms step_avg:80.18ms +[2025-09-11 10:47:46] [Rank 0] step:721/10000 train_time:56844ms step_avg:78.84ms +[2025-09-11 10:47:46] [Rank 0] step:721/10000 train_time:56844ms step_avg:78.84ms +[2025-09-11 10:47:46] [Rank 0] step:741/10000 train_time:57482ms step_avg:77.57ms +[2025-09-11 10:47:46] [Rank 0] step:741/10000 train_time:57482ms step_avg:77.57ms +[2025-09-11 10:47:47] [Rank 0] step:761/10000 train_time:58124ms step_avg:76.38ms +[2025-09-11 10:47:47] [Rank 0] step:761/10000 train_time:58124ms step_avg:76.38ms +[2025-09-11 10:47:48] [Rank 0] step:781/10000 train_time:58766ms step_avg:75.24ms +[2025-09-11 10:47:48] [Rank 0] step:781/10000 train_time:58766ms step_avg:75.24ms +[2025-09-11 10:47:48] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 10:47:48] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 10:47:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 10:47:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 10:48:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 10:48:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 10:48:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:48:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:48:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 10:48:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 10:48:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 10:48:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 10:48:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 10:48:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 10:48:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 10:48:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 10:48:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 10:48:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 10:48:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 10:48:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 10:48:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 10:48:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 10:48:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 10:48:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 10:48:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 10:48:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 10:48:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 10:48:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 10:48:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 10:48:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 10:48:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 10:48:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 10:48:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 10:48:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 10:48:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 10:48:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 10:48:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 10:48:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 10:48:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 10:48:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 10:48:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... 
+[2025-09-11 10:48:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 10:48:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 10:48:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 10:48:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:48:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:48:32] [Rank 0] PRINT: step:800/10000 val_loss:5.8891 total_sharp:4.5353e-03 L1_sharp:1.1168e-02 L2_sharp:3.0360e-03 L3_sharp:2.1777e-03 L4_sharp:2.4798e-03 L5_sharp:2.9169e-03 L6_sharp:2.1272e-03 L7_sharp:1.9890e-03 L8_sharp:2.9297e-03 L9_sharp:2.5927e-03 L10_sharp:2.4999e-03 L11_sharp:4.6362e-03 L12_sharp:1.6237e-02 total_fnorm:2.1250e+01 total_l1_linf:5.5552e+04 total_spectral:1.0562e+01 L1_fnorm:2.4844e+00 L2_fnorm:2.5000e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.4375e+00 L5_fnorm:2.3906e+00 L6_fnorm:2.3906e+00 L7_fnorm:2.3906e+00 L8_fnorm:2.3125e+00 L9_fnorm:2.2969e+00 L10_fnorm:2.2500e+00 L11_fnorm:2.1406e+00 L12_fnorm:1.8984e+00 L1_l1linf:8.1250e-01 L2_l1linf:7.8125e-01 L3_l1linf:7.5391e-01 L4_l1linf:7.6953e-01 L5_l1linf:7.5000e-01 L6_l1linf:7.6562e-01 L7_l1linf:7.7734e-01 L8_l1linf:7.6172e-01 L9_l1linf:7.3047e-01 L10_l1linf:6.8750e-01 L11_l1linf:6.1719e-01 L12_l1linf:4.5312e-01 L1_spectral:2.6885e-02 L2_spectral:2.6940e-02 L3_spectral:2.6758e-02 L4_spectral:2.6612e-02 L5_spectral:2.6572e-02 L6_spectral:2.6560e-02 L7_spectral:2.6536e-02 L8_spectral:2.6595e-02 L9_spectral:2.6556e-02 L10_spectral:2.6610e-02 L11_spectral:2.6603e-02 L12_spectral:2.6386e-02 train_time:59391ms step_avg:74.24ms +[2025-09-11 10:48:32] [Rank 0] PRINT: step:800/10000 val_loss:5.8891 total_sharp:4.5353e-03 L1_sharp:1.1168e-02 L2_sharp:3.0360e-03 L3_sharp:2.1777e-03 L4_sharp:2.4798e-03 L5_sharp:2.9169e-03 L6_sharp:2.1272e-03 L7_sharp:1.9890e-03 L8_sharp:2.9297e-03 
L9_sharp:2.5927e-03 L10_sharp:2.4999e-03 L11_sharp:4.6362e-03 L12_sharp:1.6237e-02 total_fnorm:2.1250e+01 total_l1_linf:5.5552e+04 total_spectral:1.0562e+01 L1_fnorm:2.4844e+00 L2_fnorm:2.5000e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.4375e+00 L5_fnorm:2.3906e+00 L6_fnorm:2.3906e+00 L7_fnorm:2.3906e+00 L8_fnorm:2.3125e+00 L9_fnorm:2.2969e+00 L10_fnorm:2.2500e+00 L11_fnorm:2.1406e+00 L12_fnorm:1.8984e+00 L1_l1linf:8.1250e-01 L2_l1linf:7.8125e-01 L3_l1linf:7.5391e-01 L4_l1linf:7.6953e-01 L5_l1linf:7.5000e-01 L6_l1linf:7.6562e-01 L7_l1linf:7.7734e-01 L8_l1linf:7.6172e-01 L9_l1linf:7.3047e-01 L10_l1linf:6.8750e-01 L11_l1linf:6.1719e-01 L12_l1linf:4.5312e-01 L1_spectral:2.6885e-02 L2_spectral:2.6940e-02 L3_spectral:2.6758e-02 L4_spectral:2.6612e-02 L5_spectral:2.6572e-02 L6_spectral:2.6560e-02 L7_spectral:2.6536e-02 L8_spectral:2.6595e-02 L9_spectral:2.6556e-02 L10_spectral:2.6610e-02 L11_spectral:2.6603e-02 L12_spectral:2.6386e-02 train_time:59391ms step_avg:74.24ms +[2025-09-11 10:48:33] [Rank 0] step:801/10000 train_time:60660ms step_avg:75.73ms +[2025-09-11 10:48:33] [Rank 0] step:801/10000 train_time:60660ms step_avg:75.73ms +[2025-09-11 10:48:34] [Rank 0] step:821/10000 train_time:61308ms step_avg:74.67ms +[2025-09-11 10:48:34] [Rank 0] step:821/10000 train_time:61308ms step_avg:74.67ms +[2025-09-11 10:48:34] [Rank 0] step:841/10000 train_time:61953ms step_avg:73.67ms +[2025-09-11 10:48:34] [Rank 0] step:841/10000 train_time:61953ms step_avg:73.67ms +[2025-09-11 10:48:35] [Rank 0] step:861/10000 train_time:62599ms step_avg:72.70ms +[2025-09-11 10:48:35] [Rank 0] step:861/10000 train_time:62599ms step_avg:72.70ms +[2025-09-11 10:48:36] [Rank 0] step:881/10000 train_time:63243ms step_avg:71.79ms +[2025-09-11 10:48:36] [Rank 0] step:881/10000 train_time:63243ms step_avg:71.79ms +[2025-09-11 10:48:36] [Rank 0] step:901/10000 train_time:63888ms step_avg:70.91ms +[2025-09-11 10:48:36] [Rank 0] step:901/10000 train_time:63888ms step_avg:70.91ms +[2025-09-11 10:48:37] [Rank 0] 
step:921/10000 train_time:64532ms step_avg:70.07ms +[2025-09-11 10:48:37] [Rank 0] step:921/10000 train_time:64532ms step_avg:70.07ms +[2025-09-11 10:48:38] [Rank 0] step:941/10000 train_time:65176ms step_avg:69.26ms +[2025-09-11 10:48:38] [Rank 0] step:941/10000 train_time:65176ms step_avg:69.26ms +[2025-09-11 10:48:38] [Rank 0] step:961/10000 train_time:65820ms step_avg:68.49ms +[2025-09-11 10:48:38] [Rank 0] step:961/10000 train_time:65820ms step_avg:68.49ms +[2025-09-11 10:48:39] [Rank 0] step:981/10000 train_time:66464ms step_avg:67.75ms +[2025-09-11 10:48:39] [Rank 0] step:981/10000 train_time:66464ms step_avg:67.75ms +[2025-09-11 10:48:40] [Rank 0] step:1001/10000 train_time:67109ms step_avg:67.04ms +[2025-09-11 10:48:40] [Rank 0] step:1001/10000 train_time:67109ms step_avg:67.04ms +[2025-09-11 10:48:40] [Rank 0] step:1021/10000 train_time:67753ms step_avg:66.36ms +[2025-09-11 10:48:40] [Rank 0] step:1021/10000 train_time:67753ms step_avg:66.36ms +[2025-09-11 10:48:41] [Rank 0] step:1041/10000 train_time:68398ms step_avg:65.70ms +[2025-09-11 10:48:41] [Rank 0] step:1041/10000 train_time:68398ms step_avg:65.70ms +[2025-09-11 10:48:42] [Rank 0] step:1061/10000 train_time:69042ms step_avg:65.07ms +[2025-09-11 10:48:42] [Rank 0] step:1061/10000 train_time:69042ms step_avg:65.07ms +[2025-09-11 10:48:42] [Rank 0] step:1081/10000 train_time:69687ms step_avg:64.47ms +[2025-09-11 10:48:42] [Rank 0] step:1081/10000 train_time:69687ms step_avg:64.47ms +[2025-09-11 10:48:43] [Rank 0] step:1101/10000 train_time:70332ms step_avg:63.88ms +[2025-09-11 10:48:43] [Rank 0] step:1101/10000 train_time:70332ms step_avg:63.88ms +[2025-09-11 10:48:43] [Rank 0] step:1121/10000 train_time:70976ms step_avg:63.32ms +[2025-09-11 10:48:43] [Rank 0] step:1121/10000 train_time:70976ms step_avg:63.32ms +[2025-09-11 10:48:44] [Rank 0] step:1141/10000 train_time:71620ms step_avg:62.77ms +[2025-09-11 10:48:44] [Rank 0] step:1141/10000 train_time:71620ms step_avg:62.77ms +[2025-09-11 10:48:45] 
[Rank 0] step:1161/10000 train_time:72265ms step_avg:62.24ms +[2025-09-11 10:48:45] [Rank 0] step:1161/10000 train_time:72265ms step_avg:62.24ms +[2025-09-11 10:48:45] [Rank 0] step:1181/10000 train_time:72910ms step_avg:61.74ms +[2025-09-11 10:48:45] [Rank 0] step:1181/10000 train_time:72910ms step_avg:61.74ms +[2025-09-11 10:48:46] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 10:48:46] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 10:48:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 10:48:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 10:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 10:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 10:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 10:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 10:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 10:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 10:48:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 10:48:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 10:48:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... 
+[2025-09-11 10:48:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 10:48:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 10:48:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 10:48:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 10:48:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 10:48:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 10:48:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 10:48:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 10:48:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 10:48:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 10:48:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 10:48:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 10:48:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 10:48:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 10:48:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 10:48:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 10:48:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 10:48:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 10:48:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 10:48:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... 
+[2025-09-11 10:48:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 10:48:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 10:48:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 10:48:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 10:48:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 10:48:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 10:48:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 10:48:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 10:48:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 10:48:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:48:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:48:56] [Rank 0] PRINT: step:1200/10000 val_loss:5.5447 total_sharp:3.7543e-03 L1_sharp:1.0388e-02 L2_sharp:2.5888e-03 L3_sharp:6.0824e-04 L4_sharp:1.5141e-03 L5_sharp:1.5720e-03 L6_sharp:1.5692e-03 L7_sharp:1.6745e-03 L8_sharp:2.9005e-03 L9_sharp:2.0474e-03 L10_sharp:2.0207e-03 L11_sharp:3.3022e-03 L12_sharp:1.2770e-02 total_fnorm:2.0625e+01 total_l1_linf:5.3760e+04 total_spectral:1.0312e+01 L1_fnorm:2.5312e+00 L2_fnorm:2.4844e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.4688e+00 L6_fnorm:2.4844e+00 L7_fnorm:2.4688e+00 L8_fnorm:2.4062e+00 L9_fnorm:2.4531e+00 L10_fnorm:2.4531e+00 L11_fnorm:2.3906e+00 L12_fnorm:2.2500e+00 L1_l1linf:7.5000e-01 L2_l1linf:7.0703e-01 L3_l1linf:7.2266e-01 L4_l1linf:7.3828e-01 L5_l1linf:7.3438e-01 L6_l1linf:7.1875e-01 L7_l1linf:7.3047e-01 L8_l1linf:7.4609e-01 L9_l1linf:7.3438e-01 L10_l1linf:7.1875e-01 L11_l1linf:6.8750e-01 L12_l1linf:5.3906e-01 L1_spectral:2.7922e-02 L2_spectral:2.7896e-02 L3_spectral:2.7765e-02 L4_spectral:2.7611e-02 L5_spectral:2.7717e-02 L6_spectral:2.7627e-02 L7_spectral:2.7435e-02 L8_spectral:2.7672e-02 L9_spectral:2.7248e-02 L10_spectral:2.7384e-02 L11_spectral:2.7441e-02 L12_spectral:2.7571e-02 train_time:73536ms step_avg:61.28ms +[2025-09-11 10:48:56] [Rank 0] PRINT: step:1200/10000 val_loss:5.5447 total_sharp:3.7543e-03 L1_sharp:1.0388e-02 L2_sharp:2.5888e-03 L3_sharp:6.0824e-04 L4_sharp:1.5141e-03 L5_sharp:1.5720e-03 L6_sharp:1.5692e-03 L7_sharp:1.6745e-03 L8_sharp:2.9005e-03 L9_sharp:2.0474e-03 L10_sharp:2.0207e-03 L11_sharp:3.3022e-03 L12_sharp:1.2770e-02 total_fnorm:2.0625e+01 total_l1_linf:5.3760e+04 total_spectral:1.0312e+01 L1_fnorm:2.5312e+00 L2_fnorm:2.4844e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.4688e+00 L6_fnorm:2.4844e+00 L7_fnorm:2.4688e+00 L8_fnorm:2.4062e+00 L9_fnorm:2.4531e+00 L10_fnorm:2.4531e+00 L11_fnorm:2.3906e+00 L12_fnorm:2.2500e+00 L1_l1linf:7.5000e-01 L2_l1linf:7.0703e-01 L3_l1linf:7.2266e-01 L4_l1linf:7.3828e-01 L5_l1linf:7.3438e-01 
L6_l1linf:7.1875e-01 L7_l1linf:7.3047e-01 L8_l1linf:7.4609e-01 L9_l1linf:7.3438e-01 L10_l1linf:7.1875e-01 L11_l1linf:6.8750e-01 L12_l1linf:5.3906e-01 L1_spectral:2.7922e-02 L2_spectral:2.7896e-02 L3_spectral:2.7765e-02 L4_spectral:2.7611e-02 L5_spectral:2.7717e-02 L6_spectral:2.7627e-02 L7_spectral:2.7435e-02 L8_spectral:2.7672e-02 L9_spectral:2.7248e-02 L10_spectral:2.7384e-02 L11_spectral:2.7441e-02 L12_spectral:2.7571e-02 train_time:73536ms step_avg:61.28ms +[2025-09-11 10:48:57] [Rank 0] step:1201/10000 train_time:74726ms step_avg:62.22ms +[2025-09-11 10:48:57] [Rank 0] step:1201/10000 train_time:74726ms step_avg:62.22ms +[2025-09-11 10:48:58] [Rank 0] step:1221/10000 train_time:75374ms step_avg:61.73ms +[2025-09-11 10:48:58] [Rank 0] step:1221/10000 train_time:75374ms step_avg:61.73ms +[2025-09-11 10:48:58] [Rank 0] step:1241/10000 train_time:76020ms step_avg:61.26ms +[2025-09-11 10:48:58] [Rank 0] step:1241/10000 train_time:76020ms step_avg:61.26ms +[2025-09-11 10:48:59] [Rank 0] step:1261/10000 train_time:76666ms step_avg:60.80ms +[2025-09-11 10:48:59] [Rank 0] step:1261/10000 train_time:76666ms step_avg:60.80ms +[2025-09-11 10:49:00] [Rank 0] step:1281/10000 train_time:77310ms step_avg:60.35ms +[2025-09-11 10:49:00] [Rank 0] step:1281/10000 train_time:77310ms step_avg:60.35ms +[2025-09-11 10:49:00] [Rank 0] step:1301/10000 train_time:77955ms step_avg:59.92ms +[2025-09-11 10:49:00] [Rank 0] step:1301/10000 train_time:77955ms step_avg:59.92ms +[2025-09-11 10:49:01] [Rank 0] step:1321/10000 train_time:78600ms step_avg:59.50ms +[2025-09-11 10:49:01] [Rank 0] step:1321/10000 train_time:78600ms step_avg:59.50ms +[2025-09-11 10:49:02] [Rank 0] step:1341/10000 train_time:79244ms step_avg:59.09ms +[2025-09-11 10:49:02] [Rank 0] step:1341/10000 train_time:79244ms step_avg:59.09ms +[2025-09-11 10:49:02] [Rank 0] step:1361/10000 train_time:79890ms step_avg:58.70ms +[2025-09-11 10:49:02] [Rank 0] step:1361/10000 train_time:79890ms step_avg:58.70ms +[2025-09-11 10:49:03] 
[Rank 0] step:1381/10000 train_time:80535ms step_avg:58.32ms +[2025-09-11 10:49:03] [Rank 0] step:1381/10000 train_time:80535ms step_avg:58.32ms +[2025-09-11 10:49:04] [Rank 0] step:1401/10000 train_time:81180ms step_avg:57.94ms +[2025-09-11 10:49:04] [Rank 0] step:1401/10000 train_time:81180ms step_avg:57.94ms +[2025-09-11 10:49:04] [Rank 0] step:1421/10000 train_time:81824ms step_avg:57.58ms +[2025-09-11 10:49:04] [Rank 0] step:1421/10000 train_time:81824ms step_avg:57.58ms +[2025-09-11 10:49:05] [Rank 0] step:1441/10000 train_time:82469ms step_avg:57.23ms +[2025-09-11 10:49:05] [Rank 0] step:1441/10000 train_time:82469ms step_avg:57.23ms +[2025-09-11 10:49:05] [Rank 0] step:1461/10000 train_time:83113ms step_avg:56.89ms +[2025-09-11 10:49:05] [Rank 0] step:1461/10000 train_time:83113ms step_avg:56.89ms +[2025-09-11 10:49:06] [Rank 0] step:1481/10000 train_time:83757ms step_avg:56.55ms +[2025-09-11 10:49:06] [Rank 0] step:1481/10000 train_time:83757ms step_avg:56.55ms +[2025-09-11 10:49:07] [Rank 0] step:1501/10000 train_time:84406ms step_avg:56.23ms +[2025-09-11 10:49:07] [Rank 0] step:1501/10000 train_time:84406ms step_avg:56.23ms +[2025-09-11 10:49:07] [Rank 0] step:1521/10000 train_time:85054ms step_avg:55.92ms +[2025-09-11 10:49:07] [Rank 0] step:1521/10000 train_time:85054ms step_avg:55.92ms +[2025-09-11 10:49:08] [Rank 0] step:1541/10000 train_time:85703ms step_avg:55.62ms +[2025-09-11 10:49:08] [Rank 0] step:1541/10000 train_time:85703ms step_avg:55.62ms +[2025-09-11 10:49:09] [Rank 0] step:1561/10000 train_time:86352ms step_avg:55.32ms +[2025-09-11 10:49:09] [Rank 0] step:1561/10000 train_time:86352ms step_avg:55.32ms +[2025-09-11 10:49:09] [Rank 0] step:1581/10000 train_time:87001ms step_avg:55.03ms +[2025-09-11 10:49:09] [Rank 0] step:1581/10000 train_time:87001ms step_avg:55.03ms +[2025-09-11 10:49:10] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 10:49:10] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 10:49:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 10:49:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 10:49:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 10:49:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 10:49:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:49:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:49:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 10:49:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 10:49:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 10:49:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 10:49:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 10:49:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 10:49:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 10:49:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 10:49:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 10:49:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 10:49:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 10:49:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 10:49:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 10:49:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 10:49:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 10:49:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 10:49:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 10:49:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 10:49:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 10:49:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 10:49:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 10:49:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 10:49:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 10:49:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 10:49:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 10:49:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 10:49:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 10:49:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 10:49:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 10:49:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 10:49:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 10:49:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 10:49:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 10:49:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 10:49:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 10:49:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 10:49:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:49:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:49:20] [Rank 0] PRINT: step:1600/10000 val_loss:5.3747 total_sharp:3.2209e-03 L1_sharp:7.7574e-03 L2_sharp:1.2307e-03 L3_sharp:5.8480e-04 L4_sharp:1.4062e-03 L5_sharp:1.5006e-03 L6_sharp:1.1344e-03 L7_sharp:1.1775e-03 L8_sharp:2.5497e-03 L9_sharp:1.9763e-03 L10_sharp:1.6893e-03 L11_sharp:2.4015e-03 L12_sharp:1.0254e-02 total_fnorm:1.9125e+01 total_l1_linf:4.9152e+04 total_spectral:9.5625e+00 L1_fnorm:2.5312e+00 L2_fnorm:2.5000e+00 L3_fnorm:2.5156e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.4688e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5000e+00 L8_fnorm:2.4531e+00 L9_fnorm:2.4844e+00 L10_fnorm:2.5000e+00 L11_fnorm:2.4688e+00 L12_fnorm:2.3438e+00 L1_l1linf:7.3828e-01 L2_l1linf:6.8750e-01 L3_l1linf:7.0312e-01 L4_l1linf:7.0312e-01 L5_l1linf:6.9922e-01 L6_l1linf:6.9922e-01 L7_l1linf:6.9922e-01 L8_l1linf:7.0312e-01 L9_l1linf:7.0703e-01 L10_l1linf:7.0312e-01 L11_l1linf:6.8359e-01 L12_l1linf:5.6250e-01 L1_spectral:2.8985e-02 L2_spectral:2.8755e-02 L3_spectral:2.8472e-02 L4_spectral:2.8419e-02 L5_spectral:2.8321e-02 L6_spectral:2.8418e-02 L7_spectral:2.8430e-02 L8_spectral:2.8815e-02 L9_spectral:2.8157e-02 L10_spectral:2.8127e-02 L11_spectral:2.8086e-02 L12_spectral:2.8261e-02 train_time:87632ms step_avg:54.77ms +[2025-09-11 10:49:20] [Rank 0] PRINT: step:1600/10000 
val_loss:5.3747 total_sharp:3.2209e-03 L1_sharp:7.7574e-03 L2_sharp:1.2307e-03 L3_sharp:5.8480e-04 L4_sharp:1.4062e-03 L5_sharp:1.5006e-03 L6_sharp:1.1344e-03 L7_sharp:1.1775e-03 L8_sharp:2.5497e-03 L9_sharp:1.9763e-03 L10_sharp:1.6893e-03 L11_sharp:2.4015e-03 L12_sharp:1.0254e-02 total_fnorm:1.9125e+01 total_l1_linf:4.9152e+04 total_spectral:9.5625e+00 L1_fnorm:2.5312e+00 L2_fnorm:2.5000e+00 L3_fnorm:2.5156e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.4688e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5000e+00 L8_fnorm:2.4531e+00 L9_fnorm:2.4844e+00 L10_fnorm:2.5000e+00 L11_fnorm:2.4688e+00 L12_fnorm:2.3438e+00 L1_l1linf:7.3828e-01 L2_l1linf:6.8750e-01 L3_l1linf:7.0312e-01 L4_l1linf:7.0312e-01 L5_l1linf:6.9922e-01 L6_l1linf:6.9922e-01 L7_l1linf:6.9922e-01 L8_l1linf:7.0312e-01 L9_l1linf:7.0703e-01 L10_l1linf:7.0312e-01 L11_l1linf:6.8359e-01 L12_l1linf:5.6250e-01 L1_spectral:2.8985e-02 L2_spectral:2.8755e-02 L3_spectral:2.8472e-02 L4_spectral:2.8419e-02 L5_spectral:2.8321e-02 L6_spectral:2.8418e-02 L7_spectral:2.8430e-02 L8_spectral:2.8815e-02 L9_spectral:2.8157e-02 L10_spectral:2.8127e-02 L11_spectral:2.8086e-02 L12_spectral:2.8261e-02 train_time:87632ms step_avg:54.77ms +[2025-09-11 10:49:21] [Rank 0] step:1601/10000 train_time:88832ms step_avg:55.49ms +[2025-09-11 10:49:21] [Rank 0] step:1601/10000 train_time:88832ms step_avg:55.49ms +[2025-09-11 10:49:22] [Rank 0] step:1621/10000 train_time:89512ms step_avg:55.22ms +[2025-09-11 10:49:22] [Rank 0] step:1621/10000 train_time:89512ms step_avg:55.22ms +[2025-09-11 10:49:22] [Rank 0] step:1641/10000 train_time:90161ms step_avg:54.94ms +[2025-09-11 10:49:22] [Rank 0] step:1641/10000 train_time:90161ms step_avg:54.94ms +[2025-09-11 10:49:23] [Rank 0] step:1661/10000 train_time:90810ms step_avg:54.67ms +[2025-09-11 10:49:23] [Rank 0] step:1661/10000 train_time:90810ms step_avg:54.67ms +[2025-09-11 10:49:24] [Rank 0] step:1681/10000 train_time:91459ms step_avg:54.41ms +[2025-09-11 10:49:24] [Rank 0] step:1681/10000 train_time:91459ms 
step_avg:54.41ms +[2025-09-11 10:49:25] [Rank 0] step:1701/10000 train_time:92685ms step_avg:54.49ms +[2025-09-11 10:49:25] [Rank 0] step:1701/10000 train_time:92685ms step_avg:54.49ms +[2025-09-11 10:49:26] [Rank 0] step:1721/10000 train_time:93335ms step_avg:54.23ms +[2025-09-11 10:49:26] [Rank 0] step:1721/10000 train_time:93335ms step_avg:54.23ms +[2025-09-11 10:49:26] [Rank 0] step:1741/10000 train_time:93983ms step_avg:53.98ms +[2025-09-11 10:49:26] [Rank 0] step:1741/10000 train_time:93983ms step_avg:53.98ms +[2025-09-11 10:49:27] [Rank 0] step:1761/10000 train_time:94943ms step_avg:53.91ms +[2025-09-11 10:49:27] [Rank 0] step:1761/10000 train_time:94943ms step_avg:53.91ms +[2025-09-11 10:49:28] [Rank 0] step:1781/10000 train_time:95592ms step_avg:53.67ms +[2025-09-11 10:49:28] [Rank 0] step:1781/10000 train_time:95592ms step_avg:53.67ms +[2025-09-11 10:49:28] [Rank 0] step:1801/10000 train_time:96240ms step_avg:53.44ms +[2025-09-11 10:49:28] [Rank 0] step:1801/10000 train_time:96240ms step_avg:53.44ms +[2025-09-11 10:49:29] [Rank 0] step:1821/10000 train_time:96888ms step_avg:53.21ms +[2025-09-11 10:49:29] [Rank 0] step:1821/10000 train_time:96888ms step_avg:53.21ms +[2025-09-11 10:49:30] [Rank 0] step:1841/10000 train_time:97537ms step_avg:52.98ms +[2025-09-11 10:49:30] [Rank 0] step:1841/10000 train_time:97537ms step_avg:52.98ms +[2025-09-11 10:49:30] [Rank 0] step:1861/10000 train_time:98185ms step_avg:52.76ms +[2025-09-11 10:49:30] [Rank 0] step:1861/10000 train_time:98185ms step_avg:52.76ms +[2025-09-11 10:49:31] [Rank 0] step:1881/10000 train_time:98833ms step_avg:52.54ms +[2025-09-11 10:49:31] [Rank 0] step:1881/10000 train_time:98833ms step_avg:52.54ms +[2025-09-11 10:49:32] [Rank 0] step:1901/10000 train_time:99481ms step_avg:52.33ms +[2025-09-11 10:49:32] [Rank 0] step:1901/10000 train_time:99481ms step_avg:52.33ms +[2025-09-11 10:49:32] [Rank 0] step:1921/10000 train_time:100129ms step_avg:52.12ms +[2025-09-11 10:49:32] [Rank 0] step:1921/10000 
train_time:100129ms step_avg:52.12ms +[2025-09-11 10:49:33] [Rank 0] step:1941/10000 train_time:100777ms step_avg:51.92ms +[2025-09-11 10:49:33] [Rank 0] step:1941/10000 train_time:100777ms step_avg:51.92ms +[2025-09-11 10:49:34] [Rank 0] step:1961/10000 train_time:101425ms step_avg:51.72ms +[2025-09-11 10:49:34] [Rank 0] step:1961/10000 train_time:101425ms step_avg:51.72ms +[2025-09-11 10:49:34] [Rank 0] step:1981/10000 train_time:102072ms step_avg:51.53ms +[2025-09-11 10:49:34] [Rank 0] step:1981/10000 train_time:102072ms step_avg:51.53ms +[2025-09-11 10:49:35] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 10:49:35] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 10:49:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 10:49:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 10:49:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 10:49:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 10:49:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:49:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:49:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 10:49:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 10:49:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 10:49:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 10:49:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 10:49:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 10:49:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 10:49:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 10:49:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 10:49:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 10:49:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 10:49:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 10:49:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 10:49:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 10:49:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 10:49:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 10:49:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 10:49:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 10:49:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 10:49:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 10:49:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 10:49:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 10:49:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 10:49:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 10:49:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 10:49:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 10:49:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 10:49:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 10:49:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 10:49:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 10:49:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 10:49:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 10:49:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 10:49:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 10:49:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 10:49:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 10:49:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:49:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:49:45] [Rank 0] PRINT: step:2000/10000 val_loss:5.2367 total_sharp:2.5551e-03 L1_sharp:5.2232e-03 L2_sharp:8.4912e-04 L3_sharp:5.8481e-04 L4_sharp:9.8838e-04 L5_sharp:1.0350e-03 L6_sharp:9.4112e-04 L7_sharp:8.0706e-04 L8_sharp:2.1038e-03 L9_sharp:1.6248e-03 L10_sharp:1.5834e-03 L11_sharp:2.3313e-03 L12_sharp:1.2352e-02 total_fnorm:1.9125e+01 total_l1_linf:4.9152e+04 total_spectral:9.5625e+00 L1_fnorm:2.5469e+00 L2_fnorm:2.5000e+00 L3_fnorm:2.5156e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5000e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.3906e+00 L1_l1linf:7.3828e-01 L2_l1linf:6.7188e-01 L3_l1linf:6.7578e-01 L4_l1linf:6.7578e-01 L5_l1linf:6.7578e-01 L6_l1linf:6.7578e-01 L7_l1linf:6.7188e-01 L8_l1linf:6.6797e-01 L9_l1linf:6.7188e-01 L10_l1linf:6.8750e-01 L11_l1linf:6.7969e-01 L12_l1linf:5.5078e-01 L1_spectral:2.9797e-02 L2_spectral:2.9215e-02 L3_spectral:2.9083e-02 L4_spectral:2.8926e-02 L5_spectral:2.9081e-02 L6_spectral:2.9214e-02 L7_spectral:2.9211e-02 L8_spectral:2.9680e-02 L9_spectral:2.8888e-02 L10_spectral:2.9058e-02 L11_spectral:2.8731e-02 L12_spectral:2.8838e-02 train_time:102703ms step_avg:51.35ms +[2025-09-11 10:49:45] [Rank 0] PRINT: step:2000/10000 val_loss:5.2367 total_sharp:2.5551e-03 L1_sharp:5.2232e-03 L2_sharp:8.4912e-04 L3_sharp:5.8481e-04 L4_sharp:9.8838e-04 L5_sharp:1.0350e-03 L6_sharp:9.4112e-04 L7_sharp:8.0706e-04 L8_sharp:2.1038e-03 L9_sharp:1.6248e-03 L10_sharp:1.5834e-03 L11_sharp:2.3313e-03 L12_sharp:1.2352e-02 total_fnorm:1.9125e+01 total_l1_linf:4.9152e+04 total_spectral:9.5625e+00 L1_fnorm:2.5469e+00 L2_fnorm:2.5000e+00 L3_fnorm:2.5156e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5000e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.3906e+00 L1_l1linf:7.3828e-01 L2_l1linf:6.7188e-01 L3_l1linf:6.7578e-01 L4_l1linf:6.7578e-01 L5_l1linf:6.7578e-01 
L6_l1linf:6.7578e-01 L7_l1linf:6.7188e-01 L8_l1linf:6.6797e-01 L9_l1linf:6.7188e-01 L10_l1linf:6.8750e-01 L11_l1linf:6.7969e-01 L12_l1linf:5.5078e-01 L1_spectral:2.9797e-02 L2_spectral:2.9215e-02 L3_spectral:2.9083e-02 L4_spectral:2.8926e-02 L5_spectral:2.9081e-02 L6_spectral:2.9214e-02 L7_spectral:2.9211e-02 L8_spectral:2.9680e-02 L9_spectral:2.8888e-02 L10_spectral:2.9058e-02 L11_spectral:2.8731e-02 L12_spectral:2.8838e-02 train_time:102703ms step_avg:51.35ms +[2025-09-11 10:49:46] [Rank 0] step:2001/10000 train_time:103902ms step_avg:51.93ms +[2025-09-11 10:49:46] [Rank 0] step:2001/10000 train_time:103902ms step_avg:51.93ms +[2025-09-11 10:49:47] [Rank 0] step:2021/10000 train_time:104581ms step_avg:51.75ms +[2025-09-11 10:49:47] [Rank 0] step:2021/10000 train_time:104581ms step_avg:51.75ms +[2025-09-11 10:49:48] [Rank 0] step:2041/10000 train_time:105230ms step_avg:51.56ms +[2025-09-11 10:49:48] [Rank 0] step:2041/10000 train_time:105230ms step_avg:51.56ms +[2025-09-11 10:49:48] [Rank 0] step:2061/10000 train_time:105879ms step_avg:51.37ms +[2025-09-11 10:49:48] [Rank 0] step:2061/10000 train_time:105879ms step_avg:51.37ms +[2025-09-11 10:49:49] [Rank 0] step:2081/10000 train_time:106528ms step_avg:51.19ms +[2025-09-11 10:49:49] [Rank 0] step:2081/10000 train_time:106528ms step_avg:51.19ms +[2025-09-11 10:49:49] [Rank 0] step:2101/10000 train_time:107176ms step_avg:51.01ms +[2025-09-11 10:49:49] [Rank 0] step:2101/10000 train_time:107176ms step_avg:51.01ms +[2025-09-11 10:49:50] [Rank 0] step:2121/10000 train_time:107825ms step_avg:50.84ms +[2025-09-11 10:49:50] [Rank 0] step:2121/10000 train_time:107825ms step_avg:50.84ms +[2025-09-11 10:49:51] [Rank 0] step:2141/10000 train_time:108473ms step_avg:50.66ms +[2025-09-11 10:49:51] [Rank 0] step:2141/10000 train_time:108473ms step_avg:50.66ms +[2025-09-11 10:49:51] [Rank 0] step:2161/10000 train_time:109121ms step_avg:50.50ms +[2025-09-11 10:49:51] [Rank 0] step:2161/10000 train_time:109121ms step_avg:50.50ms 
+[2025-09-11 10:49:52] [Rank 0] step:2181/10000 train_time:109769ms step_avg:50.33ms +[2025-09-11 10:49:52] [Rank 0] step:2181/10000 train_time:109769ms step_avg:50.33ms +[2025-09-11 10:49:53] [Rank 0] step:2201/10000 train_time:110418ms step_avg:50.17ms +[2025-09-11 10:49:53] [Rank 0] step:2201/10000 train_time:110418ms step_avg:50.17ms +[2025-09-11 10:49:53] [Rank 0] step:2221/10000 train_time:111065ms step_avg:50.01ms +[2025-09-11 10:49:53] [Rank 0] step:2221/10000 train_time:111065ms step_avg:50.01ms +[2025-09-11 10:49:54] [Rank 0] step:2241/10000 train_time:111725ms step_avg:49.86ms +[2025-09-11 10:49:54] [Rank 0] step:2241/10000 train_time:111725ms step_avg:49.86ms +[2025-09-11 10:49:55] [Rank 0] step:2261/10000 train_time:112386ms step_avg:49.71ms +[2025-09-11 10:49:55] [Rank 0] step:2261/10000 train_time:112386ms step_avg:49.71ms +[2025-09-11 10:49:55] [Rank 0] step:2281/10000 train_time:113046ms step_avg:49.56ms +[2025-09-11 10:49:55] [Rank 0] step:2281/10000 train_time:113046ms step_avg:49.56ms +[2025-09-11 10:49:56] [Rank 0] step:2301/10000 train_time:113708ms step_avg:49.42ms +[2025-09-11 10:49:56] [Rank 0] step:2301/10000 train_time:113708ms step_avg:49.42ms +[2025-09-11 10:49:57] [Rank 0] step:2321/10000 train_time:114369ms step_avg:49.28ms +[2025-09-11 10:49:57] [Rank 0] step:2321/10000 train_time:114369ms step_avg:49.28ms +[2025-09-11 10:49:57] [Rank 0] step:2341/10000 train_time:115029ms step_avg:49.14ms +[2025-09-11 10:49:57] [Rank 0] step:2341/10000 train_time:115029ms step_avg:49.14ms +[2025-09-11 10:49:58] [Rank 0] step:2361/10000 train_time:115691ms step_avg:49.00ms +[2025-09-11 10:49:58] [Rank 0] step:2361/10000 train_time:115691ms step_avg:49.00ms +[2025-09-11 10:49:59] [Rank 0] step:2381/10000 train_time:116351ms step_avg:48.87ms +[2025-09-11 10:49:59] [Rank 0] step:2381/10000 train_time:116351ms step_avg:48.87ms +[2025-09-11 10:49:59] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 10:49:59] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 10:50:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 10:50:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 10:50:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 10:50:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 10:50:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:50:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:50:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 10:50:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 10:50:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 10:50:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 10:50:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 10:50:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 10:50:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 10:50:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 10:50:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 10:50:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 10:50:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 10:50:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 10:50:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 10:50:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 10:50:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 10:50:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 10:50:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 10:50:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 10:50:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 10:50:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 10:50:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 10:50:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 10:50:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 10:50:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 10:50:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 10:50:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 10:50:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 10:50:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 10:50:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 10:50:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 10:50:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 10:50:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 10:50:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 10:50:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 10:50:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 10:50:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 10:50:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:50:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:50:09] [Rank 0] PRINT: step:2400/10000 val_loss:5.1050 total_sharp:2.8740e-03 L1_sharp:6.2338e-03 L2_sharp:7.6176e-04 L3_sharp:3.7013e-04 L4_sharp:7.7236e-04 L5_sharp:1.0498e-03 L6_sharp:8.3402e-04 L7_sharp:7.3586e-04 L8_sharp:2.1412e-03 L9_sharp:1.5220e-03 L10_sharp:1.5609e-03 L11_sharp:2.2564e-03 L12_sharp:6.2175e-03 total_fnorm:1.7875e+01 total_l1_linf:4.5312e+04 total_spectral:9.0000e+00 L1_fnorm:2.5469e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5156e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5000e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.4375e+00 L1_l1linf:7.2266e-01 L2_l1linf:6.7578e-01 L3_l1linf:6.4453e-01 L4_l1linf:6.6016e-01 L5_l1linf:6.4453e-01 L6_l1linf:6.5234e-01 L7_l1linf:6.6797e-01 L8_l1linf:6.5234e-01 L9_l1linf:6.6016e-01 L10_l1linf:6.4844e-01 L11_l1linf:6.6406e-01 L12_l1linf:5.6641e-01 L1_spectral:3.0219e-02 L2_spectral:2.9788e-02 L3_spectral:2.9539e-02 L4_spectral:2.9474e-02 L5_spectral:2.9458e-02 L6_spectral:2.9547e-02 L7_spectral:2.9795e-02 L8_spectral:3.0414e-02 L9_spectral:2.9565e-02 L10_spectral:2.9558e-02 L11_spectral:2.9270e-02 L12_spectral:2.9239e-02 train_time:116993ms step_avg:48.75ms +[2025-09-11 10:50:09] [Rank 0] PRINT: step:2400/10000 
val_loss:5.1050 total_sharp:2.8740e-03 L1_sharp:6.2338e-03 L2_sharp:7.6176e-04 L3_sharp:3.7013e-04 L4_sharp:7.7236e-04 L5_sharp:1.0498e-03 L6_sharp:8.3402e-04 L7_sharp:7.3586e-04 L8_sharp:2.1412e-03 L9_sharp:1.5220e-03 L10_sharp:1.5609e-03 L11_sharp:2.2564e-03 L12_sharp:6.2175e-03 total_fnorm:1.7875e+01 total_l1_linf:4.5312e+04 total_spectral:9.0000e+00 L1_fnorm:2.5469e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5156e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5000e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.4375e+00 L1_l1linf:7.2266e-01 L2_l1linf:6.7578e-01 L3_l1linf:6.4453e-01 L4_l1linf:6.6016e-01 L5_l1linf:6.4453e-01 L6_l1linf:6.5234e-01 L7_l1linf:6.6797e-01 L8_l1linf:6.5234e-01 L9_l1linf:6.6016e-01 L10_l1linf:6.4844e-01 L11_l1linf:6.6406e-01 L12_l1linf:5.6641e-01 L1_spectral:3.0219e-02 L2_spectral:2.9788e-02 L3_spectral:2.9539e-02 L4_spectral:2.9474e-02 L5_spectral:2.9458e-02 L6_spectral:2.9547e-02 L7_spectral:2.9795e-02 L8_spectral:3.0414e-02 L9_spectral:2.9565e-02 L10_spectral:2.9558e-02 L11_spectral:2.9270e-02 L12_spectral:2.9239e-02 train_time:116993ms step_avg:48.75ms +[2025-09-11 10:50:10] [Rank 0] step:2401/10000 train_time:118179ms step_avg:49.22ms +[2025-09-11 10:50:10] [Rank 0] step:2401/10000 train_time:118179ms step_avg:49.22ms +[2025-09-11 10:50:11] [Rank 0] step:2421/10000 train_time:118845ms step_avg:49.09ms +[2025-09-11 10:50:11] [Rank 0] step:2421/10000 train_time:118845ms step_avg:49.09ms +[2025-09-11 10:50:12] [Rank 0] step:2441/10000 train_time:119508ms step_avg:48.96ms +[2025-09-11 10:50:12] [Rank 0] step:2441/10000 train_time:119508ms step_avg:48.96ms +[2025-09-11 10:50:12] [Rank 0] step:2461/10000 train_time:120171ms step_avg:48.83ms +[2025-09-11 10:50:12] [Rank 0] step:2461/10000 train_time:120171ms step_avg:48.83ms +[2025-09-11 10:50:13] [Rank 0] step:2481/10000 train_time:120834ms step_avg:48.70ms +[2025-09-11 10:50:13] [Rank 0] step:2481/10000 
train_time:120834ms step_avg:48.70ms +[2025-09-11 10:50:13] [Rank 0] step:2501/10000 train_time:121496ms step_avg:48.58ms +[2025-09-11 10:50:13] [Rank 0] step:2501/10000 train_time:121496ms step_avg:48.58ms +[2025-09-11 10:50:14] [Rank 0] step:2521/10000 train_time:122158ms step_avg:48.46ms +[2025-09-11 10:50:14] [Rank 0] step:2521/10000 train_time:122158ms step_avg:48.46ms +[2025-09-11 10:50:15] [Rank 0] step:2541/10000 train_time:122820ms step_avg:48.34ms +[2025-09-11 10:50:15] [Rank 0] step:2541/10000 train_time:122820ms step_avg:48.34ms +[2025-09-11 10:50:15] [Rank 0] step:2561/10000 train_time:123483ms step_avg:48.22ms +[2025-09-11 10:50:15] [Rank 0] step:2561/10000 train_time:123483ms step_avg:48.22ms +[2025-09-11 10:50:16] [Rank 0] step:2581/10000 train_time:124145ms step_avg:48.10ms +[2025-09-11 10:50:16] [Rank 0] step:2581/10000 train_time:124145ms step_avg:48.10ms +[2025-09-11 10:50:17] [Rank 0] step:2601/10000 train_time:124807ms step_avg:47.98ms +[2025-09-11 10:50:17] [Rank 0] step:2601/10000 train_time:124807ms step_avg:47.98ms +[2025-09-11 10:50:17] [Rank 0] step:2621/10000 train_time:125470ms step_avg:47.87ms +[2025-09-11 10:50:17] [Rank 0] step:2621/10000 train_time:125470ms step_avg:47.87ms +[2025-09-11 10:50:18] [Rank 0] step:2641/10000 train_time:126134ms step_avg:47.76ms +[2025-09-11 10:50:18] [Rank 0] step:2641/10000 train_time:126134ms step_avg:47.76ms +[2025-09-11 10:50:19] [Rank 0] step:2661/10000 train_time:126795ms step_avg:47.65ms +[2025-09-11 10:50:19] [Rank 0] step:2661/10000 train_time:126795ms step_avg:47.65ms +[2025-09-11 10:50:19] [Rank 0] step:2681/10000 train_time:127457ms step_avg:47.54ms +[2025-09-11 10:50:19] [Rank 0] step:2681/10000 train_time:127457ms step_avg:47.54ms +[2025-09-11 10:50:20] [Rank 0] step:2701/10000 train_time:128120ms step_avg:47.43ms +[2025-09-11 10:50:20] [Rank 0] step:2701/10000 train_time:128120ms step_avg:47.43ms +[2025-09-11 10:50:21] [Rank 0] step:2721/10000 train_time:128781ms step_avg:47.33ms 
+[2025-09-11 10:50:21] [Rank 0] step:2721/10000 train_time:128781ms step_avg:47.33ms +[2025-09-11 10:50:21] [Rank 0] step:2741/10000 train_time:129445ms step_avg:47.23ms +[2025-09-11 10:50:21] [Rank 0] step:2741/10000 train_time:129445ms step_avg:47.23ms +[2025-09-11 10:50:22] [Rank 0] step:2761/10000 train_time:130107ms step_avg:47.12ms +[2025-09-11 10:50:22] [Rank 0] step:2761/10000 train_time:130107ms step_avg:47.12ms +[2025-09-11 10:50:23] [Rank 0] step:2781/10000 train_time:130769ms step_avg:47.02ms +[2025-09-11 10:50:23] [Rank 0] step:2781/10000 train_time:130769ms step_avg:47.02ms +[2025-09-11 10:50:23] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 10:50:23] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 10:50:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 10:50:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 10:50:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 10:50:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 10:50:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:50:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:50:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 10:50:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 10:50:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 10:50:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 10:50:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 10:50:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 10:50:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 10:50:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 10:50:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 10:50:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 10:50:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 10:50:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 10:50:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 10:50:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 10:50:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 10:50:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 10:50:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 10:50:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 10:50:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 10:50:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 10:50:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 10:50:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 10:50:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 10:50:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 10:50:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 10:50:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 10:50:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 10:50:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 10:50:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 10:50:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 10:50:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 10:50:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 10:50:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 10:50:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 10:50:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 10:50:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 10:50:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:50:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:50:33] [Rank 0] PRINT: step:2800/10000 val_loss:5.0352 total_sharp:3.1976e-03 L1_sharp:3.1385e-03 L2_sharp:1.0377e-03 L3_sharp:4.3877e-04 L4_sharp:4.5879e-04 L5_sharp:6.8803e-04 L6_sharp:7.0391e-04 L7_sharp:7.0605e-04 L8_sharp:2.0781e-03 L9_sharp:1.6459e-03 L10_sharp:1.5272e-03 L11_sharp:2.3870e-03 L12_sharp:9.9676e-03 total_fnorm:1.7125e+01 total_l1_linf:4.3008e+04 total_spectral:8.5625e+00 L1_fnorm:2.5469e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5156e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.4375e+00 L1_l1linf:7.0312e-01 L2_l1linf:6.5234e-01 L3_l1linf:6.3672e-01 L4_l1linf:6.5234e-01 L5_l1linf:6.3672e-01 L6_l1linf:6.4453e-01 L7_l1linf:6.5625e-01 L8_l1linf:6.2891e-01 L9_l1linf:6.4844e-01 L10_l1linf:6.4844e-01 L11_l1linf:6.4844e-01 L12_l1linf:5.5469e-01 L1_spectral:3.0581e-02 L2_spectral:3.0166e-02 L3_spectral:3.0127e-02 L4_spectral:2.9848e-02 L5_spectral:2.9893e-02 L6_spectral:3.0109e-02 L7_spectral:3.0131e-02 L8_spectral:3.0869e-02 L9_spectral:3.0085e-02 L10_spectral:2.9976e-02 L11_spectral:2.9946e-02 L12_spectral:2.9665e-02 train_time:131413ms step_avg:46.93ms +[2025-09-11 10:50:33] [Rank 0] PRINT: step:2800/10000 val_loss:5.0352 total_sharp:3.1976e-03 L1_sharp:3.1385e-03 L2_sharp:1.0377e-03 L3_sharp:4.3877e-04 L4_sharp:4.5879e-04 L5_sharp:6.8803e-04 L6_sharp:7.0391e-04 L7_sharp:7.0605e-04 L8_sharp:2.0781e-03 L9_sharp:1.6459e-03 L10_sharp:1.5272e-03 L11_sharp:2.3870e-03 L12_sharp:9.9676e-03 total_fnorm:1.7125e+01 total_l1_linf:4.3008e+04 total_spectral:8.5625e+00 L1_fnorm:2.5469e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5156e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.4375e+00 L1_l1linf:7.0312e-01 L2_l1linf:6.5234e-01 L3_l1linf:6.3672e-01 L4_l1linf:6.5234e-01 L5_l1linf:6.3672e-01 
L6_l1linf:6.4453e-01 L7_l1linf:6.5625e-01 L8_l1linf:6.2891e-01 L9_l1linf:6.4844e-01 L10_l1linf:6.4844e-01 L11_l1linf:6.4844e-01 L12_l1linf:5.5469e-01 L1_spectral:3.0581e-02 L2_spectral:3.0166e-02 L3_spectral:3.0127e-02 L4_spectral:2.9848e-02 L5_spectral:2.9893e-02 L6_spectral:3.0109e-02 L7_spectral:3.0131e-02 L8_spectral:3.0869e-02 L9_spectral:3.0085e-02 L10_spectral:2.9976e-02 L11_spectral:2.9946e-02 L12_spectral:2.9665e-02 train_time:131413ms step_avg:46.93ms +[2025-09-11 10:50:35] [Rank 0] step:2801/10000 train_time:132631ms step_avg:47.35ms +[2025-09-11 10:50:35] [Rank 0] step:2801/10000 train_time:132631ms step_avg:47.35ms +[2025-09-11 10:50:35] [Rank 0] step:2821/10000 train_time:133297ms step_avg:47.25ms +[2025-09-11 10:50:35] [Rank 0] step:2821/10000 train_time:133297ms step_avg:47.25ms +[2025-09-11 10:50:36] [Rank 0] step:2841/10000 train_time:133962ms step_avg:47.15ms +[2025-09-11 10:50:36] [Rank 0] step:2841/10000 train_time:133962ms step_avg:47.15ms +[2025-09-11 10:50:37] [Rank 0] step:2861/10000 train_time:134626ms step_avg:47.06ms +[2025-09-11 10:50:37] [Rank 0] step:2861/10000 train_time:134626ms step_avg:47.06ms +[2025-09-11 10:50:37] [Rank 0] step:2881/10000 train_time:135289ms step_avg:46.96ms +[2025-09-11 10:50:37] [Rank 0] step:2881/10000 train_time:135289ms step_avg:46.96ms +[2025-09-11 10:50:38] [Rank 0] step:2901/10000 train_time:135952ms step_avg:46.86ms +[2025-09-11 10:50:38] [Rank 0] step:2901/10000 train_time:135952ms step_avg:46.86ms +[2025-09-11 10:50:39] [Rank 0] step:2921/10000 train_time:136615ms step_avg:46.77ms +[2025-09-11 10:50:39] [Rank 0] step:2921/10000 train_time:136615ms step_avg:46.77ms +[2025-09-11 10:50:39] [Rank 0] step:2941/10000 train_time:137277ms step_avg:46.68ms +[2025-09-11 10:50:39] [Rank 0] step:2941/10000 train_time:137277ms step_avg:46.68ms +[2025-09-11 10:50:40] [Rank 0] step:2961/10000 train_time:137939ms step_avg:46.59ms +[2025-09-11 10:50:40] [Rank 0] step:2961/10000 train_time:137939ms step_avg:46.59ms 
+[2025-09-11 10:50:41] [Rank 0] step:2981/10000 train_time:138604ms step_avg:46.50ms +[2025-09-11 10:50:41] [Rank 0] step:2981/10000 train_time:138604ms step_avg:46.50ms +[2025-09-11 10:50:41] [Rank 0] step:3001/10000 train_time:139269ms step_avg:46.41ms +[2025-09-11 10:50:41] [Rank 0] step:3001/10000 train_time:139269ms step_avg:46.41ms +[2025-09-11 10:50:42] [Rank 0] step:3021/10000 train_time:139935ms step_avg:46.32ms +[2025-09-11 10:50:42] [Rank 0] step:3021/10000 train_time:139935ms step_avg:46.32ms +[2025-09-11 10:50:43] [Rank 0] step:3041/10000 train_time:140601ms step_avg:46.24ms +[2025-09-11 10:50:43] [Rank 0] step:3041/10000 train_time:140601ms step_avg:46.24ms +[2025-09-11 10:50:43] [Rank 0] step:3061/10000 train_time:141267ms step_avg:46.15ms +[2025-09-11 10:50:43] [Rank 0] step:3061/10000 train_time:141267ms step_avg:46.15ms +[2025-09-11 10:50:44] [Rank 0] step:3081/10000 train_time:141932ms step_avg:46.07ms +[2025-09-11 10:50:44] [Rank 0] step:3081/10000 train_time:141932ms step_avg:46.07ms +[2025-09-11 10:50:45] [Rank 0] step:3101/10000 train_time:142598ms step_avg:45.98ms +[2025-09-11 10:50:45] [Rank 0] step:3101/10000 train_time:142598ms step_avg:45.98ms +[2025-09-11 10:50:45] [Rank 0] step:3121/10000 train_time:143263ms step_avg:45.90ms +[2025-09-11 10:50:45] [Rank 0] step:3121/10000 train_time:143263ms step_avg:45.90ms +[2025-09-11 10:50:46] [Rank 0] step:3141/10000 train_time:143928ms step_avg:45.82ms +[2025-09-11 10:50:46] [Rank 0] step:3141/10000 train_time:143928ms step_avg:45.82ms +[2025-09-11 10:50:46] [Rank 0] step:3161/10000 train_time:144594ms step_avg:45.74ms +[2025-09-11 10:50:46] [Rank 0] step:3161/10000 train_time:144594ms step_avg:45.74ms +[2025-09-11 10:50:47] [Rank 0] step:3181/10000 train_time:145259ms step_avg:45.66ms +[2025-09-11 10:50:47] [Rank 0] step:3181/10000 train_time:145259ms step_avg:45.66ms +[2025-09-11 10:50:48] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 10:50:48] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 10:50:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 10:50:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 10:50:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 10:50:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 10:50:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:50:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:50:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 10:50:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 10:50:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 10:50:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 10:50:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 10:50:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 10:50:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 10:50:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 10:50:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 10:50:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 10:50:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 10:50:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 10:50:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 10:50:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 10:50:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 10:50:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 10:50:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 10:50:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 10:50:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 10:50:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 10:50:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 10:50:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 10:50:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 10:50:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 10:50:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 10:50:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 10:50:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 10:50:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 10:50:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 10:50:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 10:50:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 10:50:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 10:50:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 10:50:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 10:50:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 10:50:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 10:50:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:50:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:50:58] [Rank 0] PRINT: step:3200/10000 val_loss:4.9451 total_sharp:1.7098e-03 L1_sharp:3.3369e-03 L2_sharp:7.5519e-04 L3_sharp:4.1009e-04 L4_sharp:5.3193e-04 L5_sharp:6.5435e-04 L6_sharp:5.7871e-04 L7_sharp:4.7630e-04 L8_sharp:1.4135e-03 L9_sharp:1.3523e-03 L10_sharp:1.3465e-03 L11_sharp:1.7607e-03 L12_sharp:5.8024e-03 total_fnorm:1.9500e+01 total_l1_linf:4.7360e+04 total_spectral:9.8125e+00 L1_fnorm:2.5469e+00 L2_fnorm:2.4844e+00 L3_fnorm:2.5156e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5469e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.4844e+00 L1_l1linf:7.0312e-01 L2_l1linf:6.4453e-01 L3_l1linf:6.2500e-01 L4_l1linf:6.4062e-01 L5_l1linf:6.2891e-01 L6_l1linf:6.2891e-01 L7_l1linf:6.3672e-01 L8_l1linf:6.2109e-01 L9_l1linf:6.0938e-01 L10_l1linf:6.1719e-01 L11_l1linf:6.3672e-01 L12_l1linf:5.7031e-01 L1_spectral:3.1151e-02 L2_spectral:3.0532e-02 L3_spectral:3.0394e-02 L4_spectral:3.0184e-02 L5_spectral:3.0120e-02 L6_spectral:3.0647e-02 L7_spectral:3.0494e-02 L8_spectral:3.1371e-02 L9_spectral:3.0484e-02 L10_spectral:3.0655e-02 L11_spectral:3.0262e-02 L12_spectral:3.0315e-02 train_time:145907ms step_avg:45.60ms +[2025-09-11 10:50:58] [Rank 0] PRINT: step:3200/10000 
val_loss:4.9451 total_sharp:1.7098e-03 L1_sharp:3.3369e-03 L2_sharp:7.5519e-04 L3_sharp:4.1009e-04 L4_sharp:5.3193e-04 L5_sharp:6.5435e-04 L6_sharp:5.7871e-04 L7_sharp:4.7630e-04 L8_sharp:1.4135e-03 L9_sharp:1.3523e-03 L10_sharp:1.3465e-03 L11_sharp:1.7607e-03 L12_sharp:5.8024e-03 total_fnorm:1.9500e+01 total_l1_linf:4.7360e+04 total_spectral:9.8125e+00 L1_fnorm:2.5469e+00 L2_fnorm:2.4844e+00 L3_fnorm:2.5156e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5469e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.4844e+00 L1_l1linf:7.0312e-01 L2_l1linf:6.4453e-01 L3_l1linf:6.2500e-01 L4_l1linf:6.4062e-01 L5_l1linf:6.2891e-01 L6_l1linf:6.2891e-01 L7_l1linf:6.3672e-01 L8_l1linf:6.2109e-01 L9_l1linf:6.0938e-01 L10_l1linf:6.1719e-01 L11_l1linf:6.3672e-01 L12_l1linf:5.7031e-01 L1_spectral:3.1151e-02 L2_spectral:3.0532e-02 L3_spectral:3.0394e-02 L4_spectral:3.0184e-02 L5_spectral:3.0120e-02 L6_spectral:3.0647e-02 L7_spectral:3.0494e-02 L8_spectral:3.1371e-02 L9_spectral:3.0484e-02 L10_spectral:3.0655e-02 L11_spectral:3.0262e-02 L12_spectral:3.0315e-02 train_time:145907ms step_avg:45.60ms +[2025-09-11 10:50:59] [Rank 0] step:3201/10000 train_time:147111ms step_avg:45.96ms +[2025-09-11 10:50:59] [Rank 0] step:3201/10000 train_time:147111ms step_avg:45.96ms +[2025-09-11 10:51:00] [Rank 0] step:3221/10000 train_time:147796ms step_avg:45.89ms +[2025-09-11 10:51:00] [Rank 0] step:3221/10000 train_time:147796ms step_avg:45.89ms +[2025-09-11 10:51:00] [Rank 0] step:3241/10000 train_time:148462ms step_avg:45.81ms +[2025-09-11 10:51:00] [Rank 0] step:3241/10000 train_time:148462ms step_avg:45.81ms +[2025-09-11 10:51:01] [Rank 0] step:3261/10000 train_time:149127ms step_avg:45.73ms +[2025-09-11 10:51:01] [Rank 0] step:3261/10000 train_time:149127ms step_avg:45.73ms +[2025-09-11 10:51:02] [Rank 0] step:3281/10000 train_time:149793ms step_avg:45.65ms +[2025-09-11 10:51:02] [Rank 0] step:3281/10000 
train_time:149793ms step_avg:45.65ms +[2025-09-11 10:51:02] [Rank 0] step:3301/10000 train_time:150459ms step_avg:45.58ms +[2025-09-11 10:51:02] [Rank 0] step:3301/10000 train_time:150459ms step_avg:45.58ms +[2025-09-11 10:51:03] [Rank 0] step:3321/10000 train_time:151124ms step_avg:45.51ms +[2025-09-11 10:51:03] [Rank 0] step:3321/10000 train_time:151124ms step_avg:45.51ms +[2025-09-11 10:51:04] [Rank 0] step:3341/10000 train_time:151789ms step_avg:45.43ms +[2025-09-11 10:51:04] [Rank 0] step:3341/10000 train_time:151789ms step_avg:45.43ms +[2025-09-11 10:51:04] [Rank 0] step:3361/10000 train_time:152455ms step_avg:45.36ms +[2025-09-11 10:51:04] [Rank 0] step:3361/10000 train_time:152455ms step_avg:45.36ms +[2025-09-11 10:51:05] [Rank 0] step:3381/10000 train_time:153121ms step_avg:45.29ms +[2025-09-11 10:51:05] [Rank 0] step:3381/10000 train_time:153121ms step_avg:45.29ms +[2025-09-11 10:51:06] [Rank 0] step:3401/10000 train_time:153786ms step_avg:45.22ms +[2025-09-11 10:51:06] [Rank 0] step:3401/10000 train_time:153786ms step_avg:45.22ms +[2025-09-11 10:51:06] [Rank 0] step:3421/10000 train_time:154449ms step_avg:45.15ms +[2025-09-11 10:51:06] [Rank 0] step:3421/10000 train_time:154449ms step_avg:45.15ms +[2025-09-11 10:51:07] [Rank 0] step:3441/10000 train_time:155114ms step_avg:45.08ms +[2025-09-11 10:51:07] [Rank 0] step:3441/10000 train_time:155114ms step_avg:45.08ms +[2025-09-11 10:51:08] [Rank 0] step:3461/10000 train_time:155778ms step_avg:45.01ms +[2025-09-11 10:51:08] [Rank 0] step:3461/10000 train_time:155778ms step_avg:45.01ms +[2025-09-11 10:51:08] [Rank 0] step:3481/10000 train_time:156443ms step_avg:44.94ms +[2025-09-11 10:51:08] [Rank 0] step:3481/10000 train_time:156443ms step_avg:44.94ms +[2025-09-11 10:51:09] [Rank 0] step:3501/10000 train_time:157107ms step_avg:44.88ms +[2025-09-11 10:51:09] [Rank 0] step:3501/10000 train_time:157107ms step_avg:44.88ms +[2025-09-11 10:51:10] [Rank 0] step:3521/10000 train_time:157772ms step_avg:44.81ms 
+[2025-09-11 10:51:10] [Rank 0] step:3521/10000 train_time:157772ms step_avg:44.81ms +[2025-09-11 10:51:10] [Rank 0] step:3541/10000 train_time:158437ms step_avg:44.74ms +[2025-09-11 10:51:10] [Rank 0] step:3541/10000 train_time:158437ms step_avg:44.74ms +[2025-09-11 10:51:11] [Rank 0] step:3561/10000 train_time:159101ms step_avg:44.68ms +[2025-09-11 10:51:11] [Rank 0] step:3561/10000 train_time:159101ms step_avg:44.68ms +[2025-09-11 10:51:12] [Rank 0] step:3581/10000 train_time:159766ms step_avg:44.61ms +[2025-09-11 10:51:12] [Rank 0] step:3581/10000 train_time:159766ms step_avg:44.61ms +[2025-09-11 10:51:12] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 10:51:12] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 10:51:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 10:51:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 10:51:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 10:51:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 10:51:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:51:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:51:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 10:51:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 10:51:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 10:51:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 10:51:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 10:51:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 10:51:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 10:51:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 10:51:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 10:51:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 10:51:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 10:51:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 10:51:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 10:51:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 10:51:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 10:51:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 10:51:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 10:51:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 10:51:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 10:51:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 10:51:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 10:51:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 10:51:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 10:51:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 10:51:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 10:51:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 10:51:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 10:51:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 10:51:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 10:51:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 10:51:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 10:51:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 10:51:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 10:51:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 10:51:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 10:51:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 10:51:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:51:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:51:22] [Rank 0] PRINT: step:3600/10000 val_loss:4.8951 total_sharp:2.2760e-03 L1_sharp:4.3596e-03 L2_sharp:7.5913e-04 L3_sharp:1.9810e-04 L4_sharp:3.6904e-04 L5_sharp:5.0478e-04 L6_sharp:7.4123e-04 L7_sharp:6.9353e-04 L8_sharp:1.4352e-03 L9_sharp:1.2803e-03 L10_sharp:1.2766e-03 L11_sharp:1.7690e-03 L12_sharp:5.3383e-03 total_fnorm:1.7250e+01 total_l1_linf:4.1984e+04 total_spectral:8.6250e+00 L1_fnorm:2.5312e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5156e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.4844e+00 L1_l1linf:6.9922e-01 L2_l1linf:6.3672e-01 L3_l1linf:6.1719e-01 L4_l1linf:6.2891e-01 L5_l1linf:6.2500e-01 L6_l1linf:6.2891e-01 L7_l1linf:6.2891e-01 L8_l1linf:6.0547e-01 L9_l1linf:6.0547e-01 L10_l1linf:6.1719e-01 L11_l1linf:6.1328e-01 L12_l1linf:5.6250e-01 L1_spectral:3.1328e-02 L2_spectral:3.0890e-02 L3_spectral:3.0644e-02 L4_spectral:3.0512e-02 L5_spectral:3.0634e-02 L6_spectral:3.0960e-02 L7_spectral:3.0847e-02 L8_spectral:3.1251e-02 L9_spectral:3.0957e-02 L10_spectral:3.0942e-02 L11_spectral:3.0629e-02 L12_spectral:3.0503e-02 train_time:160412ms step_avg:44.56ms +[2025-09-11 10:51:22] [Rank 0] PRINT: step:3600/10000 val_loss:4.8951 total_sharp:2.2760e-03 L1_sharp:4.3596e-03 L2_sharp:7.5913e-04 L3_sharp:1.9810e-04 L4_sharp:3.6904e-04 L5_sharp:5.0478e-04 L6_sharp:7.4123e-04 L7_sharp:6.9353e-04 L8_sharp:1.4352e-03 L9_sharp:1.2803e-03 L10_sharp:1.2766e-03 L11_sharp:1.7690e-03 L12_sharp:5.3383e-03 total_fnorm:1.7250e+01 total_l1_linf:4.1984e+04 total_spectral:8.6250e+00 L1_fnorm:2.5312e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5156e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.4844e+00 L1_l1linf:6.9922e-01 L2_l1linf:6.3672e-01 L3_l1linf:6.1719e-01 L4_l1linf:6.2891e-01 L5_l1linf:6.2500e-01 
L6_l1linf:6.2891e-01 L7_l1linf:6.2891e-01 L8_l1linf:6.0547e-01 L9_l1linf:6.0547e-01 L10_l1linf:6.1719e-01 L11_l1linf:6.1328e-01 L12_l1linf:5.6250e-01 L1_spectral:3.1328e-02 L2_spectral:3.0890e-02 L3_spectral:3.0644e-02 L4_spectral:3.0512e-02 L5_spectral:3.0634e-02 L6_spectral:3.0960e-02 L7_spectral:3.0847e-02 L8_spectral:3.1251e-02 L9_spectral:3.0957e-02 L10_spectral:3.0942e-02 L11_spectral:3.0629e-02 L12_spectral:3.0503e-02 train_time:160412ms step_avg:44.56ms +[2025-09-11 10:51:23] [Rank 0] step:3601/10000 train_time:161638ms step_avg:44.89ms +[2025-09-11 10:51:23] [Rank 0] step:3601/10000 train_time:161638ms step_avg:44.89ms +[2025-09-11 10:51:24] [Rank 0] step:3621/10000 train_time:162323ms step_avg:44.83ms +[2025-09-11 10:51:24] [Rank 0] step:3621/10000 train_time:162323ms step_avg:44.83ms +[2025-09-11 10:51:25] [Rank 0] step:3641/10000 train_time:162988ms step_avg:44.76ms +[2025-09-11 10:51:25] [Rank 0] step:3641/10000 train_time:162988ms step_avg:44.76ms +[2025-09-11 10:51:25] [Rank 0] step:3661/10000 train_time:163653ms step_avg:44.70ms +[2025-09-11 10:51:25] [Rank 0] step:3661/10000 train_time:163653ms step_avg:44.70ms +[2025-09-11 10:51:26] [Rank 0] step:3681/10000 train_time:164317ms step_avg:44.64ms +[2025-09-11 10:51:26] [Rank 0] step:3681/10000 train_time:164317ms step_avg:44.64ms +[2025-09-11 10:51:27] [Rank 0] step:3701/10000 train_time:164982ms step_avg:44.58ms +[2025-09-11 10:51:27] [Rank 0] step:3701/10000 train_time:164982ms step_avg:44.58ms +[2025-09-11 10:51:27] [Rank 0] step:3721/10000 train_time:165657ms step_avg:44.52ms +[2025-09-11 10:51:27] [Rank 0] step:3721/10000 train_time:165657ms step_avg:44.52ms +[2025-09-11 10:51:28] [Rank 0] step:3741/10000 train_time:166332ms step_avg:44.46ms +[2025-09-11 10:51:28] [Rank 0] step:3741/10000 train_time:166332ms step_avg:44.46ms +[2025-09-11 10:51:29] [Rank 0] step:3761/10000 train_time:167009ms step_avg:44.41ms +[2025-09-11 10:51:29] [Rank 0] step:3761/10000 train_time:167009ms step_avg:44.41ms 
+[2025-09-11 10:51:29] [Rank 0] step:3781/10000 train_time:167684ms step_avg:44.35ms +[2025-09-11 10:51:29] [Rank 0] step:3781/10000 train_time:167684ms step_avg:44.35ms +[2025-09-11 10:51:30] [Rank 0] step:3801/10000 train_time:168360ms step_avg:44.29ms +[2025-09-11 10:51:30] [Rank 0] step:3801/10000 train_time:168360ms step_avg:44.29ms +[2025-09-11 10:51:31] [Rank 0] step:3821/10000 train_time:169602ms step_avg:44.39ms +[2025-09-11 10:51:31] [Rank 0] step:3821/10000 train_time:169602ms step_avg:44.39ms +[2025-09-11 10:51:32] [Rank 0] step:3841/10000 train_time:170279ms step_avg:44.33ms +[2025-09-11 10:51:32] [Rank 0] step:3841/10000 train_time:170279ms step_avg:44.33ms +[2025-09-11 10:51:33] [Rank 0] step:3861/10000 train_time:170954ms step_avg:44.28ms +[2025-09-11 10:51:33] [Rank 0] step:3861/10000 train_time:170954ms step_avg:44.28ms +[2025-09-11 10:51:34] [Rank 0] step:3881/10000 train_time:171927ms step_avg:44.30ms +[2025-09-11 10:51:34] [Rank 0] step:3881/10000 train_time:171927ms step_avg:44.30ms +[2025-09-11 10:51:34] [Rank 0] step:3901/10000 train_time:172602ms step_avg:44.25ms +[2025-09-11 10:51:34] [Rank 0] step:3901/10000 train_time:172602ms step_avg:44.25ms +[2025-09-11 10:51:35] [Rank 0] step:3921/10000 train_time:173278ms step_avg:44.19ms +[2025-09-11 10:51:35] [Rank 0] step:3921/10000 train_time:173278ms step_avg:44.19ms +[2025-09-11 10:51:36] [Rank 0] step:3941/10000 train_time:173954ms step_avg:44.14ms +[2025-09-11 10:51:36] [Rank 0] step:3941/10000 train_time:173954ms step_avg:44.14ms +[2025-09-11 10:51:36] [Rank 0] step:3961/10000 train_time:174631ms step_avg:44.09ms +[2025-09-11 10:51:36] [Rank 0] step:3961/10000 train_time:174631ms step_avg:44.09ms +[2025-09-11 10:51:37] [Rank 0] step:3981/10000 train_time:175306ms step_avg:44.04ms +[2025-09-11 10:51:37] [Rank 0] step:3981/10000 train_time:175306ms step_avg:44.04ms +[2025-09-11 10:51:38] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 10:51:38] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 10:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 10:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 10:51:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 10:51:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 10:51:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:51:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:51:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 10:51:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 10:51:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 10:51:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 10:51:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 10:51:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 10:51:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 10:51:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 10:51:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 10:51:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 10:51:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 10:51:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 10:51:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 10:51:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 10:51:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 10:51:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 10:51:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 10:51:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 10:51:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 10:51:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 10:51:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 10:51:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 10:51:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 10:51:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 10:51:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 10:51:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 10:51:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 10:51:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 10:51:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 10:51:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 10:51:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 10:51:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 10:51:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 10:51:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 10:51:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 10:51:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 10:51:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:51:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:51:48] [Rank 0] PRINT: step:4000/10000 val_loss:4.8401 total_sharp:2.3163e-03 L1_sharp:4.7718e-03 L2_sharp:3.1017e-04 L3_sharp:4.2227e-04 L4_sharp:4.1810e-04 L5_sharp:4.9295e-04 L6_sharp:5.7846e-04 L7_sharp:5.7641e-04 L8_sharp:1.3807e-03 L9_sharp:1.3652e-03 L10_sharp:1.4084e-03 L11_sharp:2.0955e-03 L12_sharp:1.8485e-02 total_fnorm:1.9500e+01 total_l1_linf:4.5056e+04 total_spectral:9.8125e+00 L1_fnorm:2.5312e+00 L2_fnorm:2.4531e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5000e+00 L8_fnorm:2.4375e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.4688e+00 L1_l1linf:6.7969e-01 L2_l1linf:6.2891e-01 L3_l1linf:6.0938e-01 L4_l1linf:6.1719e-01 L5_l1linf:6.1719e-01 L6_l1linf:6.1719e-01 L7_l1linf:6.2109e-01 L8_l1linf:6.0938e-01 L9_l1linf:5.8984e-01 L10_l1linf:6.0547e-01 L11_l1linf:6.2500e-01 L12_l1linf:5.3125e-01 L1_spectral:3.1507e-02 L2_spectral:3.0905e-02 L3_spectral:3.0842e-02 L4_spectral:3.0858e-02 L5_spectral:3.0828e-02 L6_spectral:3.0941e-02 L7_spectral:3.1024e-02 L8_spectral:3.1123e-02 L9_spectral:3.0930e-02 L10_spectral:3.1277e-02 L11_spectral:3.0986e-02 L12_spectral:3.0890e-02 train_time:175963ms step_avg:43.99ms +[2025-09-11 10:51:48] [Rank 0] PRINT: step:4000/10000 
val_loss:4.8401 total_sharp:2.3163e-03 L1_sharp:4.7718e-03 L2_sharp:3.1017e-04 L3_sharp:4.2227e-04 L4_sharp:4.1810e-04 L5_sharp:4.9295e-04 L6_sharp:5.7846e-04 L7_sharp:5.7641e-04 L8_sharp:1.3807e-03 L9_sharp:1.3652e-03 L10_sharp:1.4084e-03 L11_sharp:2.0955e-03 L12_sharp:1.8485e-02 total_fnorm:1.9500e+01 total_l1_linf:4.5056e+04 total_spectral:9.8125e+00 L1_fnorm:2.5312e+00 L2_fnorm:2.4531e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5000e+00 L8_fnorm:2.4375e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.4688e+00 L1_l1linf:6.7969e-01 L2_l1linf:6.2891e-01 L3_l1linf:6.0938e-01 L4_l1linf:6.1719e-01 L5_l1linf:6.1719e-01 L6_l1linf:6.1719e-01 L7_l1linf:6.2109e-01 L8_l1linf:6.0938e-01 L9_l1linf:5.8984e-01 L10_l1linf:6.0547e-01 L11_l1linf:6.2500e-01 L12_l1linf:5.3125e-01 L1_spectral:3.1507e-02 L2_spectral:3.0905e-02 L3_spectral:3.0842e-02 L4_spectral:3.0858e-02 L5_spectral:3.0828e-02 L6_spectral:3.0941e-02 L7_spectral:3.1024e-02 L8_spectral:3.1123e-02 L9_spectral:3.0930e-02 L10_spectral:3.1277e-02 L11_spectral:3.0986e-02 L12_spectral:3.0890e-02 train_time:175963ms step_avg:43.99ms +[2025-09-11 10:51:49] [Rank 0] step:4001/10000 train_time:177148ms step_avg:44.28ms +[2025-09-11 10:51:49] [Rank 0] step:4001/10000 train_time:177148ms step_avg:44.28ms +[2025-09-11 10:51:50] [Rank 0] step:4021/10000 train_time:177839ms step_avg:44.23ms +[2025-09-11 10:51:50] [Rank 0] step:4021/10000 train_time:177839ms step_avg:44.23ms +[2025-09-11 10:51:50] [Rank 0] step:4041/10000 train_time:178517ms step_avg:44.18ms +[2025-09-11 10:51:50] [Rank 0] step:4041/10000 train_time:178517ms step_avg:44.18ms +[2025-09-11 10:51:51] [Rank 0] step:4061/10000 train_time:179193ms step_avg:44.13ms +[2025-09-11 10:51:51] [Rank 0] step:4061/10000 train_time:179193ms step_avg:44.13ms +[2025-09-11 10:51:52] [Rank 0] step:4081/10000 train_time:179870ms step_avg:44.07ms +[2025-09-11 10:51:52] [Rank 0] step:4081/10000 
train_time:179870ms step_avg:44.07ms +[2025-09-11 10:51:52] [Rank 0] step:4101/10000 train_time:180547ms step_avg:44.02ms +[2025-09-11 10:51:52] [Rank 0] step:4101/10000 train_time:180547ms step_avg:44.02ms +[2025-09-11 10:51:53] [Rank 0] step:4121/10000 train_time:181223ms step_avg:43.98ms +[2025-09-11 10:51:53] [Rank 0] step:4121/10000 train_time:181223ms step_avg:43.98ms +[2025-09-11 10:51:54] [Rank 0] step:4141/10000 train_time:181901ms step_avg:43.93ms +[2025-09-11 10:51:54] [Rank 0] step:4141/10000 train_time:181901ms step_avg:43.93ms +[2025-09-11 10:51:54] [Rank 0] step:4161/10000 train_time:182576ms step_avg:43.88ms +[2025-09-11 10:51:54] [Rank 0] step:4161/10000 train_time:182576ms step_avg:43.88ms +[2025-09-11 10:51:55] [Rank 0] step:4181/10000 train_time:183253ms step_avg:43.83ms +[2025-09-11 10:51:55] [Rank 0] step:4181/10000 train_time:183253ms step_avg:43.83ms +[2025-09-11 10:51:56] [Rank 0] step:4201/10000 train_time:183930ms step_avg:43.78ms +[2025-09-11 10:51:56] [Rank 0] step:4201/10000 train_time:183930ms step_avg:43.78ms +[2025-09-11 10:51:57] [Rank 0] step:4221/10000 train_time:184606ms step_avg:43.74ms +[2025-09-11 10:51:57] [Rank 0] step:4221/10000 train_time:184606ms step_avg:43.74ms +[2025-09-11 10:51:57] [Rank 0] step:4241/10000 train_time:185283ms step_avg:43.69ms +[2025-09-11 10:51:57] [Rank 0] step:4241/10000 train_time:185283ms step_avg:43.69ms +[2025-09-11 10:51:58] [Rank 0] step:4261/10000 train_time:185960ms step_avg:43.64ms +[2025-09-11 10:51:58] [Rank 0] step:4261/10000 train_time:185960ms step_avg:43.64ms +[2025-09-11 10:51:59] [Rank 0] step:4281/10000 train_time:186638ms step_avg:43.60ms +[2025-09-11 10:51:59] [Rank 0] step:4281/10000 train_time:186638ms step_avg:43.60ms +[2025-09-11 10:51:59] [Rank 0] step:4301/10000 train_time:187316ms step_avg:43.55ms +[2025-09-11 10:51:59] [Rank 0] step:4301/10000 train_time:187316ms step_avg:43.55ms +[2025-09-11 10:52:00] [Rank 0] step:4321/10000 train_time:187992ms step_avg:43.51ms 
+[2025-09-11 10:52:00] [Rank 0] step:4321/10000 train_time:187992ms step_avg:43.51ms +[2025-09-11 10:52:01] [Rank 0] step:4341/10000 train_time:188668ms step_avg:43.46ms +[2025-09-11 10:52:01] [Rank 0] step:4341/10000 train_time:188668ms step_avg:43.46ms +[2025-09-11 10:52:01] [Rank 0] step:4361/10000 train_time:189344ms step_avg:43.42ms +[2025-09-11 10:52:01] [Rank 0] step:4361/10000 train_time:189344ms step_avg:43.42ms +[2025-09-11 10:52:02] [Rank 0] step:4381/10000 train_time:190022ms step_avg:43.37ms +[2025-09-11 10:52:02] [Rank 0] step:4381/10000 train_time:190022ms step_avg:43.37ms +[2025-09-11 10:52:03] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 10:52:03] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 10:52:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 10:52:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 10:52:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 10:52:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 10:52:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:52:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:52:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 10:52:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 10:52:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 10:52:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 10:52:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 10:52:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 10:52:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 10:52:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 10:52:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 10:52:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 10:52:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 10:52:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 10:52:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 10:52:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 10:52:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 10:52:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 10:52:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 10:52:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 10:52:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 10:52:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 10:52:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 10:52:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 10:52:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 10:52:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 10:52:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 10:52:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 10:52:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 10:52:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 10:52:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 10:52:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 10:52:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 10:52:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 10:52:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 10:52:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 10:52:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 10:52:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 10:52:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:52:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:52:13] [Rank 0] PRINT: step:4400/10000 val_loss:4.8005 total_sharp:1.5812e-03 L1_sharp:3.7892e-03 L2_sharp:4.6721e-04 L3_sharp:3.1344e-04 L4_sharp:6.0013e-04 L5_sharp:5.5950e-04 L6_sharp:8.9559e-04 L7_sharp:5.6623e-04 L8_sharp:1.2584e-03 L9_sharp:1.0694e-03 L10_sharp:1.0018e-03 L11_sharp:1.4559e-03 L12_sharp:5.4307e-03 total_fnorm:1.8250e+01 total_l1_linf:4.2240e+04 total_spectral:9.1250e+00 L1_fnorm:2.5156e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5000e+00 L8_fnorm:2.4531e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.4844e+00 L1_l1linf:6.6797e-01 L2_l1linf:6.2109e-01 L3_l1linf:6.0547e-01 L4_l1linf:6.1328e-01 L5_l1linf:6.0938e-01 L6_l1linf:6.0156e-01 L7_l1linf:6.2500e-01 L8_l1linf:6.0156e-01 L9_l1linf:5.9375e-01 L10_l1linf:6.0938e-01 L11_l1linf:6.0938e-01 L12_l1linf:5.4297e-01 L1_spectral:3.1696e-02 L2_spectral:3.1383e-02 L3_spectral:3.1370e-02 L4_spectral:3.0935e-02 L5_spectral:3.0900e-02 L6_spectral:3.1233e-02 L7_spectral:3.1280e-02 L8_spectral:3.1380e-02 L9_spectral:3.1287e-02 L10_spectral:3.1510e-02 L11_spectral:3.1041e-02 L12_spectral:3.1175e-02 train_time:190679ms step_avg:43.34ms +[2025-09-11 10:52:13] [Rank 0] PRINT: step:4400/10000 val_loss:4.8005 total_sharp:1.5812e-03 L1_sharp:3.7892e-03 L2_sharp:4.6721e-04 L3_sharp:3.1344e-04 L4_sharp:6.0013e-04 L5_sharp:5.5950e-04 L6_sharp:8.9559e-04 L7_sharp:5.6623e-04 L8_sharp:1.2584e-03 L9_sharp:1.0694e-03 L10_sharp:1.0018e-03 L11_sharp:1.4559e-03 L12_sharp:5.4307e-03 total_fnorm:1.8250e+01 total_l1_linf:4.2240e+04 total_spectral:9.1250e+00 L1_fnorm:2.5156e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5000e+00 L8_fnorm:2.4531e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.4844e+00 L1_l1linf:6.6797e-01 L2_l1linf:6.2109e-01 L3_l1linf:6.0547e-01 L4_l1linf:6.1328e-01 L5_l1linf:6.0938e-01 
L6_l1linf:6.0156e-01 L7_l1linf:6.2500e-01 L8_l1linf:6.0156e-01 L9_l1linf:5.9375e-01 L10_l1linf:6.0938e-01 L11_l1linf:6.0938e-01 L12_l1linf:5.4297e-01 L1_spectral:3.1696e-02 L2_spectral:3.1383e-02 L3_spectral:3.1370e-02 L4_spectral:3.0935e-02 L5_spectral:3.0900e-02 L6_spectral:3.1233e-02 L7_spectral:3.1280e-02 L8_spectral:3.1380e-02 L9_spectral:3.1287e-02 L10_spectral:3.1510e-02 L11_spectral:3.1041e-02 L12_spectral:3.1175e-02 train_time:190679ms step_avg:43.34ms +[2025-09-11 10:52:14] [Rank 0] step:4401/10000 train_time:191880ms step_avg:43.60ms +[2025-09-11 10:52:14] [Rank 0] step:4401/10000 train_time:191880ms step_avg:43.60ms +[2025-09-11 10:52:15] [Rank 0] step:4421/10000 train_time:192569ms step_avg:43.56ms +[2025-09-11 10:52:15] [Rank 0] step:4421/10000 train_time:192569ms step_avg:43.56ms +[2025-09-11 10:52:15] [Rank 0] step:4441/10000 train_time:193247ms step_avg:43.51ms +[2025-09-11 10:52:15] [Rank 0] step:4441/10000 train_time:193247ms step_avg:43.51ms +[2025-09-11 10:52:16] [Rank 0] step:4461/10000 train_time:193926ms step_avg:43.47ms +[2025-09-11 10:52:16] [Rank 0] step:4461/10000 train_time:193926ms step_avg:43.47ms +[2025-09-11 10:52:17] [Rank 0] step:4481/10000 train_time:194605ms step_avg:43.43ms +[2025-09-11 10:52:17] [Rank 0] step:4481/10000 train_time:194605ms step_avg:43.43ms +[2025-09-11 10:52:17] [Rank 0] step:4501/10000 train_time:195285ms step_avg:43.39ms +[2025-09-11 10:52:17] [Rank 0] step:4501/10000 train_time:195285ms step_avg:43.39ms +[2025-09-11 10:52:18] [Rank 0] step:4521/10000 train_time:195965ms step_avg:43.35ms +[2025-09-11 10:52:18] [Rank 0] step:4521/10000 train_time:195965ms step_avg:43.35ms +[2025-09-11 10:52:19] [Rank 0] step:4541/10000 train_time:196644ms step_avg:43.30ms +[2025-09-11 10:52:19] [Rank 0] step:4541/10000 train_time:196644ms step_avg:43.30ms +[2025-09-11 10:52:19] [Rank 0] step:4561/10000 train_time:197323ms step_avg:43.26ms +[2025-09-11 10:52:19] [Rank 0] step:4561/10000 train_time:197323ms step_avg:43.26ms 
+[2025-09-11 10:52:20] [Rank 0] step:4581/10000 train_time:198001ms step_avg:43.22ms +[2025-09-11 10:52:20] [Rank 0] step:4581/10000 train_time:198001ms step_avg:43.22ms +[2025-09-11 10:52:21] [Rank 0] step:4601/10000 train_time:198681ms step_avg:43.18ms +[2025-09-11 10:52:21] [Rank 0] step:4601/10000 train_time:198681ms step_avg:43.18ms +[2025-09-11 10:52:21] [Rank 0] step:4621/10000 train_time:199360ms step_avg:43.14ms +[2025-09-11 10:52:21] [Rank 0] step:4621/10000 train_time:199360ms step_avg:43.14ms +[2025-09-11 10:52:22] [Rank 0] step:4641/10000 train_time:200039ms step_avg:43.10ms +[2025-09-11 10:52:22] [Rank 0] step:4641/10000 train_time:200039ms step_avg:43.10ms +[2025-09-11 10:52:23] [Rank 0] step:4661/10000 train_time:200718ms step_avg:43.06ms +[2025-09-11 10:52:23] [Rank 0] step:4661/10000 train_time:200718ms step_avg:43.06ms +[2025-09-11 10:52:23] [Rank 0] step:4681/10000 train_time:201397ms step_avg:43.02ms +[2025-09-11 10:52:23] [Rank 0] step:4681/10000 train_time:201397ms step_avg:43.02ms +[2025-09-11 10:52:24] [Rank 0] step:4701/10000 train_time:202076ms step_avg:42.99ms +[2025-09-11 10:52:24] [Rank 0] step:4701/10000 train_time:202076ms step_avg:42.99ms +[2025-09-11 10:52:25] [Rank 0] step:4721/10000 train_time:202755ms step_avg:42.95ms +[2025-09-11 10:52:25] [Rank 0] step:4721/10000 train_time:202755ms step_avg:42.95ms +[2025-09-11 10:52:25] [Rank 0] step:4741/10000 train_time:203434ms step_avg:42.91ms +[2025-09-11 10:52:25] [Rank 0] step:4741/10000 train_time:203434ms step_avg:42.91ms +[2025-09-11 10:52:26] [Rank 0] step:4761/10000 train_time:204114ms step_avg:42.87ms +[2025-09-11 10:52:26] [Rank 0] step:4761/10000 train_time:204114ms step_avg:42.87ms +[2025-09-11 10:52:27] [Rank 0] step:4781/10000 train_time:204792ms step_avg:42.83ms +[2025-09-11 10:52:27] [Rank 0] step:4781/10000 train_time:204792ms step_avg:42.83ms +[2025-09-11 10:52:27] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 10:52:27] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 10:52:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 10:52:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 10:52:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 10:52:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 10:52:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:52:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:52:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 10:52:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 10:52:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 10:52:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 10:52:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 10:52:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 10:52:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 10:52:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 10:52:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 10:52:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 10:52:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 10:52:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 10:52:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 10:52:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 10:52:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 10:52:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 10:52:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 10:52:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 10:52:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 10:52:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 10:52:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 10:52:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 10:52:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 10:52:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 10:52:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 10:52:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 10:52:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 10:52:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 10:52:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 10:52:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 10:52:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 10:52:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 10:52:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 10:52:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 10:52:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 10:52:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 10:52:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:52:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:52:38] [Rank 0] PRINT: step:4800/10000 val_loss:4.7518 total_sharp:1.6840e-03 L1_sharp:2.4581e-03 L2_sharp:5.7052e-04 L3_sharp:2.7434e-04 L4_sharp:1.8967e-04 L5_sharp:3.4547e-04 L6_sharp:3.4893e-04 L7_sharp:4.3229e-04 L8_sharp:1.0622e-03 L9_sharp:1.1141e-03 L10_sharp:9.7562e-04 L11_sharp:1.5232e-03 L12_sharp:7.8560e-03 total_fnorm:1.7875e+01 total_l1_linf:4.1728e+04 total_spectral:8.9375e+00 L1_fnorm:2.5312e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4531e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.4688e+00 L1_l1linf:6.7578e-01 L2_l1linf:6.2109e-01 L3_l1linf:6.0547e-01 L4_l1linf:6.0938e-01 L5_l1linf:6.0938e-01 L6_l1linf:6.0547e-01 L7_l1linf:6.1328e-01 L8_l1linf:5.8984e-01 L9_l1linf:5.7812e-01 L10_l1linf:5.7812e-01 L11_l1linf:5.9766e-01 L12_l1linf:5.3906e-01 L1_spectral:3.1920e-02 L2_spectral:3.1469e-02 L3_spectral:3.1334e-02 L4_spectral:3.1483e-02 L5_spectral:3.1438e-02 L6_spectral:3.1603e-02 L7_spectral:3.1487e-02 L8_spectral:3.1761e-02 L9_spectral:3.1866e-02 L10_spectral:3.1820e-02 L11_spectral:3.1309e-02 L12_spectral:3.1195e-02 train_time:205451ms step_avg:42.80ms +[2025-09-11 10:52:38] [Rank 0] PRINT: step:4800/10000 
val_loss:4.7518 total_sharp:1.6840e-03 L1_sharp:2.4581e-03 L2_sharp:5.7052e-04 L3_sharp:2.7434e-04 L4_sharp:1.8967e-04 L5_sharp:3.4547e-04 L6_sharp:3.4893e-04 L7_sharp:4.3229e-04 L8_sharp:1.0622e-03 L9_sharp:1.1141e-03 L10_sharp:9.7562e-04 L11_sharp:1.5232e-03 L12_sharp:7.8560e-03 total_fnorm:1.7875e+01 total_l1_linf:4.1728e+04 total_spectral:8.9375e+00 L1_fnorm:2.5312e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4531e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.4688e+00 L1_l1linf:6.7578e-01 L2_l1linf:6.2109e-01 L3_l1linf:6.0547e-01 L4_l1linf:6.0938e-01 L5_l1linf:6.0938e-01 L6_l1linf:6.0547e-01 L7_l1linf:6.1328e-01 L8_l1linf:5.8984e-01 L9_l1linf:5.7812e-01 L10_l1linf:5.7812e-01 L11_l1linf:5.9766e-01 L12_l1linf:5.3906e-01 L1_spectral:3.1920e-02 L2_spectral:3.1469e-02 L3_spectral:3.1334e-02 L4_spectral:3.1483e-02 L5_spectral:3.1438e-02 L6_spectral:3.1603e-02 L7_spectral:3.1487e-02 L8_spectral:3.1761e-02 L9_spectral:3.1866e-02 L10_spectral:3.1820e-02 L11_spectral:3.1309e-02 L12_spectral:3.1195e-02 train_time:205451ms step_avg:42.80ms +[2025-09-11 10:52:40] [Rank 0] step:4801/10000 train_time:208036ms step_avg:43.33ms +[2025-09-11 10:52:40] [Rank 0] step:4801/10000 train_time:208036ms step_avg:43.33ms +[2025-09-11 10:52:41] [Rank 0] step:4821/10000 train_time:208736ms step_avg:43.30ms +[2025-09-11 10:52:41] [Rank 0] step:4821/10000 train_time:208736ms step_avg:43.30ms +[2025-09-11 10:52:42] [Rank 0] step:4841/10000 train_time:209416ms step_avg:43.26ms +[2025-09-11 10:52:42] [Rank 0] step:4841/10000 train_time:209416ms step_avg:43.26ms +[2025-09-11 10:52:42] [Rank 0] step:4861/10000 train_time:210094ms step_avg:43.22ms +[2025-09-11 10:52:42] [Rank 0] step:4861/10000 train_time:210094ms step_avg:43.22ms +[2025-09-11 10:52:43] [Rank 0] step:4881/10000 train_time:210773ms step_avg:43.18ms +[2025-09-11 10:52:43] [Rank 0] step:4881/10000 
train_time:210773ms step_avg:43.18ms +[2025-09-11 10:52:44] [Rank 0] step:4901/10000 train_time:211453ms step_avg:43.14ms +[2025-09-11 10:52:44] [Rank 0] step:4901/10000 train_time:211453ms step_avg:43.14ms +[2025-09-11 10:52:44] [Rank 0] step:4921/10000 train_time:212132ms step_avg:43.11ms +[2025-09-11 10:52:44] [Rank 0] step:4921/10000 train_time:212132ms step_avg:43.11ms +[2025-09-11 10:52:45] [Rank 0] step:4941/10000 train_time:212810ms step_avg:43.07ms +[2025-09-11 10:52:45] [Rank 0] step:4941/10000 train_time:212810ms step_avg:43.07ms +[2025-09-11 10:52:46] [Rank 0] step:4961/10000 train_time:213488ms step_avg:43.03ms +[2025-09-11 10:52:46] [Rank 0] step:4961/10000 train_time:213488ms step_avg:43.03ms +[2025-09-11 10:52:46] [Rank 0] step:4981/10000 train_time:214167ms step_avg:43.00ms +[2025-09-11 10:52:46] [Rank 0] step:4981/10000 train_time:214167ms step_avg:43.00ms +[2025-09-11 10:52:47] [Rank 0] step:5001/10000 train_time:214846ms step_avg:42.96ms +[2025-09-11 10:52:47] [Rank 0] step:5001/10000 train_time:214846ms step_avg:42.96ms +[2025-09-11 10:52:48] [Rank 0] step:5021/10000 train_time:215524ms step_avg:42.92ms +[2025-09-11 10:52:48] [Rank 0] step:5021/10000 train_time:215524ms step_avg:42.92ms +[2025-09-11 10:52:48] [Rank 0] step:5041/10000 train_time:216202ms step_avg:42.89ms +[2025-09-11 10:52:48] [Rank 0] step:5041/10000 train_time:216202ms step_avg:42.89ms +[2025-09-11 10:52:49] [Rank 0] step:5061/10000 train_time:216881ms step_avg:42.85ms +[2025-09-11 10:52:49] [Rank 0] step:5061/10000 train_time:216881ms step_avg:42.85ms +[2025-09-11 10:52:50] [Rank 0] step:5081/10000 train_time:217559ms step_avg:42.82ms +[2025-09-11 10:52:50] [Rank 0] step:5081/10000 train_time:217559ms step_avg:42.82ms +[2025-09-11 10:52:50] [Rank 0] step:5101/10000 train_time:218238ms step_avg:42.78ms +[2025-09-11 10:52:50] [Rank 0] step:5101/10000 train_time:218238ms step_avg:42.78ms +[2025-09-11 10:52:51] [Rank 0] step:5121/10000 train_time:218916ms step_avg:42.75ms 
+[2025-09-11 10:52:51] [Rank 0] step:5121/10000 train_time:218916ms step_avg:42.75ms +[2025-09-11 10:52:52] [Rank 0] step:5141/10000 train_time:219594ms step_avg:42.71ms +[2025-09-11 10:52:52] [Rank 0] step:5141/10000 train_time:219594ms step_avg:42.71ms +[2025-09-11 10:52:52] [Rank 0] step:5161/10000 train_time:220272ms step_avg:42.68ms +[2025-09-11 10:52:52] [Rank 0] step:5161/10000 train_time:220272ms step_avg:42.68ms +[2025-09-11 10:52:53] [Rank 0] step:5181/10000 train_time:220949ms step_avg:42.65ms +[2025-09-11 10:52:53] [Rank 0] step:5181/10000 train_time:220949ms step_avg:42.65ms +[2025-09-11 10:52:54] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 10:52:54] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 10:52:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 10:52:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 10:52:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 10:52:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 10:52:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:52:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:52:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 10:52:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 10:52:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 10:52:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 10:53:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 10:53:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 10:53:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 10:53:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 10:53:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 10:53:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 10:53:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 10:53:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 10:53:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 10:53:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 10:53:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 10:53:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 10:53:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 10:53:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 10:53:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 10:53:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 10:53:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 10:53:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 10:53:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 10:53:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 10:53:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 10:53:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 10:53:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 10:53:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 10:53:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 10:53:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 10:53:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 10:53:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 10:53:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 10:53:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 10:53:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 10:53:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 10:53:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:53:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:53:04] [Rank 0] PRINT: step:5200/10000 val_loss:4.7144 total_sharp:1.9533e-03 L1_sharp:5.0135e-03 L2_sharp:5.7652e-04 L3_sharp:4.7610e-04 L4_sharp:2.6947e-04 L5_sharp:2.9483e-04 L6_sharp:4.7269e-04 L7_sharp:4.2999e-04 L8_sharp:9.7135e-04 L9_sharp:9.3637e-04 L10_sharp:1.1088e-03 L11_sharp:1.6025e-03 L12_sharp:6.9956e-03 total_fnorm:1.6750e+01 total_l1_linf:3.8912e+04 total_spectral:8.4375e+00 L1_fnorm:2.5156e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.5000e+00 L1_l1linf:6.6016e-01 L2_l1linf:6.1328e-01 L3_l1linf:5.8203e-01 L4_l1linf:6.0547e-01 L5_l1linf:6.0156e-01 L6_l1linf:6.0156e-01 L7_l1linf:6.2109e-01 L8_l1linf:5.7812e-01 L9_l1linf:5.7422e-01 L10_l1linf:5.6250e-01 L11_l1linf:5.7812e-01 L12_l1linf:5.5469e-01 L1_spectral:3.1873e-02 L2_spectral:3.1585e-02 L3_spectral:3.1392e-02 L4_spectral:3.1420e-02 L5_spectral:3.1331e-02 L6_spectral:3.2011e-02 L7_spectral:3.1666e-02 L8_spectral:3.1751e-02 L9_spectral:3.1837e-02 L10_spectral:3.1861e-02 L11_spectral:3.1640e-02 L12_spectral:3.1452e-02 train_time:221615ms step_avg:42.62ms +[2025-09-11 10:53:04] [Rank 0] PRINT: step:5200/10000 val_loss:4.7144 total_sharp:1.9533e-03 L1_sharp:5.0135e-03 L2_sharp:5.7652e-04 L3_sharp:4.7610e-04 L4_sharp:2.6947e-04 L5_sharp:2.9483e-04 L6_sharp:4.7269e-04 L7_sharp:4.2999e-04 L8_sharp:9.7135e-04 L9_sharp:9.3637e-04 L10_sharp:1.1088e-03 L11_sharp:1.6025e-03 L12_sharp:6.9956e-03 total_fnorm:1.6750e+01 total_l1_linf:3.8912e+04 total_spectral:8.4375e+00 L1_fnorm:2.5156e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.5000e+00 L1_l1linf:6.6016e-01 L2_l1linf:6.1328e-01 L3_l1linf:5.8203e-01 L4_l1linf:6.0547e-01 L5_l1linf:6.0156e-01 
L6_l1linf:6.0156e-01 L7_l1linf:6.2109e-01 L8_l1linf:5.7812e-01 L9_l1linf:5.7422e-01 L10_l1linf:5.6250e-01 L11_l1linf:5.7812e-01 L12_l1linf:5.5469e-01 L1_spectral:3.1873e-02 L2_spectral:3.1585e-02 L3_spectral:3.1392e-02 L4_spectral:3.1420e-02 L5_spectral:3.1331e-02 L6_spectral:3.2011e-02 L7_spectral:3.1666e-02 L8_spectral:3.1751e-02 L9_spectral:3.1837e-02 L10_spectral:3.1861e-02 L11_spectral:3.1640e-02 L12_spectral:3.1452e-02 train_time:221615ms step_avg:42.62ms +[2025-09-11 10:53:05] [Rank 0] step:5201/10000 train_time:222828ms step_avg:42.84ms +[2025-09-11 10:53:05] [Rank 0] step:5201/10000 train_time:222828ms step_avg:42.84ms +[2025-09-11 10:53:06] [Rank 0] step:5221/10000 train_time:223552ms step_avg:42.82ms +[2025-09-11 10:53:06] [Rank 0] step:5221/10000 train_time:223552ms step_avg:42.82ms +[2025-09-11 10:53:06] [Rank 0] step:5241/10000 train_time:224240ms step_avg:42.79ms +[2025-09-11 10:53:06] [Rank 0] step:5241/10000 train_time:224240ms step_avg:42.79ms +[2025-09-11 10:53:07] [Rank 0] step:5261/10000 train_time:224930ms step_avg:42.75ms +[2025-09-11 10:53:07] [Rank 0] step:5261/10000 train_time:224930ms step_avg:42.75ms +[2025-09-11 10:53:08] [Rank 0] step:5281/10000 train_time:225618ms step_avg:42.72ms +[2025-09-11 10:53:08] [Rank 0] step:5281/10000 train_time:225618ms step_avg:42.72ms +[2025-09-11 10:53:08] [Rank 0] step:5301/10000 train_time:226306ms step_avg:42.69ms +[2025-09-11 10:53:08] [Rank 0] step:5301/10000 train_time:226306ms step_avg:42.69ms +[2025-09-11 10:53:09] [Rank 0] step:5321/10000 train_time:226994ms step_avg:42.66ms +[2025-09-11 10:53:09] [Rank 0] step:5321/10000 train_time:226994ms step_avg:42.66ms +[2025-09-11 10:53:10] [Rank 0] step:5341/10000 train_time:227682ms step_avg:42.63ms +[2025-09-11 10:53:10] [Rank 0] step:5341/10000 train_time:227682ms step_avg:42.63ms +[2025-09-11 10:53:11] [Rank 0] step:5361/10000 train_time:228370ms step_avg:42.60ms +[2025-09-11 10:53:11] [Rank 0] step:5361/10000 train_time:228370ms step_avg:42.60ms 
+[2025-09-11 10:53:11] [Rank 0] step:5381/10000 train_time:229058ms step_avg:42.57ms +[2025-09-11 10:53:11] [Rank 0] step:5381/10000 train_time:229058ms step_avg:42.57ms +[2025-09-11 10:53:12] [Rank 0] step:5401/10000 train_time:229745ms step_avg:42.54ms +[2025-09-11 10:53:12] [Rank 0] step:5401/10000 train_time:229745ms step_avg:42.54ms +[2025-09-11 10:53:13] [Rank 0] step:5421/10000 train_time:230435ms step_avg:42.51ms +[2025-09-11 10:53:13] [Rank 0] step:5421/10000 train_time:230435ms step_avg:42.51ms +[2025-09-11 10:53:13] [Rank 0] step:5441/10000 train_time:231123ms step_avg:42.48ms +[2025-09-11 10:53:13] [Rank 0] step:5441/10000 train_time:231123ms step_avg:42.48ms +[2025-09-11 10:53:14] [Rank 0] step:5461/10000 train_time:231812ms step_avg:42.45ms +[2025-09-11 10:53:14] [Rank 0] step:5461/10000 train_time:231812ms step_avg:42.45ms +[2025-09-11 10:53:15] [Rank 0] step:5481/10000 train_time:232501ms step_avg:42.42ms +[2025-09-11 10:53:15] [Rank 0] step:5481/10000 train_time:232501ms step_avg:42.42ms +[2025-09-11 10:53:15] [Rank 0] step:5501/10000 train_time:233189ms step_avg:42.39ms +[2025-09-11 10:53:15] [Rank 0] step:5501/10000 train_time:233189ms step_avg:42.39ms +[2025-09-11 10:53:16] [Rank 0] step:5521/10000 train_time:233877ms step_avg:42.36ms +[2025-09-11 10:53:16] [Rank 0] step:5521/10000 train_time:233877ms step_avg:42.36ms +[2025-09-11 10:53:17] [Rank 0] step:5541/10000 train_time:234567ms step_avg:42.33ms +[2025-09-11 10:53:17] [Rank 0] step:5541/10000 train_time:234567ms step_avg:42.33ms +[2025-09-11 10:53:17] [Rank 0] step:5561/10000 train_time:235257ms step_avg:42.30ms +[2025-09-11 10:53:17] [Rank 0] step:5561/10000 train_time:235257ms step_avg:42.30ms +[2025-09-11 10:53:18] [Rank 0] step:5581/10000 train_time:235946ms step_avg:42.28ms +[2025-09-11 10:53:18] [Rank 0] step:5581/10000 train_time:235946ms step_avg:42.28ms +[2025-09-11 10:53:19] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 10:53:19] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 10:53:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 10:53:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 10:53:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 10:53:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 10:53:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:53:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:53:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 10:53:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 10:53:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 10:53:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 10:53:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 10:53:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 10:53:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 10:53:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 10:53:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 10:53:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 10:53:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 10:53:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 10:53:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 10:53:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 10:53:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 10:53:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 10:53:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 10:53:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 10:53:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 10:53:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 10:53:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 10:53:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 10:53:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 10:53:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 10:53:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 10:53:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 10:53:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 10:53:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 10:53:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 10:53:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 10:53:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 10:53:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 10:53:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 10:53:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 10:53:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 10:53:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 10:53:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:53:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:53:29] [Rank 0] PRINT: step:5600/10000 val_loss:4.6834 total_sharp:1.9152e-03 L1_sharp:3.5497e-03 L2_sharp:6.1800e-04 L3_sharp:2.5061e-04 L4_sharp:3.1503e-04 L5_sharp:3.2556e-04 L6_sharp:4.1180e-04 L7_sharp:3.3790e-04 L8_sharp:1.0396e-03 L9_sharp:1.0234e-03 L10_sharp:1.0839e-03 L11_sharp:1.5059e-03 L12_sharp:8.9410e-03 total_fnorm:1.6625e+01 total_l1_linf:3.8912e+04 total_spectral:8.3750e+00 L1_fnorm:2.5000e+00 L2_fnorm:2.4531e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5000e+00 L8_fnorm:2.4375e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.5000e+00 L1_l1linf:6.6016e-01 L2_l1linf:6.0547e-01 L3_l1linf:5.7812e-01 L4_l1linf:6.0547e-01 L5_l1linf:5.9766e-01 L6_l1linf:6.0938e-01 L7_l1linf:6.1328e-01 L8_l1linf:5.8594e-01 L9_l1linf:5.6641e-01 L10_l1linf:5.6641e-01 L11_l1linf:5.7422e-01 L12_l1linf:5.5859e-01 L1_spectral:3.1991e-02 L2_spectral:3.1788e-02 L3_spectral:3.1389e-02 L4_spectral:3.1525e-02 L5_spectral:3.1763e-02 L6_spectral:3.2006e-02 L7_spectral:3.1765e-02 L8_spectral:3.1570e-02 L9_spectral:3.2063e-02 L10_spectral:3.2209e-02 L11_spectral:3.1625e-02 L12_spectral:3.1598e-02 train_time:236614ms step_avg:42.25ms +[2025-09-11 10:53:29] [Rank 0] PRINT: step:5600/10000 
val_loss:4.6834 total_sharp:1.9152e-03 L1_sharp:3.5497e-03 L2_sharp:6.1800e-04 L3_sharp:2.5061e-04 L4_sharp:3.1503e-04 L5_sharp:3.2556e-04 L6_sharp:4.1180e-04 L7_sharp:3.3790e-04 L8_sharp:1.0396e-03 L9_sharp:1.0234e-03 L10_sharp:1.0839e-03 L11_sharp:1.5059e-03 L12_sharp:8.9410e-03 total_fnorm:1.6625e+01 total_l1_linf:3.8912e+04 total_spectral:8.3750e+00 L1_fnorm:2.5000e+00 L2_fnorm:2.4531e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5000e+00 L8_fnorm:2.4375e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.5000e+00 L1_l1linf:6.6016e-01 L2_l1linf:6.0547e-01 L3_l1linf:5.7812e-01 L4_l1linf:6.0547e-01 L5_l1linf:5.9766e-01 L6_l1linf:6.0938e-01 L7_l1linf:6.1328e-01 L8_l1linf:5.8594e-01 L9_l1linf:5.6641e-01 L10_l1linf:5.6641e-01 L11_l1linf:5.7422e-01 L12_l1linf:5.5859e-01 L1_spectral:3.1991e-02 L2_spectral:3.1788e-02 L3_spectral:3.1389e-02 L4_spectral:3.1525e-02 L5_spectral:3.1763e-02 L6_spectral:3.2006e-02 L7_spectral:3.1765e-02 L8_spectral:3.1570e-02 L9_spectral:3.2063e-02 L10_spectral:3.2209e-02 L11_spectral:3.1625e-02 L12_spectral:3.1598e-02 train_time:236614ms step_avg:42.25ms +[2025-09-11 10:53:30] [Rank 0] step:5601/10000 train_time:237831ms step_avg:42.46ms +[2025-09-11 10:53:30] [Rank 0] step:5601/10000 train_time:237831ms step_avg:42.46ms +[2025-09-11 10:53:31] [Rank 0] step:5621/10000 train_time:238554ms step_avg:42.44ms +[2025-09-11 10:53:31] [Rank 0] step:5621/10000 train_time:238554ms step_avg:42.44ms +[2025-09-11 10:53:31] [Rank 0] step:5641/10000 train_time:239242ms step_avg:42.41ms +[2025-09-11 10:53:31] [Rank 0] step:5641/10000 train_time:239242ms step_avg:42.41ms +[2025-09-11 10:53:32] [Rank 0] step:5661/10000 train_time:239930ms step_avg:42.38ms +[2025-09-11 10:53:32] [Rank 0] step:5661/10000 train_time:239930ms step_avg:42.38ms +[2025-09-11 10:53:33] [Rank 0] step:5681/10000 train_time:240620ms step_avg:42.36ms +[2025-09-11 10:53:33] [Rank 0] step:5681/10000 
train_time:240620ms step_avg:42.36ms +[2025-09-11 10:53:33] [Rank 0] step:5701/10000 train_time:241310ms step_avg:42.33ms +[2025-09-11 10:53:33] [Rank 0] step:5701/10000 train_time:241310ms step_avg:42.33ms +[2025-09-11 10:53:34] [Rank 0] step:5721/10000 train_time:241997ms step_avg:42.30ms +[2025-09-11 10:53:34] [Rank 0] step:5721/10000 train_time:241997ms step_avg:42.30ms +[2025-09-11 10:53:35] [Rank 0] step:5741/10000 train_time:242687ms step_avg:42.27ms +[2025-09-11 10:53:35] [Rank 0] step:5741/10000 train_time:242687ms step_avg:42.27ms +[2025-09-11 10:53:36] [Rank 0] step:5761/10000 train_time:243376ms step_avg:42.25ms +[2025-09-11 10:53:36] [Rank 0] step:5761/10000 train_time:243376ms step_avg:42.25ms +[2025-09-11 10:53:36] [Rank 0] step:5781/10000 train_time:244074ms step_avg:42.22ms +[2025-09-11 10:53:36] [Rank 0] step:5781/10000 train_time:244074ms step_avg:42.22ms +[2025-09-11 10:53:37] [Rank 0] step:5801/10000 train_time:245017ms step_avg:42.24ms +[2025-09-11 10:53:37] [Rank 0] step:5801/10000 train_time:245017ms step_avg:42.24ms +[2025-09-11 10:53:38] [Rank 0] step:5821/10000 train_time:245916ms step_avg:42.25ms +[2025-09-11 10:53:38] [Rank 0] step:5821/10000 train_time:245916ms step_avg:42.25ms +[2025-09-11 10:53:39] [Rank 0] step:5841/10000 train_time:246605ms step_avg:42.22ms +[2025-09-11 10:53:39] [Rank 0] step:5841/10000 train_time:246605ms step_avg:42.22ms +[2025-09-11 10:53:40] [Rank 0] step:5861/10000 train_time:247586ms step_avg:42.24ms +[2025-09-11 10:53:40] [Rank 0] step:5861/10000 train_time:247586ms step_avg:42.24ms +[2025-09-11 10:53:40] [Rank 0] step:5881/10000 train_time:248273ms step_avg:42.22ms +[2025-09-11 10:53:40] [Rank 0] step:5881/10000 train_time:248273ms step_avg:42.22ms +[2025-09-11 10:53:41] [Rank 0] step:5901/10000 train_time:248961ms step_avg:42.19ms +[2025-09-11 10:53:41] [Rank 0] step:5901/10000 train_time:248961ms step_avg:42.19ms +[2025-09-11 10:53:42] [Rank 0] step:5921/10000 train_time:249651ms step_avg:42.16ms 
+[2025-09-11 10:53:42] [Rank 0] step:5921/10000 train_time:249651ms step_avg:42.16ms +[2025-09-11 10:53:42] [Rank 0] step:5941/10000 train_time:250341ms step_avg:42.14ms +[2025-09-11 10:53:42] [Rank 0] step:5941/10000 train_time:250341ms step_avg:42.14ms +[2025-09-11 10:53:43] [Rank 0] step:5961/10000 train_time:251030ms step_avg:42.11ms +[2025-09-11 10:53:43] [Rank 0] step:5961/10000 train_time:251030ms step_avg:42.11ms +[2025-09-11 10:53:44] [Rank 0] step:5981/10000 train_time:251719ms step_avg:42.09ms +[2025-09-11 10:53:44] [Rank 0] step:5981/10000 train_time:251719ms step_avg:42.09ms +[2025-09-11 10:53:45] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 10:53:45] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 10:53:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 10:53:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 10:53:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 10:53:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 10:53:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:53:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:53:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 10:53:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 10:53:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 10:53:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 10:53:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 10:53:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 10:53:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 10:53:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 10:53:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 10:53:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 10:53:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 10:53:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 10:53:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 10:53:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 10:53:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 10:53:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 10:53:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 10:53:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 10:53:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 10:53:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 10:53:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 10:53:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 10:53:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 10:53:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 10:53:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 10:53:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 10:53:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 10:53:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 10:53:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 10:53:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 10:53:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 10:53:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 10:53:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 10:53:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 10:53:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 10:53:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 10:53:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:53:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:53:55] [Rank 0] PRINT: step:6000/10000 val_loss:4.6404 total_sharp:1.4160e-03 L1_sharp:5.7251e-03 L2_sharp:6.2806e-04 L3_sharp:3.2965e-04 L4_sharp:3.7414e-04 L5_sharp:3.1407e-04 L6_sharp:3.6615e-04 L7_sharp:3.3044e-04 L8_sharp:8.5886e-04 L9_sharp:9.0902e-04 L10_sharp:8.6916e-04 L11_sharp:1.2573e-03 L12_sharp:3.5441e-03 total_fnorm:1.7125e+01 total_l1_linf:3.8912e+04 total_spectral:8.6250e+00 L1_fnorm:2.5000e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4531e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.5156e+00 L1_l1linf:6.4844e-01 L2_l1linf:6.0156e-01 L3_l1linf:5.8594e-01 L4_l1linf:5.9375e-01 L5_l1linf:5.9766e-01 L6_l1linf:6.0156e-01 L7_l1linf:6.1328e-01 L8_l1linf:5.8203e-01 L9_l1linf:5.5469e-01 L10_l1linf:5.5078e-01 L11_l1linf:5.6250e-01 L12_l1linf:5.6250e-01 L1_spectral:3.2256e-02 L2_spectral:3.1805e-02 L3_spectral:3.1648e-02 L4_spectral:3.1726e-02 L5_spectral:3.1635e-02 L6_spectral:3.1850e-02 L7_spectral:3.2076e-02 L8_spectral:3.1971e-02 L9_spectral:3.2257e-02 L10_spectral:3.2457e-02 L11_spectral:3.2019e-02 L12_spectral:3.1852e-02 train_time:252390ms step_avg:42.07ms +[2025-09-11 10:53:55] [Rank 0] PRINT: step:6000/10000 val_loss:4.6404 total_sharp:1.4160e-03 L1_sharp:5.7251e-03 L2_sharp:6.2806e-04 L3_sharp:3.2965e-04 L4_sharp:3.7414e-04 L5_sharp:3.1407e-04 L6_sharp:3.6615e-04 L7_sharp:3.3044e-04 L8_sharp:8.5886e-04 L9_sharp:9.0902e-04 L10_sharp:8.6916e-04 L11_sharp:1.2573e-03 L12_sharp:3.5441e-03 total_fnorm:1.7125e+01 total_l1_linf:3.8912e+04 total_spectral:8.6250e+00 L1_fnorm:2.5000e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4531e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.5156e+00 L1_l1linf:6.4844e-01 L2_l1linf:6.0156e-01 L3_l1linf:5.8594e-01 L4_l1linf:5.9375e-01 L5_l1linf:5.9766e-01 
L6_l1linf:6.0156e-01 L7_l1linf:6.1328e-01 L8_l1linf:5.8203e-01 L9_l1linf:5.5469e-01 L10_l1linf:5.5078e-01 L11_l1linf:5.6250e-01 L12_l1linf:5.6250e-01 L1_spectral:3.2256e-02 L2_spectral:3.1805e-02 L3_spectral:3.1648e-02 L4_spectral:3.1726e-02 L5_spectral:3.1635e-02 L6_spectral:3.1850e-02 L7_spectral:3.2076e-02 L8_spectral:3.1971e-02 L9_spectral:3.2257e-02 L10_spectral:3.2457e-02 L11_spectral:3.2019e-02 L12_spectral:3.1852e-02 train_time:252390ms step_avg:42.07ms +[2025-09-11 10:53:56] [Rank 0] step:6001/10000 train_time:253614ms step_avg:42.26ms +[2025-09-11 10:53:56] [Rank 0] step:6001/10000 train_time:253614ms step_avg:42.26ms +[2025-09-11 10:53:56] [Rank 0] step:6021/10000 train_time:254335ms step_avg:42.24ms +[2025-09-11 10:53:56] [Rank 0] step:6021/10000 train_time:254335ms step_avg:42.24ms +[2025-09-11 10:53:57] [Rank 0] step:6041/10000 train_time:255028ms step_avg:42.22ms +[2025-09-11 10:53:57] [Rank 0] step:6041/10000 train_time:255028ms step_avg:42.22ms +[2025-09-11 10:53:58] [Rank 0] step:6061/10000 train_time:255719ms step_avg:42.19ms +[2025-09-11 10:53:58] [Rank 0] step:6061/10000 train_time:255719ms step_avg:42.19ms +[2025-09-11 10:53:59] [Rank 0] step:6081/10000 train_time:256411ms step_avg:42.17ms +[2025-09-11 10:53:59] [Rank 0] step:6081/10000 train_time:256411ms step_avg:42.17ms +[2025-09-11 10:53:59] [Rank 0] step:6101/10000 train_time:257102ms step_avg:42.14ms +[2025-09-11 10:53:59] [Rank 0] step:6101/10000 train_time:257102ms step_avg:42.14ms +[2025-09-11 10:54:00] [Rank 0] step:6121/10000 train_time:257793ms step_avg:42.12ms +[2025-09-11 10:54:00] [Rank 0] step:6121/10000 train_time:257793ms step_avg:42.12ms +[2025-09-11 10:54:01] [Rank 0] step:6141/10000 train_time:258485ms step_avg:42.09ms +[2025-09-11 10:54:01] [Rank 0] step:6141/10000 train_time:258485ms step_avg:42.09ms +[2025-09-11 10:54:01] [Rank 0] step:6161/10000 train_time:259181ms step_avg:42.07ms +[2025-09-11 10:54:01] [Rank 0] step:6161/10000 train_time:259181ms step_avg:42.07ms 
+[2025-09-11 10:54:02] [Rank 0] step:6181/10000 train_time:259870ms step_avg:42.04ms +[2025-09-11 10:54:02] [Rank 0] step:6181/10000 train_time:259870ms step_avg:42.04ms +[2025-09-11 10:54:03] [Rank 0] step:6201/10000 train_time:260562ms step_avg:42.02ms +[2025-09-11 10:54:03] [Rank 0] step:6201/10000 train_time:260562ms step_avg:42.02ms +[2025-09-11 10:54:03] [Rank 0] step:6221/10000 train_time:261253ms step_avg:42.00ms +[2025-09-11 10:54:03] [Rank 0] step:6221/10000 train_time:261253ms step_avg:42.00ms +[2025-09-11 10:54:04] [Rank 0] step:6241/10000 train_time:261945ms step_avg:41.97ms +[2025-09-11 10:54:04] [Rank 0] step:6241/10000 train_time:261945ms step_avg:41.97ms +[2025-09-11 10:54:05] [Rank 0] step:6261/10000 train_time:262635ms step_avg:41.95ms +[2025-09-11 10:54:05] [Rank 0] step:6261/10000 train_time:262635ms step_avg:41.95ms +[2025-09-11 10:54:05] [Rank 0] step:6281/10000 train_time:263326ms step_avg:41.92ms +[2025-09-11 10:54:05] [Rank 0] step:6281/10000 train_time:263326ms step_avg:41.92ms +[2025-09-11 10:54:06] [Rank 0] step:6301/10000 train_time:264016ms step_avg:41.90ms +[2025-09-11 10:54:06] [Rank 0] step:6301/10000 train_time:264016ms step_avg:41.90ms +[2025-09-11 10:54:07] [Rank 0] step:6321/10000 train_time:264709ms step_avg:41.88ms +[2025-09-11 10:54:07] [Rank 0] step:6321/10000 train_time:264709ms step_avg:41.88ms +[2025-09-11 10:54:08] [Rank 0] step:6341/10000 train_time:265403ms step_avg:41.86ms +[2025-09-11 10:54:08] [Rank 0] step:6341/10000 train_time:265403ms step_avg:41.86ms +[2025-09-11 10:54:08] [Rank 0] step:6361/10000 train_time:266096ms step_avg:41.83ms +[2025-09-11 10:54:08] [Rank 0] step:6361/10000 train_time:266096ms step_avg:41.83ms +[2025-09-11 10:54:09] [Rank 0] step:6381/10000 train_time:266786ms step_avg:41.81ms +[2025-09-11 10:54:09] [Rank 0] step:6381/10000 train_time:266786ms step_avg:41.81ms +[2025-09-11 10:54:10] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 10:54:10] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 10:54:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 10:54:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 10:54:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 10:54:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 10:54:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:54:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:54:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 10:54:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 10:54:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 10:54:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 10:54:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 10:54:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 10:54:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 10:54:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 10:54:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 10:54:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 10:54:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 10:54:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 10:54:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 10:54:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 10:54:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 10:54:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 10:54:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 10:54:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 10:54:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 10:54:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 10:54:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 10:54:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 10:54:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 10:54:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 10:54:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 10:54:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 10:54:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 10:54:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 10:54:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 10:54:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 10:54:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 10:54:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 10:54:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 10:54:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 10:54:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 10:54:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 10:54:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:54:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:54:20] [Rank 0] PRINT: step:6400/10000 val_loss:4.6018 total_sharp:1.4799e-03 L1_sharp:4.2725e-04 L2_sharp:2.5270e-04 L3_sharp:2.1484e-04 L4_sharp:2.7169e-04 L5_sharp:5.0941e-04 L6_sharp:4.0588e-04 L7_sharp:4.4599e-04 L8_sharp:9.5678e-04 L9_sharp:9.4482e-04 L10_sharp:9.7144e-04 L11_sharp:1.3429e-03 L12_sharp:4.2065e-03 total_fnorm:1.5062e+01 total_l1_linf:3.2640e+04 total_spectral:7.3750e+00 L1_fnorm:2.2500e+00 L2_fnorm:2.2188e+00 L3_fnorm:2.2500e+00 L4_fnorm:2.2500e+00 L5_fnorm:2.2344e+00 L6_fnorm:2.2656e+00 L7_fnorm:2.2500e+00 L8_fnorm:2.2031e+00 L9_fnorm:2.2500e+00 L10_fnorm:2.2656e+00 L11_fnorm:2.2656e+00 L12_fnorm:2.2500e+00 L1_l1linf:5.6250e-01 L2_l1linf:5.3125e-01 L3_l1linf:5.0781e-01 L4_l1linf:5.3516e-01 L5_l1linf:5.1953e-01 L6_l1linf:5.2734e-01 L7_l1linf:5.3516e-01 L8_l1linf:5.0000e-01 L9_l1linf:4.8242e-01 L10_l1linf:4.7461e-01 L11_l1linf:4.8828e-01 L12_l1linf:5.0391e-01 L1_spectral:2.9380e-02 L2_spectral:2.9075e-02 L3_spectral:2.9200e-02 L4_spectral:2.9183e-02 L5_spectral:2.9179e-02 L6_spectral:2.9271e-02 L7_spectral:2.9273e-02 L8_spectral:2.8968e-02 L9_spectral:2.9256e-02 L10_spectral:2.9437e-02 L11_spectral:2.9389e-02 L12_spectral:2.9175e-02 train_time:267456ms step_avg:41.79ms +[2025-09-11 10:54:20] [Rank 0] PRINT: step:6400/10000 
val_loss:4.6018 total_sharp:1.4799e-03 L1_sharp:4.2725e-04 L2_sharp:2.5270e-04 L3_sharp:2.1484e-04 L4_sharp:2.7169e-04 L5_sharp:5.0941e-04 L6_sharp:4.0588e-04 L7_sharp:4.4599e-04 L8_sharp:9.5678e-04 L9_sharp:9.4482e-04 L10_sharp:9.7144e-04 L11_sharp:1.3429e-03 L12_sharp:4.2065e-03 total_fnorm:1.5062e+01 total_l1_linf:3.2640e+04 total_spectral:7.3750e+00 L1_fnorm:2.2500e+00 L2_fnorm:2.2188e+00 L3_fnorm:2.2500e+00 L4_fnorm:2.2500e+00 L5_fnorm:2.2344e+00 L6_fnorm:2.2656e+00 L7_fnorm:2.2500e+00 L8_fnorm:2.2031e+00 L9_fnorm:2.2500e+00 L10_fnorm:2.2656e+00 L11_fnorm:2.2656e+00 L12_fnorm:2.2500e+00 L1_l1linf:5.6250e-01 L2_l1linf:5.3125e-01 L3_l1linf:5.0781e-01 L4_l1linf:5.3516e-01 L5_l1linf:5.1953e-01 L6_l1linf:5.2734e-01 L7_l1linf:5.3516e-01 L8_l1linf:5.0000e-01 L9_l1linf:4.8242e-01 L10_l1linf:4.7461e-01 L11_l1linf:4.8828e-01 L12_l1linf:5.0391e-01 L1_spectral:2.9380e-02 L2_spectral:2.9075e-02 L3_spectral:2.9200e-02 L4_spectral:2.9183e-02 L5_spectral:2.9179e-02 L6_spectral:2.9271e-02 L7_spectral:2.9273e-02 L8_spectral:2.8968e-02 L9_spectral:2.9256e-02 L10_spectral:2.9437e-02 L11_spectral:2.9389e-02 L12_spectral:2.9175e-02 train_time:267456ms step_avg:41.79ms +[2025-09-11 10:54:21] [Rank 0] step:6401/10000 train_time:268688ms step_avg:41.98ms +[2025-09-11 10:54:21] [Rank 0] step:6401/10000 train_time:268688ms step_avg:41.98ms +[2025-09-11 10:54:22] [Rank 0] step:6421/10000 train_time:269412ms step_avg:41.96ms +[2025-09-11 10:54:22] [Rank 0] step:6421/10000 train_time:269412ms step_avg:41.96ms +[2025-09-11 10:54:22] [Rank 0] step:6441/10000 train_time:270107ms step_avg:41.94ms +[2025-09-11 10:54:22] [Rank 0] step:6441/10000 train_time:270107ms step_avg:41.94ms +[2025-09-11 10:54:23] [Rank 0] step:6461/10000 train_time:270799ms step_avg:41.91ms +[2025-09-11 10:54:23] [Rank 0] step:6461/10000 train_time:270799ms step_avg:41.91ms +[2025-09-11 10:54:24] [Rank 0] step:6481/10000 train_time:271492ms step_avg:41.89ms +[2025-09-11 10:54:24] [Rank 0] step:6481/10000 
train_time:271492ms step_avg:41.89ms +[2025-09-11 10:54:24] [Rank 0] step:6501/10000 train_time:272186ms step_avg:41.87ms +[2025-09-11 10:54:24] [Rank 0] step:6501/10000 train_time:272186ms step_avg:41.87ms +[2025-09-11 10:54:25] [Rank 0] step:6521/10000 train_time:272879ms step_avg:41.85ms +[2025-09-11 10:54:25] [Rank 0] step:6521/10000 train_time:272879ms step_avg:41.85ms +[2025-09-11 10:54:26] [Rank 0] step:6541/10000 train_time:273569ms step_avg:41.82ms +[2025-09-11 10:54:26] [Rank 0] step:6541/10000 train_time:273569ms step_avg:41.82ms +[2025-09-11 10:54:26] [Rank 0] step:6561/10000 train_time:274261ms step_avg:41.80ms +[2025-09-11 10:54:26] [Rank 0] step:6561/10000 train_time:274261ms step_avg:41.80ms +[2025-09-11 10:54:27] [Rank 0] step:6581/10000 train_time:274954ms step_avg:41.78ms +[2025-09-11 10:54:27] [Rank 0] step:6581/10000 train_time:274954ms step_avg:41.78ms +[2025-09-11 10:54:28] [Rank 0] step:6601/10000 train_time:275646ms step_avg:41.76ms +[2025-09-11 10:54:28] [Rank 0] step:6601/10000 train_time:275646ms step_avg:41.76ms +[2025-09-11 10:54:29] [Rank 0] step:6621/10000 train_time:276337ms step_avg:41.74ms +[2025-09-11 10:54:29] [Rank 0] step:6621/10000 train_time:276337ms step_avg:41.74ms +[2025-09-11 10:54:29] [Rank 0] step:6641/10000 train_time:277029ms step_avg:41.71ms +[2025-09-11 10:54:29] [Rank 0] step:6641/10000 train_time:277029ms step_avg:41.71ms +[2025-09-11 10:54:30] [Rank 0] step:6661/10000 train_time:277722ms step_avg:41.69ms +[2025-09-11 10:54:30] [Rank 0] step:6661/10000 train_time:277722ms step_avg:41.69ms +[2025-09-11 10:54:31] [Rank 0] step:6681/10000 train_time:278421ms step_avg:41.67ms +[2025-09-11 10:54:31] [Rank 0] step:6681/10000 train_time:278421ms step_avg:41.67ms +[2025-09-11 10:54:31] [Rank 0] step:6701/10000 train_time:279118ms step_avg:41.65ms +[2025-09-11 10:54:31] [Rank 0] step:6701/10000 train_time:279118ms step_avg:41.65ms +[2025-09-11 10:54:32] [Rank 0] step:6721/10000 train_time:279818ms step_avg:41.63ms 
+[2025-09-11 10:54:32] [Rank 0] step:6721/10000 train_time:279818ms step_avg:41.63ms +[2025-09-11 10:54:33] [Rank 0] step:6741/10000 train_time:280518ms step_avg:41.61ms +[2025-09-11 10:54:33] [Rank 0] step:6741/10000 train_time:280518ms step_avg:41.61ms +[2025-09-11 10:54:33] [Rank 0] step:6761/10000 train_time:281216ms step_avg:41.59ms +[2025-09-11 10:54:33] [Rank 0] step:6761/10000 train_time:281216ms step_avg:41.59ms +[2025-09-11 10:54:34] [Rank 0] step:6781/10000 train_time:281916ms step_avg:41.57ms +[2025-09-11 10:54:34] [Rank 0] step:6781/10000 train_time:281916ms step_avg:41.57ms +[2025-09-11 10:54:35] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 10:54:35] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 10:54:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 10:54:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 10:54:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 10:54:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 10:54:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:54:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:54:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 10:54:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 10:54:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 10:54:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 10:54:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 10:54:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 10:54:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 10:54:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 10:54:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 10:54:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 10:54:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 10:54:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 10:54:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 10:54:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 10:54:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 10:54:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 10:54:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 10:54:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 10:54:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 10:54:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 10:54:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 10:54:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 10:54:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 10:54:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 10:54:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 10:54:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 10:54:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 10:54:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 10:54:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 10:54:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 10:54:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 10:54:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 10:54:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 10:54:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 10:54:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 10:54:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 10:54:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:54:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:54:45] [Rank 0] PRINT: step:6800/10000 val_loss:4.5645 total_sharp:1.4507e-03 L1_sharp:2.4841e-03 L2_sharp:4.4880e-04 L3_sharp:2.2343e-04 L4_sharp:1.8129e-04 L5_sharp:2.5947e-04 L6_sharp:4.0021e-04 L7_sharp:4.0253e-04 L8_sharp:8.7390e-04 L9_sharp:1.0333e-03 L10_sharp:1.0381e-03 L11_sharp:1.4174e-03 L12_sharp:4.9315e-03 total_fnorm:1.3250e+01 total_l1_linf:2.8672e+04 total_spectral:6.8125e+00 L1_fnorm:2.0000e+00 L2_fnorm:1.9531e+00 L3_fnorm:1.9844e+00 L4_fnorm:2.0000e+00 L5_fnorm:1.9766e+00 L6_fnorm:2.0000e+00 L7_fnorm:2.0000e+00 L8_fnorm:1.9453e+00 L9_fnorm:1.9922e+00 L10_fnorm:2.0000e+00 L11_fnorm:1.9922e+00 L12_fnorm:1.9922e+00 L1_l1linf:4.8242e-01 L2_l1linf:4.6680e-01 L3_l1linf:4.3945e-01 L4_l1linf:4.5508e-01 L5_l1linf:4.4922e-01 L6_l1linf:4.5508e-01 L7_l1linf:4.6289e-01 L8_l1linf:4.2578e-01 L9_l1linf:4.0625e-01 L10_l1linf:4.0039e-01 L11_l1linf:4.1602e-01 L12_l1linf:4.2969e-01 L1_spectral:2.6569e-02 L2_spectral:2.6128e-02 L3_spectral:2.6134e-02 L4_spectral:2.6385e-02 L5_spectral:2.6079e-02 L6_spectral:2.6203e-02 L7_spectral:2.6218e-02 L8_spectral:2.5797e-02 L9_spectral:2.6339e-02 L10_spectral:2.6474e-02 L11_spectral:2.6472e-02 L12_spectral:2.6492e-02 train_time:282595ms step_avg:41.56ms +[2025-09-11 10:54:45] [Rank 0] PRINT: step:6800/10000 val_loss:4.5645 total_sharp:1.4507e-03 L1_sharp:2.4841e-03 L2_sharp:4.4880e-04 L3_sharp:2.2343e-04 L4_sharp:1.8129e-04 L5_sharp:2.5947e-04 L6_sharp:4.0021e-04 L7_sharp:4.0253e-04 L8_sharp:8.7390e-04 L9_sharp:1.0333e-03 L10_sharp:1.0381e-03 L11_sharp:1.4174e-03 L12_sharp:4.9315e-03 total_fnorm:1.3250e+01 total_l1_linf:2.8672e+04 total_spectral:6.8125e+00 L1_fnorm:2.0000e+00 L2_fnorm:1.9531e+00 L3_fnorm:1.9844e+00 L4_fnorm:2.0000e+00 L5_fnorm:1.9766e+00 L6_fnorm:2.0000e+00 L7_fnorm:2.0000e+00 L8_fnorm:1.9453e+00 L9_fnorm:1.9922e+00 L10_fnorm:2.0000e+00 L11_fnorm:1.9922e+00 L12_fnorm:1.9922e+00 L1_l1linf:4.8242e-01 L2_l1linf:4.6680e-01 L3_l1linf:4.3945e-01 L4_l1linf:4.5508e-01 L5_l1linf:4.4922e-01 
L6_l1linf:4.5508e-01 L7_l1linf:4.6289e-01 L8_l1linf:4.2578e-01 L9_l1linf:4.0625e-01 L10_l1linf:4.0039e-01 L11_l1linf:4.1602e-01 L12_l1linf:4.2969e-01 L1_spectral:2.6569e-02 L2_spectral:2.6128e-02 L3_spectral:2.6134e-02 L4_spectral:2.6385e-02 L5_spectral:2.6079e-02 L6_spectral:2.6203e-02 L7_spectral:2.6218e-02 L8_spectral:2.5797e-02 L9_spectral:2.6339e-02 L10_spectral:2.6474e-02 L11_spectral:2.6472e-02 L12_spectral:2.6492e-02 train_time:282595ms step_avg:41.56ms +[2025-09-11 10:54:46] [Rank 0] step:6801/10000 train_time:283838ms step_avg:41.73ms +[2025-09-11 10:54:46] [Rank 0] step:6801/10000 train_time:283838ms step_avg:41.73ms +[2025-09-11 10:54:47] [Rank 0] step:6821/10000 train_time:284564ms step_avg:41.72ms +[2025-09-11 10:54:47] [Rank 0] step:6821/10000 train_time:284564ms step_avg:41.72ms +[2025-09-11 10:54:48] [Rank 0] step:6841/10000 train_time:285268ms step_avg:41.70ms +[2025-09-11 10:54:48] [Rank 0] step:6841/10000 train_time:285268ms step_avg:41.70ms +[2025-09-11 10:54:48] [Rank 0] step:6861/10000 train_time:285969ms step_avg:41.68ms +[2025-09-11 10:54:48] [Rank 0] step:6861/10000 train_time:285969ms step_avg:41.68ms +[2025-09-11 10:54:49] [Rank 0] step:6881/10000 train_time:286669ms step_avg:41.66ms +[2025-09-11 10:54:49] [Rank 0] step:6881/10000 train_time:286669ms step_avg:41.66ms +[2025-09-11 10:54:50] [Rank 0] step:6901/10000 train_time:287368ms step_avg:41.64ms +[2025-09-11 10:54:50] [Rank 0] step:6901/10000 train_time:287368ms step_avg:41.64ms +[2025-09-11 10:54:50] [Rank 0] step:6921/10000 train_time:288066ms step_avg:41.62ms +[2025-09-11 10:54:50] [Rank 0] step:6921/10000 train_time:288066ms step_avg:41.62ms +[2025-09-11 10:54:51] [Rank 0] step:6941/10000 train_time:288766ms step_avg:41.60ms +[2025-09-11 10:54:51] [Rank 0] step:6941/10000 train_time:288766ms step_avg:41.60ms +[2025-09-11 10:54:52] [Rank 0] step:6961/10000 train_time:289465ms step_avg:41.58ms +[2025-09-11 10:54:52] [Rank 0] step:6961/10000 train_time:289465ms step_avg:41.58ms 
+[2025-09-11 10:54:52] [Rank 0] step:6981/10000 train_time:290167ms step_avg:41.57ms +[2025-09-11 10:54:52] [Rank 0] step:6981/10000 train_time:290167ms step_avg:41.57ms +[2025-09-11 10:54:53] [Rank 0] step:7001/10000 train_time:290867ms step_avg:41.55ms +[2025-09-11 10:54:53] [Rank 0] step:7001/10000 train_time:290867ms step_avg:41.55ms +[2025-09-11 10:54:54] [Rank 0] step:7021/10000 train_time:291566ms step_avg:41.53ms +[2025-09-11 10:54:54] [Rank 0] step:7021/10000 train_time:291566ms step_avg:41.53ms +[2025-09-11 10:54:55] [Rank 0] step:7041/10000 train_time:292265ms step_avg:41.51ms +[2025-09-11 10:54:55] [Rank 0] step:7041/10000 train_time:292265ms step_avg:41.51ms +[2025-09-11 10:54:55] [Rank 0] step:7061/10000 train_time:292966ms step_avg:41.49ms +[2025-09-11 10:54:55] [Rank 0] step:7061/10000 train_time:292966ms step_avg:41.49ms +[2025-09-11 10:54:56] [Rank 0] step:7081/10000 train_time:293665ms step_avg:41.47ms +[2025-09-11 10:54:56] [Rank 0] step:7081/10000 train_time:293665ms step_avg:41.47ms +[2025-09-11 10:54:57] [Rank 0] step:7101/10000 train_time:294365ms step_avg:41.45ms +[2025-09-11 10:54:57] [Rank 0] step:7101/10000 train_time:294365ms step_avg:41.45ms +[2025-09-11 10:54:57] [Rank 0] step:7121/10000 train_time:295065ms step_avg:41.44ms +[2025-09-11 10:54:57] [Rank 0] step:7121/10000 train_time:295065ms step_avg:41.44ms +[2025-09-11 10:54:58] [Rank 0] step:7141/10000 train_time:295776ms step_avg:41.42ms +[2025-09-11 10:54:58] [Rank 0] step:7141/10000 train_time:295776ms step_avg:41.42ms +[2025-09-11 10:54:59] [Rank 0] step:7161/10000 train_time:296477ms step_avg:41.40ms +[2025-09-11 10:54:59] [Rank 0] step:7161/10000 train_time:296477ms step_avg:41.40ms +[2025-09-11 10:54:59] [Rank 0] step:7181/10000 train_time:297175ms step_avg:41.38ms +[2025-09-11 10:54:59] [Rank 0] step:7181/10000 train_time:297175ms step_avg:41.38ms +[2025-09-11 10:55:00] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 10:55:00] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 10:55:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 10:55:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 10:55:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 10:55:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 10:55:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:55:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:55:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 10:55:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 10:55:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 10:55:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 10:55:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 10:55:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 10:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 10:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 10:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 10:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 10:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 10:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 10:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 10:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 10:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 10:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 10:55:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 10:55:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 10:55:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 10:55:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 10:55:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 10:55:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 10:55:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 10:55:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 10:55:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 10:55:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 10:55:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 10:55:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 10:55:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 10:55:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 10:55:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 10:55:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 10:55:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 10:55:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 10:55:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 10:55:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 10:55:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:55:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:55:10] [Rank 0] PRINT: step:7200/10000 val_loss:4.5301 total_sharp:1.2245e-03 L1_sharp:2.4512e-03 L2_sharp:2.9619e-04 L3_sharp:2.1988e-04 L4_sharp:1.9778e-04 L5_sharp:2.6795e-04 L6_sharp:3.7075e-04 L7_sharp:4.0169e-04 L8_sharp:7.6898e-04 L9_sharp:8.2364e-04 L10_sharp:8.4949e-04 L11_sharp:1.2652e-03 L12_sharp:4.9502e-03 total_fnorm:1.1562e+01 total_l1_linf:2.2912e+04 total_spectral:5.7500e+00 L1_fnorm:1.7422e+00 L2_fnorm:1.6953e+00 L3_fnorm:1.7109e+00 L4_fnorm:1.7266e+00 L5_fnorm:1.7109e+00 L6_fnorm:1.7266e+00 L7_fnorm:1.7188e+00 L8_fnorm:1.6719e+00 L9_fnorm:1.7109e+00 L10_fnorm:1.7109e+00 L11_fnorm:1.7188e+00 L12_fnorm:1.7109e+00 L1_l1linf:4.0039e-01 L2_l1linf:3.8086e-01 L3_l1linf:3.6133e-01 L4_l1linf:3.8086e-01 L5_l1linf:3.8672e-01 L6_l1linf:3.7891e-01 L7_l1linf:3.8086e-01 L8_l1linf:3.4961e-01 L9_l1linf:3.4180e-01 L10_l1linf:3.2227e-01 L11_l1linf:3.3984e-01 L12_l1linf:3.5938e-01 L1_spectral:2.3309e-02 L2_spectral:2.2849e-02 L3_spectral:2.3127e-02 L4_spectral:2.3229e-02 L5_spectral:2.3232e-02 L6_spectral:2.3143e-02 L7_spectral:2.2982e-02 L8_spectral:2.2886e-02 L9_spectral:2.3149e-02 L10_spectral:2.3183e-02 L11_spectral:2.3371e-02 L12_spectral:2.3286e-02 train_time:297855ms step_avg:41.37ms +[2025-09-11 10:55:10] [Rank 0] PRINT: step:7200/10000 
val_loss:4.5301 total_sharp:1.2245e-03 L1_sharp:2.4512e-03 L2_sharp:2.9619e-04 L3_sharp:2.1988e-04 L4_sharp:1.9778e-04 L5_sharp:2.6795e-04 L6_sharp:3.7075e-04 L7_sharp:4.0169e-04 L8_sharp:7.6898e-04 L9_sharp:8.2364e-04 L10_sharp:8.4949e-04 L11_sharp:1.2652e-03 L12_sharp:4.9502e-03 total_fnorm:1.1562e+01 total_l1_linf:2.2912e+04 total_spectral:5.7500e+00 L1_fnorm:1.7422e+00 L2_fnorm:1.6953e+00 L3_fnorm:1.7109e+00 L4_fnorm:1.7266e+00 L5_fnorm:1.7109e+00 L6_fnorm:1.7266e+00 L7_fnorm:1.7188e+00 L8_fnorm:1.6719e+00 L9_fnorm:1.7109e+00 L10_fnorm:1.7109e+00 L11_fnorm:1.7188e+00 L12_fnorm:1.7109e+00 L1_l1linf:4.0039e-01 L2_l1linf:3.8086e-01 L3_l1linf:3.6133e-01 L4_l1linf:3.8086e-01 L5_l1linf:3.8672e-01 L6_l1linf:3.7891e-01 L7_l1linf:3.8086e-01 L8_l1linf:3.4961e-01 L9_l1linf:3.4180e-01 L10_l1linf:3.2227e-01 L11_l1linf:3.3984e-01 L12_l1linf:3.5938e-01 L1_spectral:2.3309e-02 L2_spectral:2.2849e-02 L3_spectral:2.3127e-02 L4_spectral:2.3229e-02 L5_spectral:2.3232e-02 L6_spectral:2.3143e-02 L7_spectral:2.2982e-02 L8_spectral:2.2886e-02 L9_spectral:2.3149e-02 L10_spectral:2.3183e-02 L11_spectral:2.3371e-02 L12_spectral:2.3286e-02 train_time:297855ms step_avg:41.37ms +[2025-09-11 10:55:11] [Rank 0] step:7201/10000 train_time:299109ms step_avg:41.54ms +[2025-09-11 10:55:11] [Rank 0] step:7201/10000 train_time:299109ms step_avg:41.54ms +[2025-09-11 10:55:12] [Rank 0] step:7221/10000 train_time:299842ms step_avg:41.52ms +[2025-09-11 10:55:12] [Rank 0] step:7221/10000 train_time:299842ms step_avg:41.52ms +[2025-09-11 10:55:13] [Rank 0] step:7241/10000 train_time:300543ms step_avg:41.51ms +[2025-09-11 10:55:13] [Rank 0] step:7241/10000 train_time:300543ms step_avg:41.51ms +[2025-09-11 10:55:14] [Rank 0] step:7261/10000 train_time:301244ms step_avg:41.49ms +[2025-09-11 10:55:14] [Rank 0] step:7261/10000 train_time:301244ms step_avg:41.49ms +[2025-09-11 10:55:14] [Rank 0] step:7281/10000 train_time:301949ms step_avg:41.47ms +[2025-09-11 10:55:14] [Rank 0] step:7281/10000 
train_time:301949ms step_avg:41.47ms +[2025-09-11 10:55:15] [Rank 0] step:7301/10000 train_time:302647ms step_avg:41.45ms +[2025-09-11 10:55:15] [Rank 0] step:7301/10000 train_time:302647ms step_avg:41.45ms +[2025-09-11 10:55:16] [Rank 0] step:7321/10000 train_time:303346ms step_avg:41.43ms +[2025-09-11 10:55:16] [Rank 0] step:7321/10000 train_time:303346ms step_avg:41.43ms +[2025-09-11 10:55:16] [Rank 0] step:7341/10000 train_time:304045ms step_avg:41.42ms +[2025-09-11 10:55:16] [Rank 0] step:7341/10000 train_time:304045ms step_avg:41.42ms +[2025-09-11 10:55:17] [Rank 0] step:7361/10000 train_time:304745ms step_avg:41.40ms +[2025-09-11 10:55:17] [Rank 0] step:7361/10000 train_time:304745ms step_avg:41.40ms +[2025-09-11 10:55:18] [Rank 0] step:7381/10000 train_time:305445ms step_avg:41.38ms +[2025-09-11 10:55:18] [Rank 0] step:7381/10000 train_time:305445ms step_avg:41.38ms +[2025-09-11 10:55:18] [Rank 0] step:7401/10000 train_time:306144ms step_avg:41.37ms +[2025-09-11 10:55:18] [Rank 0] step:7401/10000 train_time:306144ms step_avg:41.37ms +[2025-09-11 10:55:19] [Rank 0] step:7421/10000 train_time:306843ms step_avg:41.35ms +[2025-09-11 10:55:19] [Rank 0] step:7421/10000 train_time:306843ms step_avg:41.35ms +[2025-09-11 10:55:20] [Rank 0] step:7441/10000 train_time:307544ms step_avg:41.33ms +[2025-09-11 10:55:20] [Rank 0] step:7441/10000 train_time:307544ms step_avg:41.33ms +[2025-09-11 10:55:21] [Rank 0] step:7461/10000 train_time:308244ms step_avg:41.31ms +[2025-09-11 10:55:21] [Rank 0] step:7461/10000 train_time:308244ms step_avg:41.31ms +[2025-09-11 10:55:21] [Rank 0] step:7481/10000 train_time:308947ms step_avg:41.30ms +[2025-09-11 10:55:21] [Rank 0] step:7481/10000 train_time:308947ms step_avg:41.30ms +[2025-09-11 10:55:22] [Rank 0] step:7501/10000 train_time:309647ms step_avg:41.28ms +[2025-09-11 10:55:22] [Rank 0] step:7501/10000 train_time:309647ms step_avg:41.28ms +[2025-09-11 10:55:23] [Rank 0] step:7521/10000 train_time:310348ms step_avg:41.26ms 
+[2025-09-11 10:55:23] [Rank 0] step:7521/10000 train_time:310348ms step_avg:41.26ms +[2025-09-11 10:55:23] [Rank 0] step:7541/10000 train_time:311046ms step_avg:41.25ms +[2025-09-11 10:55:23] [Rank 0] step:7541/10000 train_time:311046ms step_avg:41.25ms +[2025-09-11 10:55:24] [Rank 0] step:7561/10000 train_time:311748ms step_avg:41.23ms +[2025-09-11 10:55:24] [Rank 0] step:7561/10000 train_time:311748ms step_avg:41.23ms +[2025-09-11 10:55:25] [Rank 0] step:7581/10000 train_time:312448ms step_avg:41.21ms +[2025-09-11 10:55:25] [Rank 0] step:7581/10000 train_time:312448ms step_avg:41.21ms +[2025-09-11 10:55:25] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 10:55:25] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 10:55:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 10:55:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 10:55:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 10:55:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 10:55:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:55:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:55:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 10:55:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 10:55:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 10:55:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 10:55:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 10:55:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 10:55:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 10:55:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 10:55:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 10:55:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 10:55:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 10:55:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 10:55:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 10:55:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 10:55:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 10:55:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 10:55:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 10:55:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 10:55:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 10:55:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 10:55:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 10:55:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 10:55:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 10:55:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 10:55:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 10:55:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 10:55:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 10:55:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 10:55:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 10:55:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 10:55:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 10:55:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 10:55:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 10:55:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 10:55:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 10:55:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 10:55:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:55:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:55:36] [Rank 0] PRINT: step:7600/10000 val_loss:4.4987 total_sharp:1.3205e-03 L1_sharp:1.9208e-03 L2_sharp:2.9940e-04 L3_sharp:2.3808e-04 L4_sharp:6.2369e-05 L5_sharp:3.1929e-04 L6_sharp:3.6488e-04 L7_sharp:3.0237e-04 L8_sharp:7.7500e-04 L9_sharp:8.7006e-04 L10_sharp:8.9317e-04 L11_sharp:1.3472e-03 L12_sharp:3.5529e-03 total_fnorm:9.0625e+00 total_l1_linf:1.7408e+04 total_spectral:4.6562e+00 L1_fnorm:1.4766e+00 L2_fnorm:1.4219e+00 L3_fnorm:1.4375e+00 L4_fnorm:1.4453e+00 L5_fnorm:1.4375e+00 L6_fnorm:1.4453e+00 L7_fnorm:1.4375e+00 L8_fnorm:1.3906e+00 L9_fnorm:1.4297e+00 L10_fnorm:1.4219e+00 L11_fnorm:1.4297e+00 L12_fnorm:1.4297e+00 L1_l1linf:3.0859e-01 L2_l1linf:3.0078e-01 L3_l1linf:2.8125e-01 L4_l1linf:3.0078e-01 L5_l1linf:2.9492e-01 L6_l1linf:3.0078e-01 L7_l1linf:2.9492e-01 L8_l1linf:2.7148e-01 L9_l1linf:2.5781e-01 L10_l1linf:2.5977e-01 L11_l1linf:2.6758e-01 L12_l1linf:3.0078e-01 L1_spectral:1.9850e-02 L2_spectral:1.9370e-02 L3_spectral:1.9765e-02 L4_spectral:1.9886e-02 L5_spectral:1.9709e-02 L6_spectral:1.9793e-02 L7_spectral:1.9716e-02 L8_spectral:1.9672e-02 L9_spectral:1.9974e-02 L10_spectral:1.9914e-02 L11_spectral:1.9929e-02 L12_spectral:1.9872e-02 train_time:313129ms step_avg:41.20ms +[2025-09-11 10:55:36] [Rank 0] PRINT: step:7600/10000 val_loss:4.4987 total_sharp:1.3205e-03 L1_sharp:1.9208e-03 L2_sharp:2.9940e-04 L3_sharp:2.3808e-04 L4_sharp:6.2369e-05 L5_sharp:3.1929e-04 L6_sharp:3.6488e-04 L7_sharp:3.0237e-04 L8_sharp:7.7500e-04 L9_sharp:8.7006e-04 L10_sharp:8.9317e-04 L11_sharp:1.3472e-03 L12_sharp:3.5529e-03 total_fnorm:9.0625e+00 total_l1_linf:1.7408e+04 total_spectral:4.6562e+00 L1_fnorm:1.4766e+00 L2_fnorm:1.4219e+00 L3_fnorm:1.4375e+00 L4_fnorm:1.4453e+00 L5_fnorm:1.4375e+00 L6_fnorm:1.4453e+00 L7_fnorm:1.4375e+00 L8_fnorm:1.3906e+00 L9_fnorm:1.4297e+00 L10_fnorm:1.4219e+00 L11_fnorm:1.4297e+00 L12_fnorm:1.4297e+00 L1_l1linf:3.0859e-01 L2_l1linf:3.0078e-01 L3_l1linf:2.8125e-01 L4_l1linf:3.0078e-01 L5_l1linf:2.9492e-01 
L6_l1linf:3.0078e-01 L7_l1linf:2.9492e-01 L8_l1linf:2.7148e-01 L9_l1linf:2.5781e-01 L10_l1linf:2.5977e-01 L11_l1linf:2.6758e-01 L12_l1linf:3.0078e-01 L1_spectral:1.9850e-02 L2_spectral:1.9370e-02 L3_spectral:1.9765e-02 L4_spectral:1.9886e-02 L5_spectral:1.9709e-02 L6_spectral:1.9793e-02 L7_spectral:1.9716e-02 L8_spectral:1.9672e-02 L9_spectral:1.9974e-02 L10_spectral:1.9914e-02 L11_spectral:1.9929e-02 L12_spectral:1.9872e-02 train_time:313129ms step_avg:41.20ms +[2025-09-11 10:55:37] [Rank 0] step:7601/10000 train_time:314467ms step_avg:41.37ms +[2025-09-11 10:55:37] [Rank 0] step:7601/10000 train_time:314467ms step_avg:41.37ms +[2025-09-11 10:55:38] [Rank 0] step:7621/10000 train_time:315239ms step_avg:41.36ms +[2025-09-11 10:55:38] [Rank 0] step:7621/10000 train_time:315239ms step_avg:41.36ms +[2025-09-11 10:55:38] [Rank 0] step:7641/10000 train_time:315943ms step_avg:41.35ms +[2025-09-11 10:55:38] [Rank 0] step:7641/10000 train_time:315943ms step_avg:41.35ms +[2025-09-11 10:55:39] [Rank 0] step:7661/10000 train_time:316645ms step_avg:41.33ms +[2025-09-11 10:55:39] [Rank 0] step:7661/10000 train_time:316645ms step_avg:41.33ms +[2025-09-11 10:55:40] [Rank 0] step:7681/10000 train_time:317345ms step_avg:41.32ms +[2025-09-11 10:55:40] [Rank 0] step:7681/10000 train_time:317345ms step_avg:41.32ms +[2025-09-11 10:55:40] [Rank 0] step:7701/10000 train_time:318047ms step_avg:41.30ms +[2025-09-11 10:55:40] [Rank 0] step:7701/10000 train_time:318047ms step_avg:41.30ms +[2025-09-11 10:55:41] [Rank 0] step:7721/10000 train_time:318748ms step_avg:41.28ms +[2025-09-11 10:55:41] [Rank 0] step:7721/10000 train_time:318748ms step_avg:41.28ms +[2025-09-11 10:55:42] [Rank 0] step:7741/10000 train_time:319449ms step_avg:41.27ms +[2025-09-11 10:55:42] [Rank 0] step:7741/10000 train_time:319449ms step_avg:41.27ms +[2025-09-11 10:55:43] [Rank 0] step:7761/10000 train_time:320150ms step_avg:41.25ms +[2025-09-11 10:55:43] [Rank 0] step:7761/10000 train_time:320150ms step_avg:41.25ms 
+[2025-09-11 10:55:43] [Rank 0] step:7781/10000 train_time:320852ms step_avg:41.24ms +[2025-09-11 10:55:43] [Rank 0] step:7781/10000 train_time:320852ms step_avg:41.24ms +[2025-09-11 10:55:44] [Rank 0] step:7801/10000 train_time:322022ms step_avg:41.28ms +[2025-09-11 10:55:44] [Rank 0] step:7801/10000 train_time:322022ms step_avg:41.28ms +[2025-09-11 10:55:45] [Rank 0] step:7821/10000 train_time:322806ms step_avg:41.27ms +[2025-09-11 10:55:45] [Rank 0] step:7821/10000 train_time:322806ms step_avg:41.27ms +[2025-09-11 10:55:46] [Rank 0] step:7841/10000 train_time:323509ms step_avg:41.26ms +[2025-09-11 10:55:46] [Rank 0] step:7841/10000 train_time:323509ms step_avg:41.26ms +[2025-09-11 10:55:47] [Rank 0] step:7861/10000 train_time:324456ms step_avg:41.27ms +[2025-09-11 10:55:47] [Rank 0] step:7861/10000 train_time:324456ms step_avg:41.27ms +[2025-09-11 10:55:48] [Rank 0] step:7881/10000 train_time:325157ms step_avg:41.26ms +[2025-09-11 10:55:48] [Rank 0] step:7881/10000 train_time:325157ms step_avg:41.26ms +[2025-09-11 10:55:48] [Rank 0] step:7901/10000 train_time:325860ms step_avg:41.24ms +[2025-09-11 10:55:48] [Rank 0] step:7901/10000 train_time:325860ms step_avg:41.24ms +[2025-09-11 10:55:49] [Rank 0] step:7921/10000 train_time:326561ms step_avg:41.23ms +[2025-09-11 10:55:49] [Rank 0] step:7921/10000 train_time:326561ms step_avg:41.23ms +[2025-09-11 10:55:50] [Rank 0] step:7941/10000 train_time:327263ms step_avg:41.21ms +[2025-09-11 10:55:50] [Rank 0] step:7941/10000 train_time:327263ms step_avg:41.21ms +[2025-09-11 10:55:50] [Rank 0] step:7961/10000 train_time:327962ms step_avg:41.20ms +[2025-09-11 10:55:50] [Rank 0] step:7961/10000 train_time:327962ms step_avg:41.20ms +[2025-09-11 10:55:51] [Rank 0] step:7981/10000 train_time:328666ms step_avg:41.18ms +[2025-09-11 10:55:51] [Rank 0] step:7981/10000 train_time:328666ms step_avg:41.18ms +[2025-09-11 10:55:52] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 10:55:52] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 10:55:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 10:55:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 10:55:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 10:55:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 10:55:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:55:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:55:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 10:55:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 10:55:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 10:55:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 10:55:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 10:55:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 10:55:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 10:55:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 10:55:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 10:55:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 10:55:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 10:55:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 10:55:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 10:55:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 10:55:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 10:55:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 10:55:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 10:55:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 10:55:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 10:55:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 10:56:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 10:56:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 10:56:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 10:56:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 10:56:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 10:56:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 10:56:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 10:56:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 10:56:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 10:56:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 10:56:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 10:56:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 10:56:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 10:56:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 10:56:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 10:56:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 10:56:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:56:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:56:02] [Rank 0] PRINT: step:8000/10000 val_loss:4.4761 total_sharp:1.2661e-03 L1_sharp:2.8181e-03 L2_sharp:3.5737e-04 L3_sharp:3.6015e-04 L4_sharp:3.5019e-04 L5_sharp:3.1622e-04 L6_sharp:5.1452e-04 L7_sharp:3.4176e-04 L8_sharp:6.3873e-04 L9_sharp:7.6873e-04 L10_sharp:8.2307e-04 L11_sharp:1.2552e-03 L12_sharp:3.6139e-03 total_fnorm:7.7500e+00 total_l1_linf:1.3312e+04 total_spectral:3.8125e+00 L1_fnorm:1.2188e+00 L2_fnorm:1.1719e+00 L3_fnorm:1.1875e+00 L4_fnorm:1.1797e+00 L5_fnorm:1.1719e+00 L6_fnorm:1.1797e+00 L7_fnorm:1.1797e+00 L8_fnorm:1.1406e+00 L9_fnorm:1.1719e+00 L10_fnorm:1.1641e+00 L11_fnorm:1.1719e+00 L12_fnorm:1.1719e+00 L1_l1linf:2.4316e-01 L2_l1linf:2.3340e-01 L3_l1linf:2.2363e-01 L4_l1linf:2.2852e-01 L5_l1linf:2.3242e-01 L6_l1linf:2.3242e-01 L7_l1linf:2.3145e-01 L8_l1linf:2.1289e-01 L9_l1linf:2.0312e-01 L10_l1linf:1.9629e-01 L11_l1linf:2.1191e-01 L12_l1linf:2.4023e-01 L1_spectral:1.6565e-02 L2_spectral:1.6103e-02 L3_spectral:1.6513e-02 L4_spectral:1.6506e-02 L5_spectral:1.6544e-02 L6_spectral:1.6437e-02 L7_spectral:1.6434e-02 L8_spectral:1.6774e-02 L9_spectral:1.6837e-02 L10_spectral:1.6689e-02 L11_spectral:1.6866e-02 L12_spectral:1.6748e-02 train_time:329351ms step_avg:41.17ms +[2025-09-11 10:56:02] [Rank 0] PRINT: step:8000/10000 
val_loss:4.4761 total_sharp:1.2661e-03 L1_sharp:2.8181e-03 L2_sharp:3.5737e-04 L3_sharp:3.6015e-04 L4_sharp:3.5019e-04 L5_sharp:3.1622e-04 L6_sharp:5.1452e-04 L7_sharp:3.4176e-04 L8_sharp:6.3873e-04 L9_sharp:7.6873e-04 L10_sharp:8.2307e-04 L11_sharp:1.2552e-03 L12_sharp:3.6139e-03 total_fnorm:7.7500e+00 total_l1_linf:1.3312e+04 total_spectral:3.8125e+00 L1_fnorm:1.2188e+00 L2_fnorm:1.1719e+00 L3_fnorm:1.1875e+00 L4_fnorm:1.1797e+00 L5_fnorm:1.1719e+00 L6_fnorm:1.1797e+00 L7_fnorm:1.1797e+00 L8_fnorm:1.1406e+00 L9_fnorm:1.1719e+00 L10_fnorm:1.1641e+00 L11_fnorm:1.1719e+00 L12_fnorm:1.1719e+00 L1_l1linf:2.4316e-01 L2_l1linf:2.3340e-01 L3_l1linf:2.2363e-01 L4_l1linf:2.2852e-01 L5_l1linf:2.3242e-01 L6_l1linf:2.3242e-01 L7_l1linf:2.3145e-01 L8_l1linf:2.1289e-01 L9_l1linf:2.0312e-01 L10_l1linf:1.9629e-01 L11_l1linf:2.1191e-01 L12_l1linf:2.4023e-01 L1_spectral:1.6565e-02 L2_spectral:1.6103e-02 L3_spectral:1.6513e-02 L4_spectral:1.6506e-02 L5_spectral:1.6544e-02 L6_spectral:1.6437e-02 L7_spectral:1.6434e-02 L8_spectral:1.6774e-02 L9_spectral:1.6837e-02 L10_spectral:1.6689e-02 L11_spectral:1.6866e-02 L12_spectral:1.6748e-02 train_time:329351ms step_avg:41.17ms +[2025-09-11 10:56:03] [Rank 0] step:8001/10000 train_time:330597ms step_avg:41.32ms +[2025-09-11 10:56:03] [Rank 0] step:8001/10000 train_time:330597ms step_avg:41.32ms +[2025-09-11 10:56:04] [Rank 0] step:8021/10000 train_time:331323ms step_avg:41.31ms +[2025-09-11 10:56:04] [Rank 0] step:8021/10000 train_time:331323ms step_avg:41.31ms +[2025-09-11 10:56:05] [Rank 0] step:8041/10000 train_time:332026ms step_avg:41.29ms +[2025-09-11 10:56:05] [Rank 0] step:8041/10000 train_time:332026ms step_avg:41.29ms +[2025-09-11 10:56:05] [Rank 0] step:8061/10000 train_time:332730ms step_avg:41.28ms +[2025-09-11 10:56:05] [Rank 0] step:8061/10000 train_time:332730ms step_avg:41.28ms +[2025-09-11 10:56:06] [Rank 0] step:8081/10000 train_time:333429ms step_avg:41.26ms +[2025-09-11 10:56:06] [Rank 0] step:8081/10000 
train_time:333429ms step_avg:41.26ms +[2025-09-11 10:56:07] [Rank 0] step:8101/10000 train_time:334129ms step_avg:41.25ms +[2025-09-11 10:56:07] [Rank 0] step:8101/10000 train_time:334129ms step_avg:41.25ms +[2025-09-11 10:56:07] [Rank 0] step:8121/10000 train_time:334833ms step_avg:41.23ms +[2025-09-11 10:56:07] [Rank 0] step:8121/10000 train_time:334833ms step_avg:41.23ms +[2025-09-11 10:56:09] [Rank 0] step:8141/10000 train_time:336288ms step_avg:41.31ms +[2025-09-11 10:56:09] [Rank 0] step:8141/10000 train_time:336288ms step_avg:41.31ms +[2025-09-11 10:56:10] [Rank 0] step:8161/10000 train_time:336993ms step_avg:41.29ms +[2025-09-11 10:56:10] [Rank 0] step:8161/10000 train_time:336993ms step_avg:41.29ms +[2025-09-11 10:56:10] [Rank 0] step:8181/10000 train_time:337705ms step_avg:41.28ms +[2025-09-11 10:56:10] [Rank 0] step:8181/10000 train_time:337705ms step_avg:41.28ms +[2025-09-11 10:56:11] [Rank 0] step:8201/10000 train_time:338414ms step_avg:41.27ms +[2025-09-11 10:56:11] [Rank 0] step:8201/10000 train_time:338414ms step_avg:41.27ms +[2025-09-11 10:56:12] [Rank 0] step:8221/10000 train_time:339122ms step_avg:41.25ms +[2025-09-11 10:56:12] [Rank 0] step:8221/10000 train_time:339122ms step_avg:41.25ms +[2025-09-11 10:56:12] [Rank 0] step:8241/10000 train_time:339839ms step_avg:41.24ms +[2025-09-11 10:56:12] [Rank 0] step:8241/10000 train_time:339839ms step_avg:41.24ms +[2025-09-11 10:56:13] [Rank 0] step:8261/10000 train_time:340546ms step_avg:41.22ms +[2025-09-11 10:56:13] [Rank 0] step:8261/10000 train_time:340546ms step_avg:41.22ms +[2025-09-11 10:56:14] [Rank 0] step:8281/10000 train_time:341251ms step_avg:41.21ms +[2025-09-11 10:56:14] [Rank 0] step:8281/10000 train_time:341251ms step_avg:41.21ms +[2025-09-11 10:56:14] [Rank 0] step:8301/10000 train_time:341958ms step_avg:41.19ms +[2025-09-11 10:56:14] [Rank 0] step:8301/10000 train_time:341958ms step_avg:41.19ms +[2025-09-11 10:56:15] [Rank 0] step:8321/10000 train_time:342666ms step_avg:41.18ms 
+[2025-09-11 10:56:15] [Rank 0] step:8321/10000 train_time:342666ms step_avg:41.18ms +[2025-09-11 10:56:16] [Rank 0] step:8341/10000 train_time:343380ms step_avg:41.17ms +[2025-09-11 10:56:16] [Rank 0] step:8341/10000 train_time:343380ms step_avg:41.17ms +[2025-09-11 10:56:17] [Rank 0] step:8361/10000 train_time:344083ms step_avg:41.15ms +[2025-09-11 10:56:17] [Rank 0] step:8361/10000 train_time:344083ms step_avg:41.15ms +[2025-09-11 10:56:17] [Rank 0] step:8381/10000 train_time:344793ms step_avg:41.14ms +[2025-09-11 10:56:17] [Rank 0] step:8381/10000 train_time:344793ms step_avg:41.14ms +[2025-09-11 10:56:18] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 10:56:18] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 10:56:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 10:56:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 10:56:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 10:56:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 10:56:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:56:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:56:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 10:56:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 10:56:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 10:56:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 10:56:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 10:56:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 10:56:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 10:56:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 10:56:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 10:56:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 10:56:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 10:56:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 10:56:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 10:56:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 10:56:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 10:56:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 10:56:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 10:56:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 10:56:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 10:56:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 10:56:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 10:56:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 10:56:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 10:56:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 10:56:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 10:56:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 10:56:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 10:56:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 10:56:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 10:56:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 10:56:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 10:56:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 10:56:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 10:56:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 10:56:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 10:56:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 10:56:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:56:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:56:28] [Rank 0] PRINT: step:8400/10000 val_loss:4.4558 total_sharp:1.1571e-03 L1_sharp:1.8341e-03 L2_sharp:3.1879e-04 L3_sharp:3.3531e-04 L4_sharp:2.9091e-04 L5_sharp:2.3867e-04 L6_sharp:2.7966e-04 L7_sharp:2.5219e-04 L8_sharp:6.0587e-04 L9_sharp:6.9716e-04 L10_sharp:6.7787e-04 L11_sharp:1.0028e-03 L12_sharp:3.6038e-03 total_fnorm:5.4688e+00 total_l1_linf:9.0240e+03 total_spectral:2.7812e+00 L1_fnorm:9.8438e-01 L2_fnorm:9.1797e-01 L3_fnorm:9.2578e-01 L4_fnorm:9.2578e-01 L5_fnorm:9.2188e-01 L6_fnorm:9.2578e-01 L7_fnorm:9.2188e-01 L8_fnorm:8.9062e-01 L9_fnorm:9.1016e-01 L10_fnorm:9.0625e-01 L11_fnorm:9.0625e-01 L12_fnorm:9.1406e-01 L1_l1linf:1.7773e-01 L2_l1linf:1.7285e-01 L3_l1linf:1.6406e-01 L4_l1linf:1.7285e-01 L5_l1linf:1.6895e-01 L6_l1linf:1.6797e-01 L7_l1linf:1.7090e-01 L8_l1linf:1.5430e-01 L9_l1linf:1.4746e-01 L10_l1linf:1.4160e-01 L11_l1linf:1.4453e-01 L12_l1linf:1.7480e-01 L1_spectral:1.4123e-02 L2_spectral:1.2910e-02 L3_spectral:1.3282e-02 L4_spectral:1.3235e-02 L5_spectral:1.3205e-02 L6_spectral:1.3164e-02 L7_spectral:1.3287e-02 L8_spectral:1.3459e-02 L9_spectral:1.3382e-02 L10_spectral:1.3530e-02 L11_spectral:1.3404e-02 L12_spectral:1.3448e-02 train_time:345482ms step_avg:41.13ms +[2025-09-11 10:56:28] [Rank 0] PRINT: step:8400/10000 val_loss:4.4558 total_sharp:1.1571e-03 L1_sharp:1.8341e-03 L2_sharp:3.1879e-04 L3_sharp:3.3531e-04 L4_sharp:2.9091e-04 L5_sharp:2.3867e-04 L6_sharp:2.7966e-04 L7_sharp:2.5219e-04 L8_sharp:6.0587e-04 L9_sharp:6.9716e-04 L10_sharp:6.7787e-04 L11_sharp:1.0028e-03 L12_sharp:3.6038e-03 total_fnorm:5.4688e+00 total_l1_linf:9.0240e+03 total_spectral:2.7812e+00 L1_fnorm:9.8438e-01 L2_fnorm:9.1797e-01 L3_fnorm:9.2578e-01 L4_fnorm:9.2578e-01 L5_fnorm:9.2188e-01 L6_fnorm:9.2578e-01 L7_fnorm:9.2188e-01 L8_fnorm:8.9062e-01 L9_fnorm:9.1016e-01 L10_fnorm:9.0625e-01 L11_fnorm:9.0625e-01 L12_fnorm:9.1406e-01 L1_l1linf:1.7773e-01 L2_l1linf:1.7285e-01 L3_l1linf:1.6406e-01 L4_l1linf:1.7285e-01 L5_l1linf:1.6895e-01 
L6_l1linf:1.6797e-01 L7_l1linf:1.7090e-01 L8_l1linf:1.5430e-01 L9_l1linf:1.4746e-01 L10_l1linf:1.4160e-01 L11_l1linf:1.4453e-01 L12_l1linf:1.7480e-01 L1_spectral:1.4123e-02 L2_spectral:1.2910e-02 L3_spectral:1.3282e-02 L4_spectral:1.3235e-02 L5_spectral:1.3205e-02 L6_spectral:1.3164e-02 L7_spectral:1.3287e-02 L8_spectral:1.3459e-02 L9_spectral:1.3382e-02 L10_spectral:1.3530e-02 L11_spectral:1.3404e-02 L12_spectral:1.3448e-02 train_time:345482ms step_avg:41.13ms +[2025-09-11 10:56:29] [Rank 0] step:8401/10000 train_time:346765ms step_avg:41.28ms +[2025-09-11 10:56:29] [Rank 0] step:8401/10000 train_time:346765ms step_avg:41.28ms +[2025-09-11 10:56:30] [Rank 0] step:8421/10000 train_time:347523ms step_avg:41.27ms +[2025-09-11 10:56:30] [Rank 0] step:8421/10000 train_time:347523ms step_avg:41.27ms +[2025-09-11 10:56:31] [Rank 0] step:8441/10000 train_time:348235ms step_avg:41.26ms +[2025-09-11 10:56:31] [Rank 0] step:8441/10000 train_time:348235ms step_avg:41.26ms +[2025-09-11 10:56:31] [Rank 0] step:8461/10000 train_time:348946ms step_avg:41.24ms +[2025-09-11 10:56:31] [Rank 0] step:8461/10000 train_time:348946ms step_avg:41.24ms +[2025-09-11 10:56:32] [Rank 0] step:8481/10000 train_time:349658ms step_avg:41.23ms +[2025-09-11 10:56:32] [Rank 0] step:8481/10000 train_time:349658ms step_avg:41.23ms +[2025-09-11 10:56:33] [Rank 0] step:8501/10000 train_time:350366ms step_avg:41.21ms +[2025-09-11 10:56:33] [Rank 0] step:8501/10000 train_time:350366ms step_avg:41.21ms +[2025-09-11 10:56:34] [Rank 0] step:8521/10000 train_time:351075ms step_avg:41.20ms +[2025-09-11 10:56:34] [Rank 0] step:8521/10000 train_time:351075ms step_avg:41.20ms +[2025-09-11 10:56:34] [Rank 0] step:8541/10000 train_time:351782ms step_avg:41.19ms +[2025-09-11 10:56:34] [Rank 0] step:8541/10000 train_time:351782ms step_avg:41.19ms +[2025-09-11 10:56:35] [Rank 0] step:8561/10000 train_time:352497ms step_avg:41.17ms +[2025-09-11 10:56:35] [Rank 0] step:8561/10000 train_time:352497ms step_avg:41.17ms 
+[2025-09-11 10:56:36] [Rank 0] step:8581/10000 train_time:353209ms step_avg:41.16ms +[2025-09-11 10:56:36] [Rank 0] step:8581/10000 train_time:353209ms step_avg:41.16ms +[2025-09-11 10:56:36] [Rank 0] step:8601/10000 train_time:353919ms step_avg:41.15ms +[2025-09-11 10:56:36] [Rank 0] step:8601/10000 train_time:353919ms step_avg:41.15ms +[2025-09-11 10:56:37] [Rank 0] step:8621/10000 train_time:354627ms step_avg:41.14ms +[2025-09-11 10:56:37] [Rank 0] step:8621/10000 train_time:354627ms step_avg:41.14ms +[2025-09-11 10:56:38] [Rank 0] step:8641/10000 train_time:355335ms step_avg:41.12ms +[2025-09-11 10:56:38] [Rank 0] step:8641/10000 train_time:355335ms step_avg:41.12ms +[2025-09-11 10:56:39] [Rank 0] step:8661/10000 train_time:356046ms step_avg:41.11ms +[2025-09-11 10:56:39] [Rank 0] step:8661/10000 train_time:356046ms step_avg:41.11ms +[2025-09-11 10:56:39] [Rank 0] step:8681/10000 train_time:356757ms step_avg:41.10ms +[2025-09-11 10:56:39] [Rank 0] step:8681/10000 train_time:356757ms step_avg:41.10ms +[2025-09-11 10:56:40] [Rank 0] step:8701/10000 train_time:357464ms step_avg:41.08ms +[2025-09-11 10:56:40] [Rank 0] step:8701/10000 train_time:357464ms step_avg:41.08ms +[2025-09-11 10:56:41] [Rank 0] step:8721/10000 train_time:358176ms step_avg:41.07ms +[2025-09-11 10:56:41] [Rank 0] step:8721/10000 train_time:358176ms step_avg:41.07ms +[2025-09-11 10:56:41] [Rank 0] step:8741/10000 train_time:358882ms step_avg:41.06ms +[2025-09-11 10:56:41] [Rank 0] step:8741/10000 train_time:358882ms step_avg:41.06ms +[2025-09-11 10:56:42] [Rank 0] step:8761/10000 train_time:359595ms step_avg:41.04ms +[2025-09-11 10:56:42] [Rank 0] step:8761/10000 train_time:359595ms step_avg:41.04ms +[2025-09-11 10:56:43] [Rank 0] step:8781/10000 train_time:360302ms step_avg:41.03ms +[2025-09-11 10:56:43] [Rank 0] step:8781/10000 train_time:360302ms step_avg:41.03ms +[2025-09-11 10:56:43] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 10:56:43] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 10:56:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 10:56:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 10:56:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 10:56:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 10:56:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:56:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:56:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 10:56:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 10:56:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 10:56:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 10:56:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 10:56:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 10:56:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 10:56:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 10:56:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 10:56:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 10:56:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 10:56:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 10:56:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 10:56:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 10:56:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 10:56:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 10:56:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 10:56:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 10:56:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 10:56:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 10:56:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 10:56:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 10:56:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 10:56:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 10:56:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 10:56:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 10:56:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 10:56:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 10:56:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 10:56:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 10:56:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 10:56:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 10:56:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 10:56:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 10:56:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 10:56:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 10:56:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:56:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:56:53] [Rank 0] PRINT: step:8800/10000 val_loss:4.4428 total_sharp:1.1603e-03 L1_sharp:1.8783e-03 L2_sharp:2.9434e-04 L3_sharp:2.3487e-04 L4_sharp:2.2001e-04 L5_sharp:1.4759e-04 L6_sharp:2.1360e-04 L7_sharp:3.0041e-04 L8_sharp:5.0906e-04 L9_sharp:6.2684e-04 L10_sharp:6.6103e-04 L11_sharp:1.0018e-03 L12_sharp:5.6345e-03 total_fnorm:3.9844e+00 total_l1_linf:5.9200e+03 total_spectral:2.0312e+00 L1_fnorm:7.4609e-01 L2_fnorm:6.7578e-01 L3_fnorm:6.7969e-01 L4_fnorm:6.8750e-01 L5_fnorm:6.7969e-01 L6_fnorm:6.8359e-01 L7_fnorm:6.7969e-01 L8_fnorm:6.5625e-01 L9_fnorm:6.7188e-01 L10_fnorm:6.6406e-01 L11_fnorm:6.6797e-01 L12_fnorm:6.7188e-01 L1_l1linf:1.2891e-01 L2_l1linf:1.1328e-01 L3_l1linf:1.1475e-01 L4_l1linf:1.1523e-01 L5_l1linf:1.1328e-01 L6_l1linf:1.1523e-01 L7_l1linf:1.1816e-01 L8_l1linf:1.0596e-01 L9_l1linf:9.8633e-02 L10_l1linf:9.4238e-02 L11_l1linf:9.7168e-02 L12_l1linf:1.2354e-01 L1_spectral:1.1515e-02 L2_spectral:9.6831e-03 L3_spectral:1.0096e-02 L4_spectral:9.9170e-03 L5_spectral:9.9488e-03 L6_spectral:9.9311e-03 L7_spectral:9.8436e-03 L8_spectral:1.0159e-02 L9_spectral:1.0117e-02 L10_spectral:1.0044e-02 L11_spectral:1.0077e-02 L12_spectral:1.0055e-02 train_time:360993ms step_avg:41.02ms +[2025-09-11 10:56:53] [Rank 0] PRINT: step:8800/10000 
val_loss:4.4428 total_sharp:1.1603e-03 L1_sharp:1.8783e-03 L2_sharp:2.9434e-04 L3_sharp:2.3487e-04 L4_sharp:2.2001e-04 L5_sharp:1.4759e-04 L6_sharp:2.1360e-04 L7_sharp:3.0041e-04 L8_sharp:5.0906e-04 L9_sharp:6.2684e-04 L10_sharp:6.6103e-04 L11_sharp:1.0018e-03 L12_sharp:5.6345e-03 total_fnorm:3.9844e+00 total_l1_linf:5.9200e+03 total_spectral:2.0312e+00 L1_fnorm:7.4609e-01 L2_fnorm:6.7578e-01 L3_fnorm:6.7969e-01 L4_fnorm:6.8750e-01 L5_fnorm:6.7969e-01 L6_fnorm:6.8359e-01 L7_fnorm:6.7969e-01 L8_fnorm:6.5625e-01 L9_fnorm:6.7188e-01 L10_fnorm:6.6406e-01 L11_fnorm:6.6797e-01 L12_fnorm:6.7188e-01 L1_l1linf:1.2891e-01 L2_l1linf:1.1328e-01 L3_l1linf:1.1475e-01 L4_l1linf:1.1523e-01 L5_l1linf:1.1328e-01 L6_l1linf:1.1523e-01 L7_l1linf:1.1816e-01 L8_l1linf:1.0596e-01 L9_l1linf:9.8633e-02 L10_l1linf:9.4238e-02 L11_l1linf:9.7168e-02 L12_l1linf:1.2354e-01 L1_spectral:1.1515e-02 L2_spectral:9.6831e-03 L3_spectral:1.0096e-02 L4_spectral:9.9170e-03 L5_spectral:9.9488e-03 L6_spectral:9.9311e-03 L7_spectral:9.8436e-03 L8_spectral:1.0159e-02 L9_spectral:1.0117e-02 L10_spectral:1.0044e-02 L11_spectral:1.0077e-02 L12_spectral:1.0055e-02 train_time:360993ms step_avg:41.02ms +[2025-09-11 10:56:55] [Rank 0] step:8801/10000 train_time:362261ms step_avg:41.16ms +[2025-09-11 10:56:55] [Rank 0] step:8801/10000 train_time:362261ms step_avg:41.16ms +[2025-09-11 10:56:55] [Rank 0] step:8821/10000 train_time:362986ms step_avg:41.15ms +[2025-09-11 10:56:55] [Rank 0] step:8821/10000 train_time:362986ms step_avg:41.15ms +[2025-09-11 10:56:56] [Rank 0] step:8841/10000 train_time:363696ms step_avg:41.14ms +[2025-09-11 10:56:56] [Rank 0] step:8841/10000 train_time:363696ms step_avg:41.14ms +[2025-09-11 10:56:57] [Rank 0] step:8861/10000 train_time:364405ms step_avg:41.12ms +[2025-09-11 10:56:57] [Rank 0] step:8861/10000 train_time:364405ms step_avg:41.12ms +[2025-09-11 10:56:58] [Rank 0] step:8881/10000 train_time:365114ms step_avg:41.11ms +[2025-09-11 10:56:58] [Rank 0] step:8881/10000 
train_time:365114ms step_avg:41.11ms +[2025-09-11 10:56:58] [Rank 0] step:8901/10000 train_time:365826ms step_avg:41.10ms +[2025-09-11 10:56:58] [Rank 0] step:8901/10000 train_time:365826ms step_avg:41.10ms +[2025-09-11 10:56:59] [Rank 0] step:8921/10000 train_time:366532ms step_avg:41.09ms +[2025-09-11 10:56:59] [Rank 0] step:8921/10000 train_time:366532ms step_avg:41.09ms +[2025-09-11 10:57:00] [Rank 0] step:8941/10000 train_time:367243ms step_avg:41.07ms +[2025-09-11 10:57:00] [Rank 0] step:8941/10000 train_time:367243ms step_avg:41.07ms +[2025-09-11 10:57:00] [Rank 0] step:8961/10000 train_time:367961ms step_avg:41.06ms +[2025-09-11 10:57:00] [Rank 0] step:8961/10000 train_time:367961ms step_avg:41.06ms +[2025-09-11 10:57:01] [Rank 0] step:8981/10000 train_time:368674ms step_avg:41.05ms +[2025-09-11 10:57:01] [Rank 0] step:8981/10000 train_time:368674ms step_avg:41.05ms +[2025-09-11 10:57:02] [Rank 0] step:9001/10000 train_time:369379ms step_avg:41.04ms +[2025-09-11 10:57:02] [Rank 0] step:9001/10000 train_time:369379ms step_avg:41.04ms +[2025-09-11 10:57:03] [Rank 0] step:9021/10000 train_time:370090ms step_avg:41.03ms +[2025-09-11 10:57:03] [Rank 0] step:9021/10000 train_time:370090ms step_avg:41.03ms +[2025-09-11 10:57:03] [Rank 0] step:9041/10000 train_time:370803ms step_avg:41.01ms +[2025-09-11 10:57:03] [Rank 0] step:9041/10000 train_time:370803ms step_avg:41.01ms +[2025-09-11 10:57:04] [Rank 0] step:9061/10000 train_time:371512ms step_avg:41.00ms +[2025-09-11 10:57:04] [Rank 0] step:9061/10000 train_time:371512ms step_avg:41.00ms +[2025-09-11 10:57:05] [Rank 0] step:9081/10000 train_time:372224ms step_avg:40.99ms +[2025-09-11 10:57:05] [Rank 0] step:9081/10000 train_time:372224ms step_avg:40.99ms +[2025-09-11 10:57:05] [Rank 0] step:9101/10000 train_time:372937ms step_avg:40.98ms +[2025-09-11 10:57:05] [Rank 0] step:9101/10000 train_time:372937ms step_avg:40.98ms +[2025-09-11 10:57:06] [Rank 0] step:9121/10000 train_time:373652ms step_avg:40.97ms 
+[2025-09-11 10:57:06] [Rank 0] step:9121/10000 train_time:373652ms step_avg:40.97ms +[2025-09-11 10:57:07] [Rank 0] step:9141/10000 train_time:374360ms step_avg:40.95ms +[2025-09-11 10:57:07] [Rank 0] step:9141/10000 train_time:374360ms step_avg:40.95ms +[2025-09-11 10:57:08] [Rank 0] step:9161/10000 train_time:375071ms step_avg:40.94ms +[2025-09-11 10:57:08] [Rank 0] step:9161/10000 train_time:375071ms step_avg:40.94ms +[2025-09-11 10:57:08] [Rank 0] step:9181/10000 train_time:375783ms step_avg:40.93ms +[2025-09-11 10:57:08] [Rank 0] step:9181/10000 train_time:375783ms step_avg:40.93ms +[2025-09-11 10:57:09] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 10:57:09] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 10:57:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 10:57:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 10:57:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 10:57:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 10:57:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:57:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:57:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 10:57:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 10:57:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 10:57:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 10:57:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 10:57:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 10:57:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 10:57:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 10:57:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 10:57:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 10:57:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 10:57:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 10:57:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 10:57:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 10:57:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 10:57:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 10:57:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 10:57:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 10:57:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 10:57:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 10:57:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 10:57:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 10:57:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 10:57:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 10:57:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 10:57:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 10:57:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 10:57:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 10:57:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 10:57:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 10:57:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 10:57:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 10:57:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 10:57:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 10:57:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 10:57:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 10:57:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:57:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:57:19] [Rank 0] PRINT: step:9200/10000 val_loss:4.4237 total_sharp:1.1056e-03 L1_sharp:2.6433e-03 L2_sharp:2.5118e-04 L3_sharp:1.4156e-04 L4_sharp:2.2195e-04 L5_sharp:2.0319e-04 L6_sharp:2.0305e-04 L7_sharp:2.9947e-04 L8_sharp:4.8200e-04 L9_sharp:5.3995e-04 L10_sharp:5.8565e-04 L11_sharp:8.3213e-04 L12_sharp:3.9654e-03 total_fnorm:2.6406e+00 total_l1_linf:3.3600e+03 total_spectral:1.3281e+00 L1_fnorm:5.1172e-01 L2_fnorm:4.4336e-01 L3_fnorm:4.5117e-01 L4_fnorm:4.5117e-01 L5_fnorm:4.4531e-01 L6_fnorm:4.4922e-01 L7_fnorm:4.4922e-01 L8_fnorm:4.3555e-01 L9_fnorm:4.4141e-01 L10_fnorm:4.3750e-01 L11_fnorm:4.4141e-01 L12_fnorm:4.4336e-01 L1_l1linf:8.7402e-02 L2_l1linf:7.0312e-02 L3_l1linf:6.4941e-02 L4_l1linf:6.7871e-02 L5_l1linf:6.7871e-02 L6_l1linf:6.7871e-02 L7_l1linf:6.8359e-02 L8_l1linf:6.5430e-02 L9_l1linf:5.8105e-02 L10_l1linf:5.6641e-02 L11_l1linf:5.9082e-02 L12_l1linf:7.5195e-02 L1_spectral:8.7804e-03 L2_spectral:6.4793e-03 L3_spectral:6.7568e-03 L4_spectral:6.7301e-03 L5_spectral:6.5601e-03 L6_spectral:6.6867e-03 L7_spectral:6.7174e-03 L8_spectral:7.0328e-03 L9_spectral:6.8035e-03 L10_spectral:6.7460e-03 L11_spectral:6.7263e-03 L12_spectral:6.8439e-03 train_time:376476ms step_avg:40.92ms +[2025-09-11 10:57:19] [Rank 0] PRINT: step:9200/10000 val_loss:4.4237 total_sharp:1.1056e-03 L1_sharp:2.6433e-03 L2_sharp:2.5118e-04 L3_sharp:1.4156e-04 L4_sharp:2.2195e-04 L5_sharp:2.0319e-04 L6_sharp:2.0305e-04 L7_sharp:2.9947e-04 L8_sharp:4.8200e-04 L9_sharp:5.3995e-04 L10_sharp:5.8565e-04 L11_sharp:8.3213e-04 L12_sharp:3.9654e-03 total_fnorm:2.6406e+00 total_l1_linf:3.3600e+03 total_spectral:1.3281e+00 L1_fnorm:5.1172e-01 L2_fnorm:4.4336e-01 L3_fnorm:4.5117e-01 L4_fnorm:4.5117e-01 L5_fnorm:4.4531e-01 L6_fnorm:4.4922e-01 L7_fnorm:4.4922e-01 L8_fnorm:4.3555e-01 L9_fnorm:4.4141e-01 L10_fnorm:4.3750e-01 L11_fnorm:4.4141e-01 L12_fnorm:4.4336e-01 L1_l1linf:8.7402e-02 L2_l1linf:7.0312e-02 L3_l1linf:6.4941e-02 L4_l1linf:6.7871e-02 L5_l1linf:6.7871e-02 
L6_l1linf:6.7871e-02 L7_l1linf:6.8359e-02 L8_l1linf:6.5430e-02 L9_l1linf:5.8105e-02 L10_l1linf:5.6641e-02 L11_l1linf:5.9082e-02 L12_l1linf:7.5195e-02 L1_spectral:8.7804e-03 L2_spectral:6.4793e-03 L3_spectral:6.7568e-03 L4_spectral:6.7301e-03 L5_spectral:6.5601e-03 L6_spectral:6.6867e-03 L7_spectral:6.7174e-03 L8_spectral:7.0328e-03 L9_spectral:6.8035e-03 L10_spectral:6.7460e-03 L11_spectral:6.7263e-03 L12_spectral:6.8439e-03 train_time:376476ms step_avg:40.92ms +[2025-09-11 10:57:20] [Rank 0] step:9201/10000 train_time:377750ms step_avg:41.06ms +[2025-09-11 10:57:20] [Rank 0] step:9201/10000 train_time:377750ms step_avg:41.06ms +[2025-09-11 10:57:21] [Rank 0] step:9221/10000 train_time:378477ms step_avg:41.05ms +[2025-09-11 10:57:21] [Rank 0] step:9221/10000 train_time:378477ms step_avg:41.05ms +[2025-09-11 10:57:22] [Rank 0] step:9241/10000 train_time:379185ms step_avg:41.03ms +[2025-09-11 10:57:22] [Rank 0] step:9241/10000 train_time:379185ms step_avg:41.03ms +[2025-09-11 10:57:22] [Rank 0] step:9261/10000 train_time:379897ms step_avg:41.02ms +[2025-09-11 10:57:22] [Rank 0] step:9261/10000 train_time:379897ms step_avg:41.02ms +[2025-09-11 10:57:23] [Rank 0] step:9281/10000 train_time:380610ms step_avg:41.01ms +[2025-09-11 10:57:23] [Rank 0] step:9281/10000 train_time:380610ms step_avg:41.01ms +[2025-09-11 10:57:24] [Rank 0] step:9301/10000 train_time:381317ms step_avg:41.00ms +[2025-09-11 10:57:24] [Rank 0] step:9301/10000 train_time:381317ms step_avg:41.00ms +[2025-09-11 10:57:25] [Rank 0] step:9321/10000 train_time:382029ms step_avg:40.99ms +[2025-09-11 10:57:25] [Rank 0] step:9321/10000 train_time:382029ms step_avg:40.99ms +[2025-09-11 10:57:25] [Rank 0] step:9341/10000 train_time:382736ms step_avg:40.97ms +[2025-09-11 10:57:25] [Rank 0] step:9341/10000 train_time:382736ms step_avg:40.97ms +[2025-09-11 10:57:26] [Rank 0] step:9361/10000 train_time:383443ms step_avg:40.96ms +[2025-09-11 10:57:26] [Rank 0] step:9361/10000 train_time:383443ms step_avg:40.96ms 
+[2025-09-11 10:57:27] [Rank 0] step:9381/10000 train_time:384150ms step_avg:40.95ms +[2025-09-11 10:57:27] [Rank 0] step:9381/10000 train_time:384150ms step_avg:40.95ms +[2025-09-11 10:57:27] [Rank 0] step:9401/10000 train_time:384861ms step_avg:40.94ms +[2025-09-11 10:57:27] [Rank 0] step:9401/10000 train_time:384861ms step_avg:40.94ms +[2025-09-11 10:57:28] [Rank 0] step:9421/10000 train_time:385573ms step_avg:40.93ms +[2025-09-11 10:57:28] [Rank 0] step:9421/10000 train_time:385573ms step_avg:40.93ms +[2025-09-11 10:57:29] [Rank 0] step:9441/10000 train_time:386287ms step_avg:40.92ms +[2025-09-11 10:57:29] [Rank 0] step:9441/10000 train_time:386287ms step_avg:40.92ms +[2025-09-11 10:57:30] [Rank 0] step:9461/10000 train_time:386996ms step_avg:40.90ms +[2025-09-11 10:57:30] [Rank 0] step:9461/10000 train_time:386996ms step_avg:40.90ms +[2025-09-11 10:57:30] [Rank 0] step:9481/10000 train_time:387708ms step_avg:40.89ms +[2025-09-11 10:57:30] [Rank 0] step:9481/10000 train_time:387708ms step_avg:40.89ms +[2025-09-11 10:57:31] [Rank 0] step:9501/10000 train_time:388419ms step_avg:40.88ms +[2025-09-11 10:57:31] [Rank 0] step:9501/10000 train_time:388419ms step_avg:40.88ms +[2025-09-11 10:57:32] [Rank 0] step:9521/10000 train_time:389131ms step_avg:40.87ms +[2025-09-11 10:57:32] [Rank 0] step:9521/10000 train_time:389131ms step_avg:40.87ms +[2025-09-11 10:57:32] [Rank 0] step:9541/10000 train_time:389839ms step_avg:40.86ms +[2025-09-11 10:57:32] [Rank 0] step:9541/10000 train_time:389839ms step_avg:40.86ms +[2025-09-11 10:57:33] [Rank 0] step:9561/10000 train_time:390548ms step_avg:40.85ms +[2025-09-11 10:57:33] [Rank 0] step:9561/10000 train_time:390548ms step_avg:40.85ms +[2025-09-11 10:57:34] [Rank 0] step:9581/10000 train_time:391259ms step_avg:40.84ms +[2025-09-11 10:57:34] [Rank 0] step:9581/10000 train_time:391259ms step_avg:40.84ms +[2025-09-11 10:57:34] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 10:57:34] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 10:57:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 10:57:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 10:57:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 10:57:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 10:57:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:57:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:57:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 10:57:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 10:57:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 10:57:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 10:57:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 10:57:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 10:57:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 10:57:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 10:57:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 10:57:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 10:57:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 10:57:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 10:57:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 10:57:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 10:57:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 10:57:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 10:57:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 10:57:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 10:57:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 10:57:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 10:57:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 10:57:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 10:57:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 10:57:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 10:57:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 10:57:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 10:57:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 10:57:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 10:57:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 10:57:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 10:57:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 10:57:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 10:57:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 10:57:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 10:57:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 10:57:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 10:57:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:57:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:57:45] [Rank 0] PRINT: step:9600/10000 val_loss:4.4153 total_sharp:6.6395e-04 L1_sharp:1.7776e-03 L2_sharp:1.5218e-04 L3_sharp:1.3436e-04 L4_sharp:1.8579e-04 L5_sharp:1.5334e-04 L6_sharp:2.4353e-04 L7_sharp:1.9157e-04 L8_sharp:3.2225e-04 L9_sharp:4.1278e-04 L10_sharp:3.8488e-04 L11_sharp:5.9454e-04 L12_sharp:2.1225e-03 total_fnorm:1.4844e+00 total_l1_linf:1.6000e+03 total_spectral:7.5000e-01 L1_fnorm:2.9883e-01 L2_fnorm:2.5391e-01 L3_fnorm:2.5781e-01 L4_fnorm:2.5781e-01 L5_fnorm:2.5391e-01 L6_fnorm:2.5586e-01 L7_fnorm:2.5391e-01 L8_fnorm:2.4707e-01 L9_fnorm:2.5195e-01 L10_fnorm:2.4902e-01 L11_fnorm:2.5000e-01 L12_fnorm:2.5195e-01 L1_l1linf:4.8584e-02 L2_l1linf:3.3936e-02 L3_l1linf:3.2959e-02 L4_l1linf:3.2471e-02 L5_l1linf:3.3447e-02 L6_l1linf:3.2959e-02 L7_l1linf:3.3203e-02 L8_l1linf:3.1738e-02 L9_l1linf:2.9663e-02 L10_l1linf:2.7222e-02 L11_l1linf:2.9663e-02 L12_l1linf:3.7598e-02 L1_spectral:5.6416e-03 L2_spectral:3.7570e-03 L3_spectral:3.9595e-03 L4_spectral:3.8969e-03 L5_spectral:3.8610e-03 L6_spectral:3.9350e-03 L7_spectral:3.8721e-03 L8_spectral:4.1596e-03 L9_spectral:3.9622e-03 L10_spectral:3.9414e-03 L11_spectral:3.9562e-03 L12_spectral:4.0119e-03 train_time:391947ms step_avg:40.83ms +[2025-09-11 10:57:45] [Rank 0] PRINT: step:9600/10000 
val_loss:4.4153 total_sharp:6.6395e-04 L1_sharp:1.7776e-03 L2_sharp:1.5218e-04 L3_sharp:1.3436e-04 L4_sharp:1.8579e-04 L5_sharp:1.5334e-04 L6_sharp:2.4353e-04 L7_sharp:1.9157e-04 L8_sharp:3.2225e-04 L9_sharp:4.1278e-04 L10_sharp:3.8488e-04 L11_sharp:5.9454e-04 L12_sharp:2.1225e-03 total_fnorm:1.4844e+00 total_l1_linf:1.6000e+03 total_spectral:7.5000e-01 L1_fnorm:2.9883e-01 L2_fnorm:2.5391e-01 L3_fnorm:2.5781e-01 L4_fnorm:2.5781e-01 L5_fnorm:2.5391e-01 L6_fnorm:2.5586e-01 L7_fnorm:2.5391e-01 L8_fnorm:2.4707e-01 L9_fnorm:2.5195e-01 L10_fnorm:2.4902e-01 L11_fnorm:2.5000e-01 L12_fnorm:2.5195e-01 L1_l1linf:4.8584e-02 L2_l1linf:3.3936e-02 L3_l1linf:3.2959e-02 L4_l1linf:3.2471e-02 L5_l1linf:3.3447e-02 L6_l1linf:3.2959e-02 L7_l1linf:3.3203e-02 L8_l1linf:3.1738e-02 L9_l1linf:2.9663e-02 L10_l1linf:2.7222e-02 L11_l1linf:2.9663e-02 L12_l1linf:3.7598e-02 L1_spectral:5.6416e-03 L2_spectral:3.7570e-03 L3_spectral:3.9595e-03 L4_spectral:3.8969e-03 L5_spectral:3.8610e-03 L6_spectral:3.9350e-03 L7_spectral:3.8721e-03 L8_spectral:4.1596e-03 L9_spectral:3.9622e-03 L10_spectral:3.9414e-03 L11_spectral:3.9562e-03 L12_spectral:4.0119e-03 train_time:391947ms step_avg:40.83ms +[2025-09-11 10:57:46] [Rank 0] step:9601/10000 train_time:393242ms step_avg:40.96ms +[2025-09-11 10:57:46] [Rank 0] step:9601/10000 train_time:393242ms step_avg:40.96ms +[2025-09-11 10:57:47] [Rank 0] step:9621/10000 train_time:393973ms step_avg:40.95ms +[2025-09-11 10:57:47] [Rank 0] step:9621/10000 train_time:393973ms step_avg:40.95ms +[2025-09-11 10:57:47] [Rank 0] step:9641/10000 train_time:394689ms step_avg:40.94ms +[2025-09-11 10:57:47] [Rank 0] step:9641/10000 train_time:394689ms step_avg:40.94ms +[2025-09-11 10:57:48] [Rank 0] step:9661/10000 train_time:395412ms step_avg:40.93ms +[2025-09-11 10:57:48] [Rank 0] step:9661/10000 train_time:395412ms step_avg:40.93ms +[2025-09-11 10:57:49] [Rank 0] step:9681/10000 train_time:396126ms step_avg:40.92ms +[2025-09-11 10:57:49] [Rank 0] step:9681/10000 
train_time:396126ms step_avg:40.92ms +[2025-09-11 10:57:50] [Rank 0] step:9701/10000 train_time:396843ms step_avg:40.91ms +[2025-09-11 10:57:50] [Rank 0] step:9701/10000 train_time:396843ms step_avg:40.91ms +[2025-09-11 10:57:51] [Rank 0] step:9721/10000 train_time:397932ms step_avg:40.94ms +[2025-09-11 10:57:51] [Rank 0] step:9721/10000 train_time:397932ms step_avg:40.94ms +[2025-09-11 10:57:52] [Rank 0] step:9741/10000 train_time:398852ms step_avg:40.95ms +[2025-09-11 10:57:52] [Rank 0] step:9741/10000 train_time:398852ms step_avg:40.95ms +[2025-09-11 10:57:52] [Rank 0] step:9761/10000 train_time:399569ms step_avg:40.94ms +[2025-09-11 10:57:52] [Rank 0] step:9761/10000 train_time:399569ms step_avg:40.94ms +[2025-09-11 10:57:53] [Rank 0] step:9781/10000 train_time:400541ms step_avg:40.95ms +[2025-09-11 10:57:53] [Rank 0] step:9781/10000 train_time:400541ms step_avg:40.95ms +[2025-09-11 10:57:54] [Rank 0] step:9801/10000 train_time:401263ms step_avg:40.94ms +[2025-09-11 10:57:54] [Rank 0] step:9801/10000 train_time:401263ms step_avg:40.94ms +[2025-09-11 10:57:55] [Rank 0] step:9821/10000 train_time:401982ms step_avg:40.93ms +[2025-09-11 10:57:55] [Rank 0] step:9821/10000 train_time:401982ms step_avg:40.93ms +[2025-09-11 10:57:55] [Rank 0] step:9841/10000 train_time:402703ms step_avg:40.92ms +[2025-09-11 10:57:55] [Rank 0] step:9841/10000 train_time:402703ms step_avg:40.92ms +[2025-09-11 10:57:56] [Rank 0] step:9861/10000 train_time:403421ms step_avg:40.91ms +[2025-09-11 10:57:56] [Rank 0] step:9861/10000 train_time:403421ms step_avg:40.91ms +[2025-09-11 10:57:57] [Rank 0] step:9881/10000 train_time:404139ms step_avg:40.90ms +[2025-09-11 10:57:57] [Rank 0] step:9881/10000 train_time:404139ms step_avg:40.90ms +[2025-09-11 10:57:58] [Rank 0] step:9901/10000 train_time:404854ms step_avg:40.89ms +[2025-09-11 10:57:58] [Rank 0] step:9901/10000 train_time:404854ms step_avg:40.89ms +[2025-09-11 10:57:58] [Rank 0] step:9921/10000 train_time:405571ms step_avg:40.88ms 
+[2025-09-11 10:57:58] [Rank 0] step:9921/10000 train_time:405571ms step_avg:40.88ms +[2025-09-11 10:57:59] [Rank 0] step:9941/10000 train_time:406293ms step_avg:40.87ms +[2025-09-11 10:57:59] [Rank 0] step:9941/10000 train_time:406293ms step_avg:40.87ms +[2025-09-11 10:58:00] [Rank 0] step:9961/10000 train_time:407015ms step_avg:40.86ms +[2025-09-11 10:58:00] [Rank 0] step:9961/10000 train_time:407015ms step_avg:40.86ms +[2025-09-11 10:58:00] [Rank 0] step:9981/10000 train_time:407732ms step_avg:40.85ms +[2025-09-11 10:58:00] [Rank 0] step:9981/10000 train_time:407732ms step_avg:40.85ms +[2025-09-11 10:58:01] [Rank 0] step:10000/10000 train_time:408422ms step_avg:40.84ms +[2025-09-11 10:58:01] [Rank 0] step:10000/10000 train_time:408422ms step_avg:40.84ms +[2025-09-11 10:58:01] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 10:58:01] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 10:58:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 10:58:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 10:58:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 10:58:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 10:58:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:58:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:58:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 10:58:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 10:58:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 10:58:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 10:58:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 10:58:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 10:58:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 10:58:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 10:58:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 10:58:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 10:58:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 10:58:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 10:58:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 10:58:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 10:58:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 10:58:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 10:58:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 10:58:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 10:58:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 10:58:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 10:58:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 10:58:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 10:58:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 10:58:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 10:58:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 10:58:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 10:58:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 10:58:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 10:58:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 10:58:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 10:58:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 10:58:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 10:58:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 10:58:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 10:58:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 10:58:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 10:58:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:58:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:58:11] [Rank 0] PRINT: step:10000/10000 val_loss:4.4118 total_sharp:4.4257e-04 L1_sharp:1.5664e-03 L2_sharp:9.9209e-05 L3_sharp:3.7096e-05 L4_sharp:8.7069e-05 L5_sharp:7.9398e-05 L6_sharp:1.4352e-04 L7_sharp:1.2009e-04 L8_sharp:2.5890e-04 L9_sharp:2.9222e-04 L10_sharp:2.9070e-04 L11_sharp:4.3353e-04 L12_sharp:1.4544e-03 total_fnorm:5.8203e-01 total_l1_linf:4.4600e+02 total_spectral:2.8711e-01 L1_fnorm:1.2256e-01 L2_fnorm:9.6680e-02 L3_fnorm:9.8145e-02 L4_fnorm:9.8633e-02 L5_fnorm:9.7168e-02 L6_fnorm:9.8145e-02 L7_fnorm:9.8145e-02 L8_fnorm:9.5215e-02 L9_fnorm:9.6680e-02 L10_fnorm:9.6191e-02 L11_fnorm:9.6680e-02 L12_fnorm:9.7656e-02 L1_l1linf:1.6357e-02 L2_l1linf:1.0925e-02 L3_l1linf:9.3994e-03 L4_l1linf:1.0437e-02 L5_l1linf:1.0437e-02 L6_l1linf:1.0559e-02 L7_l1linf:1.0498e-02 L8_l1linf:1.0010e-02 L9_l1linf:8.6670e-03 L10_l1linf:7.9346e-03 L11_l1linf:8.9111e-03 L12_l1linf:1.0986e-02 L1_spectral:2.5230e-03 L2_spectral:1.4932e-03 L3_spectral:1.5623e-03 L4_spectral:1.5413e-03 L5_spectral:1.5239e-03 L6_spectral:1.5431e-03 L7_spectral:1.5617e-03 L8_spectral:1.6552e-03 L9_spectral:1.5696e-03 L10_spectral:1.5737e-03 L11_spectral:1.5647e-03 L12_spectral:1.5680e-03 train_time:408443ms step_avg:40.84ms +[2025-09-11 10:58:11] [Rank 0] PRINT: step:10000/10000 val_loss:4.4118 total_sharp:4.4257e-04 L1_sharp:1.5664e-03 L2_sharp:9.9209e-05 L3_sharp:3.7096e-05 L4_sharp:8.7069e-05 L5_sharp:7.9398e-05 L6_sharp:1.4352e-04 L7_sharp:1.2009e-04 L8_sharp:2.5890e-04 L9_sharp:2.9222e-04 L10_sharp:2.9070e-04 L11_sharp:4.3353e-04 L12_sharp:1.4544e-03 total_fnorm:5.8203e-01 total_l1_linf:4.4600e+02 total_spectral:2.8711e-01 L1_fnorm:1.2256e-01 L2_fnorm:9.6680e-02 L3_fnorm:9.8145e-02 L4_fnorm:9.8633e-02 L5_fnorm:9.7168e-02 L6_fnorm:9.8145e-02 L7_fnorm:9.8145e-02 L8_fnorm:9.5215e-02 L9_fnorm:9.6680e-02 L10_fnorm:9.6191e-02 L11_fnorm:9.6680e-02 L12_fnorm:9.7656e-02 L1_l1linf:1.6357e-02 L2_l1linf:1.0925e-02 L3_l1linf:9.3994e-03 L4_l1linf:1.0437e-02 L5_l1linf:1.0437e-02 
L6_l1linf:1.0559e-02 L7_l1linf:1.0498e-02 L8_l1linf:1.0010e-02 L9_l1linf:8.6670e-03 L10_l1linf:7.9346e-03 L11_l1linf:8.9111e-03 L12_l1linf:1.0986e-02 L1_spectral:2.5230e-03 L2_spectral:1.4932e-03 L3_spectral:1.5623e-03 L4_spectral:1.5413e-03 L5_spectral:1.5239e-03 L6_spectral:1.5431e-03 L7_spectral:1.5617e-03 L8_spectral:1.6552e-03 L9_spectral:1.5696e-03 L10_spectral:1.5737e-03 L11_spectral:1.5647e-03 L12_spectral:1.5680e-03 train_time:408443ms step_avg:40.84ms +[2025-09-11 10:58:11] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 10:58:11 2025 --- +[2025-09-11 10:58:11] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 10:58:11 2025 --- +[2025-09-11 10:58:11] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 10:58:11] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.05_seed_44/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.05_seed_44/config.json new file mode 100644 index 0000000000000000000000000000000000000000..fe6a38942de33bbe1732ac1c238ae93f43b7f6d4 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.05_seed_44/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 44, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.005, + "muon_lr": 0.05, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "ad892419-29ae-4585-8144-90dc3061d50f", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.05_seed_44/training_log_ad892419-29ae-4585-8144-90dc3061d50f.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.05_seed_44/training_log_ad892419-29ae-4585-8144-90dc3061d50f.txt new file mode 100644 index 0000000000000000000000000000000000000000..dbb8abc34c1dab848ee0a24f6b7f92b6aa4009e6 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.05_seed_44/training_log_ad892419-29ae-4585-8144-90dc3061d50f.txt @@ -0,0 +1,4264 @@ +[2025-09-11 10:32:08] [Rank 0] PRINT: --- Script Start: Thu Sep 11 10:32:08 2025 --- +[2025-09-11 10:32:08] [Rank 0] PRINT: --- Script Start: Thu Sep 11 10:32:08 2025 --- +[2025-09-11 10:32:08] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005, muon_lr=0.05, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 10:32:08] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005, muon_lr=0.05, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 10:32:08] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 10:32:08] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 10:32:08] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 10:32:08] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 10:32:08] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.05_seed_44 +[2025-09-11 10:32:08] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.05_seed_44 +[2025-09-11 10:32:08] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import 
dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, 
"unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." 
+ "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention 
sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
def check_and_delete_txt_files(folder_path):
    """Decide whether a run directory needs to be (re-)run.

    Scans the folder's top-level .txt logs for the completion marker
    "10000/10000". If no log carries the marker, the run is considered stale:
    every .txt file is deleted and True is returned ("re-run"). If the marker
    is found, nothing is deleted and False is returned. A missing folder, a
    non-directory path, or a folder without .txt files also yields True.

    Args:
        folder_path (str): Path to the folder to check
    """
    root = Path(folder_path)

    # Guard clauses: anything we cannot inspect counts as "needs a run".
    if not root.exists():
        print(f"Error: Folder '{folder_path}' does not exist.")
        return True
    if not root.is_dir():
        print(f"Error: '{folder_path}' is not a directory.")
        return True

    candidates = list(root.glob("*.txt"))
    if not candidates:
        print("No .txt files found in the folder.")
        return True

    marker_seen = False
    for candidate in candidates:
        try:
            if "10000/10000" in candidate.read_text(encoding='utf-8'):
                marker_seen = True
                print(f"Found '10000/10000' in: {candidate}")
                break  # one hit is enough
        except Exception as e:
            print(f"Error reading {candidate}: {e}")

    if marker_seen:
        print("String '10000/10000' found. No files will be deleted.")
        return False

    print("String '10000/10000' not found in any .txt file. Deleting all .txt files...")
    for candidate in candidates:
        try:
            candidate.unlink()
            print(f"Deleted: {candidate}")
        except Exception as e:
            print(f"Error deleting {candidate}: {e}")
    return True
logfile = None
run_dir_path_str = None

base_log_dir = Path(exp_args.base_dir)

# One run directory per (mode, parameterization, adam_lr, muon_lr, seed) combination.
run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
run_dir_path = base_log_dir / run_folder_name
# True => no completed run found (stale logs purged): proceed with training.
run_flag = check_and_delete_txt_files(run_dir_path)

if run_flag:

    if master_process:
        # Re-seed so rank-0-only work (dir creation, config dumping) stays reproducible.
        set_seed(exp_args.seed)

        # Recomputed with the identical expression as above; kept for clarity on rank 0.
        run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
        run_dir_path = base_log_dir / run_folder_name
        run_dir_path.mkdir(parents=True, exist_ok=True)
        run_dir_path_str = str(run_dir_path)

        run_uuid = uuid.uuid4()
        logfile = run_dir_path / f"training_log_{run_uuid}.txt"
        print(f"Logging to: {logfile}")

        # Save configuration
        config_to_save = {
            "cli_args": vars(exp_args),
            "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
            "run_uuid_for_log": str(run_uuid),
            "script_code_logged_at_start": True
        }
        config_file_path = run_dir_path / "config.json"
        with open(config_file_path, "w") as f:
            json.dump(config_to_save, f, indent=4)
        print(f"Saved configuration to: {config_file_path}")

    def print0(s, console=False):
        """Rank-0 logger: timestamps the message, optionally echoes to stdout,
        and appends exactly ONE copy to the logfile.

        FIX: the original contained a second, unconditional
        `with open(logfile, "a")` block after the guarded one, so every
        message was written to the logfile twice (visible as duplicated
        lines at the top of existing training logs).
        """
        if master_process:
            timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
            log_message = f"[{timestamp}] [Rank {rank}] {s}"

            # Echo to console if requested or for explicit "PRINT:" messages.
            if console or s.startswith("PRINT:"):
                actual_s = s[6:] if s.startswith("PRINT:") else s
                print(actual_s)

            if logfile:
                with open(logfile, "a") as f:
                    f.write(log_message + "\n")

    print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
    print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
    print0(f"PRINT: Hyperparameters: {args}", console=True)
    print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
    if master_process:
        print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
    # NOTE(review): `code` is presumably the script source captured earlier in
    # the file (not visible in this chunk) — confirm it is defined before here.
    print0(code)  # Log the code

    ########################################
    #    Construct model and optimizer     #
    ########################################
    print0("PRINT: Constructing model...", console=True)
    model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768,
                           max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda()
    # Embeddings are kept in bfloat16; the rest of the model keeps its dtype.
    for m in model.modules():
        if isinstance(m, nn.Embedding):
            m.bfloat16()
    print0("PRINT: Broadcasting model parameters...", console=True)
    # Sync initial weights from rank 0 so all ranks start identical.
    for param in model.parameters():
        dist.broadcast(param.detach(), 0)
    print0("PRINT: Model constructed and broadcasted.", console=True)
    # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
    # For the "qkvo" parameterization the attention weights are exposed as
    # separate q_w/k_w/v_w matrices, which lets each optimizer mode route
    # different subsets (QK, VO, MLP W1/W2, ...) to Muon vs Adam.
    if exp_args.model_parameterization == "qkvo":
        print0("PRINT: Collecting parameters for optimizers...", console=True)
        head_params = [model.lm_head.weight]
        embed_params = [model.embed.weight]

        # Granular collection for attention and MLP parts
        attn_q_params = []
        attn_k_params = []
        attn_v_params = []
        attn_o_params = [] # W_O from c_proj
        mlp_fc_params = []   # MLP W_1 (c_fc)
        mlp_proj_params = [] # MLP W_2 (c_proj)

        for block_module in model.blocks:
            if block_module.attn is not None:
                # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class
                if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
                else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True)
                if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
                else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True)
                if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
                else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True)
                attn_o_params.append(block_module.attn.c_proj.weight)
            if block_module.mlp is not None:
                mlp_fc_params.append(block_module.mlp.c_fc.weight)
                mlp_proj_params.append(block_module.mlp.c_proj.weight)

        # Combine into logical groups for experiments
        attn_qk_group = attn_q_params + attn_k_params
        attn_vo_group = attn_v_params + attn_o_params
        all_attn_matrices = attn_qk_group + attn_vo_group
        mlp_w1_group = mlp_fc_params
        mlp_w2_group = mlp_proj_params
        all_mlp_matrices = mlp_fc_params + mlp_proj_params

        # Scalar parameters (all others not explicitly grouped as matrices)
        # NOTE(review): membership test on a set of Tensors works here because
        # Tensor hashing is identity-based, so `p not in set` is effectively an
        # identity check for parameters collected from the same model.
        matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
        scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
        for p_scalar in scalar_params: # Sanity check
            if p_scalar.ndim >=2:
                print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)


        # Determine parameter distribution based on optimizer_mode
        muon_params_target_list = []
        adam_matrix_target_list = [] # Matrices that Adam will handle specifically
        adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned)
        muon_lr = exp_args.muon_lr

        current_optimizer_mode = exp_args.optimizer_mode
        print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)

        # Mode dispatch: each mode picks which matrix groups Muon updates;
        # every matrix not given to Muon (plus head/embed/scalars) goes to Adam.
        if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params"
            print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
            muon_params_target_list = all_attn_matrices + all_mlp_matrices
            # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
        elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP
            print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = attn_qk_group
            adam_matrix_target_list = attn_vo_group + all_mlp_matrices
        elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
            print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = attn_vo_group
            adam_matrix_target_list = attn_qk_group + all_mlp_matrices
        elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
            print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = all_attn_matrices
            adam_matrix_target_list = all_mlp_matrices
        elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
            print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = all_mlp_matrices
            adam_matrix_target_list = all_attn_matrices
        elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
            print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = []
            adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
        elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
            print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = mlp_w2_group
            adam_matrix_target_list = all_attn_matrices + mlp_w1_group
        elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
            print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = attn_vo_group + all_mlp_matrices
            adam_matrix_target_list = attn_qk_group
        elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
            print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = attn_vo_group + mlp_w2_group
            adam_matrix_target_list = attn_qk_group + mlp_w1_group
        else:
            raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")

        # Adam optimizer setup
        adam_param_groups_config = [
            #dict(params=head_params, lr=0.22),
            #dict(params=embed_params, lr=0.6),
            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
            dict(params=head_params, lr=adam_matrix_lr),
            dict(params=embed_params, lr=adam_matrix_lr),
            dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam
        ]
        # Add matrices specifically assigned to Adam for this experiment mode
        if adam_matrix_target_list:
            # Ensure adam_matrix_target_list is flat and contains Parameters
            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
            if flat_adam_matrices: # Only add group if there are params
                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))

        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)
        optimizers = [optimizer1] # Start with Adam

        # Muon optimizer setup
        if muon_params_target_list:
            # Ensure muon_params_target_list is flat, unique, and contains Parameters
            # (dedup by id() so a parameter appearing in two groups is only updated once).
            flat_unique_muon_params = []
            seen_muon_ids = set()
            for sublist_or_p in muon_params_target_list:
                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
                    if p is not None and id(p) not in seen_muon_ids:
                        flat_unique_muon_params.append(p)
                        seen_muon_ids.add(id(p))

            if flat_unique_muon_params: # Only create Muon if it has parameters
                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
                optimizers.append(optimizer2)
            else:
                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
                optimizer2 = None # Explicitly set to None if not created
        else:
            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
            optimizer2 = None # Explicitly set to None

        print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
        if optimizer2:
            print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
    elif exp_args.model_parameterization == "whole":
        # Original modded-nanogpt split: Muon on every hidden >=2-D matrix,
        # Adam (with per-group LRs) on head/embeds/scalars.
        hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
        embed_params = [p for n, p in model.named_parameters() if "embed" in n]
        scalar_params = [p for p in model.parameters() if p.ndim < 2]
        head_params = [model.lm_head.weight]

        # init the optimizer(s)
        adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
        # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
        # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
        optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
        optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
        optimizers = [optimizer1, optimizer2]
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 10:32:08] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 10:32:08] [Rank 0] PRINT: Constructing model... +[2025-09-11 10:32:08] [Rank 0] PRINT: Constructing model... +[2025-09-11 10:32:09] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 10:32:09] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 10:32:09] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 10:32:09] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 10:32:09] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 10:32:09] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 10:32:09] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 10:32:09] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 10:32:09] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 10:32:09] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 10:32:11] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 10:32:11] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 10:32:11] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 10:32:11] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 10:32:11] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 10:32:11] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 10:32:17] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 10:32:17] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 10:32:17] [Rank 0] PRINT: Starting warmup... +[2025-09-11 10:32:17] [Rank 0] PRINT: Starting warmup... +[2025-09-11 10:32:55] [Rank 0] PRINT: Warmup complete. +[2025-09-11 10:32:55] [Rank 0] PRINT: Warmup complete. +[2025-09-11 10:32:55] [Rank 0] PRINT: Starting training... +[2025-09-11 10:32:55] [Rank 0] PRINT: Starting training... 
+[2025-09-11 10:32:56] [Rank 0] step:21/10000 train_time:1130ms step_avg:53.79ms +[2025-09-11 10:32:56] [Rank 0] step:21/10000 train_time:1130ms step_avg:53.79ms +[2025-09-11 10:32:57] [Rank 0] step:41/10000 train_time:1854ms step_avg:45.23ms +[2025-09-11 10:32:57] [Rank 0] step:41/10000 train_time:1854ms step_avg:45.23ms +[2025-09-11 10:32:57] [Rank 0] step:61/10000 train_time:2578ms step_avg:42.27ms +[2025-09-11 10:32:57] [Rank 0] step:61/10000 train_time:2578ms step_avg:42.27ms +[2025-09-11 10:32:58] [Rank 0] step:81/10000 train_time:3302ms step_avg:40.76ms +[2025-09-11 10:32:58] [Rank 0] step:81/10000 train_time:3302ms step_avg:40.76ms +[2025-09-11 10:32:59] [Rank 0] step:101/10000 train_time:4025ms step_avg:39.85ms +[2025-09-11 10:32:59] [Rank 0] step:101/10000 train_time:4025ms step_avg:39.85ms +[2025-09-11 10:33:00] [Rank 0] step:121/10000 train_time:4749ms step_avg:39.25ms +[2025-09-11 10:33:00] [Rank 0] step:121/10000 train_time:4749ms step_avg:39.25ms +[2025-09-11 10:33:00] [Rank 0] step:141/10000 train_time:5473ms step_avg:38.81ms +[2025-09-11 10:33:00] [Rank 0] step:141/10000 train_time:5473ms step_avg:38.81ms +[2025-09-11 10:33:01] [Rank 0] step:161/10000 train_time:6197ms step_avg:38.49ms +[2025-09-11 10:33:01] [Rank 0] step:161/10000 train_time:6197ms step_avg:38.49ms +[2025-09-11 10:33:02] [Rank 0] step:181/10000 train_time:6919ms step_avg:38.23ms +[2025-09-11 10:33:02] [Rank 0] step:181/10000 train_time:6919ms step_avg:38.23ms +[2025-09-11 10:33:03] [Rank 0] step:201/10000 train_time:7643ms step_avg:38.02ms +[2025-09-11 10:33:03] [Rank 0] step:201/10000 train_time:7643ms step_avg:38.02ms +[2025-09-11 10:33:03] [Rank 0] step:221/10000 train_time:8367ms step_avg:37.86ms +[2025-09-11 10:33:03] [Rank 0] step:221/10000 train_time:8367ms step_avg:37.86ms +[2025-09-11 10:33:04] [Rank 0] step:241/10000 train_time:9091ms step_avg:37.72ms +[2025-09-11 10:33:04] [Rank 0] step:241/10000 train_time:9091ms step_avg:37.72ms +[2025-09-11 10:33:05] [Rank 0] 
step:261/10000 train_time:9814ms step_avg:37.60ms +[2025-09-11 10:33:05] [Rank 0] step:261/10000 train_time:9814ms step_avg:37.60ms +[2025-09-11 10:33:05] [Rank 0] step:281/10000 train_time:10537ms step_avg:37.50ms +[2025-09-11 10:33:05] [Rank 0] step:281/10000 train_time:10537ms step_avg:37.50ms +[2025-09-11 10:33:06] [Rank 0] step:301/10000 train_time:11260ms step_avg:37.41ms +[2025-09-11 10:33:06] [Rank 0] step:301/10000 train_time:11260ms step_avg:37.41ms +[2025-09-11 10:33:07] [Rank 0] step:321/10000 train_time:11984ms step_avg:37.33ms +[2025-09-11 10:33:07] [Rank 0] step:321/10000 train_time:11984ms step_avg:37.33ms +[2025-09-11 10:33:08] [Rank 0] step:341/10000 train_time:12707ms step_avg:37.26ms +[2025-09-11 10:33:08] [Rank 0] step:341/10000 train_time:12707ms step_avg:37.26ms +[2025-09-11 10:33:08] [Rank 0] step:361/10000 train_time:13430ms step_avg:37.20ms +[2025-09-11 10:33:08] [Rank 0] step:361/10000 train_time:13430ms step_avg:37.20ms +[2025-09-11 10:33:09] [Rank 0] step:381/10000 train_time:14154ms step_avg:37.15ms +[2025-09-11 10:33:09] [Rank 0] step:381/10000 train_time:14154ms step_avg:37.15ms +[2025-09-11 10:33:10] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 10:33:10] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 10:33:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 10:33:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 10:33:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 10:33:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 10:33:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:33:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 10:33:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 10:33:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 10:33:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 10:33:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 10:33:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 10:33:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 10:33:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 10:33:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 10:33:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 10:33:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 10:33:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 10:33:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 10:33:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 10:33:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 10:33:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 10:33:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 10:33:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 10:33:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 10:33:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 10:33:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 10:33:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 10:33:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 10:33:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 10:33:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 10:33:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 10:33:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 10:33:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 10:33:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 10:33:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 10:33:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 10:33:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 10:33:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 10:33:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 10:33:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 10:33:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 10:33:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 10:33:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:33:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:33:56] [Rank 0] PRINT: step:400/10000 val_loss:6.3862 total_sharp:5.1046e-03 L1_sharp:1.1164e-02 L2_sharp:2.3592e-03 L3_sharp:1.5215e-03 L4_sharp:1.3020e-03 L5_sharp:5.5689e-04 L6_sharp:5.0054e-04 L7_sharp:6.3749e-04 L8_sharp:6.2093e-04 L9_sharp:7.3377e-04 L10_sharp:5.8006e-04 L11_sharp:7.5010e-04 L12_sharp:2.0241e-03 total_fnorm:2.7777e+01 total_l1_linf:1.0724e+05 total_spectral:1.3889e+01 L1_fnorm:6.1777e+00 L2_fnorm:6.0299e+00 L3_fnorm:5.9229e+00 L4_fnorm:5.6774e+00 L5_fnorm:5.6186e+00 L6_fnorm:5.4779e+00 L7_fnorm:5.2810e+00 L8_fnorm:5.1327e+00 L9_fnorm:5.0474e+00 L10_fnorm:4.9585e+00 L11_fnorm:4.7724e+00 L12_fnorm:4.5458e+00 L1_l1linf:1.9876e+00 L2_l1linf:1.8272e+00 L3_l1linf:1.8301e+00 L4_l1linf:1.8155e+00 L5_l1linf:1.8497e+00 L6_l1linf:1.8212e+00 L7_l1linf:1.7192e+00 L8_l1linf:1.6806e+00 L9_l1linf:1.6083e+00 L10_l1linf:1.5024e+00 L11_l1linf:1.4380e+00 L12_l1linf:1.2800e+00 L1_spectral:6.0401e-02 L2_spectral:6.0393e-02 L3_spectral:6.0353e-02 L4_spectral:6.0363e-02 L5_spectral:6.0304e-02 L6_spectral:6.0300e-02 L7_spectral:6.0246e-02 L8_spectral:6.0253e-02 L9_spectral:6.0254e-02 L10_spectral:6.0230e-02 L11_spectral:6.0237e-02 L12_spectral:6.0245e-02 train_time:14857ms step_avg:37.14ms +[2025-09-11 10:33:56] [Rank 0] PRINT: step:400/10000 val_loss:6.3862 total_sharp:5.1046e-03 L1_sharp:1.1164e-02 L2_sharp:2.3592e-03 L3_sharp:1.5215e-03 L4_sharp:1.3020e-03 L5_sharp:5.5689e-04 L6_sharp:5.0054e-04 L7_sharp:6.3749e-04 L8_sharp:6.2093e-04 L9_sharp:7.3377e-04 L10_sharp:5.8006e-04 L11_sharp:7.5010e-04 L12_sharp:2.0241e-03 total_fnorm:2.7777e+01 total_l1_linf:1.0724e+05 total_spectral:1.3889e+01 L1_fnorm:6.1777e+00 L2_fnorm:6.0299e+00 L3_fnorm:5.9229e+00 L4_fnorm:5.6774e+00 L5_fnorm:5.6186e+00 L6_fnorm:5.4779e+00 L7_fnorm:5.2810e+00 L8_fnorm:5.1327e+00 L9_fnorm:5.0474e+00 L10_fnorm:4.9585e+00 L11_fnorm:4.7724e+00 L12_fnorm:4.5458e+00 L1_l1linf:1.9876e+00 L2_l1linf:1.8272e+00 L3_l1linf:1.8301e+00 L4_l1linf:1.8155e+00 L5_l1linf:1.8497e+00 
L6_l1linf:1.8212e+00 L7_l1linf:1.7192e+00 L8_l1linf:1.6806e+00 L9_l1linf:1.6083e+00 L10_l1linf:1.5024e+00 L11_l1linf:1.4380e+00 L12_l1linf:1.2800e+00 L1_spectral:6.0401e-02 L2_spectral:6.0393e-02 L3_spectral:6.0353e-02 L4_spectral:6.0363e-02 L5_spectral:6.0304e-02 L6_spectral:6.0300e-02 L7_spectral:6.0246e-02 L8_spectral:6.0253e-02 L9_spectral:6.0254e-02 L10_spectral:6.0230e-02 L11_spectral:6.0237e-02 L12_spectral:6.0245e-02 train_time:14857ms step_avg:37.14ms +[2025-09-11 10:34:26] [Rank 0] step:401/10000 train_time:44740ms step_avg:111.57ms +[2025-09-11 10:34:26] [Rank 0] step:401/10000 train_time:44740ms step_avg:111.57ms +[2025-09-11 10:34:28] [Rank 0] step:421/10000 train_time:46677ms step_avg:110.87ms +[2025-09-11 10:34:28] [Rank 0] step:421/10000 train_time:46677ms step_avg:110.87ms +[2025-09-11 10:34:28] [Rank 0] step:441/10000 train_time:47315ms step_avg:107.29ms +[2025-09-11 10:34:28] [Rank 0] step:441/10000 train_time:47315ms step_avg:107.29ms +[2025-09-11 10:34:29] [Rank 0] step:461/10000 train_time:47952ms step_avg:104.02ms +[2025-09-11 10:34:29] [Rank 0] step:461/10000 train_time:47952ms step_avg:104.02ms +[2025-09-11 10:34:30] [Rank 0] step:481/10000 train_time:48589ms step_avg:101.02ms +[2025-09-11 10:34:30] [Rank 0] step:481/10000 train_time:48589ms step_avg:101.02ms +[2025-09-11 10:34:30] [Rank 0] step:501/10000 train_time:49226ms step_avg:98.25ms +[2025-09-11 10:34:30] [Rank 0] step:501/10000 train_time:49226ms step_avg:98.25ms +[2025-09-11 10:34:31] [Rank 0] step:521/10000 train_time:49862ms step_avg:95.70ms +[2025-09-11 10:34:31] [Rank 0] step:521/10000 train_time:49862ms step_avg:95.70ms +[2025-09-11 10:34:32] [Rank 0] step:541/10000 train_time:50499ms step_avg:93.34ms +[2025-09-11 10:34:32] [Rank 0] step:541/10000 train_time:50499ms step_avg:93.34ms +[2025-09-11 10:34:32] [Rank 0] step:561/10000 train_time:51135ms step_avg:91.15ms +[2025-09-11 10:34:32] [Rank 0] step:561/10000 train_time:51135ms step_avg:91.15ms +[2025-09-11 10:34:33] [Rank 
0] step:581/10000 train_time:51771ms step_avg:89.11ms +[2025-09-11 10:34:33] [Rank 0] step:581/10000 train_time:51771ms step_avg:89.11ms +[2025-09-11 10:34:34] [Rank 0] step:601/10000 train_time:52408ms step_avg:87.20ms +[2025-09-11 10:34:34] [Rank 0] step:601/10000 train_time:52408ms step_avg:87.20ms +[2025-09-11 10:34:34] [Rank 0] step:621/10000 train_time:53044ms step_avg:85.42ms +[2025-09-11 10:34:34] [Rank 0] step:621/10000 train_time:53044ms step_avg:85.42ms +[2025-09-11 10:34:35] [Rank 0] step:641/10000 train_time:53679ms step_avg:83.74ms +[2025-09-11 10:34:35] [Rank 0] step:641/10000 train_time:53679ms step_avg:83.74ms +[2025-09-11 10:34:35] [Rank 0] step:661/10000 train_time:54315ms step_avg:82.17ms +[2025-09-11 10:34:35] [Rank 0] step:661/10000 train_time:54315ms step_avg:82.17ms +[2025-09-11 10:34:36] [Rank 0] step:681/10000 train_time:54951ms step_avg:80.69ms +[2025-09-11 10:34:36] [Rank 0] step:681/10000 train_time:54951ms step_avg:80.69ms +[2025-09-11 10:34:37] [Rank 0] step:701/10000 train_time:55587ms step_avg:79.30ms +[2025-09-11 10:34:37] [Rank 0] step:701/10000 train_time:55587ms step_avg:79.30ms +[2025-09-11 10:34:37] [Rank 0] step:721/10000 train_time:56223ms step_avg:77.98ms +[2025-09-11 10:34:37] [Rank 0] step:721/10000 train_time:56223ms step_avg:77.98ms +[2025-09-11 10:34:38] [Rank 0] step:741/10000 train_time:56858ms step_avg:76.73ms +[2025-09-11 10:34:38] [Rank 0] step:741/10000 train_time:56858ms step_avg:76.73ms +[2025-09-11 10:34:39] [Rank 0] step:761/10000 train_time:57499ms step_avg:75.56ms +[2025-09-11 10:34:39] [Rank 0] step:761/10000 train_time:57499ms step_avg:75.56ms +[2025-09-11 10:34:40] [Rank 0] step:781/10000 train_time:58463ms step_avg:74.86ms +[2025-09-11 10:34:40] [Rank 0] step:781/10000 train_time:58463ms step_avg:74.86ms +[2025-09-11 10:34:40] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 10:34:40] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 10:34:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 10:34:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 10:35:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 10:35:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 10:35:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:35:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:35:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 10:35:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 10:35:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 10:35:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 10:35:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 10:35:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 10:35:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 10:35:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 10:35:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 10:35:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 10:35:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 10:35:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 10:35:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 10:35:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 10:35:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 10:35:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 10:35:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 10:35:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 10:35:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 10:35:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 10:35:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 10:35:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 10:35:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 10:35:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 10:35:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 10:35:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 10:35:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 10:35:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 10:35:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 10:35:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 10:35:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 10:35:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 10:35:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... 
+[2025-09-11 10:35:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 10:35:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 10:35:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 10:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:35:24] [Rank 0] PRINT: step:800/10000 val_loss:5.9424 total_sharp:3.2349e-03 L1_sharp:3.8157e-03 L2_sharp:1.3382e-03 L3_sharp:4.2448e-04 L4_sharp:4.6281e-04 L5_sharp:1.8145e-04 L6_sharp:2.9966e-04 L7_sharp:4.0409e-04 L8_sharp:5.1686e-04 L9_sharp:4.2868e-04 L10_sharp:7.4517e-04 L11_sharp:7.7452e-04 L12_sharp:2.7103e-03 total_fnorm:2.7875e+01 total_l1_linf:9.0624e+04 total_spectral:1.3938e+01 L1_fnorm:6.2812e+00 L2_fnorm:6.0938e+00 L3_fnorm:6.0938e+00 L4_fnorm:5.9688e+00 L5_fnorm:5.9062e+00 L6_fnorm:5.8438e+00 L7_fnorm:5.7188e+00 L8_fnorm:5.5000e+00 L9_fnorm:5.5000e+00 L10_fnorm:5.3750e+00 L11_fnorm:5.1250e+00 L12_fnorm:4.7188e+00 L1_l1linf:1.9453e+00 L2_l1linf:1.7734e+00 L3_l1linf:1.8203e+00 L4_l1linf:1.8125e+00 L5_l1linf:1.8594e+00 L6_l1linf:1.8750e+00 L7_l1linf:1.7969e+00 L8_l1linf:1.7812e+00 L9_l1linf:1.7266e+00 L10_l1linf:1.5625e+00 L11_l1linf:1.4297e+00 L12_l1linf:1.1094e+00 L1_spectral:6.7209e-02 L2_spectral:6.6511e-02 L3_spectral:6.6397e-02 L4_spectral:6.6086e-02 L5_spectral:6.6176e-02 L6_spectral:6.5922e-02 L7_spectral:6.5634e-02 L8_spectral:6.5544e-02 L9_spectral:6.5573e-02 L10_spectral:6.5742e-02 L11_spectral:6.5563e-02 L12_spectral:6.5414e-02 train_time:59250ms step_avg:74.06ms +[2025-09-11 10:35:24] [Rank 0] PRINT: step:800/10000 val_loss:5.9424 total_sharp:3.2349e-03 L1_sharp:3.8157e-03 L2_sharp:1.3382e-03 L3_sharp:4.2448e-04 L4_sharp:4.6281e-04 L5_sharp:1.8145e-04 L6_sharp:2.9966e-04 L7_sharp:4.0409e-04 L8_sharp:5.1686e-04 
L9_sharp:4.2868e-04 L10_sharp:7.4517e-04 L11_sharp:7.7452e-04 L12_sharp:2.7103e-03 total_fnorm:2.7875e+01 total_l1_linf:9.0624e+04 total_spectral:1.3938e+01 L1_fnorm:6.2812e+00 L2_fnorm:6.0938e+00 L3_fnorm:6.0938e+00 L4_fnorm:5.9688e+00 L5_fnorm:5.9062e+00 L6_fnorm:5.8438e+00 L7_fnorm:5.7188e+00 L8_fnorm:5.5000e+00 L9_fnorm:5.5000e+00 L10_fnorm:5.3750e+00 L11_fnorm:5.1250e+00 L12_fnorm:4.7188e+00 L1_l1linf:1.9453e+00 L2_l1linf:1.7734e+00 L3_l1linf:1.8203e+00 L4_l1linf:1.8125e+00 L5_l1linf:1.8594e+00 L6_l1linf:1.8750e+00 L7_l1linf:1.7969e+00 L8_l1linf:1.7812e+00 L9_l1linf:1.7266e+00 L10_l1linf:1.5625e+00 L11_l1linf:1.4297e+00 L12_l1linf:1.1094e+00 L1_spectral:6.7209e-02 L2_spectral:6.6511e-02 L3_spectral:6.6397e-02 L4_spectral:6.6086e-02 L5_spectral:6.6176e-02 L6_spectral:6.5922e-02 L7_spectral:6.5634e-02 L8_spectral:6.5544e-02 L9_spectral:6.5573e-02 L10_spectral:6.5742e-02 L11_spectral:6.5563e-02 L12_spectral:6.5414e-02 train_time:59250ms step_avg:74.06ms +[2025-09-11 10:35:25] [Rank 0] step:801/10000 train_time:60403ms step_avg:75.41ms +[2025-09-11 10:35:25] [Rank 0] step:801/10000 train_time:60403ms step_avg:75.41ms +[2025-09-11 10:35:26] [Rank 0] step:821/10000 train_time:61073ms step_avg:74.39ms +[2025-09-11 10:35:26] [Rank 0] step:821/10000 train_time:61073ms step_avg:74.39ms +[2025-09-11 10:35:26] [Rank 0] step:841/10000 train_time:61715ms step_avg:73.38ms +[2025-09-11 10:35:26] [Rank 0] step:841/10000 train_time:61715ms step_avg:73.38ms +[2025-09-11 10:35:27] [Rank 0] step:861/10000 train_time:62357ms step_avg:72.42ms +[2025-09-11 10:35:27] [Rank 0] step:861/10000 train_time:62357ms step_avg:72.42ms +[2025-09-11 10:35:28] [Rank 0] step:881/10000 train_time:62998ms step_avg:71.51ms +[2025-09-11 10:35:28] [Rank 0] step:881/10000 train_time:62998ms step_avg:71.51ms +[2025-09-11 10:35:28] [Rank 0] step:901/10000 train_time:63640ms step_avg:70.63ms +[2025-09-11 10:35:28] [Rank 0] step:901/10000 train_time:63640ms step_avg:70.63ms +[2025-09-11 10:35:29] [Rank 0] 
step:921/10000 train_time:64281ms step_avg:69.79ms +[2025-09-11 10:35:29] [Rank 0] step:921/10000 train_time:64281ms step_avg:69.79ms +[2025-09-11 10:35:30] [Rank 0] step:941/10000 train_time:64922ms step_avg:68.99ms +[2025-09-11 10:35:30] [Rank 0] step:941/10000 train_time:64922ms step_avg:68.99ms +[2025-09-11 10:35:30] [Rank 0] step:961/10000 train_time:65562ms step_avg:68.22ms +[2025-09-11 10:35:30] [Rank 0] step:961/10000 train_time:65562ms step_avg:68.22ms +[2025-09-11 10:35:31] [Rank 0] step:981/10000 train_time:66203ms step_avg:67.49ms +[2025-09-11 10:35:31] [Rank 0] step:981/10000 train_time:66203ms step_avg:67.49ms +[2025-09-11 10:35:31] [Rank 0] step:1001/10000 train_time:66844ms step_avg:66.78ms +[2025-09-11 10:35:31] [Rank 0] step:1001/10000 train_time:66844ms step_avg:66.78ms +[2025-09-11 10:35:32] [Rank 0] step:1021/10000 train_time:67484ms step_avg:66.10ms +[2025-09-11 10:35:32] [Rank 0] step:1021/10000 train_time:67484ms step_avg:66.10ms +[2025-09-11 10:35:33] [Rank 0] step:1041/10000 train_time:68125ms step_avg:65.44ms +[2025-09-11 10:35:33] [Rank 0] step:1041/10000 train_time:68125ms step_avg:65.44ms +[2025-09-11 10:35:33] [Rank 0] step:1061/10000 train_time:68765ms step_avg:64.81ms +[2025-09-11 10:35:33] [Rank 0] step:1061/10000 train_time:68765ms step_avg:64.81ms +[2025-09-11 10:35:34] [Rank 0] step:1081/10000 train_time:69405ms step_avg:64.20ms +[2025-09-11 10:35:34] [Rank 0] step:1081/10000 train_time:69405ms step_avg:64.20ms +[2025-09-11 10:35:35] [Rank 0] step:1101/10000 train_time:70045ms step_avg:63.62ms +[2025-09-11 10:35:35] [Rank 0] step:1101/10000 train_time:70045ms step_avg:63.62ms +[2025-09-11 10:35:35] [Rank 0] step:1121/10000 train_time:70686ms step_avg:63.06ms +[2025-09-11 10:35:35] [Rank 0] step:1121/10000 train_time:70686ms step_avg:63.06ms +[2025-09-11 10:35:36] [Rank 0] step:1141/10000 train_time:71327ms step_avg:62.51ms +[2025-09-11 10:35:36] [Rank 0] step:1141/10000 train_time:71327ms step_avg:62.51ms +[2025-09-11 10:35:37] 
[Rank 0] step:1161/10000 train_time:71967ms step_avg:61.99ms +[2025-09-11 10:35:37] [Rank 0] step:1161/10000 train_time:71967ms step_avg:61.99ms +[2025-09-11 10:35:37] [Rank 0] step:1181/10000 train_time:72607ms step_avg:61.48ms +[2025-09-11 10:35:37] [Rank 0] step:1181/10000 train_time:72607ms step_avg:61.48ms +[2025-09-11 10:35:38] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 10:35:38] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 10:35:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 10:35:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 10:35:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 10:35:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 10:35:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:35:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:35:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 10:35:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 10:35:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 10:35:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 10:35:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 10:35:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 10:35:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... 
+[2025-09-11 10:35:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 10:35:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 10:35:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 10:35:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 10:35:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 10:35:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 10:35:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 10:35:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 10:35:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 10:35:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 10:35:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 10:35:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 10:35:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 10:35:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 10:35:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 10:35:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 10:35:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 10:35:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 10:35:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 10:35:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... 
+[2025-09-11 10:35:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 10:35:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 10:35:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 10:35:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 10:35:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 10:35:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 10:35:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 10:35:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 10:35:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 10:35:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:35:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:35:48] [Rank 0] PRINT: step:1200/10000 val_loss:5.5927 total_sharp:2.1708e-03 L1_sharp:1.9620e-03 L2_sharp:7.4105e-04 L3_sharp:4.3746e-04 L4_sharp:3.2671e-04 L5_sharp:2.0387e-04 L6_sharp:2.1722e-04 L7_sharp:2.4198e-04 L8_sharp:4.3939e-04 L9_sharp:3.2281e-04 L10_sharp:3.7703e-04 L11_sharp:4.8177e-04 L12_sharp:3.3052e-03 total_fnorm:2.8250e+01 total_l1_linf:9.0624e+04 total_spectral:1.4062e+01 L1_fnorm:6.3125e+00 L2_fnorm:6.1875e+00 L3_fnorm:6.1875e+00 L4_fnorm:6.1250e+00 L5_fnorm:6.0938e+00 L6_fnorm:6.0938e+00 L7_fnorm:6.0000e+00 L8_fnorm:5.7812e+00 L9_fnorm:5.9375e+00 L10_fnorm:5.9688e+00 L11_fnorm:5.8438e+00 L12_fnorm:5.5000e+00 L1_l1linf:1.8125e+00 L2_l1linf:1.7344e+00 L3_l1linf:1.7344e+00 L4_l1linf:1.7344e+00 L5_l1linf:1.7422e+00 L6_l1linf:1.7734e+00 L7_l1linf:1.7422e+00 L8_l1linf:1.7578e+00 L9_l1linf:1.7422e+00 L10_l1linf:1.7344e+00 L11_l1linf:1.6875e+00 L12_l1linf:1.3672e+00 L1_spectral:6.9910e-02 L2_spectral:6.8873e-02 L3_spectral:6.9351e-02 L4_spectral:6.8674e-02 L5_spectral:6.8996e-02 L6_spectral:6.8713e-02 L7_spectral:6.8191e-02 L8_spectral:6.7659e-02 L9_spectral:6.7991e-02 L10_spectral:6.8155e-02 L11_spectral:6.7762e-02 L12_spectral:6.8166e-02 train_time:73230ms step_avg:61.02ms +[2025-09-11 10:35:48] [Rank 0] PRINT: step:1200/10000 val_loss:5.5927 total_sharp:2.1708e-03 L1_sharp:1.9620e-03 L2_sharp:7.4105e-04 L3_sharp:4.3746e-04 L4_sharp:3.2671e-04 L5_sharp:2.0387e-04 L6_sharp:2.1722e-04 L7_sharp:2.4198e-04 L8_sharp:4.3939e-04 L9_sharp:3.2281e-04 L10_sharp:3.7703e-04 L11_sharp:4.8177e-04 L12_sharp:3.3052e-03 total_fnorm:2.8250e+01 total_l1_linf:9.0624e+04 total_spectral:1.4062e+01 L1_fnorm:6.3125e+00 L2_fnorm:6.1875e+00 L3_fnorm:6.1875e+00 L4_fnorm:6.1250e+00 L5_fnorm:6.0938e+00 L6_fnorm:6.0938e+00 L7_fnorm:6.0000e+00 L8_fnorm:5.7812e+00 L9_fnorm:5.9375e+00 L10_fnorm:5.9688e+00 L11_fnorm:5.8438e+00 L12_fnorm:5.5000e+00 L1_l1linf:1.8125e+00 L2_l1linf:1.7344e+00 L3_l1linf:1.7344e+00 L4_l1linf:1.7344e+00 L5_l1linf:1.7422e+00 
L6_l1linf:1.7734e+00 L7_l1linf:1.7422e+00 L8_l1linf:1.7578e+00 L9_l1linf:1.7422e+00 L10_l1linf:1.7344e+00 L11_l1linf:1.6875e+00 L12_l1linf:1.3672e+00 L1_spectral:6.9910e-02 L2_spectral:6.8873e-02 L3_spectral:6.9351e-02 L4_spectral:6.8674e-02 L5_spectral:6.8996e-02 L6_spectral:6.8713e-02 L7_spectral:6.8191e-02 L8_spectral:6.7659e-02 L9_spectral:6.7991e-02 L10_spectral:6.8155e-02 L11_spectral:6.7762e-02 L12_spectral:6.8166e-02 train_time:73230ms step_avg:61.02ms +[2025-09-11 10:35:49] [Rank 0] step:1201/10000 train_time:74430ms step_avg:61.97ms +[2025-09-11 10:35:49] [Rank 0] step:1201/10000 train_time:74430ms step_avg:61.97ms +[2025-09-11 10:35:49] [Rank 0] step:1221/10000 train_time:75075ms step_avg:61.49ms +[2025-09-11 10:35:49] [Rank 0] step:1221/10000 train_time:75075ms step_avg:61.49ms +[2025-09-11 10:35:50] [Rank 0] step:1241/10000 train_time:75718ms step_avg:61.01ms +[2025-09-11 10:35:50] [Rank 0] step:1241/10000 train_time:75718ms step_avg:61.01ms +[2025-09-11 10:35:51] [Rank 0] step:1261/10000 train_time:76360ms step_avg:60.56ms +[2025-09-11 10:35:51] [Rank 0] step:1261/10000 train_time:76360ms step_avg:60.56ms +[2025-09-11 10:35:51] [Rank 0] step:1281/10000 train_time:77002ms step_avg:60.11ms +[2025-09-11 10:35:51] [Rank 0] step:1281/10000 train_time:77002ms step_avg:60.11ms +[2025-09-11 10:35:52] [Rank 0] step:1301/10000 train_time:77645ms step_avg:59.68ms +[2025-09-11 10:35:52] [Rank 0] step:1301/10000 train_time:77645ms step_avg:59.68ms +[2025-09-11 10:35:53] [Rank 0] step:1321/10000 train_time:78286ms step_avg:59.26ms +[2025-09-11 10:35:53] [Rank 0] step:1321/10000 train_time:78286ms step_avg:59.26ms +[2025-09-11 10:35:53] [Rank 0] step:1341/10000 train_time:78929ms step_avg:58.86ms +[2025-09-11 10:35:53] [Rank 0] step:1341/10000 train_time:78929ms step_avg:58.86ms +[2025-09-11 10:35:54] [Rank 0] step:1361/10000 train_time:79570ms step_avg:58.46ms +[2025-09-11 10:35:54] [Rank 0] step:1361/10000 train_time:79570ms step_avg:58.46ms +[2025-09-11 10:35:55] 
[Rank 0] step:1381/10000 train_time:80211ms step_avg:58.08ms +[2025-09-11 10:35:55] [Rank 0] step:1381/10000 train_time:80211ms step_avg:58.08ms +[2025-09-11 10:35:55] [Rank 0] step:1401/10000 train_time:80852ms step_avg:57.71ms +[2025-09-11 10:35:55] [Rank 0] step:1401/10000 train_time:80852ms step_avg:57.71ms +[2025-09-11 10:35:56] [Rank 0] step:1421/10000 train_time:81493ms step_avg:57.35ms +[2025-09-11 10:35:56] [Rank 0] step:1421/10000 train_time:81493ms step_avg:57.35ms +[2025-09-11 10:35:57] [Rank 0] step:1441/10000 train_time:82135ms step_avg:57.00ms +[2025-09-11 10:35:57] [Rank 0] step:1441/10000 train_time:82135ms step_avg:57.00ms +[2025-09-11 10:35:57] [Rank 0] step:1461/10000 train_time:82776ms step_avg:56.66ms +[2025-09-11 10:35:57] [Rank 0] step:1461/10000 train_time:82776ms step_avg:56.66ms +[2025-09-11 10:35:58] [Rank 0] step:1481/10000 train_time:83417ms step_avg:56.32ms +[2025-09-11 10:35:58] [Rank 0] step:1481/10000 train_time:83417ms step_avg:56.32ms +[2025-09-11 10:35:58] [Rank 0] step:1501/10000 train_time:84062ms step_avg:56.00ms +[2025-09-11 10:35:58] [Rank 0] step:1501/10000 train_time:84062ms step_avg:56.00ms +[2025-09-11 10:35:59] [Rank 0] step:1521/10000 train_time:84707ms step_avg:55.69ms +[2025-09-11 10:35:59] [Rank 0] step:1521/10000 train_time:84707ms step_avg:55.69ms +[2025-09-11 10:36:00] [Rank 0] step:1541/10000 train_time:85353ms step_avg:55.39ms +[2025-09-11 10:36:00] [Rank 0] step:1541/10000 train_time:85353ms step_avg:55.39ms +[2025-09-11 10:36:00] [Rank 0] step:1561/10000 train_time:85998ms step_avg:55.09ms +[2025-09-11 10:36:00] [Rank 0] step:1561/10000 train_time:85998ms step_avg:55.09ms +[2025-09-11 10:36:01] [Rank 0] step:1581/10000 train_time:86643ms step_avg:54.80ms +[2025-09-11 10:36:01] [Rank 0] step:1581/10000 train_time:86643ms step_avg:54.80ms +[2025-09-11 10:36:02] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 10:36:02] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 10:36:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 10:36:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 10:36:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 10:36:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 10:36:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:36:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:36:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 10:36:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 10:36:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 10:36:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 10:36:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 10:36:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 10:36:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 10:36:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 10:36:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 10:36:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 10:36:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 10:36:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 10:36:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 10:36:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 10:36:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 10:36:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 10:36:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 10:36:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 10:36:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 10:36:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 10:36:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 10:36:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 10:36:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 10:36:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 10:36:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 10:36:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 10:36:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 10:36:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 10:36:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 10:36:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 10:36:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 10:36:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 10:36:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 10:36:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 10:36:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 10:36:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 10:36:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:36:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:36:11] [Rank 0] PRINT: step:1600/10000 val_loss:5.4042 total_sharp:1.7114e-03 L1_sharp:1.5004e-03 L2_sharp:4.9265e-04 L3_sharp:2.3720e-04 L4_sharp:2.1253e-04 L5_sharp:1.8451e-04 L6_sharp:2.0463e-04 L7_sharp:1.7359e-04 L8_sharp:4.3457e-04 L9_sharp:2.6422e-04 L10_sharp:3.3424e-04 L11_sharp:4.5130e-04 L12_sharp:1.8180e-03 total_fnorm:2.7625e+01 total_l1_linf:8.6016e+04 total_spectral:1.3750e+01 L1_fnorm:6.3750e+00 L2_fnorm:6.2188e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.2188e+00 L5_fnorm:6.1562e+00 L6_fnorm:6.1875e+00 L7_fnorm:6.1562e+00 L8_fnorm:5.9062e+00 L9_fnorm:6.0938e+00 L10_fnorm:6.0938e+00 L11_fnorm:6.0625e+00 L12_fnorm:5.6875e+00 L1_l1linf:1.7734e+00 L2_l1linf:1.6875e+00 L3_l1linf:1.6562e+00 L4_l1linf:1.6719e+00 L5_l1linf:1.6875e+00 L6_l1linf:1.7031e+00 L7_l1linf:1.6719e+00 L8_l1linf:1.7031e+00 L9_l1linf:1.6719e+00 L10_l1linf:1.6797e+00 L11_l1linf:1.7031e+00 L12_l1linf:1.4062e+00 L1_spectral:7.2674e-02 L2_spectral:7.0796e-02 L3_spectral:7.1452e-02 L4_spectral:7.0963e-02 L5_spectral:7.1507e-02 L6_spectral:7.1264e-02 L7_spectral:7.0835e-02 L8_spectral:7.0530e-02 L9_spectral:6.9984e-02 L10_spectral:7.0103e-02 L11_spectral:7.0374e-02 L12_spectral:6.9784e-02 train_time:87271ms step_avg:54.54ms +[2025-09-11 10:36:11] [Rank 0] PRINT: step:1600/10000 
val_loss:5.4042 total_sharp:1.7114e-03 L1_sharp:1.5004e-03 L2_sharp:4.9265e-04 L3_sharp:2.3720e-04 L4_sharp:2.1253e-04 L5_sharp:1.8451e-04 L6_sharp:2.0463e-04 L7_sharp:1.7359e-04 L8_sharp:4.3457e-04 L9_sharp:2.6422e-04 L10_sharp:3.3424e-04 L11_sharp:4.5130e-04 L12_sharp:1.8180e-03 total_fnorm:2.7625e+01 total_l1_linf:8.6016e+04 total_spectral:1.3750e+01 L1_fnorm:6.3750e+00 L2_fnorm:6.2188e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.2188e+00 L5_fnorm:6.1562e+00 L6_fnorm:6.1875e+00 L7_fnorm:6.1562e+00 L8_fnorm:5.9062e+00 L9_fnorm:6.0938e+00 L10_fnorm:6.0938e+00 L11_fnorm:6.0625e+00 L12_fnorm:5.6875e+00 L1_l1linf:1.7734e+00 L2_l1linf:1.6875e+00 L3_l1linf:1.6562e+00 L4_l1linf:1.6719e+00 L5_l1linf:1.6875e+00 L6_l1linf:1.7031e+00 L7_l1linf:1.6719e+00 L8_l1linf:1.7031e+00 L9_l1linf:1.6719e+00 L10_l1linf:1.6797e+00 L11_l1linf:1.7031e+00 L12_l1linf:1.4062e+00 L1_spectral:7.2674e-02 L2_spectral:7.0796e-02 L3_spectral:7.1452e-02 L4_spectral:7.0963e-02 L5_spectral:7.1507e-02 L6_spectral:7.1264e-02 L7_spectral:7.0835e-02 L8_spectral:7.0530e-02 L9_spectral:6.9984e-02 L10_spectral:7.0103e-02 L11_spectral:7.0374e-02 L12_spectral:6.9784e-02 train_time:87271ms step_avg:54.54ms +[2025-09-11 10:36:13] [Rank 0] step:1601/10000 train_time:88442ms step_avg:55.24ms +[2025-09-11 10:36:13] [Rank 0] step:1601/10000 train_time:88442ms step_avg:55.24ms +[2025-09-11 10:36:13] [Rank 0] step:1621/10000 train_time:89115ms step_avg:54.98ms +[2025-09-11 10:36:13] [Rank 0] step:1621/10000 train_time:89115ms step_avg:54.98ms +[2025-09-11 10:36:14] [Rank 0] step:1641/10000 train_time:89761ms step_avg:54.70ms +[2025-09-11 10:36:14] [Rank 0] step:1641/10000 train_time:89761ms step_avg:54.70ms +[2025-09-11 10:36:14] [Rank 0] step:1661/10000 train_time:90407ms step_avg:54.43ms +[2025-09-11 10:36:14] [Rank 0] step:1661/10000 train_time:90407ms step_avg:54.43ms +[2025-09-11 10:36:15] [Rank 0] step:1681/10000 train_time:91054ms step_avg:54.17ms +[2025-09-11 10:36:15] [Rank 0] step:1681/10000 train_time:91054ms 
step_avg:54.17ms +[2025-09-11 10:36:16] [Rank 0] step:1701/10000 train_time:91699ms step_avg:53.91ms +[2025-09-11 10:36:16] [Rank 0] step:1701/10000 train_time:91699ms step_avg:53.91ms +[2025-09-11 10:36:16] [Rank 0] step:1721/10000 train_time:92344ms step_avg:53.66ms +[2025-09-11 10:36:16] [Rank 0] step:1721/10000 train_time:92344ms step_avg:53.66ms +[2025-09-11 10:36:17] [Rank 0] step:1741/10000 train_time:92990ms step_avg:53.41ms +[2025-09-11 10:36:17] [Rank 0] step:1741/10000 train_time:92990ms step_avg:53.41ms +[2025-09-11 10:36:18] [Rank 0] step:1761/10000 train_time:93636ms step_avg:53.17ms +[2025-09-11 10:36:18] [Rank 0] step:1761/10000 train_time:93636ms step_avg:53.17ms +[2025-09-11 10:36:18] [Rank 0] step:1781/10000 train_time:94281ms step_avg:52.94ms +[2025-09-11 10:36:18] [Rank 0] step:1781/10000 train_time:94281ms step_avg:52.94ms +[2025-09-11 10:36:19] [Rank 0] step:1801/10000 train_time:94925ms step_avg:52.71ms +[2025-09-11 10:36:19] [Rank 0] step:1801/10000 train_time:94925ms step_avg:52.71ms +[2025-09-11 10:36:20] [Rank 0] step:1821/10000 train_time:95570ms step_avg:52.48ms +[2025-09-11 10:36:20] [Rank 0] step:1821/10000 train_time:95570ms step_avg:52.48ms +[2025-09-11 10:36:20] [Rank 0] step:1841/10000 train_time:96215ms step_avg:52.26ms +[2025-09-11 10:36:20] [Rank 0] step:1841/10000 train_time:96215ms step_avg:52.26ms +[2025-09-11 10:36:21] [Rank 0] step:1861/10000 train_time:96860ms step_avg:52.05ms +[2025-09-11 10:36:21] [Rank 0] step:1861/10000 train_time:96860ms step_avg:52.05ms +[2025-09-11 10:36:22] [Rank 0] step:1881/10000 train_time:97505ms step_avg:51.84ms +[2025-09-11 10:36:22] [Rank 0] step:1881/10000 train_time:97505ms step_avg:51.84ms +[2025-09-11 10:36:22] [Rank 0] step:1901/10000 train_time:98150ms step_avg:51.63ms +[2025-09-11 10:36:22] [Rank 0] step:1901/10000 train_time:98150ms step_avg:51.63ms +[2025-09-11 10:36:23] [Rank 0] step:1921/10000 train_time:98795ms step_avg:51.43ms +[2025-09-11 10:36:23] [Rank 0] step:1921/10000 
train_time:98795ms step_avg:51.43ms +[2025-09-11 10:36:24] [Rank 0] step:1941/10000 train_time:99440ms step_avg:51.23ms +[2025-09-11 10:36:24] [Rank 0] step:1941/10000 train_time:99440ms step_avg:51.23ms +[2025-09-11 10:36:24] [Rank 0] step:1961/10000 train_time:100084ms step_avg:51.04ms +[2025-09-11 10:36:24] [Rank 0] step:1961/10000 train_time:100084ms step_avg:51.04ms +[2025-09-11 10:36:25] [Rank 0] step:1981/10000 train_time:100730ms step_avg:50.85ms +[2025-09-11 10:36:25] [Rank 0] step:1981/10000 train_time:100730ms step_avg:50.85ms +[2025-09-11 10:36:25] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 10:36:25] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 10:36:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 10:36:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 10:36:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 10:36:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 10:36:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:36:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:36:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 10:36:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 10:36:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 10:36:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 10:36:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 10:36:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 10:36:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 10:36:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 10:36:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 10:36:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 10:36:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 10:36:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 10:36:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 10:36:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 10:36:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 10:36:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 10:36:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 10:36:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 10:36:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 10:36:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 10:36:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 10:36:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 10:36:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 10:36:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 10:36:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 10:36:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 10:36:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 10:36:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 10:36:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 10:36:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 10:36:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 10:36:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 10:36:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 10:36:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 10:36:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 10:36:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 10:36:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:36:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:36:35] [Rank 0] PRINT: step:2000/10000 val_loss:5.2575 total_sharp:1.6738e-03 L1_sharp:8.7920e-04 L2_sharp:2.9792e-04 L3_sharp:1.9269e-04 L4_sharp:2.1114e-04 L5_sharp:1.8260e-04 L6_sharp:1.4472e-04 L7_sharp:1.0956e-04 L8_sharp:3.3394e-04 L9_sharp:2.7640e-04 L10_sharp:3.4494e-04 L11_sharp:5.0313e-04 L12_sharp:4.5301e-03 total_fnorm:2.7375e+01 total_l1_linf:8.4992e+04 total_spectral:1.3625e+01 L1_fnorm:6.3750e+00 L2_fnorm:6.2188e+00 L3_fnorm:6.2500e+00 L4_fnorm:6.2500e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.1875e+00 L8_fnorm:5.9062e+00 L9_fnorm:6.1562e+00 L10_fnorm:6.1875e+00 L11_fnorm:6.1562e+00 L12_fnorm:5.8438e+00 L1_l1linf:1.7656e+00 L2_l1linf:1.6719e+00 L3_l1linf:1.6406e+00 L4_l1linf:1.6250e+00 L5_l1linf:1.6172e+00 L6_l1linf:1.6406e+00 L7_l1linf:1.6406e+00 L8_l1linf:1.6484e+00 L9_l1linf:1.6328e+00 L10_l1linf:1.6484e+00 L11_l1linf:1.6719e+00 L12_l1linf:1.3594e+00 L1_spectral:7.4046e-02 L2_spectral:7.2302e-02 L3_spectral:7.2919e-02 L4_spectral:7.2454e-02 L5_spectral:7.3193e-02 L6_spectral:7.3033e-02 L7_spectral:7.2191e-02 L8_spectral:7.2139e-02 L9_spectral:7.2045e-02 L10_spectral:7.1885e-02 L11_spectral:7.2359e-02 L12_spectral:7.1739e-02 train_time:101357ms step_avg:50.68ms +[2025-09-11 10:36:35] [Rank 0] PRINT: step:2000/10000 val_loss:5.2575 total_sharp:1.6738e-03 L1_sharp:8.7920e-04 L2_sharp:2.9792e-04 L3_sharp:1.9269e-04 L4_sharp:2.1114e-04 L5_sharp:1.8260e-04 L6_sharp:1.4472e-04 L7_sharp:1.0956e-04 L8_sharp:3.3394e-04 L9_sharp:2.7640e-04 L10_sharp:3.4494e-04 L11_sharp:5.0313e-04 L12_sharp:4.5301e-03 total_fnorm:2.7375e+01 total_l1_linf:8.4992e+04 total_spectral:1.3625e+01 L1_fnorm:6.3750e+00 L2_fnorm:6.2188e+00 L3_fnorm:6.2500e+00 L4_fnorm:6.2500e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.1875e+00 L8_fnorm:5.9062e+00 L9_fnorm:6.1562e+00 L10_fnorm:6.1875e+00 L11_fnorm:6.1562e+00 L12_fnorm:5.8438e+00 L1_l1linf:1.7656e+00 L2_l1linf:1.6719e+00 L3_l1linf:1.6406e+00 L4_l1linf:1.6250e+00 L5_l1linf:1.6172e+00 
L6_l1linf:1.6406e+00 L7_l1linf:1.6406e+00 L8_l1linf:1.6484e+00 L9_l1linf:1.6328e+00 L10_l1linf:1.6484e+00 L11_l1linf:1.6719e+00 L12_l1linf:1.3594e+00 L1_spectral:7.4046e-02 L2_spectral:7.2302e-02 L3_spectral:7.2919e-02 L4_spectral:7.2454e-02 L5_spectral:7.3193e-02 L6_spectral:7.3033e-02 L7_spectral:7.2191e-02 L8_spectral:7.2139e-02 L9_spectral:7.2045e-02 L10_spectral:7.1885e-02 L11_spectral:7.2359e-02 L12_spectral:7.1739e-02 train_time:101357ms step_avg:50.68ms +[2025-09-11 10:36:36] [Rank 0] step:2001/10000 train_time:102551ms step_avg:51.25ms +[2025-09-11 10:36:36] [Rank 0] step:2001/10000 train_time:102551ms step_avg:51.25ms +[2025-09-11 10:36:37] [Rank 0] step:2021/10000 train_time:103206ms step_avg:51.07ms +[2025-09-11 10:36:37] [Rank 0] step:2021/10000 train_time:103206ms step_avg:51.07ms +[2025-09-11 10:36:38] [Rank 0] step:2041/10000 train_time:103853ms step_avg:50.88ms +[2025-09-11 10:36:38] [Rank 0] step:2041/10000 train_time:103853ms step_avg:50.88ms +[2025-09-11 10:36:38] [Rank 0] step:2061/10000 train_time:104499ms step_avg:50.70ms +[2025-09-11 10:36:38] [Rank 0] step:2061/10000 train_time:104499ms step_avg:50.70ms +[2025-09-11 10:36:39] [Rank 0] step:2081/10000 train_time:105146ms step_avg:50.53ms +[2025-09-11 10:36:39] [Rank 0] step:2081/10000 train_time:105146ms step_avg:50.53ms +[2025-09-11 10:36:40] [Rank 0] step:2101/10000 train_time:105792ms step_avg:50.35ms +[2025-09-11 10:36:40] [Rank 0] step:2101/10000 train_time:105792ms step_avg:50.35ms +[2025-09-11 10:36:40] [Rank 0] step:2121/10000 train_time:106438ms step_avg:50.18ms +[2025-09-11 10:36:40] [Rank 0] step:2121/10000 train_time:106438ms step_avg:50.18ms +[2025-09-11 10:36:41] [Rank 0] step:2141/10000 train_time:107084ms step_avg:50.02ms +[2025-09-11 10:36:41] [Rank 0] step:2141/10000 train_time:107084ms step_avg:50.02ms +[2025-09-11 10:36:41] [Rank 0] step:2161/10000 train_time:107730ms step_avg:49.85ms +[2025-09-11 10:36:41] [Rank 0] step:2161/10000 train_time:107730ms step_avg:49.85ms 
+[2025-09-11 10:36:42] [Rank 0] step:2181/10000 train_time:108375ms step_avg:49.69ms +[2025-09-11 10:36:42] [Rank 0] step:2181/10000 train_time:108375ms step_avg:49.69ms +[2025-09-11 10:36:43] [Rank 0] step:2201/10000 train_time:109020ms step_avg:49.53ms +[2025-09-11 10:36:43] [Rank 0] step:2201/10000 train_time:109020ms step_avg:49.53ms +[2025-09-11 10:36:43] [Rank 0] step:2221/10000 train_time:109666ms step_avg:49.38ms +[2025-09-11 10:36:43] [Rank 0] step:2221/10000 train_time:109666ms step_avg:49.38ms +[2025-09-11 10:36:44] [Rank 0] step:2241/10000 train_time:110323ms step_avg:49.23ms +[2025-09-11 10:36:44] [Rank 0] step:2241/10000 train_time:110323ms step_avg:49.23ms +[2025-09-11 10:36:45] [Rank 0] step:2261/10000 train_time:110983ms step_avg:49.09ms +[2025-09-11 10:36:45] [Rank 0] step:2261/10000 train_time:110983ms step_avg:49.09ms +[2025-09-11 10:36:46] [Rank 0] step:2281/10000 train_time:111804ms step_avg:49.02ms +[2025-09-11 10:36:46] [Rank 0] step:2281/10000 train_time:111804ms step_avg:49.02ms +[2025-09-11 10:36:47] [Rank 0] step:2301/10000 train_time:112902ms step_avg:49.07ms +[2025-09-11 10:36:47] [Rank 0] step:2301/10000 train_time:112902ms step_avg:49.07ms +[2025-09-11 10:36:47] [Rank 0] step:2321/10000 train_time:113561ms step_avg:48.93ms +[2025-09-11 10:36:47] [Rank 0] step:2321/10000 train_time:113561ms step_avg:48.93ms +[2025-09-11 10:36:48] [Rank 0] step:2341/10000 train_time:114369ms step_avg:48.85ms +[2025-09-11 10:36:48] [Rank 0] step:2341/10000 train_time:114369ms step_avg:48.85ms +[2025-09-11 10:36:49] [Rank 0] step:2361/10000 train_time:115174ms step_avg:48.78ms +[2025-09-11 10:36:49] [Rank 0] step:2361/10000 train_time:115174ms step_avg:48.78ms +[2025-09-11 10:36:50] [Rank 0] step:2381/10000 train_time:115833ms step_avg:48.65ms +[2025-09-11 10:36:50] [Rank 0] step:2381/10000 train_time:115833ms step_avg:48.65ms +[2025-09-11 10:36:50] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 10:36:50] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 10:36:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 10:36:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 10:36:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 10:36:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 10:36:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:36:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:36:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 10:36:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 10:36:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 10:36:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 10:36:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 10:36:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 10:36:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 10:36:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 10:36:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 10:36:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 10:36:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 10:36:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 10:36:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 10:36:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 10:36:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 10:36:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 10:36:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 10:36:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 10:36:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 10:36:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 10:36:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 10:36:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 10:36:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 10:36:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 10:36:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 10:36:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 10:36:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 10:36:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 10:36:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 10:36:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 10:36:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 10:36:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 10:36:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 10:36:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 10:36:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 10:36:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 10:37:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:37:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:37:00] [Rank 0] PRINT: step:2400/10000 val_loss:5.1266 total_sharp:1.3948e-03 L1_sharp:9.3490e-04 L2_sharp:4.1778e-04 L3_sharp:1.1169e-04 L4_sharp:1.0329e-04 L5_sharp:9.7526e-05 L6_sharp:9.7912e-05 L7_sharp:1.0590e-04 L8_sharp:3.4189e-04 L9_sharp:2.4658e-04 L10_sharp:3.3748e-04 L11_sharp:4.1103e-04 L12_sharp:2.2237e-03 total_fnorm:2.6875e+01 total_l1_linf:8.0896e+04 total_spectral:1.3375e+01 L1_fnorm:6.3750e+00 L2_fnorm:6.2188e+00 L3_fnorm:6.2500e+00 L4_fnorm:6.2500e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.2188e+00 L8_fnorm:5.9688e+00 L9_fnorm:6.1875e+00 L10_fnorm:6.2500e+00 L11_fnorm:6.2188e+00 L12_fnorm:5.9688e+00 L1_l1linf:1.7500e+00 L2_l1linf:1.6172e+00 L3_l1linf:1.6016e+00 L4_l1linf:1.5938e+00 L5_l1linf:1.6094e+00 L6_l1linf:1.5938e+00 L7_l1linf:1.5938e+00 L8_l1linf:1.6094e+00 L9_l1linf:1.5938e+00 L10_l1linf:1.6172e+00 L11_l1linf:1.6484e+00 L12_l1linf:1.4531e+00 L1_spectral:7.5939e-02 L2_spectral:7.3492e-02 L3_spectral:7.4347e-02 L4_spectral:7.3683e-02 L5_spectral:7.4416e-02 L6_spectral:7.4483e-02 L7_spectral:7.4158e-02 L8_spectral:7.4149e-02 L9_spectral:7.3470e-02 L10_spectral:7.3349e-02 L11_spectral:7.3265e-02 L12_spectral:7.2703e-02 train_time:116473ms step_avg:48.53ms +[2025-09-11 10:37:00] [Rank 0] PRINT: step:2400/10000 
val_loss:5.1266 total_sharp:1.3948e-03 L1_sharp:9.3490e-04 L2_sharp:4.1778e-04 L3_sharp:1.1169e-04 L4_sharp:1.0329e-04 L5_sharp:9.7526e-05 L6_sharp:9.7912e-05 L7_sharp:1.0590e-04 L8_sharp:3.4189e-04 L9_sharp:2.4658e-04 L10_sharp:3.3748e-04 L11_sharp:4.1103e-04 L12_sharp:2.2237e-03 total_fnorm:2.6875e+01 total_l1_linf:8.0896e+04 total_spectral:1.3375e+01 L1_fnorm:6.3750e+00 L2_fnorm:6.2188e+00 L3_fnorm:6.2500e+00 L4_fnorm:6.2500e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.2188e+00 L8_fnorm:5.9688e+00 L9_fnorm:6.1875e+00 L10_fnorm:6.2500e+00 L11_fnorm:6.2188e+00 L12_fnorm:5.9688e+00 L1_l1linf:1.7500e+00 L2_l1linf:1.6172e+00 L3_l1linf:1.6016e+00 L4_l1linf:1.5938e+00 L5_l1linf:1.6094e+00 L6_l1linf:1.5938e+00 L7_l1linf:1.5938e+00 L8_l1linf:1.6094e+00 L9_l1linf:1.5938e+00 L10_l1linf:1.6172e+00 L11_l1linf:1.6484e+00 L12_l1linf:1.4531e+00 L1_spectral:7.5939e-02 L2_spectral:7.3492e-02 L3_spectral:7.4347e-02 L4_spectral:7.3683e-02 L5_spectral:7.4416e-02 L6_spectral:7.4483e-02 L7_spectral:7.4158e-02 L8_spectral:7.4149e-02 L9_spectral:7.3470e-02 L10_spectral:7.3349e-02 L11_spectral:7.3265e-02 L12_spectral:7.2703e-02 train_time:116473ms step_avg:48.53ms +[2025-09-11 10:37:01] [Rank 0] step:2401/10000 train_time:117666ms step_avg:49.01ms +[2025-09-11 10:37:01] [Rank 0] step:2401/10000 train_time:117666ms step_avg:49.01ms +[2025-09-11 10:37:02] [Rank 0] step:2421/10000 train_time:118340ms step_avg:48.88ms +[2025-09-11 10:37:02] [Rank 0] step:2421/10000 train_time:118340ms step_avg:48.88ms +[2025-09-11 10:37:03] [Rank 0] step:2441/10000 train_time:119000ms step_avg:48.75ms +[2025-09-11 10:37:03] [Rank 0] step:2441/10000 train_time:119000ms step_avg:48.75ms +[2025-09-11 10:37:03] [Rank 0] step:2461/10000 train_time:119660ms step_avg:48.62ms +[2025-09-11 10:37:03] [Rank 0] step:2461/10000 train_time:119660ms step_avg:48.62ms +[2025-09-11 10:37:04] [Rank 0] step:2481/10000 train_time:120320ms step_avg:48.50ms +[2025-09-11 10:37:04] [Rank 0] step:2481/10000 
train_time:120320ms step_avg:48.50ms +[2025-09-11 10:37:05] [Rank 0] step:2501/10000 train_time:120979ms step_avg:48.37ms +[2025-09-11 10:37:05] [Rank 0] step:2501/10000 train_time:120979ms step_avg:48.37ms +[2025-09-11 10:37:05] [Rank 0] step:2521/10000 train_time:121638ms step_avg:48.25ms +[2025-09-11 10:37:05] [Rank 0] step:2521/10000 train_time:121638ms step_avg:48.25ms +[2025-09-11 10:37:06] [Rank 0] step:2541/10000 train_time:122298ms step_avg:48.13ms +[2025-09-11 10:37:06] [Rank 0] step:2541/10000 train_time:122298ms step_avg:48.13ms +[2025-09-11 10:37:07] [Rank 0] step:2561/10000 train_time:122957ms step_avg:48.01ms +[2025-09-11 10:37:07] [Rank 0] step:2561/10000 train_time:122957ms step_avg:48.01ms +[2025-09-11 10:37:07] [Rank 0] step:2581/10000 train_time:123617ms step_avg:47.89ms +[2025-09-11 10:37:07] [Rank 0] step:2581/10000 train_time:123617ms step_avg:47.89ms +[2025-09-11 10:37:08] [Rank 0] step:2601/10000 train_time:124276ms step_avg:47.78ms +[2025-09-11 10:37:08] [Rank 0] step:2601/10000 train_time:124276ms step_avg:47.78ms +[2025-09-11 10:37:09] [Rank 0] step:2621/10000 train_time:124935ms step_avg:47.67ms +[2025-09-11 10:37:09] [Rank 0] step:2621/10000 train_time:124935ms step_avg:47.67ms +[2025-09-11 10:37:09] [Rank 0] step:2641/10000 train_time:125594ms step_avg:47.56ms +[2025-09-11 10:37:09] [Rank 0] step:2641/10000 train_time:125594ms step_avg:47.56ms +[2025-09-11 10:37:10] [Rank 0] step:2661/10000 train_time:126253ms step_avg:47.45ms +[2025-09-11 10:37:10] [Rank 0] step:2661/10000 train_time:126253ms step_avg:47.45ms +[2025-09-11 10:37:10] [Rank 0] step:2681/10000 train_time:126913ms step_avg:47.34ms +[2025-09-11 10:37:10] [Rank 0] step:2681/10000 train_time:126913ms step_avg:47.34ms +[2025-09-11 10:37:11] [Rank 0] step:2701/10000 train_time:127572ms step_avg:47.23ms +[2025-09-11 10:37:11] [Rank 0] step:2701/10000 train_time:127572ms step_avg:47.23ms +[2025-09-11 10:37:12] [Rank 0] step:2721/10000 train_time:128231ms step_avg:47.13ms 
+[2025-09-11 10:37:12] [Rank 0] step:2721/10000 train_time:128231ms step_avg:47.13ms +[2025-09-11 10:37:12] [Rank 0] step:2741/10000 train_time:128890ms step_avg:47.02ms +[2025-09-11 10:37:12] [Rank 0] step:2741/10000 train_time:128890ms step_avg:47.02ms +[2025-09-11 10:37:13] [Rank 0] step:2761/10000 train_time:129549ms step_avg:46.92ms +[2025-09-11 10:37:13] [Rank 0] step:2761/10000 train_time:129549ms step_avg:46.92ms +[2025-09-11 10:37:14] [Rank 0] step:2781/10000 train_time:130208ms step_avg:46.82ms +[2025-09-11 10:37:14] [Rank 0] step:2781/10000 train_time:130208ms step_avg:46.82ms +[2025-09-11 10:37:14] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 10:37:14] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 10:37:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 10:37:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 10:37:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 10:37:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 10:37:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:37:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:37:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 10:37:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 10:37:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 10:37:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 10:37:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 10:37:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 10:37:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 10:37:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 10:37:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 10:37:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 10:37:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 10:37:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 10:37:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 10:37:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 10:37:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 10:37:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 10:37:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 10:37:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 10:37:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 10:37:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 10:37:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 10:37:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 10:37:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 10:37:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 10:37:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 10:37:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 10:37:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 10:37:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 10:37:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 10:37:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 10:37:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 10:37:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 10:37:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 10:37:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 10:37:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 10:37:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 10:37:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:37:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:37:24] [Rank 0] PRINT: step:2800/10000 val_loss:5.0552 total_sharp:1.3808e-03 L1_sharp:6.8299e-04 L2_sharp:3.1316e-04 L3_sharp:1.3556e-04 L4_sharp:1.5718e-04 L5_sharp:9.2487e-05 L6_sharp:1.1309e-04 L7_sharp:1.0420e-04 L8_sharp:2.9632e-04 L9_sharp:2.2285e-04 L10_sharp:2.7493e-04 L11_sharp:3.7160e-04 L12_sharp:1.4956e-03 total_fnorm:2.6250e+01 total_l1_linf:7.8336e+04 total_spectral:1.3125e+01 L1_fnorm:6.4062e+00 L2_fnorm:6.2188e+00 L3_fnorm:6.2500e+00 L4_fnorm:6.2188e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.0000e+00 L9_fnorm:6.2188e+00 L10_fnorm:6.2500e+00 L11_fnorm:6.2500e+00 L12_fnorm:6.0000e+00 L1_l1linf:1.7812e+00 L2_l1linf:1.6016e+00 L3_l1linf:1.5703e+00 L4_l1linf:1.5938e+00 L5_l1linf:1.5703e+00 L6_l1linf:1.5938e+00 L7_l1linf:1.5938e+00 L8_l1linf:1.5391e+00 L9_l1linf:1.5469e+00 L10_l1linf:1.5625e+00 L11_l1linf:1.6016e+00 L12_l1linf:1.4375e+00 L1_spectral:7.6743e-02 L2_spectral:7.4590e-02 L3_spectral:7.5251e-02 L4_spectral:7.4712e-02 L5_spectral:7.5244e-02 L6_spectral:7.5463e-02 L7_spectral:7.4730e-02 L8_spectral:7.4460e-02 L9_spectral:7.4735e-02 L10_spectral:7.4498e-02 L11_spectral:7.4377e-02 L12_spectral:7.3851e-02 train_time:130848ms step_avg:46.73ms +[2025-09-11 10:37:24] [Rank 0] PRINT: step:2800/10000 val_loss:5.0552 total_sharp:1.3808e-03 L1_sharp:6.8299e-04 L2_sharp:3.1316e-04 L3_sharp:1.3556e-04 L4_sharp:1.5718e-04 L5_sharp:9.2487e-05 L6_sharp:1.1309e-04 L7_sharp:1.0420e-04 L8_sharp:2.9632e-04 L9_sharp:2.2285e-04 L10_sharp:2.7493e-04 L11_sharp:3.7160e-04 L12_sharp:1.4956e-03 total_fnorm:2.6250e+01 total_l1_linf:7.8336e+04 total_spectral:1.3125e+01 L1_fnorm:6.4062e+00 L2_fnorm:6.2188e+00 L3_fnorm:6.2500e+00 L4_fnorm:6.2188e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.0000e+00 L9_fnorm:6.2188e+00 L10_fnorm:6.2500e+00 L11_fnorm:6.2500e+00 L12_fnorm:6.0000e+00 L1_l1linf:1.7812e+00 L2_l1linf:1.6016e+00 L3_l1linf:1.5703e+00 L4_l1linf:1.5938e+00 L5_l1linf:1.5703e+00 
L6_l1linf:1.5938e+00 L7_l1linf:1.5938e+00 L8_l1linf:1.5391e+00 L9_l1linf:1.5469e+00 L10_l1linf:1.5625e+00 L11_l1linf:1.6016e+00 L12_l1linf:1.4375e+00 L1_spectral:7.6743e-02 L2_spectral:7.4590e-02 L3_spectral:7.5251e-02 L4_spectral:7.4712e-02 L5_spectral:7.5244e-02 L6_spectral:7.5463e-02 L7_spectral:7.4730e-02 L8_spectral:7.4460e-02 L9_spectral:7.4735e-02 L10_spectral:7.4498e-02 L11_spectral:7.4377e-02 L12_spectral:7.3851e-02 train_time:130848ms step_avg:46.73ms +[2025-09-11 10:37:25] [Rank 0] step:2801/10000 train_time:132051ms step_avg:47.14ms +[2025-09-11 10:37:25] [Rank 0] step:2801/10000 train_time:132051ms step_avg:47.14ms +[2025-09-11 10:37:26] [Rank 0] step:2821/10000 train_time:132712ms step_avg:47.04ms +[2025-09-11 10:37:26] [Rank 0] step:2821/10000 train_time:132712ms step_avg:47.04ms +[2025-09-11 10:37:27] [Rank 0] step:2841/10000 train_time:133372ms step_avg:46.95ms +[2025-09-11 10:37:27] [Rank 0] step:2841/10000 train_time:133372ms step_avg:46.95ms +[2025-09-11 10:37:27] [Rank 0] step:2861/10000 train_time:134032ms step_avg:46.85ms +[2025-09-11 10:37:27] [Rank 0] step:2861/10000 train_time:134032ms step_avg:46.85ms +[2025-09-11 10:37:28] [Rank 0] step:2881/10000 train_time:134691ms step_avg:46.75ms +[2025-09-11 10:37:28] [Rank 0] step:2881/10000 train_time:134691ms step_avg:46.75ms +[2025-09-11 10:37:29] [Rank 0] step:2901/10000 train_time:135350ms step_avg:46.66ms +[2025-09-11 10:37:29] [Rank 0] step:2901/10000 train_time:135350ms step_avg:46.66ms +[2025-09-11 10:37:29] [Rank 0] step:2921/10000 train_time:136009ms step_avg:46.56ms +[2025-09-11 10:37:29] [Rank 0] step:2921/10000 train_time:136009ms step_avg:46.56ms +[2025-09-11 10:37:30] [Rank 0] step:2941/10000 train_time:136667ms step_avg:46.47ms +[2025-09-11 10:37:30] [Rank 0] step:2941/10000 train_time:136667ms step_avg:46.47ms +[2025-09-11 10:37:31] [Rank 0] step:2961/10000 train_time:137326ms step_avg:46.38ms +[2025-09-11 10:37:31] [Rank 0] step:2961/10000 train_time:137326ms step_avg:46.38ms 
+[2025-09-11 10:37:31] [Rank 0] step:2981/10000 train_time:137987ms step_avg:46.29ms +[2025-09-11 10:37:31] [Rank 0] step:2981/10000 train_time:137987ms step_avg:46.29ms +[2025-09-11 10:37:32] [Rank 0] step:3001/10000 train_time:138648ms step_avg:46.20ms +[2025-09-11 10:37:32] [Rank 0] step:3001/10000 train_time:138648ms step_avg:46.20ms +[2025-09-11 10:37:33] [Rank 0] step:3021/10000 train_time:139310ms step_avg:46.11ms +[2025-09-11 10:37:33] [Rank 0] step:3021/10000 train_time:139310ms step_avg:46.11ms +[2025-09-11 10:37:33] [Rank 0] step:3041/10000 train_time:139972ms step_avg:46.03ms +[2025-09-11 10:37:33] [Rank 0] step:3041/10000 train_time:139972ms step_avg:46.03ms +[2025-09-11 10:37:34] [Rank 0] step:3061/10000 train_time:140633ms step_avg:45.94ms +[2025-09-11 10:37:34] [Rank 0] step:3061/10000 train_time:140633ms step_avg:45.94ms +[2025-09-11 10:37:35] [Rank 0] step:3081/10000 train_time:141295ms step_avg:45.86ms +[2025-09-11 10:37:35] [Rank 0] step:3081/10000 train_time:141295ms step_avg:45.86ms +[2025-09-11 10:37:35] [Rank 0] step:3101/10000 train_time:141956ms step_avg:45.78ms +[2025-09-11 10:37:35] [Rank 0] step:3101/10000 train_time:141956ms step_avg:45.78ms +[2025-09-11 10:37:36] [Rank 0] step:3121/10000 train_time:142617ms step_avg:45.70ms +[2025-09-11 10:37:36] [Rank 0] step:3121/10000 train_time:142617ms step_avg:45.70ms +[2025-09-11 10:37:37] [Rank 0] step:3141/10000 train_time:143278ms step_avg:45.62ms +[2025-09-11 10:37:37] [Rank 0] step:3141/10000 train_time:143278ms step_avg:45.62ms +[2025-09-11 10:37:37] [Rank 0] step:3161/10000 train_time:143939ms step_avg:45.54ms +[2025-09-11 10:37:37] [Rank 0] step:3161/10000 train_time:143939ms step_avg:45.54ms +[2025-09-11 10:37:38] [Rank 0] step:3181/10000 train_time:144600ms step_avg:45.46ms +[2025-09-11 10:37:38] [Rank 0] step:3181/10000 train_time:144600ms step_avg:45.46ms +[2025-09-11 10:37:39] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 10:37:39] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 10:37:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 10:37:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 10:37:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 10:37:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 10:37:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:37:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:37:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 10:37:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 10:37:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 10:37:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 10:37:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 10:37:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 10:37:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 10:37:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 10:37:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 10:37:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 10:37:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 10:37:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 10:37:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 10:37:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 10:37:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 10:37:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 10:37:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 10:37:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 10:37:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 10:37:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 10:37:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 10:37:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 10:37:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 10:37:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 10:37:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 10:37:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 10:37:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 10:37:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 10:37:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 10:37:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 10:37:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 10:37:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 10:37:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 10:37:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 10:37:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 10:37:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 10:37:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:37:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:37:49] [Rank 0] PRINT: step:3200/10000 val_loss:4.9634 total_sharp:9.5355e-04 L1_sharp:5.4882e-04 L2_sharp:2.7171e-04 L3_sharp:1.4944e-04 L4_sharp:1.1547e-04 L5_sharp:1.1248e-04 L6_sharp:9.2334e-05 L7_sharp:8.6799e-05 L8_sharp:2.5948e-04 L9_sharp:1.9937e-04 L10_sharp:2.4956e-04 L11_sharp:3.2990e-04 L12_sharp:1.6270e-03 total_fnorm:2.7875e+01 total_l1_linf:8.1920e+04 total_spectral:1.3938e+01 L1_fnorm:6.4375e+00 L2_fnorm:6.2500e+00 L3_fnorm:6.2812e+00 L4_fnorm:6.2500e+00 L5_fnorm:6.2500e+00 L6_fnorm:6.2812e+00 L7_fnorm:6.2500e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.2500e+00 L10_fnorm:6.2500e+00 L11_fnorm:6.2812e+00 L12_fnorm:6.0938e+00 L1_l1linf:1.7188e+00 L2_l1linf:1.5781e+00 L3_l1linf:1.5391e+00 L4_l1linf:1.5547e+00 L5_l1linf:1.5547e+00 L6_l1linf:1.5781e+00 L7_l1linf:1.5781e+00 L8_l1linf:1.5000e+00 L9_l1linf:1.4844e+00 L10_l1linf:1.5000e+00 L11_l1linf:1.5625e+00 L12_l1linf:1.4609e+00 L1_spectral:7.7331e-02 L2_spectral:7.5346e-02 L3_spectral:7.6155e-02 L4_spectral:7.6047e-02 L5_spectral:7.6823e-02 L6_spectral:7.6782e-02 L7_spectral:7.6335e-02 L8_spectral:7.6279e-02 L9_spectral:7.5840e-02 L10_spectral:7.6075e-02 L11_spectral:7.5574e-02 L12_spectral:7.5189e-02 train_time:145242ms step_avg:45.39ms +[2025-09-11 10:37:49] [Rank 0] PRINT: step:3200/10000 
val_loss:4.9634 total_sharp:9.5355e-04 L1_sharp:5.4882e-04 L2_sharp:2.7171e-04 L3_sharp:1.4944e-04 L4_sharp:1.1547e-04 L5_sharp:1.1248e-04 L6_sharp:9.2334e-05 L7_sharp:8.6799e-05 L8_sharp:2.5948e-04 L9_sharp:1.9937e-04 L10_sharp:2.4956e-04 L11_sharp:3.2990e-04 L12_sharp:1.6270e-03 total_fnorm:2.7875e+01 total_l1_linf:8.1920e+04 total_spectral:1.3938e+01 L1_fnorm:6.4375e+00 L2_fnorm:6.2500e+00 L3_fnorm:6.2812e+00 L4_fnorm:6.2500e+00 L5_fnorm:6.2500e+00 L6_fnorm:6.2812e+00 L7_fnorm:6.2500e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.2500e+00 L10_fnorm:6.2500e+00 L11_fnorm:6.2812e+00 L12_fnorm:6.0938e+00 L1_l1linf:1.7188e+00 L2_l1linf:1.5781e+00 L3_l1linf:1.5391e+00 L4_l1linf:1.5547e+00 L5_l1linf:1.5547e+00 L6_l1linf:1.5781e+00 L7_l1linf:1.5781e+00 L8_l1linf:1.5000e+00 L9_l1linf:1.4844e+00 L10_l1linf:1.5000e+00 L11_l1linf:1.5625e+00 L12_l1linf:1.4609e+00 L1_spectral:7.7331e-02 L2_spectral:7.5346e-02 L3_spectral:7.6155e-02 L4_spectral:7.6047e-02 L5_spectral:7.6823e-02 L6_spectral:7.6782e-02 L7_spectral:7.6335e-02 L8_spectral:7.6279e-02 L9_spectral:7.5840e-02 L10_spectral:7.6075e-02 L11_spectral:7.5574e-02 L12_spectral:7.5189e-02 train_time:145242ms step_avg:45.39ms +[2025-09-11 10:37:50] [Rank 0] step:3201/10000 train_time:146411ms step_avg:45.74ms +[2025-09-11 10:37:50] [Rank 0] step:3201/10000 train_time:146411ms step_avg:45.74ms +[2025-09-11 10:37:50] [Rank 0] step:3221/10000 train_time:147078ms step_avg:45.66ms +[2025-09-11 10:37:50] [Rank 0] step:3221/10000 train_time:147078ms step_avg:45.66ms +[2025-09-11 10:37:51] [Rank 0] step:3241/10000 train_time:147740ms step_avg:45.58ms +[2025-09-11 10:37:51] [Rank 0] step:3241/10000 train_time:147740ms step_avg:45.58ms +[2025-09-11 10:37:52] [Rank 0] step:3261/10000 train_time:148656ms step_avg:45.59ms +[2025-09-11 10:37:52] [Rank 0] step:3261/10000 train_time:148656ms step_avg:45.59ms +[2025-09-11 10:37:53] [Rank 0] step:3281/10000 train_time:149321ms step_avg:45.51ms +[2025-09-11 10:37:53] [Rank 0] step:3281/10000 
train_time:149321ms step_avg:45.51ms +[2025-09-11 10:37:53] [Rank 0] step:3301/10000 train_time:149983ms step_avg:45.44ms +[2025-09-11 10:37:53] [Rank 0] step:3301/10000 train_time:149983ms step_avg:45.44ms +[2025-09-11 10:37:54] [Rank 0] step:3321/10000 train_time:150646ms step_avg:45.36ms +[2025-09-11 10:37:54] [Rank 0] step:3321/10000 train_time:150646ms step_avg:45.36ms +[2025-09-11 10:37:55] [Rank 0] step:3341/10000 train_time:151317ms step_avg:45.29ms +[2025-09-11 10:37:55] [Rank 0] step:3341/10000 train_time:151317ms step_avg:45.29ms +[2025-09-11 10:37:55] [Rank 0] step:3361/10000 train_time:151981ms step_avg:45.22ms +[2025-09-11 10:37:55] [Rank 0] step:3361/10000 train_time:151981ms step_avg:45.22ms +[2025-09-11 10:37:56] [Rank 0] step:3381/10000 train_time:152643ms step_avg:45.15ms +[2025-09-11 10:37:56] [Rank 0] step:3381/10000 train_time:152643ms step_avg:45.15ms +[2025-09-11 10:37:57] [Rank 0] step:3401/10000 train_time:153305ms step_avg:45.08ms +[2025-09-11 10:37:57] [Rank 0] step:3401/10000 train_time:153305ms step_avg:45.08ms +[2025-09-11 10:37:57] [Rank 0] step:3421/10000 train_time:153967ms step_avg:45.01ms +[2025-09-11 10:37:57] [Rank 0] step:3421/10000 train_time:153967ms step_avg:45.01ms +[2025-09-11 10:37:58] [Rank 0] step:3441/10000 train_time:154629ms step_avg:44.94ms +[2025-09-11 10:37:58] [Rank 0] step:3441/10000 train_time:154629ms step_avg:44.94ms +[2025-09-11 10:37:59] [Rank 0] step:3461/10000 train_time:155291ms step_avg:44.87ms +[2025-09-11 10:37:59] [Rank 0] step:3461/10000 train_time:155291ms step_avg:44.87ms +[2025-09-11 10:37:59] [Rank 0] step:3481/10000 train_time:155953ms step_avg:44.80ms +[2025-09-11 10:37:59] [Rank 0] step:3481/10000 train_time:155953ms step_avg:44.80ms +[2025-09-11 10:38:00] [Rank 0] step:3501/10000 train_time:156615ms step_avg:44.73ms +[2025-09-11 10:38:00] [Rank 0] step:3501/10000 train_time:156615ms step_avg:44.73ms +[2025-09-11 10:38:01] [Rank 0] step:3521/10000 train_time:157276ms step_avg:44.67ms 
+[2025-09-11 10:38:01] [Rank 0] step:3521/10000 train_time:157276ms step_avg:44.67ms +[2025-09-11 10:38:01] [Rank 0] step:3541/10000 train_time:157938ms step_avg:44.60ms +[2025-09-11 10:38:01] [Rank 0] step:3541/10000 train_time:157938ms step_avg:44.60ms +[2025-09-11 10:38:02] [Rank 0] step:3561/10000 train_time:158600ms step_avg:44.54ms +[2025-09-11 10:38:02] [Rank 0] step:3561/10000 train_time:158600ms step_avg:44.54ms +[2025-09-11 10:38:03] [Rank 0] step:3581/10000 train_time:159262ms step_avg:44.47ms +[2025-09-11 10:38:03] [Rank 0] step:3581/10000 train_time:159262ms step_avg:44.47ms +[2025-09-11 10:38:03] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 10:38:03] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 10:38:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 10:38:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 10:38:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 10:38:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 10:38:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:38:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:38:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 10:38:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 10:38:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 10:38:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 10:38:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 10:38:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 10:38:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 10:38:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 10:38:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 10:38:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 10:38:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 10:38:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 10:38:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 10:38:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 10:38:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 10:38:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 10:38:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 10:38:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 10:38:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 10:38:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 10:38:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 10:38:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 10:38:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 10:38:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 10:38:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 10:38:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 10:38:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 10:38:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 10:38:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 10:38:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 10:38:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 10:38:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 10:38:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 10:38:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 10:38:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 10:38:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 10:38:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:38:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:38:13] [Rank 0] PRINT: step:3600/10000 val_loss:4.9108 total_sharp:8.9769e-04 L1_sharp:4.9918e-04 L2_sharp:1.5198e-04 L3_sharp:9.1829e-05 L4_sharp:1.3470e-04 L5_sharp:8.4314e-05 L6_sharp:1.0902e-04 L7_sharp:7.3753e-05 L8_sharp:2.4351e-04 L9_sharp:2.1542e-04 L10_sharp:2.4271e-04 L11_sharp:2.9590e-04 L12_sharp:1.1396e-03 total_fnorm:2.6125e+01 total_l1_linf:7.6288e+04 total_spectral:1.3188e+01 L1_fnorm:6.4375e+00 L2_fnorm:6.2188e+00 L3_fnorm:6.2812e+00 L4_fnorm:6.2500e+00 L5_fnorm:6.2500e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2500e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.2500e+00 L10_fnorm:6.2812e+00 L11_fnorm:6.2812e+00 L12_fnorm:6.1250e+00 L1_l1linf:1.7266e+00 L2_l1linf:1.5391e+00 L3_l1linf:1.5391e+00 L4_l1linf:1.5391e+00 L5_l1linf:1.5547e+00 L6_l1linf:1.5703e+00 L7_l1linf:1.5234e+00 L8_l1linf:1.4844e+00 L9_l1linf:1.4531e+00 L10_l1linf:1.5000e+00 L11_l1linf:1.5312e+00 L12_l1linf:1.4531e+00 L1_spectral:7.8062e-02 L2_spectral:7.5794e-02 L3_spectral:7.6619e-02 L4_spectral:7.6905e-02 L5_spectral:7.6833e-02 L6_spectral:7.6730e-02 L7_spectral:7.7024e-02 L8_spectral:7.6075e-02 L9_spectral:7.6939e-02 L10_spectral:7.6455e-02 L11_spectral:7.6494e-02 L12_spectral:7.5817e-02 train_time:159905ms step_avg:44.42ms +[2025-09-11 10:38:13] [Rank 0] PRINT: step:3600/10000 val_loss:4.9108 total_sharp:8.9769e-04 L1_sharp:4.9918e-04 L2_sharp:1.5198e-04 L3_sharp:9.1829e-05 L4_sharp:1.3470e-04 L5_sharp:8.4314e-05 L6_sharp:1.0902e-04 L7_sharp:7.3753e-05 L8_sharp:2.4351e-04 L9_sharp:2.1542e-04 L10_sharp:2.4271e-04 L11_sharp:2.9590e-04 L12_sharp:1.1396e-03 total_fnorm:2.6125e+01 total_l1_linf:7.6288e+04 total_spectral:1.3188e+01 L1_fnorm:6.4375e+00 L2_fnorm:6.2188e+00 L3_fnorm:6.2812e+00 L4_fnorm:6.2500e+00 L5_fnorm:6.2500e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2500e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.2500e+00 L10_fnorm:6.2812e+00 L11_fnorm:6.2812e+00 L12_fnorm:6.1250e+00 L1_l1linf:1.7266e+00 L2_l1linf:1.5391e+00 L3_l1linf:1.5391e+00 L4_l1linf:1.5391e+00 L5_l1linf:1.5547e+00 
L6_l1linf:1.5703e+00 L7_l1linf:1.5234e+00 L8_l1linf:1.4844e+00 L9_l1linf:1.4531e+00 L10_l1linf:1.5000e+00 L11_l1linf:1.5312e+00 L12_l1linf:1.4531e+00 L1_spectral:7.8062e-02 L2_spectral:7.5794e-02 L3_spectral:7.6619e-02 L4_spectral:7.6905e-02 L5_spectral:7.6833e-02 L6_spectral:7.6730e-02 L7_spectral:7.7024e-02 L8_spectral:7.6075e-02 L9_spectral:7.6939e-02 L10_spectral:7.6455e-02 L11_spectral:7.6494e-02 L12_spectral:7.5817e-02 train_time:159905ms step_avg:44.42ms +[2025-09-11 10:38:14] [Rank 0] step:3601/10000 train_time:161142ms step_avg:44.75ms +[2025-09-11 10:38:14] [Rank 0] step:3601/10000 train_time:161142ms step_avg:44.75ms +[2025-09-11 10:38:15] [Rank 0] step:3621/10000 train_time:161826ms step_avg:44.69ms +[2025-09-11 10:38:15] [Rank 0] step:3621/10000 train_time:161826ms step_avg:44.69ms +[2025-09-11 10:38:16] [Rank 0] step:3641/10000 train_time:162488ms step_avg:44.63ms +[2025-09-11 10:38:16] [Rank 0] step:3641/10000 train_time:162488ms step_avg:44.63ms +[2025-09-11 10:38:16] [Rank 0] step:3661/10000 train_time:163150ms step_avg:44.56ms +[2025-09-11 10:38:16] [Rank 0] step:3661/10000 train_time:163150ms step_avg:44.56ms +[2025-09-11 10:38:17] [Rank 0] step:3681/10000 train_time:163812ms step_avg:44.50ms +[2025-09-11 10:38:17] [Rank 0] step:3681/10000 train_time:163812ms step_avg:44.50ms +[2025-09-11 10:38:18] [Rank 0] step:3701/10000 train_time:164475ms step_avg:44.44ms +[2025-09-11 10:38:18] [Rank 0] step:3701/10000 train_time:164475ms step_avg:44.44ms +[2025-09-11 10:38:18] [Rank 0] step:3721/10000 train_time:165146ms step_avg:44.38ms +[2025-09-11 10:38:18] [Rank 0] step:3721/10000 train_time:165146ms step_avg:44.38ms +[2025-09-11 10:38:19] [Rank 0] step:3741/10000 train_time:165819ms step_avg:44.32ms +[2025-09-11 10:38:19] [Rank 0] step:3741/10000 train_time:165819ms step_avg:44.32ms +[2025-09-11 10:38:20] [Rank 0] step:3761/10000 train_time:166493ms step_avg:44.27ms +[2025-09-11 10:38:20] [Rank 0] step:3761/10000 train_time:166493ms step_avg:44.27ms 
+[2025-09-11 10:38:20] [Rank 0] step:3781/10000 train_time:167166ms step_avg:44.21ms +[2025-09-11 10:38:20] [Rank 0] step:3781/10000 train_time:167166ms step_avg:44.21ms +[2025-09-11 10:38:21] [Rank 0] step:3801/10000 train_time:167839ms step_avg:44.16ms +[2025-09-11 10:38:21] [Rank 0] step:3801/10000 train_time:167839ms step_avg:44.16ms +[2025-09-11 10:38:22] [Rank 0] step:3821/10000 train_time:168512ms step_avg:44.10ms +[2025-09-11 10:38:22] [Rank 0] step:3821/10000 train_time:168512ms step_avg:44.10ms +[2025-09-11 10:38:22] [Rank 0] step:3841/10000 train_time:169185ms step_avg:44.05ms +[2025-09-11 10:38:22] [Rank 0] step:3841/10000 train_time:169185ms step_avg:44.05ms +[2025-09-11 10:38:23] [Rank 0] step:3861/10000 train_time:169858ms step_avg:43.99ms +[2025-09-11 10:38:23] [Rank 0] step:3861/10000 train_time:169858ms step_avg:43.99ms +[2025-09-11 10:38:24] [Rank 0] step:3881/10000 train_time:170530ms step_avg:43.94ms +[2025-09-11 10:38:24] [Rank 0] step:3881/10000 train_time:170530ms step_avg:43.94ms +[2025-09-11 10:38:24] [Rank 0] step:3901/10000 train_time:171202ms step_avg:43.89ms +[2025-09-11 10:38:24] [Rank 0] step:3901/10000 train_time:171202ms step_avg:43.89ms +[2025-09-11 10:38:25] [Rank 0] step:3921/10000 train_time:171875ms step_avg:43.83ms +[2025-09-11 10:38:25] [Rank 0] step:3921/10000 train_time:171875ms step_avg:43.83ms +[2025-09-11 10:38:26] [Rank 0] step:3941/10000 train_time:172550ms step_avg:43.78ms +[2025-09-11 10:38:26] [Rank 0] step:3941/10000 train_time:172550ms step_avg:43.78ms +[2025-09-11 10:38:26] [Rank 0] step:3961/10000 train_time:173222ms step_avg:43.73ms +[2025-09-11 10:38:26] [Rank 0] step:3961/10000 train_time:173222ms step_avg:43.73ms +[2025-09-11 10:38:27] [Rank 0] step:3981/10000 train_time:173894ms step_avg:43.68ms +[2025-09-11 10:38:27] [Rank 0] step:3981/10000 train_time:173894ms step_avg:43.68ms +[2025-09-11 10:38:28] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 10:38:28] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 10:38:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 10:38:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 10:38:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 10:38:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 10:38:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:38:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:38:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 10:38:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 10:38:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 10:38:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 10:38:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 10:38:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 10:38:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 10:38:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 10:38:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 10:38:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 10:38:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 10:38:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 10:38:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 10:38:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 10:38:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 10:38:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 10:38:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 10:38:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 10:38:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 10:38:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 10:38:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 10:38:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 10:38:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 10:38:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 10:38:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 10:38:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 10:38:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 10:38:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 10:38:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 10:38:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 10:38:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 10:38:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 10:38:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 10:38:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 10:38:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 10:38:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 10:38:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:38:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:38:38] [Rank 0] PRINT: step:4000/10000 val_loss:4.8515 total_sharp:1.1898e-03 L1_sharp:5.5225e-04 L2_sharp:2.2896e-04 L3_sharp:7.2782e-05 L4_sharp:7.4128e-05 L5_sharp:8.9673e-05 L6_sharp:1.1069e-04 L7_sharp:9.1481e-05 L8_sharp:3.0855e-04 L9_sharp:2.4827e-04 L10_sharp:3.0316e-04 L11_sharp:4.0381e-04 L12_sharp:2.8672e-03 total_fnorm:2.7875e+01 total_l1_linf:7.9360e+04 total_spectral:1.4062e+01 L1_fnorm:6.4062e+00 L2_fnorm:6.1562e+00 L3_fnorm:6.2500e+00 L4_fnorm:6.2500e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2188e+00 L8_fnorm:5.9688e+00 L9_fnorm:6.2188e+00 L10_fnorm:6.2500e+00 L11_fnorm:6.2500e+00 L12_fnorm:6.0938e+00 L1_l1linf:1.6797e+00 L2_l1linf:1.5312e+00 L3_l1linf:1.5391e+00 L4_l1linf:1.5156e+00 L5_l1linf:1.5234e+00 L6_l1linf:1.5469e+00 L7_l1linf:1.5469e+00 L8_l1linf:1.4766e+00 L9_l1linf:1.4688e+00 L10_l1linf:1.4688e+00 L11_l1linf:1.5625e+00 L12_l1linf:1.4141e+00 L1_spectral:7.8932e-02 L2_spectral:7.6260e-02 L3_spectral:7.7385e-02 L4_spectral:7.6869e-02 L5_spectral:7.7474e-02 L6_spectral:7.7635e-02 L7_spectral:7.6729e-02 L8_spectral:7.4865e-02 L9_spectral:7.6287e-02 L10_spectral:7.6646e-02 L11_spectral:7.6866e-02 L12_spectral:7.6440e-02 train_time:174548ms step_avg:43.64ms +[2025-09-11 10:38:38] [Rank 0] PRINT: step:4000/10000 
val_loss:4.8515 total_sharp:1.1898e-03 L1_sharp:5.5225e-04 L2_sharp:2.2896e-04 L3_sharp:7.2782e-05 L4_sharp:7.4128e-05 L5_sharp:8.9673e-05 L6_sharp:1.1069e-04 L7_sharp:9.1481e-05 L8_sharp:3.0855e-04 L9_sharp:2.4827e-04 L10_sharp:3.0316e-04 L11_sharp:4.0381e-04 L12_sharp:2.8672e-03 total_fnorm:2.7875e+01 total_l1_linf:7.9360e+04 total_spectral:1.4062e+01 L1_fnorm:6.4062e+00 L2_fnorm:6.1562e+00 L3_fnorm:6.2500e+00 L4_fnorm:6.2500e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2188e+00 L8_fnorm:5.9688e+00 L9_fnorm:6.2188e+00 L10_fnorm:6.2500e+00 L11_fnorm:6.2500e+00 L12_fnorm:6.0938e+00 L1_l1linf:1.6797e+00 L2_l1linf:1.5312e+00 L3_l1linf:1.5391e+00 L4_l1linf:1.5156e+00 L5_l1linf:1.5234e+00 L6_l1linf:1.5469e+00 L7_l1linf:1.5469e+00 L8_l1linf:1.4766e+00 L9_l1linf:1.4688e+00 L10_l1linf:1.4688e+00 L11_l1linf:1.5625e+00 L12_l1linf:1.4141e+00 L1_spectral:7.8932e-02 L2_spectral:7.6260e-02 L3_spectral:7.7385e-02 L4_spectral:7.6869e-02 L5_spectral:7.7474e-02 L6_spectral:7.7635e-02 L7_spectral:7.6729e-02 L8_spectral:7.4865e-02 L9_spectral:7.6287e-02 L10_spectral:7.6646e-02 L11_spectral:7.6866e-02 L12_spectral:7.6440e-02 train_time:174548ms step_avg:43.64ms +[2025-09-11 10:38:41] [Rank 0] step:4001/10000 train_time:177549ms step_avg:44.38ms +[2025-09-11 10:38:41] [Rank 0] step:4001/10000 train_time:177549ms step_avg:44.38ms +[2025-09-11 10:38:42] [Rank 0] step:4021/10000 train_time:178238ms step_avg:44.33ms +[2025-09-11 10:38:42] [Rank 0] step:4021/10000 train_time:178238ms step_avg:44.33ms +[2025-09-11 10:38:42] [Rank 0] step:4041/10000 train_time:178913ms step_avg:44.27ms +[2025-09-11 10:38:42] [Rank 0] step:4041/10000 train_time:178913ms step_avg:44.27ms +[2025-09-11 10:38:43] [Rank 0] step:4061/10000 train_time:179584ms step_avg:44.22ms +[2025-09-11 10:38:43] [Rank 0] step:4061/10000 train_time:179584ms step_avg:44.22ms +[2025-09-11 10:38:44] [Rank 0] step:4081/10000 train_time:180258ms step_avg:44.17ms +[2025-09-11 10:38:44] [Rank 0] step:4081/10000 
train_time:180258ms step_avg:44.17ms +[2025-09-11 10:38:44] [Rank 0] step:4101/10000 train_time:180930ms step_avg:44.12ms +[2025-09-11 10:38:44] [Rank 0] step:4101/10000 train_time:180930ms step_avg:44.12ms +[2025-09-11 10:38:45] [Rank 0] step:4121/10000 train_time:181604ms step_avg:44.07ms +[2025-09-11 10:38:45] [Rank 0] step:4121/10000 train_time:181604ms step_avg:44.07ms +[2025-09-11 10:38:46] [Rank 0] step:4141/10000 train_time:182276ms step_avg:44.02ms +[2025-09-11 10:38:46] [Rank 0] step:4141/10000 train_time:182276ms step_avg:44.02ms +[2025-09-11 10:38:46] [Rank 0] step:4161/10000 train_time:182948ms step_avg:43.97ms +[2025-09-11 10:38:46] [Rank 0] step:4161/10000 train_time:182948ms step_avg:43.97ms +[2025-09-11 10:38:47] [Rank 0] step:4181/10000 train_time:183621ms step_avg:43.92ms +[2025-09-11 10:38:47] [Rank 0] step:4181/10000 train_time:183621ms step_avg:43.92ms +[2025-09-11 10:38:48] [Rank 0] step:4201/10000 train_time:184294ms step_avg:43.87ms +[2025-09-11 10:38:48] [Rank 0] step:4201/10000 train_time:184294ms step_avg:43.87ms +[2025-09-11 10:38:48] [Rank 0] step:4221/10000 train_time:184966ms step_avg:43.82ms +[2025-09-11 10:38:48] [Rank 0] step:4221/10000 train_time:184966ms step_avg:43.82ms +[2025-09-11 10:38:49] [Rank 0] step:4241/10000 train_time:185638ms step_avg:43.77ms +[2025-09-11 10:38:49] [Rank 0] step:4241/10000 train_time:185638ms step_avg:43.77ms +[2025-09-11 10:38:50] [Rank 0] step:4261/10000 train_time:186312ms step_avg:43.72ms +[2025-09-11 10:38:50] [Rank 0] step:4261/10000 train_time:186312ms step_avg:43.72ms +[2025-09-11 10:38:50] [Rank 0] step:4281/10000 train_time:186985ms step_avg:43.68ms +[2025-09-11 10:38:50] [Rank 0] step:4281/10000 train_time:186985ms step_avg:43.68ms +[2025-09-11 10:38:51] [Rank 0] step:4301/10000 train_time:187658ms step_avg:43.63ms +[2025-09-11 10:38:51] [Rank 0] step:4301/10000 train_time:187658ms step_avg:43.63ms +[2025-09-11 10:38:52] [Rank 0] step:4321/10000 train_time:188331ms step_avg:43.59ms 
+[2025-09-11 10:38:52] [Rank 0] step:4321/10000 train_time:188331ms step_avg:43.59ms +[2025-09-11 10:38:53] [Rank 0] step:4341/10000 train_time:189570ms step_avg:43.67ms +[2025-09-11 10:38:53] [Rank 0] step:4341/10000 train_time:189570ms step_avg:43.67ms +[2025-09-11 10:38:54] [Rank 0] step:4361/10000 train_time:190242ms step_avg:43.62ms +[2025-09-11 10:38:54] [Rank 0] step:4361/10000 train_time:190242ms step_avg:43.62ms +[2025-09-11 10:38:54] [Rank 0] step:4381/10000 train_time:190915ms step_avg:43.58ms +[2025-09-11 10:38:54] [Rank 0] step:4381/10000 train_time:190915ms step_avg:43.58ms +[2025-09-11 10:38:55] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 10:38:55] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 10:38:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 10:38:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 10:38:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 10:38:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 10:38:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:38:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:38:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 10:38:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 10:38:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 10:38:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 10:39:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 10:39:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 10:39:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 10:39:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 10:39:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 10:39:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 10:39:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 10:39:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 10:39:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 10:39:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 10:39:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 10:39:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 10:39:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 10:39:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 10:39:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 10:39:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 10:39:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 10:39:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 10:39:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 10:39:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 10:39:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 10:39:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 10:39:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 10:39:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 10:39:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 10:39:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 10:39:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 10:39:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 10:39:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 10:39:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 10:39:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 10:39:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 10:39:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:39:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:39:05] [Rank 0] PRINT: step:4400/10000 val_loss:4.8109 total_sharp:8.4358e-04 L1_sharp:6.0778e-04 L2_sharp:9.9488e-05 L3_sharp:6.8194e-05 L4_sharp:6.7859e-05 L5_sharp:6.8591e-05 L6_sharp:9.6408e-05 L7_sharp:7.1572e-05 L8_sharp:2.4517e-04 L9_sharp:1.9548e-04 L10_sharp:2.2221e-04 L11_sharp:2.7115e-04 L12_sharp:2.0219e-03 total_fnorm:2.6750e+01 total_l1_linf:7.5264e+04 total_spectral:1.3438e+01 L1_fnorm:6.3750e+00 L2_fnorm:6.2188e+00 L3_fnorm:6.2500e+00 L4_fnorm:6.2188e+00 L5_fnorm:6.2500e+00 L6_fnorm:6.2812e+00 L7_fnorm:6.2188e+00 L8_fnorm:5.9688e+00 L9_fnorm:6.2500e+00 L10_fnorm:6.2500e+00 L11_fnorm:6.2500e+00 L12_fnorm:6.0625e+00 L1_l1linf:1.6797e+00 L2_l1linf:1.5234e+00 L3_l1linf:1.5078e+00 L4_l1linf:1.4922e+00 L5_l1linf:1.5156e+00 L6_l1linf:1.5156e+00 L7_l1linf:1.5312e+00 L8_l1linf:1.4688e+00 L9_l1linf:1.4453e+00 L10_l1linf:1.4531e+00 L11_l1linf:1.5312e+00 L12_l1linf:1.3906e+00 L1_spectral:7.8933e-02 L2_spectral:7.6751e-02 L3_spectral:7.7779e-02 L4_spectral:7.7736e-02 L5_spectral:7.7850e-02 L6_spectral:7.7919e-02 L7_spectral:7.7687e-02 L8_spectral:7.6227e-02 L9_spectral:7.7777e-02 L10_spectral:7.7695e-02 L11_spectral:7.7239e-02 L12_spectral:7.7116e-02 train_time:191852ms step_avg:43.60ms +[2025-09-11 10:39:05] [Rank 0] PRINT: step:4400/10000 val_loss:4.8109 total_sharp:8.4358e-04 L1_sharp:6.0778e-04 L2_sharp:9.9488e-05 L3_sharp:6.8194e-05 L4_sharp:6.7859e-05 L5_sharp:6.8591e-05 L6_sharp:9.6408e-05 L7_sharp:7.1572e-05 L8_sharp:2.4517e-04 L9_sharp:1.9548e-04 L10_sharp:2.2221e-04 L11_sharp:2.7115e-04 L12_sharp:2.0219e-03 total_fnorm:2.6750e+01 total_l1_linf:7.5264e+04 total_spectral:1.3438e+01 L1_fnorm:6.3750e+00 L2_fnorm:6.2188e+00 L3_fnorm:6.2500e+00 L4_fnorm:6.2188e+00 L5_fnorm:6.2500e+00 L6_fnorm:6.2812e+00 L7_fnorm:6.2188e+00 L8_fnorm:5.9688e+00 L9_fnorm:6.2500e+00 L10_fnorm:6.2500e+00 L11_fnorm:6.2500e+00 L12_fnorm:6.0625e+00 L1_l1linf:1.6797e+00 L2_l1linf:1.5234e+00 L3_l1linf:1.5078e+00 L4_l1linf:1.4922e+00 L5_l1linf:1.5156e+00 
L6_l1linf:1.5156e+00 L7_l1linf:1.5312e+00 L8_l1linf:1.4688e+00 L9_l1linf:1.4453e+00 L10_l1linf:1.4531e+00 L11_l1linf:1.5312e+00 L12_l1linf:1.3906e+00 L1_spectral:7.8933e-02 L2_spectral:7.6751e-02 L3_spectral:7.7779e-02 L4_spectral:7.7736e-02 L5_spectral:7.7850e-02 L6_spectral:7.7919e-02 L7_spectral:7.7687e-02 L8_spectral:7.6227e-02 L9_spectral:7.7777e-02 L10_spectral:7.7695e-02 L11_spectral:7.7239e-02 L12_spectral:7.7116e-02 train_time:191852ms step_avg:43.60ms +[2025-09-11 10:39:06] [Rank 0] step:4401/10000 train_time:193070ms step_avg:43.87ms +[2025-09-11 10:39:06] [Rank 0] step:4401/10000 train_time:193070ms step_avg:43.87ms +[2025-09-11 10:39:07] [Rank 0] step:4421/10000 train_time:193732ms step_avg:43.82ms +[2025-09-11 10:39:07] [Rank 0] step:4421/10000 train_time:193732ms step_avg:43.82ms +[2025-09-11 10:39:08] [Rank 0] step:4441/10000 train_time:194407ms step_avg:43.78ms +[2025-09-11 10:39:08] [Rank 0] step:4441/10000 train_time:194407ms step_avg:43.78ms +[2025-09-11 10:39:08] [Rank 0] step:4461/10000 train_time:195083ms step_avg:43.73ms +[2025-09-11 10:39:08] [Rank 0] step:4461/10000 train_time:195083ms step_avg:43.73ms +[2025-09-11 10:39:09] [Rank 0] step:4481/10000 train_time:195758ms step_avg:43.69ms +[2025-09-11 10:39:09] [Rank 0] step:4481/10000 train_time:195758ms step_avg:43.69ms +[2025-09-11 10:39:10] [Rank 0] step:4501/10000 train_time:196434ms step_avg:43.64ms +[2025-09-11 10:39:10] [Rank 0] step:4501/10000 train_time:196434ms step_avg:43.64ms +[2025-09-11 10:39:10] [Rank 0] step:4521/10000 train_time:197110ms step_avg:43.60ms +[2025-09-11 10:39:10] [Rank 0] step:4521/10000 train_time:197110ms step_avg:43.60ms +[2025-09-11 10:39:11] [Rank 0] step:4541/10000 train_time:197786ms step_avg:43.56ms +[2025-09-11 10:39:11] [Rank 0] step:4541/10000 train_time:197786ms step_avg:43.56ms +[2025-09-11 10:39:12] [Rank 0] step:4561/10000 train_time:198462ms step_avg:43.51ms +[2025-09-11 10:39:12] [Rank 0] step:4561/10000 train_time:198462ms step_avg:43.51ms 
+[2025-09-11 10:39:12] [Rank 0] step:4581/10000 train_time:199137ms step_avg:43.47ms +[2025-09-11 10:39:12] [Rank 0] step:4581/10000 train_time:199137ms step_avg:43.47ms +[2025-09-11 10:39:13] [Rank 0] step:4601/10000 train_time:199813ms step_avg:43.43ms +[2025-09-11 10:39:13] [Rank 0] step:4601/10000 train_time:199813ms step_avg:43.43ms +[2025-09-11 10:39:14] [Rank 0] step:4621/10000 train_time:200488ms step_avg:43.39ms +[2025-09-11 10:39:14] [Rank 0] step:4621/10000 train_time:200488ms step_avg:43.39ms +[2025-09-11 10:39:15] [Rank 0] step:4641/10000 train_time:201165ms step_avg:43.35ms +[2025-09-11 10:39:15] [Rank 0] step:4641/10000 train_time:201165ms step_avg:43.35ms +[2025-09-11 10:39:15] [Rank 0] step:4661/10000 train_time:201840ms step_avg:43.30ms +[2025-09-11 10:39:15] [Rank 0] step:4661/10000 train_time:201840ms step_avg:43.30ms +[2025-09-11 10:39:16] [Rank 0] step:4681/10000 train_time:202515ms step_avg:43.26ms +[2025-09-11 10:39:16] [Rank 0] step:4681/10000 train_time:202515ms step_avg:43.26ms +[2025-09-11 10:39:17] [Rank 0] step:4701/10000 train_time:203191ms step_avg:43.22ms +[2025-09-11 10:39:17] [Rank 0] step:4701/10000 train_time:203191ms step_avg:43.22ms +[2025-09-11 10:39:17] [Rank 0] step:4721/10000 train_time:203866ms step_avg:43.18ms +[2025-09-11 10:39:17] [Rank 0] step:4721/10000 train_time:203866ms step_avg:43.18ms +[2025-09-11 10:39:18] [Rank 0] step:4741/10000 train_time:204543ms step_avg:43.14ms +[2025-09-11 10:39:18] [Rank 0] step:4741/10000 train_time:204543ms step_avg:43.14ms +[2025-09-11 10:39:19] [Rank 0] step:4761/10000 train_time:205219ms step_avg:43.10ms +[2025-09-11 10:39:19] [Rank 0] step:4761/10000 train_time:205219ms step_avg:43.10ms +[2025-09-11 10:39:19] [Rank 0] step:4781/10000 train_time:205894ms step_avg:43.07ms +[2025-09-11 10:39:19] [Rank 0] step:4781/10000 train_time:205894ms step_avg:43.07ms +[2025-09-11 10:39:20] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 10:39:20] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 10:39:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 10:39:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 10:39:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 10:39:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 10:39:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:39:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:39:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 10:39:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 10:39:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 10:39:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 10:39:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 10:39:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 10:39:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 10:39:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 10:39:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 10:39:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 10:39:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 10:39:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 10:39:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 10:39:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 10:39:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 10:39:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 10:39:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 10:39:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 10:39:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 10:39:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 10:39:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 10:39:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 10:39:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 10:39:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 10:39:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 10:39:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 10:39:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 10:39:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 10:39:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 10:39:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 10:39:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 10:39:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 10:39:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 10:39:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 10:39:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 10:39:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 10:39:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:39:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:39:30] [Rank 0] PRINT: step:4800/10000 val_loss:4.7591 total_sharp:7.5230e-04 L1_sharp:3.2128e-04 L2_sharp:9.7653e-05 L3_sharp:1.1077e-05 L4_sharp:9.2151e-05 L5_sharp:6.7888e-05 L6_sharp:6.6705e-05 L7_sharp:7.6760e-05 L8_sharp:2.8447e-04 L9_sharp:1.8482e-04 L10_sharp:2.2898e-04 L11_sharp:2.7598e-04 L12_sharp:1.0878e-03 total_fnorm:2.6500e+01 total_l1_linf:7.5264e+04 total_spectral:1.3375e+01 L1_fnorm:6.4062e+00 L2_fnorm:6.1875e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.2188e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2188e+00 L8_fnorm:5.9688e+00 L9_fnorm:6.2188e+00 L10_fnorm:6.2188e+00 L11_fnorm:6.2500e+00 L12_fnorm:6.1250e+00 L1_l1linf:1.6641e+00 L2_l1linf:1.5078e+00 L3_l1linf:1.5078e+00 L4_l1linf:1.5156e+00 L5_l1linf:1.5391e+00 L6_l1linf:1.5625e+00 L7_l1linf:1.5156e+00 L8_l1linf:1.4531e+00 L9_l1linf:1.4141e+00 L10_l1linf:1.4297e+00 L11_l1linf:1.4766e+00 L12_l1linf:1.4297e+00 L1_spectral:7.9512e-02 L2_spectral:7.7774e-02 L3_spectral:7.8414e-02 L4_spectral:7.8009e-02 L5_spectral:7.7898e-02 L6_spectral:7.7986e-02 L7_spectral:7.7830e-02 L8_spectral:7.6151e-02 L9_spectral:7.7751e-02 L10_spectral:7.8358e-02 L11_spectral:7.8027e-02 L12_spectral:7.7407e-02 train_time:206550ms step_avg:43.03ms +[2025-09-11 10:39:30] [Rank 0] PRINT: step:4800/10000 
val_loss:4.7591 total_sharp:7.5230e-04 L1_sharp:3.2128e-04 L2_sharp:9.7653e-05 L3_sharp:1.1077e-05 L4_sharp:9.2151e-05 L5_sharp:6.7888e-05 L6_sharp:6.6705e-05 L7_sharp:7.6760e-05 L8_sharp:2.8447e-04 L9_sharp:1.8482e-04 L10_sharp:2.2898e-04 L11_sharp:2.7598e-04 L12_sharp:1.0878e-03 total_fnorm:2.6500e+01 total_l1_linf:7.5264e+04 total_spectral:1.3375e+01 L1_fnorm:6.4062e+00 L2_fnorm:6.1875e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.2188e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2188e+00 L8_fnorm:5.9688e+00 L9_fnorm:6.2188e+00 L10_fnorm:6.2188e+00 L11_fnorm:6.2500e+00 L12_fnorm:6.1250e+00 L1_l1linf:1.6641e+00 L2_l1linf:1.5078e+00 L3_l1linf:1.5078e+00 L4_l1linf:1.5156e+00 L5_l1linf:1.5391e+00 L6_l1linf:1.5625e+00 L7_l1linf:1.5156e+00 L8_l1linf:1.4531e+00 L9_l1linf:1.4141e+00 L10_l1linf:1.4297e+00 L11_l1linf:1.4766e+00 L12_l1linf:1.4297e+00 L1_spectral:7.9512e-02 L2_spectral:7.7774e-02 L3_spectral:7.8414e-02 L4_spectral:7.8009e-02 L5_spectral:7.7898e-02 L6_spectral:7.7986e-02 L7_spectral:7.7830e-02 L8_spectral:7.6151e-02 L9_spectral:7.7751e-02 L10_spectral:7.8358e-02 L11_spectral:7.8027e-02 L12_spectral:7.7407e-02 train_time:206550ms step_avg:43.03ms +[2025-09-11 10:39:31] [Rank 0] step:4801/10000 train_time:207772ms step_avg:43.28ms +[2025-09-11 10:39:31] [Rank 0] step:4801/10000 train_time:207772ms step_avg:43.28ms +[2025-09-11 10:39:32] [Rank 0] step:4821/10000 train_time:208477ms step_avg:43.24ms +[2025-09-11 10:39:32] [Rank 0] step:4821/10000 train_time:208477ms step_avg:43.24ms +[2025-09-11 10:39:32] [Rank 0] step:4841/10000 train_time:209154ms step_avg:43.20ms +[2025-09-11 10:39:32] [Rank 0] step:4841/10000 train_time:209154ms step_avg:43.20ms +[2025-09-11 10:39:33] [Rank 0] step:4861/10000 train_time:209830ms step_avg:43.17ms +[2025-09-11 10:39:33] [Rank 0] step:4861/10000 train_time:209830ms step_avg:43.17ms +[2025-09-11 10:39:34] [Rank 0] step:4881/10000 train_time:210506ms step_avg:43.13ms +[2025-09-11 10:39:34] [Rank 0] step:4881/10000 
train_time:210506ms step_avg:43.13ms +[2025-09-11 10:39:34] [Rank 0] step:4901/10000 train_time:211184ms step_avg:43.09ms +[2025-09-11 10:39:34] [Rank 0] step:4901/10000 train_time:211184ms step_avg:43.09ms +[2025-09-11 10:39:35] [Rank 0] step:4921/10000 train_time:211861ms step_avg:43.05ms +[2025-09-11 10:39:35] [Rank 0] step:4921/10000 train_time:211861ms step_avg:43.05ms +[2025-09-11 10:39:36] [Rank 0] step:4941/10000 train_time:212537ms step_avg:43.01ms +[2025-09-11 10:39:36] [Rank 0] step:4941/10000 train_time:212537ms step_avg:43.01ms +[2025-09-11 10:39:36] [Rank 0] step:4961/10000 train_time:213213ms step_avg:42.98ms +[2025-09-11 10:39:36] [Rank 0] step:4961/10000 train_time:213213ms step_avg:42.98ms +[2025-09-11 10:39:37] [Rank 0] step:4981/10000 train_time:213889ms step_avg:42.94ms +[2025-09-11 10:39:37] [Rank 0] step:4981/10000 train_time:213889ms step_avg:42.94ms +[2025-09-11 10:39:38] [Rank 0] step:5001/10000 train_time:214567ms step_avg:42.90ms +[2025-09-11 10:39:38] [Rank 0] step:5001/10000 train_time:214567ms step_avg:42.90ms +[2025-09-11 10:39:38] [Rank 0] step:5021/10000 train_time:215242ms step_avg:42.87ms +[2025-09-11 10:39:38] [Rank 0] step:5021/10000 train_time:215242ms step_avg:42.87ms +[2025-09-11 10:39:39] [Rank 0] step:5041/10000 train_time:215917ms step_avg:42.83ms +[2025-09-11 10:39:39] [Rank 0] step:5041/10000 train_time:215917ms step_avg:42.83ms +[2025-09-11 10:39:40] [Rank 0] step:5061/10000 train_time:216593ms step_avg:42.80ms +[2025-09-11 10:39:40] [Rank 0] step:5061/10000 train_time:216593ms step_avg:42.80ms +[2025-09-11 10:39:41] [Rank 0] step:5081/10000 train_time:217269ms step_avg:42.76ms +[2025-09-11 10:39:41] [Rank 0] step:5081/10000 train_time:217269ms step_avg:42.76ms +[2025-09-11 10:39:41] [Rank 0] step:5101/10000 train_time:217945ms step_avg:42.73ms +[2025-09-11 10:39:41] [Rank 0] step:5101/10000 train_time:217945ms step_avg:42.73ms +[2025-09-11 10:39:42] [Rank 0] step:5121/10000 train_time:218621ms step_avg:42.69ms 
+[2025-09-11 10:39:42] [Rank 0] step:5121/10000 train_time:218621ms step_avg:42.69ms +[2025-09-11 10:39:43] [Rank 0] step:5141/10000 train_time:219297ms step_avg:42.66ms +[2025-09-11 10:39:43] [Rank 0] step:5141/10000 train_time:219297ms step_avg:42.66ms +[2025-09-11 10:39:43] [Rank 0] step:5161/10000 train_time:219972ms step_avg:42.62ms +[2025-09-11 10:39:43] [Rank 0] step:5161/10000 train_time:219972ms step_avg:42.62ms +[2025-09-11 10:39:44] [Rank 0] step:5181/10000 train_time:220647ms step_avg:42.59ms +[2025-09-11 10:39:44] [Rank 0] step:5181/10000 train_time:220647ms step_avg:42.59ms +[2025-09-11 10:39:45] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 10:39:45] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 10:39:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 10:39:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 10:39:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 10:39:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 10:39:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:39:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:39:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 10:39:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 10:39:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 10:39:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 10:39:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 10:39:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 10:39:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 10:39:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 10:39:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 10:39:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 10:39:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 10:39:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 10:39:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 10:39:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 10:39:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 10:39:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 10:39:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 10:39:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 10:39:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 10:39:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 10:39:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 10:39:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 10:39:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 10:39:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 10:39:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 10:39:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 10:39:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 10:39:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 10:39:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 10:39:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 10:39:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 10:39:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 10:39:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 10:39:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 10:39:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 10:39:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 10:39:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:39:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:39:54] [Rank 0] PRINT: step:5200/10000 val_loss:4.7224 total_sharp:9.0584e-04 L1_sharp:3.9206e-04 L2_sharp:1.4586e-04 L3_sharp:8.2605e-05 L4_sharp:9.7977e-05 L5_sharp:5.7824e-05 L6_sharp:1.0339e-04 L7_sharp:5.1148e-05 L8_sharp:2.0518e-04 L9_sharp:1.9510e-04 L10_sharp:2.5697e-04 L11_sharp:3.2934e-04 L12_sharp:1.9062e-03 total_fnorm:2.5750e+01 total_l1_linf:7.1680e+04 total_spectral:1.3062e+01 L1_fnorm:6.3750e+00 L2_fnorm:6.1875e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.2188e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.0000e+00 L9_fnorm:6.2188e+00 L10_fnorm:6.2188e+00 L11_fnorm:6.2500e+00 L12_fnorm:6.1562e+00 L1_l1linf:1.6797e+00 L2_l1linf:1.4922e+00 L3_l1linf:1.5078e+00 L4_l1linf:1.5156e+00 L5_l1linf:1.5312e+00 L6_l1linf:1.5156e+00 L7_l1linf:1.5547e+00 L8_l1linf:1.4375e+00 L9_l1linf:1.3984e+00 L10_l1linf:1.3750e+00 L11_l1linf:1.4219e+00 L12_l1linf:1.4531e+00 L1_spectral:7.9721e-02 L2_spectral:7.8059e-02 L3_spectral:7.8548e-02 L4_spectral:7.8423e-02 L5_spectral:7.8854e-02 L6_spectral:7.8671e-02 L7_spectral:7.8356e-02 L8_spectral:7.6692e-02 L9_spectral:7.8565e-02 L10_spectral:7.8233e-02 L11_spectral:7.8174e-02 L12_spectral:7.8172e-02 train_time:221311ms step_avg:42.56ms +[2025-09-11 10:39:54] [Rank 0] PRINT: step:5200/10000 val_loss:4.7224 total_sharp:9.0584e-04 L1_sharp:3.9206e-04 L2_sharp:1.4586e-04 L3_sharp:8.2605e-05 L4_sharp:9.7977e-05 L5_sharp:5.7824e-05 L6_sharp:1.0339e-04 L7_sharp:5.1148e-05 L8_sharp:2.0518e-04 L9_sharp:1.9510e-04 L10_sharp:2.5697e-04 L11_sharp:3.2934e-04 L12_sharp:1.9062e-03 total_fnorm:2.5750e+01 total_l1_linf:7.1680e+04 total_spectral:1.3062e+01 L1_fnorm:6.3750e+00 L2_fnorm:6.1875e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.2188e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.0000e+00 L9_fnorm:6.2188e+00 L10_fnorm:6.2188e+00 L11_fnorm:6.2500e+00 L12_fnorm:6.1562e+00 L1_l1linf:1.6797e+00 L2_l1linf:1.4922e+00 L3_l1linf:1.5078e+00 L4_l1linf:1.5156e+00 L5_l1linf:1.5312e+00 
L6_l1linf:1.5156e+00 L7_l1linf:1.5547e+00 L8_l1linf:1.4375e+00 L9_l1linf:1.3984e+00 L10_l1linf:1.3750e+00 L11_l1linf:1.4219e+00 L12_l1linf:1.4531e+00 L1_spectral:7.9721e-02 L2_spectral:7.8059e-02 L3_spectral:7.8548e-02 L4_spectral:7.8423e-02 L5_spectral:7.8854e-02 L6_spectral:7.8671e-02 L7_spectral:7.8356e-02 L8_spectral:7.6692e-02 L9_spectral:7.8565e-02 L10_spectral:7.8233e-02 L11_spectral:7.8174e-02 L12_spectral:7.8172e-02 train_time:221311ms step_avg:42.56ms +[2025-09-11 10:39:56] [Rank 0] step:5201/10000 train_time:222527ms step_avg:42.79ms +[2025-09-11 10:39:56] [Rank 0] step:5201/10000 train_time:222527ms step_avg:42.79ms +[2025-09-11 10:39:56] [Rank 0] step:5221/10000 train_time:223393ms step_avg:42.79ms +[2025-09-11 10:39:56] [Rank 0] step:5221/10000 train_time:223393ms step_avg:42.79ms +[2025-09-11 10:39:57] [Rank 0] step:5241/10000 train_time:224078ms step_avg:42.75ms +[2025-09-11 10:39:57] [Rank 0] step:5241/10000 train_time:224078ms step_avg:42.75ms +[2025-09-11 10:39:58] [Rank 0] step:5261/10000 train_time:225041ms step_avg:42.78ms +[2025-09-11 10:39:58] [Rank 0] step:5261/10000 train_time:225041ms step_avg:42.78ms +[2025-09-11 10:39:59] [Rank 0] step:5281/10000 train_time:225727ms step_avg:42.74ms +[2025-09-11 10:39:59] [Rank 0] step:5281/10000 train_time:225727ms step_avg:42.74ms +[2025-09-11 10:40:00] [Rank 0] step:5301/10000 train_time:226411ms step_avg:42.71ms +[2025-09-11 10:40:00] [Rank 0] step:5301/10000 train_time:226411ms step_avg:42.71ms +[2025-09-11 10:40:00] [Rank 0] step:5321/10000 train_time:227095ms step_avg:42.68ms +[2025-09-11 10:40:00] [Rank 0] step:5321/10000 train_time:227095ms step_avg:42.68ms +[2025-09-11 10:40:01] [Rank 0] step:5341/10000 train_time:227841ms step_avg:42.66ms +[2025-09-11 10:40:01] [Rank 0] step:5341/10000 train_time:227841ms step_avg:42.66ms +[2025-09-11 10:40:02] [Rank 0] step:5361/10000 train_time:228654ms step_avg:42.65ms +[2025-09-11 10:40:02] [Rank 0] step:5361/10000 train_time:228654ms step_avg:42.65ms 
+[2025-09-11 10:40:02] [Rank 0] step:5381/10000 train_time:229353ms step_avg:42.62ms +[2025-09-11 10:40:02] [Rank 0] step:5381/10000 train_time:229353ms step_avg:42.62ms +[2025-09-11 10:40:03] [Rank 0] step:5401/10000 train_time:230037ms step_avg:42.59ms +[2025-09-11 10:40:03] [Rank 0] step:5401/10000 train_time:230037ms step_avg:42.59ms +[2025-09-11 10:40:04] [Rank 0] step:5421/10000 train_time:230723ms step_avg:42.56ms +[2025-09-11 10:40:04] [Rank 0] step:5421/10000 train_time:230723ms step_avg:42.56ms +[2025-09-11 10:40:05] [Rank 0] step:5441/10000 train_time:231409ms step_avg:42.53ms +[2025-09-11 10:40:05] [Rank 0] step:5441/10000 train_time:231409ms step_avg:42.53ms +[2025-09-11 10:40:05] [Rank 0] step:5461/10000 train_time:232094ms step_avg:42.50ms +[2025-09-11 10:40:05] [Rank 0] step:5461/10000 train_time:232094ms step_avg:42.50ms +[2025-09-11 10:40:06] [Rank 0] step:5481/10000 train_time:232778ms step_avg:42.47ms +[2025-09-11 10:40:06] [Rank 0] step:5481/10000 train_time:232778ms step_avg:42.47ms +[2025-09-11 10:40:07] [Rank 0] step:5501/10000 train_time:233463ms step_avg:42.44ms +[2025-09-11 10:40:07] [Rank 0] step:5501/10000 train_time:233463ms step_avg:42.44ms +[2025-09-11 10:40:07] [Rank 0] step:5521/10000 train_time:234147ms step_avg:42.41ms +[2025-09-11 10:40:07] [Rank 0] step:5521/10000 train_time:234147ms step_avg:42.41ms +[2025-09-11 10:40:08] [Rank 0] step:5541/10000 train_time:234834ms step_avg:42.38ms +[2025-09-11 10:40:08] [Rank 0] step:5541/10000 train_time:234834ms step_avg:42.38ms +[2025-09-11 10:40:09] [Rank 0] step:5561/10000 train_time:235521ms step_avg:42.35ms +[2025-09-11 10:40:09] [Rank 0] step:5561/10000 train_time:235521ms step_avg:42.35ms +[2025-09-11 10:40:09] [Rank 0] step:5581/10000 train_time:236206ms step_avg:42.32ms +[2025-09-11 10:40:09] [Rank 0] step:5581/10000 train_time:236206ms step_avg:42.32ms +[2025-09-11 10:40:10] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 10:40:10] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 10:40:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 10:40:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 10:40:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 10:40:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 10:40:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:40:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:40:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 10:40:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 10:40:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 10:40:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 10:40:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 10:40:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 10:40:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 10:40:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 10:40:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 10:40:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 10:40:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 10:40:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 10:40:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 10:40:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 10:40:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 10:40:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 10:40:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 10:40:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 10:40:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 10:40:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 10:40:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 10:40:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 10:40:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 10:40:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 10:40:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 10:40:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 10:40:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 10:40:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 10:40:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 10:40:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 10:40:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 10:40:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 10:40:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 10:40:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 10:40:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 10:40:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 10:40:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:40:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:40:20] [Rank 0] PRINT: step:5600/10000 val_loss:4.6922 total_sharp:8.2369e-04 L1_sharp:3.3337e-04 L2_sharp:1.4391e-04 L3_sharp:9.8086e-05 L4_sharp:7.3221e-05 L5_sharp:5.7071e-05 L6_sharp:7.2324e-05 L7_sharp:7.5372e-05 L8_sharp:2.3897e-04 L9_sharp:2.1854e-04 L10_sharp:2.4811e-04 L11_sharp:2.8268e-04 L12_sharp:9.1049e-04 total_fnorm:2.5500e+01 total_l1_linf:7.1168e+04 total_spectral:1.2938e+01 L1_fnorm:6.3438e+00 L2_fnorm:6.1250e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.1875e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.1875e+00 L8_fnorm:5.9375e+00 L9_fnorm:6.2188e+00 L10_fnorm:6.2188e+00 L11_fnorm:6.2188e+00 L12_fnorm:6.1562e+00 L1_l1linf:1.6328e+00 L2_l1linf:1.4688e+00 L3_l1linf:1.4766e+00 L4_l1linf:1.4844e+00 L5_l1linf:1.5078e+00 L6_l1linf:1.5312e+00 L7_l1linf:1.4922e+00 L8_l1linf:1.4141e+00 L9_l1linf:1.3828e+00 L10_l1linf:1.3672e+00 L11_l1linf:1.5078e+00 L12_l1linf:1.4375e+00 L1_spectral:7.9369e-02 L2_spectral:7.7527e-02 L3_spectral:7.8517e-02 L4_spectral:7.8400e-02 L5_spectral:7.8364e-02 L6_spectral:7.8688e-02 L7_spectral:7.8350e-02 L8_spectral:7.5962e-02 L9_spectral:7.8504e-02 L10_spectral:7.8538e-02 L11_spectral:7.8619e-02 L12_spectral:7.8595e-02 train_time:236872ms step_avg:42.30ms +[2025-09-11 10:40:20] [Rank 0] PRINT: step:5600/10000 
val_loss:4.6922 total_sharp:8.2369e-04 L1_sharp:3.3337e-04 L2_sharp:1.4391e-04 L3_sharp:9.8086e-05 L4_sharp:7.3221e-05 L5_sharp:5.7071e-05 L6_sharp:7.2324e-05 L7_sharp:7.5372e-05 L8_sharp:2.3897e-04 L9_sharp:2.1854e-04 L10_sharp:2.4811e-04 L11_sharp:2.8268e-04 L12_sharp:9.1049e-04 total_fnorm:2.5500e+01 total_l1_linf:7.1168e+04 total_spectral:1.2938e+01 L1_fnorm:6.3438e+00 L2_fnorm:6.1250e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.1875e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.1875e+00 L8_fnorm:5.9375e+00 L9_fnorm:6.2188e+00 L10_fnorm:6.2188e+00 L11_fnorm:6.2188e+00 L12_fnorm:6.1562e+00 L1_l1linf:1.6328e+00 L2_l1linf:1.4688e+00 L3_l1linf:1.4766e+00 L4_l1linf:1.4844e+00 L5_l1linf:1.5078e+00 L6_l1linf:1.5312e+00 L7_l1linf:1.4922e+00 L8_l1linf:1.4141e+00 L9_l1linf:1.3828e+00 L10_l1linf:1.3672e+00 L11_l1linf:1.5078e+00 L12_l1linf:1.4375e+00 L1_spectral:7.9369e-02 L2_spectral:7.7527e-02 L3_spectral:7.8517e-02 L4_spectral:7.8400e-02 L5_spectral:7.8364e-02 L6_spectral:7.8688e-02 L7_spectral:7.8350e-02 L8_spectral:7.5962e-02 L9_spectral:7.8504e-02 L10_spectral:7.8538e-02 L11_spectral:7.8619e-02 L12_spectral:7.8595e-02 train_time:236872ms step_avg:42.30ms +[2025-09-11 10:40:21] [Rank 0] step:5601/10000 train_time:238098ms step_avg:42.51ms +[2025-09-11 10:40:21] [Rank 0] step:5601/10000 train_time:238098ms step_avg:42.51ms +[2025-09-11 10:40:22] [Rank 0] step:5621/10000 train_time:238826ms step_avg:42.49ms +[2025-09-11 10:40:22] [Rank 0] step:5621/10000 train_time:238826ms step_avg:42.49ms +[2025-09-11 10:40:23] [Rank 0] step:5641/10000 train_time:239510ms step_avg:42.46ms +[2025-09-11 10:40:23] [Rank 0] step:5641/10000 train_time:239510ms step_avg:42.46ms +[2025-09-11 10:40:23] [Rank 0] step:5661/10000 train_time:240194ms step_avg:42.43ms +[2025-09-11 10:40:23] [Rank 0] step:5661/10000 train_time:240194ms step_avg:42.43ms +[2025-09-11 10:40:24] [Rank 0] step:5681/10000 train_time:240881ms step_avg:42.40ms +[2025-09-11 10:40:24] [Rank 0] step:5681/10000 
train_time:240881ms step_avg:42.40ms +[2025-09-11 10:40:25] [Rank 0] step:5701/10000 train_time:241568ms step_avg:42.37ms +[2025-09-11 10:40:25] [Rank 0] step:5701/10000 train_time:241568ms step_avg:42.37ms +[2025-09-11 10:40:25] [Rank 0] step:5721/10000 train_time:242252ms step_avg:42.34ms +[2025-09-11 10:40:25] [Rank 0] step:5721/10000 train_time:242252ms step_avg:42.34ms +[2025-09-11 10:40:26] [Rank 0] step:5741/10000 train_time:242938ms step_avg:42.32ms +[2025-09-11 10:40:26] [Rank 0] step:5741/10000 train_time:242938ms step_avg:42.32ms +[2025-09-11 10:40:27] [Rank 0] step:5761/10000 train_time:243624ms step_avg:42.29ms +[2025-09-11 10:40:27] [Rank 0] step:5761/10000 train_time:243624ms step_avg:42.29ms +[2025-09-11 10:40:27] [Rank 0] step:5781/10000 train_time:244309ms step_avg:42.26ms +[2025-09-11 10:40:27] [Rank 0] step:5781/10000 train_time:244309ms step_avg:42.26ms +[2025-09-11 10:40:28] [Rank 0] step:5801/10000 train_time:244996ms step_avg:42.23ms +[2025-09-11 10:40:28] [Rank 0] step:5801/10000 train_time:244996ms step_avg:42.23ms +[2025-09-11 10:40:29] [Rank 0] step:5821/10000 train_time:245680ms step_avg:42.21ms +[2025-09-11 10:40:29] [Rank 0] step:5821/10000 train_time:245680ms step_avg:42.21ms +[2025-09-11 10:40:29] [Rank 0] step:5841/10000 train_time:246366ms step_avg:42.18ms +[2025-09-11 10:40:29] [Rank 0] step:5841/10000 train_time:246366ms step_avg:42.18ms +[2025-09-11 10:40:30] [Rank 0] step:5861/10000 train_time:247050ms step_avg:42.15ms +[2025-09-11 10:40:30] [Rank 0] step:5861/10000 train_time:247050ms step_avg:42.15ms +[2025-09-11 10:40:31] [Rank 0] step:5881/10000 train_time:247735ms step_avg:42.12ms +[2025-09-11 10:40:31] [Rank 0] step:5881/10000 train_time:247735ms step_avg:42.12ms +[2025-09-11 10:40:31] [Rank 0] step:5901/10000 train_time:248419ms step_avg:42.10ms +[2025-09-11 10:40:31] [Rank 0] step:5901/10000 train_time:248419ms step_avg:42.10ms +[2025-09-11 10:40:32] [Rank 0] step:5921/10000 train_time:249106ms step_avg:42.07ms 
+[2025-09-11 10:40:32] [Rank 0] step:5921/10000 train_time:249106ms step_avg:42.07ms +[2025-09-11 10:40:33] [Rank 0] step:5941/10000 train_time:249793ms step_avg:42.05ms +[2025-09-11 10:40:33] [Rank 0] step:5941/10000 train_time:249793ms step_avg:42.05ms +[2025-09-11 10:40:34] [Rank 0] step:5961/10000 train_time:250478ms step_avg:42.02ms +[2025-09-11 10:40:34] [Rank 0] step:5961/10000 train_time:250478ms step_avg:42.02ms +[2025-09-11 10:40:34] [Rank 0] step:5981/10000 train_time:251164ms step_avg:41.99ms +[2025-09-11 10:40:34] [Rank 0] step:5981/10000 train_time:251164ms step_avg:41.99ms +[2025-09-11 10:40:35] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 10:40:35] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 10:40:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 10:40:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 10:40:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 10:40:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 10:40:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:40:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:40:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 10:40:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 10:40:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 10:40:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 10:40:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 10:40:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 10:40:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 10:40:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 10:40:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 10:40:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 10:40:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 10:40:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 10:40:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 10:40:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 10:40:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 10:40:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 10:40:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 10:40:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 10:40:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 10:40:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 10:40:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 10:40:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 10:40:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 10:40:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 10:40:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 10:40:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 10:40:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 10:40:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 10:40:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 10:40:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 10:40:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 10:40:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 10:40:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 10:40:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 10:40:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 10:40:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 10:40:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:40:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:40:45] [Rank 0] PRINT: step:6000/10000 val_loss:4.6409 total_sharp:7.6339e-04 L1_sharp:3.2961e-04 L2_sharp:1.5730e-04 L3_sharp:4.5651e-05 L4_sharp:3.6422e-05 L5_sharp:5.6822e-05 L6_sharp:4.8645e-05 L7_sharp:6.0271e-05 L8_sharp:1.6389e-04 L9_sharp:1.7431e-04 L10_sharp:2.1111e-04 L11_sharp:2.6342e-04 L12_sharp:1.7610e-03 total_fnorm:2.6000e+01 total_l1_linf:7.0656e+04 total_spectral:1.3125e+01 L1_fnorm:6.3438e+00 L2_fnorm:6.1250e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.2188e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.2188e+00 L8_fnorm:5.9688e+00 L9_fnorm:6.1875e+00 L10_fnorm:6.1875e+00 L11_fnorm:6.2188e+00 L12_fnorm:6.1562e+00 L1_l1linf:1.6562e+00 L2_l1linf:1.4688e+00 L3_l1linf:1.5000e+00 L4_l1linf:1.4844e+00 L5_l1linf:1.5234e+00 L6_l1linf:1.5156e+00 L7_l1linf:1.5156e+00 L8_l1linf:1.4766e+00 L9_l1linf:1.3750e+00 L10_l1linf:1.3516e+00 L11_l1linf:1.4141e+00 L12_l1linf:1.4453e+00 L1_spectral:7.9694e-02 L2_spectral:7.7976e-02 L3_spectral:7.9215e-02 L4_spectral:7.8724e-02 L5_spectral:7.8472e-02 L6_spectral:7.9076e-02 L7_spectral:7.8910e-02 L8_spectral:7.7108e-02 L9_spectral:7.8818e-02 L10_spectral:7.8836e-02 L11_spectral:7.9035e-02 L12_spectral:7.8950e-02 train_time:251832ms step_avg:41.97ms +[2025-09-11 10:40:45] [Rank 0] PRINT: step:6000/10000 val_loss:4.6409 total_sharp:7.6339e-04 L1_sharp:3.2961e-04 L2_sharp:1.5730e-04 L3_sharp:4.5651e-05 L4_sharp:3.6422e-05 L5_sharp:5.6822e-05 L6_sharp:4.8645e-05 L7_sharp:6.0271e-05 L8_sharp:1.6389e-04 L9_sharp:1.7431e-04 L10_sharp:2.1111e-04 L11_sharp:2.6342e-04 L12_sharp:1.7610e-03 total_fnorm:2.6000e+01 total_l1_linf:7.0656e+04 total_spectral:1.3125e+01 L1_fnorm:6.3438e+00 L2_fnorm:6.1250e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.2188e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.2188e+00 L8_fnorm:5.9688e+00 L9_fnorm:6.1875e+00 L10_fnorm:6.1875e+00 L11_fnorm:6.2188e+00 L12_fnorm:6.1562e+00 L1_l1linf:1.6562e+00 L2_l1linf:1.4688e+00 L3_l1linf:1.5000e+00 L4_l1linf:1.4844e+00 L5_l1linf:1.5234e+00 
L6_l1linf:1.5156e+00 L7_l1linf:1.5156e+00 L8_l1linf:1.4766e+00 L9_l1linf:1.3750e+00 L10_l1linf:1.3516e+00 L11_l1linf:1.4141e+00 L12_l1linf:1.4453e+00 L1_spectral:7.9694e-02 L2_spectral:7.7976e-02 L3_spectral:7.9215e-02 L4_spectral:7.8724e-02 L5_spectral:7.8472e-02 L6_spectral:7.9076e-02 L7_spectral:7.8910e-02 L8_spectral:7.7108e-02 L9_spectral:7.8818e-02 L10_spectral:7.8836e-02 L11_spectral:7.9035e-02 L12_spectral:7.8950e-02 train_time:251832ms step_avg:41.97ms +[2025-09-11 10:40:47] [Rank 0] step:6001/10000 train_time:253097ms step_avg:42.18ms +[2025-09-11 10:40:47] [Rank 0] step:6001/10000 train_time:253097ms step_avg:42.18ms +[2025-09-11 10:40:47] [Rank 0] step:6021/10000 train_time:253812ms step_avg:42.15ms +[2025-09-11 10:40:47] [Rank 0] step:6021/10000 train_time:253812ms step_avg:42.15ms +[2025-09-11 10:40:48] [Rank 0] step:6041/10000 train_time:254502ms step_avg:42.13ms +[2025-09-11 10:40:48] [Rank 0] step:6041/10000 train_time:254502ms step_avg:42.13ms +[2025-09-11 10:40:49] [Rank 0] step:6061/10000 train_time:255190ms step_avg:42.10ms +[2025-09-11 10:40:49] [Rank 0] step:6061/10000 train_time:255190ms step_avg:42.10ms +[2025-09-11 10:40:49] [Rank 0] step:6081/10000 train_time:255880ms step_avg:42.08ms +[2025-09-11 10:40:49] [Rank 0] step:6081/10000 train_time:255880ms step_avg:42.08ms +[2025-09-11 10:40:50] [Rank 0] step:6101/10000 train_time:256568ms step_avg:42.05ms +[2025-09-11 10:40:50] [Rank 0] step:6101/10000 train_time:256568ms step_avg:42.05ms +[2025-09-11 10:40:51] [Rank 0] step:6121/10000 train_time:257256ms step_avg:42.03ms +[2025-09-11 10:40:51] [Rank 0] step:6121/10000 train_time:257256ms step_avg:42.03ms +[2025-09-11 10:40:51] [Rank 0] step:6141/10000 train_time:257944ms step_avg:42.00ms +[2025-09-11 10:40:51] [Rank 0] step:6141/10000 train_time:257944ms step_avg:42.00ms +[2025-09-11 10:40:52] [Rank 0] step:6161/10000 train_time:258632ms step_avg:41.98ms +[2025-09-11 10:40:52] [Rank 0] step:6161/10000 train_time:258632ms step_avg:41.98ms 
+[2025-09-11 10:40:53] [Rank 0] step:6181/10000 train_time:259317ms step_avg:41.95ms +[2025-09-11 10:40:53] [Rank 0] step:6181/10000 train_time:259317ms step_avg:41.95ms +[2025-09-11 10:40:54] [Rank 0] step:6201/10000 train_time:260006ms step_avg:41.93ms +[2025-09-11 10:40:54] [Rank 0] step:6201/10000 train_time:260006ms step_avg:41.93ms +[2025-09-11 10:40:54] [Rank 0] step:6221/10000 train_time:260694ms step_avg:41.91ms +[2025-09-11 10:40:54] [Rank 0] step:6221/10000 train_time:260694ms step_avg:41.91ms +[2025-09-11 10:40:55] [Rank 0] step:6241/10000 train_time:261382ms step_avg:41.88ms +[2025-09-11 10:40:55] [Rank 0] step:6241/10000 train_time:261382ms step_avg:41.88ms +[2025-09-11 10:40:56] [Rank 0] step:6261/10000 train_time:262069ms step_avg:41.86ms +[2025-09-11 10:40:56] [Rank 0] step:6261/10000 train_time:262069ms step_avg:41.86ms +[2025-09-11 10:40:56] [Rank 0] step:6281/10000 train_time:262756ms step_avg:41.83ms +[2025-09-11 10:40:56] [Rank 0] step:6281/10000 train_time:262756ms step_avg:41.83ms +[2025-09-11 10:40:57] [Rank 0] step:6301/10000 train_time:263443ms step_avg:41.81ms +[2025-09-11 10:40:57] [Rank 0] step:6301/10000 train_time:263443ms step_avg:41.81ms +[2025-09-11 10:40:58] [Rank 0] step:6321/10000 train_time:264133ms step_avg:41.79ms +[2025-09-11 10:40:58] [Rank 0] step:6321/10000 train_time:264133ms step_avg:41.79ms +[2025-09-11 10:40:58] [Rank 0] step:6341/10000 train_time:264960ms step_avg:41.79ms +[2025-09-11 10:40:58] [Rank 0] step:6341/10000 train_time:264960ms step_avg:41.79ms +[2025-09-11 10:41:00] [Rank 0] step:6361/10000 train_time:266028ms step_avg:41.82ms +[2025-09-11 10:41:00] [Rank 0] step:6361/10000 train_time:266028ms step_avg:41.82ms +[2025-09-11 10:41:00] [Rank 0] step:6381/10000 train_time:266717ms step_avg:41.80ms +[2025-09-11 10:41:00] [Rank 0] step:6381/10000 train_time:266717ms step_avg:41.80ms +[2025-09-11 10:41:01] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 10:41:01] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 10:41:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 10:41:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 10:41:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 10:41:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 10:41:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:41:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:41:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 10:41:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 10:41:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 10:41:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 10:41:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 10:41:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 10:41:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 10:41:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 10:41:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 10:41:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 10:41:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 10:41:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 10:41:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 10:41:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 10:41:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 10:41:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 10:41:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 10:41:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 10:41:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 10:41:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 10:41:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 10:41:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 10:41:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 10:41:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 10:41:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 10:41:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 10:41:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 10:41:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 10:41:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 10:41:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 10:41:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 10:41:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 10:41:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 10:41:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 10:41:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 10:41:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 10:41:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:41:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:41:11] [Rank 0] PRINT: step:6400/10000 val_loss:4.6098 total_sharp:6.9503e-04 L1_sharp:2.2320e-04 L2_sharp:5.6332e-05 L3_sharp:4.1491e-05 L4_sharp:4.6645e-05 L5_sharp:5.6894e-05 L6_sharp:9.8879e-05 L7_sharp:6.6046e-05 L8_sharp:2.3775e-04 L9_sharp:1.7488e-04 L10_sharp:2.0250e-04 L11_sharp:2.6623e-04 L12_sharp:1.0690e-03 total_fnorm:2.3250e+01 total_l1_linf:6.0416e+04 total_spectral:1.1562e+01 L1_fnorm:5.7500e+00 L2_fnorm:5.5938e+00 L3_fnorm:5.6250e+00 L4_fnorm:5.6250e+00 L5_fnorm:5.5938e+00 L6_fnorm:5.6250e+00 L7_fnorm:5.5938e+00 L8_fnorm:5.3438e+00 L9_fnorm:5.5938e+00 L10_fnorm:5.5938e+00 L11_fnorm:5.5938e+00 L12_fnorm:5.5625e+00 L1_l1linf:1.4297e+00 L2_l1linf:1.3047e+00 L3_l1linf:1.3047e+00 L4_l1linf:1.3047e+00 L5_l1linf:1.3203e+00 L6_l1linf:1.3203e+00 L7_l1linf:1.3203e+00 L8_l1linf:1.2188e+00 L9_l1linf:1.1953e+00 L10_l1linf:1.1953e+00 L11_l1linf:1.2344e+00 L12_l1linf:1.2734e+00 L1_spectral:7.3594e-02 L2_spectral:7.1422e-02 L3_spectral:7.2380e-02 L4_spectral:7.2270e-02 L5_spectral:7.2695e-02 L6_spectral:7.2235e-02 L7_spectral:7.2717e-02 L8_spectral:7.0342e-02 L9_spectral:7.2659e-02 L10_spectral:7.2305e-02 L11_spectral:7.2740e-02 L12_spectral:7.2231e-02 train_time:267527ms step_avg:41.80ms +[2025-09-11 10:41:11] [Rank 0] PRINT: step:6400/10000 
val_loss:4.6098 total_sharp:6.9503e-04 L1_sharp:2.2320e-04 L2_sharp:5.6332e-05 L3_sharp:4.1491e-05 L4_sharp:4.6645e-05 L5_sharp:5.6894e-05 L6_sharp:9.8879e-05 L7_sharp:6.6046e-05 L8_sharp:2.3775e-04 L9_sharp:1.7488e-04 L10_sharp:2.0250e-04 L11_sharp:2.6623e-04 L12_sharp:1.0690e-03 total_fnorm:2.3250e+01 total_l1_linf:6.0416e+04 total_spectral:1.1562e+01 L1_fnorm:5.7500e+00 L2_fnorm:5.5938e+00 L3_fnorm:5.6250e+00 L4_fnorm:5.6250e+00 L5_fnorm:5.5938e+00 L6_fnorm:5.6250e+00 L7_fnorm:5.5938e+00 L8_fnorm:5.3438e+00 L9_fnorm:5.5938e+00 L10_fnorm:5.5938e+00 L11_fnorm:5.5938e+00 L12_fnorm:5.5625e+00 L1_l1linf:1.4297e+00 L2_l1linf:1.3047e+00 L3_l1linf:1.3047e+00 L4_l1linf:1.3047e+00 L5_l1linf:1.3203e+00 L6_l1linf:1.3203e+00 L7_l1linf:1.3203e+00 L8_l1linf:1.2188e+00 L9_l1linf:1.1953e+00 L10_l1linf:1.1953e+00 L11_l1linf:1.2344e+00 L12_l1linf:1.2734e+00 L1_spectral:7.3594e-02 L2_spectral:7.1422e-02 L3_spectral:7.2380e-02 L4_spectral:7.2270e-02 L5_spectral:7.2695e-02 L6_spectral:7.2235e-02 L7_spectral:7.2717e-02 L8_spectral:7.0342e-02 L9_spectral:7.2659e-02 L10_spectral:7.2305e-02 L11_spectral:7.2740e-02 L12_spectral:7.2231e-02 train_time:267527ms step_avg:41.80ms +[2025-09-11 10:41:12] [Rank 0] step:6401/10000 train_time:268800ms step_avg:41.99ms +[2025-09-11 10:41:12] [Rank 0] step:6401/10000 train_time:268800ms step_avg:41.99ms +[2025-09-11 10:41:13] [Rank 0] step:6421/10000 train_time:269518ms step_avg:41.97ms +[2025-09-11 10:41:13] [Rank 0] step:6421/10000 train_time:269518ms step_avg:41.97ms +[2025-09-11 10:41:14] [Rank 0] step:6441/10000 train_time:270206ms step_avg:41.95ms +[2025-09-11 10:41:14] [Rank 0] step:6441/10000 train_time:270206ms step_avg:41.95ms +[2025-09-11 10:41:14] [Rank 0] step:6461/10000 train_time:270894ms step_avg:41.93ms +[2025-09-11 10:41:14] [Rank 0] step:6461/10000 train_time:270894ms step_avg:41.93ms +[2025-09-11 10:41:15] [Rank 0] step:6481/10000 train_time:271585ms step_avg:41.90ms +[2025-09-11 10:41:15] [Rank 0] step:6481/10000 
train_time:271585ms step_avg:41.90ms +[2025-09-11 10:41:16] [Rank 0] step:6501/10000 train_time:272276ms step_avg:41.88ms +[2025-09-11 10:41:16] [Rank 0] step:6501/10000 train_time:272276ms step_avg:41.88ms +[2025-09-11 10:41:16] [Rank 0] step:6521/10000 train_time:272965ms step_avg:41.86ms +[2025-09-11 10:41:16] [Rank 0] step:6521/10000 train_time:272965ms step_avg:41.86ms +[2025-09-11 10:41:17] [Rank 0] step:6541/10000 train_time:273652ms step_avg:41.84ms +[2025-09-11 10:41:17] [Rank 0] step:6541/10000 train_time:273652ms step_avg:41.84ms +[2025-09-11 10:41:18] [Rank 0] step:6561/10000 train_time:274340ms step_avg:41.81ms +[2025-09-11 10:41:18] [Rank 0] step:6561/10000 train_time:274340ms step_avg:41.81ms +[2025-09-11 10:41:18] [Rank 0] step:6581/10000 train_time:275030ms step_avg:41.79ms +[2025-09-11 10:41:18] [Rank 0] step:6581/10000 train_time:275030ms step_avg:41.79ms +[2025-09-11 10:41:19] [Rank 0] step:6601/10000 train_time:275719ms step_avg:41.77ms +[2025-09-11 10:41:19] [Rank 0] step:6601/10000 train_time:275719ms step_avg:41.77ms +[2025-09-11 10:41:20] [Rank 0] step:6621/10000 train_time:276405ms step_avg:41.75ms +[2025-09-11 10:41:20] [Rank 0] step:6621/10000 train_time:276405ms step_avg:41.75ms +[2025-09-11 10:41:21] [Rank 0] step:6641/10000 train_time:277094ms step_avg:41.72ms +[2025-09-11 10:41:21] [Rank 0] step:6641/10000 train_time:277094ms step_avg:41.72ms +[2025-09-11 10:41:21] [Rank 0] step:6661/10000 train_time:277782ms step_avg:41.70ms +[2025-09-11 10:41:21] [Rank 0] step:6661/10000 train_time:277782ms step_avg:41.70ms +[2025-09-11 10:41:22] [Rank 0] step:6681/10000 train_time:278477ms step_avg:41.68ms +[2025-09-11 10:41:22] [Rank 0] step:6681/10000 train_time:278477ms step_avg:41.68ms +[2025-09-11 10:41:23] [Rank 0] step:6701/10000 train_time:279171ms step_avg:41.66ms +[2025-09-11 10:41:23] [Rank 0] step:6701/10000 train_time:279171ms step_avg:41.66ms +[2025-09-11 10:41:23] [Rank 0] step:6721/10000 train_time:279866ms step_avg:41.64ms 
+[2025-09-11 10:41:23] [Rank 0] step:6721/10000 train_time:279866ms step_avg:41.64ms +[2025-09-11 10:41:24] [Rank 0] step:6741/10000 train_time:280563ms step_avg:41.62ms +[2025-09-11 10:41:24] [Rank 0] step:6741/10000 train_time:280563ms step_avg:41.62ms +[2025-09-11 10:41:25] [Rank 0] step:6761/10000 train_time:281257ms step_avg:41.60ms +[2025-09-11 10:41:25] [Rank 0] step:6761/10000 train_time:281257ms step_avg:41.60ms +[2025-09-11 10:41:25] [Rank 0] step:6781/10000 train_time:281952ms step_avg:41.58ms +[2025-09-11 10:41:25] [Rank 0] step:6781/10000 train_time:281952ms step_avg:41.58ms +[2025-09-11 10:41:26] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 10:41:26] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 10:41:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 10:41:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 10:41:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 10:41:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 10:41:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:41:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:41:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 10:41:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 10:41:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 10:41:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 10:41:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 10:41:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 10:41:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 10:41:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 10:41:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 10:41:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 10:41:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 10:41:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 10:41:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 10:41:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 10:41:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 10:41:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 10:41:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 10:41:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 10:41:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 10:41:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 10:41:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 10:41:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 10:41:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 10:41:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 10:41:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 10:41:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 10:41:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 10:41:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 10:41:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 10:41:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 10:41:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 10:41:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 10:41:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 10:41:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 10:41:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 10:41:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 10:41:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:41:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:41:36] [Rank 0] PRINT: step:6800/10000 val_loss:4.5750 total_sharp:6.8272e-04 L1_sharp:2.3180e-04 L2_sharp:1.7637e-04 L3_sharp:3.0705e-05 L4_sharp:6.2965e-05 L5_sharp:6.0024e-05 L6_sharp:7.0088e-05 L7_sharp:5.8123e-05 L8_sharp:1.8311e-04 L9_sharp:1.4910e-04 L10_sharp:2.0092e-04 L11_sharp:2.9994e-04 L12_sharp:1.6259e-03 total_fnorm:2.0625e+01 total_l1_linf:5.1712e+04 total_spectral:1.0375e+01 L1_fnorm:5.1250e+00 L2_fnorm:4.8750e+00 L3_fnorm:4.9375e+00 L4_fnorm:4.9375e+00 L5_fnorm:4.9375e+00 L6_fnorm:4.9688e+00 L7_fnorm:4.9375e+00 L8_fnorm:4.7188e+00 L9_fnorm:4.9062e+00 L10_fnorm:4.9062e+00 L11_fnorm:4.9062e+00 L12_fnorm:4.9062e+00 L1_l1linf:1.2344e+00 L2_l1linf:1.1094e+00 L3_l1linf:1.1094e+00 L4_l1linf:1.1172e+00 L5_l1linf:1.1406e+00 L6_l1linf:1.1562e+00 L7_l1linf:1.1406e+00 L8_l1linf:1.0469e+00 L9_l1linf:1.0078e+00 L10_l1linf:1.0078e+00 L11_l1linf:1.0312e+00 L12_l1linf:1.1328e+00 L1_spectral:6.6619e-02 L2_spectral:6.4409e-02 L3_spectral:6.5416e-02 L4_spectral:6.5474e-02 L5_spectral:6.5501e-02 L6_spectral:6.5870e-02 L7_spectral:6.5855e-02 L8_spectral:6.3732e-02 L9_spectral:6.5755e-02 L10_spectral:6.5536e-02 L11_spectral:6.5601e-02 L12_spectral:6.5781e-02 train_time:282626ms step_avg:41.56ms +[2025-09-11 10:41:36] [Rank 0] PRINT: step:6800/10000 val_loss:4.5750 total_sharp:6.8272e-04 L1_sharp:2.3180e-04 L2_sharp:1.7637e-04 L3_sharp:3.0705e-05 L4_sharp:6.2965e-05 L5_sharp:6.0024e-05 L6_sharp:7.0088e-05 L7_sharp:5.8123e-05 L8_sharp:1.8311e-04 L9_sharp:1.4910e-04 L10_sharp:2.0092e-04 L11_sharp:2.9994e-04 L12_sharp:1.6259e-03 total_fnorm:2.0625e+01 total_l1_linf:5.1712e+04 total_spectral:1.0375e+01 L1_fnorm:5.1250e+00 L2_fnorm:4.8750e+00 L3_fnorm:4.9375e+00 L4_fnorm:4.9375e+00 L5_fnorm:4.9375e+00 L6_fnorm:4.9688e+00 L7_fnorm:4.9375e+00 L8_fnorm:4.7188e+00 L9_fnorm:4.9062e+00 L10_fnorm:4.9062e+00 L11_fnorm:4.9062e+00 L12_fnorm:4.9062e+00 L1_l1linf:1.2344e+00 L2_l1linf:1.1094e+00 L3_l1linf:1.1094e+00 L4_l1linf:1.1172e+00 L5_l1linf:1.1406e+00 
L6_l1linf:1.1562e+00 L7_l1linf:1.1406e+00 L8_l1linf:1.0469e+00 L9_l1linf:1.0078e+00 L10_l1linf:1.0078e+00 L11_l1linf:1.0312e+00 L12_l1linf:1.1328e+00 L1_spectral:6.6619e-02 L2_spectral:6.4409e-02 L3_spectral:6.5416e-02 L4_spectral:6.5474e-02 L5_spectral:6.5501e-02 L6_spectral:6.5870e-02 L7_spectral:6.5855e-02 L8_spectral:6.3732e-02 L9_spectral:6.5755e-02 L10_spectral:6.5536e-02 L11_spectral:6.5601e-02 L12_spectral:6.5781e-02 train_time:282626ms step_avg:41.56ms +[2025-09-11 10:41:38] [Rank 0] step:6801/10000 train_time:283960ms step_avg:41.75ms +[2025-09-11 10:41:38] [Rank 0] step:6801/10000 train_time:283960ms step_avg:41.75ms +[2025-09-11 10:41:38] [Rank 0] step:6821/10000 train_time:284960ms step_avg:41.78ms +[2025-09-11 10:41:38] [Rank 0] step:6821/10000 train_time:284960ms step_avg:41.78ms +[2025-09-11 10:41:39] [Rank 0] step:6841/10000 train_time:285659ms step_avg:41.76ms +[2025-09-11 10:41:39] [Rank 0] step:6841/10000 train_time:285659ms step_avg:41.76ms +[2025-09-11 10:41:40] [Rank 0] step:6861/10000 train_time:286356ms step_avg:41.74ms +[2025-09-11 10:41:40] [Rank 0] step:6861/10000 train_time:286356ms step_avg:41.74ms +[2025-09-11 10:41:41] [Rank 0] step:6881/10000 train_time:287053ms step_avg:41.72ms +[2025-09-11 10:41:41] [Rank 0] step:6881/10000 train_time:287053ms step_avg:41.72ms +[2025-09-11 10:41:41] [Rank 0] step:6901/10000 train_time:287747ms step_avg:41.70ms +[2025-09-11 10:41:41] [Rank 0] step:6901/10000 train_time:287747ms step_avg:41.70ms +[2025-09-11 10:41:42] [Rank 0] step:6921/10000 train_time:288441ms step_avg:41.68ms +[2025-09-11 10:41:42] [Rank 0] step:6921/10000 train_time:288441ms step_avg:41.68ms +[2025-09-11 10:41:43] [Rank 0] step:6941/10000 train_time:289138ms step_avg:41.66ms +[2025-09-11 10:41:43] [Rank 0] step:6941/10000 train_time:289138ms step_avg:41.66ms +[2025-09-11 10:41:43] [Rank 0] step:6961/10000 train_time:289833ms step_avg:41.64ms +[2025-09-11 10:41:43] [Rank 0] step:6961/10000 train_time:289833ms step_avg:41.64ms 
+[2025-09-11 10:41:44] [Rank 0] step:6981/10000 train_time:290531ms step_avg:41.62ms +[2025-09-11 10:41:44] [Rank 0] step:6981/10000 train_time:290531ms step_avg:41.62ms +[2025-09-11 10:41:45] [Rank 0] step:7001/10000 train_time:291227ms step_avg:41.60ms +[2025-09-11 10:41:45] [Rank 0] step:7001/10000 train_time:291227ms step_avg:41.60ms +[2025-09-11 10:41:45] [Rank 0] step:7021/10000 train_time:291922ms step_avg:41.58ms +[2025-09-11 10:41:45] [Rank 0] step:7021/10000 train_time:291922ms step_avg:41.58ms +[2025-09-11 10:41:46] [Rank 0] step:7041/10000 train_time:292617ms step_avg:41.56ms +[2025-09-11 10:41:46] [Rank 0] step:7041/10000 train_time:292617ms step_avg:41.56ms +[2025-09-11 10:41:47] [Rank 0] step:7061/10000 train_time:293314ms step_avg:41.54ms +[2025-09-11 10:41:47] [Rank 0] step:7061/10000 train_time:293314ms step_avg:41.54ms +[2025-09-11 10:41:47] [Rank 0] step:7081/10000 train_time:294009ms step_avg:41.52ms +[2025-09-11 10:41:47] [Rank 0] step:7081/10000 train_time:294009ms step_avg:41.52ms +[2025-09-11 10:41:48] [Rank 0] step:7101/10000 train_time:294704ms step_avg:41.50ms +[2025-09-11 10:41:48] [Rank 0] step:7101/10000 train_time:294704ms step_avg:41.50ms +[2025-09-11 10:41:49] [Rank 0] step:7121/10000 train_time:295401ms step_avg:41.48ms +[2025-09-11 10:41:49] [Rank 0] step:7121/10000 train_time:295401ms step_avg:41.48ms +[2025-09-11 10:41:50] [Rank 0] step:7141/10000 train_time:296096ms step_avg:41.46ms +[2025-09-11 10:41:50] [Rank 0] step:7141/10000 train_time:296096ms step_avg:41.46ms +[2025-09-11 10:41:50] [Rank 0] step:7161/10000 train_time:296794ms step_avg:41.45ms +[2025-09-11 10:41:50] [Rank 0] step:7161/10000 train_time:296794ms step_avg:41.45ms +[2025-09-11 10:41:51] [Rank 0] step:7181/10000 train_time:297488ms step_avg:41.43ms +[2025-09-11 10:41:51] [Rank 0] step:7181/10000 train_time:297488ms step_avg:41.43ms +[2025-09-11 10:41:52] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 10:41:52] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 10:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 10:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 10:41:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 10:41:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 10:41:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:41:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:41:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 10:41:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 10:41:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 10:41:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 10:41:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 10:41:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 10:41:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 10:41:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 10:41:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 10:41:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 10:41:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 10:41:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 10:41:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 10:41:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 10:41:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 10:41:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 10:41:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 10:41:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 10:41:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 10:41:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 10:41:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 10:41:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 10:42:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 10:42:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 10:42:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 10:42:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 10:42:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 10:42:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 10:42:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 10:42:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 10:42:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 10:42:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 10:42:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 10:42:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 10:42:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 10:42:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 10:42:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:42:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:42:02] [Rank 0] PRINT: step:7200/10000 val_loss:4.5365 total_sharp:5.6896e-04 L1_sharp:2.2309e-04 L2_sharp:5.8910e-05 L3_sharp:5.1245e-05 L4_sharp:6.0036e-05 L5_sharp:4.2988e-05 L6_sharp:9.9742e-05 L7_sharp:6.3196e-05 L8_sharp:1.6958e-04 L9_sharp:1.6218e-04 L10_sharp:1.8156e-04 L11_sharp:2.4862e-04 L12_sharp:9.6870e-04 total_fnorm:1.7875e+01 total_l1_linf:4.2240e+04 total_spectral:9.0000e+00 L1_fnorm:4.4688e+00 L2_fnorm:4.2812e+00 L3_fnorm:4.3125e+00 L4_fnorm:4.3125e+00 L5_fnorm:4.3125e+00 L6_fnorm:4.3438e+00 L7_fnorm:4.3125e+00 L8_fnorm:4.1250e+00 L9_fnorm:4.2812e+00 L10_fnorm:4.2500e+00 L11_fnorm:4.2500e+00 L12_fnorm:4.2500e+00 L1_l1linf:1.0391e+00 L2_l1linf:9.2969e-01 L3_l1linf:9.4922e-01 L4_l1linf:9.4922e-01 L5_l1linf:9.6484e-01 L6_l1linf:9.6484e-01 L7_l1linf:9.5703e-01 L8_l1linf:8.7500e-01 L9_l1linf:8.3984e-01 L10_l1linf:8.1641e-01 L11_l1linf:8.5938e-01 L12_l1linf:9.6094e-01 L1_spectral:5.9041e-02 L2_spectral:5.7419e-02 L3_spectral:5.8069e-02 L4_spectral:5.7947e-02 L5_spectral:5.8665e-02 L6_spectral:5.8140e-02 L7_spectral:5.8040e-02 L8_spectral:5.7092e-02 L9_spectral:5.8837e-02 L10_spectral:5.8440e-02 L11_spectral:5.8530e-02 L12_spectral:5.8640e-02 train_time:298164ms step_avg:41.41ms +[2025-09-11 10:42:02] [Rank 0] PRINT: step:7200/10000 
val_loss:4.5365 total_sharp:5.6896e-04 L1_sharp:2.2309e-04 L2_sharp:5.8910e-05 L3_sharp:5.1245e-05 L4_sharp:6.0036e-05 L5_sharp:4.2988e-05 L6_sharp:9.9742e-05 L7_sharp:6.3196e-05 L8_sharp:1.6958e-04 L9_sharp:1.6218e-04 L10_sharp:1.8156e-04 L11_sharp:2.4862e-04 L12_sharp:9.6870e-04 total_fnorm:1.7875e+01 total_l1_linf:4.2240e+04 total_spectral:9.0000e+00 L1_fnorm:4.4688e+00 L2_fnorm:4.2812e+00 L3_fnorm:4.3125e+00 L4_fnorm:4.3125e+00 L5_fnorm:4.3125e+00 L6_fnorm:4.3438e+00 L7_fnorm:4.3125e+00 L8_fnorm:4.1250e+00 L9_fnorm:4.2812e+00 L10_fnorm:4.2500e+00 L11_fnorm:4.2500e+00 L12_fnorm:4.2500e+00 L1_l1linf:1.0391e+00 L2_l1linf:9.2969e-01 L3_l1linf:9.4922e-01 L4_l1linf:9.4922e-01 L5_l1linf:9.6484e-01 L6_l1linf:9.6484e-01 L7_l1linf:9.5703e-01 L8_l1linf:8.7500e-01 L9_l1linf:8.3984e-01 L10_l1linf:8.1641e-01 L11_l1linf:8.5938e-01 L12_l1linf:9.6094e-01 L1_spectral:5.9041e-02 L2_spectral:5.7419e-02 L3_spectral:5.8069e-02 L4_spectral:5.7947e-02 L5_spectral:5.8665e-02 L6_spectral:5.8140e-02 L7_spectral:5.8040e-02 L8_spectral:5.7092e-02 L9_spectral:5.8837e-02 L10_spectral:5.8440e-02 L11_spectral:5.8530e-02 L12_spectral:5.8640e-02 train_time:298164ms step_avg:41.41ms +[2025-09-11 10:42:03] [Rank 0] step:7201/10000 train_time:299456ms step_avg:41.59ms +[2025-09-11 10:42:03] [Rank 0] step:7201/10000 train_time:299456ms step_avg:41.59ms +[2025-09-11 10:42:04] [Rank 0] step:7221/10000 train_time:300201ms step_avg:41.57ms +[2025-09-11 10:42:04] [Rank 0] step:7221/10000 train_time:300201ms step_avg:41.57ms +[2025-09-11 10:42:05] [Rank 0] step:7241/10000 train_time:301199ms step_avg:41.60ms +[2025-09-11 10:42:05] [Rank 0] step:7241/10000 train_time:301199ms step_avg:41.60ms +[2025-09-11 10:42:05] [Rank 0] step:7261/10000 train_time:301897ms step_avg:41.58ms +[2025-09-11 10:42:05] [Rank 0] step:7261/10000 train_time:301897ms step_avg:41.58ms +[2025-09-11 10:42:06] [Rank 0] step:7281/10000 train_time:302599ms step_avg:41.56ms +[2025-09-11 10:42:06] [Rank 0] step:7281/10000 
train_time:302599ms step_avg:41.56ms +[2025-09-11 10:42:07] [Rank 0] step:7301/10000 train_time:303294ms step_avg:41.54ms +[2025-09-11 10:42:07] [Rank 0] step:7301/10000 train_time:303294ms step_avg:41.54ms +[2025-09-11 10:42:07] [Rank 0] step:7321/10000 train_time:303990ms step_avg:41.52ms +[2025-09-11 10:42:07] [Rank 0] step:7321/10000 train_time:303990ms step_avg:41.52ms +[2025-09-11 10:42:08] [Rank 0] step:7341/10000 train_time:304687ms step_avg:41.50ms +[2025-09-11 10:42:08] [Rank 0] step:7341/10000 train_time:304687ms step_avg:41.50ms +[2025-09-11 10:42:09] [Rank 0] step:7361/10000 train_time:305383ms step_avg:41.49ms +[2025-09-11 10:42:09] [Rank 0] step:7361/10000 train_time:305383ms step_avg:41.49ms +[2025-09-11 10:42:10] [Rank 0] step:7381/10000 train_time:306080ms step_avg:41.47ms +[2025-09-11 10:42:10] [Rank 0] step:7381/10000 train_time:306080ms step_avg:41.47ms +[2025-09-11 10:42:10] [Rank 0] step:7401/10000 train_time:306775ms step_avg:41.45ms +[2025-09-11 10:42:10] [Rank 0] step:7401/10000 train_time:306775ms step_avg:41.45ms +[2025-09-11 10:42:11] [Rank 0] step:7421/10000 train_time:307471ms step_avg:41.43ms +[2025-09-11 10:42:11] [Rank 0] step:7421/10000 train_time:307471ms step_avg:41.43ms +[2025-09-11 10:42:12] [Rank 0] step:7441/10000 train_time:308168ms step_avg:41.41ms +[2025-09-11 10:42:12] [Rank 0] step:7441/10000 train_time:308168ms step_avg:41.41ms +[2025-09-11 10:42:12] [Rank 0] step:7461/10000 train_time:308864ms step_avg:41.40ms +[2025-09-11 10:42:12] [Rank 0] step:7461/10000 train_time:308864ms step_avg:41.40ms +[2025-09-11 10:42:13] [Rank 0] step:7481/10000 train_time:309564ms step_avg:41.38ms +[2025-09-11 10:42:13] [Rank 0] step:7481/10000 train_time:309564ms step_avg:41.38ms +[2025-09-11 10:42:14] [Rank 0] step:7501/10000 train_time:310262ms step_avg:41.36ms +[2025-09-11 10:42:14] [Rank 0] step:7501/10000 train_time:310262ms step_avg:41.36ms +[2025-09-11 10:42:14] [Rank 0] step:7521/10000 train_time:310959ms step_avg:41.35ms 
+[2025-09-11 10:42:14] [Rank 0] step:7521/10000 train_time:310959ms step_avg:41.35ms +[2025-09-11 10:42:15] [Rank 0] step:7541/10000 train_time:311654ms step_avg:41.33ms +[2025-09-11 10:42:15] [Rank 0] step:7541/10000 train_time:311654ms step_avg:41.33ms +[2025-09-11 10:42:16] [Rank 0] step:7561/10000 train_time:312353ms step_avg:41.31ms +[2025-09-11 10:42:16] [Rank 0] step:7561/10000 train_time:312353ms step_avg:41.31ms +[2025-09-11 10:42:17] [Rank 0] step:7581/10000 train_time:313051ms step_avg:41.29ms +[2025-09-11 10:42:17] [Rank 0] step:7581/10000 train_time:313051ms step_avg:41.29ms +[2025-09-11 10:42:17] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 10:42:17] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 10:42:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 10:42:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 10:42:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 10:42:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 10:42:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:42:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:42:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 10:42:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 10:42:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 10:42:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 10:42:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 10:42:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 10:42:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 10:42:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 10:42:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 10:42:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 10:42:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 10:42:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 10:42:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 10:42:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 10:42:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 10:42:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 10:42:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 10:42:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 10:42:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 10:42:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 10:42:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 10:42:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 10:42:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 10:42:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 10:42:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 10:42:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 10:42:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 10:42:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 10:42:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 10:42:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 10:42:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 10:42:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 10:42:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 10:42:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 10:42:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 10:42:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 10:42:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:42:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:42:27] [Rank 0] PRINT: step:7600/10000 val_loss:4.5023 total_sharp:5.2261e-04 L1_sharp:2.9638e-04 L2_sharp:5.5073e-05 L3_sharp:7.6940e-05 L4_sharp:6.4934e-05 L5_sharp:3.2772e-05 L6_sharp:6.2272e-05 L7_sharp:4.8136e-05 L8_sharp:1.4642e-04 L9_sharp:1.3217e-04 L10_sharp:1.8129e-04 L11_sharp:2.2283e-04 L12_sharp:7.3336e-04 total_fnorm:1.4750e+01 total_l1_linf:3.2512e+04 total_spectral:7.4062e+00 L1_fnorm:3.7812e+00 L2_fnorm:3.6250e+00 L3_fnorm:3.6406e+00 L4_fnorm:3.6406e+00 L5_fnorm:3.6406e+00 L6_fnorm:3.6406e+00 L7_fnorm:3.6250e+00 L8_fnorm:3.4531e+00 L9_fnorm:3.6094e+00 L10_fnorm:3.5781e+00 L11_fnorm:3.5938e+00 L12_fnorm:3.5938e+00 L1_l1linf:8.2812e-01 L2_l1linf:7.5781e-01 L3_l1linf:7.9688e-01 L4_l1linf:7.6562e-01 L5_l1linf:7.8906e-01 L6_l1linf:7.8125e-01 L7_l1linf:7.5781e-01 L8_l1linf:6.9922e-01 L9_l1linf:6.7188e-01 L10_l1linf:6.5625e-01 L11_l1linf:6.9141e-01 L12_l1linf:8.0078e-01 L1_spectral:5.1210e-02 L2_spectral:4.9441e-02 L3_spectral:5.0290e-02 L4_spectral:4.9716e-02 L5_spectral:5.0326e-02 L6_spectral:5.0483e-02 L7_spectral:5.0220e-02 L8_spectral:4.8566e-02 L9_spectral:5.0543e-02 L10_spectral:5.0597e-02 L11_spectral:5.0617e-02 L12_spectral:5.0377e-02 train_time:313729ms step_avg:41.28ms +[2025-09-11 10:42:27] [Rank 0] PRINT: step:7600/10000 val_loss:4.5023 total_sharp:5.2261e-04 L1_sharp:2.9638e-04 L2_sharp:5.5073e-05 L3_sharp:7.6940e-05 L4_sharp:6.4934e-05 L5_sharp:3.2772e-05 L6_sharp:6.2272e-05 L7_sharp:4.8136e-05 L8_sharp:1.4642e-04 L9_sharp:1.3217e-04 L10_sharp:1.8129e-04 L11_sharp:2.2283e-04 L12_sharp:7.3336e-04 total_fnorm:1.4750e+01 total_l1_linf:3.2512e+04 total_spectral:7.4062e+00 L1_fnorm:3.7812e+00 L2_fnorm:3.6250e+00 L3_fnorm:3.6406e+00 L4_fnorm:3.6406e+00 L5_fnorm:3.6406e+00 L6_fnorm:3.6406e+00 L7_fnorm:3.6250e+00 L8_fnorm:3.4531e+00 L9_fnorm:3.6094e+00 L10_fnorm:3.5781e+00 L11_fnorm:3.5938e+00 L12_fnorm:3.5938e+00 L1_l1linf:8.2812e-01 L2_l1linf:7.5781e-01 L3_l1linf:7.9688e-01 L4_l1linf:7.6562e-01 L5_l1linf:7.8906e-01 
L6_l1linf:7.8125e-01 L7_l1linf:7.5781e-01 L8_l1linf:6.9922e-01 L9_l1linf:6.7188e-01 L10_l1linf:6.5625e-01 L11_l1linf:6.9141e-01 L12_l1linf:8.0078e-01 L1_spectral:5.1210e-02 L2_spectral:4.9441e-02 L3_spectral:5.0290e-02 L4_spectral:4.9716e-02 L5_spectral:5.0326e-02 L6_spectral:5.0483e-02 L7_spectral:5.0220e-02 L8_spectral:4.8566e-02 L9_spectral:5.0543e-02 L10_spectral:5.0597e-02 L11_spectral:5.0617e-02 L12_spectral:5.0377e-02 train_time:313729ms step_avg:41.28ms +[2025-09-11 10:42:28] [Rank 0] step:7601/10000 train_time:315005ms step_avg:41.44ms +[2025-09-11 10:42:28] [Rank 0] step:7601/10000 train_time:315005ms step_avg:41.44ms +[2025-09-11 10:42:29] [Rank 0] step:7621/10000 train_time:315739ms step_avg:41.43ms +[2025-09-11 10:42:29] [Rank 0] step:7621/10000 train_time:315739ms step_avg:41.43ms +[2025-09-11 10:42:30] [Rank 0] step:7641/10000 train_time:316438ms step_avg:41.41ms +[2025-09-11 10:42:30] [Rank 0] step:7641/10000 train_time:316438ms step_avg:41.41ms +[2025-09-11 10:42:31] [Rank 0] step:7661/10000 train_time:317135ms step_avg:41.40ms +[2025-09-11 10:42:31] [Rank 0] step:7661/10000 train_time:317135ms step_avg:41.40ms +[2025-09-11 10:42:31] [Rank 0] step:7681/10000 train_time:317832ms step_avg:41.38ms +[2025-09-11 10:42:31] [Rank 0] step:7681/10000 train_time:317832ms step_avg:41.38ms +[2025-09-11 10:42:32] [Rank 0] step:7701/10000 train_time:318532ms step_avg:41.36ms +[2025-09-11 10:42:32] [Rank 0] step:7701/10000 train_time:318532ms step_avg:41.36ms +[2025-09-11 10:42:33] [Rank 0] step:7721/10000 train_time:319230ms step_avg:41.35ms +[2025-09-11 10:42:33] [Rank 0] step:7721/10000 train_time:319230ms step_avg:41.35ms +[2025-09-11 10:42:33] [Rank 0] step:7741/10000 train_time:319928ms step_avg:41.33ms +[2025-09-11 10:42:33] [Rank 0] step:7741/10000 train_time:319928ms step_avg:41.33ms +[2025-09-11 10:42:34] [Rank 0] step:7761/10000 train_time:320626ms step_avg:41.31ms +[2025-09-11 10:42:34] [Rank 0] step:7761/10000 train_time:320626ms step_avg:41.31ms 
+[2025-09-11 10:42:35] [Rank 0] step:7781/10000 train_time:321326ms step_avg:41.30ms +[2025-09-11 10:42:35] [Rank 0] step:7781/10000 train_time:321326ms step_avg:41.30ms +[2025-09-11 10:42:36] [Rank 0] step:7801/10000 train_time:322023ms step_avg:41.28ms +[2025-09-11 10:42:36] [Rank 0] step:7801/10000 train_time:322023ms step_avg:41.28ms +[2025-09-11 10:42:36] [Rank 0] step:7821/10000 train_time:322721ms step_avg:41.26ms +[2025-09-11 10:42:36] [Rank 0] step:7821/10000 train_time:322721ms step_avg:41.26ms +[2025-09-11 10:42:37] [Rank 0] step:7841/10000 train_time:323420ms step_avg:41.25ms +[2025-09-11 10:42:37] [Rank 0] step:7841/10000 train_time:323420ms step_avg:41.25ms +[2025-09-11 10:42:38] [Rank 0] step:7861/10000 train_time:324121ms step_avg:41.23ms +[2025-09-11 10:42:38] [Rank 0] step:7861/10000 train_time:324121ms step_avg:41.23ms +[2025-09-11 10:42:38] [Rank 0] step:7881/10000 train_time:324819ms step_avg:41.22ms +[2025-09-11 10:42:38] [Rank 0] step:7881/10000 train_time:324819ms step_avg:41.22ms +[2025-09-11 10:42:39] [Rank 0] step:7901/10000 train_time:325518ms step_avg:41.20ms +[2025-09-11 10:42:39] [Rank 0] step:7901/10000 train_time:325518ms step_avg:41.20ms +[2025-09-11 10:42:40] [Rank 0] step:7921/10000 train_time:326217ms step_avg:41.18ms +[2025-09-11 10:42:40] [Rank 0] step:7921/10000 train_time:326217ms step_avg:41.18ms +[2025-09-11 10:42:40] [Rank 0] step:7941/10000 train_time:326916ms step_avg:41.17ms +[2025-09-11 10:42:40] [Rank 0] step:7941/10000 train_time:326916ms step_avg:41.17ms +[2025-09-11 10:42:41] [Rank 0] step:7961/10000 train_time:327613ms step_avg:41.15ms +[2025-09-11 10:42:41] [Rank 0] step:7961/10000 train_time:327613ms step_avg:41.15ms +[2025-09-11 10:42:42] [Rank 0] step:7981/10000 train_time:328314ms step_avg:41.14ms +[2025-09-11 10:42:42] [Rank 0] step:7981/10000 train_time:328314ms step_avg:41.14ms +[2025-09-11 10:42:42] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 10:42:42] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 10:42:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 10:42:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 10:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 10:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 10:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 10:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 10:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 10:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 10:42:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 10:42:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 10:42:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 10:42:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 10:42:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 10:42:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 10:42:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 10:42:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 10:42:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 10:42:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 10:42:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 10:42:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 10:42:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 10:42:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 10:42:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 10:42:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 10:42:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 10:42:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 10:42:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 10:42:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 10:42:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 10:42:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 10:42:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 10:42:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 10:42:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 10:42:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 10:42:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 10:42:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 10:42:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 10:42:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 10:42:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 10:42:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 10:42:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:42:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:42:52] [Rank 0] PRINT: step:8000/10000 val_loss:4.4790 total_sharp:5.5405e-04 L1_sharp:3.2137e-04 L2_sharp:1.0810e-04 L3_sharp:6.4411e-05 L4_sharp:7.5195e-05 L5_sharp:4.4695e-05 L6_sharp:9.7434e-05 L7_sharp:5.2604e-05 L8_sharp:1.4940e-04 L9_sharp:1.4277e-04 L10_sharp:1.7359e-04 L11_sharp:2.4308e-04 L12_sharp:1.0574e-03 total_fnorm:1.2062e+01 total_l1_linf:2.4832e+04 total_spectral:6.0625e+00 L1_fnorm:3.1250e+00 L2_fnorm:2.9531e+00 L3_fnorm:2.9531e+00 L4_fnorm:2.9531e+00 L5_fnorm:2.9531e+00 L6_fnorm:2.9531e+00 L7_fnorm:2.9531e+00 L8_fnorm:2.8125e+00 L9_fnorm:2.9219e+00 L10_fnorm:2.9062e+00 L11_fnorm:2.9062e+00 L12_fnorm:2.9219e+00 L1_l1linf:6.4453e-01 L2_l1linf:5.8203e-01 L3_l1linf:6.0938e-01 L4_l1linf:5.8594e-01 L5_l1linf:6.2500e-01 L6_l1linf:5.9766e-01 L7_l1linf:5.9766e-01 L8_l1linf:5.3906e-01 L9_l1linf:5.2344e-01 L10_l1linf:5.0391e-01 L11_l1linf:5.5078e-01 L12_l1linf:6.2500e-01 L1_spectral:4.2737e-02 L2_spectral:4.1020e-02 L3_spectral:4.1032e-02 L4_spectral:4.1230e-02 L5_spectral:4.1483e-02 L6_spectral:4.2009e-02 L7_spectral:4.2096e-02 L8_spectral:4.0984e-02 L9_spectral:4.2083e-02 L10_spectral:4.2068e-02 L11_spectral:4.2092e-02 L12_spectral:4.2133e-02 train_time:328991ms step_avg:41.12ms +[2025-09-11 10:42:52] [Rank 0] PRINT: step:8000/10000 
val_loss:4.4790 total_sharp:5.5405e-04 L1_sharp:3.2137e-04 L2_sharp:1.0810e-04 L3_sharp:6.4411e-05 L4_sharp:7.5195e-05 L5_sharp:4.4695e-05 L6_sharp:9.7434e-05 L7_sharp:5.2604e-05 L8_sharp:1.4940e-04 L9_sharp:1.4277e-04 L10_sharp:1.7359e-04 L11_sharp:2.4308e-04 L12_sharp:1.0574e-03 total_fnorm:1.2062e+01 total_l1_linf:2.4832e+04 total_spectral:6.0625e+00 L1_fnorm:3.1250e+00 L2_fnorm:2.9531e+00 L3_fnorm:2.9531e+00 L4_fnorm:2.9531e+00 L5_fnorm:2.9531e+00 L6_fnorm:2.9531e+00 L7_fnorm:2.9531e+00 L8_fnorm:2.8125e+00 L9_fnorm:2.9219e+00 L10_fnorm:2.9062e+00 L11_fnorm:2.9062e+00 L12_fnorm:2.9219e+00 L1_l1linf:6.4453e-01 L2_l1linf:5.8203e-01 L3_l1linf:6.0938e-01 L4_l1linf:5.8594e-01 L5_l1linf:6.2500e-01 L6_l1linf:5.9766e-01 L7_l1linf:5.9766e-01 L8_l1linf:5.3906e-01 L9_l1linf:5.2344e-01 L10_l1linf:5.0391e-01 L11_l1linf:5.5078e-01 L12_l1linf:6.2500e-01 L1_spectral:4.2737e-02 L2_spectral:4.1020e-02 L3_spectral:4.1032e-02 L4_spectral:4.1230e-02 L5_spectral:4.1483e-02 L6_spectral:4.2009e-02 L7_spectral:4.2096e-02 L8_spectral:4.0984e-02 L9_spectral:4.2083e-02 L10_spectral:4.2068e-02 L11_spectral:4.2092e-02 L12_spectral:4.2133e-02 train_time:328991ms step_avg:41.12ms +[2025-09-11 10:42:54] [Rank 0] step:8001/10000 train_time:330279ms step_avg:41.28ms +[2025-09-11 10:42:54] [Rank 0] step:8001/10000 train_time:330279ms step_avg:41.28ms +[2025-09-11 10:42:54] [Rank 0] step:8021/10000 train_time:330990ms step_avg:41.27ms +[2025-09-11 10:42:54] [Rank 0] step:8021/10000 train_time:330990ms step_avg:41.27ms +[2025-09-11 10:42:55] [Rank 0] step:8041/10000 train_time:331690ms step_avg:41.25ms +[2025-09-11 10:42:55] [Rank 0] step:8041/10000 train_time:331690ms step_avg:41.25ms +[2025-09-11 10:42:56] [Rank 0] step:8061/10000 train_time:332391ms step_avg:41.23ms +[2025-09-11 10:42:56] [Rank 0] step:8061/10000 train_time:332391ms step_avg:41.23ms +[2025-09-11 10:42:57] [Rank 0] step:8081/10000 train_time:333088ms step_avg:41.22ms +[2025-09-11 10:42:57] [Rank 0] step:8081/10000 
train_time:333088ms step_avg:41.22ms +[2025-09-11 10:42:57] [Rank 0] step:8101/10000 train_time:333784ms step_avg:41.20ms +[2025-09-11 10:42:57] [Rank 0] step:8101/10000 train_time:333784ms step_avg:41.20ms +[2025-09-11 10:42:58] [Rank 0] step:8121/10000 train_time:334486ms step_avg:41.19ms +[2025-09-11 10:42:58] [Rank 0] step:8121/10000 train_time:334486ms step_avg:41.19ms +[2025-09-11 10:42:59] [Rank 0] step:8141/10000 train_time:335927ms step_avg:41.26ms +[2025-09-11 10:42:59] [Rank 0] step:8141/10000 train_time:335927ms step_avg:41.26ms +[2025-09-11 10:43:00] [Rank 0] step:8161/10000 train_time:336628ms step_avg:41.25ms +[2025-09-11 10:43:00] [Rank 0] step:8161/10000 train_time:336628ms step_avg:41.25ms +[2025-09-11 10:43:01] [Rank 0] step:8181/10000 train_time:337338ms step_avg:41.23ms +[2025-09-11 10:43:01] [Rank 0] step:8181/10000 train_time:337338ms step_avg:41.23ms +[2025-09-11 10:43:02] [Rank 0] step:8201/10000 train_time:338045ms step_avg:41.22ms +[2025-09-11 10:43:02] [Rank 0] step:8201/10000 train_time:338045ms step_avg:41.22ms +[2025-09-11 10:43:02] [Rank 0] step:8221/10000 train_time:338750ms step_avg:41.21ms +[2025-09-11 10:43:02] [Rank 0] step:8221/10000 train_time:338750ms step_avg:41.21ms +[2025-09-11 10:43:03] [Rank 0] step:8241/10000 train_time:339465ms step_avg:41.19ms +[2025-09-11 10:43:03] [Rank 0] step:8241/10000 train_time:339465ms step_avg:41.19ms +[2025-09-11 10:43:04] [Rank 0] step:8261/10000 train_time:340169ms step_avg:41.18ms +[2025-09-11 10:43:04] [Rank 0] step:8261/10000 train_time:340169ms step_avg:41.18ms +[2025-09-11 10:43:04] [Rank 0] step:8281/10000 train_time:340871ms step_avg:41.16ms +[2025-09-11 10:43:04] [Rank 0] step:8281/10000 train_time:340871ms step_avg:41.16ms +[2025-09-11 10:43:05] [Rank 0] step:8301/10000 train_time:341982ms step_avg:41.20ms +[2025-09-11 10:43:05] [Rank 0] step:8301/10000 train_time:341982ms step_avg:41.20ms +[2025-09-11 10:43:06] [Rank 0] step:8321/10000 train_time:342792ms step_avg:41.20ms 
+[2025-09-11 10:43:06] [Rank 0] step:8321/10000 train_time:342792ms step_avg:41.20ms +[2025-09-11 10:43:07] [Rank 0] step:8341/10000 train_time:343505ms step_avg:41.18ms +[2025-09-11 10:43:07] [Rank 0] step:8341/10000 train_time:343505ms step_avg:41.18ms +[2025-09-11 10:43:08] [Rank 0] step:8361/10000 train_time:344458ms step_avg:41.20ms +[2025-09-11 10:43:08] [Rank 0] step:8361/10000 train_time:344458ms step_avg:41.20ms +[2025-09-11 10:43:09] [Rank 0] step:8381/10000 train_time:345166ms step_avg:41.18ms +[2025-09-11 10:43:09] [Rank 0] step:8381/10000 train_time:345166ms step_avg:41.18ms +[2025-09-11 10:43:09] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 10:43:09] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 10:43:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 10:43:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 10:43:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 10:43:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 10:43:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:43:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:43:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 10:43:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 10:43:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 10:43:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 10:43:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 10:43:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 10:43:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 10:43:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 10:43:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 10:43:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 10:43:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 10:43:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 10:43:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 10:43:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 10:43:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 10:43:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 10:43:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 10:43:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 10:43:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 10:43:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 10:43:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 10:43:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 10:43:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 10:43:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 10:43:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 10:43:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 10:43:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 10:43:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 10:43:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 10:43:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 10:43:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 10:43:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 10:43:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 10:43:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 10:43:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 10:43:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 10:43:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:43:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:43:19] [Rank 0] PRINT: step:8400/10000 val_loss:4.4556 total_sharp:4.7021e-04 L1_sharp:2.1658e-04 L2_sharp:4.2862e-05 L3_sharp:5.6493e-05 L4_sharp:2.1814e-05 L5_sharp:2.2505e-05 L6_sharp:3.8983e-05 L7_sharp:2.9557e-05 L8_sharp:1.2701e-04 L9_sharp:1.1041e-04 L10_sharp:1.5437e-04 L11_sharp:1.9963e-04 L12_sharp:7.8117e-04 total_fnorm:9.2500e+00 total_l1_linf:1.7152e+04 total_spectral:4.5625e+00 L1_fnorm:2.4375e+00 L2_fnorm:2.3125e+00 L3_fnorm:2.3125e+00 L4_fnorm:2.3125e+00 L5_fnorm:2.2969e+00 L6_fnorm:2.3125e+00 L7_fnorm:2.2969e+00 L8_fnorm:2.1875e+00 L9_fnorm:2.2656e+00 L10_fnorm:2.2500e+00 L11_fnorm:2.2500e+00 L12_fnorm:2.2656e+00 L1_l1linf:4.7461e-01 L2_l1linf:4.5117e-01 L3_l1linf:4.4336e-01 L4_l1linf:4.4141e-01 L5_l1linf:4.5508e-01 L6_l1linf:4.4336e-01 L7_l1linf:4.3164e-01 L8_l1linf:4.0234e-01 L9_l1linf:3.7695e-01 L10_l1linf:3.6719e-01 L11_l1linf:3.9453e-01 L12_l1linf:4.8242e-01 L1_spectral:3.4660e-02 L2_spectral:3.2379e-02 L3_spectral:3.2418e-02 L4_spectral:3.2772e-02 L5_spectral:3.3153e-02 L6_spectral:3.3169e-02 L7_spectral:3.3353e-02 L8_spectral:3.3129e-02 L9_spectral:3.3583e-02 L10_spectral:3.3346e-02 L11_spectral:3.3325e-02 L12_spectral:3.3410e-02 train_time:345852ms step_avg:41.17ms +[2025-09-11 10:43:19] [Rank 0] PRINT: step:8400/10000 val_loss:4.4556 total_sharp:4.7021e-04 L1_sharp:2.1658e-04 L2_sharp:4.2862e-05 L3_sharp:5.6493e-05 L4_sharp:2.1814e-05 L5_sharp:2.2505e-05 L6_sharp:3.8983e-05 L7_sharp:2.9557e-05 L8_sharp:1.2701e-04 L9_sharp:1.1041e-04 L10_sharp:1.5437e-04 L11_sharp:1.9963e-04 L12_sharp:7.8117e-04 total_fnorm:9.2500e+00 total_l1_linf:1.7152e+04 total_spectral:4.5625e+00 L1_fnorm:2.4375e+00 L2_fnorm:2.3125e+00 L3_fnorm:2.3125e+00 L4_fnorm:2.3125e+00 L5_fnorm:2.2969e+00 L6_fnorm:2.3125e+00 L7_fnorm:2.2969e+00 L8_fnorm:2.1875e+00 L9_fnorm:2.2656e+00 L10_fnorm:2.2500e+00 L11_fnorm:2.2500e+00 L12_fnorm:2.2656e+00 L1_l1linf:4.7461e-01 L2_l1linf:4.5117e-01 L3_l1linf:4.4336e-01 L4_l1linf:4.4141e-01 L5_l1linf:4.5508e-01 
L6_l1linf:4.4336e-01 L7_l1linf:4.3164e-01 L8_l1linf:4.0234e-01 L9_l1linf:3.7695e-01 L10_l1linf:3.6719e-01 L11_l1linf:3.9453e-01 L12_l1linf:4.8242e-01 L1_spectral:3.4660e-02 L2_spectral:3.2379e-02 L3_spectral:3.2418e-02 L4_spectral:3.2772e-02 L5_spectral:3.3153e-02 L6_spectral:3.3169e-02 L7_spectral:3.3353e-02 L8_spectral:3.3129e-02 L9_spectral:3.3583e-02 L10_spectral:3.3346e-02 L11_spectral:3.3325e-02 L12_spectral:3.3410e-02 train_time:345852ms step_avg:41.17ms +[2025-09-11 10:43:21] [Rank 0] step:8401/10000 train_time:347145ms step_avg:41.32ms +[2025-09-11 10:43:21] [Rank 0] step:8401/10000 train_time:347145ms step_avg:41.32ms +[2025-09-11 10:43:21] [Rank 0] step:8421/10000 train_time:347876ms step_avg:41.31ms +[2025-09-11 10:43:21] [Rank 0] step:8421/10000 train_time:347876ms step_avg:41.31ms +[2025-09-11 10:43:22] [Rank 0] step:8441/10000 train_time:348584ms step_avg:41.30ms +[2025-09-11 10:43:22] [Rank 0] step:8441/10000 train_time:348584ms step_avg:41.30ms +[2025-09-11 10:43:23] [Rank 0] step:8461/10000 train_time:349290ms step_avg:41.28ms +[2025-09-11 10:43:23] [Rank 0] step:8461/10000 train_time:349290ms step_avg:41.28ms +[2025-09-11 10:43:23] [Rank 0] step:8481/10000 train_time:349997ms step_avg:41.27ms +[2025-09-11 10:43:23] [Rank 0] step:8481/10000 train_time:349997ms step_avg:41.27ms +[2025-09-11 10:43:24] [Rank 0] step:8501/10000 train_time:350702ms step_avg:41.25ms +[2025-09-11 10:43:24] [Rank 0] step:8501/10000 train_time:350702ms step_avg:41.25ms +[2025-09-11 10:43:25] [Rank 0] step:8521/10000 train_time:351406ms step_avg:41.24ms +[2025-09-11 10:43:25] [Rank 0] step:8521/10000 train_time:351406ms step_avg:41.24ms +[2025-09-11 10:43:26] [Rank 0] step:8541/10000 train_time:352110ms step_avg:41.23ms +[2025-09-11 10:43:26] [Rank 0] step:8541/10000 train_time:352110ms step_avg:41.23ms +[2025-09-11 10:43:26] [Rank 0] step:8561/10000 train_time:352819ms step_avg:41.21ms +[2025-09-11 10:43:26] [Rank 0] step:8561/10000 train_time:352819ms step_avg:41.21ms 
+[2025-09-11 10:43:27] [Rank 0] step:8581/10000 train_time:353527ms step_avg:41.20ms +[2025-09-11 10:43:27] [Rank 0] step:8581/10000 train_time:353527ms step_avg:41.20ms +[2025-09-11 10:43:28] [Rank 0] step:8601/10000 train_time:354233ms step_avg:41.19ms +[2025-09-11 10:43:28] [Rank 0] step:8601/10000 train_time:354233ms step_avg:41.19ms +[2025-09-11 10:43:28] [Rank 0] step:8621/10000 train_time:354937ms step_avg:41.17ms +[2025-09-11 10:43:28] [Rank 0] step:8621/10000 train_time:354937ms step_avg:41.17ms +[2025-09-11 10:43:29] [Rank 0] step:8641/10000 train_time:355641ms step_avg:41.16ms +[2025-09-11 10:43:29] [Rank 0] step:8641/10000 train_time:355641ms step_avg:41.16ms +[2025-09-11 10:43:30] [Rank 0] step:8661/10000 train_time:356346ms step_avg:41.14ms +[2025-09-11 10:43:30] [Rank 0] step:8661/10000 train_time:356346ms step_avg:41.14ms +[2025-09-11 10:43:31] [Rank 0] step:8681/10000 train_time:357052ms step_avg:41.13ms +[2025-09-11 10:43:31] [Rank 0] step:8681/10000 train_time:357052ms step_avg:41.13ms +[2025-09-11 10:43:31] [Rank 0] step:8701/10000 train_time:357756ms step_avg:41.12ms +[2025-09-11 10:43:31] [Rank 0] step:8701/10000 train_time:357756ms step_avg:41.12ms +[2025-09-11 10:43:32] [Rank 0] step:8721/10000 train_time:358463ms step_avg:41.10ms +[2025-09-11 10:43:32] [Rank 0] step:8721/10000 train_time:358463ms step_avg:41.10ms +[2025-09-11 10:43:33] [Rank 0] step:8741/10000 train_time:359165ms step_avg:41.09ms +[2025-09-11 10:43:33] [Rank 0] step:8741/10000 train_time:359165ms step_avg:41.09ms +[2025-09-11 10:43:33] [Rank 0] step:8761/10000 train_time:359873ms step_avg:41.08ms +[2025-09-11 10:43:33] [Rank 0] step:8761/10000 train_time:359873ms step_avg:41.08ms +[2025-09-11 10:43:34] [Rank 0] step:8781/10000 train_time:360575ms step_avg:41.06ms +[2025-09-11 10:43:34] [Rank 0] step:8781/10000 train_time:360575ms step_avg:41.06ms +[2025-09-11 10:43:35] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 10:43:35] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 10:43:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 10:43:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 10:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 10:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 10:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 10:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 10:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 10:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 10:43:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 10:43:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 10:43:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 10:43:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 10:43:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 10:43:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 10:43:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 10:43:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 10:43:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 10:43:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 10:43:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 10:43:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 10:43:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 10:43:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 10:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 10:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 10:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 10:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 10:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 10:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 10:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 10:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 10:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 10:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 10:43:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 10:43:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 10:43:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 10:43:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 10:43:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 10:43:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 10:43:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 10:43:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 10:43:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:43:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:43:45] [Rank 0] PRINT: step:8800/10000 val_loss:4.4427 total_sharp:4.5097e-04 L1_sharp:3.0166e-04 L2_sharp:6.4525e-05 L3_sharp:5.6950e-05 L4_sharp:4.2130e-05 L5_sharp:2.5491e-05 L6_sharp:5.0899e-05 L7_sharp:3.9415e-05 L8_sharp:1.0462e-04 L9_sharp:1.0961e-04 L10_sharp:1.3847e-04 L11_sharp:2.0935e-04 L12_sharp:1.0166e-03 total_fnorm:6.6875e+00 total_l1_linf:1.1264e+04 total_spectral:3.3438e+00 L1_fnorm:1.7969e+00 L2_fnorm:1.6953e+00 L3_fnorm:1.6875e+00 L4_fnorm:1.6875e+00 L5_fnorm:1.6719e+00 L6_fnorm:1.6797e+00 L7_fnorm:1.6641e+00 L8_fnorm:1.6094e+00 L9_fnorm:1.6406e+00 L10_fnorm:1.6328e+00 L11_fnorm:1.6328e+00 L12_fnorm:1.6562e+00 L1_l1linf:3.2812e-01 L2_l1linf:2.8711e-01 L3_l1linf:2.9492e-01 L4_l1linf:2.8906e-01 L5_l1linf:2.9688e-01 L6_l1linf:2.9102e-01 L7_l1linf:2.8711e-01 L8_l1linf:2.6367e-01 L9_l1linf:2.5977e-01 L10_l1linf:2.4316e-01 L11_l1linf:2.5781e-01 L12_l1linf:3.4961e-01 L1_spectral:2.6250e-02 L2_spectral:2.4267e-02 L3_spectral:2.4279e-02 L4_spectral:2.4595e-02 L5_spectral:2.4582e-02 L6_spectral:2.4784e-02 L7_spectral:2.4673e-02 L8_spectral:2.4937e-02 L9_spectral:2.4911e-02 L10_spectral:2.4941e-02 L11_spectral:2.4807e-02 L12_spectral:2.4977e-02 train_time:361258ms step_avg:41.05ms +[2025-09-11 10:43:45] [Rank 0] PRINT: step:8800/10000 
val_loss:4.4427 total_sharp:4.5097e-04 L1_sharp:3.0166e-04 L2_sharp:6.4525e-05 L3_sharp:5.6950e-05 L4_sharp:4.2130e-05 L5_sharp:2.5491e-05 L6_sharp:5.0899e-05 L7_sharp:3.9415e-05 L8_sharp:1.0462e-04 L9_sharp:1.0961e-04 L10_sharp:1.3847e-04 L11_sharp:2.0935e-04 L12_sharp:1.0166e-03 total_fnorm:6.6875e+00 total_l1_linf:1.1264e+04 total_spectral:3.3438e+00 L1_fnorm:1.7969e+00 L2_fnorm:1.6953e+00 L3_fnorm:1.6875e+00 L4_fnorm:1.6875e+00 L5_fnorm:1.6719e+00 L6_fnorm:1.6797e+00 L7_fnorm:1.6641e+00 L8_fnorm:1.6094e+00 L9_fnorm:1.6406e+00 L10_fnorm:1.6328e+00 L11_fnorm:1.6328e+00 L12_fnorm:1.6562e+00 L1_l1linf:3.2812e-01 L2_l1linf:2.8711e-01 L3_l1linf:2.9492e-01 L4_l1linf:2.8906e-01 L5_l1linf:2.9688e-01 L6_l1linf:2.9102e-01 L7_l1linf:2.8711e-01 L8_l1linf:2.6367e-01 L9_l1linf:2.5977e-01 L10_l1linf:2.4316e-01 L11_l1linf:2.5781e-01 L12_l1linf:3.4961e-01 L1_spectral:2.6250e-02 L2_spectral:2.4267e-02 L3_spectral:2.4279e-02 L4_spectral:2.4595e-02 L5_spectral:2.4582e-02 L6_spectral:2.4784e-02 L7_spectral:2.4673e-02 L8_spectral:2.4937e-02 L9_spectral:2.4911e-02 L10_spectral:2.4941e-02 L11_spectral:2.4807e-02 L12_spectral:2.4977e-02 train_time:361258ms step_avg:41.05ms +[2025-09-11 10:43:46] [Rank 0] step:8801/10000 train_time:362530ms step_avg:41.19ms +[2025-09-11 10:43:46] [Rank 0] step:8801/10000 train_time:362530ms step_avg:41.19ms +[2025-09-11 10:43:47] [Rank 0] step:8821/10000 train_time:363284ms step_avg:41.18ms +[2025-09-11 10:43:47] [Rank 0] step:8821/10000 train_time:363284ms step_avg:41.18ms +[2025-09-11 10:43:48] [Rank 0] step:8841/10000 train_time:363990ms step_avg:41.17ms +[2025-09-11 10:43:48] [Rank 0] step:8841/10000 train_time:363990ms step_avg:41.17ms +[2025-09-11 10:43:48] [Rank 0] step:8861/10000 train_time:364696ms step_avg:41.16ms +[2025-09-11 10:43:48] [Rank 0] step:8861/10000 train_time:364696ms step_avg:41.16ms +[2025-09-11 10:43:49] [Rank 0] step:8881/10000 train_time:365401ms step_avg:41.14ms +[2025-09-11 10:43:49] [Rank 0] step:8881/10000 
train_time:365401ms step_avg:41.14ms +[2025-09-11 10:43:50] [Rank 0] step:8901/10000 train_time:366108ms step_avg:41.13ms +[2025-09-11 10:43:50] [Rank 0] step:8901/10000 train_time:366108ms step_avg:41.13ms +[2025-09-11 10:43:51] [Rank 0] step:8921/10000 train_time:366810ms step_avg:41.12ms +[2025-09-11 10:43:51] [Rank 0] step:8921/10000 train_time:366810ms step_avg:41.12ms +[2025-09-11 10:43:51] [Rank 0] step:8941/10000 train_time:367518ms step_avg:41.10ms +[2025-09-11 10:43:51] [Rank 0] step:8941/10000 train_time:367518ms step_avg:41.10ms +[2025-09-11 10:43:52] [Rank 0] step:8961/10000 train_time:368231ms step_avg:41.09ms +[2025-09-11 10:43:52] [Rank 0] step:8961/10000 train_time:368231ms step_avg:41.09ms +[2025-09-11 10:43:53] [Rank 0] step:8981/10000 train_time:368941ms step_avg:41.08ms +[2025-09-11 10:43:53] [Rank 0] step:8981/10000 train_time:368941ms step_avg:41.08ms +[2025-09-11 10:43:53] [Rank 0] step:9001/10000 train_time:369641ms step_avg:41.07ms +[2025-09-11 10:43:53] [Rank 0] step:9001/10000 train_time:369641ms step_avg:41.07ms +[2025-09-11 10:43:54] [Rank 0] step:9021/10000 train_time:370347ms step_avg:41.05ms +[2025-09-11 10:43:54] [Rank 0] step:9021/10000 train_time:370347ms step_avg:41.05ms +[2025-09-11 10:43:55] [Rank 0] step:9041/10000 train_time:371056ms step_avg:41.04ms +[2025-09-11 10:43:55] [Rank 0] step:9041/10000 train_time:371056ms step_avg:41.04ms +[2025-09-11 10:43:55] [Rank 0] step:9061/10000 train_time:371760ms step_avg:41.03ms +[2025-09-11 10:43:55] [Rank 0] step:9061/10000 train_time:371760ms step_avg:41.03ms +[2025-09-11 10:43:56] [Rank 0] step:9081/10000 train_time:372467ms step_avg:41.02ms +[2025-09-11 10:43:56] [Rank 0] step:9081/10000 train_time:372467ms step_avg:41.02ms +[2025-09-11 10:43:57] [Rank 0] step:9101/10000 train_time:373176ms step_avg:41.00ms +[2025-09-11 10:43:57] [Rank 0] step:9101/10000 train_time:373176ms step_avg:41.00ms +[2025-09-11 10:43:58] [Rank 0] step:9121/10000 train_time:373886ms step_avg:40.99ms 
+[2025-09-11 10:43:58] [Rank 0] step:9121/10000 train_time:373886ms step_avg:40.99ms +[2025-09-11 10:43:58] [Rank 0] step:9141/10000 train_time:374590ms step_avg:40.98ms +[2025-09-11 10:43:58] [Rank 0] step:9141/10000 train_time:374590ms step_avg:40.98ms +[2025-09-11 10:43:59] [Rank 0] step:9161/10000 train_time:375298ms step_avg:40.97ms +[2025-09-11 10:43:59] [Rank 0] step:9161/10000 train_time:375298ms step_avg:40.97ms +[2025-09-11 10:44:00] [Rank 0] step:9181/10000 train_time:376006ms step_avg:40.95ms +[2025-09-11 10:44:00] [Rank 0] step:9181/10000 train_time:376006ms step_avg:40.95ms +[2025-09-11 10:44:00] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 10:44:00] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 10:44:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 10:44:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 10:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 10:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 10:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 10:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 10:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 10:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 10:44:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 10:44:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 10:44:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 10:44:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 10:44:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 10:44:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 10:44:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 10:44:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 10:44:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 10:44:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 10:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 10:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 10:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 10:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 10:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 10:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 10:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 10:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 10:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 10:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 10:44:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 10:44:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 10:44:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 10:44:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 10:44:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 10:44:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 10:44:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 10:44:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 10:44:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 10:44:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 10:44:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 10:44:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 10:44:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:44:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:44:10] [Rank 0] PRINT: step:9200/10000 val_loss:4.4255 total_sharp:3.9324e-04 L1_sharp:1.7088e-04 L2_sharp:8.1396e-05 L3_sharp:3.8042e-05 L4_sharp:1.5332e-05 L5_sharp:2.0956e-05 L6_sharp:4.7946e-05 L7_sharp:3.8607e-05 L8_sharp:9.1807e-05 L9_sharp:1.0307e-04 L10_sharp:1.0962e-04 L11_sharp:1.6267e-04 L12_sharp:1.0614e-03 total_fnorm:4.5000e+00 total_l1_linf:6.5280e+03 total_spectral:2.2344e+00 L1_fnorm:1.2188e+00 L2_fnorm:1.1328e+00 L3_fnorm:1.1250e+00 L4_fnorm:1.1328e+00 L5_fnorm:1.1250e+00 L6_fnorm:1.1328e+00 L7_fnorm:1.1250e+00 L8_fnorm:1.0859e+00 L9_fnorm:1.1094e+00 L10_fnorm:1.0938e+00 L11_fnorm:1.0859e+00 L12_fnorm:1.1094e+00 L1_l1linf:1.9141e-01 L2_l1linf:1.8457e-01 L3_l1linf:1.7871e-01 L4_l1linf:1.8457e-01 L5_l1linf:1.8066e-01 L6_l1linf:1.7578e-01 L7_l1linf:1.7773e-01 L8_l1linf:1.6602e-01 L9_l1linf:1.5723e-01 L10_l1linf:1.4844e-01 L11_l1linf:1.5527e-01 L12_l1linf:2.0801e-01 L1_spectral:1.8036e-02 L2_spectral:1.6529e-02 L3_spectral:1.6479e-02 L4_spectral:1.6779e-02 L5_spectral:1.6878e-02 L6_spectral:1.6950e-02 L7_spectral:1.7062e-02 L8_spectral:1.7358e-02 L9_spectral:1.7175e-02 L10_spectral:1.7221e-02 L11_spectral:1.7145e-02 L12_spectral:1.7335e-02 train_time:376697ms step_avg:40.95ms +[2025-09-11 10:44:10] [Rank 0] PRINT: step:9200/10000 val_loss:4.4255 total_sharp:3.9324e-04 L1_sharp:1.7088e-04 L2_sharp:8.1396e-05 L3_sharp:3.8042e-05 L4_sharp:1.5332e-05 L5_sharp:2.0956e-05 L6_sharp:4.7946e-05 L7_sharp:3.8607e-05 L8_sharp:9.1807e-05 L9_sharp:1.0307e-04 L10_sharp:1.0962e-04 L11_sharp:1.6267e-04 L12_sharp:1.0614e-03 total_fnorm:4.5000e+00 total_l1_linf:6.5280e+03 total_spectral:2.2344e+00 L1_fnorm:1.2188e+00 L2_fnorm:1.1328e+00 L3_fnorm:1.1250e+00 L4_fnorm:1.1328e+00 L5_fnorm:1.1250e+00 L6_fnorm:1.1328e+00 L7_fnorm:1.1250e+00 L8_fnorm:1.0859e+00 L9_fnorm:1.1094e+00 L10_fnorm:1.0938e+00 L11_fnorm:1.0859e+00 L12_fnorm:1.1094e+00 L1_l1linf:1.9141e-01 L2_l1linf:1.8457e-01 L3_l1linf:1.7871e-01 L4_l1linf:1.8457e-01 L5_l1linf:1.8066e-01 
L6_l1linf:1.7578e-01 L7_l1linf:1.7773e-01 L8_l1linf:1.6602e-01 L9_l1linf:1.5723e-01 L10_l1linf:1.4844e-01 L11_l1linf:1.5527e-01 L12_l1linf:2.0801e-01 L1_spectral:1.8036e-02 L2_spectral:1.6529e-02 L3_spectral:1.6479e-02 L4_spectral:1.6779e-02 L5_spectral:1.6878e-02 L6_spectral:1.6950e-02 L7_spectral:1.7062e-02 L8_spectral:1.7358e-02 L9_spectral:1.7175e-02 L10_spectral:1.7221e-02 L11_spectral:1.7145e-02 L12_spectral:1.7335e-02 train_time:376697ms step_avg:40.95ms +[2025-09-11 10:44:12] [Rank 0] step:9201/10000 train_time:377985ms step_avg:41.08ms +[2025-09-11 10:44:12] [Rank 0] step:9201/10000 train_time:377985ms step_avg:41.08ms +[2025-09-11 10:44:12] [Rank 0] step:9221/10000 train_time:378728ms step_avg:41.07ms +[2025-09-11 10:44:12] [Rank 0] step:9221/10000 train_time:378728ms step_avg:41.07ms +[2025-09-11 10:44:13] [Rank 0] step:9241/10000 train_time:379433ms step_avg:41.06ms +[2025-09-11 10:44:13] [Rank 0] step:9241/10000 train_time:379433ms step_avg:41.06ms +[2025-09-11 10:44:14] [Rank 0] step:9261/10000 train_time:380141ms step_avg:41.05ms +[2025-09-11 10:44:14] [Rank 0] step:9261/10000 train_time:380141ms step_avg:41.05ms +[2025-09-11 10:44:15] [Rank 0] step:9281/10000 train_time:380849ms step_avg:41.04ms +[2025-09-11 10:44:15] [Rank 0] step:9281/10000 train_time:380849ms step_avg:41.04ms +[2025-09-11 10:44:15] [Rank 0] step:9301/10000 train_time:381553ms step_avg:41.02ms +[2025-09-11 10:44:15] [Rank 0] step:9301/10000 train_time:381553ms step_avg:41.02ms +[2025-09-11 10:44:16] [Rank 0] step:9321/10000 train_time:382262ms step_avg:41.01ms +[2025-09-11 10:44:16] [Rank 0] step:9321/10000 train_time:382262ms step_avg:41.01ms +[2025-09-11 10:44:17] [Rank 0] step:9341/10000 train_time:382963ms step_avg:41.00ms +[2025-09-11 10:44:17] [Rank 0] step:9341/10000 train_time:382963ms step_avg:41.00ms +[2025-09-11 10:44:17] [Rank 0] step:9361/10000 train_time:383666ms step_avg:40.99ms +[2025-09-11 10:44:17] [Rank 0] step:9361/10000 train_time:383666ms step_avg:40.99ms 
+[2025-09-11 10:44:18] [Rank 0] step:9381/10000 train_time:384370ms step_avg:40.97ms +[2025-09-11 10:44:18] [Rank 0] step:9381/10000 train_time:384370ms step_avg:40.97ms +[2025-09-11 10:44:19] [Rank 0] step:9401/10000 train_time:385077ms step_avg:40.96ms +[2025-09-11 10:44:19] [Rank 0] step:9401/10000 train_time:385077ms step_avg:40.96ms +[2025-09-11 10:44:20] [Rank 0] step:9421/10000 train_time:385786ms step_avg:40.95ms +[2025-09-11 10:44:20] [Rank 0] step:9421/10000 train_time:385786ms step_avg:40.95ms +[2025-09-11 10:44:20] [Rank 0] step:9441/10000 train_time:386495ms step_avg:40.94ms +[2025-09-11 10:44:20] [Rank 0] step:9441/10000 train_time:386495ms step_avg:40.94ms +[2025-09-11 10:44:21] [Rank 0] step:9461/10000 train_time:387202ms step_avg:40.93ms +[2025-09-11 10:44:21] [Rank 0] step:9461/10000 train_time:387202ms step_avg:40.93ms +[2025-09-11 10:44:22] [Rank 0] step:9481/10000 train_time:387909ms step_avg:40.91ms +[2025-09-11 10:44:22] [Rank 0] step:9481/10000 train_time:387909ms step_avg:40.91ms +[2025-09-11 10:44:22] [Rank 0] step:9501/10000 train_time:388617ms step_avg:40.90ms +[2025-09-11 10:44:22] [Rank 0] step:9501/10000 train_time:388617ms step_avg:40.90ms +[2025-09-11 10:44:23] [Rank 0] step:9521/10000 train_time:389326ms step_avg:40.89ms +[2025-09-11 10:44:23] [Rank 0] step:9521/10000 train_time:389326ms step_avg:40.89ms +[2025-09-11 10:44:24] [Rank 0] step:9541/10000 train_time:390030ms step_avg:40.88ms +[2025-09-11 10:44:24] [Rank 0] step:9541/10000 train_time:390030ms step_avg:40.88ms +[2025-09-11 10:44:24] [Rank 0] step:9561/10000 train_time:390736ms step_avg:40.87ms +[2025-09-11 10:44:24] [Rank 0] step:9561/10000 train_time:390736ms step_avg:40.87ms +[2025-09-11 10:44:25] [Rank 0] step:9581/10000 train_time:391445ms step_avg:40.86ms +[2025-09-11 10:44:25] [Rank 0] step:9581/10000 train_time:391445ms step_avg:40.86ms +[2025-09-11 10:44:26] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 10:44:26] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 10:44:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 10:44:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 10:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 10:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 10:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 10:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 10:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 10:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 10:44:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 10:44:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 10:44:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 10:44:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 10:44:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 10:44:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 10:44:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 10:44:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 10:44:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 10:44:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 10:44:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 10:44:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 10:44:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 10:44:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 10:44:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 10:44:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 10:44:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 10:44:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 10:44:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 10:44:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 10:44:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 10:44:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 10:44:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 10:44:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 10:44:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 10:44:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 10:44:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 10:44:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 10:44:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 10:44:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 10:44:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 10:44:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 10:44:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:44:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:44:36] [Rank 0] PRINT: step:9600/10000 val_loss:4.4147 total_sharp:2.3204e-04 L1_sharp:1.0329e-04 L2_sharp:7.7523e-05 L3_sharp:4.9036e-05 L4_sharp:2.2994e-05 L5_sharp:1.7864e-05 L6_sharp:3.3783e-05 L7_sharp:2.5512e-05 L8_sharp:8.6952e-05 L9_sharp:7.1434e-05 L10_sharp:8.3661e-05 L11_sharp:1.1988e-04 L12_sharp:5.1109e-04 total_fnorm:2.5000e+00 total_l1_linf:3.0720e+03 total_spectral:1.2500e+00 L1_fnorm:6.7969e-01 L2_fnorm:6.3672e-01 L3_fnorm:6.3672e-01 L4_fnorm:6.3281e-01 L5_fnorm:6.2891e-01 L6_fnorm:6.3281e-01 L7_fnorm:6.2891e-01 L8_fnorm:6.0156e-01 L9_fnorm:6.1719e-01 L10_fnorm:6.1719e-01 L11_fnorm:6.1719e-01 L12_fnorm:6.2500e-01 L1_l1linf:9.0820e-02 L2_l1linf:8.5449e-02 L3_l1linf:8.6426e-02 L4_l1linf:8.4961e-02 L5_l1linf:8.8379e-02 L6_l1linf:8.2520e-02 L7_l1linf:8.8379e-02 L8_l1linf:7.8613e-02 L9_l1linf:7.5195e-02 L10_l1linf:6.9824e-02 L11_l1linf:7.7148e-02 L12_l1linf:1.0400e-01 L1_spectral:1.0583e-02 L2_spectral:9.3782e-03 L3_spectral:9.4240e-03 L4_spectral:9.5507e-03 L5_spectral:9.7529e-03 L6_spectral:9.6883e-03 L7_spectral:9.7161e-03 L8_spectral:1.0122e-02 L9_spectral:9.8143e-03 L10_spectral:9.8538e-03 L11_spectral:9.7346e-03 L12_spectral:9.9142e-03 train_time:392128ms step_avg:40.85ms +[2025-09-11 10:44:36] [Rank 0] PRINT: step:9600/10000 
val_loss:4.4147 total_sharp:2.3204e-04 L1_sharp:1.0329e-04 L2_sharp:7.7523e-05 L3_sharp:4.9036e-05 L4_sharp:2.2994e-05 L5_sharp:1.7864e-05 L6_sharp:3.3783e-05 L7_sharp:2.5512e-05 L8_sharp:8.6952e-05 L9_sharp:7.1434e-05 L10_sharp:8.3661e-05 L11_sharp:1.1988e-04 L12_sharp:5.1109e-04 total_fnorm:2.5000e+00 total_l1_linf:3.0720e+03 total_spectral:1.2500e+00 L1_fnorm:6.7969e-01 L2_fnorm:6.3672e-01 L3_fnorm:6.3672e-01 L4_fnorm:6.3281e-01 L5_fnorm:6.2891e-01 L6_fnorm:6.3281e-01 L7_fnorm:6.2891e-01 L8_fnorm:6.0156e-01 L9_fnorm:6.1719e-01 L10_fnorm:6.1719e-01 L11_fnorm:6.1719e-01 L12_fnorm:6.2500e-01 L1_l1linf:9.0820e-02 L2_l1linf:8.5449e-02 L3_l1linf:8.6426e-02 L4_l1linf:8.4961e-02 L5_l1linf:8.8379e-02 L6_l1linf:8.2520e-02 L7_l1linf:8.8379e-02 L8_l1linf:7.8613e-02 L9_l1linf:7.5195e-02 L10_l1linf:6.9824e-02 L11_l1linf:7.7148e-02 L12_l1linf:1.0400e-01 L1_spectral:1.0583e-02 L2_spectral:9.3782e-03 L3_spectral:9.4240e-03 L4_spectral:9.5507e-03 L5_spectral:9.7529e-03 L6_spectral:9.6883e-03 L7_spectral:9.7161e-03 L8_spectral:1.0122e-02 L9_spectral:9.8143e-03 L10_spectral:9.8538e-03 L11_spectral:9.7346e-03 L12_spectral:9.9142e-03 train_time:392128ms step_avg:40.85ms +[2025-09-11 10:44:37] [Rank 0] step:9601/10000 train_time:393462ms step_avg:40.98ms +[2025-09-11 10:44:37] [Rank 0] step:9601/10000 train_time:393462ms step_avg:40.98ms +[2025-09-11 10:44:38] [Rank 0] step:9621/10000 train_time:394200ms step_avg:40.97ms +[2025-09-11 10:44:38] [Rank 0] step:9621/10000 train_time:394200ms step_avg:40.97ms +[2025-09-11 10:44:39] [Rank 0] step:9641/10000 train_time:394913ms step_avg:40.96ms +[2025-09-11 10:44:39] [Rank 0] step:9641/10000 train_time:394913ms step_avg:40.96ms +[2025-09-11 10:44:39] [Rank 0] step:9661/10000 train_time:395633ms step_avg:40.95ms +[2025-09-11 10:44:39] [Rank 0] step:9661/10000 train_time:395633ms step_avg:40.95ms +[2025-09-11 10:44:40] [Rank 0] step:9681/10000 train_time:396345ms step_avg:40.94ms +[2025-09-11 10:44:40] [Rank 0] step:9681/10000 
train_time:396345ms step_avg:40.94ms +[2025-09-11 10:44:41] [Rank 0] step:9701/10000 train_time:397060ms step_avg:40.93ms +[2025-09-11 10:44:41] [Rank 0] step:9701/10000 train_time:397060ms step_avg:40.93ms +[2025-09-11 10:44:41] [Rank 0] step:9721/10000 train_time:397777ms step_avg:40.92ms +[2025-09-11 10:44:41] [Rank 0] step:9721/10000 train_time:397777ms step_avg:40.92ms +[2025-09-11 10:44:42] [Rank 0] step:9741/10000 train_time:398492ms step_avg:40.91ms +[2025-09-11 10:44:42] [Rank 0] step:9741/10000 train_time:398492ms step_avg:40.91ms +[2025-09-11 10:44:43] [Rank 0] step:9761/10000 train_time:399205ms step_avg:40.90ms +[2025-09-11 10:44:43] [Rank 0] step:9761/10000 train_time:399205ms step_avg:40.90ms +[2025-09-11 10:44:44] [Rank 0] step:9781/10000 train_time:399917ms step_avg:40.89ms +[2025-09-11 10:44:44] [Rank 0] step:9781/10000 train_time:399917ms step_avg:40.89ms +[2025-09-11 10:44:44] [Rank 0] step:9801/10000 train_time:400635ms step_avg:40.88ms +[2025-09-11 10:44:44] [Rank 0] step:9801/10000 train_time:400635ms step_avg:40.88ms +[2025-09-11 10:44:45] [Rank 0] step:9821/10000 train_time:401351ms step_avg:40.87ms +[2025-09-11 10:44:45] [Rank 0] step:9821/10000 train_time:401351ms step_avg:40.87ms +[2025-09-11 10:44:46] [Rank 0] step:9841/10000 train_time:402069ms step_avg:40.86ms +[2025-09-11 10:44:46] [Rank 0] step:9841/10000 train_time:402069ms step_avg:40.86ms +[2025-09-11 10:44:47] [Rank 0] step:9861/10000 train_time:402784ms step_avg:40.85ms +[2025-09-11 10:44:47] [Rank 0] step:9861/10000 train_time:402784ms step_avg:40.85ms +[2025-09-11 10:44:47] [Rank 0] step:9881/10000 train_time:403499ms step_avg:40.84ms +[2025-09-11 10:44:47] [Rank 0] step:9881/10000 train_time:403499ms step_avg:40.84ms +[2025-09-11 10:44:48] [Rank 0] step:9901/10000 train_time:404210ms step_avg:40.83ms +[2025-09-11 10:44:48] [Rank 0] step:9901/10000 train_time:404210ms step_avg:40.83ms +[2025-09-11 10:44:49] [Rank 0] step:9921/10000 train_time:404923ms step_avg:40.81ms 
+[2025-09-11 10:44:49] [Rank 0] step:9921/10000 train_time:404923ms step_avg:40.81ms +[2025-09-11 10:44:49] [Rank 0] step:9941/10000 train_time:405641ms step_avg:40.80ms +[2025-09-11 10:44:49] [Rank 0] step:9941/10000 train_time:405641ms step_avg:40.80ms +[2025-09-11 10:44:50] [Rank 0] step:9961/10000 train_time:406360ms step_avg:40.80ms +[2025-09-11 10:44:50] [Rank 0] step:9961/10000 train_time:406360ms step_avg:40.80ms +[2025-09-11 10:44:51] [Rank 0] step:9981/10000 train_time:407075ms step_avg:40.78ms +[2025-09-11 10:44:51] [Rank 0] step:9981/10000 train_time:407075ms step_avg:40.78ms +[2025-09-11 10:44:51] [Rank 0] step:10000/10000 train_time:407761ms step_avg:40.78ms +[2025-09-11 10:44:51] [Rank 0] step:10000/10000 train_time:407761ms step_avg:40.78ms +[2025-09-11 10:44:51] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 10:44:51] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 10:44:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 10:44:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 10:44:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 10:44:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 10:44:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:44:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:44:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 10:44:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 10:44:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 10:44:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 10:44:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 10:44:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 10:44:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 10:44:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 10:44:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 10:44:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 10:44:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 10:44:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 10:44:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 10:44:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 10:44:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 10:44:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 10:44:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 10:44:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 10:44:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 10:44:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 10:44:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 10:44:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 10:45:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 10:45:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 10:45:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 10:45:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 10:45:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 10:45:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 10:45:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 10:45:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 10:45:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 10:45:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 10:45:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 10:45:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 10:45:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 10:45:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 10:45:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:45:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:45:02] [Rank 0] PRINT: step:10000/10000 val_loss:4.4119 total_sharp:1.5283e-04 L1_sharp:9.1119e-05 L2_sharp:5.2532e-05 L3_sharp:2.8448e-05 L4_sharp:2.4273e-05 L5_sharp:1.0131e-05 L6_sharp:2.6471e-05 L7_sharp:2.3553e-05 L8_sharp:6.4196e-05 L9_sharp:4.9158e-05 L10_sharp:7.1094e-05 L11_sharp:9.3971e-05 L12_sharp:3.8596e-04 total_fnorm:9.8047e-01 total_l1_linf:8.6800e+02 total_spectral:4.8438e-01 L1_fnorm:2.6758e-01 L2_fnorm:2.4707e-01 L3_fnorm:2.4707e-01 L4_fnorm:2.4609e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4512e-01 L8_fnorm:2.3535e-01 L9_fnorm:2.4121e-01 L10_fnorm:2.3926e-01 L11_fnorm:2.3828e-01 L12_fnorm:2.4414e-01 L1_l1linf:2.7954e-02 L2_l1linf:2.6245e-02 L3_l1linf:2.7832e-02 L4_l1linf:2.6123e-02 L5_l1linf:2.7832e-02 L6_l1linf:2.6367e-02 L7_l1linf:2.8076e-02 L8_l1linf:2.8076e-02 L9_l1linf:2.2827e-02 L10_l1linf:2.2339e-02 L11_l1linf:2.2949e-02 L12_l1linf:2.9663e-02 L1_spectral:4.1516e-03 L2_spectral:3.6898e-03 L3_spectral:3.7506e-03 L4_spectral:3.8270e-03 L5_spectral:3.8392e-03 L6_spectral:3.8857e-03 L7_spectral:3.8717e-03 L8_spectral:4.1731e-03 L9_spectral:3.9309e-03 L10_spectral:3.9881e-03 L11_spectral:3.9103e-03 L12_spectral:3.9617e-03 train_time:407781ms step_avg:40.78ms +[2025-09-11 10:45:02] [Rank 0] PRINT: step:10000/10000 val_loss:4.4119 total_sharp:1.5283e-04 L1_sharp:9.1119e-05 L2_sharp:5.2532e-05 L3_sharp:2.8448e-05 L4_sharp:2.4273e-05 L5_sharp:1.0131e-05 L6_sharp:2.6471e-05 L7_sharp:2.3553e-05 L8_sharp:6.4196e-05 L9_sharp:4.9158e-05 L10_sharp:7.1094e-05 L11_sharp:9.3971e-05 L12_sharp:3.8596e-04 total_fnorm:9.8047e-01 total_l1_linf:8.6800e+02 total_spectral:4.8438e-01 L1_fnorm:2.6758e-01 L2_fnorm:2.4707e-01 L3_fnorm:2.4707e-01 L4_fnorm:2.4609e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4512e-01 L8_fnorm:2.3535e-01 L9_fnorm:2.4121e-01 L10_fnorm:2.3926e-01 L11_fnorm:2.3828e-01 L12_fnorm:2.4414e-01 L1_l1linf:2.7954e-02 L2_l1linf:2.6245e-02 L3_l1linf:2.7832e-02 L4_l1linf:2.6123e-02 L5_l1linf:2.7832e-02 
L6_l1linf:2.6367e-02 L7_l1linf:2.8076e-02 L8_l1linf:2.8076e-02 L9_l1linf:2.2827e-02 L10_l1linf:2.2339e-02 L11_l1linf:2.2949e-02 L12_l1linf:2.9663e-02 L1_spectral:4.1516e-03 L2_spectral:3.6898e-03 L3_spectral:3.7506e-03 L4_spectral:3.8270e-03 L5_spectral:3.8392e-03 L6_spectral:3.8857e-03 L7_spectral:3.8717e-03 L8_spectral:4.1731e-03 L9_spectral:3.9309e-03 L10_spectral:3.9881e-03 L11_spectral:3.9103e-03 L12_spectral:3.9617e-03 train_time:407781ms step_avg:40.78ms +[2025-09-11 10:45:02] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 10:45:02 2025 --- +[2025-09-11 10:45:02] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 10:45:02 2025 --- +[2025-09-11 10:45:02] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 10:45:02] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.1_seed_44/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.1_seed_44/config.json new file mode 100644 index 0000000000000000000000000000000000000000..848adf6b51f095aabd604d3d522831b9454c9b3a --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.1_seed_44/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 44, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.005, + "muon_lr": 0.1, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "4fc1056a-2dd1-436c-a6e4-22dbc56a6d21", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.1_seed_44/training_log_4fc1056a-2dd1-436c-a6e4-22dbc56a6d21.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.1_seed_44/training_log_4fc1056a-2dd1-436c-a6e4-22dbc56a6d21.txt new file mode 100644 index 0000000000000000000000000000000000000000..d989d6e542a3771c4b92a7cba296dc4949660123 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.1_seed_44/training_log_4fc1056a-2dd1-436c-a6e4-22dbc56a6d21.txt @@ -0,0 +1,4264 @@ +[2025-09-11 10:18:51] [Rank 0] PRINT: --- Script Start: Thu Sep 11 10:18:51 2025 --- +[2025-09-11 10:18:51] [Rank 0] PRINT: --- Script Start: Thu Sep 11 10:18:51 2025 --- +[2025-09-11 10:18:51] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005, muon_lr=0.1, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 10:18:51] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005, muon_lr=0.1, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 10:18:51] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 10:18:51] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 10:18:51] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 10:18:51] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 10:18:51] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.1_seed_44 +[2025-09-11 10:18:51] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.005_muon_lr_0.1_seed_44 +[2025-09-11 10:18:51] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import 
dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, 
"unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." 
+ "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention 
sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 10:18:51] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 10:18:51] [Rank 0] PRINT: Constructing model... +[2025-09-11 10:18:51] [Rank 0] PRINT: Constructing model... +[2025-09-11 10:18:52] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 10:18:52] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 10:18:52] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 10:18:52] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 10:18:52] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 10:18:52] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 10:18:52] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 10:18:52] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 10:18:52] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 10:18:52] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 10:18:54] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 10:18:54] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 10:18:54] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 10:18:54] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 10:18:54] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 10:18:54] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 10:18:59] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 10:18:59] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 10:18:59] [Rank 0] PRINT: Starting warmup... +[2025-09-11 10:18:59] [Rank 0] PRINT: Starting warmup... +[2025-09-11 10:19:37] [Rank 0] PRINT: Warmup complete. +[2025-09-11 10:19:37] [Rank 0] PRINT: Warmup complete. +[2025-09-11 10:19:37] [Rank 0] PRINT: Starting training... +[2025-09-11 10:19:37] [Rank 0] PRINT: Starting training... 
+[2025-09-11 10:19:39] [Rank 0] step:21/10000 train_time:1358ms step_avg:64.68ms +[2025-09-11 10:19:39] [Rank 0] step:21/10000 train_time:1358ms step_avg:64.68ms +[2025-09-11 10:19:39] [Rank 0] step:41/10000 train_time:2088ms step_avg:50.94ms +[2025-09-11 10:19:39] [Rank 0] step:41/10000 train_time:2088ms step_avg:50.94ms +[2025-09-11 10:19:40] [Rank 0] step:61/10000 train_time:2819ms step_avg:46.21ms +[2025-09-11 10:19:40] [Rank 0] step:61/10000 train_time:2819ms step_avg:46.21ms +[2025-09-11 10:19:41] [Rank 0] step:81/10000 train_time:3549ms step_avg:43.81ms +[2025-09-11 10:19:41] [Rank 0] step:81/10000 train_time:3549ms step_avg:43.81ms +[2025-09-11 10:19:41] [Rank 0] step:101/10000 train_time:4279ms step_avg:42.37ms +[2025-09-11 10:19:41] [Rank 0] step:101/10000 train_time:4279ms step_avg:42.37ms +[2025-09-11 10:19:42] [Rank 0] step:121/10000 train_time:5009ms step_avg:41.40ms +[2025-09-11 10:19:42] [Rank 0] step:121/10000 train_time:5009ms step_avg:41.40ms +[2025-09-11 10:19:43] [Rank 0] step:141/10000 train_time:5739ms step_avg:40.70ms +[2025-09-11 10:19:43] [Rank 0] step:141/10000 train_time:5739ms step_avg:40.70ms +[2025-09-11 10:19:44] [Rank 0] step:161/10000 train_time:6469ms step_avg:40.18ms +[2025-09-11 10:19:44] [Rank 0] step:161/10000 train_time:6469ms step_avg:40.18ms +[2025-09-11 10:19:44] [Rank 0] step:181/10000 train_time:7199ms step_avg:39.77ms +[2025-09-11 10:19:44] [Rank 0] step:181/10000 train_time:7199ms step_avg:39.77ms +[2025-09-11 10:19:45] [Rank 0] step:201/10000 train_time:7929ms step_avg:39.45ms +[2025-09-11 10:19:45] [Rank 0] step:201/10000 train_time:7929ms step_avg:39.45ms +[2025-09-11 10:19:46] [Rank 0] step:221/10000 train_time:8659ms step_avg:39.18ms +[2025-09-11 10:19:46] [Rank 0] step:221/10000 train_time:8659ms step_avg:39.18ms +[2025-09-11 10:19:47] [Rank 0] step:241/10000 train_time:9389ms step_avg:38.96ms +[2025-09-11 10:19:47] [Rank 0] step:241/10000 train_time:9389ms step_avg:38.96ms +[2025-09-11 10:19:47] [Rank 0] 
step:261/10000 train_time:10119ms step_avg:38.77ms +[2025-09-11 10:19:47] [Rank 0] step:261/10000 train_time:10119ms step_avg:38.77ms +[2025-09-11 10:19:48] [Rank 0] step:281/10000 train_time:10848ms step_avg:38.61ms +[2025-09-11 10:19:48] [Rank 0] step:281/10000 train_time:10848ms step_avg:38.61ms +[2025-09-11 10:19:49] [Rank 0] step:301/10000 train_time:11577ms step_avg:38.46ms +[2025-09-11 10:19:49] [Rank 0] step:301/10000 train_time:11577ms step_avg:38.46ms +[2025-09-11 10:19:50] [Rank 0] step:321/10000 train_time:12307ms step_avg:38.34ms +[2025-09-11 10:19:50] [Rank 0] step:321/10000 train_time:12307ms step_avg:38.34ms +[2025-09-11 10:19:50] [Rank 0] step:341/10000 train_time:13037ms step_avg:38.23ms +[2025-09-11 10:19:50] [Rank 0] step:341/10000 train_time:13037ms step_avg:38.23ms +[2025-09-11 10:19:51] [Rank 0] step:361/10000 train_time:13767ms step_avg:38.14ms +[2025-09-11 10:19:51] [Rank 0] step:361/10000 train_time:13767ms step_avg:38.14ms +[2025-09-11 10:19:52] [Rank 0] step:381/10000 train_time:14496ms step_avg:38.05ms +[2025-09-11 10:19:52] [Rank 0] step:381/10000 train_time:14496ms step_avg:38.05ms +[2025-09-11 10:19:52] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 10:19:52] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 10:19:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 10:19:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 10:20:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 10:20:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 10:20:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:20:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 10:20:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 10:20:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 10:20:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 10:20:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 10:20:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 10:20:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 10:20:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 10:20:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 10:20:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 10:20:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 10:20:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 10:20:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 10:20:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 10:20:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 10:20:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 10:20:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 10:20:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 10:20:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 10:20:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 10:20:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 10:20:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 10:20:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 10:20:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 10:20:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 10:20:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 10:20:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 10:20:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 10:20:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 10:20:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 10:20:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 10:20:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 10:20:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 10:20:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 10:20:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 10:20:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 10:20:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 10:20:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:20:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:20:41] [Rank 0] PRINT: step:400/10000 val_loss:6.3935 total_sharp:2.6967e-03 L1_sharp:2.2896e-03 L2_sharp:9.9354e-04 L3_sharp:5.2527e-04 L4_sharp:2.3284e-04 L5_sharp:2.9773e-04 L6_sharp:2.3763e-04 L7_sharp:2.4320e-04 L8_sharp:2.2235e-04 L9_sharp:2.0054e-04 L10_sharp:1.9201e-04 L11_sharp:3.2667e-04 L12_sharp:9.0297e-04 total_fnorm:4.2175e+01 total_l1_linf:1.5926e+05 total_spectral:2.1086e+01 L1_fnorm:1.2311e+01 L2_fnorm:1.1846e+01 L3_fnorm:1.1443e+01 L4_fnorm:1.1126e+01 L5_fnorm:1.0756e+01 L6_fnorm:1.0707e+01 L7_fnorm:1.0429e+01 L8_fnorm:1.0324e+01 L9_fnorm:9.9925e+00 L10_fnorm:9.6564e+00 L11_fnorm:9.1851e+00 L12_fnorm:8.7756e+00 L1_l1linf:3.6840e+00 L2_l1linf:3.5532e+00 L3_l1linf:3.5240e+00 L4_l1linf:3.5836e+00 L5_l1linf:3.3836e+00 L6_l1linf:3.2745e+00 L7_l1linf:3.1320e+00 L8_l1linf:3.0740e+00 L9_l1linf:2.7817e+00 L10_l1linf:2.5997e+00 L11_l1linf:2.3950e+00 L12_l1linf:2.1983e+00 L1_spectral:1.2194e-01 L2_spectral:1.2159e-01 L3_spectral:1.2143e-01 L4_spectral:1.2128e-01 L5_spectral:1.2119e-01 L6_spectral:1.2127e-01 L7_spectral:1.2105e-01 L8_spectral:1.2121e-01 L9_spectral:1.2108e-01 L10_spectral:1.2101e-01 L11_spectral:1.2103e-01 L12_spectral:1.2091e-01 train_time:15206ms step_avg:38.01ms +[2025-09-11 10:20:41] [Rank 0] PRINT: step:400/10000 val_loss:6.3935 total_sharp:2.6967e-03 L1_sharp:2.2896e-03 L2_sharp:9.9354e-04 L3_sharp:5.2527e-04 L4_sharp:2.3284e-04 L5_sharp:2.9773e-04 L6_sharp:2.3763e-04 L7_sharp:2.4320e-04 L8_sharp:2.2235e-04 L9_sharp:2.0054e-04 L10_sharp:1.9201e-04 L11_sharp:3.2667e-04 L12_sharp:9.0297e-04 total_fnorm:4.2175e+01 total_l1_linf:1.5926e+05 total_spectral:2.1086e+01 L1_fnorm:1.2311e+01 L2_fnorm:1.1846e+01 L3_fnorm:1.1443e+01 L4_fnorm:1.1126e+01 L5_fnorm:1.0756e+01 L6_fnorm:1.0707e+01 L7_fnorm:1.0429e+01 L8_fnorm:1.0324e+01 L9_fnorm:9.9925e+00 L10_fnorm:9.6564e+00 L11_fnorm:9.1851e+00 L12_fnorm:8.7756e+00 L1_l1linf:3.6840e+00 L2_l1linf:3.5532e+00 L3_l1linf:3.5240e+00 L4_l1linf:3.5836e+00 L5_l1linf:3.3836e+00 
L6_l1linf:3.2745e+00 L7_l1linf:3.1320e+00 L8_l1linf:3.0740e+00 L9_l1linf:2.7817e+00 L10_l1linf:2.5997e+00 L11_l1linf:2.3950e+00 L12_l1linf:2.1983e+00 L1_spectral:1.2194e-01 L2_spectral:1.2159e-01 L3_spectral:1.2143e-01 L4_spectral:1.2128e-01 L5_spectral:1.2119e-01 L6_spectral:1.2127e-01 L7_spectral:1.2105e-01 L8_spectral:1.2121e-01 L9_spectral:1.2108e-01 L10_spectral:1.2101e-01 L11_spectral:1.2103e-01 L12_spectral:1.2091e-01 train_time:15206ms step_avg:38.01ms +[2025-09-11 10:21:12] [Rank 0] step:401/10000 train_time:45836ms step_avg:114.30ms +[2025-09-11 10:21:12] [Rank 0] step:401/10000 train_time:45836ms step_avg:114.30ms +[2025-09-11 10:21:14] [Rank 0] step:421/10000 train_time:47792ms step_avg:113.52ms +[2025-09-11 10:21:14] [Rank 0] step:421/10000 train_time:47792ms step_avg:113.52ms +[2025-09-11 10:21:14] [Rank 0] step:441/10000 train_time:48433ms step_avg:109.82ms +[2025-09-11 10:21:14] [Rank 0] step:441/10000 train_time:48433ms step_avg:109.82ms +[2025-09-11 10:21:15] [Rank 0] step:461/10000 train_time:49073ms step_avg:106.45ms +[2025-09-11 10:21:15] [Rank 0] step:461/10000 train_time:49073ms step_avg:106.45ms +[2025-09-11 10:21:16] [Rank 0] step:481/10000 train_time:49712ms step_avg:103.35ms +[2025-09-11 10:21:16] [Rank 0] step:481/10000 train_time:49712ms step_avg:103.35ms +[2025-09-11 10:21:16] [Rank 0] step:501/10000 train_time:50352ms step_avg:100.50ms +[2025-09-11 10:21:16] [Rank 0] step:501/10000 train_time:50352ms step_avg:100.50ms +[2025-09-11 10:21:17] [Rank 0] step:521/10000 train_time:50991ms step_avg:97.87ms +[2025-09-11 10:21:17] [Rank 0] step:521/10000 train_time:50991ms step_avg:97.87ms +[2025-09-11 10:21:18] [Rank 0] step:541/10000 train_time:51630ms step_avg:95.44ms +[2025-09-11 10:21:18] [Rank 0] step:541/10000 train_time:51630ms step_avg:95.44ms +[2025-09-11 10:21:18] [Rank 0] step:561/10000 train_time:52270ms step_avg:93.17ms +[2025-09-11 10:21:18] [Rank 0] step:561/10000 train_time:52270ms step_avg:93.17ms +[2025-09-11 10:21:19] [Rank 
0] step:581/10000 train_time:52910ms step_avg:91.07ms +[2025-09-11 10:21:19] [Rank 0] step:581/10000 train_time:52910ms step_avg:91.07ms +[2025-09-11 10:21:20] [Rank 0] step:601/10000 train_time:53549ms step_avg:89.10ms +[2025-09-11 10:21:20] [Rank 0] step:601/10000 train_time:53549ms step_avg:89.10ms +[2025-09-11 10:21:20] [Rank 0] step:621/10000 train_time:54188ms step_avg:87.26ms +[2025-09-11 10:21:20] [Rank 0] step:621/10000 train_time:54188ms step_avg:87.26ms +[2025-09-11 10:21:21] [Rank 0] step:641/10000 train_time:54828ms step_avg:85.53ms +[2025-09-11 10:21:21] [Rank 0] step:641/10000 train_time:54828ms step_avg:85.53ms +[2025-09-11 10:21:21] [Rank 0] step:661/10000 train_time:55467ms step_avg:83.91ms +[2025-09-11 10:21:21] [Rank 0] step:661/10000 train_time:55467ms step_avg:83.91ms +[2025-09-11 10:21:22] [Rank 0] step:681/10000 train_time:56106ms step_avg:82.39ms +[2025-09-11 10:21:22] [Rank 0] step:681/10000 train_time:56106ms step_avg:82.39ms +[2025-09-11 10:21:23] [Rank 0] step:701/10000 train_time:56745ms step_avg:80.95ms +[2025-09-11 10:21:23] [Rank 0] step:701/10000 train_time:56745ms step_avg:80.95ms +[2025-09-11 10:21:23] [Rank 0] step:721/10000 train_time:57384ms step_avg:79.59ms +[2025-09-11 10:21:23] [Rank 0] step:721/10000 train_time:57384ms step_avg:79.59ms +[2025-09-11 10:21:24] [Rank 0] step:741/10000 train_time:58024ms step_avg:78.30ms +[2025-09-11 10:21:24] [Rank 0] step:741/10000 train_time:58024ms step_avg:78.30ms +[2025-09-11 10:21:25] [Rank 0] step:761/10000 train_time:58668ms step_avg:77.09ms +[2025-09-11 10:21:25] [Rank 0] step:761/10000 train_time:58668ms step_avg:77.09ms +[2025-09-11 10:21:25] [Rank 0] step:781/10000 train_time:59311ms step_avg:75.94ms +[2025-09-11 10:21:25] [Rank 0] step:781/10000 train_time:59311ms step_avg:75.94ms +[2025-09-11 10:21:26] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 10:21:26] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 10:21:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 10:21:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 10:22:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 10:22:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 10:22:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:22:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:22:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 10:22:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 10:22:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 10:22:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 10:22:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 10:22:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 10:22:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 10:22:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 10:22:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 10:22:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 10:22:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 10:22:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 10:22:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 10:22:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 10:22:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 10:22:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 10:22:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 10:22:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 10:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 10:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 10:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 10:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 10:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 10:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 10:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 10:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 10:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 10:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 10:22:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 10:22:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 10:22:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 10:22:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 10:22:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... 
+[2025-09-11 10:22:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 10:22:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 10:22:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 10:22:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:22:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:22:10] [Rank 0] PRINT: step:800/10000 val_loss:5.9658 total_sharp:1.5293e-03 L1_sharp:9.6511e-04 L2_sharp:4.8969e-04 L3_sharp:2.0901e-04 L4_sharp:1.1044e-04 L5_sharp:1.1797e-04 L6_sharp:1.0188e-04 L7_sharp:1.1129e-04 L8_sharp:8.0117e-05 L9_sharp:6.6853e-05 L10_sharp:1.1216e-04 L11_sharp:2.7903e-04 L12_sharp:1.4179e-03 total_fnorm:4.3250e+01 total_l1_linf:1.4541e+05 total_spectral:2.1875e+01 L1_fnorm:1.2500e+01 L2_fnorm:1.2000e+01 L3_fnorm:1.1938e+01 L4_fnorm:1.1688e+01 L5_fnorm:1.1438e+01 L6_fnorm:1.1500e+01 L7_fnorm:1.1312e+01 L8_fnorm:1.0875e+01 L9_fnorm:1.0938e+01 L10_fnorm:1.0625e+01 L11_fnorm:1.0250e+01 L12_fnorm:9.0000e+00 L1_l1linf:3.6562e+00 L2_l1linf:3.5938e+00 L3_l1linf:3.5469e+00 L4_l1linf:3.6406e+00 L5_l1linf:3.4844e+00 L6_l1linf:3.4375e+00 L7_l1linf:3.3906e+00 L8_l1linf:3.3750e+00 L9_l1linf:3.1094e+00 L10_l1linf:2.8906e+00 L11_l1linf:2.4688e+00 L12_l1linf:2.2188e+00 L1_spectral:1.3370e-01 L2_spectral:1.3236e-01 L3_spectral:1.3198e-01 L4_spectral:1.3147e-01 L5_spectral:1.3141e-01 L6_spectral:1.3124e-01 L7_spectral:1.3108e-01 L8_spectral:1.3116e-01 L9_spectral:1.3072e-01 L10_spectral:1.3082e-01 L11_spectral:1.3065e-01 L12_spectral:1.3099e-01 train_time:59938ms step_avg:74.92ms +[2025-09-11 10:22:10] [Rank 0] PRINT: step:800/10000 val_loss:5.9658 total_sharp:1.5293e-03 L1_sharp:9.6511e-04 L2_sharp:4.8969e-04 L3_sharp:2.0901e-04 L4_sharp:1.1044e-04 L5_sharp:1.1797e-04 L6_sharp:1.0188e-04 L7_sharp:1.1129e-04 L8_sharp:8.0117e-05 
L9_sharp:6.6853e-05 L10_sharp:1.1216e-04 L11_sharp:2.7903e-04 L12_sharp:1.4179e-03 total_fnorm:4.3250e+01 total_l1_linf:1.4541e+05 total_spectral:2.1875e+01 L1_fnorm:1.2500e+01 L2_fnorm:1.2000e+01 L3_fnorm:1.1938e+01 L4_fnorm:1.1688e+01 L5_fnorm:1.1438e+01 L6_fnorm:1.1500e+01 L7_fnorm:1.1312e+01 L8_fnorm:1.0875e+01 L9_fnorm:1.0938e+01 L10_fnorm:1.0625e+01 L11_fnorm:1.0250e+01 L12_fnorm:9.0000e+00 L1_l1linf:3.6562e+00 L2_l1linf:3.5938e+00 L3_l1linf:3.5469e+00 L4_l1linf:3.6406e+00 L5_l1linf:3.4844e+00 L6_l1linf:3.4375e+00 L7_l1linf:3.3906e+00 L8_l1linf:3.3750e+00 L9_l1linf:3.1094e+00 L10_l1linf:2.8906e+00 L11_l1linf:2.4688e+00 L12_l1linf:2.2188e+00 L1_spectral:1.3370e-01 L2_spectral:1.3236e-01 L3_spectral:1.3198e-01 L4_spectral:1.3147e-01 L5_spectral:1.3141e-01 L6_spectral:1.3124e-01 L7_spectral:1.3108e-01 L8_spectral:1.3116e-01 L9_spectral:1.3072e-01 L10_spectral:1.3082e-01 L11_spectral:1.3065e-01 L12_spectral:1.3099e-01 train_time:59938ms step_avg:74.92ms +[2025-09-11 10:22:11] [Rank 0] step:801/10000 train_time:61118ms step_avg:76.30ms +[2025-09-11 10:22:11] [Rank 0] step:801/10000 train_time:61118ms step_avg:76.30ms +[2025-09-11 10:22:12] [Rank 0] step:821/10000 train_time:61766ms step_avg:75.23ms +[2025-09-11 10:22:12] [Rank 0] step:821/10000 train_time:61766ms step_avg:75.23ms +[2025-09-11 10:22:12] [Rank 0] step:841/10000 train_time:62410ms step_avg:74.21ms +[2025-09-11 10:22:12] [Rank 0] step:841/10000 train_time:62410ms step_avg:74.21ms +[2025-09-11 10:22:13] [Rank 0] step:861/10000 train_time:63055ms step_avg:73.23ms +[2025-09-11 10:22:13] [Rank 0] step:861/10000 train_time:63055ms step_avg:73.23ms +[2025-09-11 10:22:14] [Rank 0] step:881/10000 train_time:63700ms step_avg:72.30ms +[2025-09-11 10:22:14] [Rank 0] step:881/10000 train_time:63700ms step_avg:72.30ms +[2025-09-11 10:22:14] [Rank 0] step:901/10000 train_time:64344ms step_avg:71.41ms +[2025-09-11 10:22:14] [Rank 0] step:901/10000 train_time:64344ms step_avg:71.41ms +[2025-09-11 10:22:15] [Rank 0] 
step:921/10000 train_time:64988ms step_avg:70.56ms +[2025-09-11 10:22:15] [Rank 0] step:921/10000 train_time:64988ms step_avg:70.56ms +[2025-09-11 10:22:16] [Rank 0] step:941/10000 train_time:65632ms step_avg:69.75ms +[2025-09-11 10:22:16] [Rank 0] step:941/10000 train_time:65632ms step_avg:69.75ms +[2025-09-11 10:22:16] [Rank 0] step:961/10000 train_time:66275ms step_avg:68.96ms +[2025-09-11 10:22:16] [Rank 0] step:961/10000 train_time:66275ms step_avg:68.96ms +[2025-09-11 10:22:17] [Rank 0] step:981/10000 train_time:66919ms step_avg:68.22ms +[2025-09-11 10:22:17] [Rank 0] step:981/10000 train_time:66919ms step_avg:68.22ms +[2025-09-11 10:22:17] [Rank 0] step:1001/10000 train_time:67563ms step_avg:67.50ms +[2025-09-11 10:22:17] [Rank 0] step:1001/10000 train_time:67563ms step_avg:67.50ms +[2025-09-11 10:22:18] [Rank 0] step:1021/10000 train_time:68207ms step_avg:66.80ms +[2025-09-11 10:22:18] [Rank 0] step:1021/10000 train_time:68207ms step_avg:66.80ms +[2025-09-11 10:22:19] [Rank 0] step:1041/10000 train_time:68850ms step_avg:66.14ms +[2025-09-11 10:22:19] [Rank 0] step:1041/10000 train_time:68850ms step_avg:66.14ms +[2025-09-11 10:22:19] [Rank 0] step:1061/10000 train_time:69494ms step_avg:65.50ms +[2025-09-11 10:22:19] [Rank 0] step:1061/10000 train_time:69494ms step_avg:65.50ms +[2025-09-11 10:22:20] [Rank 0] step:1081/10000 train_time:70137ms step_avg:64.88ms +[2025-09-11 10:22:20] [Rank 0] step:1081/10000 train_time:70137ms step_avg:64.88ms +[2025-09-11 10:22:21] [Rank 0] step:1101/10000 train_time:70780ms step_avg:64.29ms +[2025-09-11 10:22:21] [Rank 0] step:1101/10000 train_time:70780ms step_avg:64.29ms +[2025-09-11 10:22:21] [Rank 0] step:1121/10000 train_time:71423ms step_avg:63.71ms +[2025-09-11 10:22:21] [Rank 0] step:1121/10000 train_time:71423ms step_avg:63.71ms +[2025-09-11 10:22:22] [Rank 0] step:1141/10000 train_time:72066ms step_avg:63.16ms +[2025-09-11 10:22:22] [Rank 0] step:1141/10000 train_time:72066ms step_avg:63.16ms +[2025-09-11 10:22:23] 
[Rank 0] step:1161/10000 train_time:72709ms step_avg:62.63ms +[2025-09-11 10:22:23] [Rank 0] step:1161/10000 train_time:72709ms step_avg:62.63ms +[2025-09-11 10:22:23] [Rank 0] step:1181/10000 train_time:73353ms step_avg:62.11ms +[2025-09-11 10:22:23] [Rank 0] step:1181/10000 train_time:73353ms step_avg:62.11ms +[2025-09-11 10:22:24] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 10:22:24] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 10:22:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 10:22:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 10:22:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 10:22:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 10:22:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:22:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:22:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 10:22:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 10:22:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 10:22:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 10:22:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 10:22:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 10:22:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... 
+[2025-09-11 10:22:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 10:22:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 10:22:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 10:22:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 10:22:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 10:22:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 10:22:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 10:22:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 10:22:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 10:22:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 10:22:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 10:22:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 10:22:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 10:22:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 10:22:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 10:22:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 10:22:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 10:22:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 10:22:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 10:22:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... 
+[2025-09-11 10:22:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 10:22:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 10:22:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 10:22:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 10:22:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 10:22:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 10:22:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 10:22:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 10:22:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 10:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:22:34] [Rank 0] PRINT: step:1200/10000 val_loss:5.6338 total_sharp:9.2638e-04 L1_sharp:5.1726e-04 L2_sharp:3.1356e-04 L3_sharp:1.6244e-04 L4_sharp:6.6172e-05 L5_sharp:6.8538e-05 L6_sharp:5.2035e-05 L7_sharp:5.2627e-05 L8_sharp:9.6907e-05 L9_sharp:6.6091e-05 L10_sharp:1.1415e-04 L11_sharp:1.8296e-04 L12_sharp:1.2269e-03 total_fnorm:4.5250e+01 total_l1_linf:1.4541e+05 total_spectral:2.2375e+01 L1_fnorm:1.2500e+01 L2_fnorm:1.2250e+01 L3_fnorm:1.2188e+01 L4_fnorm:1.2062e+01 L5_fnorm:1.1812e+01 L6_fnorm:1.1938e+01 L7_fnorm:1.1938e+01 L8_fnorm:1.1500e+01 L9_fnorm:1.1812e+01 L10_fnorm:1.1688e+01 L11_fnorm:1.1500e+01 L12_fnorm:1.0625e+01 L1_l1linf:3.5312e+00 L2_l1linf:3.5781e+00 L3_l1linf:3.5000e+00 L4_l1linf:3.4531e+00 L5_l1linf:3.3281e+00 L6_l1linf:3.2656e+00 L7_l1linf:3.3125e+00 L8_l1linf:3.3125e+00 L9_l1linf:3.1875e+00 L10_l1linf:3.0938e+00 L11_l1linf:2.7812e+00 L12_l1linf:2.2031e+00 L1_spectral:1.3981e-01 L2_spectral:1.3819e-01 L3_spectral:1.3844e-01 L4_spectral:1.3719e-01 L5_spectral:1.3662e-01 L6_spectral:1.3646e-01 L7_spectral:1.3637e-01 L8_spectral:1.3511e-01 L9_spectral:1.3647e-01 L10_spectral:1.3594e-01 L11_spectral:1.3599e-01 L12_spectral:1.3546e-01 train_time:73978ms step_avg:61.65ms +[2025-09-11 10:22:34] [Rank 0] PRINT: step:1200/10000 val_loss:5.6338 total_sharp:9.2638e-04 L1_sharp:5.1726e-04 L2_sharp:3.1356e-04 L3_sharp:1.6244e-04 L4_sharp:6.6172e-05 L5_sharp:6.8538e-05 L6_sharp:5.2035e-05 L7_sharp:5.2627e-05 L8_sharp:9.6907e-05 L9_sharp:6.6091e-05 L10_sharp:1.1415e-04 L11_sharp:1.8296e-04 L12_sharp:1.2269e-03 total_fnorm:4.5250e+01 total_l1_linf:1.4541e+05 total_spectral:2.2375e+01 L1_fnorm:1.2500e+01 L2_fnorm:1.2250e+01 L3_fnorm:1.2188e+01 L4_fnorm:1.2062e+01 L5_fnorm:1.1812e+01 L6_fnorm:1.1938e+01 L7_fnorm:1.1938e+01 L8_fnorm:1.1500e+01 L9_fnorm:1.1812e+01 L10_fnorm:1.1688e+01 L11_fnorm:1.1500e+01 L12_fnorm:1.0625e+01 L1_l1linf:3.5312e+00 L2_l1linf:3.5781e+00 L3_l1linf:3.5000e+00 L4_l1linf:3.4531e+00 L5_l1linf:3.3281e+00 
L6_l1linf:3.2656e+00 L7_l1linf:3.3125e+00 L8_l1linf:3.3125e+00 L9_l1linf:3.1875e+00 L10_l1linf:3.0938e+00 L11_l1linf:2.7812e+00 L12_l1linf:2.2031e+00 L1_spectral:1.3981e-01 L2_spectral:1.3819e-01 L3_spectral:1.3844e-01 L4_spectral:1.3719e-01 L5_spectral:1.3662e-01 L6_spectral:1.3646e-01 L7_spectral:1.3637e-01 L8_spectral:1.3511e-01 L9_spectral:1.3647e-01 L10_spectral:1.3594e-01 L11_spectral:1.3599e-01 L12_spectral:1.3546e-01 train_time:73978ms step_avg:61.65ms +[2025-09-11 10:22:35] [Rank 0] step:1201/10000 train_time:75164ms step_avg:62.58ms +[2025-09-11 10:22:35] [Rank 0] step:1201/10000 train_time:75164ms step_avg:62.58ms +[2025-09-11 10:22:36] [Rank 0] step:1221/10000 train_time:75811ms step_avg:62.09ms +[2025-09-11 10:22:36] [Rank 0] step:1221/10000 train_time:75811ms step_avg:62.09ms +[2025-09-11 10:22:36] [Rank 0] step:1241/10000 train_time:76456ms step_avg:61.61ms +[2025-09-11 10:22:36] [Rank 0] step:1241/10000 train_time:76456ms step_avg:61.61ms +[2025-09-11 10:22:37] [Rank 0] step:1261/10000 train_time:77101ms step_avg:61.14ms +[2025-09-11 10:22:37] [Rank 0] step:1261/10000 train_time:77101ms step_avg:61.14ms +[2025-09-11 10:22:37] [Rank 0] step:1281/10000 train_time:77746ms step_avg:60.69ms +[2025-09-11 10:22:37] [Rank 0] step:1281/10000 train_time:77746ms step_avg:60.69ms +[2025-09-11 10:22:38] [Rank 0] step:1301/10000 train_time:78390ms step_avg:60.25ms +[2025-09-11 10:22:38] [Rank 0] step:1301/10000 train_time:78390ms step_avg:60.25ms +[2025-09-11 10:22:39] [Rank 0] step:1321/10000 train_time:79034ms step_avg:59.83ms +[2025-09-11 10:22:39] [Rank 0] step:1321/10000 train_time:79034ms step_avg:59.83ms +[2025-09-11 10:22:39] [Rank 0] step:1341/10000 train_time:79678ms step_avg:59.42ms +[2025-09-11 10:22:39] [Rank 0] step:1341/10000 train_time:79678ms step_avg:59.42ms +[2025-09-11 10:22:40] [Rank 0] step:1361/10000 train_time:80323ms step_avg:59.02ms +[2025-09-11 10:22:40] [Rank 0] step:1361/10000 train_time:80323ms step_avg:59.02ms +[2025-09-11 10:22:41] 
[Rank 0] step:1381/10000 train_time:80967ms step_avg:58.63ms +[2025-09-11 10:22:41] [Rank 0] step:1381/10000 train_time:80967ms step_avg:58.63ms +[2025-09-11 10:22:41] [Rank 0] step:1401/10000 train_time:81611ms step_avg:58.25ms +[2025-09-11 10:22:41] [Rank 0] step:1401/10000 train_time:81611ms step_avg:58.25ms +[2025-09-11 10:22:42] [Rank 0] step:1421/10000 train_time:82255ms step_avg:57.88ms +[2025-09-11 10:22:42] [Rank 0] step:1421/10000 train_time:82255ms step_avg:57.88ms +[2025-09-11 10:22:43] [Rank 0] step:1441/10000 train_time:82899ms step_avg:57.53ms +[2025-09-11 10:22:43] [Rank 0] step:1441/10000 train_time:82899ms step_avg:57.53ms +[2025-09-11 10:22:43] [Rank 0] step:1461/10000 train_time:83543ms step_avg:57.18ms +[2025-09-11 10:22:43] [Rank 0] step:1461/10000 train_time:83543ms step_avg:57.18ms +[2025-09-11 10:22:44] [Rank 0] step:1481/10000 train_time:84187ms step_avg:56.84ms +[2025-09-11 10:22:44] [Rank 0] step:1481/10000 train_time:84187ms step_avg:56.84ms +[2025-09-11 10:22:45] [Rank 0] step:1501/10000 train_time:84834ms step_avg:56.52ms +[2025-09-11 10:22:45] [Rank 0] step:1501/10000 train_time:84834ms step_avg:56.52ms +[2025-09-11 10:22:45] [Rank 0] step:1521/10000 train_time:85482ms step_avg:56.20ms +[2025-09-11 10:22:45] [Rank 0] step:1521/10000 train_time:85482ms step_avg:56.20ms +[2025-09-11 10:22:46] [Rank 0] step:1541/10000 train_time:86131ms step_avg:55.89ms +[2025-09-11 10:22:46] [Rank 0] step:1541/10000 train_time:86131ms step_avg:55.89ms +[2025-09-11 10:22:46] [Rank 0] step:1561/10000 train_time:86780ms step_avg:55.59ms +[2025-09-11 10:22:46] [Rank 0] step:1561/10000 train_time:86780ms step_avg:55.59ms +[2025-09-11 10:22:47] [Rank 0] step:1581/10000 train_time:87427ms step_avg:55.30ms +[2025-09-11 10:22:47] [Rank 0] step:1581/10000 train_time:87427ms step_avg:55.30ms +[2025-09-11 10:22:48] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 10:22:48] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 10:22:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 10:22:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 10:22:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 10:22:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 10:22:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:22:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:22:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 10:22:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 10:22:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 10:22:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 10:22:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 10:22:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 10:22:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 10:22:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 10:22:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 10:22:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 10:22:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 10:22:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 10:22:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 10:22:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 10:22:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 10:22:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 10:22:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 10:22:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 10:22:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 10:22:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 10:22:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 10:22:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 10:22:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 10:22:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 10:22:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 10:22:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 10:22:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 10:22:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 10:22:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 10:22:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 10:22:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 10:22:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 10:22:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 10:22:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 10:22:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 10:22:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 10:22:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:22:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:22:58] [Rank 0] PRINT: step:1600/10000 val_loss:5.4567 total_sharp:6.4424e-04 L1_sharp:3.3735e-04 L2_sharp:2.3543e-04 L3_sharp:6.1899e-05 L4_sharp:2.6886e-05 L5_sharp:5.9596e-05 L6_sharp:5.5884e-05 L7_sharp:4.1915e-05 L8_sharp:9.9510e-05 L9_sharp:4.9983e-05 L10_sharp:9.2304e-05 L11_sharp:1.4395e-04 L12_sharp:6.7423e-04 total_fnorm:4.5500e+01 total_l1_linf:1.4234e+05 total_spectral:2.2750e+01 L1_fnorm:1.2625e+01 L2_fnorm:1.2375e+01 L3_fnorm:1.2375e+01 L4_fnorm:1.2312e+01 L5_fnorm:1.2062e+01 L6_fnorm:1.2250e+01 L7_fnorm:1.2188e+01 L8_fnorm:1.1625e+01 L9_fnorm:1.2125e+01 L10_fnorm:1.2000e+01 L11_fnorm:1.1875e+01 L12_fnorm:1.1125e+01 L1_l1linf:3.4062e+00 L2_l1linf:3.4688e+00 L3_l1linf:3.3281e+00 L4_l1linf:3.3594e+00 L5_l1linf:3.2031e+00 L6_l1linf:3.1875e+00 L7_l1linf:3.2031e+00 L8_l1linf:3.1875e+00 L9_l1linf:3.1250e+00 L10_l1linf:3.0469e+00 L11_l1linf:2.8438e+00 L12_l1linf:2.3125e+00 L1_spectral:1.4435e-01 L2_spectral:1.4197e-01 L3_spectral:1.4323e-01 L4_spectral:1.4245e-01 L5_spectral:1.4195e-01 L6_spectral:1.4119e-01 L7_spectral:1.4104e-01 L8_spectral:1.3998e-01 L9_spectral:1.4078e-01 L10_spectral:1.4017e-01 L11_spectral:1.3975e-01 L12_spectral:1.3888e-01 train_time:88058ms step_avg:55.04ms +[2025-09-11 10:22:58] [Rank 0] PRINT: step:1600/10000 
val_loss:5.4567 total_sharp:6.4424e-04 L1_sharp:3.3735e-04 L2_sharp:2.3543e-04 L3_sharp:6.1899e-05 L4_sharp:2.6886e-05 L5_sharp:5.9596e-05 L6_sharp:5.5884e-05 L7_sharp:4.1915e-05 L8_sharp:9.9510e-05 L9_sharp:4.9983e-05 L10_sharp:9.2304e-05 L11_sharp:1.4395e-04 L12_sharp:6.7423e-04 total_fnorm:4.5500e+01 total_l1_linf:1.4234e+05 total_spectral:2.2750e+01 L1_fnorm:1.2625e+01 L2_fnorm:1.2375e+01 L3_fnorm:1.2375e+01 L4_fnorm:1.2312e+01 L5_fnorm:1.2062e+01 L6_fnorm:1.2250e+01 L7_fnorm:1.2188e+01 L8_fnorm:1.1625e+01 L9_fnorm:1.2125e+01 L10_fnorm:1.2000e+01 L11_fnorm:1.1875e+01 L12_fnorm:1.1125e+01 L1_l1linf:3.4062e+00 L2_l1linf:3.4688e+00 L3_l1linf:3.3281e+00 L4_l1linf:3.3594e+00 L5_l1linf:3.2031e+00 L6_l1linf:3.1875e+00 L7_l1linf:3.2031e+00 L8_l1linf:3.1875e+00 L9_l1linf:3.1250e+00 L10_l1linf:3.0469e+00 L11_l1linf:2.8438e+00 L12_l1linf:2.3125e+00 L1_spectral:1.4435e-01 L2_spectral:1.4197e-01 L3_spectral:1.4323e-01 L4_spectral:1.4245e-01 L5_spectral:1.4195e-01 L6_spectral:1.4119e-01 L7_spectral:1.4104e-01 L8_spectral:1.3998e-01 L9_spectral:1.4078e-01 L10_spectral:1.4017e-01 L11_spectral:1.3975e-01 L12_spectral:1.3888e-01 train_time:88058ms step_avg:55.04ms +[2025-09-11 10:22:59] [Rank 0] step:1601/10000 train_time:89251ms step_avg:55.75ms +[2025-09-11 10:22:59] [Rank 0] step:1601/10000 train_time:89251ms step_avg:55.75ms +[2025-09-11 10:22:59] [Rank 0] step:1621/10000 train_time:89902ms step_avg:55.46ms +[2025-09-11 10:22:59] [Rank 0] step:1621/10000 train_time:89902ms step_avg:55.46ms +[2025-09-11 10:23:00] [Rank 0] step:1641/10000 train_time:90551ms step_avg:55.18ms +[2025-09-11 10:23:00] [Rank 0] step:1641/10000 train_time:90551ms step_avg:55.18ms +[2025-09-11 10:23:01] [Rank 0] step:1661/10000 train_time:91200ms step_avg:54.91ms +[2025-09-11 10:23:01] [Rank 0] step:1661/10000 train_time:91200ms step_avg:54.91ms +[2025-09-11 10:23:01] [Rank 0] step:1681/10000 train_time:91849ms step_avg:54.64ms +[2025-09-11 10:23:01] [Rank 0] step:1681/10000 train_time:91849ms 
step_avg:54.64ms +[2025-09-11 10:23:02] [Rank 0] step:1701/10000 train_time:92499ms step_avg:54.38ms +[2025-09-11 10:23:02] [Rank 0] step:1701/10000 train_time:92499ms step_avg:54.38ms +[2025-09-11 10:23:03] [Rank 0] step:1721/10000 train_time:93147ms step_avg:54.12ms +[2025-09-11 10:23:03] [Rank 0] step:1721/10000 train_time:93147ms step_avg:54.12ms +[2025-09-11 10:23:04] [Rank 0] step:1741/10000 train_time:94233ms step_avg:54.13ms +[2025-09-11 10:23:04] [Rank 0] step:1741/10000 train_time:94233ms step_avg:54.13ms +[2025-09-11 10:23:05] [Rank 0] step:1761/10000 train_time:94984ms step_avg:53.94ms +[2025-09-11 10:23:05] [Rank 0] step:1761/10000 train_time:94984ms step_avg:53.94ms +[2025-09-11 10:23:05] [Rank 0] step:1781/10000 train_time:95632ms step_avg:53.70ms +[2025-09-11 10:23:05] [Rank 0] step:1781/10000 train_time:95632ms step_avg:53.70ms +[2025-09-11 10:23:06] [Rank 0] step:1801/10000 train_time:96551ms step_avg:53.61ms +[2025-09-11 10:23:06] [Rank 0] step:1801/10000 train_time:96551ms step_avg:53.61ms +[2025-09-11 10:23:07] [Rank 0] step:1821/10000 train_time:97199ms step_avg:53.38ms +[2025-09-11 10:23:07] [Rank 0] step:1821/10000 train_time:97199ms step_avg:53.38ms +[2025-09-11 10:23:07] [Rank 0] step:1841/10000 train_time:97847ms step_avg:53.15ms +[2025-09-11 10:23:07] [Rank 0] step:1841/10000 train_time:97847ms step_avg:53.15ms +[2025-09-11 10:23:08] [Rank 0] step:1861/10000 train_time:98495ms step_avg:52.93ms +[2025-09-11 10:23:08] [Rank 0] step:1861/10000 train_time:98495ms step_avg:52.93ms +[2025-09-11 10:23:09] [Rank 0] step:1881/10000 train_time:99142ms step_avg:52.71ms +[2025-09-11 10:23:09] [Rank 0] step:1881/10000 train_time:99142ms step_avg:52.71ms +[2025-09-11 10:23:09] [Rank 0] step:1901/10000 train_time:99792ms step_avg:52.49ms +[2025-09-11 10:23:09] [Rank 0] step:1901/10000 train_time:99792ms step_avg:52.49ms +[2025-09-11 10:23:10] [Rank 0] step:1921/10000 train_time:100438ms step_avg:52.28ms +[2025-09-11 10:23:10] [Rank 0] step:1921/10000 
train_time:100438ms step_avg:52.28ms +[2025-09-11 10:23:11] [Rank 0] step:1941/10000 train_time:101085ms step_avg:52.08ms +[2025-09-11 10:23:11] [Rank 0] step:1941/10000 train_time:101085ms step_avg:52.08ms +[2025-09-11 10:23:11] [Rank 0] step:1961/10000 train_time:101733ms step_avg:51.88ms +[2025-09-11 10:23:11] [Rank 0] step:1961/10000 train_time:101733ms step_avg:51.88ms +[2025-09-11 10:23:12] [Rank 0] step:1981/10000 train_time:102381ms step_avg:51.68ms +[2025-09-11 10:23:12] [Rank 0] step:1981/10000 train_time:102381ms step_avg:51.68ms +[2025-09-11 10:23:13] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 10:23:13] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 10:23:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 10:23:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 10:23:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 10:23:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 10:23:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:23:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:23:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 10:23:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 10:23:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 10:23:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 10:23:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 10:23:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 10:23:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 10:23:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 10:23:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 10:23:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 10:23:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 10:23:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 10:23:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 10:23:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 10:23:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 10:23:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 10:23:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 10:23:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 10:23:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 10:23:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 10:23:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 10:23:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 10:23:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 10:23:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 10:23:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 10:23:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 10:23:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 10:23:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 10:23:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 10:23:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 10:23:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 10:23:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 10:23:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 10:23:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 10:23:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 10:23:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 10:23:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:23:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:23:22] [Rank 0] PRINT: step:2000/10000 val_loss:5.2965 total_sharp:5.8066e-04 L1_sharp:3.4226e-04 L2_sharp:1.8579e-04 L3_sharp:5.7292e-05 L4_sharp:3.0799e-05 L5_sharp:6.4353e-05 L6_sharp:3.6052e-05 L7_sharp:3.4588e-05 L8_sharp:8.8024e-05 L9_sharp:5.4345e-05 L10_sharp:7.8704e-05 L11_sharp:1.3076e-04 L12_sharp:8.6729e-04 total_fnorm:4.5500e+01 total_l1_linf:1.4029e+05 total_spectral:2.2750e+01 L1_fnorm:1.2625e+01 L2_fnorm:1.2375e+01 L3_fnorm:1.2438e+01 L4_fnorm:1.2312e+01 L5_fnorm:1.2062e+01 L6_fnorm:1.2312e+01 L7_fnorm:1.2312e+01 L8_fnorm:1.1750e+01 L9_fnorm:1.2312e+01 L10_fnorm:1.2188e+01 L11_fnorm:1.2125e+01 L12_fnorm:1.1312e+01 L1_l1linf:3.4062e+00 L2_l1linf:3.3906e+00 L3_l1linf:3.2656e+00 L4_l1linf:3.2344e+00 L5_l1linf:3.1719e+00 L6_l1linf:3.1406e+00 L7_l1linf:3.1094e+00 L8_l1linf:3.1094e+00 L9_l1linf:3.0938e+00 L10_l1linf:3.0156e+00 L11_l1linf:2.8750e+00 L12_l1linf:2.3125e+00 L1_spectral:1.4763e-01 L2_spectral:1.4509e-01 L3_spectral:1.4606e-01 L4_spectral:1.4544e-01 L5_spectral:1.4568e-01 L6_spectral:1.4433e-01 L7_spectral:1.4420e-01 L8_spectral:1.4285e-01 L9_spectral:1.4378e-01 L10_spectral:1.4404e-01 L11_spectral:1.4326e-01 L12_spectral:1.4210e-01 train_time:103013ms step_avg:51.51ms +[2025-09-11 10:23:22] [Rank 0] PRINT: step:2000/10000 val_loss:5.2965 total_sharp:5.8066e-04 L1_sharp:3.4226e-04 L2_sharp:1.8579e-04 L3_sharp:5.7292e-05 L4_sharp:3.0799e-05 L5_sharp:6.4353e-05 L6_sharp:3.6052e-05 L7_sharp:3.4588e-05 L8_sharp:8.8024e-05 L9_sharp:5.4345e-05 L10_sharp:7.8704e-05 L11_sharp:1.3076e-04 L12_sharp:8.6729e-04 total_fnorm:4.5500e+01 total_l1_linf:1.4029e+05 total_spectral:2.2750e+01 L1_fnorm:1.2625e+01 L2_fnorm:1.2375e+01 L3_fnorm:1.2438e+01 L4_fnorm:1.2312e+01 L5_fnorm:1.2062e+01 L6_fnorm:1.2312e+01 L7_fnorm:1.2312e+01 L8_fnorm:1.1750e+01 L9_fnorm:1.2312e+01 L10_fnorm:1.2188e+01 L11_fnorm:1.2125e+01 L12_fnorm:1.1312e+01 L1_l1linf:3.4062e+00 L2_l1linf:3.3906e+00 L3_l1linf:3.2656e+00 L4_l1linf:3.2344e+00 L5_l1linf:3.1719e+00 
L6_l1linf:3.1406e+00 L7_l1linf:3.1094e+00 L8_l1linf:3.1094e+00 L9_l1linf:3.0938e+00 L10_l1linf:3.0156e+00 L11_l1linf:2.8750e+00 L12_l1linf:2.3125e+00 L1_spectral:1.4763e-01 L2_spectral:1.4509e-01 L3_spectral:1.4606e-01 L4_spectral:1.4544e-01 L5_spectral:1.4568e-01 L6_spectral:1.4433e-01 L7_spectral:1.4420e-01 L8_spectral:1.4285e-01 L9_spectral:1.4378e-01 L10_spectral:1.4404e-01 L11_spectral:1.4326e-01 L12_spectral:1.4210e-01 train_time:103013ms step_avg:51.51ms +[2025-09-11 10:23:24] [Rank 0] step:2001/10000 train_time:104198ms step_avg:52.07ms +[2025-09-11 10:23:24] [Rank 0] step:2001/10000 train_time:104198ms step_avg:52.07ms +[2025-09-11 10:23:24] [Rank 0] step:2021/10000 train_time:104887ms step_avg:51.90ms +[2025-09-11 10:23:24] [Rank 0] step:2021/10000 train_time:104887ms step_avg:51.90ms +[2025-09-11 10:23:25] [Rank 0] step:2041/10000 train_time:105536ms step_avg:51.71ms +[2025-09-11 10:23:25] [Rank 0] step:2041/10000 train_time:105536ms step_avg:51.71ms +[2025-09-11 10:23:26] [Rank 0] step:2061/10000 train_time:106185ms step_avg:51.52ms +[2025-09-11 10:23:26] [Rank 0] step:2061/10000 train_time:106185ms step_avg:51.52ms +[2025-09-11 10:23:26] [Rank 0] step:2081/10000 train_time:106834ms step_avg:51.34ms +[2025-09-11 10:23:26] [Rank 0] step:2081/10000 train_time:106834ms step_avg:51.34ms +[2025-09-11 10:23:27] [Rank 0] step:2101/10000 train_time:107482ms step_avg:51.16ms +[2025-09-11 10:23:27] [Rank 0] step:2101/10000 train_time:107482ms step_avg:51.16ms +[2025-09-11 10:23:28] [Rank 0] step:2121/10000 train_time:108130ms step_avg:50.98ms +[2025-09-11 10:23:28] [Rank 0] step:2121/10000 train_time:108130ms step_avg:50.98ms +[2025-09-11 10:23:28] [Rank 0] step:2141/10000 train_time:108778ms step_avg:50.81ms +[2025-09-11 10:23:28] [Rank 0] step:2141/10000 train_time:108778ms step_avg:50.81ms +[2025-09-11 10:23:29] [Rank 0] step:2161/10000 train_time:109426ms step_avg:50.64ms +[2025-09-11 10:23:29] [Rank 0] step:2161/10000 train_time:109426ms step_avg:50.64ms 
+[2025-09-11 10:23:30] [Rank 0] step:2181/10000 train_time:110074ms step_avg:50.47ms +[2025-09-11 10:23:30] [Rank 0] step:2181/10000 train_time:110074ms step_avg:50.47ms +[2025-09-11 10:23:30] [Rank 0] step:2201/10000 train_time:110722ms step_avg:50.31ms +[2025-09-11 10:23:30] [Rank 0] step:2201/10000 train_time:110722ms step_avg:50.31ms +[2025-09-11 10:23:31] [Rank 0] step:2221/10000 train_time:111371ms step_avg:50.14ms +[2025-09-11 10:23:31] [Rank 0] step:2221/10000 train_time:111371ms step_avg:50.14ms +[2025-09-11 10:23:31] [Rank 0] step:2241/10000 train_time:112032ms step_avg:49.99ms +[2025-09-11 10:23:31] [Rank 0] step:2241/10000 train_time:112032ms step_avg:49.99ms +[2025-09-11 10:23:32] [Rank 0] step:2261/10000 train_time:112693ms step_avg:49.84ms +[2025-09-11 10:23:32] [Rank 0] step:2261/10000 train_time:112693ms step_avg:49.84ms +[2025-09-11 10:23:33] [Rank 0] step:2281/10000 train_time:113354ms step_avg:49.70ms +[2025-09-11 10:23:33] [Rank 0] step:2281/10000 train_time:113354ms step_avg:49.70ms +[2025-09-11 10:23:33] [Rank 0] step:2301/10000 train_time:114015ms step_avg:49.55ms +[2025-09-11 10:23:33] [Rank 0] step:2301/10000 train_time:114015ms step_avg:49.55ms +[2025-09-11 10:23:34] [Rank 0] step:2321/10000 train_time:114676ms step_avg:49.41ms +[2025-09-11 10:23:34] [Rank 0] step:2321/10000 train_time:114676ms step_avg:49.41ms +[2025-09-11 10:23:35] [Rank 0] step:2341/10000 train_time:115337ms step_avg:49.27ms +[2025-09-11 10:23:35] [Rank 0] step:2341/10000 train_time:115337ms step_avg:49.27ms +[2025-09-11 10:23:35] [Rank 0] step:2361/10000 train_time:115998ms step_avg:49.13ms +[2025-09-11 10:23:35] [Rank 0] step:2361/10000 train_time:115998ms step_avg:49.13ms +[2025-09-11 10:23:36] [Rank 0] step:2381/10000 train_time:116658ms step_avg:49.00ms +[2025-09-11 10:23:36] [Rank 0] step:2381/10000 train_time:116658ms step_avg:49.00ms +[2025-09-11 10:23:37] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 10:23:37] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 10:23:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 10:23:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 10:23:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 10:23:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 10:23:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:23:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:23:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 10:23:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 10:23:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 10:23:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 10:23:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 10:23:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 10:23:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 10:23:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 10:23:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 10:23:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 10:23:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 10:23:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 10:23:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 10:23:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 10:23:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 10:23:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 10:23:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 10:23:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 10:23:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 10:23:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 10:23:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 10:23:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 10:23:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 10:23:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 10:23:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 10:23:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 10:23:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 10:23:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 10:23:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 10:23:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 10:23:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 10:23:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 10:23:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 10:23:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 10:23:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 10:23:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 10:23:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:23:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:23:47] [Rank 0] PRINT: step:2400/10000 val_loss:5.1657 total_sharp:5.2844e-04 L1_sharp:2.8303e-04 L2_sharp:1.1786e-04 L3_sharp:4.4821e-05 L4_sharp:4.4092e-05 L5_sharp:5.6347e-05 L6_sharp:4.3354e-05 L7_sharp:4.0230e-05 L8_sharp:9.5427e-05 L9_sharp:6.4830e-05 L10_sharp:9.3215e-05 L11_sharp:1.3507e-04 L12_sharp:8.2519e-04 total_fnorm:4.5500e+01 total_l1_linf:1.3619e+05 total_spectral:2.2750e+01 L1_fnorm:1.2688e+01 L2_fnorm:1.2375e+01 L3_fnorm:1.2438e+01 L4_fnorm:1.2500e+01 L5_fnorm:1.2188e+01 L6_fnorm:1.2438e+01 L7_fnorm:1.2438e+01 L8_fnorm:1.1812e+01 L9_fnorm:1.2438e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2250e+01 L12_fnorm:1.1688e+01 L1_l1linf:3.3750e+00 L2_l1linf:3.2812e+00 L3_l1linf:3.2344e+00 L4_l1linf:3.1719e+00 L5_l1linf:3.0469e+00 L6_l1linf:3.0469e+00 L7_l1linf:3.0312e+00 L8_l1linf:3.0469e+00 L9_l1linf:3.0000e+00 L10_l1linf:2.9531e+00 L11_l1linf:2.8281e+00 L12_l1linf:2.4219e+00 L1_spectral:1.5048e-01 L2_spectral:1.4769e-01 L3_spectral:1.4904e-01 L4_spectral:1.4867e-01 L5_spectral:1.4789e-01 L6_spectral:1.4853e-01 L7_spectral:1.4814e-01 L8_spectral:1.4564e-01 L9_spectral:1.4695e-01 L10_spectral:1.4717e-01 L11_spectral:1.4619e-01 L12_spectral:1.4524e-01 train_time:117300ms step_avg:48.88ms +[2025-09-11 10:23:47] [Rank 0] PRINT: step:2400/10000 
val_loss:5.1657 total_sharp:5.2844e-04 L1_sharp:2.8303e-04 L2_sharp:1.1786e-04 L3_sharp:4.4821e-05 L4_sharp:4.4092e-05 L5_sharp:5.6347e-05 L6_sharp:4.3354e-05 L7_sharp:4.0230e-05 L8_sharp:9.5427e-05 L9_sharp:6.4830e-05 L10_sharp:9.3215e-05 L11_sharp:1.3507e-04 L12_sharp:8.2519e-04 total_fnorm:4.5500e+01 total_l1_linf:1.3619e+05 total_spectral:2.2750e+01 L1_fnorm:1.2688e+01 L2_fnorm:1.2375e+01 L3_fnorm:1.2438e+01 L4_fnorm:1.2500e+01 L5_fnorm:1.2188e+01 L6_fnorm:1.2438e+01 L7_fnorm:1.2438e+01 L8_fnorm:1.1812e+01 L9_fnorm:1.2438e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2250e+01 L12_fnorm:1.1688e+01 L1_l1linf:3.3750e+00 L2_l1linf:3.2812e+00 L3_l1linf:3.2344e+00 L4_l1linf:3.1719e+00 L5_l1linf:3.0469e+00 L6_l1linf:3.0469e+00 L7_l1linf:3.0312e+00 L8_l1linf:3.0469e+00 L9_l1linf:3.0000e+00 L10_l1linf:2.9531e+00 L11_l1linf:2.8281e+00 L12_l1linf:2.4219e+00 L1_spectral:1.5048e-01 L2_spectral:1.4769e-01 L3_spectral:1.4904e-01 L4_spectral:1.4867e-01 L5_spectral:1.4789e-01 L6_spectral:1.4853e-01 L7_spectral:1.4814e-01 L8_spectral:1.4564e-01 L9_spectral:1.4695e-01 L10_spectral:1.4717e-01 L11_spectral:1.4619e-01 L12_spectral:1.4524e-01 train_time:117300ms step_avg:48.88ms +[2025-09-11 10:23:48] [Rank 0] step:2401/10000 train_time:118496ms step_avg:49.35ms +[2025-09-11 10:23:48] [Rank 0] step:2401/10000 train_time:118496ms step_avg:49.35ms +[2025-09-11 10:23:49] [Rank 0] step:2421/10000 train_time:119161ms step_avg:49.22ms +[2025-09-11 10:23:49] [Rank 0] step:2421/10000 train_time:119161ms step_avg:49.22ms +[2025-09-11 10:23:50] [Rank 0] step:2441/10000 train_time:119824ms step_avg:49.09ms +[2025-09-11 10:23:50] [Rank 0] step:2441/10000 train_time:119824ms step_avg:49.09ms +[2025-09-11 10:23:50] [Rank 0] step:2461/10000 train_time:120499ms step_avg:48.96ms +[2025-09-11 10:23:50] [Rank 0] step:2461/10000 train_time:120499ms step_avg:48.96ms +[2025-09-11 10:23:51] [Rank 0] step:2481/10000 train_time:121161ms step_avg:48.84ms +[2025-09-11 10:23:51] [Rank 0] step:2481/10000 
train_time:121161ms step_avg:48.84ms +[2025-09-11 10:23:52] [Rank 0] step:2501/10000 train_time:121824ms step_avg:48.71ms +[2025-09-11 10:23:52] [Rank 0] step:2501/10000 train_time:121824ms step_avg:48.71ms +[2025-09-11 10:23:52] [Rank 0] step:2521/10000 train_time:122488ms step_avg:48.59ms +[2025-09-11 10:23:52] [Rank 0] step:2521/10000 train_time:122488ms step_avg:48.59ms +[2025-09-11 10:23:53] [Rank 0] step:2541/10000 train_time:123150ms step_avg:48.47ms +[2025-09-11 10:23:53] [Rank 0] step:2541/10000 train_time:123150ms step_avg:48.47ms +[2025-09-11 10:23:54] [Rank 0] step:2561/10000 train_time:123812ms step_avg:48.35ms +[2025-09-11 10:23:54] [Rank 0] step:2561/10000 train_time:123812ms step_avg:48.35ms +[2025-09-11 10:23:54] [Rank 0] step:2581/10000 train_time:124474ms step_avg:48.23ms +[2025-09-11 10:23:54] [Rank 0] step:2581/10000 train_time:124474ms step_avg:48.23ms +[2025-09-11 10:23:55] [Rank 0] step:2601/10000 train_time:125135ms step_avg:48.11ms +[2025-09-11 10:23:55] [Rank 0] step:2601/10000 train_time:125135ms step_avg:48.11ms +[2025-09-11 10:23:56] [Rank 0] step:2621/10000 train_time:125797ms step_avg:48.00ms +[2025-09-11 10:23:56] [Rank 0] step:2621/10000 train_time:125797ms step_avg:48.00ms +[2025-09-11 10:23:56] [Rank 0] step:2641/10000 train_time:126459ms step_avg:47.88ms +[2025-09-11 10:23:56] [Rank 0] step:2641/10000 train_time:126459ms step_avg:47.88ms +[2025-09-11 10:23:57] [Rank 0] step:2661/10000 train_time:127121ms step_avg:47.77ms +[2025-09-11 10:23:57] [Rank 0] step:2661/10000 train_time:127121ms step_avg:47.77ms +[2025-09-11 10:23:58] [Rank 0] step:2681/10000 train_time:127782ms step_avg:47.66ms +[2025-09-11 10:23:58] [Rank 0] step:2681/10000 train_time:127782ms step_avg:47.66ms +[2025-09-11 10:23:58] [Rank 0] step:2701/10000 train_time:128444ms step_avg:47.55ms +[2025-09-11 10:23:58] [Rank 0] step:2701/10000 train_time:128444ms step_avg:47.55ms +[2025-09-11 10:23:59] [Rank 0] step:2721/10000 train_time:129106ms step_avg:47.45ms 
+[2025-09-11 10:23:59] [Rank 0] step:2721/10000 train_time:129106ms step_avg:47.45ms +[2025-09-11 10:24:00] [Rank 0] step:2741/10000 train_time:129769ms step_avg:47.34ms +[2025-09-11 10:24:00] [Rank 0] step:2741/10000 train_time:129769ms step_avg:47.34ms +[2025-09-11 10:24:00] [Rank 0] step:2761/10000 train_time:130431ms step_avg:47.24ms +[2025-09-11 10:24:00] [Rank 0] step:2761/10000 train_time:130431ms step_avg:47.24ms +[2025-09-11 10:24:01] [Rank 0] step:2781/10000 train_time:131093ms step_avg:47.14ms +[2025-09-11 10:24:01] [Rank 0] step:2781/10000 train_time:131093ms step_avg:47.14ms +[2025-09-11 10:24:02] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 10:24:02] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 10:24:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 10:24:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 10:24:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 10:24:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 10:24:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:24:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:24:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 10:24:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 10:24:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 10:24:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 10:24:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 10:24:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 10:24:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 10:24:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 10:24:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 10:24:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 10:24:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 10:24:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 10:24:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 10:24:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 10:24:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 10:24:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 10:24:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 10:24:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 10:24:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 10:24:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 10:24:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 10:24:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 10:24:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 10:24:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 10:24:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 10:24:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 10:24:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 10:24:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 10:24:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 10:24:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 10:24:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 10:24:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 10:24:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 10:24:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 10:24:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 10:24:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 10:24:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:24:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:24:12] [Rank 0] PRINT: step:2800/10000 val_loss:5.0889 total_sharp:4.9011e-04 L1_sharp:2.5268e-04 L2_sharp:1.0438e-04 L3_sharp:5.6682e-05 L4_sharp:2.0461e-05 L5_sharp:3.6464e-05 L6_sharp:2.4040e-05 L7_sharp:3.1934e-05 L8_sharp:8.8990e-05 L9_sharp:7.0031e-05 L10_sharp:7.9721e-05 L11_sharp:1.3428e-04 L12_sharp:4.6501e-04 total_fnorm:4.5250e+01 total_l1_linf:1.3312e+05 total_spectral:2.2625e+01 L1_fnorm:1.2750e+01 L2_fnorm:1.2375e+01 L3_fnorm:1.2500e+01 L4_fnorm:1.2500e+01 L5_fnorm:1.2188e+01 L6_fnorm:1.2438e+01 L7_fnorm:1.2438e+01 L8_fnorm:1.1938e+01 L9_fnorm:1.2438e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2312e+01 L12_fnorm:1.1688e+01 L1_l1linf:3.2969e+00 L2_l1linf:3.2188e+00 L3_l1linf:3.1875e+00 L4_l1linf:3.1406e+00 L5_l1linf:3.0312e+00 L6_l1linf:2.9688e+00 L7_l1linf:3.0469e+00 L8_l1linf:2.9688e+00 L9_l1linf:2.9062e+00 L10_l1linf:2.9375e+00 L11_l1linf:2.8281e+00 L12_l1linf:2.4219e+00 L1_spectral:1.5326e-01 L2_spectral:1.4876e-01 L3_spectral:1.5137e-01 L4_spectral:1.5067e-01 L5_spectral:1.4892e-01 L6_spectral:1.5171e-01 L7_spectral:1.5036e-01 L8_spectral:1.4722e-01 L9_spectral:1.4944e-01 L10_spectral:1.4885e-01 L11_spectral:1.4834e-01 L12_spectral:1.4851e-01 train_time:131736ms step_avg:47.05ms +[2025-09-11 10:24:12] [Rank 0] PRINT: step:2800/10000 val_loss:5.0889 total_sharp:4.9011e-04 L1_sharp:2.5268e-04 L2_sharp:1.0438e-04 L3_sharp:5.6682e-05 L4_sharp:2.0461e-05 L5_sharp:3.6464e-05 L6_sharp:2.4040e-05 L7_sharp:3.1934e-05 L8_sharp:8.8990e-05 L9_sharp:7.0031e-05 L10_sharp:7.9721e-05 L11_sharp:1.3428e-04 L12_sharp:4.6501e-04 total_fnorm:4.5250e+01 total_l1_linf:1.3312e+05 total_spectral:2.2625e+01 L1_fnorm:1.2750e+01 L2_fnorm:1.2375e+01 L3_fnorm:1.2500e+01 L4_fnorm:1.2500e+01 L5_fnorm:1.2188e+01 L6_fnorm:1.2438e+01 L7_fnorm:1.2438e+01 L8_fnorm:1.1938e+01 L9_fnorm:1.2438e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2312e+01 L12_fnorm:1.1688e+01 L1_l1linf:3.2969e+00 L2_l1linf:3.2188e+00 L3_l1linf:3.1875e+00 L4_l1linf:3.1406e+00 L5_l1linf:3.0312e+00 
L6_l1linf:2.9688e+00 L7_l1linf:3.0469e+00 L8_l1linf:2.9688e+00 L9_l1linf:2.9062e+00 L10_l1linf:2.9375e+00 L11_l1linf:2.8281e+00 L12_l1linf:2.4219e+00 L1_spectral:1.5326e-01 L2_spectral:1.4876e-01 L3_spectral:1.5137e-01 L4_spectral:1.5067e-01 L5_spectral:1.4892e-01 L6_spectral:1.5171e-01 L7_spectral:1.5036e-01 L8_spectral:1.4722e-01 L9_spectral:1.4944e-01 L10_spectral:1.4885e-01 L11_spectral:1.4834e-01 L12_spectral:1.4851e-01 train_time:131736ms step_avg:47.05ms +[2025-09-11 10:24:13] [Rank 0] step:2801/10000 train_time:132923ms step_avg:47.46ms +[2025-09-11 10:24:13] [Rank 0] step:2801/10000 train_time:132923ms step_avg:47.46ms +[2025-09-11 10:24:13] [Rank 0] step:2821/10000 train_time:133573ms step_avg:47.35ms +[2025-09-11 10:24:13] [Rank 0] step:2821/10000 train_time:133573ms step_avg:47.35ms +[2025-09-11 10:24:14] [Rank 0] step:2841/10000 train_time:134236ms step_avg:47.25ms +[2025-09-11 10:24:14] [Rank 0] step:2841/10000 train_time:134236ms step_avg:47.25ms +[2025-09-11 10:24:15] [Rank 0] step:2861/10000 train_time:134899ms step_avg:47.15ms +[2025-09-11 10:24:15] [Rank 0] step:2861/10000 train_time:134899ms step_avg:47.15ms +[2025-09-11 10:24:15] [Rank 0] step:2881/10000 train_time:135562ms step_avg:47.05ms +[2025-09-11 10:24:15] [Rank 0] step:2881/10000 train_time:135562ms step_avg:47.05ms +[2025-09-11 10:24:16] [Rank 0] step:2901/10000 train_time:136223ms step_avg:46.96ms +[2025-09-11 10:24:16] [Rank 0] step:2901/10000 train_time:136223ms step_avg:46.96ms +[2025-09-11 10:24:17] [Rank 0] step:2921/10000 train_time:136885ms step_avg:46.86ms +[2025-09-11 10:24:17] [Rank 0] step:2921/10000 train_time:136885ms step_avg:46.86ms +[2025-09-11 10:24:17] [Rank 0] step:2941/10000 train_time:137547ms step_avg:46.77ms +[2025-09-11 10:24:17] [Rank 0] step:2941/10000 train_time:137547ms step_avg:46.77ms +[2025-09-11 10:24:18] [Rank 0] step:2961/10000 train_time:138209ms step_avg:46.68ms +[2025-09-11 10:24:18] [Rank 0] step:2961/10000 train_time:138209ms step_avg:46.68ms 
+[2025-09-11 10:24:19] [Rank 0] step:2981/10000 train_time:138873ms step_avg:46.59ms +[2025-09-11 10:24:19] [Rank 0] step:2981/10000 train_time:138873ms step_avg:46.59ms +[2025-09-11 10:24:19] [Rank 0] step:3001/10000 train_time:139537ms step_avg:46.50ms +[2025-09-11 10:24:19] [Rank 0] step:3001/10000 train_time:139537ms step_avg:46.50ms +[2025-09-11 10:24:20] [Rank 0] step:3021/10000 train_time:140202ms step_avg:46.41ms +[2025-09-11 10:24:20] [Rank 0] step:3021/10000 train_time:140202ms step_avg:46.41ms +[2025-09-11 10:24:21] [Rank 0] step:3041/10000 train_time:140867ms step_avg:46.32ms +[2025-09-11 10:24:21] [Rank 0] step:3041/10000 train_time:140867ms step_avg:46.32ms +[2025-09-11 10:24:21] [Rank 0] step:3061/10000 train_time:141531ms step_avg:46.24ms +[2025-09-11 10:24:21] [Rank 0] step:3061/10000 train_time:141531ms step_avg:46.24ms +[2025-09-11 10:24:22] [Rank 0] step:3081/10000 train_time:142196ms step_avg:46.15ms +[2025-09-11 10:24:22] [Rank 0] step:3081/10000 train_time:142196ms step_avg:46.15ms +[2025-09-11 10:24:23] [Rank 0] step:3101/10000 train_time:142860ms step_avg:46.07ms +[2025-09-11 10:24:23] [Rank 0] step:3101/10000 train_time:142860ms step_avg:46.07ms +[2025-09-11 10:24:23] [Rank 0] step:3121/10000 train_time:143525ms step_avg:45.99ms +[2025-09-11 10:24:23] [Rank 0] step:3121/10000 train_time:143525ms step_avg:45.99ms +[2025-09-11 10:24:24] [Rank 0] step:3141/10000 train_time:144189ms step_avg:45.91ms +[2025-09-11 10:24:24] [Rank 0] step:3141/10000 train_time:144189ms step_avg:45.91ms +[2025-09-11 10:24:25] [Rank 0] step:3161/10000 train_time:144853ms step_avg:45.83ms +[2025-09-11 10:24:25] [Rank 0] step:3161/10000 train_time:144853ms step_avg:45.83ms +[2025-09-11 10:24:25] [Rank 0] step:3181/10000 train_time:145518ms step_avg:45.75ms +[2025-09-11 10:24:25] [Rank 0] step:3181/10000 train_time:145518ms step_avg:45.75ms +[2025-09-11 10:24:26] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 10:24:26] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 10:24:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 10:24:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 10:24:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 10:24:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 10:24:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:24:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:24:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 10:24:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 10:24:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 10:24:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 10:24:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 10:24:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 10:24:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 10:24:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 10:24:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 10:24:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 10:24:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 10:24:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 10:24:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 10:24:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 10:24:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 10:24:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 10:24:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 10:24:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 10:24:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 10:24:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 10:24:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 10:24:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 10:24:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 10:24:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 10:24:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 10:24:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 10:24:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 10:24:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 10:24:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 10:24:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 10:24:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 10:24:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 10:24:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 10:24:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 10:24:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 10:24:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 10:24:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:24:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:24:36] [Rank 0] PRINT: step:3200/10000 val_loss:4.9999 total_sharp:3.7166e-04 L1_sharp:2.4376e-04 L2_sharp:7.8415e-05 L3_sharp:4.7443e-05 L4_sharp:1.3858e-05 L5_sharp:5.0735e-05 L6_sharp:2.6097e-05 L7_sharp:3.1741e-05 L8_sharp:8.3522e-05 L9_sharp:5.7316e-05 L10_sharp:6.9978e-05 L11_sharp:1.0430e-04 L12_sharp:4.4160e-04 total_fnorm:4.6500e+01 total_l1_linf:1.3722e+05 total_spectral:2.3250e+01 L1_fnorm:1.2750e+01 L2_fnorm:1.2438e+01 L3_fnorm:1.2500e+01 L4_fnorm:1.2500e+01 L5_fnorm:1.2188e+01 L6_fnorm:1.2438e+01 L7_fnorm:1.2500e+01 L8_fnorm:1.1938e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2500e+01 L11_fnorm:1.2438e+01 L12_fnorm:1.1938e+01 L1_l1linf:3.3594e+00 L2_l1linf:3.1875e+00 L3_l1linf:3.1094e+00 L4_l1linf:3.1406e+00 L5_l1linf:2.9688e+00 L6_l1linf:2.9219e+00 L7_l1linf:2.9375e+00 L8_l1linf:2.9531e+00 L9_l1linf:2.9062e+00 L10_l1linf:2.8281e+00 L11_l1linf:2.7812e+00 L12_l1linf:2.5312e+00 L1_spectral:1.5443e-01 L2_spectral:1.5134e-01 L3_spectral:1.5296e-01 L4_spectral:1.5210e-01 L5_spectral:1.5082e-01 L6_spectral:1.5238e-01 L7_spectral:1.5288e-01 L8_spectral:1.4841e-01 L9_spectral:1.5200e-01 L10_spectral:1.5105e-01 L11_spectral:1.5017e-01 L12_spectral:1.5083e-01 train_time:146164ms step_avg:45.68ms +[2025-09-11 10:24:36] [Rank 0] PRINT: step:3200/10000 
val_loss:4.9999 total_sharp:3.7166e-04 L1_sharp:2.4376e-04 L2_sharp:7.8415e-05 L3_sharp:4.7443e-05 L4_sharp:1.3858e-05 L5_sharp:5.0735e-05 L6_sharp:2.6097e-05 L7_sharp:3.1741e-05 L8_sharp:8.3522e-05 L9_sharp:5.7316e-05 L10_sharp:6.9978e-05 L11_sharp:1.0430e-04 L12_sharp:4.4160e-04 total_fnorm:4.6500e+01 total_l1_linf:1.3722e+05 total_spectral:2.3250e+01 L1_fnorm:1.2750e+01 L2_fnorm:1.2438e+01 L3_fnorm:1.2500e+01 L4_fnorm:1.2500e+01 L5_fnorm:1.2188e+01 L6_fnorm:1.2438e+01 L7_fnorm:1.2500e+01 L8_fnorm:1.1938e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2500e+01 L11_fnorm:1.2438e+01 L12_fnorm:1.1938e+01 L1_l1linf:3.3594e+00 L2_l1linf:3.1875e+00 L3_l1linf:3.1094e+00 L4_l1linf:3.1406e+00 L5_l1linf:2.9688e+00 L6_l1linf:2.9219e+00 L7_l1linf:2.9375e+00 L8_l1linf:2.9531e+00 L9_l1linf:2.9062e+00 L10_l1linf:2.8281e+00 L11_l1linf:2.7812e+00 L12_l1linf:2.5312e+00 L1_spectral:1.5443e-01 L2_spectral:1.5134e-01 L3_spectral:1.5296e-01 L4_spectral:1.5210e-01 L5_spectral:1.5082e-01 L6_spectral:1.5238e-01 L7_spectral:1.5288e-01 L8_spectral:1.4841e-01 L9_spectral:1.5200e-01 L10_spectral:1.5105e-01 L11_spectral:1.5017e-01 L12_spectral:1.5083e-01 train_time:146164ms step_avg:45.68ms +[2025-09-11 10:24:38] [Rank 0] step:3201/10000 train_time:147872ms step_avg:46.20ms +[2025-09-11 10:24:38] [Rank 0] step:3201/10000 train_time:147872ms step_avg:46.20ms +[2025-09-11 10:24:38] [Rank 0] step:3221/10000 train_time:148569ms step_avg:46.13ms +[2025-09-11 10:24:38] [Rank 0] step:3221/10000 train_time:148569ms step_avg:46.13ms +[2025-09-11 10:24:39] [Rank 0] step:3241/10000 train_time:149235ms step_avg:46.05ms +[2025-09-11 10:24:39] [Rank 0] step:3241/10000 train_time:149235ms step_avg:46.05ms +[2025-09-11 10:24:40] [Rank 0] step:3261/10000 train_time:149900ms step_avg:45.97ms +[2025-09-11 10:24:40] [Rank 0] step:3261/10000 train_time:149900ms step_avg:45.97ms +[2025-09-11 10:24:40] [Rank 0] step:3281/10000 train_time:150565ms step_avg:45.89ms +[2025-09-11 10:24:40] [Rank 0] step:3281/10000 
train_time:150565ms step_avg:45.89ms +[2025-09-11 10:24:41] [Rank 0] step:3301/10000 train_time:151230ms step_avg:45.81ms +[2025-09-11 10:24:41] [Rank 0] step:3301/10000 train_time:151230ms step_avg:45.81ms +[2025-09-11 10:24:42] [Rank 0] step:3321/10000 train_time:151894ms step_avg:45.74ms +[2025-09-11 10:24:42] [Rank 0] step:3321/10000 train_time:151894ms step_avg:45.74ms +[2025-09-11 10:24:42] [Rank 0] step:3341/10000 train_time:152558ms step_avg:45.66ms +[2025-09-11 10:24:42] [Rank 0] step:3341/10000 train_time:152558ms step_avg:45.66ms +[2025-09-11 10:24:43] [Rank 0] step:3361/10000 train_time:153224ms step_avg:45.59ms +[2025-09-11 10:24:43] [Rank 0] step:3361/10000 train_time:153224ms step_avg:45.59ms +[2025-09-11 10:24:44] [Rank 0] step:3381/10000 train_time:153888ms step_avg:45.52ms +[2025-09-11 10:24:44] [Rank 0] step:3381/10000 train_time:153888ms step_avg:45.52ms +[2025-09-11 10:24:44] [Rank 0] step:3401/10000 train_time:154551ms step_avg:45.44ms +[2025-09-11 10:24:44] [Rank 0] step:3401/10000 train_time:154551ms step_avg:45.44ms +[2025-09-11 10:24:45] [Rank 0] step:3421/10000 train_time:155214ms step_avg:45.37ms +[2025-09-11 10:24:45] [Rank 0] step:3421/10000 train_time:155214ms step_avg:45.37ms +[2025-09-11 10:24:46] [Rank 0] step:3441/10000 train_time:155882ms step_avg:45.30ms +[2025-09-11 10:24:46] [Rank 0] step:3441/10000 train_time:155882ms step_avg:45.30ms +[2025-09-11 10:24:46] [Rank 0] step:3461/10000 train_time:156546ms step_avg:45.23ms +[2025-09-11 10:24:46] [Rank 0] step:3461/10000 train_time:156546ms step_avg:45.23ms +[2025-09-11 10:24:47] [Rank 0] step:3481/10000 train_time:157210ms step_avg:45.16ms +[2025-09-11 10:24:47] [Rank 0] step:3481/10000 train_time:157210ms step_avg:45.16ms +[2025-09-11 10:24:48] [Rank 0] step:3501/10000 train_time:157874ms step_avg:45.09ms +[2025-09-11 10:24:48] [Rank 0] step:3501/10000 train_time:157874ms step_avg:45.09ms +[2025-09-11 10:24:48] [Rank 0] step:3521/10000 train_time:158538ms step_avg:45.03ms 
+[2025-09-11 10:24:48] [Rank 0] step:3521/10000 train_time:158538ms step_avg:45.03ms +[2025-09-11 10:24:49] [Rank 0] step:3541/10000 train_time:159202ms step_avg:44.96ms +[2025-09-11 10:24:49] [Rank 0] step:3541/10000 train_time:159202ms step_avg:44.96ms +[2025-09-11 10:24:50] [Rank 0] step:3561/10000 train_time:159865ms step_avg:44.89ms +[2025-09-11 10:24:50] [Rank 0] step:3561/10000 train_time:159865ms step_avg:44.89ms +[2025-09-11 10:24:50] [Rank 0] step:3581/10000 train_time:160529ms step_avg:44.83ms +[2025-09-11 10:24:50] [Rank 0] step:3581/10000 train_time:160529ms step_avg:44.83ms +[2025-09-11 10:24:51] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 10:24:51] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 10:24:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 10:24:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 10:24:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 10:24:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 10:24:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:24:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:24:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 10:24:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 10:24:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 10:24:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 10:24:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 10:24:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 10:24:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 10:24:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 10:24:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 10:24:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 10:24:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 10:24:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 10:24:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 10:24:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 10:24:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 10:24:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 10:24:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 10:24:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 10:24:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 10:24:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 10:24:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 10:24:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 10:24:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 10:24:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 10:24:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 10:24:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 10:24:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 10:24:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 10:25:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 10:25:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 10:25:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 10:25:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 10:25:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 10:25:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 10:25:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 10:25:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 10:25:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:25:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:25:01] [Rank 0] PRINT: step:3600/10000 val_loss:4.9427 total_sharp:3.8050e-04 L1_sharp:1.7972e-04 L2_sharp:7.3101e-05 L3_sharp:3.2911e-05 L4_sharp:2.0600e-05 L5_sharp:5.8428e-05 L6_sharp:2.8983e-05 L7_sharp:3.7346e-05 L8_sharp:8.3178e-05 L9_sharp:5.3447e-05 L10_sharp:7.1061e-05 L11_sharp:1.0329e-04 L12_sharp:3.8604e-04 total_fnorm:4.5500e+01 total_l1_linf:1.3005e+05 total_spectral:2.2750e+01 L1_fnorm:1.2750e+01 L2_fnorm:1.2312e+01 L3_fnorm:1.2500e+01 L4_fnorm:1.2500e+01 L5_fnorm:1.2125e+01 L6_fnorm:1.2500e+01 L7_fnorm:1.2438e+01 L8_fnorm:1.1938e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2438e+01 L11_fnorm:1.2438e+01 L12_fnorm:1.1938e+01 L1_l1linf:3.2969e+00 L2_l1linf:3.1719e+00 L3_l1linf:3.1094e+00 L4_l1linf:3.1719e+00 L5_l1linf:2.9844e+00 L6_l1linf:2.9219e+00 L7_l1linf:2.9531e+00 L8_l1linf:2.8906e+00 L9_l1linf:2.8281e+00 L10_l1linf:2.8125e+00 L11_l1linf:2.7656e+00 L12_l1linf:2.4375e+00 L1_spectral:1.5550e-01 L2_spectral:1.5216e-01 L3_spectral:1.5455e-01 L4_spectral:1.5467e-01 L5_spectral:1.5207e-01 L6_spectral:1.5387e-01 L7_spectral:1.5411e-01 L8_spectral:1.4910e-01 L9_spectral:1.5329e-01 L10_spectral:1.5259e-01 L11_spectral:1.5178e-01 L12_spectral:1.5109e-01 train_time:161174ms step_avg:44.77ms +[2025-09-11 10:25:01] [Rank 0] PRINT: step:3600/10000 val_loss:4.9427 total_sharp:3.8050e-04 L1_sharp:1.7972e-04 L2_sharp:7.3101e-05 L3_sharp:3.2911e-05 L4_sharp:2.0600e-05 L5_sharp:5.8428e-05 L6_sharp:2.8983e-05 L7_sharp:3.7346e-05 L8_sharp:8.3178e-05 L9_sharp:5.3447e-05 L10_sharp:7.1061e-05 L11_sharp:1.0329e-04 L12_sharp:3.8604e-04 total_fnorm:4.5500e+01 total_l1_linf:1.3005e+05 total_spectral:2.2750e+01 L1_fnorm:1.2750e+01 L2_fnorm:1.2312e+01 L3_fnorm:1.2500e+01 L4_fnorm:1.2500e+01 L5_fnorm:1.2125e+01 L6_fnorm:1.2500e+01 L7_fnorm:1.2438e+01 L8_fnorm:1.1938e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2438e+01 L11_fnorm:1.2438e+01 L12_fnorm:1.1938e+01 L1_l1linf:3.2969e+00 L2_l1linf:3.1719e+00 L3_l1linf:3.1094e+00 L4_l1linf:3.1719e+00 L5_l1linf:2.9844e+00 
L6_l1linf:2.9219e+00 L7_l1linf:2.9531e+00 L8_l1linf:2.8906e+00 L9_l1linf:2.8281e+00 L10_l1linf:2.8125e+00 L11_l1linf:2.7656e+00 L12_l1linf:2.4375e+00 L1_spectral:1.5550e-01 L2_spectral:1.5216e-01 L3_spectral:1.5455e-01 L4_spectral:1.5467e-01 L5_spectral:1.5207e-01 L6_spectral:1.5387e-01 L7_spectral:1.5411e-01 L8_spectral:1.4910e-01 L9_spectral:1.5329e-01 L10_spectral:1.5259e-01 L11_spectral:1.5178e-01 L12_spectral:1.5109e-01 train_time:161174ms step_avg:44.77ms +[2025-09-11 10:25:02] [Rank 0] step:3601/10000 train_time:162397ms step_avg:45.10ms +[2025-09-11 10:25:02] [Rank 0] step:3601/10000 train_time:162397ms step_avg:45.10ms +[2025-09-11 10:25:03] [Rank 0] step:3621/10000 train_time:163070ms step_avg:45.03ms +[2025-09-11 10:25:03] [Rank 0] step:3621/10000 train_time:163070ms step_avg:45.03ms +[2025-09-11 10:25:03] [Rank 0] step:3641/10000 train_time:163735ms step_avg:44.97ms +[2025-09-11 10:25:03] [Rank 0] step:3641/10000 train_time:163735ms step_avg:44.97ms +[2025-09-11 10:25:04] [Rank 0] step:3661/10000 train_time:164399ms step_avg:44.91ms +[2025-09-11 10:25:04] [Rank 0] step:3661/10000 train_time:164399ms step_avg:44.91ms +[2025-09-11 10:25:05] [Rank 0] step:3681/10000 train_time:165063ms step_avg:44.84ms +[2025-09-11 10:25:05] [Rank 0] step:3681/10000 train_time:165063ms step_avg:44.84ms +[2025-09-11 10:25:05] [Rank 0] step:3701/10000 train_time:165727ms step_avg:44.78ms +[2025-09-11 10:25:05] [Rank 0] step:3701/10000 train_time:165727ms step_avg:44.78ms +[2025-09-11 10:25:06] [Rank 0] step:3721/10000 train_time:166400ms step_avg:44.72ms +[2025-09-11 10:25:06] [Rank 0] step:3721/10000 train_time:166400ms step_avg:44.72ms +[2025-09-11 10:25:07] [Rank 0] step:3741/10000 train_time:167075ms step_avg:44.66ms +[2025-09-11 10:25:07] [Rank 0] step:3741/10000 train_time:167075ms step_avg:44.66ms +[2025-09-11 10:25:07] [Rank 0] step:3761/10000 train_time:167751ms step_avg:44.60ms +[2025-09-11 10:25:07] [Rank 0] step:3761/10000 train_time:167751ms step_avg:44.60ms 
+[2025-09-11 10:25:08] [Rank 0] step:3781/10000 train_time:168425ms step_avg:44.55ms +[2025-09-11 10:25:08] [Rank 0] step:3781/10000 train_time:168425ms step_avg:44.55ms +[2025-09-11 10:25:09] [Rank 0] step:3801/10000 train_time:169100ms step_avg:44.49ms +[2025-09-11 10:25:09] [Rank 0] step:3801/10000 train_time:169100ms step_avg:44.49ms +[2025-09-11 10:25:10] [Rank 0] step:3821/10000 train_time:169924ms step_avg:44.47ms +[2025-09-11 10:25:10] [Rank 0] step:3821/10000 train_time:169924ms step_avg:44.47ms +[2025-09-11 10:25:11] [Rank 0] step:3841/10000 train_time:171009ms step_avg:44.52ms +[2025-09-11 10:25:11] [Rank 0] step:3841/10000 train_time:171009ms step_avg:44.52ms +[2025-09-11 10:25:11] [Rank 0] step:3861/10000 train_time:171685ms step_avg:44.47ms +[2025-09-11 10:25:11] [Rank 0] step:3861/10000 train_time:171685ms step_avg:44.47ms +[2025-09-11 10:25:12] [Rank 0] step:3881/10000 train_time:172655ms step_avg:44.49ms +[2025-09-11 10:25:12] [Rank 0] step:3881/10000 train_time:172655ms step_avg:44.49ms +[2025-09-11 10:25:13] [Rank 0] step:3901/10000 train_time:173329ms step_avg:44.43ms +[2025-09-11 10:25:13] [Rank 0] step:3901/10000 train_time:173329ms step_avg:44.43ms +[2025-09-11 10:25:14] [Rank 0] step:3921/10000 train_time:174004ms step_avg:44.38ms +[2025-09-11 10:25:14] [Rank 0] step:3921/10000 train_time:174004ms step_avg:44.38ms +[2025-09-11 10:25:14] [Rank 0] step:3941/10000 train_time:174679ms step_avg:44.32ms +[2025-09-11 10:25:14] [Rank 0] step:3941/10000 train_time:174679ms step_avg:44.32ms +[2025-09-11 10:25:15] [Rank 0] step:3961/10000 train_time:175354ms step_avg:44.27ms +[2025-09-11 10:25:15] [Rank 0] step:3961/10000 train_time:175354ms step_avg:44.27ms +[2025-09-11 10:25:16] [Rank 0] step:3981/10000 train_time:176028ms step_avg:44.22ms +[2025-09-11 10:25:16] [Rank 0] step:3981/10000 train_time:176028ms step_avg:44.22ms +[2025-09-11 10:25:16] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 10:25:16] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 10:25:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 10:25:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 10:25:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 10:25:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 10:25:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:25:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:25:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 10:25:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 10:25:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 10:25:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 10:25:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 10:25:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 10:25:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 10:25:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 10:25:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 10:25:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 10:25:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 10:25:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 10:25:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 10:25:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 10:25:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 10:25:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 10:25:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 10:25:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 10:25:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 10:25:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 10:25:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 10:25:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 10:25:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 10:25:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 10:25:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 10:25:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 10:25:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 10:25:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 10:25:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 10:25:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 10:25:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 10:25:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 10:25:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 10:25:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 10:25:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 10:25:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 10:25:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:25:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:25:26] [Rank 0] PRINT: step:4000/10000 val_loss:4.8928 total_sharp:4.6045e-04 L1_sharp:2.1850e-04 L2_sharp:4.3714e-05 L3_sharp:4.5240e-05 L4_sharp:4.2295e-05 L5_sharp:3.0951e-05 L6_sharp:2.7848e-05 L7_sharp:3.1176e-05 L8_sharp:9.0012e-05 L9_sharp:5.3197e-05 L10_sharp:7.4268e-05 L11_sharp:1.1871e-04 L12_sharp:7.3394e-04 total_fnorm:4.6250e+01 total_l1_linf:1.3312e+05 total_spectral:2.3250e+01 L1_fnorm:1.2688e+01 L2_fnorm:1.2312e+01 L3_fnorm:1.2438e+01 L4_fnorm:1.2438e+01 L5_fnorm:1.2188e+01 L6_fnorm:1.2438e+01 L7_fnorm:1.2438e+01 L8_fnorm:1.1812e+01 L9_fnorm:1.2438e+01 L10_fnorm:1.2438e+01 L11_fnorm:1.2375e+01 L12_fnorm:1.1812e+01 L1_l1linf:3.2812e+00 L2_l1linf:3.1562e+00 L3_l1linf:3.0312e+00 L4_l1linf:3.0625e+00 L5_l1linf:2.9375e+00 L6_l1linf:2.9219e+00 L7_l1linf:2.9062e+00 L8_l1linf:2.8594e+00 L9_l1linf:2.7812e+00 L10_l1linf:2.7969e+00 L11_l1linf:2.7656e+00 L12_l1linf:2.4062e+00 L1_spectral:1.5683e-01 L2_spectral:1.5183e-01 L3_spectral:1.5427e-01 L4_spectral:1.5383e-01 L5_spectral:1.5222e-01 L6_spectral:1.5532e-01 L7_spectral:1.5436e-01 L8_spectral:1.5064e-01 L9_spectral:1.5387e-01 L10_spectral:1.5328e-01 L11_spectral:1.5281e-01 L12_spectral:1.5253e-01 train_time:176684ms step_avg:44.17ms +[2025-09-11 10:25:26] [Rank 0] PRINT: step:4000/10000 
val_loss:4.8928 total_sharp:4.6045e-04 L1_sharp:2.1850e-04 L2_sharp:4.3714e-05 L3_sharp:4.5240e-05 L4_sharp:4.2295e-05 L5_sharp:3.0951e-05 L6_sharp:2.7848e-05 L7_sharp:3.1176e-05 L8_sharp:9.0012e-05 L9_sharp:5.3197e-05 L10_sharp:7.4268e-05 L11_sharp:1.1871e-04 L12_sharp:7.3394e-04 total_fnorm:4.6250e+01 total_l1_linf:1.3312e+05 total_spectral:2.3250e+01 L1_fnorm:1.2688e+01 L2_fnorm:1.2312e+01 L3_fnorm:1.2438e+01 L4_fnorm:1.2438e+01 L5_fnorm:1.2188e+01 L6_fnorm:1.2438e+01 L7_fnorm:1.2438e+01 L8_fnorm:1.1812e+01 L9_fnorm:1.2438e+01 L10_fnorm:1.2438e+01 L11_fnorm:1.2375e+01 L12_fnorm:1.1812e+01 L1_l1linf:3.2812e+00 L2_l1linf:3.1562e+00 L3_l1linf:3.0312e+00 L4_l1linf:3.0625e+00 L5_l1linf:2.9375e+00 L6_l1linf:2.9219e+00 L7_l1linf:2.9062e+00 L8_l1linf:2.8594e+00 L9_l1linf:2.7812e+00 L10_l1linf:2.7969e+00 L11_l1linf:2.7656e+00 L12_l1linf:2.4062e+00 L1_spectral:1.5683e-01 L2_spectral:1.5183e-01 L3_spectral:1.5427e-01 L4_spectral:1.5383e-01 L5_spectral:1.5222e-01 L6_spectral:1.5532e-01 L7_spectral:1.5436e-01 L8_spectral:1.5064e-01 L9_spectral:1.5387e-01 L10_spectral:1.5328e-01 L11_spectral:1.5281e-01 L12_spectral:1.5253e-01 train_time:176684ms step_avg:44.17ms +[2025-09-11 10:25:28] [Rank 0] step:4001/10000 train_time:177890ms step_avg:44.46ms +[2025-09-11 10:25:28] [Rank 0] step:4001/10000 train_time:177890ms step_avg:44.46ms +[2025-09-11 10:25:28] [Rank 0] step:4021/10000 train_time:178579ms step_avg:44.41ms +[2025-09-11 10:25:28] [Rank 0] step:4021/10000 train_time:178579ms step_avg:44.41ms +[2025-09-11 10:25:29] [Rank 0] step:4041/10000 train_time:179254ms step_avg:44.36ms +[2025-09-11 10:25:29] [Rank 0] step:4041/10000 train_time:179254ms step_avg:44.36ms +[2025-09-11 10:25:30] [Rank 0] step:4061/10000 train_time:179929ms step_avg:44.31ms +[2025-09-11 10:25:30] [Rank 0] step:4061/10000 train_time:179929ms step_avg:44.31ms +[2025-09-11 10:25:30] [Rank 0] step:4081/10000 train_time:180604ms step_avg:44.25ms +[2025-09-11 10:25:30] [Rank 0] step:4081/10000 
train_time:180604ms step_avg:44.25ms +[2025-09-11 10:25:31] [Rank 0] step:4101/10000 train_time:181278ms step_avg:44.20ms +[2025-09-11 10:25:31] [Rank 0] step:4101/10000 train_time:181278ms step_avg:44.20ms +[2025-09-11 10:25:32] [Rank 0] step:4121/10000 train_time:181952ms step_avg:44.15ms +[2025-09-11 10:25:32] [Rank 0] step:4121/10000 train_time:181952ms step_avg:44.15ms +[2025-09-11 10:25:32] [Rank 0] step:4141/10000 train_time:182625ms step_avg:44.10ms +[2025-09-11 10:25:32] [Rank 0] step:4141/10000 train_time:182625ms step_avg:44.10ms +[2025-09-11 10:25:33] [Rank 0] step:4161/10000 train_time:183302ms step_avg:44.05ms +[2025-09-11 10:25:33] [Rank 0] step:4161/10000 train_time:183302ms step_avg:44.05ms +[2025-09-11 10:25:34] [Rank 0] step:4181/10000 train_time:183976ms step_avg:44.00ms +[2025-09-11 10:25:34] [Rank 0] step:4181/10000 train_time:183976ms step_avg:44.00ms +[2025-09-11 10:25:34] [Rank 0] step:4201/10000 train_time:184651ms step_avg:43.95ms +[2025-09-11 10:25:34] [Rank 0] step:4201/10000 train_time:184651ms step_avg:43.95ms +[2025-09-11 10:25:35] [Rank 0] step:4221/10000 train_time:185325ms step_avg:43.91ms +[2025-09-11 10:25:35] [Rank 0] step:4221/10000 train_time:185325ms step_avg:43.91ms +[2025-09-11 10:25:36] [Rank 0] step:4241/10000 train_time:185999ms step_avg:43.86ms +[2025-09-11 10:25:36] [Rank 0] step:4241/10000 train_time:185999ms step_avg:43.86ms +[2025-09-11 10:25:36] [Rank 0] step:4261/10000 train_time:186674ms step_avg:43.81ms +[2025-09-11 10:25:36] [Rank 0] step:4261/10000 train_time:186674ms step_avg:43.81ms +[2025-09-11 10:25:37] [Rank 0] step:4281/10000 train_time:187349ms step_avg:43.76ms +[2025-09-11 10:25:37] [Rank 0] step:4281/10000 train_time:187349ms step_avg:43.76ms +[2025-09-11 10:25:38] [Rank 0] step:4301/10000 train_time:188024ms step_avg:43.72ms +[2025-09-11 10:25:38] [Rank 0] step:4301/10000 train_time:188024ms step_avg:43.72ms +[2025-09-11 10:25:38] [Rank 0] step:4321/10000 train_time:188698ms step_avg:43.67ms 
+[2025-09-11 10:25:38] [Rank 0] step:4321/10000 train_time:188698ms step_avg:43.67ms +[2025-09-11 10:25:39] [Rank 0] step:4341/10000 train_time:189372ms step_avg:43.62ms +[2025-09-11 10:25:39] [Rank 0] step:4341/10000 train_time:189372ms step_avg:43.62ms +[2025-09-11 10:25:40] [Rank 0] step:4361/10000 train_time:190045ms step_avg:43.58ms +[2025-09-11 10:25:40] [Rank 0] step:4361/10000 train_time:190045ms step_avg:43.58ms +[2025-09-11 10:25:40] [Rank 0] step:4381/10000 train_time:190720ms step_avg:43.53ms +[2025-09-11 10:25:40] [Rank 0] step:4381/10000 train_time:190720ms step_avg:43.53ms +[2025-09-11 10:25:41] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 10:25:41] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 10:25:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 10:25:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 10:25:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 10:25:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 10:25:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:25:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:25:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 10:25:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 10:25:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 10:25:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 10:25:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 10:25:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 10:25:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 10:25:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 10:25:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 10:25:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 10:25:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 10:25:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 10:25:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 10:25:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 10:25:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 10:25:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 10:25:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 10:25:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 10:25:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 10:25:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 10:25:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 10:25:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 10:25:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 10:25:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 10:25:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 10:25:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 10:25:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 10:25:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 10:25:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 10:25:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 10:25:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 10:25:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 10:25:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 10:25:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 10:25:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 10:25:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 10:25:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:25:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:25:51] [Rank 0] PRINT: step:4400/10000 val_loss:4.8489 total_sharp:3.1212e-04 L1_sharp:1.7231e-04 L2_sharp:3.6088e-05 L3_sharp:2.4586e-05 L4_sharp:3.6353e-05 L5_sharp:1.6958e-05 L6_sharp:1.7429e-05 L7_sharp:3.5786e-05 L8_sharp:8.0654e-05 L9_sharp:4.2495e-05 L10_sharp:5.5236e-05 L11_sharp:8.9138e-05 L12_sharp:6.7068e-04 total_fnorm:4.5500e+01 total_l1_linf:1.2749e+05 total_spectral:2.2750e+01 L1_fnorm:1.2688e+01 L2_fnorm:1.2312e+01 L3_fnorm:1.2438e+01 L4_fnorm:1.2500e+01 L5_fnorm:1.2125e+01 L6_fnorm:1.2438e+01 L7_fnorm:1.2438e+01 L8_fnorm:1.1938e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2438e+01 L11_fnorm:1.2438e+01 L12_fnorm:1.1938e+01 L1_l1linf:3.2500e+00 L2_l1linf:3.1406e+00 L3_l1linf:3.0781e+00 L4_l1linf:3.0625e+00 L5_l1linf:2.9688e+00 L6_l1linf:2.9219e+00 L7_l1linf:2.9062e+00 L8_l1linf:2.8281e+00 L9_l1linf:2.7656e+00 L10_l1linf:2.7812e+00 L11_l1linf:2.7344e+00 L12_l1linf:2.4062e+00 L1_spectral:1.5782e-01 L2_spectral:1.5345e-01 L3_spectral:1.5509e-01 L4_spectral:1.5575e-01 L5_spectral:1.5312e-01 L6_spectral:1.5648e-01 L7_spectral:1.5623e-01 L8_spectral:1.5163e-01 L9_spectral:1.5512e-01 L10_spectral:1.5549e-01 L11_spectral:1.5421e-01 L12_spectral:1.5401e-01 train_time:191375ms step_avg:43.49ms +[2025-09-11 10:25:51] [Rank 0] PRINT: step:4400/10000 val_loss:4.8489 total_sharp:3.1212e-04 L1_sharp:1.7231e-04 L2_sharp:3.6088e-05 L3_sharp:2.4586e-05 L4_sharp:3.6353e-05 L5_sharp:1.6958e-05 L6_sharp:1.7429e-05 L7_sharp:3.5786e-05 L8_sharp:8.0654e-05 L9_sharp:4.2495e-05 L10_sharp:5.5236e-05 L11_sharp:8.9138e-05 L12_sharp:6.7068e-04 total_fnorm:4.5500e+01 total_l1_linf:1.2749e+05 total_spectral:2.2750e+01 L1_fnorm:1.2688e+01 L2_fnorm:1.2312e+01 L3_fnorm:1.2438e+01 L4_fnorm:1.2500e+01 L5_fnorm:1.2125e+01 L6_fnorm:1.2438e+01 L7_fnorm:1.2438e+01 L8_fnorm:1.1938e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2438e+01 L11_fnorm:1.2438e+01 L12_fnorm:1.1938e+01 L1_l1linf:3.2500e+00 L2_l1linf:3.1406e+00 L3_l1linf:3.0781e+00 L4_l1linf:3.0625e+00 L5_l1linf:2.9688e+00 
L6_l1linf:2.9219e+00 L7_l1linf:2.9062e+00 L8_l1linf:2.8281e+00 L9_l1linf:2.7656e+00 L10_l1linf:2.7812e+00 L11_l1linf:2.7344e+00 L12_l1linf:2.4062e+00 L1_spectral:1.5782e-01 L2_spectral:1.5345e-01 L3_spectral:1.5509e-01 L4_spectral:1.5575e-01 L5_spectral:1.5312e-01 L6_spectral:1.5648e-01 L7_spectral:1.5623e-01 L8_spectral:1.5163e-01 L9_spectral:1.5512e-01 L10_spectral:1.5549e-01 L11_spectral:1.5421e-01 L12_spectral:1.5401e-01 train_time:191375ms step_avg:43.49ms +[2025-09-11 10:25:52] [Rank 0] step:4401/10000 train_time:192607ms step_avg:43.76ms +[2025-09-11 10:25:52] [Rank 0] step:4401/10000 train_time:192607ms step_avg:43.76ms +[2025-09-11 10:25:53] [Rank 0] step:4421/10000 train_time:193313ms step_avg:43.73ms +[2025-09-11 10:25:53] [Rank 0] step:4421/10000 train_time:193313ms step_avg:43.73ms +[2025-09-11 10:25:54] [Rank 0] step:4441/10000 train_time:193990ms step_avg:43.68ms +[2025-09-11 10:25:54] [Rank 0] step:4441/10000 train_time:193990ms step_avg:43.68ms +[2025-09-11 10:25:54] [Rank 0] step:4461/10000 train_time:194668ms step_avg:43.64ms +[2025-09-11 10:25:54] [Rank 0] step:4461/10000 train_time:194668ms step_avg:43.64ms +[2025-09-11 10:25:55] [Rank 0] step:4481/10000 train_time:195345ms step_avg:43.59ms +[2025-09-11 10:25:55] [Rank 0] step:4481/10000 train_time:195345ms step_avg:43.59ms +[2025-09-11 10:25:56] [Rank 0] step:4501/10000 train_time:196024ms step_avg:43.55ms +[2025-09-11 10:25:56] [Rank 0] step:4501/10000 train_time:196024ms step_avg:43.55ms +[2025-09-11 10:25:56] [Rank 0] step:4521/10000 train_time:196703ms step_avg:43.51ms +[2025-09-11 10:25:56] [Rank 0] step:4521/10000 train_time:196703ms step_avg:43.51ms +[2025-09-11 10:25:57] [Rank 0] step:4541/10000 train_time:197381ms step_avg:43.47ms +[2025-09-11 10:25:57] [Rank 0] step:4541/10000 train_time:197381ms step_avg:43.47ms +[2025-09-11 10:25:58] [Rank 0] step:4561/10000 train_time:198058ms step_avg:43.42ms +[2025-09-11 10:25:58] [Rank 0] step:4561/10000 train_time:198058ms step_avg:43.42ms 
+[2025-09-11 10:25:58] [Rank 0] step:4581/10000 train_time:198736ms step_avg:43.38ms +[2025-09-11 10:25:58] [Rank 0] step:4581/10000 train_time:198736ms step_avg:43.38ms +[2025-09-11 10:25:59] [Rank 0] step:4601/10000 train_time:199414ms step_avg:43.34ms +[2025-09-11 10:25:59] [Rank 0] step:4601/10000 train_time:199414ms step_avg:43.34ms +[2025-09-11 10:26:00] [Rank 0] step:4621/10000 train_time:200091ms step_avg:43.30ms +[2025-09-11 10:26:00] [Rank 0] step:4621/10000 train_time:200091ms step_avg:43.30ms +[2025-09-11 10:26:01] [Rank 0] step:4641/10000 train_time:200768ms step_avg:43.26ms +[2025-09-11 10:26:01] [Rank 0] step:4641/10000 train_time:200768ms step_avg:43.26ms +[2025-09-11 10:26:01] [Rank 0] step:4661/10000 train_time:201445ms step_avg:43.22ms +[2025-09-11 10:26:01] [Rank 0] step:4661/10000 train_time:201445ms step_avg:43.22ms +[2025-09-11 10:26:02] [Rank 0] step:4681/10000 train_time:202123ms step_avg:43.18ms +[2025-09-11 10:26:02] [Rank 0] step:4681/10000 train_time:202123ms step_avg:43.18ms +[2025-09-11 10:26:03] [Rank 0] step:4701/10000 train_time:202801ms step_avg:43.14ms +[2025-09-11 10:26:03] [Rank 0] step:4701/10000 train_time:202801ms step_avg:43.14ms +[2025-09-11 10:26:03] [Rank 0] step:4721/10000 train_time:203478ms step_avg:43.10ms +[2025-09-11 10:26:03] [Rank 0] step:4721/10000 train_time:203478ms step_avg:43.10ms +[2025-09-11 10:26:04] [Rank 0] step:4741/10000 train_time:204155ms step_avg:43.06ms +[2025-09-11 10:26:04] [Rank 0] step:4741/10000 train_time:204155ms step_avg:43.06ms +[2025-09-11 10:26:05] [Rank 0] step:4761/10000 train_time:204834ms step_avg:43.02ms +[2025-09-11 10:26:05] [Rank 0] step:4761/10000 train_time:204834ms step_avg:43.02ms +[2025-09-11 10:26:05] [Rank 0] step:4781/10000 train_time:205510ms step_avg:42.98ms +[2025-09-11 10:26:05] [Rank 0] step:4781/10000 train_time:205510ms step_avg:42.98ms +[2025-09-11 10:26:06] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 10:26:06] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 10:26:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 10:26:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 10:26:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 10:26:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 10:26:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:26:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:26:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 10:26:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 10:26:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 10:26:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 10:26:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 10:26:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 10:26:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 10:26:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 10:26:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 10:26:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 10:26:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 10:26:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 10:26:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 10:26:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 10:26:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 10:26:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 10:26:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 10:26:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 10:26:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 10:26:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 10:26:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 10:26:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 10:26:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 10:26:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 10:26:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 10:26:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 10:26:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 10:26:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 10:26:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 10:26:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 10:26:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 10:26:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 10:26:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 10:26:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 10:26:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 10:26:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 10:26:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:26:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:26:16] [Rank 0] PRINT: step:4800/10000 val_loss:4.7958 total_sharp:3.4745e-04 L1_sharp:1.5069e-04 L2_sharp:5.5894e-05 L3_sharp:3.3768e-05 L4_sharp:2.5408e-05 L5_sharp:4.1784e-05 L6_sharp:2.6605e-05 L7_sharp:3.7349e-05 L8_sharp:7.9368e-05 L9_sharp:5.3240e-05 L10_sharp:6.1654e-05 L11_sharp:9.3338e-05 L12_sharp:4.9978e-04 total_fnorm:4.5500e+01 total_l1_linf:1.2646e+05 total_spectral:2.2750e+01 L1_fnorm:1.2688e+01 L2_fnorm:1.2312e+01 L3_fnorm:1.2438e+01 L4_fnorm:1.2438e+01 L5_fnorm:1.2125e+01 L6_fnorm:1.2438e+01 L7_fnorm:1.2375e+01 L8_fnorm:1.1812e+01 L9_fnorm:1.2438e+01 L10_fnorm:1.2438e+01 L11_fnorm:1.2438e+01 L12_fnorm:1.1938e+01 L1_l1linf:3.2031e+00 L2_l1linf:3.0781e+00 L3_l1linf:3.0938e+00 L4_l1linf:3.0938e+00 L5_l1linf:2.9375e+00 L6_l1linf:2.9062e+00 L7_l1linf:2.9219e+00 L8_l1linf:2.8125e+00 L9_l1linf:2.7031e+00 L10_l1linf:2.7188e+00 L11_l1linf:2.7500e+00 L12_l1linf:2.4688e+00 L1_spectral:1.5858e-01 L2_spectral:1.5401e-01 L3_spectral:1.5663e-01 L4_spectral:1.5572e-01 L5_spectral:1.5356e-01 L6_spectral:1.5664e-01 L7_spectral:1.5625e-01 L8_spectral:1.5210e-01 L9_spectral:1.5540e-01 L10_spectral:1.5535e-01 L11_spectral:1.5509e-01 L12_spectral:1.5374e-01 train_time:206169ms step_avg:42.95ms +[2025-09-11 10:26:16] [Rank 0] PRINT: step:4800/10000 
val_loss:4.7958 total_sharp:3.4745e-04 L1_sharp:1.5069e-04 L2_sharp:5.5894e-05 L3_sharp:3.3768e-05 L4_sharp:2.5408e-05 L5_sharp:4.1784e-05 L6_sharp:2.6605e-05 L7_sharp:3.7349e-05 L8_sharp:7.9368e-05 L9_sharp:5.3240e-05 L10_sharp:6.1654e-05 L11_sharp:9.3338e-05 L12_sharp:4.9978e-04 total_fnorm:4.5500e+01 total_l1_linf:1.2646e+05 total_spectral:2.2750e+01 L1_fnorm:1.2688e+01 L2_fnorm:1.2312e+01 L3_fnorm:1.2438e+01 L4_fnorm:1.2438e+01 L5_fnorm:1.2125e+01 L6_fnorm:1.2438e+01 L7_fnorm:1.2375e+01 L8_fnorm:1.1812e+01 L9_fnorm:1.2438e+01 L10_fnorm:1.2438e+01 L11_fnorm:1.2438e+01 L12_fnorm:1.1938e+01 L1_l1linf:3.2031e+00 L2_l1linf:3.0781e+00 L3_l1linf:3.0938e+00 L4_l1linf:3.0938e+00 L5_l1linf:2.9375e+00 L6_l1linf:2.9062e+00 L7_l1linf:2.9219e+00 L8_l1linf:2.8125e+00 L9_l1linf:2.7031e+00 L10_l1linf:2.7188e+00 L11_l1linf:2.7500e+00 L12_l1linf:2.4688e+00 L1_spectral:1.5858e-01 L2_spectral:1.5401e-01 L3_spectral:1.5663e-01 L4_spectral:1.5572e-01 L5_spectral:1.5356e-01 L6_spectral:1.5664e-01 L7_spectral:1.5625e-01 L8_spectral:1.5210e-01 L9_spectral:1.5540e-01 L10_spectral:1.5535e-01 L11_spectral:1.5509e-01 L12_spectral:1.5374e-01 train_time:206169ms step_avg:42.95ms +[2025-09-11 10:26:17] [Rank 0] step:4801/10000 train_time:207375ms step_avg:43.19ms +[2025-09-11 10:26:17] [Rank 0] step:4801/10000 train_time:207375ms step_avg:43.19ms +[2025-09-11 10:26:18] [Rank 0] step:4821/10000 train_time:208086ms step_avg:43.16ms +[2025-09-11 10:26:18] [Rank 0] step:4821/10000 train_time:208086ms step_avg:43.16ms +[2025-09-11 10:26:18] [Rank 0] step:4841/10000 train_time:208764ms step_avg:43.12ms +[2025-09-11 10:26:18] [Rank 0] step:4841/10000 train_time:208764ms step_avg:43.12ms +[2025-09-11 10:26:19] [Rank 0] step:4861/10000 train_time:209442ms step_avg:43.09ms +[2025-09-11 10:26:19] [Rank 0] step:4861/10000 train_time:209442ms step_avg:43.09ms +[2025-09-11 10:26:20] [Rank 0] step:4881/10000 train_time:210120ms step_avg:43.05ms +[2025-09-11 10:26:20] [Rank 0] step:4881/10000 
train_time:210120ms step_avg:43.05ms +[2025-09-11 10:26:21] [Rank 0] step:4901/10000 train_time:210799ms step_avg:43.01ms +[2025-09-11 10:26:21] [Rank 0] step:4901/10000 train_time:210799ms step_avg:43.01ms +[2025-09-11 10:26:21] [Rank 0] step:4921/10000 train_time:211477ms step_avg:42.97ms +[2025-09-11 10:26:21] [Rank 0] step:4921/10000 train_time:211477ms step_avg:42.97ms +[2025-09-11 10:26:22] [Rank 0] step:4941/10000 train_time:212154ms step_avg:42.94ms +[2025-09-11 10:26:22] [Rank 0] step:4941/10000 train_time:212154ms step_avg:42.94ms +[2025-09-11 10:26:23] [Rank 0] step:4961/10000 train_time:212832ms step_avg:42.90ms +[2025-09-11 10:26:23] [Rank 0] step:4961/10000 train_time:212832ms step_avg:42.90ms +[2025-09-11 10:26:23] [Rank 0] step:4981/10000 train_time:213509ms step_avg:42.86ms +[2025-09-11 10:26:23] [Rank 0] step:4981/10000 train_time:213509ms step_avg:42.86ms +[2025-09-11 10:26:24] [Rank 0] step:5001/10000 train_time:214188ms step_avg:42.83ms +[2025-09-11 10:26:24] [Rank 0] step:5001/10000 train_time:214188ms step_avg:42.83ms +[2025-09-11 10:26:25] [Rank 0] step:5021/10000 train_time:214864ms step_avg:42.79ms +[2025-09-11 10:26:25] [Rank 0] step:5021/10000 train_time:214864ms step_avg:42.79ms +[2025-09-11 10:26:25] [Rank 0] step:5041/10000 train_time:215541ms step_avg:42.76ms +[2025-09-11 10:26:25] [Rank 0] step:5041/10000 train_time:215541ms step_avg:42.76ms +[2025-09-11 10:26:26] [Rank 0] step:5061/10000 train_time:216218ms step_avg:42.72ms +[2025-09-11 10:26:26] [Rank 0] step:5061/10000 train_time:216218ms step_avg:42.72ms +[2025-09-11 10:26:27] [Rank 0] step:5081/10000 train_time:216896ms step_avg:42.69ms +[2025-09-11 10:26:27] [Rank 0] step:5081/10000 train_time:216896ms step_avg:42.69ms +[2025-09-11 10:26:27] [Rank 0] step:5101/10000 train_time:217573ms step_avg:42.65ms +[2025-09-11 10:26:27] [Rank 0] step:5101/10000 train_time:217573ms step_avg:42.65ms +[2025-09-11 10:26:28] [Rank 0] step:5121/10000 train_time:218250ms step_avg:42.62ms 
+[2025-09-11 10:26:28] [Rank 0] step:5121/10000 train_time:218250ms step_avg:42.62ms +[2025-09-11 10:26:29] [Rank 0] step:5141/10000 train_time:218927ms step_avg:42.58ms +[2025-09-11 10:26:29] [Rank 0] step:5141/10000 train_time:218927ms step_avg:42.58ms +[2025-09-11 10:26:29] [Rank 0] step:5161/10000 train_time:219604ms step_avg:42.55ms +[2025-09-11 10:26:29] [Rank 0] step:5161/10000 train_time:219604ms step_avg:42.55ms +[2025-09-11 10:26:30] [Rank 0] step:5181/10000 train_time:220281ms step_avg:42.52ms +[2025-09-11 10:26:30] [Rank 0] step:5181/10000 train_time:220281ms step_avg:42.52ms +[2025-09-11 10:26:31] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 10:26:31] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 10:26:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 10:26:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 10:26:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 10:26:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 10:26:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:26:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:26:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 10:26:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 10:26:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 10:26:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 10:26:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 10:26:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 10:26:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 10:26:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 10:26:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 10:26:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 10:26:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 10:26:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 10:26:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 10:26:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 10:26:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 10:26:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 10:26:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 10:26:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 10:26:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 10:26:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 10:26:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 10:26:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 10:26:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 10:26:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 10:26:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 10:26:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 10:26:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 10:26:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 10:26:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 10:26:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 10:26:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 10:26:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 10:26:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 10:26:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 10:26:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 10:26:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 10:26:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:26:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:26:44] [Rank 0] PRINT: step:5200/10000 val_loss:4.7622 total_sharp:3.7945e-04 L1_sharp:1.2099e-04 L2_sharp:4.5307e-05 L3_sharp:2.3624e-05 L4_sharp:4.7033e-05 L5_sharp:3.1748e-05 L6_sharp:2.1195e-05 L7_sharp:2.0127e-05 L8_sharp:6.5855e-05 L9_sharp:5.5262e-05 L10_sharp:7.6027e-05 L11_sharp:1.0220e-04 L12_sharp:8.2516e-04 total_fnorm:4.5000e+01 total_l1_linf:1.2237e+05 total_spectral:2.2625e+01 L1_fnorm:1.2625e+01 L2_fnorm:1.2312e+01 L3_fnorm:1.2438e+01 L4_fnorm:1.2500e+01 L5_fnorm:1.2188e+01 L6_fnorm:1.2500e+01 L7_fnorm:1.2438e+01 L8_fnorm:1.1875e+01 L9_fnorm:1.2438e+01 L10_fnorm:1.2438e+01 L11_fnorm:1.2438e+01 L12_fnorm:1.2062e+01 L1_l1linf:3.1875e+00 L2_l1linf:3.0781e+00 L3_l1linf:3.0781e+00 L4_l1linf:3.0625e+00 L5_l1linf:2.9375e+00 L6_l1linf:2.9219e+00 L7_l1linf:2.9375e+00 L8_l1linf:2.7656e+00 L9_l1linf:2.7344e+00 L10_l1linf:2.6562e+00 L11_l1linf:2.6719e+00 L12_l1linf:2.4062e+00 L1_spectral:1.5916e-01 L2_spectral:1.5391e-01 L3_spectral:1.5717e-01 L4_spectral:1.5667e-01 L5_spectral:1.5469e-01 L6_spectral:1.5830e-01 L7_spectral:1.5634e-01 L8_spectral:1.5321e-01 L9_spectral:1.5654e-01 L10_spectral:1.5600e-01 L11_spectral:1.5563e-01 L12_spectral:1.5547e-01 train_time:220946ms step_avg:42.49ms +[2025-09-11 10:26:44] [Rank 0] PRINT: step:5200/10000 val_loss:4.7622 total_sharp:3.7945e-04 L1_sharp:1.2099e-04 L2_sharp:4.5307e-05 L3_sharp:2.3624e-05 L4_sharp:4.7033e-05 L5_sharp:3.1748e-05 L6_sharp:2.1195e-05 L7_sharp:2.0127e-05 L8_sharp:6.5855e-05 L9_sharp:5.5262e-05 L10_sharp:7.6027e-05 L11_sharp:1.0220e-04 L12_sharp:8.2516e-04 total_fnorm:4.5000e+01 total_l1_linf:1.2237e+05 total_spectral:2.2625e+01 L1_fnorm:1.2625e+01 L2_fnorm:1.2312e+01 L3_fnorm:1.2438e+01 L4_fnorm:1.2500e+01 L5_fnorm:1.2188e+01 L6_fnorm:1.2500e+01 L7_fnorm:1.2438e+01 L8_fnorm:1.1875e+01 L9_fnorm:1.2438e+01 L10_fnorm:1.2438e+01 L11_fnorm:1.2438e+01 L12_fnorm:1.2062e+01 L1_l1linf:3.1875e+00 L2_l1linf:3.0781e+00 L3_l1linf:3.0781e+00 L4_l1linf:3.0625e+00 L5_l1linf:2.9375e+00 
L6_l1linf:2.9219e+00 L7_l1linf:2.9375e+00 L8_l1linf:2.7656e+00 L9_l1linf:2.7344e+00 L10_l1linf:2.6562e+00 L11_l1linf:2.6719e+00 L12_l1linf:2.4062e+00 L1_spectral:1.5916e-01 L2_spectral:1.5391e-01 L3_spectral:1.5717e-01 L4_spectral:1.5667e-01 L5_spectral:1.5469e-01 L6_spectral:1.5830e-01 L7_spectral:1.5634e-01 L8_spectral:1.5321e-01 L9_spectral:1.5654e-01 L10_spectral:1.5600e-01 L11_spectral:1.5563e-01 L12_spectral:1.5547e-01 train_time:220946ms step_avg:42.49ms +[2025-09-11 10:26:45] [Rank 0] step:5201/10000 train_time:222173ms step_avg:42.72ms +[2025-09-11 10:26:45] [Rank 0] step:5201/10000 train_time:222173ms step_avg:42.72ms +[2025-09-11 10:26:46] [Rank 0] step:5221/10000 train_time:222891ms step_avg:42.69ms +[2025-09-11 10:26:46] [Rank 0] step:5221/10000 train_time:222891ms step_avg:42.69ms +[2025-09-11 10:26:47] [Rank 0] step:5241/10000 train_time:223578ms step_avg:42.66ms +[2025-09-11 10:26:47] [Rank 0] step:5241/10000 train_time:223578ms step_avg:42.66ms +[2025-09-11 10:26:47] [Rank 0] step:5261/10000 train_time:224266ms step_avg:42.63ms +[2025-09-11 10:26:47] [Rank 0] step:5261/10000 train_time:224266ms step_avg:42.63ms +[2025-09-11 10:26:48] [Rank 0] step:5281/10000 train_time:224952ms step_avg:42.60ms +[2025-09-11 10:26:48] [Rank 0] step:5281/10000 train_time:224952ms step_avg:42.60ms +[2025-09-11 10:26:49] [Rank 0] step:5301/10000 train_time:225640ms step_avg:42.57ms +[2025-09-11 10:26:49] [Rank 0] step:5301/10000 train_time:225640ms step_avg:42.57ms +[2025-09-11 10:26:49] [Rank 0] step:5321/10000 train_time:226326ms step_avg:42.53ms +[2025-09-11 10:26:49] [Rank 0] step:5321/10000 train_time:226326ms step_avg:42.53ms +[2025-09-11 10:26:50] [Rank 0] step:5341/10000 train_time:227013ms step_avg:42.50ms +[2025-09-11 10:26:50] [Rank 0] step:5341/10000 train_time:227013ms step_avg:42.50ms +[2025-09-11 10:26:51] [Rank 0] step:5361/10000 train_time:227700ms step_avg:42.47ms +[2025-09-11 10:26:51] [Rank 0] step:5361/10000 train_time:227700ms step_avg:42.47ms 
+[2025-09-11 10:26:51] [Rank 0] step:5381/10000 train_time:228387ms step_avg:42.44ms +[2025-09-11 10:26:51] [Rank 0] step:5381/10000 train_time:228387ms step_avg:42.44ms +[2025-09-11 10:26:52] [Rank 0] step:5401/10000 train_time:229073ms step_avg:42.41ms +[2025-09-11 10:26:52] [Rank 0] step:5401/10000 train_time:229073ms step_avg:42.41ms +[2025-09-11 10:26:53] [Rank 0] step:5421/10000 train_time:229762ms step_avg:42.38ms +[2025-09-11 10:26:53] [Rank 0] step:5421/10000 train_time:229762ms step_avg:42.38ms +[2025-09-11 10:26:53] [Rank 0] step:5441/10000 train_time:230448ms step_avg:42.35ms +[2025-09-11 10:26:53] [Rank 0] step:5441/10000 train_time:230448ms step_avg:42.35ms +[2025-09-11 10:26:54] [Rank 0] step:5461/10000 train_time:231136ms step_avg:42.32ms +[2025-09-11 10:26:54] [Rank 0] step:5461/10000 train_time:231136ms step_avg:42.32ms +[2025-09-11 10:26:55] [Rank 0] step:5481/10000 train_time:231823ms step_avg:42.30ms +[2025-09-11 10:26:55] [Rank 0] step:5481/10000 train_time:231823ms step_avg:42.30ms +[2025-09-11 10:26:56] [Rank 0] step:5501/10000 train_time:232510ms step_avg:42.27ms +[2025-09-11 10:26:56] [Rank 0] step:5501/10000 train_time:232510ms step_avg:42.27ms +[2025-09-11 10:26:56] [Rank 0] step:5521/10000 train_time:233195ms step_avg:42.24ms +[2025-09-11 10:26:56] [Rank 0] step:5521/10000 train_time:233195ms step_avg:42.24ms +[2025-09-11 10:26:57] [Rank 0] step:5541/10000 train_time:233885ms step_avg:42.21ms +[2025-09-11 10:26:57] [Rank 0] step:5541/10000 train_time:233885ms step_avg:42.21ms +[2025-09-11 10:26:58] [Rank 0] step:5561/10000 train_time:234573ms step_avg:42.18ms +[2025-09-11 10:26:58] [Rank 0] step:5561/10000 train_time:234573ms step_avg:42.18ms +[2025-09-11 10:26:58] [Rank 0] step:5581/10000 train_time:235261ms step_avg:42.15ms +[2025-09-11 10:26:58] [Rank 0] step:5581/10000 train_time:235261ms step_avg:42.15ms +[2025-09-11 10:26:59] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 10:26:59] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 10:27:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 10:27:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 10:27:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 10:27:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 10:27:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:27:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:27:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 10:27:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 10:27:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 10:27:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 10:27:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 10:27:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 10:27:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 10:27:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 10:27:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 10:27:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 10:27:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 10:27:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 10:27:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 10:27:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 10:27:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 10:27:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 10:27:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 10:27:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 10:27:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 10:27:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 10:27:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 10:27:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 10:27:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 10:27:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 10:27:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 10:27:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 10:27:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 10:27:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 10:27:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 10:27:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 10:27:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 10:27:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 10:27:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 10:27:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 10:27:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 10:27:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 10:27:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:27:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:27:09] [Rank 0] PRINT: step:5600/10000 val_loss:4.7288 total_sharp:3.0847e-04 L1_sharp:1.2316e-04 L2_sharp:4.5633e-05 L3_sharp:2.8716e-05 L4_sharp:1.3541e-05 L5_sharp:2.5193e-05 L6_sharp:2.0057e-05 L7_sharp:2.4046e-05 L8_sharp:7.3154e-05 L9_sharp:4.9249e-05 L10_sharp:6.2797e-05 L11_sharp:8.9038e-05 L12_sharp:3.9279e-04 total_fnorm:4.5000e+01 total_l1_linf:1.2134e+05 total_spectral:2.2500e+01 L1_fnorm:1.2625e+01 L2_fnorm:1.2125e+01 L3_fnorm:1.2438e+01 L4_fnorm:1.2375e+01 L5_fnorm:1.2000e+01 L6_fnorm:1.2375e+01 L7_fnorm:1.2312e+01 L8_fnorm:1.1812e+01 L9_fnorm:1.2375e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2312e+01 L12_fnorm:1.2062e+01 L1_l1linf:3.1406e+00 L2_l1linf:3.0312e+00 L3_l1linf:3.0312e+00 L4_l1linf:3.0000e+00 L5_l1linf:2.8750e+00 L6_l1linf:2.8750e+00 L7_l1linf:2.8750e+00 L8_l1linf:2.7344e+00 L9_l1linf:2.6406e+00 L10_l1linf:2.6094e+00 L11_l1linf:2.7031e+00 L12_l1linf:2.4375e+00 L1_spectral:1.5893e-01 L2_spectral:1.5401e-01 L3_spectral:1.5754e-01 L4_spectral:1.5630e-01 L5_spectral:1.5469e-01 L6_spectral:1.5746e-01 L7_spectral:1.5746e-01 L8_spectral:1.5302e-01 L9_spectral:1.5726e-01 L10_spectral:1.5774e-01 L11_spectral:1.5667e-01 L12_spectral:1.5549e-01 train_time:235927ms step_avg:42.13ms +[2025-09-11 10:27:09] [Rank 0] PRINT: step:5600/10000 
val_loss:4.7288 total_sharp:3.0847e-04 L1_sharp:1.2316e-04 L2_sharp:4.5633e-05 L3_sharp:2.8716e-05 L4_sharp:1.3541e-05 L5_sharp:2.5193e-05 L6_sharp:2.0057e-05 L7_sharp:2.4046e-05 L8_sharp:7.3154e-05 L9_sharp:4.9249e-05 L10_sharp:6.2797e-05 L11_sharp:8.9038e-05 L12_sharp:3.9279e-04 total_fnorm:4.5000e+01 total_l1_linf:1.2134e+05 total_spectral:2.2500e+01 L1_fnorm:1.2625e+01 L2_fnorm:1.2125e+01 L3_fnorm:1.2438e+01 L4_fnorm:1.2375e+01 L5_fnorm:1.2000e+01 L6_fnorm:1.2375e+01 L7_fnorm:1.2312e+01 L8_fnorm:1.1812e+01 L9_fnorm:1.2375e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2312e+01 L12_fnorm:1.2062e+01 L1_l1linf:3.1406e+00 L2_l1linf:3.0312e+00 L3_l1linf:3.0312e+00 L4_l1linf:3.0000e+00 L5_l1linf:2.8750e+00 L6_l1linf:2.8750e+00 L7_l1linf:2.8750e+00 L8_l1linf:2.7344e+00 L9_l1linf:2.6406e+00 L10_l1linf:2.6094e+00 L11_l1linf:2.7031e+00 L12_l1linf:2.4375e+00 L1_spectral:1.5893e-01 L2_spectral:1.5401e-01 L3_spectral:1.5754e-01 L4_spectral:1.5630e-01 L5_spectral:1.5469e-01 L6_spectral:1.5746e-01 L7_spectral:1.5746e-01 L8_spectral:1.5302e-01 L9_spectral:1.5726e-01 L10_spectral:1.5774e-01 L11_spectral:1.5667e-01 L12_spectral:1.5549e-01 train_time:235927ms step_avg:42.13ms +[2025-09-11 10:27:10] [Rank 0] step:5601/10000 train_time:237191ms step_avg:42.35ms +[2025-09-11 10:27:10] [Rank 0] step:5601/10000 train_time:237191ms step_avg:42.35ms +[2025-09-11 10:27:11] [Rank 0] step:5621/10000 train_time:237910ms step_avg:42.33ms +[2025-09-11 10:27:11] [Rank 0] step:5621/10000 train_time:237910ms step_avg:42.33ms +[2025-09-11 10:27:12] [Rank 0] step:5641/10000 train_time:238596ms step_avg:42.30ms +[2025-09-11 10:27:12] [Rank 0] step:5641/10000 train_time:238596ms step_avg:42.30ms +[2025-09-11 10:27:12] [Rank 0] step:5661/10000 train_time:239282ms step_avg:42.27ms +[2025-09-11 10:27:12] [Rank 0] step:5661/10000 train_time:239282ms step_avg:42.27ms +[2025-09-11 10:27:13] [Rank 0] step:5681/10000 train_time:239969ms step_avg:42.24ms +[2025-09-11 10:27:13] [Rank 0] step:5681/10000 
train_time:239969ms step_avg:42.24ms +[2025-09-11 10:27:14] [Rank 0] step:5701/10000 train_time:240658ms step_avg:42.21ms +[2025-09-11 10:27:14] [Rank 0] step:5701/10000 train_time:240658ms step_avg:42.21ms +[2025-09-11 10:27:14] [Rank 0] step:5721/10000 train_time:241344ms step_avg:42.19ms +[2025-09-11 10:27:14] [Rank 0] step:5721/10000 train_time:241344ms step_avg:42.19ms +[2025-09-11 10:27:15] [Rank 0] step:5741/10000 train_time:242032ms step_avg:42.16ms +[2025-09-11 10:27:15] [Rank 0] step:5741/10000 train_time:242032ms step_avg:42.16ms +[2025-09-11 10:27:16] [Rank 0] step:5761/10000 train_time:242720ms step_avg:42.13ms +[2025-09-11 10:27:16] [Rank 0] step:5761/10000 train_time:242720ms step_avg:42.13ms +[2025-09-11 10:27:17] [Rank 0] step:5781/10000 train_time:243922ms step_avg:42.19ms +[2025-09-11 10:27:17] [Rank 0] step:5781/10000 train_time:243922ms step_avg:42.19ms +[2025-09-11 10:27:18] [Rank 0] step:5801/10000 train_time:244610ms step_avg:42.17ms +[2025-09-11 10:27:18] [Rank 0] step:5801/10000 train_time:244610ms step_avg:42.17ms +[2025-09-11 10:27:18] [Rank 0] step:5821/10000 train_time:245295ms step_avg:42.14ms +[2025-09-11 10:27:18] [Rank 0] step:5821/10000 train_time:245295ms step_avg:42.14ms +[2025-09-11 10:27:19] [Rank 0] step:5841/10000 train_time:246228ms step_avg:42.16ms +[2025-09-11 10:27:19] [Rank 0] step:5841/10000 train_time:246228ms step_avg:42.16ms +[2025-09-11 10:27:20] [Rank 0] step:5861/10000 train_time:246913ms step_avg:42.13ms +[2025-09-11 10:27:20] [Rank 0] step:5861/10000 train_time:246913ms step_avg:42.13ms +[2025-09-11 10:27:21] [Rank 0] step:5881/10000 train_time:247600ms step_avg:42.10ms +[2025-09-11 10:27:21] [Rank 0] step:5881/10000 train_time:247600ms step_avg:42.10ms +[2025-09-11 10:27:21] [Rank 0] step:5901/10000 train_time:248286ms step_avg:42.08ms +[2025-09-11 10:27:21] [Rank 0] step:5901/10000 train_time:248286ms step_avg:42.08ms +[2025-09-11 10:27:22] [Rank 0] step:5921/10000 train_time:248976ms step_avg:42.05ms 
+[2025-09-11 10:27:22] [Rank 0] step:5921/10000 train_time:248976ms step_avg:42.05ms +[2025-09-11 10:27:23] [Rank 0] step:5941/10000 train_time:249663ms step_avg:42.02ms +[2025-09-11 10:27:23] [Rank 0] step:5941/10000 train_time:249663ms step_avg:42.02ms +[2025-09-11 10:27:23] [Rank 0] step:5961/10000 train_time:250351ms step_avg:42.00ms +[2025-09-11 10:27:23] [Rank 0] step:5961/10000 train_time:250351ms step_avg:42.00ms +[2025-09-11 10:27:24] [Rank 0] step:5981/10000 train_time:251039ms step_avg:41.97ms +[2025-09-11 10:27:24] [Rank 0] step:5981/10000 train_time:251039ms step_avg:41.97ms +[2025-09-11 10:27:25] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 10:27:25] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 10:27:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 10:27:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 10:27:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 10:27:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 10:27:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:27:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:27:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 10:27:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 10:27:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 10:27:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 10:27:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 10:27:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 10:27:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 10:27:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 10:27:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 10:27:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 10:27:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 10:27:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 10:27:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 10:27:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 10:27:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 10:27:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 10:27:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 10:27:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 10:27:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 10:27:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 10:27:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 10:27:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 10:27:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 10:27:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 10:27:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 10:27:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 10:27:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 10:27:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 10:27:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 10:27:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 10:27:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 10:27:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 10:27:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 10:27:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 10:27:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 10:27:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 10:27:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:27:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:27:34] [Rank 0] PRINT: step:6000/10000 val_loss:4.6824 total_sharp:2.5613e-04 L1_sharp:1.2774e-04 L2_sharp:1.5089e-05 L3_sharp:2.0096e-05 L4_sharp:2.2012e-05 L5_sharp:2.3411e-05 L6_sharp:1.5936e-05 L7_sharp:2.4082e-05 L8_sharp:7.8946e-05 L9_sharp:5.0386e-05 L10_sharp:6.3715e-05 L11_sharp:8.7053e-05 L12_sharp:2.7752e-04 total_fnorm:4.5000e+01 total_l1_linf:1.2083e+05 total_spectral:2.2625e+01 L1_fnorm:1.2625e+01 L2_fnorm:1.2188e+01 L3_fnorm:1.2438e+01 L4_fnorm:1.2438e+01 L5_fnorm:1.2125e+01 L6_fnorm:1.2438e+01 L7_fnorm:1.2375e+01 L8_fnorm:1.1812e+01 L9_fnorm:1.2375e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2438e+01 L12_fnorm:1.2062e+01 L1_l1linf:3.1250e+00 L2_l1linf:3.0312e+00 L3_l1linf:3.0312e+00 L4_l1linf:3.0625e+00 L5_l1linf:2.9062e+00 L6_l1linf:2.9062e+00 L7_l1linf:2.8750e+00 L8_l1linf:2.7188e+00 L9_l1linf:2.6250e+00 L10_l1linf:2.6562e+00 L11_l1linf:2.6406e+00 L12_l1linf:2.4688e+00 L1_spectral:1.5990e-01 L2_spectral:1.5476e-01 L3_spectral:1.5763e-01 L4_spectral:1.5731e-01 L5_spectral:1.5480e-01 L6_spectral:1.5753e-01 L7_spectral:1.5746e-01 L8_spectral:1.5443e-01 L9_spectral:1.5718e-01 L10_spectral:1.5717e-01 L11_spectral:1.5728e-01 L12_spectral:1.5647e-01 train_time:251708ms step_avg:41.95ms +[2025-09-11 10:27:34] [Rank 0] PRINT: step:6000/10000 val_loss:4.6824 total_sharp:2.5613e-04 L1_sharp:1.2774e-04 L2_sharp:1.5089e-05 L3_sharp:2.0096e-05 L4_sharp:2.2012e-05 L5_sharp:2.3411e-05 L6_sharp:1.5936e-05 L7_sharp:2.4082e-05 L8_sharp:7.8946e-05 L9_sharp:5.0386e-05 L10_sharp:6.3715e-05 L11_sharp:8.7053e-05 L12_sharp:2.7752e-04 total_fnorm:4.5000e+01 total_l1_linf:1.2083e+05 total_spectral:2.2625e+01 L1_fnorm:1.2625e+01 L2_fnorm:1.2188e+01 L3_fnorm:1.2438e+01 L4_fnorm:1.2438e+01 L5_fnorm:1.2125e+01 L6_fnorm:1.2438e+01 L7_fnorm:1.2375e+01 L8_fnorm:1.1812e+01 L9_fnorm:1.2375e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2438e+01 L12_fnorm:1.2062e+01 L1_l1linf:3.1250e+00 L2_l1linf:3.0312e+00 L3_l1linf:3.0312e+00 L4_l1linf:3.0625e+00 L5_l1linf:2.9062e+00 
L6_l1linf:2.9062e+00 L7_l1linf:2.8750e+00 L8_l1linf:2.7188e+00 L9_l1linf:2.6250e+00 L10_l1linf:2.6562e+00 L11_l1linf:2.6406e+00 L12_l1linf:2.4688e+00 L1_spectral:1.5990e-01 L2_spectral:1.5476e-01 L3_spectral:1.5763e-01 L4_spectral:1.5731e-01 L5_spectral:1.5480e-01 L6_spectral:1.5753e-01 L7_spectral:1.5746e-01 L8_spectral:1.5443e-01 L9_spectral:1.5718e-01 L10_spectral:1.5717e-01 L11_spectral:1.5728e-01 L12_spectral:1.5647e-01 train_time:251708ms step_avg:41.95ms +[2025-09-11 10:27:36] [Rank 0] step:6001/10000 train_time:252984ms step_avg:42.16ms +[2025-09-11 10:27:36] [Rank 0] step:6001/10000 train_time:252984ms step_avg:42.16ms +[2025-09-11 10:27:36] [Rank 0] step:6021/10000 train_time:253694ms step_avg:42.13ms +[2025-09-11 10:27:36] [Rank 0] step:6021/10000 train_time:253694ms step_avg:42.13ms +[2025-09-11 10:27:37] [Rank 0] step:6041/10000 train_time:254388ms step_avg:42.11ms +[2025-09-11 10:27:37] [Rank 0] step:6041/10000 train_time:254388ms step_avg:42.11ms +[2025-09-11 10:27:38] [Rank 0] step:6061/10000 train_time:255078ms step_avg:42.09ms +[2025-09-11 10:27:38] [Rank 0] step:6061/10000 train_time:255078ms step_avg:42.09ms +[2025-09-11 10:27:39] [Rank 0] step:6081/10000 train_time:255769ms step_avg:42.06ms +[2025-09-11 10:27:39] [Rank 0] step:6081/10000 train_time:255769ms step_avg:42.06ms +[2025-09-11 10:27:39] [Rank 0] step:6101/10000 train_time:256459ms step_avg:42.04ms +[2025-09-11 10:27:39] [Rank 0] step:6101/10000 train_time:256459ms step_avg:42.04ms +[2025-09-11 10:27:40] [Rank 0] step:6121/10000 train_time:257150ms step_avg:42.01ms +[2025-09-11 10:27:40] [Rank 0] step:6121/10000 train_time:257150ms step_avg:42.01ms +[2025-09-11 10:27:41] [Rank 0] step:6141/10000 train_time:257839ms step_avg:41.99ms +[2025-09-11 10:27:41] [Rank 0] step:6141/10000 train_time:257839ms step_avg:41.99ms +[2025-09-11 10:27:41] [Rank 0] step:6161/10000 train_time:258529ms step_avg:41.96ms +[2025-09-11 10:27:41] [Rank 0] step:6161/10000 train_time:258529ms step_avg:41.96ms 
+[2025-09-11 10:27:42] [Rank 0] step:6181/10000 train_time:259218ms step_avg:41.94ms +[2025-09-11 10:27:42] [Rank 0] step:6181/10000 train_time:259218ms step_avg:41.94ms +[2025-09-11 10:27:43] [Rank 0] step:6201/10000 train_time:259909ms step_avg:41.91ms +[2025-09-11 10:27:43] [Rank 0] step:6201/10000 train_time:259909ms step_avg:41.91ms +[2025-09-11 10:27:43] [Rank 0] step:6221/10000 train_time:260599ms step_avg:41.89ms +[2025-09-11 10:27:43] [Rank 0] step:6221/10000 train_time:260599ms step_avg:41.89ms +[2025-09-11 10:27:44] [Rank 0] step:6241/10000 train_time:261290ms step_avg:41.87ms +[2025-09-11 10:27:44] [Rank 0] step:6241/10000 train_time:261290ms step_avg:41.87ms +[2025-09-11 10:27:45] [Rank 0] step:6261/10000 train_time:261978ms step_avg:41.84ms +[2025-09-11 10:27:45] [Rank 0] step:6261/10000 train_time:261978ms step_avg:41.84ms +[2025-09-11 10:27:45] [Rank 0] step:6281/10000 train_time:262668ms step_avg:41.82ms +[2025-09-11 10:27:45] [Rank 0] step:6281/10000 train_time:262668ms step_avg:41.82ms +[2025-09-11 10:27:46] [Rank 0] step:6301/10000 train_time:263357ms step_avg:41.80ms +[2025-09-11 10:27:46] [Rank 0] step:6301/10000 train_time:263357ms step_avg:41.80ms +[2025-09-11 10:27:47] [Rank 0] step:6321/10000 train_time:264049ms step_avg:41.77ms +[2025-09-11 10:27:47] [Rank 0] step:6321/10000 train_time:264049ms step_avg:41.77ms +[2025-09-11 10:27:47] [Rank 0] step:6341/10000 train_time:264740ms step_avg:41.75ms +[2025-09-11 10:27:47] [Rank 0] step:6341/10000 train_time:264740ms step_avg:41.75ms +[2025-09-11 10:27:48] [Rank 0] step:6361/10000 train_time:265431ms step_avg:41.73ms +[2025-09-11 10:27:48] [Rank 0] step:6361/10000 train_time:265431ms step_avg:41.73ms +[2025-09-11 10:27:49] [Rank 0] step:6381/10000 train_time:266120ms step_avg:41.71ms +[2025-09-11 10:27:49] [Rank 0] step:6381/10000 train_time:266120ms step_avg:41.71ms +[2025-09-11 10:27:50] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 10:27:50] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 10:27:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 10:27:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 10:27:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 10:27:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 10:27:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:27:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:27:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 10:27:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 10:27:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 10:27:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 10:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 10:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 10:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 10:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 10:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 10:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 10:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 10:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 10:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 10:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 10:27:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 10:27:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 10:27:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 10:27:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 10:27:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 10:27:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 10:27:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 10:27:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 10:27:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 10:27:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 10:27:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 10:27:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 10:27:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 10:27:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 10:27:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 10:27:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 10:27:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 10:27:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 10:27:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 10:27:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 10:27:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 10:27:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 10:27:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:27:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:27:59] [Rank 0] PRINT: step:6400/10000 val_loss:4.6524 total_sharp:2.6324e-04 L1_sharp:8.7101e-05 L2_sharp:2.2589e-05 L3_sharp:2.6360e-05 L4_sharp:1.2136e-05 L5_sharp:2.1001e-05 L6_sharp:1.6836e-05 L7_sharp:2.2687e-05 L8_sharp:5.8588e-05 L9_sharp:4.8909e-05 L10_sharp:6.1449e-05 L11_sharp:8.8809e-05 L12_sharp:3.8941e-04 total_fnorm:4.0750e+01 total_l1_linf:1.0394e+05 total_spectral:2.0250e+01 L1_fnorm:1.1438e+01 L2_fnorm:1.1062e+01 L3_fnorm:1.1250e+01 L4_fnorm:1.1188e+01 L5_fnorm:1.0938e+01 L6_fnorm:1.1188e+01 L7_fnorm:1.1188e+01 L8_fnorm:1.0688e+01 L9_fnorm:1.1125e+01 L10_fnorm:1.1188e+01 L11_fnorm:1.1250e+01 L12_fnorm:1.0938e+01 L1_l1linf:2.8125e+00 L2_l1linf:2.6406e+00 L3_l1linf:2.6719e+00 L4_l1linf:2.6562e+00 L5_l1linf:2.5469e+00 L6_l1linf:2.5156e+00 L7_l1linf:2.5625e+00 L8_l1linf:2.3750e+00 L9_l1linf:2.3281e+00 L10_l1linf:2.3281e+00 L11_l1linf:2.2969e+00 L12_l1linf:2.2031e+00 L1_spectral:1.4791e-01 L2_spectral:1.4234e-01 L3_spectral:1.4423e-01 L4_spectral:1.4378e-01 L5_spectral:1.4184e-01 L6_spectral:1.4545e-01 L7_spectral:1.4395e-01 L8_spectral:1.4169e-01 L9_spectral:1.4621e-01 L10_spectral:1.4563e-01 L11_spectral:1.4532e-01 L12_spectral:1.4405e-01 train_time:266789ms step_avg:41.69ms +[2025-09-11 10:27:59] [Rank 0] PRINT: step:6400/10000 
val_loss:4.6524 total_sharp:2.6324e-04 L1_sharp:8.7101e-05 L2_sharp:2.2589e-05 L3_sharp:2.6360e-05 L4_sharp:1.2136e-05 L5_sharp:2.1001e-05 L6_sharp:1.6836e-05 L7_sharp:2.2687e-05 L8_sharp:5.8588e-05 L9_sharp:4.8909e-05 L10_sharp:6.1449e-05 L11_sharp:8.8809e-05 L12_sharp:3.8941e-04 total_fnorm:4.0750e+01 total_l1_linf:1.0394e+05 total_spectral:2.0250e+01 L1_fnorm:1.1438e+01 L2_fnorm:1.1062e+01 L3_fnorm:1.1250e+01 L4_fnorm:1.1188e+01 L5_fnorm:1.0938e+01 L6_fnorm:1.1188e+01 L7_fnorm:1.1188e+01 L8_fnorm:1.0688e+01 L9_fnorm:1.1125e+01 L10_fnorm:1.1188e+01 L11_fnorm:1.1250e+01 L12_fnorm:1.0938e+01 L1_l1linf:2.8125e+00 L2_l1linf:2.6406e+00 L3_l1linf:2.6719e+00 L4_l1linf:2.6562e+00 L5_l1linf:2.5469e+00 L6_l1linf:2.5156e+00 L7_l1linf:2.5625e+00 L8_l1linf:2.3750e+00 L9_l1linf:2.3281e+00 L10_l1linf:2.3281e+00 L11_l1linf:2.2969e+00 L12_l1linf:2.2031e+00 L1_spectral:1.4791e-01 L2_spectral:1.4234e-01 L3_spectral:1.4423e-01 L4_spectral:1.4378e-01 L5_spectral:1.4184e-01 L6_spectral:1.4545e-01 L7_spectral:1.4395e-01 L8_spectral:1.4169e-01 L9_spectral:1.4621e-01 L10_spectral:1.4563e-01 L11_spectral:1.4532e-01 L12_spectral:1.4405e-01 train_time:266789ms step_avg:41.69ms +[2025-09-11 10:28:01] [Rank 0] step:6401/10000 train_time:268064ms step_avg:41.88ms +[2025-09-11 10:28:01] [Rank 0] step:6401/10000 train_time:268064ms step_avg:41.88ms +[2025-09-11 10:28:02] [Rank 0] step:6421/10000 train_time:268786ms step_avg:41.86ms +[2025-09-11 10:28:02] [Rank 0] step:6421/10000 train_time:268786ms step_avg:41.86ms +[2025-09-11 10:28:02] [Rank 0] step:6441/10000 train_time:269478ms step_avg:41.84ms +[2025-09-11 10:28:02] [Rank 0] step:6441/10000 train_time:269478ms step_avg:41.84ms +[2025-09-11 10:28:03] [Rank 0] step:6461/10000 train_time:270169ms step_avg:41.82ms +[2025-09-11 10:28:03] [Rank 0] step:6461/10000 train_time:270169ms step_avg:41.82ms +[2025-09-11 10:28:04] [Rank 0] step:6481/10000 train_time:270861ms step_avg:41.79ms +[2025-09-11 10:28:04] [Rank 0] step:6481/10000 
train_time:270861ms step_avg:41.79ms +[2025-09-11 10:28:04] [Rank 0] step:6501/10000 train_time:271554ms step_avg:41.77ms +[2025-09-11 10:28:04] [Rank 0] step:6501/10000 train_time:271554ms step_avg:41.77ms +[2025-09-11 10:28:05] [Rank 0] step:6521/10000 train_time:272245ms step_avg:41.75ms +[2025-09-11 10:28:05] [Rank 0] step:6521/10000 train_time:272245ms step_avg:41.75ms +[2025-09-11 10:28:06] [Rank 0] step:6541/10000 train_time:272935ms step_avg:41.73ms +[2025-09-11 10:28:06] [Rank 0] step:6541/10000 train_time:272935ms step_avg:41.73ms +[2025-09-11 10:28:06] [Rank 0] step:6561/10000 train_time:273626ms step_avg:41.70ms +[2025-09-11 10:28:06] [Rank 0] step:6561/10000 train_time:273626ms step_avg:41.70ms +[2025-09-11 10:28:07] [Rank 0] step:6581/10000 train_time:274316ms step_avg:41.68ms +[2025-09-11 10:28:07] [Rank 0] step:6581/10000 train_time:274316ms step_avg:41.68ms +[2025-09-11 10:28:08] [Rank 0] step:6601/10000 train_time:275007ms step_avg:41.66ms +[2025-09-11 10:28:08] [Rank 0] step:6601/10000 train_time:275007ms step_avg:41.66ms +[2025-09-11 10:28:08] [Rank 0] step:6621/10000 train_time:275696ms step_avg:41.64ms +[2025-09-11 10:28:08] [Rank 0] step:6621/10000 train_time:275696ms step_avg:41.64ms +[2025-09-11 10:28:09] [Rank 0] step:6641/10000 train_time:276386ms step_avg:41.62ms +[2025-09-11 10:28:09] [Rank 0] step:6641/10000 train_time:276386ms step_avg:41.62ms +[2025-09-11 10:28:10] [Rank 0] step:6661/10000 train_time:277078ms step_avg:41.60ms +[2025-09-11 10:28:10] [Rank 0] step:6661/10000 train_time:277078ms step_avg:41.60ms +[2025-09-11 10:28:10] [Rank 0] step:6681/10000 train_time:277776ms step_avg:41.58ms +[2025-09-11 10:28:10] [Rank 0] step:6681/10000 train_time:277776ms step_avg:41.58ms +[2025-09-11 10:28:11] [Rank 0] step:6701/10000 train_time:278472ms step_avg:41.56ms +[2025-09-11 10:28:11] [Rank 0] step:6701/10000 train_time:278472ms step_avg:41.56ms +[2025-09-11 10:28:12] [Rank 0] step:6721/10000 train_time:279170ms step_avg:41.54ms 
+[2025-09-11 10:28:12] [Rank 0] step:6721/10000 train_time:279170ms step_avg:41.54ms +[2025-09-11 10:28:13] [Rank 0] step:6741/10000 train_time:279869ms step_avg:41.52ms +[2025-09-11 10:28:13] [Rank 0] step:6741/10000 train_time:279869ms step_avg:41.52ms +[2025-09-11 10:28:13] [Rank 0] step:6761/10000 train_time:280565ms step_avg:41.50ms +[2025-09-11 10:28:13] [Rank 0] step:6761/10000 train_time:280565ms step_avg:41.50ms +[2025-09-11 10:28:14] [Rank 0] step:6781/10000 train_time:281291ms step_avg:41.48ms +[2025-09-11 10:28:14] [Rank 0] step:6781/10000 train_time:281291ms step_avg:41.48ms +[2025-09-11 10:28:15] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 10:28:15] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 10:28:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 10:28:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 10:28:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 10:28:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 10:28:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:28:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:28:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 10:28:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 10:28:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 10:28:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 10:28:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 10:28:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 10:28:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 10:28:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 10:28:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 10:28:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 10:28:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 10:28:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 10:28:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 10:28:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 10:28:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 10:28:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 10:28:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 10:28:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 10:28:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 10:28:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 10:28:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 10:28:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 10:28:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 10:28:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 10:28:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 10:28:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 10:28:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 10:28:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 10:28:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 10:28:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 10:28:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 10:28:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 10:28:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 10:28:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 10:28:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 10:28:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 10:28:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:28:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:28:25] [Rank 0] PRINT: step:6800/10000 val_loss:4.6074 total_sharp:3.1348e-04 L1_sharp:1.1101e-04 L2_sharp:4.2889e-05 L3_sharp:2.6771e-05 L4_sharp:1.6705e-05 L5_sharp:3.0520e-05 L6_sharp:2.0578e-05 L7_sharp:2.1083e-05 L8_sharp:5.9995e-05 L9_sharp:5.1355e-05 L10_sharp:6.5529e-05 L11_sharp:9.0215e-05 L12_sharp:9.5944e-04 total_fnorm:3.5750e+01 total_l1_linf:8.8576e+04 total_spectral:1.8000e+01 L1_fnorm:1.0188e+01 L2_fnorm:9.8125e+00 L3_fnorm:9.9375e+00 L4_fnorm:9.9375e+00 L5_fnorm:9.6875e+00 L6_fnorm:9.8750e+00 L7_fnorm:9.8750e+00 L8_fnorm:9.4375e+00 L9_fnorm:9.8125e+00 L10_fnorm:9.8125e+00 L11_fnorm:9.8750e+00 L12_fnorm:9.6875e+00 L1_l1linf:2.4531e+00 L2_l1linf:2.3281e+00 L3_l1linf:2.3125e+00 L4_l1linf:2.2969e+00 L5_l1linf:2.2031e+00 L6_l1linf:2.1875e+00 L7_l1linf:2.2031e+00 L8_l1linf:2.0938e+00 L9_l1linf:1.9766e+00 L10_l1linf:1.8906e+00 L11_l1linf:1.9766e+00 L12_l1linf:1.8828e+00 L1_spectral:1.3309e-01 L2_spectral:1.2895e-01 L3_spectral:1.3043e-01 L4_spectral:1.3068e-01 L5_spectral:1.2802e-01 L6_spectral:1.3112e-01 L7_spectral:1.3143e-01 L8_spectral:1.2831e-01 L9_spectral:1.3109e-01 L10_spectral:1.3137e-01 L11_spectral:1.3108e-01 L12_spectral:1.3031e-01 train_time:281969ms step_avg:41.47ms +[2025-09-11 10:28:25] [Rank 0] PRINT: step:6800/10000 val_loss:4.6074 total_sharp:3.1348e-04 L1_sharp:1.1101e-04 L2_sharp:4.2889e-05 L3_sharp:2.6771e-05 L4_sharp:1.6705e-05 L5_sharp:3.0520e-05 L6_sharp:2.0578e-05 L7_sharp:2.1083e-05 L8_sharp:5.9995e-05 L9_sharp:5.1355e-05 L10_sharp:6.5529e-05 L11_sharp:9.0215e-05 L12_sharp:9.5944e-04 total_fnorm:3.5750e+01 total_l1_linf:8.8576e+04 total_spectral:1.8000e+01 L1_fnorm:1.0188e+01 L2_fnorm:9.8125e+00 L3_fnorm:9.9375e+00 L4_fnorm:9.9375e+00 L5_fnorm:9.6875e+00 L6_fnorm:9.8750e+00 L7_fnorm:9.8750e+00 L8_fnorm:9.4375e+00 L9_fnorm:9.8125e+00 L10_fnorm:9.8125e+00 L11_fnorm:9.8750e+00 L12_fnorm:9.6875e+00 L1_l1linf:2.4531e+00 L2_l1linf:2.3281e+00 L3_l1linf:2.3125e+00 L4_l1linf:2.2969e+00 L5_l1linf:2.2031e+00 
L6_l1linf:2.1875e+00 L7_l1linf:2.2031e+00 L8_l1linf:2.0938e+00 L9_l1linf:1.9766e+00 L10_l1linf:1.8906e+00 L11_l1linf:1.9766e+00 L12_l1linf:1.8828e+00 L1_spectral:1.3309e-01 L2_spectral:1.2895e-01 L3_spectral:1.3043e-01 L4_spectral:1.3068e-01 L5_spectral:1.2802e-01 L6_spectral:1.3112e-01 L7_spectral:1.3143e-01 L8_spectral:1.2831e-01 L9_spectral:1.3109e-01 L10_spectral:1.3137e-01 L11_spectral:1.3108e-01 L12_spectral:1.3031e-01 train_time:281969ms step_avg:41.47ms +[2025-09-11 10:28:26] [Rank 0] step:6801/10000 train_time:283227ms step_avg:41.64ms +[2025-09-11 10:28:26] [Rank 0] step:6801/10000 train_time:283227ms step_avg:41.64ms +[2025-09-11 10:28:27] [Rank 0] step:6821/10000 train_time:283955ms step_avg:41.63ms +[2025-09-11 10:28:27] [Rank 0] step:6821/10000 train_time:283955ms step_avg:41.63ms +[2025-09-11 10:28:27] [Rank 0] step:6841/10000 train_time:284656ms step_avg:41.61ms +[2025-09-11 10:28:27] [Rank 0] step:6841/10000 train_time:284656ms step_avg:41.61ms +[2025-09-11 10:28:28] [Rank 0] step:6861/10000 train_time:285355ms step_avg:41.59ms +[2025-09-11 10:28:28] [Rank 0] step:6861/10000 train_time:285355ms step_avg:41.59ms +[2025-09-11 10:28:29] [Rank 0] step:6881/10000 train_time:286053ms step_avg:41.57ms +[2025-09-11 10:28:29] [Rank 0] step:6881/10000 train_time:286053ms step_avg:41.57ms +[2025-09-11 10:28:29] [Rank 0] step:6901/10000 train_time:286749ms step_avg:41.55ms +[2025-09-11 10:28:29] [Rank 0] step:6901/10000 train_time:286749ms step_avg:41.55ms +[2025-09-11 10:28:30] [Rank 0] step:6921/10000 train_time:287446ms step_avg:41.53ms +[2025-09-11 10:28:30] [Rank 0] step:6921/10000 train_time:287446ms step_avg:41.53ms +[2025-09-11 10:28:31] [Rank 0] step:6941/10000 train_time:288145ms step_avg:41.51ms +[2025-09-11 10:28:31] [Rank 0] step:6941/10000 train_time:288145ms step_avg:41.51ms +[2025-09-11 10:28:32] [Rank 0] step:6961/10000 train_time:288843ms step_avg:41.49ms +[2025-09-11 10:28:32] [Rank 0] step:6961/10000 train_time:288843ms step_avg:41.49ms 
+[2025-09-11 10:28:32] [Rank 0] step:6981/10000 train_time:289542ms step_avg:41.48ms +[2025-09-11 10:28:32] [Rank 0] step:6981/10000 train_time:289542ms step_avg:41.48ms +[2025-09-11 10:28:33] [Rank 0] step:7001/10000 train_time:290240ms step_avg:41.46ms +[2025-09-11 10:28:33] [Rank 0] step:7001/10000 train_time:290240ms step_avg:41.46ms +[2025-09-11 10:28:34] [Rank 0] step:7021/10000 train_time:290938ms step_avg:41.44ms +[2025-09-11 10:28:34] [Rank 0] step:7021/10000 train_time:290938ms step_avg:41.44ms +[2025-09-11 10:28:34] [Rank 0] step:7041/10000 train_time:291636ms step_avg:41.42ms +[2025-09-11 10:28:34] [Rank 0] step:7041/10000 train_time:291636ms step_avg:41.42ms +[2025-09-11 10:28:35] [Rank 0] step:7061/10000 train_time:292335ms step_avg:41.40ms +[2025-09-11 10:28:35] [Rank 0] step:7061/10000 train_time:292335ms step_avg:41.40ms +[2025-09-11 10:28:36] [Rank 0] step:7081/10000 train_time:293033ms step_avg:41.38ms +[2025-09-11 10:28:36] [Rank 0] step:7081/10000 train_time:293033ms step_avg:41.38ms +[2025-09-11 10:28:36] [Rank 0] step:7101/10000 train_time:293730ms step_avg:41.36ms +[2025-09-11 10:28:36] [Rank 0] step:7101/10000 train_time:293730ms step_avg:41.36ms +[2025-09-11 10:28:37] [Rank 0] step:7121/10000 train_time:294429ms step_avg:41.35ms +[2025-09-11 10:28:37] [Rank 0] step:7121/10000 train_time:294429ms step_avg:41.35ms +[2025-09-11 10:28:38] [Rank 0] step:7141/10000 train_time:295126ms step_avg:41.33ms +[2025-09-11 10:28:38] [Rank 0] step:7141/10000 train_time:295126ms step_avg:41.33ms +[2025-09-11 10:28:39] [Rank 0] step:7161/10000 train_time:295825ms step_avg:41.31ms +[2025-09-11 10:28:39] [Rank 0] step:7161/10000 train_time:295825ms step_avg:41.31ms +[2025-09-11 10:28:39] [Rank 0] step:7181/10000 train_time:296521ms step_avg:41.29ms +[2025-09-11 10:28:39] [Rank 0] step:7181/10000 train_time:296521ms step_avg:41.29ms +[2025-09-11 10:28:40] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 10:28:40] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 10:28:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 10:28:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 10:28:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 10:28:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 10:28:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:28:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:28:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 10:28:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 10:28:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 10:28:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 10:28:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 10:28:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 10:28:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 10:28:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 10:28:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 10:28:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 10:28:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 10:28:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 10:28:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 10:28:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 10:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 10:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 10:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 10:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 10:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 10:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 10:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 10:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 10:28:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 10:28:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 10:28:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 10:28:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 10:28:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 10:28:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 10:28:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 10:28:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 10:28:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 10:28:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 10:28:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 10:28:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 10:28:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 10:28:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 10:28:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:28:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:28:50] [Rank 0] PRINT: step:7200/10000 val_loss:4.5717 total_sharp:2.3294e-04 L1_sharp:9.0178e-05 L2_sharp:3.3661e-05 L3_sharp:1.5301e-05 L4_sharp:1.6624e-05 L5_sharp:1.9861e-05 L6_sharp:2.3763e-05 L7_sharp:2.5595e-05 L8_sharp:4.7161e-05 L9_sharp:4.3164e-05 L10_sharp:5.8494e-05 L11_sharp:7.9799e-05 L12_sharp:3.2035e-04 total_fnorm:3.0875e+01 total_l1_linf:7.2192e+04 total_spectral:1.5625e+01 L1_fnorm:8.8750e+00 L2_fnorm:8.4375e+00 L3_fnorm:8.6250e+00 L4_fnorm:8.6250e+00 L5_fnorm:8.4375e+00 L6_fnorm:8.6250e+00 L7_fnorm:8.5625e+00 L8_fnorm:8.1875e+00 L9_fnorm:8.5000e+00 L10_fnorm:8.5000e+00 L11_fnorm:8.5625e+00 L12_fnorm:8.3750e+00 L1_l1linf:2.0156e+00 L2_l1linf:1.9375e+00 L3_l1linf:1.9766e+00 L4_l1linf:1.9609e+00 L5_l1linf:1.8828e+00 L6_l1linf:1.8516e+00 L7_l1linf:1.8516e+00 L8_l1linf:1.7031e+00 L9_l1linf:1.6094e+00 L10_l1linf:1.6094e+00 L11_l1linf:1.6406e+00 L12_l1linf:1.6406e+00 L1_spectral:1.1779e-01 L2_spectral:1.1241e-01 L3_spectral:1.1536e-01 L4_spectral:1.1561e-01 L5_spectral:1.1375e-01 L6_spectral:1.1736e-01 L7_spectral:1.1629e-01 L8_spectral:1.1474e-01 L9_spectral:1.1674e-01 L10_spectral:1.1727e-01 L11_spectral:1.1736e-01 L12_spectral:1.1535e-01 train_time:297200ms step_avg:41.28ms +[2025-09-11 10:28:50] [Rank 0] PRINT: step:7200/10000 
val_loss:4.5717 total_sharp:2.3294e-04 L1_sharp:9.0178e-05 L2_sharp:3.3661e-05 L3_sharp:1.5301e-05 L4_sharp:1.6624e-05 L5_sharp:1.9861e-05 L6_sharp:2.3763e-05 L7_sharp:2.5595e-05 L8_sharp:4.7161e-05 L9_sharp:4.3164e-05 L10_sharp:5.8494e-05 L11_sharp:7.9799e-05 L12_sharp:3.2035e-04 total_fnorm:3.0875e+01 total_l1_linf:7.2192e+04 total_spectral:1.5625e+01 L1_fnorm:8.8750e+00 L2_fnorm:8.4375e+00 L3_fnorm:8.6250e+00 L4_fnorm:8.6250e+00 L5_fnorm:8.4375e+00 L6_fnorm:8.6250e+00 L7_fnorm:8.5625e+00 L8_fnorm:8.1875e+00 L9_fnorm:8.5000e+00 L10_fnorm:8.5000e+00 L11_fnorm:8.5625e+00 L12_fnorm:8.3750e+00 L1_l1linf:2.0156e+00 L2_l1linf:1.9375e+00 L3_l1linf:1.9766e+00 L4_l1linf:1.9609e+00 L5_l1linf:1.8828e+00 L6_l1linf:1.8516e+00 L7_l1linf:1.8516e+00 L8_l1linf:1.7031e+00 L9_l1linf:1.6094e+00 L10_l1linf:1.6094e+00 L11_l1linf:1.6406e+00 L12_l1linf:1.6406e+00 L1_spectral:1.1779e-01 L2_spectral:1.1241e-01 L3_spectral:1.1536e-01 L4_spectral:1.1561e-01 L5_spectral:1.1375e-01 L6_spectral:1.1736e-01 L7_spectral:1.1629e-01 L8_spectral:1.1474e-01 L9_spectral:1.1674e-01 L10_spectral:1.1727e-01 L11_spectral:1.1736e-01 L12_spectral:1.1535e-01 train_time:297200ms step_avg:41.28ms +[2025-09-11 10:28:52] [Rank 0] step:7201/10000 train_time:298518ms step_avg:41.46ms +[2025-09-11 10:28:52] [Rank 0] step:7201/10000 train_time:298518ms step_avg:41.46ms +[2025-09-11 10:28:53] [Rank 0] step:7221/10000 train_time:299239ms step_avg:41.44ms +[2025-09-11 10:28:53] [Rank 0] step:7221/10000 train_time:299239ms step_avg:41.44ms +[2025-09-11 10:28:53] [Rank 0] step:7241/10000 train_time:299938ms step_avg:41.42ms +[2025-09-11 10:28:53] [Rank 0] step:7241/10000 train_time:299938ms step_avg:41.42ms +[2025-09-11 10:28:54] [Rank 0] step:7261/10000 train_time:300638ms step_avg:41.40ms +[2025-09-11 10:28:54] [Rank 0] step:7261/10000 train_time:300638ms step_avg:41.40ms +[2025-09-11 10:28:55] [Rank 0] step:7281/10000 train_time:301341ms step_avg:41.39ms +[2025-09-11 10:28:55] [Rank 0] step:7281/10000 
train_time:301341ms step_avg:41.39ms +[2025-09-11 10:28:55] [Rank 0] step:7301/10000 train_time:302037ms step_avg:41.37ms +[2025-09-11 10:28:55] [Rank 0] step:7301/10000 train_time:302037ms step_avg:41.37ms +[2025-09-11 10:28:56] [Rank 0] step:7321/10000 train_time:302735ms step_avg:41.35ms +[2025-09-11 10:28:56] [Rank 0] step:7321/10000 train_time:302735ms step_avg:41.35ms +[2025-09-11 10:28:57] [Rank 0] step:7341/10000 train_time:303434ms step_avg:41.33ms +[2025-09-11 10:28:57] [Rank 0] step:7341/10000 train_time:303434ms step_avg:41.33ms +[2025-09-11 10:28:57] [Rank 0] step:7361/10000 train_time:304131ms step_avg:41.32ms +[2025-09-11 10:28:57] [Rank 0] step:7361/10000 train_time:304131ms step_avg:41.32ms +[2025-09-11 10:28:58] [Rank 0] step:7381/10000 train_time:304830ms step_avg:41.30ms +[2025-09-11 10:28:58] [Rank 0] step:7381/10000 train_time:304830ms step_avg:41.30ms +[2025-09-11 10:28:59] [Rank 0] step:7401/10000 train_time:305527ms step_avg:41.28ms +[2025-09-11 10:28:59] [Rank 0] step:7401/10000 train_time:305527ms step_avg:41.28ms +[2025-09-11 10:29:00] [Rank 0] step:7421/10000 train_time:306224ms step_avg:41.26ms +[2025-09-11 10:29:00] [Rank 0] step:7421/10000 train_time:306224ms step_avg:41.26ms +[2025-09-11 10:29:00] [Rank 0] step:7441/10000 train_time:306924ms step_avg:41.25ms +[2025-09-11 10:29:00] [Rank 0] step:7441/10000 train_time:306924ms step_avg:41.25ms +[2025-09-11 10:29:01] [Rank 0] step:7461/10000 train_time:307622ms step_avg:41.23ms +[2025-09-11 10:29:01] [Rank 0] step:7461/10000 train_time:307622ms step_avg:41.23ms +[2025-09-11 10:29:02] [Rank 0] step:7481/10000 train_time:308323ms step_avg:41.21ms +[2025-09-11 10:29:02] [Rank 0] step:7481/10000 train_time:308323ms step_avg:41.21ms +[2025-09-11 10:29:02] [Rank 0] step:7501/10000 train_time:309023ms step_avg:41.20ms +[2025-09-11 10:29:02] [Rank 0] step:7501/10000 train_time:309023ms step_avg:41.20ms +[2025-09-11 10:29:03] [Rank 0] step:7521/10000 train_time:309723ms step_avg:41.18ms 
+[2025-09-11 10:29:03] [Rank 0] step:7521/10000 train_time:309723ms step_avg:41.18ms +[2025-09-11 10:29:04] [Rank 0] step:7541/10000 train_time:310420ms step_avg:41.16ms +[2025-09-11 10:29:04] [Rank 0] step:7541/10000 train_time:310420ms step_avg:41.16ms +[2025-09-11 10:29:04] [Rank 0] step:7561/10000 train_time:311120ms step_avg:41.15ms +[2025-09-11 10:29:04] [Rank 0] step:7561/10000 train_time:311120ms step_avg:41.15ms +[2025-09-11 10:29:05] [Rank 0] step:7581/10000 train_time:311819ms step_avg:41.13ms +[2025-09-11 10:29:05] [Rank 0] step:7581/10000 train_time:311819ms step_avg:41.13ms +[2025-09-11 10:29:06] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 10:29:06] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 10:29:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 10:29:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 10:29:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 10:29:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 10:29:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:29:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:29:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 10:29:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 10:29:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 10:29:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 10:29:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 10:29:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 10:29:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 10:29:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 10:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 10:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 10:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 10:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 10:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 10:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 10:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 10:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 10:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 10:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 10:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 10:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 10:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 10:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 10:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 10:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 10:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 10:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 10:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 10:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 10:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 10:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 10:29:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 10:29:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 10:29:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 10:29:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 10:29:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 10:29:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 10:29:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:29:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:29:16] [Rank 0] PRINT: step:7600/10000 val_loss:4.5386 total_sharp:2.3280e-04 L1_sharp:1.1474e-04 L2_sharp:3.9195e-05 L3_sharp:2.8062e-05 L4_sharp:3.9963e-05 L5_sharp:2.4363e-05 L6_sharp:9.8552e-06 L7_sharp:2.2585e-05 L8_sharp:6.1518e-05 L9_sharp:3.7519e-05 L10_sharp:6.0362e-05 L11_sharp:7.9625e-05 L12_sharp:3.0840e-04 total_fnorm:2.6250e+01 total_l1_linf:5.6832e+04 total_spectral:1.3062e+01 L1_fnorm:7.5312e+00 L2_fnorm:7.1875e+00 L3_fnorm:7.2188e+00 L4_fnorm:7.2812e+00 L5_fnorm:7.0312e+00 L6_fnorm:7.2500e+00 L7_fnorm:7.2500e+00 L8_fnorm:6.8438e+00 L9_fnorm:7.1875e+00 L10_fnorm:7.1875e+00 L11_fnorm:7.2188e+00 L12_fnorm:7.0938e+00 L1_l1linf:1.6094e+00 L2_l1linf:1.6172e+00 L3_l1linf:1.5703e+00 L4_l1linf:1.5938e+00 L5_l1linf:1.5391e+00 L6_l1linf:1.5234e+00 L7_l1linf:1.4766e+00 L8_l1linf:1.3906e+00 L9_l1linf:1.3281e+00 L10_l1linf:1.3203e+00 L11_l1linf:1.3281e+00 L12_l1linf:1.3750e+00 L1_spectral:1.0200e-01 L2_spectral:9.7880e-02 L3_spectral:9.9511e-02 L4_spectral:9.9489e-02 L5_spectral:9.5904e-02 L6_spectral:1.0078e-01 L7_spectral:1.0004e-01 L8_spectral:9.8859e-02 L9_spectral:1.0138e-01 L10_spectral:1.0144e-01 L11_spectral:1.0132e-01 L12_spectral:1.0051e-01 train_time:312500ms step_avg:41.12ms +[2025-09-11 10:29:16] [Rank 0] PRINT: step:7600/10000 val_loss:4.5386 total_sharp:2.3280e-04 L1_sharp:1.1474e-04 L2_sharp:3.9195e-05 L3_sharp:2.8062e-05 L4_sharp:3.9963e-05 L5_sharp:2.4363e-05 L6_sharp:9.8552e-06 L7_sharp:2.2585e-05 L8_sharp:6.1518e-05 L9_sharp:3.7519e-05 L10_sharp:6.0362e-05 L11_sharp:7.9625e-05 L12_sharp:3.0840e-04 total_fnorm:2.6250e+01 total_l1_linf:5.6832e+04 total_spectral:1.3062e+01 L1_fnorm:7.5312e+00 L2_fnorm:7.1875e+00 L3_fnorm:7.2188e+00 L4_fnorm:7.2812e+00 L5_fnorm:7.0312e+00 L6_fnorm:7.2500e+00 L7_fnorm:7.2500e+00 L8_fnorm:6.8438e+00 L9_fnorm:7.1875e+00 L10_fnorm:7.1875e+00 L11_fnorm:7.2188e+00 L12_fnorm:7.0938e+00 L1_l1linf:1.6094e+00 L2_l1linf:1.6172e+00 L3_l1linf:1.5703e+00 L4_l1linf:1.5938e+00 L5_l1linf:1.5391e+00 
L6_l1linf:1.5234e+00 L7_l1linf:1.4766e+00 L8_l1linf:1.3906e+00 L9_l1linf:1.3281e+00 L10_l1linf:1.3203e+00 L11_l1linf:1.3281e+00 L12_l1linf:1.3750e+00 L1_spectral:1.0200e-01 L2_spectral:9.7880e-02 L3_spectral:9.9511e-02 L4_spectral:9.9489e-02 L5_spectral:9.5904e-02 L6_spectral:1.0078e-01 L7_spectral:1.0004e-01 L8_spectral:9.8859e-02 L9_spectral:1.0138e-01 L10_spectral:1.0144e-01 L11_spectral:1.0132e-01 L12_spectral:1.0051e-01 train_time:312500ms step_avg:41.12ms +[2025-09-11 10:29:17] [Rank 0] step:7601/10000 train_time:313793ms step_avg:41.28ms +[2025-09-11 10:29:17] [Rank 0] step:7601/10000 train_time:313793ms step_avg:41.28ms +[2025-09-11 10:29:18] [Rank 0] step:7621/10000 train_time:314521ms step_avg:41.27ms +[2025-09-11 10:29:18] [Rank 0] step:7621/10000 train_time:314521ms step_avg:41.27ms +[2025-09-11 10:29:18] [Rank 0] step:7641/10000 train_time:315222ms step_avg:41.25ms +[2025-09-11 10:29:18] [Rank 0] step:7641/10000 train_time:315222ms step_avg:41.25ms +[2025-09-11 10:29:19] [Rank 0] step:7661/10000 train_time:315921ms step_avg:41.24ms +[2025-09-11 10:29:19] [Rank 0] step:7661/10000 train_time:315921ms step_avg:41.24ms +[2025-09-11 10:29:20] [Rank 0] step:7681/10000 train_time:316620ms step_avg:41.22ms +[2025-09-11 10:29:20] [Rank 0] step:7681/10000 train_time:316620ms step_avg:41.22ms +[2025-09-11 10:29:21] [Rank 0] step:7701/10000 train_time:317321ms step_avg:41.21ms +[2025-09-11 10:29:21] [Rank 0] step:7701/10000 train_time:317321ms step_avg:41.21ms +[2025-09-11 10:29:21] [Rank 0] step:7721/10000 train_time:318020ms step_avg:41.19ms +[2025-09-11 10:29:21] [Rank 0] step:7721/10000 train_time:318020ms step_avg:41.19ms +[2025-09-11 10:29:22] [Rank 0] step:7741/10000 train_time:318720ms step_avg:41.17ms +[2025-09-11 10:29:22] [Rank 0] step:7741/10000 train_time:318720ms step_avg:41.17ms +[2025-09-11 10:29:23] [Rank 0] step:7761/10000 train_time:319963ms step_avg:41.23ms +[2025-09-11 10:29:23] [Rank 0] step:7761/10000 train_time:319963ms step_avg:41.23ms 
+[2025-09-11 10:29:24] [Rank 0] step:7781/10000 train_time:320665ms step_avg:41.21ms +[2025-09-11 10:29:24] [Rank 0] step:7781/10000 train_time:320665ms step_avg:41.21ms +[2025-09-11 10:29:25] [Rank 0] step:7801/10000 train_time:321363ms step_avg:41.20ms +[2025-09-11 10:29:25] [Rank 0] step:7801/10000 train_time:321363ms step_avg:41.20ms +[2025-09-11 10:29:26] [Rank 0] step:7821/10000 train_time:322353ms step_avg:41.22ms +[2025-09-11 10:29:26] [Rank 0] step:7821/10000 train_time:322353ms step_avg:41.22ms +[2025-09-11 10:29:26] [Rank 0] step:7841/10000 train_time:323054ms step_avg:41.20ms +[2025-09-11 10:29:26] [Rank 0] step:7841/10000 train_time:323054ms step_avg:41.20ms +[2025-09-11 10:29:27] [Rank 0] step:7861/10000 train_time:323755ms step_avg:41.19ms +[2025-09-11 10:29:27] [Rank 0] step:7861/10000 train_time:323755ms step_avg:41.19ms +[2025-09-11 10:29:28] [Rank 0] step:7881/10000 train_time:324455ms step_avg:41.17ms +[2025-09-11 10:29:28] [Rank 0] step:7881/10000 train_time:324455ms step_avg:41.17ms +[2025-09-11 10:29:28] [Rank 0] step:7901/10000 train_time:325156ms step_avg:41.15ms +[2025-09-11 10:29:28] [Rank 0] step:7901/10000 train_time:325156ms step_avg:41.15ms +[2025-09-11 10:29:29] [Rank 0] step:7921/10000 train_time:325856ms step_avg:41.14ms +[2025-09-11 10:29:29] [Rank 0] step:7921/10000 train_time:325856ms step_avg:41.14ms +[2025-09-11 10:29:30] [Rank 0] step:7941/10000 train_time:326556ms step_avg:41.12ms +[2025-09-11 10:29:30] [Rank 0] step:7941/10000 train_time:326556ms step_avg:41.12ms +[2025-09-11 10:29:30] [Rank 0] step:7961/10000 train_time:327254ms step_avg:41.11ms +[2025-09-11 10:29:30] [Rank 0] step:7961/10000 train_time:327254ms step_avg:41.11ms +[2025-09-11 10:29:31] [Rank 0] step:7981/10000 train_time:327957ms step_avg:41.09ms +[2025-09-11 10:29:31] [Rank 0] step:7981/10000 train_time:327957ms step_avg:41.09ms +[2025-09-11 10:29:32] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 10:29:32] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 10:29:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 10:29:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 10:29:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 10:29:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 10:29:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:29:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:29:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 10:29:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 10:29:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 10:29:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 10:29:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 10:29:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 10:29:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 10:29:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 10:29:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 10:29:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 10:29:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 10:29:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 10:29:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 10:29:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 10:29:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 10:29:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 10:29:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 10:29:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 10:29:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 10:29:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 10:29:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 10:29:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 10:29:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 10:29:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 10:29:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 10:29:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 10:29:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 10:29:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 10:29:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 10:29:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 10:29:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 10:29:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 10:29:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 10:29:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 10:29:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 10:29:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 10:29:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:29:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:29:44] [Rank 0] PRINT: step:8000/10000 val_loss:4.5180 total_sharp:2.1161e-04 L1_sharp:1.1438e-04 L2_sharp:2.2751e-05 L3_sharp:1.5738e-05 L4_sharp:3.5215e-05 L5_sharp:2.1736e-05 L6_sharp:1.6596e-05 L7_sharp:2.3183e-05 L8_sharp:5.2121e-05 L9_sharp:3.5875e-05 L10_sharp:4.9362e-05 L11_sharp:8.2914e-05 L12_sharp:2.9516e-04 total_fnorm:2.1250e+01 total_l1_linf:4.3264e+04 total_spectral:1.0688e+01 L1_fnorm:6.1875e+00 L2_fnorm:5.8438e+00 L3_fnorm:5.9375e+00 L4_fnorm:5.9375e+00 L5_fnorm:5.7812e+00 L6_fnorm:5.9062e+00 L7_fnorm:5.8750e+00 L8_fnorm:5.5625e+00 L9_fnorm:5.7812e+00 L10_fnorm:5.8125e+00 L11_fnorm:5.8438e+00 L12_fnorm:5.7500e+00 L1_l1linf:1.2656e+00 L2_l1linf:1.2500e+00 L3_l1linf:1.2188e+00 L4_l1linf:1.2500e+00 L5_l1linf:1.2031e+00 L6_l1linf:1.1641e+00 L7_l1linf:1.1641e+00 L8_l1linf:1.0938e+00 L9_l1linf:1.0156e+00 L10_l1linf:9.8438e-01 L11_l1linf:1.0234e+00 L12_l1linf:1.1719e+00 L1_spectral:8.6101e-02 L2_spectral:8.0734e-02 L3_spectral:8.2197e-02 L4_spectral:8.2638e-02 L5_spectral:8.1189e-02 L6_spectral:8.3619e-02 L7_spectral:8.3684e-02 L8_spectral:8.2737e-02 L9_spectral:8.3893e-02 L10_spectral:8.4206e-02 L11_spectral:8.4058e-02 L12_spectral:8.3322e-02 train_time:328636ms step_avg:41.08ms +[2025-09-11 10:29:44] [Rank 0] PRINT: step:8000/10000 
val_loss:4.5180 total_sharp:2.1161e-04 L1_sharp:1.1438e-04 L2_sharp:2.2751e-05 L3_sharp:1.5738e-05 L4_sharp:3.5215e-05 L5_sharp:2.1736e-05 L6_sharp:1.6596e-05 L7_sharp:2.3183e-05 L8_sharp:5.2121e-05 L9_sharp:3.5875e-05 L10_sharp:4.9362e-05 L11_sharp:8.2914e-05 L12_sharp:2.9516e-04 total_fnorm:2.1250e+01 total_l1_linf:4.3264e+04 total_spectral:1.0688e+01 L1_fnorm:6.1875e+00 L2_fnorm:5.8438e+00 L3_fnorm:5.9375e+00 L4_fnorm:5.9375e+00 L5_fnorm:5.7812e+00 L6_fnorm:5.9062e+00 L7_fnorm:5.8750e+00 L8_fnorm:5.5625e+00 L9_fnorm:5.7812e+00 L10_fnorm:5.8125e+00 L11_fnorm:5.8438e+00 L12_fnorm:5.7500e+00 L1_l1linf:1.2656e+00 L2_l1linf:1.2500e+00 L3_l1linf:1.2188e+00 L4_l1linf:1.2500e+00 L5_l1linf:1.2031e+00 L6_l1linf:1.1641e+00 L7_l1linf:1.1641e+00 L8_l1linf:1.0938e+00 L9_l1linf:1.0156e+00 L10_l1linf:9.8438e-01 L11_l1linf:1.0234e+00 L12_l1linf:1.1719e+00 L1_spectral:8.6101e-02 L2_spectral:8.0734e-02 L3_spectral:8.2197e-02 L4_spectral:8.2638e-02 L5_spectral:8.1189e-02 L6_spectral:8.3619e-02 L7_spectral:8.3684e-02 L8_spectral:8.2737e-02 L9_spectral:8.3893e-02 L10_spectral:8.4206e-02 L11_spectral:8.4058e-02 L12_spectral:8.3322e-02 train_time:328636ms step_avg:41.08ms +[2025-09-11 10:29:45] [Rank 0] step:8001/10000 train_time:329936ms step_avg:41.24ms +[2025-09-11 10:29:45] [Rank 0] step:8001/10000 train_time:329936ms step_avg:41.24ms +[2025-09-11 10:29:46] [Rank 0] step:8021/10000 train_time:330671ms step_avg:41.23ms +[2025-09-11 10:29:46] [Rank 0] step:8021/10000 train_time:330671ms step_avg:41.23ms +[2025-09-11 10:29:47] [Rank 0] step:8041/10000 train_time:331373ms step_avg:41.21ms +[2025-09-11 10:29:47] [Rank 0] step:8041/10000 train_time:331373ms step_avg:41.21ms +[2025-09-11 10:29:47] [Rank 0] step:8061/10000 train_time:332076ms step_avg:41.20ms +[2025-09-11 10:29:47] [Rank 0] step:8061/10000 train_time:332076ms step_avg:41.20ms +[2025-09-11 10:29:48] [Rank 0] step:8081/10000 train_time:332775ms step_avg:41.18ms +[2025-09-11 10:29:48] [Rank 0] step:8081/10000 
train_time:332775ms step_avg:41.18ms +[2025-09-11 10:29:49] [Rank 0] step:8101/10000 train_time:333473ms step_avg:41.16ms +[2025-09-11 10:29:49] [Rank 0] step:8101/10000 train_time:333473ms step_avg:41.16ms +[2025-09-11 10:29:50] [Rank 0] step:8121/10000 train_time:334177ms step_avg:41.15ms +[2025-09-11 10:29:50] [Rank 0] step:8121/10000 train_time:334177ms step_avg:41.15ms +[2025-09-11 10:29:51] [Rank 0] step:8141/10000 train_time:335618ms step_avg:41.23ms +[2025-09-11 10:29:51] [Rank 0] step:8141/10000 train_time:335618ms step_avg:41.23ms +[2025-09-11 10:29:52] [Rank 0] step:8161/10000 train_time:336321ms step_avg:41.21ms +[2025-09-11 10:29:52] [Rank 0] step:8161/10000 train_time:336321ms step_avg:41.21ms +[2025-09-11 10:29:52] [Rank 0] step:8181/10000 train_time:337033ms step_avg:41.20ms +[2025-09-11 10:29:52] [Rank 0] step:8181/10000 train_time:337033ms step_avg:41.20ms +[2025-09-11 10:29:53] [Rank 0] step:8201/10000 train_time:337743ms step_avg:41.18ms +[2025-09-11 10:29:53] [Rank 0] step:8201/10000 train_time:337743ms step_avg:41.18ms +[2025-09-11 10:29:54] [Rank 0] step:8221/10000 train_time:338449ms step_avg:41.17ms +[2025-09-11 10:29:54] [Rank 0] step:8221/10000 train_time:338449ms step_avg:41.17ms +[2025-09-11 10:29:55] [Rank 0] step:8241/10000 train_time:339165ms step_avg:41.16ms +[2025-09-11 10:29:55] [Rank 0] step:8241/10000 train_time:339165ms step_avg:41.16ms +[2025-09-11 10:29:55] [Rank 0] step:8261/10000 train_time:339872ms step_avg:41.14ms +[2025-09-11 10:29:55] [Rank 0] step:8261/10000 train_time:339872ms step_avg:41.14ms +[2025-09-11 10:29:56] [Rank 0] step:8281/10000 train_time:340576ms step_avg:41.13ms +[2025-09-11 10:29:56] [Rank 0] step:8281/10000 train_time:340576ms step_avg:41.13ms +[2025-09-11 10:29:57] [Rank 0] step:8301/10000 train_time:341282ms step_avg:41.11ms +[2025-09-11 10:29:57] [Rank 0] step:8301/10000 train_time:341282ms step_avg:41.11ms +[2025-09-11 10:29:57] [Rank 0] step:8321/10000 train_time:341989ms step_avg:41.10ms 
+[2025-09-11 10:29:57] [Rank 0] step:8321/10000 train_time:341989ms step_avg:41.10ms +[2025-09-11 10:29:58] [Rank 0] step:8341/10000 train_time:342703ms step_avg:41.09ms +[2025-09-11 10:29:58] [Rank 0] step:8341/10000 train_time:342703ms step_avg:41.09ms +[2025-09-11 10:29:59] [Rank 0] step:8361/10000 train_time:343406ms step_avg:41.07ms +[2025-09-11 10:29:59] [Rank 0] step:8361/10000 train_time:343406ms step_avg:41.07ms +[2025-09-11 10:30:00] [Rank 0] step:8381/10000 train_time:344115ms step_avg:41.06ms +[2025-09-11 10:30:00] [Rank 0] step:8381/10000 train_time:344115ms step_avg:41.06ms +[2025-09-11 10:30:00] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 10:30:00] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 10:30:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 10:30:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 10:30:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 10:30:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 10:30:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:30:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:30:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 10:30:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 10:30:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 10:30:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 10:30:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 10:30:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 10:30:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 10:30:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 10:30:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 10:30:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 10:30:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 10:30:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 10:30:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 10:30:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 10:30:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 10:30:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 10:30:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 10:30:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 10:30:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 10:30:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 10:30:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 10:30:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 10:30:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 10:30:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 10:30:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 10:30:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 10:30:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 10:30:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 10:30:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 10:30:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 10:30:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 10:30:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 10:30:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 10:30:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 10:30:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 10:30:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 10:30:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:30:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:30:10] [Rank 0] PRINT: step:8400/10000 val_loss:4.4927 total_sharp:1.7509e-04 L1_sharp:7.2505e-05 L2_sharp:1.7982e-05 L3_sharp:1.5430e-05 L4_sharp:1.5532e-05 L5_sharp:1.7507e-05 L6_sharp:9.9093e-06 L7_sharp:1.5678e-05 L8_sharp:4.5735e-05 L9_sharp:3.6963e-05 L10_sharp:4.5988e-05 L11_sharp:7.0567e-05 L12_sharp:2.8752e-04 total_fnorm:1.6500e+01 total_l1_linf:3.0464e+04 total_spectral:8.2500e+00 L1_fnorm:4.8750e+00 L2_fnorm:4.5938e+00 L3_fnorm:4.6250e+00 L4_fnorm:4.6250e+00 L5_fnorm:4.4688e+00 L6_fnorm:4.5938e+00 L7_fnorm:4.5938e+00 L8_fnorm:4.3438e+00 L9_fnorm:4.5000e+00 L10_fnorm:4.5000e+00 L11_fnorm:4.5625e+00 L12_fnorm:4.5312e+00 L1_l1linf:9.3359e-01 L2_l1linf:9.2969e-01 L3_l1linf:9.3359e-01 L4_l1linf:9.2188e-01 L5_l1linf:8.7891e-01 L6_l1linf:8.5547e-01 L7_l1linf:8.5547e-01 L8_l1linf:8.0469e-01 L9_l1linf:7.5391e-01 L10_l1linf:7.2266e-01 L11_l1linf:7.5781e-01 L12_l1linf:8.5547e-01 L1_spectral:6.9306e-02 L2_spectral:6.4290e-02 L3_spectral:6.5492e-02 L4_spectral:6.5156e-02 L5_spectral:6.4769e-02 L6_spectral:6.6872e-02 L7_spectral:6.6362e-02 L8_spectral:6.6641e-02 L9_spectral:6.6989e-02 L10_spectral:6.6494e-02 L11_spectral:6.7225e-02 L12_spectral:6.7638e-02 train_time:344804ms step_avg:41.05ms +[2025-09-11 10:30:10] [Rank 0] PRINT: step:8400/10000 val_loss:4.4927 total_sharp:1.7509e-04 L1_sharp:7.2505e-05 L2_sharp:1.7982e-05 L3_sharp:1.5430e-05 L4_sharp:1.5532e-05 L5_sharp:1.7507e-05 L6_sharp:9.9093e-06 L7_sharp:1.5678e-05 L8_sharp:4.5735e-05 L9_sharp:3.6963e-05 L10_sharp:4.5988e-05 L11_sharp:7.0567e-05 L12_sharp:2.8752e-04 total_fnorm:1.6500e+01 total_l1_linf:3.0464e+04 total_spectral:8.2500e+00 L1_fnorm:4.8750e+00 L2_fnorm:4.5938e+00 L3_fnorm:4.6250e+00 L4_fnorm:4.6250e+00 L5_fnorm:4.4688e+00 L6_fnorm:4.5938e+00 L7_fnorm:4.5938e+00 L8_fnorm:4.3438e+00 L9_fnorm:4.5000e+00 L10_fnorm:4.5000e+00 L11_fnorm:4.5625e+00 L12_fnorm:4.5312e+00 L1_l1linf:9.3359e-01 L2_l1linf:9.2969e-01 L3_l1linf:9.3359e-01 L4_l1linf:9.2188e-01 L5_l1linf:8.7891e-01 
L6_l1linf:8.5547e-01 L7_l1linf:8.5547e-01 L8_l1linf:8.0469e-01 L9_l1linf:7.5391e-01 L10_l1linf:7.2266e-01 L11_l1linf:7.5781e-01 L12_l1linf:8.5547e-01 L1_spectral:6.9306e-02 L2_spectral:6.4290e-02 L3_spectral:6.5492e-02 L4_spectral:6.5156e-02 L5_spectral:6.4769e-02 L6_spectral:6.6872e-02 L7_spectral:6.6362e-02 L8_spectral:6.6641e-02 L9_spectral:6.6989e-02 L10_spectral:6.6494e-02 L11_spectral:6.7225e-02 L12_spectral:6.7638e-02 train_time:344804ms step_avg:41.05ms +[2025-09-11 10:30:11] [Rank 0] step:8401/10000 train_time:346108ms step_avg:41.20ms +[2025-09-11 10:30:11] [Rank 0] step:8401/10000 train_time:346108ms step_avg:41.20ms +[2025-09-11 10:30:12] [Rank 0] step:8421/10000 train_time:346843ms step_avg:41.19ms +[2025-09-11 10:30:12] [Rank 0] step:8421/10000 train_time:346843ms step_avg:41.19ms +[2025-09-11 10:30:13] [Rank 0] step:8441/10000 train_time:347554ms step_avg:41.17ms +[2025-09-11 10:30:13] [Rank 0] step:8441/10000 train_time:347554ms step_avg:41.17ms +[2025-09-11 10:30:13] [Rank 0] step:8461/10000 train_time:348262ms step_avg:41.16ms +[2025-09-11 10:30:13] [Rank 0] step:8461/10000 train_time:348262ms step_avg:41.16ms +[2025-09-11 10:30:14] [Rank 0] step:8481/10000 train_time:348973ms step_avg:41.15ms +[2025-09-11 10:30:14] [Rank 0] step:8481/10000 train_time:348973ms step_avg:41.15ms +[2025-09-11 10:30:15] [Rank 0] step:8501/10000 train_time:349681ms step_avg:41.13ms +[2025-09-11 10:30:15] [Rank 0] step:8501/10000 train_time:349681ms step_avg:41.13ms +[2025-09-11 10:30:16] [Rank 0] step:8521/10000 train_time:350388ms step_avg:41.12ms +[2025-09-11 10:30:16] [Rank 0] step:8521/10000 train_time:350388ms step_avg:41.12ms +[2025-09-11 10:30:16] [Rank 0] step:8541/10000 train_time:351096ms step_avg:41.11ms +[2025-09-11 10:30:16] [Rank 0] step:8541/10000 train_time:351096ms step_avg:41.11ms +[2025-09-11 10:30:17] [Rank 0] step:8561/10000 train_time:351809ms step_avg:41.09ms +[2025-09-11 10:30:17] [Rank 0] step:8561/10000 train_time:351809ms step_avg:41.09ms 
+[2025-09-11 10:30:18] [Rank 0] step:8581/10000 train_time:352519ms step_avg:41.08ms +[2025-09-11 10:30:18] [Rank 0] step:8581/10000 train_time:352519ms step_avg:41.08ms +[2025-09-11 10:30:18] [Rank 0] step:8601/10000 train_time:353228ms step_avg:41.07ms +[2025-09-11 10:30:18] [Rank 0] step:8601/10000 train_time:353228ms step_avg:41.07ms +[2025-09-11 10:30:19] [Rank 0] step:8621/10000 train_time:353935ms step_avg:41.05ms +[2025-09-11 10:30:19] [Rank 0] step:8621/10000 train_time:353935ms step_avg:41.05ms +[2025-09-11 10:30:20] [Rank 0] step:8641/10000 train_time:354642ms step_avg:41.04ms +[2025-09-11 10:30:20] [Rank 0] step:8641/10000 train_time:354642ms step_avg:41.04ms +[2025-09-11 10:30:21] [Rank 0] step:8661/10000 train_time:355351ms step_avg:41.03ms +[2025-09-11 10:30:21] [Rank 0] step:8661/10000 train_time:355351ms step_avg:41.03ms +[2025-09-11 10:30:21] [Rank 0] step:8681/10000 train_time:356061ms step_avg:41.02ms +[2025-09-11 10:30:21] [Rank 0] step:8681/10000 train_time:356061ms step_avg:41.02ms +[2025-09-11 10:30:22] [Rank 0] step:8701/10000 train_time:356767ms step_avg:41.00ms +[2025-09-11 10:30:22] [Rank 0] step:8701/10000 train_time:356767ms step_avg:41.00ms +[2025-09-11 10:30:23] [Rank 0] step:8721/10000 train_time:357478ms step_avg:40.99ms +[2025-09-11 10:30:23] [Rank 0] step:8721/10000 train_time:357478ms step_avg:40.99ms +[2025-09-11 10:30:23] [Rank 0] step:8741/10000 train_time:358183ms step_avg:40.98ms +[2025-09-11 10:30:23] [Rank 0] step:8741/10000 train_time:358183ms step_avg:40.98ms +[2025-09-11 10:30:24] [Rank 0] step:8761/10000 train_time:358894ms step_avg:40.96ms +[2025-09-11 10:30:24] [Rank 0] step:8761/10000 train_time:358894ms step_avg:40.96ms +[2025-09-11 10:30:25] [Rank 0] step:8781/10000 train_time:359599ms step_avg:40.95ms +[2025-09-11 10:30:25] [Rank 0] step:8781/10000 train_time:359599ms step_avg:40.95ms +[2025-09-11 10:30:26] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 10:30:26] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 10:30:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 10:30:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 10:30:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 10:30:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 10:30:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:30:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:30:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 10:30:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 10:30:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 10:30:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 10:30:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 10:30:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 10:30:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 10:30:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 10:30:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 10:30:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 10:30:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 10:30:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 10:30:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 10:30:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 10:30:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 10:30:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 10:30:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 10:30:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 10:30:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 10:30:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 10:30:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 10:30:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 10:30:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 10:30:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 10:30:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 10:30:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 10:30:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 10:30:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 10:30:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 10:30:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 10:30:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 10:30:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 10:30:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 10:30:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 10:30:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 10:30:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 10:30:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:30:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:30:36] [Rank 0] PRINT: step:8800/10000 val_loss:4.4777 total_sharp:1.8937e-04 L1_sharp:6.8513e-05 L2_sharp:2.5695e-05 L3_sharp:1.4598e-05 L4_sharp:1.4162e-05 L5_sharp:1.8537e-05 L6_sharp:1.4627e-05 L7_sharp:9.9923e-06 L8_sharp:4.1584e-05 L9_sharp:2.8663e-05 L10_sharp:4.0779e-05 L11_sharp:7.4435e-05 L12_sharp:4.0807e-04 total_fnorm:1.2000e+01 total_l1_linf:1.9968e+04 total_spectral:6.0000e+00 L1_fnorm:3.5781e+00 L2_fnorm:3.3594e+00 L3_fnorm:3.3750e+00 L4_fnorm:3.3594e+00 L5_fnorm:3.2812e+00 L6_fnorm:3.3281e+00 L7_fnorm:3.3281e+00 L8_fnorm:3.1719e+00 L9_fnorm:3.2656e+00 L10_fnorm:3.2812e+00 L11_fnorm:3.3125e+00 L12_fnorm:3.3125e+00 L1_l1linf:6.4453e-01 L2_l1linf:6.2109e-01 L3_l1linf:6.3281e-01 L4_l1linf:6.1719e-01 L5_l1linf:6.2500e-01 L6_l1linf:5.7422e-01 L7_l1linf:5.8984e-01 L8_l1linf:5.3906e-01 L9_l1linf:4.9805e-01 L10_l1linf:4.9219e-01 L11_l1linf:5.1172e-01 L12_l1linf:6.1719e-01 L1_spectral:5.1761e-02 L2_spectral:4.8072e-02 L3_spectral:4.8527e-02 L4_spectral:4.8584e-02 L5_spectral:4.8953e-02 L6_spectral:4.9721e-02 L7_spectral:4.9651e-02 L8_spectral:5.0294e-02 L9_spectral:4.9897e-02 L10_spectral:4.9836e-02 L11_spectral:4.9416e-02 L12_spectral:5.2242e-02 train_time:360428ms step_avg:40.96ms +[2025-09-11 10:30:36] [Rank 0] PRINT: step:8800/10000 
val_loss:4.4777 total_sharp:1.8937e-04 L1_sharp:6.8513e-05 L2_sharp:2.5695e-05 L3_sharp:1.4598e-05 L4_sharp:1.4162e-05 L5_sharp:1.8537e-05 L6_sharp:1.4627e-05 L7_sharp:9.9923e-06 L8_sharp:4.1584e-05 L9_sharp:2.8663e-05 L10_sharp:4.0779e-05 L11_sharp:7.4435e-05 L12_sharp:4.0807e-04 total_fnorm:1.2000e+01 total_l1_linf:1.9968e+04 total_spectral:6.0000e+00 L1_fnorm:3.5781e+00 L2_fnorm:3.3594e+00 L3_fnorm:3.3750e+00 L4_fnorm:3.3594e+00 L5_fnorm:3.2812e+00 L6_fnorm:3.3281e+00 L7_fnorm:3.3281e+00 L8_fnorm:3.1719e+00 L9_fnorm:3.2656e+00 L10_fnorm:3.2812e+00 L11_fnorm:3.3125e+00 L12_fnorm:3.3125e+00 L1_l1linf:6.4453e-01 L2_l1linf:6.2109e-01 L3_l1linf:6.3281e-01 L4_l1linf:6.1719e-01 L5_l1linf:6.2500e-01 L6_l1linf:5.7422e-01 L7_l1linf:5.8984e-01 L8_l1linf:5.3906e-01 L9_l1linf:4.9805e-01 L10_l1linf:4.9219e-01 L11_l1linf:5.1172e-01 L12_l1linf:6.1719e-01 L1_spectral:5.1761e-02 L2_spectral:4.8072e-02 L3_spectral:4.8527e-02 L4_spectral:4.8584e-02 L5_spectral:4.8953e-02 L6_spectral:4.9721e-02 L7_spectral:4.9651e-02 L8_spectral:5.0294e-02 L9_spectral:4.9897e-02 L10_spectral:4.9836e-02 L11_spectral:4.9416e-02 L12_spectral:5.2242e-02 train_time:360428ms step_avg:40.96ms +[2025-09-11 10:30:37] [Rank 0] step:8801/10000 train_time:361726ms step_avg:41.10ms +[2025-09-11 10:30:37] [Rank 0] step:8801/10000 train_time:361726ms step_avg:41.10ms +[2025-09-11 10:30:38] [Rank 0] step:8821/10000 train_time:362527ms step_avg:41.10ms +[2025-09-11 10:30:38] [Rank 0] step:8821/10000 train_time:362527ms step_avg:41.10ms +[2025-09-11 10:30:38] [Rank 0] step:8841/10000 train_time:363236ms step_avg:41.09ms +[2025-09-11 10:30:38] [Rank 0] step:8841/10000 train_time:363236ms step_avg:41.09ms +[2025-09-11 10:30:39] [Rank 0] step:8861/10000 train_time:363944ms step_avg:41.07ms +[2025-09-11 10:30:39] [Rank 0] step:8861/10000 train_time:363944ms step_avg:41.07ms +[2025-09-11 10:30:40] [Rank 0] step:8881/10000 train_time:364653ms step_avg:41.06ms +[2025-09-11 10:30:40] [Rank 0] step:8881/10000 
train_time:364653ms step_avg:41.06ms +[2025-09-11 10:30:41] [Rank 0] step:8901/10000 train_time:365363ms step_avg:41.05ms +[2025-09-11 10:30:41] [Rank 0] step:8901/10000 train_time:365363ms step_avg:41.05ms +[2025-09-11 10:30:41] [Rank 0] step:8921/10000 train_time:366067ms step_avg:41.03ms +[2025-09-11 10:30:41] [Rank 0] step:8921/10000 train_time:366067ms step_avg:41.03ms +[2025-09-11 10:30:42] [Rank 0] step:8941/10000 train_time:366778ms step_avg:41.02ms +[2025-09-11 10:30:42] [Rank 0] step:8941/10000 train_time:366778ms step_avg:41.02ms +[2025-09-11 10:30:43] [Rank 0] step:8961/10000 train_time:367494ms step_avg:41.01ms +[2025-09-11 10:30:43] [Rank 0] step:8961/10000 train_time:367494ms step_avg:41.01ms +[2025-09-11 10:30:43] [Rank 0] step:8981/10000 train_time:368206ms step_avg:41.00ms +[2025-09-11 10:30:43] [Rank 0] step:8981/10000 train_time:368206ms step_avg:41.00ms +[2025-09-11 10:30:44] [Rank 0] step:9001/10000 train_time:368910ms step_avg:40.99ms +[2025-09-11 10:30:44] [Rank 0] step:9001/10000 train_time:368910ms step_avg:40.99ms +[2025-09-11 10:30:45] [Rank 0] step:9021/10000 train_time:369620ms step_avg:40.97ms +[2025-09-11 10:30:45] [Rank 0] step:9021/10000 train_time:369620ms step_avg:40.97ms +[2025-09-11 10:30:46] [Rank 0] step:9041/10000 train_time:370330ms step_avg:40.96ms +[2025-09-11 10:30:46] [Rank 0] step:9041/10000 train_time:370330ms step_avg:40.96ms +[2025-09-11 10:30:46] [Rank 0] step:9061/10000 train_time:371037ms step_avg:40.95ms +[2025-09-11 10:30:46] [Rank 0] step:9061/10000 train_time:371037ms step_avg:40.95ms +[2025-09-11 10:30:47] [Rank 0] step:9081/10000 train_time:371747ms step_avg:40.94ms +[2025-09-11 10:30:47] [Rank 0] step:9081/10000 train_time:371747ms step_avg:40.94ms +[2025-09-11 10:30:48] [Rank 0] step:9101/10000 train_time:372460ms step_avg:40.93ms +[2025-09-11 10:30:48] [Rank 0] step:9101/10000 train_time:372460ms step_avg:40.93ms +[2025-09-11 10:30:48] [Rank 0] step:9121/10000 train_time:373173ms step_avg:40.91ms 
+[2025-09-11 10:30:48] [Rank 0] step:9121/10000 train_time:373173ms step_avg:40.91ms +[2025-09-11 10:30:49] [Rank 0] step:9141/10000 train_time:373880ms step_avg:40.90ms +[2025-09-11 10:30:49] [Rank 0] step:9141/10000 train_time:373880ms step_avg:40.90ms +[2025-09-11 10:30:50] [Rank 0] step:9161/10000 train_time:374590ms step_avg:40.89ms +[2025-09-11 10:30:50] [Rank 0] step:9161/10000 train_time:374590ms step_avg:40.89ms +[2025-09-11 10:30:50] [Rank 0] step:9181/10000 train_time:375301ms step_avg:40.88ms +[2025-09-11 10:30:50] [Rank 0] step:9181/10000 train_time:375301ms step_avg:40.88ms +[2025-09-11 10:30:51] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 10:30:51] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 10:30:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 10:30:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 10:30:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 10:30:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 10:30:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:30:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:30:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 10:30:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 10:30:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 10:30:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 10:30:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 10:30:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 10:30:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 10:30:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 10:30:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 10:30:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 10:30:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 10:30:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 10:30:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 10:30:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 10:30:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 10:30:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 10:30:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 10:30:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 10:30:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 10:30:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 10:30:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 10:30:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 10:30:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 10:30:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 10:30:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 10:30:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 10:31:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 10:31:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 10:31:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 10:31:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 10:31:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 10:31:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 10:31:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 10:31:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 10:31:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 10:31:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 10:31:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:31:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:31:01] [Rank 0] PRINT: step:9200/10000 val_loss:4.4610 total_sharp:1.5114e-04 L1_sharp:6.1372e-05 L2_sharp:9.5886e-06 L3_sharp:1.5990e-05 L4_sharp:1.5630e-05 L5_sharp:9.2984e-06 L6_sharp:7.3052e-06 L7_sharp:1.6525e-05 L8_sharp:3.0769e-05 L9_sharp:2.8251e-05 L10_sharp:3.8370e-05 L11_sharp:6.5103e-05 L12_sharp:2.7588e-04 total_fnorm:8.1250e+00 total_l1_linf:1.1648e+04 total_spectral:4.0312e+00 L1_fnorm:2.4219e+00 L2_fnorm:2.2500e+00 L3_fnorm:2.2656e+00 L4_fnorm:2.2656e+00 L5_fnorm:2.1875e+00 L6_fnorm:2.2500e+00 L7_fnorm:2.2500e+00 L8_fnorm:2.1406e+00 L9_fnorm:2.1875e+00 L10_fnorm:2.1875e+00 L11_fnorm:2.2188e+00 L12_fnorm:2.2188e+00 L1_l1linf:3.8086e-01 L2_l1linf:3.7109e-01 L3_l1linf:3.6914e-01 L4_l1linf:3.8477e-01 L5_l1linf:3.5742e-01 L6_l1linf:3.5547e-01 L7_l1linf:3.5547e-01 L8_l1linf:3.4570e-01 L9_l1linf:3.0273e-01 L10_l1linf:2.9688e-01 L11_l1linf:3.1055e-01 L12_l1linf:3.8477e-01 L1_spectral:3.6024e-02 L2_spectral:3.2692e-02 L3_spectral:3.3017e-02 L4_spectral:3.3373e-02 L5_spectral:3.4163e-02 L6_spectral:3.4459e-02 L7_spectral:3.3975e-02 L8_spectral:3.5528e-02 L9_spectral:3.4070e-02 L10_spectral:3.4103e-02 L11_spectral:3.4517e-02 L12_spectral:3.6377e-02 train_time:375993ms step_avg:40.87ms +[2025-09-11 10:31:01] [Rank 0] PRINT: step:9200/10000 val_loss:4.4610 total_sharp:1.5114e-04 L1_sharp:6.1372e-05 L2_sharp:9.5886e-06 L3_sharp:1.5990e-05 L4_sharp:1.5630e-05 L5_sharp:9.2984e-06 L6_sharp:7.3052e-06 L7_sharp:1.6525e-05 L8_sharp:3.0769e-05 L9_sharp:2.8251e-05 L10_sharp:3.8370e-05 L11_sharp:6.5103e-05 L12_sharp:2.7588e-04 total_fnorm:8.1250e+00 total_l1_linf:1.1648e+04 total_spectral:4.0312e+00 L1_fnorm:2.4219e+00 L2_fnorm:2.2500e+00 L3_fnorm:2.2656e+00 L4_fnorm:2.2656e+00 L5_fnorm:2.1875e+00 L6_fnorm:2.2500e+00 L7_fnorm:2.2500e+00 L8_fnorm:2.1406e+00 L9_fnorm:2.1875e+00 L10_fnorm:2.1875e+00 L11_fnorm:2.2188e+00 L12_fnorm:2.2188e+00 L1_l1linf:3.8086e-01 L2_l1linf:3.7109e-01 L3_l1linf:3.6914e-01 L4_l1linf:3.8477e-01 L5_l1linf:3.5742e-01 
L6_l1linf:3.5547e-01 L7_l1linf:3.5547e-01 L8_l1linf:3.4570e-01 L9_l1linf:3.0273e-01 L10_l1linf:2.9688e-01 L11_l1linf:3.1055e-01 L12_l1linf:3.8477e-01 L1_spectral:3.6024e-02 L2_spectral:3.2692e-02 L3_spectral:3.3017e-02 L4_spectral:3.3373e-02 L5_spectral:3.4163e-02 L6_spectral:3.4459e-02 L7_spectral:3.3975e-02 L8_spectral:3.5528e-02 L9_spectral:3.4070e-02 L10_spectral:3.4103e-02 L11_spectral:3.4517e-02 L12_spectral:3.6377e-02 train_time:375993ms step_avg:40.87ms +[2025-09-11 10:31:02] [Rank 0] step:9201/10000 train_time:377250ms step_avg:41.00ms +[2025-09-11 10:31:02] [Rank 0] step:9201/10000 train_time:377250ms step_avg:41.00ms +[2025-09-11 10:31:03] [Rank 0] step:9221/10000 train_time:378004ms step_avg:40.99ms +[2025-09-11 10:31:03] [Rank 0] step:9221/10000 train_time:378004ms step_avg:40.99ms +[2025-09-11 10:31:04] [Rank 0] step:9241/10000 train_time:378711ms step_avg:40.98ms +[2025-09-11 10:31:04] [Rank 0] step:9241/10000 train_time:378711ms step_avg:40.98ms +[2025-09-11 10:31:05] [Rank 0] step:9261/10000 train_time:379422ms step_avg:40.97ms +[2025-09-11 10:31:05] [Rank 0] step:9261/10000 train_time:379422ms step_avg:40.97ms +[2025-09-11 10:31:05] [Rank 0] step:9281/10000 train_time:380141ms step_avg:40.96ms +[2025-09-11 10:31:05] [Rank 0] step:9281/10000 train_time:380141ms step_avg:40.96ms +[2025-09-11 10:31:06] [Rank 0] step:9301/10000 train_time:380848ms step_avg:40.95ms +[2025-09-11 10:31:06] [Rank 0] step:9301/10000 train_time:380848ms step_avg:40.95ms +[2025-09-11 10:31:07] [Rank 0] step:9321/10000 train_time:381559ms step_avg:40.94ms +[2025-09-11 10:31:07] [Rank 0] step:9321/10000 train_time:381559ms step_avg:40.94ms +[2025-09-11 10:31:07] [Rank 0] step:9341/10000 train_time:382264ms step_avg:40.92ms +[2025-09-11 10:31:07] [Rank 0] step:9341/10000 train_time:382264ms step_avg:40.92ms +[2025-09-11 10:31:08] [Rank 0] step:9361/10000 train_time:382969ms step_avg:40.91ms +[2025-09-11 10:31:08] [Rank 0] step:9361/10000 train_time:382969ms step_avg:40.91ms 
+[2025-09-11 10:31:09] [Rank 0] step:9381/10000 train_time:383676ms step_avg:40.90ms +[2025-09-11 10:31:09] [Rank 0] step:9381/10000 train_time:383676ms step_avg:40.90ms +[2025-09-11 10:31:10] [Rank 0] step:9401/10000 train_time:384386ms step_avg:40.89ms +[2025-09-11 10:31:10] [Rank 0] step:9401/10000 train_time:384386ms step_avg:40.89ms +[2025-09-11 10:31:10] [Rank 0] step:9421/10000 train_time:385096ms step_avg:40.88ms +[2025-09-11 10:31:10] [Rank 0] step:9421/10000 train_time:385096ms step_avg:40.88ms +[2025-09-11 10:31:11] [Rank 0] step:9441/10000 train_time:385809ms step_avg:40.87ms +[2025-09-11 10:31:11] [Rank 0] step:9441/10000 train_time:385809ms step_avg:40.87ms +[2025-09-11 10:31:12] [Rank 0] step:9461/10000 train_time:386518ms step_avg:40.85ms +[2025-09-11 10:31:12] [Rank 0] step:9461/10000 train_time:386518ms step_avg:40.85ms +[2025-09-11 10:31:12] [Rank 0] step:9481/10000 train_time:387229ms step_avg:40.84ms +[2025-09-11 10:31:12] [Rank 0] step:9481/10000 train_time:387229ms step_avg:40.84ms +[2025-09-11 10:31:13] [Rank 0] step:9501/10000 train_time:387940ms step_avg:40.83ms +[2025-09-11 10:31:13] [Rank 0] step:9501/10000 train_time:387940ms step_avg:40.83ms +[2025-09-11 10:31:14] [Rank 0] step:9521/10000 train_time:388652ms step_avg:40.82ms +[2025-09-11 10:31:14] [Rank 0] step:9521/10000 train_time:388652ms step_avg:40.82ms +[2025-09-11 10:31:15] [Rank 0] step:9541/10000 train_time:389359ms step_avg:40.81ms +[2025-09-11 10:31:15] [Rank 0] step:9541/10000 train_time:389359ms step_avg:40.81ms +[2025-09-11 10:31:15] [Rank 0] step:9561/10000 train_time:390068ms step_avg:40.80ms +[2025-09-11 10:31:15] [Rank 0] step:9561/10000 train_time:390068ms step_avg:40.80ms +[2025-09-11 10:31:16] [Rank 0] step:9581/10000 train_time:390779ms step_avg:40.79ms +[2025-09-11 10:31:16] [Rank 0] step:9581/10000 train_time:390779ms step_avg:40.79ms +[2025-09-11 10:31:17] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 10:31:17] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 10:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 10:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 10:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 10:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 10:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 10:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 10:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 10:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 10:31:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 10:31:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 10:31:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 10:31:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 10:31:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 10:31:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 10:31:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 10:31:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 10:31:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 10:31:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 10:31:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 10:31:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 10:31:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 10:31:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 10:31:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 10:31:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 10:31:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 10:31:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 10:31:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 10:31:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 10:31:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 10:31:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 10:31:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 10:31:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 10:31:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 10:31:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 10:31:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 10:31:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 10:31:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 10:31:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 10:31:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 10:31:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 10:31:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:31:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:31:27] [Rank 0] PRINT: step:9600/10000 val_loss:4.4506 total_sharp:8.9893e-05 L1_sharp:3.9389e-05 L2_sharp:1.4551e-05 L3_sharp:4.2946e-06 L4_sharp:1.4678e-05 L5_sharp:1.0754e-05 L6_sharp:6.8075e-06 L7_sharp:8.8896e-06 L8_sharp:2.4767e-05 L9_sharp:2.1423e-05 L10_sharp:2.5842e-05 L11_sharp:4.6099e-05 L12_sharp:1.8209e-04 total_fnorm:4.5312e+00 total_l1_linf:5.4720e+03 total_spectral:2.2656e+00 L1_fnorm:1.3594e+00 L2_fnorm:1.2656e+00 L3_fnorm:1.2734e+00 L4_fnorm:1.2656e+00 L5_fnorm:1.2422e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.1953e+00 L9_fnorm:1.2344e+00 L10_fnorm:1.2344e+00 L11_fnorm:1.2500e+00 L12_fnorm:1.2656e+00 L1_l1linf:1.7969e-01 L2_l1linf:1.8359e-01 L3_l1linf:1.8066e-01 L4_l1linf:1.8750e-01 L5_l1linf:1.7969e-01 L6_l1linf:1.6797e-01 L7_l1linf:1.6797e-01 L8_l1linf:1.6406e-01 L9_l1linf:1.4844e-01 L10_l1linf:1.4551e-01 L11_l1linf:1.5430e-01 L12_l1linf:1.9531e-01 L1_spectral:2.0782e-02 L2_spectral:1.9942e-02 L3_spectral:1.8878e-02 L4_spectral:1.9110e-02 L5_spectral:1.9735e-02 L6_spectral:1.9508e-02 L7_spectral:1.9393e-02 L8_spectral:2.0547e-02 L9_spectral:1.9605e-02 L10_spectral:1.9400e-02 L11_spectral:1.9579e-02 L12_spectral:2.1160e-02 train_time:391465ms step_avg:40.78ms +[2025-09-11 10:31:27] [Rank 0] PRINT: step:9600/10000 
val_loss:4.4506 total_sharp:8.9893e-05 L1_sharp:3.9389e-05 L2_sharp:1.4551e-05 L3_sharp:4.2946e-06 L4_sharp:1.4678e-05 L5_sharp:1.0754e-05 L6_sharp:6.8075e-06 L7_sharp:8.8896e-06 L8_sharp:2.4767e-05 L9_sharp:2.1423e-05 L10_sharp:2.5842e-05 L11_sharp:4.6099e-05 L12_sharp:1.8209e-04 total_fnorm:4.5312e+00 total_l1_linf:5.4720e+03 total_spectral:2.2656e+00 L1_fnorm:1.3594e+00 L2_fnorm:1.2656e+00 L3_fnorm:1.2734e+00 L4_fnorm:1.2656e+00 L5_fnorm:1.2422e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.1953e+00 L9_fnorm:1.2344e+00 L10_fnorm:1.2344e+00 L11_fnorm:1.2500e+00 L12_fnorm:1.2656e+00 L1_l1linf:1.7969e-01 L2_l1linf:1.8359e-01 L3_l1linf:1.8066e-01 L4_l1linf:1.8750e-01 L5_l1linf:1.7969e-01 L6_l1linf:1.6797e-01 L7_l1linf:1.6797e-01 L8_l1linf:1.6406e-01 L9_l1linf:1.4844e-01 L10_l1linf:1.4551e-01 L11_l1linf:1.5430e-01 L12_l1linf:1.9531e-01 L1_spectral:2.0782e-02 L2_spectral:1.9942e-02 L3_spectral:1.8878e-02 L4_spectral:1.9110e-02 L5_spectral:1.9735e-02 L6_spectral:1.9508e-02 L7_spectral:1.9393e-02 L8_spectral:2.0547e-02 L9_spectral:1.9605e-02 L10_spectral:1.9400e-02 L11_spectral:1.9579e-02 L12_spectral:2.1160e-02 train_time:391465ms step_avg:40.78ms +[2025-09-11 10:31:28] [Rank 0] step:9601/10000 train_time:392811ms step_avg:40.91ms +[2025-09-11 10:31:28] [Rank 0] step:9601/10000 train_time:392811ms step_avg:40.91ms +[2025-09-11 10:31:29] [Rank 0] step:9621/10000 train_time:393858ms step_avg:40.94ms +[2025-09-11 10:31:29] [Rank 0] step:9621/10000 train_time:393858ms step_avg:40.94ms +[2025-09-11 10:31:30] [Rank 0] step:9641/10000 train_time:394863ms step_avg:40.96ms +[2025-09-11 10:31:30] [Rank 0] step:9641/10000 train_time:394863ms step_avg:40.96ms +[2025-09-11 10:31:31] [Rank 0] step:9661/10000 train_time:395586ms step_avg:40.95ms +[2025-09-11 10:31:31] [Rank 0] step:9661/10000 train_time:395586ms step_avg:40.95ms +[2025-09-11 10:31:32] [Rank 0] step:9681/10000 train_time:396592ms step_avg:40.97ms +[2025-09-11 10:31:32] [Rank 0] step:9681/10000 
train_time:396592ms step_avg:40.97ms +[2025-09-11 10:31:33] [Rank 0] step:9701/10000 train_time:397308ms step_avg:40.96ms +[2025-09-11 10:31:33] [Rank 0] step:9701/10000 train_time:397308ms step_avg:40.96ms +[2025-09-11 10:31:33] [Rank 0] step:9721/10000 train_time:398028ms step_avg:40.95ms +[2025-09-11 10:31:33] [Rank 0] step:9721/10000 train_time:398028ms step_avg:40.95ms +[2025-09-11 10:31:34] [Rank 0] step:9741/10000 train_time:398746ms step_avg:40.93ms +[2025-09-11 10:31:34] [Rank 0] step:9741/10000 train_time:398746ms step_avg:40.93ms +[2025-09-11 10:31:35] [Rank 0] step:9761/10000 train_time:399464ms step_avg:40.92ms +[2025-09-11 10:31:35] [Rank 0] step:9761/10000 train_time:399464ms step_avg:40.92ms +[2025-09-11 10:31:35] [Rank 0] step:9781/10000 train_time:400181ms step_avg:40.91ms +[2025-09-11 10:31:35] [Rank 0] step:9781/10000 train_time:400181ms step_avg:40.91ms +[2025-09-11 10:31:36] [Rank 0] step:9801/10000 train_time:400902ms step_avg:40.90ms +[2025-09-11 10:31:36] [Rank 0] step:9801/10000 train_time:400902ms step_avg:40.90ms +[2025-09-11 10:31:37] [Rank 0] step:9821/10000 train_time:401622ms step_avg:40.89ms +[2025-09-11 10:31:37] [Rank 0] step:9821/10000 train_time:401622ms step_avg:40.89ms +[2025-09-11 10:31:38] [Rank 0] step:9841/10000 train_time:402342ms step_avg:40.88ms +[2025-09-11 10:31:38] [Rank 0] step:9841/10000 train_time:402342ms step_avg:40.88ms +[2025-09-11 10:31:38] [Rank 0] step:9861/10000 train_time:403060ms step_avg:40.87ms +[2025-09-11 10:31:38] [Rank 0] step:9861/10000 train_time:403060ms step_avg:40.87ms +[2025-09-11 10:31:39] [Rank 0] step:9881/10000 train_time:403777ms step_avg:40.86ms +[2025-09-11 10:31:39] [Rank 0] step:9881/10000 train_time:403777ms step_avg:40.86ms +[2025-09-11 10:31:40] [Rank 0] step:9901/10000 train_time:404492ms step_avg:40.85ms +[2025-09-11 10:31:40] [Rank 0] step:9901/10000 train_time:404492ms step_avg:40.85ms +[2025-09-11 10:31:40] [Rank 0] step:9921/10000 train_time:405209ms step_avg:40.84ms 
+[2025-09-11 10:31:40] [Rank 0] step:9921/10000 train_time:405209ms step_avg:40.84ms +[2025-09-11 10:31:41] [Rank 0] step:9941/10000 train_time:405930ms step_avg:40.83ms +[2025-09-11 10:31:41] [Rank 0] step:9941/10000 train_time:405930ms step_avg:40.83ms +[2025-09-11 10:31:42] [Rank 0] step:9961/10000 train_time:406652ms step_avg:40.82ms +[2025-09-11 10:31:42] [Rank 0] step:9961/10000 train_time:406652ms step_avg:40.82ms +[2025-09-11 10:31:43] [Rank 0] step:9981/10000 train_time:407370ms step_avg:40.81ms +[2025-09-11 10:31:43] [Rank 0] step:9981/10000 train_time:407370ms step_avg:40.81ms +[2025-09-11 10:31:43] [Rank 0] step:10000/10000 train_time:408059ms step_avg:40.81ms +[2025-09-11 10:31:43] [Rank 0] step:10000/10000 train_time:408059ms step_avg:40.81ms +[2025-09-11 10:31:43] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 10:31:43] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 10:31:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 10:31:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 10:31:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 10:31:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 10:31:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:31:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:31:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 10:31:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 10:31:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 10:31:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 10:31:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 10:31:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 10:31:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 10:31:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 10:31:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 10:31:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 10:31:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 10:31:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 10:31:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 10:31:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 10:31:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 10:31:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 10:31:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 10:31:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 10:31:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 10:31:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 10:31:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 10:31:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 10:31:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 10:31:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 10:31:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 10:31:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 10:31:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 10:31:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 10:31:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 10:31:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 10:31:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 10:31:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 10:31:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 10:31:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 10:31:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 10:31:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 10:31:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:31:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:31:53] [Rank 0] PRINT: step:10000/10000 val_loss:4.4467 total_sharp:6.0056e-05 L1_sharp:1.3907e-05 L2_sharp:7.5673e-06 L3_sharp:5.3591e-06 L4_sharp:1.0263e-05 L5_sharp:9.8074e-06 L6_sharp:5.8588e-06 L7_sharp:8.9313e-06 L8_sharp:2.0068e-05 L9_sharp:1.6347e-05 L10_sharp:2.4315e-05 L11_sharp:3.6332e-05 L12_sharp:1.3494e-04 total_fnorm:1.7734e+00 total_l1_linf:1.5520e+03 total_spectral:8.8281e-01 L1_fnorm:5.3906e-01 L2_fnorm:4.9219e-01 L3_fnorm:4.9414e-01 L4_fnorm:4.9219e-01 L5_fnorm:4.8242e-01 L6_fnorm:4.9219e-01 L7_fnorm:4.9219e-01 L8_fnorm:4.7070e-01 L9_fnorm:4.7852e-01 L10_fnorm:4.8047e-01 L11_fnorm:4.8828e-01 L12_fnorm:4.9023e-01 L1_l1linf:5.7373e-02 L2_l1linf:5.6152e-02 L3_l1linf:5.8105e-02 L4_l1linf:5.6152e-02 L5_l1linf:5.5420e-02 L6_l1linf:5.3955e-02 L7_l1linf:5.4199e-02 L8_l1linf:5.7129e-02 L9_l1linf:4.5898e-02 L10_l1linf:4.4678e-02 L11_l1linf:4.7119e-02 L12_l1linf:5.9814e-02 L1_spectral:8.2423e-03 L2_spectral:7.4950e-03 L3_spectral:7.6451e-03 L4_spectral:7.4942e-03 L5_spectral:7.7926e-03 L6_spectral:7.8331e-03 L7_spectral:7.8658e-03 L8_spectral:8.3511e-03 L9_spectral:7.8425e-03 L10_spectral:7.7028e-03 L11_spectral:7.8081e-03 L12_spectral:8.4554e-03 train_time:408079ms step_avg:40.81ms +[2025-09-11 10:31:53] [Rank 0] PRINT: step:10000/10000 val_loss:4.4467 total_sharp:6.0056e-05 L1_sharp:1.3907e-05 L2_sharp:7.5673e-06 L3_sharp:5.3591e-06 L4_sharp:1.0263e-05 L5_sharp:9.8074e-06 L6_sharp:5.8588e-06 L7_sharp:8.9313e-06 L8_sharp:2.0068e-05 L9_sharp:1.6347e-05 L10_sharp:2.4315e-05 L11_sharp:3.6332e-05 L12_sharp:1.3494e-04 total_fnorm:1.7734e+00 total_l1_linf:1.5520e+03 total_spectral:8.8281e-01 L1_fnorm:5.3906e-01 L2_fnorm:4.9219e-01 L3_fnorm:4.9414e-01 L4_fnorm:4.9219e-01 L5_fnorm:4.8242e-01 L6_fnorm:4.9219e-01 L7_fnorm:4.9219e-01 L8_fnorm:4.7070e-01 L9_fnorm:4.7852e-01 L10_fnorm:4.8047e-01 L11_fnorm:4.8828e-01 L12_fnorm:4.9023e-01 L1_l1linf:5.7373e-02 L2_l1linf:5.6152e-02 L3_l1linf:5.8105e-02 L4_l1linf:5.6152e-02 L5_l1linf:5.5420e-02 
L6_l1linf:5.3955e-02 L7_l1linf:5.4199e-02 L8_l1linf:5.7129e-02 L9_l1linf:4.5898e-02 L10_l1linf:4.4678e-02 L11_l1linf:4.7119e-02 L12_l1linf:5.9814e-02 L1_spectral:8.2423e-03 L2_spectral:7.4950e-03 L3_spectral:7.6451e-03 L4_spectral:7.4942e-03 L5_spectral:7.7926e-03 L6_spectral:7.8331e-03 L7_spectral:7.8658e-03 L8_spectral:8.3511e-03 L9_spectral:7.8425e-03 L10_spectral:7.7028e-03 L11_spectral:7.8081e-03 L12_spectral:8.4554e-03 train_time:408079ms step_avg:40.81ms +[2025-09-11 10:31:53] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 10:31:53 2025 --- +[2025-09-11 10:31:53] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 10:31:53 2025 --- +[2025-09-11 10:31:53] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 10:31:53] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.0005_seed_42/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.0005_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..64b4fbb5578756b7d16dcf02dcdb16ef8e4caf28 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.0005_seed_42/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.01, + "muon_lr": 0.0005, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "74bb3f20-4ad1-425e-b57d-0fe778e9ae08", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.0005_seed_42/training_log_74bb3f20-4ad1-425e-b57d-0fe778e9ae08.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.0005_seed_42/training_log_74bb3f20-4ad1-425e-b57d-0fe778e9ae08.txt new file mode 100644 index 0000000000000000000000000000000000000000..4b4730d0cb7e09ea727e77ec30ec29fc812f05da --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.0005_seed_42/training_log_74bb3f20-4ad1-425e-b57d-0fe778e9ae08.txt @@ -0,0 +1,4264 @@ +[2025-09-11 10:33:39] [Rank 0] PRINT: --- Script Start: Thu Sep 11 10:33:39 2025 --- +[2025-09-11 10:33:39] [Rank 0] PRINT: --- Script Start: Thu Sep 11 10:33:39 2025 --- +[2025-09-11 10:33:39] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.01, muon_lr=0.0005, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 10:33:39] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.01, muon_lr=0.0005, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 10:33:39] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 10:33:39] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 10:33:39] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-11 10:33:39] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-11 10:33:39] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.0005_seed_42 +[2025-09-11 10:33:39] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.0005_seed_42 +[2025-09-11 10:33:39] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses 
import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert 
header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." 
+ "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + 
train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 10:33:39] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 10:33:39] [Rank 0] PRINT: Constructing model... +[2025-09-11 10:33:39] [Rank 0] PRINT: Constructing model... +[2025-09-11 10:33:40] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 10:33:40] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 10:33:40] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 10:33:40] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 10:33:40] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 10:33:40] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 10:33:40] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 10:33:40] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 10:33:40] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 10:33:40] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 10:33:42] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 10:33:42] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 10:33:42] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 10:33:42] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 10:33:42] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 10:33:42] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 10:33:48] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 10:33:48] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 10:33:48] [Rank 0] PRINT: Starting warmup... +[2025-09-11 10:33:48] [Rank 0] PRINT: Starting warmup... +[2025-09-11 10:34:26] [Rank 0] PRINT: Warmup complete. +[2025-09-11 10:34:26] [Rank 0] PRINT: Warmup complete. +[2025-09-11 10:34:26] [Rank 0] PRINT: Starting training... +[2025-09-11 10:34:26] [Rank 0] PRINT: Starting training... 
+[2025-09-11 10:34:27] [Rank 0] step:21/10000 train_time:1139ms step_avg:54.25ms +[2025-09-11 10:34:27] [Rank 0] step:21/10000 train_time:1139ms step_avg:54.25ms +[2025-09-11 10:34:28] [Rank 0] step:41/10000 train_time:1870ms step_avg:45.61ms +[2025-09-11 10:34:28] [Rank 0] step:41/10000 train_time:1870ms step_avg:45.61ms +[2025-09-11 10:34:29] [Rank 0] step:61/10000 train_time:2600ms step_avg:42.63ms +[2025-09-11 10:34:29] [Rank 0] step:61/10000 train_time:2600ms step_avg:42.63ms +[2025-09-11 10:34:29] [Rank 0] step:81/10000 train_time:3331ms step_avg:41.12ms +[2025-09-11 10:34:29] [Rank 0] step:81/10000 train_time:3331ms step_avg:41.12ms +[2025-09-11 10:34:30] [Rank 0] step:101/10000 train_time:4061ms step_avg:40.20ms +[2025-09-11 10:34:30] [Rank 0] step:101/10000 train_time:4061ms step_avg:40.20ms +[2025-09-11 10:34:31] [Rank 0] step:121/10000 train_time:4792ms step_avg:39.60ms +[2025-09-11 10:34:31] [Rank 0] step:121/10000 train_time:4792ms step_avg:39.60ms +[2025-09-11 10:34:32] [Rank 0] step:141/10000 train_time:5521ms step_avg:39.16ms +[2025-09-11 10:34:32] [Rank 0] step:141/10000 train_time:5521ms step_avg:39.16ms +[2025-09-11 10:34:32] [Rank 0] step:161/10000 train_time:6251ms step_avg:38.83ms +[2025-09-11 10:34:32] [Rank 0] step:161/10000 train_time:6251ms step_avg:38.83ms +[2025-09-11 10:34:33] [Rank 0] step:181/10000 train_time:6981ms step_avg:38.57ms +[2025-09-11 10:34:33] [Rank 0] step:181/10000 train_time:6981ms step_avg:38.57ms +[2025-09-11 10:34:34] [Rank 0] step:201/10000 train_time:7710ms step_avg:38.36ms +[2025-09-11 10:34:34] [Rank 0] step:201/10000 train_time:7710ms step_avg:38.36ms +[2025-09-11 10:34:34] [Rank 0] step:221/10000 train_time:8440ms step_avg:38.19ms +[2025-09-11 10:34:34] [Rank 0] step:221/10000 train_time:8440ms step_avg:38.19ms +[2025-09-11 10:34:35] [Rank 0] step:241/10000 train_time:9170ms step_avg:38.05ms +[2025-09-11 10:34:35] [Rank 0] step:241/10000 train_time:9170ms step_avg:38.05ms +[2025-09-11 10:34:36] [Rank 0] 
step:261/10000 train_time:9899ms step_avg:37.93ms +[2025-09-11 10:34:36] [Rank 0] step:261/10000 train_time:9899ms step_avg:37.93ms +[2025-09-11 10:34:37] [Rank 0] step:281/10000 train_time:10629ms step_avg:37.83ms +[2025-09-11 10:34:37] [Rank 0] step:281/10000 train_time:10629ms step_avg:37.83ms +[2025-09-11 10:34:37] [Rank 0] step:301/10000 train_time:11359ms step_avg:37.74ms +[2025-09-11 10:34:37] [Rank 0] step:301/10000 train_time:11359ms step_avg:37.74ms +[2025-09-11 10:34:38] [Rank 0] step:321/10000 train_time:12088ms step_avg:37.66ms +[2025-09-11 10:34:38] [Rank 0] step:321/10000 train_time:12088ms step_avg:37.66ms +[2025-09-11 10:34:39] [Rank 0] step:341/10000 train_time:12818ms step_avg:37.59ms +[2025-09-11 10:34:39] [Rank 0] step:341/10000 train_time:12818ms step_avg:37.59ms +[2025-09-11 10:34:40] [Rank 0] step:361/10000 train_time:13548ms step_avg:37.53ms +[2025-09-11 10:34:40] [Rank 0] step:361/10000 train_time:13548ms step_avg:37.53ms +[2025-09-11 10:34:40] [Rank 0] step:381/10000 train_time:14277ms step_avg:37.47ms +[2025-09-11 10:34:40] [Rank 0] step:381/10000 train_time:14277ms step_avg:37.47ms +[2025-09-11 10:34:41] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 10:34:41] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 10:34:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 10:34:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 10:35:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 10:35:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 10:35:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:35:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 10:35:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 10:35:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 10:35:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 10:35:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 10:35:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 10:35:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 10:35:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 10:35:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 10:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 10:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 10:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 10:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 10:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 10:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 10:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 10:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 10:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 10:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 10:35:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 10:35:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 10:35:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 10:35:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 10:35:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 10:35:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 10:35:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 10:35:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 10:35:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 10:35:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 10:35:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 10:35:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 10:35:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 10:35:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 10:35:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 10:35:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 10:35:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 10:35:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 10:35:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:35:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:35:27] [Rank 0] PRINT: step:400/10000 val_loss:6.4406 total_sharp:4.7016e-04 L1_sharp:9.4690e-02 L2_sharp:8.6893e-02 L3_sharp:9.3316e-02 L4_sharp:1.2462e-01 L5_sharp:1.2285e-01 L6_sharp:1.4078e-01 L7_sharp:1.7762e-01 L8_sharp:2.6201e-01 L9_sharp:3.6006e-01 L10_sharp:5.9219e-01 L11_sharp:5.9697e-01 L12_sharp:6.0685e-01 total_fnorm:3.8909e+01 total_l1_linf:9.2531e+04 total_spectral:1.9461e+01 L1_fnorm:5.8283e-02 L2_fnorm:5.8216e-02 L3_fnorm:5.8120e-02 L4_fnorm:5.7633e-02 L5_fnorm:5.7832e-02 L6_fnorm:5.7686e-02 L7_fnorm:5.7943e-02 L8_fnorm:5.6969e-02 L9_fnorm:5.6909e-02 L10_fnorm:5.6383e-02 L11_fnorm:5.6934e-02 L12_fnorm:5.6297e-02 L1_l1linf:2.2465e-02 L2_l1linf:2.2392e-02 L3_l1linf:2.2489e-02 L4_l1linf:2.2474e-02 L5_l1linf:2.2383e-02 L6_l1linf:2.2364e-02 L7_l1linf:2.2258e-02 L8_l1linf:2.2347e-02 L9_l1linf:2.2150e-02 L10_l1linf:2.2010e-02 L11_l1linf:2.2081e-02 L12_l1linf:2.1971e-02 L1_spectral:6.0254e-04 L2_spectral:6.0248e-04 L3_spectral:6.0256e-04 L4_spectral:6.0246e-04 L5_spectral:6.0258e-04 L6_spectral:6.0248e-04 L7_spectral:6.0311e-04 L8_spectral:6.0308e-04 L9_spectral:6.0251e-04 L10_spectral:6.0276e-04 L11_spectral:6.0273e-04 L12_spectral:6.0273e-04 train_time:14987ms step_avg:37.47ms +[2025-09-11 10:35:27] [Rank 0] PRINT: step:400/10000 val_loss:6.4406 total_sharp:4.7016e-04 L1_sharp:9.4690e-02 L2_sharp:8.6893e-02 L3_sharp:9.3316e-02 L4_sharp:1.2462e-01 L5_sharp:1.2285e-01 L6_sharp:1.4078e-01 L7_sharp:1.7762e-01 L8_sharp:2.6201e-01 L9_sharp:3.6006e-01 L10_sharp:5.9219e-01 L11_sharp:5.9697e-01 L12_sharp:6.0685e-01 total_fnorm:3.8909e+01 total_l1_linf:9.2531e+04 total_spectral:1.9461e+01 L1_fnorm:5.8283e-02 L2_fnorm:5.8216e-02 L3_fnorm:5.8120e-02 L4_fnorm:5.7633e-02 L5_fnorm:5.7832e-02 L6_fnorm:5.7686e-02 L7_fnorm:5.7943e-02 L8_fnorm:5.6969e-02 L9_fnorm:5.6909e-02 L10_fnorm:5.6383e-02 L11_fnorm:5.6934e-02 L12_fnorm:5.6297e-02 L1_l1linf:2.2465e-02 L2_l1linf:2.2392e-02 L3_l1linf:2.2489e-02 L4_l1linf:2.2474e-02 L5_l1linf:2.2383e-02 
L6_l1linf:2.2364e-02 L7_l1linf:2.2258e-02 L8_l1linf:2.2347e-02 L9_l1linf:2.2150e-02 L10_l1linf:2.2010e-02 L11_l1linf:2.2081e-02 L12_l1linf:2.1971e-02 L1_spectral:6.0254e-04 L2_spectral:6.0248e-04 L3_spectral:6.0256e-04 L4_spectral:6.0246e-04 L5_spectral:6.0258e-04 L6_spectral:6.0248e-04 L7_spectral:6.0311e-04 L8_spectral:6.0308e-04 L9_spectral:6.0251e-04 L10_spectral:6.0276e-04 L11_spectral:6.0273e-04 L12_spectral:6.0273e-04 train_time:14987ms step_avg:37.47ms +[2025-09-11 10:35:58] [Rank 0] step:401/10000 train_time:45407ms step_avg:113.23ms +[2025-09-11 10:35:58] [Rank 0] step:401/10000 train_time:45407ms step_avg:113.23ms +[2025-09-11 10:35:59] [Rank 0] step:421/10000 train_time:47314ms step_avg:112.39ms +[2025-09-11 10:35:59] [Rank 0] step:421/10000 train_time:47314ms step_avg:112.39ms +[2025-09-11 10:36:00] [Rank 0] step:441/10000 train_time:47956ms step_avg:108.74ms +[2025-09-11 10:36:00] [Rank 0] step:441/10000 train_time:47956ms step_avg:108.74ms +[2025-09-11 10:36:01] [Rank 0] step:461/10000 train_time:48598ms step_avg:105.42ms +[2025-09-11 10:36:01] [Rank 0] step:461/10000 train_time:48598ms step_avg:105.42ms +[2025-09-11 10:36:01] [Rank 0] step:481/10000 train_time:49239ms step_avg:102.37ms +[2025-09-11 10:36:01] [Rank 0] step:481/10000 train_time:49239ms step_avg:102.37ms +[2025-09-11 10:36:02] [Rank 0] step:501/10000 train_time:49882ms step_avg:99.56ms +[2025-09-11 10:36:02] [Rank 0] step:501/10000 train_time:49882ms step_avg:99.56ms +[2025-09-11 10:36:03] [Rank 0] step:521/10000 train_time:50523ms step_avg:96.97ms +[2025-09-11 10:36:03] [Rank 0] step:521/10000 train_time:50523ms step_avg:96.97ms +[2025-09-11 10:36:03] [Rank 0] step:541/10000 train_time:51164ms step_avg:94.57ms +[2025-09-11 10:36:03] [Rank 0] step:541/10000 train_time:51164ms step_avg:94.57ms +[2025-09-11 10:36:04] [Rank 0] step:561/10000 train_time:51805ms step_avg:92.34ms +[2025-09-11 10:36:04] [Rank 0] step:561/10000 train_time:51805ms step_avg:92.34ms +[2025-09-11 10:36:05] [Rank 
0] step:581/10000 train_time:52446ms step_avg:90.27ms +[2025-09-11 10:36:05] [Rank 0] step:581/10000 train_time:52446ms step_avg:90.27ms +[2025-09-11 10:36:05] [Rank 0] step:601/10000 train_time:53086ms step_avg:88.33ms +[2025-09-11 10:36:05] [Rank 0] step:601/10000 train_time:53086ms step_avg:88.33ms +[2025-09-11 10:36:06] [Rank 0] step:621/10000 train_time:53727ms step_avg:86.52ms +[2025-09-11 10:36:06] [Rank 0] step:621/10000 train_time:53727ms step_avg:86.52ms +[2025-09-11 10:36:06] [Rank 0] step:641/10000 train_time:54368ms step_avg:84.82ms +[2025-09-11 10:36:06] [Rank 0] step:641/10000 train_time:54368ms step_avg:84.82ms +[2025-09-11 10:36:07] [Rank 0] step:661/10000 train_time:55008ms step_avg:83.22ms +[2025-09-11 10:36:07] [Rank 0] step:661/10000 train_time:55008ms step_avg:83.22ms +[2025-09-11 10:36:08] [Rank 0] step:681/10000 train_time:55649ms step_avg:81.72ms +[2025-09-11 10:36:08] [Rank 0] step:681/10000 train_time:55649ms step_avg:81.72ms +[2025-09-11 10:36:08] [Rank 0] step:701/10000 train_time:56289ms step_avg:80.30ms +[2025-09-11 10:36:08] [Rank 0] step:701/10000 train_time:56289ms step_avg:80.30ms +[2025-09-11 10:36:09] [Rank 0] step:721/10000 train_time:56930ms step_avg:78.96ms +[2025-09-11 10:36:09] [Rank 0] step:721/10000 train_time:56930ms step_avg:78.96ms +[2025-09-11 10:36:10] [Rank 0] step:741/10000 train_time:57571ms step_avg:77.69ms +[2025-09-11 10:36:10] [Rank 0] step:741/10000 train_time:57571ms step_avg:77.69ms +[2025-09-11 10:36:10] [Rank 0] step:761/10000 train_time:58217ms step_avg:76.50ms +[2025-09-11 10:36:10] [Rank 0] step:761/10000 train_time:58217ms step_avg:76.50ms +[2025-09-11 10:36:11] [Rank 0] step:781/10000 train_time:58862ms step_avg:75.37ms +[2025-09-11 10:36:11] [Rank 0] step:781/10000 train_time:58862ms step_avg:75.37ms +[2025-09-11 10:36:12] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 10:36:12] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 10:36:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 10:36:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 10:36:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 10:36:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 10:36:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:36:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:36:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 10:36:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 10:36:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 10:36:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 10:36:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 10:36:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 10:36:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 10:36:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 10:36:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 10:36:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 10:36:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 10:36:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 10:36:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 10:36:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 10:36:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 10:36:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 10:36:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 10:36:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 10:36:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 10:36:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 10:36:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 10:36:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 10:36:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 10:36:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 10:36:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 10:36:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 10:36:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 10:36:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 10:36:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 10:36:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 10:36:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 10:36:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 10:36:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... 
+[2025-09-11 10:36:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 10:36:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 10:36:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 10:36:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:36:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:36:56] [Rank 0] PRINT: step:800/10000 val_loss:6.0630 total_sharp:6.8986e-04 L1_sharp:4.4109e-01 L2_sharp:4.0348e-01 L3_sharp:3.8826e-01 L4_sharp:4.7558e-01 L5_sharp:5.8119e-01 L6_sharp:6.6998e-01 L7_sharp:9.1933e-01 L8_sharp:1.5882e+00 L9_sharp:1.5993e+00 L10_sharp:1.6909e+00 L11_sharp:1.4072e+00 L12_sharp:1.7412e+00 total_fnorm:3.7750e+01 total_l1_linf:6.8608e+04 total_spectral:1.8875e+01 L1_fnorm:4.4922e-02 L2_fnorm:4.7119e-02 L3_fnorm:4.6875e-02 L4_fnorm:4.6875e-02 L5_fnorm:4.4922e-02 L6_fnorm:4.6143e-02 L7_fnorm:4.6631e-02 L8_fnorm:4.2725e-02 L9_fnorm:4.4922e-02 L10_fnorm:4.4434e-02 L11_fnorm:4.3945e-02 L12_fnorm:4.1748e-02 L1_l1linf:2.0264e-02 L2_l1linf:2.0386e-02 L3_l1linf:2.0630e-02 L4_l1linf:2.0508e-02 L5_l1linf:2.0386e-02 L6_l1linf:2.0508e-02 L7_l1linf:2.0264e-02 L8_l1linf:2.0264e-02 L9_l1linf:2.0020e-02 L10_l1linf:1.9897e-02 L11_l1linf:1.9775e-02 L12_l1linf:1.9409e-02 L1_spectral:6.9094e-04 L2_spectral:6.9485e-04 L3_spectral:6.9153e-04 L4_spectral:6.9327e-04 L5_spectral:6.8967e-04 L6_spectral:6.9431e-04 L7_spectral:6.8884e-04 L8_spectral:6.8800e-04 L9_spectral:6.8594e-04 L10_spectral:6.8459e-04 L11_spectral:6.7906e-04 L12_spectral:6.8161e-04 train_time:59490ms step_avg:74.36ms +[2025-09-11 10:36:56] [Rank 0] PRINT: step:800/10000 val_loss:6.0630 total_sharp:6.8986e-04 L1_sharp:4.4109e-01 L2_sharp:4.0348e-01 L3_sharp:3.8826e-01 L4_sharp:4.7558e-01 L5_sharp:5.8119e-01 L6_sharp:6.6998e-01 L7_sharp:9.1933e-01 L8_sharp:1.5882e+00 
L9_sharp:1.5993e+00 L10_sharp:1.6909e+00 L11_sharp:1.4072e+00 L12_sharp:1.7412e+00 total_fnorm:3.7750e+01 total_l1_linf:6.8608e+04 total_spectral:1.8875e+01 L1_fnorm:4.4922e-02 L2_fnorm:4.7119e-02 L3_fnorm:4.6875e-02 L4_fnorm:4.6875e-02 L5_fnorm:4.4922e-02 L6_fnorm:4.6143e-02 L7_fnorm:4.6631e-02 L8_fnorm:4.2725e-02 L9_fnorm:4.4922e-02 L10_fnorm:4.4434e-02 L11_fnorm:4.3945e-02 L12_fnorm:4.1748e-02 L1_l1linf:2.0264e-02 L2_l1linf:2.0386e-02 L3_l1linf:2.0630e-02 L4_l1linf:2.0508e-02 L5_l1linf:2.0386e-02 L6_l1linf:2.0508e-02 L7_l1linf:2.0264e-02 L8_l1linf:2.0264e-02 L9_l1linf:2.0020e-02 L10_l1linf:1.9897e-02 L11_l1linf:1.9775e-02 L12_l1linf:1.9409e-02 L1_spectral:6.9094e-04 L2_spectral:6.9485e-04 L3_spectral:6.9153e-04 L4_spectral:6.9327e-04 L5_spectral:6.8967e-04 L6_spectral:6.9431e-04 L7_spectral:6.8884e-04 L8_spectral:6.8800e-04 L9_spectral:6.8594e-04 L10_spectral:6.8459e-04 L11_spectral:6.7906e-04 L12_spectral:6.8161e-04 train_time:59490ms step_avg:74.36ms +[2025-09-11 10:36:58] [Rank 0] step:801/10000 train_time:60726ms step_avg:75.81ms +[2025-09-11 10:36:58] [Rank 0] step:801/10000 train_time:60726ms step_avg:75.81ms +[2025-09-11 10:36:58] [Rank 0] step:821/10000 train_time:61401ms step_avg:74.79ms +[2025-09-11 10:36:58] [Rank 0] step:821/10000 train_time:61401ms step_avg:74.79ms +[2025-09-11 10:36:59] [Rank 0] step:841/10000 train_time:62048ms step_avg:73.78ms +[2025-09-11 10:36:59] [Rank 0] step:841/10000 train_time:62048ms step_avg:73.78ms +[2025-09-11 10:37:00] [Rank 0] step:861/10000 train_time:62694ms step_avg:72.81ms +[2025-09-11 10:37:00] [Rank 0] step:861/10000 train_time:62694ms step_avg:72.81ms +[2025-09-11 10:37:00] [Rank 0] step:881/10000 train_time:63339ms step_avg:71.89ms +[2025-09-11 10:37:00] [Rank 0] step:881/10000 train_time:63339ms step_avg:71.89ms +[2025-09-11 10:37:01] [Rank 0] step:901/10000 train_time:63985ms step_avg:71.02ms +[2025-09-11 10:37:01] [Rank 0] step:901/10000 train_time:63985ms step_avg:71.02ms +[2025-09-11 10:37:02] [Rank 0] 
step:921/10000 train_time:64630ms step_avg:70.17ms +[2025-09-11 10:37:02] [Rank 0] step:921/10000 train_time:64630ms step_avg:70.17ms +[2025-09-11 10:37:02] [Rank 0] step:941/10000 train_time:65276ms step_avg:69.37ms +[2025-09-11 10:37:02] [Rank 0] step:941/10000 train_time:65276ms step_avg:69.37ms +[2025-09-11 10:37:03] [Rank 0] step:961/10000 train_time:65923ms step_avg:68.60ms +[2025-09-11 10:37:03] [Rank 0] step:961/10000 train_time:65923ms step_avg:68.60ms +[2025-09-11 10:37:03] [Rank 0] step:981/10000 train_time:66568ms step_avg:67.86ms +[2025-09-11 10:37:03] [Rank 0] step:981/10000 train_time:66568ms step_avg:67.86ms +[2025-09-11 10:37:04] [Rank 0] step:1001/10000 train_time:67213ms step_avg:67.15ms +[2025-09-11 10:37:04] [Rank 0] step:1001/10000 train_time:67213ms step_avg:67.15ms +[2025-09-11 10:37:05] [Rank 0] step:1021/10000 train_time:67858ms step_avg:66.46ms +[2025-09-11 10:37:05] [Rank 0] step:1021/10000 train_time:67858ms step_avg:66.46ms +[2025-09-11 10:37:05] [Rank 0] step:1041/10000 train_time:68504ms step_avg:65.81ms +[2025-09-11 10:37:05] [Rank 0] step:1041/10000 train_time:68504ms step_avg:65.81ms +[2025-09-11 10:37:06] [Rank 0] step:1061/10000 train_time:69149ms step_avg:65.17ms +[2025-09-11 10:37:06] [Rank 0] step:1061/10000 train_time:69149ms step_avg:65.17ms +[2025-09-11 10:37:07] [Rank 0] step:1081/10000 train_time:69794ms step_avg:64.56ms +[2025-09-11 10:37:07] [Rank 0] step:1081/10000 train_time:69794ms step_avg:64.56ms +[2025-09-11 10:37:07] [Rank 0] step:1101/10000 train_time:70440ms step_avg:63.98ms +[2025-09-11 10:37:07] [Rank 0] step:1101/10000 train_time:70440ms step_avg:63.98ms +[2025-09-11 10:37:08] [Rank 0] step:1121/10000 train_time:71086ms step_avg:63.41ms +[2025-09-11 10:37:08] [Rank 0] step:1121/10000 train_time:71086ms step_avg:63.41ms +[2025-09-11 10:37:09] [Rank 0] step:1141/10000 train_time:71731ms step_avg:62.87ms +[2025-09-11 10:37:09] [Rank 0] step:1141/10000 train_time:71731ms step_avg:62.87ms +[2025-09-11 10:37:09] 
[Rank 0] step:1161/10000 train_time:72375ms step_avg:62.34ms +[2025-09-11 10:37:09] [Rank 0] step:1161/10000 train_time:72375ms step_avg:62.34ms +[2025-09-11 10:37:10] [Rank 0] step:1181/10000 train_time:73021ms step_avg:61.83ms +[2025-09-11 10:37:10] [Rank 0] step:1181/10000 train_time:73021ms step_avg:61.83ms +[2025-09-11 10:37:11] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 10:37:11] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 10:37:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 10:37:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 10:37:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 10:37:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 10:37:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:37:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:37:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 10:37:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 10:37:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 10:37:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 10:37:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 10:37:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 10:37:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... 
+[2025-09-11 10:37:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 10:37:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 10:37:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 10:37:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 10:37:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 10:37:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 10:37:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 10:37:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 10:37:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 10:37:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 10:37:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 10:37:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 10:37:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 10:37:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 10:37:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 10:37:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 10:37:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 10:37:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 10:37:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 10:37:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... 
+[2025-09-11 10:37:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 10:37:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 10:37:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 10:37:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 10:37:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 10:37:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 10:37:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 10:37:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 10:37:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 10:37:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:37:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:37:20] [Rank 0] PRINT: step:1200/10000 val_loss:5.7847 total_sharp:5.9992e-04 L1_sharp:7.0957e-01 L2_sharp:6.6509e-01 L3_sharp:6.3217e-01 L4_sharp:6.5827e-01 L5_sharp:6.7556e-01 L6_sharp:5.1119e-01 L7_sharp:4.5357e-01 L8_sharp:4.5851e-01 L9_sharp:3.5032e-01 L10_sharp:4.4786e-01 L11_sharp:8.0808e-01 L12_sharp:1.6928e+00 total_fnorm:3.8750e+01 total_l1_linf:6.7584e+04 total_spectral:1.9375e+01 L1_fnorm:4.7852e-02 L2_fnorm:4.8828e-02 L3_fnorm:4.8828e-02 L4_fnorm:4.9072e-02 L5_fnorm:4.7607e-02 L6_fnorm:4.8828e-02 L7_fnorm:4.9072e-02 L8_fnorm:4.7607e-02 L9_fnorm:4.8828e-02 L10_fnorm:4.8584e-02 L11_fnorm:4.8584e-02 L12_fnorm:4.6387e-02 L1_l1linf:1.9287e-02 L2_l1linf:1.9287e-02 L3_l1linf:1.9409e-02 L4_l1linf:1.9409e-02 L5_l1linf:1.9409e-02 L6_l1linf:1.9409e-02 L7_l1linf:1.9531e-02 L8_l1linf:1.9653e-02 L9_l1linf:1.9775e-02 L10_l1linf:1.9775e-02 L11_l1linf:1.9897e-02 L12_l1linf:1.9897e-02 L1_spectral:7.2553e-04 L2_spectral:7.3465e-04 L3_spectral:7.3650e-04 L4_spectral:7.3825e-04 L5_spectral:7.3156e-04 L6_spectral:7.4560e-04 L7_spectral:7.4522e-04 L8_spectral:7.3271e-04 L9_spectral:7.4153e-04 L10_spectral:7.3758e-04 L11_spectral:7.3271e-04 L12_spectral:7.1709e-04 train_time:73649ms step_avg:61.37ms +[2025-09-11 10:37:20] [Rank 0] PRINT: step:1200/10000 val_loss:5.7847 total_sharp:5.9992e-04 L1_sharp:7.0957e-01 L2_sharp:6.6509e-01 L3_sharp:6.3217e-01 L4_sharp:6.5827e-01 L5_sharp:6.7556e-01 L6_sharp:5.1119e-01 L7_sharp:4.5357e-01 L8_sharp:4.5851e-01 L9_sharp:3.5032e-01 L10_sharp:4.4786e-01 L11_sharp:8.0808e-01 L12_sharp:1.6928e+00 total_fnorm:3.8750e+01 total_l1_linf:6.7584e+04 total_spectral:1.9375e+01 L1_fnorm:4.7852e-02 L2_fnorm:4.8828e-02 L3_fnorm:4.8828e-02 L4_fnorm:4.9072e-02 L5_fnorm:4.7607e-02 L6_fnorm:4.8828e-02 L7_fnorm:4.9072e-02 L8_fnorm:4.7607e-02 L9_fnorm:4.8828e-02 L10_fnorm:4.8584e-02 L11_fnorm:4.8584e-02 L12_fnorm:4.6387e-02 L1_l1linf:1.9287e-02 L2_l1linf:1.9287e-02 L3_l1linf:1.9409e-02 L4_l1linf:1.9409e-02 L5_l1linf:1.9409e-02 
L6_l1linf:1.9409e-02 L7_l1linf:1.9531e-02 L8_l1linf:1.9653e-02 L9_l1linf:1.9775e-02 L10_l1linf:1.9775e-02 L11_l1linf:1.9897e-02 L12_l1linf:1.9897e-02 L1_spectral:7.2553e-04 L2_spectral:7.3465e-04 L3_spectral:7.3650e-04 L4_spectral:7.3825e-04 L5_spectral:7.3156e-04 L6_spectral:7.4560e-04 L7_spectral:7.4522e-04 L8_spectral:7.3271e-04 L9_spectral:7.4153e-04 L10_spectral:7.3758e-04 L11_spectral:7.3271e-04 L12_spectral:7.1709e-04 train_time:73649ms step_avg:61.37ms +[2025-09-11 10:37:22] [Rank 0] step:1201/10000 train_time:74862ms step_avg:62.33ms +[2025-09-11 10:37:22] [Rank 0] step:1201/10000 train_time:74862ms step_avg:62.33ms +[2025-09-11 10:37:22] [Rank 0] step:1221/10000 train_time:75536ms step_avg:61.86ms +[2025-09-11 10:37:22] [Rank 0] step:1221/10000 train_time:75536ms step_avg:61.86ms +[2025-09-11 10:37:23] [Rank 0] step:1241/10000 train_time:76183ms step_avg:61.39ms +[2025-09-11 10:37:23] [Rank 0] step:1241/10000 train_time:76183ms step_avg:61.39ms +[2025-09-11 10:37:24] [Rank 0] step:1261/10000 train_time:76829ms step_avg:60.93ms +[2025-09-11 10:37:24] [Rank 0] step:1261/10000 train_time:76829ms step_avg:60.93ms +[2025-09-11 10:37:24] [Rank 0] step:1281/10000 train_time:77476ms step_avg:60.48ms +[2025-09-11 10:37:24] [Rank 0] step:1281/10000 train_time:77476ms step_avg:60.48ms +[2025-09-11 10:37:25] [Rank 0] step:1301/10000 train_time:78127ms step_avg:60.05ms +[2025-09-11 10:37:25] [Rank 0] step:1301/10000 train_time:78127ms step_avg:60.05ms +[2025-09-11 10:37:25] [Rank 0] step:1321/10000 train_time:78773ms step_avg:59.63ms +[2025-09-11 10:37:25] [Rank 0] step:1321/10000 train_time:78773ms step_avg:59.63ms +[2025-09-11 10:37:26] [Rank 0] step:1341/10000 train_time:79419ms step_avg:59.22ms +[2025-09-11 10:37:26] [Rank 0] step:1341/10000 train_time:79419ms step_avg:59.22ms +[2025-09-11 10:37:27] [Rank 0] step:1361/10000 train_time:80065ms step_avg:58.83ms +[2025-09-11 10:37:27] [Rank 0] step:1361/10000 train_time:80065ms step_avg:58.83ms +[2025-09-11 10:37:27] 
[Rank 0] step:1381/10000 train_time:80711ms step_avg:58.44ms +[2025-09-11 10:37:27] [Rank 0] step:1381/10000 train_time:80711ms step_avg:58.44ms +[2025-09-11 10:37:28] [Rank 0] step:1401/10000 train_time:81360ms step_avg:58.07ms +[2025-09-11 10:37:28] [Rank 0] step:1401/10000 train_time:81360ms step_avg:58.07ms +[2025-09-11 10:37:29] [Rank 0] step:1421/10000 train_time:82006ms step_avg:57.71ms +[2025-09-11 10:37:29] [Rank 0] step:1421/10000 train_time:82006ms step_avg:57.71ms +[2025-09-11 10:37:29] [Rank 0] step:1441/10000 train_time:82652ms step_avg:57.36ms +[2025-09-11 10:37:29] [Rank 0] step:1441/10000 train_time:82652ms step_avg:57.36ms +[2025-09-11 10:37:30] [Rank 0] step:1461/10000 train_time:83298ms step_avg:57.01ms +[2025-09-11 10:37:30] [Rank 0] step:1461/10000 train_time:83298ms step_avg:57.01ms +[2025-09-11 10:37:31] [Rank 0] step:1481/10000 train_time:83944ms step_avg:56.68ms +[2025-09-11 10:37:31] [Rank 0] step:1481/10000 train_time:83944ms step_avg:56.68ms +[2025-09-11 10:37:31] [Rank 0] step:1501/10000 train_time:84594ms step_avg:56.36ms +[2025-09-11 10:37:31] [Rank 0] step:1501/10000 train_time:84594ms step_avg:56.36ms +[2025-09-11 10:37:32] [Rank 0] step:1521/10000 train_time:85244ms step_avg:56.04ms +[2025-09-11 10:37:32] [Rank 0] step:1521/10000 train_time:85244ms step_avg:56.04ms +[2025-09-11 10:37:33] [Rank 0] step:1541/10000 train_time:85895ms step_avg:55.74ms +[2025-09-11 10:37:33] [Rank 0] step:1541/10000 train_time:85895ms step_avg:55.74ms +[2025-09-11 10:37:33] [Rank 0] step:1561/10000 train_time:86545ms step_avg:55.44ms +[2025-09-11 10:37:33] [Rank 0] step:1561/10000 train_time:86545ms step_avg:55.44ms +[2025-09-11 10:37:34] [Rank 0] step:1581/10000 train_time:87194ms step_avg:55.15ms +[2025-09-11 10:37:34] [Rank 0] step:1581/10000 train_time:87194ms step_avg:55.15ms +[2025-09-11 10:37:34] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 10:37:34] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 10:37:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 10:37:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 10:37:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 10:37:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 10:37:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:37:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:37:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 10:37:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 10:37:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 10:37:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 10:37:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 10:37:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 10:37:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 10:37:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 10:37:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 10:37:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 10:37:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 10:37:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 10:37:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 10:37:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 10:37:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 10:37:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 10:37:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 10:37:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 10:37:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 10:37:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 10:37:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 10:37:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 10:37:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 10:37:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 10:37:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 10:37:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 10:37:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 10:37:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 10:37:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 10:37:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 10:37:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 10:37:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 10:37:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 10:37:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 10:37:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 10:37:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 10:37:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:37:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:37:44] [Rank 0] PRINT: step:1600/10000 val_loss:5.5974 total_sharp:6.7495e-04 L1_sharp:5.7616e-01 L2_sharp:5.2268e-01 L3_sharp:5.8030e-01 L4_sharp:6.5340e-01 L5_sharp:7.2153e-01 L6_sharp:6.8045e-01 L7_sharp:7.7645e-01 L8_sharp:8.4643e-01 L9_sharp:1.2333e+00 L10_sharp:2.0706e+00 L11_sharp:1.8152e+00 L12_sharp:2.8060e+00 total_fnorm:3.7500e+01 total_l1_linf:6.1696e+04 total_spectral:1.8750e+01 L1_fnorm:4.7852e-02 L2_fnorm:4.9316e-02 L3_fnorm:4.9316e-02 L4_fnorm:4.9316e-02 L5_fnorm:4.7852e-02 L6_fnorm:4.9316e-02 L7_fnorm:4.9561e-02 L8_fnorm:4.7607e-02 L9_fnorm:4.9316e-02 L10_fnorm:4.9072e-02 L11_fnorm:4.9072e-02 L12_fnorm:4.6631e-02 L1_l1linf:1.8188e-02 L2_l1linf:1.8311e-02 L3_l1linf:1.8188e-02 L4_l1linf:1.8555e-02 L5_l1linf:1.8555e-02 L6_l1linf:1.8677e-02 L7_l1linf:1.8921e-02 L8_l1linf:1.8677e-02 L9_l1linf:1.8921e-02 L10_l1linf:1.8921e-02 L11_l1linf:1.9043e-02 L12_l1linf:1.9043e-02 L1_spectral:7.3715e-04 L2_spectral:7.5172e-04 L3_spectral:7.5302e-04 L4_spectral:7.5272e-04 L5_spectral:7.4783e-04 L6_spectral:7.6968e-04 L7_spectral:7.6343e-04 L8_spectral:7.5038e-04 L9_spectral:7.5922e-04 L10_spectral:7.6438e-04 L11_spectral:7.5322e-04 L12_spectral:7.2860e-04 train_time:87827ms step_avg:54.89ms +[2025-09-11 10:37:44] [Rank 0] PRINT: step:1600/10000 
val_loss:5.5974 total_sharp:6.7495e-04 L1_sharp:5.7616e-01 L2_sharp:5.2268e-01 L3_sharp:5.8030e-01 L4_sharp:6.5340e-01 L5_sharp:7.2153e-01 L6_sharp:6.8045e-01 L7_sharp:7.7645e-01 L8_sharp:8.4643e-01 L9_sharp:1.2333e+00 L10_sharp:2.0706e+00 L11_sharp:1.8152e+00 L12_sharp:2.8060e+00 total_fnorm:3.7500e+01 total_l1_linf:6.1696e+04 total_spectral:1.8750e+01 L1_fnorm:4.7852e-02 L2_fnorm:4.9316e-02 L3_fnorm:4.9316e-02 L4_fnorm:4.9316e-02 L5_fnorm:4.7852e-02 L6_fnorm:4.9316e-02 L7_fnorm:4.9561e-02 L8_fnorm:4.7607e-02 L9_fnorm:4.9316e-02 L10_fnorm:4.9072e-02 L11_fnorm:4.9072e-02 L12_fnorm:4.6631e-02 L1_l1linf:1.8188e-02 L2_l1linf:1.8311e-02 L3_l1linf:1.8188e-02 L4_l1linf:1.8555e-02 L5_l1linf:1.8555e-02 L6_l1linf:1.8677e-02 L7_l1linf:1.8921e-02 L8_l1linf:1.8677e-02 L9_l1linf:1.8921e-02 L10_l1linf:1.8921e-02 L11_l1linf:1.9043e-02 L12_l1linf:1.9043e-02 L1_spectral:7.3715e-04 L2_spectral:7.5172e-04 L3_spectral:7.5302e-04 L4_spectral:7.5272e-04 L5_spectral:7.4783e-04 L6_spectral:7.6968e-04 L7_spectral:7.6343e-04 L8_spectral:7.5038e-04 L9_spectral:7.5922e-04 L10_spectral:7.6438e-04 L11_spectral:7.5322e-04 L12_spectral:7.2860e-04 train_time:87827ms step_avg:54.89ms +[2025-09-11 10:37:46] [Rank 0] step:1601/10000 train_time:89037ms step_avg:55.61ms +[2025-09-11 10:37:46] [Rank 0] step:1601/10000 train_time:89037ms step_avg:55.61ms +[2025-09-11 10:37:46] [Rank 0] step:1621/10000 train_time:89718ms step_avg:55.35ms +[2025-09-11 10:37:46] [Rank 0] step:1621/10000 train_time:89718ms step_avg:55.35ms +[2025-09-11 10:37:47] [Rank 0] step:1641/10000 train_time:90370ms step_avg:55.07ms +[2025-09-11 10:37:47] [Rank 0] step:1641/10000 train_time:90370ms step_avg:55.07ms +[2025-09-11 10:37:48] [Rank 0] step:1661/10000 train_time:91022ms step_avg:54.80ms +[2025-09-11 10:37:48] [Rank 0] step:1661/10000 train_time:91022ms step_avg:54.80ms +[2025-09-11 10:37:48] [Rank 0] step:1681/10000 train_time:91672ms step_avg:54.53ms +[2025-09-11 10:37:48] [Rank 0] step:1681/10000 train_time:91672ms 
step_avg:54.53ms +[2025-09-11 10:37:49] [Rank 0] step:1701/10000 train_time:92324ms step_avg:54.28ms +[2025-09-11 10:37:49] [Rank 0] step:1701/10000 train_time:92324ms step_avg:54.28ms +[2025-09-11 10:37:50] [Rank 0] step:1721/10000 train_time:92976ms step_avg:54.02ms +[2025-09-11 10:37:50] [Rank 0] step:1721/10000 train_time:92976ms step_avg:54.02ms +[2025-09-11 10:37:50] [Rank 0] step:1741/10000 train_time:93626ms step_avg:53.78ms +[2025-09-11 10:37:50] [Rank 0] step:1741/10000 train_time:93626ms step_avg:53.78ms +[2025-09-11 10:37:51] [Rank 0] step:1761/10000 train_time:94277ms step_avg:53.54ms +[2025-09-11 10:37:51] [Rank 0] step:1761/10000 train_time:94277ms step_avg:53.54ms +[2025-09-11 10:37:52] [Rank 0] step:1781/10000 train_time:94928ms step_avg:53.30ms +[2025-09-11 10:37:52] [Rank 0] step:1781/10000 train_time:94928ms step_avg:53.30ms +[2025-09-11 10:37:52] [Rank 0] step:1801/10000 train_time:95579ms step_avg:53.07ms +[2025-09-11 10:37:52] [Rank 0] step:1801/10000 train_time:95579ms step_avg:53.07ms +[2025-09-11 10:37:53] [Rank 0] step:1821/10000 train_time:96230ms step_avg:52.84ms +[2025-09-11 10:37:53] [Rank 0] step:1821/10000 train_time:96230ms step_avg:52.84ms +[2025-09-11 10:37:53] [Rank 0] step:1841/10000 train_time:96880ms step_avg:52.62ms +[2025-09-11 10:37:53] [Rank 0] step:1841/10000 train_time:96880ms step_avg:52.62ms +[2025-09-11 10:37:54] [Rank 0] step:1861/10000 train_time:97531ms step_avg:52.41ms +[2025-09-11 10:37:54] [Rank 0] step:1861/10000 train_time:97531ms step_avg:52.41ms +[2025-09-11 10:37:55] [Rank 0] step:1881/10000 train_time:98182ms step_avg:52.20ms +[2025-09-11 10:37:55] [Rank 0] step:1881/10000 train_time:98182ms step_avg:52.20ms +[2025-09-11 10:37:55] [Rank 0] step:1901/10000 train_time:98832ms step_avg:51.99ms +[2025-09-11 10:37:55] [Rank 0] step:1901/10000 train_time:98832ms step_avg:51.99ms +[2025-09-11 10:37:56] [Rank 0] step:1921/10000 train_time:99483ms step_avg:51.79ms +[2025-09-11 10:37:56] [Rank 0] step:1921/10000 
train_time:99483ms step_avg:51.79ms +[2025-09-11 10:37:57] [Rank 0] step:1941/10000 train_time:100134ms step_avg:51.59ms +[2025-09-11 10:37:57] [Rank 0] step:1941/10000 train_time:100134ms step_avg:51.59ms +[2025-09-11 10:37:57] [Rank 0] step:1961/10000 train_time:100784ms step_avg:51.39ms +[2025-09-11 10:37:57] [Rank 0] step:1961/10000 train_time:100784ms step_avg:51.39ms +[2025-09-11 10:37:58] [Rank 0] step:1981/10000 train_time:101437ms step_avg:51.20ms +[2025-09-11 10:37:58] [Rank 0] step:1981/10000 train_time:101437ms step_avg:51.20ms +[2025-09-11 10:37:59] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 10:37:59] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 10:38:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 10:38:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 10:38:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 10:38:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 10:38:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:38:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:38:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 10:38:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 10:38:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 10:38:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 10:38:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 10:38:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 10:38:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 10:38:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 10:38:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 10:38:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 10:38:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 10:38:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 10:38:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 10:38:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 10:38:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 10:38:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 10:38:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 10:38:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 10:38:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 10:38:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 10:38:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 10:38:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 10:38:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 10:38:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 10:38:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 10:38:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 10:38:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 10:38:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 10:38:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 10:38:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 10:38:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 10:38:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 10:38:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 10:38:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 10:38:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 10:38:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 10:38:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:38:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:38:08] [Rank 0] PRINT: step:2000/10000 val_loss:5.4601 total_sharp:6.7085e-04 L1_sharp:4.3223e-01 L2_sharp:4.2738e-01 L3_sharp:5.1536e-01 L4_sharp:5.6658e-01 L5_sharp:6.8539e-01 L6_sharp:8.1214e-01 L7_sharp:1.0537e+00 L8_sharp:1.5587e+00 L9_sharp:2.4032e+00 L10_sharp:3.7067e+00 L11_sharp:4.5726e+00 L12_sharp:4.4808e+00 total_fnorm:3.6250e+01 total_l1_linf:6.1184e+04 total_spectral:1.8125e+01 L1_fnorm:4.8584e-02 L2_fnorm:4.9316e-02 L3_fnorm:4.9316e-02 L4_fnorm:4.9805e-02 L5_fnorm:4.8828e-02 L6_fnorm:4.9316e-02 L7_fnorm:4.9805e-02 L8_fnorm:4.8340e-02 L9_fnorm:4.9561e-02 L10_fnorm:4.9316e-02 L11_fnorm:4.9072e-02 L12_fnorm:4.6875e-02 L1_l1linf:1.7212e-02 L2_l1linf:1.7334e-02 L3_l1linf:1.7334e-02 L4_l1linf:1.7456e-02 L5_l1linf:1.7944e-02 L6_l1linf:1.7822e-02 L7_l1linf:1.7700e-02 L8_l1linf:1.7822e-02 L9_l1linf:1.8188e-02 L10_l1linf:1.8433e-02 L11_l1linf:1.8555e-02 L12_l1linf:1.8066e-02 L1_spectral:7.6236e-04 L2_spectral:7.6381e-04 L3_spectral:7.6365e-04 L4_spectral:7.6913e-04 L5_spectral:7.7024e-04 L6_spectral:7.7985e-04 L7_spectral:7.7860e-04 L8_spectral:7.6601e-04 L9_spectral:7.7377e-04 L10_spectral:7.7415e-04 L11_spectral:7.6430e-04 L12_spectral:7.3207e-04 train_time:102070ms step_avg:51.03ms +[2025-09-11 10:38:08] [Rank 0] PRINT: step:2000/10000 val_loss:5.4601 total_sharp:6.7085e-04 L1_sharp:4.3223e-01 L2_sharp:4.2738e-01 L3_sharp:5.1536e-01 L4_sharp:5.6658e-01 L5_sharp:6.8539e-01 L6_sharp:8.1214e-01 L7_sharp:1.0537e+00 L8_sharp:1.5587e+00 L9_sharp:2.4032e+00 L10_sharp:3.7067e+00 L11_sharp:4.5726e+00 L12_sharp:4.4808e+00 total_fnorm:3.6250e+01 total_l1_linf:6.1184e+04 total_spectral:1.8125e+01 L1_fnorm:4.8584e-02 L2_fnorm:4.9316e-02 L3_fnorm:4.9316e-02 L4_fnorm:4.9805e-02 L5_fnorm:4.8828e-02 L6_fnorm:4.9316e-02 L7_fnorm:4.9805e-02 L8_fnorm:4.8340e-02 L9_fnorm:4.9561e-02 L10_fnorm:4.9316e-02 L11_fnorm:4.9072e-02 L12_fnorm:4.6875e-02 L1_l1linf:1.7212e-02 L2_l1linf:1.7334e-02 L3_l1linf:1.7334e-02 L4_l1linf:1.7456e-02 L5_l1linf:1.7944e-02 
L6_l1linf:1.7822e-02 L7_l1linf:1.7700e-02 L8_l1linf:1.7822e-02 L9_l1linf:1.8188e-02 L10_l1linf:1.8433e-02 L11_l1linf:1.8555e-02 L12_l1linf:1.8066e-02 L1_spectral:7.6236e-04 L2_spectral:7.6381e-04 L3_spectral:7.6365e-04 L4_spectral:7.6913e-04 L5_spectral:7.7024e-04 L6_spectral:7.7985e-04 L7_spectral:7.7860e-04 L8_spectral:7.6601e-04 L9_spectral:7.7377e-04 L10_spectral:7.7415e-04 L11_spectral:7.6430e-04 L12_spectral:7.3207e-04 train_time:102070ms step_avg:51.03ms +[2025-09-11 10:38:10] [Rank 0] step:2001/10000 train_time:103332ms step_avg:51.64ms +[2025-09-11 10:38:10] [Rank 0] step:2001/10000 train_time:103332ms step_avg:51.64ms +[2025-09-11 10:38:11] [Rank 0] step:2021/10000 train_time:104247ms step_avg:51.58ms +[2025-09-11 10:38:11] [Rank 0] step:2021/10000 train_time:104247ms step_avg:51.58ms +[2025-09-11 10:38:11] [Rank 0] step:2041/10000 train_time:104898ms step_avg:51.40ms +[2025-09-11 10:38:11] [Rank 0] step:2041/10000 train_time:104898ms step_avg:51.40ms +[2025-09-11 10:38:12] [Rank 0] step:2061/10000 train_time:105549ms step_avg:51.21ms +[2025-09-11 10:38:12] [Rank 0] step:2061/10000 train_time:105549ms step_avg:51.21ms +[2025-09-11 10:38:13] [Rank 0] step:2081/10000 train_time:106199ms step_avg:51.03ms +[2025-09-11 10:38:13] [Rank 0] step:2081/10000 train_time:106199ms step_avg:51.03ms +[2025-09-11 10:38:13] [Rank 0] step:2101/10000 train_time:106850ms step_avg:50.86ms +[2025-09-11 10:38:13] [Rank 0] step:2101/10000 train_time:106850ms step_avg:50.86ms +[2025-09-11 10:38:14] [Rank 0] step:2121/10000 train_time:107501ms step_avg:50.68ms +[2025-09-11 10:38:14] [Rank 0] step:2121/10000 train_time:107501ms step_avg:50.68ms +[2025-09-11 10:38:15] [Rank 0] step:2141/10000 train_time:108152ms step_avg:50.51ms +[2025-09-11 10:38:15] [Rank 0] step:2141/10000 train_time:108152ms step_avg:50.51ms +[2025-09-11 10:38:15] [Rank 0] step:2161/10000 train_time:108803ms step_avg:50.35ms +[2025-09-11 10:38:15] [Rank 0] step:2161/10000 train_time:108803ms step_avg:50.35ms 
+[2025-09-11 10:38:16] [Rank 0] step:2181/10000 train_time:109452ms step_avg:50.18ms +[2025-09-11 10:38:16] [Rank 0] step:2181/10000 train_time:109452ms step_avg:50.18ms +[2025-09-11 10:38:16] [Rank 0] step:2201/10000 train_time:110103ms step_avg:50.02ms +[2025-09-11 10:38:16] [Rank 0] step:2201/10000 train_time:110103ms step_avg:50.02ms +[2025-09-11 10:38:17] [Rank 0] step:2221/10000 train_time:110753ms step_avg:49.87ms +[2025-09-11 10:38:17] [Rank 0] step:2221/10000 train_time:110753ms step_avg:49.87ms +[2025-09-11 10:38:18] [Rank 0] step:2241/10000 train_time:111416ms step_avg:49.72ms +[2025-09-11 10:38:18] [Rank 0] step:2241/10000 train_time:111416ms step_avg:49.72ms +[2025-09-11 10:38:18] [Rank 0] step:2261/10000 train_time:112080ms step_avg:49.57ms +[2025-09-11 10:38:18] [Rank 0] step:2261/10000 train_time:112080ms step_avg:49.57ms +[2025-09-11 10:38:19] [Rank 0] step:2281/10000 train_time:113011ms step_avg:49.54ms +[2025-09-11 10:38:19] [Rank 0] step:2281/10000 train_time:113011ms step_avg:49.54ms +[2025-09-11 10:38:20] [Rank 0] step:2301/10000 train_time:113675ms step_avg:49.40ms +[2025-09-11 10:38:20] [Rank 0] step:2301/10000 train_time:113675ms step_avg:49.40ms +[2025-09-11 10:38:21] [Rank 0] step:2321/10000 train_time:114340ms step_avg:49.26ms +[2025-09-11 10:38:21] [Rank 0] step:2321/10000 train_time:114340ms step_avg:49.26ms +[2025-09-11 10:38:22] [Rank 0] step:2341/10000 train_time:115151ms step_avg:49.19ms +[2025-09-11 10:38:22] [Rank 0] step:2341/10000 train_time:115151ms step_avg:49.19ms +[2025-09-11 10:38:22] [Rank 0] step:2361/10000 train_time:115925ms step_avg:49.10ms +[2025-09-11 10:38:22] [Rank 0] step:2361/10000 train_time:115925ms step_avg:49.10ms +[2025-09-11 10:38:23] [Rank 0] step:2381/10000 train_time:116588ms step_avg:48.97ms +[2025-09-11 10:38:23] [Rank 0] step:2381/10000 train_time:116588ms step_avg:48.97ms +[2025-09-11 10:38:24] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 10:38:24] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 10:38:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 10:38:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 10:38:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 10:38:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 10:38:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:38:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:38:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 10:38:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 10:38:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 10:38:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 10:38:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 10:38:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 10:38:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 10:38:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 10:38:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 10:38:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 10:38:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 10:38:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 10:38:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 10:38:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 10:38:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 10:38:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 10:38:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 10:38:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 10:38:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 10:38:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 10:38:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 10:38:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 10:38:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 10:38:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 10:38:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 10:38:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 10:38:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 10:38:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 10:38:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 10:38:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 10:38:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 10:38:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 10:38:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 10:38:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 10:38:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 10:38:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 10:38:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:38:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:38:33] [Rank 0] PRINT: step:2400/10000 val_loss:5.3430 total_sharp:6.0379e-04 L1_sharp:3.6828e-01 L2_sharp:4.3424e-01 L3_sharp:5.4263e-01 L4_sharp:6.8702e-01 L5_sharp:8.1863e-01 L6_sharp:8.9674e-01 L7_sharp:8.9220e-01 L8_sharp:9.6071e-01 L9_sharp:8.8202e-01 L10_sharp:1.4932e+00 L11_sharp:2.6211e+00 L12_sharp:2.9813e+00 total_fnorm:3.4750e+01 total_l1_linf:5.6064e+04 total_spectral:1.7375e+01 L1_fnorm:4.8584e-02 L2_fnorm:4.8584e-02 L3_fnorm:4.8828e-02 L4_fnorm:4.9072e-02 L5_fnorm:4.8584e-02 L6_fnorm:4.8828e-02 L7_fnorm:4.9072e-02 L8_fnorm:4.8096e-02 L9_fnorm:4.8828e-02 L10_fnorm:4.8828e-02 L11_fnorm:4.8828e-02 L12_fnorm:4.7607e-02 L1_l1linf:1.6479e-02 L2_l1linf:1.6968e-02 L3_l1linf:1.6602e-02 L4_l1linf:1.6846e-02 L5_l1linf:1.6602e-02 L6_l1linf:1.6968e-02 L7_l1linf:1.7212e-02 L8_l1linf:1.7334e-02 L9_l1linf:1.7212e-02 L10_l1linf:1.7456e-02 L11_l1linf:1.7700e-02 L12_l1linf:1.7578e-02 L1_spectral:7.6115e-04 L2_spectral:7.6615e-04 L3_spectral:7.6889e-04 L4_spectral:7.6891e-04 L5_spectral:7.7100e-04 L6_spectral:7.8088e-04 L7_spectral:7.8486e-04 L8_spectral:7.7218e-04 L9_spectral:7.8435e-04 L10_spectral:7.7953e-04 L11_spectral:7.7518e-04 L12_spectral:7.5764e-04 train_time:117234ms step_avg:48.85ms +[2025-09-11 10:38:33] [Rank 0] PRINT: step:2400/10000 
val_loss:5.3430 total_sharp:6.0379e-04 L1_sharp:3.6828e-01 L2_sharp:4.3424e-01 L3_sharp:5.4263e-01 L4_sharp:6.8702e-01 L5_sharp:8.1863e-01 L6_sharp:8.9674e-01 L7_sharp:8.9220e-01 L8_sharp:9.6071e-01 L9_sharp:8.8202e-01 L10_sharp:1.4932e+00 L11_sharp:2.6211e+00 L12_sharp:2.9813e+00 total_fnorm:3.4750e+01 total_l1_linf:5.6064e+04 total_spectral:1.7375e+01 L1_fnorm:4.8584e-02 L2_fnorm:4.8584e-02 L3_fnorm:4.8828e-02 L4_fnorm:4.9072e-02 L5_fnorm:4.8584e-02 L6_fnorm:4.8828e-02 L7_fnorm:4.9072e-02 L8_fnorm:4.8096e-02 L9_fnorm:4.8828e-02 L10_fnorm:4.8828e-02 L11_fnorm:4.8828e-02 L12_fnorm:4.7607e-02 L1_l1linf:1.6479e-02 L2_l1linf:1.6968e-02 L3_l1linf:1.6602e-02 L4_l1linf:1.6846e-02 L5_l1linf:1.6602e-02 L6_l1linf:1.6968e-02 L7_l1linf:1.7212e-02 L8_l1linf:1.7334e-02 L9_l1linf:1.7212e-02 L10_l1linf:1.7456e-02 L11_l1linf:1.7700e-02 L12_l1linf:1.7578e-02 L1_spectral:7.6115e-04 L2_spectral:7.6615e-04 L3_spectral:7.6889e-04 L4_spectral:7.6891e-04 L5_spectral:7.7100e-04 L6_spectral:7.8088e-04 L7_spectral:7.8486e-04 L8_spectral:7.7218e-04 L9_spectral:7.8435e-04 L10_spectral:7.7953e-04 L11_spectral:7.7518e-04 L12_spectral:7.5764e-04 train_time:117234ms step_avg:48.85ms +[2025-09-11 10:38:35] [Rank 0] step:2401/10000 train_time:118459ms step_avg:49.34ms +[2025-09-11 10:38:35] [Rank 0] step:2401/10000 train_time:118459ms step_avg:49.34ms +[2025-09-11 10:38:35] [Rank 0] step:2421/10000 train_time:119128ms step_avg:49.21ms +[2025-09-11 10:38:35] [Rank 0] step:2421/10000 train_time:119128ms step_avg:49.21ms +[2025-09-11 10:38:36] [Rank 0] step:2441/10000 train_time:119793ms step_avg:49.08ms +[2025-09-11 10:38:36] [Rank 0] step:2441/10000 train_time:119793ms step_avg:49.08ms +[2025-09-11 10:38:37] [Rank 0] step:2461/10000 train_time:120458ms step_avg:48.95ms +[2025-09-11 10:38:37] [Rank 0] step:2461/10000 train_time:120458ms step_avg:48.95ms +[2025-09-11 10:38:37] [Rank 0] step:2481/10000 train_time:121122ms step_avg:48.82ms +[2025-09-11 10:38:37] [Rank 0] step:2481/10000 
train_time:121122ms step_avg:48.82ms +[2025-09-11 10:38:38] [Rank 0] step:2501/10000 train_time:121787ms step_avg:48.70ms +[2025-09-11 10:38:38] [Rank 0] step:2501/10000 train_time:121787ms step_avg:48.70ms +[2025-09-11 10:38:39] [Rank 0] step:2521/10000 train_time:122451ms step_avg:48.57ms +[2025-09-11 10:38:39] [Rank 0] step:2521/10000 train_time:122451ms step_avg:48.57ms +[2025-09-11 10:38:39] [Rank 0] step:2541/10000 train_time:123116ms step_avg:48.45ms +[2025-09-11 10:38:39] [Rank 0] step:2541/10000 train_time:123116ms step_avg:48.45ms +[2025-09-11 10:38:40] [Rank 0] step:2561/10000 train_time:123780ms step_avg:48.33ms +[2025-09-11 10:38:40] [Rank 0] step:2561/10000 train_time:123780ms step_avg:48.33ms +[2025-09-11 10:38:41] [Rank 0] step:2581/10000 train_time:124446ms step_avg:48.22ms +[2025-09-11 10:38:41] [Rank 0] step:2581/10000 train_time:124446ms step_avg:48.22ms +[2025-09-11 10:38:41] [Rank 0] step:2601/10000 train_time:125110ms step_avg:48.10ms +[2025-09-11 10:38:41] [Rank 0] step:2601/10000 train_time:125110ms step_avg:48.10ms +[2025-09-11 10:38:42] [Rank 0] step:2621/10000 train_time:125774ms step_avg:47.99ms +[2025-09-11 10:38:42] [Rank 0] step:2621/10000 train_time:125774ms step_avg:47.99ms +[2025-09-11 10:38:43] [Rank 0] step:2641/10000 train_time:126439ms step_avg:47.88ms +[2025-09-11 10:38:43] [Rank 0] step:2641/10000 train_time:126439ms step_avg:47.88ms +[2025-09-11 10:38:43] [Rank 0] step:2661/10000 train_time:127105ms step_avg:47.77ms +[2025-09-11 10:38:43] [Rank 0] step:2661/10000 train_time:127105ms step_avg:47.77ms +[2025-09-11 10:38:44] [Rank 0] step:2681/10000 train_time:127769ms step_avg:47.66ms +[2025-09-11 10:38:44] [Rank 0] step:2681/10000 train_time:127769ms step_avg:47.66ms +[2025-09-11 10:38:45] [Rank 0] step:2701/10000 train_time:128434ms step_avg:47.55ms +[2025-09-11 10:38:45] [Rank 0] step:2701/10000 train_time:128434ms step_avg:47.55ms +[2025-09-11 10:38:45] [Rank 0] step:2721/10000 train_time:129098ms step_avg:47.45ms 
+[2025-09-11 10:38:45] [Rank 0] step:2721/10000 train_time:129098ms step_avg:47.45ms +[2025-09-11 10:38:46] [Rank 0] step:2741/10000 train_time:129763ms step_avg:47.34ms +[2025-09-11 10:38:46] [Rank 0] step:2741/10000 train_time:129763ms step_avg:47.34ms +[2025-09-11 10:38:47] [Rank 0] step:2761/10000 train_time:130427ms step_avg:47.24ms +[2025-09-11 10:38:47] [Rank 0] step:2761/10000 train_time:130427ms step_avg:47.24ms +[2025-09-11 10:38:47] [Rank 0] step:2781/10000 train_time:131092ms step_avg:47.14ms +[2025-09-11 10:38:47] [Rank 0] step:2781/10000 train_time:131092ms step_avg:47.14ms +[2025-09-11 10:38:48] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 10:38:48] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 10:38:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 10:38:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 10:38:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 10:38:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 10:38:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:38:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:38:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 10:38:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 10:38:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 10:38:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 10:38:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 10:38:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 10:38:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 10:38:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 10:38:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 10:38:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 10:38:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 10:38:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 10:38:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 10:38:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 10:38:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 10:38:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 10:38:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 10:38:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 10:38:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 10:38:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 10:38:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 10:38:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 10:38:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 10:38:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 10:38:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 10:38:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 10:38:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 10:38:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 10:38:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 10:38:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 10:38:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 10:38:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 10:38:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 10:38:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 10:38:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 10:38:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 10:38:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:38:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:38:58] [Rank 0] PRINT: step:2800/10000 val_loss:5.2469 total_sharp:5.9846e-04 L1_sharp:3.7305e-01 L2_sharp:4.4658e-01 L3_sharp:5.6609e-01 L4_sharp:6.7465e-01 L5_sharp:8.0054e-01 L6_sharp:9.2857e-01 L7_sharp:8.8390e-01 L8_sharp:8.1238e-01 L9_sharp:7.9162e-01 L10_sharp:9.0230e-01 L11_sharp:8.7231e-01 L12_sharp:1.6934e+00 total_fnorm:3.2500e+01 total_l1_linf:5.1712e+04 total_spectral:1.6250e+01 L1_fnorm:4.8096e-02 L2_fnorm:4.8584e-02 L3_fnorm:4.8828e-02 L4_fnorm:4.9072e-02 L5_fnorm:4.8584e-02 L6_fnorm:4.8828e-02 L7_fnorm:4.8828e-02 L8_fnorm:4.8096e-02 L9_fnorm:4.8828e-02 L10_fnorm:4.8584e-02 L11_fnorm:4.8340e-02 L12_fnorm:4.7607e-02 L1_l1linf:1.5747e-02 L2_l1linf:1.5869e-02 L3_l1linf:1.6113e-02 L4_l1linf:1.6113e-02 L5_l1linf:1.6479e-02 L6_l1linf:1.6357e-02 L7_l1linf:1.6479e-02 L8_l1linf:1.6479e-02 L9_l1linf:1.6479e-02 L10_l1linf:1.6724e-02 L11_l1linf:1.7090e-02 L12_l1linf:1.6968e-02 L1_spectral:7.6803e-04 L2_spectral:7.7141e-04 L3_spectral:7.7260e-04 L4_spectral:7.8099e-04 L5_spectral:7.7719e-04 L6_spectral:7.9241e-04 L7_spectral:7.9289e-04 L8_spectral:7.7936e-04 L9_spectral:7.8913e-04 L10_spectral:7.8491e-04 L11_spectral:7.8208e-04 L12_spectral:7.6509e-04 train_time:131738ms step_avg:47.05ms +[2025-09-11 10:38:58] [Rank 0] PRINT: step:2800/10000 val_loss:5.2469 total_sharp:5.9846e-04 L1_sharp:3.7305e-01 L2_sharp:4.4658e-01 L3_sharp:5.6609e-01 L4_sharp:6.7465e-01 L5_sharp:8.0054e-01 L6_sharp:9.2857e-01 L7_sharp:8.8390e-01 L8_sharp:8.1238e-01 L9_sharp:7.9162e-01 L10_sharp:9.0230e-01 L11_sharp:8.7231e-01 L12_sharp:1.6934e+00 total_fnorm:3.2500e+01 total_l1_linf:5.1712e+04 total_spectral:1.6250e+01 L1_fnorm:4.8096e-02 L2_fnorm:4.8584e-02 L3_fnorm:4.8828e-02 L4_fnorm:4.9072e-02 L5_fnorm:4.8584e-02 L6_fnorm:4.8828e-02 L7_fnorm:4.8828e-02 L8_fnorm:4.8096e-02 L9_fnorm:4.8828e-02 L10_fnorm:4.8584e-02 L11_fnorm:4.8340e-02 L12_fnorm:4.7607e-02 L1_l1linf:1.5747e-02 L2_l1linf:1.5869e-02 L3_l1linf:1.6113e-02 L4_l1linf:1.6113e-02 L5_l1linf:1.6479e-02 
L6_l1linf:1.6357e-02 L7_l1linf:1.6479e-02 L8_l1linf:1.6479e-02 L9_l1linf:1.6479e-02 L10_l1linf:1.6724e-02 L11_l1linf:1.7090e-02 L12_l1linf:1.6968e-02 L1_spectral:7.6803e-04 L2_spectral:7.7141e-04 L3_spectral:7.7260e-04 L4_spectral:7.8099e-04 L5_spectral:7.7719e-04 L6_spectral:7.9241e-04 L7_spectral:7.9289e-04 L8_spectral:7.7936e-04 L9_spectral:7.8913e-04 L10_spectral:7.8491e-04 L11_spectral:7.8208e-04 L12_spectral:7.6509e-04 train_time:131738ms step_avg:47.05ms +[2025-09-11 10:38:59] [Rank 0] step:2801/10000 train_time:132972ms step_avg:47.47ms +[2025-09-11 10:38:59] [Rank 0] step:2801/10000 train_time:132972ms step_avg:47.47ms +[2025-09-11 10:39:00] [Rank 0] step:2821/10000 train_time:133640ms step_avg:47.37ms +[2025-09-11 10:39:00] [Rank 0] step:2821/10000 train_time:133640ms step_avg:47.37ms +[2025-09-11 10:39:01] [Rank 0] step:2841/10000 train_time:134306ms step_avg:47.27ms +[2025-09-11 10:39:01] [Rank 0] step:2841/10000 train_time:134306ms step_avg:47.27ms +[2025-09-11 10:39:01] [Rank 0] step:2861/10000 train_time:134973ms step_avg:47.18ms +[2025-09-11 10:39:01] [Rank 0] step:2861/10000 train_time:134973ms step_avg:47.18ms +[2025-09-11 10:39:02] [Rank 0] step:2881/10000 train_time:135640ms step_avg:47.08ms +[2025-09-11 10:39:02] [Rank 0] step:2881/10000 train_time:135640ms step_avg:47.08ms +[2025-09-11 10:39:03] [Rank 0] step:2901/10000 train_time:136305ms step_avg:46.99ms +[2025-09-11 10:39:03] [Rank 0] step:2901/10000 train_time:136305ms step_avg:46.99ms +[2025-09-11 10:39:03] [Rank 0] step:2921/10000 train_time:136970ms step_avg:46.89ms +[2025-09-11 10:39:03] [Rank 0] step:2921/10000 train_time:136970ms step_avg:46.89ms +[2025-09-11 10:39:04] [Rank 0] step:2941/10000 train_time:137635ms step_avg:46.80ms +[2025-09-11 10:39:04] [Rank 0] step:2941/10000 train_time:137635ms step_avg:46.80ms +[2025-09-11 10:39:05] [Rank 0] step:2961/10000 train_time:138301ms step_avg:46.71ms +[2025-09-11 10:39:05] [Rank 0] step:2961/10000 train_time:138301ms step_avg:46.71ms 
+[2025-09-11 10:39:05] [Rank 0] step:2981/10000 train_time:138968ms step_avg:46.62ms +[2025-09-11 10:39:05] [Rank 0] step:2981/10000 train_time:138968ms step_avg:46.62ms +[2025-09-11 10:39:06] [Rank 0] step:3001/10000 train_time:139636ms step_avg:46.53ms +[2025-09-11 10:39:06] [Rank 0] step:3001/10000 train_time:139636ms step_avg:46.53ms +[2025-09-11 10:39:07] [Rank 0] step:3021/10000 train_time:140303ms step_avg:46.44ms +[2025-09-11 10:39:07] [Rank 0] step:3021/10000 train_time:140303ms step_avg:46.44ms +[2025-09-11 10:39:07] [Rank 0] step:3041/10000 train_time:140971ms step_avg:46.36ms +[2025-09-11 10:39:07] [Rank 0] step:3041/10000 train_time:140971ms step_avg:46.36ms +[2025-09-11 10:39:08] [Rank 0] step:3061/10000 train_time:141638ms step_avg:46.27ms +[2025-09-11 10:39:08] [Rank 0] step:3061/10000 train_time:141638ms step_avg:46.27ms +[2025-09-11 10:39:09] [Rank 0] step:3081/10000 train_time:142307ms step_avg:46.19ms +[2025-09-11 10:39:09] [Rank 0] step:3081/10000 train_time:142307ms step_avg:46.19ms +[2025-09-11 10:39:09] [Rank 0] step:3101/10000 train_time:142975ms step_avg:46.11ms +[2025-09-11 10:39:09] [Rank 0] step:3101/10000 train_time:142975ms step_avg:46.11ms +[2025-09-11 10:39:10] [Rank 0] step:3121/10000 train_time:143643ms step_avg:46.02ms +[2025-09-11 10:39:10] [Rank 0] step:3121/10000 train_time:143643ms step_avg:46.02ms +[2025-09-11 10:39:11] [Rank 0] step:3141/10000 train_time:144310ms step_avg:45.94ms +[2025-09-11 10:39:11] [Rank 0] step:3141/10000 train_time:144310ms step_avg:45.94ms +[2025-09-11 10:39:11] [Rank 0] step:3161/10000 train_time:144978ms step_avg:45.86ms +[2025-09-11 10:39:11] [Rank 0] step:3161/10000 train_time:144978ms step_avg:45.86ms +[2025-09-11 10:39:12] [Rank 0] step:3181/10000 train_time:145646ms step_avg:45.79ms +[2025-09-11 10:39:12] [Rank 0] step:3181/10000 train_time:145646ms step_avg:45.79ms +[2025-09-11 10:39:13] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 10:39:13] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 10:39:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 10:39:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 10:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 10:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 10:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 10:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 10:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 10:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 10:39:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 10:39:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 10:39:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 10:39:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 10:39:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 10:39:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 10:39:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 10:39:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 10:39:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 10:39:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 10:39:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 10:39:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 10:39:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 10:39:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 10:39:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 10:39:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 10:39:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 10:39:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 10:39:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 10:39:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 10:39:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 10:39:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 10:39:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 10:39:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 10:39:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 10:39:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 10:39:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 10:39:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 10:39:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 10:39:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 10:39:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 10:39:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 10:39:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:39:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:39:23] [Rank 0] PRINT: step:3200/10000 val_loss:5.1653 total_sharp:3.4396e-04 L1_sharp:2.8580e-01 L2_sharp:3.3796e-01 L3_sharp:4.5944e-01 L4_sharp:6.0900e-01 L5_sharp:7.3270e-01 L6_sharp:8.7770e-01 L7_sharp:7.3647e-01 L8_sharp:6.9824e-01 L9_sharp:6.4562e-01 L10_sharp:6.6372e-01 L11_sharp:6.9649e-01 L12_sharp:1.7552e+00 total_fnorm:3.8250e+01 total_l1_linf:6.4768e+04 total_spectral:1.9250e+01 L1_fnorm:4.8096e-02 L2_fnorm:4.8584e-02 L3_fnorm:4.8584e-02 L4_fnorm:4.8828e-02 L5_fnorm:4.8584e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8584e-02 L8_fnorm:4.7852e-02 L9_fnorm:4.8340e-02 L10_fnorm:4.8096e-02 L11_fnorm:4.8096e-02 L12_fnorm:4.7607e-02 L1_l1linf:1.5076e-02 L2_l1linf:1.5076e-02 L3_l1linf:1.5137e-02 L4_l1linf:1.5503e-02 L5_l1linf:1.5564e-02 L6_l1linf:1.5503e-02 L7_l1linf:1.5625e-02 L8_l1linf:1.5381e-02 L9_l1linf:1.5381e-02 L10_l1linf:1.5869e-02 L11_l1linf:1.5991e-02 L12_l1linf:1.6724e-02 L1_spectral:7.8043e-04 L2_spectral:7.7823e-04 L3_spectral:7.8655e-04 L4_spectral:7.8701e-04 L5_spectral:7.8455e-04 L6_spectral:7.8918e-04 L7_spectral:7.9282e-04 L8_spectral:7.8498e-04 L9_spectral:7.9466e-04 L10_spectral:7.9109e-04 L11_spectral:7.9002e-04 L12_spectral:7.6862e-04 train_time:146295ms step_avg:45.72ms +[2025-09-11 10:39:23] [Rank 0] PRINT: step:3200/10000 
val_loss:5.1653 total_sharp:3.4396e-04 L1_sharp:2.8580e-01 L2_sharp:3.3796e-01 L3_sharp:4.5944e-01 L4_sharp:6.0900e-01 L5_sharp:7.3270e-01 L6_sharp:8.7770e-01 L7_sharp:7.3647e-01 L8_sharp:6.9824e-01 L9_sharp:6.4562e-01 L10_sharp:6.6372e-01 L11_sharp:6.9649e-01 L12_sharp:1.7552e+00 total_fnorm:3.8250e+01 total_l1_linf:6.4768e+04 total_spectral:1.9250e+01 L1_fnorm:4.8096e-02 L2_fnorm:4.8584e-02 L3_fnorm:4.8584e-02 L4_fnorm:4.8828e-02 L5_fnorm:4.8584e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8584e-02 L8_fnorm:4.7852e-02 L9_fnorm:4.8340e-02 L10_fnorm:4.8096e-02 L11_fnorm:4.8096e-02 L12_fnorm:4.7607e-02 L1_l1linf:1.5076e-02 L2_l1linf:1.5076e-02 L3_l1linf:1.5137e-02 L4_l1linf:1.5503e-02 L5_l1linf:1.5564e-02 L6_l1linf:1.5503e-02 L7_l1linf:1.5625e-02 L8_l1linf:1.5381e-02 L9_l1linf:1.5381e-02 L10_l1linf:1.5869e-02 L11_l1linf:1.5991e-02 L12_l1linf:1.6724e-02 L1_spectral:7.8043e-04 L2_spectral:7.7823e-04 L3_spectral:7.8655e-04 L4_spectral:7.8701e-04 L5_spectral:7.8455e-04 L6_spectral:7.8918e-04 L7_spectral:7.9282e-04 L8_spectral:7.8498e-04 L9_spectral:7.9466e-04 L10_spectral:7.9109e-04 L11_spectral:7.9002e-04 L12_spectral:7.6862e-04 train_time:146295ms step_avg:45.72ms +[2025-09-11 10:39:24] [Rank 0] step:3201/10000 train_time:147546ms step_avg:46.09ms +[2025-09-11 10:39:24] [Rank 0] step:3201/10000 train_time:147546ms step_avg:46.09ms +[2025-09-11 10:39:25] [Rank 0] step:3221/10000 train_time:148244ms step_avg:46.02ms +[2025-09-11 10:39:25] [Rank 0] step:3221/10000 train_time:148244ms step_avg:46.02ms +[2025-09-11 10:39:25] [Rank 0] step:3241/10000 train_time:149173ms step_avg:46.03ms +[2025-09-11 10:39:25] [Rank 0] step:3241/10000 train_time:149173ms step_avg:46.03ms +[2025-09-11 10:39:26] [Rank 0] step:3261/10000 train_time:149843ms step_avg:45.95ms +[2025-09-11 10:39:26] [Rank 0] step:3261/10000 train_time:149843ms step_avg:45.95ms +[2025-09-11 10:39:27] [Rank 0] step:3281/10000 train_time:150511ms step_avg:45.87ms +[2025-09-11 10:39:27] [Rank 0] step:3281/10000 
train_time:150511ms step_avg:45.87ms +[2025-09-11 10:39:27] [Rank 0] step:3301/10000 train_time:151180ms step_avg:45.80ms +[2025-09-11 10:39:27] [Rank 0] step:3301/10000 train_time:151180ms step_avg:45.80ms +[2025-09-11 10:39:28] [Rank 0] step:3321/10000 train_time:151847ms step_avg:45.72ms +[2025-09-11 10:39:28] [Rank 0] step:3321/10000 train_time:151847ms step_avg:45.72ms +[2025-09-11 10:39:29] [Rank 0] step:3341/10000 train_time:152515ms step_avg:45.65ms +[2025-09-11 10:39:29] [Rank 0] step:3341/10000 train_time:152515ms step_avg:45.65ms +[2025-09-11 10:39:29] [Rank 0] step:3361/10000 train_time:153185ms step_avg:45.58ms +[2025-09-11 10:39:29] [Rank 0] step:3361/10000 train_time:153185ms step_avg:45.58ms +[2025-09-11 10:39:30] [Rank 0] step:3381/10000 train_time:153854ms step_avg:45.51ms +[2025-09-11 10:39:30] [Rank 0] step:3381/10000 train_time:153854ms step_avg:45.51ms +[2025-09-11 10:39:31] [Rank 0] step:3401/10000 train_time:154522ms step_avg:45.43ms +[2025-09-11 10:39:31] [Rank 0] step:3401/10000 train_time:154522ms step_avg:45.43ms +[2025-09-11 10:39:31] [Rank 0] step:3421/10000 train_time:155190ms step_avg:45.36ms +[2025-09-11 10:39:31] [Rank 0] step:3421/10000 train_time:155190ms step_avg:45.36ms +[2025-09-11 10:39:32] [Rank 0] step:3441/10000 train_time:155858ms step_avg:45.29ms +[2025-09-11 10:39:32] [Rank 0] step:3441/10000 train_time:155858ms step_avg:45.29ms +[2025-09-11 10:39:33] [Rank 0] step:3461/10000 train_time:156526ms step_avg:45.23ms +[2025-09-11 10:39:33] [Rank 0] step:3461/10000 train_time:156526ms step_avg:45.23ms +[2025-09-11 10:39:33] [Rank 0] step:3481/10000 train_time:157195ms step_avg:45.16ms +[2025-09-11 10:39:33] [Rank 0] step:3481/10000 train_time:157195ms step_avg:45.16ms +[2025-09-11 10:39:34] [Rank 0] step:3501/10000 train_time:157863ms step_avg:45.09ms +[2025-09-11 10:39:34] [Rank 0] step:3501/10000 train_time:157863ms step_avg:45.09ms +[2025-09-11 10:39:35] [Rank 0] step:3521/10000 train_time:158531ms step_avg:45.02ms 
+[2025-09-11 10:39:35] [Rank 0] step:3521/10000 train_time:158531ms step_avg:45.02ms +[2025-09-11 10:39:35] [Rank 0] step:3541/10000 train_time:159199ms step_avg:44.96ms +[2025-09-11 10:39:35] [Rank 0] step:3541/10000 train_time:159199ms step_avg:44.96ms +[2025-09-11 10:39:36] [Rank 0] step:3561/10000 train_time:159867ms step_avg:44.89ms +[2025-09-11 10:39:36] [Rank 0] step:3561/10000 train_time:159867ms step_avg:44.89ms +[2025-09-11 10:39:37] [Rank 0] step:3581/10000 train_time:160539ms step_avg:44.83ms +[2025-09-11 10:39:37] [Rank 0] step:3581/10000 train_time:160539ms step_avg:44.83ms +[2025-09-11 10:39:37] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 10:39:37] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 10:39:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 10:39:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 10:39:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 10:39:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 10:39:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:39:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:39:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 10:39:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 10:39:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 10:39:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 10:39:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 10:39:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 10:39:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 10:39:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 10:39:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 10:39:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 10:39:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 10:39:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 10:39:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 10:39:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 10:39:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 10:39:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 10:39:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 10:39:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 10:39:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 10:39:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 10:39:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 10:39:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 10:39:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 10:39:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 10:39:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 10:39:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 10:39:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 10:39:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 10:39:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 10:39:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 10:39:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 10:39:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 10:39:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 10:39:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 10:39:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 10:39:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 10:39:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:39:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:39:47] [Rank 0] PRINT: step:3600/10000 val_loss:5.1139 total_sharp:4.3933e-04 L1_sharp:2.3487e-01 L2_sharp:3.1078e-01 L3_sharp:3.8610e-01 L4_sharp:5.4187e-01 L5_sharp:6.5314e-01 L6_sharp:7.5917e-01 L7_sharp:6.5244e-01 L8_sharp:7.2779e-01 L9_sharp:7.1898e-01 L10_sharp:7.3620e-01 L11_sharp:1.2390e+00 L12_sharp:3.2763e+00 total_fnorm:3.3500e+01 total_l1_linf:5.3248e+04 total_spectral:1.6750e+01 L1_fnorm:4.7852e-02 L2_fnorm:4.8096e-02 L3_fnorm:4.8340e-02 L4_fnorm:4.8584e-02 L5_fnorm:4.8584e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8584e-02 L8_fnorm:4.7607e-02 L9_fnorm:4.8096e-02 L10_fnorm:4.8096e-02 L11_fnorm:4.7852e-02 L12_fnorm:4.7607e-02 L1_l1linf:1.3855e-02 L2_l1linf:1.4099e-02 L3_l1linf:1.4404e-02 L4_l1linf:1.4771e-02 L5_l1linf:1.4893e-02 L6_l1linf:1.5015e-02 L7_l1linf:1.5015e-02 L8_l1linf:1.4709e-02 L9_l1linf:1.4709e-02 L10_l1linf:1.4954e-02 L11_l1linf:1.5747e-02 L12_l1linf:1.6113e-02 L1_spectral:7.8428e-04 L2_spectral:7.8803e-04 L3_spectral:7.8936e-04 L4_spectral:7.8962e-04 L5_spectral:7.9095e-04 L6_spectral:8.0510e-04 L7_spectral:8.0013e-04 L8_spectral:7.9494e-04 L9_spectral:8.0260e-04 L10_spectral:7.9815e-04 L11_spectral:7.9394e-04 L12_spectral:7.6868e-04 train_time:161189ms step_avg:44.77ms +[2025-09-11 10:39:47] [Rank 0] PRINT: step:3600/10000 val_loss:5.1139 total_sharp:4.3933e-04 L1_sharp:2.3487e-01 L2_sharp:3.1078e-01 L3_sharp:3.8610e-01 L4_sharp:5.4187e-01 L5_sharp:6.5314e-01 L6_sharp:7.5917e-01 L7_sharp:6.5244e-01 L8_sharp:7.2779e-01 L9_sharp:7.1898e-01 L10_sharp:7.3620e-01 L11_sharp:1.2390e+00 L12_sharp:3.2763e+00 total_fnorm:3.3500e+01 total_l1_linf:5.3248e+04 total_spectral:1.6750e+01 L1_fnorm:4.7852e-02 L2_fnorm:4.8096e-02 L3_fnorm:4.8340e-02 L4_fnorm:4.8584e-02 L5_fnorm:4.8584e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8584e-02 L8_fnorm:4.7607e-02 L9_fnorm:4.8096e-02 L10_fnorm:4.8096e-02 L11_fnorm:4.7852e-02 L12_fnorm:4.7607e-02 L1_l1linf:1.3855e-02 L2_l1linf:1.4099e-02 L3_l1linf:1.4404e-02 L4_l1linf:1.4771e-02 L5_l1linf:1.4893e-02 
L6_l1linf:1.5015e-02 L7_l1linf:1.5015e-02 L8_l1linf:1.4709e-02 L9_l1linf:1.4709e-02 L10_l1linf:1.4954e-02 L11_l1linf:1.5747e-02 L12_l1linf:1.6113e-02 L1_spectral:7.8428e-04 L2_spectral:7.8803e-04 L3_spectral:7.8936e-04 L4_spectral:7.8962e-04 L5_spectral:7.9095e-04 L6_spectral:8.0510e-04 L7_spectral:8.0013e-04 L8_spectral:7.9494e-04 L9_spectral:8.0260e-04 L10_spectral:7.9815e-04 L11_spectral:7.9394e-04 L12_spectral:7.6868e-04 train_time:161189ms step_avg:44.77ms +[2025-09-11 10:39:49] [Rank 0] step:3601/10000 train_time:162457ms step_avg:45.11ms +[2025-09-11 10:39:49] [Rank 0] step:3601/10000 train_time:162457ms step_avg:45.11ms +[2025-09-11 10:39:49] [Rank 0] step:3621/10000 train_time:163127ms step_avg:45.05ms +[2025-09-11 10:39:49] [Rank 0] step:3621/10000 train_time:163127ms step_avg:45.05ms +[2025-09-11 10:39:50] [Rank 0] step:3641/10000 train_time:163794ms step_avg:44.99ms +[2025-09-11 10:39:50] [Rank 0] step:3641/10000 train_time:163794ms step_avg:44.99ms +[2025-09-11 10:39:51] [Rank 0] step:3661/10000 train_time:164462ms step_avg:44.92ms +[2025-09-11 10:39:51] [Rank 0] step:3661/10000 train_time:164462ms step_avg:44.92ms +[2025-09-11 10:39:51] [Rank 0] step:3681/10000 train_time:165131ms step_avg:44.86ms +[2025-09-11 10:39:51] [Rank 0] step:3681/10000 train_time:165131ms step_avg:44.86ms +[2025-09-11 10:39:52] [Rank 0] step:3701/10000 train_time:165798ms step_avg:44.80ms +[2025-09-11 10:39:52] [Rank 0] step:3701/10000 train_time:165798ms step_avg:44.80ms +[2025-09-11 10:39:53] [Rank 0] step:3721/10000 train_time:166475ms step_avg:44.74ms +[2025-09-11 10:39:53] [Rank 0] step:3721/10000 train_time:166475ms step_avg:44.74ms +[2025-09-11 10:39:53] [Rank 0] step:3741/10000 train_time:167152ms step_avg:44.68ms +[2025-09-11 10:39:53] [Rank 0] step:3741/10000 train_time:167152ms step_avg:44.68ms +[2025-09-11 10:39:54] [Rank 0] step:3761/10000 train_time:167830ms step_avg:44.62ms +[2025-09-11 10:39:54] [Rank 0] step:3761/10000 train_time:167830ms step_avg:44.62ms 
+[2025-09-11 10:39:55] [Rank 0] step:3781/10000 train_time:168508ms step_avg:44.57ms +[2025-09-11 10:39:55] [Rank 0] step:3781/10000 train_time:168508ms step_avg:44.57ms +[2025-09-11 10:39:55] [Rank 0] step:3801/10000 train_time:169186ms step_avg:44.51ms +[2025-09-11 10:39:55] [Rank 0] step:3801/10000 train_time:169186ms step_avg:44.51ms +[2025-09-11 10:39:56] [Rank 0] step:3821/10000 train_time:169864ms step_avg:44.46ms +[2025-09-11 10:39:56] [Rank 0] step:3821/10000 train_time:169864ms step_avg:44.46ms +[2025-09-11 10:39:57] [Rank 0] step:3841/10000 train_time:170541ms step_avg:44.40ms +[2025-09-11 10:39:57] [Rank 0] step:3841/10000 train_time:170541ms step_avg:44.40ms +[2025-09-11 10:39:57] [Rank 0] step:3861/10000 train_time:171218ms step_avg:44.35ms +[2025-09-11 10:39:57] [Rank 0] step:3861/10000 train_time:171218ms step_avg:44.35ms +[2025-09-11 10:39:58] [Rank 0] step:3881/10000 train_time:171895ms step_avg:44.29ms +[2025-09-11 10:39:58] [Rank 0] step:3881/10000 train_time:171895ms step_avg:44.29ms +[2025-09-11 10:39:59] [Rank 0] step:3901/10000 train_time:172572ms step_avg:44.24ms +[2025-09-11 10:39:59] [Rank 0] step:3901/10000 train_time:172572ms step_avg:44.24ms +[2025-09-11 10:39:59] [Rank 0] step:3921/10000 train_time:173250ms step_avg:44.19ms +[2025-09-11 10:39:59] [Rank 0] step:3921/10000 train_time:173250ms step_avg:44.19ms +[2025-09-11 10:40:00] [Rank 0] step:3941/10000 train_time:173927ms step_avg:44.13ms +[2025-09-11 10:40:00] [Rank 0] step:3941/10000 train_time:173927ms step_avg:44.13ms +[2025-09-11 10:40:01] [Rank 0] step:3961/10000 train_time:174606ms step_avg:44.08ms +[2025-09-11 10:40:01] [Rank 0] step:3961/10000 train_time:174606ms step_avg:44.08ms +[2025-09-11 10:40:02] [Rank 0] step:3981/10000 train_time:175340ms step_avg:44.04ms +[2025-09-11 10:40:02] [Rank 0] step:3981/10000 train_time:175340ms step_avg:44.04ms +[2025-09-11 10:40:02] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 10:40:02] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 10:40:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 10:40:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 10:40:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 10:40:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 10:40:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:40:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:40:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 10:40:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 10:40:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 10:40:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 10:40:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 10:40:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 10:40:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 10:40:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 10:40:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 10:40:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 10:40:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 10:40:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 10:40:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 10:40:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 10:40:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 10:40:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 10:40:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 10:40:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 10:40:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 10:40:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 10:40:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 10:40:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 10:40:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 10:40:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 10:40:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 10:40:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 10:40:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 10:40:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 10:40:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 10:40:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 10:40:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 10:40:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 10:40:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 10:40:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 10:40:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 10:40:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 10:40:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:40:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:40:15] [Rank 0] PRINT: step:4000/10000 val_loss:5.0494 total_sharp:3.7971e-04 L1_sharp:2.4072e-01 L2_sharp:3.1642e-01 L3_sharp:3.9769e-01 L4_sharp:5.3582e-01 L5_sharp:7.4666e-01 L6_sharp:9.8773e-01 L7_sharp:1.0458e+00 L8_sharp:1.0896e+00 L9_sharp:1.0655e+00 L10_sharp:1.2788e+00 L11_sharp:1.1938e+00 L12_sharp:1.6459e+00 total_fnorm:3.9750e+01 total_l1_linf:6.4000e+04 total_spectral:1.9875e+01 L1_fnorm:4.7119e-02 L2_fnorm:4.7852e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.8584e-02 L5_fnorm:4.8096e-02 L6_fnorm:4.8340e-02 L7_fnorm:4.8584e-02 L8_fnorm:4.7607e-02 L9_fnorm:4.8096e-02 L10_fnorm:4.7852e-02 L11_fnorm:4.7852e-02 L12_fnorm:4.7119e-02 L1_l1linf:1.4343e-02 L2_l1linf:1.4282e-02 L3_l1linf:1.4893e-02 L4_l1linf:1.4709e-02 L5_l1linf:1.4893e-02 L6_l1linf:1.5564e-02 L7_l1linf:1.5442e-02 L8_l1linf:1.5198e-02 L9_l1linf:1.5442e-02 L10_l1linf:1.5564e-02 L11_l1linf:1.5625e-02 L12_l1linf:1.5869e-02 L1_spectral:7.6460e-04 L2_spectral:7.7584e-04 L3_spectral:7.7474e-04 L4_spectral:7.8605e-04 L5_spectral:7.8121e-04 L6_spectral:7.9347e-04 L7_spectral:8.0286e-04 L8_spectral:7.9222e-04 L9_spectral:8.0200e-04 L10_spectral:7.9583e-04 L11_spectral:7.8827e-04 L12_spectral:7.7229e-04 train_time:176060ms step_avg:44.02ms +[2025-09-11 10:40:15] [Rank 0] PRINT: step:4000/10000 
val_loss:5.0494 total_sharp:3.7971e-04 L1_sharp:2.4072e-01 L2_sharp:3.1642e-01 L3_sharp:3.9769e-01 L4_sharp:5.3582e-01 L5_sharp:7.4666e-01 L6_sharp:9.8773e-01 L7_sharp:1.0458e+00 L8_sharp:1.0896e+00 L9_sharp:1.0655e+00 L10_sharp:1.2788e+00 L11_sharp:1.1938e+00 L12_sharp:1.6459e+00 total_fnorm:3.9750e+01 total_l1_linf:6.4000e+04 total_spectral:1.9875e+01 L1_fnorm:4.7119e-02 L2_fnorm:4.7852e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.8584e-02 L5_fnorm:4.8096e-02 L6_fnorm:4.8340e-02 L7_fnorm:4.8584e-02 L8_fnorm:4.7607e-02 L9_fnorm:4.8096e-02 L10_fnorm:4.7852e-02 L11_fnorm:4.7852e-02 L12_fnorm:4.7119e-02 L1_l1linf:1.4343e-02 L2_l1linf:1.4282e-02 L3_l1linf:1.4893e-02 L4_l1linf:1.4709e-02 L5_l1linf:1.4893e-02 L6_l1linf:1.5564e-02 L7_l1linf:1.5442e-02 L8_l1linf:1.5198e-02 L9_l1linf:1.5442e-02 L10_l1linf:1.5564e-02 L11_l1linf:1.5625e-02 L12_l1linf:1.5869e-02 L1_spectral:7.6460e-04 L2_spectral:7.7584e-04 L3_spectral:7.7474e-04 L4_spectral:7.8605e-04 L5_spectral:7.8121e-04 L6_spectral:7.9347e-04 L7_spectral:8.0286e-04 L8_spectral:7.9222e-04 L9_spectral:8.0200e-04 L10_spectral:7.9583e-04 L11_spectral:7.8827e-04 L12_spectral:7.7229e-04 train_time:176060ms step_avg:44.02ms +[2025-09-11 10:40:16] [Rank 0] step:4001/10000 train_time:177332ms step_avg:44.32ms +[2025-09-11 10:40:16] [Rank 0] step:4001/10000 train_time:177332ms step_avg:44.32ms +[2025-09-11 10:40:17] [Rank 0] step:4021/10000 train_time:178026ms step_avg:44.27ms +[2025-09-11 10:40:17] [Rank 0] step:4021/10000 train_time:178026ms step_avg:44.27ms +[2025-09-11 10:40:17] [Rank 0] step:4041/10000 train_time:178705ms step_avg:44.22ms +[2025-09-11 10:40:17] [Rank 0] step:4041/10000 train_time:178705ms step_avg:44.22ms +[2025-09-11 10:40:18] [Rank 0] step:4061/10000 train_time:179381ms step_avg:44.17ms +[2025-09-11 10:40:18] [Rank 0] step:4061/10000 train_time:179381ms step_avg:44.17ms +[2025-09-11 10:40:19] [Rank 0] step:4081/10000 train_time:180060ms step_avg:44.12ms +[2025-09-11 10:40:19] [Rank 0] step:4081/10000 
train_time:180060ms step_avg:44.12ms +[2025-09-11 10:40:20] [Rank 0] step:4101/10000 train_time:180739ms step_avg:44.07ms +[2025-09-11 10:40:20] [Rank 0] step:4101/10000 train_time:180739ms step_avg:44.07ms +[2025-09-11 10:40:20] [Rank 0] step:4121/10000 train_time:181417ms step_avg:44.02ms +[2025-09-11 10:40:20] [Rank 0] step:4121/10000 train_time:181417ms step_avg:44.02ms +[2025-09-11 10:40:21] [Rank 0] step:4141/10000 train_time:182095ms step_avg:43.97ms +[2025-09-11 10:40:21] [Rank 0] step:4141/10000 train_time:182095ms step_avg:43.97ms +[2025-09-11 10:40:22] [Rank 0] step:4161/10000 train_time:182773ms step_avg:43.93ms +[2025-09-11 10:40:22] [Rank 0] step:4161/10000 train_time:182773ms step_avg:43.93ms +[2025-09-11 10:40:22] [Rank 0] step:4181/10000 train_time:183451ms step_avg:43.88ms +[2025-09-11 10:40:22] [Rank 0] step:4181/10000 train_time:183451ms step_avg:43.88ms +[2025-09-11 10:40:23] [Rank 0] step:4201/10000 train_time:184129ms step_avg:43.83ms +[2025-09-11 10:40:23] [Rank 0] step:4201/10000 train_time:184129ms step_avg:43.83ms +[2025-09-11 10:40:24] [Rank 0] step:4221/10000 train_time:184806ms step_avg:43.78ms +[2025-09-11 10:40:24] [Rank 0] step:4221/10000 train_time:184806ms step_avg:43.78ms +[2025-09-11 10:40:24] [Rank 0] step:4241/10000 train_time:185482ms step_avg:43.74ms +[2025-09-11 10:40:24] [Rank 0] step:4241/10000 train_time:185482ms step_avg:43.74ms +[2025-09-11 10:40:25] [Rank 0] step:4261/10000 train_time:186161ms step_avg:43.69ms +[2025-09-11 10:40:25] [Rank 0] step:4261/10000 train_time:186161ms step_avg:43.69ms +[2025-09-11 10:40:26] [Rank 0] step:4281/10000 train_time:187131ms step_avg:43.71ms +[2025-09-11 10:40:26] [Rank 0] step:4281/10000 train_time:187131ms step_avg:43.71ms +[2025-09-11 10:40:27] [Rank 0] step:4301/10000 train_time:187810ms step_avg:43.67ms +[2025-09-11 10:40:27] [Rank 0] step:4301/10000 train_time:187810ms step_avg:43.67ms +[2025-09-11 10:40:27] [Rank 0] step:4321/10000 train_time:188487ms step_avg:43.62ms 
+[2025-09-11 10:40:27] [Rank 0] step:4321/10000 train_time:188487ms step_avg:43.62ms +[2025-09-11 10:40:28] [Rank 0] step:4341/10000 train_time:189464ms step_avg:43.65ms +[2025-09-11 10:40:28] [Rank 0] step:4341/10000 train_time:189464ms step_avg:43.65ms +[2025-09-11 10:40:29] [Rank 0] step:4361/10000 train_time:190142ms step_avg:43.60ms +[2025-09-11 10:40:29] [Rank 0] step:4361/10000 train_time:190142ms step_avg:43.60ms +[2025-09-11 10:40:30] [Rank 0] step:4381/10000 train_time:190820ms step_avg:43.56ms +[2025-09-11 10:40:30] [Rank 0] step:4381/10000 train_time:190820ms step_avg:43.56ms +[2025-09-11 10:40:30] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 10:40:30] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 10:40:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 10:40:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 10:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 10:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 10:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 10:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 10:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 10:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 10:40:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 10:40:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 10:40:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 10:40:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 10:40:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 10:40:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 10:40:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 10:40:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 10:40:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 10:40:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 10:40:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 10:40:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 10:40:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 10:40:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 10:40:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 10:40:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 10:40:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 10:40:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 10:40:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 10:40:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 10:40:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 10:40:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 10:40:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 10:40:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 10:40:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 10:40:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 10:40:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 10:40:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 10:40:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 10:40:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 10:40:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 10:40:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 10:40:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:40:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:40:40] [Rank 0] PRINT: step:4400/10000 val_loss:5.0066 total_sharp:4.5722e-04 L1_sharp:1.8296e-01 L2_sharp:2.4187e-01 L3_sharp:3.4395e-01 L4_sharp:4.7003e-01 L5_sharp:6.2426e-01 L6_sharp:8.1923e-01 L7_sharp:9.4782e-01 L8_sharp:9.8175e-01 L9_sharp:1.3790e+00 L10_sharp:2.9345e+00 L11_sharp:4.5673e+00 L12_sharp:7.9035e+00 total_fnorm:3.4750e+01 total_l1_linf:5.4016e+04 total_spectral:1.7375e+01 L1_fnorm:4.7607e-02 L2_fnorm:4.7852e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.8584e-02 L5_fnorm:4.8096e-02 L6_fnorm:4.8340e-02 L7_fnorm:4.8340e-02 L8_fnorm:4.7607e-02 L9_fnorm:4.8096e-02 L10_fnorm:4.8096e-02 L11_fnorm:4.7852e-02 L12_fnorm:4.6875e-02 L1_l1linf:1.3672e-02 L2_l1linf:1.3733e-02 L3_l1linf:1.4160e-02 L4_l1linf:1.4282e-02 L5_l1linf:1.4832e-02 L6_l1linf:1.4771e-02 L7_l1linf:1.4832e-02 L8_l1linf:1.4709e-02 L9_l1linf:1.5137e-02 L10_l1linf:1.5442e-02 L11_l1linf:1.5747e-02 L12_l1linf:1.5259e-02 L1_spectral:7.7716e-04 L2_spectral:7.8245e-04 L3_spectral:7.8949e-04 L4_spectral:7.8564e-04 L5_spectral:7.8511e-04 L6_spectral:7.9022e-04 L7_spectral:8.0109e-04 L8_spectral:7.8957e-04 L9_spectral:7.9325e-04 L10_spectral:7.9082e-04 L11_spectral:7.8419e-04 L12_spectral:7.5810e-04 train_time:191478ms step_avg:43.52ms +[2025-09-11 10:40:40] [Rank 0] PRINT: step:4400/10000 val_loss:5.0066 total_sharp:4.5722e-04 L1_sharp:1.8296e-01 L2_sharp:2.4187e-01 L3_sharp:3.4395e-01 L4_sharp:4.7003e-01 L5_sharp:6.2426e-01 L6_sharp:8.1923e-01 L7_sharp:9.4782e-01 L8_sharp:9.8175e-01 L9_sharp:1.3790e+00 L10_sharp:2.9345e+00 L11_sharp:4.5673e+00 L12_sharp:7.9035e+00 total_fnorm:3.4750e+01 total_l1_linf:5.4016e+04 total_spectral:1.7375e+01 L1_fnorm:4.7607e-02 L2_fnorm:4.7852e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.8584e-02 L5_fnorm:4.8096e-02 L6_fnorm:4.8340e-02 L7_fnorm:4.8340e-02 L8_fnorm:4.7607e-02 L9_fnorm:4.8096e-02 L10_fnorm:4.8096e-02 L11_fnorm:4.7852e-02 L12_fnorm:4.6875e-02 L1_l1linf:1.3672e-02 L2_l1linf:1.3733e-02 L3_l1linf:1.4160e-02 L4_l1linf:1.4282e-02 L5_l1linf:1.4832e-02 
L6_l1linf:1.4771e-02 L7_l1linf:1.4832e-02 L8_l1linf:1.4709e-02 L9_l1linf:1.5137e-02 L10_l1linf:1.5442e-02 L11_l1linf:1.5747e-02 L12_l1linf:1.5259e-02 L1_spectral:7.7716e-04 L2_spectral:7.8245e-04 L3_spectral:7.8949e-04 L4_spectral:7.8564e-04 L5_spectral:7.8511e-04 L6_spectral:7.9022e-04 L7_spectral:8.0109e-04 L8_spectral:7.8957e-04 L9_spectral:7.9325e-04 L10_spectral:7.9082e-04 L11_spectral:7.8419e-04 L12_spectral:7.5810e-04 train_time:191478ms step_avg:43.52ms +[2025-09-11 10:40:42] [Rank 0] step:4401/10000 train_time:192738ms step_avg:43.79ms +[2025-09-11 10:40:42] [Rank 0] step:4401/10000 train_time:192738ms step_avg:43.79ms +[2025-09-11 10:40:42] [Rank 0] step:4421/10000 train_time:193453ms step_avg:43.76ms +[2025-09-11 10:40:42] [Rank 0] step:4421/10000 train_time:193453ms step_avg:43.76ms +[2025-09-11 10:40:43] [Rank 0] step:4441/10000 train_time:194132ms step_avg:43.71ms +[2025-09-11 10:40:43] [Rank 0] step:4441/10000 train_time:194132ms step_avg:43.71ms +[2025-09-11 10:40:44] [Rank 0] step:4461/10000 train_time:194812ms step_avg:43.67ms +[2025-09-11 10:40:44] [Rank 0] step:4461/10000 train_time:194812ms step_avg:43.67ms +[2025-09-11 10:40:44] [Rank 0] step:4481/10000 train_time:195492ms step_avg:43.63ms +[2025-09-11 10:40:44] [Rank 0] step:4481/10000 train_time:195492ms step_avg:43.63ms +[2025-09-11 10:40:45] [Rank 0] step:4501/10000 train_time:196172ms step_avg:43.58ms +[2025-09-11 10:40:45] [Rank 0] step:4501/10000 train_time:196172ms step_avg:43.58ms +[2025-09-11 10:40:46] [Rank 0] step:4521/10000 train_time:196852ms step_avg:43.54ms +[2025-09-11 10:40:46] [Rank 0] step:4521/10000 train_time:196852ms step_avg:43.54ms +[2025-09-11 10:40:46] [Rank 0] step:4541/10000 train_time:197533ms step_avg:43.50ms +[2025-09-11 10:40:46] [Rank 0] step:4541/10000 train_time:197533ms step_avg:43.50ms +[2025-09-11 10:40:47] [Rank 0] step:4561/10000 train_time:198213ms step_avg:43.46ms +[2025-09-11 10:40:47] [Rank 0] step:4561/10000 train_time:198213ms step_avg:43.46ms 
+[2025-09-11 10:40:48] [Rank 0] step:4581/10000 train_time:198893ms step_avg:43.42ms +[2025-09-11 10:40:48] [Rank 0] step:4581/10000 train_time:198893ms step_avg:43.42ms +[2025-09-11 10:40:48] [Rank 0] step:4601/10000 train_time:199573ms step_avg:43.38ms +[2025-09-11 10:40:48] [Rank 0] step:4601/10000 train_time:199573ms step_avg:43.38ms +[2025-09-11 10:40:49] [Rank 0] step:4621/10000 train_time:200253ms step_avg:43.34ms +[2025-09-11 10:40:49] [Rank 0] step:4621/10000 train_time:200253ms step_avg:43.34ms +[2025-09-11 10:40:50] [Rank 0] step:4641/10000 train_time:200932ms step_avg:43.29ms +[2025-09-11 10:40:50] [Rank 0] step:4641/10000 train_time:200932ms step_avg:43.29ms +[2025-09-11 10:40:50] [Rank 0] step:4661/10000 train_time:201611ms step_avg:43.25ms +[2025-09-11 10:40:50] [Rank 0] step:4661/10000 train_time:201611ms step_avg:43.25ms +[2025-09-11 10:40:51] [Rank 0] step:4681/10000 train_time:202291ms step_avg:43.22ms +[2025-09-11 10:40:51] [Rank 0] step:4681/10000 train_time:202291ms step_avg:43.22ms +[2025-09-11 10:40:52] [Rank 0] step:4701/10000 train_time:202972ms step_avg:43.18ms +[2025-09-11 10:40:52] [Rank 0] step:4701/10000 train_time:202972ms step_avg:43.18ms +[2025-09-11 10:40:53] [Rank 0] step:4721/10000 train_time:203652ms step_avg:43.14ms +[2025-09-11 10:40:53] [Rank 0] step:4721/10000 train_time:203652ms step_avg:43.14ms +[2025-09-11 10:40:53] [Rank 0] step:4741/10000 train_time:204332ms step_avg:43.10ms +[2025-09-11 10:40:53] [Rank 0] step:4741/10000 train_time:204332ms step_avg:43.10ms +[2025-09-11 10:40:54] [Rank 0] step:4761/10000 train_time:205013ms step_avg:43.06ms +[2025-09-11 10:40:54] [Rank 0] step:4761/10000 train_time:205013ms step_avg:43.06ms +[2025-09-11 10:40:55] [Rank 0] step:4781/10000 train_time:205694ms step_avg:43.02ms +[2025-09-11 10:40:55] [Rank 0] step:4781/10000 train_time:205694ms step_avg:43.02ms +[2025-09-11 10:40:55] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 10:40:55] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 10:40:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 10:40:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 10:40:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 10:40:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 10:40:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:40:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:40:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 10:40:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 10:40:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 10:40:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 10:41:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 10:41:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 10:41:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 10:41:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 10:41:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 10:41:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 10:41:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 10:41:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 10:41:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 10:41:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 10:41:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 10:41:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 10:41:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 10:41:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 10:41:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 10:41:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 10:41:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 10:41:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 10:41:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 10:41:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 10:41:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 10:41:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 10:41:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 10:41:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 10:41:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 10:41:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 10:41:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 10:41:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 10:41:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 10:41:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 10:41:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 10:41:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 10:41:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:41:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:41:05] [Rank 0] PRINT: step:4800/10000 val_loss:4.9640 total_sharp:3.9410e-04 L1_sharp:1.6879e-01 L2_sharp:2.4909e-01 L3_sharp:3.5308e-01 L4_sharp:4.9685e-01 L5_sharp:6.3734e-01 L6_sharp:7.9501e-01 L7_sharp:9.0958e-01 L8_sharp:8.9939e-01 L9_sharp:9.8675e-01 L10_sharp:1.4660e+00 L11_sharp:2.6970e+00 L12_sharp:7.5046e+00 total_fnorm:3.6000e+01 total_l1_linf:5.8880e+04 total_spectral:1.8125e+01 L1_fnorm:4.7363e-02 L2_fnorm:4.7607e-02 L3_fnorm:4.7852e-02 L4_fnorm:4.8096e-02 L5_fnorm:4.8096e-02 L6_fnorm:4.8096e-02 L7_fnorm:4.7852e-02 L8_fnorm:4.7119e-02 L9_fnorm:4.7607e-02 L10_fnorm:4.7607e-02 L11_fnorm:4.7607e-02 L12_fnorm:4.7119e-02 L1_l1linf:1.3000e-02 L2_l1linf:1.3611e-02 L3_l1linf:1.3672e-02 L4_l1linf:1.3794e-02 L5_l1linf:1.3916e-02 L6_l1linf:1.4343e-02 L7_l1linf:1.4038e-02 L8_l1linf:1.4038e-02 L9_l1linf:1.4099e-02 L10_l1linf:1.4282e-02 L11_l1linf:1.5198e-02 L12_l1linf:1.5076e-02 L1_spectral:7.8703e-04 L2_spectral:7.8608e-04 L3_spectral:7.8972e-04 L4_spectral:7.9233e-04 L5_spectral:7.9198e-04 L6_spectral:7.9657e-04 L7_spectral:8.0300e-04 L8_spectral:7.9152e-04 L9_spectral:8.0084e-04 L10_spectral:7.9867e-04 L11_spectral:7.8695e-04 L12_spectral:7.6361e-04 train_time:206358ms step_avg:42.99ms +[2025-09-11 10:41:05] [Rank 0] PRINT: step:4800/10000 
val_loss:4.9640 total_sharp:3.9410e-04 L1_sharp:1.6879e-01 L2_sharp:2.4909e-01 L3_sharp:3.5308e-01 L4_sharp:4.9685e-01 L5_sharp:6.3734e-01 L6_sharp:7.9501e-01 L7_sharp:9.0958e-01 L8_sharp:8.9939e-01 L9_sharp:9.8675e-01 L10_sharp:1.4660e+00 L11_sharp:2.6970e+00 L12_sharp:7.5046e+00 total_fnorm:3.6000e+01 total_l1_linf:5.8880e+04 total_spectral:1.8125e+01 L1_fnorm:4.7363e-02 L2_fnorm:4.7607e-02 L3_fnorm:4.7852e-02 L4_fnorm:4.8096e-02 L5_fnorm:4.8096e-02 L6_fnorm:4.8096e-02 L7_fnorm:4.7852e-02 L8_fnorm:4.7119e-02 L9_fnorm:4.7607e-02 L10_fnorm:4.7607e-02 L11_fnorm:4.7607e-02 L12_fnorm:4.7119e-02 L1_l1linf:1.3000e-02 L2_l1linf:1.3611e-02 L3_l1linf:1.3672e-02 L4_l1linf:1.3794e-02 L5_l1linf:1.3916e-02 L6_l1linf:1.4343e-02 L7_l1linf:1.4038e-02 L8_l1linf:1.4038e-02 L9_l1linf:1.4099e-02 L10_l1linf:1.4282e-02 L11_l1linf:1.5198e-02 L12_l1linf:1.5076e-02 L1_spectral:7.8703e-04 L2_spectral:7.8608e-04 L3_spectral:7.8972e-04 L4_spectral:7.9233e-04 L5_spectral:7.9198e-04 L6_spectral:7.9657e-04 L7_spectral:8.0300e-04 L8_spectral:7.9152e-04 L9_spectral:8.0084e-04 L10_spectral:7.9867e-04 L11_spectral:7.8695e-04 L12_spectral:7.6361e-04 train_time:206358ms step_avg:42.99ms +[2025-09-11 10:41:07] [Rank 0] step:4801/10000 train_time:207622ms step_avg:43.25ms +[2025-09-11 10:41:07] [Rank 0] step:4801/10000 train_time:207622ms step_avg:43.25ms +[2025-09-11 10:41:07] [Rank 0] step:4821/10000 train_time:208333ms step_avg:43.21ms +[2025-09-11 10:41:07] [Rank 0] step:4821/10000 train_time:208333ms step_avg:43.21ms +[2025-09-11 10:41:08] [Rank 0] step:4841/10000 train_time:209015ms step_avg:43.18ms +[2025-09-11 10:41:08] [Rank 0] step:4841/10000 train_time:209015ms step_avg:43.18ms +[2025-09-11 10:41:09] [Rank 0] step:4861/10000 train_time:209697ms step_avg:43.14ms +[2025-09-11 10:41:09] [Rank 0] step:4861/10000 train_time:209697ms step_avg:43.14ms +[2025-09-11 10:41:09] [Rank 0] step:4881/10000 train_time:210378ms step_avg:43.10ms +[2025-09-11 10:41:09] [Rank 0] step:4881/10000 
train_time:210378ms step_avg:43.10ms +[2025-09-11 10:41:10] [Rank 0] step:4901/10000 train_time:211060ms step_avg:43.06ms +[2025-09-11 10:41:10] [Rank 0] step:4901/10000 train_time:211060ms step_avg:43.06ms +[2025-09-11 10:41:11] [Rank 0] step:4921/10000 train_time:211741ms step_avg:43.03ms +[2025-09-11 10:41:11] [Rank 0] step:4921/10000 train_time:211741ms step_avg:43.03ms +[2025-09-11 10:41:11] [Rank 0] step:4941/10000 train_time:212422ms step_avg:42.99ms +[2025-09-11 10:41:11] [Rank 0] step:4941/10000 train_time:212422ms step_avg:42.99ms +[2025-09-11 10:41:12] [Rank 0] step:4961/10000 train_time:213104ms step_avg:42.96ms +[2025-09-11 10:41:12] [Rank 0] step:4961/10000 train_time:213104ms step_avg:42.96ms +[2025-09-11 10:41:13] [Rank 0] step:4981/10000 train_time:213785ms step_avg:42.92ms +[2025-09-11 10:41:13] [Rank 0] step:4981/10000 train_time:213785ms step_avg:42.92ms +[2025-09-11 10:41:13] [Rank 0] step:5001/10000 train_time:214467ms step_avg:42.88ms +[2025-09-11 10:41:13] [Rank 0] step:5001/10000 train_time:214467ms step_avg:42.88ms +[2025-09-11 10:41:14] [Rank 0] step:5021/10000 train_time:215148ms step_avg:42.85ms +[2025-09-11 10:41:14] [Rank 0] step:5021/10000 train_time:215148ms step_avg:42.85ms +[2025-09-11 10:41:15] [Rank 0] step:5041/10000 train_time:215828ms step_avg:42.81ms +[2025-09-11 10:41:15] [Rank 0] step:5041/10000 train_time:215828ms step_avg:42.81ms +[2025-09-11 10:41:15] [Rank 0] step:5061/10000 train_time:216508ms step_avg:42.78ms +[2025-09-11 10:41:15] [Rank 0] step:5061/10000 train_time:216508ms step_avg:42.78ms +[2025-09-11 10:41:16] [Rank 0] step:5081/10000 train_time:217188ms step_avg:42.75ms +[2025-09-11 10:41:16] [Rank 0] step:5081/10000 train_time:217188ms step_avg:42.75ms +[2025-09-11 10:41:17] [Rank 0] step:5101/10000 train_time:217869ms step_avg:42.71ms +[2025-09-11 10:41:17] [Rank 0] step:5101/10000 train_time:217869ms step_avg:42.71ms +[2025-09-11 10:41:18] [Rank 0] step:5121/10000 train_time:218550ms step_avg:42.68ms 
+[2025-09-11 10:41:18] [Rank 0] step:5121/10000 train_time:218550ms step_avg:42.68ms +[2025-09-11 10:41:18] [Rank 0] step:5141/10000 train_time:219232ms step_avg:42.64ms +[2025-09-11 10:41:18] [Rank 0] step:5141/10000 train_time:219232ms step_avg:42.64ms +[2025-09-11 10:41:19] [Rank 0] step:5161/10000 train_time:219913ms step_avg:42.61ms +[2025-09-11 10:41:19] [Rank 0] step:5161/10000 train_time:219913ms step_avg:42.61ms +[2025-09-11 10:41:20] [Rank 0] step:5181/10000 train_time:220596ms step_avg:42.58ms +[2025-09-11 10:41:20] [Rank 0] step:5181/10000 train_time:220596ms step_avg:42.58ms +[2025-09-11 10:41:20] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 10:41:20] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 10:41:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 10:41:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 10:41:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 10:41:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 10:41:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:41:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:41:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 10:41:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 10:41:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 10:41:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 10:41:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 10:41:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 10:41:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 10:41:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 10:41:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 10:41:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 10:41:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 10:41:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 10:41:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 10:41:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 10:41:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 10:41:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 10:41:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 10:41:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 10:41:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 10:41:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 10:41:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 10:41:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 10:41:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 10:41:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 10:41:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 10:41:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 10:41:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 10:41:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 10:41:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 10:41:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 10:41:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 10:41:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 10:41:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 10:41:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 10:41:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 10:41:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 10:41:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:41:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:41:30] [Rank 0] PRINT: step:5200/10000 val_loss:4.9201 total_sharp:5.0202e-04 L1_sharp:1.6439e-01 L2_sharp:2.3765e-01 L3_sharp:3.5098e-01 L4_sharp:5.4395e-01 L5_sharp:6.4827e-01 L6_sharp:8.0440e-01 L7_sharp:9.4379e-01 L8_sharp:9.3952e-01 L9_sharp:1.1443e+00 L10_sharp:1.4943e+00 L11_sharp:2.6232e+00 L12_sharp:7.3589e+00 total_fnorm:3.2500e+01 total_l1_linf:4.9152e+04 total_spectral:1.6250e+01 L1_fnorm:4.7119e-02 L2_fnorm:4.7607e-02 L3_fnorm:4.7607e-02 L4_fnorm:4.8096e-02 L5_fnorm:4.7852e-02 L6_fnorm:4.7607e-02 L7_fnorm:4.7607e-02 L8_fnorm:4.7119e-02 L9_fnorm:4.7363e-02 L10_fnorm:4.7607e-02 L11_fnorm:4.7363e-02 L12_fnorm:4.7363e-02 L1_l1linf:1.2634e-02 L2_l1linf:1.2939e-02 L3_l1linf:1.3489e-02 L4_l1linf:1.3428e-02 L5_l1linf:1.3672e-02 L6_l1linf:1.3916e-02 L7_l1linf:1.3794e-02 L8_l1linf:1.3550e-02 L9_l1linf:1.3977e-02 L10_l1linf:1.4038e-02 L11_l1linf:1.4832e-02 L12_l1linf:1.4832e-02 L1_spectral:7.8437e-04 L2_spectral:7.9331e-04 L3_spectral:7.8879e-04 L4_spectral:7.9842e-04 L5_spectral:7.9597e-04 L6_spectral:8.0199e-04 L7_spectral:8.0446e-04 L8_spectral:8.0601e-04 L9_spectral:8.0372e-04 L10_spectral:7.9818e-04 L11_spectral:8.0161e-04 L12_spectral:7.8109e-04 train_time:221263ms step_avg:42.55ms +[2025-09-11 10:41:30] [Rank 0] PRINT: step:5200/10000 val_loss:4.9201 total_sharp:5.0202e-04 L1_sharp:1.6439e-01 L2_sharp:2.3765e-01 L3_sharp:3.5098e-01 L4_sharp:5.4395e-01 L5_sharp:6.4827e-01 L6_sharp:8.0440e-01 L7_sharp:9.4379e-01 L8_sharp:9.3952e-01 L9_sharp:1.1443e+00 L10_sharp:1.4943e+00 L11_sharp:2.6232e+00 L12_sharp:7.3589e+00 total_fnorm:3.2500e+01 total_l1_linf:4.9152e+04 total_spectral:1.6250e+01 L1_fnorm:4.7119e-02 L2_fnorm:4.7607e-02 L3_fnorm:4.7607e-02 L4_fnorm:4.8096e-02 L5_fnorm:4.7852e-02 L6_fnorm:4.7607e-02 L7_fnorm:4.7607e-02 L8_fnorm:4.7119e-02 L9_fnorm:4.7363e-02 L10_fnorm:4.7607e-02 L11_fnorm:4.7363e-02 L12_fnorm:4.7363e-02 L1_l1linf:1.2634e-02 L2_l1linf:1.2939e-02 L3_l1linf:1.3489e-02 L4_l1linf:1.3428e-02 L5_l1linf:1.3672e-02 
L6_l1linf:1.3916e-02 L7_l1linf:1.3794e-02 L8_l1linf:1.3550e-02 L9_l1linf:1.3977e-02 L10_l1linf:1.4038e-02 L11_l1linf:1.4832e-02 L12_l1linf:1.4832e-02 L1_spectral:7.8437e-04 L2_spectral:7.9331e-04 L3_spectral:7.8879e-04 L4_spectral:7.9842e-04 L5_spectral:7.9597e-04 L6_spectral:8.0199e-04 L7_spectral:8.0446e-04 L8_spectral:8.0601e-04 L9_spectral:8.0372e-04 L10_spectral:7.9818e-04 L11_spectral:8.0161e-04 L12_spectral:7.8109e-04 train_time:221263ms step_avg:42.55ms +[2025-09-11 10:41:32] [Rank 0] step:5201/10000 train_time:222569ms step_avg:42.79ms +[2025-09-11 10:41:32] [Rank 0] step:5201/10000 train_time:222569ms step_avg:42.79ms +[2025-09-11 10:41:32] [Rank 0] step:5221/10000 train_time:223298ms step_avg:42.77ms +[2025-09-11 10:41:32] [Rank 0] step:5221/10000 train_time:223298ms step_avg:42.77ms +[2025-09-11 10:41:33] [Rank 0] step:5241/10000 train_time:223988ms step_avg:42.74ms +[2025-09-11 10:41:33] [Rank 0] step:5241/10000 train_time:223988ms step_avg:42.74ms +[2025-09-11 10:41:34] [Rank 0] step:5261/10000 train_time:224679ms step_avg:42.71ms +[2025-09-11 10:41:34] [Rank 0] step:5261/10000 train_time:224679ms step_avg:42.71ms +[2025-09-11 10:41:34] [Rank 0] step:5281/10000 train_time:225369ms step_avg:42.68ms +[2025-09-11 10:41:34] [Rank 0] step:5281/10000 train_time:225369ms step_avg:42.68ms +[2025-09-11 10:41:35] [Rank 0] step:5301/10000 train_time:226059ms step_avg:42.64ms +[2025-09-11 10:41:35] [Rank 0] step:5301/10000 train_time:226059ms step_avg:42.64ms +[2025-09-11 10:41:36] [Rank 0] step:5321/10000 train_time:226750ms step_avg:42.61ms +[2025-09-11 10:41:36] [Rank 0] step:5321/10000 train_time:226750ms step_avg:42.61ms +[2025-09-11 10:41:36] [Rank 0] step:5341/10000 train_time:227439ms step_avg:42.58ms +[2025-09-11 10:41:36] [Rank 0] step:5341/10000 train_time:227439ms step_avg:42.58ms +[2025-09-11 10:41:37] [Rank 0] step:5361/10000 train_time:228129ms step_avg:42.55ms +[2025-09-11 10:41:37] [Rank 0] step:5361/10000 train_time:228129ms step_avg:42.55ms 
+[2025-09-11 10:41:38] [Rank 0] step:5381/10000 train_time:228820ms step_avg:42.52ms +[2025-09-11 10:41:38] [Rank 0] step:5381/10000 train_time:228820ms step_avg:42.52ms +[2025-09-11 10:41:39] [Rank 0] step:5401/10000 train_time:229508ms step_avg:42.49ms +[2025-09-11 10:41:39] [Rank 0] step:5401/10000 train_time:229508ms step_avg:42.49ms +[2025-09-11 10:41:39] [Rank 0] step:5421/10000 train_time:230199ms step_avg:42.46ms +[2025-09-11 10:41:39] [Rank 0] step:5421/10000 train_time:230199ms step_avg:42.46ms +[2025-09-11 10:41:40] [Rank 0] step:5441/10000 train_time:230889ms step_avg:42.44ms +[2025-09-11 10:41:40] [Rank 0] step:5441/10000 train_time:230889ms step_avg:42.44ms +[2025-09-11 10:41:41] [Rank 0] step:5461/10000 train_time:231579ms step_avg:42.41ms +[2025-09-11 10:41:41] [Rank 0] step:5461/10000 train_time:231579ms step_avg:42.41ms +[2025-09-11 10:41:41] [Rank 0] step:5481/10000 train_time:232270ms step_avg:42.38ms +[2025-09-11 10:41:41] [Rank 0] step:5481/10000 train_time:232270ms step_avg:42.38ms +[2025-09-11 10:41:42] [Rank 0] step:5501/10000 train_time:232959ms step_avg:42.35ms +[2025-09-11 10:41:42] [Rank 0] step:5501/10000 train_time:232959ms step_avg:42.35ms +[2025-09-11 10:41:43] [Rank 0] step:5521/10000 train_time:233648ms step_avg:42.32ms +[2025-09-11 10:41:43] [Rank 0] step:5521/10000 train_time:233648ms step_avg:42.32ms +[2025-09-11 10:41:43] [Rank 0] step:5541/10000 train_time:234340ms step_avg:42.29ms +[2025-09-11 10:41:43] [Rank 0] step:5541/10000 train_time:234340ms step_avg:42.29ms +[2025-09-11 10:41:44] [Rank 0] step:5561/10000 train_time:235032ms step_avg:42.26ms +[2025-09-11 10:41:44] [Rank 0] step:5561/10000 train_time:235032ms step_avg:42.26ms +[2025-09-11 10:41:45] [Rank 0] step:5581/10000 train_time:235724ms step_avg:42.24ms +[2025-09-11 10:41:45] [Rank 0] step:5581/10000 train_time:235724ms step_avg:42.24ms +[2025-09-11 10:41:45] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 10:41:45] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 10:41:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 10:41:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 10:41:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 10:41:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 10:41:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:41:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:41:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 10:41:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 10:41:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 10:41:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 10:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 10:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 10:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 10:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 10:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 10:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 10:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 10:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 10:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 10:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 10:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 10:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 10:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 10:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 10:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 10:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 10:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 10:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 10:41:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 10:41:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 10:41:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 10:41:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 10:41:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 10:41:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 10:41:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 10:41:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 10:41:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 10:41:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 10:41:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 10:41:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 10:41:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 10:41:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 10:41:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:41:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:41:56] [Rank 0] PRINT: step:5600/10000 val_loss:4.8743 total_sharp:3.7100e-04 L1_sharp:1.3213e-01 L2_sharp:1.8573e-01 L3_sharp:3.0532e-01 L4_sharp:4.5448e-01 L5_sharp:5.9995e-01 L6_sharp:7.7081e-01 L7_sharp:8.9332e-01 L8_sharp:8.3569e-01 L9_sharp:9.5999e-01 L10_sharp:1.1641e+00 L11_sharp:1.2110e+00 L12_sharp:4.1587e+00 total_fnorm:3.2750e+01 total_l1_linf:5.0688e+04 total_spectral:1.6375e+01 L1_fnorm:4.6875e-02 L2_fnorm:4.7363e-02 L3_fnorm:4.7607e-02 L4_fnorm:4.8096e-02 L5_fnorm:4.7852e-02 L6_fnorm:4.7607e-02 L7_fnorm:4.7607e-02 L8_fnorm:4.6875e-02 L9_fnorm:4.7119e-02 L10_fnorm:4.7119e-02 L11_fnorm:4.7119e-02 L12_fnorm:4.6631e-02 L1_l1linf:1.2329e-02 L2_l1linf:1.2634e-02 L3_l1linf:1.3062e-02 L4_l1linf:1.3428e-02 L5_l1linf:1.3611e-02 L6_l1linf:1.3672e-02 L7_l1linf:1.3428e-02 L8_l1linf:1.3855e-02 L9_l1linf:1.3916e-02 L10_l1linf:1.3794e-02 L11_l1linf:1.4221e-02 L12_l1linf:1.4709e-02 L1_spectral:7.8478e-04 L2_spectral:7.9418e-04 L3_spectral:7.9316e-04 L4_spectral:7.9788e-04 L5_spectral:7.9096e-04 L6_spectral:7.9578e-04 L7_spectral:8.0010e-04 L8_spectral:7.9723e-04 L9_spectral:8.0332e-04 L10_spectral:8.0361e-04 L11_spectral:7.9409e-04 L12_spectral:7.7300e-04 train_time:236395ms step_avg:42.21ms +[2025-09-11 10:41:56] [Rank 0] PRINT: step:5600/10000 
val_loss:4.8743 total_sharp:3.7100e-04 L1_sharp:1.3213e-01 L2_sharp:1.8573e-01 L3_sharp:3.0532e-01 L4_sharp:4.5448e-01 L5_sharp:5.9995e-01 L6_sharp:7.7081e-01 L7_sharp:8.9332e-01 L8_sharp:8.3569e-01 L9_sharp:9.5999e-01 L10_sharp:1.1641e+00 L11_sharp:1.2110e+00 L12_sharp:4.1587e+00 total_fnorm:3.2750e+01 total_l1_linf:5.0688e+04 total_spectral:1.6375e+01 L1_fnorm:4.6875e-02 L2_fnorm:4.7363e-02 L3_fnorm:4.7607e-02 L4_fnorm:4.8096e-02 L5_fnorm:4.7852e-02 L6_fnorm:4.7607e-02 L7_fnorm:4.7607e-02 L8_fnorm:4.6875e-02 L9_fnorm:4.7119e-02 L10_fnorm:4.7119e-02 L11_fnorm:4.7119e-02 L12_fnorm:4.6631e-02 L1_l1linf:1.2329e-02 L2_l1linf:1.2634e-02 L3_l1linf:1.3062e-02 L4_l1linf:1.3428e-02 L5_l1linf:1.3611e-02 L6_l1linf:1.3672e-02 L7_l1linf:1.3428e-02 L8_l1linf:1.3855e-02 L9_l1linf:1.3916e-02 L10_l1linf:1.3794e-02 L11_l1linf:1.4221e-02 L12_l1linf:1.4709e-02 L1_spectral:7.8478e-04 L2_spectral:7.9418e-04 L3_spectral:7.9316e-04 L4_spectral:7.9788e-04 L5_spectral:7.9096e-04 L6_spectral:7.9578e-04 L7_spectral:8.0010e-04 L8_spectral:7.9723e-04 L9_spectral:8.0332e-04 L10_spectral:8.0361e-04 L11_spectral:7.9409e-04 L12_spectral:7.7300e-04 train_time:236395ms step_avg:42.21ms +[2025-09-11 10:41:57] [Rank 0] step:5601/10000 train_time:237690ms step_avg:42.44ms +[2025-09-11 10:41:57] [Rank 0] step:5601/10000 train_time:237690ms step_avg:42.44ms +[2025-09-11 10:41:58] [Rank 0] step:5621/10000 train_time:238414ms step_avg:42.41ms +[2025-09-11 10:41:58] [Rank 0] step:5621/10000 train_time:238414ms step_avg:42.41ms +[2025-09-11 10:41:58] [Rank 0] step:5641/10000 train_time:239103ms step_avg:42.39ms +[2025-09-11 10:41:58] [Rank 0] step:5641/10000 train_time:239103ms step_avg:42.39ms +[2025-09-11 10:41:59] [Rank 0] step:5661/10000 train_time:239792ms step_avg:42.36ms +[2025-09-11 10:41:59] [Rank 0] step:5661/10000 train_time:239792ms step_avg:42.36ms +[2025-09-11 10:42:00] [Rank 0] step:5681/10000 train_time:240483ms step_avg:42.33ms +[2025-09-11 10:42:00] [Rank 0] step:5681/10000 
train_time:240483ms step_avg:42.33ms +[2025-09-11 10:42:00] [Rank 0] step:5701/10000 train_time:241175ms step_avg:42.30ms +[2025-09-11 10:42:00] [Rank 0] step:5701/10000 train_time:241175ms step_avg:42.30ms +[2025-09-11 10:42:01] [Rank 0] step:5721/10000 train_time:241864ms step_avg:42.28ms +[2025-09-11 10:42:01] [Rank 0] step:5721/10000 train_time:241864ms step_avg:42.28ms +[2025-09-11 10:42:02] [Rank 0] step:5741/10000 train_time:242556ms step_avg:42.25ms +[2025-09-11 10:42:02] [Rank 0] step:5741/10000 train_time:242556ms step_avg:42.25ms +[2025-09-11 10:42:02] [Rank 0] step:5761/10000 train_time:243247ms step_avg:42.22ms +[2025-09-11 10:42:02] [Rank 0] step:5761/10000 train_time:243247ms step_avg:42.22ms +[2025-09-11 10:42:03] [Rank 0] step:5781/10000 train_time:243938ms step_avg:42.20ms +[2025-09-11 10:42:03] [Rank 0] step:5781/10000 train_time:243938ms step_avg:42.20ms +[2025-09-11 10:42:04] [Rank 0] step:5801/10000 train_time:244630ms step_avg:42.17ms +[2025-09-11 10:42:04] [Rank 0] step:5801/10000 train_time:244630ms step_avg:42.17ms +[2025-09-11 10:42:04] [Rank 0] step:5821/10000 train_time:245330ms step_avg:42.15ms +[2025-09-11 10:42:04] [Rank 0] step:5821/10000 train_time:245330ms step_avg:42.15ms +[2025-09-11 10:42:05] [Rank 0] step:5841/10000 train_time:246021ms step_avg:42.12ms +[2025-09-11 10:42:05] [Rank 0] step:5841/10000 train_time:246021ms step_avg:42.12ms +[2025-09-11 10:42:06] [Rank 0] step:5861/10000 train_time:246711ms step_avg:42.09ms +[2025-09-11 10:42:06] [Rank 0] step:5861/10000 train_time:246711ms step_avg:42.09ms +[2025-09-11 10:42:07] [Rank 0] step:5881/10000 train_time:247401ms step_avg:42.07ms +[2025-09-11 10:42:07] [Rank 0] step:5881/10000 train_time:247401ms step_avg:42.07ms +[2025-09-11 10:42:07] [Rank 0] step:5901/10000 train_time:248090ms step_avg:42.04ms +[2025-09-11 10:42:07] [Rank 0] step:5901/10000 train_time:248090ms step_avg:42.04ms +[2025-09-11 10:42:08] [Rank 0] step:5921/10000 train_time:248783ms step_avg:42.02ms 
+[2025-09-11 10:42:08] [Rank 0] step:5921/10000 train_time:248783ms step_avg:42.02ms +[2025-09-11 10:42:09] [Rank 0] step:5941/10000 train_time:249479ms step_avg:41.99ms +[2025-09-11 10:42:09] [Rank 0] step:5941/10000 train_time:249479ms step_avg:41.99ms +[2025-09-11 10:42:09] [Rank 0] step:5961/10000 train_time:250169ms step_avg:41.97ms +[2025-09-11 10:42:09] [Rank 0] step:5961/10000 train_time:250169ms step_avg:41.97ms +[2025-09-11 10:42:10] [Rank 0] step:5981/10000 train_time:250860ms step_avg:41.94ms +[2025-09-11 10:42:10] [Rank 0] step:5981/10000 train_time:250860ms step_avg:41.94ms +[2025-09-11 10:42:11] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 10:42:11] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 10:42:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 10:42:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 10:42:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 10:42:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 10:42:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:42:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:42:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 10:42:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 10:42:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 10:42:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 10:42:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 10:42:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 10:42:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 10:42:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 10:42:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 10:42:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 10:42:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 10:42:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 10:42:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 10:42:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 10:42:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 10:42:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 10:42:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 10:42:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 10:42:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 10:42:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 10:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 10:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 10:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 10:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 10:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 10:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 10:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 10:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 10:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 10:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 10:42:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 10:42:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 10:42:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 10:42:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 10:42:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 10:42:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 10:42:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:42:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:42:21] [Rank 0] PRINT: step:6000/10000 val_loss:4.8235 total_sharp:3.2252e-04 L1_sharp:1.3778e-01 L2_sharp:2.2352e-01 L3_sharp:3.3868e-01 L4_sharp:4.7940e-01 L5_sharp:6.3827e-01 L6_sharp:8.6052e-01 L7_sharp:9.2140e-01 L8_sharp:8.1491e-01 L9_sharp:8.6099e-01 L10_sharp:1.1942e+00 L11_sharp:9.7023e-01 L12_sharp:6.6028e+00 total_fnorm:3.3500e+01 total_l1_linf:5.1456e+04 total_spectral:1.6750e+01 L1_fnorm:4.6631e-02 L2_fnorm:4.7119e-02 L3_fnorm:4.7607e-02 L4_fnorm:4.7852e-02 L5_fnorm:4.7607e-02 L6_fnorm:4.7607e-02 L7_fnorm:4.7119e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.7119e-02 L10_fnorm:4.6875e-02 L11_fnorm:4.6631e-02 L12_fnorm:4.6875e-02 L1_l1linf:1.2024e-02 L2_l1linf:1.2451e-02 L3_l1linf:1.2695e-02 L4_l1linf:1.3123e-02 L5_l1linf:1.3245e-02 L6_l1linf:1.3062e-02 L7_l1linf:1.3428e-02 L8_l1linf:1.3184e-02 L9_l1linf:1.3123e-02 L10_l1linf:1.3306e-02 L11_l1linf:1.3367e-02 L12_l1linf:1.4648e-02 L1_spectral:7.8522e-04 L2_spectral:7.9052e-04 L3_spectral:8.0161e-04 L4_spectral:7.9359e-04 L5_spectral:7.9169e-04 L6_spectral:7.9580e-04 L7_spectral:8.0296e-04 L8_spectral:8.0512e-04 L9_spectral:8.0650e-04 L10_spectral:8.0718e-04 L11_spectral:7.9822e-04 L12_spectral:7.9106e-04 train_time:251535ms step_avg:41.92ms +[2025-09-11 10:42:21] [Rank 0] PRINT: step:6000/10000 val_loss:4.8235 total_sharp:3.2252e-04 L1_sharp:1.3778e-01 L2_sharp:2.2352e-01 L3_sharp:3.3868e-01 L4_sharp:4.7940e-01 L5_sharp:6.3827e-01 L6_sharp:8.6052e-01 L7_sharp:9.2140e-01 L8_sharp:8.1491e-01 L9_sharp:8.6099e-01 L10_sharp:1.1942e+00 L11_sharp:9.7023e-01 L12_sharp:6.6028e+00 total_fnorm:3.3500e+01 total_l1_linf:5.1456e+04 total_spectral:1.6750e+01 L1_fnorm:4.6631e-02 L2_fnorm:4.7119e-02 L3_fnorm:4.7607e-02 L4_fnorm:4.7852e-02 L5_fnorm:4.7607e-02 L6_fnorm:4.7607e-02 L7_fnorm:4.7119e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.7119e-02 L10_fnorm:4.6875e-02 L11_fnorm:4.6631e-02 L12_fnorm:4.6875e-02 L1_l1linf:1.2024e-02 L2_l1linf:1.2451e-02 L3_l1linf:1.2695e-02 L4_l1linf:1.3123e-02 L5_l1linf:1.3245e-02 
L6_l1linf:1.3062e-02 L7_l1linf:1.3428e-02 L8_l1linf:1.3184e-02 L9_l1linf:1.3123e-02 L10_l1linf:1.3306e-02 L11_l1linf:1.3367e-02 L12_l1linf:1.4648e-02 L1_spectral:7.8522e-04 L2_spectral:7.9052e-04 L3_spectral:8.0161e-04 L4_spectral:7.9359e-04 L5_spectral:7.9169e-04 L6_spectral:7.9580e-04 L7_spectral:8.0296e-04 L8_spectral:8.0512e-04 L9_spectral:8.0650e-04 L10_spectral:8.0718e-04 L11_spectral:7.9822e-04 L12_spectral:7.9106e-04 train_time:251535ms step_avg:41.92ms +[2025-09-11 10:42:22] [Rank 0] step:6001/10000 train_time:252835ms step_avg:42.13ms +[2025-09-11 10:42:22] [Rank 0] step:6001/10000 train_time:252835ms step_avg:42.13ms +[2025-09-11 10:42:23] [Rank 0] step:6021/10000 train_time:253552ms step_avg:42.11ms +[2025-09-11 10:42:23] [Rank 0] step:6021/10000 train_time:253552ms step_avg:42.11ms +[2025-09-11 10:42:23] [Rank 0] step:6041/10000 train_time:254246ms step_avg:42.09ms +[2025-09-11 10:42:23] [Rank 0] step:6041/10000 train_time:254246ms step_avg:42.09ms +[2025-09-11 10:42:24] [Rank 0] step:6061/10000 train_time:254937ms step_avg:42.06ms +[2025-09-11 10:42:24] [Rank 0] step:6061/10000 train_time:254937ms step_avg:42.06ms +[2025-09-11 10:42:25] [Rank 0] step:6081/10000 train_time:255630ms step_avg:42.04ms +[2025-09-11 10:42:25] [Rank 0] step:6081/10000 train_time:255630ms step_avg:42.04ms +[2025-09-11 10:42:26] [Rank 0] step:6101/10000 train_time:256322ms step_avg:42.01ms +[2025-09-11 10:42:26] [Rank 0] step:6101/10000 train_time:256322ms step_avg:42.01ms +[2025-09-11 10:42:26] [Rank 0] step:6121/10000 train_time:257014ms step_avg:41.99ms +[2025-09-11 10:42:26] [Rank 0] step:6121/10000 train_time:257014ms step_avg:41.99ms +[2025-09-11 10:42:27] [Rank 0] step:6141/10000 train_time:257707ms step_avg:41.97ms +[2025-09-11 10:42:27] [Rank 0] step:6141/10000 train_time:257707ms step_avg:41.97ms +[2025-09-11 10:42:28] [Rank 0] step:6161/10000 train_time:258399ms step_avg:41.94ms +[2025-09-11 10:42:28] [Rank 0] step:6161/10000 train_time:258399ms step_avg:41.94ms 
+[2025-09-11 10:42:28] [Rank 0] step:6181/10000 train_time:259089ms step_avg:41.92ms +[2025-09-11 10:42:28] [Rank 0] step:6181/10000 train_time:259089ms step_avg:41.92ms +[2025-09-11 10:42:29] [Rank 0] step:6201/10000 train_time:259782ms step_avg:41.89ms +[2025-09-11 10:42:29] [Rank 0] step:6201/10000 train_time:259782ms step_avg:41.89ms +[2025-09-11 10:42:30] [Rank 0] step:6221/10000 train_time:260483ms step_avg:41.87ms +[2025-09-11 10:42:30] [Rank 0] step:6221/10000 train_time:260483ms step_avg:41.87ms +[2025-09-11 10:42:30] [Rank 0] step:6241/10000 train_time:261175ms step_avg:41.85ms +[2025-09-11 10:42:30] [Rank 0] step:6241/10000 train_time:261175ms step_avg:41.85ms +[2025-09-11 10:42:31] [Rank 0] step:6261/10000 train_time:261865ms step_avg:41.82ms +[2025-09-11 10:42:31] [Rank 0] step:6261/10000 train_time:261865ms step_avg:41.82ms +[2025-09-11 10:42:32] [Rank 0] step:6281/10000 train_time:262873ms step_avg:41.85ms +[2025-09-11 10:42:32] [Rank 0] step:6281/10000 train_time:262873ms step_avg:41.85ms +[2025-09-11 10:42:33] [Rank 0] step:6301/10000 train_time:263564ms step_avg:41.83ms +[2025-09-11 10:42:33] [Rank 0] step:6301/10000 train_time:263564ms step_avg:41.83ms +[2025-09-11 10:42:34] [Rank 0] step:6321/10000 train_time:264259ms step_avg:41.81ms +[2025-09-11 10:42:34] [Rank 0] step:6321/10000 train_time:264259ms step_avg:41.81ms +[2025-09-11 10:42:34] [Rank 0] step:6341/10000 train_time:265107ms step_avg:41.81ms +[2025-09-11 10:42:34] [Rank 0] step:6341/10000 train_time:265107ms step_avg:41.81ms +[2025-09-11 10:42:35] [Rank 0] step:6361/10000 train_time:265904ms step_avg:41.80ms +[2025-09-11 10:42:35] [Rank 0] step:6361/10000 train_time:265904ms step_avg:41.80ms +[2025-09-11 10:42:36] [Rank 0] step:6381/10000 train_time:266596ms step_avg:41.78ms +[2025-09-11 10:42:36] [Rank 0] step:6381/10000 train_time:266596ms step_avg:41.78ms +[2025-09-11 10:42:36] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 10:42:36] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 10:42:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 10:42:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 10:42:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 10:42:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 10:42:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:42:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:42:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 10:42:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 10:42:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 10:42:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 10:42:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 10:42:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 10:42:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 10:42:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 10:42:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 10:42:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 10:42:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 10:42:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 10:42:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 10:42:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 10:42:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 10:42:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 10:42:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 10:42:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 10:42:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 10:42:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 10:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 10:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 10:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 10:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 10:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 10:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 10:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 10:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 10:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 10:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 10:42:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 10:42:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 10:42:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 10:42:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 10:42:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 10:42:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 10:42:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:42:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:42:47] [Rank 0] PRINT: step:6400/10000 val_loss:4.7855 total_sharp:3.5513e-04 L1_sharp:1.4988e-01 L2_sharp:2.3547e-01 L3_sharp:3.4703e-01 L4_sharp:5.2979e-01 L5_sharp:6.4175e-01 L6_sharp:8.9771e-01 L7_sharp:9.3059e-01 L8_sharp:8.2403e-01 L9_sharp:9.3460e-01 L10_sharp:1.0281e+00 L11_sharp:1.2867e+00 L12_sharp:1.9927e+00 total_fnorm:2.9250e+01 total_l1_linf:4.3264e+04 total_spectral:1.4625e+01 L1_fnorm:4.0527e-02 L2_fnorm:4.1260e-02 L3_fnorm:4.1504e-02 L4_fnorm:4.1748e-02 L5_fnorm:4.1504e-02 L6_fnorm:4.1260e-02 L7_fnorm:4.1260e-02 L8_fnorm:4.0771e-02 L9_fnorm:4.1016e-02 L10_fnorm:4.1016e-02 L11_fnorm:4.0527e-02 L12_fnorm:4.0527e-02 L1_l1linf:9.8267e-03 L2_l1linf:1.0315e-02 L3_l1linf:1.0620e-02 L4_l1linf:1.1169e-02 L5_l1linf:1.1169e-02 L6_l1linf:1.1536e-02 L7_l1linf:1.1658e-02 L8_l1linf:1.1353e-02 L9_l1linf:1.1169e-02 L10_l1linf:1.1292e-02 L11_l1linf:1.1536e-02 L12_l1linf:1.2207e-02 L1_spectral:7.0343e-04 L2_spectral:7.1236e-04 L3_spectral:7.1191e-04 L4_spectral:7.1348e-04 L5_spectral:7.1373e-04 L6_spectral:7.0862e-04 L7_spectral:7.1312e-04 L8_spectral:7.1163e-04 L9_spectral:7.1900e-04 L10_spectral:7.1266e-04 L11_spectral:7.1712e-04 L12_spectral:6.9831e-04 train_time:267267ms step_avg:41.76ms +[2025-09-11 10:42:47] [Rank 0] PRINT: step:6400/10000 
val_loss:4.7855 total_sharp:3.5513e-04 L1_sharp:1.4988e-01 L2_sharp:2.3547e-01 L3_sharp:3.4703e-01 L4_sharp:5.2979e-01 L5_sharp:6.4175e-01 L6_sharp:8.9771e-01 L7_sharp:9.3059e-01 L8_sharp:8.2403e-01 L9_sharp:9.3460e-01 L10_sharp:1.0281e+00 L11_sharp:1.2867e+00 L12_sharp:1.9927e+00 total_fnorm:2.9250e+01 total_l1_linf:4.3264e+04 total_spectral:1.4625e+01 L1_fnorm:4.0527e-02 L2_fnorm:4.1260e-02 L3_fnorm:4.1504e-02 L4_fnorm:4.1748e-02 L5_fnorm:4.1504e-02 L6_fnorm:4.1260e-02 L7_fnorm:4.1260e-02 L8_fnorm:4.0771e-02 L9_fnorm:4.1016e-02 L10_fnorm:4.1016e-02 L11_fnorm:4.0527e-02 L12_fnorm:4.0527e-02 L1_l1linf:9.8267e-03 L2_l1linf:1.0315e-02 L3_l1linf:1.0620e-02 L4_l1linf:1.1169e-02 L5_l1linf:1.1169e-02 L6_l1linf:1.1536e-02 L7_l1linf:1.1658e-02 L8_l1linf:1.1353e-02 L9_l1linf:1.1169e-02 L10_l1linf:1.1292e-02 L11_l1linf:1.1536e-02 L12_l1linf:1.2207e-02 L1_spectral:7.0343e-04 L2_spectral:7.1236e-04 L3_spectral:7.1191e-04 L4_spectral:7.1348e-04 L5_spectral:7.1373e-04 L6_spectral:7.0862e-04 L7_spectral:7.1312e-04 L8_spectral:7.1163e-04 L9_spectral:7.1900e-04 L10_spectral:7.1266e-04 L11_spectral:7.1712e-04 L12_spectral:6.9831e-04 train_time:267267ms step_avg:41.76ms +[2025-09-11 10:42:48] [Rank 0] step:6401/10000 train_time:268581ms step_avg:41.96ms +[2025-09-11 10:42:48] [Rank 0] step:6401/10000 train_time:268581ms step_avg:41.96ms +[2025-09-11 10:42:49] [Rank 0] step:6421/10000 train_time:269287ms step_avg:41.94ms +[2025-09-11 10:42:49] [Rank 0] step:6421/10000 train_time:269287ms step_avg:41.94ms +[2025-09-11 10:42:49] [Rank 0] step:6441/10000 train_time:269979ms step_avg:41.92ms +[2025-09-11 10:42:49] [Rank 0] step:6441/10000 train_time:269979ms step_avg:41.92ms +[2025-09-11 10:42:50] [Rank 0] step:6461/10000 train_time:270672ms step_avg:41.89ms +[2025-09-11 10:42:50] [Rank 0] step:6461/10000 train_time:270672ms step_avg:41.89ms +[2025-09-11 10:42:51] [Rank 0] step:6481/10000 train_time:271366ms step_avg:41.87ms +[2025-09-11 10:42:51] [Rank 0] step:6481/10000 
train_time:271366ms step_avg:41.87ms +[2025-09-11 10:42:52] [Rank 0] step:6501/10000 train_time:272061ms step_avg:41.85ms +[2025-09-11 10:42:52] [Rank 0] step:6501/10000 train_time:272061ms step_avg:41.85ms +[2025-09-11 10:42:52] [Rank 0] step:6521/10000 train_time:272754ms step_avg:41.83ms +[2025-09-11 10:42:52] [Rank 0] step:6521/10000 train_time:272754ms step_avg:41.83ms +[2025-09-11 10:42:53] [Rank 0] step:6541/10000 train_time:273447ms step_avg:41.81ms +[2025-09-11 10:42:53] [Rank 0] step:6541/10000 train_time:273447ms step_avg:41.81ms +[2025-09-11 10:42:54] [Rank 0] step:6561/10000 train_time:274139ms step_avg:41.78ms +[2025-09-11 10:42:54] [Rank 0] step:6561/10000 train_time:274139ms step_avg:41.78ms +[2025-09-11 10:42:54] [Rank 0] step:6581/10000 train_time:274833ms step_avg:41.76ms +[2025-09-11 10:42:54] [Rank 0] step:6581/10000 train_time:274833ms step_avg:41.76ms +[2025-09-11 10:42:55] [Rank 0] step:6601/10000 train_time:275525ms step_avg:41.74ms +[2025-09-11 10:42:55] [Rank 0] step:6601/10000 train_time:275525ms step_avg:41.74ms +[2025-09-11 10:42:56] [Rank 0] step:6621/10000 train_time:276216ms step_avg:41.72ms +[2025-09-11 10:42:56] [Rank 0] step:6621/10000 train_time:276216ms step_avg:41.72ms +[2025-09-11 10:42:56] [Rank 0] step:6641/10000 train_time:276909ms step_avg:41.70ms +[2025-09-11 10:42:56] [Rank 0] step:6641/10000 train_time:276909ms step_avg:41.70ms +[2025-09-11 10:42:57] [Rank 0] step:6661/10000 train_time:277603ms step_avg:41.68ms +[2025-09-11 10:42:57] [Rank 0] step:6661/10000 train_time:277603ms step_avg:41.68ms +[2025-09-11 10:42:58] [Rank 0] step:6681/10000 train_time:278302ms step_avg:41.66ms +[2025-09-11 10:42:58] [Rank 0] step:6681/10000 train_time:278302ms step_avg:41.66ms +[2025-09-11 10:42:58] [Rank 0] step:6701/10000 train_time:279001ms step_avg:41.64ms +[2025-09-11 10:42:58] [Rank 0] step:6701/10000 train_time:279001ms step_avg:41.64ms +[2025-09-11 10:42:59] [Rank 0] step:6721/10000 train_time:279702ms step_avg:41.62ms 
+[2025-09-11 10:42:59] [Rank 0] step:6721/10000 train_time:279702ms step_avg:41.62ms +[2025-09-11 10:43:00] [Rank 0] step:6741/10000 train_time:280402ms step_avg:41.60ms +[2025-09-11 10:43:00] [Rank 0] step:6741/10000 train_time:280402ms step_avg:41.60ms +[2025-09-11 10:43:01] [Rank 0] step:6761/10000 train_time:281100ms step_avg:41.58ms +[2025-09-11 10:43:01] [Rank 0] step:6761/10000 train_time:281100ms step_avg:41.58ms +[2025-09-11 10:43:01] [Rank 0] step:6781/10000 train_time:281801ms step_avg:41.56ms +[2025-09-11 10:43:01] [Rank 0] step:6781/10000 train_time:281801ms step_avg:41.56ms +[2025-09-11 10:43:02] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 10:43:02] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 10:43:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 10:43:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 10:43:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 10:43:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 10:43:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:43:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:43:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 10:43:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 10:43:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 10:43:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 10:43:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 10:43:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 10:43:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 10:43:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 10:43:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 10:43:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 10:43:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 10:43:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 10:43:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 10:43:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 10:43:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 10:43:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 10:43:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 10:43:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 10:43:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 10:43:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 10:43:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 10:43:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 10:43:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 10:43:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 10:43:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 10:43:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 10:43:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 10:43:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 10:43:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 10:43:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 10:43:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 10:43:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 10:43:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 10:43:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 10:43:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 10:43:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 10:43:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:43:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:43:15] [Rank 0] PRINT: step:6800/10000 val_loss:4.7556 total_sharp:2.5100e-04 L1_sharp:1.1270e-01 L2_sharp:1.7332e-01 L3_sharp:2.9068e-01 L4_sharp:3.9792e-01 L5_sharp:5.1136e-01 L6_sharp:7.5025e-01 L7_sharp:9.1784e-01 L8_sharp:7.7395e-01 L9_sharp:8.1544e-01 L10_sharp:1.0424e+00 L11_sharp:1.0742e+00 L12_sharp:5.0701e+00 total_fnorm:2.7750e+01 total_l1_linf:4.0448e+04 total_spectral:1.3875e+01 L1_fnorm:3.4180e-02 L2_fnorm:3.4668e-02 L3_fnorm:3.5156e-02 L4_fnorm:3.5156e-02 L5_fnorm:3.5156e-02 L6_fnorm:3.4912e-02 L7_fnorm:3.4668e-02 L8_fnorm:3.4180e-02 L9_fnorm:3.4424e-02 L10_fnorm:3.4424e-02 L11_fnorm:3.3936e-02 L12_fnorm:3.4180e-02 L1_l1linf:7.2632e-03 L2_l1linf:7.8735e-03 L3_l1linf:8.3008e-03 L4_l1linf:8.6060e-03 L5_l1linf:8.7280e-03 L6_l1linf:8.9722e-03 L7_l1linf:8.9111e-03 L8_l1linf:8.7891e-03 L9_l1linf:9.0332e-03 L10_l1linf:8.7280e-03 L11_l1linf:9.0942e-03 L12_l1linf:9.7656e-03 L1_spectral:6.2825e-04 L2_spectral:6.2988e-04 L3_spectral:6.2985e-04 L4_spectral:6.2929e-04 L5_spectral:6.3348e-04 L6_spectral:6.2905e-04 L7_spectral:6.2929e-04 L8_spectral:6.2025e-04 L9_spectral:6.2796e-04 L10_spectral:6.2684e-04 L11_spectral:6.2218e-04 L12_spectral:6.0554e-04 train_time:282480ms step_avg:41.54ms +[2025-09-11 10:43:15] [Rank 0] PRINT: step:6800/10000 val_loss:4.7556 total_sharp:2.5100e-04 L1_sharp:1.1270e-01 L2_sharp:1.7332e-01 L3_sharp:2.9068e-01 L4_sharp:3.9792e-01 L5_sharp:5.1136e-01 L6_sharp:7.5025e-01 L7_sharp:9.1784e-01 L8_sharp:7.7395e-01 L9_sharp:8.1544e-01 L10_sharp:1.0424e+00 L11_sharp:1.0742e+00 L12_sharp:5.0701e+00 total_fnorm:2.7750e+01 total_l1_linf:4.0448e+04 total_spectral:1.3875e+01 L1_fnorm:3.4180e-02 L2_fnorm:3.4668e-02 L3_fnorm:3.5156e-02 L4_fnorm:3.5156e-02 L5_fnorm:3.5156e-02 L6_fnorm:3.4912e-02 L7_fnorm:3.4668e-02 L8_fnorm:3.4180e-02 L9_fnorm:3.4424e-02 L10_fnorm:3.4424e-02 L11_fnorm:3.3936e-02 L12_fnorm:3.4180e-02 L1_l1linf:7.2632e-03 L2_l1linf:7.8735e-03 L3_l1linf:8.3008e-03 L4_l1linf:8.6060e-03 L5_l1linf:8.7280e-03 
L6_l1linf:8.9722e-03 L7_l1linf:8.9111e-03 L8_l1linf:8.7891e-03 L9_l1linf:9.0332e-03 L10_l1linf:8.7280e-03 L11_l1linf:9.0942e-03 L12_l1linf:9.7656e-03 L1_spectral:6.2825e-04 L2_spectral:6.2988e-04 L3_spectral:6.2985e-04 L4_spectral:6.2929e-04 L5_spectral:6.3348e-04 L6_spectral:6.2905e-04 L7_spectral:6.2929e-04 L8_spectral:6.2025e-04 L9_spectral:6.2796e-04 L10_spectral:6.2684e-04 L11_spectral:6.2218e-04 L12_spectral:6.0554e-04 train_time:282480ms step_avg:41.54ms +[2025-09-11 10:43:16] [Rank 0] step:6801/10000 train_time:283813ms step_avg:41.73ms +[2025-09-11 10:43:16] [Rank 0] step:6801/10000 train_time:283813ms step_avg:41.73ms +[2025-09-11 10:43:17] [Rank 0] step:6821/10000 train_time:284528ms step_avg:41.71ms +[2025-09-11 10:43:17] [Rank 0] step:6821/10000 train_time:284528ms step_avg:41.71ms +[2025-09-11 10:43:17] [Rank 0] step:6841/10000 train_time:285230ms step_avg:41.69ms +[2025-09-11 10:43:17] [Rank 0] step:6841/10000 train_time:285230ms step_avg:41.69ms +[2025-09-11 10:43:18] [Rank 0] step:6861/10000 train_time:285930ms step_avg:41.67ms +[2025-09-11 10:43:18] [Rank 0] step:6861/10000 train_time:285930ms step_avg:41.67ms +[2025-09-11 10:43:19] [Rank 0] step:6881/10000 train_time:286632ms step_avg:41.66ms +[2025-09-11 10:43:19] [Rank 0] step:6881/10000 train_time:286632ms step_avg:41.66ms +[2025-09-11 10:43:19] [Rank 0] step:6901/10000 train_time:287331ms step_avg:41.64ms +[2025-09-11 10:43:19] [Rank 0] step:6901/10000 train_time:287331ms step_avg:41.64ms +[2025-09-11 10:43:20] [Rank 0] step:6921/10000 train_time:288030ms step_avg:41.62ms +[2025-09-11 10:43:20] [Rank 0] step:6921/10000 train_time:288030ms step_avg:41.62ms +[2025-09-11 10:43:21] [Rank 0] step:6941/10000 train_time:288730ms step_avg:41.60ms +[2025-09-11 10:43:21] [Rank 0] step:6941/10000 train_time:288730ms step_avg:41.60ms +[2025-09-11 10:43:22] [Rank 0] step:6961/10000 train_time:289431ms step_avg:41.58ms +[2025-09-11 10:43:22] [Rank 0] step:6961/10000 train_time:289431ms step_avg:41.58ms 
+[2025-09-11 10:43:22] [Rank 0] step:6981/10000 train_time:290133ms step_avg:41.56ms +[2025-09-11 10:43:22] [Rank 0] step:6981/10000 train_time:290133ms step_avg:41.56ms +[2025-09-11 10:43:23] [Rank 0] step:7001/10000 train_time:290834ms step_avg:41.54ms +[2025-09-11 10:43:23] [Rank 0] step:7001/10000 train_time:290834ms step_avg:41.54ms +[2025-09-11 10:43:24] [Rank 0] step:7021/10000 train_time:291533ms step_avg:41.52ms +[2025-09-11 10:43:24] [Rank 0] step:7021/10000 train_time:291533ms step_avg:41.52ms +[2025-09-11 10:43:24] [Rank 0] step:7041/10000 train_time:292232ms step_avg:41.50ms +[2025-09-11 10:43:24] [Rank 0] step:7041/10000 train_time:292232ms step_avg:41.50ms +[2025-09-11 10:43:25] [Rank 0] step:7061/10000 train_time:292934ms step_avg:41.49ms +[2025-09-11 10:43:25] [Rank 0] step:7061/10000 train_time:292934ms step_avg:41.49ms +[2025-09-11 10:43:26] [Rank 0] step:7081/10000 train_time:293633ms step_avg:41.47ms +[2025-09-11 10:43:26] [Rank 0] step:7081/10000 train_time:293633ms step_avg:41.47ms +[2025-09-11 10:43:26] [Rank 0] step:7101/10000 train_time:294333ms step_avg:41.45ms +[2025-09-11 10:43:26] [Rank 0] step:7101/10000 train_time:294333ms step_avg:41.45ms +[2025-09-11 10:43:27] [Rank 0] step:7121/10000 train_time:295035ms step_avg:41.43ms +[2025-09-11 10:43:27] [Rank 0] step:7121/10000 train_time:295035ms step_avg:41.43ms +[2025-09-11 10:43:28] [Rank 0] step:7141/10000 train_time:295735ms step_avg:41.41ms +[2025-09-11 10:43:28] [Rank 0] step:7141/10000 train_time:295735ms step_avg:41.41ms +[2025-09-11 10:43:29] [Rank 0] step:7161/10000 train_time:296435ms step_avg:41.40ms +[2025-09-11 10:43:29] [Rank 0] step:7161/10000 train_time:296435ms step_avg:41.40ms +[2025-09-11 10:43:29] [Rank 0] step:7181/10000 train_time:297134ms step_avg:41.38ms +[2025-09-11 10:43:29] [Rank 0] step:7181/10000 train_time:297134ms step_avg:41.38ms +[2025-09-11 10:43:30] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 10:43:30] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 10:43:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 10:43:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 10:43:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 10:43:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 10:43:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:43:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:43:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 10:43:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 10:43:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 10:43:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 10:43:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 10:43:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 10:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 10:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 10:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 10:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 10:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 10:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 10:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 10:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 10:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 10:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 10:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 10:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 10:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 10:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 10:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 10:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 10:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 10:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 10:43:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 10:43:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 10:43:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 10:43:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 10:43:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 10:43:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 10:43:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 10:43:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 10:43:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 10:43:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 10:43:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 10:43:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 10:43:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:43:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:43:40] [Rank 0] PRINT: step:7200/10000 val_loss:4.7263 total_sharp:2.4341e-04 L1_sharp:1.2090e-01 L2_sharp:1.8745e-01 L3_sharp:3.1528e-01 L4_sharp:4.8833e-01 L5_sharp:6.3956e-01 L6_sharp:8.4298e-01 L7_sharp:9.8630e-01 L8_sharp:7.9448e-01 L9_sharp:8.4401e-01 L10_sharp:1.0108e+00 L11_sharp:1.1040e+00 L12_sharp:1.9331e+00 total_fnorm:2.4125e+01 total_l1_linf:3.3280e+04 total_spectral:1.2062e+01 L1_fnorm:2.8564e-02 L2_fnorm:2.9053e-02 L3_fnorm:2.9419e-02 L4_fnorm:2.9663e-02 L5_fnorm:2.9663e-02 L6_fnorm:2.9419e-02 L7_fnorm:2.9175e-02 L8_fnorm:2.8564e-02 L9_fnorm:2.8809e-02 L10_fnorm:2.8809e-02 L11_fnorm:2.8564e-02 L12_fnorm:2.8320e-02 L1_l1linf:6.0425e-03 L2_l1linf:6.2561e-03 L3_l1linf:6.5308e-03 L4_l1linf:6.9580e-03 L5_l1linf:7.0190e-03 L6_l1linf:7.1716e-03 L7_l1linf:7.0190e-03 L8_l1linf:7.1411e-03 L9_l1linf:7.3853e-03 L10_l1linf:7.0496e-03 L11_l1linf:7.0801e-03 L12_l1linf:7.6904e-03 L1_spectral:5.3797e-04 L2_spectral:5.4088e-04 L3_spectral:5.4040e-04 L4_spectral:5.4475e-04 L5_spectral:5.4654e-04 L6_spectral:5.3959e-04 L7_spectral:5.4585e-04 L8_spectral:5.3148e-04 L9_spectral:5.3945e-04 L10_spectral:5.3854e-04 L11_spectral:5.3213e-04 L12_spectral:5.1394e-04 train_time:297815ms step_avg:41.36ms +[2025-09-11 10:43:40] [Rank 0] PRINT: step:7200/10000 
val_loss:4.7263 total_sharp:2.4341e-04 L1_sharp:1.2090e-01 L2_sharp:1.8745e-01 L3_sharp:3.1528e-01 L4_sharp:4.8833e-01 L5_sharp:6.3956e-01 L6_sharp:8.4298e-01 L7_sharp:9.8630e-01 L8_sharp:7.9448e-01 L9_sharp:8.4401e-01 L10_sharp:1.0108e+00 L11_sharp:1.1040e+00 L12_sharp:1.9331e+00 total_fnorm:2.4125e+01 total_l1_linf:3.3280e+04 total_spectral:1.2062e+01 L1_fnorm:2.8564e-02 L2_fnorm:2.9053e-02 L3_fnorm:2.9419e-02 L4_fnorm:2.9663e-02 L5_fnorm:2.9663e-02 L6_fnorm:2.9419e-02 L7_fnorm:2.9175e-02 L8_fnorm:2.8564e-02 L9_fnorm:2.8809e-02 L10_fnorm:2.8809e-02 L11_fnorm:2.8564e-02 L12_fnorm:2.8320e-02 L1_l1linf:6.0425e-03 L2_l1linf:6.2561e-03 L3_l1linf:6.5308e-03 L4_l1linf:6.9580e-03 L5_l1linf:7.0190e-03 L6_l1linf:7.1716e-03 L7_l1linf:7.0190e-03 L8_l1linf:7.1411e-03 L9_l1linf:7.3853e-03 L10_l1linf:7.0496e-03 L11_l1linf:7.0801e-03 L12_l1linf:7.6904e-03 L1_spectral:5.3797e-04 L2_spectral:5.4088e-04 L3_spectral:5.4040e-04 L4_spectral:5.4475e-04 L5_spectral:5.4654e-04 L6_spectral:5.3959e-04 L7_spectral:5.4585e-04 L8_spectral:5.3148e-04 L9_spectral:5.3945e-04 L10_spectral:5.3854e-04 L11_spectral:5.3213e-04 L12_spectral:5.1394e-04 train_time:297815ms step_avg:41.36ms +[2025-09-11 10:43:42] [Rank 0] step:7201/10000 train_time:299144ms step_avg:41.54ms +[2025-09-11 10:43:42] [Rank 0] step:7201/10000 train_time:299144ms step_avg:41.54ms +[2025-09-11 10:43:42] [Rank 0] step:7221/10000 train_time:299884ms step_avg:41.53ms +[2025-09-11 10:43:42] [Rank 0] step:7221/10000 train_time:299884ms step_avg:41.53ms +[2025-09-11 10:43:43] [Rank 0] step:7241/10000 train_time:300584ms step_avg:41.51ms +[2025-09-11 10:43:43] [Rank 0] step:7241/10000 train_time:300584ms step_avg:41.51ms +[2025-09-11 10:43:44] [Rank 0] step:7261/10000 train_time:301287ms step_avg:41.49ms +[2025-09-11 10:43:44] [Rank 0] step:7261/10000 train_time:301287ms step_avg:41.49ms +[2025-09-11 10:43:44] [Rank 0] step:7281/10000 train_time:301993ms step_avg:41.48ms +[2025-09-11 10:43:44] [Rank 0] step:7281/10000 
train_time:301993ms step_avg:41.48ms +[2025-09-11 10:43:45] [Rank 0] step:7301/10000 train_time:302693ms step_avg:41.46ms +[2025-09-11 10:43:45] [Rank 0] step:7301/10000 train_time:302693ms step_avg:41.46ms +[2025-09-11 10:43:46] [Rank 0] step:7321/10000 train_time:303394ms step_avg:41.44ms +[2025-09-11 10:43:46] [Rank 0] step:7321/10000 train_time:303394ms step_avg:41.44ms +[2025-09-11 10:43:47] [Rank 0] step:7341/10000 train_time:304096ms step_avg:41.42ms +[2025-09-11 10:43:47] [Rank 0] step:7341/10000 train_time:304096ms step_avg:41.42ms +[2025-09-11 10:43:47] [Rank 0] step:7361/10000 train_time:304797ms step_avg:41.41ms +[2025-09-11 10:43:47] [Rank 0] step:7361/10000 train_time:304797ms step_avg:41.41ms +[2025-09-11 10:43:48] [Rank 0] step:7381/10000 train_time:305499ms step_avg:41.39ms +[2025-09-11 10:43:48] [Rank 0] step:7381/10000 train_time:305499ms step_avg:41.39ms +[2025-09-11 10:43:49] [Rank 0] step:7401/10000 train_time:306198ms step_avg:41.37ms +[2025-09-11 10:43:49] [Rank 0] step:7401/10000 train_time:306198ms step_avg:41.37ms +[2025-09-11 10:43:49] [Rank 0] step:7421/10000 train_time:306899ms step_avg:41.36ms +[2025-09-11 10:43:49] [Rank 0] step:7421/10000 train_time:306899ms step_avg:41.36ms +[2025-09-11 10:43:50] [Rank 0] step:7441/10000 train_time:307601ms step_avg:41.34ms +[2025-09-11 10:43:50] [Rank 0] step:7441/10000 train_time:307601ms step_avg:41.34ms +[2025-09-11 10:43:51] [Rank 0] step:7461/10000 train_time:308302ms step_avg:41.32ms +[2025-09-11 10:43:51] [Rank 0] step:7461/10000 train_time:308302ms step_avg:41.32ms +[2025-09-11 10:43:51] [Rank 0] step:7481/10000 train_time:309005ms step_avg:41.31ms +[2025-09-11 10:43:51] [Rank 0] step:7481/10000 train_time:309005ms step_avg:41.31ms +[2025-09-11 10:43:52] [Rank 0] step:7501/10000 train_time:309707ms step_avg:41.29ms +[2025-09-11 10:43:52] [Rank 0] step:7501/10000 train_time:309707ms step_avg:41.29ms +[2025-09-11 10:43:53] [Rank 0] step:7521/10000 train_time:310410ms step_avg:41.27ms 
+[2025-09-11 10:43:53] [Rank 0] step:7521/10000 train_time:310410ms step_avg:41.27ms +[2025-09-11 10:43:54] [Rank 0] step:7541/10000 train_time:311111ms step_avg:41.26ms +[2025-09-11 10:43:54] [Rank 0] step:7541/10000 train_time:311111ms step_avg:41.26ms +[2025-09-11 10:43:54] [Rank 0] step:7561/10000 train_time:311815ms step_avg:41.24ms +[2025-09-11 10:43:54] [Rank 0] step:7561/10000 train_time:311815ms step_avg:41.24ms +[2025-09-11 10:43:55] [Rank 0] step:7581/10000 train_time:312518ms step_avg:41.22ms +[2025-09-11 10:43:55] [Rank 0] step:7581/10000 train_time:312518ms step_avg:41.22ms +[2025-09-11 10:43:56] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 10:43:56] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 10:43:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 10:43:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 10:43:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 10:43:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 10:43:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:43:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:43:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 10:43:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 10:43:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 10:43:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 10:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 10:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 10:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 10:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 10:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 10:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 10:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 10:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 10:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 10:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 10:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 10:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 10:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 10:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 10:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 10:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 10:44:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 10:44:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 10:44:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 10:44:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 10:44:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 10:44:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 10:44:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 10:44:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 10:44:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 10:44:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 10:44:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 10:44:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 10:44:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 10:44:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 10:44:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 10:44:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 10:44:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:44:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:44:06] [Rank 0] PRINT: step:7600/10000 val_loss:4.7011 total_sharp:2.1593e-04 L1_sharp:1.0449e-01 L2_sharp:1.6552e-01 L3_sharp:2.6387e-01 L4_sharp:3.7555e-01 L5_sharp:4.9661e-01 L6_sharp:7.3672e-01 L7_sharp:8.1863e-01 L8_sharp:6.7364e-01 L9_sharp:7.8716e-01 L10_sharp:8.5109e-01 L11_sharp:1.0665e+00 L12_sharp:1.4517e+00 total_fnorm:1.8875e+01 total_l1_linf:2.3680e+04 total_spectral:9.3750e+00 L1_fnorm:2.3315e-02 L2_fnorm:2.3682e-02 L3_fnorm:2.3804e-02 L4_fnorm:2.4170e-02 L5_fnorm:2.4292e-02 L6_fnorm:2.4048e-02 L7_fnorm:2.3804e-02 L8_fnorm:2.3315e-02 L9_fnorm:2.3560e-02 L10_fnorm:2.3560e-02 L11_fnorm:2.3071e-02 L12_fnorm:2.2827e-02 L1_l1linf:4.3335e-03 L2_l1linf:4.7607e-03 L3_l1linf:5.2795e-03 L4_l1linf:5.2490e-03 L5_l1linf:5.3101e-03 L6_l1linf:5.4321e-03 L7_l1linf:5.5542e-03 L8_l1linf:5.4016e-03 L9_l1linf:5.5237e-03 L10_l1linf:5.5237e-03 L11_l1linf:5.5542e-03 L12_l1linf:5.8289e-03 L1_spectral:4.5764e-04 L2_spectral:4.5802e-04 L3_spectral:4.5631e-04 L4_spectral:4.5983e-04 L5_spectral:4.6218e-04 L6_spectral:4.5221e-04 L7_spectral:4.5721e-04 L8_spectral:4.4128e-04 L9_spectral:4.5376e-04 L10_spectral:4.5297e-04 L11_spectral:4.4250e-04 L12_spectral:4.2115e-04 train_time:313200ms step_avg:41.21ms +[2025-09-11 10:44:06] [Rank 0] PRINT: step:7600/10000 val_loss:4.7011 total_sharp:2.1593e-04 L1_sharp:1.0449e-01 L2_sharp:1.6552e-01 L3_sharp:2.6387e-01 L4_sharp:3.7555e-01 L5_sharp:4.9661e-01 L6_sharp:7.3672e-01 L7_sharp:8.1863e-01 L8_sharp:6.7364e-01 L9_sharp:7.8716e-01 L10_sharp:8.5109e-01 L11_sharp:1.0665e+00 L12_sharp:1.4517e+00 total_fnorm:1.8875e+01 total_l1_linf:2.3680e+04 total_spectral:9.3750e+00 L1_fnorm:2.3315e-02 L2_fnorm:2.3682e-02 L3_fnorm:2.3804e-02 L4_fnorm:2.4170e-02 L5_fnorm:2.4292e-02 L6_fnorm:2.4048e-02 L7_fnorm:2.3804e-02 L8_fnorm:2.3315e-02 L9_fnorm:2.3560e-02 L10_fnorm:2.3560e-02 L11_fnorm:2.3071e-02 L12_fnorm:2.2827e-02 L1_l1linf:4.3335e-03 L2_l1linf:4.7607e-03 L3_l1linf:5.2795e-03 L4_l1linf:5.2490e-03 L5_l1linf:5.3101e-03 
L6_l1linf:5.4321e-03 L7_l1linf:5.5542e-03 L8_l1linf:5.4016e-03 L9_l1linf:5.5237e-03 L10_l1linf:5.5237e-03 L11_l1linf:5.5542e-03 L12_l1linf:5.8289e-03 L1_spectral:4.5764e-04 L2_spectral:4.5802e-04 L3_spectral:4.5631e-04 L4_spectral:4.5983e-04 L5_spectral:4.6218e-04 L6_spectral:4.5221e-04 L7_spectral:4.5721e-04 L8_spectral:4.4128e-04 L9_spectral:4.5376e-04 L10_spectral:4.5297e-04 L11_spectral:4.4250e-04 L12_spectral:4.2115e-04 train_time:313200ms step_avg:41.21ms +[2025-09-11 10:44:07] [Rank 0] step:7601/10000 train_time:314545ms step_avg:41.38ms +[2025-09-11 10:44:07] [Rank 0] step:7601/10000 train_time:314545ms step_avg:41.38ms +[2025-09-11 10:44:08] [Rank 0] step:7621/10000 train_time:315273ms step_avg:41.37ms +[2025-09-11 10:44:08] [Rank 0] step:7621/10000 train_time:315273ms step_avg:41.37ms +[2025-09-11 10:44:09] [Rank 0] step:7641/10000 train_time:315976ms step_avg:41.35ms +[2025-09-11 10:44:09] [Rank 0] step:7641/10000 train_time:315976ms step_avg:41.35ms +[2025-09-11 10:44:09] [Rank 0] step:7661/10000 train_time:316677ms step_avg:41.34ms +[2025-09-11 10:44:09] [Rank 0] step:7661/10000 train_time:316677ms step_avg:41.34ms +[2025-09-11 10:44:10] [Rank 0] step:7681/10000 train_time:317380ms step_avg:41.32ms +[2025-09-11 10:44:10] [Rank 0] step:7681/10000 train_time:317380ms step_avg:41.32ms +[2025-09-11 10:44:11] [Rank 0] step:7701/10000 train_time:318082ms step_avg:41.30ms +[2025-09-11 10:44:11] [Rank 0] step:7701/10000 train_time:318082ms step_avg:41.30ms +[2025-09-11 10:44:11] [Rank 0] step:7721/10000 train_time:318785ms step_avg:41.29ms +[2025-09-11 10:44:11] [Rank 0] step:7721/10000 train_time:318785ms step_avg:41.29ms +[2025-09-11 10:44:12] [Rank 0] step:7741/10000 train_time:319489ms step_avg:41.27ms +[2025-09-11 10:44:12] [Rank 0] step:7741/10000 train_time:319489ms step_avg:41.27ms +[2025-09-11 10:44:13] [Rank 0] step:7761/10000 train_time:320191ms step_avg:41.26ms +[2025-09-11 10:44:13] [Rank 0] step:7761/10000 train_time:320191ms step_avg:41.26ms 
+[2025-09-11 10:44:14] [Rank 0] step:7781/10000 train_time:320895ms step_avg:41.24ms +[2025-09-11 10:44:14] [Rank 0] step:7781/10000 train_time:320895ms step_avg:41.24ms +[2025-09-11 10:44:14] [Rank 0] step:7801/10000 train_time:321597ms step_avg:41.23ms +[2025-09-11 10:44:14] [Rank 0] step:7801/10000 train_time:321597ms step_avg:41.23ms +[2025-09-11 10:44:15] [Rank 0] step:7821/10000 train_time:322300ms step_avg:41.21ms +[2025-09-11 10:44:15] [Rank 0] step:7821/10000 train_time:322300ms step_avg:41.21ms +[2025-09-11 10:44:16] [Rank 0] step:7841/10000 train_time:323004ms step_avg:41.19ms +[2025-09-11 10:44:16] [Rank 0] step:7841/10000 train_time:323004ms step_avg:41.19ms +[2025-09-11 10:44:16] [Rank 0] step:7861/10000 train_time:323709ms step_avg:41.18ms +[2025-09-11 10:44:16] [Rank 0] step:7861/10000 train_time:323709ms step_avg:41.18ms +[2025-09-11 10:44:17] [Rank 0] step:7881/10000 train_time:324411ms step_avg:41.16ms +[2025-09-11 10:44:17] [Rank 0] step:7881/10000 train_time:324411ms step_avg:41.16ms +[2025-09-11 10:44:18] [Rank 0] step:7901/10000 train_time:325116ms step_avg:41.15ms +[2025-09-11 10:44:18] [Rank 0] step:7901/10000 train_time:325116ms step_avg:41.15ms +[2025-09-11 10:44:18] [Rank 0] step:7921/10000 train_time:325818ms step_avg:41.13ms +[2025-09-11 10:44:18] [Rank 0] step:7921/10000 train_time:325818ms step_avg:41.13ms +[2025-09-11 10:44:19] [Rank 0] step:7941/10000 train_time:326522ms step_avg:41.12ms +[2025-09-11 10:44:19] [Rank 0] step:7941/10000 train_time:326522ms step_avg:41.12ms +[2025-09-11 10:44:20] [Rank 0] step:7961/10000 train_time:327223ms step_avg:41.10ms +[2025-09-11 10:44:20] [Rank 0] step:7961/10000 train_time:327223ms step_avg:41.10ms +[2025-09-11 10:44:21] [Rank 0] step:7981/10000 train_time:327928ms step_avg:41.09ms +[2025-09-11 10:44:21] [Rank 0] step:7981/10000 train_time:327928ms step_avg:41.09ms +[2025-09-11 10:44:21] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 10:44:21] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 10:44:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 10:44:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 10:44:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 10:44:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 10:44:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:44:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:44:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 10:44:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 10:44:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 10:44:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 10:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 10:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 10:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 10:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 10:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 10:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 10:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 10:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 10:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 10:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 10:44:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 10:44:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 10:44:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 10:44:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 10:44:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 10:44:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 10:44:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 10:44:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 10:44:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 10:44:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 10:44:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 10:44:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 10:44:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 10:44:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 10:44:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 10:44:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 10:44:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 10:44:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 10:44:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 10:44:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 10:44:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 10:44:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 10:44:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:44:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:44:31] [Rank 0] PRINT: step:8000/10000 val_loss:4.6855 total_sharp:1.7285e-04 L1_sharp:8.3435e-02 L2_sharp:1.4609e-01 L3_sharp:2.3976e-01 L4_sharp:3.6280e-01 L5_sharp:5.0936e-01 L6_sharp:7.5500e-01 L7_sharp:8.5751e-01 L8_sharp:8.4319e-01 L9_sharp:9.5584e-01 L10_sharp:9.4595e-01 L11_sharp:9.6750e-01 L12_sharp:2.4352e+00 total_fnorm:1.6125e+01 total_l1_linf:1.9200e+04 total_spectral:8.0625e+00 L1_fnorm:1.8433e-02 L2_fnorm:1.8677e-02 L3_fnorm:1.8921e-02 L4_fnorm:1.9287e-02 L5_fnorm:1.9287e-02 L6_fnorm:1.9165e-02 L7_fnorm:1.9165e-02 L8_fnorm:1.8555e-02 L9_fnorm:1.8799e-02 L10_fnorm:1.8799e-02 L11_fnorm:1.8433e-02 L12_fnorm:1.8066e-02 L1_l1linf:3.3264e-03 L2_l1linf:3.4637e-03 L3_l1linf:3.6469e-03 L4_l1linf:3.9673e-03 L5_l1linf:4.1809e-03 L6_l1linf:4.2114e-03 L7_l1linf:4.2419e-03 L8_l1linf:4.2725e-03 L9_l1linf:4.1809e-03 L10_l1linf:4.2419e-03 L11_l1linf:4.0894e-03 L12_l1linf:4.3335e-03 L1_spectral:3.6999e-04 L2_spectral:3.7601e-04 L3_spectral:3.7111e-04 L4_spectral:3.7157e-04 L5_spectral:3.7434e-04 L6_spectral:3.6530e-04 L7_spectral:3.7240e-04 L8_spectral:3.5921e-04 L9_spectral:3.7038e-04 L10_spectral:3.6919e-04 L11_spectral:3.5572e-04 L12_spectral:3.4083e-04 train_time:328608ms step_avg:41.08ms +[2025-09-11 10:44:31] [Rank 0] PRINT: step:8000/10000 
val_loss:4.6855 total_sharp:1.7285e-04 L1_sharp:8.3435e-02 L2_sharp:1.4609e-01 L3_sharp:2.3976e-01 L4_sharp:3.6280e-01 L5_sharp:5.0936e-01 L6_sharp:7.5500e-01 L7_sharp:8.5751e-01 L8_sharp:8.4319e-01 L9_sharp:9.5584e-01 L10_sharp:9.4595e-01 L11_sharp:9.6750e-01 L12_sharp:2.4352e+00 total_fnorm:1.6125e+01 total_l1_linf:1.9200e+04 total_spectral:8.0625e+00 L1_fnorm:1.8433e-02 L2_fnorm:1.8677e-02 L3_fnorm:1.8921e-02 L4_fnorm:1.9287e-02 L5_fnorm:1.9287e-02 L6_fnorm:1.9165e-02 L7_fnorm:1.9165e-02 L8_fnorm:1.8555e-02 L9_fnorm:1.8799e-02 L10_fnorm:1.8799e-02 L11_fnorm:1.8433e-02 L12_fnorm:1.8066e-02 L1_l1linf:3.3264e-03 L2_l1linf:3.4637e-03 L3_l1linf:3.6469e-03 L4_l1linf:3.9673e-03 L5_l1linf:4.1809e-03 L6_l1linf:4.2114e-03 L7_l1linf:4.2419e-03 L8_l1linf:4.2725e-03 L9_l1linf:4.1809e-03 L10_l1linf:4.2419e-03 L11_l1linf:4.0894e-03 L12_l1linf:4.3335e-03 L1_spectral:3.6999e-04 L2_spectral:3.7601e-04 L3_spectral:3.7111e-04 L4_spectral:3.7157e-04 L5_spectral:3.7434e-04 L6_spectral:3.6530e-04 L7_spectral:3.7240e-04 L8_spectral:3.5921e-04 L9_spectral:3.7038e-04 L10_spectral:3.6919e-04 L11_spectral:3.5572e-04 L12_spectral:3.4083e-04 train_time:328608ms step_avg:41.08ms +[2025-09-11 10:44:33] [Rank 0] step:8001/10000 train_time:329971ms step_avg:41.24ms +[2025-09-11 10:44:33] [Rank 0] step:8001/10000 train_time:329971ms step_avg:41.24ms +[2025-09-11 10:44:34] [Rank 0] step:8021/10000 train_time:330714ms step_avg:41.23ms +[2025-09-11 10:44:34] [Rank 0] step:8021/10000 train_time:330714ms step_avg:41.23ms +[2025-09-11 10:44:34] [Rank 0] step:8041/10000 train_time:331419ms step_avg:41.22ms +[2025-09-11 10:44:34] [Rank 0] step:8041/10000 train_time:331419ms step_avg:41.22ms +[2025-09-11 10:44:35] [Rank 0] step:8061/10000 train_time:332125ms step_avg:41.20ms +[2025-09-11 10:44:35] [Rank 0] step:8061/10000 train_time:332125ms step_avg:41.20ms +[2025-09-11 10:44:36] [Rank 0] step:8081/10000 train_time:332826ms step_avg:41.19ms +[2025-09-11 10:44:36] [Rank 0] step:8081/10000 
train_time:332826ms step_avg:41.19ms +[2025-09-11 10:44:36] [Rank 0] step:8101/10000 train_time:333529ms step_avg:41.17ms +[2025-09-11 10:44:36] [Rank 0] step:8101/10000 train_time:333529ms step_avg:41.17ms +[2025-09-11 10:44:37] [Rank 0] step:8121/10000 train_time:334238ms step_avg:41.16ms +[2025-09-11 10:44:37] [Rank 0] step:8121/10000 train_time:334238ms step_avg:41.16ms +[2025-09-11 10:44:39] [Rank 0] step:8141/10000 train_time:335695ms step_avg:41.24ms +[2025-09-11 10:44:39] [Rank 0] step:8141/10000 train_time:335695ms step_avg:41.24ms +[2025-09-11 10:44:39] [Rank 0] step:8161/10000 train_time:336402ms step_avg:41.22ms +[2025-09-11 10:44:39] [Rank 0] step:8161/10000 train_time:336402ms step_avg:41.22ms +[2025-09-11 10:44:40] [Rank 0] step:8181/10000 train_time:337116ms step_avg:41.21ms +[2025-09-11 10:44:40] [Rank 0] step:8181/10000 train_time:337116ms step_avg:41.21ms +[2025-09-11 10:44:41] [Rank 0] step:8201/10000 train_time:337966ms step_avg:41.21ms +[2025-09-11 10:44:41] [Rank 0] step:8201/10000 train_time:337966ms step_avg:41.21ms +[2025-09-11 10:44:42] [Rank 0] step:8221/10000 train_time:338798ms step_avg:41.21ms +[2025-09-11 10:44:42] [Rank 0] step:8221/10000 train_time:338798ms step_avg:41.21ms +[2025-09-11 10:44:42] [Rank 0] step:8241/10000 train_time:339516ms step_avg:41.20ms +[2025-09-11 10:44:42] [Rank 0] step:8241/10000 train_time:339516ms step_avg:41.20ms +[2025-09-11 10:44:43] [Rank 0] step:8261/10000 train_time:340225ms step_avg:41.18ms +[2025-09-11 10:44:43] [Rank 0] step:8261/10000 train_time:340225ms step_avg:41.18ms +[2025-09-11 10:44:44] [Rank 0] step:8281/10000 train_time:340932ms step_avg:41.17ms +[2025-09-11 10:44:44] [Rank 0] step:8281/10000 train_time:340932ms step_avg:41.17ms +[2025-09-11 10:44:45] [Rank 0] step:8301/10000 train_time:341642ms step_avg:41.16ms +[2025-09-11 10:44:45] [Rank 0] step:8301/10000 train_time:341642ms step_avg:41.16ms +[2025-09-11 10:44:45] [Rank 0] step:8321/10000 train_time:342351ms step_avg:41.14ms 
+[2025-09-11 10:44:45] [Rank 0] step:8321/10000 train_time:342351ms step_avg:41.14ms +[2025-09-11 10:44:46] [Rank 0] step:8341/10000 train_time:343067ms step_avg:41.13ms +[2025-09-11 10:44:46] [Rank 0] step:8341/10000 train_time:343067ms step_avg:41.13ms +[2025-09-11 10:44:47] [Rank 0] step:8361/10000 train_time:343773ms step_avg:41.12ms +[2025-09-11 10:44:47] [Rank 0] step:8361/10000 train_time:343773ms step_avg:41.12ms +[2025-09-11 10:44:47] [Rank 0] step:8381/10000 train_time:344485ms step_avg:41.10ms +[2025-09-11 10:44:47] [Rank 0] step:8381/10000 train_time:344485ms step_avg:41.10ms +[2025-09-11 10:44:48] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 10:44:48] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 10:44:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 10:44:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 10:44:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 10:44:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 10:44:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:44:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:44:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 10:44:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 10:44:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 10:44:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 10:44:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 10:44:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 10:44:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 10:44:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 10:44:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 10:44:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 10:44:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 10:44:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 10:44:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 10:44:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 10:44:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 10:44:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 10:44:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 10:44:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 10:44:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 10:44:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 10:44:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 10:44:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 10:44:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 10:44:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 10:44:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 10:44:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 10:44:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 10:44:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 10:44:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 10:44:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 10:44:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 10:44:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 10:44:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 10:44:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 10:44:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 10:44:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 10:44:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:44:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:44:58] [Rank 0] PRINT: step:8400/10000 val_loss:4.6713 total_sharp:1.6614e-04 L1_sharp:6.0502e-02 L2_sharp:1.1942e-01 L3_sharp:1.8407e-01 L4_sharp:2.9206e-01 L5_sharp:4.4628e-01 L6_sharp:6.3295e-01 L7_sharp:7.3215e-01 L8_sharp:8.0150e-01 L9_sharp:8.4813e-01 L10_sharp:8.8739e-01 L11_sharp:8.9762e-01 L12_sharp:1.4813e+00 total_fnorm:1.2188e+01 total_l1_linf:1.3184e+04 total_spectral:6.0938e+00 L1_fnorm:1.4038e-02 L2_fnorm:1.4282e-02 L3_fnorm:1.4465e-02 L4_fnorm:1.4709e-02 L5_fnorm:1.4771e-02 L6_fnorm:1.4587e-02 L7_fnorm:1.4587e-02 L8_fnorm:1.4221e-02 L9_fnorm:1.4465e-02 L10_fnorm:1.4404e-02 L11_fnorm:1.4038e-02 L12_fnorm:1.3794e-02 L1_l1linf:2.1820e-03 L2_l1linf:2.3956e-03 L3_l1linf:2.5177e-03 L4_l1linf:2.7008e-03 L5_l1linf:2.8076e-03 L6_l1linf:2.9907e-03 L7_l1linf:2.9449e-03 L8_l1linf:2.9449e-03 L9_l1linf:3.0212e-03 L10_l1linf:2.9602e-03 L11_l1linf:2.8534e-03 L12_l1linf:3.2196e-03 L1_spectral:2.8982e-04 L2_spectral:2.9267e-04 L3_spectral:2.9195e-04 L4_spectral:2.9332e-04 L5_spectral:2.9387e-04 L6_spectral:2.8930e-04 L7_spectral:2.9037e-04 L8_spectral:2.8392e-04 L9_spectral:2.8756e-04 L10_spectral:2.8959e-04 L11_spectral:2.7907e-04 L12_spectral:2.6466e-04 train_time:345177ms step_avg:41.09ms +[2025-09-11 10:44:58] [Rank 0] PRINT: step:8400/10000 val_loss:4.6713 total_sharp:1.6614e-04 L1_sharp:6.0502e-02 L2_sharp:1.1942e-01 L3_sharp:1.8407e-01 L4_sharp:2.9206e-01 L5_sharp:4.4628e-01 L6_sharp:6.3295e-01 L7_sharp:7.3215e-01 L8_sharp:8.0150e-01 L9_sharp:8.4813e-01 L10_sharp:8.8739e-01 L11_sharp:8.9762e-01 L12_sharp:1.4813e+00 total_fnorm:1.2188e+01 total_l1_linf:1.3184e+04 total_spectral:6.0938e+00 L1_fnorm:1.4038e-02 L2_fnorm:1.4282e-02 L3_fnorm:1.4465e-02 L4_fnorm:1.4709e-02 L5_fnorm:1.4771e-02 L6_fnorm:1.4587e-02 L7_fnorm:1.4587e-02 L8_fnorm:1.4221e-02 L9_fnorm:1.4465e-02 L10_fnorm:1.4404e-02 L11_fnorm:1.4038e-02 L12_fnorm:1.3794e-02 L1_l1linf:2.1820e-03 L2_l1linf:2.3956e-03 L3_l1linf:2.5177e-03 L4_l1linf:2.7008e-03 L5_l1linf:2.8076e-03 
L6_l1linf:2.9907e-03 L7_l1linf:2.9449e-03 L8_l1linf:2.9449e-03 L9_l1linf:3.0212e-03 L10_l1linf:2.9602e-03 L11_l1linf:2.8534e-03 L12_l1linf:3.2196e-03 L1_spectral:2.8982e-04 L2_spectral:2.9267e-04 L3_spectral:2.9195e-04 L4_spectral:2.9332e-04 L5_spectral:2.9387e-04 L6_spectral:2.8930e-04 L7_spectral:2.9037e-04 L8_spectral:2.8392e-04 L9_spectral:2.8756e-04 L10_spectral:2.8959e-04 L11_spectral:2.7907e-04 L12_spectral:2.6466e-04 train_time:345177ms step_avg:41.09ms +[2025-09-11 10:45:00] [Rank 0] step:8401/10000 train_time:346545ms step_avg:41.25ms +[2025-09-11 10:45:00] [Rank 0] step:8401/10000 train_time:346545ms step_avg:41.25ms +[2025-09-11 10:45:00] [Rank 0] step:8421/10000 train_time:347294ms step_avg:41.24ms +[2025-09-11 10:45:00] [Rank 0] step:8421/10000 train_time:347294ms step_avg:41.24ms +[2025-09-11 10:45:01] [Rank 0] step:8441/10000 train_time:348005ms step_avg:41.23ms +[2025-09-11 10:45:01] [Rank 0] step:8441/10000 train_time:348005ms step_avg:41.23ms +[2025-09-11 10:45:02] [Rank 0] step:8461/10000 train_time:348772ms step_avg:41.22ms +[2025-09-11 10:45:02] [Rank 0] step:8461/10000 train_time:348772ms step_avg:41.22ms +[2025-09-11 10:45:03] [Rank 0] step:8481/10000 train_time:349540ms step_avg:41.21ms +[2025-09-11 10:45:03] [Rank 0] step:8481/10000 train_time:349540ms step_avg:41.21ms +[2025-09-11 10:45:03] [Rank 0] step:8501/10000 train_time:350250ms step_avg:41.20ms +[2025-09-11 10:45:03] [Rank 0] step:8501/10000 train_time:350250ms step_avg:41.20ms +[2025-09-11 10:45:04] [Rank 0] step:8521/10000 train_time:350960ms step_avg:41.19ms +[2025-09-11 10:45:04] [Rank 0] step:8521/10000 train_time:350960ms step_avg:41.19ms +[2025-09-11 10:45:05] [Rank 0] step:8541/10000 train_time:351671ms step_avg:41.17ms +[2025-09-11 10:45:05] [Rank 0] step:8541/10000 train_time:351671ms step_avg:41.17ms +[2025-09-11 10:45:05] [Rank 0] step:8561/10000 train_time:352385ms step_avg:41.16ms +[2025-09-11 10:45:05] [Rank 0] step:8561/10000 train_time:352385ms step_avg:41.16ms 
+[2025-09-11 10:45:06] [Rank 0] step:8581/10000 train_time:353100ms step_avg:41.15ms +[2025-09-11 10:45:06] [Rank 0] step:8581/10000 train_time:353100ms step_avg:41.15ms +[2025-09-11 10:45:07] [Rank 0] step:8601/10000 train_time:353812ms step_avg:41.14ms +[2025-09-11 10:45:07] [Rank 0] step:8601/10000 train_time:353812ms step_avg:41.14ms +[2025-09-11 10:45:08] [Rank 0] step:8621/10000 train_time:354520ms step_avg:41.12ms +[2025-09-11 10:45:08] [Rank 0] step:8621/10000 train_time:354520ms step_avg:41.12ms +[2025-09-11 10:45:08] [Rank 0] step:8641/10000 train_time:355230ms step_avg:41.11ms +[2025-09-11 10:45:08] [Rank 0] step:8641/10000 train_time:355230ms step_avg:41.11ms +[2025-09-11 10:45:09] [Rank 0] step:8661/10000 train_time:355940ms step_avg:41.10ms +[2025-09-11 10:45:09] [Rank 0] step:8661/10000 train_time:355940ms step_avg:41.10ms +[2025-09-11 10:45:10] [Rank 0] step:8681/10000 train_time:356651ms step_avg:41.08ms +[2025-09-11 10:45:10] [Rank 0] step:8681/10000 train_time:356651ms step_avg:41.08ms +[2025-09-11 10:45:10] [Rank 0] step:8701/10000 train_time:357361ms step_avg:41.07ms +[2025-09-11 10:45:10] [Rank 0] step:8701/10000 train_time:357361ms step_avg:41.07ms +[2025-09-11 10:45:11] [Rank 0] step:8721/10000 train_time:358072ms step_avg:41.06ms +[2025-09-11 10:45:11] [Rank 0] step:8721/10000 train_time:358072ms step_avg:41.06ms +[2025-09-11 10:45:12] [Rank 0] step:8741/10000 train_time:358778ms step_avg:41.05ms +[2025-09-11 10:45:12] [Rank 0] step:8741/10000 train_time:358778ms step_avg:41.05ms +[2025-09-11 10:45:13] [Rank 0] step:8761/10000 train_time:359490ms step_avg:41.03ms +[2025-09-11 10:45:13] [Rank 0] step:8761/10000 train_time:359490ms step_avg:41.03ms +[2025-09-11 10:45:13] [Rank 0] step:8781/10000 train_time:360197ms step_avg:41.02ms +[2025-09-11 10:45:13] [Rank 0] step:8781/10000 train_time:360197ms step_avg:41.02ms +[2025-09-11 10:45:14] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 10:45:14] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 10:45:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 10:45:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 10:45:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 10:45:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 10:45:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:45:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:45:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 10:45:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 10:45:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 10:45:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 10:45:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 10:45:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 10:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 10:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 10:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 10:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 10:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 10:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 10:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 10:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 10:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 10:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 10:45:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 10:45:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 10:45:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 10:45:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 10:45:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 10:45:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 10:45:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 10:45:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 10:45:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 10:45:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 10:45:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 10:45:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 10:45:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 10:45:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 10:45:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 10:45:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 10:45:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 10:45:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 10:45:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 10:45:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 10:45:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:45:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:45:24] [Rank 0] PRINT: step:8800/10000 val_loss:4.6666 total_sharp:1.2999e-04 L1_sharp:5.5638e-02 L2_sharp:1.1139e-01 L3_sharp:1.7078e-01 L4_sharp:2.6226e-01 L5_sharp:3.2067e-01 L6_sharp:5.5333e-01 L7_sharp:5.7363e-01 L8_sharp:5.7558e-01 L9_sharp:6.4303e-01 L10_sharp:7.3659e-01 L11_sharp:7.8904e-01 L12_sharp:1.1918e+00 total_fnorm:8.7500e+00 total_l1_linf:8.3840e+03 total_spectral:4.4062e+00 L1_fnorm:1.0010e-02 L2_fnorm:1.0132e-02 L3_fnorm:1.0254e-02 L4_fnorm:1.0437e-02 L5_fnorm:1.0437e-02 L6_fnorm:1.0376e-02 L7_fnorm:1.0315e-02 L8_fnorm:1.0010e-02 L9_fnorm:1.0254e-02 L10_fnorm:1.0193e-02 L11_fnorm:9.8877e-03 L12_fnorm:9.7046e-03 L1_l1linf:1.4191e-03 L2_l1linf:1.4420e-03 L3_l1linf:1.5640e-03 L4_l1linf:1.7319e-03 L5_l1linf:1.8234e-03 L6_l1linf:1.7776e-03 L7_l1linf:1.8082e-03 L8_l1linf:1.8921e-03 L9_l1linf:1.8463e-03 L10_l1linf:1.8845e-03 L11_l1linf:1.7929e-03 L12_l1linf:1.9379e-03 L1_spectral:2.1417e-04 L2_spectral:2.1198e-04 L3_spectral:2.1355e-04 L4_spectral:2.1157e-04 L5_spectral:2.1513e-04 L6_spectral:2.0998e-04 L7_spectral:2.1051e-04 L8_spectral:2.0240e-04 L9_spectral:2.0877e-04 L10_spectral:2.0764e-04 L11_spectral:2.0314e-04 L12_spectral:1.9068e-04 train_time:360886ms step_avg:41.01ms +[2025-09-11 10:45:24] [Rank 0] PRINT: step:8800/10000 
val_loss:4.6666 total_sharp:1.2999e-04 L1_sharp:5.5638e-02 L2_sharp:1.1139e-01 L3_sharp:1.7078e-01 L4_sharp:2.6226e-01 L5_sharp:3.2067e-01 L6_sharp:5.5333e-01 L7_sharp:5.7363e-01 L8_sharp:5.7558e-01 L9_sharp:6.4303e-01 L10_sharp:7.3659e-01 L11_sharp:7.8904e-01 L12_sharp:1.1918e+00 total_fnorm:8.7500e+00 total_l1_linf:8.3840e+03 total_spectral:4.4062e+00 L1_fnorm:1.0010e-02 L2_fnorm:1.0132e-02 L3_fnorm:1.0254e-02 L4_fnorm:1.0437e-02 L5_fnorm:1.0437e-02 L6_fnorm:1.0376e-02 L7_fnorm:1.0315e-02 L8_fnorm:1.0010e-02 L9_fnorm:1.0254e-02 L10_fnorm:1.0193e-02 L11_fnorm:9.8877e-03 L12_fnorm:9.7046e-03 L1_l1linf:1.4191e-03 L2_l1linf:1.4420e-03 L3_l1linf:1.5640e-03 L4_l1linf:1.7319e-03 L5_l1linf:1.8234e-03 L6_l1linf:1.7776e-03 L7_l1linf:1.8082e-03 L8_l1linf:1.8921e-03 L9_l1linf:1.8463e-03 L10_l1linf:1.8845e-03 L11_l1linf:1.7929e-03 L12_l1linf:1.9379e-03 L1_spectral:2.1417e-04 L2_spectral:2.1198e-04 L3_spectral:2.1355e-04 L4_spectral:2.1157e-04 L5_spectral:2.1513e-04 L6_spectral:2.0998e-04 L7_spectral:2.1051e-04 L8_spectral:2.0240e-04 L9_spectral:2.0877e-04 L10_spectral:2.0764e-04 L11_spectral:2.0314e-04 L12_spectral:1.9068e-04 train_time:360886ms step_avg:41.01ms +[2025-09-11 10:45:25] [Rank 0] step:8801/10000 train_time:362281ms step_avg:41.16ms +[2025-09-11 10:45:25] [Rank 0] step:8801/10000 train_time:362281ms step_avg:41.16ms +[2025-09-11 10:45:26] [Rank 0] step:8821/10000 train_time:363018ms step_avg:41.15ms +[2025-09-11 10:45:26] [Rank 0] step:8821/10000 train_time:363018ms step_avg:41.15ms +[2025-09-11 10:45:27] [Rank 0] step:8841/10000 train_time:363728ms step_avg:41.14ms +[2025-09-11 10:45:27] [Rank 0] step:8841/10000 train_time:363728ms step_avg:41.14ms +[2025-09-11 10:45:28] [Rank 0] step:8861/10000 train_time:364437ms step_avg:41.13ms +[2025-09-11 10:45:28] [Rank 0] step:8861/10000 train_time:364437ms step_avg:41.13ms +[2025-09-11 10:45:28] [Rank 0] step:8881/10000 train_time:365148ms step_avg:41.12ms +[2025-09-11 10:45:28] [Rank 0] step:8881/10000 
train_time:365148ms step_avg:41.12ms +[2025-09-11 10:45:29] [Rank 0] step:8901/10000 train_time:365860ms step_avg:41.10ms +[2025-09-11 10:45:29] [Rank 0] step:8901/10000 train_time:365860ms step_avg:41.10ms +[2025-09-11 10:45:30] [Rank 0] step:8921/10000 train_time:366566ms step_avg:41.09ms +[2025-09-11 10:45:30] [Rank 0] step:8921/10000 train_time:366566ms step_avg:41.09ms +[2025-09-11 10:45:30] [Rank 0] step:8941/10000 train_time:367281ms step_avg:41.08ms +[2025-09-11 10:45:30] [Rank 0] step:8941/10000 train_time:367281ms step_avg:41.08ms +[2025-09-11 10:45:31] [Rank 0] step:8961/10000 train_time:367999ms step_avg:41.07ms +[2025-09-11 10:45:31] [Rank 0] step:8961/10000 train_time:367999ms step_avg:41.07ms +[2025-09-11 10:45:32] [Rank 0] step:8981/10000 train_time:368713ms step_avg:41.05ms +[2025-09-11 10:45:32] [Rank 0] step:8981/10000 train_time:368713ms step_avg:41.05ms +[2025-09-11 10:45:33] [Rank 0] step:9001/10000 train_time:369418ms step_avg:41.04ms +[2025-09-11 10:45:33] [Rank 0] step:9001/10000 train_time:369418ms step_avg:41.04ms +[2025-09-11 10:45:33] [Rank 0] step:9021/10000 train_time:370129ms step_avg:41.03ms +[2025-09-11 10:45:33] [Rank 0] step:9021/10000 train_time:370129ms step_avg:41.03ms +[2025-09-11 10:45:34] [Rank 0] step:9041/10000 train_time:370842ms step_avg:41.02ms +[2025-09-11 10:45:34] [Rank 0] step:9041/10000 train_time:370842ms step_avg:41.02ms +[2025-09-11 10:45:35] [Rank 0] step:9061/10000 train_time:371551ms step_avg:41.01ms +[2025-09-11 10:45:35] [Rank 0] step:9061/10000 train_time:371551ms step_avg:41.01ms +[2025-09-11 10:45:35] [Rank 0] step:9081/10000 train_time:372265ms step_avg:40.99ms +[2025-09-11 10:45:35] [Rank 0] step:9081/10000 train_time:372265ms step_avg:40.99ms +[2025-09-11 10:45:36] [Rank 0] step:9101/10000 train_time:372979ms step_avg:40.98ms +[2025-09-11 10:45:36] [Rank 0] step:9101/10000 train_time:372979ms step_avg:40.98ms +[2025-09-11 10:45:37] [Rank 0] step:9121/10000 train_time:373693ms step_avg:40.97ms 
+[2025-09-11 10:45:37] [Rank 0] step:9121/10000 train_time:373693ms step_avg:40.97ms +[2025-09-11 10:45:38] [Rank 0] step:9141/10000 train_time:374401ms step_avg:40.96ms +[2025-09-11 10:45:38] [Rank 0] step:9141/10000 train_time:374401ms step_avg:40.96ms +[2025-09-11 10:45:38] [Rank 0] step:9161/10000 train_time:375114ms step_avg:40.95ms +[2025-09-11 10:45:38] [Rank 0] step:9161/10000 train_time:375114ms step_avg:40.95ms +[2025-09-11 10:45:39] [Rank 0] step:9181/10000 train_time:375827ms step_avg:40.94ms +[2025-09-11 10:45:39] [Rank 0] step:9181/10000 train_time:375827ms step_avg:40.94ms +[2025-09-11 10:45:40] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 10:45:40] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 10:45:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 10:45:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 10:45:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 10:45:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 10:45:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:45:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:45:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 10:45:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 10:45:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 10:45:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 10:45:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 10:45:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 10:45:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 10:45:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 10:45:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 10:45:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 10:45:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 10:45:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 10:45:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 10:45:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 10:45:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 10:45:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 10:45:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 10:45:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 10:45:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 10:45:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 10:45:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 10:45:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 10:45:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 10:45:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 10:45:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 10:45:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 10:45:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 10:45:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 10:45:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 10:45:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 10:45:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 10:45:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 10:45:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 10:45:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 10:45:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 10:45:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 10:45:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:45:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:45:50] [Rank 0] PRINT: step:9200/10000 val_loss:4.6552 total_sharp:1.5471e-04 L1_sharp:5.1738e-02 L2_sharp:8.5161e-02 L3_sharp:1.2702e-01 L4_sharp:2.2083e-01 L5_sharp:3.1123e-01 L6_sharp:5.0895e-01 L7_sharp:6.3265e-01 L8_sharp:6.7889e-01 L9_sharp:8.1531e-01 L10_sharp:9.2312e-01 L11_sharp:1.0615e+00 L12_sharp:3.2598e+00 total_fnorm:5.8750e+00 total_l1_linf:4.8000e+03 total_spectral:2.9375e+00 L1_fnorm:6.4697e-03 L2_fnorm:6.5918e-03 L3_fnorm:6.7139e-03 L4_fnorm:6.8359e-03 L5_fnorm:6.9275e-03 L6_fnorm:6.8665e-03 L7_fnorm:6.8665e-03 L8_fnorm:6.6528e-03 L9_fnorm:6.7749e-03 L10_fnorm:6.7749e-03 L11_fnorm:6.5918e-03 L12_fnorm:6.4087e-03 L1_l1linf:8.2016e-04 L2_l1linf:9.4223e-04 L3_l1linf:9.9182e-04 L4_l1linf:1.1063e-03 L5_l1linf:1.1139e-03 L6_l1linf:1.1520e-03 L7_l1linf:1.1292e-03 L8_l1linf:1.1826e-03 L9_l1linf:1.1673e-03 L10_l1linf:1.1444e-03 L11_l1linf:1.1520e-03 L12_l1linf:1.1749e-03 L1_spectral:1.3749e-04 L2_spectral:1.3871e-04 L3_spectral:1.3718e-04 L4_spectral:1.3977e-04 L5_spectral:1.4154e-04 L6_spectral:1.3737e-04 L7_spectral:1.3804e-04 L8_spectral:1.3427e-04 L9_spectral:1.3852e-04 L10_spectral:1.3702e-04 L11_spectral:1.3288e-04 L12_spectral:1.2420e-04 train_time:376525ms step_avg:40.93ms +[2025-09-11 10:45:50] [Rank 0] PRINT: step:9200/10000 val_loss:4.6552 total_sharp:1.5471e-04 L1_sharp:5.1738e-02 L2_sharp:8.5161e-02 L3_sharp:1.2702e-01 L4_sharp:2.2083e-01 L5_sharp:3.1123e-01 L6_sharp:5.0895e-01 L7_sharp:6.3265e-01 L8_sharp:6.7889e-01 L9_sharp:8.1531e-01 L10_sharp:9.2312e-01 L11_sharp:1.0615e+00 L12_sharp:3.2598e+00 total_fnorm:5.8750e+00 total_l1_linf:4.8000e+03 total_spectral:2.9375e+00 L1_fnorm:6.4697e-03 L2_fnorm:6.5918e-03 L3_fnorm:6.7139e-03 L4_fnorm:6.8359e-03 L5_fnorm:6.9275e-03 L6_fnorm:6.8665e-03 L7_fnorm:6.8665e-03 L8_fnorm:6.6528e-03 L9_fnorm:6.7749e-03 L10_fnorm:6.7749e-03 L11_fnorm:6.5918e-03 L12_fnorm:6.4087e-03 L1_l1linf:8.2016e-04 L2_l1linf:9.4223e-04 L3_l1linf:9.9182e-04 L4_l1linf:1.1063e-03 L5_l1linf:1.1139e-03 
L6_l1linf:1.1520e-03 L7_l1linf:1.1292e-03 L8_l1linf:1.1826e-03 L9_l1linf:1.1673e-03 L10_l1linf:1.1444e-03 L11_l1linf:1.1520e-03 L12_l1linf:1.1749e-03 L1_spectral:1.3749e-04 L2_spectral:1.3871e-04 L3_spectral:1.3718e-04 L4_spectral:1.3977e-04 L5_spectral:1.4154e-04 L6_spectral:1.3737e-04 L7_spectral:1.3804e-04 L8_spectral:1.3427e-04 L9_spectral:1.3852e-04 L10_spectral:1.3702e-04 L11_spectral:1.3288e-04 L12_spectral:1.2420e-04 train_time:376525ms step_avg:40.93ms +[2025-09-11 10:45:51] [Rank 0] step:9201/10000 train_time:377931ms step_avg:41.07ms +[2025-09-11 10:45:51] [Rank 0] step:9201/10000 train_time:377931ms step_avg:41.07ms +[2025-09-11 10:45:52] [Rank 0] step:9221/10000 train_time:378654ms step_avg:41.06ms +[2025-09-11 10:45:52] [Rank 0] step:9221/10000 train_time:378654ms step_avg:41.06ms +[2025-09-11 10:45:53] [Rank 0] step:9241/10000 train_time:379363ms step_avg:41.05ms +[2025-09-11 10:45:53] [Rank 0] step:9241/10000 train_time:379363ms step_avg:41.05ms +[2025-09-11 10:45:53] [Rank 0] step:9261/10000 train_time:380074ms step_avg:41.04ms +[2025-09-11 10:45:53] [Rank 0] step:9261/10000 train_time:380074ms step_avg:41.04ms +[2025-09-11 10:45:54] [Rank 0] step:9281/10000 train_time:380785ms step_avg:41.03ms +[2025-09-11 10:45:54] [Rank 0] step:9281/10000 train_time:380785ms step_avg:41.03ms +[2025-09-11 10:45:55] [Rank 0] step:9301/10000 train_time:381497ms step_avg:41.02ms +[2025-09-11 10:45:55] [Rank 0] step:9301/10000 train_time:381497ms step_avg:41.02ms +[2025-09-11 10:45:56] [Rank 0] step:9321/10000 train_time:382211ms step_avg:41.01ms +[2025-09-11 10:45:56] [Rank 0] step:9321/10000 train_time:382211ms step_avg:41.01ms +[2025-09-11 10:45:56] [Rank 0] step:9341/10000 train_time:382918ms step_avg:40.99ms +[2025-09-11 10:45:56] [Rank 0] step:9341/10000 train_time:382918ms step_avg:40.99ms +[2025-09-11 10:45:57] [Rank 0] step:9361/10000 train_time:383623ms step_avg:40.98ms +[2025-09-11 10:45:57] [Rank 0] step:9361/10000 train_time:383623ms step_avg:40.98ms 
+[2025-09-11 10:45:58] [Rank 0] step:9381/10000 train_time:384334ms step_avg:40.97ms +[2025-09-11 10:45:58] [Rank 0] step:9381/10000 train_time:384334ms step_avg:40.97ms +[2025-09-11 10:45:58] [Rank 0] step:9401/10000 train_time:385046ms step_avg:40.96ms +[2025-09-11 10:45:58] [Rank 0] step:9401/10000 train_time:385046ms step_avg:40.96ms +[2025-09-11 10:45:59] [Rank 0] step:9421/10000 train_time:385758ms step_avg:40.95ms +[2025-09-11 10:45:59] [Rank 0] step:9421/10000 train_time:385758ms step_avg:40.95ms +[2025-09-11 10:46:00] [Rank 0] step:9441/10000 train_time:386472ms step_avg:40.94ms +[2025-09-11 10:46:00] [Rank 0] step:9441/10000 train_time:386472ms step_avg:40.94ms +[2025-09-11 10:46:00] [Rank 0] step:9461/10000 train_time:387183ms step_avg:40.92ms +[2025-09-11 10:46:00] [Rank 0] step:9461/10000 train_time:387183ms step_avg:40.92ms +[2025-09-11 10:46:01] [Rank 0] step:9481/10000 train_time:387895ms step_avg:40.91ms +[2025-09-11 10:46:01] [Rank 0] step:9481/10000 train_time:387895ms step_avg:40.91ms +[2025-09-11 10:46:02] [Rank 0] step:9501/10000 train_time:388608ms step_avg:40.90ms +[2025-09-11 10:46:02] [Rank 0] step:9501/10000 train_time:388608ms step_avg:40.90ms +[2025-09-11 10:46:03] [Rank 0] step:9521/10000 train_time:389322ms step_avg:40.89ms +[2025-09-11 10:46:03] [Rank 0] step:9521/10000 train_time:389322ms step_avg:40.89ms +[2025-09-11 10:46:03] [Rank 0] step:9541/10000 train_time:390031ms step_avg:40.88ms +[2025-09-11 10:46:03] [Rank 0] step:9541/10000 train_time:390031ms step_avg:40.88ms +[2025-09-11 10:46:04] [Rank 0] step:9561/10000 train_time:390743ms step_avg:40.87ms +[2025-09-11 10:46:04] [Rank 0] step:9561/10000 train_time:390743ms step_avg:40.87ms +[2025-09-11 10:46:05] [Rank 0] step:9581/10000 train_time:391454ms step_avg:40.86ms +[2025-09-11 10:46:05] [Rank 0] step:9581/10000 train_time:391454ms step_avg:40.86ms +[2025-09-11 10:46:05] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 10:46:05] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 10:46:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 10:46:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 10:46:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 10:46:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 10:46:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:46:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:46:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 10:46:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 10:46:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 10:46:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 10:46:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 10:46:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 10:46:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 10:46:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 10:46:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 10:46:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 10:46:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 10:46:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 10:46:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 10:46:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 10:46:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 10:46:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 10:46:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 10:46:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 10:46:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 10:46:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 10:46:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 10:46:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 10:46:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 10:46:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 10:46:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 10:46:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 10:46:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 10:46:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 10:46:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 10:46:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 10:46:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 10:46:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 10:46:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 10:46:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 10:46:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 10:46:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 10:46:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:46:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:46:16] [Rank 0] PRINT: step:9600/10000 val_loss:4.6494 total_sharp:7.2976e-05 L1_sharp:3.6668e-02 L2_sharp:6.4389e-02 L3_sharp:1.1277e-01 L4_sharp:1.6940e-01 L5_sharp:2.4168e-01 L6_sharp:3.5484e-01 L7_sharp:4.2149e-01 L8_sharp:4.3310e-01 L9_sharp:5.0580e-01 L10_sharp:5.4700e-01 L11_sharp:5.5494e-01 L12_sharp:8.2957e-01 total_fnorm:3.4062e+00 total_l1_linf:2.4000e+03 total_spectral:1.7031e+00 L1_fnorm:3.6163e-03 L2_fnorm:3.6774e-03 L3_fnorm:3.7384e-03 L4_fnorm:3.8147e-03 L5_fnorm:3.8605e-03 L6_fnorm:3.8300e-03 L7_fnorm:3.8300e-03 L8_fnorm:3.6926e-03 L9_fnorm:3.7994e-03 L10_fnorm:3.7994e-03 L11_fnorm:3.6926e-03 L12_fnorm:3.5400e-03 L1_l1linf:3.9482e-04 L2_l1linf:4.3488e-04 L3_l1linf:4.5013e-04 L4_l1linf:5.0735e-04 L5_l1linf:5.4550e-04 L6_l1linf:5.4550e-04 L7_l1linf:5.6839e-04 L8_l1linf:5.5695e-04 L9_l1linf:5.6076e-04 L10_l1linf:5.9509e-04 L11_l1linf:5.7983e-04 L12_l1linf:6.1035e-04 L1_spectral:7.8721e-05 L2_spectral:7.8905e-05 L3_spectral:7.8122e-05 L4_spectral:7.8736e-05 L5_spectral:8.0397e-05 L6_spectral:7.8230e-05 L7_spectral:7.8354e-05 L8_spectral:7.4299e-05 L9_spectral:7.7440e-05 L10_spectral:7.7425e-05 L11_spectral:7.5477e-05 L12_spectral:6.8604e-05 train_time:392143ms step_avg:40.85ms +[2025-09-11 10:46:16] [Rank 0] PRINT: step:9600/10000 
val_loss:4.6494 total_sharp:7.2976e-05 L1_sharp:3.6668e-02 L2_sharp:6.4389e-02 L3_sharp:1.1277e-01 L4_sharp:1.6940e-01 L5_sharp:2.4168e-01 L6_sharp:3.5484e-01 L7_sharp:4.2149e-01 L8_sharp:4.3310e-01 L9_sharp:5.0580e-01 L10_sharp:5.4700e-01 L11_sharp:5.5494e-01 L12_sharp:8.2957e-01 total_fnorm:3.4062e+00 total_l1_linf:2.4000e+03 total_spectral:1.7031e+00 L1_fnorm:3.6163e-03 L2_fnorm:3.6774e-03 L3_fnorm:3.7384e-03 L4_fnorm:3.8147e-03 L5_fnorm:3.8605e-03 L6_fnorm:3.8300e-03 L7_fnorm:3.8300e-03 L8_fnorm:3.6926e-03 L9_fnorm:3.7994e-03 L10_fnorm:3.7994e-03 L11_fnorm:3.6926e-03 L12_fnorm:3.5400e-03 L1_l1linf:3.9482e-04 L2_l1linf:4.3488e-04 L3_l1linf:4.5013e-04 L4_l1linf:5.0735e-04 L5_l1linf:5.4550e-04 L6_l1linf:5.4550e-04 L7_l1linf:5.6839e-04 L8_l1linf:5.5695e-04 L9_l1linf:5.6076e-04 L10_l1linf:5.9509e-04 L11_l1linf:5.7983e-04 L12_l1linf:6.1035e-04 L1_spectral:7.8721e-05 L2_spectral:7.8905e-05 L3_spectral:7.8122e-05 L4_spectral:7.8736e-05 L5_spectral:8.0397e-05 L6_spectral:7.8230e-05 L7_spectral:7.8354e-05 L8_spectral:7.4299e-05 L9_spectral:7.7440e-05 L10_spectral:7.7425e-05 L11_spectral:7.5477e-05 L12_spectral:6.8604e-05 train_time:392143ms step_avg:40.85ms +[2025-09-11 10:46:17] [Rank 0] step:9601/10000 train_time:393560ms step_avg:40.99ms +[2025-09-11 10:46:17] [Rank 0] step:9601/10000 train_time:393560ms step_avg:40.99ms +[2025-09-11 10:46:18] [Rank 0] step:9621/10000 train_time:394292ms step_avg:40.98ms +[2025-09-11 10:46:18] [Rank 0] step:9621/10000 train_time:394292ms step_avg:40.98ms +[2025-09-11 10:46:19] [Rank 0] step:9641/10000 train_time:395008ms step_avg:40.97ms +[2025-09-11 10:46:19] [Rank 0] step:9641/10000 train_time:395008ms step_avg:40.97ms +[2025-09-11 10:46:19] [Rank 0] step:9661/10000 train_time:395731ms step_avg:40.96ms +[2025-09-11 10:46:19] [Rank 0] step:9661/10000 train_time:395731ms step_avg:40.96ms +[2025-09-11 10:46:20] [Rank 0] step:9681/10000 train_time:396447ms step_avg:40.95ms +[2025-09-11 10:46:20] [Rank 0] step:9681/10000 
train_time:396447ms step_avg:40.95ms +[2025-09-11 10:46:21] [Rank 0] step:9701/10000 train_time:397164ms step_avg:40.94ms +[2025-09-11 10:46:21] [Rank 0] step:9701/10000 train_time:397164ms step_avg:40.94ms +[2025-09-11 10:46:22] [Rank 0] step:9721/10000 train_time:397885ms step_avg:40.93ms +[2025-09-11 10:46:22] [Rank 0] step:9721/10000 train_time:397885ms step_avg:40.93ms +[2025-09-11 10:46:22] [Rank 0] step:9741/10000 train_time:398604ms step_avg:40.92ms +[2025-09-11 10:46:22] [Rank 0] step:9741/10000 train_time:398604ms step_avg:40.92ms +[2025-09-11 10:46:23] [Rank 0] step:9761/10000 train_time:399322ms step_avg:40.91ms +[2025-09-11 10:46:23] [Rank 0] step:9761/10000 train_time:399322ms step_avg:40.91ms +[2025-09-11 10:46:24] [Rank 0] step:9781/10000 train_time:400038ms step_avg:40.90ms +[2025-09-11 10:46:24] [Rank 0] step:9781/10000 train_time:400038ms step_avg:40.90ms +[2025-09-11 10:46:25] [Rank 0] step:9801/10000 train_time:400761ms step_avg:40.89ms +[2025-09-11 10:46:25] [Rank 0] step:9801/10000 train_time:400761ms step_avg:40.89ms +[2025-09-11 10:46:25] [Rank 0] step:9821/10000 train_time:401480ms step_avg:40.88ms +[2025-09-11 10:46:25] [Rank 0] step:9821/10000 train_time:401480ms step_avg:40.88ms +[2025-09-11 10:46:26] [Rank 0] step:9841/10000 train_time:402202ms step_avg:40.87ms +[2025-09-11 10:46:26] [Rank 0] step:9841/10000 train_time:402202ms step_avg:40.87ms +[2025-09-11 10:46:27] [Rank 0] step:9861/10000 train_time:402919ms step_avg:40.86ms +[2025-09-11 10:46:27] [Rank 0] step:9861/10000 train_time:402919ms step_avg:40.86ms +[2025-09-11 10:46:27] [Rank 0] step:9881/10000 train_time:403639ms step_avg:40.85ms +[2025-09-11 10:46:27] [Rank 0] step:9881/10000 train_time:403639ms step_avg:40.85ms +[2025-09-11 10:46:28] [Rank 0] step:9901/10000 train_time:404355ms step_avg:40.84ms +[2025-09-11 10:46:28] [Rank 0] step:9901/10000 train_time:404355ms step_avg:40.84ms +[2025-09-11 10:46:29] [Rank 0] step:9921/10000 train_time:405073ms step_avg:40.83ms 
+[2025-09-11 10:46:29] [Rank 0] step:9921/10000 train_time:405073ms step_avg:40.83ms +[2025-09-11 10:46:30] [Rank 0] step:9941/10000 train_time:405795ms step_avg:40.82ms +[2025-09-11 10:46:30] [Rank 0] step:9941/10000 train_time:405795ms step_avg:40.82ms +[2025-09-11 10:46:30] [Rank 0] step:9961/10000 train_time:406519ms step_avg:40.81ms +[2025-09-11 10:46:30] [Rank 0] step:9961/10000 train_time:406519ms step_avg:40.81ms +[2025-09-11 10:46:31] [Rank 0] step:9981/10000 train_time:407239ms step_avg:40.80ms +[2025-09-11 10:46:31] [Rank 0] step:9981/10000 train_time:407239ms step_avg:40.80ms +[2025-09-11 10:46:32] [Rank 0] step:10000/10000 train_time:407932ms step_avg:40.79ms +[2025-09-11 10:46:32] [Rank 0] step:10000/10000 train_time:407932ms step_avg:40.79ms +[2025-09-11 10:46:32] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 10:46:32] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 10:46:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 10:46:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 10:46:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 10:46:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 10:46:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:46:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:46:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 10:46:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 10:46:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 10:46:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 10:46:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 10:46:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 10:46:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 10:46:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 10:46:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 10:46:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 10:46:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 10:46:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 10:46:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 10:46:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 10:46:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 10:46:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 10:46:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 10:46:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 10:46:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 10:46:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 10:46:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 10:46:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 10:46:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 10:46:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 10:46:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 10:46:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 10:46:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 10:46:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 10:46:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 10:46:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 10:46:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 10:46:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 10:46:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 10:46:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 10:46:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 10:46:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 10:46:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:46:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:46:42] [Rank 0] PRINT: step:10000/10000 val_loss:4.6484 total_sharp:5.0662e-05 L1_sharp:3.2867e-02 L2_sharp:5.4718e-02 L3_sharp:9.2483e-02 L4_sharp:1.5535e-01 L5_sharp:2.0731e-01 L6_sharp:2.7574e-01 L7_sharp:3.5142e-01 L8_sharp:3.5172e-01 L9_sharp:4.3452e-01 L10_sharp:4.5818e-01 L11_sharp:4.7134e-01 L12_sharp:5.8823e-01 total_fnorm:1.2734e+00 total_l1_linf:6.5200e+02 total_spectral:6.4062e-01 L1_fnorm:1.3885e-03 L2_fnorm:1.4191e-03 L3_fnorm:1.4267e-03 L4_fnorm:1.4725e-03 L5_fnorm:1.4954e-03 L6_fnorm:1.4877e-03 L7_fnorm:1.4877e-03 L8_fnorm:1.4343e-03 L9_fnorm:1.4725e-03 L10_fnorm:1.4648e-03 L11_fnorm:1.4114e-03 L12_fnorm:1.3733e-03 L1_l1linf:1.1969e-04 L2_l1linf:1.3828e-04 L3_l1linf:1.3733e-04 L4_l1linf:1.6499e-04 L5_l1linf:1.9646e-04 L6_l1linf:1.8692e-04 L7_l1linf:1.6403e-04 L8_l1linf:1.7262e-04 L9_l1linf:1.6689e-04 L10_l1linf:1.8024e-04 L11_l1linf:1.6785e-04 L12_l1linf:1.9264e-04 L1_spectral:3.0578e-05 L2_spectral:3.1333e-05 L3_spectral:3.0516e-05 L4_spectral:3.1010e-05 L5_spectral:3.2622e-05 L6_spectral:3.0982e-05 L7_spectral:3.1679e-05 L8_spectral:3.0452e-05 L9_spectral:3.0959e-05 L10_spectral:3.1572e-05 L11_spectral:3.0082e-05 L12_spectral:2.8307e-05 train_time:407953ms step_avg:40.80ms +[2025-09-11 10:46:42] [Rank 0] PRINT: step:10000/10000 val_loss:4.6484 total_sharp:5.0662e-05 L1_sharp:3.2867e-02 L2_sharp:5.4718e-02 L3_sharp:9.2483e-02 L4_sharp:1.5535e-01 L5_sharp:2.0731e-01 L6_sharp:2.7574e-01 L7_sharp:3.5142e-01 L8_sharp:3.5172e-01 L9_sharp:4.3452e-01 L10_sharp:4.5818e-01 L11_sharp:4.7134e-01 L12_sharp:5.8823e-01 total_fnorm:1.2734e+00 total_l1_linf:6.5200e+02 total_spectral:6.4062e-01 L1_fnorm:1.3885e-03 L2_fnorm:1.4191e-03 L3_fnorm:1.4267e-03 L4_fnorm:1.4725e-03 L5_fnorm:1.4954e-03 L6_fnorm:1.4877e-03 L7_fnorm:1.4877e-03 L8_fnorm:1.4343e-03 L9_fnorm:1.4725e-03 L10_fnorm:1.4648e-03 L11_fnorm:1.4114e-03 L12_fnorm:1.3733e-03 L1_l1linf:1.1969e-04 L2_l1linf:1.3828e-04 L3_l1linf:1.3733e-04 L4_l1linf:1.6499e-04 L5_l1linf:1.9646e-04 
L6_l1linf:1.8692e-04 L7_l1linf:1.6403e-04 L8_l1linf:1.7262e-04 L9_l1linf:1.6689e-04 L10_l1linf:1.8024e-04 L11_l1linf:1.6785e-04 L12_l1linf:1.9264e-04 L1_spectral:3.0578e-05 L2_spectral:3.1333e-05 L3_spectral:3.0516e-05 L4_spectral:3.1010e-05 L5_spectral:3.2622e-05 L6_spectral:3.0982e-05 L7_spectral:3.1679e-05 L8_spectral:3.0452e-05 L9_spectral:3.0959e-05 L10_spectral:3.1572e-05 L11_spectral:3.0082e-05 L12_spectral:2.8307e-05 train_time:407953ms step_avg:40.80ms +[2025-09-11 10:46:42] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 10:46:42 2025 --- +[2025-09-11 10:46:42] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 10:46:42 2025 --- +[2025-09-11 10:46:42] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 10:46:42] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.0005_seed_45/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.0005_seed_45/config.json new file mode 100644 index 0000000000000000000000000000000000000000..78a1db7361c8b677ebec7f7b26656f66e32ab743 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.0005_seed_45/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 45, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.01, + "muon_lr": 0.0005, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "047870fb-32bc-451a-af9c-c9df944d70ff", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.0005_seed_45/training_log_047870fb-32bc-451a-af9c-c9df944d70ff.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.0005_seed_45/training_log_047870fb-32bc-451a-af9c-c9df944d70ff.txt new file mode 100644 index 0000000000000000000000000000000000000000..1caf1925a27acababebb8191261e61dd0808acd7 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.0005_seed_45/training_log_047870fb-32bc-451a-af9c-c9df944d70ff.txt @@ -0,0 +1,2056 @@ +[2025-09-12 06:51:11] [Rank 0] PRINT: --- Script Start: Fri Sep 12 06:51:11 2025 --- +[2025-09-12 06:51:11] [Rank 0] PRINT: --- Script Start: Fri Sep 12 06:51:11 2025 --- +[2025-09-12 06:51:11] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.01, muon_lr=0.0005, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-12 06:51:11] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.01, muon_lr=0.0005, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-12 06:51:11] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-12 06:51:11] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-12 06:51:11] [Rank 0] PRINT: Using fixed seed: 45 +[2025-09-12 06:51:11] [Rank 0] PRINT: Using fixed seed: 45 +[2025-09-12 06:51:11] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.0005_seed_45 +[2025-09-12 06:51:11] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.0005_seed_45 +[2025-09-12 06:51:11] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses 
import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert 
header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." 
+ "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + 
train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-12 06:51:11] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-12 06:51:11] [Rank 0] PRINT: Constructing model... +[2025-09-12 06:51:11] [Rank 0] PRINT: Constructing model... +[2025-09-12 06:51:12] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-12 06:51:12] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-12 06:51:12] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-12 06:51:12] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-12 06:51:12] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-12 06:51:12] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-12 06:51:12] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-12 06:51:12] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-12 06:51:12] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-12 06:51:12] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-12 06:51:18] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-12 06:51:18] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-12 06:51:18] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-12 06:51:18] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-12 06:51:18] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-12 06:51:18] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-12 06:51:33] [Rank 0] PRINT: Model compilation complete. +[2025-09-12 06:51:33] [Rank 0] PRINT: Model compilation complete. +[2025-09-12 06:51:33] [Rank 0] PRINT: Starting warmup... +[2025-09-12 06:51:33] [Rank 0] PRINT: Starting warmup... 
diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.001_seed_42/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.001_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..581a41d2017deba238c32af3d83f7744d1576012 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.001_seed_42/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.01, + "muon_lr": 0.001, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "2e25a307-28c0-41a1-8cf9-0ae705dbbd7a", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.001_seed_42/training_log_2e25a307-28c0-41a1-8cf9-0ae705dbbd7a.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.001_seed_42/training_log_2e25a307-28c0-41a1-8cf9-0ae705dbbd7a.txt new file mode 100644 index 0000000000000000000000000000000000000000..7adc3b18a5822c914fbf09890fedc4552d09b951 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.001_seed_42/training_log_2e25a307-28c0-41a1-8cf9-0ae705dbbd7a.txt @@ -0,0 +1,4264 @@ +[2025-09-11 11:01:00] [Rank 0] PRINT: --- Script Start: Thu Sep 11 11:01:00 2025 --- +[2025-09-11 11:01:00] [Rank 0] PRINT: --- Script Start: Thu Sep 11 11:01:00 2025 --- +[2025-09-11 
11:01:00] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.01, muon_lr=0.001, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 11:01:00] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.01, muon_lr=0.001, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 11:01:00] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 11:01:00] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 11:01:00] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-11 11:01:00] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-11 11:01:00] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.001_seed_42 +[2025-09-11 11:01:00] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.001_seed_42 +[2025-09-11 11:01:00] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # 
Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = 
_load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 
# FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 11:01:00] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 11:01:00] [Rank 0] PRINT: Constructing model... +[2025-09-11 11:01:00] [Rank 0] PRINT: Constructing model... +[2025-09-11 11:01:01] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 11:01:01] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 11:01:01] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 11:01:01] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 11:01:01] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 11:01:01] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 11:01:01] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 11:01:01] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 11:01:01] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 11:01:01] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 11:01:03] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 11:01:03] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 11:01:03] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 11:01:03] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 11:01:03] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 11:01:03] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 11:01:09] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 11:01:09] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 11:01:09] [Rank 0] PRINT: Starting warmup... +[2025-09-11 11:01:09] [Rank 0] PRINT: Starting warmup... +[2025-09-11 11:01:46] [Rank 0] PRINT: Warmup complete. +[2025-09-11 11:01:46] [Rank 0] PRINT: Warmup complete. +[2025-09-11 11:01:46] [Rank 0] PRINT: Starting training... +[2025-09-11 11:01:46] [Rank 0] PRINT: Starting training... 
+[2025-09-11 11:01:47] [Rank 0] step:21/10000 train_time:1137ms step_avg:54.14ms +[2025-09-11 11:01:47] [Rank 0] step:21/10000 train_time:1137ms step_avg:54.14ms +[2025-09-11 11:01:48] [Rank 0] step:41/10000 train_time:1869ms step_avg:45.59ms +[2025-09-11 11:01:48] [Rank 0] step:41/10000 train_time:1869ms step_avg:45.59ms +[2025-09-11 11:01:49] [Rank 0] step:61/10000 train_time:2601ms step_avg:42.64ms +[2025-09-11 11:01:49] [Rank 0] step:61/10000 train_time:2601ms step_avg:42.64ms +[2025-09-11 11:01:50] [Rank 0] step:81/10000 train_time:3332ms step_avg:41.14ms +[2025-09-11 11:01:50] [Rank 0] step:81/10000 train_time:3332ms step_avg:41.14ms +[2025-09-11 11:01:50] [Rank 0] step:101/10000 train_time:4063ms step_avg:40.23ms +[2025-09-11 11:01:50] [Rank 0] step:101/10000 train_time:4063ms step_avg:40.23ms +[2025-09-11 11:01:51] [Rank 0] step:121/10000 train_time:4796ms step_avg:39.63ms +[2025-09-11 11:01:51] [Rank 0] step:121/10000 train_time:4796ms step_avg:39.63ms +[2025-09-11 11:01:52] [Rank 0] step:141/10000 train_time:5527ms step_avg:39.20ms +[2025-09-11 11:01:52] [Rank 0] step:141/10000 train_time:5527ms step_avg:39.20ms +[2025-09-11 11:01:53] [Rank 0] step:161/10000 train_time:6258ms step_avg:38.87ms +[2025-09-11 11:01:53] [Rank 0] step:161/10000 train_time:6258ms step_avg:38.87ms +[2025-09-11 11:01:53] [Rank 0] step:181/10000 train_time:6989ms step_avg:38.62ms +[2025-09-11 11:01:53] [Rank 0] step:181/10000 train_time:6989ms step_avg:38.62ms +[2025-09-11 11:01:54] [Rank 0] step:201/10000 train_time:7722ms step_avg:38.42ms +[2025-09-11 11:01:54] [Rank 0] step:201/10000 train_time:7722ms step_avg:38.42ms +[2025-09-11 11:01:55] [Rank 0] step:221/10000 train_time:8454ms step_avg:38.25ms +[2025-09-11 11:01:55] [Rank 0] step:221/10000 train_time:8454ms step_avg:38.25ms +[2025-09-11 11:01:55] [Rank 0] step:241/10000 train_time:9184ms step_avg:38.11ms +[2025-09-11 11:01:55] [Rank 0] step:241/10000 train_time:9184ms step_avg:38.11ms +[2025-09-11 11:01:56] [Rank 0] 
step:261/10000 train_time:9915ms step_avg:37.99ms +[2025-09-11 11:01:56] [Rank 0] step:261/10000 train_time:9915ms step_avg:37.99ms +[2025-09-11 11:01:57] [Rank 0] step:281/10000 train_time:10646ms step_avg:37.89ms +[2025-09-11 11:01:57] [Rank 0] step:281/10000 train_time:10646ms step_avg:37.89ms +[2025-09-11 11:01:58] [Rank 0] step:301/10000 train_time:11378ms step_avg:37.80ms +[2025-09-11 11:01:58] [Rank 0] step:301/10000 train_time:11378ms step_avg:37.80ms +[2025-09-11 11:01:58] [Rank 0] step:321/10000 train_time:12110ms step_avg:37.72ms +[2025-09-11 11:01:58] [Rank 0] step:321/10000 train_time:12110ms step_avg:37.72ms +[2025-09-11 11:01:59] [Rank 0] step:341/10000 train_time:12841ms step_avg:37.66ms +[2025-09-11 11:01:59] [Rank 0] step:341/10000 train_time:12841ms step_avg:37.66ms +[2025-09-11 11:02:00] [Rank 0] step:361/10000 train_time:13572ms step_avg:37.60ms +[2025-09-11 11:02:00] [Rank 0] step:361/10000 train_time:13572ms step_avg:37.60ms +[2025-09-11 11:02:01] [Rank 0] step:381/10000 train_time:14303ms step_avg:37.54ms +[2025-09-11 11:02:01] [Rank 0] step:381/10000 train_time:14303ms step_avg:37.54ms +[2025-09-11 11:02:01] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 11:02:01] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 11:02:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 11:02:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 11:02:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 11:02:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 11:02:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:02:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 11:02:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 11:02:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 11:02:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 11:02:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 11:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 11:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 11:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 11:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 11:02:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 11:02:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 11:02:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 11:02:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 11:02:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 11:02:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 11:02:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 11:02:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 11:02:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 11:02:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 11:02:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 11:02:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 11:02:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 11:02:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 11:02:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 11:02:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 11:02:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 11:02:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 11:02:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 11:02:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 11:02:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 11:02:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 11:02:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 11:02:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 11:02:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 11:02:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 11:02:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 11:02:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 11:02:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:02:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:02:47] [Rank 0] PRINT: step:400/10000 val_loss:6.2956 total_sharp:8.8069e-04 L1_sharp:1.3419e-01 L2_sharp:1.3714e-01 L3_sharp:1.2786e-01 L4_sharp:1.6671e-01 L5_sharp:1.6666e-01 L6_sharp:1.5652e-01 L7_sharp:1.7546e-01 L8_sharp:1.9126e-01 L9_sharp:2.0266e-01 L10_sharp:3.5327e-01 L11_sharp:4.2865e-01 L12_sharp:5.2863e-01 total_fnorm:4.0028e+01 total_l1_linf:9.5207e+04 total_spectral:2.0018e+01 L1_fnorm:1.1922e-01 L2_fnorm:1.1901e-01 L3_fnorm:1.1850e-01 L4_fnorm:1.1761e-01 L5_fnorm:1.1706e-01 L6_fnorm:1.1665e-01 L7_fnorm:1.1672e-01 L8_fnorm:1.1552e-01 L9_fnorm:1.1529e-01 L10_fnorm:1.1444e-01 L11_fnorm:1.1392e-01 L12_fnorm:1.1309e-01 L1_l1linf:4.4601e-02 L2_l1linf:4.4886e-02 L3_l1linf:4.4492e-02 L4_l1linf:4.4937e-02 L5_l1linf:4.4573e-02 L6_l1linf:4.4099e-02 L7_l1linf:4.3811e-02 L8_l1linf:4.3946e-02 L9_l1linf:4.3654e-02 L10_l1linf:4.3654e-02 L11_l1linf:4.3254e-02 L12_l1linf:4.2719e-02 L1_spectral:1.2050e-03 L2_spectral:1.2049e-03 L3_spectral:1.2051e-03 L4_spectral:1.2047e-03 L5_spectral:1.2047e-03 L6_spectral:1.2052e-03 L7_spectral:1.2050e-03 L8_spectral:1.2067e-03 L9_spectral:1.2049e-03 L10_spectral:1.2054e-03 L11_spectral:1.2054e-03 L12_spectral:1.2048e-03 train_time:15013ms step_avg:37.53ms +[2025-09-11 11:02:47] [Rank 0] PRINT: step:400/10000 val_loss:6.2956 total_sharp:8.8069e-04 L1_sharp:1.3419e-01 L2_sharp:1.3714e-01 L3_sharp:1.2786e-01 L4_sharp:1.6671e-01 L5_sharp:1.6666e-01 L6_sharp:1.5652e-01 L7_sharp:1.7546e-01 L8_sharp:1.9126e-01 L9_sharp:2.0266e-01 L10_sharp:3.5327e-01 L11_sharp:4.2865e-01 L12_sharp:5.2863e-01 total_fnorm:4.0028e+01 total_l1_linf:9.5207e+04 total_spectral:2.0018e+01 L1_fnorm:1.1922e-01 L2_fnorm:1.1901e-01 L3_fnorm:1.1850e-01 L4_fnorm:1.1761e-01 L5_fnorm:1.1706e-01 L6_fnorm:1.1665e-01 L7_fnorm:1.1672e-01 L8_fnorm:1.1552e-01 L9_fnorm:1.1529e-01 L10_fnorm:1.1444e-01 L11_fnorm:1.1392e-01 L12_fnorm:1.1309e-01 L1_l1linf:4.4601e-02 L2_l1linf:4.4886e-02 L3_l1linf:4.4492e-02 L4_l1linf:4.4937e-02 L5_l1linf:4.4573e-02 
L6_l1linf:4.4099e-02 L7_l1linf:4.3811e-02 L8_l1linf:4.3946e-02 L9_l1linf:4.3654e-02 L10_l1linf:4.3654e-02 L11_l1linf:4.3254e-02 L12_l1linf:4.2719e-02 L1_spectral:1.2050e-03 L2_spectral:1.2049e-03 L3_spectral:1.2051e-03 L4_spectral:1.2047e-03 L5_spectral:1.2047e-03 L6_spectral:1.2052e-03 L7_spectral:1.2050e-03 L8_spectral:1.2067e-03 L9_spectral:1.2049e-03 L10_spectral:1.2054e-03 L11_spectral:1.2054e-03 L12_spectral:1.2048e-03 train_time:15013ms step_avg:37.53ms +[2025-09-11 11:03:17] [Rank 0] step:401/10000 train_time:45459ms step_avg:113.36ms +[2025-09-11 11:03:17] [Rank 0] step:401/10000 train_time:45459ms step_avg:113.36ms +[2025-09-11 11:03:19] [Rank 0] step:421/10000 train_time:47469ms step_avg:112.75ms +[2025-09-11 11:03:19] [Rank 0] step:421/10000 train_time:47469ms step_avg:112.75ms +[2025-09-11 11:03:20] [Rank 0] step:441/10000 train_time:48113ms step_avg:109.10ms +[2025-09-11 11:03:20] [Rank 0] step:441/10000 train_time:48113ms step_avg:109.10ms +[2025-09-11 11:03:21] [Rank 0] step:461/10000 train_time:48758ms step_avg:105.77ms +[2025-09-11 11:03:21] [Rank 0] step:461/10000 train_time:48758ms step_avg:105.77ms +[2025-09-11 11:03:21] [Rank 0] step:481/10000 train_time:49399ms step_avg:102.70ms +[2025-09-11 11:03:21] [Rank 0] step:481/10000 train_time:49399ms step_avg:102.70ms +[2025-09-11 11:03:22] [Rank 0] step:501/10000 train_time:50043ms step_avg:99.89ms +[2025-09-11 11:03:22] [Rank 0] step:501/10000 train_time:50043ms step_avg:99.89ms +[2025-09-11 11:03:22] [Rank 0] step:521/10000 train_time:50686ms step_avg:97.29ms +[2025-09-11 11:03:22] [Rank 0] step:521/10000 train_time:50686ms step_avg:97.29ms +[2025-09-11 11:03:23] [Rank 0] step:541/10000 train_time:51330ms step_avg:94.88ms +[2025-09-11 11:03:23] [Rank 0] step:541/10000 train_time:51330ms step_avg:94.88ms +[2025-09-11 11:03:24] [Rank 0] step:561/10000 train_time:51973ms step_avg:92.64ms +[2025-09-11 11:03:24] [Rank 0] step:561/10000 train_time:51973ms step_avg:92.64ms +[2025-09-11 11:03:24] [Rank 
0] step:581/10000 train_time:52615ms step_avg:90.56ms +[2025-09-11 11:03:24] [Rank 0] step:581/10000 train_time:52615ms step_avg:90.56ms +[2025-09-11 11:03:25] [Rank 0] step:601/10000 train_time:53258ms step_avg:88.62ms +[2025-09-11 11:03:25] [Rank 0] step:601/10000 train_time:53258ms step_avg:88.62ms +[2025-09-11 11:03:26] [Rank 0] step:621/10000 train_time:53901ms step_avg:86.80ms +[2025-09-11 11:03:26] [Rank 0] step:621/10000 train_time:53901ms step_avg:86.80ms +[2025-09-11 11:03:26] [Rank 0] step:641/10000 train_time:54543ms step_avg:85.09ms +[2025-09-11 11:03:26] [Rank 0] step:641/10000 train_time:54543ms step_avg:85.09ms +[2025-09-11 11:03:27] [Rank 0] step:661/10000 train_time:55186ms step_avg:83.49ms +[2025-09-11 11:03:27] [Rank 0] step:661/10000 train_time:55186ms step_avg:83.49ms +[2025-09-11 11:03:28] [Rank 0] step:681/10000 train_time:55829ms step_avg:81.98ms +[2025-09-11 11:03:28] [Rank 0] step:681/10000 train_time:55829ms step_avg:81.98ms +[2025-09-11 11:03:28] [Rank 0] step:701/10000 train_time:56471ms step_avg:80.56ms +[2025-09-11 11:03:28] [Rank 0] step:701/10000 train_time:56471ms step_avg:80.56ms +[2025-09-11 11:03:29] [Rank 0] step:721/10000 train_time:57114ms step_avg:79.21ms +[2025-09-11 11:03:29] [Rank 0] step:721/10000 train_time:57114ms step_avg:79.21ms +[2025-09-11 11:03:30] [Rank 0] step:741/10000 train_time:57756ms step_avg:77.94ms +[2025-09-11 11:03:30] [Rank 0] step:741/10000 train_time:57756ms step_avg:77.94ms +[2025-09-11 11:03:30] [Rank 0] step:761/10000 train_time:58404ms step_avg:76.75ms +[2025-09-11 11:03:30] [Rank 0] step:761/10000 train_time:58404ms step_avg:76.75ms +[2025-09-11 11:03:31] [Rank 0] step:781/10000 train_time:59054ms step_avg:75.61ms +[2025-09-11 11:03:31] [Rank 0] step:781/10000 train_time:59054ms step_avg:75.61ms +[2025-09-11 11:03:31] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 11:03:31] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 11:03:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 11:03:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 11:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 11:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 11:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 11:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 11:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 11:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 11:04:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 11:04:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 11:04:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 11:04:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 11:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 11:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 11:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 11:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 11:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 11:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 11:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 11:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 11:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 11:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 11:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 11:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 11:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 11:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 11:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 11:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 11:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 11:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 11:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 11:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 11:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 11:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 11:04:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 11:04:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 11:04:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... 
+[2025-09-11 11:04:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 11:04:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 11:04:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 11:04:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:04:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:04:17] [Rank 0] PRINT: step:800/10000 val_loss:5.8145 total_sharp:9.7125e-04 L1_sharp:2.8050e-01 L2_sharp:2.5837e-01 L3_sharp:2.7780e-01 L4_sharp:2.9362e-01 L5_sharp:3.2968e-01 L6_sharp:3.1730e-01 L7_sharp:3.6152e-01 L8_sharp:4.8205e-01 L9_sharp:6.3282e-01 L10_sharp:9.8074e-01 L11_sharp:8.6644e-01 L12_sharp:8.5040e-01 total_fnorm:3.8500e+01 total_l1_linf:7.2192e+04 total_spectral:1.9250e+01 L1_fnorm:1.0840e-01 L2_fnorm:1.1328e-01 L3_fnorm:1.1328e-01 L4_fnorm:1.1377e-01 L5_fnorm:1.1084e-01 L6_fnorm:1.1328e-01 L7_fnorm:1.1377e-01 L8_fnorm:1.0938e-01 L9_fnorm:1.1182e-01 L10_fnorm:1.0938e-01 L11_fnorm:1.0596e-01 L12_fnorm:9.8633e-02 L1_l1linf:4.1748e-02 L2_l1linf:4.2480e-02 L3_l1linf:4.1992e-02 L4_l1linf:4.2480e-02 L5_l1linf:4.1748e-02 L6_l1linf:4.1748e-02 L7_l1linf:4.1504e-02 L8_l1linf:4.1504e-02 L9_l1linf:4.1504e-02 L10_l1linf:4.0283e-02 L11_l1linf:3.9062e-02 L12_l1linf:3.6865e-02 L1_spectral:1.5736e-03 L2_spectral:1.6110e-03 L3_spectral:1.6071e-03 L4_spectral:1.6082e-03 L5_spectral:1.5979e-03 L6_spectral:1.5935e-03 L7_spectral:1.5809e-03 L8_spectral:1.5692e-03 L9_spectral:1.5789e-03 L10_spectral:1.5806e-03 L11_spectral:1.5740e-03 L12_spectral:1.5350e-03 train_time:59683ms step_avg:74.60ms +[2025-09-11 11:04:17] [Rank 0] PRINT: step:800/10000 val_loss:5.8145 total_sharp:9.7125e-04 L1_sharp:2.8050e-01 L2_sharp:2.5837e-01 L3_sharp:2.7780e-01 L4_sharp:2.9362e-01 L5_sharp:3.2968e-01 L6_sharp:3.1730e-01 L7_sharp:3.6152e-01 L8_sharp:4.8205e-01 
L9_sharp:6.3282e-01 L10_sharp:9.8074e-01 L11_sharp:8.6644e-01 L12_sharp:8.5040e-01 total_fnorm:3.8500e+01 total_l1_linf:7.2192e+04 total_spectral:1.9250e+01 L1_fnorm:1.0840e-01 L2_fnorm:1.1328e-01 L3_fnorm:1.1328e-01 L4_fnorm:1.1377e-01 L5_fnorm:1.1084e-01 L6_fnorm:1.1328e-01 L7_fnorm:1.1377e-01 L8_fnorm:1.0938e-01 L9_fnorm:1.1182e-01 L10_fnorm:1.0938e-01 L11_fnorm:1.0596e-01 L12_fnorm:9.8633e-02 L1_l1linf:4.1748e-02 L2_l1linf:4.2480e-02 L3_l1linf:4.1992e-02 L4_l1linf:4.2480e-02 L5_l1linf:4.1748e-02 L6_l1linf:4.1748e-02 L7_l1linf:4.1504e-02 L8_l1linf:4.1504e-02 L9_l1linf:4.1504e-02 L10_l1linf:4.0283e-02 L11_l1linf:3.9062e-02 L12_l1linf:3.6865e-02 L1_spectral:1.5736e-03 L2_spectral:1.6110e-03 L3_spectral:1.6071e-03 L4_spectral:1.6082e-03 L5_spectral:1.5979e-03 L6_spectral:1.5935e-03 L7_spectral:1.5809e-03 L8_spectral:1.5692e-03 L9_spectral:1.5789e-03 L10_spectral:1.5806e-03 L11_spectral:1.5740e-03 L12_spectral:1.5350e-03 train_time:59683ms step_avg:74.60ms +[2025-09-11 11:04:18] [Rank 0] step:801/10000 train_time:60916ms step_avg:76.05ms +[2025-09-11 11:04:18] [Rank 0] step:801/10000 train_time:60916ms step_avg:76.05ms +[2025-09-11 11:04:19] [Rank 0] step:821/10000 train_time:61595ms step_avg:75.02ms +[2025-09-11 11:04:19] [Rank 0] step:821/10000 train_time:61595ms step_avg:75.02ms +[2025-09-11 11:04:19] [Rank 0] step:841/10000 train_time:62243ms step_avg:74.01ms +[2025-09-11 11:04:19] [Rank 0] step:841/10000 train_time:62243ms step_avg:74.01ms +[2025-09-11 11:04:20] [Rank 0] step:861/10000 train_time:62891ms step_avg:73.04ms +[2025-09-11 11:04:20] [Rank 0] step:861/10000 train_time:62891ms step_avg:73.04ms +[2025-09-11 11:04:21] [Rank 0] step:881/10000 train_time:63540ms step_avg:72.12ms +[2025-09-11 11:04:21] [Rank 0] step:881/10000 train_time:63540ms step_avg:72.12ms +[2025-09-11 11:04:21] [Rank 0] step:901/10000 train_time:64187ms step_avg:71.24ms +[2025-09-11 11:04:21] [Rank 0] step:901/10000 train_time:64187ms step_avg:71.24ms +[2025-09-11 11:04:22] [Rank 0] 
step:921/10000 train_time:64835ms step_avg:70.40ms +[2025-09-11 11:04:22] [Rank 0] step:921/10000 train_time:64835ms step_avg:70.40ms +[2025-09-11 11:04:23] [Rank 0] step:941/10000 train_time:65483ms step_avg:69.59ms +[2025-09-11 11:04:23] [Rank 0] step:941/10000 train_time:65483ms step_avg:69.59ms +[2025-09-11 11:04:23] [Rank 0] step:961/10000 train_time:66130ms step_avg:68.81ms +[2025-09-11 11:04:23] [Rank 0] step:961/10000 train_time:66130ms step_avg:68.81ms +[2025-09-11 11:04:24] [Rank 0] step:981/10000 train_time:66778ms step_avg:68.07ms +[2025-09-11 11:04:24] [Rank 0] step:981/10000 train_time:66778ms step_avg:68.07ms +[2025-09-11 11:04:24] [Rank 0] step:1001/10000 train_time:67426ms step_avg:67.36ms +[2025-09-11 11:04:24] [Rank 0] step:1001/10000 train_time:67426ms step_avg:67.36ms +[2025-09-11 11:04:25] [Rank 0] step:1021/10000 train_time:68073ms step_avg:66.67ms +[2025-09-11 11:04:25] [Rank 0] step:1021/10000 train_time:68073ms step_avg:66.67ms +[2025-09-11 11:04:26] [Rank 0] step:1041/10000 train_time:68721ms step_avg:66.01ms +[2025-09-11 11:04:26] [Rank 0] step:1041/10000 train_time:68721ms step_avg:66.01ms +[2025-09-11 11:04:26] [Rank 0] step:1061/10000 train_time:69370ms step_avg:65.38ms +[2025-09-11 11:04:26] [Rank 0] step:1061/10000 train_time:69370ms step_avg:65.38ms +[2025-09-11 11:04:27] [Rank 0] step:1081/10000 train_time:70020ms step_avg:64.77ms +[2025-09-11 11:04:27] [Rank 0] step:1081/10000 train_time:70020ms step_avg:64.77ms +[2025-09-11 11:04:28] [Rank 0] step:1101/10000 train_time:70668ms step_avg:64.19ms +[2025-09-11 11:04:28] [Rank 0] step:1101/10000 train_time:70668ms step_avg:64.19ms +[2025-09-11 11:04:28] [Rank 0] step:1121/10000 train_time:71316ms step_avg:63.62ms +[2025-09-11 11:04:28] [Rank 0] step:1121/10000 train_time:71316ms step_avg:63.62ms +[2025-09-11 11:04:29] [Rank 0] step:1141/10000 train_time:71963ms step_avg:63.07ms +[2025-09-11 11:04:29] [Rank 0] step:1141/10000 train_time:71963ms step_avg:63.07ms +[2025-09-11 11:04:30] 
[Rank 0] step:1161/10000 train_time:72614ms step_avg:62.54ms +[2025-09-11 11:04:30] [Rank 0] step:1161/10000 train_time:72614ms step_avg:62.54ms +[2025-09-11 11:04:30] [Rank 0] step:1181/10000 train_time:73261ms step_avg:62.03ms +[2025-09-11 11:04:30] [Rank 0] step:1181/10000 train_time:73261ms step_avg:62.03ms +[2025-09-11 11:04:31] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 11:04:31] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 11:04:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 11:04:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 11:04:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 11:04:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 11:04:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:04:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:04:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 11:04:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 11:04:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 11:04:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 11:04:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 11:04:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 11:04:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... 
+[2025-09-11 11:04:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 11:04:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 11:04:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 11:04:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 11:04:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 11:04:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 11:04:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 11:04:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 11:04:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 11:04:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 11:04:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 11:04:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 11:04:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 11:04:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 11:04:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 11:04:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 11:04:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 11:04:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 11:04:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 11:04:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... 
+[2025-09-11 11:04:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 11:04:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 11:04:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 11:04:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 11:04:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 11:04:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 11:04:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 11:04:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 11:04:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 11:04:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:04:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:04:41] [Rank 0] PRINT: step:1200/10000 val_loss:5.5158 total_sharp:6.9914e-04 L1_sharp:2.1955e-01 L2_sharp:2.2268e-01 L3_sharp:2.3700e-01 L4_sharp:2.4368e-01 L5_sharp:2.9262e-01 L6_sharp:2.5705e-01 L7_sharp:2.4118e-01 L8_sharp:2.3072e-01 L9_sharp:2.2500e-01 L10_sharp:2.7804e-01 L11_sharp:3.2216e-01 L12_sharp:1.1322e+00 total_fnorm:3.9000e+01 total_l1_linf:6.9120e+04 total_spectral:1.9375e+01 L1_fnorm:1.1523e-01 L2_fnorm:1.1670e-01 L3_fnorm:1.1670e-01 L4_fnorm:1.1621e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1621e-01 L7_fnorm:1.1670e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1670e-01 L10_fnorm:1.1572e-01 L11_fnorm:1.1621e-01 L12_fnorm:1.1230e-01 L1_l1linf:4.1016e-02 L2_l1linf:4.0771e-02 L3_l1linf:4.0771e-02 L4_l1linf:4.0283e-02 L5_l1linf:4.0283e-02 L6_l1linf:4.0039e-02 L7_l1linf:4.0039e-02 L8_l1linf:4.0283e-02 L9_l1linf:4.0527e-02 L10_l1linf:4.0771e-02 L11_l1linf:4.0771e-02 L12_l1linf:4.0039e-02 L1_spectral:1.6107e-03 L2_spectral:1.6006e-03 L3_spectral:1.6031e-03 L4_spectral:1.5969e-03 L5_spectral:1.5965e-03 L6_spectral:1.5919e-03 L7_spectral:1.6121e-03 L8_spectral:1.5917e-03 L9_spectral:1.6104e-03 L10_spectral:1.5905e-03 L11_spectral:1.5965e-03 L12_spectral:1.5841e-03 train_time:73890ms step_avg:61.58ms +[2025-09-11 11:04:41] [Rank 0] PRINT: step:1200/10000 val_loss:5.5158 total_sharp:6.9914e-04 L1_sharp:2.1955e-01 L2_sharp:2.2268e-01 L3_sharp:2.3700e-01 L4_sharp:2.4368e-01 L5_sharp:2.9262e-01 L6_sharp:2.5705e-01 L7_sharp:2.4118e-01 L8_sharp:2.3072e-01 L9_sharp:2.2500e-01 L10_sharp:2.7804e-01 L11_sharp:3.2216e-01 L12_sharp:1.1322e+00 total_fnorm:3.9000e+01 total_l1_linf:6.9120e+04 total_spectral:1.9375e+01 L1_fnorm:1.1523e-01 L2_fnorm:1.1670e-01 L3_fnorm:1.1670e-01 L4_fnorm:1.1621e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1621e-01 L7_fnorm:1.1670e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1670e-01 L10_fnorm:1.1572e-01 L11_fnorm:1.1621e-01 L12_fnorm:1.1230e-01 L1_l1linf:4.1016e-02 L2_l1linf:4.0771e-02 L3_l1linf:4.0771e-02 L4_l1linf:4.0283e-02 L5_l1linf:4.0283e-02 
L6_l1linf:4.0039e-02 L7_l1linf:4.0039e-02 L8_l1linf:4.0283e-02 L9_l1linf:4.0527e-02 L10_l1linf:4.0771e-02 L11_l1linf:4.0771e-02 L12_l1linf:4.0039e-02 L1_spectral:1.6107e-03 L2_spectral:1.6006e-03 L3_spectral:1.6031e-03 L4_spectral:1.5969e-03 L5_spectral:1.5965e-03 L6_spectral:1.5919e-03 L7_spectral:1.6121e-03 L8_spectral:1.5917e-03 L9_spectral:1.6104e-03 L10_spectral:1.5905e-03 L11_spectral:1.5965e-03 L12_spectral:1.5841e-03 train_time:73890ms step_avg:61.58ms +[2025-09-11 11:04:42] [Rank 0] step:1201/10000 train_time:75125ms step_avg:62.55ms +[2025-09-11 11:04:42] [Rank 0] step:1201/10000 train_time:75125ms step_avg:62.55ms +[2025-09-11 11:04:43] [Rank 0] step:1221/10000 train_time:76104ms step_avg:62.33ms +[2025-09-11 11:04:43] [Rank 0] step:1221/10000 train_time:76104ms step_avg:62.33ms +[2025-09-11 11:04:44] [Rank 0] step:1241/10000 train_time:76753ms step_avg:61.85ms +[2025-09-11 11:04:44] [Rank 0] step:1241/10000 train_time:76753ms step_avg:61.85ms +[2025-09-11 11:04:44] [Rank 0] step:1261/10000 train_time:77401ms step_avg:61.38ms +[2025-09-11 11:04:44] [Rank 0] step:1261/10000 train_time:77401ms step_avg:61.38ms +[2025-09-11 11:04:45] [Rank 0] step:1281/10000 train_time:78050ms step_avg:60.93ms +[2025-09-11 11:04:45] [Rank 0] step:1281/10000 train_time:78050ms step_avg:60.93ms +[2025-09-11 11:04:45] [Rank 0] step:1301/10000 train_time:78697ms step_avg:60.49ms +[2025-09-11 11:04:45] [Rank 0] step:1301/10000 train_time:78697ms step_avg:60.49ms +[2025-09-11 11:04:46] [Rank 0] step:1321/10000 train_time:79345ms step_avg:60.06ms +[2025-09-11 11:04:46] [Rank 0] step:1321/10000 train_time:79345ms step_avg:60.06ms +[2025-09-11 11:04:47] [Rank 0] step:1341/10000 train_time:79993ms step_avg:59.65ms +[2025-09-11 11:04:47] [Rank 0] step:1341/10000 train_time:79993ms step_avg:59.65ms +[2025-09-11 11:04:47] [Rank 0] step:1361/10000 train_time:80641ms step_avg:59.25ms +[2025-09-11 11:04:47] [Rank 0] step:1361/10000 train_time:80641ms step_avg:59.25ms +[2025-09-11 11:04:48] 
[Rank 0] step:1381/10000 train_time:81290ms step_avg:58.86ms +[2025-09-11 11:04:48] [Rank 0] step:1381/10000 train_time:81290ms step_avg:58.86ms +[2025-09-11 11:04:49] [Rank 0] step:1401/10000 train_time:81938ms step_avg:58.49ms +[2025-09-11 11:04:49] [Rank 0] step:1401/10000 train_time:81938ms step_avg:58.49ms +[2025-09-11 11:04:49] [Rank 0] step:1421/10000 train_time:82585ms step_avg:58.12ms +[2025-09-11 11:04:49] [Rank 0] step:1421/10000 train_time:82585ms step_avg:58.12ms +[2025-09-11 11:04:50] [Rank 0] step:1441/10000 train_time:83233ms step_avg:57.76ms +[2025-09-11 11:04:50] [Rank 0] step:1441/10000 train_time:83233ms step_avg:57.76ms +[2025-09-11 11:04:51] [Rank 0] step:1461/10000 train_time:83880ms step_avg:57.41ms +[2025-09-11 11:04:51] [Rank 0] step:1461/10000 train_time:83880ms step_avg:57.41ms +[2025-09-11 11:04:51] [Rank 0] step:1481/10000 train_time:84527ms step_avg:57.07ms +[2025-09-11 11:04:51] [Rank 0] step:1481/10000 train_time:84527ms step_avg:57.07ms +[2025-09-11 11:04:52] [Rank 0] step:1501/10000 train_time:85179ms step_avg:56.75ms +[2025-09-11 11:04:52] [Rank 0] step:1501/10000 train_time:85179ms step_avg:56.75ms +[2025-09-11 11:04:53] [Rank 0] step:1521/10000 train_time:85831ms step_avg:56.43ms +[2025-09-11 11:04:53] [Rank 0] step:1521/10000 train_time:85831ms step_avg:56.43ms +[2025-09-11 11:04:53] [Rank 0] step:1541/10000 train_time:86483ms step_avg:56.12ms +[2025-09-11 11:04:53] [Rank 0] step:1541/10000 train_time:86483ms step_avg:56.12ms +[2025-09-11 11:04:54] [Rank 0] step:1561/10000 train_time:87134ms step_avg:55.82ms +[2025-09-11 11:04:54] [Rank 0] step:1561/10000 train_time:87134ms step_avg:55.82ms +[2025-09-11 11:04:55] [Rank 0] step:1581/10000 train_time:87787ms step_avg:55.53ms +[2025-09-11 11:04:55] [Rank 0] step:1581/10000 train_time:87787ms step_avg:55.53ms +[2025-09-11 11:04:55] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 11:04:55] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 11:04:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 11:04:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 11:04:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 11:04:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 11:04:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:04:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:04:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 11:04:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 11:04:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 11:04:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 11:05:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 11:05:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 11:05:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 11:05:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 11:05:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 11:05:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 11:05:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 11:05:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 11:05:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 11:05:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 11:05:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 11:05:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 11:05:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 11:05:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 11:05:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 11:05:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 11:05:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 11:05:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 11:05:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 11:05:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 11:05:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 11:05:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 11:05:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 11:05:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 11:05:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 11:05:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 11:05:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 11:05:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 11:05:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 11:05:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 11:05:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 11:05:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 11:05:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:05:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:05:05] [Rank 0] PRINT: step:1600/10000 val_loss:5.3447 total_sharp:6.1669e-04 L1_sharp:1.3418e-01 L2_sharp:1.2356e-01 L3_sharp:1.5758e-01 L4_sharp:1.7420e-01 L5_sharp:2.0430e-01 L6_sharp:1.8961e-01 L7_sharp:1.7839e-01 L8_sharp:1.9866e-01 L9_sharp:2.2526e-01 L10_sharp:2.9182e-01 L11_sharp:4.0245e-01 L12_sharp:1.1498e+00 total_fnorm:3.7250e+01 total_l1_linf:6.2976e+04 total_spectral:1.8625e+01 L1_fnorm:1.1572e-01 L2_fnorm:1.1670e-01 L3_fnorm:1.1670e-01 L4_fnorm:1.1670e-01 L5_fnorm:1.1572e-01 L6_fnorm:1.1621e-01 L7_fnorm:1.1670e-01 L8_fnorm:1.1523e-01 L9_fnorm:1.1670e-01 L10_fnorm:1.1670e-01 L11_fnorm:1.1768e-01 L12_fnorm:1.1426e-01 L1_l1linf:3.8574e-02 L2_l1linf:3.7842e-02 L3_l1linf:3.8330e-02 L4_l1linf:3.7842e-02 L5_l1linf:3.7598e-02 L6_l1linf:3.7598e-02 L7_l1linf:3.7354e-02 L8_l1linf:3.7354e-02 L9_l1linf:3.7598e-02 L10_l1linf:3.8574e-02 L11_l1linf:3.9307e-02 L12_l1linf:3.8330e-02 L1_spectral:1.6058e-03 L2_spectral:1.6064e-03 L3_spectral:1.6147e-03 L4_spectral:1.5974e-03 L5_spectral:1.6061e-03 L6_spectral:1.5979e-03 L7_spectral:1.6066e-03 L8_spectral:1.6085e-03 L9_spectral:1.5981e-03 L10_spectral:1.6012e-03 L11_spectral:1.6084e-03 L12_spectral:1.6128e-03 train_time:88420ms step_avg:55.26ms +[2025-09-11 11:05:05] [Rank 0] PRINT: step:1600/10000 
val_loss:5.3447 total_sharp:6.1669e-04 L1_sharp:1.3418e-01 L2_sharp:1.2356e-01 L3_sharp:1.5758e-01 L4_sharp:1.7420e-01 L5_sharp:2.0430e-01 L6_sharp:1.8961e-01 L7_sharp:1.7839e-01 L8_sharp:1.9866e-01 L9_sharp:2.2526e-01 L10_sharp:2.9182e-01 L11_sharp:4.0245e-01 L12_sharp:1.1498e+00 total_fnorm:3.7250e+01 total_l1_linf:6.2976e+04 total_spectral:1.8625e+01 L1_fnorm:1.1572e-01 L2_fnorm:1.1670e-01 L3_fnorm:1.1670e-01 L4_fnorm:1.1670e-01 L5_fnorm:1.1572e-01 L6_fnorm:1.1621e-01 L7_fnorm:1.1670e-01 L8_fnorm:1.1523e-01 L9_fnorm:1.1670e-01 L10_fnorm:1.1670e-01 L11_fnorm:1.1768e-01 L12_fnorm:1.1426e-01 L1_l1linf:3.8574e-02 L2_l1linf:3.7842e-02 L3_l1linf:3.8330e-02 L4_l1linf:3.7842e-02 L5_l1linf:3.7598e-02 L6_l1linf:3.7598e-02 L7_l1linf:3.7354e-02 L8_l1linf:3.7354e-02 L9_l1linf:3.7598e-02 L10_l1linf:3.8574e-02 L11_l1linf:3.9307e-02 L12_l1linf:3.8330e-02 L1_spectral:1.6058e-03 L2_spectral:1.6064e-03 L3_spectral:1.6147e-03 L4_spectral:1.5974e-03 L5_spectral:1.6061e-03 L6_spectral:1.5979e-03 L7_spectral:1.6066e-03 L8_spectral:1.6085e-03 L9_spectral:1.5981e-03 L10_spectral:1.6012e-03 L11_spectral:1.6084e-03 L12_spectral:1.6128e-03 train_time:88420ms step_avg:55.26ms +[2025-09-11 11:05:06] [Rank 0] step:1601/10000 train_time:89669ms step_avg:56.01ms +[2025-09-11 11:05:06] [Rank 0] step:1601/10000 train_time:89669ms step_avg:56.01ms +[2025-09-11 11:05:07] [Rank 0] step:1621/10000 train_time:90324ms step_avg:55.72ms +[2025-09-11 11:05:07] [Rank 0] step:1621/10000 train_time:90324ms step_avg:55.72ms +[2025-09-11 11:05:07] [Rank 0] step:1641/10000 train_time:90976ms step_avg:55.44ms +[2025-09-11 11:05:07] [Rank 0] step:1641/10000 train_time:90976ms step_avg:55.44ms +[2025-09-11 11:05:08] [Rank 0] step:1661/10000 train_time:91628ms step_avg:55.16ms +[2025-09-11 11:05:08] [Rank 0] step:1661/10000 train_time:91628ms step_avg:55.16ms +[2025-09-11 11:05:09] [Rank 0] step:1681/10000 train_time:92281ms step_avg:54.90ms +[2025-09-11 11:05:09] [Rank 0] step:1681/10000 train_time:92281ms 
step_avg:54.90ms +[2025-09-11 11:05:09] [Rank 0] step:1701/10000 train_time:92933ms step_avg:54.63ms +[2025-09-11 11:05:09] [Rank 0] step:1701/10000 train_time:92933ms step_avg:54.63ms +[2025-09-11 11:05:10] [Rank 0] step:1721/10000 train_time:93585ms step_avg:54.38ms +[2025-09-11 11:05:10] [Rank 0] step:1721/10000 train_time:93585ms step_avg:54.38ms +[2025-09-11 11:05:11] [Rank 0] step:1741/10000 train_time:94236ms step_avg:54.13ms +[2025-09-11 11:05:11] [Rank 0] step:1741/10000 train_time:94236ms step_avg:54.13ms +[2025-09-11 11:05:11] [Rank 0] step:1761/10000 train_time:94887ms step_avg:53.88ms +[2025-09-11 11:05:11] [Rank 0] step:1761/10000 train_time:94887ms step_avg:53.88ms +[2025-09-11 11:05:12] [Rank 0] step:1781/10000 train_time:95539ms step_avg:53.64ms +[2025-09-11 11:05:12] [Rank 0] step:1781/10000 train_time:95539ms step_avg:53.64ms +[2025-09-11 11:05:13] [Rank 0] step:1801/10000 train_time:96190ms step_avg:53.41ms +[2025-09-11 11:05:13] [Rank 0] step:1801/10000 train_time:96190ms step_avg:53.41ms +[2025-09-11 11:05:13] [Rank 0] step:1821/10000 train_time:96842ms step_avg:53.18ms +[2025-09-11 11:05:13] [Rank 0] step:1821/10000 train_time:96842ms step_avg:53.18ms +[2025-09-11 11:05:14] [Rank 0] step:1841/10000 train_time:97493ms step_avg:52.96ms +[2025-09-11 11:05:14] [Rank 0] step:1841/10000 train_time:97493ms step_avg:52.96ms +[2025-09-11 11:05:15] [Rank 0] step:1861/10000 train_time:98145ms step_avg:52.74ms +[2025-09-11 11:05:15] [Rank 0] step:1861/10000 train_time:98145ms step_avg:52.74ms +[2025-09-11 11:05:15] [Rank 0] step:1881/10000 train_time:98796ms step_avg:52.52ms +[2025-09-11 11:05:15] [Rank 0] step:1881/10000 train_time:98796ms step_avg:52.52ms +[2025-09-11 11:05:16] [Rank 0] step:1901/10000 train_time:99448ms step_avg:52.31ms +[2025-09-11 11:05:16] [Rank 0] step:1901/10000 train_time:99448ms step_avg:52.31ms +[2025-09-11 11:05:17] [Rank 0] step:1921/10000 train_time:100102ms step_avg:52.11ms +[2025-09-11 11:05:17] [Rank 0] step:1921/10000 
train_time:100102ms step_avg:52.11ms +[2025-09-11 11:05:17] [Rank 0] step:1941/10000 train_time:100753ms step_avg:51.91ms +[2025-09-11 11:05:17] [Rank 0] step:1941/10000 train_time:100753ms step_avg:51.91ms +[2025-09-11 11:05:18] [Rank 0] step:1961/10000 train_time:101404ms step_avg:51.71ms +[2025-09-11 11:05:18] [Rank 0] step:1961/10000 train_time:101404ms step_avg:51.71ms +[2025-09-11 11:05:19] [Rank 0] step:1981/10000 train_time:102055ms step_avg:51.52ms +[2025-09-11 11:05:19] [Rank 0] step:1981/10000 train_time:102055ms step_avg:51.52ms +[2025-09-11 11:05:19] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 11:05:19] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 11:05:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 11:05:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 11:05:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 11:05:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 11:05:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:05:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:05:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 11:05:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 11:05:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 11:05:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 11:05:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 11:05:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 11:05:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 11:05:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 11:05:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 11:05:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 11:05:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 11:05:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 11:05:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 11:05:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 11:05:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 11:05:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 11:05:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 11:05:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 11:05:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 11:05:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 11:05:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 11:05:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 11:05:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 11:05:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 11:05:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 11:05:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 11:05:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 11:05:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 11:05:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 11:05:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 11:05:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 11:05:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 11:05:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 11:05:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 11:05:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 11:05:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 11:05:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:05:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:05:29] [Rank 0] PRINT: step:2000/10000 val_loss:5.2007 total_sharp:6.2425e-04 L1_sharp:6.4192e-02 L2_sharp:7.6115e-02 L3_sharp:8.4880e-02 L4_sharp:1.0436e-01 L5_sharp:1.3945e-01 L6_sharp:1.5790e-01 L7_sharp:1.7505e-01 L8_sharp:2.3901e-01 L9_sharp:2.7050e-01 L10_sharp:4.0170e-01 L11_sharp:1.1380e+00 L12_sharp:3.1840e+00 total_fnorm:3.6750e+01 total_l1_linf:6.3232e+04 total_spectral:1.8375e+01 L1_fnorm:1.1621e-01 L2_fnorm:1.1670e-01 L3_fnorm:1.1670e-01 L4_fnorm:1.1670e-01 L5_fnorm:1.1572e-01 L6_fnorm:1.1572e-01 L7_fnorm:1.1621e-01 L8_fnorm:1.1523e-01 L9_fnorm:1.1670e-01 L10_fnorm:1.1670e-01 L11_fnorm:1.1768e-01 L12_fnorm:1.1523e-01 L1_l1linf:3.6133e-02 L2_l1linf:3.5400e-02 L3_l1linf:3.6377e-02 L4_l1linf:3.5400e-02 L5_l1linf:3.5889e-02 L6_l1linf:3.5889e-02 L7_l1linf:3.5645e-02 L8_l1linf:3.5400e-02 L9_l1linf:3.5645e-02 L10_l1linf:3.6621e-02 L11_l1linf:3.8330e-02 L12_l1linf:3.6621e-02 L1_spectral:1.6088e-03 L2_spectral:1.6099e-03 L3_spectral:1.6046e-03 L4_spectral:1.6169e-03 L5_spectral:1.6063e-03 L6_spectral:1.5994e-03 L7_spectral:1.6118e-03 L8_spectral:1.6092e-03 L9_spectral:1.6156e-03 L10_spectral:1.5985e-03 L11_spectral:1.6010e-03 L12_spectral:1.6083e-03 train_time:102689ms step_avg:51.34ms +[2025-09-11 11:05:29] [Rank 0] PRINT: step:2000/10000 val_loss:5.2007 total_sharp:6.2425e-04 L1_sharp:6.4192e-02 L2_sharp:7.6115e-02 L3_sharp:8.4880e-02 L4_sharp:1.0436e-01 L5_sharp:1.3945e-01 L6_sharp:1.5790e-01 L7_sharp:1.7505e-01 L8_sharp:2.3901e-01 L9_sharp:2.7050e-01 L10_sharp:4.0170e-01 L11_sharp:1.1380e+00 L12_sharp:3.1840e+00 total_fnorm:3.6750e+01 total_l1_linf:6.3232e+04 total_spectral:1.8375e+01 L1_fnorm:1.1621e-01 L2_fnorm:1.1670e-01 L3_fnorm:1.1670e-01 L4_fnorm:1.1670e-01 L5_fnorm:1.1572e-01 L6_fnorm:1.1572e-01 L7_fnorm:1.1621e-01 L8_fnorm:1.1523e-01 L9_fnorm:1.1670e-01 L10_fnorm:1.1670e-01 L11_fnorm:1.1768e-01 L12_fnorm:1.1523e-01 L1_l1linf:3.6133e-02 L2_l1linf:3.5400e-02 L3_l1linf:3.6377e-02 L4_l1linf:3.5400e-02 L5_l1linf:3.5889e-02 
L6_l1linf:3.5889e-02 L7_l1linf:3.5645e-02 L8_l1linf:3.5400e-02 L9_l1linf:3.5645e-02 L10_l1linf:3.6621e-02 L11_l1linf:3.8330e-02 L12_l1linf:3.6621e-02 L1_spectral:1.6088e-03 L2_spectral:1.6099e-03 L3_spectral:1.6046e-03 L4_spectral:1.6169e-03 L5_spectral:1.6063e-03 L6_spectral:1.5994e-03 L7_spectral:1.6118e-03 L8_spectral:1.6092e-03 L9_spectral:1.6156e-03 L10_spectral:1.5985e-03 L11_spectral:1.6010e-03 L12_spectral:1.6083e-03 train_time:102689ms step_avg:51.34ms +[2025-09-11 11:05:30] [Rank 0] step:2001/10000 train_time:103946ms step_avg:51.95ms +[2025-09-11 11:05:30] [Rank 0] step:2001/10000 train_time:103946ms step_avg:51.95ms +[2025-09-11 11:05:31] [Rank 0] step:2021/10000 train_time:104600ms step_avg:51.76ms +[2025-09-11 11:05:31] [Rank 0] step:2021/10000 train_time:104600ms step_avg:51.76ms +[2025-09-11 11:05:32] [Rank 0] step:2041/10000 train_time:105251ms step_avg:51.57ms +[2025-09-11 11:05:32] [Rank 0] step:2041/10000 train_time:105251ms step_avg:51.57ms +[2025-09-11 11:05:32] [Rank 0] step:2061/10000 train_time:105902ms step_avg:51.38ms +[2025-09-11 11:05:32] [Rank 0] step:2061/10000 train_time:105902ms step_avg:51.38ms +[2025-09-11 11:05:33] [Rank 0] step:2081/10000 train_time:106554ms step_avg:51.20ms +[2025-09-11 11:05:33] [Rank 0] step:2081/10000 train_time:106554ms step_avg:51.20ms +[2025-09-11 11:05:34] [Rank 0] step:2101/10000 train_time:107206ms step_avg:51.03ms +[2025-09-11 11:05:34] [Rank 0] step:2101/10000 train_time:107206ms step_avg:51.03ms +[2025-09-11 11:05:34] [Rank 0] step:2121/10000 train_time:107857ms step_avg:50.85ms +[2025-09-11 11:05:34] [Rank 0] step:2121/10000 train_time:107857ms step_avg:50.85ms +[2025-09-11 11:05:35] [Rank 0] step:2141/10000 train_time:108508ms step_avg:50.68ms +[2025-09-11 11:05:35] [Rank 0] step:2141/10000 train_time:108508ms step_avg:50.68ms +[2025-09-11 11:05:36] [Rank 0] step:2161/10000 train_time:109159ms step_avg:50.51ms +[2025-09-11 11:05:36] [Rank 0] step:2161/10000 train_time:109159ms step_avg:50.51ms 
+[2025-09-11 11:05:36] [Rank 0] step:2181/10000 train_time:109809ms step_avg:50.35ms +[2025-09-11 11:05:36] [Rank 0] step:2181/10000 train_time:109809ms step_avg:50.35ms +[2025-09-11 11:05:37] [Rank 0] step:2201/10000 train_time:110460ms step_avg:50.19ms +[2025-09-11 11:05:37] [Rank 0] step:2201/10000 train_time:110460ms step_avg:50.19ms +[2025-09-11 11:05:37] [Rank 0] step:2221/10000 train_time:111110ms step_avg:50.03ms +[2025-09-11 11:05:37] [Rank 0] step:2221/10000 train_time:111110ms step_avg:50.03ms +[2025-09-11 11:05:38] [Rank 0] step:2241/10000 train_time:111774ms step_avg:49.88ms +[2025-09-11 11:05:38] [Rank 0] step:2241/10000 train_time:111774ms step_avg:49.88ms +[2025-09-11 11:05:39] [Rank 0] step:2261/10000 train_time:112438ms step_avg:49.73ms +[2025-09-11 11:05:39] [Rank 0] step:2261/10000 train_time:112438ms step_avg:49.73ms +[2025-09-11 11:05:39] [Rank 0] step:2281/10000 train_time:113103ms step_avg:49.58ms +[2025-09-11 11:05:39] [Rank 0] step:2281/10000 train_time:113103ms step_avg:49.58ms +[2025-09-11 11:05:40] [Rank 0] step:2301/10000 train_time:113767ms step_avg:49.44ms +[2025-09-11 11:05:40] [Rank 0] step:2301/10000 train_time:113767ms step_avg:49.44ms +[2025-09-11 11:05:41] [Rank 0] step:2321/10000 train_time:114432ms step_avg:49.30ms +[2025-09-11 11:05:41] [Rank 0] step:2321/10000 train_time:114432ms step_avg:49.30ms +[2025-09-11 11:05:41] [Rank 0] step:2341/10000 train_time:115096ms step_avg:49.17ms +[2025-09-11 11:05:41] [Rank 0] step:2341/10000 train_time:115096ms step_avg:49.17ms +[2025-09-11 11:05:42] [Rank 0] step:2361/10000 train_time:115761ms step_avg:49.03ms +[2025-09-11 11:05:42] [Rank 0] step:2361/10000 train_time:115761ms step_avg:49.03ms +[2025-09-11 11:05:43] [Rank 0] step:2381/10000 train_time:116424ms step_avg:48.90ms +[2025-09-11 11:05:43] [Rank 0] step:2381/10000 train_time:116424ms step_avg:48.90ms +[2025-09-11 11:05:44] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 11:05:44] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 11:05:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 11:05:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 11:05:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 11:05:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 11:05:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:05:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:05:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 11:05:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 11:05:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 11:05:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 11:05:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 11:05:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 11:05:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 11:05:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 11:05:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 11:05:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 11:05:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 11:05:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 11:05:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 11:05:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 11:05:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 11:05:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 11:05:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 11:05:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 11:05:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 11:05:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 11:05:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 11:05:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 11:05:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 11:05:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 11:05:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 11:05:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 11:05:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 11:05:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 11:05:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 11:05:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 11:05:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 11:05:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 11:05:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 11:05:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 11:05:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 11:05:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 11:05:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:05:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:05:53] [Rank 0] PRINT: step:2400/10000 val_loss:5.0702 total_sharp:5.5651e-04 L1_sharp:7.7031e-02 L2_sharp:8.8047e-02 L3_sharp:1.0423e-01 L4_sharp:1.5308e-01 L5_sharp:1.8101e-01 L6_sharp:2.0427e-01 L7_sharp:2.0138e-01 L8_sharp:2.2509e-01 L9_sharp:2.3011e-01 L10_sharp:2.8383e-01 L11_sharp:3.5111e-01 L12_sharp:1.4190e+00 total_fnorm:3.5250e+01 total_l1_linf:5.8112e+04 total_spectral:1.7625e+01 L1_fnorm:1.1621e-01 L2_fnorm:1.1621e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1621e-01 L5_fnorm:1.1572e-01 L6_fnorm:1.1572e-01 L7_fnorm:1.1670e-01 L8_fnorm:1.1523e-01 L9_fnorm:1.1621e-01 L10_fnorm:1.1670e-01 L11_fnorm:1.1621e-01 L12_fnorm:1.1621e-01 L1_l1linf:3.5156e-02 L2_l1linf:3.5400e-02 L3_l1linf:3.5400e-02 L4_l1linf:3.4668e-02 L5_l1linf:3.4912e-02 L6_l1linf:3.4668e-02 L7_l1linf:3.4668e-02 L8_l1linf:3.4424e-02 L9_l1linf:3.4424e-02 L10_l1linf:3.4668e-02 L11_l1linf:3.5889e-02 L12_l1linf:3.6133e-02 L1_spectral:1.6099e-03 L2_spectral:1.6225e-03 L3_spectral:1.6102e-03 L4_spectral:1.6012e-03 L5_spectral:1.6122e-03 L6_spectral:1.5952e-03 L7_spectral:1.6068e-03 L8_spectral:1.6005e-03 L9_spectral:1.6246e-03 L10_spectral:1.6020e-03 L11_spectral:1.6143e-03 L12_spectral:1.6193e-03 train_time:117365ms step_avg:48.90ms +[2025-09-11 11:05:53] [Rank 0] PRINT: step:2400/10000 
val_loss:5.0702 total_sharp:5.5651e-04 L1_sharp:7.7031e-02 L2_sharp:8.8047e-02 L3_sharp:1.0423e-01 L4_sharp:1.5308e-01 L5_sharp:1.8101e-01 L6_sharp:2.0427e-01 L7_sharp:2.0138e-01 L8_sharp:2.2509e-01 L9_sharp:2.3011e-01 L10_sharp:2.8383e-01 L11_sharp:3.5111e-01 L12_sharp:1.4190e+00 total_fnorm:3.5250e+01 total_l1_linf:5.8112e+04 total_spectral:1.7625e+01 L1_fnorm:1.1621e-01 L2_fnorm:1.1621e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1621e-01 L5_fnorm:1.1572e-01 L6_fnorm:1.1572e-01 L7_fnorm:1.1670e-01 L8_fnorm:1.1523e-01 L9_fnorm:1.1621e-01 L10_fnorm:1.1670e-01 L11_fnorm:1.1621e-01 L12_fnorm:1.1621e-01 L1_l1linf:3.5156e-02 L2_l1linf:3.5400e-02 L3_l1linf:3.5400e-02 L4_l1linf:3.4668e-02 L5_l1linf:3.4912e-02 L6_l1linf:3.4668e-02 L7_l1linf:3.4668e-02 L8_l1linf:3.4424e-02 L9_l1linf:3.4424e-02 L10_l1linf:3.4668e-02 L11_l1linf:3.5889e-02 L12_l1linf:3.6133e-02 L1_spectral:1.6099e-03 L2_spectral:1.6225e-03 L3_spectral:1.6102e-03 L4_spectral:1.6012e-03 L5_spectral:1.6122e-03 L6_spectral:1.5952e-03 L7_spectral:1.6068e-03 L8_spectral:1.6005e-03 L9_spectral:1.6246e-03 L10_spectral:1.6020e-03 L11_spectral:1.6143e-03 L12_spectral:1.6193e-03 train_time:117365ms step_avg:48.90ms +[2025-09-11 11:05:55] [Rank 0] step:2401/10000 train_time:118618ms step_avg:49.40ms +[2025-09-11 11:05:55] [Rank 0] step:2401/10000 train_time:118618ms step_avg:49.40ms +[2025-09-11 11:05:55] [Rank 0] step:2421/10000 train_time:119315ms step_avg:49.28ms +[2025-09-11 11:05:55] [Rank 0] step:2421/10000 train_time:119315ms step_avg:49.28ms +[2025-09-11 11:05:56] [Rank 0] step:2441/10000 train_time:119981ms step_avg:49.15ms +[2025-09-11 11:05:56] [Rank 0] step:2441/10000 train_time:119981ms step_avg:49.15ms +[2025-09-11 11:05:57] [Rank 0] step:2461/10000 train_time:120647ms step_avg:49.02ms +[2025-09-11 11:05:57] [Rank 0] step:2461/10000 train_time:120647ms step_avg:49.02ms +[2025-09-11 11:05:57] [Rank 0] step:2481/10000 train_time:121313ms step_avg:48.90ms +[2025-09-11 11:05:57] [Rank 0] step:2481/10000 
train_time:121313ms step_avg:48.90ms +[2025-09-11 11:05:58] [Rank 0] step:2501/10000 train_time:121978ms step_avg:48.77ms +[2025-09-11 11:05:58] [Rank 0] step:2501/10000 train_time:121978ms step_avg:48.77ms +[2025-09-11 11:05:59] [Rank 0] step:2521/10000 train_time:122643ms step_avg:48.65ms +[2025-09-11 11:05:59] [Rank 0] step:2521/10000 train_time:122643ms step_avg:48.65ms +[2025-09-11 11:05:59] [Rank 0] step:2541/10000 train_time:123309ms step_avg:48.53ms +[2025-09-11 11:05:59] [Rank 0] step:2541/10000 train_time:123309ms step_avg:48.53ms +[2025-09-11 11:06:00] [Rank 0] step:2561/10000 train_time:123973ms step_avg:48.41ms +[2025-09-11 11:06:00] [Rank 0] step:2561/10000 train_time:123973ms step_avg:48.41ms +[2025-09-11 11:06:01] [Rank 0] step:2581/10000 train_time:124639ms step_avg:48.29ms +[2025-09-11 11:06:01] [Rank 0] step:2581/10000 train_time:124639ms step_avg:48.29ms +[2025-09-11 11:06:01] [Rank 0] step:2601/10000 train_time:125304ms step_avg:48.18ms +[2025-09-11 11:06:01] [Rank 0] step:2601/10000 train_time:125304ms step_avg:48.18ms +[2025-09-11 11:06:02] [Rank 0] step:2621/10000 train_time:125968ms step_avg:48.06ms +[2025-09-11 11:06:02] [Rank 0] step:2621/10000 train_time:125968ms step_avg:48.06ms +[2025-09-11 11:06:03] [Rank 0] step:2641/10000 train_time:126634ms step_avg:47.95ms +[2025-09-11 11:06:03] [Rank 0] step:2641/10000 train_time:126634ms step_avg:47.95ms +[2025-09-11 11:06:03] [Rank 0] step:2661/10000 train_time:127300ms step_avg:47.84ms +[2025-09-11 11:06:03] [Rank 0] step:2661/10000 train_time:127300ms step_avg:47.84ms +[2025-09-11 11:06:04] [Rank 0] step:2681/10000 train_time:127965ms step_avg:47.73ms +[2025-09-11 11:06:04] [Rank 0] step:2681/10000 train_time:127965ms step_avg:47.73ms +[2025-09-11 11:06:05] [Rank 0] step:2701/10000 train_time:128631ms step_avg:47.62ms +[2025-09-11 11:06:05] [Rank 0] step:2701/10000 train_time:128631ms step_avg:47.62ms +[2025-09-11 11:06:05] [Rank 0] step:2721/10000 train_time:129296ms step_avg:47.52ms 
+[2025-09-11 11:06:05] [Rank 0] step:2721/10000 train_time:129296ms step_avg:47.52ms +[2025-09-11 11:06:06] [Rank 0] step:2741/10000 train_time:129961ms step_avg:47.41ms +[2025-09-11 11:06:06] [Rank 0] step:2741/10000 train_time:129961ms step_avg:47.41ms +[2025-09-11 11:06:07] [Rank 0] step:2761/10000 train_time:130626ms step_avg:47.31ms +[2025-09-11 11:06:07] [Rank 0] step:2761/10000 train_time:130626ms step_avg:47.31ms +[2025-09-11 11:06:07] [Rank 0] step:2781/10000 train_time:131291ms step_avg:47.21ms +[2025-09-11 11:06:07] [Rank 0] step:2781/10000 train_time:131291ms step_avg:47.21ms +[2025-09-11 11:06:08] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 11:06:08] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 11:06:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 11:06:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 11:06:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 11:06:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 11:06:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:06:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:06:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 11:06:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 11:06:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 11:06:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 11:06:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 11:06:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 11:06:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 11:06:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 11:06:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 11:06:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 11:06:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 11:06:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 11:06:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 11:06:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 11:06:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 11:06:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 11:06:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 11:06:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 11:06:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 11:06:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 11:06:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 11:06:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 11:06:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 11:06:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 11:06:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 11:06:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 11:06:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 11:06:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 11:06:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 11:06:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 11:06:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 11:06:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 11:06:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 11:06:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 11:06:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 11:06:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 11:06:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:06:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:06:18] [Rank 0] PRINT: step:2800/10000 val_loss:4.9733 total_sharp:5.4817e-04 L1_sharp:6.3894e-02 L2_sharp:7.2106e-02 L3_sharp:8.6901e-02 L4_sharp:1.1322e-01 L5_sharp:1.4155e-01 L6_sharp:1.5952e-01 L7_sharp:1.5743e-01 L8_sharp:1.9435e-01 L9_sharp:2.1021e-01 L10_sharp:2.8654e-01 L11_sharp:3.8833e-01 L12_sharp:9.6175e-01 total_fnorm:3.3000e+01 total_l1_linf:5.4016e+04 total_spectral:1.6625e+01 L1_fnorm:1.1621e-01 L2_fnorm:1.1621e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1621e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1572e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1572e-01 L10_fnorm:1.1572e-01 L11_fnorm:1.1670e-01 L12_fnorm:1.1621e-01 L1_l1linf:3.4180e-02 L2_l1linf:3.3936e-02 L3_l1linf:3.3691e-02 L4_l1linf:3.3936e-02 L5_l1linf:3.3691e-02 L6_l1linf:3.3203e-02 L7_l1linf:3.3203e-02 L8_l1linf:3.2959e-02 L9_l1linf:3.3447e-02 L10_l1linf:3.4180e-02 L11_l1linf:3.4668e-02 L12_l1linf:3.5889e-02 L1_spectral:1.6163e-03 L2_spectral:1.6016e-03 L3_spectral:1.6060e-03 L4_spectral:1.5983e-03 L5_spectral:1.5984e-03 L6_spectral:1.5974e-03 L7_spectral:1.6085e-03 L8_spectral:1.6120e-03 L9_spectral:1.6069e-03 L10_spectral:1.6132e-03 L11_spectral:1.6101e-03 L12_spectral:1.6292e-03 train_time:131938ms step_avg:47.12ms +[2025-09-11 11:06:18] [Rank 0] PRINT: step:2800/10000 val_loss:4.9733 total_sharp:5.4817e-04 L1_sharp:6.3894e-02 L2_sharp:7.2106e-02 L3_sharp:8.6901e-02 L4_sharp:1.1322e-01 L5_sharp:1.4155e-01 L6_sharp:1.5952e-01 L7_sharp:1.5743e-01 L8_sharp:1.9435e-01 L9_sharp:2.1021e-01 L10_sharp:2.8654e-01 L11_sharp:3.8833e-01 L12_sharp:9.6175e-01 total_fnorm:3.3000e+01 total_l1_linf:5.4016e+04 total_spectral:1.6625e+01 L1_fnorm:1.1621e-01 L2_fnorm:1.1621e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1621e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1572e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1572e-01 L10_fnorm:1.1572e-01 L11_fnorm:1.1670e-01 L12_fnorm:1.1621e-01 L1_l1linf:3.4180e-02 L2_l1linf:3.3936e-02 L3_l1linf:3.3691e-02 L4_l1linf:3.3936e-02 L5_l1linf:3.3691e-02 
L6_l1linf:3.3203e-02 L7_l1linf:3.3203e-02 L8_l1linf:3.2959e-02 L9_l1linf:3.3447e-02 L10_l1linf:3.4180e-02 L11_l1linf:3.4668e-02 L12_l1linf:3.5889e-02 L1_spectral:1.6163e-03 L2_spectral:1.6016e-03 L3_spectral:1.6060e-03 L4_spectral:1.5983e-03 L5_spectral:1.5984e-03 L6_spectral:1.5974e-03 L7_spectral:1.6085e-03 L8_spectral:1.6120e-03 L9_spectral:1.6069e-03 L10_spectral:1.6132e-03 L11_spectral:1.6101e-03 L12_spectral:1.6292e-03 train_time:131938ms step_avg:47.12ms +[2025-09-11 11:06:19] [Rank 0] step:2801/10000 train_time:133174ms step_avg:47.55ms +[2025-09-11 11:06:19] [Rank 0] step:2801/10000 train_time:133174ms step_avg:47.55ms +[2025-09-11 11:06:20] [Rank 0] step:2821/10000 train_time:133872ms step_avg:47.46ms +[2025-09-11 11:06:20] [Rank 0] step:2821/10000 train_time:133872ms step_avg:47.46ms +[2025-09-11 11:06:21] [Rank 0] step:2841/10000 train_time:134538ms step_avg:47.36ms +[2025-09-11 11:06:21] [Rank 0] step:2841/10000 train_time:134538ms step_avg:47.36ms +[2025-09-11 11:06:21] [Rank 0] step:2861/10000 train_time:135203ms step_avg:47.26ms +[2025-09-11 11:06:21] [Rank 0] step:2861/10000 train_time:135203ms step_avg:47.26ms +[2025-09-11 11:06:22] [Rank 0] step:2881/10000 train_time:135869ms step_avg:47.16ms +[2025-09-11 11:06:22] [Rank 0] step:2881/10000 train_time:135869ms step_avg:47.16ms +[2025-09-11 11:06:23] [Rank 0] step:2901/10000 train_time:136533ms step_avg:47.06ms +[2025-09-11 11:06:23] [Rank 0] step:2901/10000 train_time:136533ms step_avg:47.06ms +[2025-09-11 11:06:23] [Rank 0] step:2921/10000 train_time:137198ms step_avg:46.97ms +[2025-09-11 11:06:23] [Rank 0] step:2921/10000 train_time:137198ms step_avg:46.97ms +[2025-09-11 11:06:24] [Rank 0] step:2941/10000 train_time:137863ms step_avg:46.88ms +[2025-09-11 11:06:24] [Rank 0] step:2941/10000 train_time:137863ms step_avg:46.88ms +[2025-09-11 11:06:25] [Rank 0] step:2961/10000 train_time:138528ms step_avg:46.78ms +[2025-09-11 11:06:25] [Rank 0] step:2961/10000 train_time:138528ms step_avg:46.78ms 
+[2025-09-11 11:06:25] [Rank 0] step:2981/10000 train_time:139195ms step_avg:46.69ms +[2025-09-11 11:06:25] [Rank 0] step:2981/10000 train_time:139195ms step_avg:46.69ms +[2025-09-11 11:06:26] [Rank 0] step:3001/10000 train_time:139862ms step_avg:46.61ms +[2025-09-11 11:06:26] [Rank 0] step:3001/10000 train_time:139862ms step_avg:46.61ms +[2025-09-11 11:06:27] [Rank 0] step:3021/10000 train_time:140529ms step_avg:46.52ms +[2025-09-11 11:06:27] [Rank 0] step:3021/10000 train_time:140529ms step_avg:46.52ms +[2025-09-11 11:06:27] [Rank 0] step:3041/10000 train_time:141197ms step_avg:46.43ms +[2025-09-11 11:06:27] [Rank 0] step:3041/10000 train_time:141197ms step_avg:46.43ms +[2025-09-11 11:06:28] [Rank 0] step:3061/10000 train_time:141864ms step_avg:46.35ms +[2025-09-11 11:06:28] [Rank 0] step:3061/10000 train_time:141864ms step_avg:46.35ms +[2025-09-11 11:06:29] [Rank 0] step:3081/10000 train_time:142531ms step_avg:46.26ms +[2025-09-11 11:06:29] [Rank 0] step:3081/10000 train_time:142531ms step_avg:46.26ms +[2025-09-11 11:06:29] [Rank 0] step:3101/10000 train_time:143199ms step_avg:46.18ms +[2025-09-11 11:06:29] [Rank 0] step:3101/10000 train_time:143199ms step_avg:46.18ms +[2025-09-11 11:06:30] [Rank 0] step:3121/10000 train_time:143867ms step_avg:46.10ms +[2025-09-11 11:06:30] [Rank 0] step:3121/10000 train_time:143867ms step_avg:46.10ms +[2025-09-11 11:06:31] [Rank 0] step:3141/10000 train_time:144534ms step_avg:46.02ms +[2025-09-11 11:06:31] [Rank 0] step:3141/10000 train_time:144534ms step_avg:46.02ms +[2025-09-11 11:06:31] [Rank 0] step:3161/10000 train_time:145200ms step_avg:45.93ms +[2025-09-11 11:06:31] [Rank 0] step:3161/10000 train_time:145200ms step_avg:45.93ms +[2025-09-11 11:06:32] [Rank 0] step:3181/10000 train_time:145867ms step_avg:45.86ms +[2025-09-11 11:06:32] [Rank 0] step:3181/10000 train_time:145867ms step_avg:45.86ms +[2025-09-11 11:06:33] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 11:06:33] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 11:06:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 11:06:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 11:06:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 11:06:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 11:06:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:06:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:06:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 11:06:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 11:06:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 11:06:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 11:06:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 11:06:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 11:06:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 11:06:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 11:06:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 11:06:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 11:06:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 11:06:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 11:06:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 11:06:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 11:06:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 11:06:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 11:06:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 11:06:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 11:06:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 11:06:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 11:06:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 11:06:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 11:06:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 11:06:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 11:06:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 11:06:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 11:06:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 11:06:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 11:06:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 11:06:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 11:06:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 11:06:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 11:06:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 11:06:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 11:06:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 11:06:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 11:06:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:06:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:06:43] [Rank 0] PRINT: step:3200/10000 val_loss:4.8649 total_sharp:3.7571e-04 L1_sharp:5.5811e-02 L2_sharp:6.4838e-02 L3_sharp:8.0700e-02 L4_sharp:1.0719e-01 L5_sharp:1.1986e-01 L6_sharp:1.4518e-01 L7_sharp:1.5946e-01 L8_sharp:2.0573e-01 L9_sharp:2.1730e-01 L10_sharp:3.0627e-01 L11_sharp:3.6056e-01 L12_sharp:1.0727e+00 total_fnorm:3.8250e+01 total_l1_linf:6.5280e+04 total_spectral:1.9000e+01 L1_fnorm:1.1572e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1523e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1426e-01 L9_fnorm:1.1523e-01 L10_fnorm:1.1572e-01 L11_fnorm:1.1572e-01 L12_fnorm:1.1572e-01 L1_l1linf:3.1982e-02 L2_l1linf:3.2227e-02 L3_l1linf:3.2227e-02 L4_l1linf:3.2227e-02 L5_l1linf:3.2471e-02 L6_l1linf:3.1982e-02 L7_l1linf:3.1982e-02 L8_l1linf:3.2227e-02 L9_l1linf:3.1738e-02 L10_l1linf:3.1738e-02 L11_l1linf:3.2715e-02 L12_l1linf:3.4912e-02 L1_spectral:1.6171e-03 L2_spectral:1.6082e-03 L3_spectral:1.5967e-03 L4_spectral:1.6082e-03 L5_spectral:1.6138e-03 L6_spectral:1.6168e-03 L7_spectral:1.6083e-03 L8_spectral:1.6142e-03 L9_spectral:1.6161e-03 L10_spectral:1.6130e-03 L11_spectral:1.6081e-03 L12_spectral:1.6152e-03 train_time:146516ms step_avg:45.79ms +[2025-09-11 11:06:43] [Rank 0] PRINT: step:3200/10000 
val_loss:4.8649 total_sharp:3.7571e-04 L1_sharp:5.5811e-02 L2_sharp:6.4838e-02 L3_sharp:8.0700e-02 L4_sharp:1.0719e-01 L5_sharp:1.1986e-01 L6_sharp:1.4518e-01 L7_sharp:1.5946e-01 L8_sharp:2.0573e-01 L9_sharp:2.1730e-01 L10_sharp:3.0627e-01 L11_sharp:3.6056e-01 L12_sharp:1.0727e+00 total_fnorm:3.8250e+01 total_l1_linf:6.5280e+04 total_spectral:1.9000e+01 L1_fnorm:1.1572e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1523e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1426e-01 L9_fnorm:1.1523e-01 L10_fnorm:1.1572e-01 L11_fnorm:1.1572e-01 L12_fnorm:1.1572e-01 L1_l1linf:3.1982e-02 L2_l1linf:3.2227e-02 L3_l1linf:3.2227e-02 L4_l1linf:3.2227e-02 L5_l1linf:3.2471e-02 L6_l1linf:3.1982e-02 L7_l1linf:3.1982e-02 L8_l1linf:3.2227e-02 L9_l1linf:3.1738e-02 L10_l1linf:3.1738e-02 L11_l1linf:3.2715e-02 L12_l1linf:3.4912e-02 L1_spectral:1.6171e-03 L2_spectral:1.6082e-03 L3_spectral:1.5967e-03 L4_spectral:1.6082e-03 L5_spectral:1.6138e-03 L6_spectral:1.6168e-03 L7_spectral:1.6083e-03 L8_spectral:1.6142e-03 L9_spectral:1.6161e-03 L10_spectral:1.6130e-03 L11_spectral:1.6081e-03 L12_spectral:1.6152e-03 train_time:146516ms step_avg:45.79ms +[2025-09-11 11:06:44] [Rank 0] step:3201/10000 train_time:147778ms step_avg:46.17ms +[2025-09-11 11:06:44] [Rank 0] step:3201/10000 train_time:147778ms step_avg:46.17ms +[2025-09-11 11:06:45] [Rank 0] step:3221/10000 train_time:148466ms step_avg:46.09ms +[2025-09-11 11:06:45] [Rank 0] step:3221/10000 train_time:148466ms step_avg:46.09ms +[2025-09-11 11:06:45] [Rank 0] step:3241/10000 train_time:149134ms step_avg:46.01ms +[2025-09-11 11:06:45] [Rank 0] step:3241/10000 train_time:149134ms step_avg:46.01ms +[2025-09-11 11:06:46] [Rank 0] step:3261/10000 train_time:149802ms step_avg:45.94ms +[2025-09-11 11:06:46] [Rank 0] step:3261/10000 train_time:149802ms step_avg:45.94ms +[2025-09-11 11:06:47] [Rank 0] step:3281/10000 train_time:150735ms step_avg:45.94ms +[2025-09-11 11:06:47] [Rank 0] step:3281/10000 
train_time:150735ms step_avg:45.94ms +[2025-09-11 11:06:47] [Rank 0] step:3301/10000 train_time:151403ms step_avg:45.87ms +[2025-09-11 11:06:47] [Rank 0] step:3301/10000 train_time:151403ms step_avg:45.87ms +[2025-09-11 11:06:48] [Rank 0] step:3321/10000 train_time:152070ms step_avg:45.79ms +[2025-09-11 11:06:48] [Rank 0] step:3321/10000 train_time:152070ms step_avg:45.79ms +[2025-09-11 11:06:49] [Rank 0] step:3341/10000 train_time:152890ms step_avg:45.76ms +[2025-09-11 11:06:49] [Rank 0] step:3341/10000 train_time:152890ms step_avg:45.76ms +[2025-09-11 11:06:50] [Rank 0] step:3361/10000 train_time:153707ms step_avg:45.73ms +[2025-09-11 11:06:50] [Rank 0] step:3361/10000 train_time:153707ms step_avg:45.73ms +[2025-09-11 11:06:50] [Rank 0] step:3381/10000 train_time:154374ms step_avg:45.66ms +[2025-09-11 11:06:50] [Rank 0] step:3381/10000 train_time:154374ms step_avg:45.66ms +[2025-09-11 11:06:51] [Rank 0] step:3401/10000 train_time:155041ms step_avg:45.59ms +[2025-09-11 11:06:51] [Rank 0] step:3401/10000 train_time:155041ms step_avg:45.59ms +[2025-09-11 11:06:52] [Rank 0] step:3421/10000 train_time:155707ms step_avg:45.52ms +[2025-09-11 11:06:52] [Rank 0] step:3421/10000 train_time:155707ms step_avg:45.52ms +[2025-09-11 11:06:52] [Rank 0] step:3441/10000 train_time:156374ms step_avg:45.44ms +[2025-09-11 11:06:52] [Rank 0] step:3441/10000 train_time:156374ms step_avg:45.44ms +[2025-09-11 11:06:53] [Rank 0] step:3461/10000 train_time:157040ms step_avg:45.37ms +[2025-09-11 11:06:53] [Rank 0] step:3461/10000 train_time:157040ms step_avg:45.37ms +[2025-09-11 11:06:54] [Rank 0] step:3481/10000 train_time:157710ms step_avg:45.31ms +[2025-09-11 11:06:54] [Rank 0] step:3481/10000 train_time:157710ms step_avg:45.31ms +[2025-09-11 11:06:54] [Rank 0] step:3501/10000 train_time:158377ms step_avg:45.24ms +[2025-09-11 11:06:54] [Rank 0] step:3501/10000 train_time:158377ms step_avg:45.24ms +[2025-09-11 11:06:55] [Rank 0] step:3521/10000 train_time:159045ms step_avg:45.17ms 
+[2025-09-11 11:06:55] [Rank 0] step:3521/10000 train_time:159045ms step_avg:45.17ms +[2025-09-11 11:06:56] [Rank 0] step:3541/10000 train_time:159711ms step_avg:45.10ms +[2025-09-11 11:06:56] [Rank 0] step:3541/10000 train_time:159711ms step_avg:45.10ms +[2025-09-11 11:06:56] [Rank 0] step:3561/10000 train_time:160378ms step_avg:45.04ms +[2025-09-11 11:06:56] [Rank 0] step:3561/10000 train_time:160378ms step_avg:45.04ms +[2025-09-11 11:06:57] [Rank 0] step:3581/10000 train_time:161044ms step_avg:44.97ms +[2025-09-11 11:06:57] [Rank 0] step:3581/10000 train_time:161044ms step_avg:44.97ms +[2025-09-11 11:06:58] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 11:06:58] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 11:06:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 11:06:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 11:07:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 11:07:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 11:07:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:07:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:07:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 11:07:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 11:07:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 11:07:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 11:07:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 11:07:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 11:07:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 11:07:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 11:07:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 11:07:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 11:07:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 11:07:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 11:07:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 11:07:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 11:07:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 11:07:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 11:07:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 11:07:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 11:07:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 11:07:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 11:07:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 11:07:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 11:07:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 11:07:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 11:07:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 11:07:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 11:07:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 11:07:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 11:07:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 11:07:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 11:07:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 11:07:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 11:07:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 11:07:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 11:07:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 11:07:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 11:07:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:07:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:07:08] [Rank 0] PRINT: step:3600/10000 val_loss:4.7898 total_sharp:4.1939e-04 L1_sharp:4.0878e-02 L2_sharp:4.5898e-02 L3_sharp:6.4261e-02 L4_sharp:8.6650e-02 L5_sharp:9.4149e-02 L6_sharp:1.2523e-01 L7_sharp:1.5000e-01 L8_sharp:1.7546e-01 L9_sharp:1.9298e-01 L10_sharp:2.6807e-01 L11_sharp:3.6299e-01 L12_sharp:1.3445e+00 total_fnorm:3.4250e+01 total_l1_linf:5.5296e+04 total_spectral:1.7000e+01 L1_fnorm:1.1572e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1523e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1475e-01 L8_fnorm:1.1426e-01 L9_fnorm:1.1523e-01 L10_fnorm:1.1523e-01 L11_fnorm:1.1523e-01 L12_fnorm:1.1621e-01 L1_l1linf:3.1250e-02 L2_l1linf:3.1738e-02 L3_l1linf:3.1006e-02 L4_l1linf:3.1128e-02 L5_l1linf:3.1250e-02 L6_l1linf:3.0518e-02 L7_l1linf:3.1128e-02 L8_l1linf:3.1006e-02 L9_l1linf:3.0640e-02 L10_l1linf:3.1494e-02 L11_l1linf:3.1250e-02 L12_l1linf:3.4180e-02 L1_spectral:1.6030e-03 L2_spectral:1.6016e-03 L3_spectral:1.6031e-03 L4_spectral:1.6068e-03 L5_spectral:1.6078e-03 L6_spectral:1.6033e-03 L7_spectral:1.6129e-03 L8_spectral:1.5991e-03 L9_spectral:1.6199e-03 L10_spectral:1.6133e-03 L11_spectral:1.6112e-03 L12_spectral:1.6104e-03 train_time:161692ms step_avg:44.91ms +[2025-09-11 11:07:08] [Rank 0] PRINT: step:3600/10000 val_loss:4.7898 total_sharp:4.1939e-04 L1_sharp:4.0878e-02 L2_sharp:4.5898e-02 L3_sharp:6.4261e-02 L4_sharp:8.6650e-02 L5_sharp:9.4149e-02 L6_sharp:1.2523e-01 L7_sharp:1.5000e-01 L8_sharp:1.7546e-01 L9_sharp:1.9298e-01 L10_sharp:2.6807e-01 L11_sharp:3.6299e-01 L12_sharp:1.3445e+00 total_fnorm:3.4250e+01 total_l1_linf:5.5296e+04 total_spectral:1.7000e+01 L1_fnorm:1.1572e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1523e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1475e-01 L8_fnorm:1.1426e-01 L9_fnorm:1.1523e-01 L10_fnorm:1.1523e-01 L11_fnorm:1.1523e-01 L12_fnorm:1.1621e-01 L1_l1linf:3.1250e-02 L2_l1linf:3.1738e-02 L3_l1linf:3.1006e-02 L4_l1linf:3.1128e-02 L5_l1linf:3.1250e-02 
L6_l1linf:3.0518e-02 L7_l1linf:3.1128e-02 L8_l1linf:3.1006e-02 L9_l1linf:3.0640e-02 L10_l1linf:3.1494e-02 L11_l1linf:3.1250e-02 L12_l1linf:3.4180e-02 L1_spectral:1.6030e-03 L2_spectral:1.6016e-03 L3_spectral:1.6031e-03 L4_spectral:1.6068e-03 L5_spectral:1.6078e-03 L6_spectral:1.6033e-03 L7_spectral:1.6129e-03 L8_spectral:1.5991e-03 L9_spectral:1.6199e-03 L10_spectral:1.6133e-03 L11_spectral:1.6112e-03 L12_spectral:1.6104e-03 train_time:161692ms step_avg:44.91ms +[2025-09-11 11:07:09] [Rank 0] step:3601/10000 train_time:162958ms step_avg:45.25ms +[2025-09-11 11:07:09] [Rank 0] step:3601/10000 train_time:162958ms step_avg:45.25ms +[2025-09-11 11:07:10] [Rank 0] step:3621/10000 train_time:163648ms step_avg:45.19ms +[2025-09-11 11:07:10] [Rank 0] step:3621/10000 train_time:163648ms step_avg:45.19ms +[2025-09-11 11:07:10] [Rank 0] step:3641/10000 train_time:164315ms step_avg:45.13ms +[2025-09-11 11:07:10] [Rank 0] step:3641/10000 train_time:164315ms step_avg:45.13ms +[2025-09-11 11:07:11] [Rank 0] step:3661/10000 train_time:164982ms step_avg:45.06ms +[2025-09-11 11:07:11] [Rank 0] step:3661/10000 train_time:164982ms step_avg:45.06ms +[2025-09-11 11:07:12] [Rank 0] step:3681/10000 train_time:165649ms step_avg:45.00ms +[2025-09-11 11:07:12] [Rank 0] step:3681/10000 train_time:165649ms step_avg:45.00ms +[2025-09-11 11:07:12] [Rank 0] step:3701/10000 train_time:166315ms step_avg:44.94ms +[2025-09-11 11:07:12] [Rank 0] step:3701/10000 train_time:166315ms step_avg:44.94ms +[2025-09-11 11:07:13] [Rank 0] step:3721/10000 train_time:166991ms step_avg:44.88ms +[2025-09-11 11:07:13] [Rank 0] step:3721/10000 train_time:166991ms step_avg:44.88ms +[2025-09-11 11:07:14] [Rank 0] step:3741/10000 train_time:167668ms step_avg:44.82ms +[2025-09-11 11:07:14] [Rank 0] step:3741/10000 train_time:167668ms step_avg:44.82ms +[2025-09-11 11:07:14] [Rank 0] step:3761/10000 train_time:168346ms step_avg:44.76ms +[2025-09-11 11:07:14] [Rank 0] step:3761/10000 train_time:168346ms step_avg:44.76ms 
+[2025-09-11 11:07:15] [Rank 0] step:3781/10000 train_time:169023ms step_avg:44.70ms +[2025-09-11 11:07:15] [Rank 0] step:3781/10000 train_time:169023ms step_avg:44.70ms +[2025-09-11 11:07:16] [Rank 0] step:3801/10000 train_time:169700ms step_avg:44.65ms +[2025-09-11 11:07:16] [Rank 0] step:3801/10000 train_time:169700ms step_avg:44.65ms +[2025-09-11 11:07:16] [Rank 0] step:3821/10000 train_time:170378ms step_avg:44.59ms +[2025-09-11 11:07:16] [Rank 0] step:3821/10000 train_time:170378ms step_avg:44.59ms +[2025-09-11 11:07:17] [Rank 0] step:3841/10000 train_time:171055ms step_avg:44.53ms +[2025-09-11 11:07:17] [Rank 0] step:3841/10000 train_time:171055ms step_avg:44.53ms +[2025-09-11 11:07:18] [Rank 0] step:3861/10000 train_time:171732ms step_avg:44.48ms +[2025-09-11 11:07:18] [Rank 0] step:3861/10000 train_time:171732ms step_avg:44.48ms +[2025-09-11 11:07:18] [Rank 0] step:3881/10000 train_time:172408ms step_avg:44.42ms +[2025-09-11 11:07:18] [Rank 0] step:3881/10000 train_time:172408ms step_avg:44.42ms +[2025-09-11 11:07:19] [Rank 0] step:3901/10000 train_time:173086ms step_avg:44.37ms +[2025-09-11 11:07:19] [Rank 0] step:3901/10000 train_time:173086ms step_avg:44.37ms +[2025-09-11 11:07:20] [Rank 0] step:3921/10000 train_time:173763ms step_avg:44.32ms +[2025-09-11 11:07:20] [Rank 0] step:3921/10000 train_time:173763ms step_avg:44.32ms +[2025-09-11 11:07:20] [Rank 0] step:3941/10000 train_time:174441ms step_avg:44.26ms +[2025-09-11 11:07:20] [Rank 0] step:3941/10000 train_time:174441ms step_avg:44.26ms +[2025-09-11 11:07:21] [Rank 0] step:3961/10000 train_time:175118ms step_avg:44.21ms +[2025-09-11 11:07:21] [Rank 0] step:3961/10000 train_time:175118ms step_avg:44.21ms +[2025-09-11 11:07:22] [Rank 0] step:3981/10000 train_time:175795ms step_avg:44.16ms +[2025-09-11 11:07:22] [Rank 0] step:3981/10000 train_time:175795ms step_avg:44.16ms +[2025-09-11 11:07:22] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 11:07:22] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 11:07:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 11:07:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 11:07:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 11:07:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 11:07:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:07:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:07:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 11:07:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 11:07:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 11:07:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 11:07:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 11:07:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 11:07:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 11:07:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 11:07:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 11:07:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 11:07:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 11:07:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 11:07:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 11:07:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 11:07:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 11:07:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 11:07:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 11:07:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 11:07:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 11:07:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 11:07:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 11:07:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 11:07:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 11:07:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 11:07:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 11:07:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 11:07:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 11:07:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 11:07:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 11:07:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 11:07:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 11:07:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 11:07:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 11:07:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 11:07:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 11:07:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 11:07:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:07:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:07:32] [Rank 0] PRINT: step:4000/10000 val_loss:4.7183 total_sharp:4.0454e-04 L1_sharp:4.9367e-02 L2_sharp:6.3596e-02 L3_sharp:7.9436e-02 L4_sharp:1.0560e-01 L5_sharp:1.4045e-01 L6_sharp:1.6681e-01 L7_sharp:2.0822e-01 L8_sharp:2.4111e-01 L9_sharp:2.7703e-01 L10_sharp:3.2433e-01 L11_sharp:4.9427e-01 L12_sharp:1.3140e+00 total_fnorm:4.0250e+01 total_l1_linf:6.6560e+04 total_spectral:2.0125e+01 L1_fnorm:1.1572e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1328e-01 L9_fnorm:1.1523e-01 L10_fnorm:1.1523e-01 L11_fnorm:1.1523e-01 L12_fnorm:1.1523e-01 L1_l1linf:3.2471e-02 L2_l1linf:3.2471e-02 L3_l1linf:3.2959e-02 L4_l1linf:3.2471e-02 L5_l1linf:3.2227e-02 L6_l1linf:3.2227e-02 L7_l1linf:3.1494e-02 L8_l1linf:3.1494e-02 L9_l1linf:3.1128e-02 L10_l1linf:3.2227e-02 L11_l1linf:3.1982e-02 L12_l1linf:3.3447e-02 L1_spectral:1.5867e-03 L2_spectral:1.6060e-03 L3_spectral:1.6005e-03 L4_spectral:1.6048e-03 L5_spectral:1.6173e-03 L6_spectral:1.5996e-03 L7_spectral:1.6000e-03 L8_spectral:1.5981e-03 L9_spectral:1.6200e-03 L10_spectral:1.6094e-03 L11_spectral:1.6131e-03 L12_spectral:1.6002e-03 train_time:176454ms step_avg:44.11ms +[2025-09-11 11:07:32] [Rank 0] PRINT: step:4000/10000 
val_loss:4.7183 total_sharp:4.0454e-04 L1_sharp:4.9367e-02 L2_sharp:6.3596e-02 L3_sharp:7.9436e-02 L4_sharp:1.0560e-01 L5_sharp:1.4045e-01 L6_sharp:1.6681e-01 L7_sharp:2.0822e-01 L8_sharp:2.4111e-01 L9_sharp:2.7703e-01 L10_sharp:3.2433e-01 L11_sharp:4.9427e-01 L12_sharp:1.3140e+00 total_fnorm:4.0250e+01 total_l1_linf:6.6560e+04 total_spectral:2.0125e+01 L1_fnorm:1.1572e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1328e-01 L9_fnorm:1.1523e-01 L10_fnorm:1.1523e-01 L11_fnorm:1.1523e-01 L12_fnorm:1.1523e-01 L1_l1linf:3.2471e-02 L2_l1linf:3.2471e-02 L3_l1linf:3.2959e-02 L4_l1linf:3.2471e-02 L5_l1linf:3.2227e-02 L6_l1linf:3.2227e-02 L7_l1linf:3.1494e-02 L8_l1linf:3.1494e-02 L9_l1linf:3.1128e-02 L10_l1linf:3.2227e-02 L11_l1linf:3.1982e-02 L12_l1linf:3.3447e-02 L1_spectral:1.5867e-03 L2_spectral:1.6060e-03 L3_spectral:1.6005e-03 L4_spectral:1.6048e-03 L5_spectral:1.6173e-03 L6_spectral:1.5996e-03 L7_spectral:1.6000e-03 L8_spectral:1.5981e-03 L9_spectral:1.6200e-03 L10_spectral:1.6094e-03 L11_spectral:1.6131e-03 L12_spectral:1.6002e-03 train_time:176454ms step_avg:44.11ms +[2025-09-11 11:07:33] [Rank 0] step:4001/10000 train_time:177734ms step_avg:44.42ms +[2025-09-11 11:07:33] [Rank 0] step:4001/10000 train_time:177734ms step_avg:44.42ms +[2025-09-11 11:07:34] [Rank 0] step:4021/10000 train_time:178438ms step_avg:44.38ms +[2025-09-11 11:07:34] [Rank 0] step:4021/10000 train_time:178438ms step_avg:44.38ms +[2025-09-11 11:07:35] [Rank 0] step:4041/10000 train_time:179117ms step_avg:44.32ms +[2025-09-11 11:07:35] [Rank 0] step:4041/10000 train_time:179117ms step_avg:44.32ms +[2025-09-11 11:07:36] [Rank 0] step:4061/10000 train_time:179794ms step_avg:44.27ms +[2025-09-11 11:07:36] [Rank 0] step:4061/10000 train_time:179794ms step_avg:44.27ms +[2025-09-11 11:07:36] [Rank 0] step:4081/10000 train_time:180471ms step_avg:44.22ms +[2025-09-11 11:07:36] [Rank 0] step:4081/10000 
train_time:180471ms step_avg:44.22ms +[2025-09-11 11:07:37] [Rank 0] step:4101/10000 train_time:181148ms step_avg:44.17ms +[2025-09-11 11:07:37] [Rank 0] step:4101/10000 train_time:181148ms step_avg:44.17ms +[2025-09-11 11:07:38] [Rank 0] step:4121/10000 train_time:181826ms step_avg:44.12ms +[2025-09-11 11:07:38] [Rank 0] step:4121/10000 train_time:181826ms step_avg:44.12ms +[2025-09-11 11:07:38] [Rank 0] step:4141/10000 train_time:182502ms step_avg:44.07ms +[2025-09-11 11:07:38] [Rank 0] step:4141/10000 train_time:182502ms step_avg:44.07ms +[2025-09-11 11:07:39] [Rank 0] step:4161/10000 train_time:183179ms step_avg:44.02ms +[2025-09-11 11:07:39] [Rank 0] step:4161/10000 train_time:183179ms step_avg:44.02ms +[2025-09-11 11:07:40] [Rank 0] step:4181/10000 train_time:183857ms step_avg:43.97ms +[2025-09-11 11:07:40] [Rank 0] step:4181/10000 train_time:183857ms step_avg:43.97ms +[2025-09-11 11:07:40] [Rank 0] step:4201/10000 train_time:184535ms step_avg:43.93ms +[2025-09-11 11:07:40] [Rank 0] step:4201/10000 train_time:184535ms step_avg:43.93ms +[2025-09-11 11:07:41] [Rank 0] step:4221/10000 train_time:185212ms step_avg:43.88ms +[2025-09-11 11:07:41] [Rank 0] step:4221/10000 train_time:185212ms step_avg:43.88ms +[2025-09-11 11:07:42] [Rank 0] step:4241/10000 train_time:185889ms step_avg:43.83ms +[2025-09-11 11:07:42] [Rank 0] step:4241/10000 train_time:185889ms step_avg:43.83ms +[2025-09-11 11:07:42] [Rank 0] step:4261/10000 train_time:186566ms step_avg:43.78ms +[2025-09-11 11:07:42] [Rank 0] step:4261/10000 train_time:186566ms step_avg:43.78ms +[2025-09-11 11:07:43] [Rank 0] step:4281/10000 train_time:187244ms step_avg:43.74ms +[2025-09-11 11:07:43] [Rank 0] step:4281/10000 train_time:187244ms step_avg:43.74ms +[2025-09-11 11:07:44] [Rank 0] step:4301/10000 train_time:187922ms step_avg:43.69ms +[2025-09-11 11:07:44] [Rank 0] step:4301/10000 train_time:187922ms step_avg:43.69ms +[2025-09-11 11:07:44] [Rank 0] step:4321/10000 train_time:188599ms step_avg:43.65ms 
+[2025-09-11 11:07:44] [Rank 0] step:4321/10000 train_time:188599ms step_avg:43.65ms +[2025-09-11 11:07:45] [Rank 0] step:4341/10000 train_time:189277ms step_avg:43.60ms +[2025-09-11 11:07:45] [Rank 0] step:4341/10000 train_time:189277ms step_avg:43.60ms +[2025-09-11 11:07:46] [Rank 0] step:4361/10000 train_time:189953ms step_avg:43.56ms +[2025-09-11 11:07:46] [Rank 0] step:4361/10000 train_time:189953ms step_avg:43.56ms +[2025-09-11 11:07:46] [Rank 0] step:4381/10000 train_time:190631ms step_avg:43.51ms +[2025-09-11 11:07:46] [Rank 0] step:4381/10000 train_time:190631ms step_avg:43.51ms +[2025-09-11 11:07:47] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 11:07:47] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 11:07:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 11:07:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 11:07:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 11:07:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 11:07:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:07:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:07:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 11:07:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 11:07:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 11:07:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 11:07:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 11:07:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 11:07:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 11:07:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 11:07:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 11:07:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 11:07:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 11:07:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 11:07:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 11:07:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 11:07:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 11:07:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 11:07:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 11:07:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 11:07:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 11:07:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 11:07:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 11:07:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 11:07:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 11:07:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 11:07:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 11:07:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 11:07:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 11:07:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 11:07:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 11:07:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 11:07:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 11:07:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 11:07:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 11:07:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 11:07:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 11:07:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 11:07:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:07:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:07:57] [Rank 0] PRINT: step:4400/10000 val_loss:4.6656 total_sharp:3.3402e-04 L1_sharp:3.0844e-02 L2_sharp:3.8800e-02 L3_sharp:4.6559e-02 L4_sharp:6.5111e-02 L5_sharp:9.0108e-02 L6_sharp:1.2166e-01 L7_sharp:1.4123e-01 L8_sharp:1.7241e-01 L9_sharp:1.9363e-01 L10_sharp:2.7066e-01 L11_sharp:3.5868e-01 L12_sharp:1.8236e+00 total_fnorm:3.5500e+01 total_l1_linf:5.6576e+04 total_spectral:1.7750e+01 L1_fnorm:1.1426e-01 L2_fnorm:1.1475e-01 L3_fnorm:1.1475e-01 L4_fnorm:1.1475e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1475e-01 L8_fnorm:1.1279e-01 L9_fnorm:1.1475e-01 L10_fnorm:1.1523e-01 L11_fnorm:1.1523e-01 L12_fnorm:1.1572e-01 L1_l1linf:3.0884e-02 L2_l1linf:3.1128e-02 L3_l1linf:3.0884e-02 L4_l1linf:3.1128e-02 L5_l1linf:3.0762e-02 L6_l1linf:3.0273e-02 L7_l1linf:3.0273e-02 L8_l1linf:3.1128e-02 L9_l1linf:3.0640e-02 L10_l1linf:3.1128e-02 L11_l1linf:3.2471e-02 L12_l1linf:3.1982e-02 L1_spectral:1.5953e-03 L2_spectral:1.6156e-03 L3_spectral:1.6039e-03 L4_spectral:1.6008e-03 L5_spectral:1.5958e-03 L6_spectral:1.6035e-03 L7_spectral:1.6042e-03 L8_spectral:1.5977e-03 L9_spectral:1.6350e-03 L10_spectral:1.6134e-03 L11_spectral:1.6197e-03 L12_spectral:1.6210e-03 train_time:191288ms step_avg:43.47ms +[2025-09-11 11:07:57] [Rank 0] PRINT: step:4400/10000 val_loss:4.6656 total_sharp:3.3402e-04 L1_sharp:3.0844e-02 L2_sharp:3.8800e-02 L3_sharp:4.6559e-02 L4_sharp:6.5111e-02 L5_sharp:9.0108e-02 L6_sharp:1.2166e-01 L7_sharp:1.4123e-01 L8_sharp:1.7241e-01 L9_sharp:1.9363e-01 L10_sharp:2.7066e-01 L11_sharp:3.5868e-01 L12_sharp:1.8236e+00 total_fnorm:3.5500e+01 total_l1_linf:5.6576e+04 total_spectral:1.7750e+01 L1_fnorm:1.1426e-01 L2_fnorm:1.1475e-01 L3_fnorm:1.1475e-01 L4_fnorm:1.1475e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1475e-01 L8_fnorm:1.1279e-01 L9_fnorm:1.1475e-01 L10_fnorm:1.1523e-01 L11_fnorm:1.1523e-01 L12_fnorm:1.1572e-01 L1_l1linf:3.0884e-02 L2_l1linf:3.1128e-02 L3_l1linf:3.0884e-02 L4_l1linf:3.1128e-02 L5_l1linf:3.0762e-02 
L6_l1linf:3.0273e-02 L7_l1linf:3.0273e-02 L8_l1linf:3.1128e-02 L9_l1linf:3.0640e-02 L10_l1linf:3.1128e-02 L11_l1linf:3.2471e-02 L12_l1linf:3.1982e-02 L1_spectral:1.5953e-03 L2_spectral:1.6156e-03 L3_spectral:1.6039e-03 L4_spectral:1.6008e-03 L5_spectral:1.5958e-03 L6_spectral:1.6035e-03 L7_spectral:1.6042e-03 L8_spectral:1.5977e-03 L9_spectral:1.6350e-03 L10_spectral:1.6134e-03 L11_spectral:1.6197e-03 L12_spectral:1.6210e-03 train_time:191288ms step_avg:43.47ms +[2025-09-11 11:07:58] [Rank 0] step:4401/10000 train_time:192563ms step_avg:43.75ms +[2025-09-11 11:07:58] [Rank 0] step:4401/10000 train_time:192563ms step_avg:43.75ms +[2025-09-11 11:07:59] [Rank 0] step:4421/10000 train_time:193282ms step_avg:43.72ms +[2025-09-11 11:07:59] [Rank 0] step:4421/10000 train_time:193282ms step_avg:43.72ms +[2025-09-11 11:08:00] [Rank 0] step:4441/10000 train_time:193962ms step_avg:43.68ms +[2025-09-11 11:08:00] [Rank 0] step:4441/10000 train_time:193962ms step_avg:43.68ms +[2025-09-11 11:08:01] [Rank 0] step:4461/10000 train_time:194642ms step_avg:43.63ms +[2025-09-11 11:08:01] [Rank 0] step:4461/10000 train_time:194642ms step_avg:43.63ms +[2025-09-11 11:08:01] [Rank 0] step:4481/10000 train_time:195323ms step_avg:43.59ms +[2025-09-11 11:08:01] [Rank 0] step:4481/10000 train_time:195323ms step_avg:43.59ms +[2025-09-11 11:08:02] [Rank 0] step:4501/10000 train_time:196005ms step_avg:43.55ms +[2025-09-11 11:08:02] [Rank 0] step:4501/10000 train_time:196005ms step_avg:43.55ms +[2025-09-11 11:08:03] [Rank 0] step:4521/10000 train_time:196686ms step_avg:43.51ms +[2025-09-11 11:08:03] [Rank 0] step:4521/10000 train_time:196686ms step_avg:43.51ms +[2025-09-11 11:08:03] [Rank 0] step:4541/10000 train_time:197367ms step_avg:43.46ms +[2025-09-11 11:08:03] [Rank 0] step:4541/10000 train_time:197367ms step_avg:43.46ms +[2025-09-11 11:08:04] [Rank 0] step:4561/10000 train_time:198048ms step_avg:43.42ms +[2025-09-11 11:08:04] [Rank 0] step:4561/10000 train_time:198048ms step_avg:43.42ms 
+[2025-09-11 11:08:05] [Rank 0] step:4581/10000 train_time:198729ms step_avg:43.38ms +[2025-09-11 11:08:05] [Rank 0] step:4581/10000 train_time:198729ms step_avg:43.38ms +[2025-09-11 11:08:05] [Rank 0] step:4601/10000 train_time:199409ms step_avg:43.34ms +[2025-09-11 11:08:05] [Rank 0] step:4601/10000 train_time:199409ms step_avg:43.34ms +[2025-09-11 11:08:06] [Rank 0] step:4621/10000 train_time:200091ms step_avg:43.30ms +[2025-09-11 11:08:06] [Rank 0] step:4621/10000 train_time:200091ms step_avg:43.30ms +[2025-09-11 11:08:07] [Rank 0] step:4641/10000 train_time:200770ms step_avg:43.26ms +[2025-09-11 11:08:07] [Rank 0] step:4641/10000 train_time:200770ms step_avg:43.26ms +[2025-09-11 11:08:07] [Rank 0] step:4661/10000 train_time:201451ms step_avg:43.22ms +[2025-09-11 11:08:07] [Rank 0] step:4661/10000 train_time:201451ms step_avg:43.22ms +[2025-09-11 11:08:08] [Rank 0] step:4681/10000 train_time:202132ms step_avg:43.18ms +[2025-09-11 11:08:08] [Rank 0] step:4681/10000 train_time:202132ms step_avg:43.18ms +[2025-09-11 11:08:09] [Rank 0] step:4701/10000 train_time:202813ms step_avg:43.14ms +[2025-09-11 11:08:09] [Rank 0] step:4701/10000 train_time:202813ms step_avg:43.14ms +[2025-09-11 11:08:09] [Rank 0] step:4721/10000 train_time:203493ms step_avg:43.10ms +[2025-09-11 11:08:09] [Rank 0] step:4721/10000 train_time:203493ms step_avg:43.10ms +[2025-09-11 11:08:10] [Rank 0] step:4741/10000 train_time:204179ms step_avg:43.07ms +[2025-09-11 11:08:10] [Rank 0] step:4741/10000 train_time:204179ms step_avg:43.07ms +[2025-09-11 11:08:11] [Rank 0] step:4761/10000 train_time:204860ms step_avg:43.03ms +[2025-09-11 11:08:11] [Rank 0] step:4761/10000 train_time:204860ms step_avg:43.03ms +[2025-09-11 11:08:11] [Rank 0] step:4781/10000 train_time:205540ms step_avg:42.99ms +[2025-09-11 11:08:11] [Rank 0] step:4781/10000 train_time:205540ms step_avg:42.99ms +[2025-09-11 11:08:12] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 11:08:12] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 11:08:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 11:08:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 11:08:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 11:08:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 11:08:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:08:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:08:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 11:08:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 11:08:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 11:08:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 11:08:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 11:08:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 11:08:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 11:08:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 11:08:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 11:08:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 11:08:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 11:08:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 11:08:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 11:08:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 11:08:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 11:08:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 11:08:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 11:08:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 11:08:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 11:08:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 11:08:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 11:08:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 11:08:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 11:08:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 11:08:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 11:08:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 11:08:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 11:08:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 11:08:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 11:08:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 11:08:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 11:08:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 11:08:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 11:08:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 11:08:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 11:08:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 11:08:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:08:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:08:22] [Rank 0] PRINT: step:4800/10000 val_loss:4.6136 total_sharp:3.4690e-04 L1_sharp:3.2428e-02 L2_sharp:3.9854e-02 L3_sharp:5.5321e-02 L4_sharp:7.4621e-02 L5_sharp:1.0101e-01 L6_sharp:1.3868e-01 L7_sharp:1.7188e-01 L8_sharp:1.9028e-01 L9_sharp:2.0206e-01 L10_sharp:2.7092e-01 L11_sharp:3.9491e-01 L12_sharp:1.6050e+00 total_fnorm:3.6500e+01 total_l1_linf:6.0672e+04 total_spectral:1.8250e+01 L1_fnorm:1.1475e-01 L2_fnorm:1.1475e-01 L3_fnorm:1.1475e-01 L4_fnorm:1.1475e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1279e-01 L9_fnorm:1.1475e-01 L10_fnorm:1.1475e-01 L11_fnorm:1.1426e-01 L12_fnorm:1.1523e-01 L1_l1linf:3.0029e-02 L2_l1linf:2.9907e-02 L3_l1linf:3.0273e-02 L4_l1linf:3.0029e-02 L5_l1linf:2.9419e-02 L6_l1linf:3.0029e-02 L7_l1linf:2.9297e-02 L8_l1linf:2.9663e-02 L9_l1linf:2.9419e-02 L10_l1linf:2.9907e-02 L11_l1linf:3.0273e-02 L12_l1linf:3.1982e-02 L1_spectral:1.6088e-03 L2_spectral:1.6040e-03 L3_spectral:1.5981e-03 L4_spectral:1.6060e-03 L5_spectral:1.6052e-03 L6_spectral:1.6054e-03 L7_spectral:1.6151e-03 L8_spectral:1.6161e-03 L9_spectral:1.6172e-03 L10_spectral:1.6211e-03 L11_spectral:1.6210e-03 L12_spectral:1.6160e-03 train_time:206200ms step_avg:42.96ms +[2025-09-11 11:08:22] [Rank 0] PRINT: step:4800/10000 
val_loss:4.6136 total_sharp:3.4690e-04 L1_sharp:3.2428e-02 L2_sharp:3.9854e-02 L3_sharp:5.5321e-02 L4_sharp:7.4621e-02 L5_sharp:1.0101e-01 L6_sharp:1.3868e-01 L7_sharp:1.7188e-01 L8_sharp:1.9028e-01 L9_sharp:2.0206e-01 L10_sharp:2.7092e-01 L11_sharp:3.9491e-01 L12_sharp:1.6050e+00 total_fnorm:3.6500e+01 total_l1_linf:6.0672e+04 total_spectral:1.8250e+01 L1_fnorm:1.1475e-01 L2_fnorm:1.1475e-01 L3_fnorm:1.1475e-01 L4_fnorm:1.1475e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1279e-01 L9_fnorm:1.1475e-01 L10_fnorm:1.1475e-01 L11_fnorm:1.1426e-01 L12_fnorm:1.1523e-01 L1_l1linf:3.0029e-02 L2_l1linf:2.9907e-02 L3_l1linf:3.0273e-02 L4_l1linf:3.0029e-02 L5_l1linf:2.9419e-02 L6_l1linf:3.0029e-02 L7_l1linf:2.9297e-02 L8_l1linf:2.9663e-02 L9_l1linf:2.9419e-02 L10_l1linf:2.9907e-02 L11_l1linf:3.0273e-02 L12_l1linf:3.1982e-02 L1_spectral:1.6088e-03 L2_spectral:1.6040e-03 L3_spectral:1.5981e-03 L4_spectral:1.6060e-03 L5_spectral:1.6052e-03 L6_spectral:1.6054e-03 L7_spectral:1.6151e-03 L8_spectral:1.6161e-03 L9_spectral:1.6172e-03 L10_spectral:1.6211e-03 L11_spectral:1.6210e-03 L12_spectral:1.6160e-03 train_time:206200ms step_avg:42.96ms +[2025-09-11 11:08:23] [Rank 0] step:4801/10000 train_time:207490ms step_avg:43.22ms +[2025-09-11 11:08:23] [Rank 0] step:4801/10000 train_time:207490ms step_avg:43.22ms +[2025-09-11 11:08:24] [Rank 0] step:4821/10000 train_time:208202ms step_avg:43.19ms +[2025-09-11 11:08:24] [Rank 0] step:4821/10000 train_time:208202ms step_avg:43.19ms +[2025-09-11 11:08:25] [Rank 0] step:4841/10000 train_time:208883ms step_avg:43.15ms +[2025-09-11 11:08:25] [Rank 0] step:4841/10000 train_time:208883ms step_avg:43.15ms +[2025-09-11 11:08:25] [Rank 0] step:4861/10000 train_time:209564ms step_avg:43.11ms +[2025-09-11 11:08:25] [Rank 0] step:4861/10000 train_time:209564ms step_avg:43.11ms +[2025-09-11 11:08:26] [Rank 0] step:4881/10000 train_time:210244ms step_avg:43.07ms +[2025-09-11 11:08:26] [Rank 0] step:4881/10000 
train_time:210244ms step_avg:43.07ms +[2025-09-11 11:08:27] [Rank 0] step:4901/10000 train_time:210925ms step_avg:43.04ms +[2025-09-11 11:08:27] [Rank 0] step:4901/10000 train_time:210925ms step_avg:43.04ms +[2025-09-11 11:08:27] [Rank 0] step:4921/10000 train_time:211605ms step_avg:43.00ms +[2025-09-11 11:08:27] [Rank 0] step:4921/10000 train_time:211605ms step_avg:43.00ms +[2025-09-11 11:08:28] [Rank 0] step:4941/10000 train_time:212286ms step_avg:42.96ms +[2025-09-11 11:08:28] [Rank 0] step:4941/10000 train_time:212286ms step_avg:42.96ms +[2025-09-11 11:08:29] [Rank 0] step:4961/10000 train_time:212966ms step_avg:42.93ms +[2025-09-11 11:08:29] [Rank 0] step:4961/10000 train_time:212966ms step_avg:42.93ms +[2025-09-11 11:08:29] [Rank 0] step:4981/10000 train_time:213646ms step_avg:42.89ms +[2025-09-11 11:08:29] [Rank 0] step:4981/10000 train_time:213646ms step_avg:42.89ms +[2025-09-11 11:08:30] [Rank 0] step:5001/10000 train_time:214328ms step_avg:42.86ms +[2025-09-11 11:08:30] [Rank 0] step:5001/10000 train_time:214328ms step_avg:42.86ms +[2025-09-11 11:08:31] [Rank 0] step:5021/10000 train_time:215007ms step_avg:42.82ms +[2025-09-11 11:08:31] [Rank 0] step:5021/10000 train_time:215007ms step_avg:42.82ms +[2025-09-11 11:08:32] [Rank 0] step:5041/10000 train_time:215686ms step_avg:42.79ms +[2025-09-11 11:08:32] [Rank 0] step:5041/10000 train_time:215686ms step_avg:42.79ms +[2025-09-11 11:08:32] [Rank 0] step:5061/10000 train_time:216366ms step_avg:42.75ms +[2025-09-11 11:08:32] [Rank 0] step:5061/10000 train_time:216366ms step_avg:42.75ms +[2025-09-11 11:08:33] [Rank 0] step:5081/10000 train_time:217045ms step_avg:42.72ms +[2025-09-11 11:08:33] [Rank 0] step:5081/10000 train_time:217045ms step_avg:42.72ms +[2025-09-11 11:08:34] [Rank 0] step:5101/10000 train_time:217725ms step_avg:42.68ms +[2025-09-11 11:08:34] [Rank 0] step:5101/10000 train_time:217725ms step_avg:42.68ms +[2025-09-11 11:08:34] [Rank 0] step:5121/10000 train_time:218404ms step_avg:42.65ms 
+[2025-09-11 11:08:34] [Rank 0] step:5121/10000 train_time:218404ms step_avg:42.65ms +[2025-09-11 11:08:35] [Rank 0] step:5141/10000 train_time:219085ms step_avg:42.62ms +[2025-09-11 11:08:35] [Rank 0] step:5141/10000 train_time:219085ms step_avg:42.62ms +[2025-09-11 11:08:36] [Rank 0] step:5161/10000 train_time:219764ms step_avg:42.58ms +[2025-09-11 11:08:36] [Rank 0] step:5161/10000 train_time:219764ms step_avg:42.58ms +[2025-09-11 11:08:36] [Rank 0] step:5181/10000 train_time:220443ms step_avg:42.55ms +[2025-09-11 11:08:36] [Rank 0] step:5181/10000 train_time:220443ms step_avg:42.55ms +[2025-09-11 11:08:37] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 11:08:37] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 11:08:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 11:08:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 11:08:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 11:08:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 11:08:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:08:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:08:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 11:08:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 11:08:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 11:08:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 11:08:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 11:08:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 11:08:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 11:08:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 11:08:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 11:08:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 11:08:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 11:08:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 11:08:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 11:08:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 11:08:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 11:08:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 11:08:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 11:08:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 11:08:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 11:08:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 11:08:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 11:08:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 11:08:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 11:08:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 11:08:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 11:08:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 11:08:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 11:08:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 11:08:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 11:08:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 11:08:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 11:08:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 11:08:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 11:08:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 11:08:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 11:08:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 11:08:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:08:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:08:47] [Rank 0] PRINT: step:5200/10000 val_loss:4.5728 total_sharp:4.3438e-04 L1_sharp:2.9123e-02 L2_sharp:3.4516e-02 L3_sharp:4.8979e-02 L4_sharp:6.5688e-02 L5_sharp:9.0244e-02 L6_sharp:1.0813e-01 L7_sharp:1.4595e-01 L8_sharp:1.8363e-01 L9_sharp:2.3032e-01 L10_sharp:3.6276e-01 L11_sharp:5.1027e-01 L12_sharp:2.3933e+00 total_fnorm:3.4000e+01 total_l1_linf:5.3504e+04 total_spectral:1.7000e+01 L1_fnorm:1.1475e-01 L2_fnorm:1.1426e-01 L3_fnorm:1.1475e-01 L4_fnorm:1.1426e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1279e-01 L9_fnorm:1.1426e-01 L10_fnorm:1.1475e-01 L11_fnorm:1.1426e-01 L12_fnorm:1.1523e-01 L1_l1linf:2.9419e-02 L2_l1linf:2.8809e-02 L3_l1linf:2.9175e-02 L4_l1linf:2.8687e-02 L5_l1linf:2.9053e-02 L6_l1linf:2.8809e-02 L7_l1linf:2.9175e-02 L8_l1linf:2.9053e-02 L9_l1linf:2.9053e-02 L10_l1linf:2.9053e-02 L11_l1linf:2.9785e-02 L12_l1linf:3.1982e-02 L1_spectral:1.6073e-03 L2_spectral:1.6076e-03 L3_spectral:1.5991e-03 L4_spectral:1.6124e-03 L5_spectral:1.6047e-03 L6_spectral:1.6060e-03 L7_spectral:1.6151e-03 L8_spectral:1.6097e-03 L9_spectral:1.6251e-03 L10_spectral:1.6225e-03 L11_spectral:1.6115e-03 L12_spectral:1.6063e-03 train_time:221110ms step_avg:42.52ms +[2025-09-11 11:08:47] [Rank 0] PRINT: step:5200/10000 val_loss:4.5728 total_sharp:4.3438e-04 L1_sharp:2.9123e-02 L2_sharp:3.4516e-02 L3_sharp:4.8979e-02 L4_sharp:6.5688e-02 L5_sharp:9.0244e-02 L6_sharp:1.0813e-01 L7_sharp:1.4595e-01 L8_sharp:1.8363e-01 L9_sharp:2.3032e-01 L10_sharp:3.6276e-01 L11_sharp:5.1027e-01 L12_sharp:2.3933e+00 total_fnorm:3.4000e+01 total_l1_linf:5.3504e+04 total_spectral:1.7000e+01 L1_fnorm:1.1475e-01 L2_fnorm:1.1426e-01 L3_fnorm:1.1475e-01 L4_fnorm:1.1426e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1279e-01 L9_fnorm:1.1426e-01 L10_fnorm:1.1475e-01 L11_fnorm:1.1426e-01 L12_fnorm:1.1523e-01 L1_l1linf:2.9419e-02 L2_l1linf:2.8809e-02 L3_l1linf:2.9175e-02 L4_l1linf:2.8687e-02 L5_l1linf:2.9053e-02 
L6_l1linf:2.8809e-02 L7_l1linf:2.9175e-02 L8_l1linf:2.9053e-02 L9_l1linf:2.9053e-02 L10_l1linf:2.9053e-02 L11_l1linf:2.9785e-02 L12_l1linf:3.1982e-02 L1_spectral:1.6073e-03 L2_spectral:1.6076e-03 L3_spectral:1.5991e-03 L4_spectral:1.6124e-03 L5_spectral:1.6047e-03 L6_spectral:1.6060e-03 L7_spectral:1.6151e-03 L8_spectral:1.6097e-03 L9_spectral:1.6251e-03 L10_spectral:1.6225e-03 L11_spectral:1.6115e-03 L12_spectral:1.6063e-03 train_time:221110ms step_avg:42.52ms +[2025-09-11 11:08:48] [Rank 0] step:5201/10000 train_time:222432ms step_avg:42.77ms +[2025-09-11 11:08:48] [Rank 0] step:5201/10000 train_time:222432ms step_avg:42.77ms +[2025-09-11 11:08:49] [Rank 0] step:5221/10000 train_time:223160ms step_avg:42.74ms +[2025-09-11 11:08:49] [Rank 0] step:5221/10000 train_time:223160ms step_avg:42.74ms +[2025-09-11 11:08:50] [Rank 0] step:5241/10000 train_time:223851ms step_avg:42.71ms +[2025-09-11 11:08:50] [Rank 0] step:5241/10000 train_time:223851ms step_avg:42.71ms +[2025-09-11 11:08:50] [Rank 0] step:5261/10000 train_time:224541ms step_avg:42.68ms +[2025-09-11 11:08:50] [Rank 0] step:5261/10000 train_time:224541ms step_avg:42.68ms +[2025-09-11 11:08:51] [Rank 0] step:5281/10000 train_time:225231ms step_avg:42.65ms +[2025-09-11 11:08:51] [Rank 0] step:5281/10000 train_time:225231ms step_avg:42.65ms +[2025-09-11 11:08:52] [Rank 0] step:5301/10000 train_time:225921ms step_avg:42.62ms +[2025-09-11 11:08:52] [Rank 0] step:5301/10000 train_time:225921ms step_avg:42.62ms +[2025-09-11 11:08:53] [Rank 0] step:5321/10000 train_time:226611ms step_avg:42.59ms +[2025-09-11 11:08:53] [Rank 0] step:5321/10000 train_time:226611ms step_avg:42.59ms +[2025-09-11 11:08:53] [Rank 0] step:5341/10000 train_time:227592ms step_avg:42.61ms +[2025-09-11 11:08:53] [Rank 0] step:5341/10000 train_time:227592ms step_avg:42.61ms +[2025-09-11 11:08:54] [Rank 0] step:5361/10000 train_time:228282ms step_avg:42.58ms +[2025-09-11 11:08:54] [Rank 0] step:5361/10000 train_time:228282ms step_avg:42.58ms 
+[2025-09-11 11:08:55] [Rank 0] step:5381/10000 train_time:228973ms step_avg:42.55ms +[2025-09-11 11:08:55] [Rank 0] step:5381/10000 train_time:228973ms step_avg:42.55ms +[2025-09-11 11:08:56] [Rank 0] step:5401/10000 train_time:229954ms step_avg:42.58ms +[2025-09-11 11:08:56] [Rank 0] step:5401/10000 train_time:229954ms step_avg:42.58ms +[2025-09-11 11:08:57] [Rank 0] step:5421/10000 train_time:230645ms step_avg:42.55ms +[2025-09-11 11:08:57] [Rank 0] step:5421/10000 train_time:230645ms step_avg:42.55ms +[2025-09-11 11:08:57] [Rank 0] step:5441/10000 train_time:231335ms step_avg:42.52ms +[2025-09-11 11:08:57] [Rank 0] step:5441/10000 train_time:231335ms step_avg:42.52ms +[2025-09-11 11:08:58] [Rank 0] step:5461/10000 train_time:232025ms step_avg:42.49ms +[2025-09-11 11:08:58] [Rank 0] step:5461/10000 train_time:232025ms step_avg:42.49ms +[2025-09-11 11:08:59] [Rank 0] step:5481/10000 train_time:232715ms step_avg:42.46ms +[2025-09-11 11:08:59] [Rank 0] step:5481/10000 train_time:232715ms step_avg:42.46ms +[2025-09-11 11:08:59] [Rank 0] step:5501/10000 train_time:233404ms step_avg:42.43ms +[2025-09-11 11:08:59] [Rank 0] step:5501/10000 train_time:233404ms step_avg:42.43ms +[2025-09-11 11:09:00] [Rank 0] step:5521/10000 train_time:234094ms step_avg:42.40ms +[2025-09-11 11:09:00] [Rank 0] step:5521/10000 train_time:234094ms step_avg:42.40ms +[2025-09-11 11:09:01] [Rank 0] step:5541/10000 train_time:234786ms step_avg:42.37ms +[2025-09-11 11:09:01] [Rank 0] step:5541/10000 train_time:234786ms step_avg:42.37ms +[2025-09-11 11:09:01] [Rank 0] step:5561/10000 train_time:235477ms step_avg:42.34ms +[2025-09-11 11:09:01] [Rank 0] step:5561/10000 train_time:235477ms step_avg:42.34ms +[2025-09-11 11:09:02] [Rank 0] step:5581/10000 train_time:236167ms step_avg:42.32ms +[2025-09-11 11:09:02] [Rank 0] step:5581/10000 train_time:236167ms step_avg:42.32ms +[2025-09-11 11:09:03] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 11:09:03] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 11:09:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 11:09:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 11:09:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 11:09:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 11:09:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:09:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:09:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 11:09:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 11:09:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 11:09:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 11:09:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 11:09:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 11:09:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 11:09:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 11:09:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 11:09:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 11:09:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 11:09:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 11:09:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 11:09:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 11:09:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 11:09:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 11:09:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 11:09:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 11:09:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 11:09:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 11:09:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 11:09:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 11:09:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 11:09:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 11:09:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 11:09:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 11:09:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 11:09:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 11:09:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 11:09:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 11:09:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 11:09:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 11:09:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 11:09:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 11:09:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 11:09:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 11:09:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:09:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:09:16] [Rank 0] PRINT: step:5600/10000 val_loss:4.5384 total_sharp:3.2854e-04 L1_sharp:2.7185e-02 L2_sharp:3.1527e-02 L3_sharp:4.5227e-02 L4_sharp:6.9145e-02 L5_sharp:8.3036e-02 L6_sharp:1.2519e-01 L7_sharp:1.4345e-01 L8_sharp:1.6024e-01 L9_sharp:1.8076e-01 L10_sharp:2.4854e-01 L11_sharp:3.4074e-01 L12_sharp:1.2060e+00 total_fnorm:3.3250e+01 total_l1_linf:5.2992e+04 total_spectral:1.6750e+01 L1_fnorm:1.1475e-01 L2_fnorm:1.1426e-01 L3_fnorm:1.1475e-01 L4_fnorm:1.1475e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1377e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1230e-01 L9_fnorm:1.1426e-01 L10_fnorm:1.1426e-01 L11_fnorm:1.1377e-01 L12_fnorm:1.1426e-01 L1_l1linf:2.9175e-02 L2_l1linf:2.8931e-02 L3_l1linf:2.9053e-02 L4_l1linf:2.9541e-02 L5_l1linf:2.8564e-02 L6_l1linf:2.9053e-02 L7_l1linf:2.8564e-02 L8_l1linf:2.8809e-02 L9_l1linf:2.9053e-02 L10_l1linf:2.8931e-02 L11_l1linf:2.9419e-02 L12_l1linf:3.1250e-02 L1_spectral:1.6063e-03 L2_spectral:1.6148e-03 L3_spectral:1.6071e-03 L4_spectral:1.6134e-03 L5_spectral:1.6008e-03 L6_spectral:1.6133e-03 L7_spectral:1.6093e-03 L8_spectral:1.6064e-03 L9_spectral:1.6201e-03 L10_spectral:1.6170e-03 L11_spectral:1.6141e-03 L12_spectral:1.6226e-03 train_time:236838ms step_avg:42.29ms +[2025-09-11 11:09:16] [Rank 0] PRINT: step:5600/10000 
val_loss:4.5384 total_sharp:3.2854e-04 L1_sharp:2.7185e-02 L2_sharp:3.1527e-02 L3_sharp:4.5227e-02 L4_sharp:6.9145e-02 L5_sharp:8.3036e-02 L6_sharp:1.2519e-01 L7_sharp:1.4345e-01 L8_sharp:1.6024e-01 L9_sharp:1.8076e-01 L10_sharp:2.4854e-01 L11_sharp:3.4074e-01 L12_sharp:1.2060e+00 total_fnorm:3.3250e+01 total_l1_linf:5.2992e+04 total_spectral:1.6750e+01 L1_fnorm:1.1475e-01 L2_fnorm:1.1426e-01 L3_fnorm:1.1475e-01 L4_fnorm:1.1475e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1377e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1230e-01 L9_fnorm:1.1426e-01 L10_fnorm:1.1426e-01 L11_fnorm:1.1377e-01 L12_fnorm:1.1426e-01 L1_l1linf:2.9175e-02 L2_l1linf:2.8931e-02 L3_l1linf:2.9053e-02 L4_l1linf:2.9541e-02 L5_l1linf:2.8564e-02 L6_l1linf:2.9053e-02 L7_l1linf:2.8564e-02 L8_l1linf:2.8809e-02 L9_l1linf:2.9053e-02 L10_l1linf:2.8931e-02 L11_l1linf:2.9419e-02 L12_l1linf:3.1250e-02 L1_spectral:1.6063e-03 L2_spectral:1.6148e-03 L3_spectral:1.6071e-03 L4_spectral:1.6134e-03 L5_spectral:1.6008e-03 L6_spectral:1.6133e-03 L7_spectral:1.6093e-03 L8_spectral:1.6064e-03 L9_spectral:1.6201e-03 L10_spectral:1.6170e-03 L11_spectral:1.6141e-03 L12_spectral:1.6226e-03 train_time:236838ms step_avg:42.29ms +[2025-09-11 11:09:17] [Rank 0] step:5601/10000 train_time:238146ms step_avg:42.52ms +[2025-09-11 11:09:17] [Rank 0] step:5601/10000 train_time:238146ms step_avg:42.52ms +[2025-09-11 11:09:18] [Rank 0] step:5621/10000 train_time:238875ms step_avg:42.50ms +[2025-09-11 11:09:18] [Rank 0] step:5621/10000 train_time:238875ms step_avg:42.50ms +[2025-09-11 11:09:19] [Rank 0] step:5641/10000 train_time:239565ms step_avg:42.47ms +[2025-09-11 11:09:19] [Rank 0] step:5641/10000 train_time:239565ms step_avg:42.47ms +[2025-09-11 11:09:20] [Rank 0] step:5661/10000 train_time:240253ms step_avg:42.44ms +[2025-09-11 11:09:20] [Rank 0] step:5661/10000 train_time:240253ms step_avg:42.44ms +[2025-09-11 11:09:20] [Rank 0] step:5681/10000 train_time:240944ms step_avg:42.41ms +[2025-09-11 11:09:20] [Rank 0] step:5681/10000 
train_time:240944ms step_avg:42.41ms +[2025-09-11 11:09:21] [Rank 0] step:5701/10000 train_time:241635ms step_avg:42.38ms +[2025-09-11 11:09:21] [Rank 0] step:5701/10000 train_time:241635ms step_avg:42.38ms +[2025-09-11 11:09:22] [Rank 0] step:5721/10000 train_time:242324ms step_avg:42.36ms +[2025-09-11 11:09:22] [Rank 0] step:5721/10000 train_time:242324ms step_avg:42.36ms +[2025-09-11 11:09:22] [Rank 0] step:5741/10000 train_time:243015ms step_avg:42.33ms +[2025-09-11 11:09:22] [Rank 0] step:5741/10000 train_time:243015ms step_avg:42.33ms +[2025-09-11 11:09:23] [Rank 0] step:5761/10000 train_time:243706ms step_avg:42.30ms +[2025-09-11 11:09:23] [Rank 0] step:5761/10000 train_time:243706ms step_avg:42.30ms +[2025-09-11 11:09:24] [Rank 0] step:5781/10000 train_time:244397ms step_avg:42.28ms +[2025-09-11 11:09:24] [Rank 0] step:5781/10000 train_time:244397ms step_avg:42.28ms +[2025-09-11 11:09:24] [Rank 0] step:5801/10000 train_time:245088ms step_avg:42.25ms +[2025-09-11 11:09:24] [Rank 0] step:5801/10000 train_time:245088ms step_avg:42.25ms +[2025-09-11 11:09:25] [Rank 0] step:5821/10000 train_time:245777ms step_avg:42.22ms +[2025-09-11 11:09:25] [Rank 0] step:5821/10000 train_time:245777ms step_avg:42.22ms +[2025-09-11 11:09:26] [Rank 0] step:5841/10000 train_time:246468ms step_avg:42.20ms +[2025-09-11 11:09:26] [Rank 0] step:5841/10000 train_time:246468ms step_avg:42.20ms +[2025-09-11 11:09:26] [Rank 0] step:5861/10000 train_time:247158ms step_avg:42.17ms +[2025-09-11 11:09:26] [Rank 0] step:5861/10000 train_time:247158ms step_avg:42.17ms +[2025-09-11 11:09:27] [Rank 0] step:5881/10000 train_time:247847ms step_avg:42.14ms +[2025-09-11 11:09:27] [Rank 0] step:5881/10000 train_time:247847ms step_avg:42.14ms +[2025-09-11 11:09:28] [Rank 0] step:5901/10000 train_time:248536ms step_avg:42.12ms +[2025-09-11 11:09:28] [Rank 0] step:5901/10000 train_time:248536ms step_avg:42.12ms +[2025-09-11 11:09:29] [Rank 0] step:5921/10000 train_time:249228ms step_avg:42.09ms 
+[2025-09-11 11:09:29] [Rank 0] step:5921/10000 train_time:249228ms step_avg:42.09ms +[2025-09-11 11:09:29] [Rank 0] step:5941/10000 train_time:249920ms step_avg:42.07ms +[2025-09-11 11:09:29] [Rank 0] step:5941/10000 train_time:249920ms step_avg:42.07ms +[2025-09-11 11:09:30] [Rank 0] step:5961/10000 train_time:250611ms step_avg:42.04ms +[2025-09-11 11:09:30] [Rank 0] step:5961/10000 train_time:250611ms step_avg:42.04ms +[2025-09-11 11:09:31] [Rank 0] step:5981/10000 train_time:251302ms step_avg:42.02ms +[2025-09-11 11:09:31] [Rank 0] step:5981/10000 train_time:251302ms step_avg:42.02ms +[2025-09-11 11:09:31] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 11:09:31] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 11:09:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 11:09:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 11:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 11:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 11:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 11:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 11:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 11:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 11:09:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 11:09:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 11:09:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 11:09:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 11:09:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 11:09:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 11:09:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 11:09:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 11:09:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 11:09:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 11:09:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 11:09:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 11:09:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 11:09:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 11:09:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 11:09:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 11:09:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 11:09:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 11:09:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 11:09:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 11:09:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 11:09:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 11:09:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 11:09:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 11:09:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 11:09:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 11:09:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 11:09:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 11:09:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 11:09:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 11:09:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 11:09:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 11:09:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:09:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:09:41] [Rank 0] PRINT: step:6000/10000 val_loss:4.4968 total_sharp:3.0171e-04 L1_sharp:2.1275e-02 L2_sharp:2.9127e-02 L3_sharp:4.2079e-02 L4_sharp:5.8300e-02 L5_sharp:7.6787e-02 L6_sharp:9.7830e-02 L7_sharp:1.2878e-01 L8_sharp:1.5405e-01 L9_sharp:1.7780e-01 L10_sharp:2.5195e-01 L11_sharp:3.3407e-01 L12_sharp:8.4466e-01 total_fnorm:3.4250e+01 total_l1_linf:5.4016e+04 total_spectral:1.7125e+01 L1_fnorm:1.1426e-01 L2_fnorm:1.1426e-01 L3_fnorm:1.1426e-01 L4_fnorm:1.1426e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1328e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1230e-01 L9_fnorm:1.1377e-01 L10_fnorm:1.1426e-01 L11_fnorm:1.1328e-01 L12_fnorm:1.1426e-01 L1_l1linf:2.7954e-02 L2_l1linf:2.8442e-02 L3_l1linf:2.8320e-02 L4_l1linf:2.8564e-02 L5_l1linf:2.8320e-02 L6_l1linf:2.8198e-02 L7_l1linf:2.8931e-02 L8_l1linf:2.8687e-02 L9_l1linf:2.7954e-02 L10_l1linf:2.8198e-02 L11_l1linf:2.9175e-02 L12_l1linf:3.0640e-02 L1_spectral:1.6009e-03 L2_spectral:1.5936e-03 L3_spectral:1.6157e-03 L4_spectral:1.6051e-03 L5_spectral:1.6025e-03 L6_spectral:1.5945e-03 L7_spectral:1.6006e-03 L8_spectral:1.6001e-03 L9_spectral:1.6192e-03 L10_spectral:1.6203e-03 L11_spectral:1.6134e-03 L12_spectral:1.6039e-03 train_time:251975ms step_avg:42.00ms +[2025-09-11 11:09:41] [Rank 0] PRINT: step:6000/10000 val_loss:4.4968 total_sharp:3.0171e-04 L1_sharp:2.1275e-02 L2_sharp:2.9127e-02 L3_sharp:4.2079e-02 L4_sharp:5.8300e-02 L5_sharp:7.6787e-02 L6_sharp:9.7830e-02 L7_sharp:1.2878e-01 L8_sharp:1.5405e-01 L9_sharp:1.7780e-01 L10_sharp:2.5195e-01 L11_sharp:3.3407e-01 L12_sharp:8.4466e-01 total_fnorm:3.4250e+01 total_l1_linf:5.4016e+04 total_spectral:1.7125e+01 L1_fnorm:1.1426e-01 L2_fnorm:1.1426e-01 L3_fnorm:1.1426e-01 L4_fnorm:1.1426e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1328e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1230e-01 L9_fnorm:1.1377e-01 L10_fnorm:1.1426e-01 L11_fnorm:1.1328e-01 L12_fnorm:1.1426e-01 L1_l1linf:2.7954e-02 L2_l1linf:2.8442e-02 L3_l1linf:2.8320e-02 L4_l1linf:2.8564e-02 L5_l1linf:2.8320e-02 
L6_l1linf:2.8198e-02 L7_l1linf:2.8931e-02 L8_l1linf:2.8687e-02 L9_l1linf:2.7954e-02 L10_l1linf:2.8198e-02 L11_l1linf:2.9175e-02 L12_l1linf:3.0640e-02 L1_spectral:1.6009e-03 L2_spectral:1.5936e-03 L3_spectral:1.6157e-03 L4_spectral:1.6051e-03 L5_spectral:1.6025e-03 L6_spectral:1.5945e-03 L7_spectral:1.6006e-03 L8_spectral:1.6001e-03 L9_spectral:1.6192e-03 L10_spectral:1.6203e-03 L11_spectral:1.6134e-03 L12_spectral:1.6039e-03 train_time:251975ms step_avg:42.00ms +[2025-09-11 11:09:43] [Rank 0] step:6001/10000 train_time:253285ms step_avg:42.21ms +[2025-09-11 11:09:43] [Rank 0] step:6001/10000 train_time:253285ms step_avg:42.21ms +[2025-09-11 11:09:43] [Rank 0] step:6021/10000 train_time:254011ms step_avg:42.19ms +[2025-09-11 11:09:43] [Rank 0] step:6021/10000 train_time:254011ms step_avg:42.19ms +[2025-09-11 11:09:44] [Rank 0] step:6041/10000 train_time:254707ms step_avg:42.16ms +[2025-09-11 11:09:44] [Rank 0] step:6041/10000 train_time:254707ms step_avg:42.16ms +[2025-09-11 11:09:45] [Rank 0] step:6061/10000 train_time:255400ms step_avg:42.14ms +[2025-09-11 11:09:45] [Rank 0] step:6061/10000 train_time:255400ms step_avg:42.14ms +[2025-09-11 11:09:45] [Rank 0] step:6081/10000 train_time:256093ms step_avg:42.11ms +[2025-09-11 11:09:45] [Rank 0] step:6081/10000 train_time:256093ms step_avg:42.11ms +[2025-09-11 11:09:46] [Rank 0] step:6101/10000 train_time:256785ms step_avg:42.09ms +[2025-09-11 11:09:46] [Rank 0] step:6101/10000 train_time:256785ms step_avg:42.09ms +[2025-09-11 11:09:47] [Rank 0] step:6121/10000 train_time:257480ms step_avg:42.06ms +[2025-09-11 11:09:47] [Rank 0] step:6121/10000 train_time:257480ms step_avg:42.06ms +[2025-09-11 11:09:47] [Rank 0] step:6141/10000 train_time:258173ms step_avg:42.04ms +[2025-09-11 11:09:47] [Rank 0] step:6141/10000 train_time:258173ms step_avg:42.04ms +[2025-09-11 11:09:48] [Rank 0] step:6161/10000 train_time:258865ms step_avg:42.02ms +[2025-09-11 11:09:48] [Rank 0] step:6161/10000 train_time:258865ms step_avg:42.02ms 
+[2025-09-11 11:09:49] [Rank 0] step:6181/10000 train_time:259556ms step_avg:41.99ms +[2025-09-11 11:09:49] [Rank 0] step:6181/10000 train_time:259556ms step_avg:41.99ms +[2025-09-11 11:09:50] [Rank 0] step:6201/10000 train_time:260250ms step_avg:41.97ms +[2025-09-11 11:09:50] [Rank 0] step:6201/10000 train_time:260250ms step_avg:41.97ms +[2025-09-11 11:09:50] [Rank 0] step:6221/10000 train_time:260944ms step_avg:41.95ms +[2025-09-11 11:09:50] [Rank 0] step:6221/10000 train_time:260944ms step_avg:41.95ms +[2025-09-11 11:09:51] [Rank 0] step:6241/10000 train_time:261638ms step_avg:41.92ms +[2025-09-11 11:09:51] [Rank 0] step:6241/10000 train_time:261638ms step_avg:41.92ms +[2025-09-11 11:09:52] [Rank 0] step:6261/10000 train_time:262329ms step_avg:41.90ms +[2025-09-11 11:09:52] [Rank 0] step:6261/10000 train_time:262329ms step_avg:41.90ms +[2025-09-11 11:09:52] [Rank 0] step:6281/10000 train_time:263022ms step_avg:41.88ms +[2025-09-11 11:09:52] [Rank 0] step:6281/10000 train_time:263022ms step_avg:41.88ms +[2025-09-11 11:09:53] [Rank 0] step:6301/10000 train_time:263714ms step_avg:41.85ms +[2025-09-11 11:09:53] [Rank 0] step:6301/10000 train_time:263714ms step_avg:41.85ms +[2025-09-11 11:09:54] [Rank 0] step:6321/10000 train_time:264409ms step_avg:41.83ms +[2025-09-11 11:09:54] [Rank 0] step:6321/10000 train_time:264409ms step_avg:41.83ms +[2025-09-11 11:09:54] [Rank 0] step:6341/10000 train_time:265103ms step_avg:41.81ms +[2025-09-11 11:09:54] [Rank 0] step:6341/10000 train_time:265103ms step_avg:41.81ms +[2025-09-11 11:09:55] [Rank 0] step:6361/10000 train_time:265797ms step_avg:41.79ms +[2025-09-11 11:09:55] [Rank 0] step:6361/10000 train_time:265797ms step_avg:41.79ms +[2025-09-11 11:09:56] [Rank 0] step:6381/10000 train_time:266493ms step_avg:41.76ms +[2025-09-11 11:09:56] [Rank 0] step:6381/10000 train_time:266493ms step_avg:41.76ms +[2025-09-11 11:09:57] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 11:09:57] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 11:09:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 11:09:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 11:09:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 11:09:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 11:09:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:09:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:09:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 11:09:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 11:09:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 11:09:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 11:10:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 11:10:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 11:10:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 11:10:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 11:10:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 11:10:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 11:10:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 11:10:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 11:10:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 11:10:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 11:10:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 11:10:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 11:10:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 11:10:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 11:10:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 11:10:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 11:10:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 11:10:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 11:10:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 11:10:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 11:10:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 11:10:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 11:10:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 11:10:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 11:10:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 11:10:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 11:10:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 11:10:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 11:10:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 11:10:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 11:10:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 11:10:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 11:10:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:10:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:10:06] [Rank 0] PRINT: step:6400/10000 val_loss:4.4611 total_sharp:3.6192e-04 L1_sharp:1.8504e-02 L2_sharp:2.9795e-02 L3_sharp:4.3297e-02 L4_sharp:5.9970e-02 L5_sharp:8.2775e-02 L6_sharp:1.1913e-01 L7_sharp:1.4225e-01 L8_sharp:1.6241e-01 L9_sharp:2.0085e-01 L10_sharp:2.9322e-01 L11_sharp:3.8060e-01 L12_sharp:1.7893e+00 total_fnorm:2.9500e+01 total_l1_linf:4.4544e+04 total_spectral:1.4750e+01 L1_fnorm:1.0010e-01 L2_fnorm:1.0010e-01 L3_fnorm:1.0010e-01 L4_fnorm:1.0010e-01 L5_fnorm:1.0010e-01 L6_fnorm:9.9609e-02 L7_fnorm:1.0010e-01 L8_fnorm:9.7656e-02 L9_fnorm:1.0010e-01 L10_fnorm:1.0010e-01 L11_fnorm:9.9609e-02 L12_fnorm:9.9609e-02 L1_l1linf:2.4414e-02 L2_l1linf:2.4170e-02 L3_l1linf:2.3926e-02 L4_l1linf:2.4170e-02 L5_l1linf:2.4170e-02 L6_l1linf:2.4170e-02 L7_l1linf:2.4292e-02 L8_l1linf:2.4292e-02 L9_l1linf:2.4414e-02 L10_l1linf:2.4780e-02 L11_l1linf:2.4048e-02 L12_l1linf:2.6245e-02 L1_spectral:1.4548e-03 L2_spectral:1.4523e-03 L3_spectral:1.4459e-03 L4_spectral:1.4554e-03 L5_spectral:1.4535e-03 L6_spectral:1.4450e-03 L7_spectral:1.4507e-03 L8_spectral:1.4356e-03 L9_spectral:1.4588e-03 L10_spectral:1.4477e-03 L11_spectral:1.4478e-03 L12_spectral:1.4384e-03 train_time:267435ms step_avg:41.79ms +[2025-09-11 11:10:06] [Rank 0] PRINT: step:6400/10000 
val_loss:4.4611 total_sharp:3.6192e-04 L1_sharp:1.8504e-02 L2_sharp:2.9795e-02 L3_sharp:4.3297e-02 L4_sharp:5.9970e-02 L5_sharp:8.2775e-02 L6_sharp:1.1913e-01 L7_sharp:1.4225e-01 L8_sharp:1.6241e-01 L9_sharp:2.0085e-01 L10_sharp:2.9322e-01 L11_sharp:3.8060e-01 L12_sharp:1.7893e+00 total_fnorm:2.9500e+01 total_l1_linf:4.4544e+04 total_spectral:1.4750e+01 L1_fnorm:1.0010e-01 L2_fnorm:1.0010e-01 L3_fnorm:1.0010e-01 L4_fnorm:1.0010e-01 L5_fnorm:1.0010e-01 L6_fnorm:9.9609e-02 L7_fnorm:1.0010e-01 L8_fnorm:9.7656e-02 L9_fnorm:1.0010e-01 L10_fnorm:1.0010e-01 L11_fnorm:9.9609e-02 L12_fnorm:9.9609e-02 L1_l1linf:2.4414e-02 L2_l1linf:2.4170e-02 L3_l1linf:2.3926e-02 L4_l1linf:2.4170e-02 L5_l1linf:2.4170e-02 L6_l1linf:2.4170e-02 L7_l1linf:2.4292e-02 L8_l1linf:2.4292e-02 L9_l1linf:2.4414e-02 L10_l1linf:2.4780e-02 L11_l1linf:2.4048e-02 L12_l1linf:2.6245e-02 L1_spectral:1.4548e-03 L2_spectral:1.4523e-03 L3_spectral:1.4459e-03 L4_spectral:1.4554e-03 L5_spectral:1.4535e-03 L6_spectral:1.4450e-03 L7_spectral:1.4507e-03 L8_spectral:1.4356e-03 L9_spectral:1.4588e-03 L10_spectral:1.4477e-03 L11_spectral:1.4478e-03 L12_spectral:1.4384e-03 train_time:267435ms step_avg:41.79ms +[2025-09-11 11:10:08] [Rank 0] step:6401/10000 train_time:268753ms step_avg:41.99ms +[2025-09-11 11:10:08] [Rank 0] step:6401/10000 train_time:268753ms step_avg:41.99ms +[2025-09-11 11:10:09] [Rank 0] step:6421/10000 train_time:269486ms step_avg:41.97ms +[2025-09-11 11:10:09] [Rank 0] step:6421/10000 train_time:269486ms step_avg:41.97ms +[2025-09-11 11:10:09] [Rank 0] step:6441/10000 train_time:270178ms step_avg:41.95ms +[2025-09-11 11:10:09] [Rank 0] step:6441/10000 train_time:270178ms step_avg:41.95ms +[2025-09-11 11:10:10] [Rank 0] step:6461/10000 train_time:270871ms step_avg:41.92ms +[2025-09-11 11:10:10] [Rank 0] step:6461/10000 train_time:270871ms step_avg:41.92ms +[2025-09-11 11:10:11] [Rank 0] step:6481/10000 train_time:271567ms step_avg:41.90ms +[2025-09-11 11:10:11] [Rank 0] step:6481/10000 
train_time:271567ms step_avg:41.90ms +[2025-09-11 11:10:11] [Rank 0] step:6501/10000 train_time:272263ms step_avg:41.88ms +[2025-09-11 11:10:11] [Rank 0] step:6501/10000 train_time:272263ms step_avg:41.88ms +[2025-09-11 11:10:12] [Rank 0] step:6521/10000 train_time:272956ms step_avg:41.86ms +[2025-09-11 11:10:12] [Rank 0] step:6521/10000 train_time:272956ms step_avg:41.86ms +[2025-09-11 11:10:13] [Rank 0] step:6541/10000 train_time:273648ms step_avg:41.84ms +[2025-09-11 11:10:13] [Rank 0] step:6541/10000 train_time:273648ms step_avg:41.84ms +[2025-09-11 11:10:13] [Rank 0] step:6561/10000 train_time:274341ms step_avg:41.81ms +[2025-09-11 11:10:13] [Rank 0] step:6561/10000 train_time:274341ms step_avg:41.81ms +[2025-09-11 11:10:14] [Rank 0] step:6581/10000 train_time:275035ms step_avg:41.79ms +[2025-09-11 11:10:14] [Rank 0] step:6581/10000 train_time:275035ms step_avg:41.79ms +[2025-09-11 11:10:15] [Rank 0] step:6601/10000 train_time:275727ms step_avg:41.77ms +[2025-09-11 11:10:15] [Rank 0] step:6601/10000 train_time:275727ms step_avg:41.77ms +[2025-09-11 11:10:15] [Rank 0] step:6621/10000 train_time:276418ms step_avg:41.75ms +[2025-09-11 11:10:15] [Rank 0] step:6621/10000 train_time:276418ms step_avg:41.75ms +[2025-09-11 11:10:16] [Rank 0] step:6641/10000 train_time:277111ms step_avg:41.73ms +[2025-09-11 11:10:16] [Rank 0] step:6641/10000 train_time:277111ms step_avg:41.73ms +[2025-09-11 11:10:17] [Rank 0] step:6661/10000 train_time:277804ms step_avg:41.71ms +[2025-09-11 11:10:17] [Rank 0] step:6661/10000 train_time:277804ms step_avg:41.71ms +[2025-09-11 11:10:18] [Rank 0] step:6681/10000 train_time:278503ms step_avg:41.69ms +[2025-09-11 11:10:18] [Rank 0] step:6681/10000 train_time:278503ms step_avg:41.69ms +[2025-09-11 11:10:18] [Rank 0] step:6701/10000 train_time:279203ms step_avg:41.67ms +[2025-09-11 11:10:18] [Rank 0] step:6701/10000 train_time:279203ms step_avg:41.67ms +[2025-09-11 11:10:19] [Rank 0] step:6721/10000 train_time:279902ms step_avg:41.65ms 
+[2025-09-11 11:10:19] [Rank 0] step:6721/10000 train_time:279902ms step_avg:41.65ms +[2025-09-11 11:10:20] [Rank 0] step:6741/10000 train_time:280602ms step_avg:41.63ms +[2025-09-11 11:10:20] [Rank 0] step:6741/10000 train_time:280602ms step_avg:41.63ms +[2025-09-11 11:10:20] [Rank 0] step:6761/10000 train_time:281300ms step_avg:41.61ms +[2025-09-11 11:10:20] [Rank 0] step:6761/10000 train_time:281300ms step_avg:41.61ms +[2025-09-11 11:10:21] [Rank 0] step:6781/10000 train_time:281999ms step_avg:41.59ms +[2025-09-11 11:10:21] [Rank 0] step:6781/10000 train_time:281999ms step_avg:41.59ms +[2025-09-11 11:10:22] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 11:10:22] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 11:10:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 11:10:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 11:10:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 11:10:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 11:10:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:10:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:10:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 11:10:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 11:10:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 11:10:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 11:10:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 11:10:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 11:10:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 11:10:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 11:10:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 11:10:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 11:10:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 11:10:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 11:10:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 11:10:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 11:10:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 11:10:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 11:10:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 11:10:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 11:10:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 11:10:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 11:10:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 11:10:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 11:10:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 11:10:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 11:10:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 11:10:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 11:10:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 11:10:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 11:10:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 11:10:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 11:10:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 11:10:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 11:10:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 11:10:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 11:10:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 11:10:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 11:10:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:10:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:10:33] [Rank 0] PRINT: step:6800/10000 val_loss:4.4343 total_sharp:2.3037e-04 L1_sharp:1.7470e-02 L2_sharp:2.4117e-02 L3_sharp:3.5800e-02 L4_sharp:5.0376e-02 L5_sharp:8.6039e-02 L6_sharp:1.2109e-01 L7_sharp:1.5222e-01 L8_sharp:1.5734e-01 L9_sharp:1.8065e-01 L10_sharp:2.3161e-01 L11_sharp:3.0765e-01 L12_sharp:8.7492e-01 total_fnorm:2.8625e+01 total_l1_linf:4.2752e+04 total_spectral:1.4312e+01 L1_fnorm:8.5938e-02 L2_fnorm:8.5449e-02 L3_fnorm:8.5449e-02 L4_fnorm:8.5449e-02 L5_fnorm:8.5449e-02 L6_fnorm:8.5449e-02 L7_fnorm:8.5449e-02 L8_fnorm:8.3984e-02 L9_fnorm:8.5449e-02 L10_fnorm:8.5449e-02 L11_fnorm:8.4473e-02 L12_fnorm:8.4473e-02 L1_l1linf:1.9531e-02 L2_l1linf:1.9531e-02 L3_l1linf:2.0264e-02 L4_l1linf:1.9409e-02 L5_l1linf:2.0020e-02 L6_l1linf:1.9897e-02 L7_l1linf:2.0020e-02 L8_l1linf:1.9897e-02 L9_l1linf:1.9775e-02 L10_l1linf:1.9531e-02 L11_l1linf:1.9531e-02 L12_l1linf:2.1484e-02 L1_spectral:1.2925e-03 L2_spectral:1.2909e-03 L3_spectral:1.2982e-03 L4_spectral:1.2889e-03 L5_spectral:1.3004e-03 L6_spectral:1.2948e-03 L7_spectral:1.2897e-03 L8_spectral:1.2609e-03 L9_spectral:1.2996e-03 L10_spectral:1.2822e-03 L11_spectral:1.2769e-03 L12_spectral:1.2455e-03 train_time:282678ms step_avg:41.57ms +[2025-09-11 11:10:33] [Rank 0] PRINT: step:6800/10000 val_loss:4.4343 total_sharp:2.3037e-04 L1_sharp:1.7470e-02 L2_sharp:2.4117e-02 L3_sharp:3.5800e-02 L4_sharp:5.0376e-02 L5_sharp:8.6039e-02 L6_sharp:1.2109e-01 L7_sharp:1.5222e-01 L8_sharp:1.5734e-01 L9_sharp:1.8065e-01 L10_sharp:2.3161e-01 L11_sharp:3.0765e-01 L12_sharp:8.7492e-01 total_fnorm:2.8625e+01 total_l1_linf:4.2752e+04 total_spectral:1.4312e+01 L1_fnorm:8.5938e-02 L2_fnorm:8.5449e-02 L3_fnorm:8.5449e-02 L4_fnorm:8.5449e-02 L5_fnorm:8.5449e-02 L6_fnorm:8.5449e-02 L7_fnorm:8.5449e-02 L8_fnorm:8.3984e-02 L9_fnorm:8.5449e-02 L10_fnorm:8.5449e-02 L11_fnorm:8.4473e-02 L12_fnorm:8.4473e-02 L1_l1linf:1.9531e-02 L2_l1linf:1.9531e-02 L3_l1linf:2.0264e-02 L4_l1linf:1.9409e-02 L5_l1linf:2.0020e-02 
L6_l1linf:1.9897e-02 L7_l1linf:2.0020e-02 L8_l1linf:1.9897e-02 L9_l1linf:1.9775e-02 L10_l1linf:1.9531e-02 L11_l1linf:1.9531e-02 L12_l1linf:2.1484e-02 L1_spectral:1.2925e-03 L2_spectral:1.2909e-03 L3_spectral:1.2982e-03 L4_spectral:1.2889e-03 L5_spectral:1.3004e-03 L6_spectral:1.2948e-03 L7_spectral:1.2897e-03 L8_spectral:1.2609e-03 L9_spectral:1.2996e-03 L10_spectral:1.2822e-03 L11_spectral:1.2769e-03 L12_spectral:1.2455e-03 train_time:282678ms step_avg:41.57ms +[2025-09-11 11:10:35] [Rank 0] step:6801/10000 train_time:284756ms step_avg:41.87ms +[2025-09-11 11:10:35] [Rank 0] step:6801/10000 train_time:284756ms step_avg:41.87ms +[2025-09-11 11:10:35] [Rank 0] step:6821/10000 train_time:285477ms step_avg:41.85ms +[2025-09-11 11:10:35] [Rank 0] step:6821/10000 train_time:285477ms step_avg:41.85ms +[2025-09-11 11:10:36] [Rank 0] step:6841/10000 train_time:286180ms step_avg:41.83ms +[2025-09-11 11:10:36] [Rank 0] step:6841/10000 train_time:286180ms step_avg:41.83ms +[2025-09-11 11:10:37] [Rank 0] step:6861/10000 train_time:286881ms step_avg:41.81ms +[2025-09-11 11:10:37] [Rank 0] step:6861/10000 train_time:286881ms step_avg:41.81ms +[2025-09-11 11:10:37] [Rank 0] step:6881/10000 train_time:287583ms step_avg:41.79ms +[2025-09-11 11:10:37] [Rank 0] step:6881/10000 train_time:287583ms step_avg:41.79ms +[2025-09-11 11:10:38] [Rank 0] step:6901/10000 train_time:288283ms step_avg:41.77ms +[2025-09-11 11:10:38] [Rank 0] step:6901/10000 train_time:288283ms step_avg:41.77ms +[2025-09-11 11:10:39] [Rank 0] step:6921/10000 train_time:288983ms step_avg:41.75ms +[2025-09-11 11:10:39] [Rank 0] step:6921/10000 train_time:288983ms step_avg:41.75ms +[2025-09-11 11:10:40] [Rank 0] step:6941/10000 train_time:289682ms step_avg:41.73ms +[2025-09-11 11:10:40] [Rank 0] step:6941/10000 train_time:289682ms step_avg:41.73ms +[2025-09-11 11:10:40] [Rank 0] step:6961/10000 train_time:290384ms step_avg:41.72ms +[2025-09-11 11:10:40] [Rank 0] step:6961/10000 train_time:290384ms step_avg:41.72ms 
+[2025-09-11 11:10:41] [Rank 0] step:6981/10000 train_time:291087ms step_avg:41.70ms +[2025-09-11 11:10:41] [Rank 0] step:6981/10000 train_time:291087ms step_avg:41.70ms +[2025-09-11 11:10:42] [Rank 0] step:7001/10000 train_time:291788ms step_avg:41.68ms +[2025-09-11 11:10:42] [Rank 0] step:7001/10000 train_time:291788ms step_avg:41.68ms +[2025-09-11 11:10:42] [Rank 0] step:7021/10000 train_time:292489ms step_avg:41.66ms +[2025-09-11 11:10:42] [Rank 0] step:7021/10000 train_time:292489ms step_avg:41.66ms +[2025-09-11 11:10:43] [Rank 0] step:7041/10000 train_time:293188ms step_avg:41.64ms +[2025-09-11 11:10:43] [Rank 0] step:7041/10000 train_time:293188ms step_avg:41.64ms +[2025-09-11 11:10:44] [Rank 0] step:7061/10000 train_time:293889ms step_avg:41.62ms +[2025-09-11 11:10:44] [Rank 0] step:7061/10000 train_time:293889ms step_avg:41.62ms +[2025-09-11 11:10:45] [Rank 0] step:7081/10000 train_time:294590ms step_avg:41.60ms +[2025-09-11 11:10:45] [Rank 0] step:7081/10000 train_time:294590ms step_avg:41.60ms +[2025-09-11 11:10:45] [Rank 0] step:7101/10000 train_time:295291ms step_avg:41.58ms +[2025-09-11 11:10:45] [Rank 0] step:7101/10000 train_time:295291ms step_avg:41.58ms +[2025-09-11 11:10:46] [Rank 0] step:7121/10000 train_time:295993ms step_avg:41.57ms +[2025-09-11 11:10:46] [Rank 0] step:7121/10000 train_time:295993ms step_avg:41.57ms +[2025-09-11 11:10:47] [Rank 0] step:7141/10000 train_time:296694ms step_avg:41.55ms +[2025-09-11 11:10:47] [Rank 0] step:7141/10000 train_time:296694ms step_avg:41.55ms +[2025-09-11 11:10:47] [Rank 0] step:7161/10000 train_time:297395ms step_avg:41.53ms +[2025-09-11 11:10:47] [Rank 0] step:7161/10000 train_time:297395ms step_avg:41.53ms +[2025-09-11 11:10:48] [Rank 0] step:7181/10000 train_time:298095ms step_avg:41.51ms +[2025-09-11 11:10:48] [Rank 0] step:7181/10000 train_time:298095ms step_avg:41.51ms +[2025-09-11 11:10:49] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 11:10:49] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 11:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 11:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 11:10:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 11:10:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 11:10:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:10:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:10:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 11:10:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 11:10:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 11:10:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 11:10:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 11:10:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 11:10:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 11:10:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 11:10:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 11:10:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 11:10:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 11:10:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 11:10:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 11:10:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 11:10:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 11:10:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 11:10:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 11:10:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 11:10:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 11:10:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 11:10:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 11:10:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 11:10:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 11:10:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 11:10:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 11:10:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 11:10:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 11:10:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 11:10:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 11:10:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 11:10:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 11:10:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 11:10:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 11:10:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 11:10:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 11:10:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 11:11:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:11:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:11:00] [Rank 0] PRINT: step:7200/10000 val_loss:4.4034 total_sharp:2.2923e-04 L1_sharp:1.8883e-02 L2_sharp:2.1769e-02 L3_sharp:3.8952e-02 L4_sharp:5.7553e-02 L5_sharp:7.9872e-02 L6_sharp:1.1388e-01 L7_sharp:1.3149e-01 L8_sharp:1.5017e-01 L9_sharp:1.7778e-01 L10_sharp:2.3253e-01 L11_sharp:3.3620e-01 L12_sharp:9.1130e-01 total_fnorm:2.4375e+01 total_l1_linf:3.4304e+04 total_spectral:1.2188e+01 L1_fnorm:7.2266e-02 L2_fnorm:7.2266e-02 L3_fnorm:7.2266e-02 L4_fnorm:7.2266e-02 L5_fnorm:7.2266e-02 L6_fnorm:7.2266e-02 L7_fnorm:7.2266e-02 L8_fnorm:7.0801e-02 L9_fnorm:7.2266e-02 L10_fnorm:7.2266e-02 L11_fnorm:7.1289e-02 L12_fnorm:7.0801e-02 L1_l1linf:1.5625e-02 L2_l1linf:1.6113e-02 L3_l1linf:1.6113e-02 L4_l1linf:1.5991e-02 L5_l1linf:1.5991e-02 L6_l1linf:1.5869e-02 L7_l1linf:1.6113e-02 L8_l1linf:1.6479e-02 L9_l1linf:1.5991e-02 L10_l1linf:1.6113e-02 L11_l1linf:1.6113e-02 L12_l1linf:1.7456e-02 L1_spectral:1.1498e-03 L2_spectral:1.1426e-03 L3_spectral:1.1483e-03 L4_spectral:1.1380e-03 L5_spectral:1.1344e-03 L6_spectral:1.1382e-03 L7_spectral:1.1335e-03 L8_spectral:1.1002e-03 L9_spectral:1.1381e-03 L10_spectral:1.1278e-03 L11_spectral:1.1142e-03 L12_spectral:1.0669e-03 train_time:298776ms step_avg:41.50ms +[2025-09-11 11:11:00] [Rank 0] PRINT: step:7200/10000 
val_loss:4.4034 total_sharp:2.2923e-04 L1_sharp:1.8883e-02 L2_sharp:2.1769e-02 L3_sharp:3.8952e-02 L4_sharp:5.7553e-02 L5_sharp:7.9872e-02 L6_sharp:1.1388e-01 L7_sharp:1.3149e-01 L8_sharp:1.5017e-01 L9_sharp:1.7778e-01 L10_sharp:2.3253e-01 L11_sharp:3.3620e-01 L12_sharp:9.1130e-01 total_fnorm:2.4375e+01 total_l1_linf:3.4304e+04 total_spectral:1.2188e+01 L1_fnorm:7.2266e-02 L2_fnorm:7.2266e-02 L3_fnorm:7.2266e-02 L4_fnorm:7.2266e-02 L5_fnorm:7.2266e-02 L6_fnorm:7.2266e-02 L7_fnorm:7.2266e-02 L8_fnorm:7.0801e-02 L9_fnorm:7.2266e-02 L10_fnorm:7.2266e-02 L11_fnorm:7.1289e-02 L12_fnorm:7.0801e-02 L1_l1linf:1.5625e-02 L2_l1linf:1.6113e-02 L3_l1linf:1.6113e-02 L4_l1linf:1.5991e-02 L5_l1linf:1.5991e-02 L6_l1linf:1.5869e-02 L7_l1linf:1.6113e-02 L8_l1linf:1.6479e-02 L9_l1linf:1.5991e-02 L10_l1linf:1.6113e-02 L11_l1linf:1.6113e-02 L12_l1linf:1.7456e-02 L1_spectral:1.1498e-03 L2_spectral:1.1426e-03 L3_spectral:1.1483e-03 L4_spectral:1.1380e-03 L5_spectral:1.1344e-03 L6_spectral:1.1382e-03 L7_spectral:1.1335e-03 L8_spectral:1.1002e-03 L9_spectral:1.1381e-03 L10_spectral:1.1278e-03 L11_spectral:1.1142e-03 L12_spectral:1.0669e-03 train_time:298776ms step_avg:41.50ms +[2025-09-11 11:11:02] [Rank 0] step:7201/10000 train_time:300914ms step_avg:41.79ms +[2025-09-11 11:11:02] [Rank 0] step:7201/10000 train_time:300914ms step_avg:41.79ms +[2025-09-11 11:11:03] [Rank 0] step:7221/10000 train_time:301733ms step_avg:41.79ms +[2025-09-11 11:11:03] [Rank 0] step:7221/10000 train_time:301733ms step_avg:41.79ms +[2025-09-11 11:11:03] [Rank 0] step:7241/10000 train_time:302435ms step_avg:41.77ms +[2025-09-11 11:11:03] [Rank 0] step:7241/10000 train_time:302435ms step_avg:41.77ms +[2025-09-11 11:11:04] [Rank 0] step:7261/10000 train_time:303138ms step_avg:41.75ms +[2025-09-11 11:11:04] [Rank 0] step:7261/10000 train_time:303138ms step_avg:41.75ms +[2025-09-11 11:11:05] [Rank 0] step:7281/10000 train_time:303846ms step_avg:41.73ms +[2025-09-11 11:11:05] [Rank 0] step:7281/10000 
train_time:303846ms step_avg:41.73ms +[2025-09-11 11:11:05] [Rank 0] step:7301/10000 train_time:304546ms step_avg:41.71ms +[2025-09-11 11:11:05] [Rank 0] step:7301/10000 train_time:304546ms step_avg:41.71ms +[2025-09-11 11:11:06] [Rank 0] step:7321/10000 train_time:305248ms step_avg:41.69ms +[2025-09-11 11:11:06] [Rank 0] step:7321/10000 train_time:305248ms step_avg:41.69ms +[2025-09-11 11:11:07] [Rank 0] step:7341/10000 train_time:305950ms step_avg:41.68ms +[2025-09-11 11:11:07] [Rank 0] step:7341/10000 train_time:305950ms step_avg:41.68ms +[2025-09-11 11:11:07] [Rank 0] step:7361/10000 train_time:306651ms step_avg:41.66ms +[2025-09-11 11:11:07] [Rank 0] step:7361/10000 train_time:306651ms step_avg:41.66ms +[2025-09-11 11:11:08] [Rank 0] step:7381/10000 train_time:307354ms step_avg:41.64ms +[2025-09-11 11:11:08] [Rank 0] step:7381/10000 train_time:307354ms step_avg:41.64ms +[2025-09-11 11:11:09] [Rank 0] step:7401/10000 train_time:308054ms step_avg:41.62ms +[2025-09-11 11:11:09] [Rank 0] step:7401/10000 train_time:308054ms step_avg:41.62ms +[2025-09-11 11:11:10] [Rank 0] step:7421/10000 train_time:308755ms step_avg:41.61ms +[2025-09-11 11:11:10] [Rank 0] step:7421/10000 train_time:308755ms step_avg:41.61ms +[2025-09-11 11:11:10] [Rank 0] step:7441/10000 train_time:309457ms step_avg:41.59ms +[2025-09-11 11:11:10] [Rank 0] step:7441/10000 train_time:309457ms step_avg:41.59ms +[2025-09-11 11:11:11] [Rank 0] step:7461/10000 train_time:310165ms step_avg:41.57ms +[2025-09-11 11:11:11] [Rank 0] step:7461/10000 train_time:310165ms step_avg:41.57ms +[2025-09-11 11:11:12] [Rank 0] step:7481/10000 train_time:310875ms step_avg:41.56ms +[2025-09-11 11:11:12] [Rank 0] step:7481/10000 train_time:310875ms step_avg:41.56ms +[2025-09-11 11:11:12] [Rank 0] step:7501/10000 train_time:311577ms step_avg:41.54ms +[2025-09-11 11:11:12] [Rank 0] step:7501/10000 train_time:311577ms step_avg:41.54ms +[2025-09-11 11:11:13] [Rank 0] step:7521/10000 train_time:312281ms step_avg:41.52ms 
+[2025-09-11 11:11:13] [Rank 0] step:7521/10000 train_time:312281ms step_avg:41.52ms +[2025-09-11 11:11:14] [Rank 0] step:7541/10000 train_time:312980ms step_avg:41.50ms +[2025-09-11 11:11:14] [Rank 0] step:7541/10000 train_time:312980ms step_avg:41.50ms +[2025-09-11 11:11:14] [Rank 0] step:7561/10000 train_time:313685ms step_avg:41.49ms +[2025-09-11 11:11:14] [Rank 0] step:7561/10000 train_time:313685ms step_avg:41.49ms +[2025-09-11 11:11:15] [Rank 0] step:7581/10000 train_time:314388ms step_avg:41.47ms +[2025-09-11 11:11:15] [Rank 0] step:7581/10000 train_time:314388ms step_avg:41.47ms +[2025-09-11 11:11:16] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 11:11:16] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 11:11:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 11:11:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 11:11:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 11:11:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 11:11:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:11:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:11:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 11:11:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 11:11:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 11:11:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 11:11:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 11:11:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 11:11:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 11:11:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 11:11:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 11:11:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 11:11:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 11:11:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 11:11:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 11:11:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 11:11:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 11:11:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 11:11:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 11:11:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 11:11:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 11:11:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 11:11:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 11:11:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 11:11:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 11:11:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 11:11:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 11:11:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 11:11:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 11:11:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 11:11:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 11:11:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 11:11:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 11:11:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 11:11:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 11:11:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 11:11:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 11:11:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 11:11:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:11:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:11:27] [Rank 0] PRINT: step:7600/10000 val_loss:4.3791 total_sharp:2.0084e-04 L1_sharp:1.6725e-02 L2_sharp:1.8666e-02 L3_sharp:3.3282e-02 L4_sharp:4.3277e-02 L5_sharp:6.8840e-02 L6_sharp:9.8620e-02 L7_sharp:1.1728e-01 L8_sharp:1.3878e-01 L9_sharp:1.5839e-01 L10_sharp:2.2257e-01 L11_sharp:3.0428e-01 L12_sharp:6.8690e-01 total_fnorm:1.9250e+01 total_l1_linf:2.4960e+04 total_spectral:9.6250e+00 L1_fnorm:5.9570e-02 L2_fnorm:5.9570e-02 L3_fnorm:5.9814e-02 L4_fnorm:5.9814e-02 L5_fnorm:5.9814e-02 L6_fnorm:5.9570e-02 L7_fnorm:5.9814e-02 L8_fnorm:5.7861e-02 L9_fnorm:5.9570e-02 L10_fnorm:5.9570e-02 L11_fnorm:5.8594e-02 L12_fnorm:5.7861e-02 L1_l1linf:1.2268e-02 L2_l1linf:1.2024e-02 L3_l1linf:1.2512e-02 L4_l1linf:1.2573e-02 L5_l1linf:1.2634e-02 L6_l1linf:1.2817e-02 L7_l1linf:1.2878e-02 L8_l1linf:1.3000e-02 L9_l1linf:1.2939e-02 L10_l1linf:1.2634e-02 L11_l1linf:1.2573e-02 L12_l1linf:1.3428e-02 L1_spectral:9.8314e-04 L2_spectral:9.8025e-04 L3_spectral:9.7717e-04 L4_spectral:9.8225e-04 L5_spectral:9.7228e-04 L6_spectral:9.7009e-04 L7_spectral:9.6434e-04 L8_spectral:9.1516e-04 L9_spectral:9.6555e-04 L10_spectral:9.7099e-04 L11_spectral:9.4108e-04 L12_spectral:8.9647e-04 train_time:315072ms step_avg:41.46ms +[2025-09-11 11:11:27] [Rank 0] PRINT: step:7600/10000 val_loss:4.3791 total_sharp:2.0084e-04 L1_sharp:1.6725e-02 L2_sharp:1.8666e-02 L3_sharp:3.3282e-02 L4_sharp:4.3277e-02 L5_sharp:6.8840e-02 L6_sharp:9.8620e-02 L7_sharp:1.1728e-01 L8_sharp:1.3878e-01 L9_sharp:1.5839e-01 L10_sharp:2.2257e-01 L11_sharp:3.0428e-01 L12_sharp:6.8690e-01 total_fnorm:1.9250e+01 total_l1_linf:2.4960e+04 total_spectral:9.6250e+00 L1_fnorm:5.9570e-02 L2_fnorm:5.9570e-02 L3_fnorm:5.9814e-02 L4_fnorm:5.9814e-02 L5_fnorm:5.9814e-02 L6_fnorm:5.9570e-02 L7_fnorm:5.9814e-02 L8_fnorm:5.7861e-02 L9_fnorm:5.9570e-02 L10_fnorm:5.9570e-02 L11_fnorm:5.8594e-02 L12_fnorm:5.7861e-02 L1_l1linf:1.2268e-02 L2_l1linf:1.2024e-02 L3_l1linf:1.2512e-02 L4_l1linf:1.2573e-02 L5_l1linf:1.2634e-02 
L6_l1linf:1.2817e-02 L7_l1linf:1.2878e-02 L8_l1linf:1.3000e-02 L9_l1linf:1.2939e-02 L10_l1linf:1.2634e-02 L11_l1linf:1.2573e-02 L12_l1linf:1.3428e-02 L1_spectral:9.8314e-04 L2_spectral:9.8025e-04 L3_spectral:9.7717e-04 L4_spectral:9.8225e-04 L5_spectral:9.7228e-04 L6_spectral:9.7009e-04 L7_spectral:9.6434e-04 L8_spectral:9.1516e-04 L9_spectral:9.6555e-04 L10_spectral:9.7099e-04 L11_spectral:9.4108e-04 L12_spectral:8.9647e-04 train_time:315072ms step_avg:41.46ms +[2025-09-11 11:11:29] [Rank 0] step:7601/10000 train_time:317276ms step_avg:41.74ms +[2025-09-11 11:11:29] [Rank 0] step:7601/10000 train_time:317276ms step_avg:41.74ms +[2025-09-11 11:11:29] [Rank 0] step:7621/10000 train_time:317998ms step_avg:41.73ms +[2025-09-11 11:11:29] [Rank 0] step:7621/10000 train_time:317998ms step_avg:41.73ms +[2025-09-11 11:11:30] [Rank 0] step:7641/10000 train_time:318703ms step_avg:41.71ms +[2025-09-11 11:11:30] [Rank 0] step:7641/10000 train_time:318703ms step_avg:41.71ms +[2025-09-11 11:11:31] [Rank 0] step:7661/10000 train_time:319404ms step_avg:41.69ms +[2025-09-11 11:11:31] [Rank 0] step:7661/10000 train_time:319404ms step_avg:41.69ms +[2025-09-11 11:11:32] [Rank 0] step:7681/10000 train_time:320107ms step_avg:41.68ms +[2025-09-11 11:11:32] [Rank 0] step:7681/10000 train_time:320107ms step_avg:41.68ms +[2025-09-11 11:11:32] [Rank 0] step:7701/10000 train_time:320810ms step_avg:41.66ms +[2025-09-11 11:11:32] [Rank 0] step:7701/10000 train_time:320810ms step_avg:41.66ms +[2025-09-11 11:11:33] [Rank 0] step:7721/10000 train_time:321514ms step_avg:41.64ms +[2025-09-11 11:11:33] [Rank 0] step:7721/10000 train_time:321514ms step_avg:41.64ms +[2025-09-11 11:11:34] [Rank 0] step:7741/10000 train_time:322217ms step_avg:41.62ms +[2025-09-11 11:11:34] [Rank 0] step:7741/10000 train_time:322217ms step_avg:41.62ms +[2025-09-11 11:11:34] [Rank 0] step:7761/10000 train_time:322919ms step_avg:41.61ms +[2025-09-11 11:11:34] [Rank 0] step:7761/10000 train_time:322919ms step_avg:41.61ms 
+[2025-09-11 11:11:35] [Rank 0] step:7781/10000 train_time:323624ms step_avg:41.59ms +[2025-09-11 11:11:35] [Rank 0] step:7781/10000 train_time:323624ms step_avg:41.59ms +[2025-09-11 11:11:36] [Rank 0] step:7801/10000 train_time:324326ms step_avg:41.57ms +[2025-09-11 11:11:36] [Rank 0] step:7801/10000 train_time:324326ms step_avg:41.57ms +[2025-09-11 11:11:37] [Rank 0] step:7821/10000 train_time:325028ms step_avg:41.56ms +[2025-09-11 11:11:37] [Rank 0] step:7821/10000 train_time:325028ms step_avg:41.56ms +[2025-09-11 11:11:37] [Rank 0] step:7841/10000 train_time:325732ms step_avg:41.54ms +[2025-09-11 11:11:37] [Rank 0] step:7841/10000 train_time:325732ms step_avg:41.54ms +[2025-09-11 11:11:38] [Rank 0] step:7861/10000 train_time:326437ms step_avg:41.53ms +[2025-09-11 11:11:38] [Rank 0] step:7861/10000 train_time:326437ms step_avg:41.53ms +[2025-09-11 11:11:39] [Rank 0] step:7881/10000 train_time:327140ms step_avg:41.51ms +[2025-09-11 11:11:39] [Rank 0] step:7881/10000 train_time:327140ms step_avg:41.51ms +[2025-09-11 11:11:39] [Rank 0] step:7901/10000 train_time:327844ms step_avg:41.49ms +[2025-09-11 11:11:39] [Rank 0] step:7901/10000 train_time:327844ms step_avg:41.49ms +[2025-09-11 11:11:40] [Rank 0] step:7921/10000 train_time:328547ms step_avg:41.48ms +[2025-09-11 11:11:40] [Rank 0] step:7921/10000 train_time:328547ms step_avg:41.48ms +[2025-09-11 11:11:41] [Rank 0] step:7941/10000 train_time:329251ms step_avg:41.46ms +[2025-09-11 11:11:41] [Rank 0] step:7941/10000 train_time:329251ms step_avg:41.46ms +[2025-09-11 11:11:41] [Rank 0] step:7961/10000 train_time:329952ms step_avg:41.45ms +[2025-09-11 11:11:41] [Rank 0] step:7961/10000 train_time:329952ms step_avg:41.45ms +[2025-09-11 11:11:42] [Rank 0] step:7981/10000 train_time:330657ms step_avg:41.43ms +[2025-09-11 11:11:42] [Rank 0] step:7981/10000 train_time:330657ms step_avg:41.43ms +[2025-09-11 11:11:43] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 11:11:43] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 11:11:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 11:11:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 11:11:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 11:11:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 11:11:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:11:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:11:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 11:11:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 11:11:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 11:11:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 11:11:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 11:11:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 11:11:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 11:11:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 11:11:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 11:11:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 11:11:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 11:11:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 11:11:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 11:11:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 11:11:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 11:11:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 11:11:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 11:11:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 11:11:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 11:11:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 11:11:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 11:11:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 11:11:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 11:11:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 11:11:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 11:11:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 11:11:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 11:11:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 11:11:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 11:11:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 11:11:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 11:11:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 11:11:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 11:11:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 11:11:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 11:11:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 11:11:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:11:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:11:54] [Rank 0] PRINT: step:8000/10000 val_loss:4.3628 total_sharp:1.9464e-04 L1_sharp:1.9606e-02 L2_sharp:2.0961e-02 L3_sharp:3.2925e-02 L4_sharp:4.7973e-02 L5_sharp:7.5306e-02 L6_sharp:1.1981e-01 L7_sharp:1.4169e-01 L8_sharp:1.4442e-01 L9_sharp:1.7681e-01 L10_sharp:2.3257e-01 L11_sharp:3.1803e-01 L12_sharp:1.9162e+00 total_fnorm:1.6375e+01 total_l1_linf:1.9968e+04 total_spectral:8.1875e+00 L1_fnorm:4.7607e-02 L2_fnorm:4.7852e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.8096e-02 L5_fnorm:4.7852e-02 L6_fnorm:4.7852e-02 L7_fnorm:4.8096e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.7852e-02 L10_fnorm:4.7607e-02 L11_fnorm:4.7119e-02 L12_fnorm:4.6387e-02 L1_l1linf:9.2773e-03 L2_l1linf:9.1553e-03 L3_l1linf:9.3384e-03 L4_l1linf:9.7046e-03 L5_l1linf:9.6436e-03 L6_l1linf:9.5215e-03 L7_l1linf:9.8877e-03 L8_l1linf:9.4604e-03 L9_l1linf:9.5825e-03 L10_l1linf:9.3994e-03 L11_l1linf:9.2163e-03 L12_l1linf:9.7656e-03 L1_spectral:8.1412e-04 L2_spectral:8.1282e-04 L3_spectral:8.1622e-04 L4_spectral:8.2395e-04 L5_spectral:8.1595e-04 L6_spectral:8.1392e-04 L7_spectral:8.0317e-04 L8_spectral:7.6429e-04 L9_spectral:8.1192e-04 L10_spectral:8.0369e-04 L11_spectral:7.8102e-04 L12_spectral:7.2299e-04 train_time:331337ms step_avg:41.42ms +[2025-09-11 11:11:54] [Rank 0] PRINT: step:8000/10000 
val_loss:4.3628 total_sharp:1.9464e-04 L1_sharp:1.9606e-02 L2_sharp:2.0961e-02 L3_sharp:3.2925e-02 L4_sharp:4.7973e-02 L5_sharp:7.5306e-02 L6_sharp:1.1981e-01 L7_sharp:1.4169e-01 L8_sharp:1.4442e-01 L9_sharp:1.7681e-01 L10_sharp:2.3257e-01 L11_sharp:3.1803e-01 L12_sharp:1.9162e+00 total_fnorm:1.6375e+01 total_l1_linf:1.9968e+04 total_spectral:8.1875e+00 L1_fnorm:4.7607e-02 L2_fnorm:4.7852e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.8096e-02 L5_fnorm:4.7852e-02 L6_fnorm:4.7852e-02 L7_fnorm:4.8096e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.7852e-02 L10_fnorm:4.7607e-02 L11_fnorm:4.7119e-02 L12_fnorm:4.6387e-02 L1_l1linf:9.2773e-03 L2_l1linf:9.1553e-03 L3_l1linf:9.3384e-03 L4_l1linf:9.7046e-03 L5_l1linf:9.6436e-03 L6_l1linf:9.5215e-03 L7_l1linf:9.8877e-03 L8_l1linf:9.4604e-03 L9_l1linf:9.5825e-03 L10_l1linf:9.3994e-03 L11_l1linf:9.2163e-03 L12_l1linf:9.7656e-03 L1_spectral:8.1412e-04 L2_spectral:8.1282e-04 L3_spectral:8.1622e-04 L4_spectral:8.2395e-04 L5_spectral:8.1595e-04 L6_spectral:8.1392e-04 L7_spectral:8.0317e-04 L8_spectral:7.6429e-04 L9_spectral:8.1192e-04 L10_spectral:8.0369e-04 L11_spectral:7.8102e-04 L12_spectral:7.2299e-04 train_time:331337ms step_avg:41.42ms +[2025-09-11 11:11:56] [Rank 0] step:8001/10000 train_time:333512ms step_avg:41.68ms +[2025-09-11 11:11:56] [Rank 0] step:8001/10000 train_time:333512ms step_avg:41.68ms +[2025-09-11 11:11:57] [Rank 0] step:8021/10000 train_time:334233ms step_avg:41.67ms +[2025-09-11 11:11:57] [Rank 0] step:8021/10000 train_time:334233ms step_avg:41.67ms +[2025-09-11 11:11:57] [Rank 0] step:8041/10000 train_time:334937ms step_avg:41.65ms +[2025-09-11 11:11:57] [Rank 0] step:8041/10000 train_time:334937ms step_avg:41.65ms +[2025-09-11 11:11:58] [Rank 0] step:8061/10000 train_time:335643ms step_avg:41.64ms +[2025-09-11 11:11:58] [Rank 0] step:8061/10000 train_time:335643ms step_avg:41.64ms +[2025-09-11 11:11:59] [Rank 0] step:8081/10000 train_time:336346ms step_avg:41.62ms +[2025-09-11 11:11:59] [Rank 0] step:8081/10000 
train_time:336346ms step_avg:41.62ms +[2025-09-11 11:11:59] [Rank 0] step:8101/10000 train_time:337048ms step_avg:41.61ms +[2025-09-11 11:11:59] [Rank 0] step:8101/10000 train_time:337048ms step_avg:41.61ms +[2025-09-11 11:12:00] [Rank 0] step:8121/10000 train_time:337756ms step_avg:41.59ms +[2025-09-11 11:12:00] [Rank 0] step:8121/10000 train_time:337756ms step_avg:41.59ms +[2025-09-11 11:12:02] [Rank 0] step:8141/10000 train_time:339195ms step_avg:41.66ms +[2025-09-11 11:12:02] [Rank 0] step:8141/10000 train_time:339195ms step_avg:41.66ms +[2025-09-11 11:12:03] [Rank 0] step:8161/10000 train_time:339935ms step_avg:41.65ms +[2025-09-11 11:12:03] [Rank 0] step:8161/10000 train_time:339935ms step_avg:41.65ms +[2025-09-11 11:12:03] [Rank 0] step:8181/10000 train_time:340844ms step_avg:41.66ms +[2025-09-11 11:12:03] [Rank 0] step:8181/10000 train_time:340844ms step_avg:41.66ms +[2025-09-11 11:12:04] [Rank 0] step:8201/10000 train_time:341555ms step_avg:41.65ms +[2025-09-11 11:12:04] [Rank 0] step:8201/10000 train_time:341555ms step_avg:41.65ms +[2025-09-11 11:12:05] [Rank 0] step:8221/10000 train_time:342266ms step_avg:41.63ms +[2025-09-11 11:12:05] [Rank 0] step:8221/10000 train_time:342266ms step_avg:41.63ms +[2025-09-11 11:12:06] [Rank 0] step:8241/10000 train_time:343264ms step_avg:41.65ms +[2025-09-11 11:12:06] [Rank 0] step:8241/10000 train_time:343264ms step_avg:41.65ms +[2025-09-11 11:12:06] [Rank 0] step:8261/10000 train_time:343973ms step_avg:41.64ms +[2025-09-11 11:12:06] [Rank 0] step:8261/10000 train_time:343973ms step_avg:41.64ms +[2025-09-11 11:12:07] [Rank 0] step:8281/10000 train_time:344680ms step_avg:41.62ms +[2025-09-11 11:12:07] [Rank 0] step:8281/10000 train_time:344680ms step_avg:41.62ms +[2025-09-11 11:12:08] [Rank 0] step:8301/10000 train_time:345390ms step_avg:41.61ms +[2025-09-11 11:12:08] [Rank 0] step:8301/10000 train_time:345390ms step_avg:41.61ms +[2025-09-11 11:12:09] [Rank 0] step:8321/10000 train_time:346099ms step_avg:41.59ms 
+[2025-09-11 11:12:09] [Rank 0] step:8321/10000 train_time:346099ms step_avg:41.59ms +[2025-09-11 11:12:09] [Rank 0] step:8341/10000 train_time:346815ms step_avg:41.58ms +[2025-09-11 11:12:09] [Rank 0] step:8341/10000 train_time:346815ms step_avg:41.58ms +[2025-09-11 11:12:10] [Rank 0] step:8361/10000 train_time:347520ms step_avg:41.56ms +[2025-09-11 11:12:10] [Rank 0] step:8361/10000 train_time:347520ms step_avg:41.56ms +[2025-09-11 11:12:11] [Rank 0] step:8381/10000 train_time:348233ms step_avg:41.55ms +[2025-09-11 11:12:11] [Rank 0] step:8381/10000 train_time:348233ms step_avg:41.55ms +[2025-09-11 11:12:11] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 11:12:11] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 11:12:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 11:12:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 11:12:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 11:12:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 11:12:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:12:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:12:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 11:12:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 11:12:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 11:12:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 11:12:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 11:12:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 11:12:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 11:12:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 11:12:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 11:12:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 11:12:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 11:12:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 11:12:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 11:12:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 11:12:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 11:12:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 11:12:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 11:12:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 11:12:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 11:12:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 11:12:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 11:12:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 11:12:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 11:12:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 11:12:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 11:12:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 11:12:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 11:12:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 11:12:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 11:12:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 11:12:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 11:12:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 11:12:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 11:12:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 11:12:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 11:12:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 11:12:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 11:12:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:12:22] [Rank 0] PRINT: step:8400/10000 val_loss:4.3476 total_sharp:1.5850e-04 L1_sharp:1.6149e-02 L2_sharp:2.0440e-02 L3_sharp:2.8122e-02 L4_sharp:4.3097e-02 L5_sharp:6.0108e-02 L6_sharp:9.7703e-02 L7_sharp:1.3775e-01 L8_sharp:1.2842e-01 L9_sharp:1.4862e-01 L10_sharp:1.8715e-01 L11_sharp:2.3685e-01 L12_sharp:7.5161e-01 total_fnorm:1.2125e+01 total_l1_linf:1.3312e+04 total_spectral:6.0625e+00 L1_fnorm:3.6865e-02 L2_fnorm:3.7109e-02 L3_fnorm:3.7109e-02 L4_fnorm:3.7109e-02 L5_fnorm:3.6865e-02 L6_fnorm:3.7109e-02 L7_fnorm:3.7109e-02 L8_fnorm:3.6133e-02 L9_fnorm:3.7109e-02 L10_fnorm:3.6865e-02 L11_fnorm:3.6377e-02 L12_fnorm:3.5645e-02 L1_l1linf:6.5002e-03 L2_l1linf:6.5613e-03 L3_l1linf:6.8665e-03 L4_l1linf:6.8054e-03 L5_l1linf:6.8970e-03 L6_l1linf:6.8359e-03 L7_l1linf:6.9580e-03 L8_l1linf:6.8359e-03 L9_l1linf:6.8665e-03 L10_l1linf:6.8054e-03 L11_l1linf:6.5918e-03 L12_l1linf:7.0801e-03 L1_spectral:6.5473e-04 L2_spectral:6.5531e-04 L3_spectral:6.5027e-04 L4_spectral:6.5479e-04 L5_spectral:6.4549e-04 L6_spectral:6.3588e-04 L7_spectral:6.4694e-04 L8_spectral:5.9629e-04 L9_spectral:6.3815e-04 L10_spectral:6.3958e-04 L11_spectral:6.1345e-04 L12_spectral:5.7122e-04 train_time:348928ms step_avg:41.54ms +[2025-09-11 11:12:22] [Rank 0] PRINT: step:8400/10000 val_loss:4.3476 total_sharp:1.5850e-04 L1_sharp:1.6149e-02 L2_sharp:2.0440e-02 L3_sharp:2.8122e-02 L4_sharp:4.3097e-02 L5_sharp:6.0108e-02 L6_sharp:9.7703e-02 L7_sharp:1.3775e-01 L8_sharp:1.2842e-01 L9_sharp:1.4862e-01 L10_sharp:1.8715e-01 L11_sharp:2.3685e-01 L12_sharp:7.5161e-01 total_fnorm:1.2125e+01 total_l1_linf:1.3312e+04 total_spectral:6.0625e+00 L1_fnorm:3.6865e-02 L2_fnorm:3.7109e-02 L3_fnorm:3.7109e-02 L4_fnorm:3.7109e-02 L5_fnorm:3.6865e-02 L6_fnorm:3.7109e-02 L7_fnorm:3.7109e-02 L8_fnorm:3.6133e-02 L9_fnorm:3.7109e-02 L10_fnorm:3.6865e-02 L11_fnorm:3.6377e-02 L12_fnorm:3.5645e-02 L1_l1linf:6.5002e-03 L2_l1linf:6.5613e-03 L3_l1linf:6.8665e-03 L4_l1linf:6.8054e-03 L5_l1linf:6.8970e-03 
L6_l1linf:6.8359e-03 L7_l1linf:6.9580e-03 L8_l1linf:6.8359e-03 L9_l1linf:6.8665e-03 L10_l1linf:6.8054e-03 L11_l1linf:6.5918e-03 L12_l1linf:7.0801e-03 L1_spectral:6.5473e-04 L2_spectral:6.5531e-04 L3_spectral:6.5027e-04 L4_spectral:6.5479e-04 L5_spectral:6.4549e-04 L6_spectral:6.3588e-04 L7_spectral:6.4694e-04 L8_spectral:5.9629e-04 L9_spectral:6.3815e-04 L10_spectral:6.3958e-04 L11_spectral:6.1345e-04 L12_spectral:5.7122e-04 train_time:348928ms step_avg:41.54ms +[2025-09-11 11:12:24] [Rank 0] step:8401/10000 train_time:351120ms step_avg:41.80ms +[2025-09-11 11:12:24] [Rank 0] step:8401/10000 train_time:351120ms step_avg:41.80ms +[2025-09-11 11:12:25] [Rank 0] step:8421/10000 train_time:351859ms step_avg:41.78ms +[2025-09-11 11:12:25] [Rank 0] step:8421/10000 train_time:351859ms step_avg:41.78ms +[2025-09-11 11:12:26] [Rank 0] step:8441/10000 train_time:352570ms step_avg:41.77ms +[2025-09-11 11:12:26] [Rank 0] step:8441/10000 train_time:352570ms step_avg:41.77ms +[2025-09-11 11:12:27] [Rank 0] step:8461/10000 train_time:353282ms step_avg:41.75ms +[2025-09-11 11:12:27] [Rank 0] step:8461/10000 train_time:353282ms step_avg:41.75ms +[2025-09-11 11:12:27] [Rank 0] step:8481/10000 train_time:353994ms step_avg:41.74ms +[2025-09-11 11:12:27] [Rank 0] step:8481/10000 train_time:353994ms step_avg:41.74ms +[2025-09-11 11:12:28] [Rank 0] step:8501/10000 train_time:354703ms step_avg:41.72ms +[2025-09-11 11:12:28] [Rank 0] step:8501/10000 train_time:354703ms step_avg:41.72ms +[2025-09-11 11:12:29] [Rank 0] step:8521/10000 train_time:355413ms step_avg:41.71ms +[2025-09-11 11:12:29] [Rank 0] step:8521/10000 train_time:355413ms step_avg:41.71ms +[2025-09-11 11:12:29] [Rank 0] step:8541/10000 train_time:356124ms step_avg:41.70ms +[2025-09-11 11:12:29] [Rank 0] step:8541/10000 train_time:356124ms step_avg:41.70ms +[2025-09-11 11:12:30] [Rank 0] step:8561/10000 train_time:356839ms step_avg:41.68ms +[2025-09-11 11:12:30] [Rank 0] step:8561/10000 train_time:356839ms step_avg:41.68ms 
+[2025-09-11 11:12:31] [Rank 0] step:8581/10000 train_time:357553ms step_avg:41.67ms +[2025-09-11 11:12:31] [Rank 0] step:8581/10000 train_time:357553ms step_avg:41.67ms +[2025-09-11 11:12:32] [Rank 0] step:8601/10000 train_time:358264ms step_avg:41.65ms +[2025-09-11 11:12:32] [Rank 0] step:8601/10000 train_time:358264ms step_avg:41.65ms +[2025-09-11 11:12:32] [Rank 0] step:8621/10000 train_time:358973ms step_avg:41.64ms +[2025-09-11 11:12:32] [Rank 0] step:8621/10000 train_time:358973ms step_avg:41.64ms +[2025-09-11 11:12:33] [Rank 0] step:8641/10000 train_time:359683ms step_avg:41.63ms +[2025-09-11 11:12:33] [Rank 0] step:8641/10000 train_time:359683ms step_avg:41.63ms +[2025-09-11 11:12:34] [Rank 0] step:8661/10000 train_time:360393ms step_avg:41.61ms +[2025-09-11 11:12:34] [Rank 0] step:8661/10000 train_time:360393ms step_avg:41.61ms +[2025-09-11 11:12:34] [Rank 0] step:8681/10000 train_time:361104ms step_avg:41.60ms +[2025-09-11 11:12:34] [Rank 0] step:8681/10000 train_time:361104ms step_avg:41.60ms +[2025-09-11 11:12:35] [Rank 0] step:8701/10000 train_time:361813ms step_avg:41.58ms +[2025-09-11 11:12:35] [Rank 0] step:8701/10000 train_time:361813ms step_avg:41.58ms +[2025-09-11 11:12:36] [Rank 0] step:8721/10000 train_time:362526ms step_avg:41.57ms +[2025-09-11 11:12:36] [Rank 0] step:8721/10000 train_time:362526ms step_avg:41.57ms +[2025-09-11 11:12:37] [Rank 0] step:8741/10000 train_time:363232ms step_avg:41.56ms +[2025-09-11 11:12:37] [Rank 0] step:8741/10000 train_time:363232ms step_avg:41.56ms +[2025-09-11 11:12:37] [Rank 0] step:8761/10000 train_time:363945ms step_avg:41.54ms +[2025-09-11 11:12:37] [Rank 0] step:8761/10000 train_time:363945ms step_avg:41.54ms +[2025-09-11 11:12:38] [Rank 0] step:8781/10000 train_time:364654ms step_avg:41.53ms +[2025-09-11 11:12:38] [Rank 0] step:8781/10000 train_time:364654ms step_avg:41.53ms +[2025-09-11 11:12:39] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 11:12:39] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 11:12:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 11:12:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 11:12:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 11:12:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 11:12:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:12:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:12:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 11:12:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 11:12:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 11:12:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 11:12:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 11:12:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 11:12:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 11:12:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 11:12:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 11:12:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 11:12:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 11:12:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 11:12:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 11:12:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 11:12:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 11:12:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 11:12:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 11:12:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 11:12:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 11:12:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 11:12:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 11:12:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 11:12:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 11:12:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 11:12:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 11:12:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 11:12:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 11:12:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 11:12:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 11:12:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 11:12:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 11:12:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 11:12:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 11:12:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 11:12:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 11:12:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 11:12:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:12:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 11:12:49] [Rank 0] PRINT: step:8800/10000 val_loss:4.3434 total_sharp:1.2477e-04 L1_sharp:1.2141e-02 L2_sharp:1.7975e-02 L3_sharp:2.5601e-02 L4_sharp:3.9313e-02 L5_sharp:5.7112e-02 L6_sharp:7.5203e-02 L7_sharp:1.0004e-01 L8_sharp:1.0367e-01 L9_sharp:1.3375e-01 L10_sharp:1.7333e-01 L11_sharp:2.1523e-01 L12_sharp:5.6479e-01 total_fnorm:8.8750e+00 total_l1_linf:8.6400e+03 total_spectral:4.4375e+00 L1_fnorm:2.6245e-02 L2_fnorm:2.6245e-02 L3_fnorm:2.6367e-02 L4_fnorm:2.6367e-02 L5_fnorm:2.6367e-02 L6_fnorm:2.6367e-02 L7_fnorm:2.6611e-02 L8_fnorm:2.5757e-02 L9_fnorm:2.6367e-02 L10_fnorm:2.6245e-02 L11_fnorm:2.5879e-02 L12_fnorm:2.5146e-02 L1_l1linf:4.1809e-03 L2_l1linf:4.1504e-03 L3_l1linf:4.3335e-03 L4_l1linf:4.2725e-03 L5_l1linf:4.3335e-03 L6_l1linf:4.4250e-03 L7_l1linf:4.4861e-03 L8_l1linf:4.6082e-03 L9_l1linf:4.6692e-03 L10_l1linf:4.3945e-03 L11_l1linf:4.1809e-03 L12_l1linf:4.5166e-03 L1_spectral:4.7919e-04 L2_spectral:4.7016e-04 L3_spectral:4.7568e-04 L4_spectral:4.7742e-04 L5_spectral:4.7604e-04 L6_spectral:4.7003e-04 L7_spectral:4.7267e-04 L8_spectral:4.3958e-04 L9_spectral:4.6871e-04 L10_spectral:4.6387e-04 L11_spectral:4.4447e-04 L12_spectral:4.1368e-04 train_time:365342ms step_avg:41.52ms +[2025-09-11 11:12:49] [Rank 0] PRINT: step:8800/10000 
val_loss:4.3434 total_sharp:1.2477e-04 L1_sharp:1.2141e-02 L2_sharp:1.7975e-02 L3_sharp:2.5601e-02 L4_sharp:3.9313e-02 L5_sharp:5.7112e-02 L6_sharp:7.5203e-02 L7_sharp:1.0004e-01 L8_sharp:1.0367e-01 L9_sharp:1.3375e-01 L10_sharp:1.7333e-01 L11_sharp:2.1523e-01 L12_sharp:5.6479e-01 total_fnorm:8.8750e+00 total_l1_linf:8.6400e+03 total_spectral:4.4375e+00 L1_fnorm:2.6245e-02 L2_fnorm:2.6245e-02 L3_fnorm:2.6367e-02 L4_fnorm:2.6367e-02 L5_fnorm:2.6367e-02 L6_fnorm:2.6367e-02 L7_fnorm:2.6611e-02 L8_fnorm:2.5757e-02 L9_fnorm:2.6367e-02 L10_fnorm:2.6245e-02 L11_fnorm:2.5879e-02 L12_fnorm:2.5146e-02 L1_l1linf:4.1809e-03 L2_l1linf:4.1504e-03 L3_l1linf:4.3335e-03 L4_l1linf:4.2725e-03 L5_l1linf:4.3335e-03 L6_l1linf:4.4250e-03 L7_l1linf:4.4861e-03 L8_l1linf:4.6082e-03 L9_l1linf:4.6692e-03 L10_l1linf:4.3945e-03 L11_l1linf:4.1809e-03 L12_l1linf:4.5166e-03 L1_spectral:4.7919e-04 L2_spectral:4.7016e-04 L3_spectral:4.7568e-04 L4_spectral:4.7742e-04 L5_spectral:4.7604e-04 L6_spectral:4.7003e-04 L7_spectral:4.7267e-04 L8_spectral:4.3958e-04 L9_spectral:4.6871e-04 L10_spectral:4.6387e-04 L11_spectral:4.4447e-04 L12_spectral:4.1368e-04 train_time:365342ms step_avg:41.52ms +[2025-09-11 11:12:52] [Rank 0] step:8801/10000 train_time:367523ms step_avg:41.76ms +[2025-09-11 11:12:52] [Rank 0] step:8801/10000 train_time:367523ms step_avg:41.76ms +[2025-09-11 11:12:52] [Rank 0] step:8821/10000 train_time:368271ms step_avg:41.75ms +[2025-09-11 11:12:52] [Rank 0] step:8821/10000 train_time:368271ms step_avg:41.75ms +[2025-09-11 11:12:53] [Rank 0] step:8841/10000 train_time:368981ms step_avg:41.74ms +[2025-09-11 11:12:53] [Rank 0] step:8841/10000 train_time:368981ms step_avg:41.74ms +[2025-09-11 11:12:54] [Rank 0] step:8861/10000 train_time:369691ms step_avg:41.72ms +[2025-09-11 11:12:54] [Rank 0] step:8861/10000 train_time:369691ms step_avg:41.72ms +[2025-09-11 11:12:55] [Rank 0] step:8881/10000 train_time:370403ms step_avg:41.71ms +[2025-09-11 11:12:55] [Rank 0] step:8881/10000 
train_time:370403ms step_avg:41.71ms +[2025-09-11 11:12:55] [Rank 0] step:8901/10000 train_time:371116ms step_avg:41.69ms +[2025-09-11 11:12:55] [Rank 0] step:8901/10000 train_time:371116ms step_avg:41.69ms +[2025-09-11 11:12:56] [Rank 0] step:8921/10000 train_time:371824ms step_avg:41.68ms +[2025-09-11 11:12:56] [Rank 0] step:8921/10000 train_time:371824ms step_avg:41.68ms +[2025-09-11 11:12:57] [Rank 0] step:8941/10000 train_time:372538ms step_avg:41.67ms +[2025-09-11 11:12:57] [Rank 0] step:8941/10000 train_time:372538ms step_avg:41.67ms +[2025-09-11 11:12:57] [Rank 0] step:8961/10000 train_time:373257ms step_avg:41.65ms +[2025-09-11 11:12:57] [Rank 0] step:8961/10000 train_time:373257ms step_avg:41.65ms +[2025-09-11 11:12:58] [Rank 0] step:8981/10000 train_time:373971ms step_avg:41.64ms +[2025-09-11 11:12:58] [Rank 0] step:8981/10000 train_time:373971ms step_avg:41.64ms +[2025-09-11 11:12:59] [Rank 0] step:9001/10000 train_time:374677ms step_avg:41.63ms +[2025-09-11 11:12:59] [Rank 0] step:9001/10000 train_time:374677ms step_avg:41.63ms +[2025-09-11 11:13:00] [Rank 0] step:9021/10000 train_time:375388ms step_avg:41.61ms +[2025-09-11 11:13:00] [Rank 0] step:9021/10000 train_time:375388ms step_avg:41.61ms +[2025-09-11 11:13:00] [Rank 0] step:9041/10000 train_time:376101ms step_avg:41.60ms +[2025-09-11 11:13:00] [Rank 0] step:9041/10000 train_time:376101ms step_avg:41.60ms +[2025-09-11 11:13:01] [Rank 0] step:9061/10000 train_time:376810ms step_avg:41.59ms +[2025-09-11 11:13:01] [Rank 0] step:9061/10000 train_time:376810ms step_avg:41.59ms +[2025-09-11 11:13:02] [Rank 0] step:9081/10000 train_time:377524ms step_avg:41.57ms +[2025-09-11 11:13:02] [Rank 0] step:9081/10000 train_time:377524ms step_avg:41.57ms +[2025-09-11 11:13:02] [Rank 0] step:9101/10000 train_time:378239ms step_avg:41.56ms +[2025-09-11 11:13:02] [Rank 0] step:9101/10000 train_time:378239ms step_avg:41.56ms +[2025-09-11 11:13:03] [Rank 0] step:9121/10000 train_time:378954ms step_avg:41.55ms 
+[2025-09-11 11:13:03] [Rank 0] step:9121/10000 train_time:378954ms step_avg:41.55ms +[2025-09-11 11:13:04] [Rank 0] step:9141/10000 train_time:379664ms step_avg:41.53ms +[2025-09-11 11:13:04] [Rank 0] step:9141/10000 train_time:379664ms step_avg:41.53ms +[2025-09-11 11:13:05] [Rank 0] step:9161/10000 train_time:380378ms step_avg:41.52ms +[2025-09-11 11:13:05] [Rank 0] step:9161/10000 train_time:380378ms step_avg:41.52ms +[2025-09-11 11:13:05] [Rank 0] step:9181/10000 train_time:381091ms step_avg:41.51ms +[2025-09-11 11:13:05] [Rank 0] step:9181/10000 train_time:381091ms step_avg:41.51ms +[2025-09-11 11:13:06] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 11:13:06] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 11:13:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 11:13:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 11:13:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 11:13:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 11:13:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:13:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:13:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 11:13:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 11:13:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 11:13:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 11:13:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 11:13:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 11:13:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 11:13:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 11:13:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 11:13:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 11:13:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 11:13:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 11:13:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 11:13:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 11:13:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 11:13:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 11:13:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 11:13:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 11:13:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 11:13:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 11:13:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 11:13:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 11:13:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 11:13:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 11:13:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 11:13:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 11:13:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 11:13:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 11:13:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 11:13:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 11:13:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 11:13:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 11:13:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 11:13:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 11:13:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 11:13:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 11:13:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 11:13:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:13:17] [Rank 0] PRINT: step:9200/10000 val_loss:4.3311 total_sharp:1.3382e-04 L1_sharp:1.0036e-02 L2_sharp:1.6953e-02 L3_sharp:2.3020e-02 L4_sharp:3.5429e-02 L5_sharp:5.3220e-02 L6_sharp:7.9137e-02 L7_sharp:1.1059e-01 L8_sharp:1.1068e-01 L9_sharp:1.3292e-01 L10_sharp:1.5616e-01 L11_sharp:2.0264e-01 L12_sharp:1.1136e+00 total_fnorm:5.9688e+00 total_l1_linf:4.9920e+03 total_spectral:2.9844e+00 L1_fnorm:1.7456e-02 L2_fnorm:1.7456e-02 L3_fnorm:1.7578e-02 L4_fnorm:1.7578e-02 L5_fnorm:1.7578e-02 L6_fnorm:1.7578e-02 L7_fnorm:1.7578e-02 L8_fnorm:1.6968e-02 L9_fnorm:1.7456e-02 L10_fnorm:1.7456e-02 L11_fnorm:1.7090e-02 L12_fnorm:1.6846e-02 L1_l1linf:2.6093e-03 L2_l1linf:2.6093e-03 L3_l1linf:2.7161e-03 L4_l1linf:2.6855e-03 L5_l1linf:2.6398e-03 L6_l1linf:2.6703e-03 L7_l1linf:2.8076e-03 L8_l1linf:2.6550e-03 L9_l1linf:2.6245e-03 L10_l1linf:2.8229e-03 L11_l1linf:2.6093e-03 L12_l1linf:2.6398e-03 L1_spectral:3.2205e-04 L2_spectral:3.1750e-04 L3_spectral:3.2540e-04 L4_spectral:3.2200e-04 L5_spectral:3.2200e-04 L6_spectral:3.1944e-04 L7_spectral:3.1819e-04 L8_spectral:2.8857e-04 L9_spectral:3.1483e-04 L10_spectral:3.1210e-04 L11_spectral:3.0128e-04 L12_spectral:2.7942e-04 train_time:382040ms step_avg:41.53ms +[2025-09-11 11:13:17] [Rank 0] PRINT: step:9200/10000 val_loss:4.3311 total_sharp:1.3382e-04 L1_sharp:1.0036e-02 L2_sharp:1.6953e-02 L3_sharp:2.3020e-02 L4_sharp:3.5429e-02 L5_sharp:5.3220e-02 L6_sharp:7.9137e-02 L7_sharp:1.1059e-01 L8_sharp:1.1068e-01 L9_sharp:1.3292e-01 L10_sharp:1.5616e-01 L11_sharp:2.0264e-01 L12_sharp:1.1136e+00 total_fnorm:5.9688e+00 total_l1_linf:4.9920e+03 total_spectral:2.9844e+00 L1_fnorm:1.7456e-02 L2_fnorm:1.7456e-02 L3_fnorm:1.7578e-02 L4_fnorm:1.7578e-02 L5_fnorm:1.7578e-02 L6_fnorm:1.7578e-02 L7_fnorm:1.7578e-02 L8_fnorm:1.6968e-02 L9_fnorm:1.7456e-02 L10_fnorm:1.7456e-02 L11_fnorm:1.7090e-02 L12_fnorm:1.6846e-02 L1_l1linf:2.6093e-03 L2_l1linf:2.6093e-03 L3_l1linf:2.7161e-03 L4_l1linf:2.6855e-03 L5_l1linf:2.6398e-03 
L6_l1linf:2.6703e-03 L7_l1linf:2.8076e-03 L8_l1linf:2.6550e-03 L9_l1linf:2.6245e-03 L10_l1linf:2.8229e-03 L11_l1linf:2.6093e-03 L12_l1linf:2.6398e-03 L1_spectral:3.2205e-04 L2_spectral:3.1750e-04 L3_spectral:3.2540e-04 L4_spectral:3.2200e-04 L5_spectral:3.2200e-04 L6_spectral:3.1944e-04 L7_spectral:3.1819e-04 L8_spectral:2.8857e-04 L9_spectral:3.1483e-04 L10_spectral:3.1210e-04 L11_spectral:3.0128e-04 L12_spectral:2.7942e-04 train_time:382040ms step_avg:41.53ms +[2025-09-11 11:13:19] [Rank 0] step:9201/10000 train_time:384297ms step_avg:41.77ms +[2025-09-11 11:13:19] [Rank 0] step:9201/10000 train_time:384297ms step_avg:41.77ms +[2025-09-11 11:13:20] [Rank 0] step:9221/10000 train_time:385044ms step_avg:41.76ms +[2025-09-11 11:13:20] [Rank 0] step:9221/10000 train_time:385044ms step_avg:41.76ms +[2025-09-11 11:13:21] [Rank 0] step:9241/10000 train_time:385753ms step_avg:41.74ms +[2025-09-11 11:13:21] [Rank 0] step:9241/10000 train_time:385753ms step_avg:41.74ms +[2025-09-11 11:13:22] [Rank 0] step:9261/10000 train_time:386466ms step_avg:41.73ms +[2025-09-11 11:13:22] [Rank 0] step:9261/10000 train_time:386466ms step_avg:41.73ms +[2025-09-11 11:13:22] [Rank 0] step:9281/10000 train_time:387178ms step_avg:41.72ms +[2025-09-11 11:13:22] [Rank 0] step:9281/10000 train_time:387178ms step_avg:41.72ms +[2025-09-11 11:13:23] [Rank 0] step:9301/10000 train_time:387886ms step_avg:41.70ms +[2025-09-11 11:13:23] [Rank 0] step:9301/10000 train_time:387886ms step_avg:41.70ms +[2025-09-11 11:13:24] [Rank 0] step:9321/10000 train_time:388598ms step_avg:41.69ms +[2025-09-11 11:13:24] [Rank 0] step:9321/10000 train_time:388598ms step_avg:41.69ms +[2025-09-11 11:13:24] [Rank 0] step:9341/10000 train_time:389307ms step_avg:41.68ms +[2025-09-11 11:13:24] [Rank 0] step:9341/10000 train_time:389307ms step_avg:41.68ms +[2025-09-11 11:13:25] [Rank 0] step:9361/10000 train_time:390013ms step_avg:41.66ms +[2025-09-11 11:13:25] [Rank 0] step:9361/10000 train_time:390013ms step_avg:41.66ms 
+[2025-09-11 11:13:26] [Rank 0] step:9381/10000 train_time:390724ms step_avg:41.65ms +[2025-09-11 11:13:26] [Rank 0] step:9381/10000 train_time:390724ms step_avg:41.65ms +[2025-09-11 11:13:27] [Rank 0] step:9401/10000 train_time:391436ms step_avg:41.64ms +[2025-09-11 11:13:27] [Rank 0] step:9401/10000 train_time:391436ms step_avg:41.64ms +[2025-09-11 11:13:27] [Rank 0] step:9421/10000 train_time:392148ms step_avg:41.62ms +[2025-09-11 11:13:27] [Rank 0] step:9421/10000 train_time:392148ms step_avg:41.62ms +[2025-09-11 11:13:28] [Rank 0] step:9441/10000 train_time:392861ms step_avg:41.61ms +[2025-09-11 11:13:28] [Rank 0] step:9441/10000 train_time:392861ms step_avg:41.61ms +[2025-09-11 11:13:29] [Rank 0] step:9461/10000 train_time:393572ms step_avg:41.60ms +[2025-09-11 11:13:29] [Rank 0] step:9461/10000 train_time:393572ms step_avg:41.60ms +[2025-09-11 11:13:29] [Rank 0] step:9481/10000 train_time:394284ms step_avg:41.59ms +[2025-09-11 11:13:29] [Rank 0] step:9481/10000 train_time:394284ms step_avg:41.59ms +[2025-09-11 11:13:30] [Rank 0] step:9501/10000 train_time:394997ms step_avg:41.57ms +[2025-09-11 11:13:30] [Rank 0] step:9501/10000 train_time:394997ms step_avg:41.57ms +[2025-09-11 11:13:31] [Rank 0] step:9521/10000 train_time:395711ms step_avg:41.56ms +[2025-09-11 11:13:31] [Rank 0] step:9521/10000 train_time:395711ms step_avg:41.56ms +[2025-09-11 11:13:32] [Rank 0] step:9541/10000 train_time:396421ms step_avg:41.55ms +[2025-09-11 11:13:32] [Rank 0] step:9541/10000 train_time:396421ms step_avg:41.55ms +[2025-09-11 11:13:32] [Rank 0] step:9561/10000 train_time:397132ms step_avg:41.54ms +[2025-09-11 11:13:32] [Rank 0] step:9561/10000 train_time:397132ms step_avg:41.54ms +[2025-09-11 11:13:33] [Rank 0] step:9581/10000 train_time:397845ms step_avg:41.52ms +[2025-09-11 11:13:33] [Rank 0] step:9581/10000 train_time:397845ms step_avg:41.52ms +[2025-09-11 11:13:34] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 11:13:34] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 11:13:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 11:13:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 11:13:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 11:13:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 11:13:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:13:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:13:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 11:13:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 11:13:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 11:13:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 11:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 11:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 11:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 11:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 11:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 11:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 11:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 11:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 11:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 11:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 11:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 11:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 11:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 11:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 11:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 11:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 11:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 11:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 11:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 11:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 11:13:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 11:13:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 11:13:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 11:13:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 11:13:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 11:13:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 11:13:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 11:13:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 11:13:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 11:13:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 11:13:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 11:13:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 11:13:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:13:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:13:44] [Rank 0] PRINT: step:9600/10000 val_loss:4.3261 total_sharp:7.3978e-05 L1_sharp:1.0200e-02 L2_sharp:1.1717e-02 L3_sharp:1.7141e-02 L4_sharp:2.9449e-02 L5_sharp:3.3288e-02 L6_sharp:5.3081e-02 L7_sharp:7.0613e-02 L8_sharp:8.0001e-02 L9_sharp:9.5741e-02 L10_sharp:1.2853e-01 L11_sharp:1.5034e-01 L12_sharp:3.1971e-01 total_fnorm:3.3906e+00 total_l1_linf:2.4000e+03 total_spectral:1.6953e+00 L1_fnorm:9.7656e-03 L2_fnorm:9.7656e-03 L3_fnorm:9.8267e-03 L4_fnorm:9.8877e-03 L5_fnorm:9.8877e-03 L6_fnorm:9.8267e-03 L7_fnorm:9.8877e-03 L8_fnorm:9.4604e-03 L9_fnorm:9.8267e-03 L10_fnorm:9.7046e-03 L11_fnorm:9.5825e-03 L12_fnorm:9.3384e-03 L1_l1linf:1.2283e-03 L2_l1linf:1.2589e-03 L3_l1linf:1.2970e-03 L4_l1linf:1.2894e-03 L5_l1linf:1.2665e-03 L6_l1linf:1.2970e-03 L7_l1linf:1.3351e-03 L8_l1linf:1.3046e-03 L9_l1linf:1.3809e-03 L10_l1linf:1.2665e-03 L11_l1linf:1.2360e-03 L12_l1linf:1.3199e-03 L1_spectral:1.8299e-04 L2_spectral:1.8497e-04 L3_spectral:1.8488e-04 L4_spectral:1.8263e-04 L5_spectral:1.8361e-04 L6_spectral:1.7985e-04 L7_spectral:1.8220e-04 L8_spectral:1.6515e-04 L9_spectral:1.7889e-04 L10_spectral:1.7753e-04 L11_spectral:1.6955e-04 L12_spectral:1.5660e-04 train_time:398534ms step_avg:41.51ms +[2025-09-11 11:13:44] [Rank 0] PRINT: step:9600/10000 
val_loss:4.3261 total_sharp:7.3978e-05 L1_sharp:1.0200e-02 L2_sharp:1.1717e-02 L3_sharp:1.7141e-02 L4_sharp:2.9449e-02 L5_sharp:3.3288e-02 L6_sharp:5.3081e-02 L7_sharp:7.0613e-02 L8_sharp:8.0001e-02 L9_sharp:9.5741e-02 L10_sharp:1.2853e-01 L11_sharp:1.5034e-01 L12_sharp:3.1971e-01 total_fnorm:3.3906e+00 total_l1_linf:2.4000e+03 total_spectral:1.6953e+00 L1_fnorm:9.7656e-03 L2_fnorm:9.7656e-03 L3_fnorm:9.8267e-03 L4_fnorm:9.8877e-03 L5_fnorm:9.8877e-03 L6_fnorm:9.8267e-03 L7_fnorm:9.8877e-03 L8_fnorm:9.4604e-03 L9_fnorm:9.8267e-03 L10_fnorm:9.7046e-03 L11_fnorm:9.5825e-03 L12_fnorm:9.3384e-03 L1_l1linf:1.2283e-03 L2_l1linf:1.2589e-03 L3_l1linf:1.2970e-03 L4_l1linf:1.2894e-03 L5_l1linf:1.2665e-03 L6_l1linf:1.2970e-03 L7_l1linf:1.3351e-03 L8_l1linf:1.3046e-03 L9_l1linf:1.3809e-03 L10_l1linf:1.2665e-03 L11_l1linf:1.2360e-03 L12_l1linf:1.3199e-03 L1_spectral:1.8299e-04 L2_spectral:1.8497e-04 L3_spectral:1.8488e-04 L4_spectral:1.8263e-04 L5_spectral:1.8361e-04 L6_spectral:1.7985e-04 L7_spectral:1.8220e-04 L8_spectral:1.6515e-04 L9_spectral:1.7889e-04 L10_spectral:1.7753e-04 L11_spectral:1.6955e-04 L12_spectral:1.5660e-04 train_time:398534ms step_avg:41.51ms +[2025-09-11 11:13:47] [Rank 0] step:9601/10000 train_time:400818ms step_avg:41.75ms +[2025-09-11 11:13:47] [Rank 0] step:9601/10000 train_time:400818ms step_avg:41.75ms +[2025-09-11 11:13:47] [Rank 0] step:9621/10000 train_time:401551ms step_avg:41.74ms +[2025-09-11 11:13:47] [Rank 0] step:9621/10000 train_time:401551ms step_avg:41.74ms +[2025-09-11 11:13:48] [Rank 0] step:9641/10000 train_time:402267ms step_avg:41.72ms +[2025-09-11 11:13:48] [Rank 0] step:9641/10000 train_time:402267ms step_avg:41.72ms +[2025-09-11 11:13:49] [Rank 0] step:9661/10000 train_time:402992ms step_avg:41.71ms +[2025-09-11 11:13:49] [Rank 0] step:9661/10000 train_time:402992ms step_avg:41.71ms +[2025-09-11 11:13:50] [Rank 0] step:9681/10000 train_time:403709ms step_avg:41.70ms +[2025-09-11 11:13:50] [Rank 0] step:9681/10000 
train_time:403709ms step_avg:41.70ms +[2025-09-11 11:13:50] [Rank 0] step:9701/10000 train_time:404426ms step_avg:41.69ms +[2025-09-11 11:13:50] [Rank 0] step:9701/10000 train_time:404426ms step_avg:41.69ms +[2025-09-11 11:13:51] [Rank 0] step:9721/10000 train_time:405148ms step_avg:41.68ms +[2025-09-11 11:13:51] [Rank 0] step:9721/10000 train_time:405148ms step_avg:41.68ms +[2025-09-11 11:13:52] [Rank 0] step:9741/10000 train_time:405868ms step_avg:41.67ms +[2025-09-11 11:13:52] [Rank 0] step:9741/10000 train_time:405868ms step_avg:41.67ms +[2025-09-11 11:13:52] [Rank 0] step:9761/10000 train_time:406587ms step_avg:41.65ms +[2025-09-11 11:13:52] [Rank 0] step:9761/10000 train_time:406587ms step_avg:41.65ms +[2025-09-11 11:13:53] [Rank 0] step:9781/10000 train_time:407304ms step_avg:41.64ms +[2025-09-11 11:13:53] [Rank 0] step:9781/10000 train_time:407304ms step_avg:41.64ms +[2025-09-11 11:13:54] [Rank 0] step:9801/10000 train_time:408028ms step_avg:41.63ms +[2025-09-11 11:13:54] [Rank 0] step:9801/10000 train_time:408028ms step_avg:41.63ms +[2025-09-11 11:13:55] [Rank 0] step:9821/10000 train_time:408748ms step_avg:41.62ms +[2025-09-11 11:13:55] [Rank 0] step:9821/10000 train_time:408748ms step_avg:41.62ms +[2025-09-11 11:13:55] [Rank 0] step:9841/10000 train_time:409470ms step_avg:41.61ms +[2025-09-11 11:13:55] [Rank 0] step:9841/10000 train_time:409470ms step_avg:41.61ms +[2025-09-11 11:13:56] [Rank 0] step:9861/10000 train_time:410187ms step_avg:41.60ms +[2025-09-11 11:13:56] [Rank 0] step:9861/10000 train_time:410187ms step_avg:41.60ms +[2025-09-11 11:13:57] [Rank 0] step:9881/10000 train_time:410907ms step_avg:41.59ms +[2025-09-11 11:13:57] [Rank 0] step:9881/10000 train_time:410907ms step_avg:41.59ms +[2025-09-11 11:13:57] [Rank 0] step:9901/10000 train_time:411623ms step_avg:41.57ms +[2025-09-11 11:13:57] [Rank 0] step:9901/10000 train_time:411623ms step_avg:41.57ms +[2025-09-11 11:13:58] [Rank 0] step:9921/10000 train_time:412341ms step_avg:41.56ms 
+[2025-09-11 11:13:58] [Rank 0] step:9921/10000 train_time:412341ms step_avg:41.56ms +[2025-09-11 11:13:59] [Rank 0] step:9941/10000 train_time:413064ms step_avg:41.55ms +[2025-09-11 11:13:59] [Rank 0] step:9941/10000 train_time:413064ms step_avg:41.55ms +[2025-09-11 11:14:00] [Rank 0] step:9961/10000 train_time:413788ms step_avg:41.54ms +[2025-09-11 11:14:00] [Rank 0] step:9961/10000 train_time:413788ms step_avg:41.54ms +[2025-09-11 11:14:00] [Rank 0] step:9981/10000 train_time:414508ms step_avg:41.53ms +[2025-09-11 11:14:00] [Rank 0] step:9981/10000 train_time:414508ms step_avg:41.53ms +[2025-09-11 11:14:01] [Rank 0] step:10000/10000 train_time:415201ms step_avg:41.52ms +[2025-09-11 11:14:01] [Rank 0] step:10000/10000 train_time:415201ms step_avg:41.52ms +[2025-09-11 11:14:01] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 11:14:01] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 11:14:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 11:14:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 11:14:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 11:14:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 11:14:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:14:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:14:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 11:14:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 11:14:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 11:14:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 11:14:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 11:14:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 11:14:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 11:14:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 11:14:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 11:14:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 11:14:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 11:14:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 11:14:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 11:14:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 11:14:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 11:14:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 11:14:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 11:14:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 11:14:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 11:14:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 11:14:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 11:14:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 11:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 11:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 11:14:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 11:14:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 11:14:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 11:14:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 11:14:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 11:14:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 11:14:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 11:14:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 11:14:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 11:14:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 11:14:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 11:14:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 11:14:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:14:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:14:16] [Rank 0] PRINT: step:10000/10000 val_loss:4.3248 total_sharp:5.2667e-05 L1_sharp:6.9150e-03 L2_sharp:7.3034e-03 L3_sharp:1.5267e-02 L4_sharp:2.3527e-02 L5_sharp:3.3499e-02 L6_sharp:4.5312e-02 L7_sharp:6.9144e-02 L8_sharp:7.3992e-02 L9_sharp:7.6811e-02 L10_sharp:9.7128e-02 L11_sharp:1.1793e-01 L12_sharp:3.3931e-01 total_fnorm:1.2734e+00 total_l1_linf:6.5200e+02 total_spectral:6.3672e-01 L1_fnorm:3.7994e-03 L2_fnorm:3.8147e-03 L3_fnorm:3.8452e-03 L4_fnorm:3.8452e-03 L5_fnorm:3.8452e-03 L6_fnorm:3.8300e-03 L7_fnorm:3.8605e-03 L8_fnorm:3.7079e-03 L9_fnorm:3.8147e-03 L10_fnorm:3.7994e-03 L11_fnorm:3.7384e-03 L12_fnorm:3.6621e-03 L1_l1linf:3.9482e-04 L2_l1linf:4.0436e-04 L3_l1linf:4.2343e-04 L4_l1linf:4.2152e-04 L5_l1linf:4.0245e-04 L6_l1linf:4.0627e-04 L7_l1linf:4.1771e-04 L8_l1linf:3.9482e-04 L9_l1linf:4.0627e-04 L10_l1linf:3.9291e-04 L11_l1linf:4.1008e-04 L12_l1linf:4.2534e-04 L1_spectral:7.3185e-05 L2_spectral:7.2520e-05 L3_spectral:7.4241e-05 L4_spectral:7.3616e-05 L5_spectral:7.3748e-05 L6_spectral:7.2487e-05 L7_spectral:7.3591e-05 L8_spectral:6.5821e-05 L9_spectral:7.1609e-05 L10_spectral:7.1352e-05 L11_spectral:6.8464e-05 L12_spectral:6.4366e-05 train_time:415221ms step_avg:41.52ms +[2025-09-11 11:14:16] [Rank 0] PRINT: step:10000/10000 val_loss:4.3248 total_sharp:5.2667e-05 L1_sharp:6.9150e-03 L2_sharp:7.3034e-03 L3_sharp:1.5267e-02 L4_sharp:2.3527e-02 L5_sharp:3.3499e-02 L6_sharp:4.5312e-02 L7_sharp:6.9144e-02 L8_sharp:7.3992e-02 L9_sharp:7.6811e-02 L10_sharp:9.7128e-02 L11_sharp:1.1793e-01 L12_sharp:3.3931e-01 total_fnorm:1.2734e+00 total_l1_linf:6.5200e+02 total_spectral:6.3672e-01 L1_fnorm:3.7994e-03 L2_fnorm:3.8147e-03 L3_fnorm:3.8452e-03 L4_fnorm:3.8452e-03 L5_fnorm:3.8452e-03 L6_fnorm:3.8300e-03 L7_fnorm:3.8605e-03 L8_fnorm:3.7079e-03 L9_fnorm:3.8147e-03 L10_fnorm:3.7994e-03 L11_fnorm:3.7384e-03 L12_fnorm:3.6621e-03 L1_l1linf:3.9482e-04 L2_l1linf:4.0436e-04 L3_l1linf:4.2343e-04 L4_l1linf:4.2152e-04 L5_l1linf:4.0245e-04 
L6_l1linf:4.0627e-04 L7_l1linf:4.1771e-04 L8_l1linf:3.9482e-04 L9_l1linf:4.0627e-04 L10_l1linf:3.9291e-04 L11_l1linf:4.1008e-04 L12_l1linf:4.2534e-04 L1_spectral:7.3185e-05 L2_spectral:7.2520e-05 L3_spectral:7.4241e-05 L4_spectral:7.3616e-05 L5_spectral:7.3748e-05 L6_spectral:7.2487e-05 L7_spectral:7.3591e-05 L8_spectral:6.5821e-05 L9_spectral:7.1609e-05 L10_spectral:7.1352e-05 L11_spectral:6.8464e-05 L12_spectral:6.4366e-05 train_time:415221ms step_avg:41.52ms +[2025-09-11 11:14:16] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 11:14:16 2025 --- +[2025-09-11 11:14:16] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 11:14:16 2025 --- +[2025-09-11 11:14:16] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 11:14:16] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.002_seed_42/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.002_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..05e727a8b1648dfaa3a229a54867dfb96dcfea6a --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.002_seed_42/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.01, + "muon_lr": 0.002, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "4c211d17-b9ff-4595-9692-ea69ef0fd378", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.002_seed_42/training_log_4c211d17-b9ff-4595-9692-ea69ef0fd378.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.002_seed_42/training_log_4c211d17-b9ff-4595-9692-ea69ef0fd378.txt new file mode 100644 index 0000000000000000000000000000000000000000..8366e0cc7070793dcbeabaf0672db5a9167bfe7b --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.002_seed_42/training_log_4c211d17-b9ff-4595-9692-ea69ef0fd378.txt @@ -0,0 +1,4264 @@ +[2025-09-11 10:46:56] [Rank 0] PRINT: --- Script Start: Thu Sep 11 10:46:56 2025 --- +[2025-09-11 10:46:56] [Rank 0] PRINT: --- Script Start: Thu Sep 11 10:46:56 2025 --- +[2025-09-11 10:46:56] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.01, muon_lr=0.002, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 10:46:56] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.01, muon_lr=0.002, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 10:46:56] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 10:46:56] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 10:46:56] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-11 10:46:56] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-11 10:46:56] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.002_seed_42 +[2025-09-11 10:46:56] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.002_seed_42 +[2025-09-11 10:46:56] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import 
dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, 
"unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." 
+ "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention 
sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 10:46:56] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 10:46:56] [Rank 0] PRINT: Constructing model... +[2025-09-11 10:46:56] [Rank 0] PRINT: Constructing model... +[2025-09-11 10:46:57] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 10:46:57] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 10:46:57] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 10:46:57] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 10:46:57] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 10:46:57] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 10:46:57] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 10:46:57] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 10:46:57] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 10:46:57] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 10:47:00] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 10:47:00] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 10:47:00] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 10:47:00] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 10:47:00] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 10:47:00] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 10:47:06] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 10:47:06] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 10:47:06] [Rank 0] PRINT: Starting warmup... +[2025-09-11 10:47:06] [Rank 0] PRINT: Starting warmup... +[2025-09-11 10:47:44] [Rank 0] PRINT: Warmup complete. +[2025-09-11 10:47:44] [Rank 0] PRINT: Warmup complete. +[2025-09-11 10:47:44] [Rank 0] PRINT: Starting training... +[2025-09-11 10:47:44] [Rank 0] PRINT: Starting training... 
+[2025-09-11 10:47:45] [Rank 0] step:21/10000 train_time:1134ms step_avg:53.99ms +[2025-09-11 10:47:45] [Rank 0] step:21/10000 train_time:1134ms step_avg:53.99ms +[2025-09-11 10:47:46] [Rank 0] step:41/10000 train_time:1863ms step_avg:45.44ms +[2025-09-11 10:47:46] [Rank 0] step:41/10000 train_time:1863ms step_avg:45.44ms +[2025-09-11 10:47:46] [Rank 0] step:61/10000 train_time:2594ms step_avg:42.52ms +[2025-09-11 10:47:46] [Rank 0] step:61/10000 train_time:2594ms step_avg:42.52ms +[2025-09-11 10:47:47] [Rank 0] step:81/10000 train_time:3322ms step_avg:41.01ms +[2025-09-11 10:47:47] [Rank 0] step:81/10000 train_time:3322ms step_avg:41.01ms +[2025-09-11 10:47:48] [Rank 0] step:101/10000 train_time:4174ms step_avg:41.33ms +[2025-09-11 10:47:48] [Rank 0] step:101/10000 train_time:4174ms step_avg:41.33ms +[2025-09-11 10:47:49] [Rank 0] step:121/10000 train_time:5036ms step_avg:41.62ms +[2025-09-11 10:47:49] [Rank 0] step:121/10000 train_time:5036ms step_avg:41.62ms +[2025-09-11 10:47:49] [Rank 0] step:141/10000 train_time:5764ms step_avg:40.88ms +[2025-09-11 10:47:49] [Rank 0] step:141/10000 train_time:5764ms step_avg:40.88ms +[2025-09-11 10:47:50] [Rank 0] step:161/10000 train_time:6492ms step_avg:40.32ms +[2025-09-11 10:47:50] [Rank 0] step:161/10000 train_time:6492ms step_avg:40.32ms +[2025-09-11 10:47:51] [Rank 0] step:181/10000 train_time:7465ms step_avg:41.24ms +[2025-09-11 10:47:51] [Rank 0] step:181/10000 train_time:7465ms step_avg:41.24ms +[2025-09-11 10:47:52] [Rank 0] step:201/10000 train_time:8193ms step_avg:40.76ms +[2025-09-11 10:47:52] [Rank 0] step:201/10000 train_time:8193ms step_avg:40.76ms +[2025-09-11 10:47:53] [Rank 0] step:221/10000 train_time:8921ms step_avg:40.37ms +[2025-09-11 10:47:53] [Rank 0] step:221/10000 train_time:8921ms step_avg:40.37ms +[2025-09-11 10:47:53] [Rank 0] step:241/10000 train_time:9649ms step_avg:40.04ms +[2025-09-11 10:47:53] [Rank 0] step:241/10000 train_time:9649ms step_avg:40.04ms +[2025-09-11 10:47:54] [Rank 0] 
step:261/10000 train_time:10377ms step_avg:39.76ms +[2025-09-11 10:47:54] [Rank 0] step:261/10000 train_time:10377ms step_avg:39.76ms +[2025-09-11 10:47:55] [Rank 0] step:281/10000 train_time:11105ms step_avg:39.52ms +[2025-09-11 10:47:55] [Rank 0] step:281/10000 train_time:11105ms step_avg:39.52ms +[2025-09-11 10:47:56] [Rank 0] step:301/10000 train_time:11833ms step_avg:39.31ms +[2025-09-11 10:47:56] [Rank 0] step:301/10000 train_time:11833ms step_avg:39.31ms +[2025-09-11 10:47:56] [Rank 0] step:321/10000 train_time:12561ms step_avg:39.13ms +[2025-09-11 10:47:56] [Rank 0] step:321/10000 train_time:12561ms step_avg:39.13ms +[2025-09-11 10:47:57] [Rank 0] step:341/10000 train_time:13288ms step_avg:38.97ms +[2025-09-11 10:47:57] [Rank 0] step:341/10000 train_time:13288ms step_avg:38.97ms +[2025-09-11 10:47:58] [Rank 0] step:361/10000 train_time:14017ms step_avg:38.83ms +[2025-09-11 10:47:58] [Rank 0] step:361/10000 train_time:14017ms step_avg:38.83ms +[2025-09-11 10:47:58] [Rank 0] step:381/10000 train_time:14745ms step_avg:38.70ms +[2025-09-11 10:47:58] [Rank 0] step:381/10000 train_time:14745ms step_avg:38.70ms +[2025-09-11 10:47:59] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 10:47:59] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 10:48:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 10:48:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 10:48:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 10:48:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 10:48:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:48:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 10:48:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 10:48:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 10:48:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 10:48:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 10:48:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 10:48:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 10:48:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 10:48:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 10:48:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 10:48:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 10:48:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 10:48:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 10:48:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 10:48:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 10:48:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 10:48:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 10:48:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 10:48:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 10:48:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 10:48:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 10:48:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 10:48:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 10:48:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 10:48:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 10:48:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 10:48:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 10:48:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 10:48:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 10:48:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 10:48:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 10:48:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 10:48:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 10:48:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 10:48:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 10:48:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 10:48:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 10:48:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:48:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:48:47] [Rank 0] PRINT: step:400/10000 val_loss:6.1256 total_sharp:1.2557e-03 L1_sharp:1.3679e-01 L2_sharp:1.1840e-01 L3_sharp:1.1175e-01 L4_sharp:1.1419e-01 L5_sharp:1.2730e-01 L6_sharp:1.4947e-01 L7_sharp:1.4369e-01 L8_sharp:1.5001e-01 L9_sharp:1.5699e-01 L10_sharp:1.7161e-01 L11_sharp:2.6383e-01 L12_sharp:4.6994e-01 total_fnorm:4.0033e+01 total_l1_linf:9.7050e+04 total_spectral:2.0020e+01 L1_fnorm:2.4279e-01 L2_fnorm:2.4275e-01 L3_fnorm:2.4209e-01 L4_fnorm:2.4062e-01 L5_fnorm:2.3970e-01 L6_fnorm:2.3910e-01 L7_fnorm:2.3971e-01 L8_fnorm:2.3869e-01 L9_fnorm:2.3855e-01 L10_fnorm:2.3427e-01 L11_fnorm:2.3103e-01 L12_fnorm:2.2662e-01 L1_l1linf:8.9433e-02 L2_l1linf:9.0225e-02 L3_l1linf:8.9173e-02 L4_l1linf:8.8650e-02 L5_l1linf:8.8950e-02 L6_l1linf:8.8107e-02 L7_l1linf:8.8793e-02 L8_l1linf:8.7989e-02 L9_l1linf:8.7631e-02 L10_l1linf:8.6758e-02 L11_l1linf:8.6642e-02 L12_l1linf:8.3863e-02 L1_spectral:2.4091e-03 L2_spectral:2.4097e-03 L3_spectral:2.4091e-03 L4_spectral:2.4094e-03 L5_spectral:2.4095e-03 L6_spectral:2.4096e-03 L7_spectral:2.4103e-03 L8_spectral:2.4093e-03 L9_spectral:2.4104e-03 L10_spectral:2.4112e-03 L11_spectral:2.4097e-03 L12_spectral:2.4096e-03 train_time:15452ms step_avg:38.63ms +[2025-09-11 10:48:47] [Rank 0] PRINT: step:400/10000 val_loss:6.1256 total_sharp:1.2557e-03 L1_sharp:1.3679e-01 L2_sharp:1.1840e-01 L3_sharp:1.1175e-01 L4_sharp:1.1419e-01 L5_sharp:1.2730e-01 L6_sharp:1.4947e-01 L7_sharp:1.4369e-01 L8_sharp:1.5001e-01 L9_sharp:1.5699e-01 L10_sharp:1.7161e-01 L11_sharp:2.6383e-01 L12_sharp:4.6994e-01 total_fnorm:4.0033e+01 total_l1_linf:9.7050e+04 total_spectral:2.0020e+01 L1_fnorm:2.4279e-01 L2_fnorm:2.4275e-01 L3_fnorm:2.4209e-01 L4_fnorm:2.4062e-01 L5_fnorm:2.3970e-01 L6_fnorm:2.3910e-01 L7_fnorm:2.3971e-01 L8_fnorm:2.3869e-01 L9_fnorm:2.3855e-01 L10_fnorm:2.3427e-01 L11_fnorm:2.3103e-01 L12_fnorm:2.2662e-01 L1_l1linf:8.9433e-02 L2_l1linf:9.0225e-02 L3_l1linf:8.9173e-02 L4_l1linf:8.8650e-02 L5_l1linf:8.8950e-02 
L6_l1linf:8.8107e-02 L7_l1linf:8.8793e-02 L8_l1linf:8.7989e-02 L9_l1linf:8.7631e-02 L10_l1linf:8.6758e-02 L11_l1linf:8.6642e-02 L12_l1linf:8.3863e-02 L1_spectral:2.4091e-03 L2_spectral:2.4097e-03 L3_spectral:2.4091e-03 L4_spectral:2.4094e-03 L5_spectral:2.4095e-03 L6_spectral:2.4096e-03 L7_spectral:2.4103e-03 L8_spectral:2.4093e-03 L9_spectral:2.4104e-03 L10_spectral:2.4112e-03 L11_spectral:2.4097e-03 L12_spectral:2.4096e-03 train_time:15452ms step_avg:38.63ms +[2025-09-11 10:49:18] [Rank 0] step:401/10000 train_time:46263ms step_avg:115.37ms +[2025-09-11 10:49:18] [Rank 0] step:401/10000 train_time:46263ms step_avg:115.37ms +[2025-09-11 10:49:20] [Rank 0] step:421/10000 train_time:48662ms step_avg:115.59ms +[2025-09-11 10:49:20] [Rank 0] step:421/10000 train_time:48662ms step_avg:115.59ms +[2025-09-11 10:49:21] [Rank 0] step:441/10000 train_time:49304ms step_avg:111.80ms +[2025-09-11 10:49:21] [Rank 0] step:441/10000 train_time:49304ms step_avg:111.80ms +[2025-09-11 10:49:21] [Rank 0] step:461/10000 train_time:49945ms step_avg:108.34ms +[2025-09-11 10:49:21] [Rank 0] step:461/10000 train_time:49945ms step_avg:108.34ms +[2025-09-11 10:49:22] [Rank 0] step:481/10000 train_time:50585ms step_avg:105.17ms +[2025-09-11 10:49:22] [Rank 0] step:481/10000 train_time:50585ms step_avg:105.17ms +[2025-09-11 10:49:23] [Rank 0] step:501/10000 train_time:51225ms step_avg:102.25ms +[2025-09-11 10:49:23] [Rank 0] step:501/10000 train_time:51225ms step_avg:102.25ms +[2025-09-11 10:49:23] [Rank 0] step:521/10000 train_time:51866ms step_avg:99.55ms +[2025-09-11 10:49:23] [Rank 0] step:521/10000 train_time:51866ms step_avg:99.55ms +[2025-09-11 10:49:24] [Rank 0] step:541/10000 train_time:52507ms step_avg:97.05ms +[2025-09-11 10:49:24] [Rank 0] step:541/10000 train_time:52507ms step_avg:97.05ms +[2025-09-11 10:49:25] [Rank 0] step:561/10000 train_time:53148ms step_avg:94.74ms +[2025-09-11 10:49:25] [Rank 0] step:561/10000 train_time:53148ms step_avg:94.74ms +[2025-09-11 10:49:25] [Rank 
0] step:581/10000 train_time:53789ms step_avg:92.58ms +[2025-09-11 10:49:25] [Rank 0] step:581/10000 train_time:53789ms step_avg:92.58ms +[2025-09-11 10:49:26] [Rank 0] step:601/10000 train_time:54430ms step_avg:90.57ms +[2025-09-11 10:49:26] [Rank 0] step:601/10000 train_time:54430ms step_avg:90.57ms +[2025-09-11 10:49:26] [Rank 0] step:621/10000 train_time:55070ms step_avg:88.68ms +[2025-09-11 10:49:26] [Rank 0] step:621/10000 train_time:55070ms step_avg:88.68ms +[2025-09-11 10:49:27] [Rank 0] step:641/10000 train_time:55710ms step_avg:86.91ms +[2025-09-11 10:49:27] [Rank 0] step:641/10000 train_time:55710ms step_avg:86.91ms +[2025-09-11 10:49:28] [Rank 0] step:661/10000 train_time:56350ms step_avg:85.25ms +[2025-09-11 10:49:28] [Rank 0] step:661/10000 train_time:56350ms step_avg:85.25ms +[2025-09-11 10:49:28] [Rank 0] step:681/10000 train_time:56991ms step_avg:83.69ms +[2025-09-11 10:49:28] [Rank 0] step:681/10000 train_time:56991ms step_avg:83.69ms +[2025-09-11 10:49:29] [Rank 0] step:701/10000 train_time:57632ms step_avg:82.21ms +[2025-09-11 10:49:29] [Rank 0] step:701/10000 train_time:57632ms step_avg:82.21ms +[2025-09-11 10:49:30] [Rank 0] step:721/10000 train_time:58273ms step_avg:80.82ms +[2025-09-11 10:49:30] [Rank 0] step:721/10000 train_time:58273ms step_avg:80.82ms +[2025-09-11 10:49:30] [Rank 0] step:741/10000 train_time:58914ms step_avg:79.51ms +[2025-09-11 10:49:30] [Rank 0] step:741/10000 train_time:58914ms step_avg:79.51ms +[2025-09-11 10:49:31] [Rank 0] step:761/10000 train_time:59559ms step_avg:78.26ms +[2025-09-11 10:49:31] [Rank 0] step:761/10000 train_time:59559ms step_avg:78.26ms +[2025-09-11 10:49:32] [Rank 0] step:781/10000 train_time:60205ms step_avg:77.09ms +[2025-09-11 10:49:32] [Rank 0] step:781/10000 train_time:60205ms step_avg:77.09ms +[2025-09-11 10:49:32] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 10:49:32] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 10:49:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 10:49:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 10:50:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 10:50:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 10:50:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:50:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:50:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 10:50:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 10:50:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 10:50:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 10:50:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 10:50:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 10:50:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 10:50:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 10:50:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 10:50:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 10:50:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 10:50:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 10:50:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 10:50:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 10:50:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 10:50:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 10:50:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 10:50:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 10:50:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 10:50:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 10:50:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 10:50:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 10:50:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 10:50:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 10:50:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 10:50:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 10:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 10:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 10:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 10:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 10:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 10:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 10:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... 
+[2025-09-11 10:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 10:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 10:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 10:50:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:50:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:50:17] [Rank 0] PRINT: step:800/10000 val_loss:5.6483 total_sharp:1.1609e-03 L1_sharp:5.3537e-02 L2_sharp:5.4995e-02 L3_sharp:6.5364e-02 L4_sharp:6.9978e-02 L5_sharp:8.2590e-02 L6_sharp:8.3377e-02 L7_sharp:8.5622e-02 L8_sharp:1.3050e-01 L9_sharp:1.3890e-01 L10_sharp:2.1895e-01 L11_sharp:3.4736e-01 L12_sharp:5.0582e-01 total_fnorm:3.7500e+01 total_l1_linf:7.1680e+04 total_spectral:1.8750e+01 L1_fnorm:2.4707e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4902e-01 L5_fnorm:2.4707e-01 L6_fnorm:2.4902e-01 L7_fnorm:2.5000e-01 L8_fnorm:2.4512e-01 L9_fnorm:2.4902e-01 L10_fnorm:2.4707e-01 L11_fnorm:2.4219e-01 L12_fnorm:2.2656e-01 L1_l1linf:8.6914e-02 L2_l1linf:8.7402e-02 L3_l1linf:8.7891e-02 L4_l1linf:8.6426e-02 L5_l1linf:8.6426e-02 L6_l1linf:8.5449e-02 L7_l1linf:8.5449e-02 L8_l1linf:8.5449e-02 L9_l1linf:8.5449e-02 L10_l1linf:8.5449e-02 L11_l1linf:8.2031e-02 L12_l1linf:7.1289e-02 L1_spectral:3.1166e-03 L2_spectral:3.1081e-03 L3_spectral:3.0959e-03 L4_spectral:3.1068e-03 L5_spectral:3.0955e-03 L6_spectral:3.0925e-03 L7_spectral:3.0871e-03 L8_spectral:3.0826e-03 L9_spectral:3.0723e-03 L10_spectral:3.0753e-03 L11_spectral:3.0746e-03 L12_spectral:3.0635e-03 train_time:60832ms step_avg:76.04ms +[2025-09-11 10:50:17] [Rank 0] PRINT: step:800/10000 val_loss:5.6483 total_sharp:1.1609e-03 L1_sharp:5.3537e-02 L2_sharp:5.4995e-02 L3_sharp:6.5364e-02 L4_sharp:6.9978e-02 L5_sharp:8.2590e-02 L6_sharp:8.3377e-02 L7_sharp:8.5622e-02 L8_sharp:1.3050e-01 
L9_sharp:1.3890e-01 L10_sharp:2.1895e-01 L11_sharp:3.4736e-01 L12_sharp:5.0582e-01 total_fnorm:3.7500e+01 total_l1_linf:7.1680e+04 total_spectral:1.8750e+01 L1_fnorm:2.4707e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4902e-01 L5_fnorm:2.4707e-01 L6_fnorm:2.4902e-01 L7_fnorm:2.5000e-01 L8_fnorm:2.4512e-01 L9_fnorm:2.4902e-01 L10_fnorm:2.4707e-01 L11_fnorm:2.4219e-01 L12_fnorm:2.2656e-01 L1_l1linf:8.6914e-02 L2_l1linf:8.7402e-02 L3_l1linf:8.7891e-02 L4_l1linf:8.6426e-02 L5_l1linf:8.6426e-02 L6_l1linf:8.5449e-02 L7_l1linf:8.5449e-02 L8_l1linf:8.5449e-02 L9_l1linf:8.5449e-02 L10_l1linf:8.5449e-02 L11_l1linf:8.2031e-02 L12_l1linf:7.1289e-02 L1_spectral:3.1166e-03 L2_spectral:3.1081e-03 L3_spectral:3.0959e-03 L4_spectral:3.1068e-03 L5_spectral:3.0955e-03 L6_spectral:3.0925e-03 L7_spectral:3.0871e-03 L8_spectral:3.0826e-03 L9_spectral:3.0723e-03 L10_spectral:3.0753e-03 L11_spectral:3.0746e-03 L12_spectral:3.0635e-03 train_time:60832ms step_avg:76.04ms +[2025-09-11 10:50:19] [Rank 0] step:801/10000 train_time:62502ms step_avg:78.03ms +[2025-09-11 10:50:19] [Rank 0] step:801/10000 train_time:62502ms step_avg:78.03ms +[2025-09-11 10:50:20] [Rank 0] step:821/10000 train_time:63135ms step_avg:76.90ms +[2025-09-11 10:50:20] [Rank 0] step:821/10000 train_time:63135ms step_avg:76.90ms +[2025-09-11 10:50:20] [Rank 0] step:841/10000 train_time:63780ms step_avg:75.84ms +[2025-09-11 10:50:20] [Rank 0] step:841/10000 train_time:63780ms step_avg:75.84ms +[2025-09-11 10:50:21] [Rank 0] step:861/10000 train_time:64426ms step_avg:74.83ms +[2025-09-11 10:50:21] [Rank 0] step:861/10000 train_time:64426ms step_avg:74.83ms +[2025-09-11 10:50:22] [Rank 0] step:881/10000 train_time:65071ms step_avg:73.86ms +[2025-09-11 10:50:22] [Rank 0] step:881/10000 train_time:65071ms step_avg:73.86ms +[2025-09-11 10:50:22] [Rank 0] step:901/10000 train_time:65716ms step_avg:72.94ms +[2025-09-11 10:50:22] [Rank 0] step:901/10000 train_time:65716ms step_avg:72.94ms +[2025-09-11 10:50:23] [Rank 0] 
step:921/10000 train_time:66360ms step_avg:72.05ms +[2025-09-11 10:50:23] [Rank 0] step:921/10000 train_time:66360ms step_avg:72.05ms +[2025-09-11 10:50:24] [Rank 0] step:941/10000 train_time:67005ms step_avg:71.21ms +[2025-09-11 10:50:24] [Rank 0] step:941/10000 train_time:67005ms step_avg:71.21ms +[2025-09-11 10:50:24] [Rank 0] step:961/10000 train_time:67650ms step_avg:70.40ms +[2025-09-11 10:50:24] [Rank 0] step:961/10000 train_time:67650ms step_avg:70.40ms +[2025-09-11 10:50:25] [Rank 0] step:981/10000 train_time:68295ms step_avg:69.62ms +[2025-09-11 10:50:25] [Rank 0] step:981/10000 train_time:68295ms step_avg:69.62ms +[2025-09-11 10:50:26] [Rank 0] step:1001/10000 train_time:68939ms step_avg:68.87ms +[2025-09-11 10:50:26] [Rank 0] step:1001/10000 train_time:68939ms step_avg:68.87ms +[2025-09-11 10:50:26] [Rank 0] step:1021/10000 train_time:69584ms step_avg:68.15ms +[2025-09-11 10:50:26] [Rank 0] step:1021/10000 train_time:69584ms step_avg:68.15ms +[2025-09-11 10:50:27] [Rank 0] step:1041/10000 train_time:70229ms step_avg:67.46ms +[2025-09-11 10:50:27] [Rank 0] step:1041/10000 train_time:70229ms step_avg:67.46ms +[2025-09-11 10:50:27] [Rank 0] step:1061/10000 train_time:70874ms step_avg:66.80ms +[2025-09-11 10:50:27] [Rank 0] step:1061/10000 train_time:70874ms step_avg:66.80ms +[2025-09-11 10:50:28] [Rank 0] step:1081/10000 train_time:71519ms step_avg:66.16ms +[2025-09-11 10:50:28] [Rank 0] step:1081/10000 train_time:71519ms step_avg:66.16ms +[2025-09-11 10:50:29] [Rank 0] step:1101/10000 train_time:72163ms step_avg:65.54ms +[2025-09-11 10:50:29] [Rank 0] step:1101/10000 train_time:72163ms step_avg:65.54ms +[2025-09-11 10:50:29] [Rank 0] step:1121/10000 train_time:72808ms step_avg:64.95ms +[2025-09-11 10:50:29] [Rank 0] step:1121/10000 train_time:72808ms step_avg:64.95ms +[2025-09-11 10:50:30] [Rank 0] step:1141/10000 train_time:73453ms step_avg:64.38ms +[2025-09-11 10:50:30] [Rank 0] step:1141/10000 train_time:73453ms step_avg:64.38ms +[2025-09-11 10:50:31] 
[Rank 0] step:1161/10000 train_time:74098ms step_avg:63.82ms +[2025-09-11 10:50:31] [Rank 0] step:1161/10000 train_time:74098ms step_avg:63.82ms +[2025-09-11 10:50:31] [Rank 0] step:1181/10000 train_time:74742ms step_avg:63.29ms +[2025-09-11 10:50:31] [Rank 0] step:1181/10000 train_time:74742ms step_avg:63.29ms +[2025-09-11 10:50:32] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 10:50:32] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 10:50:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 10:50:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 10:50:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 10:50:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 10:50:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:50:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:50:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 10:50:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 10:50:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 10:50:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 10:50:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 10:50:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 10:50:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... 
+[2025-09-11 10:50:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 10:50:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 10:50:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 10:50:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 10:50:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 10:50:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 10:50:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 10:50:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 10:50:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 10:50:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 10:50:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 10:50:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 10:50:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 10:50:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 10:50:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 10:50:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 10:50:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 10:50:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 10:50:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 10:50:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... 
+[2025-09-11 10:50:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 10:50:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 10:50:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 10:50:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 10:50:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 10:50:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 10:50:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 10:50:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 10:50:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 10:50:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:50:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:50:43] [Rank 0] PRINT: step:1200/10000 val_loss:5.3531 total_sharp:7.5459e-04 L1_sharp:4.2725e-02 L2_sharp:3.6231e-02 L3_sharp:3.6711e-02 L4_sharp:4.1052e-02 L5_sharp:5.4139e-02 L6_sharp:5.0197e-02 L7_sharp:5.5735e-02 L8_sharp:8.8781e-02 L9_sharp:8.4557e-02 L10_sharp:1.3411e-01 L11_sharp:1.8543e-01 L12_sharp:6.8236e-01 total_fnorm:3.9000e+01 total_l1_linf:7.1680e+04 total_spectral:1.9500e+01 L1_fnorm:2.5000e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.5000e-01 L4_fnorm:2.4902e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4902e-01 L8_fnorm:2.4805e-01 L9_fnorm:2.5000e-01 L10_fnorm:2.5000e-01 L11_fnorm:2.5195e-01 L12_fnorm:2.4805e-01 L1_l1linf:8.0078e-02 L2_l1linf:7.8613e-02 L3_l1linf:7.8125e-02 L4_l1linf:7.7637e-02 L5_l1linf:7.7148e-02 L6_l1linf:7.7637e-02 L7_l1linf:7.7148e-02 L8_l1linf:7.8613e-02 L9_l1linf:7.8125e-02 L10_l1linf:7.9102e-02 L11_l1linf:8.1543e-02 L12_l1linf:8.0078e-02 L1_spectral:3.1239e-03 L2_spectral:3.1188e-03 L3_spectral:3.1136e-03 L4_spectral:3.1194e-03 L5_spectral:3.1105e-03 L6_spectral:3.1113e-03 L7_spectral:3.1260e-03 L8_spectral:3.0956e-03 L9_spectral:3.1306e-03 L10_spectral:3.1131e-03 L11_spectral:3.0980e-03 L12_spectral:3.0914e-03 train_time:75369ms step_avg:62.81ms +[2025-09-11 10:50:43] [Rank 0] PRINT: step:1200/10000 val_loss:5.3531 total_sharp:7.5459e-04 L1_sharp:4.2725e-02 L2_sharp:3.6231e-02 L3_sharp:3.6711e-02 L4_sharp:4.1052e-02 L5_sharp:5.4139e-02 L6_sharp:5.0197e-02 L7_sharp:5.5735e-02 L8_sharp:8.8781e-02 L9_sharp:8.4557e-02 L10_sharp:1.3411e-01 L11_sharp:1.8543e-01 L12_sharp:6.8236e-01 total_fnorm:3.9000e+01 total_l1_linf:7.1680e+04 total_spectral:1.9500e+01 L1_fnorm:2.5000e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.5000e-01 L4_fnorm:2.4902e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4902e-01 L8_fnorm:2.4805e-01 L9_fnorm:2.5000e-01 L10_fnorm:2.5000e-01 L11_fnorm:2.5195e-01 L12_fnorm:2.4805e-01 L1_l1linf:8.0078e-02 L2_l1linf:7.8613e-02 L3_l1linf:7.8125e-02 L4_l1linf:7.7637e-02 L5_l1linf:7.7148e-02 
L6_l1linf:7.7637e-02 L7_l1linf:7.7148e-02 L8_l1linf:7.8613e-02 L9_l1linf:7.8125e-02 L10_l1linf:7.9102e-02 L11_l1linf:8.1543e-02 L12_l1linf:8.0078e-02 L1_spectral:3.1239e-03 L2_spectral:3.1188e-03 L3_spectral:3.1136e-03 L4_spectral:3.1194e-03 L5_spectral:3.1105e-03 L6_spectral:3.1113e-03 L7_spectral:3.1260e-03 L8_spectral:3.0956e-03 L9_spectral:3.1306e-03 L10_spectral:3.1131e-03 L11_spectral:3.0980e-03 L12_spectral:3.0914e-03 train_time:75369ms step_avg:62.81ms +[2025-09-11 10:50:44] [Rank 0] step:1201/10000 train_time:77068ms step_avg:64.17ms +[2025-09-11 10:50:44] [Rank 0] step:1201/10000 train_time:77068ms step_avg:64.17ms +[2025-09-11 10:50:45] [Rank 0] step:1221/10000 train_time:77706ms step_avg:63.64ms +[2025-09-11 10:50:45] [Rank 0] step:1221/10000 train_time:77706ms step_avg:63.64ms +[2025-09-11 10:50:46] [Rank 0] step:1241/10000 train_time:78352ms step_avg:63.14ms +[2025-09-11 10:50:46] [Rank 0] step:1241/10000 train_time:78352ms step_avg:63.14ms +[2025-09-11 10:50:46] [Rank 0] step:1261/10000 train_time:78997ms step_avg:62.65ms +[2025-09-11 10:50:46] [Rank 0] step:1261/10000 train_time:78997ms step_avg:62.65ms +[2025-09-11 10:50:47] [Rank 0] step:1281/10000 train_time:79643ms step_avg:62.17ms +[2025-09-11 10:50:47] [Rank 0] step:1281/10000 train_time:79643ms step_avg:62.17ms +[2025-09-11 10:50:48] [Rank 0] step:1301/10000 train_time:80288ms step_avg:61.71ms +[2025-09-11 10:50:48] [Rank 0] step:1301/10000 train_time:80288ms step_avg:61.71ms +[2025-09-11 10:50:48] [Rank 0] step:1321/10000 train_time:80934ms step_avg:61.27ms +[2025-09-11 10:50:48] [Rank 0] step:1321/10000 train_time:80934ms step_avg:61.27ms +[2025-09-11 10:50:49] [Rank 0] step:1341/10000 train_time:81579ms step_avg:60.83ms +[2025-09-11 10:50:49] [Rank 0] step:1341/10000 train_time:81579ms step_avg:60.83ms +[2025-09-11 10:50:50] [Rank 0] step:1361/10000 train_time:82224ms step_avg:60.41ms +[2025-09-11 10:50:50] [Rank 0] step:1361/10000 train_time:82224ms step_avg:60.41ms +[2025-09-11 10:50:50] 
[Rank 0] step:1381/10000 train_time:82869ms step_avg:60.01ms +[2025-09-11 10:50:50] [Rank 0] step:1381/10000 train_time:82869ms step_avg:60.01ms +[2025-09-11 10:50:51] [Rank 0] step:1401/10000 train_time:83515ms step_avg:59.61ms +[2025-09-11 10:50:51] [Rank 0] step:1401/10000 train_time:83515ms step_avg:59.61ms +[2025-09-11 10:50:52] [Rank 0] step:1421/10000 train_time:84160ms step_avg:59.23ms +[2025-09-11 10:50:52] [Rank 0] step:1421/10000 train_time:84160ms step_avg:59.23ms +[2025-09-11 10:50:52] [Rank 0] step:1441/10000 train_time:84805ms step_avg:58.85ms +[2025-09-11 10:50:52] [Rank 0] step:1441/10000 train_time:84805ms step_avg:58.85ms +[2025-09-11 10:50:53] [Rank 0] step:1461/10000 train_time:85450ms step_avg:58.49ms +[2025-09-11 10:50:53] [Rank 0] step:1461/10000 train_time:85450ms step_avg:58.49ms +[2025-09-11 10:50:53] [Rank 0] step:1481/10000 train_time:86095ms step_avg:58.13ms +[2025-09-11 10:50:53] [Rank 0] step:1481/10000 train_time:86095ms step_avg:58.13ms +[2025-09-11 10:50:54] [Rank 0] step:1501/10000 train_time:86744ms step_avg:57.79ms +[2025-09-11 10:50:54] [Rank 0] step:1501/10000 train_time:86744ms step_avg:57.79ms +[2025-09-11 10:50:55] [Rank 0] step:1521/10000 train_time:87392ms step_avg:57.46ms +[2025-09-11 10:50:55] [Rank 0] step:1521/10000 train_time:87392ms step_avg:57.46ms +[2025-09-11 10:50:55] [Rank 0] step:1541/10000 train_time:88041ms step_avg:57.13ms +[2025-09-11 10:50:55] [Rank 0] step:1541/10000 train_time:88041ms step_avg:57.13ms +[2025-09-11 10:50:56] [Rank 0] step:1561/10000 train_time:88690ms step_avg:56.82ms +[2025-09-11 10:50:56] [Rank 0] step:1561/10000 train_time:88690ms step_avg:56.82ms +[2025-09-11 10:50:57] [Rank 0] step:1581/10000 train_time:89339ms step_avg:56.51ms +[2025-09-11 10:50:57] [Rank 0] step:1581/10000 train_time:89339ms step_avg:56.51ms +[2025-09-11 10:50:58] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 10:50:58] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 10:50:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 10:50:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 10:51:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 10:51:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 10:51:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:51:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:51:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 10:51:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 10:51:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 10:51:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 10:51:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 10:51:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 10:51:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 10:51:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 10:51:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 10:51:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 10:51:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 10:51:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 10:51:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 10:51:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 10:51:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 10:51:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 10:51:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 10:51:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 10:51:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 10:51:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 10:51:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 10:51:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 10:51:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 10:51:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 10:51:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 10:51:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 10:51:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 10:51:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 10:51:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 10:51:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 10:51:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 10:51:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 10:51:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 10:51:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 10:51:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 10:51:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 10:51:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:51:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:51:08] [Rank 0] PRINT: step:1600/10000 val_loss:5.1704 total_sharp:6.0619e-04 L1_sharp:2.5003e-02 L2_sharp:1.9128e-02 L3_sharp:1.8700e-02 L4_sharp:1.9886e-02 L5_sharp:3.4830e-02 L6_sharp:3.6433e-02 L7_sharp:4.4229e-02 L8_sharp:7.1831e-02 L9_sharp:6.6232e-02 L10_sharp:9.8101e-02 L11_sharp:1.3839e-01 L12_sharp:4.8621e-01 total_fnorm:3.7250e+01 total_l1_linf:6.5024e+04 total_spectral:1.8625e+01 L1_fnorm:2.5000e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4805e-01 L8_fnorm:2.4707e-01 L9_fnorm:2.4902e-01 L10_fnorm:2.5000e-01 L11_fnorm:2.5195e-01 L12_fnorm:2.5195e-01 L1_l1linf:7.5684e-02 L2_l1linf:7.4707e-02 L3_l1linf:7.4707e-02 L4_l1linf:7.4219e-02 L5_l1linf:7.2754e-02 L6_l1linf:7.3242e-02 L7_l1linf:7.3730e-02 L8_l1linf:7.3730e-02 L9_l1linf:7.5195e-02 L10_l1linf:7.5195e-02 L11_l1linf:7.7148e-02 L12_l1linf:7.6660e-02 L1_spectral:3.1380e-03 L2_spectral:3.1356e-03 L3_spectral:3.1201e-03 L4_spectral:3.1216e-03 L5_spectral:3.1395e-03 L6_spectral:3.1150e-03 L7_spectral:3.1436e-03 L8_spectral:3.1256e-03 L9_spectral:3.1316e-03 L10_spectral:3.1248e-03 L11_spectral:3.1293e-03 L12_spectral:3.1216e-03 train_time:90121ms step_avg:56.33ms +[2025-09-11 10:51:08] [Rank 0] PRINT: step:1600/10000 
val_loss:5.1704 total_sharp:6.0619e-04 L1_sharp:2.5003e-02 L2_sharp:1.9128e-02 L3_sharp:1.8700e-02 L4_sharp:1.9886e-02 L5_sharp:3.4830e-02 L6_sharp:3.6433e-02 L7_sharp:4.4229e-02 L8_sharp:7.1831e-02 L9_sharp:6.6232e-02 L10_sharp:9.8101e-02 L11_sharp:1.3839e-01 L12_sharp:4.8621e-01 total_fnorm:3.7250e+01 total_l1_linf:6.5024e+04 total_spectral:1.8625e+01 L1_fnorm:2.5000e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4805e-01 L8_fnorm:2.4707e-01 L9_fnorm:2.4902e-01 L10_fnorm:2.5000e-01 L11_fnorm:2.5195e-01 L12_fnorm:2.5195e-01 L1_l1linf:7.5684e-02 L2_l1linf:7.4707e-02 L3_l1linf:7.4707e-02 L4_l1linf:7.4219e-02 L5_l1linf:7.2754e-02 L6_l1linf:7.3242e-02 L7_l1linf:7.3730e-02 L8_l1linf:7.3730e-02 L9_l1linf:7.5195e-02 L10_l1linf:7.5195e-02 L11_l1linf:7.7148e-02 L12_l1linf:7.6660e-02 L1_spectral:3.1380e-03 L2_spectral:3.1356e-03 L3_spectral:3.1201e-03 L4_spectral:3.1216e-03 L5_spectral:3.1395e-03 L6_spectral:3.1150e-03 L7_spectral:3.1436e-03 L8_spectral:3.1256e-03 L9_spectral:3.1316e-03 L10_spectral:3.1248e-03 L11_spectral:3.1293e-03 L12_spectral:3.1216e-03 train_time:90121ms step_avg:56.33ms +[2025-09-11 10:51:11] [Rank 0] step:1601/10000 train_time:92598ms step_avg:57.84ms +[2025-09-11 10:51:11] [Rank 0] step:1601/10000 train_time:92598ms step_avg:57.84ms +[2025-09-11 10:51:12] [Rank 0] step:1621/10000 train_time:93465ms step_avg:57.66ms +[2025-09-11 10:51:12] [Rank 0] step:1621/10000 train_time:93465ms step_avg:57.66ms +[2025-09-11 10:51:13] [Rank 0] step:1641/10000 train_time:94114ms step_avg:57.35ms +[2025-09-11 10:51:13] [Rank 0] step:1641/10000 train_time:94114ms step_avg:57.35ms +[2025-09-11 10:51:13] [Rank 0] step:1661/10000 train_time:94764ms step_avg:57.05ms +[2025-09-11 10:51:13] [Rank 0] step:1661/10000 train_time:94764ms step_avg:57.05ms +[2025-09-11 10:51:14] [Rank 0] step:1681/10000 train_time:95413ms step_avg:56.76ms +[2025-09-11 10:51:14] [Rank 0] step:1681/10000 train_time:95413ms 
step_avg:56.76ms +[2025-09-11 10:51:14] [Rank 0] step:1701/10000 train_time:96061ms step_avg:56.47ms +[2025-09-11 10:51:14] [Rank 0] step:1701/10000 train_time:96061ms step_avg:56.47ms +[2025-09-11 10:51:15] [Rank 0] step:1721/10000 train_time:96710ms step_avg:56.19ms +[2025-09-11 10:51:15] [Rank 0] step:1721/10000 train_time:96710ms step_avg:56.19ms +[2025-09-11 10:51:16] [Rank 0] step:1741/10000 train_time:97358ms step_avg:55.92ms +[2025-09-11 10:51:16] [Rank 0] step:1741/10000 train_time:97358ms step_avg:55.92ms +[2025-09-11 10:51:16] [Rank 0] step:1761/10000 train_time:98007ms step_avg:55.65ms +[2025-09-11 10:51:16] [Rank 0] step:1761/10000 train_time:98007ms step_avg:55.65ms +[2025-09-11 10:51:17] [Rank 0] step:1781/10000 train_time:98654ms step_avg:55.39ms +[2025-09-11 10:51:17] [Rank 0] step:1781/10000 train_time:98654ms step_avg:55.39ms +[2025-09-11 10:51:18] [Rank 0] step:1801/10000 train_time:99302ms step_avg:55.14ms +[2025-09-11 10:51:18] [Rank 0] step:1801/10000 train_time:99302ms step_avg:55.14ms +[2025-09-11 10:51:18] [Rank 0] step:1821/10000 train_time:99950ms step_avg:54.89ms +[2025-09-11 10:51:18] [Rank 0] step:1821/10000 train_time:99950ms step_avg:54.89ms +[2025-09-11 10:51:19] [Rank 0] step:1841/10000 train_time:100598ms step_avg:54.64ms +[2025-09-11 10:51:19] [Rank 0] step:1841/10000 train_time:100598ms step_avg:54.64ms +[2025-09-11 10:51:20] [Rank 0] step:1861/10000 train_time:101246ms step_avg:54.40ms +[2025-09-11 10:51:20] [Rank 0] step:1861/10000 train_time:101246ms step_avg:54.40ms +[2025-09-11 10:51:20] [Rank 0] step:1881/10000 train_time:101894ms step_avg:54.17ms +[2025-09-11 10:51:20] [Rank 0] step:1881/10000 train_time:101894ms step_avg:54.17ms +[2025-09-11 10:51:21] [Rank 0] step:1901/10000 train_time:102543ms step_avg:53.94ms +[2025-09-11 10:51:21] [Rank 0] step:1901/10000 train_time:102543ms step_avg:53.94ms +[2025-09-11 10:51:22] [Rank 0] step:1921/10000 train_time:103191ms step_avg:53.72ms +[2025-09-11 10:51:22] [Rank 0] 
step:1921/10000 train_time:103191ms step_avg:53.72ms +[2025-09-11 10:51:22] [Rank 0] step:1941/10000 train_time:103838ms step_avg:53.50ms +[2025-09-11 10:51:22] [Rank 0] step:1941/10000 train_time:103838ms step_avg:53.50ms +[2025-09-11 10:51:23] [Rank 0] step:1961/10000 train_time:104486ms step_avg:53.28ms +[2025-09-11 10:51:23] [Rank 0] step:1961/10000 train_time:104486ms step_avg:53.28ms +[2025-09-11 10:51:24] [Rank 0] step:1981/10000 train_time:105135ms step_avg:53.07ms +[2025-09-11 10:51:24] [Rank 0] step:1981/10000 train_time:105135ms step_avg:53.07ms +[2025-09-11 10:51:24] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 10:51:24] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 10:51:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 10:51:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 10:51:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 10:51:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 10:51:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:51:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:51:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 10:51:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 10:51:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 10:51:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 10:51:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 10:51:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 10:51:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 10:51:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 10:51:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 10:51:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 10:51:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 10:51:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 10:51:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 10:51:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 10:51:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 10:51:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 10:51:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 10:51:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 10:51:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 10:51:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 10:51:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 10:51:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 10:51:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 10:51:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 10:51:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 10:51:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 10:51:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 10:51:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 10:51:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 10:51:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 10:51:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 10:51:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 10:51:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 10:51:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 10:51:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 10:51:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 10:51:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:51:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:51:35] [Rank 0] PRINT: step:2000/10000 val_loss:5.0064 total_sharp:6.6759e-04 L1_sharp:1.8163e-02 L2_sharp:1.2422e-02 L3_sharp:1.2523e-02 L4_sharp:1.5800e-02 L5_sharp:2.7476e-02 L6_sharp:3.0198e-02 L7_sharp:3.8605e-02 L8_sharp:7.1639e-02 L9_sharp:6.8161e-02 L10_sharp:1.0417e-01 L11_sharp:1.7932e-01 L12_sharp:1.6874e+00 total_fnorm:3.7000e+01 total_l1_linf:6.6048e+04 total_spectral:1.8625e+01 L1_fnorm:2.5000e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4805e-01 L8_fnorm:2.4609e-01 L9_fnorm:2.4805e-01 L10_fnorm:2.5000e-01 L11_fnorm:2.5000e-01 L12_fnorm:2.5000e-01 L1_l1linf:7.1777e-02 L2_l1linf:7.1777e-02 L3_l1linf:7.1289e-02 L4_l1linf:7.1289e-02 L5_l1linf:7.1777e-02 L6_l1linf:7.1289e-02 L7_l1linf:7.0801e-02 L8_l1linf:7.0312e-02 L9_l1linf:7.1777e-02 L10_l1linf:7.2266e-02 L11_l1linf:7.4707e-02 L12_l1linf:7.2266e-02 L1_spectral:3.1488e-03 L2_spectral:3.1480e-03 L3_spectral:3.1636e-03 L4_spectral:3.1493e-03 L5_spectral:3.1417e-03 L6_spectral:3.1414e-03 L7_spectral:3.1425e-03 L8_spectral:3.1440e-03 L9_spectral:3.1601e-03 L10_spectral:3.1276e-03 L11_spectral:3.1585e-03 L12_spectral:3.1527e-03 train_time:105765ms step_avg:52.88ms +[2025-09-11 10:51:35] [Rank 0] PRINT: step:2000/10000 val_loss:5.0064 total_sharp:6.6759e-04 L1_sharp:1.8163e-02 L2_sharp:1.2422e-02 L3_sharp:1.2523e-02 L4_sharp:1.5800e-02 L5_sharp:2.7476e-02 L6_sharp:3.0198e-02 L7_sharp:3.8605e-02 L8_sharp:7.1639e-02 L9_sharp:6.8161e-02 L10_sharp:1.0417e-01 L11_sharp:1.7932e-01 L12_sharp:1.6874e+00 total_fnorm:3.7000e+01 total_l1_linf:6.6048e+04 total_spectral:1.8625e+01 L1_fnorm:2.5000e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4805e-01 L8_fnorm:2.4609e-01 L9_fnorm:2.4805e-01 L10_fnorm:2.5000e-01 L11_fnorm:2.5000e-01 L12_fnorm:2.5000e-01 L1_l1linf:7.1777e-02 L2_l1linf:7.1777e-02 L3_l1linf:7.1289e-02 L4_l1linf:7.1289e-02 L5_l1linf:7.1777e-02 
L6_l1linf:7.1289e-02 L7_l1linf:7.0801e-02 L8_l1linf:7.0312e-02 L9_l1linf:7.1777e-02 L10_l1linf:7.2266e-02 L11_l1linf:7.4707e-02 L12_l1linf:7.2266e-02 L1_spectral:3.1488e-03 L2_spectral:3.1480e-03 L3_spectral:3.1636e-03 L4_spectral:3.1493e-03 L5_spectral:3.1417e-03 L6_spectral:3.1414e-03 L7_spectral:3.1425e-03 L8_spectral:3.1440e-03 L9_spectral:3.1601e-03 L10_spectral:3.1276e-03 L11_spectral:3.1585e-03 L12_spectral:3.1527e-03 train_time:105765ms step_avg:52.88ms +[2025-09-11 10:51:37] [Rank 0] step:2001/10000 train_time:107502ms step_avg:53.72ms +[2025-09-11 10:51:37] [Rank 0] step:2001/10000 train_time:107502ms step_avg:53.72ms +[2025-09-11 10:51:37] [Rank 0] step:2021/10000 train_time:108139ms step_avg:53.51ms +[2025-09-11 10:51:37] [Rank 0] step:2021/10000 train_time:108139ms step_avg:53.51ms +[2025-09-11 10:51:38] [Rank 0] step:2041/10000 train_time:108787ms step_avg:53.30ms +[2025-09-11 10:51:38] [Rank 0] step:2041/10000 train_time:108787ms step_avg:53.30ms +[2025-09-11 10:51:39] [Rank 0] step:2061/10000 train_time:109436ms step_avg:53.10ms +[2025-09-11 10:51:39] [Rank 0] step:2061/10000 train_time:109436ms step_avg:53.10ms +[2025-09-11 10:51:39] [Rank 0] step:2081/10000 train_time:110083ms step_avg:52.90ms +[2025-09-11 10:51:39] [Rank 0] step:2081/10000 train_time:110083ms step_avg:52.90ms +[2025-09-11 10:51:40] [Rank 0] step:2101/10000 train_time:110732ms step_avg:52.70ms +[2025-09-11 10:51:40] [Rank 0] step:2101/10000 train_time:110732ms step_avg:52.70ms +[2025-09-11 10:51:40] [Rank 0] step:2121/10000 train_time:111380ms step_avg:52.51ms +[2025-09-11 10:51:40] [Rank 0] step:2121/10000 train_time:111380ms step_avg:52.51ms +[2025-09-11 10:51:41] [Rank 0] step:2141/10000 train_time:112027ms step_avg:52.32ms +[2025-09-11 10:51:41] [Rank 0] step:2141/10000 train_time:112027ms step_avg:52.32ms +[2025-09-11 10:51:42] [Rank 0] step:2161/10000 train_time:112675ms step_avg:52.14ms +[2025-09-11 10:51:42] [Rank 0] step:2161/10000 train_time:112675ms step_avg:52.14ms 
+[2025-09-11 10:51:42] [Rank 0] step:2181/10000 train_time:113323ms step_avg:51.96ms +[2025-09-11 10:51:42] [Rank 0] step:2181/10000 train_time:113323ms step_avg:51.96ms +[2025-09-11 10:51:43] [Rank 0] step:2201/10000 train_time:113971ms step_avg:51.78ms +[2025-09-11 10:51:43] [Rank 0] step:2201/10000 train_time:113971ms step_avg:51.78ms +[2025-09-11 10:51:44] [Rank 0] step:2221/10000 train_time:114618ms step_avg:51.61ms +[2025-09-11 10:51:44] [Rank 0] step:2221/10000 train_time:114618ms step_avg:51.61ms +[2025-09-11 10:51:44] [Rank 0] step:2241/10000 train_time:115278ms step_avg:51.44ms +[2025-09-11 10:51:44] [Rank 0] step:2241/10000 train_time:115278ms step_avg:51.44ms +[2025-09-11 10:51:45] [Rank 0] step:2261/10000 train_time:115939ms step_avg:51.28ms +[2025-09-11 10:51:45] [Rank 0] step:2261/10000 train_time:115939ms step_avg:51.28ms +[2025-09-11 10:51:46] [Rank 0] step:2281/10000 train_time:116600ms step_avg:51.12ms +[2025-09-11 10:51:46] [Rank 0] step:2281/10000 train_time:116600ms step_avg:51.12ms +[2025-09-11 10:51:46] [Rank 0] step:2301/10000 train_time:117260ms step_avg:50.96ms +[2025-09-11 10:51:46] [Rank 0] step:2301/10000 train_time:117260ms step_avg:50.96ms +[2025-09-11 10:51:47] [Rank 0] step:2321/10000 train_time:117921ms step_avg:50.81ms +[2025-09-11 10:51:47] [Rank 0] step:2321/10000 train_time:117921ms step_avg:50.81ms +[2025-09-11 10:51:48] [Rank 0] step:2341/10000 train_time:118582ms step_avg:50.65ms +[2025-09-11 10:51:48] [Rank 0] step:2341/10000 train_time:118582ms step_avg:50.65ms +[2025-09-11 10:51:48] [Rank 0] step:2361/10000 train_time:119243ms step_avg:50.51ms +[2025-09-11 10:51:48] [Rank 0] step:2361/10000 train_time:119243ms step_avg:50.51ms +[2025-09-11 10:51:49] [Rank 0] step:2381/10000 train_time:119903ms step_avg:50.36ms +[2025-09-11 10:51:49] [Rank 0] step:2381/10000 train_time:119903ms step_avg:50.36ms +[2025-09-11 10:51:50] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 10:51:50] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 10:51:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 10:51:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 10:51:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 10:51:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 10:51:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:51:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:51:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 10:51:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 10:51:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 10:51:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 10:51:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 10:51:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 10:51:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 10:51:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 10:51:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 10:51:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 10:51:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 10:51:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 10:51:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 10:51:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 10:51:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 10:51:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 10:51:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 10:51:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 10:51:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 10:51:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 10:51:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 10:51:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 10:51:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 10:51:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 10:51:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 10:51:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 10:51:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 10:51:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 10:51:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 10:51:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 10:51:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 10:51:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 10:51:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 10:51:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 10:51:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 10:51:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 10:52:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:52:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:52:00] [Rank 0] PRINT: step:2400/10000 val_loss:4.8580 total_sharp:7.3930e-04 L1_sharp:1.6393e-02 L2_sharp:1.1578e-02 L3_sharp:1.0605e-02 L4_sharp:1.5033e-02 L5_sharp:2.4406e-02 L6_sharp:3.1554e-02 L7_sharp:4.0558e-02 L8_sharp:7.4325e-02 L9_sharp:7.5641e-02 L10_sharp:1.0327e-01 L11_sharp:1.6377e-01 L12_sharp:1.6394e+00 total_fnorm:3.5000e+01 total_l1_linf:5.9648e+04 total_spectral:1.7500e+01 L1_fnorm:2.5000e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4805e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4707e-01 L8_fnorm:2.4512e-01 L9_fnorm:2.4707e-01 L10_fnorm:2.4805e-01 L11_fnorm:2.4902e-01 L12_fnorm:2.5195e-01 L1_l1linf:7.0312e-02 L2_l1linf:6.9824e-02 L3_l1linf:7.0312e-02 L4_l1linf:7.0312e-02 L5_l1linf:6.8848e-02 L6_l1linf:6.8848e-02 L7_l1linf:6.8848e-02 L8_l1linf:6.9336e-02 L9_l1linf:6.8848e-02 L10_l1linf:6.9824e-02 L11_l1linf:7.1289e-02 L12_l1linf:7.1289e-02 L1_spectral:3.1838e-03 L2_spectral:3.1561e-03 L3_spectral:3.1439e-03 L4_spectral:3.1473e-03 L5_spectral:3.1603e-03 L6_spectral:3.1311e-03 L7_spectral:3.1452e-03 L8_spectral:3.1776e-03 L9_spectral:3.1757e-03 L10_spectral:3.1575e-03 L11_spectral:3.1675e-03 L12_spectral:3.1438e-03 train_time:120545ms step_avg:50.23ms +[2025-09-11 10:52:00] [Rank 0] PRINT: step:2400/10000 
val_loss:4.8580 total_sharp:7.3930e-04 L1_sharp:1.6393e-02 L2_sharp:1.1578e-02 L3_sharp:1.0605e-02 L4_sharp:1.5033e-02 L5_sharp:2.4406e-02 L6_sharp:3.1554e-02 L7_sharp:4.0558e-02 L8_sharp:7.4325e-02 L9_sharp:7.5641e-02 L10_sharp:1.0327e-01 L11_sharp:1.6377e-01 L12_sharp:1.6394e+00 total_fnorm:3.5000e+01 total_l1_linf:5.9648e+04 total_spectral:1.7500e+01 L1_fnorm:2.5000e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4805e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4707e-01 L8_fnorm:2.4512e-01 L9_fnorm:2.4707e-01 L10_fnorm:2.4805e-01 L11_fnorm:2.4902e-01 L12_fnorm:2.5195e-01 L1_l1linf:7.0312e-02 L2_l1linf:6.9824e-02 L3_l1linf:7.0312e-02 L4_l1linf:7.0312e-02 L5_l1linf:6.8848e-02 L6_l1linf:6.8848e-02 L7_l1linf:6.8848e-02 L8_l1linf:6.9336e-02 L9_l1linf:6.8848e-02 L10_l1linf:6.9824e-02 L11_l1linf:7.1289e-02 L12_l1linf:7.1289e-02 L1_spectral:3.1838e-03 L2_spectral:3.1561e-03 L3_spectral:3.1439e-03 L4_spectral:3.1473e-03 L5_spectral:3.1603e-03 L6_spectral:3.1311e-03 L7_spectral:3.1452e-03 L8_spectral:3.1776e-03 L9_spectral:3.1757e-03 L10_spectral:3.1575e-03 L11_spectral:3.1675e-03 L12_spectral:3.1438e-03 train_time:120545ms step_avg:50.23ms +[2025-09-11 10:52:02] [Rank 0] step:2401/10000 train_time:122261ms step_avg:50.92ms +[2025-09-11 10:52:02] [Rank 0] step:2401/10000 train_time:122261ms step_avg:50.92ms +[2025-09-11 10:52:03] [Rank 0] step:2421/10000 train_time:122913ms step_avg:50.77ms +[2025-09-11 10:52:03] [Rank 0] step:2421/10000 train_time:122913ms step_avg:50.77ms +[2025-09-11 10:52:04] [Rank 0] step:2441/10000 train_time:123866ms step_avg:50.74ms +[2025-09-11 10:52:04] [Rank 0] step:2441/10000 train_time:123866ms step_avg:50.74ms +[2025-09-11 10:52:04] [Rank 0] step:2461/10000 train_time:124530ms step_avg:50.60ms +[2025-09-11 10:52:04] [Rank 0] step:2461/10000 train_time:124530ms step_avg:50.60ms +[2025-09-11 10:52:05] [Rank 0] step:2481/10000 train_time:125194ms step_avg:50.46ms +[2025-09-11 10:52:05] [Rank 0] step:2481/10000 
train_time:125194ms step_avg:50.46ms +[2025-09-11 10:52:06] [Rank 0] step:2501/10000 train_time:125857ms step_avg:50.32ms +[2025-09-11 10:52:06] [Rank 0] step:2501/10000 train_time:125857ms step_avg:50.32ms +[2025-09-11 10:52:06] [Rank 0] step:2521/10000 train_time:126519ms step_avg:50.19ms +[2025-09-11 10:52:06] [Rank 0] step:2521/10000 train_time:126519ms step_avg:50.19ms +[2025-09-11 10:52:07] [Rank 0] step:2541/10000 train_time:127182ms step_avg:50.05ms +[2025-09-11 10:52:07] [Rank 0] step:2541/10000 train_time:127182ms step_avg:50.05ms +[2025-09-11 10:52:08] [Rank 0] step:2561/10000 train_time:127844ms step_avg:49.92ms +[2025-09-11 10:52:08] [Rank 0] step:2561/10000 train_time:127844ms step_avg:49.92ms +[2025-09-11 10:52:08] [Rank 0] step:2581/10000 train_time:128507ms step_avg:49.79ms +[2025-09-11 10:52:08] [Rank 0] step:2581/10000 train_time:128507ms step_avg:49.79ms +[2025-09-11 10:52:09] [Rank 0] step:2601/10000 train_time:129169ms step_avg:49.66ms +[2025-09-11 10:52:09] [Rank 0] step:2601/10000 train_time:129169ms step_avg:49.66ms +[2025-09-11 10:52:10] [Rank 0] step:2621/10000 train_time:129832ms step_avg:49.54ms +[2025-09-11 10:52:10] [Rank 0] step:2621/10000 train_time:129832ms step_avg:49.54ms +[2025-09-11 10:52:10] [Rank 0] step:2641/10000 train_time:130494ms step_avg:49.41ms +[2025-09-11 10:52:10] [Rank 0] step:2641/10000 train_time:130494ms step_avg:49.41ms +[2025-09-11 10:52:11] [Rank 0] step:2661/10000 train_time:131158ms step_avg:49.29ms +[2025-09-11 10:52:11] [Rank 0] step:2661/10000 train_time:131158ms step_avg:49.29ms +[2025-09-11 10:52:12] [Rank 0] step:2681/10000 train_time:131820ms step_avg:49.17ms +[2025-09-11 10:52:12] [Rank 0] step:2681/10000 train_time:131820ms step_avg:49.17ms +[2025-09-11 10:52:12] [Rank 0] step:2701/10000 train_time:132483ms step_avg:49.05ms +[2025-09-11 10:52:12] [Rank 0] step:2701/10000 train_time:132483ms step_avg:49.05ms +[2025-09-11 10:52:13] [Rank 0] step:2721/10000 train_time:133145ms step_avg:48.93ms 
+[2025-09-11 10:52:13] [Rank 0] step:2721/10000 train_time:133145ms step_avg:48.93ms +[2025-09-11 10:52:14] [Rank 0] step:2741/10000 train_time:133807ms step_avg:48.82ms +[2025-09-11 10:52:14] [Rank 0] step:2741/10000 train_time:133807ms step_avg:48.82ms +[2025-09-11 10:52:14] [Rank 0] step:2761/10000 train_time:134469ms step_avg:48.70ms +[2025-09-11 10:52:14] [Rank 0] step:2761/10000 train_time:134469ms step_avg:48.70ms +[2025-09-11 10:52:15] [Rank 0] step:2781/10000 train_time:135131ms step_avg:48.59ms +[2025-09-11 10:52:15] [Rank 0] step:2781/10000 train_time:135131ms step_avg:48.59ms +[2025-09-11 10:52:16] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 10:52:16] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 10:52:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 10:52:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 10:52:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 10:52:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 10:52:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:52:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:52:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 10:52:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 10:52:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 10:52:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 10:52:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 10:52:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 10:52:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 10:52:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 10:52:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 10:52:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 10:52:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 10:52:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 10:52:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 10:52:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 10:52:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 10:52:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 10:52:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 10:52:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 10:52:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 10:52:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 10:52:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 10:52:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 10:52:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 10:52:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 10:52:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 10:52:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 10:52:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 10:52:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 10:52:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 10:52:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 10:52:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 10:52:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 10:52:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 10:52:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 10:52:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 10:52:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 10:52:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:52:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:52:26] [Rank 0] PRINT: step:2800/10000 val_loss:4.7695 total_sharp:5.6475e-04 L1_sharp:1.0944e-02 L2_sharp:9.2391e-03 L3_sharp:9.2954e-03 L4_sharp:8.8036e-03 L5_sharp:1.8095e-02 L6_sharp:3.0551e-02 L7_sharp:3.3860e-02 L8_sharp:6.9043e-02 L9_sharp:6.2505e-02 L10_sharp:8.1496e-02 L11_sharp:1.2952e-01 L12_sharp:4.3339e-01 total_fnorm:3.3250e+01 total_l1_linf:5.6064e+04 total_spectral:1.6625e+01 L1_fnorm:2.5000e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4805e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4609e-01 L8_fnorm:2.4414e-01 L9_fnorm:2.4707e-01 L10_fnorm:2.4707e-01 L11_fnorm:2.4902e-01 L12_fnorm:2.5000e-01 L1_l1linf:7.0312e-02 L2_l1linf:6.7871e-02 L3_l1linf:6.8848e-02 L4_l1linf:6.8359e-02 L5_l1linf:6.8848e-02 L6_l1linf:6.7383e-02 L7_l1linf:6.8848e-02 L8_l1linf:6.6895e-02 L9_l1linf:6.7383e-02 L10_l1linf:6.8848e-02 L11_l1linf:6.8848e-02 L12_l1linf:7.1289e-02 L1_spectral:3.1714e-03 L2_spectral:3.1799e-03 L3_spectral:3.1453e-03 L4_spectral:3.1621e-03 L5_spectral:3.1536e-03 L6_spectral:3.1493e-03 L7_spectral:3.1653e-03 L8_spectral:3.1731e-03 L9_spectral:3.1530e-03 L10_spectral:3.1556e-03 L11_spectral:3.1825e-03 L12_spectral:3.1665e-03 train_time:135775ms step_avg:48.49ms +[2025-09-11 10:52:26] [Rank 0] PRINT: step:2800/10000 val_loss:4.7695 total_sharp:5.6475e-04 L1_sharp:1.0944e-02 L2_sharp:9.2391e-03 L3_sharp:9.2954e-03 L4_sharp:8.8036e-03 L5_sharp:1.8095e-02 L6_sharp:3.0551e-02 L7_sharp:3.3860e-02 L8_sharp:6.9043e-02 L9_sharp:6.2505e-02 L10_sharp:8.1496e-02 L11_sharp:1.2952e-01 L12_sharp:4.3339e-01 total_fnorm:3.3250e+01 total_l1_linf:5.6064e+04 total_spectral:1.6625e+01 L1_fnorm:2.5000e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4805e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4609e-01 L8_fnorm:2.4414e-01 L9_fnorm:2.4707e-01 L10_fnorm:2.4707e-01 L11_fnorm:2.4902e-01 L12_fnorm:2.5000e-01 L1_l1linf:7.0312e-02 L2_l1linf:6.7871e-02 L3_l1linf:6.8848e-02 L4_l1linf:6.8359e-02 L5_l1linf:6.8848e-02 
L6_l1linf:6.7383e-02 L7_l1linf:6.8848e-02 L8_l1linf:6.6895e-02 L9_l1linf:6.7383e-02 L10_l1linf:6.8848e-02 L11_l1linf:6.8848e-02 L12_l1linf:7.1289e-02 L1_spectral:3.1714e-03 L2_spectral:3.1799e-03 L3_spectral:3.1453e-03 L4_spectral:3.1621e-03 L5_spectral:3.1536e-03 L6_spectral:3.1493e-03 L7_spectral:3.1653e-03 L8_spectral:3.1731e-03 L9_spectral:3.1530e-03 L10_spectral:3.1556e-03 L11_spectral:3.1825e-03 L12_spectral:3.1665e-03 train_time:135775ms step_avg:48.49ms +[2025-09-11 10:52:28] [Rank 0] step:2801/10000 train_time:137604ms step_avg:49.13ms +[2025-09-11 10:52:28] [Rank 0] step:2801/10000 train_time:137604ms step_avg:49.13ms +[2025-09-11 10:52:29] [Rank 0] step:2821/10000 train_time:138286ms step_avg:49.02ms +[2025-09-11 10:52:29] [Rank 0] step:2821/10000 train_time:138286ms step_avg:49.02ms +[2025-09-11 10:52:30] [Rank 0] step:2841/10000 train_time:138949ms step_avg:48.91ms +[2025-09-11 10:52:30] [Rank 0] step:2841/10000 train_time:138949ms step_avg:48.91ms +[2025-09-11 10:52:30] [Rank 0] step:2861/10000 train_time:139612ms step_avg:48.80ms +[2025-09-11 10:52:30] [Rank 0] step:2861/10000 train_time:139612ms step_avg:48.80ms +[2025-09-11 10:52:31] [Rank 0] step:2881/10000 train_time:140275ms step_avg:48.69ms +[2025-09-11 10:52:31] [Rank 0] step:2881/10000 train_time:140275ms step_avg:48.69ms +[2025-09-11 10:52:32] [Rank 0] step:2901/10000 train_time:140937ms step_avg:48.58ms +[2025-09-11 10:52:32] [Rank 0] step:2901/10000 train_time:140937ms step_avg:48.58ms +[2025-09-11 10:52:32] [Rank 0] step:2921/10000 train_time:141599ms step_avg:48.48ms +[2025-09-11 10:52:32] [Rank 0] step:2921/10000 train_time:141599ms step_avg:48.48ms +[2025-09-11 10:52:33] [Rank 0] step:2941/10000 train_time:142261ms step_avg:48.37ms +[2025-09-11 10:52:33] [Rank 0] step:2941/10000 train_time:142261ms step_avg:48.37ms +[2025-09-11 10:52:34] [Rank 0] step:2961/10000 train_time:142923ms step_avg:48.27ms +[2025-09-11 10:52:34] [Rank 0] step:2961/10000 train_time:142923ms step_avg:48.27ms 
+[2025-09-11 10:52:34] [Rank 0] step:2981/10000 train_time:143587ms step_avg:48.17ms +[2025-09-11 10:52:34] [Rank 0] step:2981/10000 train_time:143587ms step_avg:48.17ms +[2025-09-11 10:52:35] [Rank 0] step:3001/10000 train_time:144252ms step_avg:48.07ms +[2025-09-11 10:52:35] [Rank 0] step:3001/10000 train_time:144252ms step_avg:48.07ms +[2025-09-11 10:52:36] [Rank 0] step:3021/10000 train_time:144916ms step_avg:47.97ms +[2025-09-11 10:52:36] [Rank 0] step:3021/10000 train_time:144916ms step_avg:47.97ms +[2025-09-11 10:52:36] [Rank 0] step:3041/10000 train_time:145581ms step_avg:47.87ms +[2025-09-11 10:52:36] [Rank 0] step:3041/10000 train_time:145581ms step_avg:47.87ms +[2025-09-11 10:52:37] [Rank 0] step:3061/10000 train_time:146246ms step_avg:47.78ms +[2025-09-11 10:52:37] [Rank 0] step:3061/10000 train_time:146246ms step_avg:47.78ms +[2025-09-11 10:52:38] [Rank 0] step:3081/10000 train_time:146911ms step_avg:47.68ms +[2025-09-11 10:52:38] [Rank 0] step:3081/10000 train_time:146911ms step_avg:47.68ms +[2025-09-11 10:52:38] [Rank 0] step:3101/10000 train_time:147575ms step_avg:47.59ms +[2025-09-11 10:52:38] [Rank 0] step:3101/10000 train_time:147575ms step_avg:47.59ms +[2025-09-11 10:52:39] [Rank 0] step:3121/10000 train_time:148241ms step_avg:47.50ms +[2025-09-11 10:52:39] [Rank 0] step:3121/10000 train_time:148241ms step_avg:47.50ms +[2025-09-11 10:52:40] [Rank 0] step:3141/10000 train_time:148905ms step_avg:47.41ms +[2025-09-11 10:52:40] [Rank 0] step:3141/10000 train_time:148905ms step_avg:47.41ms +[2025-09-11 10:52:40] [Rank 0] step:3161/10000 train_time:149571ms step_avg:47.32ms +[2025-09-11 10:52:40] [Rank 0] step:3161/10000 train_time:149571ms step_avg:47.32ms +[2025-09-11 10:52:41] [Rank 0] step:3181/10000 train_time:150235ms step_avg:47.23ms +[2025-09-11 10:52:41] [Rank 0] step:3181/10000 train_time:150235ms step_avg:47.23ms +[2025-09-11 10:52:41] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 10:52:41] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 10:52:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 10:52:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 10:52:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 10:52:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 10:52:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:52:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:52:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 10:52:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 10:52:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 10:52:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 10:52:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 10:52:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 10:52:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 10:52:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 10:52:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 10:52:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 10:52:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 10:52:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 10:52:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 10:52:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 10:52:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 10:52:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 10:52:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 10:52:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 10:52:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 10:52:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 10:52:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 10:52:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 10:52:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 10:52:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 10:52:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 10:52:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 10:52:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 10:52:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 10:52:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 10:52:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 10:52:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 10:52:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 10:52:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 10:52:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 10:52:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 10:52:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 10:52:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:52:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:52:52] [Rank 0] PRINT: step:3200/10000 val_loss:4.6886 total_sharp:3.7477e-04 L1_sharp:8.4147e-03 L2_sharp:5.6595e-03 L3_sharp:9.7866e-03 L4_sharp:7.6157e-03 L5_sharp:1.6946e-02 L6_sharp:2.6285e-02 L7_sharp:3.3982e-02 L8_sharp:6.6807e-02 L9_sharp:5.8716e-02 L10_sharp:8.2911e-02 L11_sharp:1.2798e-01 L12_sharp:5.7374e-01 total_fnorm:3.9250e+01 total_l1_linf:6.9632e+04 total_spectral:1.9625e+01 L1_fnorm:2.5000e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4805e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4609e-01 L8_fnorm:2.4512e-01 L9_fnorm:2.4609e-01 L10_fnorm:2.4609e-01 L11_fnorm:2.4707e-01 L12_fnorm:2.5000e-01 L1_l1linf:6.8359e-02 L2_l1linf:6.7383e-02 L3_l1linf:6.7383e-02 L4_l1linf:6.8359e-02 L5_l1linf:6.7383e-02 L6_l1linf:6.6895e-02 L7_l1linf:6.5918e-02 L8_l1linf:6.6895e-02 L9_l1linf:6.4453e-02 L10_l1linf:6.4941e-02 L11_l1linf:6.7383e-02 L12_l1linf:7.0801e-02 L1_spectral:3.1950e-03 L2_spectral:3.1903e-03 L3_spectral:3.1746e-03 L4_spectral:3.1642e-03 L5_spectral:3.1629e-03 L6_spectral:3.1586e-03 L7_spectral:3.1625e-03 L8_spectral:3.1947e-03 L9_spectral:3.1807e-03 L10_spectral:3.1831e-03 L11_spectral:3.1936e-03 L12_spectral:3.1949e-03 train_time:150881ms step_avg:47.15ms +[2025-09-11 10:52:52] [Rank 0] PRINT: step:3200/10000 
val_loss:4.6886 total_sharp:3.7477e-04 L1_sharp:8.4147e-03 L2_sharp:5.6595e-03 L3_sharp:9.7866e-03 L4_sharp:7.6157e-03 L5_sharp:1.6946e-02 L6_sharp:2.6285e-02 L7_sharp:3.3982e-02 L8_sharp:6.6807e-02 L9_sharp:5.8716e-02 L10_sharp:8.2911e-02 L11_sharp:1.2798e-01 L12_sharp:5.7374e-01 total_fnorm:3.9250e+01 total_l1_linf:6.9632e+04 total_spectral:1.9625e+01 L1_fnorm:2.5000e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4805e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4609e-01 L8_fnorm:2.4512e-01 L9_fnorm:2.4609e-01 L10_fnorm:2.4609e-01 L11_fnorm:2.4707e-01 L12_fnorm:2.5000e-01 L1_l1linf:6.8359e-02 L2_l1linf:6.7383e-02 L3_l1linf:6.7383e-02 L4_l1linf:6.8359e-02 L5_l1linf:6.7383e-02 L6_l1linf:6.6895e-02 L7_l1linf:6.5918e-02 L8_l1linf:6.6895e-02 L9_l1linf:6.4453e-02 L10_l1linf:6.4941e-02 L11_l1linf:6.7383e-02 L12_l1linf:7.0801e-02 L1_spectral:3.1950e-03 L2_spectral:3.1903e-03 L3_spectral:3.1746e-03 L4_spectral:3.1642e-03 L5_spectral:3.1629e-03 L6_spectral:3.1586e-03 L7_spectral:3.1625e-03 L8_spectral:3.1947e-03 L9_spectral:3.1807e-03 L10_spectral:3.1831e-03 L11_spectral:3.1936e-03 L12_spectral:3.1949e-03 train_time:150881ms step_avg:47.15ms +[2025-09-11 10:52:54] [Rank 0] step:3201/10000 train_time:152630ms step_avg:47.68ms +[2025-09-11 10:52:54] [Rank 0] step:3201/10000 train_time:152630ms step_avg:47.68ms +[2025-09-11 10:52:55] [Rank 0] step:3221/10000 train_time:153286ms step_avg:47.59ms +[2025-09-11 10:52:55] [Rank 0] step:3221/10000 train_time:153286ms step_avg:47.59ms +[2025-09-11 10:52:56] [Rank 0] step:3241/10000 train_time:153952ms step_avg:47.50ms +[2025-09-11 10:52:56] [Rank 0] step:3241/10000 train_time:153952ms step_avg:47.50ms +[2025-09-11 10:52:56] [Rank 0] step:3261/10000 train_time:154618ms step_avg:47.41ms +[2025-09-11 10:52:56] [Rank 0] step:3261/10000 train_time:154618ms step_avg:47.41ms +[2025-09-11 10:52:57] [Rank 0] step:3281/10000 train_time:155284ms step_avg:47.33ms +[2025-09-11 10:52:57] [Rank 0] step:3281/10000 
train_time:155284ms step_avg:47.33ms +[2025-09-11 10:52:58] [Rank 0] step:3301/10000 train_time:155950ms step_avg:47.24ms +[2025-09-11 10:52:58] [Rank 0] step:3301/10000 train_time:155950ms step_avg:47.24ms +[2025-09-11 10:52:58] [Rank 0] step:3321/10000 train_time:156615ms step_avg:47.16ms +[2025-09-11 10:52:58] [Rank 0] step:3321/10000 train_time:156615ms step_avg:47.16ms +[2025-09-11 10:52:59] [Rank 0] step:3341/10000 train_time:157280ms step_avg:47.08ms +[2025-09-11 10:52:59] [Rank 0] step:3341/10000 train_time:157280ms step_avg:47.08ms +[2025-09-11 10:53:00] [Rank 0] step:3361/10000 train_time:157946ms step_avg:46.99ms +[2025-09-11 10:53:00] [Rank 0] step:3361/10000 train_time:157946ms step_avg:46.99ms +[2025-09-11 10:53:00] [Rank 0] step:3381/10000 train_time:158611ms step_avg:46.91ms +[2025-09-11 10:53:00] [Rank 0] step:3381/10000 train_time:158611ms step_avg:46.91ms +[2025-09-11 10:53:01] [Rank 0] step:3401/10000 train_time:159276ms step_avg:46.83ms +[2025-09-11 10:53:01] [Rank 0] step:3401/10000 train_time:159276ms step_avg:46.83ms +[2025-09-11 10:53:02] [Rank 0] step:3421/10000 train_time:159941ms step_avg:46.75ms +[2025-09-11 10:53:02] [Rank 0] step:3421/10000 train_time:159941ms step_avg:46.75ms +[2025-09-11 10:53:02] [Rank 0] step:3441/10000 train_time:160607ms step_avg:46.67ms +[2025-09-11 10:53:02] [Rank 0] step:3441/10000 train_time:160607ms step_avg:46.67ms +[2025-09-11 10:53:03] [Rank 0] step:3461/10000 train_time:161272ms step_avg:46.60ms +[2025-09-11 10:53:03] [Rank 0] step:3461/10000 train_time:161272ms step_avg:46.60ms +[2025-09-11 10:53:04] [Rank 0] step:3481/10000 train_time:161938ms step_avg:46.52ms +[2025-09-11 10:53:04] [Rank 0] step:3481/10000 train_time:161938ms step_avg:46.52ms +[2025-09-11 10:53:05] [Rank 0] step:3501/10000 train_time:162884ms step_avg:46.53ms +[2025-09-11 10:53:05] [Rank 0] step:3501/10000 train_time:162884ms step_avg:46.53ms +[2025-09-11 10:53:05] [Rank 0] step:3521/10000 train_time:163551ms step_avg:46.45ms 
+[2025-09-11 10:53:05] [Rank 0] step:3521/10000 train_time:163551ms step_avg:46.45ms +[2025-09-11 10:53:06] [Rank 0] step:3541/10000 train_time:164216ms step_avg:46.38ms +[2025-09-11 10:53:06] [Rank 0] step:3541/10000 train_time:164216ms step_avg:46.38ms +[2025-09-11 10:53:07] [Rank 0] step:3561/10000 train_time:165154ms step_avg:46.38ms +[2025-09-11 10:53:07] [Rank 0] step:3561/10000 train_time:165154ms step_avg:46.38ms +[2025-09-11 10:53:07] [Rank 0] step:3581/10000 train_time:165818ms step_avg:46.31ms +[2025-09-11 10:53:07] [Rank 0] step:3581/10000 train_time:165818ms step_avg:46.31ms +[2025-09-11 10:53:08] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 10:53:08] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 10:53:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 10:53:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 10:53:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 10:53:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 10:53:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:53:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:53:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 10:53:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 10:53:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 10:53:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 10:53:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 10:53:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 10:53:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 10:53:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 10:53:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 10:53:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 10:53:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 10:53:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 10:53:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 10:53:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 10:53:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 10:53:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 10:53:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 10:53:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 10:53:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 10:53:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 10:53:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 10:53:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 10:53:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 10:53:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 10:53:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 10:53:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 10:53:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 10:53:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 10:53:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 10:53:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 10:53:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 10:53:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 10:53:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 10:53:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 10:53:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 10:53:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 10:53:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:53:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:53:19] [Rank 0] PRINT: step:3600/10000 val_loss:4.6388 total_sharp:3.9146e-04 L1_sharp:5.8870e-03 L2_sharp:4.2466e-03 L3_sharp:7.6044e-03 L4_sharp:4.4550e-03 L5_sharp:1.2498e-02 L6_sharp:1.9743e-02 L7_sharp:2.7147e-02 L8_sharp:5.3504e-02 L9_sharp:5.1857e-02 L10_sharp:7.0724e-02 L11_sharp:1.1369e-01 L12_sharp:5.1217e-01 total_fnorm:3.4750e+01 total_l1_linf:5.8624e+04 total_spectral:1.7375e+01 L1_fnorm:2.4902e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4707e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4609e-01 L8_fnorm:2.4414e-01 L9_fnorm:2.4512e-01 L10_fnorm:2.4609e-01 L11_fnorm:2.4707e-01 L12_fnorm:2.4902e-01 L1_l1linf:6.7383e-02 L2_l1linf:6.6895e-02 L3_l1linf:6.5918e-02 L4_l1linf:6.5918e-02 L5_l1linf:6.5430e-02 L6_l1linf:6.5430e-02 L7_l1linf:6.5918e-02 L8_l1linf:6.5430e-02 L9_l1linf:6.4453e-02 L10_l1linf:6.4453e-02 L11_l1linf:6.5918e-02 L12_l1linf:6.8359e-02 L1_spectral:3.1905e-03 L2_spectral:3.1860e-03 L3_spectral:3.1803e-03 L4_spectral:3.1815e-03 L5_spectral:3.1720e-03 L6_spectral:3.1652e-03 L7_spectral:3.2148e-03 L8_spectral:3.1712e-03 L9_spectral:3.1723e-03 L10_spectral:3.1743e-03 L11_spectral:3.1891e-03 L12_spectral:3.2038e-03 train_time:166465ms step_avg:46.24ms +[2025-09-11 10:53:19] [Rank 0] PRINT: step:3600/10000 val_loss:4.6388 total_sharp:3.9146e-04 L1_sharp:5.8870e-03 L2_sharp:4.2466e-03 L3_sharp:7.6044e-03 L4_sharp:4.4550e-03 L5_sharp:1.2498e-02 L6_sharp:1.9743e-02 L7_sharp:2.7147e-02 L8_sharp:5.3504e-02 L9_sharp:5.1857e-02 L10_sharp:7.0724e-02 L11_sharp:1.1369e-01 L12_sharp:5.1217e-01 total_fnorm:3.4750e+01 total_l1_linf:5.8624e+04 total_spectral:1.7375e+01 L1_fnorm:2.4902e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4707e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4609e-01 L8_fnorm:2.4414e-01 L9_fnorm:2.4512e-01 L10_fnorm:2.4609e-01 L11_fnorm:2.4707e-01 L12_fnorm:2.4902e-01 L1_l1linf:6.7383e-02 L2_l1linf:6.6895e-02 L3_l1linf:6.5918e-02 L4_l1linf:6.5918e-02 L5_l1linf:6.5430e-02 
L6_l1linf:6.5430e-02 L7_l1linf:6.5918e-02 L8_l1linf:6.5430e-02 L9_l1linf:6.4453e-02 L10_l1linf:6.4453e-02 L11_l1linf:6.5918e-02 L12_l1linf:6.8359e-02 L1_spectral:3.1905e-03 L2_spectral:3.1860e-03 L3_spectral:3.1803e-03 L4_spectral:3.1815e-03 L5_spectral:3.1720e-03 L6_spectral:3.1652e-03 L7_spectral:3.2148e-03 L8_spectral:3.1712e-03 L9_spectral:3.1723e-03 L10_spectral:3.1743e-03 L11_spectral:3.1891e-03 L12_spectral:3.2038e-03 train_time:166465ms step_avg:46.24ms +[2025-09-11 10:53:21] [Rank 0] step:3601/10000 train_time:168252ms step_avg:46.72ms +[2025-09-11 10:53:21] [Rank 0] step:3601/10000 train_time:168252ms step_avg:46.72ms +[2025-09-11 10:53:22] [Rank 0] step:3621/10000 train_time:168919ms step_avg:46.65ms +[2025-09-11 10:53:22] [Rank 0] step:3621/10000 train_time:168919ms step_avg:46.65ms +[2025-09-11 10:53:23] [Rank 0] step:3641/10000 train_time:169583ms step_avg:46.58ms +[2025-09-11 10:53:23] [Rank 0] step:3641/10000 train_time:169583ms step_avg:46.58ms +[2025-09-11 10:53:23] [Rank 0] step:3661/10000 train_time:170250ms step_avg:46.50ms +[2025-09-11 10:53:23] [Rank 0] step:3661/10000 train_time:170250ms step_avg:46.50ms +[2025-09-11 10:53:24] [Rank 0] step:3681/10000 train_time:170914ms step_avg:46.43ms +[2025-09-11 10:53:24] [Rank 0] step:3681/10000 train_time:170914ms step_avg:46.43ms +[2025-09-11 10:53:25] [Rank 0] step:3701/10000 train_time:171579ms step_avg:46.36ms +[2025-09-11 10:53:25] [Rank 0] step:3701/10000 train_time:171579ms step_avg:46.36ms +[2025-09-11 10:53:25] [Rank 0] step:3721/10000 train_time:172254ms step_avg:46.29ms +[2025-09-11 10:53:25] [Rank 0] step:3721/10000 train_time:172254ms step_avg:46.29ms +[2025-09-11 10:53:26] [Rank 0] step:3741/10000 train_time:172929ms step_avg:46.23ms +[2025-09-11 10:53:26] [Rank 0] step:3741/10000 train_time:172929ms step_avg:46.23ms +[2025-09-11 10:53:27] [Rank 0] step:3761/10000 train_time:173604ms step_avg:46.16ms +[2025-09-11 10:53:27] [Rank 0] step:3761/10000 train_time:173604ms step_avg:46.16ms 
+[2025-09-11 10:53:27] [Rank 0] step:3781/10000 train_time:174280ms step_avg:46.09ms +[2025-09-11 10:53:27] [Rank 0] step:3781/10000 train_time:174280ms step_avg:46.09ms +[2025-09-11 10:53:28] [Rank 0] step:3801/10000 train_time:174955ms step_avg:46.03ms +[2025-09-11 10:53:28] [Rank 0] step:3801/10000 train_time:174955ms step_avg:46.03ms +[2025-09-11 10:53:29] [Rank 0] step:3821/10000 train_time:175631ms step_avg:45.96ms +[2025-09-11 10:53:29] [Rank 0] step:3821/10000 train_time:175631ms step_avg:45.96ms +[2025-09-11 10:53:29] [Rank 0] step:3841/10000 train_time:176307ms step_avg:45.90ms +[2025-09-11 10:53:29] [Rank 0] step:3841/10000 train_time:176307ms step_avg:45.90ms +[2025-09-11 10:53:30] [Rank 0] step:3861/10000 train_time:176982ms step_avg:45.84ms +[2025-09-11 10:53:30] [Rank 0] step:3861/10000 train_time:176982ms step_avg:45.84ms +[2025-09-11 10:53:31] [Rank 0] step:3881/10000 train_time:177658ms step_avg:45.78ms +[2025-09-11 10:53:31] [Rank 0] step:3881/10000 train_time:177658ms step_avg:45.78ms +[2025-09-11 10:53:31] [Rank 0] step:3901/10000 train_time:178334ms step_avg:45.71ms +[2025-09-11 10:53:31] [Rank 0] step:3901/10000 train_time:178334ms step_avg:45.71ms +[2025-09-11 10:53:32] [Rank 0] step:3921/10000 train_time:179010ms step_avg:45.65ms +[2025-09-11 10:53:32] [Rank 0] step:3921/10000 train_time:179010ms step_avg:45.65ms +[2025-09-11 10:53:33] [Rank 0] step:3941/10000 train_time:179686ms step_avg:45.59ms +[2025-09-11 10:53:33] [Rank 0] step:3941/10000 train_time:179686ms step_avg:45.59ms +[2025-09-11 10:53:33] [Rank 0] step:3961/10000 train_time:180362ms step_avg:45.53ms +[2025-09-11 10:53:33] [Rank 0] step:3961/10000 train_time:180362ms step_avg:45.53ms +[2025-09-11 10:53:34] [Rank 0] step:3981/10000 train_time:181038ms step_avg:45.48ms +[2025-09-11 10:53:34] [Rank 0] step:3981/10000 train_time:181038ms step_avg:45.48ms +[2025-09-11 10:53:35] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 10:53:35] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 10:53:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 10:53:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 10:53:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 10:53:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 10:53:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:53:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:53:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 10:53:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 10:53:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 10:53:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 10:53:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 10:53:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 10:53:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 10:53:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 10:53:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 10:53:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 10:53:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 10:53:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 10:53:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 10:53:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 10:53:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 10:53:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 10:53:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 10:53:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 10:53:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 10:53:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 10:53:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 10:53:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 10:53:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 10:53:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 10:53:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 10:53:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 10:53:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 10:53:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 10:53:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 10:53:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 10:53:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 10:53:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 10:53:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 10:53:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 10:53:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 10:53:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 10:53:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:53:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:53:46] [Rank 0] PRINT: step:4000/10000 val_loss:4.5798 total_sharp:4.4191e-04 L1_sharp:1.0257e-02 L2_sharp:6.3742e-03 L3_sharp:1.1611e-02 L4_sharp:9.9316e-03 L5_sharp:1.5033e-02 L6_sharp:2.1154e-02 L7_sharp:3.2334e-02 L8_sharp:5.6928e-02 L9_sharp:6.2851e-02 L10_sharp:8.9448e-02 L11_sharp:1.3768e-01 L12_sharp:1.3257e+00 total_fnorm:4.0500e+01 total_l1_linf:6.8608e+04 total_spectral:2.0250e+01 L1_fnorm:2.4805e-01 L2_fnorm:2.4707e-01 L3_fnorm:2.4609e-01 L4_fnorm:2.4414e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4316e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.4219e-01 L9_fnorm:2.4512e-01 L10_fnorm:2.4609e-01 L11_fnorm:2.4609e-01 L12_fnorm:2.4805e-01 L1_l1linf:6.5918e-02 L2_l1linf:6.3965e-02 L3_l1linf:6.4941e-02 L4_l1linf:6.4941e-02 L5_l1linf:6.4453e-02 L6_l1linf:6.4453e-02 L7_l1linf:6.4941e-02 L8_l1linf:6.3477e-02 L9_l1linf:6.2988e-02 L10_l1linf:6.3965e-02 L11_l1linf:6.4941e-02 L12_l1linf:6.6406e-02 L1_spectral:3.1905e-03 L2_spectral:3.1885e-03 L3_spectral:3.1768e-03 L4_spectral:3.1712e-03 L5_spectral:3.1604e-03 L6_spectral:3.1719e-03 L7_spectral:3.1809e-03 L8_spectral:3.1613e-03 L9_spectral:3.1734e-03 L10_spectral:3.1770e-03 L11_spectral:3.1974e-03 L12_spectral:3.1897e-03 train_time:181694ms step_avg:45.42ms +[2025-09-11 10:53:46] [Rank 0] PRINT: step:4000/10000 
val_loss:4.5798 total_sharp:4.4191e-04 L1_sharp:1.0257e-02 L2_sharp:6.3742e-03 L3_sharp:1.1611e-02 L4_sharp:9.9316e-03 L5_sharp:1.5033e-02 L6_sharp:2.1154e-02 L7_sharp:3.2334e-02 L8_sharp:5.6928e-02 L9_sharp:6.2851e-02 L10_sharp:8.9448e-02 L11_sharp:1.3768e-01 L12_sharp:1.3257e+00 total_fnorm:4.0500e+01 total_l1_linf:6.8608e+04 total_spectral:2.0250e+01 L1_fnorm:2.4805e-01 L2_fnorm:2.4707e-01 L3_fnorm:2.4609e-01 L4_fnorm:2.4414e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4316e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.4219e-01 L9_fnorm:2.4512e-01 L10_fnorm:2.4609e-01 L11_fnorm:2.4609e-01 L12_fnorm:2.4805e-01 L1_l1linf:6.5918e-02 L2_l1linf:6.3965e-02 L3_l1linf:6.4941e-02 L4_l1linf:6.4941e-02 L5_l1linf:6.4453e-02 L6_l1linf:6.4453e-02 L7_l1linf:6.4941e-02 L8_l1linf:6.3477e-02 L9_l1linf:6.2988e-02 L10_l1linf:6.3965e-02 L11_l1linf:6.4941e-02 L12_l1linf:6.6406e-02 L1_spectral:3.1905e-03 L2_spectral:3.1885e-03 L3_spectral:3.1768e-03 L4_spectral:3.1712e-03 L5_spectral:3.1604e-03 L6_spectral:3.1719e-03 L7_spectral:3.1809e-03 L8_spectral:3.1613e-03 L9_spectral:3.1734e-03 L10_spectral:3.1770e-03 L11_spectral:3.1974e-03 L12_spectral:3.1897e-03 train_time:181694ms step_avg:45.42ms +[2025-09-11 10:53:47] [Rank 0] step:4001/10000 train_time:183545ms step_avg:45.87ms +[2025-09-11 10:53:47] [Rank 0] step:4001/10000 train_time:183545ms step_avg:45.87ms +[2025-09-11 10:53:48] [Rank 0] step:4021/10000 train_time:184249ms step_avg:45.82ms +[2025-09-11 10:53:48] [Rank 0] step:4021/10000 train_time:184249ms step_avg:45.82ms +[2025-09-11 10:53:49] [Rank 0] step:4041/10000 train_time:184927ms step_avg:45.76ms +[2025-09-11 10:53:49] [Rank 0] step:4041/10000 train_time:184927ms step_avg:45.76ms +[2025-09-11 10:53:50] [Rank 0] step:4061/10000 train_time:185602ms step_avg:45.70ms +[2025-09-11 10:53:50] [Rank 0] step:4061/10000 train_time:185602ms step_avg:45.70ms +[2025-09-11 10:53:50] [Rank 0] step:4081/10000 train_time:186279ms step_avg:45.65ms +[2025-09-11 10:53:50] [Rank 0] step:4081/10000 
train_time:186279ms step_avg:45.65ms +[2025-09-11 10:53:51] [Rank 0] step:4101/10000 train_time:186955ms step_avg:45.59ms +[2025-09-11 10:53:51] [Rank 0] step:4101/10000 train_time:186955ms step_avg:45.59ms +[2025-09-11 10:53:52] [Rank 0] step:4121/10000 train_time:187632ms step_avg:45.53ms +[2025-09-11 10:53:52] [Rank 0] step:4121/10000 train_time:187632ms step_avg:45.53ms +[2025-09-11 10:53:52] [Rank 0] step:4141/10000 train_time:188308ms step_avg:45.47ms +[2025-09-11 10:53:52] [Rank 0] step:4141/10000 train_time:188308ms step_avg:45.47ms +[2025-09-11 10:53:53] [Rank 0] step:4161/10000 train_time:188984ms step_avg:45.42ms +[2025-09-11 10:53:53] [Rank 0] step:4161/10000 train_time:188984ms step_avg:45.42ms +[2025-09-11 10:53:54] [Rank 0] step:4181/10000 train_time:189663ms step_avg:45.36ms +[2025-09-11 10:53:54] [Rank 0] step:4181/10000 train_time:189663ms step_avg:45.36ms +[2025-09-11 10:53:54] [Rank 0] step:4201/10000 train_time:190338ms step_avg:45.31ms +[2025-09-11 10:53:54] [Rank 0] step:4201/10000 train_time:190338ms step_avg:45.31ms +[2025-09-11 10:53:55] [Rank 0] step:4221/10000 train_time:191014ms step_avg:45.25ms +[2025-09-11 10:53:55] [Rank 0] step:4221/10000 train_time:191014ms step_avg:45.25ms +[2025-09-11 10:53:56] [Rank 0] step:4241/10000 train_time:191690ms step_avg:45.20ms +[2025-09-11 10:53:56] [Rank 0] step:4241/10000 train_time:191690ms step_avg:45.20ms +[2025-09-11 10:53:56] [Rank 0] step:4261/10000 train_time:192366ms step_avg:45.15ms +[2025-09-11 10:53:56] [Rank 0] step:4261/10000 train_time:192366ms step_avg:45.15ms +[2025-09-11 10:53:57] [Rank 0] step:4281/10000 train_time:193052ms step_avg:45.10ms +[2025-09-11 10:53:57] [Rank 0] step:4281/10000 train_time:193052ms step_avg:45.10ms +[2025-09-11 10:53:58] [Rank 0] step:4301/10000 train_time:193729ms step_avg:45.04ms +[2025-09-11 10:53:58] [Rank 0] step:4301/10000 train_time:193729ms step_avg:45.04ms +[2025-09-11 10:53:58] [Rank 0] step:4321/10000 train_time:194405ms step_avg:44.99ms 
+[2025-09-11 10:53:58] [Rank 0] step:4321/10000 train_time:194405ms step_avg:44.99ms +[2025-09-11 10:53:59] [Rank 0] step:4341/10000 train_time:195082ms step_avg:44.94ms +[2025-09-11 10:53:59] [Rank 0] step:4341/10000 train_time:195082ms step_avg:44.94ms +[2025-09-11 10:54:00] [Rank 0] step:4361/10000 train_time:195757ms step_avg:44.89ms +[2025-09-11 10:54:00] [Rank 0] step:4361/10000 train_time:195757ms step_avg:44.89ms +[2025-09-11 10:54:00] [Rank 0] step:4381/10000 train_time:196434ms step_avg:44.84ms +[2025-09-11 10:54:00] [Rank 0] step:4381/10000 train_time:196434ms step_avg:44.84ms +[2025-09-11 10:54:01] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 10:54:01] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 10:54:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 10:54:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 10:54:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 10:54:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 10:54:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:54:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:54:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 10:54:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 10:54:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 10:54:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 10:54:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 10:54:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 10:54:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 10:54:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 10:54:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 10:54:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 10:54:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 10:54:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 10:54:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 10:54:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 10:54:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 10:54:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 10:54:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 10:54:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 10:54:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 10:54:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 10:54:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 10:54:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 10:54:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 10:54:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 10:54:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 10:54:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 10:54:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 10:54:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 10:54:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 10:54:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 10:54:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 10:54:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 10:54:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 10:54:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 10:54:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 10:54:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 10:54:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:54:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:54:16] [Rank 0] PRINT: step:4400/10000 val_loss:4.5433 total_sharp:3.8212e-04 L1_sharp:4.8613e-03 L2_sharp:3.5797e-03 L3_sharp:5.7137e-03 L4_sharp:5.4071e-03 L5_sharp:1.0038e-02 L6_sharp:1.6578e-02 L7_sharp:2.2473e-02 L8_sharp:5.0177e-02 L9_sharp:4.8584e-02 L10_sharp:6.3218e-02 L11_sharp:1.0800e-01 L12_sharp:1.2181e+00 total_fnorm:3.5000e+01 total_l1_linf:5.8112e+04 total_spectral:1.7500e+01 L1_fnorm:2.4707e-01 L2_fnorm:2.4609e-01 L3_fnorm:2.4512e-01 L4_fnorm:2.4414e-01 L5_fnorm:2.4316e-01 L6_fnorm:2.4316e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.4219e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4512e-01 L11_fnorm:2.4707e-01 L12_fnorm:2.4707e-01 L1_l1linf:6.3965e-02 L2_l1linf:6.4453e-02 L3_l1linf:6.2988e-02 L4_l1linf:6.3477e-02 L5_l1linf:6.2988e-02 L6_l1linf:6.2988e-02 L7_l1linf:6.2256e-02 L8_l1linf:6.1768e-02 L9_l1linf:6.2500e-02 L10_l1linf:6.3965e-02 L11_l1linf:6.5918e-02 L12_l1linf:6.3477e-02 L1_spectral:3.1852e-03 L2_spectral:3.1858e-03 L3_spectral:3.1821e-03 L4_spectral:3.1888e-03 L5_spectral:3.1931e-03 L6_spectral:3.1721e-03 L7_spectral:3.1861e-03 L8_spectral:3.1862e-03 L9_spectral:3.1881e-03 L10_spectral:3.1904e-03 L11_spectral:3.1820e-03 L12_spectral:3.1825e-03 train_time:197090ms step_avg:44.79ms +[2025-09-11 10:54:16] [Rank 0] PRINT: step:4400/10000 val_loss:4.5433 total_sharp:3.8212e-04 L1_sharp:4.8613e-03 L2_sharp:3.5797e-03 L3_sharp:5.7137e-03 L4_sharp:5.4071e-03 L5_sharp:1.0038e-02 L6_sharp:1.6578e-02 L7_sharp:2.2473e-02 L8_sharp:5.0177e-02 L9_sharp:4.8584e-02 L10_sharp:6.3218e-02 L11_sharp:1.0800e-01 L12_sharp:1.2181e+00 total_fnorm:3.5000e+01 total_l1_linf:5.8112e+04 total_spectral:1.7500e+01 L1_fnorm:2.4707e-01 L2_fnorm:2.4609e-01 L3_fnorm:2.4512e-01 L4_fnorm:2.4414e-01 L5_fnorm:2.4316e-01 L6_fnorm:2.4316e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.4219e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4512e-01 L11_fnorm:2.4707e-01 L12_fnorm:2.4707e-01 L1_l1linf:6.3965e-02 L2_l1linf:6.4453e-02 L3_l1linf:6.2988e-02 L4_l1linf:6.3477e-02 L5_l1linf:6.2988e-02 
L6_l1linf:6.2988e-02 L7_l1linf:6.2256e-02 L8_l1linf:6.1768e-02 L9_l1linf:6.2500e-02 L10_l1linf:6.3965e-02 L11_l1linf:6.5918e-02 L12_l1linf:6.3477e-02 L1_spectral:3.1852e-03 L2_spectral:3.1858e-03 L3_spectral:3.1821e-03 L4_spectral:3.1888e-03 L5_spectral:3.1931e-03 L6_spectral:3.1721e-03 L7_spectral:3.1861e-03 L8_spectral:3.1862e-03 L9_spectral:3.1881e-03 L10_spectral:3.1904e-03 L11_spectral:3.1820e-03 L12_spectral:3.1825e-03 train_time:197090ms step_avg:44.79ms +[2025-09-11 10:54:18] [Rank 0] step:4401/10000 train_time:198925ms step_avg:45.20ms +[2025-09-11 10:54:18] [Rank 0] step:4401/10000 train_time:198925ms step_avg:45.20ms +[2025-09-11 10:54:19] [Rank 0] step:4421/10000 train_time:199639ms step_avg:45.16ms +[2025-09-11 10:54:19] [Rank 0] step:4421/10000 train_time:199639ms step_avg:45.16ms +[2025-09-11 10:54:19] [Rank 0] step:4441/10000 train_time:200315ms step_avg:45.11ms +[2025-09-11 10:54:19] [Rank 0] step:4441/10000 train_time:200315ms step_avg:45.11ms +[2025-09-11 10:54:20] [Rank 0] step:4461/10000 train_time:200993ms step_avg:45.06ms +[2025-09-11 10:54:20] [Rank 0] step:4461/10000 train_time:200993ms step_avg:45.06ms +[2025-09-11 10:54:21] [Rank 0] step:4481/10000 train_time:201672ms step_avg:45.01ms +[2025-09-11 10:54:21] [Rank 0] step:4481/10000 train_time:201672ms step_avg:45.01ms +[2025-09-11 10:54:21] [Rank 0] step:4501/10000 train_time:202351ms step_avg:44.96ms +[2025-09-11 10:54:21] [Rank 0] step:4501/10000 train_time:202351ms step_avg:44.96ms +[2025-09-11 10:54:22] [Rank 0] step:4521/10000 train_time:203028ms step_avg:44.91ms +[2025-09-11 10:54:22] [Rank 0] step:4521/10000 train_time:203028ms step_avg:44.91ms +[2025-09-11 10:54:23] [Rank 0] step:4541/10000 train_time:203707ms step_avg:44.86ms +[2025-09-11 10:54:23] [Rank 0] step:4541/10000 train_time:203707ms step_avg:44.86ms +[2025-09-11 10:54:23] [Rank 0] step:4561/10000 train_time:204385ms step_avg:44.81ms +[2025-09-11 10:54:23] [Rank 0] step:4561/10000 train_time:204385ms step_avg:44.81ms 
+[2025-09-11 10:54:24] [Rank 0] step:4581/10000 train_time:205062ms step_avg:44.76ms +[2025-09-11 10:54:24] [Rank 0] step:4581/10000 train_time:205062ms step_avg:44.76ms +[2025-09-11 10:54:25] [Rank 0] step:4601/10000 train_time:205740ms step_avg:44.72ms +[2025-09-11 10:54:25] [Rank 0] step:4601/10000 train_time:205740ms step_avg:44.72ms +[2025-09-11 10:54:25] [Rank 0] step:4621/10000 train_time:206418ms step_avg:44.67ms +[2025-09-11 10:54:25] [Rank 0] step:4621/10000 train_time:206418ms step_avg:44.67ms +[2025-09-11 10:54:26] [Rank 0] step:4641/10000 train_time:207095ms step_avg:44.62ms +[2025-09-11 10:54:26] [Rank 0] step:4641/10000 train_time:207095ms step_avg:44.62ms +[2025-09-11 10:54:27] [Rank 0] step:4661/10000 train_time:207773ms step_avg:44.58ms +[2025-09-11 10:54:27] [Rank 0] step:4661/10000 train_time:207773ms step_avg:44.58ms +[2025-09-11 10:54:27] [Rank 0] step:4681/10000 train_time:208451ms step_avg:44.53ms +[2025-09-11 10:54:27] [Rank 0] step:4681/10000 train_time:208451ms step_avg:44.53ms +[2025-09-11 10:54:28] [Rank 0] step:4701/10000 train_time:209128ms step_avg:44.49ms +[2025-09-11 10:54:28] [Rank 0] step:4701/10000 train_time:209128ms step_avg:44.49ms +[2025-09-11 10:54:29] [Rank 0] step:4721/10000 train_time:209806ms step_avg:44.44ms +[2025-09-11 10:54:29] [Rank 0] step:4721/10000 train_time:209806ms step_avg:44.44ms +[2025-09-11 10:54:29] [Rank 0] step:4741/10000 train_time:210483ms step_avg:44.40ms +[2025-09-11 10:54:29] [Rank 0] step:4741/10000 train_time:210483ms step_avg:44.40ms +[2025-09-11 10:54:30] [Rank 0] step:4761/10000 train_time:211164ms step_avg:44.35ms +[2025-09-11 10:54:30] [Rank 0] step:4761/10000 train_time:211164ms step_avg:44.35ms +[2025-09-11 10:54:31] [Rank 0] step:4781/10000 train_time:211842ms step_avg:44.31ms +[2025-09-11 10:54:31] [Rank 0] step:4781/10000 train_time:211842ms step_avg:44.31ms +[2025-09-11 10:54:31] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 10:54:31] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 10:54:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 10:54:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 10:54:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 10:54:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 10:54:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:54:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:54:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 10:54:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 10:54:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 10:54:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 10:54:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 10:54:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 10:54:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 10:54:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 10:54:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 10:54:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 10:54:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 10:54:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 10:54:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 10:54:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 10:54:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 10:54:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 10:54:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 10:54:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 10:54:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 10:54:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 10:54:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 10:54:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 10:54:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 10:54:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 10:54:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 10:54:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 10:54:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 10:54:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 10:54:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 10:54:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 10:54:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 10:54:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 10:54:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 10:54:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 10:54:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 10:54:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 10:54:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:54:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:54:42] [Rank 0] PRINT: step:4800/10000 val_loss:4.4999 total_sharp:3.2160e-04 L1_sharp:8.5204e-03 L2_sharp:1.1154e-03 L3_sharp:5.8854e-03 L4_sharp:5.4242e-03 L5_sharp:1.1984e-02 L6_sharp:1.6596e-02 L7_sharp:2.2324e-02 L8_sharp:5.5377e-02 L9_sharp:4.6436e-02 L10_sharp:6.4561e-02 L11_sharp:9.3855e-02 L12_sharp:6.4621e-01 total_fnorm:3.7000e+01 total_l1_linf:6.3232e+04 total_spectral:1.8625e+01 L1_fnorm:2.4707e-01 L2_fnorm:2.4707e-01 L3_fnorm:2.4609e-01 L4_fnorm:2.4414e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4316e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.4219e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4414e-01 L11_fnorm:2.4512e-01 L12_fnorm:2.4707e-01 L1_l1linf:6.3965e-02 L2_l1linf:6.3965e-02 L3_l1linf:6.2988e-02 L4_l1linf:6.2988e-02 L5_l1linf:6.2988e-02 L6_l1linf:6.2500e-02 L7_l1linf:6.2012e-02 L8_l1linf:6.0791e-02 L9_l1linf:6.1279e-02 L10_l1linf:6.0059e-02 L11_l1linf:6.3477e-02 L12_l1linf:6.4941e-02 L1_spectral:3.2056e-03 L2_spectral:3.2170e-03 L3_spectral:3.1946e-03 L4_spectral:3.2248e-03 L5_spectral:3.1700e-03 L6_spectral:3.2058e-03 L7_spectral:3.2086e-03 L8_spectral:3.1952e-03 L9_spectral:3.2015e-03 L10_spectral:3.1964e-03 L11_spectral:3.1921e-03 L12_spectral:3.2126e-03 train_time:212506ms step_avg:44.27ms +[2025-09-11 10:54:42] [Rank 0] PRINT: step:4800/10000 
val_loss:4.4999 total_sharp:3.2160e-04 L1_sharp:8.5204e-03 L2_sharp:1.1154e-03 L3_sharp:5.8854e-03 L4_sharp:5.4242e-03 L5_sharp:1.1984e-02 L6_sharp:1.6596e-02 L7_sharp:2.2324e-02 L8_sharp:5.5377e-02 L9_sharp:4.6436e-02 L10_sharp:6.4561e-02 L11_sharp:9.3855e-02 L12_sharp:6.4621e-01 total_fnorm:3.7000e+01 total_l1_linf:6.3232e+04 total_spectral:1.8625e+01 L1_fnorm:2.4707e-01 L2_fnorm:2.4707e-01 L3_fnorm:2.4609e-01 L4_fnorm:2.4414e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4316e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.4219e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4414e-01 L11_fnorm:2.4512e-01 L12_fnorm:2.4707e-01 L1_l1linf:6.3965e-02 L2_l1linf:6.3965e-02 L3_l1linf:6.2988e-02 L4_l1linf:6.2988e-02 L5_l1linf:6.2988e-02 L6_l1linf:6.2500e-02 L7_l1linf:6.2012e-02 L8_l1linf:6.0791e-02 L9_l1linf:6.1279e-02 L10_l1linf:6.0059e-02 L11_l1linf:6.3477e-02 L12_l1linf:6.4941e-02 L1_spectral:3.2056e-03 L2_spectral:3.2170e-03 L3_spectral:3.1946e-03 L4_spectral:3.2248e-03 L5_spectral:3.1700e-03 L6_spectral:3.2058e-03 L7_spectral:3.2086e-03 L8_spectral:3.1952e-03 L9_spectral:3.2015e-03 L10_spectral:3.1964e-03 L11_spectral:3.1921e-03 L12_spectral:3.2126e-03 train_time:212506ms step_avg:44.27ms +[2025-09-11 10:54:44] [Rank 0] step:4801/10000 train_time:214312ms step_avg:44.64ms +[2025-09-11 10:54:44] [Rank 0] step:4801/10000 train_time:214312ms step_avg:44.64ms +[2025-09-11 10:54:45] [Rank 0] step:4821/10000 train_time:215016ms step_avg:44.60ms +[2025-09-11 10:54:45] [Rank 0] step:4821/10000 train_time:215016ms step_avg:44.60ms +[2025-09-11 10:54:45] [Rank 0] step:4841/10000 train_time:215697ms step_avg:44.56ms +[2025-09-11 10:54:45] [Rank 0] step:4841/10000 train_time:215697ms step_avg:44.56ms +[2025-09-11 10:54:46] [Rank 0] step:4861/10000 train_time:216376ms step_avg:44.51ms +[2025-09-11 10:54:46] [Rank 0] step:4861/10000 train_time:216376ms step_avg:44.51ms +[2025-09-11 10:54:47] [Rank 0] step:4881/10000 train_time:217054ms step_avg:44.47ms +[2025-09-11 10:54:47] [Rank 0] step:4881/10000 
train_time:217054ms step_avg:44.47ms +[2025-09-11 10:54:47] [Rank 0] step:4901/10000 train_time:217736ms step_avg:44.43ms +[2025-09-11 10:54:47] [Rank 0] step:4901/10000 train_time:217736ms step_avg:44.43ms +[2025-09-11 10:54:48] [Rank 0] step:4921/10000 train_time:218415ms step_avg:44.38ms +[2025-09-11 10:54:48] [Rank 0] step:4921/10000 train_time:218415ms step_avg:44.38ms +[2025-09-11 10:54:49] [Rank 0] step:4941/10000 train_time:219093ms step_avg:44.34ms +[2025-09-11 10:54:49] [Rank 0] step:4941/10000 train_time:219093ms step_avg:44.34ms +[2025-09-11 10:54:49] [Rank 0] step:4961/10000 train_time:219772ms step_avg:44.30ms +[2025-09-11 10:54:49] [Rank 0] step:4961/10000 train_time:219772ms step_avg:44.30ms +[2025-09-11 10:54:50] [Rank 0] step:4981/10000 train_time:220450ms step_avg:44.26ms +[2025-09-11 10:54:50] [Rank 0] step:4981/10000 train_time:220450ms step_avg:44.26ms +[2025-09-11 10:54:51] [Rank 0] step:5001/10000 train_time:221131ms step_avg:44.22ms +[2025-09-11 10:54:51] [Rank 0] step:5001/10000 train_time:221131ms step_avg:44.22ms +[2025-09-11 10:54:51] [Rank 0] step:5021/10000 train_time:221808ms step_avg:44.18ms +[2025-09-11 10:54:51] [Rank 0] step:5021/10000 train_time:221808ms step_avg:44.18ms +[2025-09-11 10:54:52] [Rank 0] step:5041/10000 train_time:222486ms step_avg:44.14ms +[2025-09-11 10:54:52] [Rank 0] step:5041/10000 train_time:222486ms step_avg:44.14ms +[2025-09-11 10:54:53] [Rank 0] step:5061/10000 train_time:223164ms step_avg:44.09ms +[2025-09-11 10:54:53] [Rank 0] step:5061/10000 train_time:223164ms step_avg:44.09ms +[2025-09-11 10:54:54] [Rank 0] step:5081/10000 train_time:223842ms step_avg:44.05ms +[2025-09-11 10:54:54] [Rank 0] step:5081/10000 train_time:223842ms step_avg:44.05ms +[2025-09-11 10:54:54] [Rank 0] step:5101/10000 train_time:224520ms step_avg:44.01ms +[2025-09-11 10:54:54] [Rank 0] step:5101/10000 train_time:224520ms step_avg:44.01ms +[2025-09-11 10:54:55] [Rank 0] step:5121/10000 train_time:225199ms step_avg:43.98ms 
+[2025-09-11 10:54:55] [Rank 0] step:5121/10000 train_time:225199ms step_avg:43.98ms +[2025-09-11 10:54:56] [Rank 0] step:5141/10000 train_time:225878ms step_avg:43.94ms +[2025-09-11 10:54:56] [Rank 0] step:5141/10000 train_time:225878ms step_avg:43.94ms +[2025-09-11 10:54:56] [Rank 0] step:5161/10000 train_time:226557ms step_avg:43.90ms +[2025-09-11 10:54:56] [Rank 0] step:5161/10000 train_time:226557ms step_avg:43.90ms +[2025-09-11 10:54:57] [Rank 0] step:5181/10000 train_time:227235ms step_avg:43.86ms +[2025-09-11 10:54:57] [Rank 0] step:5181/10000 train_time:227235ms step_avg:43.86ms +[2025-09-11 10:54:58] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 10:54:58] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 10:54:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 10:54:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 10:55:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 10:55:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 10:55:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:55:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:55:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 10:55:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 10:55:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 10:55:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 10:55:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 10:55:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 10:55:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 10:55:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 10:55:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 10:55:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 10:55:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 10:55:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 10:55:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 10:55:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 10:55:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 10:55:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 10:55:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 10:55:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 10:55:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 10:55:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 10:55:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 10:55:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 10:55:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 10:55:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 10:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 10:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 10:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 10:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 10:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 10:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 10:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 10:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 10:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 10:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 10:55:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 10:55:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 10:55:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:55:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:55:09] [Rank 0] PRINT: step:5200/10000 val_loss:4.4657 total_sharp:5.2824e-04 L1_sharp:6.1103e-03 L2_sharp:2.2671e-03 L3_sharp:7.4535e-03 L4_sharp:4.7312e-03 L5_sharp:1.1985e-02 L6_sharp:2.0539e-02 L7_sharp:2.4072e-02 L8_sharp:5.7284e-02 L9_sharp:5.9847e-02 L10_sharp:8.5625e-02 L11_sharp:1.2961e-01 L12_sharp:1.4873e+00 total_fnorm:3.3750e+01 total_l1_linf:5.4272e+04 total_spectral:1.6875e+01 L1_fnorm:2.4805e-01 L2_fnorm:2.4707e-01 L3_fnorm:2.4512e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4414e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.4219e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4414e-01 L11_fnorm:2.4414e-01 L12_fnorm:2.4609e-01 L1_l1linf:6.3477e-02 L2_l1linf:6.3477e-02 L3_l1linf:6.2256e-02 L4_l1linf:6.3477e-02 L5_l1linf:6.2012e-02 L6_l1linf:6.2500e-02 L7_l1linf:6.2988e-02 L8_l1linf:6.0547e-02 L9_l1linf:6.0059e-02 L10_l1linf:6.0791e-02 L11_l1linf:6.0303e-02 L12_l1linf:6.2500e-02 L1_spectral:3.2303e-03 L2_spectral:3.1939e-03 L3_spectral:3.2088e-03 L4_spectral:3.1867e-03 L5_spectral:3.1954e-03 L6_spectral:3.1851e-03 L7_spectral:3.2080e-03 L8_spectral:3.1778e-03 L9_spectral:3.1904e-03 L10_spectral:3.1970e-03 L11_spectral:3.2068e-03 L12_spectral:3.1858e-03 train_time:227901ms step_avg:43.83ms +[2025-09-11 10:55:09] [Rank 0] PRINT: step:5200/10000 val_loss:4.4657 total_sharp:5.2824e-04 L1_sharp:6.1103e-03 L2_sharp:2.2671e-03 L3_sharp:7.4535e-03 L4_sharp:4.7312e-03 L5_sharp:1.1985e-02 L6_sharp:2.0539e-02 L7_sharp:2.4072e-02 L8_sharp:5.7284e-02 L9_sharp:5.9847e-02 L10_sharp:8.5625e-02 L11_sharp:1.2961e-01 L12_sharp:1.4873e+00 total_fnorm:3.3750e+01 total_l1_linf:5.4272e+04 total_spectral:1.6875e+01 L1_fnorm:2.4805e-01 L2_fnorm:2.4707e-01 L3_fnorm:2.4512e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4414e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.4219e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4414e-01 L11_fnorm:2.4414e-01 L12_fnorm:2.4609e-01 L1_l1linf:6.3477e-02 L2_l1linf:6.3477e-02 L3_l1linf:6.2256e-02 L4_l1linf:6.3477e-02 L5_l1linf:6.2012e-02 
L6_l1linf:6.2500e-02 L7_l1linf:6.2988e-02 L8_l1linf:6.0547e-02 L9_l1linf:6.0059e-02 L10_l1linf:6.0791e-02 L11_l1linf:6.0303e-02 L12_l1linf:6.2500e-02 L1_spectral:3.2303e-03 L2_spectral:3.1939e-03 L3_spectral:3.2088e-03 L4_spectral:3.1867e-03 L5_spectral:3.1954e-03 L6_spectral:3.1851e-03 L7_spectral:3.2080e-03 L8_spectral:3.1778e-03 L9_spectral:3.1904e-03 L10_spectral:3.1970e-03 L11_spectral:3.2068e-03 L12_spectral:3.1858e-03 train_time:227901ms step_avg:43.83ms +[2025-09-11 10:55:11] [Rank 0] step:5201/10000 train_time:230407ms step_avg:44.30ms +[2025-09-11 10:55:11] [Rank 0] step:5201/10000 train_time:230407ms step_avg:44.30ms +[2025-09-11 10:55:12] [Rank 0] step:5221/10000 train_time:231110ms step_avg:44.27ms +[2025-09-11 10:55:12] [Rank 0] step:5221/10000 train_time:231110ms step_avg:44.27ms +[2025-09-11 10:55:13] [Rank 0] step:5241/10000 train_time:231798ms step_avg:44.23ms +[2025-09-11 10:55:13] [Rank 0] step:5241/10000 train_time:231798ms step_avg:44.23ms +[2025-09-11 10:55:14] [Rank 0] step:5261/10000 train_time:232775ms step_avg:44.25ms +[2025-09-11 10:55:14] [Rank 0] step:5261/10000 train_time:232775ms step_avg:44.25ms +[2025-09-11 10:55:14] [Rank 0] step:5281/10000 train_time:233463ms step_avg:44.21ms +[2025-09-11 10:55:14] [Rank 0] step:5281/10000 train_time:233463ms step_avg:44.21ms +[2025-09-11 10:55:15] [Rank 0] step:5301/10000 train_time:234150ms step_avg:44.17ms +[2025-09-11 10:55:15] [Rank 0] step:5301/10000 train_time:234150ms step_avg:44.17ms +[2025-09-11 10:55:16] [Rank 0] step:5321/10000 train_time:234838ms step_avg:44.13ms +[2025-09-11 10:55:16] [Rank 0] step:5321/10000 train_time:234838ms step_avg:44.13ms +[2025-09-11 10:55:16] [Rank 0] step:5341/10000 train_time:235524ms step_avg:44.10ms +[2025-09-11 10:55:16] [Rank 0] step:5341/10000 train_time:235524ms step_avg:44.10ms +[2025-09-11 10:55:17] [Rank 0] step:5361/10000 train_time:236213ms step_avg:44.06ms +[2025-09-11 10:55:17] [Rank 0] step:5361/10000 train_time:236213ms step_avg:44.06ms 
+[2025-09-11 10:55:18] [Rank 0] step:5381/10000 train_time:236902ms step_avg:44.03ms +[2025-09-11 10:55:18] [Rank 0] step:5381/10000 train_time:236902ms step_avg:44.03ms +[2025-09-11 10:55:18] [Rank 0] step:5401/10000 train_time:237587ms step_avg:43.99ms +[2025-09-11 10:55:18] [Rank 0] step:5401/10000 train_time:237587ms step_avg:43.99ms +[2025-09-11 10:55:19] [Rank 0] step:5421/10000 train_time:238276ms step_avg:43.95ms +[2025-09-11 10:55:19] [Rank 0] step:5421/10000 train_time:238276ms step_avg:43.95ms +[2025-09-11 10:55:20] [Rank 0] step:5441/10000 train_time:238963ms step_avg:43.92ms +[2025-09-11 10:55:20] [Rank 0] step:5441/10000 train_time:238963ms step_avg:43.92ms +[2025-09-11 10:55:20] [Rank 0] step:5461/10000 train_time:239651ms step_avg:43.88ms +[2025-09-11 10:55:20] [Rank 0] step:5461/10000 train_time:239651ms step_avg:43.88ms +[2025-09-11 10:55:21] [Rank 0] step:5481/10000 train_time:240340ms step_avg:43.85ms +[2025-09-11 10:55:21] [Rank 0] step:5481/10000 train_time:240340ms step_avg:43.85ms +[2025-09-11 10:55:22] [Rank 0] step:5501/10000 train_time:241026ms step_avg:43.81ms +[2025-09-11 10:55:22] [Rank 0] step:5501/10000 train_time:241026ms step_avg:43.81ms +[2025-09-11 10:55:22] [Rank 0] step:5521/10000 train_time:241713ms step_avg:43.78ms +[2025-09-11 10:55:22] [Rank 0] step:5521/10000 train_time:241713ms step_avg:43.78ms +[2025-09-11 10:55:23] [Rank 0] step:5541/10000 train_time:242403ms step_avg:43.75ms +[2025-09-11 10:55:23] [Rank 0] step:5541/10000 train_time:242403ms step_avg:43.75ms +[2025-09-11 10:55:24] [Rank 0] step:5561/10000 train_time:243099ms step_avg:43.72ms +[2025-09-11 10:55:24] [Rank 0] step:5561/10000 train_time:243099ms step_avg:43.72ms +[2025-09-11 10:55:25] [Rank 0] step:5581/10000 train_time:243788ms step_avg:43.68ms +[2025-09-11 10:55:25] [Rank 0] step:5581/10000 train_time:243788ms step_avg:43.68ms +[2025-09-11 10:55:25] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 10:55:25] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 10:55:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 10:55:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 10:55:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 10:55:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 10:55:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:55:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:55:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 10:55:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 10:55:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 10:55:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 10:55:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 10:55:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 10:55:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 10:55:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 10:55:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 10:55:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 10:55:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 10:55:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 10:55:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 10:55:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 10:55:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 10:55:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 10:55:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 10:55:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 10:55:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 10:55:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 10:55:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 10:55:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 10:55:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 10:55:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 10:55:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 10:55:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 10:55:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 10:55:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 10:55:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 10:55:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 10:55:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 10:55:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 10:55:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 10:55:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 10:55:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 10:55:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 10:55:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:55:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:55:36] [Rank 0] PRINT: step:5600/10000 val_loss:4.4398 total_sharp:3.2642e-04 L1_sharp:3.5736e-03 L2_sharp:4.3107e-04 L3_sharp:4.1257e-03 L4_sharp:2.0830e-03 L5_sharp:1.0126e-02 L6_sharp:1.7056e-02 L7_sharp:1.9328e-02 L8_sharp:4.7814e-02 L9_sharp:4.4591e-02 L10_sharp:6.2513e-02 L11_sharp:8.8234e-02 L12_sharp:3.7192e-01 total_fnorm:3.4000e+01 total_l1_linf:5.5552e+04 total_spectral:1.7000e+01 L1_fnorm:2.4707e-01 L2_fnorm:2.4512e-01 L3_fnorm:2.4414e-01 L4_fnorm:2.4316e-01 L5_fnorm:2.4219e-01 L6_fnorm:2.4219e-01 L7_fnorm:2.4316e-01 L8_fnorm:2.4023e-01 L9_fnorm:2.4316e-01 L10_fnorm:2.4316e-01 L11_fnorm:2.4316e-01 L12_fnorm:2.4512e-01 L1_l1linf:6.2256e-02 L2_l1linf:6.2012e-02 L3_l1linf:6.1035e-02 L4_l1linf:6.2012e-02 L5_l1linf:6.0059e-02 L6_l1linf:6.1523e-02 L7_l1linf:5.9814e-02 L8_l1linf:6.0547e-02 L9_l1linf:6.0059e-02 L10_l1linf:5.9082e-02 L11_l1linf:5.8838e-02 L12_l1linf:6.2500e-02 L1_spectral:3.2161e-03 L2_spectral:3.1952e-03 L3_spectral:3.2163e-03 L4_spectral:3.2001e-03 L5_spectral:3.2082e-03 L6_spectral:3.1952e-03 L7_spectral:3.1994e-03 L8_spectral:3.1729e-03 L9_spectral:3.2153e-03 L10_spectral:3.2185e-03 L11_spectral:3.2025e-03 L12_spectral:3.2123e-03 train_time:244456ms step_avg:43.65ms +[2025-09-11 10:55:36] [Rank 0] PRINT: step:5600/10000 
val_loss:4.4398 total_sharp:3.2642e-04 L1_sharp:3.5736e-03 L2_sharp:4.3107e-04 L3_sharp:4.1257e-03 L4_sharp:2.0830e-03 L5_sharp:1.0126e-02 L6_sharp:1.7056e-02 L7_sharp:1.9328e-02 L8_sharp:4.7814e-02 L9_sharp:4.4591e-02 L10_sharp:6.2513e-02 L11_sharp:8.8234e-02 L12_sharp:3.7192e-01 total_fnorm:3.4000e+01 total_l1_linf:5.5552e+04 total_spectral:1.7000e+01 L1_fnorm:2.4707e-01 L2_fnorm:2.4512e-01 L3_fnorm:2.4414e-01 L4_fnorm:2.4316e-01 L5_fnorm:2.4219e-01 L6_fnorm:2.4219e-01 L7_fnorm:2.4316e-01 L8_fnorm:2.4023e-01 L9_fnorm:2.4316e-01 L10_fnorm:2.4316e-01 L11_fnorm:2.4316e-01 L12_fnorm:2.4512e-01 L1_l1linf:6.2256e-02 L2_l1linf:6.2012e-02 L3_l1linf:6.1035e-02 L4_l1linf:6.2012e-02 L5_l1linf:6.0059e-02 L6_l1linf:6.1523e-02 L7_l1linf:5.9814e-02 L8_l1linf:6.0547e-02 L9_l1linf:6.0059e-02 L10_l1linf:5.9082e-02 L11_l1linf:5.8838e-02 L12_l1linf:6.2500e-02 L1_spectral:3.2161e-03 L2_spectral:3.1952e-03 L3_spectral:3.2163e-03 L4_spectral:3.2001e-03 L5_spectral:3.2082e-03 L6_spectral:3.1952e-03 L7_spectral:3.1994e-03 L8_spectral:3.1729e-03 L9_spectral:3.2153e-03 L10_spectral:3.2185e-03 L11_spectral:3.2025e-03 L12_spectral:3.2123e-03 train_time:244456ms step_avg:43.65ms +[2025-09-11 10:55:38] [Rank 0] step:5601/10000 train_time:246364ms step_avg:43.99ms +[2025-09-11 10:55:38] [Rank 0] step:5601/10000 train_time:246364ms step_avg:43.99ms +[2025-09-11 10:55:39] [Rank 0] step:5621/10000 train_time:247072ms step_avg:43.96ms +[2025-09-11 10:55:39] [Rank 0] step:5621/10000 train_time:247072ms step_avg:43.96ms +[2025-09-11 10:55:40] [Rank 0] step:5641/10000 train_time:247759ms step_avg:43.92ms +[2025-09-11 10:55:40] [Rank 0] step:5641/10000 train_time:247759ms step_avg:43.92ms +[2025-09-11 10:55:40] [Rank 0] step:5661/10000 train_time:248446ms step_avg:43.89ms +[2025-09-11 10:55:40] [Rank 0] step:5661/10000 train_time:248446ms step_avg:43.89ms +[2025-09-11 10:55:41] [Rank 0] step:5681/10000 train_time:249134ms step_avg:43.85ms +[2025-09-11 10:55:41] [Rank 0] step:5681/10000 
train_time:249134ms step_avg:43.85ms +[2025-09-11 10:55:42] [Rank 0] step:5701/10000 train_time:249824ms step_avg:43.82ms +[2025-09-11 10:55:42] [Rank 0] step:5701/10000 train_time:249824ms step_avg:43.82ms +[2025-09-11 10:55:42] [Rank 0] step:5721/10000 train_time:250511ms step_avg:43.79ms +[2025-09-11 10:55:42] [Rank 0] step:5721/10000 train_time:250511ms step_avg:43.79ms +[2025-09-11 10:55:43] [Rank 0] step:5741/10000 train_time:251200ms step_avg:43.76ms +[2025-09-11 10:55:43] [Rank 0] step:5741/10000 train_time:251200ms step_avg:43.76ms +[2025-09-11 10:55:44] [Rank 0] step:5761/10000 train_time:251890ms step_avg:43.72ms +[2025-09-11 10:55:44] [Rank 0] step:5761/10000 train_time:251890ms step_avg:43.72ms +[2025-09-11 10:55:44] [Rank 0] step:5781/10000 train_time:252579ms step_avg:43.69ms +[2025-09-11 10:55:44] [Rank 0] step:5781/10000 train_time:252579ms step_avg:43.69ms +[2025-09-11 10:55:45] [Rank 0] step:5801/10000 train_time:253271ms step_avg:43.66ms +[2025-09-11 10:55:45] [Rank 0] step:5801/10000 train_time:253271ms step_avg:43.66ms +[2025-09-11 10:55:46] [Rank 0] step:5821/10000 train_time:253959ms step_avg:43.63ms +[2025-09-11 10:55:46] [Rank 0] step:5821/10000 train_time:253959ms step_avg:43.63ms +[2025-09-11 10:55:46] [Rank 0] step:5841/10000 train_time:254648ms step_avg:43.60ms +[2025-09-11 10:55:46] [Rank 0] step:5841/10000 train_time:254648ms step_avg:43.60ms +[2025-09-11 10:55:47] [Rank 0] step:5861/10000 train_time:255335ms step_avg:43.57ms +[2025-09-11 10:55:47] [Rank 0] step:5861/10000 train_time:255335ms step_avg:43.57ms +[2025-09-11 10:55:48] [Rank 0] step:5881/10000 train_time:256023ms step_avg:43.53ms +[2025-09-11 10:55:48] [Rank 0] step:5881/10000 train_time:256023ms step_avg:43.53ms +[2025-09-11 10:55:48] [Rank 0] step:5901/10000 train_time:256710ms step_avg:43.50ms +[2025-09-11 10:55:48] [Rank 0] step:5901/10000 train_time:256710ms step_avg:43.50ms +[2025-09-11 10:55:49] [Rank 0] step:5921/10000 train_time:257400ms step_avg:43.47ms 
+[2025-09-11 10:55:49] [Rank 0] step:5921/10000 train_time:257400ms step_avg:43.47ms +[2025-09-11 10:55:50] [Rank 0] step:5941/10000 train_time:258095ms step_avg:43.44ms +[2025-09-11 10:55:50] [Rank 0] step:5941/10000 train_time:258095ms step_avg:43.44ms +[2025-09-11 10:55:51] [Rank 0] step:5961/10000 train_time:258783ms step_avg:43.41ms +[2025-09-11 10:55:51] [Rank 0] step:5961/10000 train_time:258783ms step_avg:43.41ms +[2025-09-11 10:55:51] [Rank 0] step:5981/10000 train_time:259472ms step_avg:43.38ms +[2025-09-11 10:55:51] [Rank 0] step:5981/10000 train_time:259472ms step_avg:43.38ms +[2025-09-11 10:55:52] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 10:55:52] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 10:55:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 10:55:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 10:55:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 10:55:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 10:55:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:55:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:55:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 10:55:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 10:55:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 10:55:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 10:55:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 10:55:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 10:55:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 10:55:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 10:56:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 10:56:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 10:56:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 10:56:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 10:56:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 10:56:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 10:56:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 10:56:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 10:56:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 10:56:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 10:56:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 10:56:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 10:56:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 10:56:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 10:56:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 10:56:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 10:56:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 10:56:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 10:56:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 10:56:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 10:56:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 10:56:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 10:56:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 10:56:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 10:56:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 10:56:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 10:56:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 10:56:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 10:56:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:56:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:56:03] [Rank 0] PRINT: step:6000/10000 val_loss:4.4048 total_sharp:3.7012e-04 L1_sharp:7.1370e-03 L2_sharp:2.6686e-03 L3_sharp:7.4620e-03 L4_sharp:2.5855e-03 L5_sharp:7.9193e-03 L6_sharp:1.5596e-02 L7_sharp:1.9229e-02 L8_sharp:4.4907e-02 L9_sharp:4.4379e-02 L10_sharp:5.9282e-02 L11_sharp:8.8479e-02 L12_sharp:1.1022e+00 total_fnorm:3.4250e+01 total_l1_linf:5.5296e+04 total_spectral:1.7125e+01 L1_fnorm:2.4707e-01 L2_fnorm:2.4609e-01 L3_fnorm:2.4414e-01 L4_fnorm:2.4414e-01 L5_fnorm:2.4316e-01 L6_fnorm:2.4316e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.4121e-01 L9_fnorm:2.4316e-01 L10_fnorm:2.4316e-01 L11_fnorm:2.4316e-01 L12_fnorm:2.4512e-01 L1_l1linf:6.1523e-02 L2_l1linf:6.1279e-02 L3_l1linf:6.0791e-02 L4_l1linf:6.0303e-02 L5_l1linf:6.1035e-02 L6_l1linf:6.0303e-02 L7_l1linf:5.9814e-02 L8_l1linf:5.8350e-02 L9_l1linf:5.8594e-02 L10_l1linf:5.7617e-02 L11_l1linf:5.8105e-02 L12_l1linf:6.3965e-02 L1_spectral:3.2164e-03 L2_spectral:3.2108e-03 L3_spectral:3.2267e-03 L4_spectral:3.2338e-03 L5_spectral:3.2190e-03 L6_spectral:3.1917e-03 L7_spectral:3.2367e-03 L8_spectral:3.1975e-03 L9_spectral:3.2091e-03 L10_spectral:3.2032e-03 L11_spectral:3.2228e-03 L12_spectral:3.2187e-03 train_time:260144ms step_avg:43.36ms +[2025-09-11 10:56:03] [Rank 0] PRINT: step:6000/10000 val_loss:4.4048 total_sharp:3.7012e-04 L1_sharp:7.1370e-03 L2_sharp:2.6686e-03 L3_sharp:7.4620e-03 L4_sharp:2.5855e-03 L5_sharp:7.9193e-03 L6_sharp:1.5596e-02 L7_sharp:1.9229e-02 L8_sharp:4.4907e-02 L9_sharp:4.4379e-02 L10_sharp:5.9282e-02 L11_sharp:8.8479e-02 L12_sharp:1.1022e+00 total_fnorm:3.4250e+01 total_l1_linf:5.5296e+04 total_spectral:1.7125e+01 L1_fnorm:2.4707e-01 L2_fnorm:2.4609e-01 L3_fnorm:2.4414e-01 L4_fnorm:2.4414e-01 L5_fnorm:2.4316e-01 L6_fnorm:2.4316e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.4121e-01 L9_fnorm:2.4316e-01 L10_fnorm:2.4316e-01 L11_fnorm:2.4316e-01 L12_fnorm:2.4512e-01 L1_l1linf:6.1523e-02 L2_l1linf:6.1279e-02 L3_l1linf:6.0791e-02 L4_l1linf:6.0303e-02 L5_l1linf:6.1035e-02 
L6_l1linf:6.0303e-02 L7_l1linf:5.9814e-02 L8_l1linf:5.8350e-02 L9_l1linf:5.8594e-02 L10_l1linf:5.7617e-02 L11_l1linf:5.8105e-02 L12_l1linf:6.3965e-02 L1_spectral:3.2164e-03 L2_spectral:3.2108e-03 L3_spectral:3.2267e-03 L4_spectral:3.2338e-03 L5_spectral:3.2190e-03 L6_spectral:3.1917e-03 L7_spectral:3.2367e-03 L8_spectral:3.1975e-03 L9_spectral:3.2091e-03 L10_spectral:3.2032e-03 L11_spectral:3.2228e-03 L12_spectral:3.2187e-03 train_time:260144ms step_avg:43.36ms +[2025-09-11 10:56:05] [Rank 0] step:6001/10000 train_time:261986ms step_avg:43.66ms +[2025-09-11 10:56:05] [Rank 0] step:6001/10000 train_time:261986ms step_avg:43.66ms +[2025-09-11 10:56:06] [Rank 0] step:6021/10000 train_time:262702ms step_avg:43.63ms +[2025-09-11 10:56:06] [Rank 0] step:6021/10000 train_time:262702ms step_avg:43.63ms +[2025-09-11 10:56:07] [Rank 0] step:6041/10000 train_time:263395ms step_avg:43.60ms +[2025-09-11 10:56:07] [Rank 0] step:6041/10000 train_time:263395ms step_avg:43.60ms +[2025-09-11 10:56:07] [Rank 0] step:6061/10000 train_time:264085ms step_avg:43.57ms +[2025-09-11 10:56:07] [Rank 0] step:6061/10000 train_time:264085ms step_avg:43.57ms +[2025-09-11 10:56:08] [Rank 0] step:6081/10000 train_time:264775ms step_avg:43.54ms +[2025-09-11 10:56:08] [Rank 0] step:6081/10000 train_time:264775ms step_avg:43.54ms +[2025-09-11 10:56:09] [Rank 0] step:6101/10000 train_time:265464ms step_avg:43.51ms +[2025-09-11 10:56:09] [Rank 0] step:6101/10000 train_time:265464ms step_avg:43.51ms +[2025-09-11 10:56:09] [Rank 0] step:6121/10000 train_time:266156ms step_avg:43.48ms +[2025-09-11 10:56:09] [Rank 0] step:6121/10000 train_time:266156ms step_avg:43.48ms +[2025-09-11 10:56:10] [Rank 0] step:6141/10000 train_time:266847ms step_avg:43.45ms +[2025-09-11 10:56:10] [Rank 0] step:6141/10000 train_time:266847ms step_avg:43.45ms +[2025-09-11 10:56:11] [Rank 0] step:6161/10000 train_time:267536ms step_avg:43.42ms +[2025-09-11 10:56:11] [Rank 0] step:6161/10000 train_time:267536ms step_avg:43.42ms 
+[2025-09-11 10:56:11] [Rank 0] step:6181/10000 train_time:268225ms step_avg:43.40ms +[2025-09-11 10:56:11] [Rank 0] step:6181/10000 train_time:268225ms step_avg:43.40ms +[2025-09-11 10:56:12] [Rank 0] step:6201/10000 train_time:268916ms step_avg:43.37ms +[2025-09-11 10:56:12] [Rank 0] step:6201/10000 train_time:268916ms step_avg:43.37ms +[2025-09-11 10:56:13] [Rank 0] step:6221/10000 train_time:269608ms step_avg:43.34ms +[2025-09-11 10:56:13] [Rank 0] step:6221/10000 train_time:269608ms step_avg:43.34ms +[2025-09-11 10:56:14] [Rank 0] step:6241/10000 train_time:270299ms step_avg:43.31ms +[2025-09-11 10:56:14] [Rank 0] step:6241/10000 train_time:270299ms step_avg:43.31ms +[2025-09-11 10:56:14] [Rank 0] step:6261/10000 train_time:271285ms step_avg:43.33ms +[2025-09-11 10:56:14] [Rank 0] step:6261/10000 train_time:271285ms step_avg:43.33ms +[2025-09-11 10:56:15] [Rank 0] step:6281/10000 train_time:271975ms step_avg:43.30ms +[2025-09-11 10:56:15] [Rank 0] step:6281/10000 train_time:271975ms step_avg:43.30ms +[2025-09-11 10:56:16] [Rank 0] step:6301/10000 train_time:272664ms step_avg:43.27ms +[2025-09-11 10:56:16] [Rank 0] step:6301/10000 train_time:272664ms step_avg:43.27ms +[2025-09-11 10:56:17] [Rank 0] step:6321/10000 train_time:273356ms step_avg:43.25ms +[2025-09-11 10:56:17] [Rank 0] step:6321/10000 train_time:273356ms step_avg:43.25ms +[2025-09-11 10:56:18] [Rank 0] step:6341/10000 train_time:274321ms step_avg:43.26ms +[2025-09-11 10:56:18] [Rank 0] step:6341/10000 train_time:274321ms step_avg:43.26ms +[2025-09-11 10:56:18] [Rank 0] step:6361/10000 train_time:275012ms step_avg:43.23ms +[2025-09-11 10:56:18] [Rank 0] step:6361/10000 train_time:275012ms step_avg:43.23ms +[2025-09-11 10:56:19] [Rank 0] step:6381/10000 train_time:275701ms step_avg:43.21ms +[2025-09-11 10:56:19] [Rank 0] step:6381/10000 train_time:275701ms step_avg:43.21ms +[2025-09-11 10:56:20] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 10:56:20] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 10:56:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 10:56:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 10:56:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 10:56:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 10:56:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:56:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:56:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 10:56:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 10:56:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 10:56:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 10:56:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 10:56:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 10:56:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 10:56:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 10:56:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 10:56:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 10:56:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 10:56:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 10:56:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 10:56:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 10:56:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 10:56:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 10:56:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 10:56:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 10:56:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 10:56:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 10:56:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 10:56:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 10:56:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 10:56:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 10:56:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 10:56:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 10:56:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 10:56:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 10:56:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 10:56:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 10:56:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 10:56:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 10:56:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 10:56:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 10:56:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 10:56:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 10:56:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:56:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:56:31] [Rank 0] PRINT: step:6400/10000 val_loss:4.3716 total_sharp:3.9699e-04 L1_sharp:3.9990e-03 L2_sharp:2.8784e-03 L3_sharp:6.6232e-03 L4_sharp:4.1648e-03 L5_sharp:8.8989e-03 L6_sharp:1.4773e-02 L7_sharp:1.9775e-02 L8_sharp:4.7894e-02 L9_sharp:4.8590e-02 L10_sharp:6.8525e-02 L11_sharp:1.0175e-01 L12_sharp:8.1418e-01 total_fnorm:2.9625e+01 total_l1_linf:4.5568e+04 total_spectral:1.4875e+01 L1_fnorm:2.2070e-01 L2_fnorm:2.1875e-01 L3_fnorm:2.1777e-01 L4_fnorm:2.1680e-01 L5_fnorm:2.1680e-01 L6_fnorm:2.1582e-01 L7_fnorm:2.1680e-01 L8_fnorm:2.1387e-01 L9_fnorm:2.1680e-01 L10_fnorm:2.1680e-01 L11_fnorm:2.1680e-01 L12_fnorm:2.1680e-01 L1_l1linf:5.2979e-02 L2_l1linf:5.2002e-02 L3_l1linf:5.1025e-02 L4_l1linf:5.2002e-02 L5_l1linf:5.1758e-02 L6_l1linf:5.0537e-02 L7_l1linf:5.1270e-02 L8_l1linf:5.0293e-02 L9_l1linf:5.0537e-02 L10_l1linf:4.9805e-02 L11_l1linf:4.9561e-02 L12_l1linf:5.3223e-02 L1_spectral:2.9390e-03 L2_spectral:2.9133e-03 L3_spectral:2.9191e-03 L4_spectral:2.9117e-03 L5_spectral:2.9022e-03 L6_spectral:2.9104e-03 L7_spectral:2.9187e-03 L8_spectral:2.9032e-03 L9_spectral:2.9172e-03 L10_spectral:2.8970e-03 L11_spectral:2.9072e-03 L12_spectral:2.8992e-03 train_time:276371ms step_avg:43.18ms +[2025-09-11 10:56:31] [Rank 0] PRINT: step:6400/10000 
val_loss:4.3716 total_sharp:3.9699e-04 L1_sharp:3.9990e-03 L2_sharp:2.8784e-03 L3_sharp:6.6232e-03 L4_sharp:4.1648e-03 L5_sharp:8.8989e-03 L6_sharp:1.4773e-02 L7_sharp:1.9775e-02 L8_sharp:4.7894e-02 L9_sharp:4.8590e-02 L10_sharp:6.8525e-02 L11_sharp:1.0175e-01 L12_sharp:8.1418e-01 total_fnorm:2.9625e+01 total_l1_linf:4.5568e+04 total_spectral:1.4875e+01 L1_fnorm:2.2070e-01 L2_fnorm:2.1875e-01 L3_fnorm:2.1777e-01 L4_fnorm:2.1680e-01 L5_fnorm:2.1680e-01 L6_fnorm:2.1582e-01 L7_fnorm:2.1680e-01 L8_fnorm:2.1387e-01 L9_fnorm:2.1680e-01 L10_fnorm:2.1680e-01 L11_fnorm:2.1680e-01 L12_fnorm:2.1680e-01 L1_l1linf:5.2979e-02 L2_l1linf:5.2002e-02 L3_l1linf:5.1025e-02 L4_l1linf:5.2002e-02 L5_l1linf:5.1758e-02 L6_l1linf:5.0537e-02 L7_l1linf:5.1270e-02 L8_l1linf:5.0293e-02 L9_l1linf:5.0537e-02 L10_l1linf:4.9805e-02 L11_l1linf:4.9561e-02 L12_l1linf:5.3223e-02 L1_spectral:2.9390e-03 L2_spectral:2.9133e-03 L3_spectral:2.9191e-03 L4_spectral:2.9117e-03 L5_spectral:2.9022e-03 L6_spectral:2.9104e-03 L7_spectral:2.9187e-03 L8_spectral:2.9032e-03 L9_spectral:2.9172e-03 L10_spectral:2.8970e-03 L11_spectral:2.9072e-03 L12_spectral:2.8992e-03 train_time:276371ms step_avg:43.18ms +[2025-09-11 10:56:32] [Rank 0] step:6401/10000 train_time:278246ms step_avg:43.47ms +[2025-09-11 10:56:32] [Rank 0] step:6401/10000 train_time:278246ms step_avg:43.47ms +[2025-09-11 10:56:33] [Rank 0] step:6421/10000 train_time:278956ms step_avg:43.44ms +[2025-09-11 10:56:33] [Rank 0] step:6421/10000 train_time:278956ms step_avg:43.44ms +[2025-09-11 10:56:34] [Rank 0] step:6441/10000 train_time:279648ms step_avg:43.42ms +[2025-09-11 10:56:34] [Rank 0] step:6441/10000 train_time:279648ms step_avg:43.42ms +[2025-09-11 10:56:35] [Rank 0] step:6461/10000 train_time:280340ms step_avg:43.39ms +[2025-09-11 10:56:35] [Rank 0] step:6461/10000 train_time:280340ms step_avg:43.39ms +[2025-09-11 10:56:35] [Rank 0] step:6481/10000 train_time:281033ms step_avg:43.36ms +[2025-09-11 10:56:35] [Rank 0] step:6481/10000 
train_time:281033ms step_avg:43.36ms +[2025-09-11 10:56:36] [Rank 0] step:6501/10000 train_time:281728ms step_avg:43.34ms +[2025-09-11 10:56:36] [Rank 0] step:6501/10000 train_time:281728ms step_avg:43.34ms +[2025-09-11 10:56:37] [Rank 0] step:6521/10000 train_time:282418ms step_avg:43.31ms +[2025-09-11 10:56:37] [Rank 0] step:6521/10000 train_time:282418ms step_avg:43.31ms +[2025-09-11 10:56:37] [Rank 0] step:6541/10000 train_time:283108ms step_avg:43.28ms +[2025-09-11 10:56:37] [Rank 0] step:6541/10000 train_time:283108ms step_avg:43.28ms +[2025-09-11 10:56:38] [Rank 0] step:6561/10000 train_time:283800ms step_avg:43.26ms +[2025-09-11 10:56:38] [Rank 0] step:6561/10000 train_time:283800ms step_avg:43.26ms +[2025-09-11 10:56:39] [Rank 0] step:6581/10000 train_time:284491ms step_avg:43.23ms +[2025-09-11 10:56:39] [Rank 0] step:6581/10000 train_time:284491ms step_avg:43.23ms +[2025-09-11 10:56:39] [Rank 0] step:6601/10000 train_time:285183ms step_avg:43.20ms +[2025-09-11 10:56:39] [Rank 0] step:6601/10000 train_time:285183ms step_avg:43.20ms +[2025-09-11 10:56:40] [Rank 0] step:6621/10000 train_time:285874ms step_avg:43.18ms +[2025-09-11 10:56:40] [Rank 0] step:6621/10000 train_time:285874ms step_avg:43.18ms +[2025-09-11 10:56:41] [Rank 0] step:6641/10000 train_time:286567ms step_avg:43.15ms +[2025-09-11 10:56:41] [Rank 0] step:6641/10000 train_time:286567ms step_avg:43.15ms +[2025-09-11 10:56:41] [Rank 0] step:6661/10000 train_time:287259ms step_avg:43.13ms +[2025-09-11 10:56:41] [Rank 0] step:6661/10000 train_time:287259ms step_avg:43.13ms +[2025-09-11 10:56:42] [Rank 0] step:6681/10000 train_time:287957ms step_avg:43.10ms +[2025-09-11 10:56:42] [Rank 0] step:6681/10000 train_time:287957ms step_avg:43.10ms +[2025-09-11 10:56:43] [Rank 0] step:6701/10000 train_time:288655ms step_avg:43.08ms +[2025-09-11 10:56:43] [Rank 0] step:6701/10000 train_time:288655ms step_avg:43.08ms +[2025-09-11 10:56:44] [Rank 0] step:6721/10000 train_time:289355ms step_avg:43.05ms 
+[2025-09-11 10:56:44] [Rank 0] step:6721/10000 train_time:289355ms step_avg:43.05ms +[2025-09-11 10:56:44] [Rank 0] step:6741/10000 train_time:290053ms step_avg:43.03ms +[2025-09-11 10:56:44] [Rank 0] step:6741/10000 train_time:290053ms step_avg:43.03ms +[2025-09-11 10:56:45] [Rank 0] step:6761/10000 train_time:290751ms step_avg:43.00ms +[2025-09-11 10:56:45] [Rank 0] step:6761/10000 train_time:290751ms step_avg:43.00ms +[2025-09-11 10:56:46] [Rank 0] step:6781/10000 train_time:291449ms step_avg:42.98ms +[2025-09-11 10:56:46] [Rank 0] step:6781/10000 train_time:291449ms step_avg:42.98ms +[2025-09-11 10:56:46] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 10:56:46] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 10:56:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 10:56:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 10:56:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 10:56:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 10:56:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:56:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:56:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 10:56:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 10:56:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 10:56:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 10:56:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 10:56:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 10:56:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 10:56:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 10:56:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 10:56:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 10:56:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 10:56:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 10:56:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 10:56:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 10:56:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 10:56:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 10:56:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 10:56:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 10:56:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 10:56:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 10:56:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 10:56:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 10:56:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 10:56:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 10:56:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 10:56:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 10:56:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 10:56:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 10:56:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 10:56:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 10:56:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 10:56:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 10:56:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 10:56:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 10:56:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 10:56:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 10:56:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:56:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:56:57] [Rank 0] PRINT: step:6800/10000 val_loss:4.3452 total_sharp:2.6246e-04 L1_sharp:3.8050e-03 L2_sharp:3.7815e-03 L3_sharp:4.5333e-03 L4_sharp:5.7441e-03 L5_sharp:1.1962e-02 L6_sharp:1.7659e-02 L7_sharp:2.0696e-02 L8_sharp:4.6327e-02 L9_sharp:4.4610e-02 L10_sharp:5.9949e-02 L11_sharp:8.8605e-02 L12_sharp:5.7162e-01 total_fnorm:2.8250e+01 total_l1_linf:4.2752e+04 total_spectral:1.4125e+01 L1_fnorm:1.9141e-01 L2_fnorm:1.9043e-01 L3_fnorm:1.8945e-01 L4_fnorm:1.8848e-01 L5_fnorm:1.8848e-01 L6_fnorm:1.8848e-01 L7_fnorm:1.8848e-01 L8_fnorm:1.8652e-01 L9_fnorm:1.8848e-01 L10_fnorm:1.8848e-01 L11_fnorm:1.8750e-01 L12_fnorm:1.8652e-01 L1_l1linf:4.5166e-02 L2_l1linf:4.3701e-02 L3_l1linf:4.2236e-02 L4_l1linf:4.2969e-02 L5_l1linf:4.3457e-02 L6_l1linf:4.2236e-02 L7_l1linf:4.2725e-02 L8_l1linf:4.1504e-02 L9_l1linf:4.1260e-02 L10_l1linf:4.0283e-02 L11_l1linf:4.0039e-02 L12_l1linf:4.3457e-02 L1_spectral:2.5991e-03 L2_spectral:2.5949e-03 L3_spectral:2.5951e-03 L4_spectral:2.5952e-03 L5_spectral:2.6004e-03 L6_spectral:2.5907e-03 L7_spectral:2.5939e-03 L8_spectral:2.5833e-03 L9_spectral:2.6048e-03 L10_spectral:2.6193e-03 L11_spectral:2.6072e-03 L12_spectral:2.5896e-03 train_time:292126ms step_avg:42.96ms +[2025-09-11 10:56:57] [Rank 0] PRINT: step:6800/10000 val_loss:4.3452 total_sharp:2.6246e-04 L1_sharp:3.8050e-03 L2_sharp:3.7815e-03 L3_sharp:4.5333e-03 L4_sharp:5.7441e-03 L5_sharp:1.1962e-02 L6_sharp:1.7659e-02 L7_sharp:2.0696e-02 L8_sharp:4.6327e-02 L9_sharp:4.4610e-02 L10_sharp:5.9949e-02 L11_sharp:8.8605e-02 L12_sharp:5.7162e-01 total_fnorm:2.8250e+01 total_l1_linf:4.2752e+04 total_spectral:1.4125e+01 L1_fnorm:1.9141e-01 L2_fnorm:1.9043e-01 L3_fnorm:1.8945e-01 L4_fnorm:1.8848e-01 L5_fnorm:1.8848e-01 L6_fnorm:1.8848e-01 L7_fnorm:1.8848e-01 L8_fnorm:1.8652e-01 L9_fnorm:1.8848e-01 L10_fnorm:1.8848e-01 L11_fnorm:1.8750e-01 L12_fnorm:1.8652e-01 L1_l1linf:4.5166e-02 L2_l1linf:4.3701e-02 L3_l1linf:4.2236e-02 L4_l1linf:4.2969e-02 L5_l1linf:4.3457e-02 
L6_l1linf:4.2236e-02 L7_l1linf:4.2725e-02 L8_l1linf:4.1504e-02 L9_l1linf:4.1260e-02 L10_l1linf:4.0283e-02 L11_l1linf:4.0039e-02 L12_l1linf:4.3457e-02 L1_spectral:2.5991e-03 L2_spectral:2.5949e-03 L3_spectral:2.5951e-03 L4_spectral:2.5952e-03 L5_spectral:2.6004e-03 L6_spectral:2.5907e-03 L7_spectral:2.5939e-03 L8_spectral:2.5833e-03 L9_spectral:2.6048e-03 L10_spectral:2.6193e-03 L11_spectral:2.6072e-03 L12_spectral:2.5896e-03 train_time:292126ms step_avg:42.96ms +[2025-09-11 10:56:59] [Rank 0] step:6801/10000 train_time:294002ms step_avg:43.23ms +[2025-09-11 10:56:59] [Rank 0] step:6801/10000 train_time:294002ms step_avg:43.23ms +[2025-09-11 10:57:00] [Rank 0] step:6821/10000 train_time:294716ms step_avg:43.21ms +[2025-09-11 10:57:00] [Rank 0] step:6821/10000 train_time:294716ms step_avg:43.21ms +[2025-09-11 10:57:01] [Rank 0] step:6841/10000 train_time:295418ms step_avg:43.18ms +[2025-09-11 10:57:01] [Rank 0] step:6841/10000 train_time:295418ms step_avg:43.18ms +[2025-09-11 10:57:01] [Rank 0] step:6861/10000 train_time:296117ms step_avg:43.16ms +[2025-09-11 10:57:01] [Rank 0] step:6861/10000 train_time:296117ms step_avg:43.16ms +[2025-09-11 10:57:02] [Rank 0] step:6881/10000 train_time:296819ms step_avg:43.14ms +[2025-09-11 10:57:02] [Rank 0] step:6881/10000 train_time:296819ms step_avg:43.14ms +[2025-09-11 10:57:03] [Rank 0] step:6901/10000 train_time:297517ms step_avg:43.11ms +[2025-09-11 10:57:03] [Rank 0] step:6901/10000 train_time:297517ms step_avg:43.11ms +[2025-09-11 10:57:03] [Rank 0] step:6921/10000 train_time:298216ms step_avg:43.09ms +[2025-09-11 10:57:03] [Rank 0] step:6921/10000 train_time:298216ms step_avg:43.09ms +[2025-09-11 10:57:04] [Rank 0] step:6941/10000 train_time:298915ms step_avg:43.07ms +[2025-09-11 10:57:04] [Rank 0] step:6941/10000 train_time:298915ms step_avg:43.07ms +[2025-09-11 10:57:05] [Rank 0] step:6961/10000 train_time:299616ms step_avg:43.04ms +[2025-09-11 10:57:05] [Rank 0] step:6961/10000 train_time:299616ms step_avg:43.04ms 
+[2025-09-11 10:57:06] [Rank 0] step:6981/10000 train_time:300317ms step_avg:43.02ms +[2025-09-11 10:57:06] [Rank 0] step:6981/10000 train_time:300317ms step_avg:43.02ms +[2025-09-11 10:57:06] [Rank 0] step:7001/10000 train_time:301042ms step_avg:43.00ms +[2025-09-11 10:57:06] [Rank 0] step:7001/10000 train_time:301042ms step_avg:43.00ms +[2025-09-11 10:57:07] [Rank 0] step:7021/10000 train_time:301744ms step_avg:42.98ms +[2025-09-11 10:57:07] [Rank 0] step:7021/10000 train_time:301744ms step_avg:42.98ms +[2025-09-11 10:57:08] [Rank 0] step:7041/10000 train_time:302442ms step_avg:42.95ms +[2025-09-11 10:57:08] [Rank 0] step:7041/10000 train_time:302442ms step_avg:42.95ms +[2025-09-11 10:57:08] [Rank 0] step:7061/10000 train_time:303141ms step_avg:42.93ms +[2025-09-11 10:57:08] [Rank 0] step:7061/10000 train_time:303141ms step_avg:42.93ms +[2025-09-11 10:57:09] [Rank 0] step:7081/10000 train_time:303840ms step_avg:42.91ms +[2025-09-11 10:57:09] [Rank 0] step:7081/10000 train_time:303840ms step_avg:42.91ms +[2025-09-11 10:57:10] [Rank 0] step:7101/10000 train_time:304539ms step_avg:42.89ms +[2025-09-11 10:57:10] [Rank 0] step:7101/10000 train_time:304539ms step_avg:42.89ms +[2025-09-11 10:57:10] [Rank 0] step:7121/10000 train_time:305239ms step_avg:42.86ms +[2025-09-11 10:57:10] [Rank 0] step:7121/10000 train_time:305239ms step_avg:42.86ms +[2025-09-11 10:57:11] [Rank 0] step:7141/10000 train_time:305938ms step_avg:42.84ms +[2025-09-11 10:57:11] [Rank 0] step:7141/10000 train_time:305938ms step_avg:42.84ms +[2025-09-11 10:57:12] [Rank 0] step:7161/10000 train_time:306638ms step_avg:42.82ms +[2025-09-11 10:57:12] [Rank 0] step:7161/10000 train_time:306638ms step_avg:42.82ms +[2025-09-11 10:57:13] [Rank 0] step:7181/10000 train_time:307337ms step_avg:42.80ms +[2025-09-11 10:57:13] [Rank 0] step:7181/10000 train_time:307337ms step_avg:42.80ms +[2025-09-11 10:57:13] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 10:57:13] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 10:57:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 10:57:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 10:57:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 10:57:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 10:57:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:57:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:57:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 10:57:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 10:57:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 10:57:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 10:57:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 10:57:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 10:57:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 10:57:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 10:57:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 10:57:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 10:57:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 10:57:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 10:57:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 10:57:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 10:57:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 10:57:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 10:57:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 10:57:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 10:57:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 10:57:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 10:57:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 10:57:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 10:57:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 10:57:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 10:57:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 10:57:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 10:57:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 10:57:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 10:57:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 10:57:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 10:57:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 10:57:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 10:57:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 10:57:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 10:57:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 10:57:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 10:57:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:57:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:57:24] [Rank 0] PRINT: step:7200/10000 val_loss:4.3146 total_sharp:2.2376e-04 L1_sharp:4.5533e-03 L2_sharp:3.0897e-03 L3_sharp:4.7421e-03 L4_sharp:3.0884e-03 L5_sharp:7.3046e-03 L6_sharp:1.5174e-02 L7_sharp:1.9137e-02 L8_sharp:4.1350e-02 L9_sharp:3.9432e-02 L10_sharp:5.6221e-02 L11_sharp:8.3732e-02 L12_sharp:3.7644e-01 total_fnorm:2.4500e+01 total_l1_linf:3.5328e+04 total_spectral:1.2312e+01 L1_fnorm:1.6602e-01 L2_fnorm:1.6504e-01 L3_fnorm:1.6309e-01 L4_fnorm:1.6406e-01 L5_fnorm:1.6309e-01 L6_fnorm:1.6309e-01 L7_fnorm:1.6309e-01 L8_fnorm:1.6016e-01 L9_fnorm:1.6309e-01 L10_fnorm:1.6211e-01 L11_fnorm:1.6113e-01 L12_fnorm:1.6016e-01 L1_l1linf:3.6133e-02 L2_l1linf:3.6621e-02 L3_l1linf:3.5645e-02 L4_l1linf:3.5889e-02 L5_l1linf:3.5400e-02 L6_l1linf:3.5889e-02 L7_l1linf:3.5400e-02 L8_l1linf:3.4180e-02 L9_l1linf:3.3936e-02 L10_l1linf:3.4180e-02 L11_l1linf:3.2959e-02 L12_l1linf:3.6133e-02 L1_spectral:2.3204e-03 L2_spectral:2.3209e-03 L3_spectral:2.3124e-03 L4_spectral:2.3239e-03 L5_spectral:2.3027e-03 L6_spectral:2.3122e-03 L7_spectral:2.2978e-03 L8_spectral:2.2787e-03 L9_spectral:2.2988e-03 L10_spectral:2.2860e-03 L11_spectral:2.2879e-03 L12_spectral:2.3107e-03 train_time:308016ms step_avg:42.78ms +[2025-09-11 10:57:24] [Rank 0] PRINT: step:7200/10000 
val_loss:4.3146 total_sharp:2.2376e-04 L1_sharp:4.5533e-03 L2_sharp:3.0897e-03 L3_sharp:4.7421e-03 L4_sharp:3.0884e-03 L5_sharp:7.3046e-03 L6_sharp:1.5174e-02 L7_sharp:1.9137e-02 L8_sharp:4.1350e-02 L9_sharp:3.9432e-02 L10_sharp:5.6221e-02 L11_sharp:8.3732e-02 L12_sharp:3.7644e-01 total_fnorm:2.4500e+01 total_l1_linf:3.5328e+04 total_spectral:1.2312e+01 L1_fnorm:1.6602e-01 L2_fnorm:1.6504e-01 L3_fnorm:1.6309e-01 L4_fnorm:1.6406e-01 L5_fnorm:1.6309e-01 L6_fnorm:1.6309e-01 L7_fnorm:1.6309e-01 L8_fnorm:1.6016e-01 L9_fnorm:1.6309e-01 L10_fnorm:1.6211e-01 L11_fnorm:1.6113e-01 L12_fnorm:1.6016e-01 L1_l1linf:3.6133e-02 L2_l1linf:3.6621e-02 L3_l1linf:3.5645e-02 L4_l1linf:3.5889e-02 L5_l1linf:3.5400e-02 L6_l1linf:3.5889e-02 L7_l1linf:3.5400e-02 L8_l1linf:3.4180e-02 L9_l1linf:3.3936e-02 L10_l1linf:3.4180e-02 L11_l1linf:3.2959e-02 L12_l1linf:3.6133e-02 L1_spectral:2.3204e-03 L2_spectral:2.3209e-03 L3_spectral:2.3124e-03 L4_spectral:2.3239e-03 L5_spectral:2.3027e-03 L6_spectral:2.3122e-03 L7_spectral:2.2978e-03 L8_spectral:2.2787e-03 L9_spectral:2.2988e-03 L10_spectral:2.2860e-03 L11_spectral:2.2879e-03 L12_spectral:2.3107e-03 train_time:308016ms step_avg:42.78ms +[2025-09-11 10:57:26] [Rank 0] step:7201/10000 train_time:309915ms step_avg:43.04ms +[2025-09-11 10:57:26] [Rank 0] step:7201/10000 train_time:309915ms step_avg:43.04ms +[2025-09-11 10:57:27] [Rank 0] step:7221/10000 train_time:310643ms step_avg:43.02ms +[2025-09-11 10:57:27] [Rank 0] step:7221/10000 train_time:310643ms step_avg:43.02ms +[2025-09-11 10:57:27] [Rank 0] step:7241/10000 train_time:311343ms step_avg:43.00ms +[2025-09-11 10:57:27] [Rank 0] step:7241/10000 train_time:311343ms step_avg:43.00ms +[2025-09-11 10:57:28] [Rank 0] step:7261/10000 train_time:312044ms step_avg:42.98ms +[2025-09-11 10:57:28] [Rank 0] step:7261/10000 train_time:312044ms step_avg:42.98ms +[2025-09-11 10:57:29] [Rank 0] step:7281/10000 train_time:312749ms step_avg:42.95ms +[2025-09-11 10:57:29] [Rank 0] step:7281/10000 
train_time:312749ms step_avg:42.95ms +[2025-09-11 10:57:30] [Rank 0] step:7301/10000 train_time:313449ms step_avg:42.93ms +[2025-09-11 10:57:30] [Rank 0] step:7301/10000 train_time:313449ms step_avg:42.93ms +[2025-09-11 10:57:30] [Rank 0] step:7321/10000 train_time:314148ms step_avg:42.91ms +[2025-09-11 10:57:30] [Rank 0] step:7321/10000 train_time:314148ms step_avg:42.91ms +[2025-09-11 10:57:31] [Rank 0] step:7341/10000 train_time:314849ms step_avg:42.89ms +[2025-09-11 10:57:31] [Rank 0] step:7341/10000 train_time:314849ms step_avg:42.89ms +[2025-09-11 10:57:32] [Rank 0] step:7361/10000 train_time:315547ms step_avg:42.87ms +[2025-09-11 10:57:32] [Rank 0] step:7361/10000 train_time:315547ms step_avg:42.87ms +[2025-09-11 10:57:32] [Rank 0] step:7381/10000 train_time:316248ms step_avg:42.85ms +[2025-09-11 10:57:32] [Rank 0] step:7381/10000 train_time:316248ms step_avg:42.85ms +[2025-09-11 10:57:33] [Rank 0] step:7401/10000 train_time:316947ms step_avg:42.82ms +[2025-09-11 10:57:33] [Rank 0] step:7401/10000 train_time:316947ms step_avg:42.82ms +[2025-09-11 10:57:34] [Rank 0] step:7421/10000 train_time:317646ms step_avg:42.80ms +[2025-09-11 10:57:34] [Rank 0] step:7421/10000 train_time:317646ms step_avg:42.80ms +[2025-09-11 10:57:34] [Rank 0] step:7441/10000 train_time:318346ms step_avg:42.78ms +[2025-09-11 10:57:34] [Rank 0] step:7441/10000 train_time:318346ms step_avg:42.78ms +[2025-09-11 10:57:35] [Rank 0] step:7461/10000 train_time:319047ms step_avg:42.76ms +[2025-09-11 10:57:35] [Rank 0] step:7461/10000 train_time:319047ms step_avg:42.76ms +[2025-09-11 10:57:36] [Rank 0] step:7481/10000 train_time:319749ms step_avg:42.74ms +[2025-09-11 10:57:36] [Rank 0] step:7481/10000 train_time:319749ms step_avg:42.74ms +[2025-09-11 10:57:37] [Rank 0] step:7501/10000 train_time:320449ms step_avg:42.72ms +[2025-09-11 10:57:37] [Rank 0] step:7501/10000 train_time:320449ms step_avg:42.72ms +[2025-09-11 10:57:37] [Rank 0] step:7521/10000 train_time:321150ms step_avg:42.70ms 
+[2025-09-11 10:57:37] [Rank 0] step:7521/10000 train_time:321150ms step_avg:42.70ms +[2025-09-11 10:57:38] [Rank 0] step:7541/10000 train_time:321849ms step_avg:42.68ms +[2025-09-11 10:57:38] [Rank 0] step:7541/10000 train_time:321849ms step_avg:42.68ms +[2025-09-11 10:57:39] [Rank 0] step:7561/10000 train_time:322551ms step_avg:42.66ms +[2025-09-11 10:57:39] [Rank 0] step:7561/10000 train_time:322551ms step_avg:42.66ms +[2025-09-11 10:57:39] [Rank 0] step:7581/10000 train_time:323253ms step_avg:42.64ms +[2025-09-11 10:57:39] [Rank 0] step:7581/10000 train_time:323253ms step_avg:42.64ms +[2025-09-11 10:57:40] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 10:57:40] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 10:57:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 10:57:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 10:57:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 10:57:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 10:57:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:57:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:57:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 10:57:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 10:57:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 10:57:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 10:57:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 10:57:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 10:57:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 10:57:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 10:57:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 10:57:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 10:57:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 10:57:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 10:57:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 10:57:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 10:57:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 10:57:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 10:57:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 10:57:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 10:57:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 10:57:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 10:57:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 10:57:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 10:57:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 10:57:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 10:57:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 10:57:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 10:57:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 10:57:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 10:57:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 10:57:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 10:57:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 10:57:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 10:57:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 10:57:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 10:57:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 10:57:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 10:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:57:51] [Rank 0] PRINT: step:7600/10000 val_loss:4.2911 total_sharp:2.3931e-04 L1_sharp:5.0301e-03 L2_sharp:1.5857e-03 L3_sharp:3.9403e-03 L4_sharp:3.8866e-03 L5_sharp:8.7250e-03 L6_sharp:1.4853e-02 L7_sharp:2.0478e-02 L8_sharp:3.7788e-02 L9_sharp:4.1812e-02 L10_sharp:5.2171e-02 L11_sharp:8.0915e-02 L12_sharp:4.6896e-01 total_fnorm:1.9250e+01 total_l1_linf:2.5216e+04 total_spectral:9.6250e+00 L1_fnorm:1.3965e-01 L2_fnorm:1.3770e-01 L3_fnorm:1.3672e-01 L4_fnorm:1.3672e-01 L5_fnorm:1.3672e-01 L6_fnorm:1.3672e-01 L7_fnorm:1.3672e-01 L8_fnorm:1.3477e-01 L9_fnorm:1.3672e-01 L10_fnorm:1.3574e-01 L11_fnorm:1.3477e-01 L12_fnorm:1.3281e-01 L1_l1linf:2.9053e-02 L2_l1linf:2.8564e-02 L3_l1linf:2.7588e-02 L4_l1linf:2.7954e-02 L5_l1linf:2.7222e-02 L6_l1linf:2.7222e-02 L7_l1linf:2.7710e-02 L8_l1linf:2.6978e-02 L9_l1linf:2.6367e-02 L10_l1linf:2.6733e-02 L11_l1linf:2.6367e-02 L12_l1linf:2.8809e-02 L1_spectral:2.0152e-03 L2_spectral:2.0252e-03 L3_spectral:2.0049e-03 L4_spectral:2.0051e-03 L5_spectral:1.9986e-03 L6_spectral:2.0019e-03 L7_spectral:2.0104e-03 L8_spectral:1.9604e-03 L9_spectral:2.0007e-03 L10_spectral:1.9842e-03 L11_spectral:1.9670e-03 L12_spectral:1.9695e-03 train_time:323934ms step_avg:42.62ms +[2025-09-11 10:57:51] [Rank 0] PRINT: step:7600/10000 val_loss:4.2911 total_sharp:2.3931e-04 L1_sharp:5.0301e-03 L2_sharp:1.5857e-03 L3_sharp:3.9403e-03 L4_sharp:3.8866e-03 L5_sharp:8.7250e-03 L6_sharp:1.4853e-02 L7_sharp:2.0478e-02 L8_sharp:3.7788e-02 L9_sharp:4.1812e-02 L10_sharp:5.2171e-02 L11_sharp:8.0915e-02 L12_sharp:4.6896e-01 total_fnorm:1.9250e+01 total_l1_linf:2.5216e+04 total_spectral:9.6250e+00 L1_fnorm:1.3965e-01 L2_fnorm:1.3770e-01 L3_fnorm:1.3672e-01 L4_fnorm:1.3672e-01 L5_fnorm:1.3672e-01 L6_fnorm:1.3672e-01 L7_fnorm:1.3672e-01 L8_fnorm:1.3477e-01 L9_fnorm:1.3672e-01 L10_fnorm:1.3574e-01 L11_fnorm:1.3477e-01 L12_fnorm:1.3281e-01 L1_l1linf:2.9053e-02 L2_l1linf:2.8564e-02 L3_l1linf:2.7588e-02 L4_l1linf:2.7954e-02 L5_l1linf:2.7222e-02 
L6_l1linf:2.7222e-02 L7_l1linf:2.7710e-02 L8_l1linf:2.6978e-02 L9_l1linf:2.6367e-02 L10_l1linf:2.6733e-02 L11_l1linf:2.6367e-02 L12_l1linf:2.8809e-02 L1_spectral:2.0152e-03 L2_spectral:2.0252e-03 L3_spectral:2.0049e-03 L4_spectral:2.0051e-03 L5_spectral:1.9986e-03 L6_spectral:2.0019e-03 L7_spectral:2.0104e-03 L8_spectral:1.9604e-03 L9_spectral:2.0007e-03 L10_spectral:1.9842e-03 L11_spectral:1.9670e-03 L12_spectral:1.9695e-03 train_time:323934ms step_avg:42.62ms +[2025-09-11 10:57:53] [Rank 0] step:7601/10000 train_time:325847ms step_avg:42.87ms +[2025-09-11 10:57:53] [Rank 0] step:7601/10000 train_time:325847ms step_avg:42.87ms +[2025-09-11 10:57:54] [Rank 0] step:7621/10000 train_time:326579ms step_avg:42.85ms +[2025-09-11 10:57:54] [Rank 0] step:7621/10000 train_time:326579ms step_avg:42.85ms +[2025-09-11 10:57:55] [Rank 0] step:7641/10000 train_time:327280ms step_avg:42.83ms +[2025-09-11 10:57:55] [Rank 0] step:7641/10000 train_time:327280ms step_avg:42.83ms +[2025-09-11 10:57:55] [Rank 0] step:7661/10000 train_time:327980ms step_avg:42.81ms +[2025-09-11 10:57:55] [Rank 0] step:7661/10000 train_time:327980ms step_avg:42.81ms +[2025-09-11 10:57:56] [Rank 0] step:7681/10000 train_time:328681ms step_avg:42.79ms +[2025-09-11 10:57:56] [Rank 0] step:7681/10000 train_time:328681ms step_avg:42.79ms +[2025-09-11 10:57:57] [Rank 0] step:7701/10000 train_time:329381ms step_avg:42.77ms +[2025-09-11 10:57:57] [Rank 0] step:7701/10000 train_time:329381ms step_avg:42.77ms +[2025-09-11 10:57:57] [Rank 0] step:7721/10000 train_time:330082ms step_avg:42.75ms +[2025-09-11 10:57:57] [Rank 0] step:7721/10000 train_time:330082ms step_avg:42.75ms +[2025-09-11 10:57:58] [Rank 0] step:7741/10000 train_time:330784ms step_avg:42.73ms +[2025-09-11 10:57:58] [Rank 0] step:7741/10000 train_time:330784ms step_avg:42.73ms +[2025-09-11 10:57:59] [Rank 0] step:7761/10000 train_time:331483ms step_avg:42.71ms +[2025-09-11 10:57:59] [Rank 0] step:7761/10000 train_time:331483ms step_avg:42.71ms 
+[2025-09-11 10:58:00] [Rank 0] step:7781/10000 train_time:332185ms step_avg:42.69ms +[2025-09-11 10:58:00] [Rank 0] step:7781/10000 train_time:332185ms step_avg:42.69ms +[2025-09-11 10:58:00] [Rank 0] step:7801/10000 train_time:332884ms step_avg:42.67ms +[2025-09-11 10:58:00] [Rank 0] step:7801/10000 train_time:332884ms step_avg:42.67ms +[2025-09-11 10:58:01] [Rank 0] step:7821/10000 train_time:333584ms step_avg:42.65ms +[2025-09-11 10:58:01] [Rank 0] step:7821/10000 train_time:333584ms step_avg:42.65ms +[2025-09-11 10:58:02] [Rank 0] step:7841/10000 train_time:334286ms step_avg:42.63ms +[2025-09-11 10:58:02] [Rank 0] step:7841/10000 train_time:334286ms step_avg:42.63ms +[2025-09-11 10:58:02] [Rank 0] step:7861/10000 train_time:334993ms step_avg:42.61ms +[2025-09-11 10:58:02] [Rank 0] step:7861/10000 train_time:334993ms step_avg:42.61ms +[2025-09-11 10:58:03] [Rank 0] step:7881/10000 train_time:335695ms step_avg:42.60ms +[2025-09-11 10:58:03] [Rank 0] step:7881/10000 train_time:335695ms step_avg:42.60ms +[2025-09-11 10:58:04] [Rank 0] step:7901/10000 train_time:336398ms step_avg:42.58ms +[2025-09-11 10:58:04] [Rank 0] step:7901/10000 train_time:336398ms step_avg:42.58ms +[2025-09-11 10:58:04] [Rank 0] step:7921/10000 train_time:337098ms step_avg:42.56ms +[2025-09-11 10:58:04] [Rank 0] step:7921/10000 train_time:337098ms step_avg:42.56ms +[2025-09-11 10:58:05] [Rank 0] step:7941/10000 train_time:337800ms step_avg:42.54ms +[2025-09-11 10:58:05] [Rank 0] step:7941/10000 train_time:337800ms step_avg:42.54ms +[2025-09-11 10:58:06] [Rank 0] step:7961/10000 train_time:338499ms step_avg:42.52ms +[2025-09-11 10:58:06] [Rank 0] step:7961/10000 train_time:338499ms step_avg:42.52ms +[2025-09-11 10:58:07] [Rank 0] step:7981/10000 train_time:339202ms step_avg:42.50ms +[2025-09-11 10:58:07] [Rank 0] step:7981/10000 train_time:339202ms step_avg:42.50ms +[2025-09-11 10:58:07] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 10:58:07] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 10:58:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 10:58:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 10:58:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 10:58:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 10:58:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:58:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:58:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 10:58:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 10:58:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 10:58:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 10:58:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 10:58:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 10:58:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 10:58:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 10:58:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 10:58:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 10:58:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 10:58:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 10:58:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 10:58:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 10:58:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 10:58:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 10:58:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 10:58:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 10:58:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 10:58:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 10:58:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 10:58:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 10:58:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 10:58:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 10:58:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 10:58:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 10:58:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 10:58:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 10:58:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 10:58:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 10:58:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 10:58:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 10:58:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 10:58:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 10:58:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 10:58:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 10:58:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:58:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:58:19] [Rank 0] PRINT: step:8000/10000 val_loss:4.2715 total_sharp:1.9402e-04 L1_sharp:3.0757e-03 L2_sharp:1.4248e-03 L3_sharp:4.7252e-03 L4_sharp:3.6418e-03 L5_sharp:9.2364e-03 L6_sharp:1.5985e-02 L7_sharp:2.0278e-02 L8_sharp:3.5320e-02 L9_sharp:3.7735e-02 L10_sharp:4.9632e-02 L11_sharp:6.8670e-02 L12_sharp:4.1673e-01 total_fnorm:1.6250e+01 total_l1_linf:2.0096e+04 total_spectral:8.1250e+00 L1_fnorm:1.1377e-01 L2_fnorm:1.1279e-01 L3_fnorm:1.1182e-01 L4_fnorm:1.1182e-01 L5_fnorm:1.1182e-01 L6_fnorm:1.1133e-01 L7_fnorm:1.1133e-01 L8_fnorm:1.0889e-01 L9_fnorm:1.1084e-01 L10_fnorm:1.1084e-01 L11_fnorm:1.0938e-01 L12_fnorm:1.0791e-01 L1_l1linf:2.2095e-02 L2_l1linf:2.2461e-02 L3_l1linf:2.1606e-02 L4_l1linf:2.1484e-02 L5_l1linf:2.1118e-02 L6_l1linf:2.0752e-02 L7_l1linf:2.0874e-02 L8_l1linf:2.0386e-02 L9_l1linf:1.9531e-02 L10_l1linf:2.0264e-02 L11_l1linf:1.9775e-02 L12_l1linf:2.2827e-02 L1_spectral:1.7269e-03 L2_spectral:1.6962e-03 L3_spectral:1.6902e-03 L4_spectral:1.6990e-03 L5_spectral:1.6924e-03 L6_spectral:1.6920e-03 L7_spectral:1.6796e-03 L8_spectral:1.6336e-03 L9_spectral:1.6685e-03 L10_spectral:1.6591e-03 L11_spectral:1.6341e-03 L12_spectral:1.6601e-03 train_time:339881ms step_avg:42.49ms +[2025-09-11 10:58:19] [Rank 0] PRINT: step:8000/10000 
val_loss:4.2715 total_sharp:1.9402e-04 L1_sharp:3.0757e-03 L2_sharp:1.4248e-03 L3_sharp:4.7252e-03 L4_sharp:3.6418e-03 L5_sharp:9.2364e-03 L6_sharp:1.5985e-02 L7_sharp:2.0278e-02 L8_sharp:3.5320e-02 L9_sharp:3.7735e-02 L10_sharp:4.9632e-02 L11_sharp:6.8670e-02 L12_sharp:4.1673e-01 total_fnorm:1.6250e+01 total_l1_linf:2.0096e+04 total_spectral:8.1250e+00 L1_fnorm:1.1377e-01 L2_fnorm:1.1279e-01 L3_fnorm:1.1182e-01 L4_fnorm:1.1182e-01 L5_fnorm:1.1182e-01 L6_fnorm:1.1133e-01 L7_fnorm:1.1133e-01 L8_fnorm:1.0889e-01 L9_fnorm:1.1084e-01 L10_fnorm:1.1084e-01 L11_fnorm:1.0938e-01 L12_fnorm:1.0791e-01 L1_l1linf:2.2095e-02 L2_l1linf:2.2461e-02 L3_l1linf:2.1606e-02 L4_l1linf:2.1484e-02 L5_l1linf:2.1118e-02 L6_l1linf:2.0752e-02 L7_l1linf:2.0874e-02 L8_l1linf:2.0386e-02 L9_l1linf:1.9531e-02 L10_l1linf:2.0264e-02 L11_l1linf:1.9775e-02 L12_l1linf:2.2827e-02 L1_spectral:1.7269e-03 L2_spectral:1.6962e-03 L3_spectral:1.6902e-03 L4_spectral:1.6990e-03 L5_spectral:1.6924e-03 L6_spectral:1.6920e-03 L7_spectral:1.6796e-03 L8_spectral:1.6336e-03 L9_spectral:1.6685e-03 L10_spectral:1.6591e-03 L11_spectral:1.6341e-03 L12_spectral:1.6601e-03 train_time:339881ms step_avg:42.49ms +[2025-09-11 10:58:21] [Rank 0] step:8001/10000 train_time:341969ms step_avg:42.74ms +[2025-09-11 10:58:21] [Rank 0] step:8001/10000 train_time:341969ms step_avg:42.74ms +[2025-09-11 10:58:22] [Rank 0] step:8021/10000 train_time:342698ms step_avg:42.73ms +[2025-09-11 10:58:22] [Rank 0] step:8021/10000 train_time:342698ms step_avg:42.73ms +[2025-09-11 10:58:23] [Rank 0] step:8041/10000 train_time:343400ms step_avg:42.71ms +[2025-09-11 10:58:23] [Rank 0] step:8041/10000 train_time:343400ms step_avg:42.71ms +[2025-09-11 10:58:24] [Rank 0] step:8061/10000 train_time:344401ms step_avg:42.72ms +[2025-09-11 10:58:24] [Rank 0] step:8061/10000 train_time:344401ms step_avg:42.72ms +[2025-09-11 10:58:24] [Rank 0] step:8081/10000 train_time:345101ms step_avg:42.71ms +[2025-09-11 10:58:24] [Rank 0] step:8081/10000 
train_time:345101ms step_avg:42.71ms +[2025-09-11 10:58:25] [Rank 0] step:8101/10000 train_time:345801ms step_avg:42.69ms +[2025-09-11 10:58:25] [Rank 0] step:8101/10000 train_time:345801ms step_avg:42.69ms +[2025-09-11 10:58:26] [Rank 0] step:8121/10000 train_time:346506ms step_avg:42.67ms +[2025-09-11 10:58:26] [Rank 0] step:8121/10000 train_time:346506ms step_avg:42.67ms +[2025-09-11 10:58:27] [Rank 0] step:8141/10000 train_time:347939ms step_avg:42.74ms +[2025-09-11 10:58:27] [Rank 0] step:8141/10000 train_time:347939ms step_avg:42.74ms +[2025-09-11 10:58:28] [Rank 0] step:8161/10000 train_time:348645ms step_avg:42.72ms +[2025-09-11 10:58:28] [Rank 0] step:8161/10000 train_time:348645ms step_avg:42.72ms +[2025-09-11 10:58:29] [Rank 0] step:8181/10000 train_time:349357ms step_avg:42.70ms +[2025-09-11 10:58:29] [Rank 0] step:8181/10000 train_time:349357ms step_avg:42.70ms +[2025-09-11 10:58:29] [Rank 0] step:8201/10000 train_time:350067ms step_avg:42.69ms +[2025-09-11 10:58:29] [Rank 0] step:8201/10000 train_time:350067ms step_avg:42.69ms +[2025-09-11 10:58:30] [Rank 0] step:8221/10000 train_time:350775ms step_avg:42.67ms +[2025-09-11 10:58:30] [Rank 0] step:8221/10000 train_time:350775ms step_avg:42.67ms +[2025-09-11 10:58:31] [Rank 0] step:8241/10000 train_time:351491ms step_avg:42.65ms +[2025-09-11 10:58:31] [Rank 0] step:8241/10000 train_time:351491ms step_avg:42.65ms +[2025-09-11 10:58:32] [Rank 0] step:8261/10000 train_time:352198ms step_avg:42.63ms +[2025-09-11 10:58:32] [Rank 0] step:8261/10000 train_time:352198ms step_avg:42.63ms +[2025-09-11 10:58:32] [Rank 0] step:8281/10000 train_time:352903ms step_avg:42.62ms +[2025-09-11 10:58:32] [Rank 0] step:8281/10000 train_time:352903ms step_avg:42.62ms +[2025-09-11 10:58:33] [Rank 0] step:8301/10000 train_time:353612ms step_avg:42.60ms +[2025-09-11 10:58:33] [Rank 0] step:8301/10000 train_time:353612ms step_avg:42.60ms +[2025-09-11 10:58:34] [Rank 0] step:8321/10000 train_time:354319ms step_avg:42.58ms 
+[2025-09-11 10:58:34] [Rank 0] step:8321/10000 train_time:354319ms step_avg:42.58ms +[2025-09-11 10:58:34] [Rank 0] step:8341/10000 train_time:355033ms step_avg:42.56ms +[2025-09-11 10:58:34] [Rank 0] step:8341/10000 train_time:355033ms step_avg:42.56ms +[2025-09-11 10:58:35] [Rank 0] step:8361/10000 train_time:355736ms step_avg:42.55ms +[2025-09-11 10:58:35] [Rank 0] step:8361/10000 train_time:355736ms step_avg:42.55ms +[2025-09-11 10:58:36] [Rank 0] step:8381/10000 train_time:356447ms step_avg:42.53ms +[2025-09-11 10:58:36] [Rank 0] step:8381/10000 train_time:356447ms step_avg:42.53ms +[2025-09-11 10:58:36] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 10:58:36] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 10:58:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 10:58:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 10:58:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 10:58:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 10:58:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:58:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:58:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 10:58:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 10:58:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 10:58:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 10:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 10:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 10:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 10:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 10:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 10:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 10:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 10:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 10:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 10:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 10:58:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 10:58:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 10:58:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 10:58:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 10:58:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 10:58:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 10:58:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 10:58:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 10:58:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 10:58:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 10:58:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 10:58:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 10:58:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 10:58:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 10:58:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 10:58:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 10:58:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 10:58:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 10:58:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 10:58:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 10:58:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 10:58:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 10:58:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:58:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:58:48] [Rank 0] PRINT: step:8400/10000 val_loss:4.2557 total_sharp:1.6741e-04 L1_sharp:2.5153e-03 L2_sharp:3.1791e-03 L3_sharp:3.4207e-03 L4_sharp:3.4760e-03 L5_sharp:6.2559e-03 L6_sharp:1.4177e-02 L7_sharp:1.6412e-02 L8_sharp:3.4723e-02 L9_sharp:3.3155e-02 L10_sharp:4.2432e-02 L11_sharp:5.8245e-02 L12_sharp:2.9373e-01 total_fnorm:1.2062e+01 total_l1_linf:1.3376e+04 total_spectral:6.0312e+00 L1_fnorm:8.8867e-02 L2_fnorm:8.8379e-02 L3_fnorm:8.7402e-02 L4_fnorm:8.7402e-02 L5_fnorm:8.7402e-02 L6_fnorm:8.6914e-02 L7_fnorm:8.7402e-02 L8_fnorm:8.4961e-02 L9_fnorm:8.6426e-02 L10_fnorm:8.6426e-02 L11_fnorm:8.5449e-02 L12_fnorm:8.3984e-02 L1_l1linf:1.6357e-02 L2_l1linf:1.5991e-02 L3_l1linf:1.4771e-02 L4_l1linf:1.5076e-02 L5_l1linf:1.5015e-02 L6_l1linf:1.5137e-02 L7_l1linf:1.6113e-02 L8_l1linf:1.4648e-02 L9_l1linf:1.4343e-02 L10_l1linf:1.4404e-02 L11_l1linf:1.4038e-02 L12_l1linf:1.5015e-02 L1_spectral:1.3741e-03 L2_spectral:1.3798e-03 L3_spectral:1.3684e-03 L4_spectral:1.3843e-03 L5_spectral:1.3533e-03 L6_spectral:1.3662e-03 L7_spectral:1.3512e-03 L8_spectral:1.3033e-03 L9_spectral:1.3345e-03 L10_spectral:1.3279e-03 L11_spectral:1.3002e-03 L12_spectral:1.3400e-03 train_time:357139ms step_avg:42.52ms +[2025-09-11 10:58:48] [Rank 0] PRINT: step:8400/10000 val_loss:4.2557 total_sharp:1.6741e-04 L1_sharp:2.5153e-03 L2_sharp:3.1791e-03 L3_sharp:3.4207e-03 L4_sharp:3.4760e-03 L5_sharp:6.2559e-03 L6_sharp:1.4177e-02 L7_sharp:1.6412e-02 L8_sharp:3.4723e-02 L9_sharp:3.3155e-02 L10_sharp:4.2432e-02 L11_sharp:5.8245e-02 L12_sharp:2.9373e-01 total_fnorm:1.2062e+01 total_l1_linf:1.3376e+04 total_spectral:6.0312e+00 L1_fnorm:8.8867e-02 L2_fnorm:8.8379e-02 L3_fnorm:8.7402e-02 L4_fnorm:8.7402e-02 L5_fnorm:8.7402e-02 L6_fnorm:8.6914e-02 L7_fnorm:8.7402e-02 L8_fnorm:8.4961e-02 L9_fnorm:8.6426e-02 L10_fnorm:8.6426e-02 L11_fnorm:8.5449e-02 L12_fnorm:8.3984e-02 L1_l1linf:1.6357e-02 L2_l1linf:1.5991e-02 L3_l1linf:1.4771e-02 L4_l1linf:1.5076e-02 L5_l1linf:1.5015e-02 
L6_l1linf:1.5137e-02 L7_l1linf:1.6113e-02 L8_l1linf:1.4648e-02 L9_l1linf:1.4343e-02 L10_l1linf:1.4404e-02 L11_l1linf:1.4038e-02 L12_l1linf:1.5015e-02 L1_spectral:1.3741e-03 L2_spectral:1.3798e-03 L3_spectral:1.3684e-03 L4_spectral:1.3843e-03 L5_spectral:1.3533e-03 L6_spectral:1.3662e-03 L7_spectral:1.3512e-03 L8_spectral:1.3033e-03 L9_spectral:1.3345e-03 L10_spectral:1.3279e-03 L11_spectral:1.3002e-03 L12_spectral:1.3400e-03 train_time:357139ms step_avg:42.52ms +[2025-09-11 10:58:50] [Rank 0] step:8401/10000 train_time:359065ms step_avg:42.74ms +[2025-09-11 10:58:50] [Rank 0] step:8401/10000 train_time:359065ms step_avg:42.74ms +[2025-09-11 10:58:50] [Rank 0] step:8421/10000 train_time:359801ms step_avg:42.73ms +[2025-09-11 10:58:50] [Rank 0] step:8421/10000 train_time:359801ms step_avg:42.73ms +[2025-09-11 10:58:51] [Rank 0] step:8441/10000 train_time:360511ms step_avg:42.71ms +[2025-09-11 10:58:51] [Rank 0] step:8441/10000 train_time:360511ms step_avg:42.71ms +[2025-09-11 10:58:52] [Rank 0] step:8461/10000 train_time:361222ms step_avg:42.69ms +[2025-09-11 10:58:52] [Rank 0] step:8461/10000 train_time:361222ms step_avg:42.69ms +[2025-09-11 10:58:53] [Rank 0] step:8481/10000 train_time:361931ms step_avg:42.68ms +[2025-09-11 10:58:53] [Rank 0] step:8481/10000 train_time:361931ms step_avg:42.68ms +[2025-09-11 10:58:53] [Rank 0] step:8501/10000 train_time:362640ms step_avg:42.66ms +[2025-09-11 10:58:53] [Rank 0] step:8501/10000 train_time:362640ms step_avg:42.66ms +[2025-09-11 10:58:54] [Rank 0] step:8521/10000 train_time:363348ms step_avg:42.64ms +[2025-09-11 10:58:54] [Rank 0] step:8521/10000 train_time:363348ms step_avg:42.64ms +[2025-09-11 10:58:55] [Rank 0] step:8541/10000 train_time:364056ms step_avg:42.62ms +[2025-09-11 10:58:55] [Rank 0] step:8541/10000 train_time:364056ms step_avg:42.62ms +[2025-09-11 10:58:55] [Rank 0] step:8561/10000 train_time:364770ms step_avg:42.61ms +[2025-09-11 10:58:55] [Rank 0] step:8561/10000 train_time:364770ms step_avg:42.61ms 
+[2025-09-11 10:58:56] [Rank 0] step:8581/10000 train_time:365482ms step_avg:42.59ms +[2025-09-11 10:58:56] [Rank 0] step:8581/10000 train_time:365482ms step_avg:42.59ms +[2025-09-11 10:58:57] [Rank 0] step:8601/10000 train_time:366191ms step_avg:42.58ms +[2025-09-11 10:58:57] [Rank 0] step:8601/10000 train_time:366191ms step_avg:42.58ms +[2025-09-11 10:58:58] [Rank 0] step:8621/10000 train_time:366898ms step_avg:42.56ms +[2025-09-11 10:58:58] [Rank 0] step:8621/10000 train_time:366898ms step_avg:42.56ms +[2025-09-11 10:58:58] [Rank 0] step:8641/10000 train_time:367606ms step_avg:42.54ms +[2025-09-11 10:58:58] [Rank 0] step:8641/10000 train_time:367606ms step_avg:42.54ms +[2025-09-11 10:58:59] [Rank 0] step:8661/10000 train_time:368315ms step_avg:42.53ms +[2025-09-11 10:58:59] [Rank 0] step:8661/10000 train_time:368315ms step_avg:42.53ms +[2025-09-11 10:59:00] [Rank 0] step:8681/10000 train_time:369026ms step_avg:42.51ms +[2025-09-11 10:59:00] [Rank 0] step:8681/10000 train_time:369026ms step_avg:42.51ms +[2025-09-11 10:59:00] [Rank 0] step:8701/10000 train_time:369734ms step_avg:42.49ms +[2025-09-11 10:59:00] [Rank 0] step:8701/10000 train_time:369734ms step_avg:42.49ms +[2025-09-11 10:59:01] [Rank 0] step:8721/10000 train_time:370445ms step_avg:42.48ms +[2025-09-11 10:59:01] [Rank 0] step:8721/10000 train_time:370445ms step_avg:42.48ms +[2025-09-11 10:59:02] [Rank 0] step:8741/10000 train_time:371150ms step_avg:42.46ms +[2025-09-11 10:59:02] [Rank 0] step:8741/10000 train_time:371150ms step_avg:42.46ms +[2025-09-11 10:59:03] [Rank 0] step:8761/10000 train_time:371862ms step_avg:42.45ms +[2025-09-11 10:59:03] [Rank 0] step:8761/10000 train_time:371862ms step_avg:42.45ms +[2025-09-11 10:59:03] [Rank 0] step:8781/10000 train_time:372569ms step_avg:42.43ms +[2025-09-11 10:59:03] [Rank 0] step:8781/10000 train_time:372569ms step_avg:42.43ms +[2025-09-11 10:59:04] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 10:59:04] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 10:59:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 10:59:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 10:59:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 10:59:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 10:59:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:59:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:59:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 10:59:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 10:59:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 10:59:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 10:59:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 10:59:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 10:59:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 10:59:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 10:59:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 10:59:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 10:59:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 10:59:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 10:59:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 10:59:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 10:59:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 10:59:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 10:59:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 10:59:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 10:59:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 10:59:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 10:59:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 10:59:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 10:59:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 10:59:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 10:59:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 10:59:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 10:59:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 10:59:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 10:59:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 10:59:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 10:59:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 10:59:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 10:59:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 10:59:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 10:59:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 10:59:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 10:59:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:59:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:59:16] [Rank 0] PRINT: step:8800/10000 val_loss:4.2490 total_sharp:1.4015e-04 L1_sharp:3.4543e-04 L2_sharp:-6.0413e-05 L3_sharp:5.7108e-04 L4_sharp:2.0321e-03 L5_sharp:6.5658e-03 L6_sharp:1.0563e-02 L7_sharp:1.4990e-02 L8_sharp:2.8535e-02 L9_sharp:2.6082e-02 L10_sharp:3.7457e-02 L11_sharp:5.5568e-02 L12_sharp:2.4150e-01 total_fnorm:8.8125e+00 total_l1_linf:8.7680e+03 total_spectral:4.4375e+00 L1_fnorm:6.4453e-02 L2_fnorm:6.3477e-02 L3_fnorm:6.2988e-02 L4_fnorm:6.2988e-02 L5_fnorm:6.2500e-02 L6_fnorm:6.2500e-02 L7_fnorm:6.2500e-02 L8_fnorm:6.1279e-02 L9_fnorm:6.2256e-02 L10_fnorm:6.2012e-02 L11_fnorm:6.1279e-02 L12_fnorm:6.0059e-02 L1_l1linf:1.0559e-02 L2_l1linf:1.0559e-02 L3_l1linf:9.7046e-03 L4_l1linf:1.0193e-02 L5_l1linf:9.8267e-03 L6_l1linf:9.8877e-03 L7_l1linf:9.8267e-03 L8_l1linf:9.7656e-03 L9_l1linf:9.5215e-03 L10_l1linf:9.3384e-03 L11_l1linf:9.1553e-03 L12_l1linf:1.0315e-02 L1_spectral:1.0305e-03 L2_spectral:1.0168e-03 L3_spectral:1.0081e-03 L4_spectral:1.0184e-03 L5_spectral:1.0164e-03 L6_spectral:1.0077e-03 L7_spectral:1.0114e-03 L8_spectral:9.6656e-04 L9_spectral:9.8698e-04 L10_spectral:9.7545e-04 L11_spectral:9.5365e-04 L12_spectral:9.9399e-04 train_time:373255ms step_avg:42.42ms +[2025-09-11 10:59:16] [Rank 0] PRINT: step:8800/10000 
val_loss:4.2490 total_sharp:1.4015e-04 L1_sharp:3.4543e-04 L2_sharp:-6.0413e-05 L3_sharp:5.7108e-04 L4_sharp:2.0321e-03 L5_sharp:6.5658e-03 L6_sharp:1.0563e-02 L7_sharp:1.4990e-02 L8_sharp:2.8535e-02 L9_sharp:2.6082e-02 L10_sharp:3.7457e-02 L11_sharp:5.5568e-02 L12_sharp:2.4150e-01 total_fnorm:8.8125e+00 total_l1_linf:8.7680e+03 total_spectral:4.4375e+00 L1_fnorm:6.4453e-02 L2_fnorm:6.3477e-02 L3_fnorm:6.2988e-02 L4_fnorm:6.2988e-02 L5_fnorm:6.2500e-02 L6_fnorm:6.2500e-02 L7_fnorm:6.2500e-02 L8_fnorm:6.1279e-02 L9_fnorm:6.2256e-02 L10_fnorm:6.2012e-02 L11_fnorm:6.1279e-02 L12_fnorm:6.0059e-02 L1_l1linf:1.0559e-02 L2_l1linf:1.0559e-02 L3_l1linf:9.7046e-03 L4_l1linf:1.0193e-02 L5_l1linf:9.8267e-03 L6_l1linf:9.8877e-03 L7_l1linf:9.8267e-03 L8_l1linf:9.7656e-03 L9_l1linf:9.5215e-03 L10_l1linf:9.3384e-03 L11_l1linf:9.1553e-03 L12_l1linf:1.0315e-02 L1_spectral:1.0305e-03 L2_spectral:1.0168e-03 L3_spectral:1.0081e-03 L4_spectral:1.0184e-03 L5_spectral:1.0164e-03 L6_spectral:1.0077e-03 L7_spectral:1.0114e-03 L8_spectral:9.6656e-04 L9_spectral:9.8698e-04 L10_spectral:9.7545e-04 L11_spectral:9.5365e-04 L12_spectral:9.9399e-04 train_time:373255ms step_avg:42.42ms +[2025-09-11 10:59:19] [Rank 0] step:8801/10000 train_time:375517ms step_avg:42.67ms +[2025-09-11 10:59:19] [Rank 0] step:8801/10000 train_time:375517ms step_avg:42.67ms +[2025-09-11 10:59:19] [Rank 0] step:8821/10000 train_time:376242ms step_avg:42.65ms +[2025-09-11 10:59:19] [Rank 0] step:8821/10000 train_time:376242ms step_avg:42.65ms +[2025-09-11 10:59:20] [Rank 0] step:8841/10000 train_time:376952ms step_avg:42.64ms +[2025-09-11 10:59:20] [Rank 0] step:8841/10000 train_time:376952ms step_avg:42.64ms +[2025-09-11 10:59:21] [Rank 0] step:8861/10000 train_time:377661ms step_avg:42.62ms +[2025-09-11 10:59:21] [Rank 0] step:8861/10000 train_time:377661ms step_avg:42.62ms +[2025-09-11 10:59:22] [Rank 0] step:8881/10000 train_time:378370ms step_avg:42.60ms +[2025-09-11 10:59:22] [Rank 0] step:8881/10000 
train_time:378370ms step_avg:42.60ms +[2025-09-11 10:59:22] [Rank 0] step:8901/10000 train_time:379083ms step_avg:42.59ms +[2025-09-11 10:59:22] [Rank 0] step:8901/10000 train_time:379083ms step_avg:42.59ms +[2025-09-11 10:59:23] [Rank 0] step:8921/10000 train_time:379790ms step_avg:42.57ms +[2025-09-11 10:59:23] [Rank 0] step:8921/10000 train_time:379790ms step_avg:42.57ms +[2025-09-11 10:59:24] [Rank 0] step:8941/10000 train_time:380642ms step_avg:42.57ms +[2025-09-11 10:59:24] [Rank 0] step:8941/10000 train_time:380642ms step_avg:42.57ms +[2025-09-11 10:59:25] [Rank 0] step:8961/10000 train_time:381483ms step_avg:42.57ms +[2025-09-11 10:59:25] [Rank 0] step:8961/10000 train_time:381483ms step_avg:42.57ms +[2025-09-11 10:59:25] [Rank 0] step:8981/10000 train_time:382196ms step_avg:42.56ms +[2025-09-11 10:59:25] [Rank 0] step:8981/10000 train_time:382196ms step_avg:42.56ms +[2025-09-11 10:59:26] [Rank 0] step:9001/10000 train_time:382901ms step_avg:42.54ms +[2025-09-11 10:59:26] [Rank 0] step:9001/10000 train_time:382901ms step_avg:42.54ms +[2025-09-11 10:59:27] [Rank 0] step:9021/10000 train_time:383875ms step_avg:42.55ms +[2025-09-11 10:59:27] [Rank 0] step:9021/10000 train_time:383875ms step_avg:42.55ms +[2025-09-11 10:59:28] [Rank 0] step:9041/10000 train_time:384586ms step_avg:42.54ms +[2025-09-11 10:59:28] [Rank 0] step:9041/10000 train_time:384586ms step_avg:42.54ms +[2025-09-11 10:59:29] [Rank 0] step:9061/10000 train_time:385295ms step_avg:42.52ms +[2025-09-11 10:59:29] [Rank 0] step:9061/10000 train_time:385295ms step_avg:42.52ms +[2025-09-11 10:59:29] [Rank 0] step:9081/10000 train_time:386008ms step_avg:42.51ms +[2025-09-11 10:59:29] [Rank 0] step:9081/10000 train_time:386008ms step_avg:42.51ms +[2025-09-11 10:59:30] [Rank 0] step:9101/10000 train_time:386722ms step_avg:42.49ms +[2025-09-11 10:59:30] [Rank 0] step:9101/10000 train_time:386722ms step_avg:42.49ms +[2025-09-11 10:59:31] [Rank 0] step:9121/10000 train_time:387435ms step_avg:42.48ms 
+[2025-09-11 10:59:31] [Rank 0] step:9121/10000 train_time:387435ms step_avg:42.48ms +[2025-09-11 10:59:31] [Rank 0] step:9141/10000 train_time:388144ms step_avg:42.46ms +[2025-09-11 10:59:31] [Rank 0] step:9141/10000 train_time:388144ms step_avg:42.46ms +[2025-09-11 10:59:32] [Rank 0] step:9161/10000 train_time:388856ms step_avg:42.45ms +[2025-09-11 10:59:32] [Rank 0] step:9161/10000 train_time:388856ms step_avg:42.45ms +[2025-09-11 10:59:33] [Rank 0] step:9181/10000 train_time:389568ms step_avg:42.43ms +[2025-09-11 10:59:33] [Rank 0] step:9181/10000 train_time:389568ms step_avg:42.43ms +[2025-09-11 10:59:33] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 10:59:33] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 10:59:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 10:59:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 10:59:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 10:59:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 10:59:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:59:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:59:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 10:59:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 10:59:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 10:59:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 10:59:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 10:59:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 10:59:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 10:59:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 10:59:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 10:59:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 10:59:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 10:59:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 10:59:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 10:59:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 10:59:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 10:59:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 10:59:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 10:59:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 10:59:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 10:59:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 10:59:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 10:59:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 10:59:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 10:59:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 10:59:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 10:59:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 10:59:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 10:59:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 10:59:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 10:59:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 10:59:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 10:59:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 10:59:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 10:59:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 10:59:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 10:59:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 10:59:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:59:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:59:44] [Rank 0] PRINT: step:9200/10000 val_loss:4.2357 total_sharp:1.3582e-04 L1_sharp:1.4529e-03 L2_sharp:1.7456e-03 L3_sharp:3.0544e-03 L4_sharp:2.0570e-03 L5_sharp:7.9198e-03 L6_sharp:1.1641e-02 L7_sharp:1.2137e-02 L8_sharp:2.5996e-02 L9_sharp:2.0580e-02 L10_sharp:3.2642e-02 L11_sharp:4.8771e-02 L12_sharp:3.4271e-01 total_fnorm:5.9688e+00 total_l1_linf:5.0240e+03 total_spectral:2.9688e+00 L1_fnorm:4.2969e-02 L2_fnorm:4.2480e-02 L3_fnorm:4.1992e-02 L4_fnorm:4.1992e-02 L5_fnorm:4.1992e-02 L6_fnorm:4.1748e-02 L7_fnorm:4.1748e-02 L8_fnorm:4.1260e-02 L9_fnorm:4.1504e-02 L10_fnorm:4.1504e-02 L11_fnorm:4.1016e-02 L12_fnorm:4.0039e-02 L1_l1linf:6.3782e-03 L2_l1linf:6.3171e-03 L3_l1linf:5.8289e-03 L4_l1linf:6.1951e-03 L5_l1linf:5.8289e-03 L6_l1linf:5.7373e-03 L7_l1linf:5.7068e-03 L8_l1linf:6.1951e-03 L9_l1linf:5.5542e-03 L10_l1linf:5.4626e-03 L11_l1linf:5.5237e-03 L12_l1linf:5.7373e-03 L1_spectral:7.1372e-04 L2_spectral:6.9707e-04 L3_spectral:7.0577e-04 L4_spectral:6.9111e-04 L5_spectral:6.9127e-04 L6_spectral:6.8897e-04 L7_spectral:6.8445e-04 L8_spectral:6.6205e-04 L9_spectral:6.7292e-04 L10_spectral:6.7029e-04 L11_spectral:6.4809e-04 L12_spectral:6.7942e-04 train_time:390261ms step_avg:42.42ms +[2025-09-11 10:59:44] [Rank 0] PRINT: step:9200/10000 val_loss:4.2357 total_sharp:1.3582e-04 L1_sharp:1.4529e-03 L2_sharp:1.7456e-03 L3_sharp:3.0544e-03 L4_sharp:2.0570e-03 L5_sharp:7.9198e-03 L6_sharp:1.1641e-02 L7_sharp:1.2137e-02 L8_sharp:2.5996e-02 L9_sharp:2.0580e-02 L10_sharp:3.2642e-02 L11_sharp:4.8771e-02 L12_sharp:3.4271e-01 total_fnorm:5.9688e+00 total_l1_linf:5.0240e+03 total_spectral:2.9688e+00 L1_fnorm:4.2969e-02 L2_fnorm:4.2480e-02 L3_fnorm:4.1992e-02 L4_fnorm:4.1992e-02 L5_fnorm:4.1992e-02 L6_fnorm:4.1748e-02 L7_fnorm:4.1748e-02 L8_fnorm:4.1260e-02 L9_fnorm:4.1504e-02 L10_fnorm:4.1504e-02 L11_fnorm:4.1016e-02 L12_fnorm:4.0039e-02 L1_l1linf:6.3782e-03 L2_l1linf:6.3171e-03 L3_l1linf:5.8289e-03 L4_l1linf:6.1951e-03 L5_l1linf:5.8289e-03 
L6_l1linf:5.7373e-03 L7_l1linf:5.7068e-03 L8_l1linf:6.1951e-03 L9_l1linf:5.5542e-03 L10_l1linf:5.4626e-03 L11_l1linf:5.5237e-03 L12_l1linf:5.7373e-03 L1_spectral:7.1372e-04 L2_spectral:6.9707e-04 L3_spectral:7.0577e-04 L4_spectral:6.9111e-04 L5_spectral:6.9127e-04 L6_spectral:6.8897e-04 L7_spectral:6.8445e-04 L8_spectral:6.6205e-04 L9_spectral:6.7292e-04 L10_spectral:6.7029e-04 L11_spectral:6.4809e-04 L12_spectral:6.7942e-04 train_time:390261ms step_avg:42.42ms +[2025-09-11 10:59:47] [Rank 0] step:9201/10000 train_time:392531ms step_avg:42.66ms +[2025-09-11 10:59:47] [Rank 0] step:9201/10000 train_time:392531ms step_avg:42.66ms +[2025-09-11 10:59:48] [Rank 0] step:9221/10000 train_time:393271ms step_avg:42.65ms +[2025-09-11 10:59:48] [Rank 0] step:9221/10000 train_time:393271ms step_avg:42.65ms +[2025-09-11 10:59:48] [Rank 0] step:9241/10000 train_time:393979ms step_avg:42.63ms +[2025-09-11 10:59:48] [Rank 0] step:9241/10000 train_time:393979ms step_avg:42.63ms +[2025-09-11 10:59:49] [Rank 0] step:9261/10000 train_time:394691ms step_avg:42.62ms +[2025-09-11 10:59:49] [Rank 0] step:9261/10000 train_time:394691ms step_avg:42.62ms +[2025-09-11 10:59:50] [Rank 0] step:9281/10000 train_time:395401ms step_avg:42.60ms +[2025-09-11 10:59:50] [Rank 0] step:9281/10000 train_time:395401ms step_avg:42.60ms +[2025-09-11 10:59:50] [Rank 0] step:9301/10000 train_time:396109ms step_avg:42.59ms +[2025-09-11 10:59:50] [Rank 0] step:9301/10000 train_time:396109ms step_avg:42.59ms +[2025-09-11 10:59:51] [Rank 0] step:9321/10000 train_time:396819ms step_avg:42.57ms +[2025-09-11 10:59:51] [Rank 0] step:9321/10000 train_time:396819ms step_avg:42.57ms +[2025-09-11 10:59:52] [Rank 0] step:9341/10000 train_time:397526ms step_avg:42.56ms +[2025-09-11 10:59:52] [Rank 0] step:9341/10000 train_time:397526ms step_avg:42.56ms +[2025-09-11 10:59:52] [Rank 0] step:9361/10000 train_time:398236ms step_avg:42.54ms +[2025-09-11 10:59:52] [Rank 0] step:9361/10000 train_time:398236ms step_avg:42.54ms 
+[2025-09-11 10:59:53] [Rank 0] step:9381/10000 train_time:398944ms step_avg:42.53ms +[2025-09-11 10:59:53] [Rank 0] step:9381/10000 train_time:398944ms step_avg:42.53ms +[2025-09-11 10:59:54] [Rank 0] step:9401/10000 train_time:399656ms step_avg:42.51ms +[2025-09-11 10:59:54] [Rank 0] step:9401/10000 train_time:399656ms step_avg:42.51ms +[2025-09-11 10:59:55] [Rank 0] step:9421/10000 train_time:400368ms step_avg:42.50ms +[2025-09-11 10:59:55] [Rank 0] step:9421/10000 train_time:400368ms step_avg:42.50ms +[2025-09-11 10:59:55] [Rank 0] step:9441/10000 train_time:401080ms step_avg:42.48ms +[2025-09-11 10:59:55] [Rank 0] step:9441/10000 train_time:401080ms step_avg:42.48ms +[2025-09-11 10:59:56] [Rank 0] step:9461/10000 train_time:401789ms step_avg:42.47ms +[2025-09-11 10:59:56] [Rank 0] step:9461/10000 train_time:401789ms step_avg:42.47ms +[2025-09-11 10:59:57] [Rank 0] step:9481/10000 train_time:402500ms step_avg:42.45ms +[2025-09-11 10:59:57] [Rank 0] step:9481/10000 train_time:402500ms step_avg:42.45ms +[2025-09-11 10:59:57] [Rank 0] step:9501/10000 train_time:403212ms step_avg:42.44ms +[2025-09-11 10:59:57] [Rank 0] step:9501/10000 train_time:403212ms step_avg:42.44ms +[2025-09-11 10:59:58] [Rank 0] step:9521/10000 train_time:403926ms step_avg:42.42ms +[2025-09-11 10:59:58] [Rank 0] step:9521/10000 train_time:403926ms step_avg:42.42ms +[2025-09-11 10:59:59] [Rank 0] step:9541/10000 train_time:404633ms step_avg:42.41ms +[2025-09-11 10:59:59] [Rank 0] step:9541/10000 train_time:404633ms step_avg:42.41ms +[2025-09-11 11:00:00] [Rank 0] step:9561/10000 train_time:405343ms step_avg:42.40ms +[2025-09-11 11:00:00] [Rank 0] step:9561/10000 train_time:405343ms step_avg:42.40ms +[2025-09-11 11:00:00] [Rank 0] step:9581/10000 train_time:406054ms step_avg:42.38ms +[2025-09-11 11:00:00] [Rank 0] step:9581/10000 train_time:406054ms step_avg:42.38ms +[2025-09-11 11:00:01] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 11:00:01] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 11:00:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 11:00:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 11:00:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 11:00:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 11:00:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:00:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:00:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 11:00:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 11:00:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 11:00:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 11:00:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 11:00:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 11:00:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 11:00:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 11:00:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 11:00:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 11:00:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 11:00:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 11:00:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 11:00:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 11:00:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 11:00:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 11:00:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 11:00:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 11:00:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 11:00:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 11:00:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 11:00:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 11:00:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 11:00:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 11:00:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 11:00:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 11:00:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 11:00:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 11:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 11:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 11:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 11:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 11:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 11:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 11:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 11:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 11:00:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:00:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 11:00:16] [Rank 0] PRINT: step:9600/10000 val_loss:4.2294 total_sharp:8.5031e-05 L1_sharp:2.6224e-03 L2_sharp:5.1631e-04 L3_sharp:1.9174e-03 L4_sharp:1.3259e-03 L5_sharp:4.3561e-03 L6_sharp:5.6064e-03 L7_sharp:1.0798e-02 L8_sharp:1.8497e-02 L9_sharp:1.9127e-02 L10_sharp:2.1187e-02 L11_sharp:3.3735e-02 L12_sharp:2.8315e-01 total_fnorm:3.3594e+00 total_l1_linf:2.4000e+03 total_spectral:1.6719e+00 L1_fnorm:2.4048e-02 L2_fnorm:2.3560e-02 L3_fnorm:2.3315e-02 L4_fnorm:2.3315e-02 L5_fnorm:2.3315e-02 L6_fnorm:2.3315e-02 L7_fnorm:2.3315e-02 L8_fnorm:2.2949e-02 L9_fnorm:2.3193e-02 L10_fnorm:2.3071e-02 L11_fnorm:2.2949e-02 L12_fnorm:2.2583e-02 L1_l1linf:2.8534e-03 L2_l1linf:2.7771e-03 L3_l1linf:2.7466e-03 L4_l1linf:2.6855e-03 L5_l1linf:2.6550e-03 L6_l1linf:2.7618e-03 L7_l1linf:2.6245e-03 L8_l1linf:3.0518e-03 L9_l1linf:2.5940e-03 L10_l1linf:2.6245e-03 L11_l1linf:2.5787e-03 L12_l1linf:2.9907e-03 L1_spectral:4.0518e-04 L2_spectral:4.0213e-04 L3_spectral:4.0667e-04 L4_spectral:3.9785e-04 L5_spectral:3.9262e-04 L6_spectral:3.9579e-04 L7_spectral:3.9038e-04 L8_spectral:3.8365e-04 L9_spectral:3.8659e-04 L10_spectral:3.8279e-04 L11_spectral:3.7017e-04 L12_spectral:3.9267e-04 train_time:406742ms step_avg:42.37ms +[2025-09-11 11:00:16] [Rank 0] PRINT: step:9600/10000 
val_loss:4.2294 total_sharp:8.5031e-05 L1_sharp:2.6224e-03 L2_sharp:5.1631e-04 L3_sharp:1.9174e-03 L4_sharp:1.3259e-03 L5_sharp:4.3561e-03 L6_sharp:5.6064e-03 L7_sharp:1.0798e-02 L8_sharp:1.8497e-02 L9_sharp:1.9127e-02 L10_sharp:2.1187e-02 L11_sharp:3.3735e-02 L12_sharp:2.8315e-01 total_fnorm:3.3594e+00 total_l1_linf:2.4000e+03 total_spectral:1.6719e+00 L1_fnorm:2.4048e-02 L2_fnorm:2.3560e-02 L3_fnorm:2.3315e-02 L4_fnorm:2.3315e-02 L5_fnorm:2.3315e-02 L6_fnorm:2.3315e-02 L7_fnorm:2.3315e-02 L8_fnorm:2.2949e-02 L9_fnorm:2.3193e-02 L10_fnorm:2.3071e-02 L11_fnorm:2.2949e-02 L12_fnorm:2.2583e-02 L1_l1linf:2.8534e-03 L2_l1linf:2.7771e-03 L3_l1linf:2.7466e-03 L4_l1linf:2.6855e-03 L5_l1linf:2.6550e-03 L6_l1linf:2.7618e-03 L7_l1linf:2.6245e-03 L8_l1linf:3.0518e-03 L9_l1linf:2.5940e-03 L10_l1linf:2.6245e-03 L11_l1linf:2.5787e-03 L12_l1linf:2.9907e-03 L1_spectral:4.0518e-04 L2_spectral:4.0213e-04 L3_spectral:4.0667e-04 L4_spectral:3.9785e-04 L5_spectral:3.9262e-04 L6_spectral:3.9579e-04 L7_spectral:3.9038e-04 L8_spectral:3.8365e-04 L9_spectral:3.8659e-04 L10_spectral:3.8279e-04 L11_spectral:3.7017e-04 L12_spectral:3.9267e-04 train_time:406742ms step_avg:42.37ms +[2025-09-11 11:00:18] [Rank 0] step:9601/10000 train_time:409099ms step_avg:42.61ms +[2025-09-11 11:00:18] [Rank 0] step:9601/10000 train_time:409099ms step_avg:42.61ms +[2025-09-11 11:00:19] [Rank 0] step:9621/10000 train_time:409819ms step_avg:42.60ms +[2025-09-11 11:00:19] [Rank 0] step:9621/10000 train_time:409819ms step_avg:42.60ms +[2025-09-11 11:00:20] [Rank 0] step:9641/10000 train_time:410534ms step_avg:42.58ms +[2025-09-11 11:00:20] [Rank 0] step:9641/10000 train_time:410534ms step_avg:42.58ms +[2025-09-11 11:00:21] [Rank 0] step:9661/10000 train_time:411256ms step_avg:42.57ms +[2025-09-11 11:00:21] [Rank 0] step:9661/10000 train_time:411256ms step_avg:42.57ms +[2025-09-11 11:00:21] [Rank 0] step:9681/10000 train_time:411971ms step_avg:42.55ms +[2025-09-11 11:00:21] [Rank 0] step:9681/10000 
train_time:411971ms step_avg:42.55ms +[2025-09-11 11:00:22] [Rank 0] step:9701/10000 train_time:412687ms step_avg:42.54ms +[2025-09-11 11:00:22] [Rank 0] step:9701/10000 train_time:412687ms step_avg:42.54ms +[2025-09-11 11:00:23] [Rank 0] step:9721/10000 train_time:413409ms step_avg:42.53ms +[2025-09-11 11:00:23] [Rank 0] step:9721/10000 train_time:413409ms step_avg:42.53ms +[2025-09-11 11:00:23] [Rank 0] step:9741/10000 train_time:414127ms step_avg:42.51ms +[2025-09-11 11:00:23] [Rank 0] step:9741/10000 train_time:414127ms step_avg:42.51ms +[2025-09-11 11:00:24] [Rank 0] step:9761/10000 train_time:414844ms step_avg:42.50ms +[2025-09-11 11:00:24] [Rank 0] step:9761/10000 train_time:414844ms step_avg:42.50ms +[2025-09-11 11:00:25] [Rank 0] step:9781/10000 train_time:415559ms step_avg:42.49ms +[2025-09-11 11:00:25] [Rank 0] step:9781/10000 train_time:415559ms step_avg:42.49ms +[2025-09-11 11:00:26] [Rank 0] step:9801/10000 train_time:416281ms step_avg:42.47ms +[2025-09-11 11:00:26] [Rank 0] step:9801/10000 train_time:416281ms step_avg:42.47ms +[2025-09-11 11:00:26] [Rank 0] step:9821/10000 train_time:416998ms step_avg:42.46ms +[2025-09-11 11:00:26] [Rank 0] step:9821/10000 train_time:416998ms step_avg:42.46ms +[2025-09-11 11:00:27] [Rank 0] step:9841/10000 train_time:417985ms step_avg:42.47ms +[2025-09-11 11:00:27] [Rank 0] step:9841/10000 train_time:417985ms step_avg:42.47ms +[2025-09-11 11:00:28] [Rank 0] step:9861/10000 train_time:418701ms step_avg:42.46ms +[2025-09-11 11:00:28] [Rank 0] step:9861/10000 train_time:418701ms step_avg:42.46ms +[2025-09-11 11:00:29] [Rank 0] step:9881/10000 train_time:419419ms step_avg:42.45ms +[2025-09-11 11:00:29] [Rank 0] step:9881/10000 train_time:419419ms step_avg:42.45ms +[2025-09-11 11:00:30] [Rank 0] step:9901/10000 train_time:420386ms step_avg:42.46ms +[2025-09-11 11:00:30] [Rank 0] step:9901/10000 train_time:420386ms step_avg:42.46ms +[2025-09-11 11:00:30] [Rank 0] step:9921/10000 train_time:421102ms step_avg:42.45ms 
+[2025-09-11 11:00:30] [Rank 0] step:9921/10000 train_time:421102ms step_avg:42.45ms +[2025-09-11 11:00:31] [Rank 0] step:9941/10000 train_time:421824ms step_avg:42.43ms +[2025-09-11 11:00:31] [Rank 0] step:9941/10000 train_time:421824ms step_avg:42.43ms +[2025-09-11 11:00:32] [Rank 0] step:9961/10000 train_time:422546ms step_avg:42.42ms +[2025-09-11 11:00:32] [Rank 0] step:9961/10000 train_time:422546ms step_avg:42.42ms +[2025-09-11 11:00:33] [Rank 0] step:9981/10000 train_time:423266ms step_avg:42.41ms +[2025-09-11 11:00:33] [Rank 0] step:9981/10000 train_time:423266ms step_avg:42.41ms +[2025-09-11 11:00:33] [Rank 0] step:10000/10000 train_time:423956ms step_avg:42.40ms +[2025-09-11 11:00:33] [Rank 0] step:10000/10000 train_time:423956ms step_avg:42.40ms +[2025-09-11 11:00:33] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 11:00:33] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 11:00:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 11:00:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 11:00:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 11:00:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 11:00:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:00:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 11:00:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 11:00:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 11:00:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 11:00:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 11:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 11:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 11:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 11:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 11:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 11:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 11:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 11:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 11:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 11:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 11:00:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 11:00:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 11:00:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 11:00:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 11:00:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 11:00:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 11:00:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 11:00:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 11:00:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 11:00:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 11:00:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 11:00:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 11:00:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 11:00:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 11:00:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 11:00:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 11:00:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 11:00:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 11:00:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 11:00:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 11:00:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 11:00:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 11:00:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 11:00:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 11:00:44] [Rank 0] PRINT: step:10000/10000 val_loss:4.2272 total_sharp:5.9280e-05 L1_sharp:1.0564e-03 L2_sharp:1.0077e-03 L3_sharp:1.6973e-03 L4_sharp:1.9166e-03 L5_sharp:5.3512e-03 L6_sharp:6.2248e-03 L7_sharp:9.1724e-03 L8_sharp:1.6499e-02 L9_sharp:1.3466e-02 L10_sharp:1.9870e-02 L11_sharp:2.6283e-02 L12_sharp:1.5623e-01 total_fnorm:1.2656e+00 total_l1_linf:6.5600e+02 total_spectral:6.3281e-01 L1_fnorm:9.3384e-03 L2_fnorm:9.2163e-03 L3_fnorm:9.2163e-03 L4_fnorm:9.1553e-03 L5_fnorm:9.1553e-03 L6_fnorm:9.1553e-03 L7_fnorm:9.1553e-03 L8_fnorm:8.9722e-03 L9_fnorm:9.0942e-03 L10_fnorm:9.0942e-03 L11_fnorm:8.9722e-03 L12_fnorm:8.8501e-03 L1_l1linf:8.9645e-04 L2_l1linf:8.9264e-04 L3_l1linf:8.7738e-04 L4_l1linf:8.3542e-04 L5_l1linf:8.1635e-04 L6_l1linf:8.4686e-04 L7_l1linf:7.9346e-04 L8_l1linf:9.4604e-04 L9_l1linf:7.8583e-04 L10_l1linf:8.1253e-04 L11_l1linf:7.8964e-04 L12_l1linf:9.1553e-04 L1_spectral:1.6391e-04 L2_spectral:1.6024e-04 L3_spectral:1.5986e-04 L4_spectral:1.6080e-04 L5_spectral:1.5930e-04 L6_spectral:1.5944e-04 L7_spectral:1.5738e-04 L8_spectral:1.5719e-04 L9_spectral:1.5662e-04 L10_spectral:1.5407e-04 L11_spectral:1.4981e-04 L12_spectral:1.6005e-04 train_time:423976ms step_avg:42.40ms +[2025-09-11 11:00:44] [Rank 0] PRINT: step:10000/10000 val_loss:4.2272 total_sharp:5.9280e-05 L1_sharp:1.0564e-03 L2_sharp:1.0077e-03 L3_sharp:1.6973e-03 L4_sharp:1.9166e-03 L5_sharp:5.3512e-03 L6_sharp:6.2248e-03 L7_sharp:9.1724e-03 L8_sharp:1.6499e-02 L9_sharp:1.3466e-02 L10_sharp:1.9870e-02 L11_sharp:2.6283e-02 L12_sharp:1.5623e-01 total_fnorm:1.2656e+00 total_l1_linf:6.5600e+02 total_spectral:6.3281e-01 L1_fnorm:9.3384e-03 L2_fnorm:9.2163e-03 L3_fnorm:9.2163e-03 L4_fnorm:9.1553e-03 L5_fnorm:9.1553e-03 L6_fnorm:9.1553e-03 L7_fnorm:9.1553e-03 L8_fnorm:8.9722e-03 L9_fnorm:9.0942e-03 L10_fnorm:9.0942e-03 L11_fnorm:8.9722e-03 L12_fnorm:8.8501e-03 L1_l1linf:8.9645e-04 L2_l1linf:8.9264e-04 L3_l1linf:8.7738e-04 L4_l1linf:8.3542e-04 L5_l1linf:8.1635e-04 
L6_l1linf:8.4686e-04 L7_l1linf:7.9346e-04 L8_l1linf:9.4604e-04 L9_l1linf:7.8583e-04 L10_l1linf:8.1253e-04 L11_l1linf:7.8964e-04 L12_l1linf:9.1553e-04 L1_spectral:1.6391e-04 L2_spectral:1.6024e-04 L3_spectral:1.5986e-04 L4_spectral:1.6080e-04 L5_spectral:1.5930e-04 L6_spectral:1.5944e-04 L7_spectral:1.5738e-04 L8_spectral:1.5719e-04 L9_spectral:1.5662e-04 L10_spectral:1.5407e-04 L11_spectral:1.4981e-04 L12_spectral:1.6005e-04 train_time:423976ms step_avg:42.40ms +[2025-09-11 11:00:44] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 11:00:44 2025 --- +[2025-09-11 11:00:44] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 11:00:44 2025 --- +[2025-09-11 11:00:44] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 11:00:44] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.01_seed_44/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.01_seed_44/config.json new file mode 100644 index 0000000000000000000000000000000000000000..6a2c0bde23f3e87a898b562cec8c95f41bfc95db --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.01_seed_44/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 44, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.01, + "muon_lr": 0.01, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "8f3224e1-ad6b-4d6e-b91a-3a2f4dd2212f", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.01_seed_44/training_log_8f3224e1-ad6b-4d6e-b91a-3a2f4dd2212f.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.01_seed_44/training_log_8f3224e1-ad6b-4d6e-b91a-3a2f4dd2212f.txt new file mode 100644 index 0000000000000000000000000000000000000000..407a0c6b94f6b58e83a1010956f6ca2f0cbf47d2 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.01_seed_44/training_log_8f3224e1-ad6b-4d6e-b91a-3a2f4dd2212f.txt @@ -0,0 +1,4264 @@ +[2025-09-11 09:11:45] [Rank 0] PRINT: --- Script Start: Thu Sep 11 09:11:45 2025 --- +[2025-09-11 09:11:45] [Rank 0] PRINT: --- Script Start: Thu Sep 11 09:11:45 2025 --- +[2025-09-11 09:11:45] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.01, muon_lr=0.01, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 09:11:45] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.01, muon_lr=0.01, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 09:11:45] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 09:11:45] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 09:11:45] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 09:11:45] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 09:11:45] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.01_seed_44 +[2025-09-11 09:11:45] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.01_seed_44 +[2025-09-11 09:11:45] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import 
dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, 
"unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." 
+ "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention 
sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 09:11:45] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 09:11:45] [Rank 0] PRINT: Constructing model... +[2025-09-11 09:11:45] [Rank 0] PRINT: Constructing model... +[2025-09-11 09:11:46] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 09:11:46] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 09:11:46] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 09:11:46] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 09:11:46] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 09:11:46] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 09:11:46] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 09:11:46] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 09:11:46] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 09:11:46] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 09:11:48] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 09:11:48] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 09:11:48] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 09:11:48] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 09:11:48] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 09:11:48] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 09:11:54] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 09:11:54] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 09:11:54] [Rank 0] PRINT: Starting warmup... +[2025-09-11 09:11:54] [Rank 0] PRINT: Starting warmup... +[2025-09-11 09:12:36] [Rank 0] PRINT: Warmup complete. +[2025-09-11 09:12:36] [Rank 0] PRINT: Warmup complete. +[2025-09-11 09:12:36] [Rank 0] PRINT: Starting training... +[2025-09-11 09:12:36] [Rank 0] PRINT: Starting training... 
+[2025-09-11 09:12:37] [Rank 0] step:21/10000 train_time:1135ms step_avg:54.03ms +[2025-09-11 09:12:37] [Rank 0] step:21/10000 train_time:1135ms step_avg:54.03ms +[2025-09-11 09:12:37] [Rank 0] step:41/10000 train_time:1867ms step_avg:45.53ms +[2025-09-11 09:12:37] [Rank 0] step:41/10000 train_time:1867ms step_avg:45.53ms +[2025-09-11 09:12:38] [Rank 0] step:61/10000 train_time:2592ms step_avg:42.49ms +[2025-09-11 09:12:38] [Rank 0] step:61/10000 train_time:2592ms step_avg:42.49ms +[2025-09-11 09:12:39] [Rank 0] step:81/10000 train_time:3317ms step_avg:40.95ms +[2025-09-11 09:12:39] [Rank 0] step:81/10000 train_time:3317ms step_avg:40.95ms +[2025-09-11 09:12:40] [Rank 0] step:101/10000 train_time:4042ms step_avg:40.02ms +[2025-09-11 09:12:40] [Rank 0] step:101/10000 train_time:4042ms step_avg:40.02ms +[2025-09-11 09:12:40] [Rank 0] step:121/10000 train_time:4767ms step_avg:39.40ms +[2025-09-11 09:12:40] [Rank 0] step:121/10000 train_time:4767ms step_avg:39.40ms +[2025-09-11 09:12:41] [Rank 0] step:141/10000 train_time:5492ms step_avg:38.95ms +[2025-09-11 09:12:41] [Rank 0] step:141/10000 train_time:5492ms step_avg:38.95ms +[2025-09-11 09:12:42] [Rank 0] step:161/10000 train_time:6217ms step_avg:38.61ms +[2025-09-11 09:12:42] [Rank 0] step:161/10000 train_time:6217ms step_avg:38.61ms +[2025-09-11 09:12:42] [Rank 0] step:181/10000 train_time:6942ms step_avg:38.35ms +[2025-09-11 09:12:42] [Rank 0] step:181/10000 train_time:6942ms step_avg:38.35ms +[2025-09-11 09:12:43] [Rank 0] step:201/10000 train_time:7667ms step_avg:38.15ms +[2025-09-11 09:12:43] [Rank 0] step:201/10000 train_time:7667ms step_avg:38.15ms +[2025-09-11 09:12:44] [Rank 0] step:221/10000 train_time:8393ms step_avg:37.98ms +[2025-09-11 09:12:44] [Rank 0] step:221/10000 train_time:8393ms step_avg:37.98ms +[2025-09-11 09:12:45] [Rank 0] step:241/10000 train_time:9122ms step_avg:37.85ms +[2025-09-11 09:12:45] [Rank 0] step:241/10000 train_time:9122ms step_avg:37.85ms +[2025-09-11 09:12:45] [Rank 0] 
step:261/10000 train_time:9847ms step_avg:37.73ms +[2025-09-11 09:12:45] [Rank 0] step:261/10000 train_time:9847ms step_avg:37.73ms +[2025-09-11 09:12:46] [Rank 0] step:281/10000 train_time:10573ms step_avg:37.63ms +[2025-09-11 09:12:46] [Rank 0] step:281/10000 train_time:10573ms step_avg:37.63ms +[2025-09-11 09:12:47] [Rank 0] step:301/10000 train_time:11297ms step_avg:37.53ms +[2025-09-11 09:12:47] [Rank 0] step:301/10000 train_time:11297ms step_avg:37.53ms +[2025-09-11 09:12:48] [Rank 0] step:321/10000 train_time:12022ms step_avg:37.45ms +[2025-09-11 09:12:48] [Rank 0] step:321/10000 train_time:12022ms step_avg:37.45ms +[2025-09-11 09:12:48] [Rank 0] step:341/10000 train_time:12747ms step_avg:37.38ms +[2025-09-11 09:12:48] [Rank 0] step:341/10000 train_time:12747ms step_avg:37.38ms +[2025-09-11 09:12:49] [Rank 0] step:361/10000 train_time:13473ms step_avg:37.32ms +[2025-09-11 09:12:49] [Rank 0] step:361/10000 train_time:13473ms step_avg:37.32ms +[2025-09-11 09:12:50] [Rank 0] step:381/10000 train_time:14199ms step_avg:37.27ms +[2025-09-11 09:12:50] [Rank 0] step:381/10000 train_time:14199ms step_avg:37.27ms +[2025-09-11 09:12:50] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 09:12:50] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 09:12:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 09:12:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 09:13:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 09:13:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 09:13:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:13:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 09:13:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 09:13:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 09:13:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 09:13:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 09:13:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 09:13:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 09:13:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 09:13:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 09:13:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 09:13:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 09:13:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 09:13:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 09:13:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 09:13:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 09:13:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 09:13:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 09:13:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 09:13:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 09:13:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 09:13:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 09:13:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 09:13:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 09:13:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 09:13:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 09:13:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 09:13:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 09:13:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 09:13:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 09:13:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 09:13:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 09:13:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 09:13:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 09:13:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 09:13:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 09:13:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 09:13:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 09:13:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:13:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:13:37] [Rank 0] PRINT: step:400/10000 val_loss:5.9969 total_sharp:2.1824e-03 L1_sharp:2.7990e-02 L2_sharp:1.7357e-02 L3_sharp:1.6750e-02 L4_sharp:1.4610e-02 L5_sharp:1.4702e-02 L6_sharp:1.3045e-02 L7_sharp:1.5957e-02 L8_sharp:1.6561e-02 L9_sharp:1.4904e-02 L10_sharp:1.5944e-02 L11_sharp:2.0771e-02 L12_sharp:7.6952e-02 total_fnorm:3.9783e+01 total_l1_linf:1.0738e+05 total_spectral:1.9895e+01 L1_fnorm:1.2248e+00 L2_fnorm:1.2130e+00 L3_fnorm:1.2125e+00 L4_fnorm:1.2146e+00 L5_fnorm:1.2115e+00 L6_fnorm:1.2108e+00 L7_fnorm:1.2045e+00 L8_fnorm:1.1956e+00 L9_fnorm:1.1814e+00 L10_fnorm:1.1669e+00 L11_fnorm:1.1360e+00 L12_fnorm:1.0760e+00 L1_l1linf:4.1157e-01 L2_l1linf:4.0465e-01 L3_l1linf:4.0750e-01 L4_l1linf:4.1827e-01 L5_l1linf:4.1953e-01 L6_l1linf:4.2106e-01 L7_l1linf:4.2280e-01 L8_l1linf:4.2210e-01 L9_l1linf:4.1923e-01 L10_l1linf:4.0852e-01 L11_l1linf:3.8611e-01 L12_l1linf:3.4082e-01 L1_spectral:1.2042e-02 L2_spectral:1.2044e-02 L3_spectral:1.2052e-02 L4_spectral:1.2057e-02 L5_spectral:1.2043e-02 L6_spectral:1.2042e-02 L7_spectral:1.2053e-02 L8_spectral:1.2045e-02 L9_spectral:1.2045e-02 L10_spectral:1.2043e-02 L11_spectral:1.2040e-02 L12_spectral:1.2036e-02 train_time:14904ms step_avg:37.26ms +[2025-09-11 09:13:37] [Rank 0] PRINT: step:400/10000 val_loss:5.9969 total_sharp:2.1824e-03 L1_sharp:2.7990e-02 L2_sharp:1.7357e-02 L3_sharp:1.6750e-02 L4_sharp:1.4610e-02 L5_sharp:1.4702e-02 L6_sharp:1.3045e-02 L7_sharp:1.5957e-02 L8_sharp:1.6561e-02 L9_sharp:1.4904e-02 L10_sharp:1.5944e-02 L11_sharp:2.0771e-02 L12_sharp:7.6952e-02 total_fnorm:3.9783e+01 total_l1_linf:1.0738e+05 total_spectral:1.9895e+01 L1_fnorm:1.2248e+00 L2_fnorm:1.2130e+00 L3_fnorm:1.2125e+00 L4_fnorm:1.2146e+00 L5_fnorm:1.2115e+00 L6_fnorm:1.2108e+00 L7_fnorm:1.2045e+00 L8_fnorm:1.1956e+00 L9_fnorm:1.1814e+00 L10_fnorm:1.1669e+00 L11_fnorm:1.1360e+00 L12_fnorm:1.0760e+00 L1_l1linf:4.1157e-01 L2_l1linf:4.0465e-01 L3_l1linf:4.0750e-01 L4_l1linf:4.1827e-01 L5_l1linf:4.1953e-01 
L6_l1linf:4.2106e-01 L7_l1linf:4.2280e-01 L8_l1linf:4.2210e-01 L9_l1linf:4.1923e-01 L10_l1linf:4.0852e-01 L11_l1linf:3.8611e-01 L12_l1linf:3.4082e-01 L1_spectral:1.2042e-02 L2_spectral:1.2044e-02 L3_spectral:1.2052e-02 L4_spectral:1.2057e-02 L5_spectral:1.2043e-02 L6_spectral:1.2042e-02 L7_spectral:1.2053e-02 L8_spectral:1.2045e-02 L9_spectral:1.2045e-02 L10_spectral:1.2043e-02 L11_spectral:1.2040e-02 L12_spectral:1.2036e-02 train_time:14904ms step_avg:37.26ms +[2025-09-11 09:14:08] [Rank 0] step:401/10000 train_time:45896ms step_avg:114.45ms +[2025-09-11 09:14:08] [Rank 0] step:401/10000 train_time:45896ms step_avg:114.45ms +[2025-09-11 09:14:10] [Rank 0] step:421/10000 train_time:48043ms step_avg:114.12ms +[2025-09-11 09:14:10] [Rank 0] step:421/10000 train_time:48043ms step_avg:114.12ms +[2025-09-11 09:14:11] [Rank 0] step:441/10000 train_time:48682ms step_avg:110.39ms +[2025-09-11 09:14:11] [Rank 0] step:441/10000 train_time:48682ms step_avg:110.39ms +[2025-09-11 09:14:12] [Rank 0] step:461/10000 train_time:49320ms step_avg:106.98ms +[2025-09-11 09:14:12] [Rank 0] step:461/10000 train_time:49320ms step_avg:106.98ms +[2025-09-11 09:14:12] [Rank 0] step:481/10000 train_time:49958ms step_avg:103.86ms +[2025-09-11 09:14:12] [Rank 0] step:481/10000 train_time:49958ms step_avg:103.86ms +[2025-09-11 09:14:13] [Rank 0] step:501/10000 train_time:50596ms step_avg:100.99ms +[2025-09-11 09:14:13] [Rank 0] step:501/10000 train_time:50596ms step_avg:100.99ms +[2025-09-11 09:14:14] [Rank 0] step:521/10000 train_time:51234ms step_avg:98.34ms +[2025-09-11 09:14:14] [Rank 0] step:521/10000 train_time:51234ms step_avg:98.34ms +[2025-09-11 09:14:14] [Rank 0] step:541/10000 train_time:51872ms step_avg:95.88ms +[2025-09-11 09:14:14] [Rank 0] step:541/10000 train_time:51872ms step_avg:95.88ms +[2025-09-11 09:14:15] [Rank 0] step:561/10000 train_time:52510ms step_avg:93.60ms +[2025-09-11 09:14:15] [Rank 0] step:561/10000 train_time:52510ms step_avg:93.60ms +[2025-09-11 09:14:16] [Rank 
0] step:581/10000 train_time:53147ms step_avg:91.47ms +[2025-09-11 09:14:16] [Rank 0] step:581/10000 train_time:53147ms step_avg:91.47ms +[2025-09-11 09:14:16] [Rank 0] step:601/10000 train_time:53786ms step_avg:89.49ms +[2025-09-11 09:14:16] [Rank 0] step:601/10000 train_time:53786ms step_avg:89.49ms +[2025-09-11 09:14:17] [Rank 0] step:621/10000 train_time:54423ms step_avg:87.64ms +[2025-09-11 09:14:17] [Rank 0] step:621/10000 train_time:54423ms step_avg:87.64ms +[2025-09-11 09:14:17] [Rank 0] step:641/10000 train_time:55060ms step_avg:85.90ms +[2025-09-11 09:14:17] [Rank 0] step:641/10000 train_time:55060ms step_avg:85.90ms +[2025-09-11 09:14:18] [Rank 0] step:661/10000 train_time:55698ms step_avg:84.26ms +[2025-09-11 09:14:18] [Rank 0] step:661/10000 train_time:55698ms step_avg:84.26ms +[2025-09-11 09:14:19] [Rank 0] step:681/10000 train_time:56335ms step_avg:82.72ms +[2025-09-11 09:14:19] [Rank 0] step:681/10000 train_time:56335ms step_avg:82.72ms +[2025-09-11 09:14:19] [Rank 0] step:701/10000 train_time:56972ms step_avg:81.27ms +[2025-09-11 09:14:19] [Rank 0] step:701/10000 train_time:56972ms step_avg:81.27ms +[2025-09-11 09:14:20] [Rank 0] step:721/10000 train_time:57610ms step_avg:79.90ms +[2025-09-11 09:14:20] [Rank 0] step:721/10000 train_time:57610ms step_avg:79.90ms +[2025-09-11 09:14:21] [Rank 0] step:741/10000 train_time:58247ms step_avg:78.61ms +[2025-09-11 09:14:21] [Rank 0] step:741/10000 train_time:58247ms step_avg:78.61ms +[2025-09-11 09:14:21] [Rank 0] step:761/10000 train_time:58890ms step_avg:77.38ms +[2025-09-11 09:14:21] [Rank 0] step:761/10000 train_time:58890ms step_avg:77.38ms +[2025-09-11 09:14:22] [Rank 0] step:781/10000 train_time:59532ms step_avg:76.23ms +[2025-09-11 09:14:22] [Rank 0] step:781/10000 train_time:59532ms step_avg:76.23ms +[2025-09-11 09:14:23] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 09:14:23] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 09:14:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 09:14:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 09:14:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 09:14:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 09:14:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:14:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:14:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 09:14:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 09:14:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 09:14:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 09:15:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 09:15:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 09:15:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 09:15:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 09:15:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 09:15:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 09:15:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 09:15:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 09:15:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 09:15:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 09:15:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 09:15:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 09:15:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 09:15:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 09:15:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 09:15:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 09:15:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 09:15:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 09:15:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 09:15:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 09:15:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 09:15:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 09:15:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 09:15:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 09:15:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 09:15:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 09:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 09:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 09:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... 
+[2025-09-11 09:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 09:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 09:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 09:15:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:15:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:15:07] [Rank 0] PRINT: step:800/10000 val_loss:5.5967 total_sharp:1.4519e-03 L1_sharp:2.5892e-02 L2_sharp:7.0741e-03 L3_sharp:4.3875e-03 L4_sharp:6.5744e-04 L5_sharp:5.4009e-03 L6_sharp:4.2219e-03 L7_sharp:6.9085e-03 L8_sharp:7.5894e-03 L9_sharp:1.1364e-02 L10_sharp:1.1149e-02 L11_sharp:1.8268e-02 L12_sharp:6.5324e-02 total_fnorm:3.9750e+01 total_l1_linf:8.9088e+04 total_spectral:2.0000e+01 L1_fnorm:1.2578e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2422e+00 L5_fnorm:1.2266e+00 L6_fnorm:1.2344e+00 L7_fnorm:1.2344e+00 L8_fnorm:1.2266e+00 L9_fnorm:1.2109e+00 L10_fnorm:1.2188e+00 L11_fnorm:1.1797e+00 L12_fnorm:1.0547e+00 L1_l1linf:4.0039e-01 L2_l1linf:3.9258e-01 L3_l1linf:3.9062e-01 L4_l1linf:3.8867e-01 L5_l1linf:3.9844e-01 L6_l1linf:4.0430e-01 L7_l1linf:4.0625e-01 L8_l1linf:4.0430e-01 L9_l1linf:4.0625e-01 L10_l1linf:4.0234e-01 L11_l1linf:3.6328e-01 L12_l1linf:2.8125e-01 L1_spectral:1.3457e-02 L2_spectral:1.3549e-02 L3_spectral:1.3585e-02 L4_spectral:1.3577e-02 L5_spectral:1.3458e-02 L6_spectral:1.3447e-02 L7_spectral:1.3443e-02 L8_spectral:1.3465e-02 L9_spectral:1.3508e-02 L10_spectral:1.3525e-02 L11_spectral:1.3569e-02 L12_spectral:1.3393e-02 train_time:60157ms step_avg:75.20ms +[2025-09-11 09:15:07] [Rank 0] PRINT: step:800/10000 val_loss:5.5967 total_sharp:1.4519e-03 L1_sharp:2.5892e-02 L2_sharp:7.0741e-03 L3_sharp:4.3875e-03 L4_sharp:6.5744e-04 L5_sharp:5.4009e-03 L6_sharp:4.2219e-03 L7_sharp:6.9085e-03 L8_sharp:7.5894e-03 
L9_sharp:1.1364e-02 L10_sharp:1.1149e-02 L11_sharp:1.8268e-02 L12_sharp:6.5324e-02 total_fnorm:3.9750e+01 total_l1_linf:8.9088e+04 total_spectral:2.0000e+01 L1_fnorm:1.2578e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2422e+00 L5_fnorm:1.2266e+00 L6_fnorm:1.2344e+00 L7_fnorm:1.2344e+00 L8_fnorm:1.2266e+00 L9_fnorm:1.2109e+00 L10_fnorm:1.2188e+00 L11_fnorm:1.1797e+00 L12_fnorm:1.0547e+00 L1_l1linf:4.0039e-01 L2_l1linf:3.9258e-01 L3_l1linf:3.9062e-01 L4_l1linf:3.8867e-01 L5_l1linf:3.9844e-01 L6_l1linf:4.0430e-01 L7_l1linf:4.0625e-01 L8_l1linf:4.0430e-01 L9_l1linf:4.0625e-01 L10_l1linf:4.0234e-01 L11_l1linf:3.6328e-01 L12_l1linf:2.8125e-01 L1_spectral:1.3457e-02 L2_spectral:1.3549e-02 L3_spectral:1.3585e-02 L4_spectral:1.3577e-02 L5_spectral:1.3458e-02 L6_spectral:1.3447e-02 L7_spectral:1.3443e-02 L8_spectral:1.3465e-02 L9_spectral:1.3508e-02 L10_spectral:1.3525e-02 L11_spectral:1.3569e-02 L12_spectral:1.3393e-02 train_time:60157ms step_avg:75.20ms +[2025-09-11 09:15:08] [Rank 0] step:801/10000 train_time:61225ms step_avg:76.44ms +[2025-09-11 09:15:08] [Rank 0] step:801/10000 train_time:61225ms step_avg:76.44ms +[2025-09-11 09:15:08] [Rank 0] step:821/10000 train_time:61854ms step_avg:75.34ms +[2025-09-11 09:15:08] [Rank 0] step:821/10000 train_time:61854ms step_avg:75.34ms +[2025-09-11 09:15:09] [Rank 0] step:841/10000 train_time:62497ms step_avg:74.31ms +[2025-09-11 09:15:09] [Rank 0] step:841/10000 train_time:62497ms step_avg:74.31ms +[2025-09-11 09:15:10] [Rank 0] step:861/10000 train_time:63140ms step_avg:73.33ms +[2025-09-11 09:15:10] [Rank 0] step:861/10000 train_time:63140ms step_avg:73.33ms +[2025-09-11 09:15:10] [Rank 0] step:881/10000 train_time:63782ms step_avg:72.40ms +[2025-09-11 09:15:10] [Rank 0] step:881/10000 train_time:63782ms step_avg:72.40ms +[2025-09-11 09:15:11] [Rank 0] step:901/10000 train_time:64423ms step_avg:71.50ms +[2025-09-11 09:15:11] [Rank 0] step:901/10000 train_time:64423ms step_avg:71.50ms +[2025-09-11 09:15:12] [Rank 0] 
step:921/10000 train_time:65064ms step_avg:70.64ms +[2025-09-11 09:15:12] [Rank 0] step:921/10000 train_time:65064ms step_avg:70.64ms +[2025-09-11 09:15:12] [Rank 0] step:941/10000 train_time:65705ms step_avg:69.82ms +[2025-09-11 09:15:12] [Rank 0] step:941/10000 train_time:65705ms step_avg:69.82ms +[2025-09-11 09:15:13] [Rank 0] step:961/10000 train_time:66346ms step_avg:69.04ms +[2025-09-11 09:15:13] [Rank 0] step:961/10000 train_time:66346ms step_avg:69.04ms +[2025-09-11 09:15:14] [Rank 0] step:981/10000 train_time:66987ms step_avg:68.28ms +[2025-09-11 09:15:14] [Rank 0] step:981/10000 train_time:66987ms step_avg:68.28ms +[2025-09-11 09:15:14] [Rank 0] step:1001/10000 train_time:67628ms step_avg:67.56ms +[2025-09-11 09:15:14] [Rank 0] step:1001/10000 train_time:67628ms step_avg:67.56ms +[2025-09-11 09:15:15] [Rank 0] step:1021/10000 train_time:68268ms step_avg:66.86ms +[2025-09-11 09:15:15] [Rank 0] step:1021/10000 train_time:68268ms step_avg:66.86ms +[2025-09-11 09:15:15] [Rank 0] step:1041/10000 train_time:68909ms step_avg:66.19ms +[2025-09-11 09:15:15] [Rank 0] step:1041/10000 train_time:68909ms step_avg:66.19ms +[2025-09-11 09:15:16] [Rank 0] step:1061/10000 train_time:69549ms step_avg:65.55ms +[2025-09-11 09:15:16] [Rank 0] step:1061/10000 train_time:69549ms step_avg:65.55ms +[2025-09-11 09:15:17] [Rank 0] step:1081/10000 train_time:70190ms step_avg:64.93ms +[2025-09-11 09:15:17] [Rank 0] step:1081/10000 train_time:70190ms step_avg:64.93ms +[2025-09-11 09:15:17] [Rank 0] step:1101/10000 train_time:70830ms step_avg:64.33ms +[2025-09-11 09:15:17] [Rank 0] step:1101/10000 train_time:70830ms step_avg:64.33ms +[2025-09-11 09:15:18] [Rank 0] step:1121/10000 train_time:71470ms step_avg:63.76ms +[2025-09-11 09:15:18] [Rank 0] step:1121/10000 train_time:71470ms step_avg:63.76ms +[2025-09-11 09:15:19] [Rank 0] step:1141/10000 train_time:72111ms step_avg:63.20ms +[2025-09-11 09:15:19] [Rank 0] step:1141/10000 train_time:72111ms step_avg:63.20ms +[2025-09-11 09:15:19] 
[Rank 0] step:1161/10000 train_time:72751ms step_avg:62.66ms +[2025-09-11 09:15:19] [Rank 0] step:1161/10000 train_time:72751ms step_avg:62.66ms +[2025-09-11 09:15:20] [Rank 0] step:1181/10000 train_time:73392ms step_avg:62.14ms +[2025-09-11 09:15:20] [Rank 0] step:1181/10000 train_time:73392ms step_avg:62.14ms +[2025-09-11 09:15:21] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 09:15:21] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 09:15:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 09:15:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 09:15:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 09:15:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 09:15:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:15:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:15:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 09:15:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 09:15:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 09:15:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 09:15:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 09:15:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 09:15:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... 
+[2025-09-11 09:15:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 09:15:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 09:15:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 09:15:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 09:15:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 09:15:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 09:15:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 09:15:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 09:15:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 09:15:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 09:15:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 09:15:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 09:15:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 09:15:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 09:15:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 09:15:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 09:15:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 09:15:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 09:15:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 09:15:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... 
+[2025-09-11 09:15:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 09:15:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 09:15:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 09:15:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 09:15:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 09:15:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 09:15:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 09:15:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 09:15:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 09:15:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:15:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:15:30] [Rank 0] PRINT: step:1200/10000 val_loss:5.2703 total_sharp:1.1375e-03 L1_sharp:1.7595e-02 L2_sharp:5.2624e-03 L3_sharp:2.5287e-03 L4_sharp:2.9820e-03 L5_sharp:5.0879e-03 L6_sharp:3.8291e-03 L7_sharp:5.8919e-03 L8_sharp:7.5601e-03 L9_sharp:8.0493e-03 L10_sharp:8.8006e-03 L11_sharp:1.0668e-02 L12_sharp:5.7915e-02 total_fnorm:3.9000e+01 total_l1_linf:8.2944e+04 total_spectral:1.9500e+01 L1_fnorm:1.2656e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2422e+00 L5_fnorm:1.2344e+00 L6_fnorm:1.2500e+00 L7_fnorm:1.2500e+00 L8_fnorm:1.2344e+00 L9_fnorm:1.2500e+00 L10_fnorm:1.2578e+00 L11_fnorm:1.2578e+00 L12_fnorm:1.2188e+00 L1_l1linf:3.7500e-01 L2_l1linf:3.6328e-01 L3_l1linf:3.6328e-01 L4_l1linf:3.6719e-01 L5_l1linf:3.6523e-01 L6_l1linf:3.6328e-01 L7_l1linf:3.6719e-01 L8_l1linf:3.7305e-01 L9_l1linf:3.7891e-01 L10_l1linf:3.7695e-01 L11_l1linf:3.8086e-01 L12_l1linf:3.3789e-01 L1_spectral:1.4086e-02 L2_spectral:1.4070e-02 L3_spectral:1.4013e-02 L4_spectral:1.3991e-02 L5_spectral:1.3947e-02 L6_spectral:1.3946e-02 L7_spectral:1.3943e-02 L8_spectral:1.4009e-02 L9_spectral:1.3984e-02 L10_spectral:1.3923e-02 L11_spectral:1.3855e-02 L12_spectral:1.3924e-02 train_time:74014ms step_avg:61.68ms +[2025-09-11 09:15:30] [Rank 0] PRINT: step:1200/10000 val_loss:5.2703 total_sharp:1.1375e-03 L1_sharp:1.7595e-02 L2_sharp:5.2624e-03 L3_sharp:2.5287e-03 L4_sharp:2.9820e-03 L5_sharp:5.0879e-03 L6_sharp:3.8291e-03 L7_sharp:5.8919e-03 L8_sharp:7.5601e-03 L9_sharp:8.0493e-03 L10_sharp:8.8006e-03 L11_sharp:1.0668e-02 L12_sharp:5.7915e-02 total_fnorm:3.9000e+01 total_l1_linf:8.2944e+04 total_spectral:1.9500e+01 L1_fnorm:1.2656e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2422e+00 L5_fnorm:1.2344e+00 L6_fnorm:1.2500e+00 L7_fnorm:1.2500e+00 L8_fnorm:1.2344e+00 L9_fnorm:1.2500e+00 L10_fnorm:1.2578e+00 L11_fnorm:1.2578e+00 L12_fnorm:1.2188e+00 L1_l1linf:3.7500e-01 L2_l1linf:3.6328e-01 L3_l1linf:3.6328e-01 L4_l1linf:3.6719e-01 L5_l1linf:3.6523e-01 
L6_l1linf:3.6328e-01 L7_l1linf:3.6719e-01 L8_l1linf:3.7305e-01 L9_l1linf:3.7891e-01 L10_l1linf:3.7695e-01 L11_l1linf:3.8086e-01 L12_l1linf:3.3789e-01 L1_spectral:1.4086e-02 L2_spectral:1.4070e-02 L3_spectral:1.4013e-02 L4_spectral:1.3991e-02 L5_spectral:1.3947e-02 L6_spectral:1.3946e-02 L7_spectral:1.3943e-02 L8_spectral:1.4009e-02 L9_spectral:1.3984e-02 L10_spectral:1.3923e-02 L11_spectral:1.3855e-02 L12_spectral:1.3924e-02 train_time:74014ms step_avg:61.68ms +[2025-09-11 09:15:32] [Rank 0] step:1201/10000 train_time:75096ms step_avg:62.53ms +[2025-09-11 09:15:32] [Rank 0] step:1201/10000 train_time:75096ms step_avg:62.53ms +[2025-09-11 09:15:32] [Rank 0] step:1221/10000 train_time:75726ms step_avg:62.02ms +[2025-09-11 09:15:32] [Rank 0] step:1221/10000 train_time:75726ms step_avg:62.02ms +[2025-09-11 09:15:33] [Rank 0] step:1241/10000 train_time:76369ms step_avg:61.54ms +[2025-09-11 09:15:33] [Rank 0] step:1241/10000 train_time:76369ms step_avg:61.54ms +[2025-09-11 09:15:33] [Rank 0] step:1261/10000 train_time:77011ms step_avg:61.07ms +[2025-09-11 09:15:33] [Rank 0] step:1261/10000 train_time:77011ms step_avg:61.07ms +[2025-09-11 09:15:34] [Rank 0] step:1281/10000 train_time:77653ms step_avg:60.62ms +[2025-09-11 09:15:34] [Rank 0] step:1281/10000 train_time:77653ms step_avg:60.62ms +[2025-09-11 09:15:35] [Rank 0] step:1301/10000 train_time:78606ms step_avg:60.42ms +[2025-09-11 09:15:35] [Rank 0] step:1301/10000 train_time:78606ms step_avg:60.42ms +[2025-09-11 09:15:36] [Rank 0] step:1321/10000 train_time:79515ms step_avg:60.19ms +[2025-09-11 09:15:36] [Rank 0] step:1321/10000 train_time:79515ms step_avg:60.19ms +[2025-09-11 09:15:37] [Rank 0] step:1341/10000 train_time:80156ms step_avg:59.77ms +[2025-09-11 09:15:37] [Rank 0] step:1341/10000 train_time:80156ms step_avg:59.77ms +[2025-09-11 09:15:37] [Rank 0] step:1361/10000 train_time:80952ms step_avg:59.48ms +[2025-09-11 09:15:37] [Rank 0] step:1361/10000 train_time:80952ms step_avg:59.48ms +[2025-09-11 09:15:38] 
[Rank 0] step:1381/10000 train_time:81740ms step_avg:59.19ms +[2025-09-11 09:15:38] [Rank 0] step:1381/10000 train_time:81740ms step_avg:59.19ms +[2025-09-11 09:15:39] [Rank 0] step:1401/10000 train_time:82382ms step_avg:58.80ms +[2025-09-11 09:15:39] [Rank 0] step:1401/10000 train_time:82382ms step_avg:58.80ms +[2025-09-11 09:15:39] [Rank 0] step:1421/10000 train_time:83023ms step_avg:58.43ms +[2025-09-11 09:15:39] [Rank 0] step:1421/10000 train_time:83023ms step_avg:58.43ms +[2025-09-11 09:15:40] [Rank 0] step:1441/10000 train_time:83664ms step_avg:58.06ms +[2025-09-11 09:15:40] [Rank 0] step:1441/10000 train_time:83664ms step_avg:58.06ms +[2025-09-11 09:15:41] [Rank 0] step:1461/10000 train_time:84305ms step_avg:57.70ms +[2025-09-11 09:15:41] [Rank 0] step:1461/10000 train_time:84305ms step_avg:57.70ms +[2025-09-11 09:15:41] [Rank 0] step:1481/10000 train_time:84946ms step_avg:57.36ms +[2025-09-11 09:15:41] [Rank 0] step:1481/10000 train_time:84946ms step_avg:57.36ms +[2025-09-11 09:15:42] [Rank 0] step:1501/10000 train_time:85591ms step_avg:57.02ms +[2025-09-11 09:15:42] [Rank 0] step:1501/10000 train_time:85591ms step_avg:57.02ms +[2025-09-11 09:15:43] [Rank 0] step:1521/10000 train_time:86238ms step_avg:56.70ms +[2025-09-11 09:15:43] [Rank 0] step:1521/10000 train_time:86238ms step_avg:56.70ms +[2025-09-11 09:15:43] [Rank 0] step:1541/10000 train_time:86883ms step_avg:56.38ms +[2025-09-11 09:15:43] [Rank 0] step:1541/10000 train_time:86883ms step_avg:56.38ms +[2025-09-11 09:15:44] [Rank 0] step:1561/10000 train_time:87529ms step_avg:56.07ms +[2025-09-11 09:15:44] [Rank 0] step:1561/10000 train_time:87529ms step_avg:56.07ms +[2025-09-11 09:15:45] [Rank 0] step:1581/10000 train_time:88174ms step_avg:55.77ms +[2025-09-11 09:15:45] [Rank 0] step:1581/10000 train_time:88174ms step_avg:55.77ms +[2025-09-11 09:15:45] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 09:15:45] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 09:15:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 09:15:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 09:15:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 09:15:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 09:15:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:15:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:15:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 09:15:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 09:15:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 09:15:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 09:15:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 09:15:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 09:15:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 09:15:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 09:15:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 09:15:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 09:15:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 09:15:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 09:15:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 09:15:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 09:15:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 09:15:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 09:15:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 09:15:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 09:15:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 09:15:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 09:15:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 09:15:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 09:15:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 09:15:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 09:15:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 09:15:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 09:15:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 09:15:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 09:15:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 09:15:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 09:15:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 09:15:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 09:15:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 09:15:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 09:15:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 09:15:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 09:15:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:15:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:15:55] [Rank 0] PRINT: step:1600/10000 val_loss:5.0910 total_sharp:9.7037e-04 L1_sharp:1.2504e-02 L2_sharp:2.5214e-03 L3_sharp:1.9506e-03 L4_sharp:2.6124e-03 L5_sharp:4.5101e-03 L6_sharp:3.3759e-03 L7_sharp:4.3891e-03 L8_sharp:6.5936e-03 L9_sharp:6.1703e-03 L10_sharp:7.1090e-03 L11_sharp:9.2029e-03 L12_sharp:6.4109e-02 total_fnorm:3.7000e+01 total_l1_linf:7.6800e+04 total_spectral:1.8750e+01 L1_fnorm:1.2656e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2344e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2344e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2422e+00 L1_l1linf:3.6719e-01 L2_l1linf:3.5938e-01 L3_l1linf:3.5156e-01 L4_l1linf:3.5547e-01 L5_l1linf:3.4961e-01 L6_l1linf:3.4961e-01 L7_l1linf:3.5352e-01 L8_l1linf:3.5547e-01 L9_l1linf:3.5352e-01 L10_l1linf:3.6133e-01 L11_l1linf:3.6914e-01 L12_l1linf:3.3008e-01 L1_spectral:1.4483e-02 L2_spectral:1.4442e-02 L3_spectral:1.4383e-02 L4_spectral:1.4333e-02 L5_spectral:1.4263e-02 L6_spectral:1.4259e-02 L7_spectral:1.4271e-02 L8_spectral:1.4535e-02 L9_spectral:1.4409e-02 L10_spectral:1.4245e-02 L11_spectral:1.4267e-02 L12_spectral:1.4401e-02 train_time:88801ms step_avg:55.50ms +[2025-09-11 09:15:55] [Rank 0] PRINT: step:1600/10000 
val_loss:5.0910 total_sharp:9.7037e-04 L1_sharp:1.2504e-02 L2_sharp:2.5214e-03 L3_sharp:1.9506e-03 L4_sharp:2.6124e-03 L5_sharp:4.5101e-03 L6_sharp:3.3759e-03 L7_sharp:4.3891e-03 L8_sharp:6.5936e-03 L9_sharp:6.1703e-03 L10_sharp:7.1090e-03 L11_sharp:9.2029e-03 L12_sharp:6.4109e-02 total_fnorm:3.7000e+01 total_l1_linf:7.6800e+04 total_spectral:1.8750e+01 L1_fnorm:1.2656e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2344e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2344e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2422e+00 L1_l1linf:3.6719e-01 L2_l1linf:3.5938e-01 L3_l1linf:3.5156e-01 L4_l1linf:3.5547e-01 L5_l1linf:3.4961e-01 L6_l1linf:3.4961e-01 L7_l1linf:3.5352e-01 L8_l1linf:3.5547e-01 L9_l1linf:3.5352e-01 L10_l1linf:3.6133e-01 L11_l1linf:3.6914e-01 L12_l1linf:3.3008e-01 L1_spectral:1.4483e-02 L2_spectral:1.4442e-02 L3_spectral:1.4383e-02 L4_spectral:1.4333e-02 L5_spectral:1.4263e-02 L6_spectral:1.4259e-02 L7_spectral:1.4271e-02 L8_spectral:1.4535e-02 L9_spectral:1.4409e-02 L10_spectral:1.4245e-02 L11_spectral:1.4267e-02 L12_spectral:1.4401e-02 train_time:88801ms step_avg:55.50ms +[2025-09-11 09:15:56] [Rank 0] step:1601/10000 train_time:89933ms step_avg:56.17ms +[2025-09-11 09:15:56] [Rank 0] step:1601/10000 train_time:89933ms step_avg:56.17ms +[2025-09-11 09:15:57] [Rank 0] step:1621/10000 train_time:90575ms step_avg:55.88ms +[2025-09-11 09:15:57] [Rank 0] step:1621/10000 train_time:90575ms step_avg:55.88ms +[2025-09-11 09:15:58] [Rank 0] step:1641/10000 train_time:91221ms step_avg:55.59ms +[2025-09-11 09:15:58] [Rank 0] step:1641/10000 train_time:91221ms step_avg:55.59ms +[2025-09-11 09:15:58] [Rank 0] step:1661/10000 train_time:91868ms step_avg:55.31ms +[2025-09-11 09:15:58] [Rank 0] step:1661/10000 train_time:91868ms step_avg:55.31ms +[2025-09-11 09:15:59] [Rank 0] step:1681/10000 train_time:92514ms step_avg:55.04ms +[2025-09-11 09:15:59] [Rank 0] step:1681/10000 train_time:92514ms 
step_avg:55.04ms +[2025-09-11 09:15:59] [Rank 0] step:1701/10000 train_time:93160ms step_avg:54.77ms +[2025-09-11 09:15:59] [Rank 0] step:1701/10000 train_time:93160ms step_avg:54.77ms +[2025-09-11 09:16:00] [Rank 0] step:1721/10000 train_time:93808ms step_avg:54.51ms +[2025-09-11 09:16:00] [Rank 0] step:1721/10000 train_time:93808ms step_avg:54.51ms +[2025-09-11 09:16:01] [Rank 0] step:1741/10000 train_time:94453ms step_avg:54.25ms +[2025-09-11 09:16:01] [Rank 0] step:1741/10000 train_time:94453ms step_avg:54.25ms +[2025-09-11 09:16:01] [Rank 0] step:1761/10000 train_time:95100ms step_avg:54.00ms +[2025-09-11 09:16:01] [Rank 0] step:1761/10000 train_time:95100ms step_avg:54.00ms +[2025-09-11 09:16:02] [Rank 0] step:1781/10000 train_time:95747ms step_avg:53.76ms +[2025-09-11 09:16:02] [Rank 0] step:1781/10000 train_time:95747ms step_avg:53.76ms +[2025-09-11 09:16:03] [Rank 0] step:1801/10000 train_time:96393ms step_avg:53.52ms +[2025-09-11 09:16:03] [Rank 0] step:1801/10000 train_time:96393ms step_avg:53.52ms +[2025-09-11 09:16:03] [Rank 0] step:1821/10000 train_time:97039ms step_avg:53.29ms +[2025-09-11 09:16:03] [Rank 0] step:1821/10000 train_time:97039ms step_avg:53.29ms +[2025-09-11 09:16:04] [Rank 0] step:1841/10000 train_time:97685ms step_avg:53.06ms +[2025-09-11 09:16:04] [Rank 0] step:1841/10000 train_time:97685ms step_avg:53.06ms +[2025-09-11 09:16:05] [Rank 0] step:1861/10000 train_time:98331ms step_avg:52.84ms +[2025-09-11 09:16:05] [Rank 0] step:1861/10000 train_time:98331ms step_avg:52.84ms +[2025-09-11 09:16:05] [Rank 0] step:1881/10000 train_time:98977ms step_avg:52.62ms +[2025-09-11 09:16:05] [Rank 0] step:1881/10000 train_time:98977ms step_avg:52.62ms +[2025-09-11 09:16:06] [Rank 0] step:1901/10000 train_time:99623ms step_avg:52.41ms +[2025-09-11 09:16:06] [Rank 0] step:1901/10000 train_time:99623ms step_avg:52.41ms +[2025-09-11 09:16:07] [Rank 0] step:1921/10000 train_time:100269ms step_avg:52.20ms +[2025-09-11 09:16:07] [Rank 0] step:1921/10000 
train_time:100269ms step_avg:52.20ms +[2025-09-11 09:16:07] [Rank 0] step:1941/10000 train_time:100914ms step_avg:51.99ms +[2025-09-11 09:16:07] [Rank 0] step:1941/10000 train_time:100914ms step_avg:51.99ms +[2025-09-11 09:16:08] [Rank 0] step:1961/10000 train_time:101560ms step_avg:51.79ms +[2025-09-11 09:16:08] [Rank 0] step:1961/10000 train_time:101560ms step_avg:51.79ms +[2025-09-11 09:16:09] [Rank 0] step:1981/10000 train_time:102205ms step_avg:51.59ms +[2025-09-11 09:16:09] [Rank 0] step:1981/10000 train_time:102205ms step_avg:51.59ms +[2025-09-11 09:16:09] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 09:16:09] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 09:16:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 09:16:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 09:16:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 09:16:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 09:16:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:16:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:16:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 09:16:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 09:16:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 09:16:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 09:16:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 09:16:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 09:16:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 09:16:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 09:16:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 09:16:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 09:16:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 09:16:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 09:16:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 09:16:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 09:16:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 09:16:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 09:16:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 09:16:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 09:16:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 09:16:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 09:16:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 09:16:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 09:16:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 09:16:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 09:16:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 09:16:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 09:16:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 09:16:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 09:16:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 09:16:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 09:16:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 09:16:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 09:16:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 09:16:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 09:16:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 09:16:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 09:16:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:16:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:16:20] [Rank 0] PRINT: step:2000/10000 val_loss:4.9509 total_sharp:6.7645e-04 L1_sharp:1.0872e-02 L2_sharp:1.0725e-03 L3_sharp:2.0812e-03 L4_sharp:1.7395e-03 L5_sharp:3.3738e-03 L6_sharp:2.3543e-03 L7_sharp:3.4491e-03 L8_sharp:6.2848e-03 L9_sharp:4.4189e-03 L10_sharp:5.4258e-03 L11_sharp:7.7241e-03 L12_sharp:5.5401e-02 total_fnorm:3.8250e+01 total_l1_linf:8.0384e+04 total_spectral:1.9375e+01 L1_fnorm:1.2734e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2422e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2344e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2578e+00 L1_l1linf:3.6719e-01 L2_l1linf:3.4961e-01 L3_l1linf:3.4570e-01 L4_l1linf:3.4375e-01 L5_l1linf:3.4180e-01 L6_l1linf:3.4180e-01 L7_l1linf:3.4375e-01 L8_l1linf:3.4180e-01 L9_l1linf:3.3984e-01 L10_l1linf:3.5352e-01 L11_l1linf:3.5156e-01 L12_l1linf:3.3203e-01 L1_spectral:1.4863e-02 L2_spectral:1.4654e-02 L3_spectral:1.4638e-02 L4_spectral:1.4663e-02 L5_spectral:1.4582e-02 L6_spectral:1.4514e-02 L7_spectral:1.4535e-02 L8_spectral:1.4899e-02 L9_spectral:1.4883e-02 L10_spectral:1.4558e-02 L11_spectral:1.4572e-02 L12_spectral:1.4828e-02 train_time:102833ms step_avg:51.42ms +[2025-09-11 09:16:20] [Rank 0] PRINT: step:2000/10000 val_loss:4.9509 total_sharp:6.7645e-04 L1_sharp:1.0872e-02 L2_sharp:1.0725e-03 L3_sharp:2.0812e-03 L4_sharp:1.7395e-03 L5_sharp:3.3738e-03 L6_sharp:2.3543e-03 L7_sharp:3.4491e-03 L8_sharp:6.2848e-03 L9_sharp:4.4189e-03 L10_sharp:5.4258e-03 L11_sharp:7.7241e-03 L12_sharp:5.5401e-02 total_fnorm:3.8250e+01 total_l1_linf:8.0384e+04 total_spectral:1.9375e+01 L1_fnorm:1.2734e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2422e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2344e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2578e+00 L1_l1linf:3.6719e-01 L2_l1linf:3.4961e-01 L3_l1linf:3.4570e-01 L4_l1linf:3.4375e-01 L5_l1linf:3.4180e-01 
L6_l1linf:3.4180e-01 L7_l1linf:3.4375e-01 L8_l1linf:3.4180e-01 L9_l1linf:3.3984e-01 L10_l1linf:3.5352e-01 L11_l1linf:3.5156e-01 L12_l1linf:3.3203e-01 L1_spectral:1.4863e-02 L2_spectral:1.4654e-02 L3_spectral:1.4638e-02 L4_spectral:1.4663e-02 L5_spectral:1.4582e-02 L6_spectral:1.4514e-02 L7_spectral:1.4535e-02 L8_spectral:1.4899e-02 L9_spectral:1.4883e-02 L10_spectral:1.4558e-02 L11_spectral:1.4572e-02 L12_spectral:1.4828e-02 train_time:102833ms step_avg:51.42ms +[2025-09-11 09:16:21] [Rank 0] step:2001/10000 train_time:104404ms step_avg:52.18ms +[2025-09-11 09:16:21] [Rank 0] step:2001/10000 train_time:104404ms step_avg:52.18ms +[2025-09-11 09:16:22] [Rank 0] step:2021/10000 train_time:105064ms step_avg:51.99ms +[2025-09-11 09:16:22] [Rank 0] step:2021/10000 train_time:105064ms step_avg:51.99ms +[2025-09-11 09:16:23] [Rank 0] step:2041/10000 train_time:105709ms step_avg:51.79ms +[2025-09-11 09:16:23] [Rank 0] step:2041/10000 train_time:105709ms step_avg:51.79ms +[2025-09-11 09:16:23] [Rank 0] step:2061/10000 train_time:106359ms step_avg:51.61ms +[2025-09-11 09:16:23] [Rank 0] step:2061/10000 train_time:106359ms step_avg:51.61ms +[2025-09-11 09:16:24] [Rank 0] step:2081/10000 train_time:107005ms step_avg:51.42ms +[2025-09-11 09:16:24] [Rank 0] step:2081/10000 train_time:107005ms step_avg:51.42ms +[2025-09-11 09:16:25] [Rank 0] step:2101/10000 train_time:107651ms step_avg:51.24ms +[2025-09-11 09:16:25] [Rank 0] step:2101/10000 train_time:107651ms step_avg:51.24ms +[2025-09-11 09:16:25] [Rank 0] step:2121/10000 train_time:108296ms step_avg:51.06ms +[2025-09-11 09:16:25] [Rank 0] step:2121/10000 train_time:108296ms step_avg:51.06ms +[2025-09-11 09:16:26] [Rank 0] step:2141/10000 train_time:108941ms step_avg:50.88ms +[2025-09-11 09:16:26] [Rank 0] step:2141/10000 train_time:108941ms step_avg:50.88ms +[2025-09-11 09:16:27] [Rank 0] step:2161/10000 train_time:109591ms step_avg:50.71ms +[2025-09-11 09:16:27] [Rank 0] step:2161/10000 train_time:109591ms step_avg:50.71ms 
+[2025-09-11 09:16:27] [Rank 0] step:2181/10000 train_time:110236ms step_avg:50.54ms +[2025-09-11 09:16:27] [Rank 0] step:2181/10000 train_time:110236ms step_avg:50.54ms +[2025-09-11 09:16:28] [Rank 0] step:2201/10000 train_time:110882ms step_avg:50.38ms +[2025-09-11 09:16:28] [Rank 0] step:2201/10000 train_time:110882ms step_avg:50.38ms +[2025-09-11 09:16:29] [Rank 0] step:2221/10000 train_time:111527ms step_avg:50.21ms +[2025-09-11 09:16:29] [Rank 0] step:2221/10000 train_time:111527ms step_avg:50.21ms +[2025-09-11 09:16:29] [Rank 0] step:2241/10000 train_time:112185ms step_avg:50.06ms +[2025-09-11 09:16:29] [Rank 0] step:2241/10000 train_time:112185ms step_avg:50.06ms +[2025-09-11 09:16:30] [Rank 0] step:2261/10000 train_time:112843ms step_avg:49.91ms +[2025-09-11 09:16:30] [Rank 0] step:2261/10000 train_time:112843ms step_avg:49.91ms +[2025-09-11 09:16:31] [Rank 0] step:2281/10000 train_time:113502ms step_avg:49.76ms +[2025-09-11 09:16:31] [Rank 0] step:2281/10000 train_time:113502ms step_avg:49.76ms +[2025-09-11 09:16:31] [Rank 0] step:2301/10000 train_time:114160ms step_avg:49.61ms +[2025-09-11 09:16:31] [Rank 0] step:2301/10000 train_time:114160ms step_avg:49.61ms +[2025-09-11 09:16:32] [Rank 0] step:2321/10000 train_time:114819ms step_avg:49.47ms +[2025-09-11 09:16:32] [Rank 0] step:2321/10000 train_time:114819ms step_avg:49.47ms +[2025-09-11 09:16:32] [Rank 0] step:2341/10000 train_time:115477ms step_avg:49.33ms +[2025-09-11 09:16:32] [Rank 0] step:2341/10000 train_time:115477ms step_avg:49.33ms +[2025-09-11 09:16:33] [Rank 0] step:2361/10000 train_time:116135ms step_avg:49.19ms +[2025-09-11 09:16:33] [Rank 0] step:2361/10000 train_time:116135ms step_avg:49.19ms +[2025-09-11 09:16:34] [Rank 0] step:2381/10000 train_time:116793ms step_avg:49.05ms +[2025-09-11 09:16:34] [Rank 0] step:2381/10000 train_time:116793ms step_avg:49.05ms +[2025-09-11 09:16:34] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 09:16:34] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 09:16:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 09:16:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 09:16:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 09:16:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 09:16:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:16:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:16:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 09:16:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 09:16:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 09:16:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 09:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 09:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 09:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 09:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 09:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 09:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 09:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 09:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 09:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 09:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 09:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 09:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 09:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 09:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 09:16:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 09:16:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 09:16:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 09:16:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 09:16:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 09:16:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 09:16:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 09:16:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 09:16:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 09:16:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 09:16:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 09:16:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 09:16:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 09:16:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 09:16:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 09:16:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 09:16:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 09:16:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 09:16:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:16:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:16:45] [Rank 0] PRINT: step:2400/10000 val_loss:4.8258 total_sharp:9.1481e-04 L1_sharp:1.0849e-02 L2_sharp:1.2249e-03 L3_sharp:1.0252e-03 L4_sharp:1.6959e-03 L5_sharp:2.4167e-03 L6_sharp:1.5754e-03 L7_sharp:2.4874e-03 L8_sharp:5.0139e-03 L9_sharp:5.1377e-03 L10_sharp:5.9181e-03 L11_sharp:8.2985e-03 L12_sharp:7.9266e-02 total_fnorm:3.5500e+01 total_l1_linf:7.1680e+04 total_spectral:1.7875e+01 L1_fnorm:1.2656e+00 L2_fnorm:1.2188e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2422e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2422e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2734e+00 L12_fnorm:1.2656e+00 L1_l1linf:3.5742e-01 L2_l1linf:3.3984e-01 L3_l1linf:3.3789e-01 L4_l1linf:3.3984e-01 L5_l1linf:3.3203e-01 L6_l1linf:3.3594e-01 L7_l1linf:3.3984e-01 L8_l1linf:3.3008e-01 L9_l1linf:3.3594e-01 L10_l1linf:3.3594e-01 L11_l1linf:3.3984e-01 L12_l1linf:3.2422e-01 L1_spectral:1.5140e-02 L2_spectral:1.4828e-02 L3_spectral:1.4842e-02 L4_spectral:1.4859e-02 L5_spectral:1.4749e-02 L6_spectral:1.4828e-02 L7_spectral:1.4906e-02 L8_spectral:1.5195e-02 L9_spectral:1.4948e-02 L10_spectral:1.4847e-02 L11_spectral:1.4877e-02 L12_spectral:1.5121e-02 train_time:117446ms step_avg:48.94ms +[2025-09-11 09:16:45] [Rank 0] PRINT: step:2400/10000 
val_loss:4.8258 total_sharp:9.1481e-04 L1_sharp:1.0849e-02 L2_sharp:1.2249e-03 L3_sharp:1.0252e-03 L4_sharp:1.6959e-03 L5_sharp:2.4167e-03 L6_sharp:1.5754e-03 L7_sharp:2.4874e-03 L8_sharp:5.0139e-03 L9_sharp:5.1377e-03 L10_sharp:5.9181e-03 L11_sharp:8.2985e-03 L12_sharp:7.9266e-02 total_fnorm:3.5500e+01 total_l1_linf:7.1680e+04 total_spectral:1.7875e+01 L1_fnorm:1.2656e+00 L2_fnorm:1.2188e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2422e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2422e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2734e+00 L12_fnorm:1.2656e+00 L1_l1linf:3.5742e-01 L2_l1linf:3.3984e-01 L3_l1linf:3.3789e-01 L4_l1linf:3.3984e-01 L5_l1linf:3.3203e-01 L6_l1linf:3.3594e-01 L7_l1linf:3.3984e-01 L8_l1linf:3.3008e-01 L9_l1linf:3.3594e-01 L10_l1linf:3.3594e-01 L11_l1linf:3.3984e-01 L12_l1linf:3.2422e-01 L1_spectral:1.5140e-02 L2_spectral:1.4828e-02 L3_spectral:1.4842e-02 L4_spectral:1.4859e-02 L5_spectral:1.4749e-02 L6_spectral:1.4828e-02 L7_spectral:1.4906e-02 L8_spectral:1.5195e-02 L9_spectral:1.4948e-02 L10_spectral:1.4847e-02 L11_spectral:1.4877e-02 L12_spectral:1.5121e-02 train_time:117446ms step_avg:48.94ms +[2025-09-11 09:16:46] [Rank 0] step:2401/10000 train_time:118618ms step_avg:49.40ms +[2025-09-11 09:16:46] [Rank 0] step:2401/10000 train_time:118618ms step_avg:49.40ms +[2025-09-11 09:16:47] [Rank 0] step:2421/10000 train_time:119281ms step_avg:49.27ms +[2025-09-11 09:16:47] [Rank 0] step:2421/10000 train_time:119281ms step_avg:49.27ms +[2025-09-11 09:16:47] [Rank 0] step:2441/10000 train_time:119941ms step_avg:49.14ms +[2025-09-11 09:16:47] [Rank 0] step:2441/10000 train_time:119941ms step_avg:49.14ms +[2025-09-11 09:16:48] [Rank 0] step:2461/10000 train_time:120601ms step_avg:49.00ms +[2025-09-11 09:16:48] [Rank 0] step:2461/10000 train_time:120601ms step_avg:49.00ms +[2025-09-11 09:16:49] [Rank 0] step:2481/10000 train_time:121260ms step_avg:48.88ms +[2025-09-11 09:16:49] [Rank 0] step:2481/10000 
train_time:121260ms step_avg:48.88ms +[2025-09-11 09:16:49] [Rank 0] step:2501/10000 train_time:121919ms step_avg:48.75ms +[2025-09-11 09:16:49] [Rank 0] step:2501/10000 train_time:121919ms step_avg:48.75ms +[2025-09-11 09:16:50] [Rank 0] step:2521/10000 train_time:122579ms step_avg:48.62ms +[2025-09-11 09:16:50] [Rank 0] step:2521/10000 train_time:122579ms step_avg:48.62ms +[2025-09-11 09:16:51] [Rank 0] step:2541/10000 train_time:123238ms step_avg:48.50ms +[2025-09-11 09:16:51] [Rank 0] step:2541/10000 train_time:123238ms step_avg:48.50ms +[2025-09-11 09:16:51] [Rank 0] step:2561/10000 train_time:123897ms step_avg:48.38ms +[2025-09-11 09:16:51] [Rank 0] step:2561/10000 train_time:123897ms step_avg:48.38ms +[2025-09-11 09:16:52] [Rank 0] step:2581/10000 train_time:124556ms step_avg:48.26ms +[2025-09-11 09:16:52] [Rank 0] step:2581/10000 train_time:124556ms step_avg:48.26ms +[2025-09-11 09:16:53] [Rank 0] step:2601/10000 train_time:125215ms step_avg:48.14ms +[2025-09-11 09:16:53] [Rank 0] step:2601/10000 train_time:125215ms step_avg:48.14ms +[2025-09-11 09:16:53] [Rank 0] step:2621/10000 train_time:125873ms step_avg:48.02ms +[2025-09-11 09:16:53] [Rank 0] step:2621/10000 train_time:125873ms step_avg:48.02ms +[2025-09-11 09:16:54] [Rank 0] step:2641/10000 train_time:126535ms step_avg:47.91ms +[2025-09-11 09:16:54] [Rank 0] step:2641/10000 train_time:126535ms step_avg:47.91ms +[2025-09-11 09:16:55] [Rank 0] step:2661/10000 train_time:127194ms step_avg:47.80ms +[2025-09-11 09:16:55] [Rank 0] step:2661/10000 train_time:127194ms step_avg:47.80ms +[2025-09-11 09:16:55] [Rank 0] step:2681/10000 train_time:127853ms step_avg:47.69ms +[2025-09-11 09:16:55] [Rank 0] step:2681/10000 train_time:127853ms step_avg:47.69ms +[2025-09-11 09:16:56] [Rank 0] step:2701/10000 train_time:128513ms step_avg:47.58ms +[2025-09-11 09:16:56] [Rank 0] step:2701/10000 train_time:128513ms step_avg:47.58ms +[2025-09-11 09:16:57] [Rank 0] step:2721/10000 train_time:129175ms step_avg:47.47ms 
+[2025-09-11 09:16:57] [Rank 0] step:2721/10000 train_time:129175ms step_avg:47.47ms +[2025-09-11 09:16:57] [Rank 0] step:2741/10000 train_time:129835ms step_avg:47.37ms +[2025-09-11 09:16:57] [Rank 0] step:2741/10000 train_time:129835ms step_avg:47.37ms +[2025-09-11 09:16:58] [Rank 0] step:2761/10000 train_time:130494ms step_avg:47.26ms +[2025-09-11 09:16:58] [Rank 0] step:2761/10000 train_time:130494ms step_avg:47.26ms +[2025-09-11 09:16:59] [Rank 0] step:2781/10000 train_time:131152ms step_avg:47.16ms +[2025-09-11 09:16:59] [Rank 0] step:2781/10000 train_time:131152ms step_avg:47.16ms +[2025-09-11 09:16:59] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 09:16:59] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 09:17:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 09:17:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 09:17:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 09:17:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 09:17:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:17:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:17:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 09:17:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 09:17:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 09:17:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 09:17:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 09:17:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 09:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 09:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 09:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 09:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 09:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 09:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 09:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 09:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 09:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 09:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 09:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 09:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 09:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 09:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 09:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 09:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 09:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 09:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 09:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 09:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 09:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 09:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 09:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 09:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 09:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 09:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 09:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 09:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 09:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 09:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 09:17:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:17:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:17:09] [Rank 0] PRINT: step:2800/10000 val_loss:4.7553 total_sharp:7.8239e-04 L1_sharp:7.7390e-03 L2_sharp:1.4639e-03 L3_sharp:7.7434e-04 L4_sharp:5.3535e-04 L5_sharp:1.6243e-03 L6_sharp:1.4971e-03 L7_sharp:2.1023e-03 L8_sharp:4.6406e-03 L9_sharp:4.5059e-03 L10_sharp:5.5364e-03 L11_sharp:7.8342e-03 L12_sharp:2.7499e-02 total_fnorm:3.4000e+01 total_l1_linf:6.8096e+04 total_spectral:1.7125e+01 L1_fnorm:1.2734e+00 L2_fnorm:1.2188e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2422e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2812e+00 L12_fnorm:1.2734e+00 L1_l1linf:3.5547e-01 L2_l1linf:3.3789e-01 L3_l1linf:3.3203e-01 L4_l1linf:3.3008e-01 L5_l1linf:3.3008e-01 L6_l1linf:3.2617e-01 L7_l1linf:3.3008e-01 L8_l1linf:3.3008e-01 L9_l1linf:3.2031e-01 L10_l1linf:3.2617e-01 L11_l1linf:3.3398e-01 L12_l1linf:3.2812e-01 L1_spectral:1.5325e-02 L2_spectral:1.5021e-02 L3_spectral:1.5123e-02 L4_spectral:1.5095e-02 L5_spectral:1.5042e-02 L6_spectral:1.5074e-02 L7_spectral:1.5056e-02 L8_spectral:1.5384e-02 L9_spectral:1.5194e-02 L10_spectral:1.5141e-02 L11_spectral:1.5219e-02 L12_spectral:1.5265e-02 train_time:131793ms step_avg:47.07ms +[2025-09-11 09:17:09] [Rank 0] PRINT: step:2800/10000 val_loss:4.7553 total_sharp:7.8239e-04 L1_sharp:7.7390e-03 L2_sharp:1.4639e-03 L3_sharp:7.7434e-04 L4_sharp:5.3535e-04 L5_sharp:1.6243e-03 L6_sharp:1.4971e-03 L7_sharp:2.1023e-03 L8_sharp:4.6406e-03 L9_sharp:4.5059e-03 L10_sharp:5.5364e-03 L11_sharp:7.8342e-03 L12_sharp:2.7499e-02 total_fnorm:3.4000e+01 total_l1_linf:6.8096e+04 total_spectral:1.7125e+01 L1_fnorm:1.2734e+00 L2_fnorm:1.2188e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2422e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2812e+00 L12_fnorm:1.2734e+00 L1_l1linf:3.5547e-01 L2_l1linf:3.3789e-01 L3_l1linf:3.3203e-01 L4_l1linf:3.3008e-01 L5_l1linf:3.3008e-01 
L6_l1linf:3.2617e-01 L7_l1linf:3.3008e-01 L8_l1linf:3.3008e-01 L9_l1linf:3.2031e-01 L10_l1linf:3.2617e-01 L11_l1linf:3.3398e-01 L12_l1linf:3.2812e-01 L1_spectral:1.5325e-02 L2_spectral:1.5021e-02 L3_spectral:1.5123e-02 L4_spectral:1.5095e-02 L5_spectral:1.5042e-02 L6_spectral:1.5074e-02 L7_spectral:1.5056e-02 L8_spectral:1.5384e-02 L9_spectral:1.5194e-02 L10_spectral:1.5141e-02 L11_spectral:1.5219e-02 L12_spectral:1.5265e-02 train_time:131793ms step_avg:47.07ms +[2025-09-11 09:17:10] [Rank 0] step:2801/10000 train_time:132959ms step_avg:47.47ms +[2025-09-11 09:17:10] [Rank 0] step:2801/10000 train_time:132959ms step_avg:47.47ms +[2025-09-11 09:17:11] [Rank 0] step:2821/10000 train_time:133607ms step_avg:47.36ms +[2025-09-11 09:17:11] [Rank 0] step:2821/10000 train_time:133607ms step_avg:47.36ms +[2025-09-11 09:17:12] [Rank 0] step:2841/10000 train_time:134268ms step_avg:47.26ms +[2025-09-11 09:17:12] [Rank 0] step:2841/10000 train_time:134268ms step_avg:47.26ms +[2025-09-11 09:17:12] [Rank 0] step:2861/10000 train_time:134929ms step_avg:47.16ms +[2025-09-11 09:17:12] [Rank 0] step:2861/10000 train_time:134929ms step_avg:47.16ms +[2025-09-11 09:17:13] [Rank 0] step:2881/10000 train_time:135588ms step_avg:47.06ms +[2025-09-11 09:17:13] [Rank 0] step:2881/10000 train_time:135588ms step_avg:47.06ms +[2025-09-11 09:17:14] [Rank 0] step:2901/10000 train_time:136247ms step_avg:46.97ms +[2025-09-11 09:17:14] [Rank 0] step:2901/10000 train_time:136247ms step_avg:46.97ms +[2025-09-11 09:17:14] [Rank 0] step:2921/10000 train_time:136907ms step_avg:46.87ms +[2025-09-11 09:17:14] [Rank 0] step:2921/10000 train_time:136907ms step_avg:46.87ms +[2025-09-11 09:17:15] [Rank 0] step:2941/10000 train_time:137566ms step_avg:46.78ms +[2025-09-11 09:17:15] [Rank 0] step:2941/10000 train_time:137566ms step_avg:46.78ms +[2025-09-11 09:17:16] [Rank 0] step:2961/10000 train_time:138225ms step_avg:46.68ms +[2025-09-11 09:17:16] [Rank 0] step:2961/10000 train_time:138225ms step_avg:46.68ms 
+[2025-09-11 09:17:16] [Rank 0] step:2981/10000 train_time:138887ms step_avg:46.59ms +[2025-09-11 09:17:16] [Rank 0] step:2981/10000 train_time:138887ms step_avg:46.59ms +[2025-09-11 09:17:17] [Rank 0] step:3001/10000 train_time:139549ms step_avg:46.50ms +[2025-09-11 09:17:17] [Rank 0] step:3001/10000 train_time:139549ms step_avg:46.50ms +[2025-09-11 09:17:18] [Rank 0] step:3021/10000 train_time:140212ms step_avg:46.41ms +[2025-09-11 09:17:18] [Rank 0] step:3021/10000 train_time:140212ms step_avg:46.41ms +[2025-09-11 09:17:18] [Rank 0] step:3041/10000 train_time:140874ms step_avg:46.32ms +[2025-09-11 09:17:18] [Rank 0] step:3041/10000 train_time:140874ms step_avg:46.32ms +[2025-09-11 09:17:19] [Rank 0] step:3061/10000 train_time:141537ms step_avg:46.24ms +[2025-09-11 09:17:19] [Rank 0] step:3061/10000 train_time:141537ms step_avg:46.24ms +[2025-09-11 09:17:20] [Rank 0] step:3081/10000 train_time:142199ms step_avg:46.15ms +[2025-09-11 09:17:20] [Rank 0] step:3081/10000 train_time:142199ms step_avg:46.15ms +[2025-09-11 09:17:20] [Rank 0] step:3101/10000 train_time:142861ms step_avg:46.07ms +[2025-09-11 09:17:20] [Rank 0] step:3101/10000 train_time:142861ms step_avg:46.07ms +[2025-09-11 09:17:21] [Rank 0] step:3121/10000 train_time:143524ms step_avg:45.99ms +[2025-09-11 09:17:21] [Rank 0] step:3121/10000 train_time:143524ms step_avg:45.99ms +[2025-09-11 09:17:22] [Rank 0] step:3141/10000 train_time:144186ms step_avg:45.90ms +[2025-09-11 09:17:22] [Rank 0] step:3141/10000 train_time:144186ms step_avg:45.90ms +[2025-09-11 09:17:22] [Rank 0] step:3161/10000 train_time:144848ms step_avg:45.82ms +[2025-09-11 09:17:22] [Rank 0] step:3161/10000 train_time:144848ms step_avg:45.82ms +[2025-09-11 09:17:23] [Rank 0] step:3181/10000 train_time:145510ms step_avg:45.74ms +[2025-09-11 09:17:23] [Rank 0] step:3181/10000 train_time:145510ms step_avg:45.74ms +[2025-09-11 09:17:23] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 09:17:23] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 09:17:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 09:17:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 09:17:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 09:17:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 09:17:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:17:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:17:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 09:17:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 09:17:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 09:17:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 09:17:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 09:17:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 09:17:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 09:17:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 09:17:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 09:17:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 09:17:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 09:17:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 09:17:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 09:17:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 09:17:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 09:17:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 09:17:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 09:17:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 09:17:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 09:17:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 09:17:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 09:17:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 09:17:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 09:17:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 09:17:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 09:17:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 09:17:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 09:17:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 09:17:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 09:17:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 09:17:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 09:17:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 09:17:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 09:17:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 09:17:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 09:17:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 09:17:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:17:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:17:33] [Rank 0] PRINT: step:3200/10000 val_loss:4.6702 total_sharp:5.0352e-04 L1_sharp:7.3945e-03 L2_sharp:3.1246e-04 L3_sharp:9.4395e-04 L4_sharp:1.2988e-03 L5_sharp:2.5825e-03 L6_sharp:1.8691e-03 L7_sharp:2.0022e-03 L8_sharp:4.0007e-03 L9_sharp:4.2651e-03 L10_sharp:4.8253e-03 L11_sharp:6.0114e-03 L12_sharp:4.4661e-02 total_fnorm:3.9000e+01 total_l1_linf:7.9360e+04 total_spectral:1.9625e+01 L1_fnorm:1.2812e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2734e+00 L12_fnorm:1.2734e+00 L1_l1linf:3.5352e-01 L2_l1linf:3.3789e-01 L3_l1linf:3.3008e-01 L4_l1linf:3.2422e-01 L5_l1linf:3.2422e-01 L6_l1linf:3.2617e-01 L7_l1linf:3.2812e-01 L8_l1linf:3.2031e-01 L9_l1linf:3.1836e-01 L10_l1linf:3.1641e-01 L11_l1linf:3.2422e-01 L12_l1linf:3.2227e-01 L1_spectral:1.5549e-02 L2_spectral:1.5168e-02 L3_spectral:1.5292e-02 L4_spectral:1.5235e-02 L5_spectral:1.5139e-02 L6_spectral:1.5196e-02 L7_spectral:1.5209e-02 L8_spectral:1.5555e-02 L9_spectral:1.5475e-02 L10_spectral:1.5314e-02 L11_spectral:1.5352e-02 L12_spectral:1.5567e-02 train_time:146154ms step_avg:45.67ms +[2025-09-11 09:17:33] [Rank 0] PRINT: step:3200/10000 
val_loss:4.6702 total_sharp:5.0352e-04 L1_sharp:7.3945e-03 L2_sharp:3.1246e-04 L3_sharp:9.4395e-04 L4_sharp:1.2988e-03 L5_sharp:2.5825e-03 L6_sharp:1.8691e-03 L7_sharp:2.0022e-03 L8_sharp:4.0007e-03 L9_sharp:4.2651e-03 L10_sharp:4.8253e-03 L11_sharp:6.0114e-03 L12_sharp:4.4661e-02 total_fnorm:3.9000e+01 total_l1_linf:7.9360e+04 total_spectral:1.9625e+01 L1_fnorm:1.2812e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2734e+00 L12_fnorm:1.2734e+00 L1_l1linf:3.5352e-01 L2_l1linf:3.3789e-01 L3_l1linf:3.3008e-01 L4_l1linf:3.2422e-01 L5_l1linf:3.2422e-01 L6_l1linf:3.2617e-01 L7_l1linf:3.2812e-01 L8_l1linf:3.2031e-01 L9_l1linf:3.1836e-01 L10_l1linf:3.1641e-01 L11_l1linf:3.2422e-01 L12_l1linf:3.2227e-01 L1_spectral:1.5549e-02 L2_spectral:1.5168e-02 L3_spectral:1.5292e-02 L4_spectral:1.5235e-02 L5_spectral:1.5139e-02 L6_spectral:1.5196e-02 L7_spectral:1.5209e-02 L8_spectral:1.5555e-02 L9_spectral:1.5475e-02 L10_spectral:1.5314e-02 L11_spectral:1.5352e-02 L12_spectral:1.5567e-02 train_time:146154ms step_avg:45.67ms +[2025-09-11 09:17:35] [Rank 0] step:3201/10000 train_time:147305ms step_avg:46.02ms +[2025-09-11 09:17:35] [Rank 0] step:3201/10000 train_time:147305ms step_avg:46.02ms +[2025-09-11 09:17:35] [Rank 0] step:3221/10000 train_time:147958ms step_avg:45.94ms +[2025-09-11 09:17:35] [Rank 0] step:3221/10000 train_time:147958ms step_avg:45.94ms +[2025-09-11 09:17:36] [Rank 0] step:3241/10000 train_time:148621ms step_avg:45.86ms +[2025-09-11 09:17:36] [Rank 0] step:3241/10000 train_time:148621ms step_avg:45.86ms +[2025-09-11 09:17:37] [Rank 0] step:3261/10000 train_time:149284ms step_avg:45.78ms +[2025-09-11 09:17:37] [Rank 0] step:3261/10000 train_time:149284ms step_avg:45.78ms +[2025-09-11 09:17:37] [Rank 0] step:3281/10000 train_time:149947ms step_avg:45.70ms +[2025-09-11 09:17:37] [Rank 0] step:3281/10000 
train_time:149947ms step_avg:45.70ms +[2025-09-11 09:17:38] [Rank 0] step:3301/10000 train_time:150610ms step_avg:45.63ms +[2025-09-11 09:17:38] [Rank 0] step:3301/10000 train_time:150610ms step_avg:45.63ms +[2025-09-11 09:17:39] [Rank 0] step:3321/10000 train_time:151273ms step_avg:45.55ms +[2025-09-11 09:17:39] [Rank 0] step:3321/10000 train_time:151273ms step_avg:45.55ms +[2025-09-11 09:17:39] [Rank 0] step:3341/10000 train_time:151935ms step_avg:45.48ms +[2025-09-11 09:17:39] [Rank 0] step:3341/10000 train_time:151935ms step_avg:45.48ms +[2025-09-11 09:17:40] [Rank 0] step:3361/10000 train_time:152598ms step_avg:45.40ms +[2025-09-11 09:17:40] [Rank 0] step:3361/10000 train_time:152598ms step_avg:45.40ms +[2025-09-11 09:17:40] [Rank 0] step:3381/10000 train_time:153261ms step_avg:45.33ms +[2025-09-11 09:17:40] [Rank 0] step:3381/10000 train_time:153261ms step_avg:45.33ms +[2025-09-11 09:17:41] [Rank 0] step:3401/10000 train_time:153923ms step_avg:45.26ms +[2025-09-11 09:17:41] [Rank 0] step:3401/10000 train_time:153923ms step_avg:45.26ms +[2025-09-11 09:17:42] [Rank 0] step:3421/10000 train_time:154585ms step_avg:45.19ms +[2025-09-11 09:17:42] [Rank 0] step:3421/10000 train_time:154585ms step_avg:45.19ms +[2025-09-11 09:17:43] [Rank 0] step:3441/10000 train_time:155779ms step_avg:45.27ms +[2025-09-11 09:17:43] [Rank 0] step:3441/10000 train_time:155779ms step_avg:45.27ms +[2025-09-11 09:17:44] [Rank 0] step:3461/10000 train_time:156440ms step_avg:45.20ms +[2025-09-11 09:17:44] [Rank 0] step:3461/10000 train_time:156440ms step_avg:45.20ms +[2025-09-11 09:17:44] [Rank 0] step:3481/10000 train_time:157103ms step_avg:45.13ms +[2025-09-11 09:17:44] [Rank 0] step:3481/10000 train_time:157103ms step_avg:45.13ms +[2025-09-11 09:17:45] [Rank 0] step:3501/10000 train_time:158039ms step_avg:45.14ms +[2025-09-11 09:17:45] [Rank 0] step:3501/10000 train_time:158039ms step_avg:45.14ms +[2025-09-11 09:17:46] [Rank 0] step:3521/10000 train_time:158701ms step_avg:45.07ms 
+[2025-09-11 09:17:46] [Rank 0] step:3521/10000 train_time:158701ms step_avg:45.07ms +[2025-09-11 09:17:47] [Rank 0] step:3541/10000 train_time:159364ms step_avg:45.01ms +[2025-09-11 09:17:47] [Rank 0] step:3541/10000 train_time:159364ms step_avg:45.01ms +[2025-09-11 09:17:47] [Rank 0] step:3561/10000 train_time:160026ms step_avg:44.94ms +[2025-09-11 09:17:47] [Rank 0] step:3561/10000 train_time:160026ms step_avg:44.94ms +[2025-09-11 09:17:48] [Rank 0] step:3581/10000 train_time:160688ms step_avg:44.87ms +[2025-09-11 09:17:48] [Rank 0] step:3581/10000 train_time:160688ms step_avg:44.87ms +[2025-09-11 09:17:49] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 09:17:49] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 09:17:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 09:17:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 09:17:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 09:17:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 09:17:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:17:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:17:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 09:17:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 09:17:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 09:17:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 09:17:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 09:17:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 09:17:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 09:17:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 09:17:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 09:17:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 09:17:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 09:17:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 09:17:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 09:17:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 09:17:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 09:17:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 09:17:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 09:17:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 09:17:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 09:17:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 09:17:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 09:17:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 09:17:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 09:17:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 09:17:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 09:17:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 09:17:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 09:17:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 09:17:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 09:17:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 09:17:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 09:17:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 09:17:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 09:17:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 09:17:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 09:17:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 09:17:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:17:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:17:58] [Rank 0] PRINT: step:3600/10000 val_loss:4.6231 total_sharp:6.5506e-04 L1_sharp:9.7143e-03 L2_sharp:1.4245e-03 L3_sharp:7.2201e-04 L4_sharp:1.1670e-03 L5_sharp:1.6035e-03 L6_sharp:2.1568e-03 L7_sharp:2.1201e-03 L8_sharp:4.1561e-03 L9_sharp:3.9026e-03 L10_sharp:4.5264e-03 L11_sharp:5.7115e-03 L12_sharp:2.7417e-02 total_fnorm:3.3750e+01 total_l1_linf:6.6560e+04 total_spectral:1.7000e+01 L1_fnorm:1.2656e+00 L2_fnorm:1.2188e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2734e+00 L12_fnorm:1.2734e+00 L1_l1linf:3.4961e-01 L2_l1linf:3.3203e-01 L3_l1linf:3.2812e-01 L4_l1linf:3.2227e-01 L5_l1linf:3.1641e-01 L6_l1linf:3.2227e-01 L7_l1linf:3.2227e-01 L8_l1linf:3.1250e-01 L9_l1linf:3.1250e-01 L10_l1linf:3.0859e-01 L11_l1linf:3.1836e-01 L12_l1linf:3.2812e-01 L1_spectral:1.5650e-02 L2_spectral:1.5296e-02 L3_spectral:1.5325e-02 L4_spectral:1.5504e-02 L5_spectral:1.5273e-02 L6_spectral:1.5378e-02 L7_spectral:1.5360e-02 L8_spectral:1.5671e-02 L9_spectral:1.5638e-02 L10_spectral:1.5488e-02 L11_spectral:1.5527e-02 L12_spectral:1.5763e-02 train_time:161331ms step_avg:44.81ms +[2025-09-11 09:17:58] [Rank 0] PRINT: step:3600/10000 val_loss:4.6231 total_sharp:6.5506e-04 L1_sharp:9.7143e-03 L2_sharp:1.4245e-03 L3_sharp:7.2201e-04 L4_sharp:1.1670e-03 L5_sharp:1.6035e-03 L6_sharp:2.1568e-03 L7_sharp:2.1201e-03 L8_sharp:4.1561e-03 L9_sharp:3.9026e-03 L10_sharp:4.5264e-03 L11_sharp:5.7115e-03 L12_sharp:2.7417e-02 total_fnorm:3.3750e+01 total_l1_linf:6.6560e+04 total_spectral:1.7000e+01 L1_fnorm:1.2656e+00 L2_fnorm:1.2188e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2734e+00 L12_fnorm:1.2734e+00 L1_l1linf:3.4961e-01 L2_l1linf:3.3203e-01 L3_l1linf:3.2812e-01 L4_l1linf:3.2227e-01 L5_l1linf:3.1641e-01 
L6_l1linf:3.2227e-01 L7_l1linf:3.2227e-01 L8_l1linf:3.1250e-01 L9_l1linf:3.1250e-01 L10_l1linf:3.0859e-01 L11_l1linf:3.1836e-01 L12_l1linf:3.2812e-01 L1_spectral:1.5650e-02 L2_spectral:1.5296e-02 L3_spectral:1.5325e-02 L4_spectral:1.5504e-02 L5_spectral:1.5273e-02 L6_spectral:1.5378e-02 L7_spectral:1.5360e-02 L8_spectral:1.5671e-02 L9_spectral:1.5638e-02 L10_spectral:1.5488e-02 L11_spectral:1.5527e-02 L12_spectral:1.5763e-02 train_time:161331ms step_avg:44.81ms +[2025-09-11 09:17:59] [Rank 0] step:3601/10000 train_time:162475ms step_avg:45.12ms +[2025-09-11 09:17:59] [Rank 0] step:3601/10000 train_time:162475ms step_avg:45.12ms +[2025-09-11 09:18:00] [Rank 0] step:3621/10000 train_time:163125ms step_avg:45.05ms +[2025-09-11 09:18:00] [Rank 0] step:3621/10000 train_time:163125ms step_avg:45.05ms +[2025-09-11 09:18:01] [Rank 0] step:3641/10000 train_time:163788ms step_avg:44.98ms +[2025-09-11 09:18:01] [Rank 0] step:3641/10000 train_time:163788ms step_avg:44.98ms +[2025-09-11 09:18:01] [Rank 0] step:3661/10000 train_time:164453ms step_avg:44.92ms +[2025-09-11 09:18:01] [Rank 0] step:3661/10000 train_time:164453ms step_avg:44.92ms +[2025-09-11 09:18:02] [Rank 0] step:3681/10000 train_time:165116ms step_avg:44.86ms +[2025-09-11 09:18:02] [Rank 0] step:3681/10000 train_time:165116ms step_avg:44.86ms +[2025-09-11 09:18:03] [Rank 0] step:3701/10000 train_time:165778ms step_avg:44.79ms +[2025-09-11 09:18:03] [Rank 0] step:3701/10000 train_time:165778ms step_avg:44.79ms +[2025-09-11 09:18:03] [Rank 0] step:3721/10000 train_time:166451ms step_avg:44.73ms +[2025-09-11 09:18:03] [Rank 0] step:3721/10000 train_time:166451ms step_avg:44.73ms +[2025-09-11 09:18:04] [Rank 0] step:3741/10000 train_time:167125ms step_avg:44.67ms +[2025-09-11 09:18:04] [Rank 0] step:3741/10000 train_time:167125ms step_avg:44.67ms +[2025-09-11 09:18:05] [Rank 0] step:3761/10000 train_time:167799ms step_avg:44.62ms +[2025-09-11 09:18:05] [Rank 0] step:3761/10000 train_time:167799ms step_avg:44.62ms 
+[2025-09-11 09:18:05] [Rank 0] step:3781/10000 train_time:168473ms step_avg:44.56ms +[2025-09-11 09:18:05] [Rank 0] step:3781/10000 train_time:168473ms step_avg:44.56ms +[2025-09-11 09:18:06] [Rank 0] step:3801/10000 train_time:169147ms step_avg:44.50ms +[2025-09-11 09:18:06] [Rank 0] step:3801/10000 train_time:169147ms step_avg:44.50ms +[2025-09-11 09:18:07] [Rank 0] step:3821/10000 train_time:169821ms step_avg:44.44ms +[2025-09-11 09:18:07] [Rank 0] step:3821/10000 train_time:169821ms step_avg:44.44ms +[2025-09-11 09:18:07] [Rank 0] step:3841/10000 train_time:170495ms step_avg:44.39ms +[2025-09-11 09:18:07] [Rank 0] step:3841/10000 train_time:170495ms step_avg:44.39ms +[2025-09-11 09:18:08] [Rank 0] step:3861/10000 train_time:171168ms step_avg:44.33ms +[2025-09-11 09:18:08] [Rank 0] step:3861/10000 train_time:171168ms step_avg:44.33ms +[2025-09-11 09:18:09] [Rank 0] step:3881/10000 train_time:171840ms step_avg:44.28ms +[2025-09-11 09:18:09] [Rank 0] step:3881/10000 train_time:171840ms step_avg:44.28ms +[2025-09-11 09:18:09] [Rank 0] step:3901/10000 train_time:172513ms step_avg:44.22ms +[2025-09-11 09:18:09] [Rank 0] step:3901/10000 train_time:172513ms step_avg:44.22ms +[2025-09-11 09:18:10] [Rank 0] step:3921/10000 train_time:173188ms step_avg:44.17ms +[2025-09-11 09:18:10] [Rank 0] step:3921/10000 train_time:173188ms step_avg:44.17ms +[2025-09-11 09:18:11] [Rank 0] step:3941/10000 train_time:173862ms step_avg:44.12ms +[2025-09-11 09:18:11] [Rank 0] step:3941/10000 train_time:173862ms step_avg:44.12ms +[2025-09-11 09:18:11] [Rank 0] step:3961/10000 train_time:174536ms step_avg:44.06ms +[2025-09-11 09:18:11] [Rank 0] step:3961/10000 train_time:174536ms step_avg:44.06ms +[2025-09-11 09:18:12] [Rank 0] step:3981/10000 train_time:175208ms step_avg:44.01ms +[2025-09-11 09:18:12] [Rank 0] step:3981/10000 train_time:175208ms step_avg:44.01ms +[2025-09-11 09:18:13] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 09:18:13] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 09:18:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 09:18:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 09:18:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 09:18:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 09:18:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:18:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:18:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 09:18:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 09:18:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 09:18:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 09:18:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 09:18:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 09:18:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 09:18:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 09:18:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 09:18:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 09:18:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 09:18:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 09:18:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 09:18:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 09:18:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 09:18:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 09:18:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 09:18:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 09:18:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 09:18:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 09:18:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 09:18:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 09:18:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 09:18:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 09:18:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 09:18:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 09:18:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 09:18:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 09:18:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 09:18:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 09:18:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 09:18:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 09:18:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 09:18:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 09:18:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 09:18:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 09:18:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:18:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:18:23] [Rank 0] PRINT: step:4000/10000 val_loss:4.5686 total_sharp:5.6141e-04 L1_sharp:1.1107e-02 L2_sharp:8.6629e-04 L3_sharp:5.3085e-04 L4_sharp:1.0277e-03 L5_sharp:1.1600e-03 L6_sharp:2.0289e-03 L7_sharp:2.0264e-03 L8_sharp:3.9835e-03 L9_sharp:3.6900e-03 L10_sharp:4.7142e-03 L11_sharp:7.0411e-03 L12_sharp:7.2496e-02 total_fnorm:4.0500e+01 total_l1_linf:7.8336e+04 total_spectral:2.0500e+01 L1_fnorm:1.2578e+00 L2_fnorm:1.2031e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2422e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2422e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2734e+00 L1_l1linf:3.3789e-01 L2_l1linf:3.3203e-01 L3_l1linf:3.1836e-01 L4_l1linf:3.1641e-01 L5_l1linf:3.1445e-01 L6_l1linf:3.1445e-01 L7_l1linf:3.1641e-01 L8_l1linf:3.1055e-01 L9_l1linf:3.1250e-01 L10_l1linf:3.1055e-01 L11_l1linf:3.1641e-01 L12_l1linf:3.0664e-01 L1_spectral:1.5728e-02 L2_spectral:1.5345e-02 L3_spectral:1.5483e-02 L4_spectral:1.5509e-02 L5_spectral:1.5413e-02 L6_spectral:1.5517e-02 L7_spectral:1.5509e-02 L8_spectral:1.5715e-02 L9_spectral:1.5688e-02 L10_spectral:1.5580e-02 L11_spectral:1.5639e-02 L12_spectral:1.5765e-02 train_time:175864ms step_avg:43.97ms +[2025-09-11 09:18:23] [Rank 0] PRINT: step:4000/10000 
val_loss:4.5686 total_sharp:5.6141e-04 L1_sharp:1.1107e-02 L2_sharp:8.6629e-04 L3_sharp:5.3085e-04 L4_sharp:1.0277e-03 L5_sharp:1.1600e-03 L6_sharp:2.0289e-03 L7_sharp:2.0264e-03 L8_sharp:3.9835e-03 L9_sharp:3.6900e-03 L10_sharp:4.7142e-03 L11_sharp:7.0411e-03 L12_sharp:7.2496e-02 total_fnorm:4.0500e+01 total_l1_linf:7.8336e+04 total_spectral:2.0500e+01 L1_fnorm:1.2578e+00 L2_fnorm:1.2031e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2422e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2422e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2734e+00 L1_l1linf:3.3789e-01 L2_l1linf:3.3203e-01 L3_l1linf:3.1836e-01 L4_l1linf:3.1641e-01 L5_l1linf:3.1445e-01 L6_l1linf:3.1445e-01 L7_l1linf:3.1641e-01 L8_l1linf:3.1055e-01 L9_l1linf:3.1250e-01 L10_l1linf:3.1055e-01 L11_l1linf:3.1641e-01 L12_l1linf:3.0664e-01 L1_spectral:1.5728e-02 L2_spectral:1.5345e-02 L3_spectral:1.5483e-02 L4_spectral:1.5509e-02 L5_spectral:1.5413e-02 L6_spectral:1.5517e-02 L7_spectral:1.5509e-02 L8_spectral:1.5715e-02 L9_spectral:1.5688e-02 L10_spectral:1.5580e-02 L11_spectral:1.5639e-02 L12_spectral:1.5765e-02 train_time:175864ms step_avg:43.97ms +[2025-09-11 09:18:24] [Rank 0] step:4001/10000 train_time:177000ms step_avg:44.24ms +[2025-09-11 09:18:24] [Rank 0] step:4001/10000 train_time:177000ms step_avg:44.24ms +[2025-09-11 09:18:25] [Rank 0] step:4021/10000 train_time:177674ms step_avg:44.19ms +[2025-09-11 09:18:25] [Rank 0] step:4021/10000 train_time:177674ms step_avg:44.19ms +[2025-09-11 09:18:25] [Rank 0] step:4041/10000 train_time:178352ms step_avg:44.14ms +[2025-09-11 09:18:25] [Rank 0] step:4041/10000 train_time:178352ms step_avg:44.14ms +[2025-09-11 09:18:26] [Rank 0] step:4061/10000 train_time:179028ms step_avg:44.08ms +[2025-09-11 09:18:26] [Rank 0] step:4061/10000 train_time:179028ms step_avg:44.08ms +[2025-09-11 09:18:27] [Rank 0] step:4081/10000 train_time:179702ms step_avg:44.03ms +[2025-09-11 09:18:27] [Rank 0] step:4081/10000 
train_time:179702ms step_avg:44.03ms +[2025-09-11 09:18:27] [Rank 0] step:4101/10000 train_time:180375ms step_avg:43.98ms +[2025-09-11 09:18:27] [Rank 0] step:4101/10000 train_time:180375ms step_avg:43.98ms +[2025-09-11 09:18:28] [Rank 0] step:4121/10000 train_time:181048ms step_avg:43.93ms +[2025-09-11 09:18:28] [Rank 0] step:4121/10000 train_time:181048ms step_avg:43.93ms +[2025-09-11 09:18:29] [Rank 0] step:4141/10000 train_time:181721ms step_avg:43.88ms +[2025-09-11 09:18:29] [Rank 0] step:4141/10000 train_time:181721ms step_avg:43.88ms +[2025-09-11 09:18:29] [Rank 0] step:4161/10000 train_time:182394ms step_avg:43.83ms +[2025-09-11 09:18:29] [Rank 0] step:4161/10000 train_time:182394ms step_avg:43.83ms +[2025-09-11 09:18:30] [Rank 0] step:4181/10000 train_time:183067ms step_avg:43.79ms +[2025-09-11 09:18:30] [Rank 0] step:4181/10000 train_time:183067ms step_avg:43.79ms +[2025-09-11 09:18:31] [Rank 0] step:4201/10000 train_time:183740ms step_avg:43.74ms +[2025-09-11 09:18:31] [Rank 0] step:4201/10000 train_time:183740ms step_avg:43.74ms +[2025-09-11 09:18:31] [Rank 0] step:4221/10000 train_time:184413ms step_avg:43.69ms +[2025-09-11 09:18:31] [Rank 0] step:4221/10000 train_time:184413ms step_avg:43.69ms +[2025-09-11 09:18:32] [Rank 0] step:4241/10000 train_time:185086ms step_avg:43.64ms +[2025-09-11 09:18:32] [Rank 0] step:4241/10000 train_time:185086ms step_avg:43.64ms +[2025-09-11 09:18:33] [Rank 0] step:4261/10000 train_time:185759ms step_avg:43.60ms +[2025-09-11 09:18:33] [Rank 0] step:4261/10000 train_time:185759ms step_avg:43.60ms +[2025-09-11 09:18:33] [Rank 0] step:4281/10000 train_time:186433ms step_avg:43.55ms +[2025-09-11 09:18:33] [Rank 0] step:4281/10000 train_time:186433ms step_avg:43.55ms +[2025-09-11 09:18:34] [Rank 0] step:4301/10000 train_time:187107ms step_avg:43.50ms +[2025-09-11 09:18:34] [Rank 0] step:4301/10000 train_time:187107ms step_avg:43.50ms +[2025-09-11 09:18:35] [Rank 0] step:4321/10000 train_time:187782ms step_avg:43.46ms 
+[2025-09-11 09:18:35] [Rank 0] step:4321/10000 train_time:187782ms step_avg:43.46ms +[2025-09-11 09:18:35] [Rank 0] step:4341/10000 train_time:188455ms step_avg:43.41ms +[2025-09-11 09:18:35] [Rank 0] step:4341/10000 train_time:188455ms step_avg:43.41ms +[2025-09-11 09:18:36] [Rank 0] step:4361/10000 train_time:189128ms step_avg:43.37ms +[2025-09-11 09:18:36] [Rank 0] step:4361/10000 train_time:189128ms step_avg:43.37ms +[2025-09-11 09:18:37] [Rank 0] step:4381/10000 train_time:189802ms step_avg:43.32ms +[2025-09-11 09:18:37] [Rank 0] step:4381/10000 train_time:189802ms step_avg:43.32ms +[2025-09-11 09:18:37] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 09:18:37] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 09:18:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 09:18:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 09:18:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 09:18:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 09:18:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:18:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:18:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 09:18:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 09:18:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 09:18:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 09:18:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 09:18:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 09:18:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 09:18:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 09:18:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 09:18:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 09:18:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 09:18:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 09:18:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 09:18:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 09:18:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 09:18:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 09:18:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 09:18:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 09:18:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 09:18:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 09:18:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 09:18:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 09:18:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 09:18:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 09:18:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 09:18:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 09:18:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 09:18:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 09:18:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 09:18:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 09:18:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 09:18:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 09:18:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 09:18:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 09:18:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 09:18:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 09:18:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:18:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:18:48] [Rank 0] PRINT: step:4400/10000 val_loss:4.5425 total_sharp:4.9597e-04 L1_sharp:8.6705e-03 L2_sharp:4.8445e-04 L3_sharp:2.6510e-04 L4_sharp:8.7629e-04 L5_sharp:1.3063e-03 L6_sharp:1.8045e-03 L7_sharp:2.1966e-03 L8_sharp:3.7961e-03 L9_sharp:3.4066e-03 L10_sharp:4.0819e-03 L11_sharp:5.3547e-03 L12_sharp:3.7796e-02 total_fnorm:3.5000e+01 total_l1_linf:6.7072e+04 total_spectral:1.7750e+01 L1_fnorm:1.2578e+00 L2_fnorm:1.2031e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2422e+00 L6_fnorm:1.2500e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2422e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2734e+00 L12_fnorm:1.2656e+00 L1_l1linf:3.3789e-01 L2_l1linf:3.2031e-01 L3_l1linf:3.1445e-01 L4_l1linf:3.1445e-01 L5_l1linf:3.1250e-01 L6_l1linf:3.1445e-01 L7_l1linf:3.1055e-01 L8_l1linf:3.0859e-01 L9_l1linf:3.0664e-01 L10_l1linf:2.9883e-01 L11_l1linf:3.1445e-01 L12_l1linf:3.0859e-01 L1_spectral:1.5864e-02 L2_spectral:1.5446e-02 L3_spectral:1.5461e-02 L4_spectral:1.5504e-02 L5_spectral:1.5496e-02 L6_spectral:1.5627e-02 L7_spectral:1.5573e-02 L8_spectral:1.5737e-02 L9_spectral:1.5801e-02 L10_spectral:1.5738e-02 L11_spectral:1.5852e-02 L12_spectral:1.5700e-02 train_time:190455ms step_avg:43.29ms +[2025-09-11 09:18:48] [Rank 0] PRINT: step:4400/10000 val_loss:4.5425 total_sharp:4.9597e-04 L1_sharp:8.6705e-03 L2_sharp:4.8445e-04 L3_sharp:2.6510e-04 L4_sharp:8.7629e-04 L5_sharp:1.3063e-03 L6_sharp:1.8045e-03 L7_sharp:2.1966e-03 L8_sharp:3.7961e-03 L9_sharp:3.4066e-03 L10_sharp:4.0819e-03 L11_sharp:5.3547e-03 L12_sharp:3.7796e-02 total_fnorm:3.5000e+01 total_l1_linf:6.7072e+04 total_spectral:1.7750e+01 L1_fnorm:1.2578e+00 L2_fnorm:1.2031e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2422e+00 L6_fnorm:1.2500e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2422e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2734e+00 L12_fnorm:1.2656e+00 L1_l1linf:3.3789e-01 L2_l1linf:3.2031e-01 L3_l1linf:3.1445e-01 L4_l1linf:3.1445e-01 L5_l1linf:3.1250e-01 
L6_l1linf:3.1445e-01 L7_l1linf:3.1055e-01 L8_l1linf:3.0859e-01 L9_l1linf:3.0664e-01 L10_l1linf:2.9883e-01 L11_l1linf:3.1445e-01 L12_l1linf:3.0859e-01 L1_spectral:1.5864e-02 L2_spectral:1.5446e-02 L3_spectral:1.5461e-02 L4_spectral:1.5504e-02 L5_spectral:1.5496e-02 L6_spectral:1.5627e-02 L7_spectral:1.5573e-02 L8_spectral:1.5737e-02 L9_spectral:1.5801e-02 L10_spectral:1.5738e-02 L11_spectral:1.5852e-02 L12_spectral:1.5700e-02 train_time:190455ms step_avg:43.29ms +[2025-09-11 09:18:49] [Rank 0] step:4401/10000 train_time:191599ms step_avg:43.54ms +[2025-09-11 09:18:49] [Rank 0] step:4401/10000 train_time:191599ms step_avg:43.54ms +[2025-09-11 09:18:49] [Rank 0] step:4421/10000 train_time:192298ms step_avg:43.50ms +[2025-09-11 09:18:49] [Rank 0] step:4421/10000 train_time:192298ms step_avg:43.50ms +[2025-09-11 09:18:50] [Rank 0] step:4441/10000 train_time:192973ms step_avg:43.45ms +[2025-09-11 09:18:50] [Rank 0] step:4441/10000 train_time:192973ms step_avg:43.45ms +[2025-09-11 09:18:51] [Rank 0] step:4461/10000 train_time:193648ms step_avg:43.41ms +[2025-09-11 09:18:51] [Rank 0] step:4461/10000 train_time:193648ms step_avg:43.41ms +[2025-09-11 09:18:51] [Rank 0] step:4481/10000 train_time:194324ms step_avg:43.37ms +[2025-09-11 09:18:51] [Rank 0] step:4481/10000 train_time:194324ms step_avg:43.37ms +[2025-09-11 09:18:52] [Rank 0] step:4501/10000 train_time:195001ms step_avg:43.32ms +[2025-09-11 09:18:52] [Rank 0] step:4501/10000 train_time:195001ms step_avg:43.32ms +[2025-09-11 09:18:53] [Rank 0] step:4521/10000 train_time:195678ms step_avg:43.28ms +[2025-09-11 09:18:53] [Rank 0] step:4521/10000 train_time:195678ms step_avg:43.28ms +[2025-09-11 09:18:53] [Rank 0] step:4541/10000 train_time:196354ms step_avg:43.24ms +[2025-09-11 09:18:53] [Rank 0] step:4541/10000 train_time:196354ms step_avg:43.24ms +[2025-09-11 09:18:54] [Rank 0] step:4561/10000 train_time:197030ms step_avg:43.20ms +[2025-09-11 09:18:54] [Rank 0] step:4561/10000 train_time:197030ms step_avg:43.20ms 
+[2025-09-11 09:18:55] [Rank 0] step:4581/10000 train_time:197706ms step_avg:43.16ms +[2025-09-11 09:18:55] [Rank 0] step:4581/10000 train_time:197706ms step_avg:43.16ms +[2025-09-11 09:18:56] [Rank 0] step:4601/10000 train_time:198382ms step_avg:43.12ms +[2025-09-11 09:18:56] [Rank 0] step:4601/10000 train_time:198382ms step_avg:43.12ms +[2025-09-11 09:18:56] [Rank 0] step:4621/10000 train_time:199057ms step_avg:43.08ms +[2025-09-11 09:18:56] [Rank 0] step:4621/10000 train_time:199057ms step_avg:43.08ms +[2025-09-11 09:18:57] [Rank 0] step:4641/10000 train_time:199733ms step_avg:43.04ms +[2025-09-11 09:18:57] [Rank 0] step:4641/10000 train_time:199733ms step_avg:43.04ms +[2025-09-11 09:18:58] [Rank 0] step:4661/10000 train_time:200409ms step_avg:43.00ms +[2025-09-11 09:18:58] [Rank 0] step:4661/10000 train_time:200409ms step_avg:43.00ms +[2025-09-11 09:18:58] [Rank 0] step:4681/10000 train_time:201084ms step_avg:42.96ms +[2025-09-11 09:18:58] [Rank 0] step:4681/10000 train_time:201084ms step_avg:42.96ms +[2025-09-11 09:18:59] [Rank 0] step:4701/10000 train_time:201760ms step_avg:42.92ms +[2025-09-11 09:18:59] [Rank 0] step:4701/10000 train_time:201760ms step_avg:42.92ms +[2025-09-11 09:19:00] [Rank 0] step:4721/10000 train_time:202435ms step_avg:42.88ms +[2025-09-11 09:19:00] [Rank 0] step:4721/10000 train_time:202435ms step_avg:42.88ms +[2025-09-11 09:19:00] [Rank 0] step:4741/10000 train_time:203110ms step_avg:42.84ms +[2025-09-11 09:19:00] [Rank 0] step:4741/10000 train_time:203110ms step_avg:42.84ms +[2025-09-11 09:19:01] [Rank 0] step:4761/10000 train_time:203787ms step_avg:42.80ms +[2025-09-11 09:19:01] [Rank 0] step:4761/10000 train_time:203787ms step_avg:42.80ms +[2025-09-11 09:19:02] [Rank 0] step:4781/10000 train_time:204464ms step_avg:42.77ms +[2025-09-11 09:19:02] [Rank 0] step:4781/10000 train_time:204464ms step_avg:42.77ms +[2025-09-11 09:19:02] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 09:19:02] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 09:19:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 09:19:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 09:19:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 09:19:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 09:19:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:19:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:19:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 09:19:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 09:19:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 09:19:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 09:19:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 09:19:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 09:19:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 09:19:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 09:19:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 09:19:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 09:19:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 09:19:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 09:19:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 09:19:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 09:19:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 09:19:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 09:19:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 09:19:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 09:19:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 09:19:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 09:19:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 09:19:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 09:19:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 09:19:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 09:19:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 09:19:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 09:19:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 09:19:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 09:19:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 09:19:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 09:19:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 09:19:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 09:19:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 09:19:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 09:19:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 09:19:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 09:19:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:19:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:19:12] [Rank 0] PRINT: step:4800/10000 val_loss:4.4940 total_sharp:4.7634e-04 L1_sharp:9.4178e-03 L2_sharp:1.1334e-03 L3_sharp:5.9026e-04 L4_sharp:1.5203e-03 L5_sharp:1.8941e-03 L6_sharp:1.9113e-03 L7_sharp:1.7241e-03 L8_sharp:3.2520e-03 L9_sharp:3.5280e-03 L10_sharp:3.9703e-03 L11_sharp:5.4425e-03 L12_sharp:3.5304e-02 total_fnorm:3.6250e+01 total_l1_linf:7.0144e+04 total_spectral:1.8250e+01 L1_fnorm:1.2656e+00 L2_fnorm:1.2109e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2656e+00 L1_l1linf:3.3398e-01 L2_l1linf:3.2422e-01 L3_l1linf:3.1445e-01 L4_l1linf:3.1641e-01 L5_l1linf:3.1055e-01 L6_l1linf:3.1055e-01 L7_l1linf:3.2227e-01 L8_l1linf:3.0469e-01 L9_l1linf:3.0078e-01 L10_l1linf:2.9297e-01 L11_l1linf:2.9883e-01 L12_l1linf:3.0469e-01 L1_spectral:1.5979e-02 L2_spectral:1.5679e-02 L3_spectral:1.5675e-02 L4_spectral:1.5673e-02 L5_spectral:1.5677e-02 L6_spectral:1.5724e-02 L7_spectral:1.5683e-02 L8_spectral:1.5892e-02 L9_spectral:1.5809e-02 L10_spectral:1.5839e-02 L11_spectral:1.5992e-02 L12_spectral:1.5931e-02 train_time:205121ms step_avg:42.73ms +[2025-09-11 09:19:12] [Rank 0] PRINT: step:4800/10000 
val_loss:4.4940 total_sharp:4.7634e-04 L1_sharp:9.4178e-03 L2_sharp:1.1334e-03 L3_sharp:5.9026e-04 L4_sharp:1.5203e-03 L5_sharp:1.8941e-03 L6_sharp:1.9113e-03 L7_sharp:1.7241e-03 L8_sharp:3.2520e-03 L9_sharp:3.5280e-03 L10_sharp:3.9703e-03 L11_sharp:5.4425e-03 L12_sharp:3.5304e-02 total_fnorm:3.6250e+01 total_l1_linf:7.0144e+04 total_spectral:1.8250e+01 L1_fnorm:1.2656e+00 L2_fnorm:1.2109e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2656e+00 L1_l1linf:3.3398e-01 L2_l1linf:3.2422e-01 L3_l1linf:3.1445e-01 L4_l1linf:3.1641e-01 L5_l1linf:3.1055e-01 L6_l1linf:3.1055e-01 L7_l1linf:3.2227e-01 L8_l1linf:3.0469e-01 L9_l1linf:3.0078e-01 L10_l1linf:2.9297e-01 L11_l1linf:2.9883e-01 L12_l1linf:3.0469e-01 L1_spectral:1.5979e-02 L2_spectral:1.5679e-02 L3_spectral:1.5675e-02 L4_spectral:1.5673e-02 L5_spectral:1.5677e-02 L6_spectral:1.5724e-02 L7_spectral:1.5683e-02 L8_spectral:1.5892e-02 L9_spectral:1.5809e-02 L10_spectral:1.5839e-02 L11_spectral:1.5992e-02 L12_spectral:1.5931e-02 train_time:205121ms step_avg:42.73ms +[2025-09-11 09:19:13] [Rank 0] step:4801/10000 train_time:206258ms step_avg:42.96ms +[2025-09-11 09:19:13] [Rank 0] step:4801/10000 train_time:206258ms step_avg:42.96ms +[2025-09-11 09:19:14] [Rank 0] step:4821/10000 train_time:206938ms step_avg:42.92ms +[2025-09-11 09:19:14] [Rank 0] step:4821/10000 train_time:206938ms step_avg:42.92ms +[2025-09-11 09:19:15] [Rank 0] step:4841/10000 train_time:207616ms step_avg:42.89ms +[2025-09-11 09:19:15] [Rank 0] step:4841/10000 train_time:207616ms step_avg:42.89ms +[2025-09-11 09:19:15] [Rank 0] step:4861/10000 train_time:208292ms step_avg:42.85ms +[2025-09-11 09:19:15] [Rank 0] step:4861/10000 train_time:208292ms step_avg:42.85ms +[2025-09-11 09:19:16] [Rank 0] step:4881/10000 train_time:208968ms step_avg:42.81ms +[2025-09-11 09:19:16] [Rank 0] step:4881/10000 
train_time:208968ms step_avg:42.81ms +[2025-09-11 09:19:17] [Rank 0] step:4901/10000 train_time:209644ms step_avg:42.78ms +[2025-09-11 09:19:17] [Rank 0] step:4901/10000 train_time:209644ms step_avg:42.78ms +[2025-09-11 09:19:17] [Rank 0] step:4921/10000 train_time:210321ms step_avg:42.74ms +[2025-09-11 09:19:17] [Rank 0] step:4921/10000 train_time:210321ms step_avg:42.74ms +[2025-09-11 09:19:18] [Rank 0] step:4941/10000 train_time:210997ms step_avg:42.70ms +[2025-09-11 09:19:18] [Rank 0] step:4941/10000 train_time:210997ms step_avg:42.70ms +[2025-09-11 09:19:19] [Rank 0] step:4961/10000 train_time:211674ms step_avg:42.67ms +[2025-09-11 09:19:19] [Rank 0] step:4961/10000 train_time:211674ms step_avg:42.67ms +[2025-09-11 09:19:19] [Rank 0] step:4981/10000 train_time:212350ms step_avg:42.63ms +[2025-09-11 09:19:19] [Rank 0] step:4981/10000 train_time:212350ms step_avg:42.63ms +[2025-09-11 09:19:20] [Rank 0] step:5001/10000 train_time:213028ms step_avg:42.60ms +[2025-09-11 09:19:20] [Rank 0] step:5001/10000 train_time:213028ms step_avg:42.60ms +[2025-09-11 09:19:21] [Rank 0] step:5021/10000 train_time:213703ms step_avg:42.56ms +[2025-09-11 09:19:21] [Rank 0] step:5021/10000 train_time:213703ms step_avg:42.56ms +[2025-09-11 09:19:22] [Rank 0] step:5041/10000 train_time:214378ms step_avg:42.53ms +[2025-09-11 09:19:22] [Rank 0] step:5041/10000 train_time:214378ms step_avg:42.53ms +[2025-09-11 09:19:22] [Rank 0] step:5061/10000 train_time:215055ms step_avg:42.49ms +[2025-09-11 09:19:22] [Rank 0] step:5061/10000 train_time:215055ms step_avg:42.49ms +[2025-09-11 09:19:23] [Rank 0] step:5081/10000 train_time:215730ms step_avg:42.46ms +[2025-09-11 09:19:23] [Rank 0] step:5081/10000 train_time:215730ms step_avg:42.46ms +[2025-09-11 09:19:24] [Rank 0] step:5101/10000 train_time:216406ms step_avg:42.42ms +[2025-09-11 09:19:24] [Rank 0] step:5101/10000 train_time:216406ms step_avg:42.42ms +[2025-09-11 09:19:24] [Rank 0] step:5121/10000 train_time:217082ms step_avg:42.39ms 
+[2025-09-11 09:19:24] [Rank 0] step:5121/10000 train_time:217082ms step_avg:42.39ms +[2025-09-11 09:19:25] [Rank 0] step:5141/10000 train_time:217758ms step_avg:42.36ms +[2025-09-11 09:19:25] [Rank 0] step:5141/10000 train_time:217758ms step_avg:42.36ms +[2025-09-11 09:19:26] [Rank 0] step:5161/10000 train_time:218434ms step_avg:42.32ms +[2025-09-11 09:19:26] [Rank 0] step:5161/10000 train_time:218434ms step_avg:42.32ms +[2025-09-11 09:19:26] [Rank 0] step:5181/10000 train_time:219109ms step_avg:42.29ms +[2025-09-11 09:19:26] [Rank 0] step:5181/10000 train_time:219109ms step_avg:42.29ms +[2025-09-11 09:19:27] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 09:19:27] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 09:19:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 09:19:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 09:19:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 09:19:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 09:19:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:19:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:19:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 09:19:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 09:19:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 09:19:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 09:19:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 09:19:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 09:19:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 09:19:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 09:19:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 09:19:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 09:19:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 09:19:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 09:19:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 09:19:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 09:19:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 09:19:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 09:19:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 09:19:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 09:19:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 09:19:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 09:19:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 09:19:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 09:19:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 09:19:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 09:19:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 09:19:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 09:19:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 09:19:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 09:19:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 09:19:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 09:19:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 09:19:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 09:19:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 09:19:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 09:19:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 09:19:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 09:19:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:19:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:19:37] [Rank 0] PRINT: step:5200/10000 val_loss:4.4582 total_sharp:6.7086e-04 L1_sharp:1.0144e-02 L2_sharp:8.5822e-04 L3_sharp:-1.2967e-04 L4_sharp:1.1636e-03 L5_sharp:1.7557e-03 L6_sharp:1.9245e-03 L7_sharp:2.6754e-03 L8_sharp:3.2888e-03 L9_sharp:3.3170e-03 L10_sharp:5.1028e-03 L11_sharp:6.6331e-03 L12_sharp:5.9986e-02 total_fnorm:3.3500e+01 total_l1_linf:6.2208e+04 total_spectral:1.6875e+01 L1_fnorm:1.2500e+00 L2_fnorm:1.2109e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2422e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2656e+00 L1_l1linf:3.2617e-01 L2_l1linf:3.1641e-01 L3_l1linf:3.1250e-01 L4_l1linf:3.1055e-01 L5_l1linf:3.0859e-01 L6_l1linf:3.1836e-01 L7_l1linf:3.1445e-01 L8_l1linf:3.0469e-01 L9_l1linf:2.9688e-01 L10_l1linf:2.9102e-01 L11_l1linf:2.9297e-01 L12_l1linf:3.1055e-01 L1_spectral:1.6096e-02 L2_spectral:1.5604e-02 L3_spectral:1.5780e-02 L4_spectral:1.5808e-02 L5_spectral:1.5765e-02 L6_spectral:1.5741e-02 L7_spectral:1.5754e-02 L8_spectral:1.5982e-02 L9_spectral:1.6048e-02 L10_spectral:1.5956e-02 L11_spectral:1.5963e-02 L12_spectral:1.5912e-02 train_time:219772ms step_avg:42.26ms +[2025-09-11 09:19:37] [Rank 0] PRINT: step:5200/10000 val_loss:4.4582 total_sharp:6.7086e-04 L1_sharp:1.0144e-02 L2_sharp:8.5822e-04 L3_sharp:-1.2967e-04 L4_sharp:1.1636e-03 L5_sharp:1.7557e-03 L6_sharp:1.9245e-03 L7_sharp:2.6754e-03 L8_sharp:3.2888e-03 L9_sharp:3.3170e-03 L10_sharp:5.1028e-03 L11_sharp:6.6331e-03 L12_sharp:5.9986e-02 total_fnorm:3.3500e+01 total_l1_linf:6.2208e+04 total_spectral:1.6875e+01 L1_fnorm:1.2500e+00 L2_fnorm:1.2109e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2422e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2656e+00 L1_l1linf:3.2617e-01 L2_l1linf:3.1641e-01 L3_l1linf:3.1250e-01 L4_l1linf:3.1055e-01 L5_l1linf:3.0859e-01 
L6_l1linf:3.1836e-01 L7_l1linf:3.1445e-01 L8_l1linf:3.0469e-01 L9_l1linf:2.9688e-01 L10_l1linf:2.9102e-01 L11_l1linf:2.9297e-01 L12_l1linf:3.1055e-01 L1_spectral:1.6096e-02 L2_spectral:1.5604e-02 L3_spectral:1.5780e-02 L4_spectral:1.5808e-02 L5_spectral:1.5765e-02 L6_spectral:1.5741e-02 L7_spectral:1.5754e-02 L8_spectral:1.5982e-02 L9_spectral:1.6048e-02 L10_spectral:1.5956e-02 L11_spectral:1.5963e-02 L12_spectral:1.5912e-02 train_time:219772ms step_avg:42.26ms +[2025-09-11 09:19:39] [Rank 0] step:5201/10000 train_time:221778ms step_avg:42.64ms +[2025-09-11 09:19:39] [Rank 0] step:5201/10000 train_time:221778ms step_avg:42.64ms +[2025-09-11 09:19:40] [Rank 0] step:5221/10000 train_time:222470ms step_avg:42.61ms +[2025-09-11 09:19:40] [Rank 0] step:5221/10000 train_time:222470ms step_avg:42.61ms +[2025-09-11 09:19:40] [Rank 0] step:5241/10000 train_time:223156ms step_avg:42.58ms +[2025-09-11 09:19:40] [Rank 0] step:5241/10000 train_time:223156ms step_avg:42.58ms +[2025-09-11 09:19:41] [Rank 0] step:5261/10000 train_time:223842ms step_avg:42.55ms +[2025-09-11 09:19:41] [Rank 0] step:5261/10000 train_time:223842ms step_avg:42.55ms +[2025-09-11 09:19:42] [Rank 0] step:5281/10000 train_time:224529ms step_avg:42.52ms +[2025-09-11 09:19:42] [Rank 0] step:5281/10000 train_time:224529ms step_avg:42.52ms +[2025-09-11 09:19:43] [Rank 0] step:5301/10000 train_time:225214ms step_avg:42.49ms +[2025-09-11 09:19:43] [Rank 0] step:5301/10000 train_time:225214ms step_avg:42.49ms +[2025-09-11 09:19:43] [Rank 0] step:5321/10000 train_time:225900ms step_avg:42.45ms +[2025-09-11 09:19:43] [Rank 0] step:5321/10000 train_time:225900ms step_avg:42.45ms +[2025-09-11 09:19:44] [Rank 0] step:5341/10000 train_time:226585ms step_avg:42.42ms +[2025-09-11 09:19:44] [Rank 0] step:5341/10000 train_time:226585ms step_avg:42.42ms +[2025-09-11 09:19:45] [Rank 0] step:5361/10000 train_time:227271ms step_avg:42.39ms +[2025-09-11 09:19:45] [Rank 0] step:5361/10000 train_time:227271ms step_avg:42.39ms 
+[2025-09-11 09:19:45] [Rank 0] step:5381/10000 train_time:227957ms step_avg:42.36ms +[2025-09-11 09:19:45] [Rank 0] step:5381/10000 train_time:227957ms step_avg:42.36ms +[2025-09-11 09:19:46] [Rank 0] step:5401/10000 train_time:228642ms step_avg:42.33ms +[2025-09-11 09:19:46] [Rank 0] step:5401/10000 train_time:228642ms step_avg:42.33ms +[2025-09-11 09:19:47] [Rank 0] step:5421/10000 train_time:229329ms step_avg:42.30ms +[2025-09-11 09:19:47] [Rank 0] step:5421/10000 train_time:229329ms step_avg:42.30ms +[2025-09-11 09:19:47] [Rank 0] step:5441/10000 train_time:230014ms step_avg:42.27ms +[2025-09-11 09:19:47] [Rank 0] step:5441/10000 train_time:230014ms step_avg:42.27ms +[2025-09-11 09:19:48] [Rank 0] step:5461/10000 train_time:230701ms step_avg:42.25ms +[2025-09-11 09:19:48] [Rank 0] step:5461/10000 train_time:230701ms step_avg:42.25ms +[2025-09-11 09:19:49] [Rank 0] step:5481/10000 train_time:231828ms step_avg:42.30ms +[2025-09-11 09:19:49] [Rank 0] step:5481/10000 train_time:231828ms step_avg:42.30ms +[2025-09-11 09:19:50] [Rank 0] step:5501/10000 train_time:232643ms step_avg:42.29ms +[2025-09-11 09:19:50] [Rank 0] step:5501/10000 train_time:232643ms step_avg:42.29ms +[2025-09-11 09:19:51] [Rank 0] step:5521/10000 train_time:233328ms step_avg:42.26ms +[2025-09-11 09:19:51] [Rank 0] step:5521/10000 train_time:233328ms step_avg:42.26ms +[2025-09-11 09:19:52] [Rank 0] step:5541/10000 train_time:234314ms step_avg:42.29ms +[2025-09-11 09:19:52] [Rank 0] step:5541/10000 train_time:234314ms step_avg:42.29ms +[2025-09-11 09:19:52] [Rank 0] step:5561/10000 train_time:235002ms step_avg:42.26ms +[2025-09-11 09:19:52] [Rank 0] step:5561/10000 train_time:235002ms step_avg:42.26ms +[2025-09-11 09:19:53] [Rank 0] step:5581/10000 train_time:235689ms step_avg:42.23ms +[2025-09-11 09:19:53] [Rank 0] step:5581/10000 train_time:235689ms step_avg:42.23ms +[2025-09-11 09:19:54] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 09:19:54] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 09:19:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 09:19:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 09:19:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 09:19:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 09:19:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:19:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:19:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 09:19:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 09:19:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 09:19:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 09:20:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 09:20:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 09:20:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 09:20:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 09:20:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 09:20:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 09:20:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 09:20:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 09:20:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 09:20:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 09:20:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 09:20:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 09:20:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 09:20:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 09:20:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 09:20:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 09:20:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 09:20:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 09:20:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 09:20:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 09:20:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 09:20:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 09:20:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 09:20:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 09:20:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 09:20:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 09:20:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 09:20:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 09:20:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 09:20:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 09:20:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 09:20:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 09:20:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:20:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:20:04] [Rank 0] PRINT: step:5600/10000 val_loss:4.4401 total_sharp:4.6528e-04 L1_sharp:8.3324e-03 L2_sharp:8.7182e-04 L3_sharp:2.6840e-04 L4_sharp:1.1065e-03 L5_sharp:1.1618e-03 L6_sharp:1.6329e-03 L7_sharp:1.0874e-03 L8_sharp:2.6542e-03 L9_sharp:2.8444e-03 L10_sharp:3.2501e-03 L11_sharp:5.2185e-03 L12_sharp:4.3429e-02 total_fnorm:3.3750e+01 total_l1_linf:6.3744e+04 total_spectral:1.7000e+01 L1_fnorm:1.2500e+00 L2_fnorm:1.2109e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2422e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2344e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2578e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2656e+00 L1_l1linf:3.2422e-01 L2_l1linf:3.1836e-01 L3_l1linf:3.0273e-01 L4_l1linf:3.0664e-01 L5_l1linf:3.1250e-01 L6_l1linf:3.0859e-01 L7_l1linf:3.0859e-01 L8_l1linf:2.9883e-01 L9_l1linf:2.9688e-01 L10_l1linf:2.8906e-01 L11_l1linf:2.8711e-01 L12_l1linf:3.0664e-01 L1_spectral:1.6112e-02 L2_spectral:1.5584e-02 L3_spectral:1.5766e-02 L4_spectral:1.5895e-02 L5_spectral:1.5743e-02 L6_spectral:1.5907e-02 L7_spectral:1.5845e-02 L8_spectral:1.5843e-02 L9_spectral:1.6048e-02 L10_spectral:1.6013e-02 L11_spectral:1.6061e-02 L12_spectral:1.6010e-02 train_time:236354ms step_avg:42.21ms +[2025-09-11 09:20:04] [Rank 0] PRINT: step:5600/10000 
val_loss:4.4401 total_sharp:4.6528e-04 L1_sharp:8.3324e-03 L2_sharp:8.7182e-04 L3_sharp:2.6840e-04 L4_sharp:1.1065e-03 L5_sharp:1.1618e-03 L6_sharp:1.6329e-03 L7_sharp:1.0874e-03 L8_sharp:2.6542e-03 L9_sharp:2.8444e-03 L10_sharp:3.2501e-03 L11_sharp:5.2185e-03 L12_sharp:4.3429e-02 total_fnorm:3.3750e+01 total_l1_linf:6.3744e+04 total_spectral:1.7000e+01 L1_fnorm:1.2500e+00 L2_fnorm:1.2109e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2422e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2344e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2578e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2656e+00 L1_l1linf:3.2422e-01 L2_l1linf:3.1836e-01 L3_l1linf:3.0273e-01 L4_l1linf:3.0664e-01 L5_l1linf:3.1250e-01 L6_l1linf:3.0859e-01 L7_l1linf:3.0859e-01 L8_l1linf:2.9883e-01 L9_l1linf:2.9688e-01 L10_l1linf:2.8906e-01 L11_l1linf:2.8711e-01 L12_l1linf:3.0664e-01 L1_spectral:1.6112e-02 L2_spectral:1.5584e-02 L3_spectral:1.5766e-02 L4_spectral:1.5895e-02 L5_spectral:1.5743e-02 L6_spectral:1.5907e-02 L7_spectral:1.5845e-02 L8_spectral:1.5843e-02 L9_spectral:1.6048e-02 L10_spectral:1.6013e-02 L11_spectral:1.6061e-02 L12_spectral:1.6010e-02 train_time:236354ms step_avg:42.21ms +[2025-09-11 09:20:05] [Rank 0] step:5601/10000 train_time:237500ms step_avg:42.40ms +[2025-09-11 09:20:05] [Rank 0] step:5601/10000 train_time:237500ms step_avg:42.40ms +[2025-09-11 09:20:05] [Rank 0] step:5621/10000 train_time:238199ms step_avg:42.38ms +[2025-09-11 09:20:05] [Rank 0] step:5621/10000 train_time:238199ms step_avg:42.38ms +[2025-09-11 09:20:06] [Rank 0] step:5641/10000 train_time:238884ms step_avg:42.35ms +[2025-09-11 09:20:06] [Rank 0] step:5641/10000 train_time:238884ms step_avg:42.35ms +[2025-09-11 09:20:07] [Rank 0] step:5661/10000 train_time:239570ms step_avg:42.32ms +[2025-09-11 09:20:07] [Rank 0] step:5661/10000 train_time:239570ms step_avg:42.32ms +[2025-09-11 09:20:07] [Rank 0] step:5681/10000 train_time:240256ms step_avg:42.29ms +[2025-09-11 09:20:07] [Rank 0] step:5681/10000 
train_time:240256ms step_avg:42.29ms +[2025-09-11 09:20:08] [Rank 0] step:5701/10000 train_time:240944ms step_avg:42.26ms +[2025-09-11 09:20:08] [Rank 0] step:5701/10000 train_time:240944ms step_avg:42.26ms +[2025-09-11 09:20:09] [Rank 0] step:5721/10000 train_time:241628ms step_avg:42.24ms +[2025-09-11 09:20:09] [Rank 0] step:5721/10000 train_time:241628ms step_avg:42.24ms +[2025-09-11 09:20:09] [Rank 0] step:5741/10000 train_time:242315ms step_avg:42.21ms +[2025-09-11 09:20:09] [Rank 0] step:5741/10000 train_time:242315ms step_avg:42.21ms +[2025-09-11 09:20:10] [Rank 0] step:5761/10000 train_time:243002ms step_avg:42.18ms +[2025-09-11 09:20:10] [Rank 0] step:5761/10000 train_time:243002ms step_avg:42.18ms +[2025-09-11 09:20:11] [Rank 0] step:5781/10000 train_time:243688ms step_avg:42.15ms +[2025-09-11 09:20:11] [Rank 0] step:5781/10000 train_time:243688ms step_avg:42.15ms +[2025-09-11 09:20:12] [Rank 0] step:5801/10000 train_time:244376ms step_avg:42.13ms +[2025-09-11 09:20:12] [Rank 0] step:5801/10000 train_time:244376ms step_avg:42.13ms +[2025-09-11 09:20:12] [Rank 0] step:5821/10000 train_time:245062ms step_avg:42.10ms +[2025-09-11 09:20:12] [Rank 0] step:5821/10000 train_time:245062ms step_avg:42.10ms +[2025-09-11 09:20:13] [Rank 0] step:5841/10000 train_time:245748ms step_avg:42.07ms +[2025-09-11 09:20:13] [Rank 0] step:5841/10000 train_time:245748ms step_avg:42.07ms +[2025-09-11 09:20:14] [Rank 0] step:5861/10000 train_time:246432ms step_avg:42.05ms +[2025-09-11 09:20:14] [Rank 0] step:5861/10000 train_time:246432ms step_avg:42.05ms +[2025-09-11 09:20:14] [Rank 0] step:5881/10000 train_time:247119ms step_avg:42.02ms +[2025-09-11 09:20:14] [Rank 0] step:5881/10000 train_time:247119ms step_avg:42.02ms +[2025-09-11 09:20:15] [Rank 0] step:5901/10000 train_time:247804ms step_avg:41.99ms +[2025-09-11 09:20:15] [Rank 0] step:5901/10000 train_time:247804ms step_avg:41.99ms +[2025-09-11 09:20:16] [Rank 0] step:5921/10000 train_time:248492ms step_avg:41.97ms 
+[2025-09-11 09:20:16] [Rank 0] step:5921/10000 train_time:248492ms step_avg:41.97ms +[2025-09-11 09:20:16] [Rank 0] step:5941/10000 train_time:249179ms step_avg:41.94ms +[2025-09-11 09:20:16] [Rank 0] step:5941/10000 train_time:249179ms step_avg:41.94ms +[2025-09-11 09:20:17] [Rank 0] step:5961/10000 train_time:249866ms step_avg:41.92ms +[2025-09-11 09:20:17] [Rank 0] step:5961/10000 train_time:249866ms step_avg:41.92ms +[2025-09-11 09:20:18] [Rank 0] step:5981/10000 train_time:250553ms step_avg:41.89ms +[2025-09-11 09:20:18] [Rank 0] step:5981/10000 train_time:250553ms step_avg:41.89ms +[2025-09-11 09:20:18] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 09:20:18] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 09:20:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 09:20:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 09:20:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 09:20:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 09:20:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:20:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:20:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 09:20:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 09:20:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 09:20:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 09:20:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 09:20:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 09:20:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 09:20:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 09:20:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 09:20:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 09:20:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 09:20:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 09:20:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 09:20:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 09:20:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 09:20:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 09:20:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 09:20:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 09:20:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 09:20:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 09:20:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 09:20:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 09:20:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 09:20:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 09:20:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 09:20:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 09:20:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 09:20:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 09:20:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 09:20:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 09:20:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 09:20:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 09:20:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 09:20:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 09:20:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 09:20:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 09:20:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:20:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:20:28] [Rank 0] PRINT: step:6000/10000 val_loss:4.3952 total_sharp:3.5890e-04 L1_sharp:8.2409e-03 L2_sharp:1.0194e-03 L3_sharp:4.0807e-04 L4_sharp:7.1161e-04 L5_sharp:1.3088e-03 L6_sharp:1.1616e-03 L7_sharp:9.5728e-04 L8_sharp:2.1254e-03 L9_sharp:2.7572e-03 L10_sharp:3.3258e-03 L11_sharp:4.6723e-03 L12_sharp:1.5989e-02 total_fnorm:3.4500e+01 total_l1_linf:6.4000e+04 total_spectral:1.7375e+01 L1_fnorm:1.2500e+00 L2_fnorm:1.2109e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2422e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2422e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2578e+00 L11_fnorm:1.2578e+00 L12_fnorm:1.2656e+00 L1_l1linf:3.2422e-01 L2_l1linf:3.1055e-01 L3_l1linf:3.0469e-01 L4_l1linf:3.0273e-01 L5_l1linf:3.0664e-01 L6_l1linf:3.0469e-01 L7_l1linf:3.1250e-01 L8_l1linf:2.9883e-01 L9_l1linf:2.9297e-01 L10_l1linf:2.8711e-01 L11_l1linf:2.8516e-01 L12_l1linf:3.0859e-01 L1_spectral:1.6302e-02 L2_spectral:1.5741e-02 L3_spectral:1.5835e-02 L4_spectral:1.5949e-02 L5_spectral:1.5892e-02 L6_spectral:1.5915e-02 L7_spectral:1.5962e-02 L8_spectral:1.6001e-02 L9_spectral:1.6066e-02 L10_spectral:1.6065e-02 L11_spectral:1.6139e-02 L12_spectral:1.6096e-02 train_time:251223ms step_avg:41.87ms +[2025-09-11 09:20:28] [Rank 0] PRINT: step:6000/10000 val_loss:4.3952 total_sharp:3.5890e-04 L1_sharp:8.2409e-03 L2_sharp:1.0194e-03 L3_sharp:4.0807e-04 L4_sharp:7.1161e-04 L5_sharp:1.3088e-03 L6_sharp:1.1616e-03 L7_sharp:9.5728e-04 L8_sharp:2.1254e-03 L9_sharp:2.7572e-03 L10_sharp:3.3258e-03 L11_sharp:4.6723e-03 L12_sharp:1.5989e-02 total_fnorm:3.4500e+01 total_l1_linf:6.4000e+04 total_spectral:1.7375e+01 L1_fnorm:1.2500e+00 L2_fnorm:1.2109e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2422e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2422e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2578e+00 L11_fnorm:1.2578e+00 L12_fnorm:1.2656e+00 L1_l1linf:3.2422e-01 L2_l1linf:3.1055e-01 L3_l1linf:3.0469e-01 L4_l1linf:3.0273e-01 L5_l1linf:3.0664e-01 
L6_l1linf:3.0469e-01 L7_l1linf:3.1250e-01 L8_l1linf:2.9883e-01 L9_l1linf:2.9297e-01 L10_l1linf:2.8711e-01 L11_l1linf:2.8516e-01 L12_l1linf:3.0859e-01 L1_spectral:1.6302e-02 L2_spectral:1.5741e-02 L3_spectral:1.5835e-02 L4_spectral:1.5949e-02 L5_spectral:1.5892e-02 L6_spectral:1.5915e-02 L7_spectral:1.5962e-02 L8_spectral:1.6001e-02 L9_spectral:1.6066e-02 L10_spectral:1.6065e-02 L11_spectral:1.6139e-02 L12_spectral:1.6096e-02 train_time:251223ms step_avg:41.87ms +[2025-09-11 09:20:30] [Rank 0] step:6001/10000 train_time:252387ms step_avg:42.06ms +[2025-09-11 09:20:30] [Rank 0] step:6001/10000 train_time:252387ms step_avg:42.06ms +[2025-09-11 09:20:30] [Rank 0] step:6021/10000 train_time:253066ms step_avg:42.03ms +[2025-09-11 09:20:30] [Rank 0] step:6021/10000 train_time:253066ms step_avg:42.03ms +[2025-09-11 09:20:31] [Rank 0] step:6041/10000 train_time:253757ms step_avg:42.01ms +[2025-09-11 09:20:31] [Rank 0] step:6041/10000 train_time:253757ms step_avg:42.01ms +[2025-09-11 09:20:32] [Rank 0] step:6061/10000 train_time:254444ms step_avg:41.98ms +[2025-09-11 09:20:32] [Rank 0] step:6061/10000 train_time:254444ms step_avg:41.98ms +[2025-09-11 09:20:32] [Rank 0] step:6081/10000 train_time:255134ms step_avg:41.96ms +[2025-09-11 09:20:32] [Rank 0] step:6081/10000 train_time:255134ms step_avg:41.96ms +[2025-09-11 09:20:33] [Rank 0] step:6101/10000 train_time:255821ms step_avg:41.93ms +[2025-09-11 09:20:33] [Rank 0] step:6101/10000 train_time:255821ms step_avg:41.93ms +[2025-09-11 09:20:34] [Rank 0] step:6121/10000 train_time:256510ms step_avg:41.91ms +[2025-09-11 09:20:34] [Rank 0] step:6121/10000 train_time:256510ms step_avg:41.91ms +[2025-09-11 09:20:34] [Rank 0] step:6141/10000 train_time:257197ms step_avg:41.88ms +[2025-09-11 09:20:34] [Rank 0] step:6141/10000 train_time:257197ms step_avg:41.88ms +[2025-09-11 09:20:35] [Rank 0] step:6161/10000 train_time:257885ms step_avg:41.86ms +[2025-09-11 09:20:35] [Rank 0] step:6161/10000 train_time:257885ms step_avg:41.86ms 
+[2025-09-11 09:20:36] [Rank 0] step:6181/10000 train_time:258570ms step_avg:41.83ms +[2025-09-11 09:20:36] [Rank 0] step:6181/10000 train_time:258570ms step_avg:41.83ms +[2025-09-11 09:20:36] [Rank 0] step:6201/10000 train_time:259258ms step_avg:41.81ms +[2025-09-11 09:20:36] [Rank 0] step:6201/10000 train_time:259258ms step_avg:41.81ms +[2025-09-11 09:20:37] [Rank 0] step:6221/10000 train_time:259946ms step_avg:41.79ms +[2025-09-11 09:20:37] [Rank 0] step:6221/10000 train_time:259946ms step_avg:41.79ms +[2025-09-11 09:20:38] [Rank 0] step:6241/10000 train_time:260634ms step_avg:41.76ms +[2025-09-11 09:20:38] [Rank 0] step:6241/10000 train_time:260634ms step_avg:41.76ms +[2025-09-11 09:20:38] [Rank 0] step:6261/10000 train_time:261321ms step_avg:41.74ms +[2025-09-11 09:20:38] [Rank 0] step:6261/10000 train_time:261321ms step_avg:41.74ms +[2025-09-11 09:20:39] [Rank 0] step:6281/10000 train_time:262008ms step_avg:41.71ms +[2025-09-11 09:20:39] [Rank 0] step:6281/10000 train_time:262008ms step_avg:41.71ms +[2025-09-11 09:20:40] [Rank 0] step:6301/10000 train_time:262695ms step_avg:41.69ms +[2025-09-11 09:20:40] [Rank 0] step:6301/10000 train_time:262695ms step_avg:41.69ms +[2025-09-11 09:20:41] [Rank 0] step:6321/10000 train_time:263386ms step_avg:41.67ms +[2025-09-11 09:20:41] [Rank 0] step:6321/10000 train_time:263386ms step_avg:41.67ms +[2025-09-11 09:20:41] [Rank 0] step:6341/10000 train_time:264081ms step_avg:41.65ms +[2025-09-11 09:20:41] [Rank 0] step:6341/10000 train_time:264081ms step_avg:41.65ms +[2025-09-11 09:20:42] [Rank 0] step:6361/10000 train_time:264770ms step_avg:41.62ms +[2025-09-11 09:20:42] [Rank 0] step:6361/10000 train_time:264770ms step_avg:41.62ms +[2025-09-11 09:20:43] [Rank 0] step:6381/10000 train_time:265458ms step_avg:41.60ms +[2025-09-11 09:20:43] [Rank 0] step:6381/10000 train_time:265458ms step_avg:41.60ms +[2025-09-11 09:20:43] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 09:20:43] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 09:20:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 09:20:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 09:20:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 09:20:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 09:20:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:20:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:20:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 09:20:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 09:20:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 09:20:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 09:20:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 09:20:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 09:20:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 09:20:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 09:20:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 09:20:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 09:20:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 09:20:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 09:20:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 09:20:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 09:20:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 09:20:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 09:20:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 09:20:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 09:20:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 09:20:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 09:20:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 09:20:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 09:20:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 09:20:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 09:20:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 09:20:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 09:20:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 09:20:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 09:20:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 09:20:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 09:20:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 09:20:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 09:20:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 09:20:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 09:20:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 09:20:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 09:20:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:20:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:20:53] [Rank 0] PRINT: step:6400/10000 val_loss:4.3650 total_sharp:5.1648e-04 L1_sharp:8.8624e-03 L2_sharp:1.1133e-03 L3_sharp:3.2466e-04 L4_sharp:7.7667e-04 L5_sharp:1.1282e-03 L6_sharp:1.0186e-03 L7_sharp:1.4054e-03 L8_sharp:2.6741e-03 L9_sharp:3.0531e-03 L10_sharp:3.5621e-03 L11_sharp:4.9791e-03 L12_sharp:2.8230e-02 total_fnorm:2.9375e+01 total_l1_linf:5.2224e+04 total_spectral:1.4812e+01 L1_fnorm:1.1328e+00 L2_fnorm:1.0859e+00 L3_fnorm:1.1094e+00 L4_fnorm:1.1250e+00 L5_fnorm:1.1172e+00 L6_fnorm:1.1328e+00 L7_fnorm:1.1328e+00 L8_fnorm:1.1172e+00 L9_fnorm:1.1250e+00 L10_fnorm:1.1328e+00 L11_fnorm:1.1328e+00 L12_fnorm:1.1328e+00 L1_l1linf:2.8125e-01 L2_l1linf:2.6953e-01 L3_l1linf:2.6172e-01 L4_l1linf:2.6367e-01 L5_l1linf:2.6367e-01 L6_l1linf:2.6562e-01 L7_l1linf:2.6758e-01 L8_l1linf:2.5781e-01 L9_l1linf:2.4902e-01 L10_l1linf:2.4512e-01 L11_l1linf:2.4512e-01 L12_l1linf:2.6367e-01 L1_spectral:1.4831e-02 L2_spectral:1.4335e-02 L3_spectral:1.4364e-02 L4_spectral:1.4604e-02 L5_spectral:1.4487e-02 L6_spectral:1.4617e-02 L7_spectral:1.4671e-02 L8_spectral:1.4548e-02 L9_spectral:1.4732e-02 L10_spectral:1.4685e-02 L11_spectral:1.4767e-02 L12_spectral:1.4637e-02 train_time:266125ms step_avg:41.58ms +[2025-09-11 09:20:53] [Rank 0] PRINT: step:6400/10000 
val_loss:4.3650 total_sharp:5.1648e-04 L1_sharp:8.8624e-03 L2_sharp:1.1133e-03 L3_sharp:3.2466e-04 L4_sharp:7.7667e-04 L5_sharp:1.1282e-03 L6_sharp:1.0186e-03 L7_sharp:1.4054e-03 L8_sharp:2.6741e-03 L9_sharp:3.0531e-03 L10_sharp:3.5621e-03 L11_sharp:4.9791e-03 L12_sharp:2.8230e-02 total_fnorm:2.9375e+01 total_l1_linf:5.2224e+04 total_spectral:1.4812e+01 L1_fnorm:1.1328e+00 L2_fnorm:1.0859e+00 L3_fnorm:1.1094e+00 L4_fnorm:1.1250e+00 L5_fnorm:1.1172e+00 L6_fnorm:1.1328e+00 L7_fnorm:1.1328e+00 L8_fnorm:1.1172e+00 L9_fnorm:1.1250e+00 L10_fnorm:1.1328e+00 L11_fnorm:1.1328e+00 L12_fnorm:1.1328e+00 L1_l1linf:2.8125e-01 L2_l1linf:2.6953e-01 L3_l1linf:2.6172e-01 L4_l1linf:2.6367e-01 L5_l1linf:2.6367e-01 L6_l1linf:2.6562e-01 L7_l1linf:2.6758e-01 L8_l1linf:2.5781e-01 L9_l1linf:2.4902e-01 L10_l1linf:2.4512e-01 L11_l1linf:2.4512e-01 L12_l1linf:2.6367e-01 L1_spectral:1.4831e-02 L2_spectral:1.4335e-02 L3_spectral:1.4364e-02 L4_spectral:1.4604e-02 L5_spectral:1.4487e-02 L6_spectral:1.4617e-02 L7_spectral:1.4671e-02 L8_spectral:1.4548e-02 L9_spectral:1.4732e-02 L10_spectral:1.4685e-02 L11_spectral:1.4767e-02 L12_spectral:1.4637e-02 train_time:266125ms step_avg:41.58ms +[2025-09-11 09:20:55] [Rank 0] step:6401/10000 train_time:267300ms step_avg:41.76ms +[2025-09-11 09:20:55] [Rank 0] step:6401/10000 train_time:267300ms step_avg:41.76ms +[2025-09-11 09:20:55] [Rank 0] step:6421/10000 train_time:268017ms step_avg:41.74ms +[2025-09-11 09:20:55] [Rank 0] step:6421/10000 train_time:268017ms step_avg:41.74ms +[2025-09-11 09:20:56] [Rank 0] step:6441/10000 train_time:268705ms step_avg:41.72ms +[2025-09-11 09:20:56] [Rank 0] step:6441/10000 train_time:268705ms step_avg:41.72ms +[2025-09-11 09:20:57] [Rank 0] step:6461/10000 train_time:269394ms step_avg:41.70ms +[2025-09-11 09:20:57] [Rank 0] step:6461/10000 train_time:269394ms step_avg:41.70ms +[2025-09-11 09:20:57] [Rank 0] step:6481/10000 train_time:270085ms step_avg:41.67ms +[2025-09-11 09:20:57] [Rank 0] step:6481/10000 
train_time:270085ms step_avg:41.67ms +[2025-09-11 09:20:58] [Rank 0] step:6501/10000 train_time:270776ms step_avg:41.65ms +[2025-09-11 09:20:58] [Rank 0] step:6501/10000 train_time:270776ms step_avg:41.65ms +[2025-09-11 09:20:59] [Rank 0] step:6521/10000 train_time:271465ms step_avg:41.63ms +[2025-09-11 09:20:59] [Rank 0] step:6521/10000 train_time:271465ms step_avg:41.63ms +[2025-09-11 09:20:59] [Rank 0] step:6541/10000 train_time:272151ms step_avg:41.61ms +[2025-09-11 09:20:59] [Rank 0] step:6541/10000 train_time:272151ms step_avg:41.61ms +[2025-09-11 09:21:00] [Rank 0] step:6561/10000 train_time:272841ms step_avg:41.59ms +[2025-09-11 09:21:00] [Rank 0] step:6561/10000 train_time:272841ms step_avg:41.59ms +[2025-09-11 09:21:01] [Rank 0] step:6581/10000 train_time:273531ms step_avg:41.56ms +[2025-09-11 09:21:01] [Rank 0] step:6581/10000 train_time:273531ms step_avg:41.56ms +[2025-09-11 09:21:01] [Rank 0] step:6601/10000 train_time:274219ms step_avg:41.54ms +[2025-09-11 09:21:01] [Rank 0] step:6601/10000 train_time:274219ms step_avg:41.54ms +[2025-09-11 09:21:02] [Rank 0] step:6621/10000 train_time:274910ms step_avg:41.52ms +[2025-09-11 09:21:02] [Rank 0] step:6621/10000 train_time:274910ms step_avg:41.52ms +[2025-09-11 09:21:03] [Rank 0] step:6641/10000 train_time:275600ms step_avg:41.50ms +[2025-09-11 09:21:03] [Rank 0] step:6641/10000 train_time:275600ms step_avg:41.50ms +[2025-09-11 09:21:04] [Rank 0] step:6661/10000 train_time:276289ms step_avg:41.48ms +[2025-09-11 09:21:04] [Rank 0] step:6661/10000 train_time:276289ms step_avg:41.48ms +[2025-09-11 09:21:04] [Rank 0] step:6681/10000 train_time:276986ms step_avg:41.46ms +[2025-09-11 09:21:04] [Rank 0] step:6681/10000 train_time:276986ms step_avg:41.46ms +[2025-09-11 09:21:05] [Rank 0] step:6701/10000 train_time:277681ms step_avg:41.44ms +[2025-09-11 09:21:05] [Rank 0] step:6701/10000 train_time:277681ms step_avg:41.44ms +[2025-09-11 09:21:06] [Rank 0] step:6721/10000 train_time:278377ms step_avg:41.42ms 
+[2025-09-11 09:21:06] [Rank 0] step:6721/10000 train_time:278377ms step_avg:41.42ms +[2025-09-11 09:21:06] [Rank 0] step:6741/10000 train_time:279073ms step_avg:41.40ms +[2025-09-11 09:21:06] [Rank 0] step:6741/10000 train_time:279073ms step_avg:41.40ms +[2025-09-11 09:21:07] [Rank 0] step:6761/10000 train_time:279775ms step_avg:41.38ms +[2025-09-11 09:21:07] [Rank 0] step:6761/10000 train_time:279775ms step_avg:41.38ms +[2025-09-11 09:21:08] [Rank 0] step:6781/10000 train_time:280471ms step_avg:41.36ms +[2025-09-11 09:21:08] [Rank 0] step:6781/10000 train_time:280471ms step_avg:41.36ms +[2025-09-11 09:21:08] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 09:21:08] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 09:21:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 09:21:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 09:21:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 09:21:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 09:21:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:21:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:21:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 09:21:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 09:21:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 09:21:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 09:21:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 09:21:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 09:21:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 09:21:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 09:21:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 09:21:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 09:21:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 09:21:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 09:21:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 09:21:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 09:21:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 09:21:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 09:21:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 09:21:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 09:21:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 09:21:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 09:21:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 09:21:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 09:21:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 09:21:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 09:21:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 09:21:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 09:21:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 09:21:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 09:21:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 09:21:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 09:21:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 09:21:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 09:21:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 09:21:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 09:21:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 09:21:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 09:21:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:21:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:21:18] [Rank 0] PRINT: step:6800/10000 val_loss:4.3300 total_sharp:3.9146e-04 L1_sharp:4.0813e-03 L2_sharp:7.8421e-04 L3_sharp:3.7516e-04 L4_sharp:3.5709e-04 L5_sharp:9.8170e-04 L6_sharp:1.6295e-03 L7_sharp:1.3337e-03 L8_sharp:2.3650e-03 L9_sharp:2.7092e-03 L10_sharp:3.7169e-03 L11_sharp:4.8185e-03 L12_sharp:3.9079e-02 total_fnorm:2.8250e+01 total_l1_linf:4.9152e+04 total_spectral:1.4250e+01 L1_fnorm:1.0000e+00 L2_fnorm:9.5703e-01 L3_fnorm:9.8047e-01 L4_fnorm:9.9609e-01 L5_fnorm:9.8828e-01 L6_fnorm:9.9609e-01 L7_fnorm:1.0000e+00 L8_fnorm:9.8047e-01 L9_fnorm:9.9609e-01 L10_fnorm:9.9609e-01 L11_fnorm:1.0000e+00 L12_fnorm:9.9609e-01 L1_l1linf:2.3730e-01 L2_l1linf:2.3535e-01 L3_l1linf:2.2461e-01 L4_l1linf:2.2754e-01 L5_l1linf:2.3145e-01 L6_l1linf:2.3047e-01 L7_l1linf:2.3047e-01 L8_l1linf:2.2070e-01 L9_l1linf:2.1484e-01 L10_l1linf:2.0801e-01 L11_l1linf:2.0215e-01 L12_l1linf:2.2852e-01 L1_spectral:1.3360e-02 L2_spectral:1.2806e-02 L3_spectral:1.3080e-02 L4_spectral:1.3112e-02 L5_spectral:1.3036e-02 L6_spectral:1.3227e-02 L7_spectral:1.3140e-02 L8_spectral:1.3073e-02 L9_spectral:1.3232e-02 L10_spectral:1.3266e-02 L11_spectral:1.3274e-02 L12_spectral:1.3110e-02 train_time:281146ms step_avg:41.34ms +[2025-09-11 09:21:18] [Rank 0] PRINT: step:6800/10000 val_loss:4.3300 total_sharp:3.9146e-04 L1_sharp:4.0813e-03 L2_sharp:7.8421e-04 L3_sharp:3.7516e-04 L4_sharp:3.5709e-04 L5_sharp:9.8170e-04 L6_sharp:1.6295e-03 L7_sharp:1.3337e-03 L8_sharp:2.3650e-03 L9_sharp:2.7092e-03 L10_sharp:3.7169e-03 L11_sharp:4.8185e-03 L12_sharp:3.9079e-02 total_fnorm:2.8250e+01 total_l1_linf:4.9152e+04 total_spectral:1.4250e+01 L1_fnorm:1.0000e+00 L2_fnorm:9.5703e-01 L3_fnorm:9.8047e-01 L4_fnorm:9.9609e-01 L5_fnorm:9.8828e-01 L6_fnorm:9.9609e-01 L7_fnorm:1.0000e+00 L8_fnorm:9.8047e-01 L9_fnorm:9.9609e-01 L10_fnorm:9.9609e-01 L11_fnorm:1.0000e+00 L12_fnorm:9.9609e-01 L1_l1linf:2.3730e-01 L2_l1linf:2.3535e-01 L3_l1linf:2.2461e-01 L4_l1linf:2.2754e-01 L5_l1linf:2.3145e-01 
L6_l1linf:2.3047e-01 L7_l1linf:2.3047e-01 L8_l1linf:2.2070e-01 L9_l1linf:2.1484e-01 L10_l1linf:2.0801e-01 L11_l1linf:2.0215e-01 L12_l1linf:2.2852e-01 L1_spectral:1.3360e-02 L2_spectral:1.2806e-02 L3_spectral:1.3080e-02 L4_spectral:1.3112e-02 L5_spectral:1.3036e-02 L6_spectral:1.3227e-02 L7_spectral:1.3140e-02 L8_spectral:1.3073e-02 L9_spectral:1.3232e-02 L10_spectral:1.3266e-02 L11_spectral:1.3274e-02 L12_spectral:1.3110e-02 train_time:281146ms step_avg:41.34ms +[2025-09-11 09:21:20] [Rank 0] step:6801/10000 train_time:282321ms step_avg:41.51ms +[2025-09-11 09:21:20] [Rank 0] step:6801/10000 train_time:282321ms step_avg:41.51ms +[2025-09-11 09:21:20] [Rank 0] step:6821/10000 train_time:283034ms step_avg:41.49ms +[2025-09-11 09:21:20] [Rank 0] step:6821/10000 train_time:283034ms step_avg:41.49ms +[2025-09-11 09:21:21] [Rank 0] step:6841/10000 train_time:283734ms step_avg:41.48ms +[2025-09-11 09:21:21] [Rank 0] step:6841/10000 train_time:283734ms step_avg:41.48ms +[2025-09-11 09:21:22] [Rank 0] step:6861/10000 train_time:284432ms step_avg:41.46ms +[2025-09-11 09:21:22] [Rank 0] step:6861/10000 train_time:284432ms step_avg:41.46ms +[2025-09-11 09:21:22] [Rank 0] step:6881/10000 train_time:285130ms step_avg:41.44ms +[2025-09-11 09:21:22] [Rank 0] step:6881/10000 train_time:285130ms step_avg:41.44ms +[2025-09-11 09:21:23] [Rank 0] step:6901/10000 train_time:285825ms step_avg:41.42ms +[2025-09-11 09:21:23] [Rank 0] step:6901/10000 train_time:285825ms step_avg:41.42ms +[2025-09-11 09:21:24] [Rank 0] step:6921/10000 train_time:286520ms step_avg:41.40ms +[2025-09-11 09:21:24] [Rank 0] step:6921/10000 train_time:286520ms step_avg:41.40ms +[2025-09-11 09:21:24] [Rank 0] step:6941/10000 train_time:287218ms step_avg:41.38ms +[2025-09-11 09:21:24] [Rank 0] step:6941/10000 train_time:287218ms step_avg:41.38ms +[2025-09-11 09:21:25] [Rank 0] step:6961/10000 train_time:287915ms step_avg:41.36ms +[2025-09-11 09:21:25] [Rank 0] step:6961/10000 train_time:287915ms step_avg:41.36ms 
+[2025-09-11 09:21:26] [Rank 0] step:6981/10000 train_time:288614ms step_avg:41.34ms +[2025-09-11 09:21:26] [Rank 0] step:6981/10000 train_time:288614ms step_avg:41.34ms +[2025-09-11 09:21:27] [Rank 0] step:7001/10000 train_time:289310ms step_avg:41.32ms +[2025-09-11 09:21:27] [Rank 0] step:7001/10000 train_time:289310ms step_avg:41.32ms +[2025-09-11 09:21:27] [Rank 0] step:7021/10000 train_time:290007ms step_avg:41.31ms +[2025-09-11 09:21:27] [Rank 0] step:7021/10000 train_time:290007ms step_avg:41.31ms +[2025-09-11 09:21:28] [Rank 0] step:7041/10000 train_time:290703ms step_avg:41.29ms +[2025-09-11 09:21:28] [Rank 0] step:7041/10000 train_time:290703ms step_avg:41.29ms +[2025-09-11 09:21:29] [Rank 0] step:7061/10000 train_time:291401ms step_avg:41.27ms +[2025-09-11 09:21:29] [Rank 0] step:7061/10000 train_time:291401ms step_avg:41.27ms +[2025-09-11 09:21:29] [Rank 0] step:7081/10000 train_time:292097ms step_avg:41.25ms +[2025-09-11 09:21:29] [Rank 0] step:7081/10000 train_time:292097ms step_avg:41.25ms +[2025-09-11 09:21:30] [Rank 0] step:7101/10000 train_time:292794ms step_avg:41.23ms +[2025-09-11 09:21:30] [Rank 0] step:7101/10000 train_time:292794ms step_avg:41.23ms +[2025-09-11 09:21:31] [Rank 0] step:7121/10000 train_time:293492ms step_avg:41.21ms +[2025-09-11 09:21:31] [Rank 0] step:7121/10000 train_time:293492ms step_avg:41.21ms +[2025-09-11 09:21:31] [Rank 0] step:7141/10000 train_time:294188ms step_avg:41.20ms +[2025-09-11 09:21:31] [Rank 0] step:7141/10000 train_time:294188ms step_avg:41.20ms +[2025-09-11 09:21:32] [Rank 0] step:7161/10000 train_time:294887ms step_avg:41.18ms +[2025-09-11 09:21:32] [Rank 0] step:7161/10000 train_time:294887ms step_avg:41.18ms +[2025-09-11 09:21:33] [Rank 0] step:7181/10000 train_time:295582ms step_avg:41.16ms +[2025-09-11 09:21:33] [Rank 0] step:7181/10000 train_time:295582ms step_avg:41.16ms +[2025-09-11 09:21:34] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 09:21:34] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 09:21:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 09:21:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 09:21:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 09:21:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 09:21:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:21:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:21:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 09:21:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 09:21:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 09:21:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 09:21:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 09:21:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 09:21:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 09:21:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 09:21:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 09:21:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 09:21:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 09:21:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 09:21:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 09:21:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 09:21:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 09:21:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 09:21:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 09:21:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 09:21:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 09:21:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 09:21:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 09:21:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 09:21:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 09:21:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 09:21:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 09:21:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 09:21:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 09:21:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 09:21:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 09:21:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 09:21:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 09:21:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 09:21:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 09:21:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 09:21:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 09:21:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 09:21:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:21:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:21:45] [Rank 0] PRINT: step:7200/10000 val_loss:4.2993 total_sharp:2.8645e-04 L1_sharp:3.3350e-03 L2_sharp:6.0157e-04 L3_sharp:4.2788e-04 L4_sharp:4.9634e-04 L5_sharp:1.1382e-03 L6_sharp:1.2499e-03 L7_sharp:1.2452e-03 L8_sharp:2.3811e-03 L9_sharp:2.6129e-03 L10_sharp:3.2498e-03 L11_sharp:4.3540e-03 L12_sharp:1.9055e-02 total_fnorm:2.4125e+01 total_l1_linf:3.9424e+04 total_spectral:1.2188e+01 L1_fnorm:8.7109e-01 L2_fnorm:8.3203e-01 L3_fnorm:8.4766e-01 L4_fnorm:8.5938e-01 L5_fnorm:8.5547e-01 L6_fnorm:8.6328e-01 L7_fnorm:8.6719e-01 L8_fnorm:8.4766e-01 L9_fnorm:8.5547e-01 L10_fnorm:8.5547e-01 L11_fnorm:8.5547e-01 L12_fnorm:8.5547e-01 L1_l1linf:1.9824e-01 L2_l1linf:1.9336e-01 L3_l1linf:1.8555e-01 L4_l1linf:1.8848e-01 L5_l1linf:1.8848e-01 L6_l1linf:1.9043e-01 L7_l1linf:1.8750e-01 L8_l1linf:1.8066e-01 L9_l1linf:1.7383e-01 L10_l1linf:1.6992e-01 L11_l1linf:1.6406e-01 L12_l1linf:1.9629e-01 L1_spectral:1.1680e-02 L2_spectral:1.1225e-02 L3_spectral:1.1538e-02 L4_spectral:1.1645e-02 L5_spectral:1.1512e-02 L6_spectral:1.1574e-02 L7_spectral:1.1700e-02 L8_spectral:1.1451e-02 L9_spectral:1.1703e-02 L10_spectral:1.1609e-02 L11_spectral:1.1652e-02 L12_spectral:1.1664e-02 train_time:296259ms step_avg:41.15ms +[2025-09-11 09:21:45] [Rank 0] PRINT: step:7200/10000 
val_loss:4.2993 total_sharp:2.8645e-04 L1_sharp:3.3350e-03 L2_sharp:6.0157e-04 L3_sharp:4.2788e-04 L4_sharp:4.9634e-04 L5_sharp:1.1382e-03 L6_sharp:1.2499e-03 L7_sharp:1.2452e-03 L8_sharp:2.3811e-03 L9_sharp:2.6129e-03 L10_sharp:3.2498e-03 L11_sharp:4.3540e-03 L12_sharp:1.9055e-02 total_fnorm:2.4125e+01 total_l1_linf:3.9424e+04 total_spectral:1.2188e+01 L1_fnorm:8.7109e-01 L2_fnorm:8.3203e-01 L3_fnorm:8.4766e-01 L4_fnorm:8.5938e-01 L5_fnorm:8.5547e-01 L6_fnorm:8.6328e-01 L7_fnorm:8.6719e-01 L8_fnorm:8.4766e-01 L9_fnorm:8.5547e-01 L10_fnorm:8.5547e-01 L11_fnorm:8.5547e-01 L12_fnorm:8.5547e-01 L1_l1linf:1.9824e-01 L2_l1linf:1.9336e-01 L3_l1linf:1.8555e-01 L4_l1linf:1.8848e-01 L5_l1linf:1.8848e-01 L6_l1linf:1.9043e-01 L7_l1linf:1.8750e-01 L8_l1linf:1.8066e-01 L9_l1linf:1.7383e-01 L10_l1linf:1.6992e-01 L11_l1linf:1.6406e-01 L12_l1linf:1.9629e-01 L1_spectral:1.1680e-02 L2_spectral:1.1225e-02 L3_spectral:1.1538e-02 L4_spectral:1.1645e-02 L5_spectral:1.1512e-02 L6_spectral:1.1574e-02 L7_spectral:1.1700e-02 L8_spectral:1.1451e-02 L9_spectral:1.1703e-02 L10_spectral:1.1609e-02 L11_spectral:1.1652e-02 L12_spectral:1.1664e-02 train_time:296259ms step_avg:41.15ms +[2025-09-11 09:21:46] [Rank 0] step:7201/10000 train_time:297449ms step_avg:41.31ms +[2025-09-11 09:21:46] [Rank 0] step:7201/10000 train_time:297449ms step_avg:41.31ms +[2025-09-11 09:21:47] [Rank 0] step:7221/10000 train_time:298170ms step_avg:41.29ms +[2025-09-11 09:21:47] [Rank 0] step:7221/10000 train_time:298170ms step_avg:41.29ms +[2025-09-11 09:21:47] [Rank 0] step:7241/10000 train_time:298868ms step_avg:41.27ms +[2025-09-11 09:21:47] [Rank 0] step:7241/10000 train_time:298868ms step_avg:41.27ms +[2025-09-11 09:21:48] [Rank 0] step:7261/10000 train_time:299567ms step_avg:41.26ms +[2025-09-11 09:21:48] [Rank 0] step:7261/10000 train_time:299567ms step_avg:41.26ms +[2025-09-11 09:21:49] [Rank 0] step:7281/10000 train_time:300269ms step_avg:41.24ms +[2025-09-11 09:21:49] [Rank 0] step:7281/10000 
train_time:300269ms step_avg:41.24ms +[2025-09-11 09:21:49] [Rank 0] step:7301/10000 train_time:300965ms step_avg:41.22ms +[2025-09-11 09:21:49] [Rank 0] step:7301/10000 train_time:300965ms step_avg:41.22ms +[2025-09-11 09:21:50] [Rank 0] step:7321/10000 train_time:301661ms step_avg:41.20ms +[2025-09-11 09:21:50] [Rank 0] step:7321/10000 train_time:301661ms step_avg:41.20ms +[2025-09-11 09:21:51] [Rank 0] step:7341/10000 train_time:302360ms step_avg:41.19ms +[2025-09-11 09:21:51] [Rank 0] step:7341/10000 train_time:302360ms step_avg:41.19ms +[2025-09-11 09:21:51] [Rank 0] step:7361/10000 train_time:303057ms step_avg:41.17ms +[2025-09-11 09:21:51] [Rank 0] step:7361/10000 train_time:303057ms step_avg:41.17ms +[2025-09-11 09:21:52] [Rank 0] step:7381/10000 train_time:303754ms step_avg:41.15ms +[2025-09-11 09:21:52] [Rank 0] step:7381/10000 train_time:303754ms step_avg:41.15ms +[2025-09-11 09:21:53] [Rank 0] step:7401/10000 train_time:304450ms step_avg:41.14ms +[2025-09-11 09:21:53] [Rank 0] step:7401/10000 train_time:304450ms step_avg:41.14ms +[2025-09-11 09:21:54] [Rank 0] step:7421/10000 train_time:305147ms step_avg:41.12ms +[2025-09-11 09:21:54] [Rank 0] step:7421/10000 train_time:305147ms step_avg:41.12ms +[2025-09-11 09:21:54] [Rank 0] step:7441/10000 train_time:305845ms step_avg:41.10ms +[2025-09-11 09:21:54] [Rank 0] step:7441/10000 train_time:305845ms step_avg:41.10ms +[2025-09-11 09:21:55] [Rank 0] step:7461/10000 train_time:306833ms step_avg:41.12ms +[2025-09-11 09:21:55] [Rank 0] step:7461/10000 train_time:306833ms step_avg:41.12ms +[2025-09-11 09:21:56] [Rank 0] step:7481/10000 train_time:307791ms step_avg:41.14ms +[2025-09-11 09:21:56] [Rank 0] step:7481/10000 train_time:307791ms step_avg:41.14ms +[2025-09-11 09:21:57] [Rank 0] step:7501/10000 train_time:308490ms step_avg:41.13ms +[2025-09-11 09:21:57] [Rank 0] step:7501/10000 train_time:308490ms step_avg:41.13ms +[2025-09-11 09:21:58] [Rank 0] step:7521/10000 train_time:309461ms step_avg:41.15ms 
+[2025-09-11 09:21:58] [Rank 0] step:7521/10000 train_time:309461ms step_avg:41.15ms +[2025-09-11 09:21:59] [Rank 0] step:7541/10000 train_time:310158ms step_avg:41.13ms +[2025-09-11 09:21:59] [Rank 0] step:7541/10000 train_time:310158ms step_avg:41.13ms +[2025-09-11 09:21:59] [Rank 0] step:7561/10000 train_time:310857ms step_avg:41.11ms +[2025-09-11 09:21:59] [Rank 0] step:7561/10000 train_time:310857ms step_avg:41.11ms +[2025-09-11 09:22:00] [Rank 0] step:7581/10000 train_time:311555ms step_avg:41.10ms +[2025-09-11 09:22:00] [Rank 0] step:7581/10000 train_time:311555ms step_avg:41.10ms +[2025-09-11 09:22:01] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 09:22:01] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 09:22:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 09:22:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 09:22:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 09:22:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 09:22:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:22:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:22:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 09:22:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 09:22:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 09:22:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 09:22:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 09:22:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 09:22:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 09:22:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 09:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 09:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 09:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 09:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 09:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 09:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 09:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 09:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 09:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 09:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 09:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 09:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 09:22:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 09:22:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 09:22:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 09:22:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 09:22:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 09:22:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 09:22:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 09:22:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 09:22:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 09:22:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 09:22:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 09:22:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 09:22:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 09:22:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 09:22:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 09:22:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 09:22:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:22:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:22:11] [Rank 0] PRINT: step:7600/10000 val_loss:4.2653 total_sharp:3.0890e-04 L1_sharp:5.8276e-03 L2_sharp:9.3449e-04 L3_sharp:5.4529e-04 L4_sharp:9.3598e-04 L5_sharp:1.3285e-03 L6_sharp:1.3902e-03 L7_sharp:1.1188e-03 L8_sharp:2.3966e-03 L9_sharp:2.5403e-03 L10_sharp:3.3366e-03 L11_sharp:4.0437e-03 L12_sharp:1.4856e-02 total_fnorm:1.9625e+01 total_l1_linf:2.9696e+04 total_spectral:9.8750e+00 L1_fnorm:7.3828e-01 L2_fnorm:7.0312e-01 L3_fnorm:7.1094e-01 L4_fnorm:7.2266e-01 L5_fnorm:7.1094e-01 L6_fnorm:7.1875e-01 L7_fnorm:7.2656e-01 L8_fnorm:7.0312e-01 L9_fnorm:7.1094e-01 L10_fnorm:7.1094e-01 L11_fnorm:7.1484e-01 L12_fnorm:7.1094e-01 L1_l1linf:1.5234e-01 L2_l1linf:1.5723e-01 L3_l1linf:1.4746e-01 L4_l1linf:1.4746e-01 L5_l1linf:1.5039e-01 L6_l1linf:1.4941e-01 L7_l1linf:1.4844e-01 L8_l1linf:1.4551e-01 L9_l1linf:1.3477e-01 L10_l1linf:1.3379e-01 L11_l1linf:1.2988e-01 L12_l1linf:1.5625e-01 L1_spectral:9.8834e-03 L2_spectral:9.5980e-03 L3_spectral:9.9671e-03 L4_spectral:1.0007e-02 L5_spectral:9.8639e-03 L6_spectral:1.0009e-02 L7_spectral:9.9846e-03 L8_spectral:9.7900e-03 L9_spectral:9.9454e-03 L10_spectral:9.9858e-03 L11_spectral:1.0017e-02 L12_spectral:9.9995e-03 train_time:312234ms step_avg:41.08ms +[2025-09-11 09:22:11] [Rank 0] PRINT: step:7600/10000 val_loss:4.2653 total_sharp:3.0890e-04 L1_sharp:5.8276e-03 L2_sharp:9.3449e-04 L3_sharp:5.4529e-04 L4_sharp:9.3598e-04 L5_sharp:1.3285e-03 L6_sharp:1.3902e-03 L7_sharp:1.1188e-03 L8_sharp:2.3966e-03 L9_sharp:2.5403e-03 L10_sharp:3.3366e-03 L11_sharp:4.0437e-03 L12_sharp:1.4856e-02 total_fnorm:1.9625e+01 total_l1_linf:2.9696e+04 total_spectral:9.8750e+00 L1_fnorm:7.3828e-01 L2_fnorm:7.0312e-01 L3_fnorm:7.1094e-01 L4_fnorm:7.2266e-01 L5_fnorm:7.1094e-01 L6_fnorm:7.1875e-01 L7_fnorm:7.2656e-01 L8_fnorm:7.0312e-01 L9_fnorm:7.1094e-01 L10_fnorm:7.1094e-01 L11_fnorm:7.1484e-01 L12_fnorm:7.1094e-01 L1_l1linf:1.5234e-01 L2_l1linf:1.5723e-01 L3_l1linf:1.4746e-01 L4_l1linf:1.4746e-01 L5_l1linf:1.5039e-01 
L6_l1linf:1.4941e-01 L7_l1linf:1.4844e-01 L8_l1linf:1.4551e-01 L9_l1linf:1.3477e-01 L10_l1linf:1.3379e-01 L11_l1linf:1.2988e-01 L12_l1linf:1.5625e-01 L1_spectral:9.8834e-03 L2_spectral:9.5980e-03 L3_spectral:9.9671e-03 L4_spectral:1.0007e-02 L5_spectral:9.8639e-03 L6_spectral:1.0009e-02 L7_spectral:9.9846e-03 L8_spectral:9.7900e-03 L9_spectral:9.9454e-03 L10_spectral:9.9858e-03 L11_spectral:1.0017e-02 L12_spectral:9.9995e-03 train_time:312234ms step_avg:41.08ms +[2025-09-11 09:22:12] [Rank 0] step:7601/10000 train_time:313423ms step_avg:41.23ms +[2025-09-11 09:22:12] [Rank 0] step:7601/10000 train_time:313423ms step_avg:41.23ms +[2025-09-11 09:22:13] [Rank 0] step:7621/10000 train_time:314134ms step_avg:41.22ms +[2025-09-11 09:22:13] [Rank 0] step:7621/10000 train_time:314134ms step_avg:41.22ms +[2025-09-11 09:22:13] [Rank 0] step:7641/10000 train_time:314839ms step_avg:41.20ms +[2025-09-11 09:22:13] [Rank 0] step:7641/10000 train_time:314839ms step_avg:41.20ms +[2025-09-11 09:22:14] [Rank 0] step:7661/10000 train_time:315537ms step_avg:41.19ms +[2025-09-11 09:22:14] [Rank 0] step:7661/10000 train_time:315537ms step_avg:41.19ms +[2025-09-11 09:22:15] [Rank 0] step:7681/10000 train_time:316235ms step_avg:41.17ms +[2025-09-11 09:22:15] [Rank 0] step:7681/10000 train_time:316235ms step_avg:41.17ms +[2025-09-11 09:22:15] [Rank 0] step:7701/10000 train_time:316936ms step_avg:41.16ms +[2025-09-11 09:22:15] [Rank 0] step:7701/10000 train_time:316936ms step_avg:41.16ms +[2025-09-11 09:22:16] [Rank 0] step:7721/10000 train_time:317635ms step_avg:41.14ms +[2025-09-11 09:22:16] [Rank 0] step:7721/10000 train_time:317635ms step_avg:41.14ms +[2025-09-11 09:22:17] [Rank 0] step:7741/10000 train_time:318333ms step_avg:41.12ms +[2025-09-11 09:22:17] [Rank 0] step:7741/10000 train_time:318333ms step_avg:41.12ms +[2025-09-11 09:22:18] [Rank 0] step:7761/10000 train_time:319031ms step_avg:41.11ms +[2025-09-11 09:22:18] [Rank 0] step:7761/10000 train_time:319031ms step_avg:41.11ms 
+[2025-09-11 09:22:18] [Rank 0] step:7781/10000 train_time:319731ms step_avg:41.09ms +[2025-09-11 09:22:18] [Rank 0] step:7781/10000 train_time:319731ms step_avg:41.09ms +[2025-09-11 09:22:19] [Rank 0] step:7801/10000 train_time:320428ms step_avg:41.08ms +[2025-09-11 09:22:19] [Rank 0] step:7801/10000 train_time:320428ms step_avg:41.08ms +[2025-09-11 09:22:20] [Rank 0] step:7821/10000 train_time:321126ms step_avg:41.06ms +[2025-09-11 09:22:20] [Rank 0] step:7821/10000 train_time:321126ms step_avg:41.06ms +[2025-09-11 09:22:20] [Rank 0] step:7841/10000 train_time:321827ms step_avg:41.04ms +[2025-09-11 09:22:20] [Rank 0] step:7841/10000 train_time:321827ms step_avg:41.04ms +[2025-09-11 09:22:21] [Rank 0] step:7861/10000 train_time:322528ms step_avg:41.03ms +[2025-09-11 09:22:21] [Rank 0] step:7861/10000 train_time:322528ms step_avg:41.03ms +[2025-09-11 09:22:22] [Rank 0] step:7881/10000 train_time:323226ms step_avg:41.01ms +[2025-09-11 09:22:22] [Rank 0] step:7881/10000 train_time:323226ms step_avg:41.01ms +[2025-09-11 09:22:22] [Rank 0] step:7901/10000 train_time:323926ms step_avg:41.00ms +[2025-09-11 09:22:22] [Rank 0] step:7901/10000 train_time:323926ms step_avg:41.00ms +[2025-09-11 09:22:23] [Rank 0] step:7921/10000 train_time:324625ms step_avg:40.98ms +[2025-09-11 09:22:23] [Rank 0] step:7921/10000 train_time:324625ms step_avg:40.98ms +[2025-09-11 09:22:24] [Rank 0] step:7941/10000 train_time:325325ms step_avg:40.97ms +[2025-09-11 09:22:24] [Rank 0] step:7941/10000 train_time:325325ms step_avg:40.97ms +[2025-09-11 09:22:25] [Rank 0] step:7961/10000 train_time:326024ms step_avg:40.95ms +[2025-09-11 09:22:25] [Rank 0] step:7961/10000 train_time:326024ms step_avg:40.95ms +[2025-09-11 09:22:25] [Rank 0] step:7981/10000 train_time:326726ms step_avg:40.94ms +[2025-09-11 09:22:25] [Rank 0] step:7981/10000 train_time:326726ms step_avg:40.94ms +[2025-09-11 09:22:26] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 09:22:26] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 09:22:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 09:22:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 09:22:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 09:22:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 09:22:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:22:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:22:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 09:22:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 09:22:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 09:22:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 09:22:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 09:22:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 09:22:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 09:22:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 09:22:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 09:22:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 09:22:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 09:22:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 09:22:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 09:22:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 09:22:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 09:22:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 09:22:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 09:22:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 09:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 09:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 09:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 09:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 09:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 09:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 09:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 09:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 09:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 09:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 09:22:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 09:22:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 09:22:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 09:22:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 09:22:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 09:22:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 09:22:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 09:22:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 09:22:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:22:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:22:36] [Rank 0] PRINT: step:8000/10000 val_loss:4.2480 total_sharp:2.9638e-04 L1_sharp:7.2896e-03 L2_sharp:7.5244e-04 L3_sharp:5.3614e-04 L4_sharp:5.7841e-04 L5_sharp:1.7138e-03 L6_sharp:1.4775e-03 L7_sharp:1.4510e-03 L8_sharp:2.8476e-03 L9_sharp:2.2301e-03 L10_sharp:2.9357e-03 L11_sharp:3.7211e-03 L12_sharp:1.8076e-02 total_fnorm:1.6375e+01 total_l1_linf:2.3296e+04 total_spectral:8.2500e+00 L1_fnorm:6.1328e-01 L2_fnorm:5.7031e-01 L3_fnorm:5.8203e-01 L4_fnorm:5.8984e-01 L5_fnorm:5.8594e-01 L6_fnorm:5.8984e-01 L7_fnorm:5.9375e-01 L8_fnorm:5.7812e-01 L9_fnorm:5.8203e-01 L10_fnorm:5.8203e-01 L11_fnorm:5.8203e-01 L12_fnorm:5.8203e-01 L1_l1linf:1.1963e-01 L2_l1linf:1.1865e-01 L3_l1linf:1.1670e-01 L4_l1linf:1.1621e-01 L5_l1linf:1.1572e-01 L6_l1linf:1.1523e-01 L7_l1linf:1.1475e-01 L8_l1linf:1.0791e-01 L9_l1linf:1.0400e-01 L10_l1linf:1.0156e-01 L11_l1linf:9.8633e-02 L12_l1linf:1.2207e-01 L1_spectral:8.6173e-03 L2_spectral:7.8716e-03 L3_spectral:8.4670e-03 L4_spectral:8.5613e-03 L5_spectral:8.2956e-03 L6_spectral:8.3547e-03 L7_spectral:8.3866e-03 L8_spectral:8.2705e-03 L9_spectral:8.2942e-03 L10_spectral:8.3265e-03 L11_spectral:8.3497e-03 L12_spectral:8.4034e-03 train_time:327404ms step_avg:40.93ms +[2025-09-11 09:22:36] [Rank 0] PRINT: step:8000/10000 
val_loss:4.2480 total_sharp:2.9638e-04 L1_sharp:7.2896e-03 L2_sharp:7.5244e-04 L3_sharp:5.3614e-04 L4_sharp:5.7841e-04 L5_sharp:1.7138e-03 L6_sharp:1.4775e-03 L7_sharp:1.4510e-03 L8_sharp:2.8476e-03 L9_sharp:2.2301e-03 L10_sharp:2.9357e-03 L11_sharp:3.7211e-03 L12_sharp:1.8076e-02 total_fnorm:1.6375e+01 total_l1_linf:2.3296e+04 total_spectral:8.2500e+00 L1_fnorm:6.1328e-01 L2_fnorm:5.7031e-01 L3_fnorm:5.8203e-01 L4_fnorm:5.8984e-01 L5_fnorm:5.8594e-01 L6_fnorm:5.8984e-01 L7_fnorm:5.9375e-01 L8_fnorm:5.7812e-01 L9_fnorm:5.8203e-01 L10_fnorm:5.8203e-01 L11_fnorm:5.8203e-01 L12_fnorm:5.8203e-01 L1_l1linf:1.1963e-01 L2_l1linf:1.1865e-01 L3_l1linf:1.1670e-01 L4_l1linf:1.1621e-01 L5_l1linf:1.1572e-01 L6_l1linf:1.1523e-01 L7_l1linf:1.1475e-01 L8_l1linf:1.0791e-01 L9_l1linf:1.0400e-01 L10_l1linf:1.0156e-01 L11_l1linf:9.8633e-02 L12_l1linf:1.2207e-01 L1_spectral:8.6173e-03 L2_spectral:7.8716e-03 L3_spectral:8.4670e-03 L4_spectral:8.5613e-03 L5_spectral:8.2956e-03 L6_spectral:8.3547e-03 L7_spectral:8.3866e-03 L8_spectral:8.2705e-03 L9_spectral:8.2942e-03 L10_spectral:8.3265e-03 L11_spectral:8.3497e-03 L12_spectral:8.4034e-03 train_time:327404ms step_avg:40.93ms +[2025-09-11 09:22:37] [Rank 0] step:8001/10000 train_time:328698ms step_avg:41.08ms +[2025-09-11 09:22:37] [Rank 0] step:8001/10000 train_time:328698ms step_avg:41.08ms +[2025-09-11 09:22:38] [Rank 0] step:8021/10000 train_time:329414ms step_avg:41.07ms +[2025-09-11 09:22:38] [Rank 0] step:8021/10000 train_time:329414ms step_avg:41.07ms +[2025-09-11 09:22:39] [Rank 0] step:8041/10000 train_time:330113ms step_avg:41.05ms +[2025-09-11 09:22:39] [Rank 0] step:8041/10000 train_time:330113ms step_avg:41.05ms +[2025-09-11 09:22:39] [Rank 0] step:8061/10000 train_time:330815ms step_avg:41.04ms +[2025-09-11 09:22:39] [Rank 0] step:8061/10000 train_time:330815ms step_avg:41.04ms +[2025-09-11 09:22:40] [Rank 0] step:8081/10000 train_time:331513ms step_avg:41.02ms +[2025-09-11 09:22:40] [Rank 0] step:8081/10000 
train_time:331513ms step_avg:41.02ms +[2025-09-11 09:22:41] [Rank 0] step:8101/10000 train_time:332210ms step_avg:41.01ms +[2025-09-11 09:22:41] [Rank 0] step:8101/10000 train_time:332210ms step_avg:41.01ms +[2025-09-11 09:22:42] [Rank 0] step:8121/10000 train_time:332912ms step_avg:40.99ms +[2025-09-11 09:22:42] [Rank 0] step:8121/10000 train_time:332912ms step_avg:40.99ms +[2025-09-11 09:22:43] [Rank 0] step:8141/10000 train_time:334344ms step_avg:41.07ms +[2025-09-11 09:22:43] [Rank 0] step:8141/10000 train_time:334344ms step_avg:41.07ms +[2025-09-11 09:22:44] [Rank 0] step:8161/10000 train_time:335047ms step_avg:41.05ms +[2025-09-11 09:22:44] [Rank 0] step:8161/10000 train_time:335047ms step_avg:41.05ms +[2025-09-11 09:22:44] [Rank 0] step:8181/10000 train_time:335757ms step_avg:41.04ms +[2025-09-11 09:22:44] [Rank 0] step:8181/10000 train_time:335757ms step_avg:41.04ms +[2025-09-11 09:22:45] [Rank 0] step:8201/10000 train_time:336464ms step_avg:41.03ms +[2025-09-11 09:22:45] [Rank 0] step:8201/10000 train_time:336464ms step_avg:41.03ms +[2025-09-11 09:22:46] [Rank 0] step:8221/10000 train_time:337169ms step_avg:41.01ms +[2025-09-11 09:22:46] [Rank 0] step:8221/10000 train_time:337169ms step_avg:41.01ms +[2025-09-11 09:22:46] [Rank 0] step:8241/10000 train_time:337883ms step_avg:41.00ms +[2025-09-11 09:22:46] [Rank 0] step:8241/10000 train_time:337883ms step_avg:41.00ms +[2025-09-11 09:22:47] [Rank 0] step:8261/10000 train_time:338588ms step_avg:40.99ms +[2025-09-11 09:22:47] [Rank 0] step:8261/10000 train_time:338588ms step_avg:40.99ms +[2025-09-11 09:22:48] [Rank 0] step:8281/10000 train_time:339290ms step_avg:40.97ms +[2025-09-11 09:22:48] [Rank 0] step:8281/10000 train_time:339290ms step_avg:40.97ms +[2025-09-11 09:22:49] [Rank 0] step:8301/10000 train_time:339994ms step_avg:40.96ms +[2025-09-11 09:22:49] [Rank 0] step:8301/10000 train_time:339994ms step_avg:40.96ms +[2025-09-11 09:22:49] [Rank 0] step:8321/10000 train_time:340699ms step_avg:40.94ms 
+[2025-09-11 09:22:49] [Rank 0] step:8321/10000 train_time:340699ms step_avg:40.94ms +[2025-09-11 09:22:50] [Rank 0] step:8341/10000 train_time:341411ms step_avg:40.93ms +[2025-09-11 09:22:50] [Rank 0] step:8341/10000 train_time:341411ms step_avg:40.93ms +[2025-09-11 09:22:51] [Rank 0] step:8361/10000 train_time:342112ms step_avg:40.92ms +[2025-09-11 09:22:51] [Rank 0] step:8361/10000 train_time:342112ms step_avg:40.92ms +[2025-09-11 09:22:51] [Rank 0] step:8381/10000 train_time:342820ms step_avg:40.90ms +[2025-09-11 09:22:51] [Rank 0] step:8381/10000 train_time:342820ms step_avg:40.90ms +[2025-09-11 09:22:52] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 09:22:52] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 09:22:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 09:22:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 09:22:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 09:22:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 09:22:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:22:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:22:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 09:22:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 09:22:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 09:22:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 09:22:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 09:22:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 09:22:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 09:22:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 09:22:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 09:22:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 09:22:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 09:22:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 09:22:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 09:22:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 09:22:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 09:22:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 09:23:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 09:23:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 09:23:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 09:23:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 09:23:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 09:23:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 09:23:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 09:23:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 09:23:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 09:23:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 09:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 09:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 09:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 09:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 09:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 09:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 09:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 09:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 09:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 09:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 09:23:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:23:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:23:02] [Rank 0] PRINT: step:8400/10000 val_loss:4.2256 total_sharp:2.4748e-04 L1_sharp:3.2308e-03 L2_sharp:2.9556e-04 L3_sharp:8.2000e-05 L4_sharp:8.6250e-04 L5_sharp:9.1097e-04 L6_sharp:1.2751e-03 L7_sharp:1.0472e-03 L8_sharp:1.9082e-03 L9_sharp:1.8997e-03 L10_sharp:2.4066e-03 L11_sharp:3.0005e-03 L12_sharp:1.6834e-02 total_fnorm:1.2188e+01 total_l1_linf:1.5744e+04 total_spectral:6.1250e+00 L1_fnorm:4.9023e-01 L2_fnorm:4.4922e-01 L3_fnorm:4.5703e-01 L4_fnorm:4.6289e-01 L5_fnorm:4.5703e-01 L6_fnorm:4.6094e-01 L7_fnorm:4.6680e-01 L8_fnorm:4.5117e-01 L9_fnorm:4.5312e-01 L10_fnorm:4.5508e-01 L11_fnorm:4.5117e-01 L12_fnorm:4.5312e-01 L1_l1linf:8.6914e-02 L2_l1linf:8.6914e-02 L3_l1linf:8.4961e-02 L4_l1linf:8.4473e-02 L5_l1linf:8.3496e-02 L6_l1linf:8.3984e-02 L7_l1linf:8.2031e-02 L8_l1linf:7.8125e-02 L9_l1linf:7.5195e-02 L10_l1linf:7.1289e-02 L11_l1linf:6.9824e-02 L12_l1linf:8.5938e-02 L1_spectral:7.2459e-03 L2_spectral:6.3345e-03 L3_spectral:6.7712e-03 L4_spectral:6.8784e-03 L5_spectral:6.5876e-03 L6_spectral:6.6696e-03 L7_spectral:6.7236e-03 L8_spectral:6.6819e-03 L9_spectral:6.6489e-03 L10_spectral:6.6621e-03 L11_spectral:6.6813e-03 L12_spectral:6.7473e-03 train_time:343507ms step_avg:40.89ms +[2025-09-11 09:23:02] [Rank 0] PRINT: step:8400/10000 val_loss:4.2256 total_sharp:2.4748e-04 L1_sharp:3.2308e-03 L2_sharp:2.9556e-04 L3_sharp:8.2000e-05 L4_sharp:8.6250e-04 L5_sharp:9.1097e-04 L6_sharp:1.2751e-03 L7_sharp:1.0472e-03 L8_sharp:1.9082e-03 L9_sharp:1.8997e-03 L10_sharp:2.4066e-03 L11_sharp:3.0005e-03 L12_sharp:1.6834e-02 total_fnorm:1.2188e+01 total_l1_linf:1.5744e+04 total_spectral:6.1250e+00 L1_fnorm:4.9023e-01 L2_fnorm:4.4922e-01 L3_fnorm:4.5703e-01 L4_fnorm:4.6289e-01 L5_fnorm:4.5703e-01 L6_fnorm:4.6094e-01 L7_fnorm:4.6680e-01 L8_fnorm:4.5117e-01 L9_fnorm:4.5312e-01 L10_fnorm:4.5508e-01 L11_fnorm:4.5117e-01 L12_fnorm:4.5312e-01 L1_l1linf:8.6914e-02 L2_l1linf:8.6914e-02 L3_l1linf:8.4961e-02 L4_l1linf:8.4473e-02 L5_l1linf:8.3496e-02 
L6_l1linf:8.3984e-02 L7_l1linf:8.2031e-02 L8_l1linf:7.8125e-02 L9_l1linf:7.5195e-02 L10_l1linf:7.1289e-02 L11_l1linf:6.9824e-02 L12_l1linf:8.5938e-02 L1_spectral:7.2459e-03 L2_spectral:6.3345e-03 L3_spectral:6.7712e-03 L4_spectral:6.8784e-03 L5_spectral:6.5876e-03 L6_spectral:6.6696e-03 L7_spectral:6.7236e-03 L8_spectral:6.6819e-03 L9_spectral:6.6489e-03 L10_spectral:6.6621e-03 L11_spectral:6.6813e-03 L12_spectral:6.7473e-03 train_time:343507ms step_avg:40.89ms +[2025-09-11 09:23:03] [Rank 0] step:8401/10000 train_time:344687ms step_avg:41.03ms +[2025-09-11 09:23:03] [Rank 0] step:8401/10000 train_time:344687ms step_avg:41.03ms +[2025-09-11 09:23:04] [Rank 0] step:8421/10000 train_time:345411ms step_avg:41.02ms +[2025-09-11 09:23:04] [Rank 0] step:8421/10000 train_time:345411ms step_avg:41.02ms +[2025-09-11 09:23:05] [Rank 0] step:8441/10000 train_time:346120ms step_avg:41.00ms +[2025-09-11 09:23:05] [Rank 0] step:8441/10000 train_time:346120ms step_avg:41.00ms +[2025-09-11 09:23:05] [Rank 0] step:8461/10000 train_time:346826ms step_avg:40.99ms +[2025-09-11 09:23:05] [Rank 0] step:8461/10000 train_time:346826ms step_avg:40.99ms +[2025-09-11 09:23:06] [Rank 0] step:8481/10000 train_time:347535ms step_avg:40.98ms +[2025-09-11 09:23:06] [Rank 0] step:8481/10000 train_time:347535ms step_avg:40.98ms +[2025-09-11 09:23:07] [Rank 0] step:8501/10000 train_time:348239ms step_avg:40.96ms +[2025-09-11 09:23:07] [Rank 0] step:8501/10000 train_time:348239ms step_avg:40.96ms +[2025-09-11 09:23:08] [Rank 0] step:8521/10000 train_time:348944ms step_avg:40.95ms +[2025-09-11 09:23:08] [Rank 0] step:8521/10000 train_time:348944ms step_avg:40.95ms +[2025-09-11 09:23:08] [Rank 0] step:8541/10000 train_time:349649ms step_avg:40.94ms +[2025-09-11 09:23:08] [Rank 0] step:8541/10000 train_time:349649ms step_avg:40.94ms +[2025-09-11 09:23:09] [Rank 0] step:8561/10000 train_time:350360ms step_avg:40.93ms +[2025-09-11 09:23:09] [Rank 0] step:8561/10000 train_time:350360ms step_avg:40.93ms 
+[2025-09-11 09:23:10] [Rank 0] step:8581/10000 train_time:351068ms step_avg:40.91ms +[2025-09-11 09:23:10] [Rank 0] step:8581/10000 train_time:351068ms step_avg:40.91ms +[2025-09-11 09:23:10] [Rank 0] step:8601/10000 train_time:351775ms step_avg:40.90ms +[2025-09-11 09:23:10] [Rank 0] step:8601/10000 train_time:351775ms step_avg:40.90ms +[2025-09-11 09:23:11] [Rank 0] step:8621/10000 train_time:352481ms step_avg:40.89ms +[2025-09-11 09:23:11] [Rank 0] step:8621/10000 train_time:352481ms step_avg:40.89ms +[2025-09-11 09:23:12] [Rank 0] step:8641/10000 train_time:353185ms step_avg:40.87ms +[2025-09-11 09:23:12] [Rank 0] step:8641/10000 train_time:353185ms step_avg:40.87ms +[2025-09-11 09:23:13] [Rank 0] step:8661/10000 train_time:353892ms step_avg:40.86ms +[2025-09-11 09:23:13] [Rank 0] step:8661/10000 train_time:353892ms step_avg:40.86ms +[2025-09-11 09:23:13] [Rank 0] step:8681/10000 train_time:354599ms step_avg:40.85ms +[2025-09-11 09:23:13] [Rank 0] step:8681/10000 train_time:354599ms step_avg:40.85ms +[2025-09-11 09:23:14] [Rank 0] step:8701/10000 train_time:355303ms step_avg:40.83ms +[2025-09-11 09:23:14] [Rank 0] step:8701/10000 train_time:355303ms step_avg:40.83ms +[2025-09-11 09:23:15] [Rank 0] step:8721/10000 train_time:356012ms step_avg:40.82ms +[2025-09-11 09:23:15] [Rank 0] step:8721/10000 train_time:356012ms step_avg:40.82ms +[2025-09-11 09:23:15] [Rank 0] step:8741/10000 train_time:356715ms step_avg:40.81ms +[2025-09-11 09:23:15] [Rank 0] step:8741/10000 train_time:356715ms step_avg:40.81ms +[2025-09-11 09:23:16] [Rank 0] step:8761/10000 train_time:357423ms step_avg:40.80ms +[2025-09-11 09:23:16] [Rank 0] step:8761/10000 train_time:357423ms step_avg:40.80ms +[2025-09-11 09:23:17] [Rank 0] step:8781/10000 train_time:358126ms step_avg:40.78ms +[2025-09-11 09:23:17] [Rank 0] step:8781/10000 train_time:358126ms step_avg:40.78ms +[2025-09-11 09:23:17] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 09:23:17] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 09:23:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 09:23:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 09:23:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 09:23:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 09:23:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:23:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:23:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 09:23:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 09:23:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 09:23:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 09:23:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 09:23:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 09:23:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 09:23:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 09:23:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 09:23:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 09:23:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 09:23:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 09:23:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 09:23:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 09:23:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 09:23:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 09:23:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 09:23:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 09:23:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 09:23:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 09:23:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 09:23:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 09:23:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 09:23:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 09:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 09:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 09:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 09:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 09:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 09:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 09:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 09:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 09:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 09:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 09:23:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 09:23:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 09:23:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:23:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:23:27] [Rank 0] PRINT: step:8800/10000 val_loss:4.2150 total_sharp:2.3212e-04 L1_sharp:3.4457e-03 L2_sharp:5.5824e-04 L3_sharp:2.8509e-04 L4_sharp:1.0690e-03 L5_sharp:9.3025e-04 L6_sharp:8.4779e-04 L7_sharp:7.7698e-04 L8_sharp:1.7296e-03 L9_sharp:1.7692e-03 L10_sharp:2.1282e-03 L11_sharp:3.3856e-03 L12_sharp:1.3207e-02 total_fnorm:8.7500e+00 total_l1_linf:1.0112e+04 total_spectral:4.4375e+00 L1_fnorm:3.6914e-01 L2_fnorm:3.3203e-01 L3_fnorm:3.3594e-01 L4_fnorm:3.3984e-01 L5_fnorm:3.3789e-01 L6_fnorm:3.3984e-01 L7_fnorm:3.4375e-01 L8_fnorm:3.3398e-01 L9_fnorm:3.3398e-01 L10_fnorm:3.3594e-01 L11_fnorm:3.3203e-01 L12_fnorm:3.3008e-01 L1_l1linf:5.8105e-02 L2_l1linf:5.8350e-02 L3_l1linf:5.6396e-02 L4_l1linf:5.7861e-02 L5_l1linf:5.6396e-02 L6_l1linf:5.6885e-02 L7_l1linf:5.7617e-02 L8_l1linf:5.3467e-02 L9_l1linf:5.2490e-02 L10_l1linf:4.9805e-02 L11_l1linf:4.5898e-02 L12_l1linf:5.9814e-02 L1_spectral:5.7453e-03 L2_spectral:4.8010e-03 L3_spectral:5.1695e-03 L4_spectral:5.1558e-03 L5_spectral:5.0187e-03 L6_spectral:5.0170e-03 L7_spectral:5.0634e-03 L8_spectral:5.0610e-03 L9_spectral:5.0474e-03 L10_spectral:5.0026e-03 L11_spectral:5.0307e-03 L12_spectral:5.1018e-03 train_time:358809ms step_avg:40.77ms +[2025-09-11 09:23:27] [Rank 0] PRINT: step:8800/10000 
val_loss:4.2150 total_sharp:2.3212e-04 L1_sharp:3.4457e-03 L2_sharp:5.5824e-04 L3_sharp:2.8509e-04 L4_sharp:1.0690e-03 L5_sharp:9.3025e-04 L6_sharp:8.4779e-04 L7_sharp:7.7698e-04 L8_sharp:1.7296e-03 L9_sharp:1.7692e-03 L10_sharp:2.1282e-03 L11_sharp:3.3856e-03 L12_sharp:1.3207e-02 total_fnorm:8.7500e+00 total_l1_linf:1.0112e+04 total_spectral:4.4375e+00 L1_fnorm:3.6914e-01 L2_fnorm:3.3203e-01 L3_fnorm:3.3594e-01 L4_fnorm:3.3984e-01 L5_fnorm:3.3789e-01 L6_fnorm:3.3984e-01 L7_fnorm:3.4375e-01 L8_fnorm:3.3398e-01 L9_fnorm:3.3398e-01 L10_fnorm:3.3594e-01 L11_fnorm:3.3203e-01 L12_fnorm:3.3008e-01 L1_l1linf:5.8105e-02 L2_l1linf:5.8350e-02 L3_l1linf:5.6396e-02 L4_l1linf:5.7861e-02 L5_l1linf:5.6396e-02 L6_l1linf:5.6885e-02 L7_l1linf:5.7617e-02 L8_l1linf:5.3467e-02 L9_l1linf:5.2490e-02 L10_l1linf:4.9805e-02 L11_l1linf:4.5898e-02 L12_l1linf:5.9814e-02 L1_spectral:5.7453e-03 L2_spectral:4.8010e-03 L3_spectral:5.1695e-03 L4_spectral:5.1558e-03 L5_spectral:5.0187e-03 L6_spectral:5.0170e-03 L7_spectral:5.0634e-03 L8_spectral:5.0610e-03 L9_spectral:5.0474e-03 L10_spectral:5.0026e-03 L11_spectral:5.0307e-03 L12_spectral:5.1018e-03 train_time:358809ms step_avg:40.77ms +[2025-09-11 09:23:29] [Rank 0] step:8801/10000 train_time:360000ms step_avg:40.90ms +[2025-09-11 09:23:29] [Rank 0] step:8801/10000 train_time:360000ms step_avg:40.90ms +[2025-09-11 09:23:29] [Rank 0] step:8821/10000 train_time:360723ms step_avg:40.89ms +[2025-09-11 09:23:29] [Rank 0] step:8821/10000 train_time:360723ms step_avg:40.89ms +[2025-09-11 09:23:30] [Rank 0] step:8841/10000 train_time:361430ms step_avg:40.88ms +[2025-09-11 09:23:30] [Rank 0] step:8841/10000 train_time:361430ms step_avg:40.88ms +[2025-09-11 09:23:31] [Rank 0] step:8861/10000 train_time:362136ms step_avg:40.87ms +[2025-09-11 09:23:31] [Rank 0] step:8861/10000 train_time:362136ms step_avg:40.87ms +[2025-09-11 09:23:31] [Rank 0] step:8881/10000 train_time:362842ms step_avg:40.86ms +[2025-09-11 09:23:31] [Rank 0] step:8881/10000 
train_time:362842ms step_avg:40.86ms +[2025-09-11 09:23:32] [Rank 0] step:8901/10000 train_time:363550ms step_avg:40.84ms +[2025-09-11 09:23:32] [Rank 0] step:8901/10000 train_time:363550ms step_avg:40.84ms +[2025-09-11 09:23:33] [Rank 0] step:8921/10000 train_time:364254ms step_avg:40.83ms +[2025-09-11 09:23:33] [Rank 0] step:8921/10000 train_time:364254ms step_avg:40.83ms +[2025-09-11 09:23:33] [Rank 0] step:8941/10000 train_time:364962ms step_avg:40.82ms +[2025-09-11 09:23:33] [Rank 0] step:8941/10000 train_time:364962ms step_avg:40.82ms +[2025-09-11 09:23:34] [Rank 0] step:8961/10000 train_time:365677ms step_avg:40.81ms +[2025-09-11 09:23:34] [Rank 0] step:8961/10000 train_time:365677ms step_avg:40.81ms +[2025-09-11 09:23:35] [Rank 0] step:8981/10000 train_time:366386ms step_avg:40.80ms +[2025-09-11 09:23:35] [Rank 0] step:8981/10000 train_time:366386ms step_avg:40.80ms +[2025-09-11 09:23:36] [Rank 0] step:9001/10000 train_time:367088ms step_avg:40.78ms +[2025-09-11 09:23:36] [Rank 0] step:9001/10000 train_time:367088ms step_avg:40.78ms +[2025-09-11 09:23:36] [Rank 0] step:9021/10000 train_time:367795ms step_avg:40.77ms +[2025-09-11 09:23:36] [Rank 0] step:9021/10000 train_time:367795ms step_avg:40.77ms +[2025-09-11 09:23:37] [Rank 0] step:9041/10000 train_time:368504ms step_avg:40.76ms +[2025-09-11 09:23:37] [Rank 0] step:9041/10000 train_time:368504ms step_avg:40.76ms +[2025-09-11 09:23:38] [Rank 0] step:9061/10000 train_time:369208ms step_avg:40.75ms +[2025-09-11 09:23:38] [Rank 0] step:9061/10000 train_time:369208ms step_avg:40.75ms +[2025-09-11 09:23:38] [Rank 0] step:9081/10000 train_time:369916ms step_avg:40.74ms +[2025-09-11 09:23:38] [Rank 0] step:9081/10000 train_time:369916ms step_avg:40.74ms +[2025-09-11 09:23:39] [Rank 0] step:9101/10000 train_time:370625ms step_avg:40.72ms +[2025-09-11 09:23:39] [Rank 0] step:9101/10000 train_time:370625ms step_avg:40.72ms +[2025-09-11 09:23:40] [Rank 0] step:9121/10000 train_time:371336ms step_avg:40.71ms 
+[2025-09-11 09:23:40] [Rank 0] step:9121/10000 train_time:371336ms step_avg:40.71ms +[2025-09-11 09:23:41] [Rank 0] step:9141/10000 train_time:372041ms step_avg:40.70ms +[2025-09-11 09:23:41] [Rank 0] step:9141/10000 train_time:372041ms step_avg:40.70ms +[2025-09-11 09:23:41] [Rank 0] step:9161/10000 train_time:372749ms step_avg:40.69ms +[2025-09-11 09:23:41] [Rank 0] step:9161/10000 train_time:372749ms step_avg:40.69ms +[2025-09-11 09:23:42] [Rank 0] step:9181/10000 train_time:373456ms step_avg:40.68ms +[2025-09-11 09:23:42] [Rank 0] step:9181/10000 train_time:373456ms step_avg:40.68ms +[2025-09-11 09:23:43] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 09:23:43] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 09:23:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 09:23:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 09:23:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 09:23:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 09:23:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:23:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:23:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 09:23:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 09:23:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 09:23:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 09:23:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 09:23:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 09:23:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 09:23:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 09:23:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 09:23:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 09:23:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 09:23:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 09:23:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 09:23:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 09:23:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 09:23:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 09:23:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 09:23:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 09:23:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 09:23:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 09:23:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 09:23:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 09:23:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 09:23:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 09:23:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 09:23:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 09:23:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 09:23:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 09:23:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 09:23:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 09:23:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 09:23:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 09:23:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 09:23:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 09:23:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 09:23:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 09:23:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:23:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:23:53] [Rank 0] PRINT: step:9200/10000 val_loss:4.1967 total_sharp:1.8790e-04 L1_sharp:2.7934e-03 L2_sharp:3.9554e-04 L3_sharp:1.9074e-04 L4_sharp:6.0937e-04 L5_sharp:9.6359e-04 L6_sharp:1.1185e-03 L7_sharp:9.7851e-04 L8_sharp:1.6649e-03 L9_sharp:1.5832e-03 L10_sharp:1.7422e-03 L11_sharp:2.5771e-03 L12_sharp:1.8420e-02 total_fnorm:6.0000e+00 total_l1_linf:5.9520e+03 total_spectral:3.0312e+00 L1_fnorm:2.5000e-01 L2_fnorm:2.1777e-01 L3_fnorm:2.2168e-01 L4_fnorm:2.2461e-01 L5_fnorm:2.2363e-01 L6_fnorm:2.2461e-01 L7_fnorm:2.2852e-01 L8_fnorm:2.2070e-01 L9_fnorm:2.2070e-01 L10_fnorm:2.2070e-01 L11_fnorm:2.1973e-01 L12_fnorm:2.1875e-01 L1_l1linf:3.6621e-02 L2_l1linf:3.5400e-02 L3_l1linf:3.2959e-02 L4_l1linf:3.3936e-02 L5_l1linf:3.3447e-02 L6_l1linf:3.2471e-02 L7_l1linf:3.3203e-02 L8_l1linf:3.2227e-02 L9_l1linf:3.0396e-02 L10_l1linf:3.0518e-02 L11_l1linf:2.8198e-02 L12_l1linf:3.5400e-02 L1_spectral:4.2019e-03 L2_spectral:3.1730e-03 L3_spectral:3.4529e-03 L4_spectral:3.4766e-03 L5_spectral:3.3744e-03 L6_spectral:3.3897e-03 L7_spectral:3.4331e-03 L8_spectral:3.4550e-03 L9_spectral:3.3850e-03 L10_spectral:3.3824e-03 L11_spectral:3.4063e-03 L12_spectral:3.4829e-03 train_time:374146ms step_avg:40.67ms +[2025-09-11 09:23:53] [Rank 0] PRINT: step:9200/10000 val_loss:4.1967 total_sharp:1.8790e-04 L1_sharp:2.7934e-03 L2_sharp:3.9554e-04 L3_sharp:1.9074e-04 L4_sharp:6.0937e-04 L5_sharp:9.6359e-04 L6_sharp:1.1185e-03 L7_sharp:9.7851e-04 L8_sharp:1.6649e-03 L9_sharp:1.5832e-03 L10_sharp:1.7422e-03 L11_sharp:2.5771e-03 L12_sharp:1.8420e-02 total_fnorm:6.0000e+00 total_l1_linf:5.9520e+03 total_spectral:3.0312e+00 L1_fnorm:2.5000e-01 L2_fnorm:2.1777e-01 L3_fnorm:2.2168e-01 L4_fnorm:2.2461e-01 L5_fnorm:2.2363e-01 L6_fnorm:2.2461e-01 L7_fnorm:2.2852e-01 L8_fnorm:2.2070e-01 L9_fnorm:2.2070e-01 L10_fnorm:2.2070e-01 L11_fnorm:2.1973e-01 L12_fnorm:2.1875e-01 L1_l1linf:3.6621e-02 L2_l1linf:3.5400e-02 L3_l1linf:3.2959e-02 L4_l1linf:3.3936e-02 L5_l1linf:3.3447e-02 
L6_l1linf:3.2471e-02 L7_l1linf:3.3203e-02 L8_l1linf:3.2227e-02 L9_l1linf:3.0396e-02 L10_l1linf:3.0518e-02 L11_l1linf:2.8198e-02 L12_l1linf:3.5400e-02 L1_spectral:4.2019e-03 L2_spectral:3.1730e-03 L3_spectral:3.4529e-03 L4_spectral:3.4766e-03 L5_spectral:3.3744e-03 L6_spectral:3.3897e-03 L7_spectral:3.4331e-03 L8_spectral:3.4550e-03 L9_spectral:3.3850e-03 L10_spectral:3.3824e-03 L11_spectral:3.4063e-03 L12_spectral:3.4829e-03 train_time:374146ms step_avg:40.67ms +[2025-09-11 09:23:54] [Rank 0] step:9201/10000 train_time:375354ms step_avg:40.79ms +[2025-09-11 09:23:54] [Rank 0] step:9201/10000 train_time:375354ms step_avg:40.79ms +[2025-09-11 09:23:55] [Rank 0] step:9221/10000 train_time:376085ms step_avg:40.79ms +[2025-09-11 09:23:55] [Rank 0] step:9221/10000 train_time:376085ms step_avg:40.79ms +[2025-09-11 09:23:55] [Rank 0] step:9241/10000 train_time:376790ms step_avg:40.77ms +[2025-09-11 09:23:55] [Rank 0] step:9241/10000 train_time:376790ms step_avg:40.77ms +[2025-09-11 09:23:56] [Rank 0] step:9261/10000 train_time:377498ms step_avg:40.76ms +[2025-09-11 09:23:56] [Rank 0] step:9261/10000 train_time:377498ms step_avg:40.76ms +[2025-09-11 09:23:57] [Rank 0] step:9281/10000 train_time:378207ms step_avg:40.75ms +[2025-09-11 09:23:57] [Rank 0] step:9281/10000 train_time:378207ms step_avg:40.75ms +[2025-09-11 09:23:57] [Rank 0] step:9301/10000 train_time:378911ms step_avg:40.74ms +[2025-09-11 09:23:57] [Rank 0] step:9301/10000 train_time:378911ms step_avg:40.74ms +[2025-09-11 09:23:58] [Rank 0] step:9321/10000 train_time:379620ms step_avg:40.73ms +[2025-09-11 09:23:58] [Rank 0] step:9321/10000 train_time:379620ms step_avg:40.73ms +[2025-09-11 09:23:59] [Rank 0] step:9341/10000 train_time:380322ms step_avg:40.72ms +[2025-09-11 09:23:59] [Rank 0] step:9341/10000 train_time:380322ms step_avg:40.72ms +[2025-09-11 09:24:00] [Rank 0] step:9361/10000 train_time:381025ms step_avg:40.70ms +[2025-09-11 09:24:00] [Rank 0] step:9361/10000 train_time:381025ms step_avg:40.70ms 
+[2025-09-11 09:24:00] [Rank 0] step:9381/10000 train_time:381730ms step_avg:40.69ms +[2025-09-11 09:24:00] [Rank 0] step:9381/10000 train_time:381730ms step_avg:40.69ms +[2025-09-11 09:24:01] [Rank 0] step:9401/10000 train_time:382437ms step_avg:40.68ms +[2025-09-11 09:24:01] [Rank 0] step:9401/10000 train_time:382437ms step_avg:40.68ms +[2025-09-11 09:24:02] [Rank 0] step:9421/10000 train_time:383697ms step_avg:40.73ms +[2025-09-11 09:24:02] [Rank 0] step:9421/10000 train_time:383697ms step_avg:40.73ms +[2025-09-11 09:24:03] [Rank 0] step:9441/10000 train_time:384409ms step_avg:40.72ms +[2025-09-11 09:24:03] [Rank 0] step:9441/10000 train_time:384409ms step_avg:40.72ms +[2025-09-11 09:24:04] [Rank 0] step:9461/10000 train_time:385116ms step_avg:40.71ms +[2025-09-11 09:24:04] [Rank 0] step:9461/10000 train_time:385116ms step_avg:40.71ms +[2025-09-11 09:24:05] [Rank 0] step:9481/10000 train_time:386081ms step_avg:40.72ms +[2025-09-11 09:24:05] [Rank 0] step:9481/10000 train_time:386081ms step_avg:40.72ms +[2025-09-11 09:24:05] [Rank 0] step:9501/10000 train_time:386789ms step_avg:40.71ms +[2025-09-11 09:24:05] [Rank 0] step:9501/10000 train_time:386789ms step_avg:40.71ms +[2025-09-11 09:24:06] [Rank 0] step:9521/10000 train_time:387499ms step_avg:40.70ms +[2025-09-11 09:24:06] [Rank 0] step:9521/10000 train_time:387499ms step_avg:40.70ms +[2025-09-11 09:24:07] [Rank 0] step:9541/10000 train_time:388203ms step_avg:40.69ms +[2025-09-11 09:24:07] [Rank 0] step:9541/10000 train_time:388203ms step_avg:40.69ms +[2025-09-11 09:24:07] [Rank 0] step:9561/10000 train_time:388910ms step_avg:40.68ms +[2025-09-11 09:24:07] [Rank 0] step:9561/10000 train_time:388910ms step_avg:40.68ms +[2025-09-11 09:24:08] [Rank 0] step:9581/10000 train_time:389619ms step_avg:40.67ms +[2025-09-11 09:24:08] [Rank 0] step:9581/10000 train_time:389619ms step_avg:40.67ms +[2025-09-11 09:24:09] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 09:24:09] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 09:24:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 09:24:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 09:24:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 09:24:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 09:24:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:24:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:24:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 09:24:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 09:24:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 09:24:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 09:24:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 09:24:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 09:24:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 09:24:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 09:24:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 09:24:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 09:24:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 09:24:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 09:24:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 09:24:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 09:24:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 09:24:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 09:24:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 09:24:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 09:24:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 09:24:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 09:24:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 09:24:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 09:24:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 09:24:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 09:24:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 09:24:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 09:24:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 09:24:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 09:24:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 09:24:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 09:24:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 09:24:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 09:24:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 09:24:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 09:24:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 09:24:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 09:24:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:24:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:24:19] [Rank 0] PRINT: step:9600/10000 val_loss:4.1867 total_sharp:1.3549e-04 L1_sharp:1.4616e-03 L2_sharp:4.9985e-04 L3_sharp:1.9153e-04 L4_sharp:5.4156e-04 L5_sharp:6.4020e-04 L6_sharp:1.0456e-03 L7_sharp:6.8918e-04 L8_sharp:1.1344e-03 L9_sharp:1.2022e-03 L10_sharp:1.4337e-03 L11_sharp:1.9173e-03 L12_sharp:8.0857e-03 total_fnorm:3.3281e+00 total_l1_linf:2.7840e+03 total_spectral:1.6797e+00 L1_fnorm:1.4746e-01 L2_fnorm:1.2402e-01 L3_fnorm:1.2598e-01 L4_fnorm:1.2793e-01 L5_fnorm:1.2598e-01 L6_fnorm:1.2793e-01 L7_fnorm:1.2891e-01 L8_fnorm:1.2500e-01 L9_fnorm:1.2598e-01 L10_fnorm:1.2500e-01 L11_fnorm:1.2500e-01 L12_fnorm:1.2451e-01 L1_l1linf:1.9165e-02 L2_l1linf:1.6846e-02 L3_l1linf:1.7212e-02 L4_l1linf:1.6235e-02 L5_l1linf:1.6357e-02 L6_l1linf:1.5991e-02 L7_l1linf:1.6479e-02 L8_l1linf:1.6968e-02 L9_l1linf:1.4709e-02 L10_l1linf:1.4648e-02 L11_l1linf:1.3672e-02 L12_l1linf:1.7700e-02 L1_spectral:2.5822e-03 L2_spectral:1.8419e-03 L3_spectral:2.0446e-03 L4_spectral:2.0338e-03 L5_spectral:1.9560e-03 L6_spectral:1.9852e-03 L7_spectral:2.0165e-03 L8_spectral:2.0579e-03 L9_spectral:2.0009e-03 L10_spectral:1.9623e-03 L11_spectral:1.9665e-03 L12_spectral:2.0153e-03 train_time:390303ms step_avg:40.66ms +[2025-09-11 09:24:19] [Rank 0] PRINT: step:9600/10000 
val_loss:4.1867 total_sharp:1.3549e-04 L1_sharp:1.4616e-03 L2_sharp:4.9985e-04 L3_sharp:1.9153e-04 L4_sharp:5.4156e-04 L5_sharp:6.4020e-04 L6_sharp:1.0456e-03 L7_sharp:6.8918e-04 L8_sharp:1.1344e-03 L9_sharp:1.2022e-03 L10_sharp:1.4337e-03 L11_sharp:1.9173e-03 L12_sharp:8.0857e-03 total_fnorm:3.3281e+00 total_l1_linf:2.7840e+03 total_spectral:1.6797e+00 L1_fnorm:1.4746e-01 L2_fnorm:1.2402e-01 L3_fnorm:1.2598e-01 L4_fnorm:1.2793e-01 L5_fnorm:1.2598e-01 L6_fnorm:1.2793e-01 L7_fnorm:1.2891e-01 L8_fnorm:1.2500e-01 L9_fnorm:1.2598e-01 L10_fnorm:1.2500e-01 L11_fnorm:1.2500e-01 L12_fnorm:1.2451e-01 L1_l1linf:1.9165e-02 L2_l1linf:1.6846e-02 L3_l1linf:1.7212e-02 L4_l1linf:1.6235e-02 L5_l1linf:1.6357e-02 L6_l1linf:1.5991e-02 L7_l1linf:1.6479e-02 L8_l1linf:1.6968e-02 L9_l1linf:1.4709e-02 L10_l1linf:1.4648e-02 L11_l1linf:1.3672e-02 L12_l1linf:1.7700e-02 L1_spectral:2.5822e-03 L2_spectral:1.8419e-03 L3_spectral:2.0446e-03 L4_spectral:2.0338e-03 L5_spectral:1.9560e-03 L6_spectral:1.9852e-03 L7_spectral:2.0165e-03 L8_spectral:2.0579e-03 L9_spectral:2.0009e-03 L10_spectral:1.9623e-03 L11_spectral:1.9665e-03 L12_spectral:2.0153e-03 train_time:390303ms step_avg:40.66ms +[2025-09-11 09:24:20] [Rank 0] step:9601/10000 train_time:391502ms step_avg:40.78ms +[2025-09-11 09:24:20] [Rank 0] step:9601/10000 train_time:391502ms step_avg:40.78ms +[2025-09-11 09:24:21] [Rank 0] step:9621/10000 train_time:392224ms step_avg:40.77ms +[2025-09-11 09:24:21] [Rank 0] step:9621/10000 train_time:392224ms step_avg:40.77ms +[2025-09-11 09:24:21] [Rank 0] step:9641/10000 train_time:392937ms step_avg:40.76ms +[2025-09-11 09:24:21] [Rank 0] step:9641/10000 train_time:392937ms step_avg:40.76ms +[2025-09-11 09:24:22] [Rank 0] step:9661/10000 train_time:393656ms step_avg:40.75ms +[2025-09-11 09:24:22] [Rank 0] step:9661/10000 train_time:393656ms step_avg:40.75ms +[2025-09-11 09:24:23] [Rank 0] step:9681/10000 train_time:394368ms step_avg:40.74ms +[2025-09-11 09:24:23] [Rank 0] step:9681/10000 
train_time:394368ms step_avg:40.74ms +[2025-09-11 09:24:23] [Rank 0] step:9701/10000 train_time:395082ms step_avg:40.73ms +[2025-09-11 09:24:23] [Rank 0] step:9701/10000 train_time:395082ms step_avg:40.73ms +[2025-09-11 09:24:24] [Rank 0] step:9721/10000 train_time:395800ms step_avg:40.72ms +[2025-09-11 09:24:24] [Rank 0] step:9721/10000 train_time:395800ms step_avg:40.72ms +[2025-09-11 09:24:25] [Rank 0] step:9741/10000 train_time:396515ms step_avg:40.71ms +[2025-09-11 09:24:25] [Rank 0] step:9741/10000 train_time:396515ms step_avg:40.71ms +[2025-09-11 09:24:26] [Rank 0] step:9761/10000 train_time:397229ms step_avg:40.70ms +[2025-09-11 09:24:26] [Rank 0] step:9761/10000 train_time:397229ms step_avg:40.70ms +[2025-09-11 09:24:26] [Rank 0] step:9781/10000 train_time:397942ms step_avg:40.69ms +[2025-09-11 09:24:26] [Rank 0] step:9781/10000 train_time:397942ms step_avg:40.69ms +[2025-09-11 09:24:27] [Rank 0] step:9801/10000 train_time:398660ms step_avg:40.68ms +[2025-09-11 09:24:27] [Rank 0] step:9801/10000 train_time:398660ms step_avg:40.68ms +[2025-09-11 09:24:28] [Rank 0] step:9821/10000 train_time:399379ms step_avg:40.67ms +[2025-09-11 09:24:28] [Rank 0] step:9821/10000 train_time:399379ms step_avg:40.67ms +[2025-09-11 09:24:28] [Rank 0] step:9841/10000 train_time:400097ms step_avg:40.66ms +[2025-09-11 09:24:28] [Rank 0] step:9841/10000 train_time:400097ms step_avg:40.66ms +[2025-09-11 09:24:29] [Rank 0] step:9861/10000 train_time:400813ms step_avg:40.65ms +[2025-09-11 09:24:29] [Rank 0] step:9861/10000 train_time:400813ms step_avg:40.65ms +[2025-09-11 09:24:30] [Rank 0] step:9881/10000 train_time:401528ms step_avg:40.64ms +[2025-09-11 09:24:30] [Rank 0] step:9881/10000 train_time:401528ms step_avg:40.64ms +[2025-09-11 09:24:31] [Rank 0] step:9901/10000 train_time:402240ms step_avg:40.63ms +[2025-09-11 09:24:31] [Rank 0] step:9901/10000 train_time:402240ms step_avg:40.63ms +[2025-09-11 09:24:31] [Rank 0] step:9921/10000 train_time:402953ms step_avg:40.62ms 
+[2025-09-11 09:24:31] [Rank 0] step:9921/10000 train_time:402953ms step_avg:40.62ms +[2025-09-11 09:24:32] [Rank 0] step:9941/10000 train_time:403672ms step_avg:40.61ms +[2025-09-11 09:24:32] [Rank 0] step:9941/10000 train_time:403672ms step_avg:40.61ms +[2025-09-11 09:24:33] [Rank 0] step:9961/10000 train_time:404391ms step_avg:40.60ms +[2025-09-11 09:24:33] [Rank 0] step:9961/10000 train_time:404391ms step_avg:40.60ms +[2025-09-11 09:24:33] [Rank 0] step:9981/10000 train_time:405107ms step_avg:40.59ms +[2025-09-11 09:24:33] [Rank 0] step:9981/10000 train_time:405107ms step_avg:40.59ms +[2025-09-11 09:24:34] [Rank 0] step:10000/10000 train_time:405794ms step_avg:40.58ms +[2025-09-11 09:24:34] [Rank 0] step:10000/10000 train_time:405794ms step_avg:40.58ms +[2025-09-11 09:24:34] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 09:24:34] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 09:24:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 09:24:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 09:24:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 09:24:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 09:24:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:24:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:24:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 09:24:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 09:24:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 09:24:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 09:24:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 09:24:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 09:24:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 09:24:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 09:24:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 09:24:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 09:24:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 09:24:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 09:24:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 09:24:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 09:24:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 09:24:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 09:24:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 09:24:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 09:24:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 09:24:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 09:24:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 09:24:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 09:24:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 09:24:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 09:24:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 09:24:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 09:24:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 09:24:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 09:24:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 09:24:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 09:24:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 09:24:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 09:24:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 09:24:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 09:24:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 09:24:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 09:24:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:24:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:24:45] [Rank 0] PRINT: step:10000/10000 val_loss:4.1840 total_sharp:8.2025e-05 L1_sharp:1.3664e-03 L2_sharp:2.6157e-04 L3_sharp:1.3920e-04 L4_sharp:3.5120e-04 L5_sharp:4.6498e-04 L6_sharp:8.0686e-04 L7_sharp:6.6960e-04 L8_sharp:9.0985e-04 L9_sharp:8.5716e-04 L10_sharp:9.5570e-04 L11_sharp:1.3680e-03 L12_sharp:6.1667e-03 total_fnorm:1.2656e+00 total_l1_linf:7.7200e+02 total_spectral:6.3672e-01 L1_fnorm:5.7617e-02 L2_fnorm:4.7119e-02 L3_fnorm:4.8584e-02 L4_fnorm:4.9316e-02 L5_fnorm:4.8584e-02 L6_fnorm:4.8828e-02 L7_fnorm:4.9805e-02 L8_fnorm:4.8584e-02 L9_fnorm:4.8584e-02 L10_fnorm:4.8584e-02 L11_fnorm:4.8340e-02 L12_fnorm:4.8096e-02 L1_l1linf:6.1340e-03 L2_l1linf:5.0049e-03 L3_l1linf:4.7913e-03 L4_l1linf:4.7913e-03 L5_l1linf:4.7913e-03 L6_l1linf:4.8523e-03 L7_l1linf:4.7913e-03 L8_l1linf:5.5847e-03 L9_l1linf:4.3945e-03 L10_l1linf:4.1504e-03 L11_l1linf:4.3030e-03 L12_l1linf:5.5237e-03 L1_spectral:1.0637e-03 L2_spectral:7.1071e-04 L3_spectral:8.0641e-04 L4_spectral:7.9891e-04 L5_spectral:7.8129e-04 L6_spectral:7.7730e-04 L7_spectral:7.8382e-04 L8_spectral:8.2080e-04 L9_spectral:7.8565e-04 L10_spectral:7.8012e-04 L11_spectral:7.8488e-04 L12_spectral:8.0860e-04 train_time:405814ms step_avg:40.58ms +[2025-09-11 09:24:45] [Rank 0] PRINT: step:10000/10000 val_loss:4.1840 total_sharp:8.2025e-05 L1_sharp:1.3664e-03 L2_sharp:2.6157e-04 L3_sharp:1.3920e-04 L4_sharp:3.5120e-04 L5_sharp:4.6498e-04 L6_sharp:8.0686e-04 L7_sharp:6.6960e-04 L8_sharp:9.0985e-04 L9_sharp:8.5716e-04 L10_sharp:9.5570e-04 L11_sharp:1.3680e-03 L12_sharp:6.1667e-03 total_fnorm:1.2656e+00 total_l1_linf:7.7200e+02 total_spectral:6.3672e-01 L1_fnorm:5.7617e-02 L2_fnorm:4.7119e-02 L3_fnorm:4.8584e-02 L4_fnorm:4.9316e-02 L5_fnorm:4.8584e-02 L6_fnorm:4.8828e-02 L7_fnorm:4.9805e-02 L8_fnorm:4.8584e-02 L9_fnorm:4.8584e-02 L10_fnorm:4.8584e-02 L11_fnorm:4.8340e-02 L12_fnorm:4.8096e-02 L1_l1linf:6.1340e-03 L2_l1linf:5.0049e-03 L3_l1linf:4.7913e-03 L4_l1linf:4.7913e-03 L5_l1linf:4.7913e-03 
L6_l1linf:4.8523e-03 L7_l1linf:4.7913e-03 L8_l1linf:5.5847e-03 L9_l1linf:4.3945e-03 L10_l1linf:4.1504e-03 L11_l1linf:4.3030e-03 L12_l1linf:5.5237e-03 L1_spectral:1.0637e-03 L2_spectral:7.1071e-04 L3_spectral:8.0641e-04 L4_spectral:7.9891e-04 L5_spectral:7.8129e-04 L6_spectral:7.7730e-04 L7_spectral:7.8382e-04 L8_spectral:8.2080e-04 L9_spectral:7.8565e-04 L10_spectral:7.8012e-04 L11_spectral:7.8488e-04 L12_spectral:8.0860e-04 train_time:405814ms step_avg:40.58ms +[2025-09-11 09:24:45] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 09:24:45 2025 --- +[2025-09-11 09:24:45] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 09:24:45 2025 --- +[2025-09-11 09:24:45] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 09:24:45] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.02_seed_44/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.02_seed_44/config.json new file mode 100644 index 0000000000000000000000000000000000000000..fe5c92c23291a5b076b6e679008fee6c83c74247 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.02_seed_44/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 44, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.01, + "muon_lr": 0.02, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "1e475e26-c895-45e5-9957-3a4355201b25", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.02_seed_44/training_log_1e475e26-c895-45e5-9957-3a4355201b25.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.02_seed_44/training_log_1e475e26-c895-45e5-9957-3a4355201b25.txt new file mode 100644 index 0000000000000000000000000000000000000000..68848a45dcb24c81c4b7b8a546248a430f45eb9d --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.02_seed_44/training_log_1e475e26-c895-45e5-9957-3a4355201b25.txt @@ -0,0 +1,4264 @@ +[2025-09-11 09:51:53] [Rank 0] PRINT: --- Script Start: Thu Sep 11 09:51:53 2025 --- +[2025-09-11 09:51:53] [Rank 0] PRINT: --- Script Start: Thu Sep 11 09:51:53 2025 --- +[2025-09-11 09:51:53] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.01, muon_lr=0.02, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 09:51:53] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.01, muon_lr=0.02, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 09:51:53] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 09:51:53] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 09:51:53] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 09:51:53] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 09:51:53] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.02_seed_44 +[2025-09-11 09:51:53] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.02_seed_44 +[2025-09-11 09:51:53] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import 
dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, 
"unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." 
+ "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention 
sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 09:51:53] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 09:51:53] [Rank 0] PRINT: Constructing model... +[2025-09-11 09:51:53] [Rank 0] PRINT: Constructing model... +[2025-09-11 09:51:54] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 09:51:54] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 09:51:54] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 09:51:54] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 09:51:54] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 09:51:54] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 09:51:54] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 09:51:54] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 09:51:54] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 09:51:54] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 09:51:57] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 09:51:57] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 09:51:57] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 09:51:57] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 09:51:57] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 09:51:57] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 09:52:02] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 09:52:02] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 09:52:02] [Rank 0] PRINT: Starting warmup... +[2025-09-11 09:52:02] [Rank 0] PRINT: Starting warmup... +[2025-09-11 09:52:43] [Rank 0] PRINT: Warmup complete. +[2025-09-11 09:52:43] [Rank 0] PRINT: Warmup complete. +[2025-09-11 09:52:43] [Rank 0] PRINT: Starting training... +[2025-09-11 09:52:43] [Rank 0] PRINT: Starting training... 
+[2025-09-11 09:52:44] [Rank 0] step:21/10000 train_time:1132ms step_avg:53.92ms +[2025-09-11 09:52:44] [Rank 0] step:21/10000 train_time:1132ms step_avg:53.92ms +[2025-09-11 09:52:45] [Rank 0] step:41/10000 train_time:1861ms step_avg:45.39ms +[2025-09-11 09:52:45] [Rank 0] step:41/10000 train_time:1861ms step_avg:45.39ms +[2025-09-11 09:52:45] [Rank 0] step:61/10000 train_time:2589ms step_avg:42.44ms +[2025-09-11 09:52:45] [Rank 0] step:61/10000 train_time:2589ms step_avg:42.44ms +[2025-09-11 09:52:46] [Rank 0] step:81/10000 train_time:3316ms step_avg:40.94ms +[2025-09-11 09:52:46] [Rank 0] step:81/10000 train_time:3316ms step_avg:40.94ms +[2025-09-11 09:52:47] [Rank 0] step:101/10000 train_time:4044ms step_avg:40.04ms +[2025-09-11 09:52:47] [Rank 0] step:101/10000 train_time:4044ms step_avg:40.04ms +[2025-09-11 09:52:48] [Rank 0] step:121/10000 train_time:4771ms step_avg:39.43ms +[2025-09-11 09:52:48] [Rank 0] step:121/10000 train_time:4771ms step_avg:39.43ms +[2025-09-11 09:52:48] [Rank 0] step:141/10000 train_time:5499ms step_avg:39.00ms +[2025-09-11 09:52:48] [Rank 0] step:141/10000 train_time:5499ms step_avg:39.00ms +[2025-09-11 09:52:49] [Rank 0] step:161/10000 train_time:6226ms step_avg:38.67ms +[2025-09-11 09:52:49] [Rank 0] step:161/10000 train_time:6226ms step_avg:38.67ms +[2025-09-11 09:52:50] [Rank 0] step:181/10000 train_time:6952ms step_avg:38.41ms +[2025-09-11 09:52:50] [Rank 0] step:181/10000 train_time:6952ms step_avg:38.41ms +[2025-09-11 09:52:50] [Rank 0] step:201/10000 train_time:7679ms step_avg:38.20ms +[2025-09-11 09:52:50] [Rank 0] step:201/10000 train_time:7679ms step_avg:38.20ms +[2025-09-11 09:52:51] [Rank 0] step:221/10000 train_time:8406ms step_avg:38.04ms +[2025-09-11 09:52:51] [Rank 0] step:221/10000 train_time:8406ms step_avg:38.04ms +[2025-09-11 09:52:52] [Rank 0] step:241/10000 train_time:9132ms step_avg:37.89ms +[2025-09-11 09:52:52] [Rank 0] step:241/10000 train_time:9132ms step_avg:37.89ms +[2025-09-11 09:52:53] [Rank 0] 
step:261/10000 train_time:9859ms step_avg:37.77ms +[2025-09-11 09:52:53] [Rank 0] step:261/10000 train_time:9859ms step_avg:37.77ms +[2025-09-11 09:52:53] [Rank 0] step:281/10000 train_time:10586ms step_avg:37.67ms +[2025-09-11 09:52:53] [Rank 0] step:281/10000 train_time:10586ms step_avg:37.67ms +[2025-09-11 09:52:54] [Rank 0] step:301/10000 train_time:11313ms step_avg:37.58ms +[2025-09-11 09:52:54] [Rank 0] step:301/10000 train_time:11313ms step_avg:37.58ms +[2025-09-11 09:52:55] [Rank 0] step:321/10000 train_time:12039ms step_avg:37.51ms +[2025-09-11 09:52:55] [Rank 0] step:321/10000 train_time:12039ms step_avg:37.51ms +[2025-09-11 09:52:56] [Rank 0] step:341/10000 train_time:12766ms step_avg:37.44ms +[2025-09-11 09:52:56] [Rank 0] step:341/10000 train_time:12766ms step_avg:37.44ms +[2025-09-11 09:52:56] [Rank 0] step:361/10000 train_time:13492ms step_avg:37.37ms +[2025-09-11 09:52:56] [Rank 0] step:361/10000 train_time:13492ms step_avg:37.37ms +[2025-09-11 09:52:57] [Rank 0] step:381/10000 train_time:14218ms step_avg:37.32ms +[2025-09-11 09:52:57] [Rank 0] step:381/10000 train_time:14218ms step_avg:37.32ms +[2025-09-11 09:52:58] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 09:52:58] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 09:53:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 09:53:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 09:53:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 09:53:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 09:53:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:53:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 09:53:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 09:53:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 09:53:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 09:53:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 09:53:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 09:53:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 09:53:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 09:53:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 09:53:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 09:53:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 09:53:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 09:53:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 09:53:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 09:53:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 09:53:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 09:53:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 09:53:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 09:53:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 09:53:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 09:53:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 09:53:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 09:53:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 09:53:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 09:53:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 09:53:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 09:53:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 09:53:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 09:53:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 09:53:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 09:53:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 09:53:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 09:53:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 09:53:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 09:53:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 09:53:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 09:53:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 09:53:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:53:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:53:46] [Rank 0] PRINT: step:400/10000 val_loss:6.0794 total_sharp:2.2831e-03 L1_sharp:1.8434e-02 L2_sharp:7.9310e-03 L3_sharp:5.0283e-03 L4_sharp:6.0247e-03 L5_sharp:6.7367e-03 L6_sharp:4.7015e-03 L7_sharp:4.5072e-03 L8_sharp:4.0314e-03 L9_sharp:4.3982e-03 L10_sharp:4.0833e-03 L11_sharp:4.3978e-03 L12_sharp:1.3835e-02 total_fnorm:4.0805e+01 total_l1_linf:1.2117e+05 total_spectral:2.0399e+01 L1_fnorm:2.4533e+00 L2_fnorm:2.4418e+00 L3_fnorm:2.4506e+00 L4_fnorm:2.4119e+00 L5_fnorm:2.3692e+00 L6_fnorm:2.3488e+00 L7_fnorm:2.3092e+00 L8_fnorm:2.2883e+00 L9_fnorm:2.2666e+00 L10_fnorm:2.2090e+00 L11_fnorm:2.1626e+00 L12_fnorm:2.0733e+00 L1_l1linf:8.1118e-01 L2_l1linf:8.0277e-01 L3_l1linf:8.0371e-01 L4_l1linf:7.9862e-01 L5_l1linf:7.8829e-01 L6_l1linf:7.9869e-01 L7_l1linf:7.9834e-01 L8_l1linf:7.9047e-01 L9_l1linf:7.6900e-01 L10_l1linf:7.4555e-01 L11_l1linf:6.9918e-01 L12_l1linf:6.1215e-01 L1_spectral:2.4088e-02 L2_spectral:2.4093e-02 L3_spectral:2.4090e-02 L4_spectral:2.4096e-02 L5_spectral:2.4088e-02 L6_spectral:2.4077e-02 L7_spectral:2.4070e-02 L8_spectral:2.4078e-02 L9_spectral:2.4077e-02 L10_spectral:2.4063e-02 L11_spectral:2.4081e-02 L12_spectral:2.4075e-02 train_time:14923ms step_avg:37.31ms +[2025-09-11 09:53:46] [Rank 0] PRINT: step:400/10000 val_loss:6.0794 total_sharp:2.2831e-03 L1_sharp:1.8434e-02 L2_sharp:7.9310e-03 L3_sharp:5.0283e-03 L4_sharp:6.0247e-03 L5_sharp:6.7367e-03 L6_sharp:4.7015e-03 L7_sharp:4.5072e-03 L8_sharp:4.0314e-03 L9_sharp:4.3982e-03 L10_sharp:4.0833e-03 L11_sharp:4.3978e-03 L12_sharp:1.3835e-02 total_fnorm:4.0805e+01 total_l1_linf:1.2117e+05 total_spectral:2.0399e+01 L1_fnorm:2.4533e+00 L2_fnorm:2.4418e+00 L3_fnorm:2.4506e+00 L4_fnorm:2.4119e+00 L5_fnorm:2.3692e+00 L6_fnorm:2.3488e+00 L7_fnorm:2.3092e+00 L8_fnorm:2.2883e+00 L9_fnorm:2.2666e+00 L10_fnorm:2.2090e+00 L11_fnorm:2.1626e+00 L12_fnorm:2.0733e+00 L1_l1linf:8.1118e-01 L2_l1linf:8.0277e-01 L3_l1linf:8.0371e-01 L4_l1linf:7.9862e-01 L5_l1linf:7.8829e-01 
L6_l1linf:7.9869e-01 L7_l1linf:7.9834e-01 L8_l1linf:7.9047e-01 L9_l1linf:7.6900e-01 L10_l1linf:7.4555e-01 L11_l1linf:6.9918e-01 L12_l1linf:6.1215e-01 L1_spectral:2.4088e-02 L2_spectral:2.4093e-02 L3_spectral:2.4090e-02 L4_spectral:2.4096e-02 L5_spectral:2.4088e-02 L6_spectral:2.4077e-02 L7_spectral:2.4070e-02 L8_spectral:2.4078e-02 L9_spectral:2.4077e-02 L10_spectral:2.4063e-02 L11_spectral:2.4081e-02 L12_spectral:2.4075e-02 train_time:14923ms step_avg:37.31ms +[2025-09-11 09:54:16] [Rank 0] step:401/10000 train_time:44822ms step_avg:111.78ms +[2025-09-11 09:54:16] [Rank 0] step:401/10000 train_time:44822ms step_avg:111.78ms +[2025-09-11 09:54:19] [Rank 0] step:421/10000 train_time:47548ms step_avg:112.94ms +[2025-09-11 09:54:19] [Rank 0] step:421/10000 train_time:47548ms step_avg:112.94ms +[2025-09-11 09:54:19] [Rank 0] step:441/10000 train_time:48187ms step_avg:109.27ms +[2025-09-11 09:54:19] [Rank 0] step:441/10000 train_time:48187ms step_avg:109.27ms +[2025-09-11 09:54:20] [Rank 0] step:461/10000 train_time:48826ms step_avg:105.91ms +[2025-09-11 09:54:20] [Rank 0] step:461/10000 train_time:48826ms step_avg:105.91ms +[2025-09-11 09:54:21] [Rank 0] step:481/10000 train_time:49464ms step_avg:102.83ms +[2025-09-11 09:54:21] [Rank 0] step:481/10000 train_time:49464ms step_avg:102.83ms +[2025-09-11 09:54:21] [Rank 0] step:501/10000 train_time:50101ms step_avg:100.00ms +[2025-09-11 09:54:21] [Rank 0] step:501/10000 train_time:50101ms step_avg:100.00ms +[2025-09-11 09:54:22] [Rank 0] step:521/10000 train_time:50739ms step_avg:97.39ms +[2025-09-11 09:54:22] [Rank 0] step:521/10000 train_time:50739ms step_avg:97.39ms +[2025-09-11 09:54:23] [Rank 0] step:541/10000 train_time:51377ms step_avg:94.97ms +[2025-09-11 09:54:23] [Rank 0] step:541/10000 train_time:51377ms step_avg:94.97ms +[2025-09-11 09:54:23] [Rank 0] step:561/10000 train_time:52015ms step_avg:92.72ms +[2025-09-11 09:54:23] [Rank 0] step:561/10000 train_time:52015ms step_avg:92.72ms +[2025-09-11 09:54:24] [Rank 
0] step:581/10000 train_time:52655ms step_avg:90.63ms +[2025-09-11 09:54:24] [Rank 0] step:581/10000 train_time:52655ms step_avg:90.63ms +[2025-09-11 09:54:25] [Rank 0] step:601/10000 train_time:53293ms step_avg:88.67ms +[2025-09-11 09:54:25] [Rank 0] step:601/10000 train_time:53293ms step_avg:88.67ms +[2025-09-11 09:54:25] [Rank 0] step:621/10000 train_time:53930ms step_avg:86.84ms +[2025-09-11 09:54:25] [Rank 0] step:621/10000 train_time:53930ms step_avg:86.84ms +[2025-09-11 09:54:26] [Rank 0] step:641/10000 train_time:54568ms step_avg:85.13ms +[2025-09-11 09:54:26] [Rank 0] step:641/10000 train_time:54568ms step_avg:85.13ms +[2025-09-11 09:54:27] [Rank 0] step:661/10000 train_time:55206ms step_avg:83.52ms +[2025-09-11 09:54:27] [Rank 0] step:661/10000 train_time:55206ms step_avg:83.52ms +[2025-09-11 09:54:27] [Rank 0] step:681/10000 train_time:55844ms step_avg:82.00ms +[2025-09-11 09:54:27] [Rank 0] step:681/10000 train_time:55844ms step_avg:82.00ms +[2025-09-11 09:54:28] [Rank 0] step:701/10000 train_time:56481ms step_avg:80.57ms +[2025-09-11 09:54:28] [Rank 0] step:701/10000 train_time:56481ms step_avg:80.57ms +[2025-09-11 09:54:28] [Rank 0] step:721/10000 train_time:57118ms step_avg:79.22ms +[2025-09-11 09:54:28] [Rank 0] step:721/10000 train_time:57118ms step_avg:79.22ms +[2025-09-11 09:54:29] [Rank 0] step:741/10000 train_time:57756ms step_avg:77.94ms +[2025-09-11 09:54:29] [Rank 0] step:741/10000 train_time:57756ms step_avg:77.94ms +[2025-09-11 09:54:30] [Rank 0] step:761/10000 train_time:58399ms step_avg:76.74ms +[2025-09-11 09:54:30] [Rank 0] step:761/10000 train_time:58399ms step_avg:76.74ms +[2025-09-11 09:54:30] [Rank 0] step:781/10000 train_time:59041ms step_avg:75.60ms +[2025-09-11 09:54:30] [Rank 0] step:781/10000 train_time:59041ms step_avg:75.60ms +[2025-09-11 09:54:31] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 09:54:31] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 09:54:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 09:54:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 09:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 09:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 09:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 09:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 09:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 09:55:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 09:55:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 09:55:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 09:55:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 09:55:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 09:55:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 09:55:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 09:55:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 09:55:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 09:55:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 09:55:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 09:55:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 09:55:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 09:55:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 09:55:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 09:55:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 09:55:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 09:55:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 09:55:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 09:55:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 09:55:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 09:55:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 09:55:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 09:55:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 09:55:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 09:55:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 09:55:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 09:55:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 09:55:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 09:55:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... 
+[2025-09-11 09:55:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 09:55:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 09:55:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 09:55:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:55:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:55:15] [Rank 0] PRINT: step:800/10000 val_loss:5.6631 total_sharp:1.7650e-03 L1_sharp:1.6752e-02 L2_sharp:3.8993e-03 L3_sharp:2.1974e-03 L4_sharp:2.3257e-03 L5_sharp:2.2927e-03 L6_sharp:2.1261e-03 L7_sharp:1.7365e-03 L8_sharp:2.9907e-03 L9_sharp:2.9481e-03 L10_sharp:3.6095e-03 L11_sharp:4.9459e-03 L12_sharp:1.7357e-02 total_fnorm:4.0500e+01 total_l1_linf:1.0189e+05 total_spectral:2.0625e+01 L1_fnorm:2.4844e+00 L2_fnorm:2.4844e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.4375e+00 L5_fnorm:2.4219e+00 L6_fnorm:2.4219e+00 L7_fnorm:2.4062e+00 L8_fnorm:2.3438e+00 L9_fnorm:2.3594e+00 L10_fnorm:2.2812e+00 L11_fnorm:2.2500e+00 L12_fnorm:2.0469e+00 L1_l1linf:8.2812e-01 L2_l1linf:7.8906e-01 L3_l1linf:7.6562e-01 L4_l1linf:7.6953e-01 L5_l1linf:7.6562e-01 L6_l1linf:7.7344e-01 L7_l1linf:7.8125e-01 L8_l1linf:7.7734e-01 L9_l1linf:7.5000e-01 L10_l1linf:7.1875e-01 L11_l1linf:6.3672e-01 L12_l1linf:4.8828e-01 L1_spectral:2.6915e-02 L2_spectral:2.7025e-02 L3_spectral:2.6953e-02 L4_spectral:2.6694e-02 L5_spectral:2.6526e-02 L6_spectral:2.6474e-02 L7_spectral:2.6522e-02 L8_spectral:2.6488e-02 L9_spectral:2.6545e-02 L10_spectral:2.6489e-02 L11_spectral:2.6605e-02 L12_spectral:2.6413e-02 train_time:59666ms step_avg:74.58ms +[2025-09-11 09:55:15] [Rank 0] PRINT: step:800/10000 val_loss:5.6631 total_sharp:1.7650e-03 L1_sharp:1.6752e-02 L2_sharp:3.8993e-03 L3_sharp:2.1974e-03 L4_sharp:2.3257e-03 L5_sharp:2.2927e-03 L6_sharp:2.1261e-03 L7_sharp:1.7365e-03 L8_sharp:2.9907e-03 
L9_sharp:2.9481e-03 L10_sharp:3.6095e-03 L11_sharp:4.9459e-03 L12_sharp:1.7357e-02 total_fnorm:4.0500e+01 total_l1_linf:1.0189e+05 total_spectral:2.0625e+01 L1_fnorm:2.4844e+00 L2_fnorm:2.4844e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.4375e+00 L5_fnorm:2.4219e+00 L6_fnorm:2.4219e+00 L7_fnorm:2.4062e+00 L8_fnorm:2.3438e+00 L9_fnorm:2.3594e+00 L10_fnorm:2.2812e+00 L11_fnorm:2.2500e+00 L12_fnorm:2.0469e+00 L1_l1linf:8.2812e-01 L2_l1linf:7.8906e-01 L3_l1linf:7.6562e-01 L4_l1linf:7.6953e-01 L5_l1linf:7.6562e-01 L6_l1linf:7.7344e-01 L7_l1linf:7.8125e-01 L8_l1linf:7.7734e-01 L9_l1linf:7.5000e-01 L10_l1linf:7.1875e-01 L11_l1linf:6.3672e-01 L12_l1linf:4.8828e-01 L1_spectral:2.6915e-02 L2_spectral:2.7025e-02 L3_spectral:2.6953e-02 L4_spectral:2.6694e-02 L5_spectral:2.6526e-02 L6_spectral:2.6474e-02 L7_spectral:2.6522e-02 L8_spectral:2.6488e-02 L9_spectral:2.6545e-02 L10_spectral:2.6489e-02 L11_spectral:2.6605e-02 L12_spectral:2.6413e-02 train_time:59666ms step_avg:74.58ms +[2025-09-11 09:55:17] [Rank 0] step:801/10000 train_time:61729ms step_avg:77.06ms +[2025-09-11 09:55:17] [Rank 0] step:801/10000 train_time:61729ms step_avg:77.06ms +[2025-09-11 09:55:18] [Rank 0] step:821/10000 train_time:62376ms step_avg:75.98ms +[2025-09-11 09:55:18] [Rank 0] step:821/10000 train_time:62376ms step_avg:75.98ms +[2025-09-11 09:55:18] [Rank 0] step:841/10000 train_time:63018ms step_avg:74.93ms +[2025-09-11 09:55:18] [Rank 0] step:841/10000 train_time:63018ms step_avg:74.93ms +[2025-09-11 09:55:19] [Rank 0] step:861/10000 train_time:63661ms step_avg:73.94ms +[2025-09-11 09:55:19] [Rank 0] step:861/10000 train_time:63661ms step_avg:73.94ms +[2025-09-11 09:55:20] [Rank 0] step:881/10000 train_time:64303ms step_avg:72.99ms +[2025-09-11 09:55:20] [Rank 0] step:881/10000 train_time:64303ms step_avg:72.99ms +[2025-09-11 09:55:20] [Rank 0] step:901/10000 train_time:64945ms step_avg:72.08ms +[2025-09-11 09:55:20] [Rank 0] step:901/10000 train_time:64945ms step_avg:72.08ms +[2025-09-11 09:55:21] [Rank 0] 
step:921/10000 train_time:65586ms step_avg:71.21ms +[2025-09-11 09:55:21] [Rank 0] step:921/10000 train_time:65586ms step_avg:71.21ms +[2025-09-11 09:55:22] [Rank 0] step:941/10000 train_time:66227ms step_avg:70.38ms +[2025-09-11 09:55:22] [Rank 0] step:941/10000 train_time:66227ms step_avg:70.38ms +[2025-09-11 09:55:22] [Rank 0] step:961/10000 train_time:66868ms step_avg:69.58ms +[2025-09-11 09:55:22] [Rank 0] step:961/10000 train_time:66868ms step_avg:69.58ms +[2025-09-11 09:55:23] [Rank 0] step:981/10000 train_time:67510ms step_avg:68.82ms +[2025-09-11 09:55:23] [Rank 0] step:981/10000 train_time:67510ms step_avg:68.82ms +[2025-09-11 09:55:24] [Rank 0] step:1001/10000 train_time:68150ms step_avg:68.08ms +[2025-09-11 09:55:24] [Rank 0] step:1001/10000 train_time:68150ms step_avg:68.08ms +[2025-09-11 09:55:24] [Rank 0] step:1021/10000 train_time:68792ms step_avg:67.38ms +[2025-09-11 09:55:24] [Rank 0] step:1021/10000 train_time:68792ms step_avg:67.38ms +[2025-09-11 09:55:25] [Rank 0] step:1041/10000 train_time:69434ms step_avg:66.70ms +[2025-09-11 09:55:25] [Rank 0] step:1041/10000 train_time:69434ms step_avg:66.70ms +[2025-09-11 09:55:25] [Rank 0] step:1061/10000 train_time:70075ms step_avg:66.05ms +[2025-09-11 09:55:25] [Rank 0] step:1061/10000 train_time:70075ms step_avg:66.05ms +[2025-09-11 09:55:26] [Rank 0] step:1081/10000 train_time:70716ms step_avg:65.42ms +[2025-09-11 09:55:26] [Rank 0] step:1081/10000 train_time:70716ms step_avg:65.42ms +[2025-09-11 09:55:27] [Rank 0] step:1101/10000 train_time:71360ms step_avg:64.81ms +[2025-09-11 09:55:27] [Rank 0] step:1101/10000 train_time:71360ms step_avg:64.81ms +[2025-09-11 09:55:27] [Rank 0] step:1121/10000 train_time:72001ms step_avg:64.23ms +[2025-09-11 09:55:27] [Rank 0] step:1121/10000 train_time:72001ms step_avg:64.23ms +[2025-09-11 09:55:28] [Rank 0] step:1141/10000 train_time:72641ms step_avg:63.66ms +[2025-09-11 09:55:28] [Rank 0] step:1141/10000 train_time:72641ms step_avg:63.66ms +[2025-09-11 09:55:29] 
[Rank 0] step:1161/10000 train_time:73282ms step_avg:63.12ms +[2025-09-11 09:55:29] [Rank 0] step:1161/10000 train_time:73282ms step_avg:63.12ms +[2025-09-11 09:55:29] [Rank 0] step:1181/10000 train_time:73924ms step_avg:62.59ms +[2025-09-11 09:55:29] [Rank 0] step:1181/10000 train_time:73924ms step_avg:62.59ms +[2025-09-11 09:55:30] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 09:55:30] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 09:55:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 09:55:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 09:55:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 09:55:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 09:55:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:55:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:55:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 09:55:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 09:55:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 09:55:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 09:55:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 09:55:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 09:55:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... 
+[2025-09-11 09:55:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 09:55:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 09:55:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 09:55:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 09:55:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 09:55:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 09:55:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 09:55:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 09:55:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 09:55:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 09:55:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 09:55:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 09:55:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 09:55:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 09:55:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 09:55:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 09:55:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 09:55:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 09:55:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 09:55:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... 
+[2025-09-11 09:55:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 09:55:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 09:55:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 09:55:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 09:55:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 09:55:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 09:55:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 09:55:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 09:55:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 09:55:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:55:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:55:44] [Rank 0] PRINT: step:1200/10000 val_loss:5.3274 total_sharp:1.2085e-03 L1_sharp:1.2907e-02 L2_sharp:2.2405e-03 L3_sharp:1.1921e-03 L4_sharp:1.6016e-03 L5_sharp:1.6009e-03 L6_sharp:1.4583e-03 L7_sharp:1.4990e-03 L8_sharp:2.4957e-03 L9_sharp:1.7593e-03 L10_sharp:2.4671e-03 L11_sharp:3.1180e-03 L12_sharp:1.1610e-02 total_fnorm:4.0000e+01 total_l1_linf:9.7792e+04 total_spectral:2.0375e+01 L1_fnorm:2.5000e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.4844e+00 L5_fnorm:2.4688e+00 L6_fnorm:2.4844e+00 L7_fnorm:2.4688e+00 L8_fnorm:2.4219e+00 L9_fnorm:2.4844e+00 L10_fnorm:2.4688e+00 L11_fnorm:2.4688e+00 L12_fnorm:2.3750e+00 L1_l1linf:7.5000e-01 L2_l1linf:7.1875e-01 L3_l1linf:7.1484e-01 L4_l1linf:7.1875e-01 L5_l1linf:7.1875e-01 L6_l1linf:7.2656e-01 L7_l1linf:7.2656e-01 L8_l1linf:7.3828e-01 L9_l1linf:7.3438e-01 L10_l1linf:7.3438e-01 L11_l1linf:7.2266e-01 L12_l1linf:6.2500e-01 L1_spectral:2.7975e-02 L2_spectral:2.7894e-02 L3_spectral:2.7665e-02 L4_spectral:2.7688e-02 L5_spectral:2.7571e-02 L6_spectral:2.7588e-02 L7_spectral:2.7489e-02 L8_spectral:2.8023e-02 L9_spectral:2.7476e-02 L10_spectral:2.7572e-02 L11_spectral:2.7497e-02 L12_spectral:2.7445e-02 train_time:74547ms step_avg:62.12ms +[2025-09-11 09:55:44] [Rank 0] PRINT: step:1200/10000 val_loss:5.3274 total_sharp:1.2085e-03 L1_sharp:1.2907e-02 L2_sharp:2.2405e-03 L3_sharp:1.1921e-03 L4_sharp:1.6016e-03 L5_sharp:1.6009e-03 L6_sharp:1.4583e-03 L7_sharp:1.4990e-03 L8_sharp:2.4957e-03 L9_sharp:1.7593e-03 L10_sharp:2.4671e-03 L11_sharp:3.1180e-03 L12_sharp:1.1610e-02 total_fnorm:4.0000e+01 total_l1_linf:9.7792e+04 total_spectral:2.0375e+01 L1_fnorm:2.5000e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.4844e+00 L5_fnorm:2.4688e+00 L6_fnorm:2.4844e+00 L7_fnorm:2.4688e+00 L8_fnorm:2.4219e+00 L9_fnorm:2.4844e+00 L10_fnorm:2.4688e+00 L11_fnorm:2.4688e+00 L12_fnorm:2.3750e+00 L1_l1linf:7.5000e-01 L2_l1linf:7.1875e-01 L3_l1linf:7.1484e-01 L4_l1linf:7.1875e-01 L5_l1linf:7.1875e-01 
L6_l1linf:7.2656e-01 L7_l1linf:7.2656e-01 L8_l1linf:7.3828e-01 L9_l1linf:7.3438e-01 L10_l1linf:7.3438e-01 L11_l1linf:7.2266e-01 L12_l1linf:6.2500e-01 L1_spectral:2.7975e-02 L2_spectral:2.7894e-02 L3_spectral:2.7665e-02 L4_spectral:2.7688e-02 L5_spectral:2.7571e-02 L6_spectral:2.7588e-02 L7_spectral:2.7489e-02 L8_spectral:2.8023e-02 L9_spectral:2.7476e-02 L10_spectral:2.7572e-02 L11_spectral:2.7497e-02 L12_spectral:2.7445e-02 train_time:74547ms step_avg:62.12ms +[2025-09-11 09:55:46] [Rank 0] step:1201/10000 train_time:76626ms step_avg:63.80ms +[2025-09-11 09:55:46] [Rank 0] step:1201/10000 train_time:76626ms step_avg:63.80ms +[2025-09-11 09:55:47] [Rank 0] step:1221/10000 train_time:77273ms step_avg:63.29ms +[2025-09-11 09:55:47] [Rank 0] step:1221/10000 train_time:77273ms step_avg:63.29ms +[2025-09-11 09:55:48] [Rank 0] step:1241/10000 train_time:77916ms step_avg:62.79ms +[2025-09-11 09:55:48] [Rank 0] step:1241/10000 train_time:77916ms step_avg:62.79ms +[2025-09-11 09:55:48] [Rank 0] step:1261/10000 train_time:78559ms step_avg:62.30ms +[2025-09-11 09:55:48] [Rank 0] step:1261/10000 train_time:78559ms step_avg:62.30ms +[2025-09-11 09:55:49] [Rank 0] step:1281/10000 train_time:79201ms step_avg:61.83ms +[2025-09-11 09:55:49] [Rank 0] step:1281/10000 train_time:79201ms step_avg:61.83ms +[2025-09-11 09:55:50] [Rank 0] step:1301/10000 train_time:79844ms step_avg:61.37ms +[2025-09-11 09:55:50] [Rank 0] step:1301/10000 train_time:79844ms step_avg:61.37ms +[2025-09-11 09:55:50] [Rank 0] step:1321/10000 train_time:80487ms step_avg:60.93ms +[2025-09-11 09:55:50] [Rank 0] step:1321/10000 train_time:80487ms step_avg:60.93ms +[2025-09-11 09:55:51] [Rank 0] step:1341/10000 train_time:81130ms step_avg:60.50ms +[2025-09-11 09:55:51] [Rank 0] step:1341/10000 train_time:81130ms step_avg:60.50ms +[2025-09-11 09:55:52] [Rank 0] step:1361/10000 train_time:81773ms step_avg:60.08ms +[2025-09-11 09:55:52] [Rank 0] step:1361/10000 train_time:81773ms step_avg:60.08ms +[2025-09-11 09:55:52] 
[Rank 0] step:1381/10000 train_time:82415ms step_avg:59.68ms +[2025-09-11 09:55:52] [Rank 0] step:1381/10000 train_time:82415ms step_avg:59.68ms +[2025-09-11 09:55:53] [Rank 0] step:1401/10000 train_time:83057ms step_avg:59.28ms +[2025-09-11 09:55:53] [Rank 0] step:1401/10000 train_time:83057ms step_avg:59.28ms +[2025-09-11 09:55:54] [Rank 0] step:1421/10000 train_time:83700ms step_avg:58.90ms +[2025-09-11 09:55:54] [Rank 0] step:1421/10000 train_time:83700ms step_avg:58.90ms +[2025-09-11 09:55:54] [Rank 0] step:1441/10000 train_time:84342ms step_avg:58.53ms +[2025-09-11 09:55:54] [Rank 0] step:1441/10000 train_time:84342ms step_avg:58.53ms +[2025-09-11 09:55:55] [Rank 0] step:1461/10000 train_time:84984ms step_avg:58.17ms +[2025-09-11 09:55:55] [Rank 0] step:1461/10000 train_time:84984ms step_avg:58.17ms +[2025-09-11 09:55:55] [Rank 0] step:1481/10000 train_time:85627ms step_avg:57.82ms +[2025-09-11 09:55:55] [Rank 0] step:1481/10000 train_time:85627ms step_avg:57.82ms +[2025-09-11 09:55:56] [Rank 0] step:1501/10000 train_time:86273ms step_avg:57.48ms +[2025-09-11 09:55:56] [Rank 0] step:1501/10000 train_time:86273ms step_avg:57.48ms +[2025-09-11 09:55:57] [Rank 0] step:1521/10000 train_time:86919ms step_avg:57.15ms +[2025-09-11 09:55:57] [Rank 0] step:1521/10000 train_time:86919ms step_avg:57.15ms +[2025-09-11 09:55:57] [Rank 0] step:1541/10000 train_time:87564ms step_avg:56.82ms +[2025-09-11 09:55:57] [Rank 0] step:1541/10000 train_time:87564ms step_avg:56.82ms +[2025-09-11 09:55:58] [Rank 0] step:1561/10000 train_time:88211ms step_avg:56.51ms +[2025-09-11 09:55:58] [Rank 0] step:1561/10000 train_time:88211ms step_avg:56.51ms +[2025-09-11 09:55:59] [Rank 0] step:1581/10000 train_time:88857ms step_avg:56.20ms +[2025-09-11 09:55:59] [Rank 0] step:1581/10000 train_time:88857ms step_avg:56.20ms +[2025-09-11 09:55:59] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 09:55:59] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 09:56:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 09:56:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 09:56:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 09:56:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 09:56:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:56:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:56:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 09:56:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 09:56:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 09:56:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 09:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 09:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 09:56:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 09:56:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 09:56:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 09:56:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 09:56:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 09:56:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 09:56:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 09:56:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 09:56:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 09:56:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 09:56:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 09:56:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 09:56:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 09:56:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 09:56:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 09:56:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 09:56:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 09:56:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 09:56:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 09:56:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 09:56:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 09:56:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 09:56:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 09:56:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 09:56:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 09:56:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 09:56:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 09:56:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 09:56:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 09:56:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 09:56:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:56:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:56:10] [Rank 0] PRINT: step:1600/10000 val_loss:5.1759 total_sharp:1.0501e-03 L1_sharp:9.2046e-03 L2_sharp:1.8588e-03 L3_sharp:6.7388e-04 L4_sharp:6.6534e-04 L5_sharp:1.3376e-03 L6_sharp:8.3486e-04 L7_sharp:1.0757e-03 L8_sharp:2.7952e-03 L9_sharp:1.8384e-03 L10_sharp:1.9399e-03 L11_sharp:2.6677e-03 L12_sharp:1.1965e-02 total_fnorm:3.7750e+01 total_l1_linf:8.8576e+04 total_spectral:1.9000e+01 L1_fnorm:2.5312e+00 L2_fnorm:2.4844e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5000e+00 L7_fnorm:2.5000e+00 L8_fnorm:2.4375e+00 L9_fnorm:2.5000e+00 L10_fnorm:2.5000e+00 L11_fnorm:2.5000e+00 L12_fnorm:2.4375e+00 L1_l1linf:7.3438e-01 L2_l1linf:6.9141e-01 L3_l1linf:6.9141e-01 L4_l1linf:6.9531e-01 L5_l1linf:6.9922e-01 L6_l1linf:7.0312e-01 L7_l1linf:6.9922e-01 L8_l1linf:7.0703e-01 L9_l1linf:6.9531e-01 L10_l1linf:7.0312e-01 L11_l1linf:7.1875e-01 L12_l1linf:6.2109e-01 L1_spectral:2.8883e-02 L2_spectral:2.8589e-02 L3_spectral:2.8503e-02 L4_spectral:2.8304e-02 L5_spectral:2.8541e-02 L6_spectral:2.8307e-02 L7_spectral:2.8352e-02 L8_spectral:2.9160e-02 L9_spectral:2.8391e-02 L10_spectral:2.8231e-02 L11_spectral:2.8249e-02 L12_spectral:2.8125e-02 train_time:89486ms step_avg:55.93ms +[2025-09-11 09:56:10] [Rank 0] PRINT: step:1600/10000 
val_loss:5.1759 total_sharp:1.0501e-03 L1_sharp:9.2046e-03 L2_sharp:1.8588e-03 L3_sharp:6.7388e-04 L4_sharp:6.6534e-04 L5_sharp:1.3376e-03 L6_sharp:8.3486e-04 L7_sharp:1.0757e-03 L8_sharp:2.7952e-03 L9_sharp:1.8384e-03 L10_sharp:1.9399e-03 L11_sharp:2.6677e-03 L12_sharp:1.1965e-02 total_fnorm:3.7750e+01 total_l1_linf:8.8576e+04 total_spectral:1.9000e+01 L1_fnorm:2.5312e+00 L2_fnorm:2.4844e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5000e+00 L7_fnorm:2.5000e+00 L8_fnorm:2.4375e+00 L9_fnorm:2.5000e+00 L10_fnorm:2.5000e+00 L11_fnorm:2.5000e+00 L12_fnorm:2.4375e+00 L1_l1linf:7.3438e-01 L2_l1linf:6.9141e-01 L3_l1linf:6.9141e-01 L4_l1linf:6.9531e-01 L5_l1linf:6.9922e-01 L6_l1linf:7.0312e-01 L7_l1linf:6.9922e-01 L8_l1linf:7.0703e-01 L9_l1linf:6.9531e-01 L10_l1linf:7.0312e-01 L11_l1linf:7.1875e-01 L12_l1linf:6.2109e-01 L1_spectral:2.8883e-02 L2_spectral:2.8589e-02 L3_spectral:2.8503e-02 L4_spectral:2.8304e-02 L5_spectral:2.8541e-02 L6_spectral:2.8307e-02 L7_spectral:2.8352e-02 L8_spectral:2.9160e-02 L9_spectral:2.8391e-02 L10_spectral:2.8231e-02 L11_spectral:2.8249e-02 L12_spectral:2.8125e-02 train_time:89486ms step_avg:55.93ms +[2025-09-11 09:56:12] [Rank 0] step:1601/10000 train_time:91585ms step_avg:57.20ms +[2025-09-11 09:56:12] [Rank 0] step:1601/10000 train_time:91585ms step_avg:57.20ms +[2025-09-11 09:56:13] [Rank 0] step:1621/10000 train_time:92236ms step_avg:56.90ms +[2025-09-11 09:56:13] [Rank 0] step:1621/10000 train_time:92236ms step_avg:56.90ms +[2025-09-11 09:56:13] [Rank 0] step:1641/10000 train_time:92884ms step_avg:56.60ms +[2025-09-11 09:56:13] [Rank 0] step:1641/10000 train_time:92884ms step_avg:56.60ms +[2025-09-11 09:56:14] [Rank 0] step:1661/10000 train_time:93531ms step_avg:56.31ms +[2025-09-11 09:56:14] [Rank 0] step:1661/10000 train_time:93531ms step_avg:56.31ms +[2025-09-11 09:56:14] [Rank 0] step:1681/10000 train_time:94179ms step_avg:56.03ms +[2025-09-11 09:56:14] [Rank 0] step:1681/10000 train_time:94179ms 
step_avg:56.03ms +[2025-09-11 09:56:15] [Rank 0] step:1701/10000 train_time:94825ms step_avg:55.75ms +[2025-09-11 09:56:15] [Rank 0] step:1701/10000 train_time:94825ms step_avg:55.75ms +[2025-09-11 09:56:16] [Rank 0] step:1721/10000 train_time:95471ms step_avg:55.47ms +[2025-09-11 09:56:16] [Rank 0] step:1721/10000 train_time:95471ms step_avg:55.47ms +[2025-09-11 09:56:16] [Rank 0] step:1741/10000 train_time:96119ms step_avg:55.21ms +[2025-09-11 09:56:16] [Rank 0] step:1741/10000 train_time:96119ms step_avg:55.21ms +[2025-09-11 09:56:17] [Rank 0] step:1761/10000 train_time:96766ms step_avg:54.95ms +[2025-09-11 09:56:17] [Rank 0] step:1761/10000 train_time:96766ms step_avg:54.95ms +[2025-09-11 09:56:18] [Rank 0] step:1781/10000 train_time:97413ms step_avg:54.70ms +[2025-09-11 09:56:18] [Rank 0] step:1781/10000 train_time:97413ms step_avg:54.70ms +[2025-09-11 09:56:18] [Rank 0] step:1801/10000 train_time:98061ms step_avg:54.45ms +[2025-09-11 09:56:18] [Rank 0] step:1801/10000 train_time:98061ms step_avg:54.45ms +[2025-09-11 09:56:19] [Rank 0] step:1821/10000 train_time:98707ms step_avg:54.20ms +[2025-09-11 09:56:19] [Rank 0] step:1821/10000 train_time:98707ms step_avg:54.20ms +[2025-09-11 09:56:20] [Rank 0] step:1841/10000 train_time:99353ms step_avg:53.97ms +[2025-09-11 09:56:20] [Rank 0] step:1841/10000 train_time:99353ms step_avg:53.97ms +[2025-09-11 09:56:20] [Rank 0] step:1861/10000 train_time:100000ms step_avg:53.73ms +[2025-09-11 09:56:20] [Rank 0] step:1861/10000 train_time:100000ms step_avg:53.73ms +[2025-09-11 09:56:21] [Rank 0] step:1881/10000 train_time:100647ms step_avg:53.51ms +[2025-09-11 09:56:21] [Rank 0] step:1881/10000 train_time:100647ms step_avg:53.51ms +[2025-09-11 09:56:22] [Rank 0] step:1901/10000 train_time:101292ms step_avg:53.28ms +[2025-09-11 09:56:22] [Rank 0] step:1901/10000 train_time:101292ms step_avg:53.28ms +[2025-09-11 09:56:22] [Rank 0] step:1921/10000 train_time:101938ms step_avg:53.07ms +[2025-09-11 09:56:22] [Rank 0] 
step:1921/10000 train_time:101938ms step_avg:53.07ms +[2025-09-11 09:56:23] [Rank 0] step:1941/10000 train_time:102584ms step_avg:52.85ms +[2025-09-11 09:56:23] [Rank 0] step:1941/10000 train_time:102584ms step_avg:52.85ms +[2025-09-11 09:56:24] [Rank 0] step:1961/10000 train_time:103230ms step_avg:52.64ms +[2025-09-11 09:56:24] [Rank 0] step:1961/10000 train_time:103230ms step_avg:52.64ms +[2025-09-11 09:56:24] [Rank 0] step:1981/10000 train_time:103877ms step_avg:52.44ms +[2025-09-11 09:56:24] [Rank 0] step:1981/10000 train_time:103877ms step_avg:52.44ms +[2025-09-11 09:56:25] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 09:56:25] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 09:56:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 09:56:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 09:56:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 09:56:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 09:56:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:56:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:56:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 09:56:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 09:56:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 09:56:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 09:56:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 09:56:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 09:56:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 09:56:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 09:56:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 09:56:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 09:56:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 09:56:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 09:56:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 09:56:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 09:56:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 09:56:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 09:56:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 09:56:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 09:56:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 09:56:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 09:56:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 09:56:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 09:56:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 09:56:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 09:56:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 09:56:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 09:56:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 09:56:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 09:56:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 09:56:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 09:56:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 09:56:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 09:56:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 09:56:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 09:56:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 09:56:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 09:56:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:56:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:56:36] [Rank 0] PRINT: step:2000/10000 val_loss:5.0275 total_sharp:8.4412e-04 L1_sharp:6.1061e-03 L2_sharp:9.2649e-04 L3_sharp:6.8558e-04 L4_sharp:6.0154e-04 L5_sharp:6.8777e-04 L6_sharp:6.8829e-04 L7_sharp:6.9038e-04 L8_sharp:2.1800e-03 L9_sharp:1.5431e-03 L10_sharp:1.7497e-03 L11_sharp:2.6190e-03 L12_sharp:1.4015e-02 total_fnorm:3.8250e+01 total_l1_linf:9.1136e+04 total_spectral:1.9500e+01 L1_fnorm:2.5156e+00 L2_fnorm:2.4844e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5000e+00 L7_fnorm:2.5000e+00 L8_fnorm:2.4531e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.4688e+00 L1_l1linf:7.2266e-01 L2_l1linf:6.9141e-01 L3_l1linf:6.6797e-01 L4_l1linf:6.7578e-01 L5_l1linf:6.7188e-01 L6_l1linf:6.6797e-01 L7_l1linf:6.8750e-01 L8_l1linf:6.7969e-01 L9_l1linf:6.8359e-01 L10_l1linf:6.9922e-01 L11_l1linf:6.8750e-01 L12_l1linf:6.1328e-01 L1_spectral:2.9528e-02 L2_spectral:2.9211e-02 L3_spectral:2.9060e-02 L4_spectral:2.8957e-02 L5_spectral:2.9119e-02 L6_spectral:2.9172e-02 L7_spectral:2.9162e-02 L8_spectral:2.9903e-02 L9_spectral:2.9175e-02 L10_spectral:2.8982e-02 L11_spectral:2.9061e-02 L12_spectral:2.8858e-02 train_time:104505ms step_avg:52.25ms +[2025-09-11 09:56:36] [Rank 0] PRINT: step:2000/10000 val_loss:5.0275 total_sharp:8.4412e-04 L1_sharp:6.1061e-03 L2_sharp:9.2649e-04 L3_sharp:6.8558e-04 L4_sharp:6.0154e-04 L5_sharp:6.8777e-04 L6_sharp:6.8829e-04 L7_sharp:6.9038e-04 L8_sharp:2.1800e-03 L9_sharp:1.5431e-03 L10_sharp:1.7497e-03 L11_sharp:2.6190e-03 L12_sharp:1.4015e-02 total_fnorm:3.8250e+01 total_l1_linf:9.1136e+04 total_spectral:1.9500e+01 L1_fnorm:2.5156e+00 L2_fnorm:2.4844e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5000e+00 L7_fnorm:2.5000e+00 L8_fnorm:2.4531e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.4688e+00 L1_l1linf:7.2266e-01 L2_l1linf:6.9141e-01 L3_l1linf:6.6797e-01 L4_l1linf:6.7578e-01 L5_l1linf:6.7188e-01 
L6_l1linf:6.6797e-01 L7_l1linf:6.8750e-01 L8_l1linf:6.7969e-01 L9_l1linf:6.8359e-01 L10_l1linf:6.9922e-01 L11_l1linf:6.8750e-01 L12_l1linf:6.1328e-01 L1_spectral:2.9528e-02 L2_spectral:2.9211e-02 L3_spectral:2.9060e-02 L4_spectral:2.8957e-02 L5_spectral:2.9119e-02 L6_spectral:2.9172e-02 L7_spectral:2.9162e-02 L8_spectral:2.9903e-02 L9_spectral:2.9175e-02 L10_spectral:2.8982e-02 L11_spectral:2.9061e-02 L12_spectral:2.8858e-02 train_time:104505ms step_avg:52.25ms +[2025-09-11 09:56:38] [Rank 0] step:2001/10000 train_time:106491ms step_avg:53.22ms +[2025-09-11 09:56:38] [Rank 0] step:2001/10000 train_time:106491ms step_avg:53.22ms +[2025-09-11 09:56:38] [Rank 0] step:2021/10000 train_time:107142ms step_avg:53.01ms +[2025-09-11 09:56:38] [Rank 0] step:2021/10000 train_time:107142ms step_avg:53.01ms +[2025-09-11 09:56:39] [Rank 0] step:2041/10000 train_time:107791ms step_avg:52.81ms +[2025-09-11 09:56:39] [Rank 0] step:2041/10000 train_time:107791ms step_avg:52.81ms +[2025-09-11 09:56:40] [Rank 0] step:2061/10000 train_time:108438ms step_avg:52.61ms +[2025-09-11 09:56:40] [Rank 0] step:2061/10000 train_time:108438ms step_avg:52.61ms +[2025-09-11 09:56:40] [Rank 0] step:2081/10000 train_time:109084ms step_avg:52.42ms +[2025-09-11 09:56:40] [Rank 0] step:2081/10000 train_time:109084ms step_avg:52.42ms +[2025-09-11 09:56:41] [Rank 0] step:2101/10000 train_time:109731ms step_avg:52.23ms +[2025-09-11 09:56:41] [Rank 0] step:2101/10000 train_time:109731ms step_avg:52.23ms +[2025-09-11 09:56:42] [Rank 0] step:2121/10000 train_time:110378ms step_avg:52.04ms +[2025-09-11 09:56:42] [Rank 0] step:2121/10000 train_time:110378ms step_avg:52.04ms +[2025-09-11 09:56:42] [Rank 0] step:2141/10000 train_time:111179ms step_avg:51.93ms +[2025-09-11 09:56:42] [Rank 0] step:2141/10000 train_time:111179ms step_avg:51.93ms +[2025-09-11 09:56:43] [Rank 0] step:2161/10000 train_time:112276ms step_avg:51.96ms +[2025-09-11 09:56:43] [Rank 0] step:2161/10000 train_time:112276ms step_avg:51.96ms 
+[2025-09-11 09:56:44] [Rank 0] step:2181/10000 train_time:112923ms step_avg:51.78ms +[2025-09-11 09:56:44] [Rank 0] step:2181/10000 train_time:112923ms step_avg:51.78ms +[2025-09-11 09:56:45] [Rank 0] step:2201/10000 train_time:113568ms step_avg:51.60ms +[2025-09-11 09:56:45] [Rank 0] step:2201/10000 train_time:113568ms step_avg:51.60ms +[2025-09-11 09:56:46] [Rank 0] step:2221/10000 train_time:114510ms step_avg:51.56ms +[2025-09-11 09:56:46] [Rank 0] step:2221/10000 train_time:114510ms step_avg:51.56ms +[2025-09-11 09:56:46] [Rank 0] step:2241/10000 train_time:115168ms step_avg:51.39ms +[2025-09-11 09:56:46] [Rank 0] step:2241/10000 train_time:115168ms step_avg:51.39ms +[2025-09-11 09:56:47] [Rank 0] step:2261/10000 train_time:115827ms step_avg:51.23ms +[2025-09-11 09:56:47] [Rank 0] step:2261/10000 train_time:115827ms step_avg:51.23ms +[2025-09-11 09:56:48] [Rank 0] step:2281/10000 train_time:116486ms step_avg:51.07ms +[2025-09-11 09:56:48] [Rank 0] step:2281/10000 train_time:116486ms step_avg:51.07ms +[2025-09-11 09:56:48] [Rank 0] step:2301/10000 train_time:117146ms step_avg:50.91ms +[2025-09-11 09:56:48] [Rank 0] step:2301/10000 train_time:117146ms step_avg:50.91ms +[2025-09-11 09:56:49] [Rank 0] step:2321/10000 train_time:117805ms step_avg:50.76ms +[2025-09-11 09:56:49] [Rank 0] step:2321/10000 train_time:117805ms step_avg:50.76ms +[2025-09-11 09:56:50] [Rank 0] step:2341/10000 train_time:118465ms step_avg:50.60ms +[2025-09-11 09:56:50] [Rank 0] step:2341/10000 train_time:118465ms step_avg:50.60ms +[2025-09-11 09:56:50] [Rank 0] step:2361/10000 train_time:119124ms step_avg:50.45ms +[2025-09-11 09:56:50] [Rank 0] step:2361/10000 train_time:119124ms step_avg:50.45ms +[2025-09-11 09:56:51] [Rank 0] step:2381/10000 train_time:119783ms step_avg:50.31ms +[2025-09-11 09:56:51] [Rank 0] step:2381/10000 train_time:119783ms step_avg:50.31ms +[2025-09-11 09:56:52] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 09:56:52] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 09:56:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 09:56:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 09:56:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 09:56:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 09:56:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:56:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:56:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 09:56:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 09:56:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 09:56:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 09:56:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 09:56:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 09:56:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 09:56:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 09:56:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 09:56:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 09:56:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 09:56:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 09:56:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 09:56:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 09:56:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 09:56:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 09:57:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 09:57:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 09:57:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 09:57:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 09:57:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 09:57:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 09:57:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 09:57:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 09:57:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 09:57:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 09:57:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 09:57:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 09:57:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 09:57:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 09:57:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 09:57:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 09:57:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 09:57:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 09:57:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 09:57:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 09:57:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:57:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:57:02] [Rank 0] PRINT: step:2400/10000 val_loss:4.9053 total_sharp:8.4736e-04 L1_sharp:5.4670e-03 L2_sharp:1.1433e-03 L3_sharp:5.4090e-04 L4_sharp:3.6230e-04 L5_sharp:6.6467e-04 L6_sharp:6.2109e-04 L7_sharp:6.0876e-04 L8_sharp:1.9584e-03 L9_sharp:1.3340e-03 L10_sharp:1.5926e-03 L11_sharp:2.1922e-03 L12_sharp:1.4343e-02 total_fnorm:3.6000e+01 total_l1_linf:8.2944e+04 total_spectral:1.8125e+01 L1_fnorm:2.5156e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5000e+00 L8_fnorm:2.4531e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.5000e+00 L1_l1linf:7.0703e-01 L2_l1linf:6.7188e-01 L3_l1linf:6.6016e-01 L4_l1linf:6.6016e-01 L5_l1linf:6.5625e-01 L6_l1linf:6.6797e-01 L7_l1linf:6.6016e-01 L8_l1linf:6.6016e-01 L9_l1linf:6.4844e-01 L10_l1linf:6.6406e-01 L11_l1linf:6.6797e-01 L12_l1linf:6.2109e-01 L1_spectral:3.0209e-02 L2_spectral:2.9537e-02 L3_spectral:2.9405e-02 L4_spectral:2.9486e-02 L5_spectral:2.9556e-02 L6_spectral:2.9520e-02 L7_spectral:2.9529e-02 L8_spectral:3.0530e-02 L9_spectral:2.9759e-02 L10_spectral:2.9728e-02 L11_spectral:2.9493e-02 L12_spectral:2.9492e-02 train_time:120423ms step_avg:50.18ms +[2025-09-11 09:57:02] [Rank 0] PRINT: step:2400/10000 
val_loss:4.9053 total_sharp:8.4736e-04 L1_sharp:5.4670e-03 L2_sharp:1.1433e-03 L3_sharp:5.4090e-04 L4_sharp:3.6230e-04 L5_sharp:6.6467e-04 L6_sharp:6.2109e-04 L7_sharp:6.0876e-04 L8_sharp:1.9584e-03 L9_sharp:1.3340e-03 L10_sharp:1.5926e-03 L11_sharp:2.1922e-03 L12_sharp:1.4343e-02 total_fnorm:3.6000e+01 total_l1_linf:8.2944e+04 total_spectral:1.8125e+01 L1_fnorm:2.5156e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5000e+00 L8_fnorm:2.4531e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.5000e+00 L1_l1linf:7.0703e-01 L2_l1linf:6.7188e-01 L3_l1linf:6.6016e-01 L4_l1linf:6.6016e-01 L5_l1linf:6.5625e-01 L6_l1linf:6.6797e-01 L7_l1linf:6.6016e-01 L8_l1linf:6.6016e-01 L9_l1linf:6.4844e-01 L10_l1linf:6.6406e-01 L11_l1linf:6.6797e-01 L12_l1linf:6.2109e-01 L1_spectral:3.0209e-02 L2_spectral:2.9537e-02 L3_spectral:2.9405e-02 L4_spectral:2.9486e-02 L5_spectral:2.9556e-02 L6_spectral:2.9520e-02 L7_spectral:2.9529e-02 L8_spectral:3.0530e-02 L9_spectral:2.9759e-02 L10_spectral:2.9728e-02 L11_spectral:2.9493e-02 L12_spectral:2.9492e-02 train_time:120423ms step_avg:50.18ms +[2025-09-11 09:57:04] [Rank 0] step:2401/10000 train_time:122118ms step_avg:50.86ms +[2025-09-11 09:57:04] [Rank 0] step:2401/10000 train_time:122118ms step_avg:50.86ms +[2025-09-11 09:57:05] [Rank 0] step:2421/10000 train_time:122806ms step_avg:50.73ms +[2025-09-11 09:57:05] [Rank 0] step:2421/10000 train_time:122806ms step_avg:50.73ms +[2025-09-11 09:57:05] [Rank 0] step:2441/10000 train_time:123467ms step_avg:50.58ms +[2025-09-11 09:57:05] [Rank 0] step:2441/10000 train_time:123467ms step_avg:50.58ms +[2025-09-11 09:57:06] [Rank 0] step:2461/10000 train_time:124129ms step_avg:50.44ms +[2025-09-11 09:57:06] [Rank 0] step:2461/10000 train_time:124129ms step_avg:50.44ms +[2025-09-11 09:57:07] [Rank 0] step:2481/10000 train_time:124790ms step_avg:50.30ms +[2025-09-11 09:57:07] [Rank 0] step:2481/10000 
train_time:124790ms step_avg:50.30ms +[2025-09-11 09:57:07] [Rank 0] step:2501/10000 train_time:125451ms step_avg:50.16ms +[2025-09-11 09:57:07] [Rank 0] step:2501/10000 train_time:125451ms step_avg:50.16ms +[2025-09-11 09:57:08] [Rank 0] step:2521/10000 train_time:126112ms step_avg:50.02ms +[2025-09-11 09:57:08] [Rank 0] step:2521/10000 train_time:126112ms step_avg:50.02ms +[2025-09-11 09:57:09] [Rank 0] step:2541/10000 train_time:126774ms step_avg:49.89ms +[2025-09-11 09:57:09] [Rank 0] step:2541/10000 train_time:126774ms step_avg:49.89ms +[2025-09-11 09:57:09] [Rank 0] step:2561/10000 train_time:127435ms step_avg:49.76ms +[2025-09-11 09:57:09] [Rank 0] step:2561/10000 train_time:127435ms step_avg:49.76ms +[2025-09-11 09:57:10] [Rank 0] step:2581/10000 train_time:128095ms step_avg:49.63ms +[2025-09-11 09:57:10] [Rank 0] step:2581/10000 train_time:128095ms step_avg:49.63ms +[2025-09-11 09:57:10] [Rank 0] step:2601/10000 train_time:128756ms step_avg:49.50ms +[2025-09-11 09:57:10] [Rank 0] step:2601/10000 train_time:128756ms step_avg:49.50ms +[2025-09-11 09:57:11] [Rank 0] step:2621/10000 train_time:129417ms step_avg:49.38ms +[2025-09-11 09:57:11] [Rank 0] step:2621/10000 train_time:129417ms step_avg:49.38ms +[2025-09-11 09:57:12] [Rank 0] step:2641/10000 train_time:130078ms step_avg:49.25ms +[2025-09-11 09:57:12] [Rank 0] step:2641/10000 train_time:130078ms step_avg:49.25ms +[2025-09-11 09:57:12] [Rank 0] step:2661/10000 train_time:130739ms step_avg:49.13ms +[2025-09-11 09:57:12] [Rank 0] step:2661/10000 train_time:130739ms step_avg:49.13ms +[2025-09-11 09:57:13] [Rank 0] step:2681/10000 train_time:131399ms step_avg:49.01ms +[2025-09-11 09:57:13] [Rank 0] step:2681/10000 train_time:131399ms step_avg:49.01ms +[2025-09-11 09:57:14] [Rank 0] step:2701/10000 train_time:132059ms step_avg:48.89ms +[2025-09-11 09:57:14] [Rank 0] step:2701/10000 train_time:132059ms step_avg:48.89ms +[2025-09-11 09:57:14] [Rank 0] step:2721/10000 train_time:132720ms step_avg:48.78ms 
+[2025-09-11 09:57:14] [Rank 0] step:2721/10000 train_time:132720ms step_avg:48.78ms +[2025-09-11 09:57:15] [Rank 0] step:2741/10000 train_time:133381ms step_avg:48.66ms +[2025-09-11 09:57:15] [Rank 0] step:2741/10000 train_time:133381ms step_avg:48.66ms +[2025-09-11 09:57:16] [Rank 0] step:2761/10000 train_time:134041ms step_avg:48.55ms +[2025-09-11 09:57:16] [Rank 0] step:2761/10000 train_time:134041ms step_avg:48.55ms +[2025-09-11 09:57:16] [Rank 0] step:2781/10000 train_time:134701ms step_avg:48.44ms +[2025-09-11 09:57:16] [Rank 0] step:2781/10000 train_time:134701ms step_avg:48.44ms +[2025-09-11 09:57:17] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 09:57:17] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 09:57:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 09:57:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 09:57:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 09:57:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 09:57:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:57:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:57:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 09:57:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 09:57:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 09:57:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 09:57:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 09:57:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 09:57:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 09:57:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 09:57:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 09:57:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 09:57:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 09:57:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 09:57:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 09:57:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 09:57:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 09:57:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 09:57:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 09:57:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 09:57:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 09:57:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 09:57:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 09:57:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 09:57:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 09:57:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 09:57:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 09:57:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 09:57:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 09:57:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 09:57:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 09:57:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 09:57:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 09:57:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 09:57:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 09:57:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 09:57:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 09:57:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 09:57:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:57:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:57:28] [Rank 0] PRINT: step:2800/10000 val_loss:4.8293 total_sharp:9.1652e-04 L1_sharp:5.1557e-03 L2_sharp:1.0854e-03 L3_sharp:4.1157e-04 L4_sharp:3.0194e-04 L5_sharp:5.2825e-04 L6_sharp:6.5620e-04 L7_sharp:7.9613e-04 L8_sharp:1.6813e-03 L9_sharp:1.3680e-03 L10_sharp:1.5648e-03 L11_sharp:2.2557e-03 L12_sharp:7.3269e-03 total_fnorm:3.4500e+01 total_l1_linf:7.8336e+04 total_spectral:1.7375e+01 L1_fnorm:2.5156e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5000e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.5156e+00 L1_l1linf:6.9922e-01 L2_l1linf:6.5234e-01 L3_l1linf:6.3281e-01 L4_l1linf:6.3672e-01 L5_l1linf:6.4062e-01 L6_l1linf:6.4844e-01 L7_l1linf:6.4844e-01 L8_l1linf:6.4062e-01 L9_l1linf:6.4062e-01 L10_l1linf:6.3672e-01 L11_l1linf:6.4844e-01 L12_l1linf:6.2109e-01 L1_spectral:3.0737e-02 L2_spectral:2.9969e-02 L3_spectral:2.9873e-02 L4_spectral:2.9782e-02 L5_spectral:3.0150e-02 L6_spectral:2.9809e-02 L7_spectral:3.0140e-02 L8_spectral:3.0539e-02 L9_spectral:3.0415e-02 L10_spectral:3.0368e-02 L11_spectral:2.9861e-02 L12_spectral:2.9800e-02 train_time:135342ms step_avg:48.34ms +[2025-09-11 09:57:28] [Rank 0] PRINT: step:2800/10000 val_loss:4.8293 total_sharp:9.1652e-04 L1_sharp:5.1557e-03 L2_sharp:1.0854e-03 L3_sharp:4.1157e-04 L4_sharp:3.0194e-04 L5_sharp:5.2825e-04 L6_sharp:6.5620e-04 L7_sharp:7.9613e-04 L8_sharp:1.6813e-03 L9_sharp:1.3680e-03 L10_sharp:1.5648e-03 L11_sharp:2.2557e-03 L12_sharp:7.3269e-03 total_fnorm:3.4500e+01 total_l1_linf:7.8336e+04 total_spectral:1.7375e+01 L1_fnorm:2.5156e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5000e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.5156e+00 L1_l1linf:6.9922e-01 L2_l1linf:6.5234e-01 L3_l1linf:6.3281e-01 L4_l1linf:6.3672e-01 L5_l1linf:6.4062e-01 
L6_l1linf:6.4844e-01 L7_l1linf:6.4844e-01 L8_l1linf:6.4062e-01 L9_l1linf:6.4062e-01 L10_l1linf:6.3672e-01 L11_l1linf:6.4844e-01 L12_l1linf:6.2109e-01 L1_spectral:3.0737e-02 L2_spectral:2.9969e-02 L3_spectral:2.9873e-02 L4_spectral:2.9782e-02 L5_spectral:3.0150e-02 L6_spectral:2.9809e-02 L7_spectral:3.0140e-02 L8_spectral:3.0539e-02 L9_spectral:3.0415e-02 L10_spectral:3.0368e-02 L11_spectral:2.9861e-02 L12_spectral:2.9800e-02 train_time:135342ms step_avg:48.34ms +[2025-09-11 09:57:29] [Rank 0] step:2801/10000 train_time:137099ms step_avg:48.95ms +[2025-09-11 09:57:29] [Rank 0] step:2801/10000 train_time:137099ms step_avg:48.95ms +[2025-09-11 09:57:30] [Rank 0] step:2821/10000 train_time:137777ms step_avg:48.84ms +[2025-09-11 09:57:30] [Rank 0] step:2821/10000 train_time:137777ms step_avg:48.84ms +[2025-09-11 09:57:31] [Rank 0] step:2841/10000 train_time:138439ms step_avg:48.73ms +[2025-09-11 09:57:31] [Rank 0] step:2841/10000 train_time:138439ms step_avg:48.73ms +[2025-09-11 09:57:31] [Rank 0] step:2861/10000 train_time:139100ms step_avg:48.62ms +[2025-09-11 09:57:31] [Rank 0] step:2861/10000 train_time:139100ms step_avg:48.62ms +[2025-09-11 09:57:32] [Rank 0] step:2881/10000 train_time:139760ms step_avg:48.51ms +[2025-09-11 09:57:32] [Rank 0] step:2881/10000 train_time:139760ms step_avg:48.51ms +[2025-09-11 09:57:33] [Rank 0] step:2901/10000 train_time:140420ms step_avg:48.40ms +[2025-09-11 09:57:33] [Rank 0] step:2901/10000 train_time:140420ms step_avg:48.40ms +[2025-09-11 09:57:33] [Rank 0] step:2921/10000 train_time:141080ms step_avg:48.30ms +[2025-09-11 09:57:33] [Rank 0] step:2921/10000 train_time:141080ms step_avg:48.30ms +[2025-09-11 09:57:34] [Rank 0] step:2941/10000 train_time:141740ms step_avg:48.19ms +[2025-09-11 09:57:34] [Rank 0] step:2941/10000 train_time:141740ms step_avg:48.19ms +[2025-09-11 09:57:35] [Rank 0] step:2961/10000 train_time:142400ms step_avg:48.09ms +[2025-09-11 09:57:35] [Rank 0] step:2961/10000 train_time:142400ms step_avg:48.09ms 
+[2025-09-11 09:57:35] [Rank 0] step:2981/10000 train_time:143061ms step_avg:47.99ms +[2025-09-11 09:57:35] [Rank 0] step:2981/10000 train_time:143061ms step_avg:47.99ms +[2025-09-11 09:57:36] [Rank 0] step:3001/10000 train_time:143725ms step_avg:47.89ms +[2025-09-11 09:57:36] [Rank 0] step:3001/10000 train_time:143725ms step_avg:47.89ms +[2025-09-11 09:57:37] [Rank 0] step:3021/10000 train_time:144388ms step_avg:47.79ms +[2025-09-11 09:57:37] [Rank 0] step:3021/10000 train_time:144388ms step_avg:47.79ms +[2025-09-11 09:57:37] [Rank 0] step:3041/10000 train_time:145051ms step_avg:47.70ms +[2025-09-11 09:57:37] [Rank 0] step:3041/10000 train_time:145051ms step_avg:47.70ms +[2025-09-11 09:57:38] [Rank 0] step:3061/10000 train_time:145714ms step_avg:47.60ms +[2025-09-11 09:57:38] [Rank 0] step:3061/10000 train_time:145714ms step_avg:47.60ms +[2025-09-11 09:57:39] [Rank 0] step:3081/10000 train_time:146376ms step_avg:47.51ms +[2025-09-11 09:57:39] [Rank 0] step:3081/10000 train_time:146376ms step_avg:47.51ms +[2025-09-11 09:57:39] [Rank 0] step:3101/10000 train_time:147039ms step_avg:47.42ms +[2025-09-11 09:57:39] [Rank 0] step:3101/10000 train_time:147039ms step_avg:47.42ms +[2025-09-11 09:57:40] [Rank 0] step:3121/10000 train_time:147702ms step_avg:47.33ms +[2025-09-11 09:57:40] [Rank 0] step:3121/10000 train_time:147702ms step_avg:47.33ms +[2025-09-11 09:57:41] [Rank 0] step:3141/10000 train_time:148364ms step_avg:47.23ms +[2025-09-11 09:57:41] [Rank 0] step:3141/10000 train_time:148364ms step_avg:47.23ms +[2025-09-11 09:57:41] [Rank 0] step:3161/10000 train_time:149026ms step_avg:47.15ms +[2025-09-11 09:57:41] [Rank 0] step:3161/10000 train_time:149026ms step_avg:47.15ms +[2025-09-11 09:57:42] [Rank 0] step:3181/10000 train_time:149690ms step_avg:47.06ms +[2025-09-11 09:57:42] [Rank 0] step:3181/10000 train_time:149690ms step_avg:47.06ms +[2025-09-11 09:57:43] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 09:57:43] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 09:57:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 09:57:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 09:57:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 09:57:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 09:57:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:57:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:57:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 09:57:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 09:57:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 09:57:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 09:57:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 09:57:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 09:57:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 09:57:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 09:57:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 09:57:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 09:57:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 09:57:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 09:57:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 09:57:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 09:57:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 09:57:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 09:57:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 09:57:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 09:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 09:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 09:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 09:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 09:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 09:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 09:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 09:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 09:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 09:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 09:57:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 09:57:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 09:57:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 09:57:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 09:57:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 09:57:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 09:57:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 09:57:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 09:57:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:57:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:57:53] [Rank 0] PRINT: step:3200/10000 val_loss:4.7385 total_sharp:5.5126e-04 L1_sharp:5.5242e-03 L2_sharp:9.0520e-04 L3_sharp:2.6266e-04 L4_sharp:4.0097e-04 L5_sharp:8.3186e-04 L6_sharp:5.1506e-04 L7_sharp:6.2461e-04 L8_sharp:1.4604e-03 L9_sharp:1.1610e-03 L10_sharp:1.3801e-03 L11_sharp:1.7893e-03 L12_sharp:8.8788e-03 total_fnorm:3.9000e+01 total_l1_linf:9.1136e+04 total_spectral:2.0000e+01 L1_fnorm:2.5156e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.5156e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.5312e+00 L1_l1linf:6.9531e-01 L2_l1linf:6.4453e-01 L3_l1linf:6.2891e-01 L4_l1linf:6.2500e-01 L5_l1linf:6.3672e-01 L6_l1linf:6.4844e-01 L7_l1linf:6.5234e-01 L8_l1linf:6.3281e-01 L9_l1linf:6.2500e-01 L10_l1linf:6.2109e-01 L11_l1linf:6.2891e-01 L12_l1linf:6.1719e-01 L1_spectral:3.0802e-02 L2_spectral:3.0288e-02 L3_spectral:3.0137e-02 L4_spectral:3.0142e-02 L5_spectral:3.0530e-02 L6_spectral:3.0302e-02 L7_spectral:3.0446e-02 L8_spectral:3.0828e-02 L9_spectral:3.1040e-02 L10_spectral:3.0829e-02 L11_spectral:3.0589e-02 L12_spectral:3.0408e-02 train_time:150333ms step_avg:46.98ms +[2025-09-11 09:57:53] [Rank 0] PRINT: step:3200/10000 
val_loss:4.7385 total_sharp:5.5126e-04 L1_sharp:5.5242e-03 L2_sharp:9.0520e-04 L3_sharp:2.6266e-04 L4_sharp:4.0097e-04 L5_sharp:8.3186e-04 L6_sharp:5.1506e-04 L7_sharp:6.2461e-04 L8_sharp:1.4604e-03 L9_sharp:1.1610e-03 L10_sharp:1.3801e-03 L11_sharp:1.7893e-03 L12_sharp:8.8788e-03 total_fnorm:3.9000e+01 total_l1_linf:9.1136e+04 total_spectral:2.0000e+01 L1_fnorm:2.5156e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.5156e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.5312e+00 L1_l1linf:6.9531e-01 L2_l1linf:6.4453e-01 L3_l1linf:6.2891e-01 L4_l1linf:6.2500e-01 L5_l1linf:6.3672e-01 L6_l1linf:6.4844e-01 L7_l1linf:6.5234e-01 L8_l1linf:6.3281e-01 L9_l1linf:6.2500e-01 L10_l1linf:6.2109e-01 L11_l1linf:6.2891e-01 L12_l1linf:6.1719e-01 L1_spectral:3.0802e-02 L2_spectral:3.0288e-02 L3_spectral:3.0137e-02 L4_spectral:3.0142e-02 L5_spectral:3.0530e-02 L6_spectral:3.0302e-02 L7_spectral:3.0446e-02 L8_spectral:3.0828e-02 L9_spectral:3.1040e-02 L10_spectral:3.0829e-02 L11_spectral:3.0589e-02 L12_spectral:3.0408e-02 train_time:150333ms step_avg:46.98ms +[2025-09-11 09:57:55] [Rank 0] step:3201/10000 train_time:152106ms step_avg:47.52ms +[2025-09-11 09:57:55] [Rank 0] step:3201/10000 train_time:152106ms step_avg:47.52ms +[2025-09-11 09:57:55] [Rank 0] step:3221/10000 train_time:152774ms step_avg:47.43ms +[2025-09-11 09:57:55] [Rank 0] step:3221/10000 train_time:152774ms step_avg:47.43ms +[2025-09-11 09:57:56] [Rank 0] step:3241/10000 train_time:153438ms step_avg:47.34ms +[2025-09-11 09:57:56] [Rank 0] step:3241/10000 train_time:153438ms step_avg:47.34ms +[2025-09-11 09:57:57] [Rank 0] step:3261/10000 train_time:154103ms step_avg:47.26ms +[2025-09-11 09:57:57] [Rank 0] step:3261/10000 train_time:154103ms step_avg:47.26ms +[2025-09-11 09:57:57] [Rank 0] step:3281/10000 train_time:154766ms step_avg:47.17ms +[2025-09-11 09:57:57] [Rank 0] step:3281/10000 
train_time:154766ms step_avg:47.17ms +[2025-09-11 09:57:58] [Rank 0] step:3301/10000 train_time:155429ms step_avg:47.09ms +[2025-09-11 09:57:58] [Rank 0] step:3301/10000 train_time:155429ms step_avg:47.09ms +[2025-09-11 09:57:59] [Rank 0] step:3321/10000 train_time:156093ms step_avg:47.00ms +[2025-09-11 09:57:59] [Rank 0] step:3321/10000 train_time:156093ms step_avg:47.00ms +[2025-09-11 09:57:59] [Rank 0] step:3341/10000 train_time:156757ms step_avg:46.92ms +[2025-09-11 09:57:59] [Rank 0] step:3341/10000 train_time:156757ms step_avg:46.92ms +[2025-09-11 09:58:00] [Rank 0] step:3361/10000 train_time:157420ms step_avg:46.84ms +[2025-09-11 09:58:00] [Rank 0] step:3361/10000 train_time:157420ms step_avg:46.84ms +[2025-09-11 09:58:01] [Rank 0] step:3381/10000 train_time:158083ms step_avg:46.76ms +[2025-09-11 09:58:01] [Rank 0] step:3381/10000 train_time:158083ms step_avg:46.76ms +[2025-09-11 09:58:01] [Rank 0] step:3401/10000 train_time:158750ms step_avg:46.68ms +[2025-09-11 09:58:01] [Rank 0] step:3401/10000 train_time:158750ms step_avg:46.68ms +[2025-09-11 09:58:02] [Rank 0] step:3421/10000 train_time:159413ms step_avg:46.60ms +[2025-09-11 09:58:02] [Rank 0] step:3421/10000 train_time:159413ms step_avg:46.60ms +[2025-09-11 09:58:03] [Rank 0] step:3441/10000 train_time:160076ms step_avg:46.52ms +[2025-09-11 09:58:03] [Rank 0] step:3441/10000 train_time:160076ms step_avg:46.52ms +[2025-09-11 09:58:03] [Rank 0] step:3461/10000 train_time:160739ms step_avg:46.44ms +[2025-09-11 09:58:03] [Rank 0] step:3461/10000 train_time:160739ms step_avg:46.44ms +[2025-09-11 09:58:04] [Rank 0] step:3481/10000 train_time:161404ms step_avg:46.37ms +[2025-09-11 09:58:04] [Rank 0] step:3481/10000 train_time:161404ms step_avg:46.37ms +[2025-09-11 09:58:05] [Rank 0] step:3501/10000 train_time:162068ms step_avg:46.29ms +[2025-09-11 09:58:05] [Rank 0] step:3501/10000 train_time:162068ms step_avg:46.29ms +[2025-09-11 09:58:05] [Rank 0] step:3521/10000 train_time:162731ms step_avg:46.22ms 
+[2025-09-11 09:58:05] [Rank 0] step:3521/10000 train_time:162731ms step_avg:46.22ms +[2025-09-11 09:58:06] [Rank 0] step:3541/10000 train_time:163395ms step_avg:46.14ms +[2025-09-11 09:58:06] [Rank 0] step:3541/10000 train_time:163395ms step_avg:46.14ms +[2025-09-11 09:58:07] [Rank 0] step:3561/10000 train_time:164058ms step_avg:46.07ms +[2025-09-11 09:58:07] [Rank 0] step:3561/10000 train_time:164058ms step_avg:46.07ms +[2025-09-11 09:58:07] [Rank 0] step:3581/10000 train_time:164720ms step_avg:46.00ms +[2025-09-11 09:58:07] [Rank 0] step:3581/10000 train_time:164720ms step_avg:46.00ms +[2025-09-11 09:58:08] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 09:58:08] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 09:58:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 09:58:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 09:58:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 09:58:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 09:58:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:58:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:58:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 09:58:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 09:58:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 09:58:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 09:58:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 09:58:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 09:58:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 09:58:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 09:58:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 09:58:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 09:58:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 09:58:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 09:58:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 09:58:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 09:58:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 09:58:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 09:58:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 09:58:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 09:58:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 09:58:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 09:58:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 09:58:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 09:58:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 09:58:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 09:58:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 09:58:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 09:58:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 09:58:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 09:58:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 09:58:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 09:58:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 09:58:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 09:58:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 09:58:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 09:58:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 09:58:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 09:58:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:58:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:58:18] [Rank 0] PRINT: step:3600/10000 val_loss:4.6835 total_sharp:7.5128e-04 L1_sharp:6.3267e-03 L2_sharp:4.5027e-04 L3_sharp:2.9316e-04 L4_sharp:4.4400e-04 L5_sharp:6.2327e-04 L6_sharp:5.1621e-04 L7_sharp:6.4703e-04 L8_sharp:1.4290e-03 L9_sharp:1.2833e-03 L10_sharp:1.3418e-03 L11_sharp:1.8718e-03 L12_sharp:1.0846e-02 total_fnorm:3.4750e+01 total_l1_linf:7.8848e+04 total_spectral:1.7625e+01 L1_fnorm:2.5000e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.5312e+00 L1_l1linf:6.8359e-01 L2_l1linf:6.3672e-01 L3_l1linf:6.1328e-01 L4_l1linf:6.2891e-01 L5_l1linf:6.2500e-01 L6_l1linf:6.3672e-01 L7_l1linf:6.3672e-01 L8_l1linf:6.1719e-01 L9_l1linf:6.0938e-01 L10_l1linf:6.0156e-01 L11_l1linf:6.2891e-01 L12_l1linf:6.2500e-01 L1_spectral:3.1105e-02 L2_spectral:3.0332e-02 L3_spectral:3.0371e-02 L4_spectral:3.0458e-02 L5_spectral:3.0575e-02 L6_spectral:3.0745e-02 L7_spectral:3.0645e-02 L8_spectral:3.1174e-02 L9_spectral:3.1267e-02 L10_spectral:3.1125e-02 L11_spectral:3.1169e-02 L12_spectral:3.0792e-02 train_time:165364ms step_avg:45.93ms +[2025-09-11 09:58:18] [Rank 0] PRINT: step:3600/10000 val_loss:4.6835 total_sharp:7.5128e-04 L1_sharp:6.3267e-03 L2_sharp:4.5027e-04 L3_sharp:2.9316e-04 L4_sharp:4.4400e-04 L5_sharp:6.2327e-04 L6_sharp:5.1621e-04 L7_sharp:6.4703e-04 L8_sharp:1.4290e-03 L9_sharp:1.2833e-03 L10_sharp:1.3418e-03 L11_sharp:1.8718e-03 L12_sharp:1.0846e-02 total_fnorm:3.4750e+01 total_l1_linf:7.8848e+04 total_spectral:1.7625e+01 L1_fnorm:2.5000e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.5312e+00 L1_l1linf:6.8359e-01 L2_l1linf:6.3672e-01 L3_l1linf:6.1328e-01 L4_l1linf:6.2891e-01 L5_l1linf:6.2500e-01 
L6_l1linf:6.3672e-01 L7_l1linf:6.3672e-01 L8_l1linf:6.1719e-01 L9_l1linf:6.0938e-01 L10_l1linf:6.0156e-01 L11_l1linf:6.2891e-01 L12_l1linf:6.2500e-01 L1_spectral:3.1105e-02 L2_spectral:3.0332e-02 L3_spectral:3.0371e-02 L4_spectral:3.0458e-02 L5_spectral:3.0575e-02 L6_spectral:3.0745e-02 L7_spectral:3.0645e-02 L8_spectral:3.1174e-02 L9_spectral:3.1267e-02 L10_spectral:3.1125e-02 L11_spectral:3.1169e-02 L12_spectral:3.0792e-02 train_time:165364ms step_avg:45.93ms +[2025-09-11 09:58:20] [Rank 0] step:3601/10000 train_time:167386ms step_avg:46.48ms +[2025-09-11 09:58:20] [Rank 0] step:3601/10000 train_time:167386ms step_avg:46.48ms +[2025-09-11 09:58:21] [Rank 0] step:3621/10000 train_time:168053ms step_avg:46.41ms +[2025-09-11 09:58:21] [Rank 0] step:3621/10000 train_time:168053ms step_avg:46.41ms +[2025-09-11 09:58:22] [Rank 0] step:3641/10000 train_time:168716ms step_avg:46.34ms +[2025-09-11 09:58:22] [Rank 0] step:3641/10000 train_time:168716ms step_avg:46.34ms +[2025-09-11 09:58:22] [Rank 0] step:3661/10000 train_time:169378ms step_avg:46.27ms +[2025-09-11 09:58:22] [Rank 0] step:3661/10000 train_time:169378ms step_avg:46.27ms +[2025-09-11 09:58:23] [Rank 0] step:3681/10000 train_time:170040ms step_avg:46.19ms +[2025-09-11 09:58:23] [Rank 0] step:3681/10000 train_time:170040ms step_avg:46.19ms +[2025-09-11 09:58:24] [Rank 0] step:3701/10000 train_time:170702ms step_avg:46.12ms +[2025-09-11 09:58:24] [Rank 0] step:3701/10000 train_time:170702ms step_avg:46.12ms +[2025-09-11 09:58:24] [Rank 0] step:3721/10000 train_time:171373ms step_avg:46.06ms +[2025-09-11 09:58:24] [Rank 0] step:3721/10000 train_time:171373ms step_avg:46.06ms +[2025-09-11 09:58:25] [Rank 0] step:3741/10000 train_time:172047ms step_avg:45.99ms +[2025-09-11 09:58:25] [Rank 0] step:3741/10000 train_time:172047ms step_avg:45.99ms +[2025-09-11 09:58:26] [Rank 0] step:3761/10000 train_time:172720ms step_avg:45.92ms +[2025-09-11 09:58:26] [Rank 0] step:3761/10000 train_time:172720ms step_avg:45.92ms 
+[2025-09-11 09:58:26] [Rank 0] step:3781/10000 train_time:173393ms step_avg:45.86ms +[2025-09-11 09:58:26] [Rank 0] step:3781/10000 train_time:173393ms step_avg:45.86ms +[2025-09-11 09:58:27] [Rank 0] step:3801/10000 train_time:174066ms step_avg:45.79ms +[2025-09-11 09:58:27] [Rank 0] step:3801/10000 train_time:174066ms step_avg:45.79ms +[2025-09-11 09:58:28] [Rank 0] step:3821/10000 train_time:174740ms step_avg:45.73ms +[2025-09-11 09:58:28] [Rank 0] step:3821/10000 train_time:174740ms step_avg:45.73ms +[2025-09-11 09:58:28] [Rank 0] step:3841/10000 train_time:175414ms step_avg:45.67ms +[2025-09-11 09:58:28] [Rank 0] step:3841/10000 train_time:175414ms step_avg:45.67ms +[2025-09-11 09:58:29] [Rank 0] step:3861/10000 train_time:176086ms step_avg:45.61ms +[2025-09-11 09:58:29] [Rank 0] step:3861/10000 train_time:176086ms step_avg:45.61ms +[2025-09-11 09:58:30] [Rank 0] step:3881/10000 train_time:176760ms step_avg:45.54ms +[2025-09-11 09:58:30] [Rank 0] step:3881/10000 train_time:176760ms step_avg:45.54ms +[2025-09-11 09:58:30] [Rank 0] step:3901/10000 train_time:177432ms step_avg:45.48ms +[2025-09-11 09:58:30] [Rank 0] step:3901/10000 train_time:177432ms step_avg:45.48ms +[2025-09-11 09:58:31] [Rank 0] step:3921/10000 train_time:178105ms step_avg:45.42ms +[2025-09-11 09:58:31] [Rank 0] step:3921/10000 train_time:178105ms step_avg:45.42ms +[2025-09-11 09:58:32] [Rank 0] step:3941/10000 train_time:178779ms step_avg:45.36ms +[2025-09-11 09:58:32] [Rank 0] step:3941/10000 train_time:178779ms step_avg:45.36ms +[2025-09-11 09:58:32] [Rank 0] step:3961/10000 train_time:179451ms step_avg:45.30ms +[2025-09-11 09:58:32] [Rank 0] step:3961/10000 train_time:179451ms step_avg:45.30ms +[2025-09-11 09:58:33] [Rank 0] step:3981/10000 train_time:180124ms step_avg:45.25ms +[2025-09-11 09:58:33] [Rank 0] step:3981/10000 train_time:180124ms step_avg:45.25ms +[2025-09-11 09:58:34] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 09:58:34] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 09:58:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 09:58:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 09:58:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 09:58:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 09:58:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:58:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:58:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 09:58:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 09:58:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 09:58:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 09:58:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 09:58:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 09:58:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 09:58:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 09:58:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 09:58:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 09:58:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 09:58:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 09:58:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 09:58:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 09:58:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 09:58:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 09:58:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 09:58:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 09:58:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 09:58:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 09:58:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 09:58:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 09:58:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 09:58:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 09:58:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 09:58:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 09:58:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 09:58:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 09:58:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 09:58:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 09:58:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 09:58:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 09:58:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 09:58:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 09:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 09:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 09:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:58:44] [Rank 0] PRINT: step:4000/10000 val_loss:4.6298 total_sharp:5.9157e-04 L1_sharp:5.9217e-03 L2_sharp:4.0190e-04 L3_sharp:3.4206e-04 L4_sharp:3.4757e-04 L5_sharp:4.8074e-04 L6_sharp:7.3234e-04 L7_sharp:4.6482e-04 L8_sharp:1.2137e-03 L9_sharp:1.2170e-03 L10_sharp:1.4536e-03 L11_sharp:2.0739e-03 L12_sharp:1.1983e-02 total_fnorm:4.0000e+01 total_l1_linf:8.7552e+04 total_spectral:2.0250e+01 L1_fnorm:2.5000e+00 L2_fnorm:2.4531e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.5312e+00 L1_l1linf:6.7969e-01 L2_l1linf:6.2891e-01 L3_l1linf:6.0938e-01 L4_l1linf:6.2500e-01 L5_l1linf:6.2109e-01 L6_l1linf:6.2891e-01 L7_l1linf:6.4062e-01 L8_l1linf:6.1328e-01 L9_l1linf:6.1328e-01 L10_l1linf:6.0938e-01 L11_l1linf:6.1328e-01 L12_l1linf:5.9766e-01 L1_spectral:3.1354e-02 L2_spectral:3.0666e-02 L3_spectral:3.0879e-02 L4_spectral:3.0537e-02 L5_spectral:3.0746e-02 L6_spectral:3.0883e-02 L7_spectral:3.0887e-02 L8_spectral:3.1209e-02 L9_spectral:3.1654e-02 L10_spectral:3.1168e-02 L11_spectral:3.1279e-02 L12_spectral:3.1043e-02 train_time:180779ms step_avg:45.19ms +[2025-09-11 09:58:44] [Rank 0] PRINT: step:4000/10000 
val_loss:4.6298 total_sharp:5.9157e-04 L1_sharp:5.9217e-03 L2_sharp:4.0190e-04 L3_sharp:3.4206e-04 L4_sharp:3.4757e-04 L5_sharp:4.8074e-04 L6_sharp:7.3234e-04 L7_sharp:4.6482e-04 L8_sharp:1.2137e-03 L9_sharp:1.2170e-03 L10_sharp:1.4536e-03 L11_sharp:2.0739e-03 L12_sharp:1.1983e-02 total_fnorm:4.0000e+01 total_l1_linf:8.7552e+04 total_spectral:2.0250e+01 L1_fnorm:2.5000e+00 L2_fnorm:2.4531e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.5312e+00 L1_l1linf:6.7969e-01 L2_l1linf:6.2891e-01 L3_l1linf:6.0938e-01 L4_l1linf:6.2500e-01 L5_l1linf:6.2109e-01 L6_l1linf:6.2891e-01 L7_l1linf:6.4062e-01 L8_l1linf:6.1328e-01 L9_l1linf:6.1328e-01 L10_l1linf:6.0938e-01 L11_l1linf:6.1328e-01 L12_l1linf:5.9766e-01 L1_spectral:3.1354e-02 L2_spectral:3.0666e-02 L3_spectral:3.0879e-02 L4_spectral:3.0537e-02 L5_spectral:3.0746e-02 L6_spectral:3.0883e-02 L7_spectral:3.0887e-02 L8_spectral:3.1209e-02 L9_spectral:3.1654e-02 L10_spectral:3.1168e-02 L11_spectral:3.1279e-02 L12_spectral:3.1043e-02 train_time:180779ms step_avg:45.19ms +[2025-09-11 09:58:45] [Rank 0] step:4001/10000 train_time:181908ms step_avg:45.47ms +[2025-09-11 09:58:45] [Rank 0] step:4001/10000 train_time:181908ms step_avg:45.47ms +[2025-09-11 09:58:46] [Rank 0] step:4021/10000 train_time:182571ms step_avg:45.40ms +[2025-09-11 09:58:46] [Rank 0] step:4021/10000 train_time:182571ms step_avg:45.40ms +[2025-09-11 09:58:47] [Rank 0] step:4041/10000 train_time:183245ms step_avg:45.35ms +[2025-09-11 09:58:47] [Rank 0] step:4041/10000 train_time:183245ms step_avg:45.35ms +[2025-09-11 09:58:47] [Rank 0] step:4061/10000 train_time:183919ms step_avg:45.29ms +[2025-09-11 09:58:47] [Rank 0] step:4061/10000 train_time:183919ms step_avg:45.29ms +[2025-09-11 09:58:48] [Rank 0] step:4081/10000 train_time:184592ms step_avg:45.23ms +[2025-09-11 09:58:48] [Rank 0] step:4081/10000 
train_time:184592ms step_avg:45.23ms +[2025-09-11 09:58:49] [Rank 0] step:4101/10000 train_time:185547ms step_avg:45.24ms +[2025-09-11 09:58:49] [Rank 0] step:4101/10000 train_time:185547ms step_avg:45.24ms +[2025-09-11 09:58:50] [Rank 0] step:4121/10000 train_time:186456ms step_avg:45.25ms +[2025-09-11 09:58:50] [Rank 0] step:4121/10000 train_time:186456ms step_avg:45.25ms +[2025-09-11 09:58:51] [Rank 0] step:4141/10000 train_time:187129ms step_avg:45.19ms +[2025-09-11 09:58:51] [Rank 0] step:4141/10000 train_time:187129ms step_avg:45.19ms +[2025-09-11 09:58:52] [Rank 0] step:4161/10000 train_time:188061ms step_avg:45.20ms +[2025-09-11 09:58:52] [Rank 0] step:4161/10000 train_time:188061ms step_avg:45.20ms +[2025-09-11 09:58:52] [Rank 0] step:4181/10000 train_time:188734ms step_avg:45.14ms +[2025-09-11 09:58:52] [Rank 0] step:4181/10000 train_time:188734ms step_avg:45.14ms +[2025-09-11 09:58:53] [Rank 0] step:4201/10000 train_time:189407ms step_avg:45.09ms +[2025-09-11 09:58:53] [Rank 0] step:4201/10000 train_time:189407ms step_avg:45.09ms +[2025-09-11 09:58:54] [Rank 0] step:4221/10000 train_time:190079ms step_avg:45.03ms +[2025-09-11 09:58:54] [Rank 0] step:4221/10000 train_time:190079ms step_avg:45.03ms +[2025-09-11 09:58:54] [Rank 0] step:4241/10000 train_time:190752ms step_avg:44.98ms +[2025-09-11 09:58:54] [Rank 0] step:4241/10000 train_time:190752ms step_avg:44.98ms +[2025-09-11 09:58:55] [Rank 0] step:4261/10000 train_time:191425ms step_avg:44.92ms +[2025-09-11 09:58:55] [Rank 0] step:4261/10000 train_time:191425ms step_avg:44.92ms +[2025-09-11 09:58:56] [Rank 0] step:4281/10000 train_time:192100ms step_avg:44.87ms +[2025-09-11 09:58:56] [Rank 0] step:4281/10000 train_time:192100ms step_avg:44.87ms +[2025-09-11 09:58:56] [Rank 0] step:4301/10000 train_time:192772ms step_avg:44.82ms +[2025-09-11 09:58:56] [Rank 0] step:4301/10000 train_time:192772ms step_avg:44.82ms +[2025-09-11 09:58:57] [Rank 0] step:4321/10000 train_time:193444ms step_avg:44.77ms 
+[2025-09-11 09:58:57] [Rank 0] step:4321/10000 train_time:193444ms step_avg:44.77ms +[2025-09-11 09:58:58] [Rank 0] step:4341/10000 train_time:194117ms step_avg:44.72ms +[2025-09-11 09:58:58] [Rank 0] step:4341/10000 train_time:194117ms step_avg:44.72ms +[2025-09-11 09:58:58] [Rank 0] step:4361/10000 train_time:194789ms step_avg:44.67ms +[2025-09-11 09:58:58] [Rank 0] step:4361/10000 train_time:194789ms step_avg:44.67ms +[2025-09-11 09:58:59] [Rank 0] step:4381/10000 train_time:195463ms step_avg:44.62ms +[2025-09-11 09:58:59] [Rank 0] step:4381/10000 train_time:195463ms step_avg:44.62ms +[2025-09-11 09:59:00] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 09:59:00] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 09:59:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 09:59:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 09:59:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 09:59:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 09:59:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:59:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:59:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 09:59:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 09:59:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 09:59:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 09:59:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 09:59:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 09:59:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 09:59:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 09:59:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 09:59:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 09:59:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 09:59:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 09:59:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 09:59:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 09:59:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 09:59:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 09:59:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 09:59:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 09:59:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 09:59:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 09:59:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 09:59:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 09:59:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 09:59:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 09:59:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 09:59:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 09:59:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 09:59:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 09:59:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 09:59:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 09:59:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 09:59:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 09:59:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 09:59:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 09:59:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 09:59:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 09:59:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:59:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:59:10] [Rank 0] PRINT: step:4400/10000 val_loss:4.6006 total_sharp:5.3519e-04 L1_sharp:5.4499e-03 L2_sharp:2.5536e-04 L3_sharp:3.4182e-04 L4_sharp:2.4035e-04 L5_sharp:7.1112e-04 L6_sharp:5.0673e-04 L7_sharp:5.7472e-04 L8_sharp:1.2750e-03 L9_sharp:9.8919e-04 L10_sharp:1.1376e-03 L11_sharp:1.5496e-03 L12_sharp:5.9143e-03 total_fnorm:3.6250e+01 total_l1_linf:7.9360e+04 total_spectral:1.8375e+01 L1_fnorm:2.4844e+00 L2_fnorm:2.4375e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4531e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5000e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.5312e+00 L1_l1linf:6.6797e-01 L2_l1linf:6.1328e-01 L3_l1linf:6.0547e-01 L4_l1linf:6.1719e-01 L5_l1linf:6.0938e-01 L6_l1linf:6.2109e-01 L7_l1linf:6.2891e-01 L8_l1linf:6.0938e-01 L9_l1linf:6.0938e-01 L10_l1linf:5.9375e-01 L11_l1linf:6.0547e-01 L12_l1linf:5.8984e-01 L1_spectral:3.1524e-02 L2_spectral:3.0971e-02 L3_spectral:3.0888e-02 L4_spectral:3.0852e-02 L5_spectral:3.0898e-02 L6_spectral:3.1051e-02 L7_spectral:3.1050e-02 L8_spectral:3.1316e-02 L9_spectral:3.1578e-02 L10_spectral:3.1493e-02 L11_spectral:3.1301e-02 L12_spectral:3.1032e-02 train_time:196116ms step_avg:44.57ms +[2025-09-11 09:59:10] [Rank 0] PRINT: step:4400/10000 val_loss:4.6006 total_sharp:5.3519e-04 L1_sharp:5.4499e-03 L2_sharp:2.5536e-04 L3_sharp:3.4182e-04 L4_sharp:2.4035e-04 L5_sharp:7.1112e-04 L6_sharp:5.0673e-04 L7_sharp:5.7472e-04 L8_sharp:1.2750e-03 L9_sharp:9.8919e-04 L10_sharp:1.1376e-03 L11_sharp:1.5496e-03 L12_sharp:5.9143e-03 total_fnorm:3.6250e+01 total_l1_linf:7.9360e+04 total_spectral:1.8375e+01 L1_fnorm:2.4844e+00 L2_fnorm:2.4375e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4531e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5000e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.5312e+00 L1_l1linf:6.6797e-01 L2_l1linf:6.1328e-01 L3_l1linf:6.0547e-01 L4_l1linf:6.1719e-01 L5_l1linf:6.0938e-01 
L6_l1linf:6.2109e-01 L7_l1linf:6.2891e-01 L8_l1linf:6.0938e-01 L9_l1linf:6.0938e-01 L10_l1linf:5.9375e-01 L11_l1linf:6.0547e-01 L12_l1linf:5.8984e-01 L1_spectral:3.1524e-02 L2_spectral:3.0971e-02 L3_spectral:3.0888e-02 L4_spectral:3.0852e-02 L5_spectral:3.0898e-02 L6_spectral:3.1051e-02 L7_spectral:3.1050e-02 L8_spectral:3.1316e-02 L9_spectral:3.1578e-02 L10_spectral:3.1493e-02 L11_spectral:3.1301e-02 L12_spectral:3.1032e-02 train_time:196116ms step_avg:44.57ms +[2025-09-11 09:59:11] [Rank 0] step:4401/10000 train_time:197233ms step_avg:44.82ms +[2025-09-11 09:59:11] [Rank 0] step:4401/10000 train_time:197233ms step_avg:44.82ms +[2025-09-11 09:59:11] [Rank 0] step:4421/10000 train_time:197896ms step_avg:44.76ms +[2025-09-11 09:59:11] [Rank 0] step:4421/10000 train_time:197896ms step_avg:44.76ms +[2025-09-11 09:59:12] [Rank 0] step:4441/10000 train_time:198571ms step_avg:44.71ms +[2025-09-11 09:59:12] [Rank 0] step:4441/10000 train_time:198571ms step_avg:44.71ms +[2025-09-11 09:59:13] [Rank 0] step:4461/10000 train_time:199246ms step_avg:44.66ms +[2025-09-11 09:59:13] [Rank 0] step:4461/10000 train_time:199246ms step_avg:44.66ms +[2025-09-11 09:59:13] [Rank 0] step:4481/10000 train_time:199922ms step_avg:44.62ms +[2025-09-11 09:59:13] [Rank 0] step:4481/10000 train_time:199922ms step_avg:44.62ms +[2025-09-11 09:59:14] [Rank 0] step:4501/10000 train_time:200598ms step_avg:44.57ms +[2025-09-11 09:59:14] [Rank 0] step:4501/10000 train_time:200598ms step_avg:44.57ms +[2025-09-11 09:59:15] [Rank 0] step:4521/10000 train_time:201273ms step_avg:44.52ms +[2025-09-11 09:59:15] [Rank 0] step:4521/10000 train_time:201273ms step_avg:44.52ms +[2025-09-11 09:59:15] [Rank 0] step:4541/10000 train_time:201949ms step_avg:44.47ms +[2025-09-11 09:59:15] [Rank 0] step:4541/10000 train_time:201949ms step_avg:44.47ms +[2025-09-11 09:59:16] [Rank 0] step:4561/10000 train_time:202625ms step_avg:44.43ms +[2025-09-11 09:59:16] [Rank 0] step:4561/10000 train_time:202625ms step_avg:44.43ms 
+[2025-09-11 09:59:17] [Rank 0] step:4581/10000 train_time:203301ms step_avg:44.38ms +[2025-09-11 09:59:17] [Rank 0] step:4581/10000 train_time:203301ms step_avg:44.38ms +[2025-09-11 09:59:17] [Rank 0] step:4601/10000 train_time:203976ms step_avg:44.33ms +[2025-09-11 09:59:17] [Rank 0] step:4601/10000 train_time:203976ms step_avg:44.33ms +[2025-09-11 09:59:18] [Rank 0] step:4621/10000 train_time:204652ms step_avg:44.29ms +[2025-09-11 09:59:18] [Rank 0] step:4621/10000 train_time:204652ms step_avg:44.29ms +[2025-09-11 09:59:19] [Rank 0] step:4641/10000 train_time:205327ms step_avg:44.24ms +[2025-09-11 09:59:19] [Rank 0] step:4641/10000 train_time:205327ms step_avg:44.24ms +[2025-09-11 09:59:20] [Rank 0] step:4661/10000 train_time:206003ms step_avg:44.20ms +[2025-09-11 09:59:20] [Rank 0] step:4661/10000 train_time:206003ms step_avg:44.20ms +[2025-09-11 09:59:20] [Rank 0] step:4681/10000 train_time:206678ms step_avg:44.15ms +[2025-09-11 09:59:20] [Rank 0] step:4681/10000 train_time:206678ms step_avg:44.15ms +[2025-09-11 09:59:21] [Rank 0] step:4701/10000 train_time:207354ms step_avg:44.11ms +[2025-09-11 09:59:21] [Rank 0] step:4701/10000 train_time:207354ms step_avg:44.11ms +[2025-09-11 09:59:22] [Rank 0] step:4721/10000 train_time:208029ms step_avg:44.06ms +[2025-09-11 09:59:22] [Rank 0] step:4721/10000 train_time:208029ms step_avg:44.06ms +[2025-09-11 09:59:22] [Rank 0] step:4741/10000 train_time:208704ms step_avg:44.02ms +[2025-09-11 09:59:22] [Rank 0] step:4741/10000 train_time:208704ms step_avg:44.02ms +[2025-09-11 09:59:23] [Rank 0] step:4761/10000 train_time:209379ms step_avg:43.98ms +[2025-09-11 09:59:23] [Rank 0] step:4761/10000 train_time:209379ms step_avg:43.98ms +[2025-09-11 09:59:24] [Rank 0] step:4781/10000 train_time:210053ms step_avg:43.94ms +[2025-09-11 09:59:24] [Rank 0] step:4781/10000 train_time:210053ms step_avg:43.94ms +[2025-09-11 09:59:24] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 09:59:24] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 09:59:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 09:59:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 09:59:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 09:59:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 09:59:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:59:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:59:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 09:59:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 09:59:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 09:59:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 09:59:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 09:59:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 09:59:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 09:59:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 09:59:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 09:59:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 09:59:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 09:59:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 09:59:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 09:59:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 09:59:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 09:59:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 09:59:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 09:59:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 09:59:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 09:59:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 09:59:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 09:59:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 09:59:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 09:59:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 09:59:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 09:59:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 09:59:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 09:59:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 09:59:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 09:59:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 09:59:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 09:59:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 09:59:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 09:59:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 09:59:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 09:59:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 09:59:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:59:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:59:34] [Rank 0] PRINT: step:4800/10000 val_loss:4.5466 total_sharp:4.8295e-04 L1_sharp:4.9638e-03 L2_sharp:5.9349e-04 L3_sharp:3.2540e-04 L4_sharp:2.0119e-04 L5_sharp:3.0343e-04 L6_sharp:2.9059e-04 L7_sharp:4.5623e-04 L8_sharp:1.0591e-03 L9_sharp:8.2227e-04 L10_sharp:1.0790e-03 L11_sharp:1.4858e-03 L12_sharp:5.7703e-03 total_fnorm:3.6250e+01 total_l1_linf:7.9872e+04 total_spectral:1.8375e+01 L1_fnorm:2.4844e+00 L2_fnorm:2.4531e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5000e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.5312e+00 L1_l1linf:6.5625e-01 L2_l1linf:6.2500e-01 L3_l1linf:5.9766e-01 L4_l1linf:6.1719e-01 L5_l1linf:6.0547e-01 L6_l1linf:6.2891e-01 L7_l1linf:6.3672e-01 L8_l1linf:6.2500e-01 L9_l1linf:5.9766e-01 L10_l1linf:5.8203e-01 L11_l1linf:5.8594e-01 L12_l1linf:6.0156e-01 L1_spectral:3.1790e-02 L2_spectral:3.1186e-02 L3_spectral:3.0997e-02 L4_spectral:3.1313e-02 L5_spectral:3.1390e-02 L6_spectral:3.1320e-02 L7_spectral:3.1465e-02 L8_spectral:3.1323e-02 L9_spectral:3.1836e-02 L10_spectral:3.1497e-02 L11_spectral:3.1682e-02 L12_spectral:3.1451e-02 train_time:210709ms step_avg:43.90ms +[2025-09-11 09:59:34] [Rank 0] PRINT: step:4800/10000 
val_loss:4.5466 total_sharp:4.8295e-04 L1_sharp:4.9638e-03 L2_sharp:5.9349e-04 L3_sharp:3.2540e-04 L4_sharp:2.0119e-04 L5_sharp:3.0343e-04 L6_sharp:2.9059e-04 L7_sharp:4.5623e-04 L8_sharp:1.0591e-03 L9_sharp:8.2227e-04 L10_sharp:1.0790e-03 L11_sharp:1.4858e-03 L12_sharp:5.7703e-03 total_fnorm:3.6250e+01 total_l1_linf:7.9872e+04 total_spectral:1.8375e+01 L1_fnorm:2.4844e+00 L2_fnorm:2.4531e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5000e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.5312e+00 L1_l1linf:6.5625e-01 L2_l1linf:6.2500e-01 L3_l1linf:5.9766e-01 L4_l1linf:6.1719e-01 L5_l1linf:6.0547e-01 L6_l1linf:6.2891e-01 L7_l1linf:6.3672e-01 L8_l1linf:6.2500e-01 L9_l1linf:5.9766e-01 L10_l1linf:5.8203e-01 L11_l1linf:5.8594e-01 L12_l1linf:6.0156e-01 L1_spectral:3.1790e-02 L2_spectral:3.1186e-02 L3_spectral:3.0997e-02 L4_spectral:3.1313e-02 L5_spectral:3.1390e-02 L6_spectral:3.1320e-02 L7_spectral:3.1465e-02 L8_spectral:3.1323e-02 L9_spectral:3.1836e-02 L10_spectral:3.1497e-02 L11_spectral:3.1682e-02 L12_spectral:3.1451e-02 train_time:210709ms step_avg:43.90ms +[2025-09-11 09:59:35] [Rank 0] step:4801/10000 train_time:211892ms step_avg:44.13ms +[2025-09-11 09:59:35] [Rank 0] step:4801/10000 train_time:211892ms step_avg:44.13ms +[2025-09-11 09:59:36] [Rank 0] step:4821/10000 train_time:212573ms step_avg:44.09ms +[2025-09-11 09:59:36] [Rank 0] step:4821/10000 train_time:212573ms step_avg:44.09ms +[2025-09-11 09:59:37] [Rank 0] step:4841/10000 train_time:213250ms step_avg:44.05ms +[2025-09-11 09:59:37] [Rank 0] step:4841/10000 train_time:213250ms step_avg:44.05ms +[2025-09-11 09:59:37] [Rank 0] step:4861/10000 train_time:213926ms step_avg:44.01ms +[2025-09-11 09:59:37] [Rank 0] step:4861/10000 train_time:213926ms step_avg:44.01ms +[2025-09-11 09:59:38] [Rank 0] step:4881/10000 train_time:214603ms step_avg:43.97ms +[2025-09-11 09:59:38] [Rank 0] step:4881/10000 
train_time:214603ms step_avg:43.97ms +[2025-09-11 09:59:39] [Rank 0] step:4901/10000 train_time:215280ms step_avg:43.93ms +[2025-09-11 09:59:39] [Rank 0] step:4901/10000 train_time:215280ms step_avg:43.93ms +[2025-09-11 09:59:39] [Rank 0] step:4921/10000 train_time:215956ms step_avg:43.88ms +[2025-09-11 09:59:39] [Rank 0] step:4921/10000 train_time:215956ms step_avg:43.88ms +[2025-09-11 09:59:40] [Rank 0] step:4941/10000 train_time:216632ms step_avg:43.84ms +[2025-09-11 09:59:40] [Rank 0] step:4941/10000 train_time:216632ms step_avg:43.84ms +[2025-09-11 09:59:41] [Rank 0] step:4961/10000 train_time:217307ms step_avg:43.80ms +[2025-09-11 09:59:41] [Rank 0] step:4961/10000 train_time:217307ms step_avg:43.80ms +[2025-09-11 09:59:41] [Rank 0] step:4981/10000 train_time:217983ms step_avg:43.76ms +[2025-09-11 09:59:41] [Rank 0] step:4981/10000 train_time:217983ms step_avg:43.76ms +[2025-09-11 09:59:42] [Rank 0] step:5001/10000 train_time:218660ms step_avg:43.72ms +[2025-09-11 09:59:42] [Rank 0] step:5001/10000 train_time:218660ms step_avg:43.72ms +[2025-09-11 09:59:43] [Rank 0] step:5021/10000 train_time:219335ms step_avg:43.68ms +[2025-09-11 09:59:43] [Rank 0] step:5021/10000 train_time:219335ms step_avg:43.68ms +[2025-09-11 09:59:43] [Rank 0] step:5041/10000 train_time:220009ms step_avg:43.64ms +[2025-09-11 09:59:43] [Rank 0] step:5041/10000 train_time:220009ms step_avg:43.64ms +[2025-09-11 09:59:44] [Rank 0] step:5061/10000 train_time:220685ms step_avg:43.61ms +[2025-09-11 09:59:44] [Rank 0] step:5061/10000 train_time:220685ms step_avg:43.61ms +[2025-09-11 09:59:45] [Rank 0] step:5081/10000 train_time:221359ms step_avg:43.57ms +[2025-09-11 09:59:45] [Rank 0] step:5081/10000 train_time:221359ms step_avg:43.57ms +[2025-09-11 09:59:46] [Rank 0] step:5101/10000 train_time:222035ms step_avg:43.53ms +[2025-09-11 09:59:46] [Rank 0] step:5101/10000 train_time:222035ms step_avg:43.53ms +[2025-09-11 09:59:46] [Rank 0] step:5121/10000 train_time:222710ms step_avg:43.49ms 
+[2025-09-11 09:59:46] [Rank 0] step:5121/10000 train_time:222710ms step_avg:43.49ms +[2025-09-11 09:59:47] [Rank 0] step:5141/10000 train_time:223385ms step_avg:43.45ms +[2025-09-11 09:59:47] [Rank 0] step:5141/10000 train_time:223385ms step_avg:43.45ms +[2025-09-11 09:59:48] [Rank 0] step:5161/10000 train_time:224061ms step_avg:43.41ms +[2025-09-11 09:59:48] [Rank 0] step:5161/10000 train_time:224061ms step_avg:43.41ms +[2025-09-11 09:59:48] [Rank 0] step:5181/10000 train_time:224736ms step_avg:43.38ms +[2025-09-11 09:59:48] [Rank 0] step:5181/10000 train_time:224736ms step_avg:43.38ms +[2025-09-11 09:59:49] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 09:59:49] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 09:59:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 09:59:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 09:59:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 09:59:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 09:59:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:59:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:59:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 09:59:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 09:59:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 09:59:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 09:59:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 09:59:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 09:59:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 09:59:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 09:59:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 09:59:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 09:59:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 09:59:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 09:59:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 09:59:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 09:59:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 09:59:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 09:59:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 09:59:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 09:59:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 09:59:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 09:59:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 09:59:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 09:59:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 09:59:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 09:59:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 09:59:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 09:59:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 09:59:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 09:59:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 09:59:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 09:59:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 09:59:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 09:59:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 09:59:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 09:59:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 09:59:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 09:59:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:59:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:59:59] [Rank 0] PRINT: step:5200/10000 val_loss:4.5174 total_sharp:7.5876e-04 L1_sharp:5.0203e-03 L2_sharp:7.0812e-04 L3_sharp:6.0896e-04 L4_sharp:4.1892e-04 L5_sharp:5.3579e-04 L6_sharp:4.5610e-04 L7_sharp:4.2972e-04 L8_sharp:1.0956e-03 L9_sharp:1.0859e-03 L10_sharp:1.1250e-03 L11_sharp:1.7656e-03 L12_sharp:1.5578e-02 total_fnorm:3.4250e+01 total_l1_linf:7.2192e+04 total_spectral:1.7125e+01 L1_fnorm:2.4688e+00 L2_fnorm:2.4531e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.5156e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5000e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.5312e+00 L1_l1linf:6.5234e-01 L2_l1linf:6.0547e-01 L3_l1linf:5.9375e-01 L4_l1linf:6.2109e-01 L5_l1linf:6.0938e-01 L6_l1linf:6.1719e-01 L7_l1linf:6.2109e-01 L8_l1linf:6.0547e-01 L9_l1linf:5.8984e-01 L10_l1linf:5.7422e-01 L11_l1linf:5.7031e-01 L12_l1linf:5.9375e-01 L1_spectral:3.1821e-02 L2_spectral:3.1011e-02 L3_spectral:3.1062e-02 L4_spectral:3.1442e-02 L5_spectral:3.1403e-02 L6_spectral:3.1513e-02 L7_spectral:3.1545e-02 L8_spectral:3.1473e-02 L9_spectral:3.2021e-02 L10_spectral:3.1781e-02 L11_spectral:3.1784e-02 L12_spectral:3.1616e-02 train_time:225398ms step_avg:43.35ms +[2025-09-11 09:59:59] [Rank 0] PRINT: step:5200/10000 val_loss:4.5174 total_sharp:7.5876e-04 L1_sharp:5.0203e-03 L2_sharp:7.0812e-04 L3_sharp:6.0896e-04 L4_sharp:4.1892e-04 L5_sharp:5.3579e-04 L6_sharp:4.5610e-04 L7_sharp:4.2972e-04 L8_sharp:1.0956e-03 L9_sharp:1.0859e-03 L10_sharp:1.1250e-03 L11_sharp:1.7656e-03 L12_sharp:1.5578e-02 total_fnorm:3.4250e+01 total_l1_linf:7.2192e+04 total_spectral:1.7125e+01 L1_fnorm:2.4688e+00 L2_fnorm:2.4531e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.5156e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5000e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.5312e+00 L1_l1linf:6.5234e-01 L2_l1linf:6.0547e-01 L3_l1linf:5.9375e-01 L4_l1linf:6.2109e-01 L5_l1linf:6.0938e-01 
L6_l1linf:6.1719e-01 L7_l1linf:6.2109e-01 L8_l1linf:6.0547e-01 L9_l1linf:5.8984e-01 L10_l1linf:5.7422e-01 L11_l1linf:5.7031e-01 L12_l1linf:5.9375e-01 L1_spectral:3.1821e-02 L2_spectral:3.1011e-02 L3_spectral:3.1062e-02 L4_spectral:3.1442e-02 L5_spectral:3.1403e-02 L6_spectral:3.1513e-02 L7_spectral:3.1545e-02 L8_spectral:3.1473e-02 L9_spectral:3.2021e-02 L10_spectral:3.1781e-02 L11_spectral:3.1784e-02 L12_spectral:3.1616e-02 train_time:225398ms step_avg:43.35ms +[2025-09-11 10:00:00] [Rank 0] step:5201/10000 train_time:226559ms step_avg:43.56ms +[2025-09-11 10:00:00] [Rank 0] step:5201/10000 train_time:226559ms step_avg:43.56ms +[2025-09-11 10:00:01] [Rank 0] step:5221/10000 train_time:227234ms step_avg:43.52ms +[2025-09-11 10:00:01] [Rank 0] step:5221/10000 train_time:227234ms step_avg:43.52ms +[2025-09-11 10:00:02] [Rank 0] step:5241/10000 train_time:228105ms step_avg:43.52ms +[2025-09-11 10:00:02] [Rank 0] step:5241/10000 train_time:228105ms step_avg:43.52ms +[2025-09-11 10:00:02] [Rank 0] step:5261/10000 train_time:228879ms step_avg:43.50ms +[2025-09-11 10:00:02] [Rank 0] step:5261/10000 train_time:228879ms step_avg:43.50ms +[2025-09-11 10:00:03] [Rank 0] step:5281/10000 train_time:229564ms step_avg:43.47ms +[2025-09-11 10:00:03] [Rank 0] step:5281/10000 train_time:229564ms step_avg:43.47ms +[2025-09-11 10:00:04] [Rank 0] step:5301/10000 train_time:230249ms step_avg:43.44ms +[2025-09-11 10:00:04] [Rank 0] step:5301/10000 train_time:230249ms step_avg:43.44ms +[2025-09-11 10:00:04] [Rank 0] step:5321/10000 train_time:230934ms step_avg:43.40ms +[2025-09-11 10:00:04] [Rank 0] step:5321/10000 train_time:230934ms step_avg:43.40ms +[2025-09-11 10:00:05] [Rank 0] step:5341/10000 train_time:231618ms step_avg:43.37ms +[2025-09-11 10:00:05] [Rank 0] step:5341/10000 train_time:231618ms step_avg:43.37ms +[2025-09-11 10:00:06] [Rank 0] step:5361/10000 train_time:232303ms step_avg:43.33ms +[2025-09-11 10:00:06] [Rank 0] step:5361/10000 train_time:232303ms step_avg:43.33ms 
+[2025-09-11 10:00:07] [Rank 0] step:5381/10000 train_time:232988ms step_avg:43.30ms +[2025-09-11 10:00:07] [Rank 0] step:5381/10000 train_time:232988ms step_avg:43.30ms +[2025-09-11 10:00:07] [Rank 0] step:5401/10000 train_time:233672ms step_avg:43.26ms +[2025-09-11 10:00:07] [Rank 0] step:5401/10000 train_time:233672ms step_avg:43.26ms +[2025-09-11 10:00:08] [Rank 0] step:5421/10000 train_time:234357ms step_avg:43.23ms +[2025-09-11 10:00:08] [Rank 0] step:5421/10000 train_time:234357ms step_avg:43.23ms +[2025-09-11 10:00:09] [Rank 0] step:5441/10000 train_time:235040ms step_avg:43.20ms +[2025-09-11 10:00:09] [Rank 0] step:5441/10000 train_time:235040ms step_avg:43.20ms +[2025-09-11 10:00:09] [Rank 0] step:5461/10000 train_time:235726ms step_avg:43.17ms +[2025-09-11 10:00:09] [Rank 0] step:5461/10000 train_time:235726ms step_avg:43.17ms +[2025-09-11 10:00:10] [Rank 0] step:5481/10000 train_time:236411ms step_avg:43.13ms +[2025-09-11 10:00:10] [Rank 0] step:5481/10000 train_time:236411ms step_avg:43.13ms +[2025-09-11 10:00:11] [Rank 0] step:5501/10000 train_time:237096ms step_avg:43.10ms +[2025-09-11 10:00:11] [Rank 0] step:5501/10000 train_time:237096ms step_avg:43.10ms +[2025-09-11 10:00:11] [Rank 0] step:5521/10000 train_time:237779ms step_avg:43.07ms +[2025-09-11 10:00:11] [Rank 0] step:5521/10000 train_time:237779ms step_avg:43.07ms +[2025-09-11 10:00:12] [Rank 0] step:5541/10000 train_time:238465ms step_avg:43.04ms +[2025-09-11 10:00:12] [Rank 0] step:5541/10000 train_time:238465ms step_avg:43.04ms +[2025-09-11 10:00:13] [Rank 0] step:5561/10000 train_time:239151ms step_avg:43.01ms +[2025-09-11 10:00:13] [Rank 0] step:5561/10000 train_time:239151ms step_avg:43.01ms +[2025-09-11 10:00:13] [Rank 0] step:5581/10000 train_time:239838ms step_avg:42.97ms +[2025-09-11 10:00:13] [Rank 0] step:5581/10000 train_time:239838ms step_avg:42.97ms +[2025-09-11 10:00:14] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 10:00:14] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 10:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 10:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 10:00:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 10:00:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 10:00:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:00:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:00:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 10:00:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 10:00:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 10:00:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 10:00:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 10:00:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 10:00:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 10:00:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 10:00:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 10:00:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 10:00:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 10:00:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 10:00:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 10:00:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 10:00:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 10:00:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 10:00:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 10:00:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 10:00:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 10:00:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 10:00:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 10:00:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 10:00:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 10:00:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 10:00:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 10:00:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 10:00:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 10:00:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 10:00:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 10:00:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 10:00:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 10:00:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 10:00:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 10:00:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 10:00:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 10:00:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 10:00:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:00:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:00:24] [Rank 0] PRINT: step:5600/10000 val_loss:4.4946 total_sharp:4.7599e-04 L1_sharp:2.8913e-03 L2_sharp:5.9188e-04 L3_sharp:5.6148e-04 L4_sharp:3.0433e-04 L5_sharp:3.8883e-04 L6_sharp:3.1445e-04 L7_sharp:3.7590e-04 L8_sharp:8.5187e-04 L9_sharp:7.8315e-04 L10_sharp:1.0204e-03 L11_sharp:1.4693e-03 L12_sharp:7.6388e-03 total_fnorm:3.3750e+01 total_l1_linf:7.3728e+04 total_spectral:1.7375e+01 L1_fnorm:2.4688e+00 L2_fnorm:2.4375e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4531e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5000e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.5312e+00 L1_l1linf:6.4453e-01 L2_l1linf:6.0156e-01 L3_l1linf:5.8203e-01 L4_l1linf:6.0156e-01 L5_l1linf:6.0938e-01 L6_l1linf:6.0938e-01 L7_l1linf:6.2891e-01 L8_l1linf:5.9375e-01 L9_l1linf:5.7422e-01 L10_l1linf:5.6250e-01 L11_l1linf:5.7812e-01 L12_l1linf:5.9375e-01 L1_spectral:3.1856e-02 L2_spectral:3.1470e-02 L3_spectral:3.1284e-02 L4_spectral:3.1365e-02 L5_spectral:3.1594e-02 L6_spectral:3.1582e-02 L7_spectral:3.1571e-02 L8_spectral:3.1363e-02 L9_spectral:3.2018e-02 L10_spectral:3.1934e-02 L11_spectral:3.1952e-02 L12_spectral:3.1676e-02 train_time:240502ms step_avg:42.95ms +[2025-09-11 10:00:24] [Rank 0] PRINT: step:5600/10000 
val_loss:4.4946 total_sharp:4.7599e-04 L1_sharp:2.8913e-03 L2_sharp:5.9188e-04 L3_sharp:5.6148e-04 L4_sharp:3.0433e-04 L5_sharp:3.8883e-04 L6_sharp:3.1445e-04 L7_sharp:3.7590e-04 L8_sharp:8.5187e-04 L9_sharp:7.8315e-04 L10_sharp:1.0204e-03 L11_sharp:1.4693e-03 L12_sharp:7.6388e-03 total_fnorm:3.3750e+01 total_l1_linf:7.3728e+04 total_spectral:1.7375e+01 L1_fnorm:2.4688e+00 L2_fnorm:2.4375e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4531e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5000e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.5312e+00 L1_l1linf:6.4453e-01 L2_l1linf:6.0156e-01 L3_l1linf:5.8203e-01 L4_l1linf:6.0156e-01 L5_l1linf:6.0938e-01 L6_l1linf:6.0938e-01 L7_l1linf:6.2891e-01 L8_l1linf:5.9375e-01 L9_l1linf:5.7422e-01 L10_l1linf:5.6250e-01 L11_l1linf:5.7812e-01 L12_l1linf:5.9375e-01 L1_spectral:3.1856e-02 L2_spectral:3.1470e-02 L3_spectral:3.1284e-02 L4_spectral:3.1365e-02 L5_spectral:3.1594e-02 L6_spectral:3.1582e-02 L7_spectral:3.1571e-02 L8_spectral:3.1363e-02 L9_spectral:3.2018e-02 L10_spectral:3.1934e-02 L11_spectral:3.1952e-02 L12_spectral:3.1676e-02 train_time:240502ms step_avg:42.95ms +[2025-09-11 10:00:25] [Rank 0] step:5601/10000 train_time:241672ms step_avg:43.15ms +[2025-09-11 10:00:25] [Rank 0] step:5601/10000 train_time:241672ms step_avg:43.15ms +[2025-09-11 10:00:26] [Rank 0] step:5621/10000 train_time:242370ms step_avg:43.12ms +[2025-09-11 10:00:26] [Rank 0] step:5621/10000 train_time:242370ms step_avg:43.12ms +[2025-09-11 10:00:27] [Rank 0] step:5641/10000 train_time:243054ms step_avg:43.09ms +[2025-09-11 10:00:27] [Rank 0] step:5641/10000 train_time:243054ms step_avg:43.09ms +[2025-09-11 10:00:27] [Rank 0] step:5661/10000 train_time:243740ms step_avg:43.06ms +[2025-09-11 10:00:27] [Rank 0] step:5661/10000 train_time:243740ms step_avg:43.06ms +[2025-09-11 10:00:28] [Rank 0] step:5681/10000 train_time:244425ms step_avg:43.03ms +[2025-09-11 10:00:28] [Rank 0] step:5681/10000 
train_time:244425ms step_avg:43.03ms +[2025-09-11 10:00:29] [Rank 0] step:5701/10000 train_time:245112ms step_avg:42.99ms +[2025-09-11 10:00:29] [Rank 0] step:5701/10000 train_time:245112ms step_avg:42.99ms +[2025-09-11 10:00:29] [Rank 0] step:5721/10000 train_time:245796ms step_avg:42.96ms +[2025-09-11 10:00:29] [Rank 0] step:5721/10000 train_time:245796ms step_avg:42.96ms +[2025-09-11 10:00:30] [Rank 0] step:5741/10000 train_time:246483ms step_avg:42.93ms +[2025-09-11 10:00:30] [Rank 0] step:5741/10000 train_time:246483ms step_avg:42.93ms +[2025-09-11 10:00:31] [Rank 0] step:5761/10000 train_time:247169ms step_avg:42.90ms +[2025-09-11 10:00:31] [Rank 0] step:5761/10000 train_time:247169ms step_avg:42.90ms +[2025-09-11 10:00:31] [Rank 0] step:5781/10000 train_time:247855ms step_avg:42.87ms +[2025-09-11 10:00:31] [Rank 0] step:5781/10000 train_time:247855ms step_avg:42.87ms +[2025-09-11 10:00:32] [Rank 0] step:5801/10000 train_time:248541ms step_avg:42.84ms +[2025-09-11 10:00:32] [Rank 0] step:5801/10000 train_time:248541ms step_avg:42.84ms +[2025-09-11 10:00:33] [Rank 0] step:5821/10000 train_time:249226ms step_avg:42.82ms +[2025-09-11 10:00:33] [Rank 0] step:5821/10000 train_time:249226ms step_avg:42.82ms +[2025-09-11 10:00:33] [Rank 0] step:5841/10000 train_time:249913ms step_avg:42.79ms +[2025-09-11 10:00:33] [Rank 0] step:5841/10000 train_time:249913ms step_avg:42.79ms +[2025-09-11 10:00:34] [Rank 0] step:5861/10000 train_time:250597ms step_avg:42.76ms +[2025-09-11 10:00:34] [Rank 0] step:5861/10000 train_time:250597ms step_avg:42.76ms +[2025-09-11 10:00:35] [Rank 0] step:5881/10000 train_time:251283ms step_avg:42.73ms +[2025-09-11 10:00:35] [Rank 0] step:5881/10000 train_time:251283ms step_avg:42.73ms +[2025-09-11 10:00:36] [Rank 0] step:5901/10000 train_time:251968ms step_avg:42.70ms +[2025-09-11 10:00:36] [Rank 0] step:5901/10000 train_time:251968ms step_avg:42.70ms +[2025-09-11 10:00:36] [Rank 0] step:5921/10000 train_time:252661ms step_avg:42.67ms 
+[2025-09-11 10:00:36] [Rank 0] step:5921/10000 train_time:252661ms step_avg:42.67ms +[2025-09-11 10:00:37] [Rank 0] step:5941/10000 train_time:253348ms step_avg:42.64ms +[2025-09-11 10:00:37] [Rank 0] step:5941/10000 train_time:253348ms step_avg:42.64ms +[2025-09-11 10:00:38] [Rank 0] step:5961/10000 train_time:254035ms step_avg:42.62ms +[2025-09-11 10:00:38] [Rank 0] step:5961/10000 train_time:254035ms step_avg:42.62ms +[2025-09-11 10:00:38] [Rank 0] step:5981/10000 train_time:254721ms step_avg:42.59ms +[2025-09-11 10:00:38] [Rank 0] step:5981/10000 train_time:254721ms step_avg:42.59ms +[2025-09-11 10:00:39] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 10:00:39] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 10:00:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 10:00:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 10:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 10:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 10:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 10:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 10:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 10:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 10:00:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 10:00:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 10:00:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 10:00:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 10:00:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 10:00:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 10:00:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 10:00:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 10:00:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 10:00:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 10:00:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 10:00:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 10:00:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 10:00:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 10:00:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 10:00:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 10:00:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 10:00:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 10:00:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 10:00:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 10:00:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 10:00:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 10:00:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 10:00:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 10:00:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 10:00:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 10:00:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 10:00:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 10:00:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 10:00:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 10:00:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 10:00:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 10:00:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:00:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:00:49] [Rank 0] PRINT: step:6000/10000 val_loss:4.4449 total_sharp:3.3918e-04 L1_sharp:3.2011e-03 L2_sharp:4.9786e-04 L3_sharp:3.3991e-04 L4_sharp:4.6799e-04 L5_sharp:4.4491e-04 L6_sharp:3.6535e-04 L7_sharp:3.4769e-04 L8_sharp:7.5376e-04 L9_sharp:6.8896e-04 L10_sharp:8.8885e-04 L11_sharp:1.3949e-03 L12_sharp:5.2474e-03 total_fnorm:3.6750e+01 total_l1_linf:7.6800e+04 total_spectral:1.8500e+01 L1_fnorm:2.4688e+00 L2_fnorm:2.4375e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.5156e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5000e+00 L8_fnorm:2.4531e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5000e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.5312e+00 L1_l1linf:6.3672e-01 L2_l1linf:5.9766e-01 L3_l1linf:5.8203e-01 L4_l1linf:5.9766e-01 L5_l1linf:6.0938e-01 L6_l1linf:6.1328e-01 L7_l1linf:6.3672e-01 L8_l1linf:5.8984e-01 L9_l1linf:5.7812e-01 L10_l1linf:5.6250e-01 L11_l1linf:5.5859e-01 L12_l1linf:5.9766e-01 L1_spectral:3.1975e-02 L2_spectral:3.1210e-02 L3_spectral:3.1393e-02 L4_spectral:3.1654e-02 L5_spectral:3.1829e-02 L6_spectral:3.1643e-02 L7_spectral:3.2033e-02 L8_spectral:3.1588e-02 L9_spectral:3.2139e-02 L10_spectral:3.2028e-02 L11_spectral:3.2026e-02 L12_spectral:3.1793e-02 train_time:255390ms step_avg:42.56ms +[2025-09-11 10:00:49] [Rank 0] PRINT: step:6000/10000 val_loss:4.4449 total_sharp:3.3918e-04 L1_sharp:3.2011e-03 L2_sharp:4.9786e-04 L3_sharp:3.3991e-04 L4_sharp:4.6799e-04 L5_sharp:4.4491e-04 L6_sharp:3.6535e-04 L7_sharp:3.4769e-04 L8_sharp:7.5376e-04 L9_sharp:6.8896e-04 L10_sharp:8.8885e-04 L11_sharp:1.3949e-03 L12_sharp:5.2474e-03 total_fnorm:3.6750e+01 total_l1_linf:7.6800e+04 total_spectral:1.8500e+01 L1_fnorm:2.4688e+00 L2_fnorm:2.4375e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.5156e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5000e+00 L8_fnorm:2.4531e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5000e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.5312e+00 L1_l1linf:6.3672e-01 L2_l1linf:5.9766e-01 L3_l1linf:5.8203e-01 L4_l1linf:5.9766e-01 L5_l1linf:6.0938e-01 
L6_l1linf:6.1328e-01 L7_l1linf:6.3672e-01 L8_l1linf:5.8984e-01 L9_l1linf:5.7812e-01 L10_l1linf:5.6250e-01 L11_l1linf:5.5859e-01 L12_l1linf:5.9766e-01 L1_spectral:3.1975e-02 L2_spectral:3.1210e-02 L3_spectral:3.1393e-02 L4_spectral:3.1654e-02 L5_spectral:3.1829e-02 L6_spectral:3.1643e-02 L7_spectral:3.2033e-02 L8_spectral:3.1588e-02 L9_spectral:3.2139e-02 L10_spectral:3.2028e-02 L11_spectral:3.2026e-02 L12_spectral:3.1793e-02 train_time:255390ms step_avg:42.56ms +[2025-09-11 10:00:50] [Rank 0] step:6001/10000 train_time:256586ms step_avg:42.76ms +[2025-09-11 10:00:50] [Rank 0] step:6001/10000 train_time:256586ms step_avg:42.76ms +[2025-09-11 10:00:51] [Rank 0] step:6021/10000 train_time:257278ms step_avg:42.73ms +[2025-09-11 10:00:51] [Rank 0] step:6021/10000 train_time:257278ms step_avg:42.73ms +[2025-09-11 10:00:51] [Rank 0] step:6041/10000 train_time:257968ms step_avg:42.70ms +[2025-09-11 10:00:51] [Rank 0] step:6041/10000 train_time:257968ms step_avg:42.70ms +[2025-09-11 10:00:52] [Rank 0] step:6061/10000 train_time:258656ms step_avg:42.68ms +[2025-09-11 10:00:52] [Rank 0] step:6061/10000 train_time:258656ms step_avg:42.68ms +[2025-09-11 10:00:53] [Rank 0] step:6081/10000 train_time:259345ms step_avg:42.65ms +[2025-09-11 10:00:53] [Rank 0] step:6081/10000 train_time:259345ms step_avg:42.65ms +[2025-09-11 10:00:53] [Rank 0] step:6101/10000 train_time:260033ms step_avg:42.62ms +[2025-09-11 10:00:53] [Rank 0] step:6101/10000 train_time:260033ms step_avg:42.62ms +[2025-09-11 10:00:54] [Rank 0] step:6121/10000 train_time:260721ms step_avg:42.59ms +[2025-09-11 10:00:54] [Rank 0] step:6121/10000 train_time:260721ms step_avg:42.59ms +[2025-09-11 10:00:55] [Rank 0] step:6141/10000 train_time:261409ms step_avg:42.57ms +[2025-09-11 10:00:55] [Rank 0] step:6141/10000 train_time:261409ms step_avg:42.57ms +[2025-09-11 10:00:56] [Rank 0] step:6161/10000 train_time:262663ms step_avg:42.63ms +[2025-09-11 10:00:56] [Rank 0] step:6161/10000 train_time:262663ms step_avg:42.63ms 
+[2025-09-11 10:00:57] [Rank 0] step:6181/10000 train_time:263349ms step_avg:42.61ms +[2025-09-11 10:00:57] [Rank 0] step:6181/10000 train_time:263349ms step_avg:42.61ms +[2025-09-11 10:00:57] [Rank 0] step:6201/10000 train_time:264037ms step_avg:42.58ms +[2025-09-11 10:00:57] [Rank 0] step:6201/10000 train_time:264037ms step_avg:42.58ms +[2025-09-11 10:00:58] [Rank 0] step:6221/10000 train_time:264995ms step_avg:42.60ms +[2025-09-11 10:00:58] [Rank 0] step:6221/10000 train_time:264995ms step_avg:42.60ms +[2025-09-11 10:00:59] [Rank 0] step:6241/10000 train_time:265684ms step_avg:42.57ms +[2025-09-11 10:00:59] [Rank 0] step:6241/10000 train_time:265684ms step_avg:42.57ms +[2025-09-11 10:01:00] [Rank 0] step:6261/10000 train_time:266370ms step_avg:42.54ms +[2025-09-11 10:01:00] [Rank 0] step:6261/10000 train_time:266370ms step_avg:42.54ms +[2025-09-11 10:01:00] [Rank 0] step:6281/10000 train_time:267059ms step_avg:42.52ms +[2025-09-11 10:01:00] [Rank 0] step:6281/10000 train_time:267059ms step_avg:42.52ms +[2025-09-11 10:01:01] [Rank 0] step:6301/10000 train_time:267745ms step_avg:42.49ms +[2025-09-11 10:01:01] [Rank 0] step:6301/10000 train_time:267745ms step_avg:42.49ms +[2025-09-11 10:01:02] [Rank 0] step:6321/10000 train_time:268435ms step_avg:42.47ms +[2025-09-11 10:01:02] [Rank 0] step:6321/10000 train_time:268435ms step_avg:42.47ms +[2025-09-11 10:01:02] [Rank 0] step:6341/10000 train_time:269124ms step_avg:42.44ms +[2025-09-11 10:01:02] [Rank 0] step:6341/10000 train_time:269124ms step_avg:42.44ms +[2025-09-11 10:01:03] [Rank 0] step:6361/10000 train_time:269813ms step_avg:42.42ms +[2025-09-11 10:01:03] [Rank 0] step:6361/10000 train_time:269813ms step_avg:42.42ms +[2025-09-11 10:01:04] [Rank 0] step:6381/10000 train_time:270501ms step_avg:42.39ms +[2025-09-11 10:01:04] [Rank 0] step:6381/10000 train_time:270501ms step_avg:42.39ms +[2025-09-11 10:01:05] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 10:01:05] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 10:01:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 10:01:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 10:01:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 10:01:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 10:01:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:01:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:01:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 10:01:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 10:01:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 10:01:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 10:01:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 10:01:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 10:01:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 10:01:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 10:01:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 10:01:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 10:01:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 10:01:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 10:01:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 10:01:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 10:01:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 10:01:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 10:01:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 10:01:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 10:01:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 10:01:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 10:01:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 10:01:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 10:01:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 10:01:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 10:01:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 10:01:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 10:01:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 10:01:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 10:01:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 10:01:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 10:01:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 10:01:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 10:01:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 10:01:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 10:01:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 10:01:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 10:01:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:01:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:01:15] [Rank 0] PRINT: step:6400/10000 val_loss:4.4059 total_sharp:4.5177e-04 L1_sharp:2.0337e-03 L2_sharp:3.3880e-04 L3_sharp:2.3443e-04 L4_sharp:4.2508e-04 L5_sharp:3.8109e-04 L6_sharp:2.9890e-04 L7_sharp:3.9563e-04 L8_sharp:8.7901e-04 L9_sharp:8.4097e-04 L10_sharp:9.4996e-04 L11_sharp:1.3701e-03 L12_sharp:4.8444e-03 total_fnorm:2.9375e+01 total_l1_linf:6.1184e+04 total_spectral:1.5125e+01 L1_fnorm:2.2188e+00 L2_fnorm:2.1875e+00 L3_fnorm:2.2500e+00 L4_fnorm:2.2500e+00 L5_fnorm:2.2656e+00 L6_fnorm:2.2656e+00 L7_fnorm:2.2656e+00 L8_fnorm:2.2031e+00 L9_fnorm:2.2500e+00 L10_fnorm:2.2500e+00 L11_fnorm:2.2656e+00 L12_fnorm:2.2656e+00 L1_l1linf:5.5859e-01 L2_l1linf:5.1953e-01 L3_l1linf:5.0391e-01 L4_l1linf:5.1953e-01 L5_l1linf:5.3125e-01 L6_l1linf:5.3516e-01 L7_l1linf:5.4297e-01 L8_l1linf:5.1562e-01 L9_l1linf:5.0391e-01 L10_l1linf:4.8242e-01 L11_l1linf:4.8047e-01 L12_l1linf:5.2344e-01 L1_spectral:2.9497e-02 L2_spectral:2.8295e-02 L3_spectral:2.8930e-02 L4_spectral:2.8998e-02 L5_spectral:2.9247e-02 L6_spectral:2.9220e-02 L7_spectral:2.9181e-02 L8_spectral:2.8789e-02 L9_spectral:2.9589e-02 L10_spectral:2.9366e-02 L11_spectral:2.9359e-02 L12_spectral:2.9128e-02 train_time:271169ms step_avg:42.37ms +[2025-09-11 10:01:15] [Rank 0] PRINT: step:6400/10000 
val_loss:4.4059 total_sharp:4.5177e-04 L1_sharp:2.0337e-03 L2_sharp:3.3880e-04 L3_sharp:2.3443e-04 L4_sharp:4.2508e-04 L5_sharp:3.8109e-04 L6_sharp:2.9890e-04 L7_sharp:3.9563e-04 L8_sharp:8.7901e-04 L9_sharp:8.4097e-04 L10_sharp:9.4996e-04 L11_sharp:1.3701e-03 L12_sharp:4.8444e-03 total_fnorm:2.9375e+01 total_l1_linf:6.1184e+04 total_spectral:1.5125e+01 L1_fnorm:2.2188e+00 L2_fnorm:2.1875e+00 L3_fnorm:2.2500e+00 L4_fnorm:2.2500e+00 L5_fnorm:2.2656e+00 L6_fnorm:2.2656e+00 L7_fnorm:2.2656e+00 L8_fnorm:2.2031e+00 L9_fnorm:2.2500e+00 L10_fnorm:2.2500e+00 L11_fnorm:2.2656e+00 L12_fnorm:2.2656e+00 L1_l1linf:5.5859e-01 L2_l1linf:5.1953e-01 L3_l1linf:5.0391e-01 L4_l1linf:5.1953e-01 L5_l1linf:5.3125e-01 L6_l1linf:5.3516e-01 L7_l1linf:5.4297e-01 L8_l1linf:5.1562e-01 L9_l1linf:5.0391e-01 L10_l1linf:4.8242e-01 L11_l1linf:4.8047e-01 L12_l1linf:5.2344e-01 L1_spectral:2.9497e-02 L2_spectral:2.8295e-02 L3_spectral:2.8930e-02 L4_spectral:2.8998e-02 L5_spectral:2.9247e-02 L6_spectral:2.9220e-02 L7_spectral:2.9181e-02 L8_spectral:2.8789e-02 L9_spectral:2.9589e-02 L10_spectral:2.9366e-02 L11_spectral:2.9359e-02 L12_spectral:2.9128e-02 train_time:271169ms step_avg:42.37ms +[2025-09-11 10:01:16] [Rank 0] step:6401/10000 train_time:272336ms step_avg:42.55ms +[2025-09-11 10:01:16] [Rank 0] step:6401/10000 train_time:272336ms step_avg:42.55ms +[2025-09-11 10:01:16] [Rank 0] step:6421/10000 train_time:273037ms step_avg:42.52ms +[2025-09-11 10:01:16] [Rank 0] step:6421/10000 train_time:273037ms step_avg:42.52ms +[2025-09-11 10:01:17] [Rank 0] step:6441/10000 train_time:273724ms step_avg:42.50ms +[2025-09-11 10:01:17] [Rank 0] step:6441/10000 train_time:273724ms step_avg:42.50ms +[2025-09-11 10:01:18] [Rank 0] step:6461/10000 train_time:274413ms step_avg:42.47ms +[2025-09-11 10:01:18] [Rank 0] step:6461/10000 train_time:274413ms step_avg:42.47ms +[2025-09-11 10:01:18] [Rank 0] step:6481/10000 train_time:275102ms step_avg:42.45ms +[2025-09-11 10:01:18] [Rank 0] step:6481/10000 
train_time:275102ms step_avg:42.45ms +[2025-09-11 10:01:19] [Rank 0] step:6501/10000 train_time:275792ms step_avg:42.42ms +[2025-09-11 10:01:19] [Rank 0] step:6501/10000 train_time:275792ms step_avg:42.42ms +[2025-09-11 10:01:20] [Rank 0] step:6521/10000 train_time:276480ms step_avg:42.40ms +[2025-09-11 10:01:20] [Rank 0] step:6521/10000 train_time:276480ms step_avg:42.40ms +[2025-09-11 10:01:21] [Rank 0] step:6541/10000 train_time:277167ms step_avg:42.37ms +[2025-09-11 10:01:21] [Rank 0] step:6541/10000 train_time:277167ms step_avg:42.37ms +[2025-09-11 10:01:21] [Rank 0] step:6561/10000 train_time:277856ms step_avg:42.35ms +[2025-09-11 10:01:21] [Rank 0] step:6561/10000 train_time:277856ms step_avg:42.35ms +[2025-09-11 10:01:22] [Rank 0] step:6581/10000 train_time:278544ms step_avg:42.33ms +[2025-09-11 10:01:22] [Rank 0] step:6581/10000 train_time:278544ms step_avg:42.33ms +[2025-09-11 10:01:23] [Rank 0] step:6601/10000 train_time:279232ms step_avg:42.30ms +[2025-09-11 10:01:23] [Rank 0] step:6601/10000 train_time:279232ms step_avg:42.30ms +[2025-09-11 10:01:23] [Rank 0] step:6621/10000 train_time:279918ms step_avg:42.28ms +[2025-09-11 10:01:23] [Rank 0] step:6621/10000 train_time:279918ms step_avg:42.28ms +[2025-09-11 10:01:24] [Rank 0] step:6641/10000 train_time:280606ms step_avg:42.25ms +[2025-09-11 10:01:24] [Rank 0] step:6641/10000 train_time:280606ms step_avg:42.25ms +[2025-09-11 10:01:25] [Rank 0] step:6661/10000 train_time:281295ms step_avg:42.23ms +[2025-09-11 10:01:25] [Rank 0] step:6661/10000 train_time:281295ms step_avg:42.23ms +[2025-09-11 10:01:25] [Rank 0] step:6681/10000 train_time:281990ms step_avg:42.21ms +[2025-09-11 10:01:25] [Rank 0] step:6681/10000 train_time:281990ms step_avg:42.21ms +[2025-09-11 10:01:26] [Rank 0] step:6701/10000 train_time:282683ms step_avg:42.19ms +[2025-09-11 10:01:26] [Rank 0] step:6701/10000 train_time:282683ms step_avg:42.19ms +[2025-09-11 10:01:27] [Rank 0] step:6721/10000 train_time:283379ms step_avg:42.16ms 
+[2025-09-11 10:01:27] [Rank 0] step:6721/10000 train_time:283379ms step_avg:42.16ms +[2025-09-11 10:01:27] [Rank 0] step:6741/10000 train_time:284075ms step_avg:42.14ms +[2025-09-11 10:01:27] [Rank 0] step:6741/10000 train_time:284075ms step_avg:42.14ms +[2025-09-11 10:01:28] [Rank 0] step:6761/10000 train_time:284768ms step_avg:42.12ms +[2025-09-11 10:01:28] [Rank 0] step:6761/10000 train_time:284768ms step_avg:42.12ms +[2025-09-11 10:01:29] [Rank 0] step:6781/10000 train_time:285464ms step_avg:42.10ms +[2025-09-11 10:01:29] [Rank 0] step:6781/10000 train_time:285464ms step_avg:42.10ms +[2025-09-11 10:01:30] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 10:01:30] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 10:01:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 10:01:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 10:01:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 10:01:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 10:01:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:01:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:01:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 10:01:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 10:01:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 10:01:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 10:01:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 10:01:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 10:01:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 10:01:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 10:01:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 10:01:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 10:01:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 10:01:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 10:01:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 10:01:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 10:01:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 10:01:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 10:01:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 10:01:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 10:01:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 10:01:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 10:01:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 10:01:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 10:01:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 10:01:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 10:01:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 10:01:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 10:01:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 10:01:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 10:01:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 10:01:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 10:01:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 10:01:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 10:01:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 10:01:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 10:01:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 10:01:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 10:01:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:01:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:01:43] [Rank 0] PRINT: step:6800/10000 val_loss:4.3713 total_sharp:3.4511e-04 L1_sharp:1.8330e-03 L2_sharp:5.2657e-04 L3_sharp:2.8046e-04 L4_sharp:4.0190e-04 L5_sharp:3.3633e-04 L6_sharp:2.1926e-04 L7_sharp:2.9631e-04 L8_sharp:7.2450e-04 L9_sharp:8.3852e-04 L10_sharp:1.0026e-03 L11_sharp:1.3242e-03 L12_sharp:8.6326e-03 total_fnorm:2.8250e+01 total_l1_linf:5.6576e+04 total_spectral:1.4562e+01 L1_fnorm:1.9844e+00 L2_fnorm:1.9297e+00 L3_fnorm:1.9766e+00 L4_fnorm:1.9922e+00 L5_fnorm:2.0000e+00 L6_fnorm:2.0000e+00 L7_fnorm:1.9922e+00 L8_fnorm:1.9531e+00 L9_fnorm:1.9922e+00 L10_fnorm:1.9766e+00 L11_fnorm:1.9922e+00 L12_fnorm:2.0000e+00 L1_l1linf:4.7070e-01 L2_l1linf:4.6680e-01 L3_l1linf:4.3945e-01 L4_l1linf:4.4922e-01 L5_l1linf:4.5703e-01 L6_l1linf:4.5898e-01 L7_l1linf:4.6875e-01 L8_l1linf:4.4141e-01 L9_l1linf:4.2383e-01 L10_l1linf:4.0625e-01 L11_l1linf:4.0430e-01 L12_l1linf:4.5898e-01 L1_spectral:2.6337e-02 L2_spectral:2.5542e-02 L3_spectral:2.6211e-02 L4_spectral:2.6219e-02 L5_spectral:2.6432e-02 L6_spectral:2.6435e-02 L7_spectral:2.6391e-02 L8_spectral:2.5919e-02 L9_spectral:2.6479e-02 L10_spectral:2.6385e-02 L11_spectral:2.6328e-02 L12_spectral:2.6531e-02 train_time:286139ms step_avg:42.08ms +[2025-09-11 10:01:43] [Rank 0] PRINT: step:6800/10000 val_loss:4.3713 total_sharp:3.4511e-04 L1_sharp:1.8330e-03 L2_sharp:5.2657e-04 L3_sharp:2.8046e-04 L4_sharp:4.0190e-04 L5_sharp:3.3633e-04 L6_sharp:2.1926e-04 L7_sharp:2.9631e-04 L8_sharp:7.2450e-04 L9_sharp:8.3852e-04 L10_sharp:1.0026e-03 L11_sharp:1.3242e-03 L12_sharp:8.6326e-03 total_fnorm:2.8250e+01 total_l1_linf:5.6576e+04 total_spectral:1.4562e+01 L1_fnorm:1.9844e+00 L2_fnorm:1.9297e+00 L3_fnorm:1.9766e+00 L4_fnorm:1.9922e+00 L5_fnorm:2.0000e+00 L6_fnorm:2.0000e+00 L7_fnorm:1.9922e+00 L8_fnorm:1.9531e+00 L9_fnorm:1.9922e+00 L10_fnorm:1.9766e+00 L11_fnorm:1.9922e+00 L12_fnorm:2.0000e+00 L1_l1linf:4.7070e-01 L2_l1linf:4.6680e-01 L3_l1linf:4.3945e-01 L4_l1linf:4.4922e-01 L5_l1linf:4.5703e-01 
L6_l1linf:4.5898e-01 L7_l1linf:4.6875e-01 L8_l1linf:4.4141e-01 L9_l1linf:4.2383e-01 L10_l1linf:4.0625e-01 L11_l1linf:4.0430e-01 L12_l1linf:4.5898e-01 L1_spectral:2.6337e-02 L2_spectral:2.5542e-02 L3_spectral:2.6211e-02 L4_spectral:2.6219e-02 L5_spectral:2.6432e-02 L6_spectral:2.6435e-02 L7_spectral:2.6391e-02 L8_spectral:2.5919e-02 L9_spectral:2.6479e-02 L10_spectral:2.6385e-02 L11_spectral:2.6328e-02 L12_spectral:2.6531e-02 train_time:286139ms step_avg:42.08ms +[2025-09-11 10:01:44] [Rank 0] step:6801/10000 train_time:287296ms step_avg:42.24ms +[2025-09-11 10:01:44] [Rank 0] step:6801/10000 train_time:287296ms step_avg:42.24ms +[2025-09-11 10:01:45] [Rank 0] step:6821/10000 train_time:288021ms step_avg:42.23ms +[2025-09-11 10:01:45] [Rank 0] step:6821/10000 train_time:288021ms step_avg:42.23ms +[2025-09-11 10:01:45] [Rank 0] step:6841/10000 train_time:288720ms step_avg:42.20ms +[2025-09-11 10:01:45] [Rank 0] step:6841/10000 train_time:288720ms step_avg:42.20ms +[2025-09-11 10:01:46] [Rank 0] step:6861/10000 train_time:289418ms step_avg:42.18ms +[2025-09-11 10:01:46] [Rank 0] step:6861/10000 train_time:289418ms step_avg:42.18ms +[2025-09-11 10:01:47] [Rank 0] step:6881/10000 train_time:290113ms step_avg:42.16ms +[2025-09-11 10:01:47] [Rank 0] step:6881/10000 train_time:290113ms step_avg:42.16ms +[2025-09-11 10:01:47] [Rank 0] step:6901/10000 train_time:290807ms step_avg:42.14ms +[2025-09-11 10:01:47] [Rank 0] step:6901/10000 train_time:290807ms step_avg:42.14ms +[2025-09-11 10:01:48] [Rank 0] step:6921/10000 train_time:291502ms step_avg:42.12ms +[2025-09-11 10:01:48] [Rank 0] step:6921/10000 train_time:291502ms step_avg:42.12ms +[2025-09-11 10:01:49] [Rank 0] step:6941/10000 train_time:292198ms step_avg:42.10ms +[2025-09-11 10:01:49] [Rank 0] step:6941/10000 train_time:292198ms step_avg:42.10ms +[2025-09-11 10:01:49] [Rank 0] step:6961/10000 train_time:292893ms step_avg:42.08ms +[2025-09-11 10:01:49] [Rank 0] step:6961/10000 train_time:292893ms step_avg:42.08ms 
+[2025-09-11 10:01:50] [Rank 0] step:6981/10000 train_time:293591ms step_avg:42.06ms +[2025-09-11 10:01:50] [Rank 0] step:6981/10000 train_time:293591ms step_avg:42.06ms +[2025-09-11 10:01:51] [Rank 0] step:7001/10000 train_time:294286ms step_avg:42.03ms +[2025-09-11 10:01:51] [Rank 0] step:7001/10000 train_time:294286ms step_avg:42.03ms +[2025-09-11 10:01:52] [Rank 0] step:7021/10000 train_time:294982ms step_avg:42.01ms +[2025-09-11 10:01:52] [Rank 0] step:7021/10000 train_time:294982ms step_avg:42.01ms +[2025-09-11 10:01:52] [Rank 0] step:7041/10000 train_time:295676ms step_avg:41.99ms +[2025-09-11 10:01:52] [Rank 0] step:7041/10000 train_time:295676ms step_avg:41.99ms +[2025-09-11 10:01:53] [Rank 0] step:7061/10000 train_time:296373ms step_avg:41.97ms +[2025-09-11 10:01:53] [Rank 0] step:7061/10000 train_time:296373ms step_avg:41.97ms +[2025-09-11 10:01:54] [Rank 0] step:7081/10000 train_time:297069ms step_avg:41.95ms +[2025-09-11 10:01:54] [Rank 0] step:7081/10000 train_time:297069ms step_avg:41.95ms +[2025-09-11 10:01:54] [Rank 0] step:7101/10000 train_time:297765ms step_avg:41.93ms +[2025-09-11 10:01:54] [Rank 0] step:7101/10000 train_time:297765ms step_avg:41.93ms +[2025-09-11 10:01:55] [Rank 0] step:7121/10000 train_time:298462ms step_avg:41.91ms +[2025-09-11 10:01:55] [Rank 0] step:7121/10000 train_time:298462ms step_avg:41.91ms +[2025-09-11 10:01:56] [Rank 0] step:7141/10000 train_time:299156ms step_avg:41.89ms +[2025-09-11 10:01:56] [Rank 0] step:7141/10000 train_time:299156ms step_avg:41.89ms +[2025-09-11 10:01:56] [Rank 0] step:7161/10000 train_time:299854ms step_avg:41.87ms +[2025-09-11 10:01:56] [Rank 0] step:7161/10000 train_time:299854ms step_avg:41.87ms +[2025-09-11 10:01:57] [Rank 0] step:7181/10000 train_time:300550ms step_avg:41.85ms +[2025-09-11 10:01:57] [Rank 0] step:7181/10000 train_time:300550ms step_avg:41.85ms +[2025-09-11 10:01:58] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 10:01:58] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 10:01:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 10:01:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 10:02:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 10:02:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 10:02:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:02:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:02:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 10:02:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 10:02:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 10:02:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 10:02:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 10:02:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 10:02:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 10:02:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 10:02:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 10:02:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 10:02:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 10:02:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 10:02:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 10:02:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 10:02:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 10:02:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 10:02:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 10:02:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 10:02:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 10:02:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 10:02:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 10:02:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 10:02:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 10:02:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 10:02:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 10:02:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 10:02:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 10:02:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 10:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 10:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 10:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 10:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 10:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 10:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 10:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 10:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 10:02:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:02:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:02:08] [Rank 0] PRINT: step:7200/10000 val_loss:4.3356 total_sharp:3.3961e-04 L1_sharp:2.7177e-03 L2_sharp:1.7452e-04 L3_sharp:2.5129e-04 L4_sharp:2.7177e-04 L5_sharp:2.5902e-04 L6_sharp:2.5447e-04 L7_sharp:2.8553e-04 L8_sharp:7.1406e-04 L9_sharp:7.1267e-04 L10_sharp:8.6817e-04 L11_sharp:1.3645e-03 L12_sharp:6.4374e-03 total_fnorm:2.4125e+01 total_l1_linf:4.5568e+04 total_spectral:1.2438e+01 L1_fnorm:1.7188e+00 L2_fnorm:1.6719e+00 L3_fnorm:1.7109e+00 L4_fnorm:1.7266e+00 L5_fnorm:1.7266e+00 L6_fnorm:1.7344e+00 L7_fnorm:1.7266e+00 L8_fnorm:1.6875e+00 L9_fnorm:1.7109e+00 L10_fnorm:1.7031e+00 L11_fnorm:1.7109e+00 L12_fnorm:1.7109e+00 L1_l1linf:3.8672e-01 L2_l1linf:3.7500e-01 L3_l1linf:3.6328e-01 L4_l1linf:3.7695e-01 L5_l1linf:3.8086e-01 L6_l1linf:3.8281e-01 L7_l1linf:3.8477e-01 L8_l1linf:3.6914e-01 L9_l1linf:3.5156e-01 L10_l1linf:3.3203e-01 L11_l1linf:3.3008e-01 L12_l1linf:3.8477e-01 L1_spectral:2.3364e-02 L2_spectral:2.2239e-02 L3_spectral:2.3131e-02 L4_spectral:2.3125e-02 L5_spectral:2.3034e-02 L6_spectral:2.3301e-02 L7_spectral:2.3200e-02 L8_spectral:2.2811e-02 L9_spectral:2.3259e-02 L10_spectral:2.3331e-02 L11_spectral:2.3453e-02 L12_spectral:2.3399e-02 train_time:301225ms step_avg:41.84ms +[2025-09-11 10:02:08] [Rank 0] PRINT: step:7200/10000 
val_loss:4.3356 total_sharp:3.3961e-04 L1_sharp:2.7177e-03 L2_sharp:1.7452e-04 L3_sharp:2.5129e-04 L4_sharp:2.7177e-04 L5_sharp:2.5902e-04 L6_sharp:2.5447e-04 L7_sharp:2.8553e-04 L8_sharp:7.1406e-04 L9_sharp:7.1267e-04 L10_sharp:8.6817e-04 L11_sharp:1.3645e-03 L12_sharp:6.4374e-03 total_fnorm:2.4125e+01 total_l1_linf:4.5568e+04 total_spectral:1.2438e+01 L1_fnorm:1.7188e+00 L2_fnorm:1.6719e+00 L3_fnorm:1.7109e+00 L4_fnorm:1.7266e+00 L5_fnorm:1.7266e+00 L6_fnorm:1.7344e+00 L7_fnorm:1.7266e+00 L8_fnorm:1.6875e+00 L9_fnorm:1.7109e+00 L10_fnorm:1.7031e+00 L11_fnorm:1.7109e+00 L12_fnorm:1.7109e+00 L1_l1linf:3.8672e-01 L2_l1linf:3.7500e-01 L3_l1linf:3.6328e-01 L4_l1linf:3.7695e-01 L5_l1linf:3.8086e-01 L6_l1linf:3.8281e-01 L7_l1linf:3.8477e-01 L8_l1linf:3.6914e-01 L9_l1linf:3.5156e-01 L10_l1linf:3.3203e-01 L11_l1linf:3.3008e-01 L12_l1linf:3.8477e-01 L1_spectral:2.3364e-02 L2_spectral:2.2239e-02 L3_spectral:2.3131e-02 L4_spectral:2.3125e-02 L5_spectral:2.3034e-02 L6_spectral:2.3301e-02 L7_spectral:2.3200e-02 L8_spectral:2.2811e-02 L9_spectral:2.3259e-02 L10_spectral:2.3331e-02 L11_spectral:2.3453e-02 L12_spectral:2.3399e-02 train_time:301225ms step_avg:41.84ms +[2025-09-11 10:02:09] [Rank 0] step:7201/10000 train_time:302391ms step_avg:41.99ms +[2025-09-11 10:02:09] [Rank 0] step:7201/10000 train_time:302391ms step_avg:41.99ms +[2025-09-11 10:02:10] [Rank 0] step:7221/10000 train_time:303107ms step_avg:41.98ms +[2025-09-11 10:02:10] [Rank 0] step:7221/10000 train_time:303107ms step_avg:41.98ms +[2025-09-11 10:02:10] [Rank 0] step:7241/10000 train_time:303804ms step_avg:41.96ms +[2025-09-11 10:02:10] [Rank 0] step:7241/10000 train_time:303804ms step_avg:41.96ms +[2025-09-11 10:02:11] [Rank 0] step:7261/10000 train_time:304503ms step_avg:41.94ms +[2025-09-11 10:02:11] [Rank 0] step:7261/10000 train_time:304503ms step_avg:41.94ms +[2025-09-11 10:02:12] [Rank 0] step:7281/10000 train_time:305209ms step_avg:41.92ms +[2025-09-11 10:02:12] [Rank 0] step:7281/10000 
train_time:305209ms step_avg:41.92ms +[2025-09-11 10:02:13] [Rank 0] step:7301/10000 train_time:305904ms step_avg:41.90ms +[2025-09-11 10:02:13] [Rank 0] step:7301/10000 train_time:305904ms step_avg:41.90ms +[2025-09-11 10:02:13] [Rank 0] step:7321/10000 train_time:306601ms step_avg:41.88ms +[2025-09-11 10:02:13] [Rank 0] step:7321/10000 train_time:306601ms step_avg:41.88ms +[2025-09-11 10:02:14] [Rank 0] step:7341/10000 train_time:307299ms step_avg:41.86ms +[2025-09-11 10:02:14] [Rank 0] step:7341/10000 train_time:307299ms step_avg:41.86ms +[2025-09-11 10:02:15] [Rank 0] step:7361/10000 train_time:307995ms step_avg:41.84ms +[2025-09-11 10:02:15] [Rank 0] step:7361/10000 train_time:307995ms step_avg:41.84ms +[2025-09-11 10:02:15] [Rank 0] step:7381/10000 train_time:308692ms step_avg:41.82ms +[2025-09-11 10:02:15] [Rank 0] step:7381/10000 train_time:308692ms step_avg:41.82ms +[2025-09-11 10:02:16] [Rank 0] step:7401/10000 train_time:309388ms step_avg:41.80ms +[2025-09-11 10:02:16] [Rank 0] step:7401/10000 train_time:309388ms step_avg:41.80ms +[2025-09-11 10:02:17] [Rank 0] step:7421/10000 train_time:310084ms step_avg:41.78ms +[2025-09-11 10:02:17] [Rank 0] step:7421/10000 train_time:310084ms step_avg:41.78ms +[2025-09-11 10:02:17] [Rank 0] step:7441/10000 train_time:310782ms step_avg:41.77ms +[2025-09-11 10:02:17] [Rank 0] step:7441/10000 train_time:310782ms step_avg:41.77ms +[2025-09-11 10:02:18] [Rank 0] step:7461/10000 train_time:311479ms step_avg:41.75ms +[2025-09-11 10:02:18] [Rank 0] step:7461/10000 train_time:311479ms step_avg:41.75ms +[2025-09-11 10:02:19] [Rank 0] step:7481/10000 train_time:312178ms step_avg:41.73ms +[2025-09-11 10:02:19] [Rank 0] step:7481/10000 train_time:312178ms step_avg:41.73ms +[2025-09-11 10:02:20] [Rank 0] step:7501/10000 train_time:312877ms step_avg:41.71ms +[2025-09-11 10:02:20] [Rank 0] step:7501/10000 train_time:312877ms step_avg:41.71ms +[2025-09-11 10:02:20] [Rank 0] step:7521/10000 train_time:313574ms step_avg:41.69ms 
+[2025-09-11 10:02:20] [Rank 0] step:7521/10000 train_time:313574ms step_avg:41.69ms +[2025-09-11 10:02:21] [Rank 0] step:7541/10000 train_time:314270ms step_avg:41.67ms +[2025-09-11 10:02:21] [Rank 0] step:7541/10000 train_time:314270ms step_avg:41.67ms +[2025-09-11 10:02:22] [Rank 0] step:7561/10000 train_time:314969ms step_avg:41.66ms +[2025-09-11 10:02:22] [Rank 0] step:7561/10000 train_time:314969ms step_avg:41.66ms +[2025-09-11 10:02:22] [Rank 0] step:7581/10000 train_time:315667ms step_avg:41.64ms +[2025-09-11 10:02:22] [Rank 0] step:7581/10000 train_time:315667ms step_avg:41.64ms +[2025-09-11 10:02:23] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 10:02:23] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 10:02:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 10:02:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 10:02:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 10:02:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 10:02:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:02:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:02:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 10:02:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 10:02:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 10:02:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 10:02:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 10:02:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 10:02:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 10:02:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 10:02:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 10:02:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 10:02:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 10:02:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 10:02:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 10:02:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 10:02:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 10:02:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 10:02:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 10:02:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 10:02:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 10:02:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 10:02:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 10:02:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 10:02:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 10:02:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 10:02:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 10:02:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 10:02:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 10:02:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 10:02:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 10:02:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 10:02:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 10:02:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 10:02:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 10:02:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 10:02:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 10:02:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 10:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:02:33] [Rank 0] PRINT: step:7600/10000 val_loss:4.3007 total_sharp:3.5922e-04 L1_sharp:1.3624e-03 L2_sharp:4.7367e-04 L3_sharp:2.9824e-04 L4_sharp:1.7574e-04 L5_sharp:2.2374e-04 L6_sharp:2.5661e-04 L7_sharp:2.7133e-04 L8_sharp:7.5180e-04 L9_sharp:7.6269e-04 L10_sharp:9.2382e-04 L11_sharp:1.2460e-03 L12_sharp:7.1814e-03 total_fnorm:1.9250e+01 total_l1_linf:3.4048e+04 total_spectral:9.9375e+00 L1_fnorm:1.4609e+00 L2_fnorm:1.3984e+00 L3_fnorm:1.4375e+00 L4_fnorm:1.4453e+00 L5_fnorm:1.4375e+00 L6_fnorm:1.4453e+00 L7_fnorm:1.4375e+00 L8_fnorm:1.3906e+00 L9_fnorm:1.4219e+00 L10_fnorm:1.4141e+00 L11_fnorm:1.4219e+00 L12_fnorm:1.4453e+00 L1_l1linf:3.0273e-01 L2_l1linf:2.9883e-01 L3_l1linf:2.8125e-01 L4_l1linf:2.9883e-01 L5_l1linf:3.0273e-01 L6_l1linf:2.9883e-01 L7_l1linf:3.0078e-01 L8_l1linf:2.8906e-01 L9_l1linf:2.7539e-01 L10_l1linf:2.6953e-01 L11_l1linf:2.6758e-01 L12_l1linf:3.0469e-01 L1_spectral:1.9522e-02 L2_spectral:1.8915e-02 L3_spectral:1.9689e-02 L4_spectral:1.9819e-02 L5_spectral:1.9658e-02 L6_spectral:1.9674e-02 L7_spectral:1.9654e-02 L8_spectral:1.9536e-02 L9_spectral:1.9877e-02 L10_spectral:1.9814e-02 L11_spectral:1.9917e-02 L12_spectral:1.9952e-02 train_time:316346ms step_avg:41.62ms +[2025-09-11 10:02:33] [Rank 0] PRINT: step:7600/10000 val_loss:4.3007 total_sharp:3.5922e-04 L1_sharp:1.3624e-03 L2_sharp:4.7367e-04 L3_sharp:2.9824e-04 L4_sharp:1.7574e-04 L5_sharp:2.2374e-04 L6_sharp:2.5661e-04 L7_sharp:2.7133e-04 L8_sharp:7.5180e-04 L9_sharp:7.6269e-04 L10_sharp:9.2382e-04 L11_sharp:1.2460e-03 L12_sharp:7.1814e-03 total_fnorm:1.9250e+01 total_l1_linf:3.4048e+04 total_spectral:9.9375e+00 L1_fnorm:1.4609e+00 L2_fnorm:1.3984e+00 L3_fnorm:1.4375e+00 L4_fnorm:1.4453e+00 L5_fnorm:1.4375e+00 L6_fnorm:1.4453e+00 L7_fnorm:1.4375e+00 L8_fnorm:1.3906e+00 L9_fnorm:1.4219e+00 L10_fnorm:1.4141e+00 L11_fnorm:1.4219e+00 L12_fnorm:1.4453e+00 L1_l1linf:3.0273e-01 L2_l1linf:2.9883e-01 L3_l1linf:2.8125e-01 L4_l1linf:2.9883e-01 L5_l1linf:3.0273e-01 
L6_l1linf:2.9883e-01 L7_l1linf:3.0078e-01 L8_l1linf:2.8906e-01 L9_l1linf:2.7539e-01 L10_l1linf:2.6953e-01 L11_l1linf:2.6758e-01 L12_l1linf:3.0469e-01 L1_spectral:1.9522e-02 L2_spectral:1.8915e-02 L3_spectral:1.9689e-02 L4_spectral:1.9819e-02 L5_spectral:1.9658e-02 L6_spectral:1.9674e-02 L7_spectral:1.9654e-02 L8_spectral:1.9536e-02 L9_spectral:1.9877e-02 L10_spectral:1.9814e-02 L11_spectral:1.9917e-02 L12_spectral:1.9952e-02 train_time:316346ms step_avg:41.62ms +[2025-09-11 10:02:34] [Rank 0] step:7601/10000 train_time:317510ms step_avg:41.77ms +[2025-09-11 10:02:34] [Rank 0] step:7601/10000 train_time:317510ms step_avg:41.77ms +[2025-09-11 10:02:35] [Rank 0] step:7621/10000 train_time:318237ms step_avg:41.76ms +[2025-09-11 10:02:35] [Rank 0] step:7621/10000 train_time:318237ms step_avg:41.76ms +[2025-09-11 10:02:36] [Rank 0] step:7641/10000 train_time:318937ms step_avg:41.74ms +[2025-09-11 10:02:36] [Rank 0] step:7641/10000 train_time:318937ms step_avg:41.74ms +[2025-09-11 10:02:36] [Rank 0] step:7661/10000 train_time:319634ms step_avg:41.72ms +[2025-09-11 10:02:36] [Rank 0] step:7661/10000 train_time:319634ms step_avg:41.72ms +[2025-09-11 10:02:37] [Rank 0] step:7681/10000 train_time:320332ms step_avg:41.70ms +[2025-09-11 10:02:37] [Rank 0] step:7681/10000 train_time:320332ms step_avg:41.70ms +[2025-09-11 10:02:38] [Rank 0] step:7701/10000 train_time:321033ms step_avg:41.69ms +[2025-09-11 10:02:38] [Rank 0] step:7701/10000 train_time:321033ms step_avg:41.69ms +[2025-09-11 10:02:38] [Rank 0] step:7721/10000 train_time:321731ms step_avg:41.67ms +[2025-09-11 10:02:38] [Rank 0] step:7721/10000 train_time:321731ms step_avg:41.67ms +[2025-09-11 10:02:39] [Rank 0] step:7741/10000 train_time:322430ms step_avg:41.65ms +[2025-09-11 10:02:39] [Rank 0] step:7741/10000 train_time:322430ms step_avg:41.65ms +[2025-09-11 10:02:40] [Rank 0] step:7761/10000 train_time:323129ms step_avg:41.63ms +[2025-09-11 10:02:40] [Rank 0] step:7761/10000 train_time:323129ms step_avg:41.63ms 
+[2025-09-11 10:02:40] [Rank 0] step:7781/10000 train_time:323829ms step_avg:41.62ms +[2025-09-11 10:02:40] [Rank 0] step:7781/10000 train_time:323829ms step_avg:41.62ms +[2025-09-11 10:02:41] [Rank 0] step:7801/10000 train_time:324527ms step_avg:41.60ms +[2025-09-11 10:02:41] [Rank 0] step:7801/10000 train_time:324527ms step_avg:41.60ms +[2025-09-11 10:02:42] [Rank 0] step:7821/10000 train_time:325226ms step_avg:41.58ms +[2025-09-11 10:02:42] [Rank 0] step:7821/10000 train_time:325226ms step_avg:41.58ms +[2025-09-11 10:02:43] [Rank 0] step:7841/10000 train_time:325927ms step_avg:41.57ms +[2025-09-11 10:02:43] [Rank 0] step:7841/10000 train_time:325927ms step_avg:41.57ms +[2025-09-11 10:02:43] [Rank 0] step:7861/10000 train_time:326628ms step_avg:41.55ms +[2025-09-11 10:02:43] [Rank 0] step:7861/10000 train_time:326628ms step_avg:41.55ms +[2025-09-11 10:02:44] [Rank 0] step:7881/10000 train_time:327326ms step_avg:41.53ms +[2025-09-11 10:02:44] [Rank 0] step:7881/10000 train_time:327326ms step_avg:41.53ms +[2025-09-11 10:02:45] [Rank 0] step:7901/10000 train_time:328026ms step_avg:41.52ms +[2025-09-11 10:02:45] [Rank 0] step:7901/10000 train_time:328026ms step_avg:41.52ms +[2025-09-11 10:02:45] [Rank 0] step:7921/10000 train_time:328725ms step_avg:41.50ms +[2025-09-11 10:02:45] [Rank 0] step:7921/10000 train_time:328725ms step_avg:41.50ms +[2025-09-11 10:02:46] [Rank 0] step:7941/10000 train_time:329427ms step_avg:41.48ms +[2025-09-11 10:02:46] [Rank 0] step:7941/10000 train_time:329427ms step_avg:41.48ms +[2025-09-11 10:02:47] [Rank 0] step:7961/10000 train_time:330125ms step_avg:41.47ms +[2025-09-11 10:02:47] [Rank 0] step:7961/10000 train_time:330125ms step_avg:41.47ms +[2025-09-11 10:02:47] [Rank 0] step:7981/10000 train_time:330826ms step_avg:41.45ms +[2025-09-11 10:02:47] [Rank 0] step:7981/10000 train_time:330826ms step_avg:41.45ms +[2025-09-11 10:02:48] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 10:02:48] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 10:02:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 10:02:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 10:02:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 10:02:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 10:02:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:02:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:02:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 10:02:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 10:02:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 10:02:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 10:02:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 10:02:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 10:02:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 10:02:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 10:02:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 10:02:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 10:02:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 10:02:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 10:02:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 10:02:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 10:02:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 10:02:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 10:02:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 10:02:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 10:02:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 10:02:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 10:02:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 10:02:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 10:02:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 10:02:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 10:02:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 10:02:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 10:02:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 10:02:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 10:02:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 10:02:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 10:02:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 10:02:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 10:02:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 10:02:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 10:02:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 10:02:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 10:02:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:02:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:02:58] [Rank 0] PRINT: step:8000/10000 val_loss:4.2801 total_sharp:2.9429e-04 L1_sharp:2.7639e-03 L2_sharp:2.7905e-04 L3_sharp:4.4156e-04 L4_sharp:2.5155e-04 L5_sharp:3.8442e-04 L6_sharp:3.7381e-04 L7_sharp:3.1648e-04 L8_sharp:6.9506e-04 L9_sharp:6.8902e-04 L10_sharp:8.2517e-04 L11_sharp:1.2313e-03 L12_sharp:4.4098e-03 total_fnorm:1.6125e+01 total_l1_linf:2.6752e+04 total_spectral:8.3125e+00 L1_fnorm:1.2031e+00 L2_fnorm:1.1484e+00 L3_fnorm:1.1797e+00 L4_fnorm:1.1875e+00 L5_fnorm:1.1797e+00 L6_fnorm:1.1875e+00 L7_fnorm:1.1797e+00 L8_fnorm:1.1484e+00 L9_fnorm:1.1719e+00 L10_fnorm:1.1562e+00 L11_fnorm:1.1641e+00 L12_fnorm:1.1719e+00 L1_l1linf:2.3438e-01 L2_l1linf:2.3242e-01 L3_l1linf:2.2559e-01 L4_l1linf:2.3242e-01 L5_l1linf:2.3535e-01 L6_l1linf:2.3926e-01 L7_l1linf:2.4805e-01 L8_l1linf:2.2461e-01 L9_l1linf:2.1094e-01 L10_l1linf:2.0215e-01 L11_l1linf:2.0215e-01 L12_l1linf:2.3926e-01 L1_spectral:1.6491e-02 L2_spectral:1.5501e-02 L3_spectral:1.6616e-02 L4_spectral:1.6797e-02 L5_spectral:1.6460e-02 L6_spectral:1.6478e-02 L7_spectral:1.6443e-02 L8_spectral:1.6427e-02 L9_spectral:1.6578e-02 L10_spectral:1.6690e-02 L11_spectral:1.6644e-02 L12_spectral:1.6744e-02 train_time:331503ms step_avg:41.44ms +[2025-09-11 10:02:58] [Rank 0] PRINT: step:8000/10000 
val_loss:4.2801 total_sharp:2.9429e-04 L1_sharp:2.7639e-03 L2_sharp:2.7905e-04 L3_sharp:4.4156e-04 L4_sharp:2.5155e-04 L5_sharp:3.8442e-04 L6_sharp:3.7381e-04 L7_sharp:3.1648e-04 L8_sharp:6.9506e-04 L9_sharp:6.8902e-04 L10_sharp:8.2517e-04 L11_sharp:1.2313e-03 L12_sharp:4.4098e-03 total_fnorm:1.6125e+01 total_l1_linf:2.6752e+04 total_spectral:8.3125e+00 L1_fnorm:1.2031e+00 L2_fnorm:1.1484e+00 L3_fnorm:1.1797e+00 L4_fnorm:1.1875e+00 L5_fnorm:1.1797e+00 L6_fnorm:1.1875e+00 L7_fnorm:1.1797e+00 L8_fnorm:1.1484e+00 L9_fnorm:1.1719e+00 L10_fnorm:1.1562e+00 L11_fnorm:1.1641e+00 L12_fnorm:1.1719e+00 L1_l1linf:2.3438e-01 L2_l1linf:2.3242e-01 L3_l1linf:2.2559e-01 L4_l1linf:2.3242e-01 L5_l1linf:2.3535e-01 L6_l1linf:2.3926e-01 L7_l1linf:2.4805e-01 L8_l1linf:2.2461e-01 L9_l1linf:2.1094e-01 L10_l1linf:2.0215e-01 L11_l1linf:2.0215e-01 L12_l1linf:2.3926e-01 L1_spectral:1.6491e-02 L2_spectral:1.5501e-02 L3_spectral:1.6616e-02 L4_spectral:1.6797e-02 L5_spectral:1.6460e-02 L6_spectral:1.6478e-02 L7_spectral:1.6443e-02 L8_spectral:1.6427e-02 L9_spectral:1.6578e-02 L10_spectral:1.6690e-02 L11_spectral:1.6644e-02 L12_spectral:1.6744e-02 train_time:331503ms step_avg:41.44ms +[2025-09-11 10:02:59] [Rank 0] step:8001/10000 train_time:332665ms step_avg:41.58ms +[2025-09-11 10:02:59] [Rank 0] step:8001/10000 train_time:332665ms step_avg:41.58ms +[2025-09-11 10:03:00] [Rank 0] step:8021/10000 train_time:333394ms step_avg:41.57ms +[2025-09-11 10:03:00] [Rank 0] step:8021/10000 train_time:333394ms step_avg:41.57ms +[2025-09-11 10:03:01] [Rank 0] step:8041/10000 train_time:334094ms step_avg:41.55ms +[2025-09-11 10:03:01] [Rank 0] step:8041/10000 train_time:334094ms step_avg:41.55ms +[2025-09-11 10:03:01] [Rank 0] step:8061/10000 train_time:334801ms step_avg:41.53ms +[2025-09-11 10:03:01] [Rank 0] step:8061/10000 train_time:334801ms step_avg:41.53ms +[2025-09-11 10:03:03] [Rank 0] step:8081/10000 train_time:336058ms step_avg:41.59ms +[2025-09-11 10:03:03] [Rank 0] step:8081/10000 
train_time:336058ms step_avg:41.59ms +[2025-09-11 10:03:03] [Rank 0] step:8101/10000 train_time:336755ms step_avg:41.57ms +[2025-09-11 10:03:03] [Rank 0] step:8101/10000 train_time:336755ms step_avg:41.57ms +[2025-09-11 10:03:04] [Rank 0] step:8121/10000 train_time:337610ms step_avg:41.57ms +[2025-09-11 10:03:04] [Rank 0] step:8121/10000 train_time:337610ms step_avg:41.57ms +[2025-09-11 10:03:06] [Rank 0] step:8141/10000 train_time:339185ms step_avg:41.66ms +[2025-09-11 10:03:06] [Rank 0] step:8141/10000 train_time:339185ms step_avg:41.66ms +[2025-09-11 10:03:06] [Rank 0] step:8161/10000 train_time:339888ms step_avg:41.65ms +[2025-09-11 10:03:06] [Rank 0] step:8161/10000 train_time:339888ms step_avg:41.65ms +[2025-09-11 10:03:07] [Rank 0] step:8181/10000 train_time:340601ms step_avg:41.63ms +[2025-09-11 10:03:07] [Rank 0] step:8181/10000 train_time:340601ms step_avg:41.63ms +[2025-09-11 10:03:08] [Rank 0] step:8201/10000 train_time:341308ms step_avg:41.62ms +[2025-09-11 10:03:08] [Rank 0] step:8201/10000 train_time:341308ms step_avg:41.62ms +[2025-09-11 10:03:09] [Rank 0] step:8221/10000 train_time:342014ms step_avg:41.60ms +[2025-09-11 10:03:09] [Rank 0] step:8221/10000 train_time:342014ms step_avg:41.60ms +[2025-09-11 10:03:09] [Rank 0] step:8241/10000 train_time:342728ms step_avg:41.59ms +[2025-09-11 10:03:09] [Rank 0] step:8241/10000 train_time:342728ms step_avg:41.59ms +[2025-09-11 10:03:10] [Rank 0] step:8261/10000 train_time:343432ms step_avg:41.57ms +[2025-09-11 10:03:10] [Rank 0] step:8261/10000 train_time:343432ms step_avg:41.57ms +[2025-09-11 10:03:11] [Rank 0] step:8281/10000 train_time:344134ms step_avg:41.56ms +[2025-09-11 10:03:11] [Rank 0] step:8281/10000 train_time:344134ms step_avg:41.56ms +[2025-09-11 10:03:11] [Rank 0] step:8301/10000 train_time:344839ms step_avg:41.54ms +[2025-09-11 10:03:11] [Rank 0] step:8301/10000 train_time:344839ms step_avg:41.54ms +[2025-09-11 10:03:12] [Rank 0] step:8321/10000 train_time:345544ms step_avg:41.53ms 
+[2025-09-11 10:03:12] [Rank 0] step:8321/10000 train_time:345544ms step_avg:41.53ms +[2025-09-11 10:03:13] [Rank 0] step:8341/10000 train_time:346256ms step_avg:41.51ms +[2025-09-11 10:03:13] [Rank 0] step:8341/10000 train_time:346256ms step_avg:41.51ms +[2025-09-11 10:03:14] [Rank 0] step:8361/10000 train_time:346958ms step_avg:41.50ms +[2025-09-11 10:03:14] [Rank 0] step:8361/10000 train_time:346958ms step_avg:41.50ms +[2025-09-11 10:03:14] [Rank 0] step:8381/10000 train_time:347666ms step_avg:41.48ms +[2025-09-11 10:03:14] [Rank 0] step:8381/10000 train_time:347666ms step_avg:41.48ms +[2025-09-11 10:03:15] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 10:03:15] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 10:03:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 10:03:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 10:03:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 10:03:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 10:03:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:03:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:03:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 10:03:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 10:03:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 10:03:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 10:03:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 10:03:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 10:03:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 10:03:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 10:03:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 10:03:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 10:03:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 10:03:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 10:03:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 10:03:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 10:03:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 10:03:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 10:03:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 10:03:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 10:03:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 10:03:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 10:03:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 10:03:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 10:03:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 10:03:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 10:03:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 10:03:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 10:03:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 10:03:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 10:03:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 10:03:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 10:03:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 10:03:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 10:03:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 10:03:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 10:03:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 10:03:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 10:03:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:03:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:03:25] [Rank 0] PRINT: step:8400/10000 val_loss:4.2562 total_sharp:2.7353e-04 L1_sharp:1.9736e-03 L2_sharp:2.9519e-04 L3_sharp:2.6586e-04 L4_sharp:2.4818e-04 L5_sharp:1.8385e-04 L6_sharp:2.2223e-04 L7_sharp:2.3358e-04 L8_sharp:6.2704e-04 L9_sharp:5.9273e-04 L10_sharp:7.5917e-04 L11_sharp:1.0355e-03 L12_sharp:4.1349e-03 total_fnorm:1.1938e+01 total_l1_linf:1.8176e+04 total_spectral:6.1875e+00 L1_fnorm:9.5703e-01 L2_fnorm:9.0625e-01 L3_fnorm:9.2188e-01 L4_fnorm:9.2969e-01 L5_fnorm:9.1797e-01 L6_fnorm:9.2578e-01 L7_fnorm:9.2188e-01 L8_fnorm:8.9844e-01 L9_fnorm:9.1406e-01 L10_fnorm:8.9844e-01 L11_fnorm:9.0625e-01 L12_fnorm:9.1406e-01 L1_l1linf:1.7383e-01 L2_l1linf:1.6992e-01 L3_l1linf:1.6504e-01 L4_l1linf:1.7383e-01 L5_l1linf:1.7090e-01 L6_l1linf:1.6992e-01 L7_l1linf:1.7578e-01 L8_l1linf:1.6113e-01 L9_l1linf:1.5332e-01 L10_l1linf:1.5039e-01 L11_l1linf:1.4355e-01 L12_l1linf:1.7578e-01 L1_spectral:1.3917e-02 L2_spectral:1.2434e-02 L3_spectral:1.3314e-02 L4_spectral:1.3492e-02 L5_spectral:1.3145e-02 L6_spectral:1.3202e-02 L7_spectral:1.3221e-02 L8_spectral:1.3442e-02 L9_spectral:1.3449e-02 L10_spectral:1.3353e-02 L11_spectral:1.3316e-02 L12_spectral:1.3381e-02 train_time:348352ms step_avg:41.47ms +[2025-09-11 10:03:25] [Rank 0] PRINT: step:8400/10000 val_loss:4.2562 total_sharp:2.7353e-04 L1_sharp:1.9736e-03 L2_sharp:2.9519e-04 L3_sharp:2.6586e-04 L4_sharp:2.4818e-04 L5_sharp:1.8385e-04 L6_sharp:2.2223e-04 L7_sharp:2.3358e-04 L8_sharp:6.2704e-04 L9_sharp:5.9273e-04 L10_sharp:7.5917e-04 L11_sharp:1.0355e-03 L12_sharp:4.1349e-03 total_fnorm:1.1938e+01 total_l1_linf:1.8176e+04 total_spectral:6.1875e+00 L1_fnorm:9.5703e-01 L2_fnorm:9.0625e-01 L3_fnorm:9.2188e-01 L4_fnorm:9.2969e-01 L5_fnorm:9.1797e-01 L6_fnorm:9.2578e-01 L7_fnorm:9.2188e-01 L8_fnorm:8.9844e-01 L9_fnorm:9.1406e-01 L10_fnorm:8.9844e-01 L11_fnorm:9.0625e-01 L12_fnorm:9.1406e-01 L1_l1linf:1.7383e-01 L2_l1linf:1.6992e-01 L3_l1linf:1.6504e-01 L4_l1linf:1.7383e-01 L5_l1linf:1.7090e-01 
L6_l1linf:1.6992e-01 L7_l1linf:1.7578e-01 L8_l1linf:1.6113e-01 L9_l1linf:1.5332e-01 L10_l1linf:1.5039e-01 L11_l1linf:1.4355e-01 L12_l1linf:1.7578e-01 L1_spectral:1.3917e-02 L2_spectral:1.2434e-02 L3_spectral:1.3314e-02 L4_spectral:1.3492e-02 L5_spectral:1.3145e-02 L6_spectral:1.3202e-02 L7_spectral:1.3221e-02 L8_spectral:1.3442e-02 L9_spectral:1.3449e-02 L10_spectral:1.3353e-02 L11_spectral:1.3316e-02 L12_spectral:1.3381e-02 train_time:348352ms step_avg:41.47ms +[2025-09-11 10:03:26] [Rank 0] step:8401/10000 train_time:349535ms step_avg:41.61ms +[2025-09-11 10:03:26] [Rank 0] step:8401/10000 train_time:349535ms step_avg:41.61ms +[2025-09-11 10:03:27] [Rank 0] step:8421/10000 train_time:350256ms step_avg:41.59ms +[2025-09-11 10:03:27] [Rank 0] step:8421/10000 train_time:350256ms step_avg:41.59ms +[2025-09-11 10:03:28] [Rank 0] step:8441/10000 train_time:350964ms step_avg:41.58ms +[2025-09-11 10:03:28] [Rank 0] step:8441/10000 train_time:350964ms step_avg:41.58ms +[2025-09-11 10:03:28] [Rank 0] step:8461/10000 train_time:351671ms step_avg:41.56ms +[2025-09-11 10:03:28] [Rank 0] step:8461/10000 train_time:351671ms step_avg:41.56ms +[2025-09-11 10:03:29] [Rank 0] step:8481/10000 train_time:352380ms step_avg:41.55ms +[2025-09-11 10:03:29] [Rank 0] step:8481/10000 train_time:352380ms step_avg:41.55ms +[2025-09-11 10:03:30] [Rank 0] step:8501/10000 train_time:353086ms step_avg:41.53ms +[2025-09-11 10:03:30] [Rank 0] step:8501/10000 train_time:353086ms step_avg:41.53ms +[2025-09-11 10:03:30] [Rank 0] step:8521/10000 train_time:353791ms step_avg:41.52ms +[2025-09-11 10:03:30] [Rank 0] step:8521/10000 train_time:353791ms step_avg:41.52ms +[2025-09-11 10:03:31] [Rank 0] step:8541/10000 train_time:354495ms step_avg:41.51ms +[2025-09-11 10:03:31] [Rank 0] step:8541/10000 train_time:354495ms step_avg:41.51ms +[2025-09-11 10:03:32] [Rank 0] step:8561/10000 train_time:355205ms step_avg:41.49ms +[2025-09-11 10:03:32] [Rank 0] step:8561/10000 train_time:355205ms step_avg:41.49ms 
+[2025-09-11 10:03:32] [Rank 0] step:8581/10000 train_time:355914ms step_avg:41.48ms +[2025-09-11 10:03:32] [Rank 0] step:8581/10000 train_time:355914ms step_avg:41.48ms +[2025-09-11 10:03:33] [Rank 0] step:8601/10000 train_time:356620ms step_avg:41.46ms +[2025-09-11 10:03:33] [Rank 0] step:8601/10000 train_time:356620ms step_avg:41.46ms +[2025-09-11 10:03:34] [Rank 0] step:8621/10000 train_time:357325ms step_avg:41.45ms +[2025-09-11 10:03:34] [Rank 0] step:8621/10000 train_time:357325ms step_avg:41.45ms +[2025-09-11 10:03:35] [Rank 0] step:8641/10000 train_time:358030ms step_avg:41.43ms +[2025-09-11 10:03:35] [Rank 0] step:8641/10000 train_time:358030ms step_avg:41.43ms +[2025-09-11 10:03:35] [Rank 0] step:8661/10000 train_time:358736ms step_avg:41.42ms +[2025-09-11 10:03:35] [Rank 0] step:8661/10000 train_time:358736ms step_avg:41.42ms +[2025-09-11 10:03:36] [Rank 0] step:8681/10000 train_time:359443ms step_avg:41.41ms +[2025-09-11 10:03:36] [Rank 0] step:8681/10000 train_time:359443ms step_avg:41.41ms +[2025-09-11 10:03:37] [Rank 0] step:8701/10000 train_time:360148ms step_avg:41.39ms +[2025-09-11 10:03:37] [Rank 0] step:8701/10000 train_time:360148ms step_avg:41.39ms +[2025-09-11 10:03:37] [Rank 0] step:8721/10000 train_time:360856ms step_avg:41.38ms +[2025-09-11 10:03:37] [Rank 0] step:8721/10000 train_time:360856ms step_avg:41.38ms +[2025-09-11 10:03:38] [Rank 0] step:8741/10000 train_time:361558ms step_avg:41.36ms +[2025-09-11 10:03:38] [Rank 0] step:8741/10000 train_time:361558ms step_avg:41.36ms +[2025-09-11 10:03:39] [Rank 0] step:8761/10000 train_time:362267ms step_avg:41.35ms +[2025-09-11 10:03:39] [Rank 0] step:8761/10000 train_time:362267ms step_avg:41.35ms +[2025-09-11 10:03:40] [Rank 0] step:8781/10000 train_time:362971ms step_avg:41.34ms +[2025-09-11 10:03:40] [Rank 0] step:8781/10000 train_time:362971ms step_avg:41.34ms +[2025-09-11 10:03:40] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 10:03:40] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 10:03:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 10:03:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 10:03:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 10:03:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 10:03:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:03:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:03:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 10:03:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 10:03:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 10:03:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 10:03:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 10:03:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 10:03:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 10:03:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 10:03:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 10:03:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 10:03:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 10:03:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 10:03:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 10:03:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 10:03:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 10:03:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 10:03:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 10:03:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 10:03:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 10:03:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 10:03:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 10:03:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 10:03:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 10:03:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 10:03:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 10:03:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 10:03:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 10:03:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 10:03:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 10:03:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 10:03:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 10:03:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 10:03:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 10:03:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 10:03:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 10:03:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 10:03:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:03:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:03:50] [Rank 0] PRINT: step:8800/10000 val_loss:4.2463 total_sharp:2.8101e-04 L1_sharp:1.6880e-03 L2_sharp:2.9498e-04 L3_sharp:1.8838e-04 L4_sharp:2.8065e-04 L5_sharp:2.4328e-04 L6_sharp:2.5909e-04 L7_sharp:2.3495e-04 L8_sharp:5.2878e-04 L9_sharp:4.7935e-04 L10_sharp:6.8321e-04 L11_sharp:1.0668e-03 L12_sharp:6.7627e-03 total_fnorm:8.6875e+00 total_l1_linf:1.1776e+04 total_spectral:4.5000e+00 L1_fnorm:7.1484e-01 L2_fnorm:6.6797e-01 L3_fnorm:6.7969e-01 L4_fnorm:6.8359e-01 L5_fnorm:6.7578e-01 L6_fnorm:6.8359e-01 L7_fnorm:6.7969e-01 L8_fnorm:6.6016e-01 L9_fnorm:6.7188e-01 L10_fnorm:6.6016e-01 L11_fnorm:6.6797e-01 L12_fnorm:6.7188e-01 L1_l1linf:1.1963e-01 L2_l1linf:1.1523e-01 L3_l1linf:1.1084e-01 L4_l1linf:1.1963e-01 L5_l1linf:1.1768e-01 L6_l1linf:1.1523e-01 L7_l1linf:1.1670e-01 L8_l1linf:1.1084e-01 L9_l1linf:1.0596e-01 L10_l1linf:9.8145e-02 L11_l1linf:9.6191e-02 L12_l1linf:1.2500e-01 L1_spectral:1.1159e-02 L2_spectral:9.4408e-03 L3_spectral:1.0082e-02 L4_spectral:1.0090e-02 L5_spectral:9.8005e-03 L6_spectral:9.9069e-03 L7_spectral:9.8288e-03 L8_spectral:1.0135e-02 L9_spectral:1.0035e-02 L10_spectral:1.0045e-02 L11_spectral:1.0087e-02 L12_spectral:1.0144e-02 train_time:363655ms step_avg:41.32ms +[2025-09-11 10:03:50] [Rank 0] PRINT: step:8800/10000 
val_loss:4.2463 total_sharp:2.8101e-04 L1_sharp:1.6880e-03 L2_sharp:2.9498e-04 L3_sharp:1.8838e-04 L4_sharp:2.8065e-04 L5_sharp:2.4328e-04 L6_sharp:2.5909e-04 L7_sharp:2.3495e-04 L8_sharp:5.2878e-04 L9_sharp:4.7935e-04 L10_sharp:6.8321e-04 L11_sharp:1.0668e-03 L12_sharp:6.7627e-03 total_fnorm:8.6875e+00 total_l1_linf:1.1776e+04 total_spectral:4.5000e+00 L1_fnorm:7.1484e-01 L2_fnorm:6.6797e-01 L3_fnorm:6.7969e-01 L4_fnorm:6.8359e-01 L5_fnorm:6.7578e-01 L6_fnorm:6.8359e-01 L7_fnorm:6.7969e-01 L8_fnorm:6.6016e-01 L9_fnorm:6.7188e-01 L10_fnorm:6.6016e-01 L11_fnorm:6.6797e-01 L12_fnorm:6.7188e-01 L1_l1linf:1.1963e-01 L2_l1linf:1.1523e-01 L3_l1linf:1.1084e-01 L4_l1linf:1.1963e-01 L5_l1linf:1.1768e-01 L6_l1linf:1.1523e-01 L7_l1linf:1.1670e-01 L8_l1linf:1.1084e-01 L9_l1linf:1.0596e-01 L10_l1linf:9.8145e-02 L11_l1linf:9.6191e-02 L12_l1linf:1.2500e-01 L1_spectral:1.1159e-02 L2_spectral:9.4408e-03 L3_spectral:1.0082e-02 L4_spectral:1.0090e-02 L5_spectral:9.8005e-03 L6_spectral:9.9069e-03 L7_spectral:9.8288e-03 L8_spectral:1.0135e-02 L9_spectral:1.0035e-02 L10_spectral:1.0045e-02 L11_spectral:1.0087e-02 L12_spectral:1.0144e-02 train_time:363655ms step_avg:41.32ms +[2025-09-11 10:03:51] [Rank 0] step:8801/10000 train_time:364840ms step_avg:41.45ms +[2025-09-11 10:03:51] [Rank 0] step:8801/10000 train_time:364840ms step_avg:41.45ms +[2025-09-11 10:03:52] [Rank 0] step:8821/10000 train_time:365582ms step_avg:41.44ms +[2025-09-11 10:03:52] [Rank 0] step:8821/10000 train_time:365582ms step_avg:41.44ms +[2025-09-11 10:03:53] [Rank 0] step:8841/10000 train_time:366290ms step_avg:41.43ms +[2025-09-11 10:03:53] [Rank 0] step:8841/10000 train_time:366290ms step_avg:41.43ms +[2025-09-11 10:03:53] [Rank 0] step:8861/10000 train_time:366997ms step_avg:41.42ms +[2025-09-11 10:03:53] [Rank 0] step:8861/10000 train_time:366997ms step_avg:41.42ms +[2025-09-11 10:03:54] [Rank 0] step:8881/10000 train_time:367703ms step_avg:41.40ms +[2025-09-11 10:03:54] [Rank 0] step:8881/10000 
train_time:367703ms step_avg:41.40ms +[2025-09-11 10:03:55] [Rank 0] step:8901/10000 train_time:368412ms step_avg:41.39ms +[2025-09-11 10:03:55] [Rank 0] step:8901/10000 train_time:368412ms step_avg:41.39ms +[2025-09-11 10:03:56] [Rank 0] step:8921/10000 train_time:369115ms step_avg:41.38ms +[2025-09-11 10:03:56] [Rank 0] step:8921/10000 train_time:369115ms step_avg:41.38ms +[2025-09-11 10:03:56] [Rank 0] step:8941/10000 train_time:369824ms step_avg:41.36ms +[2025-09-11 10:03:56] [Rank 0] step:8941/10000 train_time:369824ms step_avg:41.36ms +[2025-09-11 10:03:57] [Rank 0] step:8961/10000 train_time:370539ms step_avg:41.35ms +[2025-09-11 10:03:57] [Rank 0] step:8961/10000 train_time:370539ms step_avg:41.35ms +[2025-09-11 10:03:58] [Rank 0] step:8981/10000 train_time:371249ms step_avg:41.34ms +[2025-09-11 10:03:58] [Rank 0] step:8981/10000 train_time:371249ms step_avg:41.34ms +[2025-09-11 10:03:58] [Rank 0] step:9001/10000 train_time:371950ms step_avg:41.32ms +[2025-09-11 10:03:58] [Rank 0] step:9001/10000 train_time:371950ms step_avg:41.32ms +[2025-09-11 10:03:59] [Rank 0] step:9021/10000 train_time:372659ms step_avg:41.31ms +[2025-09-11 10:03:59] [Rank 0] step:9021/10000 train_time:372659ms step_avg:41.31ms +[2025-09-11 10:04:00] [Rank 0] step:9041/10000 train_time:373368ms step_avg:41.30ms +[2025-09-11 10:04:00] [Rank 0] step:9041/10000 train_time:373368ms step_avg:41.30ms +[2025-09-11 10:04:00] [Rank 0] step:9061/10000 train_time:374073ms step_avg:41.28ms +[2025-09-11 10:04:00] [Rank 0] step:9061/10000 train_time:374073ms step_avg:41.28ms +[2025-09-11 10:04:01] [Rank 0] step:9081/10000 train_time:374782ms step_avg:41.27ms +[2025-09-11 10:04:01] [Rank 0] step:9081/10000 train_time:374782ms step_avg:41.27ms +[2025-09-11 10:04:02] [Rank 0] step:9101/10000 train_time:375494ms step_avg:41.26ms +[2025-09-11 10:04:02] [Rank 0] step:9101/10000 train_time:375494ms step_avg:41.26ms +[2025-09-11 10:04:03] [Rank 0] step:9121/10000 train_time:376206ms step_avg:41.25ms 
+[2025-09-11 10:04:03] [Rank 0] step:9121/10000 train_time:376206ms step_avg:41.25ms +[2025-09-11 10:04:03] [Rank 0] step:9141/10000 train_time:376911ms step_avg:41.23ms +[2025-09-11 10:04:03] [Rank 0] step:9141/10000 train_time:376911ms step_avg:41.23ms +[2025-09-11 10:04:04] [Rank 0] step:9161/10000 train_time:377620ms step_avg:41.22ms +[2025-09-11 10:04:04] [Rank 0] step:9161/10000 train_time:377620ms step_avg:41.22ms +[2025-09-11 10:04:05] [Rank 0] step:9181/10000 train_time:378486ms step_avg:41.22ms +[2025-09-11 10:04:05] [Rank 0] step:9181/10000 train_time:378486ms step_avg:41.22ms +[2025-09-11 10:04:06] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 10:04:06] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 10:04:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 10:04:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 10:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 10:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 10:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 10:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 10:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 10:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 10:04:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 10:04:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 10:04:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 10:04:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 10:04:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 10:04:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 10:04:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 10:04:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 10:04:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 10:04:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 10:04:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 10:04:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 10:04:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 10:04:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 10:04:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 10:04:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 10:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 10:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 10:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 10:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 10:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 10:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 10:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 10:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 10:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 10:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 10:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 10:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 10:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 10:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 10:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 10:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 10:04:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:04:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:04:16] [Rank 0] PRINT: step:9200/10000 val_loss:4.2286 total_sharp:2.5442e-04 L1_sharp:2.3172e-03 L2_sharp:3.6047e-04 L3_sharp:2.0021e-04 L4_sharp:2.3572e-04 L5_sharp:2.8407e-04 L6_sharp:2.2807e-04 L7_sharp:2.3190e-04 L8_sharp:5.1400e-04 L9_sharp:5.1245e-04 L10_sharp:6.6112e-04 L11_sharp:9.1632e-04 L12_sharp:5.4203e-03 total_fnorm:5.8750e+00 total_l1_linf:6.8480e+03 total_spectral:3.0312e+00 L1_fnorm:4.8828e-01 L2_fnorm:4.3750e-01 L3_fnorm:4.4922e-01 L4_fnorm:4.5312e-01 L5_fnorm:4.4922e-01 L6_fnorm:4.5117e-01 L7_fnorm:4.4727e-01 L8_fnorm:4.3750e-01 L9_fnorm:4.4141e-01 L10_fnorm:4.3555e-01 L11_fnorm:4.3945e-01 L12_fnorm:4.4336e-01 L1_l1linf:7.4219e-02 L2_l1linf:6.9824e-02 L3_l1linf:6.5918e-02 L4_l1linf:7.1777e-02 L5_l1linf:6.7383e-02 L6_l1linf:7.1289e-02 L7_l1linf:6.9336e-02 L8_l1linf:6.4453e-02 L9_l1linf:6.2988e-02 L10_l1linf:5.7373e-02 L11_l1linf:5.8350e-02 L12_l1linf:7.8613e-02 L1_spectral:8.2878e-03 L2_spectral:6.2298e-03 L3_spectral:6.6991e-03 L4_spectral:6.7906e-03 L5_spectral:6.6155e-03 L6_spectral:6.6591e-03 L7_spectral:6.6433e-03 L8_spectral:6.9311e-03 L9_spectral:6.8063e-03 L10_spectral:6.7539e-03 L11_spectral:6.8002e-03 L12_spectral:6.8088e-03 train_time:379530ms step_avg:41.25ms +[2025-09-11 10:04:16] [Rank 0] PRINT: step:9200/10000 val_loss:4.2286 total_sharp:2.5442e-04 L1_sharp:2.3172e-03 L2_sharp:3.6047e-04 L3_sharp:2.0021e-04 L4_sharp:2.3572e-04 L5_sharp:2.8407e-04 L6_sharp:2.2807e-04 L7_sharp:2.3190e-04 L8_sharp:5.1400e-04 L9_sharp:5.1245e-04 L10_sharp:6.6112e-04 L11_sharp:9.1632e-04 L12_sharp:5.4203e-03 total_fnorm:5.8750e+00 total_l1_linf:6.8480e+03 total_spectral:3.0312e+00 L1_fnorm:4.8828e-01 L2_fnorm:4.3750e-01 L3_fnorm:4.4922e-01 L4_fnorm:4.5312e-01 L5_fnorm:4.4922e-01 L6_fnorm:4.5117e-01 L7_fnorm:4.4727e-01 L8_fnorm:4.3750e-01 L9_fnorm:4.4141e-01 L10_fnorm:4.3555e-01 L11_fnorm:4.3945e-01 L12_fnorm:4.4336e-01 L1_l1linf:7.4219e-02 L2_l1linf:6.9824e-02 L3_l1linf:6.5918e-02 L4_l1linf:7.1777e-02 L5_l1linf:6.7383e-02 
L6_l1linf:7.1289e-02 L7_l1linf:6.9336e-02 L8_l1linf:6.4453e-02 L9_l1linf:6.2988e-02 L10_l1linf:5.7373e-02 L11_l1linf:5.8350e-02 L12_l1linf:7.8613e-02 L1_spectral:8.2878e-03 L2_spectral:6.2298e-03 L3_spectral:6.6991e-03 L4_spectral:6.7906e-03 L5_spectral:6.6155e-03 L6_spectral:6.6591e-03 L7_spectral:6.6433e-03 L8_spectral:6.9311e-03 L9_spectral:6.8063e-03 L10_spectral:6.7539e-03 L11_spectral:6.8002e-03 L12_spectral:6.8088e-03 train_time:379530ms step_avg:41.25ms +[2025-09-11 10:04:17] [Rank 0] step:9201/10000 train_time:380725ms step_avg:41.38ms +[2025-09-11 10:04:17] [Rank 0] step:9201/10000 train_time:380725ms step_avg:41.38ms +[2025-09-11 10:04:18] [Rank 0] step:9221/10000 train_time:381447ms step_avg:41.37ms +[2025-09-11 10:04:18] [Rank 0] step:9221/10000 train_time:381447ms step_avg:41.37ms +[2025-09-11 10:04:18] [Rank 0] step:9241/10000 train_time:382153ms step_avg:41.35ms +[2025-09-11 10:04:18] [Rank 0] step:9241/10000 train_time:382153ms step_avg:41.35ms +[2025-09-11 10:04:19] [Rank 0] step:9261/10000 train_time:382862ms step_avg:41.34ms +[2025-09-11 10:04:19] [Rank 0] step:9261/10000 train_time:382862ms step_avg:41.34ms +[2025-09-11 10:04:20] [Rank 0] step:9281/10000 train_time:383571ms step_avg:41.33ms +[2025-09-11 10:04:20] [Rank 0] step:9281/10000 train_time:383571ms step_avg:41.33ms +[2025-09-11 10:04:20] [Rank 0] step:9301/10000 train_time:384277ms step_avg:41.32ms +[2025-09-11 10:04:20] [Rank 0] step:9301/10000 train_time:384277ms step_avg:41.32ms +[2025-09-11 10:04:21] [Rank 0] step:9321/10000 train_time:384986ms step_avg:41.30ms +[2025-09-11 10:04:21] [Rank 0] step:9321/10000 train_time:384986ms step_avg:41.30ms +[2025-09-11 10:04:22] [Rank 0] step:9341/10000 train_time:385689ms step_avg:41.29ms +[2025-09-11 10:04:22] [Rank 0] step:9341/10000 train_time:385689ms step_avg:41.29ms +[2025-09-11 10:04:23] [Rank 0] step:9361/10000 train_time:386393ms step_avg:41.28ms +[2025-09-11 10:04:23] [Rank 0] step:9361/10000 train_time:386393ms step_avg:41.28ms 
+[2025-09-11 10:04:23] [Rank 0] step:9381/10000 train_time:387098ms step_avg:41.26ms +[2025-09-11 10:04:23] [Rank 0] step:9381/10000 train_time:387098ms step_avg:41.26ms +[2025-09-11 10:04:24] [Rank 0] step:9401/10000 train_time:387806ms step_avg:41.25ms +[2025-09-11 10:04:24] [Rank 0] step:9401/10000 train_time:387806ms step_avg:41.25ms +[2025-09-11 10:04:25] [Rank 0] step:9421/10000 train_time:388516ms step_avg:41.24ms +[2025-09-11 10:04:25] [Rank 0] step:9421/10000 train_time:388516ms step_avg:41.24ms +[2025-09-11 10:04:25] [Rank 0] step:9441/10000 train_time:389226ms step_avg:41.23ms +[2025-09-11 10:04:25] [Rank 0] step:9441/10000 train_time:389226ms step_avg:41.23ms +[2025-09-11 10:04:26] [Rank 0] step:9461/10000 train_time:389933ms step_avg:41.21ms +[2025-09-11 10:04:26] [Rank 0] step:9461/10000 train_time:389933ms step_avg:41.21ms +[2025-09-11 10:04:27] [Rank 0] step:9481/10000 train_time:390641ms step_avg:41.20ms +[2025-09-11 10:04:27] [Rank 0] step:9481/10000 train_time:390641ms step_avg:41.20ms +[2025-09-11 10:04:28] [Rank 0] step:9501/10000 train_time:391349ms step_avg:41.19ms +[2025-09-11 10:04:28] [Rank 0] step:9501/10000 train_time:391349ms step_avg:41.19ms +[2025-09-11 10:04:28] [Rank 0] step:9521/10000 train_time:392059ms step_avg:41.18ms +[2025-09-11 10:04:28] [Rank 0] step:9521/10000 train_time:392059ms step_avg:41.18ms +[2025-09-11 10:04:29] [Rank 0] step:9541/10000 train_time:392764ms step_avg:41.17ms +[2025-09-11 10:04:29] [Rank 0] step:9541/10000 train_time:392764ms step_avg:41.17ms +[2025-09-11 10:04:30] [Rank 0] step:9561/10000 train_time:393470ms step_avg:41.15ms +[2025-09-11 10:04:30] [Rank 0] step:9561/10000 train_time:393470ms step_avg:41.15ms +[2025-09-11 10:04:30] [Rank 0] step:9581/10000 train_time:394179ms step_avg:41.14ms +[2025-09-11 10:04:30] [Rank 0] step:9581/10000 train_time:394179ms step_avg:41.14ms +[2025-09-11 10:04:31] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 10:04:31] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 10:04:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 10:04:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 10:04:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 10:04:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 10:04:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:04:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:04:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 10:04:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 10:04:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 10:04:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 10:04:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 10:04:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 10:04:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 10:04:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 10:04:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 10:04:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 10:04:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 10:04:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 10:04:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 10:04:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 10:04:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 10:04:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 10:04:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 10:04:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 10:04:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 10:04:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 10:04:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 10:04:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 10:04:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 10:04:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 10:04:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 10:04:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 10:04:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 10:04:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 10:04:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 10:04:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 10:04:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 10:04:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 10:04:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 10:04:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 10:04:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 10:04:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 10:04:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:04:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:04:43] [Rank 0] PRINT: step:9600/10000 val_loss:4.2183 total_sharp:1.4646e-04 L1_sharp:1.4430e-03 L2_sharp:2.0779e-04 L3_sharp:1.2560e-04 L4_sharp:1.4629e-04 L5_sharp:1.6652e-04 L6_sharp:1.5807e-04 L7_sharp:1.9460e-04 L8_sharp:3.9253e-04 L9_sharp:3.4660e-04 L10_sharp:4.1742e-04 L11_sharp:5.9574e-04 L12_sharp:2.0984e-03 total_fnorm:3.3281e+00 total_l1_linf:3.2800e+03 total_spectral:1.7188e+00 L1_fnorm:2.8125e-01 L2_fnorm:2.4707e-01 L3_fnorm:2.5391e-01 L4_fnorm:2.5781e-01 L5_fnorm:2.5391e-01 L6_fnorm:2.5586e-01 L7_fnorm:2.5586e-01 L8_fnorm:2.4902e-01 L9_fnorm:2.5195e-01 L10_fnorm:2.4805e-01 L11_fnorm:2.5000e-01 L12_fnorm:2.5195e-01 L1_l1linf:3.6621e-02 L2_l1linf:3.3691e-02 L3_l1linf:3.1006e-02 L4_l1linf:3.4668e-02 L5_l1linf:3.3691e-02 L6_l1linf:3.3691e-02 L7_l1linf:3.3447e-02 L8_l1linf:3.6865e-02 L9_l1linf:2.9541e-02 L10_l1linf:2.7954e-02 L11_l1linf:2.8931e-02 L12_l1linf:3.5400e-02 L1_spectral:4.9039e-03 L2_spectral:3.6436e-03 L3_spectral:3.8863e-03 L4_spectral:4.0050e-03 L5_spectral:3.8388e-03 L6_spectral:3.8818e-03 L7_spectral:3.8468e-03 L8_spectral:4.1571e-03 L9_spectral:3.9804e-03 L10_spectral:3.9687e-03 L11_spectral:3.9364e-03 L12_spectral:3.9888e-03 train_time:394863ms step_avg:41.13ms +[2025-09-11 10:04:43] [Rank 0] PRINT: step:9600/10000 
val_loss:4.2183 total_sharp:1.4646e-04 L1_sharp:1.4430e-03 L2_sharp:2.0779e-04 L3_sharp:1.2560e-04 L4_sharp:1.4629e-04 L5_sharp:1.6652e-04 L6_sharp:1.5807e-04 L7_sharp:1.9460e-04 L8_sharp:3.9253e-04 L9_sharp:3.4660e-04 L10_sharp:4.1742e-04 L11_sharp:5.9574e-04 L12_sharp:2.0984e-03 total_fnorm:3.3281e+00 total_l1_linf:3.2800e+03 total_spectral:1.7188e+00 L1_fnorm:2.8125e-01 L2_fnorm:2.4707e-01 L3_fnorm:2.5391e-01 L4_fnorm:2.5781e-01 L5_fnorm:2.5391e-01 L6_fnorm:2.5586e-01 L7_fnorm:2.5586e-01 L8_fnorm:2.4902e-01 L9_fnorm:2.5195e-01 L10_fnorm:2.4805e-01 L11_fnorm:2.5000e-01 L12_fnorm:2.5195e-01 L1_l1linf:3.6621e-02 L2_l1linf:3.3691e-02 L3_l1linf:3.1006e-02 L4_l1linf:3.4668e-02 L5_l1linf:3.3691e-02 L6_l1linf:3.3691e-02 L7_l1linf:3.3447e-02 L8_l1linf:3.6865e-02 L9_l1linf:2.9541e-02 L10_l1linf:2.7954e-02 L11_l1linf:2.8931e-02 L12_l1linf:3.5400e-02 L1_spectral:4.9039e-03 L2_spectral:3.6436e-03 L3_spectral:3.8863e-03 L4_spectral:4.0050e-03 L5_spectral:3.8388e-03 L6_spectral:3.8818e-03 L7_spectral:3.8468e-03 L8_spectral:4.1571e-03 L9_spectral:3.9804e-03 L10_spectral:3.9687e-03 L11_spectral:3.9364e-03 L12_spectral:3.9888e-03 train_time:394863ms step_avg:41.13ms +[2025-09-11 10:04:45] [Rank 0] step:9601/10000 train_time:396066ms step_avg:41.25ms +[2025-09-11 10:04:45] [Rank 0] step:9601/10000 train_time:396066ms step_avg:41.25ms +[2025-09-11 10:04:45] [Rank 0] step:9621/10000 train_time:396796ms step_avg:41.24ms +[2025-09-11 10:04:45] [Rank 0] step:9621/10000 train_time:396796ms step_avg:41.24ms +[2025-09-11 10:04:46] [Rank 0] step:9641/10000 train_time:397509ms step_avg:41.23ms +[2025-09-11 10:04:46] [Rank 0] step:9641/10000 train_time:397509ms step_avg:41.23ms +[2025-09-11 10:04:47] [Rank 0] step:9661/10000 train_time:398229ms step_avg:41.22ms +[2025-09-11 10:04:47] [Rank 0] step:9661/10000 train_time:398229ms step_avg:41.22ms +[2025-09-11 10:04:47] [Rank 0] step:9681/10000 train_time:398941ms step_avg:41.21ms +[2025-09-11 10:04:47] [Rank 0] step:9681/10000 
train_time:398941ms step_avg:41.21ms +[2025-09-11 10:04:48] [Rank 0] step:9701/10000 train_time:399655ms step_avg:41.20ms +[2025-09-11 10:04:48] [Rank 0] step:9701/10000 train_time:399655ms step_avg:41.20ms +[2025-09-11 10:04:49] [Rank 0] step:9721/10000 train_time:400373ms step_avg:41.19ms +[2025-09-11 10:04:49] [Rank 0] step:9721/10000 train_time:400373ms step_avg:41.19ms +[2025-09-11 10:04:50] [Rank 0] step:9741/10000 train_time:401088ms step_avg:41.18ms +[2025-09-11 10:04:50] [Rank 0] step:9741/10000 train_time:401088ms step_avg:41.18ms +[2025-09-11 10:04:50] [Rank 0] step:9761/10000 train_time:401803ms step_avg:41.16ms +[2025-09-11 10:04:50] [Rank 0] step:9761/10000 train_time:401803ms step_avg:41.16ms +[2025-09-11 10:04:51] [Rank 0] step:9781/10000 train_time:402515ms step_avg:41.15ms +[2025-09-11 10:04:51] [Rank 0] step:9781/10000 train_time:402515ms step_avg:41.15ms +[2025-09-11 10:04:52] [Rank 0] step:9801/10000 train_time:403234ms step_avg:41.14ms +[2025-09-11 10:04:52] [Rank 0] step:9801/10000 train_time:403234ms step_avg:41.14ms +[2025-09-11 10:04:53] [Rank 0] step:9821/10000 train_time:403950ms step_avg:41.13ms +[2025-09-11 10:04:53] [Rank 0] step:9821/10000 train_time:403950ms step_avg:41.13ms +[2025-09-11 10:04:53] [Rank 0] step:9841/10000 train_time:404668ms step_avg:41.12ms +[2025-09-11 10:04:53] [Rank 0] step:9841/10000 train_time:404668ms step_avg:41.12ms +[2025-09-11 10:04:54] [Rank 0] step:9861/10000 train_time:405384ms step_avg:41.11ms +[2025-09-11 10:04:54] [Rank 0] step:9861/10000 train_time:405384ms step_avg:41.11ms +[2025-09-11 10:04:55] [Rank 0] step:9881/10000 train_time:406099ms step_avg:41.10ms +[2025-09-11 10:04:55] [Rank 0] step:9881/10000 train_time:406099ms step_avg:41.10ms +[2025-09-11 10:04:55] [Rank 0] step:9901/10000 train_time:406811ms step_avg:41.09ms +[2025-09-11 10:04:55] [Rank 0] step:9901/10000 train_time:406811ms step_avg:41.09ms +[2025-09-11 10:04:56] [Rank 0] step:9921/10000 train_time:407525ms step_avg:41.08ms 
+[2025-09-11 10:04:56] [Rank 0] step:9921/10000 train_time:407525ms step_avg:41.08ms +[2025-09-11 10:04:57] [Rank 0] step:9941/10000 train_time:408243ms step_avg:41.07ms +[2025-09-11 10:04:57] [Rank 0] step:9941/10000 train_time:408243ms step_avg:41.07ms +[2025-09-11 10:04:58] [Rank 0] step:9961/10000 train_time:408962ms step_avg:41.06ms +[2025-09-11 10:04:58] [Rank 0] step:9961/10000 train_time:408962ms step_avg:41.06ms +[2025-09-11 10:04:58] [Rank 0] step:9981/10000 train_time:409678ms step_avg:41.05ms +[2025-09-11 10:04:58] [Rank 0] step:9981/10000 train_time:409678ms step_avg:41.05ms +[2025-09-11 10:04:59] [Rank 0] step:10000/10000 train_time:410365ms step_avg:41.04ms +[2025-09-11 10:04:59] [Rank 0] step:10000/10000 train_time:410365ms step_avg:41.04ms +[2025-09-11 10:04:59] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 10:04:59] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 10:05:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 10:05:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 10:05:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 10:05:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 10:05:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:05:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:05:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 10:05:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 10:05:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 10:05:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 10:05:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 10:05:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 10:05:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 10:05:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 10:05:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 10:05:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 10:05:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 10:05:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 10:05:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 10:05:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 10:05:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 10:05:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 10:05:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 10:05:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 10:05:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 10:05:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 10:05:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 10:05:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 10:05:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 10:05:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 10:05:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 10:05:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 10:05:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 10:05:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 10:05:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 10:05:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 10:05:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 10:05:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 10:05:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 10:05:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 10:05:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 10:05:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 10:05:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:05:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:05:09] [Rank 0] PRINT: step:10000/10000 val_loss:4.2145 total_sharp:1.0367e-04 L1_sharp:1.2454e-03 L2_sharp:1.8618e-04 L3_sharp:9.2179e-05 L4_sharp:2.0485e-04 L5_sharp:1.6648e-04 L6_sharp:1.3461e-04 L7_sharp:1.5037e-04 L8_sharp:2.7685e-04 L9_sharp:2.3259e-04 L10_sharp:3.3875e-04 L11_sharp:4.8018e-04 L12_sharp:1.4970e-03 total_fnorm:1.2578e+00 total_l1_linf:9.0000e+02 total_spectral:6.4844e-01 L1_fnorm:1.1035e-01 L2_fnorm:9.3262e-02 L3_fnorm:9.7656e-02 L4_fnorm:9.8633e-02 L5_fnorm:9.7168e-02 L6_fnorm:9.8633e-02 L7_fnorm:9.8633e-02 L8_fnorm:9.5703e-02 L9_fnorm:9.7168e-02 L10_fnorm:9.5703e-02 L11_fnorm:9.6191e-02 L12_fnorm:9.7656e-02 L1_l1linf:1.2268e-02 L2_l1linf:1.0315e-02 L3_l1linf:9.5825e-03 L4_l1linf:1.0559e-02 L5_l1linf:1.0376e-02 L6_l1linf:1.0132e-02 L7_l1linf:1.0071e-02 L8_l1linf:1.0559e-02 L9_l1linf:8.9722e-03 L10_l1linf:8.6670e-03 L11_l1linf:8.7280e-03 L12_l1linf:1.0742e-02 L1_spectral:2.0916e-03 L2_spectral:1.4228e-03 L3_spectral:1.5417e-03 L4_spectral:1.5650e-03 L5_spectral:1.5011e-03 L6_spectral:1.5197e-03 L7_spectral:1.5347e-03 L8_spectral:1.6681e-03 L9_spectral:1.5599e-03 L10_spectral:1.5678e-03 L11_spectral:1.5745e-03 L12_spectral:1.5783e-03 train_time:410385ms step_avg:41.04ms +[2025-09-11 10:05:09] [Rank 0] PRINT: step:10000/10000 val_loss:4.2145 total_sharp:1.0367e-04 L1_sharp:1.2454e-03 L2_sharp:1.8618e-04 L3_sharp:9.2179e-05 L4_sharp:2.0485e-04 L5_sharp:1.6648e-04 L6_sharp:1.3461e-04 L7_sharp:1.5037e-04 L8_sharp:2.7685e-04 L9_sharp:2.3259e-04 L10_sharp:3.3875e-04 L11_sharp:4.8018e-04 L12_sharp:1.4970e-03 total_fnorm:1.2578e+00 total_l1_linf:9.0000e+02 total_spectral:6.4844e-01 L1_fnorm:1.1035e-01 L2_fnorm:9.3262e-02 L3_fnorm:9.7656e-02 L4_fnorm:9.8633e-02 L5_fnorm:9.7168e-02 L6_fnorm:9.8633e-02 L7_fnorm:9.8633e-02 L8_fnorm:9.5703e-02 L9_fnorm:9.7168e-02 L10_fnorm:9.5703e-02 L11_fnorm:9.6191e-02 L12_fnorm:9.7656e-02 L1_l1linf:1.2268e-02 L2_l1linf:1.0315e-02 L3_l1linf:9.5825e-03 L4_l1linf:1.0559e-02 L5_l1linf:1.0376e-02 
L6_l1linf:1.0132e-02 L7_l1linf:1.0071e-02 L8_l1linf:1.0559e-02 L9_l1linf:8.9722e-03 L10_l1linf:8.6670e-03 L11_l1linf:8.7280e-03 L12_l1linf:1.0742e-02 L1_spectral:2.0916e-03 L2_spectral:1.4228e-03 L3_spectral:1.5417e-03 L4_spectral:1.5650e-03 L5_spectral:1.5011e-03 L6_spectral:1.5197e-03 L7_spectral:1.5347e-03 L8_spectral:1.6681e-03 L9_spectral:1.5599e-03 L10_spectral:1.5678e-03 L11_spectral:1.5745e-03 L12_spectral:1.5783e-03 train_time:410385ms step_avg:41.04ms +[2025-09-11 10:05:09] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 10:05:09 2025 --- +[2025-09-11 10:05:09] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 10:05:09 2025 --- +[2025-09-11 10:05:09] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 10:05:09] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.05_seed_44/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.05_seed_44/config.json new file mode 100644 index 0000000000000000000000000000000000000000..5d3de3b6ea5b4c0018cf5fe17f35f8ff1e8d522b --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.05_seed_44/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 44, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.01, + "muon_lr": 0.05, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "86b5cb92-6afd-4725-971d-52684d6cecd9", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.05_seed_44/training_log_86b5cb92-6afd-4725-971d-52684d6cecd9.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.05_seed_44/training_log_86b5cb92-6afd-4725-971d-52684d6cecd9.txt new file mode 100644 index 0000000000000000000000000000000000000000..6838cc090bf2e4e996c0f5cb707802a440b0399a --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.05_seed_44/training_log_86b5cb92-6afd-4725-971d-52684d6cecd9.txt @@ -0,0 +1,4264 @@ +[2025-09-11 09:38:15] [Rank 0] PRINT: --- Script Start: Thu Sep 11 09:38:15 2025 --- +[2025-09-11 09:38:15] [Rank 0] PRINT: --- Script Start: Thu Sep 11 09:38:15 2025 --- +[2025-09-11 09:38:15] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.01, muon_lr=0.05, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 09:38:15] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.01, muon_lr=0.05, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 09:38:15] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 09:38:15] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 09:38:15] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 09:38:15] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 09:38:15] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.05_seed_44 +[2025-09-11 09:38:15] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.05_seed_44 +[2025-09-11 09:38:15] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import 
dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, 
"unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." 
+ "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention 
sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
        # ---- Group the per-layer matrices into the experiment's logical units ----
        # QK vs VO split: q/k weights shape the attention logits, v/o weights
        # shape the value pathway.  The mode switch below assigns each group
        # to either Muon or Adam.
        attn_qk_group = attn_q_params + attn_k_params
        attn_vo_group = attn_v_params + attn_o_params
        all_attn_matrices = attn_qk_group + attn_vo_group
        mlp_w1_group = mlp_fc_params    # W_1: the c_fc (expand) weights
        mlp_w2_group = mlp_proj_params  # W_2: the c_proj (contract) weights
        all_mlp_matrices = mlp_fc_params + mlp_proj_params

        # Scalar parameters (all others not explicitly grouped as matrices)
        matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
        scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
        for p_scalar in scalar_params:  # Sanity check: anything >=2-D should have been grouped above
            if p_scalar.ndim >=2:
                print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)

        # ---- Decide which matrices Muon updates vs. Adam, per --optimizer_mode ----
        muon_params_target_list = []
        adam_matrix_target_list = []  # Matrices that Adam will handle specifically
        adam_matrix_lr = exp_args.adam_lr  # LR for matrices if Adam handles them (can be tuned)
        muon_lr = exp_args.muon_lr

        current_optimizer_mode = exp_args.optimizer_mode
        print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)

        if current_optimizer_mode == 0:  # Original behavior: Muon on all "hidden_matrix_params"
            print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
            muon_params_target_list = all_attn_matrices + all_mlp_matrices
            # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
        elif current_optimizer_mode == 1:  # Muon on QK, Adam on VO and MLP
            print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = attn_qk_group
            adam_matrix_target_list = attn_vo_group + all_mlp_matrices
        elif current_optimizer_mode == 2:  # Muon on VO, Adam on QK and MLP
            print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = attn_vo_group
            adam_matrix_target_list = attn_qk_group + all_mlp_matrices
        elif current_optimizer_mode == 3:  # Muon on All Attn (QKVO), Adam on MLP
            print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = all_attn_matrices
            adam_matrix_target_list = all_mlp_matrices
        elif current_optimizer_mode == 4:  # Muon on MLP, Adam on All Attn (QKVO)
            print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = all_mlp_matrices
            adam_matrix_target_list = all_attn_matrices
        elif current_optimizer_mode == 5:  # NEW MODE 5 - All Adam
            print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = []
            adam_matrix_target_list = all_attn_matrices + all_mlp_matrices  # All matrices to Adam
        elif current_optimizer_mode == 6:  # Muon on W_2 MLP, Adam on attn, W_1 MLP
            print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = mlp_w2_group
            adam_matrix_target_list = all_attn_matrices + mlp_w1_group
        elif current_optimizer_mode == 7:  # Muon on VO Attn, MLP, Adam on QK Attn
            print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = attn_vo_group + all_mlp_matrices
            adam_matrix_target_list = attn_qk_group
        elif current_optimizer_mode == 8:  # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
            print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = attn_vo_group + mlp_w2_group
            adam_matrix_target_list = attn_qk_group + mlp_w1_group
        else:
            raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")

        # ---- Adam: always owns head, embedding and scalar params, plus any
        # matrices the chosen mode routed to it.  All groups share --adam_lr. ----
        adam_param_groups_config = [
            #dict(params=head_params, lr=0.22),
            #dict(params=embed_params, lr=0.6),
            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
            dict(params=head_params, lr=adam_matrix_lr),
            dict(params=embed_params, lr=adam_matrix_lr),
            dict(params=scalar_params, lr=adam_matrix_lr)  # Scalar params always go to Adam
        ]
        # Add matrices specifically assigned to Adam for this experiment mode
        if adam_matrix_target_list:
            # Ensure adam_matrix_target_list is flat and contains Parameters
            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
            if flat_adam_matrices:  # Only add group if there are params
                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))

        # Filter out any Adam groups that might be empty (e.g. if scalar_params was empty)
        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)
        optimizers = [optimizer1]  # Start with Adam

        # ---- Muon: only created when the mode routed at least one matrix to it ----
        if muon_params_target_list:
            # Ensure muon_params_target_list is flat, unique, and contains Parameters
            # (de-dup by id() since modes can mention the same tensor twice).
            flat_unique_muon_params = []
            seen_muon_ids = set()
            for sublist_or_p in muon_params_target_list:
                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
                    if p is not None and id(p) not in seen_muon_ids:
                        flat_unique_muon_params.append(p)
                        seen_muon_ids.add(id(p))

            if flat_unique_muon_params:  # Only create Muon if it has parameters
                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size)  # Pass nesterov, ns_steps
                optimizers.append(optimizer2)
            else:
                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
                optimizer2 = None  # Explicitly set to None if not created
        else:
            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
            optimizer2 = None  # Explicitly set to None

        print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
        if optimizer2:
            print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
    elif exp_args.model_parameterization == "whole":
        # Original coarse grouping: every >=2-D weight inside the blocks goes
        # to Muon; embeddings, scalars and the LM head go to Adam at fixed LRs.
        hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
        embed_params = [p for n, p in model.named_parameters() if "embed" in n]
        scalar_params = [p for p in model.parameters() if p.ndim < 2]
        head_params = [model.lm_head.weight]

        # init the optimizer(s)
        adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
        # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
        # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
        optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
        optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
        optimizers = [optimizer1, optimizer2]

    # Remember each group's base LR so the schedule below can rescale it per step.
    for opt in optimizers:
        for group in opt.param_groups:
            group["initial_lr"] = group["lr"]

    # learning rate schedule: stable then decay (KEEP AS IS, but check assert)
    def get_lr(step: int):
        """Multiplicative LR factor: 1.0 during the stable phase, then a linear
        ramp from 1.0 down to 0.1 over the final `cooldown_frac` of training."""
        x = step / args.num_iterations  # progress in training
        # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations
        # --- MODIFICATION: Adjust assert for LR schedule ---
        if not (0 <= x <= 1):  # Allow x=1 for the last step
            x = min(max(x, 0.0), 1.0)  # Clamp x if step goes beyond num_iterations
            # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log

        if x < 1 - args.cooldown_frac:
            return 1.0
        else:
            # Ensure cooldown_frac is not zero to avoid division by zero
            w = (1 - x) / max(args.cooldown_frac, 1e-9)
            return w * 1.0 + (1 - w) * 0.1

    # attention window size schedule (KEEP AS IS)
    def next_multiple_of_n(v: float | int, *, n: int):
        # Smallest multiple of n that is >= v (and never less than n itself).
        return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)

    @lru_cache(1)
    def get_window_size_blocks_helper(window_size: int):
        # Cache the (tiny) on-device tensor so consecutive steps with the same
        # window size reuse it instead of re-allocating.
        return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)

    def get_window_size_blocks(step: int):
        """Linearly grow the attention window over training (128-token blocks),
        returned as a block count on the GPU for FlexAttention."""
        x = step / args.num_iterations  # progress in training
        # --- MODIFICATION: Adjust assert for window size schedule ---
        if not (0 <= x <= 1):
            x = min(max(x, 0.0), 1.0)  # Clamp x

        # Ensure window_size is at least 128
        window_size = max(128, next_multiple_of_n(1728 * x, n=128))
        return get_window_size_blocks_helper(window_size)
    @torch.no_grad()
    def calculate_validation_loss(model_to_eval, val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks):
        """Helper function to calculate validation loss on a subset of the validation set.

        Args:
            model_to_eval: the (compiled) model; switched to eval() here and
                back to train() before returning.
            val_data_pattern: glob pattern of validation .bin shards.
            val_batch_size: global batch size in tokens (split across ranks by
                the data generator).
            val_tokens_limit: nominal validation token budget; only 1/4 of it
                is consumed, for speed.
            current_step: training step (kept for caller symmetry; not used in
                the computation itself).
            window_size_blocks: attention-window tensor for this step.

        Returns:
            Mean validation loss as a Python float, averaged across ranks
            (NaN if the loader yielded no batches at all).
        """
        model_to_eval.eval()
        val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size)
        val_num_steps = (val_tokens_limit // 4) // val_batch_size  # Use 1/4 of val set for speed

        val_loss_sum = torch.zeros(1, device=device)
        actual_val_steps = 0

        for val_i in range(val_num_steps):
            try:
                inputs, targets = next(val_loader)
                loss_val = model_to_eval(inputs, targets, window_size_blocks)
                val_loss_sum += loss_val
                actual_val_steps += 1
            except StopIteration:
                # Loader ran out of shards early: average over what we saw.
                break

        if actual_val_steps > 0:
            val_loss_avg = val_loss_sum / actual_val_steps
        else:
            val_loss_avg = torch.tensor(float('nan'), device=device)

        del val_loader
        # Average the per-rank means so every rank returns the same scalar.
        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)

        model_to_eval.train()
        return val_loss_avg.item()
def calculate_l1_to_linf_norm(matrix):
    """Return the ℓ1→ℓ∞ operator norm of `matrix`: the largest row-wise L1 norm.

    1-D inputs yield the plain L1 norm; tensors with more than two dimensions
    are flattened to 2-D along their first axis before the row reduction.
    """
    if matrix.ndim == 1:
        return matrix.abs().sum()
    rows = matrix if matrix.ndim == 2 else matrix.view(matrix.shape[0], -1)
    # Per-row L1 norms, reduced to their maximum.
    return rows.abs().sum(dim=1).max()

def calculate_spectral_norm(matrix):
    """Return the spectral norm (largest singular value) of `matrix`.

    1-D inputs yield the L2 norm; tensors with more than two dimensions are
    flattened to 2-D along their first axis.  Half-precision inputs are upcast
    to float32 first, since torch.linalg does not support them.
    """
    if matrix.dtype in [torch.bfloat16, torch.float16]:
        matrix = matrix.float()

    if matrix.ndim == 1:
        return torch.norm(matrix, p=2)
    rows = matrix if matrix.ndim == 2 else matrix.view(matrix.shape[0], -1)
    return torch.linalg.matrix_norm(rows, ord=2)
    def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator):
        """
        Comprehensive sharpness analysis including all layers, cross-layer interactions,
        gradient correlations, and parameter type analysis.

        Measures the directional curvature v^T H v / ||v||^2 of the training loss
        along the optimizer's *actual* next update direction v (obtained by taking
        a real optimizer step and rolling the weights back), plus Frobenius,
        ℓ1→ℓ∞ and spectral norms of v, both in total and per transformer layer.

        Side effects worth knowing about:
          * temporarily steps the optimizers to obtain v, then restores weights;
          * switches the model to float32 for the HVP and back to bfloat16;
          * clears ALL optimizer state at the end (see NOTE in step 7).
        """
        analysis_results = {}

        # --- 1. Get the true update direction 'v' ---
        print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True)
        params_before_step = [p.clone().detach() for p in model.parameters()]

        grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size)
        try:
            inputs, targets = next(grad_calc_loader)
            model_compiled.train()
            with autocast(device_type='cuda', dtype=torch.bfloat16):
                loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step))
            loss_for_grad.backward()

            # Store gradients for correlation analysis
            layer_gradients = {}

            for param in model.parameters():
                if param.grad is not None:
                    dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
        finally:
            del grad_calc_loader

        # Probe step: apply the real optimizers once so v reflects what they
        # would actually do (momentum, preconditioning and all).
        for opt in optimizers:
            opt.step()
        model_compiled.zero_grad(set_to_none=True)

        update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())]

        # Roll the weights back: the step above was only for measuring v.
        for p_model, p_before in zip(model.parameters(), params_before_step):
            p_model.data.copy_(p_before.data)

        # --- 2. Calculate update norms (Frobenius, Max-of-Max, Spectral) ---
        print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True)

        # Calculate total update norm (Frobenius)
        total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v)
        dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM)
        analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item()

        # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates
        print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True)
        try:
            # Concatenate all update vectors into one long vector
            all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0])

            if all_updates_flat.numel() > 0:
                # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector)
                total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat))
                analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item()

                # Calculate total spectral norm (L2 norm for concatenated vector)
                total_spectral_norm = torch.norm(all_updates_flat, p=2)
                analysis_results["total_spectral_norm"] = total_spectral_norm.item()
            else:
                analysis_results["total_l1_linf_norm"] = 0.0
                analysis_results["total_spectral_norm"] = 0.0

            del all_updates_flat
        except Exception as e:
            print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True)
            analysis_results["total_l1_linf_norm"] = 0.0
            analysis_results["total_spectral_norm"] = 0.0

        # --- 3. Setup layer parameter groups ---
        print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True)

        # Only layer groups (1 to 12)
        all_param_groups = {}

        for i in range(len(model.blocks)):
            block = model.blocks[i]
            layer_name = f"layer_{i+1}"

            # All parameters in this layer
            all_param_groups[layer_name] = list(block.parameters())

        # --- 3. Calculate layer-wise and component-wise update norms ---
        print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True)

        # Create parameter to index mapping
        param_to_idx = {p: i for i, p in enumerate(model.parameters())}

        # Calculate update norms for each parameter group
        for group_name, param_group in all_param_groups.items():
            if not param_group:
                continue

            # Get indices for this group
            indices = [param_to_idx[p] for p in param_group if p in param_to_idx]
            if not indices:
                continue

            # Calculate Frobenius norm for this group
            group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices)
            dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM)
            analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item()

            # Calculate Max-of-Max and Spectral norms for this group
            group_l1_linf_norms = []
            group_spectral_norms = []

            for i in indices:
                if i < len(update_direction_v) and update_direction_v[i].numel() > 0:
                    try:
                        # Calculate ℓ1→ℓ∞ norm for this parameter
                        l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i])
                        group_l1_linf_norms.append(l1_linf_norm.item())

                        # Calculate spectral norm for this parameter
                        spectral_norm = calculate_spectral_norm(update_direction_v[i])
                        group_spectral_norms.append(spectral_norm.item())
                    except Exception as e:
                        print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True)
                        group_l1_linf_norms.append(0.0)
                        group_spectral_norms.append(0.0)

            # Store max norms for this group (as per mathematical definition)
            if group_l1_linf_norms:
                analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms)
            else:
                analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0

            if group_spectral_norms:
                analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms)
            else:
                analysis_results[f"{group_name}_max_spectral_norm"] = 0.0

        # --- 4. Setup for HVP calculation ---
        print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True)
        model.float()  # Switch to float32 for precision
        update_direction_v_fp32 = [v.float() for v in update_direction_v]

        hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size)
        try:
            inputs, targets = next(hvp_loader)

            # Calculate loss and first-order gradients once, with graph for second-order grads
            loss_hvp = model(inputs, targets, get_window_size_blocks(step))
            grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True)

            # --- 5. Calculate TOTAL sharpness ---
            # v^T H v via double backward: d/dtheta (g . v) = H v, then dot with v.
            print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True)
            v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32))
            hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True)

            vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32))
            v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32)

            dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG)
            dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM)

            if v_norm_sq_total.item() > 1e-12:
                analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item()
            else:
                analysis_results["total_sharpness"] = 0.0
            del hvp_total_result

            # --- 6. Calculate layer-wise sharpness ---
            print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True)
            param_to_idx = {p: i for i, p in enumerate(model.parameters())}

            print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True)

            for group_name, param_group in all_param_groups.items():
                if not param_group:
                    continue

                print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True)
                indices = {param_to_idx[p] for p in param_group if p in param_to_idx}
                if not indices:
                    continue

                try:
                    # Same double-backward trick, restricted to this layer's block of v.
                    v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices)
                    hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True)

                    vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices)
                    v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices)

                    dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG)
                    dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM)

                    if v_norm_sq_group.item() > 1e-12:
                        analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item()
                    else:
                        analysis_results[f"{group_name}_sharpness"] = 0.0

                    # Clean up immediately to save memory
                    del hvp_group_result
                    torch.cuda.empty_cache()

                except torch.OutOfMemoryError as e:
                    print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True)
                    analysis_results[f"{group_name}_sharpness"] = 0.0
                    torch.cuda.empty_cache()
                except Exception as e:
                    print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True)
                    analysis_results[f"{group_name}_sharpness"] = 0.0

        finally:
            del hvp_loader

        # --- 7. Cleanup ---
        model.bfloat16()  # Switch back
        del update_direction_v, update_direction_v_fp32, grads_hvp
        del params_before_step
        gc.collect()
        torch.cuda.empty_cache()

        # NOTE(review): this wipes Adam/Muon state (momentum buffers etc.) on
        # every analysis call -- presumably to discard state polluted by the
        # probe step in section 1, but it also discards the genuine training
        # momentum.  Confirm this is intended.
        for opt in optimizers:
            opt.state.clear()

        print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True)
        return analysis_results
def format_comprehensive_results(results):
    """Render the sharpness-analysis dict as a compact single-line log string.

    Section order: total sharpness, per-layer sharpness (layers 1-12), total
    update norms, then per-layer Frobenius / ℓ1→ℓ∞ / spectral update norms.
    Missing keys are simply skipped; all values are formatted as '%.4e'.
    """
    pieces = []

    if 'total_sharpness' in results:
        pieces.append(f"total_sharp:{results['total_sharpness']:.4e}")

    def per_layer(key_suffix, label):
        # Collect "L{i}_{label}:value" entries for layers 1..12 that exist.
        entries = [
            f"L{i}_{label}:{results[f'layer_{i}_{key_suffix}']:.4e}"
            for i in range(1, 13)
            if f"layer_{i}_{key_suffix}" in results
        ]
        if entries:
            pieces.append(" ".join(entries))

    per_layer("sharpness", "sharp")

    totals = [
        f"{label}:{results[key]:.4e}"
        for key, label in (
            ("total_update_fnorm", "total_fnorm"),
            ("total_l1_linf_norm", "total_l1_linf"),
            ("total_spectral_norm", "total_spectral"),
        )
        if key in results
    ]
    if totals:
        pieces.append(" ".join(totals))

    per_layer("update_fnorm", "fnorm")
    per_layer("max_l1_linf_norm", "l1linf")
    per_layer("max_spectral_norm", "spectral")

    return " ".join(pieces)
    print0("PRINT: Starting training...", console=True)
    train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
    training_time_ms = 0
    torch.cuda.synchronize()
    t0 = time.perf_counter()
    train_steps = args.num_iterations

    for step in range(train_steps + 1):  # Loop up to num_iterations (inclusive for final validation)
        last_step = (step == train_steps)

        # --------------- VALIDATION SECTION -----------------
        # Validate at specified intervals and at the very last step
        if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0):
            torch.cuda.synchronize()
            # Pause the training clock: time spent validating is accumulated
            # into training_time_ms and t0 is reset afterwards.
            current_run_time = 1000 * (time.perf_counter() - t0)
            training_time_ms += current_run_time

            # --- 1. Compute baseline validation loss ---
            print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True)
            val_loss_item = calculate_validation_loss(
                model_compiled,
                args.val_files,
                world_size * args.val_seq_len,
                args.val_tokens,
                step,
                get_window_size_blocks(step)
            )

            # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) ---
            sharpness_log_str = ""
            # Only run sharpness for compatible models
            if exp_args.model_parameterization in ["qkvo", "gated"]:
                comprehensive_results = calculate_comprehensive_sharpness(
                    model, model_compiled, optimizers, step, args, rank, world_size,
                    print0, get_window_size_blocks, distributed_data_generator
                )
                sharpness_log_str = format_comprehensive_results(comprehensive_results)
            else:
                print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True)

            # --- 3. Logging ---
            avg_step_time = training_time_ms / max(step, 1)
            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)

            # --- 4. Reset timer for the next training segment ---
            torch.cuda.synchronize()
            t0 = time.perf_counter()

        if last_step:
            if master_process and args.save_checkpoint:
                if run_dir_path_str:  # Ensure run_dir_path_str is set by master process
                    checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
                    checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)  # Create checkpoints subdir
                    checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
                    log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(),  # Use model_compiled
                                          optimizers=[opt.state_dict() for opt in optimizers])
                    torch.save(log_checkpoint, str(checkpoint_path))  # Convert Path to str for torch.save
                    print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
                else:
                    print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
            break

        # --------------- TRAINING SECTION -----------------
        try:
            inputs, targets = next(train_loader)
        except StopIteration:
            print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
            break  # End if data runs out

        loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))  # Use model_compiled
        loss_train.backward()

        # Manual gradient averaging across ranks (no DDP wrapper is used).
        for param in model_compiled.parameters():
            if param.grad is not None:  # Check if grad exists
                dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)

        # Scale every param group's LR by the shared schedule factor.
        current_lr_val = get_lr(step)
        for opt in optimizers:
            for group in opt.param_groups:
                group["lr"] = group["initial_lr"] * current_lr_val

        # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists ---
        if optimizer2 is not None:  # Check if Muon optimizer was created
            for group in optimizer2.param_groups:
                frac = min(step / 300, 1)  # momentum warmup for muon: 0.85 -> 0.95 over first 300 steps
                group["momentum"] = (1 - frac) * 0.85 + frac * 0.95

        for opt in optimizers:
            opt.step()

        model_compiled.zero_grad(set_to_none=True)  # Use model_compiled

        # Logging (less frequent for training steps)
        if step > 0 and (step % 20 == 0 or step == train_steps - 1):  # Avoid logging at step 0 before first val
            # This time is for the current segment since last validation / t0 reset
            current_segment_time_ms = 1000 * (time.perf_counter() - t0)
            # approx_total_training_time_ms is the total cumulative time
            approx_total_training_time_ms = training_time_ms + current_segment_time_ms

            total_tokens_in_batch = args.train_seq_len * world_size
            # NOTE(review): computed but never included in the log line below.
            train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()

            print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)  # Log to console too

    print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
    print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
           f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
def set_seed(seed):
    """Seed every RNG the script touches (python `random`, numpy, torch CPU
    and, when available, all CUDA devices) so runs are reproducible."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    # Emitted from every rank on purpose: this runs before the rank-aware
    # logging helper exists, and flushing makes it visible immediately.
    print(f"PRINT: Set seed to {seed}", flush=True)
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 09:38:15] [Rank 0] PRINT: Constructing model... +[2025-09-11 09:38:15] [Rank 0] PRINT: Constructing model... +[2025-09-11 09:38:16] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 09:38:16] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 09:38:16] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 09:38:16] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 09:38:16] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 09:38:16] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 09:38:16] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 09:38:16] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 09:38:16] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 09:38:16] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 09:38:18] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 09:38:18] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 09:38:18] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 09:38:18] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 09:38:18] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 09:38:18] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 09:38:24] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 09:38:24] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 09:38:24] [Rank 0] PRINT: Starting warmup... +[2025-09-11 09:38:24] [Rank 0] PRINT: Starting warmup... +[2025-09-11 09:39:01] [Rank 0] PRINT: Warmup complete. +[2025-09-11 09:39:01] [Rank 0] PRINT: Warmup complete. +[2025-09-11 09:39:01] [Rank 0] PRINT: Starting training... +[2025-09-11 09:39:01] [Rank 0] PRINT: Starting training... 
+[2025-09-11 09:39:02] [Rank 0] step:21/10000 train_time:1141ms step_avg:54.35ms +[2025-09-11 09:39:02] [Rank 0] step:21/10000 train_time:1141ms step_avg:54.35ms +[2025-09-11 09:39:03] [Rank 0] step:41/10000 train_time:1885ms step_avg:45.97ms +[2025-09-11 09:39:03] [Rank 0] step:41/10000 train_time:1885ms step_avg:45.97ms +[2025-09-11 09:39:04] [Rank 0] step:61/10000 train_time:2620ms step_avg:42.96ms +[2025-09-11 09:39:04] [Rank 0] step:61/10000 train_time:2620ms step_avg:42.96ms +[2025-09-11 09:39:04] [Rank 0] step:81/10000 train_time:3355ms step_avg:41.42ms +[2025-09-11 09:39:04] [Rank 0] step:81/10000 train_time:3355ms step_avg:41.42ms +[2025-09-11 09:39:05] [Rank 0] step:101/10000 train_time:4089ms step_avg:40.49ms +[2025-09-11 09:39:05] [Rank 0] step:101/10000 train_time:4089ms step_avg:40.49ms +[2025-09-11 09:39:06] [Rank 0] step:121/10000 train_time:4834ms step_avg:39.95ms +[2025-09-11 09:39:06] [Rank 0] step:121/10000 train_time:4834ms step_avg:39.95ms +[2025-09-11 09:39:07] [Rank 0] step:141/10000 train_time:5569ms step_avg:39.50ms +[2025-09-11 09:39:07] [Rank 0] step:141/10000 train_time:5569ms step_avg:39.50ms +[2025-09-11 09:39:07] [Rank 0] step:161/10000 train_time:6302ms step_avg:39.14ms +[2025-09-11 09:39:07] [Rank 0] step:161/10000 train_time:6302ms step_avg:39.14ms +[2025-09-11 09:39:08] [Rank 0] step:181/10000 train_time:7036ms step_avg:38.87ms +[2025-09-11 09:39:08] [Rank 0] step:181/10000 train_time:7036ms step_avg:38.87ms +[2025-09-11 09:39:09] [Rank 0] step:201/10000 train_time:7770ms step_avg:38.66ms +[2025-09-11 09:39:09] [Rank 0] step:201/10000 train_time:7770ms step_avg:38.66ms +[2025-09-11 09:39:10] [Rank 0] step:221/10000 train_time:8504ms step_avg:38.48ms +[2025-09-11 09:39:10] [Rank 0] step:221/10000 train_time:8504ms step_avg:38.48ms +[2025-09-11 09:39:10] [Rank 0] step:241/10000 train_time:9237ms step_avg:38.33ms +[2025-09-11 09:39:10] [Rank 0] step:241/10000 train_time:9237ms step_avg:38.33ms +[2025-09-11 09:39:11] [Rank 0] 
step:261/10000 train_time:9971ms step_avg:38.20ms +[2025-09-11 09:39:11] [Rank 0] step:261/10000 train_time:9971ms step_avg:38.20ms +[2025-09-11 09:39:12] [Rank 0] step:281/10000 train_time:10704ms step_avg:38.09ms +[2025-09-11 09:39:12] [Rank 0] step:281/10000 train_time:10704ms step_avg:38.09ms +[2025-09-11 09:39:12] [Rank 0] step:301/10000 train_time:11437ms step_avg:38.00ms +[2025-09-11 09:39:12] [Rank 0] step:301/10000 train_time:11437ms step_avg:38.00ms +[2025-09-11 09:39:13] [Rank 0] step:321/10000 train_time:12171ms step_avg:37.92ms +[2025-09-11 09:39:13] [Rank 0] step:321/10000 train_time:12171ms step_avg:37.92ms +[2025-09-11 09:39:14] [Rank 0] step:341/10000 train_time:12905ms step_avg:37.84ms +[2025-09-11 09:39:14] [Rank 0] step:341/10000 train_time:12905ms step_avg:37.84ms +[2025-09-11 09:39:15] [Rank 0] step:361/10000 train_time:13639ms step_avg:37.78ms +[2025-09-11 09:39:15] [Rank 0] step:361/10000 train_time:13639ms step_avg:37.78ms +[2025-09-11 09:39:15] [Rank 0] step:381/10000 train_time:14374ms step_avg:37.73ms +[2025-09-11 09:39:15] [Rank 0] step:381/10000 train_time:14374ms step_avg:37.73ms +[2025-09-11 09:39:16] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 09:39:16] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 09:39:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 09:39:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 09:39:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 09:39:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 09:39:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:39:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 09:39:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 09:39:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 09:39:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 09:39:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 09:39:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 09:39:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 09:39:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 09:39:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 09:39:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 09:39:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 09:39:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 09:39:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 09:39:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 09:39:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 09:40:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 09:40:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 09:40:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 09:40:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 09:40:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 09:40:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 09:40:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 09:40:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 09:40:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 09:40:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 09:40:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 09:40:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 09:40:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 09:40:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 09:40:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 09:40:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 09:40:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 09:40:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 09:40:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 09:40:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 09:40:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 09:40:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 09:40:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:40:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:40:02] [Rank 0] PRINT: step:400/10000 val_loss:6.1826 total_sharp:2.6731e-03 L1_sharp:1.4072e-02 L2_sharp:2.5213e-03 L3_sharp:2.1983e-03 L4_sharp:1.0260e-03 L5_sharp:5.1638e-04 L6_sharp:5.7032e-04 L7_sharp:6.2812e-04 L8_sharp:5.0074e-04 L9_sharp:5.6144e-04 L10_sharp:8.8987e-04 L11_sharp:1.4125e-03 L12_sharp:3.0499e-03 total_fnorm:4.4964e+01 total_l1_linf:1.5791e+05 total_spectral:2.2477e+01 L1_fnorm:6.1923e+00 L2_fnorm:6.0405e+00 L3_fnorm:5.9146e+00 L4_fnorm:5.7484e+00 L5_fnorm:5.7290e+00 L6_fnorm:5.6050e+00 L7_fnorm:5.5619e+00 L8_fnorm:5.4982e+00 L9_fnorm:5.4543e+00 L10_fnorm:5.3998e+00 L11_fnorm:5.1595e+00 L12_fnorm:4.9645e+00 L1_l1linf:1.9450e+00 L2_l1linf:1.7887e+00 L3_l1linf:1.7422e+00 L4_l1linf:1.8575e+00 L5_l1linf:1.9299e+00 L6_l1linf:1.8782e+00 L7_l1linf:1.8666e+00 L8_l1linf:1.8327e+00 L9_l1linf:1.7543e+00 L10_l1linf:1.6716e+00 L11_l1linf:1.5179e+00 L12_l1linf:1.3274e+00 L1_spectral:6.0451e-02 L2_spectral:6.0411e-02 L3_spectral:6.0365e-02 L4_spectral:6.0332e-02 L5_spectral:6.0256e-02 L6_spectral:6.0292e-02 L7_spectral:6.0244e-02 L8_spectral:6.0265e-02 L9_spectral:6.0242e-02 L10_spectral:6.0306e-02 L11_spectral:6.0257e-02 L12_spectral:6.0317e-02 train_time:15087ms step_avg:37.72ms +[2025-09-11 09:40:02] [Rank 0] PRINT: step:400/10000 val_loss:6.1826 total_sharp:2.6731e-03 L1_sharp:1.4072e-02 L2_sharp:2.5213e-03 L3_sharp:2.1983e-03 L4_sharp:1.0260e-03 L5_sharp:5.1638e-04 L6_sharp:5.7032e-04 L7_sharp:6.2812e-04 L8_sharp:5.0074e-04 L9_sharp:5.6144e-04 L10_sharp:8.8987e-04 L11_sharp:1.4125e-03 L12_sharp:3.0499e-03 total_fnorm:4.4964e+01 total_l1_linf:1.5791e+05 total_spectral:2.2477e+01 L1_fnorm:6.1923e+00 L2_fnorm:6.0405e+00 L3_fnorm:5.9146e+00 L4_fnorm:5.7484e+00 L5_fnorm:5.7290e+00 L6_fnorm:5.6050e+00 L7_fnorm:5.5619e+00 L8_fnorm:5.4982e+00 L9_fnorm:5.4543e+00 L10_fnorm:5.3998e+00 L11_fnorm:5.1595e+00 L12_fnorm:4.9645e+00 L1_l1linf:1.9450e+00 L2_l1linf:1.7887e+00 L3_l1linf:1.7422e+00 L4_l1linf:1.8575e+00 L5_l1linf:1.9299e+00 
L6_l1linf:1.8782e+00 L7_l1linf:1.8666e+00 L8_l1linf:1.8327e+00 L9_l1linf:1.7543e+00 L10_l1linf:1.6716e+00 L11_l1linf:1.5179e+00 L12_l1linf:1.3274e+00 L1_spectral:6.0451e-02 L2_spectral:6.0411e-02 L3_spectral:6.0365e-02 L4_spectral:6.0332e-02 L5_spectral:6.0256e-02 L6_spectral:6.0292e-02 L7_spectral:6.0244e-02 L8_spectral:6.0265e-02 L9_spectral:6.0242e-02 L10_spectral:6.0306e-02 L11_spectral:6.0257e-02 L12_spectral:6.0317e-02 train_time:15087ms step_avg:37.72ms +[2025-09-11 09:40:35] [Rank 0] step:401/10000 train_time:47370ms step_avg:118.13ms +[2025-09-11 09:40:35] [Rank 0] step:401/10000 train_time:47370ms step_avg:118.13ms +[2025-09-11 09:40:38] [Rank 0] step:421/10000 train_time:50109ms step_avg:119.02ms +[2025-09-11 09:40:38] [Rank 0] step:421/10000 train_time:50109ms step_avg:119.02ms +[2025-09-11 09:40:38] [Rank 0] step:441/10000 train_time:50752ms step_avg:115.08ms +[2025-09-11 09:40:38] [Rank 0] step:441/10000 train_time:50752ms step_avg:115.08ms +[2025-09-11 09:40:39] [Rank 0] step:461/10000 train_time:51396ms step_avg:111.49ms +[2025-09-11 09:40:39] [Rank 0] step:461/10000 train_time:51396ms step_avg:111.49ms +[2025-09-11 09:40:39] [Rank 0] step:481/10000 train_time:52041ms step_avg:108.19ms +[2025-09-11 09:40:39] [Rank 0] step:481/10000 train_time:52041ms step_avg:108.19ms +[2025-09-11 09:40:40] [Rank 0] step:501/10000 train_time:52681ms step_avg:105.15ms +[2025-09-11 09:40:40] [Rank 0] step:501/10000 train_time:52681ms step_avg:105.15ms +[2025-09-11 09:40:41] [Rank 0] step:521/10000 train_time:53324ms step_avg:102.35ms +[2025-09-11 09:40:41] [Rank 0] step:521/10000 train_time:53324ms step_avg:102.35ms +[2025-09-11 09:40:41] [Rank 0] step:541/10000 train_time:53967ms step_avg:99.75ms +[2025-09-11 09:40:41] [Rank 0] step:541/10000 train_time:53967ms step_avg:99.75ms +[2025-09-11 09:40:42] [Rank 0] step:561/10000 train_time:54609ms step_avg:97.34ms +[2025-09-11 09:40:42] [Rank 0] step:561/10000 train_time:54609ms step_avg:97.34ms +[2025-09-11 09:40:43] 
[Rank 0] step:581/10000 train_time:55251ms step_avg:95.10ms +[2025-09-11 09:40:43] [Rank 0] step:581/10000 train_time:55251ms step_avg:95.10ms +[2025-09-11 09:40:43] [Rank 0] step:601/10000 train_time:55893ms step_avg:93.00ms +[2025-09-11 09:40:43] [Rank 0] step:601/10000 train_time:55893ms step_avg:93.00ms +[2025-09-11 09:40:44] [Rank 0] step:621/10000 train_time:56536ms step_avg:91.04ms +[2025-09-11 09:40:44] [Rank 0] step:621/10000 train_time:56536ms step_avg:91.04ms +[2025-09-11 09:40:45] [Rank 0] step:641/10000 train_time:57179ms step_avg:89.20ms +[2025-09-11 09:40:45] [Rank 0] step:641/10000 train_time:57179ms step_avg:89.20ms +[2025-09-11 09:40:45] [Rank 0] step:661/10000 train_time:57822ms step_avg:87.48ms +[2025-09-11 09:40:45] [Rank 0] step:661/10000 train_time:57822ms step_avg:87.48ms +[2025-09-11 09:40:46] [Rank 0] step:681/10000 train_time:58465ms step_avg:85.85ms +[2025-09-11 09:40:46] [Rank 0] step:681/10000 train_time:58465ms step_avg:85.85ms +[2025-09-11 09:40:47] [Rank 0] step:701/10000 train_time:59107ms step_avg:84.32ms +[2025-09-11 09:40:47] [Rank 0] step:701/10000 train_time:59107ms step_avg:84.32ms +[2025-09-11 09:40:47] [Rank 0] step:721/10000 train_time:59750ms step_avg:82.87ms +[2025-09-11 09:40:47] [Rank 0] step:721/10000 train_time:59750ms step_avg:82.87ms +[2025-09-11 09:40:48] [Rank 0] step:741/10000 train_time:60393ms step_avg:81.50ms +[2025-09-11 09:40:48] [Rank 0] step:741/10000 train_time:60393ms step_avg:81.50ms +[2025-09-11 09:40:48] [Rank 0] step:761/10000 train_time:61040ms step_avg:80.21ms +[2025-09-11 09:40:48] [Rank 0] step:761/10000 train_time:61040ms step_avg:80.21ms +[2025-09-11 09:40:49] [Rank 0] step:781/10000 train_time:61687ms step_avg:78.98ms +[2025-09-11 09:40:49] [Rank 0] step:781/10000 train_time:61687ms step_avg:78.98ms +[2025-09-11 09:40:50] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 09:40:50] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 09:40:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 09:40:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 09:41:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 09:41:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 09:41:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:41:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:41:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 09:41:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 09:41:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 09:41:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 09:41:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 09:41:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 09:41:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 09:41:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 09:41:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 09:41:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 09:41:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 09:41:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 09:41:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 09:41:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 09:41:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 09:41:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 09:41:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 09:41:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 09:41:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 09:41:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 09:41:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 09:41:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 09:41:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 09:41:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 09:41:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 09:41:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 09:41:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 09:41:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 09:41:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 09:41:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 09:41:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... 
+[2025-09-11 09:41:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 09:41:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 09:41:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 09:41:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 09:41:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 09:41:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:41:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:41:35] [Rank 0] PRINT: step:800/10000 val_loss:5.7301 total_sharp:1.2928e-03 L1_sharp:3.4705e-03 L2_sharp:9.7699e-04 L3_sharp:4.9761e-04 L4_sharp:3.8543e-04 L5_sharp:1.9429e-04 L6_sharp:2.9211e-04 L7_sharp:1.8215e-04 L8_sharp:4.3877e-04 L9_sharp:3.2004e-04 L10_sharp:5.8621e-04 L11_sharp:9.9613e-04 L12_sharp:4.1268e-03 total_fnorm:4.7000e+01 total_l1_linf:1.4234e+05 total_spectral:2.3625e+01 L1_fnorm:6.2500e+00 L2_fnorm:6.0938e+00 L3_fnorm:6.0312e+00 L4_fnorm:6.0000e+00 L5_fnorm:5.9375e+00 L6_fnorm:5.9062e+00 L7_fnorm:5.8750e+00 L8_fnorm:5.6562e+00 L9_fnorm:5.7812e+00 L10_fnorm:5.7812e+00 L11_fnorm:5.5000e+00 L12_fnorm:5.0000e+00 L1_l1linf:1.9062e+00 L2_l1linf:1.8047e+00 L3_l1linf:1.7656e+00 L4_l1linf:1.8438e+00 L5_l1linf:1.8672e+00 L6_l1linf:1.8438e+00 L7_l1linf:1.8672e+00 L8_l1linf:1.8516e+00 L9_l1linf:1.8203e+00 L10_l1linf:1.7422e+00 L11_l1linf:1.5078e+00 L12_l1linf:1.1484e+00 L1_spectral:6.7021e-02 L2_spectral:6.6541e-02 L3_spectral:6.6481e-02 L4_spectral:6.5981e-02 L5_spectral:6.5862e-02 L6_spectral:6.5762e-02 L7_spectral:6.5607e-02 L8_spectral:6.5605e-02 L9_spectral:6.5702e-02 L10_spectral:6.6062e-02 L11_spectral:6.5830e-02 L12_spectral:6.5741e-02 train_time:62316ms step_avg:77.90ms +[2025-09-11 09:41:35] [Rank 0] PRINT: step:800/10000 val_loss:5.7301 
total_sharp:1.2928e-03 L1_sharp:3.4705e-03 L2_sharp:9.7699e-04 L3_sharp:4.9761e-04 L4_sharp:3.8543e-04 L5_sharp:1.9429e-04 L6_sharp:2.9211e-04 L7_sharp:1.8215e-04 L8_sharp:4.3877e-04 L9_sharp:3.2004e-04 L10_sharp:5.8621e-04 L11_sharp:9.9613e-04 L12_sharp:4.1268e-03 total_fnorm:4.7000e+01 total_l1_linf:1.4234e+05 total_spectral:2.3625e+01 L1_fnorm:6.2500e+00 L2_fnorm:6.0938e+00 L3_fnorm:6.0312e+00 L4_fnorm:6.0000e+00 L5_fnorm:5.9375e+00 L6_fnorm:5.9062e+00 L7_fnorm:5.8750e+00 L8_fnorm:5.6562e+00 L9_fnorm:5.7812e+00 L10_fnorm:5.7812e+00 L11_fnorm:5.5000e+00 L12_fnorm:5.0000e+00 L1_l1linf:1.9062e+00 L2_l1linf:1.8047e+00 L3_l1linf:1.7656e+00 L4_l1linf:1.8438e+00 L5_l1linf:1.8672e+00 L6_l1linf:1.8438e+00 L7_l1linf:1.8672e+00 L8_l1linf:1.8516e+00 L9_l1linf:1.8203e+00 L10_l1linf:1.7422e+00 L11_l1linf:1.5078e+00 L12_l1linf:1.1484e+00 L1_spectral:6.7021e-02 L2_spectral:6.6541e-02 L3_spectral:6.6481e-02 L4_spectral:6.5981e-02 L5_spectral:6.5862e-02 L6_spectral:6.5762e-02 L7_spectral:6.5607e-02 L8_spectral:6.5605e-02 L9_spectral:6.5702e-02 L10_spectral:6.6062e-02 L11_spectral:6.5830e-02 L12_spectral:6.5741e-02 train_time:62316ms step_avg:77.90ms +[2025-09-11 09:41:37] [Rank 0] step:801/10000 train_time:64547ms step_avg:80.58ms +[2025-09-11 09:41:37] [Rank 0] step:801/10000 train_time:64547ms step_avg:80.58ms +[2025-09-11 09:41:38] [Rank 0] step:821/10000 train_time:65290ms step_avg:79.53ms +[2025-09-11 09:41:38] [Rank 0] step:821/10000 train_time:65290ms step_avg:79.53ms +[2025-09-11 09:41:38] [Rank 0] step:841/10000 train_time:65938ms step_avg:78.40ms +[2025-09-11 09:41:38] [Rank 0] step:841/10000 train_time:65938ms step_avg:78.40ms +[2025-09-11 09:41:39] [Rank 0] step:861/10000 train_time:66585ms step_avg:77.33ms +[2025-09-11 09:41:39] [Rank 0] step:861/10000 train_time:66585ms step_avg:77.33ms +[2025-09-11 09:41:40] [Rank 0] step:881/10000 train_time:67233ms step_avg:76.31ms +[2025-09-11 09:41:40] [Rank 0] step:881/10000 train_time:67233ms step_avg:76.31ms +[2025-09-11 
09:41:40] [Rank 0] step:901/10000 train_time:67879ms step_avg:75.34ms +[2025-09-11 09:41:40] [Rank 0] step:901/10000 train_time:67879ms step_avg:75.34ms +[2025-09-11 09:41:41] [Rank 0] step:921/10000 train_time:68527ms step_avg:74.40ms +[2025-09-11 09:41:41] [Rank 0] step:921/10000 train_time:68527ms step_avg:74.40ms +[2025-09-11 09:41:42] [Rank 0] step:941/10000 train_time:69174ms step_avg:73.51ms +[2025-09-11 09:41:42] [Rank 0] step:941/10000 train_time:69174ms step_avg:73.51ms +[2025-09-11 09:41:42] [Rank 0] step:961/10000 train_time:69821ms step_avg:72.65ms +[2025-09-11 09:41:42] [Rank 0] step:961/10000 train_time:69821ms step_avg:72.65ms +[2025-09-11 09:41:43] [Rank 0] step:981/10000 train_time:70468ms step_avg:71.83ms +[2025-09-11 09:41:43] [Rank 0] step:981/10000 train_time:70468ms step_avg:71.83ms +[2025-09-11 09:41:43] [Rank 0] step:1001/10000 train_time:71114ms step_avg:71.04ms +[2025-09-11 09:41:43] [Rank 0] step:1001/10000 train_time:71114ms step_avg:71.04ms +[2025-09-11 09:41:44] [Rank 0] step:1021/10000 train_time:71760ms step_avg:70.28ms +[2025-09-11 09:41:44] [Rank 0] step:1021/10000 train_time:71760ms step_avg:70.28ms +[2025-09-11 09:41:45] [Rank 0] step:1041/10000 train_time:72407ms step_avg:69.56ms +[2025-09-11 09:41:45] [Rank 0] step:1041/10000 train_time:72407ms step_avg:69.56ms +[2025-09-11 09:41:45] [Rank 0] step:1061/10000 train_time:73053ms step_avg:68.85ms +[2025-09-11 09:41:45] [Rank 0] step:1061/10000 train_time:73053ms step_avg:68.85ms +[2025-09-11 09:41:46] [Rank 0] step:1081/10000 train_time:73699ms step_avg:68.18ms +[2025-09-11 09:41:46] [Rank 0] step:1081/10000 train_time:73699ms step_avg:68.18ms +[2025-09-11 09:41:47] [Rank 0] step:1101/10000 train_time:74345ms step_avg:67.53ms +[2025-09-11 09:41:47] [Rank 0] step:1101/10000 train_time:74345ms step_avg:67.53ms +[2025-09-11 09:41:47] [Rank 0] step:1121/10000 train_time:74991ms step_avg:66.90ms +[2025-09-11 09:41:47] [Rank 0] step:1121/10000 train_time:74991ms step_avg:66.90ms 
+[2025-09-11 09:41:48] [Rank 0] step:1141/10000 train_time:75637ms step_avg:66.29ms +[2025-09-11 09:41:48] [Rank 0] step:1141/10000 train_time:75637ms step_avg:66.29ms +[2025-09-11 09:41:49] [Rank 0] step:1161/10000 train_time:76283ms step_avg:65.70ms +[2025-09-11 09:41:49] [Rank 0] step:1161/10000 train_time:76283ms step_avg:65.70ms +[2025-09-11 09:41:49] [Rank 0] step:1181/10000 train_time:76929ms step_avg:65.14ms +[2025-09-11 09:41:49] [Rank 0] step:1181/10000 train_time:76929ms step_avg:65.14ms +[2025-09-11 09:41:50] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 09:41:50] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 09:41:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 09:41:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 09:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 09:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 09:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 09:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 09:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 09:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 09:41:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... 
+[2025-09-11 09:41:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 09:41:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 09:41:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 09:41:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 09:41:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 09:41:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 09:41:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 09:41:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 09:41:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 09:41:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 09:41:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 09:41:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 09:41:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 09:41:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 09:41:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 09:41:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 09:41:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 09:41:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 09:41:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... 
+[2025-09-11 09:41:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 09:41:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 09:41:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 09:41:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 09:41:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 09:41:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 09:41:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 09:41:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 09:42:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 09:42:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 09:42:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 09:42:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 09:42:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:42:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:42:00] [Rank 0] PRINT: step:1200/10000 val_loss:5.3643 total_sharp:9.5416e-04 L1_sharp:2.2870e-03 L2_sharp:4.1831e-04 L3_sharp:4.0840e-04 L4_sharp:2.4358e-04 L5_sharp:2.5838e-04 L6_sharp:2.5907e-04 L7_sharp:2.5689e-04 L8_sharp:4.5249e-04 L9_sharp:2.9874e-04 L10_sharp:4.2723e-04 L11_sharp:6.9738e-04 L12_sharp:2.5604e-03 total_fnorm:4.5250e+01 total_l1_linf:1.3517e+05 total_spectral:2.2625e+01 L1_fnorm:6.2500e+00 L2_fnorm:6.1875e+00 L3_fnorm:6.1875e+00 L4_fnorm:6.1562e+00 L5_fnorm:6.0625e+00 L6_fnorm:6.0938e+00 L7_fnorm:6.0938e+00 L8_fnorm:5.9062e+00 L9_fnorm:6.0625e+00 L10_fnorm:6.0938e+00 L11_fnorm:6.0312e+00 L12_fnorm:5.7812e+00 L1_l1linf:1.8281e+00 L2_l1linf:1.7500e+00 L3_l1linf:1.7188e+00 L4_l1linf:1.7578e+00 L5_l1linf:1.7578e+00 L6_l1linf:1.7266e+00 L7_l1linf:1.7578e+00 L8_l1linf:1.7500e+00 L9_l1linf:1.7578e+00 L10_l1linf:1.7500e+00 L11_l1linf:1.7109e+00 L12_l1linf:1.4688e+00 L1_spectral:7.0175e-02 L2_spectral:6.8935e-02 L3_spectral:6.9442e-02 L4_spectral:6.9059e-02 L5_spectral:6.9055e-02 L6_spectral:6.8820e-02 L7_spectral:6.8592e-02 L8_spectral:6.9683e-02 L9_spectral:6.8248e-02 L10_spectral:6.8451e-02 L11_spectral:6.8268e-02 L12_spectral:6.8299e-02 train_time:77557ms step_avg:64.63ms +[2025-09-11 09:42:00] [Rank 0] PRINT: step:1200/10000 val_loss:5.3643 total_sharp:9.5416e-04 L1_sharp:2.2870e-03 L2_sharp:4.1831e-04 L3_sharp:4.0840e-04 L4_sharp:2.4358e-04 L5_sharp:2.5838e-04 L6_sharp:2.5907e-04 L7_sharp:2.5689e-04 L8_sharp:4.5249e-04 L9_sharp:2.9874e-04 L10_sharp:4.2723e-04 L11_sharp:6.9738e-04 L12_sharp:2.5604e-03 total_fnorm:4.5250e+01 total_l1_linf:1.3517e+05 total_spectral:2.2625e+01 L1_fnorm:6.2500e+00 L2_fnorm:6.1875e+00 L3_fnorm:6.1875e+00 L4_fnorm:6.1562e+00 L5_fnorm:6.0625e+00 L6_fnorm:6.0938e+00 L7_fnorm:6.0938e+00 L8_fnorm:5.9062e+00 L9_fnorm:6.0625e+00 L10_fnorm:6.0938e+00 L11_fnorm:6.0312e+00 L12_fnorm:5.7812e+00 L1_l1linf:1.8281e+00 L2_l1linf:1.7500e+00 L3_l1linf:1.7188e+00 L4_l1linf:1.7578e+00 L5_l1linf:1.7578e+00 
L6_l1linf:1.7266e+00 L7_l1linf:1.7578e+00 L8_l1linf:1.7500e+00 L9_l1linf:1.7578e+00 L10_l1linf:1.7500e+00 L11_l1linf:1.7109e+00 L12_l1linf:1.4688e+00 L1_spectral:7.0175e-02 L2_spectral:6.8935e-02 L3_spectral:6.9442e-02 L4_spectral:6.9059e-02 L5_spectral:6.9055e-02 L6_spectral:6.8820e-02 L7_spectral:6.8592e-02 L8_spectral:6.9683e-02 L9_spectral:6.8248e-02 L10_spectral:6.8451e-02 L11_spectral:6.8268e-02 L12_spectral:6.8299e-02 train_time:77557ms step_avg:64.63ms +[2025-09-11 09:42:03] [Rank 0] step:1201/10000 train_time:79822ms step_avg:66.46ms +[2025-09-11 09:42:03] [Rank 0] step:1201/10000 train_time:79822ms step_avg:66.46ms +[2025-09-11 09:42:03] [Rank 0] step:1221/10000 train_time:80503ms step_avg:65.93ms +[2025-09-11 09:42:03] [Rank 0] step:1221/10000 train_time:80503ms step_avg:65.93ms +[2025-09-11 09:42:04] [Rank 0] step:1241/10000 train_time:81151ms step_avg:65.39ms +[2025-09-11 09:42:04] [Rank 0] step:1241/10000 train_time:81151ms step_avg:65.39ms +[2025-09-11 09:42:05] [Rank 0] step:1261/10000 train_time:81799ms step_avg:64.87ms +[2025-09-11 09:42:05] [Rank 0] step:1261/10000 train_time:81799ms step_avg:64.87ms +[2025-09-11 09:42:05] [Rank 0] step:1281/10000 train_time:82446ms step_avg:64.36ms +[2025-09-11 09:42:05] [Rank 0] step:1281/10000 train_time:82446ms step_avg:64.36ms +[2025-09-11 09:42:06] [Rank 0] step:1301/10000 train_time:83093ms step_avg:63.87ms +[2025-09-11 09:42:06] [Rank 0] step:1301/10000 train_time:83093ms step_avg:63.87ms +[2025-09-11 09:42:07] [Rank 0] step:1321/10000 train_time:83741ms step_avg:63.39ms +[2025-09-11 09:42:07] [Rank 0] step:1321/10000 train_time:83741ms step_avg:63.39ms +[2025-09-11 09:42:07] [Rank 0] step:1341/10000 train_time:84387ms step_avg:62.93ms +[2025-09-11 09:42:07] [Rank 0] step:1341/10000 train_time:84387ms step_avg:62.93ms +[2025-09-11 09:42:08] [Rank 0] step:1361/10000 train_time:85034ms step_avg:62.48ms +[2025-09-11 09:42:08] [Rank 0] step:1361/10000 train_time:85034ms step_avg:62.48ms +[2025-09-11 09:42:09] 
[Rank 0] step:1381/10000 train_time:85680ms step_avg:62.04ms +[2025-09-11 09:42:09] [Rank 0] step:1381/10000 train_time:85680ms step_avg:62.04ms +[2025-09-11 09:42:09] [Rank 0] step:1401/10000 train_time:86327ms step_avg:61.62ms +[2025-09-11 09:42:09] [Rank 0] step:1401/10000 train_time:86327ms step_avg:61.62ms +[2025-09-11 09:42:10] [Rank 0] step:1421/10000 train_time:86973ms step_avg:61.21ms +[2025-09-11 09:42:10] [Rank 0] step:1421/10000 train_time:86973ms step_avg:61.21ms +[2025-09-11 09:42:11] [Rank 0] step:1441/10000 train_time:87619ms step_avg:60.80ms +[2025-09-11 09:42:11] [Rank 0] step:1441/10000 train_time:87619ms step_avg:60.80ms +[2025-09-11 09:42:11] [Rank 0] step:1461/10000 train_time:88264ms step_avg:60.41ms +[2025-09-11 09:42:11] [Rank 0] step:1461/10000 train_time:88264ms step_avg:60.41ms +[2025-09-11 09:42:12] [Rank 0] step:1481/10000 train_time:88911ms step_avg:60.03ms +[2025-09-11 09:42:12] [Rank 0] step:1481/10000 train_time:88911ms step_avg:60.03ms +[2025-09-11 09:42:13] [Rank 0] step:1501/10000 train_time:89561ms step_avg:59.67ms +[2025-09-11 09:42:13] [Rank 0] step:1501/10000 train_time:89561ms step_avg:59.67ms +[2025-09-11 09:42:13] [Rank 0] step:1521/10000 train_time:90211ms step_avg:59.31ms +[2025-09-11 09:42:13] [Rank 0] step:1521/10000 train_time:90211ms step_avg:59.31ms +[2025-09-11 09:42:14] [Rank 0] step:1541/10000 train_time:90861ms step_avg:58.96ms +[2025-09-11 09:42:14] [Rank 0] step:1541/10000 train_time:90861ms step_avg:58.96ms +[2025-09-11 09:42:14] [Rank 0] step:1561/10000 train_time:91512ms step_avg:58.62ms +[2025-09-11 09:42:14] [Rank 0] step:1561/10000 train_time:91512ms step_avg:58.62ms +[2025-09-11 09:42:15] [Rank 0] step:1581/10000 train_time:92163ms step_avg:58.29ms +[2025-09-11 09:42:15] [Rank 0] step:1581/10000 train_time:92163ms step_avg:58.29ms +[2025-09-11 09:42:16] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 09:42:16] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 09:42:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 09:42:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 09:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 09:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 09:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 09:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 09:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 09:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 09:42:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 09:42:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 09:42:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 09:42:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 09:42:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 09:42:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 09:42:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 09:42:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 09:42:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 09:42:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 09:42:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 09:42:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 09:42:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 09:42:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 09:42:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 09:42:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 09:42:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 09:42:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 09:42:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 09:42:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 09:42:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 09:42:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 09:42:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 09:42:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 09:42:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 09:42:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 09:42:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 09:42:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 09:42:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 09:42:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 09:42:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 09:42:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 09:42:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:42:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:42:27] [Rank 0] PRINT: step:1600/10000 val_loss:5.1916 total_sharp:7.4893e-04 L1_sharp:1.2021e-03 L2_sharp:3.1740e-04 L3_sharp:2.2981e-04 L4_sharp:1.0548e-04 L5_sharp:2.0449e-04 L6_sharp:8.4565e-05 L7_sharp:1.2319e-04 L8_sharp:3.2730e-04 L9_sharp:2.1985e-04 L10_sharp:2.8661e-04 L11_sharp:5.5343e-04 L12_sharp:3.2248e-03 total_fnorm:4.3500e+01 total_l1_linf:1.2595e+05 total_spectral:2.1625e+01 L1_fnorm:6.2500e+00 L2_fnorm:6.1875e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.2188e+00 L5_fnorm:6.1250e+00 L6_fnorm:6.1562e+00 L7_fnorm:6.1562e+00 L8_fnorm:5.9688e+00 L9_fnorm:6.1562e+00 L10_fnorm:6.1875e+00 L11_fnorm:6.1250e+00 L12_fnorm:5.9375e+00 L1_l1linf:1.7969e+00 L2_l1linf:1.7031e+00 L3_l1linf:1.6797e+00 L4_l1linf:1.7031e+00 L5_l1linf:1.6875e+00 L6_l1linf:1.7266e+00 L7_l1linf:1.6797e+00 L8_l1linf:1.6797e+00 L9_l1linf:1.6797e+00 L10_l1linf:1.6719e+00 L11_l1linf:1.6719e+00 L12_l1linf:1.4766e+00 L1_spectral:7.2256e-02 L2_spectral:7.0558e-02 L3_spectral:7.1280e-02 L4_spectral:7.0939e-02 L5_spectral:7.1190e-02 L6_spectral:7.0824e-02 L7_spectral:7.0826e-02 L8_spectral:7.1785e-02 L9_spectral:7.1231e-02 L10_spectral:7.0568e-02 L11_spectral:7.0414e-02 L12_spectral:7.0172e-02 train_time:92795ms step_avg:58.00ms +[2025-09-11 09:42:27] [Rank 0] PRINT: step:1600/10000 
val_loss:5.1916 total_sharp:7.4893e-04 L1_sharp:1.2021e-03 L2_sharp:3.1740e-04 L3_sharp:2.2981e-04 L4_sharp:1.0548e-04 L5_sharp:2.0449e-04 L6_sharp:8.4565e-05 L7_sharp:1.2319e-04 L8_sharp:3.2730e-04 L9_sharp:2.1985e-04 L10_sharp:2.8661e-04 L11_sharp:5.5343e-04 L12_sharp:3.2248e-03 total_fnorm:4.3500e+01 total_l1_linf:1.2595e+05 total_spectral:2.1625e+01 L1_fnorm:6.2500e+00 L2_fnorm:6.1875e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.2188e+00 L5_fnorm:6.1250e+00 L6_fnorm:6.1562e+00 L7_fnorm:6.1562e+00 L8_fnorm:5.9688e+00 L9_fnorm:6.1562e+00 L10_fnorm:6.1875e+00 L11_fnorm:6.1250e+00 L12_fnorm:5.9375e+00 L1_l1linf:1.7969e+00 L2_l1linf:1.7031e+00 L3_l1linf:1.6797e+00 L4_l1linf:1.7031e+00 L5_l1linf:1.6875e+00 L6_l1linf:1.7266e+00 L7_l1linf:1.6797e+00 L8_l1linf:1.6797e+00 L9_l1linf:1.6797e+00 L10_l1linf:1.6719e+00 L11_l1linf:1.6719e+00 L12_l1linf:1.4766e+00 L1_spectral:7.2256e-02 L2_spectral:7.0558e-02 L3_spectral:7.1280e-02 L4_spectral:7.0939e-02 L5_spectral:7.1190e-02 L6_spectral:7.0824e-02 L7_spectral:7.0826e-02 L8_spectral:7.1785e-02 L9_spectral:7.1231e-02 L10_spectral:7.0568e-02 L11_spectral:7.0414e-02 L12_spectral:7.0172e-02 train_time:92795ms step_avg:58.00ms +[2025-09-11 09:42:29] [Rank 0] step:1601/10000 train_time:94900ms step_avg:59.28ms +[2025-09-11 09:42:29] [Rank 0] step:1601/10000 train_time:94900ms step_avg:59.28ms +[2025-09-11 09:42:29] [Rank 0] step:1621/10000 train_time:95559ms step_avg:58.95ms +[2025-09-11 09:42:29] [Rank 0] step:1621/10000 train_time:95559ms step_avg:58.95ms +[2025-09-11 09:42:30] [Rank 0] step:1641/10000 train_time:96211ms step_avg:58.63ms +[2025-09-11 09:42:30] [Rank 0] step:1641/10000 train_time:96211ms step_avg:58.63ms +[2025-09-11 09:42:31] [Rank 0] step:1661/10000 train_time:96864ms step_avg:58.32ms +[2025-09-11 09:42:31] [Rank 0] step:1661/10000 train_time:96864ms step_avg:58.32ms +[2025-09-11 09:42:31] [Rank 0] step:1681/10000 train_time:97515ms step_avg:58.01ms +[2025-09-11 09:42:31] [Rank 0] step:1681/10000 train_time:97515ms 
step_avg:58.01ms +[2025-09-11 09:42:32] [Rank 0] step:1701/10000 train_time:98166ms step_avg:57.71ms +[2025-09-11 09:42:32] [Rank 0] step:1701/10000 train_time:98166ms step_avg:57.71ms +[2025-09-11 09:42:33] [Rank 0] step:1721/10000 train_time:98817ms step_avg:57.42ms +[2025-09-11 09:42:33] [Rank 0] step:1721/10000 train_time:98817ms step_avg:57.42ms +[2025-09-11 09:42:33] [Rank 0] step:1741/10000 train_time:99468ms step_avg:57.13ms +[2025-09-11 09:42:33] [Rank 0] step:1741/10000 train_time:99468ms step_avg:57.13ms +[2025-09-11 09:42:34] [Rank 0] step:1761/10000 train_time:100119ms step_avg:56.85ms +[2025-09-11 09:42:34] [Rank 0] step:1761/10000 train_time:100119ms step_avg:56.85ms +[2025-09-11 09:42:35] [Rank 0] step:1781/10000 train_time:100770ms step_avg:56.58ms +[2025-09-11 09:42:35] [Rank 0] step:1781/10000 train_time:100770ms step_avg:56.58ms +[2025-09-11 09:42:35] [Rank 0] step:1801/10000 train_time:101421ms step_avg:56.31ms +[2025-09-11 09:42:35] [Rank 0] step:1801/10000 train_time:101421ms step_avg:56.31ms +[2025-09-11 09:42:36] [Rank 0] step:1821/10000 train_time:102072ms step_avg:56.05ms +[2025-09-11 09:42:36] [Rank 0] step:1821/10000 train_time:102072ms step_avg:56.05ms +[2025-09-11 09:42:37] [Rank 0] step:1841/10000 train_time:102723ms step_avg:55.80ms +[2025-09-11 09:42:37] [Rank 0] step:1841/10000 train_time:102723ms step_avg:55.80ms +[2025-09-11 09:42:37] [Rank 0] step:1861/10000 train_time:103375ms step_avg:55.55ms +[2025-09-11 09:42:37] [Rank 0] step:1861/10000 train_time:103375ms step_avg:55.55ms +[2025-09-11 09:42:38] [Rank 0] step:1881/10000 train_time:104026ms step_avg:55.30ms +[2025-09-11 09:42:38] [Rank 0] step:1881/10000 train_time:104026ms step_avg:55.30ms +[2025-09-11 09:42:39] [Rank 0] step:1901/10000 train_time:104677ms step_avg:55.06ms +[2025-09-11 09:42:39] [Rank 0] step:1901/10000 train_time:104677ms step_avg:55.06ms +[2025-09-11 09:42:39] [Rank 0] step:1921/10000 train_time:105328ms step_avg:54.83ms +[2025-09-11 09:42:39] [Rank 0] 
step:1921/10000 train_time:105328ms step_avg:54.83ms +[2025-09-11 09:42:40] [Rank 0] step:1941/10000 train_time:105980ms step_avg:54.60ms +[2025-09-11 09:42:40] [Rank 0] step:1941/10000 train_time:105980ms step_avg:54.60ms +[2025-09-11 09:42:40] [Rank 0] step:1961/10000 train_time:106631ms step_avg:54.38ms +[2025-09-11 09:42:40] [Rank 0] step:1961/10000 train_time:106631ms step_avg:54.38ms +[2025-09-11 09:42:41] [Rank 0] step:1981/10000 train_time:107282ms step_avg:54.16ms +[2025-09-11 09:42:41] [Rank 0] step:1981/10000 train_time:107282ms step_avg:54.16ms +[2025-09-11 09:42:42] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 09:42:42] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 09:42:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 09:42:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 09:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 09:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 09:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 09:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 09:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 09:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 09:42:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 09:42:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 09:42:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 09:42:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 09:42:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 09:42:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 09:42:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 09:42:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 09:42:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 09:42:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 09:42:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 09:42:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 09:42:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 09:42:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 09:42:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 09:42:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 09:42:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 09:42:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 09:42:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 09:42:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 09:42:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 09:42:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 09:42:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 09:42:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 09:42:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 09:42:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 09:42:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 09:42:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 09:42:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 09:42:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 09:42:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 09:42:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 09:42:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:42:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:42:52] [Rank 0] PRINT: step:2000/10000 val_loss:5.0388 total_sharp:6.9151e-04 L1_sharp:1.1592e-03 L2_sharp:2.8133e-04 L3_sharp:1.6016e-04 L4_sharp:9.2302e-05 L5_sharp:9.3099e-05 L6_sharp:1.2355e-04 L7_sharp:1.0904e-04 L8_sharp:3.1135e-04 L9_sharp:2.3122e-04 L10_sharp:2.5594e-04 L11_sharp:5.3030e-04 L12_sharp:3.0661e-03 total_fnorm:4.3500e+01 total_l1_linf:1.2595e+05 total_spectral:2.1500e+01 L1_fnorm:6.2812e+00 L2_fnorm:6.1562e+00 L3_fnorm:6.2500e+00 L4_fnorm:6.2500e+00 L5_fnorm:6.1875e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.2188e+00 L8_fnorm:5.9688e+00 L9_fnorm:6.2188e+00 L10_fnorm:6.2500e+00 L11_fnorm:6.2188e+00 L12_fnorm:6.0312e+00 L1_l1linf:1.7656e+00 L2_l1linf:1.6797e+00 L3_l1linf:1.6406e+00 L4_l1linf:1.6406e+00 L5_l1linf:1.6328e+00 L6_l1linf:1.6328e+00 L7_l1linf:1.7031e+00 L8_l1linf:1.6250e+00 L9_l1linf:1.6250e+00 L10_l1linf:1.6484e+00 L11_l1linf:1.6250e+00 L12_l1linf:1.4688e+00 L1_spectral:7.4376e-02 L2_spectral:7.2059e-02 L3_spectral:7.2655e-02 L4_spectral:7.2468e-02 L5_spectral:7.3238e-02 L6_spectral:7.2849e-02 L7_spectral:7.2740e-02 L8_spectral:7.3943e-02 L9_spectral:7.3390e-02 L10_spectral:7.2476e-02 L11_spectral:7.2447e-02 L12_spectral:7.1840e-02 train_time:107915ms step_avg:53.96ms +[2025-09-11 09:42:52] [Rank 0] PRINT: step:2000/10000 val_loss:5.0388 total_sharp:6.9151e-04 L1_sharp:1.1592e-03 L2_sharp:2.8133e-04 L3_sharp:1.6016e-04 L4_sharp:9.2302e-05 L5_sharp:9.3099e-05 L6_sharp:1.2355e-04 L7_sharp:1.0904e-04 L8_sharp:3.1135e-04 L9_sharp:2.3122e-04 L10_sharp:2.5594e-04 L11_sharp:5.3030e-04 L12_sharp:3.0661e-03 total_fnorm:4.3500e+01 total_l1_linf:1.2595e+05 total_spectral:2.1500e+01 L1_fnorm:6.2812e+00 L2_fnorm:6.1562e+00 L3_fnorm:6.2500e+00 L4_fnorm:6.2500e+00 L5_fnorm:6.1875e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.2188e+00 L8_fnorm:5.9688e+00 L9_fnorm:6.2188e+00 L10_fnorm:6.2500e+00 L11_fnorm:6.2188e+00 L12_fnorm:6.0312e+00 L1_l1linf:1.7656e+00 L2_l1linf:1.6797e+00 L3_l1linf:1.6406e+00 L4_l1linf:1.6406e+00 L5_l1linf:1.6328e+00 
L6_l1linf:1.6328e+00 L7_l1linf:1.7031e+00 L8_l1linf:1.6250e+00 L9_l1linf:1.6250e+00 L10_l1linf:1.6484e+00 L11_l1linf:1.6250e+00 L12_l1linf:1.4688e+00 L1_spectral:7.4376e-02 L2_spectral:7.2059e-02 L3_spectral:7.2655e-02 L4_spectral:7.2468e-02 L5_spectral:7.3238e-02 L6_spectral:7.2849e-02 L7_spectral:7.2740e-02 L8_spectral:7.3943e-02 L9_spectral:7.3390e-02 L10_spectral:7.2476e-02 L11_spectral:7.2447e-02 L12_spectral:7.1840e-02 train_time:107915ms step_avg:53.96ms +[2025-09-11 09:42:54] [Rank 0] step:2001/10000 train_time:109909ms step_avg:54.93ms +[2025-09-11 09:42:54] [Rank 0] step:2001/10000 train_time:109909ms step_avg:54.93ms +[2025-09-11 09:42:55] [Rank 0] step:2021/10000 train_time:110566ms step_avg:54.71ms +[2025-09-11 09:42:55] [Rank 0] step:2021/10000 train_time:110566ms step_avg:54.71ms +[2025-09-11 09:42:56] [Rank 0] step:2041/10000 train_time:111218ms step_avg:54.49ms +[2025-09-11 09:42:56] [Rank 0] step:2041/10000 train_time:111218ms step_avg:54.49ms +[2025-09-11 09:42:56] [Rank 0] step:2061/10000 train_time:111870ms step_avg:54.28ms +[2025-09-11 09:42:56] [Rank 0] step:2061/10000 train_time:111870ms step_avg:54.28ms +[2025-09-11 09:42:57] [Rank 0] step:2081/10000 train_time:112523ms step_avg:54.07ms +[2025-09-11 09:42:57] [Rank 0] step:2081/10000 train_time:112523ms step_avg:54.07ms +[2025-09-11 09:42:58] [Rank 0] step:2101/10000 train_time:113177ms step_avg:53.87ms +[2025-09-11 09:42:58] [Rank 0] step:2101/10000 train_time:113177ms step_avg:53.87ms +[2025-09-11 09:42:58] [Rank 0] step:2121/10000 train_time:113827ms step_avg:53.67ms +[2025-09-11 09:42:58] [Rank 0] step:2121/10000 train_time:113827ms step_avg:53.67ms +[2025-09-11 09:42:59] [Rank 0] step:2141/10000 train_time:114479ms step_avg:53.47ms +[2025-09-11 09:42:59] [Rank 0] step:2141/10000 train_time:114479ms step_avg:53.47ms +[2025-09-11 09:43:00] [Rank 0] step:2161/10000 train_time:115130ms step_avg:53.28ms +[2025-09-11 09:43:00] [Rank 0] step:2161/10000 train_time:115130ms step_avg:53.28ms 
+[2025-09-11 09:43:01] [Rank 0] step:2181/10000 train_time:116375ms step_avg:53.36ms +[2025-09-11 09:43:01] [Rank 0] step:2181/10000 train_time:116375ms step_avg:53.36ms +[2025-09-11 09:43:01] [Rank 0] step:2201/10000 train_time:117027ms step_avg:53.17ms +[2025-09-11 09:43:01] [Rank 0] step:2201/10000 train_time:117027ms step_avg:53.17ms +[2025-09-11 09:43:02] [Rank 0] step:2221/10000 train_time:117679ms step_avg:52.98ms +[2025-09-11 09:43:02] [Rank 0] step:2221/10000 train_time:117679ms step_avg:52.98ms +[2025-09-11 09:43:03] [Rank 0] step:2241/10000 train_time:118656ms step_avg:52.95ms +[2025-09-11 09:43:03] [Rank 0] step:2241/10000 train_time:118656ms step_avg:52.95ms +[2025-09-11 09:43:04] [Rank 0] step:2261/10000 train_time:119320ms step_avg:52.77ms +[2025-09-11 09:43:04] [Rank 0] step:2261/10000 train_time:119320ms step_avg:52.77ms +[2025-09-11 09:43:04] [Rank 0] step:2281/10000 train_time:119984ms step_avg:52.60ms +[2025-09-11 09:43:04] [Rank 0] step:2281/10000 train_time:119984ms step_avg:52.60ms +[2025-09-11 09:43:05] [Rank 0] step:2301/10000 train_time:120648ms step_avg:52.43ms +[2025-09-11 09:43:05] [Rank 0] step:2301/10000 train_time:120648ms step_avg:52.43ms +[2025-09-11 09:43:06] [Rank 0] step:2321/10000 train_time:121311ms step_avg:52.27ms +[2025-09-11 09:43:06] [Rank 0] step:2321/10000 train_time:121311ms step_avg:52.27ms +[2025-09-11 09:43:06] [Rank 0] step:2341/10000 train_time:121975ms step_avg:52.10ms +[2025-09-11 09:43:06] [Rank 0] step:2341/10000 train_time:121975ms step_avg:52.10ms +[2025-09-11 09:43:07] [Rank 0] step:2361/10000 train_time:122639ms step_avg:51.94ms +[2025-09-11 09:43:07] [Rank 0] step:2361/10000 train_time:122639ms step_avg:51.94ms +[2025-09-11 09:43:08] [Rank 0] step:2381/10000 train_time:123302ms step_avg:51.79ms +[2025-09-11 09:43:08] [Rank 0] step:2381/10000 train_time:123302ms step_avg:51.79ms +[2025-09-11 09:43:08] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 09:43:08] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 09:43:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 09:43:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 09:43:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 09:43:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 09:43:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:43:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:43:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 09:43:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 09:43:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 09:43:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 09:43:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 09:43:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 09:43:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 09:43:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 09:43:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 09:43:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 09:43:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 09:43:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 09:43:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 09:43:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 09:43:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 09:43:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 09:43:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 09:43:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 09:43:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 09:43:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 09:43:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 09:43:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 09:43:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 09:43:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 09:43:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 09:43:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 09:43:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 09:43:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 09:43:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 09:43:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 09:43:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 09:43:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 09:43:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 09:43:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 09:43:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 09:43:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 09:43:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:43:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:43:19] [Rank 0] PRINT: step:2400/10000 val_loss:4.9056 total_sharp:7.1689e-04 L1_sharp:9.2008e-04 L2_sharp:2.7003e-04 L3_sharp:1.5092e-04 L4_sharp:1.2562e-04 L5_sharp:9.5642e-05 L6_sharp:1.3374e-04 L7_sharp:1.3252e-04 L8_sharp:3.6555e-04 L9_sharp:2.7092e-04 L10_sharp:3.3303e-04 L11_sharp:5.1155e-04 L12_sharp:3.1930e-03 total_fnorm:4.1750e+01 total_l1_linf:1.1827e+05 total_spectral:2.0625e+01 L1_fnorm:6.2812e+00 L2_fnorm:6.1875e+00 L3_fnorm:6.2500e+00 L4_fnorm:6.2500e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.2500e+00 L10_fnorm:6.2500e+00 L11_fnorm:6.2188e+00 L12_fnorm:6.1562e+00 L1_l1linf:1.7422e+00 L2_l1linf:1.6328e+00 L3_l1linf:1.6172e+00 L4_l1linf:1.6172e+00 L5_l1linf:1.6016e+00 L6_l1linf:1.6016e+00 L7_l1linf:1.6250e+00 L8_l1linf:1.5859e+00 L9_l1linf:1.5781e+00 L10_l1linf:1.5781e+00 L11_l1linf:1.5859e+00 L12_l1linf:1.4766e+00 L1_spectral:7.5087e-02 L2_spectral:7.3239e-02 L3_spectral:7.4232e-02 L4_spectral:7.3555e-02 L5_spectral:7.4001e-02 L6_spectral:7.4319e-02 L7_spectral:7.3972e-02 L8_spectral:7.4411e-02 L9_spectral:7.4958e-02 L10_spectral:7.4406e-02 L11_spectral:7.3469e-02 L12_spectral:7.3650e-02 train_time:123948ms step_avg:51.64ms +[2025-09-11 09:43:19] [Rank 0] PRINT: step:2400/10000 
val_loss:4.9056 total_sharp:7.1689e-04 L1_sharp:9.2008e-04 L2_sharp:2.7003e-04 L3_sharp:1.5092e-04 L4_sharp:1.2562e-04 L5_sharp:9.5642e-05 L6_sharp:1.3374e-04 L7_sharp:1.3252e-04 L8_sharp:3.6555e-04 L9_sharp:2.7092e-04 L10_sharp:3.3303e-04 L11_sharp:5.1155e-04 L12_sharp:3.1930e-03 total_fnorm:4.1750e+01 total_l1_linf:1.1827e+05 total_spectral:2.0625e+01 L1_fnorm:6.2812e+00 L2_fnorm:6.1875e+00 L3_fnorm:6.2500e+00 L4_fnorm:6.2500e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.2500e+00 L10_fnorm:6.2500e+00 L11_fnorm:6.2188e+00 L12_fnorm:6.1562e+00 L1_l1linf:1.7422e+00 L2_l1linf:1.6328e+00 L3_l1linf:1.6172e+00 L4_l1linf:1.6172e+00 L5_l1linf:1.6016e+00 L6_l1linf:1.6016e+00 L7_l1linf:1.6250e+00 L8_l1linf:1.5859e+00 L9_l1linf:1.5781e+00 L10_l1linf:1.5781e+00 L11_l1linf:1.5859e+00 L12_l1linf:1.4766e+00 L1_spectral:7.5087e-02 L2_spectral:7.3239e-02 L3_spectral:7.4232e-02 L4_spectral:7.3555e-02 L5_spectral:7.4001e-02 L6_spectral:7.4319e-02 L7_spectral:7.3972e-02 L8_spectral:7.4411e-02 L9_spectral:7.4958e-02 L10_spectral:7.4406e-02 L11_spectral:7.3469e-02 L12_spectral:7.3650e-02 train_time:123948ms step_avg:51.64ms +[2025-09-11 09:43:21] [Rank 0] step:2401/10000 train_time:125853ms step_avg:52.42ms +[2025-09-11 09:43:21] [Rank 0] step:2401/10000 train_time:125853ms step_avg:52.42ms +[2025-09-11 09:43:22] [Rank 0] step:2421/10000 train_time:126540ms step_avg:52.27ms +[2025-09-11 09:43:22] [Rank 0] step:2421/10000 train_time:126540ms step_avg:52.27ms +[2025-09-11 09:43:23] [Rank 0] step:2441/10000 train_time:127205ms step_avg:52.11ms +[2025-09-11 09:43:23] [Rank 0] step:2441/10000 train_time:127205ms step_avg:52.11ms +[2025-09-11 09:43:23] [Rank 0] step:2461/10000 train_time:127871ms step_avg:51.96ms +[2025-09-11 09:43:23] [Rank 0] step:2461/10000 train_time:127871ms step_avg:51.96ms +[2025-09-11 09:43:24] [Rank 0] step:2481/10000 train_time:128536ms step_avg:51.81ms +[2025-09-11 09:43:24] [Rank 0] step:2481/10000 
train_time:128536ms step_avg:51.81ms +[2025-09-11 09:43:25] [Rank 0] step:2501/10000 train_time:129201ms step_avg:51.66ms +[2025-09-11 09:43:25] [Rank 0] step:2501/10000 train_time:129201ms step_avg:51.66ms +[2025-09-11 09:43:25] [Rank 0] step:2521/10000 train_time:129866ms step_avg:51.51ms +[2025-09-11 09:43:25] [Rank 0] step:2521/10000 train_time:129866ms step_avg:51.51ms +[2025-09-11 09:43:26] [Rank 0] step:2541/10000 train_time:130531ms step_avg:51.37ms +[2025-09-11 09:43:26] [Rank 0] step:2541/10000 train_time:130531ms step_avg:51.37ms +[2025-09-11 09:43:27] [Rank 0] step:2561/10000 train_time:131195ms step_avg:51.23ms +[2025-09-11 09:43:27] [Rank 0] step:2561/10000 train_time:131195ms step_avg:51.23ms +[2025-09-11 09:43:27] [Rank 0] step:2581/10000 train_time:131861ms step_avg:51.09ms +[2025-09-11 09:43:27] [Rank 0] step:2581/10000 train_time:131861ms step_avg:51.09ms +[2025-09-11 09:43:28] [Rank 0] step:2601/10000 train_time:132525ms step_avg:50.95ms +[2025-09-11 09:43:28] [Rank 0] step:2601/10000 train_time:132525ms step_avg:50.95ms +[2025-09-11 09:43:29] [Rank 0] step:2621/10000 train_time:133189ms step_avg:50.82ms +[2025-09-11 09:43:29] [Rank 0] step:2621/10000 train_time:133189ms step_avg:50.82ms +[2025-09-11 09:43:29] [Rank 0] step:2641/10000 train_time:133854ms step_avg:50.68ms +[2025-09-11 09:43:29] [Rank 0] step:2641/10000 train_time:133854ms step_avg:50.68ms +[2025-09-11 09:43:30] [Rank 0] step:2661/10000 train_time:134520ms step_avg:50.55ms +[2025-09-11 09:43:30] [Rank 0] step:2661/10000 train_time:134520ms step_avg:50.55ms +[2025-09-11 09:43:31] [Rank 0] step:2681/10000 train_time:135184ms step_avg:50.42ms +[2025-09-11 09:43:31] [Rank 0] step:2681/10000 train_time:135184ms step_avg:50.42ms +[2025-09-11 09:43:31] [Rank 0] step:2701/10000 train_time:135851ms step_avg:50.30ms +[2025-09-11 09:43:31] [Rank 0] step:2701/10000 train_time:135851ms step_avg:50.30ms +[2025-09-11 09:43:32] [Rank 0] step:2721/10000 train_time:136515ms step_avg:50.17ms 
+[2025-09-11 09:43:32] [Rank 0] step:2721/10000 train_time:136515ms step_avg:50.17ms +[2025-09-11 09:43:33] [Rank 0] step:2741/10000 train_time:137181ms step_avg:50.05ms +[2025-09-11 09:43:33] [Rank 0] step:2741/10000 train_time:137181ms step_avg:50.05ms +[2025-09-11 09:43:33] [Rank 0] step:2761/10000 train_time:137846ms step_avg:49.93ms +[2025-09-11 09:43:33] [Rank 0] step:2761/10000 train_time:137846ms step_avg:49.93ms +[2025-09-11 09:43:34] [Rank 0] step:2781/10000 train_time:138510ms step_avg:49.81ms +[2025-09-11 09:43:34] [Rank 0] step:2781/10000 train_time:138510ms step_avg:49.81ms +[2025-09-11 09:43:35] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 09:43:35] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 09:43:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 09:43:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 09:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 09:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 09:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 09:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 09:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 09:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 09:43:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 09:43:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 09:43:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 09:43:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 09:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 09:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 09:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 09:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 09:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 09:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 09:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 09:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 09:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 09:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 09:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 09:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 09:43:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 09:43:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 09:43:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 09:43:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 09:43:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 09:43:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 09:43:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 09:43:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 09:43:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 09:43:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 09:43:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 09:43:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 09:43:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 09:43:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 09:43:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 09:43:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 09:43:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:43:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:43:46] [Rank 0] PRINT: step:2800/10000 val_loss:4.8279 total_sharp:7.1700e-04 L1_sharp:8.9494e-04 L2_sharp:2.8935e-04 L3_sharp:1.1009e-04 L4_sharp:7.8244e-05 L5_sharp:9.3144e-05 L6_sharp:1.1219e-04 L7_sharp:1.0468e-04 L8_sharp:3.6389e-04 L9_sharp:2.5677e-04 L10_sharp:2.7801e-04 L11_sharp:5.2120e-04 L12_sharp:1.5126e-03 total_fnorm:4.0500e+01 total_l1_linf:1.1366e+05 total_spectral:2.0000e+01 L1_fnorm:6.2812e+00 L2_fnorm:6.1562e+00 L3_fnorm:6.2500e+00 L4_fnorm:6.2188e+00 L5_fnorm:6.1875e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.2500e+00 L10_fnorm:6.2500e+00 L11_fnorm:6.2500e+00 L12_fnorm:6.1562e+00 L1_l1linf:1.7266e+00 L2_l1linf:1.5859e+00 L3_l1linf:1.5781e+00 L4_l1linf:1.5781e+00 L5_l1linf:1.5781e+00 L6_l1linf:1.5781e+00 L7_l1linf:1.6406e+00 L8_l1linf:1.5703e+00 L9_l1linf:1.5547e+00 L10_l1linf:1.5391e+00 L11_l1linf:1.6016e+00 L12_l1linf:1.4922e+00 L1_spectral:7.6136e-02 L2_spectral:7.4257e-02 L3_spectral:7.5131e-02 L4_spectral:7.4797e-02 L5_spectral:7.5071e-02 L6_spectral:7.4985e-02 L7_spectral:7.4914e-02 L8_spectral:7.5337e-02 L9_spectral:7.5593e-02 L10_spectral:7.5654e-02 L11_spectral:7.5009e-02 L12_spectral:7.4522e-02 train_time:139156ms step_avg:49.70ms +[2025-09-11 09:43:46] [Rank 0] PRINT: step:2800/10000 val_loss:4.8279 total_sharp:7.1700e-04 L1_sharp:8.9494e-04 L2_sharp:2.8935e-04 L3_sharp:1.1009e-04 L4_sharp:7.8244e-05 L5_sharp:9.3144e-05 L6_sharp:1.1219e-04 L7_sharp:1.0468e-04 L8_sharp:3.6389e-04 L9_sharp:2.5677e-04 L10_sharp:2.7801e-04 L11_sharp:5.2120e-04 L12_sharp:1.5126e-03 total_fnorm:4.0500e+01 total_l1_linf:1.1366e+05 total_spectral:2.0000e+01 L1_fnorm:6.2812e+00 L2_fnorm:6.1562e+00 L3_fnorm:6.2500e+00 L4_fnorm:6.2188e+00 L5_fnorm:6.1875e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.2500e+00 L10_fnorm:6.2500e+00 L11_fnorm:6.2500e+00 L12_fnorm:6.1562e+00 L1_l1linf:1.7266e+00 L2_l1linf:1.5859e+00 L3_l1linf:1.5781e+00 L4_l1linf:1.5781e+00 L5_l1linf:1.5781e+00 
L6_l1linf:1.5781e+00 L7_l1linf:1.6406e+00 L8_l1linf:1.5703e+00 L9_l1linf:1.5547e+00 L10_l1linf:1.5391e+00 L11_l1linf:1.6016e+00 L12_l1linf:1.4922e+00 L1_spectral:7.6136e-02 L2_spectral:7.4257e-02 L3_spectral:7.5131e-02 L4_spectral:7.4797e-02 L5_spectral:7.5071e-02 L6_spectral:7.4985e-02 L7_spectral:7.4914e-02 L8_spectral:7.5337e-02 L9_spectral:7.5593e-02 L10_spectral:7.5654e-02 L11_spectral:7.5009e-02 L12_spectral:7.4522e-02 train_time:139156ms step_avg:49.70ms +[2025-09-11 09:43:47] [Rank 0] step:2801/10000 train_time:140882ms step_avg:50.30ms +[2025-09-11 09:43:47] [Rank 0] step:2801/10000 train_time:140882ms step_avg:50.30ms +[2025-09-11 09:43:48] [Rank 0] step:2821/10000 train_time:141567ms step_avg:50.18ms +[2025-09-11 09:43:48] [Rank 0] step:2821/10000 train_time:141567ms step_avg:50.18ms +[2025-09-11 09:43:49] [Rank 0] step:2841/10000 train_time:142233ms step_avg:50.06ms +[2025-09-11 09:43:49] [Rank 0] step:2841/10000 train_time:142233ms step_avg:50.06ms +[2025-09-11 09:43:50] [Rank 0] step:2861/10000 train_time:142899ms step_avg:49.95ms +[2025-09-11 09:43:50] [Rank 0] step:2861/10000 train_time:142899ms step_avg:49.95ms +[2025-09-11 09:43:50] [Rank 0] step:2881/10000 train_time:143564ms step_avg:49.83ms +[2025-09-11 09:43:50] [Rank 0] step:2881/10000 train_time:143564ms step_avg:49.83ms +[2025-09-11 09:43:51] [Rank 0] step:2901/10000 train_time:144229ms step_avg:49.72ms +[2025-09-11 09:43:51] [Rank 0] step:2901/10000 train_time:144229ms step_avg:49.72ms +[2025-09-11 09:43:52] [Rank 0] step:2921/10000 train_time:144894ms step_avg:49.60ms +[2025-09-11 09:43:52] [Rank 0] step:2921/10000 train_time:144894ms step_avg:49.60ms +[2025-09-11 09:43:52] [Rank 0] step:2941/10000 train_time:145560ms step_avg:49.49ms +[2025-09-11 09:43:52] [Rank 0] step:2941/10000 train_time:145560ms step_avg:49.49ms +[2025-09-11 09:43:53] [Rank 0] step:2961/10000 train_time:146225ms step_avg:49.38ms +[2025-09-11 09:43:53] [Rank 0] step:2961/10000 train_time:146225ms step_avg:49.38ms 
+[2025-09-11 09:43:54] [Rank 0] step:2981/10000 train_time:146894ms step_avg:49.28ms +[2025-09-11 09:43:54] [Rank 0] step:2981/10000 train_time:146894ms step_avg:49.28ms +[2025-09-11 09:43:54] [Rank 0] step:3001/10000 train_time:147561ms step_avg:49.17ms +[2025-09-11 09:43:54] [Rank 0] step:3001/10000 train_time:147561ms step_avg:49.17ms +[2025-09-11 09:43:55] [Rank 0] step:3021/10000 train_time:148231ms step_avg:49.07ms +[2025-09-11 09:43:55] [Rank 0] step:3021/10000 train_time:148231ms step_avg:49.07ms +[2025-09-11 09:43:56] [Rank 0] step:3041/10000 train_time:148899ms step_avg:48.96ms +[2025-09-11 09:43:56] [Rank 0] step:3041/10000 train_time:148899ms step_avg:48.96ms +[2025-09-11 09:43:56] [Rank 0] step:3061/10000 train_time:149567ms step_avg:48.86ms +[2025-09-11 09:43:56] [Rank 0] step:3061/10000 train_time:149567ms step_avg:48.86ms +[2025-09-11 09:43:57] [Rank 0] step:3081/10000 train_time:150235ms step_avg:48.76ms +[2025-09-11 09:43:57] [Rank 0] step:3081/10000 train_time:150235ms step_avg:48.76ms +[2025-09-11 09:43:58] [Rank 0] step:3101/10000 train_time:150904ms step_avg:48.66ms +[2025-09-11 09:43:58] [Rank 0] step:3101/10000 train_time:150904ms step_avg:48.66ms +[2025-09-11 09:43:58] [Rank 0] step:3121/10000 train_time:151572ms step_avg:48.57ms +[2025-09-11 09:43:58] [Rank 0] step:3121/10000 train_time:151572ms step_avg:48.57ms +[2025-09-11 09:43:59] [Rank 0] step:3141/10000 train_time:152238ms step_avg:48.47ms +[2025-09-11 09:43:59] [Rank 0] step:3141/10000 train_time:152238ms step_avg:48.47ms +[2025-09-11 09:44:00] [Rank 0] step:3161/10000 train_time:152905ms step_avg:48.37ms +[2025-09-11 09:44:00] [Rank 0] step:3161/10000 train_time:152905ms step_avg:48.37ms +[2025-09-11 09:44:00] [Rank 0] step:3181/10000 train_time:153574ms step_avg:48.28ms +[2025-09-11 09:44:00] [Rank 0] step:3181/10000 train_time:153574ms step_avg:48.28ms +[2025-09-11 09:44:01] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 09:44:01] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 09:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 09:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 09:44:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 09:44:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 09:44:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:44:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:44:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 09:44:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 09:44:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 09:44:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 09:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 09:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 09:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 09:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 09:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 09:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 09:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 09:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 09:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 09:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 09:44:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 09:44:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 09:44:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 09:44:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 09:44:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 09:44:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 09:44:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 09:44:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 09:44:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 09:44:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 09:44:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 09:44:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 09:44:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 09:44:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 09:44:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 09:44:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 09:44:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 09:44:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 09:44:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 09:44:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 09:44:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 09:44:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 09:44:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:44:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:44:12] [Rank 0] PRINT: step:3200/10000 val_loss:4.7462 total_sharp:4.1722e-04 L1_sharp:7.8329e-04 L2_sharp:1.8957e-04 L3_sharp:1.2485e-04 L4_sharp:1.1501e-04 L5_sharp:1.5869e-04 L6_sharp:9.8721e-05 L7_sharp:1.0254e-04 L8_sharp:2.6550e-04 L9_sharp:2.0650e-04 L10_sharp:2.6103e-04 L11_sharp:4.2664e-04 L12_sharp:1.1353e-03 total_fnorm:4.5250e+01 total_l1_linf:1.2442e+05 total_spectral:2.2125e+01 L1_fnorm:6.2812e+00 L2_fnorm:6.1562e+00 L3_fnorm:6.3125e+00 L4_fnorm:6.3125e+00 L5_fnorm:6.2500e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2500e+00 L8_fnorm:6.0625e+00 L9_fnorm:6.2812e+00 L10_fnorm:6.2812e+00 L11_fnorm:6.2812e+00 L12_fnorm:6.2188e+00 L1_l1linf:1.6875e+00 L2_l1linf:1.5547e+00 L3_l1linf:1.5469e+00 L4_l1linf:1.5625e+00 L5_l1linf:1.5625e+00 L6_l1linf:1.5859e+00 L7_l1linf:1.6250e+00 L8_l1linf:1.5625e+00 L9_l1linf:1.5078e+00 L10_l1linf:1.5000e+00 L11_l1linf:1.5234e+00 L12_l1linf:1.4922e+00 L1_spectral:7.7059e-02 L2_spectral:7.4850e-02 L3_spectral:7.6250e-02 L4_spectral:7.5980e-02 L5_spectral:7.6240e-02 L6_spectral:7.6294e-02 L7_spectral:7.6105e-02 L8_spectral:7.6189e-02 L9_spectral:7.7113e-02 L10_spectral:7.7038e-02 L11_spectral:7.6570e-02 L12_spectral:7.5494e-02 train_time:154223ms step_avg:48.19ms +[2025-09-11 09:44:12] [Rank 0] PRINT: step:3200/10000 
val_loss:4.7462 total_sharp:4.1722e-04 L1_sharp:7.8329e-04 L2_sharp:1.8957e-04 L3_sharp:1.2485e-04 L4_sharp:1.1501e-04 L5_sharp:1.5869e-04 L6_sharp:9.8721e-05 L7_sharp:1.0254e-04 L8_sharp:2.6550e-04 L9_sharp:2.0650e-04 L10_sharp:2.6103e-04 L11_sharp:4.2664e-04 L12_sharp:1.1353e-03 total_fnorm:4.5250e+01 total_l1_linf:1.2442e+05 total_spectral:2.2125e+01 L1_fnorm:6.2812e+00 L2_fnorm:6.1562e+00 L3_fnorm:6.3125e+00 L4_fnorm:6.3125e+00 L5_fnorm:6.2500e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2500e+00 L8_fnorm:6.0625e+00 L9_fnorm:6.2812e+00 L10_fnorm:6.2812e+00 L11_fnorm:6.2812e+00 L12_fnorm:6.2188e+00 L1_l1linf:1.6875e+00 L2_l1linf:1.5547e+00 L3_l1linf:1.5469e+00 L4_l1linf:1.5625e+00 L5_l1linf:1.5625e+00 L6_l1linf:1.5859e+00 L7_l1linf:1.6250e+00 L8_l1linf:1.5625e+00 L9_l1linf:1.5078e+00 L10_l1linf:1.5000e+00 L11_l1linf:1.5234e+00 L12_l1linf:1.4922e+00 L1_spectral:7.7059e-02 L2_spectral:7.4850e-02 L3_spectral:7.6250e-02 L4_spectral:7.5980e-02 L5_spectral:7.6240e-02 L6_spectral:7.6294e-02 L7_spectral:7.6105e-02 L8_spectral:7.6189e-02 L9_spectral:7.7113e-02 L10_spectral:7.7038e-02 L11_spectral:7.6570e-02 L12_spectral:7.5494e-02 train_time:154223ms step_avg:48.19ms +[2025-09-11 09:44:14] [Rank 0] step:3201/10000 train_time:156241ms step_avg:48.81ms +[2025-09-11 09:44:14] [Rank 0] step:3201/10000 train_time:156241ms step_avg:48.81ms +[2025-09-11 09:44:14] [Rank 0] step:3221/10000 train_time:156915ms step_avg:48.72ms +[2025-09-11 09:44:14] [Rank 0] step:3221/10000 train_time:156915ms step_avg:48.72ms +[2025-09-11 09:44:15] [Rank 0] step:3241/10000 train_time:157584ms step_avg:48.62ms +[2025-09-11 09:44:15] [Rank 0] step:3241/10000 train_time:157584ms step_avg:48.62ms +[2025-09-11 09:44:16] [Rank 0] step:3261/10000 train_time:158253ms step_avg:48.53ms +[2025-09-11 09:44:16] [Rank 0] step:3261/10000 train_time:158253ms step_avg:48.53ms +[2025-09-11 09:44:16] [Rank 0] step:3281/10000 train_time:158921ms step_avg:48.44ms +[2025-09-11 09:44:16] [Rank 0] step:3281/10000 
train_time:158921ms step_avg:48.44ms +[2025-09-11 09:44:17] [Rank 0] step:3301/10000 train_time:159588ms step_avg:48.35ms +[2025-09-11 09:44:17] [Rank 0] step:3301/10000 train_time:159588ms step_avg:48.35ms +[2025-09-11 09:44:18] [Rank 0] step:3321/10000 train_time:160257ms step_avg:48.26ms +[2025-09-11 09:44:18] [Rank 0] step:3321/10000 train_time:160257ms step_avg:48.26ms +[2025-09-11 09:44:18] [Rank 0] step:3341/10000 train_time:160925ms step_avg:48.17ms +[2025-09-11 09:44:18] [Rank 0] step:3341/10000 train_time:160925ms step_avg:48.17ms +[2025-09-11 09:44:19] [Rank 0] step:3361/10000 train_time:161595ms step_avg:48.08ms +[2025-09-11 09:44:19] [Rank 0] step:3361/10000 train_time:161595ms step_avg:48.08ms +[2025-09-11 09:44:20] [Rank 0] step:3381/10000 train_time:162262ms step_avg:47.99ms +[2025-09-11 09:44:20] [Rank 0] step:3381/10000 train_time:162262ms step_avg:47.99ms +[2025-09-11 09:44:20] [Rank 0] step:3401/10000 train_time:162930ms step_avg:47.91ms +[2025-09-11 09:44:20] [Rank 0] step:3401/10000 train_time:162930ms step_avg:47.91ms +[2025-09-11 09:44:21] [Rank 0] step:3421/10000 train_time:163597ms step_avg:47.82ms +[2025-09-11 09:44:21] [Rank 0] step:3421/10000 train_time:163597ms step_avg:47.82ms +[2025-09-11 09:44:22] [Rank 0] step:3441/10000 train_time:164264ms step_avg:47.74ms +[2025-09-11 09:44:22] [Rank 0] step:3441/10000 train_time:164264ms step_avg:47.74ms +[2025-09-11 09:44:22] [Rank 0] step:3461/10000 train_time:164932ms step_avg:47.65ms +[2025-09-11 09:44:22] [Rank 0] step:3461/10000 train_time:164932ms step_avg:47.65ms +[2025-09-11 09:44:23] [Rank 0] step:3481/10000 train_time:165600ms step_avg:47.57ms +[2025-09-11 09:44:23] [Rank 0] step:3481/10000 train_time:165600ms step_avg:47.57ms +[2025-09-11 09:44:24] [Rank 0] step:3501/10000 train_time:166267ms step_avg:47.49ms +[2025-09-11 09:44:24] [Rank 0] step:3501/10000 train_time:166267ms step_avg:47.49ms +[2025-09-11 09:44:24] [Rank 0] step:3521/10000 train_time:166933ms step_avg:47.41ms 
+[2025-09-11 09:44:24] [Rank 0] step:3521/10000 train_time:166933ms step_avg:47.41ms +[2025-09-11 09:44:25] [Rank 0] step:3541/10000 train_time:167601ms step_avg:47.33ms +[2025-09-11 09:44:25] [Rank 0] step:3541/10000 train_time:167601ms step_avg:47.33ms +[2025-09-11 09:44:26] [Rank 0] step:3561/10000 train_time:168268ms step_avg:47.25ms +[2025-09-11 09:44:26] [Rank 0] step:3561/10000 train_time:168268ms step_avg:47.25ms +[2025-09-11 09:44:26] [Rank 0] step:3581/10000 train_time:168934ms step_avg:47.18ms +[2025-09-11 09:44:26] [Rank 0] step:3581/10000 train_time:168934ms step_avg:47.18ms +[2025-09-11 09:44:27] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 09:44:27] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 09:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 09:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 09:44:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 09:44:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 09:44:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:44:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:44:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 09:44:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 09:44:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 09:44:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 09:44:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 09:44:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 09:44:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 09:44:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 09:44:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 09:44:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 09:44:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 09:44:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 09:44:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 09:44:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 09:44:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 09:44:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 09:44:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 09:44:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 09:44:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 09:44:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 09:44:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 09:44:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 09:44:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 09:44:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 09:44:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 09:44:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 09:44:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 09:44:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 09:44:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 09:44:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 09:44:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 09:44:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 09:44:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 09:44:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 09:44:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 09:44:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 09:44:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:44:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:44:39] [Rank 0] PRINT: step:3600/10000 val_loss:4.6898 total_sharp:5.0791e-04 L1_sharp:6.0454e-04 L2_sharp:2.1334e-04 L3_sharp:1.1845e-04 L4_sharp:6.8441e-05 L5_sharp:6.3783e-05 L6_sharp:1.1856e-04 L7_sharp:1.0376e-04 L8_sharp:2.7782e-04 L9_sharp:2.2719e-04 L10_sharp:2.2298e-04 L11_sharp:3.5804e-04 L12_sharp:1.4750e-03 total_fnorm:4.0750e+01 total_l1_linf:1.1162e+05 total_spectral:2.0000e+01 L1_fnorm:6.2812e+00 L2_fnorm:6.1562e+00 L3_fnorm:6.2500e+00 L4_fnorm:6.2812e+00 L5_fnorm:6.2500e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2500e+00 L8_fnorm:6.0625e+00 L9_fnorm:6.2500e+00 L10_fnorm:6.2812e+00 L11_fnorm:6.2812e+00 L12_fnorm:6.2188e+00 L1_l1linf:1.6641e+00 L2_l1linf:1.5312e+00 L3_l1linf:1.5156e+00 L4_l1linf:1.5547e+00 L5_l1linf:1.5703e+00 L6_l1linf:1.5703e+00 L7_l1linf:1.6094e+00 L8_l1linf:1.5312e+00 L9_l1linf:1.5078e+00 L10_l1linf:1.4844e+00 L11_l1linf:1.4922e+00 L12_l1linf:1.4922e+00 L1_spectral:7.8143e-02 L2_spectral:7.5151e-02 L3_spectral:7.6608e-02 L4_spectral:7.6543e-02 L5_spectral:7.6936e-02 L6_spectral:7.6632e-02 L7_spectral:7.7442e-02 L8_spectral:7.6462e-02 L9_spectral:7.7874e-02 L10_spectral:7.7174e-02 L11_spectral:7.6936e-02 L12_spectral:7.5944e-02 train_time:169583ms step_avg:47.11ms +[2025-09-11 09:44:39] [Rank 0] PRINT: step:3600/10000 val_loss:4.6898 total_sharp:5.0791e-04 L1_sharp:6.0454e-04 L2_sharp:2.1334e-04 L3_sharp:1.1845e-04 L4_sharp:6.8441e-05 L5_sharp:6.3783e-05 L6_sharp:1.1856e-04 L7_sharp:1.0376e-04 L8_sharp:2.7782e-04 L9_sharp:2.2719e-04 L10_sharp:2.2298e-04 L11_sharp:3.5804e-04 L12_sharp:1.4750e-03 total_fnorm:4.0750e+01 total_l1_linf:1.1162e+05 total_spectral:2.0000e+01 L1_fnorm:6.2812e+00 L2_fnorm:6.1562e+00 L3_fnorm:6.2500e+00 L4_fnorm:6.2812e+00 L5_fnorm:6.2500e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2500e+00 L8_fnorm:6.0625e+00 L9_fnorm:6.2500e+00 L10_fnorm:6.2812e+00 L11_fnorm:6.2812e+00 L12_fnorm:6.2188e+00 L1_l1linf:1.6641e+00 L2_l1linf:1.5312e+00 L3_l1linf:1.5156e+00 L4_l1linf:1.5547e+00 L5_l1linf:1.5703e+00 
L6_l1linf:1.5703e+00 L7_l1linf:1.6094e+00 L8_l1linf:1.5312e+00 L9_l1linf:1.5078e+00 L10_l1linf:1.4844e+00 L11_l1linf:1.4922e+00 L12_l1linf:1.4922e+00 L1_spectral:7.8143e-02 L2_spectral:7.5151e-02 L3_spectral:7.6608e-02 L4_spectral:7.6543e-02 L5_spectral:7.6936e-02 L6_spectral:7.6632e-02 L7_spectral:7.7442e-02 L8_spectral:7.6462e-02 L9_spectral:7.7874e-02 L10_spectral:7.7174e-02 L11_spectral:7.6936e-02 L12_spectral:7.5944e-02 train_time:169583ms step_avg:47.11ms +[2025-09-11 09:44:42] [Rank 0] step:3601/10000 train_time:172061ms step_avg:47.78ms +[2025-09-11 09:44:42] [Rank 0] step:3601/10000 train_time:172061ms step_avg:47.78ms +[2025-09-11 09:44:42] [Rank 0] step:3621/10000 train_time:172795ms step_avg:47.72ms +[2025-09-11 09:44:42] [Rank 0] step:3621/10000 train_time:172795ms step_avg:47.72ms +[2025-09-11 09:44:43] [Rank 0] step:3641/10000 train_time:173462ms step_avg:47.64ms +[2025-09-11 09:44:43] [Rank 0] step:3641/10000 train_time:173462ms step_avg:47.64ms +[2025-09-11 09:44:44] [Rank 0] step:3661/10000 train_time:174129ms step_avg:47.56ms +[2025-09-11 09:44:44] [Rank 0] step:3661/10000 train_time:174129ms step_avg:47.56ms +[2025-09-11 09:44:44] [Rank 0] step:3681/10000 train_time:174795ms step_avg:47.49ms +[2025-09-11 09:44:44] [Rank 0] step:3681/10000 train_time:174795ms step_avg:47.49ms +[2025-09-11 09:44:45] [Rank 0] step:3701/10000 train_time:175462ms step_avg:47.41ms +[2025-09-11 09:44:45] [Rank 0] step:3701/10000 train_time:175462ms step_avg:47.41ms +[2025-09-11 09:44:46] [Rank 0] step:3721/10000 train_time:176138ms step_avg:47.34ms +[2025-09-11 09:44:46] [Rank 0] step:3721/10000 train_time:176138ms step_avg:47.34ms +[2025-09-11 09:44:46] [Rank 0] step:3741/10000 train_time:176816ms step_avg:47.26ms +[2025-09-11 09:44:46] [Rank 0] step:3741/10000 train_time:176816ms step_avg:47.26ms +[2025-09-11 09:44:47] [Rank 0] step:3761/10000 train_time:177494ms step_avg:47.19ms +[2025-09-11 09:44:47] [Rank 0] step:3761/10000 train_time:177494ms step_avg:47.19ms 
+[2025-09-11 09:44:48] [Rank 0] step:3781/10000 train_time:178171ms step_avg:47.12ms +[2025-09-11 09:44:48] [Rank 0] step:3781/10000 train_time:178171ms step_avg:47.12ms +[2025-09-11 09:44:48] [Rank 0] step:3801/10000 train_time:178849ms step_avg:47.05ms +[2025-09-11 09:44:48] [Rank 0] step:3801/10000 train_time:178849ms step_avg:47.05ms +[2025-09-11 09:44:49] [Rank 0] step:3821/10000 train_time:179527ms step_avg:46.98ms +[2025-09-11 09:44:49] [Rank 0] step:3821/10000 train_time:179527ms step_avg:46.98ms +[2025-09-11 09:44:50] [Rank 0] step:3841/10000 train_time:180205ms step_avg:46.92ms +[2025-09-11 09:44:50] [Rank 0] step:3841/10000 train_time:180205ms step_avg:46.92ms +[2025-09-11 09:44:50] [Rank 0] step:3861/10000 train_time:180882ms step_avg:46.85ms +[2025-09-11 09:44:50] [Rank 0] step:3861/10000 train_time:180882ms step_avg:46.85ms +[2025-09-11 09:44:51] [Rank 0] step:3881/10000 train_time:181559ms step_avg:46.78ms +[2025-09-11 09:44:51] [Rank 0] step:3881/10000 train_time:181559ms step_avg:46.78ms +[2025-09-11 09:44:52] [Rank 0] step:3901/10000 train_time:182236ms step_avg:46.72ms +[2025-09-11 09:44:52] [Rank 0] step:3901/10000 train_time:182236ms step_avg:46.72ms +[2025-09-11 09:44:52] [Rank 0] step:3921/10000 train_time:182914ms step_avg:46.65ms +[2025-09-11 09:44:52] [Rank 0] step:3921/10000 train_time:182914ms step_avg:46.65ms +[2025-09-11 09:44:53] [Rank 0] step:3941/10000 train_time:183591ms step_avg:46.58ms +[2025-09-11 09:44:53] [Rank 0] step:3941/10000 train_time:183591ms step_avg:46.58ms +[2025-09-11 09:44:54] [Rank 0] step:3961/10000 train_time:184268ms step_avg:46.52ms +[2025-09-11 09:44:54] [Rank 0] step:3961/10000 train_time:184268ms step_avg:46.52ms +[2025-09-11 09:44:54] [Rank 0] step:3981/10000 train_time:184946ms step_avg:46.46ms +[2025-09-11 09:44:54] [Rank 0] step:3981/10000 train_time:184946ms step_avg:46.46ms +[2025-09-11 09:44:55] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 09:44:55] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 09:44:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 09:44:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 09:44:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 09:44:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 09:44:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:44:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:44:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 09:44:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 09:44:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 09:44:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 09:45:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 09:45:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 09:45:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 09:45:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 09:45:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 09:45:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 09:45:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 09:45:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 09:45:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 09:45:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 09:45:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 09:45:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 09:45:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 09:45:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 09:45:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 09:45:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 09:45:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 09:45:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 09:45:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 09:45:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 09:45:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 09:45:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 09:45:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 09:45:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 09:45:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 09:45:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 09:45:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 09:45:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 09:45:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 09:45:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 09:45:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 09:45:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 09:45:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:45:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:45:05] [Rank 0] PRINT: step:4000/10000 val_loss:4.6386 total_sharp:5.8277e-04 L1_sharp:4.8955e-04 L2_sharp:7.4198e-05 L3_sharp:-2.7217e-07 L4_sharp:7.3484e-05 L5_sharp:9.3878e-05 L6_sharp:8.5994e-05 L7_sharp:9.7215e-05 L8_sharp:2.8998e-04 L9_sharp:2.7191e-04 L10_sharp:2.5930e-04 L11_sharp:4.8258e-04 L12_sharp:3.6276e-03 total_fnorm:4.4750e+01 total_l1_linf:1.1981e+05 total_spectral:2.2125e+01 L1_fnorm:6.2500e+00 L2_fnorm:6.0938e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.2500e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.2188e+00 L10_fnorm:6.2500e+00 L11_fnorm:6.2500e+00 L12_fnorm:6.2188e+00 L1_l1linf:1.6875e+00 L2_l1linf:1.5234e+00 L3_l1linf:1.4844e+00 L4_l1linf:1.5234e+00 L5_l1linf:1.5391e+00 L6_l1linf:1.5469e+00 L7_l1linf:1.6016e+00 L8_l1linf:1.5000e+00 L9_l1linf:1.5078e+00 L10_l1linf:1.4922e+00 L11_l1linf:1.5078e+00 L12_l1linf:1.4297e+00 L1_spectral:7.7812e-02 L2_spectral:7.5682e-02 L3_spectral:7.7256e-02 L4_spectral:7.7069e-02 L5_spectral:7.6981e-02 L6_spectral:7.7965e-02 L7_spectral:7.6935e-02 L8_spectral:7.6153e-02 L9_spectral:7.7946e-02 L10_spectral:7.7552e-02 L11_spectral:7.6635e-02 L12_spectral:7.6902e-02 train_time:185605ms step_avg:46.40ms +[2025-09-11 09:45:05] [Rank 0] PRINT: step:4000/10000 
val_loss:4.6386 total_sharp:5.8277e-04 L1_sharp:4.8955e-04 L2_sharp:7.4198e-05 L3_sharp:-2.7217e-07 L4_sharp:7.3484e-05 L5_sharp:9.3878e-05 L6_sharp:8.5994e-05 L7_sharp:9.7215e-05 L8_sharp:2.8998e-04 L9_sharp:2.7191e-04 L10_sharp:2.5930e-04 L11_sharp:4.8258e-04 L12_sharp:3.6276e-03 total_fnorm:4.4750e+01 total_l1_linf:1.1981e+05 total_spectral:2.2125e+01 L1_fnorm:6.2500e+00 L2_fnorm:6.0938e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.2500e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.2188e+00 L10_fnorm:6.2500e+00 L11_fnorm:6.2500e+00 L12_fnorm:6.2188e+00 L1_l1linf:1.6875e+00 L2_l1linf:1.5234e+00 L3_l1linf:1.4844e+00 L4_l1linf:1.5234e+00 L5_l1linf:1.5391e+00 L6_l1linf:1.5469e+00 L7_l1linf:1.6016e+00 L8_l1linf:1.5000e+00 L9_l1linf:1.5078e+00 L10_l1linf:1.4922e+00 L11_l1linf:1.5078e+00 L12_l1linf:1.4297e+00 L1_spectral:7.7812e-02 L2_spectral:7.5682e-02 L3_spectral:7.7256e-02 L4_spectral:7.7069e-02 L5_spectral:7.6981e-02 L6_spectral:7.7965e-02 L7_spectral:7.6935e-02 L8_spectral:7.6153e-02 L9_spectral:7.7946e-02 L10_spectral:7.7552e-02 L11_spectral:7.6635e-02 L12_spectral:7.6902e-02 train_time:185605ms step_avg:46.40ms +[2025-09-11 09:45:07] [Rank 0] step:4001/10000 train_time:187216ms step_avg:46.79ms +[2025-09-11 09:45:07] [Rank 0] step:4001/10000 train_time:187216ms step_avg:46.79ms +[2025-09-11 09:45:08] [Rank 0] step:4021/10000 train_time:187982ms step_avg:46.75ms +[2025-09-11 09:45:08] [Rank 0] step:4021/10000 train_time:187982ms step_avg:46.75ms +[2025-09-11 09:45:08] [Rank 0] step:4041/10000 train_time:188662ms step_avg:46.69ms +[2025-09-11 09:45:08] [Rank 0] step:4041/10000 train_time:188662ms step_avg:46.69ms +[2025-09-11 09:45:09] [Rank 0] step:4061/10000 train_time:189635ms step_avg:46.70ms +[2025-09-11 09:45:09] [Rank 0] step:4061/10000 train_time:189635ms step_avg:46.70ms +[2025-09-11 09:45:10] [Rank 0] step:4081/10000 train_time:190314ms step_avg:46.63ms +[2025-09-11 09:45:10] [Rank 0] step:4081/10000 
train_time:190314ms step_avg:46.63ms +[2025-09-11 09:45:11] [Rank 0] step:4101/10000 train_time:190992ms step_avg:46.57ms +[2025-09-11 09:45:11] [Rank 0] step:4101/10000 train_time:190992ms step_avg:46.57ms +[2025-09-11 09:45:11] [Rank 0] step:4121/10000 train_time:191671ms step_avg:46.51ms +[2025-09-11 09:45:11] [Rank 0] step:4121/10000 train_time:191671ms step_avg:46.51ms +[2025-09-11 09:45:12] [Rank 0] step:4141/10000 train_time:192348ms step_avg:46.45ms +[2025-09-11 09:45:12] [Rank 0] step:4141/10000 train_time:192348ms step_avg:46.45ms +[2025-09-11 09:45:13] [Rank 0] step:4161/10000 train_time:193026ms step_avg:46.39ms +[2025-09-11 09:45:13] [Rank 0] step:4161/10000 train_time:193026ms step_avg:46.39ms +[2025-09-11 09:45:14] [Rank 0] step:4181/10000 train_time:193704ms step_avg:46.33ms +[2025-09-11 09:45:14] [Rank 0] step:4181/10000 train_time:193704ms step_avg:46.33ms +[2025-09-11 09:45:14] [Rank 0] step:4201/10000 train_time:194382ms step_avg:46.27ms +[2025-09-11 09:45:14] [Rank 0] step:4201/10000 train_time:194382ms step_avg:46.27ms +[2025-09-11 09:45:15] [Rank 0] step:4221/10000 train_time:195060ms step_avg:46.21ms +[2025-09-11 09:45:15] [Rank 0] step:4221/10000 train_time:195060ms step_avg:46.21ms +[2025-09-11 09:45:16] [Rank 0] step:4241/10000 train_time:195739ms step_avg:46.15ms +[2025-09-11 09:45:16] [Rank 0] step:4241/10000 train_time:195739ms step_avg:46.15ms +[2025-09-11 09:45:16] [Rank 0] step:4261/10000 train_time:196417ms step_avg:46.10ms +[2025-09-11 09:45:16] [Rank 0] step:4261/10000 train_time:196417ms step_avg:46.10ms +[2025-09-11 09:45:17] [Rank 0] step:4281/10000 train_time:197098ms step_avg:46.04ms +[2025-09-11 09:45:17] [Rank 0] step:4281/10000 train_time:197098ms step_avg:46.04ms +[2025-09-11 09:45:18] [Rank 0] step:4301/10000 train_time:197777ms step_avg:45.98ms +[2025-09-11 09:45:18] [Rank 0] step:4301/10000 train_time:197777ms step_avg:45.98ms +[2025-09-11 09:45:18] [Rank 0] step:4321/10000 train_time:198454ms step_avg:45.93ms 
+[2025-09-11 09:45:18] [Rank 0] step:4321/10000 train_time:198454ms step_avg:45.93ms +[2025-09-11 09:45:19] [Rank 0] step:4341/10000 train_time:199132ms step_avg:45.87ms +[2025-09-11 09:45:19] [Rank 0] step:4341/10000 train_time:199132ms step_avg:45.87ms +[2025-09-11 09:45:20] [Rank 0] step:4361/10000 train_time:199809ms step_avg:45.82ms +[2025-09-11 09:45:20] [Rank 0] step:4361/10000 train_time:199809ms step_avg:45.82ms +[2025-09-11 09:45:20] [Rank 0] step:4381/10000 train_time:200487ms step_avg:45.76ms +[2025-09-11 09:45:20] [Rank 0] step:4381/10000 train_time:200487ms step_avg:45.76ms +[2025-09-11 09:45:21] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 09:45:21] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 09:45:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 09:45:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 09:45:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 09:45:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 09:45:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:45:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:45:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 09:45:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 09:45:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 09:45:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 09:45:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 09:45:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 09:45:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 09:45:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 09:45:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 09:45:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 09:45:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 09:45:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 09:45:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 09:45:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 09:45:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 09:45:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 09:45:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 09:45:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 09:45:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 09:45:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 09:45:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 09:45:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 09:45:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 09:45:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 09:45:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 09:45:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 09:45:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 09:45:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 09:45:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 09:45:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 09:45:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 09:45:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 09:45:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 09:45:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 09:45:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 09:45:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 09:45:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:45:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:45:32] [Rank 0] PRINT: step:4400/10000 val_loss:4.6038 total_sharp:3.6791e-04 L1_sharp:3.4757e-04 L2_sharp:1.1892e-04 L3_sharp:4.3330e-05 L4_sharp:6.5576e-05 L5_sharp:6.5259e-05 L6_sharp:8.9747e-05 L7_sharp:1.3744e-04 L8_sharp:1.8876e-04 L9_sharp:2.1706e-04 L10_sharp:2.5151e-04 L11_sharp:3.5826e-04 L12_sharp:1.2415e-03 total_fnorm:4.2250e+01 total_l1_linf:1.1213e+05 total_spectral:2.0875e+01 L1_fnorm:6.2188e+00 L2_fnorm:6.0938e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.2188e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.0625e+00 L9_fnorm:6.2188e+00 L10_fnorm:6.2500e+00 L11_fnorm:6.2500e+00 L12_fnorm:6.2188e+00 L1_l1linf:1.6562e+00 L2_l1linf:1.5078e+00 L3_l1linf:1.4766e+00 L4_l1linf:1.5078e+00 L5_l1linf:1.5469e+00 L6_l1linf:1.5625e+00 L7_l1linf:1.5781e+00 L8_l1linf:1.5000e+00 L9_l1linf:1.4844e+00 L10_l1linf:1.4688e+00 L11_l1linf:1.5000e+00 L12_l1linf:1.4688e+00 L1_spectral:7.8610e-02 L2_spectral:7.5772e-02 L3_spectral:7.7284e-02 L4_spectral:7.7557e-02 L5_spectral:7.7248e-02 L6_spectral:7.7746e-02 L7_spectral:7.7664e-02 L8_spectral:7.6411e-02 L9_spectral:7.8269e-02 L10_spectral:7.8051e-02 L11_spectral:7.7442e-02 L12_spectral:7.7104e-02 train_time:201147ms step_avg:45.72ms +[2025-09-11 09:45:32] [Rank 0] PRINT: step:4400/10000 val_loss:4.6038 total_sharp:3.6791e-04 L1_sharp:3.4757e-04 L2_sharp:1.1892e-04 L3_sharp:4.3330e-05 L4_sharp:6.5576e-05 L5_sharp:6.5259e-05 L6_sharp:8.9747e-05 L7_sharp:1.3744e-04 L8_sharp:1.8876e-04 L9_sharp:2.1706e-04 L10_sharp:2.5151e-04 L11_sharp:3.5826e-04 L12_sharp:1.2415e-03 total_fnorm:4.2250e+01 total_l1_linf:1.1213e+05 total_spectral:2.0875e+01 L1_fnorm:6.2188e+00 L2_fnorm:6.0938e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.2188e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.0625e+00 L9_fnorm:6.2188e+00 L10_fnorm:6.2500e+00 L11_fnorm:6.2500e+00 L12_fnorm:6.2188e+00 L1_l1linf:1.6562e+00 L2_l1linf:1.5078e+00 L3_l1linf:1.4766e+00 L4_l1linf:1.5078e+00 L5_l1linf:1.5469e+00 
L6_l1linf:1.5625e+00 L7_l1linf:1.5781e+00 L8_l1linf:1.5000e+00 L9_l1linf:1.4844e+00 L10_l1linf:1.4688e+00 L11_l1linf:1.5000e+00 L12_l1linf:1.4688e+00 L1_spectral:7.8610e-02 L2_spectral:7.5772e-02 L3_spectral:7.7284e-02 L4_spectral:7.7557e-02 L5_spectral:7.7248e-02 L6_spectral:7.7746e-02 L7_spectral:7.7664e-02 L8_spectral:7.6411e-02 L9_spectral:7.8269e-02 L10_spectral:7.8051e-02 L11_spectral:7.7442e-02 L12_spectral:7.7104e-02 train_time:201147ms step_avg:45.72ms +[2025-09-11 09:45:33] [Rank 0] step:4401/10000 train_time:202909ms step_avg:46.11ms +[2025-09-11 09:45:33] [Rank 0] step:4401/10000 train_time:202909ms step_avg:46.11ms +[2025-09-11 09:45:34] [Rank 0] step:4421/10000 train_time:203592ms step_avg:46.05ms +[2025-09-11 09:45:34] [Rank 0] step:4421/10000 train_time:203592ms step_avg:46.05ms +[2025-09-11 09:45:35] [Rank 0] step:4441/10000 train_time:204271ms step_avg:46.00ms +[2025-09-11 09:45:35] [Rank 0] step:4441/10000 train_time:204271ms step_avg:46.00ms +[2025-09-11 09:45:35] [Rank 0] step:4461/10000 train_time:204950ms step_avg:45.94ms +[2025-09-11 09:45:35] [Rank 0] step:4461/10000 train_time:204950ms step_avg:45.94ms +[2025-09-11 09:45:36] [Rank 0] step:4481/10000 train_time:205629ms step_avg:45.89ms +[2025-09-11 09:45:36] [Rank 0] step:4481/10000 train_time:205629ms step_avg:45.89ms +[2025-09-11 09:45:37] [Rank 0] step:4501/10000 train_time:206310ms step_avg:45.84ms +[2025-09-11 09:45:37] [Rank 0] step:4501/10000 train_time:206310ms step_avg:45.84ms +[2025-09-11 09:45:37] [Rank 0] step:4521/10000 train_time:206990ms step_avg:45.78ms +[2025-09-11 09:45:37] [Rank 0] step:4521/10000 train_time:206990ms step_avg:45.78ms +[2025-09-11 09:45:38] [Rank 0] step:4541/10000 train_time:207669ms step_avg:45.73ms +[2025-09-11 09:45:38] [Rank 0] step:4541/10000 train_time:207669ms step_avg:45.73ms +[2025-09-11 09:45:39] [Rank 0] step:4561/10000 train_time:208349ms step_avg:45.68ms +[2025-09-11 09:45:39] [Rank 0] step:4561/10000 train_time:208349ms step_avg:45.68ms 
+[2025-09-11 09:45:39] [Rank 0] step:4581/10000 train_time:209028ms step_avg:45.63ms +[2025-09-11 09:45:39] [Rank 0] step:4581/10000 train_time:209028ms step_avg:45.63ms +[2025-09-11 09:45:40] [Rank 0] step:4601/10000 train_time:209708ms step_avg:45.58ms +[2025-09-11 09:45:40] [Rank 0] step:4601/10000 train_time:209708ms step_avg:45.58ms +[2025-09-11 09:45:41] [Rank 0] step:4621/10000 train_time:210388ms step_avg:45.53ms +[2025-09-11 09:45:41] [Rank 0] step:4621/10000 train_time:210388ms step_avg:45.53ms +[2025-09-11 09:45:42] [Rank 0] step:4641/10000 train_time:211070ms step_avg:45.48ms +[2025-09-11 09:45:42] [Rank 0] step:4641/10000 train_time:211070ms step_avg:45.48ms +[2025-09-11 09:45:42] [Rank 0] step:4661/10000 train_time:211750ms step_avg:45.43ms +[2025-09-11 09:45:42] [Rank 0] step:4661/10000 train_time:211750ms step_avg:45.43ms +[2025-09-11 09:45:43] [Rank 0] step:4681/10000 train_time:212428ms step_avg:45.38ms +[2025-09-11 09:45:43] [Rank 0] step:4681/10000 train_time:212428ms step_avg:45.38ms +[2025-09-11 09:45:44] [Rank 0] step:4701/10000 train_time:213108ms step_avg:45.33ms +[2025-09-11 09:45:44] [Rank 0] step:4701/10000 train_time:213108ms step_avg:45.33ms +[2025-09-11 09:45:44] [Rank 0] step:4721/10000 train_time:213788ms step_avg:45.28ms +[2025-09-11 09:45:44] [Rank 0] step:4721/10000 train_time:213788ms step_avg:45.28ms +[2025-09-11 09:45:45] [Rank 0] step:4741/10000 train_time:214468ms step_avg:45.24ms +[2025-09-11 09:45:45] [Rank 0] step:4741/10000 train_time:214468ms step_avg:45.24ms +[2025-09-11 09:45:46] [Rank 0] step:4761/10000 train_time:215148ms step_avg:45.19ms +[2025-09-11 09:45:46] [Rank 0] step:4761/10000 train_time:215148ms step_avg:45.19ms +[2025-09-11 09:45:46] [Rank 0] step:4781/10000 train_time:215827ms step_avg:45.14ms +[2025-09-11 09:45:46] [Rank 0] step:4781/10000 train_time:215827ms step_avg:45.14ms +[2025-09-11 09:45:47] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 09:45:47] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 09:45:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 09:45:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 09:45:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 09:45:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 09:45:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:45:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:45:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 09:45:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 09:45:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 09:45:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 09:45:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 09:45:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 09:45:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 09:45:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 09:45:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 09:45:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 09:45:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 09:45:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 09:45:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 09:45:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 09:45:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 09:45:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 09:45:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 09:45:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 09:45:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 09:45:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 09:45:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 09:45:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 09:45:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 09:45:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 09:45:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 09:45:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 09:45:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 09:45:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 09:45:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 09:45:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 09:45:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 09:45:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 09:45:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 09:45:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 09:45:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 09:45:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 09:45:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:45:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:45:57] [Rank 0] PRINT: step:4800/10000 val_loss:4.5517 total_sharp:4.3170e-04 L1_sharp:4.3171e-04 L2_sharp:1.2799e-04 L3_sharp:7.4119e-05 L4_sharp:5.3986e-05 L5_sharp:4.8620e-05 L6_sharp:1.2949e-04 L7_sharp:8.3654e-05 L8_sharp:2.0323e-04 L9_sharp:2.1091e-04 L10_sharp:2.2569e-04 L11_sharp:3.9134e-04 L12_sharp:1.7500e-03 total_fnorm:4.2250e+01 total_l1_linf:1.1315e+05 total_spectral:2.1000e+01 L1_fnorm:6.2500e+00 L2_fnorm:6.0938e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.2500e+00 L5_fnorm:6.2500e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2500e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.2188e+00 L10_fnorm:6.2500e+00 L11_fnorm:6.2500e+00 L12_fnorm:6.2188e+00 L1_l1linf:1.6328e+00 L2_l1linf:1.4766e+00 L3_l1linf:1.4688e+00 L4_l1linf:1.5312e+00 L5_l1linf:1.5625e+00 L6_l1linf:1.5547e+00 L7_l1linf:1.5859e+00 L8_l1linf:1.5078e+00 L9_l1linf:1.4609e+00 L10_l1linf:1.4375e+00 L11_l1linf:1.4375e+00 L12_l1linf:1.4453e+00 L1_spectral:7.8971e-02 L2_spectral:7.5871e-02 L3_spectral:7.8391e-02 L4_spectral:7.7950e-02 L5_spectral:7.8497e-02 L6_spectral:7.7909e-02 L7_spectral:7.7990e-02 L8_spectral:7.6994e-02 L9_spectral:7.8539e-02 L10_spectral:7.8303e-02 L11_spectral:7.7950e-02 L12_spectral:7.8056e-02 train_time:216487ms step_avg:45.10ms +[2025-09-11 09:45:57] [Rank 0] PRINT: step:4800/10000 
val_loss:4.5517 total_sharp:4.3170e-04 L1_sharp:4.3171e-04 L2_sharp:1.2799e-04 L3_sharp:7.4119e-05 L4_sharp:5.3986e-05 L5_sharp:4.8620e-05 L6_sharp:1.2949e-04 L7_sharp:8.3654e-05 L8_sharp:2.0323e-04 L9_sharp:2.1091e-04 L10_sharp:2.2569e-04 L11_sharp:3.9134e-04 L12_sharp:1.7500e-03 total_fnorm:4.2250e+01 total_l1_linf:1.1315e+05 total_spectral:2.1000e+01 L1_fnorm:6.2500e+00 L2_fnorm:6.0938e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.2500e+00 L5_fnorm:6.2500e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2500e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.2188e+00 L10_fnorm:6.2500e+00 L11_fnorm:6.2500e+00 L12_fnorm:6.2188e+00 L1_l1linf:1.6328e+00 L2_l1linf:1.4766e+00 L3_l1linf:1.4688e+00 L4_l1linf:1.5312e+00 L5_l1linf:1.5625e+00 L6_l1linf:1.5547e+00 L7_l1linf:1.5859e+00 L8_l1linf:1.5078e+00 L9_l1linf:1.4609e+00 L10_l1linf:1.4375e+00 L11_l1linf:1.4375e+00 L12_l1linf:1.4453e+00 L1_spectral:7.8971e-02 L2_spectral:7.5871e-02 L3_spectral:7.8391e-02 L4_spectral:7.7950e-02 L5_spectral:7.8497e-02 L6_spectral:7.7909e-02 L7_spectral:7.7990e-02 L8_spectral:7.6994e-02 L9_spectral:7.8539e-02 L10_spectral:7.8303e-02 L11_spectral:7.7950e-02 L12_spectral:7.8056e-02 train_time:216487ms step_avg:45.10ms +[2025-09-11 09:45:59] [Rank 0] step:4801/10000 train_time:218422ms step_avg:45.50ms +[2025-09-11 09:45:59] [Rank 0] step:4801/10000 train_time:218422ms step_avg:45.50ms +[2025-09-11 09:46:00] [Rank 0] step:4821/10000 train_time:219140ms step_avg:45.46ms +[2025-09-11 09:46:00] [Rank 0] step:4821/10000 train_time:219140ms step_avg:45.46ms +[2025-09-11 09:46:01] [Rank 0] step:4841/10000 train_time:219822ms step_avg:45.41ms +[2025-09-11 09:46:01] [Rank 0] step:4841/10000 train_time:219822ms step_avg:45.41ms +[2025-09-11 09:46:01] [Rank 0] step:4861/10000 train_time:220503ms step_avg:45.36ms +[2025-09-11 09:46:01] [Rank 0] step:4861/10000 train_time:220503ms step_avg:45.36ms +[2025-09-11 09:46:02] [Rank 0] step:4881/10000 train_time:221185ms step_avg:45.32ms +[2025-09-11 09:46:02] [Rank 0] step:4881/10000 
train_time:221185ms step_avg:45.32ms +[2025-09-11 09:46:03] [Rank 0] step:4901/10000 train_time:221868ms step_avg:45.27ms +[2025-09-11 09:46:03] [Rank 0] step:4901/10000 train_time:221868ms step_avg:45.27ms +[2025-09-11 09:46:03] [Rank 0] step:4921/10000 train_time:222552ms step_avg:45.23ms +[2025-09-11 09:46:03] [Rank 0] step:4921/10000 train_time:222552ms step_avg:45.23ms +[2025-09-11 09:46:04] [Rank 0] step:4941/10000 train_time:223234ms step_avg:45.18ms +[2025-09-11 09:46:04] [Rank 0] step:4941/10000 train_time:223234ms step_avg:45.18ms +[2025-09-11 09:46:05] [Rank 0] step:4961/10000 train_time:223917ms step_avg:45.14ms +[2025-09-11 09:46:05] [Rank 0] step:4961/10000 train_time:223917ms step_avg:45.14ms +[2025-09-11 09:46:05] [Rank 0] step:4981/10000 train_time:224597ms step_avg:45.09ms +[2025-09-11 09:46:05] [Rank 0] step:4981/10000 train_time:224597ms step_avg:45.09ms +[2025-09-11 09:46:06] [Rank 0] step:5001/10000 train_time:225280ms step_avg:45.05ms +[2025-09-11 09:46:06] [Rank 0] step:5001/10000 train_time:225280ms step_avg:45.05ms +[2025-09-11 09:46:07] [Rank 0] step:5021/10000 train_time:225960ms step_avg:45.00ms +[2025-09-11 09:46:07] [Rank 0] step:5021/10000 train_time:225960ms step_avg:45.00ms +[2025-09-11 09:46:07] [Rank 0] step:5041/10000 train_time:226640ms step_avg:44.96ms +[2025-09-11 09:46:07] [Rank 0] step:5041/10000 train_time:226640ms step_avg:44.96ms +[2025-09-11 09:46:08] [Rank 0] step:5061/10000 train_time:227321ms step_avg:44.92ms +[2025-09-11 09:46:08] [Rank 0] step:5061/10000 train_time:227321ms step_avg:44.92ms +[2025-09-11 09:46:09] [Rank 0] step:5081/10000 train_time:228002ms step_avg:44.87ms +[2025-09-11 09:46:09] [Rank 0] step:5081/10000 train_time:228002ms step_avg:44.87ms +[2025-09-11 09:46:10] [Rank 0] step:5101/10000 train_time:228829ms step_avg:44.86ms +[2025-09-11 09:46:10] [Rank 0] step:5101/10000 train_time:228829ms step_avg:44.86ms +[2025-09-11 09:46:11] [Rank 0] step:5121/10000 train_time:229891ms step_avg:44.89ms 
+[2025-09-11 09:46:11] [Rank 0] step:5121/10000 train_time:229891ms step_avg:44.89ms +[2025-09-11 09:46:11] [Rank 0] step:5141/10000 train_time:230572ms step_avg:44.85ms +[2025-09-11 09:46:11] [Rank 0] step:5141/10000 train_time:230572ms step_avg:44.85ms +[2025-09-11 09:46:12] [Rank 0] step:5161/10000 train_time:231418ms step_avg:44.84ms +[2025-09-11 09:46:12] [Rank 0] step:5161/10000 train_time:231418ms step_avg:44.84ms +[2025-09-11 09:46:13] [Rank 0] step:5181/10000 train_time:232245ms step_avg:44.83ms +[2025-09-11 09:46:13] [Rank 0] step:5181/10000 train_time:232245ms step_avg:44.83ms +[2025-09-11 09:46:14] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 09:46:14] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 09:46:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 09:46:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 09:46:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 09:46:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 09:46:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:46:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:46:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 09:46:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 09:46:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 09:46:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 09:46:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 09:46:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 09:46:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 09:46:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 09:46:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 09:46:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 09:46:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 09:46:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 09:46:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 09:46:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 09:46:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 09:46:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 09:46:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 09:46:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 09:46:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 09:46:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 09:46:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 09:46:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 09:46:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 09:46:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 09:46:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 09:46:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 09:46:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 09:46:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 09:46:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 09:46:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 09:46:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 09:46:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 09:46:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 09:46:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 09:46:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 09:46:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 09:46:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:46:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:46:24] [Rank 0] PRINT: step:5200/10000 val_loss:4.5165 total_sharp:5.4485e-04 L1_sharp:4.1373e-04 L2_sharp:1.3546e-04 L3_sharp:1.2857e-04 L4_sharp:5.1968e-05 L5_sharp:6.7219e-05 L6_sharp:1.1407e-04 L7_sharp:1.0782e-04 L8_sharp:1.9526e-04 L9_sharp:2.1622e-04 L10_sharp:2.2236e-04 L11_sharp:3.7389e-04 L12_sharp:3.0620e-03 total_fnorm:3.9750e+01 total_l1_linf:1.0445e+05 total_spectral:1.9625e+01 L1_fnorm:6.2500e+00 L2_fnorm:6.0938e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.2500e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.2500e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.2188e+00 L10_fnorm:6.2500e+00 L11_fnorm:6.2500e+00 L12_fnorm:6.2500e+00 L1_l1linf:1.6094e+00 L2_l1linf:1.4453e+00 L3_l1linf:1.4531e+00 L4_l1linf:1.5469e+00 L5_l1linf:1.5938e+00 L6_l1linf:1.5625e+00 L7_l1linf:1.5625e+00 L8_l1linf:1.5078e+00 L9_l1linf:1.4688e+00 L10_l1linf:1.4062e+00 L11_l1linf:1.4141e+00 L12_l1linf:1.4531e+00 L1_spectral:7.9417e-02 L2_spectral:7.6658e-02 L3_spectral:7.8022e-02 L4_spectral:7.9003e-02 L5_spectral:7.8713e-02 L6_spectral:7.8442e-02 L7_spectral:7.8304e-02 L8_spectral:7.7315e-02 L9_spectral:7.8598e-02 L10_spectral:7.8347e-02 L11_spectral:7.8518e-02 L12_spectral:7.7944e-02 train_time:232913ms step_avg:44.79ms +[2025-09-11 09:46:24] [Rank 0] PRINT: step:5200/10000 val_loss:4.5165 total_sharp:5.4485e-04 L1_sharp:4.1373e-04 L2_sharp:1.3546e-04 L3_sharp:1.2857e-04 L4_sharp:5.1968e-05 L5_sharp:6.7219e-05 L6_sharp:1.1407e-04 L7_sharp:1.0782e-04 L8_sharp:1.9526e-04 L9_sharp:2.1622e-04 L10_sharp:2.2236e-04 L11_sharp:3.7389e-04 L12_sharp:3.0620e-03 total_fnorm:3.9750e+01 total_l1_linf:1.0445e+05 total_spectral:1.9625e+01 L1_fnorm:6.2500e+00 L2_fnorm:6.0938e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.2500e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.2500e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.2188e+00 L10_fnorm:6.2500e+00 L11_fnorm:6.2500e+00 L12_fnorm:6.2500e+00 L1_l1linf:1.6094e+00 L2_l1linf:1.4453e+00 L3_l1linf:1.4531e+00 L4_l1linf:1.5469e+00 L5_l1linf:1.5938e+00 
L6_l1linf:1.5625e+00 L7_l1linf:1.5625e+00 L8_l1linf:1.5078e+00 L9_l1linf:1.4688e+00 L10_l1linf:1.4062e+00 L11_l1linf:1.4141e+00 L12_l1linf:1.4531e+00 L1_spectral:7.9417e-02 L2_spectral:7.6658e-02 L3_spectral:7.8022e-02 L4_spectral:7.9003e-02 L5_spectral:7.8713e-02 L6_spectral:7.8442e-02 L7_spectral:7.8304e-02 L8_spectral:7.7315e-02 L9_spectral:7.8598e-02 L10_spectral:7.8347e-02 L11_spectral:7.8518e-02 L12_spectral:7.7944e-02 train_time:232913ms step_avg:44.79ms +[2025-09-11 09:46:26] [Rank 0] step:5201/10000 train_time:234650ms step_avg:45.12ms +[2025-09-11 09:46:26] [Rank 0] step:5201/10000 train_time:234650ms step_avg:45.12ms +[2025-09-11 09:46:27] [Rank 0] step:5221/10000 train_time:235366ms step_avg:45.08ms +[2025-09-11 09:46:27] [Rank 0] step:5221/10000 train_time:235366ms step_avg:45.08ms +[2025-09-11 09:46:27] [Rank 0] step:5241/10000 train_time:236057ms step_avg:45.04ms +[2025-09-11 09:46:27] [Rank 0] step:5241/10000 train_time:236057ms step_avg:45.04ms +[2025-09-11 09:46:28] [Rank 0] step:5261/10000 train_time:236747ms step_avg:45.00ms +[2025-09-11 09:46:28] [Rank 0] step:5261/10000 train_time:236747ms step_avg:45.00ms +[2025-09-11 09:46:29] [Rank 0] step:5281/10000 train_time:237438ms step_avg:44.96ms +[2025-09-11 09:46:29] [Rank 0] step:5281/10000 train_time:237438ms step_avg:44.96ms +[2025-09-11 09:46:29] [Rank 0] step:5301/10000 train_time:238128ms step_avg:44.92ms +[2025-09-11 09:46:29] [Rank 0] step:5301/10000 train_time:238128ms step_avg:44.92ms +[2025-09-11 09:46:30] [Rank 0] step:5321/10000 train_time:238817ms step_avg:44.88ms +[2025-09-11 09:46:30] [Rank 0] step:5321/10000 train_time:238817ms step_avg:44.88ms +[2025-09-11 09:46:31] [Rank 0] step:5341/10000 train_time:239507ms step_avg:44.84ms +[2025-09-11 09:46:31] [Rank 0] step:5341/10000 train_time:239507ms step_avg:44.84ms +[2025-09-11 09:46:31] [Rank 0] step:5361/10000 train_time:240197ms step_avg:44.80ms +[2025-09-11 09:46:31] [Rank 0] step:5361/10000 train_time:240197ms step_avg:44.80ms 
+[2025-09-11 09:46:32] [Rank 0] step:5381/10000 train_time:240888ms step_avg:44.77ms +[2025-09-11 09:46:32] [Rank 0] step:5381/10000 train_time:240888ms step_avg:44.77ms +[2025-09-11 09:46:33] [Rank 0] step:5401/10000 train_time:241576ms step_avg:44.73ms +[2025-09-11 09:46:33] [Rank 0] step:5401/10000 train_time:241576ms step_avg:44.73ms +[2025-09-11 09:46:33] [Rank 0] step:5421/10000 train_time:242267ms step_avg:44.69ms +[2025-09-11 09:46:33] [Rank 0] step:5421/10000 train_time:242267ms step_avg:44.69ms +[2025-09-11 09:46:34] [Rank 0] step:5441/10000 train_time:242956ms step_avg:44.65ms +[2025-09-11 09:46:34] [Rank 0] step:5441/10000 train_time:242956ms step_avg:44.65ms +[2025-09-11 09:46:35] [Rank 0] step:5461/10000 train_time:243646ms step_avg:44.62ms +[2025-09-11 09:46:35] [Rank 0] step:5461/10000 train_time:243646ms step_avg:44.62ms +[2025-09-11 09:46:36] [Rank 0] step:5481/10000 train_time:244336ms step_avg:44.58ms +[2025-09-11 09:46:36] [Rank 0] step:5481/10000 train_time:244336ms step_avg:44.58ms +[2025-09-11 09:46:36] [Rank 0] step:5501/10000 train_time:245025ms step_avg:44.54ms +[2025-09-11 09:46:36] [Rank 0] step:5501/10000 train_time:245025ms step_avg:44.54ms +[2025-09-11 09:46:37] [Rank 0] step:5521/10000 train_time:245716ms step_avg:44.51ms +[2025-09-11 09:46:37] [Rank 0] step:5521/10000 train_time:245716ms step_avg:44.51ms +[2025-09-11 09:46:38] [Rank 0] step:5541/10000 train_time:246407ms step_avg:44.47ms +[2025-09-11 09:46:38] [Rank 0] step:5541/10000 train_time:246407ms step_avg:44.47ms +[2025-09-11 09:46:38] [Rank 0] step:5561/10000 train_time:247098ms step_avg:44.43ms +[2025-09-11 09:46:38] [Rank 0] step:5561/10000 train_time:247098ms step_avg:44.43ms +[2025-09-11 09:46:39] [Rank 0] step:5581/10000 train_time:247788ms step_avg:44.40ms +[2025-09-11 09:46:39] [Rank 0] step:5581/10000 train_time:247788ms step_avg:44.40ms +[2025-09-11 09:46:40] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 09:46:40] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 09:46:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 09:46:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 09:46:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 09:46:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 09:46:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:46:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:46:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 09:46:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 09:46:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 09:46:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 09:46:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 09:46:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 09:46:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 09:46:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 09:46:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 09:46:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 09:46:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 09:46:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 09:46:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 09:46:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 09:46:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 09:46:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 09:46:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 09:46:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 09:46:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 09:46:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 09:46:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 09:46:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 09:46:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 09:46:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 09:46:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 09:46:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 09:46:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 09:46:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 09:46:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 09:46:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 09:46:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 09:46:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 09:46:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 09:46:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 09:46:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 09:46:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 09:46:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:46:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:46:50] [Rank 0] PRINT: step:5600/10000 val_loss:4.4948 total_sharp:3.5245e-04 L1_sharp:3.3266e-04 L2_sharp:1.2916e-04 L3_sharp:3.5986e-05 L4_sharp:4.7386e-05 L5_sharp:7.6267e-05 L6_sharp:4.5721e-05 L7_sharp:3.8911e-05 L8_sharp:1.8063e-04 L9_sharp:1.8365e-04 L10_sharp:2.0168e-04 L11_sharp:3.2481e-04 L12_sharp:1.1312e-03 total_fnorm:4.0250e+01 total_l1_linf:1.0547e+05 total_spectral:1.9875e+01 L1_fnorm:6.1875e+00 L2_fnorm:6.0312e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.2500e+00 L5_fnorm:6.1875e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.1875e+00 L8_fnorm:5.9688e+00 L9_fnorm:6.1562e+00 L10_fnorm:6.2188e+00 L11_fnorm:6.2188e+00 L12_fnorm:6.2188e+00 L1_l1linf:1.5938e+00 L2_l1linf:1.4531e+00 L3_l1linf:1.4609e+00 L4_l1linf:1.5234e+00 L5_l1linf:1.5625e+00 L6_l1linf:1.5312e+00 L7_l1linf:1.5859e+00 L8_l1linf:1.4609e+00 L9_l1linf:1.4219e+00 L10_l1linf:1.3906e+00 L11_l1linf:1.4062e+00 L12_l1linf:1.4688e+00 L1_spectral:7.9130e-02 L2_spectral:7.7262e-02 L3_spectral:7.8604e-02 L4_spectral:7.8818e-02 L5_spectral:7.8695e-02 L6_spectral:7.8777e-02 L7_spectral:7.8309e-02 L8_spectral:7.7215e-02 L9_spectral:7.8293e-02 L10_spectral:7.9061e-02 L11_spectral:7.8625e-02 L12_spectral:7.8642e-02 train_time:248458ms step_avg:44.37ms +[2025-09-11 09:46:50] [Rank 0] PRINT: step:5600/10000 
val_loss:4.4948 total_sharp:3.5245e-04 L1_sharp:3.3266e-04 L2_sharp:1.2916e-04 L3_sharp:3.5986e-05 L4_sharp:4.7386e-05 L5_sharp:7.6267e-05 L6_sharp:4.5721e-05 L7_sharp:3.8911e-05 L8_sharp:1.8063e-04 L9_sharp:1.8365e-04 L10_sharp:2.0168e-04 L11_sharp:3.2481e-04 L12_sharp:1.1312e-03 total_fnorm:4.0250e+01 total_l1_linf:1.0547e+05 total_spectral:1.9875e+01 L1_fnorm:6.1875e+00 L2_fnorm:6.0312e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.2500e+00 L5_fnorm:6.1875e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.1875e+00 L8_fnorm:5.9688e+00 L9_fnorm:6.1562e+00 L10_fnorm:6.2188e+00 L11_fnorm:6.2188e+00 L12_fnorm:6.2188e+00 L1_l1linf:1.5938e+00 L2_l1linf:1.4531e+00 L3_l1linf:1.4609e+00 L4_l1linf:1.5234e+00 L5_l1linf:1.5625e+00 L6_l1linf:1.5312e+00 L7_l1linf:1.5859e+00 L8_l1linf:1.4609e+00 L9_l1linf:1.4219e+00 L10_l1linf:1.3906e+00 L11_l1linf:1.4062e+00 L12_l1linf:1.4688e+00 L1_spectral:7.9130e-02 L2_spectral:7.7262e-02 L3_spectral:7.8604e-02 L4_spectral:7.8818e-02 L5_spectral:7.8695e-02 L6_spectral:7.8777e-02 L7_spectral:7.8309e-02 L8_spectral:7.7215e-02 L9_spectral:7.8293e-02 L10_spectral:7.9061e-02 L11_spectral:7.8625e-02 L12_spectral:7.8642e-02 train_time:248458ms step_avg:44.37ms +[2025-09-11 09:46:52] [Rank 0] step:5601/10000 train_time:250412ms step_avg:44.71ms +[2025-09-11 09:46:52] [Rank 0] step:5601/10000 train_time:250412ms step_avg:44.71ms +[2025-09-11 09:46:53] [Rank 0] step:5621/10000 train_time:251119ms step_avg:44.68ms +[2025-09-11 09:46:53] [Rank 0] step:5621/10000 train_time:251119ms step_avg:44.68ms +[2025-09-11 09:46:53] [Rank 0] step:5641/10000 train_time:251809ms step_avg:44.64ms +[2025-09-11 09:46:53] [Rank 0] step:5641/10000 train_time:251809ms step_avg:44.64ms +[2025-09-11 09:46:54] [Rank 0] step:5661/10000 train_time:252499ms step_avg:44.60ms +[2025-09-11 09:46:54] [Rank 0] step:5661/10000 train_time:252499ms step_avg:44.60ms +[2025-09-11 09:46:55] [Rank 0] step:5681/10000 train_time:253190ms step_avg:44.57ms +[2025-09-11 09:46:55] [Rank 0] step:5681/10000 
train_time:253190ms step_avg:44.57ms +[2025-09-11 09:46:56] [Rank 0] step:5701/10000 train_time:253883ms step_avg:44.53ms +[2025-09-11 09:46:56] [Rank 0] step:5701/10000 train_time:253883ms step_avg:44.53ms +[2025-09-11 09:46:56] [Rank 0] step:5721/10000 train_time:254571ms step_avg:44.50ms +[2025-09-11 09:46:56] [Rank 0] step:5721/10000 train_time:254571ms step_avg:44.50ms +[2025-09-11 09:46:57] [Rank 0] step:5741/10000 train_time:255263ms step_avg:44.46ms +[2025-09-11 09:46:57] [Rank 0] step:5741/10000 train_time:255263ms step_avg:44.46ms +[2025-09-11 09:46:58] [Rank 0] step:5761/10000 train_time:255954ms step_avg:44.43ms +[2025-09-11 09:46:58] [Rank 0] step:5761/10000 train_time:255954ms step_avg:44.43ms +[2025-09-11 09:46:58] [Rank 0] step:5781/10000 train_time:256647ms step_avg:44.39ms +[2025-09-11 09:46:58] [Rank 0] step:5781/10000 train_time:256647ms step_avg:44.39ms +[2025-09-11 09:46:59] [Rank 0] step:5801/10000 train_time:257339ms step_avg:44.36ms +[2025-09-11 09:46:59] [Rank 0] step:5801/10000 train_time:257339ms step_avg:44.36ms +[2025-09-11 09:47:00] [Rank 0] step:5821/10000 train_time:258029ms step_avg:44.33ms +[2025-09-11 09:47:00] [Rank 0] step:5821/10000 train_time:258029ms step_avg:44.33ms +[2025-09-11 09:47:00] [Rank 0] step:5841/10000 train_time:258721ms step_avg:44.29ms +[2025-09-11 09:47:00] [Rank 0] step:5841/10000 train_time:258721ms step_avg:44.29ms +[2025-09-11 09:47:01] [Rank 0] step:5861/10000 train_time:259411ms step_avg:44.26ms +[2025-09-11 09:47:01] [Rank 0] step:5861/10000 train_time:259411ms step_avg:44.26ms +[2025-09-11 09:47:02] [Rank 0] step:5881/10000 train_time:260103ms step_avg:44.23ms +[2025-09-11 09:47:02] [Rank 0] step:5881/10000 train_time:260103ms step_avg:44.23ms +[2025-09-11 09:47:02] [Rank 0] step:5901/10000 train_time:260793ms step_avg:44.19ms +[2025-09-11 09:47:02] [Rank 0] step:5901/10000 train_time:260793ms step_avg:44.19ms +[2025-09-11 09:47:03] [Rank 0] step:5921/10000 train_time:261487ms step_avg:44.16ms 
+[2025-09-11 09:47:03] [Rank 0] step:5921/10000 train_time:261487ms step_avg:44.16ms +[2025-09-11 09:47:04] [Rank 0] step:5941/10000 train_time:262179ms step_avg:44.13ms +[2025-09-11 09:47:04] [Rank 0] step:5941/10000 train_time:262179ms step_avg:44.13ms +[2025-09-11 09:47:05] [Rank 0] step:5961/10000 train_time:262869ms step_avg:44.10ms +[2025-09-11 09:47:05] [Rank 0] step:5961/10000 train_time:262869ms step_avg:44.10ms +[2025-09-11 09:47:05] [Rank 0] step:5981/10000 train_time:263561ms step_avg:44.07ms +[2025-09-11 09:47:05] [Rank 0] step:5981/10000 train_time:263561ms step_avg:44.07ms +[2025-09-11 09:47:06] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 09:47:06] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 09:47:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 09:47:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 09:47:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 09:47:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 09:47:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:47:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:47:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 09:47:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 09:47:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 09:47:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 09:47:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 09:47:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 09:47:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 09:47:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 09:47:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 09:47:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 09:47:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 09:47:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 09:47:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 09:47:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 09:47:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 09:47:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 09:47:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 09:47:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 09:47:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 09:47:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 09:47:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 09:47:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 09:47:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 09:47:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 09:47:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 09:47:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 09:47:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 09:47:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 09:47:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 09:47:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 09:47:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 09:47:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 09:47:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 09:47:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 09:47:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 09:47:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 09:47:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:47:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:47:16] [Rank 0] PRINT: step:6000/10000 val_loss:4.4474 total_sharp:3.4816e-04 L1_sharp:3.9809e-04 L2_sharp:9.2085e-05 L3_sharp:5.3320e-05 L4_sharp:4.5140e-05 L5_sharp:5.9703e-05 L6_sharp:5.2535e-05 L7_sharp:8.4767e-05 L8_sharp:1.5745e-04 L9_sharp:1.5379e-04 L10_sharp:1.9870e-04 L11_sharp:3.0657e-04 L12_sharp:1.2516e-03 total_fnorm:4.0750e+01 total_l1_linf:1.0547e+05 total_spectral:2.0125e+01 L1_fnorm:6.2188e+00 L2_fnorm:6.0625e+00 L3_fnorm:6.1875e+00 L4_fnorm:6.2500e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.0000e+00 L9_fnorm:6.1562e+00 L10_fnorm:6.2188e+00 L11_fnorm:6.1875e+00 L12_fnorm:6.1875e+00 L1_l1linf:1.5781e+00 L2_l1linf:1.4453e+00 L3_l1linf:1.4141e+00 L4_l1linf:1.5391e+00 L5_l1linf:1.5234e+00 L6_l1linf:1.5625e+00 L7_l1linf:1.5625e+00 L8_l1linf:1.5000e+00 L9_l1linf:1.4297e+00 L10_l1linf:1.4062e+00 L11_l1linf:1.3828e+00 L12_l1linf:1.4609e+00 L1_spectral:7.9625e-02 L2_spectral:7.6955e-02 L3_spectral:7.8693e-02 L4_spectral:7.9079e-02 L5_spectral:7.8897e-02 L6_spectral:7.9324e-02 L7_spectral:7.8601e-02 L8_spectral:7.7514e-02 L9_spectral:7.9086e-02 L10_spectral:7.8947e-02 L11_spectral:7.9105e-02 L12_spectral:7.8674e-02 train_time:264234ms step_avg:44.04ms +[2025-09-11 09:47:16] [Rank 0] PRINT: step:6000/10000 val_loss:4.4474 total_sharp:3.4816e-04 L1_sharp:3.9809e-04 L2_sharp:9.2085e-05 L3_sharp:5.3320e-05 L4_sharp:4.5140e-05 L5_sharp:5.9703e-05 L6_sharp:5.2535e-05 L7_sharp:8.4767e-05 L8_sharp:1.5745e-04 L9_sharp:1.5379e-04 L10_sharp:1.9870e-04 L11_sharp:3.0657e-04 L12_sharp:1.2516e-03 total_fnorm:4.0750e+01 total_l1_linf:1.0547e+05 total_spectral:2.0125e+01 L1_fnorm:6.2188e+00 L2_fnorm:6.0625e+00 L3_fnorm:6.1875e+00 L4_fnorm:6.2500e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.0000e+00 L9_fnorm:6.1562e+00 L10_fnorm:6.2188e+00 L11_fnorm:6.1875e+00 L12_fnorm:6.1875e+00 L1_l1linf:1.5781e+00 L2_l1linf:1.4453e+00 L3_l1linf:1.4141e+00 L4_l1linf:1.5391e+00 L5_l1linf:1.5234e+00 
L6_l1linf:1.5625e+00 L7_l1linf:1.5625e+00 L8_l1linf:1.5000e+00 L9_l1linf:1.4297e+00 L10_l1linf:1.4062e+00 L11_l1linf:1.3828e+00 L12_l1linf:1.4609e+00 L1_spectral:7.9625e-02 L2_spectral:7.6955e-02 L3_spectral:7.8693e-02 L4_spectral:7.9079e-02 L5_spectral:7.8897e-02 L6_spectral:7.9324e-02 L7_spectral:7.8601e-02 L8_spectral:7.7514e-02 L9_spectral:7.9086e-02 L10_spectral:7.8947e-02 L11_spectral:7.9105e-02 L12_spectral:7.8674e-02 train_time:264234ms step_avg:44.04ms +[2025-09-11 09:47:18] [Rank 0] step:6001/10000 train_time:266082ms step_avg:44.34ms +[2025-09-11 09:47:18] [Rank 0] step:6001/10000 train_time:266082ms step_avg:44.34ms +[2025-09-11 09:47:19] [Rank 0] step:6021/10000 train_time:266798ms step_avg:44.31ms +[2025-09-11 09:47:19] [Rank 0] step:6021/10000 train_time:266798ms step_avg:44.31ms +[2025-09-11 09:47:20] [Rank 0] step:6041/10000 train_time:267495ms step_avg:44.28ms +[2025-09-11 09:47:20] [Rank 0] step:6041/10000 train_time:267495ms step_avg:44.28ms +[2025-09-11 09:47:20] [Rank 0] step:6061/10000 train_time:268189ms step_avg:44.25ms +[2025-09-11 09:47:20] [Rank 0] step:6061/10000 train_time:268189ms step_avg:44.25ms +[2025-09-11 09:47:21] [Rank 0] step:6081/10000 train_time:268886ms step_avg:44.22ms +[2025-09-11 09:47:21] [Rank 0] step:6081/10000 train_time:268886ms step_avg:44.22ms +[2025-09-11 09:47:22] [Rank 0] step:6101/10000 train_time:269579ms step_avg:44.19ms +[2025-09-11 09:47:22] [Rank 0] step:6101/10000 train_time:269579ms step_avg:44.19ms +[2025-09-11 09:47:22] [Rank 0] step:6121/10000 train_time:270273ms step_avg:44.16ms +[2025-09-11 09:47:22] [Rank 0] step:6121/10000 train_time:270273ms step_avg:44.16ms +[2025-09-11 09:47:23] [Rank 0] step:6141/10000 train_time:270967ms step_avg:44.12ms +[2025-09-11 09:47:23] [Rank 0] step:6141/10000 train_time:270967ms step_avg:44.12ms +[2025-09-11 09:47:24] [Rank 0] step:6161/10000 train_time:271660ms step_avg:44.09ms +[2025-09-11 09:47:24] [Rank 0] step:6161/10000 train_time:271660ms step_avg:44.09ms 
+[2025-09-11 09:47:24] [Rank 0] step:6181/10000 train_time:272351ms step_avg:44.06ms +[2025-09-11 09:47:24] [Rank 0] step:6181/10000 train_time:272351ms step_avg:44.06ms +[2025-09-11 09:47:25] [Rank 0] step:6201/10000 train_time:273045ms step_avg:44.03ms +[2025-09-11 09:47:25] [Rank 0] step:6201/10000 train_time:273045ms step_avg:44.03ms +[2025-09-11 09:47:26] [Rank 0] step:6221/10000 train_time:273739ms step_avg:44.00ms +[2025-09-11 09:47:26] [Rank 0] step:6221/10000 train_time:273739ms step_avg:44.00ms +[2025-09-11 09:47:27] [Rank 0] step:6241/10000 train_time:274435ms step_avg:43.97ms +[2025-09-11 09:47:27] [Rank 0] step:6241/10000 train_time:274435ms step_avg:43.97ms +[2025-09-11 09:47:27] [Rank 0] step:6261/10000 train_time:275127ms step_avg:43.94ms +[2025-09-11 09:47:27] [Rank 0] step:6261/10000 train_time:275127ms step_avg:43.94ms +[2025-09-11 09:47:28] [Rank 0] step:6281/10000 train_time:275821ms step_avg:43.91ms +[2025-09-11 09:47:28] [Rank 0] step:6281/10000 train_time:275821ms step_avg:43.91ms +[2025-09-11 09:47:29] [Rank 0] step:6301/10000 train_time:276513ms step_avg:43.88ms +[2025-09-11 09:47:29] [Rank 0] step:6301/10000 train_time:276513ms step_avg:43.88ms +[2025-09-11 09:47:29] [Rank 0] step:6321/10000 train_time:277209ms step_avg:43.86ms +[2025-09-11 09:47:29] [Rank 0] step:6321/10000 train_time:277209ms step_avg:43.86ms +[2025-09-11 09:47:30] [Rank 0] step:6341/10000 train_time:277904ms step_avg:43.83ms +[2025-09-11 09:47:30] [Rank 0] step:6341/10000 train_time:277904ms step_avg:43.83ms +[2025-09-11 09:47:31] [Rank 0] step:6361/10000 train_time:278598ms step_avg:43.80ms +[2025-09-11 09:47:31] [Rank 0] step:6361/10000 train_time:278598ms step_avg:43.80ms +[2025-09-11 09:47:31] [Rank 0] step:6381/10000 train_time:279292ms step_avg:43.77ms +[2025-09-11 09:47:31] [Rank 0] step:6381/10000 train_time:279292ms step_avg:43.77ms +[2025-09-11 09:47:32] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 09:47:32] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 09:47:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 09:47:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 09:47:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 09:47:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 09:47:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:47:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:47:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 09:47:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 09:47:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 09:47:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 09:47:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 09:47:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 09:47:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 09:47:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 09:47:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 09:47:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 09:47:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 09:47:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 09:47:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 09:47:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 09:47:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 09:47:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 09:47:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 09:47:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 09:47:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 09:47:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 09:47:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 09:47:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 09:47:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 09:47:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 09:47:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 09:47:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 09:47:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 09:47:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 09:47:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 09:47:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 09:47:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 09:47:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 09:47:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 09:47:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 09:47:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 09:47:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 09:47:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:47:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:47:44] [Rank 0] PRINT: step:6400/10000 val_loss:4.4153 total_sharp:4.0994e-04 L1_sharp:4.1139e-04 L2_sharp:1.6980e-04 L3_sharp:5.6456e-05 L4_sharp:3.6481e-05 L5_sharp:5.5821e-05 L6_sharp:7.7491e-05 L7_sharp:5.3931e-05 L8_sharp:1.6908e-04 L9_sharp:1.8376e-04 L10_sharp:1.9181e-04 L11_sharp:3.4364e-04 L12_sharp:1.5975e-03 total_fnorm:3.4250e+01 total_l1_linf:8.8576e+04 total_spectral:1.7500e+01 L1_fnorm:5.5938e+00 L2_fnorm:5.4688e+00 L3_fnorm:5.5938e+00 L4_fnorm:5.6250e+00 L5_fnorm:5.5938e+00 L6_fnorm:5.5938e+00 L7_fnorm:5.6250e+00 L8_fnorm:5.4062e+00 L9_fnorm:5.5312e+00 L10_fnorm:5.5938e+00 L11_fnorm:5.5938e+00 L12_fnorm:5.5938e+00 L1_l1linf:1.3906e+00 L2_l1linf:1.2891e+00 L3_l1linf:1.2500e+00 L4_l1linf:1.3203e+00 L5_l1linf:1.3438e+00 L6_l1linf:1.3359e+00 L7_l1linf:1.3906e+00 L8_l1linf:1.2891e+00 L9_l1linf:1.2266e+00 L10_l1linf:1.2031e+00 L11_l1linf:1.1797e+00 L12_l1linf:1.2969e+00 L1_spectral:7.3218e-02 L2_spectral:7.0559e-02 L3_spectral:7.2606e-02 L4_spectral:7.2796e-02 L5_spectral:7.2172e-02 L6_spectral:7.2065e-02 L7_spectral:7.2424e-02 L8_spectral:7.1117e-02 L9_spectral:7.2514e-02 L10_spectral:7.2819e-02 L11_spectral:7.2768e-02 L12_spectral:7.2707e-02 train_time:279964ms step_avg:43.74ms +[2025-09-11 09:47:44] [Rank 0] PRINT: step:6400/10000 
val_loss:4.4153 total_sharp:4.0994e-04 L1_sharp:4.1139e-04 L2_sharp:1.6980e-04 L3_sharp:5.6456e-05 L4_sharp:3.6481e-05 L5_sharp:5.5821e-05 L6_sharp:7.7491e-05 L7_sharp:5.3931e-05 L8_sharp:1.6908e-04 L9_sharp:1.8376e-04 L10_sharp:1.9181e-04 L11_sharp:3.4364e-04 L12_sharp:1.5975e-03 total_fnorm:3.4250e+01 total_l1_linf:8.8576e+04 total_spectral:1.7500e+01 L1_fnorm:5.5938e+00 L2_fnorm:5.4688e+00 L3_fnorm:5.5938e+00 L4_fnorm:5.6250e+00 L5_fnorm:5.5938e+00 L6_fnorm:5.5938e+00 L7_fnorm:5.6250e+00 L8_fnorm:5.4062e+00 L9_fnorm:5.5312e+00 L10_fnorm:5.5938e+00 L11_fnorm:5.5938e+00 L12_fnorm:5.5938e+00 L1_l1linf:1.3906e+00 L2_l1linf:1.2891e+00 L3_l1linf:1.2500e+00 L4_l1linf:1.3203e+00 L5_l1linf:1.3438e+00 L6_l1linf:1.3359e+00 L7_l1linf:1.3906e+00 L8_l1linf:1.2891e+00 L9_l1linf:1.2266e+00 L10_l1linf:1.2031e+00 L11_l1linf:1.1797e+00 L12_l1linf:1.2969e+00 L1_spectral:7.3218e-02 L2_spectral:7.0559e-02 L3_spectral:7.2606e-02 L4_spectral:7.2796e-02 L5_spectral:7.2172e-02 L6_spectral:7.2065e-02 L7_spectral:7.2424e-02 L8_spectral:7.1117e-02 L9_spectral:7.2514e-02 L10_spectral:7.2819e-02 L11_spectral:7.2768e-02 L12_spectral:7.2707e-02 train_time:279964ms step_avg:43.74ms +[2025-09-11 09:47:46] [Rank 0] step:6401/10000 train_time:281553ms step_avg:43.99ms +[2025-09-11 09:47:46] [Rank 0] step:6401/10000 train_time:281553ms step_avg:43.99ms +[2025-09-11 09:47:47] [Rank 0] step:6421/10000 train_time:282276ms step_avg:43.96ms +[2025-09-11 09:47:47] [Rank 0] step:6421/10000 train_time:282276ms step_avg:43.96ms +[2025-09-11 09:47:47] [Rank 0] step:6441/10000 train_time:282970ms step_avg:43.93ms +[2025-09-11 09:47:47] [Rank 0] step:6441/10000 train_time:282970ms step_avg:43.93ms +[2025-09-11 09:47:48] [Rank 0] step:6461/10000 train_time:283663ms step_avg:43.90ms +[2025-09-11 09:47:48] [Rank 0] step:6461/10000 train_time:283663ms step_avg:43.90ms +[2025-09-11 09:47:49] [Rank 0] step:6481/10000 train_time:284367ms step_avg:43.88ms +[2025-09-11 09:47:49] [Rank 0] step:6481/10000 
train_time:284367ms step_avg:43.88ms +[2025-09-11 09:47:49] [Rank 0] step:6501/10000 train_time:285062ms step_avg:43.85ms +[2025-09-11 09:47:49] [Rank 0] step:6501/10000 train_time:285062ms step_avg:43.85ms +[2025-09-11 09:47:50] [Rank 0] step:6521/10000 train_time:285755ms step_avg:43.82ms +[2025-09-11 09:47:50] [Rank 0] step:6521/10000 train_time:285755ms step_avg:43.82ms +[2025-09-11 09:47:51] [Rank 0] step:6541/10000 train_time:286447ms step_avg:43.79ms +[2025-09-11 09:47:51] [Rank 0] step:6541/10000 train_time:286447ms step_avg:43.79ms +[2025-09-11 09:47:51] [Rank 0] step:6561/10000 train_time:287140ms step_avg:43.76ms +[2025-09-11 09:47:51] [Rank 0] step:6561/10000 train_time:287140ms step_avg:43.76ms +[2025-09-11 09:47:52] [Rank 0] step:6581/10000 train_time:287833ms step_avg:43.74ms +[2025-09-11 09:47:52] [Rank 0] step:6581/10000 train_time:287833ms step_avg:43.74ms +[2025-09-11 09:47:53] [Rank 0] step:6601/10000 train_time:288526ms step_avg:43.71ms +[2025-09-11 09:47:53] [Rank 0] step:6601/10000 train_time:288526ms step_avg:43.71ms +[2025-09-11 09:47:54] [Rank 0] step:6621/10000 train_time:289218ms step_avg:43.68ms +[2025-09-11 09:47:54] [Rank 0] step:6621/10000 train_time:289218ms step_avg:43.68ms +[2025-09-11 09:47:54] [Rank 0] step:6641/10000 train_time:289912ms step_avg:43.65ms +[2025-09-11 09:47:54] [Rank 0] step:6641/10000 train_time:289912ms step_avg:43.65ms +[2025-09-11 09:47:55] [Rank 0] step:6661/10000 train_time:290606ms step_avg:43.63ms +[2025-09-11 09:47:55] [Rank 0] step:6661/10000 train_time:290606ms step_avg:43.63ms +[2025-09-11 09:47:56] [Rank 0] step:6681/10000 train_time:291307ms step_avg:43.60ms +[2025-09-11 09:47:56] [Rank 0] step:6681/10000 train_time:291307ms step_avg:43.60ms +[2025-09-11 09:47:56] [Rank 0] step:6701/10000 train_time:292005ms step_avg:43.58ms +[2025-09-11 09:47:56] [Rank 0] step:6701/10000 train_time:292005ms step_avg:43.58ms +[2025-09-11 09:47:57] [Rank 0] step:6721/10000 train_time:292706ms step_avg:43.55ms 
+[2025-09-11 09:47:57] [Rank 0] step:6721/10000 train_time:292706ms step_avg:43.55ms +[2025-09-11 09:47:58] [Rank 0] step:6741/10000 train_time:293406ms step_avg:43.53ms +[2025-09-11 09:47:58] [Rank 0] step:6741/10000 train_time:293406ms step_avg:43.53ms +[2025-09-11 09:47:58] [Rank 0] step:6761/10000 train_time:294105ms step_avg:43.50ms +[2025-09-11 09:47:58] [Rank 0] step:6761/10000 train_time:294105ms step_avg:43.50ms +[2025-09-11 09:47:59] [Rank 0] step:6781/10000 train_time:294805ms step_avg:43.48ms +[2025-09-11 09:47:59] [Rank 0] step:6781/10000 train_time:294805ms step_avg:43.48ms +[2025-09-11 09:48:00] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 09:48:00] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 09:48:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 09:48:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 09:48:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 09:48:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 09:48:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:48:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:48:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 09:48:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 09:48:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 09:48:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 09:48:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 09:48:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 09:48:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 09:48:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 09:48:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 09:48:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 09:48:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 09:48:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 09:48:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 09:48:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 09:48:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 09:48:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 09:48:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 09:48:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 09:48:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 09:48:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 09:48:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 09:48:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 09:48:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 09:48:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 09:48:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 09:48:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 09:48:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 09:48:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 09:48:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 09:48:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 09:48:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 09:48:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 09:48:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 09:48:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 09:48:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 09:48:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 09:48:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:48:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:48:10] [Rank 0] PRINT: step:6800/10000 val_loss:4.3840 total_sharp:3.3823e-04 L1_sharp:4.1062e-04 L2_sharp:7.5720e-05 L3_sharp:4.7047e-05 L4_sharp:5.0969e-05 L5_sharp:6.7105e-05 L6_sharp:7.0402e-05 L7_sharp:7.2931e-05 L8_sharp:1.9641e-04 L9_sharp:1.9867e-04 L10_sharp:2.2839e-04 L11_sharp:3.2274e-04 L12_sharp:1.4389e-03 total_fnorm:3.2500e+01 total_l1_linf:7.8848e+04 total_spectral:1.6250e+01 L1_fnorm:4.9688e+00 L2_fnorm:4.8125e+00 L3_fnorm:4.9375e+00 L4_fnorm:5.0000e+00 L5_fnorm:4.9062e+00 L6_fnorm:4.9375e+00 L7_fnorm:4.9688e+00 L8_fnorm:4.7500e+00 L9_fnorm:4.8750e+00 L10_fnorm:4.9375e+00 L11_fnorm:4.9062e+00 L12_fnorm:4.9375e+00 L1_l1linf:1.1875e+00 L2_l1linf:1.0938e+00 L3_l1linf:1.0781e+00 L4_l1linf:1.1484e+00 L5_l1linf:1.1719e+00 L6_l1linf:1.1641e+00 L7_l1linf:1.1719e+00 L8_l1linf:1.1016e+00 L9_l1linf:1.0703e+00 L10_l1linf:1.0156e+00 L11_l1linf:1.0156e+00 L12_l1linf:1.1328e+00 L1_spectral:6.5338e-02 L2_spectral:6.4039e-02 L3_spectral:6.5608e-02 L4_spectral:6.5519e-02 L5_spectral:6.5088e-02 L6_spectral:6.4972e-02 L7_spectral:6.5582e-02 L8_spectral:6.4404e-02 L9_spectral:6.5573e-02 L10_spectral:6.5825e-02 L11_spectral:6.5562e-02 L12_spectral:6.5539e-02 train_time:295484ms step_avg:43.45ms +[2025-09-11 09:48:10] [Rank 0] PRINT: step:6800/10000 val_loss:4.3840 total_sharp:3.3823e-04 L1_sharp:4.1062e-04 L2_sharp:7.5720e-05 L3_sharp:4.7047e-05 L4_sharp:5.0969e-05 L5_sharp:6.7105e-05 L6_sharp:7.0402e-05 L7_sharp:7.2931e-05 L8_sharp:1.9641e-04 L9_sharp:1.9867e-04 L10_sharp:2.2839e-04 L11_sharp:3.2274e-04 L12_sharp:1.4389e-03 total_fnorm:3.2500e+01 total_l1_linf:7.8848e+04 total_spectral:1.6250e+01 L1_fnorm:4.9688e+00 L2_fnorm:4.8125e+00 L3_fnorm:4.9375e+00 L4_fnorm:5.0000e+00 L5_fnorm:4.9062e+00 L6_fnorm:4.9375e+00 L7_fnorm:4.9688e+00 L8_fnorm:4.7500e+00 L9_fnorm:4.8750e+00 L10_fnorm:4.9375e+00 L11_fnorm:4.9062e+00 L12_fnorm:4.9375e+00 L1_l1linf:1.1875e+00 L2_l1linf:1.0938e+00 L3_l1linf:1.0781e+00 L4_l1linf:1.1484e+00 L5_l1linf:1.1719e+00 
L6_l1linf:1.1641e+00 L7_l1linf:1.1719e+00 L8_l1linf:1.1016e+00 L9_l1linf:1.0703e+00 L10_l1linf:1.0156e+00 L11_l1linf:1.0156e+00 L12_l1linf:1.1328e+00 L1_spectral:6.5338e-02 L2_spectral:6.4039e-02 L3_spectral:6.5608e-02 L4_spectral:6.5519e-02 L5_spectral:6.5088e-02 L6_spectral:6.4972e-02 L7_spectral:6.5582e-02 L8_spectral:6.4404e-02 L9_spectral:6.5573e-02 L10_spectral:6.5825e-02 L11_spectral:6.5562e-02 L12_spectral:6.5539e-02 train_time:295484ms step_avg:43.45ms +[2025-09-11 09:48:11] [Rank 0] step:6801/10000 train_time:296834ms step_avg:43.65ms +[2025-09-11 09:48:11] [Rank 0] step:6801/10000 train_time:296834ms step_avg:43.65ms +[2025-09-11 09:48:12] [Rank 0] step:6821/10000 train_time:297548ms step_avg:43.62ms +[2025-09-11 09:48:12] [Rank 0] step:6821/10000 train_time:297548ms step_avg:43.62ms +[2025-09-11 09:48:13] [Rank 0] step:6841/10000 train_time:298253ms step_avg:43.60ms +[2025-09-11 09:48:13] [Rank 0] step:6841/10000 train_time:298253ms step_avg:43.60ms +[2025-09-11 09:48:13] [Rank 0] step:6861/10000 train_time:298955ms step_avg:43.57ms +[2025-09-11 09:48:13] [Rank 0] step:6861/10000 train_time:298955ms step_avg:43.57ms +[2025-09-11 09:48:14] [Rank 0] step:6881/10000 train_time:299656ms step_avg:43.55ms +[2025-09-11 09:48:14] [Rank 0] step:6881/10000 train_time:299656ms step_avg:43.55ms +[2025-09-11 09:48:15] [Rank 0] step:6901/10000 train_time:300356ms step_avg:43.52ms +[2025-09-11 09:48:15] [Rank 0] step:6901/10000 train_time:300356ms step_avg:43.52ms +[2025-09-11 09:48:16] [Rank 0] step:6921/10000 train_time:301056ms step_avg:43.50ms +[2025-09-11 09:48:16] [Rank 0] step:6921/10000 train_time:301056ms step_avg:43.50ms +[2025-09-11 09:48:17] [Rank 0] step:6941/10000 train_time:302158ms step_avg:43.53ms +[2025-09-11 09:48:17] [Rank 0] step:6941/10000 train_time:302158ms step_avg:43.53ms +[2025-09-11 09:48:17] [Rank 0] step:6961/10000 train_time:302977ms step_avg:43.52ms +[2025-09-11 09:48:17] [Rank 0] step:6961/10000 train_time:302977ms step_avg:43.52ms 
+[2025-09-11 09:48:18] [Rank 0] step:6981/10000 train_time:303679ms step_avg:43.50ms +[2025-09-11 09:48:18] [Rank 0] step:6981/10000 train_time:303679ms step_avg:43.50ms +[2025-09-11 09:48:19] [Rank 0] step:7001/10000 train_time:304677ms step_avg:43.52ms +[2025-09-11 09:48:19] [Rank 0] step:7001/10000 train_time:304677ms step_avg:43.52ms +[2025-09-11 09:48:20] [Rank 0] step:7021/10000 train_time:305378ms step_avg:43.49ms +[2025-09-11 09:48:20] [Rank 0] step:7021/10000 train_time:305378ms step_avg:43.49ms +[2025-09-11 09:48:21] [Rank 0] step:7041/10000 train_time:306079ms step_avg:43.47ms +[2025-09-11 09:48:21] [Rank 0] step:7041/10000 train_time:306079ms step_avg:43.47ms +[2025-09-11 09:48:21] [Rank 0] step:7061/10000 train_time:306781ms step_avg:43.45ms +[2025-09-11 09:48:21] [Rank 0] step:7061/10000 train_time:306781ms step_avg:43.45ms +[2025-09-11 09:48:22] [Rank 0] step:7081/10000 train_time:307482ms step_avg:43.42ms +[2025-09-11 09:48:22] [Rank 0] step:7081/10000 train_time:307482ms step_avg:43.42ms +[2025-09-11 09:48:23] [Rank 0] step:7101/10000 train_time:308184ms step_avg:43.40ms +[2025-09-11 09:48:23] [Rank 0] step:7101/10000 train_time:308184ms step_avg:43.40ms +[2025-09-11 09:48:23] [Rank 0] step:7121/10000 train_time:308887ms step_avg:43.38ms +[2025-09-11 09:48:23] [Rank 0] step:7121/10000 train_time:308887ms step_avg:43.38ms +[2025-09-11 09:48:24] [Rank 0] step:7141/10000 train_time:309589ms step_avg:43.35ms +[2025-09-11 09:48:24] [Rank 0] step:7141/10000 train_time:309589ms step_avg:43.35ms +[2025-09-11 09:48:25] [Rank 0] step:7161/10000 train_time:310292ms step_avg:43.33ms +[2025-09-11 09:48:25] [Rank 0] step:7161/10000 train_time:310292ms step_avg:43.33ms +[2025-09-11 09:48:25] [Rank 0] step:7181/10000 train_time:310992ms step_avg:43.31ms +[2025-09-11 09:48:25] [Rank 0] step:7181/10000 train_time:310992ms step_avg:43.31ms +[2025-09-11 09:48:26] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 09:48:26] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 09:48:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 09:48:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 09:48:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 09:48:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 09:48:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:48:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:48:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 09:48:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 09:48:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 09:48:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 09:48:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 09:48:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 09:48:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 09:48:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 09:48:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 09:48:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 09:48:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 09:48:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 09:48:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 09:48:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 09:48:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 09:48:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 09:48:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 09:48:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 09:48:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 09:48:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 09:48:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 09:48:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 09:48:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 09:48:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 09:48:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 09:48:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 09:48:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 09:48:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 09:48:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 09:48:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 09:48:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 09:48:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 09:48:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 09:48:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 09:48:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 09:48:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 09:48:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:48:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:48:36] [Rank 0] PRINT: step:7200/10000 val_loss:4.3518 total_sharp:2.8718e-04 L1_sharp:3.1949e-04 L2_sharp:6.6435e-05 L3_sharp:4.3328e-05 L4_sharp:2.6630e-05 L5_sharp:8.1716e-05 L6_sharp:7.3895e-05 L7_sharp:7.4511e-05 L8_sharp:1.5572e-04 L9_sharp:1.5923e-04 L10_sharp:1.6066e-04 L11_sharp:2.5412e-04 L12_sharp:9.8773e-04 total_fnorm:2.8750e+01 total_l1_linf:6.4768e+04 total_spectral:1.4125e+01 L1_fnorm:4.3438e+00 L2_fnorm:4.1875e+00 L3_fnorm:4.2812e+00 L4_fnorm:4.3438e+00 L5_fnorm:4.3125e+00 L6_fnorm:4.3125e+00 L7_fnorm:4.3125e+00 L8_fnorm:4.1562e+00 L9_fnorm:4.2500e+00 L10_fnorm:4.2812e+00 L11_fnorm:4.2812e+00 L12_fnorm:4.2812e+00 L1_l1linf:9.9219e-01 L2_l1linf:9.1406e-01 L3_l1linf:8.8672e-01 L4_l1linf:9.8438e-01 L5_l1linf:9.6875e-01 L6_l1linf:9.7656e-01 L7_l1linf:9.8047e-01 L8_l1linf:9.2969e-01 L9_l1linf:8.8672e-01 L10_l1linf:8.4375e-01 L11_l1linf:8.2031e-01 L12_l1linf:9.8047e-01 L1_spectral:5.8549e-02 L2_spectral:5.5753e-02 L3_spectral:5.8041e-02 L4_spectral:5.8208e-02 L5_spectral:5.7988e-02 L6_spectral:5.7941e-02 L7_spectral:5.8079e-02 L8_spectral:5.6985e-02 L9_spectral:5.8362e-02 L10_spectral:5.8602e-02 L11_spectral:5.8250e-02 L12_spectral:5.8438e-02 train_time:311673ms step_avg:43.29ms +[2025-09-11 09:48:36] [Rank 0] PRINT: step:7200/10000 
val_loss:4.3518 total_sharp:2.8718e-04 L1_sharp:3.1949e-04 L2_sharp:6.6435e-05 L3_sharp:4.3328e-05 L4_sharp:2.6630e-05 L5_sharp:8.1716e-05 L6_sharp:7.3895e-05 L7_sharp:7.4511e-05 L8_sharp:1.5572e-04 L9_sharp:1.5923e-04 L10_sharp:1.6066e-04 L11_sharp:2.5412e-04 L12_sharp:9.8773e-04 total_fnorm:2.8750e+01 total_l1_linf:6.4768e+04 total_spectral:1.4125e+01 L1_fnorm:4.3438e+00 L2_fnorm:4.1875e+00 L3_fnorm:4.2812e+00 L4_fnorm:4.3438e+00 L5_fnorm:4.3125e+00 L6_fnorm:4.3125e+00 L7_fnorm:4.3125e+00 L8_fnorm:4.1562e+00 L9_fnorm:4.2500e+00 L10_fnorm:4.2812e+00 L11_fnorm:4.2812e+00 L12_fnorm:4.2812e+00 L1_l1linf:9.9219e-01 L2_l1linf:9.1406e-01 L3_l1linf:8.8672e-01 L4_l1linf:9.8438e-01 L5_l1linf:9.6875e-01 L6_l1linf:9.7656e-01 L7_l1linf:9.8047e-01 L8_l1linf:9.2969e-01 L9_l1linf:8.8672e-01 L10_l1linf:8.4375e-01 L11_l1linf:8.2031e-01 L12_l1linf:9.8047e-01 L1_spectral:5.8549e-02 L2_spectral:5.5753e-02 L3_spectral:5.8041e-02 L4_spectral:5.8208e-02 L5_spectral:5.7988e-02 L6_spectral:5.7941e-02 L7_spectral:5.8079e-02 L8_spectral:5.6985e-02 L9_spectral:5.8362e-02 L10_spectral:5.8602e-02 L11_spectral:5.8250e-02 L12_spectral:5.8438e-02 train_time:311673ms step_avg:43.29ms +[2025-09-11 09:48:38] [Rank 0] step:7201/10000 train_time:313204ms step_avg:43.49ms +[2025-09-11 09:48:38] [Rank 0] step:7201/10000 train_time:313204ms step_avg:43.49ms +[2025-09-11 09:48:39] [Rank 0] step:7221/10000 train_time:313984ms step_avg:43.48ms +[2025-09-11 09:48:39] [Rank 0] step:7221/10000 train_time:313984ms step_avg:43.48ms +[2025-09-11 09:48:39] [Rank 0] step:7241/10000 train_time:314687ms step_avg:43.46ms +[2025-09-11 09:48:39] [Rank 0] step:7241/10000 train_time:314687ms step_avg:43.46ms +[2025-09-11 09:48:40] [Rank 0] step:7261/10000 train_time:315390ms step_avg:43.44ms +[2025-09-11 09:48:40] [Rank 0] step:7261/10000 train_time:315390ms step_avg:43.44ms +[2025-09-11 09:48:41] [Rank 0] step:7281/10000 train_time:316096ms step_avg:43.41ms +[2025-09-11 09:48:41] [Rank 0] step:7281/10000 
train_time:316096ms step_avg:43.41ms +[2025-09-11 09:48:41] [Rank 0] step:7301/10000 train_time:316797ms step_avg:43.39ms +[2025-09-11 09:48:41] [Rank 0] step:7301/10000 train_time:316797ms step_avg:43.39ms +[2025-09-11 09:48:42] [Rank 0] step:7321/10000 train_time:317498ms step_avg:43.37ms +[2025-09-11 09:48:42] [Rank 0] step:7321/10000 train_time:317498ms step_avg:43.37ms +[2025-09-11 09:48:43] [Rank 0] step:7341/10000 train_time:318201ms step_avg:43.35ms +[2025-09-11 09:48:43] [Rank 0] step:7341/10000 train_time:318201ms step_avg:43.35ms +[2025-09-11 09:48:43] [Rank 0] step:7361/10000 train_time:318902ms step_avg:43.32ms +[2025-09-11 09:48:43] [Rank 0] step:7361/10000 train_time:318902ms step_avg:43.32ms +[2025-09-11 09:48:44] [Rank 0] step:7381/10000 train_time:319604ms step_avg:43.30ms +[2025-09-11 09:48:44] [Rank 0] step:7381/10000 train_time:319604ms step_avg:43.30ms +[2025-09-11 09:48:45] [Rank 0] step:7401/10000 train_time:320304ms step_avg:43.28ms +[2025-09-11 09:48:45] [Rank 0] step:7401/10000 train_time:320304ms step_avg:43.28ms +[2025-09-11 09:48:46] [Rank 0] step:7421/10000 train_time:321005ms step_avg:43.26ms +[2025-09-11 09:48:46] [Rank 0] step:7421/10000 train_time:321005ms step_avg:43.26ms +[2025-09-11 09:48:46] [Rank 0] step:7441/10000 train_time:321707ms step_avg:43.23ms +[2025-09-11 09:48:46] [Rank 0] step:7441/10000 train_time:321707ms step_avg:43.23ms +[2025-09-11 09:48:47] [Rank 0] step:7461/10000 train_time:322409ms step_avg:43.21ms +[2025-09-11 09:48:47] [Rank 0] step:7461/10000 train_time:322409ms step_avg:43.21ms +[2025-09-11 09:48:48] [Rank 0] step:7481/10000 train_time:323114ms step_avg:43.19ms +[2025-09-11 09:48:48] [Rank 0] step:7481/10000 train_time:323114ms step_avg:43.19ms +[2025-09-11 09:48:48] [Rank 0] step:7501/10000 train_time:323816ms step_avg:43.17ms +[2025-09-11 09:48:48] [Rank 0] step:7501/10000 train_time:323816ms step_avg:43.17ms +[2025-09-11 09:48:49] [Rank 0] step:7521/10000 train_time:324519ms step_avg:43.15ms 
+[2025-09-11 09:48:49] [Rank 0] step:7521/10000 train_time:324519ms step_avg:43.15ms +[2025-09-11 09:48:50] [Rank 0] step:7541/10000 train_time:325220ms step_avg:43.13ms +[2025-09-11 09:48:50] [Rank 0] step:7541/10000 train_time:325220ms step_avg:43.13ms +[2025-09-11 09:48:50] [Rank 0] step:7561/10000 train_time:325923ms step_avg:43.11ms +[2025-09-11 09:48:50] [Rank 0] step:7561/10000 train_time:325923ms step_avg:43.11ms +[2025-09-11 09:48:51] [Rank 0] step:7581/10000 train_time:326625ms step_avg:43.08ms +[2025-09-11 09:48:51] [Rank 0] step:7581/10000 train_time:326625ms step_avg:43.08ms +[2025-09-11 09:48:52] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 09:48:52] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 09:48:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 09:48:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 09:48:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 09:48:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 09:48:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:48:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:48:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 09:48:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 09:48:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 09:48:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 09:48:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 09:48:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 09:48:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 09:48:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 09:48:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 09:48:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 09:48:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 09:48:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 09:48:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 09:48:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 09:48:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 09:48:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 09:48:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 09:48:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 09:48:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 09:48:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 09:49:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 09:49:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 09:49:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 09:49:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 09:49:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 09:49:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 09:49:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 09:49:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 09:49:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 09:49:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 09:49:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 09:49:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 09:49:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 09:49:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 09:49:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 09:49:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 09:49:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:49:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:49:02] [Rank 0] PRINT: step:7600/10000 val_loss:4.3145 total_sharp:2.6752e-04 L1_sharp:3.5054e-04 L2_sharp:9.8976e-05 L3_sharp:4.2527e-05 L4_sharp:3.1487e-05 L5_sharp:7.1960e-05 L6_sharp:3.5223e-05 L7_sharp:5.2005e-05 L8_sharp:1.1799e-04 L9_sharp:1.4370e-04 L10_sharp:1.6397e-04 L11_sharp:2.6835e-04 L12_sharp:8.2476e-04 total_fnorm:2.2500e+01 total_l1_linf:4.9152e+04 total_spectral:1.1375e+01 L1_fnorm:3.6875e+00 L2_fnorm:3.5469e+00 L3_fnorm:3.6250e+00 L4_fnorm:3.6719e+00 L5_fnorm:3.6250e+00 L6_fnorm:3.6406e+00 L7_fnorm:3.6562e+00 L8_fnorm:3.4844e+00 L9_fnorm:3.5781e+00 L10_fnorm:3.6094e+00 L11_fnorm:3.6094e+00 L12_fnorm:3.6094e+00 L1_l1linf:7.8516e-01 L2_l1linf:7.4609e-01 L3_l1linf:7.3047e-01 L4_l1linf:7.8516e-01 L5_l1linf:7.8906e-01 L6_l1linf:8.0078e-01 L7_l1linf:8.0859e-01 L8_l1linf:7.2656e-01 L9_l1linf:7.2266e-01 L10_l1linf:6.8750e-01 L11_l1linf:6.8750e-01 L12_l1linf:8.0078e-01 L1_spectral:5.0713e-02 L2_spectral:4.7506e-02 L3_spectral:5.0038e-02 L4_spectral:4.9659e-02 L5_spectral:5.0181e-02 L6_spectral:4.9947e-02 L7_spectral:5.0578e-02 L8_spectral:4.9317e-02 L9_spectral:5.0398e-02 L10_spectral:5.0557e-02 L11_spectral:5.0471e-02 L12_spectral:5.0540e-02 train_time:327313ms step_avg:43.07ms +[2025-09-11 09:49:02] [Rank 0] PRINT: step:7600/10000 val_loss:4.3145 total_sharp:2.6752e-04 L1_sharp:3.5054e-04 L2_sharp:9.8976e-05 L3_sharp:4.2527e-05 L4_sharp:3.1487e-05 L5_sharp:7.1960e-05 L6_sharp:3.5223e-05 L7_sharp:5.2005e-05 L8_sharp:1.1799e-04 L9_sharp:1.4370e-04 L10_sharp:1.6397e-04 L11_sharp:2.6835e-04 L12_sharp:8.2476e-04 total_fnorm:2.2500e+01 total_l1_linf:4.9152e+04 total_spectral:1.1375e+01 L1_fnorm:3.6875e+00 L2_fnorm:3.5469e+00 L3_fnorm:3.6250e+00 L4_fnorm:3.6719e+00 L5_fnorm:3.6250e+00 L6_fnorm:3.6406e+00 L7_fnorm:3.6562e+00 L8_fnorm:3.4844e+00 L9_fnorm:3.5781e+00 L10_fnorm:3.6094e+00 L11_fnorm:3.6094e+00 L12_fnorm:3.6094e+00 L1_l1linf:7.8516e-01 L2_l1linf:7.4609e-01 L3_l1linf:7.3047e-01 L4_l1linf:7.8516e-01 L5_l1linf:7.8906e-01 
L6_l1linf:8.0078e-01 L7_l1linf:8.0859e-01 L8_l1linf:7.2656e-01 L9_l1linf:7.2266e-01 L10_l1linf:6.8750e-01 L11_l1linf:6.8750e-01 L12_l1linf:8.0078e-01 L1_spectral:5.0713e-02 L2_spectral:4.7506e-02 L3_spectral:5.0038e-02 L4_spectral:4.9659e-02 L5_spectral:5.0181e-02 L6_spectral:4.9947e-02 L7_spectral:5.0578e-02 L8_spectral:4.9317e-02 L9_spectral:5.0398e-02 L10_spectral:5.0557e-02 L11_spectral:5.0471e-02 L12_spectral:5.0540e-02 train_time:327313ms step_avg:43.07ms +[2025-09-11 09:49:03] [Rank 0] step:7601/10000 train_time:328495ms step_avg:43.22ms +[2025-09-11 09:49:03] [Rank 0] step:7601/10000 train_time:328495ms step_avg:43.22ms +[2025-09-11 09:49:04] [Rank 0] step:7621/10000 train_time:329203ms step_avg:43.20ms +[2025-09-11 09:49:04] [Rank 0] step:7621/10000 train_time:329203ms step_avg:43.20ms +[2025-09-11 09:49:04] [Rank 0] step:7641/10000 train_time:329909ms step_avg:43.18ms +[2025-09-11 09:49:04] [Rank 0] step:7641/10000 train_time:329909ms step_avg:43.18ms +[2025-09-11 09:49:05] [Rank 0] step:7661/10000 train_time:330611ms step_avg:43.16ms +[2025-09-11 09:49:05] [Rank 0] step:7661/10000 train_time:330611ms step_avg:43.16ms +[2025-09-11 09:49:06] [Rank 0] step:7681/10000 train_time:331313ms step_avg:43.13ms +[2025-09-11 09:49:06] [Rank 0] step:7681/10000 train_time:331313ms step_avg:43.13ms +[2025-09-11 09:49:07] [Rank 0] step:7701/10000 train_time:332019ms step_avg:43.11ms +[2025-09-11 09:49:07] [Rank 0] step:7701/10000 train_time:332019ms step_avg:43.11ms +[2025-09-11 09:49:07] [Rank 0] step:7721/10000 train_time:332722ms step_avg:43.09ms +[2025-09-11 09:49:07] [Rank 0] step:7721/10000 train_time:332722ms step_avg:43.09ms +[2025-09-11 09:49:08] [Rank 0] step:7741/10000 train_time:333425ms step_avg:43.07ms +[2025-09-11 09:49:08] [Rank 0] step:7741/10000 train_time:333425ms step_avg:43.07ms +[2025-09-11 09:49:09] [Rank 0] step:7761/10000 train_time:334127ms step_avg:43.05ms +[2025-09-11 09:49:09] [Rank 0] step:7761/10000 train_time:334127ms step_avg:43.05ms 
+[2025-09-11 09:49:09] [Rank 0] step:7781/10000 train_time:334832ms step_avg:43.03ms +[2025-09-11 09:49:09] [Rank 0] step:7781/10000 train_time:334832ms step_avg:43.03ms +[2025-09-11 09:49:10] [Rank 0] step:7801/10000 train_time:335535ms step_avg:43.01ms +[2025-09-11 09:49:10] [Rank 0] step:7801/10000 train_time:335535ms step_avg:43.01ms +[2025-09-11 09:49:11] [Rank 0] step:7821/10000 train_time:336238ms step_avg:42.99ms +[2025-09-11 09:49:11] [Rank 0] step:7821/10000 train_time:336238ms step_avg:42.99ms +[2025-09-11 09:49:11] [Rank 0] step:7841/10000 train_time:336943ms step_avg:42.97ms +[2025-09-11 09:49:11] [Rank 0] step:7841/10000 train_time:336943ms step_avg:42.97ms +[2025-09-11 09:49:12] [Rank 0] step:7861/10000 train_time:337648ms step_avg:42.95ms +[2025-09-11 09:49:12] [Rank 0] step:7861/10000 train_time:337648ms step_avg:42.95ms +[2025-09-11 09:49:13] [Rank 0] step:7881/10000 train_time:338351ms step_avg:42.93ms +[2025-09-11 09:49:13] [Rank 0] step:7881/10000 train_time:338351ms step_avg:42.93ms +[2025-09-11 09:49:14] [Rank 0] step:7901/10000 train_time:339055ms step_avg:42.91ms +[2025-09-11 09:49:14] [Rank 0] step:7901/10000 train_time:339055ms step_avg:42.91ms +[2025-09-11 09:49:14] [Rank 0] step:7921/10000 train_time:339759ms step_avg:42.89ms +[2025-09-11 09:49:14] [Rank 0] step:7921/10000 train_time:339759ms step_avg:42.89ms +[2025-09-11 09:49:15] [Rank 0] step:7941/10000 train_time:340463ms step_avg:42.87ms +[2025-09-11 09:49:15] [Rank 0] step:7941/10000 train_time:340463ms step_avg:42.87ms +[2025-09-11 09:49:16] [Rank 0] step:7961/10000 train_time:341165ms step_avg:42.85ms +[2025-09-11 09:49:16] [Rank 0] step:7961/10000 train_time:341165ms step_avg:42.85ms +[2025-09-11 09:49:16] [Rank 0] step:7981/10000 train_time:341872ms step_avg:42.84ms +[2025-09-11 09:49:16] [Rank 0] step:7981/10000 train_time:341872ms step_avg:42.84ms +[2025-09-11 09:49:17] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 09:49:17] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 09:49:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 09:49:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 09:49:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 09:49:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 09:49:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:49:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:49:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 09:49:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 09:49:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 09:49:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 09:49:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 09:49:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 09:49:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 09:49:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 09:49:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 09:49:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 09:49:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 09:49:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 09:49:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 09:49:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 09:49:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 09:49:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 09:49:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 09:49:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 09:49:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 09:49:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 09:49:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 09:49:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 09:49:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 09:49:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 09:49:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 09:49:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 09:49:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 09:49:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 09:49:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 09:49:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 09:49:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 09:49:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 09:49:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 09:49:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 09:49:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 09:49:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 09:49:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:49:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:49:27] [Rank 0] PRINT: step:8000/10000 val_loss:4.2900 total_sharp:2.5395e-04 L1_sharp:3.4569e-04 L2_sharp:6.2752e-05 L3_sharp:4.3958e-05 L4_sharp:3.6577e-05 L5_sharp:2.3362e-05 L6_sharp:5.5291e-05 L7_sharp:8.0359e-05 L8_sharp:1.3867e-04 L9_sharp:1.4591e-04 L10_sharp:1.6342e-04 L11_sharp:2.6146e-04 L12_sharp:1.0225e-03 total_fnorm:1.9000e+01 total_l1_linf:3.8400e+04 total_spectral:9.5625e+00 L1_fnorm:3.0312e+00 L2_fnorm:2.8906e+00 L3_fnorm:2.9219e+00 L4_fnorm:2.9844e+00 L5_fnorm:2.9375e+00 L6_fnorm:2.9531e+00 L7_fnorm:2.9531e+00 L8_fnorm:2.8281e+00 L9_fnorm:2.8906e+00 L10_fnorm:2.9219e+00 L11_fnorm:2.9219e+00 L12_fnorm:2.9375e+00 L1_l1linf:6.1328e-01 L2_l1linf:5.8203e-01 L3_l1linf:5.5859e-01 L4_l1linf:6.0156e-01 L5_l1linf:6.0938e-01 L6_l1linf:6.0938e-01 L7_l1linf:6.2500e-01 L8_l1linf:5.7031e-01 L9_l1linf:5.5859e-01 L10_l1linf:5.2344e-01 L11_l1linf:5.1953e-01 L12_l1linf:6.1328e-01 L1_spectral:4.2805e-02 L2_spectral:3.9342e-02 L3_spectral:4.0615e-02 L4_spectral:4.1388e-02 L5_spectral:4.1361e-02 L6_spectral:4.1230e-02 L7_spectral:4.1619e-02 L8_spectral:4.1064e-02 L9_spectral:4.1888e-02 L10_spectral:4.2217e-02 L11_spectral:4.1926e-02 L12_spectral:4.2118e-02 train_time:342555ms step_avg:42.82ms +[2025-09-11 09:49:27] [Rank 0] PRINT: step:8000/10000 
val_loss:4.2900 total_sharp:2.5395e-04 L1_sharp:3.4569e-04 L2_sharp:6.2752e-05 L3_sharp:4.3958e-05 L4_sharp:3.6577e-05 L5_sharp:2.3362e-05 L6_sharp:5.5291e-05 L7_sharp:8.0359e-05 L8_sharp:1.3867e-04 L9_sharp:1.4591e-04 L10_sharp:1.6342e-04 L11_sharp:2.6146e-04 L12_sharp:1.0225e-03 total_fnorm:1.9000e+01 total_l1_linf:3.8400e+04 total_spectral:9.5625e+00 L1_fnorm:3.0312e+00 L2_fnorm:2.8906e+00 L3_fnorm:2.9219e+00 L4_fnorm:2.9844e+00 L5_fnorm:2.9375e+00 L6_fnorm:2.9531e+00 L7_fnorm:2.9531e+00 L8_fnorm:2.8281e+00 L9_fnorm:2.8906e+00 L10_fnorm:2.9219e+00 L11_fnorm:2.9219e+00 L12_fnorm:2.9375e+00 L1_l1linf:6.1328e-01 L2_l1linf:5.8203e-01 L3_l1linf:5.5859e-01 L4_l1linf:6.0156e-01 L5_l1linf:6.0938e-01 L6_l1linf:6.0938e-01 L7_l1linf:6.2500e-01 L8_l1linf:5.7031e-01 L9_l1linf:5.5859e-01 L10_l1linf:5.2344e-01 L11_l1linf:5.1953e-01 L12_l1linf:6.1328e-01 L1_spectral:4.2805e-02 L2_spectral:3.9342e-02 L3_spectral:4.0615e-02 L4_spectral:4.1388e-02 L5_spectral:4.1361e-02 L6_spectral:4.1230e-02 L7_spectral:4.1619e-02 L8_spectral:4.1064e-02 L9_spectral:4.1888e-02 L10_spectral:4.2217e-02 L11_spectral:4.1926e-02 L12_spectral:4.2118e-02 train_time:342555ms step_avg:42.82ms +[2025-09-11 09:49:28] [Rank 0] step:8001/10000 train_time:343752ms step_avg:42.96ms +[2025-09-11 09:49:28] [Rank 0] step:8001/10000 train_time:343752ms step_avg:42.96ms +[2025-09-11 09:49:29] [Rank 0] step:8021/10000 train_time:344492ms step_avg:42.95ms +[2025-09-11 09:49:29] [Rank 0] step:8021/10000 train_time:344492ms step_avg:42.95ms +[2025-09-11 09:49:30] [Rank 0] step:8041/10000 train_time:345198ms step_avg:42.93ms +[2025-09-11 09:49:30] [Rank 0] step:8041/10000 train_time:345198ms step_avg:42.93ms +[2025-09-11 09:49:31] [Rank 0] step:8061/10000 train_time:345905ms step_avg:42.91ms +[2025-09-11 09:49:31] [Rank 0] step:8061/10000 train_time:345905ms step_avg:42.91ms +[2025-09-11 09:49:31] [Rank 0] step:8081/10000 train_time:346607ms step_avg:42.89ms +[2025-09-11 09:49:31] [Rank 0] step:8081/10000 
train_time:346607ms step_avg:42.89ms +[2025-09-11 09:49:32] [Rank 0] step:8101/10000 train_time:347309ms step_avg:42.87ms +[2025-09-11 09:49:32] [Rank 0] step:8101/10000 train_time:347309ms step_avg:42.87ms +[2025-09-11 09:49:33] [Rank 0] step:8121/10000 train_time:348017ms step_avg:42.85ms +[2025-09-11 09:49:33] [Rank 0] step:8121/10000 train_time:348017ms step_avg:42.85ms +[2025-09-11 09:49:34] [Rank 0] step:8141/10000 train_time:349473ms step_avg:42.93ms +[2025-09-11 09:49:34] [Rank 0] step:8141/10000 train_time:349473ms step_avg:42.93ms +[2025-09-11 09:49:35] [Rank 0] step:8161/10000 train_time:350181ms step_avg:42.91ms +[2025-09-11 09:49:35] [Rank 0] step:8161/10000 train_time:350181ms step_avg:42.91ms +[2025-09-11 09:49:36] [Rank 0] step:8181/10000 train_time:350896ms step_avg:42.89ms +[2025-09-11 09:49:36] [Rank 0] step:8181/10000 train_time:350896ms step_avg:42.89ms +[2025-09-11 09:49:36] [Rank 0] step:8201/10000 train_time:351608ms step_avg:42.87ms +[2025-09-11 09:49:36] [Rank 0] step:8201/10000 train_time:351608ms step_avg:42.87ms +[2025-09-11 09:49:37] [Rank 0] step:8221/10000 train_time:352318ms step_avg:42.86ms +[2025-09-11 09:49:37] [Rank 0] step:8221/10000 train_time:352318ms step_avg:42.86ms +[2025-09-11 09:49:38] [Rank 0] step:8241/10000 train_time:353037ms step_avg:42.84ms +[2025-09-11 09:49:38] [Rank 0] step:8241/10000 train_time:353037ms step_avg:42.84ms +[2025-09-11 09:49:38] [Rank 0] step:8261/10000 train_time:353747ms step_avg:42.82ms +[2025-09-11 09:49:38] [Rank 0] step:8261/10000 train_time:353747ms step_avg:42.82ms +[2025-09-11 09:49:39] [Rank 0] step:8281/10000 train_time:354453ms step_avg:42.80ms +[2025-09-11 09:49:39] [Rank 0] step:8281/10000 train_time:354453ms step_avg:42.80ms +[2025-09-11 09:49:40] [Rank 0] step:8301/10000 train_time:355167ms step_avg:42.79ms +[2025-09-11 09:49:40] [Rank 0] step:8301/10000 train_time:355167ms step_avg:42.79ms +[2025-09-11 09:49:41] [Rank 0] step:8321/10000 train_time:355876ms step_avg:42.77ms 
+[2025-09-11 09:49:41] [Rank 0] step:8321/10000 train_time:355876ms step_avg:42.77ms +[2025-09-11 09:49:41] [Rank 0] step:8341/10000 train_time:356593ms step_avg:42.75ms +[2025-09-11 09:49:41] [Rank 0] step:8341/10000 train_time:356593ms step_avg:42.75ms +[2025-09-11 09:49:42] [Rank 0] step:8361/10000 train_time:357301ms step_avg:42.73ms +[2025-09-11 09:49:42] [Rank 0] step:8361/10000 train_time:357301ms step_avg:42.73ms +[2025-09-11 09:49:43] [Rank 0] step:8381/10000 train_time:358013ms step_avg:42.72ms +[2025-09-11 09:49:43] [Rank 0] step:8381/10000 train_time:358013ms step_avg:42.72ms +[2025-09-11 09:49:43] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 09:49:43] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 09:49:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 09:49:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 09:49:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 09:49:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 09:49:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:49:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:49:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 09:49:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 09:49:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 09:49:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 09:49:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 09:49:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 09:49:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 09:49:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 09:49:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 09:49:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 09:49:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 09:49:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 09:49:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 09:49:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 09:49:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 09:49:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 09:49:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 09:49:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 09:49:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 09:49:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 09:49:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 09:49:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 09:49:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 09:49:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 09:49:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 09:49:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 09:49:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 09:49:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 09:49:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 09:49:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 09:49:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 09:49:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 09:49:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 09:49:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 09:49:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 09:49:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 09:49:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:49:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:49:53] [Rank 0] PRINT: step:8400/10000 val_loss:4.2707 total_sharp:2.2774e-04 L1_sharp:2.6360e-04 L2_sharp:-9.9292e-07 L3_sharp:4.4736e-05 L4_sharp:1.5211e-05 L5_sharp:3.4255e-05 L6_sharp:4.7170e-05 L7_sharp:5.5059e-05 L8_sharp:1.1753e-04 L9_sharp:1.2468e-04 L10_sharp:1.4881e-04 L11_sharp:2.2436e-04 L12_sharp:8.0891e-04 total_fnorm:1.4375e+01 total_l1_linf:2.6368e+04 total_spectral:7.1562e+00 L1_fnorm:2.3906e+00 L2_fnorm:2.2656e+00 L3_fnorm:2.2969e+00 L4_fnorm:2.3281e+00 L5_fnorm:2.2969e+00 L6_fnorm:2.3125e+00 L7_fnorm:2.3125e+00 L8_fnorm:2.2188e+00 L9_fnorm:2.2500e+00 L10_fnorm:2.2656e+00 L11_fnorm:2.2500e+00 L12_fnorm:2.2812e+00 L1_l1linf:4.5508e-01 L2_l1linf:4.2383e-01 L3_l1linf:4.3555e-01 L4_l1linf:4.5312e-01 L5_l1linf:4.5898e-01 L6_l1linf:4.4922e-01 L7_l1linf:4.5312e-01 L8_l1linf:4.1406e-01 L9_l1linf:3.9844e-01 L10_l1linf:3.8086e-01 L11_l1linf:3.7695e-01 L12_l1linf:4.6680e-01 L1_spectral:3.4567e-02 L2_spectral:3.1486e-02 L3_spectral:3.1940e-02 L4_spectral:3.2693e-02 L5_spectral:3.2614e-02 L6_spectral:3.2783e-02 L7_spectral:3.2902e-02 L8_spectral:3.3365e-02 L9_spectral:3.3136e-02 L10_spectral:3.3669e-02 L11_spectral:3.3045e-02 L12_spectral:3.3712e-02 train_time:358705ms step_avg:42.70ms +[2025-09-11 09:49:53] [Rank 0] PRINT: step:8400/10000 val_loss:4.2707 total_sharp:2.2774e-04 L1_sharp:2.6360e-04 L2_sharp:-9.9292e-07 L3_sharp:4.4736e-05 L4_sharp:1.5211e-05 L5_sharp:3.4255e-05 L6_sharp:4.7170e-05 L7_sharp:5.5059e-05 L8_sharp:1.1753e-04 L9_sharp:1.2468e-04 L10_sharp:1.4881e-04 L11_sharp:2.2436e-04 L12_sharp:8.0891e-04 total_fnorm:1.4375e+01 total_l1_linf:2.6368e+04 total_spectral:7.1562e+00 L1_fnorm:2.3906e+00 L2_fnorm:2.2656e+00 L3_fnorm:2.2969e+00 L4_fnorm:2.3281e+00 L5_fnorm:2.2969e+00 L6_fnorm:2.3125e+00 L7_fnorm:2.3125e+00 L8_fnorm:2.2188e+00 L9_fnorm:2.2500e+00 L10_fnorm:2.2656e+00 L11_fnorm:2.2500e+00 L12_fnorm:2.2812e+00 L1_l1linf:4.5508e-01 L2_l1linf:4.2383e-01 L3_l1linf:4.3555e-01 L4_l1linf:4.5312e-01 L5_l1linf:4.5898e-01 
L6_l1linf:4.4922e-01 L7_l1linf:4.5312e-01 L8_l1linf:4.1406e-01 L9_l1linf:3.9844e-01 L10_l1linf:3.8086e-01 L11_l1linf:3.7695e-01 L12_l1linf:4.6680e-01 L1_spectral:3.4567e-02 L2_spectral:3.1486e-02 L3_spectral:3.1940e-02 L4_spectral:3.2693e-02 L5_spectral:3.2614e-02 L6_spectral:3.2783e-02 L7_spectral:3.2902e-02 L8_spectral:3.3365e-02 L9_spectral:3.3136e-02 L10_spectral:3.3669e-02 L11_spectral:3.3045e-02 L12_spectral:3.3712e-02 train_time:358705ms step_avg:42.70ms +[2025-09-11 09:49:55] [Rank 0] step:8401/10000 train_time:359931ms step_avg:42.84ms +[2025-09-11 09:49:55] [Rank 0] step:8401/10000 train_time:359931ms step_avg:42.84ms +[2025-09-11 09:49:55] [Rank 0] step:8421/10000 train_time:360683ms step_avg:42.83ms +[2025-09-11 09:49:55] [Rank 0] step:8421/10000 train_time:360683ms step_avg:42.83ms +[2025-09-11 09:49:56] [Rank 0] step:8441/10000 train_time:361397ms step_avg:42.81ms +[2025-09-11 09:49:56] [Rank 0] step:8441/10000 train_time:361397ms step_avg:42.81ms +[2025-09-11 09:49:57] [Rank 0] step:8461/10000 train_time:362109ms step_avg:42.80ms +[2025-09-11 09:49:57] [Rank 0] step:8461/10000 train_time:362109ms step_avg:42.80ms +[2025-09-11 09:49:58] [Rank 0] step:8481/10000 train_time:362823ms step_avg:42.78ms +[2025-09-11 09:49:58] [Rank 0] step:8481/10000 train_time:362823ms step_avg:42.78ms +[2025-09-11 09:49:58] [Rank 0] step:8501/10000 train_time:363535ms step_avg:42.76ms +[2025-09-11 09:49:58] [Rank 0] step:8501/10000 train_time:363535ms step_avg:42.76ms +[2025-09-11 09:49:59] [Rank 0] step:8521/10000 train_time:364247ms step_avg:42.75ms +[2025-09-11 09:49:59] [Rank 0] step:8521/10000 train_time:364247ms step_avg:42.75ms +[2025-09-11 09:50:00] [Rank 0] step:8541/10000 train_time:364958ms step_avg:42.73ms +[2025-09-11 09:50:00] [Rank 0] step:8541/10000 train_time:364958ms step_avg:42.73ms +[2025-09-11 09:50:00] [Rank 0] step:8561/10000 train_time:365674ms step_avg:42.71ms +[2025-09-11 09:50:00] [Rank 0] step:8561/10000 train_time:365674ms step_avg:42.71ms 
+[2025-09-11 09:50:01] [Rank 0] step:8581/10000 train_time:366525ms step_avg:42.71ms +[2025-09-11 09:50:01] [Rank 0] step:8581/10000 train_time:366525ms step_avg:42.71ms +[2025-09-11 09:50:02] [Rank 0] step:8601/10000 train_time:367317ms step_avg:42.71ms +[2025-09-11 09:50:02] [Rank 0] step:8601/10000 train_time:367317ms step_avg:42.71ms +[2025-09-11 09:50:03] [Rank 0] step:8621/10000 train_time:368030ms step_avg:42.69ms +[2025-09-11 09:50:03] [Rank 0] step:8621/10000 train_time:368030ms step_avg:42.69ms +[2025-09-11 09:50:04] [Rank 0] step:8641/10000 train_time:368740ms step_avg:42.67ms +[2025-09-11 09:50:04] [Rank 0] step:8641/10000 train_time:368740ms step_avg:42.67ms +[2025-09-11 09:50:04] [Rank 0] step:8661/10000 train_time:369452ms step_avg:42.66ms +[2025-09-11 09:50:04] [Rank 0] step:8661/10000 train_time:369452ms step_avg:42.66ms +[2025-09-11 09:50:05] [Rank 0] step:8681/10000 train_time:370165ms step_avg:42.64ms +[2025-09-11 09:50:05] [Rank 0] step:8681/10000 train_time:370165ms step_avg:42.64ms +[2025-09-11 09:50:06] [Rank 0] step:8701/10000 train_time:370876ms step_avg:42.62ms +[2025-09-11 09:50:06] [Rank 0] step:8701/10000 train_time:370876ms step_avg:42.62ms +[2025-09-11 09:50:06] [Rank 0] step:8721/10000 train_time:371591ms step_avg:42.61ms +[2025-09-11 09:50:06] [Rank 0] step:8721/10000 train_time:371591ms step_avg:42.61ms +[2025-09-11 09:50:07] [Rank 0] step:8741/10000 train_time:372298ms step_avg:42.59ms +[2025-09-11 09:50:07] [Rank 0] step:8741/10000 train_time:372298ms step_avg:42.59ms +[2025-09-11 09:50:08] [Rank 0] step:8761/10000 train_time:373013ms step_avg:42.58ms +[2025-09-11 09:50:08] [Rank 0] step:8761/10000 train_time:373013ms step_avg:42.58ms +[2025-09-11 09:50:09] [Rank 0] step:8781/10000 train_time:373721ms step_avg:42.56ms +[2025-09-11 09:50:09] [Rank 0] step:8781/10000 train_time:373721ms step_avg:42.56ms +[2025-09-11 09:50:09] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 09:50:09] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 09:50:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 09:50:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 09:50:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 09:50:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 09:50:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:50:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:50:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 09:50:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 09:50:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 09:50:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 09:50:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 09:50:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 09:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 09:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 09:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 09:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 09:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 09:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 09:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 09:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 09:50:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 09:50:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 09:50:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 09:50:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 09:50:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 09:50:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 09:50:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 09:50:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 09:50:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 09:50:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 09:50:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 09:50:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 09:50:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 09:50:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 09:50:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 09:50:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 09:50:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 09:50:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 09:50:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 09:50:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 09:50:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 09:50:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 09:50:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:50:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:50:19] [Rank 0] PRINT: step:8800/10000 val_loss:4.2586 total_sharp:2.2582e-04 L1_sharp:1.9899e-04 L2_sharp:5.3025e-05 L3_sharp:-2.2330e-06 L4_sharp:3.8602e-05 L5_sharp:5.0699e-05 L6_sharp:4.1492e-05 L7_sharp:5.0429e-05 L8_sharp:9.6577e-05 L9_sharp:1.1252e-04 L10_sharp:1.1709e-04 L11_sharp:2.0917e-04 L12_sharp:1.6018e-03 total_fnorm:1.0625e+01 total_l1_linf:1.7408e+04 total_spectral:5.2500e+00 L1_fnorm:1.7656e+00 L2_fnorm:1.6641e+00 L3_fnorm:1.6797e+00 L4_fnorm:1.7031e+00 L5_fnorm:1.6797e+00 L6_fnorm:1.6797e+00 L7_fnorm:1.6797e+00 L8_fnorm:1.6172e+00 L9_fnorm:1.6328e+00 L10_fnorm:1.6484e+00 L11_fnorm:1.6406e+00 L12_fnorm:1.6641e+00 L1_l1linf:3.0078e-01 L2_l1linf:2.8906e-01 L3_l1linf:2.7734e-01 L4_l1linf:3.0469e-01 L5_l1linf:2.9883e-01 L6_l1linf:3.0078e-01 L7_l1linf:3.0469e-01 L8_l1linf:2.7930e-01 L9_l1linf:2.7344e-01 L10_l1linf:2.4707e-01 L11_l1linf:2.6367e-01 L12_l1linf:3.3594e-01 L1_spectral:2.6557e-02 L2_spectral:2.3471e-02 L3_spectral:2.3903e-02 L4_spectral:2.4119e-02 L5_spectral:2.4565e-02 L6_spectral:2.4314e-02 L7_spectral:2.4543e-02 L8_spectral:2.4969e-02 L9_spectral:2.5016e-02 L10_spectral:2.5152e-02 L11_spectral:2.4888e-02 L12_spectral:2.5096e-02 train_time:374410ms step_avg:42.55ms +[2025-09-11 09:50:19] [Rank 0] PRINT: step:8800/10000 
val_loss:4.2586 total_sharp:2.2582e-04 L1_sharp:1.9899e-04 L2_sharp:5.3025e-05 L3_sharp:-2.2330e-06 L4_sharp:3.8602e-05 L5_sharp:5.0699e-05 L6_sharp:4.1492e-05 L7_sharp:5.0429e-05 L8_sharp:9.6577e-05 L9_sharp:1.1252e-04 L10_sharp:1.1709e-04 L11_sharp:2.0917e-04 L12_sharp:1.6018e-03 total_fnorm:1.0625e+01 total_l1_linf:1.7408e+04 total_spectral:5.2500e+00 L1_fnorm:1.7656e+00 L2_fnorm:1.6641e+00 L3_fnorm:1.6797e+00 L4_fnorm:1.7031e+00 L5_fnorm:1.6797e+00 L6_fnorm:1.6797e+00 L7_fnorm:1.6797e+00 L8_fnorm:1.6172e+00 L9_fnorm:1.6328e+00 L10_fnorm:1.6484e+00 L11_fnorm:1.6406e+00 L12_fnorm:1.6641e+00 L1_l1linf:3.0078e-01 L2_l1linf:2.8906e-01 L3_l1linf:2.7734e-01 L4_l1linf:3.0469e-01 L5_l1linf:2.9883e-01 L6_l1linf:3.0078e-01 L7_l1linf:3.0469e-01 L8_l1linf:2.7930e-01 L9_l1linf:2.7344e-01 L10_l1linf:2.4707e-01 L11_l1linf:2.6367e-01 L12_l1linf:3.3594e-01 L1_spectral:2.6557e-02 L2_spectral:2.3471e-02 L3_spectral:2.3903e-02 L4_spectral:2.4119e-02 L5_spectral:2.4565e-02 L6_spectral:2.4314e-02 L7_spectral:2.4543e-02 L8_spectral:2.4969e-02 L9_spectral:2.5016e-02 L10_spectral:2.5152e-02 L11_spectral:2.4888e-02 L12_spectral:2.5096e-02 train_time:374410ms step_avg:42.55ms +[2025-09-11 09:50:21] [Rank 0] step:8801/10000 train_time:375638ms step_avg:42.68ms +[2025-09-11 09:50:21] [Rank 0] step:8801/10000 train_time:375638ms step_avg:42.68ms +[2025-09-11 09:50:21] [Rank 0] step:8821/10000 train_time:376385ms step_avg:42.67ms +[2025-09-11 09:50:21] [Rank 0] step:8821/10000 train_time:376385ms step_avg:42.67ms +[2025-09-11 09:50:22] [Rank 0] step:8841/10000 train_time:377098ms step_avg:42.65ms +[2025-09-11 09:50:22] [Rank 0] step:8841/10000 train_time:377098ms step_avg:42.65ms +[2025-09-11 09:50:23] [Rank 0] step:8861/10000 train_time:378279ms step_avg:42.69ms +[2025-09-11 09:50:23] [Rank 0] step:8861/10000 train_time:378279ms step_avg:42.69ms +[2025-09-11 09:50:24] [Rank 0] step:8881/10000 train_time:379016ms step_avg:42.68ms +[2025-09-11 09:50:24] [Rank 0] step:8881/10000 
train_time:379016ms step_avg:42.68ms +[2025-09-11 09:50:25] [Rank 0] step:8901/10000 train_time:379731ms step_avg:42.66ms +[2025-09-11 09:50:25] [Rank 0] step:8901/10000 train_time:379731ms step_avg:42.66ms +[2025-09-11 09:50:26] [Rank 0] step:8921/10000 train_time:380714ms step_avg:42.68ms +[2025-09-11 09:50:26] [Rank 0] step:8921/10000 train_time:380714ms step_avg:42.68ms +[2025-09-11 09:50:26] [Rank 0] step:8941/10000 train_time:381428ms step_avg:42.66ms +[2025-09-11 09:50:26] [Rank 0] step:8941/10000 train_time:381428ms step_avg:42.66ms +[2025-09-11 09:50:27] [Rank 0] step:8961/10000 train_time:382148ms step_avg:42.65ms +[2025-09-11 09:50:27] [Rank 0] step:8961/10000 train_time:382148ms step_avg:42.65ms +[2025-09-11 09:50:28] [Rank 0] step:8981/10000 train_time:382864ms step_avg:42.63ms +[2025-09-11 09:50:28] [Rank 0] step:8981/10000 train_time:382864ms step_avg:42.63ms +[2025-09-11 09:50:28] [Rank 0] step:9001/10000 train_time:383570ms step_avg:42.61ms +[2025-09-11 09:50:28] [Rank 0] step:9001/10000 train_time:383570ms step_avg:42.61ms +[2025-09-11 09:50:29] [Rank 0] step:9021/10000 train_time:384284ms step_avg:42.60ms +[2025-09-11 09:50:29] [Rank 0] step:9021/10000 train_time:384284ms step_avg:42.60ms +[2025-09-11 09:50:30] [Rank 0] step:9041/10000 train_time:384998ms step_avg:42.58ms +[2025-09-11 09:50:30] [Rank 0] step:9041/10000 train_time:384998ms step_avg:42.58ms +[2025-09-11 09:50:31] [Rank 0] step:9061/10000 train_time:385708ms step_avg:42.57ms +[2025-09-11 09:50:31] [Rank 0] step:9061/10000 train_time:385708ms step_avg:42.57ms +[2025-09-11 09:50:31] [Rank 0] step:9081/10000 train_time:386421ms step_avg:42.55ms +[2025-09-11 09:50:31] [Rank 0] step:9081/10000 train_time:386421ms step_avg:42.55ms +[2025-09-11 09:50:32] [Rank 0] step:9101/10000 train_time:387136ms step_avg:42.54ms +[2025-09-11 09:50:32] [Rank 0] step:9101/10000 train_time:387136ms step_avg:42.54ms +[2025-09-11 09:50:33] [Rank 0] step:9121/10000 train_time:387851ms step_avg:42.52ms 
+[2025-09-11 09:50:33] [Rank 0] step:9121/10000 train_time:387851ms step_avg:42.52ms +[2025-09-11 09:50:33] [Rank 0] step:9141/10000 train_time:388561ms step_avg:42.51ms +[2025-09-11 09:50:33] [Rank 0] step:9141/10000 train_time:388561ms step_avg:42.51ms +[2025-09-11 09:50:34] [Rank 0] step:9161/10000 train_time:389275ms step_avg:42.49ms +[2025-09-11 09:50:34] [Rank 0] step:9161/10000 train_time:389275ms step_avg:42.49ms +[2025-09-11 09:50:35] [Rank 0] step:9181/10000 train_time:389989ms step_avg:42.48ms +[2025-09-11 09:50:35] [Rank 0] step:9181/10000 train_time:389989ms step_avg:42.48ms +[2025-09-11 09:50:36] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 09:50:36] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 09:50:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 09:50:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 09:50:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 09:50:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 09:50:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:50:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:50:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 09:50:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 09:50:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 09:50:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 09:50:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 09:50:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 09:50:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 09:50:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 09:50:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 09:50:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 09:50:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 09:50:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 09:50:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 09:50:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 09:50:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 09:50:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 09:50:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 09:50:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 09:50:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 09:50:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 09:50:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 09:50:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 09:50:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 09:50:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 09:50:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 09:50:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 09:50:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 09:50:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 09:50:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 09:50:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 09:50:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 09:50:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 09:50:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 09:50:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 09:50:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 09:50:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 09:50:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:50:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:50:46] [Rank 0] PRINT: step:9200/10000 val_loss:4.2396 total_sharp:1.6765e-04 L1_sharp:1.7011e-04 L2_sharp:6.5008e-05 L3_sharp:2.6701e-05 L4_sharp:2.0480e-05 L5_sharp:3.4699e-05 L6_sharp:4.2356e-05 L7_sharp:4.6068e-05 L8_sharp:1.0790e-04 L9_sharp:9.8071e-05 L10_sharp:1.1640e-04 L11_sharp:1.8284e-04 L12_sharp:6.1811e-04 total_fnorm:7.1562e+00 total_l1_linf:1.0176e+04 total_spectral:3.5469e+00 L1_fnorm:1.1953e+00 L2_fnorm:1.1016e+00 L3_fnorm:1.1250e+00 L4_fnorm:1.1328e+00 L5_fnorm:1.1172e+00 L6_fnorm:1.1328e+00 L7_fnorm:1.1250e+00 L8_fnorm:1.0859e+00 L9_fnorm:1.0938e+00 L10_fnorm:1.1094e+00 L11_fnorm:1.1016e+00 L12_fnorm:1.1094e+00 L1_l1linf:1.8652e-01 L2_l1linf:1.8652e-01 L3_l1linf:1.7871e-01 L4_l1linf:1.8555e-01 L5_l1linf:1.8457e-01 L6_l1linf:1.8164e-01 L7_l1linf:1.8262e-01 L8_l1linf:1.7188e-01 L9_l1linf:1.5918e-01 L10_l1linf:1.5234e-01 L11_l1linf:1.5332e-01 L12_l1linf:1.9141e-01 L1_spectral:1.8333e-02 L2_spectral:1.6249e-02 L3_spectral:1.6189e-02 L4_spectral:1.6527e-02 L5_spectral:1.6747e-02 L6_spectral:1.6843e-02 L7_spectral:1.6738e-02 L8_spectral:1.7493e-02 L9_spectral:1.7145e-02 L10_spectral:1.7105e-02 L11_spectral:1.7044e-02 L12_spectral:1.7207e-02 train_time:390685ms step_avg:42.47ms +[2025-09-11 09:50:46] [Rank 0] PRINT: step:9200/10000 val_loss:4.2396 total_sharp:1.6765e-04 L1_sharp:1.7011e-04 L2_sharp:6.5008e-05 L3_sharp:2.6701e-05 L4_sharp:2.0480e-05 L5_sharp:3.4699e-05 L6_sharp:4.2356e-05 L7_sharp:4.6068e-05 L8_sharp:1.0790e-04 L9_sharp:9.8071e-05 L10_sharp:1.1640e-04 L11_sharp:1.8284e-04 L12_sharp:6.1811e-04 total_fnorm:7.1562e+00 total_l1_linf:1.0176e+04 total_spectral:3.5469e+00 L1_fnorm:1.1953e+00 L2_fnorm:1.1016e+00 L3_fnorm:1.1250e+00 L4_fnorm:1.1328e+00 L5_fnorm:1.1172e+00 L6_fnorm:1.1328e+00 L7_fnorm:1.1250e+00 L8_fnorm:1.0859e+00 L9_fnorm:1.0938e+00 L10_fnorm:1.1094e+00 L11_fnorm:1.1016e+00 L12_fnorm:1.1094e+00 L1_l1linf:1.8652e-01 L2_l1linf:1.8652e-01 L3_l1linf:1.7871e-01 L4_l1linf:1.8555e-01 L5_l1linf:1.8457e-01 
L6_l1linf:1.8164e-01 L7_l1linf:1.8262e-01 L8_l1linf:1.7188e-01 L9_l1linf:1.5918e-01 L10_l1linf:1.5234e-01 L11_l1linf:1.5332e-01 L12_l1linf:1.9141e-01 L1_spectral:1.8333e-02 L2_spectral:1.6249e-02 L3_spectral:1.6189e-02 L4_spectral:1.6527e-02 L5_spectral:1.6747e-02 L6_spectral:1.6843e-02 L7_spectral:1.6738e-02 L8_spectral:1.7493e-02 L9_spectral:1.7145e-02 L10_spectral:1.7105e-02 L11_spectral:1.7044e-02 L12_spectral:1.7207e-02 train_time:390685ms step_avg:42.47ms +[2025-09-11 09:50:47] [Rank 0] step:9201/10000 train_time:391888ms step_avg:42.59ms +[2025-09-11 09:50:47] [Rank 0] step:9201/10000 train_time:391888ms step_avg:42.59ms +[2025-09-11 09:50:48] [Rank 0] step:9221/10000 train_time:392635ms step_avg:42.58ms +[2025-09-11 09:50:48] [Rank 0] step:9221/10000 train_time:392635ms step_avg:42.58ms +[2025-09-11 09:50:49] [Rank 0] step:9241/10000 train_time:393346ms step_avg:42.57ms +[2025-09-11 09:50:49] [Rank 0] step:9241/10000 train_time:393346ms step_avg:42.57ms +[2025-09-11 09:50:50] [Rank 0] step:9261/10000 train_time:394060ms step_avg:42.55ms +[2025-09-11 09:50:50] [Rank 0] step:9261/10000 train_time:394060ms step_avg:42.55ms +[2025-09-11 09:50:50] [Rank 0] step:9281/10000 train_time:394782ms step_avg:42.54ms +[2025-09-11 09:50:50] [Rank 0] step:9281/10000 train_time:394782ms step_avg:42.54ms +[2025-09-11 09:50:51] [Rank 0] step:9301/10000 train_time:395493ms step_avg:42.52ms +[2025-09-11 09:50:51] [Rank 0] step:9301/10000 train_time:395493ms step_avg:42.52ms +[2025-09-11 09:50:52] [Rank 0] step:9321/10000 train_time:396208ms step_avg:42.51ms +[2025-09-11 09:50:52] [Rank 0] step:9321/10000 train_time:396208ms step_avg:42.51ms +[2025-09-11 09:50:52] [Rank 0] step:9341/10000 train_time:396917ms step_avg:42.49ms +[2025-09-11 09:50:52] [Rank 0] step:9341/10000 train_time:396917ms step_avg:42.49ms +[2025-09-11 09:50:53] [Rank 0] step:9361/10000 train_time:397627ms step_avg:42.48ms +[2025-09-11 09:50:53] [Rank 0] step:9361/10000 train_time:397627ms step_avg:42.48ms 
+[2025-09-11 09:50:54] [Rank 0] step:9381/10000 train_time:398337ms step_avg:42.46ms +[2025-09-11 09:50:54] [Rank 0] step:9381/10000 train_time:398337ms step_avg:42.46ms +[2025-09-11 09:50:55] [Rank 0] step:9401/10000 train_time:399051ms step_avg:42.45ms +[2025-09-11 09:50:55] [Rank 0] step:9401/10000 train_time:399051ms step_avg:42.45ms +[2025-09-11 09:50:55] [Rank 0] step:9421/10000 train_time:399767ms step_avg:42.43ms +[2025-09-11 09:50:55] [Rank 0] step:9421/10000 train_time:399767ms step_avg:42.43ms +[2025-09-11 09:50:56] [Rank 0] step:9441/10000 train_time:400483ms step_avg:42.42ms +[2025-09-11 09:50:56] [Rank 0] step:9441/10000 train_time:400483ms step_avg:42.42ms +[2025-09-11 09:50:57] [Rank 0] step:9461/10000 train_time:401194ms step_avg:42.41ms +[2025-09-11 09:50:57] [Rank 0] step:9461/10000 train_time:401194ms step_avg:42.41ms +[2025-09-11 09:50:57] [Rank 0] step:9481/10000 train_time:401908ms step_avg:42.39ms +[2025-09-11 09:50:57] [Rank 0] step:9481/10000 train_time:401908ms step_avg:42.39ms +[2025-09-11 09:50:58] [Rank 0] step:9501/10000 train_time:402623ms step_avg:42.38ms +[2025-09-11 09:50:58] [Rank 0] step:9501/10000 train_time:402623ms step_avg:42.38ms +[2025-09-11 09:50:59] [Rank 0] step:9521/10000 train_time:403339ms step_avg:42.36ms +[2025-09-11 09:50:59] [Rank 0] step:9521/10000 train_time:403339ms step_avg:42.36ms +[2025-09-11 09:51:00] [Rank 0] step:9541/10000 train_time:404049ms step_avg:42.35ms +[2025-09-11 09:51:00] [Rank 0] step:9541/10000 train_time:404049ms step_avg:42.35ms +[2025-09-11 09:51:00] [Rank 0] step:9561/10000 train_time:404761ms step_avg:42.33ms +[2025-09-11 09:51:00] [Rank 0] step:9561/10000 train_time:404761ms step_avg:42.33ms +[2025-09-11 09:51:01] [Rank 0] step:9581/10000 train_time:405476ms step_avg:42.32ms +[2025-09-11 09:51:01] [Rank 0] step:9581/10000 train_time:405476ms step_avg:42.32ms +[2025-09-11 09:51:02] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 09:51:02] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 09:51:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 09:51:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 09:51:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 09:51:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 09:51:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:51:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:51:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 09:51:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 09:51:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 09:51:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 09:51:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 09:51:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 09:51:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 09:51:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 09:51:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 09:51:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 09:51:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 09:51:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 09:51:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 09:51:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 09:51:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 09:51:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 09:51:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 09:51:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 09:51:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 09:51:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 09:51:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 09:51:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 09:51:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 09:51:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 09:51:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 09:51:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 09:51:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 09:51:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 09:51:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 09:51:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 09:51:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 09:51:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 09:51:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 09:51:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 09:51:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 09:51:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 09:51:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:51:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:51:12] [Rank 0] PRINT: step:9600/10000 val_loss:4.2303 total_sharp:1.1615e-04 L1_sharp:1.4549e-04 L2_sharp:4.3840e-05 L3_sharp:9.3441e-07 L4_sharp:1.9282e-05 L5_sharp:3.7357e-05 L6_sharp:3.4489e-05 L7_sharp:3.8275e-05 L8_sharp:7.5389e-05 L9_sharp:7.4780e-05 L10_sharp:8.6954e-05 L11_sharp:1.3299e-04 L12_sharp:4.7623e-04 total_fnorm:4.0000e+00 total_l1_linf:4.8000e+03 total_spectral:2.0000e+00 L1_fnorm:6.7969e-01 L2_fnorm:6.2109e-01 L3_fnorm:6.3281e-01 L4_fnorm:6.4062e-01 L5_fnorm:6.2891e-01 L6_fnorm:6.3281e-01 L7_fnorm:6.3281e-01 L8_fnorm:6.0938e-01 L9_fnorm:6.1328e-01 L10_fnorm:6.1719e-01 L11_fnorm:6.1719e-01 L12_fnorm:6.2891e-01 L1_l1linf:8.6914e-02 L2_l1linf:8.5938e-02 L3_l1linf:8.8379e-02 L4_l1linf:8.6914e-02 L5_l1linf:8.7891e-02 L6_l1linf:8.8379e-02 L7_l1linf:8.9844e-02 L8_l1linf:8.3496e-02 L9_l1linf:7.4707e-02 L10_l1linf:7.0801e-02 L11_l1linf:7.5195e-02 L12_l1linf:9.5703e-02 L1_spectral:1.0710e-02 L2_spectral:9.4355e-03 L3_spectral:9.2412e-03 L4_spectral:9.4628e-03 L5_spectral:9.4515e-03 L6_spectral:9.5553e-03 L7_spectral:9.6929e-03 L8_spectral:1.0221e-02 L9_spectral:9.7558e-03 L10_spectral:9.9055e-03 L11_spectral:9.8284e-03 L12_spectral:9.9015e-03 train_time:406168ms step_avg:42.31ms +[2025-09-11 09:51:12] [Rank 0] PRINT: step:9600/10000 
val_loss:4.2303 total_sharp:1.1615e-04 L1_sharp:1.4549e-04 L2_sharp:4.3840e-05 L3_sharp:9.3441e-07 L4_sharp:1.9282e-05 L5_sharp:3.7357e-05 L6_sharp:3.4489e-05 L7_sharp:3.8275e-05 L8_sharp:7.5389e-05 L9_sharp:7.4780e-05 L10_sharp:8.6954e-05 L11_sharp:1.3299e-04 L12_sharp:4.7623e-04 total_fnorm:4.0000e+00 total_l1_linf:4.8000e+03 total_spectral:2.0000e+00 L1_fnorm:6.7969e-01 L2_fnorm:6.2109e-01 L3_fnorm:6.3281e-01 L4_fnorm:6.4062e-01 L5_fnorm:6.2891e-01 L6_fnorm:6.3281e-01 L7_fnorm:6.3281e-01 L8_fnorm:6.0938e-01 L9_fnorm:6.1328e-01 L10_fnorm:6.1719e-01 L11_fnorm:6.1719e-01 L12_fnorm:6.2891e-01 L1_l1linf:8.6914e-02 L2_l1linf:8.5938e-02 L3_l1linf:8.8379e-02 L4_l1linf:8.6914e-02 L5_l1linf:8.7891e-02 L6_l1linf:8.8379e-02 L7_l1linf:8.9844e-02 L8_l1linf:8.3496e-02 L9_l1linf:7.4707e-02 L10_l1linf:7.0801e-02 L11_l1linf:7.5195e-02 L12_l1linf:9.5703e-02 L1_spectral:1.0710e-02 L2_spectral:9.4355e-03 L3_spectral:9.2412e-03 L4_spectral:9.4628e-03 L5_spectral:9.4515e-03 L6_spectral:9.5553e-03 L7_spectral:9.6929e-03 L8_spectral:1.0221e-02 L9_spectral:9.7558e-03 L10_spectral:9.9055e-03 L11_spectral:9.8284e-03 L12_spectral:9.9015e-03 train_time:406168ms step_avg:42.31ms +[2025-09-11 09:51:13] [Rank 0] step:9601/10000 train_time:407352ms step_avg:42.43ms +[2025-09-11 09:51:13] [Rank 0] step:9601/10000 train_time:407352ms step_avg:42.43ms +[2025-09-11 09:51:14] [Rank 0] step:9621/10000 train_time:408057ms step_avg:42.41ms +[2025-09-11 09:51:14] [Rank 0] step:9621/10000 train_time:408057ms step_avg:42.41ms +[2025-09-11 09:51:14] [Rank 0] step:9641/10000 train_time:408776ms step_avg:42.40ms +[2025-09-11 09:51:14] [Rank 0] step:9641/10000 train_time:408776ms step_avg:42.40ms +[2025-09-11 09:51:15] [Rank 0] step:9661/10000 train_time:409502ms step_avg:42.39ms +[2025-09-11 09:51:15] [Rank 0] step:9661/10000 train_time:409502ms step_avg:42.39ms +[2025-09-11 09:51:16] [Rank 0] step:9681/10000 train_time:410221ms step_avg:42.37ms +[2025-09-11 09:51:16] [Rank 0] step:9681/10000 
train_time:410221ms step_avg:42.37ms +[2025-09-11 09:51:16] [Rank 0] step:9701/10000 train_time:410941ms step_avg:42.36ms +[2025-09-11 09:51:16] [Rank 0] step:9701/10000 train_time:410941ms step_avg:42.36ms +[2025-09-11 09:51:17] [Rank 0] step:9721/10000 train_time:411666ms step_avg:42.35ms +[2025-09-11 09:51:17] [Rank 0] step:9721/10000 train_time:411666ms step_avg:42.35ms +[2025-09-11 09:51:18] [Rank 0] step:9741/10000 train_time:412388ms step_avg:42.34ms +[2025-09-11 09:51:18] [Rank 0] step:9741/10000 train_time:412388ms step_avg:42.34ms +[2025-09-11 09:51:19] [Rank 0] step:9761/10000 train_time:413109ms step_avg:42.32ms +[2025-09-11 09:51:19] [Rank 0] step:9761/10000 train_time:413109ms step_avg:42.32ms +[2025-09-11 09:51:19] [Rank 0] step:9781/10000 train_time:413827ms step_avg:42.31ms +[2025-09-11 09:51:19] [Rank 0] step:9781/10000 train_time:413827ms step_avg:42.31ms +[2025-09-11 09:51:20] [Rank 0] step:9801/10000 train_time:414552ms step_avg:42.30ms +[2025-09-11 09:51:20] [Rank 0] step:9801/10000 train_time:414552ms step_avg:42.30ms +[2025-09-11 09:51:21] [Rank 0] step:9821/10000 train_time:415275ms step_avg:42.28ms +[2025-09-11 09:51:21] [Rank 0] step:9821/10000 train_time:415275ms step_avg:42.28ms +[2025-09-11 09:51:22] [Rank 0] step:9841/10000 train_time:416000ms step_avg:42.27ms +[2025-09-11 09:51:22] [Rank 0] step:9841/10000 train_time:416000ms step_avg:42.27ms +[2025-09-11 09:51:22] [Rank 0] step:9861/10000 train_time:416721ms step_avg:42.26ms +[2025-09-11 09:51:22] [Rank 0] step:9861/10000 train_time:416721ms step_avg:42.26ms +[2025-09-11 09:51:23] [Rank 0] step:9881/10000 train_time:417441ms step_avg:42.25ms +[2025-09-11 09:51:23] [Rank 0] step:9881/10000 train_time:417441ms step_avg:42.25ms +[2025-09-11 09:51:24] [Rank 0] step:9901/10000 train_time:418158ms step_avg:42.23ms +[2025-09-11 09:51:24] [Rank 0] step:9901/10000 train_time:418158ms step_avg:42.23ms +[2025-09-11 09:51:24] [Rank 0] step:9921/10000 train_time:418877ms step_avg:42.22ms 
+[2025-09-11 09:51:24] [Rank 0] step:9921/10000 train_time:418877ms step_avg:42.22ms +[2025-09-11 09:51:25] [Rank 0] step:9941/10000 train_time:419602ms step_avg:42.21ms +[2025-09-11 09:51:25] [Rank 0] step:9941/10000 train_time:419602ms step_avg:42.21ms +[2025-09-11 09:51:26] [Rank 0] step:9961/10000 train_time:420873ms step_avg:42.25ms +[2025-09-11 09:51:26] [Rank 0] step:9961/10000 train_time:420873ms step_avg:42.25ms +[2025-09-11 09:51:27] [Rank 0] step:9981/10000 train_time:421595ms step_avg:42.24ms +[2025-09-11 09:51:27] [Rank 0] step:9981/10000 train_time:421595ms step_avg:42.24ms +[2025-09-11 09:51:28] [Rank 0] step:10000/10000 train_time:422287ms step_avg:42.23ms +[2025-09-11 09:51:28] [Rank 0] step:10000/10000 train_time:422287ms step_avg:42.23ms +[2025-09-11 09:51:28] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 09:51:28] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 09:51:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 09:51:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 09:51:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 09:51:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 09:51:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:51:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:51:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 09:51:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 09:51:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 09:51:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 09:51:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 09:51:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 09:51:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 09:51:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 09:51:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 09:51:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 09:51:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 09:51:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 09:51:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 09:51:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 09:51:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 09:51:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 09:51:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 09:51:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 09:51:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 09:51:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 09:51:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 09:51:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 09:51:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 09:51:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 09:51:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 09:51:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 09:51:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 09:51:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 09:51:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 09:51:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 09:51:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 09:51:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 09:51:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 09:51:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 09:51:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 09:51:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 09:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:51:39] [Rank 0] PRINT: step:10000/10000 val_loss:4.2268 total_sharp:7.0537e-05 L1_sharp:1.4856e-04 L2_sharp:2.8335e-05 L3_sharp:-4.1093e-06 L4_sharp:1.2332e-05 L5_sharp:2.1575e-05 L6_sharp:1.7618e-05 L7_sharp:2.9552e-05 L8_sharp:4.4006e-05 L9_sharp:4.8440e-05 L10_sharp:6.3451e-05 L11_sharp:9.9140e-05 L12_sharp:3.4633e-04 total_fnorm:1.5078e+00 total_l1_linf:1.3440e+03 total_spectral:7.6562e-01 L1_fnorm:2.6367e-01 L2_fnorm:2.4023e-01 L3_fnorm:2.4512e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4609e-01 L8_fnorm:2.3828e-01 L9_fnorm:2.3926e-01 L10_fnorm:2.4121e-01 L11_fnorm:2.4023e-01 L12_fnorm:2.4609e-01 L1_l1linf:2.8931e-02 L2_l1linf:2.6123e-02 L3_l1linf:2.7100e-02 L4_l1linf:2.8564e-02 L5_l1linf:2.6855e-02 L6_l1linf:2.9053e-02 L7_l1linf:2.6978e-02 L8_l1linf:2.6001e-02 L9_l1linf:2.4048e-02 L10_l1linf:2.2949e-02 L11_l1linf:2.3071e-02 L12_l1linf:3.1006e-02 L1_spectral:4.2188e-03 L2_spectral:3.7623e-03 L3_spectral:3.6513e-03 L4_spectral:3.7247e-03 L5_spectral:3.7630e-03 L6_spectral:3.8169e-03 L7_spectral:3.8344e-03 L8_spectral:4.0634e-03 L9_spectral:3.9000e-03 L10_spectral:3.9298e-03 L11_spectral:3.9234e-03 L12_spectral:3.9432e-03 train_time:422309ms step_avg:42.23ms +[2025-09-11 09:51:39] [Rank 0] PRINT: step:10000/10000 val_loss:4.2268 total_sharp:7.0537e-05 L1_sharp:1.4856e-04 L2_sharp:2.8335e-05 L3_sharp:-4.1093e-06 L4_sharp:1.2332e-05 L5_sharp:2.1575e-05 L6_sharp:1.7618e-05 L7_sharp:2.9552e-05 L8_sharp:4.4006e-05 L9_sharp:4.8440e-05 L10_sharp:6.3451e-05 L11_sharp:9.9140e-05 L12_sharp:3.4633e-04 total_fnorm:1.5078e+00 total_l1_linf:1.3440e+03 total_spectral:7.6562e-01 L1_fnorm:2.6367e-01 L2_fnorm:2.4023e-01 L3_fnorm:2.4512e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4609e-01 L8_fnorm:2.3828e-01 L9_fnorm:2.3926e-01 L10_fnorm:2.4121e-01 L11_fnorm:2.4023e-01 L12_fnorm:2.4609e-01 L1_l1linf:2.8931e-02 L2_l1linf:2.6123e-02 L3_l1linf:2.7100e-02 L4_l1linf:2.8564e-02 L5_l1linf:2.6855e-02 
L6_l1linf:2.9053e-02 L7_l1linf:2.6978e-02 L8_l1linf:2.6001e-02 L9_l1linf:2.4048e-02 L10_l1linf:2.2949e-02 L11_l1linf:2.3071e-02 L12_l1linf:3.1006e-02 L1_spectral:4.2188e-03 L2_spectral:3.7623e-03 L3_spectral:3.6513e-03 L4_spectral:3.7247e-03 L5_spectral:3.7630e-03 L6_spectral:3.8169e-03 L7_spectral:3.8344e-03 L8_spectral:4.0634e-03 L9_spectral:3.9000e-03 L10_spectral:3.9298e-03 L11_spectral:3.9234e-03 L12_spectral:3.9432e-03 train_time:422309ms step_avg:42.23ms +[2025-09-11 09:51:39] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 09:51:39 2025 --- +[2025-09-11 09:51:39] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 09:51:39 2025 --- +[2025-09-11 09:51:39] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 09:51:39] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.1_seed_44/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.1_seed_44/config.json new file mode 100644 index 0000000000000000000000000000000000000000..f918383d0425cc7d1c9307ca494488dfaa9bf3d4 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.1_seed_44/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 44, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.01, + "muon_lr": 0.1, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "60befd8f-1b0a-47e7-a1d6-eaeb558b0505", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.1_seed_44/training_log_60befd8f-1b0a-47e7-a1d6-eaeb558b0505.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.1_seed_44/training_log_60befd8f-1b0a-47e7-a1d6-eaeb558b0505.txt new file mode 100644 index 0000000000000000000000000000000000000000..b09cee748429fe7dac5359bb4e268c6fb8e944dd --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.1_seed_44/training_log_60befd8f-1b0a-47e7-a1d6-eaeb558b0505.txt @@ -0,0 +1,4264 @@ +[2025-09-11 09:24:59] [Rank 0] PRINT: --- Script Start: Thu Sep 11 09:24:59 2025 --- +[2025-09-11 09:24:59] [Rank 0] PRINT: --- Script Start: Thu Sep 11 09:24:59 2025 --- +[2025-09-11 09:24:59] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.01, muon_lr=0.1, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 09:24:59] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.01, muon_lr=0.1, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 09:24:59] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 09:24:59] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 09:24:59] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 09:24:59] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 09:24:59] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.1_seed_44 +[2025-09-11 09:24:59] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.01_muon_lr_0.1_seed_44 +[2025-09-11 09:24:59] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, 
asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, 
"unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." 
+ "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention 
sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 09:24:59] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 09:24:59] [Rank 0] PRINT: Constructing model... +[2025-09-11 09:24:59] [Rank 0] PRINT: Constructing model... +[2025-09-11 09:25:00] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 09:25:00] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 09:25:00] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 09:25:00] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 09:25:00] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 09:25:00] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 09:25:00] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 09:25:00] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 09:25:00] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 09:25:00] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 09:25:02] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 09:25:02] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 09:25:02] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 09:25:02] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 09:25:02] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 09:25:02] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 09:25:08] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 09:25:08] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 09:25:08] [Rank 0] PRINT: Starting warmup... +[2025-09-11 09:25:08] [Rank 0] PRINT: Starting warmup... +[2025-09-11 09:25:45] [Rank 0] PRINT: Warmup complete. +[2025-09-11 09:25:45] [Rank 0] PRINT: Warmup complete. +[2025-09-11 09:25:45] [Rank 0] PRINT: Starting training... +[2025-09-11 09:25:45] [Rank 0] PRINT: Starting training... 
+[2025-09-11 09:25:46] [Rank 0] step:21/10000 train_time:1130ms step_avg:53.81ms +[2025-09-11 09:25:46] [Rank 0] step:21/10000 train_time:1130ms step_avg:53.81ms +[2025-09-11 09:25:47] [Rank 0] step:41/10000 train_time:1856ms step_avg:45.27ms +[2025-09-11 09:25:47] [Rank 0] step:41/10000 train_time:1856ms step_avg:45.27ms +[2025-09-11 09:25:48] [Rank 0] step:61/10000 train_time:2582ms step_avg:42.32ms +[2025-09-11 09:25:48] [Rank 0] step:61/10000 train_time:2582ms step_avg:42.32ms +[2025-09-11 09:25:48] [Rank 0] step:81/10000 train_time:3307ms step_avg:40.83ms +[2025-09-11 09:25:48] [Rank 0] step:81/10000 train_time:3307ms step_avg:40.83ms +[2025-09-11 09:25:49] [Rank 0] step:101/10000 train_time:4032ms step_avg:39.92ms +[2025-09-11 09:25:49] [Rank 0] step:101/10000 train_time:4032ms step_avg:39.92ms +[2025-09-11 09:25:50] [Rank 0] step:121/10000 train_time:4756ms step_avg:39.31ms +[2025-09-11 09:25:50] [Rank 0] step:121/10000 train_time:4756ms step_avg:39.31ms +[2025-09-11 09:25:51] [Rank 0] step:141/10000 train_time:5480ms step_avg:38.87ms +[2025-09-11 09:25:51] [Rank 0] step:141/10000 train_time:5480ms step_avg:38.87ms +[2025-09-11 09:25:51] [Rank 0] step:161/10000 train_time:6205ms step_avg:38.54ms +[2025-09-11 09:25:51] [Rank 0] step:161/10000 train_time:6205ms step_avg:38.54ms +[2025-09-11 09:25:52] [Rank 0] step:181/10000 train_time:6929ms step_avg:38.28ms +[2025-09-11 09:25:52] [Rank 0] step:181/10000 train_time:6929ms step_avg:38.28ms +[2025-09-11 09:25:53] [Rank 0] step:201/10000 train_time:7653ms step_avg:38.07ms +[2025-09-11 09:25:53] [Rank 0] step:201/10000 train_time:7653ms step_avg:38.07ms +[2025-09-11 09:25:54] [Rank 0] step:221/10000 train_time:8377ms step_avg:37.90ms +[2025-09-11 09:25:54] [Rank 0] step:221/10000 train_time:8377ms step_avg:37.90ms +[2025-09-11 09:25:54] [Rank 0] step:241/10000 train_time:9101ms step_avg:37.76ms +[2025-09-11 09:25:54] [Rank 0] step:241/10000 train_time:9101ms step_avg:37.76ms +[2025-09-11 09:25:55] [Rank 0] 
step:261/10000 train_time:9825ms step_avg:37.64ms +[2025-09-11 09:25:55] [Rank 0] step:261/10000 train_time:9825ms step_avg:37.64ms +[2025-09-11 09:25:56] [Rank 0] step:281/10000 train_time:10549ms step_avg:37.54ms +[2025-09-11 09:25:56] [Rank 0] step:281/10000 train_time:10549ms step_avg:37.54ms +[2025-09-11 09:25:56] [Rank 0] step:301/10000 train_time:11272ms step_avg:37.45ms +[2025-09-11 09:25:56] [Rank 0] step:301/10000 train_time:11272ms step_avg:37.45ms +[2025-09-11 09:25:57] [Rank 0] step:321/10000 train_time:11995ms step_avg:37.37ms +[2025-09-11 09:25:57] [Rank 0] step:321/10000 train_time:11995ms step_avg:37.37ms +[2025-09-11 09:25:58] [Rank 0] step:341/10000 train_time:12719ms step_avg:37.30ms +[2025-09-11 09:25:58] [Rank 0] step:341/10000 train_time:12719ms step_avg:37.30ms +[2025-09-11 09:25:59] [Rank 0] step:361/10000 train_time:13442ms step_avg:37.24ms +[2025-09-11 09:25:59] [Rank 0] step:361/10000 train_time:13442ms step_avg:37.24ms +[2025-09-11 09:25:59] [Rank 0] step:381/10000 train_time:14166ms step_avg:37.18ms +[2025-09-11 09:25:59] [Rank 0] step:381/10000 train_time:14166ms step_avg:37.18ms +[2025-09-11 09:26:00] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 09:26:00] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 09:26:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 09:26:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 09:26:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 09:26:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 09:26:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:26:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 09:26:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 09:26:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 09:26:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 09:26:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 09:26:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 09:26:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 09:26:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 09:26:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 09:26:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 09:26:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 09:26:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 09:26:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 09:26:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 09:26:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 09:26:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 09:26:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 09:26:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 09:26:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 09:26:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 09:26:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 09:26:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 09:26:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 09:26:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 09:26:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 09:26:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 09:26:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 09:26:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 09:26:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 09:26:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 09:26:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 09:26:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 09:26:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 09:26:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 09:26:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 09:26:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 09:26:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 09:26:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:26:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:26:48] [Rank 0] PRINT: step:400/10000 val_loss:6.1825 total_sharp:1.7455e-03 L1_sharp:3.3326e-03 L2_sharp:1.1401e-03 L3_sharp:5.4380e-04 L4_sharp:2.0917e-04 L5_sharp:1.6198e-04 L6_sharp:7.6959e-05 L7_sharp:1.4652e-04 L8_sharp:7.2240e-05 L9_sharp:1.3468e-04 L10_sharp:2.6283e-04 L11_sharp:3.2990e-04 L12_sharp:1.4437e-03 total_fnorm:5.6033e+01 total_l1_linf:2.1514e+05 total_spectral:2.8017e+01 L1_fnorm:1.2281e+01 L2_fnorm:1.1747e+01 L3_fnorm:1.1389e+01 L4_fnorm:1.1190e+01 L5_fnorm:1.1070e+01 L6_fnorm:1.1072e+01 L7_fnorm:1.1025e+01 L8_fnorm:1.0831e+01 L9_fnorm:1.0638e+01 L10_fnorm:1.0407e+01 L11_fnorm:1.0183e+01 L12_fnorm:9.7414e+00 L1_l1linf:3.6182e+00 L2_l1linf:3.3954e+00 L3_l1linf:3.5144e+00 L4_l1linf:3.6207e+00 L5_l1linf:3.6229e+00 L6_l1linf:3.6381e+00 L7_l1linf:3.5609e+00 L8_l1linf:3.4976e+00 L9_l1linf:3.2294e+00 L10_l1linf:3.0657e+00 L11_l1linf:2.8421e+00 L12_l1linf:2.2360e+00 L1_spectral:1.2187e-01 L2_spectral:1.2150e-01 L3_spectral:1.2137e-01 L4_spectral:1.2131e-01 L5_spectral:1.2131e-01 L6_spectral:1.2127e-01 L7_spectral:1.2122e-01 L8_spectral:1.2104e-01 L9_spectral:1.2113e-01 L10_spectral:1.2107e-01 L11_spectral:1.2114e-01 L12_spectral:1.2101e-01 train_time:14868ms step_avg:37.17ms +[2025-09-11 09:26:48] [Rank 0] PRINT: step:400/10000 val_loss:6.1825 total_sharp:1.7455e-03 L1_sharp:3.3326e-03 L2_sharp:1.1401e-03 L3_sharp:5.4380e-04 L4_sharp:2.0917e-04 L5_sharp:1.6198e-04 L6_sharp:7.6959e-05 L7_sharp:1.4652e-04 L8_sharp:7.2240e-05 L9_sharp:1.3468e-04 L10_sharp:2.6283e-04 L11_sharp:3.2990e-04 L12_sharp:1.4437e-03 total_fnorm:5.6033e+01 total_l1_linf:2.1514e+05 total_spectral:2.8017e+01 L1_fnorm:1.2281e+01 L2_fnorm:1.1747e+01 L3_fnorm:1.1389e+01 L4_fnorm:1.1190e+01 L5_fnorm:1.1070e+01 L6_fnorm:1.1072e+01 L7_fnorm:1.1025e+01 L8_fnorm:1.0831e+01 L9_fnorm:1.0638e+01 L10_fnorm:1.0407e+01 L11_fnorm:1.0183e+01 L12_fnorm:9.7414e+00 L1_l1linf:3.6182e+00 L2_l1linf:3.3954e+00 L3_l1linf:3.5144e+00 L4_l1linf:3.6207e+00 L5_l1linf:3.6229e+00 
L6_l1linf:3.6381e+00 L7_l1linf:3.5609e+00 L8_l1linf:3.4976e+00 L9_l1linf:3.2294e+00 L10_l1linf:3.0657e+00 L11_l1linf:2.8421e+00 L12_l1linf:2.2360e+00 L1_spectral:1.2187e-01 L2_spectral:1.2150e-01 L3_spectral:1.2137e-01 L4_spectral:1.2131e-01 L5_spectral:1.2131e-01 L6_spectral:1.2127e-01 L7_spectral:1.2122e-01 L8_spectral:1.2104e-01 L9_spectral:1.2113e-01 L10_spectral:1.2107e-01 L11_spectral:1.2114e-01 L12_spectral:1.2101e-01 train_time:14868ms step_avg:37.17ms +[2025-09-11 09:27:17] [Rank 0] step:401/10000 train_time:44542ms step_avg:111.08ms +[2025-09-11 09:27:17] [Rank 0] step:401/10000 train_time:44542ms step_avg:111.08ms +[2025-09-11 09:27:20] [Rank 0] step:421/10000 train_time:46844ms step_avg:111.27ms +[2025-09-11 09:27:20] [Rank 0] step:421/10000 train_time:46844ms step_avg:111.27ms +[2025-09-11 09:27:20] [Rank 0] step:441/10000 train_time:47481ms step_avg:107.67ms +[2025-09-11 09:27:20] [Rank 0] step:441/10000 train_time:47481ms step_avg:107.67ms +[2025-09-11 09:27:21] [Rank 0] step:461/10000 train_time:48118ms step_avg:104.38ms +[2025-09-11 09:27:21] [Rank 0] step:461/10000 train_time:48118ms step_avg:104.38ms +[2025-09-11 09:27:22] [Rank 0] step:481/10000 train_time:48755ms step_avg:101.36ms +[2025-09-11 09:27:22] [Rank 0] step:481/10000 train_time:48755ms step_avg:101.36ms +[2025-09-11 09:27:22] [Rank 0] step:501/10000 train_time:49393ms step_avg:98.59ms +[2025-09-11 09:27:22] [Rank 0] step:501/10000 train_time:49393ms step_avg:98.59ms +[2025-09-11 09:27:23] [Rank 0] step:521/10000 train_time:50029ms step_avg:96.03ms +[2025-09-11 09:27:23] [Rank 0] step:521/10000 train_time:50029ms step_avg:96.03ms +[2025-09-11 09:27:24] [Rank 0] step:541/10000 train_time:50666ms step_avg:93.65ms +[2025-09-11 09:27:24] [Rank 0] step:541/10000 train_time:50666ms step_avg:93.65ms +[2025-09-11 09:27:24] [Rank 0] step:561/10000 train_time:51303ms step_avg:91.45ms +[2025-09-11 09:27:24] [Rank 0] step:561/10000 train_time:51303ms step_avg:91.45ms +[2025-09-11 09:27:25] [Rank 
0] step:581/10000 train_time:51940ms step_avg:89.40ms +[2025-09-11 09:27:25] [Rank 0] step:581/10000 train_time:51940ms step_avg:89.40ms +[2025-09-11 09:27:25] [Rank 0] step:601/10000 train_time:52576ms step_avg:87.48ms +[2025-09-11 09:27:25] [Rank 0] step:601/10000 train_time:52576ms step_avg:87.48ms +[2025-09-11 09:27:26] [Rank 0] step:621/10000 train_time:53213ms step_avg:85.69ms +[2025-09-11 09:27:26] [Rank 0] step:621/10000 train_time:53213ms step_avg:85.69ms +[2025-09-11 09:27:27] [Rank 0] step:641/10000 train_time:53849ms step_avg:84.01ms +[2025-09-11 09:27:27] [Rank 0] step:641/10000 train_time:53849ms step_avg:84.01ms +[2025-09-11 09:27:27] [Rank 0] step:661/10000 train_time:54485ms step_avg:82.43ms +[2025-09-11 09:27:27] [Rank 0] step:661/10000 train_time:54485ms step_avg:82.43ms +[2025-09-11 09:27:28] [Rank 0] step:681/10000 train_time:55121ms step_avg:80.94ms +[2025-09-11 09:27:28] [Rank 0] step:681/10000 train_time:55121ms step_avg:80.94ms +[2025-09-11 09:27:29] [Rank 0] step:701/10000 train_time:55757ms step_avg:79.54ms +[2025-09-11 09:27:29] [Rank 0] step:701/10000 train_time:55757ms step_avg:79.54ms +[2025-09-11 09:27:29] [Rank 0] step:721/10000 train_time:56393ms step_avg:78.21ms +[2025-09-11 09:27:29] [Rank 0] step:721/10000 train_time:56393ms step_avg:78.21ms +[2025-09-11 09:27:30] [Rank 0] step:741/10000 train_time:57028ms step_avg:76.96ms +[2025-09-11 09:27:30] [Rank 0] step:741/10000 train_time:57028ms step_avg:76.96ms +[2025-09-11 09:27:31] [Rank 0] step:761/10000 train_time:57668ms step_avg:75.78ms +[2025-09-11 09:27:31] [Rank 0] step:761/10000 train_time:57668ms step_avg:75.78ms +[2025-09-11 09:27:31] [Rank 0] step:781/10000 train_time:58309ms step_avg:74.66ms +[2025-09-11 09:27:31] [Rank 0] step:781/10000 train_time:58309ms step_avg:74.66ms +[2025-09-11 09:27:32] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 09:27:32] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 09:27:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 09:27:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 09:28:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 09:28:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 09:28:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:28:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:28:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 09:28:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 09:28:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 09:28:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 09:28:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 09:28:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 09:28:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 09:28:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 09:28:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 09:28:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 09:28:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 09:28:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 09:28:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 09:28:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 09:28:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 09:28:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 09:28:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 09:28:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 09:28:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 09:28:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 09:28:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 09:28:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 09:28:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 09:28:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 09:28:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 09:28:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 09:28:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 09:28:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 09:28:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 09:28:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 09:28:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 09:28:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 09:28:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... 
+[2025-09-11 09:28:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 09:28:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 09:28:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 09:28:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:28:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:28:19] [Rank 0] PRINT: step:800/10000 val_loss:5.7735 total_sharp:1.0448e-03 L1_sharp:1.2332e-03 L2_sharp:2.7659e-04 L3_sharp:1.8073e-04 L4_sharp:5.4214e-05 L5_sharp:1.1078e-04 L6_sharp:9.9036e-05 L7_sharp:5.2562e-05 L8_sharp:9.9226e-05 L9_sharp:1.0344e-04 L10_sharp:1.5077e-04 L11_sharp:3.4973e-04 L12_sharp:1.8234e-03 total_fnorm:5.8250e+01 total_l1_linf:1.9763e+05 total_spectral:2.8750e+01 L1_fnorm:1.2312e+01 L2_fnorm:1.1875e+01 L3_fnorm:1.1812e+01 L4_fnorm:1.1688e+01 L5_fnorm:1.1500e+01 L6_fnorm:1.1688e+01 L7_fnorm:1.1562e+01 L8_fnorm:1.1188e+01 L9_fnorm:1.1375e+01 L10_fnorm:1.1062e+01 L11_fnorm:1.0875e+01 L12_fnorm:1.0000e+01 L1_l1linf:3.6562e+00 L2_l1linf:3.5938e+00 L3_l1linf:3.6250e+00 L4_l1linf:3.6406e+00 L5_l1linf:3.5938e+00 L6_l1linf:3.6094e+00 L7_l1linf:3.6406e+00 L8_l1linf:3.5781e+00 L9_l1linf:3.4062e+00 L10_l1linf:3.2344e+00 L11_l1linf:2.9375e+00 L12_l1linf:2.1406e+00 L1_spectral:1.3375e-01 L2_spectral:1.3246e-01 L3_spectral:1.3211e-01 L4_spectral:1.3181e-01 L5_spectral:1.3178e-01 L6_spectral:1.3151e-01 L7_spectral:1.3128e-01 L8_spectral:1.3110e-01 L9_spectral:1.3147e-01 L10_spectral:1.3060e-01 L11_spectral:1.3142e-01 L12_spectral:1.3115e-01 train_time:58932ms step_avg:73.67ms +[2025-09-11 09:28:19] [Rank 0] PRINT: step:800/10000 val_loss:5.7735 total_sharp:1.0448e-03 L1_sharp:1.2332e-03 L2_sharp:2.7659e-04 L3_sharp:1.8073e-04 L4_sharp:5.4214e-05 L5_sharp:1.1078e-04 L6_sharp:9.9036e-05 L7_sharp:5.2562e-05 L8_sharp:9.9226e-05 
L9_sharp:1.0344e-04 L10_sharp:1.5077e-04 L11_sharp:3.4973e-04 L12_sharp:1.8234e-03 total_fnorm:5.8250e+01 total_l1_linf:1.9763e+05 total_spectral:2.8750e+01 L1_fnorm:1.2312e+01 L2_fnorm:1.1875e+01 L3_fnorm:1.1812e+01 L4_fnorm:1.1688e+01 L5_fnorm:1.1500e+01 L6_fnorm:1.1688e+01 L7_fnorm:1.1562e+01 L8_fnorm:1.1188e+01 L9_fnorm:1.1375e+01 L10_fnorm:1.1062e+01 L11_fnorm:1.0875e+01 L12_fnorm:1.0000e+01 L1_l1linf:3.6562e+00 L2_l1linf:3.5938e+00 L3_l1linf:3.6250e+00 L4_l1linf:3.6406e+00 L5_l1linf:3.5938e+00 L6_l1linf:3.6094e+00 L7_l1linf:3.6406e+00 L8_l1linf:3.5781e+00 L9_l1linf:3.4062e+00 L10_l1linf:3.2344e+00 L11_l1linf:2.9375e+00 L12_l1linf:2.1406e+00 L1_spectral:1.3375e-01 L2_spectral:1.3246e-01 L3_spectral:1.3211e-01 L4_spectral:1.3181e-01 L5_spectral:1.3178e-01 L6_spectral:1.3151e-01 L7_spectral:1.3128e-01 L8_spectral:1.3110e-01 L9_spectral:1.3147e-01 L10_spectral:1.3060e-01 L11_spectral:1.3142e-01 L12_spectral:1.3115e-01 train_time:58932ms step_avg:73.67ms +[2025-09-11 09:28:20] [Rank 0] step:801/10000 train_time:60367ms step_avg:75.36ms +[2025-09-11 09:28:20] [Rank 0] step:801/10000 train_time:60367ms step_avg:75.36ms +[2025-09-11 09:28:21] [Rank 0] step:821/10000 train_time:61012ms step_avg:74.31ms +[2025-09-11 09:28:21] [Rank 0] step:821/10000 train_time:61012ms step_avg:74.31ms +[2025-09-11 09:28:22] [Rank 0] step:841/10000 train_time:61654ms step_avg:73.31ms +[2025-09-11 09:28:22] [Rank 0] step:841/10000 train_time:61654ms step_avg:73.31ms +[2025-09-11 09:28:22] [Rank 0] step:861/10000 train_time:62298ms step_avg:72.36ms +[2025-09-11 09:28:22] [Rank 0] step:861/10000 train_time:62298ms step_avg:72.36ms +[2025-09-11 09:28:23] [Rank 0] step:881/10000 train_time:62939ms step_avg:71.44ms +[2025-09-11 09:28:23] [Rank 0] step:881/10000 train_time:62939ms step_avg:71.44ms +[2025-09-11 09:28:24] [Rank 0] step:901/10000 train_time:63580ms step_avg:70.57ms +[2025-09-11 09:28:24] [Rank 0] step:901/10000 train_time:63580ms step_avg:70.57ms +[2025-09-11 09:28:24] [Rank 0] 
step:921/10000 train_time:64222ms step_avg:69.73ms +[2025-09-11 09:28:24] [Rank 0] step:921/10000 train_time:64222ms step_avg:69.73ms +[2025-09-11 09:28:25] [Rank 0] step:941/10000 train_time:64863ms step_avg:68.93ms +[2025-09-11 09:28:25] [Rank 0] step:941/10000 train_time:64863ms step_avg:68.93ms +[2025-09-11 09:28:25] [Rank 0] step:961/10000 train_time:65504ms step_avg:68.16ms +[2025-09-11 09:28:25] [Rank 0] step:961/10000 train_time:65504ms step_avg:68.16ms +[2025-09-11 09:28:26] [Rank 0] step:981/10000 train_time:66144ms step_avg:67.43ms +[2025-09-11 09:28:26] [Rank 0] step:981/10000 train_time:66144ms step_avg:67.43ms +[2025-09-11 09:28:27] [Rank 0] step:1001/10000 train_time:66785ms step_avg:66.72ms +[2025-09-11 09:28:27] [Rank 0] step:1001/10000 train_time:66785ms step_avg:66.72ms +[2025-09-11 09:28:27] [Rank 0] step:1021/10000 train_time:67426ms step_avg:66.04ms +[2025-09-11 09:28:27] [Rank 0] step:1021/10000 train_time:67426ms step_avg:66.04ms +[2025-09-11 09:28:28] [Rank 0] step:1041/10000 train_time:68067ms step_avg:65.39ms +[2025-09-11 09:28:28] [Rank 0] step:1041/10000 train_time:68067ms step_avg:65.39ms +[2025-09-11 09:28:29] [Rank 0] step:1061/10000 train_time:68708ms step_avg:64.76ms +[2025-09-11 09:28:29] [Rank 0] step:1061/10000 train_time:68708ms step_avg:64.76ms +[2025-09-11 09:28:29] [Rank 0] step:1081/10000 train_time:69348ms step_avg:64.15ms +[2025-09-11 09:28:29] [Rank 0] step:1081/10000 train_time:69348ms step_avg:64.15ms +[2025-09-11 09:28:30] [Rank 0] step:1101/10000 train_time:69989ms step_avg:63.57ms +[2025-09-11 09:28:30] [Rank 0] step:1101/10000 train_time:69989ms step_avg:63.57ms +[2025-09-11 09:28:31] [Rank 0] step:1121/10000 train_time:70632ms step_avg:63.01ms +[2025-09-11 09:28:31] [Rank 0] step:1121/10000 train_time:70632ms step_avg:63.01ms +[2025-09-11 09:28:31] [Rank 0] step:1141/10000 train_time:71273ms step_avg:62.47ms +[2025-09-11 09:28:31] [Rank 0] step:1141/10000 train_time:71273ms step_avg:62.47ms +[2025-09-11 09:28:32] 
[Rank 0] step:1161/10000 train_time:71914ms step_avg:61.94ms +[2025-09-11 09:28:32] [Rank 0] step:1161/10000 train_time:71914ms step_avg:61.94ms +[2025-09-11 09:28:33] [Rank 0] step:1181/10000 train_time:72555ms step_avg:61.44ms +[2025-09-11 09:28:33] [Rank 0] step:1181/10000 train_time:72555ms step_avg:61.44ms +[2025-09-11 09:28:33] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 09:28:33] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 09:28:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 09:28:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 09:28:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 09:28:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 09:28:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:28:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:28:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 09:28:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 09:28:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 09:28:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 09:28:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 09:28:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 09:28:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... 
+[2025-09-11 09:28:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 09:28:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 09:28:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 09:28:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 09:28:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 09:28:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 09:28:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 09:28:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 09:28:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 09:28:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 09:28:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 09:28:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 09:28:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 09:28:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 09:28:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 09:28:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 09:28:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 09:28:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 09:28:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 09:28:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... 
+[2025-09-11 09:28:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 09:28:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 09:28:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 09:28:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 09:28:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 09:28:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 09:28:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 09:28:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 09:28:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 09:28:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:28:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:28:45] [Rank 0] PRINT: step:1200/10000 val_loss:5.4335 total_sharp:6.7185e-04 L1_sharp:9.3571e-04 L2_sharp:2.7047e-04 L3_sharp:1.2296e-04 L4_sharp:6.9768e-05 L5_sharp:3.1613e-05 L6_sharp:6.7959e-05 L7_sharp:5.4914e-05 L8_sharp:7.8019e-05 L9_sharp:8.6205e-05 L10_sharp:9.3267e-05 L11_sharp:2.0770e-04 L12_sharp:1.1607e-03 total_fnorm:5.8250e+01 total_l1_linf:1.9558e+05 total_spectral:2.9125e+01 L1_fnorm:1.2438e+01 L2_fnorm:1.2125e+01 L3_fnorm:1.2125e+01 L4_fnorm:1.2062e+01 L5_fnorm:1.1875e+01 L6_fnorm:1.2062e+01 L7_fnorm:1.2062e+01 L8_fnorm:1.1688e+01 L9_fnorm:1.1938e+01 L10_fnorm:1.1812e+01 L11_fnorm:1.1812e+01 L12_fnorm:1.1375e+01 L1_l1linf:3.5469e+00 L2_l1linf:3.4844e+00 L3_l1linf:3.4531e+00 L4_l1linf:3.4531e+00 L5_l1linf:3.4062e+00 L6_l1linf:3.3906e+00 L7_l1linf:3.3594e+00 L8_l1linf:3.3750e+00 L9_l1linf:3.3438e+00 L10_l1linf:3.2812e+00 L11_l1linf:3.1562e+00 L12_l1linf:2.6250e+00 L1_spectral:1.3939e-01 L2_spectral:1.3862e-01 L3_spectral:1.3773e-01 L4_spectral:1.3649e-01 L5_spectral:1.3743e-01 L6_spectral:1.3770e-01 L7_spectral:1.3713e-01 L8_spectral:1.3710e-01 L9_spectral:1.3612e-01 L10_spectral:1.3610e-01 L11_spectral:1.3659e-01 L12_spectral:1.3599e-01 train_time:73178ms step_avg:60.98ms +[2025-09-11 09:28:45] [Rank 0] PRINT: step:1200/10000 val_loss:5.4335 total_sharp:6.7185e-04 L1_sharp:9.3571e-04 L2_sharp:2.7047e-04 L3_sharp:1.2296e-04 L4_sharp:6.9768e-05 L5_sharp:3.1613e-05 L6_sharp:6.7959e-05 L7_sharp:5.4914e-05 L8_sharp:7.8019e-05 L9_sharp:8.6205e-05 L10_sharp:9.3267e-05 L11_sharp:2.0770e-04 L12_sharp:1.1607e-03 total_fnorm:5.8250e+01 total_l1_linf:1.9558e+05 total_spectral:2.9125e+01 L1_fnorm:1.2438e+01 L2_fnorm:1.2125e+01 L3_fnorm:1.2125e+01 L4_fnorm:1.2062e+01 L5_fnorm:1.1875e+01 L6_fnorm:1.2062e+01 L7_fnorm:1.2062e+01 L8_fnorm:1.1688e+01 L9_fnorm:1.1938e+01 L10_fnorm:1.1812e+01 L11_fnorm:1.1812e+01 L12_fnorm:1.1375e+01 L1_l1linf:3.5469e+00 L2_l1linf:3.4844e+00 L3_l1linf:3.4531e+00 L4_l1linf:3.4531e+00 L5_l1linf:3.4062e+00 
L6_l1linf:3.3906e+00 L7_l1linf:3.3594e+00 L8_l1linf:3.3750e+00 L9_l1linf:3.3438e+00 L10_l1linf:3.2812e+00 L11_l1linf:3.1562e+00 L12_l1linf:2.6250e+00 L1_spectral:1.3939e-01 L2_spectral:1.3862e-01 L3_spectral:1.3773e-01 L4_spectral:1.3649e-01 L5_spectral:1.3743e-01 L6_spectral:1.3770e-01 L7_spectral:1.3713e-01 L8_spectral:1.3710e-01 L9_spectral:1.3612e-01 L10_spectral:1.3610e-01 L11_spectral:1.3659e-01 L12_spectral:1.3599e-01 train_time:73178ms step_avg:60.98ms +[2025-09-11 09:28:46] [Rank 0] step:1201/10000 train_time:74381ms step_avg:61.93ms +[2025-09-11 09:28:46] [Rank 0] step:1201/10000 train_time:74381ms step_avg:61.93ms +[2025-09-11 09:28:47] [Rank 0] step:1221/10000 train_time:75026ms step_avg:61.45ms +[2025-09-11 09:28:47] [Rank 0] step:1221/10000 train_time:75026ms step_avg:61.45ms +[2025-09-11 09:28:47] [Rank 0] step:1241/10000 train_time:75668ms step_avg:60.97ms +[2025-09-11 09:28:47] [Rank 0] step:1241/10000 train_time:75668ms step_avg:60.97ms +[2025-09-11 09:28:48] [Rank 0] step:1261/10000 train_time:76310ms step_avg:60.52ms +[2025-09-11 09:28:48] [Rank 0] step:1261/10000 train_time:76310ms step_avg:60.52ms +[2025-09-11 09:28:49] [Rank 0] step:1281/10000 train_time:76952ms step_avg:60.07ms +[2025-09-11 09:28:49] [Rank 0] step:1281/10000 train_time:76952ms step_avg:60.07ms +[2025-09-11 09:28:49] [Rank 0] step:1301/10000 train_time:77593ms step_avg:59.64ms +[2025-09-11 09:28:49] [Rank 0] step:1301/10000 train_time:77593ms step_avg:59.64ms +[2025-09-11 09:28:50] [Rank 0] step:1321/10000 train_time:78233ms step_avg:59.22ms +[2025-09-11 09:28:50] [Rank 0] step:1321/10000 train_time:78233ms step_avg:59.22ms +[2025-09-11 09:28:50] [Rank 0] step:1341/10000 train_time:78874ms step_avg:58.82ms +[2025-09-11 09:28:50] [Rank 0] step:1341/10000 train_time:78874ms step_avg:58.82ms +[2025-09-11 09:28:51] [Rank 0] step:1361/10000 train_time:79514ms step_avg:58.42ms +[2025-09-11 09:28:51] [Rank 0] step:1361/10000 train_time:79514ms step_avg:58.42ms +[2025-09-11 09:28:52] 
[Rank 0] step:1381/10000 train_time:80155ms step_avg:58.04ms +[2025-09-11 09:28:52] [Rank 0] step:1381/10000 train_time:80155ms step_avg:58.04ms +[2025-09-11 09:28:52] [Rank 0] step:1401/10000 train_time:80795ms step_avg:57.67ms +[2025-09-11 09:28:52] [Rank 0] step:1401/10000 train_time:80795ms step_avg:57.67ms +[2025-09-11 09:28:53] [Rank 0] step:1421/10000 train_time:81434ms step_avg:57.31ms +[2025-09-11 09:28:53] [Rank 0] step:1421/10000 train_time:81434ms step_avg:57.31ms +[2025-09-11 09:28:54] [Rank 0] step:1441/10000 train_time:82074ms step_avg:56.96ms +[2025-09-11 09:28:54] [Rank 0] step:1441/10000 train_time:82074ms step_avg:56.96ms +[2025-09-11 09:28:54] [Rank 0] step:1461/10000 train_time:82714ms step_avg:56.61ms +[2025-09-11 09:28:54] [Rank 0] step:1461/10000 train_time:82714ms step_avg:56.61ms +[2025-09-11 09:28:55] [Rank 0] step:1481/10000 train_time:83353ms step_avg:56.28ms +[2025-09-11 09:28:55] [Rank 0] step:1481/10000 train_time:83353ms step_avg:56.28ms +[2025-09-11 09:28:56] [Rank 0] step:1501/10000 train_time:83997ms step_avg:55.96ms +[2025-09-11 09:28:56] [Rank 0] step:1501/10000 train_time:83997ms step_avg:55.96ms +[2025-09-11 09:28:56] [Rank 0] step:1521/10000 train_time:84641ms step_avg:55.65ms +[2025-09-11 09:28:56] [Rank 0] step:1521/10000 train_time:84641ms step_avg:55.65ms +[2025-09-11 09:28:57] [Rank 0] step:1541/10000 train_time:85287ms step_avg:55.35ms +[2025-09-11 09:28:57] [Rank 0] step:1541/10000 train_time:85287ms step_avg:55.35ms +[2025-09-11 09:28:58] [Rank 0] step:1561/10000 train_time:85933ms step_avg:55.05ms +[2025-09-11 09:28:58] [Rank 0] step:1561/10000 train_time:85933ms step_avg:55.05ms +[2025-09-11 09:28:58] [Rank 0] step:1581/10000 train_time:86578ms step_avg:54.76ms +[2025-09-11 09:28:58] [Rank 0] step:1581/10000 train_time:86578ms step_avg:54.76ms +[2025-09-11 09:28:59] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 09:28:59] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 09:29:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 09:29:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 09:29:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 09:29:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 09:29:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:29:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:29:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 09:29:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 09:29:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 09:29:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 09:29:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 09:29:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 09:29:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 09:29:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 09:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 09:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 09:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 09:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 09:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 09:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 09:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 09:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 09:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 09:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 09:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 09:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 09:29:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 09:29:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 09:29:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 09:29:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 09:29:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 09:29:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 09:29:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 09:29:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 09:29:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 09:29:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 09:29:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 09:29:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 09:29:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 09:29:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 09:29:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 09:29:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 09:29:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:29:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:29:09] [Rank 0] PRINT: step:1600/10000 val_loss:5.2482 total_sharp:5.1057e-04 L1_sharp:6.0200e-04 L2_sharp:9.7829e-05 L3_sharp:7.1029e-05 L4_sharp:5.5361e-05 L5_sharp:8.3895e-05 L6_sharp:4.3522e-05 L7_sharp:4.4514e-05 L8_sharp:6.9425e-05 L9_sharp:6.1336e-05 L10_sharp:9.1418e-05 L11_sharp:1.5093e-04 L12_sharp:8.6944e-04 total_fnorm:5.6750e+01 total_l1_linf:1.8534e+05 total_spectral:2.8375e+01 L1_fnorm:1.2500e+01 L2_fnorm:1.2188e+01 L3_fnorm:1.2250e+01 L4_fnorm:1.2188e+01 L5_fnorm:1.2000e+01 L6_fnorm:1.2312e+01 L7_fnorm:1.2312e+01 L8_fnorm:1.1812e+01 L9_fnorm:1.2188e+01 L10_fnorm:1.2062e+01 L11_fnorm:1.2062e+01 L12_fnorm:1.1688e+01 L1_l1linf:3.5312e+00 L2_l1linf:3.4062e+00 L3_l1linf:3.3594e+00 L4_l1linf:3.3906e+00 L5_l1linf:3.2656e+00 L6_l1linf:3.3125e+00 L7_l1linf:3.2656e+00 L8_l1linf:3.2969e+00 L9_l1linf:3.1875e+00 L10_l1linf:3.1875e+00 L11_l1linf:3.1250e+00 L12_l1linf:2.6562e+00 L1_spectral:1.4419e-01 L2_spectral:1.4153e-01 L3_spectral:1.4205e-01 L4_spectral:1.4092e-01 L5_spectral:1.4167e-01 L6_spectral:1.4286e-01 L7_spectral:1.4202e-01 L8_spectral:1.4216e-01 L9_spectral:1.4027e-01 L10_spectral:1.4002e-01 L11_spectral:1.4061e-01 L12_spectral:1.4021e-01 train_time:87205ms step_avg:54.50ms +[2025-09-11 09:29:09] [Rank 0] PRINT: step:1600/10000 
val_loss:5.2482 total_sharp:5.1057e-04 L1_sharp:6.0200e-04 L2_sharp:9.7829e-05 L3_sharp:7.1029e-05 L4_sharp:5.5361e-05 L5_sharp:8.3895e-05 L6_sharp:4.3522e-05 L7_sharp:4.4514e-05 L8_sharp:6.9425e-05 L9_sharp:6.1336e-05 L10_sharp:9.1418e-05 L11_sharp:1.5093e-04 L12_sharp:8.6944e-04 total_fnorm:5.6750e+01 total_l1_linf:1.8534e+05 total_spectral:2.8375e+01 L1_fnorm:1.2500e+01 L2_fnorm:1.2188e+01 L3_fnorm:1.2250e+01 L4_fnorm:1.2188e+01 L5_fnorm:1.2000e+01 L6_fnorm:1.2312e+01 L7_fnorm:1.2312e+01 L8_fnorm:1.1812e+01 L9_fnorm:1.2188e+01 L10_fnorm:1.2062e+01 L11_fnorm:1.2062e+01 L12_fnorm:1.1688e+01 L1_l1linf:3.5312e+00 L2_l1linf:3.4062e+00 L3_l1linf:3.3594e+00 L4_l1linf:3.3906e+00 L5_l1linf:3.2656e+00 L6_l1linf:3.3125e+00 L7_l1linf:3.2656e+00 L8_l1linf:3.2969e+00 L9_l1linf:3.1875e+00 L10_l1linf:3.1875e+00 L11_l1linf:3.1250e+00 L12_l1linf:2.6562e+00 L1_spectral:1.4419e-01 L2_spectral:1.4153e-01 L3_spectral:1.4205e-01 L4_spectral:1.4092e-01 L5_spectral:1.4167e-01 L6_spectral:1.4286e-01 L7_spectral:1.4202e-01 L8_spectral:1.4216e-01 L9_spectral:1.4027e-01 L10_spectral:1.4002e-01 L11_spectral:1.4061e-01 L12_spectral:1.4021e-01 train_time:87205ms step_avg:54.50ms +[2025-09-11 09:29:10] [Rank 0] step:1601/10000 train_time:88734ms step_avg:55.42ms +[2025-09-11 09:29:10] [Rank 0] step:1601/10000 train_time:88734ms step_avg:55.42ms +[2025-09-11 09:29:11] [Rank 0] step:1621/10000 train_time:89415ms step_avg:55.16ms +[2025-09-11 09:29:11] [Rank 0] step:1621/10000 train_time:89415ms step_avg:55.16ms +[2025-09-11 09:29:12] [Rank 0] step:1641/10000 train_time:90062ms step_avg:54.88ms +[2025-09-11 09:29:12] [Rank 0] step:1641/10000 train_time:90062ms step_avg:54.88ms +[2025-09-11 09:29:12] [Rank 0] step:1661/10000 train_time:90709ms step_avg:54.61ms +[2025-09-11 09:29:12] [Rank 0] step:1661/10000 train_time:90709ms step_avg:54.61ms +[2025-09-11 09:29:13] [Rank 0] step:1681/10000 train_time:91355ms step_avg:54.35ms +[2025-09-11 09:29:13] [Rank 0] step:1681/10000 train_time:91355ms 
step_avg:54.35ms +[2025-09-11 09:29:14] [Rank 0] step:1701/10000 train_time:92001ms step_avg:54.09ms +[2025-09-11 09:29:14] [Rank 0] step:1701/10000 train_time:92001ms step_avg:54.09ms +[2025-09-11 09:29:14] [Rank 0] step:1721/10000 train_time:92646ms step_avg:53.83ms +[2025-09-11 09:29:14] [Rank 0] step:1721/10000 train_time:92646ms step_avg:53.83ms +[2025-09-11 09:29:15] [Rank 0] step:1741/10000 train_time:93292ms step_avg:53.59ms +[2025-09-11 09:29:15] [Rank 0] step:1741/10000 train_time:93292ms step_avg:53.59ms +[2025-09-11 09:29:16] [Rank 0] step:1761/10000 train_time:93937ms step_avg:53.34ms +[2025-09-11 09:29:16] [Rank 0] step:1761/10000 train_time:93937ms step_avg:53.34ms +[2025-09-11 09:29:16] [Rank 0] step:1781/10000 train_time:94582ms step_avg:53.11ms +[2025-09-11 09:29:16] [Rank 0] step:1781/10000 train_time:94582ms step_avg:53.11ms +[2025-09-11 09:29:17] [Rank 0] step:1801/10000 train_time:95227ms step_avg:52.87ms +[2025-09-11 09:29:17] [Rank 0] step:1801/10000 train_time:95227ms step_avg:52.87ms +[2025-09-11 09:29:18] [Rank 0] step:1821/10000 train_time:96231ms step_avg:52.85ms +[2025-09-11 09:29:18] [Rank 0] step:1821/10000 train_time:96231ms step_avg:52.85ms +[2025-09-11 09:29:19] [Rank 0] step:1841/10000 train_time:97039ms step_avg:52.71ms +[2025-09-11 09:29:19] [Rank 0] step:1841/10000 train_time:97039ms step_avg:52.71ms +[2025-09-11 09:29:19] [Rank 0] step:1861/10000 train_time:97684ms step_avg:52.49ms +[2025-09-11 09:29:19] [Rank 0] step:1861/10000 train_time:97684ms step_avg:52.49ms +[2025-09-11 09:29:20] [Rank 0] step:1881/10000 train_time:98647ms step_avg:52.44ms +[2025-09-11 09:29:20] [Rank 0] step:1881/10000 train_time:98647ms step_avg:52.44ms +[2025-09-11 09:29:21] [Rank 0] step:1901/10000 train_time:99292ms step_avg:52.23ms +[2025-09-11 09:29:21] [Rank 0] step:1901/10000 train_time:99292ms step_avg:52.23ms +[2025-09-11 09:29:22] [Rank 0] step:1921/10000 train_time:99937ms step_avg:52.02ms +[2025-09-11 09:29:22] [Rank 0] step:1921/10000 
train_time:99937ms step_avg:52.02ms +[2025-09-11 09:29:22] [Rank 0] step:1941/10000 train_time:100583ms step_avg:51.82ms +[2025-09-11 09:29:22] [Rank 0] step:1941/10000 train_time:100583ms step_avg:51.82ms +[2025-09-11 09:29:23] [Rank 0] step:1961/10000 train_time:101229ms step_avg:51.62ms +[2025-09-11 09:29:23] [Rank 0] step:1961/10000 train_time:101229ms step_avg:51.62ms +[2025-09-11 09:29:24] [Rank 0] step:1981/10000 train_time:101874ms step_avg:51.43ms +[2025-09-11 09:29:24] [Rank 0] step:1981/10000 train_time:101874ms step_avg:51.43ms +[2025-09-11 09:29:24] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 09:29:24] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 09:29:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 09:29:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 09:29:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 09:29:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 09:29:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:29:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:29:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 09:29:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 09:29:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 09:29:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 09:29:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 09:29:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 09:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 09:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 09:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 09:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 09:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 09:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 09:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 09:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 09:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 09:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 09:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 09:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 09:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 09:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 09:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 09:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 09:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 09:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 09:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 09:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 09:29:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 09:29:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 09:29:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 09:29:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 09:29:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 09:29:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 09:29:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 09:29:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 09:29:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 09:29:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 09:29:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:29:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:29:34] [Rank 0] PRINT: step:2000/10000 val_loss:5.0917 total_sharp:4.1392e-04 L1_sharp:4.1700e-04 L2_sharp:6.6761e-05 L3_sharp:6.4812e-05 L4_sharp:9.5417e-06 L5_sharp:5.4327e-05 L6_sharp:1.9236e-05 L7_sharp:4.7842e-05 L8_sharp:6.3501e-05 L9_sharp:8.2193e-05 L10_sharp:6.9382e-05 L11_sharp:1.3061e-04 L12_sharp:8.9197e-04 total_fnorm:5.7250e+01 total_l1_linf:1.8534e+05 total_spectral:2.8375e+01 L1_fnorm:1.2500e+01 L2_fnorm:1.2250e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2250e+01 L5_fnorm:1.2062e+01 L6_fnorm:1.2375e+01 L7_fnorm:1.2438e+01 L8_fnorm:1.1938e+01 L9_fnorm:1.2312e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2250e+01 L12_fnorm:1.1938e+01 L1_l1linf:3.5000e+00 L2_l1linf:3.3281e+00 L3_l1linf:3.2344e+00 L4_l1linf:3.2188e+00 L5_l1linf:3.2188e+00 L6_l1linf:3.2031e+00 L7_l1linf:3.2188e+00 L8_l1linf:3.1875e+00 L9_l1linf:3.1406e+00 L10_l1linf:3.1094e+00 L11_l1linf:3.1094e+00 L12_l1linf:2.7031e+00 L1_spectral:1.4758e-01 L2_spectral:1.4492e-01 L3_spectral:1.4583e-01 L4_spectral:1.4404e-01 L5_spectral:1.4759e-01 L6_spectral:1.4633e-01 L7_spectral:1.4518e-01 L8_spectral:1.4719e-01 L9_spectral:1.4467e-01 L10_spectral:1.4353e-01 L11_spectral:1.4398e-01 L12_spectral:1.4288e-01 train_time:102502ms step_avg:51.25ms +[2025-09-11 09:29:34] [Rank 0] PRINT: step:2000/10000 val_loss:5.0917 total_sharp:4.1392e-04 L1_sharp:4.1700e-04 L2_sharp:6.6761e-05 L3_sharp:6.4812e-05 L4_sharp:9.5417e-06 L5_sharp:5.4327e-05 L6_sharp:1.9236e-05 L7_sharp:4.7842e-05 L8_sharp:6.3501e-05 L9_sharp:8.2193e-05 L10_sharp:6.9382e-05 L11_sharp:1.3061e-04 L12_sharp:8.9197e-04 total_fnorm:5.7250e+01 total_l1_linf:1.8534e+05 total_spectral:2.8375e+01 L1_fnorm:1.2500e+01 L2_fnorm:1.2250e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2250e+01 L5_fnorm:1.2062e+01 L6_fnorm:1.2375e+01 L7_fnorm:1.2438e+01 L8_fnorm:1.1938e+01 L9_fnorm:1.2312e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2250e+01 L12_fnorm:1.1938e+01 L1_l1linf:3.5000e+00 L2_l1linf:3.3281e+00 L3_l1linf:3.2344e+00 L4_l1linf:3.2188e+00 L5_l1linf:3.2188e+00 
L6_l1linf:3.2031e+00 L7_l1linf:3.2188e+00 L8_l1linf:3.1875e+00 L9_l1linf:3.1406e+00 L10_l1linf:3.1094e+00 L11_l1linf:3.1094e+00 L12_l1linf:2.7031e+00 L1_spectral:1.4758e-01 L2_spectral:1.4492e-01 L3_spectral:1.4583e-01 L4_spectral:1.4404e-01 L5_spectral:1.4759e-01 L6_spectral:1.4633e-01 L7_spectral:1.4518e-01 L8_spectral:1.4719e-01 L9_spectral:1.4467e-01 L10_spectral:1.4353e-01 L11_spectral:1.4398e-01 L12_spectral:1.4288e-01 train_time:102502ms step_avg:51.25ms +[2025-09-11 09:29:36] [Rank 0] step:2001/10000 train_time:104090ms step_avg:52.02ms +[2025-09-11 09:29:36] [Rank 0] step:2001/10000 train_time:104090ms step_avg:52.02ms +[2025-09-11 09:29:37] [Rank 0] step:2021/10000 train_time:104726ms step_avg:51.82ms +[2025-09-11 09:29:37] [Rank 0] step:2021/10000 train_time:104726ms step_avg:51.82ms +[2025-09-11 09:29:37] [Rank 0] step:2041/10000 train_time:105374ms step_avg:51.63ms +[2025-09-11 09:29:37] [Rank 0] step:2041/10000 train_time:105374ms step_avg:51.63ms +[2025-09-11 09:29:38] [Rank 0] step:2061/10000 train_time:106020ms step_avg:51.44ms +[2025-09-11 09:29:38] [Rank 0] step:2061/10000 train_time:106020ms step_avg:51.44ms +[2025-09-11 09:29:38] [Rank 0] step:2081/10000 train_time:106667ms step_avg:51.26ms +[2025-09-11 09:29:38] [Rank 0] step:2081/10000 train_time:106667ms step_avg:51.26ms +[2025-09-11 09:29:39] [Rank 0] step:2101/10000 train_time:107314ms step_avg:51.08ms +[2025-09-11 09:29:39] [Rank 0] step:2101/10000 train_time:107314ms step_avg:51.08ms +[2025-09-11 09:29:40] [Rank 0] step:2121/10000 train_time:107959ms step_avg:50.90ms +[2025-09-11 09:29:40] [Rank 0] step:2121/10000 train_time:107959ms step_avg:50.90ms +[2025-09-11 09:29:40] [Rank 0] step:2141/10000 train_time:108605ms step_avg:50.73ms +[2025-09-11 09:29:40] [Rank 0] step:2141/10000 train_time:108605ms step_avg:50.73ms +[2025-09-11 09:29:41] [Rank 0] step:2161/10000 train_time:109251ms step_avg:50.56ms +[2025-09-11 09:29:41] [Rank 0] step:2161/10000 train_time:109251ms step_avg:50.56ms 
+[2025-09-11 09:29:42] [Rank 0] step:2181/10000 train_time:109898ms step_avg:50.39ms +[2025-09-11 09:29:42] [Rank 0] step:2181/10000 train_time:109898ms step_avg:50.39ms +[2025-09-11 09:29:42] [Rank 0] step:2201/10000 train_time:110545ms step_avg:50.23ms +[2025-09-11 09:29:42] [Rank 0] step:2201/10000 train_time:110545ms step_avg:50.23ms +[2025-09-11 09:29:43] [Rank 0] step:2221/10000 train_time:111191ms step_avg:50.06ms +[2025-09-11 09:29:43] [Rank 0] step:2221/10000 train_time:111191ms step_avg:50.06ms +[2025-09-11 09:29:44] [Rank 0] step:2241/10000 train_time:111849ms step_avg:49.91ms +[2025-09-11 09:29:44] [Rank 0] step:2241/10000 train_time:111849ms step_avg:49.91ms +[2025-09-11 09:29:44] [Rank 0] step:2261/10000 train_time:112511ms step_avg:49.76ms +[2025-09-11 09:29:44] [Rank 0] step:2261/10000 train_time:112511ms step_avg:49.76ms +[2025-09-11 09:29:45] [Rank 0] step:2281/10000 train_time:113170ms step_avg:49.61ms +[2025-09-11 09:29:45] [Rank 0] step:2281/10000 train_time:113170ms step_avg:49.61ms +[2025-09-11 09:29:46] [Rank 0] step:2301/10000 train_time:113831ms step_avg:49.47ms +[2025-09-11 09:29:46] [Rank 0] step:2301/10000 train_time:113831ms step_avg:49.47ms +[2025-09-11 09:29:46] [Rank 0] step:2321/10000 train_time:114489ms step_avg:49.33ms +[2025-09-11 09:29:46] [Rank 0] step:2321/10000 train_time:114489ms step_avg:49.33ms +[2025-09-11 09:29:47] [Rank 0] step:2341/10000 train_time:115148ms step_avg:49.19ms +[2025-09-11 09:29:47] [Rank 0] step:2341/10000 train_time:115148ms step_avg:49.19ms +[2025-09-11 09:29:48] [Rank 0] step:2361/10000 train_time:115807ms step_avg:49.05ms +[2025-09-11 09:29:48] [Rank 0] step:2361/10000 train_time:115807ms step_avg:49.05ms +[2025-09-11 09:29:48] [Rank 0] step:2381/10000 train_time:116466ms step_avg:48.91ms +[2025-09-11 09:29:48] [Rank 0] step:2381/10000 train_time:116466ms step_avg:48.91ms +[2025-09-11 09:29:49] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 09:29:49] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 09:29:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 09:29:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 09:29:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 09:29:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 09:29:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:29:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:29:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 09:29:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 09:29:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 09:29:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 09:29:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 09:29:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 09:29:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 09:29:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 09:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 09:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 09:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 09:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 09:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 09:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 09:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 09:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 09:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 09:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 09:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 09:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 09:29:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 09:29:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 09:29:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 09:29:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 09:29:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 09:29:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 09:29:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 09:29:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 09:29:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 09:29:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 09:29:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 09:29:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 09:29:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 09:29:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 09:29:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 09:29:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 09:29:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:29:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:29:59] [Rank 0] PRINT: step:2400/10000 val_loss:4.9438 total_sharp:5.1866e-04 L1_sharp:3.2026e-04 L2_sharp:6.3561e-05 L3_sharp:3.6700e-05 L4_sharp:2.3260e-05 L5_sharp:5.6527e-05 L6_sharp:2.1625e-05 L7_sharp:2.6495e-05 L8_sharp:6.1802e-05 L9_sharp:8.7039e-05 L10_sharp:8.3233e-05 L11_sharp:1.4464e-04 L12_sharp:1.6134e-03 total_fnorm:5.5500e+01 total_l1_linf:1.7510e+05 total_spectral:2.7625e+01 L1_fnorm:1.2500e+01 L2_fnorm:1.2250e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2312e+01 L5_fnorm:1.2062e+01 L6_fnorm:1.2375e+01 L7_fnorm:1.2438e+01 L8_fnorm:1.1938e+01 L9_fnorm:1.2375e+01 L10_fnorm:1.2250e+01 L11_fnorm:1.2312e+01 L12_fnorm:1.2062e+01 L1_l1linf:3.4375e+00 L2_l1linf:3.2188e+00 L3_l1linf:3.1719e+00 L4_l1linf:3.1875e+00 L5_l1linf:3.1250e+00 L6_l1linf:3.1250e+00 L7_l1linf:3.1719e+00 L8_l1linf:3.1094e+00 L9_l1linf:3.0781e+00 L10_l1linf:3.0625e+00 L11_l1linf:3.0000e+00 L12_l1linf:2.7344e+00 L1_spectral:1.5039e-01 L2_spectral:1.4666e-01 L3_spectral:1.4778e-01 L4_spectral:1.4681e-01 L5_spectral:1.4850e-01 L6_spectral:1.4887e-01 L7_spectral:1.4947e-01 L8_spectral:1.4929e-01 L9_spectral:1.4818e-01 L10_spectral:1.4597e-01 L11_spectral:1.4680e-01 L12_spectral:1.4532e-01 train_time:117107ms step_avg:48.79ms +[2025-09-11 09:29:59] [Rank 0] PRINT: step:2400/10000 
val_loss:4.9438 total_sharp:5.1866e-04 L1_sharp:3.2026e-04 L2_sharp:6.3561e-05 L3_sharp:3.6700e-05 L4_sharp:2.3260e-05 L5_sharp:5.6527e-05 L6_sharp:2.1625e-05 L7_sharp:2.6495e-05 L8_sharp:6.1802e-05 L9_sharp:8.7039e-05 L10_sharp:8.3233e-05 L11_sharp:1.4464e-04 L12_sharp:1.6134e-03 total_fnorm:5.5500e+01 total_l1_linf:1.7510e+05 total_spectral:2.7625e+01 L1_fnorm:1.2500e+01 L2_fnorm:1.2250e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2312e+01 L5_fnorm:1.2062e+01 L6_fnorm:1.2375e+01 L7_fnorm:1.2438e+01 L8_fnorm:1.1938e+01 L9_fnorm:1.2375e+01 L10_fnorm:1.2250e+01 L11_fnorm:1.2312e+01 L12_fnorm:1.2062e+01 L1_l1linf:3.4375e+00 L2_l1linf:3.2188e+00 L3_l1linf:3.1719e+00 L4_l1linf:3.1875e+00 L5_l1linf:3.1250e+00 L6_l1linf:3.1250e+00 L7_l1linf:3.1719e+00 L8_l1linf:3.1094e+00 L9_l1linf:3.0781e+00 L10_l1linf:3.0625e+00 L11_l1linf:3.0000e+00 L12_l1linf:2.7344e+00 L1_spectral:1.5039e-01 L2_spectral:1.4666e-01 L3_spectral:1.4778e-01 L4_spectral:1.4681e-01 L5_spectral:1.4850e-01 L6_spectral:1.4887e-01 L7_spectral:1.4947e-01 L8_spectral:1.4929e-01 L9_spectral:1.4818e-01 L10_spectral:1.4597e-01 L11_spectral:1.4680e-01 L12_spectral:1.4532e-01 train_time:117107ms step_avg:48.79ms +[2025-09-11 09:30:00] [Rank 0] step:2401/10000 train_time:118250ms step_avg:49.25ms +[2025-09-11 09:30:00] [Rank 0] step:2401/10000 train_time:118250ms step_avg:49.25ms +[2025-09-11 09:30:01] [Rank 0] step:2421/10000 train_time:118901ms step_avg:49.11ms +[2025-09-11 09:30:01] [Rank 0] step:2421/10000 train_time:118901ms step_avg:49.11ms +[2025-09-11 09:30:01] [Rank 0] step:2441/10000 train_time:119684ms step_avg:49.03ms +[2025-09-11 09:30:01] [Rank 0] step:2441/10000 train_time:119684ms step_avg:49.03ms +[2025-09-11 09:30:02] [Rank 0] step:2461/10000 train_time:120407ms step_avg:48.93ms +[2025-09-11 09:30:02] [Rank 0] step:2461/10000 train_time:120407ms step_avg:48.93ms +[2025-09-11 09:30:03] [Rank 0] step:2481/10000 train_time:121100ms step_avg:48.81ms +[2025-09-11 09:30:03] [Rank 0] step:2481/10000 
train_time:121100ms step_avg:48.81ms +[2025-09-11 09:30:03] [Rank 0] step:2501/10000 train_time:121759ms step_avg:48.68ms +[2025-09-11 09:30:03] [Rank 0] step:2501/10000 train_time:121759ms step_avg:48.68ms +[2025-09-11 09:30:04] [Rank 0] step:2521/10000 train_time:122420ms step_avg:48.56ms +[2025-09-11 09:30:04] [Rank 0] step:2521/10000 train_time:122420ms step_avg:48.56ms +[2025-09-11 09:30:05] [Rank 0] step:2541/10000 train_time:123085ms step_avg:48.44ms +[2025-09-11 09:30:05] [Rank 0] step:2541/10000 train_time:123085ms step_avg:48.44ms +[2025-09-11 09:30:05] [Rank 0] step:2561/10000 train_time:123744ms step_avg:48.32ms +[2025-09-11 09:30:05] [Rank 0] step:2561/10000 train_time:123744ms step_avg:48.32ms +[2025-09-11 09:30:06] [Rank 0] step:2581/10000 train_time:124406ms step_avg:48.20ms +[2025-09-11 09:30:06] [Rank 0] step:2581/10000 train_time:124406ms step_avg:48.20ms +[2025-09-11 09:30:07] [Rank 0] step:2601/10000 train_time:125065ms step_avg:48.08ms +[2025-09-11 09:30:07] [Rank 0] step:2601/10000 train_time:125065ms step_avg:48.08ms +[2025-09-11 09:30:07] [Rank 0] step:2621/10000 train_time:125724ms step_avg:47.97ms +[2025-09-11 09:30:07] [Rank 0] step:2621/10000 train_time:125724ms step_avg:47.97ms +[2025-09-11 09:30:08] [Rank 0] step:2641/10000 train_time:126382ms step_avg:47.85ms +[2025-09-11 09:30:08] [Rank 0] step:2641/10000 train_time:126382ms step_avg:47.85ms +[2025-09-11 09:30:09] [Rank 0] step:2661/10000 train_time:127041ms step_avg:47.74ms +[2025-09-11 09:30:09] [Rank 0] step:2661/10000 train_time:127041ms step_avg:47.74ms +[2025-09-11 09:30:09] [Rank 0] step:2681/10000 train_time:127700ms step_avg:47.63ms +[2025-09-11 09:30:09] [Rank 0] step:2681/10000 train_time:127700ms step_avg:47.63ms +[2025-09-11 09:30:10] [Rank 0] step:2701/10000 train_time:128359ms step_avg:47.52ms +[2025-09-11 09:30:10] [Rank 0] step:2701/10000 train_time:128359ms step_avg:47.52ms +[2025-09-11 09:30:11] [Rank 0] step:2721/10000 train_time:129018ms step_avg:47.42ms 
+[2025-09-11 09:30:11] [Rank 0] step:2721/10000 train_time:129018ms step_avg:47.42ms +[2025-09-11 09:30:11] [Rank 0] step:2741/10000 train_time:129677ms step_avg:47.31ms +[2025-09-11 09:30:11] [Rank 0] step:2741/10000 train_time:129677ms step_avg:47.31ms +[2025-09-11 09:30:12] [Rank 0] step:2761/10000 train_time:130336ms step_avg:47.21ms +[2025-09-11 09:30:12] [Rank 0] step:2761/10000 train_time:130336ms step_avg:47.21ms +[2025-09-11 09:30:13] [Rank 0] step:2781/10000 train_time:130996ms step_avg:47.10ms +[2025-09-11 09:30:13] [Rank 0] step:2781/10000 train_time:130996ms step_avg:47.10ms +[2025-09-11 09:30:13] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 09:30:13] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 09:30:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 09:30:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 09:30:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 09:30:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 09:30:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:30:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:30:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 09:30:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 09:30:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 09:30:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 09:30:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 09:30:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 09:30:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 09:30:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 09:30:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 09:30:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 09:30:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 09:30:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 09:30:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 09:30:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 09:30:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 09:30:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 09:30:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 09:30:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 09:30:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 09:30:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 09:30:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 09:30:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 09:30:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 09:30:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 09:30:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 09:30:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 09:30:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 09:30:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 09:30:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 09:30:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 09:30:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 09:30:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 09:30:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 09:30:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 09:30:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 09:30:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 09:30:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:30:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:30:23] [Rank 0] PRINT: step:2800/10000 val_loss:4.8626 total_sharp:4.4147e-04 L1_sharp:4.1865e-04 L2_sharp:4.7499e-05 L3_sharp:1.3582e-05 L4_sharp:5.5075e-06 L5_sharp:3.4137e-05 L6_sharp:1.4272e-05 L7_sharp:4.1567e-05 L8_sharp:6.4060e-05 L9_sharp:7.5396e-05 L10_sharp:6.9947e-05 L11_sharp:1.4845e-04 L12_sharp:8.4204e-04 total_fnorm:5.4500e+01 total_l1_linf:1.6998e+05 total_spectral:2.7250e+01 L1_fnorm:1.2500e+01 L2_fnorm:1.2188e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2375e+01 L5_fnorm:1.2125e+01 L6_fnorm:1.2438e+01 L7_fnorm:1.2500e+01 L8_fnorm:1.2000e+01 L9_fnorm:1.2438e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2438e+01 L12_fnorm:1.2188e+01 L1_l1linf:3.3750e+00 L2_l1linf:3.2031e+00 L3_l1linf:3.1719e+00 L4_l1linf:3.1094e+00 L5_l1linf:3.1094e+00 L6_l1linf:3.1719e+00 L7_l1linf:3.1406e+00 L8_l1linf:3.1094e+00 L9_l1linf:3.0312e+00 L10_l1linf:3.0000e+00 L11_l1linf:3.0156e+00 L12_l1linf:2.7344e+00 L1_spectral:1.5296e-01 L2_spectral:1.4922e-01 L3_spectral:1.4921e-01 L4_spectral:1.4900e-01 L5_spectral:1.5045e-01 L6_spectral:1.5173e-01 L7_spectral:1.5137e-01 L8_spectral:1.5079e-01 L9_spectral:1.5004e-01 L10_spectral:1.4860e-01 L11_spectral:1.4917e-01 L12_spectral:1.4843e-01 train_time:131635ms step_avg:47.01ms +[2025-09-11 09:30:23] [Rank 0] PRINT: step:2800/10000 val_loss:4.8626 total_sharp:4.4147e-04 L1_sharp:4.1865e-04 L2_sharp:4.7499e-05 L3_sharp:1.3582e-05 L4_sharp:5.5075e-06 L5_sharp:3.4137e-05 L6_sharp:1.4272e-05 L7_sharp:4.1567e-05 L8_sharp:6.4060e-05 L9_sharp:7.5396e-05 L10_sharp:6.9947e-05 L11_sharp:1.4845e-04 L12_sharp:8.4204e-04 total_fnorm:5.4500e+01 total_l1_linf:1.6998e+05 total_spectral:2.7250e+01 L1_fnorm:1.2500e+01 L2_fnorm:1.2188e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2375e+01 L5_fnorm:1.2125e+01 L6_fnorm:1.2438e+01 L7_fnorm:1.2500e+01 L8_fnorm:1.2000e+01 L9_fnorm:1.2438e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2438e+01 L12_fnorm:1.2188e+01 L1_l1linf:3.3750e+00 L2_l1linf:3.2031e+00 L3_l1linf:3.1719e+00 L4_l1linf:3.1094e+00 L5_l1linf:3.1094e+00 
L6_l1linf:3.1719e+00 L7_l1linf:3.1406e+00 L8_l1linf:3.1094e+00 L9_l1linf:3.0312e+00 L10_l1linf:3.0000e+00 L11_l1linf:3.0156e+00 L12_l1linf:2.7344e+00 L1_spectral:1.5296e-01 L2_spectral:1.4922e-01 L3_spectral:1.4921e-01 L4_spectral:1.4900e-01 L5_spectral:1.5045e-01 L6_spectral:1.5173e-01 L7_spectral:1.5137e-01 L8_spectral:1.5079e-01 L9_spectral:1.5004e-01 L10_spectral:1.4860e-01 L11_spectral:1.4917e-01 L12_spectral:1.4843e-01 train_time:131635ms step_avg:47.01ms +[2025-09-11 09:30:24] [Rank 0] step:2801/10000 train_time:132741ms step_avg:47.39ms +[2025-09-11 09:30:24] [Rank 0] step:2801/10000 train_time:132741ms step_avg:47.39ms +[2025-09-11 09:30:25] [Rank 0] step:2821/10000 train_time:133404ms step_avg:47.29ms +[2025-09-11 09:30:25] [Rank 0] step:2821/10000 train_time:133404ms step_avg:47.29ms +[2025-09-11 09:30:25] [Rank 0] step:2841/10000 train_time:134064ms step_avg:47.19ms +[2025-09-11 09:30:25] [Rank 0] step:2841/10000 train_time:134064ms step_avg:47.19ms +[2025-09-11 09:30:26] [Rank 0] step:2861/10000 train_time:134724ms step_avg:47.09ms +[2025-09-11 09:30:26] [Rank 0] step:2861/10000 train_time:134724ms step_avg:47.09ms +[2025-09-11 09:30:27] [Rank 0] step:2881/10000 train_time:135385ms step_avg:46.99ms +[2025-09-11 09:30:27] [Rank 0] step:2881/10000 train_time:135385ms step_avg:46.99ms +[2025-09-11 09:30:27] [Rank 0] step:2901/10000 train_time:136045ms step_avg:46.90ms +[2025-09-11 09:30:27] [Rank 0] step:2901/10000 train_time:136045ms step_avg:46.90ms +[2025-09-11 09:30:28] [Rank 0] step:2921/10000 train_time:136704ms step_avg:46.80ms +[2025-09-11 09:30:28] [Rank 0] step:2921/10000 train_time:136704ms step_avg:46.80ms +[2025-09-11 09:30:29] [Rank 0] step:2941/10000 train_time:137364ms step_avg:46.71ms +[2025-09-11 09:30:29] [Rank 0] step:2941/10000 train_time:137364ms step_avg:46.71ms +[2025-09-11 09:30:29] [Rank 0] step:2961/10000 train_time:138023ms step_avg:46.61ms +[2025-09-11 09:30:29] [Rank 0] step:2961/10000 train_time:138023ms step_avg:46.61ms 
+[2025-09-11 09:30:30] [Rank 0] step:2981/10000 train_time:138684ms step_avg:46.52ms +[2025-09-11 09:30:30] [Rank 0] step:2981/10000 train_time:138684ms step_avg:46.52ms +[2025-09-11 09:30:31] [Rank 0] step:3001/10000 train_time:139347ms step_avg:46.43ms +[2025-09-11 09:30:31] [Rank 0] step:3001/10000 train_time:139347ms step_avg:46.43ms +[2025-09-11 09:30:31] [Rank 0] step:3021/10000 train_time:140009ms step_avg:46.35ms +[2025-09-11 09:30:31] [Rank 0] step:3021/10000 train_time:140009ms step_avg:46.35ms +[2025-09-11 09:30:32] [Rank 0] step:3041/10000 train_time:140671ms step_avg:46.26ms +[2025-09-11 09:30:32] [Rank 0] step:3041/10000 train_time:140671ms step_avg:46.26ms +[2025-09-11 09:30:33] [Rank 0] step:3061/10000 train_time:141333ms step_avg:46.17ms +[2025-09-11 09:30:33] [Rank 0] step:3061/10000 train_time:141333ms step_avg:46.17ms +[2025-09-11 09:30:33] [Rank 0] step:3081/10000 train_time:141995ms step_avg:46.09ms +[2025-09-11 09:30:33] [Rank 0] step:3081/10000 train_time:141995ms step_avg:46.09ms +[2025-09-11 09:30:34] [Rank 0] step:3101/10000 train_time:142657ms step_avg:46.00ms +[2025-09-11 09:30:34] [Rank 0] step:3101/10000 train_time:142657ms step_avg:46.00ms +[2025-09-11 09:30:35] [Rank 0] step:3121/10000 train_time:143319ms step_avg:45.92ms +[2025-09-11 09:30:35] [Rank 0] step:3121/10000 train_time:143319ms step_avg:45.92ms +[2025-09-11 09:30:35] [Rank 0] step:3141/10000 train_time:143981ms step_avg:45.84ms +[2025-09-11 09:30:35] [Rank 0] step:3141/10000 train_time:143981ms step_avg:45.84ms +[2025-09-11 09:30:36] [Rank 0] step:3161/10000 train_time:144643ms step_avg:45.76ms +[2025-09-11 09:30:36] [Rank 0] step:3161/10000 train_time:144643ms step_avg:45.76ms +[2025-09-11 09:30:37] [Rank 0] step:3181/10000 train_time:145305ms step_avg:45.68ms +[2025-09-11 09:30:37] [Rank 0] step:3181/10000 train_time:145305ms step_avg:45.68ms +[2025-09-11 09:30:37] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 09:30:37] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 09:30:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 09:30:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 09:30:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 09:30:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 09:30:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:30:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:30:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 09:30:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 09:30:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 09:30:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 09:30:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 09:30:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 09:30:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 09:30:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 09:30:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 09:30:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 09:30:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 09:30:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 09:30:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 09:30:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 09:30:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 09:30:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 09:30:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 09:30:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 09:30:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 09:30:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 09:30:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 09:30:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 09:30:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 09:30:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 09:30:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 09:30:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 09:30:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 09:30:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 09:30:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 09:30:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 09:30:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 09:30:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 09:30:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 09:30:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 09:30:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 09:30:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 09:30:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:30:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:30:47] [Rank 0] PRINT: step:3200/10000 val_loss:4.7752 total_sharp:3.2822e-04 L1_sharp:2.5222e-04 L2_sharp:4.1374e-05 L3_sharp:4.1879e-05 L4_sharp:2.5821e-05 L5_sharp:3.5645e-05 L6_sharp:2.3547e-05 L7_sharp:4.0206e-05 L8_sharp:5.6144e-05 L9_sharp:8.8271e-05 L10_sharp:7.8170e-05 L11_sharp:1.2365e-04 L12_sharp:6.2305e-04 total_fnorm:5.7750e+01 total_l1_linf:1.8022e+05 total_spectral:2.8875e+01 L1_fnorm:1.2438e+01 L2_fnorm:1.2312e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2438e+01 L5_fnorm:1.2062e+01 L6_fnorm:1.2500e+01 L7_fnorm:1.2562e+01 L8_fnorm:1.2062e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2438e+01 L11_fnorm:1.2500e+01 L12_fnorm:1.2312e+01 L1_l1linf:3.3750e+00 L2_l1linf:3.1250e+00 L3_l1linf:3.1094e+00 L4_l1linf:3.0781e+00 L5_l1linf:3.0781e+00 L6_l1linf:3.1250e+00 L7_l1linf:3.2031e+00 L8_l1linf:3.0625e+00 L9_l1linf:2.9844e+00 L10_l1linf:2.9062e+00 L11_l1linf:2.9219e+00 L12_l1linf:2.7969e+00 L1_spectral:1.5363e-01 L2_spectral:1.5019e-01 L3_spectral:1.5172e-01 L4_spectral:1.5123e-01 L5_spectral:1.5122e-01 L6_spectral:1.5410e-01 L7_spectral:1.5325e-01 L8_spectral:1.5240e-01 L9_spectral:1.5280e-01 L10_spectral:1.5106e-01 L11_spectral:1.5096e-01 L12_spectral:1.5093e-01 train_time:145948ms step_avg:45.61ms +[2025-09-11 09:30:47] [Rank 0] PRINT: step:3200/10000 
val_loss:4.7752 total_sharp:3.2822e-04 L1_sharp:2.5222e-04 L2_sharp:4.1374e-05 L3_sharp:4.1879e-05 L4_sharp:2.5821e-05 L5_sharp:3.5645e-05 L6_sharp:2.3547e-05 L7_sharp:4.0206e-05 L8_sharp:5.6144e-05 L9_sharp:8.8271e-05 L10_sharp:7.8170e-05 L11_sharp:1.2365e-04 L12_sharp:6.2305e-04 total_fnorm:5.7750e+01 total_l1_linf:1.8022e+05 total_spectral:2.8875e+01 L1_fnorm:1.2438e+01 L2_fnorm:1.2312e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2438e+01 L5_fnorm:1.2062e+01 L6_fnorm:1.2500e+01 L7_fnorm:1.2562e+01 L8_fnorm:1.2062e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2438e+01 L11_fnorm:1.2500e+01 L12_fnorm:1.2312e+01 L1_l1linf:3.3750e+00 L2_l1linf:3.1250e+00 L3_l1linf:3.1094e+00 L4_l1linf:3.0781e+00 L5_l1linf:3.0781e+00 L6_l1linf:3.1250e+00 L7_l1linf:3.2031e+00 L8_l1linf:3.0625e+00 L9_l1linf:2.9844e+00 L10_l1linf:2.9062e+00 L11_l1linf:2.9219e+00 L12_l1linf:2.7969e+00 L1_spectral:1.5363e-01 L2_spectral:1.5019e-01 L3_spectral:1.5172e-01 L4_spectral:1.5123e-01 L5_spectral:1.5122e-01 L6_spectral:1.5410e-01 L7_spectral:1.5325e-01 L8_spectral:1.5240e-01 L9_spectral:1.5280e-01 L10_spectral:1.5106e-01 L11_spectral:1.5096e-01 L12_spectral:1.5093e-01 train_time:145948ms step_avg:45.61ms +[2025-09-11 09:30:49] [Rank 0] step:3201/10000 train_time:147076ms step_avg:45.95ms +[2025-09-11 09:30:49] [Rank 0] step:3201/10000 train_time:147076ms step_avg:45.95ms +[2025-09-11 09:30:49] [Rank 0] step:3221/10000 train_time:147741ms step_avg:45.87ms +[2025-09-11 09:30:49] [Rank 0] step:3221/10000 train_time:147741ms step_avg:45.87ms +[2025-09-11 09:30:50] [Rank 0] step:3241/10000 train_time:148404ms step_avg:45.79ms +[2025-09-11 09:30:50] [Rank 0] step:3241/10000 train_time:148404ms step_avg:45.79ms +[2025-09-11 09:30:51] [Rank 0] step:3261/10000 train_time:149067ms step_avg:45.71ms +[2025-09-11 09:30:51] [Rank 0] step:3261/10000 train_time:149067ms step_avg:45.71ms +[2025-09-11 09:30:51] [Rank 0] step:3281/10000 train_time:149730ms step_avg:45.64ms +[2025-09-11 09:30:51] [Rank 0] step:3281/10000 
train_time:149730ms step_avg:45.64ms +[2025-09-11 09:30:52] [Rank 0] step:3301/10000 train_time:150393ms step_avg:45.56ms +[2025-09-11 09:30:52] [Rank 0] step:3301/10000 train_time:150393ms step_avg:45.56ms +[2025-09-11 09:30:53] [Rank 0] step:3321/10000 train_time:151055ms step_avg:45.48ms +[2025-09-11 09:30:53] [Rank 0] step:3321/10000 train_time:151055ms step_avg:45.48ms +[2025-09-11 09:30:53] [Rank 0] step:3341/10000 train_time:151718ms step_avg:45.41ms +[2025-09-11 09:30:53] [Rank 0] step:3341/10000 train_time:151718ms step_avg:45.41ms +[2025-09-11 09:30:54] [Rank 0] step:3361/10000 train_time:152382ms step_avg:45.34ms +[2025-09-11 09:30:54] [Rank 0] step:3361/10000 train_time:152382ms step_avg:45.34ms +[2025-09-11 09:30:55] [Rank 0] step:3381/10000 train_time:153050ms step_avg:45.27ms +[2025-09-11 09:30:55] [Rank 0] step:3381/10000 train_time:153050ms step_avg:45.27ms +[2025-09-11 09:30:55] [Rank 0] step:3401/10000 train_time:153713ms step_avg:45.20ms +[2025-09-11 09:30:55] [Rank 0] step:3401/10000 train_time:153713ms step_avg:45.20ms +[2025-09-11 09:30:56] [Rank 0] step:3421/10000 train_time:154375ms step_avg:45.13ms +[2025-09-11 09:30:56] [Rank 0] step:3421/10000 train_time:154375ms step_avg:45.13ms +[2025-09-11 09:30:57] [Rank 0] step:3441/10000 train_time:155037ms step_avg:45.06ms +[2025-09-11 09:30:57] [Rank 0] step:3441/10000 train_time:155037ms step_avg:45.06ms +[2025-09-11 09:30:57] [Rank 0] step:3461/10000 train_time:155699ms step_avg:44.99ms +[2025-09-11 09:30:57] [Rank 0] step:3461/10000 train_time:155699ms step_avg:44.99ms +[2025-09-11 09:30:58] [Rank 0] step:3481/10000 train_time:156362ms step_avg:44.92ms +[2025-09-11 09:30:58] [Rank 0] step:3481/10000 train_time:156362ms step_avg:44.92ms +[2025-09-11 09:30:59] [Rank 0] step:3501/10000 train_time:157025ms step_avg:44.85ms +[2025-09-11 09:30:59] [Rank 0] step:3501/10000 train_time:157025ms step_avg:44.85ms +[2025-09-11 09:30:59] [Rank 0] step:3521/10000 train_time:157687ms step_avg:44.78ms 
+[2025-09-11 09:30:59] [Rank 0] step:3521/10000 train_time:157687ms step_avg:44.78ms +[2025-09-11 09:31:00] [Rank 0] step:3541/10000 train_time:158349ms step_avg:44.72ms +[2025-09-11 09:31:00] [Rank 0] step:3541/10000 train_time:158349ms step_avg:44.72ms +[2025-09-11 09:31:01] [Rank 0] step:3561/10000 train_time:159012ms step_avg:44.65ms +[2025-09-11 09:31:01] [Rank 0] step:3561/10000 train_time:159012ms step_avg:44.65ms +[2025-09-11 09:31:01] [Rank 0] step:3581/10000 train_time:159675ms step_avg:44.59ms +[2025-09-11 09:31:01] [Rank 0] step:3581/10000 train_time:159675ms step_avg:44.59ms +[2025-09-11 09:31:02] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 09:31:02] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 09:31:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 09:31:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 09:31:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 09:31:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 09:31:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:31:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:31:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 09:31:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 09:31:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 09:31:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 09:31:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 09:31:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 09:31:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 09:31:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 09:31:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 09:31:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 09:31:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 09:31:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 09:31:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 09:31:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 09:31:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 09:31:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 09:31:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 09:31:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 09:31:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 09:31:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 09:31:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 09:31:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 09:31:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 09:31:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 09:31:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 09:31:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 09:31:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 09:31:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 09:31:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 09:31:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 09:31:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 09:31:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 09:31:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 09:31:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 09:31:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 09:31:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 09:31:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:31:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:31:12] [Rank 0] PRINT: step:3600/10000 val_loss:4.7159 total_sharp:2.7595e-04 L1_sharp:2.7175e-04 L2_sharp:3.6812e-05 L3_sharp:2.4380e-07 L4_sharp:2.5681e-06 L5_sharp:3.1350e-05 L6_sharp:2.2291e-05 L7_sharp:2.6969e-05 L8_sharp:5.8838e-05 L9_sharp:6.9664e-05 L10_sharp:6.5405e-05 L11_sharp:1.0012e-04 L12_sharp:4.1508e-04 total_fnorm:5.4750e+01 total_l1_linf:1.6691e+05 total_spectral:2.7375e+01 L1_fnorm:1.2438e+01 L2_fnorm:1.2250e+01 L3_fnorm:1.2375e+01 L4_fnorm:1.2500e+01 L5_fnorm:1.2125e+01 L6_fnorm:1.2500e+01 L7_fnorm:1.2562e+01 L8_fnorm:1.2062e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2438e+01 L11_fnorm:1.2500e+01 L12_fnorm:1.2375e+01 L1_l1linf:3.2656e+00 L2_l1linf:3.0781e+00 L3_l1linf:3.0312e+00 L4_l1linf:3.0469e+00 L5_l1linf:3.1250e+00 L6_l1linf:3.1250e+00 L7_l1linf:3.1719e+00 L8_l1linf:3.0469e+00 L9_l1linf:2.9062e+00 L10_l1linf:2.8750e+00 L11_l1linf:2.8750e+00 L12_l1linf:2.7656e+00 L1_spectral:1.5577e-01 L2_spectral:1.5071e-01 L3_spectral:1.5298e-01 L4_spectral:1.5373e-01 L5_spectral:1.5257e-01 L6_spectral:1.5455e-01 L7_spectral:1.5509e-01 L8_spectral:1.5278e-01 L9_spectral:1.5511e-01 L10_spectral:1.5252e-01 L11_spectral:1.5229e-01 L12_spectral:1.5124e-01 train_time:160318ms step_avg:44.53ms +[2025-09-11 09:31:12] [Rank 0] PRINT: step:3600/10000 val_loss:4.7159 total_sharp:2.7595e-04 L1_sharp:2.7175e-04 L2_sharp:3.6812e-05 L3_sharp:2.4380e-07 L4_sharp:2.5681e-06 L5_sharp:3.1350e-05 L6_sharp:2.2291e-05 L7_sharp:2.6969e-05 L8_sharp:5.8838e-05 L9_sharp:6.9664e-05 L10_sharp:6.5405e-05 L11_sharp:1.0012e-04 L12_sharp:4.1508e-04 total_fnorm:5.4750e+01 total_l1_linf:1.6691e+05 total_spectral:2.7375e+01 L1_fnorm:1.2438e+01 L2_fnorm:1.2250e+01 L3_fnorm:1.2375e+01 L4_fnorm:1.2500e+01 L5_fnorm:1.2125e+01 L6_fnorm:1.2500e+01 L7_fnorm:1.2562e+01 L8_fnorm:1.2062e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2438e+01 L11_fnorm:1.2500e+01 L12_fnorm:1.2375e+01 L1_l1linf:3.2656e+00 L2_l1linf:3.0781e+00 L3_l1linf:3.0312e+00 L4_l1linf:3.0469e+00 L5_l1linf:3.1250e+00 
L6_l1linf:3.1250e+00 L7_l1linf:3.1719e+00 L8_l1linf:3.0469e+00 L9_l1linf:2.9062e+00 L10_l1linf:2.8750e+00 L11_l1linf:2.8750e+00 L12_l1linf:2.7656e+00 L1_spectral:1.5577e-01 L2_spectral:1.5071e-01 L3_spectral:1.5298e-01 L4_spectral:1.5373e-01 L5_spectral:1.5257e-01 L6_spectral:1.5455e-01 L7_spectral:1.5509e-01 L8_spectral:1.5278e-01 L9_spectral:1.5511e-01 L10_spectral:1.5252e-01 L11_spectral:1.5229e-01 L12_spectral:1.5124e-01 train_time:160318ms step_avg:44.53ms +[2025-09-11 09:31:13] [Rank 0] step:3601/10000 train_time:161449ms step_avg:44.83ms +[2025-09-11 09:31:13] [Rank 0] step:3601/10000 train_time:161449ms step_avg:44.83ms +[2025-09-11 09:31:14] [Rank 0] step:3621/10000 train_time:162102ms step_avg:44.77ms +[2025-09-11 09:31:14] [Rank 0] step:3621/10000 train_time:162102ms step_avg:44.77ms +[2025-09-11 09:31:14] [Rank 0] step:3641/10000 train_time:162765ms step_avg:44.70ms +[2025-09-11 09:31:14] [Rank 0] step:3641/10000 train_time:162765ms step_avg:44.70ms +[2025-09-11 09:31:15] [Rank 0] step:3661/10000 train_time:163428ms step_avg:44.64ms +[2025-09-11 09:31:15] [Rank 0] step:3661/10000 train_time:163428ms step_avg:44.64ms +[2025-09-11 09:31:16] [Rank 0] step:3681/10000 train_time:164089ms step_avg:44.58ms +[2025-09-11 09:31:16] [Rank 0] step:3681/10000 train_time:164089ms step_avg:44.58ms +[2025-09-11 09:31:16] [Rank 0] step:3701/10000 train_time:164750ms step_avg:44.52ms +[2025-09-11 09:31:16] [Rank 0] step:3701/10000 train_time:164750ms step_avg:44.52ms +[2025-09-11 09:31:17] [Rank 0] step:3721/10000 train_time:165423ms step_avg:44.46ms +[2025-09-11 09:31:17] [Rank 0] step:3721/10000 train_time:165423ms step_avg:44.46ms +[2025-09-11 09:31:18] [Rank 0] step:3741/10000 train_time:166095ms step_avg:44.40ms +[2025-09-11 09:31:18] [Rank 0] step:3741/10000 train_time:166095ms step_avg:44.40ms +[2025-09-11 09:31:18] [Rank 0] step:3761/10000 train_time:166769ms step_avg:44.34ms +[2025-09-11 09:31:18] [Rank 0] step:3761/10000 train_time:166769ms step_avg:44.34ms 
+[2025-09-11 09:31:19] [Rank 0] step:3781/10000 train_time:167441ms step_avg:44.28ms +[2025-09-11 09:31:19] [Rank 0] step:3781/10000 train_time:167441ms step_avg:44.28ms +[2025-09-11 09:31:20] [Rank 0] step:3801/10000 train_time:168115ms step_avg:44.23ms +[2025-09-11 09:31:20] [Rank 0] step:3801/10000 train_time:168115ms step_avg:44.23ms +[2025-09-11 09:31:20] [Rank 0] step:3821/10000 train_time:168789ms step_avg:44.17ms +[2025-09-11 09:31:20] [Rank 0] step:3821/10000 train_time:168789ms step_avg:44.17ms +[2025-09-11 09:31:21] [Rank 0] step:3841/10000 train_time:169462ms step_avg:44.12ms +[2025-09-11 09:31:21] [Rank 0] step:3841/10000 train_time:169462ms step_avg:44.12ms +[2025-09-11 09:31:22] [Rank 0] step:3861/10000 train_time:170135ms step_avg:44.06ms +[2025-09-11 09:31:22] [Rank 0] step:3861/10000 train_time:170135ms step_avg:44.06ms +[2025-09-11 09:31:22] [Rank 0] step:3881/10000 train_time:170807ms step_avg:44.01ms +[2025-09-11 09:31:22] [Rank 0] step:3881/10000 train_time:170807ms step_avg:44.01ms +[2025-09-11 09:31:23] [Rank 0] step:3901/10000 train_time:171479ms step_avg:43.96ms +[2025-09-11 09:31:23] [Rank 0] step:3901/10000 train_time:171479ms step_avg:43.96ms +[2025-09-11 09:31:24] [Rank 0] step:3921/10000 train_time:172153ms step_avg:43.91ms +[2025-09-11 09:31:24] [Rank 0] step:3921/10000 train_time:172153ms step_avg:43.91ms +[2025-09-11 09:31:25] [Rank 0] step:3941/10000 train_time:173404ms step_avg:44.00ms +[2025-09-11 09:31:25] [Rank 0] step:3941/10000 train_time:173404ms step_avg:44.00ms +[2025-09-11 09:31:26] [Rank 0] step:3961/10000 train_time:174077ms step_avg:43.95ms +[2025-09-11 09:31:26] [Rank 0] step:3961/10000 train_time:174077ms step_avg:43.95ms +[2025-09-11 09:31:26] [Rank 0] step:3981/10000 train_time:174750ms step_avg:43.90ms +[2025-09-11 09:31:26] [Rank 0] step:3981/10000 train_time:174750ms step_avg:43.90ms +[2025-09-11 09:31:27] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 09:31:27] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 09:31:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 09:31:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 09:31:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 09:31:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 09:31:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:31:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:31:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 09:31:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 09:31:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 09:31:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 09:31:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 09:31:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 09:31:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 09:31:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 09:31:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 09:31:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 09:31:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 09:31:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 09:31:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 09:31:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 09:31:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 09:31:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 09:31:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 09:31:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 09:31:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 09:31:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 09:31:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 09:31:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 09:31:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 09:31:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 09:31:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 09:31:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 09:31:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 09:31:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 09:31:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 09:31:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 09:31:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 09:31:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 09:31:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 09:31:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 09:31:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 09:31:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 09:31:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:31:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:31:37] [Rank 0] PRINT: step:4000/10000 val_loss:4.6579 total_sharp:3.4847e-04 L1_sharp:3.1654e-04 L2_sharp:4.5049e-05 L3_sharp:2.2759e-05 L4_sharp:2.2033e-05 L5_sharp:6.1903e-05 L6_sharp:2.4470e-05 L7_sharp:4.4250e-05 L8_sharp:7.8806e-05 L9_sharp:7.3846e-05 L10_sharp:7.2891e-05 L11_sharp:1.3565e-04 L12_sharp:8.2296e-04 total_fnorm:5.7750e+01 total_l1_linf:1.7510e+05 total_spectral:2.9000e+01 L1_fnorm:1.2375e+01 L2_fnorm:1.2125e+01 L3_fnorm:1.2250e+01 L4_fnorm:1.2438e+01 L5_fnorm:1.2062e+01 L6_fnorm:1.2438e+01 L7_fnorm:1.2500e+01 L8_fnorm:1.1938e+01 L9_fnorm:1.2438e+01 L10_fnorm:1.2375e+01 L11_fnorm:1.2438e+01 L12_fnorm:1.2312e+01 L1_l1linf:3.2969e+00 L2_l1linf:3.0312e+00 L3_l1linf:2.9688e+00 L4_l1linf:3.0312e+00 L5_l1linf:3.0625e+00 L6_l1linf:3.0781e+00 L7_l1linf:3.1406e+00 L8_l1linf:3.0312e+00 L9_l1linf:2.8906e+00 L10_l1linf:2.8281e+00 L11_l1linf:2.8750e+00 L12_l1linf:2.7344e+00 L1_spectral:1.5644e-01 L2_spectral:1.5127e-01 L3_spectral:1.5380e-01 L4_spectral:1.5298e-01 L5_spectral:1.5322e-01 L6_spectral:1.5465e-01 L7_spectral:1.5485e-01 L8_spectral:1.5278e-01 L9_spectral:1.5431e-01 L10_spectral:1.5302e-01 L11_spectral:1.5398e-01 L12_spectral:1.5325e-01 train_time:175703ms step_avg:43.93ms +[2025-09-11 09:31:37] [Rank 0] PRINT: step:4000/10000 
val_loss:4.6579 total_sharp:3.4847e-04 L1_sharp:3.1654e-04 L2_sharp:4.5049e-05 L3_sharp:2.2759e-05 L4_sharp:2.2033e-05 L5_sharp:6.1903e-05 L6_sharp:2.4470e-05 L7_sharp:4.4250e-05 L8_sharp:7.8806e-05 L9_sharp:7.3846e-05 L10_sharp:7.2891e-05 L11_sharp:1.3565e-04 L12_sharp:8.2296e-04 total_fnorm:5.7750e+01 total_l1_linf:1.7510e+05 total_spectral:2.9000e+01 L1_fnorm:1.2375e+01 L2_fnorm:1.2125e+01 L3_fnorm:1.2250e+01 L4_fnorm:1.2438e+01 L5_fnorm:1.2062e+01 L6_fnorm:1.2438e+01 L7_fnorm:1.2500e+01 L8_fnorm:1.1938e+01 L9_fnorm:1.2438e+01 L10_fnorm:1.2375e+01 L11_fnorm:1.2438e+01 L12_fnorm:1.2312e+01 L1_l1linf:3.2969e+00 L2_l1linf:3.0312e+00 L3_l1linf:2.9688e+00 L4_l1linf:3.0312e+00 L5_l1linf:3.0625e+00 L6_l1linf:3.0781e+00 L7_l1linf:3.1406e+00 L8_l1linf:3.0312e+00 L9_l1linf:2.8906e+00 L10_l1linf:2.8281e+00 L11_l1linf:2.8750e+00 L12_l1linf:2.7344e+00 L1_spectral:1.5644e-01 L2_spectral:1.5127e-01 L3_spectral:1.5380e-01 L4_spectral:1.5298e-01 L5_spectral:1.5322e-01 L6_spectral:1.5465e-01 L7_spectral:1.5485e-01 L8_spectral:1.5278e-01 L9_spectral:1.5431e-01 L10_spectral:1.5302e-01 L11_spectral:1.5398e-01 L12_spectral:1.5325e-01 train_time:175703ms step_avg:43.93ms +[2025-09-11 09:31:39] [Rank 0] step:4001/10000 train_time:177817ms step_avg:44.44ms +[2025-09-11 09:31:39] [Rank 0] step:4001/10000 train_time:177817ms step_avg:44.44ms +[2025-09-11 09:31:40] [Rank 0] step:4021/10000 train_time:178579ms step_avg:44.41ms +[2025-09-11 09:31:40] [Rank 0] step:4021/10000 train_time:178579ms step_avg:44.41ms +[2025-09-11 09:31:41] [Rank 0] step:4041/10000 train_time:179254ms step_avg:44.36ms +[2025-09-11 09:31:41] [Rank 0] step:4041/10000 train_time:179254ms step_avg:44.36ms +[2025-09-11 09:31:41] [Rank 0] step:4061/10000 train_time:179929ms step_avg:44.31ms +[2025-09-11 09:31:41] [Rank 0] step:4061/10000 train_time:179929ms step_avg:44.31ms +[2025-09-11 09:31:42] [Rank 0] step:4081/10000 train_time:180602ms step_avg:44.25ms +[2025-09-11 09:31:42] [Rank 0] step:4081/10000 
train_time:180602ms step_avg:44.25ms +[2025-09-11 09:31:43] [Rank 0] step:4101/10000 train_time:181275ms step_avg:44.20ms +[2025-09-11 09:31:43] [Rank 0] step:4101/10000 train_time:181275ms step_avg:44.20ms +[2025-09-11 09:31:43] [Rank 0] step:4121/10000 train_time:181948ms step_avg:44.15ms +[2025-09-11 09:31:43] [Rank 0] step:4121/10000 train_time:181948ms step_avg:44.15ms +[2025-09-11 09:31:44] [Rank 0] step:4141/10000 train_time:182620ms step_avg:44.10ms +[2025-09-11 09:31:44] [Rank 0] step:4141/10000 train_time:182620ms step_avg:44.10ms +[2025-09-11 09:31:45] [Rank 0] step:4161/10000 train_time:183293ms step_avg:44.05ms +[2025-09-11 09:31:45] [Rank 0] step:4161/10000 train_time:183293ms step_avg:44.05ms +[2025-09-11 09:31:45] [Rank 0] step:4181/10000 train_time:183966ms step_avg:44.00ms +[2025-09-11 09:31:45] [Rank 0] step:4181/10000 train_time:183966ms step_avg:44.00ms +[2025-09-11 09:31:46] [Rank 0] step:4201/10000 train_time:184639ms step_avg:43.95ms +[2025-09-11 09:31:46] [Rank 0] step:4201/10000 train_time:184639ms step_avg:43.95ms +[2025-09-11 09:31:47] [Rank 0] step:4221/10000 train_time:185311ms step_avg:43.90ms +[2025-09-11 09:31:47] [Rank 0] step:4221/10000 train_time:185311ms step_avg:43.90ms +[2025-09-11 09:31:47] [Rank 0] step:4241/10000 train_time:185984ms step_avg:43.85ms +[2025-09-11 09:31:47] [Rank 0] step:4241/10000 train_time:185984ms step_avg:43.85ms +[2025-09-11 09:31:48] [Rank 0] step:4261/10000 train_time:186657ms step_avg:43.81ms +[2025-09-11 09:31:48] [Rank 0] step:4261/10000 train_time:186657ms step_avg:43.81ms +[2025-09-11 09:31:49] [Rank 0] step:4281/10000 train_time:187331ms step_avg:43.76ms +[2025-09-11 09:31:49] [Rank 0] step:4281/10000 train_time:187331ms step_avg:43.76ms +[2025-09-11 09:31:49] [Rank 0] step:4301/10000 train_time:188004ms step_avg:43.71ms +[2025-09-11 09:31:49] [Rank 0] step:4301/10000 train_time:188004ms step_avg:43.71ms +[2025-09-11 09:31:50] [Rank 0] step:4321/10000 train_time:188676ms step_avg:43.67ms 
+[2025-09-11 09:31:50] [Rank 0] step:4321/10000 train_time:188676ms step_avg:43.67ms +[2025-09-11 09:31:51] [Rank 0] step:4341/10000 train_time:189348ms step_avg:43.62ms +[2025-09-11 09:31:51] [Rank 0] step:4341/10000 train_time:189348ms step_avg:43.62ms +[2025-09-11 09:31:51] [Rank 0] step:4361/10000 train_time:190020ms step_avg:43.57ms +[2025-09-11 09:31:51] [Rank 0] step:4361/10000 train_time:190020ms step_avg:43.57ms +[2025-09-11 09:31:52] [Rank 0] step:4381/10000 train_time:190693ms step_avg:43.53ms +[2025-09-11 09:31:52] [Rank 0] step:4381/10000 train_time:190693ms step_avg:43.53ms +[2025-09-11 09:31:53] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 09:31:53] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 09:31:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 09:31:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 09:31:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 09:31:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 09:31:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:31:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:31:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 09:31:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 09:31:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 09:31:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 09:31:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 09:31:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 09:32:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 09:32:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 09:32:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 09:32:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 09:32:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 09:32:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 09:32:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 09:32:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 09:32:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 09:32:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 09:32:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 09:32:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 09:32:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 09:32:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 09:32:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 09:32:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 09:32:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 09:32:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 09:32:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 09:32:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 09:32:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 09:32:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 09:32:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 09:32:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 09:32:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 09:32:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 09:32:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 09:32:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 09:32:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 09:32:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 09:32:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:32:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:32:03] [Rank 0] PRINT: step:4400/10000 val_loss:4.6322 total_sharp:2.5807e-04 L1_sharp:2.5898e-04 L2_sharp:2.1723e-05 L3_sharp:3.1933e-05 L4_sharp:1.8039e-05 L5_sharp:3.7867e-05 L6_sharp:1.0188e-05 L7_sharp:3.1895e-05 L8_sharp:4.0471e-05 L9_sharp:5.3593e-05 L10_sharp:5.1353e-05 L11_sharp:1.0599e-04 L12_sharp:4.4980e-04 total_fnorm:5.5750e+01 total_l1_linf:1.6589e+05 total_spectral:2.7875e+01 L1_fnorm:1.2312e+01 L2_fnorm:1.2125e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2375e+01 L5_fnorm:1.2000e+01 L6_fnorm:1.2438e+01 L7_fnorm:1.2500e+01 L8_fnorm:1.1938e+01 L9_fnorm:1.2438e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2500e+01 L12_fnorm:1.2250e+01 L1_l1linf:3.2656e+00 L2_l1linf:3.0469e+00 L3_l1linf:2.9531e+00 L4_l1linf:2.9844e+00 L5_l1linf:3.0781e+00 L6_l1linf:3.1562e+00 L7_l1linf:3.1875e+00 L8_l1linf:3.0312e+00 L9_l1linf:2.8438e+00 L10_l1linf:2.8438e+00 L11_l1linf:2.8906e+00 L12_l1linf:2.7031e+00 L1_spectral:1.5852e-01 L2_spectral:1.5116e-01 L3_spectral:1.5306e-01 L4_spectral:1.5433e-01 L5_spectral:1.5215e-01 L6_spectral:1.5577e-01 L7_spectral:1.5681e-01 L8_spectral:1.5253e-01 L9_spectral:1.5563e-01 L10_spectral:1.5404e-01 L11_spectral:1.5513e-01 L12_spectral:1.5440e-01 train_time:191346ms step_avg:43.49ms +[2025-09-11 09:32:03] [Rank 0] PRINT: step:4400/10000 val_loss:4.6322 total_sharp:2.5807e-04 L1_sharp:2.5898e-04 L2_sharp:2.1723e-05 L3_sharp:3.1933e-05 L4_sharp:1.8039e-05 L5_sharp:3.7867e-05 L6_sharp:1.0188e-05 L7_sharp:3.1895e-05 L8_sharp:4.0471e-05 L9_sharp:5.3593e-05 L10_sharp:5.1353e-05 L11_sharp:1.0599e-04 L12_sharp:4.4980e-04 total_fnorm:5.5750e+01 total_l1_linf:1.6589e+05 total_spectral:2.7875e+01 L1_fnorm:1.2312e+01 L2_fnorm:1.2125e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2375e+01 L5_fnorm:1.2000e+01 L6_fnorm:1.2438e+01 L7_fnorm:1.2500e+01 L8_fnorm:1.1938e+01 L9_fnorm:1.2438e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2500e+01 L12_fnorm:1.2250e+01 L1_l1linf:3.2656e+00 L2_l1linf:3.0469e+00 L3_l1linf:2.9531e+00 L4_l1linf:2.9844e+00 L5_l1linf:3.0781e+00 
L6_l1linf:3.1562e+00 L7_l1linf:3.1875e+00 L8_l1linf:3.0312e+00 L9_l1linf:2.8438e+00 L10_l1linf:2.8438e+00 L11_l1linf:2.8906e+00 L12_l1linf:2.7031e+00 L1_spectral:1.5852e-01 L2_spectral:1.5116e-01 L3_spectral:1.5306e-01 L4_spectral:1.5433e-01 L5_spectral:1.5215e-01 L6_spectral:1.5577e-01 L7_spectral:1.5681e-01 L8_spectral:1.5253e-01 L9_spectral:1.5563e-01 L10_spectral:1.5404e-01 L11_spectral:1.5513e-01 L12_spectral:1.5440e-01 train_time:191346ms step_avg:43.49ms +[2025-09-11 09:32:04] [Rank 0] step:4401/10000 train_time:192505ms step_avg:43.74ms +[2025-09-11 09:32:04] [Rank 0] step:4401/10000 train_time:192505ms step_avg:43.74ms +[2025-09-11 09:32:05] [Rank 0] step:4421/10000 train_time:193185ms step_avg:43.70ms +[2025-09-11 09:32:05] [Rank 0] step:4421/10000 train_time:193185ms step_avg:43.70ms +[2025-09-11 09:32:06] [Rank 0] step:4441/10000 train_time:193860ms step_avg:43.65ms +[2025-09-11 09:32:06] [Rank 0] step:4441/10000 train_time:193860ms step_avg:43.65ms +[2025-09-11 09:32:07] [Rank 0] step:4461/10000 train_time:194537ms step_avg:43.61ms +[2025-09-11 09:32:07] [Rank 0] step:4461/10000 train_time:194537ms step_avg:43.61ms +[2025-09-11 09:32:07] [Rank 0] step:4481/10000 train_time:195213ms step_avg:43.56ms +[2025-09-11 09:32:07] [Rank 0] step:4481/10000 train_time:195213ms step_avg:43.56ms +[2025-09-11 09:32:08] [Rank 0] step:4501/10000 train_time:195890ms step_avg:43.52ms +[2025-09-11 09:32:08] [Rank 0] step:4501/10000 train_time:195890ms step_avg:43.52ms +[2025-09-11 09:32:09] [Rank 0] step:4521/10000 train_time:196569ms step_avg:43.48ms +[2025-09-11 09:32:09] [Rank 0] step:4521/10000 train_time:196569ms step_avg:43.48ms +[2025-09-11 09:32:09] [Rank 0] step:4541/10000 train_time:197246ms step_avg:43.44ms +[2025-09-11 09:32:09] [Rank 0] step:4541/10000 train_time:197246ms step_avg:43.44ms +[2025-09-11 09:32:10] [Rank 0] step:4561/10000 train_time:197923ms step_avg:43.39ms +[2025-09-11 09:32:10] [Rank 0] step:4561/10000 train_time:197923ms step_avg:43.39ms 
+[2025-09-11 09:32:11] [Rank 0] step:4581/10000 train_time:198600ms step_avg:43.35ms +[2025-09-11 09:32:11] [Rank 0] step:4581/10000 train_time:198600ms step_avg:43.35ms +[2025-09-11 09:32:11] [Rank 0] step:4601/10000 train_time:199276ms step_avg:43.31ms +[2025-09-11 09:32:11] [Rank 0] step:4601/10000 train_time:199276ms step_avg:43.31ms +[2025-09-11 09:32:12] [Rank 0] step:4621/10000 train_time:199953ms step_avg:43.27ms +[2025-09-11 09:32:12] [Rank 0] step:4621/10000 train_time:199953ms step_avg:43.27ms +[2025-09-11 09:32:13] [Rank 0] step:4641/10000 train_time:200629ms step_avg:43.23ms +[2025-09-11 09:32:13] [Rank 0] step:4641/10000 train_time:200629ms step_avg:43.23ms +[2025-09-11 09:32:13] [Rank 0] step:4661/10000 train_time:201306ms step_avg:43.19ms +[2025-09-11 09:32:13] [Rank 0] step:4661/10000 train_time:201306ms step_avg:43.19ms +[2025-09-11 09:32:14] [Rank 0] step:4681/10000 train_time:201983ms step_avg:43.15ms +[2025-09-11 09:32:14] [Rank 0] step:4681/10000 train_time:201983ms step_avg:43.15ms +[2025-09-11 09:32:15] [Rank 0] step:4701/10000 train_time:202660ms step_avg:43.11ms +[2025-09-11 09:32:15] [Rank 0] step:4701/10000 train_time:202660ms step_avg:43.11ms +[2025-09-11 09:32:15] [Rank 0] step:4721/10000 train_time:203336ms step_avg:43.07ms +[2025-09-11 09:32:15] [Rank 0] step:4721/10000 train_time:203336ms step_avg:43.07ms +[2025-09-11 09:32:16] [Rank 0] step:4741/10000 train_time:204013ms step_avg:43.03ms +[2025-09-11 09:32:16] [Rank 0] step:4741/10000 train_time:204013ms step_avg:43.03ms +[2025-09-11 09:32:17] [Rank 0] step:4761/10000 train_time:204690ms step_avg:42.99ms +[2025-09-11 09:32:17] [Rank 0] step:4761/10000 train_time:204690ms step_avg:42.99ms +[2025-09-11 09:32:17] [Rank 0] step:4781/10000 train_time:205365ms step_avg:42.95ms +[2025-09-11 09:32:17] [Rank 0] step:4781/10000 train_time:205365ms step_avg:42.95ms +[2025-09-11 09:32:18] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 09:32:18] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 09:32:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 09:32:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 09:32:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 09:32:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 09:32:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:32:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:32:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 09:32:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 09:32:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 09:32:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 09:32:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 09:32:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 09:32:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 09:32:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 09:32:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 09:32:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 09:32:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 09:32:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 09:32:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 09:32:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 09:32:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 09:32:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 09:32:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 09:32:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 09:32:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 09:32:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 09:32:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 09:32:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 09:32:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 09:32:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 09:32:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 09:32:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 09:32:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 09:32:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 09:32:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 09:32:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 09:32:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 09:32:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 09:32:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 09:32:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 09:32:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 09:32:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 09:32:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:32:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:32:28] [Rank 0] PRINT: step:4800/10000 val_loss:4.5706 total_sharp:2.4030e-04 L1_sharp:1.8028e-04 L2_sharp:2.1760e-05 L3_sharp:2.4034e-05 L4_sharp:6.5242e-06 L5_sharp:4.0148e-05 L6_sharp:2.1386e-05 L7_sharp:2.5964e-05 L8_sharp:4.5611e-05 L9_sharp:5.4540e-05 L10_sharp:4.7460e-05 L11_sharp:9.3762e-05 L12_sharp:5.2619e-04 total_fnorm:5.5500e+01 total_l1_linf:1.6589e+05 total_spectral:2.7875e+01 L1_fnorm:1.2312e+01 L2_fnorm:1.2188e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2375e+01 L5_fnorm:1.1938e+01 L6_fnorm:1.2438e+01 L7_fnorm:1.2500e+01 L8_fnorm:1.1938e+01 L9_fnorm:1.2438e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2438e+01 L12_fnorm:1.2312e+01 L1_l1linf:3.2031e+00 L2_l1linf:3.0156e+00 L3_l1linf:2.9688e+00 L4_l1linf:2.9844e+00 L5_l1linf:3.0938e+00 L6_l1linf:3.1406e+00 L7_l1linf:3.1406e+00 L8_l1linf:3.0156e+00 L9_l1linf:2.8750e+00 L10_l1linf:2.7969e+00 L11_l1linf:2.7812e+00 L12_l1linf:2.7188e+00 L1_spectral:1.5910e-01 L2_spectral:1.5256e-01 L3_spectral:1.5563e-01 L4_spectral:1.5474e-01 L5_spectral:1.5447e-01 L6_spectral:1.5764e-01 L7_spectral:1.5724e-01 L8_spectral:1.5214e-01 L9_spectral:1.5599e-01 L10_spectral:1.5523e-01 L11_spectral:1.5587e-01 L12_spectral:1.5560e-01 train_time:206022ms step_avg:42.92ms +[2025-09-11 09:32:28] [Rank 0] PRINT: step:4800/10000 
val_loss:4.5706 total_sharp:2.4030e-04 L1_sharp:1.8028e-04 L2_sharp:2.1760e-05 L3_sharp:2.4034e-05 L4_sharp:6.5242e-06 L5_sharp:4.0148e-05 L6_sharp:2.1386e-05 L7_sharp:2.5964e-05 L8_sharp:4.5611e-05 L9_sharp:5.4540e-05 L10_sharp:4.7460e-05 L11_sharp:9.3762e-05 L12_sharp:5.2619e-04 total_fnorm:5.5500e+01 total_l1_linf:1.6589e+05 total_spectral:2.7875e+01 L1_fnorm:1.2312e+01 L2_fnorm:1.2188e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2375e+01 L5_fnorm:1.1938e+01 L6_fnorm:1.2438e+01 L7_fnorm:1.2500e+01 L8_fnorm:1.1938e+01 L9_fnorm:1.2438e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2438e+01 L12_fnorm:1.2312e+01 L1_l1linf:3.2031e+00 L2_l1linf:3.0156e+00 L3_l1linf:2.9688e+00 L4_l1linf:2.9844e+00 L5_l1linf:3.0938e+00 L6_l1linf:3.1406e+00 L7_l1linf:3.1406e+00 L8_l1linf:3.0156e+00 L9_l1linf:2.8750e+00 L10_l1linf:2.7969e+00 L11_l1linf:2.7812e+00 L12_l1linf:2.7188e+00 L1_spectral:1.5910e-01 L2_spectral:1.5256e-01 L3_spectral:1.5563e-01 L4_spectral:1.5474e-01 L5_spectral:1.5447e-01 L6_spectral:1.5764e-01 L7_spectral:1.5724e-01 L8_spectral:1.5214e-01 L9_spectral:1.5599e-01 L10_spectral:1.5523e-01 L11_spectral:1.5587e-01 L12_spectral:1.5560e-01 train_time:206022ms step_avg:42.92ms +[2025-09-11 09:32:29] [Rank 0] step:4801/10000 train_time:207185ms step_avg:43.15ms +[2025-09-11 09:32:29] [Rank 0] step:4801/10000 train_time:207185ms step_avg:43.15ms +[2025-09-11 09:32:30] [Rank 0] step:4821/10000 train_time:208166ms step_avg:43.18ms +[2025-09-11 09:32:30] [Rank 0] step:4821/10000 train_time:208166ms step_avg:43.18ms +[2025-09-11 09:32:31] [Rank 0] step:4841/10000 train_time:208844ms step_avg:43.14ms +[2025-09-11 09:32:31] [Rank 0] step:4841/10000 train_time:208844ms step_avg:43.14ms +[2025-09-11 09:32:31] [Rank 0] step:4861/10000 train_time:209521ms step_avg:43.10ms +[2025-09-11 09:32:31] [Rank 0] step:4861/10000 train_time:209521ms step_avg:43.10ms +[2025-09-11 09:32:32] [Rank 0] step:4881/10000 train_time:210198ms step_avg:43.06ms +[2025-09-11 09:32:32] [Rank 0] step:4881/10000 
train_time:210198ms step_avg:43.06ms +[2025-09-11 09:32:33] [Rank 0] step:4901/10000 train_time:210875ms step_avg:43.03ms +[2025-09-11 09:32:33] [Rank 0] step:4901/10000 train_time:210875ms step_avg:43.03ms +[2025-09-11 09:32:34] [Rank 0] step:4921/10000 train_time:211551ms step_avg:42.99ms +[2025-09-11 09:32:34] [Rank 0] step:4921/10000 train_time:211551ms step_avg:42.99ms +[2025-09-11 09:32:34] [Rank 0] step:4941/10000 train_time:212228ms step_avg:42.95ms +[2025-09-11 09:32:34] [Rank 0] step:4941/10000 train_time:212228ms step_avg:42.95ms +[2025-09-11 09:32:35] [Rank 0] step:4961/10000 train_time:212905ms step_avg:42.92ms +[2025-09-11 09:32:35] [Rank 0] step:4961/10000 train_time:212905ms step_avg:42.92ms +[2025-09-11 09:32:36] [Rank 0] step:4981/10000 train_time:213581ms step_avg:42.88ms +[2025-09-11 09:32:36] [Rank 0] step:4981/10000 train_time:213581ms step_avg:42.88ms +[2025-09-11 09:32:36] [Rank 0] step:5001/10000 train_time:214258ms step_avg:42.84ms +[2025-09-11 09:32:36] [Rank 0] step:5001/10000 train_time:214258ms step_avg:42.84ms +[2025-09-11 09:32:37] [Rank 0] step:5021/10000 train_time:214933ms step_avg:42.81ms +[2025-09-11 09:32:37] [Rank 0] step:5021/10000 train_time:214933ms step_avg:42.81ms +[2025-09-11 09:32:38] [Rank 0] step:5041/10000 train_time:215608ms step_avg:42.77ms +[2025-09-11 09:32:38] [Rank 0] step:5041/10000 train_time:215608ms step_avg:42.77ms +[2025-09-11 09:32:38] [Rank 0] step:5061/10000 train_time:216284ms step_avg:42.74ms +[2025-09-11 09:32:38] [Rank 0] step:5061/10000 train_time:216284ms step_avg:42.74ms +[2025-09-11 09:32:39] [Rank 0] step:5081/10000 train_time:216960ms step_avg:42.70ms +[2025-09-11 09:32:39] [Rank 0] step:5081/10000 train_time:216960ms step_avg:42.70ms +[2025-09-11 09:32:40] [Rank 0] step:5101/10000 train_time:217637ms step_avg:42.67ms +[2025-09-11 09:32:40] [Rank 0] step:5101/10000 train_time:217637ms step_avg:42.67ms +[2025-09-11 09:32:40] [Rank 0] step:5121/10000 train_time:218313ms step_avg:42.63ms 
+[2025-09-11 09:32:40] [Rank 0] step:5121/10000 train_time:218313ms step_avg:42.63ms +[2025-09-11 09:32:41] [Rank 0] step:5141/10000 train_time:218990ms step_avg:42.60ms +[2025-09-11 09:32:41] [Rank 0] step:5141/10000 train_time:218990ms step_avg:42.60ms +[2025-09-11 09:32:42] [Rank 0] step:5161/10000 train_time:219666ms step_avg:42.56ms +[2025-09-11 09:32:42] [Rank 0] step:5161/10000 train_time:219666ms step_avg:42.56ms +[2025-09-11 09:32:42] [Rank 0] step:5181/10000 train_time:220342ms step_avg:42.53ms +[2025-09-11 09:32:42] [Rank 0] step:5181/10000 train_time:220342ms step_avg:42.53ms +[2025-09-11 09:32:43] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 09:32:43] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 09:32:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 09:32:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 09:32:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 09:32:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 09:32:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:32:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:32:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 09:32:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 09:32:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 09:32:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 09:32:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 09:32:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 09:32:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 09:32:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 09:32:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 09:32:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 09:32:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 09:32:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 09:32:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 09:32:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 09:32:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 09:32:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 09:32:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 09:32:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 09:32:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 09:32:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 09:32:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 09:32:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 09:32:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 09:32:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 09:32:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 09:32:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 09:32:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 09:32:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 09:32:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 09:32:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 09:32:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 09:32:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 09:32:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 09:32:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 09:32:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 09:32:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 09:32:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:32:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:32:53] [Rank 0] PRINT: step:5200/10000 val_loss:4.5408 total_sharp:3.4519e-04 L1_sharp:3.1047e-04 L2_sharp:2.1034e-05 L3_sharp:1.8434e-05 L4_sharp:5.5188e-06 L5_sharp:2.2358e-05 L6_sharp:2.4845e-05 L7_sharp:2.4132e-05 L8_sharp:4.6333e-05 L9_sharp:7.0258e-05 L10_sharp:6.2748e-05 L11_sharp:1.0922e-04 L12_sharp:1.2875e-03 total_fnorm:5.4000e+01 total_l1_linf:1.5770e+05 total_spectral:2.7125e+01 L1_fnorm:1.2250e+01 L2_fnorm:1.2188e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2438e+01 L5_fnorm:1.2125e+01 L6_fnorm:1.2438e+01 L7_fnorm:1.2500e+01 L8_fnorm:1.1938e+01 L9_fnorm:1.2438e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2438e+01 L12_fnorm:1.2375e+01 L1_l1linf:3.1719e+00 L2_l1linf:3.0000e+00 L3_l1linf:2.8906e+00 L4_l1linf:2.9688e+00 L5_l1linf:3.0938e+00 L6_l1linf:3.1094e+00 L7_l1linf:3.1406e+00 L8_l1linf:3.0156e+00 L9_l1linf:2.8750e+00 L10_l1linf:2.7656e+00 L11_l1linf:2.7344e+00 L12_l1linf:2.6875e+00 L1_spectral:1.5795e-01 L2_spectral:1.5388e-01 L3_spectral:1.5600e-01 L4_spectral:1.5611e-01 L5_spectral:1.5362e-01 L6_spectral:1.5736e-01 L7_spectral:1.5768e-01 L8_spectral:1.5260e-01 L9_spectral:1.5622e-01 L10_spectral:1.5621e-01 L11_spectral:1.5615e-01 L12_spectral:1.5677e-01 train_time:221005ms step_avg:42.50ms +[2025-09-11 09:32:53] [Rank 0] PRINT: step:5200/10000 val_loss:4.5408 total_sharp:3.4519e-04 L1_sharp:3.1047e-04 L2_sharp:2.1034e-05 L3_sharp:1.8434e-05 L4_sharp:5.5188e-06 L5_sharp:2.2358e-05 L6_sharp:2.4845e-05 L7_sharp:2.4132e-05 L8_sharp:4.6333e-05 L9_sharp:7.0258e-05 L10_sharp:6.2748e-05 L11_sharp:1.0922e-04 L12_sharp:1.2875e-03 total_fnorm:5.4000e+01 total_l1_linf:1.5770e+05 total_spectral:2.7125e+01 L1_fnorm:1.2250e+01 L2_fnorm:1.2188e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2438e+01 L5_fnorm:1.2125e+01 L6_fnorm:1.2438e+01 L7_fnorm:1.2500e+01 L8_fnorm:1.1938e+01 L9_fnorm:1.2438e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2438e+01 L12_fnorm:1.2375e+01 L1_l1linf:3.1719e+00 L2_l1linf:3.0000e+00 L3_l1linf:2.8906e+00 L4_l1linf:2.9688e+00 L5_l1linf:3.0938e+00 
L6_l1linf:3.1094e+00 L7_l1linf:3.1406e+00 L8_l1linf:3.0156e+00 L9_l1linf:2.8750e+00 L10_l1linf:2.7656e+00 L11_l1linf:2.7344e+00 L12_l1linf:2.6875e+00 L1_spectral:1.5795e-01 L2_spectral:1.5388e-01 L3_spectral:1.5600e-01 L4_spectral:1.5611e-01 L5_spectral:1.5362e-01 L6_spectral:1.5736e-01 L7_spectral:1.5768e-01 L8_spectral:1.5260e-01 L9_spectral:1.5622e-01 L10_spectral:1.5621e-01 L11_spectral:1.5615e-01 L12_spectral:1.5677e-01 train_time:221005ms step_avg:42.50ms +[2025-09-11 09:32:54] [Rank 0] step:5201/10000 train_time:222206ms step_avg:42.72ms +[2025-09-11 09:32:54] [Rank 0] step:5201/10000 train_time:222206ms step_avg:42.72ms +[2025-09-11 09:32:55] [Rank 0] step:5221/10000 train_time:222897ms step_avg:42.69ms +[2025-09-11 09:32:55] [Rank 0] step:5221/10000 train_time:222897ms step_avg:42.69ms +[2025-09-11 09:32:56] [Rank 0] step:5241/10000 train_time:223583ms step_avg:42.66ms +[2025-09-11 09:32:56] [Rank 0] step:5241/10000 train_time:223583ms step_avg:42.66ms +[2025-09-11 09:32:56] [Rank 0] step:5261/10000 train_time:224270ms step_avg:42.63ms +[2025-09-11 09:32:56] [Rank 0] step:5261/10000 train_time:224270ms step_avg:42.63ms +[2025-09-11 09:32:57] [Rank 0] step:5281/10000 train_time:224956ms step_avg:42.60ms +[2025-09-11 09:32:57] [Rank 0] step:5281/10000 train_time:224956ms step_avg:42.60ms +[2025-09-11 09:32:58] [Rank 0] step:5301/10000 train_time:225643ms step_avg:42.57ms +[2025-09-11 09:32:58] [Rank 0] step:5301/10000 train_time:225643ms step_avg:42.57ms +[2025-09-11 09:32:58] [Rank 0] step:5321/10000 train_time:226330ms step_avg:42.54ms +[2025-09-11 09:32:58] [Rank 0] step:5321/10000 train_time:226330ms step_avg:42.54ms +[2025-09-11 09:32:59] [Rank 0] step:5341/10000 train_time:227015ms step_avg:42.50ms +[2025-09-11 09:32:59] [Rank 0] step:5341/10000 train_time:227015ms step_avg:42.50ms +[2025-09-11 09:33:00] [Rank 0] step:5361/10000 train_time:227701ms step_avg:42.47ms +[2025-09-11 09:33:00] [Rank 0] step:5361/10000 train_time:227701ms step_avg:42.47ms 
+[2025-09-11 09:33:00] [Rank 0] step:5381/10000 train_time:228387ms step_avg:42.44ms +[2025-09-11 09:33:00] [Rank 0] step:5381/10000 train_time:228387ms step_avg:42.44ms +[2025-09-11 09:33:01] [Rank 0] step:5401/10000 train_time:229072ms step_avg:42.41ms +[2025-09-11 09:33:01] [Rank 0] step:5401/10000 train_time:229072ms step_avg:42.41ms +[2025-09-11 09:33:02] [Rank 0] step:5421/10000 train_time:229760ms step_avg:42.38ms +[2025-09-11 09:33:02] [Rank 0] step:5421/10000 train_time:229760ms step_avg:42.38ms +[2025-09-11 09:33:02] [Rank 0] step:5441/10000 train_time:230446ms step_avg:42.35ms +[2025-09-11 09:33:02] [Rank 0] step:5441/10000 train_time:230446ms step_avg:42.35ms +[2025-09-11 09:33:03] [Rank 0] step:5461/10000 train_time:231133ms step_avg:42.32ms +[2025-09-11 09:33:03] [Rank 0] step:5461/10000 train_time:231133ms step_avg:42.32ms +[2025-09-11 09:33:04] [Rank 0] step:5481/10000 train_time:231819ms step_avg:42.29ms +[2025-09-11 09:33:04] [Rank 0] step:5481/10000 train_time:231819ms step_avg:42.29ms +[2025-09-11 09:33:04] [Rank 0] step:5501/10000 train_time:232505ms step_avg:42.27ms +[2025-09-11 09:33:04] [Rank 0] step:5501/10000 train_time:232505ms step_avg:42.27ms +[2025-09-11 09:33:05] [Rank 0] step:5521/10000 train_time:233190ms step_avg:42.24ms +[2025-09-11 09:33:05] [Rank 0] step:5521/10000 train_time:233190ms step_avg:42.24ms +[2025-09-11 09:33:06] [Rank 0] step:5541/10000 train_time:233878ms step_avg:42.21ms +[2025-09-11 09:33:06] [Rank 0] step:5541/10000 train_time:233878ms step_avg:42.21ms +[2025-09-11 09:33:07] [Rank 0] step:5561/10000 train_time:234567ms step_avg:42.18ms +[2025-09-11 09:33:07] [Rank 0] step:5561/10000 train_time:234567ms step_avg:42.18ms +[2025-09-11 09:33:07] [Rank 0] step:5581/10000 train_time:235254ms step_avg:42.15ms +[2025-09-11 09:33:07] [Rank 0] step:5581/10000 train_time:235254ms step_avg:42.15ms +[2025-09-11 09:33:08] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 09:33:08] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 09:33:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 09:33:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 09:33:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 09:33:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 09:33:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:33:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:33:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 09:33:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 09:33:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 09:33:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 09:33:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 09:33:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 09:33:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 09:33:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 09:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 09:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 09:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 09:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 09:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 09:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 09:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 09:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 09:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 09:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 09:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 09:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 09:33:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 09:33:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 09:33:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 09:33:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 09:33:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 09:33:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 09:33:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 09:33:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 09:33:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 09:33:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 09:33:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 09:33:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 09:33:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 09:33:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 09:33:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 09:33:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 09:33:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:33:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:33:18] [Rank 0] PRINT: step:5600/10000 val_loss:4.5259 total_sharp:2.1955e-04 L1_sharp:1.3718e-04 L2_sharp:2.3600e-05 L3_sharp:1.8086e-05 L4_sharp:2.4730e-05 L5_sharp:3.7832e-05 L6_sharp:1.5897e-05 L7_sharp:2.8833e-05 L8_sharp:4.4975e-05 L9_sharp:4.7484e-05 L10_sharp:4.8312e-05 L11_sharp:9.7605e-05 L12_sharp:3.7745e-04 total_fnorm:5.4250e+01 total_l1_linf:1.5770e+05 total_spectral:2.7250e+01 L1_fnorm:1.2188e+01 L2_fnorm:1.2125e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2375e+01 L5_fnorm:1.2000e+01 L6_fnorm:1.2438e+01 L7_fnorm:1.2500e+01 L8_fnorm:1.1875e+01 L9_fnorm:1.2438e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2438e+01 L12_fnorm:1.2312e+01 L1_l1linf:3.1719e+00 L2_l1linf:2.9531e+00 L3_l1linf:2.8594e+00 L4_l1linf:2.9688e+00 L5_l1linf:3.1094e+00 L6_l1linf:3.0938e+00 L7_l1linf:3.1719e+00 L8_l1linf:2.9688e+00 L9_l1linf:2.7656e+00 L10_l1linf:2.7500e+00 L11_l1linf:2.7344e+00 L12_l1linf:2.6875e+00 L1_spectral:1.5897e-01 L2_spectral:1.5313e-01 L3_spectral:1.5539e-01 L4_spectral:1.5603e-01 L5_spectral:1.5385e-01 L6_spectral:1.5806e-01 L7_spectral:1.5928e-01 L8_spectral:1.5352e-01 L9_spectral:1.5659e-01 L10_spectral:1.5614e-01 L11_spectral:1.5729e-01 L12_spectral:1.5637e-01 train_time:235921ms step_avg:42.13ms +[2025-09-11 09:33:18] [Rank 0] PRINT: step:5600/10000 
val_loss:4.5259 total_sharp:2.1955e-04 L1_sharp:1.3718e-04 L2_sharp:2.3600e-05 L3_sharp:1.8086e-05 L4_sharp:2.4730e-05 L5_sharp:3.7832e-05 L6_sharp:1.5897e-05 L7_sharp:2.8833e-05 L8_sharp:4.4975e-05 L9_sharp:4.7484e-05 L10_sharp:4.8312e-05 L11_sharp:9.7605e-05 L12_sharp:3.7745e-04 total_fnorm:5.4250e+01 total_l1_linf:1.5770e+05 total_spectral:2.7250e+01 L1_fnorm:1.2188e+01 L2_fnorm:1.2125e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2375e+01 L5_fnorm:1.2000e+01 L6_fnorm:1.2438e+01 L7_fnorm:1.2500e+01 L8_fnorm:1.1875e+01 L9_fnorm:1.2438e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2438e+01 L12_fnorm:1.2312e+01 L1_l1linf:3.1719e+00 L2_l1linf:2.9531e+00 L3_l1linf:2.8594e+00 L4_l1linf:2.9688e+00 L5_l1linf:3.1094e+00 L6_l1linf:3.0938e+00 L7_l1linf:3.1719e+00 L8_l1linf:2.9688e+00 L9_l1linf:2.7656e+00 L10_l1linf:2.7500e+00 L11_l1linf:2.7344e+00 L12_l1linf:2.6875e+00 L1_spectral:1.5897e-01 L2_spectral:1.5313e-01 L3_spectral:1.5539e-01 L4_spectral:1.5603e-01 L5_spectral:1.5385e-01 L6_spectral:1.5806e-01 L7_spectral:1.5928e-01 L8_spectral:1.5352e-01 L9_spectral:1.5659e-01 L10_spectral:1.5614e-01 L11_spectral:1.5729e-01 L12_spectral:1.5637e-01 train_time:235921ms step_avg:42.13ms +[2025-09-11 09:33:19] [Rank 0] step:5601/10000 train_time:237094ms step_avg:42.33ms +[2025-09-11 09:33:19] [Rank 0] step:5601/10000 train_time:237094ms step_avg:42.33ms +[2025-09-11 09:33:20] [Rank 0] step:5621/10000 train_time:237815ms step_avg:42.31ms +[2025-09-11 09:33:20] [Rank 0] step:5621/10000 train_time:237815ms step_avg:42.31ms +[2025-09-11 09:33:20] [Rank 0] step:5641/10000 train_time:238500ms step_avg:42.28ms +[2025-09-11 09:33:20] [Rank 0] step:5641/10000 train_time:238500ms step_avg:42.28ms +[2025-09-11 09:33:21] [Rank 0] step:5661/10000 train_time:239187ms step_avg:42.25ms +[2025-09-11 09:33:21] [Rank 0] step:5661/10000 train_time:239187ms step_avg:42.25ms +[2025-09-11 09:33:22] [Rank 0] step:5681/10000 train_time:239872ms step_avg:42.22ms +[2025-09-11 09:33:22] [Rank 0] step:5681/10000 
train_time:239872ms step_avg:42.22ms +[2025-09-11 09:33:22] [Rank 0] step:5701/10000 train_time:240560ms step_avg:42.20ms +[2025-09-11 09:33:22] [Rank 0] step:5701/10000 train_time:240560ms step_avg:42.20ms +[2025-09-11 09:33:23] [Rank 0] step:5721/10000 train_time:241246ms step_avg:42.17ms +[2025-09-11 09:33:23] [Rank 0] step:5721/10000 train_time:241246ms step_avg:42.17ms +[2025-09-11 09:33:24] [Rank 0] step:5741/10000 train_time:241934ms step_avg:42.14ms +[2025-09-11 09:33:24] [Rank 0] step:5741/10000 train_time:241934ms step_avg:42.14ms +[2025-09-11 09:33:25] [Rank 0] step:5761/10000 train_time:242621ms step_avg:42.11ms +[2025-09-11 09:33:25] [Rank 0] step:5761/10000 train_time:242621ms step_avg:42.11ms +[2025-09-11 09:33:25] [Rank 0] step:5781/10000 train_time:243307ms step_avg:42.09ms +[2025-09-11 09:33:25] [Rank 0] step:5781/10000 train_time:243307ms step_avg:42.09ms +[2025-09-11 09:33:26] [Rank 0] step:5801/10000 train_time:243996ms step_avg:42.06ms +[2025-09-11 09:33:26] [Rank 0] step:5801/10000 train_time:243996ms step_avg:42.06ms +[2025-09-11 09:33:27] [Rank 0] step:5821/10000 train_time:244681ms step_avg:42.03ms +[2025-09-11 09:33:27] [Rank 0] step:5821/10000 train_time:244681ms step_avg:42.03ms +[2025-09-11 09:33:27] [Rank 0] step:5841/10000 train_time:245373ms step_avg:42.01ms +[2025-09-11 09:33:27] [Rank 0] step:5841/10000 train_time:245373ms step_avg:42.01ms +[2025-09-11 09:33:28] [Rank 0] step:5861/10000 train_time:246059ms step_avg:41.98ms +[2025-09-11 09:33:28] [Rank 0] step:5861/10000 train_time:246059ms step_avg:41.98ms +[2025-09-11 09:33:29] [Rank 0] step:5881/10000 train_time:246745ms step_avg:41.96ms +[2025-09-11 09:33:29] [Rank 0] step:5881/10000 train_time:246745ms step_avg:41.96ms +[2025-09-11 09:33:29] [Rank 0] step:5901/10000 train_time:247432ms step_avg:41.93ms +[2025-09-11 09:33:29] [Rank 0] step:5901/10000 train_time:247432ms step_avg:41.93ms +[2025-09-11 09:33:30] [Rank 0] step:5921/10000 train_time:248121ms step_avg:41.91ms 
+[2025-09-11 09:33:30] [Rank 0] step:5921/10000 train_time:248121ms step_avg:41.91ms +[2025-09-11 09:33:31] [Rank 0] step:5941/10000 train_time:249267ms step_avg:41.96ms +[2025-09-11 09:33:31] [Rank 0] step:5941/10000 train_time:249267ms step_avg:41.96ms +[2025-09-11 09:33:32] [Rank 0] step:5961/10000 train_time:249954ms step_avg:41.93ms +[2025-09-11 09:33:32] [Rank 0] step:5961/10000 train_time:249954ms step_avg:41.93ms +[2025-09-11 09:33:33] [Rank 0] step:5981/10000 train_time:250642ms step_avg:41.91ms +[2025-09-11 09:33:33] [Rank 0] step:5981/10000 train_time:250642ms step_avg:41.91ms +[2025-09-11 09:33:33] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 09:33:33] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 09:33:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 09:33:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 09:33:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 09:33:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 09:33:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:33:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:33:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 09:33:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 09:33:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 09:33:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 09:33:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 09:33:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 09:33:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 09:33:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 09:33:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 09:33:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 09:33:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 09:33:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 09:33:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 09:33:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 09:33:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 09:33:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 09:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 09:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 09:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 09:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 09:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 09:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 09:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 09:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 09:33:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 09:33:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 09:33:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 09:33:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 09:33:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 09:33:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 09:33:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 09:33:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 09:33:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 09:33:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 09:33:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 09:33:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 09:33:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:33:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:33:44] [Rank 0] PRINT: step:6000/10000 val_loss:4.4779 total_sharp:2.1783e-04 L1_sharp:1.8033e-04 L2_sharp:2.7821e-05 L3_sharp:7.9901e-06 L4_sharp:1.0678e-05 L5_sharp:3.0108e-05 L6_sharp:1.4594e-05 L7_sharp:1.8336e-05 L8_sharp:5.1784e-05 L9_sharp:5.0296e-05 L10_sharp:4.6794e-05 L11_sharp:8.6476e-05 L12_sharp:3.5544e-04 total_fnorm:5.4250e+01 total_l1_linf:1.5667e+05 total_spectral:2.7250e+01 L1_fnorm:1.2250e+01 L2_fnorm:1.2062e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2438e+01 L5_fnorm:1.2062e+01 L6_fnorm:1.2438e+01 L7_fnorm:1.2500e+01 L8_fnorm:1.1938e+01 L9_fnorm:1.2375e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2375e+01 L12_fnorm:1.2312e+01 L1_l1linf:3.1562e+00 L2_l1linf:2.9219e+00 L3_l1linf:2.8750e+00 L4_l1linf:2.9844e+00 L5_l1linf:3.0312e+00 L6_l1linf:3.0781e+00 L7_l1linf:3.1094e+00 L8_l1linf:3.0000e+00 L9_l1linf:2.8438e+00 L10_l1linf:2.7344e+00 L11_l1linf:2.6562e+00 L12_l1linf:2.7500e+00 L1_spectral:1.6052e-01 L2_spectral:1.5449e-01 L3_spectral:1.5650e-01 L4_spectral:1.5727e-01 L5_spectral:1.5530e-01 L6_spectral:1.5838e-01 L7_spectral:1.5951e-01 L8_spectral:1.5366e-01 L9_spectral:1.5706e-01 L10_spectral:1.5713e-01 L11_spectral:1.5733e-01 L12_spectral:1.5729e-01 train_time:251584ms step_avg:41.93ms +[2025-09-11 09:33:44] [Rank 0] PRINT: step:6000/10000 val_loss:4.4779 total_sharp:2.1783e-04 L1_sharp:1.8033e-04 L2_sharp:2.7821e-05 L3_sharp:7.9901e-06 L4_sharp:1.0678e-05 L5_sharp:3.0108e-05 L6_sharp:1.4594e-05 L7_sharp:1.8336e-05 L8_sharp:5.1784e-05 L9_sharp:5.0296e-05 L10_sharp:4.6794e-05 L11_sharp:8.6476e-05 L12_sharp:3.5544e-04 total_fnorm:5.4250e+01 total_l1_linf:1.5667e+05 total_spectral:2.7250e+01 L1_fnorm:1.2250e+01 L2_fnorm:1.2062e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2438e+01 L5_fnorm:1.2062e+01 L6_fnorm:1.2438e+01 L7_fnorm:1.2500e+01 L8_fnorm:1.1938e+01 L9_fnorm:1.2375e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2375e+01 L12_fnorm:1.2312e+01 L1_l1linf:3.1562e+00 L2_l1linf:2.9219e+00 L3_l1linf:2.8750e+00 L4_l1linf:2.9844e+00 L5_l1linf:3.0312e+00 
L6_l1linf:3.0781e+00 L7_l1linf:3.1094e+00 L8_l1linf:3.0000e+00 L9_l1linf:2.8438e+00 L10_l1linf:2.7344e+00 L11_l1linf:2.6562e+00 L12_l1linf:2.7500e+00 L1_spectral:1.6052e-01 L2_spectral:1.5449e-01 L3_spectral:1.5650e-01 L4_spectral:1.5727e-01 L5_spectral:1.5530e-01 L6_spectral:1.5838e-01 L7_spectral:1.5951e-01 L8_spectral:1.5366e-01 L9_spectral:1.5706e-01 L10_spectral:1.5713e-01 L11_spectral:1.5733e-01 L12_spectral:1.5729e-01 train_time:251584ms step_avg:41.93ms +[2025-09-11 09:33:46] [Rank 0] step:6001/10000 train_time:252784ms step_avg:42.12ms +[2025-09-11 09:33:46] [Rank 0] step:6001/10000 train_time:252784ms step_avg:42.12ms +[2025-09-11 09:33:46] [Rank 0] step:6021/10000 train_time:253461ms step_avg:42.10ms +[2025-09-11 09:33:46] [Rank 0] step:6021/10000 train_time:253461ms step_avg:42.10ms +[2025-09-11 09:33:47] [Rank 0] step:6041/10000 train_time:254153ms step_avg:42.07ms +[2025-09-11 09:33:47] [Rank 0] step:6041/10000 train_time:254153ms step_avg:42.07ms +[2025-09-11 09:33:48] [Rank 0] step:6061/10000 train_time:254842ms step_avg:42.05ms +[2025-09-11 09:33:48] [Rank 0] step:6061/10000 train_time:254842ms step_avg:42.05ms +[2025-09-11 09:33:48] [Rank 0] step:6081/10000 train_time:255532ms step_avg:42.02ms +[2025-09-11 09:33:48] [Rank 0] step:6081/10000 train_time:255532ms step_avg:42.02ms +[2025-09-11 09:33:49] [Rank 0] step:6101/10000 train_time:256221ms step_avg:42.00ms +[2025-09-11 09:33:49] [Rank 0] step:6101/10000 train_time:256221ms step_avg:42.00ms +[2025-09-11 09:33:50] [Rank 0] step:6121/10000 train_time:256909ms step_avg:41.97ms +[2025-09-11 09:33:50] [Rank 0] step:6121/10000 train_time:256909ms step_avg:41.97ms +[2025-09-11 09:33:50] [Rank 0] step:6141/10000 train_time:257599ms step_avg:41.95ms +[2025-09-11 09:33:50] [Rank 0] step:6141/10000 train_time:257599ms step_avg:41.95ms +[2025-09-11 09:33:51] [Rank 0] step:6161/10000 train_time:258292ms step_avg:41.92ms +[2025-09-11 09:33:51] [Rank 0] step:6161/10000 train_time:258292ms step_avg:41.92ms 
+[2025-09-11 09:33:52] [Rank 0] step:6181/10000 train_time:258978ms step_avg:41.90ms +[2025-09-11 09:33:52] [Rank 0] step:6181/10000 train_time:258978ms step_avg:41.90ms +[2025-09-11 09:33:53] [Rank 0] step:6201/10000 train_time:259667ms step_avg:41.88ms +[2025-09-11 09:33:53] [Rank 0] step:6201/10000 train_time:259667ms step_avg:41.88ms +[2025-09-11 09:33:53] [Rank 0] step:6221/10000 train_time:260356ms step_avg:41.85ms +[2025-09-11 09:33:53] [Rank 0] step:6221/10000 train_time:260356ms step_avg:41.85ms +[2025-09-11 09:33:54] [Rank 0] step:6241/10000 train_time:261044ms step_avg:41.83ms +[2025-09-11 09:33:54] [Rank 0] step:6241/10000 train_time:261044ms step_avg:41.83ms +[2025-09-11 09:33:55] [Rank 0] step:6261/10000 train_time:261732ms step_avg:41.80ms +[2025-09-11 09:33:55] [Rank 0] step:6261/10000 train_time:261732ms step_avg:41.80ms +[2025-09-11 09:33:55] [Rank 0] step:6281/10000 train_time:262420ms step_avg:41.78ms +[2025-09-11 09:33:55] [Rank 0] step:6281/10000 train_time:262420ms step_avg:41.78ms +[2025-09-11 09:33:56] [Rank 0] step:6301/10000 train_time:263107ms step_avg:41.76ms +[2025-09-11 09:33:56] [Rank 0] step:6301/10000 train_time:263107ms step_avg:41.76ms +[2025-09-11 09:33:57] [Rank 0] step:6321/10000 train_time:263799ms step_avg:41.73ms +[2025-09-11 09:33:57] [Rank 0] step:6321/10000 train_time:263799ms step_avg:41.73ms +[2025-09-11 09:33:57] [Rank 0] step:6341/10000 train_time:264488ms step_avg:41.71ms +[2025-09-11 09:33:57] [Rank 0] step:6341/10000 train_time:264488ms step_avg:41.71ms +[2025-09-11 09:33:58] [Rank 0] step:6361/10000 train_time:265177ms step_avg:41.69ms +[2025-09-11 09:33:58] [Rank 0] step:6361/10000 train_time:265177ms step_avg:41.69ms +[2025-09-11 09:33:59] [Rank 0] step:6381/10000 train_time:265866ms step_avg:41.67ms +[2025-09-11 09:33:59] [Rank 0] step:6381/10000 train_time:265866ms step_avg:41.67ms +[2025-09-11 09:33:59] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 09:33:59] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 09:34:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 09:34:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 09:34:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 09:34:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 09:34:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:34:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:34:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 09:34:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 09:34:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 09:34:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 09:34:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 09:34:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 09:34:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 09:34:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 09:34:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 09:34:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 09:34:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 09:34:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 09:34:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 09:34:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 09:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 09:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 09:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 09:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 09:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 09:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 09:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 09:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 09:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 09:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 09:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 09:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 09:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 09:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 09:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 09:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 09:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 09:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 09:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 09:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 09:34:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 09:34:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 09:34:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:34:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:34:09] [Rank 0] PRINT: step:6400/10000 val_loss:4.4413 total_sharp:2.0905e-04 L1_sharp:1.6362e-04 L2_sharp:4.4047e-05 L3_sharp:3.3302e-05 L4_sharp:1.9325e-05 L5_sharp:3.2523e-05 L6_sharp:1.1892e-05 L7_sharp:2.7697e-05 L8_sharp:4.3135e-05 L9_sharp:5.1713e-05 L10_sharp:5.2986e-05 L11_sharp:8.8062e-05 L12_sharp:3.6989e-04 total_fnorm:4.8750e+01 total_l1_linf:1.3312e+05 total_spectral:2.4125e+01 L1_fnorm:1.1062e+01 L2_fnorm:1.0938e+01 L3_fnorm:1.1062e+01 L4_fnorm:1.1125e+01 L5_fnorm:1.0812e+01 L6_fnorm:1.1188e+01 L7_fnorm:1.1312e+01 L8_fnorm:1.0688e+01 L9_fnorm:1.1125e+01 L10_fnorm:1.1062e+01 L11_fnorm:1.1125e+01 L12_fnorm:1.1125e+01 L1_l1linf:2.7188e+00 L2_l1linf:2.5781e+00 L3_l1linf:2.5312e+00 L4_l1linf:2.6094e+00 L5_l1linf:2.7188e+00 L6_l1linf:2.7344e+00 L7_l1linf:2.7500e+00 L8_l1linf:2.5781e+00 L9_l1linf:2.4375e+00 L10_l1linf:2.4062e+00 L11_l1linf:2.3438e+00 L12_l1linf:2.4219e+00 L1_spectral:1.4605e-01 L2_spectral:1.4078e-01 L3_spectral:1.4400e-01 L4_spectral:1.4464e-01 L5_spectral:1.4068e-01 L6_spectral:1.4502e-01 L7_spectral:1.4590e-01 L8_spectral:1.4157e-01 L9_spectral:1.4516e-01 L10_spectral:1.4472e-01 L11_spectral:1.4540e-01 L12_spectral:1.4604e-01 train_time:266534ms step_avg:41.65ms +[2025-09-11 09:34:09] [Rank 0] PRINT: step:6400/10000 
val_loss:4.4413 total_sharp:2.0905e-04 L1_sharp:1.6362e-04 L2_sharp:4.4047e-05 L3_sharp:3.3302e-05 L4_sharp:1.9325e-05 L5_sharp:3.2523e-05 L6_sharp:1.1892e-05 L7_sharp:2.7697e-05 L8_sharp:4.3135e-05 L9_sharp:5.1713e-05 L10_sharp:5.2986e-05 L11_sharp:8.8062e-05 L12_sharp:3.6989e-04 total_fnorm:4.8750e+01 total_l1_linf:1.3312e+05 total_spectral:2.4125e+01 L1_fnorm:1.1062e+01 L2_fnorm:1.0938e+01 L3_fnorm:1.1062e+01 L4_fnorm:1.1125e+01 L5_fnorm:1.0812e+01 L6_fnorm:1.1188e+01 L7_fnorm:1.1312e+01 L8_fnorm:1.0688e+01 L9_fnorm:1.1125e+01 L10_fnorm:1.1062e+01 L11_fnorm:1.1125e+01 L12_fnorm:1.1125e+01 L1_l1linf:2.7188e+00 L2_l1linf:2.5781e+00 L3_l1linf:2.5312e+00 L4_l1linf:2.6094e+00 L5_l1linf:2.7188e+00 L6_l1linf:2.7344e+00 L7_l1linf:2.7500e+00 L8_l1linf:2.5781e+00 L9_l1linf:2.4375e+00 L10_l1linf:2.4062e+00 L11_l1linf:2.3438e+00 L12_l1linf:2.4219e+00 L1_spectral:1.4605e-01 L2_spectral:1.4078e-01 L3_spectral:1.4400e-01 L4_spectral:1.4464e-01 L5_spectral:1.4068e-01 L6_spectral:1.4502e-01 L7_spectral:1.4590e-01 L8_spectral:1.4157e-01 L9_spectral:1.4516e-01 L10_spectral:1.4472e-01 L11_spectral:1.4540e-01 L12_spectral:1.4604e-01 train_time:266534ms step_avg:41.65ms +[2025-09-11 09:34:11] [Rank 0] step:6401/10000 train_time:267709ms step_avg:41.82ms +[2025-09-11 09:34:11] [Rank 0] step:6401/10000 train_time:267709ms step_avg:41.82ms +[2025-09-11 09:34:11] [Rank 0] step:6421/10000 train_time:268386ms step_avg:41.80ms +[2025-09-11 09:34:11] [Rank 0] step:6421/10000 train_time:268386ms step_avg:41.80ms +[2025-09-11 09:34:12] [Rank 0] step:6441/10000 train_time:269075ms step_avg:41.78ms +[2025-09-11 09:34:12] [Rank 0] step:6441/10000 train_time:269075ms step_avg:41.78ms +[2025-09-11 09:34:13] [Rank 0] step:6461/10000 train_time:269764ms step_avg:41.75ms +[2025-09-11 09:34:13] [Rank 0] step:6461/10000 train_time:269764ms step_avg:41.75ms +[2025-09-11 09:34:13] [Rank 0] step:6481/10000 train_time:270455ms step_avg:41.73ms +[2025-09-11 09:34:13] [Rank 0] step:6481/10000 
train_time:270455ms step_avg:41.73ms +[2025-09-11 09:34:14] [Rank 0] step:6501/10000 train_time:271147ms step_avg:41.71ms +[2025-09-11 09:34:14] [Rank 0] step:6501/10000 train_time:271147ms step_avg:41.71ms +[2025-09-11 09:34:15] [Rank 0] step:6521/10000 train_time:271836ms step_avg:41.69ms +[2025-09-11 09:34:15] [Rank 0] step:6521/10000 train_time:271836ms step_avg:41.69ms +[2025-09-11 09:34:15] [Rank 0] step:6541/10000 train_time:272525ms step_avg:41.66ms +[2025-09-11 09:34:15] [Rank 0] step:6541/10000 train_time:272525ms step_avg:41.66ms +[2025-09-11 09:34:16] [Rank 0] step:6561/10000 train_time:273214ms step_avg:41.64ms +[2025-09-11 09:34:16] [Rank 0] step:6561/10000 train_time:273214ms step_avg:41.64ms +[2025-09-11 09:34:17] [Rank 0] step:6581/10000 train_time:273904ms step_avg:41.62ms +[2025-09-11 09:34:17] [Rank 0] step:6581/10000 train_time:273904ms step_avg:41.62ms +[2025-09-11 09:34:17] [Rank 0] step:6601/10000 train_time:274593ms step_avg:41.60ms +[2025-09-11 09:34:17] [Rank 0] step:6601/10000 train_time:274593ms step_avg:41.60ms +[2025-09-11 09:34:18] [Rank 0] step:6621/10000 train_time:275281ms step_avg:41.58ms +[2025-09-11 09:34:18] [Rank 0] step:6621/10000 train_time:275281ms step_avg:41.58ms +[2025-09-11 09:34:19] [Rank 0] step:6641/10000 train_time:275973ms step_avg:41.56ms +[2025-09-11 09:34:19] [Rank 0] step:6641/10000 train_time:275973ms step_avg:41.56ms +[2025-09-11 09:34:20] [Rank 0] step:6661/10000 train_time:276662ms step_avg:41.53ms +[2025-09-11 09:34:20] [Rank 0] step:6661/10000 train_time:276662ms step_avg:41.53ms +[2025-09-11 09:34:20] [Rank 0] step:6681/10000 train_time:277358ms step_avg:41.51ms +[2025-09-11 09:34:20] [Rank 0] step:6681/10000 train_time:277358ms step_avg:41.51ms +[2025-09-11 09:34:21] [Rank 0] step:6701/10000 train_time:278052ms step_avg:41.49ms +[2025-09-11 09:34:21] [Rank 0] step:6701/10000 train_time:278052ms step_avg:41.49ms +[2025-09-11 09:34:22] [Rank 0] step:6721/10000 train_time:278749ms step_avg:41.47ms 
+[2025-09-11 09:34:22] [Rank 0] step:6721/10000 train_time:278749ms step_avg:41.47ms +[2025-09-11 09:34:22] [Rank 0] step:6741/10000 train_time:279445ms step_avg:41.45ms +[2025-09-11 09:34:22] [Rank 0] step:6741/10000 train_time:279445ms step_avg:41.45ms +[2025-09-11 09:34:23] [Rank 0] step:6761/10000 train_time:280142ms step_avg:41.43ms +[2025-09-11 09:34:23] [Rank 0] step:6761/10000 train_time:280142ms step_avg:41.43ms +[2025-09-11 09:34:24] [Rank 0] step:6781/10000 train_time:280838ms step_avg:41.42ms +[2025-09-11 09:34:24] [Rank 0] step:6781/10000 train_time:280838ms step_avg:41.42ms +[2025-09-11 09:34:24] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 09:34:24] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 09:34:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 09:34:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 09:34:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 09:34:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 09:34:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:34:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:34:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 09:34:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 09:34:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 09:34:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 09:34:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 09:34:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 09:34:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 09:34:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 09:34:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 09:34:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 09:34:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 09:34:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 09:34:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 09:34:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 09:34:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 09:34:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 09:34:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 09:34:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 09:34:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 09:34:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 09:34:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 09:34:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 09:34:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 09:34:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 09:34:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 09:34:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 09:34:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 09:34:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 09:34:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 09:34:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 09:34:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 09:34:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 09:34:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 09:34:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 09:34:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 09:34:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 09:34:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:34:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:34:34] [Rank 0] PRINT: step:6800/10000 val_loss:4.4032 total_sharp:2.1874e-04 L1_sharp:1.5441e-04 L2_sharp:1.1736e-05 L3_sharp:1.8183e-05 L4_sharp:1.8607e-05 L5_sharp:2.7537e-05 L6_sharp:1.2633e-05 L7_sharp:2.6506e-05 L8_sharp:4.1537e-05 L9_sharp:5.3354e-05 L10_sharp:5.6437e-05 L11_sharp:1.0363e-04 L12_sharp:5.8396e-04 total_fnorm:4.4000e+01 total_l1_linf:1.1725e+05 total_spectral:2.2000e+01 L1_fnorm:9.8125e+00 L2_fnorm:9.6875e+00 L3_fnorm:9.7500e+00 L4_fnorm:9.8750e+00 L5_fnorm:9.5000e+00 L6_fnorm:9.8750e+00 L7_fnorm:9.9375e+00 L8_fnorm:9.4375e+00 L9_fnorm:9.8125e+00 L10_fnorm:9.7500e+00 L11_fnorm:9.7500e+00 L12_fnorm:9.8125e+00 L1_l1linf:2.3906e+00 L2_l1linf:2.2344e+00 L3_l1linf:2.1875e+00 L4_l1linf:2.2500e+00 L5_l1linf:2.3750e+00 L6_l1linf:2.3281e+00 L7_l1linf:2.3594e+00 L8_l1linf:2.2812e+00 L9_l1linf:2.0938e+00 L10_l1linf:1.9922e+00 L11_l1linf:2.0000e+00 L12_l1linf:2.1562e+00 L1_spectral:1.3235e-01 L2_spectral:1.2654e-01 L3_spectral:1.2849e-01 L4_spectral:1.2885e-01 L5_spectral:1.2558e-01 L6_spectral:1.3049e-01 L7_spectral:1.3112e-01 L8_spectral:1.2748e-01 L9_spectral:1.3122e-01 L10_spectral:1.3136e-01 L11_spectral:1.3144e-01 L12_spectral:1.3085e-01 train_time:281514ms step_avg:41.40ms +[2025-09-11 09:34:34] [Rank 0] PRINT: step:6800/10000 val_loss:4.4032 total_sharp:2.1874e-04 L1_sharp:1.5441e-04 L2_sharp:1.1736e-05 L3_sharp:1.8183e-05 L4_sharp:1.8607e-05 L5_sharp:2.7537e-05 L6_sharp:1.2633e-05 L7_sharp:2.6506e-05 L8_sharp:4.1537e-05 L9_sharp:5.3354e-05 L10_sharp:5.6437e-05 L11_sharp:1.0363e-04 L12_sharp:5.8396e-04 total_fnorm:4.4000e+01 total_l1_linf:1.1725e+05 total_spectral:2.2000e+01 L1_fnorm:9.8125e+00 L2_fnorm:9.6875e+00 L3_fnorm:9.7500e+00 L4_fnorm:9.8750e+00 L5_fnorm:9.5000e+00 L6_fnorm:9.8750e+00 L7_fnorm:9.9375e+00 L8_fnorm:9.4375e+00 L9_fnorm:9.8125e+00 L10_fnorm:9.7500e+00 L11_fnorm:9.7500e+00 L12_fnorm:9.8125e+00 L1_l1linf:2.3906e+00 L2_l1linf:2.2344e+00 L3_l1linf:2.1875e+00 L4_l1linf:2.2500e+00 L5_l1linf:2.3750e+00 
L6_l1linf:2.3281e+00 L7_l1linf:2.3594e+00 L8_l1linf:2.2812e+00 L9_l1linf:2.0938e+00 L10_l1linf:1.9922e+00 L11_l1linf:2.0000e+00 L12_l1linf:2.1562e+00 L1_spectral:1.3235e-01 L2_spectral:1.2654e-01 L3_spectral:1.2849e-01 L4_spectral:1.2885e-01 L5_spectral:1.2558e-01 L6_spectral:1.3049e-01 L7_spectral:1.3112e-01 L8_spectral:1.2748e-01 L9_spectral:1.3122e-01 L10_spectral:1.3136e-01 L11_spectral:1.3144e-01 L12_spectral:1.3085e-01 train_time:281514ms step_avg:41.40ms +[2025-09-11 09:34:36] [Rank 0] step:6801/10000 train_time:282731ms step_avg:41.57ms +[2025-09-11 09:34:36] [Rank 0] step:6801/10000 train_time:282731ms step_avg:41.57ms +[2025-09-11 09:34:37] [Rank 0] step:6821/10000 train_time:283750ms step_avg:41.60ms +[2025-09-11 09:34:37] [Rank 0] step:6821/10000 train_time:283750ms step_avg:41.60ms +[2025-09-11 09:34:37] [Rank 0] step:6841/10000 train_time:284451ms step_avg:41.58ms +[2025-09-11 09:34:37] [Rank 0] step:6841/10000 train_time:284451ms step_avg:41.58ms +[2025-09-11 09:34:38] [Rank 0] step:6861/10000 train_time:285150ms step_avg:41.56ms +[2025-09-11 09:34:38] [Rank 0] step:6861/10000 train_time:285150ms step_avg:41.56ms +[2025-09-11 09:34:39] [Rank 0] step:6881/10000 train_time:285847ms step_avg:41.54ms +[2025-09-11 09:34:39] [Rank 0] step:6881/10000 train_time:285847ms step_avg:41.54ms +[2025-09-11 09:34:39] [Rank 0] step:6901/10000 train_time:286543ms step_avg:41.52ms +[2025-09-11 09:34:39] [Rank 0] step:6901/10000 train_time:286543ms step_avg:41.52ms +[2025-09-11 09:34:40] [Rank 0] step:6921/10000 train_time:287239ms step_avg:41.50ms +[2025-09-11 09:34:40] [Rank 0] step:6921/10000 train_time:287239ms step_avg:41.50ms +[2025-09-11 09:34:41] [Rank 0] step:6941/10000 train_time:287936ms step_avg:41.48ms +[2025-09-11 09:34:41] [Rank 0] step:6941/10000 train_time:287936ms step_avg:41.48ms +[2025-09-11 09:34:41] [Rank 0] step:6961/10000 train_time:288633ms step_avg:41.46ms +[2025-09-11 09:34:41] [Rank 0] step:6961/10000 train_time:288633ms step_avg:41.46ms 
+[2025-09-11 09:34:42] [Rank 0] step:6981/10000 train_time:289332ms step_avg:41.45ms +[2025-09-11 09:34:42] [Rank 0] step:6981/10000 train_time:289332ms step_avg:41.45ms +[2025-09-11 09:34:43] [Rank 0] step:7001/10000 train_time:290028ms step_avg:41.43ms +[2025-09-11 09:34:43] [Rank 0] step:7001/10000 train_time:290028ms step_avg:41.43ms +[2025-09-11 09:34:44] [Rank 0] step:7021/10000 train_time:290725ms step_avg:41.41ms +[2025-09-11 09:34:44] [Rank 0] step:7021/10000 train_time:290725ms step_avg:41.41ms +[2025-09-11 09:34:44] [Rank 0] step:7041/10000 train_time:291422ms step_avg:41.39ms +[2025-09-11 09:34:44] [Rank 0] step:7041/10000 train_time:291422ms step_avg:41.39ms +[2025-09-11 09:34:45] [Rank 0] step:7061/10000 train_time:292120ms step_avg:41.37ms +[2025-09-11 09:34:45] [Rank 0] step:7061/10000 train_time:292120ms step_avg:41.37ms +[2025-09-11 09:34:46] [Rank 0] step:7081/10000 train_time:292815ms step_avg:41.35ms +[2025-09-11 09:34:46] [Rank 0] step:7081/10000 train_time:292815ms step_avg:41.35ms +[2025-09-11 09:34:46] [Rank 0] step:7101/10000 train_time:293511ms step_avg:41.33ms +[2025-09-11 09:34:46] [Rank 0] step:7101/10000 train_time:293511ms step_avg:41.33ms +[2025-09-11 09:34:47] [Rank 0] step:7121/10000 train_time:294208ms step_avg:41.32ms +[2025-09-11 09:34:47] [Rank 0] step:7121/10000 train_time:294208ms step_avg:41.32ms +[2025-09-11 09:34:48] [Rank 0] step:7141/10000 train_time:294904ms step_avg:41.30ms +[2025-09-11 09:34:48] [Rank 0] step:7141/10000 train_time:294904ms step_avg:41.30ms +[2025-09-11 09:34:48] [Rank 0] step:7161/10000 train_time:295603ms step_avg:41.28ms +[2025-09-11 09:34:48] [Rank 0] step:7161/10000 train_time:295603ms step_avg:41.28ms +[2025-09-11 09:34:49] [Rank 0] step:7181/10000 train_time:296298ms step_avg:41.26ms +[2025-09-11 09:34:49] [Rank 0] step:7181/10000 train_time:296298ms step_avg:41.26ms +[2025-09-11 09:34:50] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 09:34:50] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 09:34:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 09:34:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 09:34:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 09:34:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 09:34:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:34:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:34:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 09:34:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 09:34:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 09:34:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 09:34:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 09:34:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 09:34:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 09:34:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 09:34:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 09:34:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 09:34:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 09:34:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 09:34:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 09:34:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 09:34:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 09:34:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 09:34:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 09:34:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 09:34:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 09:34:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 09:34:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 09:34:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 09:34:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 09:34:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 09:34:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 09:34:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 09:34:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 09:34:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 09:34:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 09:34:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 09:34:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 09:34:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 09:34:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 09:34:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 09:34:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 09:34:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 09:35:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:35:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:35:00] [Rank 0] PRINT: step:7200/10000 val_loss:4.3668 total_sharp:1.8200e-04 L1_sharp:1.1281e-04 L2_sharp:-9.9272e-07 L3_sharp:6.5169e-06 L4_sharp:2.1370e-05 L5_sharp:2.9581e-05 L6_sharp:1.6706e-05 L7_sharp:2.7759e-05 L8_sharp:4.2608e-05 L9_sharp:4.3142e-05 L10_sharp:5.3605e-05 L11_sharp:8.5267e-05 L12_sharp:3.5688e-04 total_fnorm:3.7750e+01 total_l1_linf:9.5744e+04 total_spectral:1.9000e+01 L1_fnorm:8.5625e+00 L2_fnorm:8.4375e+00 L3_fnorm:8.5000e+00 L4_fnorm:8.6250e+00 L5_fnorm:8.3125e+00 L6_fnorm:8.6250e+00 L7_fnorm:8.6875e+00 L8_fnorm:8.1875e+00 L9_fnorm:8.5625e+00 L10_fnorm:8.5000e+00 L11_fnorm:8.5000e+00 L12_fnorm:8.5625e+00 L1_l1linf:1.9609e+00 L2_l1linf:1.9219e+00 L3_l1linf:1.8359e+00 L4_l1linf:1.9062e+00 L5_l1linf:1.9844e+00 L6_l1linf:1.9922e+00 L7_l1linf:1.9766e+00 L8_l1linf:1.9141e+00 L9_l1linf:1.7578e+00 L10_l1linf:1.6797e+00 L11_l1linf:1.6562e+00 L12_l1linf:1.8125e+00 L1_spectral:1.1664e-01 L2_spectral:1.1280e-01 L3_spectral:1.1319e-01 L4_spectral:1.1428e-01 L5_spectral:1.1147e-01 L6_spectral:1.1638e-01 L7_spectral:1.1634e-01 L8_spectral:1.1424e-01 L9_spectral:1.1676e-01 L10_spectral:1.1634e-01 L11_spectral:1.1689e-01 L12_spectral:1.1718e-01 train_time:296974ms step_avg:41.25ms +[2025-09-11 09:35:00] [Rank 0] PRINT: step:7200/10000 
val_loss:4.3668 total_sharp:1.8200e-04 L1_sharp:1.1281e-04 L2_sharp:-9.9272e-07 L3_sharp:6.5169e-06 L4_sharp:2.1370e-05 L5_sharp:2.9581e-05 L6_sharp:1.6706e-05 L7_sharp:2.7759e-05 L8_sharp:4.2608e-05 L9_sharp:4.3142e-05 L10_sharp:5.3605e-05 L11_sharp:8.5267e-05 L12_sharp:3.5688e-04 total_fnorm:3.7750e+01 total_l1_linf:9.5744e+04 total_spectral:1.9000e+01 L1_fnorm:8.5625e+00 L2_fnorm:8.4375e+00 L3_fnorm:8.5000e+00 L4_fnorm:8.6250e+00 L5_fnorm:8.3125e+00 L6_fnorm:8.6250e+00 L7_fnorm:8.6875e+00 L8_fnorm:8.1875e+00 L9_fnorm:8.5625e+00 L10_fnorm:8.5000e+00 L11_fnorm:8.5000e+00 L12_fnorm:8.5625e+00 L1_l1linf:1.9609e+00 L2_l1linf:1.9219e+00 L3_l1linf:1.8359e+00 L4_l1linf:1.9062e+00 L5_l1linf:1.9844e+00 L6_l1linf:1.9922e+00 L7_l1linf:1.9766e+00 L8_l1linf:1.9141e+00 L9_l1linf:1.7578e+00 L10_l1linf:1.6797e+00 L11_l1linf:1.6562e+00 L12_l1linf:1.8125e+00 L1_spectral:1.1664e-01 L2_spectral:1.1280e-01 L3_spectral:1.1319e-01 L4_spectral:1.1428e-01 L5_spectral:1.1147e-01 L6_spectral:1.1638e-01 L7_spectral:1.1634e-01 L8_spectral:1.1424e-01 L9_spectral:1.1676e-01 L10_spectral:1.1634e-01 L11_spectral:1.1689e-01 L12_spectral:1.1718e-01 train_time:296974ms step_avg:41.25ms +[2025-09-11 09:35:01] [Rank 0] step:7201/10000 train_time:298198ms step_avg:41.41ms +[2025-09-11 09:35:01] [Rank 0] step:7201/10000 train_time:298198ms step_avg:41.41ms +[2025-09-11 09:35:02] [Rank 0] step:7221/10000 train_time:298951ms step_avg:41.40ms +[2025-09-11 09:35:02] [Rank 0] step:7221/10000 train_time:298951ms step_avg:41.40ms +[2025-09-11 09:35:03] [Rank 0] step:7241/10000 train_time:299650ms step_avg:41.38ms +[2025-09-11 09:35:03] [Rank 0] step:7241/10000 train_time:299650ms step_avg:41.38ms +[2025-09-11 09:35:03] [Rank 0] step:7261/10000 train_time:300349ms step_avg:41.36ms +[2025-09-11 09:35:03] [Rank 0] step:7261/10000 train_time:300349ms step_avg:41.36ms +[2025-09-11 09:35:04] [Rank 0] step:7281/10000 train_time:301052ms step_avg:41.35ms +[2025-09-11 09:35:04] [Rank 0] step:7281/10000 
train_time:301052ms step_avg:41.35ms +[2025-09-11 09:35:05] [Rank 0] step:7301/10000 train_time:301749ms step_avg:41.33ms +[2025-09-11 09:35:05] [Rank 0] step:7301/10000 train_time:301749ms step_avg:41.33ms +[2025-09-11 09:35:05] [Rank 0] step:7321/10000 train_time:302447ms step_avg:41.31ms +[2025-09-11 09:35:05] [Rank 0] step:7321/10000 train_time:302447ms step_avg:41.31ms +[2025-09-11 09:35:06] [Rank 0] step:7341/10000 train_time:303146ms step_avg:41.29ms +[2025-09-11 09:35:06] [Rank 0] step:7341/10000 train_time:303146ms step_avg:41.29ms +[2025-09-11 09:35:07] [Rank 0] step:7361/10000 train_time:303844ms step_avg:41.28ms +[2025-09-11 09:35:07] [Rank 0] step:7361/10000 train_time:303844ms step_avg:41.28ms +[2025-09-11 09:35:07] [Rank 0] step:7381/10000 train_time:304542ms step_avg:41.26ms +[2025-09-11 09:35:07] [Rank 0] step:7381/10000 train_time:304542ms step_avg:41.26ms +[2025-09-11 09:35:08] [Rank 0] step:7401/10000 train_time:305240ms step_avg:41.24ms +[2025-09-11 09:35:08] [Rank 0] step:7401/10000 train_time:305240ms step_avg:41.24ms +[2025-09-11 09:35:09] [Rank 0] step:7421/10000 train_time:305937ms step_avg:41.23ms +[2025-09-11 09:35:09] [Rank 0] step:7421/10000 train_time:305937ms step_avg:41.23ms +[2025-09-11 09:35:10] [Rank 0] step:7441/10000 train_time:306635ms step_avg:41.21ms +[2025-09-11 09:35:10] [Rank 0] step:7441/10000 train_time:306635ms step_avg:41.21ms +[2025-09-11 09:35:10] [Rank 0] step:7461/10000 train_time:307333ms step_avg:41.19ms +[2025-09-11 09:35:10] [Rank 0] step:7461/10000 train_time:307333ms step_avg:41.19ms +[2025-09-11 09:35:11] [Rank 0] step:7481/10000 train_time:308034ms step_avg:41.18ms +[2025-09-11 09:35:11] [Rank 0] step:7481/10000 train_time:308034ms step_avg:41.18ms +[2025-09-11 09:35:12] [Rank 0] step:7501/10000 train_time:308733ms step_avg:41.16ms +[2025-09-11 09:35:12] [Rank 0] step:7501/10000 train_time:308733ms step_avg:41.16ms +[2025-09-11 09:35:12] [Rank 0] step:7521/10000 train_time:309432ms step_avg:41.14ms 
+[2025-09-11 09:35:12] [Rank 0] step:7521/10000 train_time:309432ms step_avg:41.14ms +[2025-09-11 09:35:13] [Rank 0] step:7541/10000 train_time:310128ms step_avg:41.13ms +[2025-09-11 09:35:13] [Rank 0] step:7541/10000 train_time:310128ms step_avg:41.13ms +[2025-09-11 09:35:14] [Rank 0] step:7561/10000 train_time:310828ms step_avg:41.11ms +[2025-09-11 09:35:14] [Rank 0] step:7561/10000 train_time:310828ms step_avg:41.11ms +[2025-09-11 09:35:14] [Rank 0] step:7581/10000 train_time:311527ms step_avg:41.09ms +[2025-09-11 09:35:14] [Rank 0] step:7581/10000 train_time:311527ms step_avg:41.09ms +[2025-09-11 09:35:15] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 09:35:15] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 09:35:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 09:35:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 09:35:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 09:35:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 09:35:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:35:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:35:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 09:35:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 09:35:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 09:35:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 09:35:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 09:35:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 09:35:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 09:35:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 09:35:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 09:35:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 09:35:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 09:35:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 09:35:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 09:35:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 09:35:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 09:35:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 09:35:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 09:35:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 09:35:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 09:35:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 09:35:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 09:35:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 09:35:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 09:35:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 09:35:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 09:35:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 09:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 09:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 09:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 09:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 09:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 09:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 09:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 09:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 09:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 09:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 09:35:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:35:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:35:25] [Rank 0] PRINT: step:7600/10000 val_loss:4.3307 total_sharp:1.8897e-04 L1_sharp:1.4495e-04 L2_sharp:2.9216e-05 L3_sharp:1.1527e-05 L4_sharp:9.5658e-06 L5_sharp:2.6853e-05 L6_sharp:6.2920e-06 L7_sharp:2.6729e-05 L8_sharp:3.9634e-05 L9_sharp:4.7496e-05 L10_sharp:5.1982e-05 L11_sharp:9.3979e-05 L12_sharp:3.9724e-04 total_fnorm:3.1000e+01 total_l1_linf:7.3728e+04 total_spectral:1.5688e+01 L1_fnorm:7.2812e+00 L2_fnorm:7.1562e+00 L3_fnorm:7.1875e+00 L4_fnorm:7.3125e+00 L5_fnorm:7.0312e+00 L6_fnorm:7.2500e+00 L7_fnorm:7.3125e+00 L8_fnorm:6.8750e+00 L9_fnorm:7.1875e+00 L10_fnorm:7.1562e+00 L11_fnorm:7.1562e+00 L12_fnorm:7.1875e+00 L1_l1linf:1.5781e+00 L2_l1linf:1.5391e+00 L3_l1linf:1.4922e+00 L4_l1linf:1.5781e+00 L5_l1linf:1.5703e+00 L6_l1linf:1.6484e+00 L7_l1linf:1.5859e+00 L8_l1linf:1.4922e+00 L9_l1linf:1.3828e+00 L10_l1linf:1.3750e+00 L11_l1linf:1.3438e+00 L12_l1linf:1.5859e+00 L1_spectral:1.0062e-01 L2_spectral:9.6737e-02 L3_spectral:9.7501e-02 L4_spectral:9.9200e-02 L5_spectral:9.5348e-02 L6_spectral:9.9751e-02 L7_spectral:1.0074e-01 L8_spectral:9.7438e-02 L9_spectral:1.0097e-01 L10_spectral:1.0100e-01 L11_spectral:1.0164e-01 L12_spectral:1.0018e-01 train_time:312208ms step_avg:41.08ms +[2025-09-11 09:35:25] [Rank 0] PRINT: step:7600/10000 val_loss:4.3307 total_sharp:1.8897e-04 L1_sharp:1.4495e-04 L2_sharp:2.9216e-05 L3_sharp:1.1527e-05 L4_sharp:9.5658e-06 L5_sharp:2.6853e-05 L6_sharp:6.2920e-06 L7_sharp:2.6729e-05 L8_sharp:3.9634e-05 L9_sharp:4.7496e-05 L10_sharp:5.1982e-05 L11_sharp:9.3979e-05 L12_sharp:3.9724e-04 total_fnorm:3.1000e+01 total_l1_linf:7.3728e+04 total_spectral:1.5688e+01 L1_fnorm:7.2812e+00 L2_fnorm:7.1562e+00 L3_fnorm:7.1875e+00 L4_fnorm:7.3125e+00 L5_fnorm:7.0312e+00 L6_fnorm:7.2500e+00 L7_fnorm:7.3125e+00 L8_fnorm:6.8750e+00 L9_fnorm:7.1875e+00 L10_fnorm:7.1562e+00 L11_fnorm:7.1562e+00 L12_fnorm:7.1875e+00 L1_l1linf:1.5781e+00 L2_l1linf:1.5391e+00 L3_l1linf:1.4922e+00 L4_l1linf:1.5781e+00 L5_l1linf:1.5703e+00 
L6_l1linf:1.6484e+00 L7_l1linf:1.5859e+00 L8_l1linf:1.4922e+00 L9_l1linf:1.3828e+00 L10_l1linf:1.3750e+00 L11_l1linf:1.3438e+00 L12_l1linf:1.5859e+00 L1_spectral:1.0062e-01 L2_spectral:9.6737e-02 L3_spectral:9.7501e-02 L4_spectral:9.9200e-02 L5_spectral:9.5348e-02 L6_spectral:9.9751e-02 L7_spectral:1.0074e-01 L8_spectral:9.7438e-02 L9_spectral:1.0097e-01 L10_spectral:1.0100e-01 L11_spectral:1.0164e-01 L12_spectral:1.0018e-01 train_time:312208ms step_avg:41.08ms +[2025-09-11 09:35:26] [Rank 0] step:7601/10000 train_time:313419ms step_avg:41.23ms +[2025-09-11 09:35:26] [Rank 0] step:7601/10000 train_time:313419ms step_avg:41.23ms +[2025-09-11 09:35:27] [Rank 0] step:7621/10000 train_time:314148ms step_avg:41.22ms +[2025-09-11 09:35:27] [Rank 0] step:7621/10000 train_time:314148ms step_avg:41.22ms +[2025-09-11 09:35:28] [Rank 0] step:7641/10000 train_time:314849ms step_avg:41.21ms +[2025-09-11 09:35:28] [Rank 0] step:7641/10000 train_time:314849ms step_avg:41.21ms +[2025-09-11 09:35:28] [Rank 0] step:7661/10000 train_time:315548ms step_avg:41.19ms +[2025-09-11 09:35:28] [Rank 0] step:7661/10000 train_time:315548ms step_avg:41.19ms +[2025-09-11 09:35:29] [Rank 0] step:7681/10000 train_time:316247ms step_avg:41.17ms +[2025-09-11 09:35:29] [Rank 0] step:7681/10000 train_time:316247ms step_avg:41.17ms +[2025-09-11 09:35:30] [Rank 0] step:7701/10000 train_time:316948ms step_avg:41.16ms +[2025-09-11 09:35:30] [Rank 0] step:7701/10000 train_time:316948ms step_avg:41.16ms +[2025-09-11 09:35:31] [Rank 0] step:7721/10000 train_time:317648ms step_avg:41.14ms +[2025-09-11 09:35:31] [Rank 0] step:7721/10000 train_time:317648ms step_avg:41.14ms +[2025-09-11 09:35:31] [Rank 0] step:7741/10000 train_time:318347ms step_avg:41.12ms +[2025-09-11 09:35:31] [Rank 0] step:7741/10000 train_time:318347ms step_avg:41.12ms +[2025-09-11 09:35:32] [Rank 0] step:7761/10000 train_time:319046ms step_avg:41.11ms +[2025-09-11 09:35:32] [Rank 0] step:7761/10000 train_time:319046ms step_avg:41.11ms 
+[2025-09-11 09:35:33] [Rank 0] step:7781/10000 train_time:319748ms step_avg:41.09ms +[2025-09-11 09:35:33] [Rank 0] step:7781/10000 train_time:319748ms step_avg:41.09ms +[2025-09-11 09:35:33] [Rank 0] step:7801/10000 train_time:320445ms step_avg:41.08ms +[2025-09-11 09:35:33] [Rank 0] step:7801/10000 train_time:320445ms step_avg:41.08ms +[2025-09-11 09:35:34] [Rank 0] step:7821/10000 train_time:321145ms step_avg:41.06ms +[2025-09-11 09:35:34] [Rank 0] step:7821/10000 train_time:321145ms step_avg:41.06ms +[2025-09-11 09:35:35] [Rank 0] step:7841/10000 train_time:321846ms step_avg:41.05ms +[2025-09-11 09:35:35] [Rank 0] step:7841/10000 train_time:321846ms step_avg:41.05ms +[2025-09-11 09:35:35] [Rank 0] step:7861/10000 train_time:322547ms step_avg:41.03ms +[2025-09-11 09:35:35] [Rank 0] step:7861/10000 train_time:322547ms step_avg:41.03ms +[2025-09-11 09:35:36] [Rank 0] step:7881/10000 train_time:323246ms step_avg:41.02ms +[2025-09-11 09:35:36] [Rank 0] step:7881/10000 train_time:323246ms step_avg:41.02ms +[2025-09-11 09:35:37] [Rank 0] step:7901/10000 train_time:324220ms step_avg:41.04ms +[2025-09-11 09:35:37] [Rank 0] step:7901/10000 train_time:324220ms step_avg:41.04ms +[2025-09-11 09:35:38] [Rank 0] step:7921/10000 train_time:325152ms step_avg:41.05ms +[2025-09-11 09:35:38] [Rank 0] step:7921/10000 train_time:325152ms step_avg:41.05ms +[2025-09-11 09:35:39] [Rank 0] step:7941/10000 train_time:325852ms step_avg:41.03ms +[2025-09-11 09:35:39] [Rank 0] step:7941/10000 train_time:325852ms step_avg:41.03ms +[2025-09-11 09:35:40] [Rank 0] step:7961/10000 train_time:326850ms step_avg:41.06ms +[2025-09-11 09:35:40] [Rank 0] step:7961/10000 train_time:326850ms step_avg:41.06ms +[2025-09-11 09:35:40] [Rank 0] step:7981/10000 train_time:327551ms step_avg:41.04ms +[2025-09-11 09:35:40] [Rank 0] step:7981/10000 train_time:327551ms step_avg:41.04ms +[2025-09-11 09:35:41] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 09:35:41] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 09:35:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 09:35:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 09:35:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 09:35:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 09:35:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:35:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:35:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 09:35:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 09:35:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 09:35:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 09:35:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 09:35:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 09:35:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 09:35:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 09:35:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 09:35:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 09:35:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 09:35:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 09:35:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 09:35:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 09:35:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 09:35:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 09:35:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 09:35:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 09:35:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 09:35:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 09:35:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 09:35:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 09:35:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 09:35:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 09:35:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 09:35:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 09:35:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 09:35:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 09:35:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 09:35:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 09:35:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 09:35:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 09:35:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 09:35:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 09:35:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 09:35:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 09:35:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:35:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:35:51] [Rank 0] PRINT: step:8000/10000 val_loss:4.3131 total_sharp:1.7565e-04 L1_sharp:1.3885e-04 L2_sharp:3.4322e-05 L3_sharp:9.1320e-06 L4_sharp:1.1323e-05 L5_sharp:3.2093e-05 L6_sharp:1.7801e-05 L7_sharp:2.8938e-05 L8_sharp:4.0073e-05 L9_sharp:4.6510e-05 L10_sharp:5.3025e-05 L11_sharp:8.5210e-05 L12_sharp:4.6269e-04 total_fnorm:2.5750e+01 total_l1_linf:5.7088e+04 total_spectral:1.2938e+01 L1_fnorm:5.9688e+00 L2_fnorm:5.8125e+00 L3_fnorm:5.8750e+00 L4_fnorm:5.9062e+00 L5_fnorm:5.6562e+00 L6_fnorm:5.9062e+00 L7_fnorm:5.9062e+00 L8_fnorm:5.5938e+00 L9_fnorm:5.8438e+00 L10_fnorm:5.7812e+00 L11_fnorm:5.8125e+00 L12_fnorm:5.8438e+00 L1_l1linf:1.2188e+00 L2_l1linf:1.2109e+00 L3_l1linf:1.1641e+00 L4_l1linf:1.2266e+00 L5_l1linf:1.2500e+00 L6_l1linf:1.2656e+00 L7_l1linf:1.2656e+00 L8_l1linf:1.2109e+00 L9_l1linf:1.0859e+00 L10_l1linf:1.0391e+00 L11_l1linf:1.0391e+00 L12_l1linf:1.1797e+00 L1_spectral:8.4709e-02 L2_spectral:7.9166e-02 L3_spectral:8.0877e-02 L4_spectral:8.1432e-02 L5_spectral:7.8899e-02 L6_spectral:8.3883e-02 L7_spectral:8.4283e-02 L8_spectral:8.2129e-02 L9_spectral:8.3515e-02 L10_spectral:8.3724e-02 L11_spectral:8.3702e-02 L12_spectral:8.3502e-02 train_time:328230ms step_avg:41.03ms +[2025-09-11 09:35:51] [Rank 0] PRINT: step:8000/10000 
val_loss:4.3131 total_sharp:1.7565e-04 L1_sharp:1.3885e-04 L2_sharp:3.4322e-05 L3_sharp:9.1320e-06 L4_sharp:1.1323e-05 L5_sharp:3.2093e-05 L6_sharp:1.7801e-05 L7_sharp:2.8938e-05 L8_sharp:4.0073e-05 L9_sharp:4.6510e-05 L10_sharp:5.3025e-05 L11_sharp:8.5210e-05 L12_sharp:4.6269e-04 total_fnorm:2.5750e+01 total_l1_linf:5.7088e+04 total_spectral:1.2938e+01 L1_fnorm:5.9688e+00 L2_fnorm:5.8125e+00 L3_fnorm:5.8750e+00 L4_fnorm:5.9062e+00 L5_fnorm:5.6562e+00 L6_fnorm:5.9062e+00 L7_fnorm:5.9062e+00 L8_fnorm:5.5938e+00 L9_fnorm:5.8438e+00 L10_fnorm:5.7812e+00 L11_fnorm:5.8125e+00 L12_fnorm:5.8438e+00 L1_l1linf:1.2188e+00 L2_l1linf:1.2109e+00 L3_l1linf:1.1641e+00 L4_l1linf:1.2266e+00 L5_l1linf:1.2500e+00 L6_l1linf:1.2656e+00 L7_l1linf:1.2656e+00 L8_l1linf:1.2109e+00 L9_l1linf:1.0859e+00 L10_l1linf:1.0391e+00 L11_l1linf:1.0391e+00 L12_l1linf:1.1797e+00 L1_spectral:8.4709e-02 L2_spectral:7.9166e-02 L3_spectral:8.0877e-02 L4_spectral:8.1432e-02 L5_spectral:7.8899e-02 L6_spectral:8.3883e-02 L7_spectral:8.4283e-02 L8_spectral:8.2129e-02 L9_spectral:8.3515e-02 L10_spectral:8.3724e-02 L11_spectral:8.3702e-02 L12_spectral:8.3502e-02 train_time:328230ms step_avg:41.03ms +[2025-09-11 09:35:52] [Rank 0] step:8001/10000 train_time:329441ms step_avg:41.17ms +[2025-09-11 09:35:52] [Rank 0] step:8001/10000 train_time:329441ms step_avg:41.17ms +[2025-09-11 09:35:53] [Rank 0] step:8021/10000 train_time:330157ms step_avg:41.16ms +[2025-09-11 09:35:53] [Rank 0] step:8021/10000 train_time:330157ms step_avg:41.16ms +[2025-09-11 09:35:54] [Rank 0] step:8041/10000 train_time:330859ms step_avg:41.15ms +[2025-09-11 09:35:54] [Rank 0] step:8041/10000 train_time:330859ms step_avg:41.15ms +[2025-09-11 09:35:54] [Rank 0] step:8061/10000 train_time:331561ms step_avg:41.13ms +[2025-09-11 09:35:54] [Rank 0] step:8061/10000 train_time:331561ms step_avg:41.13ms +[2025-09-11 09:35:55] [Rank 0] step:8081/10000 train_time:332258ms step_avg:41.12ms +[2025-09-11 09:35:55] [Rank 0] step:8081/10000 
train_time:332258ms step_avg:41.12ms +[2025-09-11 09:35:56] [Rank 0] step:8101/10000 train_time:332956ms step_avg:41.10ms +[2025-09-11 09:35:56] [Rank 0] step:8101/10000 train_time:332956ms step_avg:41.10ms +[2025-09-11 09:35:56] [Rank 0] step:8121/10000 train_time:333658ms step_avg:41.09ms +[2025-09-11 09:35:56] [Rank 0] step:8121/10000 train_time:333658ms step_avg:41.09ms +[2025-09-11 09:35:58] [Rank 0] step:8141/10000 train_time:335096ms step_avg:41.16ms +[2025-09-11 09:35:58] [Rank 0] step:8141/10000 train_time:335096ms step_avg:41.16ms +[2025-09-11 09:35:59] [Rank 0] step:8161/10000 train_time:335798ms step_avg:41.15ms +[2025-09-11 09:35:59] [Rank 0] step:8161/10000 train_time:335798ms step_avg:41.15ms +[2025-09-11 09:35:59] [Rank 0] step:8181/10000 train_time:336508ms step_avg:41.13ms +[2025-09-11 09:35:59] [Rank 0] step:8181/10000 train_time:336508ms step_avg:41.13ms +[2025-09-11 09:36:00] [Rank 0] step:8201/10000 train_time:337214ms step_avg:41.12ms +[2025-09-11 09:36:00] [Rank 0] step:8201/10000 train_time:337214ms step_avg:41.12ms +[2025-09-11 09:36:01] [Rank 0] step:8221/10000 train_time:337920ms step_avg:41.10ms +[2025-09-11 09:36:01] [Rank 0] step:8221/10000 train_time:337920ms step_avg:41.10ms +[2025-09-11 09:36:01] [Rank 0] step:8241/10000 train_time:338635ms step_avg:41.09ms +[2025-09-11 09:36:01] [Rank 0] step:8241/10000 train_time:338635ms step_avg:41.09ms +[2025-09-11 09:36:02] [Rank 0] step:8261/10000 train_time:339340ms step_avg:41.08ms +[2025-09-11 09:36:02] [Rank 0] step:8261/10000 train_time:339340ms step_avg:41.08ms +[2025-09-11 09:36:03] [Rank 0] step:8281/10000 train_time:340043ms step_avg:41.06ms +[2025-09-11 09:36:03] [Rank 0] step:8281/10000 train_time:340043ms step_avg:41.06ms +[2025-09-11 09:36:04] [Rank 0] step:8301/10000 train_time:340748ms step_avg:41.05ms +[2025-09-11 09:36:04] [Rank 0] step:8301/10000 train_time:340748ms step_avg:41.05ms +[2025-09-11 09:36:04] [Rank 0] step:8321/10000 train_time:341453ms step_avg:41.04ms 
+[2025-09-11 09:36:04] [Rank 0] step:8321/10000 train_time:341453ms step_avg:41.04ms +[2025-09-11 09:36:05] [Rank 0] step:8341/10000 train_time:342165ms step_avg:41.02ms +[2025-09-11 09:36:05] [Rank 0] step:8341/10000 train_time:342165ms step_avg:41.02ms +[2025-09-11 09:36:06] [Rank 0] step:8361/10000 train_time:342867ms step_avg:41.01ms +[2025-09-11 09:36:06] [Rank 0] step:8361/10000 train_time:342867ms step_avg:41.01ms +[2025-09-11 09:36:06] [Rank 0] step:8381/10000 train_time:343575ms step_avg:40.99ms +[2025-09-11 09:36:06] [Rank 0] step:8381/10000 train_time:343575ms step_avg:40.99ms +[2025-09-11 09:36:07] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 09:36:07] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 09:36:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 09:36:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 09:36:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 09:36:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 09:36:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:36:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:36:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 09:36:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 09:36:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 09:36:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 09:36:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 09:36:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 09:36:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 09:36:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 09:36:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 09:36:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 09:36:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 09:36:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 09:36:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 09:36:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 09:36:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 09:36:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 09:36:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 09:36:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 09:36:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 09:36:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 09:36:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 09:36:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 09:36:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 09:36:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 09:36:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 09:36:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 09:36:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 09:36:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 09:36:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 09:36:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 09:36:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 09:36:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 09:36:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 09:36:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 09:36:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 09:36:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 09:36:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:36:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:36:17] [Rank 0] PRINT: step:8400/10000 val_loss:4.2848 total_sharp:1.6355e-04 L1_sharp:1.2380e-04 L2_sharp:1.5413e-05 L3_sharp:3.8716e-06 L4_sharp:5.2716e-06 L5_sharp:2.1281e-05 L6_sharp:1.6964e-05 L7_sharp:2.6302e-05 L8_sharp:4.1750e-05 L9_sharp:4.1598e-05 L10_sharp:4.6041e-05 L11_sharp:7.2761e-05 L12_sharp:4.8389e-04 total_fnorm:1.9750e+01 total_l1_linf:4.0192e+04 total_spectral:9.9375e+00 L1_fnorm:4.7188e+00 L2_fnorm:4.5938e+00 L3_fnorm:4.5938e+00 L4_fnorm:4.6250e+00 L5_fnorm:4.5000e+00 L6_fnorm:4.5938e+00 L7_fnorm:4.5938e+00 L8_fnorm:4.3750e+00 L9_fnorm:4.5312e+00 L10_fnorm:4.5000e+00 L11_fnorm:4.5000e+00 L12_fnorm:4.5938e+00 L1_l1linf:9.0234e-01 L2_l1linf:9.0625e-01 L3_l1linf:8.9844e-01 L4_l1linf:9.1016e-01 L5_l1linf:9.3750e-01 L6_l1linf:9.2578e-01 L7_l1linf:9.0625e-01 L8_l1linf:8.6719e-01 L9_l1linf:8.0469e-01 L10_l1linf:7.7734e-01 L11_l1linf:7.3828e-01 L12_l1linf:9.1406e-01 L1_spectral:6.8607e-02 L2_spectral:6.3307e-02 L3_spectral:6.3703e-02 L4_spectral:6.4346e-02 L5_spectral:6.3890e-02 L6_spectral:6.5420e-02 L7_spectral:6.6317e-02 L8_spectral:6.6523e-02 L9_spectral:6.6667e-02 L10_spectral:6.6483e-02 L11_spectral:6.6210e-02 L12_spectral:6.6984e-02 train_time:344262ms step_avg:40.98ms +[2025-09-11 09:36:17] [Rank 0] PRINT: step:8400/10000 val_loss:4.2848 total_sharp:1.6355e-04 L1_sharp:1.2380e-04 L2_sharp:1.5413e-05 L3_sharp:3.8716e-06 L4_sharp:5.2716e-06 L5_sharp:2.1281e-05 L6_sharp:1.6964e-05 L7_sharp:2.6302e-05 L8_sharp:4.1750e-05 L9_sharp:4.1598e-05 L10_sharp:4.6041e-05 L11_sharp:7.2761e-05 L12_sharp:4.8389e-04 total_fnorm:1.9750e+01 total_l1_linf:4.0192e+04 total_spectral:9.9375e+00 L1_fnorm:4.7188e+00 L2_fnorm:4.5938e+00 L3_fnorm:4.5938e+00 L4_fnorm:4.6250e+00 L5_fnorm:4.5000e+00 L6_fnorm:4.5938e+00 L7_fnorm:4.5938e+00 L8_fnorm:4.3750e+00 L9_fnorm:4.5312e+00 L10_fnorm:4.5000e+00 L11_fnorm:4.5000e+00 L12_fnorm:4.5938e+00 L1_l1linf:9.0234e-01 L2_l1linf:9.0625e-01 L3_l1linf:8.9844e-01 L4_l1linf:9.1016e-01 L5_l1linf:9.3750e-01 
L6_l1linf:9.2578e-01 L7_l1linf:9.0625e-01 L8_l1linf:8.6719e-01 L9_l1linf:8.0469e-01 L10_l1linf:7.7734e-01 L11_l1linf:7.3828e-01 L12_l1linf:9.1406e-01 L1_spectral:6.8607e-02 L2_spectral:6.3307e-02 L3_spectral:6.3703e-02 L4_spectral:6.4346e-02 L5_spectral:6.3890e-02 L6_spectral:6.5420e-02 L7_spectral:6.6317e-02 L8_spectral:6.6523e-02 L9_spectral:6.6667e-02 L10_spectral:6.6483e-02 L11_spectral:6.6210e-02 L12_spectral:6.6984e-02 train_time:344262ms step_avg:40.98ms +[2025-09-11 09:36:18] [Rank 0] step:8401/10000 train_time:345495ms step_avg:41.13ms +[2025-09-11 09:36:18] [Rank 0] step:8401/10000 train_time:345495ms step_avg:41.13ms +[2025-09-11 09:36:19] [Rank 0] step:8421/10000 train_time:346211ms step_avg:41.11ms +[2025-09-11 09:36:19] [Rank 0] step:8421/10000 train_time:346211ms step_avg:41.11ms +[2025-09-11 09:36:20] [Rank 0] step:8441/10000 train_time:346920ms step_avg:41.10ms +[2025-09-11 09:36:20] [Rank 0] step:8441/10000 train_time:346920ms step_avg:41.10ms +[2025-09-11 09:36:20] [Rank 0] step:8461/10000 train_time:347626ms step_avg:41.09ms +[2025-09-11 09:36:20] [Rank 0] step:8461/10000 train_time:347626ms step_avg:41.09ms +[2025-09-11 09:36:21] [Rank 0] step:8481/10000 train_time:348335ms step_avg:41.07ms +[2025-09-11 09:36:21] [Rank 0] step:8481/10000 train_time:348335ms step_avg:41.07ms +[2025-09-11 09:36:22] [Rank 0] step:8501/10000 train_time:349040ms step_avg:41.06ms +[2025-09-11 09:36:22] [Rank 0] step:8501/10000 train_time:349040ms step_avg:41.06ms +[2025-09-11 09:36:23] [Rank 0] step:8521/10000 train_time:349745ms step_avg:41.05ms +[2025-09-11 09:36:23] [Rank 0] step:8521/10000 train_time:349745ms step_avg:41.05ms +[2025-09-11 09:36:23] [Rank 0] step:8541/10000 train_time:350449ms step_avg:41.03ms +[2025-09-11 09:36:23] [Rank 0] step:8541/10000 train_time:350449ms step_avg:41.03ms +[2025-09-11 09:36:24] [Rank 0] step:8561/10000 train_time:351160ms step_avg:41.02ms +[2025-09-11 09:36:24] [Rank 0] step:8561/10000 train_time:351160ms step_avg:41.02ms 
+[2025-09-11 09:36:25] [Rank 0] step:8581/10000 train_time:351868ms step_avg:41.01ms +[2025-09-11 09:36:25] [Rank 0] step:8581/10000 train_time:351868ms step_avg:41.01ms +[2025-09-11 09:36:25] [Rank 0] step:8601/10000 train_time:352575ms step_avg:40.99ms +[2025-09-11 09:36:25] [Rank 0] step:8601/10000 train_time:352575ms step_avg:40.99ms +[2025-09-11 09:36:26] [Rank 0] step:8621/10000 train_time:353280ms step_avg:40.98ms +[2025-09-11 09:36:26] [Rank 0] step:8621/10000 train_time:353280ms step_avg:40.98ms +[2025-09-11 09:36:27] [Rank 0] step:8641/10000 train_time:353985ms step_avg:40.97ms +[2025-09-11 09:36:27] [Rank 0] step:8641/10000 train_time:353985ms step_avg:40.97ms +[2025-09-11 09:36:27] [Rank 0] step:8661/10000 train_time:354691ms step_avg:40.95ms +[2025-09-11 09:36:27] [Rank 0] step:8661/10000 train_time:354691ms step_avg:40.95ms +[2025-09-11 09:36:28] [Rank 0] step:8681/10000 train_time:355398ms step_avg:40.94ms +[2025-09-11 09:36:28] [Rank 0] step:8681/10000 train_time:355398ms step_avg:40.94ms +[2025-09-11 09:36:29] [Rank 0] step:8701/10000 train_time:356102ms step_avg:40.93ms +[2025-09-11 09:36:29] [Rank 0] step:8701/10000 train_time:356102ms step_avg:40.93ms +[2025-09-11 09:36:30] [Rank 0] step:8721/10000 train_time:356811ms step_avg:40.91ms +[2025-09-11 09:36:30] [Rank 0] step:8721/10000 train_time:356811ms step_avg:40.91ms +[2025-09-11 09:36:30] [Rank 0] step:8741/10000 train_time:357514ms step_avg:40.90ms +[2025-09-11 09:36:30] [Rank 0] step:8741/10000 train_time:357514ms step_avg:40.90ms +[2025-09-11 09:36:31] [Rank 0] step:8761/10000 train_time:358224ms step_avg:40.89ms +[2025-09-11 09:36:31] [Rank 0] step:8761/10000 train_time:358224ms step_avg:40.89ms +[2025-09-11 09:36:32] [Rank 0] step:8781/10000 train_time:358927ms step_avg:40.88ms +[2025-09-11 09:36:32] [Rank 0] step:8781/10000 train_time:358927ms step_avg:40.88ms +[2025-09-11 09:36:32] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 09:36:32] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 09:36:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 09:36:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 09:36:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 09:36:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 09:36:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:36:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:36:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 09:36:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 09:36:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 09:36:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 09:36:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 09:36:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 09:36:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 09:36:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 09:36:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 09:36:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 09:36:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 09:36:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 09:36:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 09:36:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 09:36:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 09:36:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 09:36:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 09:36:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 09:36:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 09:36:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 09:36:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 09:36:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 09:36:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 09:36:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 09:36:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 09:36:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 09:36:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 09:36:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 09:36:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 09:36:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 09:36:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 09:36:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 09:36:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 09:36:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 09:36:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 09:36:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 09:36:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:36:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:36:44] [Rank 0] PRINT: step:8800/10000 val_loss:4.2734 total_sharp:1.4661e-04 L1_sharp:1.3655e-04 L2_sharp:2.6370e-05 L3_sharp:8.5767e-06 L4_sharp:-3.6249e-06 L5_sharp:2.2406e-05 L6_sharp:9.8957e-06 L7_sharp:1.8715e-05 L8_sharp:3.1121e-05 L9_sharp:3.7691e-05 L10_sharp:3.7289e-05 L11_sharp:8.1504e-05 L12_sharp:4.3153e-04 total_fnorm:1.4438e+01 total_l1_linf:2.6368e+04 total_spectral:7.2500e+00 L1_fnorm:3.5000e+00 L2_fnorm:3.3594e+00 L3_fnorm:3.3594e+00 L4_fnorm:3.3750e+00 L5_fnorm:3.2500e+00 L6_fnorm:3.3594e+00 L7_fnorm:3.3594e+00 L8_fnorm:3.2031e+00 L9_fnorm:3.2969e+00 L10_fnorm:3.2656e+00 L11_fnorm:3.2812e+00 L12_fnorm:3.3594e+00 L1_l1linf:6.0938e-01 L2_l1linf:6.2109e-01 L3_l1linf:6.0156e-01 L4_l1linf:6.1719e-01 L5_l1linf:6.2891e-01 L6_l1linf:6.2109e-01 L7_l1linf:6.1328e-01 L8_l1linf:5.7422e-01 L9_l1linf:5.6250e-01 L10_l1linf:5.0000e-01 L11_l1linf:5.0391e-01 L12_l1linf:6.6406e-01 L1_spectral:5.2408e-02 L2_spectral:4.6789e-02 L3_spectral:4.7637e-02 L4_spectral:4.7833e-02 L5_spectral:4.7653e-02 L6_spectral:4.9343e-02 L7_spectral:4.9504e-02 L8_spectral:4.9435e-02 L9_spectral:5.0208e-02 L10_spectral:4.9430e-02 L11_spectral:4.9787e-02 L12_spectral:5.0159e-02 train_time:359611ms step_avg:40.86ms +[2025-09-11 09:36:44] [Rank 0] PRINT: step:8800/10000 
val_loss:4.2734 total_sharp:1.4661e-04 L1_sharp:1.3655e-04 L2_sharp:2.6370e-05 L3_sharp:8.5767e-06 L4_sharp:-3.6249e-06 L5_sharp:2.2406e-05 L6_sharp:9.8957e-06 L7_sharp:1.8715e-05 L8_sharp:3.1121e-05 L9_sharp:3.7691e-05 L10_sharp:3.7289e-05 L11_sharp:8.1504e-05 L12_sharp:4.3153e-04 total_fnorm:1.4438e+01 total_l1_linf:2.6368e+04 total_spectral:7.2500e+00 L1_fnorm:3.5000e+00 L2_fnorm:3.3594e+00 L3_fnorm:3.3594e+00 L4_fnorm:3.3750e+00 L5_fnorm:3.2500e+00 L6_fnorm:3.3594e+00 L7_fnorm:3.3594e+00 L8_fnorm:3.2031e+00 L9_fnorm:3.2969e+00 L10_fnorm:3.2656e+00 L11_fnorm:3.2812e+00 L12_fnorm:3.3594e+00 L1_l1linf:6.0938e-01 L2_l1linf:6.2109e-01 L3_l1linf:6.0156e-01 L4_l1linf:6.1719e-01 L5_l1linf:6.2891e-01 L6_l1linf:6.2109e-01 L7_l1linf:6.1328e-01 L8_l1linf:5.7422e-01 L9_l1linf:5.6250e-01 L10_l1linf:5.0000e-01 L11_l1linf:5.0391e-01 L12_l1linf:6.6406e-01 L1_spectral:5.2408e-02 L2_spectral:4.6789e-02 L3_spectral:4.7637e-02 L4_spectral:4.7833e-02 L5_spectral:4.7653e-02 L6_spectral:4.9343e-02 L7_spectral:4.9504e-02 L8_spectral:4.9435e-02 L9_spectral:5.0208e-02 L10_spectral:4.9430e-02 L11_spectral:4.9787e-02 L12_spectral:5.0159e-02 train_time:359611ms step_avg:40.86ms +[2025-09-11 09:36:45] [Rank 0] step:8801/10000 train_time:360795ms step_avg:40.99ms +[2025-09-11 09:36:45] [Rank 0] step:8801/10000 train_time:360795ms step_avg:40.99ms +[2025-09-11 09:36:46] [Rank 0] step:8821/10000 train_time:361531ms step_avg:40.99ms +[2025-09-11 09:36:46] [Rank 0] step:8821/10000 train_time:361531ms step_avg:40.99ms +[2025-09-11 09:36:47] [Rank 0] step:8841/10000 train_time:362238ms step_avg:40.97ms +[2025-09-11 09:36:47] [Rank 0] step:8841/10000 train_time:362238ms step_avg:40.97ms +[2025-09-11 09:36:47] [Rank 0] step:8861/10000 train_time:362945ms step_avg:40.96ms +[2025-09-11 09:36:47] [Rank 0] step:8861/10000 train_time:362945ms step_avg:40.96ms +[2025-09-11 09:36:48] [Rank 0] step:8881/10000 train_time:363653ms step_avg:40.95ms +[2025-09-11 09:36:48] [Rank 0] step:8881/10000 
train_time:363653ms step_avg:40.95ms +[2025-09-11 09:36:49] [Rank 0] step:8901/10000 train_time:364362ms step_avg:40.93ms +[2025-09-11 09:36:49] [Rank 0] step:8901/10000 train_time:364362ms step_avg:40.93ms +[2025-09-11 09:36:50] [Rank 0] step:8921/10000 train_time:365065ms step_avg:40.92ms +[2025-09-11 09:36:50] [Rank 0] step:8921/10000 train_time:365065ms step_avg:40.92ms +[2025-09-11 09:36:50] [Rank 0] step:8941/10000 train_time:365773ms step_avg:40.91ms +[2025-09-11 09:36:50] [Rank 0] step:8941/10000 train_time:365773ms step_avg:40.91ms +[2025-09-11 09:36:51] [Rank 0] step:8961/10000 train_time:366488ms step_avg:40.90ms +[2025-09-11 09:36:51] [Rank 0] step:8961/10000 train_time:366488ms step_avg:40.90ms +[2025-09-11 09:36:52] [Rank 0] step:8981/10000 train_time:367198ms step_avg:40.89ms +[2025-09-11 09:36:52] [Rank 0] step:8981/10000 train_time:367198ms step_avg:40.89ms +[2025-09-11 09:36:52] [Rank 0] step:9001/10000 train_time:367900ms step_avg:40.87ms +[2025-09-11 09:36:52] [Rank 0] step:9001/10000 train_time:367900ms step_avg:40.87ms +[2025-09-11 09:36:53] [Rank 0] step:9021/10000 train_time:368608ms step_avg:40.86ms +[2025-09-11 09:36:53] [Rank 0] step:9021/10000 train_time:368608ms step_avg:40.86ms +[2025-09-11 09:36:54] [Rank 0] step:9041/10000 train_time:369318ms step_avg:40.85ms +[2025-09-11 09:36:54] [Rank 0] step:9041/10000 train_time:369318ms step_avg:40.85ms +[2025-09-11 09:36:55] [Rank 0] step:9061/10000 train_time:370023ms step_avg:40.84ms +[2025-09-11 09:36:55] [Rank 0] step:9061/10000 train_time:370023ms step_avg:40.84ms +[2025-09-11 09:36:55] [Rank 0] step:9081/10000 train_time:370732ms step_avg:40.82ms +[2025-09-11 09:36:55] [Rank 0] step:9081/10000 train_time:370732ms step_avg:40.82ms +[2025-09-11 09:36:56] [Rank 0] step:9101/10000 train_time:371448ms step_avg:40.81ms +[2025-09-11 09:36:56] [Rank 0] step:9101/10000 train_time:371448ms step_avg:40.81ms +[2025-09-11 09:36:57] [Rank 0] step:9121/10000 train_time:372159ms step_avg:40.80ms 
+[2025-09-11 09:36:57] [Rank 0] step:9121/10000 train_time:372159ms step_avg:40.80ms +[2025-09-11 09:36:57] [Rank 0] step:9141/10000 train_time:372864ms step_avg:40.79ms +[2025-09-11 09:36:57] [Rank 0] step:9141/10000 train_time:372864ms step_avg:40.79ms +[2025-09-11 09:36:58] [Rank 0] step:9161/10000 train_time:373575ms step_avg:40.78ms +[2025-09-11 09:36:58] [Rank 0] step:9161/10000 train_time:373575ms step_avg:40.78ms +[2025-09-11 09:36:59] [Rank 0] step:9181/10000 train_time:374283ms step_avg:40.77ms +[2025-09-11 09:36:59] [Rank 0] step:9181/10000 train_time:374283ms step_avg:40.77ms +[2025-09-11 09:36:59] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 09:36:59] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 09:37:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 09:37:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 09:37:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 09:37:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 09:37:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:37:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:37:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 09:37:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 09:37:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 09:37:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 09:37:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 09:37:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 09:37:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 09:37:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 09:37:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 09:37:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 09:37:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 09:37:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 09:37:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 09:37:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 09:37:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 09:37:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 09:37:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 09:37:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 09:37:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 09:37:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 09:37:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 09:37:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 09:37:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 09:37:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 09:37:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 09:37:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 09:37:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 09:37:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 09:37:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 09:37:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 09:37:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 09:37:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 09:37:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 09:37:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 09:37:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 09:37:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 09:37:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:37:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:37:09] [Rank 0] PRINT: step:9200/10000 val_loss:4.2528 total_sharp:1.1830e-04 L1_sharp:1.0506e-04 L2_sharp:1.2007e-05 L3_sharp:9.2912e-06 L4_sharp:2.3562e-06 L5_sharp:2.8441e-05 L6_sharp:7.7549e-06 L7_sharp:1.5596e-05 L8_sharp:3.0396e-05 L9_sharp:3.2923e-05 L10_sharp:4.2813e-05 L11_sharp:6.4369e-05 L12_sharp:2.7640e-04 total_fnorm:9.7500e+00 total_l1_linf:1.5360e+04 total_spectral:4.8750e+00 L1_fnorm:2.3594e+00 L2_fnorm:2.2344e+00 L3_fnorm:2.2500e+00 L4_fnorm:2.2656e+00 L5_fnorm:2.1719e+00 L6_fnorm:2.2500e+00 L7_fnorm:2.2656e+00 L8_fnorm:2.1562e+00 L9_fnorm:2.2188e+00 L10_fnorm:2.1875e+00 L11_fnorm:2.2031e+00 L12_fnorm:2.2500e+00 L1_l1linf:3.6328e-01 L2_l1linf:3.8086e-01 L3_l1linf:3.7109e-01 L4_l1linf:3.9062e-01 L5_l1linf:3.7891e-01 L6_l1linf:3.7500e-01 L7_l1linf:3.7695e-01 L8_l1linf:3.5352e-01 L9_l1linf:3.2812e-01 L10_l1linf:3.1250e-01 L11_l1linf:3.0273e-01 L12_l1linf:4.0430e-01 L1_spectral:3.6336e-02 L2_spectral:3.2229e-02 L3_spectral:3.2334e-02 L4_spectral:3.3012e-02 L5_spectral:3.2841e-02 L6_spectral:3.3667e-02 L7_spectral:3.3673e-02 L8_spectral:3.4979e-02 L9_spectral:3.4203e-02 L10_spectral:3.3891e-02 L11_spectral:3.4248e-02 L12_spectral:3.4641e-02 train_time:374973ms step_avg:40.76ms +[2025-09-11 09:37:09] [Rank 0] PRINT: step:9200/10000 val_loss:4.2528 total_sharp:1.1830e-04 L1_sharp:1.0506e-04 L2_sharp:1.2007e-05 L3_sharp:9.2912e-06 L4_sharp:2.3562e-06 L5_sharp:2.8441e-05 L6_sharp:7.7549e-06 L7_sharp:1.5596e-05 L8_sharp:3.0396e-05 L9_sharp:3.2923e-05 L10_sharp:4.2813e-05 L11_sharp:6.4369e-05 L12_sharp:2.7640e-04 total_fnorm:9.7500e+00 total_l1_linf:1.5360e+04 total_spectral:4.8750e+00 L1_fnorm:2.3594e+00 L2_fnorm:2.2344e+00 L3_fnorm:2.2500e+00 L4_fnorm:2.2656e+00 L5_fnorm:2.1719e+00 L6_fnorm:2.2500e+00 L7_fnorm:2.2656e+00 L8_fnorm:2.1562e+00 L9_fnorm:2.2188e+00 L10_fnorm:2.1875e+00 L11_fnorm:2.2031e+00 L12_fnorm:2.2500e+00 L1_l1linf:3.6328e-01 L2_l1linf:3.8086e-01 L3_l1linf:3.7109e-01 L4_l1linf:3.9062e-01 L5_l1linf:3.7891e-01 
L6_l1linf:3.7500e-01 L7_l1linf:3.7695e-01 L8_l1linf:3.5352e-01 L9_l1linf:3.2812e-01 L10_l1linf:3.1250e-01 L11_l1linf:3.0273e-01 L12_l1linf:4.0430e-01 L1_spectral:3.6336e-02 L2_spectral:3.2229e-02 L3_spectral:3.2334e-02 L4_spectral:3.3012e-02 L5_spectral:3.2841e-02 L6_spectral:3.3667e-02 L7_spectral:3.3673e-02 L8_spectral:3.4979e-02 L9_spectral:3.4203e-02 L10_spectral:3.3891e-02 L11_spectral:3.4248e-02 L12_spectral:3.4641e-02 train_time:374973ms step_avg:40.76ms +[2025-09-11 09:37:11] [Rank 0] step:9201/10000 train_time:376184ms step_avg:40.89ms +[2025-09-11 09:37:11] [Rank 0] step:9201/10000 train_time:376184ms step_avg:40.89ms +[2025-09-11 09:37:11] [Rank 0] step:9221/10000 train_time:376921ms step_avg:40.88ms +[2025-09-11 09:37:11] [Rank 0] step:9221/10000 train_time:376921ms step_avg:40.88ms +[2025-09-11 09:37:12] [Rank 0] step:9241/10000 train_time:377628ms step_avg:40.86ms +[2025-09-11 09:37:12] [Rank 0] step:9241/10000 train_time:377628ms step_avg:40.86ms +[2025-09-11 09:37:13] [Rank 0] step:9261/10000 train_time:378337ms step_avg:40.85ms +[2025-09-11 09:37:13] [Rank 0] step:9261/10000 train_time:378337ms step_avg:40.85ms +[2025-09-11 09:37:13] [Rank 0] step:9281/10000 train_time:379046ms step_avg:40.84ms +[2025-09-11 09:37:13] [Rank 0] step:9281/10000 train_time:379046ms step_avg:40.84ms +[2025-09-11 09:37:14] [Rank 0] step:9301/10000 train_time:379751ms step_avg:40.83ms +[2025-09-11 09:37:14] [Rank 0] step:9301/10000 train_time:379751ms step_avg:40.83ms +[2025-09-11 09:37:15] [Rank 0] step:9321/10000 train_time:380460ms step_avg:40.82ms +[2025-09-11 09:37:15] [Rank 0] step:9321/10000 train_time:380460ms step_avg:40.82ms +[2025-09-11 09:37:16] [Rank 0] step:9341/10000 train_time:381163ms step_avg:40.81ms +[2025-09-11 09:37:16] [Rank 0] step:9341/10000 train_time:381163ms step_avg:40.81ms +[2025-09-11 09:37:16] [Rank 0] step:9361/10000 train_time:381868ms step_avg:40.79ms +[2025-09-11 09:37:16] [Rank 0] step:9361/10000 train_time:381868ms step_avg:40.79ms 
+[2025-09-11 09:37:17] [Rank 0] step:9381/10000 train_time:382576ms step_avg:40.78ms +[2025-09-11 09:37:17] [Rank 0] step:9381/10000 train_time:382576ms step_avg:40.78ms +[2025-09-11 09:37:18] [Rank 0] step:9401/10000 train_time:383284ms step_avg:40.77ms +[2025-09-11 09:37:18] [Rank 0] step:9401/10000 train_time:383284ms step_avg:40.77ms +[2025-09-11 09:37:18] [Rank 0] step:9421/10000 train_time:383993ms step_avg:40.76ms +[2025-09-11 09:37:18] [Rank 0] step:9421/10000 train_time:383993ms step_avg:40.76ms +[2025-09-11 09:37:19] [Rank 0] step:9441/10000 train_time:384703ms step_avg:40.75ms +[2025-09-11 09:37:19] [Rank 0] step:9441/10000 train_time:384703ms step_avg:40.75ms +[2025-09-11 09:37:20] [Rank 0] step:9461/10000 train_time:385410ms step_avg:40.74ms +[2025-09-11 09:37:20] [Rank 0] step:9461/10000 train_time:385410ms step_avg:40.74ms +[2025-09-11 09:37:21] [Rank 0] step:9481/10000 train_time:386119ms step_avg:40.73ms +[2025-09-11 09:37:21] [Rank 0] step:9481/10000 train_time:386119ms step_avg:40.73ms +[2025-09-11 09:37:21] [Rank 0] step:9501/10000 train_time:386829ms step_avg:40.71ms +[2025-09-11 09:37:21] [Rank 0] step:9501/10000 train_time:386829ms step_avg:40.71ms +[2025-09-11 09:37:22] [Rank 0] step:9521/10000 train_time:387540ms step_avg:40.70ms +[2025-09-11 09:37:22] [Rank 0] step:9521/10000 train_time:387540ms step_avg:40.70ms +[2025-09-11 09:37:23] [Rank 0] step:9541/10000 train_time:388244ms step_avg:40.69ms +[2025-09-11 09:37:23] [Rank 0] step:9541/10000 train_time:388244ms step_avg:40.69ms +[2025-09-11 09:37:23] [Rank 0] step:9561/10000 train_time:388951ms step_avg:40.68ms +[2025-09-11 09:37:23] [Rank 0] step:9561/10000 train_time:388951ms step_avg:40.68ms +[2025-09-11 09:37:24] [Rank 0] step:9581/10000 train_time:389660ms step_avg:40.67ms +[2025-09-11 09:37:24] [Rank 0] step:9581/10000 train_time:389660ms step_avg:40.67ms +[2025-09-11 09:37:25] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 09:37:25] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 09:37:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 09:37:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 09:37:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 09:37:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 09:37:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:37:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:37:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 09:37:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 09:37:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 09:37:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 09:37:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 09:37:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 09:37:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 09:37:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 09:37:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 09:37:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 09:37:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 09:37:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 09:37:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 09:37:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 09:37:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 09:37:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 09:37:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 09:37:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 09:37:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 09:37:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 09:37:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 09:37:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 09:37:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 09:37:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 09:37:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 09:37:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 09:37:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 09:37:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 09:37:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 09:37:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 09:37:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 09:37:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 09:37:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 09:37:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 09:37:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 09:37:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 09:37:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:37:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:37:35] [Rank 0] PRINT: step:9600/10000 val_loss:4.2421 total_sharp:8.0288e-05 L1_sharp:8.2319e-05 L2_sharp:1.3626e-05 L3_sharp:3.5169e-06 L4_sharp:3.1959e-06 L5_sharp:1.7774e-05 L6_sharp:4.0271e-06 L7_sharp:9.9467e-06 L8_sharp:2.7089e-05 L9_sharp:2.7372e-05 L10_sharp:2.9662e-05 L11_sharp:4.7561e-05 L12_sharp:2.3238e-04 total_fnorm:5.4375e+00 total_l1_linf:7.2960e+03 total_spectral:2.7500e+00 L1_fnorm:1.3203e+00 L2_fnorm:1.2656e+00 L3_fnorm:1.2578e+00 L4_fnorm:1.2734e+00 L5_fnorm:1.2422e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2031e+00 L9_fnorm:1.2422e+00 L10_fnorm:1.2344e+00 L11_fnorm:1.2344e+00 L12_fnorm:1.2812e+00 L1_l1linf:1.7969e-01 L2_l1linf:1.7969e-01 L3_l1linf:1.7871e-01 L4_l1linf:1.8457e-01 L5_l1linf:1.9043e-01 L6_l1linf:1.8652e-01 L7_l1linf:1.8457e-01 L8_l1linf:1.7188e-01 L9_l1linf:1.5820e-01 L10_l1linf:1.6113e-01 L11_l1linf:1.5625e-01 L12_l1linf:1.9434e-01 L1_spectral:2.0735e-02 L2_spectral:1.8908e-02 L3_spectral:1.8669e-02 L4_spectral:1.8626e-02 L5_spectral:1.9167e-02 L6_spectral:1.9116e-02 L7_spectral:1.9225e-02 L8_spectral:2.0351e-02 L9_spectral:1.9622e-02 L10_spectral:1.9606e-02 L11_spectral:1.9447e-02 L12_spectral:1.9917e-02 train_time:390345ms step_avg:40.66ms +[2025-09-11 09:37:35] [Rank 0] PRINT: step:9600/10000 
val_loss:4.2421 total_sharp:8.0288e-05 L1_sharp:8.2319e-05 L2_sharp:1.3626e-05 L3_sharp:3.5169e-06 L4_sharp:3.1959e-06 L5_sharp:1.7774e-05 L6_sharp:4.0271e-06 L7_sharp:9.9467e-06 L8_sharp:2.7089e-05 L9_sharp:2.7372e-05 L10_sharp:2.9662e-05 L11_sharp:4.7561e-05 L12_sharp:2.3238e-04 total_fnorm:5.4375e+00 total_l1_linf:7.2960e+03 total_spectral:2.7500e+00 L1_fnorm:1.3203e+00 L2_fnorm:1.2656e+00 L3_fnorm:1.2578e+00 L4_fnorm:1.2734e+00 L5_fnorm:1.2422e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2031e+00 L9_fnorm:1.2422e+00 L10_fnorm:1.2344e+00 L11_fnorm:1.2344e+00 L12_fnorm:1.2812e+00 L1_l1linf:1.7969e-01 L2_l1linf:1.7969e-01 L3_l1linf:1.7871e-01 L4_l1linf:1.8457e-01 L5_l1linf:1.9043e-01 L6_l1linf:1.8652e-01 L7_l1linf:1.8457e-01 L8_l1linf:1.7188e-01 L9_l1linf:1.5820e-01 L10_l1linf:1.6113e-01 L11_l1linf:1.5625e-01 L12_l1linf:1.9434e-01 L1_spectral:2.0735e-02 L2_spectral:1.8908e-02 L3_spectral:1.8669e-02 L4_spectral:1.8626e-02 L5_spectral:1.9167e-02 L6_spectral:1.9116e-02 L7_spectral:1.9225e-02 L8_spectral:2.0351e-02 L9_spectral:1.9622e-02 L10_spectral:1.9606e-02 L11_spectral:1.9447e-02 L12_spectral:1.9917e-02 train_time:390345ms step_avg:40.66ms +[2025-09-11 09:37:36] [Rank 0] step:9601/10000 train_time:391580ms step_avg:40.79ms +[2025-09-11 09:37:36] [Rank 0] step:9601/10000 train_time:391580ms step_avg:40.79ms +[2025-09-11 09:37:37] [Rank 0] step:9621/10000 train_time:392327ms step_avg:40.78ms +[2025-09-11 09:37:37] [Rank 0] step:9621/10000 train_time:392327ms step_avg:40.78ms +[2025-09-11 09:37:37] [Rank 0] step:9641/10000 train_time:393040ms step_avg:40.77ms +[2025-09-11 09:37:37] [Rank 0] step:9641/10000 train_time:393040ms step_avg:40.77ms +[2025-09-11 09:37:38] [Rank 0] step:9661/10000 train_time:393760ms step_avg:40.76ms +[2025-09-11 09:37:38] [Rank 0] step:9661/10000 train_time:393760ms step_avg:40.76ms +[2025-09-11 09:37:39] [Rank 0] step:9681/10000 train_time:394472ms step_avg:40.75ms +[2025-09-11 09:37:39] [Rank 0] step:9681/10000 
train_time:394472ms step_avg:40.75ms +[2025-09-11 09:37:39] [Rank 0] step:9701/10000 train_time:395187ms step_avg:40.74ms +[2025-09-11 09:37:39] [Rank 0] step:9701/10000 train_time:395187ms step_avg:40.74ms +[2025-09-11 09:37:40] [Rank 0] step:9721/10000 train_time:395906ms step_avg:40.73ms +[2025-09-11 09:37:40] [Rank 0] step:9721/10000 train_time:395906ms step_avg:40.73ms +[2025-09-11 09:37:41] [Rank 0] step:9741/10000 train_time:396621ms step_avg:40.72ms +[2025-09-11 09:37:41] [Rank 0] step:9741/10000 train_time:396621ms step_avg:40.72ms +[2025-09-11 09:37:42] [Rank 0] step:9761/10000 train_time:397336ms step_avg:40.71ms +[2025-09-11 09:37:42] [Rank 0] step:9761/10000 train_time:397336ms step_avg:40.71ms +[2025-09-11 09:37:42] [Rank 0] step:9781/10000 train_time:398050ms step_avg:40.70ms +[2025-09-11 09:37:42] [Rank 0] step:9781/10000 train_time:398050ms step_avg:40.70ms +[2025-09-11 09:37:43] [Rank 0] step:9801/10000 train_time:398768ms step_avg:40.69ms +[2025-09-11 09:37:43] [Rank 0] step:9801/10000 train_time:398768ms step_avg:40.69ms +[2025-09-11 09:37:44] [Rank 0] step:9821/10000 train_time:399485ms step_avg:40.68ms +[2025-09-11 09:37:44] [Rank 0] step:9821/10000 train_time:399485ms step_avg:40.68ms +[2025-09-11 09:37:45] [Rank 0] step:9841/10000 train_time:400742ms step_avg:40.72ms +[2025-09-11 09:37:45] [Rank 0] step:9841/10000 train_time:400742ms step_avg:40.72ms +[2025-09-11 09:37:46] [Rank 0] step:9861/10000 train_time:401457ms step_avg:40.71ms +[2025-09-11 09:37:46] [Rank 0] step:9861/10000 train_time:401457ms step_avg:40.71ms +[2025-09-11 09:37:47] [Rank 0] step:9881/10000 train_time:402325ms step_avg:40.72ms +[2025-09-11 09:37:47] [Rank 0] step:9881/10000 train_time:402325ms step_avg:40.72ms +[2025-09-11 09:37:47] [Rank 0] step:9901/10000 train_time:403142ms step_avg:40.72ms +[2025-09-11 09:37:47] [Rank 0] step:9901/10000 train_time:403142ms step_avg:40.72ms +[2025-09-11 09:37:48] [Rank 0] step:9921/10000 train_time:403856ms step_avg:40.71ms 
+[2025-09-11 09:37:48] [Rank 0] step:9921/10000 train_time:403856ms step_avg:40.71ms +[2025-09-11 09:37:49] [Rank 0] step:9941/10000 train_time:404575ms step_avg:40.70ms +[2025-09-11 09:37:49] [Rank 0] step:9941/10000 train_time:404575ms step_avg:40.70ms +[2025-09-11 09:37:49] [Rank 0] step:9961/10000 train_time:405294ms step_avg:40.69ms +[2025-09-11 09:37:49] [Rank 0] step:9961/10000 train_time:405294ms step_avg:40.69ms +[2025-09-11 09:37:50] [Rank 0] step:9981/10000 train_time:406010ms step_avg:40.68ms +[2025-09-11 09:37:50] [Rank 0] step:9981/10000 train_time:406010ms step_avg:40.68ms +[2025-09-11 09:37:51] [Rank 0] step:10000/10000 train_time:406697ms step_avg:40.67ms +[2025-09-11 09:37:51] [Rank 0] step:10000/10000 train_time:406697ms step_avg:40.67ms +[2025-09-11 09:37:51] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 09:37:51] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 09:37:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 09:37:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 09:37:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 09:37:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 09:37:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:37:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:37:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 09:37:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 09:37:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 09:37:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 09:37:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 09:37:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 09:37:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 09:37:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 09:37:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 09:37:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 09:37:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 09:37:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 09:37:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 09:37:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 09:37:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 09:37:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 09:37:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 09:37:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 09:37:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 09:37:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 09:37:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 09:37:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 09:37:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 09:37:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 09:37:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 09:37:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 09:37:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 09:37:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 09:38:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 09:38:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 09:38:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 09:38:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 09:38:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 09:38:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 09:38:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 09:38:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 09:38:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:38:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:38:01] [Rank 0] PRINT: step:10000/10000 val_loss:4.2377 total_sharp:5.4765e-05 L1_sharp:4.7197e-05 L2_sharp:1.1987e-05 L3_sharp:1.9793e-06 L4_sharp:5.3590e-06 L5_sharp:1.4349e-05 L6_sharp:4.6233e-06 L7_sharp:1.0339e-05 L8_sharp:1.6931e-05 L9_sharp:1.9095e-05 L10_sharp:2.1611e-05 L11_sharp:3.8636e-05 L12_sharp:1.6771e-04 total_fnorm:2.1250e+00 total_l1_linf:2.0400e+03 total_spectral:1.0547e+00 L1_fnorm:5.1953e-01 L2_fnorm:4.8828e-01 L3_fnorm:4.9023e-01 L4_fnorm:4.9609e-01 L5_fnorm:4.7852e-01 L6_fnorm:4.9219e-01 L7_fnorm:4.9219e-01 L8_fnorm:4.7070e-01 L9_fnorm:4.8242e-01 L10_fnorm:4.8047e-01 L11_fnorm:4.8242e-01 L12_fnorm:4.9805e-01 L1_l1linf:5.8838e-02 L2_l1linf:5.5176e-02 L3_l1linf:5.5176e-02 L4_l1linf:5.6885e-02 L5_l1linf:5.8594e-02 L6_l1linf:5.7373e-02 L7_l1linf:5.4199e-02 L8_l1linf:5.1758e-02 L9_l1linf:4.8584e-02 L10_l1linf:4.5166e-02 L11_l1linf:4.7119e-02 L12_l1linf:6.0791e-02 L1_spectral:8.3302e-03 L2_spectral:7.6404e-03 L3_spectral:7.3628e-03 L4_spectral:7.5012e-03 L5_spectral:7.7159e-03 L6_spectral:7.6052e-03 L7_spectral:7.7207e-03 L8_spectral:8.3673e-03 L9_spectral:7.7813e-03 L10_spectral:7.6998e-03 L11_spectral:7.8393e-03 L12_spectral:7.9427e-03 train_time:406718ms step_avg:40.67ms +[2025-09-11 09:38:01] [Rank 0] PRINT: step:10000/10000 val_loss:4.2377 total_sharp:5.4765e-05 L1_sharp:4.7197e-05 L2_sharp:1.1987e-05 L3_sharp:1.9793e-06 L4_sharp:5.3590e-06 L5_sharp:1.4349e-05 L6_sharp:4.6233e-06 L7_sharp:1.0339e-05 L8_sharp:1.6931e-05 L9_sharp:1.9095e-05 L10_sharp:2.1611e-05 L11_sharp:3.8636e-05 L12_sharp:1.6771e-04 total_fnorm:2.1250e+00 total_l1_linf:2.0400e+03 total_spectral:1.0547e+00 L1_fnorm:5.1953e-01 L2_fnorm:4.8828e-01 L3_fnorm:4.9023e-01 L4_fnorm:4.9609e-01 L5_fnorm:4.7852e-01 L6_fnorm:4.9219e-01 L7_fnorm:4.9219e-01 L8_fnorm:4.7070e-01 L9_fnorm:4.8242e-01 L10_fnorm:4.8047e-01 L11_fnorm:4.8242e-01 L12_fnorm:4.9805e-01 L1_l1linf:5.8838e-02 L2_l1linf:5.5176e-02 L3_l1linf:5.5176e-02 L4_l1linf:5.6885e-02 L5_l1linf:5.8594e-02 
L6_l1linf:5.7373e-02 L7_l1linf:5.4199e-02 L8_l1linf:5.1758e-02 L9_l1linf:4.8584e-02 L10_l1linf:4.5166e-02 L11_l1linf:4.7119e-02 L12_l1linf:6.0791e-02 L1_spectral:8.3302e-03 L2_spectral:7.6404e-03 L3_spectral:7.3628e-03 L4_spectral:7.5012e-03 L5_spectral:7.7159e-03 L6_spectral:7.6052e-03 L7_spectral:7.7207e-03 L8_spectral:8.3673e-03 L9_spectral:7.7813e-03 L10_spectral:7.6998e-03 L11_spectral:7.8393e-03 L12_spectral:7.9427e-03 train_time:406718ms step_avg:40.67ms +[2025-09-11 09:38:01] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 09:38:01 2025 --- +[2025-09-11 09:38:01] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 09:38:01 2025 --- +[2025-09-11 09:38:01] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 09:38:01] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.0005_seed_42/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.0005_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..c36d78c9c19da3491fab8940ba4d86291cf8243c --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.0005_seed_42/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.02, + "muon_lr": 0.0005, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "0ece77d7-a7a6-4cc1-a540-8f2f4037e8c1", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.0005_seed_42/training_log_0ece77d7-a7a6-4cc1-a540-8f2f4037e8c1.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.0005_seed_42/training_log_0ece77d7-a7a6-4cc1-a540-8f2f4037e8c1.txt new file mode 100644 index 0000000000000000000000000000000000000000..969113499f1330e122f7030fa7fd9d0356d07675 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.0005_seed_42/training_log_0ece77d7-a7a6-4cc1-a540-8f2f4037e8c1.txt @@ -0,0 +1,4264 @@ +[2025-09-11 09:51:58] [Rank 0] PRINT: --- Script Start: Thu Sep 11 09:51:58 2025 --- +[2025-09-11 09:51:58] [Rank 0] PRINT: --- Script Start: Thu Sep 11 09:51:58 2025 --- +[2025-09-11 09:51:58] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.02, muon_lr=0.0005, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 09:51:58] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.02, muon_lr=0.0005, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 09:51:58] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 09:51:58] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 09:51:58] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-11 09:51:58] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-11 09:51:58] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.0005_seed_42 +[2025-09-11 09:51:58] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.0005_seed_42 +[2025-09-11 09:51:58] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses 
import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert 
header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." 
+ "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + 
train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 09:51:58] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 09:51:58] [Rank 0] PRINT: Constructing model... +[2025-09-11 09:51:58] [Rank 0] PRINT: Constructing model... +[2025-09-11 09:51:59] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 09:51:59] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 09:51:59] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 09:51:59] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 09:51:59] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 09:51:59] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 09:51:59] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 09:51:59] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 09:51:59] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 09:51:59] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 09:52:01] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 09:52:01] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 09:52:01] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 09:52:01] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 09:52:01] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 09:52:01] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 09:52:07] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 09:52:07] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 09:52:07] [Rank 0] PRINT: Starting warmup... +[2025-09-11 09:52:07] [Rank 0] PRINT: Starting warmup... +[2025-09-11 09:52:45] [Rank 0] PRINT: Warmup complete. +[2025-09-11 09:52:45] [Rank 0] PRINT: Warmup complete. +[2025-09-11 09:52:45] [Rank 0] PRINT: Starting training... +[2025-09-11 09:52:45] [Rank 0] PRINT: Starting training... 
+[2025-09-11 09:52:47] [Rank 0] step:21/10000 train_time:1138ms step_avg:54.19ms +[2025-09-11 09:52:47] [Rank 0] step:21/10000 train_time:1138ms step_avg:54.19ms +[2025-09-11 09:52:47] [Rank 0] step:41/10000 train_time:1871ms step_avg:45.63ms +[2025-09-11 09:52:47] [Rank 0] step:41/10000 train_time:1871ms step_avg:45.63ms +[2025-09-11 09:52:48] [Rank 0] step:61/10000 train_time:2603ms step_avg:42.67ms +[2025-09-11 09:52:48] [Rank 0] step:61/10000 train_time:2603ms step_avg:42.67ms +[2025-09-11 09:52:49] [Rank 0] step:81/10000 train_time:3335ms step_avg:41.17ms +[2025-09-11 09:52:49] [Rank 0] step:81/10000 train_time:3335ms step_avg:41.17ms +[2025-09-11 09:52:49] [Rank 0] step:101/10000 train_time:4067ms step_avg:40.27ms +[2025-09-11 09:52:49] [Rank 0] step:101/10000 train_time:4067ms step_avg:40.27ms +[2025-09-11 09:52:50] [Rank 0] step:121/10000 train_time:4798ms step_avg:39.66ms +[2025-09-11 09:52:50] [Rank 0] step:121/10000 train_time:4798ms step_avg:39.66ms +[2025-09-11 09:52:51] [Rank 0] step:141/10000 train_time:5530ms step_avg:39.22ms +[2025-09-11 09:52:51] [Rank 0] step:141/10000 train_time:5530ms step_avg:39.22ms +[2025-09-11 09:52:52] [Rank 0] step:161/10000 train_time:6266ms step_avg:38.92ms +[2025-09-11 09:52:52] [Rank 0] step:161/10000 train_time:6266ms step_avg:38.92ms +[2025-09-11 09:52:52] [Rank 0] step:181/10000 train_time:6997ms step_avg:38.66ms +[2025-09-11 09:52:52] [Rank 0] step:181/10000 train_time:6997ms step_avg:38.66ms +[2025-09-11 09:52:53] [Rank 0] step:201/10000 train_time:7729ms step_avg:38.45ms +[2025-09-11 09:52:53] [Rank 0] step:201/10000 train_time:7729ms step_avg:38.45ms +[2025-09-11 09:52:54] [Rank 0] step:221/10000 train_time:8462ms step_avg:38.29ms +[2025-09-11 09:52:54] [Rank 0] step:221/10000 train_time:8462ms step_avg:38.29ms +[2025-09-11 09:52:55] [Rank 0] step:241/10000 train_time:9193ms step_avg:38.15ms +[2025-09-11 09:52:55] [Rank 0] step:241/10000 train_time:9193ms step_avg:38.15ms +[2025-09-11 09:52:55] [Rank 0] 
step:261/10000 train_time:9925ms step_avg:38.03ms +[2025-09-11 09:52:55] [Rank 0] step:261/10000 train_time:9925ms step_avg:38.03ms +[2025-09-11 09:52:56] [Rank 0] step:281/10000 train_time:10656ms step_avg:37.92ms +[2025-09-11 09:52:56] [Rank 0] step:281/10000 train_time:10656ms step_avg:37.92ms +[2025-09-11 09:52:57] [Rank 0] step:301/10000 train_time:11387ms step_avg:37.83ms +[2025-09-11 09:52:57] [Rank 0] step:301/10000 train_time:11387ms step_avg:37.83ms +[2025-09-11 09:52:58] [Rank 0] step:321/10000 train_time:12118ms step_avg:37.75ms +[2025-09-11 09:52:58] [Rank 0] step:321/10000 train_time:12118ms step_avg:37.75ms +[2025-09-11 09:52:58] [Rank 0] step:341/10000 train_time:12850ms step_avg:37.68ms +[2025-09-11 09:52:58] [Rank 0] step:341/10000 train_time:12850ms step_avg:37.68ms +[2025-09-11 09:52:59] [Rank 0] step:361/10000 train_time:13581ms step_avg:37.62ms +[2025-09-11 09:52:59] [Rank 0] step:361/10000 train_time:13581ms step_avg:37.62ms +[2025-09-11 09:53:00] [Rank 0] step:381/10000 train_time:14562ms step_avg:38.22ms +[2025-09-11 09:53:00] [Rank 0] step:381/10000 train_time:14562ms step_avg:38.22ms +[2025-09-11 09:53:01] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 09:53:01] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 09:53:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 09:53:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 09:53:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 09:53:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 09:53:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:53:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 09:53:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 09:53:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 09:53:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 09:53:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 09:53:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 09:53:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 09:53:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 09:53:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 09:53:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 09:53:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 09:53:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 09:53:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 09:53:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 09:53:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 09:53:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 09:53:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 09:53:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 09:53:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 09:53:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 09:53:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 09:53:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 09:53:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 09:53:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 09:53:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 09:53:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 09:53:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 09:53:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 09:53:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 09:53:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 09:53:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 09:53:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 09:53:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 09:53:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 09:53:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 09:53:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 09:53:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 09:53:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:53:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:53:53] [Rank 0] PRINT: step:400/10000 val_loss:6.2871 total_sharp:3.4286e-04 L1_sharp:1.0768e-01 L2_sharp:1.0676e-01 L3_sharp:9.2313e-02 L4_sharp:1.3591e-01 L5_sharp:1.3777e-01 L6_sharp:1.5545e-01 L7_sharp:1.8442e-01 L8_sharp:2.0800e-01 L9_sharp:2.7000e-01 L10_sharp:5.4301e-01 L11_sharp:6.1228e-01 L12_sharp:6.9621e-01 total_fnorm:7.3537e+01 total_l1_linf:1.7910e+05 total_spectral:3.6768e+01 L1_fnorm:5.8914e-02 L2_fnorm:5.8679e-02 L3_fnorm:5.9101e-02 L4_fnorm:5.8879e-02 L5_fnorm:5.9446e-02 L6_fnorm:5.9477e-02 L7_fnorm:5.9831e-02 L8_fnorm:5.9163e-02 L9_fnorm:5.9483e-02 L10_fnorm:5.8607e-02 L11_fnorm:5.9136e-02 L12_fnorm:5.8781e-02 L1_l1linf:2.2510e-02 L2_l1linf:2.2439e-02 L3_l1linf:2.2341e-02 L4_l1linf:2.2397e-02 L5_l1linf:2.2397e-02 L6_l1linf:2.2367e-02 L7_l1linf:2.2566e-02 L8_l1linf:2.2593e-02 L9_l1linf:2.2329e-02 L10_l1linf:2.2358e-02 L11_l1linf:2.2301e-02 L12_l1linf:2.2286e-02 L1_spectral:6.0254e-04 L2_spectral:6.0274e-04 L3_spectral:6.0271e-04 L4_spectral:6.0251e-04 L5_spectral:6.0297e-04 L6_spectral:6.0263e-04 L7_spectral:6.0302e-04 L8_spectral:6.0271e-04 L9_spectral:6.0284e-04 L10_spectral:6.0248e-04 L11_spectral:6.0276e-04 L12_spectral:6.0266e-04 train_time:15273ms step_avg:38.18ms +[2025-09-11 09:53:53] [Rank 0] PRINT: step:400/10000 val_loss:6.2871 total_sharp:3.4286e-04 L1_sharp:1.0768e-01 L2_sharp:1.0676e-01 L3_sharp:9.2313e-02 L4_sharp:1.3591e-01 L5_sharp:1.3777e-01 L6_sharp:1.5545e-01 L7_sharp:1.8442e-01 L8_sharp:2.0800e-01 L9_sharp:2.7000e-01 L10_sharp:5.4301e-01 L11_sharp:6.1228e-01 L12_sharp:6.9621e-01 total_fnorm:7.3537e+01 total_l1_linf:1.7910e+05 total_spectral:3.6768e+01 L1_fnorm:5.8914e-02 L2_fnorm:5.8679e-02 L3_fnorm:5.9101e-02 L4_fnorm:5.8879e-02 L5_fnorm:5.9446e-02 L6_fnorm:5.9477e-02 L7_fnorm:5.9831e-02 L8_fnorm:5.9163e-02 L9_fnorm:5.9483e-02 L10_fnorm:5.8607e-02 L11_fnorm:5.9136e-02 L12_fnorm:5.8781e-02 L1_l1linf:2.2510e-02 L2_l1linf:2.2439e-02 L3_l1linf:2.2341e-02 L4_l1linf:2.2397e-02 L5_l1linf:2.2397e-02 
L6_l1linf:2.2367e-02 L7_l1linf:2.2566e-02 L8_l1linf:2.2593e-02 L9_l1linf:2.2329e-02 L10_l1linf:2.2358e-02 L11_l1linf:2.2301e-02 L12_l1linf:2.2286e-02 L1_spectral:6.0254e-04 L2_spectral:6.0274e-04 L3_spectral:6.0271e-04 L4_spectral:6.0251e-04 L5_spectral:6.0297e-04 L6_spectral:6.0263e-04 L7_spectral:6.0302e-04 L8_spectral:6.0271e-04 L9_spectral:6.0284e-04 L10_spectral:6.0248e-04 L11_spectral:6.0276e-04 L12_spectral:6.0266e-04 train_time:15273ms step_avg:38.18ms +[2025-09-11 09:54:25] [Rank 0] step:401/10000 train_time:47442ms step_avg:118.31ms +[2025-09-11 09:54:25] [Rank 0] step:401/10000 train_time:47442ms step_avg:118.31ms +[2025-09-11 09:54:27] [Rank 0] step:421/10000 train_time:49789ms step_avg:118.26ms +[2025-09-11 09:54:27] [Rank 0] step:421/10000 train_time:49789ms step_avg:118.26ms +[2025-09-11 09:54:28] [Rank 0] step:441/10000 train_time:50430ms step_avg:114.35ms +[2025-09-11 09:54:28] [Rank 0] step:441/10000 train_time:50430ms step_avg:114.35ms +[2025-09-11 09:54:29] [Rank 0] step:461/10000 train_time:51072ms step_avg:110.78ms +[2025-09-11 09:54:29] [Rank 0] step:461/10000 train_time:51072ms step_avg:110.78ms +[2025-09-11 09:54:29] [Rank 0] step:481/10000 train_time:51713ms step_avg:107.51ms +[2025-09-11 09:54:29] [Rank 0] step:481/10000 train_time:51713ms step_avg:107.51ms +[2025-09-11 09:54:30] [Rank 0] step:501/10000 train_time:52353ms step_avg:104.50ms +[2025-09-11 09:54:30] [Rank 0] step:501/10000 train_time:52353ms step_avg:104.50ms +[2025-09-11 09:54:31] [Rank 0] step:521/10000 train_time:52995ms step_avg:101.72ms +[2025-09-11 09:54:31] [Rank 0] step:521/10000 train_time:52995ms step_avg:101.72ms +[2025-09-11 09:54:31] [Rank 0] step:541/10000 train_time:53635ms step_avg:99.14ms +[2025-09-11 09:54:31] [Rank 0] step:541/10000 train_time:53635ms step_avg:99.14ms +[2025-09-11 09:54:32] [Rank 0] step:561/10000 train_time:54276ms step_avg:96.75ms +[2025-09-11 09:54:32] [Rank 0] step:561/10000 train_time:54276ms step_avg:96.75ms +[2025-09-11 09:54:33] 
[Rank 0] step:581/10000 train_time:54916ms step_avg:94.52ms +[2025-09-11 09:54:33] [Rank 0] step:581/10000 train_time:54916ms step_avg:94.52ms +[2025-09-11 09:54:33] [Rank 0] step:601/10000 train_time:55556ms step_avg:92.44ms +[2025-09-11 09:54:33] [Rank 0] step:601/10000 train_time:55556ms step_avg:92.44ms +[2025-09-11 09:54:34] [Rank 0] step:621/10000 train_time:56196ms step_avg:90.49ms +[2025-09-11 09:54:34] [Rank 0] step:621/10000 train_time:56196ms step_avg:90.49ms +[2025-09-11 09:54:34] [Rank 0] step:641/10000 train_time:56836ms step_avg:88.67ms +[2025-09-11 09:54:34] [Rank 0] step:641/10000 train_time:56836ms step_avg:88.67ms +[2025-09-11 09:54:35] [Rank 0] step:661/10000 train_time:57476ms step_avg:86.95ms +[2025-09-11 09:54:35] [Rank 0] step:661/10000 train_time:57476ms step_avg:86.95ms +[2025-09-11 09:54:36] [Rank 0] step:681/10000 train_time:58117ms step_avg:85.34ms +[2025-09-11 09:54:36] [Rank 0] step:681/10000 train_time:58117ms step_avg:85.34ms +[2025-09-11 09:54:36] [Rank 0] step:701/10000 train_time:58757ms step_avg:83.82ms +[2025-09-11 09:54:36] [Rank 0] step:701/10000 train_time:58757ms step_avg:83.82ms +[2025-09-11 09:54:37] [Rank 0] step:721/10000 train_time:59397ms step_avg:82.38ms +[2025-09-11 09:54:37] [Rank 0] step:721/10000 train_time:59397ms step_avg:82.38ms +[2025-09-11 09:54:38] [Rank 0] step:741/10000 train_time:60037ms step_avg:81.02ms +[2025-09-11 09:54:38] [Rank 0] step:741/10000 train_time:60037ms step_avg:81.02ms +[2025-09-11 09:54:38] [Rank 0] step:761/10000 train_time:60682ms step_avg:79.74ms +[2025-09-11 09:54:38] [Rank 0] step:761/10000 train_time:60682ms step_avg:79.74ms +[2025-09-11 09:54:39] [Rank 0] step:781/10000 train_time:61328ms step_avg:78.53ms +[2025-09-11 09:54:39] [Rank 0] step:781/10000 train_time:61328ms step_avg:78.53ms +[2025-09-11 09:54:40] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 09:54:40] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 09:54:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 09:54:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 09:55:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 09:55:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 09:55:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:55:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:55:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 09:55:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 09:55:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 09:55:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 09:55:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 09:55:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 09:55:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 09:55:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 09:55:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 09:55:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 09:55:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 09:55:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 09:55:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 09:55:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 09:55:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 09:55:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 09:55:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 09:55:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 09:55:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 09:55:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 09:55:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 09:55:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 09:55:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 09:55:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 09:55:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 09:55:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 09:55:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 09:55:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 09:55:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 09:55:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 09:55:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... 
+[2025-09-11 09:55:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 09:55:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 09:55:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 09:55:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 09:55:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 09:55:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:55:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:55:25] [Rank 0] PRINT: step:800/10000 val_loss:5.9228 total_sharp:3.2613e-04 L1_sharp:3.1954e-01 L2_sharp:2.8043e-01 L3_sharp:2.8913e-01 L4_sharp:3.3867e-01 L5_sharp:4.2361e-01 L6_sharp:5.3497e-01 L7_sharp:6.7635e-01 L8_sharp:1.2317e+00 L9_sharp:1.2224e+00 L10_sharp:1.0547e+00 L11_sharp:9.5410e-01 L12_sharp:1.2739e+00 total_fnorm:7.6000e+01 total_l1_linf:1.5667e+05 total_spectral:3.8000e+01 L1_fnorm:4.6631e-02 L2_fnorm:4.8340e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.7852e-02 L5_fnorm:4.6143e-02 L6_fnorm:4.8096e-02 L7_fnorm:4.8096e-02 L8_fnorm:4.4922e-02 L9_fnorm:4.6875e-02 L10_fnorm:4.6143e-02 L11_fnorm:4.5654e-02 L12_fnorm:4.2969e-02 L1_l1linf:2.1240e-02 L2_l1linf:2.1118e-02 L3_l1linf:2.1240e-02 L4_l1linf:2.1362e-02 L5_l1linf:2.1362e-02 L6_l1linf:2.1240e-02 L7_l1linf:2.1729e-02 L8_l1linf:2.1240e-02 L9_l1linf:2.1484e-02 L10_l1linf:2.0996e-02 L11_l1linf:2.0874e-02 L12_l1linf:2.0264e-02 L1_spectral:6.9610e-04 L2_spectral:7.0785e-04 L3_spectral:7.0793e-04 L4_spectral:7.1062e-04 L5_spectral:6.9167e-04 L6_spectral:7.0913e-04 L7_spectral:7.0722e-04 L8_spectral:6.8745e-04 L9_spectral:6.9976e-04 L10_spectral:6.9524e-04 L11_spectral:6.8424e-04 L12_spectral:6.6998e-04 train_time:61955ms step_avg:77.44ms +[2025-09-11 09:55:25] [Rank 0] PRINT: step:800/10000 val_loss:5.9228 
total_sharp:3.2613e-04 L1_sharp:3.1954e-01 L2_sharp:2.8043e-01 L3_sharp:2.8913e-01 L4_sharp:3.3867e-01 L5_sharp:4.2361e-01 L6_sharp:5.3497e-01 L7_sharp:6.7635e-01 L8_sharp:1.2317e+00 L9_sharp:1.2224e+00 L10_sharp:1.0547e+00 L11_sharp:9.5410e-01 L12_sharp:1.2739e+00 total_fnorm:7.6000e+01 total_l1_linf:1.5667e+05 total_spectral:3.8000e+01 L1_fnorm:4.6631e-02 L2_fnorm:4.8340e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.7852e-02 L5_fnorm:4.6143e-02 L6_fnorm:4.8096e-02 L7_fnorm:4.8096e-02 L8_fnorm:4.4922e-02 L9_fnorm:4.6875e-02 L10_fnorm:4.6143e-02 L11_fnorm:4.5654e-02 L12_fnorm:4.2969e-02 L1_l1linf:2.1240e-02 L2_l1linf:2.1118e-02 L3_l1linf:2.1240e-02 L4_l1linf:2.1362e-02 L5_l1linf:2.1362e-02 L6_l1linf:2.1240e-02 L7_l1linf:2.1729e-02 L8_l1linf:2.1240e-02 L9_l1linf:2.1484e-02 L10_l1linf:2.0996e-02 L11_l1linf:2.0874e-02 L12_l1linf:2.0264e-02 L1_spectral:6.9610e-04 L2_spectral:7.0785e-04 L3_spectral:7.0793e-04 L4_spectral:7.1062e-04 L5_spectral:6.9167e-04 L6_spectral:7.0913e-04 L7_spectral:7.0722e-04 L8_spectral:6.8745e-04 L9_spectral:6.9976e-04 L10_spectral:6.9524e-04 L11_spectral:6.8424e-04 L12_spectral:6.6998e-04 train_time:61955ms step_avg:77.44ms +[2025-09-11 09:55:27] [Rank 0] step:801/10000 train_time:63615ms step_avg:79.42ms +[2025-09-11 09:55:27] [Rank 0] step:801/10000 train_time:63615ms step_avg:79.42ms +[2025-09-11 09:55:27] [Rank 0] step:821/10000 train_time:64265ms step_avg:78.28ms +[2025-09-11 09:55:27] [Rank 0] step:821/10000 train_time:64265ms step_avg:78.28ms +[2025-09-11 09:55:28] [Rank 0] step:841/10000 train_time:64911ms step_avg:77.18ms +[2025-09-11 09:55:28] [Rank 0] step:841/10000 train_time:64911ms step_avg:77.18ms +[2025-09-11 09:55:29] [Rank 0] step:861/10000 train_time:65558ms step_avg:76.14ms +[2025-09-11 09:55:29] [Rank 0] step:861/10000 train_time:65558ms step_avg:76.14ms +[2025-09-11 09:55:29] [Rank 0] step:881/10000 train_time:66205ms step_avg:75.15ms +[2025-09-11 09:55:29] [Rank 0] step:881/10000 train_time:66205ms step_avg:75.15ms +[2025-09-11 
09:55:30] [Rank 0] step:901/10000 train_time:66851ms step_avg:74.20ms +[2025-09-11 09:55:30] [Rank 0] step:901/10000 train_time:66851ms step_avg:74.20ms +[2025-09-11 09:55:30] [Rank 0] step:921/10000 train_time:67497ms step_avg:73.29ms +[2025-09-11 09:55:30] [Rank 0] step:921/10000 train_time:67497ms step_avg:73.29ms +[2025-09-11 09:55:31] [Rank 0] step:941/10000 train_time:68143ms step_avg:72.42ms +[2025-09-11 09:55:31] [Rank 0] step:941/10000 train_time:68143ms step_avg:72.42ms +[2025-09-11 09:55:32] [Rank 0] step:961/10000 train_time:68789ms step_avg:71.58ms +[2025-09-11 09:55:32] [Rank 0] step:961/10000 train_time:68789ms step_avg:71.58ms +[2025-09-11 09:55:32] [Rank 0] step:981/10000 train_time:69435ms step_avg:70.78ms +[2025-09-11 09:55:32] [Rank 0] step:981/10000 train_time:69435ms step_avg:70.78ms +[2025-09-11 09:55:33] [Rank 0] step:1001/10000 train_time:70081ms step_avg:70.01ms +[2025-09-11 09:55:33] [Rank 0] step:1001/10000 train_time:70081ms step_avg:70.01ms +[2025-09-11 09:55:34] [Rank 0] step:1021/10000 train_time:70727ms step_avg:69.27ms +[2025-09-11 09:55:34] [Rank 0] step:1021/10000 train_time:70727ms step_avg:69.27ms +[2025-09-11 09:55:34] [Rank 0] step:1041/10000 train_time:71374ms step_avg:68.56ms +[2025-09-11 09:55:34] [Rank 0] step:1041/10000 train_time:71374ms step_avg:68.56ms +[2025-09-11 09:55:35] [Rank 0] step:1061/10000 train_time:72020ms step_avg:67.88ms +[2025-09-11 09:55:35] [Rank 0] step:1061/10000 train_time:72020ms step_avg:67.88ms +[2025-09-11 09:55:36] [Rank 0] step:1081/10000 train_time:72666ms step_avg:67.22ms +[2025-09-11 09:55:36] [Rank 0] step:1081/10000 train_time:72666ms step_avg:67.22ms +[2025-09-11 09:55:36] [Rank 0] step:1101/10000 train_time:73312ms step_avg:66.59ms +[2025-09-11 09:55:36] [Rank 0] step:1101/10000 train_time:73312ms step_avg:66.59ms +[2025-09-11 09:55:37] [Rank 0] step:1121/10000 train_time:73959ms step_avg:65.98ms +[2025-09-11 09:55:37] [Rank 0] step:1121/10000 train_time:73959ms step_avg:65.98ms 
+[2025-09-11 09:55:38] [Rank 0] step:1141/10000 train_time:74604ms step_avg:65.38ms +[2025-09-11 09:55:38] [Rank 0] step:1141/10000 train_time:74604ms step_avg:65.38ms +[2025-09-11 09:55:38] [Rank 0] step:1161/10000 train_time:75250ms step_avg:64.81ms +[2025-09-11 09:55:38] [Rank 0] step:1161/10000 train_time:75250ms step_avg:64.81ms +[2025-09-11 09:55:39] [Rank 0] step:1181/10000 train_time:75895ms step_avg:64.26ms +[2025-09-11 09:55:39] [Rank 0] step:1181/10000 train_time:75895ms step_avg:64.26ms +[2025-09-11 09:55:40] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 09:55:40] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 09:55:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 09:55:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 09:55:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 09:55:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 09:55:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:55:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:55:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 09:55:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 09:55:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 09:55:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 09:55:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... 
+[2025-09-11 09:55:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 09:55:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 09:55:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 09:55:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 09:55:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 09:55:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 09:55:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 09:55:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 09:55:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 09:55:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 09:55:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 09:55:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 09:55:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 09:55:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 09:55:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 09:55:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 09:55:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 09:55:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 09:55:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... 
+[2025-09-11 09:55:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 09:55:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 09:55:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 09:55:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 09:55:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 09:55:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 09:55:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 09:55:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 09:55:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 09:55:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 09:55:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 09:55:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 09:55:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:55:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:55:50] [Rank 0] PRINT: step:1200/10000 val_loss:5.6678 total_sharp:3.1518e-04 L1_sharp:4.9827e-01 L2_sharp:4.4819e-01 L3_sharp:4.3634e-01 L4_sharp:4.4936e-01 L5_sharp:4.9358e-01 L6_sharp:4.3760e-01 L7_sharp:4.6219e-01 L8_sharp:3.9511e-01 L9_sharp:4.9072e-01 L10_sharp:1.2610e+00 L11_sharp:1.5367e+00 L12_sharp:3.0520e+00 total_fnorm:7.6500e+01 total_l1_linf:1.5155e+05 total_spectral:3.8250e+01 L1_fnorm:4.8584e-02 L2_fnorm:4.9316e-02 L3_fnorm:4.9316e-02 L4_fnorm:4.9316e-02 L5_fnorm:4.8340e-02 L6_fnorm:4.9316e-02 L7_fnorm:4.9561e-02 L8_fnorm:4.8584e-02 L9_fnorm:4.9316e-02 L10_fnorm:4.9316e-02 L11_fnorm:4.9316e-02 L12_fnorm:4.7852e-02 L1_l1linf:2.0386e-02 L2_l1linf:2.0386e-02 L3_l1linf:2.0508e-02 L4_l1linf:2.0264e-02 L5_l1linf:2.0264e-02 L6_l1linf:2.0508e-02 L7_l1linf:2.0508e-02 L8_l1linf:2.0386e-02 L9_l1linf:2.0386e-02 L10_l1linf:2.0752e-02 L11_l1linf:2.0752e-02 L12_l1linf:2.0996e-02 L1_spectral:7.3843e-04 L2_spectral:7.4347e-04 L3_spectral:7.4415e-04 L4_spectral:7.4494e-04 L5_spectral:7.4472e-04 L6_spectral:7.5471e-04 L7_spectral:7.5644e-04 L8_spectral:7.4556e-04 L9_spectral:7.4747e-04 L10_spectral:7.4511e-04 L11_spectral:7.4071e-04 L12_spectral:7.3558e-04 train_time:76523ms step_avg:63.77ms +[2025-09-11 09:55:50] [Rank 0] PRINT: step:1200/10000 val_loss:5.6678 total_sharp:3.1518e-04 L1_sharp:4.9827e-01 L2_sharp:4.4819e-01 L3_sharp:4.3634e-01 L4_sharp:4.4936e-01 L5_sharp:4.9358e-01 L6_sharp:4.3760e-01 L7_sharp:4.6219e-01 L8_sharp:3.9511e-01 L9_sharp:4.9072e-01 L10_sharp:1.2610e+00 L11_sharp:1.5367e+00 L12_sharp:3.0520e+00 total_fnorm:7.6500e+01 total_l1_linf:1.5155e+05 total_spectral:3.8250e+01 L1_fnorm:4.8584e-02 L2_fnorm:4.9316e-02 L3_fnorm:4.9316e-02 L4_fnorm:4.9316e-02 L5_fnorm:4.8340e-02 L6_fnorm:4.9316e-02 L7_fnorm:4.9561e-02 L8_fnorm:4.8584e-02 L9_fnorm:4.9316e-02 L10_fnorm:4.9316e-02 L11_fnorm:4.9316e-02 L12_fnorm:4.7852e-02 L1_l1linf:2.0386e-02 L2_l1linf:2.0386e-02 L3_l1linf:2.0508e-02 L4_l1linf:2.0264e-02 L5_l1linf:2.0264e-02 
L6_l1linf:2.0508e-02 L7_l1linf:2.0508e-02 L8_l1linf:2.0386e-02 L9_l1linf:2.0386e-02 L10_l1linf:2.0752e-02 L11_l1linf:2.0752e-02 L12_l1linf:2.0996e-02 L1_spectral:7.3843e-04 L2_spectral:7.4347e-04 L3_spectral:7.4415e-04 L4_spectral:7.4494e-04 L5_spectral:7.4472e-04 L6_spectral:7.5471e-04 L7_spectral:7.5644e-04 L8_spectral:7.4556e-04 L9_spectral:7.4747e-04 L10_spectral:7.4511e-04 L11_spectral:7.4071e-04 L12_spectral:7.3558e-04 train_time:76523ms step_avg:63.77ms +[2025-09-11 09:55:52] [Rank 0] step:1201/10000 train_time:78154ms step_avg:65.07ms +[2025-09-11 09:55:52] [Rank 0] step:1201/10000 train_time:78154ms step_avg:65.07ms +[2025-09-11 09:55:53] [Rank 0] step:1221/10000 train_time:78805ms step_avg:64.54ms +[2025-09-11 09:55:53] [Rank 0] step:1221/10000 train_time:78805ms step_avg:64.54ms +[2025-09-11 09:55:53] [Rank 0] step:1241/10000 train_time:79453ms step_avg:64.02ms +[2025-09-11 09:55:53] [Rank 0] step:1241/10000 train_time:79453ms step_avg:64.02ms +[2025-09-11 09:55:54] [Rank 0] step:1261/10000 train_time:80100ms step_avg:63.52ms +[2025-09-11 09:55:54] [Rank 0] step:1261/10000 train_time:80100ms step_avg:63.52ms +[2025-09-11 09:55:55] [Rank 0] step:1281/10000 train_time:80747ms step_avg:63.03ms +[2025-09-11 09:55:55] [Rank 0] step:1281/10000 train_time:80747ms step_avg:63.03ms +[2025-09-11 09:55:55] [Rank 0] step:1301/10000 train_time:81394ms step_avg:62.56ms +[2025-09-11 09:55:55] [Rank 0] step:1301/10000 train_time:81394ms step_avg:62.56ms +[2025-09-11 09:55:56] [Rank 0] step:1321/10000 train_time:82039ms step_avg:62.10ms +[2025-09-11 09:55:56] [Rank 0] step:1321/10000 train_time:82039ms step_avg:62.10ms +[2025-09-11 09:55:57] [Rank 0] step:1341/10000 train_time:82685ms step_avg:61.66ms +[2025-09-11 09:55:57] [Rank 0] step:1341/10000 train_time:82685ms step_avg:61.66ms +[2025-09-11 09:55:57] [Rank 0] step:1361/10000 train_time:83331ms step_avg:61.23ms +[2025-09-11 09:55:57] [Rank 0] step:1361/10000 train_time:83331ms step_avg:61.23ms +[2025-09-11 09:55:58] 
[Rank 0] step:1381/10000 train_time:83977ms step_avg:60.81ms +[2025-09-11 09:55:58] [Rank 0] step:1381/10000 train_time:83977ms step_avg:60.81ms +[2025-09-11 09:55:59] [Rank 0] step:1401/10000 train_time:84623ms step_avg:60.40ms +[2025-09-11 09:55:59] [Rank 0] step:1401/10000 train_time:84623ms step_avg:60.40ms +[2025-09-11 09:55:59] [Rank 0] step:1421/10000 train_time:85268ms step_avg:60.01ms +[2025-09-11 09:55:59] [Rank 0] step:1421/10000 train_time:85268ms step_avg:60.01ms +[2025-09-11 09:56:00] [Rank 0] step:1441/10000 train_time:85914ms step_avg:59.62ms +[2025-09-11 09:56:00] [Rank 0] step:1441/10000 train_time:85914ms step_avg:59.62ms +[2025-09-11 09:56:00] [Rank 0] step:1461/10000 train_time:86559ms step_avg:59.25ms +[2025-09-11 09:56:00] [Rank 0] step:1461/10000 train_time:86559ms step_avg:59.25ms +[2025-09-11 09:56:01] [Rank 0] step:1481/10000 train_time:87205ms step_avg:58.88ms +[2025-09-11 09:56:01] [Rank 0] step:1481/10000 train_time:87205ms step_avg:58.88ms +[2025-09-11 09:56:02] [Rank 0] step:1501/10000 train_time:87856ms step_avg:58.53ms +[2025-09-11 09:56:02] [Rank 0] step:1501/10000 train_time:87856ms step_avg:58.53ms +[2025-09-11 09:56:02] [Rank 0] step:1521/10000 train_time:88507ms step_avg:58.19ms +[2025-09-11 09:56:02] [Rank 0] step:1521/10000 train_time:88507ms step_avg:58.19ms +[2025-09-11 09:56:03] [Rank 0] step:1541/10000 train_time:89158ms step_avg:57.86ms +[2025-09-11 09:56:03] [Rank 0] step:1541/10000 train_time:89158ms step_avg:57.86ms +[2025-09-11 09:56:04] [Rank 0] step:1561/10000 train_time:89808ms step_avg:57.53ms +[2025-09-11 09:56:04] [Rank 0] step:1561/10000 train_time:89808ms step_avg:57.53ms +[2025-09-11 09:56:04] [Rank 0] step:1581/10000 train_time:90459ms step_avg:57.22ms +[2025-09-11 09:56:04] [Rank 0] step:1581/10000 train_time:90459ms step_avg:57.22ms +[2025-09-11 09:56:05] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 09:56:05] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 09:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 09:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 09:56:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 09:56:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 09:56:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:56:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:56:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 09:56:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 09:56:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 09:56:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 09:56:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 09:56:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 09:56:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 09:56:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 09:56:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 09:56:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 09:56:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 09:56:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 09:56:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 09:56:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 09:56:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 09:56:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 09:56:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 09:56:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 09:56:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 09:56:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 09:56:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 09:56:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 09:56:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 09:56:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 09:56:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 09:56:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 09:56:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 09:56:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 09:56:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 09:56:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 09:56:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 09:56:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 09:56:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 09:56:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 09:56:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 09:56:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 09:56:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:56:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:56:17] [Rank 0] PRINT: step:1600/10000 val_loss:5.5051 total_sharp:3.1296e-04 L1_sharp:3.8608e-01 L2_sharp:3.8613e-01 L3_sharp:4.1846e-01 L4_sharp:4.6689e-01 L5_sharp:5.2249e-01 L6_sharp:5.4107e-01 L7_sharp:8.4976e-01 L8_sharp:9.2331e-01 L9_sharp:1.5339e+00 L10_sharp:1.9049e+00 L11_sharp:2.3554e+00 L12_sharp:4.1831e+00 total_fnorm:7.5000e+01 total_l1_linf:1.4131e+05 total_spectral:3.7500e+01 L1_fnorm:4.8584e-02 L2_fnorm:4.9561e-02 L3_fnorm:4.9805e-02 L4_fnorm:4.9561e-02 L5_fnorm:4.8828e-02 L6_fnorm:4.9316e-02 L7_fnorm:4.9805e-02 L8_fnorm:4.8584e-02 L9_fnorm:4.9316e-02 L10_fnorm:4.9316e-02 L11_fnorm:4.9561e-02 L12_fnorm:4.7607e-02 L1_l1linf:1.9409e-02 L2_l1linf:1.9409e-02 L3_l1linf:1.9287e-02 L4_l1linf:1.9653e-02 L5_l1linf:1.9409e-02 L6_l1linf:1.9287e-02 L7_l1linf:1.9531e-02 L8_l1linf:1.9653e-02 L9_l1linf:2.0020e-02 L10_l1linf:1.9897e-02 L11_l1linf:2.0020e-02 L12_l1linf:2.0142e-02 L1_spectral:7.5686e-04 L2_spectral:7.6021e-04 L3_spectral:7.5875e-04 L4_spectral:7.6706e-04 L5_spectral:7.6145e-04 L6_spectral:7.7020e-04 L7_spectral:7.7190e-04 L8_spectral:7.6185e-04 L9_spectral:7.7038e-04 L10_spectral:7.6705e-04 L11_spectral:7.6691e-04 L12_spectral:7.4130e-04 train_time:91092ms step_avg:56.93ms +[2025-09-11 09:56:17] [Rank 0] PRINT: step:1600/10000 
val_loss:5.5051 total_sharp:3.1296e-04 L1_sharp:3.8608e-01 L2_sharp:3.8613e-01 L3_sharp:4.1846e-01 L4_sharp:4.6689e-01 L5_sharp:5.2249e-01 L6_sharp:5.4107e-01 L7_sharp:8.4976e-01 L8_sharp:9.2331e-01 L9_sharp:1.5339e+00 L10_sharp:1.9049e+00 L11_sharp:2.3554e+00 L12_sharp:4.1831e+00 total_fnorm:7.5000e+01 total_l1_linf:1.4131e+05 total_spectral:3.7500e+01 L1_fnorm:4.8584e-02 L2_fnorm:4.9561e-02 L3_fnorm:4.9805e-02 L4_fnorm:4.9561e-02 L5_fnorm:4.8828e-02 L6_fnorm:4.9316e-02 L7_fnorm:4.9805e-02 L8_fnorm:4.8584e-02 L9_fnorm:4.9316e-02 L10_fnorm:4.9316e-02 L11_fnorm:4.9561e-02 L12_fnorm:4.7607e-02 L1_l1linf:1.9409e-02 L2_l1linf:1.9409e-02 L3_l1linf:1.9287e-02 L4_l1linf:1.9653e-02 L5_l1linf:1.9409e-02 L6_l1linf:1.9287e-02 L7_l1linf:1.9531e-02 L8_l1linf:1.9653e-02 L9_l1linf:2.0020e-02 L10_l1linf:1.9897e-02 L11_l1linf:2.0020e-02 L12_l1linf:2.0142e-02 L1_spectral:7.5686e-04 L2_spectral:7.6021e-04 L3_spectral:7.5875e-04 L4_spectral:7.6706e-04 L5_spectral:7.6145e-04 L6_spectral:7.7020e-04 L7_spectral:7.7190e-04 L8_spectral:7.6185e-04 L9_spectral:7.7038e-04 L10_spectral:7.6705e-04 L11_spectral:7.6691e-04 L12_spectral:7.4130e-04 train_time:91092ms step_avg:56.93ms +[2025-09-11 09:56:18] [Rank 0] step:1601/10000 train_time:92730ms step_avg:57.92ms +[2025-09-11 09:56:18] [Rank 0] step:1601/10000 train_time:92730ms step_avg:57.92ms +[2025-09-11 09:56:19] [Rank 0] step:1621/10000 train_time:93386ms step_avg:57.61ms +[2025-09-11 09:56:19] [Rank 0] step:1621/10000 train_time:93386ms step_avg:57.61ms +[2025-09-11 09:56:20] [Rank 0] step:1641/10000 train_time:94041ms step_avg:57.31ms +[2025-09-11 09:56:20] [Rank 0] step:1641/10000 train_time:94041ms step_avg:57.31ms +[2025-09-11 09:56:20] [Rank 0] step:1661/10000 train_time:94693ms step_avg:57.01ms +[2025-09-11 09:56:20] [Rank 0] step:1661/10000 train_time:94693ms step_avg:57.01ms +[2025-09-11 09:56:21] [Rank 0] step:1681/10000 train_time:95345ms step_avg:56.72ms +[2025-09-11 09:56:21] [Rank 0] step:1681/10000 train_time:95345ms 
step_avg:56.72ms +[2025-09-11 09:56:22] [Rank 0] step:1701/10000 train_time:95995ms step_avg:56.43ms +[2025-09-11 09:56:22] [Rank 0] step:1701/10000 train_time:95995ms step_avg:56.43ms +[2025-09-11 09:56:22] [Rank 0] step:1721/10000 train_time:96646ms step_avg:56.16ms +[2025-09-11 09:56:22] [Rank 0] step:1721/10000 train_time:96646ms step_avg:56.16ms +[2025-09-11 09:56:23] [Rank 0] step:1741/10000 train_time:97298ms step_avg:55.89ms +[2025-09-11 09:56:23] [Rank 0] step:1741/10000 train_time:97298ms step_avg:55.89ms +[2025-09-11 09:56:23] [Rank 0] step:1761/10000 train_time:97949ms step_avg:55.62ms +[2025-09-11 09:56:23] [Rank 0] step:1761/10000 train_time:97949ms step_avg:55.62ms +[2025-09-11 09:56:24] [Rank 0] step:1781/10000 train_time:98601ms step_avg:55.36ms +[2025-09-11 09:56:24] [Rank 0] step:1781/10000 train_time:98601ms step_avg:55.36ms +[2025-09-11 09:56:25] [Rank 0] step:1801/10000 train_time:99253ms step_avg:55.11ms +[2025-09-11 09:56:25] [Rank 0] step:1801/10000 train_time:99253ms step_avg:55.11ms +[2025-09-11 09:56:25] [Rank 0] step:1821/10000 train_time:99904ms step_avg:54.86ms +[2025-09-11 09:56:25] [Rank 0] step:1821/10000 train_time:99904ms step_avg:54.86ms +[2025-09-11 09:56:26] [Rank 0] step:1841/10000 train_time:100555ms step_avg:54.62ms +[2025-09-11 09:56:26] [Rank 0] step:1841/10000 train_time:100555ms step_avg:54.62ms +[2025-09-11 09:56:27] [Rank 0] step:1861/10000 train_time:101206ms step_avg:54.38ms +[2025-09-11 09:56:27] [Rank 0] step:1861/10000 train_time:101206ms step_avg:54.38ms +[2025-09-11 09:56:27] [Rank 0] step:1881/10000 train_time:101857ms step_avg:54.15ms +[2025-09-11 09:56:27] [Rank 0] step:1881/10000 train_time:101857ms step_avg:54.15ms +[2025-09-11 09:56:28] [Rank 0] step:1901/10000 train_time:102508ms step_avg:53.92ms +[2025-09-11 09:56:28] [Rank 0] step:1901/10000 train_time:102508ms step_avg:53.92ms +[2025-09-11 09:56:29] [Rank 0] step:1921/10000 train_time:103159ms step_avg:53.70ms +[2025-09-11 09:56:29] [Rank 0] 
step:1921/10000 train_time:103159ms step_avg:53.70ms +[2025-09-11 09:56:29] [Rank 0] step:1941/10000 train_time:103812ms step_avg:53.48ms +[2025-09-11 09:56:29] [Rank 0] step:1941/10000 train_time:103812ms step_avg:53.48ms +[2025-09-11 09:56:30] [Rank 0] step:1961/10000 train_time:104463ms step_avg:53.27ms +[2025-09-11 09:56:30] [Rank 0] step:1961/10000 train_time:104463ms step_avg:53.27ms +[2025-09-11 09:56:31] [Rank 0] step:1981/10000 train_time:105114ms step_avg:53.06ms +[2025-09-11 09:56:31] [Rank 0] step:1981/10000 train_time:105114ms step_avg:53.06ms +[2025-09-11 09:56:31] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 09:56:31] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 09:56:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 09:56:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 09:56:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 09:56:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 09:56:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:56:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:56:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 09:56:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 09:56:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 09:56:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 09:56:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 09:56:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 09:56:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 09:56:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 09:56:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 09:56:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 09:56:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 09:56:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 09:56:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 09:56:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 09:56:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 09:56:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 09:56:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 09:56:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 09:56:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 09:56:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 09:56:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 09:56:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 09:56:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 09:56:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 09:56:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 09:56:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 09:56:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 09:56:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 09:56:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 09:56:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 09:56:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 09:56:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 09:56:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 09:56:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 09:56:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 09:56:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 09:56:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:56:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:56:42] [Rank 0] PRINT: step:2000/10000 val_loss:5.3835 total_sharp:2.3315e-04 L1_sharp:2.9944e-01 L2_sharp:2.8459e-01 L3_sharp:3.4874e-01 L4_sharp:3.5851e-01 L5_sharp:3.6582e-01 L6_sharp:3.3039e-01 L7_sharp:4.3844e-01 L8_sharp:4.7085e-01 L9_sharp:1.0459e+00 L10_sharp:2.1000e+00 L11_sharp:2.9341e+00 L12_sharp:4.7083e+00 total_fnorm:7.3500e+01 total_l1_linf:1.4336e+05 total_spectral:3.6750e+01 L1_fnorm:4.8828e-02 L2_fnorm:4.9316e-02 L3_fnorm:4.9316e-02 L4_fnorm:4.9561e-02 L5_fnorm:4.8828e-02 L6_fnorm:4.9316e-02 L7_fnorm:4.9316e-02 L8_fnorm:4.8584e-02 L9_fnorm:4.9316e-02 L10_fnorm:4.9316e-02 L11_fnorm:4.9316e-02 L12_fnorm:4.8584e-02 L1_l1linf:1.7944e-02 L2_l1linf:1.8311e-02 L3_l1linf:1.8188e-02 L4_l1linf:1.8555e-02 L5_l1linf:1.8433e-02 L6_l1linf:1.8311e-02 L7_l1linf:1.8433e-02 L8_l1linf:1.8433e-02 L9_l1linf:1.8311e-02 L10_l1linf:1.9165e-02 L11_l1linf:1.8921e-02 L12_l1linf:1.9165e-02 L1_spectral:7.7187e-04 L2_spectral:7.6948e-04 L3_spectral:7.8386e-04 L4_spectral:7.7589e-04 L5_spectral:7.8365e-04 L6_spectral:7.8458e-04 L7_spectral:7.8247e-04 L8_spectral:7.8330e-04 L9_spectral:7.7980e-04 L10_spectral:7.7807e-04 L11_spectral:7.7413e-04 L12_spectral:7.6295e-04 train_time:105746ms step_avg:52.87ms +[2025-09-11 09:56:42] [Rank 0] PRINT: step:2000/10000 val_loss:5.3835 total_sharp:2.3315e-04 L1_sharp:2.9944e-01 L2_sharp:2.8459e-01 L3_sharp:3.4874e-01 L4_sharp:3.5851e-01 L5_sharp:3.6582e-01 L6_sharp:3.3039e-01 L7_sharp:4.3844e-01 L8_sharp:4.7085e-01 L9_sharp:1.0459e+00 L10_sharp:2.1000e+00 L11_sharp:2.9341e+00 L12_sharp:4.7083e+00 total_fnorm:7.3500e+01 total_l1_linf:1.4336e+05 total_spectral:3.6750e+01 L1_fnorm:4.8828e-02 L2_fnorm:4.9316e-02 L3_fnorm:4.9316e-02 L4_fnorm:4.9561e-02 L5_fnorm:4.8828e-02 L6_fnorm:4.9316e-02 L7_fnorm:4.9316e-02 L8_fnorm:4.8584e-02 L9_fnorm:4.9316e-02 L10_fnorm:4.9316e-02 L11_fnorm:4.9316e-02 L12_fnorm:4.8584e-02 L1_l1linf:1.7944e-02 L2_l1linf:1.8311e-02 L3_l1linf:1.8188e-02 L4_l1linf:1.8555e-02 L5_l1linf:1.8433e-02 
L6_l1linf:1.8311e-02 L7_l1linf:1.8433e-02 L8_l1linf:1.8433e-02 L9_l1linf:1.8311e-02 L10_l1linf:1.9165e-02 L11_l1linf:1.8921e-02 L12_l1linf:1.9165e-02 L1_spectral:7.7187e-04 L2_spectral:7.6948e-04 L3_spectral:7.8386e-04 L4_spectral:7.7589e-04 L5_spectral:7.8365e-04 L6_spectral:7.8458e-04 L7_spectral:7.8247e-04 L8_spectral:7.8330e-04 L9_spectral:7.7980e-04 L10_spectral:7.7807e-04 L11_spectral:7.7413e-04 L12_spectral:7.6295e-04 train_time:105746ms step_avg:52.87ms +[2025-09-11 09:56:44] [Rank 0] step:2001/10000 train_time:107489ms step_avg:53.72ms +[2025-09-11 09:56:44] [Rank 0] step:2001/10000 train_time:107489ms step_avg:53.72ms +[2025-09-11 09:56:45] [Rank 0] step:2021/10000 train_time:108166ms step_avg:53.52ms +[2025-09-11 09:56:45] [Rank 0] step:2021/10000 train_time:108166ms step_avg:53.52ms +[2025-09-11 09:56:45] [Rank 0] step:2041/10000 train_time:108817ms step_avg:53.32ms +[2025-09-11 09:56:45] [Rank 0] step:2041/10000 train_time:108817ms step_avg:53.32ms +[2025-09-11 09:56:46] [Rank 0] step:2061/10000 train_time:109468ms step_avg:53.11ms +[2025-09-11 09:56:46] [Rank 0] step:2061/10000 train_time:109468ms step_avg:53.11ms +[2025-09-11 09:56:46] [Rank 0] step:2081/10000 train_time:110118ms step_avg:52.92ms +[2025-09-11 09:56:46] [Rank 0] step:2081/10000 train_time:110118ms step_avg:52.92ms +[2025-09-11 09:56:47] [Rank 0] step:2101/10000 train_time:110769ms step_avg:52.72ms +[2025-09-11 09:56:47] [Rank 0] step:2101/10000 train_time:110769ms step_avg:52.72ms +[2025-09-11 09:56:48] [Rank 0] step:2121/10000 train_time:111419ms step_avg:52.53ms +[2025-09-11 09:56:48] [Rank 0] step:2121/10000 train_time:111419ms step_avg:52.53ms +[2025-09-11 09:56:48] [Rank 0] step:2141/10000 train_time:112070ms step_avg:52.34ms +[2025-09-11 09:56:48] [Rank 0] step:2141/10000 train_time:112070ms step_avg:52.34ms +[2025-09-11 09:56:49] [Rank 0] step:2161/10000 train_time:112721ms step_avg:52.16ms +[2025-09-11 09:56:49] [Rank 0] step:2161/10000 train_time:112721ms step_avg:52.16ms 
+[2025-09-11 09:56:50] [Rank 0] step:2181/10000 train_time:113370ms step_avg:51.98ms +[2025-09-11 09:56:50] [Rank 0] step:2181/10000 train_time:113370ms step_avg:51.98ms +[2025-09-11 09:56:50] [Rank 0] step:2201/10000 train_time:114020ms step_avg:51.80ms +[2025-09-11 09:56:50] [Rank 0] step:2201/10000 train_time:114020ms step_avg:51.80ms +[2025-09-11 09:56:51] [Rank 0] step:2221/10000 train_time:114671ms step_avg:51.63ms +[2025-09-11 09:56:51] [Rank 0] step:2221/10000 train_time:114671ms step_avg:51.63ms +[2025-09-11 09:56:52] [Rank 0] step:2241/10000 train_time:115332ms step_avg:51.46ms +[2025-09-11 09:56:52] [Rank 0] step:2241/10000 train_time:115332ms step_avg:51.46ms +[2025-09-11 09:56:52] [Rank 0] step:2261/10000 train_time:115995ms step_avg:51.30ms +[2025-09-11 09:56:52] [Rank 0] step:2261/10000 train_time:115995ms step_avg:51.30ms +[2025-09-11 09:56:53] [Rank 0] step:2281/10000 train_time:116659ms step_avg:51.14ms +[2025-09-11 09:56:53] [Rank 0] step:2281/10000 train_time:116659ms step_avg:51.14ms +[2025-09-11 09:56:54] [Rank 0] step:2301/10000 train_time:117322ms step_avg:50.99ms +[2025-09-11 09:56:54] [Rank 0] step:2301/10000 train_time:117322ms step_avg:50.99ms +[2025-09-11 09:56:54] [Rank 0] step:2321/10000 train_time:117985ms step_avg:50.83ms +[2025-09-11 09:56:54] [Rank 0] step:2321/10000 train_time:117985ms step_avg:50.83ms +[2025-09-11 09:56:55] [Rank 0] step:2341/10000 train_time:118649ms step_avg:50.68ms +[2025-09-11 09:56:55] [Rank 0] step:2341/10000 train_time:118649ms step_avg:50.68ms +[2025-09-11 09:56:56] [Rank 0] step:2361/10000 train_time:119313ms step_avg:50.53ms +[2025-09-11 09:56:56] [Rank 0] step:2361/10000 train_time:119313ms step_avg:50.53ms +[2025-09-11 09:56:56] [Rank 0] step:2381/10000 train_time:119975ms step_avg:50.39ms +[2025-09-11 09:56:56] [Rank 0] step:2381/10000 train_time:119975ms step_avg:50.39ms +[2025-09-11 09:56:57] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 09:56:57] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 09:56:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 09:56:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 09:57:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 09:57:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 09:57:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:57:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:57:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 09:57:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 09:57:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 09:57:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 09:57:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 09:57:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 09:57:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 09:57:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 09:57:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 09:57:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 09:57:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 09:57:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 09:57:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 09:57:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 09:57:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 09:57:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 09:57:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 09:57:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 09:57:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 09:57:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 09:57:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 09:57:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 09:57:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 09:57:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 09:57:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 09:57:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 09:57:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 09:57:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 09:57:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 09:57:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 09:57:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 09:57:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 09:57:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 09:57:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 09:57:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 09:57:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 09:57:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:57:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:57:08] [Rank 0] PRINT: step:2400/10000 val_loss:5.2699 total_sharp:2.1303e-04 L1_sharp:2.8278e-01 L2_sharp:3.3571e-01 L3_sharp:3.9803e-01 L4_sharp:4.7405e-01 L5_sharp:5.1620e-01 L6_sharp:5.1732e-01 L7_sharp:4.5584e-01 L8_sharp:4.2593e-01 L9_sharp:4.3141e-01 L10_sharp:4.7556e-01 L11_sharp:5.0292e-01 L12_sharp:1.4513e+00 total_fnorm:7.0000e+01 total_l1_linf:1.3107e+05 total_spectral:3.5000e+01 L1_fnorm:4.8828e-02 L2_fnorm:4.9316e-02 L3_fnorm:4.9316e-02 L4_fnorm:4.9316e-02 L5_fnorm:4.9072e-02 L6_fnorm:4.8828e-02 L7_fnorm:4.8828e-02 L8_fnorm:4.8584e-02 L9_fnorm:4.8828e-02 L10_fnorm:4.8828e-02 L11_fnorm:4.8584e-02 L12_fnorm:4.8340e-02 L1_l1linf:1.7700e-02 L2_l1linf:1.7944e-02 L3_l1linf:1.7944e-02 L4_l1linf:1.8066e-02 L5_l1linf:1.7944e-02 L6_l1linf:1.7944e-02 L7_l1linf:1.7578e-02 L8_l1linf:1.7578e-02 L9_l1linf:1.7334e-02 L10_l1linf:1.7822e-02 L11_l1linf:1.7944e-02 L12_l1linf:1.8311e-02 L1_spectral:7.7184e-04 L2_spectral:7.7561e-04 L3_spectral:7.7800e-04 L4_spectral:7.8585e-04 L5_spectral:7.8511e-04 L6_spectral:7.8608e-04 L7_spectral:7.8826e-04 L8_spectral:7.8565e-04 L9_spectral:7.9214e-04 L10_spectral:7.8042e-04 L11_spectral:7.8097e-04 L12_spectral:7.7460e-04 train_time:120622ms step_avg:50.26ms +[2025-09-11 09:57:08] [Rank 0] PRINT: step:2400/10000 
val_loss:5.2699 total_sharp:2.1303e-04 L1_sharp:2.8278e-01 L2_sharp:3.3571e-01 L3_sharp:3.9803e-01 L4_sharp:4.7405e-01 L5_sharp:5.1620e-01 L6_sharp:5.1732e-01 L7_sharp:4.5584e-01 L8_sharp:4.2593e-01 L9_sharp:4.3141e-01 L10_sharp:4.7556e-01 L11_sharp:5.0292e-01 L12_sharp:1.4513e+00 total_fnorm:7.0000e+01 total_l1_linf:1.3107e+05 total_spectral:3.5000e+01 L1_fnorm:4.8828e-02 L2_fnorm:4.9316e-02 L3_fnorm:4.9316e-02 L4_fnorm:4.9316e-02 L5_fnorm:4.9072e-02 L6_fnorm:4.8828e-02 L7_fnorm:4.8828e-02 L8_fnorm:4.8584e-02 L9_fnorm:4.8828e-02 L10_fnorm:4.8828e-02 L11_fnorm:4.8584e-02 L12_fnorm:4.8340e-02 L1_l1linf:1.7700e-02 L2_l1linf:1.7944e-02 L3_l1linf:1.7944e-02 L4_l1linf:1.8066e-02 L5_l1linf:1.7944e-02 L6_l1linf:1.7944e-02 L7_l1linf:1.7578e-02 L8_l1linf:1.7578e-02 L9_l1linf:1.7334e-02 L10_l1linf:1.7822e-02 L11_l1linf:1.7944e-02 L12_l1linf:1.8311e-02 L1_spectral:7.7184e-04 L2_spectral:7.7561e-04 L3_spectral:7.7800e-04 L4_spectral:7.8585e-04 L5_spectral:7.8511e-04 L6_spectral:7.8608e-04 L7_spectral:7.8826e-04 L8_spectral:7.8565e-04 L9_spectral:7.9214e-04 L10_spectral:7.8042e-04 L11_spectral:7.8097e-04 L12_spectral:7.7460e-04 train_time:120622ms step_avg:50.26ms +[2025-09-11 09:57:10] [Rank 0] step:2401/10000 train_time:122508ms step_avg:51.02ms +[2025-09-11 09:57:10] [Rank 0] step:2401/10000 train_time:122508ms step_avg:51.02ms +[2025-09-11 09:57:11] [Rank 0] step:2421/10000 train_time:123253ms step_avg:50.91ms +[2025-09-11 09:57:11] [Rank 0] step:2421/10000 train_time:123253ms step_avg:50.91ms +[2025-09-11 09:57:12] [Rank 0] step:2441/10000 train_time:123921ms step_avg:50.77ms +[2025-09-11 09:57:12] [Rank 0] step:2441/10000 train_time:123921ms step_avg:50.77ms +[2025-09-11 09:57:12] [Rank 0] step:2461/10000 train_time:124588ms step_avg:50.62ms +[2025-09-11 09:57:12] [Rank 0] step:2461/10000 train_time:124588ms step_avg:50.62ms +[2025-09-11 09:57:13] [Rank 0] step:2481/10000 train_time:125527ms step_avg:50.60ms +[2025-09-11 09:57:13] [Rank 0] step:2481/10000 
train_time:125527ms step_avg:50.60ms +[2025-09-11 09:57:14] [Rank 0] step:2501/10000 train_time:126192ms step_avg:50.46ms +[2025-09-11 09:57:14] [Rank 0] step:2501/10000 train_time:126192ms step_avg:50.46ms +[2025-09-11 09:57:15] [Rank 0] step:2521/10000 train_time:126857ms step_avg:50.32ms +[2025-09-11 09:57:15] [Rank 0] step:2521/10000 train_time:126857ms step_avg:50.32ms +[2025-09-11 09:57:16] [Rank 0] step:2541/10000 train_time:127824ms step_avg:50.30ms +[2025-09-11 09:57:16] [Rank 0] step:2541/10000 train_time:127824ms step_avg:50.30ms +[2025-09-11 09:57:16] [Rank 0] step:2561/10000 train_time:128489ms step_avg:50.17ms +[2025-09-11 09:57:16] [Rank 0] step:2561/10000 train_time:128489ms step_avg:50.17ms +[2025-09-11 09:57:17] [Rank 0] step:2581/10000 train_time:129154ms step_avg:50.04ms +[2025-09-11 09:57:17] [Rank 0] step:2581/10000 train_time:129154ms step_avg:50.04ms +[2025-09-11 09:57:18] [Rank 0] step:2601/10000 train_time:129819ms step_avg:49.91ms +[2025-09-11 09:57:18] [Rank 0] step:2601/10000 train_time:129819ms step_avg:49.91ms +[2025-09-11 09:57:18] [Rank 0] step:2621/10000 train_time:130485ms step_avg:49.78ms +[2025-09-11 09:57:18] [Rank 0] step:2621/10000 train_time:130485ms step_avg:49.78ms +[2025-09-11 09:57:19] [Rank 0] step:2641/10000 train_time:131152ms step_avg:49.66ms +[2025-09-11 09:57:19] [Rank 0] step:2641/10000 train_time:131152ms step_avg:49.66ms +[2025-09-11 09:57:20] [Rank 0] step:2661/10000 train_time:131817ms step_avg:49.54ms +[2025-09-11 09:57:20] [Rank 0] step:2661/10000 train_time:131817ms step_avg:49.54ms +[2025-09-11 09:57:20] [Rank 0] step:2681/10000 train_time:132483ms step_avg:49.42ms +[2025-09-11 09:57:20] [Rank 0] step:2681/10000 train_time:132483ms step_avg:49.42ms +[2025-09-11 09:57:21] [Rank 0] step:2701/10000 train_time:133149ms step_avg:49.30ms +[2025-09-11 09:57:21] [Rank 0] step:2701/10000 train_time:133149ms step_avg:49.30ms +[2025-09-11 09:57:22] [Rank 0] step:2721/10000 train_time:133815ms step_avg:49.18ms 
+[2025-09-11 09:57:22] [Rank 0] step:2721/10000 train_time:133815ms step_avg:49.18ms +[2025-09-11 09:57:22] [Rank 0] step:2741/10000 train_time:134480ms step_avg:49.06ms +[2025-09-11 09:57:22] [Rank 0] step:2741/10000 train_time:134480ms step_avg:49.06ms +[2025-09-11 09:57:23] [Rank 0] step:2761/10000 train_time:135145ms step_avg:48.95ms +[2025-09-11 09:57:23] [Rank 0] step:2761/10000 train_time:135145ms step_avg:48.95ms +[2025-09-11 09:57:24] [Rank 0] step:2781/10000 train_time:135810ms step_avg:48.83ms +[2025-09-11 09:57:24] [Rank 0] step:2781/10000 train_time:135810ms step_avg:48.83ms +[2025-09-11 09:57:24] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 09:57:24] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 09:57:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 09:57:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 09:57:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 09:57:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 09:57:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:57:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:57:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 09:57:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 09:57:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 09:57:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 09:57:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 09:57:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 09:57:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 09:57:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 09:57:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 09:57:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 09:57:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 09:57:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 09:57:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 09:57:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 09:57:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 09:57:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 09:57:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 09:57:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 09:57:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 09:57:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 09:57:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 09:57:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 09:57:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 09:57:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 09:57:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 09:57:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 09:57:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 09:57:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 09:57:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 09:57:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 09:57:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 09:57:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 09:57:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 09:57:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 09:57:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 09:57:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 09:57:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:57:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:57:35] [Rank 0] PRINT: step:2800/10000 val_loss:5.1855 total_sharp:2.3305e-04 L1_sharp:2.8754e-01 L2_sharp:3.4296e-01 L3_sharp:4.2003e-01 L4_sharp:4.9776e-01 L5_sharp:5.8919e-01 L6_sharp:6.3700e-01 L7_sharp:5.9791e-01 L8_sharp:5.3580e-01 L9_sharp:5.6893e-01 L10_sharp:6.2919e-01 L11_sharp:6.3754e-01 L12_sharp:1.6585e+00 total_fnorm:6.7500e+01 total_l1_linf:1.2646e+05 total_spectral:3.3750e+01 L1_fnorm:4.8584e-02 L2_fnorm:4.9072e-02 L3_fnorm:4.9316e-02 L4_fnorm:4.9316e-02 L5_fnorm:4.8828e-02 L6_fnorm:4.8828e-02 L7_fnorm:4.8828e-02 L8_fnorm:4.8340e-02 L9_fnorm:4.8584e-02 L10_fnorm:4.8584e-02 L11_fnorm:4.8584e-02 L12_fnorm:4.8096e-02 L1_l1linf:1.6724e-02 L2_l1linf:1.6846e-02 L3_l1linf:1.6968e-02 L4_l1linf:1.7090e-02 L5_l1linf:1.7334e-02 L6_l1linf:1.6968e-02 L7_l1linf:1.6846e-02 L8_l1linf:1.6724e-02 L9_l1linf:1.6846e-02 L10_l1linf:1.7090e-02 L11_l1linf:1.7090e-02 L12_l1linf:1.7456e-02 L1_spectral:7.8542e-04 L2_spectral:7.9314e-04 L3_spectral:7.8572e-04 L4_spectral:7.8439e-04 L5_spectral:7.9400e-04 L6_spectral:7.9173e-04 L7_spectral:7.9279e-04 L8_spectral:7.9052e-04 L9_spectral:7.9416e-04 L10_spectral:7.8768e-04 L11_spectral:7.8768e-04 L12_spectral:7.7443e-04 train_time:136457ms step_avg:48.73ms +[2025-09-11 09:57:35] [Rank 0] PRINT: step:2800/10000 val_loss:5.1855 total_sharp:2.3305e-04 L1_sharp:2.8754e-01 L2_sharp:3.4296e-01 L3_sharp:4.2003e-01 L4_sharp:4.9776e-01 L5_sharp:5.8919e-01 L6_sharp:6.3700e-01 L7_sharp:5.9791e-01 L8_sharp:5.3580e-01 L9_sharp:5.6893e-01 L10_sharp:6.2919e-01 L11_sharp:6.3754e-01 L12_sharp:1.6585e+00 total_fnorm:6.7500e+01 total_l1_linf:1.2646e+05 total_spectral:3.3750e+01 L1_fnorm:4.8584e-02 L2_fnorm:4.9072e-02 L3_fnorm:4.9316e-02 L4_fnorm:4.9316e-02 L5_fnorm:4.8828e-02 L6_fnorm:4.8828e-02 L7_fnorm:4.8828e-02 L8_fnorm:4.8340e-02 L9_fnorm:4.8584e-02 L10_fnorm:4.8584e-02 L11_fnorm:4.8584e-02 L12_fnorm:4.8096e-02 L1_l1linf:1.6724e-02 L2_l1linf:1.6846e-02 L3_l1linf:1.6968e-02 L4_l1linf:1.7090e-02 L5_l1linf:1.7334e-02 
L6_l1linf:1.6968e-02 L7_l1linf:1.6846e-02 L8_l1linf:1.6724e-02 L9_l1linf:1.6846e-02 L10_l1linf:1.7090e-02 L11_l1linf:1.7090e-02 L12_l1linf:1.7456e-02 L1_spectral:7.8542e-04 L2_spectral:7.9314e-04 L3_spectral:7.8572e-04 L4_spectral:7.8439e-04 L5_spectral:7.9400e-04 L6_spectral:7.9173e-04 L7_spectral:7.9279e-04 L8_spectral:7.9052e-04 L9_spectral:7.9416e-04 L10_spectral:7.8768e-04 L11_spectral:7.8768e-04 L12_spectral:7.7443e-04 train_time:136457ms step_avg:48.73ms +[2025-09-11 09:57:37] [Rank 0] step:2801/10000 train_time:138177ms step_avg:49.33ms +[2025-09-11 09:57:37] [Rank 0] step:2801/10000 train_time:138177ms step_avg:49.33ms +[2025-09-11 09:57:37] [Rank 0] step:2821/10000 train_time:138832ms step_avg:49.21ms +[2025-09-11 09:57:37] [Rank 0] step:2821/10000 train_time:138832ms step_avg:49.21ms +[2025-09-11 09:57:38] [Rank 0] step:2841/10000 train_time:139497ms step_avg:49.10ms +[2025-09-11 09:57:38] [Rank 0] step:2841/10000 train_time:139497ms step_avg:49.10ms +[2025-09-11 09:57:39] [Rank 0] step:2861/10000 train_time:140163ms step_avg:48.99ms +[2025-09-11 09:57:39] [Rank 0] step:2861/10000 train_time:140163ms step_avg:48.99ms +[2025-09-11 09:57:39] [Rank 0] step:2881/10000 train_time:140828ms step_avg:48.88ms +[2025-09-11 09:57:39] [Rank 0] step:2881/10000 train_time:140828ms step_avg:48.88ms +[2025-09-11 09:57:40] [Rank 0] step:2901/10000 train_time:141493ms step_avg:48.77ms +[2025-09-11 09:57:40] [Rank 0] step:2901/10000 train_time:141493ms step_avg:48.77ms +[2025-09-11 09:57:41] [Rank 0] step:2921/10000 train_time:142157ms step_avg:48.67ms +[2025-09-11 09:57:41] [Rank 0] step:2921/10000 train_time:142157ms step_avg:48.67ms +[2025-09-11 09:57:41] [Rank 0] step:2941/10000 train_time:142822ms step_avg:48.56ms +[2025-09-11 09:57:41] [Rank 0] step:2941/10000 train_time:142822ms step_avg:48.56ms +[2025-09-11 09:57:42] [Rank 0] step:2961/10000 train_time:143486ms step_avg:48.46ms +[2025-09-11 09:57:42] [Rank 0] step:2961/10000 train_time:143486ms step_avg:48.46ms 
+[2025-09-11 09:57:43] [Rank 0] step:2981/10000 train_time:144153ms step_avg:48.36ms +[2025-09-11 09:57:43] [Rank 0] step:2981/10000 train_time:144153ms step_avg:48.36ms +[2025-09-11 09:57:43] [Rank 0] step:3001/10000 train_time:144820ms step_avg:48.26ms +[2025-09-11 09:57:43] [Rank 0] step:3001/10000 train_time:144820ms step_avg:48.26ms +[2025-09-11 09:57:44] [Rank 0] step:3021/10000 train_time:145487ms step_avg:48.16ms +[2025-09-11 09:57:44] [Rank 0] step:3021/10000 train_time:145487ms step_avg:48.16ms +[2025-09-11 09:57:45] [Rank 0] step:3041/10000 train_time:146155ms step_avg:48.06ms +[2025-09-11 09:57:45] [Rank 0] step:3041/10000 train_time:146155ms step_avg:48.06ms +[2025-09-11 09:57:45] [Rank 0] step:3061/10000 train_time:146821ms step_avg:47.97ms +[2025-09-11 09:57:45] [Rank 0] step:3061/10000 train_time:146821ms step_avg:47.97ms +[2025-09-11 09:57:46] [Rank 0] step:3081/10000 train_time:147488ms step_avg:47.87ms +[2025-09-11 09:57:46] [Rank 0] step:3081/10000 train_time:147488ms step_avg:47.87ms +[2025-09-11 09:57:47] [Rank 0] step:3101/10000 train_time:148155ms step_avg:47.78ms +[2025-09-11 09:57:47] [Rank 0] step:3101/10000 train_time:148155ms step_avg:47.78ms +[2025-09-11 09:57:47] [Rank 0] step:3121/10000 train_time:148823ms step_avg:47.68ms +[2025-09-11 09:57:47] [Rank 0] step:3121/10000 train_time:148823ms step_avg:47.68ms +[2025-09-11 09:57:48] [Rank 0] step:3141/10000 train_time:149490ms step_avg:47.59ms +[2025-09-11 09:57:48] [Rank 0] step:3141/10000 train_time:149490ms step_avg:47.59ms +[2025-09-11 09:57:49] [Rank 0] step:3161/10000 train_time:150157ms step_avg:47.50ms +[2025-09-11 09:57:49] [Rank 0] step:3161/10000 train_time:150157ms step_avg:47.50ms +[2025-09-11 09:57:49] [Rank 0] step:3181/10000 train_time:150823ms step_avg:47.41ms +[2025-09-11 09:57:49] [Rank 0] step:3181/10000 train_time:150823ms step_avg:47.41ms +[2025-09-11 09:57:50] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 09:57:50] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 09:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 09:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 09:57:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 09:57:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 09:57:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:57:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:57:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 09:57:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 09:57:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 09:57:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 09:57:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 09:57:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 09:57:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 09:57:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 09:57:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 09:57:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 09:57:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 09:57:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 09:57:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 09:57:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 09:57:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 09:57:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 09:57:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 09:57:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 09:57:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 09:57:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 09:57:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 09:57:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 09:57:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 09:57:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 09:57:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 09:57:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 09:57:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 09:57:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 09:57:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 09:57:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 09:58:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 09:58:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 09:58:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 09:58:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 09:58:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 09:58:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 09:58:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:58:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:58:01] [Rank 0] PRINT: step:3200/10000 val_loss:5.1050 total_sharp:1.4136e-04 L1_sharp:2.4318e-01 L2_sharp:2.9667e-01 L3_sharp:3.6985e-01 L4_sharp:4.6281e-01 L5_sharp:5.0414e-01 L6_sharp:5.0830e-01 L7_sharp:4.0722e-01 L8_sharp:3.8416e-01 L9_sharp:4.1221e-01 L10_sharp:4.9523e-01 L11_sharp:6.0008e-01 L12_sharp:2.0566e+00 total_fnorm:7.7500e+01 total_l1_linf:1.5258e+05 total_spectral:3.8750e+01 L1_fnorm:4.8340e-02 L2_fnorm:4.8828e-02 L3_fnorm:4.8828e-02 L4_fnorm:4.8828e-02 L5_fnorm:4.8584e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8584e-02 L8_fnorm:4.7852e-02 L9_fnorm:4.8096e-02 L10_fnorm:4.8096e-02 L11_fnorm:4.8096e-02 L12_fnorm:4.7852e-02 L1_l1linf:1.5381e-02 L2_l1linf:1.5991e-02 L3_l1linf:1.6113e-02 L4_l1linf:1.6113e-02 L5_l1linf:1.6113e-02 L6_l1linf:1.5991e-02 L7_l1linf:1.5869e-02 L8_l1linf:1.6113e-02 L9_l1linf:1.5747e-02 L10_l1linf:1.5991e-02 L11_l1linf:1.5991e-02 L12_l1linf:1.6602e-02 L1_spectral:7.8732e-04 L2_spectral:7.9401e-04 L3_spectral:7.9324e-04 L4_spectral:7.9441e-04 L5_spectral:7.9387e-04 L6_spectral:7.9909e-04 L7_spectral:7.9980e-04 L8_spectral:8.0130e-04 L9_spectral:7.9685e-04 L10_spectral:7.9771e-04 L11_spectral:7.9327e-04 L12_spectral:7.8354e-04 train_time:151472ms step_avg:47.34ms +[2025-09-11 09:58:01] [Rank 0] PRINT: step:3200/10000 
val_loss:5.1050 total_sharp:1.4136e-04 L1_sharp:2.4318e-01 L2_sharp:2.9667e-01 L3_sharp:3.6985e-01 L4_sharp:4.6281e-01 L5_sharp:5.0414e-01 L6_sharp:5.0830e-01 L7_sharp:4.0722e-01 L8_sharp:3.8416e-01 L9_sharp:4.1221e-01 L10_sharp:4.9523e-01 L11_sharp:6.0008e-01 L12_sharp:2.0566e+00 total_fnorm:7.7500e+01 total_l1_linf:1.5258e+05 total_spectral:3.8750e+01 L1_fnorm:4.8340e-02 L2_fnorm:4.8828e-02 L3_fnorm:4.8828e-02 L4_fnorm:4.8828e-02 L5_fnorm:4.8584e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8584e-02 L8_fnorm:4.7852e-02 L9_fnorm:4.8096e-02 L10_fnorm:4.8096e-02 L11_fnorm:4.8096e-02 L12_fnorm:4.7852e-02 L1_l1linf:1.5381e-02 L2_l1linf:1.5991e-02 L3_l1linf:1.6113e-02 L4_l1linf:1.6113e-02 L5_l1linf:1.6113e-02 L6_l1linf:1.5991e-02 L7_l1linf:1.5869e-02 L8_l1linf:1.6113e-02 L9_l1linf:1.5747e-02 L10_l1linf:1.5991e-02 L11_l1linf:1.5991e-02 L12_l1linf:1.6602e-02 L1_spectral:7.8732e-04 L2_spectral:7.9401e-04 L3_spectral:7.9324e-04 L4_spectral:7.9441e-04 L5_spectral:7.9387e-04 L6_spectral:7.9909e-04 L7_spectral:7.9980e-04 L8_spectral:8.0130e-04 L9_spectral:7.9685e-04 L10_spectral:7.9771e-04 L11_spectral:7.9327e-04 L12_spectral:7.8354e-04 train_time:151472ms step_avg:47.34ms +[2025-09-11 09:58:03] [Rank 0] step:3201/10000 train_time:153202ms step_avg:47.86ms +[2025-09-11 09:58:03] [Rank 0] step:3201/10000 train_time:153202ms step_avg:47.86ms +[2025-09-11 09:58:03] [Rank 0] step:3221/10000 train_time:153890ms step_avg:47.78ms +[2025-09-11 09:58:03] [Rank 0] step:3221/10000 train_time:153890ms step_avg:47.78ms +[2025-09-11 09:58:04] [Rank 0] step:3241/10000 train_time:154559ms step_avg:47.69ms +[2025-09-11 09:58:04] [Rank 0] step:3241/10000 train_time:154559ms step_avg:47.69ms +[2025-09-11 09:58:05] [Rank 0] step:3261/10000 train_time:155227ms step_avg:47.60ms +[2025-09-11 09:58:05] [Rank 0] step:3261/10000 train_time:155227ms step_avg:47.60ms +[2025-09-11 09:58:05] [Rank 0] step:3281/10000 train_time:155896ms step_avg:47.51ms +[2025-09-11 09:58:05] [Rank 0] step:3281/10000 
train_time:155896ms step_avg:47.51ms +[2025-09-11 09:58:06] [Rank 0] step:3301/10000 train_time:156565ms step_avg:47.43ms +[2025-09-11 09:58:06] [Rank 0] step:3301/10000 train_time:156565ms step_avg:47.43ms +[2025-09-11 09:58:07] [Rank 0] step:3321/10000 train_time:157233ms step_avg:47.35ms +[2025-09-11 09:58:07] [Rank 0] step:3321/10000 train_time:157233ms step_avg:47.35ms +[2025-09-11 09:58:07] [Rank 0] step:3341/10000 train_time:157901ms step_avg:47.26ms +[2025-09-11 09:58:07] [Rank 0] step:3341/10000 train_time:157901ms step_avg:47.26ms +[2025-09-11 09:58:08] [Rank 0] step:3361/10000 train_time:158571ms step_avg:47.18ms +[2025-09-11 09:58:08] [Rank 0] step:3361/10000 train_time:158571ms step_avg:47.18ms +[2025-09-11 09:58:09] [Rank 0] step:3381/10000 train_time:159238ms step_avg:47.10ms +[2025-09-11 09:58:09] [Rank 0] step:3381/10000 train_time:159238ms step_avg:47.10ms +[2025-09-11 09:58:09] [Rank 0] step:3401/10000 train_time:159906ms step_avg:47.02ms +[2025-09-11 09:58:09] [Rank 0] step:3401/10000 train_time:159906ms step_avg:47.02ms +[2025-09-11 09:58:10] [Rank 0] step:3421/10000 train_time:160573ms step_avg:46.94ms +[2025-09-11 09:58:10] [Rank 0] step:3421/10000 train_time:160573ms step_avg:46.94ms +[2025-09-11 09:58:11] [Rank 0] step:3441/10000 train_time:161241ms step_avg:46.86ms +[2025-09-11 09:58:11] [Rank 0] step:3441/10000 train_time:161241ms step_avg:46.86ms +[2025-09-11 09:58:11] [Rank 0] step:3461/10000 train_time:161907ms step_avg:46.78ms +[2025-09-11 09:58:11] [Rank 0] step:3461/10000 train_time:161907ms step_avg:46.78ms +[2025-09-11 09:58:12] [Rank 0] step:3481/10000 train_time:162574ms step_avg:46.70ms +[2025-09-11 09:58:12] [Rank 0] step:3481/10000 train_time:162574ms step_avg:46.70ms +[2025-09-11 09:58:13] [Rank 0] step:3501/10000 train_time:163242ms step_avg:46.63ms +[2025-09-11 09:58:13] [Rank 0] step:3501/10000 train_time:163242ms step_avg:46.63ms +[2025-09-11 09:58:13] [Rank 0] step:3521/10000 train_time:163910ms step_avg:46.55ms 
+[2025-09-11 09:58:13] [Rank 0] step:3521/10000 train_time:163910ms step_avg:46.55ms +[2025-09-11 09:58:14] [Rank 0] step:3541/10000 train_time:164577ms step_avg:46.48ms +[2025-09-11 09:58:14] [Rank 0] step:3541/10000 train_time:164577ms step_avg:46.48ms +[2025-09-11 09:58:15] [Rank 0] step:3561/10000 train_time:165245ms step_avg:46.40ms +[2025-09-11 09:58:15] [Rank 0] step:3561/10000 train_time:165245ms step_avg:46.40ms +[2025-09-11 09:58:15] [Rank 0] step:3581/10000 train_time:165913ms step_avg:46.33ms +[2025-09-11 09:58:15] [Rank 0] step:3581/10000 train_time:165913ms step_avg:46.33ms +[2025-09-11 09:58:16] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 09:58:16] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 09:58:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 09:58:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 09:58:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 09:58:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 09:58:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:58:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:58:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 09:58:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 09:58:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 09:58:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 09:58:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 09:58:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 09:58:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 09:58:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 09:58:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 09:58:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 09:58:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 09:58:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 09:58:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 09:58:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 09:58:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 09:58:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 09:58:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 09:58:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 09:58:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 09:58:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 09:58:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 09:58:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 09:58:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 09:58:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 09:58:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 09:58:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 09:58:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 09:58:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 09:58:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 09:58:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 09:58:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 09:58:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 09:58:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 09:58:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 09:58:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 09:58:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 09:58:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:58:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:58:27] [Rank 0] PRINT: step:3600/10000 val_loss:5.0597 total_sharp:1.8004e-04 L1_sharp:2.1144e-01 L2_sharp:2.7175e-01 L3_sharp:3.2586e-01 L4_sharp:4.3454e-01 L5_sharp:4.9705e-01 L6_sharp:5.5021e-01 L7_sharp:5.1606e-01 L8_sharp:5.0067e-01 L9_sharp:5.6088e-01 L10_sharp:5.2037e-01 L11_sharp:7.3031e-01 L12_sharp:2.6824e+00 total_fnorm:7.0000e+01 total_l1_linf:1.3107e+05 total_spectral:3.5000e+01 L1_fnorm:4.8340e-02 L2_fnorm:4.8584e-02 L3_fnorm:4.8828e-02 L4_fnorm:4.8828e-02 L5_fnorm:4.8584e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8340e-02 L8_fnorm:4.7852e-02 L9_fnorm:4.8096e-02 L10_fnorm:4.7852e-02 L11_fnorm:4.7852e-02 L12_fnorm:4.7607e-02 L1_l1linf:1.4954e-02 L2_l1linf:1.5503e-02 L3_l1linf:1.5442e-02 L4_l1linf:1.5564e-02 L5_l1linf:1.5381e-02 L6_l1linf:1.5869e-02 L7_l1linf:1.5503e-02 L8_l1linf:1.5137e-02 L9_l1linf:1.5198e-02 L10_l1linf:1.5503e-02 L11_l1linf:1.5869e-02 L12_l1linf:1.5869e-02 L1_spectral:7.8897e-04 L2_spectral:7.8969e-04 L3_spectral:7.9714e-04 L4_spectral:7.9558e-04 L5_spectral:7.9568e-04 L6_spectral:7.9700e-04 L7_spectral:7.9739e-04 L8_spectral:7.9979e-04 L9_spectral:7.9677e-04 L10_spectral:8.0000e-04 L11_spectral:7.9879e-04 L12_spectral:7.8861e-04 train_time:166718ms step_avg:46.31ms +[2025-09-11 09:58:27] [Rank 0] PRINT: step:3600/10000 val_loss:5.0597 total_sharp:1.8004e-04 L1_sharp:2.1144e-01 L2_sharp:2.7175e-01 L3_sharp:3.2586e-01 L4_sharp:4.3454e-01 L5_sharp:4.9705e-01 L6_sharp:5.5021e-01 L7_sharp:5.1606e-01 L8_sharp:5.0067e-01 L9_sharp:5.6088e-01 L10_sharp:5.2037e-01 L11_sharp:7.3031e-01 L12_sharp:2.6824e+00 total_fnorm:7.0000e+01 total_l1_linf:1.3107e+05 total_spectral:3.5000e+01 L1_fnorm:4.8340e-02 L2_fnorm:4.8584e-02 L3_fnorm:4.8828e-02 L4_fnorm:4.8828e-02 L5_fnorm:4.8584e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8340e-02 L8_fnorm:4.7852e-02 L9_fnorm:4.8096e-02 L10_fnorm:4.7852e-02 L11_fnorm:4.7852e-02 L12_fnorm:4.7607e-02 L1_l1linf:1.4954e-02 L2_l1linf:1.5503e-02 L3_l1linf:1.5442e-02 L4_l1linf:1.5564e-02 L5_l1linf:1.5381e-02 
L6_l1linf:1.5869e-02 L7_l1linf:1.5503e-02 L8_l1linf:1.5137e-02 L9_l1linf:1.5198e-02 L10_l1linf:1.5503e-02 L11_l1linf:1.5869e-02 L12_l1linf:1.5869e-02 L1_spectral:7.8897e-04 L2_spectral:7.8969e-04 L3_spectral:7.9714e-04 L4_spectral:7.9558e-04 L5_spectral:7.9568e-04 L6_spectral:7.9700e-04 L7_spectral:7.9739e-04 L8_spectral:7.9979e-04 L9_spectral:7.9677e-04 L10_spectral:8.0000e-04 L11_spectral:7.9879e-04 L12_spectral:7.8861e-04 train_time:166718ms step_avg:46.31ms +[2025-09-11 09:58:29] [Rank 0] step:3601/10000 train_time:168668ms step_avg:46.84ms +[2025-09-11 09:58:29] [Rank 0] step:3601/10000 train_time:168668ms step_avg:46.84ms +[2025-09-11 09:58:30] [Rank 0] step:3621/10000 train_time:169367ms step_avg:46.77ms +[2025-09-11 09:58:30] [Rank 0] step:3621/10000 train_time:169367ms step_avg:46.77ms +[2025-09-11 09:58:30] [Rank 0] step:3641/10000 train_time:170035ms step_avg:46.70ms +[2025-09-11 09:58:30] [Rank 0] step:3641/10000 train_time:170035ms step_avg:46.70ms +[2025-09-11 09:58:31] [Rank 0] step:3661/10000 train_time:170701ms step_avg:46.63ms +[2025-09-11 09:58:31] [Rank 0] step:3661/10000 train_time:170701ms step_avg:46.63ms +[2025-09-11 09:58:32] [Rank 0] step:3681/10000 train_time:171369ms step_avg:46.55ms +[2025-09-11 09:58:32] [Rank 0] step:3681/10000 train_time:171369ms step_avg:46.55ms +[2025-09-11 09:58:32] [Rank 0] step:3701/10000 train_time:172036ms step_avg:46.48ms +[2025-09-11 09:58:32] [Rank 0] step:3701/10000 train_time:172036ms step_avg:46.48ms +[2025-09-11 09:58:33] [Rank 0] step:3721/10000 train_time:172717ms step_avg:46.42ms +[2025-09-11 09:58:33] [Rank 0] step:3721/10000 train_time:172717ms step_avg:46.42ms +[2025-09-11 09:58:34] [Rank 0] step:3741/10000 train_time:173395ms step_avg:46.35ms +[2025-09-11 09:58:34] [Rank 0] step:3741/10000 train_time:173395ms step_avg:46.35ms +[2025-09-11 09:58:34] [Rank 0] step:3761/10000 train_time:174073ms step_avg:46.28ms +[2025-09-11 09:58:34] [Rank 0] step:3761/10000 train_time:174073ms step_avg:46.28ms 
+[2025-09-11 09:58:35] [Rank 0] step:3781/10000 train_time:174751ms step_avg:46.22ms +[2025-09-11 09:58:35] [Rank 0] step:3781/10000 train_time:174751ms step_avg:46.22ms +[2025-09-11 09:58:36] [Rank 0] step:3801/10000 train_time:175430ms step_avg:46.15ms +[2025-09-11 09:58:36] [Rank 0] step:3801/10000 train_time:175430ms step_avg:46.15ms +[2025-09-11 09:58:37] [Rank 0] step:3821/10000 train_time:176108ms step_avg:46.09ms +[2025-09-11 09:58:37] [Rank 0] step:3821/10000 train_time:176108ms step_avg:46.09ms +[2025-09-11 09:58:37] [Rank 0] step:3841/10000 train_time:176786ms step_avg:46.03ms +[2025-09-11 09:58:37] [Rank 0] step:3841/10000 train_time:176786ms step_avg:46.03ms +[2025-09-11 09:58:38] [Rank 0] step:3861/10000 train_time:177463ms step_avg:45.96ms +[2025-09-11 09:58:38] [Rank 0] step:3861/10000 train_time:177463ms step_avg:45.96ms +[2025-09-11 09:58:39] [Rank 0] step:3881/10000 train_time:178140ms step_avg:45.90ms +[2025-09-11 09:58:39] [Rank 0] step:3881/10000 train_time:178140ms step_avg:45.90ms +[2025-09-11 09:58:39] [Rank 0] step:3901/10000 train_time:178818ms step_avg:45.84ms +[2025-09-11 09:58:39] [Rank 0] step:3901/10000 train_time:178818ms step_avg:45.84ms +[2025-09-11 09:58:40] [Rank 0] step:3921/10000 train_time:179497ms step_avg:45.78ms +[2025-09-11 09:58:40] [Rank 0] step:3921/10000 train_time:179497ms step_avg:45.78ms +[2025-09-11 09:58:41] [Rank 0] step:3941/10000 train_time:180176ms step_avg:45.72ms +[2025-09-11 09:58:41] [Rank 0] step:3941/10000 train_time:180176ms step_avg:45.72ms +[2025-09-11 09:58:41] [Rank 0] step:3961/10000 train_time:180854ms step_avg:45.66ms +[2025-09-11 09:58:41] [Rank 0] step:3961/10000 train_time:180854ms step_avg:45.66ms +[2025-09-11 09:58:42] [Rank 0] step:3981/10000 train_time:181532ms step_avg:45.60ms +[2025-09-11 09:58:42] [Rank 0] step:3981/10000 train_time:181532ms step_avg:45.60ms +[2025-09-11 09:58:43] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 09:58:43] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 09:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 09:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 09:58:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 09:58:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 09:58:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:58:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:58:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 09:58:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 09:58:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 09:58:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 09:58:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 09:58:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 09:58:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 09:58:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 09:58:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 09:58:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 09:58:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 09:58:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 09:58:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 09:58:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 09:58:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 09:58:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 09:58:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 09:58:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 09:58:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 09:58:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 09:58:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 09:58:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 09:58:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 09:58:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 09:58:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 09:58:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 09:58:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 09:58:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 09:58:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 09:58:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 09:58:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 09:58:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 09:58:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 09:58:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 09:58:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 09:58:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 09:58:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:58:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:58:54] [Rank 0] PRINT: step:4000/10000 val_loss:5.0015 total_sharp:1.8689e-04 L1_sharp:2.2121e-01 L2_sharp:3.0389e-01 L3_sharp:3.9622e-01 L4_sharp:5.1360e-01 L5_sharp:5.9138e-01 L6_sharp:7.1171e-01 L7_sharp:9.3143e-01 L8_sharp:9.5832e-01 L9_sharp:1.4713e+00 L10_sharp:2.5053e+00 L11_sharp:2.8543e+00 L12_sharp:4.8771e+00 total_fnorm:8.1000e+01 total_l1_linf:1.5565e+05 total_spectral:4.0500e+01 L1_fnorm:4.8096e-02 L2_fnorm:4.8828e-02 L3_fnorm:4.8828e-02 L4_fnorm:4.8828e-02 L5_fnorm:4.8584e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8096e-02 L8_fnorm:4.7607e-02 L9_fnorm:4.8096e-02 L10_fnorm:4.7852e-02 L11_fnorm:4.7852e-02 L12_fnorm:4.7607e-02 L1_l1linf:1.5320e-02 L2_l1linf:1.5564e-02 L3_l1linf:1.5747e-02 L4_l1linf:1.5747e-02 L5_l1linf:1.5747e-02 L6_l1linf:1.5625e-02 L7_l1linf:1.5625e-02 L8_l1linf:1.5381e-02 L9_l1linf:1.5381e-02 L10_l1linf:1.5991e-02 L11_l1linf:1.5747e-02 L12_l1linf:1.6113e-02 L1_spectral:7.7767e-04 L2_spectral:7.8630e-04 L3_spectral:7.9245e-04 L4_spectral:7.9700e-04 L5_spectral:7.9139e-04 L6_spectral:7.9683e-04 L7_spectral:8.0263e-04 L8_spectral:7.9259e-04 L9_spectral:7.9682e-04 L10_spectral:7.9575e-04 L11_spectral:7.9074e-04 L12_spectral:7.8317e-04 train_time:182192ms step_avg:45.55ms +[2025-09-11 09:58:54] [Rank 0] PRINT: step:4000/10000 
val_loss:5.0015 total_sharp:1.8689e-04 L1_sharp:2.2121e-01 L2_sharp:3.0389e-01 L3_sharp:3.9622e-01 L4_sharp:5.1360e-01 L5_sharp:5.9138e-01 L6_sharp:7.1171e-01 L7_sharp:9.3143e-01 L8_sharp:9.5832e-01 L9_sharp:1.4713e+00 L10_sharp:2.5053e+00 L11_sharp:2.8543e+00 L12_sharp:4.8771e+00 total_fnorm:8.1000e+01 total_l1_linf:1.5565e+05 total_spectral:4.0500e+01 L1_fnorm:4.8096e-02 L2_fnorm:4.8828e-02 L3_fnorm:4.8828e-02 L4_fnorm:4.8828e-02 L5_fnorm:4.8584e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8096e-02 L8_fnorm:4.7607e-02 L9_fnorm:4.8096e-02 L10_fnorm:4.7852e-02 L11_fnorm:4.7852e-02 L12_fnorm:4.7607e-02 L1_l1linf:1.5320e-02 L2_l1linf:1.5564e-02 L3_l1linf:1.5747e-02 L4_l1linf:1.5747e-02 L5_l1linf:1.5747e-02 L6_l1linf:1.5625e-02 L7_l1linf:1.5625e-02 L8_l1linf:1.5381e-02 L9_l1linf:1.5381e-02 L10_l1linf:1.5991e-02 L11_l1linf:1.5747e-02 L12_l1linf:1.6113e-02 L1_spectral:7.7767e-04 L2_spectral:7.8630e-04 L3_spectral:7.9245e-04 L4_spectral:7.9700e-04 L5_spectral:7.9139e-04 L6_spectral:7.9683e-04 L7_spectral:8.0263e-04 L8_spectral:7.9259e-04 L9_spectral:7.9682e-04 L10_spectral:7.9575e-04 L11_spectral:7.9074e-04 L12_spectral:7.8317e-04 train_time:182192ms step_avg:45.55ms +[2025-09-11 09:58:56] [Rank 0] step:4001/10000 train_time:184094ms step_avg:46.01ms +[2025-09-11 09:58:56] [Rank 0] step:4001/10000 train_time:184094ms step_avg:46.01ms +[2025-09-11 09:58:57] [Rank 0] step:4021/10000 train_time:184776ms step_avg:45.95ms +[2025-09-11 09:58:57] [Rank 0] step:4021/10000 train_time:184776ms step_avg:45.95ms +[2025-09-11 09:58:57] [Rank 0] step:4041/10000 train_time:185455ms step_avg:45.89ms +[2025-09-11 09:58:57] [Rank 0] step:4041/10000 train_time:185455ms step_avg:45.89ms +[2025-09-11 09:58:58] [Rank 0] step:4061/10000 train_time:186132ms step_avg:45.83ms +[2025-09-11 09:58:58] [Rank 0] step:4061/10000 train_time:186132ms step_avg:45.83ms +[2025-09-11 09:58:59] [Rank 0] step:4081/10000 train_time:186810ms step_avg:45.78ms +[2025-09-11 09:58:59] [Rank 0] step:4081/10000 
train_time:186810ms step_avg:45.78ms +[2025-09-11 09:58:59] [Rank 0] step:4101/10000 train_time:187489ms step_avg:45.72ms +[2025-09-11 09:58:59] [Rank 0] step:4101/10000 train_time:187489ms step_avg:45.72ms +[2025-09-11 09:59:00] [Rank 0] step:4121/10000 train_time:188169ms step_avg:45.66ms +[2025-09-11 09:59:00] [Rank 0] step:4121/10000 train_time:188169ms step_avg:45.66ms +[2025-09-11 09:59:01] [Rank 0] step:4141/10000 train_time:188847ms step_avg:45.60ms +[2025-09-11 09:59:01] [Rank 0] step:4141/10000 train_time:188847ms step_avg:45.60ms +[2025-09-11 09:59:01] [Rank 0] step:4161/10000 train_time:189527ms step_avg:45.55ms +[2025-09-11 09:59:01] [Rank 0] step:4161/10000 train_time:189527ms step_avg:45.55ms +[2025-09-11 09:59:02] [Rank 0] step:4181/10000 train_time:190205ms step_avg:45.49ms +[2025-09-11 09:59:02] [Rank 0] step:4181/10000 train_time:190205ms step_avg:45.49ms +[2025-09-11 09:59:03] [Rank 0] step:4201/10000 train_time:190884ms step_avg:45.44ms +[2025-09-11 09:59:03] [Rank 0] step:4201/10000 train_time:190884ms step_avg:45.44ms +[2025-09-11 09:59:03] [Rank 0] step:4221/10000 train_time:191565ms step_avg:45.38ms +[2025-09-11 09:59:03] [Rank 0] step:4221/10000 train_time:191565ms step_avg:45.38ms +[2025-09-11 09:59:04] [Rank 0] step:4241/10000 train_time:192242ms step_avg:45.33ms +[2025-09-11 09:59:04] [Rank 0] step:4241/10000 train_time:192242ms step_avg:45.33ms +[2025-09-11 09:59:05] [Rank 0] step:4261/10000 train_time:192920ms step_avg:45.28ms +[2025-09-11 09:59:05] [Rank 0] step:4261/10000 train_time:192920ms step_avg:45.28ms +[2025-09-11 09:59:05] [Rank 0] step:4281/10000 train_time:193601ms step_avg:45.22ms +[2025-09-11 09:59:05] [Rank 0] step:4281/10000 train_time:193601ms step_avg:45.22ms +[2025-09-11 09:59:06] [Rank 0] step:4301/10000 train_time:194281ms step_avg:45.17ms +[2025-09-11 09:59:06] [Rank 0] step:4301/10000 train_time:194281ms step_avg:45.17ms +[2025-09-11 09:59:07] [Rank 0] step:4321/10000 train_time:194961ms step_avg:45.12ms 
+[2025-09-11 09:59:07] [Rank 0] step:4321/10000 train_time:194961ms step_avg:45.12ms +[2025-09-11 09:59:07] [Rank 0] step:4341/10000 train_time:195644ms step_avg:45.07ms +[2025-09-11 09:59:07] [Rank 0] step:4341/10000 train_time:195644ms step_avg:45.07ms +[2025-09-11 09:59:08] [Rank 0] step:4361/10000 train_time:196321ms step_avg:45.02ms +[2025-09-11 09:59:08] [Rank 0] step:4361/10000 train_time:196321ms step_avg:45.02ms +[2025-09-11 09:59:09] [Rank 0] step:4381/10000 train_time:197000ms step_avg:44.97ms +[2025-09-11 09:59:09] [Rank 0] step:4381/10000 train_time:197000ms step_avg:44.97ms +[2025-09-11 09:59:09] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 09:59:09] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 09:59:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 09:59:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 09:59:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 09:59:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 09:59:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:59:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:59:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 09:59:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 09:59:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 09:59:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 09:59:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 09:59:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 09:59:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 09:59:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 09:59:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 09:59:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 09:59:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 09:59:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 09:59:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 09:59:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 09:59:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 09:59:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 09:59:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 09:59:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 09:59:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 09:59:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 09:59:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 09:59:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 09:59:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 09:59:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 09:59:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 09:59:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 09:59:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 09:59:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 09:59:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 09:59:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 09:59:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 09:59:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 09:59:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 09:59:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 09:59:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 09:59:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 09:59:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:59:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:59:21] [Rank 0] PRINT: step:4400/10000 val_loss:4.9701 total_sharp:1.5139e-04 L1_sharp:1.7413e-01 L2_sharp:2.4415e-01 L3_sharp:2.8874e-01 L4_sharp:3.8771e-01 L5_sharp:5.1024e-01 L6_sharp:6.1593e-01 L7_sharp:6.4164e-01 L8_sharp:6.8214e-01 L9_sharp:8.2718e-01 L10_sharp:1.4481e+00 L11_sharp:2.8430e+00 L12_sharp:3.8467e+00 total_fnorm:7.1500e+01 total_l1_linf:1.3107e+05 total_spectral:3.5750e+01 L1_fnorm:4.7852e-02 L2_fnorm:4.8828e-02 L3_fnorm:4.8828e-02 L4_fnorm:4.8828e-02 L5_fnorm:4.8340e-02 L6_fnorm:4.8096e-02 L7_fnorm:4.8096e-02 L8_fnorm:4.7607e-02 L9_fnorm:4.7852e-02 L10_fnorm:4.8096e-02 L11_fnorm:4.7852e-02 L12_fnorm:4.7607e-02 L1_l1linf:1.4404e-02 L2_l1linf:1.4771e-02 L3_l1linf:1.5198e-02 L4_l1linf:1.5442e-02 L5_l1linf:1.5320e-02 L6_l1linf:1.5747e-02 L7_l1linf:1.5381e-02 L8_l1linf:1.5381e-02 L9_l1linf:1.5747e-02 L10_l1linf:1.5564e-02 L11_l1linf:1.6113e-02 L12_l1linf:1.6357e-02 L1_spectral:7.9021e-04 L2_spectral:7.8932e-04 L3_spectral:7.9286e-04 L4_spectral:7.9868e-04 L5_spectral:7.9251e-04 L6_spectral:8.0284e-04 L7_spectral:7.9965e-04 L8_spectral:7.9493e-04 L9_spectral:7.9647e-04 L10_spectral:7.9508e-04 L11_spectral:7.9027e-04 L12_spectral:7.6792e-04 train_time:197659ms step_avg:44.92ms +[2025-09-11 09:59:21] [Rank 0] PRINT: step:4400/10000 val_loss:4.9701 total_sharp:1.5139e-04 L1_sharp:1.7413e-01 L2_sharp:2.4415e-01 L3_sharp:2.8874e-01 L4_sharp:3.8771e-01 L5_sharp:5.1024e-01 L6_sharp:6.1593e-01 L7_sharp:6.4164e-01 L8_sharp:6.8214e-01 L9_sharp:8.2718e-01 L10_sharp:1.4481e+00 L11_sharp:2.8430e+00 L12_sharp:3.8467e+00 total_fnorm:7.1500e+01 total_l1_linf:1.3107e+05 total_spectral:3.5750e+01 L1_fnorm:4.7852e-02 L2_fnorm:4.8828e-02 L3_fnorm:4.8828e-02 L4_fnorm:4.8828e-02 L5_fnorm:4.8340e-02 L6_fnorm:4.8096e-02 L7_fnorm:4.8096e-02 L8_fnorm:4.7607e-02 L9_fnorm:4.7852e-02 L10_fnorm:4.8096e-02 L11_fnorm:4.7852e-02 L12_fnorm:4.7607e-02 L1_l1linf:1.4404e-02 L2_l1linf:1.4771e-02 L3_l1linf:1.5198e-02 L4_l1linf:1.5442e-02 L5_l1linf:1.5320e-02 
L6_l1linf:1.5747e-02 L7_l1linf:1.5381e-02 L8_l1linf:1.5381e-02 L9_l1linf:1.5747e-02 L10_l1linf:1.5564e-02 L11_l1linf:1.6113e-02 L12_l1linf:1.6357e-02 L1_spectral:7.9021e-04 L2_spectral:7.8932e-04 L3_spectral:7.9286e-04 L4_spectral:7.9868e-04 L5_spectral:7.9251e-04 L6_spectral:8.0284e-04 L7_spectral:7.9965e-04 L8_spectral:7.9493e-04 L9_spectral:7.9647e-04 L10_spectral:7.9508e-04 L11_spectral:7.9027e-04 L12_spectral:7.6792e-04 train_time:197659ms step_avg:44.92ms +[2025-09-11 09:59:23] [Rank 0] step:4401/10000 train_time:199702ms step_avg:45.38ms +[2025-09-11 09:59:23] [Rank 0] step:4401/10000 train_time:199702ms step_avg:45.38ms +[2025-09-11 09:59:23] [Rank 0] step:4421/10000 train_time:200393ms step_avg:45.33ms +[2025-09-11 09:59:23] [Rank 0] step:4421/10000 train_time:200393ms step_avg:45.33ms +[2025-09-11 09:59:24] [Rank 0] step:4441/10000 train_time:201072ms step_avg:45.28ms +[2025-09-11 09:59:24] [Rank 0] step:4441/10000 train_time:201072ms step_avg:45.28ms +[2025-09-11 09:59:25] [Rank 0] step:4461/10000 train_time:201753ms step_avg:45.23ms +[2025-09-11 09:59:25] [Rank 0] step:4461/10000 train_time:201753ms step_avg:45.23ms +[2025-09-11 09:59:25] [Rank 0] step:4481/10000 train_time:202438ms step_avg:45.18ms +[2025-09-11 09:59:25] [Rank 0] step:4481/10000 train_time:202438ms step_avg:45.18ms +[2025-09-11 09:59:26] [Rank 0] step:4501/10000 train_time:203127ms step_avg:45.13ms +[2025-09-11 09:59:26] [Rank 0] step:4501/10000 train_time:203127ms step_avg:45.13ms +[2025-09-11 09:59:27] [Rank 0] step:4521/10000 train_time:203811ms step_avg:45.08ms +[2025-09-11 09:59:27] [Rank 0] step:4521/10000 train_time:203811ms step_avg:45.08ms +[2025-09-11 09:59:28] [Rank 0] step:4541/10000 train_time:204492ms step_avg:45.03ms +[2025-09-11 09:59:28] [Rank 0] step:4541/10000 train_time:204492ms step_avg:45.03ms +[2025-09-11 09:59:28] [Rank 0] step:4561/10000 train_time:205171ms step_avg:44.98ms +[2025-09-11 09:59:28] [Rank 0] step:4561/10000 train_time:205171ms step_avg:44.98ms 
+[2025-09-11 09:59:29] [Rank 0] step:4581/10000 train_time:205851ms step_avg:44.94ms +[2025-09-11 09:59:29] [Rank 0] step:4581/10000 train_time:205851ms step_avg:44.94ms +[2025-09-11 09:59:30] [Rank 0] step:4601/10000 train_time:206532ms step_avg:44.89ms +[2025-09-11 09:59:30] [Rank 0] step:4601/10000 train_time:206532ms step_avg:44.89ms +[2025-09-11 09:59:30] [Rank 0] step:4621/10000 train_time:207211ms step_avg:44.84ms +[2025-09-11 09:59:30] [Rank 0] step:4621/10000 train_time:207211ms step_avg:44.84ms +[2025-09-11 09:59:31] [Rank 0] step:4641/10000 train_time:207891ms step_avg:44.79ms +[2025-09-11 09:59:31] [Rank 0] step:4641/10000 train_time:207891ms step_avg:44.79ms +[2025-09-11 09:59:32] [Rank 0] step:4661/10000 train_time:208581ms step_avg:44.75ms +[2025-09-11 09:59:32] [Rank 0] step:4661/10000 train_time:208581ms step_avg:44.75ms +[2025-09-11 09:59:32] [Rank 0] step:4681/10000 train_time:209279ms step_avg:44.71ms +[2025-09-11 09:59:32] [Rank 0] step:4681/10000 train_time:209279ms step_avg:44.71ms +[2025-09-11 09:59:33] [Rank 0] step:4701/10000 train_time:209974ms step_avg:44.67ms +[2025-09-11 09:59:33] [Rank 0] step:4701/10000 train_time:209974ms step_avg:44.67ms +[2025-09-11 09:59:34] [Rank 0] step:4721/10000 train_time:210660ms step_avg:44.62ms +[2025-09-11 09:59:34] [Rank 0] step:4721/10000 train_time:210660ms step_avg:44.62ms +[2025-09-11 09:59:34] [Rank 0] step:4741/10000 train_time:211340ms step_avg:44.58ms +[2025-09-11 09:59:34] [Rank 0] step:4741/10000 train_time:211340ms step_avg:44.58ms +[2025-09-11 09:59:35] [Rank 0] step:4761/10000 train_time:212021ms step_avg:44.53ms +[2025-09-11 09:59:35] [Rank 0] step:4761/10000 train_time:212021ms step_avg:44.53ms +[2025-09-11 09:59:36] [Rank 0] step:4781/10000 train_time:212702ms step_avg:44.49ms +[2025-09-11 09:59:36] [Rank 0] step:4781/10000 train_time:212702ms step_avg:44.49ms +[2025-09-11 09:59:36] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 09:59:36] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 09:59:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 09:59:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 09:59:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 09:59:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 09:59:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:59:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:59:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 09:59:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 09:59:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 09:59:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 09:59:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 09:59:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 09:59:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 09:59:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 09:59:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 09:59:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 09:59:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 09:59:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 09:59:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 09:59:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 09:59:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 09:59:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 09:59:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 09:59:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 09:59:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 09:59:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 09:59:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 09:59:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 09:59:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 09:59:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 09:59:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 09:59:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 09:59:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 09:59:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 09:59:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 09:59:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 09:59:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 09:59:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 09:59:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 09:59:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 09:59:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 09:59:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 09:59:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:59:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:59:47] [Rank 0] PRINT: step:4800/10000 val_loss:4.9281 total_sharp:1.2978e-04 L1_sharp:1.5769e-01 L2_sharp:2.5327e-01 L3_sharp:3.3355e-01 L4_sharp:3.8770e-01 L5_sharp:4.4639e-01 L6_sharp:5.9222e-01 L7_sharp:6.5807e-01 L8_sharp:6.7474e-01 L9_sharp:1.0653e+00 L10_sharp:2.3286e+00 L11_sharp:3.2184e+00 L12_sharp:3.7434e+00 total_fnorm:7.6500e+01 total_l1_linf:1.4848e+05 total_spectral:3.8250e+01 L1_fnorm:4.7852e-02 L2_fnorm:4.8340e-02 L3_fnorm:4.8584e-02 L4_fnorm:4.8340e-02 L5_fnorm:4.8096e-02 L6_fnorm:4.7852e-02 L7_fnorm:4.7852e-02 L8_fnorm:4.7119e-02 L9_fnorm:4.7607e-02 L10_fnorm:4.7852e-02 L11_fnorm:4.7852e-02 L12_fnorm:4.7607e-02 L1_l1linf:1.3916e-02 L2_l1linf:1.4282e-02 L3_l1linf:1.4587e-02 L4_l1linf:1.4648e-02 L5_l1linf:1.4648e-02 L6_l1linf:1.4893e-02 L7_l1linf:1.4587e-02 L8_l1linf:1.4648e-02 L9_l1linf:1.4648e-02 L10_l1linf:1.5015e-02 L11_l1linf:1.5747e-02 L12_l1linf:1.5869e-02 L1_spectral:7.8771e-04 L2_spectral:8.0313e-04 L3_spectral:7.9764e-04 L4_spectral:7.9665e-04 L5_spectral:7.9165e-04 L6_spectral:8.0208e-04 L7_spectral:8.0017e-04 L8_spectral:7.9611e-04 L9_spectral:8.0028e-04 L10_spectral:7.9857e-04 L11_spectral:7.9189e-04 L12_spectral:7.6569e-04 train_time:213361ms step_avg:44.45ms +[2025-09-11 09:59:47] [Rank 0] PRINT: step:4800/10000 
val_loss:4.9281 total_sharp:1.2978e-04 L1_sharp:1.5769e-01 L2_sharp:2.5327e-01 L3_sharp:3.3355e-01 L4_sharp:3.8770e-01 L5_sharp:4.4639e-01 L6_sharp:5.9222e-01 L7_sharp:6.5807e-01 L8_sharp:6.7474e-01 L9_sharp:1.0653e+00 L10_sharp:2.3286e+00 L11_sharp:3.2184e+00 L12_sharp:3.7434e+00 total_fnorm:7.6500e+01 total_l1_linf:1.4848e+05 total_spectral:3.8250e+01 L1_fnorm:4.7852e-02 L2_fnorm:4.8340e-02 L3_fnorm:4.8584e-02 L4_fnorm:4.8340e-02 L5_fnorm:4.8096e-02 L6_fnorm:4.7852e-02 L7_fnorm:4.7852e-02 L8_fnorm:4.7119e-02 L9_fnorm:4.7607e-02 L10_fnorm:4.7852e-02 L11_fnorm:4.7852e-02 L12_fnorm:4.7607e-02 L1_l1linf:1.3916e-02 L2_l1linf:1.4282e-02 L3_l1linf:1.4587e-02 L4_l1linf:1.4648e-02 L5_l1linf:1.4648e-02 L6_l1linf:1.4893e-02 L7_l1linf:1.4587e-02 L8_l1linf:1.4648e-02 L9_l1linf:1.4648e-02 L10_l1linf:1.5015e-02 L11_l1linf:1.5747e-02 L12_l1linf:1.5869e-02 L1_spectral:7.8771e-04 L2_spectral:8.0313e-04 L3_spectral:7.9764e-04 L4_spectral:7.9665e-04 L5_spectral:7.9165e-04 L6_spectral:8.0208e-04 L7_spectral:8.0017e-04 L8_spectral:7.9611e-04 L9_spectral:8.0028e-04 L10_spectral:7.9857e-04 L11_spectral:7.9189e-04 L12_spectral:7.6569e-04 train_time:213361ms step_avg:44.45ms +[2025-09-11 09:59:50] [Rank 0] step:4801/10000 train_time:215433ms step_avg:44.87ms +[2025-09-11 09:59:50] [Rank 0] step:4801/10000 train_time:215433ms step_avg:44.87ms +[2025-09-11 09:59:50] [Rank 0] step:4821/10000 train_time:216150ms step_avg:44.84ms +[2025-09-11 09:59:50] [Rank 0] step:4821/10000 train_time:216150ms step_avg:44.84ms +[2025-09-11 09:59:51] [Rank 0] step:4841/10000 train_time:216832ms step_avg:44.79ms +[2025-09-11 09:59:51] [Rank 0] step:4841/10000 train_time:216832ms step_avg:44.79ms +[2025-09-11 09:59:52] [Rank 0] step:4861/10000 train_time:217513ms step_avg:44.75ms +[2025-09-11 09:59:52] [Rank 0] step:4861/10000 train_time:217513ms step_avg:44.75ms +[2025-09-11 09:59:52] [Rank 0] step:4881/10000 train_time:218193ms step_avg:44.70ms +[2025-09-11 09:59:52] [Rank 0] step:4881/10000 
train_time:218193ms step_avg:44.70ms +[2025-09-11 09:59:53] [Rank 0] step:4901/10000 train_time:218874ms step_avg:44.66ms +[2025-09-11 09:59:53] [Rank 0] step:4901/10000 train_time:218874ms step_avg:44.66ms +[2025-09-11 09:59:54] [Rank 0] step:4921/10000 train_time:219555ms step_avg:44.62ms +[2025-09-11 09:59:54] [Rank 0] step:4921/10000 train_time:219555ms step_avg:44.62ms +[2025-09-11 09:59:54] [Rank 0] step:4941/10000 train_time:220235ms step_avg:44.57ms +[2025-09-11 09:59:54] [Rank 0] step:4941/10000 train_time:220235ms step_avg:44.57ms +[2025-09-11 09:59:55] [Rank 0] step:4961/10000 train_time:220915ms step_avg:44.53ms +[2025-09-11 09:59:55] [Rank 0] step:4961/10000 train_time:220915ms step_avg:44.53ms +[2025-09-11 09:59:56] [Rank 0] step:4981/10000 train_time:221596ms step_avg:44.49ms +[2025-09-11 09:59:56] [Rank 0] step:4981/10000 train_time:221596ms step_avg:44.49ms +[2025-09-11 09:59:56] [Rank 0] step:5001/10000 train_time:222278ms step_avg:44.45ms +[2025-09-11 09:59:56] [Rank 0] step:5001/10000 train_time:222278ms step_avg:44.45ms +[2025-09-11 09:59:57] [Rank 0] step:5021/10000 train_time:222958ms step_avg:44.41ms +[2025-09-11 09:59:57] [Rank 0] step:5021/10000 train_time:222958ms step_avg:44.41ms +[2025-09-11 09:59:58] [Rank 0] step:5041/10000 train_time:223637ms step_avg:44.36ms +[2025-09-11 09:59:58] [Rank 0] step:5041/10000 train_time:223637ms step_avg:44.36ms +[2025-09-11 09:59:58] [Rank 0] step:5061/10000 train_time:224318ms step_avg:44.32ms +[2025-09-11 09:59:58] [Rank 0] step:5061/10000 train_time:224318ms step_avg:44.32ms +[2025-09-11 09:59:59] [Rank 0] step:5081/10000 train_time:224997ms step_avg:44.28ms +[2025-09-11 09:59:59] [Rank 0] step:5081/10000 train_time:224997ms step_avg:44.28ms +[2025-09-11 10:00:00] [Rank 0] step:5101/10000 train_time:225678ms step_avg:44.24ms +[2025-09-11 10:00:00] [Rank 0] step:5101/10000 train_time:225678ms step_avg:44.24ms +[2025-09-11 10:00:00] [Rank 0] step:5121/10000 train_time:226359ms step_avg:44.20ms 
+[2025-09-11 10:00:00] [Rank 0] step:5121/10000 train_time:226359ms step_avg:44.20ms +[2025-09-11 10:00:01] [Rank 0] step:5141/10000 train_time:227041ms step_avg:44.16ms +[2025-09-11 10:00:01] [Rank 0] step:5141/10000 train_time:227041ms step_avg:44.16ms +[2025-09-11 10:00:02] [Rank 0] step:5161/10000 train_time:227782ms step_avg:44.14ms +[2025-09-11 10:00:02] [Rank 0] step:5161/10000 train_time:227782ms step_avg:44.14ms +[2025-09-11 10:00:03] [Rank 0] step:5181/10000 train_time:228540ms step_avg:44.11ms +[2025-09-11 10:00:03] [Rank 0] step:5181/10000 train_time:228540ms step_avg:44.11ms +[2025-09-11 10:00:03] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 10:00:03] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 10:00:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 10:00:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 10:00:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 10:00:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 10:00:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:00:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:00:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 10:00:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 10:00:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 10:00:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 10:00:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 10:00:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 10:00:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 10:00:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 10:00:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 10:00:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 10:00:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 10:00:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 10:00:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 10:00:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 10:00:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 10:00:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 10:00:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 10:00:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 10:00:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 10:00:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 10:00:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 10:00:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 10:00:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 10:00:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 10:00:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 10:00:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 10:00:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 10:00:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 10:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 10:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 10:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 10:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 10:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 10:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 10:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 10:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 10:00:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:00:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:00:16] [Rank 0] PRINT: step:5200/10000 val_loss:4.8887 total_sharp:1.6038e-04 L1_sharp:1.4930e-01 L2_sharp:2.4112e-01 L3_sharp:3.0724e-01 L4_sharp:4.3644e-01 L5_sharp:4.9944e-01 L6_sharp:6.3020e-01 L7_sharp:6.5324e-01 L8_sharp:5.7727e-01 L9_sharp:7.1600e-01 L10_sharp:8.5158e-01 L11_sharp:1.5317e+00 L12_sharp:3.9219e+00 total_fnorm:6.8000e+01 total_l1_linf:1.2237e+05 total_spectral:3.4000e+01 L1_fnorm:4.7607e-02 L2_fnorm:4.8096e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.8096e-02 L5_fnorm:4.7852e-02 L6_fnorm:4.7607e-02 L7_fnorm:4.7363e-02 L8_fnorm:4.6875e-02 L9_fnorm:4.7119e-02 L10_fnorm:4.7119e-02 L11_fnorm:4.7119e-02 L12_fnorm:4.7119e-02 L1_l1linf:1.3184e-02 L2_l1linf:1.3794e-02 L3_l1linf:1.4038e-02 L4_l1linf:1.4038e-02 L5_l1linf:1.3855e-02 L6_l1linf:1.4282e-02 L7_l1linf:1.3916e-02 L8_l1linf:1.3916e-02 L9_l1linf:1.3672e-02 L10_l1linf:1.3916e-02 L11_l1linf:1.4221e-02 L12_l1linf:1.4404e-02 L1_spectral:7.8710e-04 L2_spectral:7.9501e-04 L3_spectral:8.0135e-04 L4_spectral:8.0127e-04 L5_spectral:8.0565e-04 L6_spectral:8.0305e-04 L7_spectral:8.0519e-04 L8_spectral:8.0149e-04 L9_spectral:8.0372e-04 L10_spectral:8.0742e-04 L11_spectral:7.9927e-04 L12_spectral:7.9252e-04 train_time:229207ms step_avg:44.08ms +[2025-09-11 10:00:16] [Rank 0] PRINT: step:5200/10000 val_loss:4.8887 total_sharp:1.6038e-04 L1_sharp:1.4930e-01 L2_sharp:2.4112e-01 L3_sharp:3.0724e-01 L4_sharp:4.3644e-01 L5_sharp:4.9944e-01 L6_sharp:6.3020e-01 L7_sharp:6.5324e-01 L8_sharp:5.7727e-01 L9_sharp:7.1600e-01 L10_sharp:8.5158e-01 L11_sharp:1.5317e+00 L12_sharp:3.9219e+00 total_fnorm:6.8000e+01 total_l1_linf:1.2237e+05 total_spectral:3.4000e+01 L1_fnorm:4.7607e-02 L2_fnorm:4.8096e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.8096e-02 L5_fnorm:4.7852e-02 L6_fnorm:4.7607e-02 L7_fnorm:4.7363e-02 L8_fnorm:4.6875e-02 L9_fnorm:4.7119e-02 L10_fnorm:4.7119e-02 L11_fnorm:4.7119e-02 L12_fnorm:4.7119e-02 L1_l1linf:1.3184e-02 L2_l1linf:1.3794e-02 L3_l1linf:1.4038e-02 L4_l1linf:1.4038e-02 L5_l1linf:1.3855e-02 
L6_l1linf:1.4282e-02 L7_l1linf:1.3916e-02 L8_l1linf:1.3916e-02 L9_l1linf:1.3672e-02 L10_l1linf:1.3916e-02 L11_l1linf:1.4221e-02 L12_l1linf:1.4404e-02 L1_spectral:7.8710e-04 L2_spectral:7.9501e-04 L3_spectral:8.0135e-04 L4_spectral:8.0127e-04 L5_spectral:8.0565e-04 L6_spectral:8.0305e-04 L7_spectral:8.0519e-04 L8_spectral:8.0149e-04 L9_spectral:8.0372e-04 L10_spectral:8.0742e-04 L11_spectral:7.9927e-04 L12_spectral:7.9252e-04 train_time:229207ms step_avg:44.08ms +[2025-09-11 10:00:18] [Rank 0] step:5201/10000 train_time:231282ms step_avg:44.47ms +[2025-09-11 10:00:18] [Rank 0] step:5201/10000 train_time:231282ms step_avg:44.47ms +[2025-09-11 10:00:19] [Rank 0] step:5221/10000 train_time:231989ms step_avg:44.43ms +[2025-09-11 10:00:19] [Rank 0] step:5221/10000 train_time:231989ms step_avg:44.43ms +[2025-09-11 10:00:20] [Rank 0] step:5241/10000 train_time:232678ms step_avg:44.40ms +[2025-09-11 10:00:20] [Rank 0] step:5241/10000 train_time:232678ms step_avg:44.40ms +[2025-09-11 10:00:20] [Rank 0] step:5261/10000 train_time:233369ms step_avg:44.36ms +[2025-09-11 10:00:20] [Rank 0] step:5261/10000 train_time:233369ms step_avg:44.36ms +[2025-09-11 10:00:21] [Rank 0] step:5281/10000 train_time:234058ms step_avg:44.32ms +[2025-09-11 10:00:21] [Rank 0] step:5281/10000 train_time:234058ms step_avg:44.32ms +[2025-09-11 10:00:22] [Rank 0] step:5301/10000 train_time:234747ms step_avg:44.28ms +[2025-09-11 10:00:22] [Rank 0] step:5301/10000 train_time:234747ms step_avg:44.28ms +[2025-09-11 10:00:22] [Rank 0] step:5321/10000 train_time:235436ms step_avg:44.25ms +[2025-09-11 10:00:22] [Rank 0] step:5321/10000 train_time:235436ms step_avg:44.25ms +[2025-09-11 10:00:23] [Rank 0] step:5341/10000 train_time:236392ms step_avg:44.26ms +[2025-09-11 10:00:23] [Rank 0] step:5341/10000 train_time:236392ms step_avg:44.26ms +[2025-09-11 10:00:24] [Rank 0] step:5361/10000 train_time:237081ms step_avg:44.22ms +[2025-09-11 10:00:24] [Rank 0] step:5361/10000 train_time:237081ms step_avg:44.22ms 
+[2025-09-11 10:00:25] [Rank 0] step:5381/10000 train_time:237771ms step_avg:44.19ms +[2025-09-11 10:00:25] [Rank 0] step:5381/10000 train_time:237771ms step_avg:44.19ms +[2025-09-11 10:00:26] [Rank 0] step:5401/10000 train_time:238753ms step_avg:44.21ms +[2025-09-11 10:00:26] [Rank 0] step:5401/10000 train_time:238753ms step_avg:44.21ms +[2025-09-11 10:00:26] [Rank 0] step:5421/10000 train_time:239444ms step_avg:44.17ms +[2025-09-11 10:00:26] [Rank 0] step:5421/10000 train_time:239444ms step_avg:44.17ms +[2025-09-11 10:00:27] [Rank 0] step:5441/10000 train_time:240133ms step_avg:44.13ms +[2025-09-11 10:00:27] [Rank 0] step:5441/10000 train_time:240133ms step_avg:44.13ms +[2025-09-11 10:00:28] [Rank 0] step:5461/10000 train_time:240823ms step_avg:44.10ms +[2025-09-11 10:00:28] [Rank 0] step:5461/10000 train_time:240823ms step_avg:44.10ms +[2025-09-11 10:00:28] [Rank 0] step:5481/10000 train_time:241513ms step_avg:44.06ms +[2025-09-11 10:00:28] [Rank 0] step:5481/10000 train_time:241513ms step_avg:44.06ms +[2025-09-11 10:00:29] [Rank 0] step:5501/10000 train_time:242201ms step_avg:44.03ms +[2025-09-11 10:00:29] [Rank 0] step:5501/10000 train_time:242201ms step_avg:44.03ms +[2025-09-11 10:00:30] [Rank 0] step:5521/10000 train_time:242890ms step_avg:43.99ms +[2025-09-11 10:00:30] [Rank 0] step:5521/10000 train_time:242890ms step_avg:43.99ms +[2025-09-11 10:00:30] [Rank 0] step:5541/10000 train_time:243581ms step_avg:43.96ms +[2025-09-11 10:00:30] [Rank 0] step:5541/10000 train_time:243581ms step_avg:43.96ms +[2025-09-11 10:00:31] [Rank 0] step:5561/10000 train_time:244273ms step_avg:43.93ms +[2025-09-11 10:00:31] [Rank 0] step:5561/10000 train_time:244273ms step_avg:43.93ms +[2025-09-11 10:00:32] [Rank 0] step:5581/10000 train_time:244966ms step_avg:43.89ms +[2025-09-11 10:00:32] [Rank 0] step:5581/10000 train_time:244966ms step_avg:43.89ms +[2025-09-11 10:00:33] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 10:00:33] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 10:00:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 10:00:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 10:00:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 10:00:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 10:00:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:00:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:00:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 10:00:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 10:00:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 10:00:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 10:00:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 10:00:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 10:00:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 10:00:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 10:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 10:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 10:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 10:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 10:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 10:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 10:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 10:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 10:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 10:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 10:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 10:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 10:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 10:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 10:00:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 10:00:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 10:00:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 10:00:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 10:00:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 10:00:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 10:00:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 10:00:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 10:00:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 10:00:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 10:00:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 10:00:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 10:00:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 10:00:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 10:00:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:00:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:00:44] [Rank 0] PRINT: step:5600/10000 val_loss:4.8467 total_sharp:1.2428e-04 L1_sharp:1.4249e-01 L2_sharp:2.0999e-01 L3_sharp:2.7601e-01 L4_sharp:3.5800e-01 L5_sharp:4.2886e-01 L6_sharp:5.7008e-01 L7_sharp:5.8431e-01 L8_sharp:4.4844e-01 L9_sharp:5.3067e-01 L10_sharp:6.6285e-01 L11_sharp:8.1103e-01 L12_sharp:2.5137e+00 total_fnorm:6.9500e+01 total_l1_linf:1.2954e+05 total_spectral:3.5000e+01 L1_fnorm:4.7119e-02 L2_fnorm:4.7852e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.8096e-02 L5_fnorm:4.7607e-02 L6_fnorm:4.7607e-02 L7_fnorm:4.7119e-02 L8_fnorm:4.6875e-02 L9_fnorm:4.7119e-02 L10_fnorm:4.6875e-02 L11_fnorm:4.6875e-02 L12_fnorm:4.6875e-02 L1_l1linf:1.2939e-02 L2_l1linf:1.3245e-02 L3_l1linf:1.3611e-02 L4_l1linf:1.3855e-02 L5_l1linf:1.3794e-02 L6_l1linf:1.4160e-02 L7_l1linf:1.3977e-02 L8_l1linf:1.3794e-02 L9_l1linf:1.4343e-02 L10_l1linf:1.4099e-02 L11_l1linf:1.3733e-02 L12_l1linf:1.4099e-02 L1_spectral:7.8640e-04 L2_spectral:7.9014e-04 L3_spectral:7.9777e-04 L4_spectral:7.9616e-04 L5_spectral:7.9497e-04 L6_spectral:7.9903e-04 L7_spectral:7.9968e-04 L8_spectral:7.9822e-04 L9_spectral:7.9837e-04 L10_spectral:7.9675e-04 L11_spectral:7.9454e-04 L12_spectral:7.9323e-04 train_time:245636ms step_avg:43.86ms +[2025-09-11 10:00:44] [Rank 0] PRINT: step:5600/10000 
val_loss:4.8467 total_sharp:1.2428e-04 L1_sharp:1.4249e-01 L2_sharp:2.0999e-01 L3_sharp:2.7601e-01 L4_sharp:3.5800e-01 L5_sharp:4.2886e-01 L6_sharp:5.7008e-01 L7_sharp:5.8431e-01 L8_sharp:4.4844e-01 L9_sharp:5.3067e-01 L10_sharp:6.6285e-01 L11_sharp:8.1103e-01 L12_sharp:2.5137e+00 total_fnorm:6.9500e+01 total_l1_linf:1.2954e+05 total_spectral:3.5000e+01 L1_fnorm:4.7119e-02 L2_fnorm:4.7852e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.8096e-02 L5_fnorm:4.7607e-02 L6_fnorm:4.7607e-02 L7_fnorm:4.7119e-02 L8_fnorm:4.6875e-02 L9_fnorm:4.7119e-02 L10_fnorm:4.6875e-02 L11_fnorm:4.6875e-02 L12_fnorm:4.6875e-02 L1_l1linf:1.2939e-02 L2_l1linf:1.3245e-02 L3_l1linf:1.3611e-02 L4_l1linf:1.3855e-02 L5_l1linf:1.3794e-02 L6_l1linf:1.4160e-02 L7_l1linf:1.3977e-02 L8_l1linf:1.3794e-02 L9_l1linf:1.4343e-02 L10_l1linf:1.4099e-02 L11_l1linf:1.3733e-02 L12_l1linf:1.4099e-02 L1_spectral:7.8640e-04 L2_spectral:7.9014e-04 L3_spectral:7.9777e-04 L4_spectral:7.9616e-04 L5_spectral:7.9497e-04 L6_spectral:7.9903e-04 L7_spectral:7.9968e-04 L8_spectral:7.9822e-04 L9_spectral:7.9837e-04 L10_spectral:7.9675e-04 L11_spectral:7.9454e-04 L12_spectral:7.9323e-04 train_time:245636ms step_avg:43.86ms +[2025-09-11 10:00:46] [Rank 0] step:5601/10000 train_time:247662ms step_avg:44.22ms +[2025-09-11 10:00:46] [Rank 0] step:5601/10000 train_time:247662ms step_avg:44.22ms +[2025-09-11 10:00:46] [Rank 0] step:5621/10000 train_time:248374ms step_avg:44.19ms +[2025-09-11 10:00:46] [Rank 0] step:5621/10000 train_time:248374ms step_avg:44.19ms +[2025-09-11 10:00:47] [Rank 0] step:5641/10000 train_time:249065ms step_avg:44.15ms +[2025-09-11 10:00:47] [Rank 0] step:5641/10000 train_time:249065ms step_avg:44.15ms +[2025-09-11 10:00:48] [Rank 0] step:5661/10000 train_time:249755ms step_avg:44.12ms +[2025-09-11 10:00:48] [Rank 0] step:5661/10000 train_time:249755ms step_avg:44.12ms +[2025-09-11 10:00:49] [Rank 0] step:5681/10000 train_time:250446ms step_avg:44.08ms +[2025-09-11 10:00:49] [Rank 0] step:5681/10000 
train_time:250446ms step_avg:44.08ms +[2025-09-11 10:00:49] [Rank 0] step:5701/10000 train_time:251137ms step_avg:44.05ms +[2025-09-11 10:00:49] [Rank 0] step:5701/10000 train_time:251137ms step_avg:44.05ms +[2025-09-11 10:00:50] [Rank 0] step:5721/10000 train_time:251826ms step_avg:44.02ms +[2025-09-11 10:00:50] [Rank 0] step:5721/10000 train_time:251826ms step_avg:44.02ms +[2025-09-11 10:00:51] [Rank 0] step:5741/10000 train_time:252517ms step_avg:43.98ms +[2025-09-11 10:00:51] [Rank 0] step:5741/10000 train_time:252517ms step_avg:43.98ms +[2025-09-11 10:00:51] [Rank 0] step:5761/10000 train_time:253208ms step_avg:43.95ms +[2025-09-11 10:00:51] [Rank 0] step:5761/10000 train_time:253208ms step_avg:43.95ms +[2025-09-11 10:00:52] [Rank 0] step:5781/10000 train_time:253899ms step_avg:43.92ms +[2025-09-11 10:00:52] [Rank 0] step:5781/10000 train_time:253899ms step_avg:43.92ms +[2025-09-11 10:00:53] [Rank 0] step:5801/10000 train_time:254591ms step_avg:43.89ms +[2025-09-11 10:00:53] [Rank 0] step:5801/10000 train_time:254591ms step_avg:43.89ms +[2025-09-11 10:00:53] [Rank 0] step:5821/10000 train_time:255279ms step_avg:43.85ms +[2025-09-11 10:00:53] [Rank 0] step:5821/10000 train_time:255279ms step_avg:43.85ms +[2025-09-11 10:00:54] [Rank 0] step:5841/10000 train_time:255972ms step_avg:43.82ms +[2025-09-11 10:00:54] [Rank 0] step:5841/10000 train_time:255972ms step_avg:43.82ms +[2025-09-11 10:00:55] [Rank 0] step:5861/10000 train_time:256664ms step_avg:43.79ms +[2025-09-11 10:00:55] [Rank 0] step:5861/10000 train_time:256664ms step_avg:43.79ms +[2025-09-11 10:00:55] [Rank 0] step:5881/10000 train_time:257354ms step_avg:43.76ms +[2025-09-11 10:00:55] [Rank 0] step:5881/10000 train_time:257354ms step_avg:43.76ms +[2025-09-11 10:00:56] [Rank 0] step:5901/10000 train_time:258045ms step_avg:43.73ms +[2025-09-11 10:00:56] [Rank 0] step:5901/10000 train_time:258045ms step_avg:43.73ms +[2025-09-11 10:00:57] [Rank 0] step:5921/10000 train_time:258738ms step_avg:43.70ms 
+[2025-09-11 10:00:57] [Rank 0] step:5921/10000 train_time:258738ms step_avg:43.70ms +[2025-09-11 10:00:58] [Rank 0] step:5941/10000 train_time:259431ms step_avg:43.67ms +[2025-09-11 10:00:58] [Rank 0] step:5941/10000 train_time:259431ms step_avg:43.67ms +[2025-09-11 10:00:58] [Rank 0] step:5961/10000 train_time:260122ms step_avg:43.64ms +[2025-09-11 10:00:58] [Rank 0] step:5961/10000 train_time:260122ms step_avg:43.64ms +[2025-09-11 10:00:59] [Rank 0] step:5981/10000 train_time:260814ms step_avg:43.61ms +[2025-09-11 10:00:59] [Rank 0] step:5981/10000 train_time:260814ms step_avg:43.61ms +[2025-09-11 10:01:00] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 10:01:00] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 10:01:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 10:01:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 10:01:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 10:01:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 10:01:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:01:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:01:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 10:01:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 10:01:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 10:01:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 10:01:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 10:01:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 10:01:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 10:01:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 10:01:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 10:01:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 10:01:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 10:01:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 10:01:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 10:01:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 10:01:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 10:01:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 10:01:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 10:01:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 10:01:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 10:01:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 10:01:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 10:01:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 10:01:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 10:01:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 10:01:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 10:01:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 10:01:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 10:01:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 10:01:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 10:01:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 10:01:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 10:01:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 10:01:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 10:01:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 10:01:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 10:01:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 10:01:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:01:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:01:13] [Rank 0] PRINT: step:6000/10000 val_loss:4.7953 total_sharp:1.0444e-04 L1_sharp:1.3642e-01 L2_sharp:2.0772e-01 L3_sharp:2.8805e-01 L4_sharp:3.7997e-01 L5_sharp:4.4132e-01 L6_sharp:5.2076e-01 L7_sharp:5.0543e-01 L8_sharp:4.0933e-01 L9_sharp:4.2704e-01 L10_sharp:6.0136e-01 L11_sharp:6.4428e-01 L12_sharp:4.5570e+00 total_fnorm:7.0500e+01 total_l1_linf:1.2902e+05 total_spectral:3.5250e+01 L1_fnorm:4.7119e-02 L2_fnorm:4.7852e-02 L3_fnorm:4.7852e-02 L4_fnorm:4.7852e-02 L5_fnorm:4.7607e-02 L6_fnorm:4.7363e-02 L7_fnorm:4.7119e-02 L8_fnorm:4.6387e-02 L9_fnorm:4.6631e-02 L10_fnorm:4.6631e-02 L11_fnorm:4.6631e-02 L12_fnorm:4.6631e-02 L1_l1linf:1.2512e-02 L2_l1linf:1.2939e-02 L3_l1linf:1.3306e-02 L4_l1linf:1.3489e-02 L5_l1linf:1.3733e-02 L6_l1linf:1.3550e-02 L7_l1linf:1.3306e-02 L8_l1linf:1.3367e-02 L9_l1linf:1.3367e-02 L10_l1linf:1.3306e-02 L11_l1linf:1.3123e-02 L12_l1linf:1.3733e-02 L1_spectral:7.9248e-04 L2_spectral:7.9625e-04 L3_spectral:8.0538e-04 L4_spectral:8.0301e-04 L5_spectral:8.0441e-04 L6_spectral:8.0358e-04 L7_spectral:8.0312e-04 L8_spectral:8.0475e-04 L9_spectral:8.0263e-04 L10_spectral:8.0688e-04 L11_spectral:8.0621e-04 L12_spectral:7.9453e-04 train_time:261489ms step_avg:43.58ms +[2025-09-11 10:01:13] [Rank 0] PRINT: step:6000/10000 val_loss:4.7953 total_sharp:1.0444e-04 L1_sharp:1.3642e-01 L2_sharp:2.0772e-01 L3_sharp:2.8805e-01 L4_sharp:3.7997e-01 L5_sharp:4.4132e-01 L6_sharp:5.2076e-01 L7_sharp:5.0543e-01 L8_sharp:4.0933e-01 L9_sharp:4.2704e-01 L10_sharp:6.0136e-01 L11_sharp:6.4428e-01 L12_sharp:4.5570e+00 total_fnorm:7.0500e+01 total_l1_linf:1.2902e+05 total_spectral:3.5250e+01 L1_fnorm:4.7119e-02 L2_fnorm:4.7852e-02 L3_fnorm:4.7852e-02 L4_fnorm:4.7852e-02 L5_fnorm:4.7607e-02 L6_fnorm:4.7363e-02 L7_fnorm:4.7119e-02 L8_fnorm:4.6387e-02 L9_fnorm:4.6631e-02 L10_fnorm:4.6631e-02 L11_fnorm:4.6631e-02 L12_fnorm:4.6631e-02 L1_l1linf:1.2512e-02 L2_l1linf:1.2939e-02 L3_l1linf:1.3306e-02 L4_l1linf:1.3489e-02 L5_l1linf:1.3733e-02 
L6_l1linf:1.3550e-02 L7_l1linf:1.3306e-02 L8_l1linf:1.3367e-02 L9_l1linf:1.3367e-02 L10_l1linf:1.3306e-02 L11_l1linf:1.3123e-02 L12_l1linf:1.3733e-02 L1_spectral:7.9248e-04 L2_spectral:7.9625e-04 L3_spectral:8.0538e-04 L4_spectral:8.0301e-04 L5_spectral:8.0441e-04 L6_spectral:8.0358e-04 L7_spectral:8.0312e-04 L8_spectral:8.0475e-04 L9_spectral:8.0263e-04 L10_spectral:8.0688e-04 L11_spectral:8.0621e-04 L12_spectral:7.9453e-04 train_time:261489ms step_avg:43.58ms +[2025-09-11 10:01:15] [Rank 0] step:6001/10000 train_time:263553ms step_avg:43.92ms +[2025-09-11 10:01:15] [Rank 0] step:6001/10000 train_time:263553ms step_avg:43.92ms +[2025-09-11 10:01:16] [Rank 0] step:6021/10000 train_time:264264ms step_avg:43.89ms +[2025-09-11 10:01:16] [Rank 0] step:6021/10000 train_time:264264ms step_avg:43.89ms +[2025-09-11 10:01:17] [Rank 0] step:6041/10000 train_time:264959ms step_avg:43.86ms +[2025-09-11 10:01:17] [Rank 0] step:6041/10000 train_time:264959ms step_avg:43.86ms +[2025-09-11 10:01:18] [Rank 0] step:6061/10000 train_time:265651ms step_avg:43.83ms +[2025-09-11 10:01:18] [Rank 0] step:6061/10000 train_time:265651ms step_avg:43.83ms +[2025-09-11 10:01:18] [Rank 0] step:6081/10000 train_time:266344ms step_avg:43.80ms +[2025-09-11 10:01:18] [Rank 0] step:6081/10000 train_time:266344ms step_avg:43.80ms +[2025-09-11 10:01:19] [Rank 0] step:6101/10000 train_time:267036ms step_avg:43.77ms +[2025-09-11 10:01:19] [Rank 0] step:6101/10000 train_time:267036ms step_avg:43.77ms +[2025-09-11 10:01:20] [Rank 0] step:6121/10000 train_time:267729ms step_avg:43.74ms +[2025-09-11 10:01:20] [Rank 0] step:6121/10000 train_time:267729ms step_avg:43.74ms +[2025-09-11 10:01:20] [Rank 0] step:6141/10000 train_time:268422ms step_avg:43.71ms +[2025-09-11 10:01:20] [Rank 0] step:6141/10000 train_time:268422ms step_avg:43.71ms +[2025-09-11 10:01:21] [Rank 0] step:6161/10000 train_time:269114ms step_avg:43.68ms +[2025-09-11 10:01:21] [Rank 0] step:6161/10000 train_time:269114ms step_avg:43.68ms 
+[2025-09-11 10:01:22] [Rank 0] step:6181/10000 train_time:269805ms step_avg:43.65ms +[2025-09-11 10:01:22] [Rank 0] step:6181/10000 train_time:269805ms step_avg:43.65ms +[2025-09-11 10:01:22] [Rank 0] step:6201/10000 train_time:270499ms step_avg:43.62ms +[2025-09-11 10:01:22] [Rank 0] step:6201/10000 train_time:270499ms step_avg:43.62ms +[2025-09-11 10:01:23] [Rank 0] step:6221/10000 train_time:271192ms step_avg:43.59ms +[2025-09-11 10:01:23] [Rank 0] step:6221/10000 train_time:271192ms step_avg:43.59ms +[2025-09-11 10:01:24] [Rank 0] step:6241/10000 train_time:271885ms step_avg:43.56ms +[2025-09-11 10:01:24] [Rank 0] step:6241/10000 train_time:271885ms step_avg:43.56ms +[2025-09-11 10:01:25] [Rank 0] step:6261/10000 train_time:272576ms step_avg:43.54ms +[2025-09-11 10:01:25] [Rank 0] step:6261/10000 train_time:272576ms step_avg:43.54ms +[2025-09-11 10:01:25] [Rank 0] step:6281/10000 train_time:273268ms step_avg:43.51ms +[2025-09-11 10:01:25] [Rank 0] step:6281/10000 train_time:273268ms step_avg:43.51ms +[2025-09-11 10:01:26] [Rank 0] step:6301/10000 train_time:274257ms step_avg:43.53ms +[2025-09-11 10:01:26] [Rank 0] step:6301/10000 train_time:274257ms step_avg:43.53ms +[2025-09-11 10:01:27] [Rank 0] step:6321/10000 train_time:274951ms step_avg:43.50ms +[2025-09-11 10:01:27] [Rank 0] step:6321/10000 train_time:274951ms step_avg:43.50ms +[2025-09-11 10:01:28] [Rank 0] step:6341/10000 train_time:275644ms step_avg:43.47ms +[2025-09-11 10:01:28] [Rank 0] step:6341/10000 train_time:275644ms step_avg:43.47ms +[2025-09-11 10:01:29] [Rank 0] step:6361/10000 train_time:276634ms step_avg:43.49ms +[2025-09-11 10:01:29] [Rank 0] step:6361/10000 train_time:276634ms step_avg:43.49ms +[2025-09-11 10:01:29] [Rank 0] step:6381/10000 train_time:277325ms step_avg:43.46ms +[2025-09-11 10:01:29] [Rank 0] step:6381/10000 train_time:277325ms step_avg:43.46ms +[2025-09-11 10:01:30] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 10:01:30] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 10:01:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 10:01:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 10:01:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 10:01:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 10:01:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:01:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:01:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 10:01:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 10:01:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 10:01:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 10:01:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 10:01:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 10:01:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 10:01:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 10:01:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 10:01:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 10:01:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 10:01:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 10:01:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 10:01:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 10:01:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 10:01:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 10:01:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 10:01:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 10:01:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 10:01:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 10:01:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 10:01:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 10:01:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 10:01:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 10:01:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 10:01:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 10:01:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 10:01:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 10:01:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 10:01:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 10:01:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 10:01:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 10:01:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 10:01:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 10:01:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 10:01:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 10:01:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:01:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:01:41] [Rank 0] PRINT: step:6400/10000 val_loss:4.7579 total_sharp:1.2308e-04 L1_sharp:1.2656e-01 L2_sharp:2.0497e-01 L3_sharp:2.7625e-01 L4_sharp:4.0462e-01 L5_sharp:4.8417e-01 L6_sharp:5.3934e-01 L7_sharp:5.9049e-01 L8_sharp:4.9853e-01 L9_sharp:5.1672e-01 L10_sharp:6.8974e-01 L11_sharp:7.2643e-01 L12_sharp:9.6320e-01 total_fnorm:6.2000e+01 total_l1_linf:1.1008e+05 total_spectral:3.1000e+01 L1_fnorm:4.1016e-02 L2_fnorm:4.1748e-02 L3_fnorm:4.1748e-02 L4_fnorm:4.1504e-02 L5_fnorm:4.1504e-02 L6_fnorm:4.1260e-02 L7_fnorm:4.1016e-02 L8_fnorm:4.0527e-02 L9_fnorm:4.0771e-02 L10_fnorm:4.0527e-02 L11_fnorm:4.0771e-02 L12_fnorm:4.0527e-02 L1_l1linf:1.0071e-02 L2_l1linf:1.0620e-02 L3_l1linf:1.1108e-02 L4_l1linf:1.1292e-02 L5_l1linf:1.1292e-02 L6_l1linf:1.1353e-02 L7_l1linf:1.1597e-02 L8_l1linf:1.1475e-02 L9_l1linf:1.1536e-02 L10_l1linf:1.1292e-02 L11_l1linf:1.1292e-02 L12_l1linf:1.1841e-02 L1_spectral:7.0540e-04 L2_spectral:7.1645e-04 L3_spectral:7.1466e-04 L4_spectral:7.2464e-04 L5_spectral:7.1260e-04 L6_spectral:7.1776e-04 L7_spectral:7.1170e-04 L8_spectral:7.1373e-04 L9_spectral:7.1479e-04 L10_spectral:7.1218e-04 L11_spectral:7.0895e-04 L12_spectral:6.9955e-04 train_time:277996ms step_avg:43.44ms +[2025-09-11 10:01:41] [Rank 0] PRINT: step:6400/10000 
val_loss:4.7579 total_sharp:1.2308e-04 L1_sharp:1.2656e-01 L2_sharp:2.0497e-01 L3_sharp:2.7625e-01 L4_sharp:4.0462e-01 L5_sharp:4.8417e-01 L6_sharp:5.3934e-01 L7_sharp:5.9049e-01 L8_sharp:4.9853e-01 L9_sharp:5.1672e-01 L10_sharp:6.8974e-01 L11_sharp:7.2643e-01 L12_sharp:9.6320e-01 total_fnorm:6.2000e+01 total_l1_linf:1.1008e+05 total_spectral:3.1000e+01 L1_fnorm:4.1016e-02 L2_fnorm:4.1748e-02 L3_fnorm:4.1748e-02 L4_fnorm:4.1504e-02 L5_fnorm:4.1504e-02 L6_fnorm:4.1260e-02 L7_fnorm:4.1016e-02 L8_fnorm:4.0527e-02 L9_fnorm:4.0771e-02 L10_fnorm:4.0527e-02 L11_fnorm:4.0771e-02 L12_fnorm:4.0527e-02 L1_l1linf:1.0071e-02 L2_l1linf:1.0620e-02 L3_l1linf:1.1108e-02 L4_l1linf:1.1292e-02 L5_l1linf:1.1292e-02 L6_l1linf:1.1353e-02 L7_l1linf:1.1597e-02 L8_l1linf:1.1475e-02 L9_l1linf:1.1536e-02 L10_l1linf:1.1292e-02 L11_l1linf:1.1292e-02 L12_l1linf:1.1841e-02 L1_spectral:7.0540e-04 L2_spectral:7.1645e-04 L3_spectral:7.1466e-04 L4_spectral:7.2464e-04 L5_spectral:7.1260e-04 L6_spectral:7.1776e-04 L7_spectral:7.1170e-04 L8_spectral:7.1373e-04 L9_spectral:7.1479e-04 L10_spectral:7.1218e-04 L11_spectral:7.0895e-04 L12_spectral:6.9955e-04 train_time:277996ms step_avg:43.44ms +[2025-09-11 10:01:43] [Rank 0] step:6401/10000 train_time:280095ms step_avg:43.76ms +[2025-09-11 10:01:43] [Rank 0] step:6401/10000 train_time:280095ms step_avg:43.76ms +[2025-09-11 10:01:44] [Rank 0] step:6421/10000 train_time:280814ms step_avg:43.73ms +[2025-09-11 10:01:44] [Rank 0] step:6421/10000 train_time:280814ms step_avg:43.73ms +[2025-09-11 10:01:44] [Rank 0] step:6441/10000 train_time:281506ms step_avg:43.71ms +[2025-09-11 10:01:44] [Rank 0] step:6441/10000 train_time:281506ms step_avg:43.71ms +[2025-09-11 10:01:45] [Rank 0] step:6461/10000 train_time:282199ms step_avg:43.68ms +[2025-09-11 10:01:45] [Rank 0] step:6461/10000 train_time:282199ms step_avg:43.68ms +[2025-09-11 10:01:46] [Rank 0] step:6481/10000 train_time:282893ms step_avg:43.65ms +[2025-09-11 10:01:46] [Rank 0] step:6481/10000 
train_time:282893ms step_avg:43.65ms +[2025-09-11 10:01:46] [Rank 0] step:6501/10000 train_time:283587ms step_avg:43.62ms +[2025-09-11 10:01:46] [Rank 0] step:6501/10000 train_time:283587ms step_avg:43.62ms +[2025-09-11 10:01:47] [Rank 0] step:6521/10000 train_time:284281ms step_avg:43.59ms +[2025-09-11 10:01:47] [Rank 0] step:6521/10000 train_time:284281ms step_avg:43.59ms +[2025-09-11 10:01:48] [Rank 0] step:6541/10000 train_time:284971ms step_avg:43.57ms +[2025-09-11 10:01:48] [Rank 0] step:6541/10000 train_time:284971ms step_avg:43.57ms +[2025-09-11 10:01:48] [Rank 0] step:6561/10000 train_time:285665ms step_avg:43.54ms +[2025-09-11 10:01:48] [Rank 0] step:6561/10000 train_time:285665ms step_avg:43.54ms +[2025-09-11 10:01:49] [Rank 0] step:6581/10000 train_time:286358ms step_avg:43.51ms +[2025-09-11 10:01:49] [Rank 0] step:6581/10000 train_time:286358ms step_avg:43.51ms +[2025-09-11 10:01:50] [Rank 0] step:6601/10000 train_time:287050ms step_avg:43.49ms +[2025-09-11 10:01:50] [Rank 0] step:6601/10000 train_time:287050ms step_avg:43.49ms +[2025-09-11 10:01:51] [Rank 0] step:6621/10000 train_time:287742ms step_avg:43.46ms +[2025-09-11 10:01:51] [Rank 0] step:6621/10000 train_time:287742ms step_avg:43.46ms +[2025-09-11 10:01:51] [Rank 0] step:6641/10000 train_time:288435ms step_avg:43.43ms +[2025-09-11 10:01:51] [Rank 0] step:6641/10000 train_time:288435ms step_avg:43.43ms +[2025-09-11 10:01:52] [Rank 0] step:6661/10000 train_time:289128ms step_avg:43.41ms +[2025-09-11 10:01:52] [Rank 0] step:6661/10000 train_time:289128ms step_avg:43.41ms +[2025-09-11 10:01:53] [Rank 0] step:6681/10000 train_time:289827ms step_avg:43.38ms +[2025-09-11 10:01:53] [Rank 0] step:6681/10000 train_time:289827ms step_avg:43.38ms +[2025-09-11 10:01:53] [Rank 0] step:6701/10000 train_time:290526ms step_avg:43.36ms +[2025-09-11 10:01:53] [Rank 0] step:6701/10000 train_time:290526ms step_avg:43.36ms +[2025-09-11 10:01:54] [Rank 0] step:6721/10000 train_time:291226ms step_avg:43.33ms 
+[2025-09-11 10:01:54] [Rank 0] step:6721/10000 train_time:291226ms step_avg:43.33ms +[2025-09-11 10:01:55] [Rank 0] step:6741/10000 train_time:291925ms step_avg:43.31ms +[2025-09-11 10:01:55] [Rank 0] step:6741/10000 train_time:291925ms step_avg:43.31ms +[2025-09-11 10:01:55] [Rank 0] step:6761/10000 train_time:292623ms step_avg:43.28ms +[2025-09-11 10:01:55] [Rank 0] step:6761/10000 train_time:292623ms step_avg:43.28ms +[2025-09-11 10:01:56] [Rank 0] step:6781/10000 train_time:293323ms step_avg:43.26ms +[2025-09-11 10:01:56] [Rank 0] step:6781/10000 train_time:293323ms step_avg:43.26ms +[2025-09-11 10:01:57] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 10:01:57] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 10:01:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 10:01:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 10:02:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 10:02:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 10:02:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:02:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:02:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 10:02:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 10:02:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 10:02:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 10:02:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 10:02:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 10:02:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 10:02:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 10:02:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 10:02:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 10:02:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 10:02:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 10:02:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 10:02:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 10:02:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 10:02:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 10:02:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 10:02:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 10:02:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 10:02:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 10:02:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 10:02:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 10:02:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 10:02:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 10:02:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 10:02:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 10:02:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 10:02:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 10:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 10:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 10:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 10:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 10:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 10:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 10:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 10:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 10:02:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:02:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:02:08] [Rank 0] PRINT: step:6800/10000 val_loss:4.7292 total_sharp:8.6606e-05 L1_sharp:1.0603e-01 L2_sharp:1.6760e-01 L3_sharp:2.5962e-01 L4_sharp:3.6933e-01 L5_sharp:4.3063e-01 L6_sharp:5.6044e-01 L7_sharp:5.7181e-01 L8_sharp:4.3420e-01 L9_sharp:4.7759e-01 L10_sharp:5.8649e-01 L11_sharp:7.4495e-01 L12_sharp:1.1776e+00 total_fnorm:6.0250e+01 total_l1_linf:1.0650e+05 total_spectral:3.0000e+01 L1_fnorm:3.4424e-02 L2_fnorm:3.5156e-02 L3_fnorm:3.5156e-02 L4_fnorm:3.5156e-02 L5_fnorm:3.5156e-02 L6_fnorm:3.4668e-02 L7_fnorm:3.4668e-02 L8_fnorm:3.3936e-02 L9_fnorm:3.4180e-02 L10_fnorm:3.4180e-02 L11_fnorm:3.4180e-02 L12_fnorm:3.3936e-02 L1_l1linf:7.6904e-03 L2_l1linf:8.3618e-03 L3_l1linf:8.7891e-03 L4_l1linf:8.8501e-03 L5_l1linf:8.9722e-03 L6_l1linf:8.9111e-03 L7_l1linf:8.7280e-03 L8_l1linf:8.9111e-03 L9_l1linf:9.0332e-03 L10_l1linf:9.0332e-03 L11_l1linf:8.9111e-03 L12_l1linf:9.2163e-03 L1_spectral:6.2643e-04 L2_spectral:6.2464e-04 L3_spectral:6.2705e-04 L4_spectral:6.2701e-04 L5_spectral:6.2919e-04 L6_spectral:6.2772e-04 L7_spectral:6.2779e-04 L8_spectral:6.2012e-04 L9_spectral:6.2013e-04 L10_spectral:6.1871e-04 L11_spectral:6.1855e-04 L12_spectral:6.1092e-04 train_time:294002ms step_avg:43.24ms +[2025-09-11 10:02:08] [Rank 0] PRINT: step:6800/10000 val_loss:4.7292 total_sharp:8.6606e-05 L1_sharp:1.0603e-01 L2_sharp:1.6760e-01 L3_sharp:2.5962e-01 L4_sharp:3.6933e-01 L5_sharp:4.3063e-01 L6_sharp:5.6044e-01 L7_sharp:5.7181e-01 L8_sharp:4.3420e-01 L9_sharp:4.7759e-01 L10_sharp:5.8649e-01 L11_sharp:7.4495e-01 L12_sharp:1.1776e+00 total_fnorm:6.0250e+01 total_l1_linf:1.0650e+05 total_spectral:3.0000e+01 L1_fnorm:3.4424e-02 L2_fnorm:3.5156e-02 L3_fnorm:3.5156e-02 L4_fnorm:3.5156e-02 L5_fnorm:3.5156e-02 L6_fnorm:3.4668e-02 L7_fnorm:3.4668e-02 L8_fnorm:3.3936e-02 L9_fnorm:3.4180e-02 L10_fnorm:3.4180e-02 L11_fnorm:3.4180e-02 L12_fnorm:3.3936e-02 L1_l1linf:7.6904e-03 L2_l1linf:8.3618e-03 L3_l1linf:8.7891e-03 L4_l1linf:8.8501e-03 L5_l1linf:8.9722e-03 
L6_l1linf:8.9111e-03 L7_l1linf:8.7280e-03 L8_l1linf:8.9111e-03 L9_l1linf:9.0332e-03 L10_l1linf:9.0332e-03 L11_l1linf:8.9111e-03 L12_l1linf:9.2163e-03 L1_spectral:6.2643e-04 L2_spectral:6.2464e-04 L3_spectral:6.2705e-04 L4_spectral:6.2701e-04 L5_spectral:6.2919e-04 L6_spectral:6.2772e-04 L7_spectral:6.2779e-04 L8_spectral:6.2012e-04 L9_spectral:6.2013e-04 L10_spectral:6.1871e-04 L11_spectral:6.1855e-04 L12_spectral:6.1092e-04 train_time:294002ms step_avg:43.24ms +[2025-09-11 10:02:11] [Rank 0] step:6801/10000 train_time:296522ms step_avg:43.60ms +[2025-09-11 10:02:11] [Rank 0] step:6801/10000 train_time:296522ms step_avg:43.60ms +[2025-09-11 10:02:11] [Rank 0] step:6821/10000 train_time:297403ms step_avg:43.60ms +[2025-09-11 10:02:11] [Rank 0] step:6821/10000 train_time:297403ms step_avg:43.60ms +[2025-09-11 10:02:12] [Rank 0] step:6841/10000 train_time:298107ms step_avg:43.58ms +[2025-09-11 10:02:12] [Rank 0] step:6841/10000 train_time:298107ms step_avg:43.58ms +[2025-09-11 10:02:13] [Rank 0] step:6861/10000 train_time:298811ms step_avg:43.55ms +[2025-09-11 10:02:13] [Rank 0] step:6861/10000 train_time:298811ms step_avg:43.55ms +[2025-09-11 10:02:14] [Rank 0] step:6881/10000 train_time:299514ms step_avg:43.53ms +[2025-09-11 10:02:14] [Rank 0] step:6881/10000 train_time:299514ms step_avg:43.53ms +[2025-09-11 10:02:14] [Rank 0] step:6901/10000 train_time:300212ms step_avg:43.50ms +[2025-09-11 10:02:14] [Rank 0] step:6901/10000 train_time:300212ms step_avg:43.50ms +[2025-09-11 10:02:15] [Rank 0] step:6921/10000 train_time:300912ms step_avg:43.48ms +[2025-09-11 10:02:15] [Rank 0] step:6921/10000 train_time:300912ms step_avg:43.48ms +[2025-09-11 10:02:16] [Rank 0] step:6941/10000 train_time:301613ms step_avg:43.45ms +[2025-09-11 10:02:16] [Rank 0] step:6941/10000 train_time:301613ms step_avg:43.45ms +[2025-09-11 10:02:16] [Rank 0] step:6961/10000 train_time:302314ms step_avg:43.43ms +[2025-09-11 10:02:16] [Rank 0] step:6961/10000 train_time:302314ms step_avg:43.43ms 
+[2025-09-11 10:02:17] [Rank 0] step:6981/10000 train_time:303017ms step_avg:43.41ms +[2025-09-11 10:02:17] [Rank 0] step:6981/10000 train_time:303017ms step_avg:43.41ms +[2025-09-11 10:02:18] [Rank 0] step:7001/10000 train_time:303718ms step_avg:43.38ms +[2025-09-11 10:02:18] [Rank 0] step:7001/10000 train_time:303718ms step_avg:43.38ms +[2025-09-11 10:02:18] [Rank 0] step:7021/10000 train_time:304418ms step_avg:43.36ms +[2025-09-11 10:02:18] [Rank 0] step:7021/10000 train_time:304418ms step_avg:43.36ms +[2025-09-11 10:02:19] [Rank 0] step:7041/10000 train_time:305117ms step_avg:43.33ms +[2025-09-11 10:02:19] [Rank 0] step:7041/10000 train_time:305117ms step_avg:43.33ms +[2025-09-11 10:02:20] [Rank 0] step:7061/10000 train_time:305818ms step_avg:43.31ms +[2025-09-11 10:02:20] [Rank 0] step:7061/10000 train_time:305818ms step_avg:43.31ms +[2025-09-11 10:02:21] [Rank 0] step:7081/10000 train_time:306520ms step_avg:43.29ms +[2025-09-11 10:02:21] [Rank 0] step:7081/10000 train_time:306520ms step_avg:43.29ms +[2025-09-11 10:02:21] [Rank 0] step:7101/10000 train_time:307220ms step_avg:43.26ms +[2025-09-11 10:02:21] [Rank 0] step:7101/10000 train_time:307220ms step_avg:43.26ms +[2025-09-11 10:02:22] [Rank 0] step:7121/10000 train_time:307923ms step_avg:43.24ms +[2025-09-11 10:02:22] [Rank 0] step:7121/10000 train_time:307923ms step_avg:43.24ms +[2025-09-11 10:02:23] [Rank 0] step:7141/10000 train_time:308624ms step_avg:43.22ms +[2025-09-11 10:02:23] [Rank 0] step:7141/10000 train_time:308624ms step_avg:43.22ms +[2025-09-11 10:02:23] [Rank 0] step:7161/10000 train_time:309326ms step_avg:43.20ms +[2025-09-11 10:02:23] [Rank 0] step:7161/10000 train_time:309326ms step_avg:43.20ms +[2025-09-11 10:02:24] [Rank 0] step:7181/10000 train_time:310026ms step_avg:43.17ms +[2025-09-11 10:02:24] [Rank 0] step:7181/10000 train_time:310026ms step_avg:43.17ms +[2025-09-11 10:02:25] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 10:02:25] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 10:02:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 10:02:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 10:02:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 10:02:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 10:02:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:02:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:02:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 10:02:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 10:02:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 10:02:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 10:02:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 10:02:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 10:02:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 10:02:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 10:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 10:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 10:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 10:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 10:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 10:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 10:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 10:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 10:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 10:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 10:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 10:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 10:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 10:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 10:02:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 10:02:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 10:02:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 10:02:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 10:02:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 10:02:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 10:02:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 10:02:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 10:02:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 10:02:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 10:02:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 10:02:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 10:02:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 10:02:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 10:02:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:02:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:02:36] [Rank 0] PRINT: step:7200/10000 val_loss:4.6982 total_sharp:8.1587e-05 L1_sharp:1.2068e-01 L2_sharp:1.8384e-01 L3_sharp:2.5674e-01 L4_sharp:3.2042e-01 L5_sharp:4.3656e-01 L6_sharp:5.5608e-01 L7_sharp:5.0636e-01 L8_sharp:4.5503e-01 L9_sharp:4.9067e-01 L10_sharp:6.3207e-01 L11_sharp:7.4289e-01 L12_sharp:2.0714e+00 total_fnorm:5.3000e+01 total_l1_linf:8.9600e+04 total_spectral:2.6500e+01 L1_fnorm:2.8809e-02 L2_fnorm:2.9419e-02 L3_fnorm:2.9419e-02 L4_fnorm:2.9419e-02 L5_fnorm:2.9175e-02 L6_fnorm:2.9053e-02 L7_fnorm:2.8931e-02 L8_fnorm:2.8320e-02 L9_fnorm:2.8564e-02 L10_fnorm:2.8687e-02 L11_fnorm:2.8564e-02 L12_fnorm:2.8320e-02 L1_l1linf:5.9814e-03 L2_l1linf:6.5002e-03 L3_l1linf:6.7749e-03 L4_l1linf:7.1411e-03 L5_l1linf:7.0801e-03 L6_l1linf:7.1411e-03 L7_l1linf:7.2327e-03 L8_l1linf:7.2632e-03 L9_l1linf:7.2021e-03 L10_l1linf:7.2021e-03 L11_l1linf:7.2937e-03 L12_l1linf:7.4158e-03 L1_spectral:5.3269e-04 L2_spectral:5.3807e-04 L3_spectral:5.3906e-04 L4_spectral:5.3872e-04 L5_spectral:5.4164e-04 L6_spectral:5.4065e-04 L7_spectral:5.3855e-04 L8_spectral:5.2227e-04 L9_spectral:5.2873e-04 L10_spectral:5.3445e-04 L11_spectral:5.2610e-04 L12_spectral:5.2485e-04 train_time:310707ms step_avg:43.15ms +[2025-09-11 10:02:36] [Rank 0] PRINT: step:7200/10000 
val_loss:4.6982 total_sharp:8.1587e-05 L1_sharp:1.2068e-01 L2_sharp:1.8384e-01 L3_sharp:2.5674e-01 L4_sharp:3.2042e-01 L5_sharp:4.3656e-01 L6_sharp:5.5608e-01 L7_sharp:5.0636e-01 L8_sharp:4.5503e-01 L9_sharp:4.9067e-01 L10_sharp:6.3207e-01 L11_sharp:7.4289e-01 L12_sharp:2.0714e+00 total_fnorm:5.3000e+01 total_l1_linf:8.9600e+04 total_spectral:2.6500e+01 L1_fnorm:2.8809e-02 L2_fnorm:2.9419e-02 L3_fnorm:2.9419e-02 L4_fnorm:2.9419e-02 L5_fnorm:2.9175e-02 L6_fnorm:2.9053e-02 L7_fnorm:2.8931e-02 L8_fnorm:2.8320e-02 L9_fnorm:2.8564e-02 L10_fnorm:2.8687e-02 L11_fnorm:2.8564e-02 L12_fnorm:2.8320e-02 L1_l1linf:5.9814e-03 L2_l1linf:6.5002e-03 L3_l1linf:6.7749e-03 L4_l1linf:7.1411e-03 L5_l1linf:7.0801e-03 L6_l1linf:7.1411e-03 L7_l1linf:7.2327e-03 L8_l1linf:7.2632e-03 L9_l1linf:7.2021e-03 L10_l1linf:7.2021e-03 L11_l1linf:7.2937e-03 L12_l1linf:7.4158e-03 L1_spectral:5.3269e-04 L2_spectral:5.3807e-04 L3_spectral:5.3906e-04 L4_spectral:5.3872e-04 L5_spectral:5.4164e-04 L6_spectral:5.4065e-04 L7_spectral:5.3855e-04 L8_spectral:5.2227e-04 L9_spectral:5.2873e-04 L10_spectral:5.3445e-04 L11_spectral:5.2610e-04 L12_spectral:5.2485e-04 train_time:310707ms step_avg:43.15ms +[2025-09-11 10:02:38] [Rank 0] step:7201/10000 train_time:312715ms step_avg:43.43ms +[2025-09-11 10:02:38] [Rank 0] step:7201/10000 train_time:312715ms step_avg:43.43ms +[2025-09-11 10:02:39] [Rank 0] step:7221/10000 train_time:313453ms step_avg:43.41ms +[2025-09-11 10:02:39] [Rank 0] step:7221/10000 train_time:313453ms step_avg:43.41ms +[2025-09-11 10:02:39] [Rank 0] step:7241/10000 train_time:314156ms step_avg:43.39ms +[2025-09-11 10:02:39] [Rank 0] step:7241/10000 train_time:314156ms step_avg:43.39ms +[2025-09-11 10:02:40] [Rank 0] step:7261/10000 train_time:314861ms step_avg:43.36ms +[2025-09-11 10:02:40] [Rank 0] step:7261/10000 train_time:314861ms step_avg:43.36ms +[2025-09-11 10:02:41] [Rank 0] step:7281/10000 train_time:315568ms step_avg:43.34ms +[2025-09-11 10:02:41] [Rank 0] step:7281/10000 
train_time:315568ms step_avg:43.34ms +[2025-09-11 10:02:42] [Rank 0] step:7301/10000 train_time:316272ms step_avg:43.32ms +[2025-09-11 10:02:42] [Rank 0] step:7301/10000 train_time:316272ms step_avg:43.32ms +[2025-09-11 10:02:42] [Rank 0] step:7321/10000 train_time:316974ms step_avg:43.30ms +[2025-09-11 10:02:42] [Rank 0] step:7321/10000 train_time:316974ms step_avg:43.30ms +[2025-09-11 10:02:43] [Rank 0] step:7341/10000 train_time:317677ms step_avg:43.27ms +[2025-09-11 10:02:43] [Rank 0] step:7341/10000 train_time:317677ms step_avg:43.27ms +[2025-09-11 10:02:44] [Rank 0] step:7361/10000 train_time:318379ms step_avg:43.25ms +[2025-09-11 10:02:44] [Rank 0] step:7361/10000 train_time:318379ms step_avg:43.25ms +[2025-09-11 10:02:44] [Rank 0] step:7381/10000 train_time:319082ms step_avg:43.23ms +[2025-09-11 10:02:44] [Rank 0] step:7381/10000 train_time:319082ms step_avg:43.23ms +[2025-09-11 10:02:45] [Rank 0] step:7401/10000 train_time:319784ms step_avg:43.21ms +[2025-09-11 10:02:45] [Rank 0] step:7401/10000 train_time:319784ms step_avg:43.21ms +[2025-09-11 10:02:46] [Rank 0] step:7421/10000 train_time:320485ms step_avg:43.19ms +[2025-09-11 10:02:46] [Rank 0] step:7421/10000 train_time:320485ms step_avg:43.19ms +[2025-09-11 10:02:46] [Rank 0] step:7441/10000 train_time:321189ms step_avg:43.16ms +[2025-09-11 10:02:46] [Rank 0] step:7441/10000 train_time:321189ms step_avg:43.16ms +[2025-09-11 10:02:47] [Rank 0] step:7461/10000 train_time:321892ms step_avg:43.14ms +[2025-09-11 10:02:47] [Rank 0] step:7461/10000 train_time:321892ms step_avg:43.14ms +[2025-09-11 10:02:48] [Rank 0] step:7481/10000 train_time:322596ms step_avg:43.12ms +[2025-09-11 10:02:48] [Rank 0] step:7481/10000 train_time:322596ms step_avg:43.12ms +[2025-09-11 10:02:49] [Rank 0] step:7501/10000 train_time:323299ms step_avg:43.10ms +[2025-09-11 10:02:49] [Rank 0] step:7501/10000 train_time:323299ms step_avg:43.10ms +[2025-09-11 10:02:49] [Rank 0] step:7521/10000 train_time:324002ms step_avg:43.08ms 
+[2025-09-11 10:02:49] [Rank 0] step:7521/10000 train_time:324002ms step_avg:43.08ms +[2025-09-11 10:02:50] [Rank 0] step:7541/10000 train_time:324703ms step_avg:43.06ms +[2025-09-11 10:02:50] [Rank 0] step:7541/10000 train_time:324703ms step_avg:43.06ms +[2025-09-11 10:02:51] [Rank 0] step:7561/10000 train_time:325408ms step_avg:43.04ms +[2025-09-11 10:02:51] [Rank 0] step:7561/10000 train_time:325408ms step_avg:43.04ms +[2025-09-11 10:02:51] [Rank 0] step:7581/10000 train_time:326112ms step_avg:43.02ms +[2025-09-11 10:02:51] [Rank 0] step:7581/10000 train_time:326112ms step_avg:43.02ms +[2025-09-11 10:02:52] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 10:02:52] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 10:02:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 10:02:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 10:02:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 10:02:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 10:02:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:02:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:02:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 10:02:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 10:02:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 10:02:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 10:02:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 10:02:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 10:03:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 10:03:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 10:03:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 10:03:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 10:03:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 10:03:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 10:03:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 10:03:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 10:03:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 10:03:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 10:03:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 10:03:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 10:03:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 10:03:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 10:03:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 10:03:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 10:03:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 10:03:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 10:03:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 10:03:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 10:03:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 10:03:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 10:03:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 10:03:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 10:03:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 10:03:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 10:03:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 10:03:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 10:03:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 10:03:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 10:03:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:03:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:03:03] [Rank 0] PRINT: step:7600/10000 val_loss:4.6745 total_sharp:8.3305e-05 L1_sharp:8.6074e-02 L2_sharp:1.6740e-01 L3_sharp:2.3763e-01 L4_sharp:3.1438e-01 L5_sharp:3.7368e-01 L6_sharp:5.2942e-01 L7_sharp:5.5757e-01 L8_sharp:4.4405e-01 L9_sharp:5.3113e-01 L10_sharp:6.3040e-01 L11_sharp:7.7883e-01 L12_sharp:1.3040e+00 total_fnorm:4.2000e+01 total_l1_linf:6.6048e+04 total_spectral:2.1000e+01 L1_fnorm:2.3315e-02 L2_fnorm:2.3804e-02 L3_fnorm:2.3804e-02 L4_fnorm:2.3926e-02 L5_fnorm:2.3926e-02 L6_fnorm:2.3804e-02 L7_fnorm:2.3682e-02 L8_fnorm:2.2949e-02 L9_fnorm:2.3315e-02 L10_fnorm:2.3315e-02 L11_fnorm:2.3315e-02 L12_fnorm:2.2949e-02 L1_l1linf:4.5776e-03 L2_l1linf:5.0049e-03 L3_l1linf:5.0964e-03 L4_l1linf:5.5847e-03 L5_l1linf:5.4626e-03 L6_l1linf:5.5847e-03 L7_l1linf:5.6458e-03 L8_l1linf:5.5847e-03 L9_l1linf:5.6763e-03 L10_l1linf:5.6458e-03 L11_l1linf:5.5847e-03 L12_l1linf:5.6458e-03 L1_spectral:4.4886e-04 L2_spectral:4.5580e-04 L3_spectral:4.4803e-04 L4_spectral:4.5226e-04 L5_spectral:4.5362e-04 L6_spectral:4.4863e-04 L7_spectral:4.4623e-04 L8_spectral:4.3554e-04 L9_spectral:4.4136e-04 L10_spectral:4.3544e-04 L11_spectral:4.3984e-04 L12_spectral:4.2442e-04 train_time:326797ms step_avg:43.00ms +[2025-09-11 10:03:03] [Rank 0] PRINT: step:7600/10000 val_loss:4.6745 total_sharp:8.3305e-05 L1_sharp:8.6074e-02 L2_sharp:1.6740e-01 L3_sharp:2.3763e-01 L4_sharp:3.1438e-01 L5_sharp:3.7368e-01 L6_sharp:5.2942e-01 L7_sharp:5.5757e-01 L8_sharp:4.4405e-01 L9_sharp:5.3113e-01 L10_sharp:6.3040e-01 L11_sharp:7.7883e-01 L12_sharp:1.3040e+00 total_fnorm:4.2000e+01 total_l1_linf:6.6048e+04 total_spectral:2.1000e+01 L1_fnorm:2.3315e-02 L2_fnorm:2.3804e-02 L3_fnorm:2.3804e-02 L4_fnorm:2.3926e-02 L5_fnorm:2.3926e-02 L6_fnorm:2.3804e-02 L7_fnorm:2.3682e-02 L8_fnorm:2.2949e-02 L9_fnorm:2.3315e-02 L10_fnorm:2.3315e-02 L11_fnorm:2.3315e-02 L12_fnorm:2.2949e-02 L1_l1linf:4.5776e-03 L2_l1linf:5.0049e-03 L3_l1linf:5.0964e-03 L4_l1linf:5.5847e-03 L5_l1linf:5.4626e-03 
L6_l1linf:5.5847e-03 L7_l1linf:5.6458e-03 L8_l1linf:5.5847e-03 L9_l1linf:5.6763e-03 L10_l1linf:5.6458e-03 L11_l1linf:5.5847e-03 L12_l1linf:5.6458e-03 L1_spectral:4.4886e-04 L2_spectral:4.5580e-04 L3_spectral:4.4803e-04 L4_spectral:4.5226e-04 L5_spectral:4.5362e-04 L6_spectral:4.4863e-04 L7_spectral:4.4623e-04 L8_spectral:4.3554e-04 L9_spectral:4.4136e-04 L10_spectral:4.3544e-04 L11_spectral:4.3984e-04 L12_spectral:4.2442e-04 train_time:326797ms step_avg:43.00ms +[2025-09-11 10:03:05] [Rank 0] step:7601/10000 train_time:328767ms step_avg:43.25ms +[2025-09-11 10:03:05] [Rank 0] step:7601/10000 train_time:328767ms step_avg:43.25ms +[2025-09-11 10:03:06] [Rank 0] step:7621/10000 train_time:329499ms step_avg:43.24ms +[2025-09-11 10:03:06] [Rank 0] step:7621/10000 train_time:329499ms step_avg:43.24ms +[2025-09-11 10:03:07] [Rank 0] step:7641/10000 train_time:330204ms step_avg:43.21ms +[2025-09-11 10:03:07] [Rank 0] step:7641/10000 train_time:330204ms step_avg:43.21ms +[2025-09-11 10:03:08] [Rank 0] step:7661/10000 train_time:330906ms step_avg:43.19ms +[2025-09-11 10:03:08] [Rank 0] step:7661/10000 train_time:330906ms step_avg:43.19ms +[2025-09-11 10:03:08] [Rank 0] step:7681/10000 train_time:331609ms step_avg:43.17ms +[2025-09-11 10:03:08] [Rank 0] step:7681/10000 train_time:331609ms step_avg:43.17ms +[2025-09-11 10:03:09] [Rank 0] step:7701/10000 train_time:332319ms step_avg:43.15ms +[2025-09-11 10:03:09] [Rank 0] step:7701/10000 train_time:332319ms step_avg:43.15ms +[2025-09-11 10:03:10] [Rank 0] step:7721/10000 train_time:333022ms step_avg:43.13ms +[2025-09-11 10:03:10] [Rank 0] step:7721/10000 train_time:333022ms step_avg:43.13ms +[2025-09-11 10:03:10] [Rank 0] step:7741/10000 train_time:333725ms step_avg:43.11ms +[2025-09-11 10:03:10] [Rank 0] step:7741/10000 train_time:333725ms step_avg:43.11ms +[2025-09-11 10:03:11] [Rank 0] step:7761/10000 train_time:334426ms step_avg:43.09ms +[2025-09-11 10:03:11] [Rank 0] step:7761/10000 train_time:334426ms step_avg:43.09ms 
+[2025-09-11 10:03:12] [Rank 0] step:7781/10000 train_time:335130ms step_avg:43.07ms +[2025-09-11 10:03:12] [Rank 0] step:7781/10000 train_time:335130ms step_avg:43.07ms +[2025-09-11 10:03:12] [Rank 0] step:7801/10000 train_time:335832ms step_avg:43.05ms +[2025-09-11 10:03:12] [Rank 0] step:7801/10000 train_time:335832ms step_avg:43.05ms +[2025-09-11 10:03:13] [Rank 0] step:7821/10000 train_time:336534ms step_avg:43.03ms +[2025-09-11 10:03:13] [Rank 0] step:7821/10000 train_time:336534ms step_avg:43.03ms +[2025-09-11 10:03:14] [Rank 0] step:7841/10000 train_time:337238ms step_avg:43.01ms +[2025-09-11 10:03:14] [Rank 0] step:7841/10000 train_time:337238ms step_avg:43.01ms +[2025-09-11 10:03:15] [Rank 0] step:7861/10000 train_time:337943ms step_avg:42.99ms +[2025-09-11 10:03:15] [Rank 0] step:7861/10000 train_time:337943ms step_avg:42.99ms +[2025-09-11 10:03:15] [Rank 0] step:7881/10000 train_time:338645ms step_avg:42.97ms +[2025-09-11 10:03:15] [Rank 0] step:7881/10000 train_time:338645ms step_avg:42.97ms +[2025-09-11 10:03:16] [Rank 0] step:7901/10000 train_time:339348ms step_avg:42.95ms +[2025-09-11 10:03:16] [Rank 0] step:7901/10000 train_time:339348ms step_avg:42.95ms +[2025-09-11 10:03:17] [Rank 0] step:7921/10000 train_time:340051ms step_avg:42.93ms +[2025-09-11 10:03:17] [Rank 0] step:7921/10000 train_time:340051ms step_avg:42.93ms +[2025-09-11 10:03:17] [Rank 0] step:7941/10000 train_time:340755ms step_avg:42.91ms +[2025-09-11 10:03:17] [Rank 0] step:7941/10000 train_time:340755ms step_avg:42.91ms +[2025-09-11 10:03:18] [Rank 0] step:7961/10000 train_time:341455ms step_avg:42.89ms +[2025-09-11 10:03:18] [Rank 0] step:7961/10000 train_time:341455ms step_avg:42.89ms +[2025-09-11 10:03:19] [Rank 0] step:7981/10000 train_time:342161ms step_avg:42.87ms +[2025-09-11 10:03:19] [Rank 0] step:7981/10000 train_time:342161ms step_avg:42.87ms +[2025-09-11 10:03:19] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 10:03:19] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 10:03:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 10:03:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 10:03:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 10:03:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 10:03:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:03:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:03:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 10:03:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 10:03:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 10:03:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 10:03:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 10:03:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 10:03:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 10:03:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 10:03:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 10:03:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 10:03:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 10:03:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 10:03:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 10:03:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 10:03:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 10:03:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 10:03:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 10:03:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 10:03:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 10:03:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 10:03:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 10:03:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 10:03:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 10:03:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 10:03:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 10:03:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 10:03:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 10:03:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 10:03:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 10:03:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 10:03:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 10:03:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 10:03:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 10:03:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 10:03:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 10:03:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 10:03:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:03:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:03:31] [Rank 0] PRINT: step:8000/10000 val_loss:4.6580 total_sharp:6.7447e-05 L1_sharp:9.6979e-02 L2_sharp:1.5951e-01 L3_sharp:2.0666e-01 L4_sharp:3.0174e-01 L5_sharp:3.7915e-01 L6_sharp:5.4219e-01 L7_sharp:5.8470e-01 L8_sharp:5.1269e-01 L9_sharp:5.3538e-01 L10_sharp:6.5745e-01 L11_sharp:1.3911e+00 L12_sharp:3.1498e+00 total_fnorm:3.6750e+01 total_l1_linf:5.5296e+04 total_spectral:1.8375e+01 L1_fnorm:1.8555e-02 L2_fnorm:1.8921e-02 L3_fnorm:1.9043e-02 L4_fnorm:1.9043e-02 L5_fnorm:1.9043e-02 L6_fnorm:1.9043e-02 L7_fnorm:1.8799e-02 L8_fnorm:1.8188e-02 L9_fnorm:1.8555e-02 L10_fnorm:1.8433e-02 L11_fnorm:1.8433e-02 L12_fnorm:1.8188e-02 L1_l1linf:3.3264e-03 L2_l1linf:3.5400e-03 L3_l1linf:3.9062e-03 L4_l1linf:4.0283e-03 L5_l1linf:4.2114e-03 L6_l1linf:4.3030e-03 L7_l1linf:4.2419e-03 L8_l1linf:4.2419e-03 L9_l1linf:4.3335e-03 L10_l1linf:4.4556e-03 L11_l1linf:4.2725e-03 L12_l1linf:4.2725e-03 L1_spectral:3.6162e-04 L2_spectral:3.6525e-04 L3_spectral:3.6294e-04 L4_spectral:3.6339e-04 L5_spectral:3.6661e-04 L6_spectral:3.6644e-04 L7_spectral:3.6462e-04 L8_spectral:3.4799e-04 L9_spectral:3.6226e-04 L10_spectral:3.5590e-04 L11_spectral:3.5571e-04 L12_spectral:3.4447e-04 train_time:342850ms step_avg:42.86ms +[2025-09-11 10:03:31] [Rank 0] PRINT: step:8000/10000 
val_loss:4.6580 total_sharp:6.7447e-05 L1_sharp:9.6979e-02 L2_sharp:1.5951e-01 L3_sharp:2.0666e-01 L4_sharp:3.0174e-01 L5_sharp:3.7915e-01 L6_sharp:5.4219e-01 L7_sharp:5.8470e-01 L8_sharp:5.1269e-01 L9_sharp:5.3538e-01 L10_sharp:6.5745e-01 L11_sharp:1.3911e+00 L12_sharp:3.1498e+00 total_fnorm:3.6750e+01 total_l1_linf:5.5296e+04 total_spectral:1.8375e+01 L1_fnorm:1.8555e-02 L2_fnorm:1.8921e-02 L3_fnorm:1.9043e-02 L4_fnorm:1.9043e-02 L5_fnorm:1.9043e-02 L6_fnorm:1.9043e-02 L7_fnorm:1.8799e-02 L8_fnorm:1.8188e-02 L9_fnorm:1.8555e-02 L10_fnorm:1.8433e-02 L11_fnorm:1.8433e-02 L12_fnorm:1.8188e-02 L1_l1linf:3.3264e-03 L2_l1linf:3.5400e-03 L3_l1linf:3.9062e-03 L4_l1linf:4.0283e-03 L5_l1linf:4.2114e-03 L6_l1linf:4.3030e-03 L7_l1linf:4.2419e-03 L8_l1linf:4.2419e-03 L9_l1linf:4.3335e-03 L10_l1linf:4.4556e-03 L11_l1linf:4.2725e-03 L12_l1linf:4.2725e-03 L1_spectral:3.6162e-04 L2_spectral:3.6525e-04 L3_spectral:3.6294e-04 L4_spectral:3.6339e-04 L5_spectral:3.6661e-04 L6_spectral:3.6644e-04 L7_spectral:3.6462e-04 L8_spectral:3.4799e-04 L9_spectral:3.6226e-04 L10_spectral:3.5590e-04 L11_spectral:3.5571e-04 L12_spectral:3.4447e-04 train_time:342850ms step_avg:42.86ms +[2025-09-11 10:03:33] [Rank 0] step:8001/10000 train_time:344982ms step_avg:43.12ms +[2025-09-11 10:03:33] [Rank 0] step:8001/10000 train_time:344982ms step_avg:43.12ms +[2025-09-11 10:03:34] [Rank 0] step:8021/10000 train_time:345707ms step_avg:43.10ms +[2025-09-11 10:03:34] [Rank 0] step:8021/10000 train_time:345707ms step_avg:43.10ms +[2025-09-11 10:03:35] [Rank 0] step:8041/10000 train_time:346689ms step_avg:43.12ms +[2025-09-11 10:03:35] [Rank 0] step:8041/10000 train_time:346689ms step_avg:43.12ms +[2025-09-11 10:03:36] [Rank 0] step:8061/10000 train_time:347395ms step_avg:43.10ms +[2025-09-11 10:03:36] [Rank 0] step:8061/10000 train_time:347395ms step_avg:43.10ms +[2025-09-11 10:03:36] [Rank 0] step:8081/10000 train_time:348097ms step_avg:43.08ms +[2025-09-11 10:03:36] [Rank 0] step:8081/10000 
train_time:348097ms step_avg:43.08ms +[2025-09-11 10:03:37] [Rank 0] step:8101/10000 train_time:348800ms step_avg:43.06ms +[2025-09-11 10:03:37] [Rank 0] step:8101/10000 train_time:348800ms step_avg:43.06ms +[2025-09-11 10:03:38] [Rank 0] step:8121/10000 train_time:349506ms step_avg:43.04ms +[2025-09-11 10:03:38] [Rank 0] step:8121/10000 train_time:349506ms step_avg:43.04ms +[2025-09-11 10:03:39] [Rank 0] step:8141/10000 train_time:350944ms step_avg:43.11ms +[2025-09-11 10:03:39] [Rank 0] step:8141/10000 train_time:350944ms step_avg:43.11ms +[2025-09-11 10:03:40] [Rank 0] step:8161/10000 train_time:351651ms step_avg:43.09ms +[2025-09-11 10:03:40] [Rank 0] step:8161/10000 train_time:351651ms step_avg:43.09ms +[2025-09-11 10:03:41] [Rank 0] step:8181/10000 train_time:352365ms step_avg:43.07ms +[2025-09-11 10:03:41] [Rank 0] step:8181/10000 train_time:352365ms step_avg:43.07ms +[2025-09-11 10:03:41] [Rank 0] step:8201/10000 train_time:353077ms step_avg:43.05ms +[2025-09-11 10:03:41] [Rank 0] step:8201/10000 train_time:353077ms step_avg:43.05ms +[2025-09-11 10:03:42] [Rank 0] step:8221/10000 train_time:353786ms step_avg:43.03ms +[2025-09-11 10:03:42] [Rank 0] step:8221/10000 train_time:353786ms step_avg:43.03ms +[2025-09-11 10:03:43] [Rank 0] step:8241/10000 train_time:354504ms step_avg:43.02ms +[2025-09-11 10:03:43] [Rank 0] step:8241/10000 train_time:354504ms step_avg:43.02ms +[2025-09-11 10:03:43] [Rank 0] step:8261/10000 train_time:355212ms step_avg:43.00ms +[2025-09-11 10:03:43] [Rank 0] step:8261/10000 train_time:355212ms step_avg:43.00ms +[2025-09-11 10:03:44] [Rank 0] step:8281/10000 train_time:355918ms step_avg:42.98ms +[2025-09-11 10:03:44] [Rank 0] step:8281/10000 train_time:355918ms step_avg:42.98ms +[2025-09-11 10:03:45] [Rank 0] step:8301/10000 train_time:356628ms step_avg:42.96ms +[2025-09-11 10:03:45] [Rank 0] step:8301/10000 train_time:356628ms step_avg:42.96ms +[2025-09-11 10:03:46] [Rank 0] step:8321/10000 train_time:357336ms step_avg:42.94ms 
+[2025-09-11 10:03:46] [Rank 0] step:8321/10000 train_time:357336ms step_avg:42.94ms +[2025-09-11 10:03:46] [Rank 0] step:8341/10000 train_time:358050ms step_avg:42.93ms +[2025-09-11 10:03:46] [Rank 0] step:8341/10000 train_time:358050ms step_avg:42.93ms +[2025-09-11 10:03:47] [Rank 0] step:8361/10000 train_time:358756ms step_avg:42.91ms +[2025-09-11 10:03:47] [Rank 0] step:8361/10000 train_time:358756ms step_avg:42.91ms +[2025-09-11 10:03:48] [Rank 0] step:8381/10000 train_time:359468ms step_avg:42.89ms +[2025-09-11 10:03:48] [Rank 0] step:8381/10000 train_time:359468ms step_avg:42.89ms +[2025-09-11 10:03:48] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 10:03:48] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 10:03:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 10:03:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 10:03:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 10:03:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 10:03:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:03:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:03:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 10:03:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 10:03:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 10:03:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 10:03:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 10:03:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 10:03:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 10:03:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 10:03:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 10:03:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 10:03:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 10:03:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 10:03:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 10:03:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 10:03:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 10:03:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 10:03:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 10:03:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 10:03:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 10:03:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 10:03:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 10:03:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 10:03:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 10:03:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 10:03:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 10:03:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 10:03:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 10:03:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 10:03:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 10:03:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 10:03:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 10:03:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 10:03:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 10:03:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 10:03:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 10:03:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 10:04:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:04:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:04:00] [Rank 0] PRINT: step:8400/10000 val_loss:4.6384 total_sharp:6.3632e-05 L1_sharp:8.2483e-02 L2_sharp:1.2093e-01 L3_sharp:1.9468e-01 L4_sharp:2.6174e-01 L5_sharp:3.7810e-01 L6_sharp:5.0353e-01 L7_sharp:5.2395e-01 L8_sharp:4.3514e-01 L9_sharp:4.8019e-01 L10_sharp:5.9578e-01 L11_sharp:6.6140e-01 L12_sharp:1.3321e+00 total_fnorm:2.8750e+01 total_l1_linf:3.9424e+04 total_spectral:1.4312e+01 L1_fnorm:1.4038e-02 L2_fnorm:1.4343e-02 L3_fnorm:1.4404e-02 L4_fnorm:1.4526e-02 L5_fnorm:1.4526e-02 L6_fnorm:1.4465e-02 L7_fnorm:1.4404e-02 L8_fnorm:1.3977e-02 L9_fnorm:1.4160e-02 L10_fnorm:1.4099e-02 L11_fnorm:1.4099e-02 L12_fnorm:1.3794e-02 L1_l1linf:2.2430e-03 L2_l1linf:2.4261e-03 L3_l1linf:2.5482e-03 L4_l1linf:2.7924e-03 L5_l1linf:2.9144e-03 L6_l1linf:2.9144e-03 L7_l1linf:2.9602e-03 L8_l1linf:3.0365e-03 L9_l1linf:2.9449e-03 L10_l1linf:2.9602e-03 L11_l1linf:2.9602e-03 L12_l1linf:3.0212e-03 L1_spectral:2.8235e-04 L2_spectral:2.8741e-04 L3_spectral:2.8562e-04 L4_spectral:2.8833e-04 L5_spectral:2.8897e-04 L6_spectral:2.8403e-04 L7_spectral:2.8629e-04 L8_spectral:2.6934e-04 L9_spectral:2.7866e-04 L10_spectral:2.7836e-04 L11_spectral:2.7881e-04 L12_spectral:2.6427e-04 train_time:360160ms step_avg:42.88ms +[2025-09-11 10:04:00] [Rank 0] PRINT: step:8400/10000 val_loss:4.6384 total_sharp:6.3632e-05 L1_sharp:8.2483e-02 L2_sharp:1.2093e-01 L3_sharp:1.9468e-01 L4_sharp:2.6174e-01 L5_sharp:3.7810e-01 L6_sharp:5.0353e-01 L7_sharp:5.2395e-01 L8_sharp:4.3514e-01 L9_sharp:4.8019e-01 L10_sharp:5.9578e-01 L11_sharp:6.6140e-01 L12_sharp:1.3321e+00 total_fnorm:2.8750e+01 total_l1_linf:3.9424e+04 total_spectral:1.4312e+01 L1_fnorm:1.4038e-02 L2_fnorm:1.4343e-02 L3_fnorm:1.4404e-02 L4_fnorm:1.4526e-02 L5_fnorm:1.4526e-02 L6_fnorm:1.4465e-02 L7_fnorm:1.4404e-02 L8_fnorm:1.3977e-02 L9_fnorm:1.4160e-02 L10_fnorm:1.4099e-02 L11_fnorm:1.4099e-02 L12_fnorm:1.3794e-02 L1_l1linf:2.2430e-03 L2_l1linf:2.4261e-03 L3_l1linf:2.5482e-03 L4_l1linf:2.7924e-03 L5_l1linf:2.9144e-03 
L6_l1linf:2.9144e-03 L7_l1linf:2.9602e-03 L8_l1linf:3.0365e-03 L9_l1linf:2.9449e-03 L10_l1linf:2.9602e-03 L11_l1linf:2.9602e-03 L12_l1linf:3.0212e-03 L1_spectral:2.8235e-04 L2_spectral:2.8741e-04 L3_spectral:2.8562e-04 L4_spectral:2.8833e-04 L5_spectral:2.8897e-04 L6_spectral:2.8403e-04 L7_spectral:2.8629e-04 L8_spectral:2.6934e-04 L9_spectral:2.7866e-04 L10_spectral:2.7836e-04 L11_spectral:2.7881e-04 L12_spectral:2.6427e-04 train_time:360160ms step_avg:42.88ms +[2025-09-11 10:04:02] [Rank 0] step:8401/10000 train_time:362312ms step_avg:43.13ms +[2025-09-11 10:04:02] [Rank 0] step:8401/10000 train_time:362312ms step_avg:43.13ms +[2025-09-11 10:04:03] [Rank 0] step:8421/10000 train_time:363056ms step_avg:43.11ms +[2025-09-11 10:04:03] [Rank 0] step:8421/10000 train_time:363056ms step_avg:43.11ms +[2025-09-11 10:04:04] [Rank 0] step:8441/10000 train_time:363769ms step_avg:43.10ms +[2025-09-11 10:04:04] [Rank 0] step:8441/10000 train_time:363769ms step_avg:43.10ms +[2025-09-11 10:04:04] [Rank 0] step:8461/10000 train_time:364481ms step_avg:43.08ms +[2025-09-11 10:04:04] [Rank 0] step:8461/10000 train_time:364481ms step_avg:43.08ms +[2025-09-11 10:04:05] [Rank 0] step:8481/10000 train_time:365193ms step_avg:43.06ms +[2025-09-11 10:04:05] [Rank 0] step:8481/10000 train_time:365193ms step_avg:43.06ms +[2025-09-11 10:04:06] [Rank 0] step:8501/10000 train_time:365903ms step_avg:43.04ms +[2025-09-11 10:04:06] [Rank 0] step:8501/10000 train_time:365903ms step_avg:43.04ms +[2025-09-11 10:04:06] [Rank 0] step:8521/10000 train_time:366614ms step_avg:43.02ms +[2025-09-11 10:04:06] [Rank 0] step:8521/10000 train_time:366614ms step_avg:43.02ms +[2025-09-11 10:04:07] [Rank 0] step:8541/10000 train_time:367325ms step_avg:43.01ms +[2025-09-11 10:04:07] [Rank 0] step:8541/10000 train_time:367325ms step_avg:43.01ms +[2025-09-11 10:04:08] [Rank 0] step:8561/10000 train_time:368040ms step_avg:42.99ms +[2025-09-11 10:04:08] [Rank 0] step:8561/10000 train_time:368040ms step_avg:42.99ms 
+[2025-09-11 10:04:09] [Rank 0] step:8581/10000 train_time:368754ms step_avg:42.97ms +[2025-09-11 10:04:09] [Rank 0] step:8581/10000 train_time:368754ms step_avg:42.97ms +[2025-09-11 10:04:09] [Rank 0] step:8601/10000 train_time:369466ms step_avg:42.96ms +[2025-09-11 10:04:09] [Rank 0] step:8601/10000 train_time:369466ms step_avg:42.96ms +[2025-09-11 10:04:10] [Rank 0] step:8621/10000 train_time:370175ms step_avg:42.94ms +[2025-09-11 10:04:10] [Rank 0] step:8621/10000 train_time:370175ms step_avg:42.94ms +[2025-09-11 10:04:11] [Rank 0] step:8641/10000 train_time:370885ms step_avg:42.92ms +[2025-09-11 10:04:11] [Rank 0] step:8641/10000 train_time:370885ms step_avg:42.92ms +[2025-09-11 10:04:11] [Rank 0] step:8661/10000 train_time:371595ms step_avg:42.90ms +[2025-09-11 10:04:11] [Rank 0] step:8661/10000 train_time:371595ms step_avg:42.90ms +[2025-09-11 10:04:12] [Rank 0] step:8681/10000 train_time:372307ms step_avg:42.89ms +[2025-09-11 10:04:12] [Rank 0] step:8681/10000 train_time:372307ms step_avg:42.89ms +[2025-09-11 10:04:13] [Rank 0] step:8701/10000 train_time:373020ms step_avg:42.87ms +[2025-09-11 10:04:13] [Rank 0] step:8701/10000 train_time:373020ms step_avg:42.87ms +[2025-09-11 10:04:14] [Rank 0] step:8721/10000 train_time:373734ms step_avg:42.85ms +[2025-09-11 10:04:14] [Rank 0] step:8721/10000 train_time:373734ms step_avg:42.85ms +[2025-09-11 10:04:14] [Rank 0] step:8741/10000 train_time:374438ms step_avg:42.84ms +[2025-09-11 10:04:14] [Rank 0] step:8741/10000 train_time:374438ms step_avg:42.84ms +[2025-09-11 10:04:15] [Rank 0] step:8761/10000 train_time:375152ms step_avg:42.82ms +[2025-09-11 10:04:15] [Rank 0] step:8761/10000 train_time:375152ms step_avg:42.82ms +[2025-09-11 10:04:16] [Rank 0] step:8781/10000 train_time:375860ms step_avg:42.80ms +[2025-09-11 10:04:16] [Rank 0] step:8781/10000 train_time:375860ms step_avg:42.80ms +[2025-09-11 10:04:16] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 10:04:16] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 10:04:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 10:04:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 10:04:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 10:04:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 10:04:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:04:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:04:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 10:04:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 10:04:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 10:04:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 10:04:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 10:04:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 10:04:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 10:04:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 10:04:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 10:04:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 10:04:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 10:04:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 10:04:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 10:04:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 10:04:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 10:04:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 10:04:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 10:04:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 10:04:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 10:04:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 10:04:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 10:04:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 10:04:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 10:04:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 10:04:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 10:04:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 10:04:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 10:04:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 10:04:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 10:04:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 10:04:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 10:04:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 10:04:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 10:04:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 10:04:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 10:04:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 10:04:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:04:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:04:28] [Rank 0] PRINT: step:8800/10000 val_loss:4.6349 total_sharp:5.7982e-05 L1_sharp:6.4747e-02 L2_sharp:1.1139e-01 L3_sharp:1.4382e-01 L4_sharp:2.1953e-01 L5_sharp:2.9177e-01 L6_sharp:3.9013e-01 L7_sharp:3.7464e-01 L8_sharp:3.6493e-01 L9_sharp:4.0808e-01 L10_sharp:5.1060e-01 L11_sharp:7.0885e-01 L12_sharp:7.7167e-01 total_fnorm:2.1250e+01 total_l1_linf:2.5984e+04 total_spectral:1.0625e+01 L1_fnorm:9.8877e-03 L2_fnorm:1.0132e-02 L3_fnorm:1.0193e-02 L4_fnorm:1.0254e-02 L5_fnorm:1.0315e-02 L6_fnorm:1.0193e-02 L7_fnorm:1.0193e-02 L8_fnorm:9.8267e-03 L9_fnorm:9.9487e-03 L10_fnorm:9.9487e-03 L11_fnorm:9.8877e-03 L12_fnorm:9.6436e-03 L1_l1linf:1.3275e-03 L2_l1linf:1.5488e-03 L3_l1linf:1.6861e-03 L4_l1linf:1.7242e-03 L5_l1linf:1.7700e-03 L6_l1linf:1.8463e-03 L7_l1linf:1.9455e-03 L8_l1linf:1.8845e-03 L9_l1linf:1.9684e-03 L10_l1linf:1.9073e-03 L11_l1linf:1.8768e-03 L12_l1linf:1.7624e-03 L1_spectral:2.0782e-04 L2_spectral:2.0862e-04 L3_spectral:2.0686e-04 L4_spectral:2.0745e-04 L5_spectral:2.0908e-04 L6_spectral:2.0657e-04 L7_spectral:2.0430e-04 L8_spectral:1.9416e-04 L9_spectral:1.9883e-04 L10_spectral:2.0196e-04 L11_spectral:2.0143e-04 L12_spectral:1.9310e-04 train_time:376549ms step_avg:42.79ms +[2025-09-11 10:04:28] [Rank 0] PRINT: step:8800/10000 
val_loss:4.6349 total_sharp:5.7982e-05 L1_sharp:6.4747e-02 L2_sharp:1.1139e-01 L3_sharp:1.4382e-01 L4_sharp:2.1953e-01 L5_sharp:2.9177e-01 L6_sharp:3.9013e-01 L7_sharp:3.7464e-01 L8_sharp:3.6493e-01 L9_sharp:4.0808e-01 L10_sharp:5.1060e-01 L11_sharp:7.0885e-01 L12_sharp:7.7167e-01 total_fnorm:2.1250e+01 total_l1_linf:2.5984e+04 total_spectral:1.0625e+01 L1_fnorm:9.8877e-03 L2_fnorm:1.0132e-02 L3_fnorm:1.0193e-02 L4_fnorm:1.0254e-02 L5_fnorm:1.0315e-02 L6_fnorm:1.0193e-02 L7_fnorm:1.0193e-02 L8_fnorm:9.8267e-03 L9_fnorm:9.9487e-03 L10_fnorm:9.9487e-03 L11_fnorm:9.8877e-03 L12_fnorm:9.6436e-03 L1_l1linf:1.3275e-03 L2_l1linf:1.5488e-03 L3_l1linf:1.6861e-03 L4_l1linf:1.7242e-03 L5_l1linf:1.7700e-03 L6_l1linf:1.8463e-03 L7_l1linf:1.9455e-03 L8_l1linf:1.8845e-03 L9_l1linf:1.9684e-03 L10_l1linf:1.9073e-03 L11_l1linf:1.8768e-03 L12_l1linf:1.7624e-03 L1_spectral:2.0782e-04 L2_spectral:2.0862e-04 L3_spectral:2.0686e-04 L4_spectral:2.0745e-04 L5_spectral:2.0908e-04 L6_spectral:2.0657e-04 L7_spectral:2.0430e-04 L8_spectral:1.9416e-04 L9_spectral:1.9883e-04 L10_spectral:2.0196e-04 L11_spectral:2.0143e-04 L12_spectral:1.9310e-04 train_time:376549ms step_avg:42.79ms +[2025-09-11 10:04:30] [Rank 0] step:8801/10000 train_time:378710ms step_avg:43.03ms +[2025-09-11 10:04:30] [Rank 0] step:8801/10000 train_time:378710ms step_avg:43.03ms +[2025-09-11 10:04:31] [Rank 0] step:8821/10000 train_time:379452ms step_avg:43.02ms +[2025-09-11 10:04:31] [Rank 0] step:8821/10000 train_time:379452ms step_avg:43.02ms +[2025-09-11 10:04:31] [Rank 0] step:8841/10000 train_time:380163ms step_avg:43.00ms +[2025-09-11 10:04:31] [Rank 0] step:8841/10000 train_time:380163ms step_avg:43.00ms +[2025-09-11 10:04:32] [Rank 0] step:8861/10000 train_time:380873ms step_avg:42.98ms +[2025-09-11 10:04:32] [Rank 0] step:8861/10000 train_time:380873ms step_avg:42.98ms +[2025-09-11 10:04:33] [Rank 0] step:8881/10000 train_time:381584ms step_avg:42.97ms +[2025-09-11 10:04:33] [Rank 0] step:8881/10000 
train_time:381584ms step_avg:42.97ms +[2025-09-11 10:04:33] [Rank 0] step:8901/10000 train_time:382296ms step_avg:42.95ms +[2025-09-11 10:04:33] [Rank 0] step:8901/10000 train_time:382296ms step_avg:42.95ms +[2025-09-11 10:04:34] [Rank 0] step:8921/10000 train_time:383004ms step_avg:42.93ms +[2025-09-11 10:04:34] [Rank 0] step:8921/10000 train_time:383004ms step_avg:42.93ms +[2025-09-11 10:04:35] [Rank 0] step:8941/10000 train_time:383718ms step_avg:42.92ms +[2025-09-11 10:04:35] [Rank 0] step:8941/10000 train_time:383718ms step_avg:42.92ms +[2025-09-11 10:04:36] [Rank 0] step:8961/10000 train_time:384712ms step_avg:42.93ms +[2025-09-11 10:04:36] [Rank 0] step:8961/10000 train_time:384712ms step_avg:42.93ms +[2025-09-11 10:04:37] [Rank 0] step:8981/10000 train_time:385427ms step_avg:42.92ms +[2025-09-11 10:04:37] [Rank 0] step:8981/10000 train_time:385427ms step_avg:42.92ms +[2025-09-11 10:04:37] [Rank 0] step:9001/10000 train_time:386133ms step_avg:42.90ms +[2025-09-11 10:04:37] [Rank 0] step:9001/10000 train_time:386133ms step_avg:42.90ms +[2025-09-11 10:04:38] [Rank 0] step:9021/10000 train_time:387123ms step_avg:42.91ms +[2025-09-11 10:04:38] [Rank 0] step:9021/10000 train_time:387123ms step_avg:42.91ms +[2025-09-11 10:04:39] [Rank 0] step:9041/10000 train_time:387836ms step_avg:42.90ms +[2025-09-11 10:04:39] [Rank 0] step:9041/10000 train_time:387836ms step_avg:42.90ms +[2025-09-11 10:04:40] [Rank 0] step:9061/10000 train_time:388545ms step_avg:42.88ms +[2025-09-11 10:04:40] [Rank 0] step:9061/10000 train_time:388545ms step_avg:42.88ms +[2025-09-11 10:04:40] [Rank 0] step:9081/10000 train_time:389259ms step_avg:42.87ms +[2025-09-11 10:04:40] [Rank 0] step:9081/10000 train_time:389259ms step_avg:42.87ms +[2025-09-11 10:04:41] [Rank 0] step:9101/10000 train_time:389974ms step_avg:42.85ms +[2025-09-11 10:04:41] [Rank 0] step:9101/10000 train_time:389974ms step_avg:42.85ms +[2025-09-11 10:04:42] [Rank 0] step:9121/10000 train_time:390688ms step_avg:42.83ms 
+[2025-09-11 10:04:42] [Rank 0] step:9121/10000 train_time:390688ms step_avg:42.83ms +[2025-09-11 10:04:43] [Rank 0] step:9141/10000 train_time:391397ms step_avg:42.82ms +[2025-09-11 10:04:43] [Rank 0] step:9141/10000 train_time:391397ms step_avg:42.82ms +[2025-09-11 10:04:43] [Rank 0] step:9161/10000 train_time:392111ms step_avg:42.80ms +[2025-09-11 10:04:43] [Rank 0] step:9161/10000 train_time:392111ms step_avg:42.80ms +[2025-09-11 10:04:44] [Rank 0] step:9181/10000 train_time:392824ms step_avg:42.79ms +[2025-09-11 10:04:44] [Rank 0] step:9181/10000 train_time:392824ms step_avg:42.79ms +[2025-09-11 10:04:45] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 10:04:45] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 10:04:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 10:04:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 10:04:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 10:04:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 10:04:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:04:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:04:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 10:04:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 10:04:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 10:04:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 10:04:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 10:04:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 10:04:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 10:04:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 10:04:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 10:04:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 10:04:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 10:04:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 10:04:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 10:04:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 10:04:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 10:04:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 10:04:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 10:04:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 10:04:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 10:04:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 10:04:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 10:04:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 10:04:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 10:04:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 10:04:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 10:04:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 10:04:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 10:04:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 10:04:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 10:04:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 10:04:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 10:04:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 10:04:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 10:04:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 10:04:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 10:04:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 10:04:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:04:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:04:56] [Rank 0] PRINT: step:9200/10000 val_loss:4.6168 total_sharp:6.0024e-05 L1_sharp:6.2199e-02 L2_sharp:9.3944e-02 L3_sharp:1.4439e-01 L4_sharp:2.4217e-01 L5_sharp:2.8618e-01 L6_sharp:3.9866e-01 L7_sharp:4.7799e-01 L8_sharp:4.3295e-01 L9_sharp:4.8269e-01 L10_sharp:6.3706e-01 L11_sharp:1.1920e+00 L12_sharp:2.3881e+00 total_fnorm:1.4938e+01 total_l1_linf:1.6064e+04 total_spectral:7.4688e+00 L1_fnorm:6.4087e-03 L2_fnorm:6.6528e-03 L3_fnorm:6.7444e-03 L4_fnorm:6.7749e-03 L5_fnorm:6.8054e-03 L6_fnorm:6.7749e-03 L7_fnorm:6.7444e-03 L8_fnorm:6.4697e-03 L9_fnorm:6.5918e-03 L10_fnorm:6.5918e-03 L11_fnorm:6.5613e-03 L12_fnorm:6.4392e-03 L1_l1linf:8.3542e-04 L2_l1linf:9.0408e-04 L3_l1linf:1.0376e-03 L4_l1linf:1.1063e-03 L5_l1linf:1.1673e-03 L6_l1linf:1.1749e-03 L7_l1linf:1.2207e-03 L8_l1linf:1.1749e-03 L9_l1linf:1.1520e-03 L10_l1linf:1.1749e-03 L11_l1linf:1.1597e-03 L12_l1linf:1.1292e-03 L1_spectral:1.3232e-04 L2_spectral:1.3538e-04 L3_spectral:1.3404e-04 L4_spectral:1.3511e-04 L5_spectral:1.3661e-04 L6_spectral:1.3703e-04 L7_spectral:1.3321e-04 L8_spectral:1.2601e-04 L9_spectral:1.3173e-04 L10_spectral:1.3080e-04 L11_spectral:1.3320e-04 L12_spectral:1.2478e-04 train_time:393518ms step_avg:42.77ms +[2025-09-11 10:04:56] [Rank 0] PRINT: step:9200/10000 val_loss:4.6168 total_sharp:6.0024e-05 L1_sharp:6.2199e-02 L2_sharp:9.3944e-02 L3_sharp:1.4439e-01 L4_sharp:2.4217e-01 L5_sharp:2.8618e-01 L6_sharp:3.9866e-01 L7_sharp:4.7799e-01 L8_sharp:4.3295e-01 L9_sharp:4.8269e-01 L10_sharp:6.3706e-01 L11_sharp:1.1920e+00 L12_sharp:2.3881e+00 total_fnorm:1.4938e+01 total_l1_linf:1.6064e+04 total_spectral:7.4688e+00 L1_fnorm:6.4087e-03 L2_fnorm:6.6528e-03 L3_fnorm:6.7444e-03 L4_fnorm:6.7749e-03 L5_fnorm:6.8054e-03 L6_fnorm:6.7749e-03 L7_fnorm:6.7444e-03 L8_fnorm:6.4697e-03 L9_fnorm:6.5918e-03 L10_fnorm:6.5918e-03 L11_fnorm:6.5613e-03 L12_fnorm:6.4392e-03 L1_l1linf:8.3542e-04 L2_l1linf:9.0408e-04 L3_l1linf:1.0376e-03 L4_l1linf:1.1063e-03 L5_l1linf:1.1673e-03 
L6_l1linf:1.1749e-03 L7_l1linf:1.2207e-03 L8_l1linf:1.1749e-03 L9_l1linf:1.1520e-03 L10_l1linf:1.1749e-03 L11_l1linf:1.1597e-03 L12_l1linf:1.1292e-03 L1_spectral:1.3232e-04 L2_spectral:1.3538e-04 L3_spectral:1.3404e-04 L4_spectral:1.3511e-04 L5_spectral:1.3661e-04 L6_spectral:1.3703e-04 L7_spectral:1.3321e-04 L8_spectral:1.2601e-04 L9_spectral:1.3173e-04 L10_spectral:1.3080e-04 L11_spectral:1.3320e-04 L12_spectral:1.2478e-04 train_time:393518ms step_avg:42.77ms +[2025-09-11 10:04:58] [Rank 0] step:9201/10000 train_time:395718ms step_avg:43.01ms +[2025-09-11 10:04:58] [Rank 0] step:9201/10000 train_time:395718ms step_avg:43.01ms +[2025-09-11 10:04:59] [Rank 0] step:9221/10000 train_time:396456ms step_avg:42.99ms +[2025-09-11 10:04:59] [Rank 0] step:9221/10000 train_time:396456ms step_avg:42.99ms +[2025-09-11 10:05:00] [Rank 0] step:9241/10000 train_time:397166ms step_avg:42.98ms +[2025-09-11 10:05:00] [Rank 0] step:9241/10000 train_time:397166ms step_avg:42.98ms +[2025-09-11 10:05:01] [Rank 0] step:9261/10000 train_time:397880ms step_avg:42.96ms +[2025-09-11 10:05:01] [Rank 0] step:9261/10000 train_time:397880ms step_avg:42.96ms +[2025-09-11 10:05:01] [Rank 0] step:9281/10000 train_time:398660ms step_avg:42.95ms +[2025-09-11 10:05:01] [Rank 0] step:9281/10000 train_time:398660ms step_avg:42.95ms +[2025-09-11 10:05:02] [Rank 0] step:9301/10000 train_time:399432ms step_avg:42.95ms +[2025-09-11 10:05:02] [Rank 0] step:9301/10000 train_time:399432ms step_avg:42.95ms +[2025-09-11 10:05:03] [Rank 0] step:9321/10000 train_time:400145ms step_avg:42.93ms +[2025-09-11 10:05:03] [Rank 0] step:9321/10000 train_time:400145ms step_avg:42.93ms +[2025-09-11 10:05:04] [Rank 0] step:9341/10000 train_time:400853ms step_avg:42.91ms +[2025-09-11 10:05:04] [Rank 0] step:9341/10000 train_time:400853ms step_avg:42.91ms +[2025-09-11 10:05:04] [Rank 0] step:9361/10000 train_time:401560ms step_avg:42.90ms +[2025-09-11 10:05:04] [Rank 0] step:9361/10000 train_time:401560ms step_avg:42.90ms 
+[2025-09-11 10:05:05] [Rank 0] step:9381/10000 train_time:402270ms step_avg:42.88ms +[2025-09-11 10:05:05] [Rank 0] step:9381/10000 train_time:402270ms step_avg:42.88ms +[2025-09-11 10:05:06] [Rank 0] step:9401/10000 train_time:402983ms step_avg:42.87ms +[2025-09-11 10:05:06] [Rank 0] step:9401/10000 train_time:402983ms step_avg:42.87ms +[2025-09-11 10:05:06] [Rank 0] step:9421/10000 train_time:403696ms step_avg:42.85ms +[2025-09-11 10:05:06] [Rank 0] step:9421/10000 train_time:403696ms step_avg:42.85ms +[2025-09-11 10:05:07] [Rank 0] step:9441/10000 train_time:404410ms step_avg:42.84ms +[2025-09-11 10:05:07] [Rank 0] step:9441/10000 train_time:404410ms step_avg:42.84ms +[2025-09-11 10:05:08] [Rank 0] step:9461/10000 train_time:405121ms step_avg:42.82ms +[2025-09-11 10:05:08] [Rank 0] step:9461/10000 train_time:405121ms step_avg:42.82ms +[2025-09-11 10:05:09] [Rank 0] step:9481/10000 train_time:405833ms step_avg:42.80ms +[2025-09-11 10:05:09] [Rank 0] step:9481/10000 train_time:405833ms step_avg:42.80ms +[2025-09-11 10:05:09] [Rank 0] step:9501/10000 train_time:406546ms step_avg:42.79ms +[2025-09-11 10:05:09] [Rank 0] step:9501/10000 train_time:406546ms step_avg:42.79ms +[2025-09-11 10:05:10] [Rank 0] step:9521/10000 train_time:407261ms step_avg:42.77ms +[2025-09-11 10:05:10] [Rank 0] step:9521/10000 train_time:407261ms step_avg:42.77ms +[2025-09-11 10:05:11] [Rank 0] step:9541/10000 train_time:407969ms step_avg:42.76ms +[2025-09-11 10:05:11] [Rank 0] step:9541/10000 train_time:407969ms step_avg:42.76ms +[2025-09-11 10:05:11] [Rank 0] step:9561/10000 train_time:408681ms step_avg:42.74ms +[2025-09-11 10:05:11] [Rank 0] step:9561/10000 train_time:408681ms step_avg:42.74ms +[2025-09-11 10:05:12] [Rank 0] step:9581/10000 train_time:409394ms step_avg:42.73ms +[2025-09-11 10:05:12] [Rank 0] step:9581/10000 train_time:409394ms step_avg:42.73ms +[2025-09-11 10:05:13] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 10:05:13] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 10:05:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 10:05:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 10:05:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 10:05:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 10:05:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:05:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:05:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 10:05:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 10:05:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 10:05:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 10:05:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 10:05:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 10:05:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 10:05:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 10:05:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 10:05:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 10:05:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 10:05:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 10:05:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 10:05:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 10:05:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 10:05:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 10:05:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 10:05:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 10:05:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 10:05:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 10:05:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 10:05:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 10:05:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 10:05:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 10:05:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 10:05:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 10:05:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 10:05:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 10:05:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 10:05:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 10:05:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 10:05:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 10:05:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 10:05:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 10:05:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 10:05:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 10:05:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:05:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:05:24] [Rank 0] PRINT: step:9600/10000 val_loss:4.6090 total_sharp:3.9771e-05 L1_sharp:4.5271e-02 L2_sharp:6.9988e-02 L3_sharp:1.1243e-01 L4_sharp:1.7266e-01 L5_sharp:2.0453e-01 L6_sharp:3.0834e-01 L7_sharp:3.4228e-01 L8_sharp:3.1321e-01 L9_sharp:3.6918e-01 L10_sharp:3.7231e-01 L11_sharp:4.6982e-01 L12_sharp:7.9822e-01 total_fnorm:8.6875e+00 total_l1_linf:8.0320e+03 total_spectral:4.3750e+00 L1_fnorm:3.5400e-03 L2_fnorm:3.6621e-03 L3_fnorm:3.7384e-03 L4_fnorm:3.7842e-03 L5_fnorm:3.7994e-03 L6_fnorm:3.7689e-03 L7_fnorm:3.7842e-03 L8_fnorm:3.6316e-03 L9_fnorm:3.7231e-03 L10_fnorm:3.7231e-03 L11_fnorm:3.6774e-03 L12_fnorm:3.5858e-03 L1_l1linf:3.8719e-04 L2_l1linf:4.2915e-04 L3_l1linf:4.8828e-04 L4_l1linf:5.1498e-04 L5_l1linf:5.5695e-04 L6_l1linf:5.6076e-04 L7_l1linf:5.6839e-04 L8_l1linf:5.7602e-04 L9_l1linf:5.6839e-04 L10_l1linf:5.9509e-04 L11_l1linf:5.5695e-04 L12_l1linf:5.8365e-04 L1_spectral:7.4572e-05 L2_spectral:7.5918e-05 L3_spectral:7.6373e-05 L4_spectral:7.6221e-05 L5_spectral:7.6934e-05 L6_spectral:7.7312e-05 L7_spectral:7.7040e-05 L8_spectral:7.0670e-05 L9_spectral:7.3978e-05 L10_spectral:7.4129e-05 L11_spectral:7.2882e-05 L12_spectral:6.9275e-05 train_time:410083ms step_avg:42.72ms +[2025-09-11 10:05:24] [Rank 0] PRINT: step:9600/10000 
val_loss:4.6090 total_sharp:3.9771e-05 L1_sharp:4.5271e-02 L2_sharp:6.9988e-02 L3_sharp:1.1243e-01 L4_sharp:1.7266e-01 L5_sharp:2.0453e-01 L6_sharp:3.0834e-01 L7_sharp:3.4228e-01 L8_sharp:3.1321e-01 L9_sharp:3.6918e-01 L10_sharp:3.7231e-01 L11_sharp:4.6982e-01 L12_sharp:7.9822e-01 total_fnorm:8.6875e+00 total_l1_linf:8.0320e+03 total_spectral:4.3750e+00 L1_fnorm:3.5400e-03 L2_fnorm:3.6621e-03 L3_fnorm:3.7384e-03 L4_fnorm:3.7842e-03 L5_fnorm:3.7994e-03 L6_fnorm:3.7689e-03 L7_fnorm:3.7842e-03 L8_fnorm:3.6316e-03 L9_fnorm:3.7231e-03 L10_fnorm:3.7231e-03 L11_fnorm:3.6774e-03 L12_fnorm:3.5858e-03 L1_l1linf:3.8719e-04 L2_l1linf:4.2915e-04 L3_l1linf:4.8828e-04 L4_l1linf:5.1498e-04 L5_l1linf:5.5695e-04 L6_l1linf:5.6076e-04 L7_l1linf:5.6839e-04 L8_l1linf:5.7602e-04 L9_l1linf:5.6839e-04 L10_l1linf:5.9509e-04 L11_l1linf:5.5695e-04 L12_l1linf:5.8365e-04 L1_spectral:7.4572e-05 L2_spectral:7.5918e-05 L3_spectral:7.6373e-05 L4_spectral:7.6221e-05 L5_spectral:7.6934e-05 L6_spectral:7.7312e-05 L7_spectral:7.7040e-05 L8_spectral:7.0670e-05 L9_spectral:7.3978e-05 L10_spectral:7.4129e-05 L11_spectral:7.2882e-05 L12_spectral:6.9275e-05 train_time:410083ms step_avg:42.72ms +[2025-09-11 10:05:26] [Rank 0] step:9601/10000 train_time:412309ms step_avg:42.94ms +[2025-09-11 10:05:26] [Rank 0] step:9601/10000 train_time:412309ms step_avg:42.94ms +[2025-09-11 10:05:27] [Rank 0] step:9621/10000 train_time:413038ms step_avg:42.93ms +[2025-09-11 10:05:27] [Rank 0] step:9621/10000 train_time:413038ms step_avg:42.93ms +[2025-09-11 10:05:28] [Rank 0] step:9641/10000 train_time:413754ms step_avg:42.92ms +[2025-09-11 10:05:28] [Rank 0] step:9641/10000 train_time:413754ms step_avg:42.92ms +[2025-09-11 10:05:29] [Rank 0] step:9661/10000 train_time:414476ms step_avg:42.90ms +[2025-09-11 10:05:29] [Rank 0] step:9661/10000 train_time:414476ms step_avg:42.90ms +[2025-09-11 10:05:29] [Rank 0] step:9681/10000 train_time:415192ms step_avg:42.89ms +[2025-09-11 10:05:29] [Rank 0] step:9681/10000 
train_time:415192ms step_avg:42.89ms +[2025-09-11 10:05:30] [Rank 0] step:9701/10000 train_time:415909ms step_avg:42.87ms +[2025-09-11 10:05:30] [Rank 0] step:9701/10000 train_time:415909ms step_avg:42.87ms +[2025-09-11 10:05:31] [Rank 0] step:9721/10000 train_time:416632ms step_avg:42.86ms +[2025-09-11 10:05:31] [Rank 0] step:9721/10000 train_time:416632ms step_avg:42.86ms +[2025-09-11 10:05:31] [Rank 0] step:9741/10000 train_time:417350ms step_avg:42.84ms +[2025-09-11 10:05:31] [Rank 0] step:9741/10000 train_time:417350ms step_avg:42.84ms +[2025-09-11 10:05:32] [Rank 0] step:9761/10000 train_time:418068ms step_avg:42.83ms +[2025-09-11 10:05:32] [Rank 0] step:9761/10000 train_time:418068ms step_avg:42.83ms +[2025-09-11 10:05:33] [Rank 0] step:9781/10000 train_time:418784ms step_avg:42.82ms +[2025-09-11 10:05:33] [Rank 0] step:9781/10000 train_time:418784ms step_avg:42.82ms +[2025-09-11 10:05:34] [Rank 0] step:9801/10000 train_time:419506ms step_avg:42.80ms +[2025-09-11 10:05:34] [Rank 0] step:9801/10000 train_time:419506ms step_avg:42.80ms +[2025-09-11 10:05:34] [Rank 0] step:9821/10000 train_time:420226ms step_avg:42.79ms +[2025-09-11 10:05:34] [Rank 0] step:9821/10000 train_time:420226ms step_avg:42.79ms +[2025-09-11 10:05:35] [Rank 0] step:9841/10000 train_time:420947ms step_avg:42.77ms +[2025-09-11 10:05:35] [Rank 0] step:9841/10000 train_time:420947ms step_avg:42.77ms +[2025-09-11 10:05:36] [Rank 0] step:9861/10000 train_time:421664ms step_avg:42.76ms +[2025-09-11 10:05:36] [Rank 0] step:9861/10000 train_time:421664ms step_avg:42.76ms +[2025-09-11 10:05:37] [Rank 0] step:9881/10000 train_time:422382ms step_avg:42.75ms +[2025-09-11 10:05:37] [Rank 0] step:9881/10000 train_time:422382ms step_avg:42.75ms +[2025-09-11 10:05:37] [Rank 0] step:9901/10000 train_time:423098ms step_avg:42.73ms +[2025-09-11 10:05:37] [Rank 0] step:9901/10000 train_time:423098ms step_avg:42.73ms +[2025-09-11 10:05:38] [Rank 0] step:9921/10000 train_time:423814ms step_avg:42.72ms 
+[2025-09-11 10:05:38] [Rank 0] step:9921/10000 train_time:423814ms step_avg:42.72ms +[2025-09-11 10:05:39] [Rank 0] step:9941/10000 train_time:424800ms step_avg:42.73ms +[2025-09-11 10:05:39] [Rank 0] step:9941/10000 train_time:424800ms step_avg:42.73ms +[2025-09-11 10:05:40] [Rank 0] step:9961/10000 train_time:425522ms step_avg:42.72ms +[2025-09-11 10:05:40] [Rank 0] step:9961/10000 train_time:425522ms step_avg:42.72ms +[2025-09-11 10:05:40] [Rank 0] step:9981/10000 train_time:426242ms step_avg:42.71ms +[2025-09-11 10:05:40] [Rank 0] step:9981/10000 train_time:426242ms step_avg:42.71ms +[2025-09-11 10:05:41] [Rank 0] step:10000/10000 train_time:427222ms step_avg:42.72ms +[2025-09-11 10:05:41] [Rank 0] step:10000/10000 train_time:427222ms step_avg:42.72ms +[2025-09-11 10:05:41] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 10:05:41] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 10:05:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 10:05:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 10:05:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 10:05:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 10:05:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:05:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:05:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 10:05:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 10:05:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 10:05:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 10:05:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 10:05:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 10:05:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 10:05:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 10:05:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 10:05:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 10:05:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 10:05:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 10:05:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 10:05:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 10:05:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 10:05:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 10:05:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 10:05:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 10:05:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 10:05:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 10:05:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 10:05:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 10:05:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 10:05:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 10:05:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 10:05:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 10:05:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 10:05:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 10:05:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 10:05:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 10:05:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 10:05:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 10:05:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 10:05:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 10:05:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 10:05:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 10:05:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:05:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:05:53] [Rank 0] PRINT: step:10000/10000 val_loss:4.6073 total_sharp:2.8461e-05 L1_sharp:4.0009e-02 L2_sharp:4.6858e-02 L3_sharp:8.2790e-02 L4_sharp:1.4143e-01 L5_sharp:1.6292e-01 L6_sharp:2.3298e-01 L7_sharp:2.7250e-01 L8_sharp:2.6207e-01 L9_sharp:2.9133e-01 L10_sharp:3.2416e-01 L11_sharp:3.4794e-01 L12_sharp:3.5754e-01 total_fnorm:3.3438e+00 total_l1_linf:2.2400e+03 total_spectral:1.6719e+00 L1_fnorm:1.3657e-03 L2_fnorm:1.4191e-03 L3_fnorm:1.4420e-03 L4_fnorm:1.4725e-03 L5_fnorm:1.4725e-03 L6_fnorm:1.4725e-03 L7_fnorm:1.4572e-03 L8_fnorm:1.4038e-03 L9_fnorm:1.4267e-03 L10_fnorm:1.4267e-03 L11_fnorm:1.4267e-03 L12_fnorm:1.3733e-03 L1_l1linf:1.2493e-04 L2_l1linf:1.3828e-04 L3_l1linf:1.4877e-04 L4_l1linf:1.6308e-04 L5_l1linf:1.6880e-04 L6_l1linf:1.6689e-04 L7_l1linf:1.7643e-04 L8_l1linf:1.7166e-04 L9_l1linf:1.7166e-04 L10_l1linf:1.6689e-04 L11_l1linf:1.7166e-04 L12_l1linf:1.6117e-04 L1_spectral:2.8572e-05 L2_spectral:3.0548e-05 L3_spectral:3.0413e-05 L4_spectral:3.0305e-05 L5_spectral:3.1487e-05 L6_spectral:3.0497e-05 L7_spectral:3.0541e-05 L8_spectral:2.8629e-05 L9_spectral:3.0034e-05 L10_spectral:2.9571e-05 L11_spectral:3.0432e-05 L12_spectral:2.7901e-05 train_time:427242ms step_avg:42.72ms +[2025-09-11 10:05:53] [Rank 0] PRINT: step:10000/10000 val_loss:4.6073 total_sharp:2.8461e-05 L1_sharp:4.0009e-02 L2_sharp:4.6858e-02 L3_sharp:8.2790e-02 L4_sharp:1.4143e-01 L5_sharp:1.6292e-01 L6_sharp:2.3298e-01 L7_sharp:2.7250e-01 L8_sharp:2.6207e-01 L9_sharp:2.9133e-01 L10_sharp:3.2416e-01 L11_sharp:3.4794e-01 L12_sharp:3.5754e-01 total_fnorm:3.3438e+00 total_l1_linf:2.2400e+03 total_spectral:1.6719e+00 L1_fnorm:1.3657e-03 L2_fnorm:1.4191e-03 L3_fnorm:1.4420e-03 L4_fnorm:1.4725e-03 L5_fnorm:1.4725e-03 L6_fnorm:1.4725e-03 L7_fnorm:1.4572e-03 L8_fnorm:1.4038e-03 L9_fnorm:1.4267e-03 L10_fnorm:1.4267e-03 L11_fnorm:1.4267e-03 L12_fnorm:1.3733e-03 L1_l1linf:1.2493e-04 L2_l1linf:1.3828e-04 L3_l1linf:1.4877e-04 L4_l1linf:1.6308e-04 L5_l1linf:1.6880e-04 
L6_l1linf:1.6689e-04 L7_l1linf:1.7643e-04 L8_l1linf:1.7166e-04 L9_l1linf:1.7166e-04 L10_l1linf:1.6689e-04 L11_l1linf:1.7166e-04 L12_l1linf:1.6117e-04 L1_spectral:2.8572e-05 L2_spectral:3.0548e-05 L3_spectral:3.0413e-05 L4_spectral:3.0305e-05 L5_spectral:3.1487e-05 L6_spectral:3.0497e-05 L7_spectral:3.0541e-05 L8_spectral:2.8629e-05 L9_spectral:3.0034e-05 L10_spectral:2.9571e-05 L11_spectral:3.0432e-05 L12_spectral:2.7901e-05 train_time:427242ms step_avg:42.72ms +[2025-09-11 10:05:53] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 10:05:53 2025 --- +[2025-09-11 10:05:53] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 10:05:53 2025 --- +[2025-09-11 10:05:53] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 10:05:53] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.0005_seed_45/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.0005_seed_45/config.json new file mode 100644 index 0000000000000000000000000000000000000000..239f314277bb44f2c22633b4caf7f8fda9b4f150 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.0005_seed_45/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 45, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.02, + "muon_lr": 0.0005, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "fe5e24b0-ed05-43a1-a700-98db702a3b16", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.0005_seed_45/training_log_fe5e24b0-ed05-43a1-a700-98db702a3b16.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.0005_seed_45/training_log_fe5e24b0-ed05-43a1-a700-98db702a3b16.txt new file mode 100644 index 0000000000000000000000000000000000000000..92cf5dfeb5f7394fb072e7544303d26627933362 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.0005_seed_45/training_log_fe5e24b0-ed05-43a1-a700-98db702a3b16.txt @@ -0,0 +1,4264 @@ +[2025-09-11 14:01:29] [Rank 0] PRINT: --- Script Start: Thu Sep 11 14:01:29 2025 --- +[2025-09-11 14:01:29] [Rank 0] PRINT: --- Script Start: Thu Sep 11 14:01:29 2025 --- +[2025-09-11 14:01:29] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.02, muon_lr=0.0005, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 14:01:29] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.02, muon_lr=0.0005, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 14:01:29] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 14:01:29] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 14:01:29] [Rank 0] PRINT: Using fixed seed: 45 +[2025-09-11 14:01:29] [Rank 0] PRINT: Using fixed seed: 45 +[2025-09-11 14:01:29] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.0005_seed_45 +[2025-09-11 14:01:29] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.0005_seed_45 +[2025-09-11 14:01:29] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses 
import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert 
header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." 
+ "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + 
train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 14:01:29] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 14:01:29] [Rank 0] PRINT: Constructing model... +[2025-09-11 14:01:29] [Rank 0] PRINT: Constructing model... +[2025-09-11 14:01:30] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 14:01:30] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 14:01:30] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 14:01:30] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 14:01:30] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 14:01:30] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 14:01:30] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 14:01:30] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 14:01:30] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 14:01:30] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 14:01:32] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 14:01:32] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 14:01:32] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 14:01:32] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 14:01:32] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 14:01:32] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 14:01:38] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 14:01:38] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 14:01:38] [Rank 0] PRINT: Starting warmup... +[2025-09-11 14:01:38] [Rank 0] PRINT: Starting warmup... +[2025-09-11 14:02:16] [Rank 0] PRINT: Warmup complete. +[2025-09-11 14:02:16] [Rank 0] PRINT: Warmup complete. +[2025-09-11 14:02:16] [Rank 0] PRINT: Starting training... +[2025-09-11 14:02:16] [Rank 0] PRINT: Starting training... 
+[2025-09-11 14:02:17] [Rank 0] step:21/10000 train_time:1139ms step_avg:54.24ms +[2025-09-11 14:02:17] [Rank 0] step:21/10000 train_time:1139ms step_avg:54.24ms +[2025-09-11 14:02:17] [Rank 0] step:41/10000 train_time:1875ms step_avg:45.72ms +[2025-09-11 14:02:17] [Rank 0] step:41/10000 train_time:1875ms step_avg:45.72ms +[2025-09-11 14:02:18] [Rank 0] step:61/10000 train_time:2608ms step_avg:42.76ms +[2025-09-11 14:02:18] [Rank 0] step:61/10000 train_time:2608ms step_avg:42.76ms +[2025-09-11 14:02:19] [Rank 0] step:81/10000 train_time:3343ms step_avg:41.27ms +[2025-09-11 14:02:19] [Rank 0] step:81/10000 train_time:3343ms step_avg:41.27ms +[2025-09-11 14:02:20] [Rank 0] step:101/10000 train_time:4078ms step_avg:40.38ms +[2025-09-11 14:02:20] [Rank 0] step:101/10000 train_time:4078ms step_avg:40.38ms +[2025-09-11 14:02:20] [Rank 0] step:121/10000 train_time:4813ms step_avg:39.78ms +[2025-09-11 14:02:20] [Rank 0] step:121/10000 train_time:4813ms step_avg:39.78ms +[2025-09-11 14:02:21] [Rank 0] step:141/10000 train_time:5548ms step_avg:39.35ms +[2025-09-11 14:02:21] [Rank 0] step:141/10000 train_time:5548ms step_avg:39.35ms +[2025-09-11 14:02:22] [Rank 0] step:161/10000 train_time:6283ms step_avg:39.02ms +[2025-09-11 14:02:22] [Rank 0] step:161/10000 train_time:6283ms step_avg:39.02ms +[2025-09-11 14:02:23] [Rank 0] step:181/10000 train_time:7018ms step_avg:38.77ms +[2025-09-11 14:02:23] [Rank 0] step:181/10000 train_time:7018ms step_avg:38.77ms +[2025-09-11 14:02:23] [Rank 0] step:201/10000 train_time:7753ms step_avg:38.57ms +[2025-09-11 14:02:23] [Rank 0] step:201/10000 train_time:7753ms step_avg:38.57ms +[2025-09-11 14:02:24] [Rank 0] step:221/10000 train_time:8488ms step_avg:38.41ms +[2025-09-11 14:02:24] [Rank 0] step:221/10000 train_time:8488ms step_avg:38.41ms +[2025-09-11 14:02:25] [Rank 0] step:241/10000 train_time:9222ms step_avg:38.27ms +[2025-09-11 14:02:25] [Rank 0] step:241/10000 train_time:9222ms step_avg:38.27ms +[2025-09-11 14:02:26] [Rank 0] 
step:261/10000 train_time:9958ms step_avg:38.15ms +[2025-09-11 14:02:26] [Rank 0] step:261/10000 train_time:9958ms step_avg:38.15ms +[2025-09-11 14:02:26] [Rank 0] step:281/10000 train_time:10692ms step_avg:38.05ms +[2025-09-11 14:02:26] [Rank 0] step:281/10000 train_time:10692ms step_avg:38.05ms +[2025-09-11 14:02:27] [Rank 0] step:301/10000 train_time:11427ms step_avg:37.96ms +[2025-09-11 14:02:27] [Rank 0] step:301/10000 train_time:11427ms step_avg:37.96ms +[2025-09-11 14:02:28] [Rank 0] step:321/10000 train_time:12161ms step_avg:37.88ms +[2025-09-11 14:02:28] [Rank 0] step:321/10000 train_time:12161ms step_avg:37.88ms +[2025-09-11 14:02:28] [Rank 0] step:341/10000 train_time:12895ms step_avg:37.82ms +[2025-09-11 14:02:28] [Rank 0] step:341/10000 train_time:12895ms step_avg:37.82ms +[2025-09-11 14:02:29] [Rank 0] step:361/10000 train_time:13630ms step_avg:37.76ms +[2025-09-11 14:02:29] [Rank 0] step:361/10000 train_time:13630ms step_avg:37.76ms +[2025-09-11 14:02:30] [Rank 0] step:381/10000 train_time:14364ms step_avg:37.70ms +[2025-09-11 14:02:30] [Rank 0] step:381/10000 train_time:14364ms step_avg:37.70ms +[2025-09-11 14:02:31] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 14:02:31] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 14:02:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 14:02:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 14:03:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 14:03:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 14:03:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:03:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 14:03:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 14:03:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 14:03:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 14:03:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 14:03:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 14:03:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 14:03:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 14:03:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 14:03:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 14:03:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 14:03:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 14:03:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 14:03:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 14:03:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 14:03:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 14:03:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 14:03:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 14:03:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 14:03:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 14:03:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 14:03:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 14:03:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 14:03:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 14:03:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 14:03:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 14:03:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 14:03:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 14:03:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 14:03:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 14:03:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 14:03:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 14:03:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 14:03:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 14:03:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 14:03:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 14:03:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 14:03:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 14:03:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 14:03:19] [Rank 0] PRINT: step:400/10000 val_loss:6.2844 total_sharp:3.6453e-04 L1_sharp:1.0241e-01 L2_sharp:1.0751e-01 L3_sharp:9.8552e-02 L4_sharp:1.3540e-01 L5_sharp:1.5525e-01 L6_sharp:1.5595e-01 L7_sharp:1.8263e-01 L8_sharp:1.8290e-01 L9_sharp:2.6379e-01 L10_sharp:4.4300e-01 L11_sharp:8.0425e-01 L12_sharp:7.0290e-01 total_fnorm:7.2837e+01 total_l1_linf:1.7771e+05 total_spectral:3.6421e+01 L1_fnorm:5.9079e-02 L2_fnorm:5.9355e-02 L3_fnorm:5.9561e-02 L4_fnorm:5.9198e-02 L5_fnorm:5.9374e-02 L6_fnorm:5.9678e-02 L7_fnorm:5.9526e-02 L8_fnorm:5.9489e-02 L9_fnorm:5.9556e-02 L10_fnorm:5.8393e-02 L11_fnorm:5.7788e-02 L12_fnorm:5.9033e-02 L1_l1linf:2.2618e-02 L2_l1linf:2.2564e-02 L3_l1linf:2.2382e-02 L4_l1linf:2.2506e-02 L5_l1linf:2.2470e-02 L6_l1linf:2.2350e-02 L7_l1linf:2.2353e-02 L8_l1linf:2.2388e-02 L9_l1linf:2.2357e-02 L10_l1linf:2.2392e-02 L11_l1linf:2.2264e-02 L12_l1linf:2.2162e-02 L1_spectral:6.0247e-04 L2_spectral:6.0236e-04 L3_spectral:6.0302e-04 L4_spectral:6.0268e-04 L5_spectral:6.0317e-04 L6_spectral:6.0253e-04 L7_spectral:6.0257e-04 L8_spectral:6.0264e-04 L9_spectral:6.0285e-04 L10_spectral:6.0301e-04 L11_spectral:6.0271e-04 L12_spectral:6.0271e-04 train_time:15079ms step_avg:37.70ms +[2025-09-11 14:03:19] [Rank 0] PRINT: step:400/10000 val_loss:6.2844 total_sharp:3.6453e-04 L1_sharp:1.0241e-01 L2_sharp:1.0751e-01 L3_sharp:9.8552e-02 L4_sharp:1.3540e-01 L5_sharp:1.5525e-01 L6_sharp:1.5595e-01 L7_sharp:1.8263e-01 L8_sharp:1.8290e-01 L9_sharp:2.6379e-01 L10_sharp:4.4300e-01 L11_sharp:8.0425e-01 L12_sharp:7.0290e-01 total_fnorm:7.2837e+01 total_l1_linf:1.7771e+05 total_spectral:3.6421e+01 L1_fnorm:5.9079e-02 L2_fnorm:5.9355e-02 L3_fnorm:5.9561e-02 L4_fnorm:5.9198e-02 L5_fnorm:5.9374e-02 L6_fnorm:5.9678e-02 L7_fnorm:5.9526e-02 L8_fnorm:5.9489e-02 L9_fnorm:5.9556e-02 L10_fnorm:5.8393e-02 L11_fnorm:5.7788e-02 L12_fnorm:5.9033e-02 L1_l1linf:2.2618e-02 L2_l1linf:2.2564e-02 L3_l1linf:2.2382e-02 L4_l1linf:2.2506e-02 L5_l1linf:2.2470e-02 
L6_l1linf:2.2350e-02 L7_l1linf:2.2353e-02 L8_l1linf:2.2388e-02 L9_l1linf:2.2357e-02 L10_l1linf:2.2392e-02 L11_l1linf:2.2264e-02 L12_l1linf:2.2162e-02 L1_spectral:6.0247e-04 L2_spectral:6.0236e-04 L3_spectral:6.0302e-04 L4_spectral:6.0268e-04 L5_spectral:6.0317e-04 L6_spectral:6.0253e-04 L7_spectral:6.0257e-04 L8_spectral:6.0264e-04 L9_spectral:6.0285e-04 L10_spectral:6.0301e-04 L11_spectral:6.0271e-04 L12_spectral:6.0271e-04 train_time:15079ms step_avg:37.70ms +[2025-09-11 14:03:50] [Rank 0] step:401/10000 train_time:45693ms step_avg:113.95ms +[2025-09-11 14:03:50] [Rank 0] step:401/10000 train_time:45693ms step_avg:113.95ms +[2025-09-11 14:03:52] [Rank 0] step:421/10000 train_time:48229ms step_avg:114.56ms +[2025-09-11 14:03:52] [Rank 0] step:421/10000 train_time:48229ms step_avg:114.56ms +[2025-09-11 14:03:53] [Rank 0] step:441/10000 train_time:48874ms step_avg:110.83ms +[2025-09-11 14:03:53] [Rank 0] step:441/10000 train_time:48874ms step_avg:110.83ms +[2025-09-11 14:03:54] [Rank 0] step:461/10000 train_time:49518ms step_avg:107.41ms +[2025-09-11 14:03:54] [Rank 0] step:461/10000 train_time:49518ms step_avg:107.41ms +[2025-09-11 14:03:55] [Rank 0] step:481/10000 train_time:50475ms step_avg:104.94ms +[2025-09-11 14:03:55] [Rank 0] step:481/10000 train_time:50475ms step_avg:104.94ms +[2025-09-11 14:03:55] [Rank 0] step:501/10000 train_time:51119ms step_avg:102.03ms +[2025-09-11 14:03:55] [Rank 0] step:501/10000 train_time:51119ms step_avg:102.03ms +[2025-09-11 14:03:56] [Rank 0] step:521/10000 train_time:51764ms step_avg:99.35ms +[2025-09-11 14:03:56] [Rank 0] step:521/10000 train_time:51764ms step_avg:99.35ms +[2025-09-11 14:03:57] [Rank 0] step:541/10000 train_time:52407ms step_avg:96.87ms +[2025-09-11 14:03:57] [Rank 0] step:541/10000 train_time:52407ms step_avg:96.87ms +[2025-09-11 14:03:57] [Rank 0] step:561/10000 train_time:53052ms step_avg:94.57ms +[2025-09-11 14:03:57] [Rank 0] step:561/10000 train_time:53052ms step_avg:94.57ms +[2025-09-11 14:03:58] [Rank 
0] step:581/10000 train_time:53696ms step_avg:92.42ms +[2025-09-11 14:03:58] [Rank 0] step:581/10000 train_time:53696ms step_avg:92.42ms +[2025-09-11 14:03:59] [Rank 0] step:601/10000 train_time:54339ms step_avg:90.41ms +[2025-09-11 14:03:59] [Rank 0] step:601/10000 train_time:54339ms step_avg:90.41ms +[2025-09-11 14:03:59] [Rank 0] step:621/10000 train_time:54983ms step_avg:88.54ms +[2025-09-11 14:03:59] [Rank 0] step:621/10000 train_time:54983ms step_avg:88.54ms +[2025-09-11 14:04:00] [Rank 0] step:641/10000 train_time:55626ms step_avg:86.78ms +[2025-09-11 14:04:00] [Rank 0] step:641/10000 train_time:55626ms step_avg:86.78ms +[2025-09-11 14:04:00] [Rank 0] step:661/10000 train_time:56270ms step_avg:85.13ms +[2025-09-11 14:04:00] [Rank 0] step:661/10000 train_time:56270ms step_avg:85.13ms +[2025-09-11 14:04:01] [Rank 0] step:681/10000 train_time:56913ms step_avg:83.57ms +[2025-09-11 14:04:01] [Rank 0] step:681/10000 train_time:56913ms step_avg:83.57ms +[2025-09-11 14:04:02] [Rank 0] step:701/10000 train_time:57559ms step_avg:82.11ms +[2025-09-11 14:04:02] [Rank 0] step:701/10000 train_time:57559ms step_avg:82.11ms +[2025-09-11 14:04:02] [Rank 0] step:721/10000 train_time:58202ms step_avg:80.72ms +[2025-09-11 14:04:02] [Rank 0] step:721/10000 train_time:58202ms step_avg:80.72ms +[2025-09-11 14:04:03] [Rank 0] step:741/10000 train_time:58846ms step_avg:79.41ms +[2025-09-11 14:04:03] [Rank 0] step:741/10000 train_time:58846ms step_avg:79.41ms +[2025-09-11 14:04:04] [Rank 0] step:761/10000 train_time:59495ms step_avg:78.18ms +[2025-09-11 14:04:04] [Rank 0] step:761/10000 train_time:59495ms step_avg:78.18ms +[2025-09-11 14:04:04] [Rank 0] step:781/10000 train_time:60142ms step_avg:77.01ms +[2025-09-11 14:04:04] [Rank 0] step:781/10000 train_time:60142ms step_avg:77.01ms +[2025-09-11 14:04:05] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 14:04:05] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 14:04:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 14:04:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 14:04:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 14:04:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 14:04:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:04:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:04:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 14:04:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 14:04:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 14:04:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 14:04:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 14:04:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 14:04:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 14:04:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 14:04:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 14:04:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 14:04:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 14:04:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 14:04:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 14:04:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 14:04:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 14:04:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 14:04:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 14:04:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 14:04:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 14:04:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 14:04:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 14:04:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 14:04:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 14:04:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 14:04:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 14:04:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 14:04:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 14:04:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 14:04:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 14:04:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 14:04:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 14:04:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 14:04:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... 
+[2025-09-11 14:04:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 14:04:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 14:04:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 14:04:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 14:04:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 14:04:50] [Rank 0] PRINT: step:800/10000 val_loss:5.9236 total_sharp:3.3053e-04 L1_sharp:3.0535e-01 L2_sharp:2.8347e-01 L3_sharp:2.8714e-01 L4_sharp:3.6973e-01 L5_sharp:4.6634e-01 L6_sharp:5.6174e-01 L7_sharp:6.9369e-01 L8_sharp:1.0716e+00 L9_sharp:1.4483e+00 L10_sharp:1.2620e+00 L11_sharp:1.1150e+00 L12_sharp:1.3843e+00 total_fnorm:7.6000e+01 total_l1_linf:1.5667e+05 total_spectral:3.8000e+01 L1_fnorm:4.6875e-02 L2_fnorm:4.8340e-02 L3_fnorm:4.8584e-02 L4_fnorm:4.7852e-02 L5_fnorm:4.6143e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8096e-02 L8_fnorm:4.5654e-02 L9_fnorm:4.6875e-02 L10_fnorm:4.5654e-02 L11_fnorm:4.5166e-02 L12_fnorm:4.3701e-02 L1_l1linf:2.1362e-02 L2_l1linf:2.1362e-02 L3_l1linf:2.1362e-02 L4_l1linf:2.1362e-02 L5_l1linf:2.1240e-02 L6_l1linf:2.1362e-02 L7_l1linf:2.1240e-02 L8_l1linf:2.1240e-02 L9_l1linf:2.1240e-02 L10_l1linf:2.0996e-02 L11_l1linf:2.0508e-02 L12_l1linf:2.0386e-02 L1_spectral:6.9903e-04 L2_spectral:7.0717e-04 L3_spectral:7.0762e-04 L4_spectral:7.0759e-04 L5_spectral:6.9646e-04 L6_spectral:7.0542e-04 L7_spectral:7.0627e-04 L8_spectral:6.8938e-04 L9_spectral:6.9531e-04 L10_spectral:6.9647e-04 L11_spectral:6.9022e-04 L12_spectral:6.7551e-04 train_time:60773ms step_avg:75.97ms +[2025-09-11 14:04:50] [Rank 0] PRINT: step:800/10000 val_loss:5.9236 total_sharp:3.3053e-04 L1_sharp:3.0535e-01 L2_sharp:2.8347e-01 L3_sharp:2.8714e-01 L4_sharp:3.6973e-01 L5_sharp:4.6634e-01 L6_sharp:5.6174e-01 L7_sharp:6.9369e-01 L8_sharp:1.0716e+00 
L9_sharp:1.4483e+00 L10_sharp:1.2620e+00 L11_sharp:1.1150e+00 L12_sharp:1.3843e+00 total_fnorm:7.6000e+01 total_l1_linf:1.5667e+05 total_spectral:3.8000e+01 L1_fnorm:4.6875e-02 L2_fnorm:4.8340e-02 L3_fnorm:4.8584e-02 L4_fnorm:4.7852e-02 L5_fnorm:4.6143e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8096e-02 L8_fnorm:4.5654e-02 L9_fnorm:4.6875e-02 L10_fnorm:4.5654e-02 L11_fnorm:4.5166e-02 L12_fnorm:4.3701e-02 L1_l1linf:2.1362e-02 L2_l1linf:2.1362e-02 L3_l1linf:2.1362e-02 L4_l1linf:2.1362e-02 L5_l1linf:2.1240e-02 L6_l1linf:2.1362e-02 L7_l1linf:2.1240e-02 L8_l1linf:2.1240e-02 L9_l1linf:2.1240e-02 L10_l1linf:2.0996e-02 L11_l1linf:2.0508e-02 L12_l1linf:2.0386e-02 L1_spectral:6.9903e-04 L2_spectral:7.0717e-04 L3_spectral:7.0762e-04 L4_spectral:7.0759e-04 L5_spectral:6.9646e-04 L6_spectral:7.0542e-04 L7_spectral:7.0627e-04 L8_spectral:6.8938e-04 L9_spectral:6.9531e-04 L10_spectral:6.9647e-04 L11_spectral:6.9022e-04 L12_spectral:6.7551e-04 train_time:60773ms step_avg:75.97ms +[2025-09-11 14:04:51] [Rank 0] step:801/10000 train_time:62260ms step_avg:77.73ms +[2025-09-11 14:04:51] [Rank 0] step:801/10000 train_time:62260ms step_avg:77.73ms +[2025-09-11 14:04:52] [Rank 0] step:821/10000 train_time:62936ms step_avg:76.66ms +[2025-09-11 14:04:52] [Rank 0] step:821/10000 train_time:62936ms step_avg:76.66ms +[2025-09-11 14:04:52] [Rank 0] step:841/10000 train_time:63586ms step_avg:75.61ms +[2025-09-11 14:04:52] [Rank 0] step:841/10000 train_time:63586ms step_avg:75.61ms +[2025-09-11 14:04:53] [Rank 0] step:861/10000 train_time:64234ms step_avg:74.60ms +[2025-09-11 14:04:53] [Rank 0] step:861/10000 train_time:64234ms step_avg:74.60ms +[2025-09-11 14:04:54] [Rank 0] step:881/10000 train_time:64883ms step_avg:73.65ms +[2025-09-11 14:04:54] [Rank 0] step:881/10000 train_time:64883ms step_avg:73.65ms +[2025-09-11 14:04:54] [Rank 0] step:901/10000 train_time:65532ms step_avg:72.73ms +[2025-09-11 14:04:54] [Rank 0] step:901/10000 train_time:65532ms step_avg:72.73ms +[2025-09-11 14:04:55] [Rank 0] 
step:921/10000 train_time:66486ms step_avg:72.19ms +[2025-09-11 14:04:55] [Rank 0] step:921/10000 train_time:66486ms step_avg:72.19ms +[2025-09-11 14:04:56] [Rank 0] step:941/10000 train_time:67135ms step_avg:71.34ms +[2025-09-11 14:04:56] [Rank 0] step:941/10000 train_time:67135ms step_avg:71.34ms +[2025-09-11 14:04:57] [Rank 0] step:961/10000 train_time:67784ms step_avg:70.53ms +[2025-09-11 14:04:57] [Rank 0] step:961/10000 train_time:67784ms step_avg:70.53ms +[2025-09-11 14:04:57] [Rank 0] step:981/10000 train_time:68432ms step_avg:69.76ms +[2025-09-11 14:04:57] [Rank 0] step:981/10000 train_time:68432ms step_avg:69.76ms +[2025-09-11 14:04:58] [Rank 0] step:1001/10000 train_time:69337ms step_avg:69.27ms +[2025-09-11 14:04:58] [Rank 0] step:1001/10000 train_time:69337ms step_avg:69.27ms +[2025-09-11 14:04:59] [Rank 0] step:1021/10000 train_time:69986ms step_avg:68.55ms +[2025-09-11 14:04:59] [Rank 0] step:1021/10000 train_time:69986ms step_avg:68.55ms +[2025-09-11 14:04:59] [Rank 0] step:1041/10000 train_time:70634ms step_avg:67.85ms +[2025-09-11 14:04:59] [Rank 0] step:1041/10000 train_time:70634ms step_avg:67.85ms +[2025-09-11 14:05:00] [Rank 0] step:1061/10000 train_time:71282ms step_avg:67.18ms +[2025-09-11 14:05:00] [Rank 0] step:1061/10000 train_time:71282ms step_avg:67.18ms +[2025-09-11 14:05:01] [Rank 0] step:1081/10000 train_time:71989ms step_avg:66.60ms +[2025-09-11 14:05:01] [Rank 0] step:1081/10000 train_time:71989ms step_avg:66.60ms +[2025-09-11 14:05:02] [Rank 0] step:1101/10000 train_time:72675ms step_avg:66.01ms +[2025-09-11 14:05:02] [Rank 0] step:1101/10000 train_time:72675ms step_avg:66.01ms +[2025-09-11 14:05:02] [Rank 0] step:1121/10000 train_time:73415ms step_avg:65.49ms +[2025-09-11 14:05:02] [Rank 0] step:1121/10000 train_time:73415ms step_avg:65.49ms +[2025-09-11 14:05:03] [Rank 0] step:1141/10000 train_time:74064ms step_avg:64.91ms +[2025-09-11 14:05:03] [Rank 0] step:1141/10000 train_time:74064ms step_avg:64.91ms +[2025-09-11 14:05:04] 
[Rank 0] step:1161/10000 train_time:74712ms step_avg:64.35ms +[2025-09-11 14:05:04] [Rank 0] step:1161/10000 train_time:74712ms step_avg:64.35ms +[2025-09-11 14:05:04] [Rank 0] step:1181/10000 train_time:75360ms step_avg:63.81ms +[2025-09-11 14:05:04] [Rank 0] step:1181/10000 train_time:75360ms step_avg:63.81ms +[2025-09-11 14:05:05] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 14:05:05] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 14:05:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 14:05:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 14:05:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 14:05:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 14:05:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:05:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:05:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 14:05:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 14:05:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 14:05:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 14:05:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 14:05:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 14:05:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... 
+[2025-09-11 14:05:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 14:05:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 14:05:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 14:05:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 14:05:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 14:05:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 14:05:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 14:05:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 14:05:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 14:05:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 14:05:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 14:05:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 14:05:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 14:05:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 14:05:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 14:05:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 14:05:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 14:05:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 14:05:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 14:05:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... 
+[2025-09-11 14:05:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 14:05:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 14:05:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 14:05:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 14:05:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 14:05:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 14:05:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 14:05:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 14:05:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 14:05:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 14:05:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 14:05:17] [Rank 0] PRINT: step:1200/10000 val_loss:5.6698 total_sharp:3.2742e-04 L1_sharp:4.9516e-01 L2_sharp:5.2676e-01 L3_sharp:4.7783e-01 L4_sharp:5.0144e-01 L5_sharp:5.1631e-01 L6_sharp:5.1181e-01 L7_sharp:5.6128e-01 L8_sharp:6.0343e-01 L9_sharp:9.6546e-01 L10_sharp:1.4462e+00 L11_sharp:2.1561e+00 L12_sharp:4.1488e+00 total_fnorm:7.6500e+01 total_l1_linf:1.5258e+05 total_spectral:3.8250e+01 L1_fnorm:4.8828e-02 L2_fnorm:4.9561e-02 L3_fnorm:4.9561e-02 L4_fnorm:4.9316e-02 L5_fnorm:4.8584e-02 L6_fnorm:4.9561e-02 L7_fnorm:4.9072e-02 L8_fnorm:4.8584e-02 L9_fnorm:4.9072e-02 L10_fnorm:4.8828e-02 L11_fnorm:4.9316e-02 L12_fnorm:4.7852e-02 L1_l1linf:2.0508e-02 L2_l1linf:2.0386e-02 L3_l1linf:2.0630e-02 L4_l1linf:2.0630e-02 L5_l1linf:2.0264e-02 L6_l1linf:2.0386e-02 L7_l1linf:2.0630e-02 L8_l1linf:2.0386e-02 L9_l1linf:2.0630e-02 L10_l1linf:2.0630e-02 L11_l1linf:2.0752e-02 L12_l1linf:2.0996e-02 L1_spectral:7.4666e-04 L2_spectral:7.4714e-04 L3_spectral:7.4543e-04 L4_spectral:7.4926e-04 L5_spectral:7.4335e-04 L6_spectral:7.4593e-04 L7_spectral:7.5011e-04 L8_spectral:7.5098e-04 L9_spectral:7.4546e-04 L10_spectral:7.4456e-04 L11_spectral:7.4212e-04 L12_spectral:7.2445e-04 train_time:75993ms step_avg:63.33ms +[2025-09-11 14:05:17] [Rank 0] PRINT: step:1200/10000 val_loss:5.6698 total_sharp:3.2742e-04 L1_sharp:4.9516e-01 L2_sharp:5.2676e-01 L3_sharp:4.7783e-01 L4_sharp:5.0144e-01 L5_sharp:5.1631e-01 L6_sharp:5.1181e-01 L7_sharp:5.6128e-01 L8_sharp:6.0343e-01 L9_sharp:9.6546e-01 L10_sharp:1.4462e+00 L11_sharp:2.1561e+00 L12_sharp:4.1488e+00 total_fnorm:7.6500e+01 total_l1_linf:1.5258e+05 total_spectral:3.8250e+01 L1_fnorm:4.8828e-02 L2_fnorm:4.9561e-02 L3_fnorm:4.9561e-02 L4_fnorm:4.9316e-02 L5_fnorm:4.8584e-02 L6_fnorm:4.9561e-02 L7_fnorm:4.9072e-02 L8_fnorm:4.8584e-02 L9_fnorm:4.9072e-02 L10_fnorm:4.8828e-02 L11_fnorm:4.9316e-02 L12_fnorm:4.7852e-02 L1_l1linf:2.0508e-02 L2_l1linf:2.0386e-02 L3_l1linf:2.0630e-02 L4_l1linf:2.0630e-02 L5_l1linf:2.0264e-02 
L6_l1linf:2.0386e-02 L7_l1linf:2.0630e-02 L8_l1linf:2.0386e-02 L9_l1linf:2.0630e-02 L10_l1linf:2.0630e-02 L11_l1linf:2.0752e-02 L12_l1linf:2.0996e-02 L1_spectral:7.4666e-04 L2_spectral:7.4714e-04 L3_spectral:7.4543e-04 L4_spectral:7.4926e-04 L5_spectral:7.4335e-04 L6_spectral:7.4593e-04 L7_spectral:7.5011e-04 L8_spectral:7.5098e-04 L9_spectral:7.4546e-04 L10_spectral:7.4456e-04 L11_spectral:7.4212e-04 L12_spectral:7.2445e-04 train_time:75993ms step_avg:63.33ms +[2025-09-11 14:05:18] [Rank 0] step:1201/10000 train_time:77458ms step_avg:64.49ms +[2025-09-11 14:05:18] [Rank 0] step:1201/10000 train_time:77458ms step_avg:64.49ms +[2025-09-11 14:05:19] [Rank 0] step:1221/10000 train_time:78123ms step_avg:63.98ms +[2025-09-11 14:05:19] [Rank 0] step:1221/10000 train_time:78123ms step_avg:63.98ms +[2025-09-11 14:05:19] [Rank 0] step:1241/10000 train_time:78774ms step_avg:63.48ms +[2025-09-11 14:05:19] [Rank 0] step:1241/10000 train_time:78774ms step_avg:63.48ms +[2025-09-11 14:05:20] [Rank 0] step:1261/10000 train_time:79424ms step_avg:62.99ms +[2025-09-11 14:05:20] [Rank 0] step:1261/10000 train_time:79424ms step_avg:62.99ms +[2025-09-11 14:05:21] [Rank 0] step:1281/10000 train_time:80074ms step_avg:62.51ms +[2025-09-11 14:05:21] [Rank 0] step:1281/10000 train_time:80074ms step_avg:62.51ms +[2025-09-11 14:05:21] [Rank 0] step:1301/10000 train_time:80723ms step_avg:62.05ms +[2025-09-11 14:05:21] [Rank 0] step:1301/10000 train_time:80723ms step_avg:62.05ms +[2025-09-11 14:05:22] [Rank 0] step:1321/10000 train_time:81372ms step_avg:61.60ms +[2025-09-11 14:05:22] [Rank 0] step:1321/10000 train_time:81372ms step_avg:61.60ms +[2025-09-11 14:05:23] [Rank 0] step:1341/10000 train_time:82023ms step_avg:61.17ms +[2025-09-11 14:05:23] [Rank 0] step:1341/10000 train_time:82023ms step_avg:61.17ms +[2025-09-11 14:05:23] [Rank 0] step:1361/10000 train_time:82672ms step_avg:60.74ms +[2025-09-11 14:05:23] [Rank 0] step:1361/10000 train_time:82672ms step_avg:60.74ms +[2025-09-11 14:05:24] 
[Rank 0] step:1381/10000 train_time:83321ms step_avg:60.33ms +[2025-09-11 14:05:24] [Rank 0] step:1381/10000 train_time:83321ms step_avg:60.33ms +[2025-09-11 14:05:25] [Rank 0] step:1401/10000 train_time:83971ms step_avg:59.94ms +[2025-09-11 14:05:25] [Rank 0] step:1401/10000 train_time:83971ms step_avg:59.94ms +[2025-09-11 14:05:25] [Rank 0] step:1421/10000 train_time:84621ms step_avg:59.55ms +[2025-09-11 14:05:25] [Rank 0] step:1421/10000 train_time:84621ms step_avg:59.55ms +[2025-09-11 14:05:26] [Rank 0] step:1441/10000 train_time:85270ms step_avg:59.17ms +[2025-09-11 14:05:26] [Rank 0] step:1441/10000 train_time:85270ms step_avg:59.17ms +[2025-09-11 14:05:27] [Rank 0] step:1461/10000 train_time:85919ms step_avg:58.81ms +[2025-09-11 14:05:27] [Rank 0] step:1461/10000 train_time:85919ms step_avg:58.81ms +[2025-09-11 14:05:27] [Rank 0] step:1481/10000 train_time:86568ms step_avg:58.45ms +[2025-09-11 14:05:27] [Rank 0] step:1481/10000 train_time:86568ms step_avg:58.45ms +[2025-09-11 14:05:28] [Rank 0] step:1501/10000 train_time:87222ms step_avg:58.11ms +[2025-09-11 14:05:28] [Rank 0] step:1501/10000 train_time:87222ms step_avg:58.11ms +[2025-09-11 14:05:29] [Rank 0] step:1521/10000 train_time:87875ms step_avg:57.77ms +[2025-09-11 14:05:29] [Rank 0] step:1521/10000 train_time:87875ms step_avg:57.77ms +[2025-09-11 14:05:29] [Rank 0] step:1541/10000 train_time:88529ms step_avg:57.45ms +[2025-09-11 14:05:29] [Rank 0] step:1541/10000 train_time:88529ms step_avg:57.45ms +[2025-09-11 14:05:30] [Rank 0] step:1561/10000 train_time:89184ms step_avg:57.13ms +[2025-09-11 14:05:30] [Rank 0] step:1561/10000 train_time:89184ms step_avg:57.13ms +[2025-09-11 14:05:30] [Rank 0] step:1581/10000 train_time:89838ms step_avg:56.82ms +[2025-09-11 14:05:30] [Rank 0] step:1581/10000 train_time:89838ms step_avg:56.82ms +[2025-09-11 14:05:31] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 14:05:31] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 14:05:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 14:05:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 14:05:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 14:05:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 14:05:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:05:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:05:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 14:05:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 14:05:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 14:05:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 14:05:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 14:05:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 14:05:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 14:05:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 14:05:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 14:05:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 14:05:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 14:05:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 14:05:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 14:05:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 14:05:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 14:05:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 14:05:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 14:05:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 14:05:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 14:05:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 14:05:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 14:05:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 14:05:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 14:05:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 14:05:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 14:05:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 14:05:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 14:05:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 14:05:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 14:05:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 14:05:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 14:05:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 14:05:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 14:05:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 14:05:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 14:05:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 14:05:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 14:05:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 14:05:42] [Rank 0] PRINT: step:1600/10000 val_loss:5.5092 total_sharp:2.9170e-04 L1_sharp:3.7363e-01 L2_sharp:4.1043e-01 L3_sharp:4.2314e-01 L4_sharp:4.7623e-01 L5_sharp:5.2712e-01 L6_sharp:5.5835e-01 L7_sharp:5.3069e-01 L8_sharp:7.5671e-01 L9_sharp:8.8258e-01 L10_sharp:1.2280e+00 L11_sharp:1.5357e+00 L12_sharp:2.3928e+00 total_fnorm:7.5500e+01 total_l1_linf:1.4234e+05 total_spectral:3.7750e+01 L1_fnorm:4.8828e-02 L2_fnorm:4.9805e-02 L3_fnorm:4.9805e-02 L4_fnorm:4.9316e-02 L5_fnorm:4.8584e-02 L6_fnorm:4.9805e-02 L7_fnorm:4.9316e-02 L8_fnorm:4.8584e-02 L9_fnorm:4.9316e-02 L10_fnorm:4.9316e-02 L11_fnorm:4.9561e-02 L12_fnorm:4.7852e-02 L1_l1linf:1.9409e-02 L2_l1linf:1.9409e-02 L3_l1linf:1.9531e-02 L4_l1linf:1.9287e-02 L5_l1linf:1.9531e-02 L6_l1linf:1.9531e-02 L7_l1linf:1.9165e-02 L8_l1linf:1.9531e-02 L9_l1linf:1.9409e-02 L10_l1linf:1.9531e-02 L11_l1linf:1.9897e-02 L12_l1linf:2.0142e-02 L1_spectral:7.5755e-04 L2_spectral:7.6112e-04 L3_spectral:7.6176e-04 L4_spectral:7.6566e-04 L5_spectral:7.6593e-04 L6_spectral:7.6617e-04 L7_spectral:7.6758e-04 L8_spectral:7.5976e-04 L9_spectral:7.6951e-04 L10_spectral:7.6936e-04 L11_spectral:7.6055e-04 L12_spectral:7.4152e-04 train_time:90475ms step_avg:56.55ms +[2025-09-11 14:05:42] [Rank 0] PRINT: step:1600/10000 
val_loss:5.5092 total_sharp:2.9170e-04 L1_sharp:3.7363e-01 L2_sharp:4.1043e-01 L3_sharp:4.2314e-01 L4_sharp:4.7623e-01 L5_sharp:5.2712e-01 L6_sharp:5.5835e-01 L7_sharp:5.3069e-01 L8_sharp:7.5671e-01 L9_sharp:8.8258e-01 L10_sharp:1.2280e+00 L11_sharp:1.5357e+00 L12_sharp:2.3928e+00 total_fnorm:7.5500e+01 total_l1_linf:1.4234e+05 total_spectral:3.7750e+01 L1_fnorm:4.8828e-02 L2_fnorm:4.9805e-02 L3_fnorm:4.9805e-02 L4_fnorm:4.9316e-02 L5_fnorm:4.8584e-02 L6_fnorm:4.9805e-02 L7_fnorm:4.9316e-02 L8_fnorm:4.8584e-02 L9_fnorm:4.9316e-02 L10_fnorm:4.9316e-02 L11_fnorm:4.9561e-02 L12_fnorm:4.7852e-02 L1_l1linf:1.9409e-02 L2_l1linf:1.9409e-02 L3_l1linf:1.9531e-02 L4_l1linf:1.9287e-02 L5_l1linf:1.9531e-02 L6_l1linf:1.9531e-02 L7_l1linf:1.9165e-02 L8_l1linf:1.9531e-02 L9_l1linf:1.9409e-02 L10_l1linf:1.9531e-02 L11_l1linf:1.9897e-02 L12_l1linf:2.0142e-02 L1_spectral:7.5755e-04 L2_spectral:7.6112e-04 L3_spectral:7.6176e-04 L4_spectral:7.6566e-04 L5_spectral:7.6593e-04 L6_spectral:7.6617e-04 L7_spectral:7.6758e-04 L8_spectral:7.5976e-04 L9_spectral:7.6951e-04 L10_spectral:7.6936e-04 L11_spectral:7.6055e-04 L12_spectral:7.4152e-04 train_time:90475ms step_avg:56.55ms +[2025-09-11 14:05:43] [Rank 0] step:1601/10000 train_time:91898ms step_avg:57.40ms +[2025-09-11 14:05:43] [Rank 0] step:1601/10000 train_time:91898ms step_avg:57.40ms +[2025-09-11 14:05:44] [Rank 0] step:1621/10000 train_time:92579ms step_avg:57.11ms +[2025-09-11 14:05:44] [Rank 0] step:1621/10000 train_time:92579ms step_avg:57.11ms +[2025-09-11 14:05:44] [Rank 0] step:1641/10000 train_time:93235ms step_avg:56.82ms +[2025-09-11 14:05:44] [Rank 0] step:1641/10000 train_time:93235ms step_avg:56.82ms +[2025-09-11 14:05:45] [Rank 0] step:1661/10000 train_time:93890ms step_avg:56.53ms +[2025-09-11 14:05:45] [Rank 0] step:1661/10000 train_time:93890ms step_avg:56.53ms +[2025-09-11 14:05:46] [Rank 0] step:1681/10000 train_time:94545ms step_avg:56.24ms +[2025-09-11 14:05:46] [Rank 0] step:1681/10000 train_time:94545ms 
step_avg:56.24ms +[2025-09-11 14:05:46] [Rank 0] step:1701/10000 train_time:95199ms step_avg:55.97ms +[2025-09-11 14:05:46] [Rank 0] step:1701/10000 train_time:95199ms step_avg:55.97ms +[2025-09-11 14:05:47] [Rank 0] step:1721/10000 train_time:95855ms step_avg:55.70ms +[2025-09-11 14:05:47] [Rank 0] step:1721/10000 train_time:95855ms step_avg:55.70ms +[2025-09-11 14:05:48] [Rank 0] step:1741/10000 train_time:96510ms step_avg:55.43ms +[2025-09-11 14:05:48] [Rank 0] step:1741/10000 train_time:96510ms step_avg:55.43ms +[2025-09-11 14:05:48] [Rank 0] step:1761/10000 train_time:97164ms step_avg:55.18ms +[2025-09-11 14:05:48] [Rank 0] step:1761/10000 train_time:97164ms step_avg:55.18ms +[2025-09-11 14:05:49] [Rank 0] step:1781/10000 train_time:97821ms step_avg:54.92ms +[2025-09-11 14:05:49] [Rank 0] step:1781/10000 train_time:97821ms step_avg:54.92ms +[2025-09-11 14:05:50] [Rank 0] step:1801/10000 train_time:98477ms step_avg:54.68ms +[2025-09-11 14:05:50] [Rank 0] step:1801/10000 train_time:98477ms step_avg:54.68ms +[2025-09-11 14:05:50] [Rank 0] step:1821/10000 train_time:99133ms step_avg:54.44ms +[2025-09-11 14:05:50] [Rank 0] step:1821/10000 train_time:99133ms step_avg:54.44ms +[2025-09-11 14:05:51] [Rank 0] step:1841/10000 train_time:99787ms step_avg:54.20ms +[2025-09-11 14:05:51] [Rank 0] step:1841/10000 train_time:99787ms step_avg:54.20ms +[2025-09-11 14:05:52] [Rank 0] step:1861/10000 train_time:100441ms step_avg:53.97ms +[2025-09-11 14:05:52] [Rank 0] step:1861/10000 train_time:100441ms step_avg:53.97ms +[2025-09-11 14:05:52] [Rank 0] step:1881/10000 train_time:101095ms step_avg:53.75ms +[2025-09-11 14:05:52] [Rank 0] step:1881/10000 train_time:101095ms step_avg:53.75ms +[2025-09-11 14:05:53] [Rank 0] step:1901/10000 train_time:101750ms step_avg:53.52ms +[2025-09-11 14:05:53] [Rank 0] step:1901/10000 train_time:101750ms step_avg:53.52ms +[2025-09-11 14:05:53] [Rank 0] step:1921/10000 train_time:102405ms step_avg:53.31ms +[2025-09-11 14:05:53] [Rank 0] 
step:1921/10000 train_time:102405ms step_avg:53.31ms +[2025-09-11 14:05:54] [Rank 0] step:1941/10000 train_time:103060ms step_avg:53.10ms +[2025-09-11 14:05:54] [Rank 0] step:1941/10000 train_time:103060ms step_avg:53.10ms +[2025-09-11 14:05:55] [Rank 0] step:1961/10000 train_time:103718ms step_avg:52.89ms +[2025-09-11 14:05:55] [Rank 0] step:1961/10000 train_time:103718ms step_avg:52.89ms +[2025-09-11 14:05:55] [Rank 0] step:1981/10000 train_time:104371ms step_avg:52.69ms +[2025-09-11 14:05:55] [Rank 0] step:1981/10000 train_time:104371ms step_avg:52.69ms +[2025-09-11 14:05:56] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 14:05:56] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 14:05:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 14:05:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 14:05:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 14:05:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 14:05:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:05:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:05:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 14:05:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 14:05:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 14:05:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 14:06:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 14:06:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 14:06:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 14:06:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 14:06:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 14:06:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 14:06:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 14:06:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 14:06:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 14:06:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 14:06:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 14:06:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 14:06:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 14:06:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 14:06:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 14:06:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 14:06:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 14:06:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 14:06:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 14:06:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 14:06:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 14:06:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 14:06:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 14:06:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 14:06:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 14:06:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 14:06:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 14:06:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 14:06:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 14:06:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 14:06:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 14:06:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 14:06:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 14:06:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 14:06:06] [Rank 0] PRINT: step:2000/10000 val_loss:5.3872 total_sharp:2.1998e-04 L1_sharp:2.7383e-01 L2_sharp:3.5176e-01 L3_sharp:3.6116e-01 L4_sharp:4.0500e-01 L5_sharp:4.0223e-01 L6_sharp:3.4147e-01 L7_sharp:3.7500e-01 L8_sharp:3.8843e-01 L9_sharp:4.1035e-01 L10_sharp:1.1094e+00 L11_sharp:2.1888e+00 L12_sharp:3.1766e+00 total_fnorm:7.4000e+01 total_l1_linf:1.4336e+05 total_spectral:3.6750e+01 L1_fnorm:4.8828e-02 L2_fnorm:4.9316e-02 L3_fnorm:4.9561e-02 L4_fnorm:4.9316e-02 L5_fnorm:4.8828e-02 L6_fnorm:4.9561e-02 L7_fnorm:4.9316e-02 L8_fnorm:4.8584e-02 L9_fnorm:4.9316e-02 L10_fnorm:4.9316e-02 L11_fnorm:4.9316e-02 L12_fnorm:4.8096e-02 L1_l1linf:1.8066e-02 L2_l1linf:1.8188e-02 L3_l1linf:1.8433e-02 L4_l1linf:1.8433e-02 L5_l1linf:1.8311e-02 L6_l1linf:1.8311e-02 L7_l1linf:1.8066e-02 L8_l1linf:1.8188e-02 L9_l1linf:1.8433e-02 L10_l1linf:1.8311e-02 L11_l1linf:1.9043e-02 L12_l1linf:1.9165e-02 L1_spectral:7.7477e-04 L2_spectral:7.7391e-04 L3_spectral:7.7526e-04 L4_spectral:7.7549e-04 L5_spectral:7.7685e-04 L6_spectral:7.8208e-04 L7_spectral:7.8238e-04 L8_spectral:7.8073e-04 L9_spectral:7.7993e-04 L10_spectral:7.8088e-04 L11_spectral:7.7557e-04 L12_spectral:7.5423e-04 train_time:105007ms step_avg:52.50ms +[2025-09-11 14:06:06] [Rank 0] PRINT: step:2000/10000 val_loss:5.3872 total_sharp:2.1998e-04 L1_sharp:2.7383e-01 L2_sharp:3.5176e-01 L3_sharp:3.6116e-01 L4_sharp:4.0500e-01 L5_sharp:4.0223e-01 L6_sharp:3.4147e-01 L7_sharp:3.7500e-01 L8_sharp:3.8843e-01 L9_sharp:4.1035e-01 L10_sharp:1.1094e+00 L11_sharp:2.1888e+00 L12_sharp:3.1766e+00 total_fnorm:7.4000e+01 total_l1_linf:1.4336e+05 total_spectral:3.6750e+01 L1_fnorm:4.8828e-02 L2_fnorm:4.9316e-02 L3_fnorm:4.9561e-02 L4_fnorm:4.9316e-02 L5_fnorm:4.8828e-02 L6_fnorm:4.9561e-02 L7_fnorm:4.9316e-02 L8_fnorm:4.8584e-02 L9_fnorm:4.9316e-02 L10_fnorm:4.9316e-02 L11_fnorm:4.9316e-02 L12_fnorm:4.8096e-02 L1_l1linf:1.8066e-02 L2_l1linf:1.8188e-02 L3_l1linf:1.8433e-02 L4_l1linf:1.8433e-02 L5_l1linf:1.8311e-02 
L6_l1linf:1.8311e-02 L7_l1linf:1.8066e-02 L8_l1linf:1.8188e-02 L9_l1linf:1.8433e-02 L10_l1linf:1.8311e-02 L11_l1linf:1.9043e-02 L12_l1linf:1.9165e-02 L1_spectral:7.7477e-04 L2_spectral:7.7391e-04 L3_spectral:7.7526e-04 L4_spectral:7.7549e-04 L5_spectral:7.7685e-04 L6_spectral:7.8208e-04 L7_spectral:7.8238e-04 L8_spectral:7.8073e-04 L9_spectral:7.7993e-04 L10_spectral:7.8088e-04 L11_spectral:7.7557e-04 L12_spectral:7.5423e-04 train_time:105007ms step_avg:52.50ms +[2025-09-11 14:06:08] [Rank 0] step:2001/10000 train_time:106461ms step_avg:53.20ms +[2025-09-11 14:06:08] [Rank 0] step:2001/10000 train_time:106461ms step_avg:53.20ms +[2025-09-11 14:06:08] [Rank 0] step:2021/10000 train_time:107121ms step_avg:53.00ms +[2025-09-11 14:06:08] [Rank 0] step:2021/10000 train_time:107121ms step_avg:53.00ms +[2025-09-11 14:06:09] [Rank 0] step:2041/10000 train_time:107776ms step_avg:52.81ms +[2025-09-11 14:06:09] [Rank 0] step:2041/10000 train_time:107776ms step_avg:52.81ms +[2025-09-11 14:06:10] [Rank 0] step:2061/10000 train_time:108430ms step_avg:52.61ms +[2025-09-11 14:06:10] [Rank 0] step:2061/10000 train_time:108430ms step_avg:52.61ms +[2025-09-11 14:06:10] [Rank 0] step:2081/10000 train_time:109084ms step_avg:52.42ms +[2025-09-11 14:06:10] [Rank 0] step:2081/10000 train_time:109084ms step_avg:52.42ms +[2025-09-11 14:06:11] [Rank 0] step:2101/10000 train_time:109738ms step_avg:52.23ms +[2025-09-11 14:06:11] [Rank 0] step:2101/10000 train_time:109738ms step_avg:52.23ms +[2025-09-11 14:06:12] [Rank 0] step:2121/10000 train_time:110393ms step_avg:52.05ms +[2025-09-11 14:06:12] [Rank 0] step:2121/10000 train_time:110393ms step_avg:52.05ms +[2025-09-11 14:06:12] [Rank 0] step:2141/10000 train_time:111047ms step_avg:51.87ms +[2025-09-11 14:06:12] [Rank 0] step:2141/10000 train_time:111047ms step_avg:51.87ms +[2025-09-11 14:06:13] [Rank 0] step:2161/10000 train_time:111702ms step_avg:51.69ms +[2025-09-11 14:06:13] [Rank 0] step:2161/10000 train_time:111702ms step_avg:51.69ms 
+[2025-09-11 14:06:14] [Rank 0] step:2181/10000 train_time:112355ms step_avg:51.52ms +[2025-09-11 14:06:14] [Rank 0] step:2181/10000 train_time:112355ms step_avg:51.52ms +[2025-09-11 14:06:14] [Rank 0] step:2201/10000 train_time:113008ms step_avg:51.34ms +[2025-09-11 14:06:14] [Rank 0] step:2201/10000 train_time:113008ms step_avg:51.34ms +[2025-09-11 14:06:15] [Rank 0] step:2221/10000 train_time:113662ms step_avg:51.18ms +[2025-09-11 14:06:15] [Rank 0] step:2221/10000 train_time:113662ms step_avg:51.18ms +[2025-09-11 14:06:16] [Rank 0] step:2241/10000 train_time:114327ms step_avg:51.02ms +[2025-09-11 14:06:16] [Rank 0] step:2241/10000 train_time:114327ms step_avg:51.02ms +[2025-09-11 14:06:16] [Rank 0] step:2261/10000 train_time:114994ms step_avg:50.86ms +[2025-09-11 14:06:16] [Rank 0] step:2261/10000 train_time:114994ms step_avg:50.86ms +[2025-09-11 14:06:17] [Rank 0] step:2281/10000 train_time:115661ms step_avg:50.71ms +[2025-09-11 14:06:17] [Rank 0] step:2281/10000 train_time:115661ms step_avg:50.71ms +[2025-09-11 14:06:18] [Rank 0] step:2301/10000 train_time:116327ms step_avg:50.56ms +[2025-09-11 14:06:18] [Rank 0] step:2301/10000 train_time:116327ms step_avg:50.56ms +[2025-09-11 14:06:18] [Rank 0] step:2321/10000 train_time:116995ms step_avg:50.41ms +[2025-09-11 14:06:18] [Rank 0] step:2321/10000 train_time:116995ms step_avg:50.41ms +[2025-09-11 14:06:19] [Rank 0] step:2341/10000 train_time:117661ms step_avg:50.26ms +[2025-09-11 14:06:19] [Rank 0] step:2341/10000 train_time:117661ms step_avg:50.26ms +[2025-09-11 14:06:20] [Rank 0] step:2361/10000 train_time:118328ms step_avg:50.12ms +[2025-09-11 14:06:20] [Rank 0] step:2361/10000 train_time:118328ms step_avg:50.12ms +[2025-09-11 14:06:20] [Rank 0] step:2381/10000 train_time:118994ms step_avg:49.98ms +[2025-09-11 14:06:20] [Rank 0] step:2381/10000 train_time:118994ms step_avg:49.98ms +[2025-09-11 14:06:21] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 14:06:21] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 14:06:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 14:06:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 14:06:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 14:06:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 14:06:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:06:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:06:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 14:06:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 14:06:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 14:06:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 14:06:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 14:06:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 14:06:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 14:06:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 14:06:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 14:06:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 14:06:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 14:06:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 14:06:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 14:06:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 14:06:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 14:06:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 14:06:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 14:06:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 14:06:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 14:06:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 14:06:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 14:06:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 14:06:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 14:06:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 14:06:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 14:06:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 14:06:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 14:06:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 14:06:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 14:06:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 14:06:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 14:06:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 14:06:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 14:06:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 14:06:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 14:06:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 14:06:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 14:06:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 14:06:32] [Rank 0] PRINT: step:2400/10000 val_loss:5.2720 total_sharp:2.1466e-04 L1_sharp:2.7893e-01 L2_sharp:3.7846e-01 L3_sharp:4.3454e-01 L4_sharp:5.0267e-01 L5_sharp:5.1233e-01 L6_sharp:5.0133e-01 L7_sharp:4.5247e-01 L8_sharp:4.5125e-01 L9_sharp:4.7964e-01 L10_sharp:4.5615e-01 L11_sharp:6.8272e-01 L12_sharp:1.2074e+00 total_fnorm:6.9500e+01 total_l1_linf:1.2902e+05 total_spectral:3.4750e+01 L1_fnorm:4.8828e-02 L2_fnorm:4.9316e-02 L3_fnorm:4.9316e-02 L4_fnorm:4.9072e-02 L5_fnorm:4.8828e-02 L6_fnorm:4.9316e-02 L7_fnorm:4.8828e-02 L8_fnorm:4.8584e-02 L9_fnorm:4.8828e-02 L10_fnorm:4.8584e-02 L11_fnorm:4.8828e-02 L12_fnorm:4.8096e-02 L1_l1linf:1.7578e-02 L2_l1linf:1.7700e-02 L3_l1linf:1.7700e-02 L4_l1linf:1.7822e-02 L5_l1linf:1.7700e-02 L6_l1linf:1.7822e-02 L7_l1linf:1.7578e-02 L8_l1linf:1.7700e-02 L9_l1linf:1.7700e-02 L10_l1linf:1.7578e-02 L11_l1linf:1.7822e-02 L12_l1linf:1.8433e-02 L1_spectral:7.7736e-04 L2_spectral:7.8448e-04 L3_spectral:7.7993e-04 L4_spectral:7.8536e-04 L5_spectral:7.8183e-04 L6_spectral:7.8728e-04 L7_spectral:7.9326e-04 L8_spectral:7.9258e-04 L9_spectral:7.8240e-04 L10_spectral:7.8919e-04 L11_spectral:7.8197e-04 L12_spectral:7.6254e-04 train_time:119642ms step_avg:49.85ms +[2025-09-11 14:06:32] [Rank 0] PRINT: step:2400/10000 
val_loss:5.2720 total_sharp:2.1466e-04 L1_sharp:2.7893e-01 L2_sharp:3.7846e-01 L3_sharp:4.3454e-01 L4_sharp:5.0267e-01 L5_sharp:5.1233e-01 L6_sharp:5.0133e-01 L7_sharp:4.5247e-01 L8_sharp:4.5125e-01 L9_sharp:4.7964e-01 L10_sharp:4.5615e-01 L11_sharp:6.8272e-01 L12_sharp:1.2074e+00 total_fnorm:6.9500e+01 total_l1_linf:1.2902e+05 total_spectral:3.4750e+01 L1_fnorm:4.8828e-02 L2_fnorm:4.9316e-02 L3_fnorm:4.9316e-02 L4_fnorm:4.9072e-02 L5_fnorm:4.8828e-02 L6_fnorm:4.9316e-02 L7_fnorm:4.8828e-02 L8_fnorm:4.8584e-02 L9_fnorm:4.8828e-02 L10_fnorm:4.8584e-02 L11_fnorm:4.8828e-02 L12_fnorm:4.8096e-02 L1_l1linf:1.7578e-02 L2_l1linf:1.7700e-02 L3_l1linf:1.7700e-02 L4_l1linf:1.7822e-02 L5_l1linf:1.7700e-02 L6_l1linf:1.7822e-02 L7_l1linf:1.7578e-02 L8_l1linf:1.7700e-02 L9_l1linf:1.7700e-02 L10_l1linf:1.7578e-02 L11_l1linf:1.7822e-02 L12_l1linf:1.8433e-02 L1_spectral:7.7736e-04 L2_spectral:7.8448e-04 L3_spectral:7.7993e-04 L4_spectral:7.8536e-04 L5_spectral:7.8183e-04 L6_spectral:7.8728e-04 L7_spectral:7.9326e-04 L8_spectral:7.9258e-04 L9_spectral:7.8240e-04 L10_spectral:7.8919e-04 L11_spectral:7.8197e-04 L12_spectral:7.6254e-04 train_time:119642ms step_avg:49.85ms +[2025-09-11 14:06:33] [Rank 0] step:2401/10000 train_time:121132ms step_avg:50.45ms +[2025-09-11 14:06:33] [Rank 0] step:2401/10000 train_time:121132ms step_avg:50.45ms +[2025-09-11 14:06:34] [Rank 0] step:2421/10000 train_time:121842ms step_avg:50.33ms +[2025-09-11 14:06:34] [Rank 0] step:2421/10000 train_time:121842ms step_avg:50.33ms +[2025-09-11 14:06:34] [Rank 0] step:2441/10000 train_time:122511ms step_avg:50.19ms +[2025-09-11 14:06:34] [Rank 0] step:2441/10000 train_time:122511ms step_avg:50.19ms +[2025-09-11 14:06:35] [Rank 0] step:2461/10000 train_time:123180ms step_avg:50.05ms +[2025-09-11 14:06:35] [Rank 0] step:2461/10000 train_time:123180ms step_avg:50.05ms +[2025-09-11 14:06:36] [Rank 0] step:2481/10000 train_time:123848ms step_avg:49.92ms +[2025-09-11 14:06:36] [Rank 0] step:2481/10000 
train_time:123848ms step_avg:49.92ms +[2025-09-11 14:06:36] [Rank 0] step:2501/10000 train_time:124516ms step_avg:49.79ms +[2025-09-11 14:06:36] [Rank 0] step:2501/10000 train_time:124516ms step_avg:49.79ms +[2025-09-11 14:06:37] [Rank 0] step:2521/10000 train_time:125183ms step_avg:49.66ms +[2025-09-11 14:06:37] [Rank 0] step:2521/10000 train_time:125183ms step_avg:49.66ms +[2025-09-11 14:06:38] [Rank 0] step:2541/10000 train_time:125851ms step_avg:49.53ms +[2025-09-11 14:06:38] [Rank 0] step:2541/10000 train_time:125851ms step_avg:49.53ms +[2025-09-11 14:06:38] [Rank 0] step:2561/10000 train_time:126518ms step_avg:49.40ms +[2025-09-11 14:06:38] [Rank 0] step:2561/10000 train_time:126518ms step_avg:49.40ms +[2025-09-11 14:06:39] [Rank 0] step:2581/10000 train_time:127185ms step_avg:49.28ms +[2025-09-11 14:06:39] [Rank 0] step:2581/10000 train_time:127185ms step_avg:49.28ms +[2025-09-11 14:06:40] [Rank 0] step:2601/10000 train_time:127854ms step_avg:49.16ms +[2025-09-11 14:06:40] [Rank 0] step:2601/10000 train_time:127854ms step_avg:49.16ms +[2025-09-11 14:06:40] [Rank 0] step:2621/10000 train_time:128521ms step_avg:49.04ms +[2025-09-11 14:06:40] [Rank 0] step:2621/10000 train_time:128521ms step_avg:49.04ms +[2025-09-11 14:06:41] [Rank 0] step:2641/10000 train_time:129189ms step_avg:48.92ms +[2025-09-11 14:06:41] [Rank 0] step:2641/10000 train_time:129189ms step_avg:48.92ms +[2025-09-11 14:06:42] [Rank 0] step:2661/10000 train_time:129857ms step_avg:48.80ms +[2025-09-11 14:06:42] [Rank 0] step:2661/10000 train_time:129857ms step_avg:48.80ms +[2025-09-11 14:06:42] [Rank 0] step:2681/10000 train_time:130526ms step_avg:48.69ms +[2025-09-11 14:06:42] [Rank 0] step:2681/10000 train_time:130526ms step_avg:48.69ms +[2025-09-11 14:06:43] [Rank 0] step:2701/10000 train_time:131194ms step_avg:48.57ms +[2025-09-11 14:06:43] [Rank 0] step:2701/10000 train_time:131194ms step_avg:48.57ms +[2025-09-11 14:06:44] [Rank 0] step:2721/10000 train_time:131861ms step_avg:48.46ms 
+[2025-09-11 14:06:44] [Rank 0] step:2721/10000 train_time:131861ms step_avg:48.46ms +[2025-09-11 14:06:44] [Rank 0] step:2741/10000 train_time:132534ms step_avg:48.35ms +[2025-09-11 14:06:44] [Rank 0] step:2741/10000 train_time:132534ms step_avg:48.35ms +[2025-09-11 14:06:45] [Rank 0] step:2761/10000 train_time:133202ms step_avg:48.24ms +[2025-09-11 14:06:45] [Rank 0] step:2761/10000 train_time:133202ms step_avg:48.24ms +[2025-09-11 14:06:46] [Rank 0] step:2781/10000 train_time:133869ms step_avg:48.14ms +[2025-09-11 14:06:46] [Rank 0] step:2781/10000 train_time:133869ms step_avg:48.14ms +[2025-09-11 14:06:46] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 14:06:46] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 14:06:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 14:06:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 14:06:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 14:06:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 14:06:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:06:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:06:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 14:06:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 14:06:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 14:06:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 14:06:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 14:06:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 14:06:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 14:06:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 14:06:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 14:06:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 14:06:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 14:06:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 14:06:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 14:06:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 14:06:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 14:06:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 14:06:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 14:06:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 14:06:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 14:06:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 14:06:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 14:06:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 14:06:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 14:06:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 14:06:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 14:06:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 14:06:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 14:06:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 14:06:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 14:06:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 14:06:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 14:06:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 14:06:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 14:06:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 14:06:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 14:06:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 14:06:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 14:06:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 14:06:57] [Rank 0] PRINT: step:2800/10000 val_loss:5.1889 total_sharp:2.1470e-04 L1_sharp:2.5013e-01 L2_sharp:3.3114e-01 L3_sharp:4.1272e-01 L4_sharp:4.8739e-01 L5_sharp:5.2308e-01 L6_sharp:5.2666e-01 L7_sharp:4.7013e-01 L8_sharp:5.1079e-01 L9_sharp:5.6616e-01 L10_sharp:6.6894e-01 L11_sharp:8.2708e-01 L12_sharp:1.2585e+00 total_fnorm:6.8000e+01 total_l1_linf:1.2698e+05 total_spectral:3.4000e+01 L1_fnorm:4.8828e-02 L2_fnorm:4.9316e-02 L3_fnorm:4.9316e-02 L4_fnorm:4.9072e-02 L5_fnorm:4.8828e-02 L6_fnorm:4.8828e-02 L7_fnorm:4.8828e-02 L8_fnorm:4.8096e-02 L9_fnorm:4.8584e-02 L10_fnorm:4.8340e-02 L11_fnorm:4.8584e-02 L12_fnorm:4.7852e-02 L1_l1linf:1.6602e-02 L2_l1linf:1.6968e-02 L3_l1linf:1.7212e-02 L4_l1linf:1.7090e-02 L5_l1linf:1.6968e-02 L6_l1linf:1.6968e-02 L7_l1linf:1.6846e-02 L8_l1linf:1.6724e-02 L9_l1linf:1.6602e-02 L10_l1linf:1.6968e-02 L11_l1linf:1.7456e-02 L12_l1linf:1.7700e-02 L1_spectral:7.8818e-04 L2_spectral:7.8551e-04 L3_spectral:7.8476e-04 L4_spectral:7.8671e-04 L5_spectral:7.8760e-04 L6_spectral:7.9486e-04 L7_spectral:7.9141e-04 L8_spectral:7.8501e-04 L9_spectral:7.8362e-04 L10_spectral:7.9556e-04 L11_spectral:7.8771e-04 L12_spectral:7.6922e-04 train_time:134518ms step_avg:48.04ms +[2025-09-11 14:06:57] [Rank 0] PRINT: step:2800/10000 val_loss:5.1889 total_sharp:2.1470e-04 L1_sharp:2.5013e-01 L2_sharp:3.3114e-01 L3_sharp:4.1272e-01 L4_sharp:4.8739e-01 L5_sharp:5.2308e-01 L6_sharp:5.2666e-01 L7_sharp:4.7013e-01 L8_sharp:5.1079e-01 L9_sharp:5.6616e-01 L10_sharp:6.6894e-01 L11_sharp:8.2708e-01 L12_sharp:1.2585e+00 total_fnorm:6.8000e+01 total_l1_linf:1.2698e+05 total_spectral:3.4000e+01 L1_fnorm:4.8828e-02 L2_fnorm:4.9316e-02 L3_fnorm:4.9316e-02 L4_fnorm:4.9072e-02 L5_fnorm:4.8828e-02 L6_fnorm:4.8828e-02 L7_fnorm:4.8828e-02 L8_fnorm:4.8096e-02 L9_fnorm:4.8584e-02 L10_fnorm:4.8340e-02 L11_fnorm:4.8584e-02 L12_fnorm:4.7852e-02 L1_l1linf:1.6602e-02 L2_l1linf:1.6968e-02 L3_l1linf:1.7212e-02 L4_l1linf:1.7090e-02 L5_l1linf:1.6968e-02 
L6_l1linf:1.6968e-02 L7_l1linf:1.6846e-02 L8_l1linf:1.6724e-02 L9_l1linf:1.6602e-02 L10_l1linf:1.6968e-02 L11_l1linf:1.7456e-02 L12_l1linf:1.7700e-02 L1_spectral:7.8818e-04 L2_spectral:7.8551e-04 L3_spectral:7.8476e-04 L4_spectral:7.8671e-04 L5_spectral:7.8760e-04 L6_spectral:7.9486e-04 L7_spectral:7.9141e-04 L8_spectral:7.8501e-04 L9_spectral:7.8362e-04 L10_spectral:7.9556e-04 L11_spectral:7.8771e-04 L12_spectral:7.6922e-04 train_time:134518ms step_avg:48.04ms +[2025-09-11 14:06:58] [Rank 0] step:2801/10000 train_time:135999ms step_avg:48.55ms +[2025-09-11 14:06:58] [Rank 0] step:2801/10000 train_time:135999ms step_avg:48.55ms +[2025-09-11 14:06:59] [Rank 0] step:2821/10000 train_time:136693ms step_avg:48.46ms +[2025-09-11 14:06:59] [Rank 0] step:2821/10000 train_time:136693ms step_avg:48.46ms +[2025-09-11 14:07:00] [Rank 0] step:2841/10000 train_time:137363ms step_avg:48.35ms +[2025-09-11 14:07:00] [Rank 0] step:2841/10000 train_time:137363ms step_avg:48.35ms +[2025-09-11 14:07:00] [Rank 0] step:2861/10000 train_time:138032ms step_avg:48.25ms +[2025-09-11 14:07:00] [Rank 0] step:2861/10000 train_time:138032ms step_avg:48.25ms +[2025-09-11 14:07:01] [Rank 0] step:2881/10000 train_time:138701ms step_avg:48.14ms +[2025-09-11 14:07:01] [Rank 0] step:2881/10000 train_time:138701ms step_avg:48.14ms +[2025-09-11 14:07:02] [Rank 0] step:2901/10000 train_time:139666ms step_avg:48.14ms +[2025-09-11 14:07:02] [Rank 0] step:2901/10000 train_time:139666ms step_avg:48.14ms +[2025-09-11 14:07:03] [Rank 0] step:2921/10000 train_time:140335ms step_avg:48.04ms +[2025-09-11 14:07:03] [Rank 0] step:2921/10000 train_time:140335ms step_avg:48.04ms +[2025-09-11 14:07:03] [Rank 0] step:2941/10000 train_time:141003ms step_avg:47.94ms +[2025-09-11 14:07:03] [Rank 0] step:2941/10000 train_time:141003ms step_avg:47.94ms +[2025-09-11 14:07:04] [Rank 0] step:2961/10000 train_time:141832ms step_avg:47.90ms +[2025-09-11 14:07:04] [Rank 0] step:2961/10000 train_time:141832ms step_avg:47.90ms 
+[2025-09-11 14:07:05] [Rank 0] step:2981/10000 train_time:142617ms step_avg:47.84ms +[2025-09-11 14:07:05] [Rank 0] step:2981/10000 train_time:142617ms step_avg:47.84ms +[2025-09-11 14:07:06] [Rank 0] step:3001/10000 train_time:143288ms step_avg:47.75ms +[2025-09-11 14:07:06] [Rank 0] step:3001/10000 train_time:143288ms step_avg:47.75ms +[2025-09-11 14:07:06] [Rank 0] step:3021/10000 train_time:143959ms step_avg:47.65ms +[2025-09-11 14:07:06] [Rank 0] step:3021/10000 train_time:143959ms step_avg:47.65ms +[2025-09-11 14:07:07] [Rank 0] step:3041/10000 train_time:144630ms step_avg:47.56ms +[2025-09-11 14:07:07] [Rank 0] step:3041/10000 train_time:144630ms step_avg:47.56ms +[2025-09-11 14:07:08] [Rank 0] step:3061/10000 train_time:145301ms step_avg:47.47ms +[2025-09-11 14:07:08] [Rank 0] step:3061/10000 train_time:145301ms step_avg:47.47ms +[2025-09-11 14:07:08] [Rank 0] step:3081/10000 train_time:145974ms step_avg:47.38ms +[2025-09-11 14:07:08] [Rank 0] step:3081/10000 train_time:145974ms step_avg:47.38ms +[2025-09-11 14:07:09] [Rank 0] step:3101/10000 train_time:146644ms step_avg:47.29ms +[2025-09-11 14:07:09] [Rank 0] step:3101/10000 train_time:146644ms step_avg:47.29ms +[2025-09-11 14:07:10] [Rank 0] step:3121/10000 train_time:147316ms step_avg:47.20ms +[2025-09-11 14:07:10] [Rank 0] step:3121/10000 train_time:147316ms step_avg:47.20ms +[2025-09-11 14:07:10] [Rank 0] step:3141/10000 train_time:147986ms step_avg:47.11ms +[2025-09-11 14:07:10] [Rank 0] step:3141/10000 train_time:147986ms step_avg:47.11ms +[2025-09-11 14:07:11] [Rank 0] step:3161/10000 train_time:148657ms step_avg:47.03ms +[2025-09-11 14:07:11] [Rank 0] step:3161/10000 train_time:148657ms step_avg:47.03ms +[2025-09-11 14:07:12] [Rank 0] step:3181/10000 train_time:149327ms step_avg:46.94ms +[2025-09-11 14:07:12] [Rank 0] step:3181/10000 train_time:149327ms step_avg:46.94ms +[2025-09-11 14:07:12] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 14:07:12] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 14:07:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 14:07:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 14:07:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 14:07:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 14:07:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:07:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:07:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 14:07:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 14:07:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 14:07:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 14:07:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 14:07:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 14:07:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 14:07:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 14:07:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 14:07:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 14:07:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 14:07:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 14:07:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 14:07:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 14:07:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 14:07:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 14:07:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 14:07:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 14:07:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 14:07:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 14:07:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 14:07:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 14:07:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 14:07:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 14:07:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 14:07:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 14:07:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 14:07:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 14:07:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 14:07:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 14:07:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 14:07:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 14:07:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 14:07:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 14:07:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 14:07:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 14:07:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 14:07:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 14:07:22] [Rank 0] PRINT: step:3200/10000 val_loss:5.1066 total_sharp:1.3090e-04 L1_sharp:1.8551e-01 L2_sharp:2.8931e-01 L3_sharp:3.6255e-01 L4_sharp:4.6177e-01 L5_sharp:4.3984e-01 L6_sharp:4.2114e-01 L7_sharp:3.8667e-01 L8_sharp:3.5199e-01 L9_sharp:3.9038e-01 L10_sharp:4.1269e-01 L11_sharp:7.1691e-01 L12_sharp:1.3830e+00 total_fnorm:7.8500e+01 total_l1_linf:1.5462e+05 total_spectral:3.9250e+01 L1_fnorm:4.8584e-02 L2_fnorm:4.8828e-02 L3_fnorm:4.9072e-02 L4_fnorm:4.8828e-02 L5_fnorm:4.8584e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8584e-02 L8_fnorm:4.7852e-02 L9_fnorm:4.8096e-02 L10_fnorm:4.8096e-02 L11_fnorm:4.8096e-02 L12_fnorm:4.7607e-02 L1_l1linf:1.5625e-02 L2_l1linf:1.5869e-02 L3_l1linf:1.5991e-02 L4_l1linf:1.6113e-02 L5_l1linf:1.5991e-02 L6_l1linf:1.5869e-02 L7_l1linf:1.6113e-02 L8_l1linf:1.5747e-02 L9_l1linf:1.5625e-02 L10_l1linf:1.6235e-02 L11_l1linf:1.6113e-02 L12_l1linf:1.6602e-02 L1_spectral:7.8758e-04 L2_spectral:7.9140e-04 L3_spectral:7.9146e-04 L4_spectral:7.9276e-04 L5_spectral:7.9696e-04 L6_spectral:7.9775e-04 L7_spectral:7.9969e-04 L8_spectral:7.9638e-04 L9_spectral:7.9818e-04 L10_spectral:7.9876e-04 L11_spectral:7.8896e-04 L12_spectral:7.8010e-04 train_time:149980ms step_avg:46.87ms +[2025-09-11 14:07:22] [Rank 0] PRINT: step:3200/10000 
val_loss:5.1066 total_sharp:1.3090e-04 L1_sharp:1.8551e-01 L2_sharp:2.8931e-01 L3_sharp:3.6255e-01 L4_sharp:4.6177e-01 L5_sharp:4.3984e-01 L6_sharp:4.2114e-01 L7_sharp:3.8667e-01 L8_sharp:3.5199e-01 L9_sharp:3.9038e-01 L10_sharp:4.1269e-01 L11_sharp:7.1691e-01 L12_sharp:1.3830e+00 total_fnorm:7.8500e+01 total_l1_linf:1.5462e+05 total_spectral:3.9250e+01 L1_fnorm:4.8584e-02 L2_fnorm:4.8828e-02 L3_fnorm:4.9072e-02 L4_fnorm:4.8828e-02 L5_fnorm:4.8584e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8584e-02 L8_fnorm:4.7852e-02 L9_fnorm:4.8096e-02 L10_fnorm:4.8096e-02 L11_fnorm:4.8096e-02 L12_fnorm:4.7607e-02 L1_l1linf:1.5625e-02 L2_l1linf:1.5869e-02 L3_l1linf:1.5991e-02 L4_l1linf:1.6113e-02 L5_l1linf:1.5991e-02 L6_l1linf:1.5869e-02 L7_l1linf:1.6113e-02 L8_l1linf:1.5747e-02 L9_l1linf:1.5625e-02 L10_l1linf:1.6235e-02 L11_l1linf:1.6113e-02 L12_l1linf:1.6602e-02 L1_spectral:7.8758e-04 L2_spectral:7.9140e-04 L3_spectral:7.9146e-04 L4_spectral:7.9276e-04 L5_spectral:7.9696e-04 L6_spectral:7.9775e-04 L7_spectral:7.9969e-04 L8_spectral:7.9638e-04 L9_spectral:7.9818e-04 L10_spectral:7.9876e-04 L11_spectral:7.8896e-04 L12_spectral:7.8010e-04 train_time:149980ms step_avg:46.87ms +[2025-09-11 14:07:24] [Rank 0] step:3201/10000 train_time:151431ms step_avg:47.31ms +[2025-09-11 14:07:24] [Rank 0] step:3201/10000 train_time:151431ms step_avg:47.31ms +[2025-09-11 14:07:25] [Rank 0] step:3221/10000 train_time:152136ms step_avg:47.23ms +[2025-09-11 14:07:25] [Rank 0] step:3221/10000 train_time:152136ms step_avg:47.23ms +[2025-09-11 14:07:25] [Rank 0] step:3241/10000 train_time:152807ms step_avg:47.15ms +[2025-09-11 14:07:25] [Rank 0] step:3241/10000 train_time:152807ms step_avg:47.15ms +[2025-09-11 14:07:26] [Rank 0] step:3261/10000 train_time:153478ms step_avg:47.06ms +[2025-09-11 14:07:26] [Rank 0] step:3261/10000 train_time:153478ms step_avg:47.06ms +[2025-09-11 14:07:27] [Rank 0] step:3281/10000 train_time:154148ms step_avg:46.98ms +[2025-09-11 14:07:27] [Rank 0] step:3281/10000 
train_time:154148ms step_avg:46.98ms +[2025-09-11 14:07:27] [Rank 0] step:3301/10000 train_time:154819ms step_avg:46.90ms +[2025-09-11 14:07:27] [Rank 0] step:3301/10000 train_time:154819ms step_avg:46.90ms +[2025-09-11 14:07:28] [Rank 0] step:3321/10000 train_time:155489ms step_avg:46.82ms +[2025-09-11 14:07:28] [Rank 0] step:3321/10000 train_time:155489ms step_avg:46.82ms +[2025-09-11 14:07:29] [Rank 0] step:3341/10000 train_time:156159ms step_avg:46.74ms +[2025-09-11 14:07:29] [Rank 0] step:3341/10000 train_time:156159ms step_avg:46.74ms +[2025-09-11 14:07:29] [Rank 0] step:3361/10000 train_time:156830ms step_avg:46.66ms +[2025-09-11 14:07:29] [Rank 0] step:3361/10000 train_time:156830ms step_avg:46.66ms +[2025-09-11 14:07:30] [Rank 0] step:3381/10000 train_time:157500ms step_avg:46.58ms +[2025-09-11 14:07:30] [Rank 0] step:3381/10000 train_time:157500ms step_avg:46.58ms +[2025-09-11 14:07:31] [Rank 0] step:3401/10000 train_time:158171ms step_avg:46.51ms +[2025-09-11 14:07:31] [Rank 0] step:3401/10000 train_time:158171ms step_avg:46.51ms +[2025-09-11 14:07:31] [Rank 0] step:3421/10000 train_time:158840ms step_avg:46.43ms +[2025-09-11 14:07:31] [Rank 0] step:3421/10000 train_time:158840ms step_avg:46.43ms +[2025-09-11 14:07:32] [Rank 0] step:3441/10000 train_time:159509ms step_avg:46.36ms +[2025-09-11 14:07:32] [Rank 0] step:3441/10000 train_time:159509ms step_avg:46.36ms +[2025-09-11 14:07:33] [Rank 0] step:3461/10000 train_time:160179ms step_avg:46.28ms +[2025-09-11 14:07:33] [Rank 0] step:3461/10000 train_time:160179ms step_avg:46.28ms +[2025-09-11 14:07:33] [Rank 0] step:3481/10000 train_time:160849ms step_avg:46.21ms +[2025-09-11 14:07:33] [Rank 0] step:3481/10000 train_time:160849ms step_avg:46.21ms +[2025-09-11 14:07:34] [Rank 0] step:3501/10000 train_time:161519ms step_avg:46.14ms +[2025-09-11 14:07:34] [Rank 0] step:3501/10000 train_time:161519ms step_avg:46.14ms +[2025-09-11 14:07:35] [Rank 0] step:3521/10000 train_time:162188ms step_avg:46.06ms 
+[2025-09-11 14:07:35] [Rank 0] step:3521/10000 train_time:162188ms step_avg:46.06ms +[2025-09-11 14:07:35] [Rank 0] step:3541/10000 train_time:162858ms step_avg:45.99ms +[2025-09-11 14:07:35] [Rank 0] step:3541/10000 train_time:162858ms step_avg:45.99ms +[2025-09-11 14:07:36] [Rank 0] step:3561/10000 train_time:163528ms step_avg:45.92ms +[2025-09-11 14:07:36] [Rank 0] step:3561/10000 train_time:163528ms step_avg:45.92ms +[2025-09-11 14:07:37] [Rank 0] step:3581/10000 train_time:164197ms step_avg:45.85ms +[2025-09-11 14:07:37] [Rank 0] step:3581/10000 train_time:164197ms step_avg:45.85ms +[2025-09-11 14:07:37] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 14:07:37] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 14:07:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 14:07:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 14:07:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 14:07:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 14:07:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:07:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:07:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 14:07:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 14:07:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 14:07:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 14:07:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 14:07:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 14:07:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 14:07:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 14:07:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 14:07:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 14:07:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 14:07:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 14:07:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 14:07:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 14:07:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 14:07:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 14:07:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 14:07:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 14:07:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 14:07:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 14:07:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 14:07:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 14:07:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 14:07:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 14:07:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 14:07:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 14:07:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 14:07:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 14:07:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 14:07:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 14:07:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 14:07:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 14:07:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 14:07:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 14:07:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 14:07:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 14:07:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 14:07:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 14:07:48] [Rank 0] PRINT: step:3600/10000 val_loss:5.0622 total_sharp:1.6407e-04 L1_sharp:1.8150e-01 L2_sharp:2.6207e-01 L3_sharp:3.4716e-01 L4_sharp:4.4011e-01 L5_sharp:4.7778e-01 L6_sharp:4.7282e-01 L7_sharp:4.2396e-01 L8_sharp:4.1137e-01 L9_sharp:4.1243e-01 L10_sharp:4.1543e-01 L11_sharp:4.2966e-01 L12_sharp:6.6131e-01 total_fnorm:7.0000e+01 total_l1_linf:1.3107e+05 total_spectral:3.5000e+01 L1_fnorm:4.8584e-02 L2_fnorm:4.8828e-02 L3_fnorm:4.8828e-02 L4_fnorm:4.8584e-02 L5_fnorm:4.8340e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8096e-02 L8_fnorm:4.7607e-02 L9_fnorm:4.7852e-02 L10_fnorm:4.7852e-02 L11_fnorm:4.8096e-02 L12_fnorm:4.7363e-02 L1_l1linf:1.5015e-02 L2_l1linf:1.5381e-02 L3_l1linf:1.5625e-02 L4_l1linf:1.5564e-02 L5_l1linf:1.5869e-02 L6_l1linf:1.5564e-02 L7_l1linf:1.5320e-02 L8_l1linf:1.5320e-02 L9_l1linf:1.5320e-02 L10_l1linf:1.5625e-02 L11_l1linf:1.5442e-02 L12_l1linf:1.5747e-02 L1_spectral:7.9148e-04 L2_spectral:7.9118e-04 L3_spectral:7.9913e-04 L4_spectral:8.0108e-04 L5_spectral:7.9846e-04 L6_spectral:7.9876e-04 L7_spectral:8.0096e-04 L8_spectral:8.0025e-04 L9_spectral:7.9383e-04 L10_spectral:7.9764e-04 L11_spectral:7.9227e-04 L12_spectral:7.8263e-04 train_time:164848ms step_avg:45.79ms +[2025-09-11 14:07:48] [Rank 0] PRINT: step:3600/10000 val_loss:5.0622 total_sharp:1.6407e-04 L1_sharp:1.8150e-01 L2_sharp:2.6207e-01 L3_sharp:3.4716e-01 L4_sharp:4.4011e-01 L5_sharp:4.7778e-01 L6_sharp:4.7282e-01 L7_sharp:4.2396e-01 L8_sharp:4.1137e-01 L9_sharp:4.1243e-01 L10_sharp:4.1543e-01 L11_sharp:4.2966e-01 L12_sharp:6.6131e-01 total_fnorm:7.0000e+01 total_l1_linf:1.3107e+05 total_spectral:3.5000e+01 L1_fnorm:4.8584e-02 L2_fnorm:4.8828e-02 L3_fnorm:4.8828e-02 L4_fnorm:4.8584e-02 L5_fnorm:4.8340e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8096e-02 L8_fnorm:4.7607e-02 L9_fnorm:4.7852e-02 L10_fnorm:4.7852e-02 L11_fnorm:4.8096e-02 L12_fnorm:4.7363e-02 L1_l1linf:1.5015e-02 L2_l1linf:1.5381e-02 L3_l1linf:1.5625e-02 L4_l1linf:1.5564e-02 L5_l1linf:1.5869e-02 
L6_l1linf:1.5564e-02 L7_l1linf:1.5320e-02 L8_l1linf:1.5320e-02 L9_l1linf:1.5320e-02 L10_l1linf:1.5625e-02 L11_l1linf:1.5442e-02 L12_l1linf:1.5747e-02 L1_spectral:7.9148e-04 L2_spectral:7.9118e-04 L3_spectral:7.9913e-04 L4_spectral:8.0108e-04 L5_spectral:7.9846e-04 L6_spectral:7.9876e-04 L7_spectral:8.0096e-04 L8_spectral:8.0025e-04 L9_spectral:7.9383e-04 L10_spectral:7.9764e-04 L11_spectral:7.9227e-04 L12_spectral:7.8263e-04 train_time:164848ms step_avg:45.79ms +[2025-09-11 14:07:49] [Rank 0] step:3601/10000 train_time:166357ms step_avg:46.20ms +[2025-09-11 14:07:49] [Rank 0] step:3601/10000 train_time:166357ms step_avg:46.20ms +[2025-09-11 14:07:50] [Rank 0] step:3621/10000 train_time:167032ms step_avg:46.13ms +[2025-09-11 14:07:50] [Rank 0] step:3621/10000 train_time:167032ms step_avg:46.13ms +[2025-09-11 14:07:51] [Rank 0] step:3641/10000 train_time:167701ms step_avg:46.06ms +[2025-09-11 14:07:51] [Rank 0] step:3641/10000 train_time:167701ms step_avg:46.06ms +[2025-09-11 14:07:51] [Rank 0] step:3661/10000 train_time:168370ms step_avg:45.99ms +[2025-09-11 14:07:51] [Rank 0] step:3661/10000 train_time:168370ms step_avg:45.99ms +[2025-09-11 14:07:52] [Rank 0] step:3681/10000 train_time:169039ms step_avg:45.92ms +[2025-09-11 14:07:52] [Rank 0] step:3681/10000 train_time:169039ms step_avg:45.92ms +[2025-09-11 14:07:53] [Rank 0] step:3701/10000 train_time:169708ms step_avg:45.85ms +[2025-09-11 14:07:53] [Rank 0] step:3701/10000 train_time:169708ms step_avg:45.85ms +[2025-09-11 14:07:53] [Rank 0] step:3721/10000 train_time:170386ms step_avg:45.79ms +[2025-09-11 14:07:53] [Rank 0] step:3721/10000 train_time:170386ms step_avg:45.79ms +[2025-09-11 14:07:54] [Rank 0] step:3741/10000 train_time:171066ms step_avg:45.73ms +[2025-09-11 14:07:54] [Rank 0] step:3741/10000 train_time:171066ms step_avg:45.73ms +[2025-09-11 14:07:55] [Rank 0] step:3761/10000 train_time:171746ms step_avg:45.67ms +[2025-09-11 14:07:55] [Rank 0] step:3761/10000 train_time:171746ms step_avg:45.67ms 
+[2025-09-11 14:07:55] [Rank 0] step:3781/10000 train_time:172427ms step_avg:45.60ms +[2025-09-11 14:07:55] [Rank 0] step:3781/10000 train_time:172427ms step_avg:45.60ms +[2025-09-11 14:07:56] [Rank 0] step:3801/10000 train_time:173106ms step_avg:45.54ms +[2025-09-11 14:07:56] [Rank 0] step:3801/10000 train_time:173106ms step_avg:45.54ms +[2025-09-11 14:07:57] [Rank 0] step:3821/10000 train_time:173786ms step_avg:45.48ms +[2025-09-11 14:07:57] [Rank 0] step:3821/10000 train_time:173786ms step_avg:45.48ms +[2025-09-11 14:07:57] [Rank 0] step:3841/10000 train_time:174465ms step_avg:45.42ms +[2025-09-11 14:07:57] [Rank 0] step:3841/10000 train_time:174465ms step_avg:45.42ms +[2025-09-11 14:07:58] [Rank 0] step:3861/10000 train_time:175144ms step_avg:45.36ms +[2025-09-11 14:07:58] [Rank 0] step:3861/10000 train_time:175144ms step_avg:45.36ms +[2025-09-11 14:07:59] [Rank 0] step:3881/10000 train_time:175824ms step_avg:45.30ms +[2025-09-11 14:07:59] [Rank 0] step:3881/10000 train_time:175824ms step_avg:45.30ms +[2025-09-11 14:07:59] [Rank 0] step:3901/10000 train_time:176504ms step_avg:45.25ms +[2025-09-11 14:07:59] [Rank 0] step:3901/10000 train_time:176504ms step_avg:45.25ms +[2025-09-11 14:08:00] [Rank 0] step:3921/10000 train_time:177185ms step_avg:45.19ms +[2025-09-11 14:08:00] [Rank 0] step:3921/10000 train_time:177185ms step_avg:45.19ms +[2025-09-11 14:08:01] [Rank 0] step:3941/10000 train_time:177866ms step_avg:45.13ms +[2025-09-11 14:08:01] [Rank 0] step:3941/10000 train_time:177866ms step_avg:45.13ms +[2025-09-11 14:08:01] [Rank 0] step:3961/10000 train_time:178548ms step_avg:45.08ms +[2025-09-11 14:08:01] [Rank 0] step:3961/10000 train_time:178548ms step_avg:45.08ms +[2025-09-11 14:08:02] [Rank 0] step:3981/10000 train_time:179228ms step_avg:45.02ms +[2025-09-11 14:08:02] [Rank 0] step:3981/10000 train_time:179228ms step_avg:45.02ms +[2025-09-11 14:08:03] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 14:08:03] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 14:08:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 14:08:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 14:08:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 14:08:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 14:08:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:08:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:08:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 14:08:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 14:08:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 14:08:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 14:08:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 14:08:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 14:08:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 14:08:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 14:08:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 14:08:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 14:08:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 14:08:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 14:08:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 14:08:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 14:08:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 14:08:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 14:08:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 14:08:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 14:08:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 14:08:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 14:08:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 14:08:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 14:08:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 14:08:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 14:08:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 14:08:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 14:08:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 14:08:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 14:08:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 14:08:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 14:08:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 14:08:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 14:08:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 14:08:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 14:08:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 14:08:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 14:08:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 14:08:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 14:08:17] [Rank 0] PRINT: step:4000/10000 val_loss:5.0042 total_sharp:1.9224e-04 L1_sharp:2.0160e-01 L2_sharp:3.4448e-01 L3_sharp:4.2693e-01 L4_sharp:6.2346e-01 L5_sharp:6.7487e-01 L6_sharp:7.6657e-01 L7_sharp:9.2350e-01 L8_sharp:1.2255e+00 L9_sharp:2.1467e+00 L10_sharp:2.4472e+00 L11_sharp:3.5097e+00 L12_sharp:5.6193e+00 total_fnorm:8.1500e+01 total_l1_linf:1.5565e+05 total_spectral:4.0750e+01 L1_fnorm:4.8584e-02 L2_fnorm:4.8828e-02 L3_fnorm:4.8828e-02 L4_fnorm:4.8584e-02 L5_fnorm:4.8584e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8096e-02 L8_fnorm:4.7607e-02 L9_fnorm:4.8096e-02 L10_fnorm:4.7607e-02 L11_fnorm:4.8096e-02 L12_fnorm:4.7363e-02 L1_l1linf:1.5076e-02 L2_l1linf:1.5442e-02 L3_l1linf:1.5564e-02 L4_l1linf:1.6113e-02 L5_l1linf:1.5747e-02 L6_l1linf:1.5564e-02 L7_l1linf:1.5503e-02 L8_l1linf:1.5869e-02 L9_l1linf:1.5625e-02 L10_l1linf:1.5747e-02 L11_l1linf:1.5991e-02 L12_l1linf:1.6357e-02 L1_spectral:7.8826e-04 L2_spectral:7.9038e-04 L3_spectral:7.9272e-04 L4_spectral:7.8763e-04 L5_spectral:7.9139e-04 L6_spectral:8.0854e-04 L7_spectral:7.9755e-04 L8_spectral:7.9370e-04 L9_spectral:7.9227e-04 L10_spectral:7.8838e-04 L11_spectral:7.9168e-04 L12_spectral:7.6690e-04 train_time:179889ms step_avg:44.97ms +[2025-09-11 14:08:17] [Rank 0] PRINT: step:4000/10000 
val_loss:5.0042 total_sharp:1.9224e-04 L1_sharp:2.0160e-01 L2_sharp:3.4448e-01 L3_sharp:4.2693e-01 L4_sharp:6.2346e-01 L5_sharp:6.7487e-01 L6_sharp:7.6657e-01 L7_sharp:9.2350e-01 L8_sharp:1.2255e+00 L9_sharp:2.1467e+00 L10_sharp:2.4472e+00 L11_sharp:3.5097e+00 L12_sharp:5.6193e+00 total_fnorm:8.1500e+01 total_l1_linf:1.5565e+05 total_spectral:4.0750e+01 L1_fnorm:4.8584e-02 L2_fnorm:4.8828e-02 L3_fnorm:4.8828e-02 L4_fnorm:4.8584e-02 L5_fnorm:4.8584e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8096e-02 L8_fnorm:4.7607e-02 L9_fnorm:4.8096e-02 L10_fnorm:4.7607e-02 L11_fnorm:4.8096e-02 L12_fnorm:4.7363e-02 L1_l1linf:1.5076e-02 L2_l1linf:1.5442e-02 L3_l1linf:1.5564e-02 L4_l1linf:1.6113e-02 L5_l1linf:1.5747e-02 L6_l1linf:1.5564e-02 L7_l1linf:1.5503e-02 L8_l1linf:1.5869e-02 L9_l1linf:1.5625e-02 L10_l1linf:1.5747e-02 L11_l1linf:1.5991e-02 L12_l1linf:1.6357e-02 L1_spectral:7.8826e-04 L2_spectral:7.9038e-04 L3_spectral:7.9272e-04 L4_spectral:7.8763e-04 L5_spectral:7.9139e-04 L6_spectral:8.0854e-04 L7_spectral:7.9755e-04 L8_spectral:7.9370e-04 L9_spectral:7.9227e-04 L10_spectral:7.8838e-04 L11_spectral:7.9168e-04 L12_spectral:7.6690e-04 train_time:179889ms step_avg:44.97ms +[2025-09-11 14:08:18] [Rank 0] step:4001/10000 train_time:181389ms step_avg:45.34ms +[2025-09-11 14:08:18] [Rank 0] step:4001/10000 train_time:181389ms step_avg:45.34ms +[2025-09-11 14:08:19] [Rank 0] step:4021/10000 train_time:182091ms step_avg:45.28ms +[2025-09-11 14:08:19] [Rank 0] step:4021/10000 train_time:182091ms step_avg:45.28ms +[2025-09-11 14:08:20] [Rank 0] step:4041/10000 train_time:182772ms step_avg:45.23ms +[2025-09-11 14:08:20] [Rank 0] step:4041/10000 train_time:182772ms step_avg:45.23ms +[2025-09-11 14:08:20] [Rank 0] step:4061/10000 train_time:183452ms step_avg:45.17ms +[2025-09-11 14:08:20] [Rank 0] step:4061/10000 train_time:183452ms step_avg:45.17ms +[2025-09-11 14:08:21] [Rank 0] step:4081/10000 train_time:184133ms step_avg:45.12ms +[2025-09-11 14:08:21] [Rank 0] step:4081/10000 
train_time:184133ms step_avg:45.12ms +[2025-09-11 14:08:22] [Rank 0] step:4101/10000 train_time:184814ms step_avg:45.07ms +[2025-09-11 14:08:22] [Rank 0] step:4101/10000 train_time:184814ms step_avg:45.07ms +[2025-09-11 14:08:22] [Rank 0] step:4121/10000 train_time:185494ms step_avg:45.01ms +[2025-09-11 14:08:22] [Rank 0] step:4121/10000 train_time:185494ms step_avg:45.01ms +[2025-09-11 14:08:23] [Rank 0] step:4141/10000 train_time:186174ms step_avg:44.96ms +[2025-09-11 14:08:23] [Rank 0] step:4141/10000 train_time:186174ms step_avg:44.96ms +[2025-09-11 14:08:24] [Rank 0] step:4161/10000 train_time:186855ms step_avg:44.91ms +[2025-09-11 14:08:24] [Rank 0] step:4161/10000 train_time:186855ms step_avg:44.91ms +[2025-09-11 14:08:24] [Rank 0] step:4181/10000 train_time:187535ms step_avg:44.85ms +[2025-09-11 14:08:24] [Rank 0] step:4181/10000 train_time:187535ms step_avg:44.85ms +[2025-09-11 14:08:25] [Rank 0] step:4201/10000 train_time:188216ms step_avg:44.80ms +[2025-09-11 14:08:25] [Rank 0] step:4201/10000 train_time:188216ms step_avg:44.80ms +[2025-09-11 14:08:26] [Rank 0] step:4221/10000 train_time:188897ms step_avg:44.75ms +[2025-09-11 14:08:26] [Rank 0] step:4221/10000 train_time:188897ms step_avg:44.75ms +[2025-09-11 14:08:26] [Rank 0] step:4241/10000 train_time:189577ms step_avg:44.70ms +[2025-09-11 14:08:26] [Rank 0] step:4241/10000 train_time:189577ms step_avg:44.70ms +[2025-09-11 14:08:27] [Rank 0] step:4261/10000 train_time:190257ms step_avg:44.65ms +[2025-09-11 14:08:27] [Rank 0] step:4261/10000 train_time:190257ms step_avg:44.65ms +[2025-09-11 14:08:28] [Rank 0] step:4281/10000 train_time:190939ms step_avg:44.60ms +[2025-09-11 14:08:28] [Rank 0] step:4281/10000 train_time:190939ms step_avg:44.60ms +[2025-09-11 14:08:28] [Rank 0] step:4301/10000 train_time:191620ms step_avg:44.55ms +[2025-09-11 14:08:28] [Rank 0] step:4301/10000 train_time:191620ms step_avg:44.55ms +[2025-09-11 14:08:29] [Rank 0] step:4321/10000 train_time:192300ms step_avg:44.50ms 
+[2025-09-11 14:08:29] [Rank 0] step:4321/10000 train_time:192300ms step_avg:44.50ms +[2025-09-11 14:08:30] [Rank 0] step:4341/10000 train_time:192988ms step_avg:44.46ms +[2025-09-11 14:08:30] [Rank 0] step:4341/10000 train_time:192988ms step_avg:44.46ms +[2025-09-11 14:08:31] [Rank 0] step:4361/10000 train_time:193668ms step_avg:44.41ms +[2025-09-11 14:08:31] [Rank 0] step:4361/10000 train_time:193668ms step_avg:44.41ms +[2025-09-11 14:08:31] [Rank 0] step:4381/10000 train_time:194350ms step_avg:44.36ms +[2025-09-11 14:08:31] [Rank 0] step:4381/10000 train_time:194350ms step_avg:44.36ms +[2025-09-11 14:08:32] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 14:08:32] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 14:08:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 14:08:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 14:08:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 14:08:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 14:08:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:08:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:08:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 14:08:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 14:08:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 14:08:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 14:08:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 14:08:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 14:08:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 14:08:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 14:08:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 14:08:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 14:08:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 14:08:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 14:08:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 14:08:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 14:08:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 14:08:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 14:08:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 14:08:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 14:08:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 14:08:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 14:08:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 14:08:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 14:08:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 14:08:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 14:08:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 14:08:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 14:08:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 14:08:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 14:08:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 14:08:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 14:08:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 14:08:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 14:08:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 14:08:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 14:08:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 14:08:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 14:08:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 14:08:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 14:08:42] [Rank 0] PRINT: step:4400/10000 val_loss:4.9748 total_sharp:1.3159e-04 L1_sharp:1.5172e-01 L2_sharp:2.1679e-01 L3_sharp:3.0049e-01 L4_sharp:4.0332e-01 L5_sharp:4.5429e-01 L6_sharp:4.7325e-01 L7_sharp:4.5623e-01 L8_sharp:5.0943e-01 L9_sharp:5.4997e-01 L10_sharp:5.6886e-01 L11_sharp:6.2358e-01 L12_sharp:1.1237e+00 total_fnorm:7.1000e+01 total_l1_linf:1.3056e+05 total_spectral:3.5500e+01 L1_fnorm:4.8096e-02 L2_fnorm:4.8584e-02 L3_fnorm:4.8828e-02 L4_fnorm:4.8340e-02 L5_fnorm:4.8096e-02 L6_fnorm:4.8340e-02 L7_fnorm:4.8096e-02 L8_fnorm:4.7607e-02 L9_fnorm:4.7852e-02 L10_fnorm:4.7852e-02 L11_fnorm:4.7852e-02 L12_fnorm:4.7119e-02 L1_l1linf:1.4404e-02 L2_l1linf:1.4648e-02 L3_l1linf:1.5015e-02 L4_l1linf:1.5198e-02 L5_l1linf:1.5137e-02 L6_l1linf:1.5503e-02 L7_l1linf:1.5320e-02 L8_l1linf:1.5198e-02 L9_l1linf:1.5503e-02 L10_l1linf:1.5564e-02 L11_l1linf:1.5320e-02 L12_l1linf:1.5747e-02 L1_spectral:7.9494e-04 L2_spectral:7.9891e-04 L3_spectral:7.9489e-04 L4_spectral:7.9195e-04 L5_spectral:7.9204e-04 L6_spectral:8.0221e-04 L7_spectral:8.0676e-04 L8_spectral:7.8616e-04 L9_spectral:7.9682e-04 L10_spectral:7.9244e-04 L11_spectral:7.8969e-04 L12_spectral:7.6855e-04 train_time:195010ms step_avg:44.32ms +[2025-09-11 14:08:42] [Rank 0] PRINT: step:4400/10000 val_loss:4.9748 total_sharp:1.3159e-04 L1_sharp:1.5172e-01 L2_sharp:2.1679e-01 L3_sharp:3.0049e-01 L4_sharp:4.0332e-01 L5_sharp:4.5429e-01 L6_sharp:4.7325e-01 L7_sharp:4.5623e-01 L8_sharp:5.0943e-01 L9_sharp:5.4997e-01 L10_sharp:5.6886e-01 L11_sharp:6.2358e-01 L12_sharp:1.1237e+00 total_fnorm:7.1000e+01 total_l1_linf:1.3056e+05 total_spectral:3.5500e+01 L1_fnorm:4.8096e-02 L2_fnorm:4.8584e-02 L3_fnorm:4.8828e-02 L4_fnorm:4.8340e-02 L5_fnorm:4.8096e-02 L6_fnorm:4.8340e-02 L7_fnorm:4.8096e-02 L8_fnorm:4.7607e-02 L9_fnorm:4.7852e-02 L10_fnorm:4.7852e-02 L11_fnorm:4.7852e-02 L12_fnorm:4.7119e-02 L1_l1linf:1.4404e-02 L2_l1linf:1.4648e-02 L3_l1linf:1.5015e-02 L4_l1linf:1.5198e-02 L5_l1linf:1.5137e-02 
L6_l1linf:1.5503e-02 L7_l1linf:1.5320e-02 L8_l1linf:1.5198e-02 L9_l1linf:1.5503e-02 L10_l1linf:1.5564e-02 L11_l1linf:1.5320e-02 L12_l1linf:1.5747e-02 L1_spectral:7.9494e-04 L2_spectral:7.9891e-04 L3_spectral:7.9489e-04 L4_spectral:7.9195e-04 L5_spectral:7.9204e-04 L6_spectral:8.0221e-04 L7_spectral:8.0676e-04 L8_spectral:7.8616e-04 L9_spectral:7.9682e-04 L10_spectral:7.9244e-04 L11_spectral:7.8969e-04 L12_spectral:7.6855e-04 train_time:195010ms step_avg:44.32ms +[2025-09-11 14:08:44] [Rank 0] step:4401/10000 train_time:196543ms step_avg:44.66ms +[2025-09-11 14:08:44] [Rank 0] step:4401/10000 train_time:196543ms step_avg:44.66ms +[2025-09-11 14:08:44] [Rank 0] step:4421/10000 train_time:197242ms step_avg:44.61ms +[2025-09-11 14:08:44] [Rank 0] step:4421/10000 train_time:197242ms step_avg:44.61ms +[2025-09-11 14:08:45] [Rank 0] step:4441/10000 train_time:197925ms step_avg:44.57ms +[2025-09-11 14:08:45] [Rank 0] step:4441/10000 train_time:197925ms step_avg:44.57ms +[2025-09-11 14:08:46] [Rank 0] step:4461/10000 train_time:198609ms step_avg:44.52ms +[2025-09-11 14:08:46] [Rank 0] step:4461/10000 train_time:198609ms step_avg:44.52ms +[2025-09-11 14:08:47] [Rank 0] step:4481/10000 train_time:199295ms step_avg:44.48ms +[2025-09-11 14:08:47] [Rank 0] step:4481/10000 train_time:199295ms step_avg:44.48ms +[2025-09-11 14:08:47] [Rank 0] step:4501/10000 train_time:199978ms step_avg:44.43ms +[2025-09-11 14:08:47] [Rank 0] step:4501/10000 train_time:199978ms step_avg:44.43ms +[2025-09-11 14:08:48] [Rank 0] step:4521/10000 train_time:200662ms step_avg:44.38ms +[2025-09-11 14:08:48] [Rank 0] step:4521/10000 train_time:200662ms step_avg:44.38ms +[2025-09-11 14:08:49] [Rank 0] step:4541/10000 train_time:201345ms step_avg:44.34ms +[2025-09-11 14:08:49] [Rank 0] step:4541/10000 train_time:201345ms step_avg:44.34ms +[2025-09-11 14:08:49] [Rank 0] step:4561/10000 train_time:202027ms step_avg:44.29ms +[2025-09-11 14:08:49] [Rank 0] step:4561/10000 train_time:202027ms step_avg:44.29ms 
+[2025-09-11 14:08:50] [Rank 0] step:4581/10000 train_time:202710ms step_avg:44.25ms +[2025-09-11 14:08:50] [Rank 0] step:4581/10000 train_time:202710ms step_avg:44.25ms +[2025-09-11 14:08:51] [Rank 0] step:4601/10000 train_time:203393ms step_avg:44.21ms +[2025-09-11 14:08:51] [Rank 0] step:4601/10000 train_time:203393ms step_avg:44.21ms +[2025-09-11 14:08:51] [Rank 0] step:4621/10000 train_time:204076ms step_avg:44.16ms +[2025-09-11 14:08:51] [Rank 0] step:4621/10000 train_time:204076ms step_avg:44.16ms +[2025-09-11 14:08:52] [Rank 0] step:4641/10000 train_time:204758ms step_avg:44.12ms +[2025-09-11 14:08:52] [Rank 0] step:4641/10000 train_time:204758ms step_avg:44.12ms +[2025-09-11 14:08:53] [Rank 0] step:4661/10000 train_time:205441ms step_avg:44.08ms +[2025-09-11 14:08:53] [Rank 0] step:4661/10000 train_time:205441ms step_avg:44.08ms +[2025-09-11 14:08:53] [Rank 0] step:4681/10000 train_time:206124ms step_avg:44.03ms +[2025-09-11 14:08:53] [Rank 0] step:4681/10000 train_time:206124ms step_avg:44.03ms +[2025-09-11 14:08:54] [Rank 0] step:4701/10000 train_time:206805ms step_avg:43.99ms +[2025-09-11 14:08:54] [Rank 0] step:4701/10000 train_time:206805ms step_avg:43.99ms +[2025-09-11 14:08:55] [Rank 0] step:4721/10000 train_time:207488ms step_avg:43.95ms +[2025-09-11 14:08:55] [Rank 0] step:4721/10000 train_time:207488ms step_avg:43.95ms +[2025-09-11 14:08:55] [Rank 0] step:4741/10000 train_time:208170ms step_avg:43.91ms +[2025-09-11 14:08:55] [Rank 0] step:4741/10000 train_time:208170ms step_avg:43.91ms +[2025-09-11 14:08:56] [Rank 0] step:4761/10000 train_time:208854ms step_avg:43.87ms +[2025-09-11 14:08:56] [Rank 0] step:4761/10000 train_time:208854ms step_avg:43.87ms +[2025-09-11 14:08:57] [Rank 0] step:4781/10000 train_time:209536ms step_avg:43.83ms +[2025-09-11 14:08:57] [Rank 0] step:4781/10000 train_time:209536ms step_avg:43.83ms +[2025-09-11 14:08:57] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 14:08:57] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 14:08:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 14:08:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 14:09:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 14:09:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 14:09:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:09:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:09:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 14:09:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 14:09:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 14:09:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 14:09:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 14:09:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 14:09:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 14:09:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 14:09:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 14:09:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 14:09:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 14:09:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 14:09:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 14:09:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 14:09:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 14:09:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 14:09:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 14:09:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 14:09:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 14:09:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 14:09:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 14:09:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 14:09:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 14:09:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 14:09:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 14:09:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 14:09:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 14:09:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 14:09:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 14:09:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 14:09:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 14:09:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 14:09:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 14:09:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 14:09:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 14:09:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 14:09:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 14:09:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 14:09:08] [Rank 0] PRINT: step:4800/10000 val_loss:4.9390 total_sharp:1.5096e-04 L1_sharp:1.4276e-01 L2_sharp:2.8317e-01 L3_sharp:3.9121e-01 L4_sharp:4.9251e-01 L5_sharp:5.2123e-01 L6_sharp:6.9412e-01 L7_sharp:6.9779e-01 L8_sharp:9.0497e-01 L9_sharp:1.6329e+00 L10_sharp:2.6263e+00 L11_sharp:4.6767e+00 L12_sharp:4.0282e+00 total_fnorm:7.5500e+01 total_l1_linf:1.4541e+05 total_spectral:3.7750e+01 L1_fnorm:4.8096e-02 L2_fnorm:4.8584e-02 L3_fnorm:4.8584e-02 L4_fnorm:4.8340e-02 L5_fnorm:4.8096e-02 L6_fnorm:4.8096e-02 L7_fnorm:4.7852e-02 L8_fnorm:4.7119e-02 L9_fnorm:4.7607e-02 L10_fnorm:4.7607e-02 L11_fnorm:4.8096e-02 L12_fnorm:4.7363e-02 L1_l1linf:1.3672e-02 L2_l1linf:1.4404e-02 L3_l1linf:1.4771e-02 L4_l1linf:1.4587e-02 L5_l1linf:1.4526e-02 L6_l1linf:1.4709e-02 L7_l1linf:1.4587e-02 L8_l1linf:1.4587e-02 L9_l1linf:1.4709e-02 L10_l1linf:1.5198e-02 L11_l1linf:1.5625e-02 L12_l1linf:1.5991e-02 L1_spectral:7.9653e-04 L2_spectral:7.8952e-04 L3_spectral:7.9679e-04 L4_spectral:7.9532e-04 L5_spectral:7.9130e-04 L6_spectral:8.0147e-04 L7_spectral:8.0641e-04 L8_spectral:8.0020e-04 L9_spectral:7.9725e-04 L10_spectral:7.9323e-04 L11_spectral:7.8938e-04 L12_spectral:7.6161e-04 train_time:210198ms step_avg:43.79ms +[2025-09-11 14:09:08] [Rank 0] PRINT: step:4800/10000 
val_loss:4.9390 total_sharp:1.5096e-04 L1_sharp:1.4276e-01 L2_sharp:2.8317e-01 L3_sharp:3.9121e-01 L4_sharp:4.9251e-01 L5_sharp:5.2123e-01 L6_sharp:6.9412e-01 L7_sharp:6.9779e-01 L8_sharp:9.0497e-01 L9_sharp:1.6329e+00 L10_sharp:2.6263e+00 L11_sharp:4.6767e+00 L12_sharp:4.0282e+00 total_fnorm:7.5500e+01 total_l1_linf:1.4541e+05 total_spectral:3.7750e+01 L1_fnorm:4.8096e-02 L2_fnorm:4.8584e-02 L3_fnorm:4.8584e-02 L4_fnorm:4.8340e-02 L5_fnorm:4.8096e-02 L6_fnorm:4.8096e-02 L7_fnorm:4.7852e-02 L8_fnorm:4.7119e-02 L9_fnorm:4.7607e-02 L10_fnorm:4.7607e-02 L11_fnorm:4.8096e-02 L12_fnorm:4.7363e-02 L1_l1linf:1.3672e-02 L2_l1linf:1.4404e-02 L3_l1linf:1.4771e-02 L4_l1linf:1.4587e-02 L5_l1linf:1.4526e-02 L6_l1linf:1.4709e-02 L7_l1linf:1.4587e-02 L8_l1linf:1.4587e-02 L9_l1linf:1.4709e-02 L10_l1linf:1.5198e-02 L11_l1linf:1.5625e-02 L12_l1linf:1.5991e-02 L1_spectral:7.9653e-04 L2_spectral:7.8952e-04 L3_spectral:7.9679e-04 L4_spectral:7.9532e-04 L5_spectral:7.9130e-04 L6_spectral:8.0147e-04 L7_spectral:8.0641e-04 L8_spectral:8.0020e-04 L9_spectral:7.9725e-04 L10_spectral:7.9323e-04 L11_spectral:7.8938e-04 L12_spectral:7.6161e-04 train_time:210198ms step_avg:43.79ms +[2025-09-11 14:09:09] [Rank 0] step:4801/10000 train_time:211733ms step_avg:44.10ms +[2025-09-11 14:09:09] [Rank 0] step:4801/10000 train_time:211733ms step_avg:44.10ms +[2025-09-11 14:09:10] [Rank 0] step:4821/10000 train_time:212423ms step_avg:44.06ms +[2025-09-11 14:09:10] [Rank 0] step:4821/10000 train_time:212423ms step_avg:44.06ms +[2025-09-11 14:09:11] [Rank 0] step:4841/10000 train_time:213358ms step_avg:44.07ms +[2025-09-11 14:09:11] [Rank 0] step:4841/10000 train_time:213358ms step_avg:44.07ms +[2025-09-11 14:09:12] [Rank 0] step:4861/10000 train_time:214043ms step_avg:44.03ms +[2025-09-11 14:09:12] [Rank 0] step:4861/10000 train_time:214043ms step_avg:44.03ms +[2025-09-11 14:09:12] [Rank 0] step:4881/10000 train_time:214727ms step_avg:43.99ms +[2025-09-11 14:09:12] [Rank 0] step:4881/10000 
train_time:214727ms step_avg:43.99ms +[2025-09-11 14:09:13] [Rank 0] step:4901/10000 train_time:215413ms step_avg:43.95ms +[2025-09-11 14:09:13] [Rank 0] step:4901/10000 train_time:215413ms step_avg:43.95ms +[2025-09-11 14:09:14] [Rank 0] step:4921/10000 train_time:216099ms step_avg:43.91ms +[2025-09-11 14:09:14] [Rank 0] step:4921/10000 train_time:216099ms step_avg:43.91ms +[2025-09-11 14:09:14] [Rank 0] step:4941/10000 train_time:216783ms step_avg:43.87ms +[2025-09-11 14:09:14] [Rank 0] step:4941/10000 train_time:216783ms step_avg:43.87ms +[2025-09-11 14:09:15] [Rank 0] step:4961/10000 train_time:217467ms step_avg:43.84ms +[2025-09-11 14:09:15] [Rank 0] step:4961/10000 train_time:217467ms step_avg:43.84ms +[2025-09-11 14:09:16] [Rank 0] step:4981/10000 train_time:218150ms step_avg:43.80ms +[2025-09-11 14:09:16] [Rank 0] step:4981/10000 train_time:218150ms step_avg:43.80ms +[2025-09-11 14:09:17] [Rank 0] step:5001/10000 train_time:218836ms step_avg:43.76ms +[2025-09-11 14:09:17] [Rank 0] step:5001/10000 train_time:218836ms step_avg:43.76ms +[2025-09-11 14:09:17] [Rank 0] step:5021/10000 train_time:219520ms step_avg:43.72ms +[2025-09-11 14:09:17] [Rank 0] step:5021/10000 train_time:219520ms step_avg:43.72ms +[2025-09-11 14:09:18] [Rank 0] step:5041/10000 train_time:220204ms step_avg:43.68ms +[2025-09-11 14:09:18] [Rank 0] step:5041/10000 train_time:220204ms step_avg:43.68ms +[2025-09-11 14:09:19] [Rank 0] step:5061/10000 train_time:220887ms step_avg:43.64ms +[2025-09-11 14:09:19] [Rank 0] step:5061/10000 train_time:220887ms step_avg:43.64ms +[2025-09-11 14:09:19] [Rank 0] step:5081/10000 train_time:221569ms step_avg:43.61ms +[2025-09-11 14:09:19] [Rank 0] step:5081/10000 train_time:221569ms step_avg:43.61ms +[2025-09-11 14:09:20] [Rank 0] step:5101/10000 train_time:222253ms step_avg:43.57ms +[2025-09-11 14:09:20] [Rank 0] step:5101/10000 train_time:222253ms step_avg:43.57ms +[2025-09-11 14:09:21] [Rank 0] step:5121/10000 train_time:222937ms step_avg:43.53ms 
+[2025-09-11 14:09:21] [Rank 0] step:5121/10000 train_time:222937ms step_avg:43.53ms +[2025-09-11 14:09:21] [Rank 0] step:5141/10000 train_time:223621ms step_avg:43.50ms +[2025-09-11 14:09:21] [Rank 0] step:5141/10000 train_time:223621ms step_avg:43.50ms +[2025-09-11 14:09:22] [Rank 0] step:5161/10000 train_time:224304ms step_avg:43.46ms +[2025-09-11 14:09:22] [Rank 0] step:5161/10000 train_time:224304ms step_avg:43.46ms +[2025-09-11 14:09:23] [Rank 0] step:5181/10000 train_time:224987ms step_avg:43.43ms +[2025-09-11 14:09:23] [Rank 0] step:5181/10000 train_time:224987ms step_avg:43.43ms +[2025-09-11 14:09:23] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 14:09:23] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 14:09:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 14:09:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 14:09:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 14:09:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 14:09:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:09:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:09:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 14:09:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 14:09:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 14:09:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 14:09:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 14:09:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 14:09:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 14:09:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 14:09:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 14:09:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 14:09:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 14:09:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 14:09:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 14:09:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 14:09:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 14:09:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 14:09:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 14:09:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 14:09:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 14:09:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 14:09:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 14:09:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 14:09:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 14:09:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 14:09:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 14:09:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 14:09:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 14:09:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 14:09:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 14:09:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 14:09:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 14:09:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 14:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 14:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 14:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 14:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 14:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 14:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 14:09:34] [Rank 0] PRINT: step:5200/10000 val_loss:4.8950 total_sharp:2.0367e-04 L1_sharp:1.2227e-01 L2_sharp:2.5596e-01 L3_sharp:3.6096e-01 L4_sharp:4.1847e-01 L5_sharp:5.3547e-01 L6_sharp:6.6223e-01 L7_sharp:7.7279e-01 L8_sharp:8.8153e-01 L9_sharp:1.6737e+00 L10_sharp:3.2801e+00 L11_sharp:4.5965e+00 L12_sharp:7.0829e+00 total_fnorm:6.8000e+01 total_l1_linf:1.2186e+05 total_spectral:3.4000e+01 L1_fnorm:4.7607e-02 L2_fnorm:4.8096e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.7852e-02 L5_fnorm:4.7607e-02 L6_fnorm:4.7607e-02 L7_fnorm:4.7607e-02 L8_fnorm:4.7119e-02 L9_fnorm:4.7607e-02 L10_fnorm:4.7119e-02 L11_fnorm:4.7607e-02 L12_fnorm:4.7119e-02 L1_l1linf:1.3062e-02 L2_l1linf:1.3672e-02 L3_l1linf:1.4160e-02 L4_l1linf:1.4038e-02 L5_l1linf:1.3977e-02 L6_l1linf:1.3916e-02 L7_l1linf:1.3794e-02 L8_l1linf:1.4221e-02 L9_l1linf:1.4221e-02 L10_l1linf:1.4526e-02 L11_l1linf:1.4648e-02 L12_l1linf:1.5137e-02 L1_spectral:7.9226e-04 L2_spectral:7.9715e-04 L3_spectral:8.0022e-04 L4_spectral:7.9989e-04 L5_spectral:7.9386e-04 L6_spectral:8.0411e-04 L7_spectral:8.0078e-04 L8_spectral:7.9953e-04 L9_spectral:7.9930e-04 L10_spectral:7.9402e-04 L11_spectral:7.9495e-04 L12_spectral:7.7722e-04 train_time:225657ms step_avg:43.40ms +[2025-09-11 14:09:34] [Rank 0] PRINT: step:5200/10000 val_loss:4.8950 total_sharp:2.0367e-04 L1_sharp:1.2227e-01 L2_sharp:2.5596e-01 L3_sharp:3.6096e-01 L4_sharp:4.1847e-01 L5_sharp:5.3547e-01 L6_sharp:6.6223e-01 L7_sharp:7.7279e-01 L8_sharp:8.8153e-01 L9_sharp:1.6737e+00 L10_sharp:3.2801e+00 L11_sharp:4.5965e+00 L12_sharp:7.0829e+00 total_fnorm:6.8000e+01 total_l1_linf:1.2186e+05 total_spectral:3.4000e+01 L1_fnorm:4.7607e-02 L2_fnorm:4.8096e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.7852e-02 L5_fnorm:4.7607e-02 L6_fnorm:4.7607e-02 L7_fnorm:4.7607e-02 L8_fnorm:4.7119e-02 L9_fnorm:4.7607e-02 L10_fnorm:4.7119e-02 L11_fnorm:4.7607e-02 L12_fnorm:4.7119e-02 L1_l1linf:1.3062e-02 L2_l1linf:1.3672e-02 L3_l1linf:1.4160e-02 L4_l1linf:1.4038e-02 L5_l1linf:1.3977e-02 
L6_l1linf:1.3916e-02 L7_l1linf:1.3794e-02 L8_l1linf:1.4221e-02 L9_l1linf:1.4221e-02 L10_l1linf:1.4526e-02 L11_l1linf:1.4648e-02 L12_l1linf:1.5137e-02 L1_spectral:7.9226e-04 L2_spectral:7.9715e-04 L3_spectral:8.0022e-04 L4_spectral:7.9989e-04 L5_spectral:7.9386e-04 L6_spectral:8.0411e-04 L7_spectral:8.0078e-04 L8_spectral:7.9953e-04 L9_spectral:7.9930e-04 L10_spectral:7.9402e-04 L11_spectral:7.9495e-04 L12_spectral:7.7722e-04 train_time:225657ms step_avg:43.40ms +[2025-09-11 14:09:35] [Rank 0] step:5201/10000 train_time:227185ms step_avg:43.68ms +[2025-09-11 14:09:35] [Rank 0] step:5201/10000 train_time:227185ms step_avg:43.68ms +[2025-09-11 14:09:36] [Rank 0] step:5221/10000 train_time:227882ms step_avg:43.65ms +[2025-09-11 14:09:36] [Rank 0] step:5221/10000 train_time:227882ms step_avg:43.65ms +[2025-09-11 14:09:37] [Rank 0] step:5241/10000 train_time:228576ms step_avg:43.61ms +[2025-09-11 14:09:37] [Rank 0] step:5241/10000 train_time:228576ms step_avg:43.61ms +[2025-09-11 14:09:37] [Rank 0] step:5261/10000 train_time:229268ms step_avg:43.58ms +[2025-09-11 14:09:37] [Rank 0] step:5261/10000 train_time:229268ms step_avg:43.58ms +[2025-09-11 14:09:38] [Rank 0] step:5281/10000 train_time:229961ms step_avg:43.55ms +[2025-09-11 14:09:38] [Rank 0] step:5281/10000 train_time:229961ms step_avg:43.55ms +[2025-09-11 14:09:39] [Rank 0] step:5301/10000 train_time:230654ms step_avg:43.51ms +[2025-09-11 14:09:39] [Rank 0] step:5301/10000 train_time:230654ms step_avg:43.51ms +[2025-09-11 14:09:39] [Rank 0] step:5321/10000 train_time:231348ms step_avg:43.48ms +[2025-09-11 14:09:39] [Rank 0] step:5321/10000 train_time:231348ms step_avg:43.48ms +[2025-09-11 14:09:40] [Rank 0] step:5341/10000 train_time:232041ms step_avg:43.45ms +[2025-09-11 14:09:40] [Rank 0] step:5341/10000 train_time:232041ms step_avg:43.45ms +[2025-09-11 14:09:41] [Rank 0] step:5361/10000 train_time:232734ms step_avg:43.41ms +[2025-09-11 14:09:41] [Rank 0] step:5361/10000 train_time:232734ms step_avg:43.41ms 
+[2025-09-11 14:09:41] [Rank 0] step:5381/10000 train_time:233428ms step_avg:43.38ms +[2025-09-11 14:09:41] [Rank 0] step:5381/10000 train_time:233428ms step_avg:43.38ms +[2025-09-11 14:09:42] [Rank 0] step:5401/10000 train_time:234118ms step_avg:43.35ms +[2025-09-11 14:09:42] [Rank 0] step:5401/10000 train_time:234118ms step_avg:43.35ms +[2025-09-11 14:09:43] [Rank 0] step:5421/10000 train_time:234813ms step_avg:43.32ms +[2025-09-11 14:09:43] [Rank 0] step:5421/10000 train_time:234813ms step_avg:43.32ms +[2025-09-11 14:09:44] [Rank 0] step:5441/10000 train_time:235508ms step_avg:43.28ms +[2025-09-11 14:09:44] [Rank 0] step:5441/10000 train_time:235508ms step_avg:43.28ms +[2025-09-11 14:09:44] [Rank 0] step:5461/10000 train_time:236200ms step_avg:43.25ms +[2025-09-11 14:09:44] [Rank 0] step:5461/10000 train_time:236200ms step_avg:43.25ms +[2025-09-11 14:09:45] [Rank 0] step:5481/10000 train_time:236899ms step_avg:43.22ms +[2025-09-11 14:09:45] [Rank 0] step:5481/10000 train_time:236899ms step_avg:43.22ms +[2025-09-11 14:09:46] [Rank 0] step:5501/10000 train_time:237593ms step_avg:43.19ms +[2025-09-11 14:09:46] [Rank 0] step:5501/10000 train_time:237593ms step_avg:43.19ms +[2025-09-11 14:09:46] [Rank 0] step:5521/10000 train_time:238285ms step_avg:43.16ms +[2025-09-11 14:09:46] [Rank 0] step:5521/10000 train_time:238285ms step_avg:43.16ms +[2025-09-11 14:09:47] [Rank 0] step:5541/10000 train_time:238980ms step_avg:43.13ms +[2025-09-11 14:09:47] [Rank 0] step:5541/10000 train_time:238980ms step_avg:43.13ms +[2025-09-11 14:09:48] [Rank 0] step:5561/10000 train_time:239675ms step_avg:43.10ms +[2025-09-11 14:09:48] [Rank 0] step:5561/10000 train_time:239675ms step_avg:43.10ms +[2025-09-11 14:09:48] [Rank 0] step:5581/10000 train_time:240370ms step_avg:43.07ms +[2025-09-11 14:09:48] [Rank 0] step:5581/10000 train_time:240370ms step_avg:43.07ms +[2025-09-11 14:09:49] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 14:09:49] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 14:09:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 14:09:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 14:09:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 14:09:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 14:09:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:09:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:09:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 14:09:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 14:09:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 14:09:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 14:09:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 14:09:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 14:09:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 14:09:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 14:09:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 14:09:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 14:09:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 14:09:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 14:09:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 14:09:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 14:09:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 14:09:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 14:09:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 14:09:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 14:09:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 14:09:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 14:09:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 14:09:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 14:09:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 14:09:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 14:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 14:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 14:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 14:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 14:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 14:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 14:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 14:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 14:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 14:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 14:09:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 14:09:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 14:09:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 14:09:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 14:09:59] [Rank 0] PRINT: step:5600/10000 val_loss:4.8558 total_sharp:1.3041e-04 L1_sharp:1.0266e-01 L2_sharp:2.0797e-01 L3_sharp:3.0233e-01 L4_sharp:3.8849e-01 L5_sharp:4.2833e-01 L6_sharp:5.4241e-01 L7_sharp:4.9699e-01 L8_sharp:4.7993e-01 L9_sharp:5.9628e-01 L10_sharp:7.1811e-01 L11_sharp:9.1887e-01 L12_sharp:8.7435e-01 total_fnorm:6.9000e+01 total_l1_linf:1.2646e+05 total_spectral:3.4500e+01 L1_fnorm:4.7607e-02 L2_fnorm:4.7852e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.7852e-02 L5_fnorm:4.7607e-02 L6_fnorm:4.7607e-02 L7_fnorm:4.7363e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.7119e-02 L10_fnorm:4.6875e-02 L11_fnorm:4.7119e-02 L12_fnorm:4.6631e-02 L1_l1linf:1.2878e-02 L2_l1linf:1.3184e-02 L3_l1linf:1.3672e-02 L4_l1linf:1.3794e-02 L5_l1linf:1.3611e-02 L6_l1linf:1.3916e-02 L7_l1linf:1.4099e-02 L8_l1linf:1.3733e-02 L9_l1linf:1.3855e-02 L10_l1linf:1.4099e-02 L11_l1linf:1.4038e-02 L12_l1linf:1.4099e-02 L1_spectral:7.9486e-04 L2_spectral:7.9412e-04 L3_spectral:7.9527e-04 L4_spectral:8.0360e-04 L5_spectral:7.9921e-04 L6_spectral:8.0279e-04 L7_spectral:8.0131e-04 L8_spectral:7.9301e-04 L9_spectral:8.0088e-04 L10_spectral:7.9173e-04 L11_spectral:7.9894e-04 L12_spectral:7.8649e-04 train_time:241043ms step_avg:43.04ms +[2025-09-11 14:09:59] [Rank 0] PRINT: step:5600/10000 
val_loss:4.8558 total_sharp:1.3041e-04 L1_sharp:1.0266e-01 L2_sharp:2.0797e-01 L3_sharp:3.0233e-01 L4_sharp:3.8849e-01 L5_sharp:4.2833e-01 L6_sharp:5.4241e-01 L7_sharp:4.9699e-01 L8_sharp:4.7993e-01 L9_sharp:5.9628e-01 L10_sharp:7.1811e-01 L11_sharp:9.1887e-01 L12_sharp:8.7435e-01 total_fnorm:6.9000e+01 total_l1_linf:1.2646e+05 total_spectral:3.4500e+01 L1_fnorm:4.7607e-02 L2_fnorm:4.7852e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.7852e-02 L5_fnorm:4.7607e-02 L6_fnorm:4.7607e-02 L7_fnorm:4.7363e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.7119e-02 L10_fnorm:4.6875e-02 L11_fnorm:4.7119e-02 L12_fnorm:4.6631e-02 L1_l1linf:1.2878e-02 L2_l1linf:1.3184e-02 L3_l1linf:1.3672e-02 L4_l1linf:1.3794e-02 L5_l1linf:1.3611e-02 L6_l1linf:1.3916e-02 L7_l1linf:1.4099e-02 L8_l1linf:1.3733e-02 L9_l1linf:1.3855e-02 L10_l1linf:1.4099e-02 L11_l1linf:1.4038e-02 L12_l1linf:1.4099e-02 L1_spectral:7.9486e-04 L2_spectral:7.9412e-04 L3_spectral:7.9527e-04 L4_spectral:8.0360e-04 L5_spectral:7.9921e-04 L6_spectral:8.0279e-04 L7_spectral:8.0131e-04 L8_spectral:7.9301e-04 L9_spectral:8.0088e-04 L10_spectral:7.9173e-04 L11_spectral:7.9894e-04 L12_spectral:7.8649e-04 train_time:241043ms step_avg:43.04ms +[2025-09-11 14:10:01] [Rank 0] step:5601/10000 train_time:242602ms step_avg:43.31ms +[2025-09-11 14:10:01] [Rank 0] step:5601/10000 train_time:242602ms step_avg:43.31ms +[2025-09-11 14:10:02] [Rank 0] step:5621/10000 train_time:243403ms step_avg:43.30ms +[2025-09-11 14:10:02] [Rank 0] step:5621/10000 train_time:243403ms step_avg:43.30ms +[2025-09-11 14:10:03] [Rank 0] step:5641/10000 train_time:244095ms step_avg:43.27ms +[2025-09-11 14:10:03] [Rank 0] step:5641/10000 train_time:244095ms step_avg:43.27ms +[2025-09-11 14:10:03] [Rank 0] step:5661/10000 train_time:244787ms step_avg:43.24ms +[2025-09-11 14:10:03] [Rank 0] step:5661/10000 train_time:244787ms step_avg:43.24ms +[2025-09-11 14:10:04] [Rank 0] step:5681/10000 train_time:245481ms step_avg:43.21ms +[2025-09-11 14:10:04] [Rank 0] step:5681/10000 
train_time:245481ms step_avg:43.21ms +[2025-09-11 14:10:05] [Rank 0] step:5701/10000 train_time:246177ms step_avg:43.18ms +[2025-09-11 14:10:05] [Rank 0] step:5701/10000 train_time:246177ms step_avg:43.18ms +[2025-09-11 14:10:05] [Rank 0] step:5721/10000 train_time:246869ms step_avg:43.15ms +[2025-09-11 14:10:05] [Rank 0] step:5721/10000 train_time:246869ms step_avg:43.15ms +[2025-09-11 14:10:06] [Rank 0] step:5741/10000 train_time:247563ms step_avg:43.12ms +[2025-09-11 14:10:06] [Rank 0] step:5741/10000 train_time:247563ms step_avg:43.12ms +[2025-09-11 14:10:07] [Rank 0] step:5761/10000 train_time:248257ms step_avg:43.09ms +[2025-09-11 14:10:07] [Rank 0] step:5761/10000 train_time:248257ms step_avg:43.09ms +[2025-09-11 14:10:07] [Rank 0] step:5781/10000 train_time:248950ms step_avg:43.06ms +[2025-09-11 14:10:07] [Rank 0] step:5781/10000 train_time:248950ms step_avg:43.06ms +[2025-09-11 14:10:08] [Rank 0] step:5801/10000 train_time:249646ms step_avg:43.03ms +[2025-09-11 14:10:08] [Rank 0] step:5801/10000 train_time:249646ms step_avg:43.03ms +[2025-09-11 14:10:09] [Rank 0] step:5821/10000 train_time:250339ms step_avg:43.01ms +[2025-09-11 14:10:09] [Rank 0] step:5821/10000 train_time:250339ms step_avg:43.01ms +[2025-09-11 14:10:09] [Rank 0] step:5841/10000 train_time:251033ms step_avg:42.98ms +[2025-09-11 14:10:09] [Rank 0] step:5841/10000 train_time:251033ms step_avg:42.98ms +[2025-09-11 14:10:10] [Rank 0] step:5861/10000 train_time:251726ms step_avg:42.95ms +[2025-09-11 14:10:10] [Rank 0] step:5861/10000 train_time:251726ms step_avg:42.95ms +[2025-09-11 14:10:11] [Rank 0] step:5881/10000 train_time:252419ms step_avg:42.92ms +[2025-09-11 14:10:11] [Rank 0] step:5881/10000 train_time:252419ms step_avg:42.92ms +[2025-09-11 14:10:12] [Rank 0] step:5901/10000 train_time:253402ms step_avg:42.94ms +[2025-09-11 14:10:12] [Rank 0] step:5901/10000 train_time:253402ms step_avg:42.94ms +[2025-09-11 14:10:13] [Rank 0] step:5921/10000 train_time:254099ms step_avg:42.91ms 
+[2025-09-11 14:10:13] [Rank 0] step:5921/10000 train_time:254099ms step_avg:42.91ms +[2025-09-11 14:10:13] [Rank 0] step:5941/10000 train_time:254794ms step_avg:42.89ms +[2025-09-11 14:10:13] [Rank 0] step:5941/10000 train_time:254794ms step_avg:42.89ms +[2025-09-11 14:10:14] [Rank 0] step:5961/10000 train_time:255636ms step_avg:42.88ms +[2025-09-11 14:10:14] [Rank 0] step:5961/10000 train_time:255636ms step_avg:42.88ms +[2025-09-11 14:10:15] [Rank 0] step:5981/10000 train_time:256436ms step_avg:42.88ms +[2025-09-11 14:10:15] [Rank 0] step:5981/10000 train_time:256436ms step_avg:42.88ms +[2025-09-11 14:10:16] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 14:10:16] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 14:10:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 14:10:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 14:10:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 14:10:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 14:10:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:10:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:10:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 14:10:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 14:10:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 14:10:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 14:10:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 14:10:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 14:10:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 14:10:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 14:10:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 14:10:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 14:10:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 14:10:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 14:10:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 14:10:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 14:10:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 14:10:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 14:10:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 14:10:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 14:10:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 14:10:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 14:10:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 14:10:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 14:10:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 14:10:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 14:10:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 14:10:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 14:10:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 14:10:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 14:10:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 14:10:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 14:10:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 14:10:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 14:10:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 14:10:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 14:10:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 14:10:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 14:10:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 14:10:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 14:10:27] [Rank 0] PRINT: step:6000/10000 val_loss:4.7993 total_sharp:1.0957e-04 L1_sharp:1.1636e-01 L2_sharp:2.1489e-01 L3_sharp:3.3061e-01 L4_sharp:3.9107e-01 L5_sharp:4.6203e-01 L6_sharp:5.2391e-01 L7_sharp:4.8766e-01 L8_sharp:4.7350e-01 L9_sharp:5.5202e-01 L10_sharp:5.7984e-01 L11_sharp:6.9310e-01 L12_sharp:1.6652e+00 total_fnorm:7.0000e+01 total_l1_linf:1.2749e+05 total_spectral:3.5000e+01 L1_fnorm:4.7363e-02 L2_fnorm:4.7852e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.7607e-02 L5_fnorm:4.7363e-02 L6_fnorm:4.7119e-02 L7_fnorm:4.6875e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.6875e-02 L10_fnorm:4.6631e-02 L11_fnorm:4.6631e-02 L12_fnorm:4.6387e-02 L1_l1linf:1.2207e-02 L2_l1linf:1.3428e-02 L3_l1linf:1.3184e-02 L4_l1linf:1.3428e-02 L5_l1linf:1.3306e-02 L6_l1linf:1.3550e-02 L7_l1linf:1.3184e-02 L8_l1linf:1.3428e-02 L9_l1linf:1.3489e-02 L10_l1linf:1.3245e-02 L11_l1linf:1.3428e-02 L12_l1linf:1.3794e-02 L1_spectral:7.9780e-04 L2_spectral:7.9402e-04 L3_spectral:8.0356e-04 L4_spectral:7.9359e-04 L5_spectral:7.9939e-04 L6_spectral:8.0471e-04 L7_spectral:8.0567e-04 L8_spectral:7.9716e-04 L9_spectral:8.0370e-04 L10_spectral:8.0325e-04 L11_spectral:8.0463e-04 L12_spectral:7.9330e-04 train_time:257114ms step_avg:42.85ms +[2025-09-11 14:10:27] [Rank 0] PRINT: step:6000/10000 val_loss:4.7993 total_sharp:1.0957e-04 L1_sharp:1.1636e-01 L2_sharp:2.1489e-01 L3_sharp:3.3061e-01 L4_sharp:3.9107e-01 L5_sharp:4.6203e-01 L6_sharp:5.2391e-01 L7_sharp:4.8766e-01 L8_sharp:4.7350e-01 L9_sharp:5.5202e-01 L10_sharp:5.7984e-01 L11_sharp:6.9310e-01 L12_sharp:1.6652e+00 total_fnorm:7.0000e+01 total_l1_linf:1.2749e+05 total_spectral:3.5000e+01 L1_fnorm:4.7363e-02 L2_fnorm:4.7852e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.7607e-02 L5_fnorm:4.7363e-02 L6_fnorm:4.7119e-02 L7_fnorm:4.6875e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.6875e-02 L10_fnorm:4.6631e-02 L11_fnorm:4.6631e-02 L12_fnorm:4.6387e-02 L1_l1linf:1.2207e-02 L2_l1linf:1.3428e-02 L3_l1linf:1.3184e-02 L4_l1linf:1.3428e-02 L5_l1linf:1.3306e-02 
L6_l1linf:1.3550e-02 L7_l1linf:1.3184e-02 L8_l1linf:1.3428e-02 L9_l1linf:1.3489e-02 L10_l1linf:1.3245e-02 L11_l1linf:1.3428e-02 L12_l1linf:1.3794e-02 L1_spectral:7.9780e-04 L2_spectral:7.9402e-04 L3_spectral:8.0356e-04 L4_spectral:7.9359e-04 L5_spectral:7.9939e-04 L6_spectral:8.0471e-04 L7_spectral:8.0567e-04 L8_spectral:7.9716e-04 L9_spectral:8.0370e-04 L10_spectral:8.0325e-04 L11_spectral:8.0463e-04 L12_spectral:7.9330e-04 train_time:257114ms step_avg:42.85ms +[2025-09-11 14:10:29] [Rank 0] step:6001/10000 train_time:258703ms step_avg:43.11ms +[2025-09-11 14:10:29] [Rank 0] step:6001/10000 train_time:258703ms step_avg:43.11ms +[2025-09-11 14:10:30] [Rank 0] step:6021/10000 train_time:259421ms step_avg:43.09ms +[2025-09-11 14:10:30] [Rank 0] step:6021/10000 train_time:259421ms step_avg:43.09ms +[2025-09-11 14:10:30] [Rank 0] step:6041/10000 train_time:260119ms step_avg:43.06ms +[2025-09-11 14:10:30] [Rank 0] step:6041/10000 train_time:260119ms step_avg:43.06ms +[2025-09-11 14:10:31] [Rank 0] step:6061/10000 train_time:260819ms step_avg:43.03ms +[2025-09-11 14:10:31] [Rank 0] step:6061/10000 train_time:260819ms step_avg:43.03ms +[2025-09-11 14:10:32] [Rank 0] step:6081/10000 train_time:261516ms step_avg:43.01ms +[2025-09-11 14:10:32] [Rank 0] step:6081/10000 train_time:261516ms step_avg:43.01ms +[2025-09-11 14:10:33] [Rank 0] step:6101/10000 train_time:262210ms step_avg:42.98ms +[2025-09-11 14:10:33] [Rank 0] step:6101/10000 train_time:262210ms step_avg:42.98ms +[2025-09-11 14:10:33] [Rank 0] step:6121/10000 train_time:262907ms step_avg:42.95ms +[2025-09-11 14:10:33] [Rank 0] step:6121/10000 train_time:262907ms step_avg:42.95ms +[2025-09-11 14:10:34] [Rank 0] step:6141/10000 train_time:263603ms step_avg:42.93ms +[2025-09-11 14:10:34] [Rank 0] step:6141/10000 train_time:263603ms step_avg:42.93ms +[2025-09-11 14:10:35] [Rank 0] step:6161/10000 train_time:264297ms step_avg:42.90ms +[2025-09-11 14:10:35] [Rank 0] step:6161/10000 train_time:264297ms step_avg:42.90ms 
+[2025-09-11 14:10:35] [Rank 0] step:6181/10000 train_time:264990ms step_avg:42.87ms +[2025-09-11 14:10:35] [Rank 0] step:6181/10000 train_time:264990ms step_avg:42.87ms +[2025-09-11 14:10:36] [Rank 0] step:6201/10000 train_time:265687ms step_avg:42.85ms +[2025-09-11 14:10:36] [Rank 0] step:6201/10000 train_time:265687ms step_avg:42.85ms +[2025-09-11 14:10:37] [Rank 0] step:6221/10000 train_time:266384ms step_avg:42.82ms +[2025-09-11 14:10:37] [Rank 0] step:6221/10000 train_time:266384ms step_avg:42.82ms +[2025-09-11 14:10:37] [Rank 0] step:6241/10000 train_time:267079ms step_avg:42.79ms +[2025-09-11 14:10:37] [Rank 0] step:6241/10000 train_time:267079ms step_avg:42.79ms +[2025-09-11 14:10:38] [Rank 0] step:6261/10000 train_time:267772ms step_avg:42.77ms +[2025-09-11 14:10:38] [Rank 0] step:6261/10000 train_time:267772ms step_avg:42.77ms +[2025-09-11 14:10:39] [Rank 0] step:6281/10000 train_time:268467ms step_avg:42.74ms +[2025-09-11 14:10:39] [Rank 0] step:6281/10000 train_time:268467ms step_avg:42.74ms +[2025-09-11 14:10:40] [Rank 0] step:6301/10000 train_time:269160ms step_avg:42.72ms +[2025-09-11 14:10:40] [Rank 0] step:6301/10000 train_time:269160ms step_avg:42.72ms +[2025-09-11 14:10:40] [Rank 0] step:6321/10000 train_time:269858ms step_avg:42.69ms +[2025-09-11 14:10:40] [Rank 0] step:6321/10000 train_time:269858ms step_avg:42.69ms +[2025-09-11 14:10:41] [Rank 0] step:6341/10000 train_time:270555ms step_avg:42.67ms +[2025-09-11 14:10:41] [Rank 0] step:6341/10000 train_time:270555ms step_avg:42.67ms +[2025-09-11 14:10:42] [Rank 0] step:6361/10000 train_time:271251ms step_avg:42.64ms +[2025-09-11 14:10:42] [Rank 0] step:6361/10000 train_time:271251ms step_avg:42.64ms +[2025-09-11 14:10:42] [Rank 0] step:6381/10000 train_time:271946ms step_avg:42.62ms +[2025-09-11 14:10:42] [Rank 0] step:6381/10000 train_time:271946ms step_avg:42.62ms +[2025-09-11 14:10:43] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 14:10:43] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 14:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 14:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 14:10:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 14:10:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 14:10:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:10:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:10:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 14:10:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 14:10:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 14:10:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 14:10:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 14:10:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 14:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 14:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 14:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 14:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 14:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 14:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 14:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 14:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 14:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 14:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 14:10:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 14:10:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 14:10:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 14:10:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 14:10:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 14:10:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 14:10:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 14:10:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 14:10:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 14:10:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 14:10:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 14:10:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 14:10:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 14:10:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 14:10:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 14:10:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 14:10:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 14:10:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 14:10:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 14:10:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 14:10:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 14:10:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 14:10:53] [Rank 0] PRINT: step:6400/10000 val_loss:4.7699 total_sharp:1.2397e-04 L1_sharp:9.7849e-02 L2_sharp:2.0592e-01 L3_sharp:2.8907e-01 L4_sharp:3.7136e-01 L5_sharp:4.5441e-01 L6_sharp:5.0774e-01 L7_sharp:5.1657e-01 L8_sharp:5.1791e-01 L9_sharp:5.8132e-01 L10_sharp:6.5518e-01 L11_sharp:9.2232e-01 L12_sharp:2.7246e+00 total_fnorm:6.2000e+01 total_l1_linf:1.0854e+05 total_spectral:3.1000e+01 L1_fnorm:4.1016e-02 L2_fnorm:4.1504e-02 L3_fnorm:4.1748e-02 L4_fnorm:4.1504e-02 L5_fnorm:4.1260e-02 L6_fnorm:4.1504e-02 L7_fnorm:4.1260e-02 L8_fnorm:4.0527e-02 L9_fnorm:4.0771e-02 L10_fnorm:4.0771e-02 L11_fnorm:4.1016e-02 L12_fnorm:4.0527e-02 L1_l1linf:9.8267e-03 L2_l1linf:1.0498e-02 L3_l1linf:1.0986e-02 L4_l1linf:1.1353e-02 L5_l1linf:1.1169e-02 L6_l1linf:1.1353e-02 L7_l1linf:1.1230e-02 L8_l1linf:1.1597e-02 L9_l1linf:1.1475e-02 L10_l1linf:1.1353e-02 L11_l1linf:1.1902e-02 L12_l1linf:1.1780e-02 L1_spectral:7.1067e-04 L2_spectral:7.1520e-04 L3_spectral:7.1593e-04 L4_spectral:7.1896e-04 L5_spectral:7.1254e-04 L6_spectral:7.1775e-04 L7_spectral:7.1822e-04 L8_spectral:7.0680e-04 L9_spectral:7.0916e-04 L10_spectral:7.1222e-04 L11_spectral:7.1571e-04 L12_spectral:6.9900e-04 train_time:272621ms step_avg:42.60ms +[2025-09-11 14:10:53] [Rank 0] PRINT: step:6400/10000 
val_loss:4.7699 total_sharp:1.2397e-04 L1_sharp:9.7849e-02 L2_sharp:2.0592e-01 L3_sharp:2.8907e-01 L4_sharp:3.7136e-01 L5_sharp:4.5441e-01 L6_sharp:5.0774e-01 L7_sharp:5.1657e-01 L8_sharp:5.1791e-01 L9_sharp:5.8132e-01 L10_sharp:6.5518e-01 L11_sharp:9.2232e-01 L12_sharp:2.7246e+00 total_fnorm:6.2000e+01 total_l1_linf:1.0854e+05 total_spectral:3.1000e+01 L1_fnorm:4.1016e-02 L2_fnorm:4.1504e-02 L3_fnorm:4.1748e-02 L4_fnorm:4.1504e-02 L5_fnorm:4.1260e-02 L6_fnorm:4.1504e-02 L7_fnorm:4.1260e-02 L8_fnorm:4.0527e-02 L9_fnorm:4.0771e-02 L10_fnorm:4.0771e-02 L11_fnorm:4.1016e-02 L12_fnorm:4.0527e-02 L1_l1linf:9.8267e-03 L2_l1linf:1.0498e-02 L3_l1linf:1.0986e-02 L4_l1linf:1.1353e-02 L5_l1linf:1.1169e-02 L6_l1linf:1.1353e-02 L7_l1linf:1.1230e-02 L8_l1linf:1.1597e-02 L9_l1linf:1.1475e-02 L10_l1linf:1.1353e-02 L11_l1linf:1.1902e-02 L12_l1linf:1.1780e-02 L1_spectral:7.1067e-04 L2_spectral:7.1520e-04 L3_spectral:7.1593e-04 L4_spectral:7.1896e-04 L5_spectral:7.1254e-04 L6_spectral:7.1775e-04 L7_spectral:7.1822e-04 L8_spectral:7.0680e-04 L9_spectral:7.0916e-04 L10_spectral:7.1222e-04 L11_spectral:7.1571e-04 L12_spectral:6.9900e-04 train_time:272621ms step_avg:42.60ms +[2025-09-11 14:10:55] [Rank 0] step:6401/10000 train_time:274264ms step_avg:42.85ms +[2025-09-11 14:10:55] [Rank 0] step:6401/10000 train_time:274264ms step_avg:42.85ms +[2025-09-11 14:10:56] [Rank 0] step:6421/10000 train_time:274982ms step_avg:42.83ms +[2025-09-11 14:10:56] [Rank 0] step:6421/10000 train_time:274982ms step_avg:42.83ms +[2025-09-11 14:10:56] [Rank 0] step:6441/10000 train_time:275676ms step_avg:42.80ms +[2025-09-11 14:10:56] [Rank 0] step:6441/10000 train_time:275676ms step_avg:42.80ms +[2025-09-11 14:10:57] [Rank 0] step:6461/10000 train_time:276371ms step_avg:42.78ms +[2025-09-11 14:10:57] [Rank 0] step:6461/10000 train_time:276371ms step_avg:42.78ms +[2025-09-11 14:10:58] [Rank 0] step:6481/10000 train_time:277068ms step_avg:42.75ms +[2025-09-11 14:10:58] [Rank 0] step:6481/10000 
train_time:277068ms step_avg:42.75ms +[2025-09-11 14:10:59] [Rank 0] step:6501/10000 train_time:277764ms step_avg:42.73ms +[2025-09-11 14:10:59] [Rank 0] step:6501/10000 train_time:277764ms step_avg:42.73ms +[2025-09-11 14:10:59] [Rank 0] step:6521/10000 train_time:278463ms step_avg:42.70ms +[2025-09-11 14:10:59] [Rank 0] step:6521/10000 train_time:278463ms step_avg:42.70ms +[2025-09-11 14:11:00] [Rank 0] step:6541/10000 train_time:279156ms step_avg:42.68ms +[2025-09-11 14:11:00] [Rank 0] step:6541/10000 train_time:279156ms step_avg:42.68ms +[2025-09-11 14:11:01] [Rank 0] step:6561/10000 train_time:279851ms step_avg:42.65ms +[2025-09-11 14:11:01] [Rank 0] step:6561/10000 train_time:279851ms step_avg:42.65ms +[2025-09-11 14:11:01] [Rank 0] step:6581/10000 train_time:280547ms step_avg:42.63ms +[2025-09-11 14:11:01] [Rank 0] step:6581/10000 train_time:280547ms step_avg:42.63ms +[2025-09-11 14:11:02] [Rank 0] step:6601/10000 train_time:281243ms step_avg:42.61ms +[2025-09-11 14:11:02] [Rank 0] step:6601/10000 train_time:281243ms step_avg:42.61ms +[2025-09-11 14:11:03] [Rank 0] step:6621/10000 train_time:281939ms step_avg:42.58ms +[2025-09-11 14:11:03] [Rank 0] step:6621/10000 train_time:281939ms step_avg:42.58ms +[2025-09-11 14:11:03] [Rank 0] step:6641/10000 train_time:282635ms step_avg:42.56ms +[2025-09-11 14:11:03] [Rank 0] step:6641/10000 train_time:282635ms step_avg:42.56ms +[2025-09-11 14:11:04] [Rank 0] step:6661/10000 train_time:283330ms step_avg:42.54ms +[2025-09-11 14:11:04] [Rank 0] step:6661/10000 train_time:283330ms step_avg:42.54ms +[2025-09-11 14:11:05] [Rank 0] step:6681/10000 train_time:284033ms step_avg:42.51ms +[2025-09-11 14:11:05] [Rank 0] step:6681/10000 train_time:284033ms step_avg:42.51ms +[2025-09-11 14:11:06] [Rank 0] step:6701/10000 train_time:284734ms step_avg:42.49ms +[2025-09-11 14:11:06] [Rank 0] step:6701/10000 train_time:284734ms step_avg:42.49ms +[2025-09-11 14:11:06] [Rank 0] step:6721/10000 train_time:285437ms step_avg:42.47ms 
+[2025-09-11 14:11:06] [Rank 0] step:6721/10000 train_time:285437ms step_avg:42.47ms +[2025-09-11 14:11:07] [Rank 0] step:6741/10000 train_time:286139ms step_avg:42.45ms +[2025-09-11 14:11:07] [Rank 0] step:6741/10000 train_time:286139ms step_avg:42.45ms +[2025-09-11 14:11:08] [Rank 0] step:6761/10000 train_time:286840ms step_avg:42.43ms +[2025-09-11 14:11:08] [Rank 0] step:6761/10000 train_time:286840ms step_avg:42.43ms +[2025-09-11 14:11:08] [Rank 0] step:6781/10000 train_time:287542ms step_avg:42.40ms +[2025-09-11 14:11:08] [Rank 0] step:6781/10000 train_time:287542ms step_avg:42.40ms +[2025-09-11 14:11:09] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 14:11:09] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 14:11:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 14:11:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 14:11:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 14:11:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 14:11:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:11:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:11:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 14:11:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 14:11:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 14:11:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 14:11:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 14:11:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 14:11:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 14:11:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 14:11:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 14:11:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 14:11:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 14:11:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 14:11:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 14:11:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 14:11:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 14:11:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 14:11:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 14:11:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 14:11:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 14:11:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 14:11:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 14:11:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 14:11:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 14:11:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 14:11:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 14:11:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 14:11:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 14:11:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 14:11:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 14:11:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 14:11:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 14:11:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 14:11:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 14:11:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 14:11:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 14:11:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 14:11:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 14:11:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 14:11:20] [Rank 0] PRINT: step:6800/10000 val_loss:4.7338 total_sharp:8.9081e-05 L1_sharp:8.6623e-02 L2_sharp:1.9487e-01 L3_sharp:2.8898e-01 L4_sharp:3.0826e-01 L5_sharp:3.8627e-01 L6_sharp:5.0889e-01 L7_sharp:5.2932e-01 L8_sharp:4.8096e-01 L9_sharp:5.2554e-01 L10_sharp:6.2968e-01 L11_sharp:6.6694e-01 L12_sharp:1.0016e+00 total_fnorm:6.0250e+01 total_l1_linf:1.0496e+05 total_spectral:3.0000e+01 L1_fnorm:3.4668e-02 L2_fnorm:3.5156e-02 L3_fnorm:3.5156e-02 L4_fnorm:3.4912e-02 L5_fnorm:3.4912e-02 L6_fnorm:3.4912e-02 L7_fnorm:3.4668e-02 L8_fnorm:3.3936e-02 L9_fnorm:3.4180e-02 L10_fnorm:3.3936e-02 L11_fnorm:3.3936e-02 L12_fnorm:3.3936e-02 L1_l1linf:7.7209e-03 L2_l1linf:8.3618e-03 L3_l1linf:8.6060e-03 L4_l1linf:9.0332e-03 L5_l1linf:8.9111e-03 L6_l1linf:8.9722e-03 L7_l1linf:8.9722e-03 L8_l1linf:8.8501e-03 L9_l1linf:8.8501e-03 L10_l1linf:8.7891e-03 L11_l1linf:8.8501e-03 L12_l1linf:8.9111e-03 L1_spectral:6.2743e-04 L2_spectral:6.2571e-04 L3_spectral:6.2677e-04 L4_spectral:6.2713e-04 L5_spectral:6.2223e-04 L6_spectral:6.3187e-04 L7_spectral:6.2750e-04 L8_spectral:6.1547e-04 L9_spectral:6.2149e-04 L10_spectral:6.1913e-04 L11_spectral:6.2006e-04 L12_spectral:6.0621e-04 train_time:288223ms step_avg:42.39ms +[2025-09-11 14:11:20] [Rank 0] PRINT: step:6800/10000 val_loss:4.7338 total_sharp:8.9081e-05 L1_sharp:8.6623e-02 L2_sharp:1.9487e-01 L3_sharp:2.8898e-01 L4_sharp:3.0826e-01 L5_sharp:3.8627e-01 L6_sharp:5.0889e-01 L7_sharp:5.2932e-01 L8_sharp:4.8096e-01 L9_sharp:5.2554e-01 L10_sharp:6.2968e-01 L11_sharp:6.6694e-01 L12_sharp:1.0016e+00 total_fnorm:6.0250e+01 total_l1_linf:1.0496e+05 total_spectral:3.0000e+01 L1_fnorm:3.4668e-02 L2_fnorm:3.5156e-02 L3_fnorm:3.5156e-02 L4_fnorm:3.4912e-02 L5_fnorm:3.4912e-02 L6_fnorm:3.4912e-02 L7_fnorm:3.4668e-02 L8_fnorm:3.3936e-02 L9_fnorm:3.4180e-02 L10_fnorm:3.3936e-02 L11_fnorm:3.3936e-02 L12_fnorm:3.3936e-02 L1_l1linf:7.7209e-03 L2_l1linf:8.3618e-03 L3_l1linf:8.6060e-03 L4_l1linf:9.0332e-03 L5_l1linf:8.9111e-03 
L6_l1linf:8.9722e-03 L7_l1linf:8.9722e-03 L8_l1linf:8.8501e-03 L9_l1linf:8.8501e-03 L10_l1linf:8.7891e-03 L11_l1linf:8.8501e-03 L12_l1linf:8.9111e-03 L1_spectral:6.2743e-04 L2_spectral:6.2571e-04 L3_spectral:6.2677e-04 L4_spectral:6.2713e-04 L5_spectral:6.2223e-04 L6_spectral:6.3187e-04 L7_spectral:6.2750e-04 L8_spectral:6.1547e-04 L9_spectral:6.2149e-04 L10_spectral:6.1913e-04 L11_spectral:6.2006e-04 L12_spectral:6.0621e-04 train_time:288223ms step_avg:42.39ms +[2025-09-11 14:11:22] [Rank 0] step:6801/10000 train_time:289895ms step_avg:42.63ms +[2025-09-11 14:11:22] [Rank 0] step:6801/10000 train_time:289895ms step_avg:42.63ms +[2025-09-11 14:11:23] [Rank 0] step:6821/10000 train_time:290623ms step_avg:42.61ms +[2025-09-11 14:11:23] [Rank 0] step:6821/10000 train_time:290623ms step_avg:42.61ms +[2025-09-11 14:11:23] [Rank 0] step:6841/10000 train_time:291328ms step_avg:42.59ms +[2025-09-11 14:11:23] [Rank 0] step:6841/10000 train_time:291328ms step_avg:42.59ms +[2025-09-11 14:11:24] [Rank 0] step:6861/10000 train_time:292031ms step_avg:42.56ms +[2025-09-11 14:11:24] [Rank 0] step:6861/10000 train_time:292031ms step_avg:42.56ms +[2025-09-11 14:11:25] [Rank 0] step:6881/10000 train_time:292736ms step_avg:42.54ms +[2025-09-11 14:11:25] [Rank 0] step:6881/10000 train_time:292736ms step_avg:42.54ms +[2025-09-11 14:11:25] [Rank 0] step:6901/10000 train_time:293437ms step_avg:42.52ms +[2025-09-11 14:11:25] [Rank 0] step:6901/10000 train_time:293437ms step_avg:42.52ms +[2025-09-11 14:11:26] [Rank 0] step:6921/10000 train_time:294139ms step_avg:42.50ms +[2025-09-11 14:11:26] [Rank 0] step:6921/10000 train_time:294139ms step_avg:42.50ms +[2025-09-11 14:11:27] [Rank 0] step:6941/10000 train_time:294841ms step_avg:42.48ms +[2025-09-11 14:11:27] [Rank 0] step:6941/10000 train_time:294841ms step_avg:42.48ms +[2025-09-11 14:11:27] [Rank 0] step:6961/10000 train_time:295545ms step_avg:42.46ms +[2025-09-11 14:11:27] [Rank 0] step:6961/10000 train_time:295545ms step_avg:42.46ms 
+[2025-09-11 14:11:28] [Rank 0] step:6981/10000 train_time:296250ms step_avg:42.44ms +[2025-09-11 14:11:28] [Rank 0] step:6981/10000 train_time:296250ms step_avg:42.44ms +[2025-09-11 14:11:29] [Rank 0] step:7001/10000 train_time:296953ms step_avg:42.42ms +[2025-09-11 14:11:29] [Rank 0] step:7001/10000 train_time:296953ms step_avg:42.42ms +[2025-09-11 14:11:30] [Rank 0] step:7021/10000 train_time:297656ms step_avg:42.40ms +[2025-09-11 14:11:30] [Rank 0] step:7021/10000 train_time:297656ms step_avg:42.40ms +[2025-09-11 14:11:30] [Rank 0] step:7041/10000 train_time:298357ms step_avg:42.37ms +[2025-09-11 14:11:30] [Rank 0] step:7041/10000 train_time:298357ms step_avg:42.37ms +[2025-09-11 14:11:31] [Rank 0] step:7061/10000 train_time:299061ms step_avg:42.35ms +[2025-09-11 14:11:31] [Rank 0] step:7061/10000 train_time:299061ms step_avg:42.35ms +[2025-09-11 14:11:32] [Rank 0] step:7081/10000 train_time:299763ms step_avg:42.33ms +[2025-09-11 14:11:32] [Rank 0] step:7081/10000 train_time:299763ms step_avg:42.33ms +[2025-09-11 14:11:32] [Rank 0] step:7101/10000 train_time:300466ms step_avg:42.31ms +[2025-09-11 14:11:32] [Rank 0] step:7101/10000 train_time:300466ms step_avg:42.31ms +[2025-09-11 14:11:33] [Rank 0] step:7121/10000 train_time:301170ms step_avg:42.29ms +[2025-09-11 14:11:33] [Rank 0] step:7121/10000 train_time:301170ms step_avg:42.29ms +[2025-09-11 14:11:34] [Rank 0] step:7141/10000 train_time:301873ms step_avg:42.27ms +[2025-09-11 14:11:34] [Rank 0] step:7141/10000 train_time:301873ms step_avg:42.27ms +[2025-09-11 14:11:35] [Rank 0] step:7161/10000 train_time:302576ms step_avg:42.25ms +[2025-09-11 14:11:35] [Rank 0] step:7161/10000 train_time:302576ms step_avg:42.25ms +[2025-09-11 14:11:35] [Rank 0] step:7181/10000 train_time:303278ms step_avg:42.23ms +[2025-09-11 14:11:35] [Rank 0] step:7181/10000 train_time:303278ms step_avg:42.23ms +[2025-09-11 14:11:36] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 14:11:36] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 14:11:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 14:11:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 14:11:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 14:11:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 14:11:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:11:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:11:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 14:11:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 14:11:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 14:11:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 14:11:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 14:11:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 14:11:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 14:11:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 14:11:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 14:11:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 14:11:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 14:11:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 14:11:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 14:11:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 14:11:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 14:11:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 14:11:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 14:11:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 14:11:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 14:11:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 14:11:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 14:11:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 14:11:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 14:11:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 14:11:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 14:11:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 14:11:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 14:11:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 14:11:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 14:11:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 14:11:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 14:11:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 14:11:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 14:11:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 14:11:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 14:11:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 14:11:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 14:11:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 14:11:47] [Rank 0] PRINT: step:7200/10000 val_loss:4.7091 total_sharp:8.2740e-05 L1_sharp:9.1294e-02 L2_sharp:2.0235e-01 L3_sharp:2.9268e-01 L4_sharp:3.3988e-01 L5_sharp:4.4763e-01 L6_sharp:5.5691e-01 L7_sharp:5.0498e-01 L8_sharp:4.7919e-01 L9_sharp:5.3993e-01 L10_sharp:7.0532e-01 L11_sharp:8.3290e-01 L12_sharp:2.0488e+00 total_fnorm:5.2750e+01 total_l1_linf:8.8064e+04 total_spectral:2.6375e+01 L1_fnorm:2.8809e-02 L2_fnorm:2.9175e-02 L3_fnorm:2.9541e-02 L4_fnorm:2.9175e-02 L5_fnorm:2.9053e-02 L6_fnorm:2.9053e-02 L7_fnorm:2.8931e-02 L8_fnorm:2.8320e-02 L9_fnorm:2.8687e-02 L10_fnorm:2.8564e-02 L11_fnorm:2.8564e-02 L12_fnorm:2.8320e-02 L1_l1linf:6.0730e-03 L2_l1linf:6.6833e-03 L3_l1linf:6.8054e-03 L4_l1linf:7.0496e-03 L5_l1linf:7.1106e-03 L6_l1linf:7.0801e-03 L7_l1linf:7.2021e-03 L8_l1linf:7.1106e-03 L9_l1linf:7.2937e-03 L10_l1linf:7.2632e-03 L11_l1linf:7.2937e-03 L12_l1linf:7.4463e-03 L1_spectral:5.3938e-04 L2_spectral:5.3166e-04 L3_spectral:5.3721e-04 L4_spectral:5.3648e-04 L5_spectral:5.3454e-04 L6_spectral:5.3831e-04 L7_spectral:5.4070e-04 L8_spectral:5.2430e-04 L9_spectral:5.3596e-04 L10_spectral:5.2554e-04 L11_spectral:5.3299e-04 L12_spectral:5.2313e-04 train_time:303961ms step_avg:42.22ms +[2025-09-11 14:11:47] [Rank 0] PRINT: step:7200/10000 
val_loss:4.7091 total_sharp:8.2740e-05 L1_sharp:9.1294e-02 L2_sharp:2.0235e-01 L3_sharp:2.9268e-01 L4_sharp:3.3988e-01 L5_sharp:4.4763e-01 L6_sharp:5.5691e-01 L7_sharp:5.0498e-01 L8_sharp:4.7919e-01 L9_sharp:5.3993e-01 L10_sharp:7.0532e-01 L11_sharp:8.3290e-01 L12_sharp:2.0488e+00 total_fnorm:5.2750e+01 total_l1_linf:8.8064e+04 total_spectral:2.6375e+01 L1_fnorm:2.8809e-02 L2_fnorm:2.9175e-02 L3_fnorm:2.9541e-02 L4_fnorm:2.9175e-02 L5_fnorm:2.9053e-02 L6_fnorm:2.9053e-02 L7_fnorm:2.8931e-02 L8_fnorm:2.8320e-02 L9_fnorm:2.8687e-02 L10_fnorm:2.8564e-02 L11_fnorm:2.8564e-02 L12_fnorm:2.8320e-02 L1_l1linf:6.0730e-03 L2_l1linf:6.6833e-03 L3_l1linf:6.8054e-03 L4_l1linf:7.0496e-03 L5_l1linf:7.1106e-03 L6_l1linf:7.0801e-03 L7_l1linf:7.2021e-03 L8_l1linf:7.1106e-03 L9_l1linf:7.2937e-03 L10_l1linf:7.2632e-03 L11_l1linf:7.2937e-03 L12_l1linf:7.4463e-03 L1_spectral:5.3938e-04 L2_spectral:5.3166e-04 L3_spectral:5.3721e-04 L4_spectral:5.3648e-04 L5_spectral:5.3454e-04 L6_spectral:5.3831e-04 L7_spectral:5.4070e-04 L8_spectral:5.2430e-04 L9_spectral:5.3596e-04 L10_spectral:5.2554e-04 L11_spectral:5.3299e-04 L12_spectral:5.2313e-04 train_time:303961ms step_avg:42.22ms +[2025-09-11 14:11:48] [Rank 0] step:7201/10000 train_time:305632ms step_avg:42.44ms +[2025-09-11 14:11:48] [Rank 0] step:7201/10000 train_time:305632ms step_avg:42.44ms +[2025-09-11 14:11:49] [Rank 0] step:7221/10000 train_time:306342ms step_avg:42.42ms +[2025-09-11 14:11:49] [Rank 0] step:7221/10000 train_time:306342ms step_avg:42.42ms +[2025-09-11 14:11:50] [Rank 0] step:7241/10000 train_time:307046ms step_avg:42.40ms +[2025-09-11 14:11:50] [Rank 0] step:7241/10000 train_time:307046ms step_avg:42.40ms +[2025-09-11 14:11:50] [Rank 0] step:7261/10000 train_time:307752ms step_avg:42.38ms +[2025-09-11 14:11:50] [Rank 0] step:7261/10000 train_time:307752ms step_avg:42.38ms +[2025-09-11 14:11:51] [Rank 0] step:7281/10000 train_time:308462ms step_avg:42.37ms +[2025-09-11 14:11:51] [Rank 0] step:7281/10000 
train_time:308462ms step_avg:42.37ms +[2025-09-11 14:11:52] [Rank 0] step:7301/10000 train_time:309166ms step_avg:42.35ms +[2025-09-11 14:11:52] [Rank 0] step:7301/10000 train_time:309166ms step_avg:42.35ms +[2025-09-11 14:11:53] [Rank 0] step:7321/10000 train_time:309869ms step_avg:42.33ms +[2025-09-11 14:11:53] [Rank 0] step:7321/10000 train_time:309869ms step_avg:42.33ms +[2025-09-11 14:11:53] [Rank 0] step:7341/10000 train_time:310575ms step_avg:42.31ms +[2025-09-11 14:11:53] [Rank 0] step:7341/10000 train_time:310575ms step_avg:42.31ms +[2025-09-11 14:11:54] [Rank 0] step:7361/10000 train_time:311279ms step_avg:42.29ms +[2025-09-11 14:11:54] [Rank 0] step:7361/10000 train_time:311279ms step_avg:42.29ms +[2025-09-11 14:11:55] [Rank 0] step:7381/10000 train_time:311984ms step_avg:42.27ms +[2025-09-11 14:11:55] [Rank 0] step:7381/10000 train_time:311984ms step_avg:42.27ms +[2025-09-11 14:11:55] [Rank 0] step:7401/10000 train_time:312692ms step_avg:42.25ms +[2025-09-11 14:11:55] [Rank 0] step:7401/10000 train_time:312692ms step_avg:42.25ms +[2025-09-11 14:11:56] [Rank 0] step:7421/10000 train_time:313396ms step_avg:42.23ms +[2025-09-11 14:11:56] [Rank 0] step:7421/10000 train_time:313396ms step_avg:42.23ms +[2025-09-11 14:11:57] [Rank 0] step:7441/10000 train_time:314100ms step_avg:42.21ms +[2025-09-11 14:11:57] [Rank 0] step:7441/10000 train_time:314100ms step_avg:42.21ms +[2025-09-11 14:11:57] [Rank 0] step:7461/10000 train_time:314805ms step_avg:42.19ms +[2025-09-11 14:11:57] [Rank 0] step:7461/10000 train_time:314805ms step_avg:42.19ms +[2025-09-11 14:11:58] [Rank 0] step:7481/10000 train_time:315509ms step_avg:42.17ms +[2025-09-11 14:11:58] [Rank 0] step:7481/10000 train_time:315509ms step_avg:42.17ms +[2025-09-11 14:11:59] [Rank 0] step:7501/10000 train_time:316213ms step_avg:42.16ms +[2025-09-11 14:11:59] [Rank 0] step:7501/10000 train_time:316213ms step_avg:42.16ms +[2025-09-11 14:12:00] [Rank 0] step:7521/10000 train_time:316919ms step_avg:42.14ms 
+[2025-09-11 14:12:00] [Rank 0] step:7521/10000 train_time:316919ms step_avg:42.14ms +[2025-09-11 14:12:00] [Rank 0] step:7541/10000 train_time:317621ms step_avg:42.12ms +[2025-09-11 14:12:00] [Rank 0] step:7541/10000 train_time:317621ms step_avg:42.12ms +[2025-09-11 14:12:01] [Rank 0] step:7561/10000 train_time:318328ms step_avg:42.10ms +[2025-09-11 14:12:01] [Rank 0] step:7561/10000 train_time:318328ms step_avg:42.10ms +[2025-09-11 14:12:02] [Rank 0] step:7581/10000 train_time:319034ms step_avg:42.08ms +[2025-09-11 14:12:02] [Rank 0] step:7581/10000 train_time:319034ms step_avg:42.08ms +[2025-09-11 14:12:02] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 14:12:02] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 14:12:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 14:12:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 14:12:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 14:12:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 14:12:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:12:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:12:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 14:12:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 14:12:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 14:12:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 14:12:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 14:12:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 14:12:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 14:12:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 14:12:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 14:12:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 14:12:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 14:12:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 14:12:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 14:12:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 14:12:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 14:12:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 14:12:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 14:12:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 14:12:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 14:12:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 14:12:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 14:12:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 14:12:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 14:12:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 14:12:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 14:12:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 14:12:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 14:12:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 14:12:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 14:12:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 14:12:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 14:12:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 14:12:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 14:12:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 14:12:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 14:12:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 14:12:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 14:12:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 14:12:17] [Rank 0] PRINT: step:7600/10000 val_loss:4.6850 total_sharp:8.3987e-05 L1_sharp:9.0486e-02 L2_sharp:2.1188e-01 L3_sharp:2.5975e-01 L4_sharp:3.2536e-01 L5_sharp:4.1060e-01 L6_sharp:4.7696e-01 L7_sharp:4.8230e-01 L8_sharp:4.3840e-01 L9_sharp:4.9428e-01 L10_sharp:5.5464e-01 L11_sharp:9.0659e-01 L12_sharp:8.1819e-01 total_fnorm:4.1500e+01 total_l1_linf:6.4512e+04 total_spectral:2.0750e+01 L1_fnorm:2.3438e-02 L2_fnorm:2.3804e-02 L3_fnorm:2.4048e-02 L4_fnorm:2.3804e-02 L5_fnorm:2.3804e-02 L6_fnorm:2.3804e-02 L7_fnorm:2.3682e-02 L8_fnorm:2.2949e-02 L9_fnorm:2.3438e-02 L10_fnorm:2.3193e-02 L11_fnorm:2.3193e-02 L12_fnorm:2.2949e-02 L1_l1linf:4.5471e-03 L2_l1linf:4.9133e-03 L3_l1linf:5.2795e-03 L4_l1linf:5.4016e-03 L5_l1linf:5.4321e-03 L6_l1linf:5.6763e-03 L7_l1linf:5.4626e-03 L8_l1linf:5.6763e-03 L9_l1linf:5.6458e-03 L10_l1linf:5.7983e-03 L11_l1linf:5.6763e-03 L12_l1linf:5.8899e-03 L1_spectral:4.4981e-04 L2_spectral:4.4537e-04 L3_spectral:4.4801e-04 L4_spectral:4.4680e-04 L5_spectral:4.4180e-04 L6_spectral:4.4931e-04 L7_spectral:4.4723e-04 L8_spectral:4.2726e-04 L9_spectral:4.4415e-04 L10_spectral:4.3897e-04 L11_spectral:4.4048e-04 L12_spectral:4.2740e-04 train_time:319720ms step_avg:42.07ms +[2025-09-11 14:12:17] [Rank 0] PRINT: step:7600/10000 val_loss:4.6850 total_sharp:8.3987e-05 L1_sharp:9.0486e-02 L2_sharp:2.1188e-01 L3_sharp:2.5975e-01 L4_sharp:3.2536e-01 L5_sharp:4.1060e-01 L6_sharp:4.7696e-01 L7_sharp:4.8230e-01 L8_sharp:4.3840e-01 L9_sharp:4.9428e-01 L10_sharp:5.5464e-01 L11_sharp:9.0659e-01 L12_sharp:8.1819e-01 total_fnorm:4.1500e+01 total_l1_linf:6.4512e+04 total_spectral:2.0750e+01 L1_fnorm:2.3438e-02 L2_fnorm:2.3804e-02 L3_fnorm:2.4048e-02 L4_fnorm:2.3804e-02 L5_fnorm:2.3804e-02 L6_fnorm:2.3804e-02 L7_fnorm:2.3682e-02 L8_fnorm:2.2949e-02 L9_fnorm:2.3438e-02 L10_fnorm:2.3193e-02 L11_fnorm:2.3193e-02 L12_fnorm:2.2949e-02 L1_l1linf:4.5471e-03 L2_l1linf:4.9133e-03 L3_l1linf:5.2795e-03 L4_l1linf:5.4016e-03 L5_l1linf:5.4321e-03 
L6_l1linf:5.6763e-03 L7_l1linf:5.4626e-03 L8_l1linf:5.6763e-03 L9_l1linf:5.6458e-03 L10_l1linf:5.7983e-03 L11_l1linf:5.6763e-03 L12_l1linf:5.8899e-03 L1_spectral:4.4981e-04 L2_spectral:4.4537e-04 L3_spectral:4.4801e-04 L4_spectral:4.4680e-04 L5_spectral:4.4180e-04 L6_spectral:4.4931e-04 L7_spectral:4.4723e-04 L8_spectral:4.2726e-04 L9_spectral:4.4415e-04 L10_spectral:4.3897e-04 L11_spectral:4.4048e-04 L12_spectral:4.2740e-04 train_time:319720ms step_avg:42.07ms +[2025-09-11 14:12:18] [Rank 0] step:7601/10000 train_time:321384ms step_avg:42.28ms +[2025-09-11 14:12:18] [Rank 0] step:7601/10000 train_time:321384ms step_avg:42.28ms +[2025-09-11 14:12:19] [Rank 0] step:7621/10000 train_time:322102ms step_avg:42.27ms +[2025-09-11 14:12:19] [Rank 0] step:7621/10000 train_time:322102ms step_avg:42.27ms +[2025-09-11 14:12:20] [Rank 0] step:7641/10000 train_time:322808ms step_avg:42.25ms +[2025-09-11 14:12:20] [Rank 0] step:7641/10000 train_time:322808ms step_avg:42.25ms +[2025-09-11 14:12:20] [Rank 0] step:7661/10000 train_time:323627ms step_avg:42.24ms +[2025-09-11 14:12:20] [Rank 0] step:7661/10000 train_time:323627ms step_avg:42.24ms +[2025-09-11 14:12:21] [Rank 0] step:7681/10000 train_time:324484ms step_avg:42.25ms +[2025-09-11 14:12:21] [Rank 0] step:7681/10000 train_time:324484ms step_avg:42.25ms +[2025-09-11 14:12:22] [Rank 0] step:7701/10000 train_time:325191ms step_avg:42.23ms +[2025-09-11 14:12:22] [Rank 0] step:7701/10000 train_time:325191ms step_avg:42.23ms +[2025-09-11 14:12:23] [Rank 0] step:7721/10000 train_time:325898ms step_avg:42.21ms +[2025-09-11 14:12:23] [Rank 0] step:7721/10000 train_time:325898ms step_avg:42.21ms +[2025-09-11 14:12:23] [Rank 0] step:7741/10000 train_time:326604ms step_avg:42.19ms +[2025-09-11 14:12:23] [Rank 0] step:7741/10000 train_time:326604ms step_avg:42.19ms +[2025-09-11 14:12:24] [Rank 0] step:7761/10000 train_time:327307ms step_avg:42.17ms +[2025-09-11 14:12:24] [Rank 0] step:7761/10000 train_time:327307ms step_avg:42.17ms 
+[2025-09-11 14:12:25] [Rank 0] step:7781/10000 train_time:328013ms step_avg:42.16ms +[2025-09-11 14:12:25] [Rank 0] step:7781/10000 train_time:328013ms step_avg:42.16ms +[2025-09-11 14:12:26] [Rank 0] step:7801/10000 train_time:328716ms step_avg:42.14ms +[2025-09-11 14:12:26] [Rank 0] step:7801/10000 train_time:328716ms step_avg:42.14ms +[2025-09-11 14:12:26] [Rank 0] step:7821/10000 train_time:329420ms step_avg:42.12ms +[2025-09-11 14:12:26] [Rank 0] step:7821/10000 train_time:329420ms step_avg:42.12ms +[2025-09-11 14:12:27] [Rank 0] step:7841/10000 train_time:330126ms step_avg:42.10ms +[2025-09-11 14:12:27] [Rank 0] step:7841/10000 train_time:330126ms step_avg:42.10ms +[2025-09-11 14:12:28] [Rank 0] step:7861/10000 train_time:330834ms step_avg:42.09ms +[2025-09-11 14:12:28] [Rank 0] step:7861/10000 train_time:330834ms step_avg:42.09ms +[2025-09-11 14:12:28] [Rank 0] step:7881/10000 train_time:331539ms step_avg:42.07ms +[2025-09-11 14:12:28] [Rank 0] step:7881/10000 train_time:331539ms step_avg:42.07ms +[2025-09-11 14:12:29] [Rank 0] step:7901/10000 train_time:332245ms step_avg:42.05ms +[2025-09-11 14:12:29] [Rank 0] step:7901/10000 train_time:332245ms step_avg:42.05ms +[2025-09-11 14:12:30] [Rank 0] step:7921/10000 train_time:332950ms step_avg:42.03ms +[2025-09-11 14:12:30] [Rank 0] step:7921/10000 train_time:332950ms step_avg:42.03ms +[2025-09-11 14:12:30] [Rank 0] step:7941/10000 train_time:333656ms step_avg:42.02ms +[2025-09-11 14:12:30] [Rank 0] step:7941/10000 train_time:333656ms step_avg:42.02ms +[2025-09-11 14:12:31] [Rank 0] step:7961/10000 train_time:334359ms step_avg:42.00ms +[2025-09-11 14:12:31] [Rank 0] step:7961/10000 train_time:334359ms step_avg:42.00ms +[2025-09-11 14:12:32] [Rank 0] step:7981/10000 train_time:335067ms step_avg:41.98ms +[2025-09-11 14:12:32] [Rank 0] step:7981/10000 train_time:335067ms step_avg:41.98ms +[2025-09-11 14:12:33] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 14:12:33] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 14:12:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 14:12:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 14:12:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 14:12:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 14:12:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:12:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:12:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 14:12:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 14:12:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 14:12:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 14:12:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 14:12:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 14:12:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 14:12:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 14:12:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 14:12:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 14:12:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 14:12:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 14:12:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 14:12:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 14:12:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 14:12:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 14:12:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 14:12:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 14:12:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 14:12:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 14:12:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 14:12:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 14:12:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 14:12:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 14:12:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 14:12:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 14:12:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 14:12:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 14:12:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 14:12:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 14:12:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 14:12:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 14:12:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 14:12:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 14:12:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 14:12:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 14:12:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 14:12:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 14:12:43] [Rank 0] PRINT: step:8000/10000 val_loss:4.6638 total_sharp:7.0454e-05 L1_sharp:8.4148e-02 L2_sharp:1.6313e-01 L3_sharp:2.5555e-01 L4_sharp:3.0962e-01 L5_sharp:3.9115e-01 L6_sharp:4.9704e-01 L7_sharp:5.3276e-01 L8_sharp:4.5577e-01 L9_sharp:5.5045e-01 L10_sharp:6.5915e-01 L11_sharp:1.0689e+00 L12_sharp:3.6370e+00 total_fnorm:3.6500e+01 total_l1_linf:5.4784e+04 total_spectral:1.8250e+01 L1_fnorm:1.8555e-02 L2_fnorm:1.8799e-02 L3_fnorm:1.9043e-02 L4_fnorm:1.8921e-02 L5_fnorm:1.8921e-02 L6_fnorm:1.9043e-02 L7_fnorm:1.8799e-02 L8_fnorm:1.8188e-02 L9_fnorm:1.8677e-02 L10_fnorm:1.8555e-02 L11_fnorm:1.8433e-02 L12_fnorm:1.8311e-02 L1_l1linf:3.4027e-03 L2_l1linf:3.5400e-03 L3_l1linf:3.9673e-03 L4_l1linf:4.0283e-03 L5_l1linf:4.0894e-03 L6_l1linf:4.2725e-03 L7_l1linf:4.2114e-03 L8_l1linf:4.2725e-03 L9_l1linf:4.2114e-03 L10_l1linf:4.1504e-03 L11_l1linf:4.3640e-03 L12_l1linf:4.2419e-03 L1_spectral:3.6607e-04 L2_spectral:3.6186e-04 L3_spectral:3.6489e-04 L4_spectral:3.6082e-04 L5_spectral:3.6334e-04 L6_spectral:3.6441e-04 L7_spectral:3.6354e-04 L8_spectral:3.4528e-04 L9_spectral:3.6176e-04 L10_spectral:3.5852e-04 L11_spectral:3.5678e-04 L12_spectral:3.4752e-04 train_time:335749ms step_avg:41.97ms +[2025-09-11 14:12:43] [Rank 0] PRINT: step:8000/10000 
val_loss:4.6638 total_sharp:7.0454e-05 L1_sharp:8.4148e-02 L2_sharp:1.6313e-01 L3_sharp:2.5555e-01 L4_sharp:3.0962e-01 L5_sharp:3.9115e-01 L6_sharp:4.9704e-01 L7_sharp:5.3276e-01 L8_sharp:4.5577e-01 L9_sharp:5.5045e-01 L10_sharp:6.5915e-01 L11_sharp:1.0689e+00 L12_sharp:3.6370e+00 total_fnorm:3.6500e+01 total_l1_linf:5.4784e+04 total_spectral:1.8250e+01 L1_fnorm:1.8555e-02 L2_fnorm:1.8799e-02 L3_fnorm:1.9043e-02 L4_fnorm:1.8921e-02 L5_fnorm:1.8921e-02 L6_fnorm:1.9043e-02 L7_fnorm:1.8799e-02 L8_fnorm:1.8188e-02 L9_fnorm:1.8677e-02 L10_fnorm:1.8555e-02 L11_fnorm:1.8433e-02 L12_fnorm:1.8311e-02 L1_l1linf:3.4027e-03 L2_l1linf:3.5400e-03 L3_l1linf:3.9673e-03 L4_l1linf:4.0283e-03 L5_l1linf:4.0894e-03 L6_l1linf:4.2725e-03 L7_l1linf:4.2114e-03 L8_l1linf:4.2725e-03 L9_l1linf:4.2114e-03 L10_l1linf:4.1504e-03 L11_l1linf:4.3640e-03 L12_l1linf:4.2419e-03 L1_spectral:3.6607e-04 L2_spectral:3.6186e-04 L3_spectral:3.6489e-04 L4_spectral:3.6082e-04 L5_spectral:3.6334e-04 L6_spectral:3.6441e-04 L7_spectral:3.6354e-04 L8_spectral:3.4528e-04 L9_spectral:3.6176e-04 L10_spectral:3.5852e-04 L11_spectral:3.5678e-04 L12_spectral:3.4752e-04 train_time:335749ms step_avg:41.97ms +[2025-09-11 14:12:45] [Rank 0] step:8001/10000 train_time:337441ms step_avg:42.17ms +[2025-09-11 14:12:45] [Rank 0] step:8001/10000 train_time:337441ms step_avg:42.17ms +[2025-09-11 14:12:46] [Rank 0] step:8021/10000 train_time:338164ms step_avg:42.16ms +[2025-09-11 14:12:46] [Rank 0] step:8021/10000 train_time:338164ms step_avg:42.16ms +[2025-09-11 14:12:46] [Rank 0] step:8041/10000 train_time:338872ms step_avg:42.14ms +[2025-09-11 14:12:46] [Rank 0] step:8041/10000 train_time:338872ms step_avg:42.14ms +[2025-09-11 14:12:47] [Rank 0] step:8061/10000 train_time:339580ms step_avg:42.13ms +[2025-09-11 14:12:47] [Rank 0] step:8061/10000 train_time:339580ms step_avg:42.13ms +[2025-09-11 14:12:48] [Rank 0] step:8081/10000 train_time:340291ms step_avg:42.11ms +[2025-09-11 14:12:48] [Rank 0] step:8081/10000 
train_time:340291ms step_avg:42.11ms +[2025-09-11 14:12:49] [Rank 0] step:8101/10000 train_time:340996ms step_avg:42.09ms +[2025-09-11 14:12:49] [Rank 0] step:8101/10000 train_time:340996ms step_avg:42.09ms +[2025-09-11 14:12:49] [Rank 0] step:8121/10000 train_time:341706ms step_avg:42.08ms +[2025-09-11 14:12:49] [Rank 0] step:8121/10000 train_time:341706ms step_avg:42.08ms +[2025-09-11 14:12:51] [Rank 0] step:8141/10000 train_time:343166ms step_avg:42.15ms +[2025-09-11 14:12:51] [Rank 0] step:8141/10000 train_time:343166ms step_avg:42.15ms +[2025-09-11 14:12:51] [Rank 0] step:8161/10000 train_time:343875ms step_avg:42.14ms +[2025-09-11 14:12:51] [Rank 0] step:8161/10000 train_time:343875ms step_avg:42.14ms +[2025-09-11 14:12:52] [Rank 0] step:8181/10000 train_time:344592ms step_avg:42.12ms +[2025-09-11 14:12:52] [Rank 0] step:8181/10000 train_time:344592ms step_avg:42.12ms +[2025-09-11 14:12:53] [Rank 0] step:8201/10000 train_time:345306ms step_avg:42.11ms +[2025-09-11 14:12:53] [Rank 0] step:8201/10000 train_time:345306ms step_avg:42.11ms +[2025-09-11 14:12:54] [Rank 0] step:8221/10000 train_time:346019ms step_avg:42.09ms +[2025-09-11 14:12:54] [Rank 0] step:8221/10000 train_time:346019ms step_avg:42.09ms +[2025-09-11 14:12:54] [Rank 0] step:8241/10000 train_time:346739ms step_avg:42.07ms +[2025-09-11 14:12:54] [Rank 0] step:8241/10000 train_time:346739ms step_avg:42.07ms +[2025-09-11 14:12:55] [Rank 0] step:8261/10000 train_time:347451ms step_avg:42.06ms +[2025-09-11 14:12:55] [Rank 0] step:8261/10000 train_time:347451ms step_avg:42.06ms +[2025-09-11 14:12:56] [Rank 0] step:8281/10000 train_time:348160ms step_avg:42.04ms +[2025-09-11 14:12:56] [Rank 0] step:8281/10000 train_time:348160ms step_avg:42.04ms +[2025-09-11 14:12:56] [Rank 0] step:8301/10000 train_time:348871ms step_avg:42.03ms +[2025-09-11 14:12:56] [Rank 0] step:8301/10000 train_time:348871ms step_avg:42.03ms +[2025-09-11 14:12:57] [Rank 0] step:8321/10000 train_time:349585ms step_avg:42.01ms 
+[2025-09-11 14:12:57] [Rank 0] step:8321/10000 train_time:349585ms step_avg:42.01ms +[2025-09-11 14:12:58] [Rank 0] step:8341/10000 train_time:350303ms step_avg:42.00ms +[2025-09-11 14:12:58] [Rank 0] step:8341/10000 train_time:350303ms step_avg:42.00ms +[2025-09-11 14:12:59] [Rank 0] step:8361/10000 train_time:351011ms step_avg:41.98ms +[2025-09-11 14:12:59] [Rank 0] step:8361/10000 train_time:351011ms step_avg:41.98ms +[2025-09-11 14:12:59] [Rank 0] step:8381/10000 train_time:351726ms step_avg:41.97ms +[2025-09-11 14:12:59] [Rank 0] step:8381/10000 train_time:351726ms step_avg:41.97ms +[2025-09-11 14:13:00] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 14:13:00] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 14:13:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 14:13:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 14:13:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 14:13:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 14:13:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:13:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:13:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 14:13:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 14:13:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 14:13:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 14:13:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 14:13:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 14:13:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 14:13:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 14:13:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 14:13:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 14:13:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 14:13:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 14:13:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 14:13:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 14:13:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 14:13:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 14:13:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 14:13:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 14:13:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 14:13:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 14:13:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 14:13:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 14:13:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 14:13:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 14:13:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 14:13:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 14:13:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 14:13:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 14:13:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 14:13:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 14:13:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 14:13:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 14:13:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 14:13:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 14:13:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 14:13:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 14:13:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 14:13:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 14:13:13] [Rank 0] PRINT: step:8400/10000 val_loss:4.6473 total_sharp:6.4774e-05 L1_sharp:7.9033e-02 L2_sharp:1.3889e-01 L3_sharp:2.1425e-01 L4_sharp:2.9152e-01 L5_sharp:3.6948e-01 L6_sharp:4.5010e-01 L7_sharp:4.6072e-01 L8_sharp:4.3160e-01 L9_sharp:4.9742e-01 L10_sharp:5.9222e-01 L11_sharp:7.1170e-01 L12_sharp:6.4524e-01 total_fnorm:2.8375e+01 total_l1_linf:3.8400e+04 total_spectral:1.4188e+01 L1_fnorm:1.4099e-02 L2_fnorm:1.4282e-02 L3_fnorm:1.4526e-02 L4_fnorm:1.4404e-02 L5_fnorm:1.4465e-02 L6_fnorm:1.4526e-02 L7_fnorm:1.4404e-02 L8_fnorm:1.3855e-02 L9_fnorm:1.4282e-02 L10_fnorm:1.4099e-02 L11_fnorm:1.4099e-02 L12_fnorm:1.3855e-02 L1_l1linf:2.2430e-03 L2_l1linf:2.5024e-03 L3_l1linf:2.6703e-03 L4_l1linf:2.7161e-03 L5_l1linf:2.9297e-03 L6_l1linf:3.0212e-03 L7_l1linf:3.0518e-03 L8_l1linf:2.9602e-03 L9_l1linf:2.9144e-03 L10_l1linf:2.9907e-03 L11_l1linf:2.8839e-03 L12_l1linf:2.8534e-03 L1_spectral:2.8858e-04 L2_spectral:2.8107e-04 L3_spectral:2.8520e-04 L4_spectral:2.8254e-04 L5_spectral:2.8320e-04 L6_spectral:2.8506e-04 L7_spectral:2.8289e-04 L8_spectral:2.6736e-04 L9_spectral:2.7977e-04 L10_spectral:2.7462e-04 L11_spectral:2.7992e-04 L12_spectral:2.7044e-04 train_time:352421ms step_avg:41.95ms +[2025-09-11 14:13:13] [Rank 0] PRINT: step:8400/10000 val_loss:4.6473 total_sharp:6.4774e-05 L1_sharp:7.9033e-02 L2_sharp:1.3889e-01 L3_sharp:2.1425e-01 L4_sharp:2.9152e-01 L5_sharp:3.6948e-01 L6_sharp:4.5010e-01 L7_sharp:4.6072e-01 L8_sharp:4.3160e-01 L9_sharp:4.9742e-01 L10_sharp:5.9222e-01 L11_sharp:7.1170e-01 L12_sharp:6.4524e-01 total_fnorm:2.8375e+01 total_l1_linf:3.8400e+04 total_spectral:1.4188e+01 L1_fnorm:1.4099e-02 L2_fnorm:1.4282e-02 L3_fnorm:1.4526e-02 L4_fnorm:1.4404e-02 L5_fnorm:1.4465e-02 L6_fnorm:1.4526e-02 L7_fnorm:1.4404e-02 L8_fnorm:1.3855e-02 L9_fnorm:1.4282e-02 L10_fnorm:1.4099e-02 L11_fnorm:1.4099e-02 L12_fnorm:1.3855e-02 L1_l1linf:2.2430e-03 L2_l1linf:2.5024e-03 L3_l1linf:2.6703e-03 L4_l1linf:2.7161e-03 L5_l1linf:2.9297e-03 
L6_l1linf:3.0212e-03 L7_l1linf:3.0518e-03 L8_l1linf:2.9602e-03 L9_l1linf:2.9144e-03 L10_l1linf:2.9907e-03 L11_l1linf:2.8839e-03 L12_l1linf:2.8534e-03 L1_spectral:2.8858e-04 L2_spectral:2.8107e-04 L3_spectral:2.8520e-04 L4_spectral:2.8254e-04 L5_spectral:2.8320e-04 L6_spectral:2.8506e-04 L7_spectral:2.8289e-04 L8_spectral:2.6736e-04 L9_spectral:2.7977e-04 L10_spectral:2.7462e-04 L11_spectral:2.7992e-04 L12_spectral:2.7044e-04 train_time:352421ms step_avg:41.95ms +[2025-09-11 14:13:15] [Rank 0] step:8401/10000 train_time:354228ms step_avg:42.17ms +[2025-09-11 14:13:15] [Rank 0] step:8401/10000 train_time:354228ms step_avg:42.17ms +[2025-09-11 14:13:16] [Rank 0] step:8421/10000 train_time:354957ms step_avg:42.15ms +[2025-09-11 14:13:16] [Rank 0] step:8421/10000 train_time:354957ms step_avg:42.15ms +[2025-09-11 14:13:17] [Rank 0] step:8441/10000 train_time:355671ms step_avg:42.14ms +[2025-09-11 14:13:17] [Rank 0] step:8441/10000 train_time:355671ms step_avg:42.14ms +[2025-09-11 14:13:17] [Rank 0] step:8461/10000 train_time:356384ms step_avg:42.12ms +[2025-09-11 14:13:17] [Rank 0] step:8461/10000 train_time:356384ms step_avg:42.12ms +[2025-09-11 14:13:18] [Rank 0] step:8481/10000 train_time:357098ms step_avg:42.11ms +[2025-09-11 14:13:18] [Rank 0] step:8481/10000 train_time:357098ms step_avg:42.11ms +[2025-09-11 14:13:19] [Rank 0] step:8501/10000 train_time:357810ms step_avg:42.09ms +[2025-09-11 14:13:19] [Rank 0] step:8501/10000 train_time:357810ms step_avg:42.09ms +[2025-09-11 14:13:20] [Rank 0] step:8521/10000 train_time:358522ms step_avg:42.08ms +[2025-09-11 14:13:20] [Rank 0] step:8521/10000 train_time:358522ms step_avg:42.08ms +[2025-09-11 14:13:20] [Rank 0] step:8541/10000 train_time:359234ms step_avg:42.06ms +[2025-09-11 14:13:20] [Rank 0] step:8541/10000 train_time:359234ms step_avg:42.06ms +[2025-09-11 14:13:21] [Rank 0] step:8561/10000 train_time:359951ms step_avg:42.05ms +[2025-09-11 14:13:21] [Rank 0] step:8561/10000 train_time:359951ms step_avg:42.05ms 
+[2025-09-11 14:13:22] [Rank 0] step:8581/10000 train_time:360919ms step_avg:42.06ms +[2025-09-11 14:13:22] [Rank 0] step:8581/10000 train_time:360919ms step_avg:42.06ms +[2025-09-11 14:13:23] [Rank 0] step:8601/10000 train_time:361633ms step_avg:42.05ms +[2025-09-11 14:13:23] [Rank 0] step:8601/10000 train_time:361633ms step_avg:42.05ms +[2025-09-11 14:13:23] [Rank 0] step:8621/10000 train_time:362344ms step_avg:42.03ms +[2025-09-11 14:13:23] [Rank 0] step:8621/10000 train_time:362344ms step_avg:42.03ms +[2025-09-11 14:13:24] [Rank 0] step:8641/10000 train_time:363306ms step_avg:42.04ms +[2025-09-11 14:13:24] [Rank 0] step:8641/10000 train_time:363306ms step_avg:42.04ms +[2025-09-11 14:13:25] [Rank 0] step:8661/10000 train_time:364018ms step_avg:42.03ms +[2025-09-11 14:13:25] [Rank 0] step:8661/10000 train_time:364018ms step_avg:42.03ms +[2025-09-11 14:13:26] [Rank 0] step:8681/10000 train_time:364732ms step_avg:42.01ms +[2025-09-11 14:13:26] [Rank 0] step:8681/10000 train_time:364732ms step_avg:42.01ms +[2025-09-11 14:13:26] [Rank 0] step:8701/10000 train_time:365444ms step_avg:42.00ms +[2025-09-11 14:13:26] [Rank 0] step:8701/10000 train_time:365444ms step_avg:42.00ms +[2025-09-11 14:13:27] [Rank 0] step:8721/10000 train_time:366158ms step_avg:41.99ms +[2025-09-11 14:13:27] [Rank 0] step:8721/10000 train_time:366158ms step_avg:41.99ms +[2025-09-11 14:13:28] [Rank 0] step:8741/10000 train_time:366866ms step_avg:41.97ms +[2025-09-11 14:13:28] [Rank 0] step:8741/10000 train_time:366866ms step_avg:41.97ms +[2025-09-11 14:13:29] [Rank 0] step:8761/10000 train_time:367581ms step_avg:41.96ms +[2025-09-11 14:13:29] [Rank 0] step:8761/10000 train_time:367581ms step_avg:41.96ms +[2025-09-11 14:13:29] [Rank 0] step:8781/10000 train_time:368290ms step_avg:41.94ms +[2025-09-11 14:13:29] [Rank 0] step:8781/10000 train_time:368290ms step_avg:41.94ms +[2025-09-11 14:13:30] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 14:13:30] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 14:13:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 14:13:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 14:13:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 14:13:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 14:13:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:13:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:13:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 14:13:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 14:13:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 14:13:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 14:13:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 14:13:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 14:13:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 14:13:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 14:13:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 14:13:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 14:13:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 14:13:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 14:13:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 14:13:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 14:13:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 14:13:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 14:13:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 14:13:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 14:13:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 14:13:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 14:13:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 14:13:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 14:13:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 14:13:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 14:13:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 14:13:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 14:13:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 14:13:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 14:13:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 14:13:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 14:13:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 14:13:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 14:13:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 14:13:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 14:13:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 14:13:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 14:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 14:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 14:13:41] [Rank 0] PRINT: step:8800/10000 val_loss:4.6417 total_sharp:5.7265e-05 L1_sharp:5.9176e-02 L2_sharp:1.1500e-01 L3_sharp:1.8689e-01 L4_sharp:2.1622e-01 L5_sharp:2.5770e-01 L6_sharp:3.7544e-01 L7_sharp:3.8696e-01 L8_sharp:3.8103e-01 L9_sharp:4.4608e-01 L10_sharp:4.6922e-01 L11_sharp:6.2761e-01 L12_sharp:5.7214e-01 total_fnorm:2.1125e+01 total_l1_linf:2.5728e+04 total_spectral:1.0562e+01 L1_fnorm:9.8877e-03 L2_fnorm:1.0071e-02 L3_fnorm:1.0254e-02 L4_fnorm:1.0193e-02 L5_fnorm:1.0193e-02 L6_fnorm:1.0315e-02 L7_fnorm:1.0132e-02 L8_fnorm:9.7656e-03 L9_fnorm:1.0071e-02 L10_fnorm:9.9487e-03 L11_fnorm:9.8877e-03 L12_fnorm:9.7046e-03 L1_l1linf:1.4343e-03 L2_l1linf:1.5945e-03 L3_l1linf:1.7014e-03 L4_l1linf:1.7853e-03 L5_l1linf:1.8768e-03 L6_l1linf:1.9302e-03 L7_l1linf:1.8539e-03 L8_l1linf:1.8768e-03 L9_l1linf:1.9989e-03 L10_l1linf:1.8539e-03 L11_l1linf:1.8921e-03 L12_l1linf:1.7700e-03 L1_spectral:2.0506e-04 L2_spectral:2.0451e-04 L3_spectral:2.0767e-04 L4_spectral:2.0479e-04 L5_spectral:2.0389e-04 L6_spectral:2.0871e-04 L7_spectral:2.0569e-04 L8_spectral:1.9073e-04 L9_spectral:2.0130e-04 L10_spectral:1.9971e-04 L11_spectral:2.0123e-04 L12_spectral:1.9488e-04 train_time:368979ms step_avg:41.93ms +[2025-09-11 14:13:41] [Rank 0] PRINT: step:8800/10000 
val_loss:4.6417 total_sharp:5.7265e-05 L1_sharp:5.9176e-02 L2_sharp:1.1500e-01 L3_sharp:1.8689e-01 L4_sharp:2.1622e-01 L5_sharp:2.5770e-01 L6_sharp:3.7544e-01 L7_sharp:3.8696e-01 L8_sharp:3.8103e-01 L9_sharp:4.4608e-01 L10_sharp:4.6922e-01 L11_sharp:6.2761e-01 L12_sharp:5.7214e-01 total_fnorm:2.1125e+01 total_l1_linf:2.5728e+04 total_spectral:1.0562e+01 L1_fnorm:9.8877e-03 L2_fnorm:1.0071e-02 L3_fnorm:1.0254e-02 L4_fnorm:1.0193e-02 L5_fnorm:1.0193e-02 L6_fnorm:1.0315e-02 L7_fnorm:1.0132e-02 L8_fnorm:9.7656e-03 L9_fnorm:1.0071e-02 L10_fnorm:9.9487e-03 L11_fnorm:9.8877e-03 L12_fnorm:9.7046e-03 L1_l1linf:1.4343e-03 L2_l1linf:1.5945e-03 L3_l1linf:1.7014e-03 L4_l1linf:1.7853e-03 L5_l1linf:1.8768e-03 L6_l1linf:1.9302e-03 L7_l1linf:1.8539e-03 L8_l1linf:1.8768e-03 L9_l1linf:1.9989e-03 L10_l1linf:1.8539e-03 L11_l1linf:1.8921e-03 L12_l1linf:1.7700e-03 L1_spectral:2.0506e-04 L2_spectral:2.0451e-04 L3_spectral:2.0767e-04 L4_spectral:2.0479e-04 L5_spectral:2.0389e-04 L6_spectral:2.0871e-04 L7_spectral:2.0569e-04 L8_spectral:1.9073e-04 L9_spectral:2.0130e-04 L10_spectral:1.9971e-04 L11_spectral:2.0123e-04 L12_spectral:1.9488e-04 train_time:368979ms step_avg:41.93ms +[2025-09-11 14:13:42] [Rank 0] step:8801/10000 train_time:370686ms step_avg:42.12ms +[2025-09-11 14:13:42] [Rank 0] step:8801/10000 train_time:370686ms step_avg:42.12ms +[2025-09-11 14:13:43] [Rank 0] step:8821/10000 train_time:371418ms step_avg:42.11ms +[2025-09-11 14:13:43] [Rank 0] step:8821/10000 train_time:371418ms step_avg:42.11ms +[2025-09-11 14:13:44] [Rank 0] step:8841/10000 train_time:372131ms step_avg:42.09ms +[2025-09-11 14:13:44] [Rank 0] step:8841/10000 train_time:372131ms step_avg:42.09ms +[2025-09-11 14:13:45] [Rank 0] step:8861/10000 train_time:372843ms step_avg:42.08ms +[2025-09-11 14:13:45] [Rank 0] step:8861/10000 train_time:372843ms step_avg:42.08ms +[2025-09-11 14:13:45] [Rank 0] step:8881/10000 train_time:373558ms step_avg:42.06ms +[2025-09-11 14:13:45] [Rank 0] step:8881/10000 
train_time:373558ms step_avg:42.06ms +[2025-09-11 14:13:46] [Rank 0] step:8901/10000 train_time:374274ms step_avg:42.05ms +[2025-09-11 14:13:46] [Rank 0] step:8901/10000 train_time:374274ms step_avg:42.05ms +[2025-09-11 14:13:47] [Rank 0] step:8921/10000 train_time:374984ms step_avg:42.03ms +[2025-09-11 14:13:47] [Rank 0] step:8921/10000 train_time:374984ms step_avg:42.03ms +[2025-09-11 14:13:47] [Rank 0] step:8941/10000 train_time:375700ms step_avg:42.02ms +[2025-09-11 14:13:47] [Rank 0] step:8941/10000 train_time:375700ms step_avg:42.02ms +[2025-09-11 14:13:48] [Rank 0] step:8961/10000 train_time:376423ms step_avg:42.01ms +[2025-09-11 14:13:48] [Rank 0] step:8961/10000 train_time:376423ms step_avg:42.01ms +[2025-09-11 14:13:49] [Rank 0] step:8981/10000 train_time:377141ms step_avg:41.99ms +[2025-09-11 14:13:49] [Rank 0] step:8981/10000 train_time:377141ms step_avg:41.99ms +[2025-09-11 14:13:50] [Rank 0] step:9001/10000 train_time:377849ms step_avg:41.98ms +[2025-09-11 14:13:50] [Rank 0] step:9001/10000 train_time:377849ms step_avg:41.98ms +[2025-09-11 14:13:50] [Rank 0] step:9021/10000 train_time:378562ms step_avg:41.96ms +[2025-09-11 14:13:50] [Rank 0] step:9021/10000 train_time:378562ms step_avg:41.96ms +[2025-09-11 14:13:51] [Rank 0] step:9041/10000 train_time:379277ms step_avg:41.95ms +[2025-09-11 14:13:51] [Rank 0] step:9041/10000 train_time:379277ms step_avg:41.95ms +[2025-09-11 14:13:52] [Rank 0] step:9061/10000 train_time:379989ms step_avg:41.94ms +[2025-09-11 14:13:52] [Rank 0] step:9061/10000 train_time:379989ms step_avg:41.94ms +[2025-09-11 14:13:52] [Rank 0] step:9081/10000 train_time:380705ms step_avg:41.92ms +[2025-09-11 14:13:52] [Rank 0] step:9081/10000 train_time:380705ms step_avg:41.92ms +[2025-09-11 14:13:53] [Rank 0] step:9101/10000 train_time:381423ms step_avg:41.91ms +[2025-09-11 14:13:53] [Rank 0] step:9101/10000 train_time:381423ms step_avg:41.91ms +[2025-09-11 14:13:54] [Rank 0] step:9121/10000 train_time:382141ms step_avg:41.90ms 
+[2025-09-11 14:13:54] [Rank 0] step:9121/10000 train_time:382141ms step_avg:41.90ms +[2025-09-11 14:13:55] [Rank 0] step:9141/10000 train_time:382852ms step_avg:41.88ms +[2025-09-11 14:13:55] [Rank 0] step:9141/10000 train_time:382852ms step_avg:41.88ms +[2025-09-11 14:13:55] [Rank 0] step:9161/10000 train_time:383570ms step_avg:41.87ms +[2025-09-11 14:13:55] [Rank 0] step:9161/10000 train_time:383570ms step_avg:41.87ms +[2025-09-11 14:13:56] [Rank 0] step:9181/10000 train_time:384286ms step_avg:41.86ms +[2025-09-11 14:13:56] [Rank 0] step:9181/10000 train_time:384286ms step_avg:41.86ms +[2025-09-11 14:13:57] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 14:13:57] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 14:13:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 14:13:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 14:13:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 14:13:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 14:13:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:13:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:13:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 14:13:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 14:13:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 14:13:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 14:14:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 14:14:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 14:14:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 14:14:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 14:14:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 14:14:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 14:14:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 14:14:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 14:14:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 14:14:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 14:14:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 14:14:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 14:14:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 14:14:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 14:14:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 14:14:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 14:14:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 14:14:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 14:14:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 14:14:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 14:14:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 14:14:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 14:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 14:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 14:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 14:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 14:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 14:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 14:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 14:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 14:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 14:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 14:14:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 14:14:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 14:14:07] [Rank 0] PRINT: step:9200/10000 val_loss:4.6250 total_sharp:5.6170e-05 L1_sharp:3.9561e-02 L2_sharp:9.3393e-02 L3_sharp:1.6597e-01 L4_sharp:2.4693e-01 L5_sharp:2.7500e-01 L6_sharp:3.8866e-01 L7_sharp:4.0623e-01 L8_sharp:3.7621e-01 L9_sharp:5.4128e-01 L10_sharp:6.0159e-01 L11_sharp:1.1727e+00 L12_sharp:2.2724e+00 total_fnorm:1.4875e+01 total_l1_linf:1.5936e+04 total_spectral:7.4375e+00 L1_fnorm:6.4697e-03 L2_fnorm:6.5918e-03 L3_fnorm:6.7444e-03 L4_fnorm:6.7749e-03 L5_fnorm:6.7749e-03 L6_fnorm:6.8054e-03 L7_fnorm:6.7444e-03 L8_fnorm:6.4697e-03 L9_fnorm:6.6528e-03 L10_fnorm:6.5613e-03 L11_fnorm:6.5918e-03 L12_fnorm:6.4697e-03 L1_l1linf:9.1934e-04 L2_l1linf:9.2697e-04 L3_l1linf:1.0529e-03 L4_l1linf:1.1444e-03 L5_l1linf:1.1902e-03 L6_l1linf:1.1826e-03 L7_l1linf:1.1292e-03 L8_l1linf:1.1368e-03 L9_l1linf:1.1749e-03 L10_l1linf:1.1978e-03 L11_l1linf:1.1444e-03 L12_l1linf:1.1749e-03 L1_spectral:1.3522e-04 L2_spectral:1.3287e-04 L3_spectral:1.3346e-04 L4_spectral:1.3272e-04 L5_spectral:1.3478e-04 L6_spectral:1.3613e-04 L7_spectral:1.3524e-04 L8_spectral:1.2563e-04 L9_spectral:1.3256e-04 L10_spectral:1.3109e-04 L11_spectral:1.3158e-04 L12_spectral:1.2915e-04 train_time:384982ms step_avg:41.85ms +[2025-09-11 14:14:07] [Rank 0] PRINT: step:9200/10000 val_loss:4.6250 total_sharp:5.6170e-05 L1_sharp:3.9561e-02 L2_sharp:9.3393e-02 L3_sharp:1.6597e-01 L4_sharp:2.4693e-01 L5_sharp:2.7500e-01 L6_sharp:3.8866e-01 L7_sharp:4.0623e-01 L8_sharp:3.7621e-01 L9_sharp:5.4128e-01 L10_sharp:6.0159e-01 L11_sharp:1.1727e+00 L12_sharp:2.2724e+00 total_fnorm:1.4875e+01 total_l1_linf:1.5936e+04 total_spectral:7.4375e+00 L1_fnorm:6.4697e-03 L2_fnorm:6.5918e-03 L3_fnorm:6.7444e-03 L4_fnorm:6.7749e-03 L5_fnorm:6.7749e-03 L6_fnorm:6.8054e-03 L7_fnorm:6.7444e-03 L8_fnorm:6.4697e-03 L9_fnorm:6.6528e-03 L10_fnorm:6.5613e-03 L11_fnorm:6.5918e-03 L12_fnorm:6.4697e-03 L1_l1linf:9.1934e-04 L2_l1linf:9.2697e-04 L3_l1linf:1.0529e-03 L4_l1linf:1.1444e-03 L5_l1linf:1.1902e-03 
L6_l1linf:1.1826e-03 L7_l1linf:1.1292e-03 L8_l1linf:1.1368e-03 L9_l1linf:1.1749e-03 L10_l1linf:1.1978e-03 L11_l1linf:1.1444e-03 L12_l1linf:1.1749e-03 L1_spectral:1.3522e-04 L2_spectral:1.3287e-04 L3_spectral:1.3346e-04 L4_spectral:1.3272e-04 L5_spectral:1.3478e-04 L6_spectral:1.3613e-04 L7_spectral:1.3524e-04 L8_spectral:1.2563e-04 L9_spectral:1.3256e-04 L10_spectral:1.3109e-04 L11_spectral:1.3158e-04 L12_spectral:1.2915e-04 train_time:384982ms step_avg:41.85ms +[2025-09-11 14:14:09] [Rank 0] step:9201/10000 train_time:386932ms step_avg:42.05ms +[2025-09-11 14:14:09] [Rank 0] step:9201/10000 train_time:386932ms step_avg:42.05ms +[2025-09-11 14:14:10] [Rank 0] step:9221/10000 train_time:387674ms step_avg:42.04ms +[2025-09-11 14:14:10] [Rank 0] step:9221/10000 train_time:387674ms step_avg:42.04ms +[2025-09-11 14:14:11] [Rank 0] step:9241/10000 train_time:388387ms step_avg:42.03ms +[2025-09-11 14:14:11] [Rank 0] step:9241/10000 train_time:388387ms step_avg:42.03ms +[2025-09-11 14:14:11] [Rank 0] step:9261/10000 train_time:389104ms step_avg:42.02ms +[2025-09-11 14:14:11] [Rank 0] step:9261/10000 train_time:389104ms step_avg:42.02ms +[2025-09-11 14:14:12] [Rank 0] step:9281/10000 train_time:389820ms step_avg:42.00ms +[2025-09-11 14:14:12] [Rank 0] step:9281/10000 train_time:389820ms step_avg:42.00ms +[2025-09-11 14:14:13] [Rank 0] step:9301/10000 train_time:390531ms step_avg:41.99ms +[2025-09-11 14:14:13] [Rank 0] step:9301/10000 train_time:390531ms step_avg:41.99ms +[2025-09-11 14:14:14] [Rank 0] step:9321/10000 train_time:391248ms step_avg:41.97ms +[2025-09-11 14:14:14] [Rank 0] step:9321/10000 train_time:391248ms step_avg:41.97ms +[2025-09-11 14:14:14] [Rank 0] step:9341/10000 train_time:391959ms step_avg:41.96ms +[2025-09-11 14:14:14] [Rank 0] step:9341/10000 train_time:391959ms step_avg:41.96ms +[2025-09-11 14:14:15] [Rank 0] step:9361/10000 train_time:392669ms step_avg:41.95ms +[2025-09-11 14:14:15] [Rank 0] step:9361/10000 train_time:392669ms step_avg:41.95ms 
+[2025-09-11 14:14:16] [Rank 0] step:9381/10000 train_time:393382ms step_avg:41.93ms +[2025-09-11 14:14:16] [Rank 0] step:9381/10000 train_time:393382ms step_avg:41.93ms +[2025-09-11 14:14:16] [Rank 0] step:9401/10000 train_time:394098ms step_avg:41.92ms +[2025-09-11 14:14:16] [Rank 0] step:9401/10000 train_time:394098ms step_avg:41.92ms +[2025-09-11 14:14:17] [Rank 0] step:9421/10000 train_time:394814ms step_avg:41.91ms +[2025-09-11 14:14:17] [Rank 0] step:9421/10000 train_time:394814ms step_avg:41.91ms +[2025-09-11 14:14:18] [Rank 0] step:9441/10000 train_time:395530ms step_avg:41.89ms +[2025-09-11 14:14:18] [Rank 0] step:9441/10000 train_time:395530ms step_avg:41.89ms +[2025-09-11 14:14:19] [Rank 0] step:9461/10000 train_time:396244ms step_avg:41.88ms +[2025-09-11 14:14:19] [Rank 0] step:9461/10000 train_time:396244ms step_avg:41.88ms +[2025-09-11 14:14:19] [Rank 0] step:9481/10000 train_time:396959ms step_avg:41.87ms +[2025-09-11 14:14:19] [Rank 0] step:9481/10000 train_time:396959ms step_avg:41.87ms +[2025-09-11 14:14:20] [Rank 0] step:9501/10000 train_time:397675ms step_avg:41.86ms +[2025-09-11 14:14:20] [Rank 0] step:9501/10000 train_time:397675ms step_avg:41.86ms +[2025-09-11 14:14:21] [Rank 0] step:9521/10000 train_time:398392ms step_avg:41.84ms +[2025-09-11 14:14:21] [Rank 0] step:9521/10000 train_time:398392ms step_avg:41.84ms +[2025-09-11 14:14:21] [Rank 0] step:9541/10000 train_time:399104ms step_avg:41.83ms +[2025-09-11 14:14:21] [Rank 0] step:9541/10000 train_time:399104ms step_avg:41.83ms +[2025-09-11 14:14:22] [Rank 0] step:9561/10000 train_time:399820ms step_avg:41.82ms +[2025-09-11 14:14:22] [Rank 0] step:9561/10000 train_time:399820ms step_avg:41.82ms +[2025-09-11 14:14:23] [Rank 0] step:9581/10000 train_time:400536ms step_avg:41.81ms +[2025-09-11 14:14:23] [Rank 0] step:9581/10000 train_time:400536ms step_avg:41.81ms +[2025-09-11 14:14:24] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 14:14:24] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 14:14:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 14:14:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 14:14:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 14:14:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 14:14:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:14:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:14:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 14:14:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 14:14:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 14:14:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 14:14:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 14:14:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 14:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 14:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 14:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 14:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 14:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 14:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 14:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 14:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 14:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 14:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 14:14:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 14:14:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 14:14:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 14:14:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 14:14:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 14:14:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 14:14:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 14:14:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 14:14:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 14:14:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 14:14:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 14:14:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 14:14:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 14:14:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 14:14:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 14:14:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 14:14:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 14:14:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 14:14:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 14:14:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 14:14:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 14:14:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 14:14:34] [Rank 0] PRINT: step:9600/10000 val_loss:4.6163 total_sharp:3.8226e-05 L1_sharp:4.5152e-02 L2_sharp:7.6553e-02 L3_sharp:1.2371e-01 L4_sharp:1.5070e-01 L5_sharp:1.8995e-01 L6_sharp:2.8450e-01 L7_sharp:2.9841e-01 L8_sharp:3.1064e-01 L9_sharp:3.8265e-01 L10_sharp:4.3314e-01 L11_sharp:6.8789e-01 L12_sharp:6.7314e-01 total_fnorm:8.6875e+00 total_l1_linf:7.9040e+03 total_spectral:4.3438e+00 L1_fnorm:3.6011e-03 L2_fnorm:3.6469e-03 L3_fnorm:3.7537e-03 L4_fnorm:3.7689e-03 L5_fnorm:3.7689e-03 L6_fnorm:3.7842e-03 L7_fnorm:3.7689e-03 L8_fnorm:3.6163e-03 L9_fnorm:3.7537e-03 L10_fnorm:3.7079e-03 L11_fnorm:3.6926e-03 L12_fnorm:3.6163e-03 L1_l1linf:4.0627e-04 L2_l1linf:4.4060e-04 L3_l1linf:4.9973e-04 L4_l1linf:5.5313e-04 L5_l1linf:5.3024e-04 L6_l1linf:5.6458e-04 L7_l1linf:5.5313e-04 L8_l1linf:5.6458e-04 L9_l1linf:5.7602e-04 L10_l1linf:5.9509e-04 L11_l1linf:6.2943e-04 L12_l1linf:6.1035e-04 L1_spectral:7.6707e-05 L2_spectral:7.5502e-05 L3_spectral:7.6459e-05 L4_spectral:7.6072e-05 L5_spectral:7.5020e-05 L6_spectral:7.5992e-05 L7_spectral:7.6160e-05 L8_spectral:7.0149e-05 L9_spectral:7.4582e-05 L10_spectral:7.2726e-05 L11_spectral:7.3610e-05 L12_spectral:7.1756e-05 train_time:401228ms step_avg:41.79ms +[2025-09-11 14:14:34] [Rank 0] PRINT: step:9600/10000 
val_loss:4.6163 total_sharp:3.8226e-05 L1_sharp:4.5152e-02 L2_sharp:7.6553e-02 L3_sharp:1.2371e-01 L4_sharp:1.5070e-01 L5_sharp:1.8995e-01 L6_sharp:2.8450e-01 L7_sharp:2.9841e-01 L8_sharp:3.1064e-01 L9_sharp:3.8265e-01 L10_sharp:4.3314e-01 L11_sharp:6.8789e-01 L12_sharp:6.7314e-01 total_fnorm:8.6875e+00 total_l1_linf:7.9040e+03 total_spectral:4.3438e+00 L1_fnorm:3.6011e-03 L2_fnorm:3.6469e-03 L3_fnorm:3.7537e-03 L4_fnorm:3.7689e-03 L5_fnorm:3.7689e-03 L6_fnorm:3.7842e-03 L7_fnorm:3.7689e-03 L8_fnorm:3.6163e-03 L9_fnorm:3.7537e-03 L10_fnorm:3.7079e-03 L11_fnorm:3.6926e-03 L12_fnorm:3.6163e-03 L1_l1linf:4.0627e-04 L2_l1linf:4.4060e-04 L3_l1linf:4.9973e-04 L4_l1linf:5.5313e-04 L5_l1linf:5.3024e-04 L6_l1linf:5.6458e-04 L7_l1linf:5.5313e-04 L8_l1linf:5.6458e-04 L9_l1linf:5.7602e-04 L10_l1linf:5.9509e-04 L11_l1linf:6.2943e-04 L12_l1linf:6.1035e-04 L1_spectral:7.6707e-05 L2_spectral:7.5502e-05 L3_spectral:7.6459e-05 L4_spectral:7.6072e-05 L5_spectral:7.5020e-05 L6_spectral:7.5992e-05 L7_spectral:7.6160e-05 L8_spectral:7.0149e-05 L9_spectral:7.4582e-05 L10_spectral:7.2726e-05 L11_spectral:7.3610e-05 L12_spectral:7.1756e-05 train_time:401228ms step_avg:41.79ms +[2025-09-11 14:14:36] [Rank 0] step:9601/10000 train_time:402970ms step_avg:41.97ms +[2025-09-11 14:14:36] [Rank 0] step:9601/10000 train_time:402970ms step_avg:41.97ms +[2025-09-11 14:14:37] [Rank 0] step:9621/10000 train_time:403716ms step_avg:41.96ms +[2025-09-11 14:14:37] [Rank 0] step:9621/10000 train_time:403716ms step_avg:41.96ms +[2025-09-11 14:14:38] [Rank 0] step:9641/10000 train_time:404437ms step_avg:41.95ms +[2025-09-11 14:14:38] [Rank 0] step:9641/10000 train_time:404437ms step_avg:41.95ms +[2025-09-11 14:14:38] [Rank 0] step:9661/10000 train_time:405165ms step_avg:41.94ms +[2025-09-11 14:14:38] [Rank 0] step:9661/10000 train_time:405165ms step_avg:41.94ms +[2025-09-11 14:14:39] [Rank 0] step:9681/10000 train_time:405886ms step_avg:41.93ms +[2025-09-11 14:14:39] [Rank 0] step:9681/10000 
train_time:405886ms step_avg:41.93ms +[2025-09-11 14:14:40] [Rank 0] step:9701/10000 train_time:406608ms step_avg:41.91ms +[2025-09-11 14:14:40] [Rank 0] step:9701/10000 train_time:406608ms step_avg:41.91ms +[2025-09-11 14:14:41] [Rank 0] step:9721/10000 train_time:407333ms step_avg:41.90ms +[2025-09-11 14:14:41] [Rank 0] step:9721/10000 train_time:407333ms step_avg:41.90ms +[2025-09-11 14:14:41] [Rank 0] step:9741/10000 train_time:408056ms step_avg:41.89ms +[2025-09-11 14:14:41] [Rank 0] step:9741/10000 train_time:408056ms step_avg:41.89ms +[2025-09-11 14:14:42] [Rank 0] step:9761/10000 train_time:408778ms step_avg:41.88ms +[2025-09-11 14:14:42] [Rank 0] step:9761/10000 train_time:408778ms step_avg:41.88ms +[2025-09-11 14:14:43] [Rank 0] step:9781/10000 train_time:409499ms step_avg:41.87ms +[2025-09-11 14:14:43] [Rank 0] step:9781/10000 train_time:409499ms step_avg:41.87ms +[2025-09-11 14:14:44] [Rank 0] step:9801/10000 train_time:410226ms step_avg:41.86ms +[2025-09-11 14:14:44] [Rank 0] step:9801/10000 train_time:410226ms step_avg:41.86ms +[2025-09-11 14:14:44] [Rank 0] step:9821/10000 train_time:410948ms step_avg:41.84ms +[2025-09-11 14:14:44] [Rank 0] step:9821/10000 train_time:410948ms step_avg:41.84ms +[2025-09-11 14:14:45] [Rank 0] step:9841/10000 train_time:411674ms step_avg:41.83ms +[2025-09-11 14:14:45] [Rank 0] step:9841/10000 train_time:411674ms step_avg:41.83ms +[2025-09-11 14:14:46] [Rank 0] step:9861/10000 train_time:412395ms step_avg:41.82ms +[2025-09-11 14:14:46] [Rank 0] step:9861/10000 train_time:412395ms step_avg:41.82ms +[2025-09-11 14:14:46] [Rank 0] step:9881/10000 train_time:413116ms step_avg:41.81ms +[2025-09-11 14:14:46] [Rank 0] step:9881/10000 train_time:413116ms step_avg:41.81ms +[2025-09-11 14:14:47] [Rank 0] step:9901/10000 train_time:413834ms step_avg:41.80ms +[2025-09-11 14:14:47] [Rank 0] step:9901/10000 train_time:413834ms step_avg:41.80ms +[2025-09-11 14:14:48] [Rank 0] step:9921/10000 train_time:414555ms step_avg:41.79ms 
+[2025-09-11 14:14:48] [Rank 0] step:9921/10000 train_time:414555ms step_avg:41.79ms +[2025-09-11 14:14:49] [Rank 0] step:9941/10000 train_time:415282ms step_avg:41.77ms +[2025-09-11 14:14:49] [Rank 0] step:9941/10000 train_time:415282ms step_avg:41.77ms +[2025-09-11 14:14:49] [Rank 0] step:9961/10000 train_time:416008ms step_avg:41.76ms +[2025-09-11 14:14:49] [Rank 0] step:9961/10000 train_time:416008ms step_avg:41.76ms +[2025-09-11 14:14:50] [Rank 0] step:9981/10000 train_time:416732ms step_avg:41.75ms +[2025-09-11 14:14:50] [Rank 0] step:9981/10000 train_time:416732ms step_avg:41.75ms +[2025-09-11 14:14:51] [Rank 0] step:10000/10000 train_time:417427ms step_avg:41.74ms +[2025-09-11 14:14:51] [Rank 0] step:10000/10000 train_time:417427ms step_avg:41.74ms +[2025-09-11 14:14:51] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 14:14:51] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 14:14:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 14:14:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 14:14:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 14:14:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 14:14:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:14:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:14:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 14:14:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 14:14:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 14:14:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 14:14:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 14:14:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 14:14:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 14:14:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 14:14:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 14:14:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 14:14:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 14:14:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 14:14:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 14:14:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 14:14:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 14:14:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 14:14:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 14:14:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 14:14:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 14:14:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 14:14:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 14:14:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 14:14:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 14:14:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 14:14:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 14:14:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 14:15:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 14:15:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 14:15:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 14:15:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 14:15:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 14:15:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 14:15:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 14:15:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 14:15:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 14:15:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 14:15:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 14:15:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 14:15:01] [Rank 0] PRINT: step:10000/10000 val_loss:4.6143 total_sharp:2.7267e-05 L1_sharp:3.1498e-02 L2_sharp:5.8591e-02 L3_sharp:1.0781e-01 L4_sharp:1.3158e-01 L5_sharp:1.8273e-01 L6_sharp:2.1838e-01 L7_sharp:2.5967e-01 L8_sharp:2.4910e-01 L9_sharp:3.0342e-01 L10_sharp:3.5207e-01 L11_sharp:3.5860e-01 L12_sharp:6.8227e-01 total_fnorm:3.3125e+00 total_l1_linf:2.2080e+03 total_spectral:1.6562e+00 L1_fnorm:1.3809e-03 L2_fnorm:1.4114e-03 L3_fnorm:1.4496e-03 L4_fnorm:1.4572e-03 L5_fnorm:1.4496e-03 L6_fnorm:1.4725e-03 L7_fnorm:1.4572e-03 L8_fnorm:1.3885e-03 L9_fnorm:1.4420e-03 L10_fnorm:1.4267e-03 L11_fnorm:1.4191e-03 L12_fnorm:1.3885e-03 L1_l1linf:1.2589e-04 L2_l1linf:1.4019e-04 L3_l1linf:1.5354e-04 L4_l1linf:1.5736e-04 L5_l1linf:1.6880e-04 L6_l1linf:1.6689e-04 L7_l1linf:1.6212e-04 L8_l1linf:1.6880e-04 L9_l1linf:1.6785e-04 L10_l1linf:1.7071e-04 L11_l1linf:1.8311e-04 L12_l1linf:1.7929e-04 L1_spectral:3.0662e-05 L2_spectral:3.0295e-05 L3_spectral:3.0492e-05 L4_spectral:3.0449e-05 L5_spectral:3.0443e-05 L6_spectral:3.0740e-05 L7_spectral:3.0548e-05 L8_spectral:2.8125e-05 L9_spectral:3.0541e-05 L10_spectral:2.9568e-05 L11_spectral:2.9422e-05 L12_spectral:2.8578e-05 train_time:417448ms step_avg:41.74ms +[2025-09-11 14:15:01] [Rank 0] PRINT: step:10000/10000 val_loss:4.6143 total_sharp:2.7267e-05 L1_sharp:3.1498e-02 L2_sharp:5.8591e-02 L3_sharp:1.0781e-01 L4_sharp:1.3158e-01 L5_sharp:1.8273e-01 L6_sharp:2.1838e-01 L7_sharp:2.5967e-01 L8_sharp:2.4910e-01 L9_sharp:3.0342e-01 L10_sharp:3.5207e-01 L11_sharp:3.5860e-01 L12_sharp:6.8227e-01 total_fnorm:3.3125e+00 total_l1_linf:2.2080e+03 total_spectral:1.6562e+00 L1_fnorm:1.3809e-03 L2_fnorm:1.4114e-03 L3_fnorm:1.4496e-03 L4_fnorm:1.4572e-03 L5_fnorm:1.4496e-03 L6_fnorm:1.4725e-03 L7_fnorm:1.4572e-03 L8_fnorm:1.3885e-03 L9_fnorm:1.4420e-03 L10_fnorm:1.4267e-03 L11_fnorm:1.4191e-03 L12_fnorm:1.3885e-03 L1_l1linf:1.2589e-04 L2_l1linf:1.4019e-04 L3_l1linf:1.5354e-04 L4_l1linf:1.5736e-04 L5_l1linf:1.6880e-04 
L6_l1linf:1.6689e-04 L7_l1linf:1.6212e-04 L8_l1linf:1.6880e-04 L9_l1linf:1.6785e-04 L10_l1linf:1.7071e-04 L11_l1linf:1.8311e-04 L12_l1linf:1.7929e-04 L1_spectral:3.0662e-05 L2_spectral:3.0295e-05 L3_spectral:3.0492e-05 L4_spectral:3.0449e-05 L5_spectral:3.0443e-05 L6_spectral:3.0740e-05 L7_spectral:3.0548e-05 L8_spectral:2.8125e-05 L9_spectral:3.0541e-05 L10_spectral:2.9568e-05 L11_spectral:2.9422e-05 L12_spectral:2.8578e-05 train_time:417448ms step_avg:41.74ms +[2025-09-11 14:15:01] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 14:15:01 2025 --- +[2025-09-11 14:15:01] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 14:15:01 2025 --- +[2025-09-11 14:15:01] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 14:15:01] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.001_seed_42/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.001_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..859ba5ecaa13ff9b6cced14e426030d162ea5079 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.001_seed_42/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.02, + "muon_lr": 0.001, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "4f2374fd-085e-4344-ad06-3cc6eadbcb03", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.001_seed_42/training_log_4f2374fd-085e-4344-ad06-3cc6eadbcb03.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.001_seed_42/training_log_4f2374fd-085e-4344-ad06-3cc6eadbcb03.txt new file mode 100644 index 0000000000000000000000000000000000000000..761a1596dab69707a28ce3ca372b8b12d4a2f3fd --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.001_seed_42/training_log_4f2374fd-085e-4344-ad06-3cc6eadbcb03.txt @@ -0,0 +1,4264 @@ +[2025-09-11 10:19:35] [Rank 0] PRINT: --- Script Start: Thu Sep 11 10:19:35 2025 --- +[2025-09-11 10:19:35] [Rank 0] PRINT: --- Script Start: Thu Sep 11 10:19:35 2025 --- +[2025-09-11 10:19:35] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.02, muon_lr=0.001, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 10:19:35] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.02, muon_lr=0.001, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 10:19:35] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 10:19:35] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 10:19:35] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-11 10:19:35] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-11 10:19:35] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.001_seed_42 +[2025-09-11 10:19:35] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.001_seed_42 +[2025-09-11 10:19:35] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import 
dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, 
"unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." 
+ "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention 
sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 10:19:35] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 10:19:35] [Rank 0] PRINT: Constructing model... +[2025-09-11 10:19:35] [Rank 0] PRINT: Constructing model... +[2025-09-11 10:19:36] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 10:19:36] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 10:19:36] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 10:19:36] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 10:19:36] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 10:19:36] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 10:19:36] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 10:19:36] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 10:19:36] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 10:19:36] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 10:19:38] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 10:19:38] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 10:19:38] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 10:19:38] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 10:19:38] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 10:19:38] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 10:19:45] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 10:19:45] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 10:19:45] [Rank 0] PRINT: Starting warmup... +[2025-09-11 10:19:45] [Rank 0] PRINT: Starting warmup... +[2025-09-11 10:20:24] [Rank 0] PRINT: Warmup complete. +[2025-09-11 10:20:24] [Rank 0] PRINT: Warmup complete. +[2025-09-11 10:20:24] [Rank 0] PRINT: Starting training... +[2025-09-11 10:20:24] [Rank 0] PRINT: Starting training... 
+[2025-09-11 10:20:25] [Rank 0] step:21/10000 train_time:1138ms step_avg:54.20ms +[2025-09-11 10:20:25] [Rank 0] step:21/10000 train_time:1138ms step_avg:54.20ms +[2025-09-11 10:20:26] [Rank 0] step:41/10000 train_time:1868ms step_avg:45.57ms +[2025-09-11 10:20:26] [Rank 0] step:41/10000 train_time:1868ms step_avg:45.57ms +[2025-09-11 10:20:26] [Rank 0] step:61/10000 train_time:2598ms step_avg:42.59ms +[2025-09-11 10:20:26] [Rank 0] step:61/10000 train_time:2598ms step_avg:42.59ms +[2025-09-11 10:20:27] [Rank 0] step:81/10000 train_time:3593ms step_avg:44.35ms +[2025-09-11 10:20:27] [Rank 0] step:81/10000 train_time:3593ms step_avg:44.35ms +[2025-09-11 10:20:28] [Rank 0] step:101/10000 train_time:4322ms step_avg:42.79ms +[2025-09-11 10:20:28] [Rank 0] step:101/10000 train_time:4322ms step_avg:42.79ms +[2025-09-11 10:20:29] [Rank 0] step:121/10000 train_time:5051ms step_avg:41.74ms +[2025-09-11 10:20:29] [Rank 0] step:121/10000 train_time:5051ms step_avg:41.74ms +[2025-09-11 10:20:29] [Rank 0] step:141/10000 train_time:5780ms step_avg:40.99ms +[2025-09-11 10:20:29] [Rank 0] step:141/10000 train_time:5780ms step_avg:40.99ms +[2025-09-11 10:20:30] [Rank 0] step:161/10000 train_time:6508ms step_avg:40.42ms +[2025-09-11 10:20:30] [Rank 0] step:161/10000 train_time:6508ms step_avg:40.42ms +[2025-09-11 10:20:31] [Rank 0] step:181/10000 train_time:7236ms step_avg:39.98ms +[2025-09-11 10:20:31] [Rank 0] step:181/10000 train_time:7236ms step_avg:39.98ms +[2025-09-11 10:20:32] [Rank 0] step:201/10000 train_time:7964ms step_avg:39.62ms +[2025-09-11 10:20:32] [Rank 0] step:201/10000 train_time:7964ms step_avg:39.62ms +[2025-09-11 10:20:32] [Rank 0] step:221/10000 train_time:8694ms step_avg:39.34ms +[2025-09-11 10:20:32] [Rank 0] step:221/10000 train_time:8694ms step_avg:39.34ms +[2025-09-11 10:20:33] [Rank 0] step:241/10000 train_time:9423ms step_avg:39.10ms +[2025-09-11 10:20:33] [Rank 0] step:241/10000 train_time:9423ms step_avg:39.10ms +[2025-09-11 10:20:34] [Rank 0] 
step:261/10000 train_time:10151ms step_avg:38.89ms +[2025-09-11 10:20:34] [Rank 0] step:261/10000 train_time:10151ms step_avg:38.89ms +[2025-09-11 10:20:35] [Rank 0] step:281/10000 train_time:10881ms step_avg:38.72ms +[2025-09-11 10:20:35] [Rank 0] step:281/10000 train_time:10881ms step_avg:38.72ms +[2025-09-11 10:20:35] [Rank 0] step:301/10000 train_time:11609ms step_avg:38.57ms +[2025-09-11 10:20:35] [Rank 0] step:301/10000 train_time:11609ms step_avg:38.57ms +[2025-09-11 10:20:36] [Rank 0] step:321/10000 train_time:12338ms step_avg:38.43ms +[2025-09-11 10:20:36] [Rank 0] step:321/10000 train_time:12338ms step_avg:38.43ms +[2025-09-11 10:20:37] [Rank 0] step:341/10000 train_time:13066ms step_avg:38.32ms +[2025-09-11 10:20:37] [Rank 0] step:341/10000 train_time:13066ms step_avg:38.32ms +[2025-09-11 10:20:37] [Rank 0] step:361/10000 train_time:13795ms step_avg:38.21ms +[2025-09-11 10:20:37] [Rank 0] step:361/10000 train_time:13795ms step_avg:38.21ms +[2025-09-11 10:20:38] [Rank 0] step:381/10000 train_time:14523ms step_avg:38.12ms +[2025-09-11 10:20:38] [Rank 0] step:381/10000 train_time:14523ms step_avg:38.12ms +[2025-09-11 10:20:39] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 10:20:39] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 10:20:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 10:20:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 10:21:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 10:21:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 10:21:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:21:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 10:21:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 10:21:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 10:21:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 10:21:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 10:21:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 10:21:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 10:21:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 10:21:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 10:21:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 10:21:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 10:21:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 10:21:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 10:21:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 10:21:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 10:21:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 10:21:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 10:21:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 10:21:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 10:21:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 10:21:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 10:21:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 10:21:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 10:21:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 10:21:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 10:21:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 10:21:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 10:21:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 10:21:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 10:21:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 10:21:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 10:21:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 10:21:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 10:21:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 10:21:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 10:21:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 10:21:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 10:21:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:21:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:21:28] [Rank 0] PRINT: step:400/10000 val_loss:6.1371 total_sharp:4.3401e-04 L1_sharp:1.0999e-01 L2_sharp:1.1052e-01 L3_sharp:1.0023e-01 L4_sharp:1.2498e-01 L5_sharp:1.3821e-01 L6_sharp:1.3243e-01 L7_sharp:1.6769e-01 L8_sharp:1.4904e-01 L9_sharp:1.7169e-01 L10_sharp:2.8528e-01 L11_sharp:3.5878e-01 L12_sharp:4.6132e-01 total_fnorm:7.6284e+01 total_l1_linf:1.8657e+05 total_spectral:3.8132e+01 L1_fnorm:1.1938e-01 L2_fnorm:1.1923e-01 L3_fnorm:1.1937e-01 L4_fnorm:1.1913e-01 L5_fnorm:1.1903e-01 L6_fnorm:1.1918e-01 L7_fnorm:1.2014e-01 L8_fnorm:1.1934e-01 L9_fnorm:1.1921e-01 L10_fnorm:1.1877e-01 L11_fnorm:1.1875e-01 L12_fnorm:1.1738e-01 L1_l1linf:4.4968e-02 L2_l1linf:4.4925e-02 L3_l1linf:4.4651e-02 L4_l1linf:4.5062e-02 L5_l1linf:4.4231e-02 L6_l1linf:4.4462e-02 L7_l1linf:4.4314e-02 L8_l1linf:4.4332e-02 L9_l1linf:4.4482e-02 L10_l1linf:4.4397e-02 L11_l1linf:4.4487e-02 L12_l1linf:4.4039e-02 L1_spectral:1.2049e-03 L2_spectral:1.2047e-03 L3_spectral:1.2049e-03 L4_spectral:1.2051e-03 L5_spectral:1.2054e-03 L6_spectral:1.2061e-03 L7_spectral:1.2053e-03 L8_spectral:1.2054e-03 L9_spectral:1.2059e-03 L10_spectral:1.2048e-03 L11_spectral:1.2053e-03 L12_spectral:1.2051e-03 train_time:15232ms step_avg:38.08ms +[2025-09-11 10:21:28] [Rank 0] PRINT: step:400/10000 val_loss:6.1371 total_sharp:4.3401e-04 L1_sharp:1.0999e-01 L2_sharp:1.1052e-01 L3_sharp:1.0023e-01 L4_sharp:1.2498e-01 L5_sharp:1.3821e-01 L6_sharp:1.3243e-01 L7_sharp:1.6769e-01 L8_sharp:1.4904e-01 L9_sharp:1.7169e-01 L10_sharp:2.8528e-01 L11_sharp:3.5878e-01 L12_sharp:4.6132e-01 total_fnorm:7.6284e+01 total_l1_linf:1.8657e+05 total_spectral:3.8132e+01 L1_fnorm:1.1938e-01 L2_fnorm:1.1923e-01 L3_fnorm:1.1937e-01 L4_fnorm:1.1913e-01 L5_fnorm:1.1903e-01 L6_fnorm:1.1918e-01 L7_fnorm:1.2014e-01 L8_fnorm:1.1934e-01 L9_fnorm:1.1921e-01 L10_fnorm:1.1877e-01 L11_fnorm:1.1875e-01 L12_fnorm:1.1738e-01 L1_l1linf:4.4968e-02 L2_l1linf:4.4925e-02 L3_l1linf:4.4651e-02 L4_l1linf:4.5062e-02 L5_l1linf:4.4231e-02 
L6_l1linf:4.4462e-02 L7_l1linf:4.4314e-02 L8_l1linf:4.4332e-02 L9_l1linf:4.4482e-02 L10_l1linf:4.4397e-02 L11_l1linf:4.4487e-02 L12_l1linf:4.4039e-02 L1_spectral:1.2049e-03 L2_spectral:1.2047e-03 L3_spectral:1.2049e-03 L4_spectral:1.2051e-03 L5_spectral:1.2054e-03 L6_spectral:1.2061e-03 L7_spectral:1.2053e-03 L8_spectral:1.2054e-03 L9_spectral:1.2059e-03 L10_spectral:1.2048e-03 L11_spectral:1.2053e-03 L12_spectral:1.2051e-03 train_time:15232ms step_avg:38.08ms +[2025-09-11 10:22:03] [Rank 0] step:401/10000 train_time:49718ms step_avg:123.99ms +[2025-09-11 10:22:03] [Rank 0] step:401/10000 train_time:49718ms step_avg:123.99ms +[2025-09-11 10:22:05] [Rank 0] step:421/10000 train_time:52007ms step_avg:123.53ms +[2025-09-11 10:22:05] [Rank 0] step:421/10000 train_time:52007ms step_avg:123.53ms +[2025-09-11 10:22:06] [Rank 0] step:441/10000 train_time:52648ms step_avg:119.38ms +[2025-09-11 10:22:06] [Rank 0] step:441/10000 train_time:52648ms step_avg:119.38ms +[2025-09-11 10:22:06] [Rank 0] step:461/10000 train_time:53288ms step_avg:115.59ms +[2025-09-11 10:22:06] [Rank 0] step:461/10000 train_time:53288ms step_avg:115.59ms +[2025-09-11 10:22:07] [Rank 0] step:481/10000 train_time:53928ms step_avg:112.12ms +[2025-09-11 10:22:07] [Rank 0] step:481/10000 train_time:53928ms step_avg:112.12ms +[2025-09-11 10:22:08] [Rank 0] step:501/10000 train_time:54568ms step_avg:108.92ms +[2025-09-11 10:22:08] [Rank 0] step:501/10000 train_time:54568ms step_avg:108.92ms +[2025-09-11 10:22:08] [Rank 0] step:521/10000 train_time:55209ms step_avg:105.97ms +[2025-09-11 10:22:08] [Rank 0] step:521/10000 train_time:55209ms step_avg:105.97ms +[2025-09-11 10:22:09] [Rank 0] step:541/10000 train_time:55849ms step_avg:103.23ms +[2025-09-11 10:22:09] [Rank 0] step:541/10000 train_time:55849ms step_avg:103.23ms +[2025-09-11 10:22:10] [Rank 0] step:561/10000 train_time:56489ms step_avg:100.69ms +[2025-09-11 10:22:10] [Rank 0] step:561/10000 train_time:56489ms step_avg:100.69ms +[2025-09-11 10:22:10] 
[Rank 0] step:581/10000 train_time:57129ms step_avg:98.33ms +[2025-09-11 10:22:10] [Rank 0] step:581/10000 train_time:57129ms step_avg:98.33ms +[2025-09-11 10:22:11] [Rank 0] step:601/10000 train_time:57771ms step_avg:96.12ms +[2025-09-11 10:22:11] [Rank 0] step:601/10000 train_time:57771ms step_avg:96.12ms +[2025-09-11 10:22:11] [Rank 0] step:621/10000 train_time:58411ms step_avg:94.06ms +[2025-09-11 10:22:11] [Rank 0] step:621/10000 train_time:58411ms step_avg:94.06ms +[2025-09-11 10:22:12] [Rank 0] step:641/10000 train_time:59051ms step_avg:92.12ms +[2025-09-11 10:22:12] [Rank 0] step:641/10000 train_time:59051ms step_avg:92.12ms +[2025-09-11 10:22:13] [Rank 0] step:661/10000 train_time:59691ms step_avg:90.30ms +[2025-09-11 10:22:13] [Rank 0] step:661/10000 train_time:59691ms step_avg:90.30ms +[2025-09-11 10:22:13] [Rank 0] step:681/10000 train_time:60331ms step_avg:88.59ms +[2025-09-11 10:22:13] [Rank 0] step:681/10000 train_time:60331ms step_avg:88.59ms +[2025-09-11 10:22:14] [Rank 0] step:701/10000 train_time:60971ms step_avg:86.98ms +[2025-09-11 10:22:14] [Rank 0] step:701/10000 train_time:60971ms step_avg:86.98ms +[2025-09-11 10:22:15] [Rank 0] step:721/10000 train_time:61612ms step_avg:85.45ms +[2025-09-11 10:22:15] [Rank 0] step:721/10000 train_time:61612ms step_avg:85.45ms +[2025-09-11 10:22:15] [Rank 0] step:741/10000 train_time:62252ms step_avg:84.01ms +[2025-09-11 10:22:15] [Rank 0] step:741/10000 train_time:62252ms step_avg:84.01ms +[2025-09-11 10:22:16] [Rank 0] step:761/10000 train_time:62898ms step_avg:82.65ms +[2025-09-11 10:22:16] [Rank 0] step:761/10000 train_time:62898ms step_avg:82.65ms +[2025-09-11 10:22:17] [Rank 0] step:781/10000 train_time:63543ms step_avg:81.36ms +[2025-09-11 10:22:17] [Rank 0] step:781/10000 train_time:63543ms step_avg:81.36ms +[2025-09-11 10:22:17] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 10:22:17] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 10:22:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 10:22:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 10:22:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 10:22:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 10:22:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:22:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:22:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 10:22:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 10:22:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 10:22:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 10:22:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 10:22:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 10:22:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 10:22:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 10:23:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 10:23:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 10:23:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 10:23:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 10:23:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 10:23:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 10:23:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 10:23:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 10:23:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 10:23:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 10:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 10:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 10:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 10:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 10:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 10:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 10:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 10:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 10:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 10:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 10:23:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 10:23:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 10:23:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... 
+[2025-09-11 10:23:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 10:23:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 10:23:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 10:23:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 10:23:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 10:23:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:23:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:23:03] [Rank 0] PRINT: step:800/10000 val_loss:5.6690 total_sharp:3.6090e-04 L1_sharp:1.8120e-01 L2_sharp:1.6307e-01 L3_sharp:1.5699e-01 L4_sharp:1.7142e-01 L5_sharp:1.9105e-01 L6_sharp:2.1010e-01 L7_sharp:3.1023e-01 L8_sharp:4.4310e-01 L9_sharp:6.4277e-01 L10_sharp:6.4901e-01 L11_sharp:6.1281e-01 L12_sharp:8.7353e-01 total_fnorm:7.6000e+01 total_l1_linf:1.5974e+05 total_spectral:3.8000e+01 L1_fnorm:1.1084e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1475e-01 L4_fnorm:1.1426e-01 L5_fnorm:1.1328e-01 L6_fnorm:1.1572e-01 L7_fnorm:1.1670e-01 L8_fnorm:1.1230e-01 L9_fnorm:1.1279e-01 L10_fnorm:1.1182e-01 L11_fnorm:1.0938e-01 L12_fnorm:1.0205e-01 L1_l1linf:4.2725e-02 L2_l1linf:4.3457e-02 L3_l1linf:4.2725e-02 L4_l1linf:4.2725e-02 L5_l1linf:4.2725e-02 L6_l1linf:4.2725e-02 L7_l1linf:4.2725e-02 L8_l1linf:4.2480e-02 L9_l1linf:4.2480e-02 L10_l1linf:4.1748e-02 L11_l1linf:4.1748e-02 L12_l1linf:3.9795e-02 L1_spectral:1.5871e-03 L2_spectral:1.5957e-03 L3_spectral:1.5950e-03 L4_spectral:1.5653e-03 L5_spectral:1.5890e-03 L6_spectral:1.5848e-03 L7_spectral:1.6000e-03 L8_spectral:1.5888e-03 L9_spectral:1.5738e-03 L10_spectral:1.5824e-03 L11_spectral:1.5745e-03 L12_spectral:1.5512e-03 train_time:64171ms step_avg:80.21ms +[2025-09-11 10:23:03] [Rank 0] PRINT: step:800/10000 val_loss:5.6690 
total_sharp:3.6090e-04 L1_sharp:1.8120e-01 L2_sharp:1.6307e-01 L3_sharp:1.5699e-01 L4_sharp:1.7142e-01 L5_sharp:1.9105e-01 L6_sharp:2.1010e-01 L7_sharp:3.1023e-01 L8_sharp:4.4310e-01 L9_sharp:6.4277e-01 L10_sharp:6.4901e-01 L11_sharp:6.1281e-01 L12_sharp:8.7353e-01 total_fnorm:7.6000e+01 total_l1_linf:1.5974e+05 total_spectral:3.8000e+01 L1_fnorm:1.1084e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1475e-01 L4_fnorm:1.1426e-01 L5_fnorm:1.1328e-01 L6_fnorm:1.1572e-01 L7_fnorm:1.1670e-01 L8_fnorm:1.1230e-01 L9_fnorm:1.1279e-01 L10_fnorm:1.1182e-01 L11_fnorm:1.0938e-01 L12_fnorm:1.0205e-01 L1_l1linf:4.2725e-02 L2_l1linf:4.3457e-02 L3_l1linf:4.2725e-02 L4_l1linf:4.2725e-02 L5_l1linf:4.2725e-02 L6_l1linf:4.2725e-02 L7_l1linf:4.2725e-02 L8_l1linf:4.2480e-02 L9_l1linf:4.2480e-02 L10_l1linf:4.1748e-02 L11_l1linf:4.1748e-02 L12_l1linf:3.9795e-02 L1_spectral:1.5871e-03 L2_spectral:1.5957e-03 L3_spectral:1.5950e-03 L4_spectral:1.5653e-03 L5_spectral:1.5890e-03 L6_spectral:1.5848e-03 L7_spectral:1.6000e-03 L8_spectral:1.5888e-03 L9_spectral:1.5738e-03 L10_spectral:1.5824e-03 L11_spectral:1.5745e-03 L12_spectral:1.5512e-03 train_time:64171ms step_avg:80.21ms +[2025-09-11 10:23:05] [Rank 0] step:801/10000 train_time:65737ms step_avg:82.07ms +[2025-09-11 10:23:05] [Rank 0] step:801/10000 train_time:65737ms step_avg:82.07ms +[2025-09-11 10:23:05] [Rank 0] step:821/10000 train_time:66404ms step_avg:80.88ms +[2025-09-11 10:23:05] [Rank 0] step:821/10000 train_time:66404ms step_avg:80.88ms +[2025-09-11 10:23:06] [Rank 0] step:841/10000 train_time:67051ms step_avg:79.73ms +[2025-09-11 10:23:06] [Rank 0] step:841/10000 train_time:67051ms step_avg:79.73ms +[2025-09-11 10:23:07] [Rank 0] step:861/10000 train_time:67698ms step_avg:78.63ms +[2025-09-11 10:23:07] [Rank 0] step:861/10000 train_time:67698ms step_avg:78.63ms +[2025-09-11 10:23:07] [Rank 0] step:881/10000 train_time:68344ms step_avg:77.58ms +[2025-09-11 10:23:07] [Rank 0] step:881/10000 train_time:68344ms step_avg:77.58ms +[2025-09-11 
10:23:08] [Rank 0] step:901/10000 train_time:68990ms step_avg:76.57ms +[2025-09-11 10:23:08] [Rank 0] step:901/10000 train_time:68990ms step_avg:76.57ms +[2025-09-11 10:23:09] [Rank 0] step:921/10000 train_time:69636ms step_avg:75.61ms +[2025-09-11 10:23:09] [Rank 0] step:921/10000 train_time:69636ms step_avg:75.61ms +[2025-09-11 10:23:09] [Rank 0] step:941/10000 train_time:70282ms step_avg:74.69ms +[2025-09-11 10:23:09] [Rank 0] step:941/10000 train_time:70282ms step_avg:74.69ms +[2025-09-11 10:23:10] [Rank 0] step:961/10000 train_time:70928ms step_avg:73.81ms +[2025-09-11 10:23:10] [Rank 0] step:961/10000 train_time:70928ms step_avg:73.81ms +[2025-09-11 10:23:11] [Rank 0] step:981/10000 train_time:71574ms step_avg:72.96ms +[2025-09-11 10:23:11] [Rank 0] step:981/10000 train_time:71574ms step_avg:72.96ms +[2025-09-11 10:23:11] [Rank 0] step:1001/10000 train_time:72220ms step_avg:72.15ms +[2025-09-11 10:23:11] [Rank 0] step:1001/10000 train_time:72220ms step_avg:72.15ms +[2025-09-11 10:23:12] [Rank 0] step:1021/10000 train_time:72866ms step_avg:71.37ms +[2025-09-11 10:23:12] [Rank 0] step:1021/10000 train_time:72866ms step_avg:71.37ms +[2025-09-11 10:23:13] [Rank 0] step:1041/10000 train_time:73512ms step_avg:70.62ms +[2025-09-11 10:23:13] [Rank 0] step:1041/10000 train_time:73512ms step_avg:70.62ms +[2025-09-11 10:23:13] [Rank 0] step:1061/10000 train_time:74158ms step_avg:69.89ms +[2025-09-11 10:23:13] [Rank 0] step:1061/10000 train_time:74158ms step_avg:69.89ms +[2025-09-11 10:23:14] [Rank 0] step:1081/10000 train_time:74804ms step_avg:69.20ms +[2025-09-11 10:23:14] [Rank 0] step:1081/10000 train_time:74804ms step_avg:69.20ms +[2025-09-11 10:23:14] [Rank 0] step:1101/10000 train_time:75451ms step_avg:68.53ms +[2025-09-11 10:23:14] [Rank 0] step:1101/10000 train_time:75451ms step_avg:68.53ms +[2025-09-11 10:23:15] [Rank 0] step:1121/10000 train_time:76097ms step_avg:67.88ms +[2025-09-11 10:23:15] [Rank 0] step:1121/10000 train_time:76097ms step_avg:67.88ms 
+[2025-09-11 10:23:16] [Rank 0] step:1141/10000 train_time:76743ms step_avg:67.26ms +[2025-09-11 10:23:16] [Rank 0] step:1141/10000 train_time:76743ms step_avg:67.26ms +[2025-09-11 10:23:16] [Rank 0] step:1161/10000 train_time:77389ms step_avg:66.66ms +[2025-09-11 10:23:16] [Rank 0] step:1161/10000 train_time:77389ms step_avg:66.66ms +[2025-09-11 10:23:17] [Rank 0] step:1181/10000 train_time:78035ms step_avg:66.07ms +[2025-09-11 10:23:17] [Rank 0] step:1181/10000 train_time:78035ms step_avg:66.07ms +[2025-09-11 10:23:18] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 10:23:18] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 10:23:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 10:23:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 10:23:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 10:23:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 10:23:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:23:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:23:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 10:23:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 10:23:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 10:23:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 10:23:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... 
+[2025-09-11 10:23:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 10:23:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 10:23:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 10:23:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 10:23:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 10:23:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 10:23:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 10:23:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 10:23:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 10:23:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 10:23:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 10:23:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 10:23:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 10:23:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 10:23:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 10:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 10:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 10:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 10:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... 
+[2025-09-11 10:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 10:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 10:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 10:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 10:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 10:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 10:23:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 10:23:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 10:23:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 10:23:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 10:23:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 10:23:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 10:23:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:23:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:23:28] [Rank 0] PRINT: step:1200/10000 val_loss:5.3938 total_sharp:2.7926e-04 L1_sharp:1.4002e-01 L2_sharp:1.2158e-01 L3_sharp:1.2978e-01 L4_sharp:1.5340e-01 L5_sharp:1.6376e-01 L6_sharp:1.5782e-01 L7_sharp:1.9567e-01 L8_sharp:1.7327e-01 L9_sharp:1.8129e-01 L10_sharp:3.6296e-01 L11_sharp:6.4386e-01 L12_sharp:1.5388e+00 total_fnorm:7.8000e+01 total_l1_linf:1.5770e+05 total_spectral:3.9000e+01 L1_fnorm:1.1572e-01 L2_fnorm:1.1670e-01 L3_fnorm:1.1670e-01 L4_fnorm:1.1621e-01 L5_fnorm:1.1572e-01 L6_fnorm:1.1670e-01 L7_fnorm:1.1768e-01 L8_fnorm:1.1572e-01 L9_fnorm:1.1719e-01 L10_fnorm:1.1816e-01 L11_fnorm:1.1670e-01 L12_fnorm:1.1475e-01 L1_l1linf:4.0527e-02 L2_l1linf:4.0771e-02 L3_l1linf:4.0771e-02 L4_l1linf:4.0283e-02 L5_l1linf:4.0283e-02 L6_l1linf:3.9551e-02 L7_l1linf:4.0039e-02 L8_l1linf:3.9795e-02 L9_l1linf:4.0283e-02 L10_l1linf:4.1016e-02 L11_l1linf:4.2236e-02 L12_l1linf:4.1748e-02 L1_spectral:1.6102e-03 L2_spectral:1.5983e-03 L3_spectral:1.6128e-03 L4_spectral:1.6066e-03 L5_spectral:1.6057e-03 L6_spectral:1.6002e-03 L7_spectral:1.6001e-03 L8_spectral:1.5979e-03 L9_spectral:1.6091e-03 L10_spectral:1.5946e-03 L11_spectral:1.5938e-03 L12_spectral:1.5830e-03 train_time:78663ms step_avg:65.55ms +[2025-09-11 10:23:28] [Rank 0] PRINT: step:1200/10000 val_loss:5.3938 total_sharp:2.7926e-04 L1_sharp:1.4002e-01 L2_sharp:1.2158e-01 L3_sharp:1.2978e-01 L4_sharp:1.5340e-01 L5_sharp:1.6376e-01 L6_sharp:1.5782e-01 L7_sharp:1.9567e-01 L8_sharp:1.7327e-01 L9_sharp:1.8129e-01 L10_sharp:3.6296e-01 L11_sharp:6.4386e-01 L12_sharp:1.5388e+00 total_fnorm:7.8000e+01 total_l1_linf:1.5770e+05 total_spectral:3.9000e+01 L1_fnorm:1.1572e-01 L2_fnorm:1.1670e-01 L3_fnorm:1.1670e-01 L4_fnorm:1.1621e-01 L5_fnorm:1.1572e-01 L6_fnorm:1.1670e-01 L7_fnorm:1.1768e-01 L8_fnorm:1.1572e-01 L9_fnorm:1.1719e-01 L10_fnorm:1.1816e-01 L11_fnorm:1.1670e-01 L12_fnorm:1.1475e-01 L1_l1linf:4.0527e-02 L2_l1linf:4.0771e-02 L3_l1linf:4.0771e-02 L4_l1linf:4.0283e-02 L5_l1linf:4.0283e-02 
L6_l1linf:3.9551e-02 L7_l1linf:4.0039e-02 L8_l1linf:3.9795e-02 L9_l1linf:4.0283e-02 L10_l1linf:4.1016e-02 L11_l1linf:4.2236e-02 L12_l1linf:4.1748e-02 L1_spectral:1.6102e-03 L2_spectral:1.5983e-03 L3_spectral:1.6128e-03 L4_spectral:1.6066e-03 L5_spectral:1.6057e-03 L6_spectral:1.6002e-03 L7_spectral:1.6001e-03 L8_spectral:1.5979e-03 L9_spectral:1.6091e-03 L10_spectral:1.5946e-03 L11_spectral:1.5938e-03 L12_spectral:1.5830e-03 train_time:78663ms step_avg:65.55ms +[2025-09-11 10:23:29] [Rank 0] step:1201/10000 train_time:80229ms step_avg:66.80ms +[2025-09-11 10:23:29] [Rank 0] step:1201/10000 train_time:80229ms step_avg:66.80ms +[2025-09-11 10:23:30] [Rank 0] step:1221/10000 train_time:80896ms step_avg:66.25ms +[2025-09-11 10:23:30] [Rank 0] step:1221/10000 train_time:80896ms step_avg:66.25ms +[2025-09-11 10:23:31] [Rank 0] step:1241/10000 train_time:81541ms step_avg:65.71ms +[2025-09-11 10:23:31] [Rank 0] step:1241/10000 train_time:81541ms step_avg:65.71ms +[2025-09-11 10:23:31] [Rank 0] step:1261/10000 train_time:82187ms step_avg:65.18ms +[2025-09-11 10:23:31] [Rank 0] step:1261/10000 train_time:82187ms step_avg:65.18ms +[2025-09-11 10:23:32] [Rank 0] step:1281/10000 train_time:82833ms step_avg:64.66ms +[2025-09-11 10:23:32] [Rank 0] step:1281/10000 train_time:82833ms step_avg:64.66ms +[2025-09-11 10:23:33] [Rank 0] step:1301/10000 train_time:83479ms step_avg:64.17ms +[2025-09-11 10:23:33] [Rank 0] step:1301/10000 train_time:83479ms step_avg:64.17ms +[2025-09-11 10:23:33] [Rank 0] step:1321/10000 train_time:84125ms step_avg:63.68ms +[2025-09-11 10:23:33] [Rank 0] step:1321/10000 train_time:84125ms step_avg:63.68ms +[2025-09-11 10:23:34] [Rank 0] step:1341/10000 train_time:85050ms step_avg:63.42ms +[2025-09-11 10:23:34] [Rank 0] step:1341/10000 train_time:85050ms step_avg:63.42ms +[2025-09-11 10:23:35] [Rank 0] step:1361/10000 train_time:85695ms step_avg:62.97ms +[2025-09-11 10:23:35] [Rank 0] step:1361/10000 train_time:85695ms step_avg:62.97ms +[2025-09-11 10:23:36] 
[Rank 0] step:1381/10000 train_time:86341ms step_avg:62.52ms +[2025-09-11 10:23:36] [Rank 0] step:1381/10000 train_time:86341ms step_avg:62.52ms +[2025-09-11 10:23:37] [Rank 0] step:1401/10000 train_time:87300ms step_avg:62.31ms +[2025-09-11 10:23:37] [Rank 0] step:1401/10000 train_time:87300ms step_avg:62.31ms +[2025-09-11 10:23:37] [Rank 0] step:1421/10000 train_time:87944ms step_avg:61.89ms +[2025-09-11 10:23:37] [Rank 0] step:1421/10000 train_time:87944ms step_avg:61.89ms +[2025-09-11 10:23:38] [Rank 0] step:1441/10000 train_time:88589ms step_avg:61.48ms +[2025-09-11 10:23:38] [Rank 0] step:1441/10000 train_time:88589ms step_avg:61.48ms +[2025-09-11 10:23:39] [Rank 0] step:1461/10000 train_time:89234ms step_avg:61.08ms +[2025-09-11 10:23:39] [Rank 0] step:1461/10000 train_time:89234ms step_avg:61.08ms +[2025-09-11 10:23:39] [Rank 0] step:1481/10000 train_time:89879ms step_avg:60.69ms +[2025-09-11 10:23:39] [Rank 0] step:1481/10000 train_time:89879ms step_avg:60.69ms +[2025-09-11 10:23:40] [Rank 0] step:1501/10000 train_time:90527ms step_avg:60.31ms +[2025-09-11 10:23:40] [Rank 0] step:1501/10000 train_time:90527ms step_avg:60.31ms +[2025-09-11 10:23:40] [Rank 0] step:1521/10000 train_time:91176ms step_avg:59.94ms +[2025-09-11 10:23:40] [Rank 0] step:1521/10000 train_time:91176ms step_avg:59.94ms +[2025-09-11 10:23:41] [Rank 0] step:1541/10000 train_time:91826ms step_avg:59.59ms +[2025-09-11 10:23:41] [Rank 0] step:1541/10000 train_time:91826ms step_avg:59.59ms +[2025-09-11 10:23:42] [Rank 0] step:1561/10000 train_time:92474ms step_avg:59.24ms +[2025-09-11 10:23:42] [Rank 0] step:1561/10000 train_time:92474ms step_avg:59.24ms +[2025-09-11 10:23:42] [Rank 0] step:1581/10000 train_time:93123ms step_avg:58.90ms +[2025-09-11 10:23:42] [Rank 0] step:1581/10000 train_time:93123ms step_avg:58.90ms +[2025-09-11 10:23:43] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 10:23:43] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 10:23:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 10:23:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 10:23:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 10:23:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 10:23:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:23:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:23:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 10:23:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 10:23:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 10:23:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 10:23:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 10:23:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 10:23:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 10:23:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 10:23:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 10:23:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 10:23:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 10:23:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 10:23:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 10:23:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 10:23:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 10:23:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 10:23:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 10:23:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 10:23:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 10:23:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 10:23:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 10:23:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 10:23:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 10:23:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 10:23:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 10:23:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 10:23:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 10:23:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 10:23:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 10:23:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 10:23:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 10:23:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 10:23:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 10:23:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 10:23:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 10:23:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 10:23:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:23:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:23:53] [Rank 0] PRINT: step:1600/10000 val_loss:5.2452 total_sharp:2.4380e-04 L1_sharp:9.7321e-02 L2_sharp:9.1596e-02 L3_sharp:1.0689e-01 L4_sharp:1.0857e-01 L5_sharp:1.1406e-01 L6_sharp:1.1172e-01 L7_sharp:1.1282e-01 L8_sharp:1.3312e-01 L9_sharp:1.6999e-01 L10_sharp:2.0862e-01 L11_sharp:2.3188e-01 L12_sharp:8.3185e-01 total_fnorm:7.5000e+01 total_l1_linf:1.4336e+05 total_spectral:3.7500e+01 L1_fnorm:1.1523e-01 L2_fnorm:1.1670e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1621e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1670e-01 L7_fnorm:1.1670e-01 L8_fnorm:1.1572e-01 L9_fnorm:1.1768e-01 L10_fnorm:1.1816e-01 L11_fnorm:1.1768e-01 L12_fnorm:1.1572e-01 L1_l1linf:3.9062e-02 L2_l1linf:3.9062e-02 L3_l1linf:3.8086e-02 L4_l1linf:3.8086e-02 L5_l1linf:3.8086e-02 L6_l1linf:3.7842e-02 L7_l1linf:3.8086e-02 L8_l1linf:3.7354e-02 L9_l1linf:3.8330e-02 L10_l1linf:3.8574e-02 L11_l1linf:3.9307e-02 L12_l1linf:3.9795e-02 L1_spectral:1.6007e-03 L2_spectral:1.6051e-03 L3_spectral:1.6044e-03 L4_spectral:1.6150e-03 L5_spectral:1.5863e-03 L6_spectral:1.6006e-03 L7_spectral:1.6149e-03 L8_spectral:1.6076e-03 L9_spectral:1.6096e-03 L10_spectral:1.6000e-03 L11_spectral:1.6083e-03 L12_spectral:1.5962e-03 train_time:93754ms step_avg:58.60ms +[2025-09-11 10:23:53] [Rank 0] PRINT: step:1600/10000 
val_loss:5.2452 total_sharp:2.4380e-04 L1_sharp:9.7321e-02 L2_sharp:9.1596e-02 L3_sharp:1.0689e-01 L4_sharp:1.0857e-01 L5_sharp:1.1406e-01 L6_sharp:1.1172e-01 L7_sharp:1.1282e-01 L8_sharp:1.3312e-01 L9_sharp:1.6999e-01 L10_sharp:2.0862e-01 L11_sharp:2.3188e-01 L12_sharp:8.3185e-01 total_fnorm:7.5000e+01 total_l1_linf:1.4336e+05 total_spectral:3.7500e+01 L1_fnorm:1.1523e-01 L2_fnorm:1.1670e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1621e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1670e-01 L7_fnorm:1.1670e-01 L8_fnorm:1.1572e-01 L9_fnorm:1.1768e-01 L10_fnorm:1.1816e-01 L11_fnorm:1.1768e-01 L12_fnorm:1.1572e-01 L1_l1linf:3.9062e-02 L2_l1linf:3.9062e-02 L3_l1linf:3.8086e-02 L4_l1linf:3.8086e-02 L5_l1linf:3.8086e-02 L6_l1linf:3.7842e-02 L7_l1linf:3.8086e-02 L8_l1linf:3.7354e-02 L9_l1linf:3.8330e-02 L10_l1linf:3.8574e-02 L11_l1linf:3.9307e-02 L12_l1linf:3.9795e-02 L1_spectral:1.6007e-03 L2_spectral:1.6051e-03 L3_spectral:1.6044e-03 L4_spectral:1.6150e-03 L5_spectral:1.5863e-03 L6_spectral:1.6006e-03 L7_spectral:1.6149e-03 L8_spectral:1.6076e-03 L9_spectral:1.6096e-03 L10_spectral:1.6000e-03 L11_spectral:1.6083e-03 L12_spectral:1.5962e-03 train_time:93754ms step_avg:58.60ms +[2025-09-11 10:23:55] [Rank 0] step:1601/10000 train_time:95334ms step_avg:59.55ms +[2025-09-11 10:23:55] [Rank 0] step:1601/10000 train_time:95334ms step_avg:59.55ms +[2025-09-11 10:23:56] [Rank 0] step:1621/10000 train_time:96007ms step_avg:59.23ms +[2025-09-11 10:23:56] [Rank 0] step:1621/10000 train_time:96007ms step_avg:59.23ms +[2025-09-11 10:23:56] [Rank 0] step:1641/10000 train_time:96659ms step_avg:58.90ms +[2025-09-11 10:23:56] [Rank 0] step:1641/10000 train_time:96659ms step_avg:58.90ms +[2025-09-11 10:23:57] [Rank 0] step:1661/10000 train_time:97309ms step_avg:58.58ms +[2025-09-11 10:23:57] [Rank 0] step:1661/10000 train_time:97309ms step_avg:58.58ms +[2025-09-11 10:23:58] [Rank 0] step:1681/10000 train_time:97961ms step_avg:58.28ms +[2025-09-11 10:23:58] [Rank 0] step:1681/10000 train_time:97961ms 
step_avg:58.28ms +[2025-09-11 10:23:58] [Rank 0] step:1701/10000 train_time:98610ms step_avg:57.97ms +[2025-09-11 10:23:58] [Rank 0] step:1701/10000 train_time:98610ms step_avg:57.97ms +[2025-09-11 10:23:59] [Rank 0] step:1721/10000 train_time:99260ms step_avg:57.68ms +[2025-09-11 10:23:59] [Rank 0] step:1721/10000 train_time:99260ms step_avg:57.68ms +[2025-09-11 10:24:00] [Rank 0] step:1741/10000 train_time:99910ms step_avg:57.39ms +[2025-09-11 10:24:00] [Rank 0] step:1741/10000 train_time:99910ms step_avg:57.39ms +[2025-09-11 10:24:00] [Rank 0] step:1761/10000 train_time:100561ms step_avg:57.10ms +[2025-09-11 10:24:00] [Rank 0] step:1761/10000 train_time:100561ms step_avg:57.10ms +[2025-09-11 10:24:01] [Rank 0] step:1781/10000 train_time:101210ms step_avg:56.83ms +[2025-09-11 10:24:01] [Rank 0] step:1781/10000 train_time:101210ms step_avg:56.83ms +[2025-09-11 10:24:02] [Rank 0] step:1801/10000 train_time:101860ms step_avg:56.56ms +[2025-09-11 10:24:02] [Rank 0] step:1801/10000 train_time:101860ms step_avg:56.56ms +[2025-09-11 10:24:02] [Rank 0] step:1821/10000 train_time:102511ms step_avg:56.29ms +[2025-09-11 10:24:02] [Rank 0] step:1821/10000 train_time:102511ms step_avg:56.29ms +[2025-09-11 10:24:03] [Rank 0] step:1841/10000 train_time:103161ms step_avg:56.04ms +[2025-09-11 10:24:03] [Rank 0] step:1841/10000 train_time:103161ms step_avg:56.04ms +[2025-09-11 10:24:04] [Rank 0] step:1861/10000 train_time:103811ms step_avg:55.78ms +[2025-09-11 10:24:04] [Rank 0] step:1861/10000 train_time:103811ms step_avg:55.78ms +[2025-09-11 10:24:04] [Rank 0] step:1881/10000 train_time:104461ms step_avg:55.53ms +[2025-09-11 10:24:04] [Rank 0] step:1881/10000 train_time:104461ms step_avg:55.53ms +[2025-09-11 10:24:05] [Rank 0] step:1901/10000 train_time:105111ms step_avg:55.29ms +[2025-09-11 10:24:05] [Rank 0] step:1901/10000 train_time:105111ms step_avg:55.29ms +[2025-09-11 10:24:05] [Rank 0] step:1921/10000 train_time:105761ms step_avg:55.05ms +[2025-09-11 10:24:05] [Rank 0] 
step:1921/10000 train_time:105761ms step_avg:55.05ms +[2025-09-11 10:24:06] [Rank 0] step:1941/10000 train_time:106411ms step_avg:54.82ms +[2025-09-11 10:24:06] [Rank 0] step:1941/10000 train_time:106411ms step_avg:54.82ms +[2025-09-11 10:24:07] [Rank 0] step:1961/10000 train_time:107062ms step_avg:54.60ms +[2025-09-11 10:24:07] [Rank 0] step:1961/10000 train_time:107062ms step_avg:54.60ms +[2025-09-11 10:24:07] [Rank 0] step:1981/10000 train_time:107711ms step_avg:54.37ms +[2025-09-11 10:24:07] [Rank 0] step:1981/10000 train_time:107711ms step_avg:54.37ms +[2025-09-11 10:24:08] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 10:24:08] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 10:24:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 10:24:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 10:24:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 10:24:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 10:24:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:24:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:24:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 10:24:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 10:24:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 10:24:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 10:24:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 10:24:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 10:24:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 10:24:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 10:24:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 10:24:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 10:24:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 10:24:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 10:24:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 10:24:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 10:24:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 10:24:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 10:24:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 10:24:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 10:24:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 10:24:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 10:24:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 10:24:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 10:24:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 10:24:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 10:24:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 10:24:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 10:24:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 10:24:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 10:24:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 10:24:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 10:24:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 10:24:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 10:24:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 10:24:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 10:24:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 10:24:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 10:24:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:24:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:24:19] [Rank 0] PRINT: step:2000/10000 val_loss:5.1149 total_sharp:2.1734e-04 L1_sharp:7.6284e-02 L2_sharp:7.9637e-02 L3_sharp:8.1335e-02 L4_sharp:1.0423e-01 L5_sharp:1.0954e-01 L6_sharp:1.2230e-01 L7_sharp:1.4629e-01 L8_sharp:1.7902e-01 L9_sharp:1.8483e-01 L10_sharp:4.3576e-01 L11_sharp:8.2921e-01 L12_sharp:2.6058e+00 total_fnorm:7.5000e+01 total_l1_linf:1.4950e+05 total_spectral:3.7500e+01 L1_fnorm:1.1572e-01 L2_fnorm:1.1670e-01 L3_fnorm:1.1670e-01 L4_fnorm:1.1670e-01 L5_fnorm:1.1572e-01 L6_fnorm:1.1621e-01 L7_fnorm:1.1670e-01 L8_fnorm:1.1572e-01 L9_fnorm:1.1768e-01 L10_fnorm:1.1816e-01 L11_fnorm:1.1816e-01 L12_fnorm:1.1719e-01 L1_l1linf:3.7109e-02 L2_l1linf:3.6621e-02 L3_l1linf:3.6865e-02 L4_l1linf:3.6377e-02 L5_l1linf:3.6621e-02 L6_l1linf:3.6377e-02 L7_l1linf:3.6865e-02 L8_l1linf:3.6377e-02 L9_l1linf:3.6377e-02 L10_l1linf:3.7354e-02 L11_l1linf:3.8086e-02 L12_l1linf:3.8574e-02 L1_spectral:1.6029e-03 L2_spectral:1.6021e-03 L3_spectral:1.6032e-03 L4_spectral:1.6075e-03 L5_spectral:1.6082e-03 L6_spectral:1.6026e-03 L7_spectral:1.6032e-03 L8_spectral:1.6091e-03 L9_spectral:1.6089e-03 L10_spectral:1.6132e-03 L11_spectral:1.6081e-03 L12_spectral:1.6021e-03 train_time:108343ms step_avg:54.17ms +[2025-09-11 10:24:19] [Rank 0] PRINT: step:2000/10000 val_loss:5.1149 total_sharp:2.1734e-04 L1_sharp:7.6284e-02 L2_sharp:7.9637e-02 L3_sharp:8.1335e-02 L4_sharp:1.0423e-01 L5_sharp:1.0954e-01 L6_sharp:1.2230e-01 L7_sharp:1.4629e-01 L8_sharp:1.7902e-01 L9_sharp:1.8483e-01 L10_sharp:4.3576e-01 L11_sharp:8.2921e-01 L12_sharp:2.6058e+00 total_fnorm:7.5000e+01 total_l1_linf:1.4950e+05 total_spectral:3.7500e+01 L1_fnorm:1.1572e-01 L2_fnorm:1.1670e-01 L3_fnorm:1.1670e-01 L4_fnorm:1.1670e-01 L5_fnorm:1.1572e-01 L6_fnorm:1.1621e-01 L7_fnorm:1.1670e-01 L8_fnorm:1.1572e-01 L9_fnorm:1.1768e-01 L10_fnorm:1.1816e-01 L11_fnorm:1.1816e-01 L12_fnorm:1.1719e-01 L1_l1linf:3.7109e-02 L2_l1linf:3.6621e-02 L3_l1linf:3.6865e-02 L4_l1linf:3.6377e-02 L5_l1linf:3.6621e-02 
L6_l1linf:3.6377e-02 L7_l1linf:3.6865e-02 L8_l1linf:3.6377e-02 L9_l1linf:3.6377e-02 L10_l1linf:3.7354e-02 L11_l1linf:3.8086e-02 L12_l1linf:3.8574e-02 L1_spectral:1.6029e-03 L2_spectral:1.6021e-03 L3_spectral:1.6032e-03 L4_spectral:1.6075e-03 L5_spectral:1.6082e-03 L6_spectral:1.6026e-03 L7_spectral:1.6032e-03 L8_spectral:1.6091e-03 L9_spectral:1.6089e-03 L10_spectral:1.6132e-03 L11_spectral:1.6081e-03 L12_spectral:1.6021e-03 train_time:108343ms step_avg:54.17ms +[2025-09-11 10:24:20] [Rank 0] step:2001/10000 train_time:109940ms step_avg:54.94ms +[2025-09-11 10:24:20] [Rank 0] step:2001/10000 train_time:109940ms step_avg:54.94ms +[2025-09-11 10:24:21] [Rank 0] step:2021/10000 train_time:110606ms step_avg:54.73ms +[2025-09-11 10:24:21] [Rank 0] step:2021/10000 train_time:110606ms step_avg:54.73ms +[2025-09-11 10:24:22] [Rank 0] step:2041/10000 train_time:111256ms step_avg:54.51ms +[2025-09-11 10:24:22] [Rank 0] step:2041/10000 train_time:111256ms step_avg:54.51ms +[2025-09-11 10:24:22] [Rank 0] step:2061/10000 train_time:111905ms step_avg:54.30ms +[2025-09-11 10:24:22] [Rank 0] step:2061/10000 train_time:111905ms step_avg:54.30ms +[2025-09-11 10:24:23] [Rank 0] step:2081/10000 train_time:112553ms step_avg:54.09ms +[2025-09-11 10:24:23] [Rank 0] step:2081/10000 train_time:112553ms step_avg:54.09ms +[2025-09-11 10:24:24] [Rank 0] step:2101/10000 train_time:113202ms step_avg:53.88ms +[2025-09-11 10:24:24] [Rank 0] step:2101/10000 train_time:113202ms step_avg:53.88ms +[2025-09-11 10:24:24] [Rank 0] step:2121/10000 train_time:113851ms step_avg:53.68ms +[2025-09-11 10:24:24] [Rank 0] step:2121/10000 train_time:113851ms step_avg:53.68ms +[2025-09-11 10:24:25] [Rank 0] step:2141/10000 train_time:114501ms step_avg:53.48ms +[2025-09-11 10:24:25] [Rank 0] step:2141/10000 train_time:114501ms step_avg:53.48ms +[2025-09-11 10:24:26] [Rank 0] step:2161/10000 train_time:115150ms step_avg:53.29ms +[2025-09-11 10:24:26] [Rank 0] step:2161/10000 train_time:115150ms step_avg:53.29ms 
+[2025-09-11 10:24:26] [Rank 0] step:2181/10000 train_time:115801ms step_avg:53.10ms +[2025-09-11 10:24:26] [Rank 0] step:2181/10000 train_time:115801ms step_avg:53.10ms +[2025-09-11 10:24:27] [Rank 0] step:2201/10000 train_time:116450ms step_avg:52.91ms +[2025-09-11 10:24:27] [Rank 0] step:2201/10000 train_time:116450ms step_avg:52.91ms +[2025-09-11 10:24:28] [Rank 0] step:2221/10000 train_time:117099ms step_avg:52.72ms +[2025-09-11 10:24:28] [Rank 0] step:2221/10000 train_time:117099ms step_avg:52.72ms +[2025-09-11 10:24:28] [Rank 0] step:2241/10000 train_time:117760ms step_avg:52.55ms +[2025-09-11 10:24:28] [Rank 0] step:2241/10000 train_time:117760ms step_avg:52.55ms +[2025-09-11 10:24:29] [Rank 0] step:2261/10000 train_time:118423ms step_avg:52.38ms +[2025-09-11 10:24:29] [Rank 0] step:2261/10000 train_time:118423ms step_avg:52.38ms +[2025-09-11 10:24:30] [Rank 0] step:2281/10000 train_time:119084ms step_avg:52.21ms +[2025-09-11 10:24:30] [Rank 0] step:2281/10000 train_time:119084ms step_avg:52.21ms +[2025-09-11 10:24:30] [Rank 0] step:2301/10000 train_time:119746ms step_avg:52.04ms +[2025-09-11 10:24:30] [Rank 0] step:2301/10000 train_time:119746ms step_avg:52.04ms +[2025-09-11 10:24:31] [Rank 0] step:2321/10000 train_time:120408ms step_avg:51.88ms +[2025-09-11 10:24:31] [Rank 0] step:2321/10000 train_time:120408ms step_avg:51.88ms +[2025-09-11 10:24:32] [Rank 0] step:2341/10000 train_time:121071ms step_avg:51.72ms +[2025-09-11 10:24:32] [Rank 0] step:2341/10000 train_time:121071ms step_avg:51.72ms +[2025-09-11 10:24:32] [Rank 0] step:2361/10000 train_time:121733ms step_avg:51.56ms +[2025-09-11 10:24:32] [Rank 0] step:2361/10000 train_time:121733ms step_avg:51.56ms +[2025-09-11 10:24:33] [Rank 0] step:2381/10000 train_time:122396ms step_avg:51.41ms +[2025-09-11 10:24:33] [Rank 0] step:2381/10000 train_time:122396ms step_avg:51.41ms +[2025-09-11 10:24:34] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 10:24:34] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 10:24:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 10:24:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 10:24:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 10:24:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 10:24:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:24:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:24:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 10:24:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 10:24:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 10:24:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 10:24:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 10:24:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 10:24:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 10:24:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 10:24:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 10:24:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 10:24:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 10:24:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 10:24:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 10:24:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 10:24:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 10:24:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 10:24:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 10:24:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 10:24:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 10:24:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 10:24:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 10:24:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 10:24:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 10:24:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 10:24:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 10:24:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 10:24:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 10:24:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 10:24:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 10:24:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 10:24:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 10:24:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 10:24:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 10:24:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 10:24:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 10:24:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 10:24:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:24:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:24:44] [Rank 0] PRINT: step:2400/10000 val_loss:4.9899 total_sharp:2.1195e-04 L1_sharp:5.9679e-02 L2_sharp:6.7025e-02 L3_sharp:7.3475e-02 L4_sharp:9.4497e-02 L5_sharp:1.1578e-01 L6_sharp:1.2282e-01 L7_sharp:1.4033e-01 L8_sharp:1.6974e-01 L9_sharp:1.7890e-01 L10_sharp:2.4112e-01 L11_sharp:4.7168e-01 L12_sharp:1.5686e+00 total_fnorm:7.1500e+01 total_l1_linf:1.3619e+05 total_spectral:3.5750e+01 L1_fnorm:1.1621e-01 L2_fnorm:1.1621e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1621e-01 L5_fnorm:1.1572e-01 L6_fnorm:1.1572e-01 L7_fnorm:1.1621e-01 L8_fnorm:1.1572e-01 L9_fnorm:1.1670e-01 L10_fnorm:1.1768e-01 L11_fnorm:1.1670e-01 L12_fnorm:1.1719e-01 L1_l1linf:3.5645e-02 L2_l1linf:3.6133e-02 L3_l1linf:3.5889e-02 L4_l1linf:3.5645e-02 L5_l1linf:3.5156e-02 L6_l1linf:3.5156e-02 L7_l1linf:3.5400e-02 L8_l1linf:3.4912e-02 L9_l1linf:3.4912e-02 L10_l1linf:3.5156e-02 L11_l1linf:3.5645e-02 L12_l1linf:3.6865e-02 L1_spectral:1.6067e-03 L2_spectral:1.6098e-03 L3_spectral:1.6115e-03 L4_spectral:1.6058e-03 L5_spectral:1.6057e-03 L6_spectral:1.5938e-03 L7_spectral:1.6064e-03 L8_spectral:1.6072e-03 L9_spectral:1.6198e-03 L10_spectral:1.6049e-03 L11_spectral:1.6119e-03 L12_spectral:1.6123e-03 train_time:123037ms step_avg:51.27ms +[2025-09-11 10:24:44] [Rank 0] PRINT: step:2400/10000 
val_loss:4.9899 total_sharp:2.1195e-04 L1_sharp:5.9679e-02 L2_sharp:6.7025e-02 L3_sharp:7.3475e-02 L4_sharp:9.4497e-02 L5_sharp:1.1578e-01 L6_sharp:1.2282e-01 L7_sharp:1.4033e-01 L8_sharp:1.6974e-01 L9_sharp:1.7890e-01 L10_sharp:2.4112e-01 L11_sharp:4.7168e-01 L12_sharp:1.5686e+00 total_fnorm:7.1500e+01 total_l1_linf:1.3619e+05 total_spectral:3.5750e+01 L1_fnorm:1.1621e-01 L2_fnorm:1.1621e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1621e-01 L5_fnorm:1.1572e-01 L6_fnorm:1.1572e-01 L7_fnorm:1.1621e-01 L8_fnorm:1.1572e-01 L9_fnorm:1.1670e-01 L10_fnorm:1.1768e-01 L11_fnorm:1.1670e-01 L12_fnorm:1.1719e-01 L1_l1linf:3.5645e-02 L2_l1linf:3.6133e-02 L3_l1linf:3.5889e-02 L4_l1linf:3.5645e-02 L5_l1linf:3.5156e-02 L6_l1linf:3.5156e-02 L7_l1linf:3.5400e-02 L8_l1linf:3.4912e-02 L9_l1linf:3.4912e-02 L10_l1linf:3.5156e-02 L11_l1linf:3.5645e-02 L12_l1linf:3.6865e-02 L1_spectral:1.6067e-03 L2_spectral:1.6098e-03 L3_spectral:1.6115e-03 L4_spectral:1.6058e-03 L5_spectral:1.6057e-03 L6_spectral:1.5938e-03 L7_spectral:1.6064e-03 L8_spectral:1.6072e-03 L9_spectral:1.6198e-03 L10_spectral:1.6049e-03 L11_spectral:1.6119e-03 L12_spectral:1.6123e-03 train_time:123037ms step_avg:51.27ms +[2025-09-11 10:24:46] [Rank 0] step:2401/10000 train_time:124593ms step_avg:51.89ms +[2025-09-11 10:24:46] [Rank 0] step:2401/10000 train_time:124593ms step_avg:51.89ms +[2025-09-11 10:24:46] [Rank 0] step:2421/10000 train_time:125277ms step_avg:51.75ms +[2025-09-11 10:24:46] [Rank 0] step:2421/10000 train_time:125277ms step_avg:51.75ms +[2025-09-11 10:24:47] [Rank 0] step:2441/10000 train_time:125941ms step_avg:51.59ms +[2025-09-11 10:24:47] [Rank 0] step:2441/10000 train_time:125941ms step_avg:51.59ms +[2025-09-11 10:24:48] [Rank 0] step:2461/10000 train_time:126604ms step_avg:51.44ms +[2025-09-11 10:24:48] [Rank 0] step:2461/10000 train_time:126604ms step_avg:51.44ms +[2025-09-11 10:24:48] [Rank 0] step:2481/10000 train_time:127267ms step_avg:51.30ms +[2025-09-11 10:24:48] [Rank 0] step:2481/10000 
train_time:127267ms step_avg:51.30ms +[2025-09-11 10:24:49] [Rank 0] step:2501/10000 train_time:127930ms step_avg:51.15ms +[2025-09-11 10:24:49] [Rank 0] step:2501/10000 train_time:127930ms step_avg:51.15ms +[2025-09-11 10:24:50] [Rank 0] step:2521/10000 train_time:128593ms step_avg:51.01ms +[2025-09-11 10:24:50] [Rank 0] step:2521/10000 train_time:128593ms step_avg:51.01ms +[2025-09-11 10:24:50] [Rank 0] step:2541/10000 train_time:129255ms step_avg:50.87ms +[2025-09-11 10:24:50] [Rank 0] step:2541/10000 train_time:129255ms step_avg:50.87ms +[2025-09-11 10:24:51] [Rank 0] step:2561/10000 train_time:129917ms step_avg:50.73ms +[2025-09-11 10:24:51] [Rank 0] step:2561/10000 train_time:129917ms step_avg:50.73ms +[2025-09-11 10:24:52] [Rank 0] step:2581/10000 train_time:130580ms step_avg:50.59ms +[2025-09-11 10:24:52] [Rank 0] step:2581/10000 train_time:130580ms step_avg:50.59ms +[2025-09-11 10:24:52] [Rank 0] step:2601/10000 train_time:131243ms step_avg:50.46ms +[2025-09-11 10:24:52] [Rank 0] step:2601/10000 train_time:131243ms step_avg:50.46ms +[2025-09-11 10:24:53] [Rank 0] step:2621/10000 train_time:131905ms step_avg:50.33ms +[2025-09-11 10:24:53] [Rank 0] step:2621/10000 train_time:131905ms step_avg:50.33ms +[2025-09-11 10:24:54] [Rank 0] step:2641/10000 train_time:132567ms step_avg:50.20ms +[2025-09-11 10:24:54] [Rank 0] step:2641/10000 train_time:132567ms step_avg:50.20ms +[2025-09-11 10:24:54] [Rank 0] step:2661/10000 train_time:133230ms step_avg:50.07ms +[2025-09-11 10:24:54] [Rank 0] step:2661/10000 train_time:133230ms step_avg:50.07ms +[2025-09-11 10:24:55] [Rank 0] step:2681/10000 train_time:133892ms step_avg:49.94ms +[2025-09-11 10:24:55] [Rank 0] step:2681/10000 train_time:133892ms step_avg:49.94ms +[2025-09-11 10:24:56] [Rank 0] step:2701/10000 train_time:134555ms step_avg:49.82ms +[2025-09-11 10:24:56] [Rank 0] step:2701/10000 train_time:134555ms step_avg:49.82ms +[2025-09-11 10:24:56] [Rank 0] step:2721/10000 train_time:135218ms step_avg:49.69ms 
+[2025-09-11 10:24:56] [Rank 0] step:2721/10000 train_time:135218ms step_avg:49.69ms +[2025-09-11 10:24:57] [Rank 0] step:2741/10000 train_time:135881ms step_avg:49.57ms +[2025-09-11 10:24:57] [Rank 0] step:2741/10000 train_time:135881ms step_avg:49.57ms +[2025-09-11 10:24:58] [Rank 0] step:2761/10000 train_time:136543ms step_avg:49.45ms +[2025-09-11 10:24:58] [Rank 0] step:2761/10000 train_time:136543ms step_avg:49.45ms +[2025-09-11 10:24:58] [Rank 0] step:2781/10000 train_time:137204ms step_avg:49.34ms +[2025-09-11 10:24:58] [Rank 0] step:2781/10000 train_time:137204ms step_avg:49.34ms +[2025-09-11 10:24:59] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 10:24:59] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 10:25:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 10:25:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 10:25:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 10:25:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 10:25:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:25:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:25:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 10:25:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 10:25:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 10:25:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 10:25:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 10:25:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 10:25:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 10:25:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 10:25:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 10:25:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 10:25:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 10:25:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 10:25:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 10:25:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 10:25:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 10:25:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 10:25:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 10:25:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 10:25:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 10:25:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 10:25:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 10:25:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 10:25:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 10:25:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 10:25:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 10:25:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 10:25:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 10:25:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 10:25:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 10:25:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 10:25:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 10:25:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 10:25:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 10:25:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 10:25:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 10:25:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 10:25:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:25:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:25:10] [Rank 0] PRINT: step:2800/10000 val_loss:4.8969 total_sharp:1.8528e-04 L1_sharp:5.0708e-02 L2_sharp:6.1375e-02 L3_sharp:7.2608e-02 L4_sharp:9.0460e-02 L5_sharp:9.6940e-02 L6_sharp:1.0861e-01 L7_sharp:1.2230e-01 L8_sharp:1.3525e-01 L9_sharp:1.5513e-01 L10_sharp:2.3274e-01 L11_sharp:2.9410e-01 L12_sharp:6.7255e-01 total_fnorm:6.9500e+01 total_l1_linf:1.3107e+05 total_spectral:3.4500e+01 L1_fnorm:1.1572e-01 L2_fnorm:1.1621e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1621e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1572e-01 L7_fnorm:1.1572e-01 L8_fnorm:1.1523e-01 L9_fnorm:1.1572e-01 L10_fnorm:1.1670e-01 L11_fnorm:1.1621e-01 L12_fnorm:1.1670e-01 L1_l1linf:3.4668e-02 L2_l1linf:3.4668e-02 L3_l1linf:3.4180e-02 L4_l1linf:3.4668e-02 L5_l1linf:3.3691e-02 L6_l1linf:3.2959e-02 L7_l1linf:3.3203e-02 L8_l1linf:3.2959e-02 L9_l1linf:3.3447e-02 L10_l1linf:3.3447e-02 L11_l1linf:3.4424e-02 L12_l1linf:3.5889e-02 L1_spectral:1.6209e-03 L2_spectral:1.6039e-03 L3_spectral:1.6086e-03 L4_spectral:1.6093e-03 L5_spectral:1.6013e-03 L6_spectral:1.5951e-03 L7_spectral:1.6104e-03 L8_spectral:1.6150e-03 L9_spectral:1.6146e-03 L10_spectral:1.6049e-03 L11_spectral:1.6016e-03 L12_spectral:1.6178e-03 train_time:137850ms step_avg:49.23ms +[2025-09-11 10:25:10] [Rank 0] PRINT: step:2800/10000 val_loss:4.8969 total_sharp:1.8528e-04 L1_sharp:5.0708e-02 L2_sharp:6.1375e-02 L3_sharp:7.2608e-02 L4_sharp:9.0460e-02 L5_sharp:9.6940e-02 L6_sharp:1.0861e-01 L7_sharp:1.2230e-01 L8_sharp:1.3525e-01 L9_sharp:1.5513e-01 L10_sharp:2.3274e-01 L11_sharp:2.9410e-01 L12_sharp:6.7255e-01 total_fnorm:6.9500e+01 total_l1_linf:1.3107e+05 total_spectral:3.4500e+01 L1_fnorm:1.1572e-01 L2_fnorm:1.1621e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1621e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1572e-01 L7_fnorm:1.1572e-01 L8_fnorm:1.1523e-01 L9_fnorm:1.1572e-01 L10_fnorm:1.1670e-01 L11_fnorm:1.1621e-01 L12_fnorm:1.1670e-01 L1_l1linf:3.4668e-02 L2_l1linf:3.4668e-02 L3_l1linf:3.4180e-02 L4_l1linf:3.4668e-02 L5_l1linf:3.3691e-02 
L6_l1linf:3.2959e-02 L7_l1linf:3.3203e-02 L8_l1linf:3.2959e-02 L9_l1linf:3.3447e-02 L10_l1linf:3.3447e-02 L11_l1linf:3.4424e-02 L12_l1linf:3.5889e-02 L1_spectral:1.6209e-03 L2_spectral:1.6039e-03 L3_spectral:1.6086e-03 L4_spectral:1.6093e-03 L5_spectral:1.6013e-03 L6_spectral:1.5951e-03 L7_spectral:1.6104e-03 L8_spectral:1.6150e-03 L9_spectral:1.6146e-03 L10_spectral:1.6049e-03 L11_spectral:1.6016e-03 L12_spectral:1.6178e-03 train_time:137850ms step_avg:49.23ms +[2025-09-11 10:25:12] [Rank 0] step:2801/10000 train_time:139899ms step_avg:49.95ms +[2025-09-11 10:25:12] [Rank 0] step:2801/10000 train_time:139899ms step_avg:49.95ms +[2025-09-11 10:25:13] [Rank 0] step:2821/10000 train_time:140726ms step_avg:49.89ms +[2025-09-11 10:25:13] [Rank 0] step:2821/10000 train_time:140726ms step_avg:49.89ms +[2025-09-11 10:25:13] [Rank 0] step:2841/10000 train_time:141390ms step_avg:49.77ms +[2025-09-11 10:25:13] [Rank 0] step:2841/10000 train_time:141390ms step_avg:49.77ms +[2025-09-11 10:25:14] [Rank 0] step:2861/10000 train_time:142055ms step_avg:49.65ms +[2025-09-11 10:25:14] [Rank 0] step:2861/10000 train_time:142055ms step_avg:49.65ms +[2025-09-11 10:25:15] [Rank 0] step:2881/10000 train_time:142719ms step_avg:49.54ms +[2025-09-11 10:25:15] [Rank 0] step:2881/10000 train_time:142719ms step_avg:49.54ms +[2025-09-11 10:25:15] [Rank 0] step:2901/10000 train_time:143382ms step_avg:49.42ms +[2025-09-11 10:25:15] [Rank 0] step:2901/10000 train_time:143382ms step_avg:49.42ms +[2025-09-11 10:25:16] [Rank 0] step:2921/10000 train_time:144045ms step_avg:49.31ms +[2025-09-11 10:25:16] [Rank 0] step:2921/10000 train_time:144045ms step_avg:49.31ms +[2025-09-11 10:25:16] [Rank 0] step:2941/10000 train_time:144707ms step_avg:49.20ms +[2025-09-11 10:25:16] [Rank 0] step:2941/10000 train_time:144707ms step_avg:49.20ms +[2025-09-11 10:25:17] [Rank 0] step:2961/10000 train_time:145370ms step_avg:49.09ms +[2025-09-11 10:25:17] [Rank 0] step:2961/10000 train_time:145370ms step_avg:49.09ms 
+[2025-09-11 10:25:18] [Rank 0] step:2981/10000 train_time:146035ms step_avg:48.99ms +[2025-09-11 10:25:18] [Rank 0] step:2981/10000 train_time:146035ms step_avg:48.99ms +[2025-09-11 10:25:18] [Rank 0] step:3001/10000 train_time:146701ms step_avg:48.88ms +[2025-09-11 10:25:18] [Rank 0] step:3001/10000 train_time:146701ms step_avg:48.88ms +[2025-09-11 10:25:19] [Rank 0] step:3021/10000 train_time:147367ms step_avg:48.78ms +[2025-09-11 10:25:19] [Rank 0] step:3021/10000 train_time:147367ms step_avg:48.78ms +[2025-09-11 10:25:20] [Rank 0] step:3041/10000 train_time:148033ms step_avg:48.68ms +[2025-09-11 10:25:20] [Rank 0] step:3041/10000 train_time:148033ms step_avg:48.68ms +[2025-09-11 10:25:20] [Rank 0] step:3061/10000 train_time:148700ms step_avg:48.58ms +[2025-09-11 10:25:20] [Rank 0] step:3061/10000 train_time:148700ms step_avg:48.58ms +[2025-09-11 10:25:21] [Rank 0] step:3081/10000 train_time:149366ms step_avg:48.48ms +[2025-09-11 10:25:21] [Rank 0] step:3081/10000 train_time:149366ms step_avg:48.48ms +[2025-09-11 10:25:22] [Rank 0] step:3101/10000 train_time:150032ms step_avg:48.38ms +[2025-09-11 10:25:22] [Rank 0] step:3101/10000 train_time:150032ms step_avg:48.38ms +[2025-09-11 10:25:22] [Rank 0] step:3121/10000 train_time:150697ms step_avg:48.29ms +[2025-09-11 10:25:22] [Rank 0] step:3121/10000 train_time:150697ms step_avg:48.29ms +[2025-09-11 10:25:23] [Rank 0] step:3141/10000 train_time:151363ms step_avg:48.19ms +[2025-09-11 10:25:23] [Rank 0] step:3141/10000 train_time:151363ms step_avg:48.19ms +[2025-09-11 10:25:24] [Rank 0] step:3161/10000 train_time:152030ms step_avg:48.10ms +[2025-09-11 10:25:24] [Rank 0] step:3161/10000 train_time:152030ms step_avg:48.10ms +[2025-09-11 10:25:24] [Rank 0] step:3181/10000 train_time:152695ms step_avg:48.00ms +[2025-09-11 10:25:24] [Rank 0] step:3181/10000 train_time:152695ms step_avg:48.00ms +[2025-09-11 10:25:25] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 10:25:25] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 10:25:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 10:25:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 10:25:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 10:25:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 10:25:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:25:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:25:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 10:25:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 10:25:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 10:25:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 10:25:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 10:25:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 10:25:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 10:25:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 10:25:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 10:25:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 10:25:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 10:25:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 10:25:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 10:25:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 10:25:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 10:25:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 10:25:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 10:25:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 10:25:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 10:25:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 10:25:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 10:25:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 10:25:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 10:25:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 10:25:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 10:25:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 10:25:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 10:25:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 10:25:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 10:25:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 10:25:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 10:25:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 10:25:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 10:25:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 10:25:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 10:25:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 10:25:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:25:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:25:36] [Rank 0] PRINT: step:3200/10000 val_loss:4.8010 total_sharp:1.2773e-04 L1_sharp:4.3373e-02 L2_sharp:5.4148e-02 L3_sharp:6.6942e-02 L4_sharp:7.8109e-02 L5_sharp:7.5584e-02 L6_sharp:9.4980e-02 L7_sharp:1.0700e-01 L8_sharp:1.2948e-01 L9_sharp:1.3630e-01 L10_sharp:1.9971e-01 L11_sharp:2.5006e-01 L12_sharp:7.8951e-01 total_fnorm:7.7500e+01 total_l1_linf:1.5462e+05 total_spectral:3.8750e+01 L1_fnorm:1.1523e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1523e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1523e-01 L10_fnorm:1.1572e-01 L11_fnorm:1.1572e-01 L12_fnorm:1.1621e-01 L1_l1linf:3.2471e-02 L2_l1linf:3.2471e-02 L3_l1linf:3.2227e-02 L4_l1linf:3.2227e-02 L5_l1linf:3.2471e-02 L6_l1linf:3.1982e-02 L7_l1linf:3.2471e-02 L8_l1linf:3.2471e-02 L9_l1linf:3.1982e-02 L10_l1linf:3.1982e-02 L11_l1linf:3.2227e-02 L12_l1linf:3.3936e-02 L1_spectral:1.6055e-03 L2_spectral:1.6061e-03 L3_spectral:1.6032e-03 L4_spectral:1.5983e-03 L5_spectral:1.6012e-03 L6_spectral:1.6038e-03 L7_spectral:1.6095e-03 L8_spectral:1.6071e-03 L9_spectral:1.6231e-03 L10_spectral:1.6095e-03 L11_spectral:1.6109e-03 L12_spectral:1.6078e-03 train_time:153343ms step_avg:47.92ms +[2025-09-11 10:25:36] [Rank 0] PRINT: step:3200/10000 
val_loss:4.8010 total_sharp:1.2773e-04 L1_sharp:4.3373e-02 L2_sharp:5.4148e-02 L3_sharp:6.6942e-02 L4_sharp:7.8109e-02 L5_sharp:7.5584e-02 L6_sharp:9.4980e-02 L7_sharp:1.0700e-01 L8_sharp:1.2948e-01 L9_sharp:1.3630e-01 L10_sharp:1.9971e-01 L11_sharp:2.5006e-01 L12_sharp:7.8951e-01 total_fnorm:7.7500e+01 total_l1_linf:1.5462e+05 total_spectral:3.8750e+01 L1_fnorm:1.1523e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1523e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1523e-01 L10_fnorm:1.1572e-01 L11_fnorm:1.1572e-01 L12_fnorm:1.1621e-01 L1_l1linf:3.2471e-02 L2_l1linf:3.2471e-02 L3_l1linf:3.2227e-02 L4_l1linf:3.2227e-02 L5_l1linf:3.2471e-02 L6_l1linf:3.1982e-02 L7_l1linf:3.2471e-02 L8_l1linf:3.2471e-02 L9_l1linf:3.1982e-02 L10_l1linf:3.1982e-02 L11_l1linf:3.2227e-02 L12_l1linf:3.3936e-02 L1_spectral:1.6055e-03 L2_spectral:1.6061e-03 L3_spectral:1.6032e-03 L4_spectral:1.5983e-03 L5_spectral:1.6012e-03 L6_spectral:1.6038e-03 L7_spectral:1.6095e-03 L8_spectral:1.6071e-03 L9_spectral:1.6231e-03 L10_spectral:1.6095e-03 L11_spectral:1.6109e-03 L12_spectral:1.6078e-03 train_time:153343ms step_avg:47.92ms +[2025-09-11 10:25:37] [Rank 0] step:3201/10000 train_time:154972ms step_avg:48.41ms +[2025-09-11 10:25:37] [Rank 0] step:3201/10000 train_time:154972ms step_avg:48.41ms +[2025-09-11 10:25:38] [Rank 0] step:3221/10000 train_time:155628ms step_avg:48.32ms +[2025-09-11 10:25:38] [Rank 0] step:3221/10000 train_time:155628ms step_avg:48.32ms +[2025-09-11 10:25:39] [Rank 0] step:3241/10000 train_time:156296ms step_avg:48.22ms +[2025-09-11 10:25:39] [Rank 0] step:3241/10000 train_time:156296ms step_avg:48.22ms +[2025-09-11 10:25:39] [Rank 0] step:3261/10000 train_time:156963ms step_avg:48.13ms +[2025-09-11 10:25:39] [Rank 0] step:3261/10000 train_time:156963ms step_avg:48.13ms +[2025-09-11 10:25:40] [Rank 0] step:3281/10000 train_time:157926ms step_avg:48.13ms +[2025-09-11 10:25:40] [Rank 0] step:3281/10000 
train_time:157926ms step_avg:48.13ms +[2025-09-11 10:25:41] [Rank 0] step:3301/10000 train_time:158593ms step_avg:48.04ms +[2025-09-11 10:25:41] [Rank 0] step:3301/10000 train_time:158593ms step_avg:48.04ms +[2025-09-11 10:25:42] [Rank 0] step:3321/10000 train_time:159259ms step_avg:47.96ms +[2025-09-11 10:25:42] [Rank 0] step:3321/10000 train_time:159259ms step_avg:47.96ms +[2025-09-11 10:25:42] [Rank 0] step:3341/10000 train_time:159926ms step_avg:47.87ms +[2025-09-11 10:25:42] [Rank 0] step:3341/10000 train_time:159926ms step_avg:47.87ms +[2025-09-11 10:25:43] [Rank 0] step:3361/10000 train_time:160867ms step_avg:47.86ms +[2025-09-11 10:25:43] [Rank 0] step:3361/10000 train_time:160867ms step_avg:47.86ms +[2025-09-11 10:25:44] [Rank 0] step:3381/10000 train_time:161533ms step_avg:47.78ms +[2025-09-11 10:25:44] [Rank 0] step:3381/10000 train_time:161533ms step_avg:47.78ms +[2025-09-11 10:25:45] [Rank 0] step:3401/10000 train_time:162199ms step_avg:47.69ms +[2025-09-11 10:25:45] [Rank 0] step:3401/10000 train_time:162199ms step_avg:47.69ms +[2025-09-11 10:25:45] [Rank 0] step:3421/10000 train_time:162865ms step_avg:47.61ms +[2025-09-11 10:25:45] [Rank 0] step:3421/10000 train_time:162865ms step_avg:47.61ms +[2025-09-11 10:25:46] [Rank 0] step:3441/10000 train_time:163531ms step_avg:47.52ms +[2025-09-11 10:25:46] [Rank 0] step:3441/10000 train_time:163531ms step_avg:47.52ms +[2025-09-11 10:25:47] [Rank 0] step:3461/10000 train_time:164197ms step_avg:47.44ms +[2025-09-11 10:25:47] [Rank 0] step:3461/10000 train_time:164197ms step_avg:47.44ms +[2025-09-11 10:25:47] [Rank 0] step:3481/10000 train_time:164865ms step_avg:47.36ms +[2025-09-11 10:25:47] [Rank 0] step:3481/10000 train_time:164865ms step_avg:47.36ms +[2025-09-11 10:25:48] [Rank 0] step:3501/10000 train_time:165531ms step_avg:47.28ms +[2025-09-11 10:25:48] [Rank 0] step:3501/10000 train_time:165531ms step_avg:47.28ms +[2025-09-11 10:25:49] [Rank 0] step:3521/10000 train_time:166197ms step_avg:47.20ms 
+[2025-09-11 10:25:49] [Rank 0] step:3521/10000 train_time:166197ms step_avg:47.20ms +[2025-09-11 10:25:49] [Rank 0] step:3541/10000 train_time:166863ms step_avg:47.12ms +[2025-09-11 10:25:49] [Rank 0] step:3541/10000 train_time:166863ms step_avg:47.12ms +[2025-09-11 10:25:50] [Rank 0] step:3561/10000 train_time:167529ms step_avg:47.05ms +[2025-09-11 10:25:50] [Rank 0] step:3561/10000 train_time:167529ms step_avg:47.05ms +[2025-09-11 10:25:51] [Rank 0] step:3581/10000 train_time:168196ms step_avg:46.97ms +[2025-09-11 10:25:51] [Rank 0] step:3581/10000 train_time:168196ms step_avg:46.97ms +[2025-09-11 10:25:51] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 10:25:51] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 10:25:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 10:25:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 10:25:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 10:25:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 10:25:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:25:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:25:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 10:25:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 10:25:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 10:25:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 10:25:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 10:25:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 10:25:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 10:25:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 10:25:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 10:25:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 10:25:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 10:25:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 10:25:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 10:25:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 10:25:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 10:25:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 10:25:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 10:25:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 10:25:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 10:25:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 10:26:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 10:26:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 10:26:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 10:26:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 10:26:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 10:26:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 10:26:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 10:26:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 10:26:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 10:26:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 10:26:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 10:26:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 10:26:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 10:26:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 10:26:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 10:26:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 10:26:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:26:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:26:02] [Rank 0] PRINT: step:3600/10000 val_loss:4.7214 total_sharp:1.5913e-04 L1_sharp:3.6050e-02 L2_sharp:4.6388e-02 L3_sharp:6.0389e-02 L4_sharp:6.5296e-02 L5_sharp:7.1988e-02 L6_sharp:8.9380e-02 L7_sharp:1.1635e-01 L8_sharp:1.2420e-01 L9_sharp:1.5144e-01 L10_sharp:2.3396e-01 L11_sharp:2.1756e-01 L12_sharp:1.3035e+00 total_fnorm:7.0500e+01 total_l1_linf:1.3312e+05 total_spectral:3.5250e+01 L1_fnorm:1.1523e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1523e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1426e-01 L9_fnorm:1.1523e-01 L10_fnorm:1.1523e-01 L11_fnorm:1.1523e-01 L12_fnorm:1.1572e-01 L1_l1linf:3.1128e-02 L2_l1linf:3.1982e-02 L3_l1linf:3.1250e-02 L4_l1linf:3.1738e-02 L5_l1linf:3.1738e-02 L6_l1linf:3.0884e-02 L7_l1linf:3.0762e-02 L8_l1linf:3.1006e-02 L9_l1linf:3.1006e-02 L10_l1linf:3.0884e-02 L11_l1linf:3.0884e-02 L12_l1linf:3.3447e-02 L1_spectral:1.6055e-03 L2_spectral:1.6051e-03 L3_spectral:1.6006e-03 L4_spectral:1.6208e-03 L5_spectral:1.6026e-03 L6_spectral:1.6091e-03 L7_spectral:1.6061e-03 L8_spectral:1.6073e-03 L9_spectral:1.6052e-03 L10_spectral:1.6179e-03 L11_spectral:1.6202e-03 L12_spectral:1.6162e-03 train_time:168843ms step_avg:46.90ms +[2025-09-11 10:26:02] [Rank 0] PRINT: step:3600/10000 val_loss:4.7214 total_sharp:1.5913e-04 L1_sharp:3.6050e-02 L2_sharp:4.6388e-02 L3_sharp:6.0389e-02 L4_sharp:6.5296e-02 L5_sharp:7.1988e-02 L6_sharp:8.9380e-02 L7_sharp:1.1635e-01 L8_sharp:1.2420e-01 L9_sharp:1.5144e-01 L10_sharp:2.3396e-01 L11_sharp:2.1756e-01 L12_sharp:1.3035e+00 total_fnorm:7.0500e+01 total_l1_linf:1.3312e+05 total_spectral:3.5250e+01 L1_fnorm:1.1523e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1523e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1426e-01 L9_fnorm:1.1523e-01 L10_fnorm:1.1523e-01 L11_fnorm:1.1523e-01 L12_fnorm:1.1572e-01 L1_l1linf:3.1128e-02 L2_l1linf:3.1982e-02 L3_l1linf:3.1250e-02 L4_l1linf:3.1738e-02 L5_l1linf:3.1738e-02 
L6_l1linf:3.0884e-02 L7_l1linf:3.0762e-02 L8_l1linf:3.1006e-02 L9_l1linf:3.1006e-02 L10_l1linf:3.0884e-02 L11_l1linf:3.0884e-02 L12_l1linf:3.3447e-02 L1_spectral:1.6055e-03 L2_spectral:1.6051e-03 L3_spectral:1.6006e-03 L4_spectral:1.6208e-03 L5_spectral:1.6026e-03 L6_spectral:1.6091e-03 L7_spectral:1.6061e-03 L8_spectral:1.6073e-03 L9_spectral:1.6052e-03 L10_spectral:1.6179e-03 L11_spectral:1.6202e-03 L12_spectral:1.6162e-03 train_time:168843ms step_avg:46.90ms +[2025-09-11 10:26:04] [Rank 0] step:3601/10000 train_time:170493ms step_avg:47.35ms +[2025-09-11 10:26:04] [Rank 0] step:3601/10000 train_time:170493ms step_avg:47.35ms +[2025-09-11 10:26:04] [Rank 0] step:3621/10000 train_time:171170ms step_avg:47.27ms +[2025-09-11 10:26:04] [Rank 0] step:3621/10000 train_time:171170ms step_avg:47.27ms +[2025-09-11 10:26:05] [Rank 0] step:3641/10000 train_time:171837ms step_avg:47.19ms +[2025-09-11 10:26:05] [Rank 0] step:3641/10000 train_time:171837ms step_avg:47.19ms +[2025-09-11 10:26:06] [Rank 0] step:3661/10000 train_time:172503ms step_avg:47.12ms +[2025-09-11 10:26:06] [Rank 0] step:3661/10000 train_time:172503ms step_avg:47.12ms +[2025-09-11 10:26:06] [Rank 0] step:3681/10000 train_time:173169ms step_avg:47.04ms +[2025-09-11 10:26:06] [Rank 0] step:3681/10000 train_time:173169ms step_avg:47.04ms +[2025-09-11 10:26:07] [Rank 0] step:3701/10000 train_time:173835ms step_avg:46.97ms +[2025-09-11 10:26:07] [Rank 0] step:3701/10000 train_time:173835ms step_avg:46.97ms +[2025-09-11 10:26:08] [Rank 0] step:3721/10000 train_time:174510ms step_avg:46.90ms +[2025-09-11 10:26:08] [Rank 0] step:3721/10000 train_time:174510ms step_avg:46.90ms +[2025-09-11 10:26:08] [Rank 0] step:3741/10000 train_time:175187ms step_avg:46.83ms +[2025-09-11 10:26:08] [Rank 0] step:3741/10000 train_time:175187ms step_avg:46.83ms +[2025-09-11 10:26:09] [Rank 0] step:3761/10000 train_time:175864ms step_avg:46.76ms +[2025-09-11 10:26:09] [Rank 0] step:3761/10000 train_time:175864ms step_avg:46.76ms 
+[2025-09-11 10:26:10] [Rank 0] step:3781/10000 train_time:176540ms step_avg:46.69ms +[2025-09-11 10:26:10] [Rank 0] step:3781/10000 train_time:176540ms step_avg:46.69ms +[2025-09-11 10:26:10] [Rank 0] step:3801/10000 train_time:177217ms step_avg:46.62ms +[2025-09-11 10:26:10] [Rank 0] step:3801/10000 train_time:177217ms step_avg:46.62ms +[2025-09-11 10:26:11] [Rank 0] step:3821/10000 train_time:177894ms step_avg:46.56ms +[2025-09-11 10:26:11] [Rank 0] step:3821/10000 train_time:177894ms step_avg:46.56ms +[2025-09-11 10:26:12] [Rank 0] step:3841/10000 train_time:178571ms step_avg:46.49ms +[2025-09-11 10:26:12] [Rank 0] step:3841/10000 train_time:178571ms step_avg:46.49ms +[2025-09-11 10:26:12] [Rank 0] step:3861/10000 train_time:179247ms step_avg:46.42ms +[2025-09-11 10:26:12] [Rank 0] step:3861/10000 train_time:179247ms step_avg:46.42ms +[2025-09-11 10:26:13] [Rank 0] step:3881/10000 train_time:179923ms step_avg:46.36ms +[2025-09-11 10:26:13] [Rank 0] step:3881/10000 train_time:179923ms step_avg:46.36ms +[2025-09-11 10:26:14] [Rank 0] step:3901/10000 train_time:180600ms step_avg:46.30ms +[2025-09-11 10:26:14] [Rank 0] step:3901/10000 train_time:180600ms step_avg:46.30ms +[2025-09-11 10:26:14] [Rank 0] step:3921/10000 train_time:181276ms step_avg:46.23ms +[2025-09-11 10:26:14] [Rank 0] step:3921/10000 train_time:181276ms step_avg:46.23ms +[2025-09-11 10:26:15] [Rank 0] step:3941/10000 train_time:181954ms step_avg:46.17ms +[2025-09-11 10:26:15] [Rank 0] step:3941/10000 train_time:181954ms step_avg:46.17ms +[2025-09-11 10:26:16] [Rank 0] step:3961/10000 train_time:182630ms step_avg:46.11ms +[2025-09-11 10:26:16] [Rank 0] step:3961/10000 train_time:182630ms step_avg:46.11ms +[2025-09-11 10:26:16] [Rank 0] step:3981/10000 train_time:183306ms step_avg:46.05ms +[2025-09-11 10:26:16] [Rank 0] step:3981/10000 train_time:183306ms step_avg:46.05ms +[2025-09-11 10:26:17] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 10:26:17] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 10:26:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 10:26:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 10:26:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 10:26:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 10:26:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:26:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:26:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 10:26:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 10:26:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 10:26:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 10:26:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 10:26:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 10:26:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 10:26:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 10:26:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 10:26:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 10:26:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 10:26:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 10:26:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 10:26:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 10:26:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 10:26:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 10:26:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 10:26:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 10:26:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 10:26:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 10:26:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 10:26:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 10:26:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 10:26:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 10:26:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 10:26:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 10:26:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 10:26:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 10:26:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 10:26:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 10:26:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 10:26:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 10:26:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 10:26:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 10:26:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 10:26:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 10:26:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:26:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:26:28] [Rank 0] PRINT: step:4000/10000 val_loss:4.6494 total_sharp:1.6531e-04 L1_sharp:4.3222e-02 L2_sharp:5.2345e-02 L3_sharp:6.6244e-02 L4_sharp:9.1195e-02 L5_sharp:1.0248e-01 L6_sharp:1.3709e-01 L7_sharp:1.8835e-01 L8_sharp:1.8921e-01 L9_sharp:2.0568e-01 L10_sharp:3.9620e-01 L11_sharp:5.8948e-01 L12_sharp:1.8583e+00 total_fnorm:8.0500e+01 total_l1_linf:1.5565e+05 total_spectral:4.0250e+01 L1_fnorm:1.1523e-01 L2_fnorm:1.1621e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1328e-01 L9_fnorm:1.1475e-01 L10_fnorm:1.1523e-01 L11_fnorm:1.1523e-01 L12_fnorm:1.1572e-01 L1_l1linf:3.2715e-02 L2_l1linf:3.3203e-02 L3_l1linf:3.3203e-02 L4_l1linf:3.2715e-02 L5_l1linf:3.1982e-02 L6_l1linf:3.1982e-02 L7_l1linf:3.1738e-02 L8_l1linf:3.1006e-02 L9_l1linf:3.1128e-02 L10_l1linf:3.1494e-02 L11_l1linf:3.1494e-02 L12_l1linf:3.2471e-02 L1_spectral:1.5971e-03 L2_spectral:1.5974e-03 L3_spectral:1.6005e-03 L4_spectral:1.5915e-03 L5_spectral:1.6002e-03 L6_spectral:1.6084e-03 L7_spectral:1.6125e-03 L8_spectral:1.6173e-03 L9_spectral:1.6016e-03 L10_spectral:1.6121e-03 L11_spectral:1.6058e-03 L12_spectral:1.6156e-03 train_time:183963ms step_avg:45.99ms +[2025-09-11 10:26:28] [Rank 0] PRINT: step:4000/10000 
val_loss:4.6494 total_sharp:1.6531e-04 L1_sharp:4.3222e-02 L2_sharp:5.2345e-02 L3_sharp:6.6244e-02 L4_sharp:9.1195e-02 L5_sharp:1.0248e-01 L6_sharp:1.3709e-01 L7_sharp:1.8835e-01 L8_sharp:1.8921e-01 L9_sharp:2.0568e-01 L10_sharp:3.9620e-01 L11_sharp:5.8948e-01 L12_sharp:1.8583e+00 total_fnorm:8.0500e+01 total_l1_linf:1.5565e+05 total_spectral:4.0250e+01 L1_fnorm:1.1523e-01 L2_fnorm:1.1621e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1328e-01 L9_fnorm:1.1475e-01 L10_fnorm:1.1523e-01 L11_fnorm:1.1523e-01 L12_fnorm:1.1572e-01 L1_l1linf:3.2715e-02 L2_l1linf:3.3203e-02 L3_l1linf:3.3203e-02 L4_l1linf:3.2715e-02 L5_l1linf:3.1982e-02 L6_l1linf:3.1982e-02 L7_l1linf:3.1738e-02 L8_l1linf:3.1006e-02 L9_l1linf:3.1128e-02 L10_l1linf:3.1494e-02 L11_l1linf:3.1494e-02 L12_l1linf:3.2471e-02 L1_spectral:1.5971e-03 L2_spectral:1.5974e-03 L3_spectral:1.6005e-03 L4_spectral:1.5915e-03 L5_spectral:1.6002e-03 L6_spectral:1.6084e-03 L7_spectral:1.6125e-03 L8_spectral:1.6173e-03 L9_spectral:1.6016e-03 L10_spectral:1.6121e-03 L11_spectral:1.6058e-03 L12_spectral:1.6156e-03 train_time:183963ms step_avg:45.99ms +[2025-09-11 10:26:29] [Rank 0] step:4001/10000 train_time:185594ms step_avg:46.39ms +[2025-09-11 10:26:29] [Rank 0] step:4001/10000 train_time:185594ms step_avg:46.39ms +[2025-09-11 10:26:30] [Rank 0] step:4021/10000 train_time:186260ms step_avg:46.32ms +[2025-09-11 10:26:30] [Rank 0] step:4021/10000 train_time:186260ms step_avg:46.32ms +[2025-09-11 10:26:31] [Rank 0] step:4041/10000 train_time:186938ms step_avg:46.26ms +[2025-09-11 10:26:31] [Rank 0] step:4041/10000 train_time:186938ms step_avg:46.26ms +[2025-09-11 10:26:31] [Rank 0] step:4061/10000 train_time:187613ms step_avg:46.20ms +[2025-09-11 10:26:31] [Rank 0] step:4061/10000 train_time:187613ms step_avg:46.20ms +[2025-09-11 10:26:32] [Rank 0] step:4081/10000 train_time:188291ms step_avg:46.14ms +[2025-09-11 10:26:32] [Rank 0] step:4081/10000 
train_time:188291ms step_avg:46.14ms +[2025-09-11 10:26:33] [Rank 0] step:4101/10000 train_time:188967ms step_avg:46.08ms +[2025-09-11 10:26:33] [Rank 0] step:4101/10000 train_time:188967ms step_avg:46.08ms +[2025-09-11 10:26:33] [Rank 0] step:4121/10000 train_time:189644ms step_avg:46.02ms +[2025-09-11 10:26:33] [Rank 0] step:4121/10000 train_time:189644ms step_avg:46.02ms +[2025-09-11 10:26:34] [Rank 0] step:4141/10000 train_time:190320ms step_avg:45.96ms +[2025-09-11 10:26:34] [Rank 0] step:4141/10000 train_time:190320ms step_avg:45.96ms +[2025-09-11 10:26:35] [Rank 0] step:4161/10000 train_time:190996ms step_avg:45.90ms +[2025-09-11 10:26:35] [Rank 0] step:4161/10000 train_time:190996ms step_avg:45.90ms +[2025-09-11 10:26:35] [Rank 0] step:4181/10000 train_time:191672ms step_avg:45.84ms +[2025-09-11 10:26:35] [Rank 0] step:4181/10000 train_time:191672ms step_avg:45.84ms +[2025-09-11 10:26:36] [Rank 0] step:4201/10000 train_time:192350ms step_avg:45.79ms +[2025-09-11 10:26:36] [Rank 0] step:4201/10000 train_time:192350ms step_avg:45.79ms +[2025-09-11 10:26:37] [Rank 0] step:4221/10000 train_time:193027ms step_avg:45.73ms +[2025-09-11 10:26:37] [Rank 0] step:4221/10000 train_time:193027ms step_avg:45.73ms +[2025-09-11 10:26:37] [Rank 0] step:4241/10000 train_time:193702ms step_avg:45.67ms +[2025-09-11 10:26:37] [Rank 0] step:4241/10000 train_time:193702ms step_avg:45.67ms +[2025-09-11 10:26:38] [Rank 0] step:4261/10000 train_time:194378ms step_avg:45.62ms +[2025-09-11 10:26:38] [Rank 0] step:4261/10000 train_time:194378ms step_avg:45.62ms +[2025-09-11 10:26:39] [Rank 0] step:4281/10000 train_time:195056ms step_avg:45.56ms +[2025-09-11 10:26:39] [Rank 0] step:4281/10000 train_time:195056ms step_avg:45.56ms +[2025-09-11 10:26:39] [Rank 0] step:4301/10000 train_time:195733ms step_avg:45.51ms +[2025-09-11 10:26:39] [Rank 0] step:4301/10000 train_time:195733ms step_avg:45.51ms +[2025-09-11 10:26:40] [Rank 0] step:4321/10000 train_time:196408ms step_avg:45.45ms 
+[2025-09-11 10:26:40] [Rank 0] step:4321/10000 train_time:196408ms step_avg:45.45ms +[2025-09-11 10:26:41] [Rank 0] step:4341/10000 train_time:197085ms step_avg:45.40ms +[2025-09-11 10:26:41] [Rank 0] step:4341/10000 train_time:197085ms step_avg:45.40ms +[2025-09-11 10:26:41] [Rank 0] step:4361/10000 train_time:197760ms step_avg:45.35ms +[2025-09-11 10:26:41] [Rank 0] step:4361/10000 train_time:197760ms step_avg:45.35ms +[2025-09-11 10:26:42] [Rank 0] step:4381/10000 train_time:198437ms step_avg:45.29ms +[2025-09-11 10:26:42] [Rank 0] step:4381/10000 train_time:198437ms step_avg:45.29ms +[2025-09-11 10:26:43] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 10:26:43] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 10:26:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 10:26:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 10:26:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 10:26:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 10:26:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:26:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:26:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 10:26:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 10:26:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 10:26:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 10:26:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 10:26:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 10:26:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 10:26:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 10:26:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 10:26:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 10:26:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 10:26:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 10:26:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 10:26:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 10:26:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 10:26:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 10:26:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 10:26:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 10:26:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 10:26:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 10:26:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 10:26:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 10:26:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 10:26:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 10:26:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 10:26:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 10:26:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 10:26:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 10:26:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 10:26:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 10:26:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 10:26:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 10:26:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 10:26:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 10:26:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 10:26:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 10:26:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:26:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:26:53] [Rank 0] PRINT: step:4400/10000 val_loss:4.5979 total_sharp:1.1987e-04 L1_sharp:2.9383e-02 L2_sharp:3.8221e-02 L3_sharp:5.0062e-02 L4_sharp:6.7150e-02 L5_sharp:7.5427e-02 L6_sharp:9.3149e-02 L7_sharp:1.1019e-01 L8_sharp:1.3243e-01 L9_sharp:1.3420e-01 L10_sharp:2.0712e-01 L11_sharp:2.7822e-01 L12_sharp:8.3433e-01 total_fnorm:7.2000e+01 total_l1_linf:1.3414e+05 total_spectral:3.6250e+01 L1_fnorm:1.1475e-01 L2_fnorm:1.1475e-01 L3_fnorm:1.1523e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1475e-01 L8_fnorm:1.1328e-01 L9_fnorm:1.1475e-01 L10_fnorm:1.1523e-01 L11_fnorm:1.1523e-01 L12_fnorm:1.1572e-01 L1_l1linf:3.0762e-02 L2_l1linf:3.1494e-02 L3_l1linf:3.0884e-02 L4_l1linf:3.1250e-02 L5_l1linf:3.0762e-02 L6_l1linf:3.0273e-02 L7_l1linf:3.0029e-02 L8_l1linf:3.0396e-02 L9_l1linf:3.0396e-02 L10_l1linf:3.0640e-02 L11_l1linf:3.1494e-02 L12_l1linf:3.2471e-02 L1_spectral:1.6057e-03 L2_spectral:1.6063e-03 L3_spectral:1.6060e-03 L4_spectral:1.6003e-03 L5_spectral:1.6102e-03 L6_spectral:1.5989e-03 L7_spectral:1.6026e-03 L8_spectral:1.6045e-03 L9_spectral:1.6125e-03 L10_spectral:1.6088e-03 L11_spectral:1.6057e-03 L12_spectral:1.6056e-03 train_time:199094ms step_avg:45.25ms +[2025-09-11 10:26:53] [Rank 0] PRINT: step:4400/10000 val_loss:4.5979 total_sharp:1.1987e-04 L1_sharp:2.9383e-02 L2_sharp:3.8221e-02 L3_sharp:5.0062e-02 L4_sharp:6.7150e-02 L5_sharp:7.5427e-02 L6_sharp:9.3149e-02 L7_sharp:1.1019e-01 L8_sharp:1.3243e-01 L9_sharp:1.3420e-01 L10_sharp:2.0712e-01 L11_sharp:2.7822e-01 L12_sharp:8.3433e-01 total_fnorm:7.2000e+01 total_l1_linf:1.3414e+05 total_spectral:3.6250e+01 L1_fnorm:1.1475e-01 L2_fnorm:1.1475e-01 L3_fnorm:1.1523e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1475e-01 L8_fnorm:1.1328e-01 L9_fnorm:1.1475e-01 L10_fnorm:1.1523e-01 L11_fnorm:1.1523e-01 L12_fnorm:1.1572e-01 L1_l1linf:3.0762e-02 L2_l1linf:3.1494e-02 L3_l1linf:3.0884e-02 L4_l1linf:3.1250e-02 L5_l1linf:3.0762e-02 
L6_l1linf:3.0273e-02 L7_l1linf:3.0029e-02 L8_l1linf:3.0396e-02 L9_l1linf:3.0396e-02 L10_l1linf:3.0640e-02 L11_l1linf:3.1494e-02 L12_l1linf:3.2471e-02 L1_spectral:1.6057e-03 L2_spectral:1.6063e-03 L3_spectral:1.6060e-03 L4_spectral:1.6003e-03 L5_spectral:1.6102e-03 L6_spectral:1.5989e-03 L7_spectral:1.6026e-03 L8_spectral:1.6045e-03 L9_spectral:1.6125e-03 L10_spectral:1.6088e-03 L11_spectral:1.6057e-03 L12_spectral:1.6056e-03 train_time:199094ms step_avg:45.25ms +[2025-09-11 10:26:55] [Rank 0] step:4401/10000 train_time:200689ms step_avg:45.60ms +[2025-09-11 10:26:55] [Rank 0] step:4401/10000 train_time:200689ms step_avg:45.60ms +[2025-09-11 10:26:56] [Rank 0] step:4421/10000 train_time:201378ms step_avg:45.55ms +[2025-09-11 10:26:56] [Rank 0] step:4421/10000 train_time:201378ms step_avg:45.55ms +[2025-09-11 10:26:56] [Rank 0] step:4441/10000 train_time:202056ms step_avg:45.50ms +[2025-09-11 10:26:56] [Rank 0] step:4441/10000 train_time:202056ms step_avg:45.50ms +[2025-09-11 10:26:57] [Rank 0] step:4461/10000 train_time:202734ms step_avg:45.45ms +[2025-09-11 10:26:57] [Rank 0] step:4461/10000 train_time:202734ms step_avg:45.45ms +[2025-09-11 10:26:58] [Rank 0] step:4481/10000 train_time:203412ms step_avg:45.39ms +[2025-09-11 10:26:58] [Rank 0] step:4481/10000 train_time:203412ms step_avg:45.39ms +[2025-09-11 10:26:58] [Rank 0] step:4501/10000 train_time:204091ms step_avg:45.34ms +[2025-09-11 10:26:58] [Rank 0] step:4501/10000 train_time:204091ms step_avg:45.34ms +[2025-09-11 10:26:59] [Rank 0] step:4521/10000 train_time:204768ms step_avg:45.29ms +[2025-09-11 10:26:59] [Rank 0] step:4521/10000 train_time:204768ms step_avg:45.29ms +[2025-09-11 10:27:00] [Rank 0] step:4541/10000 train_time:205446ms step_avg:45.24ms +[2025-09-11 10:27:00] [Rank 0] step:4541/10000 train_time:205446ms step_avg:45.24ms +[2025-09-11 10:27:01] [Rank 0] step:4561/10000 train_time:206123ms step_avg:45.19ms +[2025-09-11 10:27:01] [Rank 0] step:4561/10000 train_time:206123ms step_avg:45.19ms 
+[2025-09-11 10:27:01] [Rank 0] step:4581/10000 train_time:206809ms step_avg:45.14ms +[2025-09-11 10:27:01] [Rank 0] step:4581/10000 train_time:206809ms step_avg:45.14ms +[2025-09-11 10:27:02] [Rank 0] step:4601/10000 train_time:207487ms step_avg:45.10ms +[2025-09-11 10:27:02] [Rank 0] step:4601/10000 train_time:207487ms step_avg:45.10ms +[2025-09-11 10:27:03] [Rank 0] step:4621/10000 train_time:208165ms step_avg:45.05ms +[2025-09-11 10:27:03] [Rank 0] step:4621/10000 train_time:208165ms step_avg:45.05ms +[2025-09-11 10:27:03] [Rank 0] step:4641/10000 train_time:208844ms step_avg:45.00ms +[2025-09-11 10:27:03] [Rank 0] step:4641/10000 train_time:208844ms step_avg:45.00ms +[2025-09-11 10:27:04] [Rank 0] step:4661/10000 train_time:209523ms step_avg:44.95ms +[2025-09-11 10:27:04] [Rank 0] step:4661/10000 train_time:209523ms step_avg:44.95ms +[2025-09-11 10:27:05] [Rank 0] step:4681/10000 train_time:210200ms step_avg:44.91ms +[2025-09-11 10:27:05] [Rank 0] step:4681/10000 train_time:210200ms step_avg:44.91ms +[2025-09-11 10:27:05] [Rank 0] step:4701/10000 train_time:210878ms step_avg:44.86ms +[2025-09-11 10:27:05] [Rank 0] step:4701/10000 train_time:210878ms step_avg:44.86ms +[2025-09-11 10:27:06] [Rank 0] step:4721/10000 train_time:211556ms step_avg:44.81ms +[2025-09-11 10:27:06] [Rank 0] step:4721/10000 train_time:211556ms step_avg:44.81ms +[2025-09-11 10:27:07] [Rank 0] step:4741/10000 train_time:212234ms step_avg:44.77ms +[2025-09-11 10:27:07] [Rank 0] step:4741/10000 train_time:212234ms step_avg:44.77ms +[2025-09-11 10:27:07] [Rank 0] step:4761/10000 train_time:212913ms step_avg:44.72ms +[2025-09-11 10:27:07] [Rank 0] step:4761/10000 train_time:212913ms step_avg:44.72ms +[2025-09-11 10:27:08] [Rank 0] step:4781/10000 train_time:213592ms step_avg:44.68ms +[2025-09-11 10:27:08] [Rank 0] step:4781/10000 train_time:213592ms step_avg:44.68ms +[2025-09-11 10:27:09] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 10:27:09] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 10:27:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 10:27:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 10:27:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 10:27:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 10:27:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:27:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:27:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 10:27:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 10:27:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 10:27:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 10:27:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 10:27:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 10:27:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 10:27:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 10:27:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 10:27:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 10:27:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 10:27:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 10:27:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 10:27:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 10:27:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 10:27:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 10:27:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 10:27:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 10:27:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 10:27:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 10:27:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 10:27:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 10:27:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 10:27:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 10:27:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 10:27:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 10:27:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 10:27:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 10:27:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 10:27:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 10:27:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 10:27:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 10:27:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 10:27:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 10:27:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 10:27:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 10:27:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:27:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:27:19] [Rank 0] PRINT: step:4800/10000 val_loss:4.5434 total_sharp:1.0961e-04 L1_sharp:3.0445e-02 L2_sharp:4.3581e-02 L3_sharp:5.4402e-02 L4_sharp:7.5179e-02 L5_sharp:9.6095e-02 L6_sharp:1.2342e-01 L7_sharp:1.4052e-01 L8_sharp:1.2975e-01 L9_sharp:1.4111e-01 L10_sharp:2.1566e-01 L11_sharp:3.1770e-01 L12_sharp:1.0881e+00 total_fnorm:7.6000e+01 total_l1_linf:1.4746e+05 total_spectral:3.8000e+01 L1_fnorm:1.1475e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1475e-01 L4_fnorm:1.1475e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1475e-01 L8_fnorm:1.1279e-01 L9_fnorm:1.1426e-01 L10_fnorm:1.1475e-01 L11_fnorm:1.1426e-01 L12_fnorm:1.1523e-01 L1_l1linf:2.9907e-02 L2_l1linf:3.0762e-02 L3_l1linf:3.1494e-02 L4_l1linf:3.0151e-02 L5_l1linf:2.9785e-02 L6_l1linf:2.9419e-02 L7_l1linf:2.9419e-02 L8_l1linf:2.9663e-02 L9_l1linf:2.9419e-02 L10_l1linf:2.9419e-02 L11_l1linf:2.9907e-02 L12_l1linf:3.1128e-02 L1_spectral:1.6196e-03 L2_spectral:1.6076e-03 L3_spectral:1.6012e-03 L4_spectral:1.6102e-03 L5_spectral:1.6101e-03 L6_spectral:1.6072e-03 L7_spectral:1.6164e-03 L8_spectral:1.6060e-03 L9_spectral:1.6066e-03 L10_spectral:1.6153e-03 L11_spectral:1.6150e-03 L12_spectral:1.6110e-03 train_time:214248ms step_avg:44.63ms +[2025-09-11 10:27:19] [Rank 0] PRINT: step:4800/10000 
val_loss:4.5434 total_sharp:1.0961e-04 L1_sharp:3.0445e-02 L2_sharp:4.3581e-02 L3_sharp:5.4402e-02 L4_sharp:7.5179e-02 L5_sharp:9.6095e-02 L6_sharp:1.2342e-01 L7_sharp:1.4052e-01 L8_sharp:1.2975e-01 L9_sharp:1.4111e-01 L10_sharp:2.1566e-01 L11_sharp:3.1770e-01 L12_sharp:1.0881e+00 total_fnorm:7.6000e+01 total_l1_linf:1.4746e+05 total_spectral:3.8000e+01 L1_fnorm:1.1475e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1475e-01 L4_fnorm:1.1475e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1475e-01 L8_fnorm:1.1279e-01 L9_fnorm:1.1426e-01 L10_fnorm:1.1475e-01 L11_fnorm:1.1426e-01 L12_fnorm:1.1523e-01 L1_l1linf:2.9907e-02 L2_l1linf:3.0762e-02 L3_l1linf:3.1494e-02 L4_l1linf:3.0151e-02 L5_l1linf:2.9785e-02 L6_l1linf:2.9419e-02 L7_l1linf:2.9419e-02 L8_l1linf:2.9663e-02 L9_l1linf:2.9419e-02 L10_l1linf:2.9419e-02 L11_l1linf:2.9907e-02 L12_l1linf:3.1128e-02 L1_spectral:1.6196e-03 L2_spectral:1.6076e-03 L3_spectral:1.6012e-03 L4_spectral:1.6102e-03 L5_spectral:1.6101e-03 L6_spectral:1.6072e-03 L7_spectral:1.6164e-03 L8_spectral:1.6060e-03 L9_spectral:1.6066e-03 L10_spectral:1.6153e-03 L11_spectral:1.6150e-03 L12_spectral:1.6110e-03 train_time:214248ms step_avg:44.63ms +[2025-09-11 10:27:21] [Rank 0] step:4801/10000 train_time:215858ms step_avg:44.96ms +[2025-09-11 10:27:21] [Rank 0] step:4801/10000 train_time:215858ms step_avg:44.96ms +[2025-09-11 10:27:22] [Rank 0] step:4821/10000 train_time:216563ms step_avg:44.92ms +[2025-09-11 10:27:22] [Rank 0] step:4821/10000 train_time:216563ms step_avg:44.92ms +[2025-09-11 10:27:22] [Rank 0] step:4841/10000 train_time:217242ms step_avg:44.88ms +[2025-09-11 10:27:22] [Rank 0] step:4841/10000 train_time:217242ms step_avg:44.88ms +[2025-09-11 10:27:23] [Rank 0] step:4861/10000 train_time:217921ms step_avg:44.83ms +[2025-09-11 10:27:23] [Rank 0] step:4861/10000 train_time:217921ms step_avg:44.83ms +[2025-09-11 10:27:24] [Rank 0] step:4881/10000 train_time:218600ms step_avg:44.79ms +[2025-09-11 10:27:24] [Rank 0] step:4881/10000 
train_time:218600ms step_avg:44.79ms +[2025-09-11 10:27:24] [Rank 0] step:4901/10000 train_time:219279ms step_avg:44.74ms +[2025-09-11 10:27:24] [Rank 0] step:4901/10000 train_time:219279ms step_avg:44.74ms +[2025-09-11 10:27:25] [Rank 0] step:4921/10000 train_time:219958ms step_avg:44.70ms +[2025-09-11 10:27:25] [Rank 0] step:4921/10000 train_time:219958ms step_avg:44.70ms +[2025-09-11 10:27:26] [Rank 0] step:4941/10000 train_time:220637ms step_avg:44.65ms +[2025-09-11 10:27:26] [Rank 0] step:4941/10000 train_time:220637ms step_avg:44.65ms +[2025-09-11 10:27:26] [Rank 0] step:4961/10000 train_time:221315ms step_avg:44.61ms +[2025-09-11 10:27:26] [Rank 0] step:4961/10000 train_time:221315ms step_avg:44.61ms +[2025-09-11 10:27:27] [Rank 0] step:4981/10000 train_time:221995ms step_avg:44.57ms +[2025-09-11 10:27:27] [Rank 0] step:4981/10000 train_time:221995ms step_avg:44.57ms +[2025-09-11 10:27:28] [Rank 0] step:5001/10000 train_time:222674ms step_avg:44.53ms +[2025-09-11 10:27:28] [Rank 0] step:5001/10000 train_time:222674ms step_avg:44.53ms +[2025-09-11 10:27:28] [Rank 0] step:5021/10000 train_time:223352ms step_avg:44.48ms +[2025-09-11 10:27:28] [Rank 0] step:5021/10000 train_time:223352ms step_avg:44.48ms +[2025-09-11 10:27:29] [Rank 0] step:5041/10000 train_time:224030ms step_avg:44.44ms +[2025-09-11 10:27:29] [Rank 0] step:5041/10000 train_time:224030ms step_avg:44.44ms +[2025-09-11 10:27:30] [Rank 0] step:5061/10000 train_time:224708ms step_avg:44.40ms +[2025-09-11 10:27:30] [Rank 0] step:5061/10000 train_time:224708ms step_avg:44.40ms +[2025-09-11 10:27:30] [Rank 0] step:5081/10000 train_time:225385ms step_avg:44.36ms +[2025-09-11 10:27:30] [Rank 0] step:5081/10000 train_time:225385ms step_avg:44.36ms +[2025-09-11 10:27:31] [Rank 0] step:5101/10000 train_time:226063ms step_avg:44.32ms +[2025-09-11 10:27:31] [Rank 0] step:5101/10000 train_time:226063ms step_avg:44.32ms +[2025-09-11 10:27:32] [Rank 0] step:5121/10000 train_time:226740ms step_avg:44.28ms 
+[2025-09-11 10:27:32] [Rank 0] step:5121/10000 train_time:226740ms step_avg:44.28ms +[2025-09-11 10:27:33] [Rank 0] step:5141/10000 train_time:227419ms step_avg:44.24ms +[2025-09-11 10:27:33] [Rank 0] step:5141/10000 train_time:227419ms step_avg:44.24ms +[2025-09-11 10:27:33] [Rank 0] step:5161/10000 train_time:228097ms step_avg:44.20ms +[2025-09-11 10:27:33] [Rank 0] step:5161/10000 train_time:228097ms step_avg:44.20ms +[2025-09-11 10:27:34] [Rank 0] step:5181/10000 train_time:228775ms step_avg:44.16ms +[2025-09-11 10:27:34] [Rank 0] step:5181/10000 train_time:228775ms step_avg:44.16ms +[2025-09-11 10:27:35] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 10:27:35] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 10:27:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 10:27:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 10:27:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 10:27:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 10:27:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:27:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:27:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 10:27:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 10:27:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 10:27:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 10:27:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 10:27:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 10:27:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 10:27:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 10:27:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 10:27:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 10:27:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 10:27:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 10:27:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 10:27:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 10:27:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 10:27:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 10:27:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 10:27:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 10:27:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 10:27:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 10:27:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 10:27:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 10:27:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 10:27:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 10:27:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 10:27:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 10:27:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 10:27:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 10:27:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 10:27:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 10:27:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 10:27:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 10:27:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 10:27:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 10:27:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 10:27:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 10:27:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:27:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:27:45] [Rank 0] PRINT: step:5200/10000 val_loss:4.5033 total_sharp:1.5095e-04 L1_sharp:2.7559e-02 L2_sharp:3.4047e-02 L3_sharp:4.8682e-02 L4_sharp:7.5297e-02 L5_sharp:9.4523e-02 L6_sharp:9.8539e-02 L7_sharp:1.3627e-01 L8_sharp:1.4465e-01 L9_sharp:1.7024e-01 L10_sharp:2.8892e-01 L11_sharp:4.8815e-01 L12_sharp:2.9978e+00 total_fnorm:7.1000e+01 total_l1_linf:1.3056e+05 total_spectral:3.5500e+01 L1_fnorm:1.1426e-01 L2_fnorm:1.1475e-01 L3_fnorm:1.1475e-01 L4_fnorm:1.1475e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1230e-01 L9_fnorm:1.1426e-01 L10_fnorm:1.1426e-01 L11_fnorm:1.1426e-01 L12_fnorm:1.1523e-01 L1_l1linf:2.9053e-02 L2_l1linf:2.8809e-02 L3_l1linf:2.8564e-02 L4_l1linf:2.9053e-02 L5_l1linf:2.9053e-02 L6_l1linf:2.9175e-02 L7_l1linf:2.9175e-02 L8_l1linf:2.9419e-02 L9_l1linf:2.8809e-02 L10_l1linf:2.8564e-02 L11_l1linf:2.9663e-02 L12_l1linf:3.1494e-02 L1_spectral:1.6050e-03 L2_spectral:1.6163e-03 L3_spectral:1.6120e-03 L4_spectral:1.6027e-03 L5_spectral:1.6004e-03 L6_spectral:1.6190e-03 L7_spectral:1.6149e-03 L8_spectral:1.6198e-03 L9_spectral:1.6196e-03 L10_spectral:1.6260e-03 L11_spectral:1.6153e-03 L12_spectral:1.6119e-03 train_time:229440ms step_avg:44.12ms +[2025-09-11 10:27:45] [Rank 0] PRINT: step:5200/10000 val_loss:4.5033 total_sharp:1.5095e-04 L1_sharp:2.7559e-02 L2_sharp:3.4047e-02 L3_sharp:4.8682e-02 L4_sharp:7.5297e-02 L5_sharp:9.4523e-02 L6_sharp:9.8539e-02 L7_sharp:1.3627e-01 L8_sharp:1.4465e-01 L9_sharp:1.7024e-01 L10_sharp:2.8892e-01 L11_sharp:4.8815e-01 L12_sharp:2.9978e+00 total_fnorm:7.1000e+01 total_l1_linf:1.3056e+05 total_spectral:3.5500e+01 L1_fnorm:1.1426e-01 L2_fnorm:1.1475e-01 L3_fnorm:1.1475e-01 L4_fnorm:1.1475e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1230e-01 L9_fnorm:1.1426e-01 L10_fnorm:1.1426e-01 L11_fnorm:1.1426e-01 L12_fnorm:1.1523e-01 L1_l1linf:2.9053e-02 L2_l1linf:2.8809e-02 L3_l1linf:2.8564e-02 L4_l1linf:2.9053e-02 L5_l1linf:2.9053e-02 
L6_l1linf:2.9175e-02 L7_l1linf:2.9175e-02 L8_l1linf:2.9419e-02 L9_l1linf:2.8809e-02 L10_l1linf:2.8564e-02 L11_l1linf:2.9663e-02 L12_l1linf:3.1494e-02 L1_spectral:1.6050e-03 L2_spectral:1.6163e-03 L3_spectral:1.6120e-03 L4_spectral:1.6027e-03 L5_spectral:1.6004e-03 L6_spectral:1.6190e-03 L7_spectral:1.6149e-03 L8_spectral:1.6198e-03 L9_spectral:1.6196e-03 L10_spectral:1.6260e-03 L11_spectral:1.6153e-03 L12_spectral:1.6119e-03 train_time:229440ms step_avg:44.12ms +[2025-09-11 10:27:47] [Rank 0] step:5201/10000 train_time:231063ms step_avg:44.43ms +[2025-09-11 10:27:47] [Rank 0] step:5201/10000 train_time:231063ms step_avg:44.43ms +[2025-09-11 10:27:48] [Rank 0] step:5221/10000 train_time:231772ms step_avg:44.39ms +[2025-09-11 10:27:48] [Rank 0] step:5221/10000 train_time:231772ms step_avg:44.39ms +[2025-09-11 10:27:48] [Rank 0] step:5241/10000 train_time:232459ms step_avg:44.35ms +[2025-09-11 10:27:48] [Rank 0] step:5241/10000 train_time:232459ms step_avg:44.35ms +[2025-09-11 10:27:49] [Rank 0] step:5261/10000 train_time:233405ms step_avg:44.37ms +[2025-09-11 10:27:49] [Rank 0] step:5261/10000 train_time:233405ms step_avg:44.37ms +[2025-09-11 10:27:50] [Rank 0] step:5281/10000 train_time:234093ms step_avg:44.33ms +[2025-09-11 10:27:50] [Rank 0] step:5281/10000 train_time:234093ms step_avg:44.33ms +[2025-09-11 10:27:51] [Rank 0] step:5301/10000 train_time:234781ms step_avg:44.29ms +[2025-09-11 10:27:51] [Rank 0] step:5301/10000 train_time:234781ms step_avg:44.29ms +[2025-09-11 10:27:51] [Rank 0] step:5321/10000 train_time:235468ms step_avg:44.25ms +[2025-09-11 10:27:51] [Rank 0] step:5321/10000 train_time:235468ms step_avg:44.25ms +[2025-09-11 10:27:52] [Rank 0] step:5341/10000 train_time:236155ms step_avg:44.22ms +[2025-09-11 10:27:52] [Rank 0] step:5341/10000 train_time:236155ms step_avg:44.22ms +[2025-09-11 10:27:53] [Rank 0] step:5361/10000 train_time:236844ms step_avg:44.18ms +[2025-09-11 10:27:53] [Rank 0] step:5361/10000 train_time:236844ms step_avg:44.18ms 
+[2025-09-11 10:27:53] [Rank 0] step:5381/10000 train_time:237534ms step_avg:44.14ms +[2025-09-11 10:27:53] [Rank 0] step:5381/10000 train_time:237534ms step_avg:44.14ms +[2025-09-11 10:27:54] [Rank 0] step:5401/10000 train_time:238220ms step_avg:44.11ms +[2025-09-11 10:27:54] [Rank 0] step:5401/10000 train_time:238220ms step_avg:44.11ms +[2025-09-11 10:27:55] [Rank 0] step:5421/10000 train_time:238909ms step_avg:44.07ms +[2025-09-11 10:27:55] [Rank 0] step:5421/10000 train_time:238909ms step_avg:44.07ms +[2025-09-11 10:27:55] [Rank 0] step:5441/10000 train_time:239598ms step_avg:44.04ms +[2025-09-11 10:27:55] [Rank 0] step:5441/10000 train_time:239598ms step_avg:44.04ms +[2025-09-11 10:27:56] [Rank 0] step:5461/10000 train_time:240287ms step_avg:44.00ms +[2025-09-11 10:27:56] [Rank 0] step:5461/10000 train_time:240287ms step_avg:44.00ms +[2025-09-11 10:27:57] [Rank 0] step:5481/10000 train_time:240975ms step_avg:43.97ms +[2025-09-11 10:27:57] [Rank 0] step:5481/10000 train_time:240975ms step_avg:43.97ms +[2025-09-11 10:27:58] [Rank 0] step:5501/10000 train_time:241663ms step_avg:43.93ms +[2025-09-11 10:27:58] [Rank 0] step:5501/10000 train_time:241663ms step_avg:43.93ms +[2025-09-11 10:27:58] [Rank 0] step:5521/10000 train_time:242351ms step_avg:43.90ms +[2025-09-11 10:27:58] [Rank 0] step:5521/10000 train_time:242351ms step_avg:43.90ms +[2025-09-11 10:27:59] [Rank 0] step:5541/10000 train_time:243042ms step_avg:43.86ms +[2025-09-11 10:27:59] [Rank 0] step:5541/10000 train_time:243042ms step_avg:43.86ms +[2025-09-11 10:28:00] [Rank 0] step:5561/10000 train_time:243733ms step_avg:43.83ms +[2025-09-11 10:28:00] [Rank 0] step:5561/10000 train_time:243733ms step_avg:43.83ms +[2025-09-11 10:28:00] [Rank 0] step:5581/10000 train_time:244422ms step_avg:43.80ms +[2025-09-11 10:28:00] [Rank 0] step:5581/10000 train_time:244422ms step_avg:43.80ms +[2025-09-11 10:28:01] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 10:28:01] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 10:28:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 10:28:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 10:28:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 10:28:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 10:28:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:28:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:28:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 10:28:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 10:28:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 10:28:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 10:28:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 10:28:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 10:28:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 10:28:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 10:28:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 10:28:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 10:28:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 10:28:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 10:28:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 10:28:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 10:28:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 10:28:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 10:28:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 10:28:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 10:28:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 10:28:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 10:28:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 10:28:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 10:28:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 10:28:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 10:28:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 10:28:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 10:28:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 10:28:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 10:28:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 10:28:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 10:28:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 10:28:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 10:28:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 10:28:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 10:28:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 10:28:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 10:28:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:28:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:28:14] [Rank 0] PRINT: step:5600/10000 val_loss:4.4672 total_sharp:1.3814e-04 L1_sharp:2.9348e-02 L2_sharp:3.9270e-02 L3_sharp:4.6690e-02 L4_sharp:6.0029e-02 L5_sharp:8.2377e-02 L6_sharp:1.1411e-01 L7_sharp:1.3842e-01 L8_sharp:1.3220e-01 L9_sharp:1.5147e-01 L10_sharp:2.6112e-01 L11_sharp:5.6852e-01 L12_sharp:1.9822e+00 total_fnorm:7.0000e+01 total_l1_linf:1.3107e+05 total_spectral:3.5000e+01 L1_fnorm:1.1426e-01 L2_fnorm:1.1475e-01 L3_fnorm:1.1475e-01 L4_fnorm:1.1475e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1230e-01 L9_fnorm:1.1377e-01 L10_fnorm:1.1377e-01 L11_fnorm:1.1426e-01 L12_fnorm:1.1426e-01 L1_l1linf:2.9419e-02 L2_l1linf:2.9907e-02 L3_l1linf:2.9785e-02 L4_l1linf:2.9541e-02 L5_l1linf:2.8931e-02 L6_l1linf:2.8809e-02 L7_l1linf:2.9419e-02 L8_l1linf:2.9297e-02 L9_l1linf:2.8564e-02 L10_l1linf:2.8320e-02 L11_l1linf:2.8809e-02 L12_l1linf:2.9907e-02 L1_spectral:1.5938e-03 L2_spectral:1.6144e-03 L3_spectral:1.5982e-03 L4_spectral:1.6094e-03 L5_spectral:1.6022e-03 L6_spectral:1.6149e-03 L7_spectral:1.6268e-03 L8_spectral:1.6148e-03 L9_spectral:1.6238e-03 L10_spectral:1.6073e-03 L11_spectral:1.6136e-03 L12_spectral:1.6099e-03 train_time:245092ms step_avg:43.77ms +[2025-09-11 10:28:14] [Rank 0] PRINT: step:5600/10000 
val_loss:4.4672 total_sharp:1.3814e-04 L1_sharp:2.9348e-02 L2_sharp:3.9270e-02 L3_sharp:4.6690e-02 L4_sharp:6.0029e-02 L5_sharp:8.2377e-02 L6_sharp:1.1411e-01 L7_sharp:1.3842e-01 L8_sharp:1.3220e-01 L9_sharp:1.5147e-01 L10_sharp:2.6112e-01 L11_sharp:5.6852e-01 L12_sharp:1.9822e+00 total_fnorm:7.0000e+01 total_l1_linf:1.3107e+05 total_spectral:3.5000e+01 L1_fnorm:1.1426e-01 L2_fnorm:1.1475e-01 L3_fnorm:1.1475e-01 L4_fnorm:1.1475e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1230e-01 L9_fnorm:1.1377e-01 L10_fnorm:1.1377e-01 L11_fnorm:1.1426e-01 L12_fnorm:1.1426e-01 L1_l1linf:2.9419e-02 L2_l1linf:2.9907e-02 L3_l1linf:2.9785e-02 L4_l1linf:2.9541e-02 L5_l1linf:2.8931e-02 L6_l1linf:2.8809e-02 L7_l1linf:2.9419e-02 L8_l1linf:2.9297e-02 L9_l1linf:2.8564e-02 L10_l1linf:2.8320e-02 L11_l1linf:2.8809e-02 L12_l1linf:2.9907e-02 L1_spectral:1.5938e-03 L2_spectral:1.6144e-03 L3_spectral:1.5982e-03 L4_spectral:1.6094e-03 L5_spectral:1.6022e-03 L6_spectral:1.6149e-03 L7_spectral:1.6268e-03 L8_spectral:1.6148e-03 L9_spectral:1.6238e-03 L10_spectral:1.6073e-03 L11_spectral:1.6136e-03 L12_spectral:1.6099e-03 train_time:245092ms step_avg:43.77ms +[2025-09-11 10:28:16] [Rank 0] step:5601/10000 train_time:246718ms step_avg:44.05ms +[2025-09-11 10:28:16] [Rank 0] step:5601/10000 train_time:246718ms step_avg:44.05ms +[2025-09-11 10:28:16] [Rank 0] step:5621/10000 train_time:247438ms step_avg:44.02ms +[2025-09-11 10:28:16] [Rank 0] step:5621/10000 train_time:247438ms step_avg:44.02ms +[2025-09-11 10:28:17] [Rank 0] step:5641/10000 train_time:248125ms step_avg:43.99ms +[2025-09-11 10:28:17] [Rank 0] step:5641/10000 train_time:248125ms step_avg:43.99ms +[2025-09-11 10:28:18] [Rank 0] step:5661/10000 train_time:248813ms step_avg:43.95ms +[2025-09-11 10:28:18] [Rank 0] step:5661/10000 train_time:248813ms step_avg:43.95ms +[2025-09-11 10:28:19] [Rank 0] step:5681/10000 train_time:249502ms step_avg:43.92ms +[2025-09-11 10:28:19] [Rank 0] step:5681/10000 
train_time:249502ms step_avg:43.92ms +[2025-09-11 10:28:19] [Rank 0] step:5701/10000 train_time:250192ms step_avg:43.89ms +[2025-09-11 10:28:19] [Rank 0] step:5701/10000 train_time:250192ms step_avg:43.89ms +[2025-09-11 10:28:20] [Rank 0] step:5721/10000 train_time:250878ms step_avg:43.85ms +[2025-09-11 10:28:20] [Rank 0] step:5721/10000 train_time:250878ms step_avg:43.85ms +[2025-09-11 10:28:21] [Rank 0] step:5741/10000 train_time:251567ms step_avg:43.82ms +[2025-09-11 10:28:21] [Rank 0] step:5741/10000 train_time:251567ms step_avg:43.82ms +[2025-09-11 10:28:21] [Rank 0] step:5761/10000 train_time:252255ms step_avg:43.79ms +[2025-09-11 10:28:21] [Rank 0] step:5761/10000 train_time:252255ms step_avg:43.79ms +[2025-09-11 10:28:22] [Rank 0] step:5781/10000 train_time:252944ms step_avg:43.75ms +[2025-09-11 10:28:22] [Rank 0] step:5781/10000 train_time:252944ms step_avg:43.75ms +[2025-09-11 10:28:23] [Rank 0] step:5801/10000 train_time:253634ms step_avg:43.72ms +[2025-09-11 10:28:23] [Rank 0] step:5801/10000 train_time:253634ms step_avg:43.72ms +[2025-09-11 10:28:23] [Rank 0] step:5821/10000 train_time:254322ms step_avg:43.69ms +[2025-09-11 10:28:23] [Rank 0] step:5821/10000 train_time:254322ms step_avg:43.69ms +[2025-09-11 10:28:24] [Rank 0] step:5841/10000 train_time:255012ms step_avg:43.66ms +[2025-09-11 10:28:24] [Rank 0] step:5841/10000 train_time:255012ms step_avg:43.66ms +[2025-09-11 10:28:25] [Rank 0] step:5861/10000 train_time:255699ms step_avg:43.63ms +[2025-09-11 10:28:25] [Rank 0] step:5861/10000 train_time:255699ms step_avg:43.63ms +[2025-09-11 10:28:25] [Rank 0] step:5881/10000 train_time:256386ms step_avg:43.60ms +[2025-09-11 10:28:25] [Rank 0] step:5881/10000 train_time:256386ms step_avg:43.60ms +[2025-09-11 10:28:26] [Rank 0] step:5901/10000 train_time:257075ms step_avg:43.56ms +[2025-09-11 10:28:26] [Rank 0] step:5901/10000 train_time:257075ms step_avg:43.56ms +[2025-09-11 10:28:27] [Rank 0] step:5921/10000 train_time:257765ms step_avg:43.53ms 
+[2025-09-11 10:28:27] [Rank 0] step:5921/10000 train_time:257765ms step_avg:43.53ms +[2025-09-11 10:28:27] [Rank 0] step:5941/10000 train_time:258455ms step_avg:43.50ms +[2025-09-11 10:28:27] [Rank 0] step:5941/10000 train_time:258455ms step_avg:43.50ms +[2025-09-11 10:28:28] [Rank 0] step:5961/10000 train_time:259145ms step_avg:43.47ms +[2025-09-11 10:28:28] [Rank 0] step:5961/10000 train_time:259145ms step_avg:43.47ms +[2025-09-11 10:28:29] [Rank 0] step:5981/10000 train_time:259834ms step_avg:43.44ms +[2025-09-11 10:28:29] [Rank 0] step:5981/10000 train_time:259834ms step_avg:43.44ms +[2025-09-11 10:28:29] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 10:28:29] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 10:28:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 10:28:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 10:28:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 10:28:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 10:28:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:28:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:28:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 10:28:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 10:28:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 10:28:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 10:28:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 10:28:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 10:28:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 10:28:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 10:28:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 10:28:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 10:28:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 10:28:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 10:28:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 10:28:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 10:28:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 10:28:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 10:28:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 10:28:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 10:28:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 10:28:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 10:28:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 10:28:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 10:28:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 10:28:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 10:28:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 10:28:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 10:28:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 10:28:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 10:28:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 10:28:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 10:28:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 10:28:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 10:28:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 10:28:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 10:28:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 10:28:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 10:28:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:28:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:28:40] [Rank 0] PRINT: step:6000/10000 val_loss:4.4260 total_sharp:9.7713e-05 L1_sharp:2.6690e-02 L2_sharp:3.5747e-02 L3_sharp:4.2134e-02 L4_sharp:6.4772e-02 L5_sharp:8.1310e-02 L6_sharp:9.4829e-02 L7_sharp:1.1062e-01 L8_sharp:1.1449e-01 L9_sharp:1.1774e-01 L10_sharp:1.6583e-01 L11_sharp:2.7624e-01 L12_sharp:5.3512e-01 total_fnorm:7.3000e+01 total_l1_linf:1.3517e+05 total_spectral:3.6500e+01 L1_fnorm:1.1426e-01 L2_fnorm:1.1426e-01 L3_fnorm:1.1426e-01 L4_fnorm:1.1426e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1377e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1230e-01 L9_fnorm:1.1377e-01 L10_fnorm:1.1377e-01 L11_fnorm:1.1328e-01 L12_fnorm:1.1377e-01 L1_l1linf:2.8442e-02 L2_l1linf:2.8809e-02 L3_l1linf:2.8809e-02 L4_l1linf:2.8687e-02 L5_l1linf:2.8442e-02 L6_l1linf:2.8076e-02 L7_l1linf:2.8320e-02 L8_l1linf:2.8687e-02 L9_l1linf:2.8198e-02 L10_l1linf:2.7466e-02 L11_l1linf:2.7100e-02 L12_l1linf:2.8320e-02 L1_spectral:1.6007e-03 L2_spectral:1.6114e-03 L3_spectral:1.6057e-03 L4_spectral:1.6073e-03 L5_spectral:1.5999e-03 L6_spectral:1.6127e-03 L7_spectral:1.6135e-03 L8_spectral:1.6053e-03 L9_spectral:1.6149e-03 L10_spectral:1.6231e-03 L11_spectral:1.6096e-03 L12_spectral:1.6134e-03 train_time:260506ms step_avg:43.42ms +[2025-09-11 10:28:40] [Rank 0] PRINT: step:6000/10000 val_loss:4.4260 total_sharp:9.7713e-05 L1_sharp:2.6690e-02 L2_sharp:3.5747e-02 L3_sharp:4.2134e-02 L4_sharp:6.4772e-02 L5_sharp:8.1310e-02 L6_sharp:9.4829e-02 L7_sharp:1.1062e-01 L8_sharp:1.1449e-01 L9_sharp:1.1774e-01 L10_sharp:1.6583e-01 L11_sharp:2.7624e-01 L12_sharp:5.3512e-01 total_fnorm:7.3000e+01 total_l1_linf:1.3517e+05 total_spectral:3.6500e+01 L1_fnorm:1.1426e-01 L2_fnorm:1.1426e-01 L3_fnorm:1.1426e-01 L4_fnorm:1.1426e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1377e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1230e-01 L9_fnorm:1.1377e-01 L10_fnorm:1.1377e-01 L11_fnorm:1.1328e-01 L12_fnorm:1.1377e-01 L1_l1linf:2.8442e-02 L2_l1linf:2.8809e-02 L3_l1linf:2.8809e-02 L4_l1linf:2.8687e-02 L5_l1linf:2.8442e-02 
L6_l1linf:2.8076e-02 L7_l1linf:2.8320e-02 L8_l1linf:2.8687e-02 L9_l1linf:2.8198e-02 L10_l1linf:2.7466e-02 L11_l1linf:2.7100e-02 L12_l1linf:2.8320e-02 L1_spectral:1.6007e-03 L2_spectral:1.6114e-03 L3_spectral:1.6057e-03 L4_spectral:1.6073e-03 L5_spectral:1.5999e-03 L6_spectral:1.6127e-03 L7_spectral:1.6135e-03 L8_spectral:1.6053e-03 L9_spectral:1.6149e-03 L10_spectral:1.6231e-03 L11_spectral:1.6096e-03 L12_spectral:1.6134e-03 train_time:260506ms step_avg:43.42ms +[2025-09-11 10:28:42] [Rank 0] step:6001/10000 train_time:262112ms step_avg:43.68ms +[2025-09-11 10:28:42] [Rank 0] step:6001/10000 train_time:262112ms step_avg:43.68ms +[2025-09-11 10:28:43] [Rank 0] step:6021/10000 train_time:262816ms step_avg:43.65ms +[2025-09-11 10:28:43] [Rank 0] step:6021/10000 train_time:262816ms step_avg:43.65ms +[2025-09-11 10:28:43] [Rank 0] step:6041/10000 train_time:263508ms step_avg:43.62ms +[2025-09-11 10:28:43] [Rank 0] step:6041/10000 train_time:263508ms step_avg:43.62ms +[2025-09-11 10:28:44] [Rank 0] step:6061/10000 train_time:264197ms step_avg:43.59ms +[2025-09-11 10:28:44] [Rank 0] step:6061/10000 train_time:264197ms step_avg:43.59ms +[2025-09-11 10:28:45] [Rank 0] step:6081/10000 train_time:264890ms step_avg:43.56ms +[2025-09-11 10:28:45] [Rank 0] step:6081/10000 train_time:264890ms step_avg:43.56ms +[2025-09-11 10:28:45] [Rank 0] step:6101/10000 train_time:265580ms step_avg:43.53ms +[2025-09-11 10:28:45] [Rank 0] step:6101/10000 train_time:265580ms step_avg:43.53ms +[2025-09-11 10:28:46] [Rank 0] step:6121/10000 train_time:266271ms step_avg:43.50ms +[2025-09-11 10:28:46] [Rank 0] step:6121/10000 train_time:266271ms step_avg:43.50ms +[2025-09-11 10:28:47] [Rank 0] step:6141/10000 train_time:266962ms step_avg:43.47ms +[2025-09-11 10:28:47] [Rank 0] step:6141/10000 train_time:266962ms step_avg:43.47ms +[2025-09-11 10:28:47] [Rank 0] step:6161/10000 train_time:267651ms step_avg:43.44ms +[2025-09-11 10:28:47] [Rank 0] step:6161/10000 train_time:267651ms step_avg:43.44ms 
+[2025-09-11 10:28:48] [Rank 0] step:6181/10000 train_time:268340ms step_avg:43.41ms +[2025-09-11 10:28:48] [Rank 0] step:6181/10000 train_time:268340ms step_avg:43.41ms +[2025-09-11 10:28:49] [Rank 0] step:6201/10000 train_time:269031ms step_avg:43.39ms +[2025-09-11 10:28:49] [Rank 0] step:6201/10000 train_time:269031ms step_avg:43.39ms +[2025-09-11 10:28:50] [Rank 0] step:6221/10000 train_time:269722ms step_avg:43.36ms +[2025-09-11 10:28:50] [Rank 0] step:6221/10000 train_time:269722ms step_avg:43.36ms +[2025-09-11 10:28:50] [Rank 0] step:6241/10000 train_time:270668ms step_avg:43.37ms +[2025-09-11 10:28:50] [Rank 0] step:6241/10000 train_time:270668ms step_avg:43.37ms +[2025-09-11 10:28:51] [Rank 0] step:6261/10000 train_time:271357ms step_avg:43.34ms +[2025-09-11 10:28:51] [Rank 0] step:6261/10000 train_time:271357ms step_avg:43.34ms +[2025-09-11 10:28:52] [Rank 0] step:6281/10000 train_time:272047ms step_avg:43.31ms +[2025-09-11 10:28:52] [Rank 0] step:6281/10000 train_time:272047ms step_avg:43.31ms +[2025-09-11 10:28:53] [Rank 0] step:6301/10000 train_time:273003ms step_avg:43.33ms +[2025-09-11 10:28:53] [Rank 0] step:6301/10000 train_time:273003ms step_avg:43.33ms +[2025-09-11 10:28:53] [Rank 0] step:6321/10000 train_time:273696ms step_avg:43.30ms +[2025-09-11 10:28:53] [Rank 0] step:6321/10000 train_time:273696ms step_avg:43.30ms +[2025-09-11 10:28:54] [Rank 0] step:6341/10000 train_time:274388ms step_avg:43.27ms +[2025-09-11 10:28:54] [Rank 0] step:6341/10000 train_time:274388ms step_avg:43.27ms +[2025-09-11 10:28:55] [Rank 0] step:6361/10000 train_time:275079ms step_avg:43.24ms +[2025-09-11 10:28:55] [Rank 0] step:6361/10000 train_time:275079ms step_avg:43.24ms +[2025-09-11 10:28:56] [Rank 0] step:6381/10000 train_time:275770ms step_avg:43.22ms +[2025-09-11 10:28:56] [Rank 0] step:6381/10000 train_time:275770ms step_avg:43.22ms +[2025-09-11 10:28:56] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 10:28:56] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 10:28:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 10:28:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 10:28:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 10:28:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 10:28:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:28:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:28:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 10:28:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 10:28:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 10:28:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 10:29:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 10:29:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 10:29:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 10:29:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 10:29:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 10:29:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 10:29:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 10:29:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 10:29:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 10:29:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 10:29:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 10:29:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 10:29:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 10:29:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 10:29:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 10:29:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 10:29:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 10:29:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 10:29:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 10:29:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 10:29:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 10:29:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 10:29:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 10:29:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 10:29:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 10:29:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 10:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 10:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 10:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 10:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 10:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 10:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 10:29:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:29:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:29:07] [Rank 0] PRINT: step:6400/10000 val_loss:4.3975 total_sharp:1.1657e-04 L1_sharp:1.7515e-02 L2_sharp:3.2459e-02 L3_sharp:4.2842e-02 L4_sharp:6.4931e-02 L5_sharp:8.4289e-02 L6_sharp:9.6368e-02 L7_sharp:1.1452e-01 L8_sharp:1.3008e-01 L9_sharp:1.4030e-01 L10_sharp:2.2129e-01 L11_sharp:3.4793e-01 L12_sharp:1.3978e+00 total_fnorm:6.3000e+01 total_l1_linf:1.1315e+05 total_spectral:3.1500e+01 L1_fnorm:1.0010e-01 L2_fnorm:1.0059e-01 L3_fnorm:1.0059e-01 L4_fnorm:1.0010e-01 L5_fnorm:1.0010e-01 L6_fnorm:1.0010e-01 L7_fnorm:9.9609e-02 L8_fnorm:9.7656e-02 L9_fnorm:9.9609e-02 L10_fnorm:9.9609e-02 L11_fnorm:9.9609e-02 L12_fnorm:9.9609e-02 L1_l1linf:2.3804e-02 L2_l1linf:2.4170e-02 L3_l1linf:2.4414e-02 L4_l1linf:2.3804e-02 L5_l1linf:2.4292e-02 L6_l1linf:2.4292e-02 L7_l1linf:2.3926e-02 L8_l1linf:2.4780e-02 L9_l1linf:2.3682e-02 L10_l1linf:2.3926e-02 L11_l1linf:2.3071e-02 L12_l1linf:2.4414e-02 L1_spectral:1.4469e-03 L2_spectral:1.4483e-03 L3_spectral:1.4515e-03 L4_spectral:1.4492e-03 L5_spectral:1.4433e-03 L6_spectral:1.4548e-03 L7_spectral:1.4533e-03 L8_spectral:1.4377e-03 L9_spectral:1.4597e-03 L10_spectral:1.4563e-03 L11_spectral:1.4380e-03 L12_spectral:1.4325e-03 train_time:276440ms step_avg:43.19ms +[2025-09-11 10:29:07] [Rank 0] PRINT: step:6400/10000 
val_loss:4.3975 total_sharp:1.1657e-04 L1_sharp:1.7515e-02 L2_sharp:3.2459e-02 L3_sharp:4.2842e-02 L4_sharp:6.4931e-02 L5_sharp:8.4289e-02 L6_sharp:9.6368e-02 L7_sharp:1.1452e-01 L8_sharp:1.3008e-01 L9_sharp:1.4030e-01 L10_sharp:2.2129e-01 L11_sharp:3.4793e-01 L12_sharp:1.3978e+00 total_fnorm:6.3000e+01 total_l1_linf:1.1315e+05 total_spectral:3.1500e+01 L1_fnorm:1.0010e-01 L2_fnorm:1.0059e-01 L3_fnorm:1.0059e-01 L4_fnorm:1.0010e-01 L5_fnorm:1.0010e-01 L6_fnorm:1.0010e-01 L7_fnorm:9.9609e-02 L8_fnorm:9.7656e-02 L9_fnorm:9.9609e-02 L10_fnorm:9.9609e-02 L11_fnorm:9.9609e-02 L12_fnorm:9.9609e-02 L1_l1linf:2.3804e-02 L2_l1linf:2.4170e-02 L3_l1linf:2.4414e-02 L4_l1linf:2.3804e-02 L5_l1linf:2.4292e-02 L6_l1linf:2.4292e-02 L7_l1linf:2.3926e-02 L8_l1linf:2.4780e-02 L9_l1linf:2.3682e-02 L10_l1linf:2.3926e-02 L11_l1linf:2.3071e-02 L12_l1linf:2.4414e-02 L1_spectral:1.4469e-03 L2_spectral:1.4483e-03 L3_spectral:1.4515e-03 L4_spectral:1.4492e-03 L5_spectral:1.4433e-03 L6_spectral:1.4548e-03 L7_spectral:1.4533e-03 L8_spectral:1.4377e-03 L9_spectral:1.4597e-03 L10_spectral:1.4563e-03 L11_spectral:1.4380e-03 L12_spectral:1.4325e-03 train_time:276440ms step_avg:43.19ms +[2025-09-11 10:29:09] [Rank 0] step:6401/10000 train_time:278055ms step_avg:43.44ms +[2025-09-11 10:29:09] [Rank 0] step:6401/10000 train_time:278055ms step_avg:43.44ms +[2025-09-11 10:29:09] [Rank 0] step:6421/10000 train_time:278778ms step_avg:43.42ms +[2025-09-11 10:29:09] [Rank 0] step:6421/10000 train_time:278778ms step_avg:43.42ms +[2025-09-11 10:29:10] [Rank 0] step:6441/10000 train_time:279468ms step_avg:43.39ms +[2025-09-11 10:29:10] [Rank 0] step:6441/10000 train_time:279468ms step_avg:43.39ms +[2025-09-11 10:29:11] [Rank 0] step:6461/10000 train_time:280159ms step_avg:43.36ms +[2025-09-11 10:29:11] [Rank 0] step:6461/10000 train_time:280159ms step_avg:43.36ms +[2025-09-11 10:29:11] [Rank 0] step:6481/10000 train_time:280851ms step_avg:43.33ms +[2025-09-11 10:29:11] [Rank 0] step:6481/10000 
train_time:280851ms step_avg:43.33ms +[2025-09-11 10:29:12] [Rank 0] step:6501/10000 train_time:281543ms step_avg:43.31ms +[2025-09-11 10:29:12] [Rank 0] step:6501/10000 train_time:281543ms step_avg:43.31ms +[2025-09-11 10:29:13] [Rank 0] step:6521/10000 train_time:282234ms step_avg:43.28ms +[2025-09-11 10:29:13] [Rank 0] step:6521/10000 train_time:282234ms step_avg:43.28ms +[2025-09-11 10:29:14] [Rank 0] step:6541/10000 train_time:282923ms step_avg:43.25ms +[2025-09-11 10:29:14] [Rank 0] step:6541/10000 train_time:282923ms step_avg:43.25ms +[2025-09-11 10:29:14] [Rank 0] step:6561/10000 train_time:283614ms step_avg:43.23ms +[2025-09-11 10:29:14] [Rank 0] step:6561/10000 train_time:283614ms step_avg:43.23ms +[2025-09-11 10:29:15] [Rank 0] step:6581/10000 train_time:284306ms step_avg:43.20ms +[2025-09-11 10:29:15] [Rank 0] step:6581/10000 train_time:284306ms step_avg:43.20ms +[2025-09-11 10:29:16] [Rank 0] step:6601/10000 train_time:284997ms step_avg:43.17ms +[2025-09-11 10:29:16] [Rank 0] step:6601/10000 train_time:284997ms step_avg:43.17ms +[2025-09-11 10:29:16] [Rank 0] step:6621/10000 train_time:285687ms step_avg:43.15ms +[2025-09-11 10:29:16] [Rank 0] step:6621/10000 train_time:285687ms step_avg:43.15ms +[2025-09-11 10:29:17] [Rank 0] step:6641/10000 train_time:286379ms step_avg:43.12ms +[2025-09-11 10:29:17] [Rank 0] step:6641/10000 train_time:286379ms step_avg:43.12ms +[2025-09-11 10:29:18] [Rank 0] step:6661/10000 train_time:287070ms step_avg:43.10ms +[2025-09-11 10:29:18] [Rank 0] step:6661/10000 train_time:287070ms step_avg:43.10ms +[2025-09-11 10:29:18] [Rank 0] step:6681/10000 train_time:287768ms step_avg:43.07ms +[2025-09-11 10:29:18] [Rank 0] step:6681/10000 train_time:287768ms step_avg:43.07ms +[2025-09-11 10:29:19] [Rank 0] step:6701/10000 train_time:288465ms step_avg:43.05ms +[2025-09-11 10:29:19] [Rank 0] step:6701/10000 train_time:288465ms step_avg:43.05ms +[2025-09-11 10:29:20] [Rank 0] step:6721/10000 train_time:289162ms step_avg:43.02ms 
+[2025-09-11 10:29:20] [Rank 0] step:6721/10000 train_time:289162ms step_avg:43.02ms +[2025-09-11 10:29:20] [Rank 0] step:6741/10000 train_time:289860ms step_avg:43.00ms +[2025-09-11 10:29:20] [Rank 0] step:6741/10000 train_time:289860ms step_avg:43.00ms +[2025-09-11 10:29:21] [Rank 0] step:6761/10000 train_time:290556ms step_avg:42.98ms +[2025-09-11 10:29:21] [Rank 0] step:6761/10000 train_time:290556ms step_avg:42.98ms +[2025-09-11 10:29:22] [Rank 0] step:6781/10000 train_time:291254ms step_avg:42.95ms +[2025-09-11 10:29:22] [Rank 0] step:6781/10000 train_time:291254ms step_avg:42.95ms +[2025-09-11 10:29:23] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 10:29:23] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 10:29:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 10:29:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 10:29:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 10:29:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 10:29:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:29:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:29:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 10:29:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 10:29:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 10:29:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 10:29:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 10:29:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 10:29:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 10:29:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 10:29:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 10:29:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 10:29:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 10:29:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 10:29:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 10:29:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 10:29:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 10:29:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 10:29:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 10:29:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 10:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 10:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 10:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 10:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 10:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 10:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 10:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 10:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 10:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 10:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 10:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 10:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 10:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 10:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 10:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 10:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 10:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 10:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 10:29:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:29:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:29:33] [Rank 0] PRINT: step:6800/10000 val_loss:4.3625 total_sharp:8.1278e-05 L1_sharp:2.3876e-02 L2_sharp:3.4661e-02 L3_sharp:3.9351e-02 L4_sharp:6.0570e-02 L5_sharp:8.0323e-02 L6_sharp:1.0909e-01 L7_sharp:1.4151e-01 L8_sharp:1.2553e-01 L9_sharp:1.2560e-01 L10_sharp:1.6450e-01 L11_sharp:2.3837e-01 L12_sharp:5.7728e-01 total_fnorm:6.1750e+01 total_l1_linf:1.1110e+05 total_spectral:3.0875e+01 L1_fnorm:8.5449e-02 L2_fnorm:8.5938e-02 L3_fnorm:8.5938e-02 L4_fnorm:8.5449e-02 L5_fnorm:8.5449e-02 L6_fnorm:8.5449e-02 L7_fnorm:8.5449e-02 L8_fnorm:8.3008e-02 L9_fnorm:8.4961e-02 L10_fnorm:8.4961e-02 L11_fnorm:8.4473e-02 L12_fnorm:8.3984e-02 L1_l1linf:1.9653e-02 L2_l1linf:2.0020e-02 L3_l1linf:1.9897e-02 L4_l1linf:2.0264e-02 L5_l1linf:2.0020e-02 L6_l1linf:1.9653e-02 L7_l1linf:1.9653e-02 L8_l1linf:2.0020e-02 L9_l1linf:1.9653e-02 L10_l1linf:1.9043e-02 L11_l1linf:1.9409e-02 L12_l1linf:1.9287e-02 L1_spectral:1.2914e-03 L2_spectral:1.2931e-03 L3_spectral:1.2949e-03 L4_spectral:1.2899e-03 L5_spectral:1.2903e-03 L6_spectral:1.2907e-03 L7_spectral:1.2922e-03 L8_spectral:1.2489e-03 L9_spectral:1.2800e-03 L10_spectral:1.2801e-03 L11_spectral:1.2836e-03 L12_spectral:1.2449e-03 train_time:291931ms step_avg:42.93ms +[2025-09-11 10:29:33] [Rank 0] PRINT: step:6800/10000 val_loss:4.3625 total_sharp:8.1278e-05 L1_sharp:2.3876e-02 L2_sharp:3.4661e-02 L3_sharp:3.9351e-02 L4_sharp:6.0570e-02 L5_sharp:8.0323e-02 L6_sharp:1.0909e-01 L7_sharp:1.4151e-01 L8_sharp:1.2553e-01 L9_sharp:1.2560e-01 L10_sharp:1.6450e-01 L11_sharp:2.3837e-01 L12_sharp:5.7728e-01 total_fnorm:6.1750e+01 total_l1_linf:1.1110e+05 total_spectral:3.0875e+01 L1_fnorm:8.5449e-02 L2_fnorm:8.5938e-02 L3_fnorm:8.5938e-02 L4_fnorm:8.5449e-02 L5_fnorm:8.5449e-02 L6_fnorm:8.5449e-02 L7_fnorm:8.5449e-02 L8_fnorm:8.3008e-02 L9_fnorm:8.4961e-02 L10_fnorm:8.4961e-02 L11_fnorm:8.4473e-02 L12_fnorm:8.3984e-02 L1_l1linf:1.9653e-02 L2_l1linf:2.0020e-02 L3_l1linf:1.9897e-02 L4_l1linf:2.0264e-02 L5_l1linf:2.0020e-02 
L6_l1linf:1.9653e-02 L7_l1linf:1.9653e-02 L8_l1linf:2.0020e-02 L9_l1linf:1.9653e-02 L10_l1linf:1.9043e-02 L11_l1linf:1.9409e-02 L12_l1linf:1.9287e-02 L1_spectral:1.2914e-03 L2_spectral:1.2931e-03 L3_spectral:1.2949e-03 L4_spectral:1.2899e-03 L5_spectral:1.2903e-03 L6_spectral:1.2907e-03 L7_spectral:1.2922e-03 L8_spectral:1.2489e-03 L9_spectral:1.2800e-03 L10_spectral:1.2801e-03 L11_spectral:1.2836e-03 L12_spectral:1.2449e-03 train_time:291931ms step_avg:42.93ms +[2025-09-11 10:29:35] [Rank 0] step:6801/10000 train_time:293556ms step_avg:43.16ms +[2025-09-11 10:29:35] [Rank 0] step:6801/10000 train_time:293556ms step_avg:43.16ms +[2025-09-11 10:29:36] [Rank 0] step:6821/10000 train_time:294285ms step_avg:43.14ms +[2025-09-11 10:29:36] [Rank 0] step:6821/10000 train_time:294285ms step_avg:43.14ms +[2025-09-11 10:29:36] [Rank 0] step:6841/10000 train_time:294987ms step_avg:43.12ms +[2025-09-11 10:29:36] [Rank 0] step:6841/10000 train_time:294987ms step_avg:43.12ms +[2025-09-11 10:29:37] [Rank 0] step:6861/10000 train_time:295687ms step_avg:43.10ms +[2025-09-11 10:29:37] [Rank 0] step:6861/10000 train_time:295687ms step_avg:43.10ms +[2025-09-11 10:29:38] [Rank 0] step:6881/10000 train_time:296388ms step_avg:43.07ms +[2025-09-11 10:29:38] [Rank 0] step:6881/10000 train_time:296388ms step_avg:43.07ms +[2025-09-11 10:29:38] [Rank 0] step:6901/10000 train_time:297086ms step_avg:43.05ms +[2025-09-11 10:29:38] [Rank 0] step:6901/10000 train_time:297086ms step_avg:43.05ms +[2025-09-11 10:29:39] [Rank 0] step:6921/10000 train_time:297784ms step_avg:43.03ms +[2025-09-11 10:29:39] [Rank 0] step:6921/10000 train_time:297784ms step_avg:43.03ms +[2025-09-11 10:29:40] [Rank 0] step:6941/10000 train_time:298483ms step_avg:43.00ms +[2025-09-11 10:29:40] [Rank 0] step:6941/10000 train_time:298483ms step_avg:43.00ms +[2025-09-11 10:29:41] [Rank 0] step:6961/10000 train_time:299183ms step_avg:42.98ms +[2025-09-11 10:29:41] [Rank 0] step:6961/10000 train_time:299183ms step_avg:42.98ms 
+[2025-09-11 10:29:41] [Rank 0] step:6981/10000 train_time:299885ms step_avg:42.96ms +[2025-09-11 10:29:41] [Rank 0] step:6981/10000 train_time:299885ms step_avg:42.96ms +[2025-09-11 10:29:42] [Rank 0] step:7001/10000 train_time:300589ms step_avg:42.94ms +[2025-09-11 10:29:42] [Rank 0] step:7001/10000 train_time:300589ms step_avg:42.94ms +[2025-09-11 10:29:43] [Rank 0] step:7021/10000 train_time:301288ms step_avg:42.91ms +[2025-09-11 10:29:43] [Rank 0] step:7021/10000 train_time:301288ms step_avg:42.91ms +[2025-09-11 10:29:43] [Rank 0] step:7041/10000 train_time:301985ms step_avg:42.89ms +[2025-09-11 10:29:43] [Rank 0] step:7041/10000 train_time:301985ms step_avg:42.89ms +[2025-09-11 10:29:44] [Rank 0] step:7061/10000 train_time:302686ms step_avg:42.87ms +[2025-09-11 10:29:44] [Rank 0] step:7061/10000 train_time:302686ms step_avg:42.87ms +[2025-09-11 10:29:45] [Rank 0] step:7081/10000 train_time:303384ms step_avg:42.84ms +[2025-09-11 10:29:45] [Rank 0] step:7081/10000 train_time:303384ms step_avg:42.84ms +[2025-09-11 10:29:45] [Rank 0] step:7101/10000 train_time:304083ms step_avg:42.82ms +[2025-09-11 10:29:45] [Rank 0] step:7101/10000 train_time:304083ms step_avg:42.82ms +[2025-09-11 10:29:46] [Rank 0] step:7121/10000 train_time:304783ms step_avg:42.80ms +[2025-09-11 10:29:46] [Rank 0] step:7121/10000 train_time:304783ms step_avg:42.80ms +[2025-09-11 10:29:47] [Rank 0] step:7141/10000 train_time:305483ms step_avg:42.78ms +[2025-09-11 10:29:47] [Rank 0] step:7141/10000 train_time:305483ms step_avg:42.78ms +[2025-09-11 10:29:48] [Rank 0] step:7161/10000 train_time:306183ms step_avg:42.76ms +[2025-09-11 10:29:48] [Rank 0] step:7161/10000 train_time:306183ms step_avg:42.76ms +[2025-09-11 10:29:48] [Rank 0] step:7181/10000 train_time:306881ms step_avg:42.74ms +[2025-09-11 10:29:48] [Rank 0] step:7181/10000 train_time:306881ms step_avg:42.74ms +[2025-09-11 10:29:49] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 10:29:49] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 10:29:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 10:29:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 10:29:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 10:29:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 10:29:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:29:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:29:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 10:29:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 10:29:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 10:29:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 10:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 10:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 10:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 10:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 10:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 10:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 10:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 10:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 10:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 10:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 10:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 10:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 10:29:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 10:29:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 10:29:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 10:29:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 10:29:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 10:29:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 10:29:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 10:29:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 10:29:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 10:29:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 10:29:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 10:29:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 10:29:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 10:29:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 10:29:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 10:29:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 10:29:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 10:29:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 10:29:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 10:29:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 10:29:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:29:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:29:59] [Rank 0] PRINT: step:7200/10000 val_loss:4.3317 total_sharp:7.5085e-05 L1_sharp:2.5283e-02 L2_sharp:3.0923e-02 L3_sharp:4.0778e-02 L4_sharp:6.8114e-02 L5_sharp:8.1006e-02 L6_sharp:1.0245e-01 L7_sharp:1.1062e-01 L8_sharp:1.2911e-01 L9_sharp:1.3153e-01 L10_sharp:1.7005e-01 L11_sharp:2.6402e-01 L12_sharp:6.8006e-01 total_fnorm:5.3250e+01 total_l1_linf:9.1136e+04 total_spectral:2.6625e+01 L1_fnorm:7.2266e-02 L2_fnorm:7.2266e-02 L3_fnorm:7.2266e-02 L4_fnorm:7.2266e-02 L5_fnorm:7.2266e-02 L6_fnorm:7.2266e-02 L7_fnorm:7.2266e-02 L8_fnorm:7.0312e-02 L9_fnorm:7.1777e-02 L10_fnorm:7.1777e-02 L11_fnorm:7.1289e-02 L12_fnorm:7.0801e-02 L1_l1linf:1.5503e-02 L2_l1linf:1.6113e-02 L3_l1linf:1.5747e-02 L4_l1linf:1.5747e-02 L5_l1linf:1.6235e-02 L6_l1linf:1.5747e-02 L7_l1linf:1.6113e-02 L8_l1linf:1.6113e-02 L9_l1linf:1.5869e-02 L10_l1linf:1.5747e-02 L11_l1linf:1.5320e-02 L12_l1linf:1.5869e-02 L1_spectral:1.1365e-03 L2_spectral:1.1320e-03 L3_spectral:1.1298e-03 L4_spectral:1.1298e-03 L5_spectral:1.1336e-03 L6_spectral:1.1293e-03 L7_spectral:1.1301e-03 L8_spectral:1.0766e-03 L9_spectral:1.1190e-03 L10_spectral:1.1215e-03 L11_spectral:1.1015e-03 L12_spectral:1.0696e-03 train_time:307561ms step_avg:42.72ms +[2025-09-11 10:29:59] [Rank 0] PRINT: step:7200/10000 
val_loss:4.3317 total_sharp:7.5085e-05 L1_sharp:2.5283e-02 L2_sharp:3.0923e-02 L3_sharp:4.0778e-02 L4_sharp:6.8114e-02 L5_sharp:8.1006e-02 L6_sharp:1.0245e-01 L7_sharp:1.1062e-01 L8_sharp:1.2911e-01 L9_sharp:1.3153e-01 L10_sharp:1.7005e-01 L11_sharp:2.6402e-01 L12_sharp:6.8006e-01 total_fnorm:5.3250e+01 total_l1_linf:9.1136e+04 total_spectral:2.6625e+01 L1_fnorm:7.2266e-02 L2_fnorm:7.2266e-02 L3_fnorm:7.2266e-02 L4_fnorm:7.2266e-02 L5_fnorm:7.2266e-02 L6_fnorm:7.2266e-02 L7_fnorm:7.2266e-02 L8_fnorm:7.0312e-02 L9_fnorm:7.1777e-02 L10_fnorm:7.1777e-02 L11_fnorm:7.1289e-02 L12_fnorm:7.0801e-02 L1_l1linf:1.5503e-02 L2_l1linf:1.6113e-02 L3_l1linf:1.5747e-02 L4_l1linf:1.5747e-02 L5_l1linf:1.6235e-02 L6_l1linf:1.5747e-02 L7_l1linf:1.6113e-02 L8_l1linf:1.6113e-02 L9_l1linf:1.5869e-02 L10_l1linf:1.5747e-02 L11_l1linf:1.5320e-02 L12_l1linf:1.5869e-02 L1_spectral:1.1365e-03 L2_spectral:1.1320e-03 L3_spectral:1.1298e-03 L4_spectral:1.1298e-03 L5_spectral:1.1336e-03 L6_spectral:1.1293e-03 L7_spectral:1.1301e-03 L8_spectral:1.0766e-03 L9_spectral:1.1190e-03 L10_spectral:1.1215e-03 L11_spectral:1.1015e-03 L12_spectral:1.0696e-03 train_time:307561ms step_avg:42.72ms +[2025-09-11 10:30:01] [Rank 0] step:7201/10000 train_time:309206ms step_avg:42.94ms +[2025-09-11 10:30:01] [Rank 0] step:7201/10000 train_time:309206ms step_avg:42.94ms +[2025-09-11 10:30:02] [Rank 0] step:7221/10000 train_time:309986ms step_avg:42.93ms +[2025-09-11 10:30:02] [Rank 0] step:7221/10000 train_time:309986ms step_avg:42.93ms +[2025-09-11 10:30:03] [Rank 0] step:7241/10000 train_time:310731ms step_avg:42.91ms +[2025-09-11 10:30:03] [Rank 0] step:7241/10000 train_time:310731ms step_avg:42.91ms +[2025-09-11 10:30:03] [Rank 0] step:7261/10000 train_time:311433ms step_avg:42.89ms +[2025-09-11 10:30:03] [Rank 0] step:7261/10000 train_time:311433ms step_avg:42.89ms +[2025-09-11 10:30:04] [Rank 0] step:7281/10000 train_time:312138ms step_avg:42.87ms +[2025-09-11 10:30:04] [Rank 0] step:7281/10000 
train_time:312138ms step_avg:42.87ms +[2025-09-11 10:30:05] [Rank 0] step:7301/10000 train_time:312838ms step_avg:42.85ms +[2025-09-11 10:30:05] [Rank 0] step:7301/10000 train_time:312838ms step_avg:42.85ms +[2025-09-11 10:30:05] [Rank 0] step:7321/10000 train_time:313538ms step_avg:42.83ms +[2025-09-11 10:30:05] [Rank 0] step:7321/10000 train_time:313538ms step_avg:42.83ms +[2025-09-11 10:30:06] [Rank 0] step:7341/10000 train_time:314238ms step_avg:42.81ms +[2025-09-11 10:30:06] [Rank 0] step:7341/10000 train_time:314238ms step_avg:42.81ms +[2025-09-11 10:30:07] [Rank 0] step:7361/10000 train_time:314937ms step_avg:42.78ms +[2025-09-11 10:30:07] [Rank 0] step:7361/10000 train_time:314937ms step_avg:42.78ms +[2025-09-11 10:30:08] [Rank 0] step:7381/10000 train_time:315638ms step_avg:42.76ms +[2025-09-11 10:30:08] [Rank 0] step:7381/10000 train_time:315638ms step_avg:42.76ms +[2025-09-11 10:30:08] [Rank 0] step:7401/10000 train_time:316336ms step_avg:42.74ms +[2025-09-11 10:30:08] [Rank 0] step:7401/10000 train_time:316336ms step_avg:42.74ms +[2025-09-11 10:30:09] [Rank 0] step:7421/10000 train_time:317035ms step_avg:42.72ms +[2025-09-11 10:30:09] [Rank 0] step:7421/10000 train_time:317035ms step_avg:42.72ms +[2025-09-11 10:30:10] [Rank 0] step:7441/10000 train_time:317737ms step_avg:42.70ms +[2025-09-11 10:30:10] [Rank 0] step:7441/10000 train_time:317737ms step_avg:42.70ms +[2025-09-11 10:30:10] [Rank 0] step:7461/10000 train_time:318438ms step_avg:42.68ms +[2025-09-11 10:30:10] [Rank 0] step:7461/10000 train_time:318438ms step_avg:42.68ms +[2025-09-11 10:30:11] [Rank 0] step:7481/10000 train_time:319140ms step_avg:42.66ms +[2025-09-11 10:30:11] [Rank 0] step:7481/10000 train_time:319140ms step_avg:42.66ms +[2025-09-11 10:30:12] [Rank 0] step:7501/10000 train_time:319841ms step_avg:42.64ms +[2025-09-11 10:30:12] [Rank 0] step:7501/10000 train_time:319841ms step_avg:42.64ms +[2025-09-11 10:30:12] [Rank 0] step:7521/10000 train_time:320542ms step_avg:42.62ms 
+[2025-09-11 10:30:12] [Rank 0] step:7521/10000 train_time:320542ms step_avg:42.62ms +[2025-09-11 10:30:13] [Rank 0] step:7541/10000 train_time:321241ms step_avg:42.60ms +[2025-09-11 10:30:13] [Rank 0] step:7541/10000 train_time:321241ms step_avg:42.60ms +[2025-09-11 10:30:14] [Rank 0] step:7561/10000 train_time:321943ms step_avg:42.58ms +[2025-09-11 10:30:14] [Rank 0] step:7561/10000 train_time:321943ms step_avg:42.58ms +[2025-09-11 10:30:15] [Rank 0] step:7581/10000 train_time:322645ms step_avg:42.56ms +[2025-09-11 10:30:15] [Rank 0] step:7581/10000 train_time:322645ms step_avg:42.56ms +[2025-09-11 10:30:15] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 10:30:15] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 10:30:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 10:30:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 10:30:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 10:30:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 10:30:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:30:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:30:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 10:30:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 10:30:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 10:30:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 10:30:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 10:30:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 10:30:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 10:30:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 10:30:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 10:30:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 10:30:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 10:30:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 10:30:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 10:30:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 10:30:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 10:30:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 10:30:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 10:30:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 10:30:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 10:30:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 10:30:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 10:30:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 10:30:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 10:30:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 10:30:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 10:30:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 10:30:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 10:30:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 10:30:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 10:30:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 10:30:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 10:30:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 10:30:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 10:30:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 10:30:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 10:30:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 10:30:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:30:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:30:26] [Rank 0] PRINT: step:7600/10000 val_loss:4.3079 total_sharp:6.8961e-05 L1_sharp:2.5935e-02 L2_sharp:3.0131e-02 L3_sharp:3.7995e-02 L4_sharp:5.2951e-02 L5_sharp:7.0605e-02 L6_sharp:1.0092e-01 L7_sharp:1.0995e-01 L8_sharp:1.0966e-01 L9_sharp:1.2580e-01 L10_sharp:1.9216e-01 L11_sharp:2.9680e-01 L12_sharp:7.6111e-01 total_fnorm:4.3750e+01 total_l1_linf:7.0656e+04 total_spectral:2.1875e+01 L1_fnorm:5.9082e-02 L2_fnorm:5.9570e-02 L3_fnorm:5.9814e-02 L4_fnorm:5.9814e-02 L5_fnorm:5.9570e-02 L6_fnorm:5.9326e-02 L7_fnorm:5.9570e-02 L8_fnorm:5.7861e-02 L9_fnorm:5.9082e-02 L10_fnorm:5.9082e-02 L11_fnorm:5.8350e-02 L12_fnorm:5.7861e-02 L1_l1linf:1.1963e-02 L2_l1linf:1.2573e-02 L3_l1linf:1.2634e-02 L4_l1linf:1.2573e-02 L5_l1linf:1.2512e-02 L6_l1linf:1.2695e-02 L7_l1linf:1.2939e-02 L8_l1linf:1.2939e-02 L9_l1linf:1.2939e-02 L10_l1linf:1.2390e-02 L11_l1linf:1.2207e-02 L12_l1linf:1.2146e-02 L1_spectral:9.6191e-04 L2_spectral:9.7348e-04 L3_spectral:9.6491e-04 L4_spectral:9.7294e-04 L5_spectral:9.7030e-04 L6_spectral:9.5633e-04 L7_spectral:9.6463e-04 L8_spectral:9.0361e-04 L9_spectral:9.5001e-04 L10_spectral:9.5367e-04 L11_spectral:9.4092e-04 L12_spectral:8.9405e-04 train_time:323327ms step_avg:42.54ms +[2025-09-11 10:30:26] [Rank 0] PRINT: step:7600/10000 val_loss:4.3079 total_sharp:6.8961e-05 L1_sharp:2.5935e-02 L2_sharp:3.0131e-02 L3_sharp:3.7995e-02 L4_sharp:5.2951e-02 L5_sharp:7.0605e-02 L6_sharp:1.0092e-01 L7_sharp:1.0995e-01 L8_sharp:1.0966e-01 L9_sharp:1.2580e-01 L10_sharp:1.9216e-01 L11_sharp:2.9680e-01 L12_sharp:7.6111e-01 total_fnorm:4.3750e+01 total_l1_linf:7.0656e+04 total_spectral:2.1875e+01 L1_fnorm:5.9082e-02 L2_fnorm:5.9570e-02 L3_fnorm:5.9814e-02 L4_fnorm:5.9814e-02 L5_fnorm:5.9570e-02 L6_fnorm:5.9326e-02 L7_fnorm:5.9570e-02 L8_fnorm:5.7861e-02 L9_fnorm:5.9082e-02 L10_fnorm:5.9082e-02 L11_fnorm:5.8350e-02 L12_fnorm:5.7861e-02 L1_l1linf:1.1963e-02 L2_l1linf:1.2573e-02 L3_l1linf:1.2634e-02 L4_l1linf:1.2573e-02 L5_l1linf:1.2512e-02 
L6_l1linf:1.2695e-02 L7_l1linf:1.2939e-02 L8_l1linf:1.2939e-02 L9_l1linf:1.2939e-02 L10_l1linf:1.2390e-02 L11_l1linf:1.2207e-02 L12_l1linf:1.2146e-02 L1_spectral:9.6191e-04 L2_spectral:9.7348e-04 L3_spectral:9.6491e-04 L4_spectral:9.7294e-04 L5_spectral:9.7030e-04 L6_spectral:9.5633e-04 L7_spectral:9.6463e-04 L8_spectral:9.0361e-04 L9_spectral:9.5001e-04 L10_spectral:9.5367e-04 L11_spectral:9.4092e-04 L12_spectral:8.9405e-04 train_time:323327ms step_avg:42.54ms +[2025-09-11 10:30:28] [Rank 0] step:7601/10000 train_time:324991ms step_avg:42.76ms +[2025-09-11 10:30:28] [Rank 0] step:7601/10000 train_time:324991ms step_avg:42.76ms +[2025-09-11 10:30:28] [Rank 0] step:7621/10000 train_time:325704ms step_avg:42.74ms +[2025-09-11 10:30:28] [Rank 0] step:7621/10000 train_time:325704ms step_avg:42.74ms +[2025-09-11 10:30:29] [Rank 0] step:7641/10000 train_time:326407ms step_avg:42.72ms +[2025-09-11 10:30:29] [Rank 0] step:7641/10000 train_time:326407ms step_avg:42.72ms +[2025-09-11 10:30:30] [Rank 0] step:7661/10000 train_time:327107ms step_avg:42.70ms +[2025-09-11 10:30:30] [Rank 0] step:7661/10000 train_time:327107ms step_avg:42.70ms +[2025-09-11 10:30:30] [Rank 0] step:7681/10000 train_time:327808ms step_avg:42.68ms +[2025-09-11 10:30:30] [Rank 0] step:7681/10000 train_time:327808ms step_avg:42.68ms +[2025-09-11 10:30:31] [Rank 0] step:7701/10000 train_time:328509ms step_avg:42.66ms +[2025-09-11 10:30:31] [Rank 0] step:7701/10000 train_time:328509ms step_avg:42.66ms +[2025-09-11 10:30:32] [Rank 0] step:7721/10000 train_time:329211ms step_avg:42.64ms +[2025-09-11 10:30:32] [Rank 0] step:7721/10000 train_time:329211ms step_avg:42.64ms +[2025-09-11 10:30:33] [Rank 0] step:7741/10000 train_time:329913ms step_avg:42.62ms +[2025-09-11 10:30:33] [Rank 0] step:7741/10000 train_time:329913ms step_avg:42.62ms +[2025-09-11 10:30:33] [Rank 0] step:7761/10000 train_time:330613ms step_avg:42.60ms +[2025-09-11 10:30:33] [Rank 0] step:7761/10000 train_time:330613ms step_avg:42.60ms 
+[2025-09-11 10:30:34] [Rank 0] step:7781/10000 train_time:331317ms step_avg:42.58ms +[2025-09-11 10:30:34] [Rank 0] step:7781/10000 train_time:331317ms step_avg:42.58ms +[2025-09-11 10:30:35] [Rank 0] step:7801/10000 train_time:332017ms step_avg:42.56ms +[2025-09-11 10:30:35] [Rank 0] step:7801/10000 train_time:332017ms step_avg:42.56ms +[2025-09-11 10:30:35] [Rank 0] step:7821/10000 train_time:332718ms step_avg:42.54ms +[2025-09-11 10:30:35] [Rank 0] step:7821/10000 train_time:332718ms step_avg:42.54ms +[2025-09-11 10:30:36] [Rank 0] step:7841/10000 train_time:333420ms step_avg:42.52ms +[2025-09-11 10:30:36] [Rank 0] step:7841/10000 train_time:333420ms step_avg:42.52ms +[2025-09-11 10:30:37] [Rank 0] step:7861/10000 train_time:334123ms step_avg:42.50ms +[2025-09-11 10:30:37] [Rank 0] step:7861/10000 train_time:334123ms step_avg:42.50ms +[2025-09-11 10:30:38] [Rank 0] step:7881/10000 train_time:334825ms step_avg:42.49ms +[2025-09-11 10:30:38] [Rank 0] step:7881/10000 train_time:334825ms step_avg:42.49ms +[2025-09-11 10:30:38] [Rank 0] step:7901/10000 train_time:335527ms step_avg:42.47ms +[2025-09-11 10:30:38] [Rank 0] step:7901/10000 train_time:335527ms step_avg:42.47ms +[2025-09-11 10:30:39] [Rank 0] step:7921/10000 train_time:336229ms step_avg:42.45ms +[2025-09-11 10:30:39] [Rank 0] step:7921/10000 train_time:336229ms step_avg:42.45ms +[2025-09-11 10:30:40] [Rank 0] step:7941/10000 train_time:336932ms step_avg:42.43ms +[2025-09-11 10:30:40] [Rank 0] step:7941/10000 train_time:336932ms step_avg:42.43ms +[2025-09-11 10:30:40] [Rank 0] step:7961/10000 train_time:337632ms step_avg:42.41ms +[2025-09-11 10:30:40] [Rank 0] step:7961/10000 train_time:337632ms step_avg:42.41ms +[2025-09-11 10:30:41] [Rank 0] step:7981/10000 train_time:338335ms step_avg:42.39ms +[2025-09-11 10:30:41] [Rank 0] step:7981/10000 train_time:338335ms step_avg:42.39ms +[2025-09-11 10:30:42] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 10:30:42] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 10:30:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 10:30:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 10:30:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 10:30:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 10:30:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:30:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:30:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 10:30:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 10:30:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 10:30:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 10:30:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 10:30:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 10:30:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 10:30:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 10:30:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 10:30:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 10:30:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 10:30:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 10:30:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 10:30:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 10:30:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 10:30:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 10:30:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 10:30:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 10:30:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 10:30:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 10:30:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 10:30:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 10:30:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 10:30:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 10:30:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 10:30:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 10:30:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 10:30:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 10:30:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 10:30:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 10:30:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 10:30:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 10:30:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 10:30:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 10:30:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 10:30:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 10:30:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:30:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:30:52] [Rank 0] PRINT: step:8000/10000 val_loss:4.2889 total_sharp:6.3253e-05 L1_sharp:2.0749e-02 L2_sharp:2.6777e-02 L3_sharp:3.6261e-02 L4_sharp:5.5339e-02 L5_sharp:7.8458e-02 L6_sharp:1.1438e-01 L7_sharp:1.3764e-01 L8_sharp:1.2148e-01 L9_sharp:1.3204e-01 L10_sharp:1.8078e-01 L11_sharp:2.3155e-01 L12_sharp:1.6605e+00 total_fnorm:3.7250e+01 total_l1_linf:5.7088e+04 total_spectral:1.8625e+01 L1_fnorm:4.7363e-02 L2_fnorm:4.8096e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.8096e-02 L5_fnorm:4.7852e-02 L6_fnorm:4.7852e-02 L7_fnorm:4.8096e-02 L8_fnorm:4.6143e-02 L9_fnorm:4.7119e-02 L10_fnorm:4.7119e-02 L11_fnorm:4.6631e-02 L12_fnorm:4.6143e-02 L1_l1linf:8.9111e-03 L2_l1linf:9.7046e-03 L3_l1linf:9.5215e-03 L4_l1linf:9.2773e-03 L5_l1linf:9.3384e-03 L6_l1linf:9.3384e-03 L7_l1linf:9.5215e-03 L8_l1linf:9.5825e-03 L9_l1linf:9.4604e-03 L10_l1linf:9.3994e-03 L11_l1linf:8.9111e-03 L12_l1linf:9.2773e-03 L1_spectral:8.0632e-04 L2_spectral:8.0496e-04 L3_spectral:8.0018e-04 L4_spectral:7.9777e-04 L5_spectral:8.0515e-04 L6_spectral:7.9692e-04 L7_spectral:8.0280e-04 L8_spectral:7.3523e-04 L9_spectral:7.8562e-04 L10_spectral:7.8010e-04 L11_spectral:7.6443e-04 L12_spectral:7.2691e-04 train_time:339014ms step_avg:42.38ms +[2025-09-11 10:30:52] [Rank 0] PRINT: step:8000/10000 
val_loss:4.2889 total_sharp:6.3253e-05 L1_sharp:2.0749e-02 L2_sharp:2.6777e-02 L3_sharp:3.6261e-02 L4_sharp:5.5339e-02 L5_sharp:7.8458e-02 L6_sharp:1.1438e-01 L7_sharp:1.3764e-01 L8_sharp:1.2148e-01 L9_sharp:1.3204e-01 L10_sharp:1.8078e-01 L11_sharp:2.3155e-01 L12_sharp:1.6605e+00 total_fnorm:3.7250e+01 total_l1_linf:5.7088e+04 total_spectral:1.8625e+01 L1_fnorm:4.7363e-02 L2_fnorm:4.8096e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.8096e-02 L5_fnorm:4.7852e-02 L6_fnorm:4.7852e-02 L7_fnorm:4.8096e-02 L8_fnorm:4.6143e-02 L9_fnorm:4.7119e-02 L10_fnorm:4.7119e-02 L11_fnorm:4.6631e-02 L12_fnorm:4.6143e-02 L1_l1linf:8.9111e-03 L2_l1linf:9.7046e-03 L3_l1linf:9.5215e-03 L4_l1linf:9.2773e-03 L5_l1linf:9.3384e-03 L6_l1linf:9.3384e-03 L7_l1linf:9.5215e-03 L8_l1linf:9.5825e-03 L9_l1linf:9.4604e-03 L10_l1linf:9.3994e-03 L11_l1linf:8.9111e-03 L12_l1linf:9.2773e-03 L1_spectral:8.0632e-04 L2_spectral:8.0496e-04 L3_spectral:8.0018e-04 L4_spectral:7.9777e-04 L5_spectral:8.0515e-04 L6_spectral:7.9692e-04 L7_spectral:8.0280e-04 L8_spectral:7.3523e-04 L9_spectral:7.8562e-04 L10_spectral:7.8010e-04 L11_spectral:7.6443e-04 L12_spectral:7.2691e-04 train_time:339014ms step_avg:42.38ms +[2025-09-11 10:30:54] [Rank 0] step:8001/10000 train_time:340676ms step_avg:42.58ms +[2025-09-11 10:30:54] [Rank 0] step:8001/10000 train_time:340676ms step_avg:42.58ms +[2025-09-11 10:30:55] [Rank 0] step:8021/10000 train_time:341398ms step_avg:42.56ms +[2025-09-11 10:30:55] [Rank 0] step:8021/10000 train_time:341398ms step_avg:42.56ms +[2025-09-11 10:30:56] [Rank 0] step:8041/10000 train_time:342101ms step_avg:42.54ms +[2025-09-11 10:30:56] [Rank 0] step:8041/10000 train_time:342101ms step_avg:42.54ms +[2025-09-11 10:30:57] [Rank 0] step:8061/10000 train_time:343064ms step_avg:42.56ms +[2025-09-11 10:30:57] [Rank 0] step:8061/10000 train_time:343064ms step_avg:42.56ms +[2025-09-11 10:30:57] [Rank 0] step:8081/10000 train_time:343764ms step_avg:42.54ms +[2025-09-11 10:30:57] [Rank 0] step:8081/10000 
train_time:343764ms step_avg:42.54ms +[2025-09-11 10:30:58] [Rank 0] step:8101/10000 train_time:344464ms step_avg:42.52ms +[2025-09-11 10:30:58] [Rank 0] step:8101/10000 train_time:344464ms step_avg:42.52ms +[2025-09-11 10:30:59] [Rank 0] step:8121/10000 train_time:345466ms step_avg:42.54ms +[2025-09-11 10:30:59] [Rank 0] step:8121/10000 train_time:345466ms step_avg:42.54ms +[2025-09-11 10:31:00] [Rank 0] step:8141/10000 train_time:346897ms step_avg:42.61ms +[2025-09-11 10:31:00] [Rank 0] step:8141/10000 train_time:346897ms step_avg:42.61ms +[2025-09-11 10:31:01] [Rank 0] step:8161/10000 train_time:347603ms step_avg:42.59ms +[2025-09-11 10:31:01] [Rank 0] step:8161/10000 train_time:347603ms step_avg:42.59ms +[2025-09-11 10:31:02] [Rank 0] step:8181/10000 train_time:348315ms step_avg:42.58ms +[2025-09-11 10:31:02] [Rank 0] step:8181/10000 train_time:348315ms step_avg:42.58ms +[2025-09-11 10:31:03] [Rank 0] step:8201/10000 train_time:349025ms step_avg:42.56ms +[2025-09-11 10:31:03] [Rank 0] step:8201/10000 train_time:349025ms step_avg:42.56ms +[2025-09-11 10:31:03] [Rank 0] step:8221/10000 train_time:349733ms step_avg:42.54ms +[2025-09-11 10:31:03] [Rank 0] step:8221/10000 train_time:349733ms step_avg:42.54ms +[2025-09-11 10:31:04] [Rank 0] step:8241/10000 train_time:350450ms step_avg:42.53ms +[2025-09-11 10:31:04] [Rank 0] step:8241/10000 train_time:350450ms step_avg:42.53ms +[2025-09-11 10:31:05] [Rank 0] step:8261/10000 train_time:351158ms step_avg:42.51ms +[2025-09-11 10:31:05] [Rank 0] step:8261/10000 train_time:351158ms step_avg:42.51ms +[2025-09-11 10:31:05] [Rank 0] step:8281/10000 train_time:351863ms step_avg:42.49ms +[2025-09-11 10:31:05] [Rank 0] step:8281/10000 train_time:351863ms step_avg:42.49ms +[2025-09-11 10:31:06] [Rank 0] step:8301/10000 train_time:352571ms step_avg:42.47ms +[2025-09-11 10:31:06] [Rank 0] step:8301/10000 train_time:352571ms step_avg:42.47ms +[2025-09-11 10:31:07] [Rank 0] step:8321/10000 train_time:353278ms step_avg:42.46ms 
+[2025-09-11 10:31:07] [Rank 0] step:8321/10000 train_time:353278ms step_avg:42.46ms +[2025-09-11 10:31:07] [Rank 0] step:8341/10000 train_time:353992ms step_avg:42.44ms +[2025-09-11 10:31:07] [Rank 0] step:8341/10000 train_time:353992ms step_avg:42.44ms +[2025-09-11 10:31:08] [Rank 0] step:8361/10000 train_time:354695ms step_avg:42.42ms +[2025-09-11 10:31:08] [Rank 0] step:8361/10000 train_time:354695ms step_avg:42.42ms +[2025-09-11 10:31:09] [Rank 0] step:8381/10000 train_time:355406ms step_avg:42.41ms +[2025-09-11 10:31:09] [Rank 0] step:8381/10000 train_time:355406ms step_avg:42.41ms +[2025-09-11 10:31:10] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 10:31:10] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 10:31:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 10:31:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 10:31:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 10:31:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 10:31:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:31:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:31:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 10:31:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 10:31:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 10:31:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 10:31:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 10:31:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 10:31:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 10:31:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 10:31:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 10:31:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 10:31:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 10:31:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 10:31:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 10:31:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 10:31:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 10:31:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 10:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 10:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 10:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 10:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 10:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 10:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 10:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 10:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 10:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 10:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 10:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 10:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 10:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 10:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 10:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 10:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 10:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 10:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 10:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 10:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 10:31:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:31:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:31:20] [Rank 0] PRINT: step:8400/10000 val_loss:4.2694 total_sharp:5.2188e-05 L1_sharp:1.9330e-02 L2_sharp:2.5078e-02 L3_sharp:4.0125e-02 L4_sharp:5.5492e-02 L5_sharp:6.7461e-02 L6_sharp:9.5680e-02 L7_sharp:1.1638e-01 L8_sharp:1.0752e-01 L9_sharp:1.1920e-01 L10_sharp:1.7841e-01 L11_sharp:2.2567e-01 L12_sharp:1.0541e+00 total_fnorm:2.9500e+01 total_l1_linf:4.1216e+04 total_spectral:1.4750e+01 L1_fnorm:3.6621e-02 L2_fnorm:3.7109e-02 L3_fnorm:3.7109e-02 L4_fnorm:3.7109e-02 L5_fnorm:3.6865e-02 L6_fnorm:3.6865e-02 L7_fnorm:3.7354e-02 L8_fnorm:3.5889e-02 L9_fnorm:3.6621e-02 L10_fnorm:3.6377e-02 L11_fnorm:3.6133e-02 L12_fnorm:3.5645e-02 L1_l1linf:6.5613e-03 L2_l1linf:6.5002e-03 L3_l1linf:6.7749e-03 L4_l1linf:6.8359e-03 L5_l1linf:7.1411e-03 L6_l1linf:6.6833e-03 L7_l1linf:6.8359e-03 L8_l1linf:6.8970e-03 L9_l1linf:6.7444e-03 L10_l1linf:6.8054e-03 L11_l1linf:6.6833e-03 L12_l1linf:6.4392e-03 L1_spectral:6.2637e-04 L2_spectral:6.2995e-04 L3_spectral:6.3740e-04 L4_spectral:6.3783e-04 L5_spectral:6.3278e-04 L6_spectral:6.3185e-04 L7_spectral:6.3508e-04 L8_spectral:5.8658e-04 L9_spectral:6.1794e-04 L10_spectral:6.1950e-04 L11_spectral:6.1017e-04 L12_spectral:5.7346e-04 train_time:356096ms step_avg:42.39ms +[2025-09-11 10:31:20] [Rank 0] PRINT: step:8400/10000 val_loss:4.2694 total_sharp:5.2188e-05 L1_sharp:1.9330e-02 L2_sharp:2.5078e-02 L3_sharp:4.0125e-02 L4_sharp:5.5492e-02 L5_sharp:6.7461e-02 L6_sharp:9.5680e-02 L7_sharp:1.1638e-01 L8_sharp:1.0752e-01 L9_sharp:1.1920e-01 L10_sharp:1.7841e-01 L11_sharp:2.2567e-01 L12_sharp:1.0541e+00 total_fnorm:2.9500e+01 total_l1_linf:4.1216e+04 total_spectral:1.4750e+01 L1_fnorm:3.6621e-02 L2_fnorm:3.7109e-02 L3_fnorm:3.7109e-02 L4_fnorm:3.7109e-02 L5_fnorm:3.6865e-02 L6_fnorm:3.6865e-02 L7_fnorm:3.7354e-02 L8_fnorm:3.5889e-02 L9_fnorm:3.6621e-02 L10_fnorm:3.6377e-02 L11_fnorm:3.6133e-02 L12_fnorm:3.5645e-02 L1_l1linf:6.5613e-03 L2_l1linf:6.5002e-03 L3_l1linf:6.7749e-03 L4_l1linf:6.8359e-03 L5_l1linf:7.1411e-03 
L6_l1linf:6.6833e-03 L7_l1linf:6.8359e-03 L8_l1linf:6.8970e-03 L9_l1linf:6.7444e-03 L10_l1linf:6.8054e-03 L11_l1linf:6.6833e-03 L12_l1linf:6.4392e-03 L1_spectral:6.2637e-04 L2_spectral:6.2995e-04 L3_spectral:6.3740e-04 L4_spectral:6.3783e-04 L5_spectral:6.3278e-04 L6_spectral:6.3185e-04 L7_spectral:6.3508e-04 L8_spectral:5.8658e-04 L9_spectral:6.1794e-04 L10_spectral:6.1950e-04 L11_spectral:6.1017e-04 L12_spectral:5.7346e-04 train_time:356096ms step_avg:42.39ms +[2025-09-11 10:31:22] [Rank 0] step:8401/10000 train_time:357843ms step_avg:42.60ms +[2025-09-11 10:31:22] [Rank 0] step:8401/10000 train_time:357843ms step_avg:42.60ms +[2025-09-11 10:31:23] [Rank 0] step:8421/10000 train_time:358592ms step_avg:42.58ms +[2025-09-11 10:31:23] [Rank 0] step:8421/10000 train_time:358592ms step_avg:42.58ms +[2025-09-11 10:31:24] [Rank 0] step:8441/10000 train_time:359303ms step_avg:42.57ms +[2025-09-11 10:31:24] [Rank 0] step:8441/10000 train_time:359303ms step_avg:42.57ms +[2025-09-11 10:31:24] [Rank 0] step:8461/10000 train_time:360013ms step_avg:42.55ms +[2025-09-11 10:31:24] [Rank 0] step:8461/10000 train_time:360013ms step_avg:42.55ms +[2025-09-11 10:31:25] [Rank 0] step:8481/10000 train_time:360724ms step_avg:42.53ms +[2025-09-11 10:31:25] [Rank 0] step:8481/10000 train_time:360724ms step_avg:42.53ms +[2025-09-11 10:31:26] [Rank 0] step:8501/10000 train_time:361433ms step_avg:42.52ms +[2025-09-11 10:31:26] [Rank 0] step:8501/10000 train_time:361433ms step_avg:42.52ms +[2025-09-11 10:31:26] [Rank 0] step:8521/10000 train_time:362141ms step_avg:42.50ms +[2025-09-11 10:31:26] [Rank 0] step:8521/10000 train_time:362141ms step_avg:42.50ms +[2025-09-11 10:31:27] [Rank 0] step:8541/10000 train_time:362850ms step_avg:42.48ms +[2025-09-11 10:31:27] [Rank 0] step:8541/10000 train_time:362850ms step_avg:42.48ms +[2025-09-11 10:31:28] [Rank 0] step:8561/10000 train_time:363563ms step_avg:42.47ms +[2025-09-11 10:31:28] [Rank 0] step:8561/10000 train_time:363563ms step_avg:42.47ms 
+[2025-09-11 10:31:29] [Rank 0] step:8581/10000 train_time:364276ms step_avg:42.45ms +[2025-09-11 10:31:29] [Rank 0] step:8581/10000 train_time:364276ms step_avg:42.45ms +[2025-09-11 10:31:29] [Rank 0] step:8601/10000 train_time:364986ms step_avg:42.44ms +[2025-09-11 10:31:29] [Rank 0] step:8601/10000 train_time:364986ms step_avg:42.44ms +[2025-09-11 10:31:30] [Rank 0] step:8621/10000 train_time:365694ms step_avg:42.42ms +[2025-09-11 10:31:30] [Rank 0] step:8621/10000 train_time:365694ms step_avg:42.42ms +[2025-09-11 10:31:31] [Rank 0] step:8641/10000 train_time:366402ms step_avg:42.40ms +[2025-09-11 10:31:31] [Rank 0] step:8641/10000 train_time:366402ms step_avg:42.40ms +[2025-09-11 10:31:31] [Rank 0] step:8661/10000 train_time:367112ms step_avg:42.39ms +[2025-09-11 10:31:31] [Rank 0] step:8661/10000 train_time:367112ms step_avg:42.39ms +[2025-09-11 10:31:32] [Rank 0] step:8681/10000 train_time:367822ms step_avg:42.37ms +[2025-09-11 10:31:32] [Rank 0] step:8681/10000 train_time:367822ms step_avg:42.37ms +[2025-09-11 10:31:33] [Rank 0] step:8701/10000 train_time:368530ms step_avg:42.35ms +[2025-09-11 10:31:33] [Rank 0] step:8701/10000 train_time:368530ms step_avg:42.35ms +[2025-09-11 10:31:33] [Rank 0] step:8721/10000 train_time:369241ms step_avg:42.34ms +[2025-09-11 10:31:33] [Rank 0] step:8721/10000 train_time:369241ms step_avg:42.34ms +[2025-09-11 10:31:34] [Rank 0] step:8741/10000 train_time:369946ms step_avg:42.32ms +[2025-09-11 10:31:34] [Rank 0] step:8741/10000 train_time:369946ms step_avg:42.32ms +[2025-09-11 10:31:35] [Rank 0] step:8761/10000 train_time:370657ms step_avg:42.31ms +[2025-09-11 10:31:35] [Rank 0] step:8761/10000 train_time:370657ms step_avg:42.31ms +[2025-09-11 10:31:36] [Rank 0] step:8781/10000 train_time:371363ms step_avg:42.29ms +[2025-09-11 10:31:36] [Rank 0] step:8781/10000 train_time:371363ms step_avg:42.29ms +[2025-09-11 10:31:36] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 10:31:36] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 10:31:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 10:31:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 10:31:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 10:31:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 10:31:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:31:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:31:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 10:31:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 10:31:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 10:31:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 10:31:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 10:31:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 10:31:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 10:31:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 10:31:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 10:31:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 10:31:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 10:31:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 10:31:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 10:31:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 10:31:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 10:31:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 10:31:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 10:31:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 10:31:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 10:31:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 10:31:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 10:31:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 10:31:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 10:31:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 10:31:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 10:31:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 10:31:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 10:31:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 10:31:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 10:31:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 10:31:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 10:31:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 10:31:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 10:31:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 10:31:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 10:31:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 10:31:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:31:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:31:47] [Rank 0] PRINT: step:8800/10000 val_loss:4.2645 total_sharp:4.3583e-05 L1_sharp:1.3675e-02 L2_sharp:1.9622e-02 L3_sharp:2.8350e-02 L4_sharp:3.9197e-02 L5_sharp:5.7069e-02 L6_sharp:7.2396e-02 L7_sharp:8.0674e-02 L8_sharp:9.2582e-02 L9_sharp:1.0256e-01 L10_sharp:1.3846e-01 L11_sharp:1.9123e-01 L12_sharp:4.9398e-01 total_fnorm:2.1625e+01 total_l1_linf:2.7008e+04 total_spectral:1.0812e+01 L1_fnorm:2.5879e-02 L2_fnorm:2.6245e-02 L3_fnorm:2.6245e-02 L4_fnorm:2.6245e-02 L5_fnorm:2.6245e-02 L6_fnorm:2.6245e-02 L7_fnorm:2.6489e-02 L8_fnorm:2.5635e-02 L9_fnorm:2.6123e-02 L10_fnorm:2.6001e-02 L11_fnorm:2.5635e-02 L12_fnorm:2.5146e-02 L1_l1linf:4.0894e-03 L2_l1linf:4.2114e-03 L3_l1linf:4.2725e-03 L4_l1linf:4.3335e-03 L5_l1linf:4.3030e-03 L6_l1linf:4.3640e-03 L7_l1linf:4.4861e-03 L8_l1linf:4.5471e-03 L9_l1linf:4.5166e-03 L10_l1linf:4.2114e-03 L11_l1linf:4.0588e-03 L12_l1linf:4.0283e-03 L1_spectral:4.5220e-04 L2_spectral:4.6104e-04 L3_spectral:4.6179e-04 L4_spectral:4.6574e-04 L5_spectral:4.6546e-04 L6_spectral:4.6254e-04 L7_spectral:4.6288e-04 L8_spectral:4.2610e-04 L9_spectral:4.4988e-04 L10_spectral:4.5330e-04 L11_spectral:4.3952e-04 L12_spectral:4.1200e-04 train_time:372050ms step_avg:42.28ms +[2025-09-11 10:31:47] [Rank 0] PRINT: step:8800/10000 
val_loss:4.2645 total_sharp:4.3583e-05 L1_sharp:1.3675e-02 L2_sharp:1.9622e-02 L3_sharp:2.8350e-02 L4_sharp:3.9197e-02 L5_sharp:5.7069e-02 L6_sharp:7.2396e-02 L7_sharp:8.0674e-02 L8_sharp:9.2582e-02 L9_sharp:1.0256e-01 L10_sharp:1.3846e-01 L11_sharp:1.9123e-01 L12_sharp:4.9398e-01 total_fnorm:2.1625e+01 total_l1_linf:2.7008e+04 total_spectral:1.0812e+01 L1_fnorm:2.5879e-02 L2_fnorm:2.6245e-02 L3_fnorm:2.6245e-02 L4_fnorm:2.6245e-02 L5_fnorm:2.6245e-02 L6_fnorm:2.6245e-02 L7_fnorm:2.6489e-02 L8_fnorm:2.5635e-02 L9_fnorm:2.6123e-02 L10_fnorm:2.6001e-02 L11_fnorm:2.5635e-02 L12_fnorm:2.5146e-02 L1_l1linf:4.0894e-03 L2_l1linf:4.2114e-03 L3_l1linf:4.2725e-03 L4_l1linf:4.3335e-03 L5_l1linf:4.3030e-03 L6_l1linf:4.3640e-03 L7_l1linf:4.4861e-03 L8_l1linf:4.5471e-03 L9_l1linf:4.5166e-03 L10_l1linf:4.2114e-03 L11_l1linf:4.0588e-03 L12_l1linf:4.0283e-03 L1_spectral:4.5220e-04 L2_spectral:4.6104e-04 L3_spectral:4.6179e-04 L4_spectral:4.6574e-04 L5_spectral:4.6546e-04 L6_spectral:4.6254e-04 L7_spectral:4.6288e-04 L8_spectral:4.2610e-04 L9_spectral:4.4988e-04 L10_spectral:4.5330e-04 L11_spectral:4.3952e-04 L12_spectral:4.1200e-04 train_time:372050ms step_avg:42.28ms +[2025-09-11 10:31:49] [Rank 0] step:8801/10000 train_time:373827ms step_avg:42.48ms +[2025-09-11 10:31:49] [Rank 0] step:8801/10000 train_time:373827ms step_avg:42.48ms +[2025-09-11 10:31:50] [Rank 0] step:8821/10000 train_time:374564ms step_avg:42.46ms +[2025-09-11 10:31:50] [Rank 0] step:8821/10000 train_time:374564ms step_avg:42.46ms +[2025-09-11 10:31:50] [Rank 0] step:8841/10000 train_time:375274ms step_avg:42.45ms +[2025-09-11 10:31:50] [Rank 0] step:8841/10000 train_time:375274ms step_avg:42.45ms +[2025-09-11 10:31:51] [Rank 0] step:8861/10000 train_time:375983ms step_avg:42.43ms +[2025-09-11 10:31:51] [Rank 0] step:8861/10000 train_time:375983ms step_avg:42.43ms +[2025-09-11 10:31:52] [Rank 0] step:8881/10000 train_time:376693ms step_avg:42.42ms +[2025-09-11 10:31:52] [Rank 0] step:8881/10000 
train_time:376693ms step_avg:42.42ms +[2025-09-11 10:31:53] [Rank 0] step:8901/10000 train_time:377404ms step_avg:42.40ms +[2025-09-11 10:31:53] [Rank 0] step:8901/10000 train_time:377404ms step_avg:42.40ms +[2025-09-11 10:31:53] [Rank 0] step:8921/10000 train_time:378111ms step_avg:42.38ms +[2025-09-11 10:31:53] [Rank 0] step:8921/10000 train_time:378111ms step_avg:42.38ms +[2025-09-11 10:31:54] [Rank 0] step:8941/10000 train_time:378824ms step_avg:42.37ms +[2025-09-11 10:31:54] [Rank 0] step:8941/10000 train_time:378824ms step_avg:42.37ms +[2025-09-11 10:31:55] [Rank 0] step:8961/10000 train_time:379542ms step_avg:42.35ms +[2025-09-11 10:31:55] [Rank 0] step:8961/10000 train_time:379542ms step_avg:42.35ms +[2025-09-11 10:31:55] [Rank 0] step:8981/10000 train_time:380255ms step_avg:42.34ms +[2025-09-11 10:31:55] [Rank 0] step:8981/10000 train_time:380255ms step_avg:42.34ms +[2025-09-11 10:31:56] [Rank 0] step:9001/10000 train_time:380960ms step_avg:42.32ms +[2025-09-11 10:31:56] [Rank 0] step:9001/10000 train_time:380960ms step_avg:42.32ms +[2025-09-11 10:31:57] [Rank 0] step:9021/10000 train_time:381669ms step_avg:42.31ms +[2025-09-11 10:31:57] [Rank 0] step:9021/10000 train_time:381669ms step_avg:42.31ms +[2025-09-11 10:31:58] [Rank 0] step:9041/10000 train_time:382382ms step_avg:42.29ms +[2025-09-11 10:31:58] [Rank 0] step:9041/10000 train_time:382382ms step_avg:42.29ms +[2025-09-11 10:31:58] [Rank 0] step:9061/10000 train_time:383090ms step_avg:42.28ms +[2025-09-11 10:31:58] [Rank 0] step:9061/10000 train_time:383090ms step_avg:42.28ms +[2025-09-11 10:31:59] [Rank 0] step:9081/10000 train_time:383803ms step_avg:42.26ms +[2025-09-11 10:31:59] [Rank 0] step:9081/10000 train_time:383803ms step_avg:42.26ms +[2025-09-11 10:32:00] [Rank 0] step:9101/10000 train_time:384793ms step_avg:42.28ms +[2025-09-11 10:32:00] [Rank 0] step:9101/10000 train_time:384793ms step_avg:42.28ms +[2025-09-11 10:32:01] [Rank 0] step:9121/10000 train_time:385507ms step_avg:42.27ms 
+[2025-09-11 10:32:01] [Rank 0] step:9121/10000 train_time:385507ms step_avg:42.27ms +[2025-09-11 10:32:01] [Rank 0] step:9141/10000 train_time:386215ms step_avg:42.25ms +[2025-09-11 10:32:01] [Rank 0] step:9141/10000 train_time:386215ms step_avg:42.25ms +[2025-09-11 10:32:02] [Rank 0] step:9161/10000 train_time:387206ms step_avg:42.27ms +[2025-09-11 10:32:02] [Rank 0] step:9161/10000 train_time:387206ms step_avg:42.27ms +[2025-09-11 10:32:03] [Rank 0] step:9181/10000 train_time:387918ms step_avg:42.25ms +[2025-09-11 10:32:03] [Rank 0] step:9181/10000 train_time:387918ms step_avg:42.25ms +[2025-09-11 10:32:04] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 10:32:04] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 10:32:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 10:32:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 10:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 10:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 10:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 10:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 10:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 10:32:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 10:32:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 10:32:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 10:32:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 10:32:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 10:32:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 10:32:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 10:32:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 10:32:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 10:32:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 10:32:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 10:32:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 10:32:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 10:32:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 10:32:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 10:32:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 10:32:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 10:32:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 10:32:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 10:32:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 10:32:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 10:32:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 10:32:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 10:32:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 10:32:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 10:32:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 10:32:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 10:32:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 10:32:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 10:32:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 10:32:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 10:32:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 10:32:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 10:32:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:32:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:32:16] [Rank 0] PRINT: step:9200/10000 val_loss:4.2507 total_sharp:4.1545e-05 L1_sharp:1.2858e-02 L2_sharp:2.0726e-02 L3_sharp:2.5510e-02 L4_sharp:4.5898e-02 L5_sharp:5.4188e-02 L6_sharp:7.7845e-02 L7_sharp:9.5708e-02 L8_sharp:9.7556e-02 L9_sharp:9.5313e-02 L10_sharp:1.2883e-01 L11_sharp:1.6856e-01 L12_sharp:5.3811e-01 total_fnorm:1.4938e+01 total_l1_linf:1.6256e+04 total_spectral:7.4688e+00 L1_fnorm:1.7090e-02 L2_fnorm:1.7456e-02 L3_fnorm:1.7578e-02 L4_fnorm:1.7578e-02 L5_fnorm:1.7456e-02 L6_fnorm:1.7456e-02 L7_fnorm:1.7578e-02 L8_fnorm:1.6968e-02 L9_fnorm:1.7212e-02 L10_fnorm:1.7212e-02 L11_fnorm:1.6968e-02 L12_fnorm:1.6724e-02 L1_l1linf:2.3651e-03 L2_l1linf:2.5482e-03 L3_l1linf:2.7618e-03 L4_l1linf:2.7008e-03 L5_l1linf:2.5787e-03 L6_l1linf:2.6703e-03 L7_l1linf:2.6398e-03 L8_l1linf:2.6398e-03 L9_l1linf:2.5787e-03 L10_l1linf:2.7161e-03 L11_l1linf:2.5177e-03 L12_l1linf:2.4872e-03 L1_spectral:3.0556e-04 L2_spectral:3.1168e-04 L3_spectral:3.1625e-04 L4_spectral:3.1284e-04 L5_spectral:3.1154e-04 L6_spectral:3.1310e-04 L7_spectral:3.1456e-04 L8_spectral:2.8775e-04 L9_spectral:3.0035e-04 L10_spectral:3.0388e-04 L11_spectral:2.9382e-04 L12_spectral:2.7763e-04 train_time:388612ms step_avg:42.24ms +[2025-09-11 10:32:16] [Rank 0] PRINT: step:9200/10000 val_loss:4.2507 total_sharp:4.1545e-05 L1_sharp:1.2858e-02 L2_sharp:2.0726e-02 L3_sharp:2.5510e-02 L4_sharp:4.5898e-02 L5_sharp:5.4188e-02 L6_sharp:7.7845e-02 L7_sharp:9.5708e-02 L8_sharp:9.7556e-02 L9_sharp:9.5313e-02 L10_sharp:1.2883e-01 L11_sharp:1.6856e-01 L12_sharp:5.3811e-01 total_fnorm:1.4938e+01 total_l1_linf:1.6256e+04 total_spectral:7.4688e+00 L1_fnorm:1.7090e-02 L2_fnorm:1.7456e-02 L3_fnorm:1.7578e-02 L4_fnorm:1.7578e-02 L5_fnorm:1.7456e-02 L6_fnorm:1.7456e-02 L7_fnorm:1.7578e-02 L8_fnorm:1.6968e-02 L9_fnorm:1.7212e-02 L10_fnorm:1.7212e-02 L11_fnorm:1.6968e-02 L12_fnorm:1.6724e-02 L1_l1linf:2.3651e-03 L2_l1linf:2.5482e-03 L3_l1linf:2.7618e-03 L4_l1linf:2.7008e-03 L5_l1linf:2.5787e-03 
L6_l1linf:2.6703e-03 L7_l1linf:2.6398e-03 L8_l1linf:2.6398e-03 L9_l1linf:2.5787e-03 L10_l1linf:2.7161e-03 L11_l1linf:2.5177e-03 L12_l1linf:2.4872e-03 L1_spectral:3.0556e-04 L2_spectral:3.1168e-04 L3_spectral:3.1625e-04 L4_spectral:3.1284e-04 L5_spectral:3.1154e-04 L6_spectral:3.1310e-04 L7_spectral:3.1456e-04 L8_spectral:2.8775e-04 L9_spectral:3.0035e-04 L10_spectral:3.0388e-04 L11_spectral:2.9382e-04 L12_spectral:2.7763e-04 train_time:388612ms step_avg:42.24ms +[2025-09-11 10:32:18] [Rank 0] step:9201/10000 train_time:390572ms step_avg:42.45ms +[2025-09-11 10:32:18] [Rank 0] step:9201/10000 train_time:390572ms step_avg:42.45ms +[2025-09-11 10:32:19] [Rank 0] step:9221/10000 train_time:391304ms step_avg:42.44ms +[2025-09-11 10:32:19] [Rank 0] step:9221/10000 train_time:391304ms step_avg:42.44ms +[2025-09-11 10:32:19] [Rank 0] step:9241/10000 train_time:392011ms step_avg:42.42ms +[2025-09-11 10:32:19] [Rank 0] step:9241/10000 train_time:392011ms step_avg:42.42ms +[2025-09-11 10:32:20] [Rank 0] step:9261/10000 train_time:392723ms step_avg:42.41ms +[2025-09-11 10:32:20] [Rank 0] step:9261/10000 train_time:392723ms step_avg:42.41ms +[2025-09-11 10:32:21] [Rank 0] step:9281/10000 train_time:393433ms step_avg:42.39ms +[2025-09-11 10:32:21] [Rank 0] step:9281/10000 train_time:393433ms step_avg:42.39ms +[2025-09-11 10:32:21] [Rank 0] step:9301/10000 train_time:394140ms step_avg:42.38ms +[2025-09-11 10:32:21] [Rank 0] step:9301/10000 train_time:394140ms step_avg:42.38ms +[2025-09-11 10:32:22] [Rank 0] step:9321/10000 train_time:394851ms step_avg:42.36ms +[2025-09-11 10:32:22] [Rank 0] step:9321/10000 train_time:394851ms step_avg:42.36ms +[2025-09-11 10:32:23] [Rank 0] step:9341/10000 train_time:395558ms step_avg:42.35ms +[2025-09-11 10:32:23] [Rank 0] step:9341/10000 train_time:395558ms step_avg:42.35ms +[2025-09-11 10:32:23] [Rank 0] step:9361/10000 train_time:396263ms step_avg:42.33ms +[2025-09-11 10:32:23] [Rank 0] step:9361/10000 train_time:396263ms step_avg:42.33ms 
+[2025-09-11 10:32:24] [Rank 0] step:9381/10000 train_time:396972ms step_avg:42.32ms +[2025-09-11 10:32:24] [Rank 0] step:9381/10000 train_time:396972ms step_avg:42.32ms +[2025-09-11 10:32:25] [Rank 0] step:9401/10000 train_time:397683ms step_avg:42.30ms +[2025-09-11 10:32:25] [Rank 0] step:9401/10000 train_time:397683ms step_avg:42.30ms +[2025-09-11 10:32:26] [Rank 0] step:9421/10000 train_time:398395ms step_avg:42.29ms +[2025-09-11 10:32:26] [Rank 0] step:9421/10000 train_time:398395ms step_avg:42.29ms +[2025-09-11 10:32:26] [Rank 0] step:9441/10000 train_time:399108ms step_avg:42.27ms +[2025-09-11 10:32:26] [Rank 0] step:9441/10000 train_time:399108ms step_avg:42.27ms +[2025-09-11 10:32:27] [Rank 0] step:9461/10000 train_time:399818ms step_avg:42.26ms +[2025-09-11 10:32:27] [Rank 0] step:9461/10000 train_time:399818ms step_avg:42.26ms +[2025-09-11 10:32:28] [Rank 0] step:9481/10000 train_time:400539ms step_avg:42.25ms +[2025-09-11 10:32:28] [Rank 0] step:9481/10000 train_time:400539ms step_avg:42.25ms +[2025-09-11 10:32:28] [Rank 0] step:9501/10000 train_time:401251ms step_avg:42.23ms +[2025-09-11 10:32:28] [Rank 0] step:9501/10000 train_time:401251ms step_avg:42.23ms +[2025-09-11 10:32:29] [Rank 0] step:9521/10000 train_time:401965ms step_avg:42.22ms +[2025-09-11 10:32:29] [Rank 0] step:9521/10000 train_time:401965ms step_avg:42.22ms +[2025-09-11 10:32:30] [Rank 0] step:9541/10000 train_time:402673ms step_avg:42.20ms +[2025-09-11 10:32:30] [Rank 0] step:9541/10000 train_time:402673ms step_avg:42.20ms +[2025-09-11 10:32:31] [Rank 0] step:9561/10000 train_time:403385ms step_avg:42.19ms +[2025-09-11 10:32:31] [Rank 0] step:9561/10000 train_time:403385ms step_avg:42.19ms +[2025-09-11 10:32:31] [Rank 0] step:9581/10000 train_time:404096ms step_avg:42.18ms +[2025-09-11 10:32:31] [Rank 0] step:9581/10000 train_time:404096ms step_avg:42.18ms +[2025-09-11 10:32:32] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 10:32:32] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 10:32:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 10:32:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 10:32:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 10:32:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 10:32:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:32:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:32:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 10:32:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 10:32:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 10:32:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 10:32:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 10:32:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 10:32:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 10:32:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 10:32:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 10:32:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 10:32:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 10:32:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 10:32:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 10:32:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 10:32:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 10:32:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 10:32:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 10:32:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 10:32:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 10:32:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 10:32:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 10:32:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 10:32:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 10:32:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 10:32:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 10:32:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 10:32:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 10:32:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 10:32:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 10:32:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 10:32:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 10:32:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 10:32:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 10:32:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 10:32:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 10:32:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 10:32:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:32:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:32:43] [Rank 0] PRINT: step:9600/10000 val_loss:4.2442 total_sharp:2.4178e-05 L1_sharp:8.7603e-03 L2_sharp:1.4662e-02 L3_sharp:1.8224e-02 L4_sharp:2.8889e-02 L5_sharp:3.9644e-02 L6_sharp:5.6374e-02 L7_sharp:6.8581e-02 L8_sharp:6.8073e-02 L9_sharp:6.8640e-02 L10_sharp:1.0097e-01 L11_sharp:1.3595e-01 L12_sharp:3.1716e-01 total_fnorm:8.6875e+00 total_l1_linf:8.0000e+03 total_spectral:4.3438e+00 L1_fnorm:9.5215e-03 L2_fnorm:9.7656e-03 L3_fnorm:9.7656e-03 L4_fnorm:9.7656e-03 L5_fnorm:9.8267e-03 L6_fnorm:9.7046e-03 L7_fnorm:9.8267e-03 L8_fnorm:9.4604e-03 L9_fnorm:9.5825e-03 L10_fnorm:9.6436e-03 L11_fnorm:9.5215e-03 L12_fnorm:9.3384e-03 L1_l1linf:1.1368e-03 L2_l1linf:1.2283e-03 L3_l1linf:1.2131e-03 L4_l1linf:1.2360e-03 L5_l1linf:1.2360e-03 L6_l1linf:1.2970e-03 L7_l1linf:1.2741e-03 L8_l1linf:1.2665e-03 L9_l1linf:1.2054e-03 L10_l1linf:1.1978e-03 L11_l1linf:1.1978e-03 L12_l1linf:1.3504e-03 L1_spectral:1.7240e-04 L2_spectral:1.7540e-04 L3_spectral:1.7834e-04 L4_spectral:1.7727e-04 L5_spectral:1.7666e-04 L6_spectral:1.7628e-04 L7_spectral:1.7639e-04 L8_spectral:1.6461e-04 L9_spectral:1.6947e-04 L10_spectral:1.7334e-04 L11_spectral:1.6834e-04 L12_spectral:1.5688e-04 train_time:404784ms step_avg:42.16ms +[2025-09-11 10:32:43] [Rank 0] PRINT: step:9600/10000 
val_loss:4.2442 total_sharp:2.4178e-05 L1_sharp:8.7603e-03 L2_sharp:1.4662e-02 L3_sharp:1.8224e-02 L4_sharp:2.8889e-02 L5_sharp:3.9644e-02 L6_sharp:5.6374e-02 L7_sharp:6.8581e-02 L8_sharp:6.8073e-02 L9_sharp:6.8640e-02 L10_sharp:1.0097e-01 L11_sharp:1.3595e-01 L12_sharp:3.1716e-01 total_fnorm:8.6875e+00 total_l1_linf:8.0000e+03 total_spectral:4.3438e+00 L1_fnorm:9.5215e-03 L2_fnorm:9.7656e-03 L3_fnorm:9.7656e-03 L4_fnorm:9.7656e-03 L5_fnorm:9.8267e-03 L6_fnorm:9.7046e-03 L7_fnorm:9.8267e-03 L8_fnorm:9.4604e-03 L9_fnorm:9.5825e-03 L10_fnorm:9.6436e-03 L11_fnorm:9.5215e-03 L12_fnorm:9.3384e-03 L1_l1linf:1.1368e-03 L2_l1linf:1.2283e-03 L3_l1linf:1.2131e-03 L4_l1linf:1.2360e-03 L5_l1linf:1.2360e-03 L6_l1linf:1.2970e-03 L7_l1linf:1.2741e-03 L8_l1linf:1.2665e-03 L9_l1linf:1.2054e-03 L10_l1linf:1.1978e-03 L11_l1linf:1.1978e-03 L12_l1linf:1.3504e-03 L1_spectral:1.7240e-04 L2_spectral:1.7540e-04 L3_spectral:1.7834e-04 L4_spectral:1.7727e-04 L5_spectral:1.7666e-04 L6_spectral:1.7628e-04 L7_spectral:1.7639e-04 L8_spectral:1.6461e-04 L9_spectral:1.6947e-04 L10_spectral:1.7334e-04 L11_spectral:1.6834e-04 L12_spectral:1.5688e-04 train_time:404784ms step_avg:42.16ms +[2025-09-11 10:32:45] [Rank 0] step:9601/10000 train_time:406760ms step_avg:42.37ms +[2025-09-11 10:32:45] [Rank 0] step:9601/10000 train_time:406760ms step_avg:42.37ms +[2025-09-11 10:32:46] [Rank 0] step:9621/10000 train_time:407503ms step_avg:42.36ms +[2025-09-11 10:32:46] [Rank 0] step:9621/10000 train_time:407503ms step_avg:42.36ms +[2025-09-11 10:32:46] [Rank 0] step:9641/10000 train_time:408217ms step_avg:42.34ms +[2025-09-11 10:32:46] [Rank 0] step:9641/10000 train_time:408217ms step_avg:42.34ms +[2025-09-11 10:32:47] [Rank 0] step:9661/10000 train_time:408939ms step_avg:42.33ms +[2025-09-11 10:32:47] [Rank 0] step:9661/10000 train_time:408939ms step_avg:42.33ms +[2025-09-11 10:32:48] [Rank 0] step:9681/10000 train_time:409654ms step_avg:42.32ms +[2025-09-11 10:32:48] [Rank 0] step:9681/10000 
train_time:409654ms step_avg:42.32ms +[2025-09-11 10:32:49] [Rank 0] step:9701/10000 train_time:410369ms step_avg:42.30ms +[2025-09-11 10:32:49] [Rank 0] step:9701/10000 train_time:410369ms step_avg:42.30ms +[2025-09-11 10:32:49] [Rank 0] step:9721/10000 train_time:411090ms step_avg:42.29ms +[2025-09-11 10:32:49] [Rank 0] step:9721/10000 train_time:411090ms step_avg:42.29ms +[2025-09-11 10:32:50] [Rank 0] step:9741/10000 train_time:411807ms step_avg:42.28ms +[2025-09-11 10:32:50] [Rank 0] step:9741/10000 train_time:411807ms step_avg:42.28ms +[2025-09-11 10:32:51] [Rank 0] step:9761/10000 train_time:412525ms step_avg:42.26ms +[2025-09-11 10:32:51] [Rank 0] step:9761/10000 train_time:412525ms step_avg:42.26ms +[2025-09-11 10:32:52] [Rank 0] step:9781/10000 train_time:413240ms step_avg:42.25ms +[2025-09-11 10:32:52] [Rank 0] step:9781/10000 train_time:413240ms step_avg:42.25ms +[2025-09-11 10:32:52] [Rank 0] step:9801/10000 train_time:413961ms step_avg:42.24ms +[2025-09-11 10:32:52] [Rank 0] step:9801/10000 train_time:413961ms step_avg:42.24ms +[2025-09-11 10:32:53] [Rank 0] step:9821/10000 train_time:414679ms step_avg:42.22ms +[2025-09-11 10:32:53] [Rank 0] step:9821/10000 train_time:414679ms step_avg:42.22ms +[2025-09-11 10:32:54] [Rank 0] step:9841/10000 train_time:415400ms step_avg:42.21ms +[2025-09-11 10:32:54] [Rank 0] step:9841/10000 train_time:415400ms step_avg:42.21ms +[2025-09-11 10:32:54] [Rank 0] step:9861/10000 train_time:416116ms step_avg:42.20ms +[2025-09-11 10:32:54] [Rank 0] step:9861/10000 train_time:416116ms step_avg:42.20ms +[2025-09-11 10:32:55] [Rank 0] step:9881/10000 train_time:416834ms step_avg:42.19ms +[2025-09-11 10:32:55] [Rank 0] step:9881/10000 train_time:416834ms step_avg:42.19ms +[2025-09-11 10:32:56] [Rank 0] step:9901/10000 train_time:417549ms step_avg:42.17ms +[2025-09-11 10:32:56] [Rank 0] step:9901/10000 train_time:417549ms step_avg:42.17ms +[2025-09-11 10:32:57] [Rank 0] step:9921/10000 train_time:418266ms step_avg:42.16ms 
+[2025-09-11 10:32:57] [Rank 0] step:9921/10000 train_time:418266ms step_avg:42.16ms +[2025-09-11 10:32:57] [Rank 0] step:9941/10000 train_time:418988ms step_avg:42.15ms +[2025-09-11 10:32:57] [Rank 0] step:9941/10000 train_time:418988ms step_avg:42.15ms +[2025-09-11 10:32:58] [Rank 0] step:9961/10000 train_time:419710ms step_avg:42.14ms +[2025-09-11 10:32:58] [Rank 0] step:9961/10000 train_time:419710ms step_avg:42.14ms +[2025-09-11 10:32:59] [Rank 0] step:9981/10000 train_time:420429ms step_avg:42.12ms +[2025-09-11 10:32:59] [Rank 0] step:9981/10000 train_time:420429ms step_avg:42.12ms +[2025-09-11 10:32:59] [Rank 0] step:10000/10000 train_time:421120ms step_avg:42.11ms +[2025-09-11 10:32:59] [Rank 0] step:10000/10000 train_time:421120ms step_avg:42.11ms +[2025-09-11 10:32:59] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 10:32:59] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 10:33:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 10:33:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 10:33:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 10:33:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 10:33:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:33:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:33:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 10:33:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 10:33:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 10:33:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 10:33:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 10:33:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 10:33:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 10:33:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 10:33:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 10:33:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 10:33:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 10:33:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 10:33:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 10:33:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 10:33:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 10:33:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 10:33:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 10:33:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 10:33:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 10:33:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 10:33:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 10:33:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 10:33:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 10:33:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 10:33:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 10:33:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 10:33:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 10:33:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 10:33:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 10:33:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 10:33:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 10:33:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 10:33:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 10:33:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 10:33:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 10:33:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 10:33:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:33:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:33:12] [Rank 0] PRINT: step:10000/10000 val_loss:4.2422 total_sharp:1.7110e-05 L1_sharp:8.8266e-03 L2_sharp:1.0456e-02 L3_sharp:1.4834e-02 L4_sharp:2.9052e-02 L5_sharp:3.3856e-02 L6_sharp:4.7243e-02 L7_sharp:6.2702e-02 L8_sharp:6.5922e-02 L9_sharp:5.8677e-02 L10_sharp:8.7357e-02 L11_sharp:1.1003e-01 L12_sharp:2.9880e-01 total_fnorm:3.3281e+00 total_l1_linf:2.2240e+03 total_spectral:1.6641e+00 L1_fnorm:3.7384e-03 L2_fnorm:3.8147e-03 L3_fnorm:3.8300e-03 L4_fnorm:3.8452e-03 L5_fnorm:3.8300e-03 L6_fnorm:3.8300e-03 L7_fnorm:3.8452e-03 L8_fnorm:3.6774e-03 L9_fnorm:3.7689e-03 L10_fnorm:3.7689e-03 L11_fnorm:3.6926e-03 L12_fnorm:3.6316e-03 L1_l1linf:3.5286e-04 L2_l1linf:4.1008e-04 L3_l1linf:3.9673e-04 L4_l1linf:3.9673e-04 L5_l1linf:4.0054e-04 L6_l1linf:4.0245e-04 L7_l1linf:3.9482e-04 L8_l1linf:3.8910e-04 L9_l1linf:3.9291e-04 L10_l1linf:3.7766e-04 L11_l1linf:3.7003e-04 L12_l1linf:3.7003e-04 L1_spectral:6.9966e-05 L2_spectral:7.0615e-05 L3_spectral:6.9901e-05 L4_spectral:7.1788e-05 L5_spectral:7.0991e-05 L6_spectral:7.1663e-05 L7_spectral:7.1418e-05 L8_spectral:6.7922e-05 L9_spectral:6.8463e-05 L10_spectral:7.0532e-05 L11_spectral:6.6312e-05 L12_spectral:6.3805e-05 train_time:421141ms step_avg:42.11ms +[2025-09-11 10:33:12] [Rank 0] PRINT: step:10000/10000 val_loss:4.2422 total_sharp:1.7110e-05 L1_sharp:8.8266e-03 L2_sharp:1.0456e-02 L3_sharp:1.4834e-02 L4_sharp:2.9052e-02 L5_sharp:3.3856e-02 L6_sharp:4.7243e-02 L7_sharp:6.2702e-02 L8_sharp:6.5922e-02 L9_sharp:5.8677e-02 L10_sharp:8.7357e-02 L11_sharp:1.1003e-01 L12_sharp:2.9880e-01 total_fnorm:3.3281e+00 total_l1_linf:2.2240e+03 total_spectral:1.6641e+00 L1_fnorm:3.7384e-03 L2_fnorm:3.8147e-03 L3_fnorm:3.8300e-03 L4_fnorm:3.8452e-03 L5_fnorm:3.8300e-03 L6_fnorm:3.8300e-03 L7_fnorm:3.8452e-03 L8_fnorm:3.6774e-03 L9_fnorm:3.7689e-03 L10_fnorm:3.7689e-03 L11_fnorm:3.6926e-03 L12_fnorm:3.6316e-03 L1_l1linf:3.5286e-04 L2_l1linf:4.1008e-04 L3_l1linf:3.9673e-04 L4_l1linf:3.9673e-04 L5_l1linf:4.0054e-04 
L6_l1linf:4.0245e-04 L7_l1linf:3.9482e-04 L8_l1linf:3.8910e-04 L9_l1linf:3.9291e-04 L10_l1linf:3.7766e-04 L11_l1linf:3.7003e-04 L12_l1linf:3.7003e-04 L1_spectral:6.9966e-05 L2_spectral:7.0615e-05 L3_spectral:6.9901e-05 L4_spectral:7.1788e-05 L5_spectral:7.0991e-05 L6_spectral:7.1663e-05 L7_spectral:7.1418e-05 L8_spectral:6.7922e-05 L9_spectral:6.8463e-05 L10_spectral:7.0532e-05 L11_spectral:6.6312e-05 L12_spectral:6.3805e-05 train_time:421141ms step_avg:42.11ms +[2025-09-11 10:33:12] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 10:33:12 2025 --- +[2025-09-11 10:33:12] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 10:33:12 2025 --- +[2025-09-11 10:33:12] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 10:33:12] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.001_seed_45/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.001_seed_45/config.json new file mode 100644 index 0000000000000000000000000000000000000000..e913c1fd7bdc306c49ef2e61948548db9846386c --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.001_seed_45/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 45, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.02, + "muon_lr": 0.001, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "b20c085f-b55d-4a79-99be-2a2e6abeca87", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.001_seed_45/training_log_b20c085f-b55d-4a79-99be-2a2e6abeca87.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.001_seed_45/training_log_b20c085f-b55d-4a79-99be-2a2e6abeca87.txt new file mode 100644 index 0000000000000000000000000000000000000000..b13ac0d00b6eab6156a24e3328c6838a73ce20db --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.001_seed_45/training_log_b20c085f-b55d-4a79-99be-2a2e6abeca87.txt @@ -0,0 +1,3972 @@ +[2025-09-11 14:28:58] [Rank 0] PRINT: --- Script Start: Thu Sep 11 14:28:58 2025 --- +[2025-09-11 14:28:58] [Rank 0] PRINT: --- Script Start: Thu Sep 11 14:28:58 2025 --- +[2025-09-11 14:28:58] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.02, muon_lr=0.001, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 14:28:58] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.02, muon_lr=0.001, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 14:28:58] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 14:28:58] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 14:28:58] [Rank 0] PRINT: Using fixed seed: 45 +[2025-09-11 14:28:58] [Rank 0] PRINT: Using fixed seed: 45 +[2025-09-11 14:28:58] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.001_seed_45 +[2025-09-11 14:28:58] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.001_seed_45 +[2025-09-11 14:28:58] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import 
dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, 
"unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." 
+ "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention 
sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 14:28:58] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 14:28:58] [Rank 0] PRINT: Constructing model... +[2025-09-11 14:28:58] [Rank 0] PRINT: Constructing model... +[2025-09-11 14:28:59] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 14:28:59] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 14:28:59] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 14:28:59] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 14:28:59] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 14:28:59] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 14:28:59] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 14:28:59] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 14:28:59] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 14:28:59] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 14:29:02] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 14:29:02] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 14:29:02] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 14:29:02] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 14:29:02] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 14:29:02] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 14:29:07] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 14:29:07] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 14:29:07] [Rank 0] PRINT: Starting warmup... +[2025-09-11 14:29:07] [Rank 0] PRINT: Starting warmup... +[2025-09-11 14:29:44] [Rank 0] PRINT: Warmup complete. +[2025-09-11 14:29:44] [Rank 0] PRINT: Warmup complete. +[2025-09-11 14:29:44] [Rank 0] PRINT: Starting training... +[2025-09-11 14:29:44] [Rank 0] PRINT: Starting training... 
+[2025-09-11 14:29:45] [Rank 0] step:21/10000 train_time:1131ms step_avg:53.85ms +[2025-09-11 14:29:45] [Rank 0] step:21/10000 train_time:1131ms step_avg:53.85ms +[2025-09-11 14:29:46] [Rank 0] step:41/10000 train_time:1862ms step_avg:45.41ms +[2025-09-11 14:29:46] [Rank 0] step:41/10000 train_time:1862ms step_avg:45.41ms +[2025-09-11 14:29:47] [Rank 0] step:61/10000 train_time:2592ms step_avg:42.49ms +[2025-09-11 14:29:47] [Rank 0] step:61/10000 train_time:2592ms step_avg:42.49ms +[2025-09-11 14:29:47] [Rank 0] step:81/10000 train_time:3323ms step_avg:41.02ms +[2025-09-11 14:29:47] [Rank 0] step:81/10000 train_time:3323ms step_avg:41.02ms +[2025-09-11 14:29:48] [Rank 0] step:101/10000 train_time:4052ms step_avg:40.12ms +[2025-09-11 14:29:48] [Rank 0] step:101/10000 train_time:4052ms step_avg:40.12ms +[2025-09-11 14:29:49] [Rank 0] step:121/10000 train_time:4782ms step_avg:39.52ms +[2025-09-11 14:29:49] [Rank 0] step:121/10000 train_time:4782ms step_avg:39.52ms +[2025-09-11 14:29:50] [Rank 0] step:141/10000 train_time:5512ms step_avg:39.09ms +[2025-09-11 14:29:50] [Rank 0] step:141/10000 train_time:5512ms step_avg:39.09ms +[2025-09-11 14:29:50] [Rank 0] step:161/10000 train_time:6241ms step_avg:38.76ms +[2025-09-11 14:29:50] [Rank 0] step:161/10000 train_time:6241ms step_avg:38.76ms +[2025-09-11 14:29:51] [Rank 0] step:181/10000 train_time:6970ms step_avg:38.51ms +[2025-09-11 14:29:51] [Rank 0] step:181/10000 train_time:6970ms step_avg:38.51ms +[2025-09-11 14:29:52] [Rank 0] step:201/10000 train_time:7700ms step_avg:38.31ms +[2025-09-11 14:29:52] [Rank 0] step:201/10000 train_time:7700ms step_avg:38.31ms +[2025-09-11 14:29:53] [Rank 0] step:221/10000 train_time:8430ms step_avg:38.14ms +[2025-09-11 14:29:53] [Rank 0] step:221/10000 train_time:8430ms step_avg:38.14ms +[2025-09-11 14:29:53] [Rank 0] step:241/10000 train_time:9159ms step_avg:38.01ms +[2025-09-11 14:29:53] [Rank 0] step:241/10000 train_time:9159ms step_avg:38.01ms +[2025-09-11 14:29:54] [Rank 0] 
step:261/10000 train_time:9889ms step_avg:37.89ms +[2025-09-11 14:29:54] [Rank 0] step:261/10000 train_time:9889ms step_avg:37.89ms +[2025-09-11 14:29:55] [Rank 0] step:281/10000 train_time:10618ms step_avg:37.79ms +[2025-09-11 14:29:55] [Rank 0] step:281/10000 train_time:10618ms step_avg:37.79ms +[2025-09-11 14:29:55] [Rank 0] step:301/10000 train_time:11348ms step_avg:37.70ms +[2025-09-11 14:29:55] [Rank 0] step:301/10000 train_time:11348ms step_avg:37.70ms +[2025-09-11 14:29:56] [Rank 0] step:321/10000 train_time:12078ms step_avg:37.63ms +[2025-09-11 14:29:56] [Rank 0] step:321/10000 train_time:12078ms step_avg:37.63ms +[2025-09-11 14:29:57] [Rank 0] step:341/10000 train_time:12808ms step_avg:37.56ms +[2025-09-11 14:29:57] [Rank 0] step:341/10000 train_time:12808ms step_avg:37.56ms +[2025-09-11 14:29:58] [Rank 0] step:361/10000 train_time:13537ms step_avg:37.50ms +[2025-09-11 14:29:58] [Rank 0] step:361/10000 train_time:13537ms step_avg:37.50ms +[2025-09-11 14:29:58] [Rank 0] step:381/10000 train_time:14266ms step_avg:37.44ms +[2025-09-11 14:29:58] [Rank 0] step:381/10000 train_time:14266ms step_avg:37.44ms +[2025-09-11 14:29:59] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 14:29:59] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 14:30:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 14:30:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 14:30:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 14:30:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 14:30:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:30:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 14:30:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 14:30:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 14:30:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 14:30:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 14:30:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 14:30:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 14:30:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 14:30:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 14:30:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 14:30:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 14:30:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 14:30:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 14:30:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 14:30:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 14:30:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 14:30:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 14:30:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 14:30:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 14:30:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 14:30:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 14:30:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 14:30:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 14:30:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 14:30:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 14:30:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 14:30:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 14:30:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 14:30:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 14:30:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 14:30:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 14:30:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 14:30:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 14:30:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 14:30:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 14:30:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 14:30:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 14:30:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 14:30:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 14:30:46] [Rank 0] PRINT: step:400/10000 val_loss:6.1336 total_sharp:4.5113e-04 L1_sharp:1.1948e-01 L2_sharp:1.1861e-01 L3_sharp:1.0892e-01 L4_sharp:1.1533e-01 L5_sharp:1.2647e-01 L6_sharp:1.6745e-01 L7_sharp:1.2499e-01 L8_sharp:1.3588e-01 L9_sharp:2.6459e-01 L10_sharp:2.7595e-01 L11_sharp:4.0968e-01 L12_sharp:4.4211e-01 total_fnorm:7.4935e+01 total_l1_linf:1.8387e+05 total_spectral:3.7478e+01 L1_fnorm:1.1974e-01 L2_fnorm:1.1941e-01 L3_fnorm:1.1922e-01 L4_fnorm:1.1861e-01 L5_fnorm:1.1910e-01 L6_fnorm:1.1992e-01 L7_fnorm:1.1898e-01 L8_fnorm:1.1869e-01 L9_fnorm:1.1890e-01 L10_fnorm:1.1831e-01 L11_fnorm:1.1785e-01 L12_fnorm:1.1892e-01 L1_l1linf:4.5066e-02 L2_l1linf:4.4886e-02 L3_l1linf:4.4856e-02 L4_l1linf:4.4617e-02 L5_l1linf:4.4761e-02 L6_l1linf:4.4501e-02 L7_l1linf:4.4471e-02 L8_l1linf:4.4539e-02 L9_l1linf:4.4573e-02 L10_l1linf:4.4557e-02 L11_l1linf:4.4348e-02 L12_l1linf:4.4092e-02 L1_spectral:1.2049e-03 L2_spectral:1.2048e-03 L3_spectral:1.2051e-03 L4_spectral:1.2054e-03 L5_spectral:1.2051e-03 L6_spectral:1.2050e-03 L7_spectral:1.2066e-03 L8_spectral:1.2057e-03 L9_spectral:1.2057e-03 L10_spectral:1.2053e-03 L11_spectral:1.2051e-03 L12_spectral:1.2054e-03 train_time:14975ms step_avg:37.44ms +[2025-09-11 14:30:46] [Rank 0] PRINT: step:400/10000 val_loss:6.1336 total_sharp:4.5113e-04 L1_sharp:1.1948e-01 L2_sharp:1.1861e-01 L3_sharp:1.0892e-01 L4_sharp:1.1533e-01 L5_sharp:1.2647e-01 L6_sharp:1.6745e-01 L7_sharp:1.2499e-01 L8_sharp:1.3588e-01 L9_sharp:2.6459e-01 L10_sharp:2.7595e-01 L11_sharp:4.0968e-01 L12_sharp:4.4211e-01 total_fnorm:7.4935e+01 total_l1_linf:1.8387e+05 total_spectral:3.7478e+01 L1_fnorm:1.1974e-01 L2_fnorm:1.1941e-01 L3_fnorm:1.1922e-01 L4_fnorm:1.1861e-01 L5_fnorm:1.1910e-01 L6_fnorm:1.1992e-01 L7_fnorm:1.1898e-01 L8_fnorm:1.1869e-01 L9_fnorm:1.1890e-01 L10_fnorm:1.1831e-01 L11_fnorm:1.1785e-01 L12_fnorm:1.1892e-01 L1_l1linf:4.5066e-02 L2_l1linf:4.4886e-02 L3_l1linf:4.4856e-02 L4_l1linf:4.4617e-02 L5_l1linf:4.4761e-02 
L6_l1linf:4.4501e-02 L7_l1linf:4.4471e-02 L8_l1linf:4.4539e-02 L9_l1linf:4.4573e-02 L10_l1linf:4.4557e-02 L11_l1linf:4.4348e-02 L12_l1linf:4.4092e-02 L1_spectral:1.2049e-03 L2_spectral:1.2048e-03 L3_spectral:1.2051e-03 L4_spectral:1.2054e-03 L5_spectral:1.2051e-03 L6_spectral:1.2050e-03 L7_spectral:1.2066e-03 L8_spectral:1.2057e-03 L9_spectral:1.2057e-03 L10_spectral:1.2053e-03 L11_spectral:1.2051e-03 L12_spectral:1.2054e-03 train_time:14975ms step_avg:37.44ms +[2025-09-11 14:31:16] [Rank 0] step:401/10000 train_time:45215ms step_avg:112.76ms +[2025-09-11 14:31:16] [Rank 0] step:401/10000 train_time:45215ms step_avg:112.76ms +[2025-09-11 14:31:18] [Rank 0] step:421/10000 train_time:47510ms step_avg:112.85ms +[2025-09-11 14:31:18] [Rank 0] step:421/10000 train_time:47510ms step_avg:112.85ms +[2025-09-11 14:31:19] [Rank 0] step:441/10000 train_time:48459ms step_avg:109.88ms +[2025-09-11 14:31:19] [Rank 0] step:441/10000 train_time:48459ms step_avg:109.88ms +[2025-09-11 14:31:20] [Rank 0] step:461/10000 train_time:49101ms step_avg:106.51ms +[2025-09-11 14:31:20] [Rank 0] step:461/10000 train_time:49101ms step_avg:106.51ms +[2025-09-11 14:31:21] [Rank 0] step:481/10000 train_time:49742ms step_avg:103.41ms +[2025-09-11 14:31:21] [Rank 0] step:481/10000 train_time:49742ms step_avg:103.41ms +[2025-09-11 14:31:21] [Rank 0] step:501/10000 train_time:50383ms step_avg:100.56ms +[2025-09-11 14:31:21] [Rank 0] step:501/10000 train_time:50383ms step_avg:100.56ms +[2025-09-11 14:31:22] [Rank 0] step:521/10000 train_time:51023ms step_avg:97.93ms +[2025-09-11 14:31:22] [Rank 0] step:521/10000 train_time:51023ms step_avg:97.93ms +[2025-09-11 14:31:22] [Rank 0] step:541/10000 train_time:51663ms step_avg:95.50ms +[2025-09-11 14:31:22] [Rank 0] step:541/10000 train_time:51663ms step_avg:95.50ms +[2025-09-11 14:31:23] [Rank 0] step:561/10000 train_time:52304ms step_avg:93.23ms +[2025-09-11 14:31:23] [Rank 0] step:561/10000 train_time:52304ms step_avg:93.23ms +[2025-09-11 14:31:24] [Rank 
0] step:581/10000 train_time:52944ms step_avg:91.13ms +[2025-09-11 14:31:24] [Rank 0] step:581/10000 train_time:52944ms step_avg:91.13ms +[2025-09-11 14:31:24] [Rank 0] step:601/10000 train_time:53585ms step_avg:89.16ms +[2025-09-11 14:31:24] [Rank 0] step:601/10000 train_time:53585ms step_avg:89.16ms +[2025-09-11 14:31:25] [Rank 0] step:621/10000 train_time:54226ms step_avg:87.32ms +[2025-09-11 14:31:25] [Rank 0] step:621/10000 train_time:54226ms step_avg:87.32ms +[2025-09-11 14:31:26] [Rank 0] step:641/10000 train_time:54868ms step_avg:85.60ms +[2025-09-11 14:31:26] [Rank 0] step:641/10000 train_time:54868ms step_avg:85.60ms +[2025-09-11 14:31:26] [Rank 0] step:661/10000 train_time:55508ms step_avg:83.98ms +[2025-09-11 14:31:26] [Rank 0] step:661/10000 train_time:55508ms step_avg:83.98ms +[2025-09-11 14:31:27] [Rank 0] step:681/10000 train_time:56150ms step_avg:82.45ms +[2025-09-11 14:31:27] [Rank 0] step:681/10000 train_time:56150ms step_avg:82.45ms +[2025-09-11 14:31:28] [Rank 0] step:701/10000 train_time:56791ms step_avg:81.01ms +[2025-09-11 14:31:28] [Rank 0] step:701/10000 train_time:56791ms step_avg:81.01ms +[2025-09-11 14:31:28] [Rank 0] step:721/10000 train_time:57431ms step_avg:79.66ms +[2025-09-11 14:31:28] [Rank 0] step:721/10000 train_time:57431ms step_avg:79.66ms +[2025-09-11 14:31:29] [Rank 0] step:741/10000 train_time:58071ms step_avg:78.37ms +[2025-09-11 14:31:29] [Rank 0] step:741/10000 train_time:58071ms step_avg:78.37ms +[2025-09-11 14:31:30] [Rank 0] step:761/10000 train_time:58717ms step_avg:77.16ms +[2025-09-11 14:31:30] [Rank 0] step:761/10000 train_time:58717ms step_avg:77.16ms +[2025-09-11 14:31:30] [Rank 0] step:781/10000 train_time:59364ms step_avg:76.01ms +[2025-09-11 14:31:30] [Rank 0] step:781/10000 train_time:59364ms step_avg:76.01ms +[2025-09-11 14:31:31] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 14:31:31] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 14:31:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 14:31:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 14:32:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 14:32:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 14:32:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:32:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:32:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 14:32:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 14:32:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 14:32:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 14:32:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 14:32:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 14:32:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 14:32:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 14:32:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 14:32:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 14:32:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 14:32:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 14:32:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 14:32:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 14:32:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 14:32:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 14:32:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 14:32:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 14:32:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 14:32:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 14:32:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 14:32:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 14:32:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 14:32:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 14:32:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 14:32:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 14:32:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 14:32:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 14:32:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 14:32:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 14:32:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 14:32:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 14:32:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... 
+[2025-09-11 14:32:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 14:32:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 14:32:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 14:32:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 14:32:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 14:32:17] [Rank 0] PRINT: step:800/10000 val_loss:5.6676 total_sharp:3.8597e-04 L1_sharp:1.8458e-01 L2_sharp:1.7030e-01 L3_sharp:1.8314e-01 L4_sharp:1.9431e-01 L5_sharp:2.0556e-01 L6_sharp:2.8910e-01 L7_sharp:2.8859e-01 L8_sharp:4.6323e-01 L9_sharp:7.4726e-01 L10_sharp:7.2416e-01 L11_sharp:7.8706e-01 L12_sharp:9.4098e-01 total_fnorm:7.6000e+01 total_l1_linf:1.5974e+05 total_spectral:3.8000e+01 L1_fnorm:1.1279e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1523e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1279e-01 L6_fnorm:1.1621e-01 L7_fnorm:1.1621e-01 L8_fnorm:1.1133e-01 L9_fnorm:1.1230e-01 L10_fnorm:1.1182e-01 L11_fnorm:1.0938e-01 L12_fnorm:1.0400e-01 L1_l1linf:4.2725e-02 L2_l1linf:4.2480e-02 L3_l1linf:4.2725e-02 L4_l1linf:4.2480e-02 L5_l1linf:4.2480e-02 L6_l1linf:4.2236e-02 L7_l1linf:4.2725e-02 L8_l1linf:4.2480e-02 L9_l1linf:4.2236e-02 L10_l1linf:4.1992e-02 L11_l1linf:4.1016e-02 L12_l1linf:4.0039e-02 L1_spectral:1.6048e-03 L2_spectral:1.6029e-03 L3_spectral:1.6075e-03 L4_spectral:1.6111e-03 L5_spectral:1.5923e-03 L6_spectral:1.5886e-03 L7_spectral:1.6007e-03 L8_spectral:1.5625e-03 L9_spectral:1.5747e-03 L10_spectral:1.5796e-03 L11_spectral:1.5724e-03 L12_spectral:1.5599e-03 train_time:59991ms step_avg:74.99ms +[2025-09-11 14:32:17] [Rank 0] PRINT: step:800/10000 val_loss:5.6676 total_sharp:3.8597e-04 L1_sharp:1.8458e-01 L2_sharp:1.7030e-01 L3_sharp:1.8314e-01 L4_sharp:1.9431e-01 L5_sharp:2.0556e-01 L6_sharp:2.8910e-01 L7_sharp:2.8859e-01 L8_sharp:4.6323e-01 
L9_sharp:7.4726e-01 L10_sharp:7.2416e-01 L11_sharp:7.8706e-01 L12_sharp:9.4098e-01 total_fnorm:7.6000e+01 total_l1_linf:1.5974e+05 total_spectral:3.8000e+01 L1_fnorm:1.1279e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1523e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1279e-01 L6_fnorm:1.1621e-01 L7_fnorm:1.1621e-01 L8_fnorm:1.1133e-01 L9_fnorm:1.1230e-01 L10_fnorm:1.1182e-01 L11_fnorm:1.0938e-01 L12_fnorm:1.0400e-01 L1_l1linf:4.2725e-02 L2_l1linf:4.2480e-02 L3_l1linf:4.2725e-02 L4_l1linf:4.2480e-02 L5_l1linf:4.2480e-02 L6_l1linf:4.2236e-02 L7_l1linf:4.2725e-02 L8_l1linf:4.2480e-02 L9_l1linf:4.2236e-02 L10_l1linf:4.1992e-02 L11_l1linf:4.1016e-02 L12_l1linf:4.0039e-02 L1_spectral:1.6048e-03 L2_spectral:1.6029e-03 L3_spectral:1.6075e-03 L4_spectral:1.6111e-03 L5_spectral:1.5923e-03 L6_spectral:1.5886e-03 L7_spectral:1.6007e-03 L8_spectral:1.5625e-03 L9_spectral:1.5747e-03 L10_spectral:1.5796e-03 L11_spectral:1.5724e-03 L12_spectral:1.5599e-03 train_time:59991ms step_avg:74.99ms +[2025-09-11 14:32:19] [Rank 0] step:801/10000 train_time:61612ms step_avg:76.92ms +[2025-09-11 14:32:19] [Rank 0] step:801/10000 train_time:61612ms step_avg:76.92ms +[2025-09-11 14:32:20] [Rank 0] step:821/10000 train_time:62416ms step_avg:76.02ms +[2025-09-11 14:32:20] [Rank 0] step:821/10000 train_time:62416ms step_avg:76.02ms +[2025-09-11 14:32:20] [Rank 0] step:841/10000 train_time:63178ms step_avg:75.12ms +[2025-09-11 14:32:20] [Rank 0] step:841/10000 train_time:63178ms step_avg:75.12ms +[2025-09-11 14:32:21] [Rank 0] step:861/10000 train_time:63824ms step_avg:74.13ms +[2025-09-11 14:32:21] [Rank 0] step:861/10000 train_time:63824ms step_avg:74.13ms +[2025-09-11 14:32:22] [Rank 0] step:881/10000 train_time:64470ms step_avg:73.18ms +[2025-09-11 14:32:22] [Rank 0] step:881/10000 train_time:64470ms step_avg:73.18ms +[2025-09-11 14:32:23] [Rank 0] step:901/10000 train_time:65420ms step_avg:72.61ms +[2025-09-11 14:32:23] [Rank 0] step:901/10000 train_time:65420ms step_avg:72.61ms +[2025-09-11 14:32:23] [Rank 0] 
step:921/10000 train_time:66066ms step_avg:71.73ms +[2025-09-11 14:32:23] [Rank 0] step:921/10000 train_time:66066ms step_avg:71.73ms +[2025-09-11 14:32:24] [Rank 0] step:941/10000 train_time:66711ms step_avg:70.89ms +[2025-09-11 14:32:24] [Rank 0] step:941/10000 train_time:66711ms step_avg:70.89ms +[2025-09-11 14:32:25] [Rank 0] step:961/10000 train_time:67357ms step_avg:70.09ms +[2025-09-11 14:32:25] [Rank 0] step:961/10000 train_time:67357ms step_avg:70.09ms +[2025-09-11 14:32:25] [Rank 0] step:981/10000 train_time:68002ms step_avg:69.32ms +[2025-09-11 14:32:25] [Rank 0] step:981/10000 train_time:68002ms step_avg:69.32ms +[2025-09-11 14:32:26] [Rank 0] step:1001/10000 train_time:68651ms step_avg:68.58ms +[2025-09-11 14:32:26] [Rank 0] step:1001/10000 train_time:68651ms step_avg:68.58ms +[2025-09-11 14:32:27] [Rank 0] step:1021/10000 train_time:69295ms step_avg:67.87ms +[2025-09-11 14:32:27] [Rank 0] step:1021/10000 train_time:69295ms step_avg:67.87ms +[2025-09-11 14:32:27] [Rank 0] step:1041/10000 train_time:69940ms step_avg:67.19ms +[2025-09-11 14:32:27] [Rank 0] step:1041/10000 train_time:69940ms step_avg:67.19ms +[2025-09-11 14:32:28] [Rank 0] step:1061/10000 train_time:70586ms step_avg:66.53ms +[2025-09-11 14:32:28] [Rank 0] step:1061/10000 train_time:70586ms step_avg:66.53ms +[2025-09-11 14:32:28] [Rank 0] step:1081/10000 train_time:71233ms step_avg:65.90ms +[2025-09-11 14:32:28] [Rank 0] step:1081/10000 train_time:71233ms step_avg:65.90ms +[2025-09-11 14:32:29] [Rank 0] step:1101/10000 train_time:71879ms step_avg:65.29ms +[2025-09-11 14:32:29] [Rank 0] step:1101/10000 train_time:71879ms step_avg:65.29ms +[2025-09-11 14:32:30] [Rank 0] step:1121/10000 train_time:72525ms step_avg:64.70ms +[2025-09-11 14:32:30] [Rank 0] step:1121/10000 train_time:72525ms step_avg:64.70ms +[2025-09-11 14:32:30] [Rank 0] step:1141/10000 train_time:73171ms step_avg:64.13ms +[2025-09-11 14:32:30] [Rank 0] step:1141/10000 train_time:73171ms step_avg:64.13ms +[2025-09-11 14:32:31] 
[Rank 0] step:1161/10000 train_time:73817ms step_avg:63.58ms +[2025-09-11 14:32:31] [Rank 0] step:1161/10000 train_time:73817ms step_avg:63.58ms +[2025-09-11 14:32:32] [Rank 0] step:1181/10000 train_time:74462ms step_avg:63.05ms +[2025-09-11 14:32:32] [Rank 0] step:1181/10000 train_time:74462ms step_avg:63.05ms +[2025-09-11 14:32:32] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 14:32:32] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 14:32:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 14:32:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 14:32:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 14:32:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 14:32:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:32:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:32:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 14:32:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 14:32:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 14:32:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 14:32:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 14:32:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 14:32:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... 
+[2025-09-11 14:32:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 14:32:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 14:32:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 14:32:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 14:32:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 14:32:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 14:32:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 14:32:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 14:32:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 14:32:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 14:32:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 14:32:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 14:32:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 14:32:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 14:32:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 14:32:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 14:32:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 14:32:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 14:32:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 14:32:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... 
+[2025-09-11 14:32:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 14:32:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 14:32:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 14:32:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 14:32:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 14:32:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 14:32:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 14:32:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 14:32:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 14:32:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 14:32:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 14:32:43] [Rank 0] PRINT: step:1200/10000 val_loss:5.3847 total_sharp:3.0760e-04 L1_sharp:1.2953e-01 L2_sharp:1.4618e-01 L3_sharp:1.5083e-01 L4_sharp:1.6809e-01 L5_sharp:1.8295e-01 L6_sharp:1.6115e-01 L7_sharp:1.7492e-01 L8_sharp:1.9182e-01 L9_sharp:2.3522e-01 L10_sharp:3.2549e-01 L11_sharp:8.4087e-01 L12_sharp:1.9470e+00 total_fnorm:7.6500e+01 total_l1_linf:1.5462e+05 total_spectral:3.8250e+01 L1_fnorm:1.1621e-01 L2_fnorm:1.1670e-01 L3_fnorm:1.1670e-01 L4_fnorm:1.1621e-01 L5_fnorm:1.1572e-01 L6_fnorm:1.1670e-01 L7_fnorm:1.1719e-01 L8_fnorm:1.1523e-01 L9_fnorm:1.1670e-01 L10_fnorm:1.1719e-01 L11_fnorm:1.1816e-01 L12_fnorm:1.1523e-01 L1_l1linf:4.0771e-02 L2_l1linf:4.1016e-02 L3_l1linf:4.0771e-02 L4_l1linf:4.0283e-02 L5_l1linf:4.0283e-02 L6_l1linf:4.0283e-02 L7_l1linf:4.0283e-02 L8_l1linf:3.9795e-02 L9_l1linf:4.0527e-02 L10_l1linf:4.1016e-02 L11_l1linf:4.1504e-02 L12_l1linf:4.1504e-02 L1_spectral:1.5926e-03 L2_spectral:1.6193e-03 L3_spectral:1.6052e-03 L4_spectral:1.6126e-03 L5_spectral:1.6004e-03 L6_spectral:1.5992e-03 L7_spectral:1.6017e-03 L8_spectral:1.5869e-03 L9_spectral:1.6028e-03 L10_spectral:1.5963e-03 L11_spectral:1.5969e-03 L12_spectral:1.5947e-03 train_time:75090ms step_avg:62.57ms +[2025-09-11 14:32:43] [Rank 0] PRINT: step:1200/10000 val_loss:5.3847 total_sharp:3.0760e-04 L1_sharp:1.2953e-01 L2_sharp:1.4618e-01 L3_sharp:1.5083e-01 L4_sharp:1.6809e-01 L5_sharp:1.8295e-01 L6_sharp:1.6115e-01 L7_sharp:1.7492e-01 L8_sharp:1.9182e-01 L9_sharp:2.3522e-01 L10_sharp:3.2549e-01 L11_sharp:8.4087e-01 L12_sharp:1.9470e+00 total_fnorm:7.6500e+01 total_l1_linf:1.5462e+05 total_spectral:3.8250e+01 L1_fnorm:1.1621e-01 L2_fnorm:1.1670e-01 L3_fnorm:1.1670e-01 L4_fnorm:1.1621e-01 L5_fnorm:1.1572e-01 L6_fnorm:1.1670e-01 L7_fnorm:1.1719e-01 L8_fnorm:1.1523e-01 L9_fnorm:1.1670e-01 L10_fnorm:1.1719e-01 L11_fnorm:1.1816e-01 L12_fnorm:1.1523e-01 L1_l1linf:4.0771e-02 L2_l1linf:4.1016e-02 L3_l1linf:4.0771e-02 L4_l1linf:4.0283e-02 L5_l1linf:4.0283e-02 
L6_l1linf:4.0283e-02 L7_l1linf:4.0283e-02 L8_l1linf:3.9795e-02 L9_l1linf:4.0527e-02 L10_l1linf:4.1016e-02 L11_l1linf:4.1504e-02 L12_l1linf:4.1504e-02 L1_spectral:1.5926e-03 L2_spectral:1.6193e-03 L3_spectral:1.6052e-03 L4_spectral:1.6126e-03 L5_spectral:1.6004e-03 L6_spectral:1.5992e-03 L7_spectral:1.6017e-03 L8_spectral:1.5869e-03 L9_spectral:1.6028e-03 L10_spectral:1.5963e-03 L11_spectral:1.5969e-03 L12_spectral:1.5947e-03 train_time:75090ms step_avg:62.57ms +[2025-09-11 14:32:44] [Rank 0] step:1201/10000 train_time:76708ms step_avg:63.87ms +[2025-09-11 14:32:44] [Rank 0] step:1201/10000 train_time:76708ms step_avg:63.87ms +[2025-09-11 14:32:45] [Rank 0] step:1221/10000 train_time:77358ms step_avg:63.36ms +[2025-09-11 14:32:45] [Rank 0] step:1221/10000 train_time:77358ms step_avg:63.36ms +[2025-09-11 14:32:46] [Rank 0] step:1241/10000 train_time:78004ms step_avg:62.86ms +[2025-09-11 14:32:46] [Rank 0] step:1241/10000 train_time:78004ms step_avg:62.86ms +[2025-09-11 14:32:46] [Rank 0] step:1261/10000 train_time:78651ms step_avg:62.37ms +[2025-09-11 14:32:46] [Rank 0] step:1261/10000 train_time:78651ms step_avg:62.37ms +[2025-09-11 14:32:47] [Rank 0] step:1281/10000 train_time:79296ms step_avg:61.90ms +[2025-09-11 14:32:47] [Rank 0] step:1281/10000 train_time:79296ms step_avg:61.90ms +[2025-09-11 14:32:48] [Rank 0] step:1301/10000 train_time:79943ms step_avg:61.45ms +[2025-09-11 14:32:48] [Rank 0] step:1301/10000 train_time:79943ms step_avg:61.45ms +[2025-09-11 14:32:48] [Rank 0] step:1321/10000 train_time:80589ms step_avg:61.01ms +[2025-09-11 14:32:48] [Rank 0] step:1321/10000 train_time:80589ms step_avg:61.01ms +[2025-09-11 14:32:49] [Rank 0] step:1341/10000 train_time:81235ms step_avg:60.58ms +[2025-09-11 14:32:49] [Rank 0] step:1341/10000 train_time:81235ms step_avg:60.58ms +[2025-09-11 14:32:50] [Rank 0] step:1361/10000 train_time:81881ms step_avg:60.16ms +[2025-09-11 14:32:50] [Rank 0] step:1361/10000 train_time:81881ms step_avg:60.16ms +[2025-09-11 14:32:50] 
[Rank 0] step:1381/10000 train_time:82526ms step_avg:59.76ms +[2025-09-11 14:32:50] [Rank 0] step:1381/10000 train_time:82526ms step_avg:59.76ms +[2025-09-11 14:32:51] [Rank 0] step:1401/10000 train_time:83172ms step_avg:59.37ms +[2025-09-11 14:32:51] [Rank 0] step:1401/10000 train_time:83172ms step_avg:59.37ms +[2025-09-11 14:32:51] [Rank 0] step:1421/10000 train_time:83818ms step_avg:58.99ms +[2025-09-11 14:32:51] [Rank 0] step:1421/10000 train_time:83818ms step_avg:58.99ms +[2025-09-11 14:32:52] [Rank 0] step:1441/10000 train_time:84464ms step_avg:58.61ms +[2025-09-11 14:32:52] [Rank 0] step:1441/10000 train_time:84464ms step_avg:58.61ms +[2025-09-11 14:32:53] [Rank 0] step:1461/10000 train_time:85110ms step_avg:58.25ms +[2025-09-11 14:32:53] [Rank 0] step:1461/10000 train_time:85110ms step_avg:58.25ms +[2025-09-11 14:32:53] [Rank 0] step:1481/10000 train_time:85765ms step_avg:57.91ms +[2025-09-11 14:32:53] [Rank 0] step:1481/10000 train_time:85765ms step_avg:57.91ms +[2025-09-11 14:32:54] [Rank 0] step:1501/10000 train_time:86414ms step_avg:57.57ms +[2025-09-11 14:32:54] [Rank 0] step:1501/10000 train_time:86414ms step_avg:57.57ms +[2025-09-11 14:32:55] [Rank 0] step:1521/10000 train_time:87065ms step_avg:57.24ms +[2025-09-11 14:32:55] [Rank 0] step:1521/10000 train_time:87065ms step_avg:57.24ms +[2025-09-11 14:32:55] [Rank 0] step:1541/10000 train_time:87714ms step_avg:56.92ms +[2025-09-11 14:32:55] [Rank 0] step:1541/10000 train_time:87714ms step_avg:56.92ms +[2025-09-11 14:32:56] [Rank 0] step:1561/10000 train_time:88364ms step_avg:56.61ms +[2025-09-11 14:32:56] [Rank 0] step:1561/10000 train_time:88364ms step_avg:56.61ms +[2025-09-11 14:32:57] [Rank 0] step:1581/10000 train_time:89015ms step_avg:56.30ms +[2025-09-11 14:32:57] [Rank 0] step:1581/10000 train_time:89015ms step_avg:56.30ms +[2025-09-11 14:32:57] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 14:32:57] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 14:32:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 14:32:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 14:33:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 14:33:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 14:33:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:33:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:33:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 14:33:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 14:33:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 14:33:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 14:33:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 14:33:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 14:33:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 14:33:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 14:33:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 14:33:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 14:33:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 14:33:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 14:33:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 14:33:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 14:33:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 14:33:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 14:33:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 14:33:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 14:33:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 14:33:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 14:33:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 14:33:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 14:33:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 14:33:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 14:33:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 14:33:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 14:33:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 14:33:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 14:33:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 14:33:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 14:33:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 14:33:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 14:33:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 14:33:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 14:33:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 14:33:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 14:33:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 14:33:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 14:33:08] [Rank 0] PRINT: step:1600/10000 val_loss:5.2389 total_sharp:2.4186e-04 L1_sharp:9.2361e-02 L2_sharp:9.8083e-02 L3_sharp:1.1841e-01 L4_sharp:1.3550e-01 L5_sharp:1.3677e-01 L6_sharp:1.2443e-01 L7_sharp:1.1532e-01 L8_sharp:1.4847e-01 L9_sharp:1.8392e-01 L10_sharp:2.0464e-01 L11_sharp:2.4989e-01 L12_sharp:7.7952e-01 total_fnorm:7.5500e+01 total_l1_linf:1.4541e+05 total_spectral:3.7750e+01 L1_fnorm:1.1572e-01 L2_fnorm:1.1670e-01 L3_fnorm:1.1670e-01 L4_fnorm:1.1670e-01 L5_fnorm:1.1572e-01 L6_fnorm:1.1670e-01 L7_fnorm:1.1719e-01 L8_fnorm:1.1572e-01 L9_fnorm:1.1670e-01 L10_fnorm:1.1670e-01 L11_fnorm:1.1816e-01 L12_fnorm:1.1621e-01 L1_l1linf:3.8818e-02 L2_l1linf:3.8574e-02 L3_l1linf:3.8574e-02 L4_l1linf:3.8574e-02 L5_l1linf:3.8330e-02 L6_l1linf:3.7842e-02 L7_l1linf:3.8330e-02 L8_l1linf:3.7842e-02 L9_l1linf:3.8086e-02 L10_l1linf:3.8330e-02 L11_l1linf:3.9062e-02 L12_l1linf:4.0283e-02 L1_spectral:1.6028e-03 L2_spectral:1.6050e-03 L3_spectral:1.6060e-03 L4_spectral:1.5964e-03 L5_spectral:1.6003e-03 L6_spectral:1.5993e-03 L7_spectral:1.6070e-03 L8_spectral:1.5978e-03 L9_spectral:1.6131e-03 L10_spectral:1.6008e-03 L11_spectral:1.6134e-03 L12_spectral:1.6068e-03 train_time:89646ms step_avg:56.03ms +[2025-09-11 14:33:08] [Rank 0] PRINT: step:1600/10000 
val_loss:5.2389 total_sharp:2.4186e-04 L1_sharp:9.2361e-02 L2_sharp:9.8083e-02 L3_sharp:1.1841e-01 L4_sharp:1.3550e-01 L5_sharp:1.3677e-01 L6_sharp:1.2443e-01 L7_sharp:1.1532e-01 L8_sharp:1.4847e-01 L9_sharp:1.8392e-01 L10_sharp:2.0464e-01 L11_sharp:2.4989e-01 L12_sharp:7.7952e-01 total_fnorm:7.5500e+01 total_l1_linf:1.4541e+05 total_spectral:3.7750e+01 L1_fnorm:1.1572e-01 L2_fnorm:1.1670e-01 L3_fnorm:1.1670e-01 L4_fnorm:1.1670e-01 L5_fnorm:1.1572e-01 L6_fnorm:1.1670e-01 L7_fnorm:1.1719e-01 L8_fnorm:1.1572e-01 L9_fnorm:1.1670e-01 L10_fnorm:1.1670e-01 L11_fnorm:1.1816e-01 L12_fnorm:1.1621e-01 L1_l1linf:3.8818e-02 L2_l1linf:3.8574e-02 L3_l1linf:3.8574e-02 L4_l1linf:3.8574e-02 L5_l1linf:3.8330e-02 L6_l1linf:3.7842e-02 L7_l1linf:3.8330e-02 L8_l1linf:3.7842e-02 L9_l1linf:3.8086e-02 L10_l1linf:3.8330e-02 L11_l1linf:3.9062e-02 L12_l1linf:4.0283e-02 L1_spectral:1.6028e-03 L2_spectral:1.6050e-03 L3_spectral:1.6060e-03 L4_spectral:1.5964e-03 L5_spectral:1.6003e-03 L6_spectral:1.5993e-03 L7_spectral:1.6070e-03 L8_spectral:1.5978e-03 L9_spectral:1.6131e-03 L10_spectral:1.6008e-03 L11_spectral:1.6134e-03 L12_spectral:1.6068e-03 train_time:89646ms step_avg:56.03ms +[2025-09-11 14:33:09] [Rank 0] step:1601/10000 train_time:91320ms step_avg:57.04ms +[2025-09-11 14:33:09] [Rank 0] step:1601/10000 train_time:91320ms step_avg:57.04ms +[2025-09-11 14:33:10] [Rank 0] step:1621/10000 train_time:91978ms step_avg:56.74ms +[2025-09-11 14:33:10] [Rank 0] step:1621/10000 train_time:91978ms step_avg:56.74ms +[2025-09-11 14:33:11] [Rank 0] step:1641/10000 train_time:92629ms step_avg:56.45ms +[2025-09-11 14:33:11] [Rank 0] step:1641/10000 train_time:92629ms step_avg:56.45ms +[2025-09-11 14:33:11] [Rank 0] step:1661/10000 train_time:93280ms step_avg:56.16ms +[2025-09-11 14:33:11] [Rank 0] step:1661/10000 train_time:93280ms step_avg:56.16ms +[2025-09-11 14:33:12] [Rank 0] step:1681/10000 train_time:93930ms step_avg:55.88ms +[2025-09-11 14:33:12] [Rank 0] step:1681/10000 train_time:93930ms 
step_avg:55.88ms +[2025-09-11 14:33:13] [Rank 0] step:1701/10000 train_time:94581ms step_avg:55.60ms +[2025-09-11 14:33:13] [Rank 0] step:1701/10000 train_time:94581ms step_avg:55.60ms +[2025-09-11 14:33:13] [Rank 0] step:1721/10000 train_time:95231ms step_avg:55.33ms +[2025-09-11 14:33:13] [Rank 0] step:1721/10000 train_time:95231ms step_avg:55.33ms +[2025-09-11 14:33:14] [Rank 0] step:1741/10000 train_time:95881ms step_avg:55.07ms +[2025-09-11 14:33:14] [Rank 0] step:1741/10000 train_time:95881ms step_avg:55.07ms +[2025-09-11 14:33:14] [Rank 0] step:1761/10000 train_time:96531ms step_avg:54.82ms +[2025-09-11 14:33:14] [Rank 0] step:1761/10000 train_time:96531ms step_avg:54.82ms +[2025-09-11 14:33:15] [Rank 0] step:1781/10000 train_time:97181ms step_avg:54.57ms +[2025-09-11 14:33:15] [Rank 0] step:1781/10000 train_time:97181ms step_avg:54.57ms +[2025-09-11 14:33:16] [Rank 0] step:1801/10000 train_time:97831ms step_avg:54.32ms +[2025-09-11 14:33:16] [Rank 0] step:1801/10000 train_time:97831ms step_avg:54.32ms +[2025-09-11 14:33:16] [Rank 0] step:1821/10000 train_time:98481ms step_avg:54.08ms +[2025-09-11 14:33:16] [Rank 0] step:1821/10000 train_time:98481ms step_avg:54.08ms +[2025-09-11 14:33:17] [Rank 0] step:1841/10000 train_time:99131ms step_avg:53.85ms +[2025-09-11 14:33:17] [Rank 0] step:1841/10000 train_time:99131ms step_avg:53.85ms +[2025-09-11 14:33:18] [Rank 0] step:1861/10000 train_time:99781ms step_avg:53.62ms +[2025-09-11 14:33:18] [Rank 0] step:1861/10000 train_time:99781ms step_avg:53.62ms +[2025-09-11 14:33:18] [Rank 0] step:1881/10000 train_time:100432ms step_avg:53.39ms +[2025-09-11 14:33:18] [Rank 0] step:1881/10000 train_time:100432ms step_avg:53.39ms +[2025-09-11 14:33:19] [Rank 0] step:1901/10000 train_time:101082ms step_avg:53.17ms +[2025-09-11 14:33:19] [Rank 0] step:1901/10000 train_time:101082ms step_avg:53.17ms +[2025-09-11 14:33:20] [Rank 0] step:1921/10000 train_time:101732ms step_avg:52.96ms +[2025-09-11 14:33:20] [Rank 0] 
step:1921/10000 train_time:101732ms step_avg:52.96ms +[2025-09-11 14:33:20] [Rank 0] step:1941/10000 train_time:102383ms step_avg:52.75ms +[2025-09-11 14:33:20] [Rank 0] step:1941/10000 train_time:102383ms step_avg:52.75ms +[2025-09-11 14:33:21] [Rank 0] step:1961/10000 train_time:103032ms step_avg:52.54ms +[2025-09-11 14:33:21] [Rank 0] step:1961/10000 train_time:103032ms step_avg:52.54ms +[2025-09-11 14:33:22] [Rank 0] step:1981/10000 train_time:103683ms step_avg:52.34ms +[2025-09-11 14:33:22] [Rank 0] step:1981/10000 train_time:103683ms step_avg:52.34ms +[2025-09-11 14:33:22] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 14:33:22] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 14:33:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 14:33:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 14:33:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 14:33:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 14:33:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:33:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:33:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 14:33:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 14:33:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 14:33:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 14:33:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 14:33:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 14:33:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 14:33:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 14:33:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 14:33:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 14:33:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 14:33:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 14:33:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 14:33:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 14:33:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 14:33:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 14:33:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 14:33:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 14:33:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 14:33:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 14:33:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 14:33:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 14:33:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 14:33:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 14:33:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 14:33:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 14:33:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 14:33:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 14:33:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 14:33:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 14:33:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 14:33:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 14:33:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 14:33:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 14:33:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 14:33:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 14:33:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 14:33:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 14:33:33] [Rank 0] PRINT: step:2000/10000 val_loss:5.1097 total_sharp:2.2337e-04 L1_sharp:7.3985e-02 L2_sharp:8.4784e-02 L3_sharp:9.0027e-02 L4_sharp:1.0988e-01 L5_sharp:1.0905e-01 L6_sharp:1.1208e-01 L7_sharp:1.3140e-01 L8_sharp:1.7852e-01 L9_sharp:2.0811e-01 L10_sharp:2.9045e-01 L11_sharp:1.2221e+00 L12_sharp:2.4730e+00 total_fnorm:7.4000e+01 total_l1_linf:1.4643e+05 total_spectral:3.7000e+01 L1_fnorm:1.1572e-01 L2_fnorm:1.1670e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1621e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1621e-01 L7_fnorm:1.1621e-01 L8_fnorm:1.1523e-01 L9_fnorm:1.1670e-01 L10_fnorm:1.1719e-01 L11_fnorm:1.1865e-01 L12_fnorm:1.1670e-01 L1_l1linf:3.6621e-02 L2_l1linf:3.7109e-02 L3_l1linf:3.6621e-02 L4_l1linf:3.6377e-02 L5_l1linf:3.6133e-02 L6_l1linf:3.6133e-02 L7_l1linf:3.5889e-02 L8_l1linf:3.5889e-02 L9_l1linf:3.6621e-02 L10_l1linf:3.7109e-02 L11_l1linf:3.8330e-02 L12_l1linf:3.9062e-02 L1_spectral:1.6010e-03 L2_spectral:1.6061e-03 L3_spectral:1.6066e-03 L4_spectral:1.6081e-03 L5_spectral:1.6068e-03 L6_spectral:1.5998e-03 L7_spectral:1.6053e-03 L8_spectral:1.5886e-03 L9_spectral:1.6032e-03 L10_spectral:1.6012e-03 L11_spectral:1.6041e-03 L12_spectral:1.6035e-03 train_time:104316ms step_avg:52.16ms +[2025-09-11 14:33:33] [Rank 0] PRINT: step:2000/10000 val_loss:5.1097 total_sharp:2.2337e-04 L1_sharp:7.3985e-02 L2_sharp:8.4784e-02 L3_sharp:9.0027e-02 L4_sharp:1.0988e-01 L5_sharp:1.0905e-01 L6_sharp:1.1208e-01 L7_sharp:1.3140e-01 L8_sharp:1.7852e-01 L9_sharp:2.0811e-01 L10_sharp:2.9045e-01 L11_sharp:1.2221e+00 L12_sharp:2.4730e+00 total_fnorm:7.4000e+01 total_l1_linf:1.4643e+05 total_spectral:3.7000e+01 L1_fnorm:1.1572e-01 L2_fnorm:1.1670e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1621e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1621e-01 L7_fnorm:1.1621e-01 L8_fnorm:1.1523e-01 L9_fnorm:1.1670e-01 L10_fnorm:1.1719e-01 L11_fnorm:1.1865e-01 L12_fnorm:1.1670e-01 L1_l1linf:3.6621e-02 L2_l1linf:3.7109e-02 L3_l1linf:3.6621e-02 L4_l1linf:3.6377e-02 L5_l1linf:3.6133e-02 
L6_l1linf:3.6133e-02 L7_l1linf:3.5889e-02 L8_l1linf:3.5889e-02 L9_l1linf:3.6621e-02 L10_l1linf:3.7109e-02 L11_l1linf:3.8330e-02 L12_l1linf:3.9062e-02 L1_spectral:1.6010e-03 L2_spectral:1.6061e-03 L3_spectral:1.6066e-03 L4_spectral:1.6081e-03 L5_spectral:1.6068e-03 L6_spectral:1.5998e-03 L7_spectral:1.6053e-03 L8_spectral:1.5886e-03 L9_spectral:1.6032e-03 L10_spectral:1.6012e-03 L11_spectral:1.6041e-03 L12_spectral:1.6035e-03 train_time:104316ms step_avg:52.16ms +[2025-09-11 14:33:34] [Rank 0] step:2001/10000 train_time:105845ms step_avg:52.90ms +[2025-09-11 14:33:34] [Rank 0] step:2001/10000 train_time:105845ms step_avg:52.90ms +[2025-09-11 14:33:35] [Rank 0] step:2021/10000 train_time:106515ms step_avg:52.70ms +[2025-09-11 14:33:35] [Rank 0] step:2021/10000 train_time:106515ms step_avg:52.70ms +[2025-09-11 14:33:36] [Rank 0] step:2041/10000 train_time:107167ms step_avg:52.51ms +[2025-09-11 14:33:36] [Rank 0] step:2041/10000 train_time:107167ms step_avg:52.51ms +[2025-09-11 14:33:36] [Rank 0] step:2061/10000 train_time:107817ms step_avg:52.31ms +[2025-09-11 14:33:36] [Rank 0] step:2061/10000 train_time:107817ms step_avg:52.31ms +[2025-09-11 14:33:37] [Rank 0] step:2081/10000 train_time:108468ms step_avg:52.12ms +[2025-09-11 14:33:37] [Rank 0] step:2081/10000 train_time:108468ms step_avg:52.12ms +[2025-09-11 14:33:37] [Rank 0] step:2101/10000 train_time:109119ms step_avg:51.94ms +[2025-09-11 14:33:37] [Rank 0] step:2101/10000 train_time:109119ms step_avg:51.94ms +[2025-09-11 14:33:38] [Rank 0] step:2121/10000 train_time:109769ms step_avg:51.75ms +[2025-09-11 14:33:38] [Rank 0] step:2121/10000 train_time:109769ms step_avg:51.75ms +[2025-09-11 14:33:39] [Rank 0] step:2141/10000 train_time:110420ms step_avg:51.57ms +[2025-09-11 14:33:39] [Rank 0] step:2141/10000 train_time:110420ms step_avg:51.57ms +[2025-09-11 14:33:39] [Rank 0] step:2161/10000 train_time:111070ms step_avg:51.40ms +[2025-09-11 14:33:39] [Rank 0] step:2161/10000 train_time:111070ms step_avg:51.40ms 
+[2025-09-11 14:33:40] [Rank 0] step:2181/10000 train_time:111719ms step_avg:51.22ms +[2025-09-11 14:33:40] [Rank 0] step:2181/10000 train_time:111719ms step_avg:51.22ms +[2025-09-11 14:33:41] [Rank 0] step:2201/10000 train_time:112369ms step_avg:51.05ms +[2025-09-11 14:33:41] [Rank 0] step:2201/10000 train_time:112369ms step_avg:51.05ms +[2025-09-11 14:33:41] [Rank 0] step:2221/10000 train_time:113021ms step_avg:50.89ms +[2025-09-11 14:33:41] [Rank 0] step:2221/10000 train_time:113021ms step_avg:50.89ms +[2025-09-11 14:33:42] [Rank 0] step:2241/10000 train_time:113683ms step_avg:50.73ms +[2025-09-11 14:33:42] [Rank 0] step:2241/10000 train_time:113683ms step_avg:50.73ms +[2025-09-11 14:33:43] [Rank 0] step:2261/10000 train_time:114346ms step_avg:50.57ms +[2025-09-11 14:33:43] [Rank 0] step:2261/10000 train_time:114346ms step_avg:50.57ms +[2025-09-11 14:33:43] [Rank 0] step:2281/10000 train_time:115010ms step_avg:50.42ms +[2025-09-11 14:33:43] [Rank 0] step:2281/10000 train_time:115010ms step_avg:50.42ms +[2025-09-11 14:33:44] [Rank 0] step:2301/10000 train_time:115674ms step_avg:50.27ms +[2025-09-11 14:33:44] [Rank 0] step:2301/10000 train_time:115674ms step_avg:50.27ms +[2025-09-11 14:33:45] [Rank 0] step:2321/10000 train_time:116338ms step_avg:50.12ms +[2025-09-11 14:33:45] [Rank 0] step:2321/10000 train_time:116338ms step_avg:50.12ms +[2025-09-11 14:33:45] [Rank 0] step:2341/10000 train_time:117001ms step_avg:49.98ms +[2025-09-11 14:33:45] [Rank 0] step:2341/10000 train_time:117001ms step_avg:49.98ms +[2025-09-11 14:33:46] [Rank 0] step:2361/10000 train_time:117665ms step_avg:49.84ms +[2025-09-11 14:33:46] [Rank 0] step:2361/10000 train_time:117665ms step_avg:49.84ms +[2025-09-11 14:33:47] [Rank 0] step:2381/10000 train_time:118328ms step_avg:49.70ms +[2025-09-11 14:33:47] [Rank 0] step:2381/10000 train_time:118328ms step_avg:49.70ms +[2025-09-11 14:33:47] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 14:33:47] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 14:33:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 14:33:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 14:33:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 14:33:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 14:33:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:33:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:33:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 14:33:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 14:33:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 14:33:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 14:33:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 14:33:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 14:33:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 14:33:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 14:33:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 14:33:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 14:33:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 14:33:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 14:33:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 14:33:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 14:33:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 14:33:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 14:33:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 14:33:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 14:33:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 14:33:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 14:33:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 14:33:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 14:33:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 14:33:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 14:33:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 14:33:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 14:33:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 14:33:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 14:33:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 14:33:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 14:33:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 14:33:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 14:33:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 14:33:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 14:33:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 14:33:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 14:33:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 14:33:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 14:33:58] [Rank 0] PRINT: step:2400/10000 val_loss:4.9878 total_sharp:1.9609e-04 L1_sharp:5.0469e-02 L2_sharp:6.5528e-02 L3_sharp:8.0678e-02 L4_sharp:1.0209e-01 L5_sharp:1.0229e-01 L6_sharp:1.1173e-01 L7_sharp:1.1125e-01 L8_sharp:1.4850e-01 L9_sharp:1.7224e-01 L10_sharp:2.1206e-01 L11_sharp:2.8334e-01 L12_sharp:9.6136e-01 total_fnorm:7.1000e+01 total_l1_linf:1.3517e+05 total_spectral:3.5500e+01 L1_fnorm:1.1572e-01 L2_fnorm:1.1621e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1621e-01 L5_fnorm:1.1572e-01 L6_fnorm:1.1621e-01 L7_fnorm:1.1621e-01 L8_fnorm:1.1523e-01 L9_fnorm:1.1670e-01 L10_fnorm:1.1621e-01 L11_fnorm:1.1768e-01 L12_fnorm:1.1670e-01 L1_l1linf:3.5645e-02 L2_l1linf:3.5889e-02 L3_l1linf:3.5645e-02 L4_l1linf:3.5645e-02 L5_l1linf:3.5400e-02 L6_l1linf:3.4912e-02 L7_l1linf:3.5156e-02 L8_l1linf:3.5156e-02 L9_l1linf:3.4912e-02 L10_l1linf:3.5400e-02 L11_l1linf:3.6133e-02 L12_l1linf:3.7842e-02 L1_spectral:1.6026e-03 L2_spectral:1.6124e-03 L3_spectral:1.6104e-03 L4_spectral:1.6114e-03 L5_spectral:1.6062e-03 L6_spectral:1.6027e-03 L7_spectral:1.6182e-03 L8_spectral:1.6139e-03 L9_spectral:1.6192e-03 L10_spectral:1.6213e-03 L11_spectral:1.6047e-03 L12_spectral:1.6026e-03 train_time:118972ms step_avg:49.57ms +[2025-09-11 14:33:58] [Rank 0] PRINT: step:2400/10000 
val_loss:4.9878 total_sharp:1.9609e-04 L1_sharp:5.0469e-02 L2_sharp:6.5528e-02 L3_sharp:8.0678e-02 L4_sharp:1.0209e-01 L5_sharp:1.0229e-01 L6_sharp:1.1173e-01 L7_sharp:1.1125e-01 L8_sharp:1.4850e-01 L9_sharp:1.7224e-01 L10_sharp:2.1206e-01 L11_sharp:2.8334e-01 L12_sharp:9.6136e-01 total_fnorm:7.1000e+01 total_l1_linf:1.3517e+05 total_spectral:3.5500e+01 L1_fnorm:1.1572e-01 L2_fnorm:1.1621e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1621e-01 L5_fnorm:1.1572e-01 L6_fnorm:1.1621e-01 L7_fnorm:1.1621e-01 L8_fnorm:1.1523e-01 L9_fnorm:1.1670e-01 L10_fnorm:1.1621e-01 L11_fnorm:1.1768e-01 L12_fnorm:1.1670e-01 L1_l1linf:3.5645e-02 L2_l1linf:3.5889e-02 L3_l1linf:3.5645e-02 L4_l1linf:3.5645e-02 L5_l1linf:3.5400e-02 L6_l1linf:3.4912e-02 L7_l1linf:3.5156e-02 L8_l1linf:3.5156e-02 L9_l1linf:3.4912e-02 L10_l1linf:3.5400e-02 L11_l1linf:3.6133e-02 L12_l1linf:3.7842e-02 L1_spectral:1.6026e-03 L2_spectral:1.6124e-03 L3_spectral:1.6104e-03 L4_spectral:1.6114e-03 L5_spectral:1.6062e-03 L6_spectral:1.6027e-03 L7_spectral:1.6182e-03 L8_spectral:1.6139e-03 L9_spectral:1.6192e-03 L10_spectral:1.6213e-03 L11_spectral:1.6047e-03 L12_spectral:1.6026e-03 train_time:118972ms step_avg:49.57ms +[2025-09-11 14:34:00] [Rank 0] step:2401/10000 train_time:120602ms step_avg:50.23ms +[2025-09-11 14:34:00] [Rank 0] step:2401/10000 train_time:120602ms step_avg:50.23ms +[2025-09-11 14:34:00] [Rank 0] step:2421/10000 train_time:121270ms step_avg:50.09ms +[2025-09-11 14:34:00] [Rank 0] step:2421/10000 train_time:121270ms step_avg:50.09ms +[2025-09-11 14:34:01] [Rank 0] step:2441/10000 train_time:121939ms step_avg:49.95ms +[2025-09-11 14:34:01] [Rank 0] step:2441/10000 train_time:121939ms step_avg:49.95ms +[2025-09-11 14:34:02] [Rank 0] step:2461/10000 train_time:122604ms step_avg:49.82ms +[2025-09-11 14:34:02] [Rank 0] step:2461/10000 train_time:122604ms step_avg:49.82ms +[2025-09-11 14:34:02] [Rank 0] step:2481/10000 train_time:123270ms step_avg:49.69ms +[2025-09-11 14:34:02] [Rank 0] step:2481/10000 
train_time:123270ms step_avg:49.69ms +[2025-09-11 14:34:03] [Rank 0] step:2501/10000 train_time:123935ms step_avg:49.55ms +[2025-09-11 14:34:03] [Rank 0] step:2501/10000 train_time:123935ms step_avg:49.55ms +[2025-09-11 14:34:04] [Rank 0] step:2521/10000 train_time:124602ms step_avg:49.43ms +[2025-09-11 14:34:04] [Rank 0] step:2521/10000 train_time:124602ms step_avg:49.43ms +[2025-09-11 14:34:04] [Rank 0] step:2541/10000 train_time:125266ms step_avg:49.30ms +[2025-09-11 14:34:04] [Rank 0] step:2541/10000 train_time:125266ms step_avg:49.30ms +[2025-09-11 14:34:05] [Rank 0] step:2561/10000 train_time:125930ms step_avg:49.17ms +[2025-09-11 14:34:05] [Rank 0] step:2561/10000 train_time:125930ms step_avg:49.17ms +[2025-09-11 14:34:06] [Rank 0] step:2581/10000 train_time:126594ms step_avg:49.05ms +[2025-09-11 14:34:06] [Rank 0] step:2581/10000 train_time:126594ms step_avg:49.05ms +[2025-09-11 14:34:06] [Rank 0] step:2601/10000 train_time:127259ms step_avg:48.93ms +[2025-09-11 14:34:06] [Rank 0] step:2601/10000 train_time:127259ms step_avg:48.93ms +[2025-09-11 14:34:07] [Rank 0] step:2621/10000 train_time:127923ms step_avg:48.81ms +[2025-09-11 14:34:07] [Rank 0] step:2621/10000 train_time:127923ms step_avg:48.81ms +[2025-09-11 14:34:08] [Rank 0] step:2641/10000 train_time:128588ms step_avg:48.69ms +[2025-09-11 14:34:08] [Rank 0] step:2641/10000 train_time:128588ms step_avg:48.69ms +[2025-09-11 14:34:08] [Rank 0] step:2661/10000 train_time:129252ms step_avg:48.57ms +[2025-09-11 14:34:08] [Rank 0] step:2661/10000 train_time:129252ms step_avg:48.57ms +[2025-09-11 14:34:09] [Rank 0] step:2681/10000 train_time:129916ms step_avg:48.46ms +[2025-09-11 14:34:09] [Rank 0] step:2681/10000 train_time:129916ms step_avg:48.46ms +[2025-09-11 14:34:10] [Rank 0] step:2701/10000 train_time:130581ms step_avg:48.35ms +[2025-09-11 14:34:10] [Rank 0] step:2701/10000 train_time:130581ms step_avg:48.35ms +[2025-09-11 14:34:10] [Rank 0] step:2721/10000 train_time:131247ms step_avg:48.23ms 
+[2025-09-11 14:34:10] [Rank 0] step:2721/10000 train_time:131247ms step_avg:48.23ms +[2025-09-11 14:34:11] [Rank 0] step:2741/10000 train_time:131919ms step_avg:48.13ms +[2025-09-11 14:34:11] [Rank 0] step:2741/10000 train_time:131919ms step_avg:48.13ms +[2025-09-11 14:34:12] [Rank 0] step:2761/10000 train_time:132583ms step_avg:48.02ms +[2025-09-11 14:34:12] [Rank 0] step:2761/10000 train_time:132583ms step_avg:48.02ms +[2025-09-11 14:34:12] [Rank 0] step:2781/10000 train_time:133247ms step_avg:47.91ms +[2025-09-11 14:34:12] [Rank 0] step:2781/10000 train_time:133247ms step_avg:47.91ms +[2025-09-11 14:34:13] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 14:34:13] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 14:34:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 14:34:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 14:34:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 14:34:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 14:34:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:34:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:34:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 14:34:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 14:34:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 14:34:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 14:34:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 14:34:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 14:34:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 14:34:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 14:34:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 14:34:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 14:34:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 14:34:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 14:34:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 14:34:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 14:34:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 14:34:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 14:34:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 14:34:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 14:34:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 14:34:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 14:34:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 14:34:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 14:34:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 14:34:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 14:34:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 14:34:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 14:34:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 14:34:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 14:34:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 14:34:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 14:34:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 14:34:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 14:34:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 14:34:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 14:34:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 14:34:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 14:34:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 14:34:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 14:34:23] [Rank 0] PRINT: step:2800/10000 val_loss:4.8939 total_sharp:1.8941e-04 L1_sharp:4.9221e-02 L2_sharp:5.1622e-02 L3_sharp:7.7546e-02 L4_sharp:9.3636e-02 L5_sharp:9.3218e-02 L6_sharp:1.0733e-01 L7_sharp:1.1434e-01 L8_sharp:1.4640e-01 L9_sharp:1.7638e-01 L10_sharp:2.5026e-01 L11_sharp:2.9465e-01 L12_sharp:9.7899e-01 total_fnorm:6.9000e+01 total_l1_linf:1.3005e+05 total_spectral:3.4500e+01 L1_fnorm:1.1572e-01 L2_fnorm:1.1621e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1621e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1572e-01 L7_fnorm:1.1572e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1572e-01 L10_fnorm:1.1621e-01 L11_fnorm:1.1670e-01 L12_fnorm:1.1670e-01 L1_l1linf:3.5156e-02 L2_l1linf:3.4180e-02 L3_l1linf:3.4424e-02 L4_l1linf:3.3691e-02 L5_l1linf:3.3447e-02 L6_l1linf:3.3936e-02 L7_l1linf:3.3203e-02 L8_l1linf:3.3203e-02 L9_l1linf:3.3447e-02 L10_l1linf:3.4180e-02 L11_l1linf:3.4424e-02 L12_l1linf:3.5645e-02 L1_spectral:1.6047e-03 L2_spectral:1.6068e-03 L3_spectral:1.6147e-03 L4_spectral:1.6144e-03 L5_spectral:1.6163e-03 L6_spectral:1.6100e-03 L7_spectral:1.6038e-03 L8_spectral:1.6066e-03 L9_spectral:1.6021e-03 L10_spectral:1.6109e-03 L11_spectral:1.6114e-03 L12_spectral:1.6057e-03 train_time:133893ms step_avg:47.82ms +[2025-09-11 14:34:23] [Rank 0] PRINT: step:2800/10000 val_loss:4.8939 total_sharp:1.8941e-04 L1_sharp:4.9221e-02 L2_sharp:5.1622e-02 L3_sharp:7.7546e-02 L4_sharp:9.3636e-02 L5_sharp:9.3218e-02 L6_sharp:1.0733e-01 L7_sharp:1.1434e-01 L8_sharp:1.4640e-01 L9_sharp:1.7638e-01 L10_sharp:2.5026e-01 L11_sharp:2.9465e-01 L12_sharp:9.7899e-01 total_fnorm:6.9000e+01 total_l1_linf:1.3005e+05 total_spectral:3.4500e+01 L1_fnorm:1.1572e-01 L2_fnorm:1.1621e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1621e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1572e-01 L7_fnorm:1.1572e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1572e-01 L10_fnorm:1.1621e-01 L11_fnorm:1.1670e-01 L12_fnorm:1.1670e-01 L1_l1linf:3.5156e-02 L2_l1linf:3.4180e-02 L3_l1linf:3.4424e-02 L4_l1linf:3.3691e-02 L5_l1linf:3.3447e-02 
L6_l1linf:3.3936e-02 L7_l1linf:3.3203e-02 L8_l1linf:3.3203e-02 L9_l1linf:3.3447e-02 L10_l1linf:3.4180e-02 L11_l1linf:3.4424e-02 L12_l1linf:3.5645e-02 L1_spectral:1.6047e-03 L2_spectral:1.6068e-03 L3_spectral:1.6147e-03 L4_spectral:1.6144e-03 L5_spectral:1.6163e-03 L6_spectral:1.6100e-03 L7_spectral:1.6038e-03 L8_spectral:1.6066e-03 L9_spectral:1.6021e-03 L10_spectral:1.6109e-03 L11_spectral:1.6114e-03 L12_spectral:1.6057e-03 train_time:133893ms step_avg:47.82ms +[2025-09-11 14:34:25] [Rank 0] step:2801/10000 train_time:135477ms step_avg:48.37ms +[2025-09-11 14:34:25] [Rank 0] step:2801/10000 train_time:135477ms step_avg:48.37ms +[2025-09-11 14:34:26] [Rank 0] step:2821/10000 train_time:136145ms step_avg:48.26ms +[2025-09-11 14:34:26] [Rank 0] step:2821/10000 train_time:136145ms step_avg:48.26ms +[2025-09-11 14:34:27] [Rank 0] step:2841/10000 train_time:137121ms step_avg:48.27ms +[2025-09-11 14:34:27] [Rank 0] step:2841/10000 train_time:137121ms step_avg:48.27ms +[2025-09-11 14:34:27] [Rank 0] step:2861/10000 train_time:137785ms step_avg:48.16ms +[2025-09-11 14:34:27] [Rank 0] step:2861/10000 train_time:137785ms step_avg:48.16ms +[2025-09-11 14:34:28] [Rank 0] step:2881/10000 train_time:138450ms step_avg:48.06ms +[2025-09-11 14:34:28] [Rank 0] step:2881/10000 train_time:138450ms step_avg:48.06ms +[2025-09-11 14:34:29] [Rank 0] step:2901/10000 train_time:139260ms step_avg:48.00ms +[2025-09-11 14:34:29] [Rank 0] step:2901/10000 train_time:139260ms step_avg:48.00ms +[2025-09-11 14:34:29] [Rank 0] step:2921/10000 train_time:140032ms step_avg:47.94ms +[2025-09-11 14:34:29] [Rank 0] step:2921/10000 train_time:140032ms step_avg:47.94ms +[2025-09-11 14:34:30] [Rank 0] step:2941/10000 train_time:140697ms step_avg:47.84ms +[2025-09-11 14:34:30] [Rank 0] step:2941/10000 train_time:140697ms step_avg:47.84ms +[2025-09-11 14:34:31] [Rank 0] step:2961/10000 train_time:141361ms step_avg:47.74ms +[2025-09-11 14:34:31] [Rank 0] step:2961/10000 train_time:141361ms step_avg:47.74ms 
+[2025-09-11 14:34:31] [Rank 0] step:2981/10000 train_time:142027ms step_avg:47.64ms +[2025-09-11 14:34:31] [Rank 0] step:2981/10000 train_time:142027ms step_avg:47.64ms +[2025-09-11 14:34:32] [Rank 0] step:3001/10000 train_time:142694ms step_avg:47.55ms +[2025-09-11 14:34:32] [Rank 0] step:3001/10000 train_time:142694ms step_avg:47.55ms +[2025-09-11 14:34:33] [Rank 0] step:3021/10000 train_time:143361ms step_avg:47.45ms +[2025-09-11 14:34:33] [Rank 0] step:3021/10000 train_time:143361ms step_avg:47.45ms +[2025-09-11 14:34:33] [Rank 0] step:3041/10000 train_time:144028ms step_avg:47.36ms +[2025-09-11 14:34:33] [Rank 0] step:3041/10000 train_time:144028ms step_avg:47.36ms +[2025-09-11 14:34:34] [Rank 0] step:3061/10000 train_time:144694ms step_avg:47.27ms +[2025-09-11 14:34:34] [Rank 0] step:3061/10000 train_time:144694ms step_avg:47.27ms +[2025-09-11 14:34:35] [Rank 0] step:3081/10000 train_time:145362ms step_avg:47.18ms +[2025-09-11 14:34:35] [Rank 0] step:3081/10000 train_time:145362ms step_avg:47.18ms +[2025-09-11 14:34:35] [Rank 0] step:3101/10000 train_time:146028ms step_avg:47.09ms +[2025-09-11 14:34:35] [Rank 0] step:3101/10000 train_time:146028ms step_avg:47.09ms +[2025-09-11 14:34:36] [Rank 0] step:3121/10000 train_time:146695ms step_avg:47.00ms +[2025-09-11 14:34:36] [Rank 0] step:3121/10000 train_time:146695ms step_avg:47.00ms +[2025-09-11 14:34:37] [Rank 0] step:3141/10000 train_time:147362ms step_avg:46.92ms +[2025-09-11 14:34:37] [Rank 0] step:3141/10000 train_time:147362ms step_avg:46.92ms +[2025-09-11 14:34:37] [Rank 0] step:3161/10000 train_time:148028ms step_avg:46.83ms +[2025-09-11 14:34:37] [Rank 0] step:3161/10000 train_time:148028ms step_avg:46.83ms +[2025-09-11 14:34:38] [Rank 0] step:3181/10000 train_time:148694ms step_avg:46.74ms +[2025-09-11 14:34:38] [Rank 0] step:3181/10000 train_time:148694ms step_avg:46.74ms +[2025-09-11 14:34:39] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 14:34:39] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 14:34:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 14:34:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 14:34:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 14:34:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 14:34:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:34:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:34:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 14:34:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 14:34:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 14:34:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 14:34:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 14:34:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 14:34:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 14:34:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 14:34:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 14:34:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 14:34:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 14:34:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 14:34:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 14:34:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 14:34:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 14:34:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 14:34:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 14:34:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 14:34:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 14:34:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 14:34:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 14:34:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 14:34:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 14:34:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 14:34:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 14:34:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 14:34:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 14:34:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 14:34:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 14:34:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 14:34:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 14:34:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 14:34:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 14:34:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 14:34:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 14:34:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 14:34:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 14:34:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 14:34:49] [Rank 0] PRINT: step:3200/10000 val_loss:4.7918 total_sharp:1.3002e-04 L1_sharp:4.0045e-02 L2_sharp:5.2522e-02 L3_sharp:7.3244e-02 L4_sharp:8.7141e-02 L5_sharp:8.6279e-02 L6_sharp:9.3602e-02 L7_sharp:1.1436e-01 L8_sharp:1.3878e-01 L9_sharp:1.4732e-01 L10_sharp:2.0246e-01 L11_sharp:2.7063e-01 L12_sharp:5.6043e-01 total_fnorm:7.8000e+01 total_l1_linf:1.5462e+05 total_spectral:3.9000e+01 L1_fnorm:1.1572e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1377e-01 L9_fnorm:1.1475e-01 L10_fnorm:1.1523e-01 L11_fnorm:1.1621e-01 L12_fnorm:1.1621e-01 L1_l1linf:3.2471e-02 L2_l1linf:3.3203e-02 L3_l1linf:3.2227e-02 L4_l1linf:3.2471e-02 L5_l1linf:3.2715e-02 L6_l1linf:3.1738e-02 L7_l1linf:3.2471e-02 L8_l1linf:3.2227e-02 L9_l1linf:3.1738e-02 L10_l1linf:3.1738e-02 L11_l1linf:3.2471e-02 L12_l1linf:3.3936e-02 L1_spectral:1.6020e-03 L2_spectral:1.6049e-03 L3_spectral:1.6043e-03 L4_spectral:1.6042e-03 L5_spectral:1.5982e-03 L6_spectral:1.6099e-03 L7_spectral:1.6087e-03 L8_spectral:1.6083e-03 L9_spectral:1.6160e-03 L10_spectral:1.6113e-03 L11_spectral:1.6094e-03 L12_spectral:1.6219e-03 train_time:149342ms step_avg:46.67ms +[2025-09-11 14:34:49] [Rank 0] PRINT: step:3200/10000 
val_loss:4.7918 total_sharp:1.3002e-04 L1_sharp:4.0045e-02 L2_sharp:5.2522e-02 L3_sharp:7.3244e-02 L4_sharp:8.7141e-02 L5_sharp:8.6279e-02 L6_sharp:9.3602e-02 L7_sharp:1.1436e-01 L8_sharp:1.3878e-01 L9_sharp:1.4732e-01 L10_sharp:2.0246e-01 L11_sharp:2.7063e-01 L12_sharp:5.6043e-01 total_fnorm:7.8000e+01 total_l1_linf:1.5462e+05 total_spectral:3.9000e+01 L1_fnorm:1.1572e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1377e-01 L9_fnorm:1.1475e-01 L10_fnorm:1.1523e-01 L11_fnorm:1.1621e-01 L12_fnorm:1.1621e-01 L1_l1linf:3.2471e-02 L2_l1linf:3.3203e-02 L3_l1linf:3.2227e-02 L4_l1linf:3.2471e-02 L5_l1linf:3.2715e-02 L6_l1linf:3.1738e-02 L7_l1linf:3.2471e-02 L8_l1linf:3.2227e-02 L9_l1linf:3.1738e-02 L10_l1linf:3.1738e-02 L11_l1linf:3.2471e-02 L12_l1linf:3.3936e-02 L1_spectral:1.6020e-03 L2_spectral:1.6049e-03 L3_spectral:1.6043e-03 L4_spectral:1.6042e-03 L5_spectral:1.5982e-03 L6_spectral:1.6099e-03 L7_spectral:1.6087e-03 L8_spectral:1.6083e-03 L9_spectral:1.6160e-03 L10_spectral:1.6113e-03 L11_spectral:1.6094e-03 L12_spectral:1.6219e-03 train_time:149342ms step_avg:46.67ms +[2025-09-11 14:34:51] [Rank 0] step:3201/10000 train_time:151082ms step_avg:47.20ms +[2025-09-11 14:34:51] [Rank 0] step:3201/10000 train_time:151082ms step_avg:47.20ms +[2025-09-11 14:34:52] [Rank 0] step:3221/10000 train_time:151754ms step_avg:47.11ms +[2025-09-11 14:34:52] [Rank 0] step:3221/10000 train_time:151754ms step_avg:47.11ms +[2025-09-11 14:34:52] [Rank 0] step:3241/10000 train_time:152421ms step_avg:47.03ms +[2025-09-11 14:34:52] [Rank 0] step:3241/10000 train_time:152421ms step_avg:47.03ms +[2025-09-11 14:34:53] [Rank 0] step:3261/10000 train_time:153088ms step_avg:46.95ms +[2025-09-11 14:34:53] [Rank 0] step:3261/10000 train_time:153088ms step_avg:46.95ms +[2025-09-11 14:34:54] [Rank 0] step:3281/10000 train_time:153754ms step_avg:46.86ms +[2025-09-11 14:34:54] [Rank 0] step:3281/10000 
train_time:153754ms step_avg:46.86ms +[2025-09-11 14:34:54] [Rank 0] step:3301/10000 train_time:154421ms step_avg:46.78ms +[2025-09-11 14:34:54] [Rank 0] step:3301/10000 train_time:154421ms step_avg:46.78ms +[2025-09-11 14:34:55] [Rank 0] step:3321/10000 train_time:155088ms step_avg:46.70ms +[2025-09-11 14:34:55] [Rank 0] step:3321/10000 train_time:155088ms step_avg:46.70ms +[2025-09-11 14:34:56] [Rank 0] step:3341/10000 train_time:155756ms step_avg:46.62ms +[2025-09-11 14:34:56] [Rank 0] step:3341/10000 train_time:155756ms step_avg:46.62ms +[2025-09-11 14:34:56] [Rank 0] step:3361/10000 train_time:156423ms step_avg:46.54ms +[2025-09-11 14:34:56] [Rank 0] step:3361/10000 train_time:156423ms step_avg:46.54ms +[2025-09-11 14:34:57] [Rank 0] step:3381/10000 train_time:157089ms step_avg:46.46ms +[2025-09-11 14:34:57] [Rank 0] step:3381/10000 train_time:157089ms step_avg:46.46ms +[2025-09-11 14:34:58] [Rank 0] step:3401/10000 train_time:157755ms step_avg:46.38ms +[2025-09-11 14:34:58] [Rank 0] step:3401/10000 train_time:157755ms step_avg:46.38ms +[2025-09-11 14:34:58] [Rank 0] step:3421/10000 train_time:158421ms step_avg:46.31ms +[2025-09-11 14:34:58] [Rank 0] step:3421/10000 train_time:158421ms step_avg:46.31ms +[2025-09-11 14:34:59] [Rank 0] step:3441/10000 train_time:159088ms step_avg:46.23ms +[2025-09-11 14:34:59] [Rank 0] step:3441/10000 train_time:159088ms step_avg:46.23ms +[2025-09-11 14:35:00] [Rank 0] step:3461/10000 train_time:159754ms step_avg:46.16ms +[2025-09-11 14:35:00] [Rank 0] step:3461/10000 train_time:159754ms step_avg:46.16ms +[2025-09-11 14:35:00] [Rank 0] step:3481/10000 train_time:160421ms step_avg:46.08ms +[2025-09-11 14:35:00] [Rank 0] step:3481/10000 train_time:160421ms step_avg:46.08ms +[2025-09-11 14:35:01] [Rank 0] step:3501/10000 train_time:161143ms step_avg:46.03ms +[2025-09-11 14:35:01] [Rank 0] step:3501/10000 train_time:161143ms step_avg:46.03ms +[2025-09-11 14:35:02] [Rank 0] step:3521/10000 train_time:161870ms step_avg:45.97ms 
+[2025-09-11 14:35:02] [Rank 0] step:3521/10000 train_time:161870ms step_avg:45.97ms +[2025-09-11 14:35:02] [Rank 0] step:3541/10000 train_time:162537ms step_avg:45.90ms +[2025-09-11 14:35:02] [Rank 0] step:3541/10000 train_time:162537ms step_avg:45.90ms +[2025-09-11 14:35:03] [Rank 0] step:3561/10000 train_time:163204ms step_avg:45.83ms +[2025-09-11 14:35:03] [Rank 0] step:3561/10000 train_time:163204ms step_avg:45.83ms +[2025-09-11 14:35:04] [Rank 0] step:3581/10000 train_time:163870ms step_avg:45.76ms +[2025-09-11 14:35:04] [Rank 0] step:3581/10000 train_time:163870ms step_avg:45.76ms +[2025-09-11 14:35:04] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 14:35:04] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 14:35:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 14:35:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 14:35:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 14:35:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 14:35:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:35:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:35:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 14:35:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 14:35:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 14:35:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 14:35:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 14:35:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 14:35:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 14:35:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 14:35:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 14:35:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 14:35:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 14:35:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 14:35:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 14:35:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 14:35:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 14:35:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 14:35:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 14:35:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 14:35:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 14:35:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 14:35:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 14:35:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 14:35:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 14:35:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 14:35:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 14:35:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 14:35:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 14:35:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 14:35:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 14:35:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 14:35:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 14:35:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 14:35:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 14:35:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 14:35:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 14:35:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 14:35:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 14:35:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 14:35:17] [Rank 0] PRINT: step:3600/10000 val_loss:4.7178 total_sharp:1.5889e-04 L1_sharp:3.4976e-02 L2_sharp:5.1269e-02 L3_sharp:7.0017e-02 L4_sharp:8.0476e-02 L5_sharp:7.7280e-02 L6_sharp:9.5794e-02 L7_sharp:1.0761e-01 L8_sharp:1.3391e-01 L9_sharp:1.5398e-01 L10_sharp:2.2431e-01 L11_sharp:2.7934e-01 L12_sharp:1.4716e+00 total_fnorm:7.0000e+01 total_l1_linf:1.3312e+05 total_spectral:3.5000e+01 L1_fnorm:1.1572e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1328e-01 L9_fnorm:1.1523e-01 L10_fnorm:1.1475e-01 L11_fnorm:1.1572e-01 L12_fnorm:1.1572e-01 L1_l1linf:3.1250e-02 L2_l1linf:3.1494e-02 L3_l1linf:3.1738e-02 L4_l1linf:3.1128e-02 L5_l1linf:3.1494e-02 L6_l1linf:3.1738e-02 L7_l1linf:3.1494e-02 L8_l1linf:3.1250e-02 L9_l1linf:3.1494e-02 L10_l1linf:3.0762e-02 L11_l1linf:3.2471e-02 L12_l1linf:3.2959e-02 L1_spectral:1.6114e-03 L2_spectral:1.6055e-03 L3_spectral:1.6063e-03 L4_spectral:1.6193e-03 L5_spectral:1.6048e-03 L6_spectral:1.6013e-03 L7_spectral:1.6121e-03 L8_spectral:1.6007e-03 L9_spectral:1.6144e-03 L10_spectral:1.6099e-03 L11_spectral:1.6201e-03 L12_spectral:1.6094e-03 train_time:164517ms step_avg:45.70ms +[2025-09-11 14:35:17] [Rank 0] PRINT: step:3600/10000 val_loss:4.7178 total_sharp:1.5889e-04 L1_sharp:3.4976e-02 L2_sharp:5.1269e-02 L3_sharp:7.0017e-02 L4_sharp:8.0476e-02 L5_sharp:7.7280e-02 L6_sharp:9.5794e-02 L7_sharp:1.0761e-01 L8_sharp:1.3391e-01 L9_sharp:1.5398e-01 L10_sharp:2.2431e-01 L11_sharp:2.7934e-01 L12_sharp:1.4716e+00 total_fnorm:7.0000e+01 total_l1_linf:1.3312e+05 total_spectral:3.5000e+01 L1_fnorm:1.1572e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1328e-01 L9_fnorm:1.1523e-01 L10_fnorm:1.1475e-01 L11_fnorm:1.1572e-01 L12_fnorm:1.1572e-01 L1_l1linf:3.1250e-02 L2_l1linf:3.1494e-02 L3_l1linf:3.1738e-02 L4_l1linf:3.1128e-02 L5_l1linf:3.1494e-02 
L6_l1linf:3.1738e-02 L7_l1linf:3.1494e-02 L8_l1linf:3.1250e-02 L9_l1linf:3.1494e-02 L10_l1linf:3.0762e-02 L11_l1linf:3.2471e-02 L12_l1linf:3.2959e-02 L1_spectral:1.6114e-03 L2_spectral:1.6055e-03 L3_spectral:1.6063e-03 L4_spectral:1.6193e-03 L5_spectral:1.6048e-03 L6_spectral:1.6013e-03 L7_spectral:1.6121e-03 L8_spectral:1.6007e-03 L9_spectral:1.6144e-03 L10_spectral:1.6099e-03 L11_spectral:1.6201e-03 L12_spectral:1.6094e-03 train_time:164517ms step_avg:45.70ms +[2025-09-11 14:35:18] [Rank 0] step:3601/10000 train_time:166194ms step_avg:46.15ms +[2025-09-11 14:35:18] [Rank 0] step:3601/10000 train_time:166194ms step_avg:46.15ms +[2025-09-11 14:35:19] [Rank 0] step:3621/10000 train_time:166869ms step_avg:46.08ms +[2025-09-11 14:35:19] [Rank 0] step:3621/10000 train_time:166869ms step_avg:46.08ms +[2025-09-11 14:35:20] [Rank 0] step:3641/10000 train_time:167536ms step_avg:46.01ms +[2025-09-11 14:35:20] [Rank 0] step:3641/10000 train_time:167536ms step_avg:46.01ms +[2025-09-11 14:35:20] [Rank 0] step:3661/10000 train_time:168204ms step_avg:45.94ms +[2025-09-11 14:35:20] [Rank 0] step:3661/10000 train_time:168204ms step_avg:45.94ms +[2025-09-11 14:35:21] [Rank 0] step:3681/10000 train_time:168871ms step_avg:45.88ms +[2025-09-11 14:35:21] [Rank 0] step:3681/10000 train_time:168871ms step_avg:45.88ms +[2025-09-11 14:35:22] [Rank 0] step:3701/10000 train_time:169538ms step_avg:45.81ms +[2025-09-11 14:35:22] [Rank 0] step:3701/10000 train_time:169538ms step_avg:45.81ms +[2025-09-11 14:35:22] [Rank 0] step:3721/10000 train_time:170215ms step_avg:45.74ms +[2025-09-11 14:35:22] [Rank 0] step:3721/10000 train_time:170215ms step_avg:45.74ms +[2025-09-11 14:35:23] [Rank 0] step:3741/10000 train_time:170891ms step_avg:45.68ms +[2025-09-11 14:35:23] [Rank 0] step:3741/10000 train_time:170891ms step_avg:45.68ms +[2025-09-11 14:35:24] [Rank 0] step:3761/10000 train_time:171569ms step_avg:45.62ms +[2025-09-11 14:35:24] [Rank 0] step:3761/10000 train_time:171569ms step_avg:45.62ms 
+[2025-09-11 14:35:24] [Rank 0] step:3781/10000 train_time:172246ms step_avg:45.56ms +[2025-09-11 14:35:24] [Rank 0] step:3781/10000 train_time:172246ms step_avg:45.56ms +[2025-09-11 14:35:25] [Rank 0] step:3801/10000 train_time:172925ms step_avg:45.49ms +[2025-09-11 14:35:25] [Rank 0] step:3801/10000 train_time:172925ms step_avg:45.49ms +[2025-09-11 14:35:26] [Rank 0] step:3821/10000 train_time:173602ms step_avg:45.43ms +[2025-09-11 14:35:26] [Rank 0] step:3821/10000 train_time:173602ms step_avg:45.43ms +[2025-09-11 14:35:26] [Rank 0] step:3841/10000 train_time:174279ms step_avg:45.37ms +[2025-09-11 14:35:26] [Rank 0] step:3841/10000 train_time:174279ms step_avg:45.37ms +[2025-09-11 14:35:27] [Rank 0] step:3861/10000 train_time:174956ms step_avg:45.31ms +[2025-09-11 14:35:27] [Rank 0] step:3861/10000 train_time:174956ms step_avg:45.31ms +[2025-09-11 14:35:28] [Rank 0] step:3881/10000 train_time:175633ms step_avg:45.25ms +[2025-09-11 14:35:28] [Rank 0] step:3881/10000 train_time:175633ms step_avg:45.25ms +[2025-09-11 14:35:28] [Rank 0] step:3901/10000 train_time:176310ms step_avg:45.20ms +[2025-09-11 14:35:28] [Rank 0] step:3901/10000 train_time:176310ms step_avg:45.20ms +[2025-09-11 14:35:29] [Rank 0] step:3921/10000 train_time:177139ms step_avg:45.18ms +[2025-09-11 14:35:29] [Rank 0] step:3921/10000 train_time:177139ms step_avg:45.18ms +[2025-09-11 14:35:30] [Rank 0] step:3941/10000 train_time:177948ms step_avg:45.15ms +[2025-09-11 14:35:30] [Rank 0] step:3941/10000 train_time:177948ms step_avg:45.15ms +[2025-09-11 14:35:31] [Rank 0] step:3961/10000 train_time:178625ms step_avg:45.10ms +[2025-09-11 14:35:31] [Rank 0] step:3961/10000 train_time:178625ms step_avg:45.10ms +[2025-09-11 14:35:31] [Rank 0] step:3981/10000 train_time:179303ms step_avg:45.04ms +[2025-09-11 14:35:31] [Rank 0] step:3981/10000 train_time:179303ms step_avg:45.04ms +[2025-09-11 14:35:32] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 14:35:32] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 14:35:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 14:35:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 14:35:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 14:35:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 14:35:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:35:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:35:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 14:35:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 14:35:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 14:35:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 14:35:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 14:35:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 14:35:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 14:35:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 14:35:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 14:35:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 14:35:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 14:35:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 14:35:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 14:35:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 14:35:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 14:35:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 14:35:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 14:35:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 14:35:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 14:35:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 14:35:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 14:35:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 14:35:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 14:35:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 14:35:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 14:35:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 14:35:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 14:35:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 14:35:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 14:35:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 14:35:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 14:35:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 14:35:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 14:35:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 14:35:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 14:35:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 14:35:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 14:35:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 14:35:43] [Rank 0] PRINT: step:4000/10000 val_loss:4.6418 total_sharp:1.7014e-04 L1_sharp:4.1883e-02 L2_sharp:5.4604e-02 L3_sharp:7.9157e-02 L4_sharp:9.3089e-02 L5_sharp:9.9920e-02 L6_sharp:1.4000e-01 L7_sharp:1.5604e-01 L8_sharp:1.9735e-01 L9_sharp:2.2244e-01 L10_sharp:3.4571e-01 L11_sharp:7.7092e-01 L12_sharp:1.9016e+00 total_fnorm:8.1000e+01 total_l1_linf:1.5462e+05 total_spectral:4.0250e+01 L1_fnorm:1.1523e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1279e-01 L9_fnorm:1.1475e-01 L10_fnorm:1.1523e-01 L11_fnorm:1.1621e-01 L12_fnorm:1.1621e-01 L1_l1linf:3.2227e-02 L2_l1linf:3.2471e-02 L3_l1linf:3.2715e-02 L4_l1linf:3.2227e-02 L5_l1linf:3.1738e-02 L6_l1linf:3.1250e-02 L7_l1linf:3.1738e-02 L8_l1linf:3.0884e-02 L9_l1linf:3.1006e-02 L10_l1linf:3.1250e-02 L11_l1linf:3.1982e-02 L12_l1linf:3.3203e-02 L1_spectral:1.5973e-03 L2_spectral:1.6095e-03 L3_spectral:1.5967e-03 L4_spectral:1.6079e-03 L5_spectral:1.6112e-03 L6_spectral:1.6241e-03 L7_spectral:1.6103e-03 L8_spectral:1.6041e-03 L9_spectral:1.6277e-03 L10_spectral:1.6293e-03 L11_spectral:1.6187e-03 L12_spectral:1.6054e-03 train_time:180251ms step_avg:45.06ms +[2025-09-11 14:35:43] [Rank 0] PRINT: step:4000/10000 
val_loss:4.6418 total_sharp:1.7014e-04 L1_sharp:4.1883e-02 L2_sharp:5.4604e-02 L3_sharp:7.9157e-02 L4_sharp:9.3089e-02 L5_sharp:9.9920e-02 L6_sharp:1.4000e-01 L7_sharp:1.5604e-01 L8_sharp:1.9735e-01 L9_sharp:2.2244e-01 L10_sharp:3.4571e-01 L11_sharp:7.7092e-01 L12_sharp:1.9016e+00 total_fnorm:8.1000e+01 total_l1_linf:1.5462e+05 total_spectral:4.0250e+01 L1_fnorm:1.1523e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1279e-01 L9_fnorm:1.1475e-01 L10_fnorm:1.1523e-01 L11_fnorm:1.1621e-01 L12_fnorm:1.1621e-01 L1_l1linf:3.2227e-02 L2_l1linf:3.2471e-02 L3_l1linf:3.2715e-02 L4_l1linf:3.2227e-02 L5_l1linf:3.1738e-02 L6_l1linf:3.1250e-02 L7_l1linf:3.1738e-02 L8_l1linf:3.0884e-02 L9_l1linf:3.1006e-02 L10_l1linf:3.1250e-02 L11_l1linf:3.1982e-02 L12_l1linf:3.3203e-02 L1_spectral:1.5973e-03 L2_spectral:1.6095e-03 L3_spectral:1.5967e-03 L4_spectral:1.6079e-03 L5_spectral:1.6112e-03 L6_spectral:1.6241e-03 L7_spectral:1.6103e-03 L8_spectral:1.6041e-03 L9_spectral:1.6277e-03 L10_spectral:1.6293e-03 L11_spectral:1.6187e-03 L12_spectral:1.6054e-03 train_time:180251ms step_avg:45.06ms +[2025-09-11 14:35:45] [Rank 0] step:4001/10000 train_time:182055ms step_avg:45.50ms +[2025-09-11 14:35:45] [Rank 0] step:4001/10000 train_time:182055ms step_avg:45.50ms +[2025-09-11 14:35:46] [Rank 0] step:4021/10000 train_time:182737ms step_avg:45.45ms +[2025-09-11 14:35:46] [Rank 0] step:4021/10000 train_time:182737ms step_avg:45.45ms +[2025-09-11 14:35:46] [Rank 0] step:4041/10000 train_time:183414ms step_avg:45.39ms +[2025-09-11 14:35:46] [Rank 0] step:4041/10000 train_time:183414ms step_avg:45.39ms +[2025-09-11 14:35:47] [Rank 0] step:4061/10000 train_time:184090ms step_avg:45.33ms +[2025-09-11 14:35:47] [Rank 0] step:4061/10000 train_time:184090ms step_avg:45.33ms +[2025-09-11 14:35:48] [Rank 0] step:4081/10000 train_time:184767ms step_avg:45.27ms +[2025-09-11 14:35:48] [Rank 0] step:4081/10000 
train_time:184767ms step_avg:45.27ms +[2025-09-11 14:35:48] [Rank 0] step:4101/10000 train_time:185442ms step_avg:45.22ms +[2025-09-11 14:35:48] [Rank 0] step:4101/10000 train_time:185442ms step_avg:45.22ms +[2025-09-11 14:35:49] [Rank 0] step:4121/10000 train_time:186120ms step_avg:45.16ms +[2025-09-11 14:35:49] [Rank 0] step:4121/10000 train_time:186120ms step_avg:45.16ms +[2025-09-11 14:35:50] [Rank 0] step:4141/10000 train_time:186796ms step_avg:45.11ms +[2025-09-11 14:35:50] [Rank 0] step:4141/10000 train_time:186796ms step_avg:45.11ms +[2025-09-11 14:35:50] [Rank 0] step:4161/10000 train_time:187473ms step_avg:45.05ms +[2025-09-11 14:35:50] [Rank 0] step:4161/10000 train_time:187473ms step_avg:45.05ms +[2025-09-11 14:35:51] [Rank 0] step:4181/10000 train_time:188149ms step_avg:45.00ms +[2025-09-11 14:35:51] [Rank 0] step:4181/10000 train_time:188149ms step_avg:45.00ms +[2025-09-11 14:35:52] [Rank 0] step:4201/10000 train_time:188826ms step_avg:44.95ms +[2025-09-11 14:35:52] [Rank 0] step:4201/10000 train_time:188826ms step_avg:44.95ms +[2025-09-11 14:35:52] [Rank 0] step:4221/10000 train_time:189502ms step_avg:44.89ms +[2025-09-11 14:35:52] [Rank 0] step:4221/10000 train_time:189502ms step_avg:44.89ms +[2025-09-11 14:35:53] [Rank 0] step:4241/10000 train_time:190178ms step_avg:44.84ms +[2025-09-11 14:35:53] [Rank 0] step:4241/10000 train_time:190178ms step_avg:44.84ms +[2025-09-11 14:35:54] [Rank 0] step:4261/10000 train_time:190855ms step_avg:44.79ms +[2025-09-11 14:35:54] [Rank 0] step:4261/10000 train_time:190855ms step_avg:44.79ms +[2025-09-11 14:35:54] [Rank 0] step:4281/10000 train_time:191532ms step_avg:44.74ms +[2025-09-11 14:35:54] [Rank 0] step:4281/10000 train_time:191532ms step_avg:44.74ms +[2025-09-11 14:35:55] [Rank 0] step:4301/10000 train_time:192209ms step_avg:44.69ms +[2025-09-11 14:35:55] [Rank 0] step:4301/10000 train_time:192209ms step_avg:44.69ms +[2025-09-11 14:35:56] [Rank 0] step:4321/10000 train_time:192885ms step_avg:44.64ms 
+[2025-09-11 14:35:56] [Rank 0] step:4321/10000 train_time:192885ms step_avg:44.64ms +[2025-09-11 14:35:56] [Rank 0] step:4341/10000 train_time:193562ms step_avg:44.59ms +[2025-09-11 14:35:56] [Rank 0] step:4341/10000 train_time:193562ms step_avg:44.59ms +[2025-09-11 14:35:57] [Rank 0] step:4361/10000 train_time:194237ms step_avg:44.54ms +[2025-09-11 14:35:57] [Rank 0] step:4361/10000 train_time:194237ms step_avg:44.54ms +[2025-09-11 14:35:58] [Rank 0] step:4381/10000 train_time:194914ms step_avg:44.49ms +[2025-09-11 14:35:58] [Rank 0] step:4381/10000 train_time:194914ms step_avg:44.49ms +[2025-09-11 14:35:58] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 14:35:58] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 14:35:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 14:35:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 14:36:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 14:36:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 14:36:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:36:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:36:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 14:36:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 14:36:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 14:36:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 14:36:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 14:36:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 14:36:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 14:36:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 14:36:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 14:36:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 14:36:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 14:36:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 14:36:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 14:36:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 14:36:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 14:36:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 14:36:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 14:36:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 14:36:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 14:36:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 14:36:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 14:36:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 14:36:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 14:36:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 14:36:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 14:36:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 14:36:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 14:36:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 14:36:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 14:36:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 14:36:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 14:36:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 14:36:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 14:36:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 14:36:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 14:36:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 14:36:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 14:36:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 14:36:09] [Rank 0] PRINT: step:4400/10000 val_loss:4.6003 total_sharp:1.1631e-04 L1_sharp:3.0164e-02 L2_sharp:4.3966e-02 L3_sharp:5.7247e-02 L4_sharp:8.2290e-02 L5_sharp:7.9604e-02 L6_sharp:1.0373e-01 L7_sharp:1.1562e-01 L8_sharp:1.4241e-01 L9_sharp:1.5514e-01 L10_sharp:2.1401e-01 L11_sharp:3.3138e-01 L12_sharp:5.4967e-01 total_fnorm:7.4000e+01 total_l1_linf:1.3619e+05 total_spectral:3.6750e+01 L1_fnorm:1.1475e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1523e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1377e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1475e-01 L8_fnorm:1.1230e-01 L9_fnorm:1.1426e-01 L10_fnorm:1.1523e-01 L11_fnorm:1.1572e-01 L12_fnorm:1.1572e-01 L1_l1linf:3.1494e-02 L2_l1linf:3.1128e-02 L3_l1linf:3.1250e-02 L4_l1linf:3.1250e-02 L5_l1linf:3.0518e-02 L6_l1linf:3.0396e-02 L7_l1linf:3.0762e-02 L8_l1linf:3.0151e-02 L9_l1linf:3.0884e-02 L10_l1linf:3.0762e-02 L11_l1linf:3.1494e-02 L12_l1linf:3.2471e-02 L1_spectral:1.6068e-03 L2_spectral:1.6012e-03 L3_spectral:1.6104e-03 L4_spectral:1.5967e-03 L5_spectral:1.6191e-03 L6_spectral:1.6128e-03 L7_spectral:1.6116e-03 L8_spectral:1.5976e-03 L9_spectral:1.6180e-03 L10_spectral:1.6200e-03 L11_spectral:1.6028e-03 L12_spectral:1.6139e-03 train_time:195571ms step_avg:44.45ms +[2025-09-11 14:36:09] [Rank 0] PRINT: step:4400/10000 val_loss:4.6003 total_sharp:1.1631e-04 L1_sharp:3.0164e-02 L2_sharp:4.3966e-02 L3_sharp:5.7247e-02 L4_sharp:8.2290e-02 L5_sharp:7.9604e-02 L6_sharp:1.0373e-01 L7_sharp:1.1562e-01 L8_sharp:1.4241e-01 L9_sharp:1.5514e-01 L10_sharp:2.1401e-01 L11_sharp:3.3138e-01 L12_sharp:5.4967e-01 total_fnorm:7.4000e+01 total_l1_linf:1.3619e+05 total_spectral:3.6750e+01 L1_fnorm:1.1475e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1523e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1377e-01 L6_fnorm:1.1475e-01 L7_fnorm:1.1475e-01 L8_fnorm:1.1230e-01 L9_fnorm:1.1426e-01 L10_fnorm:1.1523e-01 L11_fnorm:1.1572e-01 L12_fnorm:1.1572e-01 L1_l1linf:3.1494e-02 L2_l1linf:3.1128e-02 L3_l1linf:3.1250e-02 L4_l1linf:3.1250e-02 L5_l1linf:3.0518e-02 
L6_l1linf:3.0396e-02 L7_l1linf:3.0762e-02 L8_l1linf:3.0151e-02 L9_l1linf:3.0884e-02 L10_l1linf:3.0762e-02 L11_l1linf:3.1494e-02 L12_l1linf:3.2471e-02 L1_spectral:1.6068e-03 L2_spectral:1.6012e-03 L3_spectral:1.6104e-03 L4_spectral:1.5967e-03 L5_spectral:1.6191e-03 L6_spectral:1.6128e-03 L7_spectral:1.6116e-03 L8_spectral:1.5976e-03 L9_spectral:1.6180e-03 L10_spectral:1.6200e-03 L11_spectral:1.6028e-03 L12_spectral:1.6139e-03 train_time:195571ms step_avg:44.45ms +[2025-09-11 14:36:12] [Rank 0] step:4401/10000 train_time:197817ms step_avg:44.95ms +[2025-09-11 14:36:12] [Rank 0] step:4401/10000 train_time:197817ms step_avg:44.95ms +[2025-09-11 14:36:12] [Rank 0] step:4421/10000 train_time:198763ms step_avg:44.96ms +[2025-09-11 14:36:12] [Rank 0] step:4421/10000 train_time:198763ms step_avg:44.96ms +[2025-09-11 14:36:13] [Rank 0] step:4441/10000 train_time:199442ms step_avg:44.91ms +[2025-09-11 14:36:13] [Rank 0] step:4441/10000 train_time:199442ms step_avg:44.91ms +[2025-09-11 14:36:14] [Rank 0] step:4461/10000 train_time:200122ms step_avg:44.86ms +[2025-09-11 14:36:14] [Rank 0] step:4461/10000 train_time:200122ms step_avg:44.86ms +[2025-09-11 14:36:14] [Rank 0] step:4481/10000 train_time:200801ms step_avg:44.81ms +[2025-09-11 14:36:14] [Rank 0] step:4481/10000 train_time:200801ms step_avg:44.81ms +[2025-09-11 14:36:15] [Rank 0] step:4501/10000 train_time:201482ms step_avg:44.76ms +[2025-09-11 14:36:15] [Rank 0] step:4501/10000 train_time:201482ms step_avg:44.76ms +[2025-09-11 14:36:16] [Rank 0] step:4521/10000 train_time:202163ms step_avg:44.72ms +[2025-09-11 14:36:16] [Rank 0] step:4521/10000 train_time:202163ms step_avg:44.72ms +[2025-09-11 14:36:16] [Rank 0] step:4541/10000 train_time:202843ms step_avg:44.67ms +[2025-09-11 14:36:16] [Rank 0] step:4541/10000 train_time:202843ms step_avg:44.67ms +[2025-09-11 14:36:17] [Rank 0] step:4561/10000 train_time:203522ms step_avg:44.62ms +[2025-09-11 14:36:17] [Rank 0] step:4561/10000 train_time:203522ms step_avg:44.62ms 
+[2025-09-11 14:36:18] [Rank 0] step:4581/10000 train_time:204202ms step_avg:44.58ms +[2025-09-11 14:36:18] [Rank 0] step:4581/10000 train_time:204202ms step_avg:44.58ms +[2025-09-11 14:36:19] [Rank 0] step:4601/10000 train_time:204882ms step_avg:44.53ms +[2025-09-11 14:36:19] [Rank 0] step:4601/10000 train_time:204882ms step_avg:44.53ms +[2025-09-11 14:36:19] [Rank 0] step:4621/10000 train_time:205561ms step_avg:44.48ms +[2025-09-11 14:36:19] [Rank 0] step:4621/10000 train_time:205561ms step_avg:44.48ms +[2025-09-11 14:36:20] [Rank 0] step:4641/10000 train_time:206240ms step_avg:44.44ms +[2025-09-11 14:36:20] [Rank 0] step:4641/10000 train_time:206240ms step_avg:44.44ms +[2025-09-11 14:36:21] [Rank 0] step:4661/10000 train_time:206920ms step_avg:44.39ms +[2025-09-11 14:36:21] [Rank 0] step:4661/10000 train_time:206920ms step_avg:44.39ms +[2025-09-11 14:36:21] [Rank 0] step:4681/10000 train_time:207600ms step_avg:44.35ms +[2025-09-11 14:36:21] [Rank 0] step:4681/10000 train_time:207600ms step_avg:44.35ms +[2025-09-11 14:36:22] [Rank 0] step:4701/10000 train_time:208279ms step_avg:44.31ms +[2025-09-11 14:36:22] [Rank 0] step:4701/10000 train_time:208279ms step_avg:44.31ms +[2025-09-11 14:36:23] [Rank 0] step:4721/10000 train_time:208959ms step_avg:44.26ms +[2025-09-11 14:36:23] [Rank 0] step:4721/10000 train_time:208959ms step_avg:44.26ms +[2025-09-11 14:36:23] [Rank 0] step:4741/10000 train_time:209639ms step_avg:44.22ms +[2025-09-11 14:36:23] [Rank 0] step:4741/10000 train_time:209639ms step_avg:44.22ms +[2025-09-11 14:36:24] [Rank 0] step:4761/10000 train_time:210320ms step_avg:44.18ms +[2025-09-11 14:36:24] [Rank 0] step:4761/10000 train_time:210320ms step_avg:44.18ms +[2025-09-11 14:36:25] [Rank 0] step:4781/10000 train_time:211000ms step_avg:44.13ms +[2025-09-11 14:36:25] [Rank 0] step:4781/10000 train_time:211000ms step_avg:44.13ms +[2025-09-11 14:36:25] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 14:36:25] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 14:36:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 14:36:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 14:36:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 14:36:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 14:36:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:36:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:36:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 14:36:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 14:36:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 14:36:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 14:36:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 14:36:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 14:36:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 14:36:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 14:36:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 14:36:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 14:36:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 14:36:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 14:36:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 14:36:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 14:36:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 14:36:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 14:36:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 14:36:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 14:36:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 14:36:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 14:36:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 14:36:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 14:36:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 14:36:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 14:36:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 14:36:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 14:36:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 14:36:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 14:36:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 14:36:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 14:36:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 14:36:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 14:36:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 14:36:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 14:36:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 14:36:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 14:36:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 14:36:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 14:36:36] [Rank 0] PRINT: step:4800/10000 val_loss:4.5475 total_sharp:1.1590e-04 L1_sharp:3.0862e-02 L2_sharp:5.0197e-02 L3_sharp:6.4686e-02 L4_sharp:8.3685e-02 L5_sharp:9.0345e-02 L6_sharp:1.2619e-01 L7_sharp:1.4726e-01 L8_sharp:1.5083e-01 L9_sharp:1.5441e-01 L10_sharp:2.2654e-01 L11_sharp:3.7086e-01 L12_sharp:1.5512e+00 total_fnorm:7.6000e+01 total_l1_linf:1.4746e+05 total_spectral:3.8000e+01 L1_fnorm:1.1523e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1523e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1230e-01 L9_fnorm:1.1426e-01 L10_fnorm:1.1426e-01 L11_fnorm:1.1523e-01 L12_fnorm:1.1572e-01 L1_l1linf:3.1128e-02 L2_l1linf:3.0884e-02 L3_l1linf:3.1006e-02 L4_l1linf:3.0884e-02 L5_l1linf:2.9785e-02 L6_l1linf:2.9785e-02 L7_l1linf:2.9785e-02 L8_l1linf:2.9419e-02 L9_l1linf:2.9297e-02 L10_l1linf:2.9175e-02 L11_l1linf:3.0518e-02 L12_l1linf:3.1982e-02 L1_spectral:1.6083e-03 L2_spectral:1.6003e-03 L3_spectral:1.5969e-03 L4_spectral:1.6007e-03 L5_spectral:1.5988e-03 L6_spectral:1.6077e-03 L7_spectral:1.6138e-03 L8_spectral:1.6073e-03 L9_spectral:1.6055e-03 L10_spectral:1.6085e-03 L11_spectral:1.6099e-03 L12_spectral:1.6118e-03 train_time:211659ms step_avg:44.10ms +[2025-09-11 14:36:36] [Rank 0] PRINT: step:4800/10000 
val_loss:4.5475 total_sharp:1.1590e-04 L1_sharp:3.0862e-02 L2_sharp:5.0197e-02 L3_sharp:6.4686e-02 L4_sharp:8.3685e-02 L5_sharp:9.0345e-02 L6_sharp:1.2619e-01 L7_sharp:1.4726e-01 L8_sharp:1.5083e-01 L9_sharp:1.5441e-01 L10_sharp:2.2654e-01 L11_sharp:3.7086e-01 L12_sharp:1.5512e+00 total_fnorm:7.6000e+01 total_l1_linf:1.4746e+05 total_spectral:3.8000e+01 L1_fnorm:1.1523e-01 L2_fnorm:1.1523e-01 L3_fnorm:1.1523e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1230e-01 L9_fnorm:1.1426e-01 L10_fnorm:1.1426e-01 L11_fnorm:1.1523e-01 L12_fnorm:1.1572e-01 L1_l1linf:3.1128e-02 L2_l1linf:3.0884e-02 L3_l1linf:3.1006e-02 L4_l1linf:3.0884e-02 L5_l1linf:2.9785e-02 L6_l1linf:2.9785e-02 L7_l1linf:2.9785e-02 L8_l1linf:2.9419e-02 L9_l1linf:2.9297e-02 L10_l1linf:2.9175e-02 L11_l1linf:3.0518e-02 L12_l1linf:3.1982e-02 L1_spectral:1.6083e-03 L2_spectral:1.6003e-03 L3_spectral:1.5969e-03 L4_spectral:1.6007e-03 L5_spectral:1.5988e-03 L6_spectral:1.6077e-03 L7_spectral:1.6138e-03 L8_spectral:1.6073e-03 L9_spectral:1.6055e-03 L10_spectral:1.6085e-03 L11_spectral:1.6099e-03 L12_spectral:1.6118e-03 train_time:211659ms step_avg:44.10ms +[2025-09-11 14:36:37] [Rank 0] step:4801/10000 train_time:213279ms step_avg:44.42ms +[2025-09-11 14:36:37] [Rank 0] step:4801/10000 train_time:213279ms step_avg:44.42ms +[2025-09-11 14:36:38] [Rank 0] step:4821/10000 train_time:213988ms step_avg:44.39ms +[2025-09-11 14:36:38] [Rank 0] step:4821/10000 train_time:213988ms step_avg:44.39ms +[2025-09-11 14:36:39] [Rank 0] step:4841/10000 train_time:214669ms step_avg:44.34ms +[2025-09-11 14:36:39] [Rank 0] step:4841/10000 train_time:214669ms step_avg:44.34ms +[2025-09-11 14:36:39] [Rank 0] step:4861/10000 train_time:215349ms step_avg:44.30ms +[2025-09-11 14:36:39] [Rank 0] step:4861/10000 train_time:215349ms step_avg:44.30ms +[2025-09-11 14:36:40] [Rank 0] step:4881/10000 train_time:216029ms step_avg:44.26ms +[2025-09-11 14:36:40] [Rank 0] step:4881/10000 
train_time:216029ms step_avg:44.26ms +[2025-09-11 14:36:41] [Rank 0] step:4901/10000 train_time:216712ms step_avg:44.22ms +[2025-09-11 14:36:41] [Rank 0] step:4901/10000 train_time:216712ms step_avg:44.22ms +[2025-09-11 14:36:41] [Rank 0] step:4921/10000 train_time:217393ms step_avg:44.18ms +[2025-09-11 14:36:41] [Rank 0] step:4921/10000 train_time:217393ms step_avg:44.18ms +[2025-09-11 14:36:42] [Rank 0] step:4941/10000 train_time:218073ms step_avg:44.14ms +[2025-09-11 14:36:42] [Rank 0] step:4941/10000 train_time:218073ms step_avg:44.14ms +[2025-09-11 14:36:43] [Rank 0] step:4961/10000 train_time:218753ms step_avg:44.09ms +[2025-09-11 14:36:43] [Rank 0] step:4961/10000 train_time:218753ms step_avg:44.09ms +[2025-09-11 14:36:43] [Rank 0] step:4981/10000 train_time:219433ms step_avg:44.05ms +[2025-09-11 14:36:43] [Rank 0] step:4981/10000 train_time:219433ms step_avg:44.05ms +[2025-09-11 14:36:44] [Rank 0] step:5001/10000 train_time:220115ms step_avg:44.01ms +[2025-09-11 14:36:44] [Rank 0] step:5001/10000 train_time:220115ms step_avg:44.01ms +[2025-09-11 14:36:45] [Rank 0] step:5021/10000 train_time:220793ms step_avg:43.97ms +[2025-09-11 14:36:45] [Rank 0] step:5021/10000 train_time:220793ms step_avg:43.97ms +[2025-09-11 14:36:45] [Rank 0] step:5041/10000 train_time:221472ms step_avg:43.93ms +[2025-09-11 14:36:45] [Rank 0] step:5041/10000 train_time:221472ms step_avg:43.93ms +[2025-09-11 14:36:46] [Rank 0] step:5061/10000 train_time:222152ms step_avg:43.89ms +[2025-09-11 14:36:46] [Rank 0] step:5061/10000 train_time:222152ms step_avg:43.89ms +[2025-09-11 14:36:47] [Rank 0] step:5081/10000 train_time:222831ms step_avg:43.86ms +[2025-09-11 14:36:47] [Rank 0] step:5081/10000 train_time:222831ms step_avg:43.86ms +[2025-09-11 14:36:48] [Rank 0] step:5101/10000 train_time:223511ms step_avg:43.82ms +[2025-09-11 14:36:48] [Rank 0] step:5101/10000 train_time:223511ms step_avg:43.82ms +[2025-09-11 14:36:48] [Rank 0] step:5121/10000 train_time:224191ms step_avg:43.78ms 
+[2025-09-11 14:36:48] [Rank 0] step:5121/10000 train_time:224191ms step_avg:43.78ms +[2025-09-11 14:36:49] [Rank 0] step:5141/10000 train_time:224872ms step_avg:43.74ms +[2025-09-11 14:36:49] [Rank 0] step:5141/10000 train_time:224872ms step_avg:43.74ms +[2025-09-11 14:36:50] [Rank 0] step:5161/10000 train_time:225551ms step_avg:43.70ms +[2025-09-11 14:36:50] [Rank 0] step:5161/10000 train_time:225551ms step_avg:43.70ms +[2025-09-11 14:36:50] [Rank 0] step:5181/10000 train_time:226230ms step_avg:43.67ms +[2025-09-11 14:36:50] [Rank 0] step:5181/10000 train_time:226230ms step_avg:43.67ms +[2025-09-11 14:36:51] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 14:36:51] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 14:36:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 14:36:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 14:36:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 14:36:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 14:36:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:36:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:36:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 14:36:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 14:36:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 14:36:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 14:36:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 14:36:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 14:36:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 14:36:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 14:36:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 14:36:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 14:36:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 14:36:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 14:36:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 14:36:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 14:36:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 14:36:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 14:36:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 14:36:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 14:36:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 14:36:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 14:37:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 14:37:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 14:37:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 14:37:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 14:37:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 14:37:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 14:37:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 14:37:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 14:37:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 14:37:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 14:37:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 14:37:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 14:37:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 14:37:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 14:37:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 14:37:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 14:37:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 14:37:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 14:37:02] [Rank 0] PRINT: step:5200/10000 val_loss:4.5067 total_sharp:1.9014e-04 L1_sharp:2.3540e-02 L2_sharp:4.0119e-02 L3_sharp:5.4330e-02 L4_sharp:8.2876e-02 L5_sharp:8.8245e-02 L6_sharp:1.2411e-01 L7_sharp:1.3234e-01 L8_sharp:1.6540e-01 L9_sharp:2.1881e-01 L10_sharp:3.2486e-01 L11_sharp:9.5557e-01 L12_sharp:4.1320e+00 total_fnorm:6.9000e+01 total_l1_linf:1.2544e+05 total_spectral:3.4500e+01 L1_fnorm:1.1426e-01 L2_fnorm:1.1475e-01 L3_fnorm:1.1475e-01 L4_fnorm:1.1426e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1182e-01 L9_fnorm:1.1426e-01 L10_fnorm:1.1426e-01 L11_fnorm:1.1523e-01 L12_fnorm:1.1621e-01 L1_l1linf:2.8931e-02 L2_l1linf:2.9297e-02 L3_l1linf:2.9541e-02 L4_l1linf:2.8809e-02 L5_l1linf:2.8564e-02 L6_l1linf:2.8931e-02 L7_l1linf:2.8809e-02 L8_l1linf:2.8931e-02 L9_l1linf:2.9175e-02 L10_l1linf:2.8931e-02 L11_l1linf:3.0273e-02 L12_l1linf:3.1982e-02 L1_spectral:1.6073e-03 L2_spectral:1.6096e-03 L3_spectral:1.5967e-03 L4_spectral:1.6038e-03 L5_spectral:1.6108e-03 L6_spectral:1.6094e-03 L7_spectral:1.6084e-03 L8_spectral:1.6081e-03 L9_spectral:1.6089e-03 L10_spectral:1.6111e-03 L11_spectral:1.6094e-03 L12_spectral:1.6197e-03 train_time:226896ms step_avg:43.63ms +[2025-09-11 14:37:02] [Rank 0] PRINT: step:5200/10000 val_loss:4.5067 total_sharp:1.9014e-04 L1_sharp:2.3540e-02 L2_sharp:4.0119e-02 L3_sharp:5.4330e-02 L4_sharp:8.2876e-02 L5_sharp:8.8245e-02 L6_sharp:1.2411e-01 L7_sharp:1.3234e-01 L8_sharp:1.6540e-01 L9_sharp:2.1881e-01 L10_sharp:3.2486e-01 L11_sharp:9.5557e-01 L12_sharp:4.1320e+00 total_fnorm:6.9000e+01 total_l1_linf:1.2544e+05 total_spectral:3.4500e+01 L1_fnorm:1.1426e-01 L2_fnorm:1.1475e-01 L3_fnorm:1.1475e-01 L4_fnorm:1.1426e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1182e-01 L9_fnorm:1.1426e-01 L10_fnorm:1.1426e-01 L11_fnorm:1.1523e-01 L12_fnorm:1.1621e-01 L1_l1linf:2.8931e-02 L2_l1linf:2.9297e-02 L3_l1linf:2.9541e-02 L4_l1linf:2.8809e-02 L5_l1linf:2.8564e-02 
L6_l1linf:2.8931e-02 L7_l1linf:2.8809e-02 L8_l1linf:2.8931e-02 L9_l1linf:2.9175e-02 L10_l1linf:2.8931e-02 L11_l1linf:3.0273e-02 L12_l1linf:3.1982e-02 L1_spectral:1.6073e-03 L2_spectral:1.6096e-03 L3_spectral:1.5967e-03 L4_spectral:1.6038e-03 L5_spectral:1.6108e-03 L6_spectral:1.6094e-03 L7_spectral:1.6084e-03 L8_spectral:1.6081e-03 L9_spectral:1.6089e-03 L10_spectral:1.6111e-03 L11_spectral:1.6094e-03 L12_spectral:1.6197e-03 train_time:226896ms step_avg:43.63ms +[2025-09-11 14:37:04] [Rank 0] step:5201/10000 train_time:228998ms step_avg:44.03ms +[2025-09-11 14:37:04] [Rank 0] step:5201/10000 train_time:228998ms step_avg:44.03ms +[2025-09-11 14:37:05] [Rank 0] step:5221/10000 train_time:229701ms step_avg:44.00ms +[2025-09-11 14:37:05] [Rank 0] step:5221/10000 train_time:229701ms step_avg:44.00ms +[2025-09-11 14:37:05] [Rank 0] step:5241/10000 train_time:230390ms step_avg:43.96ms +[2025-09-11 14:37:05] [Rank 0] step:5241/10000 train_time:230390ms step_avg:43.96ms +[2025-09-11 14:37:06] [Rank 0] step:5261/10000 train_time:231079ms step_avg:43.92ms +[2025-09-11 14:37:06] [Rank 0] step:5261/10000 train_time:231079ms step_avg:43.92ms +[2025-09-11 14:37:07] [Rank 0] step:5281/10000 train_time:231768ms step_avg:43.89ms +[2025-09-11 14:37:07] [Rank 0] step:5281/10000 train_time:231768ms step_avg:43.89ms +[2025-09-11 14:37:07] [Rank 0] step:5301/10000 train_time:232458ms step_avg:43.85ms +[2025-09-11 14:37:07] [Rank 0] step:5301/10000 train_time:232458ms step_avg:43.85ms +[2025-09-11 14:37:08] [Rank 0] step:5321/10000 train_time:233147ms step_avg:43.82ms +[2025-09-11 14:37:08] [Rank 0] step:5321/10000 train_time:233147ms step_avg:43.82ms +[2025-09-11 14:37:09] [Rank 0] step:5341/10000 train_time:233837ms step_avg:43.78ms +[2025-09-11 14:37:09] [Rank 0] step:5341/10000 train_time:233837ms step_avg:43.78ms +[2025-09-11 14:37:10] [Rank 0] step:5361/10000 train_time:234527ms step_avg:43.75ms +[2025-09-11 14:37:10] [Rank 0] step:5361/10000 train_time:234527ms step_avg:43.75ms 
+[2025-09-11 14:37:10] [Rank 0] step:5381/10000 train_time:235218ms step_avg:43.71ms +[2025-09-11 14:37:10] [Rank 0] step:5381/10000 train_time:235218ms step_avg:43.71ms +[2025-09-11 14:37:11] [Rank 0] step:5401/10000 train_time:235906ms step_avg:43.68ms +[2025-09-11 14:37:11] [Rank 0] step:5401/10000 train_time:235906ms step_avg:43.68ms +[2025-09-11 14:37:12] [Rank 0] step:5421/10000 train_time:236597ms step_avg:43.64ms +[2025-09-11 14:37:12] [Rank 0] step:5421/10000 train_time:236597ms step_avg:43.64ms +[2025-09-11 14:37:12] [Rank 0] step:5441/10000 train_time:237287ms step_avg:43.61ms +[2025-09-11 14:37:12] [Rank 0] step:5441/10000 train_time:237287ms step_avg:43.61ms +[2025-09-11 14:37:13] [Rank 0] step:5461/10000 train_time:237979ms step_avg:43.58ms +[2025-09-11 14:37:13] [Rank 0] step:5461/10000 train_time:237979ms step_avg:43.58ms +[2025-09-11 14:37:14] [Rank 0] step:5481/10000 train_time:238670ms step_avg:43.54ms +[2025-09-11 14:37:14] [Rank 0] step:5481/10000 train_time:238670ms step_avg:43.54ms +[2025-09-11 14:37:14] [Rank 0] step:5501/10000 train_time:239359ms step_avg:43.51ms +[2025-09-11 14:37:14] [Rank 0] step:5501/10000 train_time:239359ms step_avg:43.51ms +[2025-09-11 14:37:15] [Rank 0] step:5521/10000 train_time:240049ms step_avg:43.48ms +[2025-09-11 14:37:15] [Rank 0] step:5521/10000 train_time:240049ms step_avg:43.48ms +[2025-09-11 14:37:16] [Rank 0] step:5541/10000 train_time:240740ms step_avg:43.45ms +[2025-09-11 14:37:16] [Rank 0] step:5541/10000 train_time:240740ms step_avg:43.45ms +[2025-09-11 14:37:16] [Rank 0] step:5561/10000 train_time:241432ms step_avg:43.42ms +[2025-09-11 14:37:16] [Rank 0] step:5561/10000 train_time:241432ms step_avg:43.42ms +[2025-09-11 14:37:17] [Rank 0] step:5581/10000 train_time:242122ms step_avg:43.38ms +[2025-09-11 14:37:17] [Rank 0] step:5581/10000 train_time:242122ms step_avg:43.38ms +[2025-09-11 14:37:18] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 14:37:18] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 14:37:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 14:37:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 14:37:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 14:37:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 14:37:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:37:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:37:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 14:37:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 14:37:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 14:37:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 14:37:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 14:37:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 14:37:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 14:37:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 14:37:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 14:37:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 14:37:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 14:37:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 14:37:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 14:37:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 14:37:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 14:37:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 14:37:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 14:37:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 14:37:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 14:37:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 14:37:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 14:37:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 14:37:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 14:37:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 14:37:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 14:37:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 14:37:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 14:37:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 14:37:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 14:37:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 14:37:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 14:37:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 14:37:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 14:37:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 14:37:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 14:37:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 14:37:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 14:37:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 14:37:29] [Rank 0] PRINT: step:5600/10000 val_loss:4.4741 total_sharp:1.2113e-04 L1_sharp:2.5789e-02 L2_sharp:3.5862e-02 L3_sharp:5.5101e-02 L4_sharp:7.0058e-02 L5_sharp:7.9949e-02 L6_sharp:1.1467e-01 L7_sharp:1.3337e-01 L8_sharp:1.3885e-01 L9_sharp:1.6312e-01 L10_sharp:2.5598e-01 L11_sharp:3.6213e-01 L12_sharp:1.4704e+00 total_fnorm:7.1000e+01 total_l1_linf:1.3107e+05 total_spectral:3.5500e+01 L1_fnorm:1.1426e-01 L2_fnorm:1.1475e-01 L3_fnorm:1.1475e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1084e-01 L9_fnorm:1.1377e-01 L10_fnorm:1.1426e-01 L11_fnorm:1.1475e-01 L12_fnorm:1.1475e-01 L1_l1linf:2.8809e-02 L2_l1linf:2.9419e-02 L3_l1linf:2.9663e-02 L4_l1linf:2.9175e-02 L5_l1linf:2.9053e-02 L6_l1linf:2.8809e-02 L7_l1linf:2.8687e-02 L8_l1linf:2.9053e-02 L9_l1linf:2.8809e-02 L10_l1linf:2.8687e-02 L11_l1linf:2.9053e-02 L12_l1linf:3.0640e-02 L1_spectral:1.5991e-03 L2_spectral:1.5977e-03 L3_spectral:1.6176e-03 L4_spectral:1.6104e-03 L5_spectral:1.5998e-03 L6_spectral:1.6105e-03 L7_spectral:1.6069e-03 L8_spectral:1.5950e-03 L9_spectral:1.6132e-03 L10_spectral:1.6159e-03 L11_spectral:1.6072e-03 L12_spectral:1.6059e-03 train_time:242793ms step_avg:43.36ms +[2025-09-11 14:37:29] [Rank 0] PRINT: step:5600/10000 
val_loss:4.4741 total_sharp:1.2113e-04 L1_sharp:2.5789e-02 L2_sharp:3.5862e-02 L3_sharp:5.5101e-02 L4_sharp:7.0058e-02 L5_sharp:7.9949e-02 L6_sharp:1.1467e-01 L7_sharp:1.3337e-01 L8_sharp:1.3885e-01 L9_sharp:1.6312e-01 L10_sharp:2.5598e-01 L11_sharp:3.6213e-01 L12_sharp:1.4704e+00 total_fnorm:7.1000e+01 total_l1_linf:1.3107e+05 total_spectral:3.5500e+01 L1_fnorm:1.1426e-01 L2_fnorm:1.1475e-01 L3_fnorm:1.1475e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1084e-01 L9_fnorm:1.1377e-01 L10_fnorm:1.1426e-01 L11_fnorm:1.1475e-01 L12_fnorm:1.1475e-01 L1_l1linf:2.8809e-02 L2_l1linf:2.9419e-02 L3_l1linf:2.9663e-02 L4_l1linf:2.9175e-02 L5_l1linf:2.9053e-02 L6_l1linf:2.8809e-02 L7_l1linf:2.8687e-02 L8_l1linf:2.9053e-02 L9_l1linf:2.8809e-02 L10_l1linf:2.8687e-02 L11_l1linf:2.9053e-02 L12_l1linf:3.0640e-02 L1_spectral:1.5991e-03 L2_spectral:1.5977e-03 L3_spectral:1.6176e-03 L4_spectral:1.6104e-03 L5_spectral:1.5998e-03 L6_spectral:1.6105e-03 L7_spectral:1.6069e-03 L8_spectral:1.5950e-03 L9_spectral:1.6132e-03 L10_spectral:1.6159e-03 L11_spectral:1.6072e-03 L12_spectral:1.6059e-03 train_time:242793ms step_avg:43.36ms +[2025-09-11 14:37:31] [Rank 0] step:5601/10000 train_time:244839ms step_avg:43.71ms +[2025-09-11 14:37:31] [Rank 0] step:5601/10000 train_time:244839ms step_avg:43.71ms +[2025-09-11 14:37:32] [Rank 0] step:5621/10000 train_time:245545ms step_avg:43.68ms +[2025-09-11 14:37:32] [Rank 0] step:5621/10000 train_time:245545ms step_avg:43.68ms +[2025-09-11 14:37:32] [Rank 0] step:5641/10000 train_time:246234ms step_avg:43.65ms +[2025-09-11 14:37:32] [Rank 0] step:5641/10000 train_time:246234ms step_avg:43.65ms +[2025-09-11 14:37:33] [Rank 0] step:5661/10000 train_time:246924ms step_avg:43.62ms +[2025-09-11 14:37:33] [Rank 0] step:5661/10000 train_time:246924ms step_avg:43.62ms +[2025-09-11 14:37:34] [Rank 0] step:5681/10000 train_time:247614ms step_avg:43.59ms +[2025-09-11 14:37:34] [Rank 0] step:5681/10000 
train_time:247614ms step_avg:43.59ms +[2025-09-11 14:37:34] [Rank 0] step:5701/10000 train_time:248307ms step_avg:43.55ms +[2025-09-11 14:37:34] [Rank 0] step:5701/10000 train_time:248307ms step_avg:43.55ms +[2025-09-11 14:37:35] [Rank 0] step:5721/10000 train_time:248995ms step_avg:43.52ms +[2025-09-11 14:37:35] [Rank 0] step:5721/10000 train_time:248995ms step_avg:43.52ms +[2025-09-11 14:37:36] [Rank 0] step:5741/10000 train_time:249978ms step_avg:43.54ms +[2025-09-11 14:37:36] [Rank 0] step:5741/10000 train_time:249978ms step_avg:43.54ms +[2025-09-11 14:37:37] [Rank 0] step:5761/10000 train_time:250668ms step_avg:43.51ms +[2025-09-11 14:37:37] [Rank 0] step:5761/10000 train_time:250668ms step_avg:43.51ms +[2025-09-11 14:37:37] [Rank 0] step:5781/10000 train_time:251359ms step_avg:43.48ms +[2025-09-11 14:37:37] [Rank 0] step:5781/10000 train_time:251359ms step_avg:43.48ms +[2025-09-11 14:37:38] [Rank 0] step:5801/10000 train_time:252227ms step_avg:43.48ms +[2025-09-11 14:37:38] [Rank 0] step:5801/10000 train_time:252227ms step_avg:43.48ms +[2025-09-11 14:37:39] [Rank 0] step:5821/10000 train_time:252980ms step_avg:43.46ms +[2025-09-11 14:37:39] [Rank 0] step:5821/10000 train_time:252980ms step_avg:43.46ms +[2025-09-11 14:37:40] [Rank 0] step:5841/10000 train_time:253671ms step_avg:43.43ms +[2025-09-11 14:37:40] [Rank 0] step:5841/10000 train_time:253671ms step_avg:43.43ms +[2025-09-11 14:37:40] [Rank 0] step:5861/10000 train_time:254360ms step_avg:43.40ms +[2025-09-11 14:37:40] [Rank 0] step:5861/10000 train_time:254360ms step_avg:43.40ms +[2025-09-11 14:37:41] [Rank 0] step:5881/10000 train_time:255049ms step_avg:43.37ms +[2025-09-11 14:37:41] [Rank 0] step:5881/10000 train_time:255049ms step_avg:43.37ms +[2025-09-11 14:37:42] [Rank 0] step:5901/10000 train_time:255738ms step_avg:43.34ms +[2025-09-11 14:37:42] [Rank 0] step:5901/10000 train_time:255738ms step_avg:43.34ms +[2025-09-11 14:37:42] [Rank 0] step:5921/10000 train_time:256430ms step_avg:43.31ms 
+[2025-09-11 14:37:42] [Rank 0] step:5921/10000 train_time:256430ms step_avg:43.31ms +[2025-09-11 14:37:43] [Rank 0] step:5941/10000 train_time:257122ms step_avg:43.28ms +[2025-09-11 14:37:43] [Rank 0] step:5941/10000 train_time:257122ms step_avg:43.28ms +[2025-09-11 14:37:44] [Rank 0] step:5961/10000 train_time:257813ms step_avg:43.25ms +[2025-09-11 14:37:44] [Rank 0] step:5961/10000 train_time:257813ms step_avg:43.25ms +[2025-09-11 14:37:45] [Rank 0] step:5981/10000 train_time:258503ms step_avg:43.22ms +[2025-09-11 14:37:45] [Rank 0] step:5981/10000 train_time:258503ms step_avg:43.22ms +[2025-09-11 14:37:45] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 14:37:45] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 14:37:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 14:37:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 14:37:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 14:37:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 14:37:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:37:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:37:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 14:37:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 14:37:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 14:37:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 14:37:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 14:37:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 14:37:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 14:37:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 14:37:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 14:37:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 14:37:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 14:37:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 14:37:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 14:37:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 14:37:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 14:37:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 14:37:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 14:37:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 14:37:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 14:37:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 14:37:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 14:37:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 14:37:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 14:37:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 14:37:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 14:37:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 14:37:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 14:37:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 14:37:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 14:37:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 14:37:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 14:37:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 14:37:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 14:37:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 14:37:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 14:37:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 14:37:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 14:37:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 14:37:56] [Rank 0] PRINT: step:6000/10000 val_loss:4.4268 total_sharp:1.0446e-04 L1_sharp:2.7020e-02 L2_sharp:3.6026e-02 L3_sharp:5.7541e-02 L4_sharp:7.1210e-02 L5_sharp:8.1260e-02 L6_sharp:1.0780e-01 L7_sharp:1.2206e-01 L8_sharp:1.2092e-01 L9_sharp:1.3316e-01 L10_sharp:1.8267e-01 L11_sharp:2.9931e-01 L12_sharp:1.3839e+00 total_fnorm:7.2000e+01 total_l1_linf:1.3210e+05 total_spectral:3.6000e+01 L1_fnorm:1.1426e-01 L2_fnorm:1.1475e-01 L3_fnorm:1.1475e-01 L4_fnorm:1.1426e-01 L5_fnorm:1.1377e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1133e-01 L9_fnorm:1.1377e-01 L10_fnorm:1.1377e-01 L11_fnorm:1.1377e-01 L12_fnorm:1.1426e-01 L1_l1linf:2.8320e-02 L2_l1linf:2.9175e-02 L3_l1linf:2.8931e-02 L4_l1linf:2.8564e-02 L5_l1linf:2.8198e-02 L6_l1linf:2.8320e-02 L7_l1linf:2.8931e-02 L8_l1linf:2.8198e-02 L9_l1linf:2.7954e-02 L10_l1linf:2.7344e-02 L11_l1linf:2.7954e-02 L12_l1linf:2.9541e-02 L1_spectral:1.6146e-03 L2_spectral:1.6012e-03 L3_spectral:1.6038e-03 L4_spectral:1.6140e-03 L5_spectral:1.6174e-03 L6_spectral:1.6196e-03 L7_spectral:1.6125e-03 L8_spectral:1.6052e-03 L9_spectral:1.6089e-03 L10_spectral:1.6203e-03 L11_spectral:1.6227e-03 L12_spectral:1.6142e-03 train_time:259177ms step_avg:43.20ms +[2025-09-11 14:37:56] [Rank 0] PRINT: step:6000/10000 val_loss:4.4268 total_sharp:1.0446e-04 L1_sharp:2.7020e-02 L2_sharp:3.6026e-02 L3_sharp:5.7541e-02 L4_sharp:7.1210e-02 L5_sharp:8.1260e-02 L6_sharp:1.0780e-01 L7_sharp:1.2206e-01 L8_sharp:1.2092e-01 L9_sharp:1.3316e-01 L10_sharp:1.8267e-01 L11_sharp:2.9931e-01 L12_sharp:1.3839e+00 total_fnorm:7.2000e+01 total_l1_linf:1.3210e+05 total_spectral:3.6000e+01 L1_fnorm:1.1426e-01 L2_fnorm:1.1475e-01 L3_fnorm:1.1475e-01 L4_fnorm:1.1426e-01 L5_fnorm:1.1377e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1133e-01 L9_fnorm:1.1377e-01 L10_fnorm:1.1377e-01 L11_fnorm:1.1377e-01 L12_fnorm:1.1426e-01 L1_l1linf:2.8320e-02 L2_l1linf:2.9175e-02 L3_l1linf:2.8931e-02 L4_l1linf:2.8564e-02 L5_l1linf:2.8198e-02 
L6_l1linf:2.8320e-02 L7_l1linf:2.8931e-02 L8_l1linf:2.8198e-02 L9_l1linf:2.7954e-02 L10_l1linf:2.7344e-02 L11_l1linf:2.7954e-02 L12_l1linf:2.9541e-02 L1_spectral:1.6146e-03 L2_spectral:1.6012e-03 L3_spectral:1.6038e-03 L4_spectral:1.6140e-03 L5_spectral:1.6174e-03 L6_spectral:1.6196e-03 L7_spectral:1.6125e-03 L8_spectral:1.6052e-03 L9_spectral:1.6089e-03 L10_spectral:1.6203e-03 L11_spectral:1.6227e-03 L12_spectral:1.6142e-03 train_time:259177ms step_avg:43.20ms +[2025-09-11 14:37:58] [Rank 0] step:6001/10000 train_time:261264ms step_avg:43.54ms +[2025-09-11 14:37:58] [Rank 0] step:6001/10000 train_time:261264ms step_avg:43.54ms +[2025-09-11 14:37:59] [Rank 0] step:6021/10000 train_time:261986ms step_avg:43.51ms +[2025-09-11 14:37:59] [Rank 0] step:6021/10000 train_time:261986ms step_avg:43.51ms +[2025-09-11 14:38:00] [Rank 0] step:6041/10000 train_time:262679ms step_avg:43.48ms +[2025-09-11 14:38:00] [Rank 0] step:6041/10000 train_time:262679ms step_avg:43.48ms +[2025-09-11 14:38:00] [Rank 0] step:6061/10000 train_time:263370ms step_avg:43.45ms +[2025-09-11 14:38:00] [Rank 0] step:6061/10000 train_time:263370ms step_avg:43.45ms +[2025-09-11 14:38:01] [Rank 0] step:6081/10000 train_time:264064ms step_avg:43.42ms +[2025-09-11 14:38:01] [Rank 0] step:6081/10000 train_time:264064ms step_avg:43.42ms +[2025-09-11 14:38:02] [Rank 0] step:6101/10000 train_time:264755ms step_avg:43.40ms +[2025-09-11 14:38:02] [Rank 0] step:6101/10000 train_time:264755ms step_avg:43.40ms +[2025-09-11 14:38:03] [Rank 0] step:6121/10000 train_time:265498ms step_avg:43.37ms +[2025-09-11 14:38:03] [Rank 0] step:6121/10000 train_time:265498ms step_avg:43.37ms +[2025-09-11 14:38:03] [Rank 0] step:6141/10000 train_time:266193ms step_avg:43.35ms +[2025-09-11 14:38:03] [Rank 0] step:6141/10000 train_time:266193ms step_avg:43.35ms +[2025-09-11 14:38:04] [Rank 0] step:6161/10000 train_time:266882ms step_avg:43.32ms +[2025-09-11 14:38:04] [Rank 0] step:6161/10000 train_time:266882ms step_avg:43.32ms 
+[2025-09-11 14:38:05] [Rank 0] step:6181/10000 train_time:267574ms step_avg:43.29ms +[2025-09-11 14:38:05] [Rank 0] step:6181/10000 train_time:267574ms step_avg:43.29ms +[2025-09-11 14:38:05] [Rank 0] step:6201/10000 train_time:268267ms step_avg:43.26ms +[2025-09-11 14:38:05] [Rank 0] step:6201/10000 train_time:268267ms step_avg:43.26ms +[2025-09-11 14:38:06] [Rank 0] step:6221/10000 train_time:268961ms step_avg:43.23ms +[2025-09-11 14:38:06] [Rank 0] step:6221/10000 train_time:268961ms step_avg:43.23ms +[2025-09-11 14:38:07] [Rank 0] step:6241/10000 train_time:269653ms step_avg:43.21ms +[2025-09-11 14:38:07] [Rank 0] step:6241/10000 train_time:269653ms step_avg:43.21ms +[2025-09-11 14:38:07] [Rank 0] step:6261/10000 train_time:270343ms step_avg:43.18ms +[2025-09-11 14:38:07] [Rank 0] step:6261/10000 train_time:270343ms step_avg:43.18ms +[2025-09-11 14:38:08] [Rank 0] step:6281/10000 train_time:271035ms step_avg:43.15ms +[2025-09-11 14:38:08] [Rank 0] step:6281/10000 train_time:271035ms step_avg:43.15ms +[2025-09-11 14:38:09] [Rank 0] step:6301/10000 train_time:271726ms step_avg:43.12ms +[2025-09-11 14:38:09] [Rank 0] step:6301/10000 train_time:271726ms step_avg:43.12ms +[2025-09-11 14:38:10] [Rank 0] step:6321/10000 train_time:272422ms step_avg:43.10ms +[2025-09-11 14:38:10] [Rank 0] step:6321/10000 train_time:272422ms step_avg:43.10ms +[2025-09-11 14:38:10] [Rank 0] step:6341/10000 train_time:273115ms step_avg:43.07ms +[2025-09-11 14:38:10] [Rank 0] step:6341/10000 train_time:273115ms step_avg:43.07ms +[2025-09-11 14:38:11] [Rank 0] step:6361/10000 train_time:273808ms step_avg:43.04ms +[2025-09-11 14:38:11] [Rank 0] step:6361/10000 train_time:273808ms step_avg:43.04ms +[2025-09-11 14:38:12] [Rank 0] step:6381/10000 train_time:274500ms step_avg:43.02ms +[2025-09-11 14:38:12] [Rank 0] step:6381/10000 train_time:274500ms step_avg:43.02ms +[2025-09-11 14:38:12] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 14:38:12] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 14:38:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 14:38:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 14:38:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 14:38:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 14:38:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:38:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:38:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 14:38:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 14:38:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 14:38:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 14:38:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 14:38:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 14:38:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 14:38:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 14:38:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 14:38:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 14:38:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 14:38:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 14:38:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 14:38:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 14:38:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 14:38:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 14:38:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 14:38:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 14:38:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 14:38:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 14:38:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 14:38:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 14:38:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 14:38:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 14:38:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 14:38:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 14:38:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 14:38:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 14:38:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 14:38:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 14:38:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 14:38:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 14:38:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 14:38:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 14:38:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 14:38:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 14:38:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 14:38:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 14:38:24] [Rank 0] PRINT: step:6400/10000 val_loss:4.3959 total_sharp:1.0943e-04 L1_sharp:2.0294e-02 L2_sharp:3.3915e-02 L3_sharp:5.7347e-02 L4_sharp:7.4492e-02 L5_sharp:7.6440e-02 L6_sharp:9.4091e-02 L7_sharp:1.1431e-01 L8_sharp:1.3045e-01 L9_sharp:1.4283e-01 L10_sharp:2.1302e-01 L11_sharp:3.8780e-01 L12_sharp:6.3007e-01 total_fnorm:6.3250e+01 total_l1_linf:1.1366e+05 total_spectral:3.1750e+01 L1_fnorm:1.0010e-01 L2_fnorm:1.0059e-01 L3_fnorm:1.0010e-01 L4_fnorm:1.0059e-01 L5_fnorm:9.9609e-02 L6_fnorm:9.9609e-02 L7_fnorm:1.0059e-01 L8_fnorm:9.7168e-02 L9_fnorm:9.9609e-02 L10_fnorm:9.9609e-02 L11_fnorm:9.9609e-02 L12_fnorm:9.9609e-02 L1_l1linf:2.3560e-02 L2_l1linf:2.4658e-02 L3_l1linf:2.3682e-02 L4_l1linf:2.4536e-02 L5_l1linf:2.3682e-02 L6_l1linf:2.3926e-02 L7_l1linf:2.4414e-02 L8_l1linf:2.3926e-02 L9_l1linf:2.3926e-02 L10_l1linf:2.3682e-02 L11_l1linf:2.3193e-02 L12_l1linf:2.4902e-02 L1_spectral:1.4497e-03 L2_spectral:1.4497e-03 L3_spectral:1.4615e-03 L4_spectral:1.4607e-03 L5_spectral:1.4474e-03 L6_spectral:1.5079e-03 L7_spectral:1.4644e-03 L8_spectral:1.4217e-03 L9_spectral:1.4527e-03 L10_spectral:1.4558e-03 L11_spectral:1.4445e-03 L12_spectral:1.4280e-03 train_time:275173ms step_avg:43.00ms +[2025-09-11 14:38:24] [Rank 0] PRINT: step:6400/10000 
val_loss:4.3959 total_sharp:1.0943e-04 L1_sharp:2.0294e-02 L2_sharp:3.3915e-02 L3_sharp:5.7347e-02 L4_sharp:7.4492e-02 L5_sharp:7.6440e-02 L6_sharp:9.4091e-02 L7_sharp:1.1431e-01 L8_sharp:1.3045e-01 L9_sharp:1.4283e-01 L10_sharp:2.1302e-01 L11_sharp:3.8780e-01 L12_sharp:6.3007e-01 total_fnorm:6.3250e+01 total_l1_linf:1.1366e+05 total_spectral:3.1750e+01 L1_fnorm:1.0010e-01 L2_fnorm:1.0059e-01 L3_fnorm:1.0010e-01 L4_fnorm:1.0059e-01 L5_fnorm:9.9609e-02 L6_fnorm:9.9609e-02 L7_fnorm:1.0059e-01 L8_fnorm:9.7168e-02 L9_fnorm:9.9609e-02 L10_fnorm:9.9609e-02 L11_fnorm:9.9609e-02 L12_fnorm:9.9609e-02 L1_l1linf:2.3560e-02 L2_l1linf:2.4658e-02 L3_l1linf:2.3682e-02 L4_l1linf:2.4536e-02 L5_l1linf:2.3682e-02 L6_l1linf:2.3926e-02 L7_l1linf:2.4414e-02 L8_l1linf:2.3926e-02 L9_l1linf:2.3926e-02 L10_l1linf:2.3682e-02 L11_l1linf:2.3193e-02 L12_l1linf:2.4902e-02 L1_spectral:1.4497e-03 L2_spectral:1.4497e-03 L3_spectral:1.4615e-03 L4_spectral:1.4607e-03 L5_spectral:1.4474e-03 L6_spectral:1.5079e-03 L7_spectral:1.4644e-03 L8_spectral:1.4217e-03 L9_spectral:1.4527e-03 L10_spectral:1.4558e-03 L11_spectral:1.4445e-03 L12_spectral:1.4280e-03 train_time:275173ms step_avg:43.00ms +[2025-09-11 14:38:26] [Rank 0] step:6401/10000 train_time:277263ms step_avg:43.32ms +[2025-09-11 14:38:26] [Rank 0] step:6401/10000 train_time:277263ms step_avg:43.32ms +[2025-09-11 14:38:26] [Rank 0] step:6421/10000 train_time:277986ms step_avg:43.29ms +[2025-09-11 14:38:26] [Rank 0] step:6421/10000 train_time:277986ms step_avg:43.29ms +[2025-09-11 14:38:27] [Rank 0] step:6441/10000 train_time:278679ms step_avg:43.27ms +[2025-09-11 14:38:27] [Rank 0] step:6441/10000 train_time:278679ms step_avg:43.27ms +[2025-09-11 14:38:28] [Rank 0] step:6461/10000 train_time:279372ms step_avg:43.24ms +[2025-09-11 14:38:28] [Rank 0] step:6461/10000 train_time:279372ms step_avg:43.24ms +[2025-09-11 14:38:28] [Rank 0] step:6481/10000 train_time:280066ms step_avg:43.21ms +[2025-09-11 14:38:28] [Rank 0] step:6481/10000 
train_time:280066ms step_avg:43.21ms +[2025-09-11 14:38:29] [Rank 0] step:6501/10000 train_time:280760ms step_avg:43.19ms +[2025-09-11 14:38:29] [Rank 0] step:6501/10000 train_time:280760ms step_avg:43.19ms +[2025-09-11 14:38:30] [Rank 0] step:6521/10000 train_time:281453ms step_avg:43.16ms +[2025-09-11 14:38:30] [Rank 0] step:6521/10000 train_time:281453ms step_avg:43.16ms +[2025-09-11 14:38:31] [Rank 0] step:6541/10000 train_time:282144ms step_avg:43.13ms +[2025-09-11 14:38:31] [Rank 0] step:6541/10000 train_time:282144ms step_avg:43.13ms +[2025-09-11 14:38:31] [Rank 0] step:6561/10000 train_time:282836ms step_avg:43.11ms +[2025-09-11 14:38:31] [Rank 0] step:6561/10000 train_time:282836ms step_avg:43.11ms +[2025-09-11 14:38:32] [Rank 0] step:6581/10000 train_time:283528ms step_avg:43.08ms +[2025-09-11 14:38:32] [Rank 0] step:6581/10000 train_time:283528ms step_avg:43.08ms +[2025-09-11 14:38:33] [Rank 0] step:6601/10000 train_time:284221ms step_avg:43.06ms +[2025-09-11 14:38:33] [Rank 0] step:6601/10000 train_time:284221ms step_avg:43.06ms +[2025-09-11 14:38:33] [Rank 0] step:6621/10000 train_time:284911ms step_avg:43.03ms +[2025-09-11 14:38:33] [Rank 0] step:6621/10000 train_time:284911ms step_avg:43.03ms +[2025-09-11 14:38:34] [Rank 0] step:6641/10000 train_time:285603ms step_avg:43.01ms +[2025-09-11 14:38:34] [Rank 0] step:6641/10000 train_time:285603ms step_avg:43.01ms +[2025-09-11 14:38:35] [Rank 0] step:6661/10000 train_time:286296ms step_avg:42.98ms +[2025-09-11 14:38:35] [Rank 0] step:6661/10000 train_time:286296ms step_avg:42.98ms +[2025-09-11 14:38:35] [Rank 0] step:6681/10000 train_time:286996ms step_avg:42.96ms +[2025-09-11 14:38:35] [Rank 0] step:6681/10000 train_time:286996ms step_avg:42.96ms +[2025-09-11 14:38:36] [Rank 0] step:6701/10000 train_time:287695ms step_avg:42.93ms +[2025-09-11 14:38:36] [Rank 0] step:6701/10000 train_time:287695ms step_avg:42.93ms +[2025-09-11 14:38:37] [Rank 0] step:6721/10000 train_time:288393ms step_avg:42.91ms 
+[2025-09-11 14:38:37] [Rank 0] step:6721/10000 train_time:288393ms step_avg:42.91ms +[2025-09-11 14:38:38] [Rank 0] step:6741/10000 train_time:289092ms step_avg:42.89ms +[2025-09-11 14:38:38] [Rank 0] step:6741/10000 train_time:289092ms step_avg:42.89ms +[2025-09-11 14:38:38] [Rank 0] step:6761/10000 train_time:289789ms step_avg:42.86ms +[2025-09-11 14:38:38] [Rank 0] step:6761/10000 train_time:289789ms step_avg:42.86ms +[2025-09-11 14:38:39] [Rank 0] step:6781/10000 train_time:290761ms step_avg:42.88ms +[2025-09-11 14:38:39] [Rank 0] step:6781/10000 train_time:290761ms step_avg:42.88ms +[2025-09-11 14:38:40] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 14:38:40] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 14:38:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 14:38:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 14:38:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 14:38:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 14:38:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:38:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:38:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 14:38:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 14:38:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 14:38:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 14:38:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 14:38:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 14:38:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 14:38:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 14:38:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 14:38:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 14:38:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 14:38:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 14:38:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 14:38:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 14:38:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 14:38:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 14:38:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 14:38:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 14:38:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 14:38:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 14:38:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 14:38:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 14:38:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 14:38:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 14:38:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 14:38:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 14:38:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 14:38:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 14:38:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 14:38:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 14:38:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 14:38:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 14:38:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 14:38:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 14:38:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 14:38:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 14:38:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 14:38:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 14:38:51] [Rank 0] PRINT: step:6800/10000 val_loss:4.3623 total_sharp:8.4105e-05 L1_sharp:2.2513e-02 L2_sharp:3.3806e-02 L3_sharp:5.5798e-02 L4_sharp:6.8642e-02 L5_sharp:7.6268e-02 L6_sharp:1.1034e-01 L7_sharp:1.1937e-01 L8_sharp:1.2673e-01 L9_sharp:1.4245e-01 L10_sharp:1.7851e-01 L11_sharp:3.6901e-01 L12_sharp:1.0072e+00 total_fnorm:6.1750e+01 total_l1_linf:1.0957e+05 total_spectral:3.0875e+01 L1_fnorm:8.5449e-02 L2_fnorm:8.5938e-02 L3_fnorm:8.5938e-02 L4_fnorm:8.5938e-02 L5_fnorm:8.5449e-02 L6_fnorm:8.4961e-02 L7_fnorm:8.5449e-02 L8_fnorm:8.2520e-02 L9_fnorm:8.4473e-02 L10_fnorm:8.4961e-02 L11_fnorm:8.4961e-02 L12_fnorm:8.4473e-02 L1_l1linf:1.9409e-02 L2_l1linf:2.0020e-02 L3_l1linf:2.0142e-02 L4_l1linf:1.9653e-02 L5_l1linf:1.9043e-02 L6_l1linf:2.0020e-02 L7_l1linf:1.9775e-02 L8_l1linf:2.0020e-02 L9_l1linf:1.9409e-02 L10_l1linf:1.9531e-02 L11_l1linf:1.9043e-02 L12_l1linf:1.9531e-02 L1_spectral:1.2910e-03 L2_spectral:1.2855e-03 L3_spectral:1.2905e-03 L4_spectral:1.2902e-03 L5_spectral:1.2905e-03 L6_spectral:1.2872e-03 L7_spectral:1.2865e-03 L8_spectral:1.2379e-03 L9_spectral:1.2831e-03 L10_spectral:1.2781e-03 L11_spectral:1.2755e-03 L12_spectral:1.2536e-03 train_time:291439ms step_avg:42.86ms +[2025-09-11 14:38:51] [Rank 0] PRINT: step:6800/10000 val_loss:4.3623 total_sharp:8.4105e-05 L1_sharp:2.2513e-02 L2_sharp:3.3806e-02 L3_sharp:5.5798e-02 L4_sharp:6.8642e-02 L5_sharp:7.6268e-02 L6_sharp:1.1034e-01 L7_sharp:1.1937e-01 L8_sharp:1.2673e-01 L9_sharp:1.4245e-01 L10_sharp:1.7851e-01 L11_sharp:3.6901e-01 L12_sharp:1.0072e+00 total_fnorm:6.1750e+01 total_l1_linf:1.0957e+05 total_spectral:3.0875e+01 L1_fnorm:8.5449e-02 L2_fnorm:8.5938e-02 L3_fnorm:8.5938e-02 L4_fnorm:8.5938e-02 L5_fnorm:8.5449e-02 L6_fnorm:8.4961e-02 L7_fnorm:8.5449e-02 L8_fnorm:8.2520e-02 L9_fnorm:8.4473e-02 L10_fnorm:8.4961e-02 L11_fnorm:8.4961e-02 L12_fnorm:8.4473e-02 L1_l1linf:1.9409e-02 L2_l1linf:2.0020e-02 L3_l1linf:2.0142e-02 L4_l1linf:1.9653e-02 L5_l1linf:1.9043e-02 
L6_l1linf:2.0020e-02 L7_l1linf:1.9775e-02 L8_l1linf:2.0020e-02 L9_l1linf:1.9409e-02 L10_l1linf:1.9531e-02 L11_l1linf:1.9043e-02 L12_l1linf:1.9531e-02 L1_spectral:1.2910e-03 L2_spectral:1.2855e-03 L3_spectral:1.2905e-03 L4_spectral:1.2902e-03 L5_spectral:1.2905e-03 L6_spectral:1.2872e-03 L7_spectral:1.2865e-03 L8_spectral:1.2379e-03 L9_spectral:1.2831e-03 L10_spectral:1.2781e-03 L11_spectral:1.2755e-03 L12_spectral:1.2536e-03 train_time:291439ms step_avg:42.86ms +[2025-09-11 14:38:53] [Rank 0] step:6801/10000 train_time:293484ms step_avg:43.15ms +[2025-09-11 14:38:53] [Rank 0] step:6801/10000 train_time:293484ms step_avg:43.15ms +[2025-09-11 14:38:54] [Rank 0] step:6821/10000 train_time:294217ms step_avg:43.13ms +[2025-09-11 14:38:54] [Rank 0] step:6821/10000 train_time:294217ms step_avg:43.13ms +[2025-09-11 14:38:55] [Rank 0] step:6841/10000 train_time:294918ms step_avg:43.11ms +[2025-09-11 14:38:55] [Rank 0] step:6841/10000 train_time:294918ms step_avg:43.11ms +[2025-09-11 14:38:55] [Rank 0] step:6861/10000 train_time:295618ms step_avg:43.09ms +[2025-09-11 14:38:55] [Rank 0] step:6861/10000 train_time:295618ms step_avg:43.09ms +[2025-09-11 14:38:56] [Rank 0] step:6881/10000 train_time:296318ms step_avg:43.06ms +[2025-09-11 14:38:56] [Rank 0] step:6881/10000 train_time:296318ms step_avg:43.06ms +[2025-09-11 14:38:57] [Rank 0] step:6901/10000 train_time:297017ms step_avg:43.04ms +[2025-09-11 14:38:57] [Rank 0] step:6901/10000 train_time:297017ms step_avg:43.04ms +[2025-09-11 14:38:57] [Rank 0] step:6921/10000 train_time:297716ms step_avg:43.02ms +[2025-09-11 14:38:57] [Rank 0] step:6921/10000 train_time:297716ms step_avg:43.02ms +[2025-09-11 14:38:58] [Rank 0] step:6941/10000 train_time:298415ms step_avg:42.99ms +[2025-09-11 14:38:58] [Rank 0] step:6941/10000 train_time:298415ms step_avg:42.99ms +[2025-09-11 14:38:59] [Rank 0] step:6961/10000 train_time:299116ms step_avg:42.97ms +[2025-09-11 14:38:59] [Rank 0] step:6961/10000 train_time:299116ms step_avg:42.97ms 
+[2025-09-11 14:39:00] [Rank 0] step:6981/10000 train_time:299817ms step_avg:42.95ms +[2025-09-11 14:39:00] [Rank 0] step:6981/10000 train_time:299817ms step_avg:42.95ms +[2025-09-11 14:39:00] [Rank 0] step:7001/10000 train_time:300517ms step_avg:42.92ms +[2025-09-11 14:39:00] [Rank 0] step:7001/10000 train_time:300517ms step_avg:42.92ms +[2025-09-11 14:39:01] [Rank 0] step:7021/10000 train_time:301216ms step_avg:42.90ms +[2025-09-11 14:39:01] [Rank 0] step:7021/10000 train_time:301216ms step_avg:42.90ms +[2025-09-11 14:39:02] [Rank 0] step:7041/10000 train_time:301914ms step_avg:42.88ms +[2025-09-11 14:39:02] [Rank 0] step:7041/10000 train_time:301914ms step_avg:42.88ms +[2025-09-11 14:39:02] [Rank 0] step:7061/10000 train_time:302614ms step_avg:42.86ms +[2025-09-11 14:39:02] [Rank 0] step:7061/10000 train_time:302614ms step_avg:42.86ms +[2025-09-11 14:39:03] [Rank 0] step:7081/10000 train_time:303313ms step_avg:42.83ms +[2025-09-11 14:39:03] [Rank 0] step:7081/10000 train_time:303313ms step_avg:42.83ms +[2025-09-11 14:39:04] [Rank 0] step:7101/10000 train_time:304013ms step_avg:42.81ms +[2025-09-11 14:39:04] [Rank 0] step:7101/10000 train_time:304013ms step_avg:42.81ms +[2025-09-11 14:39:04] [Rank 0] step:7121/10000 train_time:304715ms step_avg:42.79ms +[2025-09-11 14:39:04] [Rank 0] step:7121/10000 train_time:304715ms step_avg:42.79ms +[2025-09-11 14:39:05] [Rank 0] step:7141/10000 train_time:305414ms step_avg:42.77ms +[2025-09-11 14:39:05] [Rank 0] step:7141/10000 train_time:305414ms step_avg:42.77ms +[2025-09-11 14:39:06] [Rank 0] step:7161/10000 train_time:306114ms step_avg:42.75ms +[2025-09-11 14:39:06] [Rank 0] step:7161/10000 train_time:306114ms step_avg:42.75ms +[2025-09-11 14:39:07] [Rank 0] step:7181/10000 train_time:306812ms step_avg:42.73ms +[2025-09-11 14:39:07] [Rank 0] step:7181/10000 train_time:306812ms step_avg:42.73ms +[2025-09-11 14:39:07] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 14:39:07] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 14:39:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 14:39:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 14:39:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 14:39:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 14:39:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:39:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:39:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 14:39:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 14:39:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 14:39:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 14:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 14:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 14:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 14:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 14:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 14:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 14:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 14:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 14:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 14:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 14:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 14:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 14:39:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 14:39:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 14:39:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 14:39:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 14:39:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 14:39:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 14:39:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 14:39:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 14:39:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 14:39:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 14:39:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 14:39:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 14:39:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 14:39:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 14:39:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 14:39:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 14:39:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 14:39:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 14:39:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 14:39:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 14:39:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 14:39:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 14:39:19] [Rank 0] PRINT: step:7200/10000 val_loss:4.3331 total_sharp:7.9537e-05 L1_sharp:3.0532e-02 L2_sharp:4.3076e-02 L3_sharp:5.5901e-02 L4_sharp:7.5498e-02 L5_sharp:8.5301e-02 L6_sharp:1.0578e-01 L7_sharp:1.2204e-01 L8_sharp:1.3432e-01 L9_sharp:1.4763e-01 L10_sharp:1.9479e-01 L11_sharp:3.2298e-01 L12_sharp:6.2430e-01 total_fnorm:5.3000e+01 total_l1_linf:9.0112e+04 total_spectral:2.6500e+01 L1_fnorm:7.2266e-02 L2_fnorm:7.2754e-02 L3_fnorm:7.2266e-02 L4_fnorm:7.2754e-02 L5_fnorm:7.2266e-02 L6_fnorm:7.2266e-02 L7_fnorm:7.2266e-02 L8_fnorm:7.0312e-02 L9_fnorm:7.2266e-02 L10_fnorm:7.1777e-02 L11_fnorm:7.1777e-02 L12_fnorm:7.0801e-02 L1_l1linf:1.5625e-02 L2_l1linf:1.6113e-02 L3_l1linf:1.5869e-02 L4_l1linf:1.5869e-02 L5_l1linf:1.5747e-02 L6_l1linf:1.6113e-02 L7_l1linf:1.6235e-02 L8_l1linf:1.6113e-02 L9_l1linf:1.5869e-02 L10_l1linf:1.6113e-02 L11_l1linf:1.5442e-02 L12_l1linf:1.5869e-02 L1_spectral:1.1307e-03 L2_spectral:1.1335e-03 L3_spectral:1.1264e-03 L4_spectral:1.1379e-03 L5_spectral:1.1246e-03 L6_spectral:1.1230e-03 L7_spectral:1.1259e-03 L8_spectral:1.0744e-03 L9_spectral:1.1349e-03 L10_spectral:1.1188e-03 L11_spectral:1.1131e-03 L12_spectral:1.0782e-03 train_time:307493ms step_avg:42.71ms +[2025-09-11 14:39:19] [Rank 0] PRINT: step:7200/10000 
val_loss:4.3331 total_sharp:7.9537e-05 L1_sharp:3.0532e-02 L2_sharp:4.3076e-02 L3_sharp:5.5901e-02 L4_sharp:7.5498e-02 L5_sharp:8.5301e-02 L6_sharp:1.0578e-01 L7_sharp:1.2204e-01 L8_sharp:1.3432e-01 L9_sharp:1.4763e-01 L10_sharp:1.9479e-01 L11_sharp:3.2298e-01 L12_sharp:6.2430e-01 total_fnorm:5.3000e+01 total_l1_linf:9.0112e+04 total_spectral:2.6500e+01 L1_fnorm:7.2266e-02 L2_fnorm:7.2754e-02 L3_fnorm:7.2266e-02 L4_fnorm:7.2754e-02 L5_fnorm:7.2266e-02 L6_fnorm:7.2266e-02 L7_fnorm:7.2266e-02 L8_fnorm:7.0312e-02 L9_fnorm:7.2266e-02 L10_fnorm:7.1777e-02 L11_fnorm:7.1777e-02 L12_fnorm:7.0801e-02 L1_l1linf:1.5625e-02 L2_l1linf:1.6113e-02 L3_l1linf:1.5869e-02 L4_l1linf:1.5869e-02 L5_l1linf:1.5747e-02 L6_l1linf:1.6113e-02 L7_l1linf:1.6235e-02 L8_l1linf:1.6113e-02 L9_l1linf:1.5869e-02 L10_l1linf:1.6113e-02 L11_l1linf:1.5442e-02 L12_l1linf:1.5869e-02 L1_spectral:1.1307e-03 L2_spectral:1.1335e-03 L3_spectral:1.1264e-03 L4_spectral:1.1379e-03 L5_spectral:1.1246e-03 L6_spectral:1.1230e-03 L7_spectral:1.1259e-03 L8_spectral:1.0744e-03 L9_spectral:1.1349e-03 L10_spectral:1.1188e-03 L11_spectral:1.1131e-03 L12_spectral:1.0782e-03 train_time:307493ms step_avg:42.71ms +[2025-09-11 14:39:21] [Rank 0] step:7201/10000 train_time:309464ms step_avg:42.98ms +[2025-09-11 14:39:21] [Rank 0] step:7201/10000 train_time:309464ms step_avg:42.98ms +[2025-09-11 14:39:21] [Rank 0] step:7221/10000 train_time:310182ms step_avg:42.96ms +[2025-09-11 14:39:21] [Rank 0] step:7221/10000 train_time:310182ms step_avg:42.96ms +[2025-09-11 14:39:22] [Rank 0] step:7241/10000 train_time:310882ms step_avg:42.93ms +[2025-09-11 14:39:22] [Rank 0] step:7241/10000 train_time:310882ms step_avg:42.93ms +[2025-09-11 14:39:23] [Rank 0] step:7261/10000 train_time:311585ms step_avg:42.91ms +[2025-09-11 14:39:23] [Rank 0] step:7261/10000 train_time:311585ms step_avg:42.91ms +[2025-09-11 14:39:23] [Rank 0] step:7281/10000 train_time:312291ms step_avg:42.89ms +[2025-09-11 14:39:23] [Rank 0] step:7281/10000 
train_time:312291ms step_avg:42.89ms +[2025-09-11 14:39:24] [Rank 0] step:7301/10000 train_time:312991ms step_avg:42.87ms +[2025-09-11 14:39:24] [Rank 0] step:7301/10000 train_time:312991ms step_avg:42.87ms +[2025-09-11 14:39:25] [Rank 0] step:7321/10000 train_time:313692ms step_avg:42.85ms +[2025-09-11 14:39:25] [Rank 0] step:7321/10000 train_time:313692ms step_avg:42.85ms +[2025-09-11 14:39:25] [Rank 0] step:7341/10000 train_time:314393ms step_avg:42.83ms +[2025-09-11 14:39:25] [Rank 0] step:7341/10000 train_time:314393ms step_avg:42.83ms +[2025-09-11 14:39:26] [Rank 0] step:7361/10000 train_time:315094ms step_avg:42.81ms +[2025-09-11 14:39:26] [Rank 0] step:7361/10000 train_time:315094ms step_avg:42.81ms +[2025-09-11 14:39:27] [Rank 0] step:7381/10000 train_time:315796ms step_avg:42.78ms +[2025-09-11 14:39:27] [Rank 0] step:7381/10000 train_time:315796ms step_avg:42.78ms +[2025-09-11 14:39:28] [Rank 0] step:7401/10000 train_time:316494ms step_avg:42.76ms +[2025-09-11 14:39:28] [Rank 0] step:7401/10000 train_time:316494ms step_avg:42.76ms +[2025-09-11 14:39:28] [Rank 0] step:7421/10000 train_time:317194ms step_avg:42.74ms +[2025-09-11 14:39:28] [Rank 0] step:7421/10000 train_time:317194ms step_avg:42.74ms +[2025-09-11 14:39:29] [Rank 0] step:7441/10000 train_time:317896ms step_avg:42.72ms +[2025-09-11 14:39:29] [Rank 0] step:7441/10000 train_time:317896ms step_avg:42.72ms +[2025-09-11 14:39:30] [Rank 0] step:7461/10000 train_time:318597ms step_avg:42.70ms +[2025-09-11 14:39:30] [Rank 0] step:7461/10000 train_time:318597ms step_avg:42.70ms +[2025-09-11 14:39:30] [Rank 0] step:7481/10000 train_time:319299ms step_avg:42.68ms +[2025-09-11 14:39:30] [Rank 0] step:7481/10000 train_time:319299ms step_avg:42.68ms +[2025-09-11 14:39:31] [Rank 0] step:7501/10000 train_time:319999ms step_avg:42.66ms +[2025-09-11 14:39:31] [Rank 0] step:7501/10000 train_time:319999ms step_avg:42.66ms +[2025-09-11 14:39:32] [Rank 0] step:7521/10000 train_time:320701ms step_avg:42.64ms 
+[2025-09-11 14:39:32] [Rank 0] step:7521/10000 train_time:320701ms step_avg:42.64ms +[2025-09-11 14:39:32] [Rank 0] step:7541/10000 train_time:321400ms step_avg:42.62ms +[2025-09-11 14:39:32] [Rank 0] step:7541/10000 train_time:321400ms step_avg:42.62ms +[2025-09-11 14:39:33] [Rank 0] step:7561/10000 train_time:322104ms step_avg:42.60ms +[2025-09-11 14:39:33] [Rank 0] step:7561/10000 train_time:322104ms step_avg:42.60ms +[2025-09-11 14:39:34] [Rank 0] step:7581/10000 train_time:322806ms step_avg:42.58ms +[2025-09-11 14:39:34] [Rank 0] step:7581/10000 train_time:322806ms step_avg:42.58ms +[2025-09-11 14:39:35] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 14:39:35] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 14:39:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 14:39:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 14:39:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 14:39:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 14:39:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:39:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:39:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 14:39:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 14:39:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 14:39:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 14:39:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 14:39:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 14:39:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 14:39:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 14:39:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 14:39:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 14:39:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 14:39:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 14:39:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 14:39:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 14:39:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 14:39:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 14:39:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 14:39:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 14:39:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 14:39:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 14:39:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 14:39:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 14:39:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 14:39:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 14:39:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 14:39:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 14:39:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 14:39:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 14:39:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 14:39:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 14:39:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 14:39:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 14:39:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 14:39:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 14:39:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 14:39:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 14:39:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 14:39:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 14:39:46] [Rank 0] PRINT: step:7600/10000 val_loss:4.3036 total_sharp:7.3117e-05 L1_sharp:2.8199e-02 L2_sharp:3.8321e-02 L3_sharp:5.0747e-02 L4_sharp:6.5522e-02 L5_sharp:7.3232e-02 L6_sharp:1.1151e-01 L7_sharp:1.2492e-01 L8_sharp:1.2270e-01 L9_sharp:1.2907e-01 L10_sharp:1.7035e-01 L11_sharp:3.2316e-01 L12_sharp:4.9675e-01 total_fnorm:4.3000e+01 total_l1_linf:6.7584e+04 total_spectral:2.1375e+01 L1_fnorm:5.9326e-02 L2_fnorm:5.9814e-02 L3_fnorm:6.0059e-02 L4_fnorm:6.0059e-02 L5_fnorm:5.9326e-02 L6_fnorm:5.9570e-02 L7_fnorm:5.9570e-02 L8_fnorm:5.7617e-02 L9_fnorm:5.9082e-02 L10_fnorm:5.8838e-02 L11_fnorm:5.8594e-02 L12_fnorm:5.7861e-02 L1_l1linf:1.2146e-02 L2_l1linf:1.2878e-02 L3_l1linf:1.2878e-02 L4_l1linf:1.2817e-02 L5_l1linf:1.2695e-02 L6_l1linf:1.2573e-02 L7_l1linf:1.3062e-02 L8_l1linf:1.2695e-02 L9_l1linf:1.2573e-02 L10_l1linf:1.2329e-02 L11_l1linf:1.2024e-02 L12_l1linf:1.2756e-02 L1_spectral:9.5860e-04 L2_spectral:9.6634e-04 L3_spectral:9.5862e-04 L4_spectral:9.6886e-04 L5_spectral:9.5978e-04 L6_spectral:9.6079e-04 L7_spectral:9.6218e-04 L8_spectral:9.0169e-04 L9_spectral:9.5510e-04 L10_spectral:9.5150e-04 L11_spectral:9.4405e-04 L12_spectral:8.9476e-04 train_time:323489ms step_avg:42.56ms +[2025-09-11 14:39:46] [Rank 0] PRINT: step:7600/10000 val_loss:4.3036 total_sharp:7.3117e-05 L1_sharp:2.8199e-02 L2_sharp:3.8321e-02 L3_sharp:5.0747e-02 L4_sharp:6.5522e-02 L5_sharp:7.3232e-02 L6_sharp:1.1151e-01 L7_sharp:1.2492e-01 L8_sharp:1.2270e-01 L9_sharp:1.2907e-01 L10_sharp:1.7035e-01 L11_sharp:3.2316e-01 L12_sharp:4.9675e-01 total_fnorm:4.3000e+01 total_l1_linf:6.7584e+04 total_spectral:2.1375e+01 L1_fnorm:5.9326e-02 L2_fnorm:5.9814e-02 L3_fnorm:6.0059e-02 L4_fnorm:6.0059e-02 L5_fnorm:5.9326e-02 L6_fnorm:5.9570e-02 L7_fnorm:5.9570e-02 L8_fnorm:5.7617e-02 L9_fnorm:5.9082e-02 L10_fnorm:5.8838e-02 L11_fnorm:5.8594e-02 L12_fnorm:5.7861e-02 L1_l1linf:1.2146e-02 L2_l1linf:1.2878e-02 L3_l1linf:1.2878e-02 L4_l1linf:1.2817e-02 L5_l1linf:1.2695e-02 
L6_l1linf:1.2573e-02 L7_l1linf:1.3062e-02 L8_l1linf:1.2695e-02 L9_l1linf:1.2573e-02 L10_l1linf:1.2329e-02 L11_l1linf:1.2024e-02 L12_l1linf:1.2756e-02 L1_spectral:9.5860e-04 L2_spectral:9.6634e-04 L3_spectral:9.5862e-04 L4_spectral:9.6886e-04 L5_spectral:9.5978e-04 L6_spectral:9.6079e-04 L7_spectral:9.6218e-04 L8_spectral:9.0169e-04 L9_spectral:9.5510e-04 L10_spectral:9.5150e-04 L11_spectral:9.4405e-04 L12_spectral:8.9476e-04 train_time:323489ms step_avg:42.56ms +[2025-09-11 14:39:48] [Rank 0] step:7601/10000 train_time:325505ms step_avg:42.82ms +[2025-09-11 14:39:48] [Rank 0] step:7601/10000 train_time:325505ms step_avg:42.82ms +[2025-09-11 14:39:49] [Rank 0] step:7621/10000 train_time:326236ms step_avg:42.81ms +[2025-09-11 14:39:49] [Rank 0] step:7621/10000 train_time:326236ms step_avg:42.81ms +[2025-09-11 14:39:49] [Rank 0] step:7641/10000 train_time:326940ms step_avg:42.79ms +[2025-09-11 14:39:49] [Rank 0] step:7641/10000 train_time:326940ms step_avg:42.79ms +[2025-09-11 14:39:50] [Rank 0] step:7661/10000 train_time:327642ms step_avg:42.77ms +[2025-09-11 14:39:50] [Rank 0] step:7661/10000 train_time:327642ms step_avg:42.77ms +[2025-09-11 14:39:51] [Rank 0] step:7681/10000 train_time:328344ms step_avg:42.75ms +[2025-09-11 14:39:51] [Rank 0] step:7681/10000 train_time:328344ms step_avg:42.75ms +[2025-09-11 14:39:51] [Rank 0] step:7701/10000 train_time:329046ms step_avg:42.73ms +[2025-09-11 14:39:51] [Rank 0] step:7701/10000 train_time:329046ms step_avg:42.73ms +[2025-09-11 14:39:52] [Rank 0] step:7721/10000 train_time:329749ms step_avg:42.71ms +[2025-09-11 14:39:52] [Rank 0] step:7721/10000 train_time:329749ms step_avg:42.71ms +[2025-09-11 14:39:53] [Rank 0] step:7741/10000 train_time:330452ms step_avg:42.69ms +[2025-09-11 14:39:53] [Rank 0] step:7741/10000 train_time:330452ms step_avg:42.69ms +[2025-09-11 14:39:53] [Rank 0] step:7761/10000 train_time:331152ms step_avg:42.67ms +[2025-09-11 14:39:53] [Rank 0] step:7761/10000 train_time:331152ms step_avg:42.67ms 
+[2025-09-11 14:39:54] [Rank 0] step:7781/10000 train_time:331856ms step_avg:42.65ms +[2025-09-11 14:39:54] [Rank 0] step:7781/10000 train_time:331856ms step_avg:42.65ms +[2025-09-11 14:39:55] [Rank 0] step:7801/10000 train_time:332557ms step_avg:42.63ms +[2025-09-11 14:39:55] [Rank 0] step:7801/10000 train_time:332557ms step_avg:42.63ms +[2025-09-11 14:39:56] [Rank 0] step:7821/10000 train_time:333258ms step_avg:42.61ms +[2025-09-11 14:39:56] [Rank 0] step:7821/10000 train_time:333258ms step_avg:42.61ms +[2025-09-11 14:39:56] [Rank 0] step:7841/10000 train_time:333962ms step_avg:42.59ms +[2025-09-11 14:39:56] [Rank 0] step:7841/10000 train_time:333962ms step_avg:42.59ms +[2025-09-11 14:39:57] [Rank 0] step:7861/10000 train_time:334667ms step_avg:42.57ms +[2025-09-11 14:39:57] [Rank 0] step:7861/10000 train_time:334667ms step_avg:42.57ms +[2025-09-11 14:39:58] [Rank 0] step:7881/10000 train_time:335370ms step_avg:42.55ms +[2025-09-11 14:39:58] [Rank 0] step:7881/10000 train_time:335370ms step_avg:42.55ms +[2025-09-11 14:39:58] [Rank 0] step:7901/10000 train_time:336073ms step_avg:42.54ms +[2025-09-11 14:39:58] [Rank 0] step:7901/10000 train_time:336073ms step_avg:42.54ms +[2025-09-11 14:39:59] [Rank 0] step:7921/10000 train_time:336776ms step_avg:42.52ms +[2025-09-11 14:39:59] [Rank 0] step:7921/10000 train_time:336776ms step_avg:42.52ms +[2025-09-11 14:40:00] [Rank 0] step:7941/10000 train_time:337480ms step_avg:42.50ms +[2025-09-11 14:40:00] [Rank 0] step:7941/10000 train_time:337480ms step_avg:42.50ms +[2025-09-11 14:40:00] [Rank 0] step:7961/10000 train_time:338181ms step_avg:42.48ms +[2025-09-11 14:40:00] [Rank 0] step:7961/10000 train_time:338181ms step_avg:42.48ms +[2025-09-11 14:40:01] [Rank 0] step:7981/10000 train_time:338948ms step_avg:42.47ms +[2025-09-11 14:40:01] [Rank 0] step:7981/10000 train_time:338948ms step_avg:42.47ms +[2025-09-11 14:40:02] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 14:40:02] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 14:40:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 14:40:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 14:40:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 14:40:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 14:40:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:40:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:40:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 14:40:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 14:40:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 14:40:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 14:40:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 14:40:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 14:40:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 14:40:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 14:40:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 14:40:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 14:40:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 14:40:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 14:40:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 14:40:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 14:40:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 14:40:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 14:40:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 14:40:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 14:40:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 14:40:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 14:40:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 14:40:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 14:40:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 14:40:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 14:40:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 14:40:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 14:40:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 14:40:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 14:40:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 14:40:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 14:40:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 14:40:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 14:40:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 14:40:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 14:40:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 14:40:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 14:40:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 14:40:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 14:40:16] [Rank 0] PRINT: step:8000/10000 val_loss:4.2857 total_sharp:6.4419e-05 L1_sharp:2.5717e-02 L2_sharp:3.7869e-02 L3_sharp:4.7942e-02 L4_sharp:6.5349e-02 L5_sharp:7.7852e-02 L6_sharp:1.0498e-01 L7_sharp:1.2639e-01 L8_sharp:1.1771e-01 L9_sharp:1.3090e-01 L10_sharp:1.8811e-01 L11_sharp:2.5407e-01 L12_sharp:1.2828e+00 total_fnorm:3.6750e+01 total_l1_linf:5.5040e+04 total_spectral:1.8375e+01 L1_fnorm:4.7607e-02 L2_fnorm:4.8096e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.8096e-02 L5_fnorm:4.7607e-02 L6_fnorm:4.7607e-02 L7_fnorm:4.7852e-02 L8_fnorm:4.5898e-02 L9_fnorm:4.7363e-02 L10_fnorm:4.7119e-02 L11_fnorm:4.6875e-02 L12_fnorm:4.6387e-02 L1_l1linf:8.9111e-03 L2_l1linf:9.5825e-03 L3_l1linf:9.5215e-03 L4_l1linf:9.5215e-03 L5_l1linf:9.2163e-03 L6_l1linf:9.8877e-03 L7_l1linf:9.4604e-03 L8_l1linf:9.5215e-03 L9_l1linf:9.5215e-03 L10_l1linf:9.1553e-03 L11_l1linf:9.0942e-03 L12_l1linf:9.3994e-03 L1_spectral:7.9400e-04 L2_spectral:8.0706e-04 L3_spectral:7.9964e-04 L4_spectral:8.1029e-04 L5_spectral:7.9418e-04 L6_spectral:7.9678e-04 L7_spectral:8.0028e-04 L8_spectral:7.3776e-04 L9_spectral:7.8801e-04 L10_spectral:7.8198e-04 L11_spectral:7.8536e-04 L12_spectral:7.3099e-04 train_time:339665ms step_avg:42.46ms +[2025-09-11 14:40:16] [Rank 0] PRINT: step:8000/10000 
val_loss:4.2857 total_sharp:6.4419e-05 L1_sharp:2.5717e-02 L2_sharp:3.7869e-02 L3_sharp:4.7942e-02 L4_sharp:6.5349e-02 L5_sharp:7.7852e-02 L6_sharp:1.0498e-01 L7_sharp:1.2639e-01 L8_sharp:1.1771e-01 L9_sharp:1.3090e-01 L10_sharp:1.8811e-01 L11_sharp:2.5407e-01 L12_sharp:1.2828e+00 total_fnorm:3.6750e+01 total_l1_linf:5.5040e+04 total_spectral:1.8375e+01 L1_fnorm:4.7607e-02 L2_fnorm:4.8096e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.8096e-02 L5_fnorm:4.7607e-02 L6_fnorm:4.7607e-02 L7_fnorm:4.7852e-02 L8_fnorm:4.5898e-02 L9_fnorm:4.7363e-02 L10_fnorm:4.7119e-02 L11_fnorm:4.6875e-02 L12_fnorm:4.6387e-02 L1_l1linf:8.9111e-03 L2_l1linf:9.5825e-03 L3_l1linf:9.5215e-03 L4_l1linf:9.5215e-03 L5_l1linf:9.2163e-03 L6_l1linf:9.8877e-03 L7_l1linf:9.4604e-03 L8_l1linf:9.5215e-03 L9_l1linf:9.5215e-03 L10_l1linf:9.1553e-03 L11_l1linf:9.0942e-03 L12_l1linf:9.3994e-03 L1_spectral:7.9400e-04 L2_spectral:8.0706e-04 L3_spectral:7.9964e-04 L4_spectral:8.1029e-04 L5_spectral:7.9418e-04 L6_spectral:7.9678e-04 L7_spectral:8.0028e-04 L8_spectral:7.3776e-04 L9_spectral:7.8801e-04 L10_spectral:7.8198e-04 L11_spectral:7.8536e-04 L12_spectral:7.3099e-04 train_time:339665ms step_avg:42.46ms +[2025-09-11 14:40:19] [Rank 0] step:8001/10000 train_time:341936ms step_avg:42.74ms +[2025-09-11 14:40:19] [Rank 0] step:8001/10000 train_time:341936ms step_avg:42.74ms +[2025-09-11 14:40:19] [Rank 0] step:8021/10000 train_time:342669ms step_avg:42.72ms +[2025-09-11 14:40:19] [Rank 0] step:8021/10000 train_time:342669ms step_avg:42.72ms +[2025-09-11 14:40:20] [Rank 0] step:8041/10000 train_time:343372ms step_avg:42.70ms +[2025-09-11 14:40:20] [Rank 0] step:8041/10000 train_time:343372ms step_avg:42.70ms +[2025-09-11 14:40:21] [Rank 0] step:8061/10000 train_time:344078ms step_avg:42.68ms +[2025-09-11 14:40:21] [Rank 0] step:8061/10000 train_time:344078ms step_avg:42.68ms +[2025-09-11 14:40:22] [Rank 0] step:8081/10000 train_time:344780ms step_avg:42.67ms +[2025-09-11 14:40:22] [Rank 0] step:8081/10000 
train_time:344780ms step_avg:42.67ms +[2025-09-11 14:40:22] [Rank 0] step:8101/10000 train_time:345485ms step_avg:42.65ms +[2025-09-11 14:40:22] [Rank 0] step:8101/10000 train_time:345485ms step_avg:42.65ms +[2025-09-11 14:40:23] [Rank 0] step:8121/10000 train_time:346192ms step_avg:42.63ms +[2025-09-11 14:40:23] [Rank 0] step:8121/10000 train_time:346192ms step_avg:42.63ms +[2025-09-11 14:40:24] [Rank 0] step:8141/10000 train_time:347621ms step_avg:42.70ms +[2025-09-11 14:40:24] [Rank 0] step:8141/10000 train_time:347621ms step_avg:42.70ms +[2025-09-11 14:40:25] [Rank 0] step:8161/10000 train_time:348328ms step_avg:42.68ms +[2025-09-11 14:40:25] [Rank 0] step:8161/10000 train_time:348328ms step_avg:42.68ms +[2025-09-11 14:40:26] [Rank 0] step:8181/10000 train_time:349042ms step_avg:42.66ms +[2025-09-11 14:40:26] [Rank 0] step:8181/10000 train_time:349042ms step_avg:42.66ms +[2025-09-11 14:40:27] [Rank 0] step:8201/10000 train_time:349752ms step_avg:42.65ms +[2025-09-11 14:40:27] [Rank 0] step:8201/10000 train_time:349752ms step_avg:42.65ms +[2025-09-11 14:40:27] [Rank 0] step:8221/10000 train_time:350462ms step_avg:42.63ms +[2025-09-11 14:40:27] [Rank 0] step:8221/10000 train_time:350462ms step_avg:42.63ms +[2025-09-11 14:40:28] [Rank 0] step:8241/10000 train_time:351179ms step_avg:42.61ms +[2025-09-11 14:40:28] [Rank 0] step:8241/10000 train_time:351179ms step_avg:42.61ms +[2025-09-11 14:40:29] [Rank 0] step:8261/10000 train_time:351888ms step_avg:42.60ms +[2025-09-11 14:40:29] [Rank 0] step:8261/10000 train_time:351888ms step_avg:42.60ms +[2025-09-11 14:40:29] [Rank 0] step:8281/10000 train_time:352594ms step_avg:42.58ms +[2025-09-11 14:40:29] [Rank 0] step:8281/10000 train_time:352594ms step_avg:42.58ms +[2025-09-11 14:40:30] [Rank 0] step:8301/10000 train_time:353303ms step_avg:42.56ms +[2025-09-11 14:40:30] [Rank 0] step:8301/10000 train_time:353303ms step_avg:42.56ms +[2025-09-11 14:40:31] [Rank 0] step:8321/10000 train_time:354011ms step_avg:42.54ms 
+[2025-09-11 14:40:31] [Rank 0] step:8321/10000 train_time:354011ms step_avg:42.54ms +[2025-09-11 14:40:32] [Rank 0] step:8341/10000 train_time:354726ms step_avg:42.53ms +[2025-09-11 14:40:32] [Rank 0] step:8341/10000 train_time:354726ms step_avg:42.53ms +[2025-09-11 14:40:32] [Rank 0] step:8361/10000 train_time:355430ms step_avg:42.51ms +[2025-09-11 14:40:32] [Rank 0] step:8361/10000 train_time:355430ms step_avg:42.51ms +[2025-09-11 14:40:33] [Rank 0] step:8381/10000 train_time:356143ms step_avg:42.49ms +[2025-09-11 14:40:33] [Rank 0] step:8381/10000 train_time:356143ms step_avg:42.49ms +[2025-09-11 14:40:34] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 14:40:34] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 14:40:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 14:40:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 14:40:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 14:40:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 14:40:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:40:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:40:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 14:40:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 14:40:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 14:40:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 14:40:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 14:40:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 14:40:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 14:40:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 14:40:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 14:40:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 14:40:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 14:40:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 14:40:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 14:40:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 14:40:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 14:40:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 14:40:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 14:40:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 14:40:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 14:40:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 14:40:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 14:40:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 14:40:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 14:40:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 14:40:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 14:40:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 14:40:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 14:40:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 14:40:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 14:40:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 14:40:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 14:40:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 14:40:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 14:40:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 14:40:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 14:40:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 14:40:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 14:40:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 14:40:45] [Rank 0] PRINT: step:8400/10000 val_loss:4.2680 total_sharp:5.4293e-05 L1_sharp:1.9471e-02 L2_sharp:3.2834e-02 L3_sharp:4.5107e-02 L4_sharp:6.3238e-02 L5_sharp:7.4029e-02 L6_sharp:9.1518e-02 L7_sharp:1.1734e-01 L8_sharp:1.1681e-01 L9_sharp:1.2525e-01 L10_sharp:1.5791e-01 L11_sharp:2.5537e-01 L12_sharp:4.7174e-01 total_fnorm:2.8875e+01 total_l1_linf:3.9424e+04 total_spectral:1.4438e+01 L1_fnorm:3.6377e-02 L2_fnorm:3.6865e-02 L3_fnorm:3.7109e-02 L4_fnorm:3.7109e-02 L5_fnorm:3.6865e-02 L6_fnorm:3.6865e-02 L7_fnorm:3.7109e-02 L8_fnorm:3.5645e-02 L9_fnorm:3.6621e-02 L10_fnorm:3.6621e-02 L11_fnorm:3.6133e-02 L12_fnorm:3.5645e-02 L1_l1linf:6.5308e-03 L2_l1linf:6.6833e-03 L3_l1linf:6.8665e-03 L4_l1linf:7.1716e-03 L5_l1linf:6.6528e-03 L6_l1linf:6.7444e-03 L7_l1linf:6.9580e-03 L8_l1linf:6.7139e-03 L9_l1linf:6.8054e-03 L10_l1linf:6.6833e-03 L11_l1linf:6.4697e-03 L12_l1linf:6.8054e-03 L1_spectral:6.2723e-04 L2_spectral:6.3899e-04 L3_spectral:6.3377e-04 L4_spectral:6.4002e-04 L5_spectral:6.2633e-04 L6_spectral:6.2854e-04 L7_spectral:6.3039e-04 L8_spectral:5.8091e-04 L9_spectral:6.2322e-04 L10_spectral:6.1818e-04 L11_spectral:6.1016e-04 L12_spectral:5.7114e-04 train_time:356835ms step_avg:42.48ms +[2025-09-11 14:40:45] [Rank 0] PRINT: step:8400/10000 val_loss:4.2680 total_sharp:5.4293e-05 L1_sharp:1.9471e-02 L2_sharp:3.2834e-02 L3_sharp:4.5107e-02 L4_sharp:6.3238e-02 L5_sharp:7.4029e-02 L6_sharp:9.1518e-02 L7_sharp:1.1734e-01 L8_sharp:1.1681e-01 L9_sharp:1.2525e-01 L10_sharp:1.5791e-01 L11_sharp:2.5537e-01 L12_sharp:4.7174e-01 total_fnorm:2.8875e+01 total_l1_linf:3.9424e+04 total_spectral:1.4438e+01 L1_fnorm:3.6377e-02 L2_fnorm:3.6865e-02 L3_fnorm:3.7109e-02 L4_fnorm:3.7109e-02 L5_fnorm:3.6865e-02 L6_fnorm:3.6865e-02 L7_fnorm:3.7109e-02 L8_fnorm:3.5645e-02 L9_fnorm:3.6621e-02 L10_fnorm:3.6621e-02 L11_fnorm:3.6133e-02 L12_fnorm:3.5645e-02 L1_l1linf:6.5308e-03 L2_l1linf:6.6833e-03 L3_l1linf:6.8665e-03 L4_l1linf:7.1716e-03 L5_l1linf:6.6528e-03 
L6_l1linf:6.7444e-03 L7_l1linf:6.9580e-03 L8_l1linf:6.7139e-03 L9_l1linf:6.8054e-03 L10_l1linf:6.6833e-03 L11_l1linf:6.4697e-03 L12_l1linf:6.8054e-03 L1_spectral:6.2723e-04 L2_spectral:6.3899e-04 L3_spectral:6.3377e-04 L4_spectral:6.4002e-04 L5_spectral:6.2633e-04 L6_spectral:6.2854e-04 L7_spectral:6.3039e-04 L8_spectral:5.8091e-04 L9_spectral:6.2322e-04 L10_spectral:6.1818e-04 L11_spectral:6.1016e-04 L12_spectral:5.7114e-04 train_time:356835ms step_avg:42.48ms +[2025-09-11 14:40:47] [Rank 0] step:8401/10000 train_time:359066ms step_avg:42.74ms +[2025-09-11 14:40:47] [Rank 0] step:8401/10000 train_time:359066ms step_avg:42.74ms +[2025-09-11 14:40:48] [Rank 0] step:8421/10000 train_time:360060ms step_avg:42.76ms +[2025-09-11 14:40:48] [Rank 0] step:8421/10000 train_time:360060ms step_avg:42.76ms +[2025-09-11 14:40:49] [Rank 0] step:8441/10000 train_time:360771ms step_avg:42.74ms +[2025-09-11 14:40:49] [Rank 0] step:8441/10000 train_time:360771ms step_avg:42.74ms +[2025-09-11 14:40:50] [Rank 0] step:8461/10000 train_time:361484ms step_avg:42.72ms +[2025-09-11 14:40:50] [Rank 0] step:8461/10000 train_time:361484ms step_avg:42.72ms +[2025-09-11 14:40:50] [Rank 0] step:8481/10000 train_time:362195ms step_avg:42.71ms +[2025-09-11 14:40:50] [Rank 0] step:8481/10000 train_time:362195ms step_avg:42.71ms +[2025-09-11 14:40:51] [Rank 0] step:8501/10000 train_time:362904ms step_avg:42.69ms +[2025-09-11 14:40:51] [Rank 0] step:8501/10000 train_time:362904ms step_avg:42.69ms +[2025-09-11 14:40:52] [Rank 0] step:8521/10000 train_time:363614ms step_avg:42.67ms +[2025-09-11 14:40:52] [Rank 0] step:8521/10000 train_time:363614ms step_avg:42.67ms +[2025-09-11 14:40:53] [Rank 0] step:8541/10000 train_time:364325ms step_avg:42.66ms +[2025-09-11 14:40:53] [Rank 0] step:8541/10000 train_time:364325ms step_avg:42.66ms +[2025-09-11 14:40:53] [Rank 0] step:8561/10000 train_time:365039ms step_avg:42.64ms +[2025-09-11 14:40:53] [Rank 0] step:8561/10000 train_time:365039ms step_avg:42.64ms 
+[2025-09-11 14:40:54] [Rank 0] step:8581/10000 train_time:365753ms step_avg:42.62ms +[2025-09-11 14:40:54] [Rank 0] step:8581/10000 train_time:365753ms step_avg:42.62ms +[2025-09-11 14:40:55] [Rank 0] step:8601/10000 train_time:366465ms step_avg:42.61ms +[2025-09-11 14:40:55] [Rank 0] step:8601/10000 train_time:366465ms step_avg:42.61ms +[2025-09-11 14:40:55] [Rank 0] step:8621/10000 train_time:367174ms step_avg:42.59ms +[2025-09-11 14:40:55] [Rank 0] step:8621/10000 train_time:367174ms step_avg:42.59ms +[2025-09-11 14:40:56] [Rank 0] step:8641/10000 train_time:367883ms step_avg:42.57ms +[2025-09-11 14:40:56] [Rank 0] step:8641/10000 train_time:367883ms step_avg:42.57ms +[2025-09-11 14:40:57] [Rank 0] step:8661/10000 train_time:368594ms step_avg:42.56ms +[2025-09-11 14:40:57] [Rank 0] step:8661/10000 train_time:368594ms step_avg:42.56ms +[2025-09-11 14:40:58] [Rank 0] step:8681/10000 train_time:369306ms step_avg:42.54ms +[2025-09-11 14:40:58] [Rank 0] step:8681/10000 train_time:369306ms step_avg:42.54ms +[2025-09-11 14:40:58] [Rank 0] step:8701/10000 train_time:370015ms step_avg:42.53ms +[2025-09-11 14:40:58] [Rank 0] step:8701/10000 train_time:370015ms step_avg:42.53ms +[2025-09-11 14:40:59] [Rank 0] step:8721/10000 train_time:370727ms step_avg:42.51ms +[2025-09-11 14:40:59] [Rank 0] step:8721/10000 train_time:370727ms step_avg:42.51ms +[2025-09-11 14:41:00] [Rank 0] step:8741/10000 train_time:371432ms step_avg:42.49ms +[2025-09-11 14:41:00] [Rank 0] step:8741/10000 train_time:371432ms step_avg:42.49ms +[2025-09-11 14:41:00] [Rank 0] step:8761/10000 train_time:372145ms step_avg:42.48ms +[2025-09-11 14:41:00] [Rank 0] step:8761/10000 train_time:372145ms step_avg:42.48ms +[2025-09-11 14:41:01] [Rank 0] step:8781/10000 train_time:372852ms step_avg:42.46ms +[2025-09-11 14:41:01] [Rank 0] step:8781/10000 train_time:372852ms step_avg:42.46ms +[2025-09-11 14:41:02] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 14:41:02] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 14:41:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 14:41:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 14:41:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 14:41:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 14:41:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:41:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:41:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 14:41:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 14:41:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 14:41:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 14:41:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 14:41:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 14:41:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 14:41:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 14:41:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 14:41:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 14:41:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 14:41:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 14:41:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 14:41:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 14:41:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 14:41:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 14:41:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 14:41:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.002_seed_42/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.002_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..796ad4eafb39473503848201ea3a187b7bd88e01 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.002_seed_42/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.02, + "muon_lr": 0.002, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "a5e4cd0c-bd9c-49da-8a35-06f1315dad2f", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git 
a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.002_seed_42/training_log_a5e4cd0c-bd9c-49da-8a35-06f1315dad2f.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.002_seed_42/training_log_a5e4cd0c-bd9c-49da-8a35-06f1315dad2f.txt new file mode 100644 index 0000000000000000000000000000000000000000..39b00c084fc2316ffca12e7ce5f4b65318a0dee7 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.002_seed_42/training_log_a5e4cd0c-bd9c-49da-8a35-06f1315dad2f.txt @@ -0,0 +1,4264 @@ +[2025-09-11 10:06:12] [Rank 0] PRINT: --- Script Start: Thu Sep 11 10:06:12 2025 --- +[2025-09-11 10:06:12] [Rank 0] PRINT: --- Script Start: Thu Sep 11 10:06:12 2025 --- +[2025-09-11 10:06:12] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.02, muon_lr=0.002, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 10:06:12] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.02, muon_lr=0.002, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 10:06:12] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 10:06:12] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 10:06:12] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-11 10:06:12] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-11 10:06:12] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.002_seed_42 +[2025-09-11 10:06:12] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.002_seed_42 +[2025-09-11 10:06:12] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path 
+import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", 
buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." 
+ "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # 
FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 10:06:12] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 10:06:12] [Rank 0] PRINT: Constructing model... +[2025-09-11 10:06:12] [Rank 0] PRINT: Constructing model... +[2025-09-11 10:06:13] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 10:06:13] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 10:06:13] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 10:06:13] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 10:06:13] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 10:06:13] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 10:06:13] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 10:06:13] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 10:06:13] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 10:06:13] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 10:06:15] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 10:06:15] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 10:06:15] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 10:06:15] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 10:06:15] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 10:06:15] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 10:06:21] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 10:06:21] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 10:06:21] [Rank 0] PRINT: Starting warmup... +[2025-09-11 10:06:21] [Rank 0] PRINT: Starting warmup... +[2025-09-11 10:07:01] [Rank 0] PRINT: Warmup complete. +[2025-09-11 10:07:01] [Rank 0] PRINT: Warmup complete. +[2025-09-11 10:07:01] [Rank 0] PRINT: Starting training... +[2025-09-11 10:07:01] [Rank 0] PRINT: Starting training... 
+[2025-09-11 10:07:02] [Rank 0] step:21/10000 train_time:1132ms step_avg:53.89ms +[2025-09-11 10:07:02] [Rank 0] step:21/10000 train_time:1132ms step_avg:53.89ms +[2025-09-11 10:07:03] [Rank 0] step:41/10000 train_time:1859ms step_avg:45.34ms +[2025-09-11 10:07:03] [Rank 0] step:41/10000 train_time:1859ms step_avg:45.34ms +[2025-09-11 10:07:04] [Rank 0] step:61/10000 train_time:2585ms step_avg:42.37ms +[2025-09-11 10:07:04] [Rank 0] step:61/10000 train_time:2585ms step_avg:42.37ms +[2025-09-11 10:07:04] [Rank 0] step:81/10000 train_time:3311ms step_avg:40.88ms +[2025-09-11 10:07:04] [Rank 0] step:81/10000 train_time:3311ms step_avg:40.88ms +[2025-09-11 10:07:05] [Rank 0] step:101/10000 train_time:4037ms step_avg:39.97ms +[2025-09-11 10:07:05] [Rank 0] step:101/10000 train_time:4037ms step_avg:39.97ms +[2025-09-11 10:07:06] [Rank 0] step:121/10000 train_time:4763ms step_avg:39.36ms +[2025-09-11 10:07:06] [Rank 0] step:121/10000 train_time:4763ms step_avg:39.36ms +[2025-09-11 10:07:07] [Rank 0] step:141/10000 train_time:5487ms step_avg:38.92ms +[2025-09-11 10:07:07] [Rank 0] step:141/10000 train_time:5487ms step_avg:38.92ms +[2025-09-11 10:07:07] [Rank 0] step:161/10000 train_time:6211ms step_avg:38.58ms +[2025-09-11 10:07:07] [Rank 0] step:161/10000 train_time:6211ms step_avg:38.58ms +[2025-09-11 10:07:08] [Rank 0] step:181/10000 train_time:6936ms step_avg:38.32ms +[2025-09-11 10:07:08] [Rank 0] step:181/10000 train_time:6936ms step_avg:38.32ms +[2025-09-11 10:07:09] [Rank 0] step:201/10000 train_time:7661ms step_avg:38.11ms +[2025-09-11 10:07:09] [Rank 0] step:201/10000 train_time:7661ms step_avg:38.11ms +[2025-09-11 10:07:09] [Rank 0] step:221/10000 train_time:8386ms step_avg:37.95ms +[2025-09-11 10:07:09] [Rank 0] step:221/10000 train_time:8386ms step_avg:37.95ms +[2025-09-11 10:07:10] [Rank 0] step:241/10000 train_time:9111ms step_avg:37.81ms +[2025-09-11 10:07:10] [Rank 0] step:241/10000 train_time:9111ms step_avg:37.81ms +[2025-09-11 10:07:11] [Rank 0] 
step:261/10000 train_time:9836ms step_avg:37.68ms +[2025-09-11 10:07:11] [Rank 0] step:261/10000 train_time:9836ms step_avg:37.68ms +[2025-09-11 10:07:12] [Rank 0] step:281/10000 train_time:10560ms step_avg:37.58ms +[2025-09-11 10:07:12] [Rank 0] step:281/10000 train_time:10560ms step_avg:37.58ms +[2025-09-11 10:07:12] [Rank 0] step:301/10000 train_time:11285ms step_avg:37.49ms +[2025-09-11 10:07:12] [Rank 0] step:301/10000 train_time:11285ms step_avg:37.49ms +[2025-09-11 10:07:13] [Rank 0] step:321/10000 train_time:12011ms step_avg:37.42ms +[2025-09-11 10:07:13] [Rank 0] step:321/10000 train_time:12011ms step_avg:37.42ms +[2025-09-11 10:07:14] [Rank 0] step:341/10000 train_time:12744ms step_avg:37.37ms +[2025-09-11 10:07:14] [Rank 0] step:341/10000 train_time:12744ms step_avg:37.37ms +[2025-09-11 10:07:15] [Rank 0] step:361/10000 train_time:13470ms step_avg:37.31ms +[2025-09-11 10:07:15] [Rank 0] step:361/10000 train_time:13470ms step_avg:37.31ms +[2025-09-11 10:07:15] [Rank 0] step:381/10000 train_time:14195ms step_avg:37.26ms +[2025-09-11 10:07:15] [Rank 0] step:381/10000 train_time:14195ms step_avg:37.26ms +[2025-09-11 10:07:16] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 10:07:16] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 10:07:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 10:07:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 10:07:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 10:07:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 10:07:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:07:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 10:07:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 10:07:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 10:07:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 10:07:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 10:07:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 10:07:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 10:07:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 10:07:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 10:07:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 10:07:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 10:07:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 10:07:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 10:07:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 10:07:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 10:08:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 10:08:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 10:08:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 10:08:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 10:08:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 10:08:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 10:08:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 10:08:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 10:08:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 10:08:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 10:08:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 10:08:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 10:08:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 10:08:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 10:08:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 10:08:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 10:08:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 10:08:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 10:08:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 10:08:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 10:08:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 10:08:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 10:08:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:08:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:08:02] [Rank 0] PRINT: step:400/10000 val_loss:5.9610 total_sharp:4.3167e-04 L1_sharp:1.0133e-01 L2_sharp:8.3911e-02 L3_sharp:7.7931e-02 L4_sharp:7.7267e-02 L5_sharp:7.8593e-02 L6_sharp:8.3015e-02 L7_sharp:9.1168e-02 L8_sharp:9.0381e-02 L9_sharp:1.1328e-01 L10_sharp:1.6432e-01 L11_sharp:1.6644e-01 L12_sharp:4.1721e-01 total_fnorm:7.8517e+01 total_l1_linf:1.9284e+05 total_spectral:3.9263e+01 L1_fnorm:2.4230e-01 L2_fnorm:2.4223e-01 L3_fnorm:2.4155e-01 L4_fnorm:2.4055e-01 L5_fnorm:2.4078e-01 L6_fnorm:2.4110e-01 L7_fnorm:2.4225e-01 L8_fnorm:2.4176e-01 L9_fnorm:2.4193e-01 L10_fnorm:2.4119e-01 L11_fnorm:2.3967e-01 L12_fnorm:2.3605e-01 L1_l1linf:8.9335e-02 L2_l1linf:8.9602e-02 L3_l1linf:8.9718e-02 L4_l1linf:8.8646e-02 L5_l1linf:8.8534e-02 L6_l1linf:8.8088e-02 L7_l1linf:8.8122e-02 L8_l1linf:8.8204e-02 L9_l1linf:8.8766e-02 L10_l1linf:8.8114e-02 L11_l1linf:8.8629e-02 L12_l1linf:8.7778e-02 L1_spectral:2.4089e-03 L2_spectral:2.4098e-03 L3_spectral:2.4106e-03 L4_spectral:2.4100e-03 L5_spectral:2.4105e-03 L6_spectral:2.4106e-03 L7_spectral:2.4136e-03 L8_spectral:2.4112e-03 L9_spectral:2.4098e-03 L10_spectral:2.4097e-03 L11_spectral:2.4094e-03 L12_spectral:2.4090e-03 train_time:14900ms step_avg:37.25ms +[2025-09-11 10:08:02] [Rank 0] PRINT: step:400/10000 val_loss:5.9610 total_sharp:4.3167e-04 L1_sharp:1.0133e-01 L2_sharp:8.3911e-02 L3_sharp:7.7931e-02 L4_sharp:7.7267e-02 L5_sharp:7.8593e-02 L6_sharp:8.3015e-02 L7_sharp:9.1168e-02 L8_sharp:9.0381e-02 L9_sharp:1.1328e-01 L10_sharp:1.6432e-01 L11_sharp:1.6644e-01 L12_sharp:4.1721e-01 total_fnorm:7.8517e+01 total_l1_linf:1.9284e+05 total_spectral:3.9263e+01 L1_fnorm:2.4230e-01 L2_fnorm:2.4223e-01 L3_fnorm:2.4155e-01 L4_fnorm:2.4055e-01 L5_fnorm:2.4078e-01 L6_fnorm:2.4110e-01 L7_fnorm:2.4225e-01 L8_fnorm:2.4176e-01 L9_fnorm:2.4193e-01 L10_fnorm:2.4119e-01 L11_fnorm:2.3967e-01 L12_fnorm:2.3605e-01 L1_l1linf:8.9335e-02 L2_l1linf:8.9602e-02 L3_l1linf:8.9718e-02 L4_l1linf:8.8646e-02 L5_l1linf:8.8534e-02 
L6_l1linf:8.8088e-02 L7_l1linf:8.8122e-02 L8_l1linf:8.8204e-02 L9_l1linf:8.8766e-02 L10_l1linf:8.8114e-02 L11_l1linf:8.8629e-02 L12_l1linf:8.7778e-02 L1_spectral:2.4089e-03 L2_spectral:2.4098e-03 L3_spectral:2.4106e-03 L4_spectral:2.4100e-03 L5_spectral:2.4105e-03 L6_spectral:2.4106e-03 L7_spectral:2.4136e-03 L8_spectral:2.4112e-03 L9_spectral:2.4098e-03 L10_spectral:2.4097e-03 L11_spectral:2.4094e-03 L12_spectral:2.4090e-03 train_time:14900ms step_avg:37.25ms +[2025-09-11 10:08:33] [Rank 0] step:401/10000 train_time:45468ms step_avg:113.39ms +[2025-09-11 10:08:33] [Rank 0] step:401/10000 train_time:45468ms step_avg:113.39ms +[2025-09-11 10:08:35] [Rank 0] step:421/10000 train_time:47685ms step_avg:113.27ms +[2025-09-11 10:08:35] [Rank 0] step:421/10000 train_time:47685ms step_avg:113.27ms +[2025-09-11 10:08:36] [Rank 0] step:441/10000 train_time:48323ms step_avg:109.58ms +[2025-09-11 10:08:36] [Rank 0] step:441/10000 train_time:48323ms step_avg:109.58ms +[2025-09-11 10:08:37] [Rank 0] step:461/10000 train_time:48963ms step_avg:106.21ms +[2025-09-11 10:08:37] [Rank 0] step:461/10000 train_time:48963ms step_avg:106.21ms +[2025-09-11 10:08:37] [Rank 0] step:481/10000 train_time:49600ms step_avg:103.12ms +[2025-09-11 10:08:37] [Rank 0] step:481/10000 train_time:49600ms step_avg:103.12ms +[2025-09-11 10:08:38] [Rank 0] step:501/10000 train_time:50239ms step_avg:100.28ms +[2025-09-11 10:08:38] [Rank 0] step:501/10000 train_time:50239ms step_avg:100.28ms +[2025-09-11 10:08:38] [Rank 0] step:521/10000 train_time:50878ms step_avg:97.65ms +[2025-09-11 10:08:38] [Rank 0] step:521/10000 train_time:50878ms step_avg:97.65ms +[2025-09-11 10:08:39] [Rank 0] step:541/10000 train_time:51516ms step_avg:95.22ms +[2025-09-11 10:08:39] [Rank 0] step:541/10000 train_time:51516ms step_avg:95.22ms +[2025-09-11 10:08:40] [Rank 0] step:561/10000 train_time:52153ms step_avg:92.96ms +[2025-09-11 10:08:40] [Rank 0] step:561/10000 train_time:52153ms step_avg:92.96ms +[2025-09-11 10:08:40] [Rank 
0] step:581/10000 train_time:52789ms step_avg:90.86ms +[2025-09-11 10:08:40] [Rank 0] step:581/10000 train_time:52789ms step_avg:90.86ms +[2025-09-11 10:08:41] [Rank 0] step:601/10000 train_time:53426ms step_avg:88.90ms +[2025-09-11 10:08:41] [Rank 0] step:601/10000 train_time:53426ms step_avg:88.90ms +[2025-09-11 10:08:42] [Rank 0] step:621/10000 train_time:54064ms step_avg:87.06ms +[2025-09-11 10:08:42] [Rank 0] step:621/10000 train_time:54064ms step_avg:87.06ms +[2025-09-11 10:08:42] [Rank 0] step:641/10000 train_time:54701ms step_avg:85.34ms +[2025-09-11 10:08:42] [Rank 0] step:641/10000 train_time:54701ms step_avg:85.34ms +[2025-09-11 10:08:43] [Rank 0] step:661/10000 train_time:55338ms step_avg:83.72ms +[2025-09-11 10:08:43] [Rank 0] step:661/10000 train_time:55338ms step_avg:83.72ms +[2025-09-11 10:08:44] [Rank 0] step:681/10000 train_time:55976ms step_avg:82.20ms +[2025-09-11 10:08:44] [Rank 0] step:681/10000 train_time:55976ms step_avg:82.20ms +[2025-09-11 10:08:44] [Rank 0] step:701/10000 train_time:56615ms step_avg:80.76ms +[2025-09-11 10:08:44] [Rank 0] step:701/10000 train_time:56615ms step_avg:80.76ms +[2025-09-11 10:08:45] [Rank 0] step:721/10000 train_time:57251ms step_avg:79.40ms +[2025-09-11 10:08:45] [Rank 0] step:721/10000 train_time:57251ms step_avg:79.40ms +[2025-09-11 10:08:45] [Rank 0] step:741/10000 train_time:57888ms step_avg:78.12ms +[2025-09-11 10:08:45] [Rank 0] step:741/10000 train_time:57888ms step_avg:78.12ms +[2025-09-11 10:08:46] [Rank 0] step:761/10000 train_time:58531ms step_avg:76.91ms +[2025-09-11 10:08:46] [Rank 0] step:761/10000 train_time:58531ms step_avg:76.91ms +[2025-09-11 10:08:47] [Rank 0] step:781/10000 train_time:59174ms step_avg:75.77ms +[2025-09-11 10:08:47] [Rank 0] step:781/10000 train_time:59174ms step_avg:75.77ms +[2025-09-11 10:08:47] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 10:08:47] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 10:08:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 10:08:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 10:09:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 10:09:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 10:09:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:09:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:09:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 10:09:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 10:09:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 10:09:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 10:09:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 10:09:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 10:09:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 10:09:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 10:09:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 10:09:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 10:09:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 10:09:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 10:09:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 10:09:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 10:09:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 10:09:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 10:09:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 10:09:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 10:09:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 10:09:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 10:09:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 10:09:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 10:09:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 10:09:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 10:09:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 10:09:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 10:09:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 10:09:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 10:09:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 10:09:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 10:09:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 10:09:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 10:09:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... 
+[2025-09-11 10:09:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 10:09:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 10:09:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 10:09:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:09:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:09:32] [Rank 0] PRINT: step:800/10000 val_loss:5.4984 total_sharp:3.9149e-04 L1_sharp:4.6223e-02 L2_sharp:4.0458e-02 L3_sharp:4.6456e-02 L4_sharp:5.1507e-02 L5_sharp:5.5997e-02 L6_sharp:6.2463e-02 L7_sharp:7.0093e-02 L8_sharp:9.2877e-02 L9_sharp:1.1858e-01 L10_sharp:3.2258e-01 L11_sharp:3.3938e-01 L12_sharp:4.0583e-01 total_fnorm:7.6500e+01 total_l1_linf:1.6384e+05 total_spectral:3.8500e+01 L1_fnorm:2.4707e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4902e-01 L5_fnorm:2.4707e-01 L6_fnorm:2.5000e-01 L7_fnorm:2.5000e-01 L8_fnorm:2.4805e-01 L9_fnorm:2.5195e-01 L10_fnorm:2.5000e-01 L11_fnorm:2.4609e-01 L12_fnorm:2.3145e-01 L1_l1linf:8.7402e-02 L2_l1linf:8.6914e-02 L3_l1linf:8.5938e-02 L4_l1linf:8.6426e-02 L5_l1linf:8.5938e-02 L6_l1linf:8.5938e-02 L7_l1linf:8.5938e-02 L8_l1linf:8.5938e-02 L9_l1linf:8.6426e-02 L10_l1linf:8.5449e-02 L11_l1linf:8.3496e-02 L12_l1linf:7.9590e-02 L1_spectral:3.1209e-03 L2_spectral:3.1243e-03 L3_spectral:3.1031e-03 L4_spectral:3.1185e-03 L5_spectral:3.1034e-03 L6_spectral:3.0911e-03 L7_spectral:3.0867e-03 L8_spectral:3.1066e-03 L9_spectral:3.0993e-03 L10_spectral:3.0852e-03 L11_spectral:3.0983e-03 L12_spectral:3.0586e-03 train_time:59799ms step_avg:74.75ms +[2025-09-11 10:09:32] [Rank 0] PRINT: step:800/10000 val_loss:5.4984 total_sharp:3.9149e-04 L1_sharp:4.6223e-02 L2_sharp:4.0458e-02 L3_sharp:4.6456e-02 L4_sharp:5.1507e-02 L5_sharp:5.5997e-02 L6_sharp:6.2463e-02 L7_sharp:7.0093e-02 L8_sharp:9.2877e-02 
L9_sharp:1.1858e-01 L10_sharp:3.2258e-01 L11_sharp:3.3938e-01 L12_sharp:4.0583e-01 total_fnorm:7.6500e+01 total_l1_linf:1.6384e+05 total_spectral:3.8500e+01 L1_fnorm:2.4707e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4902e-01 L5_fnorm:2.4707e-01 L6_fnorm:2.5000e-01 L7_fnorm:2.5000e-01 L8_fnorm:2.4805e-01 L9_fnorm:2.5195e-01 L10_fnorm:2.5000e-01 L11_fnorm:2.4609e-01 L12_fnorm:2.3145e-01 L1_l1linf:8.7402e-02 L2_l1linf:8.6914e-02 L3_l1linf:8.5938e-02 L4_l1linf:8.6426e-02 L5_l1linf:8.5938e-02 L6_l1linf:8.5938e-02 L7_l1linf:8.5938e-02 L8_l1linf:8.5938e-02 L9_l1linf:8.6426e-02 L10_l1linf:8.5449e-02 L11_l1linf:8.3496e-02 L12_l1linf:7.9590e-02 L1_spectral:3.1209e-03 L2_spectral:3.1243e-03 L3_spectral:3.1031e-03 L4_spectral:3.1185e-03 L5_spectral:3.1034e-03 L6_spectral:3.0911e-03 L7_spectral:3.0867e-03 L8_spectral:3.1066e-03 L9_spectral:3.0993e-03 L10_spectral:3.0852e-03 L11_spectral:3.0983e-03 L12_spectral:3.0586e-03 train_time:59799ms step_avg:74.75ms +[2025-09-11 10:09:33] [Rank 0] step:801/10000 train_time:61383ms step_avg:76.63ms +[2025-09-11 10:09:33] [Rank 0] step:801/10000 train_time:61383ms step_avg:76.63ms +[2025-09-11 10:09:34] [Rank 0] step:821/10000 train_time:62031ms step_avg:75.55ms +[2025-09-11 10:09:34] [Rank 0] step:821/10000 train_time:62031ms step_avg:75.55ms +[2025-09-11 10:09:34] [Rank 0] step:841/10000 train_time:62676ms step_avg:74.53ms +[2025-09-11 10:09:34] [Rank 0] step:841/10000 train_time:62676ms step_avg:74.53ms +[2025-09-11 10:09:35] [Rank 0] step:861/10000 train_time:63323ms step_avg:73.55ms +[2025-09-11 10:09:35] [Rank 0] step:861/10000 train_time:63323ms step_avg:73.55ms +[2025-09-11 10:09:36] [Rank 0] step:881/10000 train_time:63968ms step_avg:72.61ms +[2025-09-11 10:09:36] [Rank 0] step:881/10000 train_time:63968ms step_avg:72.61ms +[2025-09-11 10:09:36] [Rank 0] step:901/10000 train_time:64611ms step_avg:71.71ms +[2025-09-11 10:09:36] [Rank 0] step:901/10000 train_time:64611ms step_avg:71.71ms +[2025-09-11 10:09:37] [Rank 0] 
step:921/10000 train_time:65255ms step_avg:70.85ms +[2025-09-11 10:09:37] [Rank 0] step:921/10000 train_time:65255ms step_avg:70.85ms +[2025-09-11 10:09:38] [Rank 0] step:941/10000 train_time:65899ms step_avg:70.03ms +[2025-09-11 10:09:38] [Rank 0] step:941/10000 train_time:65899ms step_avg:70.03ms +[2025-09-11 10:09:38] [Rank 0] step:961/10000 train_time:66543ms step_avg:69.24ms +[2025-09-11 10:09:38] [Rank 0] step:961/10000 train_time:66543ms step_avg:69.24ms +[2025-09-11 10:09:39] [Rank 0] step:981/10000 train_time:67187ms step_avg:68.49ms +[2025-09-11 10:09:39] [Rank 0] step:981/10000 train_time:67187ms step_avg:68.49ms +[2025-09-11 10:09:40] [Rank 0] step:1001/10000 train_time:67830ms step_avg:67.76ms +[2025-09-11 10:09:40] [Rank 0] step:1001/10000 train_time:67830ms step_avg:67.76ms +[2025-09-11 10:09:40] [Rank 0] step:1021/10000 train_time:68474ms step_avg:67.07ms +[2025-09-11 10:09:40] [Rank 0] step:1021/10000 train_time:68474ms step_avg:67.07ms +[2025-09-11 10:09:41] [Rank 0] step:1041/10000 train_time:69119ms step_avg:66.40ms +[2025-09-11 10:09:41] [Rank 0] step:1041/10000 train_time:69119ms step_avg:66.40ms +[2025-09-11 10:09:42] [Rank 0] step:1061/10000 train_time:69763ms step_avg:65.75ms +[2025-09-11 10:09:42] [Rank 0] step:1061/10000 train_time:69763ms step_avg:65.75ms +[2025-09-11 10:09:42] [Rank 0] step:1081/10000 train_time:70407ms step_avg:65.13ms +[2025-09-11 10:09:42] [Rank 0] step:1081/10000 train_time:70407ms step_avg:65.13ms +[2025-09-11 10:09:43] [Rank 0] step:1101/10000 train_time:71050ms step_avg:64.53ms +[2025-09-11 10:09:43] [Rank 0] step:1101/10000 train_time:71050ms step_avg:64.53ms +[2025-09-11 10:09:43] [Rank 0] step:1121/10000 train_time:71694ms step_avg:63.96ms +[2025-09-11 10:09:43] [Rank 0] step:1121/10000 train_time:71694ms step_avg:63.96ms +[2025-09-11 10:09:44] [Rank 0] step:1141/10000 train_time:72338ms step_avg:63.40ms +[2025-09-11 10:09:44] [Rank 0] step:1141/10000 train_time:72338ms step_avg:63.40ms +[2025-09-11 10:09:45] 
[Rank 0] step:1161/10000 train_time:72981ms step_avg:62.86ms +[2025-09-11 10:09:45] [Rank 0] step:1161/10000 train_time:72981ms step_avg:62.86ms +[2025-09-11 10:09:45] [Rank 0] step:1181/10000 train_time:73625ms step_avg:62.34ms +[2025-09-11 10:09:45] [Rank 0] step:1181/10000 train_time:73625ms step_avg:62.34ms +[2025-09-11 10:09:46] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 10:09:46] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 10:09:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 10:09:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 10:09:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 10:09:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 10:09:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:09:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:09:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 10:09:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 10:09:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 10:09:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 10:09:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 10:09:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 10:09:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... 
+[2025-09-11 10:09:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 10:09:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 10:09:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 10:09:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 10:09:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 10:09:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 10:09:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 10:09:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 10:09:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 10:09:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 10:09:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 10:09:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 10:09:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 10:09:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 10:09:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 10:09:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 10:09:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 10:09:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 10:09:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 10:09:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... 
+[2025-09-11 10:09:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 10:09:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 10:09:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 10:09:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 10:09:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 10:09:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 10:09:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 10:09:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 10:09:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 10:09:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:09:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:09:56] [Rank 0] PRINT: step:1200/10000 val_loss:5.2247 total_sharp:2.8561e-04 L1_sharp:3.3969e-02 L2_sharp:2.6455e-02 L3_sharp:2.5907e-02 L4_sharp:3.3055e-02 L5_sharp:3.9804e-02 L6_sharp:3.9986e-02 L7_sharp:4.8116e-02 L8_sharp:6.1173e-02 L9_sharp:7.0992e-02 L10_sharp:1.1000e-01 L11_sharp:1.3903e-01 L12_sharp:6.6574e-01 total_fnorm:7.9000e+01 total_l1_linf:1.6282e+05 total_spectral:3.9500e+01 L1_fnorm:2.4902e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4707e-01 L6_fnorm:2.4805e-01 L7_fnorm:2.4902e-01 L8_fnorm:2.4902e-01 L9_fnorm:2.5000e-01 L10_fnorm:2.5195e-01 L11_fnorm:2.5391e-01 L12_fnorm:2.5195e-01 L1_l1linf:7.9102e-02 L2_l1linf:7.8613e-02 L3_l1linf:7.7637e-02 L4_l1linf:7.8125e-02 L5_l1linf:7.7148e-02 L6_l1linf:7.7148e-02 L7_l1linf:7.7637e-02 L8_l1linf:7.8125e-02 L9_l1linf:7.8125e-02 L10_l1linf:7.9590e-02 L11_l1linf:8.1543e-02 L12_l1linf:8.2031e-02 L1_spectral:3.1343e-03 L2_spectral:3.1172e-03 L3_spectral:3.1246e-03 L4_spectral:3.1284e-03 L5_spectral:3.1365e-03 L6_spectral:3.1421e-03 L7_spectral:3.1234e-03 L8_spectral:3.1260e-03 L9_spectral:3.1134e-03 L10_spectral:3.1244e-03 L11_spectral:3.1118e-03 L12_spectral:3.0880e-03 train_time:74251ms step_avg:61.88ms +[2025-09-11 10:09:56] [Rank 0] PRINT: step:1200/10000 val_loss:5.2247 total_sharp:2.8561e-04 L1_sharp:3.3969e-02 L2_sharp:2.6455e-02 L3_sharp:2.5907e-02 L4_sharp:3.3055e-02 L5_sharp:3.9804e-02 L6_sharp:3.9986e-02 L7_sharp:4.8116e-02 L8_sharp:6.1173e-02 L9_sharp:7.0992e-02 L10_sharp:1.1000e-01 L11_sharp:1.3903e-01 L12_sharp:6.6574e-01 total_fnorm:7.9000e+01 total_l1_linf:1.6282e+05 total_spectral:3.9500e+01 L1_fnorm:2.4902e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4707e-01 L6_fnorm:2.4805e-01 L7_fnorm:2.4902e-01 L8_fnorm:2.4902e-01 L9_fnorm:2.5000e-01 L10_fnorm:2.5195e-01 L11_fnorm:2.5391e-01 L12_fnorm:2.5195e-01 L1_l1linf:7.9102e-02 L2_l1linf:7.8613e-02 L3_l1linf:7.7637e-02 L4_l1linf:7.8125e-02 L5_l1linf:7.7148e-02 
L6_l1linf:7.7148e-02 L7_l1linf:7.7637e-02 L8_l1linf:7.8125e-02 L9_l1linf:7.8125e-02 L10_l1linf:7.9590e-02 L11_l1linf:8.1543e-02 L12_l1linf:8.2031e-02 L1_spectral:3.1343e-03 L2_spectral:3.1172e-03 L3_spectral:3.1246e-03 L4_spectral:3.1284e-03 L5_spectral:3.1365e-03 L6_spectral:3.1421e-03 L7_spectral:3.1234e-03 L8_spectral:3.1260e-03 L9_spectral:3.1134e-03 L10_spectral:3.1244e-03 L11_spectral:3.1118e-03 L12_spectral:3.0880e-03 train_time:74251ms step_avg:61.88ms +[2025-09-11 10:09:57] [Rank 0] step:1201/10000 train_time:75499ms step_avg:62.86ms +[2025-09-11 10:09:57] [Rank 0] step:1201/10000 train_time:75499ms step_avg:62.86ms +[2025-09-11 10:09:58] [Rank 0] step:1221/10000 train_time:76146ms step_avg:62.36ms +[2025-09-11 10:09:58] [Rank 0] step:1221/10000 train_time:76146ms step_avg:62.36ms +[2025-09-11 10:09:59] [Rank 0] step:1241/10000 train_time:76790ms step_avg:61.88ms +[2025-09-11 10:09:59] [Rank 0] step:1241/10000 train_time:76790ms step_avg:61.88ms +[2025-09-11 10:09:59] [Rank 0] step:1261/10000 train_time:77433ms step_avg:61.41ms +[2025-09-11 10:09:59] [Rank 0] step:1261/10000 train_time:77433ms step_avg:61.41ms +[2025-09-11 10:10:00] [Rank 0] step:1281/10000 train_time:78077ms step_avg:60.95ms +[2025-09-11 10:10:00] [Rank 0] step:1281/10000 train_time:78077ms step_avg:60.95ms +[2025-09-11 10:10:01] [Rank 0] step:1301/10000 train_time:78720ms step_avg:60.51ms +[2025-09-11 10:10:01] [Rank 0] step:1301/10000 train_time:78720ms step_avg:60.51ms +[2025-09-11 10:10:01] [Rank 0] step:1321/10000 train_time:79427ms step_avg:60.13ms +[2025-09-11 10:10:01] [Rank 0] step:1321/10000 train_time:79427ms step_avg:60.13ms +[2025-09-11 10:10:02] [Rank 0] step:1341/10000 train_time:80071ms step_avg:59.71ms +[2025-09-11 10:10:02] [Rank 0] step:1341/10000 train_time:80071ms step_avg:59.71ms +[2025-09-11 10:10:03] [Rank 0] step:1361/10000 train_time:80759ms step_avg:59.34ms +[2025-09-11 10:10:03] [Rank 0] step:1361/10000 train_time:80759ms step_avg:59.34ms +[2025-09-11 10:10:03] 
[Rank 0] step:1381/10000 train_time:81402ms step_avg:58.94ms +[2025-09-11 10:10:03] [Rank 0] step:1381/10000 train_time:81402ms step_avg:58.94ms +[2025-09-11 10:10:04] [Rank 0] step:1401/10000 train_time:82046ms step_avg:58.56ms +[2025-09-11 10:10:04] [Rank 0] step:1401/10000 train_time:82046ms step_avg:58.56ms +[2025-09-11 10:10:05] [Rank 0] step:1421/10000 train_time:82689ms step_avg:58.19ms +[2025-09-11 10:10:05] [Rank 0] step:1421/10000 train_time:82689ms step_avg:58.19ms +[2025-09-11 10:10:05] [Rank 0] step:1441/10000 train_time:83332ms step_avg:57.83ms +[2025-09-11 10:10:05] [Rank 0] step:1441/10000 train_time:83332ms step_avg:57.83ms +[2025-09-11 10:10:06] [Rank 0] step:1461/10000 train_time:83975ms step_avg:57.48ms +[2025-09-11 10:10:06] [Rank 0] step:1461/10000 train_time:83975ms step_avg:57.48ms +[2025-09-11 10:10:06] [Rank 0] step:1481/10000 train_time:84618ms step_avg:57.14ms +[2025-09-11 10:10:06] [Rank 0] step:1481/10000 train_time:84618ms step_avg:57.14ms +[2025-09-11 10:10:07] [Rank 0] step:1501/10000 train_time:85271ms step_avg:56.81ms +[2025-09-11 10:10:07] [Rank 0] step:1501/10000 train_time:85271ms step_avg:56.81ms +[2025-09-11 10:10:08] [Rank 0] step:1521/10000 train_time:85918ms step_avg:56.49ms +[2025-09-11 10:10:08] [Rank 0] step:1521/10000 train_time:85918ms step_avg:56.49ms +[2025-09-11 10:10:08] [Rank 0] step:1541/10000 train_time:86565ms step_avg:56.17ms +[2025-09-11 10:10:08] [Rank 0] step:1541/10000 train_time:86565ms step_avg:56.17ms +[2025-09-11 10:10:09] [Rank 0] step:1561/10000 train_time:87212ms step_avg:55.87ms +[2025-09-11 10:10:09] [Rank 0] step:1561/10000 train_time:87212ms step_avg:55.87ms +[2025-09-11 10:10:10] [Rank 0] step:1581/10000 train_time:87860ms step_avg:55.57ms +[2025-09-11 10:10:10] [Rank 0] step:1581/10000 train_time:87860ms step_avg:55.57ms +[2025-09-11 10:10:10] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 10:10:10] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 10:10:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 10:10:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 10:10:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 10:10:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 10:10:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:10:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:10:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 10:10:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 10:10:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 10:10:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 10:10:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 10:10:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 10:10:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 10:10:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 10:10:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 10:10:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 10:10:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 10:10:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 10:10:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 10:10:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 10:10:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 10:10:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 10:10:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 10:10:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 10:10:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 10:10:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 10:10:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 10:10:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 10:10:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 10:10:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 10:10:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 10:10:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 10:10:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 10:10:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 10:10:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 10:10:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 10:10:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 10:10:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 10:10:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 10:10:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 10:10:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 10:10:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 10:10:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:10:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:10:20] [Rank 0] PRINT: step:1600/10000 val_loss:5.0575 total_sharp:2.2485e-04 L1_sharp:1.0459e-02 L2_sharp:9.9243e-03 L3_sharp:1.4689e-02 L4_sharp:1.2384e-02 L5_sharp:2.3560e-02 L6_sharp:2.6215e-02 L7_sharp:3.5537e-02 L8_sharp:5.1318e-02 L9_sharp:5.7937e-02 L10_sharp:8.2156e-02 L11_sharp:1.2897e-01 L12_sharp:3.8809e-01 total_fnorm:7.6500e+01 total_l1_linf:1.4848e+05 total_spectral:3.8250e+01 L1_fnorm:2.5000e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4805e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4707e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4805e-01 L8_fnorm:2.4805e-01 L9_fnorm:2.4902e-01 L10_fnorm:2.5195e-01 L11_fnorm:2.5195e-01 L12_fnorm:2.5391e-01 L1_l1linf:7.5684e-02 L2_l1linf:7.3730e-02 L3_l1linf:7.3730e-02 L4_l1linf:7.3242e-02 L5_l1linf:7.3730e-02 L6_l1linf:7.4219e-02 L7_l1linf:7.3730e-02 L8_l1linf:7.2754e-02 L9_l1linf:7.3730e-02 L10_l1linf:7.5195e-02 L11_l1linf:7.6172e-02 L12_l1linf:7.9590e-02 L1_spectral:3.1555e-03 L2_spectral:3.1307e-03 L3_spectral:3.1252e-03 L4_spectral:3.1452e-03 L5_spectral:3.1459e-03 L6_spectral:3.1235e-03 L7_spectral:3.1733e-03 L8_spectral:3.1602e-03 L9_spectral:3.1578e-03 L10_spectral:3.1654e-03 L11_spectral:3.1626e-03 L12_spectral:3.1379e-03 train_time:88489ms step_avg:55.31ms +[2025-09-11 10:10:20] [Rank 0] PRINT: step:1600/10000 
val_loss:5.0575 total_sharp:2.2485e-04 L1_sharp:1.0459e-02 L2_sharp:9.9243e-03 L3_sharp:1.4689e-02 L4_sharp:1.2384e-02 L5_sharp:2.3560e-02 L6_sharp:2.6215e-02 L7_sharp:3.5537e-02 L8_sharp:5.1318e-02 L9_sharp:5.7937e-02 L10_sharp:8.2156e-02 L11_sharp:1.2897e-01 L12_sharp:3.8809e-01 total_fnorm:7.6500e+01 total_l1_linf:1.4848e+05 total_spectral:3.8250e+01 L1_fnorm:2.5000e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4805e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4707e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4805e-01 L8_fnorm:2.4805e-01 L9_fnorm:2.4902e-01 L10_fnorm:2.5195e-01 L11_fnorm:2.5195e-01 L12_fnorm:2.5391e-01 L1_l1linf:7.5684e-02 L2_l1linf:7.3730e-02 L3_l1linf:7.3730e-02 L4_l1linf:7.3242e-02 L5_l1linf:7.3730e-02 L6_l1linf:7.4219e-02 L7_l1linf:7.3730e-02 L8_l1linf:7.2754e-02 L9_l1linf:7.3730e-02 L10_l1linf:7.5195e-02 L11_l1linf:7.6172e-02 L12_l1linf:7.9590e-02 L1_spectral:3.1555e-03 L2_spectral:3.1307e-03 L3_spectral:3.1252e-03 L4_spectral:3.1452e-03 L5_spectral:3.1459e-03 L6_spectral:3.1235e-03 L7_spectral:3.1733e-03 L8_spectral:3.1602e-03 L9_spectral:3.1578e-03 L10_spectral:3.1654e-03 L11_spectral:3.1626e-03 L12_spectral:3.1379e-03 train_time:88489ms step_avg:55.31ms +[2025-09-11 10:10:22] [Rank 0] step:1601/10000 train_time:89839ms step_avg:56.11ms +[2025-09-11 10:10:22] [Rank 0] step:1601/10000 train_time:89839ms step_avg:56.11ms +[2025-09-11 10:10:22] [Rank 0] step:1621/10000 train_time:90517ms step_avg:55.84ms +[2025-09-11 10:10:22] [Rank 0] step:1621/10000 train_time:90517ms step_avg:55.84ms +[2025-09-11 10:10:23] [Rank 0] step:1641/10000 train_time:91165ms step_avg:55.55ms +[2025-09-11 10:10:23] [Rank 0] step:1641/10000 train_time:91165ms step_avg:55.55ms +[2025-09-11 10:10:24] [Rank 0] step:1661/10000 train_time:91814ms step_avg:55.28ms +[2025-09-11 10:10:24] [Rank 0] step:1661/10000 train_time:91814ms step_avg:55.28ms +[2025-09-11 10:10:24] [Rank 0] step:1681/10000 train_time:92462ms step_avg:55.00ms +[2025-09-11 10:10:24] [Rank 0] step:1681/10000 train_time:92462ms 
step_avg:55.00ms +[2025-09-11 10:10:25] [Rank 0] step:1701/10000 train_time:93112ms step_avg:54.74ms +[2025-09-11 10:10:25] [Rank 0] step:1701/10000 train_time:93112ms step_avg:54.74ms +[2025-09-11 10:10:26] [Rank 0] step:1721/10000 train_time:93759ms step_avg:54.48ms +[2025-09-11 10:10:26] [Rank 0] step:1721/10000 train_time:93759ms step_avg:54.48ms +[2025-09-11 10:10:26] [Rank 0] step:1741/10000 train_time:94406ms step_avg:54.23ms +[2025-09-11 10:10:26] [Rank 0] step:1741/10000 train_time:94406ms step_avg:54.23ms +[2025-09-11 10:10:27] [Rank 0] step:1761/10000 train_time:95054ms step_avg:53.98ms +[2025-09-11 10:10:27] [Rank 0] step:1761/10000 train_time:95054ms step_avg:53.98ms +[2025-09-11 10:10:28] [Rank 0] step:1781/10000 train_time:95700ms step_avg:53.73ms +[2025-09-11 10:10:28] [Rank 0] step:1781/10000 train_time:95700ms step_avg:53.73ms +[2025-09-11 10:10:28] [Rank 0] step:1801/10000 train_time:96348ms step_avg:53.50ms +[2025-09-11 10:10:28] [Rank 0] step:1801/10000 train_time:96348ms step_avg:53.50ms +[2025-09-11 10:10:29] [Rank 0] step:1821/10000 train_time:96996ms step_avg:53.27ms +[2025-09-11 10:10:29] [Rank 0] step:1821/10000 train_time:96996ms step_avg:53.27ms +[2025-09-11 10:10:30] [Rank 0] step:1841/10000 train_time:97644ms step_avg:53.04ms +[2025-09-11 10:10:30] [Rank 0] step:1841/10000 train_time:97644ms step_avg:53.04ms +[2025-09-11 10:10:30] [Rank 0] step:1861/10000 train_time:98291ms step_avg:52.82ms +[2025-09-11 10:10:30] [Rank 0] step:1861/10000 train_time:98291ms step_avg:52.82ms +[2025-09-11 10:10:31] [Rank 0] step:1881/10000 train_time:98939ms step_avg:52.60ms +[2025-09-11 10:10:31] [Rank 0] step:1881/10000 train_time:98939ms step_avg:52.60ms +[2025-09-11 10:10:32] [Rank 0] step:1901/10000 train_time:99586ms step_avg:52.39ms +[2025-09-11 10:10:32] [Rank 0] step:1901/10000 train_time:99586ms step_avg:52.39ms +[2025-09-11 10:10:32] [Rank 0] step:1921/10000 train_time:100234ms step_avg:52.18ms +[2025-09-11 10:10:32] [Rank 0] step:1921/10000 
train_time:100234ms step_avg:52.18ms +[2025-09-11 10:10:33] [Rank 0] step:1941/10000 train_time:100882ms step_avg:51.97ms +[2025-09-11 10:10:33] [Rank 0] step:1941/10000 train_time:100882ms step_avg:51.97ms +[2025-09-11 10:10:33] [Rank 0] step:1961/10000 train_time:101529ms step_avg:51.77ms +[2025-09-11 10:10:33] [Rank 0] step:1961/10000 train_time:101529ms step_avg:51.77ms +[2025-09-11 10:10:34] [Rank 0] step:1981/10000 train_time:102176ms step_avg:51.58ms +[2025-09-11 10:10:34] [Rank 0] step:1981/10000 train_time:102176ms step_avg:51.58ms +[2025-09-11 10:10:35] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 10:10:35] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 10:10:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 10:10:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 10:10:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 10:10:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 10:10:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:10:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:10:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 10:10:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 10:10:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 10:10:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 10:10:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 10:10:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 10:10:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 10:10:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 10:10:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 10:10:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 10:10:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 10:10:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 10:10:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 10:10:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 10:10:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 10:10:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 10:10:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 10:10:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 10:10:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 10:10:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 10:10:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 10:10:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 10:10:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 10:10:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 10:10:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 10:10:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 10:10:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 10:10:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 10:10:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 10:10:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 10:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 10:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 10:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 10:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 10:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 10:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 10:10:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:10:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:10:45] [Rank 0] PRINT: step:2000/10000 val_loss:4.8871 total_sharp:2.0809e-04 L1_sharp:1.2535e-02 L2_sharp:7.5996e-03 L3_sharp:9.9602e-03 L4_sharp:1.2128e-02 L5_sharp:2.4047e-02 L6_sharp:2.6188e-02 L7_sharp:3.7923e-02 L8_sharp:5.6385e-02 L9_sharp:5.1954e-02 L10_sharp:8.5284e-02 L11_sharp:1.3606e-01 L12_sharp:1.4299e+00 total_fnorm:7.6000e+01 total_l1_linf:1.5258e+05 total_spectral:3.8000e+01 L1_fnorm:2.5000e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4805e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4707e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4805e-01 L8_fnorm:2.4707e-01 L9_fnorm:2.4805e-01 L10_fnorm:2.5000e-01 L11_fnorm:2.5195e-01 L12_fnorm:2.5391e-01 L1_l1linf:7.2754e-02 L2_l1linf:7.2266e-02 L3_l1linf:7.1777e-02 L4_l1linf:7.1777e-02 L5_l1linf:7.1289e-02 L6_l1linf:7.1777e-02 L7_l1linf:7.0801e-02 L8_l1linf:7.0801e-02 L9_l1linf:7.1289e-02 L10_l1linf:7.2754e-02 L11_l1linf:7.3730e-02 L12_l1linf:7.7148e-02 L1_spectral:3.1632e-03 L2_spectral:3.1451e-03 L3_spectral:3.1448e-03 L4_spectral:3.1407e-03 L5_spectral:3.1435e-03 L6_spectral:3.1352e-03 L7_spectral:3.1513e-03 L8_spectral:3.2056e-03 L9_spectral:3.1622e-03 L10_spectral:3.1702e-03 L11_spectral:3.1610e-03 L12_spectral:3.1636e-03 train_time:102806ms step_avg:51.40ms +[2025-09-11 10:10:45] [Rank 0] PRINT: step:2000/10000 val_loss:4.8871 total_sharp:2.0809e-04 L1_sharp:1.2535e-02 L2_sharp:7.5996e-03 L3_sharp:9.9602e-03 L4_sharp:1.2128e-02 L5_sharp:2.4047e-02 L6_sharp:2.6188e-02 L7_sharp:3.7923e-02 L8_sharp:5.6385e-02 L9_sharp:5.1954e-02 L10_sharp:8.5284e-02 L11_sharp:1.3606e-01 L12_sharp:1.4299e+00 total_fnorm:7.6000e+01 total_l1_linf:1.5258e+05 total_spectral:3.8000e+01 L1_fnorm:2.5000e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4805e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4707e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4805e-01 L8_fnorm:2.4707e-01 L9_fnorm:2.4805e-01 L10_fnorm:2.5000e-01 L11_fnorm:2.5195e-01 L12_fnorm:2.5391e-01 L1_l1linf:7.2754e-02 L2_l1linf:7.2266e-02 L3_l1linf:7.1777e-02 L4_l1linf:7.1777e-02 L5_l1linf:7.1289e-02 
L6_l1linf:7.1777e-02 L7_l1linf:7.0801e-02 L8_l1linf:7.0801e-02 L9_l1linf:7.1289e-02 L10_l1linf:7.2754e-02 L11_l1linf:7.3730e-02 L12_l1linf:7.7148e-02 L1_spectral:3.1632e-03 L2_spectral:3.1451e-03 L3_spectral:3.1448e-03 L4_spectral:3.1407e-03 L5_spectral:3.1435e-03 L6_spectral:3.1352e-03 L7_spectral:3.1513e-03 L8_spectral:3.2056e-03 L9_spectral:3.1622e-03 L10_spectral:3.1702e-03 L11_spectral:3.1610e-03 L12_spectral:3.1636e-03 train_time:102806ms step_avg:51.40ms +[2025-09-11 10:10:46] [Rank 0] step:2001/10000 train_time:104065ms step_avg:52.01ms +[2025-09-11 10:10:46] [Rank 0] step:2001/10000 train_time:104065ms step_avg:52.01ms +[2025-09-11 10:10:47] [Rank 0] step:2021/10000 train_time:104717ms step_avg:51.81ms +[2025-09-11 10:10:47] [Rank 0] step:2021/10000 train_time:104717ms step_avg:51.81ms +[2025-09-11 10:10:47] [Rank 0] step:2041/10000 train_time:105365ms step_avg:51.62ms +[2025-09-11 10:10:47] [Rank 0] step:2041/10000 train_time:105365ms step_avg:51.62ms +[2025-09-11 10:10:48] [Rank 0] step:2061/10000 train_time:106013ms step_avg:51.44ms +[2025-09-11 10:10:48] [Rank 0] step:2061/10000 train_time:106013ms step_avg:51.44ms +[2025-09-11 10:10:48] [Rank 0] step:2081/10000 train_time:106661ms step_avg:51.25ms +[2025-09-11 10:10:48] [Rank 0] step:2081/10000 train_time:106661ms step_avg:51.25ms +[2025-09-11 10:10:49] [Rank 0] step:2101/10000 train_time:107308ms step_avg:51.07ms +[2025-09-11 10:10:49] [Rank 0] step:2101/10000 train_time:107308ms step_avg:51.07ms +[2025-09-11 10:10:50] [Rank 0] step:2121/10000 train_time:107956ms step_avg:50.90ms +[2025-09-11 10:10:50] [Rank 0] step:2121/10000 train_time:107956ms step_avg:50.90ms +[2025-09-11 10:10:50] [Rank 0] step:2141/10000 train_time:108604ms step_avg:50.73ms +[2025-09-11 10:10:50] [Rank 0] step:2141/10000 train_time:108604ms step_avg:50.73ms +[2025-09-11 10:10:51] [Rank 0] step:2161/10000 train_time:109252ms step_avg:50.56ms +[2025-09-11 10:10:51] [Rank 0] step:2161/10000 train_time:109252ms step_avg:50.56ms 
+[2025-09-11 10:10:52] [Rank 0] step:2181/10000 train_time:109900ms step_avg:50.39ms +[2025-09-11 10:10:52] [Rank 0] step:2181/10000 train_time:109900ms step_avg:50.39ms +[2025-09-11 10:10:52] [Rank 0] step:2201/10000 train_time:110547ms step_avg:50.23ms +[2025-09-11 10:10:52] [Rank 0] step:2201/10000 train_time:110547ms step_avg:50.23ms +[2025-09-11 10:10:53] [Rank 0] step:2221/10000 train_time:111194ms step_avg:50.06ms +[2025-09-11 10:10:53] [Rank 0] step:2221/10000 train_time:111194ms step_avg:50.06ms +[2025-09-11 10:10:54] [Rank 0] step:2241/10000 train_time:111854ms step_avg:49.91ms +[2025-09-11 10:10:54] [Rank 0] step:2241/10000 train_time:111854ms step_avg:49.91ms +[2025-09-11 10:10:54] [Rank 0] step:2261/10000 train_time:112514ms step_avg:49.76ms +[2025-09-11 10:10:54] [Rank 0] step:2261/10000 train_time:112514ms step_avg:49.76ms +[2025-09-11 10:10:55] [Rank 0] step:2281/10000 train_time:113455ms step_avg:49.74ms +[2025-09-11 10:10:55] [Rank 0] step:2281/10000 train_time:113455ms step_avg:49.74ms +[2025-09-11 10:10:56] [Rank 0] step:2301/10000 train_time:114119ms step_avg:49.60ms +[2025-09-11 10:10:56] [Rank 0] step:2301/10000 train_time:114119ms step_avg:49.60ms +[2025-09-11 10:10:57] [Rank 0] step:2321/10000 train_time:114780ms step_avg:49.45ms +[2025-09-11 10:10:57] [Rank 0] step:2321/10000 train_time:114780ms step_avg:49.45ms +[2025-09-11 10:10:58] [Rank 0] step:2341/10000 train_time:115743ms step_avg:49.44ms +[2025-09-11 10:10:58] [Rank 0] step:2341/10000 train_time:115743ms step_avg:49.44ms +[2025-09-11 10:10:58] [Rank 0] step:2361/10000 train_time:116404ms step_avg:49.30ms +[2025-09-11 10:10:58] [Rank 0] step:2361/10000 train_time:116404ms step_avg:49.30ms +[2025-09-11 10:10:59] [Rank 0] step:2381/10000 train_time:117064ms step_avg:49.17ms +[2025-09-11 10:10:59] [Rank 0] step:2381/10000 train_time:117064ms step_avg:49.17ms +[2025-09-11 10:11:00] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 10:11:00] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 10:11:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 10:11:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 10:11:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 10:11:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 10:11:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:11:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:11:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 10:11:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 10:11:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 10:11:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 10:11:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 10:11:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 10:11:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 10:11:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 10:11:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 10:11:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 10:11:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 10:11:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 10:11:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 10:11:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 10:11:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 10:11:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 10:11:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 10:11:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 10:11:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 10:11:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 10:11:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 10:11:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 10:11:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 10:11:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 10:11:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 10:11:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 10:11:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 10:11:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 10:11:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 10:11:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 10:11:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 10:11:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 10:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 10:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 10:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 10:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 10:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:11:09] [Rank 0] PRINT: step:2400/10000 val_loss:4.7459 total_sharp:2.1023e-04 L1_sharp:7.8895e-03 L2_sharp:9.4177e-03 L3_sharp:7.4151e-03 L4_sharp:1.2842e-02 L5_sharp:1.9040e-02 L6_sharp:2.6252e-02 L7_sharp:3.9353e-02 L8_sharp:5.9863e-02 L9_sharp:5.3818e-02 L10_sharp:8.3749e-02 L11_sharp:1.2500e-01 L12_sharp:1.1094e+00 total_fnorm:7.2000e+01 total_l1_linf:1.3926e+05 total_spectral:3.6000e+01 L1_fnorm:2.4902e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4805e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4805e-01 L8_fnorm:2.4512e-01 L9_fnorm:2.4805e-01 L10_fnorm:2.4902e-01 L11_fnorm:2.5000e-01 L12_fnorm:2.5391e-01 L1_l1linf:7.0801e-02 L2_l1linf:6.9824e-02 L3_l1linf:7.0312e-02 L4_l1linf:7.0312e-02 L5_l1linf:6.9336e-02 L6_l1linf:6.9336e-02 L7_l1linf:7.0312e-02 L8_l1linf:6.9336e-02 L9_l1linf:6.8848e-02 L10_l1linf:6.8848e-02 L11_l1linf:7.0801e-02 L12_l1linf:7.5195e-02 L1_spectral:3.1579e-03 L2_spectral:3.1524e-03 L3_spectral:3.1623e-03 L4_spectral:3.1297e-03 L5_spectral:3.1581e-03 L6_spectral:3.1620e-03 L7_spectral:3.1580e-03 L8_spectral:3.1740e-03 L9_spectral:3.1520e-03 L10_spectral:3.1538e-03 L11_spectral:3.1784e-03 L12_spectral:3.1770e-03 train_time:117706ms step_avg:49.04ms +[2025-09-11 10:11:09] [Rank 0] PRINT: step:2400/10000 
val_loss:4.7459 total_sharp:2.1023e-04 L1_sharp:7.8895e-03 L2_sharp:9.4177e-03 L3_sharp:7.4151e-03 L4_sharp:1.2842e-02 L5_sharp:1.9040e-02 L6_sharp:2.6252e-02 L7_sharp:3.9353e-02 L8_sharp:5.9863e-02 L9_sharp:5.3818e-02 L10_sharp:8.3749e-02 L11_sharp:1.2500e-01 L12_sharp:1.1094e+00 total_fnorm:7.2000e+01 total_l1_linf:1.3926e+05 total_spectral:3.6000e+01 L1_fnorm:2.4902e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4805e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4805e-01 L8_fnorm:2.4512e-01 L9_fnorm:2.4805e-01 L10_fnorm:2.4902e-01 L11_fnorm:2.5000e-01 L12_fnorm:2.5391e-01 L1_l1linf:7.0801e-02 L2_l1linf:6.9824e-02 L3_l1linf:7.0312e-02 L4_l1linf:7.0312e-02 L5_l1linf:6.9336e-02 L6_l1linf:6.9336e-02 L7_l1linf:7.0312e-02 L8_l1linf:6.9336e-02 L9_l1linf:6.8848e-02 L10_l1linf:6.8848e-02 L11_l1linf:7.0801e-02 L12_l1linf:7.5195e-02 L1_spectral:3.1579e-03 L2_spectral:3.1524e-03 L3_spectral:3.1623e-03 L4_spectral:3.1297e-03 L5_spectral:3.1581e-03 L6_spectral:3.1620e-03 L7_spectral:3.1580e-03 L8_spectral:3.1740e-03 L9_spectral:3.1520e-03 L10_spectral:3.1538e-03 L11_spectral:3.1784e-03 L12_spectral:3.1770e-03 train_time:117706ms step_avg:49.04ms +[2025-09-11 10:11:15] [Rank 0] step:2401/10000 train_time:122772ms step_avg:51.13ms +[2025-09-11 10:11:15] [Rank 0] step:2401/10000 train_time:122772ms step_avg:51.13ms +[2025-09-11 10:11:15] [Rank 0] step:2421/10000 train_time:123464ms step_avg:51.00ms +[2025-09-11 10:11:15] [Rank 0] step:2421/10000 train_time:123464ms step_avg:51.00ms +[2025-09-11 10:11:16] [Rank 0] step:2441/10000 train_time:124127ms step_avg:50.85ms +[2025-09-11 10:11:16] [Rank 0] step:2441/10000 train_time:124127ms step_avg:50.85ms +[2025-09-11 10:11:17] [Rank 0] step:2461/10000 train_time:124789ms step_avg:50.71ms +[2025-09-11 10:11:17] [Rank 0] step:2461/10000 train_time:124789ms step_avg:50.71ms +[2025-09-11 10:11:17] [Rank 0] step:2481/10000 train_time:125452ms step_avg:50.57ms +[2025-09-11 10:11:17] [Rank 0] step:2481/10000 
train_time:125452ms step_avg:50.57ms +[2025-09-11 10:11:18] [Rank 0] step:2501/10000 train_time:126113ms step_avg:50.43ms +[2025-09-11 10:11:18] [Rank 0] step:2501/10000 train_time:126113ms step_avg:50.43ms +[2025-09-11 10:11:19] [Rank 0] step:2521/10000 train_time:126775ms step_avg:50.29ms +[2025-09-11 10:11:19] [Rank 0] step:2521/10000 train_time:126775ms step_avg:50.29ms +[2025-09-11 10:11:19] [Rank 0] step:2541/10000 train_time:127437ms step_avg:50.15ms +[2025-09-11 10:11:19] [Rank 0] step:2541/10000 train_time:127437ms step_avg:50.15ms +[2025-09-11 10:11:20] [Rank 0] step:2561/10000 train_time:128098ms step_avg:50.02ms +[2025-09-11 10:11:20] [Rank 0] step:2561/10000 train_time:128098ms step_avg:50.02ms +[2025-09-11 10:11:21] [Rank 0] step:2581/10000 train_time:128761ms step_avg:49.89ms +[2025-09-11 10:11:21] [Rank 0] step:2581/10000 train_time:128761ms step_avg:49.89ms +[2025-09-11 10:11:21] [Rank 0] step:2601/10000 train_time:129422ms step_avg:49.76ms +[2025-09-11 10:11:21] [Rank 0] step:2601/10000 train_time:129422ms step_avg:49.76ms +[2025-09-11 10:11:22] [Rank 0] step:2621/10000 train_time:130084ms step_avg:49.63ms +[2025-09-11 10:11:22] [Rank 0] step:2621/10000 train_time:130084ms step_avg:49.63ms +[2025-09-11 10:11:23] [Rank 0] step:2641/10000 train_time:130746ms step_avg:49.51ms +[2025-09-11 10:11:23] [Rank 0] step:2641/10000 train_time:130746ms step_avg:49.51ms +[2025-09-11 10:11:23] [Rank 0] step:2661/10000 train_time:131408ms step_avg:49.38ms +[2025-09-11 10:11:23] [Rank 0] step:2661/10000 train_time:131408ms step_avg:49.38ms +[2025-09-11 10:11:24] [Rank 0] step:2681/10000 train_time:132071ms step_avg:49.26ms +[2025-09-11 10:11:24] [Rank 0] step:2681/10000 train_time:132071ms step_avg:49.26ms +[2025-09-11 10:11:25] [Rank 0] step:2701/10000 train_time:132733ms step_avg:49.14ms +[2025-09-11 10:11:25] [Rank 0] step:2701/10000 train_time:132733ms step_avg:49.14ms +[2025-09-11 10:11:25] [Rank 0] step:2721/10000 train_time:133398ms step_avg:49.03ms 
+[2025-09-11 10:11:25] [Rank 0] step:2721/10000 train_time:133398ms step_avg:49.03ms +[2025-09-11 10:11:26] [Rank 0] step:2741/10000 train_time:134066ms step_avg:48.91ms +[2025-09-11 10:11:26] [Rank 0] step:2741/10000 train_time:134066ms step_avg:48.91ms +[2025-09-11 10:11:27] [Rank 0] step:2761/10000 train_time:134728ms step_avg:48.80ms +[2025-09-11 10:11:27] [Rank 0] step:2761/10000 train_time:134728ms step_avg:48.80ms +[2025-09-11 10:11:27] [Rank 0] step:2781/10000 train_time:135390ms step_avg:48.68ms +[2025-09-11 10:11:27] [Rank 0] step:2781/10000 train_time:135390ms step_avg:48.68ms +[2025-09-11 10:11:28] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 10:11:28] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 10:11:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 10:11:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 10:11:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 10:11:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 10:11:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:11:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:11:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 10:11:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 10:11:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 10:11:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 10:11:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 10:11:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 10:11:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 10:11:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 10:11:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 10:11:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 10:11:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 10:11:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 10:11:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 10:11:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 10:11:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 10:11:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 10:11:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 10:11:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 10:11:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 10:11:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 10:11:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 10:11:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 10:11:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 10:11:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 10:11:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 10:11:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 10:11:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 10:11:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 10:11:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 10:11:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 10:11:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 10:11:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 10:11:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 10:11:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 10:11:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 10:11:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 10:11:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:11:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:11:38] [Rank 0] PRINT: step:2800/10000 val_loss:4.6645 total_sharp:1.9709e-04 L1_sharp:7.8161e-03 L2_sharp:8.9202e-03 L3_sharp:7.5059e-03 L4_sharp:1.1569e-02 L5_sharp:1.8861e-02 L6_sharp:2.6996e-02 L7_sharp:3.4350e-02 L8_sharp:5.5105e-02 L9_sharp:5.0959e-02 L10_sharp:7.1817e-02 L11_sharp:1.1459e-01 L12_sharp:6.7686e-01 total_fnorm:7.0000e+01 total_l1_linf:1.3517e+05 total_spectral:3.5000e+01 L1_fnorm:2.4902e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4707e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4707e-01 L8_fnorm:2.4512e-01 L9_fnorm:2.4707e-01 L10_fnorm:2.4805e-01 L11_fnorm:2.4902e-01 L12_fnorm:2.5195e-01 L1_l1linf:6.9336e-02 L2_l1linf:6.8848e-02 L3_l1linf:6.8359e-02 L4_l1linf:6.8848e-02 L5_l1linf:6.8359e-02 L6_l1linf:6.7383e-02 L7_l1linf:6.7871e-02 L8_l1linf:6.7871e-02 L9_l1linf:6.7383e-02 L10_l1linf:6.7383e-02 L11_l1linf:6.7871e-02 L12_l1linf:7.3242e-02 L1_spectral:3.1916e-03 L2_spectral:3.1610e-03 L3_spectral:3.1728e-03 L4_spectral:3.1588e-03 L5_spectral:3.1808e-03 L6_spectral:3.1445e-03 L7_spectral:3.1707e-03 L8_spectral:3.1944e-03 L9_spectral:3.1914e-03 L10_spectral:3.1720e-03 L11_spectral:3.1959e-03 L12_spectral:3.1634e-03 train_time:136037ms step_avg:48.58ms +[2025-09-11 10:11:38] [Rank 0] PRINT: step:2800/10000 val_loss:4.6645 total_sharp:1.9709e-04 L1_sharp:7.8161e-03 L2_sharp:8.9202e-03 L3_sharp:7.5059e-03 L4_sharp:1.1569e-02 L5_sharp:1.8861e-02 L6_sharp:2.6996e-02 L7_sharp:3.4350e-02 L8_sharp:5.5105e-02 L9_sharp:5.0959e-02 L10_sharp:7.1817e-02 L11_sharp:1.1459e-01 L12_sharp:6.7686e-01 total_fnorm:7.0000e+01 total_l1_linf:1.3517e+05 total_spectral:3.5000e+01 L1_fnorm:2.4902e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4707e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4707e-01 L8_fnorm:2.4512e-01 L9_fnorm:2.4707e-01 L10_fnorm:2.4805e-01 L11_fnorm:2.4902e-01 L12_fnorm:2.5195e-01 L1_l1linf:6.9336e-02 L2_l1linf:6.8848e-02 L3_l1linf:6.8359e-02 L4_l1linf:6.8848e-02 L5_l1linf:6.8359e-02 
L6_l1linf:6.7383e-02 L7_l1linf:6.7871e-02 L8_l1linf:6.7871e-02 L9_l1linf:6.7383e-02 L10_l1linf:6.7383e-02 L11_l1linf:6.7871e-02 L12_l1linf:7.3242e-02 L1_spectral:3.1916e-03 L2_spectral:3.1610e-03 L3_spectral:3.1728e-03 L4_spectral:3.1588e-03 L5_spectral:3.1808e-03 L6_spectral:3.1445e-03 L7_spectral:3.1707e-03 L8_spectral:3.1944e-03 L9_spectral:3.1914e-03 L10_spectral:3.1720e-03 L11_spectral:3.1959e-03 L12_spectral:3.1634e-03 train_time:136037ms step_avg:48.58ms +[2025-09-11 10:11:40] [Rank 0] step:2801/10000 train_time:137350ms step_avg:49.04ms +[2025-09-11 10:11:40] [Rank 0] step:2801/10000 train_time:137350ms step_avg:49.04ms +[2025-09-11 10:11:40] [Rank 0] step:2821/10000 train_time:138017ms step_avg:48.92ms +[2025-09-11 10:11:40] [Rank 0] step:2821/10000 train_time:138017ms step_avg:48.92ms +[2025-09-11 10:11:41] [Rank 0] step:2841/10000 train_time:138680ms step_avg:48.81ms +[2025-09-11 10:11:41] [Rank 0] step:2841/10000 train_time:138680ms step_avg:48.81ms +[2025-09-11 10:11:42] [Rank 0] step:2861/10000 train_time:139344ms step_avg:48.70ms +[2025-09-11 10:11:42] [Rank 0] step:2861/10000 train_time:139344ms step_avg:48.70ms +[2025-09-11 10:11:42] [Rank 0] step:2881/10000 train_time:140007ms step_avg:48.60ms +[2025-09-11 10:11:42] [Rank 0] step:2881/10000 train_time:140007ms step_avg:48.60ms +[2025-09-11 10:11:43] [Rank 0] step:2901/10000 train_time:140669ms step_avg:48.49ms +[2025-09-11 10:11:43] [Rank 0] step:2901/10000 train_time:140669ms step_avg:48.49ms +[2025-09-11 10:11:44] [Rank 0] step:2921/10000 train_time:141331ms step_avg:48.38ms +[2025-09-11 10:11:44] [Rank 0] step:2921/10000 train_time:141331ms step_avg:48.38ms +[2025-09-11 10:11:44] [Rank 0] step:2941/10000 train_time:141997ms step_avg:48.28ms +[2025-09-11 10:11:44] [Rank 0] step:2941/10000 train_time:141997ms step_avg:48.28ms +[2025-09-11 10:11:45] [Rank 0] step:2961/10000 train_time:142660ms step_avg:48.18ms +[2025-09-11 10:11:45] [Rank 0] step:2961/10000 train_time:142660ms step_avg:48.18ms 
+[2025-09-11 10:11:46] [Rank 0] step:2981/10000 train_time:143324ms step_avg:48.08ms +[2025-09-11 10:11:46] [Rank 0] step:2981/10000 train_time:143324ms step_avg:48.08ms +[2025-09-11 10:11:46] [Rank 0] step:3001/10000 train_time:143989ms step_avg:47.98ms +[2025-09-11 10:11:46] [Rank 0] step:3001/10000 train_time:143989ms step_avg:47.98ms +[2025-09-11 10:11:47] [Rank 0] step:3021/10000 train_time:144654ms step_avg:47.88ms +[2025-09-11 10:11:47] [Rank 0] step:3021/10000 train_time:144654ms step_avg:47.88ms +[2025-09-11 10:11:48] [Rank 0] step:3041/10000 train_time:145322ms step_avg:47.79ms +[2025-09-11 10:11:48] [Rank 0] step:3041/10000 train_time:145322ms step_avg:47.79ms +[2025-09-11 10:11:48] [Rank 0] step:3061/10000 train_time:145987ms step_avg:47.69ms +[2025-09-11 10:11:48] [Rank 0] step:3061/10000 train_time:145987ms step_avg:47.69ms +[2025-09-11 10:11:49] [Rank 0] step:3081/10000 train_time:146652ms step_avg:47.60ms +[2025-09-11 10:11:49] [Rank 0] step:3081/10000 train_time:146652ms step_avg:47.60ms +[2025-09-11 10:11:50] [Rank 0] step:3101/10000 train_time:147317ms step_avg:47.51ms +[2025-09-11 10:11:50] [Rank 0] step:3101/10000 train_time:147317ms step_avg:47.51ms +[2025-09-11 10:11:50] [Rank 0] step:3121/10000 train_time:147982ms step_avg:47.41ms +[2025-09-11 10:11:50] [Rank 0] step:3121/10000 train_time:147982ms step_avg:47.41ms +[2025-09-11 10:11:51] [Rank 0] step:3141/10000 train_time:148647ms step_avg:47.32ms +[2025-09-11 10:11:51] [Rank 0] step:3141/10000 train_time:148647ms step_avg:47.32ms +[2025-09-11 10:11:52] [Rank 0] step:3161/10000 train_time:149311ms step_avg:47.24ms +[2025-09-11 10:11:52] [Rank 0] step:3161/10000 train_time:149311ms step_avg:47.24ms +[2025-09-11 10:11:52] [Rank 0] step:3181/10000 train_time:149975ms step_avg:47.15ms +[2025-09-11 10:11:52] [Rank 0] step:3181/10000 train_time:149975ms step_avg:47.15ms +[2025-09-11 10:11:53] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 10:11:53] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 10:11:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 10:11:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 10:11:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 10:11:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 10:11:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:11:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:11:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 10:11:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 10:11:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 10:11:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 10:11:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 10:11:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 10:11:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 10:11:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 10:12:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 10:12:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 10:12:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 10:12:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 10:12:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 10:12:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 10:12:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 10:12:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 10:12:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 10:12:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 10:12:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 10:12:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 10:12:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 10:12:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 10:12:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 10:12:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 10:12:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 10:12:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 10:12:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 10:12:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 10:12:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 10:12:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 10:12:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 10:12:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 10:12:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 10:12:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 10:12:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 10:12:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 10:12:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:12:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:12:03] [Rank 0] PRINT: step:3200/10000 val_loss:4.5819 total_sharp:1.2954e-04 L1_sharp:7.9807e-03 L2_sharp:6.1849e-03 L3_sharp:9.8772e-03 L4_sharp:8.8378e-03 L5_sharp:1.8673e-02 L6_sharp:2.8369e-02 L7_sharp:3.0165e-02 L8_sharp:4.8191e-02 L9_sharp:4.9928e-02 L10_sharp:6.2609e-02 L11_sharp:9.6458e-02 L12_sharp:4.6077e-01 total_fnorm:7.8000e+01 total_l1_linf:1.5770e+05 total_spectral:3.9000e+01 L1_fnorm:2.4902e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4805e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4707e-01 L8_fnorm:2.4512e-01 L9_fnorm:2.4707e-01 L10_fnorm:2.4707e-01 L11_fnorm:2.4707e-01 L12_fnorm:2.5000e-01 L1_l1linf:6.8848e-02 L2_l1linf:6.9336e-02 L3_l1linf:6.8359e-02 L4_l1linf:6.7383e-02 L5_l1linf:6.7383e-02 L6_l1linf:6.6895e-02 L7_l1linf:6.6406e-02 L8_l1linf:6.5918e-02 L9_l1linf:6.5430e-02 L10_l1linf:6.4453e-02 L11_l1linf:6.4941e-02 L12_l1linf:6.8848e-02 L1_spectral:3.1924e-03 L2_spectral:3.1731e-03 L3_spectral:3.1585e-03 L4_spectral:3.1731e-03 L5_spectral:3.1711e-03 L6_spectral:3.1604e-03 L7_spectral:3.1846e-03 L8_spectral:3.1982e-03 L9_spectral:3.1904e-03 L10_spectral:3.1971e-03 L11_spectral:3.2125e-03 L12_spectral:3.2030e-03 train_time:150622ms step_avg:47.07ms +[2025-09-11 10:12:03] [Rank 0] PRINT: step:3200/10000 
val_loss:4.5819 total_sharp:1.2954e-04 L1_sharp:7.9807e-03 L2_sharp:6.1849e-03 L3_sharp:9.8772e-03 L4_sharp:8.8378e-03 L5_sharp:1.8673e-02 L6_sharp:2.8369e-02 L7_sharp:3.0165e-02 L8_sharp:4.8191e-02 L9_sharp:4.9928e-02 L10_sharp:6.2609e-02 L11_sharp:9.6458e-02 L12_sharp:4.6077e-01 total_fnorm:7.8000e+01 total_l1_linf:1.5770e+05 total_spectral:3.9000e+01 L1_fnorm:2.4902e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4805e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4707e-01 L8_fnorm:2.4512e-01 L9_fnorm:2.4707e-01 L10_fnorm:2.4707e-01 L11_fnorm:2.4707e-01 L12_fnorm:2.5000e-01 L1_l1linf:6.8848e-02 L2_l1linf:6.9336e-02 L3_l1linf:6.8359e-02 L4_l1linf:6.7383e-02 L5_l1linf:6.7383e-02 L6_l1linf:6.6895e-02 L7_l1linf:6.6406e-02 L8_l1linf:6.5918e-02 L9_l1linf:6.5430e-02 L10_l1linf:6.4453e-02 L11_l1linf:6.4941e-02 L12_l1linf:6.8848e-02 L1_spectral:3.1924e-03 L2_spectral:3.1731e-03 L3_spectral:3.1585e-03 L4_spectral:3.1731e-03 L5_spectral:3.1711e-03 L6_spectral:3.1604e-03 L7_spectral:3.1846e-03 L8_spectral:3.1982e-03 L9_spectral:3.1904e-03 L10_spectral:3.1971e-03 L11_spectral:3.2125e-03 L12_spectral:3.2030e-03 train_time:150622ms step_avg:47.07ms +[2025-09-11 10:12:04] [Rank 0] step:3201/10000 train_time:151950ms step_avg:47.47ms +[2025-09-11 10:12:04] [Rank 0] step:3201/10000 train_time:151950ms step_avg:47.47ms +[2025-09-11 10:12:05] [Rank 0] step:3221/10000 train_time:152621ms step_avg:47.38ms +[2025-09-11 10:12:05] [Rank 0] step:3221/10000 train_time:152621ms step_avg:47.38ms +[2025-09-11 10:12:05] [Rank 0] step:3241/10000 train_time:153287ms step_avg:47.30ms +[2025-09-11 10:12:05] [Rank 0] step:3241/10000 train_time:153287ms step_avg:47.30ms +[2025-09-11 10:12:06] [Rank 0] step:3261/10000 train_time:153953ms step_avg:47.21ms +[2025-09-11 10:12:06] [Rank 0] step:3261/10000 train_time:153953ms step_avg:47.21ms +[2025-09-11 10:12:07] [Rank 0] step:3281/10000 train_time:154618ms step_avg:47.13ms +[2025-09-11 10:12:07] [Rank 0] step:3281/10000 
train_time:154618ms step_avg:47.13ms +[2025-09-11 10:12:07] [Rank 0] step:3301/10000 train_time:155283ms step_avg:47.04ms +[2025-09-11 10:12:07] [Rank 0] step:3301/10000 train_time:155283ms step_avg:47.04ms +[2025-09-11 10:12:08] [Rank 0] step:3321/10000 train_time:155947ms step_avg:46.96ms +[2025-09-11 10:12:08] [Rank 0] step:3321/10000 train_time:155947ms step_avg:46.96ms +[2025-09-11 10:12:09] [Rank 0] step:3341/10000 train_time:156611ms step_avg:46.88ms +[2025-09-11 10:12:09] [Rank 0] step:3341/10000 train_time:156611ms step_avg:46.88ms +[2025-09-11 10:12:09] [Rank 0] step:3361/10000 train_time:157277ms step_avg:46.79ms +[2025-09-11 10:12:09] [Rank 0] step:3361/10000 train_time:157277ms step_avg:46.79ms +[2025-09-11 10:12:10] [Rank 0] step:3381/10000 train_time:157942ms step_avg:46.71ms +[2025-09-11 10:12:10] [Rank 0] step:3381/10000 train_time:157942ms step_avg:46.71ms +[2025-09-11 10:12:11] [Rank 0] step:3401/10000 train_time:158607ms step_avg:46.64ms +[2025-09-11 10:12:11] [Rank 0] step:3401/10000 train_time:158607ms step_avg:46.64ms +[2025-09-11 10:12:11] [Rank 0] step:3421/10000 train_time:159271ms step_avg:46.56ms +[2025-09-11 10:12:11] [Rank 0] step:3421/10000 train_time:159271ms step_avg:46.56ms +[2025-09-11 10:12:12] [Rank 0] step:3441/10000 train_time:159936ms step_avg:46.48ms +[2025-09-11 10:12:12] [Rank 0] step:3441/10000 train_time:159936ms step_avg:46.48ms +[2025-09-11 10:12:13] [Rank 0] step:3461/10000 train_time:160601ms step_avg:46.40ms +[2025-09-11 10:12:13] [Rank 0] step:3461/10000 train_time:160601ms step_avg:46.40ms +[2025-09-11 10:12:13] [Rank 0] step:3481/10000 train_time:161265ms step_avg:46.33ms +[2025-09-11 10:12:13] [Rank 0] step:3481/10000 train_time:161265ms step_avg:46.33ms +[2025-09-11 10:12:14] [Rank 0] step:3501/10000 train_time:161929ms step_avg:46.25ms +[2025-09-11 10:12:14] [Rank 0] step:3501/10000 train_time:161929ms step_avg:46.25ms +[2025-09-11 10:12:15] [Rank 0] step:3521/10000 train_time:162593ms step_avg:46.18ms 
+[2025-09-11 10:12:15] [Rank 0] step:3521/10000 train_time:162593ms step_avg:46.18ms +[2025-09-11 10:12:15] [Rank 0] step:3541/10000 train_time:163257ms step_avg:46.10ms +[2025-09-11 10:12:15] [Rank 0] step:3541/10000 train_time:163257ms step_avg:46.10ms +[2025-09-11 10:12:16] [Rank 0] step:3561/10000 train_time:163921ms step_avg:46.03ms +[2025-09-11 10:12:16] [Rank 0] step:3561/10000 train_time:163921ms step_avg:46.03ms +[2025-09-11 10:12:17] [Rank 0] step:3581/10000 train_time:164584ms step_avg:45.96ms +[2025-09-11 10:12:17] [Rank 0] step:3581/10000 train_time:164584ms step_avg:45.96ms +[2025-09-11 10:12:17] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 10:12:17] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 10:12:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 10:12:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 10:12:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 10:12:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 10:12:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:12:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:12:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 10:12:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 10:12:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 10:12:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 10:12:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 10:12:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 10:12:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 10:12:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 10:12:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 10:12:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 10:12:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 10:12:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 10:12:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 10:12:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 10:12:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 10:12:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 10:12:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 10:12:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 10:12:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 10:12:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 10:12:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 10:12:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 10:12:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 10:12:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 10:12:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 10:12:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 10:12:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 10:12:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 10:12:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 10:12:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 10:12:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 10:12:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 10:12:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 10:12:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 10:12:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 10:12:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 10:12:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:12:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:12:27] [Rank 0] PRINT: step:3600/10000 val_loss:4.5321 total_sharp:1.3936e-04 L1_sharp:3.4966e-03 L2_sharp:2.7251e-03 L3_sharp:5.4434e-03 L4_sharp:6.1998e-03 L5_sharp:1.3781e-02 L6_sharp:1.9597e-02 L7_sharp:2.5449e-02 L8_sharp:4.2334e-02 L9_sharp:4.3218e-02 L10_sharp:5.6158e-02 L11_sharp:8.3606e-02 L12_sharp:4.2029e-01 total_fnorm:7.0500e+01 total_l1_linf:1.3619e+05 total_spectral:3.5250e+01 L1_fnorm:2.4902e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4707e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4707e-01 L8_fnorm:2.4414e-01 L9_fnorm:2.4609e-01 L10_fnorm:2.4609e-01 L11_fnorm:2.4609e-01 L12_fnorm:2.4902e-01 L1_l1linf:6.6895e-02 L2_l1linf:6.7871e-02 L3_l1linf:6.5918e-02 L4_l1linf:6.5918e-02 L5_l1linf:6.6406e-02 L6_l1linf:6.5430e-02 L7_l1linf:6.4453e-02 L8_l1linf:6.4941e-02 L9_l1linf:6.3477e-02 L10_l1linf:6.4941e-02 L11_l1linf:6.2988e-02 L12_l1linf:6.7383e-02 L1_spectral:3.1743e-03 L2_spectral:3.1672e-03 L3_spectral:3.1962e-03 L4_spectral:3.1619e-03 L5_spectral:3.1855e-03 L6_spectral:3.1652e-03 L7_spectral:3.1899e-03 L8_spectral:3.2116e-03 L9_spectral:3.2140e-03 L10_spectral:3.2105e-03 L11_spectral:3.2220e-03 L12_spectral:3.2120e-03 train_time:165230ms step_avg:45.90ms +[2025-09-11 10:12:27] [Rank 0] PRINT: step:3600/10000 val_loss:4.5321 total_sharp:1.3936e-04 L1_sharp:3.4966e-03 L2_sharp:2.7251e-03 L3_sharp:5.4434e-03 L4_sharp:6.1998e-03 L5_sharp:1.3781e-02 L6_sharp:1.9597e-02 L7_sharp:2.5449e-02 L8_sharp:4.2334e-02 L9_sharp:4.3218e-02 L10_sharp:5.6158e-02 L11_sharp:8.3606e-02 L12_sharp:4.2029e-01 total_fnorm:7.0500e+01 total_l1_linf:1.3619e+05 total_spectral:3.5250e+01 L1_fnorm:2.4902e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4707e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4707e-01 L8_fnorm:2.4414e-01 L9_fnorm:2.4609e-01 L10_fnorm:2.4609e-01 L11_fnorm:2.4609e-01 L12_fnorm:2.4902e-01 L1_l1linf:6.6895e-02 L2_l1linf:6.7871e-02 L3_l1linf:6.5918e-02 L4_l1linf:6.5918e-02 L5_l1linf:6.6406e-02 
L6_l1linf:6.5430e-02 L7_l1linf:6.4453e-02 L8_l1linf:6.4941e-02 L9_l1linf:6.3477e-02 L10_l1linf:6.4941e-02 L11_l1linf:6.2988e-02 L12_l1linf:6.7383e-02 L1_spectral:3.1743e-03 L2_spectral:3.1672e-03 L3_spectral:3.1962e-03 L4_spectral:3.1619e-03 L5_spectral:3.1855e-03 L6_spectral:3.1652e-03 L7_spectral:3.1899e-03 L8_spectral:3.2116e-03 L9_spectral:3.2140e-03 L10_spectral:3.2105e-03 L11_spectral:3.2220e-03 L12_spectral:3.2120e-03 train_time:165230ms step_avg:45.90ms +[2025-09-11 10:12:29] [Rank 0] step:3601/10000 train_time:166575ms step_avg:46.26ms +[2025-09-11 10:12:29] [Rank 0] step:3601/10000 train_time:166575ms step_avg:46.26ms +[2025-09-11 10:12:29] [Rank 0] step:3621/10000 train_time:167241ms step_avg:46.19ms +[2025-09-11 10:12:29] [Rank 0] step:3621/10000 train_time:167241ms step_avg:46.19ms +[2025-09-11 10:12:30] [Rank 0] step:3641/10000 train_time:167906ms step_avg:46.12ms +[2025-09-11 10:12:30] [Rank 0] step:3641/10000 train_time:167906ms step_avg:46.12ms +[2025-09-11 10:12:31] [Rank 0] step:3661/10000 train_time:168570ms step_avg:46.04ms +[2025-09-11 10:12:31] [Rank 0] step:3661/10000 train_time:168570ms step_avg:46.04ms +[2025-09-11 10:12:31] [Rank 0] step:3681/10000 train_time:169235ms step_avg:45.98ms +[2025-09-11 10:12:31] [Rank 0] step:3681/10000 train_time:169235ms step_avg:45.98ms +[2025-09-11 10:12:32] [Rank 0] step:3701/10000 train_time:169898ms step_avg:45.91ms +[2025-09-11 10:12:32] [Rank 0] step:3701/10000 train_time:169898ms step_avg:45.91ms +[2025-09-11 10:12:33] [Rank 0] step:3721/10000 train_time:170573ms step_avg:45.84ms +[2025-09-11 10:12:33] [Rank 0] step:3721/10000 train_time:170573ms step_avg:45.84ms +[2025-09-11 10:12:33] [Rank 0] step:3741/10000 train_time:171247ms step_avg:45.78ms +[2025-09-11 10:12:33] [Rank 0] step:3741/10000 train_time:171247ms step_avg:45.78ms +[2025-09-11 10:12:34] [Rank 0] step:3761/10000 train_time:171922ms step_avg:45.71ms +[2025-09-11 10:12:34] [Rank 0] step:3761/10000 train_time:171922ms step_avg:45.71ms 
+[2025-09-11 10:12:35] [Rank 0] step:3781/10000 train_time:172596ms step_avg:45.65ms +[2025-09-11 10:12:35] [Rank 0] step:3781/10000 train_time:172596ms step_avg:45.65ms +[2025-09-11 10:12:35] [Rank 0] step:3801/10000 train_time:173271ms step_avg:45.59ms +[2025-09-11 10:12:35] [Rank 0] step:3801/10000 train_time:173271ms step_avg:45.59ms +[2025-09-11 10:12:36] [Rank 0] step:3821/10000 train_time:173947ms step_avg:45.52ms +[2025-09-11 10:12:36] [Rank 0] step:3821/10000 train_time:173947ms step_avg:45.52ms +[2025-09-11 10:12:37] [Rank 0] step:3841/10000 train_time:174622ms step_avg:45.46ms +[2025-09-11 10:12:37] [Rank 0] step:3841/10000 train_time:174622ms step_avg:45.46ms +[2025-09-11 10:12:37] [Rank 0] step:3861/10000 train_time:175296ms step_avg:45.40ms +[2025-09-11 10:12:37] [Rank 0] step:3861/10000 train_time:175296ms step_avg:45.40ms +[2025-09-11 10:12:38] [Rank 0] step:3881/10000 train_time:175971ms step_avg:45.34ms +[2025-09-11 10:12:38] [Rank 0] step:3881/10000 train_time:175971ms step_avg:45.34ms +[2025-09-11 10:12:39] [Rank 0] step:3901/10000 train_time:176645ms step_avg:45.28ms +[2025-09-11 10:12:39] [Rank 0] step:3901/10000 train_time:176645ms step_avg:45.28ms +[2025-09-11 10:12:39] [Rank 0] step:3921/10000 train_time:177320ms step_avg:45.22ms +[2025-09-11 10:12:39] [Rank 0] step:3921/10000 train_time:177320ms step_avg:45.22ms +[2025-09-11 10:12:40] [Rank 0] step:3941/10000 train_time:177995ms step_avg:45.16ms +[2025-09-11 10:12:40] [Rank 0] step:3941/10000 train_time:177995ms step_avg:45.16ms +[2025-09-11 10:12:41] [Rank 0] step:3961/10000 train_time:178670ms step_avg:45.11ms +[2025-09-11 10:12:41] [Rank 0] step:3961/10000 train_time:178670ms step_avg:45.11ms +[2025-09-11 10:12:41] [Rank 0] step:3981/10000 train_time:179345ms step_avg:45.05ms +[2025-09-11 10:12:41] [Rank 0] step:3981/10000 train_time:179345ms step_avg:45.05ms +[2025-09-11 10:12:42] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 10:12:42] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 10:12:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 10:12:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 10:12:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 10:12:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 10:12:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:12:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:12:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 10:12:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 10:12:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 10:12:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 10:12:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 10:12:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 10:12:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 10:12:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 10:12:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 10:12:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 10:12:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 10:12:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 10:12:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 10:12:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 10:12:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 10:12:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 10:12:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 10:12:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 10:12:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 10:12:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 10:12:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 10:12:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 10:12:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 10:12:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 10:12:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 10:12:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 10:12:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 10:12:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 10:12:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 10:12:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 10:12:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 10:12:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 10:12:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 10:12:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 10:12:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 10:12:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 10:12:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:12:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:12:52] [Rank 0] PRINT: step:4000/10000 val_loss:4.4782 total_sharp:1.4267e-04 L1_sharp:5.0010e-03 L2_sharp:3.7313e-03 L3_sharp:6.5253e-03 L4_sharp:4.9174e-03 L5_sharp:1.3979e-02 L6_sharp:2.0152e-02 L7_sharp:2.7642e-02 L8_sharp:4.3536e-02 L9_sharp:4.8812e-02 L10_sharp:7.0752e-02 L11_sharp:1.2015e-01 L12_sharp:1.0347e+00 total_fnorm:8.2000e+01 total_l1_linf:1.5974e+05 total_spectral:4.1000e+01 L1_fnorm:2.4707e-01 L2_fnorm:2.4609e-01 L3_fnorm:2.4512e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4414e-01 L7_fnorm:2.4512e-01 L8_fnorm:2.4316e-01 L9_fnorm:2.4512e-01 L10_fnorm:2.4512e-01 L11_fnorm:2.4609e-01 L12_fnorm:2.5000e-01 L1_l1linf:6.4941e-02 L2_l1linf:6.5430e-02 L3_l1linf:6.4941e-02 L4_l1linf:6.5430e-02 L5_l1linf:6.3477e-02 L6_l1linf:6.3477e-02 L7_l1linf:6.3965e-02 L8_l1linf:6.2988e-02 L9_l1linf:6.2988e-02 L10_l1linf:6.2500e-02 L11_l1linf:6.2988e-02 L12_l1linf:6.7383e-02 L1_spectral:3.2114e-03 L2_spectral:3.1813e-03 L3_spectral:3.2133e-03 L4_spectral:3.1716e-03 L5_spectral:3.1821e-03 L6_spectral:3.1688e-03 L7_spectral:3.1767e-03 L8_spectral:3.1847e-03 L9_spectral:3.1981e-03 L10_spectral:3.1940e-03 L11_spectral:3.1978e-03 L12_spectral:3.1876e-03 train_time:180001ms step_avg:45.00ms +[2025-09-11 10:12:52] [Rank 0] PRINT: step:4000/10000 
val_loss:4.4782 total_sharp:1.4267e-04 L1_sharp:5.0010e-03 L2_sharp:3.7313e-03 L3_sharp:6.5253e-03 L4_sharp:4.9174e-03 L5_sharp:1.3979e-02 L6_sharp:2.0152e-02 L7_sharp:2.7642e-02 L8_sharp:4.3536e-02 L9_sharp:4.8812e-02 L10_sharp:7.0752e-02 L11_sharp:1.2015e-01 L12_sharp:1.0347e+00 total_fnorm:8.2000e+01 total_l1_linf:1.5974e+05 total_spectral:4.1000e+01 L1_fnorm:2.4707e-01 L2_fnorm:2.4609e-01 L3_fnorm:2.4512e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4414e-01 L7_fnorm:2.4512e-01 L8_fnorm:2.4316e-01 L9_fnorm:2.4512e-01 L10_fnorm:2.4512e-01 L11_fnorm:2.4609e-01 L12_fnorm:2.5000e-01 L1_l1linf:6.4941e-02 L2_l1linf:6.5430e-02 L3_l1linf:6.4941e-02 L4_l1linf:6.5430e-02 L5_l1linf:6.3477e-02 L6_l1linf:6.3477e-02 L7_l1linf:6.3965e-02 L8_l1linf:6.2988e-02 L9_l1linf:6.2988e-02 L10_l1linf:6.2500e-02 L11_l1linf:6.2988e-02 L12_l1linf:6.7383e-02 L1_spectral:3.2114e-03 L2_spectral:3.1813e-03 L3_spectral:3.2133e-03 L4_spectral:3.1716e-03 L5_spectral:3.1821e-03 L6_spectral:3.1688e-03 L7_spectral:3.1767e-03 L8_spectral:3.1847e-03 L9_spectral:3.1981e-03 L10_spectral:3.1940e-03 L11_spectral:3.1978e-03 L12_spectral:3.1876e-03 train_time:180001ms step_avg:45.00ms +[2025-09-11 10:12:54] [Rank 0] step:4001/10000 train_time:181365ms step_avg:45.33ms +[2025-09-11 10:12:54] [Rank 0] step:4001/10000 train_time:181365ms step_avg:45.33ms +[2025-09-11 10:12:54] [Rank 0] step:4021/10000 train_time:182059ms step_avg:45.28ms +[2025-09-11 10:12:54] [Rank 0] step:4021/10000 train_time:182059ms step_avg:45.28ms +[2025-09-11 10:12:55] [Rank 0] step:4041/10000 train_time:182734ms step_avg:45.22ms +[2025-09-11 10:12:55] [Rank 0] step:4041/10000 train_time:182734ms step_avg:45.22ms +[2025-09-11 10:12:56] [Rank 0] step:4061/10000 train_time:183407ms step_avg:45.16ms +[2025-09-11 10:12:56] [Rank 0] step:4061/10000 train_time:183407ms step_avg:45.16ms +[2025-09-11 10:12:56] [Rank 0] step:4081/10000 train_time:184083ms step_avg:45.11ms +[2025-09-11 10:12:56] [Rank 0] step:4081/10000 
train_time:184083ms step_avg:45.11ms +[2025-09-11 10:12:57] [Rank 0] step:4101/10000 train_time:184758ms step_avg:45.05ms +[2025-09-11 10:12:57] [Rank 0] step:4101/10000 train_time:184758ms step_avg:45.05ms +[2025-09-11 10:12:58] [Rank 0] step:4121/10000 train_time:185433ms step_avg:45.00ms +[2025-09-11 10:12:58] [Rank 0] step:4121/10000 train_time:185433ms step_avg:45.00ms +[2025-09-11 10:12:58] [Rank 0] step:4141/10000 train_time:186107ms step_avg:44.94ms +[2025-09-11 10:12:58] [Rank 0] step:4141/10000 train_time:186107ms step_avg:44.94ms +[2025-09-11 10:12:59] [Rank 0] step:4161/10000 train_time:186782ms step_avg:44.89ms +[2025-09-11 10:12:59] [Rank 0] step:4161/10000 train_time:186782ms step_avg:44.89ms +[2025-09-11 10:13:00] [Rank 0] step:4181/10000 train_time:187457ms step_avg:44.84ms +[2025-09-11 10:13:00] [Rank 0] step:4181/10000 train_time:187457ms step_avg:44.84ms +[2025-09-11 10:13:01] [Rank 0] step:4201/10000 train_time:188132ms step_avg:44.78ms +[2025-09-11 10:13:01] [Rank 0] step:4201/10000 train_time:188132ms step_avg:44.78ms +[2025-09-11 10:13:01] [Rank 0] step:4221/10000 train_time:189109ms step_avg:44.80ms +[2025-09-11 10:13:01] [Rank 0] step:4221/10000 train_time:189109ms step_avg:44.80ms +[2025-09-11 10:13:02] [Rank 0] step:4241/10000 train_time:189785ms step_avg:44.75ms +[2025-09-11 10:13:02] [Rank 0] step:4241/10000 train_time:189785ms step_avg:44.75ms +[2025-09-11 10:13:03] [Rank 0] step:4261/10000 train_time:190460ms step_avg:44.70ms +[2025-09-11 10:13:03] [Rank 0] step:4261/10000 train_time:190460ms step_avg:44.70ms +[2025-09-11 10:13:04] [Rank 0] step:4281/10000 train_time:191287ms step_avg:44.68ms +[2025-09-11 10:13:04] [Rank 0] step:4281/10000 train_time:191287ms step_avg:44.68ms +[2025-09-11 10:13:04] [Rank 0] step:4301/10000 train_time:192089ms step_avg:44.66ms +[2025-09-11 10:13:04] [Rank 0] step:4301/10000 train_time:192089ms step_avg:44.66ms +[2025-09-11 10:13:05] [Rank 0] step:4321/10000 train_time:192763ms step_avg:44.61ms 
+[2025-09-11 10:13:05] [Rank 0] step:4321/10000 train_time:192763ms step_avg:44.61ms +[2025-09-11 10:13:06] [Rank 0] step:4341/10000 train_time:193439ms step_avg:44.56ms +[2025-09-11 10:13:06] [Rank 0] step:4341/10000 train_time:193439ms step_avg:44.56ms +[2025-09-11 10:13:07] [Rank 0] step:4361/10000 train_time:194112ms step_avg:44.51ms +[2025-09-11 10:13:07] [Rank 0] step:4361/10000 train_time:194112ms step_avg:44.51ms +[2025-09-11 10:13:07] [Rank 0] step:4381/10000 train_time:194788ms step_avg:44.46ms +[2025-09-11 10:13:07] [Rank 0] step:4381/10000 train_time:194788ms step_avg:44.46ms +[2025-09-11 10:13:08] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 10:13:08] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 10:13:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 10:13:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 10:13:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 10:13:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 10:13:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:13:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:13:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 10:13:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 10:13:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 10:13:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 10:13:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 10:13:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 10:13:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 10:13:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 10:13:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 10:13:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 10:13:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 10:13:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 10:13:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 10:13:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 10:13:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 10:13:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 10:13:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 10:13:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 10:13:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 10:13:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 10:13:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 10:13:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 10:13:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 10:13:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 10:13:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 10:13:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 10:13:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 10:13:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 10:13:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 10:13:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 10:13:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 10:13:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 10:13:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 10:13:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 10:13:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 10:13:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 10:13:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:13:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:13:19] [Rank 0] PRINT: step:4400/10000 val_loss:4.4508 total_sharp:1.2832e-04 L1_sharp:3.7815e-03 L2_sharp:2.0145e-03 L3_sharp:5.5320e-03 L4_sharp:5.5141e-03 L5_sharp:1.3012e-02 L6_sharp:2.1532e-02 L7_sharp:2.5066e-02 L8_sharp:4.4621e-02 L9_sharp:4.0150e-02 L10_sharp:5.3552e-02 L11_sharp:9.2159e-02 L12_sharp:5.4715e-01 total_fnorm:7.2000e+01 total_l1_linf:1.3517e+05 total_spectral:3.6000e+01 L1_fnorm:2.4707e-01 L2_fnorm:2.4512e-01 L3_fnorm:2.4414e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4414e-01 L7_fnorm:2.4512e-01 L8_fnorm:2.4219e-01 L9_fnorm:2.4512e-01 L10_fnorm:2.4609e-01 L11_fnorm:2.4609e-01 L12_fnorm:2.4805e-01 L1_l1linf:6.4453e-02 L2_l1linf:6.3965e-02 L3_l1linf:6.3965e-02 L4_l1linf:6.3965e-02 L5_l1linf:6.3965e-02 L6_l1linf:6.2988e-02 L7_l1linf:6.3477e-02 L8_l1linf:6.2012e-02 L9_l1linf:6.2256e-02 L10_l1linf:6.1768e-02 L11_l1linf:6.3477e-02 L12_l1linf:6.6406e-02 L1_spectral:3.1877e-03 L2_spectral:3.1865e-03 L3_spectral:3.1909e-03 L4_spectral:3.1853e-03 L5_spectral:3.1857e-03 L6_spectral:3.1867e-03 L7_spectral:3.1864e-03 L8_spectral:3.1984e-03 L9_spectral:3.2268e-03 L10_spectral:3.1844e-03 L11_spectral:3.2099e-03 L12_spectral:3.1938e-03 train_time:195442ms step_avg:44.42ms +[2025-09-11 10:13:19] [Rank 0] PRINT: step:4400/10000 val_loss:4.4508 total_sharp:1.2832e-04 L1_sharp:3.7815e-03 L2_sharp:2.0145e-03 L3_sharp:5.5320e-03 L4_sharp:5.5141e-03 L5_sharp:1.3012e-02 L6_sharp:2.1532e-02 L7_sharp:2.5066e-02 L8_sharp:4.4621e-02 L9_sharp:4.0150e-02 L10_sharp:5.3552e-02 L11_sharp:9.2159e-02 L12_sharp:5.4715e-01 total_fnorm:7.2000e+01 total_l1_linf:1.3517e+05 total_spectral:3.6000e+01 L1_fnorm:2.4707e-01 L2_fnorm:2.4512e-01 L3_fnorm:2.4414e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4414e-01 L7_fnorm:2.4512e-01 L8_fnorm:2.4219e-01 L9_fnorm:2.4512e-01 L10_fnorm:2.4609e-01 L11_fnorm:2.4609e-01 L12_fnorm:2.4805e-01 L1_l1linf:6.4453e-02 L2_l1linf:6.3965e-02 L3_l1linf:6.3965e-02 L4_l1linf:6.3965e-02 L5_l1linf:6.3965e-02 
L6_l1linf:6.2988e-02 L7_l1linf:6.3477e-02 L8_l1linf:6.2012e-02 L9_l1linf:6.2256e-02 L10_l1linf:6.1768e-02 L11_l1linf:6.3477e-02 L12_l1linf:6.6406e-02 L1_spectral:3.1877e-03 L2_spectral:3.1865e-03 L3_spectral:3.1909e-03 L4_spectral:3.1853e-03 L5_spectral:3.1857e-03 L6_spectral:3.1867e-03 L7_spectral:3.1864e-03 L8_spectral:3.1984e-03 L9_spectral:3.2268e-03 L10_spectral:3.1844e-03 L11_spectral:3.2099e-03 L12_spectral:3.1938e-03 train_time:195442ms step_avg:44.42ms +[2025-09-11 10:13:20] [Rank 0] step:4401/10000 train_time:196787ms step_avg:44.71ms +[2025-09-11 10:13:20] [Rank 0] step:4401/10000 train_time:196787ms step_avg:44.71ms +[2025-09-11 10:13:21] [Rank 0] step:4421/10000 train_time:197486ms step_avg:44.67ms +[2025-09-11 10:13:21] [Rank 0] step:4421/10000 train_time:197486ms step_avg:44.67ms +[2025-09-11 10:13:21] [Rank 0] step:4441/10000 train_time:198163ms step_avg:44.62ms +[2025-09-11 10:13:21] [Rank 0] step:4441/10000 train_time:198163ms step_avg:44.62ms +[2025-09-11 10:13:22] [Rank 0] step:4461/10000 train_time:198841ms step_avg:44.57ms +[2025-09-11 10:13:22] [Rank 0] step:4461/10000 train_time:198841ms step_avg:44.57ms +[2025-09-11 10:13:23] [Rank 0] step:4481/10000 train_time:199518ms step_avg:44.53ms +[2025-09-11 10:13:23] [Rank 0] step:4481/10000 train_time:199518ms step_avg:44.53ms +[2025-09-11 10:13:23] [Rank 0] step:4501/10000 train_time:200199ms step_avg:44.48ms +[2025-09-11 10:13:23] [Rank 0] step:4501/10000 train_time:200199ms step_avg:44.48ms +[2025-09-11 10:13:24] [Rank 0] step:4521/10000 train_time:200877ms step_avg:44.43ms +[2025-09-11 10:13:24] [Rank 0] step:4521/10000 train_time:200877ms step_avg:44.43ms +[2025-09-11 10:13:25] [Rank 0] step:4541/10000 train_time:201557ms step_avg:44.39ms +[2025-09-11 10:13:25] [Rank 0] step:4541/10000 train_time:201557ms step_avg:44.39ms +[2025-09-11 10:13:25] [Rank 0] step:4561/10000 train_time:202235ms step_avg:44.34ms +[2025-09-11 10:13:25] [Rank 0] step:4561/10000 train_time:202235ms step_avg:44.34ms 
+[2025-09-11 10:13:26] [Rank 0] step:4581/10000 train_time:202912ms step_avg:44.29ms +[2025-09-11 10:13:26] [Rank 0] step:4581/10000 train_time:202912ms step_avg:44.29ms +[2025-09-11 10:13:27] [Rank 0] step:4601/10000 train_time:203590ms step_avg:44.25ms +[2025-09-11 10:13:27] [Rank 0] step:4601/10000 train_time:203590ms step_avg:44.25ms +[2025-09-11 10:13:27] [Rank 0] step:4621/10000 train_time:204267ms step_avg:44.20ms +[2025-09-11 10:13:27] [Rank 0] step:4621/10000 train_time:204267ms step_avg:44.20ms +[2025-09-11 10:13:28] [Rank 0] step:4641/10000 train_time:204944ms step_avg:44.16ms +[2025-09-11 10:13:28] [Rank 0] step:4641/10000 train_time:204944ms step_avg:44.16ms +[2025-09-11 10:13:29] [Rank 0] step:4661/10000 train_time:205621ms step_avg:44.12ms +[2025-09-11 10:13:29] [Rank 0] step:4661/10000 train_time:205621ms step_avg:44.12ms +[2025-09-11 10:13:29] [Rank 0] step:4681/10000 train_time:206298ms step_avg:44.07ms +[2025-09-11 10:13:29] [Rank 0] step:4681/10000 train_time:206298ms step_avg:44.07ms +[2025-09-11 10:13:30] [Rank 0] step:4701/10000 train_time:206975ms step_avg:44.03ms +[2025-09-11 10:13:30] [Rank 0] step:4701/10000 train_time:206975ms step_avg:44.03ms +[2025-09-11 10:13:31] [Rank 0] step:4721/10000 train_time:207653ms step_avg:43.99ms +[2025-09-11 10:13:31] [Rank 0] step:4721/10000 train_time:207653ms step_avg:43.99ms +[2025-09-11 10:13:31] [Rank 0] step:4741/10000 train_time:208331ms step_avg:43.94ms +[2025-09-11 10:13:31] [Rank 0] step:4741/10000 train_time:208331ms step_avg:43.94ms +[2025-09-11 10:13:32] [Rank 0] step:4761/10000 train_time:209009ms step_avg:43.90ms +[2025-09-11 10:13:32] [Rank 0] step:4761/10000 train_time:209009ms step_avg:43.90ms +[2025-09-11 10:13:33] [Rank 0] step:4781/10000 train_time:209686ms step_avg:43.86ms +[2025-09-11 10:13:33] [Rank 0] step:4781/10000 train_time:209686ms step_avg:43.86ms +[2025-09-11 10:13:33] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 10:13:33] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 10:13:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 10:13:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 10:13:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 10:13:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 10:13:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:13:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:13:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 10:13:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 10:13:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 10:13:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 10:13:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 10:13:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 10:13:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 10:13:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 10:13:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 10:13:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 10:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 10:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 10:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 10:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 10:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 10:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 10:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 10:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 10:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 10:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 10:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 10:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 10:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 10:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 10:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 10:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 10:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 10:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 10:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 10:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 10:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 10:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 10:13:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 10:13:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 10:13:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 10:13:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 10:13:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:13:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:13:44] [Rank 0] PRINT: step:4800/10000 val_loss:4.4087 total_sharp:1.0763e-04 L1_sharp:4.8278e-03 L2_sharp:3.5278e-03 L3_sharp:5.8448e-03 L4_sharp:6.5226e-03 L5_sharp:1.4434e-02 L6_sharp:2.1990e-02 L7_sharp:2.7770e-02 L8_sharp:3.9861e-02 L9_sharp:3.9650e-02 L10_sharp:5.4643e-02 L11_sharp:8.6800e-02 L12_sharp:5.9583e-01 total_fnorm:7.8500e+01 total_l1_linf:1.5462e+05 total_spectral:3.9000e+01 L1_fnorm:2.4707e-01 L2_fnorm:2.4609e-01 L3_fnorm:2.4414e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4414e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.4121e-01 L9_fnorm:2.4512e-01 L10_fnorm:2.4512e-01 L11_fnorm:2.4414e-01 L12_fnorm:2.4805e-01 L1_l1linf:6.3965e-02 L2_l1linf:6.3965e-02 L3_l1linf:6.2988e-02 L4_l1linf:6.3965e-02 L5_l1linf:6.2256e-02 L6_l1linf:6.1279e-02 L7_l1linf:6.1768e-02 L8_l1linf:6.1279e-02 L9_l1linf:6.1035e-02 L10_l1linf:6.0059e-02 L11_l1linf:6.0547e-02 L12_l1linf:6.4453e-02 L1_spectral:3.1861e-03 L2_spectral:3.1948e-03 L3_spectral:3.1942e-03 L4_spectral:3.1982e-03 L5_spectral:3.1827e-03 L6_spectral:3.1805e-03 L7_spectral:3.2059e-03 L8_spectral:3.1834e-03 L9_spectral:3.2164e-03 L10_spectral:3.2097e-03 L11_spectral:3.2106e-03 L12_spectral:3.2064e-03 train_time:210343ms step_avg:43.82ms +[2025-09-11 10:13:44] [Rank 0] PRINT: step:4800/10000 
val_loss:4.4087 total_sharp:1.0763e-04 L1_sharp:4.8278e-03 L2_sharp:3.5278e-03 L3_sharp:5.8448e-03 L4_sharp:6.5226e-03 L5_sharp:1.4434e-02 L6_sharp:2.1990e-02 L7_sharp:2.7770e-02 L8_sharp:3.9861e-02 L9_sharp:3.9650e-02 L10_sharp:5.4643e-02 L11_sharp:8.6800e-02 L12_sharp:5.9583e-01 total_fnorm:7.8500e+01 total_l1_linf:1.5462e+05 total_spectral:3.9000e+01 L1_fnorm:2.4707e-01 L2_fnorm:2.4609e-01 L3_fnorm:2.4414e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4414e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.4121e-01 L9_fnorm:2.4512e-01 L10_fnorm:2.4512e-01 L11_fnorm:2.4414e-01 L12_fnorm:2.4805e-01 L1_l1linf:6.3965e-02 L2_l1linf:6.3965e-02 L3_l1linf:6.2988e-02 L4_l1linf:6.3965e-02 L5_l1linf:6.2256e-02 L6_l1linf:6.1279e-02 L7_l1linf:6.1768e-02 L8_l1linf:6.1279e-02 L9_l1linf:6.1035e-02 L10_l1linf:6.0059e-02 L11_l1linf:6.0547e-02 L12_l1linf:6.4453e-02 L1_spectral:3.1861e-03 L2_spectral:3.1948e-03 L3_spectral:3.1942e-03 L4_spectral:3.1982e-03 L5_spectral:3.1827e-03 L6_spectral:3.1805e-03 L7_spectral:3.2059e-03 L8_spectral:3.1834e-03 L9_spectral:3.2164e-03 L10_spectral:3.2097e-03 L11_spectral:3.2106e-03 L12_spectral:3.2064e-03 train_time:210343ms step_avg:43.82ms +[2025-09-11 10:13:45] [Rank 0] step:4801/10000 train_time:211650ms step_avg:44.08ms +[2025-09-11 10:13:45] [Rank 0] step:4801/10000 train_time:211650ms step_avg:44.08ms +[2025-09-11 10:13:46] [Rank 0] step:4821/10000 train_time:212356ms step_avg:44.05ms +[2025-09-11 10:13:46] [Rank 0] step:4821/10000 train_time:212356ms step_avg:44.05ms +[2025-09-11 10:13:46] [Rank 0] step:4841/10000 train_time:213035ms step_avg:44.01ms +[2025-09-11 10:13:46] [Rank 0] step:4841/10000 train_time:213035ms step_avg:44.01ms +[2025-09-11 10:13:47] [Rank 0] step:4861/10000 train_time:213714ms step_avg:43.97ms +[2025-09-11 10:13:47] [Rank 0] step:4861/10000 train_time:213714ms step_avg:43.97ms +[2025-09-11 10:13:48] [Rank 0] step:4881/10000 train_time:214392ms step_avg:43.92ms +[2025-09-11 10:13:48] [Rank 0] step:4881/10000 
train_time:214392ms step_avg:43.92ms +[2025-09-11 10:13:48] [Rank 0] step:4901/10000 train_time:215071ms step_avg:43.88ms +[2025-09-11 10:13:48] [Rank 0] step:4901/10000 train_time:215071ms step_avg:43.88ms +[2025-09-11 10:13:49] [Rank 0] step:4921/10000 train_time:215750ms step_avg:43.84ms +[2025-09-11 10:13:49] [Rank 0] step:4921/10000 train_time:215750ms step_avg:43.84ms +[2025-09-11 10:13:50] [Rank 0] step:4941/10000 train_time:216427ms step_avg:43.80ms +[2025-09-11 10:13:50] [Rank 0] step:4941/10000 train_time:216427ms step_avg:43.80ms +[2025-09-11 10:13:50] [Rank 0] step:4961/10000 train_time:217105ms step_avg:43.76ms +[2025-09-11 10:13:50] [Rank 0] step:4961/10000 train_time:217105ms step_avg:43.76ms +[2025-09-11 10:13:51] [Rank 0] step:4981/10000 train_time:217783ms step_avg:43.72ms +[2025-09-11 10:13:51] [Rank 0] step:4981/10000 train_time:217783ms step_avg:43.72ms +[2025-09-11 10:13:52] [Rank 0] step:5001/10000 train_time:218462ms step_avg:43.68ms +[2025-09-11 10:13:52] [Rank 0] step:5001/10000 train_time:218462ms step_avg:43.68ms +[2025-09-11 10:13:52] [Rank 0] step:5021/10000 train_time:219140ms step_avg:43.64ms +[2025-09-11 10:13:52] [Rank 0] step:5021/10000 train_time:219140ms step_avg:43.64ms +[2025-09-11 10:13:53] [Rank 0] step:5041/10000 train_time:219817ms step_avg:43.61ms +[2025-09-11 10:13:53] [Rank 0] step:5041/10000 train_time:219817ms step_avg:43.61ms +[2025-09-11 10:13:54] [Rank 0] step:5061/10000 train_time:220495ms step_avg:43.57ms +[2025-09-11 10:13:54] [Rank 0] step:5061/10000 train_time:220495ms step_avg:43.57ms +[2025-09-11 10:13:54] [Rank 0] step:5081/10000 train_time:221174ms step_avg:43.53ms +[2025-09-11 10:13:54] [Rank 0] step:5081/10000 train_time:221174ms step_avg:43.53ms +[2025-09-11 10:13:55] [Rank 0] step:5101/10000 train_time:221853ms step_avg:43.49ms +[2025-09-11 10:13:55] [Rank 0] step:5101/10000 train_time:221853ms step_avg:43.49ms +[2025-09-11 10:13:56] [Rank 0] step:5121/10000 train_time:222530ms step_avg:43.45ms 
+[2025-09-11 10:13:56] [Rank 0] step:5121/10000 train_time:222530ms step_avg:43.45ms +[2025-09-11 10:13:56] [Rank 0] step:5141/10000 train_time:223208ms step_avg:43.42ms +[2025-09-11 10:13:56] [Rank 0] step:5141/10000 train_time:223208ms step_avg:43.42ms +[2025-09-11 10:13:57] [Rank 0] step:5161/10000 train_time:223886ms step_avg:43.38ms +[2025-09-11 10:13:57] [Rank 0] step:5161/10000 train_time:223886ms step_avg:43.38ms +[2025-09-11 10:13:58] [Rank 0] step:5181/10000 train_time:224565ms step_avg:43.34ms +[2025-09-11 10:13:58] [Rank 0] step:5181/10000 train_time:224565ms step_avg:43.34ms +[2025-09-11 10:13:58] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 10:13:58] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 10:13:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 10:13:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 10:14:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 10:14:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 10:14:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:14:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:14:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 10:14:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 10:14:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 10:14:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 10:14:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 10:14:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 10:14:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 10:14:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 10:14:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 10:14:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 10:14:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 10:14:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 10:14:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 10:14:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 10:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 10:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 10:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 10:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 10:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 10:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 10:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 10:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 10:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 10:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 10:14:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 10:14:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 10:14:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 10:14:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 10:14:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 10:14:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 10:14:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 10:14:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 10:14:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 10:14:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 10:14:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 10:14:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 10:14:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:14:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:14:08] [Rank 0] PRINT: step:5200/10000 val_loss:4.3752 total_sharp:1.8357e-04 L1_sharp:3.0454e-03 L2_sharp:2.1889e-03 L3_sharp:4.5932e-03 L4_sharp:6.5560e-03 L5_sharp:1.2424e-02 L6_sharp:2.2565e-02 L7_sharp:2.8598e-02 L8_sharp:4.7387e-02 L9_sharp:5.4883e-02 L10_sharp:7.7990e-02 L11_sharp:1.2961e-01 L12_sharp:2.1142e+00 total_fnorm:7.2000e+01 total_l1_linf:1.3414e+05 total_spectral:3.6000e+01 L1_fnorm:2.4707e-01 L2_fnorm:2.4609e-01 L3_fnorm:2.4414e-01 L4_fnorm:2.4414e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4414e-01 L7_fnorm:2.4512e-01 L8_fnorm:2.4219e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4414e-01 L11_fnorm:2.4414e-01 L12_fnorm:2.4707e-01 L1_l1linf:6.3477e-02 L2_l1linf:6.3965e-02 L3_l1linf:6.1523e-02 L4_l1linf:6.2500e-02 L5_l1linf:6.1768e-02 L6_l1linf:6.1279e-02 L7_l1linf:6.2256e-02 L8_l1linf:5.9814e-02 L9_l1linf:6.1035e-02 L10_l1linf:5.8838e-02 L11_l1linf:5.8594e-02 L12_l1linf:6.3965e-02 L1_spectral:3.1968e-03 L2_spectral:3.1868e-03 L3_spectral:3.1935e-03 L4_spectral:3.1992e-03 L5_spectral:3.1920e-03 L6_spectral:3.2060e-03 L7_spectral:3.2133e-03 L8_spectral:3.2152e-03 L9_spectral:3.2182e-03 L10_spectral:3.2007e-03 L11_spectral:3.2138e-03 L12_spectral:3.2011e-03 train_time:225229ms step_avg:43.31ms +[2025-09-11 10:14:08] [Rank 0] PRINT: step:5200/10000 val_loss:4.3752 total_sharp:1.8357e-04 L1_sharp:3.0454e-03 L2_sharp:2.1889e-03 L3_sharp:4.5932e-03 L4_sharp:6.5560e-03 L5_sharp:1.2424e-02 L6_sharp:2.2565e-02 L7_sharp:2.8598e-02 L8_sharp:4.7387e-02 L9_sharp:5.4883e-02 L10_sharp:7.7990e-02 L11_sharp:1.2961e-01 L12_sharp:2.1142e+00 total_fnorm:7.2000e+01 total_l1_linf:1.3414e+05 total_spectral:3.6000e+01 L1_fnorm:2.4707e-01 L2_fnorm:2.4609e-01 L3_fnorm:2.4414e-01 L4_fnorm:2.4414e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4414e-01 L7_fnorm:2.4512e-01 L8_fnorm:2.4219e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4414e-01 L11_fnorm:2.4414e-01 L12_fnorm:2.4707e-01 L1_l1linf:6.3477e-02 L2_l1linf:6.3965e-02 L3_l1linf:6.1523e-02 L4_l1linf:6.2500e-02 L5_l1linf:6.1768e-02 
L6_l1linf:6.1279e-02 L7_l1linf:6.2256e-02 L8_l1linf:5.9814e-02 L9_l1linf:6.1035e-02 L10_l1linf:5.8838e-02 L11_l1linf:5.8594e-02 L12_l1linf:6.3965e-02 L1_spectral:3.1968e-03 L2_spectral:3.1868e-03 L3_spectral:3.1935e-03 L4_spectral:3.1992e-03 L5_spectral:3.1920e-03 L6_spectral:3.2060e-03 L7_spectral:3.2133e-03 L8_spectral:3.2152e-03 L9_spectral:3.2182e-03 L10_spectral:3.2007e-03 L11_spectral:3.2138e-03 L12_spectral:3.2011e-03 train_time:225229ms step_avg:43.31ms +[2025-09-11 10:14:10] [Rank 0] step:5201/10000 train_time:226625ms step_avg:43.57ms +[2025-09-11 10:14:10] [Rank 0] step:5201/10000 train_time:226625ms step_avg:43.57ms +[2025-09-11 10:14:11] [Rank 0] step:5221/10000 train_time:227529ms step_avg:43.58ms +[2025-09-11 10:14:11] [Rank 0] step:5221/10000 train_time:227529ms step_avg:43.58ms +[2025-09-11 10:14:11] [Rank 0] step:5241/10000 train_time:228216ms step_avg:43.54ms +[2025-09-11 10:14:11] [Rank 0] step:5241/10000 train_time:228216ms step_avg:43.54ms +[2025-09-11 10:14:12] [Rank 0] step:5261/10000 train_time:228902ms step_avg:43.51ms +[2025-09-11 10:14:12] [Rank 0] step:5261/10000 train_time:228902ms step_avg:43.51ms +[2025-09-11 10:14:13] [Rank 0] step:5281/10000 train_time:229590ms step_avg:43.47ms +[2025-09-11 10:14:13] [Rank 0] step:5281/10000 train_time:229590ms step_avg:43.47ms +[2025-09-11 10:14:13] [Rank 0] step:5301/10000 train_time:230277ms step_avg:43.44ms +[2025-09-11 10:14:13] [Rank 0] step:5301/10000 train_time:230277ms step_avg:43.44ms +[2025-09-11 10:14:14] [Rank 0] step:5321/10000 train_time:230963ms step_avg:43.41ms +[2025-09-11 10:14:14] [Rank 0] step:5321/10000 train_time:230963ms step_avg:43.41ms +[2025-09-11 10:14:15] [Rank 0] step:5341/10000 train_time:231651ms step_avg:43.37ms +[2025-09-11 10:14:15] [Rank 0] step:5341/10000 train_time:231651ms step_avg:43.37ms +[2025-09-11 10:14:15] [Rank 0] step:5361/10000 train_time:232339ms step_avg:43.34ms +[2025-09-11 10:14:15] [Rank 0] step:5361/10000 train_time:232339ms step_avg:43.34ms 
+[2025-09-11 10:14:16] [Rank 0] step:5381/10000 train_time:233027ms step_avg:43.31ms +[2025-09-11 10:14:16] [Rank 0] step:5381/10000 train_time:233027ms step_avg:43.31ms +[2025-09-11 10:14:17] [Rank 0] step:5401/10000 train_time:233712ms step_avg:43.27ms +[2025-09-11 10:14:17] [Rank 0] step:5401/10000 train_time:233712ms step_avg:43.27ms +[2025-09-11 10:14:18] [Rank 0] step:5421/10000 train_time:234401ms step_avg:43.24ms +[2025-09-11 10:14:18] [Rank 0] step:5421/10000 train_time:234401ms step_avg:43.24ms +[2025-09-11 10:14:18] [Rank 0] step:5441/10000 train_time:235088ms step_avg:43.21ms +[2025-09-11 10:14:18] [Rank 0] step:5441/10000 train_time:235088ms step_avg:43.21ms +[2025-09-11 10:14:19] [Rank 0] step:5461/10000 train_time:235775ms step_avg:43.17ms +[2025-09-11 10:14:19] [Rank 0] step:5461/10000 train_time:235775ms step_avg:43.17ms +[2025-09-11 10:14:20] [Rank 0] step:5481/10000 train_time:236463ms step_avg:43.14ms +[2025-09-11 10:14:20] [Rank 0] step:5481/10000 train_time:236463ms step_avg:43.14ms +[2025-09-11 10:14:20] [Rank 0] step:5501/10000 train_time:237150ms step_avg:43.11ms +[2025-09-11 10:14:20] [Rank 0] step:5501/10000 train_time:237150ms step_avg:43.11ms +[2025-09-11 10:14:21] [Rank 0] step:5521/10000 train_time:237837ms step_avg:43.08ms +[2025-09-11 10:14:21] [Rank 0] step:5521/10000 train_time:237837ms step_avg:43.08ms +[2025-09-11 10:14:22] [Rank 0] step:5541/10000 train_time:238527ms step_avg:43.05ms +[2025-09-11 10:14:22] [Rank 0] step:5541/10000 train_time:238527ms step_avg:43.05ms +[2025-09-11 10:14:22] [Rank 0] step:5561/10000 train_time:239216ms step_avg:43.02ms +[2025-09-11 10:14:22] [Rank 0] step:5561/10000 train_time:239216ms step_avg:43.02ms +[2025-09-11 10:14:23] [Rank 0] step:5581/10000 train_time:239905ms step_avg:42.99ms +[2025-09-11 10:14:23] [Rank 0] step:5581/10000 train_time:239905ms step_avg:42.99ms +[2025-09-11 10:14:24] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 10:14:24] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 10:14:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 10:14:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 10:14:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 10:14:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 10:14:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:14:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:14:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 10:14:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 10:14:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 10:14:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 10:14:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 10:14:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 10:14:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 10:14:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 10:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 10:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 10:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 10:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 10:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 10:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 10:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 10:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 10:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 10:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 10:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 10:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 10:14:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 10:14:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 10:14:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 10:14:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 10:14:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 10:14:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 10:14:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 10:14:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 10:14:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 10:14:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 10:14:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 10:14:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 10:14:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 10:14:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 10:14:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 10:14:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 10:14:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:14:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:14:34] [Rank 0] PRINT: step:5600/10000 val_loss:4.3537 total_sharp:1.2279e-04 L1_sharp:2.1637e-03 L2_sharp:1.1300e-03 L3_sharp:3.3206e-03 L4_sharp:6.4176e-03 L5_sharp:1.1440e-02 L6_sharp:1.7010e-02 L7_sharp:2.1953e-02 L8_sharp:3.3652e-02 L9_sharp:3.8071e-02 L10_sharp:5.9010e-02 L11_sharp:9.3412e-02 L12_sharp:1.0460e+00 total_fnorm:7.2000e+01 total_l1_linf:1.3619e+05 total_spectral:3.6000e+01 L1_fnorm:2.4512e-01 L2_fnorm:2.4414e-01 L3_fnorm:2.4414e-01 L4_fnorm:2.4414e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4316e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.4023e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4414e-01 L11_fnorm:2.4219e-01 L12_fnorm:2.4609e-01 L1_l1linf:6.1523e-02 L2_l1linf:6.2012e-02 L3_l1linf:6.0303e-02 L4_l1linf:6.1279e-02 L5_l1linf:6.0547e-02 L6_l1linf:6.0303e-02 L7_l1linf:6.0303e-02 L8_l1linf:5.9814e-02 L9_l1linf:5.8838e-02 L10_l1linf:5.8350e-02 L11_l1linf:5.7861e-02 L12_l1linf:6.2988e-02 L1_spectral:3.2179e-03 L2_spectral:3.2104e-03 L3_spectral:3.2100e-03 L4_spectral:3.2256e-03 L5_spectral:3.2107e-03 L6_spectral:3.2132e-03 L7_spectral:3.2347e-03 L8_spectral:3.1979e-03 L9_spectral:3.2167e-03 L10_spectral:3.2184e-03 L11_spectral:3.2050e-03 L12_spectral:3.2231e-03 train_time:240572ms step_avg:42.96ms +[2025-09-11 10:14:34] [Rank 0] PRINT: step:5600/10000 
val_loss:4.3537 total_sharp:1.2279e-04 L1_sharp:2.1637e-03 L2_sharp:1.1300e-03 L3_sharp:3.3206e-03 L4_sharp:6.4176e-03 L5_sharp:1.1440e-02 L6_sharp:1.7010e-02 L7_sharp:2.1953e-02 L8_sharp:3.3652e-02 L9_sharp:3.8071e-02 L10_sharp:5.9010e-02 L11_sharp:9.3412e-02 L12_sharp:1.0460e+00 total_fnorm:7.2000e+01 total_l1_linf:1.3619e+05 total_spectral:3.6000e+01 L1_fnorm:2.4512e-01 L2_fnorm:2.4414e-01 L3_fnorm:2.4414e-01 L4_fnorm:2.4414e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4316e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.4023e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4414e-01 L11_fnorm:2.4219e-01 L12_fnorm:2.4609e-01 L1_l1linf:6.1523e-02 L2_l1linf:6.2012e-02 L3_l1linf:6.0303e-02 L4_l1linf:6.1279e-02 L5_l1linf:6.0547e-02 L6_l1linf:6.0303e-02 L7_l1linf:6.0303e-02 L8_l1linf:5.9814e-02 L9_l1linf:5.8838e-02 L10_l1linf:5.8350e-02 L11_l1linf:5.7861e-02 L12_l1linf:6.2988e-02 L1_spectral:3.2179e-03 L2_spectral:3.2104e-03 L3_spectral:3.2100e-03 L4_spectral:3.2256e-03 L5_spectral:3.2107e-03 L6_spectral:3.2132e-03 L7_spectral:3.2347e-03 L8_spectral:3.1979e-03 L9_spectral:3.2167e-03 L10_spectral:3.2184e-03 L11_spectral:3.2050e-03 L12_spectral:3.2231e-03 train_time:240572ms step_avg:42.96ms +[2025-09-11 10:14:35] [Rank 0] step:5601/10000 train_time:241875ms step_avg:43.18ms +[2025-09-11 10:14:35] [Rank 0] step:5601/10000 train_time:241875ms step_avg:43.18ms +[2025-09-11 10:14:36] [Rank 0] step:5621/10000 train_time:242594ms step_avg:43.16ms +[2025-09-11 10:14:36] [Rank 0] step:5621/10000 train_time:242594ms step_avg:43.16ms +[2025-09-11 10:14:37] [Rank 0] step:5641/10000 train_time:243281ms step_avg:43.13ms +[2025-09-11 10:14:37] [Rank 0] step:5641/10000 train_time:243281ms step_avg:43.13ms +[2025-09-11 10:14:37] [Rank 0] step:5661/10000 train_time:243968ms step_avg:43.10ms +[2025-09-11 10:14:37] [Rank 0] step:5661/10000 train_time:243968ms step_avg:43.10ms +[2025-09-11 10:14:38] [Rank 0] step:5681/10000 train_time:244659ms step_avg:43.07ms +[2025-09-11 10:14:38] [Rank 0] step:5681/10000 
train_time:244659ms step_avg:43.07ms +[2025-09-11 10:14:39] [Rank 0] step:5701/10000 train_time:245348ms step_avg:43.04ms +[2025-09-11 10:14:39] [Rank 0] step:5701/10000 train_time:245348ms step_avg:43.04ms +[2025-09-11 10:14:39] [Rank 0] step:5721/10000 train_time:246035ms step_avg:43.01ms +[2025-09-11 10:14:39] [Rank 0] step:5721/10000 train_time:246035ms step_avg:43.01ms +[2025-09-11 10:14:40] [Rank 0] step:5741/10000 train_time:246723ms step_avg:42.98ms +[2025-09-11 10:14:40] [Rank 0] step:5741/10000 train_time:246723ms step_avg:42.98ms +[2025-09-11 10:14:41] [Rank 0] step:5761/10000 train_time:247411ms step_avg:42.95ms +[2025-09-11 10:14:41] [Rank 0] step:5761/10000 train_time:247411ms step_avg:42.95ms +[2025-09-11 10:14:41] [Rank 0] step:5781/10000 train_time:248101ms step_avg:42.92ms +[2025-09-11 10:14:41] [Rank 0] step:5781/10000 train_time:248101ms step_avg:42.92ms +[2025-09-11 10:14:42] [Rank 0] step:5801/10000 train_time:248790ms step_avg:42.89ms +[2025-09-11 10:14:42] [Rank 0] step:5801/10000 train_time:248790ms step_avg:42.89ms +[2025-09-11 10:14:43] [Rank 0] step:5821/10000 train_time:249477ms step_avg:42.86ms +[2025-09-11 10:14:43] [Rank 0] step:5821/10000 train_time:249477ms step_avg:42.86ms +[2025-09-11 10:14:43] [Rank 0] step:5841/10000 train_time:250166ms step_avg:42.83ms +[2025-09-11 10:14:43] [Rank 0] step:5841/10000 train_time:250166ms step_avg:42.83ms +[2025-09-11 10:14:44] [Rank 0] step:5861/10000 train_time:250853ms step_avg:42.80ms +[2025-09-11 10:14:44] [Rank 0] step:5861/10000 train_time:250853ms step_avg:42.80ms +[2025-09-11 10:14:45] [Rank 0] step:5881/10000 train_time:251541ms step_avg:42.77ms +[2025-09-11 10:14:45] [Rank 0] step:5881/10000 train_time:251541ms step_avg:42.77ms +[2025-09-11 10:14:46] [Rank 0] step:5901/10000 train_time:252227ms step_avg:42.74ms +[2025-09-11 10:14:46] [Rank 0] step:5901/10000 train_time:252227ms step_avg:42.74ms +[2025-09-11 10:14:46] [Rank 0] step:5921/10000 train_time:252917ms step_avg:42.72ms 
+[2025-09-11 10:14:46] [Rank 0] step:5921/10000 train_time:252917ms step_avg:42.72ms +[2025-09-11 10:14:47] [Rank 0] step:5941/10000 train_time:253606ms step_avg:42.69ms +[2025-09-11 10:14:47] [Rank 0] step:5941/10000 train_time:253606ms step_avg:42.69ms +[2025-09-11 10:14:48] [Rank 0] step:5961/10000 train_time:254296ms step_avg:42.66ms +[2025-09-11 10:14:48] [Rank 0] step:5961/10000 train_time:254296ms step_avg:42.66ms +[2025-09-11 10:14:48] [Rank 0] step:5981/10000 train_time:254985ms step_avg:42.63ms +[2025-09-11 10:14:48] [Rank 0] step:5981/10000 train_time:254985ms step_avg:42.63ms +[2025-09-11 10:14:49] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 10:14:49] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 10:14:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 10:14:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 10:14:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 10:14:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 10:14:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:14:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:14:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 10:14:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 10:14:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 10:14:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 10:14:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 10:14:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 10:14:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 10:14:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 10:14:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 10:14:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 10:14:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 10:14:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 10:14:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 10:14:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 10:14:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 10:14:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 10:14:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 10:14:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 10:14:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 10:14:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 10:14:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 10:14:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 10:14:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 10:14:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 10:14:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 10:14:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 10:14:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 10:14:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 10:14:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 10:14:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 10:14:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 10:14:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 10:14:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 10:14:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 10:14:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 10:14:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 10:14:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:14:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:14:59] [Rank 0] PRINT: step:6000/10000 val_loss:4.3164 total_sharp:9.5931e-05 L1_sharp:2.4017e-03 L2_sharp:2.0694e-03 L3_sharp:3.9916e-03 L4_sharp:6.6994e-03 L5_sharp:1.0084e-02 L6_sharp:1.4979e-02 L7_sharp:1.9347e-02 L8_sharp:2.6797e-02 L9_sharp:3.4722e-02 L10_sharp:4.7007e-02 L11_sharp:7.1907e-02 L12_sharp:2.7801e-01 total_fnorm:7.2000e+01 total_l1_linf:1.3312e+05 total_spectral:3.6000e+01 L1_fnorm:2.4609e-01 L2_fnorm:2.4609e-01 L3_fnorm:2.4414e-01 L4_fnorm:2.4414e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4316e-01 L7_fnorm:2.4512e-01 L8_fnorm:2.4121e-01 L9_fnorm:2.4316e-01 L10_fnorm:2.4316e-01 L11_fnorm:2.4316e-01 L12_fnorm:2.4316e-01 L1_l1linf:6.1279e-02 L2_l1linf:6.1035e-02 L3_l1linf:5.9570e-02 L4_l1linf:6.0547e-02 L5_l1linf:6.0547e-02 L6_l1linf:6.0303e-02 L7_l1linf:5.9570e-02 L8_l1linf:5.8350e-02 L9_l1linf:5.8594e-02 L10_l1linf:5.7373e-02 L11_l1linf:5.6885e-02 L12_l1linf:5.9082e-02 L1_spectral:3.2091e-03 L2_spectral:3.2072e-03 L3_spectral:3.2001e-03 L4_spectral:3.2206e-03 L5_spectral:3.1951e-03 L6_spectral:3.2040e-03 L7_spectral:3.2223e-03 L8_spectral:3.2210e-03 L9_spectral:3.2152e-03 L10_spectral:3.2024e-03 L11_spectral:3.2127e-03 L12_spectral:3.2151e-03 train_time:255657ms step_avg:42.61ms +[2025-09-11 10:14:59] [Rank 0] PRINT: step:6000/10000 val_loss:4.3164 total_sharp:9.5931e-05 L1_sharp:2.4017e-03 L2_sharp:2.0694e-03 L3_sharp:3.9916e-03 L4_sharp:6.6994e-03 L5_sharp:1.0084e-02 L6_sharp:1.4979e-02 L7_sharp:1.9347e-02 L8_sharp:2.6797e-02 L9_sharp:3.4722e-02 L10_sharp:4.7007e-02 L11_sharp:7.1907e-02 L12_sharp:2.7801e-01 total_fnorm:7.2000e+01 total_l1_linf:1.3312e+05 total_spectral:3.6000e+01 L1_fnorm:2.4609e-01 L2_fnorm:2.4609e-01 L3_fnorm:2.4414e-01 L4_fnorm:2.4414e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4316e-01 L7_fnorm:2.4512e-01 L8_fnorm:2.4121e-01 L9_fnorm:2.4316e-01 L10_fnorm:2.4316e-01 L11_fnorm:2.4316e-01 L12_fnorm:2.4316e-01 L1_l1linf:6.1279e-02 L2_l1linf:6.1035e-02 L3_l1linf:5.9570e-02 L4_l1linf:6.0547e-02 L5_l1linf:6.0547e-02 
L6_l1linf:6.0303e-02 L7_l1linf:5.9570e-02 L8_l1linf:5.8350e-02 L9_l1linf:5.8594e-02 L10_l1linf:5.7373e-02 L11_l1linf:5.6885e-02 L12_l1linf:5.9082e-02 L1_spectral:3.2091e-03 L2_spectral:3.2072e-03 L3_spectral:3.2001e-03 L4_spectral:3.2206e-03 L5_spectral:3.1951e-03 L6_spectral:3.2040e-03 L7_spectral:3.2223e-03 L8_spectral:3.2210e-03 L9_spectral:3.2152e-03 L10_spectral:3.2024e-03 L11_spectral:3.2127e-03 L12_spectral:3.2151e-03 train_time:255657ms step_avg:42.61ms +[2025-09-11 10:15:00] [Rank 0] step:6001/10000 train_time:256987ms step_avg:42.82ms +[2025-09-11 10:15:00] [Rank 0] step:6001/10000 train_time:256987ms step_avg:42.82ms +[2025-09-11 10:15:01] [Rank 0] step:6021/10000 train_time:257699ms step_avg:42.80ms +[2025-09-11 10:15:01] [Rank 0] step:6021/10000 train_time:257699ms step_avg:42.80ms +[2025-09-11 10:15:02] [Rank 0] step:6041/10000 train_time:258448ms step_avg:42.78ms +[2025-09-11 10:15:02] [Rank 0] step:6041/10000 train_time:258448ms step_avg:42.78ms +[2025-09-11 10:15:03] [Rank 0] step:6061/10000 train_time:259217ms step_avg:42.77ms +[2025-09-11 10:15:03] [Rank 0] step:6061/10000 train_time:259217ms step_avg:42.77ms +[2025-09-11 10:15:03] [Rank 0] step:6081/10000 train_time:259907ms step_avg:42.74ms +[2025-09-11 10:15:03] [Rank 0] step:6081/10000 train_time:259907ms step_avg:42.74ms +[2025-09-11 10:15:04] [Rank 0] step:6101/10000 train_time:260596ms step_avg:42.71ms +[2025-09-11 10:15:04] [Rank 0] step:6101/10000 train_time:260596ms step_avg:42.71ms +[2025-09-11 10:15:05] [Rank 0] step:6121/10000 train_time:261287ms step_avg:42.69ms +[2025-09-11 10:15:05] [Rank 0] step:6121/10000 train_time:261287ms step_avg:42.69ms +[2025-09-11 10:15:05] [Rank 0] step:6141/10000 train_time:261977ms step_avg:42.66ms +[2025-09-11 10:15:05] [Rank 0] step:6141/10000 train_time:261977ms step_avg:42.66ms +[2025-09-11 10:15:06] [Rank 0] step:6161/10000 train_time:262667ms step_avg:42.63ms +[2025-09-11 10:15:06] [Rank 0] step:6161/10000 train_time:262667ms step_avg:42.63ms 
+[2025-09-11 10:15:07] [Rank 0] step:6181/10000 train_time:263355ms step_avg:42.61ms +[2025-09-11 10:15:07] [Rank 0] step:6181/10000 train_time:263355ms step_avg:42.61ms +[2025-09-11 10:15:08] [Rank 0] step:6201/10000 train_time:264328ms step_avg:42.63ms +[2025-09-11 10:15:08] [Rank 0] step:6201/10000 train_time:264328ms step_avg:42.63ms +[2025-09-11 10:15:08] [Rank 0] step:6221/10000 train_time:265019ms step_avg:42.60ms +[2025-09-11 10:15:08] [Rank 0] step:6221/10000 train_time:265019ms step_avg:42.60ms +[2025-09-11 10:15:09] [Rank 0] step:6241/10000 train_time:265709ms step_avg:42.57ms +[2025-09-11 10:15:09] [Rank 0] step:6241/10000 train_time:265709ms step_avg:42.57ms +[2025-09-11 10:15:10] [Rank 0] step:6261/10000 train_time:266558ms step_avg:42.57ms +[2025-09-11 10:15:10] [Rank 0] step:6261/10000 train_time:266558ms step_avg:42.57ms +[2025-09-11 10:15:11] [Rank 0] step:6281/10000 train_time:267378ms step_avg:42.57ms +[2025-09-11 10:15:11] [Rank 0] step:6281/10000 train_time:267378ms step_avg:42.57ms +[2025-09-11 10:15:11] [Rank 0] step:6301/10000 train_time:268067ms step_avg:42.54ms +[2025-09-11 10:15:11] [Rank 0] step:6301/10000 train_time:268067ms step_avg:42.54ms +[2025-09-11 10:15:12] [Rank 0] step:6321/10000 train_time:268760ms step_avg:42.52ms +[2025-09-11 10:15:12] [Rank 0] step:6321/10000 train_time:268760ms step_avg:42.52ms +[2025-09-11 10:15:13] [Rank 0] step:6341/10000 train_time:269451ms step_avg:42.49ms +[2025-09-11 10:15:13] [Rank 0] step:6341/10000 train_time:269451ms step_avg:42.49ms +[2025-09-11 10:15:14] [Rank 0] step:6361/10000 train_time:270141ms step_avg:42.47ms +[2025-09-11 10:15:14] [Rank 0] step:6361/10000 train_time:270141ms step_avg:42.47ms +[2025-09-11 10:15:14] [Rank 0] step:6381/10000 train_time:270830ms step_avg:42.44ms +[2025-09-11 10:15:14] [Rank 0] step:6381/10000 train_time:270830ms step_avg:42.44ms +[2025-09-11 10:15:15] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 10:15:15] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 10:15:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 10:15:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 10:15:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 10:15:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 10:15:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:15:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:15:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 10:15:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 10:15:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 10:15:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 10:15:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 10:15:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 10:15:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 10:15:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 10:15:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 10:15:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 10:15:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 10:15:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 10:15:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 10:15:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 10:15:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 10:15:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 10:15:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 10:15:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 10:15:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 10:15:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 10:15:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 10:15:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 10:15:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 10:15:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 10:15:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 10:15:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 10:15:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 10:15:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 10:15:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 10:15:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 10:15:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 10:15:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 10:15:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 10:15:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 10:15:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 10:15:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 10:15:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:15:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:15:25] [Rank 0] PRINT: step:6400/10000 val_loss:4.2870 total_sharp:1.2209e-04 L1_sharp:2.6520e-03 L2_sharp:2.1169e-03 L3_sharp:6.4153e-03 L4_sharp:6.9458e-03 L5_sharp:1.2729e-02 L6_sharp:1.8805e-02 L7_sharp:2.3251e-02 L8_sharp:3.5193e-02 L9_sharp:3.8986e-02 L10_sharp:5.2744e-02 L11_sharp:8.5635e-02 L12_sharp:9.4150e-01 total_fnorm:6.3000e+01 total_l1_linf:1.1366e+05 total_spectral:3.1500e+01 L1_fnorm:2.1875e-01 L2_fnorm:2.1777e-01 L3_fnorm:2.1680e-01 L4_fnorm:2.1680e-01 L5_fnorm:2.1680e-01 L6_fnorm:2.1680e-01 L7_fnorm:2.1680e-01 L8_fnorm:2.1289e-01 L9_fnorm:2.1680e-01 L10_fnorm:2.1680e-01 L11_fnorm:2.1484e-01 L12_fnorm:2.1680e-01 L1_l1linf:5.3223e-02 L2_l1linf:5.2246e-02 L3_l1linf:5.2002e-02 L4_l1linf:5.2490e-02 L5_l1linf:5.1025e-02 L6_l1linf:5.0049e-02 L7_l1linf:5.0537e-02 L8_l1linf:4.9805e-02 L9_l1linf:5.0049e-02 L10_l1linf:4.9072e-02 L11_l1linf:4.8096e-02 L12_l1linf:5.0537e-02 L1_spectral:2.9146e-03 L2_spectral:2.9203e-03 L3_spectral:2.9057e-03 L4_spectral:2.9117e-03 L5_spectral:2.9048e-03 L6_spectral:2.9019e-03 L7_spectral:2.9313e-03 L8_spectral:2.8965e-03 L9_spectral:2.9121e-03 L10_spectral:2.9224e-03 L11_spectral:2.9053e-03 L12_spectral:2.8927e-03 train_time:271500ms step_avg:42.42ms +[2025-09-11 10:15:25] [Rank 0] PRINT: step:6400/10000 
val_loss:4.2870 total_sharp:1.2209e-04 L1_sharp:2.6520e-03 L2_sharp:2.1169e-03 L3_sharp:6.4153e-03 L4_sharp:6.9458e-03 L5_sharp:1.2729e-02 L6_sharp:1.8805e-02 L7_sharp:2.3251e-02 L8_sharp:3.5193e-02 L9_sharp:3.8986e-02 L10_sharp:5.2744e-02 L11_sharp:8.5635e-02 L12_sharp:9.4150e-01 total_fnorm:6.3000e+01 total_l1_linf:1.1366e+05 total_spectral:3.1500e+01 L1_fnorm:2.1875e-01 L2_fnorm:2.1777e-01 L3_fnorm:2.1680e-01 L4_fnorm:2.1680e-01 L5_fnorm:2.1680e-01 L6_fnorm:2.1680e-01 L7_fnorm:2.1680e-01 L8_fnorm:2.1289e-01 L9_fnorm:2.1680e-01 L10_fnorm:2.1680e-01 L11_fnorm:2.1484e-01 L12_fnorm:2.1680e-01 L1_l1linf:5.3223e-02 L2_l1linf:5.2246e-02 L3_l1linf:5.2002e-02 L4_l1linf:5.2490e-02 L5_l1linf:5.1025e-02 L6_l1linf:5.0049e-02 L7_l1linf:5.0537e-02 L8_l1linf:4.9805e-02 L9_l1linf:5.0049e-02 L10_l1linf:4.9072e-02 L11_l1linf:4.8096e-02 L12_l1linf:5.0537e-02 L1_spectral:2.9146e-03 L2_spectral:2.9203e-03 L3_spectral:2.9057e-03 L4_spectral:2.9117e-03 L5_spectral:2.9048e-03 L6_spectral:2.9019e-03 L7_spectral:2.9313e-03 L8_spectral:2.8965e-03 L9_spectral:2.9121e-03 L10_spectral:2.9224e-03 L11_spectral:2.9053e-03 L12_spectral:2.8927e-03 train_time:271500ms step_avg:42.42ms +[2025-09-11 10:15:26] [Rank 0] step:6401/10000 train_time:272879ms step_avg:42.63ms +[2025-09-11 10:15:26] [Rank 0] step:6401/10000 train_time:272879ms step_avg:42.63ms +[2025-09-11 10:15:27] [Rank 0] step:6421/10000 train_time:273607ms step_avg:42.61ms +[2025-09-11 10:15:27] [Rank 0] step:6421/10000 train_time:273607ms step_avg:42.61ms +[2025-09-11 10:15:28] [Rank 0] step:6441/10000 train_time:274298ms step_avg:42.59ms +[2025-09-11 10:15:28] [Rank 0] step:6441/10000 train_time:274298ms step_avg:42.59ms +[2025-09-11 10:15:29] [Rank 0] step:6461/10000 train_time:274988ms step_avg:42.56ms +[2025-09-11 10:15:29] [Rank 0] step:6461/10000 train_time:274988ms step_avg:42.56ms +[2025-09-11 10:15:29] [Rank 0] step:6481/10000 train_time:275682ms step_avg:42.54ms +[2025-09-11 10:15:29] [Rank 0] step:6481/10000 
train_time:275682ms step_avg:42.54ms +[2025-09-11 10:15:30] [Rank 0] step:6501/10000 train_time:276374ms step_avg:42.51ms +[2025-09-11 10:15:30] [Rank 0] step:6501/10000 train_time:276374ms step_avg:42.51ms +[2025-09-11 10:15:31] [Rank 0] step:6521/10000 train_time:277066ms step_avg:42.49ms +[2025-09-11 10:15:31] [Rank 0] step:6521/10000 train_time:277066ms step_avg:42.49ms +[2025-09-11 10:15:31] [Rank 0] step:6541/10000 train_time:277755ms step_avg:42.46ms +[2025-09-11 10:15:31] [Rank 0] step:6541/10000 train_time:277755ms step_avg:42.46ms +[2025-09-11 10:15:32] [Rank 0] step:6561/10000 train_time:278446ms step_avg:42.44ms +[2025-09-11 10:15:32] [Rank 0] step:6561/10000 train_time:278446ms step_avg:42.44ms +[2025-09-11 10:15:33] [Rank 0] step:6581/10000 train_time:279137ms step_avg:42.42ms +[2025-09-11 10:15:33] [Rank 0] step:6581/10000 train_time:279137ms step_avg:42.42ms +[2025-09-11 10:15:33] [Rank 0] step:6601/10000 train_time:279828ms step_avg:42.39ms +[2025-09-11 10:15:33] [Rank 0] step:6601/10000 train_time:279828ms step_avg:42.39ms +[2025-09-11 10:15:34] [Rank 0] step:6621/10000 train_time:280518ms step_avg:42.37ms +[2025-09-11 10:15:34] [Rank 0] step:6621/10000 train_time:280518ms step_avg:42.37ms +[2025-09-11 10:15:35] [Rank 0] step:6641/10000 train_time:281209ms step_avg:42.34ms +[2025-09-11 10:15:35] [Rank 0] step:6641/10000 train_time:281209ms step_avg:42.34ms +[2025-09-11 10:15:36] [Rank 0] step:6661/10000 train_time:281905ms step_avg:42.32ms +[2025-09-11 10:15:36] [Rank 0] step:6661/10000 train_time:281905ms step_avg:42.32ms +[2025-09-11 10:15:36] [Rank 0] step:6681/10000 train_time:282603ms step_avg:42.30ms +[2025-09-11 10:15:36] [Rank 0] step:6681/10000 train_time:282603ms step_avg:42.30ms +[2025-09-11 10:15:37] [Rank 0] step:6701/10000 train_time:283300ms step_avg:42.28ms +[2025-09-11 10:15:37] [Rank 0] step:6701/10000 train_time:283300ms step_avg:42.28ms +[2025-09-11 10:15:38] [Rank 0] step:6721/10000 train_time:283998ms step_avg:42.26ms 
+[2025-09-11 10:15:38] [Rank 0] step:6721/10000 train_time:283998ms step_avg:42.26ms +[2025-09-11 10:15:38] [Rank 0] step:6741/10000 train_time:284696ms step_avg:42.23ms +[2025-09-11 10:15:38] [Rank 0] step:6741/10000 train_time:284696ms step_avg:42.23ms +[2025-09-11 10:15:39] [Rank 0] step:6761/10000 train_time:285392ms step_avg:42.21ms +[2025-09-11 10:15:39] [Rank 0] step:6761/10000 train_time:285392ms step_avg:42.21ms +[2025-09-11 10:15:40] [Rank 0] step:6781/10000 train_time:286090ms step_avg:42.19ms +[2025-09-11 10:15:40] [Rank 0] step:6781/10000 train_time:286090ms step_avg:42.19ms +[2025-09-11 10:15:40] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 10:15:40] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 10:15:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 10:15:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 10:15:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 10:15:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 10:15:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:15:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:15:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 10:15:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 10:15:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 10:15:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 10:15:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 10:15:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 10:15:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 10:15:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 10:15:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 10:15:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 10:15:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 10:15:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 10:15:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 10:15:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 10:15:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 10:15:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 10:15:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 10:15:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 10:15:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 10:15:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 10:15:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 10:15:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 10:15:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 10:15:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 10:15:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 10:15:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 10:15:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 10:15:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 10:15:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 10:15:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 10:15:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 10:15:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 10:15:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 10:15:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 10:15:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 10:15:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 10:15:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:15:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:15:50] [Rank 0] PRINT: step:6800/10000 val_loss:4.2532 total_sharp:8.1867e-05 L1_sharp:1.9400e-03 L2_sharp:1.9912e-03 L3_sharp:4.6713e-03 L4_sharp:6.2274e-03 L5_sharp:1.1797e-02 L6_sharp:1.5673e-02 L7_sharp:2.0834e-02 L8_sharp:3.1997e-02 L9_sharp:3.7498e-02 L10_sharp:4.7901e-02 L11_sharp:7.2820e-02 L12_sharp:3.8496e-01 total_fnorm:6.1500e+01 total_l1_linf:1.1110e+05 total_spectral:3.0750e+01 L1_fnorm:1.9141e-01 L2_fnorm:1.9043e-01 L3_fnorm:1.8945e-01 L4_fnorm:1.8945e-01 L5_fnorm:1.8848e-01 L6_fnorm:1.8848e-01 L7_fnorm:1.8945e-01 L8_fnorm:1.8555e-01 L9_fnorm:1.8848e-01 L10_fnorm:1.8848e-01 L11_fnorm:1.8652e-01 L12_fnorm:1.8652e-01 L1_l1linf:4.3945e-02 L2_l1linf:4.3701e-02 L3_l1linf:4.2236e-02 L4_l1linf:4.2725e-02 L5_l1linf:4.2725e-02 L6_l1linf:4.2480e-02 L7_l1linf:4.2725e-02 L8_l1linf:4.1992e-02 L9_l1linf:4.0771e-02 L10_l1linf:4.1504e-02 L11_l1linf:3.9307e-02 L12_l1linf:4.1016e-02 L1_spectral:2.5950e-03 L2_spectral:2.6026e-03 L3_spectral:2.5921e-03 L4_spectral:2.5995e-03 L5_spectral:2.6017e-03 L6_spectral:2.6015e-03 L7_spectral:2.5932e-03 L8_spectral:2.5843e-03 L9_spectral:2.6047e-03 L10_spectral:2.6004e-03 L11_spectral:2.5993e-03 L12_spectral:2.5862e-03 train_time:286767ms step_avg:42.17ms +[2025-09-11 10:15:50] [Rank 0] PRINT: step:6800/10000 val_loss:4.2532 total_sharp:8.1867e-05 L1_sharp:1.9400e-03 L2_sharp:1.9912e-03 L3_sharp:4.6713e-03 L4_sharp:6.2274e-03 L5_sharp:1.1797e-02 L6_sharp:1.5673e-02 L7_sharp:2.0834e-02 L8_sharp:3.1997e-02 L9_sharp:3.7498e-02 L10_sharp:4.7901e-02 L11_sharp:7.2820e-02 L12_sharp:3.8496e-01 total_fnorm:6.1500e+01 total_l1_linf:1.1110e+05 total_spectral:3.0750e+01 L1_fnorm:1.9141e-01 L2_fnorm:1.9043e-01 L3_fnorm:1.8945e-01 L4_fnorm:1.8945e-01 L5_fnorm:1.8848e-01 L6_fnorm:1.8848e-01 L7_fnorm:1.8945e-01 L8_fnorm:1.8555e-01 L9_fnorm:1.8848e-01 L10_fnorm:1.8848e-01 L11_fnorm:1.8652e-01 L12_fnorm:1.8652e-01 L1_l1linf:4.3945e-02 L2_l1linf:4.3701e-02 L3_l1linf:4.2236e-02 L4_l1linf:4.2725e-02 L5_l1linf:4.2725e-02 
L6_l1linf:4.2480e-02 L7_l1linf:4.2725e-02 L8_l1linf:4.1992e-02 L9_l1linf:4.0771e-02 L10_l1linf:4.1504e-02 L11_l1linf:3.9307e-02 L12_l1linf:4.1016e-02 L1_spectral:2.5950e-03 L2_spectral:2.6026e-03 L3_spectral:2.5921e-03 L4_spectral:2.5995e-03 L5_spectral:2.6017e-03 L6_spectral:2.6015e-03 L7_spectral:2.5932e-03 L8_spectral:2.5843e-03 L9_spectral:2.6047e-03 L10_spectral:2.6004e-03 L11_spectral:2.5993e-03 L12_spectral:2.5862e-03 train_time:286767ms step_avg:42.17ms +[2025-09-11 10:15:52] [Rank 0] step:6801/10000 train_time:288155ms step_avg:42.37ms +[2025-09-11 10:15:52] [Rank 0] step:6801/10000 train_time:288155ms step_avg:42.37ms +[2025-09-11 10:15:53] [Rank 0] step:6821/10000 train_time:288877ms step_avg:42.35ms +[2025-09-11 10:15:53] [Rank 0] step:6821/10000 train_time:288877ms step_avg:42.35ms +[2025-09-11 10:15:53] [Rank 0] step:6841/10000 train_time:289578ms step_avg:42.33ms +[2025-09-11 10:15:53] [Rank 0] step:6841/10000 train_time:289578ms step_avg:42.33ms +[2025-09-11 10:15:54] [Rank 0] step:6861/10000 train_time:290276ms step_avg:42.31ms +[2025-09-11 10:15:54] [Rank 0] step:6861/10000 train_time:290276ms step_avg:42.31ms +[2025-09-11 10:15:55] [Rank 0] step:6881/10000 train_time:290977ms step_avg:42.29ms +[2025-09-11 10:15:55] [Rank 0] step:6881/10000 train_time:290977ms step_avg:42.29ms +[2025-09-11 10:15:55] [Rank 0] step:6901/10000 train_time:291675ms step_avg:42.27ms +[2025-09-11 10:15:55] [Rank 0] step:6901/10000 train_time:291675ms step_avg:42.27ms +[2025-09-11 10:15:56] [Rank 0] step:6921/10000 train_time:292374ms step_avg:42.24ms +[2025-09-11 10:15:56] [Rank 0] step:6921/10000 train_time:292374ms step_avg:42.24ms +[2025-09-11 10:15:57] [Rank 0] step:6941/10000 train_time:293073ms step_avg:42.22ms +[2025-09-11 10:15:57] [Rank 0] step:6941/10000 train_time:293073ms step_avg:42.22ms +[2025-09-11 10:15:58] [Rank 0] step:6961/10000 train_time:293772ms step_avg:42.20ms +[2025-09-11 10:15:58] [Rank 0] step:6961/10000 train_time:293772ms step_avg:42.20ms 
+[2025-09-11 10:15:58] [Rank 0] step:6981/10000 train_time:294473ms step_avg:42.18ms +[2025-09-11 10:15:58] [Rank 0] step:6981/10000 train_time:294473ms step_avg:42.18ms +[2025-09-11 10:15:59] [Rank 0] step:7001/10000 train_time:295172ms step_avg:42.16ms +[2025-09-11 10:15:59] [Rank 0] step:7001/10000 train_time:295172ms step_avg:42.16ms +[2025-09-11 10:16:00] [Rank 0] step:7021/10000 train_time:295870ms step_avg:42.14ms +[2025-09-11 10:16:00] [Rank 0] step:7021/10000 train_time:295870ms step_avg:42.14ms +[2025-09-11 10:16:00] [Rank 0] step:7041/10000 train_time:296567ms step_avg:42.12ms +[2025-09-11 10:16:00] [Rank 0] step:7041/10000 train_time:296567ms step_avg:42.12ms +[2025-09-11 10:16:01] [Rank 0] step:7061/10000 train_time:297266ms step_avg:42.10ms +[2025-09-11 10:16:01] [Rank 0] step:7061/10000 train_time:297266ms step_avg:42.10ms +[2025-09-11 10:16:02] [Rank 0] step:7081/10000 train_time:297964ms step_avg:42.08ms +[2025-09-11 10:16:02] [Rank 0] step:7081/10000 train_time:297964ms step_avg:42.08ms +[2025-09-11 10:16:02] [Rank 0] step:7101/10000 train_time:298664ms step_avg:42.06ms +[2025-09-11 10:16:02] [Rank 0] step:7101/10000 train_time:298664ms step_avg:42.06ms +[2025-09-11 10:16:03] [Rank 0] step:7121/10000 train_time:299365ms step_avg:42.04ms +[2025-09-11 10:16:03] [Rank 0] step:7121/10000 train_time:299365ms step_avg:42.04ms +[2025-09-11 10:16:04] [Rank 0] step:7141/10000 train_time:300063ms step_avg:42.02ms +[2025-09-11 10:16:04] [Rank 0] step:7141/10000 train_time:300063ms step_avg:42.02ms +[2025-09-11 10:16:04] [Rank 0] step:7161/10000 train_time:300763ms step_avg:42.00ms +[2025-09-11 10:16:04] [Rank 0] step:7161/10000 train_time:300763ms step_avg:42.00ms +[2025-09-11 10:16:05] [Rank 0] step:7181/10000 train_time:301460ms step_avg:41.98ms +[2025-09-11 10:16:05] [Rank 0] step:7181/10000 train_time:301460ms step_avg:41.98ms +[2025-09-11 10:16:06] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 10:16:06] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 10:16:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 10:16:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 10:16:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 10:16:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 10:16:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:16:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:16:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 10:16:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 10:16:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 10:16:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 10:16:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 10:16:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 10:16:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 10:16:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 10:16:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 10:16:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 10:16:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 10:16:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 10:16:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 10:16:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 10:16:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 10:16:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 10:16:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 10:16:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 10:16:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 10:16:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 10:16:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 10:16:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 10:16:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 10:16:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 10:16:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 10:16:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 10:16:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 10:16:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 10:16:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 10:16:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 10:16:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 10:16:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 10:16:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 10:16:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 10:16:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 10:16:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 10:16:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:16:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:16:17] [Rank 0] PRINT: step:7200/10000 val_loss:4.2228 total_sharp:7.5071e-05 L1_sharp:3.4931e-03 L2_sharp:1.0702e-03 L3_sharp:3.6964e-03 L4_sharp:7.2705e-03 L5_sharp:1.0905e-02 L6_sharp:1.8297e-02 L7_sharp:1.9736e-02 L8_sharp:3.6655e-02 L9_sharp:3.4260e-02 L10_sharp:4.4616e-02 L11_sharp:6.7009e-02 L12_sharp:3.3327e-01 total_fnorm:5.3500e+01 total_l1_linf:9.2672e+04 total_spectral:2.6750e+01 L1_fnorm:1.6602e-01 L2_fnorm:1.6504e-01 L3_fnorm:1.6309e-01 L4_fnorm:1.6309e-01 L5_fnorm:1.6309e-01 L6_fnorm:1.6406e-01 L7_fnorm:1.6406e-01 L8_fnorm:1.5918e-01 L9_fnorm:1.6309e-01 L10_fnorm:1.6309e-01 L11_fnorm:1.6113e-01 L12_fnorm:1.5918e-01 L1_l1linf:3.7109e-02 L2_l1linf:3.6621e-02 L3_l1linf:3.5400e-02 L4_l1linf:3.5645e-02 L5_l1linf:3.5645e-02 L6_l1linf:3.5156e-02 L7_l1linf:3.4668e-02 L8_l1linf:3.3936e-02 L9_l1linf:3.4180e-02 L10_l1linf:3.3203e-02 L11_l1linf:3.2715e-02 L12_l1linf:3.3447e-02 L1_spectral:2.3063e-03 L2_spectral:2.3036e-03 L3_spectral:2.3000e-03 L4_spectral:2.3211e-03 L5_spectral:2.3074e-03 L6_spectral:2.3078e-03 L7_spectral:2.3122e-03 L8_spectral:2.2816e-03 L9_spectral:2.3015e-03 L10_spectral:2.2925e-03 L11_spectral:2.2720e-03 L12_spectral:2.2869e-03 train_time:302139ms step_avg:41.96ms +[2025-09-11 10:16:17] [Rank 0] PRINT: step:7200/10000 
val_loss:4.2228 total_sharp:7.5071e-05 L1_sharp:3.4931e-03 L2_sharp:1.0702e-03 L3_sharp:3.6964e-03 L4_sharp:7.2705e-03 L5_sharp:1.0905e-02 L6_sharp:1.8297e-02 L7_sharp:1.9736e-02 L8_sharp:3.6655e-02 L9_sharp:3.4260e-02 L10_sharp:4.4616e-02 L11_sharp:6.7009e-02 L12_sharp:3.3327e-01 total_fnorm:5.3500e+01 total_l1_linf:9.2672e+04 total_spectral:2.6750e+01 L1_fnorm:1.6602e-01 L2_fnorm:1.6504e-01 L3_fnorm:1.6309e-01 L4_fnorm:1.6309e-01 L5_fnorm:1.6309e-01 L6_fnorm:1.6406e-01 L7_fnorm:1.6406e-01 L8_fnorm:1.5918e-01 L9_fnorm:1.6309e-01 L10_fnorm:1.6309e-01 L11_fnorm:1.6113e-01 L12_fnorm:1.5918e-01 L1_l1linf:3.7109e-02 L2_l1linf:3.6621e-02 L3_l1linf:3.5400e-02 L4_l1linf:3.5645e-02 L5_l1linf:3.5645e-02 L6_l1linf:3.5156e-02 L7_l1linf:3.4668e-02 L8_l1linf:3.3936e-02 L9_l1linf:3.4180e-02 L10_l1linf:3.3203e-02 L11_l1linf:3.2715e-02 L12_l1linf:3.3447e-02 L1_spectral:2.3063e-03 L2_spectral:2.3036e-03 L3_spectral:2.3000e-03 L4_spectral:2.3211e-03 L5_spectral:2.3074e-03 L6_spectral:2.3078e-03 L7_spectral:2.3122e-03 L8_spectral:2.2816e-03 L9_spectral:2.3015e-03 L10_spectral:2.2925e-03 L11_spectral:2.2720e-03 L12_spectral:2.2869e-03 train_time:302139ms step_avg:41.96ms +[2025-09-11 10:16:18] [Rank 0] step:7201/10000 train_time:303542ms step_avg:42.15ms +[2025-09-11 10:16:18] [Rank 0] step:7201/10000 train_time:303542ms step_avg:42.15ms +[2025-09-11 10:16:19] [Rank 0] step:7221/10000 train_time:304259ms step_avg:42.14ms +[2025-09-11 10:16:19] [Rank 0] step:7221/10000 train_time:304259ms step_avg:42.14ms +[2025-09-11 10:16:20] [Rank 0] step:7241/10000 train_time:304958ms step_avg:42.12ms +[2025-09-11 10:16:20] [Rank 0] step:7241/10000 train_time:304958ms step_avg:42.12ms +[2025-09-11 10:16:20] [Rank 0] step:7261/10000 train_time:305660ms step_avg:42.10ms +[2025-09-11 10:16:20] [Rank 0] step:7261/10000 train_time:305660ms step_avg:42.10ms +[2025-09-11 10:16:21] [Rank 0] step:7281/10000 train_time:306364ms step_avg:42.08ms +[2025-09-11 10:16:21] [Rank 0] step:7281/10000 
train_time:306364ms step_avg:42.08ms +[2025-09-11 10:16:22] [Rank 0] step:7301/10000 train_time:307061ms step_avg:42.06ms +[2025-09-11 10:16:22] [Rank 0] step:7301/10000 train_time:307061ms step_avg:42.06ms +[2025-09-11 10:16:22] [Rank 0] step:7321/10000 train_time:307760ms step_avg:42.04ms +[2025-09-11 10:16:22] [Rank 0] step:7321/10000 train_time:307760ms step_avg:42.04ms +[2025-09-11 10:16:23] [Rank 0] step:7341/10000 train_time:308460ms step_avg:42.02ms +[2025-09-11 10:16:23] [Rank 0] step:7341/10000 train_time:308460ms step_avg:42.02ms +[2025-09-11 10:16:24] [Rank 0] step:7361/10000 train_time:309159ms step_avg:42.00ms +[2025-09-11 10:16:24] [Rank 0] step:7361/10000 train_time:309159ms step_avg:42.00ms +[2025-09-11 10:16:24] [Rank 0] step:7381/10000 train_time:309860ms step_avg:41.98ms +[2025-09-11 10:16:24] [Rank 0] step:7381/10000 train_time:309860ms step_avg:41.98ms +[2025-09-11 10:16:25] [Rank 0] step:7401/10000 train_time:310558ms step_avg:41.96ms +[2025-09-11 10:16:25] [Rank 0] step:7401/10000 train_time:310558ms step_avg:41.96ms +[2025-09-11 10:16:26] [Rank 0] step:7421/10000 train_time:311257ms step_avg:41.94ms +[2025-09-11 10:16:26] [Rank 0] step:7421/10000 train_time:311257ms step_avg:41.94ms +[2025-09-11 10:16:27] [Rank 0] step:7441/10000 train_time:311957ms step_avg:41.92ms +[2025-09-11 10:16:27] [Rank 0] step:7441/10000 train_time:311957ms step_avg:41.92ms +[2025-09-11 10:16:27] [Rank 0] step:7461/10000 train_time:312659ms step_avg:41.91ms +[2025-09-11 10:16:27] [Rank 0] step:7461/10000 train_time:312659ms step_avg:41.91ms +[2025-09-11 10:16:28] [Rank 0] step:7481/10000 train_time:313361ms step_avg:41.89ms +[2025-09-11 10:16:28] [Rank 0] step:7481/10000 train_time:313361ms step_avg:41.89ms +[2025-09-11 10:16:29] [Rank 0] step:7501/10000 train_time:314060ms step_avg:41.87ms +[2025-09-11 10:16:29] [Rank 0] step:7501/10000 train_time:314060ms step_avg:41.87ms +[2025-09-11 10:16:29] [Rank 0] step:7521/10000 train_time:314760ms step_avg:41.85ms 
+[2025-09-11 10:16:29] [Rank 0] step:7521/10000 train_time:314760ms step_avg:41.85ms +[2025-09-11 10:16:30] [Rank 0] step:7541/10000 train_time:315457ms step_avg:41.83ms +[2025-09-11 10:16:30] [Rank 0] step:7541/10000 train_time:315457ms step_avg:41.83ms +[2025-09-11 10:16:31] [Rank 0] step:7561/10000 train_time:316160ms step_avg:41.81ms +[2025-09-11 10:16:31] [Rank 0] step:7561/10000 train_time:316160ms step_avg:41.81ms +[2025-09-11 10:16:31] [Rank 0] step:7581/10000 train_time:316860ms step_avg:41.80ms +[2025-09-11 10:16:31] [Rank 0] step:7581/10000 train_time:316860ms step_avg:41.80ms +[2025-09-11 10:16:32] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 10:16:32] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 10:16:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 10:16:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 10:16:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 10:16:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 10:16:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:16:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:16:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 10:16:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 10:16:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 10:16:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 10:16:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 10:16:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 10:16:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 10:16:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 10:16:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 10:16:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 10:16:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 10:16:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 10:16:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 10:16:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 10:16:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 10:16:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 10:16:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 10:16:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 10:16:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 10:16:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 10:16:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 10:16:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 10:16:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 10:16:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 10:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 10:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 10:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 10:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 10:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 10:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 10:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 10:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 10:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 10:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 10:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 10:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 10:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:16:42] [Rank 0] PRINT: step:7600/10000 val_loss:4.1957 total_sharp:6.6850e-05 L1_sharp:2.9812e-03 L2_sharp:2.6330e-03 L3_sharp:3.5286e-03 L4_sharp:6.8665e-03 L5_sharp:8.4091e-03 L6_sharp:1.6064e-02 L7_sharp:1.6949e-02 L8_sharp:3.1137e-02 L9_sharp:3.3671e-02 L10_sharp:4.3385e-02 L11_sharp:6.5201e-02 L12_sharp:3.0330e-01 total_fnorm:4.3750e+01 total_l1_linf:7.1168e+04 total_spectral:2.1875e+01 L1_fnorm:1.3770e-01 L2_fnorm:1.3770e-01 L3_fnorm:1.3672e-01 L4_fnorm:1.3770e-01 L5_fnorm:1.3672e-01 L6_fnorm:1.3672e-01 L7_fnorm:1.3672e-01 L8_fnorm:1.3379e-01 L9_fnorm:1.3574e-01 L10_fnorm:1.3574e-01 L11_fnorm:1.3477e-01 L12_fnorm:1.3379e-01 L1_l1linf:2.9175e-02 L2_l1linf:2.8931e-02 L3_l1linf:2.7710e-02 L4_l1linf:2.8198e-02 L5_l1linf:2.7710e-02 L6_l1linf:2.7588e-02 L7_l1linf:2.7344e-02 L8_l1linf:2.8076e-02 L9_l1linf:2.6733e-02 L10_l1linf:2.6489e-02 L11_l1linf:2.5879e-02 L12_l1linf:2.6489e-02 L1_spectral:1.9900e-03 L2_spectral:2.0332e-03 L3_spectral:2.0153e-03 L4_spectral:2.0120e-03 L5_spectral:2.0112e-03 L6_spectral:2.0090e-03 L7_spectral:2.0141e-03 L8_spectral:1.9441e-03 L9_spectral:1.9758e-03 L10_spectral:1.9760e-03 L11_spectral:1.9666e-03 L12_spectral:1.9831e-03 train_time:317542ms step_avg:41.78ms +[2025-09-11 10:16:42] [Rank 0] PRINT: step:7600/10000 val_loss:4.1957 total_sharp:6.6850e-05 L1_sharp:2.9812e-03 L2_sharp:2.6330e-03 L3_sharp:3.5286e-03 L4_sharp:6.8665e-03 L5_sharp:8.4091e-03 L6_sharp:1.6064e-02 L7_sharp:1.6949e-02 L8_sharp:3.1137e-02 L9_sharp:3.3671e-02 L10_sharp:4.3385e-02 L11_sharp:6.5201e-02 L12_sharp:3.0330e-01 total_fnorm:4.3750e+01 total_l1_linf:7.1168e+04 total_spectral:2.1875e+01 L1_fnorm:1.3770e-01 L2_fnorm:1.3770e-01 L3_fnorm:1.3672e-01 L4_fnorm:1.3770e-01 L5_fnorm:1.3672e-01 L6_fnorm:1.3672e-01 L7_fnorm:1.3672e-01 L8_fnorm:1.3379e-01 L9_fnorm:1.3574e-01 L10_fnorm:1.3574e-01 L11_fnorm:1.3477e-01 L12_fnorm:1.3379e-01 L1_l1linf:2.9175e-02 L2_l1linf:2.8931e-02 L3_l1linf:2.7710e-02 L4_l1linf:2.8198e-02 L5_l1linf:2.7710e-02 
L6_l1linf:2.7588e-02 L7_l1linf:2.7344e-02 L8_l1linf:2.8076e-02 L9_l1linf:2.6733e-02 L10_l1linf:2.6489e-02 L11_l1linf:2.5879e-02 L12_l1linf:2.6489e-02 L1_spectral:1.9900e-03 L2_spectral:2.0332e-03 L3_spectral:2.0153e-03 L4_spectral:2.0120e-03 L5_spectral:2.0112e-03 L6_spectral:2.0090e-03 L7_spectral:2.0141e-03 L8_spectral:1.9441e-03 L9_spectral:1.9758e-03 L10_spectral:1.9760e-03 L11_spectral:1.9666e-03 L12_spectral:1.9831e-03 train_time:317542ms step_avg:41.78ms +[2025-09-11 10:16:44] [Rank 0] step:7601/10000 train_time:318955ms step_avg:41.96ms +[2025-09-11 10:16:44] [Rank 0] step:7601/10000 train_time:318955ms step_avg:41.96ms +[2025-09-11 10:16:44] [Rank 0] step:7621/10000 train_time:319691ms step_avg:41.95ms +[2025-09-11 10:16:44] [Rank 0] step:7621/10000 train_time:319691ms step_avg:41.95ms +[2025-09-11 10:16:45] [Rank 0] step:7641/10000 train_time:320393ms step_avg:41.93ms +[2025-09-11 10:16:45] [Rank 0] step:7641/10000 train_time:320393ms step_avg:41.93ms +[2025-09-11 10:16:46] [Rank 0] step:7661/10000 train_time:321092ms step_avg:41.91ms +[2025-09-11 10:16:46] [Rank 0] step:7661/10000 train_time:321092ms step_avg:41.91ms +[2025-09-11 10:16:47] [Rank 0] step:7681/10000 train_time:321793ms step_avg:41.89ms +[2025-09-11 10:16:47] [Rank 0] step:7681/10000 train_time:321793ms step_avg:41.89ms +[2025-09-11 10:16:47] [Rank 0] step:7701/10000 train_time:322495ms step_avg:41.88ms +[2025-09-11 10:16:47] [Rank 0] step:7701/10000 train_time:322495ms step_avg:41.88ms +[2025-09-11 10:16:48] [Rank 0] step:7721/10000 train_time:323196ms step_avg:41.86ms +[2025-09-11 10:16:48] [Rank 0] step:7721/10000 train_time:323196ms step_avg:41.86ms +[2025-09-11 10:16:49] [Rank 0] step:7741/10000 train_time:323898ms step_avg:41.84ms +[2025-09-11 10:16:49] [Rank 0] step:7741/10000 train_time:323898ms step_avg:41.84ms +[2025-09-11 10:16:49] [Rank 0] step:7761/10000 train_time:324597ms step_avg:41.82ms +[2025-09-11 10:16:49] [Rank 0] step:7761/10000 train_time:324597ms step_avg:41.82ms 
+[2025-09-11 10:16:50] [Rank 0] step:7781/10000 train_time:325299ms step_avg:41.81ms +[2025-09-11 10:16:50] [Rank 0] step:7781/10000 train_time:325299ms step_avg:41.81ms +[2025-09-11 10:16:51] [Rank 0] step:7801/10000 train_time:325998ms step_avg:41.79ms +[2025-09-11 10:16:51] [Rank 0] step:7801/10000 train_time:325998ms step_avg:41.79ms +[2025-09-11 10:16:51] [Rank 0] step:7821/10000 train_time:326698ms step_avg:41.77ms +[2025-09-11 10:16:51] [Rank 0] step:7821/10000 train_time:326698ms step_avg:41.77ms +[2025-09-11 10:16:52] [Rank 0] step:7841/10000 train_time:327399ms step_avg:41.75ms +[2025-09-11 10:16:52] [Rank 0] step:7841/10000 train_time:327399ms step_avg:41.75ms +[2025-09-11 10:16:53] [Rank 0] step:7861/10000 train_time:328102ms step_avg:41.74ms +[2025-09-11 10:16:53] [Rank 0] step:7861/10000 train_time:328102ms step_avg:41.74ms +[2025-09-11 10:16:54] [Rank 0] step:7881/10000 train_time:328803ms step_avg:41.72ms +[2025-09-11 10:16:54] [Rank 0] step:7881/10000 train_time:328803ms step_avg:41.72ms +[2025-09-11 10:16:54] [Rank 0] step:7901/10000 train_time:329504ms step_avg:41.70ms +[2025-09-11 10:16:54] [Rank 0] step:7901/10000 train_time:329504ms step_avg:41.70ms +[2025-09-11 10:16:55] [Rank 0] step:7921/10000 train_time:330206ms step_avg:41.69ms +[2025-09-11 10:16:55] [Rank 0] step:7921/10000 train_time:330206ms step_avg:41.69ms +[2025-09-11 10:16:56] [Rank 0] step:7941/10000 train_time:330907ms step_avg:41.67ms +[2025-09-11 10:16:56] [Rank 0] step:7941/10000 train_time:330907ms step_avg:41.67ms +[2025-09-11 10:16:56] [Rank 0] step:7961/10000 train_time:331605ms step_avg:41.65ms +[2025-09-11 10:16:56] [Rank 0] step:7961/10000 train_time:331605ms step_avg:41.65ms +[2025-09-11 10:16:57] [Rank 0] step:7981/10000 train_time:332309ms step_avg:41.64ms +[2025-09-11 10:16:57] [Rank 0] step:7981/10000 train_time:332309ms step_avg:41.64ms +[2025-09-11 10:16:58] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 10:16:58] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 10:16:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 10:16:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 10:17:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 10:17:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 10:17:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:17:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:17:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 10:17:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 10:17:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 10:17:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 10:17:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 10:17:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 10:17:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 10:17:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 10:17:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 10:17:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 10:17:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 10:17:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 10:17:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 10:17:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 10:17:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 10:17:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 10:17:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 10:17:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 10:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 10:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 10:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 10:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 10:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 10:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 10:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 10:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 10:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 10:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 10:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 10:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 10:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 10:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 10:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 10:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 10:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 10:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 10:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:17:08] [Rank 0] PRINT: step:8000/10000 val_loss:4.1746 total_sharp:6.1559e-05 L1_sharp:3.0325e-03 L2_sharp:3.1498e-03 L3_sharp:5.4602e-03 L4_sharp:7.0386e-03 L5_sharp:1.3269e-02 L6_sharp:1.6842e-02 L7_sharp:2.0242e-02 L8_sharp:2.6620e-02 L9_sharp:3.1432e-02 L10_sharp:4.3911e-02 L11_sharp:6.4636e-02 L12_sharp:3.1814e-01 total_fnorm:3.7000e+01 total_l1_linf:5.6576e+04 total_spectral:1.8500e+01 L1_fnorm:1.1279e-01 L2_fnorm:1.1230e-01 L3_fnorm:1.1182e-01 L4_fnorm:1.1182e-01 L5_fnorm:1.1084e-01 L6_fnorm:1.1133e-01 L7_fnorm:1.1182e-01 L8_fnorm:1.0791e-01 L9_fnorm:1.1035e-01 L10_fnorm:1.1035e-01 L11_fnorm:1.0889e-01 L12_fnorm:1.0742e-01 L1_l1linf:2.1973e-02 L2_l1linf:2.2217e-02 L3_l1linf:2.1240e-02 L4_l1linf:2.1118e-02 L5_l1linf:2.0874e-02 L6_l1linf:2.0508e-02 L7_l1linf:2.0874e-02 L8_l1linf:2.0264e-02 L9_l1linf:2.0020e-02 L10_l1linf:1.9653e-02 L11_l1linf:1.8921e-02 L12_l1linf:2.0020e-02 L1_spectral:1.6851e-03 L2_spectral:1.6952e-03 L3_spectral:1.6862e-03 L4_spectral:1.6843e-03 L5_spectral:1.6750e-03 L6_spectral:1.6674e-03 L7_spectral:1.6764e-03 L8_spectral:1.6347e-03 L9_spectral:1.6455e-03 L10_spectral:1.6598e-03 L11_spectral:1.6366e-03 L12_spectral:1.6535e-03 train_time:332987ms step_avg:41.62ms +[2025-09-11 10:17:08] [Rank 0] PRINT: step:8000/10000 
val_loss:4.1746 total_sharp:6.1559e-05 L1_sharp:3.0325e-03 L2_sharp:3.1498e-03 L3_sharp:5.4602e-03 L4_sharp:7.0386e-03 L5_sharp:1.3269e-02 L6_sharp:1.6842e-02 L7_sharp:2.0242e-02 L8_sharp:2.6620e-02 L9_sharp:3.1432e-02 L10_sharp:4.3911e-02 L11_sharp:6.4636e-02 L12_sharp:3.1814e-01 total_fnorm:3.7000e+01 total_l1_linf:5.6576e+04 total_spectral:1.8500e+01 L1_fnorm:1.1279e-01 L2_fnorm:1.1230e-01 L3_fnorm:1.1182e-01 L4_fnorm:1.1182e-01 L5_fnorm:1.1084e-01 L6_fnorm:1.1133e-01 L7_fnorm:1.1182e-01 L8_fnorm:1.0791e-01 L9_fnorm:1.1035e-01 L10_fnorm:1.1035e-01 L11_fnorm:1.0889e-01 L12_fnorm:1.0742e-01 L1_l1linf:2.1973e-02 L2_l1linf:2.2217e-02 L3_l1linf:2.1240e-02 L4_l1linf:2.1118e-02 L5_l1linf:2.0874e-02 L6_l1linf:2.0508e-02 L7_l1linf:2.0874e-02 L8_l1linf:2.0264e-02 L9_l1linf:2.0020e-02 L10_l1linf:1.9653e-02 L11_l1linf:1.8921e-02 L12_l1linf:2.0020e-02 L1_spectral:1.6851e-03 L2_spectral:1.6952e-03 L3_spectral:1.6862e-03 L4_spectral:1.6843e-03 L5_spectral:1.6750e-03 L6_spectral:1.6674e-03 L7_spectral:1.6764e-03 L8_spectral:1.6347e-03 L9_spectral:1.6455e-03 L10_spectral:1.6598e-03 L11_spectral:1.6366e-03 L12_spectral:1.6535e-03 train_time:332987ms step_avg:41.62ms +[2025-09-11 10:17:10] [Rank 0] step:8001/10000 train_time:334385ms step_avg:41.79ms +[2025-09-11 10:17:10] [Rank 0] step:8001/10000 train_time:334385ms step_avg:41.79ms +[2025-09-11 10:17:10] [Rank 0] step:8021/10000 train_time:335296ms step_avg:41.80ms +[2025-09-11 10:17:10] [Rank 0] step:8021/10000 train_time:335296ms step_avg:41.80ms +[2025-09-11 10:17:11] [Rank 0] step:8041/10000 train_time:335998ms step_avg:41.79ms +[2025-09-11 10:17:11] [Rank 0] step:8041/10000 train_time:335998ms step_avg:41.79ms +[2025-09-11 10:17:12] [Rank 0] step:8061/10000 train_time:336700ms step_avg:41.77ms +[2025-09-11 10:17:12] [Rank 0] step:8061/10000 train_time:336700ms step_avg:41.77ms +[2025-09-11 10:17:12] [Rank 0] step:8081/10000 train_time:337400ms step_avg:41.75ms +[2025-09-11 10:17:12] [Rank 0] step:8081/10000 
train_time:337400ms step_avg:41.75ms +[2025-09-11 10:17:13] [Rank 0] step:8101/10000 train_time:338100ms step_avg:41.74ms +[2025-09-11 10:17:13] [Rank 0] step:8101/10000 train_time:338100ms step_avg:41.74ms +[2025-09-11 10:17:14] [Rank 0] step:8121/10000 train_time:338805ms step_avg:41.72ms +[2025-09-11 10:17:14] [Rank 0] step:8121/10000 train_time:338805ms step_avg:41.72ms +[2025-09-11 10:17:16] [Rank 0] step:8141/10000 train_time:340551ms step_avg:41.83ms +[2025-09-11 10:17:16] [Rank 0] step:8141/10000 train_time:340551ms step_avg:41.83ms +[2025-09-11 10:17:16] [Rank 0] step:8161/10000 train_time:341256ms step_avg:41.82ms +[2025-09-11 10:17:16] [Rank 0] step:8161/10000 train_time:341256ms step_avg:41.82ms +[2025-09-11 10:17:17] [Rank 0] step:8181/10000 train_time:342233ms step_avg:41.83ms +[2025-09-11 10:17:17] [Rank 0] step:8181/10000 train_time:342233ms step_avg:41.83ms +[2025-09-11 10:17:18] [Rank 0] step:8201/10000 train_time:342942ms step_avg:41.82ms +[2025-09-11 10:17:18] [Rank 0] step:8201/10000 train_time:342942ms step_avg:41.82ms +[2025-09-11 10:17:19] [Rank 0] step:8221/10000 train_time:343650ms step_avg:41.80ms +[2025-09-11 10:17:19] [Rank 0] step:8221/10000 train_time:343650ms step_avg:41.80ms +[2025-09-11 10:17:19] [Rank 0] step:8241/10000 train_time:344366ms step_avg:41.79ms +[2025-09-11 10:17:19] [Rank 0] step:8241/10000 train_time:344366ms step_avg:41.79ms +[2025-09-11 10:17:20] [Rank 0] step:8261/10000 train_time:345073ms step_avg:41.77ms +[2025-09-11 10:17:20] [Rank 0] step:8261/10000 train_time:345073ms step_avg:41.77ms +[2025-09-11 10:17:21] [Rank 0] step:8281/10000 train_time:345777ms step_avg:41.76ms +[2025-09-11 10:17:21] [Rank 0] step:8281/10000 train_time:345777ms step_avg:41.76ms +[2025-09-11 10:17:21] [Rank 0] step:8301/10000 train_time:346486ms step_avg:41.74ms +[2025-09-11 10:17:21] [Rank 0] step:8301/10000 train_time:346486ms step_avg:41.74ms +[2025-09-11 10:17:22] [Rank 0] step:8321/10000 train_time:347193ms step_avg:41.72ms 
+[2025-09-11 10:17:22] [Rank 0] step:8321/10000 train_time:347193ms step_avg:41.72ms +[2025-09-11 10:17:23] [Rank 0] step:8341/10000 train_time:347907ms step_avg:41.71ms +[2025-09-11 10:17:23] [Rank 0] step:8341/10000 train_time:347907ms step_avg:41.71ms +[2025-09-11 10:17:24] [Rank 0] step:8361/10000 train_time:348610ms step_avg:41.69ms +[2025-09-11 10:17:24] [Rank 0] step:8361/10000 train_time:348610ms step_avg:41.69ms +[2025-09-11 10:17:24] [Rank 0] step:8381/10000 train_time:349320ms step_avg:41.68ms +[2025-09-11 10:17:24] [Rank 0] step:8381/10000 train_time:349320ms step_avg:41.68ms +[2025-09-11 10:17:25] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 10:17:25] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 10:17:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 10:17:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 10:17:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 10:17:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 10:17:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:17:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:17:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 10:17:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 10:17:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 10:17:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 10:17:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 10:17:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 10:17:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 10:17:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 10:17:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 10:17:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 10:17:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 10:17:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 10:17:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 10:17:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 10:17:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 10:17:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 10:17:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 10:17:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 10:17:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 10:17:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 10:17:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 10:17:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 10:17:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 10:17:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 10:17:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 10:17:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 10:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 10:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 10:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 10:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 10:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 10:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 10:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 10:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 10:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 10:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 10:17:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 10:17:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:17:35] [Rank 0] PRINT: step:8400/10000 val_loss:4.1564 total_sharp:5.0568e-05 L1_sharp:3.6742e-03 L2_sharp:3.5026e-03 L3_sharp:4.7660e-03 L4_sharp:7.7832e-03 L5_sharp:8.7507e-03 L6_sharp:1.5841e-02 L7_sharp:1.6201e-02 L8_sharp:2.3033e-02 L9_sharp:2.5311e-02 L10_sharp:3.3153e-02 L11_sharp:4.9373e-02 L12_sharp:3.5783e-01 total_fnorm:2.9000e+01 total_l1_linf:4.0448e+04 total_spectral:1.4500e+01 L1_fnorm:8.8379e-02 L2_fnorm:8.7891e-02 L3_fnorm:8.6914e-02 L4_fnorm:8.6914e-02 L5_fnorm:8.6914e-02 L6_fnorm:8.6914e-02 L7_fnorm:8.7402e-02 L8_fnorm:8.4473e-02 L9_fnorm:8.6426e-02 L10_fnorm:8.6426e-02 L11_fnorm:8.4961e-02 L12_fnorm:8.3984e-02 L1_l1linf:1.5747e-02 L2_l1linf:1.5747e-02 L3_l1linf:1.5076e-02 L4_l1linf:1.5381e-02 L5_l1linf:1.4954e-02 L6_l1linf:1.4587e-02 L7_l1linf:1.4648e-02 L8_l1linf:1.4709e-02 L9_l1linf:1.4709e-02 L10_l1linf:1.3916e-02 L11_l1linf:1.4160e-02 L12_l1linf:1.3977e-02 L1_spectral:1.3650e-03 L2_spectral:1.3472e-03 L3_spectral:1.3603e-03 L4_spectral:1.3520e-03 L5_spectral:1.3402e-03 L6_spectral:1.3446e-03 L7_spectral:1.3564e-03 L8_spectral:1.3206e-03 L9_spectral:1.3204e-03 L10_spectral:1.3163e-03 L11_spectral:1.3080e-03 L12_spectral:1.3364e-03 train_time:350011ms step_avg:41.67ms +[2025-09-11 10:17:35] [Rank 0] PRINT: step:8400/10000 val_loss:4.1564 total_sharp:5.0568e-05 L1_sharp:3.6742e-03 L2_sharp:3.5026e-03 L3_sharp:4.7660e-03 L4_sharp:7.7832e-03 L5_sharp:8.7507e-03 L6_sharp:1.5841e-02 L7_sharp:1.6201e-02 L8_sharp:2.3033e-02 L9_sharp:2.5311e-02 L10_sharp:3.3153e-02 L11_sharp:4.9373e-02 L12_sharp:3.5783e-01 total_fnorm:2.9000e+01 total_l1_linf:4.0448e+04 total_spectral:1.4500e+01 L1_fnorm:8.8379e-02 L2_fnorm:8.7891e-02 L3_fnorm:8.6914e-02 L4_fnorm:8.6914e-02 L5_fnorm:8.6914e-02 L6_fnorm:8.6914e-02 L7_fnorm:8.7402e-02 L8_fnorm:8.4473e-02 L9_fnorm:8.6426e-02 L10_fnorm:8.6426e-02 L11_fnorm:8.4961e-02 L12_fnorm:8.3984e-02 L1_l1linf:1.5747e-02 L2_l1linf:1.5747e-02 L3_l1linf:1.5076e-02 L4_l1linf:1.5381e-02 L5_l1linf:1.4954e-02 
L6_l1linf:1.4587e-02 L7_l1linf:1.4648e-02 L8_l1linf:1.4709e-02 L9_l1linf:1.4709e-02 L10_l1linf:1.3916e-02 L11_l1linf:1.4160e-02 L12_l1linf:1.3977e-02 L1_spectral:1.3650e-03 L2_spectral:1.3472e-03 L3_spectral:1.3603e-03 L4_spectral:1.3520e-03 L5_spectral:1.3402e-03 L6_spectral:1.3446e-03 L7_spectral:1.3564e-03 L8_spectral:1.3206e-03 L9_spectral:1.3204e-03 L10_spectral:1.3163e-03 L11_spectral:1.3080e-03 L12_spectral:1.3364e-03 train_time:350011ms step_avg:41.67ms +[2025-09-11 10:17:37] [Rank 0] step:8401/10000 train_time:351422ms step_avg:41.83ms +[2025-09-11 10:17:37] [Rank 0] step:8401/10000 train_time:351422ms step_avg:41.83ms +[2025-09-11 10:17:37] [Rank 0] step:8421/10000 train_time:352171ms step_avg:41.82ms +[2025-09-11 10:17:37] [Rank 0] step:8421/10000 train_time:352171ms step_avg:41.82ms +[2025-09-11 10:17:38] [Rank 0] step:8441/10000 train_time:352880ms step_avg:41.81ms +[2025-09-11 10:17:38] [Rank 0] step:8441/10000 train_time:352880ms step_avg:41.81ms +[2025-09-11 10:17:39] [Rank 0] step:8461/10000 train_time:353589ms step_avg:41.79ms +[2025-09-11 10:17:39] [Rank 0] step:8461/10000 train_time:353589ms step_avg:41.79ms +[2025-09-11 10:17:39] [Rank 0] step:8481/10000 train_time:354298ms step_avg:41.78ms +[2025-09-11 10:17:39] [Rank 0] step:8481/10000 train_time:354298ms step_avg:41.78ms +[2025-09-11 10:17:40] [Rank 0] step:8501/10000 train_time:355004ms step_avg:41.76ms +[2025-09-11 10:17:40] [Rank 0] step:8501/10000 train_time:355004ms step_avg:41.76ms +[2025-09-11 10:17:41] [Rank 0] step:8521/10000 train_time:355711ms step_avg:41.75ms +[2025-09-11 10:17:41] [Rank 0] step:8521/10000 train_time:355711ms step_avg:41.75ms +[2025-09-11 10:17:42] [Rank 0] step:8541/10000 train_time:356419ms step_avg:41.73ms +[2025-09-11 10:17:42] [Rank 0] step:8541/10000 train_time:356419ms step_avg:41.73ms +[2025-09-11 10:17:42] [Rank 0] step:8561/10000 train_time:357132ms step_avg:41.72ms +[2025-09-11 10:17:42] [Rank 0] step:8561/10000 train_time:357132ms step_avg:41.72ms 
+[2025-09-11 10:17:43] [Rank 0] step:8581/10000 train_time:357844ms step_avg:41.70ms +[2025-09-11 10:17:43] [Rank 0] step:8581/10000 train_time:357844ms step_avg:41.70ms +[2025-09-11 10:17:44] [Rank 0] step:8601/10000 train_time:358552ms step_avg:41.69ms +[2025-09-11 10:17:44] [Rank 0] step:8601/10000 train_time:358552ms step_avg:41.69ms +[2025-09-11 10:17:44] [Rank 0] step:8621/10000 train_time:359259ms step_avg:41.67ms +[2025-09-11 10:17:44] [Rank 0] step:8621/10000 train_time:359259ms step_avg:41.67ms +[2025-09-11 10:17:45] [Rank 0] step:8641/10000 train_time:359966ms step_avg:41.66ms +[2025-09-11 10:17:45] [Rank 0] step:8641/10000 train_time:359966ms step_avg:41.66ms +[2025-09-11 10:17:46] [Rank 0] step:8661/10000 train_time:360674ms step_avg:41.64ms +[2025-09-11 10:17:46] [Rank 0] step:8661/10000 train_time:360674ms step_avg:41.64ms +[2025-09-11 10:17:47] [Rank 0] step:8681/10000 train_time:361382ms step_avg:41.63ms +[2025-09-11 10:17:47] [Rank 0] step:8681/10000 train_time:361382ms step_avg:41.63ms +[2025-09-11 10:17:47] [Rank 0] step:8701/10000 train_time:362089ms step_avg:41.61ms +[2025-09-11 10:17:47] [Rank 0] step:8701/10000 train_time:362089ms step_avg:41.61ms +[2025-09-11 10:17:48] [Rank 0] step:8721/10000 train_time:362798ms step_avg:41.60ms +[2025-09-11 10:17:48] [Rank 0] step:8721/10000 train_time:362798ms step_avg:41.60ms +[2025-09-11 10:17:49] [Rank 0] step:8741/10000 train_time:363503ms step_avg:41.59ms +[2025-09-11 10:17:49] [Rank 0] step:8741/10000 train_time:363503ms step_avg:41.59ms +[2025-09-11 10:17:49] [Rank 0] step:8761/10000 train_time:364213ms step_avg:41.57ms +[2025-09-11 10:17:49] [Rank 0] step:8761/10000 train_time:364213ms step_avg:41.57ms +[2025-09-11 10:17:50] [Rank 0] step:8781/10000 train_time:364918ms step_avg:41.56ms +[2025-09-11 10:17:50] [Rank 0] step:8781/10000 train_time:364918ms step_avg:41.56ms +[2025-09-11 10:17:51] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 10:17:51] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 10:17:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 10:17:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 10:17:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 10:17:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 10:17:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:17:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:17:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 10:17:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 10:17:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 10:17:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 10:17:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 10:17:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 10:17:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 10:17:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 10:17:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 10:17:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 10:17:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 10:17:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 10:17:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 10:17:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 10:17:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 10:17:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 10:17:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 10:17:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 10:17:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 10:17:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 10:17:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 10:17:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 10:17:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 10:17:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 10:17:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 10:17:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 10:17:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 10:17:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 10:18:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 10:18:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 10:18:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 10:18:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 10:18:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 10:18:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 10:18:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 10:18:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 10:18:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:18:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 10:18:01] [Rank 0] PRINT: step:8800/10000 val_loss:4.1518 total_sharp:4.6285e-05 L1_sharp:2.4571e-03 L2_sharp:2.8542e-03 L3_sharp:3.3492e-03 L4_sharp:4.5173e-03 L5_sharp:7.3748e-03 L6_sharp:1.3800e-02 L7_sharp:1.5965e-02 L8_sharp:2.4415e-02 L9_sharp:2.3474e-02 L10_sharp:3.1889e-02 L11_sharp:5.1102e-02 L12_sharp:2.5067e-01 total_fnorm:2.1375e+01 total_l1_linf:2.6496e+04 total_spectral:1.0688e+01 L1_fnorm:6.3477e-02 L2_fnorm:6.3477e-02 L3_fnorm:6.2500e-02 L4_fnorm:6.2988e-02 L5_fnorm:6.2500e-02 L6_fnorm:6.2500e-02 L7_fnorm:6.2988e-02 L8_fnorm:6.0791e-02 L9_fnorm:6.1768e-02 L10_fnorm:6.2012e-02 L11_fnorm:6.0791e-02 L12_fnorm:5.9814e-02 L1_l1linf:1.0559e-02 L2_l1linf:1.0437e-02 L3_l1linf:9.7046e-03 L4_l1linf:9.9487e-03 L5_l1linf:9.8267e-03 L6_l1linf:9.8267e-03 L7_l1linf:9.8267e-03 L8_l1linf:1.0071e-02 L9_l1linf:9.3384e-03 L10_l1linf:8.9722e-03 L11_l1linf:8.7891e-03 L12_l1linf:9.2163e-03 L1_spectral:1.0013e-03 L2_spectral:1.0012e-03 L3_spectral:1.0059e-03 L4_spectral:1.0066e-03 L5_spectral:1.0739e-03 L6_spectral:9.9305e-04 L7_spectral:9.9358e-04 L8_spectral:9.7889e-04 L9_spectral:9.7415e-04 L10_spectral:9.6929e-04 L11_spectral:9.5938e-04 L12_spectral:9.8383e-04 train_time:365604ms step_avg:41.55ms +[2025-09-11 10:18:01] [Rank 0] PRINT: step:8800/10000 
val_loss:4.1518 total_sharp:4.6285e-05 L1_sharp:2.4571e-03 L2_sharp:2.8542e-03 L3_sharp:3.3492e-03 L4_sharp:4.5173e-03 L5_sharp:7.3748e-03 L6_sharp:1.3800e-02 L7_sharp:1.5965e-02 L8_sharp:2.4415e-02 L9_sharp:2.3474e-02 L10_sharp:3.1889e-02 L11_sharp:5.1102e-02 L12_sharp:2.5067e-01 total_fnorm:2.1375e+01 total_l1_linf:2.6496e+04 total_spectral:1.0688e+01 L1_fnorm:6.3477e-02 L2_fnorm:6.3477e-02 L3_fnorm:6.2500e-02 L4_fnorm:6.2988e-02 L5_fnorm:6.2500e-02 L6_fnorm:6.2500e-02 L7_fnorm:6.2988e-02 L8_fnorm:6.0791e-02 L9_fnorm:6.1768e-02 L10_fnorm:6.2012e-02 L11_fnorm:6.0791e-02 L12_fnorm:5.9814e-02 L1_l1linf:1.0559e-02 L2_l1linf:1.0437e-02 L3_l1linf:9.7046e-03 L4_l1linf:9.9487e-03 L5_l1linf:9.8267e-03 L6_l1linf:9.8267e-03 L7_l1linf:9.8267e-03 L8_l1linf:1.0071e-02 L9_l1linf:9.3384e-03 L10_l1linf:8.9722e-03 L11_l1linf:8.7891e-03 L12_l1linf:9.2163e-03 L1_spectral:1.0013e-03 L2_spectral:1.0012e-03 L3_spectral:1.0059e-03 L4_spectral:1.0066e-03 L5_spectral:1.0739e-03 L6_spectral:9.9305e-04 L7_spectral:9.9358e-04 L8_spectral:9.7889e-04 L9_spectral:9.7415e-04 L10_spectral:9.6929e-04 L11_spectral:9.5938e-04 L12_spectral:9.8383e-04 train_time:365604ms step_avg:41.55ms +[2025-09-11 10:18:02] [Rank 0] step:8801/10000 train_time:367016ms step_avg:41.70ms +[2025-09-11 10:18:02] [Rank 0] step:8801/10000 train_time:367016ms step_avg:41.70ms +[2025-09-11 10:18:03] [Rank 0] step:8821/10000 train_time:367746ms step_avg:41.69ms +[2025-09-11 10:18:03] [Rank 0] step:8821/10000 train_time:367746ms step_avg:41.69ms +[2025-09-11 10:18:04] [Rank 0] step:8841/10000 train_time:368455ms step_avg:41.68ms +[2025-09-11 10:18:04] [Rank 0] step:8841/10000 train_time:368455ms step_avg:41.68ms +[2025-09-11 10:18:05] [Rank 0] step:8861/10000 train_time:369164ms step_avg:41.66ms +[2025-09-11 10:18:05] [Rank 0] step:8861/10000 train_time:369164ms step_avg:41.66ms +[2025-09-11 10:18:05] [Rank 0] step:8881/10000 train_time:369872ms step_avg:41.65ms +[2025-09-11 10:18:05] [Rank 0] step:8881/10000 
train_time:369872ms step_avg:41.65ms +[2025-09-11 10:18:06] [Rank 0] step:8901/10000 train_time:370584ms step_avg:41.63ms +[2025-09-11 10:18:06] [Rank 0] step:8901/10000 train_time:370584ms step_avg:41.63ms +[2025-09-11 10:18:07] [Rank 0] step:8921/10000 train_time:371288ms step_avg:41.62ms +[2025-09-11 10:18:07] [Rank 0] step:8921/10000 train_time:371288ms step_avg:41.62ms +[2025-09-11 10:18:07] [Rank 0] step:8941/10000 train_time:372000ms step_avg:41.61ms +[2025-09-11 10:18:07] [Rank 0] step:8941/10000 train_time:372000ms step_avg:41.61ms +[2025-09-11 10:18:08] [Rank 0] step:8961/10000 train_time:372717ms step_avg:41.59ms +[2025-09-11 10:18:08] [Rank 0] step:8961/10000 train_time:372717ms step_avg:41.59ms +[2025-09-11 10:18:09] [Rank 0] step:8981/10000 train_time:373429ms step_avg:41.58ms +[2025-09-11 10:18:09] [Rank 0] step:8981/10000 train_time:373429ms step_avg:41.58ms +[2025-09-11 10:18:10] [Rank 0] step:9001/10000 train_time:374133ms step_avg:41.57ms +[2025-09-11 10:18:10] [Rank 0] step:9001/10000 train_time:374133ms step_avg:41.57ms +[2025-09-11 10:18:10] [Rank 0] step:9021/10000 train_time:374842ms step_avg:41.55ms +[2025-09-11 10:18:10] [Rank 0] step:9021/10000 train_time:374842ms step_avg:41.55ms +[2025-09-11 10:18:11] [Rank 0] step:9041/10000 train_time:375553ms step_avg:41.54ms +[2025-09-11 10:18:11] [Rank 0] step:9041/10000 train_time:375553ms step_avg:41.54ms +[2025-09-11 10:18:12] [Rank 0] step:9061/10000 train_time:376260ms step_avg:41.53ms +[2025-09-11 10:18:12] [Rank 0] step:9061/10000 train_time:376260ms step_avg:41.53ms +[2025-09-11 10:18:12] [Rank 0] step:9081/10000 train_time:376970ms step_avg:41.51ms +[2025-09-11 10:18:12] [Rank 0] step:9081/10000 train_time:376970ms step_avg:41.51ms +[2025-09-11 10:18:13] [Rank 0] step:9101/10000 train_time:377682ms step_avg:41.50ms +[2025-09-11 10:18:13] [Rank 0] step:9101/10000 train_time:377682ms step_avg:41.50ms +[2025-09-11 10:18:14] [Rank 0] step:9121/10000 train_time:378394ms step_avg:41.49ms 
+[2025-09-11 10:18:14] [Rank 0] step:9121/10000 train_time:378394ms step_avg:41.49ms +[2025-09-11 10:18:15] [Rank 0] step:9141/10000 train_time:379100ms step_avg:41.47ms +[2025-09-11 10:18:15] [Rank 0] step:9141/10000 train_time:379100ms step_avg:41.47ms +[2025-09-11 10:18:15] [Rank 0] step:9161/10000 train_time:379812ms step_avg:41.46ms +[2025-09-11 10:18:15] [Rank 0] step:9161/10000 train_time:379812ms step_avg:41.46ms +[2025-09-11 10:18:16] [Rank 0] step:9181/10000 train_time:380522ms step_avg:41.45ms +[2025-09-11 10:18:16] [Rank 0] step:9181/10000 train_time:380522ms step_avg:41.45ms +[2025-09-11 10:18:17] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 10:18:17] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 10:18:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 10:18:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 10:18:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 10:18:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 10:18:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:18:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:18:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 10:18:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 10:18:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 10:18:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 10:18:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 10:18:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 10:18:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 10:18:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 10:18:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 10:18:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 10:18:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 10:18:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 10:18:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 10:18:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 10:18:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 10:18:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 10:18:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 10:18:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 10:18:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 10:18:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 10:18:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 10:18:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 10:18:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 10:18:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 10:18:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 10:18:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 10:18:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 10:18:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 10:18:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 10:18:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 10:18:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 10:18:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 10:18:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 10:18:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 10:18:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 10:18:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 10:18:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 10:18:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:18:27] [Rank 0] PRINT: step:9200/10000 val_loss:4.1369 total_sharp:3.7629e-05 L1_sharp:3.4169e-03 L2_sharp:1.9399e-03 L3_sharp:4.7732e-03 L4_sharp:5.7213e-03 L5_sharp:7.5674e-03 L6_sharp:1.0529e-02 L7_sharp:1.3207e-02 L8_sharp:1.9260e-02 L9_sharp:1.9605e-02 L10_sharp:2.8414e-02 L11_sharp:4.1407e-02 L12_sharp:3.0371e-01 total_fnorm:1.5312e+01 total_l1_linf:1.6768e+04 total_spectral:7.6562e+00 L1_fnorm:4.2480e-02 L2_fnorm:4.1992e-02 L3_fnorm:4.1748e-02 L4_fnorm:4.1748e-02 L5_fnorm:4.1748e-02 L6_fnorm:4.1992e-02 L7_fnorm:4.1992e-02 L8_fnorm:4.0771e-02 L9_fnorm:4.1260e-02 L10_fnorm:4.1260e-02 L11_fnorm:4.0527e-02 L12_fnorm:4.0039e-02 L1_l1linf:6.2561e-03 L2_l1linf:6.3477e-03 L3_l1linf:5.8594e-03 L4_l1linf:5.8289e-03 L5_l1linf:5.7983e-03 L6_l1linf:5.7983e-03 L7_l1linf:5.8594e-03 L8_l1linf:6.3782e-03 L9_l1linf:5.4932e-03 L10_l1linf:5.5542e-03 L11_l1linf:5.3101e-03 L12_l1linf:6.1035e-03 L1_spectral:6.8880e-04 L2_spectral:6.8344e-04 L3_spectral:6.8665e-04 L4_spectral:6.8987e-04 L5_spectral:6.7640e-04 L6_spectral:6.8453e-04 L7_spectral:6.8024e-04 L8_spectral:6.7093e-04 L9_spectral:6.5763e-04 L10_spectral:6.6550e-04 L11_spectral:6.5887e-04 L12_spectral:6.7626e-04 train_time:381214ms step_avg:41.44ms +[2025-09-11 10:18:27] [Rank 0] PRINT: step:9200/10000 val_loss:4.1369 total_sharp:3.7629e-05 L1_sharp:3.4169e-03 L2_sharp:1.9399e-03 L3_sharp:4.7732e-03 L4_sharp:5.7213e-03 L5_sharp:7.5674e-03 L6_sharp:1.0529e-02 L7_sharp:1.3207e-02 L8_sharp:1.9260e-02 L9_sharp:1.9605e-02 L10_sharp:2.8414e-02 L11_sharp:4.1407e-02 L12_sharp:3.0371e-01 total_fnorm:1.5312e+01 total_l1_linf:1.6768e+04 total_spectral:7.6562e+00 L1_fnorm:4.2480e-02 L2_fnorm:4.1992e-02 L3_fnorm:4.1748e-02 L4_fnorm:4.1748e-02 L5_fnorm:4.1748e-02 L6_fnorm:4.1992e-02 L7_fnorm:4.1992e-02 L8_fnorm:4.0771e-02 L9_fnorm:4.1260e-02 L10_fnorm:4.1260e-02 L11_fnorm:4.0527e-02 L12_fnorm:4.0039e-02 L1_l1linf:6.2561e-03 L2_l1linf:6.3477e-03 L3_l1linf:5.8594e-03 L4_l1linf:5.8289e-03 L5_l1linf:5.7983e-03 
L6_l1linf:5.7983e-03 L7_l1linf:5.8594e-03 L8_l1linf:6.3782e-03 L9_l1linf:5.4932e-03 L10_l1linf:5.5542e-03 L11_l1linf:5.3101e-03 L12_l1linf:6.1035e-03 L1_spectral:6.8880e-04 L2_spectral:6.8344e-04 L3_spectral:6.8665e-04 L4_spectral:6.8987e-04 L5_spectral:6.7640e-04 L6_spectral:6.8453e-04 L7_spectral:6.8024e-04 L8_spectral:6.7093e-04 L9_spectral:6.5763e-04 L10_spectral:6.6550e-04 L11_spectral:6.5887e-04 L12_spectral:6.7626e-04 train_time:381214ms step_avg:41.44ms +[2025-09-11 10:18:28] [Rank 0] step:9201/10000 train_time:382642ms step_avg:41.59ms +[2025-09-11 10:18:28] [Rank 0] step:9201/10000 train_time:382642ms step_avg:41.59ms +[2025-09-11 10:18:29] [Rank 0] step:9221/10000 train_time:383366ms step_avg:41.58ms +[2025-09-11 10:18:29] [Rank 0] step:9221/10000 train_time:383366ms step_avg:41.58ms +[2025-09-11 10:18:30] [Rank 0] step:9241/10000 train_time:384074ms step_avg:41.56ms +[2025-09-11 10:18:30] [Rank 0] step:9241/10000 train_time:384074ms step_avg:41.56ms +[2025-09-11 10:18:31] [Rank 0] step:9261/10000 train_time:384785ms step_avg:41.55ms +[2025-09-11 10:18:31] [Rank 0] step:9261/10000 train_time:384785ms step_avg:41.55ms +[2025-09-11 10:18:31] [Rank 0] step:9281/10000 train_time:385494ms step_avg:41.54ms +[2025-09-11 10:18:31] [Rank 0] step:9281/10000 train_time:385494ms step_avg:41.54ms +[2025-09-11 10:18:32] [Rank 0] step:9301/10000 train_time:386201ms step_avg:41.52ms +[2025-09-11 10:18:32] [Rank 0] step:9301/10000 train_time:386201ms step_avg:41.52ms +[2025-09-11 10:18:33] [Rank 0] step:9321/10000 train_time:386911ms step_avg:41.51ms +[2025-09-11 10:18:33] [Rank 0] step:9321/10000 train_time:386911ms step_avg:41.51ms +[2025-09-11 10:18:33] [Rank 0] step:9341/10000 train_time:387617ms step_avg:41.50ms +[2025-09-11 10:18:33] [Rank 0] step:9341/10000 train_time:387617ms step_avg:41.50ms +[2025-09-11 10:18:34] [Rank 0] step:9361/10000 train_time:388321ms step_avg:41.48ms +[2025-09-11 10:18:34] [Rank 0] step:9361/10000 train_time:388321ms step_avg:41.48ms 
+[2025-09-11 10:18:35] [Rank 0] step:9381/10000 train_time:389029ms step_avg:41.47ms +[2025-09-11 10:18:35] [Rank 0] step:9381/10000 train_time:389029ms step_avg:41.47ms +[2025-09-11 10:18:36] [Rank 0] step:9401/10000 train_time:389739ms step_avg:41.46ms +[2025-09-11 10:18:36] [Rank 0] step:9401/10000 train_time:389739ms step_avg:41.46ms +[2025-09-11 10:18:36] [Rank 0] step:9421/10000 train_time:390450ms step_avg:41.44ms +[2025-09-11 10:18:36] [Rank 0] step:9421/10000 train_time:390450ms step_avg:41.44ms +[2025-09-11 10:18:37] [Rank 0] step:9441/10000 train_time:391162ms step_avg:41.43ms +[2025-09-11 10:18:37] [Rank 0] step:9441/10000 train_time:391162ms step_avg:41.43ms +[2025-09-11 10:18:38] [Rank 0] step:9461/10000 train_time:391870ms step_avg:41.42ms +[2025-09-11 10:18:38] [Rank 0] step:9461/10000 train_time:391870ms step_avg:41.42ms +[2025-09-11 10:18:38] [Rank 0] step:9481/10000 train_time:392580ms step_avg:41.41ms +[2025-09-11 10:18:38] [Rank 0] step:9481/10000 train_time:392580ms step_avg:41.41ms +[2025-09-11 10:18:39] [Rank 0] step:9501/10000 train_time:393291ms step_avg:41.39ms +[2025-09-11 10:18:39] [Rank 0] step:9501/10000 train_time:393291ms step_avg:41.39ms +[2025-09-11 10:18:40] [Rank 0] step:9521/10000 train_time:394004ms step_avg:41.38ms +[2025-09-11 10:18:40] [Rank 0] step:9521/10000 train_time:394004ms step_avg:41.38ms +[2025-09-11 10:18:40] [Rank 0] step:9541/10000 train_time:394709ms step_avg:41.37ms +[2025-09-11 10:18:40] [Rank 0] step:9541/10000 train_time:394709ms step_avg:41.37ms +[2025-09-11 10:18:41] [Rank 0] step:9561/10000 train_time:395419ms step_avg:41.36ms +[2025-09-11 10:18:41] [Rank 0] step:9561/10000 train_time:395419ms step_avg:41.36ms +[2025-09-11 10:18:42] [Rank 0] step:9581/10000 train_time:396129ms step_avg:41.35ms +[2025-09-11 10:18:42] [Rank 0] step:9581/10000 train_time:396129ms step_avg:41.35ms +[2025-09-11 10:18:43] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 10:18:43] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 10:18:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 10:18:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 10:18:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 10:18:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 10:18:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:18:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:18:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 10:18:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 10:18:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 10:18:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 10:18:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 10:18:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 10:18:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 10:18:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 10:18:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 10:18:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 10:18:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 10:18:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 10:18:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 10:18:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 10:18:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 10:18:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 10:18:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 10:18:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 10:18:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 10:18:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 10:18:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 10:18:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 10:18:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 10:18:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 10:18:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 10:18:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 10:18:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 10:18:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 10:18:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 10:18:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 10:18:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 10:18:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 10:18:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 10:18:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 10:18:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 10:18:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 10:18:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:18:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 10:18:53] [Rank 0] PRINT: step:9600/10000 val_loss:4.1293 total_sharp:2.4252e-05 L1_sharp:2.7010e-03 L2_sharp:2.9448e-03 L3_sharp:2.9261e-03 L4_sharp:4.6271e-03 L5_sharp:4.6096e-03 L6_sharp:6.9310e-03 L7_sharp:1.1099e-02 L8_sharp:1.7157e-02 L9_sharp:1.4611e-02 L10_sharp:2.0220e-02 L11_sharp:3.0877e-02 L12_sharp:1.5341e-01 total_fnorm:8.6875e+00 total_l1_linf:8.0960e+03 total_spectral:4.3438e+00 L1_fnorm:2.3682e-02 L2_fnorm:2.3560e-02 L3_fnorm:2.3315e-02 L4_fnorm:2.3315e-02 L5_fnorm:2.3193e-02 L6_fnorm:2.3315e-02 L7_fnorm:2.3438e-02 L8_fnorm:2.2705e-02 L9_fnorm:2.3071e-02 L10_fnorm:2.3071e-02 L11_fnorm:2.2705e-02 L12_fnorm:2.2339e-02 L1_l1linf:2.8687e-03 L2_l1linf:2.8229e-03 L3_l1linf:2.8381e-03 L4_l1linf:2.6855e-03 L5_l1linf:2.7313e-03 L6_l1linf:2.6855e-03 L7_l1linf:2.6550e-03 L8_l1linf:3.2043e-03 L9_l1linf:2.5482e-03 L10_l1linf:2.6550e-03 L11_l1linf:2.5940e-03 L12_l1linf:2.9755e-03 L1_spectral:3.9525e-04 L2_spectral:3.9559e-04 L3_spectral:3.9143e-04 L4_spectral:3.9372e-04 L5_spectral:3.8528e-04 L6_spectral:3.8916e-04 L7_spectral:3.9140e-04 L8_spectral:3.9003e-04 L9_spectral:3.7612e-04 L10_spectral:3.7746e-04 L11_spectral:3.7274e-04 L12_spectral:3.9096e-04 train_time:396816ms step_avg:41.34ms +[2025-09-11 10:18:53] [Rank 0] PRINT: step:9600/10000 
val_loss:4.1293 total_sharp:2.4252e-05 L1_sharp:2.7010e-03 L2_sharp:2.9448e-03 L3_sharp:2.9261e-03 L4_sharp:4.6271e-03 L5_sharp:4.6096e-03 L6_sharp:6.9310e-03 L7_sharp:1.1099e-02 L8_sharp:1.7157e-02 L9_sharp:1.4611e-02 L10_sharp:2.0220e-02 L11_sharp:3.0877e-02 L12_sharp:1.5341e-01 total_fnorm:8.6875e+00 total_l1_linf:8.0960e+03 total_spectral:4.3438e+00 L1_fnorm:2.3682e-02 L2_fnorm:2.3560e-02 L3_fnorm:2.3315e-02 L4_fnorm:2.3315e-02 L5_fnorm:2.3193e-02 L6_fnorm:2.3315e-02 L7_fnorm:2.3438e-02 L8_fnorm:2.2705e-02 L9_fnorm:2.3071e-02 L10_fnorm:2.3071e-02 L11_fnorm:2.2705e-02 L12_fnorm:2.2339e-02 L1_l1linf:2.8687e-03 L2_l1linf:2.8229e-03 L3_l1linf:2.8381e-03 L4_l1linf:2.6855e-03 L5_l1linf:2.7313e-03 L6_l1linf:2.6855e-03 L7_l1linf:2.6550e-03 L8_l1linf:3.2043e-03 L9_l1linf:2.5482e-03 L10_l1linf:2.6550e-03 L11_l1linf:2.5940e-03 L12_l1linf:2.9755e-03 L1_spectral:3.9525e-04 L2_spectral:3.9559e-04 L3_spectral:3.9143e-04 L4_spectral:3.9372e-04 L5_spectral:3.8528e-04 L6_spectral:3.8916e-04 L7_spectral:3.9140e-04 L8_spectral:3.9003e-04 L9_spectral:3.7612e-04 L10_spectral:3.7746e-04 L11_spectral:3.7274e-04 L12_spectral:3.9096e-04 train_time:396816ms step_avg:41.34ms +[2025-09-11 10:18:54] [Rank 0] step:9601/10000 train_time:398249ms step_avg:41.48ms +[2025-09-11 10:18:54] [Rank 0] step:9601/10000 train_time:398249ms step_avg:41.48ms +[2025-09-11 10:18:55] [Rank 0] step:9621/10000 train_time:398998ms step_avg:41.47ms +[2025-09-11 10:18:55] [Rank 0] step:9621/10000 train_time:398998ms step_avg:41.47ms +[2025-09-11 10:18:56] [Rank 0] step:9641/10000 train_time:399713ms step_avg:41.46ms +[2025-09-11 10:18:56] [Rank 0] step:9641/10000 train_time:399713ms step_avg:41.46ms +[2025-09-11 10:18:56] [Rank 0] step:9661/10000 train_time:400434ms step_avg:41.45ms +[2025-09-11 10:18:56] [Rank 0] step:9661/10000 train_time:400434ms step_avg:41.45ms +[2025-09-11 10:18:57] [Rank 0] step:9681/10000 train_time:401149ms step_avg:41.44ms +[2025-09-11 10:18:57] [Rank 0] step:9681/10000 
train_time:401149ms step_avg:41.44ms +[2025-09-11 10:18:58] [Rank 0] step:9701/10000 train_time:401864ms step_avg:41.42ms +[2025-09-11 10:18:58] [Rank 0] step:9701/10000 train_time:401864ms step_avg:41.42ms +[2025-09-11 10:18:59] [Rank 0] step:9721/10000 train_time:402583ms step_avg:41.41ms +[2025-09-11 10:18:59] [Rank 0] step:9721/10000 train_time:402583ms step_avg:41.41ms +[2025-09-11 10:18:59] [Rank 0] step:9741/10000 train_time:403300ms step_avg:41.40ms +[2025-09-11 10:18:59] [Rank 0] step:9741/10000 train_time:403300ms step_avg:41.40ms +[2025-09-11 10:19:00] [Rank 0] step:9761/10000 train_time:404016ms step_avg:41.39ms +[2025-09-11 10:19:00] [Rank 0] step:9761/10000 train_time:404016ms step_avg:41.39ms +[2025-09-11 10:19:01] [Rank 0] step:9781/10000 train_time:404731ms step_avg:41.38ms +[2025-09-11 10:19:01] [Rank 0] step:9781/10000 train_time:404731ms step_avg:41.38ms +[2025-09-11 10:19:01] [Rank 0] step:9801/10000 train_time:405451ms step_avg:41.37ms +[2025-09-11 10:19:01] [Rank 0] step:9801/10000 train_time:405451ms step_avg:41.37ms +[2025-09-11 10:19:02] [Rank 0] step:9821/10000 train_time:406168ms step_avg:41.36ms +[2025-09-11 10:19:02] [Rank 0] step:9821/10000 train_time:406168ms step_avg:41.36ms +[2025-09-11 10:19:03] [Rank 0] step:9841/10000 train_time:406889ms step_avg:41.35ms +[2025-09-11 10:19:03] [Rank 0] step:9841/10000 train_time:406889ms step_avg:41.35ms +[2025-09-11 10:19:04] [Rank 0] step:9861/10000 train_time:407605ms step_avg:41.34ms +[2025-09-11 10:19:04] [Rank 0] step:9861/10000 train_time:407605ms step_avg:41.34ms +[2025-09-11 10:19:04] [Rank 0] step:9881/10000 train_time:408322ms step_avg:41.32ms +[2025-09-11 10:19:04] [Rank 0] step:9881/10000 train_time:408322ms step_avg:41.32ms +[2025-09-11 10:19:05] [Rank 0] step:9901/10000 train_time:409036ms step_avg:41.31ms +[2025-09-11 10:19:05] [Rank 0] step:9901/10000 train_time:409036ms step_avg:41.31ms +[2025-09-11 10:19:06] [Rank 0] step:9921/10000 train_time:409752ms step_avg:41.30ms 
+[2025-09-11 10:19:06] [Rank 0] step:9921/10000 train_time:409752ms step_avg:41.30ms +[2025-09-11 10:19:06] [Rank 0] step:9941/10000 train_time:410473ms step_avg:41.29ms +[2025-09-11 10:19:06] [Rank 0] step:9941/10000 train_time:410473ms step_avg:41.29ms +[2025-09-11 10:19:07] [Rank 0] step:9961/10000 train_time:411195ms step_avg:41.28ms +[2025-09-11 10:19:07] [Rank 0] step:9961/10000 train_time:411195ms step_avg:41.28ms +[2025-09-11 10:19:08] [Rank 0] step:9981/10000 train_time:411913ms step_avg:41.27ms +[2025-09-11 10:19:08] [Rank 0] step:9981/10000 train_time:411913ms step_avg:41.27ms +[2025-09-11 10:19:09] [Rank 0] step:10000/10000 train_time:412604ms step_avg:41.26ms +[2025-09-11 10:19:09] [Rank 0] step:10000/10000 train_time:412604ms step_avg:41.26ms +[2025-09-11 10:19:09] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 10:19:09] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 10:19:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 10:19:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 10:19:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 10:19:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 10:19:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:19:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 10:19:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 10:19:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 10:19:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 10:19:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 10:19:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 10:19:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 10:19:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 10:19:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 10:19:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 10:19:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 10:19:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 10:19:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 10:19:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 10:19:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 10:19:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 10:19:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 10:19:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 10:19:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 10:19:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 10:19:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 10:19:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 10:19:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 10:19:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 10:19:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 10:19:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 10:19:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 10:19:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 10:19:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 10:19:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 10:19:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 10:19:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 10:19:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 10:19:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 10:19:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 10:19:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 10:19:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 10:19:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 10:19:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 10:19:19] [Rank 0] PRINT: step:10000/10000 val_loss:4.1269 total_sharp:1.6650e-05 L1_sharp:2.3437e-03 L2_sharp:1.1861e-03 L3_sharp:2.0908e-03 L4_sharp:3.5648e-03 L5_sharp:5.2453e-03 L6_sharp:7.9546e-03 L7_sharp:8.8099e-03 L8_sharp:1.3039e-02 L9_sharp:1.2880e-02 L10_sharp:1.5820e-02 L11_sharp:2.3951e-02 L12_sharp:1.1922e-01 total_fnorm:3.3438e+00 total_l1_linf:2.2560e+03 total_spectral:1.6719e+00 L1_fnorm:9.2773e-03 L2_fnorm:9.2163e-03 L3_fnorm:9.2163e-03 L4_fnorm:9.1553e-03 L5_fnorm:9.0942e-03 L6_fnorm:9.1553e-03 L7_fnorm:9.1553e-03 L8_fnorm:8.8501e-03 L9_fnorm:9.0332e-03 L10_fnorm:9.0332e-03 L11_fnorm:8.9111e-03 L12_fnorm:8.7891e-03 L1_l1linf:9.3842e-04 L2_l1linf:9.0027e-04 L3_l1linf:8.3160e-04 L4_l1linf:8.5831e-04 L5_l1linf:8.3160e-04 L6_l1linf:8.1635e-04 L7_l1linf:8.4686e-04 L8_l1linf:1.0223e-03 L9_l1linf:7.8583e-04 L10_l1linf:8.0872e-04 L11_l1linf:7.8583e-04 L12_l1linf:9.1553e-04 L1_spectral:1.5953e-04 L2_spectral:1.5830e-04 L3_spectral:1.5782e-04 L4_spectral:1.5746e-04 L5_spectral:1.5691e-04 L6_spectral:1.5807e-04 L7_spectral:1.5566e-04 L8_spectral:1.5820e-04 L9_spectral:1.5196e-04 L10_spectral:1.5140e-04 L11_spectral:1.5043e-04 L12_spectral:1.5823e-04 train_time:412625ms step_avg:41.26ms +[2025-09-11 10:19:19] [Rank 0] PRINT: step:10000/10000 val_loss:4.1269 total_sharp:1.6650e-05 L1_sharp:2.3437e-03 L2_sharp:1.1861e-03 L3_sharp:2.0908e-03 L4_sharp:3.5648e-03 L5_sharp:5.2453e-03 L6_sharp:7.9546e-03 L7_sharp:8.8099e-03 L8_sharp:1.3039e-02 L9_sharp:1.2880e-02 L10_sharp:1.5820e-02 L11_sharp:2.3951e-02 L12_sharp:1.1922e-01 total_fnorm:3.3438e+00 total_l1_linf:2.2560e+03 total_spectral:1.6719e+00 L1_fnorm:9.2773e-03 L2_fnorm:9.2163e-03 L3_fnorm:9.2163e-03 L4_fnorm:9.1553e-03 L5_fnorm:9.0942e-03 L6_fnorm:9.1553e-03 L7_fnorm:9.1553e-03 L8_fnorm:8.8501e-03 L9_fnorm:9.0332e-03 L10_fnorm:9.0332e-03 L11_fnorm:8.9111e-03 L12_fnorm:8.7891e-03 L1_l1linf:9.3842e-04 L2_l1linf:9.0027e-04 L3_l1linf:8.3160e-04 L4_l1linf:8.5831e-04 L5_l1linf:8.3160e-04 
L6_l1linf:8.1635e-04 L7_l1linf:8.4686e-04 L8_l1linf:1.0223e-03 L9_l1linf:7.8583e-04 L10_l1linf:8.0872e-04 L11_l1linf:7.8583e-04 L12_l1linf:9.1553e-04 L1_spectral:1.5953e-04 L2_spectral:1.5830e-04 L3_spectral:1.5782e-04 L4_spectral:1.5746e-04 L5_spectral:1.5691e-04 L6_spectral:1.5807e-04 L7_spectral:1.5566e-04 L8_spectral:1.5820e-04 L9_spectral:1.5196e-04 L10_spectral:1.5140e-04 L11_spectral:1.5043e-04 L12_spectral:1.5823e-04 train_time:412625ms step_avg:41.26ms +[2025-09-11 10:19:19] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 10:19:19 2025 --- +[2025-09-11 10:19:19] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 10:19:19 2025 --- +[2025-09-11 10:19:19] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 10:19:19] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.002_seed_45/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.002_seed_45/config.json new file mode 100644 index 0000000000000000000000000000000000000000..3789122937eaf4d4bd8e06b0c01311d01836563c --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.002_seed_45/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 45, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.02, + "muon_lr": 0.002, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "cd4c5f42-8d16-4bd3-933f-28e4968dcccf", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.002_seed_45/training_log_cd4c5f42-8d16-4bd3-933f-28e4968dcccf.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.002_seed_45/training_log_cd4c5f42-8d16-4bd3-933f-28e4968dcccf.txt new file mode 100644 index 0000000000000000000000000000000000000000..92b6965c3ec1c6c52602bc96657d012c32f809de --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.002_seed_45/training_log_cd4c5f42-8d16-4bd3-933f-28e4968dcccf.txt @@ -0,0 +1,4264 @@ +[2025-09-11 14:15:17] [Rank 0] PRINT: --- Script Start: Thu Sep 11 14:15:17 2025 --- +[2025-09-11 14:15:17] [Rank 0] PRINT: --- Script Start: Thu Sep 11 14:15:17 2025 --- +[2025-09-11 14:15:17] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.02, muon_lr=0.002, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 14:15:17] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.02, muon_lr=0.002, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 14:15:17] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 14:15:17] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 14:15:17] [Rank 0] PRINT: Using fixed seed: 45 +[2025-09-11 14:15:17] [Rank 0] PRINT: Using fixed seed: 45 +[2025-09-11 14:15:17] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.002_seed_45 +[2025-09-11 14:15:17] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.002_seed_45 +[2025-09-11 14:15:17] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import 
dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, 
"unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." 
+ "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention 
sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 14:15:17] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 14:15:17] [Rank 0] PRINT: Constructing model... +[2025-09-11 14:15:17] [Rank 0] PRINT: Constructing model... +[2025-09-11 14:15:18] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 14:15:18] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 14:15:18] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 14:15:18] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 14:15:18] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 14:15:18] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 14:15:18] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 14:15:18] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 14:15:18] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 14:15:18] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 14:15:21] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 14:15:21] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 14:15:21] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 14:15:21] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 14:15:21] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 14:15:21] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 14:15:27] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 14:15:27] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 14:15:27] [Rank 0] PRINT: Starting warmup... +[2025-09-11 14:15:27] [Rank 0] PRINT: Starting warmup... +[2025-09-11 14:16:05] [Rank 0] PRINT: Warmup complete. +[2025-09-11 14:16:05] [Rank 0] PRINT: Warmup complete. +[2025-09-11 14:16:05] [Rank 0] PRINT: Starting training... +[2025-09-11 14:16:05] [Rank 0] PRINT: Starting training... 
+[2025-09-11 14:16:06] [Rank 0] step:21/10000 train_time:1129ms step_avg:53.77ms +[2025-09-11 14:16:06] [Rank 0] step:21/10000 train_time:1129ms step_avg:53.77ms +[2025-09-11 14:16:07] [Rank 0] step:41/10000 train_time:1861ms step_avg:45.39ms +[2025-09-11 14:16:07] [Rank 0] step:41/10000 train_time:1861ms step_avg:45.39ms +[2025-09-11 14:16:07] [Rank 0] step:61/10000 train_time:2592ms step_avg:42.49ms +[2025-09-11 14:16:07] [Rank 0] step:61/10000 train_time:2592ms step_avg:42.49ms +[2025-09-11 14:16:08] [Rank 0] step:81/10000 train_time:3323ms step_avg:41.02ms +[2025-09-11 14:16:08] [Rank 0] step:81/10000 train_time:3323ms step_avg:41.02ms +[2025-09-11 14:16:09] [Rank 0] step:101/10000 train_time:4053ms step_avg:40.13ms +[2025-09-11 14:16:09] [Rank 0] step:101/10000 train_time:4053ms step_avg:40.13ms +[2025-09-11 14:16:10] [Rank 0] step:121/10000 train_time:4785ms step_avg:39.54ms +[2025-09-11 14:16:10] [Rank 0] step:121/10000 train_time:4785ms step_avg:39.54ms +[2025-09-11 14:16:10] [Rank 0] step:141/10000 train_time:5514ms step_avg:39.11ms +[2025-09-11 14:16:10] [Rank 0] step:141/10000 train_time:5514ms step_avg:39.11ms +[2025-09-11 14:16:11] [Rank 0] step:161/10000 train_time:6244ms step_avg:38.78ms +[2025-09-11 14:16:11] [Rank 0] step:161/10000 train_time:6244ms step_avg:38.78ms +[2025-09-11 14:16:12] [Rank 0] step:181/10000 train_time:6974ms step_avg:38.53ms +[2025-09-11 14:16:12] [Rank 0] step:181/10000 train_time:6974ms step_avg:38.53ms +[2025-09-11 14:16:12] [Rank 0] step:201/10000 train_time:7705ms step_avg:38.33ms +[2025-09-11 14:16:12] [Rank 0] step:201/10000 train_time:7705ms step_avg:38.33ms +[2025-09-11 14:16:13] [Rank 0] step:221/10000 train_time:8434ms step_avg:38.16ms +[2025-09-11 14:16:13] [Rank 0] step:221/10000 train_time:8434ms step_avg:38.16ms +[2025-09-11 14:16:14] [Rank 0] step:241/10000 train_time:9163ms step_avg:38.02ms +[2025-09-11 14:16:14] [Rank 0] step:241/10000 train_time:9163ms step_avg:38.02ms +[2025-09-11 14:16:15] [Rank 0] 
step:261/10000 train_time:9894ms step_avg:37.91ms +[2025-09-11 14:16:15] [Rank 0] step:261/10000 train_time:9894ms step_avg:37.91ms +[2025-09-11 14:16:15] [Rank 0] step:281/10000 train_time:10626ms step_avg:37.81ms +[2025-09-11 14:16:15] [Rank 0] step:281/10000 train_time:10626ms step_avg:37.81ms +[2025-09-11 14:16:16] [Rank 0] step:301/10000 train_time:11358ms step_avg:37.74ms +[2025-09-11 14:16:16] [Rank 0] step:301/10000 train_time:11358ms step_avg:37.74ms +[2025-09-11 14:16:17] [Rank 0] step:321/10000 train_time:12088ms step_avg:37.66ms +[2025-09-11 14:16:17] [Rank 0] step:321/10000 train_time:12088ms step_avg:37.66ms +[2025-09-11 14:16:18] [Rank 0] step:341/10000 train_time:12818ms step_avg:37.59ms +[2025-09-11 14:16:18] [Rank 0] step:341/10000 train_time:12818ms step_avg:37.59ms +[2025-09-11 14:16:18] [Rank 0] step:361/10000 train_time:13548ms step_avg:37.53ms +[2025-09-11 14:16:18] [Rank 0] step:361/10000 train_time:13548ms step_avg:37.53ms +[2025-09-11 14:16:19] [Rank 0] step:381/10000 train_time:14278ms step_avg:37.48ms +[2025-09-11 14:16:19] [Rank 0] step:381/10000 train_time:14278ms step_avg:37.48ms +[2025-09-11 14:16:20] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 14:16:20] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 14:16:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 14:16:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 14:16:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 14:16:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 14:16:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:16:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 14:16:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 14:16:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 14:16:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 14:16:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 14:17:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 14:17:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 14:17:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 14:17:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 14:17:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 14:17:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 14:17:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 14:17:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 14:17:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 14:17:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 14:17:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 14:17:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 14:17:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 14:17:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 14:17:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 14:17:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 14:17:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 14:17:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 14:17:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 14:17:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 14:17:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 14:17:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 14:17:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 14:17:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 14:17:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 14:17:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 14:17:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 14:17:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 14:17:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 14:17:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 14:17:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 14:17:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 14:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 14:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 14:17:06] [Rank 0] PRINT: step:400/10000 val_loss:5.9531 total_sharp:4.7103e-04 L1_sharp:1.0123e-01 L2_sharp:1.0090e-01 L3_sharp:8.6415e-02 L4_sharp:1.0011e-01 L5_sharp:8.4540e-02 L6_sharp:9.8133e-02 L7_sharp:9.9951e-02 L8_sharp:9.9590e-02 L9_sharp:1.5719e-01 L10_sharp:1.7175e-01 L11_sharp:3.4778e-01 L12_sharp:5.2558e-01 total_fnorm:7.7882e+01 total_l1_linf:1.9176e+05 total_spectral:3.8942e+01 L1_fnorm:2.4256e-01 L2_fnorm:2.4178e-01 L3_fnorm:2.4131e-01 L4_fnorm:2.4042e-01 L5_fnorm:2.4040e-01 L6_fnorm:2.4106e-01 L7_fnorm:2.4189e-01 L8_fnorm:2.4149e-01 L9_fnorm:2.4180e-01 L10_fnorm:2.4141e-01 L11_fnorm:2.3853e-01 L12_fnorm:2.3719e-01 L1_l1linf:8.9412e-02 L2_l1linf:8.9270e-02 L3_l1linf:8.9101e-02 L4_l1linf:8.8462e-02 L5_l1linf:8.8728e-02 L6_l1linf:8.8799e-02 L7_l1linf:8.7760e-02 L8_l1linf:8.8828e-02 L9_l1linf:8.8592e-02 L10_l1linf:8.8287e-02 L11_l1linf:8.8139e-02 L12_l1linf:8.6867e-02 L1_spectral:2.4102e-03 L2_spectral:2.4101e-03 L3_spectral:2.4096e-03 L4_spectral:2.4100e-03 L5_spectral:2.4101e-03 L6_spectral:2.4109e-03 L7_spectral:2.4134e-03 L8_spectral:2.4100e-03 L9_spectral:2.4110e-03 L10_spectral:2.4121e-03 L11_spectral:2.4099e-03 L12_spectral:2.4093e-03 train_time:14988ms step_avg:37.47ms +[2025-09-11 14:17:06] [Rank 0] PRINT: step:400/10000 val_loss:5.9531 total_sharp:4.7103e-04 L1_sharp:1.0123e-01 L2_sharp:1.0090e-01 L3_sharp:8.6415e-02 L4_sharp:1.0011e-01 L5_sharp:8.4540e-02 L6_sharp:9.8133e-02 L7_sharp:9.9951e-02 L8_sharp:9.9590e-02 L9_sharp:1.5719e-01 L10_sharp:1.7175e-01 L11_sharp:3.4778e-01 L12_sharp:5.2558e-01 total_fnorm:7.7882e+01 total_l1_linf:1.9176e+05 total_spectral:3.8942e+01 L1_fnorm:2.4256e-01 L2_fnorm:2.4178e-01 L3_fnorm:2.4131e-01 L4_fnorm:2.4042e-01 L5_fnorm:2.4040e-01 L6_fnorm:2.4106e-01 L7_fnorm:2.4189e-01 L8_fnorm:2.4149e-01 L9_fnorm:2.4180e-01 L10_fnorm:2.4141e-01 L11_fnorm:2.3853e-01 L12_fnorm:2.3719e-01 L1_l1linf:8.9412e-02 L2_l1linf:8.9270e-02 L3_l1linf:8.9101e-02 L4_l1linf:8.8462e-02 L5_l1linf:8.8728e-02 
L6_l1linf:8.8799e-02 L7_l1linf:8.7760e-02 L8_l1linf:8.8828e-02 L9_l1linf:8.8592e-02 L10_l1linf:8.8287e-02 L11_l1linf:8.8139e-02 L12_l1linf:8.6867e-02 L1_spectral:2.4102e-03 L2_spectral:2.4101e-03 L3_spectral:2.4096e-03 L4_spectral:2.4100e-03 L5_spectral:2.4101e-03 L6_spectral:2.4109e-03 L7_spectral:2.4134e-03 L8_spectral:2.4100e-03 L9_spectral:2.4110e-03 L10_spectral:2.4121e-03 L11_spectral:2.4099e-03 L12_spectral:2.4093e-03 train_time:14988ms step_avg:37.47ms +[2025-09-11 14:17:35] [Rank 0] step:401/10000 train_time:44435ms step_avg:110.81ms +[2025-09-11 14:17:35] [Rank 0] step:401/10000 train_time:44435ms step_avg:110.81ms +[2025-09-11 14:17:37] [Rank 0] step:421/10000 train_time:46634ms step_avg:110.77ms +[2025-09-11 14:17:37] [Rank 0] step:421/10000 train_time:46634ms step_avg:110.77ms +[2025-09-11 14:17:38] [Rank 0] step:441/10000 train_time:47278ms step_avg:107.21ms +[2025-09-11 14:17:38] [Rank 0] step:441/10000 train_time:47278ms step_avg:107.21ms +[2025-09-11 14:17:39] [Rank 0] step:461/10000 train_time:47921ms step_avg:103.95ms +[2025-09-11 14:17:39] [Rank 0] step:461/10000 train_time:47921ms step_avg:103.95ms +[2025-09-11 14:17:39] [Rank 0] step:481/10000 train_time:48562ms step_avg:100.96ms +[2025-09-11 14:17:39] [Rank 0] step:481/10000 train_time:48562ms step_avg:100.96ms +[2025-09-11 14:17:40] [Rank 0] step:501/10000 train_time:49204ms step_avg:98.21ms +[2025-09-11 14:17:40] [Rank 0] step:501/10000 train_time:49204ms step_avg:98.21ms +[2025-09-11 14:17:41] [Rank 0] step:521/10000 train_time:49845ms step_avg:95.67ms +[2025-09-11 14:17:41] [Rank 0] step:521/10000 train_time:49845ms step_avg:95.67ms +[2025-09-11 14:17:41] [Rank 0] step:541/10000 train_time:50486ms step_avg:93.32ms +[2025-09-11 14:17:41] [Rank 0] step:541/10000 train_time:50486ms step_avg:93.32ms +[2025-09-11 14:17:42] [Rank 0] step:561/10000 train_time:51128ms step_avg:91.14ms +[2025-09-11 14:17:42] [Rank 0] step:561/10000 train_time:51128ms step_avg:91.14ms +[2025-09-11 14:17:43] [Rank 
0] step:581/10000 train_time:51769ms step_avg:89.10ms +[2025-09-11 14:17:43] [Rank 0] step:581/10000 train_time:51769ms step_avg:89.10ms +[2025-09-11 14:17:43] [Rank 0] step:601/10000 train_time:52411ms step_avg:87.21ms +[2025-09-11 14:17:43] [Rank 0] step:601/10000 train_time:52411ms step_avg:87.21ms +[2025-09-11 14:17:44] [Rank 0] step:621/10000 train_time:53053ms step_avg:85.43ms +[2025-09-11 14:17:44] [Rank 0] step:621/10000 train_time:53053ms step_avg:85.43ms +[2025-09-11 14:17:44] [Rank 0] step:641/10000 train_time:53695ms step_avg:83.77ms +[2025-09-11 14:17:44] [Rank 0] step:641/10000 train_time:53695ms step_avg:83.77ms +[2025-09-11 14:17:45] [Rank 0] step:661/10000 train_time:54336ms step_avg:82.20ms +[2025-09-11 14:17:45] [Rank 0] step:661/10000 train_time:54336ms step_avg:82.20ms +[2025-09-11 14:17:46] [Rank 0] step:681/10000 train_time:54977ms step_avg:80.73ms +[2025-09-11 14:17:46] [Rank 0] step:681/10000 train_time:54977ms step_avg:80.73ms +[2025-09-11 14:17:46] [Rank 0] step:701/10000 train_time:55619ms step_avg:79.34ms +[2025-09-11 14:17:46] [Rank 0] step:701/10000 train_time:55619ms step_avg:79.34ms +[2025-09-11 14:17:47] [Rank 0] step:721/10000 train_time:56261ms step_avg:78.03ms +[2025-09-11 14:17:47] [Rank 0] step:721/10000 train_time:56261ms step_avg:78.03ms +[2025-09-11 14:17:48] [Rank 0] step:741/10000 train_time:56901ms step_avg:76.79ms +[2025-09-11 14:17:48] [Rank 0] step:741/10000 train_time:56901ms step_avg:76.79ms +[2025-09-11 14:17:48] [Rank 0] step:761/10000 train_time:57548ms step_avg:75.62ms +[2025-09-11 14:17:48] [Rank 0] step:761/10000 train_time:57548ms step_avg:75.62ms +[2025-09-11 14:17:49] [Rank 0] step:781/10000 train_time:58195ms step_avg:74.51ms +[2025-09-11 14:17:49] [Rank 0] step:781/10000 train_time:58195ms step_avg:74.51ms +[2025-09-11 14:17:50] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 14:17:50] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 14:17:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 14:17:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 14:18:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 14:18:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 14:18:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:18:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:18:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 14:18:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 14:18:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 14:18:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 14:18:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 14:18:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 14:18:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 14:18:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 14:18:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 14:18:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 14:18:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 14:18:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 14:18:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 14:18:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 14:18:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 14:18:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 14:18:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 14:18:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 14:18:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 14:18:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 14:18:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 14:18:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 14:18:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 14:18:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 14:18:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 14:18:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 14:18:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 14:18:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 14:18:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 14:18:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 14:18:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 14:18:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 14:18:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... 
+[2025-09-11 14:18:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 14:18:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 14:18:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 14:18:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 14:18:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 14:18:34] [Rank 0] PRINT: step:800/10000 val_loss:5.4904 total_sharp:3.8304e-04 L1_sharp:4.4782e-02 L2_sharp:5.0094e-02 L3_sharp:4.8026e-02 L4_sharp:5.4117e-02 L5_sharp:5.7614e-02 L6_sharp:6.5682e-02 L7_sharp:7.4175e-02 L8_sharp:1.0247e-01 L9_sharp:1.3039e-01 L10_sharp:2.3652e-01 L11_sharp:3.5419e-01 L12_sharp:4.2993e-01 total_fnorm:7.8000e+01 total_l1_linf:1.6691e+05 total_spectral:3.9000e+01 L1_fnorm:2.4902e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4902e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.5000e-01 L7_fnorm:2.5000e-01 L8_fnorm:2.4805e-01 L9_fnorm:2.4902e-01 L10_fnorm:2.5000e-01 L11_fnorm:2.4512e-01 L12_fnorm:2.3242e-01 L1_l1linf:8.6914e-02 L2_l1linf:8.6426e-02 L3_l1linf:8.6426e-02 L4_l1linf:8.5938e-02 L5_l1linf:8.5449e-02 L6_l1linf:8.5938e-02 L7_l1linf:8.5938e-02 L8_l1linf:8.5938e-02 L9_l1linf:8.6426e-02 L10_l1linf:8.5938e-02 L11_l1linf:8.3984e-02 L12_l1linf:7.7637e-02 L1_spectral:3.0953e-03 L2_spectral:3.1118e-03 L3_spectral:3.1099e-03 L4_spectral:3.1058e-03 L5_spectral:3.0854e-03 L6_spectral:3.0893e-03 L7_spectral:3.0813e-03 L8_spectral:3.0797e-03 L9_spectral:3.0854e-03 L10_spectral:3.0815e-03 L11_spectral:3.0804e-03 L12_spectral:3.0805e-03 train_time:58823ms step_avg:73.53ms +[2025-09-11 14:18:34] [Rank 0] PRINT: step:800/10000 val_loss:5.4904 total_sharp:3.8304e-04 L1_sharp:4.4782e-02 L2_sharp:5.0094e-02 L3_sharp:4.8026e-02 L4_sharp:5.4117e-02 L5_sharp:5.7614e-02 L6_sharp:6.5682e-02 L7_sharp:7.4175e-02 L8_sharp:1.0247e-01 
L9_sharp:1.3039e-01 L10_sharp:2.3652e-01 L11_sharp:3.5419e-01 L12_sharp:4.2993e-01 total_fnorm:7.8000e+01 total_l1_linf:1.6691e+05 total_spectral:3.9000e+01 L1_fnorm:2.4902e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4902e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.5000e-01 L7_fnorm:2.5000e-01 L8_fnorm:2.4805e-01 L9_fnorm:2.4902e-01 L10_fnorm:2.5000e-01 L11_fnorm:2.4512e-01 L12_fnorm:2.3242e-01 L1_l1linf:8.6914e-02 L2_l1linf:8.6426e-02 L3_l1linf:8.6426e-02 L4_l1linf:8.5938e-02 L5_l1linf:8.5449e-02 L6_l1linf:8.5938e-02 L7_l1linf:8.5938e-02 L8_l1linf:8.5938e-02 L9_l1linf:8.6426e-02 L10_l1linf:8.5938e-02 L11_l1linf:8.3984e-02 L12_l1linf:7.7637e-02 L1_spectral:3.0953e-03 L2_spectral:3.1118e-03 L3_spectral:3.1099e-03 L4_spectral:3.1058e-03 L5_spectral:3.0854e-03 L6_spectral:3.0893e-03 L7_spectral:3.0813e-03 L8_spectral:3.0797e-03 L9_spectral:3.0854e-03 L10_spectral:3.0815e-03 L11_spectral:3.0804e-03 L12_spectral:3.0805e-03 train_time:58823ms step_avg:73.53ms +[2025-09-11 14:18:35] [Rank 0] step:801/10000 train_time:60319ms step_avg:75.30ms +[2025-09-11 14:18:35] [Rank 0] step:801/10000 train_time:60319ms step_avg:75.30ms +[2025-09-11 14:18:36] [Rank 0] step:821/10000 train_time:60969ms step_avg:74.26ms +[2025-09-11 14:18:36] [Rank 0] step:821/10000 train_time:60969ms step_avg:74.26ms +[2025-09-11 14:18:36] [Rank 0] step:841/10000 train_time:61616ms step_avg:73.27ms +[2025-09-11 14:18:36] [Rank 0] step:841/10000 train_time:61616ms step_avg:73.27ms +[2025-09-11 14:18:37] [Rank 0] step:861/10000 train_time:62263ms step_avg:72.31ms +[2025-09-11 14:18:37] [Rank 0] step:861/10000 train_time:62263ms step_avg:72.31ms +[2025-09-11 14:18:38] [Rank 0] step:881/10000 train_time:63176ms step_avg:71.71ms +[2025-09-11 14:18:38] [Rank 0] step:881/10000 train_time:63176ms step_avg:71.71ms +[2025-09-11 14:18:39] [Rank 0] step:901/10000 train_time:63822ms step_avg:70.84ms +[2025-09-11 14:18:39] [Rank 0] step:901/10000 train_time:63822ms step_avg:70.84ms +[2025-09-11 14:18:39] [Rank 0] 
step:921/10000 train_time:64468ms step_avg:70.00ms +[2025-09-11 14:18:39] [Rank 0] step:921/10000 train_time:64468ms step_avg:70.00ms +[2025-09-11 14:18:40] [Rank 0] step:941/10000 train_time:65418ms step_avg:69.52ms +[2025-09-11 14:18:40] [Rank 0] step:941/10000 train_time:65418ms step_avg:69.52ms +[2025-09-11 14:18:41] [Rank 0] step:961/10000 train_time:66063ms step_avg:68.74ms +[2025-09-11 14:18:41] [Rank 0] step:961/10000 train_time:66063ms step_avg:68.74ms +[2025-09-11 14:18:41] [Rank 0] step:981/10000 train_time:66709ms step_avg:68.00ms +[2025-09-11 14:18:41] [Rank 0] step:981/10000 train_time:66709ms step_avg:68.00ms +[2025-09-11 14:18:42] [Rank 0] step:1001/10000 train_time:67354ms step_avg:67.29ms +[2025-09-11 14:18:42] [Rank 0] step:1001/10000 train_time:67354ms step_avg:67.29ms +[2025-09-11 14:18:43] [Rank 0] step:1021/10000 train_time:68001ms step_avg:66.60ms +[2025-09-11 14:18:43] [Rank 0] step:1021/10000 train_time:68001ms step_avg:66.60ms +[2025-09-11 14:18:43] [Rank 0] step:1041/10000 train_time:68646ms step_avg:65.94ms +[2025-09-11 14:18:43] [Rank 0] step:1041/10000 train_time:68646ms step_avg:65.94ms +[2025-09-11 14:18:44] [Rank 0] step:1061/10000 train_time:69291ms step_avg:65.31ms +[2025-09-11 14:18:44] [Rank 0] step:1061/10000 train_time:69291ms step_avg:65.31ms +[2025-09-11 14:18:45] [Rank 0] step:1081/10000 train_time:69936ms step_avg:64.70ms +[2025-09-11 14:18:45] [Rank 0] step:1081/10000 train_time:69936ms step_avg:64.70ms +[2025-09-11 14:18:45] [Rank 0] step:1101/10000 train_time:70582ms step_avg:64.11ms +[2025-09-11 14:18:45] [Rank 0] step:1101/10000 train_time:70582ms step_avg:64.11ms +[2025-09-11 14:18:46] [Rank 0] step:1121/10000 train_time:71227ms step_avg:63.54ms +[2025-09-11 14:18:46] [Rank 0] step:1121/10000 train_time:71227ms step_avg:63.54ms +[2025-09-11 14:18:47] [Rank 0] step:1141/10000 train_time:71874ms step_avg:62.99ms +[2025-09-11 14:18:47] [Rank 0] step:1141/10000 train_time:71874ms step_avg:62.99ms +[2025-09-11 14:18:47] 
[Rank 0] step:1161/10000 train_time:72519ms step_avg:62.46ms +[2025-09-11 14:18:47] [Rank 0] step:1161/10000 train_time:72519ms step_avg:62.46ms +[2025-09-11 14:18:48] [Rank 0] step:1181/10000 train_time:73164ms step_avg:61.95ms +[2025-09-11 14:18:48] [Rank 0] step:1181/10000 train_time:73164ms step_avg:61.95ms +[2025-09-11 14:18:48] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 14:18:48] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 14:18:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 14:18:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 14:18:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 14:18:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 14:18:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:18:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:18:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 14:18:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 14:18:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 14:18:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 14:18:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 14:18:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 14:18:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... 
+[2025-09-11 14:18:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 14:18:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 14:18:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 14:18:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 14:18:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 14:18:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 14:18:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 14:18:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 14:18:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 14:18:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 14:18:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 14:18:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 14:18:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 14:18:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 14:18:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 14:18:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 14:18:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 14:18:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 14:18:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 14:18:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... 
+[2025-09-11 14:18:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 14:18:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 14:18:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 14:18:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 14:18:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 14:18:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 14:18:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 14:18:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 14:18:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 14:18:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 14:18:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 14:18:59] [Rank 0] PRINT: step:1200/10000 val_loss:5.2159 total_sharp:3.1797e-04 L1_sharp:3.1614e-02 L2_sharp:2.9601e-02 L3_sharp:3.0127e-02 L4_sharp:3.4782e-02 L5_sharp:4.4625e-02 L6_sharp:4.6213e-02 L7_sharp:5.5127e-02 L8_sharp:7.2963e-02 L9_sharp:8.5730e-02 L10_sharp:1.1815e-01 L11_sharp:2.0611e-01 L12_sharp:1.1124e+00 total_fnorm:7.8000e+01 total_l1_linf:1.5974e+05 total_spectral:3.9000e+01 L1_fnorm:2.5000e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4707e-01 L6_fnorm:2.4805e-01 L7_fnorm:2.4902e-01 L8_fnorm:2.4902e-01 L9_fnorm:2.5000e-01 L10_fnorm:2.5195e-01 L11_fnorm:2.5391e-01 L12_fnorm:2.5195e-01 L1_l1linf:7.9590e-02 L2_l1linf:7.8125e-02 L3_l1linf:7.8125e-02 L4_l1linf:7.8125e-02 L5_l1linf:7.6172e-02 L6_l1linf:7.7148e-02 L7_l1linf:7.7148e-02 L8_l1linf:7.7637e-02 L9_l1linf:7.9102e-02 L10_l1linf:8.0078e-02 L11_l1linf:8.1543e-02 L12_l1linf:8.3008e-02 L1_spectral:3.1329e-03 L2_spectral:3.1118e-03 L3_spectral:3.1373e-03 L4_spectral:3.1449e-03 L5_spectral:3.1475e-03 L6_spectral:3.1427e-03 L7_spectral:3.1408e-03 L8_spectral:3.1339e-03 L9_spectral:3.1188e-03 L10_spectral:3.1181e-03 L11_spectral:3.1141e-03 L12_spectral:3.1082e-03 train_time:73792ms step_avg:61.49ms +[2025-09-11 14:18:59] [Rank 0] PRINT: step:1200/10000 val_loss:5.2159 total_sharp:3.1797e-04 L1_sharp:3.1614e-02 L2_sharp:2.9601e-02 L3_sharp:3.0127e-02 L4_sharp:3.4782e-02 L5_sharp:4.4625e-02 L6_sharp:4.6213e-02 L7_sharp:5.5127e-02 L8_sharp:7.2963e-02 L9_sharp:8.5730e-02 L10_sharp:1.1815e-01 L11_sharp:2.0611e-01 L12_sharp:1.1124e+00 total_fnorm:7.8000e+01 total_l1_linf:1.5974e+05 total_spectral:3.9000e+01 L1_fnorm:2.5000e-01 L2_fnorm:2.5000e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4707e-01 L6_fnorm:2.4805e-01 L7_fnorm:2.4902e-01 L8_fnorm:2.4902e-01 L9_fnorm:2.5000e-01 L10_fnorm:2.5195e-01 L11_fnorm:2.5391e-01 L12_fnorm:2.5195e-01 L1_l1linf:7.9590e-02 L2_l1linf:7.8125e-02 L3_l1linf:7.8125e-02 L4_l1linf:7.8125e-02 L5_l1linf:7.6172e-02 
L6_l1linf:7.7148e-02 L7_l1linf:7.7148e-02 L8_l1linf:7.7637e-02 L9_l1linf:7.9102e-02 L10_l1linf:8.0078e-02 L11_l1linf:8.1543e-02 L12_l1linf:8.3008e-02 L1_spectral:3.1329e-03 L2_spectral:3.1118e-03 L3_spectral:3.1373e-03 L4_spectral:3.1449e-03 L5_spectral:3.1475e-03 L6_spectral:3.1427e-03 L7_spectral:3.1408e-03 L8_spectral:3.1339e-03 L9_spectral:3.1188e-03 L10_spectral:3.1181e-03 L11_spectral:3.1141e-03 L12_spectral:3.1082e-03 train_time:73792ms step_avg:61.49ms +[2025-09-11 14:19:00] [Rank 0] step:1201/10000 train_time:75312ms step_avg:62.71ms +[2025-09-11 14:19:00] [Rank 0] step:1201/10000 train_time:75312ms step_avg:62.71ms +[2025-09-11 14:19:01] [Rank 0] step:1221/10000 train_time:75962ms step_avg:62.21ms +[2025-09-11 14:19:01] [Rank 0] step:1221/10000 train_time:75962ms step_avg:62.21ms +[2025-09-11 14:19:02] [Rank 0] step:1241/10000 train_time:76609ms step_avg:61.73ms +[2025-09-11 14:19:02] [Rank 0] step:1241/10000 train_time:76609ms step_avg:61.73ms +[2025-09-11 14:19:02] [Rank 0] step:1261/10000 train_time:77257ms step_avg:61.27ms +[2025-09-11 14:19:02] [Rank 0] step:1261/10000 train_time:77257ms step_avg:61.27ms +[2025-09-11 14:19:03] [Rank 0] step:1281/10000 train_time:77904ms step_avg:60.82ms +[2025-09-11 14:19:03] [Rank 0] step:1281/10000 train_time:77904ms step_avg:60.82ms +[2025-09-11 14:19:04] [Rank 0] step:1301/10000 train_time:78551ms step_avg:60.38ms +[2025-09-11 14:19:04] [Rank 0] step:1301/10000 train_time:78551ms step_avg:60.38ms +[2025-09-11 14:19:04] [Rank 0] step:1321/10000 train_time:79197ms step_avg:59.95ms +[2025-09-11 14:19:04] [Rank 0] step:1321/10000 train_time:79197ms step_avg:59.95ms +[2025-09-11 14:19:05] [Rank 0] step:1341/10000 train_time:79843ms step_avg:59.54ms +[2025-09-11 14:19:05] [Rank 0] step:1341/10000 train_time:79843ms step_avg:59.54ms +[2025-09-11 14:19:06] [Rank 0] step:1361/10000 train_time:80489ms step_avg:59.14ms +[2025-09-11 14:19:06] [Rank 0] step:1361/10000 train_time:80489ms step_avg:59.14ms +[2025-09-11 14:19:06] 
[Rank 0] step:1381/10000 train_time:81136ms step_avg:58.75ms +[2025-09-11 14:19:06] [Rank 0] step:1381/10000 train_time:81136ms step_avg:58.75ms +[2025-09-11 14:19:07] [Rank 0] step:1401/10000 train_time:81782ms step_avg:58.37ms +[2025-09-11 14:19:07] [Rank 0] step:1401/10000 train_time:81782ms step_avg:58.37ms +[2025-09-11 14:19:07] [Rank 0] step:1421/10000 train_time:82429ms step_avg:58.01ms +[2025-09-11 14:19:07] [Rank 0] step:1421/10000 train_time:82429ms step_avg:58.01ms +[2025-09-11 14:19:08] [Rank 0] step:1441/10000 train_time:83075ms step_avg:57.65ms +[2025-09-11 14:19:08] [Rank 0] step:1441/10000 train_time:83075ms step_avg:57.65ms +[2025-09-11 14:19:09] [Rank 0] step:1461/10000 train_time:83721ms step_avg:57.30ms +[2025-09-11 14:19:09] [Rank 0] step:1461/10000 train_time:83721ms step_avg:57.30ms +[2025-09-11 14:19:09] [Rank 0] step:1481/10000 train_time:84367ms step_avg:56.97ms +[2025-09-11 14:19:09] [Rank 0] step:1481/10000 train_time:84367ms step_avg:56.97ms +[2025-09-11 14:19:10] [Rank 0] step:1501/10000 train_time:85017ms step_avg:56.64ms +[2025-09-11 14:19:10] [Rank 0] step:1501/10000 train_time:85017ms step_avg:56.64ms +[2025-09-11 14:19:11] [Rank 0] step:1521/10000 train_time:85666ms step_avg:56.32ms +[2025-09-11 14:19:11] [Rank 0] step:1521/10000 train_time:85666ms step_avg:56.32ms +[2025-09-11 14:19:11] [Rank 0] step:1541/10000 train_time:86317ms step_avg:56.01ms +[2025-09-11 14:19:11] [Rank 0] step:1541/10000 train_time:86317ms step_avg:56.01ms +[2025-09-11 14:19:12] [Rank 0] step:1561/10000 train_time:86968ms step_avg:55.71ms +[2025-09-11 14:19:12] [Rank 0] step:1561/10000 train_time:86968ms step_avg:55.71ms +[2025-09-11 14:19:13] [Rank 0] step:1581/10000 train_time:87619ms step_avg:55.42ms +[2025-09-11 14:19:13] [Rank 0] step:1581/10000 train_time:87619ms step_avg:55.42ms +[2025-09-11 14:19:13] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 14:19:13] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 14:19:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 14:19:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 14:19:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 14:19:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 14:19:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:19:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:19:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 14:19:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 14:19:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 14:19:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 14:19:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 14:19:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 14:19:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 14:19:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 14:19:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 14:19:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 14:19:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 14:19:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 14:19:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 14:19:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 14:19:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 14:19:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 14:19:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 14:19:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 14:19:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 14:19:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 14:19:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 14:19:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 14:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 14:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 14:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 14:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 14:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 14:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 14:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 14:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 14:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 14:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 14:19:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 14:19:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 14:19:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 14:19:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 14:19:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 14:19:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 14:19:24] [Rank 0] PRINT: step:1600/10000 val_loss:5.0550 total_sharp:2.3457e-04 L1_sharp:1.1951e-02 L2_sharp:1.0011e-02 L3_sharp:1.1535e-02 L4_sharp:1.2723e-02 L5_sharp:2.4205e-02 L6_sharp:2.6726e-02 L7_sharp:3.3539e-02 L8_sharp:6.8242e-02 L9_sharp:5.6644e-02 L10_sharp:7.3032e-02 L11_sharp:1.2938e-01 L12_sharp:6.8641e-01 total_fnorm:7.5000e+01 total_l1_linf:1.4746e+05 total_spectral:3.7500e+01 L1_fnorm:2.5000e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4805e-01 L8_fnorm:2.4707e-01 L9_fnorm:2.5000e-01 L10_fnorm:2.5000e-01 L11_fnorm:2.5195e-01 L12_fnorm:2.5391e-01 L1_l1linf:7.5195e-02 L2_l1linf:7.4219e-02 L3_l1linf:7.3730e-02 L4_l1linf:7.4219e-02 L5_l1linf:7.3242e-02 L6_l1linf:7.3730e-02 L7_l1linf:7.3242e-02 L8_l1linf:7.3730e-02 L9_l1linf:7.4219e-02 L10_l1linf:7.5684e-02 L11_l1linf:7.7637e-02 L12_l1linf:8.0078e-02 L1_spectral:3.1435e-03 L2_spectral:3.1233e-03 L3_spectral:3.1369e-03 L4_spectral:3.1200e-03 L5_spectral:3.1481e-03 L6_spectral:3.1424e-03 L7_spectral:3.1495e-03 L8_spectral:3.1669e-03 L9_spectral:3.1560e-03 L10_spectral:3.1395e-03 L11_spectral:3.1463e-03 L12_spectral:3.1320e-03 train_time:88251ms step_avg:55.16ms +[2025-09-11 14:19:24] [Rank 0] PRINT: step:1600/10000 
val_loss:5.0550 total_sharp:2.3457e-04 L1_sharp:1.1951e-02 L2_sharp:1.0011e-02 L3_sharp:1.1535e-02 L4_sharp:1.2723e-02 L5_sharp:2.4205e-02 L6_sharp:2.6726e-02 L7_sharp:3.3539e-02 L8_sharp:6.8242e-02 L9_sharp:5.6644e-02 L10_sharp:7.3032e-02 L11_sharp:1.2938e-01 L12_sharp:6.8641e-01 total_fnorm:7.5000e+01 total_l1_linf:1.4746e+05 total_spectral:3.7500e+01 L1_fnorm:2.5000e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4805e-01 L8_fnorm:2.4707e-01 L9_fnorm:2.5000e-01 L10_fnorm:2.5000e-01 L11_fnorm:2.5195e-01 L12_fnorm:2.5391e-01 L1_l1linf:7.5195e-02 L2_l1linf:7.4219e-02 L3_l1linf:7.3730e-02 L4_l1linf:7.4219e-02 L5_l1linf:7.3242e-02 L6_l1linf:7.3730e-02 L7_l1linf:7.3242e-02 L8_l1linf:7.3730e-02 L9_l1linf:7.4219e-02 L10_l1linf:7.5684e-02 L11_l1linf:7.7637e-02 L12_l1linf:8.0078e-02 L1_spectral:3.1435e-03 L2_spectral:3.1233e-03 L3_spectral:3.1369e-03 L4_spectral:3.1200e-03 L5_spectral:3.1481e-03 L6_spectral:3.1424e-03 L7_spectral:3.1495e-03 L8_spectral:3.1669e-03 L9_spectral:3.1560e-03 L10_spectral:3.1395e-03 L11_spectral:3.1463e-03 L12_spectral:3.1320e-03 train_time:88251ms step_avg:55.16ms +[2025-09-11 14:19:25] [Rank 0] step:1601/10000 train_time:89778ms step_avg:56.08ms +[2025-09-11 14:19:25] [Rank 0] step:1601/10000 train_time:89778ms step_avg:56.08ms +[2025-09-11 14:19:26] [Rank 0] step:1621/10000 train_time:90433ms step_avg:55.79ms +[2025-09-11 14:19:26] [Rank 0] step:1621/10000 train_time:90433ms step_avg:55.79ms +[2025-09-11 14:19:27] [Rank 0] step:1641/10000 train_time:91088ms step_avg:55.51ms +[2025-09-11 14:19:27] [Rank 0] step:1641/10000 train_time:91088ms step_avg:55.51ms +[2025-09-11 14:19:27] [Rank 0] step:1661/10000 train_time:91741ms step_avg:55.23ms +[2025-09-11 14:19:27] [Rank 0] step:1661/10000 train_time:91741ms step_avg:55.23ms +[2025-09-11 14:19:28] [Rank 0] step:1681/10000 train_time:92395ms step_avg:54.96ms +[2025-09-11 14:19:28] [Rank 0] step:1681/10000 train_time:92395ms 
step_avg:54.96ms +[2025-09-11 14:19:28] [Rank 0] step:1701/10000 train_time:93046ms step_avg:54.70ms +[2025-09-11 14:19:28] [Rank 0] step:1701/10000 train_time:93046ms step_avg:54.70ms +[2025-09-11 14:19:29] [Rank 0] step:1721/10000 train_time:93699ms step_avg:54.44ms +[2025-09-11 14:19:29] [Rank 0] step:1721/10000 train_time:93699ms step_avg:54.44ms +[2025-09-11 14:19:30] [Rank 0] step:1741/10000 train_time:94352ms step_avg:54.19ms +[2025-09-11 14:19:30] [Rank 0] step:1741/10000 train_time:94352ms step_avg:54.19ms +[2025-09-11 14:19:30] [Rank 0] step:1761/10000 train_time:95004ms step_avg:53.95ms +[2025-09-11 14:19:30] [Rank 0] step:1761/10000 train_time:95004ms step_avg:53.95ms +[2025-09-11 14:19:31] [Rank 0] step:1781/10000 train_time:95655ms step_avg:53.71ms +[2025-09-11 14:19:31] [Rank 0] step:1781/10000 train_time:95655ms step_avg:53.71ms +[2025-09-11 14:19:32] [Rank 0] step:1801/10000 train_time:96307ms step_avg:53.47ms +[2025-09-11 14:19:32] [Rank 0] step:1801/10000 train_time:96307ms step_avg:53.47ms +[2025-09-11 14:19:32] [Rank 0] step:1821/10000 train_time:96959ms step_avg:53.24ms +[2025-09-11 14:19:32] [Rank 0] step:1821/10000 train_time:96959ms step_avg:53.24ms +[2025-09-11 14:19:33] [Rank 0] step:1841/10000 train_time:97610ms step_avg:53.02ms +[2025-09-11 14:19:33] [Rank 0] step:1841/10000 train_time:97610ms step_avg:53.02ms +[2025-09-11 14:19:34] [Rank 0] step:1861/10000 train_time:98261ms step_avg:52.80ms +[2025-09-11 14:19:34] [Rank 0] step:1861/10000 train_time:98261ms step_avg:52.80ms +[2025-09-11 14:19:34] [Rank 0] step:1881/10000 train_time:98912ms step_avg:52.59ms +[2025-09-11 14:19:34] [Rank 0] step:1881/10000 train_time:98912ms step_avg:52.59ms +[2025-09-11 14:19:35] [Rank 0] step:1901/10000 train_time:99565ms step_avg:52.37ms +[2025-09-11 14:19:35] [Rank 0] step:1901/10000 train_time:99565ms step_avg:52.37ms +[2025-09-11 14:19:36] [Rank 0] step:1921/10000 train_time:100216ms step_avg:52.17ms +[2025-09-11 14:19:36] [Rank 0] step:1921/10000 
train_time:100216ms step_avg:52.17ms +[2025-09-11 14:19:36] [Rank 0] step:1941/10000 train_time:100867ms step_avg:51.97ms +[2025-09-11 14:19:36] [Rank 0] step:1941/10000 train_time:100867ms step_avg:51.97ms +[2025-09-11 14:19:37] [Rank 0] step:1961/10000 train_time:101519ms step_avg:51.77ms +[2025-09-11 14:19:37] [Rank 0] step:1961/10000 train_time:101519ms step_avg:51.77ms +[2025-09-11 14:19:38] [Rank 0] step:1981/10000 train_time:102171ms step_avg:51.58ms +[2025-09-11 14:19:38] [Rank 0] step:1981/10000 train_time:102171ms step_avg:51.58ms +[2025-09-11 14:19:38] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 14:19:38] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 14:19:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 14:19:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 14:19:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 14:19:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 14:19:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:19:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:19:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 14:19:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 14:19:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 14:19:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 14:19:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 14:19:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 14:19:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 14:19:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 14:19:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 14:19:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 14:19:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 14:19:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 14:19:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 14:19:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 14:19:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 14:19:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 14:19:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 14:19:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 14:19:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 14:19:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 14:19:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 14:19:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 14:19:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 14:19:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 14:19:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 14:19:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 14:19:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 14:19:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 14:19:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 14:19:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 14:19:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 14:19:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 14:19:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 14:19:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 14:19:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 14:19:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 14:19:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 14:19:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 14:19:49] [Rank 0] PRINT: step:2000/10000 val_loss:4.8854 total_sharp:2.0910e-04 L1_sharp:1.4559e-02 L2_sharp:1.0788e-02 L3_sharp:1.1470e-02 L4_sharp:2.0150e-02 L5_sharp:2.3759e-02 L6_sharp:2.6242e-02 L7_sharp:3.4483e-02 L8_sharp:6.9432e-02 L9_sharp:6.0726e-02 L10_sharp:8.4091e-02 L11_sharp:1.6710e-01 L12_sharp:1.7895e+00 total_fnorm:7.7000e+01 total_l1_linf:1.5462e+05 total_spectral:3.8500e+01 L1_fnorm:2.5000e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4805e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4707e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4805e-01 L8_fnorm:2.4512e-01 L9_fnorm:2.4902e-01 L10_fnorm:2.5000e-01 L11_fnorm:2.5195e-01 L12_fnorm:2.5586e-01 L1_l1linf:7.2266e-02 L2_l1linf:7.2266e-02 L3_l1linf:7.2266e-02 L4_l1linf:7.1777e-02 L5_l1linf:7.1777e-02 L6_l1linf:7.1777e-02 L7_l1linf:7.0801e-02 L8_l1linf:7.0801e-02 L9_l1linf:7.1289e-02 L10_l1linf:7.1777e-02 L11_l1linf:7.3730e-02 L12_l1linf:7.7148e-02 L1_spectral:3.1493e-03 L2_spectral:3.1373e-03 L3_spectral:3.1409e-03 L4_spectral:3.1433e-03 L5_spectral:3.1361e-03 L6_spectral:3.1447e-03 L7_spectral:3.1356e-03 L8_spectral:3.1925e-03 L9_spectral:3.1638e-03 L10_spectral:3.1715e-03 L11_spectral:3.1738e-03 L12_spectral:3.1583e-03 train_time:102804ms step_avg:51.40ms +[2025-09-11 14:19:49] [Rank 0] PRINT: step:2000/10000 val_loss:4.8854 total_sharp:2.0910e-04 L1_sharp:1.4559e-02 L2_sharp:1.0788e-02 L3_sharp:1.1470e-02 L4_sharp:2.0150e-02 L5_sharp:2.3759e-02 L6_sharp:2.6242e-02 L7_sharp:3.4483e-02 L8_sharp:6.9432e-02 L9_sharp:6.0726e-02 L10_sharp:8.4091e-02 L11_sharp:1.6710e-01 L12_sharp:1.7895e+00 total_fnorm:7.7000e+01 total_l1_linf:1.5462e+05 total_spectral:3.8500e+01 L1_fnorm:2.5000e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4805e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4707e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4805e-01 L8_fnorm:2.4512e-01 L9_fnorm:2.4902e-01 L10_fnorm:2.5000e-01 L11_fnorm:2.5195e-01 L12_fnorm:2.5586e-01 L1_l1linf:7.2266e-02 L2_l1linf:7.2266e-02 L3_l1linf:7.2266e-02 L4_l1linf:7.1777e-02 L5_l1linf:7.1777e-02 
L6_l1linf:7.1777e-02 L7_l1linf:7.0801e-02 L8_l1linf:7.0801e-02 L9_l1linf:7.1289e-02 L10_l1linf:7.1777e-02 L11_l1linf:7.3730e-02 L12_l1linf:7.7148e-02 L1_spectral:3.1493e-03 L2_spectral:3.1373e-03 L3_spectral:3.1409e-03 L4_spectral:3.1433e-03 L5_spectral:3.1361e-03 L6_spectral:3.1447e-03 L7_spectral:3.1356e-03 L8_spectral:3.1925e-03 L9_spectral:3.1638e-03 L10_spectral:3.1715e-03 L11_spectral:3.1738e-03 L12_spectral:3.1583e-03 train_time:102804ms step_avg:51.40ms +[2025-09-11 14:19:50] [Rank 0] step:2001/10000 train_time:104314ms step_avg:52.13ms +[2025-09-11 14:19:50] [Rank 0] step:2001/10000 train_time:104314ms step_avg:52.13ms +[2025-09-11 14:19:51] [Rank 0] step:2021/10000 train_time:104969ms step_avg:51.94ms +[2025-09-11 14:19:51] [Rank 0] step:2021/10000 train_time:104969ms step_avg:51.94ms +[2025-09-11 14:19:52] [Rank 0] step:2041/10000 train_time:105620ms step_avg:51.75ms +[2025-09-11 14:19:52] [Rank 0] step:2041/10000 train_time:105620ms step_avg:51.75ms +[2025-09-11 14:19:52] [Rank 0] step:2061/10000 train_time:106272ms step_avg:51.56ms +[2025-09-11 14:19:52] [Rank 0] step:2061/10000 train_time:106272ms step_avg:51.56ms +[2025-09-11 14:19:53] [Rank 0] step:2081/10000 train_time:106923ms step_avg:51.38ms +[2025-09-11 14:19:53] [Rank 0] step:2081/10000 train_time:106923ms step_avg:51.38ms +[2025-09-11 14:19:54] [Rank 0] step:2101/10000 train_time:107575ms step_avg:51.20ms +[2025-09-11 14:19:54] [Rank 0] step:2101/10000 train_time:107575ms step_avg:51.20ms +[2025-09-11 14:19:54] [Rank 0] step:2121/10000 train_time:108226ms step_avg:51.03ms +[2025-09-11 14:19:54] [Rank 0] step:2121/10000 train_time:108226ms step_avg:51.03ms +[2025-09-11 14:19:55] [Rank 0] step:2141/10000 train_time:108877ms step_avg:50.85ms +[2025-09-11 14:19:55] [Rank 0] step:2141/10000 train_time:108877ms step_avg:50.85ms +[2025-09-11 14:19:56] [Rank 0] step:2161/10000 train_time:109529ms step_avg:50.68ms +[2025-09-11 14:19:56] [Rank 0] step:2161/10000 train_time:109529ms step_avg:50.68ms 
+[2025-09-11 14:19:56] [Rank 0] step:2181/10000 train_time:110180ms step_avg:50.52ms +[2025-09-11 14:19:56] [Rank 0] step:2181/10000 train_time:110180ms step_avg:50.52ms +[2025-09-11 14:19:57] [Rank 0] step:2201/10000 train_time:110831ms step_avg:50.35ms +[2025-09-11 14:19:57] [Rank 0] step:2201/10000 train_time:110831ms step_avg:50.35ms +[2025-09-11 14:19:58] [Rank 0] step:2221/10000 train_time:111482ms step_avg:50.19ms +[2025-09-11 14:19:58] [Rank 0] step:2221/10000 train_time:111482ms step_avg:50.19ms +[2025-09-11 14:19:58] [Rank 0] step:2241/10000 train_time:112145ms step_avg:50.04ms +[2025-09-11 14:19:58] [Rank 0] step:2241/10000 train_time:112145ms step_avg:50.04ms +[2025-09-11 14:19:59] [Rank 0] step:2261/10000 train_time:112810ms step_avg:49.89ms +[2025-09-11 14:19:59] [Rank 0] step:2261/10000 train_time:112810ms step_avg:49.89ms +[2025-09-11 14:20:00] [Rank 0] step:2281/10000 train_time:113474ms step_avg:49.75ms +[2025-09-11 14:20:00] [Rank 0] step:2281/10000 train_time:113474ms step_avg:49.75ms +[2025-09-11 14:20:00] [Rank 0] step:2301/10000 train_time:114138ms step_avg:49.60ms +[2025-09-11 14:20:00] [Rank 0] step:2301/10000 train_time:114138ms step_avg:49.60ms +[2025-09-11 14:20:01] [Rank 0] step:2321/10000 train_time:114860ms step_avg:49.49ms +[2025-09-11 14:20:01] [Rank 0] step:2321/10000 train_time:114860ms step_avg:49.49ms +[2025-09-11 14:20:02] [Rank 0] step:2341/10000 train_time:115526ms step_avg:49.35ms +[2025-09-11 14:20:02] [Rank 0] step:2341/10000 train_time:115526ms step_avg:49.35ms +[2025-09-11 14:20:02] [Rank 0] step:2361/10000 train_time:116241ms step_avg:49.23ms +[2025-09-11 14:20:02] [Rank 0] step:2361/10000 train_time:116241ms step_avg:49.23ms +[2025-09-11 14:20:03] [Rank 0] step:2381/10000 train_time:116906ms step_avg:49.10ms +[2025-09-11 14:20:03] [Rank 0] step:2381/10000 train_time:116906ms step_avg:49.10ms +[2025-09-11 14:20:04] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 14:20:04] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 14:20:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 14:20:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 14:20:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 14:20:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 14:20:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:20:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:20:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 14:20:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 14:20:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 14:20:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 14:20:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 14:20:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 14:20:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 14:20:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 14:20:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 14:20:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 14:20:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 14:20:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 14:20:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 14:20:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 14:20:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 14:20:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 14:20:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 14:20:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 14:20:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 14:20:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 14:20:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 14:20:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 14:20:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 14:20:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 14:20:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 14:20:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 14:20:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 14:20:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 14:20:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 14:20:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 14:20:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 14:20:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 14:20:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 14:20:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 14:20:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 14:20:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 14:20:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 14:20:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 14:20:17] [Rank 0] PRINT: step:2400/10000 val_loss:4.7378 total_sharp:2.3393e-04 L1_sharp:7.1539e-03 L2_sharp:5.5457e-03 L3_sharp:9.7844e-03 L4_sharp:8.3358e-03 L5_sharp:1.9534e-02 L6_sharp:2.5013e-02 L7_sharp:3.5109e-02 L8_sharp:6.6327e-02 L9_sharp:6.1920e-02 L10_sharp:8.9816e-02 L11_sharp:1.5230e-01 L12_sharp:1.4929e+00 total_fnorm:7.2500e+01 total_l1_linf:1.3926e+05 total_spectral:3.6250e+01 L1_fnorm:2.4902e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4805e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4707e-01 L8_fnorm:2.4414e-01 L9_fnorm:2.4805e-01 L10_fnorm:2.4902e-01 L11_fnorm:2.5000e-01 L12_fnorm:2.5586e-01 L1_l1linf:7.0801e-02 L2_l1linf:7.0801e-02 L3_l1linf:6.9824e-02 L4_l1linf:7.1289e-02 L5_l1linf:6.9824e-02 L6_l1linf:6.9336e-02 L7_l1linf:6.9824e-02 L8_l1linf:6.8359e-02 L9_l1linf:6.8359e-02 L10_l1linf:6.9336e-02 L11_l1linf:7.0312e-02 L12_l1linf:7.5684e-02 L1_spectral:3.1751e-03 L2_spectral:3.1533e-03 L3_spectral:3.1550e-03 L4_spectral:3.1474e-03 L5_spectral:3.1585e-03 L6_spectral:3.1706e-03 L7_spectral:3.1573e-03 L8_spectral:3.1853e-03 L9_spectral:3.1601e-03 L10_spectral:3.1772e-03 L11_spectral:3.1675e-03 L12_spectral:3.1902e-03 train_time:117551ms step_avg:48.98ms +[2025-09-11 14:20:17] [Rank 0] PRINT: step:2400/10000 
val_loss:4.7378 total_sharp:2.3393e-04 L1_sharp:7.1539e-03 L2_sharp:5.5457e-03 L3_sharp:9.7844e-03 L4_sharp:8.3358e-03 L5_sharp:1.9534e-02 L6_sharp:2.5013e-02 L7_sharp:3.5109e-02 L8_sharp:6.6327e-02 L9_sharp:6.1920e-02 L10_sharp:8.9816e-02 L11_sharp:1.5230e-01 L12_sharp:1.4929e+00 total_fnorm:7.2500e+01 total_l1_linf:1.3926e+05 total_spectral:3.6250e+01 L1_fnorm:2.4902e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4805e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4707e-01 L8_fnorm:2.4414e-01 L9_fnorm:2.4805e-01 L10_fnorm:2.4902e-01 L11_fnorm:2.5000e-01 L12_fnorm:2.5586e-01 L1_l1linf:7.0801e-02 L2_l1linf:7.0801e-02 L3_l1linf:6.9824e-02 L4_l1linf:7.1289e-02 L5_l1linf:6.9824e-02 L6_l1linf:6.9336e-02 L7_l1linf:6.9824e-02 L8_l1linf:6.8359e-02 L9_l1linf:6.8359e-02 L10_l1linf:6.9336e-02 L11_l1linf:7.0312e-02 L12_l1linf:7.5684e-02 L1_spectral:3.1751e-03 L2_spectral:3.1533e-03 L3_spectral:3.1550e-03 L4_spectral:3.1474e-03 L5_spectral:3.1585e-03 L6_spectral:3.1706e-03 L7_spectral:3.1573e-03 L8_spectral:3.1853e-03 L9_spectral:3.1601e-03 L10_spectral:3.1772e-03 L11_spectral:3.1675e-03 L12_spectral:3.1902e-03 train_time:117551ms step_avg:48.98ms +[2025-09-11 14:20:18] [Rank 0] step:2401/10000 train_time:119061ms step_avg:49.59ms +[2025-09-11 14:20:18] [Rank 0] step:2401/10000 train_time:119061ms step_avg:49.59ms +[2025-09-11 14:20:19] [Rank 0] step:2421/10000 train_time:119730ms step_avg:49.45ms +[2025-09-11 14:20:19] [Rank 0] step:2421/10000 train_time:119730ms step_avg:49.45ms +[2025-09-11 14:20:20] [Rank 0] step:2441/10000 train_time:120395ms step_avg:49.32ms +[2025-09-11 14:20:20] [Rank 0] step:2441/10000 train_time:120395ms step_avg:49.32ms +[2025-09-11 14:20:20] [Rank 0] step:2461/10000 train_time:121061ms step_avg:49.19ms +[2025-09-11 14:20:20] [Rank 0] step:2461/10000 train_time:121061ms step_avg:49.19ms +[2025-09-11 14:20:21] [Rank 0] step:2481/10000 train_time:121726ms step_avg:49.06ms +[2025-09-11 14:20:21] [Rank 0] step:2481/10000 
train_time:121726ms step_avg:49.06ms +[2025-09-11 14:20:22] [Rank 0] step:2501/10000 train_time:122391ms step_avg:48.94ms +[2025-09-11 14:20:22] [Rank 0] step:2501/10000 train_time:122391ms step_avg:48.94ms +[2025-09-11 14:20:22] [Rank 0] step:2521/10000 train_time:123056ms step_avg:48.81ms +[2025-09-11 14:20:22] [Rank 0] step:2521/10000 train_time:123056ms step_avg:48.81ms +[2025-09-11 14:20:23] [Rank 0] step:2541/10000 train_time:123721ms step_avg:48.69ms +[2025-09-11 14:20:23] [Rank 0] step:2541/10000 train_time:123721ms step_avg:48.69ms +[2025-09-11 14:20:24] [Rank 0] step:2561/10000 train_time:124385ms step_avg:48.57ms +[2025-09-11 14:20:24] [Rank 0] step:2561/10000 train_time:124385ms step_avg:48.57ms +[2025-09-11 14:20:24] [Rank 0] step:2581/10000 train_time:125050ms step_avg:48.45ms +[2025-09-11 14:20:24] [Rank 0] step:2581/10000 train_time:125050ms step_avg:48.45ms +[2025-09-11 14:20:25] [Rank 0] step:2601/10000 train_time:125716ms step_avg:48.33ms +[2025-09-11 14:20:25] [Rank 0] step:2601/10000 train_time:125716ms step_avg:48.33ms +[2025-09-11 14:20:26] [Rank 0] step:2621/10000 train_time:126381ms step_avg:48.22ms +[2025-09-11 14:20:26] [Rank 0] step:2621/10000 train_time:126381ms step_avg:48.22ms +[2025-09-11 14:20:26] [Rank 0] step:2641/10000 train_time:127046ms step_avg:48.11ms +[2025-09-11 14:20:26] [Rank 0] step:2641/10000 train_time:127046ms step_avg:48.11ms +[2025-09-11 14:20:27] [Rank 0] step:2661/10000 train_time:127713ms step_avg:47.99ms +[2025-09-11 14:20:27] [Rank 0] step:2661/10000 train_time:127713ms step_avg:47.99ms +[2025-09-11 14:20:28] [Rank 0] step:2681/10000 train_time:128378ms step_avg:47.88ms +[2025-09-11 14:20:28] [Rank 0] step:2681/10000 train_time:128378ms step_avg:47.88ms +[2025-09-11 14:20:28] [Rank 0] step:2701/10000 train_time:129044ms step_avg:47.78ms +[2025-09-11 14:20:28] [Rank 0] step:2701/10000 train_time:129044ms step_avg:47.78ms +[2025-09-11 14:20:29] [Rank 0] step:2721/10000 train_time:129709ms step_avg:47.67ms 
+[2025-09-11 14:20:29] [Rank 0] step:2721/10000 train_time:129709ms step_avg:47.67ms +[2025-09-11 14:20:30] [Rank 0] step:2741/10000 train_time:130376ms step_avg:47.57ms +[2025-09-11 14:20:30] [Rank 0] step:2741/10000 train_time:130376ms step_avg:47.57ms +[2025-09-11 14:20:30] [Rank 0] step:2761/10000 train_time:131041ms step_avg:47.46ms +[2025-09-11 14:20:30] [Rank 0] step:2761/10000 train_time:131041ms step_avg:47.46ms +[2025-09-11 14:20:31] [Rank 0] step:2781/10000 train_time:131706ms step_avg:47.36ms +[2025-09-11 14:20:31] [Rank 0] step:2781/10000 train_time:131706ms step_avg:47.36ms +[2025-09-11 14:20:32] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 14:20:32] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 14:20:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 14:20:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 14:20:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 14:20:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 14:20:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:20:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:20:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 14:20:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 14:20:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 14:20:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 14:20:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 14:20:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 14:20:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 14:20:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 14:20:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 14:20:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 14:20:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 14:20:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 14:20:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 14:20:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 14:20:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 14:20:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 14:20:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 14:20:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 14:20:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 14:20:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 14:20:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 14:20:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 14:20:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 14:20:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 14:20:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 14:20:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 14:20:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 14:20:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 14:20:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 14:20:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 14:20:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 14:20:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 14:20:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 14:20:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 14:20:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 14:20:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 14:20:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 14:20:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 14:20:42] [Rank 0] PRINT: step:2800/10000 val_loss:4.6616 total_sharp:1.9036e-04 L1_sharp:9.0330e-03 L2_sharp:6.3381e-03 L3_sharp:5.8547e-03 L4_sharp:1.1218e-02 L5_sharp:1.6116e-02 L6_sharp:2.2145e-02 L7_sharp:3.2474e-02 L8_sharp:5.1760e-02 L9_sharp:5.5671e-02 L10_sharp:7.0494e-02 L11_sharp:1.3266e-01 L12_sharp:3.9689e-01 total_fnorm:7.0500e+01 total_l1_linf:1.3517e+05 total_spectral:3.5250e+01 L1_fnorm:2.4902e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4707e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4707e-01 L8_fnorm:2.4414e-01 L9_fnorm:2.4707e-01 L10_fnorm:2.4805e-01 L11_fnorm:2.4902e-01 L12_fnorm:2.5391e-01 L1_l1linf:6.9824e-02 L2_l1linf:6.9336e-02 L3_l1linf:6.8848e-02 L4_l1linf:6.8848e-02 L5_l1linf:6.8848e-02 L6_l1linf:6.7383e-02 L7_l1linf:6.7383e-02 L8_l1linf:6.6406e-02 L9_l1linf:6.6895e-02 L10_l1linf:6.6406e-02 L11_l1linf:6.8848e-02 L12_l1linf:7.1777e-02 L1_spectral:3.1646e-03 L2_spectral:3.1683e-03 L3_spectral:3.1671e-03 L4_spectral:3.1410e-03 L5_spectral:3.1855e-03 L6_spectral:3.1488e-03 L7_spectral:3.1884e-03 L8_spectral:3.1972e-03 L9_spectral:3.1685e-03 L10_spectral:3.1878e-03 L11_spectral:3.2036e-03 L12_spectral:3.2080e-03 train_time:132352ms step_avg:47.27ms +[2025-09-11 14:20:42] [Rank 0] PRINT: step:2800/10000 val_loss:4.6616 total_sharp:1.9036e-04 L1_sharp:9.0330e-03 L2_sharp:6.3381e-03 L3_sharp:5.8547e-03 L4_sharp:1.1218e-02 L5_sharp:1.6116e-02 L6_sharp:2.2145e-02 L7_sharp:3.2474e-02 L8_sharp:5.1760e-02 L9_sharp:5.5671e-02 L10_sharp:7.0494e-02 L11_sharp:1.3266e-01 L12_sharp:3.9689e-01 total_fnorm:7.0500e+01 total_l1_linf:1.3517e+05 total_spectral:3.5250e+01 L1_fnorm:2.4902e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4707e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4707e-01 L8_fnorm:2.4414e-01 L9_fnorm:2.4707e-01 L10_fnorm:2.4805e-01 L11_fnorm:2.4902e-01 L12_fnorm:2.5391e-01 L1_l1linf:6.9824e-02 L2_l1linf:6.9336e-02 L3_l1linf:6.8848e-02 L4_l1linf:6.8848e-02 L5_l1linf:6.8848e-02 
L6_l1linf:6.7383e-02 L7_l1linf:6.7383e-02 L8_l1linf:6.6406e-02 L9_l1linf:6.6895e-02 L10_l1linf:6.6406e-02 L11_l1linf:6.8848e-02 L12_l1linf:7.1777e-02 L1_spectral:3.1646e-03 L2_spectral:3.1683e-03 L3_spectral:3.1671e-03 L4_spectral:3.1410e-03 L5_spectral:3.1855e-03 L6_spectral:3.1488e-03 L7_spectral:3.1884e-03 L8_spectral:3.1972e-03 L9_spectral:3.1685e-03 L10_spectral:3.1878e-03 L11_spectral:3.2036e-03 L12_spectral:3.2080e-03 train_time:132352ms step_avg:47.27ms +[2025-09-11 14:20:44] [Rank 0] step:2801/10000 train_time:134029ms step_avg:47.85ms +[2025-09-11 14:20:44] [Rank 0] step:2801/10000 train_time:134029ms step_avg:47.85ms +[2025-09-11 14:20:45] [Rank 0] step:2821/10000 train_time:134709ms step_avg:47.75ms +[2025-09-11 14:20:45] [Rank 0] step:2821/10000 train_time:134709ms step_avg:47.75ms +[2025-09-11 14:20:45] [Rank 0] step:2841/10000 train_time:135376ms step_avg:47.65ms +[2025-09-11 14:20:45] [Rank 0] step:2841/10000 train_time:135376ms step_avg:47.65ms +[2025-09-11 14:20:46] [Rank 0] step:2861/10000 train_time:136042ms step_avg:47.55ms +[2025-09-11 14:20:46] [Rank 0] step:2861/10000 train_time:136042ms step_avg:47.55ms +[2025-09-11 14:20:47] [Rank 0] step:2881/10000 train_time:136983ms step_avg:47.55ms +[2025-09-11 14:20:47] [Rank 0] step:2881/10000 train_time:136983ms step_avg:47.55ms +[2025-09-11 14:20:47] [Rank 0] step:2901/10000 train_time:137648ms step_avg:47.45ms +[2025-09-11 14:20:47] [Rank 0] step:2901/10000 train_time:137648ms step_avg:47.45ms +[2025-09-11 14:20:48] [Rank 0] step:2921/10000 train_time:138313ms step_avg:47.35ms +[2025-09-11 14:20:48] [Rank 0] step:2921/10000 train_time:138313ms step_avg:47.35ms +[2025-09-11 14:20:49] [Rank 0] step:2941/10000 train_time:138982ms step_avg:47.26ms +[2025-09-11 14:20:49] [Rank 0] step:2941/10000 train_time:138982ms step_avg:47.26ms +[2025-09-11 14:20:49] [Rank 0] step:2961/10000 train_time:139647ms step_avg:47.16ms +[2025-09-11 14:20:49] [Rank 0] step:2961/10000 train_time:139647ms step_avg:47.16ms 
+[2025-09-11 14:20:50] [Rank 0] step:2981/10000 train_time:140315ms step_avg:47.07ms +[2025-09-11 14:20:50] [Rank 0] step:2981/10000 train_time:140315ms step_avg:47.07ms +[2025-09-11 14:20:51] [Rank 0] step:3001/10000 train_time:140982ms step_avg:46.98ms +[2025-09-11 14:20:51] [Rank 0] step:3001/10000 train_time:140982ms step_avg:46.98ms +[2025-09-11 14:20:51] [Rank 0] step:3021/10000 train_time:141651ms step_avg:46.89ms +[2025-09-11 14:20:51] [Rank 0] step:3021/10000 train_time:141651ms step_avg:46.89ms +[2025-09-11 14:20:52] [Rank 0] step:3041/10000 train_time:142319ms step_avg:46.80ms +[2025-09-11 14:20:52] [Rank 0] step:3041/10000 train_time:142319ms step_avg:46.80ms +[2025-09-11 14:20:53] [Rank 0] step:3061/10000 train_time:142987ms step_avg:46.71ms +[2025-09-11 14:20:53] [Rank 0] step:3061/10000 train_time:142987ms step_avg:46.71ms +[2025-09-11 14:20:53] [Rank 0] step:3081/10000 train_time:143655ms step_avg:46.63ms +[2025-09-11 14:20:53] [Rank 0] step:3081/10000 train_time:143655ms step_avg:46.63ms +[2025-09-11 14:20:54] [Rank 0] step:3101/10000 train_time:144322ms step_avg:46.54ms +[2025-09-11 14:20:54] [Rank 0] step:3101/10000 train_time:144322ms step_avg:46.54ms +[2025-09-11 14:20:55] [Rank 0] step:3121/10000 train_time:144991ms step_avg:46.46ms +[2025-09-11 14:20:55] [Rank 0] step:3121/10000 train_time:144991ms step_avg:46.46ms +[2025-09-11 14:20:55] [Rank 0] step:3141/10000 train_time:145659ms step_avg:46.37ms +[2025-09-11 14:20:55] [Rank 0] step:3141/10000 train_time:145659ms step_avg:46.37ms +[2025-09-11 14:20:56] [Rank 0] step:3161/10000 train_time:146327ms step_avg:46.29ms +[2025-09-11 14:20:56] [Rank 0] step:3161/10000 train_time:146327ms step_avg:46.29ms +[2025-09-11 14:20:57] [Rank 0] step:3181/10000 train_time:146994ms step_avg:46.21ms +[2025-09-11 14:20:57] [Rank 0] step:3181/10000 train_time:146994ms step_avg:46.21ms +[2025-09-11 14:20:57] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 14:20:57] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 14:20:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 14:20:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 14:21:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 14:21:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 14:21:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:21:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:21:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 14:21:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 14:21:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 14:21:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 14:21:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 14:21:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 14:21:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 14:21:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 14:21:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 14:21:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 14:21:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 14:21:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 14:21:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 14:21:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 14:21:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 14:21:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 14:21:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 14:21:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 14:21:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 14:21:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 14:21:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 14:21:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 14:21:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 14:21:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 14:21:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 14:21:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 14:21:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 14:21:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 14:21:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 14:21:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 14:21:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 14:21:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 14:21:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 14:21:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 14:21:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 14:21:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 14:21:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 14:21:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 14:21:08] [Rank 0] PRINT: step:3200/10000 val_loss:4.5785 total_sharp:1.3320e-04 L1_sharp:7.7217e-03 L2_sharp:5.3347e-03 L3_sharp:8.5895e-03 L4_sharp:1.2549e-02 L5_sharp:1.7671e-02 L6_sharp:2.3972e-02 L7_sharp:2.9801e-02 L8_sharp:5.5982e-02 L9_sharp:4.9568e-02 L10_sharp:6.3758e-02 L11_sharp:1.1743e-01 L12_sharp:3.4413e-01 total_fnorm:7.9000e+01 total_l1_linf:1.5872e+05 total_spectral:3.9500e+01 L1_fnorm:2.5000e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4707e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4707e-01 L8_fnorm:2.4414e-01 L9_fnorm:2.4609e-01 L10_fnorm:2.4707e-01 L11_fnorm:2.4707e-01 L12_fnorm:2.5195e-01 L1_l1linf:6.8848e-02 L2_l1linf:6.7871e-02 L3_l1linf:6.7383e-02 L4_l1linf:6.7383e-02 L5_l1linf:6.6895e-02 L6_l1linf:6.7871e-02 L7_l1linf:6.6406e-02 L8_l1linf:6.6406e-02 L9_l1linf:6.4941e-02 L10_l1linf:6.3965e-02 L11_l1linf:6.4941e-02 L12_l1linf:6.9336e-02 L1_spectral:3.1849e-03 L2_spectral:3.1804e-03 L3_spectral:3.1699e-03 L4_spectral:3.1615e-03 L5_spectral:3.1596e-03 L6_spectral:3.1640e-03 L7_spectral:3.1863e-03 L8_spectral:3.1828e-03 L9_spectral:3.2016e-03 L10_spectral:3.1957e-03 L11_spectral:3.1937e-03 L12_spectral:3.2329e-03 train_time:147643ms step_avg:46.14ms +[2025-09-11 14:21:08] [Rank 0] PRINT: step:3200/10000 
val_loss:4.5785 total_sharp:1.3320e-04 L1_sharp:7.7217e-03 L2_sharp:5.3347e-03 L3_sharp:8.5895e-03 L4_sharp:1.2549e-02 L5_sharp:1.7671e-02 L6_sharp:2.3972e-02 L7_sharp:2.9801e-02 L8_sharp:5.5982e-02 L9_sharp:4.9568e-02 L10_sharp:6.3758e-02 L11_sharp:1.1743e-01 L12_sharp:3.4413e-01 total_fnorm:7.9000e+01 total_l1_linf:1.5872e+05 total_spectral:3.9500e+01 L1_fnorm:2.5000e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4707e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4707e-01 L8_fnorm:2.4414e-01 L9_fnorm:2.4609e-01 L10_fnorm:2.4707e-01 L11_fnorm:2.4707e-01 L12_fnorm:2.5195e-01 L1_l1linf:6.8848e-02 L2_l1linf:6.7871e-02 L3_l1linf:6.7383e-02 L4_l1linf:6.7383e-02 L5_l1linf:6.6895e-02 L6_l1linf:6.7871e-02 L7_l1linf:6.6406e-02 L8_l1linf:6.6406e-02 L9_l1linf:6.4941e-02 L10_l1linf:6.3965e-02 L11_l1linf:6.4941e-02 L12_l1linf:6.9336e-02 L1_spectral:3.1849e-03 L2_spectral:3.1804e-03 L3_spectral:3.1699e-03 L4_spectral:3.1615e-03 L5_spectral:3.1596e-03 L6_spectral:3.1640e-03 L7_spectral:3.1863e-03 L8_spectral:3.1828e-03 L9_spectral:3.2016e-03 L10_spectral:3.1957e-03 L11_spectral:3.1937e-03 L12_spectral:3.2329e-03 train_time:147643ms step_avg:46.14ms +[2025-09-11 14:21:11] [Rank 0] step:3201/10000 train_time:149821ms step_avg:46.80ms +[2025-09-11 14:21:11] [Rank 0] step:3201/10000 train_time:149821ms step_avg:46.80ms +[2025-09-11 14:21:11] [Rank 0] step:3221/10000 train_time:150695ms step_avg:46.79ms +[2025-09-11 14:21:11] [Rank 0] step:3221/10000 train_time:150695ms step_avg:46.79ms +[2025-09-11 14:21:12] [Rank 0] step:3241/10000 train_time:151363ms step_avg:46.70ms +[2025-09-11 14:21:12] [Rank 0] step:3241/10000 train_time:151363ms step_avg:46.70ms +[2025-09-11 14:21:13] [Rank 0] step:3261/10000 train_time:152031ms step_avg:46.62ms +[2025-09-11 14:21:13] [Rank 0] step:3261/10000 train_time:152031ms step_avg:46.62ms +[2025-09-11 14:21:13] [Rank 0] step:3281/10000 train_time:152700ms step_avg:46.54ms +[2025-09-11 14:21:13] [Rank 0] step:3281/10000 
train_time:152700ms step_avg:46.54ms +[2025-09-11 14:21:14] [Rank 0] step:3301/10000 train_time:153369ms step_avg:46.46ms +[2025-09-11 14:21:14] [Rank 0] step:3301/10000 train_time:153369ms step_avg:46.46ms +[2025-09-11 14:21:15] [Rank 0] step:3321/10000 train_time:154036ms step_avg:46.38ms +[2025-09-11 14:21:15] [Rank 0] step:3321/10000 train_time:154036ms step_avg:46.38ms +[2025-09-11 14:21:15] [Rank 0] step:3341/10000 train_time:154703ms step_avg:46.30ms +[2025-09-11 14:21:15] [Rank 0] step:3341/10000 train_time:154703ms step_avg:46.30ms +[2025-09-11 14:21:16] [Rank 0] step:3361/10000 train_time:155370ms step_avg:46.23ms +[2025-09-11 14:21:16] [Rank 0] step:3361/10000 train_time:155370ms step_avg:46.23ms +[2025-09-11 14:21:17] [Rank 0] step:3381/10000 train_time:156037ms step_avg:46.15ms +[2025-09-11 14:21:17] [Rank 0] step:3381/10000 train_time:156037ms step_avg:46.15ms +[2025-09-11 14:21:17] [Rank 0] step:3401/10000 train_time:156704ms step_avg:46.08ms +[2025-09-11 14:21:17] [Rank 0] step:3401/10000 train_time:156704ms step_avg:46.08ms +[2025-09-11 14:21:18] [Rank 0] step:3421/10000 train_time:157369ms step_avg:46.00ms +[2025-09-11 14:21:18] [Rank 0] step:3421/10000 train_time:157369ms step_avg:46.00ms +[2025-09-11 14:21:19] [Rank 0] step:3441/10000 train_time:158035ms step_avg:45.93ms +[2025-09-11 14:21:19] [Rank 0] step:3441/10000 train_time:158035ms step_avg:45.93ms +[2025-09-11 14:21:19] [Rank 0] step:3461/10000 train_time:158701ms step_avg:45.85ms +[2025-09-11 14:21:19] [Rank 0] step:3461/10000 train_time:158701ms step_avg:45.85ms +[2025-09-11 14:21:20] [Rank 0] step:3481/10000 train_time:159368ms step_avg:45.78ms +[2025-09-11 14:21:20] [Rank 0] step:3481/10000 train_time:159368ms step_avg:45.78ms +[2025-09-11 14:21:21] [Rank 0] step:3501/10000 train_time:160035ms step_avg:45.71ms +[2025-09-11 14:21:21] [Rank 0] step:3501/10000 train_time:160035ms step_avg:45.71ms +[2025-09-11 14:21:21] [Rank 0] step:3521/10000 train_time:160701ms step_avg:45.64ms 
+[2025-09-11 14:21:21] [Rank 0] step:3521/10000 train_time:160701ms step_avg:45.64ms +[2025-09-11 14:21:22] [Rank 0] step:3541/10000 train_time:161367ms step_avg:45.57ms +[2025-09-11 14:21:22] [Rank 0] step:3541/10000 train_time:161367ms step_avg:45.57ms +[2025-09-11 14:21:23] [Rank 0] step:3561/10000 train_time:162035ms step_avg:45.50ms +[2025-09-11 14:21:23] [Rank 0] step:3561/10000 train_time:162035ms step_avg:45.50ms +[2025-09-11 14:21:23] [Rank 0] step:3581/10000 train_time:162701ms step_avg:45.43ms +[2025-09-11 14:21:23] [Rank 0] step:3581/10000 train_time:162701ms step_avg:45.43ms +[2025-09-11 14:21:24] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 14:21:24] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 14:21:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 14:21:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 14:21:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 14:21:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 14:21:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:21:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:21:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 14:21:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 14:21:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 14:21:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 14:21:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 14:21:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 14:21:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 14:21:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 14:21:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 14:21:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 14:21:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 14:21:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 14:21:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 14:21:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 14:21:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 14:21:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 14:21:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 14:21:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 14:21:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 14:21:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 14:21:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 14:21:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 14:21:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 14:21:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 14:21:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 14:21:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 14:21:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 14:21:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 14:21:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 14:21:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 14:21:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 14:21:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 14:21:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 14:21:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 14:21:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 14:21:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 14:21:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 14:21:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 14:21:35] [Rank 0] PRINT: step:3600/10000 val_loss:4.5290 total_sharp:1.4634e-04 L1_sharp:6.2716e-03 L2_sharp:3.0074e-03 L3_sharp:4.7382e-03 L4_sharp:5.0871e-03 L5_sharp:1.3612e-02 L6_sharp:2.1029e-02 L7_sharp:2.9160e-02 L8_sharp:4.3470e-02 L9_sharp:4.5383e-02 L10_sharp:5.9440e-02 L11_sharp:1.1781e-01 L12_sharp:4.0732e-01 total_fnorm:7.0500e+01 total_l1_linf:1.3517e+05 total_spectral:3.5250e+01 L1_fnorm:2.4902e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4707e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4707e-01 L8_fnorm:2.4414e-01 L9_fnorm:2.4609e-01 L10_fnorm:2.4609e-01 L11_fnorm:2.4707e-01 L12_fnorm:2.5000e-01 L1_l1linf:6.7383e-02 L2_l1linf:6.7871e-02 L3_l1linf:6.6895e-02 L4_l1linf:6.7383e-02 L5_l1linf:6.7871e-02 L6_l1linf:6.5430e-02 L7_l1linf:6.5430e-02 L8_l1linf:6.4453e-02 L9_l1linf:6.3965e-02 L10_l1linf:6.3477e-02 L11_l1linf:6.2500e-02 L12_l1linf:6.7383e-02 L1_spectral:3.1864e-03 L2_spectral:3.1715e-03 L3_spectral:3.1841e-03 L4_spectral:3.1703e-03 L5_spectral:3.1784e-03 L6_spectral:3.1684e-03 L7_spectral:3.2061e-03 L8_spectral:3.1885e-03 L9_spectral:3.1949e-03 L10_spectral:3.1930e-03 L11_spectral:3.2135e-03 L12_spectral:3.2090e-03 train_time:163349ms step_avg:45.37ms +[2025-09-11 14:21:35] [Rank 0] PRINT: step:3600/10000 val_loss:4.5290 total_sharp:1.4634e-04 L1_sharp:6.2716e-03 L2_sharp:3.0074e-03 L3_sharp:4.7382e-03 L4_sharp:5.0871e-03 L5_sharp:1.3612e-02 L6_sharp:2.1029e-02 L7_sharp:2.9160e-02 L8_sharp:4.3470e-02 L9_sharp:4.5383e-02 L10_sharp:5.9440e-02 L11_sharp:1.1781e-01 L12_sharp:4.0732e-01 total_fnorm:7.0500e+01 total_l1_linf:1.3517e+05 total_spectral:3.5250e+01 L1_fnorm:2.4902e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4707e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4707e-01 L8_fnorm:2.4414e-01 L9_fnorm:2.4609e-01 L10_fnorm:2.4609e-01 L11_fnorm:2.4707e-01 L12_fnorm:2.5000e-01 L1_l1linf:6.7383e-02 L2_l1linf:6.7871e-02 L3_l1linf:6.6895e-02 L4_l1linf:6.7383e-02 L5_l1linf:6.7871e-02 
L6_l1linf:6.5430e-02 L7_l1linf:6.5430e-02 L8_l1linf:6.4453e-02 L9_l1linf:6.3965e-02 L10_l1linf:6.3477e-02 L11_l1linf:6.2500e-02 L12_l1linf:6.7383e-02 L1_spectral:3.1864e-03 L2_spectral:3.1715e-03 L3_spectral:3.1841e-03 L4_spectral:3.1703e-03 L5_spectral:3.1784e-03 L6_spectral:3.1684e-03 L7_spectral:3.2061e-03 L8_spectral:3.1885e-03 L9_spectral:3.1949e-03 L10_spectral:3.1930e-03 L11_spectral:3.2135e-03 L12_spectral:3.2090e-03 train_time:163349ms step_avg:45.37ms +[2025-09-11 14:21:37] [Rank 0] step:3601/10000 train_time:165328ms step_avg:45.91ms +[2025-09-11 14:21:37] [Rank 0] step:3601/10000 train_time:165328ms step_avg:45.91ms +[2025-09-11 14:21:37] [Rank 0] step:3621/10000 train_time:165999ms step_avg:45.84ms +[2025-09-11 14:21:37] [Rank 0] step:3621/10000 train_time:165999ms step_avg:45.84ms +[2025-09-11 14:21:38] [Rank 0] step:3641/10000 train_time:166668ms step_avg:45.78ms +[2025-09-11 14:21:38] [Rank 0] step:3641/10000 train_time:166668ms step_avg:45.78ms +[2025-09-11 14:21:39] [Rank 0] step:3661/10000 train_time:167335ms step_avg:45.71ms +[2025-09-11 14:21:39] [Rank 0] step:3661/10000 train_time:167335ms step_avg:45.71ms +[2025-09-11 14:21:39] [Rank 0] step:3681/10000 train_time:168003ms step_avg:45.64ms +[2025-09-11 14:21:39] [Rank 0] step:3681/10000 train_time:168003ms step_avg:45.64ms +[2025-09-11 14:21:40] [Rank 0] step:3701/10000 train_time:168671ms step_avg:45.57ms +[2025-09-11 14:21:40] [Rank 0] step:3701/10000 train_time:168671ms step_avg:45.57ms +[2025-09-11 14:21:41] [Rank 0] step:3721/10000 train_time:169348ms step_avg:45.51ms +[2025-09-11 14:21:41] [Rank 0] step:3721/10000 train_time:169348ms step_avg:45.51ms +[2025-09-11 14:21:41] [Rank 0] step:3741/10000 train_time:170026ms step_avg:45.45ms +[2025-09-11 14:21:41] [Rank 0] step:3741/10000 train_time:170026ms step_avg:45.45ms +[2025-09-11 14:21:42] [Rank 0] step:3761/10000 train_time:170704ms step_avg:45.39ms +[2025-09-11 14:21:42] [Rank 0] step:3761/10000 train_time:170704ms step_avg:45.39ms 
+[2025-09-11 14:21:43] [Rank 0] step:3781/10000 train_time:171382ms step_avg:45.33ms +[2025-09-11 14:21:43] [Rank 0] step:3781/10000 train_time:171382ms step_avg:45.33ms +[2025-09-11 14:21:44] [Rank 0] step:3801/10000 train_time:172061ms step_avg:45.27ms +[2025-09-11 14:21:44] [Rank 0] step:3801/10000 train_time:172061ms step_avg:45.27ms +[2025-09-11 14:21:44] [Rank 0] step:3821/10000 train_time:172739ms step_avg:45.21ms +[2025-09-11 14:21:44] [Rank 0] step:3821/10000 train_time:172739ms step_avg:45.21ms +[2025-09-11 14:21:45] [Rank 0] step:3841/10000 train_time:173417ms step_avg:45.15ms +[2025-09-11 14:21:45] [Rank 0] step:3841/10000 train_time:173417ms step_avg:45.15ms +[2025-09-11 14:21:46] [Rank 0] step:3861/10000 train_time:174094ms step_avg:45.09ms +[2025-09-11 14:21:46] [Rank 0] step:3861/10000 train_time:174094ms step_avg:45.09ms +[2025-09-11 14:21:46] [Rank 0] step:3881/10000 train_time:174771ms step_avg:45.03ms +[2025-09-11 14:21:46] [Rank 0] step:3881/10000 train_time:174771ms step_avg:45.03ms +[2025-09-11 14:21:47] [Rank 0] step:3901/10000 train_time:175736ms step_avg:45.05ms +[2025-09-11 14:21:47] [Rank 0] step:3901/10000 train_time:175736ms step_avg:45.05ms +[2025-09-11 14:21:48] [Rank 0] step:3921/10000 train_time:176414ms step_avg:44.99ms +[2025-09-11 14:21:48] [Rank 0] step:3921/10000 train_time:176414ms step_avg:44.99ms +[2025-09-11 14:21:49] [Rank 0] step:3941/10000 train_time:177093ms step_avg:44.94ms +[2025-09-11 14:21:49] [Rank 0] step:3941/10000 train_time:177093ms step_avg:44.94ms +[2025-09-11 14:21:49] [Rank 0] step:3961/10000 train_time:177928ms step_avg:44.92ms +[2025-09-11 14:21:49] [Rank 0] step:3961/10000 train_time:177928ms step_avg:44.92ms +[2025-09-11 14:21:50] [Rank 0] step:3981/10000 train_time:178747ms step_avg:44.90ms +[2025-09-11 14:21:50] [Rank 0] step:3981/10000 train_time:178747ms step_avg:44.90ms +[2025-09-11 14:21:51] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 14:21:51] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 14:21:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 14:21:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 14:21:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 14:21:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 14:21:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:21:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:21:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 14:21:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 14:21:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 14:21:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 14:21:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 14:21:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 14:21:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 14:21:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 14:21:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 14:21:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 14:21:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 14:21:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 14:21:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 14:21:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 14:21:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 14:21:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 14:21:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 14:21:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 14:21:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 14:21:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 14:21:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 14:21:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 14:22:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 14:22:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 14:22:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 14:22:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 14:22:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 14:22:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 14:22:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 14:22:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 14:22:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 14:22:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 14:22:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 14:22:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 14:22:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 14:22:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 14:22:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 14:22:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 14:22:02] [Rank 0] PRINT: step:4000/10000 val_loss:4.4783 total_sharp:1.4370e-04 L1_sharp:3.7270e-03 L2_sharp:2.0952e-03 L3_sharp:4.4334e-03 L4_sharp:6.5443e-03 L5_sharp:1.4846e-02 L6_sharp:2.1858e-02 L7_sharp:3.0029e-02 L8_sharp:5.3052e-02 L9_sharp:5.0637e-02 L10_sharp:6.9542e-02 L11_sharp:1.2853e-01 L12_sharp:4.4717e-01 total_fnorm:7.9500e+01 total_l1_linf:1.5462e+05 total_spectral:3.9750e+01 L1_fnorm:2.4707e-01 L2_fnorm:2.4609e-01 L3_fnorm:2.4512e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4512e-01 L7_fnorm:2.4512e-01 L8_fnorm:2.4121e-01 L9_fnorm:2.4512e-01 L10_fnorm:2.4609e-01 L11_fnorm:2.4609e-01 L12_fnorm:2.5000e-01 L1_l1linf:6.6406e-02 L2_l1linf:6.5430e-02 L3_l1linf:6.4453e-02 L4_l1linf:6.4453e-02 L5_l1linf:6.4453e-02 L6_l1linf:6.4453e-02 L7_l1linf:6.4941e-02 L8_l1linf:6.2988e-02 L9_l1linf:6.3477e-02 L10_l1linf:6.2988e-02 L11_l1linf:6.2500e-02 L12_l1linf:6.7871e-02 L1_spectral:3.1879e-03 L2_spectral:3.1819e-03 L3_spectral:3.1847e-03 L4_spectral:3.1695e-03 L5_spectral:3.1880e-03 L6_spectral:3.1680e-03 L7_spectral:3.1928e-03 L8_spectral:3.1955e-03 L9_spectral:3.2044e-03 L10_spectral:3.2092e-03 L11_spectral:3.2129e-03 L12_spectral:3.2264e-03 train_time:179406ms step_avg:44.85ms +[2025-09-11 14:22:02] [Rank 0] PRINT: step:4000/10000 
val_loss:4.4783 total_sharp:1.4370e-04 L1_sharp:3.7270e-03 L2_sharp:2.0952e-03 L3_sharp:4.4334e-03 L4_sharp:6.5443e-03 L5_sharp:1.4846e-02 L6_sharp:2.1858e-02 L7_sharp:3.0029e-02 L8_sharp:5.3052e-02 L9_sharp:5.0637e-02 L10_sharp:6.9542e-02 L11_sharp:1.2853e-01 L12_sharp:4.4717e-01 total_fnorm:7.9500e+01 total_l1_linf:1.5462e+05 total_spectral:3.9750e+01 L1_fnorm:2.4707e-01 L2_fnorm:2.4609e-01 L3_fnorm:2.4512e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4512e-01 L7_fnorm:2.4512e-01 L8_fnorm:2.4121e-01 L9_fnorm:2.4512e-01 L10_fnorm:2.4609e-01 L11_fnorm:2.4609e-01 L12_fnorm:2.5000e-01 L1_l1linf:6.6406e-02 L2_l1linf:6.5430e-02 L3_l1linf:6.4453e-02 L4_l1linf:6.4453e-02 L5_l1linf:6.4453e-02 L6_l1linf:6.4453e-02 L7_l1linf:6.4941e-02 L8_l1linf:6.2988e-02 L9_l1linf:6.3477e-02 L10_l1linf:6.2988e-02 L11_l1linf:6.2500e-02 L12_l1linf:6.7871e-02 L1_spectral:3.1879e-03 L2_spectral:3.1819e-03 L3_spectral:3.1847e-03 L4_spectral:3.1695e-03 L5_spectral:3.1880e-03 L6_spectral:3.1680e-03 L7_spectral:3.1928e-03 L8_spectral:3.1955e-03 L9_spectral:3.2044e-03 L10_spectral:3.2092e-03 L11_spectral:3.2129e-03 L12_spectral:3.2264e-03 train_time:179406ms step_avg:44.85ms +[2025-09-11 14:22:04] [Rank 0] step:4001/10000 train_time:181322ms step_avg:45.32ms +[2025-09-11 14:22:04] [Rank 0] step:4001/10000 train_time:181322ms step_avg:45.32ms +[2025-09-11 14:22:04] [Rank 0] step:4021/10000 train_time:182006ms step_avg:45.26ms +[2025-09-11 14:22:04] [Rank 0] step:4021/10000 train_time:182006ms step_avg:45.26ms +[2025-09-11 14:22:05] [Rank 0] step:4041/10000 train_time:182684ms step_avg:45.21ms +[2025-09-11 14:22:05] [Rank 0] step:4041/10000 train_time:182684ms step_avg:45.21ms +[2025-09-11 14:22:06] [Rank 0] step:4061/10000 train_time:183361ms step_avg:45.15ms +[2025-09-11 14:22:06] [Rank 0] step:4061/10000 train_time:183361ms step_avg:45.15ms +[2025-09-11 14:22:06] [Rank 0] step:4081/10000 train_time:184040ms step_avg:45.10ms +[2025-09-11 14:22:06] [Rank 0] step:4081/10000 
train_time:184040ms step_avg:45.10ms +[2025-09-11 14:22:07] [Rank 0] step:4101/10000 train_time:184717ms step_avg:45.04ms +[2025-09-11 14:22:07] [Rank 0] step:4101/10000 train_time:184717ms step_avg:45.04ms +[2025-09-11 14:22:08] [Rank 0] step:4121/10000 train_time:185397ms step_avg:44.99ms +[2025-09-11 14:22:08] [Rank 0] step:4121/10000 train_time:185397ms step_avg:44.99ms +[2025-09-11 14:22:08] [Rank 0] step:4141/10000 train_time:186074ms step_avg:44.93ms +[2025-09-11 14:22:08] [Rank 0] step:4141/10000 train_time:186074ms step_avg:44.93ms +[2025-09-11 14:22:09] [Rank 0] step:4161/10000 train_time:186753ms step_avg:44.88ms +[2025-09-11 14:22:09] [Rank 0] step:4161/10000 train_time:186753ms step_avg:44.88ms +[2025-09-11 14:22:10] [Rank 0] step:4181/10000 train_time:187430ms step_avg:44.83ms +[2025-09-11 14:22:10] [Rank 0] step:4181/10000 train_time:187430ms step_avg:44.83ms +[2025-09-11 14:22:10] [Rank 0] step:4201/10000 train_time:188108ms step_avg:44.78ms +[2025-09-11 14:22:10] [Rank 0] step:4201/10000 train_time:188108ms step_avg:44.78ms +[2025-09-11 14:22:11] [Rank 0] step:4221/10000 train_time:188785ms step_avg:44.73ms +[2025-09-11 14:22:11] [Rank 0] step:4221/10000 train_time:188785ms step_avg:44.73ms +[2025-09-11 14:22:12] [Rank 0] step:4241/10000 train_time:189463ms step_avg:44.67ms +[2025-09-11 14:22:12] [Rank 0] step:4241/10000 train_time:189463ms step_avg:44.67ms +[2025-09-11 14:22:13] [Rank 0] step:4261/10000 train_time:190140ms step_avg:44.62ms +[2025-09-11 14:22:13] [Rank 0] step:4261/10000 train_time:190140ms step_avg:44.62ms +[2025-09-11 14:22:13] [Rank 0] step:4281/10000 train_time:190819ms step_avg:44.57ms +[2025-09-11 14:22:13] [Rank 0] step:4281/10000 train_time:190819ms step_avg:44.57ms +[2025-09-11 14:22:14] [Rank 0] step:4301/10000 train_time:191497ms step_avg:44.52ms +[2025-09-11 14:22:14] [Rank 0] step:4301/10000 train_time:191497ms step_avg:44.52ms +[2025-09-11 14:22:15] [Rank 0] step:4321/10000 train_time:192175ms step_avg:44.47ms 
+[2025-09-11 14:22:15] [Rank 0] step:4321/10000 train_time:192175ms step_avg:44.47ms +[2025-09-11 14:22:15] [Rank 0] step:4341/10000 train_time:192854ms step_avg:44.43ms +[2025-09-11 14:22:15] [Rank 0] step:4341/10000 train_time:192854ms step_avg:44.43ms +[2025-09-11 14:22:16] [Rank 0] step:4361/10000 train_time:193531ms step_avg:44.38ms +[2025-09-11 14:22:16] [Rank 0] step:4361/10000 train_time:193531ms step_avg:44.38ms +[2025-09-11 14:22:17] [Rank 0] step:4381/10000 train_time:194210ms step_avg:44.33ms +[2025-09-11 14:22:17] [Rank 0] step:4381/10000 train_time:194210ms step_avg:44.33ms +[2025-09-11 14:22:17] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 14:22:17] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 14:22:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 14:22:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 14:22:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 14:22:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 14:22:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:22:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:22:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 14:22:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 14:22:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 14:22:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 14:22:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 14:22:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 14:22:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 14:22:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 14:22:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 14:22:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 14:22:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 14:22:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 14:22:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 14:22:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 14:22:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 14:22:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 14:22:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 14:22:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 14:22:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 14:22:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 14:22:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 14:22:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 14:22:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 14:22:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 14:22:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 14:22:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 14:22:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 14:22:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 14:22:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 14:22:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 14:22:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 14:22:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 14:22:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 14:22:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 14:22:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 14:22:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 14:22:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 14:22:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 14:22:28] [Rank 0] PRINT: step:4400/10000 val_loss:4.4546 total_sharp:1.2037e-04 L1_sharp:5.6106e-03 L2_sharp:2.3502e-03 L3_sharp:8.6751e-03 L4_sharp:1.1287e-02 L5_sharp:1.3622e-02 L6_sharp:1.8951e-02 L7_sharp:2.8975e-02 L8_sharp:4.8955e-02 L9_sharp:4.3998e-02 L10_sharp:5.5730e-02 L11_sharp:9.4056e-02 L12_sharp:4.8744e-01 total_fnorm:7.3000e+01 total_l1_linf:1.3619e+05 total_spectral:3.6500e+01 L1_fnorm:2.4609e-01 L2_fnorm:2.4512e-01 L3_fnorm:2.4414e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4414e-01 L7_fnorm:2.4512e-01 L8_fnorm:2.4023e-01 L9_fnorm:2.4512e-01 L10_fnorm:2.4609e-01 L11_fnorm:2.4707e-01 L12_fnorm:2.5000e-01 L1_l1linf:6.4453e-02 L2_l1linf:6.4941e-02 L3_l1linf:6.3965e-02 L4_l1linf:6.3965e-02 L5_l1linf:6.2500e-02 L6_l1linf:6.2256e-02 L7_l1linf:6.2256e-02 L8_l1linf:6.2012e-02 L9_l1linf:6.1768e-02 L10_l1linf:6.2256e-02 L11_l1linf:6.4453e-02 L12_l1linf:6.6406e-02 L1_spectral:3.1862e-03 L2_spectral:3.1926e-03 L3_spectral:3.1902e-03 L4_spectral:3.1821e-03 L5_spectral:3.1733e-03 L6_spectral:3.1870e-03 L7_spectral:3.1912e-03 L8_spectral:3.1632e-03 L9_spectral:3.1905e-03 L10_spectral:3.1974e-03 L11_spectral:3.2024e-03 L12_spectral:3.2064e-03 train_time:194869ms step_avg:44.29ms +[2025-09-11 14:22:28] [Rank 0] PRINT: step:4400/10000 val_loss:4.4546 total_sharp:1.2037e-04 L1_sharp:5.6106e-03 L2_sharp:2.3502e-03 L3_sharp:8.6751e-03 L4_sharp:1.1287e-02 L5_sharp:1.3622e-02 L6_sharp:1.8951e-02 L7_sharp:2.8975e-02 L8_sharp:4.8955e-02 L9_sharp:4.3998e-02 L10_sharp:5.5730e-02 L11_sharp:9.4056e-02 L12_sharp:4.8744e-01 total_fnorm:7.3000e+01 total_l1_linf:1.3619e+05 total_spectral:3.6500e+01 L1_fnorm:2.4609e-01 L2_fnorm:2.4512e-01 L3_fnorm:2.4414e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4414e-01 L7_fnorm:2.4512e-01 L8_fnorm:2.4023e-01 L9_fnorm:2.4512e-01 L10_fnorm:2.4609e-01 L11_fnorm:2.4707e-01 L12_fnorm:2.5000e-01 L1_l1linf:6.4453e-02 L2_l1linf:6.4941e-02 L3_l1linf:6.3965e-02 L4_l1linf:6.3965e-02 L5_l1linf:6.2500e-02 
L6_l1linf:6.2256e-02 L7_l1linf:6.2256e-02 L8_l1linf:6.2012e-02 L9_l1linf:6.1768e-02 L10_l1linf:6.2256e-02 L11_l1linf:6.4453e-02 L12_l1linf:6.6406e-02 L1_spectral:3.1862e-03 L2_spectral:3.1926e-03 L3_spectral:3.1902e-03 L4_spectral:3.1821e-03 L5_spectral:3.1733e-03 L6_spectral:3.1870e-03 L7_spectral:3.1912e-03 L8_spectral:3.1632e-03 L9_spectral:3.1905e-03 L10_spectral:3.1974e-03 L11_spectral:3.2024e-03 L12_spectral:3.2064e-03 train_time:194869ms step_avg:44.29ms +[2025-09-11 14:22:29] [Rank 0] step:4401/10000 train_time:196254ms step_avg:44.59ms +[2025-09-11 14:22:29] [Rank 0] step:4401/10000 train_time:196254ms step_avg:44.59ms +[2025-09-11 14:22:30] [Rank 0] step:4421/10000 train_time:196957ms step_avg:44.55ms +[2025-09-11 14:22:30] [Rank 0] step:4421/10000 train_time:196957ms step_avg:44.55ms +[2025-09-11 14:22:31] [Rank 0] step:4441/10000 train_time:197638ms step_avg:44.50ms +[2025-09-11 14:22:31] [Rank 0] step:4441/10000 train_time:197638ms step_avg:44.50ms +[2025-09-11 14:22:31] [Rank 0] step:4461/10000 train_time:198318ms step_avg:44.46ms +[2025-09-11 14:22:31] [Rank 0] step:4461/10000 train_time:198318ms step_avg:44.46ms +[2025-09-11 14:22:32] [Rank 0] step:4481/10000 train_time:199000ms step_avg:44.41ms +[2025-09-11 14:22:32] [Rank 0] step:4481/10000 train_time:199000ms step_avg:44.41ms +[2025-09-11 14:22:33] [Rank 0] step:4501/10000 train_time:199681ms step_avg:44.36ms +[2025-09-11 14:22:33] [Rank 0] step:4501/10000 train_time:199681ms step_avg:44.36ms +[2025-09-11 14:22:33] [Rank 0] step:4521/10000 train_time:200363ms step_avg:44.32ms +[2025-09-11 14:22:33] [Rank 0] step:4521/10000 train_time:200363ms step_avg:44.32ms +[2025-09-11 14:22:34] [Rank 0] step:4541/10000 train_time:201047ms step_avg:44.27ms +[2025-09-11 14:22:34] [Rank 0] step:4541/10000 train_time:201047ms step_avg:44.27ms +[2025-09-11 14:22:35] [Rank 0] step:4561/10000 train_time:201727ms step_avg:44.23ms +[2025-09-11 14:22:35] [Rank 0] step:4561/10000 train_time:201727ms step_avg:44.23ms 
+[2025-09-11 14:22:36] [Rank 0] step:4581/10000 train_time:202408ms step_avg:44.18ms +[2025-09-11 14:22:36] [Rank 0] step:4581/10000 train_time:202408ms step_avg:44.18ms +[2025-09-11 14:22:36] [Rank 0] step:4601/10000 train_time:203090ms step_avg:44.14ms +[2025-09-11 14:22:36] [Rank 0] step:4601/10000 train_time:203090ms step_avg:44.14ms +[2025-09-11 14:22:37] [Rank 0] step:4621/10000 train_time:203771ms step_avg:44.10ms +[2025-09-11 14:22:37] [Rank 0] step:4621/10000 train_time:203771ms step_avg:44.10ms +[2025-09-11 14:22:38] [Rank 0] step:4641/10000 train_time:204451ms step_avg:44.05ms +[2025-09-11 14:22:38] [Rank 0] step:4641/10000 train_time:204451ms step_avg:44.05ms +[2025-09-11 14:22:38] [Rank 0] step:4661/10000 train_time:205134ms step_avg:44.01ms +[2025-09-11 14:22:38] [Rank 0] step:4661/10000 train_time:205134ms step_avg:44.01ms +[2025-09-11 14:22:39] [Rank 0] step:4681/10000 train_time:205815ms step_avg:43.97ms +[2025-09-11 14:22:39] [Rank 0] step:4681/10000 train_time:205815ms step_avg:43.97ms +[2025-09-11 14:22:40] [Rank 0] step:4701/10000 train_time:206496ms step_avg:43.93ms +[2025-09-11 14:22:40] [Rank 0] step:4701/10000 train_time:206496ms step_avg:43.93ms +[2025-09-11 14:22:40] [Rank 0] step:4721/10000 train_time:207178ms step_avg:43.88ms +[2025-09-11 14:22:40] [Rank 0] step:4721/10000 train_time:207178ms step_avg:43.88ms +[2025-09-11 14:22:41] [Rank 0] step:4741/10000 train_time:207859ms step_avg:43.84ms +[2025-09-11 14:22:41] [Rank 0] step:4741/10000 train_time:207859ms step_avg:43.84ms +[2025-09-11 14:22:42] [Rank 0] step:4761/10000 train_time:208542ms step_avg:43.80ms +[2025-09-11 14:22:42] [Rank 0] step:4761/10000 train_time:208542ms step_avg:43.80ms +[2025-09-11 14:22:42] [Rank 0] step:4781/10000 train_time:209223ms step_avg:43.76ms +[2025-09-11 14:22:42] [Rank 0] step:4781/10000 train_time:209223ms step_avg:43.76ms +[2025-09-11 14:22:43] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 14:22:43] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 14:22:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 14:22:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 14:22:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 14:22:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 14:22:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:22:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:22:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 14:22:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 14:22:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 14:22:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 14:22:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 14:22:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 14:22:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 14:22:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 14:22:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 14:22:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 14:22:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 14:22:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 14:22:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 14:22:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 14:22:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 14:22:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 14:22:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 14:22:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 14:22:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 14:22:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 14:22:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 14:22:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 14:22:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 14:22:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 14:22:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 14:22:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 14:22:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 14:22:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 14:22:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 14:22:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 14:22:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 14:22:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 14:22:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 14:22:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 14:22:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 14:22:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 14:22:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 14:22:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 14:22:53] [Rank 0] PRINT: step:4800/10000 val_loss:4.4151 total_sharp:1.1844e-04 L1_sharp:3.3854e-03 L2_sharp:4.9893e-03 L3_sharp:7.4771e-03 L4_sharp:7.0167e-03 L5_sharp:1.2460e-02 L6_sharp:2.0055e-02 L7_sharp:2.6523e-02 L8_sharp:4.5166e-02 L9_sharp:4.0735e-02 L10_sharp:5.5682e-02 L11_sharp:9.8872e-02 L12_sharp:6.2551e-01 total_fnorm:7.7000e+01 total_l1_linf:1.5053e+05 total_spectral:3.8500e+01 L1_fnorm:2.4707e-01 L2_fnorm:2.4609e-01 L3_fnorm:2.4512e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4512e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.4023e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4414e-01 L11_fnorm:2.4512e-01 L12_fnorm:2.5000e-01 L1_l1linf:6.4453e-02 L2_l1linf:6.3477e-02 L3_l1linf:6.2500e-02 L4_l1linf:6.2988e-02 L5_l1linf:6.3477e-02 L6_l1linf:6.1035e-02 L7_l1linf:6.1768e-02 L8_l1linf:6.1279e-02 L9_l1linf:6.0059e-02 L10_l1linf:5.9814e-02 L11_l1linf:6.0547e-02 L12_l1linf:6.5430e-02 L1_spectral:3.2094e-03 L2_spectral:3.1902e-03 L3_spectral:3.2022e-03 L4_spectral:3.1947e-03 L5_spectral:3.1930e-03 L6_spectral:3.1956e-03 L7_spectral:3.2008e-03 L8_spectral:3.2009e-03 L9_spectral:3.2117e-03 L10_spectral:3.1996e-03 L11_spectral:3.2028e-03 L12_spectral:3.2205e-03 train_time:209883ms step_avg:43.73ms +[2025-09-11 14:22:53] [Rank 0] PRINT: step:4800/10000 
val_loss:4.4151 total_sharp:1.1844e-04 L1_sharp:3.3854e-03 L2_sharp:4.9893e-03 L3_sharp:7.4771e-03 L4_sharp:7.0167e-03 L5_sharp:1.2460e-02 L6_sharp:2.0055e-02 L7_sharp:2.6523e-02 L8_sharp:4.5166e-02 L9_sharp:4.0735e-02 L10_sharp:5.5682e-02 L11_sharp:9.8872e-02 L12_sharp:6.2551e-01 total_fnorm:7.7000e+01 total_l1_linf:1.5053e+05 total_spectral:3.8500e+01 L1_fnorm:2.4707e-01 L2_fnorm:2.4609e-01 L3_fnorm:2.4512e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4512e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.4023e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4414e-01 L11_fnorm:2.4512e-01 L12_fnorm:2.5000e-01 L1_l1linf:6.4453e-02 L2_l1linf:6.3477e-02 L3_l1linf:6.2500e-02 L4_l1linf:6.2988e-02 L5_l1linf:6.3477e-02 L6_l1linf:6.1035e-02 L7_l1linf:6.1768e-02 L8_l1linf:6.1279e-02 L9_l1linf:6.0059e-02 L10_l1linf:5.9814e-02 L11_l1linf:6.0547e-02 L12_l1linf:6.5430e-02 L1_spectral:3.2094e-03 L2_spectral:3.1902e-03 L3_spectral:3.2022e-03 L4_spectral:3.1947e-03 L5_spectral:3.1930e-03 L6_spectral:3.1956e-03 L7_spectral:3.2008e-03 L8_spectral:3.2009e-03 L9_spectral:3.2117e-03 L10_spectral:3.1996e-03 L11_spectral:3.2028e-03 L12_spectral:3.2205e-03 train_time:209883ms step_avg:43.73ms +[2025-09-11 14:22:55] [Rank 0] step:4801/10000 train_time:211302ms step_avg:44.01ms +[2025-09-11 14:22:55] [Rank 0] step:4801/10000 train_time:211302ms step_avg:44.01ms +[2025-09-11 14:22:56] [Rank 0] step:4821/10000 train_time:212000ms step_avg:43.97ms +[2025-09-11 14:22:56] [Rank 0] step:4821/10000 train_time:212000ms step_avg:43.97ms +[2025-09-11 14:22:56] [Rank 0] step:4841/10000 train_time:212683ms step_avg:43.93ms +[2025-09-11 14:22:56] [Rank 0] step:4841/10000 train_time:212683ms step_avg:43.93ms +[2025-09-11 14:22:57] [Rank 0] step:4861/10000 train_time:213364ms step_avg:43.89ms +[2025-09-11 14:22:57] [Rank 0] step:4861/10000 train_time:213364ms step_avg:43.89ms +[2025-09-11 14:22:58] [Rank 0] step:4881/10000 train_time:214045ms step_avg:43.85ms +[2025-09-11 14:22:58] [Rank 0] step:4881/10000 
train_time:214045ms step_avg:43.85ms +[2025-09-11 14:22:58] [Rank 0] step:4901/10000 train_time:214726ms step_avg:43.81ms +[2025-09-11 14:22:58] [Rank 0] step:4901/10000 train_time:214726ms step_avg:43.81ms +[2025-09-11 14:22:59] [Rank 0] step:4921/10000 train_time:215408ms step_avg:43.77ms +[2025-09-11 14:22:59] [Rank 0] step:4921/10000 train_time:215408ms step_avg:43.77ms +[2025-09-11 14:23:00] [Rank 0] step:4941/10000 train_time:216088ms step_avg:43.73ms +[2025-09-11 14:23:00] [Rank 0] step:4941/10000 train_time:216088ms step_avg:43.73ms +[2025-09-11 14:23:00] [Rank 0] step:4961/10000 train_time:216767ms step_avg:43.69ms +[2025-09-11 14:23:00] [Rank 0] step:4961/10000 train_time:216767ms step_avg:43.69ms +[2025-09-11 14:23:01] [Rank 0] step:4981/10000 train_time:217448ms step_avg:43.66ms +[2025-09-11 14:23:01] [Rank 0] step:4981/10000 train_time:217448ms step_avg:43.66ms +[2025-09-11 14:23:02] [Rank 0] step:5001/10000 train_time:218130ms step_avg:43.62ms +[2025-09-11 14:23:02] [Rank 0] step:5001/10000 train_time:218130ms step_avg:43.62ms +[2025-09-11 14:23:02] [Rank 0] step:5021/10000 train_time:218809ms step_avg:43.58ms +[2025-09-11 14:23:02] [Rank 0] step:5021/10000 train_time:218809ms step_avg:43.58ms +[2025-09-11 14:23:03] [Rank 0] step:5041/10000 train_time:219488ms step_avg:43.54ms +[2025-09-11 14:23:03] [Rank 0] step:5041/10000 train_time:219488ms step_avg:43.54ms +[2025-09-11 14:23:04] [Rank 0] step:5061/10000 train_time:220168ms step_avg:43.50ms +[2025-09-11 14:23:04] [Rank 0] step:5061/10000 train_time:220168ms step_avg:43.50ms +[2025-09-11 14:23:04] [Rank 0] step:5081/10000 train_time:220847ms step_avg:43.47ms +[2025-09-11 14:23:04] [Rank 0] step:5081/10000 train_time:220847ms step_avg:43.47ms +[2025-09-11 14:23:05] [Rank 0] step:5101/10000 train_time:221527ms step_avg:43.43ms +[2025-09-11 14:23:05] [Rank 0] step:5101/10000 train_time:221527ms step_avg:43.43ms +[2025-09-11 14:23:06] [Rank 0] step:5121/10000 train_time:222206ms step_avg:43.39ms 
+[2025-09-11 14:23:06] [Rank 0] step:5121/10000 train_time:222206ms step_avg:43.39ms +[2025-09-11 14:23:06] [Rank 0] step:5141/10000 train_time:222886ms step_avg:43.35ms +[2025-09-11 14:23:06] [Rank 0] step:5141/10000 train_time:222886ms step_avg:43.35ms +[2025-09-11 14:23:07] [Rank 0] step:5161/10000 train_time:223567ms step_avg:43.32ms +[2025-09-11 14:23:07] [Rank 0] step:5161/10000 train_time:223567ms step_avg:43.32ms +[2025-09-11 14:23:08] [Rank 0] step:5181/10000 train_time:224245ms step_avg:43.28ms +[2025-09-11 14:23:08] [Rank 0] step:5181/10000 train_time:224245ms step_avg:43.28ms +[2025-09-11 14:23:08] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 14:23:08] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 14:23:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 14:23:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 14:23:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 14:23:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 14:23:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:23:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:23:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 14:23:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 14:23:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 14:23:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 14:23:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 14:23:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 14:23:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 14:23:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 14:23:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 14:23:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 14:23:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 14:23:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 14:23:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 14:23:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 14:23:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 14:23:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 14:23:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 14:23:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 14:23:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 14:23:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 14:23:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 14:23:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 14:23:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 14:23:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 14:23:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 14:23:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 14:23:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 14:23:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 14:23:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 14:23:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 14:23:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 14:23:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 14:23:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 14:23:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 14:23:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 14:23:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 14:23:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 14:23:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 14:23:19] [Rank 0] PRINT: step:5200/10000 val_loss:4.3849 total_sharp:1.7208e-04 L1_sharp:2.7088e-03 L2_sharp:4.1300e-03 L3_sharp:4.8831e-03 L4_sharp:7.4589e-03 L5_sharp:1.2465e-02 L6_sharp:2.0048e-02 L7_sharp:3.0365e-02 L8_sharp:5.0328e-02 L9_sharp:5.2419e-02 L10_sharp:7.4522e-02 L11_sharp:1.5377e-01 L12_sharp:1.3071e+00 total_fnorm:7.0000e+01 total_l1_linf:1.2902e+05 total_spectral:3.5000e+01 L1_fnorm:2.4707e-01 L2_fnorm:2.4609e-01 L3_fnorm:2.4512e-01 L4_fnorm:2.4414e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4512e-01 L7_fnorm:2.4512e-01 L8_fnorm:2.4023e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4414e-01 L11_fnorm:2.4414e-01 L12_fnorm:2.5000e-01 L1_l1linf:6.2500e-02 L2_l1linf:6.2500e-02 L3_l1linf:6.2988e-02 L4_l1linf:6.2012e-02 L5_l1linf:6.1768e-02 L6_l1linf:6.1768e-02 L7_l1linf:6.1768e-02 L8_l1linf:6.0059e-02 L9_l1linf:5.9814e-02 L10_l1linf:6.0303e-02 L11_l1linf:5.8838e-02 L12_l1linf:6.3477e-02 L1_spectral:3.2081e-03 L2_spectral:3.2053e-03 L3_spectral:3.1996e-03 L4_spectral:3.1959e-03 L5_spectral:3.1919e-03 L6_spectral:3.1957e-03 L7_spectral:3.2006e-03 L8_spectral:3.1918e-03 L9_spectral:3.2117e-03 L10_spectral:3.2042e-03 L11_spectral:3.2152e-03 L12_spectral:3.2226e-03 train_time:224911ms step_avg:43.25ms +[2025-09-11 14:23:19] [Rank 0] PRINT: step:5200/10000 val_loss:4.3849 total_sharp:1.7208e-04 L1_sharp:2.7088e-03 L2_sharp:4.1300e-03 L3_sharp:4.8831e-03 L4_sharp:7.4589e-03 L5_sharp:1.2465e-02 L6_sharp:2.0048e-02 L7_sharp:3.0365e-02 L8_sharp:5.0328e-02 L9_sharp:5.2419e-02 L10_sharp:7.4522e-02 L11_sharp:1.5377e-01 L12_sharp:1.3071e+00 total_fnorm:7.0000e+01 total_l1_linf:1.2902e+05 total_spectral:3.5000e+01 L1_fnorm:2.4707e-01 L2_fnorm:2.4609e-01 L3_fnorm:2.4512e-01 L4_fnorm:2.4414e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4512e-01 L7_fnorm:2.4512e-01 L8_fnorm:2.4023e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4414e-01 L11_fnorm:2.4414e-01 L12_fnorm:2.5000e-01 L1_l1linf:6.2500e-02 L2_l1linf:6.2500e-02 L3_l1linf:6.2988e-02 L4_l1linf:6.2012e-02 L5_l1linf:6.1768e-02 
L6_l1linf:6.1768e-02 L7_l1linf:6.1768e-02 L8_l1linf:6.0059e-02 L9_l1linf:5.9814e-02 L10_l1linf:6.0303e-02 L11_l1linf:5.8838e-02 L12_l1linf:6.3477e-02 L1_spectral:3.2081e-03 L2_spectral:3.2053e-03 L3_spectral:3.1996e-03 L4_spectral:3.1959e-03 L5_spectral:3.1919e-03 L6_spectral:3.1957e-03 L7_spectral:3.2006e-03 L8_spectral:3.1918e-03 L9_spectral:3.2117e-03 L10_spectral:3.2042e-03 L11_spectral:3.2152e-03 L12_spectral:3.2226e-03 train_time:224911ms step_avg:43.25ms +[2025-09-11 14:23:21] [Rank 0] step:5201/10000 train_time:226334ms step_avg:43.52ms +[2025-09-11 14:23:21] [Rank 0] step:5201/10000 train_time:226334ms step_avg:43.52ms +[2025-09-11 14:23:21] [Rank 0] step:5221/10000 train_time:227035ms step_avg:43.48ms +[2025-09-11 14:23:21] [Rank 0] step:5221/10000 train_time:227035ms step_avg:43.48ms +[2025-09-11 14:23:22] [Rank 0] step:5241/10000 train_time:227724ms step_avg:43.45ms +[2025-09-11 14:23:22] [Rank 0] step:5241/10000 train_time:227724ms step_avg:43.45ms +[2025-09-11 14:23:23] [Rank 0] step:5261/10000 train_time:228415ms step_avg:43.42ms +[2025-09-11 14:23:23] [Rank 0] step:5261/10000 train_time:228415ms step_avg:43.42ms +[2025-09-11 14:23:24] [Rank 0] step:5281/10000 train_time:229107ms step_avg:43.38ms +[2025-09-11 14:23:24] [Rank 0] step:5281/10000 train_time:229107ms step_avg:43.38ms +[2025-09-11 14:23:24] [Rank 0] step:5301/10000 train_time:229797ms step_avg:43.35ms +[2025-09-11 14:23:24] [Rank 0] step:5301/10000 train_time:229797ms step_avg:43.35ms +[2025-09-11 14:23:25] [Rank 0] step:5321/10000 train_time:230495ms step_avg:43.32ms +[2025-09-11 14:23:25] [Rank 0] step:5321/10000 train_time:230495ms step_avg:43.32ms +[2025-09-11 14:23:26] [Rank 0] step:5341/10000 train_time:231184ms step_avg:43.28ms +[2025-09-11 14:23:26] [Rank 0] step:5341/10000 train_time:231184ms step_avg:43.28ms +[2025-09-11 14:23:26] [Rank 0] step:5361/10000 train_time:231874ms step_avg:43.25ms +[2025-09-11 14:23:26] [Rank 0] step:5361/10000 train_time:231874ms step_avg:43.25ms 
+[2025-09-11 14:23:27] [Rank 0] step:5381/10000 train_time:232564ms step_avg:43.22ms +[2025-09-11 14:23:27] [Rank 0] step:5381/10000 train_time:232564ms step_avg:43.22ms +[2025-09-11 14:23:28] [Rank 0] step:5401/10000 train_time:233255ms step_avg:43.19ms +[2025-09-11 14:23:28] [Rank 0] step:5401/10000 train_time:233255ms step_avg:43.19ms +[2025-09-11 14:23:28] [Rank 0] step:5421/10000 train_time:233946ms step_avg:43.16ms +[2025-09-11 14:23:28] [Rank 0] step:5421/10000 train_time:233946ms step_avg:43.16ms +[2025-09-11 14:23:29] [Rank 0] step:5441/10000 train_time:234636ms step_avg:43.12ms +[2025-09-11 14:23:29] [Rank 0] step:5441/10000 train_time:234636ms step_avg:43.12ms +[2025-09-11 14:23:30] [Rank 0] step:5461/10000 train_time:235327ms step_avg:43.09ms +[2025-09-11 14:23:30] [Rank 0] step:5461/10000 train_time:235327ms step_avg:43.09ms +[2025-09-11 14:23:30] [Rank 0] step:5481/10000 train_time:236017ms step_avg:43.06ms +[2025-09-11 14:23:30] [Rank 0] step:5481/10000 train_time:236017ms step_avg:43.06ms +[2025-09-11 14:23:31] [Rank 0] step:5501/10000 train_time:236705ms step_avg:43.03ms +[2025-09-11 14:23:31] [Rank 0] step:5501/10000 train_time:236705ms step_avg:43.03ms +[2025-09-11 14:23:32] [Rank 0] step:5521/10000 train_time:237397ms step_avg:43.00ms +[2025-09-11 14:23:32] [Rank 0] step:5521/10000 train_time:237397ms step_avg:43.00ms +[2025-09-11 14:23:32] [Rank 0] step:5541/10000 train_time:238088ms step_avg:42.97ms +[2025-09-11 14:23:32] [Rank 0] step:5541/10000 train_time:238088ms step_avg:42.97ms +[2025-09-11 14:23:33] [Rank 0] step:5561/10000 train_time:238779ms step_avg:42.94ms +[2025-09-11 14:23:33] [Rank 0] step:5561/10000 train_time:238779ms step_avg:42.94ms +[2025-09-11 14:23:34] [Rank 0] step:5581/10000 train_time:239470ms step_avg:42.91ms +[2025-09-11 14:23:34] [Rank 0] step:5581/10000 train_time:239470ms step_avg:42.91ms +[2025-09-11 14:23:35] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 14:23:35] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 14:23:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 14:23:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 14:23:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 14:23:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 14:23:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:23:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:23:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 14:23:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 14:23:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 14:23:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 14:23:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 14:23:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 14:23:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 14:23:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 14:23:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 14:23:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 14:23:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 14:23:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 14:23:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 14:23:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 14:23:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 14:23:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 14:23:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 14:23:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 14:23:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 14:23:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 14:23:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 14:23:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 14:23:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 14:23:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 14:23:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 14:23:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 14:23:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 14:23:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 14:23:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 14:23:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 14:23:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 14:23:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 14:23:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 14:23:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 14:23:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 14:23:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 14:23:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 14:23:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 14:23:45] [Rank 0] PRINT: step:5600/10000 val_loss:4.3601 total_sharp:1.3408e-04 L1_sharp:1.7749e-03 L2_sharp:1.7224e-03 L3_sharp:6.8665e-03 L4_sharp:7.2081e-03 L5_sharp:1.2530e-02 L6_sharp:1.7167e-02 L7_sharp:2.6920e-02 L8_sharp:3.5570e-02 L9_sharp:3.7560e-02 L10_sharp:5.5575e-02 L11_sharp:1.0066e-01 L12_sharp:1.2062e+00 total_fnorm:7.1000e+01 total_l1_linf:1.3312e+05 total_spectral:3.5500e+01 L1_fnorm:2.4512e-01 L2_fnorm:2.4414e-01 L3_fnorm:2.4316e-01 L4_fnorm:2.4316e-01 L5_fnorm:2.4316e-01 L6_fnorm:2.4316e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.3926e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4414e-01 L11_fnorm:2.4414e-01 L12_fnorm:2.4805e-01 L1_l1linf:6.3477e-02 L2_l1linf:6.1523e-02 L3_l1linf:6.1279e-02 L4_l1linf:6.0303e-02 L5_l1linf:6.1035e-02 L6_l1linf:5.9326e-02 L7_l1linf:6.0303e-02 L8_l1linf:5.9570e-02 L9_l1linf:5.8105e-02 L10_l1linf:5.7861e-02 L11_l1linf:5.8838e-02 L12_l1linf:6.3477e-02 L1_spectral:3.1987e-03 L2_spectral:3.2040e-03 L3_spectral:3.2157e-03 L4_spectral:3.2171e-03 L5_spectral:3.2009e-03 L6_spectral:3.1890e-03 L7_spectral:3.2085e-03 L8_spectral:3.1847e-03 L9_spectral:3.2110e-03 L10_spectral:3.2199e-03 L11_spectral:3.2111e-03 L12_spectral:3.2196e-03 train_time:240141ms step_avg:42.88ms +[2025-09-11 14:23:45] [Rank 0] PRINT: step:5600/10000 
val_loss:4.3601 total_sharp:1.3408e-04 L1_sharp:1.7749e-03 L2_sharp:1.7224e-03 L3_sharp:6.8665e-03 L4_sharp:7.2081e-03 L5_sharp:1.2530e-02 L6_sharp:1.7167e-02 L7_sharp:2.6920e-02 L8_sharp:3.5570e-02 L9_sharp:3.7560e-02 L10_sharp:5.5575e-02 L11_sharp:1.0066e-01 L12_sharp:1.2062e+00 total_fnorm:7.1000e+01 total_l1_linf:1.3312e+05 total_spectral:3.5500e+01 L1_fnorm:2.4512e-01 L2_fnorm:2.4414e-01 L3_fnorm:2.4316e-01 L4_fnorm:2.4316e-01 L5_fnorm:2.4316e-01 L6_fnorm:2.4316e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.3926e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4414e-01 L11_fnorm:2.4414e-01 L12_fnorm:2.4805e-01 L1_l1linf:6.3477e-02 L2_l1linf:6.1523e-02 L3_l1linf:6.1279e-02 L4_l1linf:6.0303e-02 L5_l1linf:6.1035e-02 L6_l1linf:5.9326e-02 L7_l1linf:6.0303e-02 L8_l1linf:5.9570e-02 L9_l1linf:5.8105e-02 L10_l1linf:5.7861e-02 L11_l1linf:5.8838e-02 L12_l1linf:6.3477e-02 L1_spectral:3.1987e-03 L2_spectral:3.2040e-03 L3_spectral:3.2157e-03 L4_spectral:3.2171e-03 L5_spectral:3.2009e-03 L6_spectral:3.1890e-03 L7_spectral:3.2085e-03 L8_spectral:3.1847e-03 L9_spectral:3.2110e-03 L10_spectral:3.2199e-03 L11_spectral:3.2111e-03 L12_spectral:3.2196e-03 train_time:240141ms step_avg:42.88ms +[2025-09-11 14:23:46] [Rank 0] step:5601/10000 train_time:241725ms step_avg:43.16ms +[2025-09-11 14:23:46] [Rank 0] step:5601/10000 train_time:241725ms step_avg:43.16ms +[2025-09-11 14:23:47] [Rank 0] step:5621/10000 train_time:242425ms step_avg:43.13ms +[2025-09-11 14:23:47] [Rank 0] step:5621/10000 train_time:242425ms step_avg:43.13ms +[2025-09-11 14:23:48] [Rank 0] step:5641/10000 train_time:243115ms step_avg:43.10ms +[2025-09-11 14:23:48] [Rank 0] step:5641/10000 train_time:243115ms step_avg:43.10ms +[2025-09-11 14:23:49] [Rank 0] step:5661/10000 train_time:243803ms step_avg:43.07ms +[2025-09-11 14:23:49] [Rank 0] step:5661/10000 train_time:243803ms step_avg:43.07ms +[2025-09-11 14:23:49] [Rank 0] step:5681/10000 train_time:244493ms step_avg:43.04ms +[2025-09-11 14:23:49] [Rank 0] step:5681/10000 
train_time:244493ms step_avg:43.04ms +[2025-09-11 14:23:50] [Rank 0] step:5701/10000 train_time:245185ms step_avg:43.01ms +[2025-09-11 14:23:50] [Rank 0] step:5701/10000 train_time:245185ms step_avg:43.01ms +[2025-09-11 14:23:51] [Rank 0] step:5721/10000 train_time:245873ms step_avg:42.98ms +[2025-09-11 14:23:51] [Rank 0] step:5721/10000 train_time:245873ms step_avg:42.98ms +[2025-09-11 14:23:51] [Rank 0] step:5741/10000 train_time:246565ms step_avg:42.95ms +[2025-09-11 14:23:51] [Rank 0] step:5741/10000 train_time:246565ms step_avg:42.95ms +[2025-09-11 14:23:52] [Rank 0] step:5761/10000 train_time:247257ms step_avg:42.92ms +[2025-09-11 14:23:52] [Rank 0] step:5761/10000 train_time:247257ms step_avg:42.92ms +[2025-09-11 14:23:53] [Rank 0] step:5781/10000 train_time:247947ms step_avg:42.89ms +[2025-09-11 14:23:53] [Rank 0] step:5781/10000 train_time:247947ms step_avg:42.89ms +[2025-09-11 14:23:54] [Rank 0] step:5801/10000 train_time:248917ms step_avg:42.91ms +[2025-09-11 14:23:54] [Rank 0] step:5801/10000 train_time:248917ms step_avg:42.91ms +[2025-09-11 14:23:54] [Rank 0] step:5821/10000 train_time:249606ms step_avg:42.88ms +[2025-09-11 14:23:54] [Rank 0] step:5821/10000 train_time:249606ms step_avg:42.88ms +[2025-09-11 14:23:55] [Rank 0] step:5841/10000 train_time:250297ms step_avg:42.85ms +[2025-09-11 14:23:55] [Rank 0] step:5841/10000 train_time:250297ms step_avg:42.85ms +[2025-09-11 14:23:56] [Rank 0] step:5861/10000 train_time:251296ms step_avg:42.88ms +[2025-09-11 14:23:56] [Rank 0] step:5861/10000 train_time:251296ms step_avg:42.88ms +[2025-09-11 14:23:57] [Rank 0] step:5881/10000 train_time:251985ms step_avg:42.85ms +[2025-09-11 14:23:57] [Rank 0] step:5881/10000 train_time:251985ms step_avg:42.85ms +[2025-09-11 14:23:57] [Rank 0] step:5901/10000 train_time:252674ms step_avg:42.82ms +[2025-09-11 14:23:57] [Rank 0] step:5901/10000 train_time:252674ms step_avg:42.82ms +[2025-09-11 14:23:58] [Rank 0] step:5921/10000 train_time:253366ms step_avg:42.79ms 
+[2025-09-11 14:23:58] [Rank 0] step:5921/10000 train_time:253366ms step_avg:42.79ms +[2025-09-11 14:23:59] [Rank 0] step:5941/10000 train_time:254057ms step_avg:42.76ms +[2025-09-11 14:23:59] [Rank 0] step:5941/10000 train_time:254057ms step_avg:42.76ms +[2025-09-11 14:24:00] [Rank 0] step:5961/10000 train_time:254748ms step_avg:42.74ms +[2025-09-11 14:24:00] [Rank 0] step:5961/10000 train_time:254748ms step_avg:42.74ms +[2025-09-11 14:24:00] [Rank 0] step:5981/10000 train_time:255438ms step_avg:42.71ms +[2025-09-11 14:24:00] [Rank 0] step:5981/10000 train_time:255438ms step_avg:42.71ms +[2025-09-11 14:24:01] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 14:24:01] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 14:24:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 14:24:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 14:24:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 14:24:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 14:24:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:24:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:24:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 14:24:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 14:24:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 14:24:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 14:24:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 14:24:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 14:24:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 14:24:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 14:24:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 14:24:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 14:24:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 14:24:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 14:24:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 14:24:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 14:24:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 14:24:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 14:24:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 14:24:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 14:24:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 14:24:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 14:24:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 14:24:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 14:24:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 14:24:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 14:24:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 14:24:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 14:24:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 14:24:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 14:24:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 14:24:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 14:24:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 14:24:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 14:24:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 14:24:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 14:24:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 14:24:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 14:24:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 14:24:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 14:24:14] [Rank 0] PRINT: step:6000/10000 val_loss:4.3221 total_sharp:1.0082e-04 L1_sharp:9.1868e-04 L2_sharp:2.3923e-03 L3_sharp:3.2266e-03 L4_sharp:7.8303e-03 L5_sharp:1.0270e-02 L6_sharp:1.5834e-02 L7_sharp:2.2087e-02 L8_sharp:3.5915e-02 L9_sharp:3.8029e-02 L10_sharp:4.9603e-02 L11_sharp:8.2753e-02 L12_sharp:4.0139e-01 total_fnorm:7.2000e+01 total_l1_linf:1.3414e+05 total_spectral:3.6000e+01 L1_fnorm:2.4609e-01 L2_fnorm:2.4609e-01 L3_fnorm:2.4414e-01 L4_fnorm:2.4414e-01 L5_fnorm:2.4316e-01 L6_fnorm:2.4316e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.4023e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4414e-01 L11_fnorm:2.4316e-01 L12_fnorm:2.4512e-01 L1_l1linf:6.1768e-02 L2_l1linf:6.1279e-02 L3_l1linf:6.0303e-02 L4_l1linf:6.1279e-02 L5_l1linf:6.0303e-02 L6_l1linf:5.9082e-02 L7_l1linf:5.9326e-02 L8_l1linf:5.7861e-02 L9_l1linf:5.7373e-02 L10_l1linf:5.7373e-02 L11_l1linf:5.6641e-02 L12_l1linf:6.1279e-02 L1_spectral:3.1920e-03 L2_spectral:3.2016e-03 L3_spectral:3.2149e-03 L4_spectral:3.2023e-03 L5_spectral:3.1998e-03 L6_spectral:3.2039e-03 L7_spectral:3.2388e-03 L8_spectral:3.2168e-03 L9_spectral:3.2234e-03 L10_spectral:3.2089e-03 L11_spectral:3.2435e-03 L12_spectral:3.2210e-03 train_time:256112ms step_avg:42.69ms +[2025-09-11 14:24:14] [Rank 0] PRINT: step:6000/10000 val_loss:4.3221 total_sharp:1.0082e-04 L1_sharp:9.1868e-04 L2_sharp:2.3923e-03 L3_sharp:3.2266e-03 L4_sharp:7.8303e-03 L5_sharp:1.0270e-02 L6_sharp:1.5834e-02 L7_sharp:2.2087e-02 L8_sharp:3.5915e-02 L9_sharp:3.8029e-02 L10_sharp:4.9603e-02 L11_sharp:8.2753e-02 L12_sharp:4.0139e-01 total_fnorm:7.2000e+01 total_l1_linf:1.3414e+05 total_spectral:3.6000e+01 L1_fnorm:2.4609e-01 L2_fnorm:2.4609e-01 L3_fnorm:2.4414e-01 L4_fnorm:2.4414e-01 L5_fnorm:2.4316e-01 L6_fnorm:2.4316e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.4023e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4414e-01 L11_fnorm:2.4316e-01 L12_fnorm:2.4512e-01 L1_l1linf:6.1768e-02 L2_l1linf:6.1279e-02 L3_l1linf:6.0303e-02 L4_l1linf:6.1279e-02 L5_l1linf:6.0303e-02 
L6_l1linf:5.9082e-02 L7_l1linf:5.9326e-02 L8_l1linf:5.7861e-02 L9_l1linf:5.7373e-02 L10_l1linf:5.7373e-02 L11_l1linf:5.6641e-02 L12_l1linf:6.1279e-02 L1_spectral:3.1920e-03 L2_spectral:3.2016e-03 L3_spectral:3.2149e-03 L4_spectral:3.2023e-03 L5_spectral:3.1998e-03 L6_spectral:3.2039e-03 L7_spectral:3.2388e-03 L8_spectral:3.2168e-03 L9_spectral:3.2234e-03 L10_spectral:3.2089e-03 L11_spectral:3.2435e-03 L12_spectral:3.2210e-03 train_time:256112ms step_avg:42.69ms +[2025-09-11 14:24:16] [Rank 0] step:6001/10000 train_time:257601ms step_avg:42.93ms +[2025-09-11 14:24:16] [Rank 0] step:6001/10000 train_time:257601ms step_avg:42.93ms +[2025-09-11 14:24:17] [Rank 0] step:6021/10000 train_time:258298ms step_avg:42.90ms +[2025-09-11 14:24:17] [Rank 0] step:6021/10000 train_time:258298ms step_avg:42.90ms +[2025-09-11 14:24:17] [Rank 0] step:6041/10000 train_time:258992ms step_avg:42.87ms +[2025-09-11 14:24:17] [Rank 0] step:6041/10000 train_time:258992ms step_avg:42.87ms +[2025-09-11 14:24:18] [Rank 0] step:6061/10000 train_time:259684ms step_avg:42.85ms +[2025-09-11 14:24:18] [Rank 0] step:6061/10000 train_time:259684ms step_avg:42.85ms +[2025-09-11 14:24:19] [Rank 0] step:6081/10000 train_time:260378ms step_avg:42.82ms +[2025-09-11 14:24:19] [Rank 0] step:6081/10000 train_time:260378ms step_avg:42.82ms +[2025-09-11 14:24:19] [Rank 0] step:6101/10000 train_time:261069ms step_avg:42.79ms +[2025-09-11 14:24:19] [Rank 0] step:6101/10000 train_time:261069ms step_avg:42.79ms +[2025-09-11 14:24:20] [Rank 0] step:6121/10000 train_time:261764ms step_avg:42.76ms +[2025-09-11 14:24:20] [Rank 0] step:6121/10000 train_time:261764ms step_avg:42.76ms +[2025-09-11 14:24:21] [Rank 0] step:6141/10000 train_time:262457ms step_avg:42.74ms +[2025-09-11 14:24:21] [Rank 0] step:6141/10000 train_time:262457ms step_avg:42.74ms +[2025-09-11 14:24:21] [Rank 0] step:6161/10000 train_time:263149ms step_avg:42.71ms +[2025-09-11 14:24:21] [Rank 0] step:6161/10000 train_time:263149ms step_avg:42.71ms 
+[2025-09-11 14:24:22] [Rank 0] step:6181/10000 train_time:263840ms step_avg:42.69ms +[2025-09-11 14:24:22] [Rank 0] step:6181/10000 train_time:263840ms step_avg:42.69ms +[2025-09-11 14:24:23] [Rank 0] step:6201/10000 train_time:264534ms step_avg:42.66ms +[2025-09-11 14:24:23] [Rank 0] step:6201/10000 train_time:264534ms step_avg:42.66ms +[2025-09-11 14:24:23] [Rank 0] step:6221/10000 train_time:265228ms step_avg:42.63ms +[2025-09-11 14:24:23] [Rank 0] step:6221/10000 train_time:265228ms step_avg:42.63ms +[2025-09-11 14:24:24] [Rank 0] step:6241/10000 train_time:265921ms step_avg:42.61ms +[2025-09-11 14:24:24] [Rank 0] step:6241/10000 train_time:265921ms step_avg:42.61ms +[2025-09-11 14:24:25] [Rank 0] step:6261/10000 train_time:266611ms step_avg:42.58ms +[2025-09-11 14:24:25] [Rank 0] step:6261/10000 train_time:266611ms step_avg:42.58ms +[2025-09-11 14:24:26] [Rank 0] step:6281/10000 train_time:267304ms step_avg:42.56ms +[2025-09-11 14:24:26] [Rank 0] step:6281/10000 train_time:267304ms step_avg:42.56ms +[2025-09-11 14:24:26] [Rank 0] step:6301/10000 train_time:267995ms step_avg:42.53ms +[2025-09-11 14:24:26] [Rank 0] step:6301/10000 train_time:267995ms step_avg:42.53ms +[2025-09-11 14:24:27] [Rank 0] step:6321/10000 train_time:268691ms step_avg:42.51ms +[2025-09-11 14:24:27] [Rank 0] step:6321/10000 train_time:268691ms step_avg:42.51ms +[2025-09-11 14:24:28] [Rank 0] step:6341/10000 train_time:269384ms step_avg:42.48ms +[2025-09-11 14:24:28] [Rank 0] step:6341/10000 train_time:269384ms step_avg:42.48ms +[2025-09-11 14:24:28] [Rank 0] step:6361/10000 train_time:270080ms step_avg:42.46ms +[2025-09-11 14:24:28] [Rank 0] step:6361/10000 train_time:270080ms step_avg:42.46ms +[2025-09-11 14:24:29] [Rank 0] step:6381/10000 train_time:270773ms step_avg:42.43ms +[2025-09-11 14:24:29] [Rank 0] step:6381/10000 train_time:270773ms step_avg:42.43ms +[2025-09-11 14:24:30] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 14:24:30] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 14:24:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 14:24:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 14:24:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 14:24:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 14:24:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:24:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:24:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 14:24:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 14:24:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 14:24:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 14:24:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 14:24:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 14:24:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 14:24:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 14:24:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 14:24:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 14:24:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 14:24:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 14:24:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 14:24:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 14:24:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 14:24:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 14:24:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 14:24:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 14:24:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 14:24:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 14:24:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 14:24:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 14:24:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 14:24:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 14:24:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 14:24:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 14:24:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 14:24:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 14:24:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 14:24:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 14:24:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 14:24:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 14:24:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 14:24:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 14:24:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 14:24:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 14:24:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 14:24:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 14:24:40] [Rank 0] PRINT: step:6400/10000 val_loss:4.2921 total_sharp:1.0790e-04 L1_sharp:1.3782e-03 L2_sharp:1.1426e-03 L3_sharp:3.7057e-03 L4_sharp:7.2468e-03 L5_sharp:1.2358e-02 L6_sharp:2.4088e-02 L7_sharp:2.4087e-02 L8_sharp:3.9830e-02 L9_sharp:4.1571e-02 L10_sharp:5.4108e-02 L11_sharp:7.9918e-02 L12_sharp:3.2707e-01 total_fnorm:6.3000e+01 total_l1_linf:1.1315e+05 total_spectral:3.1500e+01 L1_fnorm:2.1875e-01 L2_fnorm:2.1875e-01 L3_fnorm:2.1777e-01 L4_fnorm:2.1680e-01 L5_fnorm:2.1680e-01 L6_fnorm:2.1680e-01 L7_fnorm:2.1680e-01 L8_fnorm:2.1289e-01 L9_fnorm:2.1680e-01 L10_fnorm:2.1680e-01 L11_fnorm:2.1582e-01 L12_fnorm:2.1777e-01 L1_l1linf:5.3467e-02 L2_l1linf:5.2734e-02 L3_l1linf:5.2002e-02 L4_l1linf:5.1758e-02 L5_l1linf:5.2246e-02 L6_l1linf:5.0781e-02 L7_l1linf:5.0293e-02 L8_l1linf:5.1514e-02 L9_l1linf:4.9072e-02 L10_l1linf:4.8584e-02 L11_l1linf:4.8096e-02 L12_l1linf:5.0781e-02 L1_spectral:2.9257e-03 L2_spectral:2.9189e-03 L3_spectral:2.9196e-03 L4_spectral:2.9114e-03 L5_spectral:2.9207e-03 L6_spectral:2.9112e-03 L7_spectral:2.9189e-03 L8_spectral:2.8986e-03 L9_spectral:2.9087e-03 L10_spectral:2.9147e-03 L11_spectral:2.9136e-03 L12_spectral:2.9122e-03 train_time:271446ms step_avg:42.41ms +[2025-09-11 14:24:40] [Rank 0] PRINT: step:6400/10000 
val_loss:4.2921 total_sharp:1.0790e-04 L1_sharp:1.3782e-03 L2_sharp:1.1426e-03 L3_sharp:3.7057e-03 L4_sharp:7.2468e-03 L5_sharp:1.2358e-02 L6_sharp:2.4088e-02 L7_sharp:2.4087e-02 L8_sharp:3.9830e-02 L9_sharp:4.1571e-02 L10_sharp:5.4108e-02 L11_sharp:7.9918e-02 L12_sharp:3.2707e-01 total_fnorm:6.3000e+01 total_l1_linf:1.1315e+05 total_spectral:3.1500e+01 L1_fnorm:2.1875e-01 L2_fnorm:2.1875e-01 L3_fnorm:2.1777e-01 L4_fnorm:2.1680e-01 L5_fnorm:2.1680e-01 L6_fnorm:2.1680e-01 L7_fnorm:2.1680e-01 L8_fnorm:2.1289e-01 L9_fnorm:2.1680e-01 L10_fnorm:2.1680e-01 L11_fnorm:2.1582e-01 L12_fnorm:2.1777e-01 L1_l1linf:5.3467e-02 L2_l1linf:5.2734e-02 L3_l1linf:5.2002e-02 L4_l1linf:5.1758e-02 L5_l1linf:5.2246e-02 L6_l1linf:5.0781e-02 L7_l1linf:5.0293e-02 L8_l1linf:5.1514e-02 L9_l1linf:4.9072e-02 L10_l1linf:4.8584e-02 L11_l1linf:4.8096e-02 L12_l1linf:5.0781e-02 L1_spectral:2.9257e-03 L2_spectral:2.9189e-03 L3_spectral:2.9196e-03 L4_spectral:2.9114e-03 L5_spectral:2.9207e-03 L6_spectral:2.9112e-03 L7_spectral:2.9189e-03 L8_spectral:2.8986e-03 L9_spectral:2.9087e-03 L10_spectral:2.9147e-03 L11_spectral:2.9136e-03 L12_spectral:2.9122e-03 train_time:271446ms step_avg:42.41ms +[2025-09-11 14:24:41] [Rank 0] step:6401/10000 train_time:272914ms step_avg:42.64ms +[2025-09-11 14:24:41] [Rank 0] step:6401/10000 train_time:272914ms step_avg:42.64ms +[2025-09-11 14:24:42] [Rank 0] step:6421/10000 train_time:273617ms step_avg:42.61ms +[2025-09-11 14:24:42] [Rank 0] step:6421/10000 train_time:273617ms step_avg:42.61ms +[2025-09-11 14:24:43] [Rank 0] step:6441/10000 train_time:274310ms step_avg:42.59ms +[2025-09-11 14:24:43] [Rank 0] step:6441/10000 train_time:274310ms step_avg:42.59ms +[2025-09-11 14:24:43] [Rank 0] step:6461/10000 train_time:275003ms step_avg:42.56ms +[2025-09-11 14:24:43] [Rank 0] step:6461/10000 train_time:275003ms step_avg:42.56ms +[2025-09-11 14:24:44] [Rank 0] step:6481/10000 train_time:275697ms step_avg:42.54ms +[2025-09-11 14:24:44] [Rank 0] step:6481/10000 
train_time:275697ms step_avg:42.54ms +[2025-09-11 14:24:45] [Rank 0] step:6501/10000 train_time:276393ms step_avg:42.52ms +[2025-09-11 14:24:45] [Rank 0] step:6501/10000 train_time:276393ms step_avg:42.52ms +[2025-09-11 14:24:46] [Rank 0] step:6521/10000 train_time:277085ms step_avg:42.49ms +[2025-09-11 14:24:46] [Rank 0] step:6521/10000 train_time:277085ms step_avg:42.49ms +[2025-09-11 14:24:46] [Rank 0] step:6541/10000 train_time:277777ms step_avg:42.47ms +[2025-09-11 14:24:46] [Rank 0] step:6541/10000 train_time:277777ms step_avg:42.47ms +[2025-09-11 14:24:47] [Rank 0] step:6561/10000 train_time:278469ms step_avg:42.44ms +[2025-09-11 14:24:47] [Rank 0] step:6561/10000 train_time:278469ms step_avg:42.44ms +[2025-09-11 14:24:48] [Rank 0] step:6581/10000 train_time:279162ms step_avg:42.42ms +[2025-09-11 14:24:48] [Rank 0] step:6581/10000 train_time:279162ms step_avg:42.42ms +[2025-09-11 14:24:48] [Rank 0] step:6601/10000 train_time:279856ms step_avg:42.40ms +[2025-09-11 14:24:48] [Rank 0] step:6601/10000 train_time:279856ms step_avg:42.40ms +[2025-09-11 14:24:49] [Rank 0] step:6621/10000 train_time:280546ms step_avg:42.37ms +[2025-09-11 14:24:49] [Rank 0] step:6621/10000 train_time:280546ms step_avg:42.37ms +[2025-09-11 14:24:50] [Rank 0] step:6641/10000 train_time:281239ms step_avg:42.35ms +[2025-09-11 14:24:50] [Rank 0] step:6641/10000 train_time:281239ms step_avg:42.35ms +[2025-09-11 14:24:50] [Rank 0] step:6661/10000 train_time:281932ms step_avg:42.33ms +[2025-09-11 14:24:50] [Rank 0] step:6661/10000 train_time:281932ms step_avg:42.33ms +[2025-09-11 14:24:51] [Rank 0] step:6681/10000 train_time:282632ms step_avg:42.30ms +[2025-09-11 14:24:51] [Rank 0] step:6681/10000 train_time:282632ms step_avg:42.30ms +[2025-09-11 14:24:52] [Rank 0] step:6701/10000 train_time:283331ms step_avg:42.28ms +[2025-09-11 14:24:52] [Rank 0] step:6701/10000 train_time:283331ms step_avg:42.28ms +[2025-09-11 14:24:52] [Rank 0] step:6721/10000 train_time:284029ms step_avg:42.26ms 
+[2025-09-11 14:24:52] [Rank 0] step:6721/10000 train_time:284029ms step_avg:42.26ms +[2025-09-11 14:24:53] [Rank 0] step:6741/10000 train_time:284729ms step_avg:42.24ms +[2025-09-11 14:24:53] [Rank 0] step:6741/10000 train_time:284729ms step_avg:42.24ms +[2025-09-11 14:24:54] [Rank 0] step:6761/10000 train_time:285426ms step_avg:42.22ms +[2025-09-11 14:24:54] [Rank 0] step:6761/10000 train_time:285426ms step_avg:42.22ms +[2025-09-11 14:24:55] [Rank 0] step:6781/10000 train_time:286126ms step_avg:42.20ms +[2025-09-11 14:24:55] [Rank 0] step:6781/10000 train_time:286126ms step_avg:42.20ms +[2025-09-11 14:24:55] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 14:24:55] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 14:24:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 14:24:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 14:24:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 14:24:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 14:24:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:24:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:24:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 14:24:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 14:24:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 14:24:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 14:25:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 14:25:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 14:25:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 14:25:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 14:25:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 14:25:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 14:25:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 14:25:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 14:25:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 14:25:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 14:25:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 14:25:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 14:25:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 14:25:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 14:25:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 14:25:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 14:25:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 14:25:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 14:25:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 14:25:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 14:25:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 14:25:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 14:25:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 14:25:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 14:25:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 14:25:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 14:25:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 14:25:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 14:25:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 14:25:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 14:25:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 14:25:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 14:25:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 14:25:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 14:25:05] [Rank 0] PRINT: step:6800/10000 val_loss:4.2569 total_sharp:8.7908e-05 L1_sharp:3.6760e-03 L2_sharp:2.7977e-03 L3_sharp:4.9649e-03 L4_sharp:6.9393e-03 L5_sharp:1.1050e-02 L6_sharp:1.7002e-02 L7_sharp:2.5895e-02 L8_sharp:3.4138e-02 L9_sharp:3.8132e-02 L10_sharp:4.9565e-02 L11_sharp:8.0557e-02 L12_sharp:4.1616e-01 total_fnorm:6.0750e+01 total_l1_linf:1.0803e+05 total_spectral:3.0250e+01 L1_fnorm:1.9141e-01 L2_fnorm:1.9043e-01 L3_fnorm:1.8848e-01 L4_fnorm:1.8848e-01 L5_fnorm:1.8848e-01 L6_fnorm:1.8848e-01 L7_fnorm:1.8848e-01 L8_fnorm:1.8457e-01 L9_fnorm:1.8848e-01 L10_fnorm:1.8848e-01 L11_fnorm:1.8652e-01 L12_fnorm:1.8750e-01 L1_l1linf:4.5166e-02 L2_l1linf:4.3701e-02 L3_l1linf:4.2725e-02 L4_l1linf:4.2725e-02 L5_l1linf:4.2480e-02 L6_l1linf:4.1748e-02 L7_l1linf:4.1992e-02 L8_l1linf:4.1992e-02 L9_l1linf:4.1016e-02 L10_l1linf:4.0771e-02 L11_l1linf:3.9307e-02 L12_l1linf:4.1992e-02 L1_spectral:2.5941e-03 L2_spectral:2.5915e-03 L3_spectral:2.5968e-03 L4_spectral:2.5964e-03 L5_spectral:2.5895e-03 L6_spectral:2.5948e-03 L7_spectral:2.6005e-03 L8_spectral:2.5746e-03 L9_spectral:2.6169e-03 L10_spectral:2.5997e-03 L11_spectral:2.5987e-03 L12_spectral:2.5809e-03 train_time:286805ms step_avg:42.18ms +[2025-09-11 14:25:05] [Rank 0] PRINT: step:6800/10000 val_loss:4.2569 total_sharp:8.7908e-05 L1_sharp:3.6760e-03 L2_sharp:2.7977e-03 L3_sharp:4.9649e-03 L4_sharp:6.9393e-03 L5_sharp:1.1050e-02 L6_sharp:1.7002e-02 L7_sharp:2.5895e-02 L8_sharp:3.4138e-02 L9_sharp:3.8132e-02 L10_sharp:4.9565e-02 L11_sharp:8.0557e-02 L12_sharp:4.1616e-01 total_fnorm:6.0750e+01 total_l1_linf:1.0803e+05 total_spectral:3.0250e+01 L1_fnorm:1.9141e-01 L2_fnorm:1.9043e-01 L3_fnorm:1.8848e-01 L4_fnorm:1.8848e-01 L5_fnorm:1.8848e-01 L6_fnorm:1.8848e-01 L7_fnorm:1.8848e-01 L8_fnorm:1.8457e-01 L9_fnorm:1.8848e-01 L10_fnorm:1.8848e-01 L11_fnorm:1.8652e-01 L12_fnorm:1.8750e-01 L1_l1linf:4.5166e-02 L2_l1linf:4.3701e-02 L3_l1linf:4.2725e-02 L4_l1linf:4.2725e-02 L5_l1linf:4.2480e-02 
L6_l1linf:4.1748e-02 L7_l1linf:4.1992e-02 L8_l1linf:4.1992e-02 L9_l1linf:4.1016e-02 L10_l1linf:4.0771e-02 L11_l1linf:3.9307e-02 L12_l1linf:4.1992e-02 L1_spectral:2.5941e-03 L2_spectral:2.5915e-03 L3_spectral:2.5968e-03 L4_spectral:2.5964e-03 L5_spectral:2.5895e-03 L6_spectral:2.5948e-03 L7_spectral:2.6005e-03 L8_spectral:2.5746e-03 L9_spectral:2.6169e-03 L10_spectral:2.5997e-03 L11_spectral:2.5987e-03 L12_spectral:2.5809e-03 train_time:286805ms step_avg:42.18ms +[2025-09-11 14:25:07] [Rank 0] step:6801/10000 train_time:288287ms step_avg:42.39ms +[2025-09-11 14:25:07] [Rank 0] step:6801/10000 train_time:288287ms step_avg:42.39ms +[2025-09-11 14:25:08] [Rank 0] step:6821/10000 train_time:289013ms step_avg:42.37ms +[2025-09-11 14:25:08] [Rank 0] step:6821/10000 train_time:289013ms step_avg:42.37ms +[2025-09-11 14:25:08] [Rank 0] step:6841/10000 train_time:289715ms step_avg:42.35ms +[2025-09-11 14:25:08] [Rank 0] step:6841/10000 train_time:289715ms step_avg:42.35ms +[2025-09-11 14:25:09] [Rank 0] step:6861/10000 train_time:290416ms step_avg:42.33ms +[2025-09-11 14:25:09] [Rank 0] step:6861/10000 train_time:290416ms step_avg:42.33ms +[2025-09-11 14:25:10] [Rank 0] step:6881/10000 train_time:291119ms step_avg:42.31ms +[2025-09-11 14:25:10] [Rank 0] step:6881/10000 train_time:291119ms step_avg:42.31ms +[2025-09-11 14:25:11] [Rank 0] step:6901/10000 train_time:291819ms step_avg:42.29ms +[2025-09-11 14:25:11] [Rank 0] step:6901/10000 train_time:291819ms step_avg:42.29ms +[2025-09-11 14:25:11] [Rank 0] step:6921/10000 train_time:292518ms step_avg:42.27ms +[2025-09-11 14:25:11] [Rank 0] step:6921/10000 train_time:292518ms step_avg:42.27ms +[2025-09-11 14:25:12] [Rank 0] step:6941/10000 train_time:293218ms step_avg:42.24ms +[2025-09-11 14:25:12] [Rank 0] step:6941/10000 train_time:293218ms step_avg:42.24ms +[2025-09-11 14:25:13] [Rank 0] step:6961/10000 train_time:293920ms step_avg:42.22ms +[2025-09-11 14:25:13] [Rank 0] step:6961/10000 train_time:293920ms step_avg:42.22ms 
+[2025-09-11 14:25:13] [Rank 0] step:6981/10000 train_time:294622ms step_avg:42.20ms +[2025-09-11 14:25:13] [Rank 0] step:6981/10000 train_time:294622ms step_avg:42.20ms +[2025-09-11 14:25:14] [Rank 0] step:7001/10000 train_time:295322ms step_avg:42.18ms +[2025-09-11 14:25:14] [Rank 0] step:7001/10000 train_time:295322ms step_avg:42.18ms +[2025-09-11 14:25:15] [Rank 0] step:7021/10000 train_time:296021ms step_avg:42.16ms +[2025-09-11 14:25:15] [Rank 0] step:7021/10000 train_time:296021ms step_avg:42.16ms +[2025-09-11 14:25:15] [Rank 0] step:7041/10000 train_time:296720ms step_avg:42.14ms +[2025-09-11 14:25:15] [Rank 0] step:7041/10000 train_time:296720ms step_avg:42.14ms +[2025-09-11 14:25:16] [Rank 0] step:7061/10000 train_time:297420ms step_avg:42.12ms +[2025-09-11 14:25:16] [Rank 0] step:7061/10000 train_time:297420ms step_avg:42.12ms +[2025-09-11 14:25:17] [Rank 0] step:7081/10000 train_time:298119ms step_avg:42.10ms +[2025-09-11 14:25:17] [Rank 0] step:7081/10000 train_time:298119ms step_avg:42.10ms +[2025-09-11 14:25:18] [Rank 0] step:7101/10000 train_time:298819ms step_avg:42.08ms +[2025-09-11 14:25:18] [Rank 0] step:7101/10000 train_time:298819ms step_avg:42.08ms +[2025-09-11 14:25:18] [Rank 0] step:7121/10000 train_time:299521ms step_avg:42.06ms +[2025-09-11 14:25:18] [Rank 0] step:7121/10000 train_time:299521ms step_avg:42.06ms +[2025-09-11 14:25:19] [Rank 0] step:7141/10000 train_time:300221ms step_avg:42.04ms +[2025-09-11 14:25:19] [Rank 0] step:7141/10000 train_time:300221ms step_avg:42.04ms +[2025-09-11 14:25:20] [Rank 0] step:7161/10000 train_time:300921ms step_avg:42.02ms +[2025-09-11 14:25:20] [Rank 0] step:7161/10000 train_time:300921ms step_avg:42.02ms +[2025-09-11 14:25:20] [Rank 0] step:7181/10000 train_time:301619ms step_avg:42.00ms +[2025-09-11 14:25:20] [Rank 0] step:7181/10000 train_time:301619ms step_avg:42.00ms +[2025-09-11 14:25:21] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 14:25:21] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 14:25:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 14:25:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 14:25:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 14:25:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 14:25:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:25:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:25:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 14:25:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 14:25:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 14:25:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 14:25:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 14:25:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 14:25:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 14:25:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 14:25:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 14:25:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 14:25:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 14:25:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 14:25:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 14:25:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 14:25:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 14:25:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 14:25:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 14:25:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 14:25:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 14:25:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 14:25:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 14:25:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 14:25:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 14:25:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 14:25:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 14:25:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 14:25:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 14:25:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 14:25:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 14:25:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 14:25:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 14:25:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 14:25:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 14:25:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 14:25:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 14:25:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 14:25:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 14:25:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 14:25:31] [Rank 0] PRINT: step:7200/10000 val_loss:4.2273 total_sharp:8.1236e-05 L1_sharp:1.3513e-03 L2_sharp:2.2226e-03 L3_sharp:3.0584e-03 L4_sharp:5.5896e-03 L5_sharp:1.0113e-02 L6_sharp:1.8321e-02 L7_sharp:2.5091e-02 L8_sharp:4.0910e-02 L9_sharp:4.2344e-02 L10_sharp:4.8252e-02 L11_sharp:7.5203e-02 L12_sharp:2.6556e-01 total_fnorm:5.3000e+01 total_l1_linf:9.0624e+04 total_spectral:2.6500e+01 L1_fnorm:1.6504e-01 L2_fnorm:1.6406e-01 L3_fnorm:1.6309e-01 L4_fnorm:1.6406e-01 L5_fnorm:1.6309e-01 L6_fnorm:1.6309e-01 L7_fnorm:1.6309e-01 L8_fnorm:1.5918e-01 L9_fnorm:1.6211e-01 L10_fnorm:1.6211e-01 L11_fnorm:1.6016e-01 L12_fnorm:1.6113e-01 L1_l1linf:3.6377e-02 L2_l1linf:3.7109e-02 L3_l1linf:3.5889e-02 L4_l1linf:3.5400e-02 L5_l1linf:3.5645e-02 L6_l1linf:3.5400e-02 L7_l1linf:3.4912e-02 L8_l1linf:3.3936e-02 L9_l1linf:3.3691e-02 L10_l1linf:3.2227e-02 L11_l1linf:3.2227e-02 L12_l1linf:3.3447e-02 L1_spectral:2.3210e-03 L2_spectral:2.3103e-03 L3_spectral:2.3274e-03 L4_spectral:2.3174e-03 L5_spectral:2.2891e-03 L6_spectral:2.3001e-03 L7_spectral:2.2969e-03 L8_spectral:2.2703e-03 L9_spectral:2.2893e-03 L10_spectral:2.2936e-03 L11_spectral:2.2998e-03 L12_spectral:2.2999e-03 train_time:302299ms step_avg:41.99ms +[2025-09-11 14:25:31] [Rank 0] PRINT: step:7200/10000 
val_loss:4.2273 total_sharp:8.1236e-05 L1_sharp:1.3513e-03 L2_sharp:2.2226e-03 L3_sharp:3.0584e-03 L4_sharp:5.5896e-03 L5_sharp:1.0113e-02 L6_sharp:1.8321e-02 L7_sharp:2.5091e-02 L8_sharp:4.0910e-02 L9_sharp:4.2344e-02 L10_sharp:4.8252e-02 L11_sharp:7.5203e-02 L12_sharp:2.6556e-01 total_fnorm:5.3000e+01 total_l1_linf:9.0624e+04 total_spectral:2.6500e+01 L1_fnorm:1.6504e-01 L2_fnorm:1.6406e-01 L3_fnorm:1.6309e-01 L4_fnorm:1.6406e-01 L5_fnorm:1.6309e-01 L6_fnorm:1.6309e-01 L7_fnorm:1.6309e-01 L8_fnorm:1.5918e-01 L9_fnorm:1.6211e-01 L10_fnorm:1.6211e-01 L11_fnorm:1.6016e-01 L12_fnorm:1.6113e-01 L1_l1linf:3.6377e-02 L2_l1linf:3.7109e-02 L3_l1linf:3.5889e-02 L4_l1linf:3.5400e-02 L5_l1linf:3.5645e-02 L6_l1linf:3.5400e-02 L7_l1linf:3.4912e-02 L8_l1linf:3.3936e-02 L9_l1linf:3.3691e-02 L10_l1linf:3.2227e-02 L11_l1linf:3.2227e-02 L12_l1linf:3.3447e-02 L1_spectral:2.3210e-03 L2_spectral:2.3103e-03 L3_spectral:2.3274e-03 L4_spectral:2.3174e-03 L5_spectral:2.2891e-03 L6_spectral:2.3001e-03 L7_spectral:2.2969e-03 L8_spectral:2.2703e-03 L9_spectral:2.2893e-03 L10_spectral:2.2936e-03 L11_spectral:2.2998e-03 L12_spectral:2.2999e-03 train_time:302299ms step_avg:41.99ms +[2025-09-11 14:25:33] [Rank 0] step:7201/10000 train_time:303787ms step_avg:42.19ms +[2025-09-11 14:25:33] [Rank 0] step:7201/10000 train_time:303787ms step_avg:42.19ms +[2025-09-11 14:25:34] [Rank 0] step:7221/10000 train_time:304501ms step_avg:42.17ms +[2025-09-11 14:25:34] [Rank 0] step:7221/10000 train_time:304501ms step_avg:42.17ms +[2025-09-11 14:25:34] [Rank 0] step:7241/10000 train_time:305201ms step_avg:42.15ms +[2025-09-11 14:25:34] [Rank 0] step:7241/10000 train_time:305201ms step_avg:42.15ms +[2025-09-11 14:25:35] [Rank 0] step:7261/10000 train_time:305905ms step_avg:42.13ms +[2025-09-11 14:25:35] [Rank 0] step:7261/10000 train_time:305905ms step_avg:42.13ms +[2025-09-11 14:25:36] [Rank 0] step:7281/10000 train_time:306612ms step_avg:42.11ms +[2025-09-11 14:25:36] [Rank 0] step:7281/10000 
train_time:306612ms step_avg:42.11ms +[2025-09-11 14:25:36] [Rank 0] step:7301/10000 train_time:307312ms step_avg:42.09ms +[2025-09-11 14:25:36] [Rank 0] step:7301/10000 train_time:307312ms step_avg:42.09ms +[2025-09-11 14:25:37] [Rank 0] step:7321/10000 train_time:308012ms step_avg:42.07ms +[2025-09-11 14:25:37] [Rank 0] step:7321/10000 train_time:308012ms step_avg:42.07ms +[2025-09-11 14:25:38] [Rank 0] step:7341/10000 train_time:308714ms step_avg:42.05ms +[2025-09-11 14:25:38] [Rank 0] step:7341/10000 train_time:308714ms step_avg:42.05ms +[2025-09-11 14:25:38] [Rank 0] step:7361/10000 train_time:309415ms step_avg:42.03ms +[2025-09-11 14:25:38] [Rank 0] step:7361/10000 train_time:309415ms step_avg:42.03ms +[2025-09-11 14:25:39] [Rank 0] step:7381/10000 train_time:310117ms step_avg:42.02ms +[2025-09-11 14:25:39] [Rank 0] step:7381/10000 train_time:310117ms step_avg:42.02ms +[2025-09-11 14:25:40] [Rank 0] step:7401/10000 train_time:310816ms step_avg:42.00ms +[2025-09-11 14:25:40] [Rank 0] step:7401/10000 train_time:310816ms step_avg:42.00ms +[2025-09-11 14:25:41] [Rank 0] step:7421/10000 train_time:311515ms step_avg:41.98ms +[2025-09-11 14:25:41] [Rank 0] step:7421/10000 train_time:311515ms step_avg:41.98ms +[2025-09-11 14:25:41] [Rank 0] step:7441/10000 train_time:312217ms step_avg:41.96ms +[2025-09-11 14:25:41] [Rank 0] step:7441/10000 train_time:312217ms step_avg:41.96ms +[2025-09-11 14:25:42] [Rank 0] step:7461/10000 train_time:312919ms step_avg:41.94ms +[2025-09-11 14:25:42] [Rank 0] step:7461/10000 train_time:312919ms step_avg:41.94ms +[2025-09-11 14:25:43] [Rank 0] step:7481/10000 train_time:313620ms step_avg:41.92ms +[2025-09-11 14:25:43] [Rank 0] step:7481/10000 train_time:313620ms step_avg:41.92ms +[2025-09-11 14:25:43] [Rank 0] step:7501/10000 train_time:314325ms step_avg:41.90ms +[2025-09-11 14:25:43] [Rank 0] step:7501/10000 train_time:314325ms step_avg:41.90ms +[2025-09-11 14:25:44] [Rank 0] step:7521/10000 train_time:315028ms step_avg:41.89ms 
+[2025-09-11 14:25:44] [Rank 0] step:7521/10000 train_time:315028ms step_avg:41.89ms +[2025-09-11 14:25:45] [Rank 0] step:7541/10000 train_time:315728ms step_avg:41.87ms +[2025-09-11 14:25:45] [Rank 0] step:7541/10000 train_time:315728ms step_avg:41.87ms +[2025-09-11 14:25:45] [Rank 0] step:7561/10000 train_time:316432ms step_avg:41.85ms +[2025-09-11 14:25:45] [Rank 0] step:7561/10000 train_time:316432ms step_avg:41.85ms +[2025-09-11 14:25:46] [Rank 0] step:7581/10000 train_time:317135ms step_avg:41.83ms +[2025-09-11 14:25:46] [Rank 0] step:7581/10000 train_time:317135ms step_avg:41.83ms +[2025-09-11 14:25:47] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 14:25:47] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 14:25:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 14:25:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 14:25:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 14:25:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 14:25:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:25:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:25:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 14:25:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 14:25:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 14:25:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 14:25:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 14:25:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 14:25:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 14:25:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 14:25:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 14:25:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 14:25:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 14:25:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 14:25:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 14:25:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 14:25:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 14:25:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 14:25:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 14:25:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 14:25:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 14:25:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 14:25:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 14:25:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 14:25:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 14:25:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 14:25:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 14:25:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 14:25:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 14:25:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 14:25:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 14:25:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 14:25:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 14:25:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 14:25:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 14:25:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 14:25:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 14:25:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 14:25:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 14:25:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 14:25:57] [Rank 0] PRINT: step:7600/10000 val_loss:4.2005 total_sharp:7.2760e-05 L1_sharp:2.4554e-03 L2_sharp:3.7373e-03 L3_sharp:4.4867e-03 L4_sharp:4.4604e-03 L5_sharp:9.1812e-03 L6_sharp:1.6574e-02 L7_sharp:2.0784e-02 L8_sharp:3.4771e-02 L9_sharp:3.2424e-02 L10_sharp:4.2957e-02 L11_sharp:7.7491e-02 L12_sharp:3.3852e-01 total_fnorm:4.3250e+01 total_l1_linf:6.9632e+04 total_spectral:2.1625e+01 L1_fnorm:1.3867e-01 L2_fnorm:1.3672e-01 L3_fnorm:1.3672e-01 L4_fnorm:1.3672e-01 L5_fnorm:1.3574e-01 L6_fnorm:1.3672e-01 L7_fnorm:1.3672e-01 L8_fnorm:1.3281e-01 L9_fnorm:1.3574e-01 L10_fnorm:1.3574e-01 L11_fnorm:1.3477e-01 L12_fnorm:1.3379e-01 L1_l1linf:2.9541e-02 L2_l1linf:2.8320e-02 L3_l1linf:2.8076e-02 L4_l1linf:2.8198e-02 L5_l1linf:2.7710e-02 L6_l1linf:2.6978e-02 L7_l1linf:2.7832e-02 L8_l1linf:2.6733e-02 L9_l1linf:2.6123e-02 L10_l1linf:2.5757e-02 L11_l1linf:2.5024e-02 L12_l1linf:2.6978e-02 L1_spectral:2.0122e-03 L2_spectral:1.9992e-03 L3_spectral:1.9989e-03 L4_spectral:2.0126e-03 L5_spectral:1.9948e-03 L6_spectral:2.0005e-03 L7_spectral:1.9947e-03 L8_spectral:1.9579e-03 L9_spectral:1.9768e-03 L10_spectral:1.9716e-03 L11_spectral:1.9608e-03 L12_spectral:1.9716e-03 train_time:317817ms step_avg:41.82ms +[2025-09-11 14:25:57] [Rank 0] PRINT: step:7600/10000 val_loss:4.2005 total_sharp:7.2760e-05 L1_sharp:2.4554e-03 L2_sharp:3.7373e-03 L3_sharp:4.4867e-03 L4_sharp:4.4604e-03 L5_sharp:9.1812e-03 L6_sharp:1.6574e-02 L7_sharp:2.0784e-02 L8_sharp:3.4771e-02 L9_sharp:3.2424e-02 L10_sharp:4.2957e-02 L11_sharp:7.7491e-02 L12_sharp:3.3852e-01 total_fnorm:4.3250e+01 total_l1_linf:6.9632e+04 total_spectral:2.1625e+01 L1_fnorm:1.3867e-01 L2_fnorm:1.3672e-01 L3_fnorm:1.3672e-01 L4_fnorm:1.3672e-01 L5_fnorm:1.3574e-01 L6_fnorm:1.3672e-01 L7_fnorm:1.3672e-01 L8_fnorm:1.3281e-01 L9_fnorm:1.3574e-01 L10_fnorm:1.3574e-01 L11_fnorm:1.3477e-01 L12_fnorm:1.3379e-01 L1_l1linf:2.9541e-02 L2_l1linf:2.8320e-02 L3_l1linf:2.8076e-02 L4_l1linf:2.8198e-02 L5_l1linf:2.7710e-02 
L6_l1linf:2.6978e-02 L7_l1linf:2.7832e-02 L8_l1linf:2.6733e-02 L9_l1linf:2.6123e-02 L10_l1linf:2.5757e-02 L11_l1linf:2.5024e-02 L12_l1linf:2.6978e-02 L1_spectral:2.0122e-03 L2_spectral:1.9992e-03 L3_spectral:1.9989e-03 L4_spectral:2.0126e-03 L5_spectral:1.9948e-03 L6_spectral:2.0005e-03 L7_spectral:1.9947e-03 L8_spectral:1.9579e-03 L9_spectral:1.9768e-03 L10_spectral:1.9716e-03 L11_spectral:1.9608e-03 L12_spectral:1.9716e-03 train_time:317817ms step_avg:41.82ms +[2025-09-11 14:25:59] [Rank 0] step:7601/10000 train_time:319343ms step_avg:42.01ms +[2025-09-11 14:25:59] [Rank 0] step:7601/10000 train_time:319343ms step_avg:42.01ms +[2025-09-11 14:26:00] [Rank 0] step:7621/10000 train_time:320201ms step_avg:42.02ms +[2025-09-11 14:26:00] [Rank 0] step:7621/10000 train_time:320201ms step_avg:42.02ms +[2025-09-11 14:26:00] [Rank 0] step:7641/10000 train_time:321008ms step_avg:42.01ms +[2025-09-11 14:26:00] [Rank 0] step:7641/10000 train_time:321008ms step_avg:42.01ms +[2025-09-11 14:26:01] [Rank 0] step:7661/10000 train_time:321709ms step_avg:41.99ms +[2025-09-11 14:26:01] [Rank 0] step:7661/10000 train_time:321709ms step_avg:41.99ms +[2025-09-11 14:26:02] [Rank 0] step:7681/10000 train_time:322412ms step_avg:41.98ms +[2025-09-11 14:26:02] [Rank 0] step:7681/10000 train_time:322412ms step_avg:41.98ms +[2025-09-11 14:26:03] [Rank 0] step:7701/10000 train_time:323377ms step_avg:41.99ms +[2025-09-11 14:26:03] [Rank 0] step:7701/10000 train_time:323377ms step_avg:41.99ms +[2025-09-11 14:26:04] [Rank 0] step:7721/10000 train_time:324080ms step_avg:41.97ms +[2025-09-11 14:26:04] [Rank 0] step:7721/10000 train_time:324080ms step_avg:41.97ms +[2025-09-11 14:26:04] [Rank 0] step:7741/10000 train_time:324783ms step_avg:41.96ms +[2025-09-11 14:26:04] [Rank 0] step:7741/10000 train_time:324783ms step_avg:41.96ms +[2025-09-11 14:26:05] [Rank 0] step:7761/10000 train_time:325484ms step_avg:41.94ms +[2025-09-11 14:26:05] [Rank 0] step:7761/10000 train_time:325484ms step_avg:41.94ms 
+[2025-09-11 14:26:06] [Rank 0] step:7781/10000 train_time:326188ms step_avg:41.92ms +[2025-09-11 14:26:06] [Rank 0] step:7781/10000 train_time:326188ms step_avg:41.92ms +[2025-09-11 14:26:06] [Rank 0] step:7801/10000 train_time:326889ms step_avg:41.90ms +[2025-09-11 14:26:06] [Rank 0] step:7801/10000 train_time:326889ms step_avg:41.90ms +[2025-09-11 14:26:07] [Rank 0] step:7821/10000 train_time:327591ms step_avg:41.89ms +[2025-09-11 14:26:07] [Rank 0] step:7821/10000 train_time:327591ms step_avg:41.89ms +[2025-09-11 14:26:08] [Rank 0] step:7841/10000 train_time:328295ms step_avg:41.87ms +[2025-09-11 14:26:08] [Rank 0] step:7841/10000 train_time:328295ms step_avg:41.87ms +[2025-09-11 14:26:08] [Rank 0] step:7861/10000 train_time:328999ms step_avg:41.85ms +[2025-09-11 14:26:08] [Rank 0] step:7861/10000 train_time:328999ms step_avg:41.85ms +[2025-09-11 14:26:09] [Rank 0] step:7881/10000 train_time:329701ms step_avg:41.83ms +[2025-09-11 14:26:09] [Rank 0] step:7881/10000 train_time:329701ms step_avg:41.83ms +[2025-09-11 14:26:10] [Rank 0] step:7901/10000 train_time:330404ms step_avg:41.82ms +[2025-09-11 14:26:10] [Rank 0] step:7901/10000 train_time:330404ms step_avg:41.82ms +[2025-09-11 14:26:11] [Rank 0] step:7921/10000 train_time:331106ms step_avg:41.80ms +[2025-09-11 14:26:11] [Rank 0] step:7921/10000 train_time:331106ms step_avg:41.80ms +[2025-09-11 14:26:11] [Rank 0] step:7941/10000 train_time:331810ms step_avg:41.78ms +[2025-09-11 14:26:11] [Rank 0] step:7941/10000 train_time:331810ms step_avg:41.78ms +[2025-09-11 14:26:12] [Rank 0] step:7961/10000 train_time:332510ms step_avg:41.77ms +[2025-09-11 14:26:12] [Rank 0] step:7961/10000 train_time:332510ms step_avg:41.77ms +[2025-09-11 14:26:13] [Rank 0] step:7981/10000 train_time:333215ms step_avg:41.75ms +[2025-09-11 14:26:13] [Rank 0] step:7981/10000 train_time:333215ms step_avg:41.75ms +[2025-09-11 14:26:13] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 14:26:13] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 14:26:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 14:26:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 14:26:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 14:26:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 14:26:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:26:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:26:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 14:26:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 14:26:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 14:26:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 14:26:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 14:26:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 14:26:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 14:26:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 14:26:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 14:26:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 14:26:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 14:26:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 14:26:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 14:26:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 14:26:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 14:26:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 14:26:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 14:26:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 14:26:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 14:26:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 14:26:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 14:26:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 14:26:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 14:26:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 14:26:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 14:26:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 14:26:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 14:26:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 14:26:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 14:26:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 14:26:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 14:26:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 14:26:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 14:26:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 14:26:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 14:26:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 14:26:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 14:26:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 14:26:24] [Rank 0] PRINT: step:8000/10000 val_loss:4.1784 total_sharp:6.4942e-05 L1_sharp:3.3852e-03 L2_sharp:4.2645e-03 L3_sharp:4.3005e-03 L4_sharp:8.0700e-03 L5_sharp:1.2130e-02 L6_sharp:1.3534e-02 L7_sharp:2.3148e-02 L8_sharp:3.0747e-02 L9_sharp:3.1496e-02 L10_sharp:4.2671e-02 L11_sharp:7.1919e-02 L12_sharp:2.8305e-01 total_fnorm:3.7000e+01 total_l1_linf:5.6320e+04 total_spectral:1.8375e+01 L1_fnorm:1.1279e-01 L2_fnorm:1.1230e-01 L3_fnorm:1.1230e-01 L4_fnorm:1.1230e-01 L5_fnorm:1.1084e-01 L6_fnorm:1.1182e-01 L7_fnorm:1.1182e-01 L8_fnorm:1.0791e-01 L9_fnorm:1.1035e-01 L10_fnorm:1.1035e-01 L11_fnorm:1.0938e-01 L12_fnorm:1.0840e-01 L1_l1linf:2.2095e-02 L2_l1linf:2.1973e-02 L3_l1linf:2.1362e-02 L4_l1linf:2.1118e-02 L5_l1linf:2.0508e-02 L6_l1linf:2.0264e-02 L7_l1linf:2.0508e-02 L8_l1linf:1.9775e-02 L9_l1linf:1.9653e-02 L10_l1linf:1.9775e-02 L11_l1linf:1.9409e-02 L12_l1linf:1.9531e-02 L1_spectral:1.6876e-03 L2_spectral:1.6897e-03 L3_spectral:1.6772e-03 L4_spectral:1.6957e-03 L5_spectral:1.6753e-03 L6_spectral:1.6851e-03 L7_spectral:1.6785e-03 L8_spectral:1.6256e-03 L9_spectral:1.6491e-03 L10_spectral:1.6503e-03 L11_spectral:1.6389e-03 L12_spectral:1.6458e-03 train_time:333894ms step_avg:41.74ms +[2025-09-11 14:26:24] [Rank 0] PRINT: step:8000/10000 
val_loss:4.1784 total_sharp:6.4942e-05 L1_sharp:3.3852e-03 L2_sharp:4.2645e-03 L3_sharp:4.3005e-03 L4_sharp:8.0700e-03 L5_sharp:1.2130e-02 L6_sharp:1.3534e-02 L7_sharp:2.3148e-02 L8_sharp:3.0747e-02 L9_sharp:3.1496e-02 L10_sharp:4.2671e-02 L11_sharp:7.1919e-02 L12_sharp:2.8305e-01 total_fnorm:3.7000e+01 total_l1_linf:5.6320e+04 total_spectral:1.8375e+01 L1_fnorm:1.1279e-01 L2_fnorm:1.1230e-01 L3_fnorm:1.1230e-01 L4_fnorm:1.1230e-01 L5_fnorm:1.1084e-01 L6_fnorm:1.1182e-01 L7_fnorm:1.1182e-01 L8_fnorm:1.0791e-01 L9_fnorm:1.1035e-01 L10_fnorm:1.1035e-01 L11_fnorm:1.0938e-01 L12_fnorm:1.0840e-01 L1_l1linf:2.2095e-02 L2_l1linf:2.1973e-02 L3_l1linf:2.1362e-02 L4_l1linf:2.1118e-02 L5_l1linf:2.0508e-02 L6_l1linf:2.0264e-02 L7_l1linf:2.0508e-02 L8_l1linf:1.9775e-02 L9_l1linf:1.9653e-02 L10_l1linf:1.9775e-02 L11_l1linf:1.9409e-02 L12_l1linf:1.9531e-02 L1_spectral:1.6876e-03 L2_spectral:1.6897e-03 L3_spectral:1.6772e-03 L4_spectral:1.6957e-03 L5_spectral:1.6753e-03 L6_spectral:1.6851e-03 L7_spectral:1.6785e-03 L8_spectral:1.6256e-03 L9_spectral:1.6491e-03 L10_spectral:1.6503e-03 L11_spectral:1.6389e-03 L12_spectral:1.6458e-03 train_time:333894ms step_avg:41.74ms +[2025-09-11 14:26:25] [Rank 0] step:8001/10000 train_time:335459ms step_avg:41.93ms +[2025-09-11 14:26:25] [Rank 0] step:8001/10000 train_time:335459ms step_avg:41.93ms +[2025-09-11 14:26:26] [Rank 0] step:8021/10000 train_time:336186ms step_avg:41.91ms +[2025-09-11 14:26:26] [Rank 0] step:8021/10000 train_time:336186ms step_avg:41.91ms +[2025-09-11 14:26:27] [Rank 0] step:8041/10000 train_time:336889ms step_avg:41.90ms +[2025-09-11 14:26:27] [Rank 0] step:8041/10000 train_time:336889ms step_avg:41.90ms +[2025-09-11 14:26:27] [Rank 0] step:8061/10000 train_time:337594ms step_avg:41.88ms +[2025-09-11 14:26:27] [Rank 0] step:8061/10000 train_time:337594ms step_avg:41.88ms +[2025-09-11 14:26:28] [Rank 0] step:8081/10000 train_time:338296ms step_avg:41.86ms +[2025-09-11 14:26:28] [Rank 0] step:8081/10000 
train_time:338296ms step_avg:41.86ms +[2025-09-11 14:26:29] [Rank 0] step:8101/10000 train_time:339000ms step_avg:41.85ms +[2025-09-11 14:26:29] [Rank 0] step:8101/10000 train_time:339000ms step_avg:41.85ms +[2025-09-11 14:26:29] [Rank 0] step:8121/10000 train_time:339708ms step_avg:41.83ms +[2025-09-11 14:26:29] [Rank 0] step:8121/10000 train_time:339708ms step_avg:41.83ms +[2025-09-11 14:26:31] [Rank 0] step:8141/10000 train_time:341156ms step_avg:41.91ms +[2025-09-11 14:26:31] [Rank 0] step:8141/10000 train_time:341156ms step_avg:41.91ms +[2025-09-11 14:26:32] [Rank 0] step:8161/10000 train_time:341862ms step_avg:41.89ms +[2025-09-11 14:26:32] [Rank 0] step:8161/10000 train_time:341862ms step_avg:41.89ms +[2025-09-11 14:26:32] [Rank 0] step:8181/10000 train_time:342577ms step_avg:41.87ms +[2025-09-11 14:26:32] [Rank 0] step:8181/10000 train_time:342577ms step_avg:41.87ms +[2025-09-11 14:26:33] [Rank 0] step:8201/10000 train_time:343287ms step_avg:41.86ms +[2025-09-11 14:26:33] [Rank 0] step:8201/10000 train_time:343287ms step_avg:41.86ms +[2025-09-11 14:26:34] [Rank 0] step:8221/10000 train_time:343996ms step_avg:41.84ms +[2025-09-11 14:26:34] [Rank 0] step:8221/10000 train_time:343996ms step_avg:41.84ms +[2025-09-11 14:26:34] [Rank 0] step:8241/10000 train_time:344714ms step_avg:41.83ms +[2025-09-11 14:26:34] [Rank 0] step:8241/10000 train_time:344714ms step_avg:41.83ms +[2025-09-11 14:26:35] [Rank 0] step:8261/10000 train_time:345422ms step_avg:41.81ms +[2025-09-11 14:26:35] [Rank 0] step:8261/10000 train_time:345422ms step_avg:41.81ms +[2025-09-11 14:26:36] [Rank 0] step:8281/10000 train_time:346128ms step_avg:41.80ms +[2025-09-11 14:26:36] [Rank 0] step:8281/10000 train_time:346128ms step_avg:41.80ms +[2025-09-11 14:26:37] [Rank 0] step:8301/10000 train_time:346837ms step_avg:41.78ms +[2025-09-11 14:26:37] [Rank 0] step:8301/10000 train_time:346837ms step_avg:41.78ms +[2025-09-11 14:26:37] [Rank 0] step:8321/10000 train_time:347544ms step_avg:41.77ms 
+[2025-09-11 14:26:37] [Rank 0] step:8321/10000 train_time:347544ms step_avg:41.77ms +[2025-09-11 14:26:38] [Rank 0] step:8341/10000 train_time:348260ms step_avg:41.75ms +[2025-09-11 14:26:38] [Rank 0] step:8341/10000 train_time:348260ms step_avg:41.75ms +[2025-09-11 14:26:39] [Rank 0] step:8361/10000 train_time:348964ms step_avg:41.74ms +[2025-09-11 14:26:39] [Rank 0] step:8361/10000 train_time:348964ms step_avg:41.74ms +[2025-09-11 14:26:39] [Rank 0] step:8381/10000 train_time:349677ms step_avg:41.72ms +[2025-09-11 14:26:39] [Rank 0] step:8381/10000 train_time:349677ms step_avg:41.72ms +[2025-09-11 14:26:40] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 14:26:40] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 14:26:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 14:26:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 14:26:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 14:26:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 14:26:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:26:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:26:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 14:26:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 14:26:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 14:26:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 14:26:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 14:26:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 14:26:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 14:26:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 14:26:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 14:26:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 14:26:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 14:26:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 14:26:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 14:26:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 14:26:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 14:26:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 14:26:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 14:26:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 14:26:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 14:26:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 14:26:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 14:26:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 14:26:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 14:26:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 14:26:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 14:26:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 14:26:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 14:26:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 14:26:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 14:26:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 14:26:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 14:26:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 14:26:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 14:26:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 14:26:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 14:26:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 14:26:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 14:26:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 14:26:51] [Rank 0] PRINT: step:8400/10000 val_loss:4.1612 total_sharp:5.2774e-05 L1_sharp:3.1780e-03 L2_sharp:2.4420e-03 L3_sharp:4.0177e-03 L4_sharp:7.8046e-03 L5_sharp:8.8363e-03 L6_sharp:1.3714e-02 L7_sharp:1.9424e-02 L8_sharp:2.7744e-02 L9_sharp:2.7955e-02 L10_sharp:3.5598e-02 L11_sharp:6.0462e-02 L12_sharp:3.4943e-01 total_fnorm:2.8875e+01 total_l1_linf:4.0192e+04 total_spectral:1.4438e+01 L1_fnorm:8.8379e-02 L2_fnorm:8.7891e-02 L3_fnorm:8.7891e-02 L4_fnorm:8.7402e-02 L5_fnorm:8.6426e-02 L6_fnorm:8.6914e-02 L7_fnorm:8.7402e-02 L8_fnorm:8.3984e-02 L9_fnorm:8.5938e-02 L10_fnorm:8.5938e-02 L11_fnorm:8.4961e-02 L12_fnorm:8.4473e-02 L1_l1linf:1.6113e-02 L2_l1linf:1.5747e-02 L3_l1linf:1.5747e-02 L4_l1linf:1.5381e-02 L5_l1linf:1.4648e-02 L6_l1linf:1.4465e-02 L7_l1linf:1.4771e-02 L8_l1linf:1.4282e-02 L9_l1linf:1.4160e-02 L10_l1linf:1.4038e-02 L11_l1linf:1.3367e-02 L12_l1linf:1.4282e-02 L1_spectral:1.3623e-03 L2_spectral:1.3489e-03 L3_spectral:1.3508e-03 L4_spectral:1.3614e-03 L5_spectral:1.3352e-03 L6_spectral:1.3415e-03 L7_spectral:1.3358e-03 L8_spectral:1.3131e-03 L9_spectral:1.3209e-03 L10_spectral:1.3129e-03 L11_spectral:1.2967e-03 L12_spectral:1.3203e-03 train_time:350369ms step_avg:41.71ms +[2025-09-11 14:26:51] [Rank 0] PRINT: step:8400/10000 val_loss:4.1612 total_sharp:5.2774e-05 L1_sharp:3.1780e-03 L2_sharp:2.4420e-03 L3_sharp:4.0177e-03 L4_sharp:7.8046e-03 L5_sharp:8.8363e-03 L6_sharp:1.3714e-02 L7_sharp:1.9424e-02 L8_sharp:2.7744e-02 L9_sharp:2.7955e-02 L10_sharp:3.5598e-02 L11_sharp:6.0462e-02 L12_sharp:3.4943e-01 total_fnorm:2.8875e+01 total_l1_linf:4.0192e+04 total_spectral:1.4438e+01 L1_fnorm:8.8379e-02 L2_fnorm:8.7891e-02 L3_fnorm:8.7891e-02 L4_fnorm:8.7402e-02 L5_fnorm:8.6426e-02 L6_fnorm:8.6914e-02 L7_fnorm:8.7402e-02 L8_fnorm:8.3984e-02 L9_fnorm:8.5938e-02 L10_fnorm:8.5938e-02 L11_fnorm:8.4961e-02 L12_fnorm:8.4473e-02 L1_l1linf:1.6113e-02 L2_l1linf:1.5747e-02 L3_l1linf:1.5747e-02 L4_l1linf:1.5381e-02 L5_l1linf:1.4648e-02 
L6_l1linf:1.4465e-02 L7_l1linf:1.4771e-02 L8_l1linf:1.4282e-02 L9_l1linf:1.4160e-02 L10_l1linf:1.4038e-02 L11_l1linf:1.3367e-02 L12_l1linf:1.4282e-02 L1_spectral:1.3623e-03 L2_spectral:1.3489e-03 L3_spectral:1.3508e-03 L4_spectral:1.3614e-03 L5_spectral:1.3352e-03 L6_spectral:1.3415e-03 L7_spectral:1.3358e-03 L8_spectral:1.3131e-03 L9_spectral:1.3209e-03 L10_spectral:1.3129e-03 L11_spectral:1.2967e-03 L12_spectral:1.3203e-03 train_time:350369ms step_avg:41.71ms +[2025-09-11 14:26:53] [Rank 0] step:8401/10000 train_time:352300ms step_avg:41.94ms +[2025-09-11 14:26:53] [Rank 0] step:8401/10000 train_time:352300ms step_avg:41.94ms +[2025-09-11 14:26:54] [Rank 0] step:8421/10000 train_time:353018ms step_avg:41.92ms +[2025-09-11 14:26:54] [Rank 0] step:8421/10000 train_time:353018ms step_avg:41.92ms +[2025-09-11 14:26:54] [Rank 0] step:8441/10000 train_time:353730ms step_avg:41.91ms +[2025-09-11 14:26:54] [Rank 0] step:8441/10000 train_time:353730ms step_avg:41.91ms +[2025-09-11 14:26:55] [Rank 0] step:8461/10000 train_time:354443ms step_avg:41.89ms +[2025-09-11 14:26:55] [Rank 0] step:8461/10000 train_time:354443ms step_avg:41.89ms +[2025-09-11 14:26:56] [Rank 0] step:8481/10000 train_time:355155ms step_avg:41.88ms +[2025-09-11 14:26:56] [Rank 0] step:8481/10000 train_time:355155ms step_avg:41.88ms +[2025-09-11 14:26:56] [Rank 0] step:8501/10000 train_time:355866ms step_avg:41.86ms +[2025-09-11 14:26:56] [Rank 0] step:8501/10000 train_time:355866ms step_avg:41.86ms +[2025-09-11 14:26:57] [Rank 0] step:8521/10000 train_time:356575ms step_avg:41.85ms +[2025-09-11 14:26:57] [Rank 0] step:8521/10000 train_time:356575ms step_avg:41.85ms +[2025-09-11 14:26:58] [Rank 0] step:8541/10000 train_time:357284ms step_avg:41.83ms +[2025-09-11 14:26:58] [Rank 0] step:8541/10000 train_time:357284ms step_avg:41.83ms +[2025-09-11 14:26:59] [Rank 0] step:8561/10000 train_time:357998ms step_avg:41.82ms +[2025-09-11 14:26:59] [Rank 0] step:8561/10000 train_time:357998ms step_avg:41.82ms 
+[2025-09-11 14:26:59] [Rank 0] step:8581/10000 train_time:358713ms step_avg:41.80ms +[2025-09-11 14:26:59] [Rank 0] step:8581/10000 train_time:358713ms step_avg:41.80ms +[2025-09-11 14:27:00] [Rank 0] step:8601/10000 train_time:359423ms step_avg:41.79ms +[2025-09-11 14:27:00] [Rank 0] step:8601/10000 train_time:359423ms step_avg:41.79ms +[2025-09-11 14:27:01] [Rank 0] step:8621/10000 train_time:360132ms step_avg:41.77ms +[2025-09-11 14:27:01] [Rank 0] step:8621/10000 train_time:360132ms step_avg:41.77ms +[2025-09-11 14:27:01] [Rank 0] step:8641/10000 train_time:360842ms step_avg:41.76ms +[2025-09-11 14:27:01] [Rank 0] step:8641/10000 train_time:360842ms step_avg:41.76ms +[2025-09-11 14:27:02] [Rank 0] step:8661/10000 train_time:361552ms step_avg:41.74ms +[2025-09-11 14:27:02] [Rank 0] step:8661/10000 train_time:361552ms step_avg:41.74ms +[2025-09-11 14:27:03] [Rank 0] step:8681/10000 train_time:362563ms step_avg:41.77ms +[2025-09-11 14:27:03] [Rank 0] step:8681/10000 train_time:362563ms step_avg:41.77ms +[2025-09-11 14:27:04] [Rank 0] step:8701/10000 train_time:363272ms step_avg:41.75ms +[2025-09-11 14:27:04] [Rank 0] step:8701/10000 train_time:363272ms step_avg:41.75ms +[2025-09-11 14:27:05] [Rank 0] step:8721/10000 train_time:363984ms step_avg:41.74ms +[2025-09-11 14:27:05] [Rank 0] step:8721/10000 train_time:363984ms step_avg:41.74ms +[2025-09-11 14:27:05] [Rank 0] step:8741/10000 train_time:364829ms step_avg:41.74ms +[2025-09-11 14:27:05] [Rank 0] step:8741/10000 train_time:364829ms step_avg:41.74ms +[2025-09-11 14:27:06] [Rank 0] step:8761/10000 train_time:365662ms step_avg:41.74ms +[2025-09-11 14:27:06] [Rank 0] step:8761/10000 train_time:365662ms step_avg:41.74ms +[2025-09-11 14:27:07] [Rank 0] step:8781/10000 train_time:366371ms step_avg:41.72ms +[2025-09-11 14:27:07] [Rank 0] step:8781/10000 train_time:366371ms step_avg:41.72ms +[2025-09-11 14:27:08] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 14:27:08] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 14:27:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 14:27:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 14:27:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 14:27:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 14:27:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:27:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:27:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 14:27:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 14:27:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 14:27:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 14:27:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 14:27:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 14:27:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 14:27:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 14:27:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 14:27:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 14:27:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 14:27:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 14:27:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 14:27:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 14:27:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 14:27:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 14:27:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 14:27:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 14:27:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 14:27:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 14:27:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 14:27:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 14:27:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 14:27:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 14:27:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 14:27:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 14:27:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 14:27:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 14:27:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 14:27:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 14:27:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 14:27:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 14:27:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 14:27:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 14:27:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 14:27:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 14:27:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 14:27:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 14:27:18] [Rank 0] PRINT: step:8800/10000 val_loss:4.1550 total_sharp:4.5206e-05 L1_sharp:2.2251e-04 L2_sharp:2.6961e-03 L3_sharp:4.0904e-03 L4_sharp:6.1637e-03 L5_sharp:7.7044e-03 L6_sharp:1.0952e-02 L7_sharp:1.7360e-02 L8_sharp:2.2982e-02 L9_sharp:2.6112e-02 L10_sharp:3.3269e-02 L11_sharp:5.0014e-02 L12_sharp:2.0851e-01 total_fnorm:2.1375e+01 total_l1_linf:2.6624e+04 total_spectral:1.0688e+01 L1_fnorm:6.3477e-02 L2_fnorm:6.2988e-02 L3_fnorm:6.2988e-02 L4_fnorm:6.2988e-02 L5_fnorm:6.2256e-02 L6_fnorm:6.2500e-02 L7_fnorm:6.2500e-02 L8_fnorm:6.0547e-02 L9_fnorm:6.1768e-02 L10_fnorm:6.1768e-02 L11_fnorm:6.0791e-02 L12_fnorm:5.9814e-02 L1_l1linf:1.0193e-02 L2_l1linf:1.0376e-02 L3_l1linf:1.0132e-02 L4_l1linf:9.7656e-03 L5_l1linf:9.8877e-03 L6_l1linf:9.7656e-03 L7_l1linf:9.7046e-03 L8_l1linf:9.5215e-03 L9_l1linf:9.3384e-03 L10_l1linf:8.9722e-03 L11_l1linf:8.7891e-03 L12_l1linf:8.9722e-03 L1_spectral:1.0061e-03 L2_spectral:1.0009e-03 L3_spectral:9.9359e-04 L4_spectral:1.0134e-03 L5_spectral:9.7786e-04 L6_spectral:9.9592e-04 L7_spectral:9.9746e-04 L8_spectral:9.7235e-04 L9_spectral:9.6797e-04 L10_spectral:9.6345e-04 L11_spectral:9.5654e-04 L12_spectral:9.7834e-04 train_time:367059ms step_avg:41.71ms +[2025-09-11 14:27:18] [Rank 0] PRINT: step:8800/10000 
val_loss:4.1550 total_sharp:4.5206e-05 L1_sharp:2.2251e-04 L2_sharp:2.6961e-03 L3_sharp:4.0904e-03 L4_sharp:6.1637e-03 L5_sharp:7.7044e-03 L6_sharp:1.0952e-02 L7_sharp:1.7360e-02 L8_sharp:2.2982e-02 L9_sharp:2.6112e-02 L10_sharp:3.3269e-02 L11_sharp:5.0014e-02 L12_sharp:2.0851e-01 total_fnorm:2.1375e+01 total_l1_linf:2.6624e+04 total_spectral:1.0688e+01 L1_fnorm:6.3477e-02 L2_fnorm:6.2988e-02 L3_fnorm:6.2988e-02 L4_fnorm:6.2988e-02 L5_fnorm:6.2256e-02 L6_fnorm:6.2500e-02 L7_fnorm:6.2500e-02 L8_fnorm:6.0547e-02 L9_fnorm:6.1768e-02 L10_fnorm:6.1768e-02 L11_fnorm:6.0791e-02 L12_fnorm:5.9814e-02 L1_l1linf:1.0193e-02 L2_l1linf:1.0376e-02 L3_l1linf:1.0132e-02 L4_l1linf:9.7656e-03 L5_l1linf:9.8877e-03 L6_l1linf:9.7656e-03 L7_l1linf:9.7046e-03 L8_l1linf:9.5215e-03 L9_l1linf:9.3384e-03 L10_l1linf:8.9722e-03 L11_l1linf:8.7891e-03 L12_l1linf:8.9722e-03 L1_spectral:1.0061e-03 L2_spectral:1.0009e-03 L3_spectral:9.9359e-04 L4_spectral:1.0134e-03 L5_spectral:9.7786e-04 L6_spectral:9.9592e-04 L7_spectral:9.9746e-04 L8_spectral:9.7235e-04 L9_spectral:9.6797e-04 L10_spectral:9.6345e-04 L11_spectral:9.5654e-04 L12_spectral:9.7834e-04 train_time:367059ms step_avg:41.71ms +[2025-09-11 14:27:20] [Rank 0] step:8801/10000 train_time:368795ms step_avg:41.90ms +[2025-09-11 14:27:20] [Rank 0] step:8801/10000 train_time:368795ms step_avg:41.90ms +[2025-09-11 14:27:21] [Rank 0] step:8821/10000 train_time:369509ms step_avg:41.89ms +[2025-09-11 14:27:21] [Rank 0] step:8821/10000 train_time:369509ms step_avg:41.89ms +[2025-09-11 14:27:21] [Rank 0] step:8841/10000 train_time:370219ms step_avg:41.88ms +[2025-09-11 14:27:21] [Rank 0] step:8841/10000 train_time:370219ms step_avg:41.88ms +[2025-09-11 14:27:22] [Rank 0] step:8861/10000 train_time:370928ms step_avg:41.86ms +[2025-09-11 14:27:22] [Rank 0] step:8861/10000 train_time:370928ms step_avg:41.86ms +[2025-09-11 14:27:23] [Rank 0] step:8881/10000 train_time:371639ms step_avg:41.85ms +[2025-09-11 14:27:23] [Rank 0] step:8881/10000 
train_time:371639ms step_avg:41.85ms +[2025-09-11 14:27:24] [Rank 0] step:8901/10000 train_time:372350ms step_avg:41.83ms +[2025-09-11 14:27:24] [Rank 0] step:8901/10000 train_time:372350ms step_avg:41.83ms +[2025-09-11 14:27:24] [Rank 0] step:8921/10000 train_time:373057ms step_avg:41.82ms +[2025-09-11 14:27:24] [Rank 0] step:8921/10000 train_time:373057ms step_avg:41.82ms +[2025-09-11 14:27:25] [Rank 0] step:8941/10000 train_time:373770ms step_avg:41.80ms +[2025-09-11 14:27:25] [Rank 0] step:8941/10000 train_time:373770ms step_avg:41.80ms +[2025-09-11 14:27:26] [Rank 0] step:8961/10000 train_time:374488ms step_avg:41.79ms +[2025-09-11 14:27:26] [Rank 0] step:8961/10000 train_time:374488ms step_avg:41.79ms +[2025-09-11 14:27:26] [Rank 0] step:8981/10000 train_time:375202ms step_avg:41.78ms +[2025-09-11 14:27:26] [Rank 0] step:8981/10000 train_time:375202ms step_avg:41.78ms +[2025-09-11 14:27:27] [Rank 0] step:9001/10000 train_time:375907ms step_avg:41.76ms +[2025-09-11 14:27:27] [Rank 0] step:9001/10000 train_time:375907ms step_avg:41.76ms +[2025-09-11 14:27:28] [Rank 0] step:9021/10000 train_time:376617ms step_avg:41.75ms +[2025-09-11 14:27:28] [Rank 0] step:9021/10000 train_time:376617ms step_avg:41.75ms +[2025-09-11 14:27:29] [Rank 0] step:9041/10000 train_time:377329ms step_avg:41.74ms +[2025-09-11 14:27:29] [Rank 0] step:9041/10000 train_time:377329ms step_avg:41.74ms +[2025-09-11 14:27:29] [Rank 0] step:9061/10000 train_time:378039ms step_avg:41.72ms +[2025-09-11 14:27:29] [Rank 0] step:9061/10000 train_time:378039ms step_avg:41.72ms +[2025-09-11 14:27:30] [Rank 0] step:9081/10000 train_time:378752ms step_avg:41.71ms +[2025-09-11 14:27:30] [Rank 0] step:9081/10000 train_time:378752ms step_avg:41.71ms +[2025-09-11 14:27:31] [Rank 0] step:9101/10000 train_time:379466ms step_avg:41.69ms +[2025-09-11 14:27:31] [Rank 0] step:9101/10000 train_time:379466ms step_avg:41.69ms +[2025-09-11 14:27:31] [Rank 0] step:9121/10000 train_time:380181ms step_avg:41.68ms 
+[2025-09-11 14:27:31] [Rank 0] step:9121/10000 train_time:380181ms step_avg:41.68ms +[2025-09-11 14:27:32] [Rank 0] step:9141/10000 train_time:380888ms step_avg:41.67ms +[2025-09-11 14:27:32] [Rank 0] step:9141/10000 train_time:380888ms step_avg:41.67ms +[2025-09-11 14:27:33] [Rank 0] step:9161/10000 train_time:381603ms step_avg:41.66ms +[2025-09-11 14:27:33] [Rank 0] step:9161/10000 train_time:381603ms step_avg:41.66ms +[2025-09-11 14:27:34] [Rank 0] step:9181/10000 train_time:382316ms step_avg:41.64ms +[2025-09-11 14:27:34] [Rank 0] step:9181/10000 train_time:382316ms step_avg:41.64ms +[2025-09-11 14:27:34] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 14:27:34] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 14:27:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 14:27:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 14:27:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 14:27:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 14:27:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:27:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:27:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 14:27:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 14:27:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 14:27:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 14:27:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 14:27:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 14:27:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 14:27:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 14:27:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 14:27:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 14:27:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 14:27:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 14:27:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 14:27:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 14:27:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 14:27:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 14:27:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 14:27:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 14:27:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 14:27:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 14:27:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 14:27:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 14:27:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 14:27:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 14:27:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 14:27:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 14:27:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 14:27:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 14:27:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 14:27:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 14:27:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 14:27:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 14:27:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 14:27:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 14:27:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 14:27:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 14:27:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 14:27:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 14:27:45] [Rank 0] PRINT: step:9200/10000 val_loss:4.1405 total_sharp:4.2490e-05 L1_sharp:2.9641e-03 L2_sharp:2.9147e-03 L3_sharp:5.8832e-03 L4_sharp:6.8234e-03 L5_sharp:8.0950e-03 L6_sharp:1.0957e-02 L7_sharp:1.8499e-02 L8_sharp:2.3161e-02 L9_sharp:2.2589e-02 L10_sharp:2.9093e-02 L11_sharp:4.6319e-02 L12_sharp:4.5378e-01 total_fnorm:1.4875e+01 total_l1_linf:1.6256e+04 total_spectral:7.4375e+00 L1_fnorm:4.2480e-02 L2_fnorm:4.1992e-02 L3_fnorm:4.1748e-02 L4_fnorm:4.1748e-02 L5_fnorm:4.1504e-02 L6_fnorm:4.1504e-02 L7_fnorm:4.1748e-02 L8_fnorm:4.0283e-02 L9_fnorm:4.1016e-02 L10_fnorm:4.1016e-02 L11_fnorm:4.0527e-02 L12_fnorm:4.0283e-02 L1_l1linf:6.2561e-03 L2_l1linf:5.8899e-03 L3_l1linf:5.7983e-03 L4_l1linf:5.6152e-03 L5_l1linf:5.7678e-03 L6_l1linf:5.6152e-03 L7_l1linf:5.5847e-03 L8_l1linf:5.7373e-03 L9_l1linf:5.6458e-03 L10_l1linf:5.2490e-03 L11_l1linf:5.2490e-03 L12_l1linf:6.1340e-03 L1_spectral:6.9058e-04 L2_spectral:6.8317e-04 L3_spectral:6.8417e-04 L4_spectral:6.8939e-04 L5_spectral:6.7276e-04 L6_spectral:6.8025e-04 L7_spectral:6.7098e-04 L8_spectral:6.6660e-04 L9_spectral:6.5460e-04 L10_spectral:6.5273e-04 L11_spectral:6.5267e-04 L12_spectral:6.6086e-04 train_time:383009ms step_avg:41.63ms +[2025-09-11 14:27:45] [Rank 0] PRINT: step:9200/10000 val_loss:4.1405 total_sharp:4.2490e-05 L1_sharp:2.9641e-03 L2_sharp:2.9147e-03 L3_sharp:5.8832e-03 L4_sharp:6.8234e-03 L5_sharp:8.0950e-03 L6_sharp:1.0957e-02 L7_sharp:1.8499e-02 L8_sharp:2.3161e-02 L9_sharp:2.2589e-02 L10_sharp:2.9093e-02 L11_sharp:4.6319e-02 L12_sharp:4.5378e-01 total_fnorm:1.4875e+01 total_l1_linf:1.6256e+04 total_spectral:7.4375e+00 L1_fnorm:4.2480e-02 L2_fnorm:4.1992e-02 L3_fnorm:4.1748e-02 L4_fnorm:4.1748e-02 L5_fnorm:4.1504e-02 L6_fnorm:4.1504e-02 L7_fnorm:4.1748e-02 L8_fnorm:4.0283e-02 L9_fnorm:4.1016e-02 L10_fnorm:4.1016e-02 L11_fnorm:4.0527e-02 L12_fnorm:4.0283e-02 L1_l1linf:6.2561e-03 L2_l1linf:5.8899e-03 L3_l1linf:5.7983e-03 L4_l1linf:5.6152e-03 L5_l1linf:5.7678e-03 
L6_l1linf:5.6152e-03 L7_l1linf:5.5847e-03 L8_l1linf:5.7373e-03 L9_l1linf:5.6458e-03 L10_l1linf:5.2490e-03 L11_l1linf:5.2490e-03 L12_l1linf:6.1340e-03 L1_spectral:6.9058e-04 L2_spectral:6.8317e-04 L3_spectral:6.8417e-04 L4_spectral:6.8939e-04 L5_spectral:6.7276e-04 L6_spectral:6.8025e-04 L7_spectral:6.7098e-04 L8_spectral:6.6660e-04 L9_spectral:6.5460e-04 L10_spectral:6.5273e-04 L11_spectral:6.5267e-04 L12_spectral:6.6086e-04 train_time:383009ms step_avg:41.63ms +[2025-09-11 14:27:47] [Rank 0] step:9201/10000 train_time:384941ms step_avg:41.84ms +[2025-09-11 14:27:47] [Rank 0] step:9201/10000 train_time:384941ms step_avg:41.84ms +[2025-09-11 14:27:48] [Rank 0] step:9221/10000 train_time:385682ms step_avg:41.83ms +[2025-09-11 14:27:48] [Rank 0] step:9221/10000 train_time:385682ms step_avg:41.83ms +[2025-09-11 14:27:48] [Rank 0] step:9241/10000 train_time:386391ms step_avg:41.81ms +[2025-09-11 14:27:48] [Rank 0] step:9241/10000 train_time:386391ms step_avg:41.81ms +[2025-09-11 14:27:49] [Rank 0] step:9261/10000 train_time:387102ms step_avg:41.80ms +[2025-09-11 14:27:49] [Rank 0] step:9261/10000 train_time:387102ms step_avg:41.80ms +[2025-09-11 14:27:50] [Rank 0] step:9281/10000 train_time:387814ms step_avg:41.79ms +[2025-09-11 14:27:50] [Rank 0] step:9281/10000 train_time:387814ms step_avg:41.79ms +[2025-09-11 14:27:51] [Rank 0] step:9301/10000 train_time:388522ms step_avg:41.77ms +[2025-09-11 14:27:51] [Rank 0] step:9301/10000 train_time:388522ms step_avg:41.77ms +[2025-09-11 14:27:51] [Rank 0] step:9321/10000 train_time:389237ms step_avg:41.76ms +[2025-09-11 14:27:51] [Rank 0] step:9321/10000 train_time:389237ms step_avg:41.76ms +[2025-09-11 14:27:52] [Rank 0] step:9341/10000 train_time:389947ms step_avg:41.75ms +[2025-09-11 14:27:52] [Rank 0] step:9341/10000 train_time:389947ms step_avg:41.75ms +[2025-09-11 14:27:53] [Rank 0] step:9361/10000 train_time:390652ms step_avg:41.73ms +[2025-09-11 14:27:53] [Rank 0] step:9361/10000 train_time:390652ms step_avg:41.73ms 
+[2025-09-11 14:27:53] [Rank 0] step:9381/10000 train_time:391363ms step_avg:41.72ms +[2025-09-11 14:27:53] [Rank 0] step:9381/10000 train_time:391363ms step_avg:41.72ms +[2025-09-11 14:27:54] [Rank 0] step:9401/10000 train_time:392077ms step_avg:41.71ms +[2025-09-11 14:27:54] [Rank 0] step:9401/10000 train_time:392077ms step_avg:41.71ms +[2025-09-11 14:27:55] [Rank 0] step:9421/10000 train_time:392789ms step_avg:41.69ms +[2025-09-11 14:27:55] [Rank 0] step:9421/10000 train_time:392789ms step_avg:41.69ms +[2025-09-11 14:27:56] [Rank 0] step:9441/10000 train_time:393502ms step_avg:41.68ms +[2025-09-11 14:27:56] [Rank 0] step:9441/10000 train_time:393502ms step_avg:41.68ms +[2025-09-11 14:27:56] [Rank 0] step:9461/10000 train_time:394213ms step_avg:41.67ms +[2025-09-11 14:27:56] [Rank 0] step:9461/10000 train_time:394213ms step_avg:41.67ms +[2025-09-11 14:27:57] [Rank 0] step:9481/10000 train_time:394925ms step_avg:41.65ms +[2025-09-11 14:27:57] [Rank 0] step:9481/10000 train_time:394925ms step_avg:41.65ms +[2025-09-11 14:27:58] [Rank 0] step:9501/10000 train_time:395638ms step_avg:41.64ms +[2025-09-11 14:27:58] [Rank 0] step:9501/10000 train_time:395638ms step_avg:41.64ms +[2025-09-11 14:27:58] [Rank 0] step:9521/10000 train_time:396352ms step_avg:41.63ms +[2025-09-11 14:27:58] [Rank 0] step:9521/10000 train_time:396352ms step_avg:41.63ms +[2025-09-11 14:27:59] [Rank 0] step:9541/10000 train_time:397061ms step_avg:41.62ms +[2025-09-11 14:27:59] [Rank 0] step:9541/10000 train_time:397061ms step_avg:41.62ms +[2025-09-11 14:28:00] [Rank 0] step:9561/10000 train_time:397772ms step_avg:41.60ms +[2025-09-11 14:28:00] [Rank 0] step:9561/10000 train_time:397772ms step_avg:41.60ms +[2025-09-11 14:28:01] [Rank 0] step:9581/10000 train_time:398484ms step_avg:41.59ms +[2025-09-11 14:28:01] [Rank 0] step:9581/10000 train_time:398484ms step_avg:41.59ms +[2025-09-11 14:28:01] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 14:28:01] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 14:28:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 14:28:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 14:28:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 14:28:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 14:28:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:28:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:28:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 14:28:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 14:28:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 14:28:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 14:28:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 14:28:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 14:28:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 14:28:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 14:28:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 14:28:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 14:28:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 14:28:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 14:28:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 14:28:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 14:28:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 14:28:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 14:28:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 14:28:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 14:28:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 14:28:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 14:28:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 14:28:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 14:28:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 14:28:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 14:28:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 14:28:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 14:28:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 14:28:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 14:28:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 14:28:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 14:28:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 14:28:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 14:28:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 14:28:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 14:28:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 14:28:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 14:28:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 14:28:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 14:28:16] [Rank 0] PRINT: step:9600/10000 val_loss:4.1318 total_sharp:2.5415e-05 L1_sharp:2.0927e-03 L2_sharp:3.4902e-03 L3_sharp:3.1552e-03 L4_sharp:6.4034e-03 L5_sharp:7.8797e-03 L6_sharp:6.2084e-03 L7_sharp:1.3607e-02 L8_sharp:1.6759e-02 L9_sharp:1.7206e-02 L10_sharp:2.2895e-02 L11_sharp:3.2983e-02 L12_sharp:1.7477e-01 total_fnorm:8.6875e+00 total_l1_linf:8.1280e+03 total_spectral:4.3750e+00 L1_fnorm:2.3560e-02 L2_fnorm:2.3315e-02 L3_fnorm:2.3315e-02 L4_fnorm:2.3438e-02 L5_fnorm:2.3071e-02 L6_fnorm:2.3193e-02 L7_fnorm:2.3315e-02 L8_fnorm:2.2461e-02 L9_fnorm:2.2949e-02 L10_fnorm:2.2949e-02 L11_fnorm:2.2583e-02 L12_fnorm:2.2583e-02 L1_l1linf:2.8687e-03 L2_l1linf:2.8381e-03 L3_l1linf:2.7466e-03 L4_l1linf:2.7466e-03 L5_l1linf:2.6245e-03 L6_l1linf:2.7161e-03 L7_l1linf:2.6550e-03 L8_l1linf:2.7771e-03 L9_l1linf:2.5635e-03 L10_l1linf:2.5177e-03 L11_l1linf:2.4109e-03 L12_l1linf:2.7466e-03 L1_spectral:3.9409e-04 L2_spectral:3.9084e-04 L3_spectral:3.9468e-04 L4_spectral:3.9273e-04 L5_spectral:3.8334e-04 L6_spectral:3.8819e-04 L7_spectral:3.8813e-04 L8_spectral:3.8870e-04 L9_spectral:3.7146e-04 L10_spectral:3.7385e-04 L11_spectral:3.6935e-04 L12_spectral:3.7934e-04 train_time:399172ms step_avg:41.58ms +[2025-09-11 14:28:16] [Rank 0] PRINT: step:9600/10000 
val_loss:4.1318 total_sharp:2.5415e-05 L1_sharp:2.0927e-03 L2_sharp:3.4902e-03 L3_sharp:3.1552e-03 L4_sharp:6.4034e-03 L5_sharp:7.8797e-03 L6_sharp:6.2084e-03 L7_sharp:1.3607e-02 L8_sharp:1.6759e-02 L9_sharp:1.7206e-02 L10_sharp:2.2895e-02 L11_sharp:3.2983e-02 L12_sharp:1.7477e-01 total_fnorm:8.6875e+00 total_l1_linf:8.1280e+03 total_spectral:4.3750e+00 L1_fnorm:2.3560e-02 L2_fnorm:2.3315e-02 L3_fnorm:2.3315e-02 L4_fnorm:2.3438e-02 L5_fnorm:2.3071e-02 L6_fnorm:2.3193e-02 L7_fnorm:2.3315e-02 L8_fnorm:2.2461e-02 L9_fnorm:2.2949e-02 L10_fnorm:2.2949e-02 L11_fnorm:2.2583e-02 L12_fnorm:2.2583e-02 L1_l1linf:2.8687e-03 L2_l1linf:2.8381e-03 L3_l1linf:2.7466e-03 L4_l1linf:2.7466e-03 L5_l1linf:2.6245e-03 L6_l1linf:2.7161e-03 L7_l1linf:2.6550e-03 L8_l1linf:2.7771e-03 L9_l1linf:2.5635e-03 L10_l1linf:2.5177e-03 L11_l1linf:2.4109e-03 L12_l1linf:2.7466e-03 L1_spectral:3.9409e-04 L2_spectral:3.9084e-04 L3_spectral:3.9468e-04 L4_spectral:3.9273e-04 L5_spectral:3.8334e-04 L6_spectral:3.8819e-04 L7_spectral:3.8813e-04 L8_spectral:3.8870e-04 L9_spectral:3.7146e-04 L10_spectral:3.7385e-04 L11_spectral:3.6935e-04 L12_spectral:3.7934e-04 train_time:399172ms step_avg:41.58ms +[2025-09-11 14:28:18] [Rank 0] step:9601/10000 train_time:401053ms step_avg:41.77ms +[2025-09-11 14:28:18] [Rank 0] step:9601/10000 train_time:401053ms step_avg:41.77ms +[2025-09-11 14:28:19] [Rank 0] step:9621/10000 train_time:401784ms step_avg:41.76ms +[2025-09-11 14:28:19] [Rank 0] step:9621/10000 train_time:401784ms step_avg:41.76ms +[2025-09-11 14:28:19] [Rank 0] step:9641/10000 train_time:402501ms step_avg:41.75ms +[2025-09-11 14:28:19] [Rank 0] step:9641/10000 train_time:402501ms step_avg:41.75ms +[2025-09-11 14:28:20] [Rank 0] step:9661/10000 train_time:403224ms step_avg:41.74ms +[2025-09-11 14:28:20] [Rank 0] step:9661/10000 train_time:403224ms step_avg:41.74ms +[2025-09-11 14:28:21] [Rank 0] step:9681/10000 train_time:403941ms step_avg:41.73ms +[2025-09-11 14:28:21] [Rank 0] step:9681/10000 
train_time:403941ms step_avg:41.73ms +[2025-09-11 14:28:21] [Rank 0] step:9701/10000 train_time:404658ms step_avg:41.71ms +[2025-09-11 14:28:21] [Rank 0] step:9701/10000 train_time:404658ms step_avg:41.71ms +[2025-09-11 14:28:22] [Rank 0] step:9721/10000 train_time:405379ms step_avg:41.70ms +[2025-09-11 14:28:22] [Rank 0] step:9721/10000 train_time:405379ms step_avg:41.70ms +[2025-09-11 14:28:23] [Rank 0] step:9741/10000 train_time:406098ms step_avg:41.69ms +[2025-09-11 14:28:23] [Rank 0] step:9741/10000 train_time:406098ms step_avg:41.69ms +[2025-09-11 14:28:24] [Rank 0] step:9761/10000 train_time:406816ms step_avg:41.68ms +[2025-09-11 14:28:24] [Rank 0] step:9761/10000 train_time:406816ms step_avg:41.68ms +[2025-09-11 14:28:24] [Rank 0] step:9781/10000 train_time:407534ms step_avg:41.67ms +[2025-09-11 14:28:24] [Rank 0] step:9781/10000 train_time:407534ms step_avg:41.67ms +[2025-09-11 14:28:25] [Rank 0] step:9801/10000 train_time:408256ms step_avg:41.65ms +[2025-09-11 14:28:25] [Rank 0] step:9801/10000 train_time:408256ms step_avg:41.65ms +[2025-09-11 14:28:26] [Rank 0] step:9821/10000 train_time:408975ms step_avg:41.64ms +[2025-09-11 14:28:26] [Rank 0] step:9821/10000 train_time:408975ms step_avg:41.64ms +[2025-09-11 14:28:26] [Rank 0] step:9841/10000 train_time:409697ms step_avg:41.63ms +[2025-09-11 14:28:26] [Rank 0] step:9841/10000 train_time:409697ms step_avg:41.63ms +[2025-09-11 14:28:27] [Rank 0] step:9861/10000 train_time:410414ms step_avg:41.62ms +[2025-09-11 14:28:27] [Rank 0] step:9861/10000 train_time:410414ms step_avg:41.62ms +[2025-09-11 14:28:28] [Rank 0] step:9881/10000 train_time:411133ms step_avg:41.61ms +[2025-09-11 14:28:28] [Rank 0] step:9881/10000 train_time:411133ms step_avg:41.61ms +[2025-09-11 14:28:29] [Rank 0] step:9901/10000 train_time:411849ms step_avg:41.60ms +[2025-09-11 14:28:29] [Rank 0] step:9901/10000 train_time:411849ms step_avg:41.60ms +[2025-09-11 14:28:29] [Rank 0] step:9921/10000 train_time:412569ms step_avg:41.59ms 
+[2025-09-11 14:28:29] [Rank 0] step:9921/10000 train_time:412569ms step_avg:41.59ms +[2025-09-11 14:28:30] [Rank 0] step:9941/10000 train_time:413292ms step_avg:41.57ms +[2025-09-11 14:28:30] [Rank 0] step:9941/10000 train_time:413292ms step_avg:41.57ms +[2025-09-11 14:28:31] [Rank 0] step:9961/10000 train_time:414015ms step_avg:41.56ms +[2025-09-11 14:28:31] [Rank 0] step:9961/10000 train_time:414015ms step_avg:41.56ms +[2025-09-11 14:28:32] [Rank 0] step:9981/10000 train_time:414735ms step_avg:41.55ms +[2025-09-11 14:28:32] [Rank 0] step:9981/10000 train_time:414735ms step_avg:41.55ms +[2025-09-11 14:28:32] [Rank 0] step:10000/10000 train_time:415427ms step_avg:41.54ms +[2025-09-11 14:28:32] [Rank 0] step:10000/10000 train_time:415427ms step_avg:41.54ms +[2025-09-11 14:28:32] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 14:28:32] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 14:28:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 14:28:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 14:28:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 14:28:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 14:28:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:28:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 14:28:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 14:28:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 14:28:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 14:28:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 14:28:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 14:28:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 14:28:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 14:28:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 14:28:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 14:28:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 14:28:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 14:28:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 14:28:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 14:28:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 14:28:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 14:28:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 14:28:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 14:28:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 14:28:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 14:28:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 14:28:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 14:28:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 14:28:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 14:28:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 14:28:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 14:28:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 14:28:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 14:28:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 14:28:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 14:28:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 14:28:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 14:28:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 14:28:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 14:28:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 14:28:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 14:28:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 14:28:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 14:28:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 14:28:43] [Rank 0] PRINT: step:10000/10000 val_loss:4.1291 total_sharp:1.6016e-05 L1_sharp:2.5908e-03 L2_sharp:1.7301e-03 L3_sharp:3.0875e-03 L4_sharp:3.8884e-03 L5_sharp:4.4044e-03 L6_sharp:6.3615e-03 L7_sharp:8.7608e-03 L8_sharp:1.2341e-02 L9_sharp:1.3671e-02 L10_sharp:1.7063e-02 L11_sharp:2.7850e-02 L12_sharp:8.0718e-02 total_fnorm:3.3281e+00 total_l1_linf:2.2400e+03 total_spectral:1.6641e+00 L1_fnorm:9.2773e-03 L2_fnorm:9.1553e-03 L3_fnorm:9.2163e-03 L4_fnorm:9.1553e-03 L5_fnorm:9.0332e-03 L6_fnorm:9.1553e-03 L7_fnorm:9.1553e-03 L8_fnorm:8.8501e-03 L9_fnorm:9.0332e-03 L10_fnorm:9.0332e-03 L11_fnorm:8.8501e-03 L12_fnorm:8.7891e-03 L1_l1linf:8.8882e-04 L2_l1linf:9.0408e-04 L3_l1linf:8.6594e-04 L4_l1linf:8.2397e-04 L5_l1linf:9.0790e-04 L6_l1linf:8.2016e-04 L7_l1linf:8.4686e-04 L8_l1linf:9.0027e-04 L9_l1linf:8.3160e-04 L10_l1linf:7.4387e-04 L11_l1linf:8.0109e-04 L12_l1linf:8.3923e-04 L1_spectral:1.5956e-04 L2_spectral:1.5677e-04 L3_spectral:1.5833e-04 L4_spectral:1.5937e-04 L5_spectral:1.5240e-04 L6_spectral:1.5618e-04 L7_spectral:1.5446e-04 L8_spectral:1.5517e-04 L9_spectral:1.5157e-04 L10_spectral:1.5025e-04 L11_spectral:1.5029e-04 L12_spectral:1.5344e-04 train_time:415448ms step_avg:41.54ms +[2025-09-11 14:28:43] [Rank 0] PRINT: step:10000/10000 val_loss:4.1291 total_sharp:1.6016e-05 L1_sharp:2.5908e-03 L2_sharp:1.7301e-03 L3_sharp:3.0875e-03 L4_sharp:3.8884e-03 L5_sharp:4.4044e-03 L6_sharp:6.3615e-03 L7_sharp:8.7608e-03 L8_sharp:1.2341e-02 L9_sharp:1.3671e-02 L10_sharp:1.7063e-02 L11_sharp:2.7850e-02 L12_sharp:8.0718e-02 total_fnorm:3.3281e+00 total_l1_linf:2.2400e+03 total_spectral:1.6641e+00 L1_fnorm:9.2773e-03 L2_fnorm:9.1553e-03 L3_fnorm:9.2163e-03 L4_fnorm:9.1553e-03 L5_fnorm:9.0332e-03 L6_fnorm:9.1553e-03 L7_fnorm:9.1553e-03 L8_fnorm:8.8501e-03 L9_fnorm:9.0332e-03 L10_fnorm:9.0332e-03 L11_fnorm:8.8501e-03 L12_fnorm:8.7891e-03 L1_l1linf:8.8882e-04 L2_l1linf:9.0408e-04 L3_l1linf:8.6594e-04 L4_l1linf:8.2397e-04 L5_l1linf:9.0790e-04 
L6_l1linf:8.2016e-04 L7_l1linf:8.4686e-04 L8_l1linf:9.0027e-04 L9_l1linf:8.3160e-04 L10_l1linf:7.4387e-04 L11_l1linf:8.0109e-04 L12_l1linf:8.3923e-04 L1_spectral:1.5956e-04 L2_spectral:1.5677e-04 L3_spectral:1.5833e-04 L4_spectral:1.5937e-04 L5_spectral:1.5240e-04 L6_spectral:1.5618e-04 L7_spectral:1.5446e-04 L8_spectral:1.5517e-04 L9_spectral:1.5157e-04 L10_spectral:1.5025e-04 L11_spectral:1.5029e-04 L12_spectral:1.5344e-04 train_time:415448ms step_avg:41.54ms +[2025-09-11 14:28:43] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 14:28:43 2025 --- +[2025-09-11 14:28:43] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 14:28:43 2025 --- +[2025-09-11 14:28:43] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 14:28:43] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.01_seed_44/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.01_seed_44/config.json new file mode 100644 index 0000000000000000000000000000000000000000..e2863dcd787932da78bdc736afccc1d7c5476d85 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.01_seed_44/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 44, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.02, + "muon_lr": 0.01, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "5d10dbe7-6521-4d75-ae1b-a29ec7c795d3", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.01_seed_44/training_log_5d10dbe7-6521-4d75-ae1b-a29ec7c795d3.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.01_seed_44/training_log_5d10dbe7-6521-4d75-ae1b-a29ec7c795d3.txt new file mode 100644 index 0000000000000000000000000000000000000000..58a643ca7244d928c7acc06f93994b4570b5cb37 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.01_seed_44/training_log_5d10dbe7-6521-4d75-ae1b-a29ec7c795d3.txt @@ -0,0 +1,4264 @@ +[2025-09-11 08:18:10] [Rank 0] PRINT: --- Script Start: Thu Sep 11 08:18:10 2025 --- +[2025-09-11 08:18:10] [Rank 0] PRINT: --- Script Start: Thu Sep 11 08:18:10 2025 --- +[2025-09-11 08:18:10] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.02, muon_lr=0.01, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 08:18:10] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.02, muon_lr=0.01, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 08:18:10] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 08:18:10] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 08:18:10] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 08:18:10] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 08:18:10] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.01_seed_44 +[2025-09-11 08:18:10] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.01_seed_44 +[2025-09-11 08:18:10] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import 
dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, 
"unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." 
+ "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention 
sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 08:18:10] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 08:18:10] [Rank 0] PRINT: Constructing model... +[2025-09-11 08:18:10] [Rank 0] PRINT: Constructing model... +[2025-09-11 08:18:11] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 08:18:11] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 08:18:11] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 08:18:11] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 08:18:11] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 08:18:11] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 08:18:11] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 08:18:11] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 08:18:11] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 08:18:11] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 08:18:13] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 08:18:13] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 08:18:13] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 08:18:13] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 08:18:13] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 08:18:13] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 08:18:18] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 08:18:18] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 08:18:18] [Rank 0] PRINT: Starting warmup... +[2025-09-11 08:18:18] [Rank 0] PRINT: Starting warmup... +[2025-09-11 08:18:56] [Rank 0] PRINT: Warmup complete. +[2025-09-11 08:18:56] [Rank 0] PRINT: Warmup complete. +[2025-09-11 08:18:56] [Rank 0] PRINT: Starting training... +[2025-09-11 08:18:56] [Rank 0] PRINT: Starting training... 
+[2025-09-11 08:18:57] [Rank 0] step:21/10000 train_time:958ms step_avg:45.60ms +[2025-09-11 08:18:57] [Rank 0] step:21/10000 train_time:958ms step_avg:45.60ms +[2025-09-11 08:18:58] [Rank 0] step:41/10000 train_time:1690ms step_avg:41.23ms +[2025-09-11 08:18:58] [Rank 0] step:41/10000 train_time:1690ms step_avg:41.23ms +[2025-09-11 08:18:59] [Rank 0] step:61/10000 train_time:2423ms step_avg:39.72ms +[2025-09-11 08:18:59] [Rank 0] step:61/10000 train_time:2423ms step_avg:39.72ms +[2025-09-11 08:18:59] [Rank 0] step:81/10000 train_time:3156ms step_avg:38.96ms +[2025-09-11 08:18:59] [Rank 0] step:81/10000 train_time:3156ms step_avg:38.96ms +[2025-09-11 08:19:00] [Rank 0] step:101/10000 train_time:3888ms step_avg:38.49ms +[2025-09-11 08:19:00] [Rank 0] step:101/10000 train_time:3888ms step_avg:38.49ms +[2025-09-11 08:19:01] [Rank 0] step:121/10000 train_time:4620ms step_avg:38.18ms +[2025-09-11 08:19:01] [Rank 0] step:121/10000 train_time:4620ms step_avg:38.18ms +[2025-09-11 08:19:02] [Rank 0] step:141/10000 train_time:5353ms step_avg:37.96ms +[2025-09-11 08:19:02] [Rank 0] step:141/10000 train_time:5353ms step_avg:37.96ms +[2025-09-11 08:19:02] [Rank 0] step:161/10000 train_time:6085ms step_avg:37.80ms +[2025-09-11 08:19:02] [Rank 0] step:161/10000 train_time:6085ms step_avg:37.80ms +[2025-09-11 08:19:03] [Rank 0] step:181/10000 train_time:6817ms step_avg:37.66ms +[2025-09-11 08:19:03] [Rank 0] step:181/10000 train_time:6817ms step_avg:37.66ms +[2025-09-11 08:19:04] [Rank 0] step:201/10000 train_time:7549ms step_avg:37.56ms +[2025-09-11 08:19:04] [Rank 0] step:201/10000 train_time:7549ms step_avg:37.56ms +[2025-09-11 08:19:04] [Rank 0] step:221/10000 train_time:8281ms step_avg:37.47ms +[2025-09-11 08:19:04] [Rank 0] step:221/10000 train_time:8281ms step_avg:37.47ms +[2025-09-11 08:19:05] [Rank 0] step:241/10000 train_time:9018ms step_avg:37.42ms +[2025-09-11 08:19:05] [Rank 0] step:241/10000 train_time:9018ms step_avg:37.42ms +[2025-09-11 08:19:06] [Rank 0] 
step:261/10000 train_time:9751ms step_avg:37.36ms +[2025-09-11 08:19:06] [Rank 0] step:261/10000 train_time:9751ms step_avg:37.36ms +[2025-09-11 08:19:07] [Rank 0] step:281/10000 train_time:10483ms step_avg:37.31ms +[2025-09-11 08:19:07] [Rank 0] step:281/10000 train_time:10483ms step_avg:37.31ms +[2025-09-11 08:19:07] [Rank 0] step:301/10000 train_time:11215ms step_avg:37.26ms +[2025-09-11 08:19:07] [Rank 0] step:301/10000 train_time:11215ms step_avg:37.26ms +[2025-09-11 08:19:08] [Rank 0] step:321/10000 train_time:11947ms step_avg:37.22ms +[2025-09-11 08:19:08] [Rank 0] step:321/10000 train_time:11947ms step_avg:37.22ms +[2025-09-11 08:19:09] [Rank 0] step:341/10000 train_time:12678ms step_avg:37.18ms +[2025-09-11 08:19:09] [Rank 0] step:341/10000 train_time:12678ms step_avg:37.18ms +[2025-09-11 08:19:10] [Rank 0] step:361/10000 train_time:13410ms step_avg:37.15ms +[2025-09-11 08:19:10] [Rank 0] step:361/10000 train_time:13410ms step_avg:37.15ms +[2025-09-11 08:19:10] [Rank 0] step:381/10000 train_time:14142ms step_avg:37.12ms +[2025-09-11 08:19:10] [Rank 0] step:381/10000 train_time:14142ms step_avg:37.12ms +[2025-09-11 08:19:11] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 08:19:11] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 08:19:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 08:19:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 08:19:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 08:19:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 08:19:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:19:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 08:19:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 08:19:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 08:19:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 08:19:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 08:19:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 08:19:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 08:19:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 08:19:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 08:19:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 08:19:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 08:19:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 08:19:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 08:19:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 08:19:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 08:19:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 08:19:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 08:19:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 08:19:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 08:19:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 08:19:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 08:19:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 08:19:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 08:19:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 08:19:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 08:19:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 08:19:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 08:19:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 08:19:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 08:19:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 08:19:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 08:19:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 08:19:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 08:19:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 08:19:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 08:19:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 08:19:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 08:19:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 08:19:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:19:58] [Rank 0] PRINT: step:400/10000 val_loss:5.8379 total_sharp:7.8973e-04 L1_sharp:2.7574e-02 L2_sharp:1.2608e-02 L3_sharp:1.5036e-02 L4_sharp:1.0767e-02 L5_sharp:8.6245e-03 L6_sharp:1.0499e-02 L7_sharp:1.2407e-02 L8_sharp:1.2071e-02 L9_sharp:1.3768e-02 L10_sharp:1.4306e-02 L11_sharp:2.5178e-02 L12_sharp:1.7514e-01 total_fnorm:7.8767e+01 total_l1_linf:2.0481e+05 total_spectral:3.9396e+01 L1_fnorm:1.2270e+00 L2_fnorm:1.2137e+00 L3_fnorm:1.2087e+00 L4_fnorm:1.2125e+00 L5_fnorm:1.2117e+00 L6_fnorm:1.2164e+00 L7_fnorm:1.2091e+00 L8_fnorm:1.2095e+00 L9_fnorm:1.2030e+00 L10_fnorm:1.2022e+00 L11_fnorm:1.1918e+00 L12_fnorm:1.1536e+00 L1_l1linf:4.1552e-01 L2_l1linf:4.0983e-01 L3_l1linf:4.0909e-01 L4_l1linf:4.1374e-01 L5_l1linf:4.1503e-01 L6_l1linf:4.1906e-01 L7_l1linf:4.2484e-01 L8_l1linf:4.2383e-01 L9_l1linf:4.2728e-01 L10_l1linf:4.2907e-01 L11_l1linf:4.2151e-01 L12_l1linf:3.9485e-01 L1_spectral:1.2052e-02 L2_spectral:1.2045e-02 L3_spectral:1.2049e-02 L4_spectral:1.2056e-02 L5_spectral:1.2043e-02 L6_spectral:1.2044e-02 L7_spectral:1.2041e-02 L8_spectral:1.2047e-02 L9_spectral:1.2051e-02 L10_spectral:1.2050e-02 L11_spectral:1.2041e-02 L12_spectral:1.2039e-02 train_time:14854ms step_avg:37.13ms +[2025-09-11 08:19:58] [Rank 0] PRINT: step:400/10000 val_loss:5.8379 total_sharp:7.8973e-04 L1_sharp:2.7574e-02 L2_sharp:1.2608e-02 L3_sharp:1.5036e-02 L4_sharp:1.0767e-02 L5_sharp:8.6245e-03 L6_sharp:1.0499e-02 L7_sharp:1.2407e-02 L8_sharp:1.2071e-02 L9_sharp:1.3768e-02 L10_sharp:1.4306e-02 L11_sharp:2.5178e-02 L12_sharp:1.7514e-01 total_fnorm:7.8767e+01 total_l1_linf:2.0481e+05 total_spectral:3.9396e+01 L1_fnorm:1.2270e+00 L2_fnorm:1.2137e+00 L3_fnorm:1.2087e+00 L4_fnorm:1.2125e+00 L5_fnorm:1.2117e+00 L6_fnorm:1.2164e+00 L7_fnorm:1.2091e+00 L8_fnorm:1.2095e+00 L9_fnorm:1.2030e+00 L10_fnorm:1.2022e+00 L11_fnorm:1.1918e+00 L12_fnorm:1.1536e+00 L1_l1linf:4.1552e-01 L2_l1linf:4.0983e-01 L3_l1linf:4.0909e-01 L4_l1linf:4.1374e-01 L5_l1linf:4.1503e-01 
L6_l1linf:4.1906e-01 L7_l1linf:4.2484e-01 L8_l1linf:4.2383e-01 L9_l1linf:4.2728e-01 L10_l1linf:4.2907e-01 L11_l1linf:4.2151e-01 L12_l1linf:3.9485e-01 L1_spectral:1.2052e-02 L2_spectral:1.2045e-02 L3_spectral:1.2049e-02 L4_spectral:1.2056e-02 L5_spectral:1.2043e-02 L6_spectral:1.2044e-02 L7_spectral:1.2041e-02 L8_spectral:1.2047e-02 L9_spectral:1.2051e-02 L10_spectral:1.2050e-02 L11_spectral:1.2041e-02 L12_spectral:1.2039e-02 train_time:14854ms step_avg:37.13ms +[2025-09-11 08:20:32] [Rank 0] step:401/10000 train_time:49053ms step_avg:122.33ms +[2025-09-11 08:20:32] [Rank 0] step:401/10000 train_time:49053ms step_avg:122.33ms +[2025-09-11 08:20:34] [Rank 0] step:421/10000 train_time:51002ms step_avg:121.14ms +[2025-09-11 08:20:34] [Rank 0] step:421/10000 train_time:51002ms step_avg:121.14ms +[2025-09-11 08:20:35] [Rank 0] step:441/10000 train_time:51645ms step_avg:117.11ms +[2025-09-11 08:20:35] [Rank 0] step:441/10000 train_time:51645ms step_avg:117.11ms +[2025-09-11 08:20:35] [Rank 0] step:461/10000 train_time:52287ms step_avg:113.42ms +[2025-09-11 08:20:35] [Rank 0] step:461/10000 train_time:52287ms step_avg:113.42ms +[2025-09-11 08:20:36] [Rank 0] step:481/10000 train_time:52929ms step_avg:110.04ms +[2025-09-11 08:20:36] [Rank 0] step:481/10000 train_time:52929ms step_avg:110.04ms +[2025-09-11 08:20:37] [Rank 0] step:501/10000 train_time:53571ms step_avg:106.93ms +[2025-09-11 08:20:37] [Rank 0] step:501/10000 train_time:53571ms step_avg:106.93ms +[2025-09-11 08:20:37] [Rank 0] step:521/10000 train_time:54212ms step_avg:104.05ms +[2025-09-11 08:20:37] [Rank 0] step:521/10000 train_time:54212ms step_avg:104.05ms +[2025-09-11 08:20:38] [Rank 0] step:541/10000 train_time:54854ms step_avg:101.39ms +[2025-09-11 08:20:38] [Rank 0] step:541/10000 train_time:54854ms step_avg:101.39ms +[2025-09-11 08:20:38] [Rank 0] step:561/10000 train_time:55496ms step_avg:98.92ms +[2025-09-11 08:20:38] [Rank 0] step:561/10000 train_time:55496ms step_avg:98.92ms +[2025-09-11 08:20:39] 
[Rank 0] step:581/10000 train_time:56137ms step_avg:96.62ms +[2025-09-11 08:20:39] [Rank 0] step:581/10000 train_time:56137ms step_avg:96.62ms +[2025-09-11 08:20:40] [Rank 0] step:601/10000 train_time:56779ms step_avg:94.48ms +[2025-09-11 08:20:40] [Rank 0] step:601/10000 train_time:56779ms step_avg:94.48ms +[2025-09-11 08:20:40] [Rank 0] step:621/10000 train_time:57420ms step_avg:92.46ms +[2025-09-11 08:20:40] [Rank 0] step:621/10000 train_time:57420ms step_avg:92.46ms +[2025-09-11 08:20:41] [Rank 0] step:641/10000 train_time:58062ms step_avg:90.58ms +[2025-09-11 08:20:41] [Rank 0] step:641/10000 train_time:58062ms step_avg:90.58ms +[2025-09-11 08:20:42] [Rank 0] step:661/10000 train_time:58703ms step_avg:88.81ms +[2025-09-11 08:20:42] [Rank 0] step:661/10000 train_time:58703ms step_avg:88.81ms +[2025-09-11 08:20:42] [Rank 0] step:681/10000 train_time:59345ms step_avg:87.14ms +[2025-09-11 08:20:42] [Rank 0] step:681/10000 train_time:59345ms step_avg:87.14ms +[2025-09-11 08:20:43] [Rank 0] step:701/10000 train_time:59987ms step_avg:85.57ms +[2025-09-11 08:20:43] [Rank 0] step:701/10000 train_time:59987ms step_avg:85.57ms +[2025-09-11 08:20:44] [Rank 0] step:721/10000 train_time:60628ms step_avg:84.09ms +[2025-09-11 08:20:44] [Rank 0] step:721/10000 train_time:60628ms step_avg:84.09ms +[2025-09-11 08:20:44] [Rank 0] step:741/10000 train_time:61269ms step_avg:82.68ms +[2025-09-11 08:20:44] [Rank 0] step:741/10000 train_time:61269ms step_avg:82.68ms +[2025-09-11 08:20:45] [Rank 0] step:761/10000 train_time:61916ms step_avg:81.36ms +[2025-09-11 08:20:45] [Rank 0] step:761/10000 train_time:61916ms step_avg:81.36ms +[2025-09-11 08:20:46] [Rank 0] step:781/10000 train_time:62562ms step_avg:80.11ms +[2025-09-11 08:20:46] [Rank 0] step:781/10000 train_time:62562ms step_avg:80.11ms +[2025-09-11 08:20:46] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 08:20:46] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 08:20:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 08:20:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 08:21:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 08:21:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 08:21:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:21:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:21:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 08:21:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 08:21:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 08:21:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 08:21:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 08:21:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 08:21:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 08:21:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 08:21:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 08:21:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 08:21:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 08:21:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 08:21:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 08:21:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 08:21:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 08:21:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 08:21:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 08:21:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 08:21:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 08:21:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 08:21:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 08:21:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 08:21:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 08:21:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 08:21:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 08:21:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 08:21:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 08:21:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 08:21:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 08:21:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 08:21:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... 
+[2025-09-11 08:21:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 08:21:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 08:21:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 08:21:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 08:21:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 08:21:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 08:21:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 08:21:32] [Rank 0] PRINT: step:800/10000 val_loss:5.4357 total_sharp:5.0673e-04 L1_sharp:2.6644e-02 L2_sharp:5.1247e-03 L3_sharp:3.0389e-03 L4_sharp:5.2513e-03 L5_sharp:7.9259e-03 L6_sharp:6.4912e-03 L7_sharp:5.3923e-03 L8_sharp:7.7769e-03 L9_sharp:9.1135e-03 L10_sharp:9.8145e-03 L11_sharp:1.9900e-02 L12_sharp:8.1348e-02 total_fnorm:8.0000e+01 total_l1_linf:1.8534e+05 total_spectral:4.0000e+01 L1_fnorm:1.2500e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2344e+00 L5_fnorm:1.2344e+00 L6_fnorm:1.2422e+00 L7_fnorm:1.2344e+00 L8_fnorm:1.2344e+00 L9_fnorm:1.2266e+00 L10_fnorm:1.2344e+00 L11_fnorm:1.2266e+00 L12_fnorm:1.1484e+00 L1_l1linf:4.1016e-01 L2_l1linf:4.0039e-01 L3_l1linf:3.9453e-01 L4_l1linf:3.9062e-01 L5_l1linf:3.9258e-01 L6_l1linf:3.9258e-01 L7_l1linf:3.9453e-01 L8_l1linf:3.9453e-01 L9_l1linf:4.0039e-01 L10_l1linf:4.0820e-01 L11_l1linf:4.0430e-01 L12_l1linf:3.4766e-01 L1_spectral:1.3549e-02 L2_spectral:1.3577e-02 L3_spectral:1.3549e-02 L4_spectral:1.3540e-02 L5_spectral:1.3539e-02 L6_spectral:1.3567e-02 L7_spectral:1.3481e-02 L8_spectral:1.3459e-02 L9_spectral:1.3488e-02 L10_spectral:1.3431e-02 L11_spectral:1.3447e-02 L12_spectral:1.3427e-02 train_time:63229ms step_avg:79.04ms +[2025-09-11 08:21:32] [Rank 0] PRINT: step:800/10000 val_loss:5.4357 
total_sharp:5.0673e-04 L1_sharp:2.6644e-02 L2_sharp:5.1247e-03 L3_sharp:3.0389e-03 L4_sharp:5.2513e-03 L5_sharp:7.9259e-03 L6_sharp:6.4912e-03 L7_sharp:5.3923e-03 L8_sharp:7.7769e-03 L9_sharp:9.1135e-03 L10_sharp:9.8145e-03 L11_sharp:1.9900e-02 L12_sharp:8.1348e-02 total_fnorm:8.0000e+01 total_l1_linf:1.8534e+05 total_spectral:4.0000e+01 L1_fnorm:1.2500e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2344e+00 L5_fnorm:1.2344e+00 L6_fnorm:1.2422e+00 L7_fnorm:1.2344e+00 L8_fnorm:1.2344e+00 L9_fnorm:1.2266e+00 L10_fnorm:1.2344e+00 L11_fnorm:1.2266e+00 L12_fnorm:1.1484e+00 L1_l1linf:4.1016e-01 L2_l1linf:4.0039e-01 L3_l1linf:3.9453e-01 L4_l1linf:3.9062e-01 L5_l1linf:3.9258e-01 L6_l1linf:3.9258e-01 L7_l1linf:3.9453e-01 L8_l1linf:3.9453e-01 L9_l1linf:4.0039e-01 L10_l1linf:4.0820e-01 L11_l1linf:4.0430e-01 L12_l1linf:3.4766e-01 L1_spectral:1.3549e-02 L2_spectral:1.3577e-02 L3_spectral:1.3549e-02 L4_spectral:1.3540e-02 L5_spectral:1.3539e-02 L6_spectral:1.3567e-02 L7_spectral:1.3481e-02 L8_spectral:1.3459e-02 L9_spectral:1.3488e-02 L10_spectral:1.3431e-02 L11_spectral:1.3447e-02 L12_spectral:1.3427e-02 train_time:63229ms step_avg:79.04ms +[2025-09-11 08:21:33] [Rank 0] step:801/10000 train_time:64413ms step_avg:80.42ms +[2025-09-11 08:21:33] [Rank 0] step:801/10000 train_time:64413ms step_avg:80.42ms +[2025-09-11 08:21:34] [Rank 0] step:821/10000 train_time:65066ms step_avg:79.25ms +[2025-09-11 08:21:34] [Rank 0] step:821/10000 train_time:65066ms step_avg:79.25ms +[2025-09-11 08:21:34] [Rank 0] step:841/10000 train_time:65716ms step_avg:78.14ms +[2025-09-11 08:21:34] [Rank 0] step:841/10000 train_time:65716ms step_avg:78.14ms +[2025-09-11 08:21:35] [Rank 0] step:861/10000 train_time:66365ms step_avg:77.08ms +[2025-09-11 08:21:35] [Rank 0] step:861/10000 train_time:66365ms step_avg:77.08ms +[2025-09-11 08:21:36] [Rank 0] step:881/10000 train_time:67013ms step_avg:76.06ms +[2025-09-11 08:21:36] [Rank 0] step:881/10000 train_time:67013ms step_avg:76.06ms +[2025-09-11 
08:21:36] [Rank 0] step:901/10000 train_time:67661ms step_avg:75.10ms +[2025-09-11 08:21:36] [Rank 0] step:901/10000 train_time:67661ms step_avg:75.10ms +[2025-09-11 08:21:37] [Rank 0] step:921/10000 train_time:68309ms step_avg:74.17ms +[2025-09-11 08:21:37] [Rank 0] step:921/10000 train_time:68309ms step_avg:74.17ms +[2025-09-11 08:21:38] [Rank 0] step:941/10000 train_time:68958ms step_avg:73.28ms +[2025-09-11 08:21:38] [Rank 0] step:941/10000 train_time:68958ms step_avg:73.28ms +[2025-09-11 08:21:38] [Rank 0] step:961/10000 train_time:69605ms step_avg:72.43ms +[2025-09-11 08:21:38] [Rank 0] step:961/10000 train_time:69605ms step_avg:72.43ms +[2025-09-11 08:21:39] [Rank 0] step:981/10000 train_time:70253ms step_avg:71.61ms +[2025-09-11 08:21:39] [Rank 0] step:981/10000 train_time:70253ms step_avg:71.61ms +[2025-09-11 08:21:40] [Rank 0] step:1001/10000 train_time:70902ms step_avg:70.83ms +[2025-09-11 08:21:40] [Rank 0] step:1001/10000 train_time:70902ms step_avg:70.83ms +[2025-09-11 08:21:40] [Rank 0] step:1021/10000 train_time:71550ms step_avg:70.08ms +[2025-09-11 08:21:40] [Rank 0] step:1021/10000 train_time:71550ms step_avg:70.08ms +[2025-09-11 08:21:41] [Rank 0] step:1041/10000 train_time:72199ms step_avg:69.35ms +[2025-09-11 08:21:41] [Rank 0] step:1041/10000 train_time:72199ms step_avg:69.35ms +[2025-09-11 08:21:42] [Rank 0] step:1061/10000 train_time:72846ms step_avg:68.66ms +[2025-09-11 08:21:42] [Rank 0] step:1061/10000 train_time:72846ms step_avg:68.66ms +[2025-09-11 08:21:42] [Rank 0] step:1081/10000 train_time:73494ms step_avg:67.99ms +[2025-09-11 08:21:42] [Rank 0] step:1081/10000 train_time:73494ms step_avg:67.99ms +[2025-09-11 08:21:43] [Rank 0] step:1101/10000 train_time:74142ms step_avg:67.34ms +[2025-09-11 08:21:43] [Rank 0] step:1101/10000 train_time:74142ms step_avg:67.34ms +[2025-09-11 08:21:44] [Rank 0] step:1121/10000 train_time:74790ms step_avg:66.72ms +[2025-09-11 08:21:44] [Rank 0] step:1121/10000 train_time:74790ms step_avg:66.72ms 
+[2025-09-11 08:21:44] [Rank 0] step:1141/10000 train_time:75438ms step_avg:66.12ms +[2025-09-11 08:21:44] [Rank 0] step:1141/10000 train_time:75438ms step_avg:66.12ms +[2025-09-11 08:21:45] [Rank 0] step:1161/10000 train_time:76086ms step_avg:65.53ms +[2025-09-11 08:21:45] [Rank 0] step:1161/10000 train_time:76086ms step_avg:65.53ms +[2025-09-11 08:21:45] [Rank 0] step:1181/10000 train_time:76733ms step_avg:64.97ms +[2025-09-11 08:21:45] [Rank 0] step:1181/10000 train_time:76733ms step_avg:64.97ms +[2025-09-11 08:21:46] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 08:21:46] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 08:21:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 08:21:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 08:21:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 08:21:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 08:21:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:21:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:21:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 08:21:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 08:21:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 08:21:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 08:21:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... 
+[2025-09-11 08:21:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 08:21:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 08:21:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 08:21:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 08:21:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 08:21:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 08:21:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 08:21:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 08:21:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 08:21:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 08:21:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 08:21:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 08:21:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 08:21:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 08:21:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 08:21:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 08:21:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 08:21:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 08:21:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... 
+[2025-09-11 08:21:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 08:21:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 08:21:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 08:21:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 08:21:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 08:21:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 08:21:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 08:21:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 08:21:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 08:21:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 08:21:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 08:21:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 08:21:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 08:21:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:21:56] [Rank 0] PRINT: step:1200/10000 val_loss:5.1202 total_sharp:3.6658e-04 L1_sharp:2.0359e-02 L2_sharp:3.2235e-03 L3_sharp:1.9777e-03 L4_sharp:4.0789e-03 L5_sharp:4.4890e-03 L6_sharp:3.2254e-03 L7_sharp:4.6538e-03 L8_sharp:6.0601e-03 L9_sharp:7.5100e-03 L10_sharp:7.3960e-03 L11_sharp:1.3747e-02 L12_sharp:8.5293e-02 total_fnorm:7.9500e+01 total_l1_linf:1.7715e+05 total_spectral:3.9750e+01 L1_fnorm:1.2578e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2422e+00 L5_fnorm:1.2344e+00 L6_fnorm:1.2500e+00 L7_fnorm:1.2500e+00 L8_fnorm:1.2422e+00 L9_fnorm:1.2422e+00 L10_fnorm:1.2500e+00 L11_fnorm:1.2578e+00 L12_fnorm:1.2578e+00 L1_l1linf:3.7891e-01 L2_l1linf:3.6719e-01 L3_l1linf:3.6328e-01 L4_l1linf:3.6523e-01 L5_l1linf:3.6133e-01 L6_l1linf:3.6133e-01 L7_l1linf:3.6133e-01 L8_l1linf:3.6328e-01 L9_l1linf:3.6328e-01 L10_l1linf:3.7109e-01 L11_l1linf:3.8086e-01 L12_l1linf:3.8281e-01 L1_spectral:1.4045e-02 L2_spectral:1.4047e-02 L3_spectral:1.4052e-02 L4_spectral:1.4005e-02 L5_spectral:1.3953e-02 L6_spectral:1.3902e-02 L7_spectral:1.3922e-02 L8_spectral:1.3946e-02 L9_spectral:1.4013e-02 L10_spectral:1.4041e-02 L11_spectral:1.3961e-02 L12_spectral:1.4200e-02 train_time:77364ms step_avg:64.47ms +[2025-09-11 08:21:56] [Rank 0] PRINT: step:1200/10000 val_loss:5.1202 total_sharp:3.6658e-04 L1_sharp:2.0359e-02 L2_sharp:3.2235e-03 L3_sharp:1.9777e-03 L4_sharp:4.0789e-03 L5_sharp:4.4890e-03 L6_sharp:3.2254e-03 L7_sharp:4.6538e-03 L8_sharp:6.0601e-03 L9_sharp:7.5100e-03 L10_sharp:7.3960e-03 L11_sharp:1.3747e-02 L12_sharp:8.5293e-02 total_fnorm:7.9500e+01 total_l1_linf:1.7715e+05 total_spectral:3.9750e+01 L1_fnorm:1.2578e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2422e+00 L5_fnorm:1.2344e+00 L6_fnorm:1.2500e+00 L7_fnorm:1.2500e+00 L8_fnorm:1.2422e+00 L9_fnorm:1.2422e+00 L10_fnorm:1.2500e+00 L11_fnorm:1.2578e+00 L12_fnorm:1.2578e+00 L1_l1linf:3.7891e-01 L2_l1linf:3.6719e-01 L3_l1linf:3.6328e-01 L4_l1linf:3.6523e-01 L5_l1linf:3.6133e-01 
L6_l1linf:3.6133e-01 L7_l1linf:3.6133e-01 L8_l1linf:3.6328e-01 L9_l1linf:3.6328e-01 L10_l1linf:3.7109e-01 L11_l1linf:3.8086e-01 L12_l1linf:3.8281e-01 L1_spectral:1.4045e-02 L2_spectral:1.4047e-02 L3_spectral:1.4052e-02 L4_spectral:1.4005e-02 L5_spectral:1.3953e-02 L6_spectral:1.3902e-02 L7_spectral:1.3922e-02 L8_spectral:1.3946e-02 L9_spectral:1.4013e-02 L10_spectral:1.4041e-02 L11_spectral:1.3961e-02 L12_spectral:1.4200e-02 train_time:77364ms step_avg:64.47ms +[2025-09-11 08:21:57] [Rank 0] step:1201/10000 train_time:78479ms step_avg:65.34ms +[2025-09-11 08:21:57] [Rank 0] step:1201/10000 train_time:78479ms step_avg:65.34ms +[2025-09-11 08:21:58] [Rank 0] step:1221/10000 train_time:79130ms step_avg:64.81ms +[2025-09-11 08:21:58] [Rank 0] step:1221/10000 train_time:79130ms step_avg:64.81ms +[2025-09-11 08:21:58] [Rank 0] step:1241/10000 train_time:79778ms step_avg:64.29ms +[2025-09-11 08:21:58] [Rank 0] step:1241/10000 train_time:79778ms step_avg:64.29ms +[2025-09-11 08:21:59] [Rank 0] step:1261/10000 train_time:80426ms step_avg:63.78ms +[2025-09-11 08:21:59] [Rank 0] step:1261/10000 train_time:80426ms step_avg:63.78ms +[2025-09-11 08:22:00] [Rank 0] step:1281/10000 train_time:81073ms step_avg:63.29ms +[2025-09-11 08:22:00] [Rank 0] step:1281/10000 train_time:81073ms step_avg:63.29ms +[2025-09-11 08:22:00] [Rank 0] step:1301/10000 train_time:81724ms step_avg:62.82ms +[2025-09-11 08:22:00] [Rank 0] step:1301/10000 train_time:81724ms step_avg:62.82ms +[2025-09-11 08:22:01] [Rank 0] step:1321/10000 train_time:82372ms step_avg:62.36ms +[2025-09-11 08:22:01] [Rank 0] step:1321/10000 train_time:82372ms step_avg:62.36ms +[2025-09-11 08:22:02] [Rank 0] step:1341/10000 train_time:83019ms step_avg:61.91ms +[2025-09-11 08:22:02] [Rank 0] step:1341/10000 train_time:83019ms step_avg:61.91ms +[2025-09-11 08:22:02] [Rank 0] step:1361/10000 train_time:83667ms step_avg:61.47ms +[2025-09-11 08:22:02] [Rank 0] step:1361/10000 train_time:83667ms step_avg:61.47ms +[2025-09-11 08:22:03] 
[Rank 0] step:1381/10000 train_time:84314ms step_avg:61.05ms +[2025-09-11 08:22:03] [Rank 0] step:1381/10000 train_time:84314ms step_avg:61.05ms +[2025-09-11 08:22:03] [Rank 0] step:1401/10000 train_time:84961ms step_avg:60.64ms +[2025-09-11 08:22:03] [Rank 0] step:1401/10000 train_time:84961ms step_avg:60.64ms +[2025-09-11 08:22:04] [Rank 0] step:1421/10000 train_time:85609ms step_avg:60.25ms +[2025-09-11 08:22:04] [Rank 0] step:1421/10000 train_time:85609ms step_avg:60.25ms +[2025-09-11 08:22:05] [Rank 0] step:1441/10000 train_time:86255ms step_avg:59.86ms +[2025-09-11 08:22:05] [Rank 0] step:1441/10000 train_time:86255ms step_avg:59.86ms +[2025-09-11 08:22:05] [Rank 0] step:1461/10000 train_time:86901ms step_avg:59.48ms +[2025-09-11 08:22:05] [Rank 0] step:1461/10000 train_time:86901ms step_avg:59.48ms +[2025-09-11 08:22:06] [Rank 0] step:1481/10000 train_time:87549ms step_avg:59.11ms +[2025-09-11 08:22:06] [Rank 0] step:1481/10000 train_time:87549ms step_avg:59.11ms +[2025-09-11 08:22:07] [Rank 0] step:1501/10000 train_time:88199ms step_avg:58.76ms +[2025-09-11 08:22:07] [Rank 0] step:1501/10000 train_time:88199ms step_avg:58.76ms +[2025-09-11 08:22:07] [Rank 0] step:1521/10000 train_time:88849ms step_avg:58.42ms +[2025-09-11 08:22:07] [Rank 0] step:1521/10000 train_time:88849ms step_avg:58.42ms +[2025-09-11 08:22:08] [Rank 0] step:1541/10000 train_time:89500ms step_avg:58.08ms +[2025-09-11 08:22:08] [Rank 0] step:1541/10000 train_time:89500ms step_avg:58.08ms +[2025-09-11 08:22:09] [Rank 0] step:1561/10000 train_time:90151ms step_avg:57.75ms +[2025-09-11 08:22:09] [Rank 0] step:1561/10000 train_time:90151ms step_avg:57.75ms +[2025-09-11 08:22:09] [Rank 0] step:1581/10000 train_time:90801ms step_avg:57.43ms +[2025-09-11 08:22:09] [Rank 0] step:1581/10000 train_time:90801ms step_avg:57.43ms +[2025-09-11 08:22:10] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 08:22:10] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 08:22:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 08:22:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 08:22:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 08:22:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 08:22:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:22:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:22:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 08:22:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 08:22:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 08:22:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 08:22:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 08:22:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 08:22:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 08:22:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 08:22:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 08:22:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 08:22:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 08:22:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 08:22:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 08:22:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 08:22:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 08:22:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 08:22:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 08:22:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 08:22:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 08:22:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 08:22:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 08:22:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 08:22:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 08:22:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 08:22:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 08:22:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 08:22:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 08:22:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 08:22:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 08:22:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 08:22:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 08:22:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 08:22:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 08:22:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 08:22:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 08:22:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 08:22:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 08:22:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 08:22:20] [Rank 0] PRINT: step:1600/10000 val_loss:4.9631 total_sharp:3.0878e-04 L1_sharp:1.1723e-02 L2_sharp:8.7050e-04 L3_sharp:1.1743e-03 L4_sharp:2.7022e-03 L5_sharp:3.1904e-03 L6_sharp:2.8980e-03 L7_sharp:2.2687e-03 L8_sharp:4.4060e-03 L9_sharp:4.5186e-03 L10_sharp:5.3654e-03 L11_sharp:9.8181e-03 L12_sharp:9.3612e-02 total_fnorm:7.6000e+01 total_l1_linf:1.6179e+05 total_spectral:3.8000e+01 L1_fnorm:1.2656e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2422e+00 L6_fnorm:1.2500e+00 L7_fnorm:1.2500e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2500e+00 L10_fnorm:1.2578e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2656e+00 L1_l1linf:3.7500e-01 L2_l1linf:3.5938e-01 L3_l1linf:3.5156e-01 L4_l1linf:3.5352e-01 L5_l1linf:3.5156e-01 L6_l1linf:3.5156e-01 L7_l1linf:3.4961e-01 L8_l1linf:3.4961e-01 L9_l1linf:3.4766e-01 L10_l1linf:3.5547e-01 L11_l1linf:3.6719e-01 L12_l1linf:3.7500e-01 L1_spectral:1.4417e-02 L2_spectral:1.4375e-02 L3_spectral:1.4347e-02 L4_spectral:1.4313e-02 L5_spectral:1.4304e-02 L6_spectral:1.4384e-02 L7_spectral:1.4284e-02 L8_spectral:1.4401e-02 L9_spectral:1.4434e-02 L10_spectral:1.4430e-02 L11_spectral:1.4304e-02 L12_spectral:1.4905e-02 train_time:91434ms step_avg:57.15ms +[2025-09-11 08:22:20] [Rank 0] PRINT: step:1600/10000 
val_loss:4.9631 total_sharp:3.0878e-04 L1_sharp:1.1723e-02 L2_sharp:8.7050e-04 L3_sharp:1.1743e-03 L4_sharp:2.7022e-03 L5_sharp:3.1904e-03 L6_sharp:2.8980e-03 L7_sharp:2.2687e-03 L8_sharp:4.4060e-03 L9_sharp:4.5186e-03 L10_sharp:5.3654e-03 L11_sharp:9.8181e-03 L12_sharp:9.3612e-02 total_fnorm:7.6000e+01 total_l1_linf:1.6179e+05 total_spectral:3.8000e+01 L1_fnorm:1.2656e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2422e+00 L6_fnorm:1.2500e+00 L7_fnorm:1.2500e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2500e+00 L10_fnorm:1.2578e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2656e+00 L1_l1linf:3.7500e-01 L2_l1linf:3.5938e-01 L3_l1linf:3.5156e-01 L4_l1linf:3.5352e-01 L5_l1linf:3.5156e-01 L6_l1linf:3.5156e-01 L7_l1linf:3.4961e-01 L8_l1linf:3.4961e-01 L9_l1linf:3.4766e-01 L10_l1linf:3.5547e-01 L11_l1linf:3.6719e-01 L12_l1linf:3.7500e-01 L1_spectral:1.4417e-02 L2_spectral:1.4375e-02 L3_spectral:1.4347e-02 L4_spectral:1.4313e-02 L5_spectral:1.4304e-02 L6_spectral:1.4384e-02 L7_spectral:1.4284e-02 L8_spectral:1.4401e-02 L9_spectral:1.4434e-02 L10_spectral:1.4430e-02 L11_spectral:1.4304e-02 L12_spectral:1.4905e-02 train_time:91434ms step_avg:57.15ms +[2025-09-11 08:22:21] [Rank 0] step:1601/10000 train_time:92589ms step_avg:57.83ms +[2025-09-11 08:22:21] [Rank 0] step:1601/10000 train_time:92589ms step_avg:57.83ms +[2025-09-11 08:22:22] [Rank 0] step:1621/10000 train_time:93243ms step_avg:57.52ms +[2025-09-11 08:22:22] [Rank 0] step:1621/10000 train_time:93243ms step_avg:57.52ms +[2025-09-11 08:22:22] [Rank 0] step:1641/10000 train_time:93895ms step_avg:57.22ms +[2025-09-11 08:22:22] [Rank 0] step:1641/10000 train_time:93895ms step_avg:57.22ms +[2025-09-11 08:22:23] [Rank 0] step:1661/10000 train_time:94547ms step_avg:56.92ms +[2025-09-11 08:22:23] [Rank 0] step:1661/10000 train_time:94547ms step_avg:56.92ms +[2025-09-11 08:22:24] [Rank 0] step:1681/10000 train_time:95198ms step_avg:56.63ms +[2025-09-11 08:22:24] [Rank 0] step:1681/10000 train_time:95198ms 
step_avg:56.63ms +[2025-09-11 08:22:24] [Rank 0] step:1701/10000 train_time:95849ms step_avg:56.35ms +[2025-09-11 08:22:24] [Rank 0] step:1701/10000 train_time:95849ms step_avg:56.35ms +[2025-09-11 08:22:25] [Rank 0] step:1721/10000 train_time:96500ms step_avg:56.07ms +[2025-09-11 08:22:25] [Rank 0] step:1721/10000 train_time:96500ms step_avg:56.07ms +[2025-09-11 08:22:26] [Rank 0] step:1741/10000 train_time:97151ms step_avg:55.80ms +[2025-09-11 08:22:26] [Rank 0] step:1741/10000 train_time:97151ms step_avg:55.80ms +[2025-09-11 08:22:26] [Rank 0] step:1761/10000 train_time:97803ms step_avg:55.54ms +[2025-09-11 08:22:26] [Rank 0] step:1761/10000 train_time:97803ms step_avg:55.54ms +[2025-09-11 08:22:27] [Rank 0] step:1781/10000 train_time:98455ms step_avg:55.28ms +[2025-09-11 08:22:27] [Rank 0] step:1781/10000 train_time:98455ms step_avg:55.28ms +[2025-09-11 08:22:28] [Rank 0] step:1801/10000 train_time:99105ms step_avg:55.03ms +[2025-09-11 08:22:28] [Rank 0] step:1801/10000 train_time:99105ms step_avg:55.03ms +[2025-09-11 08:22:28] [Rank 0] step:1821/10000 train_time:99756ms step_avg:54.78ms +[2025-09-11 08:22:28] [Rank 0] step:1821/10000 train_time:99756ms step_avg:54.78ms +[2025-09-11 08:22:29] [Rank 0] step:1841/10000 train_time:100406ms step_avg:54.54ms +[2025-09-11 08:22:29] [Rank 0] step:1841/10000 train_time:100406ms step_avg:54.54ms +[2025-09-11 08:22:30] [Rank 0] step:1861/10000 train_time:101057ms step_avg:54.30ms +[2025-09-11 08:22:30] [Rank 0] step:1861/10000 train_time:101057ms step_avg:54.30ms +[2025-09-11 08:22:30] [Rank 0] step:1881/10000 train_time:101707ms step_avg:54.07ms +[2025-09-11 08:22:30] [Rank 0] step:1881/10000 train_time:101707ms step_avg:54.07ms +[2025-09-11 08:22:31] [Rank 0] step:1901/10000 train_time:102358ms step_avg:53.84ms +[2025-09-11 08:22:31] [Rank 0] step:1901/10000 train_time:102358ms step_avg:53.84ms +[2025-09-11 08:22:31] [Rank 0] step:1921/10000 train_time:103008ms step_avg:53.62ms +[2025-09-11 08:22:31] [Rank 0] 
step:1921/10000 train_time:103008ms step_avg:53.62ms +[2025-09-11 08:22:32] [Rank 0] step:1941/10000 train_time:103658ms step_avg:53.40ms +[2025-09-11 08:22:32] [Rank 0] step:1941/10000 train_time:103658ms step_avg:53.40ms +[2025-09-11 08:22:33] [Rank 0] step:1961/10000 train_time:104309ms step_avg:53.19ms +[2025-09-11 08:22:33] [Rank 0] step:1961/10000 train_time:104309ms step_avg:53.19ms +[2025-09-11 08:22:33] [Rank 0] step:1981/10000 train_time:104960ms step_avg:52.98ms +[2025-09-11 08:22:33] [Rank 0] step:1981/10000 train_time:104960ms step_avg:52.98ms +[2025-09-11 08:22:34] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 08:22:34] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 08:22:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 08:22:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 08:22:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 08:22:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 08:22:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:22:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:22:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 08:22:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 08:22:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 08:22:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 08:22:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 08:22:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 08:22:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 08:22:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 08:22:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 08:22:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 08:22:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 08:22:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 08:22:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 08:22:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 08:22:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 08:22:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 08:22:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 08:22:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 08:22:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 08:22:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 08:22:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 08:22:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 08:22:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 08:22:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 08:22:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 08:22:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 08:22:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 08:22:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 08:22:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 08:22:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 08:22:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 08:22:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 08:22:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 08:22:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 08:22:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 08:22:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 08:22:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 08:22:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:22:45] [Rank 0] PRINT: step:2000/10000 val_loss:4.8134 total_sharp:2.2040e-04 L1_sharp:1.2920e-02 L2_sharp:8.9430e-04 L3_sharp:1.1450e-03 L4_sharp:8.3646e-04 L5_sharp:2.5345e-03 L6_sharp:2.1576e-03 L7_sharp:2.7050e-03 L8_sharp:4.0246e-03 L9_sharp:3.8290e-03 L10_sharp:3.9676e-03 L11_sharp:8.7706e-03 L12_sharp:6.3313e-02 total_fnorm:7.8000e+01 total_l1_linf:1.6998e+05 total_spectral:3.9000e+01 L1_fnorm:1.2656e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2500e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2656e+00 L1_l1linf:3.6328e-01 L2_l1linf:3.4766e-01 L3_l1linf:3.4180e-01 L4_l1linf:3.4570e-01 L5_l1linf:3.3789e-01 L6_l1linf:3.4375e-01 L7_l1linf:3.4375e-01 L8_l1linf:3.3984e-01 L9_l1linf:3.3984e-01 L10_l1linf:3.3984e-01 L11_l1linf:3.5742e-01 L12_l1linf:3.6719e-01 L1_spectral:1.4731e-02 L2_spectral:1.4639e-02 L3_spectral:1.4616e-02 L4_spectral:1.4661e-02 L5_spectral:1.4601e-02 L6_spectral:1.4664e-02 L7_spectral:1.4622e-02 L8_spectral:1.4749e-02 L9_spectral:1.4660e-02 L10_spectral:1.4709e-02 L11_spectral:1.4657e-02 L12_spectral:1.5207e-02 train_time:105594ms step_avg:52.80ms +[2025-09-11 08:22:45] [Rank 0] PRINT: step:2000/10000 val_loss:4.8134 total_sharp:2.2040e-04 L1_sharp:1.2920e-02 L2_sharp:8.9430e-04 L3_sharp:1.1450e-03 L4_sharp:8.3646e-04 L5_sharp:2.5345e-03 L6_sharp:2.1576e-03 L7_sharp:2.7050e-03 L8_sharp:4.0246e-03 L9_sharp:3.8290e-03 L10_sharp:3.9676e-03 L11_sharp:8.7706e-03 L12_sharp:6.3313e-02 total_fnorm:7.8000e+01 total_l1_linf:1.6998e+05 total_spectral:3.9000e+01 L1_fnorm:1.2656e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2500e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2656e+00 L1_l1linf:3.6328e-01 L2_l1linf:3.4766e-01 L3_l1linf:3.4180e-01 L4_l1linf:3.4570e-01 L5_l1linf:3.3789e-01 
L6_l1linf:3.4375e-01 L7_l1linf:3.4375e-01 L8_l1linf:3.3984e-01 L9_l1linf:3.3984e-01 L10_l1linf:3.3984e-01 L11_l1linf:3.5742e-01 L12_l1linf:3.6719e-01 L1_spectral:1.4731e-02 L2_spectral:1.4639e-02 L3_spectral:1.4616e-02 L4_spectral:1.4661e-02 L5_spectral:1.4601e-02 L6_spectral:1.4664e-02 L7_spectral:1.4622e-02 L8_spectral:1.4749e-02 L9_spectral:1.4660e-02 L10_spectral:1.4709e-02 L11_spectral:1.4657e-02 L12_spectral:1.5207e-02 train_time:105594ms step_avg:52.80ms +[2025-09-11 08:22:46] [Rank 0] step:2001/10000 train_time:106755ms step_avg:53.35ms +[2025-09-11 08:22:46] [Rank 0] step:2001/10000 train_time:106755ms step_avg:53.35ms +[2025-09-11 08:22:47] [Rank 0] step:2021/10000 train_time:107410ms step_avg:53.15ms +[2025-09-11 08:22:47] [Rank 0] step:2021/10000 train_time:107410ms step_avg:53.15ms +[2025-09-11 08:22:47] [Rank 0] step:2041/10000 train_time:108062ms step_avg:52.95ms +[2025-09-11 08:22:47] [Rank 0] step:2041/10000 train_time:108062ms step_avg:52.95ms +[2025-09-11 08:22:48] [Rank 0] step:2061/10000 train_time:108714ms step_avg:52.75ms +[2025-09-11 08:22:48] [Rank 0] step:2061/10000 train_time:108714ms step_avg:52.75ms +[2025-09-11 08:22:49] [Rank 0] step:2081/10000 train_time:109366ms step_avg:52.55ms +[2025-09-11 08:22:49] [Rank 0] step:2081/10000 train_time:109366ms step_avg:52.55ms +[2025-09-11 08:22:49] [Rank 0] step:2101/10000 train_time:110018ms step_avg:52.36ms +[2025-09-11 08:22:49] [Rank 0] step:2101/10000 train_time:110018ms step_avg:52.36ms +[2025-09-11 08:22:50] [Rank 0] step:2121/10000 train_time:110669ms step_avg:52.18ms +[2025-09-11 08:22:50] [Rank 0] step:2121/10000 train_time:110669ms step_avg:52.18ms +[2025-09-11 08:22:50] [Rank 0] step:2141/10000 train_time:111321ms step_avg:51.99ms +[2025-09-11 08:22:50] [Rank 0] step:2141/10000 train_time:111321ms step_avg:51.99ms +[2025-09-11 08:22:51] [Rank 0] step:2161/10000 train_time:111973ms step_avg:51.82ms +[2025-09-11 08:22:51] [Rank 0] step:2161/10000 train_time:111973ms step_avg:51.82ms 
+[2025-09-11 08:22:52] [Rank 0] step:2181/10000 train_time:112624ms step_avg:51.64ms +[2025-09-11 08:22:52] [Rank 0] step:2181/10000 train_time:112624ms step_avg:51.64ms +[2025-09-11 08:22:52] [Rank 0] step:2201/10000 train_time:113274ms step_avg:51.46ms +[2025-09-11 08:22:52] [Rank 0] step:2201/10000 train_time:113274ms step_avg:51.46ms +[2025-09-11 08:22:54] [Rank 0] step:2221/10000 train_time:114448ms step_avg:51.53ms +[2025-09-11 08:22:54] [Rank 0] step:2221/10000 train_time:114448ms step_avg:51.53ms +[2025-09-11 08:22:54] [Rank 0] step:2241/10000 train_time:115110ms step_avg:51.37ms +[2025-09-11 08:22:54] [Rank 0] step:2241/10000 train_time:115110ms step_avg:51.37ms +[2025-09-11 08:22:55] [Rank 0] step:2261/10000 train_time:115775ms step_avg:51.21ms +[2025-09-11 08:22:55] [Rank 0] step:2261/10000 train_time:115775ms step_avg:51.21ms +[2025-09-11 08:22:56] [Rank 0] step:2281/10000 train_time:116715ms step_avg:51.17ms +[2025-09-11 08:22:56] [Rank 0] step:2281/10000 train_time:116715ms step_avg:51.17ms +[2025-09-11 08:22:57] [Rank 0] step:2301/10000 train_time:117380ms step_avg:51.01ms +[2025-09-11 08:22:57] [Rank 0] step:2301/10000 train_time:117380ms step_avg:51.01ms +[2025-09-11 08:22:57] [Rank 0] step:2321/10000 train_time:118048ms step_avg:50.86ms +[2025-09-11 08:22:57] [Rank 0] step:2321/10000 train_time:118048ms step_avg:50.86ms +[2025-09-11 08:22:58] [Rank 0] step:2341/10000 train_time:118712ms step_avg:50.71ms +[2025-09-11 08:22:58] [Rank 0] step:2341/10000 train_time:118712ms step_avg:50.71ms +[2025-09-11 08:22:59] [Rank 0] step:2361/10000 train_time:119377ms step_avg:50.56ms +[2025-09-11 08:22:59] [Rank 0] step:2361/10000 train_time:119377ms step_avg:50.56ms +[2025-09-11 08:22:59] [Rank 0] step:2381/10000 train_time:120041ms step_avg:50.42ms +[2025-09-11 08:22:59] [Rank 0] step:2381/10000 train_time:120041ms step_avg:50.42ms +[2025-09-11 08:23:00] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 08:23:00] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 08:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 08:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 08:23:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 08:23:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 08:23:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:23:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:23:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 08:23:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 08:23:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 08:23:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 08:23:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 08:23:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 08:23:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 08:23:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 08:23:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 08:23:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 08:23:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 08:23:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 08:23:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 08:23:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 08:23:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 08:23:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 08:23:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 08:23:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 08:23:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 08:23:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 08:23:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 08:23:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 08:23:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 08:23:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 08:23:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 08:23:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 08:23:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 08:23:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 08:23:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 08:23:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 08:23:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 08:23:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 08:23:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 08:23:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 08:23:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 08:23:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 08:23:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 08:23:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 08:23:10] [Rank 0] PRINT: step:2400/10000 val_loss:4.6868 total_sharp:2.2777e-04 L1_sharp:1.0858e-02 L2_sharp:3.2529e-04 L3_sharp:1.2154e-03 L4_sharp:1.3837e-03 L5_sharp:1.0892e-03 L6_sharp:1.1629e-03 L7_sharp:1.5766e-03 L8_sharp:2.7803e-03 L9_sharp:3.4620e-03 L10_sharp:3.6386e-03 L11_sharp:6.1096e-03 L12_sharp:7.9302e-02 total_fnorm:7.4500e+01 total_l1_linf:1.5565e+05 total_spectral:3.7250e+01 L1_fnorm:1.2656e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2812e+00 L1_l1linf:3.6328e-01 L2_l1linf:3.4180e-01 L3_l1linf:3.3594e-01 L4_l1linf:3.3594e-01 L5_l1linf:3.3789e-01 L6_l1linf:3.3789e-01 L7_l1linf:3.3594e-01 L8_l1linf:3.3789e-01 L9_l1linf:3.3398e-01 L10_l1linf:3.3203e-01 L11_l1linf:3.5352e-01 L12_l1linf:3.6523e-01 L1_spectral:1.5011e-02 L2_spectral:1.4859e-02 L3_spectral:1.4811e-02 L4_spectral:1.4916e-02 L5_spectral:1.4850e-02 L6_spectral:1.4844e-02 L7_spectral:1.4832e-02 L8_spectral:1.5085e-02 L9_spectral:1.4885e-02 L10_spectral:1.4978e-02 L11_spectral:1.4938e-02 L12_spectral:1.5581e-02 train_time:120686ms step_avg:50.29ms +[2025-09-11 08:23:10] [Rank 0] PRINT: step:2400/10000 
val_loss:4.6868 total_sharp:2.2777e-04 L1_sharp:1.0858e-02 L2_sharp:3.2529e-04 L3_sharp:1.2154e-03 L4_sharp:1.3837e-03 L5_sharp:1.0892e-03 L6_sharp:1.1629e-03 L7_sharp:1.5766e-03 L8_sharp:2.7803e-03 L9_sharp:3.4620e-03 L10_sharp:3.6386e-03 L11_sharp:6.1096e-03 L12_sharp:7.9302e-02 total_fnorm:7.4500e+01 total_l1_linf:1.5565e+05 total_spectral:3.7250e+01 L1_fnorm:1.2656e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2812e+00 L1_l1linf:3.6328e-01 L2_l1linf:3.4180e-01 L3_l1linf:3.3594e-01 L4_l1linf:3.3594e-01 L5_l1linf:3.3789e-01 L6_l1linf:3.3789e-01 L7_l1linf:3.3594e-01 L8_l1linf:3.3789e-01 L9_l1linf:3.3398e-01 L10_l1linf:3.3203e-01 L11_l1linf:3.5352e-01 L12_l1linf:3.6523e-01 L1_spectral:1.5011e-02 L2_spectral:1.4859e-02 L3_spectral:1.4811e-02 L4_spectral:1.4916e-02 L5_spectral:1.4850e-02 L6_spectral:1.4844e-02 L7_spectral:1.4832e-02 L8_spectral:1.5085e-02 L9_spectral:1.4885e-02 L10_spectral:1.4978e-02 L11_spectral:1.4938e-02 L12_spectral:1.5581e-02 train_time:120686ms step_avg:50.29ms +[2025-09-11 08:23:11] [Rank 0] step:2401/10000 train_time:121838ms step_avg:50.74ms +[2025-09-11 08:23:11] [Rank 0] step:2401/10000 train_time:121838ms step_avg:50.74ms +[2025-09-11 08:23:12] [Rank 0] step:2421/10000 train_time:122506ms step_avg:50.60ms +[2025-09-11 08:23:12] [Rank 0] step:2421/10000 train_time:122506ms step_avg:50.60ms +[2025-09-11 08:23:12] [Rank 0] step:2441/10000 train_time:123172ms step_avg:50.46ms +[2025-09-11 08:23:12] [Rank 0] step:2441/10000 train_time:123172ms step_avg:50.46ms +[2025-09-11 08:23:13] [Rank 0] step:2461/10000 train_time:123836ms step_avg:50.32ms +[2025-09-11 08:23:13] [Rank 0] step:2461/10000 train_time:123836ms step_avg:50.32ms +[2025-09-11 08:23:14] [Rank 0] step:2481/10000 train_time:124502ms step_avg:50.18ms +[2025-09-11 08:23:14] [Rank 0] step:2481/10000 
train_time:124502ms step_avg:50.18ms +[2025-09-11 08:23:14] [Rank 0] step:2501/10000 train_time:125167ms step_avg:50.05ms +[2025-09-11 08:23:14] [Rank 0] step:2501/10000 train_time:125167ms step_avg:50.05ms +[2025-09-11 08:23:15] [Rank 0] step:2521/10000 train_time:125832ms step_avg:49.91ms +[2025-09-11 08:23:15] [Rank 0] step:2521/10000 train_time:125832ms step_avg:49.91ms +[2025-09-11 08:23:16] [Rank 0] step:2541/10000 train_time:126497ms step_avg:49.78ms +[2025-09-11 08:23:16] [Rank 0] step:2541/10000 train_time:126497ms step_avg:49.78ms +[2025-09-11 08:23:16] [Rank 0] step:2561/10000 train_time:127161ms step_avg:49.65ms +[2025-09-11 08:23:16] [Rank 0] step:2561/10000 train_time:127161ms step_avg:49.65ms +[2025-09-11 08:23:17] [Rank 0] step:2581/10000 train_time:127826ms step_avg:49.53ms +[2025-09-11 08:23:17] [Rank 0] step:2581/10000 train_time:127826ms step_avg:49.53ms +[2025-09-11 08:23:18] [Rank 0] step:2601/10000 train_time:128491ms step_avg:49.40ms +[2025-09-11 08:23:18] [Rank 0] step:2601/10000 train_time:128491ms step_avg:49.40ms +[2025-09-11 08:23:18] [Rank 0] step:2621/10000 train_time:129155ms step_avg:49.28ms +[2025-09-11 08:23:18] [Rank 0] step:2621/10000 train_time:129155ms step_avg:49.28ms +[2025-09-11 08:23:19] [Rank 0] step:2641/10000 train_time:129819ms step_avg:49.16ms +[2025-09-11 08:23:19] [Rank 0] step:2641/10000 train_time:129819ms step_avg:49.16ms +[2025-09-11 08:23:20] [Rank 0] step:2661/10000 train_time:130483ms step_avg:49.04ms +[2025-09-11 08:23:20] [Rank 0] step:2661/10000 train_time:130483ms step_avg:49.04ms +[2025-09-11 08:23:20] [Rank 0] step:2681/10000 train_time:131148ms step_avg:48.92ms +[2025-09-11 08:23:20] [Rank 0] step:2681/10000 train_time:131148ms step_avg:48.92ms +[2025-09-11 08:23:21] [Rank 0] step:2701/10000 train_time:131813ms step_avg:48.80ms +[2025-09-11 08:23:21] [Rank 0] step:2701/10000 train_time:131813ms step_avg:48.80ms +[2025-09-11 08:23:22] [Rank 0] step:2721/10000 train_time:132477ms step_avg:48.69ms 
+[2025-09-11 08:23:22] [Rank 0] step:2721/10000 train_time:132477ms step_avg:48.69ms +[2025-09-11 08:23:22] [Rank 0] step:2741/10000 train_time:133142ms step_avg:48.57ms +[2025-09-11 08:23:22] [Rank 0] step:2741/10000 train_time:133142ms step_avg:48.57ms +[2025-09-11 08:23:23] [Rank 0] step:2761/10000 train_time:133807ms step_avg:48.46ms +[2025-09-11 08:23:23] [Rank 0] step:2761/10000 train_time:133807ms step_avg:48.46ms +[2025-09-11 08:23:24] [Rank 0] step:2781/10000 train_time:134472ms step_avg:48.35ms +[2025-09-11 08:23:24] [Rank 0] step:2781/10000 train_time:134472ms step_avg:48.35ms +[2025-09-11 08:23:24] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 08:23:24] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 08:23:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 08:23:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 08:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 08:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 08:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 08:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 08:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 08:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 08:23:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 08:23:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 08:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 08:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 08:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 08:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 08:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 08:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 08:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 08:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 08:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 08:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 08:23:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 08:23:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 08:23:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 08:23:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 08:23:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 08:23:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 08:23:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 08:23:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 08:23:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 08:23:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 08:23:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 08:23:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 08:23:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 08:23:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 08:23:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 08:23:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 08:23:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 08:23:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 08:23:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 08:23:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 08:23:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 08:23:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:23:34] [Rank 0] PRINT: step:2800/10000 val_loss:4.6270 total_sharp:2.2407e-04 L1_sharp:8.4659e-03 L2_sharp:3.7746e-04 L3_sharp:2.6638e-04 L4_sharp:1.2075e-03 L5_sharp:2.0899e-03 L6_sharp:1.5414e-03 L7_sharp:1.8667e-03 L8_sharp:2.9535e-03 L9_sharp:3.8828e-03 L10_sharp:3.9480e-03 L11_sharp:6.3944e-03 L12_sharp:5.3449e-02 total_fnorm:7.3000e+01 total_l1_linf:1.5258e+05 total_spectral:3.6500e+01 L1_fnorm:1.2656e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2578e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2812e+00 L1_l1linf:3.5547e-01 L2_l1linf:3.3984e-01 L3_l1linf:3.3203e-01 L4_l1linf:3.3008e-01 L5_l1linf:3.3594e-01 L6_l1linf:3.3398e-01 L7_l1linf:3.3594e-01 L8_l1linf:3.2617e-01 L9_l1linf:3.2422e-01 L10_l1linf:3.2422e-01 L11_l1linf:3.3203e-01 L12_l1linf:3.4375e-01 L1_spectral:1.5231e-02 L2_spectral:1.4922e-02 L3_spectral:1.5016e-02 L4_spectral:1.5095e-02 L5_spectral:1.4985e-02 L6_spectral:1.5116e-02 L7_spectral:1.5097e-02 L8_spectral:1.5290e-02 L9_spectral:1.5116e-02 L10_spectral:1.5303e-02 L11_spectral:1.5156e-02 L12_spectral:1.5591e-02 train_time:135117ms step_avg:48.26ms +[2025-09-11 08:23:34] [Rank 0] PRINT: step:2800/10000 val_loss:4.6270 total_sharp:2.2407e-04 L1_sharp:8.4659e-03 L2_sharp:3.7746e-04 L3_sharp:2.6638e-04 L4_sharp:1.2075e-03 L5_sharp:2.0899e-03 L6_sharp:1.5414e-03 L7_sharp:1.8667e-03 L8_sharp:2.9535e-03 L9_sharp:3.8828e-03 L10_sharp:3.9480e-03 L11_sharp:6.3944e-03 L12_sharp:5.3449e-02 total_fnorm:7.3000e+01 total_l1_linf:1.5258e+05 total_spectral:3.6500e+01 L1_fnorm:1.2656e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2578e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2812e+00 L1_l1linf:3.5547e-01 L2_l1linf:3.3984e-01 L3_l1linf:3.3203e-01 L4_l1linf:3.3008e-01 L5_l1linf:3.3594e-01 
L6_l1linf:3.3398e-01 L7_l1linf:3.3594e-01 L8_l1linf:3.2617e-01 L9_l1linf:3.2422e-01 L10_l1linf:3.2422e-01 L11_l1linf:3.3203e-01 L12_l1linf:3.4375e-01 L1_spectral:1.5231e-02 L2_spectral:1.4922e-02 L3_spectral:1.5016e-02 L4_spectral:1.5095e-02 L5_spectral:1.4985e-02 L6_spectral:1.5116e-02 L7_spectral:1.5097e-02 L8_spectral:1.5290e-02 L9_spectral:1.5116e-02 L10_spectral:1.5303e-02 L11_spectral:1.5156e-02 L12_spectral:1.5591e-02 train_time:135117ms step_avg:48.26ms +[2025-09-11 08:23:35] [Rank 0] step:2801/10000 train_time:136257ms step_avg:48.65ms +[2025-09-11 08:23:35] [Rank 0] step:2801/10000 train_time:136257ms step_avg:48.65ms +[2025-09-11 08:23:36] [Rank 0] step:2821/10000 train_time:136924ms step_avg:48.54ms +[2025-09-11 08:23:36] [Rank 0] step:2821/10000 train_time:136924ms step_avg:48.54ms +[2025-09-11 08:23:37] [Rank 0] step:2841/10000 train_time:137590ms step_avg:48.43ms +[2025-09-11 08:23:37] [Rank 0] step:2841/10000 train_time:137590ms step_avg:48.43ms +[2025-09-11 08:23:37] [Rank 0] step:2861/10000 train_time:138255ms step_avg:48.32ms +[2025-09-11 08:23:37] [Rank 0] step:2861/10000 train_time:138255ms step_avg:48.32ms +[2025-09-11 08:23:38] [Rank 0] step:2881/10000 train_time:138920ms step_avg:48.22ms +[2025-09-11 08:23:38] [Rank 0] step:2881/10000 train_time:138920ms step_avg:48.22ms +[2025-09-11 08:23:39] [Rank 0] step:2901/10000 train_time:139584ms step_avg:48.12ms +[2025-09-11 08:23:39] [Rank 0] step:2901/10000 train_time:139584ms step_avg:48.12ms +[2025-09-11 08:23:39] [Rank 0] step:2921/10000 train_time:140249ms step_avg:48.01ms +[2025-09-11 08:23:39] [Rank 0] step:2921/10000 train_time:140249ms step_avg:48.01ms +[2025-09-11 08:23:40] [Rank 0] step:2941/10000 train_time:140913ms step_avg:47.91ms +[2025-09-11 08:23:40] [Rank 0] step:2941/10000 train_time:140913ms step_avg:47.91ms +[2025-09-11 08:23:41] [Rank 0] step:2961/10000 train_time:141578ms step_avg:47.81ms +[2025-09-11 08:23:41] [Rank 0] step:2961/10000 train_time:141578ms step_avg:47.81ms 
+[2025-09-11 08:23:41] [Rank 0] step:2981/10000 train_time:142245ms step_avg:47.72ms +[2025-09-11 08:23:41] [Rank 0] step:2981/10000 train_time:142245ms step_avg:47.72ms +[2025-09-11 08:23:42] [Rank 0] step:3001/10000 train_time:142912ms step_avg:47.62ms +[2025-09-11 08:23:42] [Rank 0] step:3001/10000 train_time:142912ms step_avg:47.62ms +[2025-09-11 08:23:43] [Rank 0] step:3021/10000 train_time:143579ms step_avg:47.53ms +[2025-09-11 08:23:43] [Rank 0] step:3021/10000 train_time:143579ms step_avg:47.53ms +[2025-09-11 08:23:43] [Rank 0] step:3041/10000 train_time:144246ms step_avg:47.43ms +[2025-09-11 08:23:43] [Rank 0] step:3041/10000 train_time:144246ms step_avg:47.43ms +[2025-09-11 08:23:44] [Rank 0] step:3061/10000 train_time:144914ms step_avg:47.34ms +[2025-09-11 08:23:44] [Rank 0] step:3061/10000 train_time:144914ms step_avg:47.34ms +[2025-09-11 08:23:45] [Rank 0] step:3081/10000 train_time:145582ms step_avg:47.25ms +[2025-09-11 08:23:45] [Rank 0] step:3081/10000 train_time:145582ms step_avg:47.25ms +[2025-09-11 08:23:45] [Rank 0] step:3101/10000 train_time:146249ms step_avg:47.16ms +[2025-09-11 08:23:45] [Rank 0] step:3101/10000 train_time:146249ms step_avg:47.16ms +[2025-09-11 08:23:46] [Rank 0] step:3121/10000 train_time:146917ms step_avg:47.07ms +[2025-09-11 08:23:46] [Rank 0] step:3121/10000 train_time:146917ms step_avg:47.07ms +[2025-09-11 08:23:47] [Rank 0] step:3141/10000 train_time:147583ms step_avg:46.99ms +[2025-09-11 08:23:47] [Rank 0] step:3141/10000 train_time:147583ms step_avg:46.99ms +[2025-09-11 08:23:47] [Rank 0] step:3161/10000 train_time:148250ms step_avg:46.90ms +[2025-09-11 08:23:47] [Rank 0] step:3161/10000 train_time:148250ms step_avg:46.90ms +[2025-09-11 08:23:48] [Rank 0] step:3181/10000 train_time:148918ms step_avg:46.81ms +[2025-09-11 08:23:48] [Rank 0] step:3181/10000 train_time:148918ms step_avg:46.81ms +[2025-09-11 08:23:49] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 08:23:49] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 08:23:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 08:23:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 08:23:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 08:23:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 08:23:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:23:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:23:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 08:23:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 08:23:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 08:23:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 08:23:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 08:23:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 08:23:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 08:23:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 08:23:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 08:23:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 08:23:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 08:23:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 08:23:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 08:23:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 08:23:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 08:23:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 08:23:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 08:23:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 08:23:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 08:23:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 08:23:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 08:23:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 08:23:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 08:23:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 08:23:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 08:23:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 08:23:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 08:23:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 08:23:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 08:23:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 08:23:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 08:23:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 08:23:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 08:23:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 08:23:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 08:23:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 08:23:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 08:23:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 08:23:59] [Rank 0] PRINT: step:3200/10000 val_loss:4.5459 total_sharp:1.7185e-04 L1_sharp:9.7127e-03 L2_sharp:1.3028e-03 L3_sharp:6.7924e-04 L4_sharp:1.2051e-03 L5_sharp:2.1859e-03 L6_sharp:1.7639e-03 L7_sharp:1.9736e-03 L8_sharp:3.4754e-03 L9_sharp:3.8792e-03 L10_sharp:4.2717e-03 L11_sharp:5.9138e-03 L12_sharp:6.0593e-02 total_fnorm:8.0000e+01 total_l1_linf:1.7306e+05 total_spectral:4.0000e+01 L1_fnorm:1.2656e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2578e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2656e+00 L1_l1linf:3.5352e-01 L2_l1linf:3.3203e-01 L3_l1linf:3.2812e-01 L4_l1linf:3.2227e-01 L5_l1linf:3.3008e-01 L6_l1linf:3.3398e-01 L7_l1linf:3.3789e-01 L8_l1linf:3.3203e-01 L9_l1linf:3.2617e-01 L10_l1linf:3.2227e-01 L11_l1linf:3.2617e-01 L12_l1linf:3.3398e-01 L1_spectral:1.5525e-02 L2_spectral:1.5088e-02 L3_spectral:1.5196e-02 L4_spectral:1.5211e-02 L5_spectral:1.5224e-02 L6_spectral:1.5218e-02 L7_spectral:1.5214e-02 L8_spectral:1.5414e-02 L9_spectral:1.5436e-02 L10_spectral:1.5429e-02 L11_spectral:1.5493e-02 L12_spectral:1.5861e-02 train_time:149567ms step_avg:46.74ms +[2025-09-11 08:23:59] [Rank 0] PRINT: step:3200/10000 
val_loss:4.5459 total_sharp:1.7185e-04 L1_sharp:9.7127e-03 L2_sharp:1.3028e-03 L3_sharp:6.7924e-04 L4_sharp:1.2051e-03 L5_sharp:2.1859e-03 L6_sharp:1.7639e-03 L7_sharp:1.9736e-03 L8_sharp:3.4754e-03 L9_sharp:3.8792e-03 L10_sharp:4.2717e-03 L11_sharp:5.9138e-03 L12_sharp:6.0593e-02 total_fnorm:8.0000e+01 total_l1_linf:1.7306e+05 total_spectral:4.0000e+01 L1_fnorm:1.2656e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2578e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2656e+00 L1_l1linf:3.5352e-01 L2_l1linf:3.3203e-01 L3_l1linf:3.2812e-01 L4_l1linf:3.2227e-01 L5_l1linf:3.3008e-01 L6_l1linf:3.3398e-01 L7_l1linf:3.3789e-01 L8_l1linf:3.3203e-01 L9_l1linf:3.2617e-01 L10_l1linf:3.2227e-01 L11_l1linf:3.2617e-01 L12_l1linf:3.3398e-01 L1_spectral:1.5525e-02 L2_spectral:1.5088e-02 L3_spectral:1.5196e-02 L4_spectral:1.5211e-02 L5_spectral:1.5224e-02 L6_spectral:1.5218e-02 L7_spectral:1.5214e-02 L8_spectral:1.5414e-02 L9_spectral:1.5436e-02 L10_spectral:1.5429e-02 L11_spectral:1.5493e-02 L12_spectral:1.5861e-02 train_time:149567ms step_avg:46.74ms +[2025-09-11 08:24:00] [Rank 0] step:3201/10000 train_time:150720ms step_avg:47.09ms +[2025-09-11 08:24:00] [Rank 0] step:3201/10000 train_time:150720ms step_avg:47.09ms +[2025-09-11 08:24:01] [Rank 0] step:3221/10000 train_time:151424ms step_avg:47.01ms +[2025-09-11 08:24:01] [Rank 0] step:3221/10000 train_time:151424ms step_avg:47.01ms +[2025-09-11 08:24:01] [Rank 0] step:3241/10000 train_time:152095ms step_avg:46.93ms +[2025-09-11 08:24:01] [Rank 0] step:3241/10000 train_time:152095ms step_avg:46.93ms +[2025-09-11 08:24:02] [Rank 0] step:3261/10000 train_time:152763ms step_avg:46.85ms +[2025-09-11 08:24:02] [Rank 0] step:3261/10000 train_time:152763ms step_avg:46.85ms +[2025-09-11 08:24:03] [Rank 0] step:3281/10000 train_time:153432ms step_avg:46.76ms +[2025-09-11 08:24:03] [Rank 0] step:3281/10000 
train_time:153432ms step_avg:46.76ms +[2025-09-11 08:24:03] [Rank 0] step:3301/10000 train_time:154101ms step_avg:46.68ms +[2025-09-11 08:24:03] [Rank 0] step:3301/10000 train_time:154101ms step_avg:46.68ms +[2025-09-11 08:24:04] [Rank 0] step:3321/10000 train_time:154770ms step_avg:46.60ms +[2025-09-11 08:24:04] [Rank 0] step:3321/10000 train_time:154770ms step_avg:46.60ms +[2025-09-11 08:24:05] [Rank 0] step:3341/10000 train_time:155439ms step_avg:46.52ms +[2025-09-11 08:24:05] [Rank 0] step:3341/10000 train_time:155439ms step_avg:46.52ms +[2025-09-11 08:24:05] [Rank 0] step:3361/10000 train_time:156108ms step_avg:46.45ms +[2025-09-11 08:24:05] [Rank 0] step:3361/10000 train_time:156108ms step_avg:46.45ms +[2025-09-11 08:24:06] [Rank 0] step:3381/10000 train_time:156776ms step_avg:46.37ms +[2025-09-11 08:24:06] [Rank 0] step:3381/10000 train_time:156776ms step_avg:46.37ms +[2025-09-11 08:24:07] [Rank 0] step:3401/10000 train_time:157444ms step_avg:46.29ms +[2025-09-11 08:24:07] [Rank 0] step:3401/10000 train_time:157444ms step_avg:46.29ms +[2025-09-11 08:24:07] [Rank 0] step:3421/10000 train_time:158111ms step_avg:46.22ms +[2025-09-11 08:24:07] [Rank 0] step:3421/10000 train_time:158111ms step_avg:46.22ms +[2025-09-11 08:24:08] [Rank 0] step:3441/10000 train_time:158779ms step_avg:46.14ms +[2025-09-11 08:24:08] [Rank 0] step:3441/10000 train_time:158779ms step_avg:46.14ms +[2025-09-11 08:24:09] [Rank 0] step:3461/10000 train_time:159446ms step_avg:46.07ms +[2025-09-11 08:24:09] [Rank 0] step:3461/10000 train_time:159446ms step_avg:46.07ms +[2025-09-11 08:24:09] [Rank 0] step:3481/10000 train_time:160114ms step_avg:46.00ms +[2025-09-11 08:24:09] [Rank 0] step:3481/10000 train_time:160114ms step_avg:46.00ms +[2025-09-11 08:24:10] [Rank 0] step:3501/10000 train_time:160782ms step_avg:45.92ms +[2025-09-11 08:24:10] [Rank 0] step:3501/10000 train_time:160782ms step_avg:45.92ms +[2025-09-11 08:24:11] [Rank 0] step:3521/10000 train_time:161451ms step_avg:45.85ms 
+[2025-09-11 08:24:11] [Rank 0] step:3521/10000 train_time:161451ms step_avg:45.85ms +[2025-09-11 08:24:11] [Rank 0] step:3541/10000 train_time:162131ms step_avg:45.79ms +[2025-09-11 08:24:11] [Rank 0] step:3541/10000 train_time:162131ms step_avg:45.79ms +[2025-09-11 08:24:12] [Rank 0] step:3561/10000 train_time:162799ms step_avg:45.72ms +[2025-09-11 08:24:12] [Rank 0] step:3561/10000 train_time:162799ms step_avg:45.72ms +[2025-09-11 08:24:13] [Rank 0] step:3581/10000 train_time:163467ms step_avg:45.65ms +[2025-09-11 08:24:13] [Rank 0] step:3581/10000 train_time:163467ms step_avg:45.65ms +[2025-09-11 08:24:13] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 08:24:13] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 08:24:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 08:24:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 08:24:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 08:24:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 08:24:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:24:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:24:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 08:24:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 08:24:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 08:24:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 08:24:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 08:24:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 08:24:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 08:24:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 08:24:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 08:24:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 08:24:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 08:24:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 08:24:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 08:24:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 08:24:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 08:24:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 08:24:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 08:24:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 08:24:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 08:24:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 08:24:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 08:24:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 08:24:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 08:24:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 08:24:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 08:24:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 08:24:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 08:24:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 08:24:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 08:24:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 08:24:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 08:24:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 08:24:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 08:24:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 08:24:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 08:24:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 08:24:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 08:24:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:24:23] [Rank 0] PRINT: step:3600/10000 val_loss:4.5041 total_sharp:2.4303e-04 L1_sharp:1.0952e-02 L2_sharp:9.4681e-04 L3_sharp:6.2450e-04 L4_sharp:2.6635e-04 L5_sharp:2.0477e-03 L6_sharp:1.6389e-03 L7_sharp:1.8038e-03 L8_sharp:3.2830e-03 L9_sharp:3.5906e-03 L10_sharp:3.9711e-03 L11_sharp:7.5211e-03 L12_sharp:9.0316e-02 total_fnorm:7.0000e+01 total_l1_linf:1.4541e+05 total_spectral:3.5250e+01 L1_fnorm:1.2578e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2578e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2734e+00 L1_l1linf:3.4961e-01 L2_l1linf:3.3008e-01 L3_l1linf:3.2227e-01 L4_l1linf:3.2227e-01 L5_l1linf:3.2422e-01 L6_l1linf:3.2422e-01 L7_l1linf:3.2812e-01 L8_l1linf:3.2422e-01 L9_l1linf:3.1836e-01 L10_l1linf:3.1641e-01 L11_l1linf:3.1641e-01 L12_l1linf:3.3594e-01 L1_spectral:1.5628e-02 L2_spectral:1.5252e-02 L3_spectral:1.5328e-02 L4_spectral:1.5345e-02 L5_spectral:1.5338e-02 L6_spectral:1.5427e-02 L7_spectral:1.5404e-02 L8_spectral:1.5496e-02 L9_spectral:1.5515e-02 L10_spectral:1.5592e-02 L11_spectral:1.5470e-02 L12_spectral:1.5857e-02 train_time:164117ms step_avg:45.59ms +[2025-09-11 08:24:23] [Rank 0] PRINT: step:3600/10000 val_loss:4.5041 total_sharp:2.4303e-04 L1_sharp:1.0952e-02 L2_sharp:9.4681e-04 L3_sharp:6.2450e-04 L4_sharp:2.6635e-04 L5_sharp:2.0477e-03 L6_sharp:1.6389e-03 L7_sharp:1.8038e-03 L8_sharp:3.2830e-03 L9_sharp:3.5906e-03 L10_sharp:3.9711e-03 L11_sharp:7.5211e-03 L12_sharp:9.0316e-02 total_fnorm:7.0000e+01 total_l1_linf:1.4541e+05 total_spectral:3.5250e+01 L1_fnorm:1.2578e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2578e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2734e+00 L1_l1linf:3.4961e-01 L2_l1linf:3.3008e-01 L3_l1linf:3.2227e-01 L4_l1linf:3.2227e-01 L5_l1linf:3.2422e-01 
L6_l1linf:3.2422e-01 L7_l1linf:3.2812e-01 L8_l1linf:3.2422e-01 L9_l1linf:3.1836e-01 L10_l1linf:3.1641e-01 L11_l1linf:3.1641e-01 L12_l1linf:3.3594e-01 L1_spectral:1.5628e-02 L2_spectral:1.5252e-02 L3_spectral:1.5328e-02 L4_spectral:1.5345e-02 L5_spectral:1.5338e-02 L6_spectral:1.5427e-02 L7_spectral:1.5404e-02 L8_spectral:1.5496e-02 L9_spectral:1.5515e-02 L10_spectral:1.5592e-02 L11_spectral:1.5470e-02 L12_spectral:1.5857e-02 train_time:164117ms step_avg:45.59ms +[2025-09-11 08:24:24] [Rank 0] step:3601/10000 train_time:165280ms step_avg:45.90ms +[2025-09-11 08:24:24] [Rank 0] step:3601/10000 train_time:165280ms step_avg:45.90ms +[2025-09-11 08:24:25] [Rank 0] step:3621/10000 train_time:165952ms step_avg:45.83ms +[2025-09-11 08:24:25] [Rank 0] step:3621/10000 train_time:165952ms step_avg:45.83ms +[2025-09-11 08:24:26] [Rank 0] step:3641/10000 train_time:166622ms step_avg:45.76ms +[2025-09-11 08:24:26] [Rank 0] step:3641/10000 train_time:166622ms step_avg:45.76ms +[2025-09-11 08:24:26] [Rank 0] step:3661/10000 train_time:167291ms step_avg:45.70ms +[2025-09-11 08:24:26] [Rank 0] step:3661/10000 train_time:167291ms step_avg:45.70ms +[2025-09-11 08:24:27] [Rank 0] step:3681/10000 train_time:167960ms step_avg:45.63ms +[2025-09-11 08:24:27] [Rank 0] step:3681/10000 train_time:167960ms step_avg:45.63ms +[2025-09-11 08:24:28] [Rank 0] step:3701/10000 train_time:168628ms step_avg:45.56ms +[2025-09-11 08:24:28] [Rank 0] step:3701/10000 train_time:168628ms step_avg:45.56ms +[2025-09-11 08:24:28] [Rank 0] step:3721/10000 train_time:169306ms step_avg:45.50ms +[2025-09-11 08:24:28] [Rank 0] step:3721/10000 train_time:169306ms step_avg:45.50ms +[2025-09-11 08:24:29] [Rank 0] step:3741/10000 train_time:169986ms step_avg:45.44ms +[2025-09-11 08:24:29] [Rank 0] step:3741/10000 train_time:169986ms step_avg:45.44ms +[2025-09-11 08:24:30] [Rank 0] step:3761/10000 train_time:170666ms step_avg:45.38ms +[2025-09-11 08:24:30] [Rank 0] step:3761/10000 train_time:170666ms step_avg:45.38ms 
+[2025-09-11 08:24:31] [Rank 0] step:3781/10000 train_time:171345ms step_avg:45.32ms +[2025-09-11 08:24:31] [Rank 0] step:3781/10000 train_time:171345ms step_avg:45.32ms +[2025-09-11 08:24:31] [Rank 0] step:3801/10000 train_time:172026ms step_avg:45.26ms +[2025-09-11 08:24:31] [Rank 0] step:3801/10000 train_time:172026ms step_avg:45.26ms +[2025-09-11 08:24:32] [Rank 0] step:3821/10000 train_time:172706ms step_avg:45.20ms +[2025-09-11 08:24:32] [Rank 0] step:3821/10000 train_time:172706ms step_avg:45.20ms +[2025-09-11 08:24:33] [Rank 0] step:3841/10000 train_time:173386ms step_avg:45.14ms +[2025-09-11 08:24:33] [Rank 0] step:3841/10000 train_time:173386ms step_avg:45.14ms +[2025-09-11 08:24:33] [Rank 0] step:3861/10000 train_time:174065ms step_avg:45.08ms +[2025-09-11 08:24:33] [Rank 0] step:3861/10000 train_time:174065ms step_avg:45.08ms +[2025-09-11 08:24:34] [Rank 0] step:3881/10000 train_time:174744ms step_avg:45.03ms +[2025-09-11 08:24:34] [Rank 0] step:3881/10000 train_time:174744ms step_avg:45.03ms +[2025-09-11 08:24:35] [Rank 0] step:3901/10000 train_time:175425ms step_avg:44.97ms +[2025-09-11 08:24:35] [Rank 0] step:3901/10000 train_time:175425ms step_avg:44.97ms +[2025-09-11 08:24:35] [Rank 0] step:3921/10000 train_time:176105ms step_avg:44.91ms +[2025-09-11 08:24:35] [Rank 0] step:3921/10000 train_time:176105ms step_avg:44.91ms +[2025-09-11 08:24:36] [Rank 0] step:3941/10000 train_time:176797ms step_avg:44.86ms +[2025-09-11 08:24:36] [Rank 0] step:3941/10000 train_time:176797ms step_avg:44.86ms +[2025-09-11 08:24:37] [Rank 0] step:3961/10000 train_time:177476ms step_avg:44.81ms +[2025-09-11 08:24:37] [Rank 0] step:3961/10000 train_time:177476ms step_avg:44.81ms +[2025-09-11 08:24:37] [Rank 0] step:3981/10000 train_time:178155ms step_avg:44.75ms +[2025-09-11 08:24:37] [Rank 0] step:3981/10000 train_time:178155ms step_avg:44.75ms +[2025-09-11 08:24:38] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 08:24:38] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 08:24:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 08:24:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 08:24:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 08:24:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 08:24:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:24:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:24:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 08:24:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 08:24:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 08:24:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 08:24:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 08:24:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 08:24:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 08:24:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 08:24:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 08:24:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 08:24:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 08:24:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 08:24:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 08:24:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 08:24:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 08:24:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 08:24:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 08:24:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 08:24:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 08:24:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 08:24:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 08:24:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 08:24:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 08:24:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 08:24:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 08:24:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 08:24:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 08:24:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 08:24:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 08:24:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 08:24:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 08:24:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 08:24:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 08:24:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 08:24:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 08:24:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 08:24:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 08:24:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 08:24:48] [Rank 0] PRINT: step:4000/10000 val_loss:4.4518 total_sharp:2.0620e-04 L1_sharp:1.2268e-02 L2_sharp:5.7916e-04 L3_sharp:1.0158e-03 L4_sharp:9.8629e-04 L5_sharp:1.7847e-03 L6_sharp:1.5112e-03 L7_sharp:1.9680e-03 L8_sharp:3.3662e-03 L9_sharp:3.4426e-03 L10_sharp:4.2726e-03 L11_sharp:7.8166e-03 L12_sharp:6.9316e-02 total_fnorm:7.9500e+01 total_l1_linf:1.6589e+05 total_spectral:3.9750e+01 L1_fnorm:1.2500e+00 L2_fnorm:1.2188e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2578e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2734e+00 L1_l1linf:3.3984e-01 L2_l1linf:3.2227e-01 L3_l1linf:3.1836e-01 L4_l1linf:3.1641e-01 L5_l1linf:3.1836e-01 L6_l1linf:3.2422e-01 L7_l1linf:3.2617e-01 L8_l1linf:3.2031e-01 L9_l1linf:3.2031e-01 L10_l1linf:3.1641e-01 L11_l1linf:3.3203e-01 L12_l1linf:3.3789e-01 L1_spectral:1.5713e-02 L2_spectral:1.5296e-02 L3_spectral:1.5399e-02 L4_spectral:1.5501e-02 L5_spectral:1.5369e-02 L6_spectral:1.5594e-02 L7_spectral:1.5469e-02 L8_spectral:1.5755e-02 L9_spectral:1.5509e-02 L10_spectral:1.5632e-02 L11_spectral:1.5680e-02 L12_spectral:1.5810e-02 train_time:178816ms step_avg:44.70ms +[2025-09-11 08:24:48] [Rank 0] PRINT: step:4000/10000 
val_loss:4.4518 total_sharp:2.0620e-04 L1_sharp:1.2268e-02 L2_sharp:5.7916e-04 L3_sharp:1.0158e-03 L4_sharp:9.8629e-04 L5_sharp:1.7847e-03 L6_sharp:1.5112e-03 L7_sharp:1.9680e-03 L8_sharp:3.3662e-03 L9_sharp:3.4426e-03 L10_sharp:4.2726e-03 L11_sharp:7.8166e-03 L12_sharp:6.9316e-02 total_fnorm:7.9500e+01 total_l1_linf:1.6589e+05 total_spectral:3.9750e+01 L1_fnorm:1.2500e+00 L2_fnorm:1.2188e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2578e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2734e+00 L1_l1linf:3.3984e-01 L2_l1linf:3.2227e-01 L3_l1linf:3.1836e-01 L4_l1linf:3.1641e-01 L5_l1linf:3.1836e-01 L6_l1linf:3.2422e-01 L7_l1linf:3.2617e-01 L8_l1linf:3.2031e-01 L9_l1linf:3.2031e-01 L10_l1linf:3.1641e-01 L11_l1linf:3.3203e-01 L12_l1linf:3.3789e-01 L1_spectral:1.5713e-02 L2_spectral:1.5296e-02 L3_spectral:1.5399e-02 L4_spectral:1.5501e-02 L5_spectral:1.5369e-02 L6_spectral:1.5594e-02 L7_spectral:1.5469e-02 L8_spectral:1.5755e-02 L9_spectral:1.5509e-02 L10_spectral:1.5632e-02 L11_spectral:1.5680e-02 L12_spectral:1.5810e-02 train_time:178816ms step_avg:44.70ms +[2025-09-11 08:24:49] [Rank 0] step:4001/10000 train_time:179977ms step_avg:44.98ms +[2025-09-11 08:24:49] [Rank 0] step:4001/10000 train_time:179977ms step_avg:44.98ms +[2025-09-11 08:24:50] [Rank 0] step:4021/10000 train_time:180655ms step_avg:44.93ms +[2025-09-11 08:24:50] [Rank 0] step:4021/10000 train_time:180655ms step_avg:44.93ms +[2025-09-11 08:24:51] [Rank 0] step:4041/10000 train_time:181334ms step_avg:44.87ms +[2025-09-11 08:24:51] [Rank 0] step:4041/10000 train_time:181334ms step_avg:44.87ms +[2025-09-11 08:24:51] [Rank 0] step:4061/10000 train_time:182013ms step_avg:44.82ms +[2025-09-11 08:24:51] [Rank 0] step:4061/10000 train_time:182013ms step_avg:44.82ms +[2025-09-11 08:24:52] [Rank 0] step:4081/10000 train_time:182692ms step_avg:44.77ms +[2025-09-11 08:24:52] [Rank 0] step:4081/10000 
train_time:182692ms step_avg:44.77ms +[2025-09-11 08:24:53] [Rank 0] step:4101/10000 train_time:183370ms step_avg:44.71ms +[2025-09-11 08:24:53] [Rank 0] step:4101/10000 train_time:183370ms step_avg:44.71ms +[2025-09-11 08:24:53] [Rank 0] step:4121/10000 train_time:184049ms step_avg:44.66ms +[2025-09-11 08:24:53] [Rank 0] step:4121/10000 train_time:184049ms step_avg:44.66ms +[2025-09-11 08:24:54] [Rank 0] step:4141/10000 train_time:184727ms step_avg:44.61ms +[2025-09-11 08:24:54] [Rank 0] step:4141/10000 train_time:184727ms step_avg:44.61ms +[2025-09-11 08:24:55] [Rank 0] step:4161/10000 train_time:185407ms step_avg:44.56ms +[2025-09-11 08:24:55] [Rank 0] step:4161/10000 train_time:185407ms step_avg:44.56ms +[2025-09-11 08:24:55] [Rank 0] step:4181/10000 train_time:186086ms step_avg:44.51ms +[2025-09-11 08:24:55] [Rank 0] step:4181/10000 train_time:186086ms step_avg:44.51ms +[2025-09-11 08:24:56] [Rank 0] step:4201/10000 train_time:186766ms step_avg:44.46ms +[2025-09-11 08:24:56] [Rank 0] step:4201/10000 train_time:186766ms step_avg:44.46ms +[2025-09-11 08:24:57] [Rank 0] step:4221/10000 train_time:187444ms step_avg:44.41ms +[2025-09-11 08:24:57] [Rank 0] step:4221/10000 train_time:187444ms step_avg:44.41ms +[2025-09-11 08:24:57] [Rank 0] step:4241/10000 train_time:188123ms step_avg:44.36ms +[2025-09-11 08:24:57] [Rank 0] step:4241/10000 train_time:188123ms step_avg:44.36ms +[2025-09-11 08:24:58] [Rank 0] step:4261/10000 train_time:188802ms step_avg:44.31ms +[2025-09-11 08:24:58] [Rank 0] step:4261/10000 train_time:188802ms step_avg:44.31ms +[2025-09-11 08:24:59] [Rank 0] step:4281/10000 train_time:189484ms step_avg:44.26ms +[2025-09-11 08:24:59] [Rank 0] step:4281/10000 train_time:189484ms step_avg:44.26ms +[2025-09-11 08:25:00] [Rank 0] step:4301/10000 train_time:190730ms step_avg:44.35ms +[2025-09-11 08:25:00] [Rank 0] step:4301/10000 train_time:190730ms step_avg:44.35ms +[2025-09-11 08:25:01] [Rank 0] step:4321/10000 train_time:191409ms step_avg:44.30ms 
+[2025-09-11 08:25:01] [Rank 0] step:4321/10000 train_time:191409ms step_avg:44.30ms +[2025-09-11 08:25:01] [Rank 0] step:4341/10000 train_time:192089ms step_avg:44.25ms +[2025-09-11 08:25:01] [Rank 0] step:4341/10000 train_time:192089ms step_avg:44.25ms +[2025-09-11 08:25:02] [Rank 0] step:4361/10000 train_time:192969ms step_avg:44.25ms +[2025-09-11 08:25:02] [Rank 0] step:4361/10000 train_time:192969ms step_avg:44.25ms +[2025-09-11 08:25:03] [Rank 0] step:4381/10000 train_time:193712ms step_avg:44.22ms +[2025-09-11 08:25:03] [Rank 0] step:4381/10000 train_time:193712ms step_avg:44.22ms +[2025-09-11 08:25:04] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 08:25:04] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 08:25:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 08:25:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 08:25:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 08:25:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 08:25:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:25:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:25:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 08:25:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 08:25:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 08:25:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 08:25:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 08:25:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 08:25:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 08:25:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 08:25:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 08:25:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 08:25:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 08:25:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 08:25:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 08:25:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 08:25:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 08:25:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 08:25:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 08:25:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 08:25:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 08:25:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 08:25:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 08:25:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 08:25:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 08:25:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 08:25:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 08:25:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 08:25:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 08:25:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 08:25:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 08:25:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 08:25:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 08:25:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 08:25:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 08:25:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 08:25:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 08:25:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 08:25:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 08:25:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:25:14] [Rank 0] PRINT: step:4400/10000 val_loss:4.4361 total_sharp:1.6078e-04 L1_sharp:8.9683e-03 L2_sharp:8.0859e-04 L3_sharp:1.5236e-03 L4_sharp:5.4570e-04 L5_sharp:1.2631e-03 L6_sharp:1.2929e-03 L7_sharp:2.0271e-03 L8_sharp:2.6548e-03 L9_sharp:3.4409e-03 L10_sharp:3.4454e-03 L11_sharp:5.9975e-03 L12_sharp:5.4222e-02 total_fnorm:7.4500e+01 total_l1_linf:1.5258e+05 total_spectral:3.7500e+01 L1_fnorm:1.2500e+00 L2_fnorm:1.2188e+00 L3_fnorm:1.2266e+00 L4_fnorm:1.2422e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2578e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2656e+00 L1_l1linf:3.3594e-01 L2_l1linf:3.1641e-01 L3_l1linf:3.1445e-01 L4_l1linf:3.1055e-01 L5_l1linf:3.1641e-01 L6_l1linf:3.1836e-01 L7_l1linf:3.1836e-01 L8_l1linf:3.1250e-01 L9_l1linf:3.0859e-01 L10_l1linf:3.1055e-01 L11_l1linf:3.1445e-01 L12_l1linf:3.3008e-01 L1_spectral:1.5862e-02 L2_spectral:1.5319e-02 L3_spectral:1.5482e-02 L4_spectral:1.5548e-02 L5_spectral:1.5531e-02 L6_spectral:1.5670e-02 L7_spectral:1.5599e-02 L8_spectral:1.5649e-02 L9_spectral:1.5592e-02 L10_spectral:1.5801e-02 L11_spectral:1.5736e-02 L12_spectral:1.5929e-02 train_time:194372ms step_avg:44.18ms +[2025-09-11 08:25:14] [Rank 0] PRINT: step:4400/10000 val_loss:4.4361 total_sharp:1.6078e-04 L1_sharp:8.9683e-03 L2_sharp:8.0859e-04 L3_sharp:1.5236e-03 L4_sharp:5.4570e-04 L5_sharp:1.2631e-03 L6_sharp:1.2929e-03 L7_sharp:2.0271e-03 L8_sharp:2.6548e-03 L9_sharp:3.4409e-03 L10_sharp:3.4454e-03 L11_sharp:5.9975e-03 L12_sharp:5.4222e-02 total_fnorm:7.4500e+01 total_l1_linf:1.5258e+05 total_spectral:3.7500e+01 L1_fnorm:1.2500e+00 L2_fnorm:1.2188e+00 L3_fnorm:1.2266e+00 L4_fnorm:1.2422e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2578e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2656e+00 L1_l1linf:3.3594e-01 L2_l1linf:3.1641e-01 L3_l1linf:3.1445e-01 L4_l1linf:3.1055e-01 L5_l1linf:3.1641e-01 
L6_l1linf:3.1836e-01 L7_l1linf:3.1836e-01 L8_l1linf:3.1250e-01 L9_l1linf:3.0859e-01 L10_l1linf:3.1055e-01 L11_l1linf:3.1445e-01 L12_l1linf:3.3008e-01 L1_spectral:1.5862e-02 L2_spectral:1.5319e-02 L3_spectral:1.5482e-02 L4_spectral:1.5548e-02 L5_spectral:1.5531e-02 L6_spectral:1.5670e-02 L7_spectral:1.5599e-02 L8_spectral:1.5649e-02 L9_spectral:1.5592e-02 L10_spectral:1.5801e-02 L11_spectral:1.5736e-02 L12_spectral:1.5929e-02 train_time:194372ms step_avg:44.18ms +[2025-09-11 08:25:15] [Rank 0] step:4401/10000 train_time:195519ms step_avg:44.43ms +[2025-09-11 08:25:15] [Rank 0] step:4401/10000 train_time:195519ms step_avg:44.43ms +[2025-09-11 08:25:16] [Rank 0] step:4421/10000 train_time:196203ms step_avg:44.38ms +[2025-09-11 08:25:16] [Rank 0] step:4421/10000 train_time:196203ms step_avg:44.38ms +[2025-09-11 08:25:16] [Rank 0] step:4441/10000 train_time:196883ms step_avg:44.33ms +[2025-09-11 08:25:16] [Rank 0] step:4441/10000 train_time:196883ms step_avg:44.33ms +[2025-09-11 08:25:17] [Rank 0] step:4461/10000 train_time:197566ms step_avg:44.29ms +[2025-09-11 08:25:17] [Rank 0] step:4461/10000 train_time:197566ms step_avg:44.29ms +[2025-09-11 08:25:18] [Rank 0] step:4481/10000 train_time:198248ms step_avg:44.24ms +[2025-09-11 08:25:18] [Rank 0] step:4481/10000 train_time:198248ms step_avg:44.24ms +[2025-09-11 08:25:18] [Rank 0] step:4501/10000 train_time:198930ms step_avg:44.20ms +[2025-09-11 08:25:18] [Rank 0] step:4501/10000 train_time:198930ms step_avg:44.20ms +[2025-09-11 08:25:19] [Rank 0] step:4521/10000 train_time:199613ms step_avg:44.15ms +[2025-09-11 08:25:19] [Rank 0] step:4521/10000 train_time:199613ms step_avg:44.15ms +[2025-09-11 08:25:20] [Rank 0] step:4541/10000 train_time:200295ms step_avg:44.11ms +[2025-09-11 08:25:20] [Rank 0] step:4541/10000 train_time:200295ms step_avg:44.11ms +[2025-09-11 08:25:20] [Rank 0] step:4561/10000 train_time:200977ms step_avg:44.06ms +[2025-09-11 08:25:20] [Rank 0] step:4561/10000 train_time:200977ms step_avg:44.06ms 
+[2025-09-11 08:25:21] [Rank 0] step:4581/10000 train_time:201660ms step_avg:44.02ms +[2025-09-11 08:25:21] [Rank 0] step:4581/10000 train_time:201660ms step_avg:44.02ms +[2025-09-11 08:25:22] [Rank 0] step:4601/10000 train_time:202342ms step_avg:43.98ms +[2025-09-11 08:25:22] [Rank 0] step:4601/10000 train_time:202342ms step_avg:43.98ms +[2025-09-11 08:25:22] [Rank 0] step:4621/10000 train_time:203024ms step_avg:43.94ms +[2025-09-11 08:25:22] [Rank 0] step:4621/10000 train_time:203024ms step_avg:43.94ms +[2025-09-11 08:25:23] [Rank 0] step:4641/10000 train_time:203706ms step_avg:43.89ms +[2025-09-11 08:25:23] [Rank 0] step:4641/10000 train_time:203706ms step_avg:43.89ms +[2025-09-11 08:25:24] [Rank 0] step:4661/10000 train_time:204390ms step_avg:43.85ms +[2025-09-11 08:25:24] [Rank 0] step:4661/10000 train_time:204390ms step_avg:43.85ms +[2025-09-11 08:25:24] [Rank 0] step:4681/10000 train_time:205070ms step_avg:43.81ms +[2025-09-11 08:25:24] [Rank 0] step:4681/10000 train_time:205070ms step_avg:43.81ms +[2025-09-11 08:25:25] [Rank 0] step:4701/10000 train_time:205753ms step_avg:43.77ms +[2025-09-11 08:25:25] [Rank 0] step:4701/10000 train_time:205753ms step_avg:43.77ms +[2025-09-11 08:25:26] [Rank 0] step:4721/10000 train_time:206435ms step_avg:43.73ms +[2025-09-11 08:25:26] [Rank 0] step:4721/10000 train_time:206435ms step_avg:43.73ms +[2025-09-11 08:25:26] [Rank 0] step:4741/10000 train_time:207118ms step_avg:43.69ms +[2025-09-11 08:25:26] [Rank 0] step:4741/10000 train_time:207118ms step_avg:43.69ms +[2025-09-11 08:25:27] [Rank 0] step:4761/10000 train_time:207800ms step_avg:43.65ms +[2025-09-11 08:25:27] [Rank 0] step:4761/10000 train_time:207800ms step_avg:43.65ms +[2025-09-11 08:25:28] [Rank 0] step:4781/10000 train_time:208482ms step_avg:43.61ms +[2025-09-11 08:25:28] [Rank 0] step:4781/10000 train_time:208482ms step_avg:43.61ms +[2025-09-11 08:25:28] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 08:25:28] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 08:25:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 08:25:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 08:25:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 08:25:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 08:25:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:25:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:25:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 08:25:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 08:25:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 08:25:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 08:25:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 08:25:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 08:25:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 08:25:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 08:25:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 08:25:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 08:25:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 08:25:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 08:25:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 08:25:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 08:25:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 08:25:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 08:25:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 08:25:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 08:25:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 08:25:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 08:25:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 08:25:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 08:25:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 08:25:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 08:25:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 08:25:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 08:25:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 08:25:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 08:25:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 08:25:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 08:25:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 08:25:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 08:25:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 08:25:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 08:25:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 08:25:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 08:25:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 08:25:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 08:25:42] [Rank 0] PRINT: step:4800/10000 val_loss:4.3923 total_sharp:1.2950e-04 L1_sharp:9.0578e-03 L2_sharp:1.1652e-04 L3_sharp:4.0904e-04 L4_sharp:7.2425e-04 L5_sharp:1.5282e-03 L6_sharp:9.3378e-04 L7_sharp:1.7522e-03 L8_sharp:3.2682e-03 L9_sharp:2.7069e-03 L10_sharp:3.2026e-03 L11_sharp:4.6305e-03 L12_sharp:2.7670e-02 total_fnorm:7.7500e+01 total_l1_linf:1.6282e+05 total_spectral:3.9000e+01 L1_fnorm:1.2500e+00 L2_fnorm:1.2188e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2422e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2578e+00 L12_fnorm:1.2578e+00 L1_l1linf:3.3594e-01 L2_l1linf:3.1836e-01 L3_l1linf:3.1836e-01 L4_l1linf:3.1250e-01 L5_l1linf:3.2031e-01 L6_l1linf:3.1641e-01 L7_l1linf:3.2031e-01 L8_l1linf:3.1250e-01 L9_l1linf:3.1055e-01 L10_l1linf:3.0664e-01 L11_l1linf:3.0469e-01 L12_l1linf:3.0859e-01 L1_spectral:1.5967e-02 L2_spectral:1.5466e-02 L3_spectral:1.5626e-02 L4_spectral:1.5688e-02 L5_spectral:1.5728e-02 L6_spectral:1.5768e-02 L7_spectral:1.5809e-02 L8_spectral:1.5763e-02 L9_spectral:1.5695e-02 L10_spectral:1.5801e-02 L11_spectral:1.5789e-02 L12_spectral:1.5978e-02 train_time:209144ms step_avg:43.57ms +[2025-09-11 08:25:42] [Rank 0] PRINT: step:4800/10000 
val_loss:4.3923 total_sharp:1.2950e-04 L1_sharp:9.0578e-03 L2_sharp:1.1652e-04 L3_sharp:4.0904e-04 L4_sharp:7.2425e-04 L5_sharp:1.5282e-03 L6_sharp:9.3378e-04 L7_sharp:1.7522e-03 L8_sharp:3.2682e-03 L9_sharp:2.7069e-03 L10_sharp:3.2026e-03 L11_sharp:4.6305e-03 L12_sharp:2.7670e-02 total_fnorm:7.7500e+01 total_l1_linf:1.6282e+05 total_spectral:3.9000e+01 L1_fnorm:1.2500e+00 L2_fnorm:1.2188e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2422e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2578e+00 L12_fnorm:1.2578e+00 L1_l1linf:3.3594e-01 L2_l1linf:3.1836e-01 L3_l1linf:3.1836e-01 L4_l1linf:3.1250e-01 L5_l1linf:3.2031e-01 L6_l1linf:3.1641e-01 L7_l1linf:3.2031e-01 L8_l1linf:3.1250e-01 L9_l1linf:3.1055e-01 L10_l1linf:3.0664e-01 L11_l1linf:3.0469e-01 L12_l1linf:3.0859e-01 L1_spectral:1.5967e-02 L2_spectral:1.5466e-02 L3_spectral:1.5626e-02 L4_spectral:1.5688e-02 L5_spectral:1.5728e-02 L6_spectral:1.5768e-02 L7_spectral:1.5809e-02 L8_spectral:1.5763e-02 L9_spectral:1.5695e-02 L10_spectral:1.5801e-02 L11_spectral:1.5789e-02 L12_spectral:1.5978e-02 train_time:209144ms step_avg:43.57ms +[2025-09-11 08:25:43] [Rank 0] step:4801/10000 train_time:210308ms step_avg:43.81ms +[2025-09-11 08:25:43] [Rank 0] step:4801/10000 train_time:210308ms step_avg:43.81ms +[2025-09-11 08:25:43] [Rank 0] step:4821/10000 train_time:211023ms step_avg:43.77ms +[2025-09-11 08:25:43] [Rank 0] step:4821/10000 train_time:211023ms step_avg:43.77ms +[2025-09-11 08:25:44] [Rank 0] step:4841/10000 train_time:211706ms step_avg:43.73ms +[2025-09-11 08:25:44] [Rank 0] step:4841/10000 train_time:211706ms step_avg:43.73ms +[2025-09-11 08:25:45] [Rank 0] step:4861/10000 train_time:212388ms step_avg:43.69ms +[2025-09-11 08:25:45] [Rank 0] step:4861/10000 train_time:212388ms step_avg:43.69ms +[2025-09-11 08:25:46] [Rank 0] step:4881/10000 train_time:213070ms step_avg:43.65ms +[2025-09-11 08:25:46] [Rank 0] step:4881/10000 
train_time:213070ms step_avg:43.65ms +[2025-09-11 08:25:46] [Rank 0] step:4901/10000 train_time:213753ms step_avg:43.61ms +[2025-09-11 08:25:46] [Rank 0] step:4901/10000 train_time:213753ms step_avg:43.61ms +[2025-09-11 08:25:47] [Rank 0] step:4921/10000 train_time:214435ms step_avg:43.58ms +[2025-09-11 08:25:47] [Rank 0] step:4921/10000 train_time:214435ms step_avg:43.58ms +[2025-09-11 08:25:48] [Rank 0] step:4941/10000 train_time:215118ms step_avg:43.54ms +[2025-09-11 08:25:48] [Rank 0] step:4941/10000 train_time:215118ms step_avg:43.54ms +[2025-09-11 08:25:48] [Rank 0] step:4961/10000 train_time:215800ms step_avg:43.50ms +[2025-09-11 08:25:48] [Rank 0] step:4961/10000 train_time:215800ms step_avg:43.50ms +[2025-09-11 08:25:49] [Rank 0] step:4981/10000 train_time:216481ms step_avg:43.46ms +[2025-09-11 08:25:49] [Rank 0] step:4981/10000 train_time:216481ms step_avg:43.46ms +[2025-09-11 08:25:50] [Rank 0] step:5001/10000 train_time:217164ms step_avg:43.42ms +[2025-09-11 08:25:50] [Rank 0] step:5001/10000 train_time:217164ms step_avg:43.42ms +[2025-09-11 08:25:50] [Rank 0] step:5021/10000 train_time:217846ms step_avg:43.39ms +[2025-09-11 08:25:50] [Rank 0] step:5021/10000 train_time:217846ms step_avg:43.39ms +[2025-09-11 08:25:51] [Rank 0] step:5041/10000 train_time:218528ms step_avg:43.35ms +[2025-09-11 08:25:51] [Rank 0] step:5041/10000 train_time:218528ms step_avg:43.35ms +[2025-09-11 08:25:52] [Rank 0] step:5061/10000 train_time:219209ms step_avg:43.31ms +[2025-09-11 08:25:52] [Rank 0] step:5061/10000 train_time:219209ms step_avg:43.31ms +[2025-09-11 08:25:52] [Rank 0] step:5081/10000 train_time:219891ms step_avg:43.28ms +[2025-09-11 08:25:52] [Rank 0] step:5081/10000 train_time:219891ms step_avg:43.28ms +[2025-09-11 08:25:53] [Rank 0] step:5101/10000 train_time:220572ms step_avg:43.24ms +[2025-09-11 08:25:53] [Rank 0] step:5101/10000 train_time:220572ms step_avg:43.24ms +[2025-09-11 08:25:54] [Rank 0] step:5121/10000 train_time:221253ms step_avg:43.21ms 
+[2025-09-11 08:25:54] [Rank 0] step:5121/10000 train_time:221253ms step_avg:43.21ms +[2025-09-11 08:25:54] [Rank 0] step:5141/10000 train_time:221935ms step_avg:43.17ms +[2025-09-11 08:25:54] [Rank 0] step:5141/10000 train_time:221935ms step_avg:43.17ms +[2025-09-11 08:25:55] [Rank 0] step:5161/10000 train_time:222616ms step_avg:43.13ms +[2025-09-11 08:25:55] [Rank 0] step:5161/10000 train_time:222616ms step_avg:43.13ms +[2025-09-11 08:25:56] [Rank 0] step:5181/10000 train_time:223298ms step_avg:43.10ms +[2025-09-11 08:25:56] [Rank 0] step:5181/10000 train_time:223298ms step_avg:43.10ms +[2025-09-11 08:25:56] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 08:25:56] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 08:25:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 08:25:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 08:25:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 08:25:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 08:25:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:25:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:25:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 08:25:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 08:25:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 08:25:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 08:26:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 08:26:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 08:26:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 08:26:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 08:26:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 08:26:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 08:26:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 08:26:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 08:26:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 08:26:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 08:26:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 08:26:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 08:26:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 08:26:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 08:26:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 08:26:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 08:26:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 08:26:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 08:26:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 08:26:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 08:26:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 08:26:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 08:26:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 08:26:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 08:26:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 08:26:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 08:26:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 08:26:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 08:26:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 08:26:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 08:26:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 08:26:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 08:26:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 08:26:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:26:06] [Rank 0] PRINT: step:5200/10000 val_loss:4.3558 total_sharp:2.2955e-04 L1_sharp:1.2104e-02 L2_sharp:1.7523e-03 L3_sharp:1.3628e-03 L4_sharp:1.2743e-03 L5_sharp:1.8320e-03 L6_sharp:1.2121e-03 L7_sharp:2.0011e-03 L8_sharp:3.0048e-03 L9_sharp:3.0155e-03 L10_sharp:3.9179e-03 L11_sharp:6.5257e-03 L12_sharp:1.0582e-01 total_fnorm:7.1000e+01 total_l1_linf:1.4131e+05 total_spectral:3.5500e+01 L1_fnorm:1.2500e+00 L2_fnorm:1.2266e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2422e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2578e+00 L11_fnorm:1.2578e+00 L12_fnorm:1.2656e+00 L1_l1linf:3.3398e-01 L2_l1linf:3.1836e-01 L3_l1linf:3.0859e-01 L4_l1linf:3.0859e-01 L5_l1linf:3.2227e-01 L6_l1linf:3.1641e-01 L7_l1linf:3.1445e-01 L8_l1linf:3.1250e-01 L9_l1linf:3.0664e-01 L10_l1linf:3.0078e-01 L11_l1linf:3.0078e-01 L12_l1linf:3.1250e-01 L1_spectral:1.6042e-02 L2_spectral:1.5580e-02 L3_spectral:1.5634e-02 L4_spectral:1.5768e-02 L5_spectral:1.5755e-02 L6_spectral:1.5795e-02 L7_spectral:1.5698e-02 L8_spectral:1.5949e-02 L9_spectral:1.5826e-02 L10_spectral:1.5924e-02 L11_spectral:1.6028e-02 L12_spectral:1.5951e-02 train_time:223967ms step_avg:43.07ms +[2025-09-11 08:26:06] [Rank 0] PRINT: step:5200/10000 val_loss:4.3558 total_sharp:2.2955e-04 L1_sharp:1.2104e-02 L2_sharp:1.7523e-03 L3_sharp:1.3628e-03 L4_sharp:1.2743e-03 L5_sharp:1.8320e-03 L6_sharp:1.2121e-03 L7_sharp:2.0011e-03 L8_sharp:3.0048e-03 L9_sharp:3.0155e-03 L10_sharp:3.9179e-03 L11_sharp:6.5257e-03 L12_sharp:1.0582e-01 total_fnorm:7.1000e+01 total_l1_linf:1.4131e+05 total_spectral:3.5500e+01 L1_fnorm:1.2500e+00 L2_fnorm:1.2266e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2422e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2578e+00 L11_fnorm:1.2578e+00 L12_fnorm:1.2656e+00 L1_l1linf:3.3398e-01 L2_l1linf:3.1836e-01 L3_l1linf:3.0859e-01 L4_l1linf:3.0859e-01 L5_l1linf:3.2227e-01 
L6_l1linf:3.1641e-01 L7_l1linf:3.1445e-01 L8_l1linf:3.1250e-01 L9_l1linf:3.0664e-01 L10_l1linf:3.0078e-01 L11_l1linf:3.0078e-01 L12_l1linf:3.1250e-01 L1_spectral:1.6042e-02 L2_spectral:1.5580e-02 L3_spectral:1.5634e-02 L4_spectral:1.5768e-02 L5_spectral:1.5755e-02 L6_spectral:1.5795e-02 L7_spectral:1.5698e-02 L8_spectral:1.5949e-02 L9_spectral:1.5826e-02 L10_spectral:1.5924e-02 L11_spectral:1.6028e-02 L12_spectral:1.5951e-02 train_time:223967ms step_avg:43.07ms +[2025-09-11 08:26:08] [Rank 0] step:5201/10000 train_time:225136ms step_avg:43.29ms +[2025-09-11 08:26:08] [Rank 0] step:5201/10000 train_time:225136ms step_avg:43.29ms +[2025-09-11 08:26:08] [Rank 0] step:5221/10000 train_time:225832ms step_avg:43.25ms +[2025-09-11 08:26:08] [Rank 0] step:5221/10000 train_time:225832ms step_avg:43.25ms +[2025-09-11 08:26:09] [Rank 0] step:5241/10000 train_time:226523ms step_avg:43.22ms +[2025-09-11 08:26:09] [Rank 0] step:5241/10000 train_time:226523ms step_avg:43.22ms +[2025-09-11 08:26:10] [Rank 0] step:5261/10000 train_time:227215ms step_avg:43.19ms +[2025-09-11 08:26:10] [Rank 0] step:5261/10000 train_time:227215ms step_avg:43.19ms +[2025-09-11 08:26:10] [Rank 0] step:5281/10000 train_time:227906ms step_avg:43.16ms +[2025-09-11 08:26:10] [Rank 0] step:5281/10000 train_time:227906ms step_avg:43.16ms +[2025-09-11 08:26:11] [Rank 0] step:5301/10000 train_time:228598ms step_avg:43.12ms +[2025-09-11 08:26:11] [Rank 0] step:5301/10000 train_time:228598ms step_avg:43.12ms +[2025-09-11 08:26:12] [Rank 0] step:5321/10000 train_time:229288ms step_avg:43.09ms +[2025-09-11 08:26:12] [Rank 0] step:5321/10000 train_time:229288ms step_avg:43.09ms +[2025-09-11 08:26:12] [Rank 0] step:5341/10000 train_time:229979ms step_avg:43.06ms +[2025-09-11 08:26:12] [Rank 0] step:5341/10000 train_time:229979ms step_avg:43.06ms +[2025-09-11 08:26:13] [Rank 0] step:5361/10000 train_time:230670ms step_avg:43.03ms +[2025-09-11 08:26:13] [Rank 0] step:5361/10000 train_time:230670ms step_avg:43.03ms 
+[2025-09-11 08:26:14] [Rank 0] step:5381/10000 train_time:231361ms step_avg:43.00ms +[2025-09-11 08:26:14] [Rank 0] step:5381/10000 train_time:231361ms step_avg:43.00ms +[2025-09-11 08:26:14] [Rank 0] step:5401/10000 train_time:232051ms step_avg:42.96ms +[2025-09-11 08:26:14] [Rank 0] step:5401/10000 train_time:232051ms step_avg:42.96ms +[2025-09-11 08:26:15] [Rank 0] step:5421/10000 train_time:232744ms step_avg:42.93ms +[2025-09-11 08:26:15] [Rank 0] step:5421/10000 train_time:232744ms step_avg:42.93ms +[2025-09-11 08:26:16] [Rank 0] step:5441/10000 train_time:233434ms step_avg:42.90ms +[2025-09-11 08:26:16] [Rank 0] step:5441/10000 train_time:233434ms step_avg:42.90ms +[2025-09-11 08:26:17] [Rank 0] step:5461/10000 train_time:234126ms step_avg:42.87ms +[2025-09-11 08:26:17] [Rank 0] step:5461/10000 train_time:234126ms step_avg:42.87ms +[2025-09-11 08:26:17] [Rank 0] step:5481/10000 train_time:234817ms step_avg:42.84ms +[2025-09-11 08:26:17] [Rank 0] step:5481/10000 train_time:234817ms step_avg:42.84ms +[2025-09-11 08:26:18] [Rank 0] step:5501/10000 train_time:235508ms step_avg:42.81ms +[2025-09-11 08:26:18] [Rank 0] step:5501/10000 train_time:235508ms step_avg:42.81ms +[2025-09-11 08:26:19] [Rank 0] step:5521/10000 train_time:236198ms step_avg:42.78ms +[2025-09-11 08:26:19] [Rank 0] step:5521/10000 train_time:236198ms step_avg:42.78ms +[2025-09-11 08:26:19] [Rank 0] step:5541/10000 train_time:236891ms step_avg:42.75ms +[2025-09-11 08:26:19] [Rank 0] step:5541/10000 train_time:236891ms step_avg:42.75ms +[2025-09-11 08:26:20] [Rank 0] step:5561/10000 train_time:237583ms step_avg:42.72ms +[2025-09-11 08:26:20] [Rank 0] step:5561/10000 train_time:237583ms step_avg:42.72ms +[2025-09-11 08:26:21] [Rank 0] step:5581/10000 train_time:238275ms step_avg:42.69ms +[2025-09-11 08:26:21] [Rank 0] step:5581/10000 train_time:238275ms step_avg:42.69ms +[2025-09-11 08:26:21] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 08:26:21] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 08:26:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 08:26:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 08:26:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 08:26:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 08:26:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:26:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:26:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 08:26:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 08:26:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 08:26:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 08:26:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 08:26:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 08:26:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 08:26:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 08:26:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 08:26:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 08:26:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 08:26:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 08:26:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 08:26:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 08:26:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 08:26:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 08:26:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 08:26:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 08:26:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 08:26:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 08:26:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 08:26:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 08:26:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 08:26:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 08:26:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 08:26:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 08:26:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 08:26:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 08:26:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 08:26:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 08:26:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 08:26:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 08:26:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 08:26:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 08:26:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 08:26:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 08:26:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 08:26:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 08:26:31] [Rank 0] PRINT: step:5600/10000 val_loss:4.3429 total_sharp:1.3185e-04 L1_sharp:7.5988e-03 L2_sharp:-1.5157e-04 L3_sharp:3.6718e-04 L4_sharp:7.4836e-04 L5_sharp:8.1456e-04 L6_sharp:1.6261e-03 L7_sharp:1.2594e-03 L8_sharp:2.3505e-03 L9_sharp:2.3356e-03 L10_sharp:2.8326e-03 L11_sharp:4.8802e-03 L12_sharp:2.0312e-02 total_fnorm:7.2000e+01 total_l1_linf:1.4541e+05 total_spectral:3.6000e+01 L1_fnorm:1.2500e+00 L2_fnorm:1.2188e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2422e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2578e+00 L11_fnorm:1.2578e+00 L12_fnorm:1.2500e+00 L1_l1linf:3.2422e-01 L2_l1linf:3.1055e-01 L3_l1linf:3.0664e-01 L4_l1linf:3.0469e-01 L5_l1linf:3.1445e-01 L6_l1linf:3.1250e-01 L7_l1linf:3.2031e-01 L8_l1linf:3.0859e-01 L9_l1linf:3.1055e-01 L10_l1linf:2.9883e-01 L11_l1linf:2.9883e-01 L12_l1linf:2.9883e-01 L1_spectral:1.6115e-02 L2_spectral:1.5555e-02 L3_spectral:1.5812e-02 L4_spectral:1.5841e-02 L5_spectral:1.5750e-02 L6_spectral:1.5897e-02 L7_spectral:1.5810e-02 L8_spectral:1.5968e-02 L9_spectral:1.5817e-02 L10_spectral:1.5908e-02 L11_spectral:1.5901e-02 L12_spectral:1.5980e-02 train_time:238947ms step_avg:42.67ms +[2025-09-11 08:26:31] [Rank 0] PRINT: step:5600/10000 
val_loss:4.3429 total_sharp:1.3185e-04 L1_sharp:7.5988e-03 L2_sharp:-1.5157e-04 L3_sharp:3.6718e-04 L4_sharp:7.4836e-04 L5_sharp:8.1456e-04 L6_sharp:1.6261e-03 L7_sharp:1.2594e-03 L8_sharp:2.3505e-03 L9_sharp:2.3356e-03 L10_sharp:2.8326e-03 L11_sharp:4.8802e-03 L12_sharp:2.0312e-02 total_fnorm:7.2000e+01 total_l1_linf:1.4541e+05 total_spectral:3.6000e+01 L1_fnorm:1.2500e+00 L2_fnorm:1.2188e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2422e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2578e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2578e+00 L11_fnorm:1.2578e+00 L12_fnorm:1.2500e+00 L1_l1linf:3.2422e-01 L2_l1linf:3.1055e-01 L3_l1linf:3.0664e-01 L4_l1linf:3.0469e-01 L5_l1linf:3.1445e-01 L6_l1linf:3.1250e-01 L7_l1linf:3.2031e-01 L8_l1linf:3.0859e-01 L9_l1linf:3.1055e-01 L10_l1linf:2.9883e-01 L11_l1linf:2.9883e-01 L12_l1linf:2.9883e-01 L1_spectral:1.6115e-02 L2_spectral:1.5555e-02 L3_spectral:1.5812e-02 L4_spectral:1.5841e-02 L5_spectral:1.5750e-02 L6_spectral:1.5897e-02 L7_spectral:1.5810e-02 L8_spectral:1.5968e-02 L9_spectral:1.5817e-02 L10_spectral:1.5908e-02 L11_spectral:1.5901e-02 L12_spectral:1.5980e-02 train_time:238947ms step_avg:42.67ms +[2025-09-11 08:26:33] [Rank 0] step:5601/10000 train_time:240126ms step_avg:42.87ms +[2025-09-11 08:26:33] [Rank 0] step:5601/10000 train_time:240126ms step_avg:42.87ms +[2025-09-11 08:26:33] [Rank 0] step:5621/10000 train_time:240841ms step_avg:42.85ms +[2025-09-11 08:26:33] [Rank 0] step:5621/10000 train_time:240841ms step_avg:42.85ms +[2025-09-11 08:26:34] [Rank 0] step:5641/10000 train_time:241533ms step_avg:42.82ms +[2025-09-11 08:26:34] [Rank 0] step:5641/10000 train_time:241533ms step_avg:42.82ms +[2025-09-11 08:26:35] [Rank 0] step:5661/10000 train_time:242226ms step_avg:42.79ms +[2025-09-11 08:26:35] [Rank 0] step:5661/10000 train_time:242226ms step_avg:42.79ms +[2025-09-11 08:26:35] [Rank 0] step:5681/10000 train_time:242919ms step_avg:42.76ms +[2025-09-11 08:26:35] [Rank 0] step:5681/10000 
train_time:242919ms step_avg:42.76ms +[2025-09-11 08:26:36] [Rank 0] step:5701/10000 train_time:243614ms step_avg:42.73ms +[2025-09-11 08:26:36] [Rank 0] step:5701/10000 train_time:243614ms step_avg:42.73ms +[2025-09-11 08:26:37] [Rank 0] step:5721/10000 train_time:244305ms step_avg:42.70ms +[2025-09-11 08:26:37] [Rank 0] step:5721/10000 train_time:244305ms step_avg:42.70ms +[2025-09-11 08:26:37] [Rank 0] step:5741/10000 train_time:244999ms step_avg:42.68ms +[2025-09-11 08:26:37] [Rank 0] step:5741/10000 train_time:244999ms step_avg:42.68ms +[2025-09-11 08:26:38] [Rank 0] step:5761/10000 train_time:245692ms step_avg:42.65ms +[2025-09-11 08:26:38] [Rank 0] step:5761/10000 train_time:245692ms step_avg:42.65ms +[2025-09-11 08:26:39] [Rank 0] step:5781/10000 train_time:246385ms step_avg:42.62ms +[2025-09-11 08:26:39] [Rank 0] step:5781/10000 train_time:246385ms step_avg:42.62ms +[2025-09-11 08:26:40] [Rank 0] step:5801/10000 train_time:247080ms step_avg:42.59ms +[2025-09-11 08:26:40] [Rank 0] step:5801/10000 train_time:247080ms step_avg:42.59ms +[2025-09-11 08:26:40] [Rank 0] step:5821/10000 train_time:247771ms step_avg:42.57ms +[2025-09-11 08:26:40] [Rank 0] step:5821/10000 train_time:247771ms step_avg:42.57ms +[2025-09-11 08:26:41] [Rank 0] step:5841/10000 train_time:248464ms step_avg:42.54ms +[2025-09-11 08:26:41] [Rank 0] step:5841/10000 train_time:248464ms step_avg:42.54ms +[2025-09-11 08:26:42] [Rank 0] step:5861/10000 train_time:249156ms step_avg:42.51ms +[2025-09-11 08:26:42] [Rank 0] step:5861/10000 train_time:249156ms step_avg:42.51ms +[2025-09-11 08:26:42] [Rank 0] step:5881/10000 train_time:249848ms step_avg:42.48ms +[2025-09-11 08:26:42] [Rank 0] step:5881/10000 train_time:249848ms step_avg:42.48ms +[2025-09-11 08:26:43] [Rank 0] step:5901/10000 train_time:250541ms step_avg:42.46ms +[2025-09-11 08:26:43] [Rank 0] step:5901/10000 train_time:250541ms step_avg:42.46ms +[2025-09-11 08:26:44] [Rank 0] step:5921/10000 train_time:251235ms step_avg:42.43ms 
+[2025-09-11 08:26:44] [Rank 0] step:5921/10000 train_time:251235ms step_avg:42.43ms +[2025-09-11 08:26:44] [Rank 0] step:5941/10000 train_time:251930ms step_avg:42.41ms +[2025-09-11 08:26:44] [Rank 0] step:5941/10000 train_time:251930ms step_avg:42.41ms +[2025-09-11 08:26:45] [Rank 0] step:5961/10000 train_time:252623ms step_avg:42.38ms +[2025-09-11 08:26:45] [Rank 0] step:5961/10000 train_time:252623ms step_avg:42.38ms +[2025-09-11 08:26:46] [Rank 0] step:5981/10000 train_time:253317ms step_avg:42.35ms +[2025-09-11 08:26:46] [Rank 0] step:5981/10000 train_time:253317ms step_avg:42.35ms +[2025-09-11 08:26:46] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 08:26:46] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 08:26:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 08:26:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 08:26:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 08:26:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 08:26:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:26:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:26:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 08:26:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 08:26:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 08:26:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 08:26:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 08:26:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 08:26:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 08:26:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 08:26:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 08:26:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 08:26:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 08:26:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 08:26:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 08:26:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 08:26:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 08:26:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 08:26:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 08:26:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 08:26:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 08:26:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 08:26:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 08:26:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 08:26:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 08:26:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 08:26:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 08:26:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 08:26:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 08:26:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 08:26:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 08:26:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 08:26:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 08:26:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 08:26:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 08:26:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 08:26:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 08:26:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 08:26:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 08:26:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:26:57] [Rank 0] PRINT: step:6000/10000 val_loss:4.2951 total_sharp:1.2214e-04 L1_sharp:5.9282e-03 L2_sharp:8.3865e-04 L3_sharp:5.0753e-04 L4_sharp:7.9201e-04 L5_sharp:9.2885e-04 L6_sharp:1.2247e-03 L7_sharp:1.3111e-03 L8_sharp:2.4449e-03 L9_sharp:2.2490e-03 L10_sharp:2.6153e-03 L11_sharp:4.0961e-03 L12_sharp:2.0906e-02 total_fnorm:7.1500e+01 total_l1_linf:1.4131e+05 total_spectral:3.5750e+01 L1_fnorm:1.2500e+00 L2_fnorm:1.2188e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2422e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2578e+00 L11_fnorm:1.2578e+00 L12_fnorm:1.2500e+00 L1_l1linf:3.2227e-01 L2_l1linf:3.0664e-01 L3_l1linf:3.0273e-01 L4_l1linf:3.0273e-01 L5_l1linf:3.0859e-01 L6_l1linf:3.1250e-01 L7_l1linf:3.1250e-01 L8_l1linf:3.0664e-01 L9_l1linf:3.0469e-01 L10_l1linf:2.9883e-01 L11_l1linf:2.8906e-01 L12_l1linf:2.9492e-01 L1_spectral:1.6096e-02 L2_spectral:1.5627e-02 L3_spectral:1.5819e-02 L4_spectral:1.5971e-02 L5_spectral:1.5822e-02 L6_spectral:1.5996e-02 L7_spectral:1.5859e-02 L8_spectral:1.5911e-02 L9_spectral:1.5923e-02 L10_spectral:1.6146e-02 L11_spectral:1.5964e-02 L12_spectral:1.5961e-02 train_time:253992ms step_avg:42.33ms +[2025-09-11 08:26:57] [Rank 0] PRINT: step:6000/10000 val_loss:4.2951 total_sharp:1.2214e-04 L1_sharp:5.9282e-03 L2_sharp:8.3865e-04 L3_sharp:5.0753e-04 L4_sharp:7.9201e-04 L5_sharp:9.2885e-04 L6_sharp:1.2247e-03 L7_sharp:1.3111e-03 L8_sharp:2.4449e-03 L9_sharp:2.2490e-03 L10_sharp:2.6153e-03 L11_sharp:4.0961e-03 L12_sharp:2.0906e-02 total_fnorm:7.1500e+01 total_l1_linf:1.4131e+05 total_spectral:3.5750e+01 L1_fnorm:1.2500e+00 L2_fnorm:1.2188e+00 L3_fnorm:1.2344e+00 L4_fnorm:1.2422e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2578e+00 L11_fnorm:1.2578e+00 L12_fnorm:1.2500e+00 L1_l1linf:3.2227e-01 L2_l1linf:3.0664e-01 L3_l1linf:3.0273e-01 L4_l1linf:3.0273e-01 L5_l1linf:3.0859e-01 
L6_l1linf:3.1250e-01 L7_l1linf:3.1250e-01 L8_l1linf:3.0664e-01 L9_l1linf:3.0469e-01 L10_l1linf:2.9883e-01 L11_l1linf:2.8906e-01 L12_l1linf:2.9492e-01 L1_spectral:1.6096e-02 L2_spectral:1.5627e-02 L3_spectral:1.5819e-02 L4_spectral:1.5971e-02 L5_spectral:1.5822e-02 L6_spectral:1.5996e-02 L7_spectral:1.5859e-02 L8_spectral:1.5911e-02 L9_spectral:1.5923e-02 L10_spectral:1.6146e-02 L11_spectral:1.5964e-02 L12_spectral:1.5961e-02 train_time:253992ms step_avg:42.33ms +[2025-09-11 08:26:58] [Rank 0] step:6001/10000 train_time:255176ms step_avg:42.52ms +[2025-09-11 08:26:58] [Rank 0] step:6001/10000 train_time:255176ms step_avg:42.52ms +[2025-09-11 08:26:58] [Rank 0] step:6021/10000 train_time:255886ms step_avg:42.50ms +[2025-09-11 08:26:58] [Rank 0] step:6021/10000 train_time:255886ms step_avg:42.50ms +[2025-09-11 08:26:59] [Rank 0] step:6041/10000 train_time:256582ms step_avg:42.47ms +[2025-09-11 08:26:59] [Rank 0] step:6041/10000 train_time:256582ms step_avg:42.47ms +[2025-09-11 08:27:00] [Rank 0] step:6061/10000 train_time:257276ms step_avg:42.45ms +[2025-09-11 08:27:00] [Rank 0] step:6061/10000 train_time:257276ms step_avg:42.45ms +[2025-09-11 08:27:01] [Rank 0] step:6081/10000 train_time:257972ms step_avg:42.42ms +[2025-09-11 08:27:01] [Rank 0] step:6081/10000 train_time:257972ms step_avg:42.42ms +[2025-09-11 08:27:01] [Rank 0] step:6101/10000 train_time:258664ms step_avg:42.40ms +[2025-09-11 08:27:01] [Rank 0] step:6101/10000 train_time:258664ms step_avg:42.40ms +[2025-09-11 08:27:02] [Rank 0] step:6121/10000 train_time:259359ms step_avg:42.37ms +[2025-09-11 08:27:02] [Rank 0] step:6121/10000 train_time:259359ms step_avg:42.37ms +[2025-09-11 08:27:03] [Rank 0] step:6141/10000 train_time:260052ms step_avg:42.35ms +[2025-09-11 08:27:03] [Rank 0] step:6141/10000 train_time:260052ms step_avg:42.35ms +[2025-09-11 08:27:03] [Rank 0] step:6161/10000 train_time:260745ms step_avg:42.32ms +[2025-09-11 08:27:03] [Rank 0] step:6161/10000 train_time:260745ms step_avg:42.32ms 
+[2025-09-11 08:27:04] [Rank 0] step:6181/10000 train_time:261437ms step_avg:42.30ms +[2025-09-11 08:27:04] [Rank 0] step:6181/10000 train_time:261437ms step_avg:42.30ms +[2025-09-11 08:27:05] [Rank 0] step:6201/10000 train_time:262131ms step_avg:42.27ms +[2025-09-11 08:27:05] [Rank 0] step:6201/10000 train_time:262131ms step_avg:42.27ms +[2025-09-11 08:27:05] [Rank 0] step:6221/10000 train_time:262825ms step_avg:42.25ms +[2025-09-11 08:27:05] [Rank 0] step:6221/10000 train_time:262825ms step_avg:42.25ms +[2025-09-11 08:27:07] [Rank 0] step:6241/10000 train_time:264067ms step_avg:42.31ms +[2025-09-11 08:27:07] [Rank 0] step:6241/10000 train_time:264067ms step_avg:42.31ms +[2025-09-11 08:27:07] [Rank 0] step:6261/10000 train_time:264761ms step_avg:42.29ms +[2025-09-11 08:27:07] [Rank 0] step:6261/10000 train_time:264761ms step_avg:42.29ms +[2025-09-11 08:27:08] [Rank 0] step:6281/10000 train_time:265617ms step_avg:42.29ms +[2025-09-11 08:27:08] [Rank 0] step:6281/10000 train_time:265617ms step_avg:42.29ms +[2025-09-11 08:27:09] [Rank 0] step:6301/10000 train_time:266417ms step_avg:42.28ms +[2025-09-11 08:27:09] [Rank 0] step:6301/10000 train_time:266417ms step_avg:42.28ms +[2025-09-11 08:27:10] [Rank 0] step:6321/10000 train_time:267112ms step_avg:42.26ms +[2025-09-11 08:27:10] [Rank 0] step:6321/10000 train_time:267112ms step_avg:42.26ms +[2025-09-11 08:27:10] [Rank 0] step:6341/10000 train_time:267806ms step_avg:42.23ms +[2025-09-11 08:27:10] [Rank 0] step:6341/10000 train_time:267806ms step_avg:42.23ms +[2025-09-11 08:27:11] [Rank 0] step:6361/10000 train_time:268504ms step_avg:42.21ms +[2025-09-11 08:27:11] [Rank 0] step:6361/10000 train_time:268504ms step_avg:42.21ms +[2025-09-11 08:27:12] [Rank 0] step:6381/10000 train_time:269199ms step_avg:42.19ms +[2025-09-11 08:27:12] [Rank 0] step:6381/10000 train_time:269199ms step_avg:42.19ms +[2025-09-11 08:27:12] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 08:27:12] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 08:27:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 08:27:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 08:27:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 08:27:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 08:27:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:27:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:27:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 08:27:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 08:27:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 08:27:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 08:27:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 08:27:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 08:27:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 08:27:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 08:27:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 08:27:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 08:27:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 08:27:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 08:27:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 08:27:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 08:27:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 08:27:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 08:27:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 08:27:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 08:27:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 08:27:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 08:27:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 08:27:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 08:27:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 08:27:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 08:27:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 08:27:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 08:27:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 08:27:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 08:27:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 08:27:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 08:27:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 08:27:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 08:27:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 08:27:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 08:27:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 08:27:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 08:27:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 08:27:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 08:27:22] [Rank 0] PRINT: step:6400/10000 val_loss:4.2681 total_sharp:1.2351e-04 L1_sharp:5.1512e-03 L2_sharp:4.1880e-04 L3_sharp:3.7571e-04 L4_sharp:1.0083e-03 L5_sharp:1.0068e-03 L6_sharp:1.0486e-03 L7_sharp:1.3612e-03 L8_sharp:2.3285e-03 L9_sharp:2.2254e-03 L10_sharp:2.6935e-03 L11_sharp:4.5206e-03 L12_sharp:2.5850e-02 total_fnorm:6.3000e+01 total_l1_linf:1.2186e+05 total_spectral:3.1625e+01 L1_fnorm:1.1328e+00 L2_fnorm:1.1016e+00 L3_fnorm:1.1172e+00 L4_fnorm:1.1250e+00 L5_fnorm:1.1250e+00 L6_fnorm:1.1328e+00 L7_fnorm:1.1328e+00 L8_fnorm:1.1250e+00 L9_fnorm:1.1250e+00 L10_fnorm:1.1328e+00 L11_fnorm:1.1250e+00 L12_fnorm:1.1250e+00 L1_l1linf:2.8125e-01 L2_l1linf:2.6758e-01 L3_l1linf:2.6758e-01 L4_l1linf:2.6562e-01 L5_l1linf:2.7344e-01 L6_l1linf:2.7148e-01 L7_l1linf:2.7344e-01 L8_l1linf:2.6562e-01 L9_l1linf:2.5977e-01 L10_l1linf:2.5781e-01 L11_l1linf:2.5391e-01 L12_l1linf:2.5781e-01 L1_spectral:1.4814e-02 L2_spectral:1.4228e-02 L3_spectral:1.4362e-02 L4_spectral:1.4478e-02 L5_spectral:1.4509e-02 L6_spectral:1.4680e-02 L7_spectral:1.4684e-02 L8_spectral:1.4622e-02 L9_spectral:1.4751e-02 L10_spectral:1.4695e-02 L11_spectral:1.4644e-02 L12_spectral:1.4553e-02 train_time:269872ms step_avg:42.17ms +[2025-09-11 08:27:22] [Rank 0] PRINT: step:6400/10000 
val_loss:4.2681 total_sharp:1.2351e-04 L1_sharp:5.1512e-03 L2_sharp:4.1880e-04 L3_sharp:3.7571e-04 L4_sharp:1.0083e-03 L5_sharp:1.0068e-03 L6_sharp:1.0486e-03 L7_sharp:1.3612e-03 L8_sharp:2.3285e-03 L9_sharp:2.2254e-03 L10_sharp:2.6935e-03 L11_sharp:4.5206e-03 L12_sharp:2.5850e-02 total_fnorm:6.3000e+01 total_l1_linf:1.2186e+05 total_spectral:3.1625e+01 L1_fnorm:1.1328e+00 L2_fnorm:1.1016e+00 L3_fnorm:1.1172e+00 L4_fnorm:1.1250e+00 L5_fnorm:1.1250e+00 L6_fnorm:1.1328e+00 L7_fnorm:1.1328e+00 L8_fnorm:1.1250e+00 L9_fnorm:1.1250e+00 L10_fnorm:1.1328e+00 L11_fnorm:1.1250e+00 L12_fnorm:1.1250e+00 L1_l1linf:2.8125e-01 L2_l1linf:2.6758e-01 L3_l1linf:2.6758e-01 L4_l1linf:2.6562e-01 L5_l1linf:2.7344e-01 L6_l1linf:2.7148e-01 L7_l1linf:2.7344e-01 L8_l1linf:2.6562e-01 L9_l1linf:2.5977e-01 L10_l1linf:2.5781e-01 L11_l1linf:2.5391e-01 L12_l1linf:2.5781e-01 L1_spectral:1.4814e-02 L2_spectral:1.4228e-02 L3_spectral:1.4362e-02 L4_spectral:1.4478e-02 L5_spectral:1.4509e-02 L6_spectral:1.4680e-02 L7_spectral:1.4684e-02 L8_spectral:1.4622e-02 L9_spectral:1.4751e-02 L10_spectral:1.4695e-02 L11_spectral:1.4644e-02 L12_spectral:1.4553e-02 train_time:269872ms step_avg:42.17ms +[2025-09-11 08:27:23] [Rank 0] step:6401/10000 train_time:271028ms step_avg:42.34ms +[2025-09-11 08:27:23] [Rank 0] step:6401/10000 train_time:271028ms step_avg:42.34ms +[2025-09-11 08:27:24] [Rank 0] step:6421/10000 train_time:271749ms step_avg:42.32ms +[2025-09-11 08:27:24] [Rank 0] step:6421/10000 train_time:271749ms step_avg:42.32ms +[2025-09-11 08:27:25] [Rank 0] step:6441/10000 train_time:272444ms step_avg:42.30ms +[2025-09-11 08:27:25] [Rank 0] step:6441/10000 train_time:272444ms step_avg:42.30ms +[2025-09-11 08:27:26] [Rank 0] step:6461/10000 train_time:273139ms step_avg:42.28ms +[2025-09-11 08:27:26] [Rank 0] step:6461/10000 train_time:273139ms step_avg:42.28ms +[2025-09-11 08:27:26] [Rank 0] step:6481/10000 train_time:273834ms step_avg:42.25ms +[2025-09-11 08:27:26] [Rank 0] step:6481/10000 
train_time:273834ms step_avg:42.25ms +[2025-09-11 08:27:27] [Rank 0] step:6501/10000 train_time:274532ms step_avg:42.23ms +[2025-09-11 08:27:27] [Rank 0] step:6501/10000 train_time:274532ms step_avg:42.23ms +[2025-09-11 08:27:28] [Rank 0] step:6521/10000 train_time:275227ms step_avg:42.21ms +[2025-09-11 08:27:28] [Rank 0] step:6521/10000 train_time:275227ms step_avg:42.21ms +[2025-09-11 08:27:28] [Rank 0] step:6541/10000 train_time:275920ms step_avg:42.18ms +[2025-09-11 08:27:28] [Rank 0] step:6541/10000 train_time:275920ms step_avg:42.18ms +[2025-09-11 08:27:29] [Rank 0] step:6561/10000 train_time:276615ms step_avg:42.16ms +[2025-09-11 08:27:29] [Rank 0] step:6561/10000 train_time:276615ms step_avg:42.16ms +[2025-09-11 08:27:30] [Rank 0] step:6581/10000 train_time:277309ms step_avg:42.14ms +[2025-09-11 08:27:30] [Rank 0] step:6581/10000 train_time:277309ms step_avg:42.14ms +[2025-09-11 08:27:30] [Rank 0] step:6601/10000 train_time:278003ms step_avg:42.12ms +[2025-09-11 08:27:30] [Rank 0] step:6601/10000 train_time:278003ms step_avg:42.12ms +[2025-09-11 08:27:31] [Rank 0] step:6621/10000 train_time:278696ms step_avg:42.09ms +[2025-09-11 08:27:31] [Rank 0] step:6621/10000 train_time:278696ms step_avg:42.09ms +[2025-09-11 08:27:32] [Rank 0] step:6641/10000 train_time:279390ms step_avg:42.07ms +[2025-09-11 08:27:32] [Rank 0] step:6641/10000 train_time:279390ms step_avg:42.07ms +[2025-09-11 08:27:32] [Rank 0] step:6661/10000 train_time:280086ms step_avg:42.05ms +[2025-09-11 08:27:32] [Rank 0] step:6661/10000 train_time:280086ms step_avg:42.05ms +[2025-09-11 08:27:33] [Rank 0] step:6681/10000 train_time:280787ms step_avg:42.03ms +[2025-09-11 08:27:33] [Rank 0] step:6681/10000 train_time:280787ms step_avg:42.03ms +[2025-09-11 08:27:34] [Rank 0] step:6701/10000 train_time:281486ms step_avg:42.01ms +[2025-09-11 08:27:34] [Rank 0] step:6701/10000 train_time:281486ms step_avg:42.01ms +[2025-09-11 08:27:35] [Rank 0] step:6721/10000 train_time:282188ms step_avg:41.99ms 
+[2025-09-11 08:27:35] [Rank 0] step:6721/10000 train_time:282188ms step_avg:41.99ms +[2025-09-11 08:27:35] [Rank 0] step:6741/10000 train_time:282890ms step_avg:41.97ms +[2025-09-11 08:27:35] [Rank 0] step:6741/10000 train_time:282890ms step_avg:41.97ms +[2025-09-11 08:27:36] [Rank 0] step:6761/10000 train_time:283590ms step_avg:41.94ms +[2025-09-11 08:27:36] [Rank 0] step:6761/10000 train_time:283590ms step_avg:41.94ms +[2025-09-11 08:27:37] [Rank 0] step:6781/10000 train_time:284291ms step_avg:41.92ms +[2025-09-11 08:27:37] [Rank 0] step:6781/10000 train_time:284291ms step_avg:41.92ms +[2025-09-11 08:27:37] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 08:27:37] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 08:27:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 08:27:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 08:27:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 08:27:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 08:27:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:27:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:27:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 08:27:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 08:27:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 08:27:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 08:27:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 08:27:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 08:27:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 08:27:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 08:27:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 08:27:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 08:27:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 08:27:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 08:27:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 08:27:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 08:27:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 08:27:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 08:27:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 08:27:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 08:27:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 08:27:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 08:27:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 08:27:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 08:27:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 08:27:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 08:27:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 08:27:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 08:27:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 08:27:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 08:27:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 08:27:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 08:27:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 08:27:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 08:27:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 08:27:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 08:27:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 08:27:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 08:27:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 08:27:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:27:48] [Rank 0] PRINT: step:6800/10000 val_loss:4.2296 total_sharp:1.1070e-04 L1_sharp:6.8469e-03 L2_sharp:8.2210e-04 L3_sharp:1.1885e-03 L4_sharp:1.7690e-03 L5_sharp:1.2062e-03 L6_sharp:1.2691e-03 L7_sharp:1.7206e-03 L8_sharp:2.2479e-03 L9_sharp:2.4645e-03 L10_sharp:3.1069e-03 L11_sharp:4.3969e-03 L12_sharp:3.4620e-02 total_fnorm:6.1250e+01 total_l1_linf:1.1674e+05 total_spectral:3.0625e+01 L1_fnorm:1.0000e+00 L2_fnorm:9.7266e-01 L3_fnorm:9.8438e-01 L4_fnorm:9.9219e-01 L5_fnorm:9.9609e-01 L6_fnorm:1.0078e+00 L7_fnorm:1.0000e+00 L8_fnorm:9.9219e-01 L9_fnorm:1.0000e+00 L10_fnorm:1.0000e+00 L11_fnorm:9.9609e-01 L12_fnorm:9.8828e-01 L1_l1linf:2.4023e-01 L2_l1linf:2.3145e-01 L3_l1linf:2.3145e-01 L4_l1linf:2.2559e-01 L5_l1linf:2.3242e-01 L6_l1linf:2.3438e-01 L7_l1linf:2.3242e-01 L8_l1linf:2.3047e-01 L9_l1linf:2.2754e-01 L10_l1linf:2.2168e-01 L11_l1linf:2.1289e-01 L12_l1linf:2.1680e-01 L1_spectral:1.3323e-02 L2_spectral:1.2745e-02 L3_spectral:1.2993e-02 L4_spectral:1.3066e-02 L5_spectral:1.3123e-02 L6_spectral:1.3235e-02 L7_spectral:1.3204e-02 L8_spectral:1.3223e-02 L9_spectral:1.3189e-02 L10_spectral:1.3262e-02 L11_spectral:1.3263e-02 L12_spectral:1.3115e-02 train_time:284972ms step_avg:41.91ms +[2025-09-11 08:27:48] [Rank 0] PRINT: step:6800/10000 val_loss:4.2296 total_sharp:1.1070e-04 L1_sharp:6.8469e-03 L2_sharp:8.2210e-04 L3_sharp:1.1885e-03 L4_sharp:1.7690e-03 L5_sharp:1.2062e-03 L6_sharp:1.2691e-03 L7_sharp:1.7206e-03 L8_sharp:2.2479e-03 L9_sharp:2.4645e-03 L10_sharp:3.1069e-03 L11_sharp:4.3969e-03 L12_sharp:3.4620e-02 total_fnorm:6.1250e+01 total_l1_linf:1.1674e+05 total_spectral:3.0625e+01 L1_fnorm:1.0000e+00 L2_fnorm:9.7266e-01 L3_fnorm:9.8438e-01 L4_fnorm:9.9219e-01 L5_fnorm:9.9609e-01 L6_fnorm:1.0078e+00 L7_fnorm:1.0000e+00 L8_fnorm:9.9219e-01 L9_fnorm:1.0000e+00 L10_fnorm:1.0000e+00 L11_fnorm:9.9609e-01 L12_fnorm:9.8828e-01 L1_l1linf:2.4023e-01 L2_l1linf:2.3145e-01 L3_l1linf:2.3145e-01 L4_l1linf:2.2559e-01 L5_l1linf:2.3242e-01 
L6_l1linf:2.3438e-01 L7_l1linf:2.3242e-01 L8_l1linf:2.3047e-01 L9_l1linf:2.2754e-01 L10_l1linf:2.2168e-01 L11_l1linf:2.1289e-01 L12_l1linf:2.1680e-01 L1_spectral:1.3323e-02 L2_spectral:1.2745e-02 L3_spectral:1.2993e-02 L4_spectral:1.3066e-02 L5_spectral:1.3123e-02 L6_spectral:1.3235e-02 L7_spectral:1.3204e-02 L8_spectral:1.3223e-02 L9_spectral:1.3189e-02 L10_spectral:1.3262e-02 L11_spectral:1.3263e-02 L12_spectral:1.3115e-02 train_time:284972ms step_avg:41.91ms +[2025-09-11 08:27:49] [Rank 0] step:6801/10000 train_time:286165ms step_avg:42.08ms +[2025-09-11 08:27:49] [Rank 0] step:6801/10000 train_time:286165ms step_avg:42.08ms +[2025-09-11 08:27:50] [Rank 0] step:6821/10000 train_time:286900ms step_avg:42.06ms +[2025-09-11 08:27:50] [Rank 0] step:6821/10000 train_time:286900ms step_avg:42.06ms +[2025-09-11 08:27:50] [Rank 0] step:6841/10000 train_time:287606ms step_avg:42.04ms +[2025-09-11 08:27:50] [Rank 0] step:6841/10000 train_time:287606ms step_avg:42.04ms +[2025-09-11 08:27:51] [Rank 0] step:6861/10000 train_time:288310ms step_avg:42.02ms +[2025-09-11 08:27:51] [Rank 0] step:6861/10000 train_time:288310ms step_avg:42.02ms +[2025-09-11 08:27:52] [Rank 0] step:6881/10000 train_time:289013ms step_avg:42.00ms +[2025-09-11 08:27:52] [Rank 0] step:6881/10000 train_time:289013ms step_avg:42.00ms +[2025-09-11 08:27:52] [Rank 0] step:6901/10000 train_time:289714ms step_avg:41.98ms +[2025-09-11 08:27:52] [Rank 0] step:6901/10000 train_time:289714ms step_avg:41.98ms +[2025-09-11 08:27:53] [Rank 0] step:6921/10000 train_time:290416ms step_avg:41.96ms +[2025-09-11 08:27:53] [Rank 0] step:6921/10000 train_time:290416ms step_avg:41.96ms +[2025-09-11 08:27:54] [Rank 0] step:6941/10000 train_time:291119ms step_avg:41.94ms +[2025-09-11 08:27:54] [Rank 0] step:6941/10000 train_time:291119ms step_avg:41.94ms +[2025-09-11 08:27:54] [Rank 0] step:6961/10000 train_time:291821ms step_avg:41.92ms +[2025-09-11 08:27:54] [Rank 0] step:6961/10000 train_time:291821ms step_avg:41.92ms 
+[2025-09-11 08:27:55] [Rank 0] step:6981/10000 train_time:292525ms step_avg:41.90ms +[2025-09-11 08:27:55] [Rank 0] step:6981/10000 train_time:292525ms step_avg:41.90ms +[2025-09-11 08:27:56] [Rank 0] step:7001/10000 train_time:293228ms step_avg:41.88ms +[2025-09-11 08:27:56] [Rank 0] step:7001/10000 train_time:293228ms step_avg:41.88ms +[2025-09-11 08:27:57] [Rank 0] step:7021/10000 train_time:293930ms step_avg:41.86ms +[2025-09-11 08:27:57] [Rank 0] step:7021/10000 train_time:293930ms step_avg:41.86ms +[2025-09-11 08:27:57] [Rank 0] step:7041/10000 train_time:294632ms step_avg:41.85ms +[2025-09-11 08:27:57] [Rank 0] step:7041/10000 train_time:294632ms step_avg:41.85ms +[2025-09-11 08:27:58] [Rank 0] step:7061/10000 train_time:295335ms step_avg:41.83ms +[2025-09-11 08:27:58] [Rank 0] step:7061/10000 train_time:295335ms step_avg:41.83ms +[2025-09-11 08:27:59] [Rank 0] step:7081/10000 train_time:296037ms step_avg:41.81ms +[2025-09-11 08:27:59] [Rank 0] step:7081/10000 train_time:296037ms step_avg:41.81ms +[2025-09-11 08:27:59] [Rank 0] step:7101/10000 train_time:296740ms step_avg:41.79ms +[2025-09-11 08:27:59] [Rank 0] step:7101/10000 train_time:296740ms step_avg:41.79ms +[2025-09-11 08:28:00] [Rank 0] step:7121/10000 train_time:297443ms step_avg:41.77ms +[2025-09-11 08:28:00] [Rank 0] step:7121/10000 train_time:297443ms step_avg:41.77ms +[2025-09-11 08:28:01] [Rank 0] step:7141/10000 train_time:298145ms step_avg:41.75ms +[2025-09-11 08:28:01] [Rank 0] step:7141/10000 train_time:298145ms step_avg:41.75ms +[2025-09-11 08:28:02] [Rank 0] step:7161/10000 train_time:298850ms step_avg:41.73ms +[2025-09-11 08:28:02] [Rank 0] step:7161/10000 train_time:298850ms step_avg:41.73ms +[2025-09-11 08:28:02] [Rank 0] step:7181/10000 train_time:299552ms step_avg:41.71ms +[2025-09-11 08:28:02] [Rank 0] step:7181/10000 train_time:299552ms step_avg:41.71ms +[2025-09-11 08:28:03] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 08:28:03] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 08:28:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 08:28:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 08:28:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 08:28:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 08:28:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:28:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:28:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 08:28:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 08:28:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 08:28:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 08:28:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 08:28:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 08:28:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 08:28:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 08:28:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 08:28:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 08:28:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 08:28:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 08:28:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 08:28:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 08:28:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 08:28:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 08:28:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 08:28:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 08:28:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 08:28:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 08:28:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 08:28:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 08:28:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 08:28:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 08:28:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 08:28:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 08:28:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 08:28:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 08:28:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 08:28:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 08:28:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 08:28:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 08:28:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 08:28:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 08:28:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 08:28:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 08:28:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 08:28:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 08:28:13] [Rank 0] PRINT: step:7200/10000 val_loss:4.1904 total_sharp:9.3028e-05 L1_sharp:5.6986e-03 L2_sharp:4.6233e-04 L3_sharp:-1.0364e-05 L4_sharp:6.4369e-04 L5_sharp:8.3173e-04 L6_sharp:9.0316e-04 L7_sharp:1.3800e-03 L8_sharp:2.1098e-03 L9_sharp:2.3789e-03 L10_sharp:2.7033e-03 L11_sharp:3.9920e-03 L12_sharp:2.5378e-02 total_fnorm:5.4000e+01 total_l1_linf:9.8304e+04 total_spectral:2.7000e+01 L1_fnorm:8.7891e-01 L2_fnorm:8.4375e-01 L3_fnorm:8.5156e-01 L4_fnorm:8.5547e-01 L5_fnorm:8.6328e-01 L6_fnorm:8.6719e-01 L7_fnorm:8.6328e-01 L8_fnorm:8.5547e-01 L9_fnorm:8.5938e-01 L10_fnorm:8.5547e-01 L11_fnorm:8.5547e-01 L12_fnorm:8.4766e-01 L1_l1linf:1.9434e-01 L2_l1linf:1.9727e-01 L3_l1linf:1.9336e-01 L4_l1linf:1.8652e-01 L5_l1linf:1.9141e-01 L6_l1linf:1.9141e-01 L7_l1linf:1.9336e-01 L8_l1linf:1.9238e-01 L9_l1linf:1.8848e-01 L10_l1linf:1.7871e-01 L11_l1linf:1.7578e-01 L12_l1linf:1.7285e-01 L1_spectral:1.1614e-02 L2_spectral:1.1205e-02 L3_spectral:1.1557e-02 L4_spectral:1.1576e-02 L5_spectral:1.1548e-02 L6_spectral:1.1590e-02 L7_spectral:1.1638e-02 L8_spectral:1.1538e-02 L9_spectral:1.1604e-02 L10_spectral:1.1614e-02 L11_spectral:1.1642e-02 L12_spectral:1.1559e-02 train_time:300235ms step_avg:41.70ms +[2025-09-11 08:28:13] [Rank 0] PRINT: step:7200/10000 
val_loss:4.1904 total_sharp:9.3028e-05 L1_sharp:5.6986e-03 L2_sharp:4.6233e-04 L3_sharp:-1.0364e-05 L4_sharp:6.4369e-04 L5_sharp:8.3173e-04 L6_sharp:9.0316e-04 L7_sharp:1.3800e-03 L8_sharp:2.1098e-03 L9_sharp:2.3789e-03 L10_sharp:2.7033e-03 L11_sharp:3.9920e-03 L12_sharp:2.5378e-02 total_fnorm:5.4000e+01 total_l1_linf:9.8304e+04 total_spectral:2.7000e+01 L1_fnorm:8.7891e-01 L2_fnorm:8.4375e-01 L3_fnorm:8.5156e-01 L4_fnorm:8.5547e-01 L5_fnorm:8.6328e-01 L6_fnorm:8.6719e-01 L7_fnorm:8.6328e-01 L8_fnorm:8.5547e-01 L9_fnorm:8.5938e-01 L10_fnorm:8.5547e-01 L11_fnorm:8.5547e-01 L12_fnorm:8.4766e-01 L1_l1linf:1.9434e-01 L2_l1linf:1.9727e-01 L3_l1linf:1.9336e-01 L4_l1linf:1.8652e-01 L5_l1linf:1.9141e-01 L6_l1linf:1.9141e-01 L7_l1linf:1.9336e-01 L8_l1linf:1.9238e-01 L9_l1linf:1.8848e-01 L10_l1linf:1.7871e-01 L11_l1linf:1.7578e-01 L12_l1linf:1.7285e-01 L1_spectral:1.1614e-02 L2_spectral:1.1205e-02 L3_spectral:1.1557e-02 L4_spectral:1.1576e-02 L5_spectral:1.1548e-02 L6_spectral:1.1590e-02 L7_spectral:1.1638e-02 L8_spectral:1.1538e-02 L9_spectral:1.1604e-02 L10_spectral:1.1614e-02 L11_spectral:1.1642e-02 L12_spectral:1.1559e-02 train_time:300235ms step_avg:41.70ms +[2025-09-11 08:28:14] [Rank 0] step:7201/10000 train_time:301408ms step_avg:41.86ms +[2025-09-11 08:28:14] [Rank 0] step:7201/10000 train_time:301408ms step_avg:41.86ms +[2025-09-11 08:28:15] [Rank 0] step:7221/10000 train_time:302124ms step_avg:41.84ms +[2025-09-11 08:28:15] [Rank 0] step:7221/10000 train_time:302124ms step_avg:41.84ms +[2025-09-11 08:28:16] [Rank 0] step:7241/10000 train_time:302828ms step_avg:41.82ms +[2025-09-11 08:28:16] [Rank 0] step:7241/10000 train_time:302828ms step_avg:41.82ms +[2025-09-11 08:28:16] [Rank 0] step:7261/10000 train_time:303533ms step_avg:41.80ms +[2025-09-11 08:28:16] [Rank 0] step:7261/10000 train_time:303533ms step_avg:41.80ms +[2025-09-11 08:28:17] [Rank 0] step:7281/10000 train_time:304241ms step_avg:41.79ms +[2025-09-11 08:28:17] [Rank 0] step:7281/10000 
train_time:304241ms step_avg:41.79ms +[2025-09-11 08:28:18] [Rank 0] step:7301/10000 train_time:304943ms step_avg:41.77ms +[2025-09-11 08:28:18] [Rank 0] step:7301/10000 train_time:304943ms step_avg:41.77ms +[2025-09-11 08:28:18] [Rank 0] step:7321/10000 train_time:305646ms step_avg:41.75ms +[2025-09-11 08:28:18] [Rank 0] step:7321/10000 train_time:305646ms step_avg:41.75ms +[2025-09-11 08:28:19] [Rank 0] step:7341/10000 train_time:306350ms step_avg:41.73ms +[2025-09-11 08:28:19] [Rank 0] step:7341/10000 train_time:306350ms step_avg:41.73ms +[2025-09-11 08:28:20] [Rank 0] step:7361/10000 train_time:307052ms step_avg:41.71ms +[2025-09-11 08:28:20] [Rank 0] step:7361/10000 train_time:307052ms step_avg:41.71ms +[2025-09-11 08:28:20] [Rank 0] step:7381/10000 train_time:307755ms step_avg:41.70ms +[2025-09-11 08:28:20] [Rank 0] step:7381/10000 train_time:307755ms step_avg:41.70ms +[2025-09-11 08:28:21] [Rank 0] step:7401/10000 train_time:308457ms step_avg:41.68ms +[2025-09-11 08:28:21] [Rank 0] step:7401/10000 train_time:308457ms step_avg:41.68ms +[2025-09-11 08:28:22] [Rank 0] step:7421/10000 train_time:309160ms step_avg:41.66ms +[2025-09-11 08:28:22] [Rank 0] step:7421/10000 train_time:309160ms step_avg:41.66ms +[2025-09-11 08:28:23] [Rank 0] step:7441/10000 train_time:309863ms step_avg:41.64ms +[2025-09-11 08:28:23] [Rank 0] step:7441/10000 train_time:309863ms step_avg:41.64ms +[2025-09-11 08:28:23] [Rank 0] step:7461/10000 train_time:310567ms step_avg:41.63ms +[2025-09-11 08:28:23] [Rank 0] step:7461/10000 train_time:310567ms step_avg:41.63ms +[2025-09-11 08:28:24] [Rank 0] step:7481/10000 train_time:311273ms step_avg:41.61ms +[2025-09-11 08:28:24] [Rank 0] step:7481/10000 train_time:311273ms step_avg:41.61ms +[2025-09-11 08:28:25] [Rank 0] step:7501/10000 train_time:311977ms step_avg:41.59ms +[2025-09-11 08:28:25] [Rank 0] step:7501/10000 train_time:311977ms step_avg:41.59ms +[2025-09-11 08:28:25] [Rank 0] step:7521/10000 train_time:312682ms step_avg:41.57ms 
+[2025-09-11 08:28:25] [Rank 0] step:7521/10000 train_time:312682ms step_avg:41.57ms +[2025-09-11 08:28:26] [Rank 0] step:7541/10000 train_time:313383ms step_avg:41.56ms +[2025-09-11 08:28:26] [Rank 0] step:7541/10000 train_time:313383ms step_avg:41.56ms +[2025-09-11 08:28:27] [Rank 0] step:7561/10000 train_time:314087ms step_avg:41.54ms +[2025-09-11 08:28:27] [Rank 0] step:7561/10000 train_time:314087ms step_avg:41.54ms +[2025-09-11 08:28:27] [Rank 0] step:7581/10000 train_time:314792ms step_avg:41.52ms +[2025-09-11 08:28:27] [Rank 0] step:7581/10000 train_time:314792ms step_avg:41.52ms +[2025-09-11 08:28:28] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 08:28:28] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 08:28:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 08:28:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 08:28:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 08:28:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 08:28:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:28:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:28:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 08:28:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 08:28:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 08:28:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 08:28:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 08:28:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 08:28:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 08:28:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 08:28:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 08:28:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 08:28:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 08:28:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 08:28:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 08:28:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 08:28:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 08:28:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 08:28:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 08:28:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 08:28:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 08:28:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 08:28:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 08:28:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 08:28:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 08:28:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 08:28:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 08:28:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 08:28:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 08:28:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 08:28:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 08:28:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 08:28:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 08:28:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 08:28:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 08:28:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 08:28:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 08:28:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 08:28:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 08:28:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:28:41] [Rank 0] PRINT: step:7600/10000 val_loss:4.1567 total_sharp:8.9347e-05 L1_sharp:5.3803e-03 L2_sharp:5.0603e-04 L3_sharp:5.3074e-04 L4_sharp:6.5525e-04 L5_sharp:9.8983e-04 L6_sharp:1.2020e-03 L7_sharp:9.0220e-04 L8_sharp:1.8708e-03 L9_sharp:2.1704e-03 L10_sharp:2.3965e-03 L11_sharp:4.0321e-03 L12_sharp:2.1674e-02 total_fnorm:4.4250e+01 total_l1_linf:7.5776e+04 total_spectral:2.2125e+01 L1_fnorm:7.4219e-01 L2_fnorm:7.1094e-01 L3_fnorm:7.1875e-01 L4_fnorm:7.1875e-01 L5_fnorm:7.2266e-01 L6_fnorm:7.2656e-01 L7_fnorm:7.2266e-01 L8_fnorm:7.1094e-01 L9_fnorm:7.1875e-01 L10_fnorm:7.1484e-01 L11_fnorm:7.1094e-01 L12_fnorm:7.0312e-01 L1_l1linf:1.5527e-01 L2_l1linf:1.5234e-01 L3_l1linf:1.5137e-01 L4_l1linf:1.4844e-01 L5_l1linf:1.5234e-01 L6_l1linf:1.5137e-01 L7_l1linf:1.4844e-01 L8_l1linf:1.5137e-01 L9_l1linf:1.4453e-01 L10_l1linf:1.4062e-01 L11_l1linf:1.3574e-01 L12_l1linf:1.4551e-01 L1_spectral:9.8828e-03 L2_spectral:9.7161e-03 L3_spectral:9.9759e-03 L4_spectral:9.9192e-03 L5_spectral:1.0001e-02 L6_spectral:9.9730e-03 L7_spectral:9.9725e-03 L8_spectral:9.8861e-03 L9_spectral:9.9143e-03 L10_spectral:9.9307e-03 L11_spectral:9.8812e-03 L12_spectral:9.9430e-03 train_time:315477ms step_avg:41.51ms +[2025-09-11 08:28:41] [Rank 0] PRINT: step:7600/10000 val_loss:4.1567 total_sharp:8.9347e-05 L1_sharp:5.3803e-03 L2_sharp:5.0603e-04 L3_sharp:5.3074e-04 L4_sharp:6.5525e-04 L5_sharp:9.8983e-04 L6_sharp:1.2020e-03 L7_sharp:9.0220e-04 L8_sharp:1.8708e-03 L9_sharp:2.1704e-03 L10_sharp:2.3965e-03 L11_sharp:4.0321e-03 L12_sharp:2.1674e-02 total_fnorm:4.4250e+01 total_l1_linf:7.5776e+04 total_spectral:2.2125e+01 L1_fnorm:7.4219e-01 L2_fnorm:7.1094e-01 L3_fnorm:7.1875e-01 L4_fnorm:7.1875e-01 L5_fnorm:7.2266e-01 L6_fnorm:7.2656e-01 L7_fnorm:7.2266e-01 L8_fnorm:7.1094e-01 L9_fnorm:7.1875e-01 L10_fnorm:7.1484e-01 L11_fnorm:7.1094e-01 L12_fnorm:7.0312e-01 L1_l1linf:1.5527e-01 L2_l1linf:1.5234e-01 L3_l1linf:1.5137e-01 L4_l1linf:1.4844e-01 L5_l1linf:1.5234e-01 
L6_l1linf:1.5137e-01 L7_l1linf:1.4844e-01 L8_l1linf:1.5137e-01 L9_l1linf:1.4453e-01 L10_l1linf:1.4062e-01 L11_l1linf:1.3574e-01 L12_l1linf:1.4551e-01 L1_spectral:9.8828e-03 L2_spectral:9.7161e-03 L3_spectral:9.9759e-03 L4_spectral:9.9192e-03 L5_spectral:1.0001e-02 L6_spectral:9.9730e-03 L7_spectral:9.9725e-03 L8_spectral:9.8861e-03 L9_spectral:9.9143e-03 L10_spectral:9.9307e-03 L11_spectral:9.8812e-03 L12_spectral:9.9430e-03 train_time:315477ms step_avg:41.51ms +[2025-09-11 08:28:42] [Rank 0] step:7601/10000 train_time:316988ms step_avg:41.70ms +[2025-09-11 08:28:42] [Rank 0] step:7601/10000 train_time:316988ms step_avg:41.70ms +[2025-09-11 08:28:43] [Rank 0] step:7621/10000 train_time:317736ms step_avg:41.69ms +[2025-09-11 08:28:43] [Rank 0] step:7621/10000 train_time:317736ms step_avg:41.69ms +[2025-09-11 08:28:44] [Rank 0] step:7641/10000 train_time:318441ms step_avg:41.68ms +[2025-09-11 08:28:44] [Rank 0] step:7641/10000 train_time:318441ms step_avg:41.68ms +[2025-09-11 08:28:44] [Rank 0] step:7661/10000 train_time:319145ms step_avg:41.66ms +[2025-09-11 08:28:44] [Rank 0] step:7661/10000 train_time:319145ms step_avg:41.66ms +[2025-09-11 08:28:45] [Rank 0] step:7681/10000 train_time:319849ms step_avg:41.64ms +[2025-09-11 08:28:45] [Rank 0] step:7681/10000 train_time:319849ms step_avg:41.64ms +[2025-09-11 08:28:46] [Rank 0] step:7701/10000 train_time:320554ms step_avg:41.63ms +[2025-09-11 08:28:46] [Rank 0] step:7701/10000 train_time:320554ms step_avg:41.63ms +[2025-09-11 08:28:46] [Rank 0] step:7721/10000 train_time:321258ms step_avg:41.61ms +[2025-09-11 08:28:46] [Rank 0] step:7721/10000 train_time:321258ms step_avg:41.61ms +[2025-09-11 08:28:47] [Rank 0] step:7741/10000 train_time:321962ms step_avg:41.59ms +[2025-09-11 08:28:47] [Rank 0] step:7741/10000 train_time:321962ms step_avg:41.59ms +[2025-09-11 08:28:48] [Rank 0] step:7761/10000 train_time:322666ms step_avg:41.58ms +[2025-09-11 08:28:48] [Rank 0] step:7761/10000 train_time:322666ms step_avg:41.58ms 
+[2025-09-11 08:28:49] [Rank 0] step:7781/10000 train_time:323372ms step_avg:41.56ms +[2025-09-11 08:28:49] [Rank 0] step:7781/10000 train_time:323372ms step_avg:41.56ms +[2025-09-11 08:28:49] [Rank 0] step:7801/10000 train_time:324075ms step_avg:41.54ms +[2025-09-11 08:28:49] [Rank 0] step:7801/10000 train_time:324075ms step_avg:41.54ms +[2025-09-11 08:28:50] [Rank 0] step:7821/10000 train_time:324780ms step_avg:41.53ms +[2025-09-11 08:28:50] [Rank 0] step:7821/10000 train_time:324780ms step_avg:41.53ms +[2025-09-11 08:28:51] [Rank 0] step:7841/10000 train_time:325486ms step_avg:41.51ms +[2025-09-11 08:28:51] [Rank 0] step:7841/10000 train_time:325486ms step_avg:41.51ms +[2025-09-11 08:28:51] [Rank 0] step:7861/10000 train_time:326192ms step_avg:41.50ms +[2025-09-11 08:28:51] [Rank 0] step:7861/10000 train_time:326192ms step_avg:41.50ms +[2025-09-11 08:28:52] [Rank 0] step:7881/10000 train_time:326896ms step_avg:41.48ms +[2025-09-11 08:28:52] [Rank 0] step:7881/10000 train_time:326896ms step_avg:41.48ms +[2025-09-11 08:28:53] [Rank 0] step:7901/10000 train_time:327601ms step_avg:41.46ms +[2025-09-11 08:28:53] [Rank 0] step:7901/10000 train_time:327601ms step_avg:41.46ms +[2025-09-11 08:28:53] [Rank 0] step:7921/10000 train_time:328306ms step_avg:41.45ms +[2025-09-11 08:28:53] [Rank 0] step:7921/10000 train_time:328306ms step_avg:41.45ms +[2025-09-11 08:28:54] [Rank 0] step:7941/10000 train_time:329012ms step_avg:41.43ms +[2025-09-11 08:28:54] [Rank 0] step:7941/10000 train_time:329012ms step_avg:41.43ms +[2025-09-11 08:28:55] [Rank 0] step:7961/10000 train_time:329715ms step_avg:41.42ms +[2025-09-11 08:28:55] [Rank 0] step:7961/10000 train_time:329715ms step_avg:41.42ms +[2025-09-11 08:28:56] [Rank 0] step:7981/10000 train_time:330422ms step_avg:41.40ms +[2025-09-11 08:28:56] [Rank 0] step:7981/10000 train_time:330422ms step_avg:41.40ms +[2025-09-11 08:28:56] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 08:28:56] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 08:28:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 08:28:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 08:28:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 08:28:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 08:28:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:28:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:28:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 08:28:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 08:28:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 08:28:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 08:29:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 08:29:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 08:29:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 08:29:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 08:29:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 08:29:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 08:29:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 08:29:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 08:29:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 08:29:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 08:29:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 08:29:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 08:29:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 08:29:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 08:29:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 08:29:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 08:29:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 08:29:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 08:29:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 08:29:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 08:29:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 08:29:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 08:29:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 08:29:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 08:29:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 08:29:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 08:29:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 08:29:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 08:29:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 08:29:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 08:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 08:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 08:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 08:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 08:29:06] [Rank 0] PRINT: step:8000/10000 val_loss:4.1364 total_sharp:7.9438e-05 L1_sharp:6.7616e-03 L2_sharp:3.9679e-04 L3_sharp:3.6570e-04 L4_sharp:1.5122e-03 L5_sharp:1.1249e-03 L6_sharp:1.2266e-03 L7_sharp:1.1304e-03 L8_sharp:1.4263e-03 L9_sharp:2.1873e-03 L10_sharp:2.5548e-03 L11_sharp:4.2616e-03 L12_sharp:2.0974e-02 total_fnorm:3.7250e+01 total_l1_linf:6.0416e+04 total_spectral:1.8625e+01 L1_fnorm:6.1328e-01 L2_fnorm:5.7812e-01 L3_fnorm:5.8203e-01 L4_fnorm:5.8594e-01 L5_fnorm:5.8984e-01 L6_fnorm:5.9766e-01 L7_fnorm:5.8984e-01 L8_fnorm:5.8203e-01 L9_fnorm:5.8594e-01 L10_fnorm:5.8594e-01 L11_fnorm:5.8594e-01 L12_fnorm:5.7422e-01 L1_l1linf:1.2109e-01 L2_l1linf:1.2061e-01 L3_l1linf:1.1670e-01 L4_l1linf:1.1377e-01 L5_l1linf:1.1768e-01 L6_l1linf:1.1670e-01 L7_l1linf:1.1572e-01 L8_l1linf:1.1621e-01 L9_l1linf:1.1084e-01 L10_l1linf:1.0791e-01 L11_l1linf:1.0498e-01 L12_l1linf:1.0596e-01 L1_spectral:8.5344e-03 L2_spectral:7.9299e-03 L3_spectral:8.4531e-03 L4_spectral:8.3851e-03 L5_spectral:8.2848e-03 L6_spectral:8.3806e-03 L7_spectral:8.3319e-03 L8_spectral:8.2599e-03 L9_spectral:8.2902e-03 L10_spectral:8.3134e-03 L11_spectral:8.3205e-03 L12_spectral:8.3351e-03 train_time:331106ms step_avg:41.39ms +[2025-09-11 08:29:06] [Rank 0] PRINT: step:8000/10000 
val_loss:4.1364 total_sharp:7.9438e-05 L1_sharp:6.7616e-03 L2_sharp:3.9679e-04 L3_sharp:3.6570e-04 L4_sharp:1.5122e-03 L5_sharp:1.1249e-03 L6_sharp:1.2266e-03 L7_sharp:1.1304e-03 L8_sharp:1.4263e-03 L9_sharp:2.1873e-03 L10_sharp:2.5548e-03 L11_sharp:4.2616e-03 L12_sharp:2.0974e-02 total_fnorm:3.7250e+01 total_l1_linf:6.0416e+04 total_spectral:1.8625e+01 L1_fnorm:6.1328e-01 L2_fnorm:5.7812e-01 L3_fnorm:5.8203e-01 L4_fnorm:5.8594e-01 L5_fnorm:5.8984e-01 L6_fnorm:5.9766e-01 L7_fnorm:5.8984e-01 L8_fnorm:5.8203e-01 L9_fnorm:5.8594e-01 L10_fnorm:5.8594e-01 L11_fnorm:5.8594e-01 L12_fnorm:5.7422e-01 L1_l1linf:1.2109e-01 L2_l1linf:1.2061e-01 L3_l1linf:1.1670e-01 L4_l1linf:1.1377e-01 L5_l1linf:1.1768e-01 L6_l1linf:1.1670e-01 L7_l1linf:1.1572e-01 L8_l1linf:1.1621e-01 L9_l1linf:1.1084e-01 L10_l1linf:1.0791e-01 L11_l1linf:1.0498e-01 L12_l1linf:1.0596e-01 L1_spectral:8.5344e-03 L2_spectral:7.9299e-03 L3_spectral:8.4531e-03 L4_spectral:8.3851e-03 L5_spectral:8.2848e-03 L6_spectral:8.3806e-03 L7_spectral:8.3319e-03 L8_spectral:8.2599e-03 L9_spectral:8.2902e-03 L10_spectral:8.3134e-03 L11_spectral:8.3205e-03 L12_spectral:8.3351e-03 train_time:331106ms step_avg:41.39ms +[2025-09-11 08:29:07] [Rank 0] step:8001/10000 train_time:332266ms step_avg:41.53ms +[2025-09-11 08:29:07] [Rank 0] step:8001/10000 train_time:332266ms step_avg:41.53ms +[2025-09-11 08:29:08] [Rank 0] step:8021/10000 train_time:333002ms step_avg:41.52ms +[2025-09-11 08:29:08] [Rank 0] step:8021/10000 train_time:333002ms step_avg:41.52ms +[2025-09-11 08:29:09] [Rank 0] step:8041/10000 train_time:333708ms step_avg:41.50ms +[2025-09-11 08:29:09] [Rank 0] step:8041/10000 train_time:333708ms step_avg:41.50ms +[2025-09-11 08:29:10] [Rank 0] step:8061/10000 train_time:334416ms step_avg:41.49ms +[2025-09-11 08:29:10] [Rank 0] step:8061/10000 train_time:334416ms step_avg:41.49ms +[2025-09-11 08:29:10] [Rank 0] step:8081/10000 train_time:335118ms step_avg:41.47ms +[2025-09-11 08:29:10] [Rank 0] step:8081/10000 
train_time:335118ms step_avg:41.47ms +[2025-09-11 08:29:11] [Rank 0] step:8101/10000 train_time:335820ms step_avg:41.45ms +[2025-09-11 08:29:11] [Rank 0] step:8101/10000 train_time:335820ms step_avg:41.45ms +[2025-09-11 08:29:12] [Rank 0] step:8121/10000 train_time:336527ms step_avg:41.44ms +[2025-09-11 08:29:12] [Rank 0] step:8121/10000 train_time:336527ms step_avg:41.44ms +[2025-09-11 08:29:13] [Rank 0] step:8141/10000 train_time:338159ms step_avg:41.54ms +[2025-09-11 08:29:13] [Rank 0] step:8141/10000 train_time:338159ms step_avg:41.54ms +[2025-09-11 08:29:14] [Rank 0] step:8161/10000 train_time:338867ms step_avg:41.52ms +[2025-09-11 08:29:14] [Rank 0] step:8161/10000 train_time:338867ms step_avg:41.52ms +[2025-09-11 08:29:15] [Rank 0] step:8181/10000 train_time:339880ms step_avg:41.55ms +[2025-09-11 08:29:15] [Rank 0] step:8181/10000 train_time:339880ms step_avg:41.55ms +[2025-09-11 08:29:16] [Rank 0] step:8201/10000 train_time:340592ms step_avg:41.53ms +[2025-09-11 08:29:16] [Rank 0] step:8201/10000 train_time:340592ms step_avg:41.53ms +[2025-09-11 08:29:16] [Rank 0] step:8221/10000 train_time:341304ms step_avg:41.52ms +[2025-09-11 08:29:16] [Rank 0] step:8221/10000 train_time:341304ms step_avg:41.52ms +[2025-09-11 08:29:17] [Rank 0] step:8241/10000 train_time:342024ms step_avg:41.50ms +[2025-09-11 08:29:17] [Rank 0] step:8241/10000 train_time:342024ms step_avg:41.50ms +[2025-09-11 08:29:18] [Rank 0] step:8261/10000 train_time:342734ms step_avg:41.49ms +[2025-09-11 08:29:18] [Rank 0] step:8261/10000 train_time:342734ms step_avg:41.49ms +[2025-09-11 08:29:19] [Rank 0] step:8281/10000 train_time:343442ms step_avg:41.47ms +[2025-09-11 08:29:19] [Rank 0] step:8281/10000 train_time:343442ms step_avg:41.47ms +[2025-09-11 08:29:19] [Rank 0] step:8301/10000 train_time:344153ms step_avg:41.46ms +[2025-09-11 08:29:19] [Rank 0] step:8301/10000 train_time:344153ms step_avg:41.46ms +[2025-09-11 08:29:20] [Rank 0] step:8321/10000 train_time:344863ms step_avg:41.44ms 
+[2025-09-11 08:29:20] [Rank 0] step:8321/10000 train_time:344863ms step_avg:41.44ms +[2025-09-11 08:29:21] [Rank 0] step:8341/10000 train_time:345581ms step_avg:41.43ms +[2025-09-11 08:29:21] [Rank 0] step:8341/10000 train_time:345581ms step_avg:41.43ms +[2025-09-11 08:29:21] [Rank 0] step:8361/10000 train_time:346288ms step_avg:41.42ms +[2025-09-11 08:29:21] [Rank 0] step:8361/10000 train_time:346288ms step_avg:41.42ms +[2025-09-11 08:29:22] [Rank 0] step:8381/10000 train_time:347002ms step_avg:41.40ms +[2025-09-11 08:29:22] [Rank 0] step:8381/10000 train_time:347002ms step_avg:41.40ms +[2025-09-11 08:29:23] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 08:29:23] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 08:29:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 08:29:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 08:29:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 08:29:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 08:29:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:29:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:29:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 08:29:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 08:29:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 08:29:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 08:29:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 08:29:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 08:29:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 08:29:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 08:29:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 08:29:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 08:29:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 08:29:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 08:29:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 08:29:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 08:29:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 08:29:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 08:29:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 08:29:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 08:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 08:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 08:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 08:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 08:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 08:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 08:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 08:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 08:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 08:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 08:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 08:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 08:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 08:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 08:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 08:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 08:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 08:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 08:29:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 08:29:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:29:33] [Rank 0] PRINT: step:8400/10000 val_loss:4.1146 total_sharp:6.7374e-05 L1_sharp:3.7947e-03 L2_sharp:4.4842e-04 L3_sharp:1.5178e-04 L4_sharp:9.6119e-04 L5_sharp:7.0401e-04 L6_sharp:7.1130e-04 L7_sharp:1.0956e-03 L8_sharp:1.7830e-03 L9_sharp:1.9545e-03 L10_sharp:2.7101e-03 L11_sharp:3.3311e-03 L12_sharp:2.5308e-02 total_fnorm:2.9625e+01 total_l1_linf:4.4032e+04 total_spectral:1.4875e+01 L1_fnorm:4.9023e-01 L2_fnorm:4.5508e-01 L3_fnorm:4.5898e-01 L4_fnorm:4.5898e-01 L5_fnorm:4.6289e-01 L6_fnorm:4.6289e-01 L7_fnorm:4.6289e-01 L8_fnorm:4.5703e-01 L9_fnorm:4.5898e-01 L10_fnorm:4.5703e-01 L11_fnorm:4.5703e-01 L12_fnorm:4.4922e-01 L1_l1linf:8.6914e-02 L2_l1linf:8.8867e-02 L3_l1linf:8.7402e-02 L4_l1linf:8.3496e-02 L5_l1linf:8.7402e-02 L6_l1linf:8.5449e-02 L7_l1linf:8.4473e-02 L8_l1linf:8.2520e-02 L9_l1linf:8.0078e-02 L10_l1linf:7.8125e-02 L11_l1linf:7.5684e-02 L12_l1linf:7.8125e-02 L1_spectral:7.1242e-03 L2_spectral:6.4687e-03 L3_spectral:6.7586e-03 L4_spectral:6.8106e-03 L5_spectral:6.6832e-03 L6_spectral:6.7836e-03 L7_spectral:6.7038e-03 L8_spectral:6.6080e-03 L9_spectral:6.6789e-03 L10_spectral:6.6475e-03 L11_spectral:6.6323e-03 L12_spectral:6.8170e-03 train_time:347695ms step_avg:41.39ms +[2025-09-11 08:29:33] [Rank 0] PRINT: step:8400/10000 val_loss:4.1146 total_sharp:6.7374e-05 L1_sharp:3.7947e-03 L2_sharp:4.4842e-04 L3_sharp:1.5178e-04 L4_sharp:9.6119e-04 L5_sharp:7.0401e-04 L6_sharp:7.1130e-04 L7_sharp:1.0956e-03 L8_sharp:1.7830e-03 L9_sharp:1.9545e-03 L10_sharp:2.7101e-03 L11_sharp:3.3311e-03 L12_sharp:2.5308e-02 total_fnorm:2.9625e+01 total_l1_linf:4.4032e+04 total_spectral:1.4875e+01 L1_fnorm:4.9023e-01 L2_fnorm:4.5508e-01 L3_fnorm:4.5898e-01 L4_fnorm:4.5898e-01 L5_fnorm:4.6289e-01 L6_fnorm:4.6289e-01 L7_fnorm:4.6289e-01 L8_fnorm:4.5703e-01 L9_fnorm:4.5898e-01 L10_fnorm:4.5703e-01 L11_fnorm:4.5703e-01 L12_fnorm:4.4922e-01 L1_l1linf:8.6914e-02 L2_l1linf:8.8867e-02 L3_l1linf:8.7402e-02 L4_l1linf:8.3496e-02 L5_l1linf:8.7402e-02 
L6_l1linf:8.5449e-02 L7_l1linf:8.4473e-02 L8_l1linf:8.2520e-02 L9_l1linf:8.0078e-02 L10_l1linf:7.8125e-02 L11_l1linf:7.5684e-02 L12_l1linf:7.8125e-02 L1_spectral:7.1242e-03 L2_spectral:6.4687e-03 L3_spectral:6.7586e-03 L4_spectral:6.8106e-03 L5_spectral:6.6832e-03 L6_spectral:6.7836e-03 L7_spectral:6.7038e-03 L8_spectral:6.6080e-03 L9_spectral:6.6789e-03 L10_spectral:6.6475e-03 L11_spectral:6.6323e-03 L12_spectral:6.8170e-03 train_time:347695ms step_avg:41.39ms +[2025-09-11 08:29:34] [Rank 0] step:8401/10000 train_time:348914ms step_avg:41.53ms +[2025-09-11 08:29:34] [Rank 0] step:8401/10000 train_time:348914ms step_avg:41.53ms +[2025-09-11 08:29:35] [Rank 0] step:8421/10000 train_time:349654ms step_avg:41.52ms +[2025-09-11 08:29:35] [Rank 0] step:8421/10000 train_time:349654ms step_avg:41.52ms +[2025-09-11 08:29:36] [Rank 0] step:8441/10000 train_time:350367ms step_avg:41.51ms +[2025-09-11 08:29:36] [Rank 0] step:8441/10000 train_time:350367ms step_avg:41.51ms +[2025-09-11 08:29:36] [Rank 0] step:8461/10000 train_time:351080ms step_avg:41.49ms +[2025-09-11 08:29:36] [Rank 0] step:8461/10000 train_time:351080ms step_avg:41.49ms +[2025-09-11 08:29:37] [Rank 0] step:8481/10000 train_time:351793ms step_avg:41.48ms +[2025-09-11 08:29:37] [Rank 0] step:8481/10000 train_time:351793ms step_avg:41.48ms +[2025-09-11 08:29:38] [Rank 0] step:8501/10000 train_time:352505ms step_avg:41.47ms +[2025-09-11 08:29:38] [Rank 0] step:8501/10000 train_time:352505ms step_avg:41.47ms +[2025-09-11 08:29:38] [Rank 0] step:8521/10000 train_time:353216ms step_avg:41.45ms +[2025-09-11 08:29:38] [Rank 0] step:8521/10000 train_time:353216ms step_avg:41.45ms +[2025-09-11 08:29:39] [Rank 0] step:8541/10000 train_time:353927ms step_avg:41.44ms +[2025-09-11 08:29:39] [Rank 0] step:8541/10000 train_time:353927ms step_avg:41.44ms +[2025-09-11 08:29:40] [Rank 0] step:8561/10000 train_time:354643ms step_avg:41.43ms +[2025-09-11 08:29:40] [Rank 0] step:8561/10000 train_time:354643ms step_avg:41.43ms 
+[2025-09-11 08:29:41] [Rank 0] step:8581/10000 train_time:355357ms step_avg:41.41ms +[2025-09-11 08:29:41] [Rank 0] step:8581/10000 train_time:355357ms step_avg:41.41ms +[2025-09-11 08:29:41] [Rank 0] step:8601/10000 train_time:356070ms step_avg:41.40ms +[2025-09-11 08:29:41] [Rank 0] step:8601/10000 train_time:356070ms step_avg:41.40ms +[2025-09-11 08:29:42] [Rank 0] step:8621/10000 train_time:356781ms step_avg:41.39ms +[2025-09-11 08:29:42] [Rank 0] step:8621/10000 train_time:356781ms step_avg:41.39ms +[2025-09-11 08:29:43] [Rank 0] step:8641/10000 train_time:357491ms step_avg:41.37ms +[2025-09-11 08:29:43] [Rank 0] step:8641/10000 train_time:357491ms step_avg:41.37ms +[2025-09-11 08:29:43] [Rank 0] step:8661/10000 train_time:358203ms step_avg:41.36ms +[2025-09-11 08:29:43] [Rank 0] step:8661/10000 train_time:358203ms step_avg:41.36ms +[2025-09-11 08:29:44] [Rank 0] step:8681/10000 train_time:358916ms step_avg:41.35ms +[2025-09-11 08:29:44] [Rank 0] step:8681/10000 train_time:358916ms step_avg:41.35ms +[2025-09-11 08:29:45] [Rank 0] step:8701/10000 train_time:359627ms step_avg:41.33ms +[2025-09-11 08:29:45] [Rank 0] step:8701/10000 train_time:359627ms step_avg:41.33ms +[2025-09-11 08:29:46] [Rank 0] step:8721/10000 train_time:360341ms step_avg:41.32ms +[2025-09-11 08:29:46] [Rank 0] step:8721/10000 train_time:360341ms step_avg:41.32ms +[2025-09-11 08:29:46] [Rank 0] step:8741/10000 train_time:361049ms step_avg:41.31ms +[2025-09-11 08:29:46] [Rank 0] step:8741/10000 train_time:361049ms step_avg:41.31ms +[2025-09-11 08:29:47] [Rank 0] step:8761/10000 train_time:361764ms step_avg:41.29ms +[2025-09-11 08:29:47] [Rank 0] step:8761/10000 train_time:361764ms step_avg:41.29ms +[2025-09-11 08:29:48] [Rank 0] step:8781/10000 train_time:362472ms step_avg:41.28ms +[2025-09-11 08:29:48] [Rank 0] step:8781/10000 train_time:362472ms step_avg:41.28ms +[2025-09-11 08:29:48] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 08:29:48] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 08:29:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 08:29:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 08:29:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 08:29:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 08:29:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:29:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:29:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 08:29:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 08:29:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 08:29:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 08:29:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 08:29:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 08:29:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 08:29:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 08:29:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 08:29:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 08:29:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 08:29:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 08:29:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 08:29:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 08:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 08:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 08:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 08:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 08:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 08:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 08:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 08:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 08:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 08:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 08:29:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 08:29:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 08:29:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 08:29:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 08:29:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 08:29:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 08:29:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 08:29:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 08:29:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 08:29:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 08:29:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 08:29:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 08:29:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 08:29:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 08:29:58] [Rank 0] PRINT: step:8800/10000 val_loss:4.1013 total_sharp:6.1775e-05 L1_sharp:4.6242e-03 L2_sharp:5.2303e-04 L3_sharp:6.0467e-05 L4_sharp:9.7805e-04 L5_sharp:5.4067e-04 L6_sharp:5.5096e-04 L7_sharp:7.1640e-04 L8_sharp:9.6669e-04 L9_sharp:1.4780e-03 L10_sharp:2.0434e-03 L11_sharp:3.4441e-03 L12_sharp:2.2091e-02 total_fnorm:2.1500e+01 total_l1_linf:2.8544e+04 total_spectral:1.0812e+01 L1_fnorm:3.6523e-01 L2_fnorm:3.3594e-01 L3_fnorm:3.3789e-01 L4_fnorm:3.3984e-01 L5_fnorm:3.3984e-01 L6_fnorm:3.4180e-01 L7_fnorm:3.4180e-01 L8_fnorm:3.3789e-01 L9_fnorm:3.3789e-01 L10_fnorm:3.3594e-01 L11_fnorm:3.3594e-01 L12_fnorm:3.3008e-01 L1_l1linf:5.8594e-02 L2_l1linf:6.0303e-02 L3_l1linf:5.9326e-02 L4_l1linf:6.0791e-02 L5_l1linf:5.6641e-02 L6_l1linf:5.6641e-02 L7_l1linf:5.9326e-02 L8_l1linf:5.4199e-02 L9_l1linf:5.3711e-02 L10_l1linf:5.1758e-02 L11_l1linf:5.2734e-02 L12_l1linf:5.2246e-02 L1_spectral:5.5907e-03 L2_spectral:4.9001e-03 L3_spectral:5.1328e-03 L4_spectral:5.1269e-03 L5_spectral:5.0535e-03 L6_spectral:5.0964e-03 L7_spectral:5.0706e-03 L8_spectral:5.0955e-03 L9_spectral:5.0108e-03 L10_spectral:4.9858e-03 L11_spectral:5.0722e-03 L12_spectral:5.1691e-03 train_time:363160ms step_avg:41.27ms +[2025-09-11 08:29:58] [Rank 0] PRINT: step:8800/10000 
val_loss:4.1013 total_sharp:6.1775e-05 L1_sharp:4.6242e-03 L2_sharp:5.2303e-04 L3_sharp:6.0467e-05 L4_sharp:9.7805e-04 L5_sharp:5.4067e-04 L6_sharp:5.5096e-04 L7_sharp:7.1640e-04 L8_sharp:9.6669e-04 L9_sharp:1.4780e-03 L10_sharp:2.0434e-03 L11_sharp:3.4441e-03 L12_sharp:2.2091e-02 total_fnorm:2.1500e+01 total_l1_linf:2.8544e+04 total_spectral:1.0812e+01 L1_fnorm:3.6523e-01 L2_fnorm:3.3594e-01 L3_fnorm:3.3789e-01 L4_fnorm:3.3984e-01 L5_fnorm:3.3984e-01 L6_fnorm:3.4180e-01 L7_fnorm:3.4180e-01 L8_fnorm:3.3789e-01 L9_fnorm:3.3789e-01 L10_fnorm:3.3594e-01 L11_fnorm:3.3594e-01 L12_fnorm:3.3008e-01 L1_l1linf:5.8594e-02 L2_l1linf:6.0303e-02 L3_l1linf:5.9326e-02 L4_l1linf:6.0791e-02 L5_l1linf:5.6641e-02 L6_l1linf:5.6641e-02 L7_l1linf:5.9326e-02 L8_l1linf:5.4199e-02 L9_l1linf:5.3711e-02 L10_l1linf:5.1758e-02 L11_l1linf:5.2734e-02 L12_l1linf:5.2246e-02 L1_spectral:5.5907e-03 L2_spectral:4.9001e-03 L3_spectral:5.1328e-03 L4_spectral:5.1269e-03 L5_spectral:5.0535e-03 L6_spectral:5.0964e-03 L7_spectral:5.0706e-03 L8_spectral:5.0955e-03 L9_spectral:5.0108e-03 L10_spectral:4.9858e-03 L11_spectral:5.0722e-03 L12_spectral:5.1691e-03 train_time:363160ms step_avg:41.27ms +[2025-09-11 08:30:00] [Rank 0] step:8801/10000 train_time:364333ms step_avg:41.40ms +[2025-09-11 08:30:00] [Rank 0] step:8801/10000 train_time:364333ms step_avg:41.40ms +[2025-09-11 08:30:00] [Rank 0] step:8821/10000 train_time:365074ms step_avg:41.39ms +[2025-09-11 08:30:00] [Rank 0] step:8821/10000 train_time:365074ms step_avg:41.39ms +[2025-09-11 08:30:01] [Rank 0] step:8841/10000 train_time:365828ms step_avg:41.38ms +[2025-09-11 08:30:01] [Rank 0] step:8841/10000 train_time:365828ms step_avg:41.38ms +[2025-09-11 08:30:02] [Rank 0] step:8861/10000 train_time:366632ms step_avg:41.38ms +[2025-09-11 08:30:02] [Rank 0] step:8861/10000 train_time:366632ms step_avg:41.38ms +[2025-09-11 08:30:03] [Rank 0] step:8881/10000 train_time:367397ms step_avg:41.37ms +[2025-09-11 08:30:03] [Rank 0] step:8881/10000 
train_time:367397ms step_avg:41.37ms +[2025-09-11 08:30:03] [Rank 0] step:8901/10000 train_time:368112ms step_avg:41.36ms +[2025-09-11 08:30:03] [Rank 0] step:8901/10000 train_time:368112ms step_avg:41.36ms +[2025-09-11 08:30:04] [Rank 0] step:8921/10000 train_time:368821ms step_avg:41.34ms +[2025-09-11 08:30:04] [Rank 0] step:8921/10000 train_time:368821ms step_avg:41.34ms +[2025-09-11 08:30:05] [Rank 0] step:8941/10000 train_time:369535ms step_avg:41.33ms +[2025-09-11 08:30:05] [Rank 0] step:8941/10000 train_time:369535ms step_avg:41.33ms +[2025-09-11 08:30:05] [Rank 0] step:8961/10000 train_time:370256ms step_avg:41.32ms +[2025-09-11 08:30:05] [Rank 0] step:8961/10000 train_time:370256ms step_avg:41.32ms +[2025-09-11 08:30:06] [Rank 0] step:8981/10000 train_time:370972ms step_avg:41.31ms +[2025-09-11 08:30:06] [Rank 0] step:8981/10000 train_time:370972ms step_avg:41.31ms +[2025-09-11 08:30:07] [Rank 0] step:9001/10000 train_time:371679ms step_avg:41.29ms +[2025-09-11 08:30:07] [Rank 0] step:9001/10000 train_time:371679ms step_avg:41.29ms +[2025-09-11 08:30:08] [Rank 0] step:9021/10000 train_time:372394ms step_avg:41.28ms +[2025-09-11 08:30:08] [Rank 0] step:9021/10000 train_time:372394ms step_avg:41.28ms +[2025-09-11 08:30:08] [Rank 0] step:9041/10000 train_time:373109ms step_avg:41.27ms +[2025-09-11 08:30:08] [Rank 0] step:9041/10000 train_time:373109ms step_avg:41.27ms +[2025-09-11 08:30:09] [Rank 0] step:9061/10000 train_time:373821ms step_avg:41.26ms +[2025-09-11 08:30:09] [Rank 0] step:9061/10000 train_time:373821ms step_avg:41.26ms +[2025-09-11 08:30:10] [Rank 0] step:9081/10000 train_time:374536ms step_avg:41.24ms +[2025-09-11 08:30:10] [Rank 0] step:9081/10000 train_time:374536ms step_avg:41.24ms +[2025-09-11 08:30:10] [Rank 0] step:9101/10000 train_time:375252ms step_avg:41.23ms +[2025-09-11 08:30:10] [Rank 0] step:9101/10000 train_time:375252ms step_avg:41.23ms +[2025-09-11 08:30:11] [Rank 0] step:9121/10000 train_time:375970ms step_avg:41.22ms 
+[2025-09-11 08:30:11] [Rank 0] step:9121/10000 train_time:375970ms step_avg:41.22ms +[2025-09-11 08:30:12] [Rank 0] step:9141/10000 train_time:376682ms step_avg:41.21ms +[2025-09-11 08:30:12] [Rank 0] step:9141/10000 train_time:376682ms step_avg:41.21ms +[2025-09-11 08:30:13] [Rank 0] step:9161/10000 train_time:377397ms step_avg:41.20ms +[2025-09-11 08:30:13] [Rank 0] step:9161/10000 train_time:377397ms step_avg:41.20ms +[2025-09-11 08:30:13] [Rank 0] step:9181/10000 train_time:378113ms step_avg:41.18ms +[2025-09-11 08:30:13] [Rank 0] step:9181/10000 train_time:378113ms step_avg:41.18ms +[2025-09-11 08:30:14] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 08:30:14] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 08:30:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 08:30:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 08:30:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 08:30:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 08:30:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:30:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:30:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 08:30:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 08:30:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 08:30:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 08:30:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 08:30:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 08:30:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 08:30:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 08:30:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 08:30:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 08:30:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 08:30:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 08:30:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 08:30:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 08:30:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 08:30:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 08:30:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 08:30:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 08:30:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 08:30:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 08:30:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 08:30:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 08:30:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 08:30:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 08:30:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 08:30:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 08:30:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 08:30:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 08:30:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 08:30:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 08:30:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 08:30:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 08:30:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 08:30:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 08:30:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 08:30:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 08:30:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 08:30:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:30:24] [Rank 0] PRINT: step:9200/10000 val_loss:4.0817 total_sharp:5.1393e-05 L1_sharp:3.8623e-03 L2_sharp:5.5141e-04 L3_sharp:3.6868e-04 L4_sharp:9.1051e-04 L5_sharp:6.1372e-04 L6_sharp:7.8719e-04 L7_sharp:7.4068e-04 L8_sharp:1.1203e-03 L9_sharp:1.3952e-03 L10_sharp:1.7362e-03 L11_sharp:2.6426e-03 L12_sharp:1.9141e-02 total_fnorm:1.5062e+01 total_l1_linf:1.7280e+04 total_spectral:7.5312e+00 L1_fnorm:2.4414e-01 L2_fnorm:2.2070e-01 L3_fnorm:2.2168e-01 L4_fnorm:2.2363e-01 L5_fnorm:2.2461e-01 L6_fnorm:2.2656e-01 L7_fnorm:2.2559e-01 L8_fnorm:2.2168e-01 L9_fnorm:2.2363e-01 L10_fnorm:2.2168e-01 L11_fnorm:2.2070e-01 L12_fnorm:2.1875e-01 L1_l1linf:3.4424e-02 L2_l1linf:3.4424e-02 L3_l1linf:3.5645e-02 L4_l1linf:3.3203e-02 L5_l1linf:3.3691e-02 L6_l1linf:3.4424e-02 L7_l1linf:3.2715e-02 L8_l1linf:3.2471e-02 L9_l1linf:3.1494e-02 L10_l1linf:3.0640e-02 L11_l1linf:3.0396e-02 L12_l1linf:3.1494e-02 L1_spectral:3.8607e-03 L2_spectral:3.2440e-03 L3_spectral:3.4724e-03 L4_spectral:3.4685e-03 L5_spectral:3.4285e-03 L6_spectral:3.4548e-03 L7_spectral:3.4124e-03 L8_spectral:3.4270e-03 L9_spectral:3.3624e-03 L10_spectral:3.3757e-03 L11_spectral:3.3824e-03 L12_spectral:3.5022e-03 train_time:378809ms step_avg:41.17ms +[2025-09-11 08:30:24] [Rank 0] PRINT: step:9200/10000 val_loss:4.0817 total_sharp:5.1393e-05 L1_sharp:3.8623e-03 L2_sharp:5.5141e-04 L3_sharp:3.6868e-04 L4_sharp:9.1051e-04 L5_sharp:6.1372e-04 L6_sharp:7.8719e-04 L7_sharp:7.4068e-04 L8_sharp:1.1203e-03 L9_sharp:1.3952e-03 L10_sharp:1.7362e-03 L11_sharp:2.6426e-03 L12_sharp:1.9141e-02 total_fnorm:1.5062e+01 total_l1_linf:1.7280e+04 total_spectral:7.5312e+00 L1_fnorm:2.4414e-01 L2_fnorm:2.2070e-01 L3_fnorm:2.2168e-01 L4_fnorm:2.2363e-01 L5_fnorm:2.2461e-01 L6_fnorm:2.2656e-01 L7_fnorm:2.2559e-01 L8_fnorm:2.2168e-01 L9_fnorm:2.2363e-01 L10_fnorm:2.2168e-01 L11_fnorm:2.2070e-01 L12_fnorm:2.1875e-01 L1_l1linf:3.4424e-02 L2_l1linf:3.4424e-02 L3_l1linf:3.5645e-02 L4_l1linf:3.3203e-02 L5_l1linf:3.3691e-02 
L6_l1linf:3.4424e-02 L7_l1linf:3.2715e-02 L8_l1linf:3.2471e-02 L9_l1linf:3.1494e-02 L10_l1linf:3.0640e-02 L11_l1linf:3.0396e-02 L12_l1linf:3.1494e-02 L1_spectral:3.8607e-03 L2_spectral:3.2440e-03 L3_spectral:3.4724e-03 L4_spectral:3.4685e-03 L5_spectral:3.4285e-03 L6_spectral:3.4548e-03 L7_spectral:3.4124e-03 L8_spectral:3.4270e-03 L9_spectral:3.3624e-03 L10_spectral:3.3757e-03 L11_spectral:3.3824e-03 L12_spectral:3.5022e-03 train_time:378809ms step_avg:41.17ms +[2025-09-11 08:30:25] [Rank 0] step:9201/10000 train_time:380007ms step_avg:41.30ms +[2025-09-11 08:30:25] [Rank 0] step:9201/10000 train_time:380007ms step_avg:41.30ms +[2025-09-11 08:30:26] [Rank 0] step:9221/10000 train_time:380754ms step_avg:41.29ms +[2025-09-11 08:30:26] [Rank 0] step:9221/10000 train_time:380754ms step_avg:41.29ms +[2025-09-11 08:30:27] [Rank 0] step:9241/10000 train_time:381466ms step_avg:41.28ms +[2025-09-11 08:30:27] [Rank 0] step:9241/10000 train_time:381466ms step_avg:41.28ms +[2025-09-11 08:30:27] [Rank 0] step:9261/10000 train_time:382181ms step_avg:41.27ms +[2025-09-11 08:30:27] [Rank 0] step:9261/10000 train_time:382181ms step_avg:41.27ms +[2025-09-11 08:30:28] [Rank 0] step:9281/10000 train_time:382897ms step_avg:41.26ms +[2025-09-11 08:30:28] [Rank 0] step:9281/10000 train_time:382897ms step_avg:41.26ms +[2025-09-11 08:30:29] [Rank 0] step:9301/10000 train_time:383609ms step_avg:41.24ms +[2025-09-11 08:30:29] [Rank 0] step:9301/10000 train_time:383609ms step_avg:41.24ms +[2025-09-11 08:30:30] [Rank 0] step:9321/10000 train_time:384324ms step_avg:41.23ms +[2025-09-11 08:30:30] [Rank 0] step:9321/10000 train_time:384324ms step_avg:41.23ms +[2025-09-11 08:30:30] [Rank 0] step:9341/10000 train_time:385034ms step_avg:41.22ms +[2025-09-11 08:30:30] [Rank 0] step:9341/10000 train_time:385034ms step_avg:41.22ms +[2025-09-11 08:30:31] [Rank 0] step:9361/10000 train_time:385744ms step_avg:41.21ms +[2025-09-11 08:30:31] [Rank 0] step:9361/10000 train_time:385744ms step_avg:41.21ms 
+[2025-09-11 08:30:32] [Rank 0] step:9381/10000 train_time:386454ms step_avg:41.20ms +[2025-09-11 08:30:32] [Rank 0] step:9381/10000 train_time:386454ms step_avg:41.20ms +[2025-09-11 08:30:32] [Rank 0] step:9401/10000 train_time:387169ms step_avg:41.18ms +[2025-09-11 08:30:32] [Rank 0] step:9401/10000 train_time:387169ms step_avg:41.18ms +[2025-09-11 08:30:33] [Rank 0] step:9421/10000 train_time:387883ms step_avg:41.17ms +[2025-09-11 08:30:33] [Rank 0] step:9421/10000 train_time:387883ms step_avg:41.17ms +[2025-09-11 08:30:34] [Rank 0] step:9441/10000 train_time:388600ms step_avg:41.16ms +[2025-09-11 08:30:34] [Rank 0] step:9441/10000 train_time:388600ms step_avg:41.16ms +[2025-09-11 08:30:35] [Rank 0] step:9461/10000 train_time:389313ms step_avg:41.15ms +[2025-09-11 08:30:35] [Rank 0] step:9461/10000 train_time:389313ms step_avg:41.15ms +[2025-09-11 08:30:35] [Rank 0] step:9481/10000 train_time:390029ms step_avg:41.14ms +[2025-09-11 08:30:35] [Rank 0] step:9481/10000 train_time:390029ms step_avg:41.14ms +[2025-09-11 08:30:36] [Rank 0] step:9501/10000 train_time:390744ms step_avg:41.13ms +[2025-09-11 08:30:36] [Rank 0] step:9501/10000 train_time:390744ms step_avg:41.13ms +[2025-09-11 08:30:37] [Rank 0] step:9521/10000 train_time:391461ms step_avg:41.12ms +[2025-09-11 08:30:37] [Rank 0] step:9521/10000 train_time:391461ms step_avg:41.12ms +[2025-09-11 08:30:37] [Rank 0] step:9541/10000 train_time:392172ms step_avg:41.10ms +[2025-09-11 08:30:37] [Rank 0] step:9541/10000 train_time:392172ms step_avg:41.10ms +[2025-09-11 08:30:38] [Rank 0] step:9561/10000 train_time:392885ms step_avg:41.09ms +[2025-09-11 08:30:38] [Rank 0] step:9561/10000 train_time:392885ms step_avg:41.09ms +[2025-09-11 08:30:39] [Rank 0] step:9581/10000 train_time:393601ms step_avg:41.08ms +[2025-09-11 08:30:39] [Rank 0] step:9581/10000 train_time:393601ms step_avg:41.08ms +[2025-09-11 08:30:40] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 08:30:40] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 08:30:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 08:30:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 08:30:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 08:30:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 08:30:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:30:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:30:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 08:30:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 08:30:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 08:30:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 08:30:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 08:30:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 08:30:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 08:30:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 08:30:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 08:30:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 08:30:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 08:30:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 08:30:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 08:30:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 08:30:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 08:30:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 08:30:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 08:30:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 08:30:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 08:30:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 08:30:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 08:30:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 08:30:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 08:30:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 08:30:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 08:30:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 08:30:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 08:30:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 08:30:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 08:30:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 08:30:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 08:30:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 08:30:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 08:30:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 08:30:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 08:30:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 08:30:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 08:30:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 08:30:49] [Rank 0] PRINT: step:9600/10000 val_loss:4.0722 total_sharp:3.0598e-05 L1_sharp:2.6078e-03 L2_sharp:4.3028e-04 L3_sharp:1.5038e-04 L4_sharp:3.4514e-04 L5_sharp:3.9311e-04 L6_sharp:1.8780e-04 L7_sharp:5.9176e-04 L8_sharp:1.1388e-03 L9_sharp:1.0244e-03 L10_sharp:1.2959e-03 L11_sharp:2.2624e-03 L12_sharp:9.7526e-03 total_fnorm:8.6875e+00 total_l1_linf:8.4480e+03 total_spectral:4.3438e+00 L1_fnorm:1.4258e-01 L2_fnorm:1.2598e-01 L3_fnorm:1.2598e-01 L4_fnorm:1.2695e-01 L5_fnorm:1.2793e-01 L6_fnorm:1.2793e-01 L7_fnorm:1.2793e-01 L8_fnorm:1.2598e-01 L9_fnorm:1.2695e-01 L10_fnorm:1.2598e-01 L11_fnorm:1.2598e-01 L12_fnorm:1.2451e-01 L1_l1linf:1.6724e-02 L2_l1linf:1.7456e-02 L3_l1linf:1.5747e-02 L4_l1linf:1.6602e-02 L5_l1linf:1.6357e-02 L6_l1linf:1.7090e-02 L7_l1linf:1.6724e-02 L8_l1linf:1.6479e-02 L9_l1linf:1.5503e-02 L10_l1linf:1.4465e-02 L11_l1linf:1.8433e-02 L12_l1linf:1.6479e-02 L1_spectral:2.3471e-03 L2_spectral:1.8839e-03 L3_spectral:2.0401e-03 L4_spectral:2.0352e-03 L5_spectral:1.9996e-03 L6_spectral:2.0364e-03 L7_spectral:1.9720e-03 L8_spectral:2.0341e-03 L9_spectral:1.9612e-03 L10_spectral:1.9571e-03 L11_spectral:1.9716e-03 L12_spectral:2.0676e-03 train_time:394291ms step_avg:41.07ms +[2025-09-11 08:30:49] [Rank 0] PRINT: step:9600/10000 
val_loss:4.0722 total_sharp:3.0598e-05 L1_sharp:2.6078e-03 L2_sharp:4.3028e-04 L3_sharp:1.5038e-04 L4_sharp:3.4514e-04 L5_sharp:3.9311e-04 L6_sharp:1.8780e-04 L7_sharp:5.9176e-04 L8_sharp:1.1388e-03 L9_sharp:1.0244e-03 L10_sharp:1.2959e-03 L11_sharp:2.2624e-03 L12_sharp:9.7526e-03 total_fnorm:8.6875e+00 total_l1_linf:8.4480e+03 total_spectral:4.3438e+00 L1_fnorm:1.4258e-01 L2_fnorm:1.2598e-01 L3_fnorm:1.2598e-01 L4_fnorm:1.2695e-01 L5_fnorm:1.2793e-01 L6_fnorm:1.2793e-01 L7_fnorm:1.2793e-01 L8_fnorm:1.2598e-01 L9_fnorm:1.2695e-01 L10_fnorm:1.2598e-01 L11_fnorm:1.2598e-01 L12_fnorm:1.2451e-01 L1_l1linf:1.6724e-02 L2_l1linf:1.7456e-02 L3_l1linf:1.5747e-02 L4_l1linf:1.6602e-02 L5_l1linf:1.6357e-02 L6_l1linf:1.7090e-02 L7_l1linf:1.6724e-02 L8_l1linf:1.6479e-02 L9_l1linf:1.5503e-02 L10_l1linf:1.4465e-02 L11_l1linf:1.8433e-02 L12_l1linf:1.6479e-02 L1_spectral:2.3471e-03 L2_spectral:1.8839e-03 L3_spectral:2.0401e-03 L4_spectral:2.0352e-03 L5_spectral:1.9996e-03 L6_spectral:2.0364e-03 L7_spectral:1.9720e-03 L8_spectral:2.0341e-03 L9_spectral:1.9612e-03 L10_spectral:1.9571e-03 L11_spectral:1.9716e-03 L12_spectral:2.0676e-03 train_time:394291ms step_avg:41.07ms +[2025-09-11 08:30:51] [Rank 0] step:9601/10000 train_time:395525ms step_avg:41.20ms +[2025-09-11 08:30:51] [Rank 0] step:9601/10000 train_time:395525ms step_avg:41.20ms +[2025-09-11 08:30:51] [Rank 0] step:9621/10000 train_time:396260ms step_avg:41.19ms +[2025-09-11 08:30:51] [Rank 0] step:9621/10000 train_time:396260ms step_avg:41.19ms +[2025-09-11 08:30:52] [Rank 0] step:9641/10000 train_time:396980ms step_avg:41.18ms +[2025-09-11 08:30:52] [Rank 0] step:9641/10000 train_time:396980ms step_avg:41.18ms +[2025-09-11 08:30:53] [Rank 0] step:9661/10000 train_time:397706ms step_avg:41.17ms +[2025-09-11 08:30:53] [Rank 0] step:9661/10000 train_time:397706ms step_avg:41.17ms +[2025-09-11 08:30:54] [Rank 0] step:9681/10000 train_time:398424ms step_avg:41.16ms +[2025-09-11 08:30:54] [Rank 0] step:9681/10000 
train_time:398424ms step_avg:41.16ms +[2025-09-11 08:30:54] [Rank 0] step:9701/10000 train_time:399145ms step_avg:41.14ms +[2025-09-11 08:30:54] [Rank 0] step:9701/10000 train_time:399145ms step_avg:41.14ms +[2025-09-11 08:30:55] [Rank 0] step:9721/10000 train_time:399869ms step_avg:41.13ms +[2025-09-11 08:30:55] [Rank 0] step:9721/10000 train_time:399869ms step_avg:41.13ms +[2025-09-11 08:30:56] [Rank 0] step:9741/10000 train_time:400590ms step_avg:41.12ms +[2025-09-11 08:30:56] [Rank 0] step:9741/10000 train_time:400590ms step_avg:41.12ms +[2025-09-11 08:30:57] [Rank 0] step:9761/10000 train_time:401311ms step_avg:41.11ms +[2025-09-11 08:30:57] [Rank 0] step:9761/10000 train_time:401311ms step_avg:41.11ms +[2025-09-11 08:30:57] [Rank 0] step:9781/10000 train_time:402030ms step_avg:41.10ms +[2025-09-11 08:30:57] [Rank 0] step:9781/10000 train_time:402030ms step_avg:41.10ms +[2025-09-11 08:30:58] [Rank 0] step:9801/10000 train_time:402755ms step_avg:41.09ms +[2025-09-11 08:30:58] [Rank 0] step:9801/10000 train_time:402755ms step_avg:41.09ms +[2025-09-11 08:30:59] [Rank 0] step:9821/10000 train_time:403477ms step_avg:41.08ms +[2025-09-11 08:30:59] [Rank 0] step:9821/10000 train_time:403477ms step_avg:41.08ms +[2025-09-11 08:30:59] [Rank 0] step:9841/10000 train_time:404201ms step_avg:41.07ms +[2025-09-11 08:30:59] [Rank 0] step:9841/10000 train_time:404201ms step_avg:41.07ms +[2025-09-11 08:31:00] [Rank 0] step:9861/10000 train_time:404923ms step_avg:41.06ms +[2025-09-11 08:31:00] [Rank 0] step:9861/10000 train_time:404923ms step_avg:41.06ms +[2025-09-11 08:31:01] [Rank 0] step:9881/10000 train_time:405644ms step_avg:41.05ms +[2025-09-11 08:31:01] [Rank 0] step:9881/10000 train_time:405644ms step_avg:41.05ms +[2025-09-11 08:31:02] [Rank 0] step:9901/10000 train_time:406362ms step_avg:41.04ms +[2025-09-11 08:31:02] [Rank 0] step:9901/10000 train_time:406362ms step_avg:41.04ms +[2025-09-11 08:31:02] [Rank 0] step:9921/10000 train_time:407082ms step_avg:41.03ms 
+[2025-09-11 08:31:02] [Rank 0] step:9921/10000 train_time:407082ms step_avg:41.03ms +[2025-09-11 08:31:03] [Rank 0] step:9941/10000 train_time:407807ms step_avg:41.02ms +[2025-09-11 08:31:03] [Rank 0] step:9941/10000 train_time:407807ms step_avg:41.02ms +[2025-09-11 08:31:04] [Rank 0] step:9961/10000 train_time:408534ms step_avg:41.01ms +[2025-09-11 08:31:04] [Rank 0] step:9961/10000 train_time:408534ms step_avg:41.01ms +[2025-09-11 08:31:04] [Rank 0] step:9981/10000 train_time:409255ms step_avg:41.00ms +[2025-09-11 08:31:04] [Rank 0] step:9981/10000 train_time:409255ms step_avg:41.00ms +[2025-09-11 08:31:05] [Rank 0] step:10000/10000 train_time:409949ms step_avg:40.99ms +[2025-09-11 08:31:05] [Rank 0] step:10000/10000 train_time:409949ms step_avg:40.99ms +[2025-09-11 08:31:05] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 08:31:05] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 08:31:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 08:31:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 08:31:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 08:31:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 08:31:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:31:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:31:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 08:31:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 08:31:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 08:31:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 08:31:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 08:31:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 08:31:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 08:31:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 08:31:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 08:31:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 08:31:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 08:31:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 08:31:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 08:31:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 08:31:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 08:31:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 08:31:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 08:31:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 08:31:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 08:31:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 08:31:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 08:31:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 08:31:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 08:31:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 08:31:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 08:31:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 08:31:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 08:31:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 08:31:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 08:31:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 08:31:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 08:31:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 08:31:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 08:31:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 08:31:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 08:31:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 08:31:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 08:31:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:31:15] [Rank 0] PRINT: step:10000/10000 val_loss:4.0688 total_sharp:1.9852e-05 L1_sharp:2.1607e-03 L2_sharp:1.2392e-04 L3_sharp:1.2785e-05 L4_sharp:6.2442e-04 L5_sharp:4.0638e-04 L6_sharp:3.5180e-04 L7_sharp:5.3654e-04 L8_sharp:8.7593e-04 L9_sharp:7.8513e-04 L10_sharp:1.0562e-03 L11_sharp:1.4537e-03 L12_sharp:7.6116e-03 total_fnorm:3.3281e+00 total_l1_linf:2.3680e+03 total_spectral:1.6719e+00 L1_fnorm:5.5176e-02 L2_fnorm:4.7852e-02 L3_fnorm:4.8584e-02 L4_fnorm:4.8828e-02 L5_fnorm:4.8828e-02 L6_fnorm:4.9316e-02 L7_fnorm:4.9561e-02 L8_fnorm:4.9072e-02 L9_fnorm:4.8828e-02 L10_fnorm:4.8584e-02 L11_fnorm:4.8584e-02 L12_fnorm:4.8096e-02 L1_l1linf:5.1575e-03 L2_l1linf:5.3101e-03 L3_l1linf:4.9133e-03 L4_l1linf:4.8828e-03 L5_l1linf:4.7607e-03 L6_l1linf:5.0659e-03 L7_l1linf:4.8218e-03 L8_l1linf:4.8828e-03 L9_l1linf:4.6387e-03 L10_l1linf:4.5776e-03 L11_l1linf:4.3640e-03 L12_l1linf:5.0964e-03 L1_spectral:9.4473e-04 L2_spectral:7.0985e-04 L3_spectral:7.9932e-04 L4_spectral:8.0595e-04 L5_spectral:7.8461e-04 L6_spectral:7.9830e-04 L7_spectral:7.9277e-04 L8_spectral:8.1125e-04 L9_spectral:7.7380e-04 L10_spectral:7.7771e-04 L11_spectral:7.8178e-04 L12_spectral:8.3405e-04 train_time:409969ms step_avg:41.00ms +[2025-09-11 08:31:15] [Rank 0] PRINT: step:10000/10000 val_loss:4.0688 total_sharp:1.9852e-05 L1_sharp:2.1607e-03 L2_sharp:1.2392e-04 L3_sharp:1.2785e-05 L4_sharp:6.2442e-04 L5_sharp:4.0638e-04 L6_sharp:3.5180e-04 L7_sharp:5.3654e-04 L8_sharp:8.7593e-04 L9_sharp:7.8513e-04 L10_sharp:1.0562e-03 L11_sharp:1.4537e-03 L12_sharp:7.6116e-03 total_fnorm:3.3281e+00 total_l1_linf:2.3680e+03 total_spectral:1.6719e+00 L1_fnorm:5.5176e-02 L2_fnorm:4.7852e-02 L3_fnorm:4.8584e-02 L4_fnorm:4.8828e-02 L5_fnorm:4.8828e-02 L6_fnorm:4.9316e-02 L7_fnorm:4.9561e-02 L8_fnorm:4.9072e-02 L9_fnorm:4.8828e-02 L10_fnorm:4.8584e-02 L11_fnorm:4.8584e-02 L12_fnorm:4.8096e-02 L1_l1linf:5.1575e-03 L2_l1linf:5.3101e-03 L3_l1linf:4.9133e-03 L4_l1linf:4.8828e-03 L5_l1linf:4.7607e-03 
L6_l1linf:5.0659e-03 L7_l1linf:4.8218e-03 L8_l1linf:4.8828e-03 L9_l1linf:4.6387e-03 L10_l1linf:4.5776e-03 L11_l1linf:4.3640e-03 L12_l1linf:5.0964e-03 L1_spectral:9.4473e-04 L2_spectral:7.0985e-04 L3_spectral:7.9932e-04 L4_spectral:8.0595e-04 L5_spectral:7.8461e-04 L6_spectral:7.9830e-04 L7_spectral:7.9277e-04 L8_spectral:8.1125e-04 L9_spectral:7.7380e-04 L10_spectral:7.7771e-04 L11_spectral:7.8178e-04 L12_spectral:8.3405e-04 train_time:409969ms step_avg:41.00ms +[2025-09-11 08:31:15] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 08:31:15 2025 --- +[2025-09-11 08:31:15] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 08:31:15 2025 --- +[2025-09-11 08:31:15] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 08:31:15] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.02_seed_44/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.02_seed_44/config.json new file mode 100644 index 0000000000000000000000000000000000000000..10a132c318441f5bc63a2372ee6157580eac7732 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.02_seed_44/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 44, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.02, + "muon_lr": 0.02, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "6aa4b22d-53fa-4c9b-8fdc-c84c826fe3ed", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.02_seed_44/training_log_6aa4b22d-53fa-4c9b-8fdc-c84c826fe3ed.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.02_seed_44/training_log_6aa4b22d-53fa-4c9b-8fdc-c84c826fe3ed.txt new file mode 100644 index 0000000000000000000000000000000000000000..f2738bc1de16140aa940177b2afe1bd3986dee5c --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.02_seed_44/training_log_6aa4b22d-53fa-4c9b-8fdc-c84c826fe3ed.txt @@ -0,0 +1,4264 @@ +[2025-09-11 08:58:07] [Rank 0] PRINT: --- Script Start: Thu Sep 11 08:58:07 2025 --- +[2025-09-11 08:58:07] [Rank 0] PRINT: --- Script Start: Thu Sep 11 08:58:07 2025 --- +[2025-09-11 08:58:07] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.02, muon_lr=0.02, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 08:58:07] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.02, muon_lr=0.02, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 08:58:07] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 08:58:07] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 08:58:07] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 08:58:07] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 08:58:07] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.02_seed_44 +[2025-09-11 08:58:07] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.02_seed_44 +[2025-09-11 08:58:07] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import 
dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, 
"unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." 
+ "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention 
sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 08:58:07] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 08:58:07] [Rank 0] PRINT: Constructing model... +[2025-09-11 08:58:07] [Rank 0] PRINT: Constructing model... +[2025-09-11 08:58:08] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 08:58:08] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 08:58:08] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 08:58:08] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 08:58:08] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 08:58:08] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 08:58:08] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 08:58:08] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 08:58:08] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 08:58:08] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 08:58:10] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 08:58:10] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 08:58:10] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 08:58:10] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 08:58:10] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 08:58:10] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 08:58:16] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 08:58:16] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 08:58:16] [Rank 0] PRINT: Starting warmup... +[2025-09-11 08:58:16] [Rank 0] PRINT: Starting warmup... +[2025-09-11 08:58:55] [Rank 0] PRINT: Warmup complete. +[2025-09-11 08:58:55] [Rank 0] PRINT: Warmup complete. +[2025-09-11 08:58:55] [Rank 0] PRINT: Starting training... +[2025-09-11 08:58:55] [Rank 0] PRINT: Starting training... 
+[2025-09-11 08:58:56] [Rank 0] step:21/10000 train_time:1134ms step_avg:54.02ms +[2025-09-11 08:58:56] [Rank 0] step:21/10000 train_time:1134ms step_avg:54.02ms +[2025-09-11 08:58:57] [Rank 0] step:41/10000 train_time:1864ms step_avg:45.48ms +[2025-09-11 08:58:57] [Rank 0] step:41/10000 train_time:1864ms step_avg:45.48ms +[2025-09-11 08:58:57] [Rank 0] step:61/10000 train_time:2590ms step_avg:42.45ms +[2025-09-11 08:58:57] [Rank 0] step:61/10000 train_time:2590ms step_avg:42.45ms +[2025-09-11 08:58:58] [Rank 0] step:81/10000 train_time:3315ms step_avg:40.93ms +[2025-09-11 08:58:58] [Rank 0] step:81/10000 train_time:3315ms step_avg:40.93ms +[2025-09-11 08:58:59] [Rank 0] step:101/10000 train_time:4040ms step_avg:40.00ms +[2025-09-11 08:58:59] [Rank 0] step:101/10000 train_time:4040ms step_avg:40.00ms +[2025-09-11 08:58:59] [Rank 0] step:121/10000 train_time:4765ms step_avg:39.38ms +[2025-09-11 08:58:59] [Rank 0] step:121/10000 train_time:4765ms step_avg:39.38ms +[2025-09-11 08:59:00] [Rank 0] step:141/10000 train_time:5490ms step_avg:38.94ms +[2025-09-11 08:59:00] [Rank 0] step:141/10000 train_time:5490ms step_avg:38.94ms +[2025-09-11 08:59:01] [Rank 0] step:161/10000 train_time:6214ms step_avg:38.60ms +[2025-09-11 08:59:01] [Rank 0] step:161/10000 train_time:6214ms step_avg:38.60ms +[2025-09-11 08:59:02] [Rank 0] step:181/10000 train_time:6941ms step_avg:38.35ms +[2025-09-11 08:59:02] [Rank 0] step:181/10000 train_time:6941ms step_avg:38.35ms +[2025-09-11 08:59:02] [Rank 0] step:201/10000 train_time:7666ms step_avg:38.14ms +[2025-09-11 08:59:02] [Rank 0] step:201/10000 train_time:7666ms step_avg:38.14ms +[2025-09-11 08:59:03] [Rank 0] step:221/10000 train_time:8391ms step_avg:37.97ms +[2025-09-11 08:59:03] [Rank 0] step:221/10000 train_time:8391ms step_avg:37.97ms +[2025-09-11 08:59:04] [Rank 0] step:241/10000 train_time:9115ms step_avg:37.82ms +[2025-09-11 08:59:04] [Rank 0] step:241/10000 train_time:9115ms step_avg:37.82ms +[2025-09-11 08:59:05] [Rank 0] 
step:261/10000 train_time:9841ms step_avg:37.70ms +[2025-09-11 08:59:05] [Rank 0] step:261/10000 train_time:9841ms step_avg:37.70ms +[2025-09-11 08:59:05] [Rank 0] step:281/10000 train_time:10565ms step_avg:37.60ms +[2025-09-11 08:59:05] [Rank 0] step:281/10000 train_time:10565ms step_avg:37.60ms +[2025-09-11 08:59:06] [Rank 0] step:301/10000 train_time:11289ms step_avg:37.51ms +[2025-09-11 08:59:06] [Rank 0] step:301/10000 train_time:11289ms step_avg:37.51ms +[2025-09-11 08:59:07] [Rank 0] step:321/10000 train_time:12014ms step_avg:37.43ms +[2025-09-11 08:59:07] [Rank 0] step:321/10000 train_time:12014ms step_avg:37.43ms +[2025-09-11 08:59:07] [Rank 0] step:341/10000 train_time:12738ms step_avg:37.36ms +[2025-09-11 08:59:07] [Rank 0] step:341/10000 train_time:12738ms step_avg:37.36ms +[2025-09-11 08:59:08] [Rank 0] step:361/10000 train_time:13463ms step_avg:37.29ms +[2025-09-11 08:59:08] [Rank 0] step:361/10000 train_time:13463ms step_avg:37.29ms +[2025-09-11 08:59:09] [Rank 0] step:381/10000 train_time:14188ms step_avg:37.24ms +[2025-09-11 08:59:09] [Rank 0] step:381/10000 train_time:14188ms step_avg:37.24ms +[2025-09-11 08:59:10] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 08:59:10] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 08:59:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 08:59:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 08:59:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 08:59:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 08:59:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:59:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 08:59:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 08:59:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 08:59:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 08:59:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 08:59:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 08:59:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 08:59:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 08:59:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 08:59:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 08:59:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 08:59:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 08:59:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 08:59:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 08:59:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 08:59:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 08:59:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 08:59:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 08:59:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 08:59:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 08:59:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 08:59:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 08:59:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 08:59:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 08:59:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 08:59:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 08:59:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 08:59:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 08:59:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 08:59:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 08:59:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 08:59:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 08:59:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 08:59:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 08:59:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 08:59:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 08:59:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 08:59:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 08:59:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:59:57] [Rank 0] PRINT: step:400/10000 val_loss:5.9094 total_sharp:1.0045e-03 L1_sharp:2.5947e-02 L2_sharp:1.0876e-02 L3_sharp:5.0793e-03 L4_sharp:4.4771e-03 L5_sharp:4.3333e-03 L6_sharp:5.1880e-03 L7_sharp:3.8567e-03 L8_sharp:4.1061e-03 L9_sharp:3.6976e-03 L10_sharp:4.2634e-03 L11_sharp:4.6799e-03 L12_sharp:3.5689e-02 total_fnorm:7.7860e+01 total_l1_linf:2.1593e+05 total_spectral:3.8920e+01 L1_fnorm:2.4535e+00 L2_fnorm:2.4416e+00 L3_fnorm:2.4408e+00 L4_fnorm:2.4100e+00 L5_fnorm:2.3916e+00 L6_fnorm:2.3811e+00 L7_fnorm:2.3689e+00 L8_fnorm:2.3617e+00 L9_fnorm:2.3418e+00 L10_fnorm:2.3280e+00 L11_fnorm:2.3064e+00 L12_fnorm:2.2110e+00 L1_l1linf:8.2560e-01 L2_l1linf:8.1428e-01 L3_l1linf:8.1197e-01 L4_l1linf:8.2402e-01 L5_l1linf:8.1667e-01 L6_l1linf:8.1744e-01 L7_l1linf:8.1072e-01 L8_l1linf:8.1394e-01 L9_l1linf:8.0710e-01 L10_l1linf:7.8552e-01 L11_l1linf:7.6279e-01 L12_l1linf:6.9514e-01 L1_spectral:2.4094e-02 L2_spectral:2.4098e-02 L3_spectral:2.4082e-02 L4_spectral:2.4092e-02 L5_spectral:2.4090e-02 L6_spectral:2.4082e-02 L7_spectral:2.4082e-02 L8_spectral:2.4080e-02 L9_spectral:2.4077e-02 L10_spectral:2.4071e-02 L11_spectral:2.4077e-02 L12_spectral:2.4067e-02 train_time:14892ms step_avg:37.23ms +[2025-09-11 08:59:57] [Rank 0] PRINT: step:400/10000 val_loss:5.9094 total_sharp:1.0045e-03 L1_sharp:2.5947e-02 L2_sharp:1.0876e-02 L3_sharp:5.0793e-03 L4_sharp:4.4771e-03 L5_sharp:4.3333e-03 L6_sharp:5.1880e-03 L7_sharp:3.8567e-03 L8_sharp:4.1061e-03 L9_sharp:3.6976e-03 L10_sharp:4.2634e-03 L11_sharp:4.6799e-03 L12_sharp:3.5689e-02 total_fnorm:7.7860e+01 total_l1_linf:2.1593e+05 total_spectral:3.8920e+01 L1_fnorm:2.4535e+00 L2_fnorm:2.4416e+00 L3_fnorm:2.4408e+00 L4_fnorm:2.4100e+00 L5_fnorm:2.3916e+00 L6_fnorm:2.3811e+00 L7_fnorm:2.3689e+00 L8_fnorm:2.3617e+00 L9_fnorm:2.3418e+00 L10_fnorm:2.3280e+00 L11_fnorm:2.3064e+00 L12_fnorm:2.2110e+00 L1_l1linf:8.2560e-01 L2_l1linf:8.1428e-01 L3_l1linf:8.1197e-01 L4_l1linf:8.2402e-01 L5_l1linf:8.1667e-01 
L6_l1linf:8.1744e-01 L7_l1linf:8.1072e-01 L8_l1linf:8.1394e-01 L9_l1linf:8.0710e-01 L10_l1linf:7.8552e-01 L11_l1linf:7.6279e-01 L12_l1linf:6.9514e-01 L1_spectral:2.4094e-02 L2_spectral:2.4098e-02 L3_spectral:2.4082e-02 L4_spectral:2.4092e-02 L5_spectral:2.4090e-02 L6_spectral:2.4082e-02 L7_spectral:2.4082e-02 L8_spectral:2.4080e-02 L9_spectral:2.4077e-02 L10_spectral:2.4071e-02 L11_spectral:2.4077e-02 L12_spectral:2.4067e-02 train_time:14892ms step_avg:37.23ms +[2025-09-11 09:00:27] [Rank 0] step:401/10000 train_time:45120ms step_avg:112.52ms +[2025-09-11 09:00:27] [Rank 0] step:401/10000 train_time:45120ms step_avg:112.52ms +[2025-09-11 09:00:29] [Rank 0] step:421/10000 train_time:47317ms step_avg:112.39ms +[2025-09-11 09:00:29] [Rank 0] step:421/10000 train_time:47317ms step_avg:112.39ms +[2025-09-11 09:00:30] [Rank 0] step:441/10000 train_time:47954ms step_avg:108.74ms +[2025-09-11 09:00:30] [Rank 0] step:441/10000 train_time:47954ms step_avg:108.74ms +[2025-09-11 09:00:31] [Rank 0] step:461/10000 train_time:48590ms step_avg:105.40ms +[2025-09-11 09:00:31] [Rank 0] step:461/10000 train_time:48590ms step_avg:105.40ms +[2025-09-11 09:00:31] [Rank 0] step:481/10000 train_time:49227ms step_avg:102.34ms +[2025-09-11 09:00:31] [Rank 0] step:481/10000 train_time:49227ms step_avg:102.34ms +[2025-09-11 09:00:32] [Rank 0] step:501/10000 train_time:49862ms step_avg:99.53ms +[2025-09-11 09:00:32] [Rank 0] step:501/10000 train_time:49862ms step_avg:99.53ms +[2025-09-11 09:00:33] [Rank 0] step:521/10000 train_time:50498ms step_avg:96.93ms +[2025-09-11 09:00:33] [Rank 0] step:521/10000 train_time:50498ms step_avg:96.93ms +[2025-09-11 09:00:33] [Rank 0] step:541/10000 train_time:51134ms step_avg:94.52ms +[2025-09-11 09:00:33] [Rank 0] step:541/10000 train_time:51134ms step_avg:94.52ms +[2025-09-11 09:00:34] [Rank 0] step:561/10000 train_time:51769ms step_avg:92.28ms +[2025-09-11 09:00:34] [Rank 0] step:561/10000 train_time:51769ms step_avg:92.28ms +[2025-09-11 09:00:34] [Rank 
0] step:581/10000 train_time:52405ms step_avg:90.20ms +[2025-09-11 09:00:34] [Rank 0] step:581/10000 train_time:52405ms step_avg:90.20ms +[2025-09-11 09:00:35] [Rank 0] step:601/10000 train_time:53042ms step_avg:88.26ms +[2025-09-11 09:00:35] [Rank 0] step:601/10000 train_time:53042ms step_avg:88.26ms +[2025-09-11 09:00:36] [Rank 0] step:621/10000 train_time:53677ms step_avg:86.44ms +[2025-09-11 09:00:36] [Rank 0] step:621/10000 train_time:53677ms step_avg:86.44ms +[2025-09-11 09:00:36] [Rank 0] step:641/10000 train_time:54312ms step_avg:84.73ms +[2025-09-11 09:00:36] [Rank 0] step:641/10000 train_time:54312ms step_avg:84.73ms +[2025-09-11 09:00:37] [Rank 0] step:661/10000 train_time:54947ms step_avg:83.13ms +[2025-09-11 09:00:37] [Rank 0] step:661/10000 train_time:54947ms step_avg:83.13ms +[2025-09-11 09:00:38] [Rank 0] step:681/10000 train_time:55583ms step_avg:81.62ms +[2025-09-11 09:00:38] [Rank 0] step:681/10000 train_time:55583ms step_avg:81.62ms +[2025-09-11 09:00:38] [Rank 0] step:701/10000 train_time:56218ms step_avg:80.20ms +[2025-09-11 09:00:38] [Rank 0] step:701/10000 train_time:56218ms step_avg:80.20ms +[2025-09-11 09:00:39] [Rank 0] step:721/10000 train_time:56853ms step_avg:78.85ms +[2025-09-11 09:00:39] [Rank 0] step:721/10000 train_time:56853ms step_avg:78.85ms +[2025-09-11 09:00:39] [Rank 0] step:741/10000 train_time:57489ms step_avg:77.58ms +[2025-09-11 09:00:39] [Rank 0] step:741/10000 train_time:57489ms step_avg:77.58ms +[2025-09-11 09:00:40] [Rank 0] step:761/10000 train_time:58129ms step_avg:76.39ms +[2025-09-11 09:00:40] [Rank 0] step:761/10000 train_time:58129ms step_avg:76.39ms +[2025-09-11 09:00:41] [Rank 0] step:781/10000 train_time:58769ms step_avg:75.25ms +[2025-09-11 09:00:41] [Rank 0] step:781/10000 train_time:58769ms step_avg:75.25ms +[2025-09-11 09:00:41] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 09:00:41] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 09:00:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 09:00:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 09:01:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 09:01:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 09:01:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:01:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:01:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 09:01:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 09:01:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 09:01:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 09:01:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 09:01:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 09:01:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 09:01:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 09:01:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 09:01:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 09:01:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 09:01:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 09:01:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 09:01:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 09:01:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 09:01:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 09:01:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 09:01:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 09:01:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 09:01:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 09:01:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 09:01:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 09:01:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 09:01:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 09:01:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 09:01:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 09:01:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 09:01:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 09:01:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 09:01:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 09:01:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 09:01:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 09:01:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... 
+[2025-09-11 09:01:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 09:01:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 09:01:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 09:01:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:01:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:01:26] [Rank 0] PRINT: step:800/10000 val_loss:5.4755 total_sharp:5.1077e-04 L1_sharp:1.5572e-02 L2_sharp:2.5335e-03 L3_sharp:1.2848e-03 L4_sharp:8.4747e-04 L5_sharp:1.3259e-03 L6_sharp:1.3717e-03 L7_sharp:1.3525e-03 L8_sharp:2.9899e-03 L9_sharp:1.9608e-03 L10_sharp:2.6255e-03 L11_sharp:3.5478e-03 L12_sharp:2.8381e-02 total_fnorm:7.9500e+01 total_l1_linf:1.9763e+05 total_spectral:4.0000e+01 L1_fnorm:2.5000e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.4688e+00 L4_fnorm:2.4531e+00 L5_fnorm:2.4531e+00 L6_fnorm:2.4688e+00 L7_fnorm:2.4688e+00 L8_fnorm:2.4062e+00 L9_fnorm:2.4375e+00 L10_fnorm:2.4062e+00 L11_fnorm:2.3906e+00 L12_fnorm:2.2031e+00 L1_l1linf:7.9688e-01 L2_l1linf:7.8125e-01 L3_l1linf:7.6562e-01 L4_l1linf:7.6953e-01 L5_l1linf:7.6172e-01 L6_l1linf:7.7344e-01 L7_l1linf:7.6172e-01 L8_l1linf:7.8906e-01 L9_l1linf:7.8516e-01 L10_l1linf:7.7344e-01 L11_l1linf:7.2266e-01 L12_l1linf:5.9375e-01 L1_spectral:2.6773e-02 L2_spectral:2.6957e-02 L3_spectral:2.6833e-02 L4_spectral:2.6720e-02 L5_spectral:2.6694e-02 L6_spectral:2.6652e-02 L7_spectral:2.6795e-02 L8_spectral:2.6798e-02 L9_spectral:2.6613e-02 L10_spectral:2.6618e-02 L11_spectral:2.6599e-02 L12_spectral:2.6648e-02 train_time:59392ms step_avg:74.24ms +[2025-09-11 09:01:26] [Rank 0] PRINT: step:800/10000 val_loss:5.4755 total_sharp:5.1077e-04 L1_sharp:1.5572e-02 L2_sharp:2.5335e-03 L3_sharp:1.2848e-03 L4_sharp:8.4747e-04 L5_sharp:1.3259e-03 L6_sharp:1.3717e-03 L7_sharp:1.3525e-03 L8_sharp:2.9899e-03 
L9_sharp:1.9608e-03 L10_sharp:2.6255e-03 L11_sharp:3.5478e-03 L12_sharp:2.8381e-02 total_fnorm:7.9500e+01 total_l1_linf:1.9763e+05 total_spectral:4.0000e+01 L1_fnorm:2.5000e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.4688e+00 L4_fnorm:2.4531e+00 L5_fnorm:2.4531e+00 L6_fnorm:2.4688e+00 L7_fnorm:2.4688e+00 L8_fnorm:2.4062e+00 L9_fnorm:2.4375e+00 L10_fnorm:2.4062e+00 L11_fnorm:2.3906e+00 L12_fnorm:2.2031e+00 L1_l1linf:7.9688e-01 L2_l1linf:7.8125e-01 L3_l1linf:7.6562e-01 L4_l1linf:7.6953e-01 L5_l1linf:7.6172e-01 L6_l1linf:7.7344e-01 L7_l1linf:7.6172e-01 L8_l1linf:7.8906e-01 L9_l1linf:7.8516e-01 L10_l1linf:7.7344e-01 L11_l1linf:7.2266e-01 L12_l1linf:5.9375e-01 L1_spectral:2.6773e-02 L2_spectral:2.6957e-02 L3_spectral:2.6833e-02 L4_spectral:2.6720e-02 L5_spectral:2.6694e-02 L6_spectral:2.6652e-02 L7_spectral:2.6795e-02 L8_spectral:2.6798e-02 L9_spectral:2.6613e-02 L10_spectral:2.6618e-02 L11_spectral:2.6599e-02 L12_spectral:2.6648e-02 train_time:59392ms step_avg:74.24ms +[2025-09-11 09:01:28] [Rank 0] step:801/10000 train_time:61011ms step_avg:76.17ms +[2025-09-11 09:01:28] [Rank 0] step:801/10000 train_time:61011ms step_avg:76.17ms +[2025-09-11 09:01:29] [Rank 0] step:821/10000 train_time:61658ms step_avg:75.10ms +[2025-09-11 09:01:29] [Rank 0] step:821/10000 train_time:61658ms step_avg:75.10ms +[2025-09-11 09:01:29] [Rank 0] step:841/10000 train_time:62301ms step_avg:74.08ms +[2025-09-11 09:01:29] [Rank 0] step:841/10000 train_time:62301ms step_avg:74.08ms +[2025-09-11 09:01:30] [Rank 0] step:861/10000 train_time:62943ms step_avg:73.10ms +[2025-09-11 09:01:30] [Rank 0] step:861/10000 train_time:62943ms step_avg:73.10ms +[2025-09-11 09:01:31] [Rank 0] step:881/10000 train_time:63584ms step_avg:72.17ms +[2025-09-11 09:01:31] [Rank 0] step:881/10000 train_time:63584ms step_avg:72.17ms +[2025-09-11 09:01:31] [Rank 0] step:901/10000 train_time:64226ms step_avg:71.28ms +[2025-09-11 09:01:31] [Rank 0] step:901/10000 train_time:64226ms step_avg:71.28ms +[2025-09-11 09:01:32] [Rank 0] 
step:921/10000 train_time:64867ms step_avg:70.43ms +[2025-09-11 09:01:32] [Rank 0] step:921/10000 train_time:64867ms step_avg:70.43ms +[2025-09-11 09:01:33] [Rank 0] step:941/10000 train_time:65508ms step_avg:69.62ms +[2025-09-11 09:01:33] [Rank 0] step:941/10000 train_time:65508ms step_avg:69.62ms +[2025-09-11 09:01:33] [Rank 0] step:961/10000 train_time:66150ms step_avg:68.83ms +[2025-09-11 09:01:33] [Rank 0] step:961/10000 train_time:66150ms step_avg:68.83ms +[2025-09-11 09:01:34] [Rank 0] step:981/10000 train_time:66791ms step_avg:68.08ms +[2025-09-11 09:01:34] [Rank 0] step:981/10000 train_time:66791ms step_avg:68.08ms +[2025-09-11 09:01:34] [Rank 0] step:1001/10000 train_time:67433ms step_avg:67.37ms +[2025-09-11 09:01:34] [Rank 0] step:1001/10000 train_time:67433ms step_avg:67.37ms +[2025-09-11 09:01:35] [Rank 0] step:1021/10000 train_time:68074ms step_avg:66.67ms +[2025-09-11 09:01:35] [Rank 0] step:1021/10000 train_time:68074ms step_avg:66.67ms +[2025-09-11 09:01:36] [Rank 0] step:1041/10000 train_time:68716ms step_avg:66.01ms +[2025-09-11 09:01:36] [Rank 0] step:1041/10000 train_time:68716ms step_avg:66.01ms +[2025-09-11 09:01:36] [Rank 0] step:1061/10000 train_time:69356ms step_avg:65.37ms +[2025-09-11 09:01:36] [Rank 0] step:1061/10000 train_time:69356ms step_avg:65.37ms +[2025-09-11 09:01:37] [Rank 0] step:1081/10000 train_time:69997ms step_avg:64.75ms +[2025-09-11 09:01:37] [Rank 0] step:1081/10000 train_time:69997ms step_avg:64.75ms +[2025-09-11 09:01:38] [Rank 0] step:1101/10000 train_time:70638ms step_avg:64.16ms +[2025-09-11 09:01:38] [Rank 0] step:1101/10000 train_time:70638ms step_avg:64.16ms +[2025-09-11 09:01:38] [Rank 0] step:1121/10000 train_time:71279ms step_avg:63.59ms +[2025-09-11 09:01:38] [Rank 0] step:1121/10000 train_time:71279ms step_avg:63.59ms +[2025-09-11 09:01:39] [Rank 0] step:1141/10000 train_time:71921ms step_avg:63.03ms +[2025-09-11 09:01:39] [Rank 0] step:1141/10000 train_time:71921ms step_avg:63.03ms +[2025-09-11 09:01:40] 
[Rank 0] step:1161/10000 train_time:72562ms step_avg:62.50ms +[2025-09-11 09:01:40] [Rank 0] step:1161/10000 train_time:72562ms step_avg:62.50ms +[2025-09-11 09:01:40] [Rank 0] step:1181/10000 train_time:73204ms step_avg:61.98ms +[2025-09-11 09:01:40] [Rank 0] step:1181/10000 train_time:73204ms step_avg:61.98ms +[2025-09-11 09:01:41] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 09:01:41] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 09:01:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 09:01:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 09:01:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 09:01:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 09:01:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:01:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:01:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 09:01:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 09:01:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 09:01:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 09:01:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 09:01:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 09:01:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... 
+[2025-09-11 09:01:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 09:01:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 09:01:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 09:01:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 09:01:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 09:01:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 09:01:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 09:01:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 09:01:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 09:01:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 09:01:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 09:01:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 09:01:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 09:01:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 09:01:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 09:01:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 09:01:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 09:01:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 09:01:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 09:01:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... 
+[2025-09-11 09:01:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 09:01:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 09:01:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 09:01:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 09:01:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 09:01:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 09:01:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 09:01:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 09:01:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 09:01:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:01:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:01:51] [Rank 0] PRINT: step:1200/10000 val_loss:5.1500 total_sharp:4.2183e-04 L1_sharp:1.0755e-02 L2_sharp:1.6501e-03 L3_sharp:8.2772e-04 L4_sharp:1.1396e-03 L5_sharp:1.6249e-03 L6_sharp:1.1875e-03 L7_sharp:1.1312e-03 L8_sharp:2.8866e-03 L9_sharp:2.2093e-03 L10_sharp:2.2186e-03 L11_sharp:3.2489e-03 L12_sharp:1.6715e-02 total_fnorm:7.9500e+01 total_l1_linf:1.9149e+05 total_spectral:4.0000e+01 L1_fnorm:2.5000e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.4688e+00 L5_fnorm:2.4531e+00 L6_fnorm:2.4844e+00 L7_fnorm:2.4688e+00 L8_fnorm:2.4531e+00 L9_fnorm:2.4844e+00 L10_fnorm:2.5000e+00 L11_fnorm:2.5000e+00 L12_fnorm:2.4844e+00 L1_l1linf:7.5000e-01 L2_l1linf:7.3828e-01 L3_l1linf:7.1484e-01 L4_l1linf:7.1875e-01 L5_l1linf:7.1875e-01 L6_l1linf:7.1094e-01 L7_l1linf:7.1875e-01 L8_l1linf:7.2266e-01 L9_l1linf:7.1875e-01 L10_l1linf:7.2656e-01 L11_l1linf:7.3438e-01 L12_l1linf:7.1094e-01 L1_spectral:2.7908e-02 L2_spectral:2.7704e-02 L3_spectral:2.7645e-02 L4_spectral:2.7616e-02 L5_spectral:2.7731e-02 L6_spectral:2.7609e-02 L7_spectral:2.7614e-02 L8_spectral:2.8705e-02 L9_spectral:2.7747e-02 L10_spectral:2.7688e-02 L11_spectral:2.7652e-02 L12_spectral:2.7735e-02 train_time:73827ms step_avg:61.52ms +[2025-09-11 09:01:51] [Rank 0] PRINT: step:1200/10000 val_loss:5.1500 total_sharp:4.2183e-04 L1_sharp:1.0755e-02 L2_sharp:1.6501e-03 L3_sharp:8.2772e-04 L4_sharp:1.1396e-03 L5_sharp:1.6249e-03 L6_sharp:1.1875e-03 L7_sharp:1.1312e-03 L8_sharp:2.8866e-03 L9_sharp:2.2093e-03 L10_sharp:2.2186e-03 L11_sharp:3.2489e-03 L12_sharp:1.6715e-02 total_fnorm:7.9500e+01 total_l1_linf:1.9149e+05 total_spectral:4.0000e+01 L1_fnorm:2.5000e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.4688e+00 L5_fnorm:2.4531e+00 L6_fnorm:2.4844e+00 L7_fnorm:2.4688e+00 L8_fnorm:2.4531e+00 L9_fnorm:2.4844e+00 L10_fnorm:2.5000e+00 L11_fnorm:2.5000e+00 L12_fnorm:2.4844e+00 L1_l1linf:7.5000e-01 L2_l1linf:7.3828e-01 L3_l1linf:7.1484e-01 L4_l1linf:7.1875e-01 L5_l1linf:7.1875e-01 
L6_l1linf:7.1094e-01 L7_l1linf:7.1875e-01 L8_l1linf:7.2266e-01 L9_l1linf:7.1875e-01 L10_l1linf:7.2656e-01 L11_l1linf:7.3438e-01 L12_l1linf:7.1094e-01 L1_spectral:2.7908e-02 L2_spectral:2.7704e-02 L3_spectral:2.7645e-02 L4_spectral:2.7616e-02 L5_spectral:2.7731e-02 L6_spectral:2.7609e-02 L7_spectral:2.7614e-02 L8_spectral:2.8705e-02 L9_spectral:2.7747e-02 L10_spectral:2.7688e-02 L11_spectral:2.7652e-02 L12_spectral:2.7735e-02 train_time:73827ms step_avg:61.52ms +[2025-09-11 09:01:52] [Rank 0] step:1201/10000 train_time:75379ms step_avg:62.76ms +[2025-09-11 09:01:52] [Rank 0] step:1201/10000 train_time:75379ms step_avg:62.76ms +[2025-09-11 09:01:53] [Rank 0] step:1221/10000 train_time:76315ms step_avg:62.50ms +[2025-09-11 09:01:53] [Rank 0] step:1221/10000 train_time:76315ms step_avg:62.50ms +[2025-09-11 09:01:54] [Rank 0] step:1241/10000 train_time:77219ms step_avg:62.22ms +[2025-09-11 09:01:54] [Rank 0] step:1241/10000 train_time:77219ms step_avg:62.22ms +[2025-09-11 09:01:55] [Rank 0] step:1261/10000 train_time:77862ms step_avg:61.75ms +[2025-09-11 09:01:55] [Rank 0] step:1261/10000 train_time:77862ms step_avg:61.75ms +[2025-09-11 09:01:56] [Rank 0] step:1281/10000 train_time:78778ms step_avg:61.50ms +[2025-09-11 09:01:56] [Rank 0] step:1281/10000 train_time:78778ms step_avg:61.50ms +[2025-09-11 09:01:56] [Rank 0] step:1301/10000 train_time:79420ms step_avg:61.05ms +[2025-09-11 09:01:56] [Rank 0] step:1301/10000 train_time:79420ms step_avg:61.05ms +[2025-09-11 09:01:57] [Rank 0] step:1321/10000 train_time:80063ms step_avg:60.61ms +[2025-09-11 09:01:57] [Rank 0] step:1321/10000 train_time:80063ms step_avg:60.61ms +[2025-09-11 09:01:58] [Rank 0] step:1341/10000 train_time:80707ms step_avg:60.18ms +[2025-09-11 09:01:58] [Rank 0] step:1341/10000 train_time:80707ms step_avg:60.18ms +[2025-09-11 09:01:58] [Rank 0] step:1361/10000 train_time:81349ms step_avg:59.77ms +[2025-09-11 09:01:58] [Rank 0] step:1361/10000 train_time:81349ms step_avg:59.77ms +[2025-09-11 09:01:59] 
[Rank 0] step:1381/10000 train_time:81991ms step_avg:59.37ms +[2025-09-11 09:01:59] [Rank 0] step:1381/10000 train_time:81991ms step_avg:59.37ms +[2025-09-11 09:02:00] [Rank 0] step:1401/10000 train_time:82634ms step_avg:58.98ms +[2025-09-11 09:02:00] [Rank 0] step:1401/10000 train_time:82634ms step_avg:58.98ms +[2025-09-11 09:02:00] [Rank 0] step:1421/10000 train_time:83275ms step_avg:58.60ms +[2025-09-11 09:02:00] [Rank 0] step:1421/10000 train_time:83275ms step_avg:58.60ms +[2025-09-11 09:02:01] [Rank 0] step:1441/10000 train_time:83918ms step_avg:58.24ms +[2025-09-11 09:02:01] [Rank 0] step:1441/10000 train_time:83918ms step_avg:58.24ms +[2025-09-11 09:02:02] [Rank 0] step:1461/10000 train_time:84561ms step_avg:57.88ms +[2025-09-11 09:02:02] [Rank 0] step:1461/10000 train_time:84561ms step_avg:57.88ms +[2025-09-11 09:02:02] [Rank 0] step:1481/10000 train_time:85204ms step_avg:57.53ms +[2025-09-11 09:02:02] [Rank 0] step:1481/10000 train_time:85204ms step_avg:57.53ms +[2025-09-11 09:02:03] [Rank 0] step:1501/10000 train_time:85851ms step_avg:57.20ms +[2025-09-11 09:02:03] [Rank 0] step:1501/10000 train_time:85851ms step_avg:57.20ms +[2025-09-11 09:02:04] [Rank 0] step:1521/10000 train_time:86497ms step_avg:56.87ms +[2025-09-11 09:02:04] [Rank 0] step:1521/10000 train_time:86497ms step_avg:56.87ms +[2025-09-11 09:02:04] [Rank 0] step:1541/10000 train_time:87144ms step_avg:56.55ms +[2025-09-11 09:02:04] [Rank 0] step:1541/10000 train_time:87144ms step_avg:56.55ms +[2025-09-11 09:02:05] [Rank 0] step:1561/10000 train_time:87793ms step_avg:56.24ms +[2025-09-11 09:02:05] [Rank 0] step:1561/10000 train_time:87793ms step_avg:56.24ms +[2025-09-11 09:02:05] [Rank 0] step:1581/10000 train_time:88438ms step_avg:55.94ms +[2025-09-11 09:02:05] [Rank 0] step:1581/10000 train_time:88438ms step_avg:55.94ms +[2025-09-11 09:02:06] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 09:02:06] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 09:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 09:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 09:02:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 09:02:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 09:02:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:02:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:02:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 09:02:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 09:02:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 09:02:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 09:02:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 09:02:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 09:02:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 09:02:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 09:02:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 09:02:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 09:02:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 09:02:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 09:02:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 09:02:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 09:02:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 09:02:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 09:02:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 09:02:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 09:02:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 09:02:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 09:02:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 09:02:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 09:02:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 09:02:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 09:02:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 09:02:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 09:02:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 09:02:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 09:02:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 09:02:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 09:02:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 09:02:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 09:02:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 09:02:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 09:02:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 09:02:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 09:02:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:02:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:02:16] [Rank 0] PRINT: step:1600/10000 val_loss:4.9885 total_sharp:3.1802e-04 L1_sharp:6.6315e-03 L2_sharp:8.0765e-04 L3_sharp:4.1646e-04 L4_sharp:6.2989e-04 L5_sharp:1.0114e-03 L6_sharp:9.3600e-04 L7_sharp:1.0151e-03 L8_sharp:2.2074e-03 L9_sharp:1.3993e-03 L10_sharp:1.5037e-03 L11_sharp:2.3767e-03 L12_sharp:1.1372e-02 total_fnorm:7.6000e+01 total_l1_linf:1.7510e+05 total_spectral:3.8250e+01 L1_fnorm:2.5000e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.4844e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5000e+00 L7_fnorm:2.5000e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5000e+00 L10_fnorm:2.5000e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.5156e+00 L1_l1linf:7.3438e-01 L2_l1linf:7.0703e-01 L3_l1linf:6.9141e-01 L4_l1linf:6.9141e-01 L5_l1linf:6.8359e-01 L6_l1linf:6.9141e-01 L7_l1linf:6.9141e-01 L8_l1linf:6.9141e-01 L9_l1linf:6.9922e-01 L10_l1linf:6.8750e-01 L11_l1linf:6.9531e-01 L12_l1linf:6.9141e-01 L1_spectral:2.8932e-02 L2_spectral:2.8481e-02 L3_spectral:2.8402e-02 L4_spectral:2.8364e-02 L5_spectral:2.8533e-02 L6_spectral:2.8461e-02 L7_spectral:2.8426e-02 L8_spectral:2.9472e-02 L9_spectral:2.8637e-02 L10_spectral:2.8552e-02 L11_spectral:2.8645e-02 L12_spectral:2.8897e-02 train_time:89067ms step_avg:55.67ms +[2025-09-11 09:02:16] [Rank 0] PRINT: step:1600/10000 
val_loss:4.9885 total_sharp:3.1802e-04 L1_sharp:6.6315e-03 L2_sharp:8.0765e-04 L3_sharp:4.1646e-04 L4_sharp:6.2989e-04 L5_sharp:1.0114e-03 L6_sharp:9.3600e-04 L7_sharp:1.0151e-03 L8_sharp:2.2074e-03 L9_sharp:1.3993e-03 L10_sharp:1.5037e-03 L11_sharp:2.3767e-03 L12_sharp:1.1372e-02 total_fnorm:7.6000e+01 total_l1_linf:1.7510e+05 total_spectral:3.8250e+01 L1_fnorm:2.5000e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.4844e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5000e+00 L7_fnorm:2.5000e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5000e+00 L10_fnorm:2.5000e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.5156e+00 L1_l1linf:7.3438e-01 L2_l1linf:7.0703e-01 L3_l1linf:6.9141e-01 L4_l1linf:6.9141e-01 L5_l1linf:6.8359e-01 L6_l1linf:6.9141e-01 L7_l1linf:6.9141e-01 L8_l1linf:6.9141e-01 L9_l1linf:6.9922e-01 L10_l1linf:6.8750e-01 L11_l1linf:6.9531e-01 L12_l1linf:6.9141e-01 L1_spectral:2.8932e-02 L2_spectral:2.8481e-02 L3_spectral:2.8402e-02 L4_spectral:2.8364e-02 L5_spectral:2.8533e-02 L6_spectral:2.8461e-02 L7_spectral:2.8426e-02 L8_spectral:2.9472e-02 L9_spectral:2.8637e-02 L10_spectral:2.8552e-02 L11_spectral:2.8645e-02 L12_spectral:2.8897e-02 train_time:89067ms step_avg:55.67ms +[2025-09-11 09:02:18] [Rank 0] step:1601/10000 train_time:90676ms step_avg:56.64ms +[2025-09-11 09:02:18] [Rank 0] step:1601/10000 train_time:90676ms step_avg:56.64ms +[2025-09-11 09:02:19] [Rank 0] step:1621/10000 train_time:91327ms step_avg:56.34ms +[2025-09-11 09:02:19] [Rank 0] step:1621/10000 train_time:91327ms step_avg:56.34ms +[2025-09-11 09:02:19] [Rank 0] step:1641/10000 train_time:91976ms step_avg:56.05ms +[2025-09-11 09:02:19] [Rank 0] step:1641/10000 train_time:91976ms step_avg:56.05ms +[2025-09-11 09:02:20] [Rank 0] step:1661/10000 train_time:92624ms step_avg:55.76ms +[2025-09-11 09:02:20] [Rank 0] step:1661/10000 train_time:92624ms step_avg:55.76ms +[2025-09-11 09:02:21] [Rank 0] step:1681/10000 train_time:93270ms step_avg:55.49ms +[2025-09-11 09:02:21] [Rank 0] step:1681/10000 train_time:93270ms 
step_avg:55.49ms +[2025-09-11 09:02:21] [Rank 0] step:1701/10000 train_time:93917ms step_avg:55.21ms +[2025-09-11 09:02:21] [Rank 0] step:1701/10000 train_time:93917ms step_avg:55.21ms +[2025-09-11 09:02:22] [Rank 0] step:1721/10000 train_time:94563ms step_avg:54.95ms +[2025-09-11 09:02:22] [Rank 0] step:1721/10000 train_time:94563ms step_avg:54.95ms +[2025-09-11 09:02:23] [Rank 0] step:1741/10000 train_time:95210ms step_avg:54.69ms +[2025-09-11 09:02:23] [Rank 0] step:1741/10000 train_time:95210ms step_avg:54.69ms +[2025-09-11 09:02:23] [Rank 0] step:1761/10000 train_time:95857ms step_avg:54.43ms +[2025-09-11 09:02:23] [Rank 0] step:1761/10000 train_time:95857ms step_avg:54.43ms +[2025-09-11 09:02:24] [Rank 0] step:1781/10000 train_time:96503ms step_avg:54.18ms +[2025-09-11 09:02:24] [Rank 0] step:1781/10000 train_time:96503ms step_avg:54.18ms +[2025-09-11 09:02:25] [Rank 0] step:1801/10000 train_time:97150ms step_avg:53.94ms +[2025-09-11 09:02:25] [Rank 0] step:1801/10000 train_time:97150ms step_avg:53.94ms +[2025-09-11 09:02:25] [Rank 0] step:1821/10000 train_time:97796ms step_avg:53.70ms +[2025-09-11 09:02:25] [Rank 0] step:1821/10000 train_time:97796ms step_avg:53.70ms +[2025-09-11 09:02:26] [Rank 0] step:1841/10000 train_time:98443ms step_avg:53.47ms +[2025-09-11 09:02:26] [Rank 0] step:1841/10000 train_time:98443ms step_avg:53.47ms +[2025-09-11 09:02:27] [Rank 0] step:1861/10000 train_time:99089ms step_avg:53.25ms +[2025-09-11 09:02:27] [Rank 0] step:1861/10000 train_time:99089ms step_avg:53.25ms +[2025-09-11 09:02:27] [Rank 0] step:1881/10000 train_time:99746ms step_avg:53.03ms +[2025-09-11 09:02:27] [Rank 0] step:1881/10000 train_time:99746ms step_avg:53.03ms +[2025-09-11 09:02:28] [Rank 0] step:1901/10000 train_time:100393ms step_avg:52.81ms +[2025-09-11 09:02:28] [Rank 0] step:1901/10000 train_time:100393ms step_avg:52.81ms +[2025-09-11 09:02:28] [Rank 0] step:1921/10000 train_time:101047ms step_avg:52.60ms +[2025-09-11 09:02:28] [Rank 0] step:1921/10000 
train_time:101047ms step_avg:52.60ms +[2025-09-11 09:02:29] [Rank 0] step:1941/10000 train_time:101703ms step_avg:52.40ms +[2025-09-11 09:02:29] [Rank 0] step:1941/10000 train_time:101703ms step_avg:52.40ms +[2025-09-11 09:02:30] [Rank 0] step:1961/10000 train_time:102350ms step_avg:52.19ms +[2025-09-11 09:02:30] [Rank 0] step:1961/10000 train_time:102350ms step_avg:52.19ms +[2025-09-11 09:02:30] [Rank 0] step:1981/10000 train_time:102997ms step_avg:51.99ms +[2025-09-11 09:02:30] [Rank 0] step:1981/10000 train_time:102997ms step_avg:51.99ms +[2025-09-11 09:02:31] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 09:02:31] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 09:02:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 09:02:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 09:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 09:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 09:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 09:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 09:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 09:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 09:02:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 09:02:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 09:02:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 09:02:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 09:02:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 09:02:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 09:02:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 09:02:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 09:02:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 09:02:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 09:02:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 09:02:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 09:02:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 09:02:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 09:02:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 09:02:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 09:02:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 09:02:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 09:02:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 09:02:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 09:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 09:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 09:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 09:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 09:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 09:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 09:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 09:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 09:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 09:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 09:02:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 09:02:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 09:02:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:02:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:02:44] [Rank 0] PRINT: step:2000/10000 val_loss:4.8609 total_sharp:2.3506e-04 L1_sharp:4.8426e-03 L2_sharp:8.7158e-04 L3_sharp:6.5237e-04 L4_sharp:5.3982e-04 L5_sharp:1.0509e-03 L6_sharp:6.7005e-04 L7_sharp:8.9281e-04 L8_sharp:1.9265e-03 L9_sharp:1.5058e-03 L10_sharp:1.3640e-03 L11_sharp:2.0112e-03 L12_sharp:1.2993e-02 total_fnorm:7.8000e+01 total_l1_linf:1.8330e+05 total_spectral:3.9250e+01 L1_fnorm:2.5000e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.4844e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.5312e+00 L1_l1linf:7.2266e-01 L2_l1linf:6.9531e-01 L3_l1linf:6.7188e-01 L4_l1linf:6.6797e-01 L5_l1linf:6.6797e-01 L6_l1linf:6.6797e-01 L7_l1linf:6.7188e-01 L8_l1linf:6.7578e-01 L9_l1linf:6.6406e-01 L10_l1linf:6.7578e-01 L11_l1linf:6.7188e-01 L12_l1linf:6.8750e-01 L1_spectral:2.9501e-02 L2_spectral:2.9026e-02 L3_spectral:2.8909e-02 L4_spectral:2.8995e-02 L5_spectral:2.9087e-02 L6_spectral:2.9019e-02 L7_spectral:2.9016e-02 L8_spectral:3.0141e-02 L9_spectral:2.9401e-02 L10_spectral:2.9277e-02 L11_spectral:2.9207e-02 L12_spectral:2.9811e-02 train_time:103625ms step_avg:51.81ms +[2025-09-11 09:02:44] [Rank 0] PRINT: step:2000/10000 val_loss:4.8609 total_sharp:2.3506e-04 L1_sharp:4.8426e-03 L2_sharp:8.7158e-04 L3_sharp:6.5237e-04 L4_sharp:5.3982e-04 L5_sharp:1.0509e-03 L6_sharp:6.7005e-04 L7_sharp:8.9281e-04 L8_sharp:1.9265e-03 L9_sharp:1.5058e-03 L10_sharp:1.3640e-03 L11_sharp:2.0112e-03 L12_sharp:1.2993e-02 total_fnorm:7.8000e+01 total_l1_linf:1.8330e+05 total_spectral:3.9250e+01 L1_fnorm:2.5000e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.4844e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.5312e+00 L1_l1linf:7.2266e-01 L2_l1linf:6.9531e-01 L3_l1linf:6.7188e-01 L4_l1linf:6.6797e-01 L5_l1linf:6.6797e-01 
L6_l1linf:6.6797e-01 L7_l1linf:6.7188e-01 L8_l1linf:6.7578e-01 L9_l1linf:6.6406e-01 L10_l1linf:6.7578e-01 L11_l1linf:6.7188e-01 L12_l1linf:6.8750e-01 L1_spectral:2.9501e-02 L2_spectral:2.9026e-02 L3_spectral:2.8909e-02 L4_spectral:2.8995e-02 L5_spectral:2.9087e-02 L6_spectral:2.9019e-02 L7_spectral:2.9016e-02 L8_spectral:3.0141e-02 L9_spectral:2.9401e-02 L10_spectral:2.9277e-02 L11_spectral:2.9207e-02 L12_spectral:2.9811e-02 train_time:103625ms step_avg:51.81ms +[2025-09-11 09:02:46] [Rank 0] step:2001/10000 train_time:105253ms step_avg:52.60ms +[2025-09-11 09:02:46] [Rank 0] step:2001/10000 train_time:105253ms step_avg:52.60ms +[2025-09-11 09:02:46] [Rank 0] step:2021/10000 train_time:105903ms step_avg:52.40ms +[2025-09-11 09:02:46] [Rank 0] step:2021/10000 train_time:105903ms step_avg:52.40ms +[2025-09-11 09:02:47] [Rank 0] step:2041/10000 train_time:106550ms step_avg:52.20ms +[2025-09-11 09:02:47] [Rank 0] step:2041/10000 train_time:106550ms step_avg:52.20ms +[2025-09-11 09:02:48] [Rank 0] step:2061/10000 train_time:107197ms step_avg:52.01ms +[2025-09-11 09:02:48] [Rank 0] step:2061/10000 train_time:107197ms step_avg:52.01ms +[2025-09-11 09:02:48] [Rank 0] step:2081/10000 train_time:107843ms step_avg:51.82ms +[2025-09-11 09:02:48] [Rank 0] step:2081/10000 train_time:107843ms step_avg:51.82ms +[2025-09-11 09:02:49] [Rank 0] step:2101/10000 train_time:108492ms step_avg:51.64ms +[2025-09-11 09:02:49] [Rank 0] step:2101/10000 train_time:108492ms step_avg:51.64ms +[2025-09-11 09:02:50] [Rank 0] step:2121/10000 train_time:109139ms step_avg:51.46ms +[2025-09-11 09:02:50] [Rank 0] step:2121/10000 train_time:109139ms step_avg:51.46ms +[2025-09-11 09:02:50] [Rank 0] step:2141/10000 train_time:109786ms step_avg:51.28ms +[2025-09-11 09:02:50] [Rank 0] step:2141/10000 train_time:109786ms step_avg:51.28ms +[2025-09-11 09:02:51] [Rank 0] step:2161/10000 train_time:110431ms step_avg:51.10ms +[2025-09-11 09:02:51] [Rank 0] step:2161/10000 train_time:110431ms step_avg:51.10ms 
+[2025-09-11 09:02:52] [Rank 0] step:2181/10000 train_time:111077ms step_avg:50.93ms +[2025-09-11 09:02:52] [Rank 0] step:2181/10000 train_time:111077ms step_avg:50.93ms +[2025-09-11 09:02:52] [Rank 0] step:2201/10000 train_time:111722ms step_avg:50.76ms +[2025-09-11 09:02:52] [Rank 0] step:2201/10000 train_time:111722ms step_avg:50.76ms +[2025-09-11 09:02:53] [Rank 0] step:2221/10000 train_time:112367ms step_avg:50.59ms +[2025-09-11 09:02:53] [Rank 0] step:2221/10000 train_time:112367ms step_avg:50.59ms +[2025-09-11 09:02:54] [Rank 0] step:2241/10000 train_time:113024ms step_avg:50.43ms +[2025-09-11 09:02:54] [Rank 0] step:2241/10000 train_time:113024ms step_avg:50.43ms +[2025-09-11 09:02:54] [Rank 0] step:2261/10000 train_time:113683ms step_avg:50.28ms +[2025-09-11 09:02:54] [Rank 0] step:2261/10000 train_time:113683ms step_avg:50.28ms +[2025-09-11 09:02:55] [Rank 0] step:2281/10000 train_time:114342ms step_avg:50.13ms +[2025-09-11 09:02:55] [Rank 0] step:2281/10000 train_time:114342ms step_avg:50.13ms +[2025-09-11 09:02:56] [Rank 0] step:2301/10000 train_time:115001ms step_avg:49.98ms +[2025-09-11 09:02:56] [Rank 0] step:2301/10000 train_time:115001ms step_avg:49.98ms +[2025-09-11 09:02:57] [Rank 0] step:2321/10000 train_time:115912ms step_avg:49.94ms +[2025-09-11 09:02:57] [Rank 0] step:2321/10000 train_time:115912ms step_avg:49.94ms +[2025-09-11 09:02:57] [Rank 0] step:2341/10000 train_time:116801ms step_avg:49.89ms +[2025-09-11 09:02:57] [Rank 0] step:2341/10000 train_time:116801ms step_avg:49.89ms +[2025-09-11 09:02:58] [Rank 0] step:2361/10000 train_time:117460ms step_avg:49.75ms +[2025-09-11 09:02:58] [Rank 0] step:2361/10000 train_time:117460ms step_avg:49.75ms +[2025-09-11 09:02:59] [Rank 0] step:2381/10000 train_time:118399ms step_avg:49.73ms +[2025-09-11 09:02:59] [Rank 0] step:2381/10000 train_time:118399ms step_avg:49.73ms +[2025-09-11 09:03:00] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 09:03:00] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 09:03:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 09:03:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 09:03:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 09:03:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 09:03:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:03:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:03:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 09:03:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 09:03:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 09:03:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 09:03:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 09:03:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 09:03:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 09:03:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 09:03:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 09:03:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 09:03:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 09:03:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 09:03:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 09:03:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 09:03:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 09:03:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 09:03:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 09:03:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 09:03:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 09:03:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 09:03:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 09:03:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 09:03:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 09:03:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 09:03:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 09:03:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 09:03:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 09:03:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 09:03:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 09:03:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 09:03:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 09:03:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 09:03:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 09:03:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 09:03:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 09:03:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 09:03:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:03:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:03:10] [Rank 0] PRINT: step:2400/10000 val_loss:4.7409 total_sharp:2.3018e-04 L1_sharp:5.3935e-03 L2_sharp:2.7674e-04 L3_sharp:4.0718e-04 L4_sharp:5.4850e-04 L5_sharp:1.3617e-03 L6_sharp:5.0828e-04 L7_sharp:6.9401e-04 L8_sharp:1.6555e-03 L9_sharp:1.3996e-03 L10_sharp:1.2191e-03 L11_sharp:1.8702e-03 L12_sharp:9.9301e-03 total_fnorm:7.3500e+01 total_l1_linf:1.6691e+05 total_spectral:3.7000e+01 L1_fnorm:2.5000e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4844e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.5312e+00 L1_l1linf:7.1094e-01 L2_l1linf:6.7969e-01 L3_l1linf:6.6016e-01 L4_l1linf:6.5625e-01 L5_l1linf:6.5625e-01 L6_l1linf:6.6797e-01 L7_l1linf:6.6016e-01 L8_l1linf:6.5625e-01 L9_l1linf:6.6016e-01 L10_l1linf:6.6016e-01 L11_l1linf:6.5625e-01 L12_l1linf:6.8750e-01 L1_spectral:3.0017e-02 L2_spectral:2.9329e-02 L3_spectral:2.9556e-02 L4_spectral:2.9571e-02 L5_spectral:2.9647e-02 L6_spectral:2.9502e-02 L7_spectral:2.9653e-02 L8_spectral:3.0515e-02 L9_spectral:2.9864e-02 L10_spectral:2.9856e-02 L11_spectral:2.9962e-02 L12_spectral:3.0341e-02 train_time:119039ms step_avg:49.60ms +[2025-09-11 09:03:10] [Rank 0] PRINT: step:2400/10000 
val_loss:4.7409 total_sharp:2.3018e-04 L1_sharp:5.3935e-03 L2_sharp:2.7674e-04 L3_sharp:4.0718e-04 L4_sharp:5.4850e-04 L5_sharp:1.3617e-03 L6_sharp:5.0828e-04 L7_sharp:6.9401e-04 L8_sharp:1.6555e-03 L9_sharp:1.3996e-03 L10_sharp:1.2191e-03 L11_sharp:1.8702e-03 L12_sharp:9.9301e-03 total_fnorm:7.3500e+01 total_l1_linf:1.6691e+05 total_spectral:3.7000e+01 L1_fnorm:2.5000e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4844e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.5312e+00 L1_l1linf:7.1094e-01 L2_l1linf:6.7969e-01 L3_l1linf:6.6016e-01 L4_l1linf:6.5625e-01 L5_l1linf:6.5625e-01 L6_l1linf:6.6797e-01 L7_l1linf:6.6016e-01 L8_l1linf:6.5625e-01 L9_l1linf:6.6016e-01 L10_l1linf:6.6016e-01 L11_l1linf:6.5625e-01 L12_l1linf:6.8750e-01 L1_spectral:3.0017e-02 L2_spectral:2.9329e-02 L3_spectral:2.9556e-02 L4_spectral:2.9571e-02 L5_spectral:2.9647e-02 L6_spectral:2.9502e-02 L7_spectral:2.9653e-02 L8_spectral:3.0515e-02 L9_spectral:2.9864e-02 L10_spectral:2.9856e-02 L11_spectral:2.9962e-02 L12_spectral:3.0341e-02 train_time:119039ms step_avg:49.60ms +[2025-09-11 09:03:12] [Rank 0] step:2401/10000 train_time:120643ms step_avg:50.25ms +[2025-09-11 09:03:12] [Rank 0] step:2401/10000 train_time:120643ms step_avg:50.25ms +[2025-09-11 09:03:12] [Rank 0] step:2421/10000 train_time:121307ms step_avg:50.11ms +[2025-09-11 09:03:12] [Rank 0] step:2421/10000 train_time:121307ms step_avg:50.11ms +[2025-09-11 09:03:13] [Rank 0] step:2441/10000 train_time:121968ms step_avg:49.97ms +[2025-09-11 09:03:13] [Rank 0] step:2441/10000 train_time:121968ms step_avg:49.97ms +[2025-09-11 09:03:14] [Rank 0] step:2461/10000 train_time:122627ms step_avg:49.83ms +[2025-09-11 09:03:14] [Rank 0] step:2461/10000 train_time:122627ms step_avg:49.83ms +[2025-09-11 09:03:14] [Rank 0] step:2481/10000 train_time:123288ms step_avg:49.69ms +[2025-09-11 09:03:14] [Rank 0] step:2481/10000 
train_time:123288ms step_avg:49.69ms +[2025-09-11 09:03:15] [Rank 0] step:2501/10000 train_time:123948ms step_avg:49.56ms +[2025-09-11 09:03:15] [Rank 0] step:2501/10000 train_time:123948ms step_avg:49.56ms +[2025-09-11 09:03:16] [Rank 0] step:2521/10000 train_time:124608ms step_avg:49.43ms +[2025-09-11 09:03:16] [Rank 0] step:2521/10000 train_time:124608ms step_avg:49.43ms +[2025-09-11 09:03:16] [Rank 0] step:2541/10000 train_time:125268ms step_avg:49.30ms +[2025-09-11 09:03:16] [Rank 0] step:2541/10000 train_time:125268ms step_avg:49.30ms +[2025-09-11 09:03:17] [Rank 0] step:2561/10000 train_time:125927ms step_avg:49.17ms +[2025-09-11 09:03:17] [Rank 0] step:2561/10000 train_time:125927ms step_avg:49.17ms +[2025-09-11 09:03:18] [Rank 0] step:2581/10000 train_time:126586ms step_avg:49.05ms +[2025-09-11 09:03:18] [Rank 0] step:2581/10000 train_time:126586ms step_avg:49.05ms +[2025-09-11 09:03:18] [Rank 0] step:2601/10000 train_time:127245ms step_avg:48.92ms +[2025-09-11 09:03:18] [Rank 0] step:2601/10000 train_time:127245ms step_avg:48.92ms +[2025-09-11 09:03:19] [Rank 0] step:2621/10000 train_time:127904ms step_avg:48.80ms +[2025-09-11 09:03:19] [Rank 0] step:2621/10000 train_time:127904ms step_avg:48.80ms +[2025-09-11 09:03:20] [Rank 0] step:2641/10000 train_time:128563ms step_avg:48.68ms +[2025-09-11 09:03:20] [Rank 0] step:2641/10000 train_time:128563ms step_avg:48.68ms +[2025-09-11 09:03:20] [Rank 0] step:2661/10000 train_time:129224ms step_avg:48.56ms +[2025-09-11 09:03:20] [Rank 0] step:2661/10000 train_time:129224ms step_avg:48.56ms +[2025-09-11 09:03:21] [Rank 0] step:2681/10000 train_time:129883ms step_avg:48.45ms +[2025-09-11 09:03:21] [Rank 0] step:2681/10000 train_time:129883ms step_avg:48.45ms +[2025-09-11 09:03:22] [Rank 0] step:2701/10000 train_time:130542ms step_avg:48.33ms +[2025-09-11 09:03:22] [Rank 0] step:2701/10000 train_time:130542ms step_avg:48.33ms +[2025-09-11 09:03:22] [Rank 0] step:2721/10000 train_time:131202ms step_avg:48.22ms 
+[2025-09-11 09:03:22] [Rank 0] step:2721/10000 train_time:131202ms step_avg:48.22ms +[2025-09-11 09:03:23] [Rank 0] step:2741/10000 train_time:131860ms step_avg:48.11ms +[2025-09-11 09:03:23] [Rank 0] step:2741/10000 train_time:131860ms step_avg:48.11ms +[2025-09-11 09:03:24] [Rank 0] step:2761/10000 train_time:132519ms step_avg:48.00ms +[2025-09-11 09:03:24] [Rank 0] step:2761/10000 train_time:132519ms step_avg:48.00ms +[2025-09-11 09:03:24] [Rank 0] step:2781/10000 train_time:133178ms step_avg:47.89ms +[2025-09-11 09:03:24] [Rank 0] step:2781/10000 train_time:133178ms step_avg:47.89ms +[2025-09-11 09:03:25] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 09:03:25] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 09:03:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 09:03:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 09:03:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 09:03:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 09:03:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:03:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:03:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 09:03:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 09:03:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 09:03:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 09:03:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 09:03:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 09:03:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 09:03:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 09:03:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 09:03:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 09:03:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 09:03:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 09:03:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 09:03:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 09:03:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 09:03:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 09:03:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 09:03:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 09:03:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 09:03:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 09:03:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 09:03:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 09:03:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 09:03:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 09:03:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 09:03:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 09:03:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 09:03:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 09:03:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 09:03:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 09:03:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 09:03:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 09:03:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 09:03:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 09:03:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 09:03:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 09:03:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:03:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:03:35] [Rank 0] PRINT: step:2800/10000 val_loss:4.6798 total_sharp:2.8302e-04 L1_sharp:7.7113e-03 L2_sharp:2.7285e-04 L3_sharp:3.6657e-04 L4_sharp:3.5890e-04 L5_sharp:7.4828e-04 L6_sharp:7.4365e-04 L7_sharp:8.3419e-04 L8_sharp:1.5903e-03 L9_sharp:1.3242e-03 L10_sharp:1.2065e-03 L11_sharp:1.9164e-03 L12_sharp:9.8760e-03 total_fnorm:6.9500e+01 total_l1_linf:1.5770e+05 total_spectral:3.5000e+01 L1_fnorm:2.5000e+00 L2_fnorm:2.4531e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.4844e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4844e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.5312e+00 L1_l1linf:6.9922e-01 L2_l1linf:6.6406e-01 L3_l1linf:6.3281e-01 L4_l1linf:6.4453e-01 L5_l1linf:6.4453e-01 L6_l1linf:6.5625e-01 L7_l1linf:6.5625e-01 L8_l1linf:6.4844e-01 L9_l1linf:6.4062e-01 L10_l1linf:6.3281e-01 L11_l1linf:6.4453e-01 L12_l1linf:6.6797e-01 L1_spectral:3.0657e-02 L2_spectral:2.9765e-02 L3_spectral:2.9748e-02 L4_spectral:2.9750e-02 L5_spectral:2.9932e-02 L6_spectral:3.0036e-02 L7_spectral:3.0086e-02 L8_spectral:3.1066e-02 L9_spectral:3.0453e-02 L10_spectral:3.0312e-02 L11_spectral:3.0528e-02 L12_spectral:3.0601e-02 train_time:133818ms step_avg:47.79ms +[2025-09-11 09:03:35] [Rank 0] PRINT: step:2800/10000 val_loss:4.6798 total_sharp:2.8302e-04 L1_sharp:7.7113e-03 L2_sharp:2.7285e-04 L3_sharp:3.6657e-04 L4_sharp:3.5890e-04 L5_sharp:7.4828e-04 L6_sharp:7.4365e-04 L7_sharp:8.3419e-04 L8_sharp:1.5903e-03 L9_sharp:1.3242e-03 L10_sharp:1.2065e-03 L11_sharp:1.9164e-03 L12_sharp:9.8760e-03 total_fnorm:6.9500e+01 total_l1_linf:1.5770e+05 total_spectral:3.5000e+01 L1_fnorm:2.5000e+00 L2_fnorm:2.4531e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.4844e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4844e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.5312e+00 L1_l1linf:6.9922e-01 L2_l1linf:6.6406e-01 L3_l1linf:6.3281e-01 L4_l1linf:6.4453e-01 L5_l1linf:6.4453e-01 
L6_l1linf:6.5625e-01 L7_l1linf:6.5625e-01 L8_l1linf:6.4844e-01 L9_l1linf:6.4062e-01 L10_l1linf:6.3281e-01 L11_l1linf:6.4453e-01 L12_l1linf:6.6797e-01 L1_spectral:3.0657e-02 L2_spectral:2.9765e-02 L3_spectral:2.9748e-02 L4_spectral:2.9750e-02 L5_spectral:2.9932e-02 L6_spectral:3.0036e-02 L7_spectral:3.0086e-02 L8_spectral:3.1066e-02 L9_spectral:3.0453e-02 L10_spectral:3.0312e-02 L11_spectral:3.0528e-02 L12_spectral:3.0601e-02 train_time:133818ms step_avg:47.79ms +[2025-09-11 09:03:37] [Rank 0] step:2801/10000 train_time:135527ms step_avg:48.39ms +[2025-09-11 09:03:37] [Rank 0] step:2801/10000 train_time:135527ms step_avg:48.39ms +[2025-09-11 09:03:38] [Rank 0] step:2821/10000 train_time:136279ms step_avg:48.31ms +[2025-09-11 09:03:38] [Rank 0] step:2821/10000 train_time:136279ms step_avg:48.31ms +[2025-09-11 09:03:38] [Rank 0] step:2841/10000 train_time:136940ms step_avg:48.20ms +[2025-09-11 09:03:38] [Rank 0] step:2841/10000 train_time:136940ms step_avg:48.20ms +[2025-09-11 09:03:39] [Rank 0] step:2861/10000 train_time:137600ms step_avg:48.10ms +[2025-09-11 09:03:39] [Rank 0] step:2861/10000 train_time:137600ms step_avg:48.10ms +[2025-09-11 09:03:40] [Rank 0] step:2881/10000 train_time:138259ms step_avg:47.99ms +[2025-09-11 09:03:40] [Rank 0] step:2881/10000 train_time:138259ms step_avg:47.99ms +[2025-09-11 09:03:40] [Rank 0] step:2901/10000 train_time:138918ms step_avg:47.89ms +[2025-09-11 09:03:40] [Rank 0] step:2901/10000 train_time:138918ms step_avg:47.89ms +[2025-09-11 09:03:41] [Rank 0] step:2921/10000 train_time:139577ms step_avg:47.78ms +[2025-09-11 09:03:41] [Rank 0] step:2921/10000 train_time:139577ms step_avg:47.78ms +[2025-09-11 09:03:42] [Rank 0] step:2941/10000 train_time:140236ms step_avg:47.68ms +[2025-09-11 09:03:42] [Rank 0] step:2941/10000 train_time:140236ms step_avg:47.68ms +[2025-09-11 09:03:42] [Rank 0] step:2961/10000 train_time:140896ms step_avg:47.58ms +[2025-09-11 09:03:42] [Rank 0] step:2961/10000 train_time:140896ms step_avg:47.58ms 
+[2025-09-11 09:03:43] [Rank 0] step:2981/10000 train_time:141556ms step_avg:47.49ms +[2025-09-11 09:03:43] [Rank 0] step:2981/10000 train_time:141556ms step_avg:47.49ms +[2025-09-11 09:03:44] [Rank 0] step:3001/10000 train_time:142220ms step_avg:47.39ms +[2025-09-11 09:03:44] [Rank 0] step:3001/10000 train_time:142220ms step_avg:47.39ms +[2025-09-11 09:03:44] [Rank 0] step:3021/10000 train_time:142881ms step_avg:47.30ms +[2025-09-11 09:03:44] [Rank 0] step:3021/10000 train_time:142881ms step_avg:47.30ms +[2025-09-11 09:03:45] [Rank 0] step:3041/10000 train_time:143544ms step_avg:47.20ms +[2025-09-11 09:03:45] [Rank 0] step:3041/10000 train_time:143544ms step_avg:47.20ms +[2025-09-11 09:03:46] [Rank 0] step:3061/10000 train_time:144205ms step_avg:47.11ms +[2025-09-11 09:03:46] [Rank 0] step:3061/10000 train_time:144205ms step_avg:47.11ms +[2025-09-11 09:03:46] [Rank 0] step:3081/10000 train_time:144867ms step_avg:47.02ms +[2025-09-11 09:03:46] [Rank 0] step:3081/10000 train_time:144867ms step_avg:47.02ms +[2025-09-11 09:03:47] [Rank 0] step:3101/10000 train_time:145528ms step_avg:46.93ms +[2025-09-11 09:03:47] [Rank 0] step:3101/10000 train_time:145528ms step_avg:46.93ms +[2025-09-11 09:03:48] [Rank 0] step:3121/10000 train_time:146190ms step_avg:46.84ms +[2025-09-11 09:03:48] [Rank 0] step:3121/10000 train_time:146190ms step_avg:46.84ms +[2025-09-11 09:03:48] [Rank 0] step:3141/10000 train_time:146852ms step_avg:46.75ms +[2025-09-11 09:03:48] [Rank 0] step:3141/10000 train_time:146852ms step_avg:46.75ms +[2025-09-11 09:03:49] [Rank 0] step:3161/10000 train_time:147513ms step_avg:46.67ms +[2025-09-11 09:03:49] [Rank 0] step:3161/10000 train_time:147513ms step_avg:46.67ms +[2025-09-11 09:03:50] [Rank 0] step:3181/10000 train_time:148176ms step_avg:46.58ms +[2025-09-11 09:03:50] [Rank 0] step:3181/10000 train_time:148176ms step_avg:46.58ms +[2025-09-11 09:03:50] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 09:03:50] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 09:03:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 09:03:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 09:03:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 09:03:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 09:03:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:03:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:03:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 09:03:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 09:03:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 09:03:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 09:03:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 09:03:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 09:03:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 09:03:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 09:03:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 09:03:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 09:03:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 09:03:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 09:03:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 09:03:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 09:03:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 09:03:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 09:03:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 09:03:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 09:03:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 09:03:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 09:03:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 09:03:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 09:03:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 09:03:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 09:03:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 09:03:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 09:03:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 09:03:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 09:03:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 09:03:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 09:03:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 09:03:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 09:04:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 09:04:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 09:04:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 09:04:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 09:04:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:04:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:04:00] [Rank 0] PRINT: step:3200/10000 val_loss:4.5899 total_sharp:1.8371e-04 L1_sharp:6.3232e-03 L2_sharp:2.6510e-04 L3_sharp:1.5427e-04 L4_sharp:1.5505e-04 L5_sharp:5.0120e-04 L6_sharp:6.0087e-04 L7_sharp:6.3613e-04 L8_sharp:1.3186e-03 L9_sharp:1.0324e-03 L10_sharp:1.1128e-03 L11_sharp:1.5662e-03 L12_sharp:1.1761e-02 total_fnorm:7.8500e+01 total_l1_linf:1.8330e+05 total_spectral:3.9500e+01 L1_fnorm:2.5000e+00 L2_fnorm:2.4531e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5312e+00 L8_fnorm:2.4844e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.5469e+00 L1_l1linf:6.9531e-01 L2_l1linf:6.5625e-01 L3_l1linf:6.3672e-01 L4_l1linf:6.3672e-01 L5_l1linf:6.4062e-01 L6_l1linf:6.4844e-01 L7_l1linf:6.6797e-01 L8_l1linf:6.4062e-01 L9_l1linf:6.3672e-01 L10_l1linf:6.2109e-01 L11_l1linf:6.1719e-01 L12_l1linf:6.4844e-01 L1_spectral:3.0779e-02 L2_spectral:3.0160e-02 L3_spectral:3.0016e-02 L4_spectral:3.0168e-02 L5_spectral:3.0200e-02 L6_spectral:3.0408e-02 L7_spectral:3.0436e-02 L8_spectral:3.1156e-02 L9_spectral:3.1068e-02 L10_spectral:3.0883e-02 L11_spectral:3.1144e-02 L12_spectral:3.1230e-02 train_time:148820ms step_avg:46.51ms +[2025-09-11 09:04:00] [Rank 0] PRINT: step:3200/10000 
val_loss:4.5899 total_sharp:1.8371e-04 L1_sharp:6.3232e-03 L2_sharp:2.6510e-04 L3_sharp:1.5427e-04 L4_sharp:1.5505e-04 L5_sharp:5.0120e-04 L6_sharp:6.0087e-04 L7_sharp:6.3613e-04 L8_sharp:1.3186e-03 L9_sharp:1.0324e-03 L10_sharp:1.1128e-03 L11_sharp:1.5662e-03 L12_sharp:1.1761e-02 total_fnorm:7.8500e+01 total_l1_linf:1.8330e+05 total_spectral:3.9500e+01 L1_fnorm:2.5000e+00 L2_fnorm:2.4531e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5312e+00 L8_fnorm:2.4844e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.5469e+00 L1_l1linf:6.9531e-01 L2_l1linf:6.5625e-01 L3_l1linf:6.3672e-01 L4_l1linf:6.3672e-01 L5_l1linf:6.4062e-01 L6_l1linf:6.4844e-01 L7_l1linf:6.6797e-01 L8_l1linf:6.4062e-01 L9_l1linf:6.3672e-01 L10_l1linf:6.2109e-01 L11_l1linf:6.1719e-01 L12_l1linf:6.4844e-01 L1_spectral:3.0779e-02 L2_spectral:3.0160e-02 L3_spectral:3.0016e-02 L4_spectral:3.0168e-02 L5_spectral:3.0200e-02 L6_spectral:3.0408e-02 L7_spectral:3.0436e-02 L8_spectral:3.1156e-02 L9_spectral:3.1068e-02 L10_spectral:3.0883e-02 L11_spectral:3.1144e-02 L12_spectral:3.1230e-02 train_time:148820ms step_avg:46.51ms +[2025-09-11 09:04:02] [Rank 0] step:3201/10000 train_time:150420ms step_avg:46.99ms +[2025-09-11 09:04:02] [Rank 0] step:3201/10000 train_time:150420ms step_avg:46.99ms +[2025-09-11 09:04:03] [Rank 0] step:3221/10000 train_time:151172ms step_avg:46.93ms +[2025-09-11 09:04:03] [Rank 0] step:3221/10000 train_time:151172ms step_avg:46.93ms +[2025-09-11 09:04:03] [Rank 0] step:3241/10000 train_time:151837ms step_avg:46.85ms +[2025-09-11 09:04:03] [Rank 0] step:3241/10000 train_time:151837ms step_avg:46.85ms +[2025-09-11 09:04:04] [Rank 0] step:3261/10000 train_time:152501ms step_avg:46.77ms +[2025-09-11 09:04:04] [Rank 0] step:3261/10000 train_time:152501ms step_avg:46.77ms +[2025-09-11 09:04:05] [Rank 0] step:3281/10000 train_time:153164ms step_avg:46.68ms +[2025-09-11 09:04:05] [Rank 0] step:3281/10000 
train_time:153164ms step_avg:46.68ms +[2025-09-11 09:04:05] [Rank 0] step:3301/10000 train_time:153827ms step_avg:46.60ms +[2025-09-11 09:04:05] [Rank 0] step:3301/10000 train_time:153827ms step_avg:46.60ms +[2025-09-11 09:04:06] [Rank 0] step:3321/10000 train_time:154489ms step_avg:46.52ms +[2025-09-11 09:04:06] [Rank 0] step:3321/10000 train_time:154489ms step_avg:46.52ms +[2025-09-11 09:04:07] [Rank 0] step:3341/10000 train_time:155152ms step_avg:46.44ms +[2025-09-11 09:04:07] [Rank 0] step:3341/10000 train_time:155152ms step_avg:46.44ms +[2025-09-11 09:04:07] [Rank 0] step:3361/10000 train_time:155816ms step_avg:46.36ms +[2025-09-11 09:04:07] [Rank 0] step:3361/10000 train_time:155816ms step_avg:46.36ms +[2025-09-11 09:04:08] [Rank 0] step:3381/10000 train_time:156478ms step_avg:46.28ms +[2025-09-11 09:04:08] [Rank 0] step:3381/10000 train_time:156478ms step_avg:46.28ms +[2025-09-11 09:04:09] [Rank 0] step:3401/10000 train_time:157140ms step_avg:46.20ms +[2025-09-11 09:04:09] [Rank 0] step:3401/10000 train_time:157140ms step_avg:46.20ms +[2025-09-11 09:04:09] [Rank 0] step:3421/10000 train_time:157802ms step_avg:46.13ms +[2025-09-11 09:04:09] [Rank 0] step:3421/10000 train_time:157802ms step_avg:46.13ms +[2025-09-11 09:04:10] [Rank 0] step:3441/10000 train_time:158464ms step_avg:46.05ms +[2025-09-11 09:04:10] [Rank 0] step:3441/10000 train_time:158464ms step_avg:46.05ms +[2025-09-11 09:04:11] [Rank 0] step:3461/10000 train_time:159129ms step_avg:45.98ms +[2025-09-11 09:04:11] [Rank 0] step:3461/10000 train_time:159129ms step_avg:45.98ms +[2025-09-11 09:04:11] [Rank 0] step:3481/10000 train_time:159792ms step_avg:45.90ms +[2025-09-11 09:04:11] [Rank 0] step:3481/10000 train_time:159792ms step_avg:45.90ms +[2025-09-11 09:04:12] [Rank 0] step:3501/10000 train_time:160454ms step_avg:45.83ms +[2025-09-11 09:04:12] [Rank 0] step:3501/10000 train_time:160454ms step_avg:45.83ms +[2025-09-11 09:04:13] [Rank 0] step:3521/10000 train_time:161116ms step_avg:45.76ms 
+[2025-09-11 09:04:13] [Rank 0] step:3521/10000 train_time:161116ms step_avg:45.76ms +[2025-09-11 09:04:13] [Rank 0] step:3541/10000 train_time:161778ms step_avg:45.69ms +[2025-09-11 09:04:13] [Rank 0] step:3541/10000 train_time:161778ms step_avg:45.69ms +[2025-09-11 09:04:14] [Rank 0] step:3561/10000 train_time:162440ms step_avg:45.62ms +[2025-09-11 09:04:14] [Rank 0] step:3561/10000 train_time:162440ms step_avg:45.62ms +[2025-09-11 09:04:15] [Rank 0] step:3581/10000 train_time:163102ms step_avg:45.55ms +[2025-09-11 09:04:15] [Rank 0] step:3581/10000 train_time:163102ms step_avg:45.55ms +[2025-09-11 09:04:15] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 09:04:15] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 09:04:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 09:04:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 09:04:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 09:04:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 09:04:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:04:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:04:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 09:04:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 09:04:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 09:04:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 09:04:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 09:04:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 09:04:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 09:04:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 09:04:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 09:04:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 09:04:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 09:04:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 09:04:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 09:04:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 09:04:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 09:04:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 09:04:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 09:04:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 09:04:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 09:04:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 09:04:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 09:04:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 09:04:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 09:04:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 09:04:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 09:04:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 09:04:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 09:04:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 09:04:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 09:04:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 09:04:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 09:04:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 09:04:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 09:04:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 09:04:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 09:04:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 09:04:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:04:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:04:26] [Rank 0] PRINT: step:3600/10000 val_loss:4.5505 total_sharp:2.1414e-04 L1_sharp:4.7383e-03 L2_sharp:4.8742e-04 L3_sharp:1.6919e-04 L4_sharp:2.5971e-04 L5_sharp:2.9900e-04 L6_sharp:4.4186e-04 L7_sharp:4.4560e-04 L8_sharp:1.0764e-03 L9_sharp:1.1574e-03 L10_sharp:1.0760e-03 L11_sharp:1.4549e-03 L12_sharp:9.2845e-03 total_fnorm:7.1000e+01 total_l1_linf:1.5974e+05 total_spectral:3.5750e+01 L1_fnorm:2.5000e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.5156e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5312e+00 L8_fnorm:2.4844e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.5312e+00 L1_l1linf:6.7969e-01 L2_l1linf:6.4062e-01 L3_l1linf:6.1719e-01 L4_l1linf:6.2891e-01 L5_l1linf:6.4453e-01 L6_l1linf:6.4062e-01 L7_l1linf:6.4844e-01 L8_l1linf:6.2891e-01 L9_l1linf:6.2891e-01 L10_l1linf:6.1328e-01 L11_l1linf:6.1328e-01 L12_l1linf:6.3281e-01 L1_spectral:3.1197e-02 L2_spectral:3.0319e-02 L3_spectral:3.0511e-02 L4_spectral:3.0552e-02 L5_spectral:3.0494e-02 L6_spectral:3.0686e-02 L7_spectral:3.0795e-02 L8_spectral:3.1348e-02 L9_spectral:3.1401e-02 L10_spectral:3.1224e-02 L11_spectral:3.1365e-02 L12_spectral:3.1507e-02 train_time:163746ms step_avg:45.48ms +[2025-09-11 09:04:26] [Rank 0] PRINT: step:3600/10000 val_loss:4.5505 total_sharp:2.1414e-04 L1_sharp:4.7383e-03 L2_sharp:4.8742e-04 L3_sharp:1.6919e-04 L4_sharp:2.5971e-04 L5_sharp:2.9900e-04 L6_sharp:4.4186e-04 L7_sharp:4.4560e-04 L8_sharp:1.0764e-03 L9_sharp:1.1574e-03 L10_sharp:1.0760e-03 L11_sharp:1.4549e-03 L12_sharp:9.2845e-03 total_fnorm:7.1000e+01 total_l1_linf:1.5974e+05 total_spectral:3.5750e+01 L1_fnorm:2.5000e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5156e+00 L5_fnorm:2.5156e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5312e+00 L8_fnorm:2.4844e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.5312e+00 L1_l1linf:6.7969e-01 L2_l1linf:6.4062e-01 L3_l1linf:6.1719e-01 L4_l1linf:6.2891e-01 L5_l1linf:6.4453e-01 
L6_l1linf:6.4062e-01 L7_l1linf:6.4844e-01 L8_l1linf:6.2891e-01 L9_l1linf:6.2891e-01 L10_l1linf:6.1328e-01 L11_l1linf:6.1328e-01 L12_l1linf:6.3281e-01 L1_spectral:3.1197e-02 L2_spectral:3.0319e-02 L3_spectral:3.0511e-02 L4_spectral:3.0552e-02 L5_spectral:3.0494e-02 L6_spectral:3.0686e-02 L7_spectral:3.0795e-02 L8_spectral:3.1348e-02 L9_spectral:3.1401e-02 L10_spectral:3.1224e-02 L11_spectral:3.1365e-02 L12_spectral:3.1507e-02 train_time:163746ms step_avg:45.48ms +[2025-09-11 09:04:28] [Rank 0] step:3601/10000 train_time:165427ms step_avg:45.94ms +[2025-09-11 09:04:28] [Rank 0] step:3601/10000 train_time:165427ms step_avg:45.94ms +[2025-09-11 09:04:28] [Rank 0] step:3621/10000 train_time:166095ms step_avg:45.87ms +[2025-09-11 09:04:28] [Rank 0] step:3621/10000 train_time:166095ms step_avg:45.87ms +[2025-09-11 09:04:29] [Rank 0] step:3641/10000 train_time:166758ms step_avg:45.80ms +[2025-09-11 09:04:29] [Rank 0] step:3641/10000 train_time:166758ms step_avg:45.80ms +[2025-09-11 09:04:30] [Rank 0] step:3661/10000 train_time:167422ms step_avg:45.73ms +[2025-09-11 09:04:30] [Rank 0] step:3661/10000 train_time:167422ms step_avg:45.73ms +[2025-09-11 09:04:30] [Rank 0] step:3681/10000 train_time:168084ms step_avg:45.66ms +[2025-09-11 09:04:30] [Rank 0] step:3681/10000 train_time:168084ms step_avg:45.66ms +[2025-09-11 09:04:31] [Rank 0] step:3701/10000 train_time:168747ms step_avg:45.59ms +[2025-09-11 09:04:31] [Rank 0] step:3701/10000 train_time:168747ms step_avg:45.59ms +[2025-09-11 09:04:32] [Rank 0] step:3721/10000 train_time:169419ms step_avg:45.53ms +[2025-09-11 09:04:32] [Rank 0] step:3721/10000 train_time:169419ms step_avg:45.53ms +[2025-09-11 09:04:32] [Rank 0] step:3741/10000 train_time:170093ms step_avg:45.47ms +[2025-09-11 09:04:32] [Rank 0] step:3741/10000 train_time:170093ms step_avg:45.47ms +[2025-09-11 09:04:33] [Rank 0] step:3761/10000 train_time:170768ms step_avg:45.40ms +[2025-09-11 09:04:33] [Rank 0] step:3761/10000 train_time:170768ms step_avg:45.40ms 
+[2025-09-11 09:04:34] [Rank 0] step:3781/10000 train_time:171441ms step_avg:45.34ms +[2025-09-11 09:04:34] [Rank 0] step:3781/10000 train_time:171441ms step_avg:45.34ms +[2025-09-11 09:04:34] [Rank 0] step:3801/10000 train_time:172114ms step_avg:45.28ms +[2025-09-11 09:04:34] [Rank 0] step:3801/10000 train_time:172114ms step_avg:45.28ms +[2025-09-11 09:04:35] [Rank 0] step:3821/10000 train_time:172788ms step_avg:45.22ms +[2025-09-11 09:04:35] [Rank 0] step:3821/10000 train_time:172788ms step_avg:45.22ms +[2025-09-11 09:04:36] [Rank 0] step:3841/10000 train_time:173463ms step_avg:45.16ms +[2025-09-11 09:04:36] [Rank 0] step:3841/10000 train_time:173463ms step_avg:45.16ms +[2025-09-11 09:04:36] [Rank 0] step:3861/10000 train_time:174134ms step_avg:45.10ms +[2025-09-11 09:04:36] [Rank 0] step:3861/10000 train_time:174134ms step_avg:45.10ms +[2025-09-11 09:04:37] [Rank 0] step:3881/10000 train_time:174808ms step_avg:45.04ms +[2025-09-11 09:04:37] [Rank 0] step:3881/10000 train_time:174808ms step_avg:45.04ms +[2025-09-11 09:04:38] [Rank 0] step:3901/10000 train_time:175481ms step_avg:44.98ms +[2025-09-11 09:04:38] [Rank 0] step:3901/10000 train_time:175481ms step_avg:44.98ms +[2025-09-11 09:04:38] [Rank 0] step:3921/10000 train_time:176155ms step_avg:44.93ms +[2025-09-11 09:04:38] [Rank 0] step:3921/10000 train_time:176155ms step_avg:44.93ms +[2025-09-11 09:04:39] [Rank 0] step:3941/10000 train_time:176829ms step_avg:44.87ms +[2025-09-11 09:04:39] [Rank 0] step:3941/10000 train_time:176829ms step_avg:44.87ms +[2025-09-11 09:04:40] [Rank 0] step:3961/10000 train_time:177503ms step_avg:44.81ms +[2025-09-11 09:04:40] [Rank 0] step:3961/10000 train_time:177503ms step_avg:44.81ms +[2025-09-11 09:04:40] [Rank 0] step:3981/10000 train_time:178177ms step_avg:44.76ms +[2025-09-11 09:04:40] [Rank 0] step:3981/10000 train_time:178177ms step_avg:44.76ms +[2025-09-11 09:04:41] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 09:04:41] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 09:04:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 09:04:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 09:04:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 09:04:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 09:04:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:04:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:04:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 09:04:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 09:04:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 09:04:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 09:04:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 09:04:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 09:04:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 09:04:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 09:04:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 09:04:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 09:04:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 09:04:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 09:04:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 09:04:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 09:04:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 09:04:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 09:04:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 09:04:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 09:04:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 09:04:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 09:04:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 09:04:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 09:04:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 09:04:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 09:04:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 09:04:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 09:04:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 09:04:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 09:04:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 09:04:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 09:04:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 09:04:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 09:04:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 09:04:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 09:04:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 09:04:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 09:04:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:04:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:04:51] [Rank 0] PRINT: step:4000/10000 val_loss:4.5028 total_sharp:2.0225e-04 L1_sharp:4.9752e-03 L2_sharp:2.8335e-04 L3_sharp:1.9030e-04 L4_sharp:4.3890e-04 L5_sharp:4.6982e-04 L6_sharp:3.7462e-04 L7_sharp:4.5815e-04 L8_sharp:1.1436e-03 L9_sharp:1.1537e-03 L10_sharp:1.2470e-03 L11_sharp:1.8688e-03 L12_sharp:1.8471e-02 total_fnorm:7.9000e+01 total_l1_linf:1.7613e+05 total_spectral:3.9750e+01 L1_fnorm:2.5156e+00 L2_fnorm:2.4219e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.5469e+00 L1_l1linf:6.8359e-01 L2_l1linf:6.3281e-01 L3_l1linf:6.1719e-01 L4_l1linf:6.3281e-01 L5_l1linf:6.3281e-01 L6_l1linf:6.6016e-01 L7_l1linf:6.3281e-01 L8_l1linf:6.2891e-01 L9_l1linf:6.1719e-01 L10_l1linf:6.0938e-01 L11_l1linf:6.2500e-01 L12_l1linf:6.3281e-01 L1_spectral:3.1288e-02 L2_spectral:3.0349e-02 L3_spectral:3.0629e-02 L4_spectral:3.0776e-02 L5_spectral:3.0888e-02 L6_spectral:3.0963e-02 L7_spectral:3.1104e-02 L8_spectral:3.1577e-02 L9_spectral:3.1361e-02 L10_spectral:3.1208e-02 L11_spectral:3.1531e-02 L12_spectral:3.1442e-02 train_time:178831ms step_avg:44.71ms +[2025-09-11 09:04:51] [Rank 0] PRINT: step:4000/10000 
val_loss:4.5028 total_sharp:2.0225e-04 L1_sharp:4.9752e-03 L2_sharp:2.8335e-04 L3_sharp:1.9030e-04 L4_sharp:4.3890e-04 L5_sharp:4.6982e-04 L6_sharp:3.7462e-04 L7_sharp:4.5815e-04 L8_sharp:1.1436e-03 L9_sharp:1.1537e-03 L10_sharp:1.2470e-03 L11_sharp:1.8688e-03 L12_sharp:1.8471e-02 total_fnorm:7.9000e+01 total_l1_linf:1.7613e+05 total_spectral:3.9750e+01 L1_fnorm:2.5156e+00 L2_fnorm:2.4219e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.5469e+00 L1_l1linf:6.8359e-01 L2_l1linf:6.3281e-01 L3_l1linf:6.1719e-01 L4_l1linf:6.3281e-01 L5_l1linf:6.3281e-01 L6_l1linf:6.6016e-01 L7_l1linf:6.3281e-01 L8_l1linf:6.2891e-01 L9_l1linf:6.1719e-01 L10_l1linf:6.0938e-01 L11_l1linf:6.2500e-01 L12_l1linf:6.3281e-01 L1_spectral:3.1288e-02 L2_spectral:3.0349e-02 L3_spectral:3.0629e-02 L4_spectral:3.0776e-02 L5_spectral:3.0888e-02 L6_spectral:3.0963e-02 L7_spectral:3.1104e-02 L8_spectral:3.1577e-02 L9_spectral:3.1361e-02 L10_spectral:3.1208e-02 L11_spectral:3.1531e-02 L12_spectral:3.1442e-02 train_time:178831ms step_avg:44.71ms +[2025-09-11 09:04:53] [Rank 0] step:4001/10000 train_time:180591ms step_avg:45.14ms +[2025-09-11 09:04:53] [Rank 0] step:4001/10000 train_time:180591ms step_avg:45.14ms +[2025-09-11 09:04:54] [Rank 0] step:4021/10000 train_time:181271ms step_avg:45.08ms +[2025-09-11 09:04:54] [Rank 0] step:4021/10000 train_time:181271ms step_avg:45.08ms +[2025-09-11 09:04:54] [Rank 0] step:4041/10000 train_time:181945ms step_avg:45.02ms +[2025-09-11 09:04:54] [Rank 0] step:4041/10000 train_time:181945ms step_avg:45.02ms +[2025-09-11 09:04:55] [Rank 0] step:4061/10000 train_time:182618ms step_avg:44.97ms +[2025-09-11 09:04:55] [Rank 0] step:4061/10000 train_time:182618ms step_avg:44.97ms +[2025-09-11 09:04:56] [Rank 0] step:4081/10000 train_time:183291ms step_avg:44.91ms +[2025-09-11 09:04:56] [Rank 0] step:4081/10000 
train_time:183291ms step_avg:44.91ms +[2025-09-11 09:04:56] [Rank 0] step:4101/10000 train_time:183965ms step_avg:44.86ms +[2025-09-11 09:04:56] [Rank 0] step:4101/10000 train_time:183965ms step_avg:44.86ms +[2025-09-11 09:04:57] [Rank 0] step:4121/10000 train_time:184638ms step_avg:44.80ms +[2025-09-11 09:04:57] [Rank 0] step:4121/10000 train_time:184638ms step_avg:44.80ms +[2025-09-11 09:04:58] [Rank 0] step:4141/10000 train_time:185311ms step_avg:44.75ms +[2025-09-11 09:04:58] [Rank 0] step:4141/10000 train_time:185311ms step_avg:44.75ms +[2025-09-11 09:04:58] [Rank 0] step:4161/10000 train_time:185984ms step_avg:44.70ms +[2025-09-11 09:04:58] [Rank 0] step:4161/10000 train_time:185984ms step_avg:44.70ms +[2025-09-11 09:04:59] [Rank 0] step:4181/10000 train_time:186658ms step_avg:44.64ms +[2025-09-11 09:04:59] [Rank 0] step:4181/10000 train_time:186658ms step_avg:44.64ms +[2025-09-11 09:05:00] [Rank 0] step:4201/10000 train_time:187335ms step_avg:44.59ms +[2025-09-11 09:05:00] [Rank 0] step:4201/10000 train_time:187335ms step_avg:44.59ms +[2025-09-11 09:05:00] [Rank 0] step:4221/10000 train_time:188008ms step_avg:44.54ms +[2025-09-11 09:05:00] [Rank 0] step:4221/10000 train_time:188008ms step_avg:44.54ms +[2025-09-11 09:05:01] [Rank 0] step:4241/10000 train_time:188812ms step_avg:44.52ms +[2025-09-11 09:05:01] [Rank 0] step:4241/10000 train_time:188812ms step_avg:44.52ms +[2025-09-11 09:05:02] [Rank 0] step:4261/10000 train_time:189532ms step_avg:44.48ms +[2025-09-11 09:05:02] [Rank 0] step:4261/10000 train_time:189532ms step_avg:44.48ms +[2025-09-11 09:05:03] [Rank 0] step:4281/10000 train_time:190500ms step_avg:44.50ms +[2025-09-11 09:05:03] [Rank 0] step:4281/10000 train_time:190500ms step_avg:44.50ms +[2025-09-11 09:05:04] [Rank 0] step:4301/10000 train_time:191438ms step_avg:44.51ms +[2025-09-11 09:05:04] [Rank 0] step:4301/10000 train_time:191438ms step_avg:44.51ms +[2025-09-11 09:05:05] [Rank 0] step:4321/10000 train_time:192111ms step_avg:44.46ms 
+[2025-09-11 09:05:05] [Rank 0] step:4321/10000 train_time:192111ms step_avg:44.46ms +[2025-09-11 09:05:05] [Rank 0] step:4341/10000 train_time:193092ms step_avg:44.48ms +[2025-09-11 09:05:05] [Rank 0] step:4341/10000 train_time:193092ms step_avg:44.48ms +[2025-09-11 09:05:06] [Rank 0] step:4361/10000 train_time:193765ms step_avg:44.43ms +[2025-09-11 09:05:06] [Rank 0] step:4361/10000 train_time:193765ms step_avg:44.43ms +[2025-09-11 09:05:07] [Rank 0] step:4381/10000 train_time:194439ms step_avg:44.38ms +[2025-09-11 09:05:07] [Rank 0] step:4381/10000 train_time:194439ms step_avg:44.38ms +[2025-09-11 09:05:07] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 09:05:07] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 09:05:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 09:05:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 09:05:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 09:05:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 09:05:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:05:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:05:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 09:05:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 09:05:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 09:05:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 09:05:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 09:05:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 09:05:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 09:05:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 09:05:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 09:05:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 09:05:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 09:05:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 09:05:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 09:05:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 09:05:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 09:05:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 09:05:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 09:05:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 09:05:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 09:05:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 09:05:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 09:05:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 09:05:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 09:05:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 09:05:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 09:05:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 09:05:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 09:05:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 09:05:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 09:05:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 09:05:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 09:05:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 09:05:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 09:05:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 09:05:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 09:05:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 09:05:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:05:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:05:18] [Rank 0] PRINT: step:4400/10000 val_loss:4.4776 total_sharp:1.7922e-04 L1_sharp:4.0202e-03 L2_sharp:2.5000e-04 L3_sharp:-2.6805e-05 L4_sharp:4.0237e-04 L5_sharp:5.0961e-04 L6_sharp:6.3777e-04 L7_sharp:4.9740e-04 L8_sharp:1.2236e-03 L9_sharp:1.0296e-03 L10_sharp:9.8332e-04 L11_sharp:1.4809e-03 L12_sharp:8.8747e-03 total_fnorm:7.2500e+01 total_l1_linf:1.5872e+05 total_spectral:3.6500e+01 L1_fnorm:2.5312e+00 L2_fnorm:2.4219e+00 L3_fnorm:2.4688e+00 L4_fnorm:2.4844e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5312e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.5312e+00 L1_l1linf:6.6797e-01 L2_l1linf:6.2891e-01 L3_l1linf:6.0547e-01 L4_l1linf:6.2109e-01 L5_l1linf:6.2109e-01 L6_l1linf:6.2500e-01 L7_l1linf:6.3281e-01 L8_l1linf:6.2500e-01 L9_l1linf:6.0938e-01 L10_l1linf:5.9766e-01 L11_l1linf:6.0547e-01 L12_l1linf:6.2500e-01 L1_spectral:3.1623e-02 L2_spectral:3.0194e-02 L3_spectral:3.0797e-02 L4_spectral:3.0865e-02 L5_spectral:3.1051e-02 L6_spectral:3.1088e-02 L7_spectral:3.1041e-02 L8_spectral:3.1684e-02 L9_spectral:3.1686e-02 L10_spectral:3.1666e-02 L11_spectral:3.1717e-02 L12_spectral:3.1502e-02 train_time:195092ms step_avg:44.34ms +[2025-09-11 09:05:18] [Rank 0] PRINT: step:4400/10000 val_loss:4.4776 total_sharp:1.7922e-04 L1_sharp:4.0202e-03 L2_sharp:2.5000e-04 L3_sharp:-2.6805e-05 L4_sharp:4.0237e-04 L5_sharp:5.0961e-04 L6_sharp:6.3777e-04 L7_sharp:4.9740e-04 L8_sharp:1.2236e-03 L9_sharp:1.0296e-03 L10_sharp:9.8332e-04 L11_sharp:1.4809e-03 L12_sharp:8.8747e-03 total_fnorm:7.2500e+01 total_l1_linf:1.5872e+05 total_spectral:3.6500e+01 L1_fnorm:2.5312e+00 L2_fnorm:2.4219e+00 L3_fnorm:2.4688e+00 L4_fnorm:2.4844e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5312e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.5312e+00 L1_l1linf:6.6797e-01 L2_l1linf:6.2891e-01 L3_l1linf:6.0547e-01 L4_l1linf:6.2109e-01 L5_l1linf:6.2109e-01 
L6_l1linf:6.2500e-01 L7_l1linf:6.3281e-01 L8_l1linf:6.2500e-01 L9_l1linf:6.0938e-01 L10_l1linf:5.9766e-01 L11_l1linf:6.0547e-01 L12_l1linf:6.2500e-01 L1_spectral:3.1623e-02 L2_spectral:3.0194e-02 L3_spectral:3.0797e-02 L4_spectral:3.0865e-02 L5_spectral:3.1051e-02 L6_spectral:3.1088e-02 L7_spectral:3.1041e-02 L8_spectral:3.1684e-02 L9_spectral:3.1686e-02 L10_spectral:3.1666e-02 L11_spectral:3.1717e-02 L12_spectral:3.1502e-02 train_time:195092ms step_avg:44.34ms +[2025-09-11 09:05:20] [Rank 0] step:4401/10000 train_time:196741ms step_avg:44.70ms +[2025-09-11 09:05:20] [Rank 0] step:4401/10000 train_time:196741ms step_avg:44.70ms +[2025-09-11 09:05:20] [Rank 0] step:4421/10000 train_time:197438ms step_avg:44.66ms +[2025-09-11 09:05:20] [Rank 0] step:4421/10000 train_time:197438ms step_avg:44.66ms +[2025-09-11 09:05:21] [Rank 0] step:4441/10000 train_time:198112ms step_avg:44.61ms +[2025-09-11 09:05:21] [Rank 0] step:4441/10000 train_time:198112ms step_avg:44.61ms +[2025-09-11 09:05:22] [Rank 0] step:4461/10000 train_time:198788ms step_avg:44.56ms +[2025-09-11 09:05:22] [Rank 0] step:4461/10000 train_time:198788ms step_avg:44.56ms +[2025-09-11 09:05:22] [Rank 0] step:4481/10000 train_time:199463ms step_avg:44.51ms +[2025-09-11 09:05:22] [Rank 0] step:4481/10000 train_time:199463ms step_avg:44.51ms +[2025-09-11 09:05:23] [Rank 0] step:4501/10000 train_time:200140ms step_avg:44.47ms +[2025-09-11 09:05:23] [Rank 0] step:4501/10000 train_time:200140ms step_avg:44.47ms +[2025-09-11 09:05:24] [Rank 0] step:4521/10000 train_time:200816ms step_avg:44.42ms +[2025-09-11 09:05:24] [Rank 0] step:4521/10000 train_time:200816ms step_avg:44.42ms +[2025-09-11 09:05:24] [Rank 0] step:4541/10000 train_time:201492ms step_avg:44.37ms +[2025-09-11 09:05:24] [Rank 0] step:4541/10000 train_time:201492ms step_avg:44.37ms +[2025-09-11 09:05:25] [Rank 0] step:4561/10000 train_time:202167ms step_avg:44.33ms +[2025-09-11 09:05:25] [Rank 0] step:4561/10000 train_time:202167ms step_avg:44.33ms 
+[2025-09-11 09:05:26] [Rank 0] step:4581/10000 train_time:202843ms step_avg:44.28ms +[2025-09-11 09:05:26] [Rank 0] step:4581/10000 train_time:202843ms step_avg:44.28ms +[2025-09-11 09:05:26] [Rank 0] step:4601/10000 train_time:203518ms step_avg:44.23ms +[2025-09-11 09:05:26] [Rank 0] step:4601/10000 train_time:203518ms step_avg:44.23ms +[2025-09-11 09:05:27] [Rank 0] step:4621/10000 train_time:204194ms step_avg:44.19ms +[2025-09-11 09:05:27] [Rank 0] step:4621/10000 train_time:204194ms step_avg:44.19ms +[2025-09-11 09:05:28] [Rank 0] step:4641/10000 train_time:204870ms step_avg:44.14ms +[2025-09-11 09:05:28] [Rank 0] step:4641/10000 train_time:204870ms step_avg:44.14ms +[2025-09-11 09:05:28] [Rank 0] step:4661/10000 train_time:205546ms step_avg:44.10ms +[2025-09-11 09:05:28] [Rank 0] step:4661/10000 train_time:205546ms step_avg:44.10ms +[2025-09-11 09:05:29] [Rank 0] step:4681/10000 train_time:206228ms step_avg:44.06ms +[2025-09-11 09:05:29] [Rank 0] step:4681/10000 train_time:206228ms step_avg:44.06ms +[2025-09-11 09:05:30] [Rank 0] step:4701/10000 train_time:206903ms step_avg:44.01ms +[2025-09-11 09:05:30] [Rank 0] step:4701/10000 train_time:206903ms step_avg:44.01ms +[2025-09-11 09:05:30] [Rank 0] step:4721/10000 train_time:207579ms step_avg:43.97ms +[2025-09-11 09:05:30] [Rank 0] step:4721/10000 train_time:207579ms step_avg:43.97ms +[2025-09-11 09:05:31] [Rank 0] step:4741/10000 train_time:208255ms step_avg:43.93ms +[2025-09-11 09:05:31] [Rank 0] step:4741/10000 train_time:208255ms step_avg:43.93ms +[2025-09-11 09:05:32] [Rank 0] step:4761/10000 train_time:208931ms step_avg:43.88ms +[2025-09-11 09:05:32] [Rank 0] step:4761/10000 train_time:208931ms step_avg:43.88ms +[2025-09-11 09:05:33] [Rank 0] step:4781/10000 train_time:209606ms step_avg:43.84ms +[2025-09-11 09:05:33] [Rank 0] step:4781/10000 train_time:209606ms step_avg:43.84ms +[2025-09-11 09:05:33] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 09:05:33] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 09:05:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 09:05:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 09:05:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 09:05:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 09:05:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:05:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:05:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 09:05:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 09:05:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 09:05:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 09:05:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 09:05:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 09:05:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 09:05:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 09:05:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 09:05:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 09:05:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 09:05:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 09:05:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 09:05:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 09:05:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 09:05:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 09:05:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 09:05:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 09:05:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 09:05:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 09:05:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 09:05:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 09:05:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 09:05:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 09:05:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 09:05:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 09:05:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 09:05:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 09:05:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 09:05:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 09:05:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 09:05:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 09:05:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 09:05:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 09:05:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 09:05:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 09:05:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:05:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:05:44] [Rank 0] PRINT: step:4800/10000 val_loss:4.4359 total_sharp:1.3527e-04 L1_sharp:3.2929e-03 L2_sharp:4.0626e-04 L3_sharp:2.5424e-04 L4_sharp:2.7150e-04 L5_sharp:3.1114e-04 L6_sharp:4.2140e-04 L7_sharp:5.9631e-04 L8_sharp:1.0140e-03 L9_sharp:8.5945e-04 L10_sharp:9.1207e-04 L11_sharp:1.3259e-03 L12_sharp:8.7768e-03 total_fnorm:7.8000e+01 total_l1_linf:1.7510e+05 total_spectral:3.9250e+01 L1_fnorm:2.5156e+00 L2_fnorm:2.4375e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.5312e+00 L1_l1linf:6.6406e-01 L2_l1linf:6.3281e-01 L3_l1linf:6.0938e-01 L4_l1linf:6.2500e-01 L5_l1linf:6.2500e-01 L6_l1linf:6.3281e-01 L7_l1linf:6.4844e-01 L8_l1linf:6.2500e-01 L9_l1linf:6.0938e-01 L10_l1linf:6.0156e-01 L11_l1linf:5.7812e-01 L12_l1linf:6.1719e-01 L1_spectral:3.1822e-02 L2_spectral:3.0529e-02 L3_spectral:3.0928e-02 L4_spectral:3.1345e-02 L5_spectral:3.1468e-02 L6_spectral:3.1271e-02 L7_spectral:3.1279e-02 L8_spectral:3.1894e-02 L9_spectral:3.1848e-02 L10_spectral:3.1722e-02 L11_spectral:3.1934e-02 L12_spectral:3.1785e-02 train_time:210262ms step_avg:43.80ms +[2025-09-11 09:05:44] [Rank 0] PRINT: step:4800/10000 
val_loss:4.4359 total_sharp:1.3527e-04 L1_sharp:3.2929e-03 L2_sharp:4.0626e-04 L3_sharp:2.5424e-04 L4_sharp:2.7150e-04 L5_sharp:3.1114e-04 L6_sharp:4.2140e-04 L7_sharp:5.9631e-04 L8_sharp:1.0140e-03 L9_sharp:8.5945e-04 L10_sharp:9.1207e-04 L11_sharp:1.3259e-03 L12_sharp:8.7768e-03 total_fnorm:7.8000e+01 total_l1_linf:1.7510e+05 total_spectral:3.9250e+01 L1_fnorm:2.5156e+00 L2_fnorm:2.4375e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.5312e+00 L1_l1linf:6.6406e-01 L2_l1linf:6.3281e-01 L3_l1linf:6.0938e-01 L4_l1linf:6.2500e-01 L5_l1linf:6.2500e-01 L6_l1linf:6.3281e-01 L7_l1linf:6.4844e-01 L8_l1linf:6.2500e-01 L9_l1linf:6.0938e-01 L10_l1linf:6.0156e-01 L11_l1linf:5.7812e-01 L12_l1linf:6.1719e-01 L1_spectral:3.1822e-02 L2_spectral:3.0529e-02 L3_spectral:3.0928e-02 L4_spectral:3.1345e-02 L5_spectral:3.1468e-02 L6_spectral:3.1271e-02 L7_spectral:3.1279e-02 L8_spectral:3.1894e-02 L9_spectral:3.1848e-02 L10_spectral:3.1722e-02 L11_spectral:3.1934e-02 L12_spectral:3.1785e-02 train_time:210262ms step_avg:43.80ms +[2025-09-11 09:05:46] [Rank 0] step:4801/10000 train_time:211902ms step_avg:44.14ms +[2025-09-11 09:05:46] [Rank 0] step:4801/10000 train_time:211902ms step_avg:44.14ms +[2025-09-11 09:05:47] [Rank 0] step:4821/10000 train_time:212617ms step_avg:44.10ms +[2025-09-11 09:05:47] [Rank 0] step:4821/10000 train_time:212617ms step_avg:44.10ms +[2025-09-11 09:05:47] [Rank 0] step:4841/10000 train_time:213294ms step_avg:44.06ms +[2025-09-11 09:05:47] [Rank 0] step:4841/10000 train_time:213294ms step_avg:44.06ms +[2025-09-11 09:05:48] [Rank 0] step:4861/10000 train_time:213970ms step_avg:44.02ms +[2025-09-11 09:05:48] [Rank 0] step:4861/10000 train_time:213970ms step_avg:44.02ms +[2025-09-11 09:05:49] [Rank 0] step:4881/10000 train_time:214647ms step_avg:43.98ms +[2025-09-11 09:05:49] [Rank 0] step:4881/10000 
train_time:214647ms step_avg:43.98ms +[2025-09-11 09:05:49] [Rank 0] step:4901/10000 train_time:215325ms step_avg:43.93ms +[2025-09-11 09:05:49] [Rank 0] step:4901/10000 train_time:215325ms step_avg:43.93ms +[2025-09-11 09:05:50] [Rank 0] step:4921/10000 train_time:216002ms step_avg:43.89ms +[2025-09-11 09:05:50] [Rank 0] step:4921/10000 train_time:216002ms step_avg:43.89ms +[2025-09-11 09:05:51] [Rank 0] step:4941/10000 train_time:216679ms step_avg:43.85ms +[2025-09-11 09:05:51] [Rank 0] step:4941/10000 train_time:216679ms step_avg:43.85ms +[2025-09-11 09:05:52] [Rank 0] step:4961/10000 train_time:217356ms step_avg:43.81ms +[2025-09-11 09:05:52] [Rank 0] step:4961/10000 train_time:217356ms step_avg:43.81ms +[2025-09-11 09:05:52] [Rank 0] step:4981/10000 train_time:218033ms step_avg:43.77ms +[2025-09-11 09:05:52] [Rank 0] step:4981/10000 train_time:218033ms step_avg:43.77ms +[2025-09-11 09:05:53] [Rank 0] step:5001/10000 train_time:218711ms step_avg:43.73ms +[2025-09-11 09:05:53] [Rank 0] step:5001/10000 train_time:218711ms step_avg:43.73ms +[2025-09-11 09:05:54] [Rank 0] step:5021/10000 train_time:219385ms step_avg:43.69ms +[2025-09-11 09:05:54] [Rank 0] step:5021/10000 train_time:219385ms step_avg:43.69ms +[2025-09-11 09:05:54] [Rank 0] step:5041/10000 train_time:220061ms step_avg:43.65ms +[2025-09-11 09:05:54] [Rank 0] step:5041/10000 train_time:220061ms step_avg:43.65ms +[2025-09-11 09:05:55] [Rank 0] step:5061/10000 train_time:220737ms step_avg:43.62ms +[2025-09-11 09:05:55] [Rank 0] step:5061/10000 train_time:220737ms step_avg:43.62ms +[2025-09-11 09:05:56] [Rank 0] step:5081/10000 train_time:221413ms step_avg:43.58ms +[2025-09-11 09:05:56] [Rank 0] step:5081/10000 train_time:221413ms step_avg:43.58ms +[2025-09-11 09:05:56] [Rank 0] step:5101/10000 train_time:222089ms step_avg:43.54ms +[2025-09-11 09:05:56] [Rank 0] step:5101/10000 train_time:222089ms step_avg:43.54ms +[2025-09-11 09:05:57] [Rank 0] step:5121/10000 train_time:222765ms step_avg:43.50ms 
+[2025-09-11 09:05:57] [Rank 0] step:5121/10000 train_time:222765ms step_avg:43.50ms +[2025-09-11 09:05:58] [Rank 0] step:5141/10000 train_time:223441ms step_avg:43.46ms +[2025-09-11 09:05:58] [Rank 0] step:5141/10000 train_time:223441ms step_avg:43.46ms +[2025-09-11 09:05:58] [Rank 0] step:5161/10000 train_time:224118ms step_avg:43.43ms +[2025-09-11 09:05:58] [Rank 0] step:5161/10000 train_time:224118ms step_avg:43.43ms +[2025-09-11 09:05:59] [Rank 0] step:5181/10000 train_time:224794ms step_avg:43.39ms +[2025-09-11 09:05:59] [Rank 0] step:5181/10000 train_time:224794ms step_avg:43.39ms +[2025-09-11 09:06:00] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 09:06:00] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 09:06:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 09:06:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 09:06:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 09:06:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 09:06:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:06:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:06:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 09:06:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 09:06:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 09:06:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 09:06:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 09:06:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 09:06:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 09:06:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 09:06:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 09:06:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 09:06:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 09:06:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 09:06:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 09:06:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 09:06:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 09:06:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 09:06:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 09:06:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 09:06:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 09:06:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 09:06:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 09:06:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 09:06:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 09:06:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 09:06:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 09:06:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 09:06:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 09:06:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 09:06:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 09:06:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 09:06:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 09:06:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 09:06:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 09:06:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 09:06:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 09:06:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 09:06:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:06:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:06:10] [Rank 0] PRINT: step:5200/10000 val_loss:4.4023 total_sharp:2.4088e-04 L1_sharp:3.2043e-03 L2_sharp:5.3654e-04 L3_sharp:3.3348e-04 L4_sharp:3.1923e-04 L5_sharp:8.1198e-04 L6_sharp:4.1746e-04 L7_sharp:4.8807e-04 L8_sharp:1.0728e-03 L9_sharp:1.1305e-03 L10_sharp:1.0435e-03 L11_sharp:1.8181e-03 L12_sharp:2.5377e-02 total_fnorm:7.0000e+01 total_l1_linf:1.5155e+05 total_spectral:3.5500e+01 L1_fnorm:2.5156e+00 L2_fnorm:2.4062e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.5156e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4844e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.5312e+00 L1_l1linf:6.5625e-01 L2_l1linf:6.2109e-01 L3_l1linf:5.9766e-01 L4_l1linf:6.2109e-01 L5_l1linf:6.2109e-01 L6_l1linf:6.2891e-01 L7_l1linf:6.3672e-01 L8_l1linf:6.2891e-01 L9_l1linf:5.9766e-01 L10_l1linf:5.9375e-01 L11_l1linf:5.7422e-01 L12_l1linf:6.1328e-01 L1_spectral:3.1760e-02 L2_spectral:3.0527e-02 L3_spectral:3.1259e-02 L4_spectral:3.1305e-02 L5_spectral:3.1351e-02 L6_spectral:3.1697e-02 L7_spectral:3.1512e-02 L8_spectral:3.2065e-02 L9_spectral:3.2015e-02 L10_spectral:3.1951e-02 L11_spectral:3.1953e-02 L12_spectral:3.1706e-02 train_time:225457ms step_avg:43.36ms +[2025-09-11 09:06:10] [Rank 0] PRINT: step:5200/10000 val_loss:4.4023 total_sharp:2.4088e-04 L1_sharp:3.2043e-03 L2_sharp:5.3654e-04 L3_sharp:3.3348e-04 L4_sharp:3.1923e-04 L5_sharp:8.1198e-04 L6_sharp:4.1746e-04 L7_sharp:4.8807e-04 L8_sharp:1.0728e-03 L9_sharp:1.1305e-03 L10_sharp:1.0435e-03 L11_sharp:1.8181e-03 L12_sharp:2.5377e-02 total_fnorm:7.0000e+01 total_l1_linf:1.5155e+05 total_spectral:3.5500e+01 L1_fnorm:2.5156e+00 L2_fnorm:2.4062e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.5156e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4844e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.5312e+00 L1_l1linf:6.5625e-01 L2_l1linf:6.2109e-01 L3_l1linf:5.9766e-01 L4_l1linf:6.2109e-01 L5_l1linf:6.2109e-01 
L6_l1linf:6.2891e-01 L7_l1linf:6.3672e-01 L8_l1linf:6.2891e-01 L9_l1linf:5.9766e-01 L10_l1linf:5.9375e-01 L11_l1linf:5.7422e-01 L12_l1linf:6.1328e-01 L1_spectral:3.1760e-02 L2_spectral:3.0527e-02 L3_spectral:3.1259e-02 L4_spectral:3.1305e-02 L5_spectral:3.1351e-02 L6_spectral:3.1697e-02 L7_spectral:3.1512e-02 L8_spectral:3.2065e-02 L9_spectral:3.2015e-02 L10_spectral:3.1951e-02 L11_spectral:3.1953e-02 L12_spectral:3.1706e-02 train_time:225457ms step_avg:43.36ms +[2025-09-11 09:06:12] [Rank 0] step:5201/10000 train_time:227175ms step_avg:43.68ms +[2025-09-11 09:06:12] [Rank 0] step:5201/10000 train_time:227175ms step_avg:43.68ms +[2025-09-11 09:06:12] [Rank 0] step:5221/10000 train_time:227880ms step_avg:43.65ms +[2025-09-11 09:06:12] [Rank 0] step:5221/10000 train_time:227880ms step_avg:43.65ms +[2025-09-11 09:06:13] [Rank 0] step:5241/10000 train_time:228566ms step_avg:43.61ms +[2025-09-11 09:06:13] [Rank 0] step:5241/10000 train_time:228566ms step_avg:43.61ms +[2025-09-11 09:06:14] [Rank 0] step:5261/10000 train_time:229253ms step_avg:43.58ms +[2025-09-11 09:06:14] [Rank 0] step:5261/10000 train_time:229253ms step_avg:43.58ms +[2025-09-11 09:06:14] [Rank 0] step:5281/10000 train_time:229939ms step_avg:43.54ms +[2025-09-11 09:06:14] [Rank 0] step:5281/10000 train_time:229939ms step_avg:43.54ms +[2025-09-11 09:06:15] [Rank 0] step:5301/10000 train_time:230625ms step_avg:43.51ms +[2025-09-11 09:06:15] [Rank 0] step:5301/10000 train_time:230625ms step_avg:43.51ms +[2025-09-11 09:06:16] [Rank 0] step:5321/10000 train_time:231311ms step_avg:43.47ms +[2025-09-11 09:06:16] [Rank 0] step:5321/10000 train_time:231311ms step_avg:43.47ms +[2025-09-11 09:06:17] [Rank 0] step:5341/10000 train_time:231996ms step_avg:43.44ms +[2025-09-11 09:06:17] [Rank 0] step:5341/10000 train_time:231996ms step_avg:43.44ms +[2025-09-11 09:06:17] [Rank 0] step:5361/10000 train_time:232682ms step_avg:43.40ms +[2025-09-11 09:06:17] [Rank 0] step:5361/10000 train_time:232682ms step_avg:43.40ms 
+[2025-09-11 09:06:18] [Rank 0] step:5381/10000 train_time:233368ms step_avg:43.37ms +[2025-09-11 09:06:18] [Rank 0] step:5381/10000 train_time:233368ms step_avg:43.37ms +[2025-09-11 09:06:19] [Rank 0] step:5401/10000 train_time:234053ms step_avg:43.34ms +[2025-09-11 09:06:19] [Rank 0] step:5401/10000 train_time:234053ms step_avg:43.34ms +[2025-09-11 09:06:19] [Rank 0] step:5421/10000 train_time:234739ms step_avg:43.30ms +[2025-09-11 09:06:19] [Rank 0] step:5421/10000 train_time:234739ms step_avg:43.30ms +[2025-09-11 09:06:20] [Rank 0] step:5441/10000 train_time:235425ms step_avg:43.27ms +[2025-09-11 09:06:20] [Rank 0] step:5441/10000 train_time:235425ms step_avg:43.27ms +[2025-09-11 09:06:21] [Rank 0] step:5461/10000 train_time:236112ms step_avg:43.24ms +[2025-09-11 09:06:21] [Rank 0] step:5461/10000 train_time:236112ms step_avg:43.24ms +[2025-09-11 09:06:21] [Rank 0] step:5481/10000 train_time:236797ms step_avg:43.20ms +[2025-09-11 09:06:21] [Rank 0] step:5481/10000 train_time:236797ms step_avg:43.20ms +[2025-09-11 09:06:22] [Rank 0] step:5501/10000 train_time:237482ms step_avg:43.17ms +[2025-09-11 09:06:22] [Rank 0] step:5501/10000 train_time:237482ms step_avg:43.17ms +[2025-09-11 09:06:23] [Rank 0] step:5521/10000 train_time:238167ms step_avg:43.14ms +[2025-09-11 09:06:23] [Rank 0] step:5521/10000 train_time:238167ms step_avg:43.14ms +[2025-09-11 09:06:23] [Rank 0] step:5541/10000 train_time:238856ms step_avg:43.11ms +[2025-09-11 09:06:23] [Rank 0] step:5541/10000 train_time:238856ms step_avg:43.11ms +[2025-09-11 09:06:24] [Rank 0] step:5561/10000 train_time:239543ms step_avg:43.08ms +[2025-09-11 09:06:24] [Rank 0] step:5561/10000 train_time:239543ms step_avg:43.08ms +[2025-09-11 09:06:25] [Rank 0] step:5581/10000 train_time:240230ms step_avg:43.04ms +[2025-09-11 09:06:25] [Rank 0] step:5581/10000 train_time:240230ms step_avg:43.04ms +[2025-09-11 09:06:25] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 09:06:25] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 09:06:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 09:06:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 09:06:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 09:06:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 09:06:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:06:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:06:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 09:06:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 09:06:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 09:06:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 09:06:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 09:06:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 09:06:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 09:06:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 09:06:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 09:06:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 09:06:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 09:06:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 09:06:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 09:06:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 09:06:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 09:06:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 09:06:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 09:06:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 09:06:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 09:06:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 09:06:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 09:06:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 09:06:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 09:06:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 09:06:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 09:06:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 09:06:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 09:06:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 09:06:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 09:06:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 09:06:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 09:06:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 09:06:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 09:06:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 09:06:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 09:06:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 09:06:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:06:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:06:35] [Rank 0] PRINT: step:5600/10000 val_loss:4.3804 total_sharp:1.4899e-04 L1_sharp:4.6942e-03 L2_sharp:4.1285e-04 L3_sharp:2.6848e-04 L4_sharp:3.1369e-04 L5_sharp:4.6114e-04 L6_sharp:3.5084e-04 L7_sharp:4.7513e-04 L8_sharp:9.3106e-04 L9_sharp:8.8019e-04 L10_sharp:8.0958e-04 L11_sharp:1.1627e-03 L12_sharp:4.7334e-03 total_fnorm:7.0500e+01 total_l1_linf:1.5360e+05 total_spectral:3.5500e+01 L1_fnorm:2.5000e+00 L2_fnorm:2.3906e+00 L3_fnorm:2.4688e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5000e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.5156e+00 L1_l1linf:6.4453e-01 L2_l1linf:6.0938e-01 L3_l1linf:5.9766e-01 L4_l1linf:6.1328e-01 L5_l1linf:6.2891e-01 L6_l1linf:6.1328e-01 L7_l1linf:6.4062e-01 L8_l1linf:6.0547e-01 L9_l1linf:5.8984e-01 L10_l1linf:5.8984e-01 L11_l1linf:5.7812e-01 L12_l1linf:5.8984e-01 L1_spectral:3.1649e-02 L2_spectral:3.0344e-02 L3_spectral:3.1247e-02 L4_spectral:3.1351e-02 L5_spectral:3.1558e-02 L6_spectral:3.1561e-02 L7_spectral:3.1620e-02 L8_spectral:3.1859e-02 L9_spectral:3.2066e-02 L10_spectral:3.1917e-02 L11_spectral:3.2086e-02 L12_spectral:3.2017e-02 train_time:240895ms step_avg:43.02ms +[2025-09-11 09:06:35] [Rank 0] PRINT: step:5600/10000 
val_loss:4.3804 total_sharp:1.4899e-04 L1_sharp:4.6942e-03 L2_sharp:4.1285e-04 L3_sharp:2.6848e-04 L4_sharp:3.1369e-04 L5_sharp:4.6114e-04 L6_sharp:3.5084e-04 L7_sharp:4.7513e-04 L8_sharp:9.3106e-04 L9_sharp:8.8019e-04 L10_sharp:8.0958e-04 L11_sharp:1.1627e-03 L12_sharp:4.7334e-03 total_fnorm:7.0500e+01 total_l1_linf:1.5360e+05 total_spectral:3.5500e+01 L1_fnorm:2.5000e+00 L2_fnorm:2.3906e+00 L3_fnorm:2.4688e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5000e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.5156e+00 L1_l1linf:6.4453e-01 L2_l1linf:6.0938e-01 L3_l1linf:5.9766e-01 L4_l1linf:6.1328e-01 L5_l1linf:6.2891e-01 L6_l1linf:6.1328e-01 L7_l1linf:6.4062e-01 L8_l1linf:6.0547e-01 L9_l1linf:5.8984e-01 L10_l1linf:5.8984e-01 L11_l1linf:5.7812e-01 L12_l1linf:5.8984e-01 L1_spectral:3.1649e-02 L2_spectral:3.0344e-02 L3_spectral:3.1247e-02 L4_spectral:3.1351e-02 L5_spectral:3.1558e-02 L6_spectral:3.1561e-02 L7_spectral:3.1620e-02 L8_spectral:3.1859e-02 L9_spectral:3.2066e-02 L10_spectral:3.1917e-02 L11_spectral:3.2086e-02 L12_spectral:3.2017e-02 train_time:240895ms step_avg:43.02ms +[2025-09-11 09:06:37] [Rank 0] step:5601/10000 train_time:242657ms step_avg:43.32ms +[2025-09-11 09:06:37] [Rank 0] step:5601/10000 train_time:242657ms step_avg:43.32ms +[2025-09-11 09:06:38] [Rank 0] step:5621/10000 train_time:243462ms step_avg:43.31ms +[2025-09-11 09:06:38] [Rank 0] step:5621/10000 train_time:243462ms step_avg:43.31ms +[2025-09-11 09:06:39] [Rank 0] step:5641/10000 train_time:244149ms step_avg:43.28ms +[2025-09-11 09:06:39] [Rank 0] step:5641/10000 train_time:244149ms step_avg:43.28ms +[2025-09-11 09:06:39] [Rank 0] step:5661/10000 train_time:244836ms step_avg:43.25ms +[2025-09-11 09:06:39] [Rank 0] step:5661/10000 train_time:244836ms step_avg:43.25ms +[2025-09-11 09:06:40] [Rank 0] step:5681/10000 train_time:245524ms step_avg:43.22ms +[2025-09-11 09:06:40] [Rank 0] step:5681/10000 
train_time:245524ms step_avg:43.22ms +[2025-09-11 09:06:41] [Rank 0] step:5701/10000 train_time:246212ms step_avg:43.19ms +[2025-09-11 09:06:41] [Rank 0] step:5701/10000 train_time:246212ms step_avg:43.19ms +[2025-09-11 09:06:41] [Rank 0] step:5721/10000 train_time:246899ms step_avg:43.16ms +[2025-09-11 09:06:41] [Rank 0] step:5721/10000 train_time:246899ms step_avg:43.16ms +[2025-09-11 09:06:42] [Rank 0] step:5741/10000 train_time:247588ms step_avg:43.13ms +[2025-09-11 09:06:42] [Rank 0] step:5741/10000 train_time:247588ms step_avg:43.13ms +[2025-09-11 09:06:43] [Rank 0] step:5761/10000 train_time:248277ms step_avg:43.10ms +[2025-09-11 09:06:43] [Rank 0] step:5761/10000 train_time:248277ms step_avg:43.10ms +[2025-09-11 09:06:43] [Rank 0] step:5781/10000 train_time:248964ms step_avg:43.07ms +[2025-09-11 09:06:43] [Rank 0] step:5781/10000 train_time:248964ms step_avg:43.07ms +[2025-09-11 09:06:44] [Rank 0] step:5801/10000 train_time:249653ms step_avg:43.04ms +[2025-09-11 09:06:44] [Rank 0] step:5801/10000 train_time:249653ms step_avg:43.04ms +[2025-09-11 09:06:45] [Rank 0] step:5821/10000 train_time:250340ms step_avg:43.01ms +[2025-09-11 09:06:45] [Rank 0] step:5821/10000 train_time:250340ms step_avg:43.01ms +[2025-09-11 09:06:46] [Rank 0] step:5841/10000 train_time:251029ms step_avg:42.98ms +[2025-09-11 09:06:46] [Rank 0] step:5841/10000 train_time:251029ms step_avg:42.98ms +[2025-09-11 09:06:46] [Rank 0] step:5861/10000 train_time:251714ms step_avg:42.95ms +[2025-09-11 09:06:46] [Rank 0] step:5861/10000 train_time:251714ms step_avg:42.95ms +[2025-09-11 09:06:47] [Rank 0] step:5881/10000 train_time:252402ms step_avg:42.92ms +[2025-09-11 09:06:47] [Rank 0] step:5881/10000 train_time:252402ms step_avg:42.92ms +[2025-09-11 09:06:48] [Rank 0] step:5901/10000 train_time:253089ms step_avg:42.89ms +[2025-09-11 09:06:48] [Rank 0] step:5901/10000 train_time:253089ms step_avg:42.89ms +[2025-09-11 09:06:48] [Rank 0] step:5921/10000 train_time:253778ms step_avg:42.86ms 
+[2025-09-11 09:06:48] [Rank 0] step:5921/10000 train_time:253778ms step_avg:42.86ms +[2025-09-11 09:06:49] [Rank 0] step:5941/10000 train_time:254468ms step_avg:42.83ms +[2025-09-11 09:06:49] [Rank 0] step:5941/10000 train_time:254468ms step_avg:42.83ms +[2025-09-11 09:06:50] [Rank 0] step:5961/10000 train_time:255155ms step_avg:42.80ms +[2025-09-11 09:06:50] [Rank 0] step:5961/10000 train_time:255155ms step_avg:42.80ms +[2025-09-11 09:06:50] [Rank 0] step:5981/10000 train_time:255843ms step_avg:42.78ms +[2025-09-11 09:06:50] [Rank 0] step:5981/10000 train_time:255843ms step_avg:42.78ms +[2025-09-11 09:06:51] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 09:06:51] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 09:06:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 09:06:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 09:06:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 09:06:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 09:06:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:06:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:06:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 09:06:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 09:06:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 09:06:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 09:06:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 09:06:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 09:06:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 09:06:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 09:06:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 09:06:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 09:06:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 09:06:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 09:06:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 09:06:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 09:06:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 09:06:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 09:06:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 09:06:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 09:06:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 09:06:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 09:06:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 09:06:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 09:06:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 09:06:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 09:07:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 09:07:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 09:07:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 09:07:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 09:07:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 09:07:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 09:07:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 09:07:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 09:07:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 09:07:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 09:07:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 09:07:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 09:07:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:07:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:07:01] [Rank 0] PRINT: step:6000/10000 val_loss:4.3329 total_sharp:1.3465e-04 L1_sharp:3.6038e-03 L2_sharp:2.0391e-04 L3_sharp:2.5662e-04 L4_sharp:3.7955e-04 L5_sharp:3.3566e-04 L6_sharp:2.1588e-04 L7_sharp:4.1086e-04 L8_sharp:8.9606e-04 L9_sharp:8.0848e-04 L10_sharp:8.3423e-04 L11_sharp:1.1646e-03 L12_sharp:7.7137e-03 total_fnorm:7.2000e+01 total_l1_linf:1.5565e+05 total_spectral:3.6250e+01 L1_fnorm:2.5000e+00 L2_fnorm:2.4062e+00 L3_fnorm:2.4688e+00 L4_fnorm:2.4844e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.5156e+00 L1_l1linf:6.4453e-01 L2_l1linf:6.1328e-01 L3_l1linf:5.9375e-01 L4_l1linf:6.1328e-01 L5_l1linf:6.2109e-01 L6_l1linf:6.1719e-01 L7_l1linf:6.3281e-01 L8_l1linf:6.2109e-01 L9_l1linf:5.9375e-01 L10_l1linf:5.8594e-01 L11_l1linf:5.7031e-01 L12_l1linf:5.7812e-01 L1_spectral:3.1943e-02 L2_spectral:3.0819e-02 L3_spectral:3.1466e-02 L4_spectral:3.1479e-02 L5_spectral:3.1722e-02 L6_spectral:3.1834e-02 L7_spectral:3.1684e-02 L8_spectral:3.2046e-02 L9_spectral:3.2152e-02 L10_spectral:3.2175e-02 L11_spectral:3.2346e-02 L12_spectral:3.2112e-02 train_time:256513ms step_avg:42.75ms +[2025-09-11 09:07:01] [Rank 0] PRINT: step:6000/10000 val_loss:4.3329 total_sharp:1.3465e-04 L1_sharp:3.6038e-03 L2_sharp:2.0391e-04 L3_sharp:2.5662e-04 L4_sharp:3.7955e-04 L5_sharp:3.3566e-04 L6_sharp:2.1588e-04 L7_sharp:4.1086e-04 L8_sharp:8.9606e-04 L9_sharp:8.0848e-04 L10_sharp:8.3423e-04 L11_sharp:1.1646e-03 L12_sharp:7.7137e-03 total_fnorm:7.2000e+01 total_l1_linf:1.5565e+05 total_spectral:3.6250e+01 L1_fnorm:2.5000e+00 L2_fnorm:2.4062e+00 L3_fnorm:2.4688e+00 L4_fnorm:2.4844e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4688e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.5156e+00 L1_l1linf:6.4453e-01 L2_l1linf:6.1328e-01 L3_l1linf:5.9375e-01 L4_l1linf:6.1328e-01 L5_l1linf:6.2109e-01 
L6_l1linf:6.1719e-01 L7_l1linf:6.3281e-01 L8_l1linf:6.2109e-01 L9_l1linf:5.9375e-01 L10_l1linf:5.8594e-01 L11_l1linf:5.7031e-01 L12_l1linf:5.7812e-01 L1_spectral:3.1943e-02 L2_spectral:3.0819e-02 L3_spectral:3.1466e-02 L4_spectral:3.1479e-02 L5_spectral:3.1722e-02 L6_spectral:3.1834e-02 L7_spectral:3.1684e-02 L8_spectral:3.2046e-02 L9_spectral:3.2152e-02 L10_spectral:3.2175e-02 L11_spectral:3.2346e-02 L12_spectral:3.2112e-02 train_time:256513ms step_avg:42.75ms +[2025-09-11 09:07:03] [Rank 0] step:6001/10000 train_time:257797ms step_avg:42.96ms +[2025-09-11 09:07:03] [Rank 0] step:6001/10000 train_time:257797ms step_avg:42.96ms +[2025-09-11 09:07:03] [Rank 0] step:6021/10000 train_time:258503ms step_avg:42.93ms +[2025-09-11 09:07:03] [Rank 0] step:6021/10000 train_time:258503ms step_avg:42.93ms +[2025-09-11 09:07:04] [Rank 0] step:6041/10000 train_time:259195ms step_avg:42.91ms +[2025-09-11 09:07:04] [Rank 0] step:6041/10000 train_time:259195ms step_avg:42.91ms +[2025-09-11 09:07:05] [Rank 0] step:6061/10000 train_time:259885ms step_avg:42.88ms +[2025-09-11 09:07:05] [Rank 0] step:6061/10000 train_time:259885ms step_avg:42.88ms +[2025-09-11 09:07:05] [Rank 0] step:6081/10000 train_time:260576ms step_avg:42.85ms +[2025-09-11 09:07:05] [Rank 0] step:6081/10000 train_time:260576ms step_avg:42.85ms +[2025-09-11 09:07:06] [Rank 0] step:6101/10000 train_time:261265ms step_avg:42.82ms +[2025-09-11 09:07:06] [Rank 0] step:6101/10000 train_time:261265ms step_avg:42.82ms +[2025-09-11 09:07:07] [Rank 0] step:6121/10000 train_time:261955ms step_avg:42.80ms +[2025-09-11 09:07:07] [Rank 0] step:6121/10000 train_time:261955ms step_avg:42.80ms +[2025-09-11 09:07:07] [Rank 0] step:6141/10000 train_time:262644ms step_avg:42.77ms +[2025-09-11 09:07:07] [Rank 0] step:6141/10000 train_time:262644ms step_avg:42.77ms +[2025-09-11 09:07:08] [Rank 0] step:6161/10000 train_time:263333ms step_avg:42.74ms +[2025-09-11 09:07:08] [Rank 0] step:6161/10000 train_time:263333ms step_avg:42.74ms 
+[2025-09-11 09:07:09] [Rank 0] step:6181/10000 train_time:264021ms step_avg:42.71ms +[2025-09-11 09:07:09] [Rank 0] step:6181/10000 train_time:264021ms step_avg:42.71ms +[2025-09-11 09:07:10] [Rank 0] step:6201/10000 train_time:265214ms step_avg:42.77ms +[2025-09-11 09:07:10] [Rank 0] step:6201/10000 train_time:265214ms step_avg:42.77ms +[2025-09-11 09:07:11] [Rank 0] step:6221/10000 train_time:265904ms step_avg:42.74ms +[2025-09-11 09:07:11] [Rank 0] step:6221/10000 train_time:265904ms step_avg:42.74ms +[2025-09-11 09:07:12] [Rank 0] step:6241/10000 train_time:266593ms step_avg:42.72ms +[2025-09-11 09:07:12] [Rank 0] step:6241/10000 train_time:266593ms step_avg:42.72ms +[2025-09-11 09:07:12] [Rank 0] step:6261/10000 train_time:267551ms step_avg:42.73ms +[2025-09-11 09:07:12] [Rank 0] step:6261/10000 train_time:267551ms step_avg:42.73ms +[2025-09-11 09:07:13] [Rank 0] step:6281/10000 train_time:268240ms step_avg:42.71ms +[2025-09-11 09:07:13] [Rank 0] step:6281/10000 train_time:268240ms step_avg:42.71ms +[2025-09-11 09:07:14] [Rank 0] step:6301/10000 train_time:268928ms step_avg:42.68ms +[2025-09-11 09:07:14] [Rank 0] step:6301/10000 train_time:268928ms step_avg:42.68ms +[2025-09-11 09:07:14] [Rank 0] step:6321/10000 train_time:269620ms step_avg:42.65ms +[2025-09-11 09:07:14] [Rank 0] step:6321/10000 train_time:269620ms step_avg:42.65ms +[2025-09-11 09:07:15] [Rank 0] step:6341/10000 train_time:270310ms step_avg:42.63ms +[2025-09-11 09:07:15] [Rank 0] step:6341/10000 train_time:270310ms step_avg:42.63ms +[2025-09-11 09:07:16] [Rank 0] step:6361/10000 train_time:270999ms step_avg:42.60ms +[2025-09-11 09:07:16] [Rank 0] step:6361/10000 train_time:270999ms step_avg:42.60ms +[2025-09-11 09:07:17] [Rank 0] step:6381/10000 train_time:271688ms step_avg:42.58ms +[2025-09-11 09:07:17] [Rank 0] step:6381/10000 train_time:271688ms step_avg:42.58ms +[2025-09-11 09:07:17] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 09:07:17] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 09:07:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 09:07:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 09:07:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 09:07:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 09:07:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:07:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:07:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 09:07:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 09:07:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 09:07:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 09:07:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 09:07:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 09:07:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 09:07:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 09:07:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 09:07:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 09:07:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 09:07:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 09:07:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 09:07:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 09:07:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 09:07:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 09:07:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 09:07:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 09:07:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 09:07:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 09:07:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 09:07:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 09:07:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 09:07:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 09:07:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 09:07:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 09:07:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 09:07:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 09:07:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 09:07:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 09:07:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 09:07:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 09:07:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 09:07:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 09:07:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 09:07:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 09:07:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:07:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:07:27] [Rank 0] PRINT: step:6400/10000 val_loss:4.3048 total_sharp:1.4052e-04 L1_sharp:3.0858e-03 L2_sharp:3.2778e-04 L3_sharp:2.1595e-04 L4_sharp:3.0698e-04 L5_sharp:4.6826e-04 L6_sharp:4.1639e-04 L7_sharp:4.6395e-04 L8_sharp:7.1723e-04 L9_sharp:8.4664e-04 L10_sharp:8.3102e-04 L11_sharp:1.1561e-03 L12_sharp:6.1193e-03 total_fnorm:6.2750e+01 total_l1_linf:1.3107e+05 total_spectral:3.1625e+01 L1_fnorm:2.2656e+00 L2_fnorm:2.1562e+00 L3_fnorm:2.2344e+00 L4_fnorm:2.2500e+00 L5_fnorm:2.2500e+00 L6_fnorm:2.2656e+00 L7_fnorm:2.2656e+00 L8_fnorm:2.2188e+00 L9_fnorm:2.2656e+00 L10_fnorm:2.2500e+00 L11_fnorm:2.2656e+00 L12_fnorm:2.2500e+00 L1_l1linf:5.5859e-01 L2_l1linf:5.4297e-01 L3_l1linf:5.2344e-01 L4_l1linf:5.3516e-01 L5_l1linf:5.3125e-01 L6_l1linf:5.3906e-01 L7_l1linf:5.4688e-01 L8_l1linf:5.2344e-01 L9_l1linf:5.1953e-01 L10_l1linf:5.0000e-01 L11_l1linf:4.9023e-01 L12_l1linf:5.0000e-01 L1_spectral:2.9437e-02 L2_spectral:2.7804e-02 L3_spectral:2.8856e-02 L4_spectral:2.9149e-02 L5_spectral:2.9103e-02 L6_spectral:2.9197e-02 L7_spectral:2.9319e-02 L8_spectral:2.9100e-02 L9_spectral:2.9579e-02 L10_spectral:2.9276e-02 L11_spectral:2.9450e-02 L12_spectral:2.9225e-02 train_time:272357ms step_avg:42.56ms +[2025-09-11 09:07:27] [Rank 0] PRINT: step:6400/10000 
val_loss:4.3048 total_sharp:1.4052e-04 L1_sharp:3.0858e-03 L2_sharp:3.2778e-04 L3_sharp:2.1595e-04 L4_sharp:3.0698e-04 L5_sharp:4.6826e-04 L6_sharp:4.1639e-04 L7_sharp:4.6395e-04 L8_sharp:7.1723e-04 L9_sharp:8.4664e-04 L10_sharp:8.3102e-04 L11_sharp:1.1561e-03 L12_sharp:6.1193e-03 total_fnorm:6.2750e+01 total_l1_linf:1.3107e+05 total_spectral:3.1625e+01 L1_fnorm:2.2656e+00 L2_fnorm:2.1562e+00 L3_fnorm:2.2344e+00 L4_fnorm:2.2500e+00 L5_fnorm:2.2500e+00 L6_fnorm:2.2656e+00 L7_fnorm:2.2656e+00 L8_fnorm:2.2188e+00 L9_fnorm:2.2656e+00 L10_fnorm:2.2500e+00 L11_fnorm:2.2656e+00 L12_fnorm:2.2500e+00 L1_l1linf:5.5859e-01 L2_l1linf:5.4297e-01 L3_l1linf:5.2344e-01 L4_l1linf:5.3516e-01 L5_l1linf:5.3125e-01 L6_l1linf:5.3906e-01 L7_l1linf:5.4688e-01 L8_l1linf:5.2344e-01 L9_l1linf:5.1953e-01 L10_l1linf:5.0000e-01 L11_l1linf:4.9023e-01 L12_l1linf:5.0000e-01 L1_spectral:2.9437e-02 L2_spectral:2.7804e-02 L3_spectral:2.8856e-02 L4_spectral:2.9149e-02 L5_spectral:2.9103e-02 L6_spectral:2.9197e-02 L7_spectral:2.9319e-02 L8_spectral:2.9100e-02 L9_spectral:2.9579e-02 L10_spectral:2.9276e-02 L11_spectral:2.9450e-02 L12_spectral:2.9225e-02 train_time:272357ms step_avg:42.56ms +[2025-09-11 09:07:28] [Rank 0] step:6401/10000 train_time:273537ms step_avg:42.73ms +[2025-09-11 09:07:28] [Rank 0] step:6401/10000 train_time:273537ms step_avg:42.73ms +[2025-09-11 09:07:29] [Rank 0] step:6421/10000 train_time:274230ms step_avg:42.71ms +[2025-09-11 09:07:29] [Rank 0] step:6421/10000 train_time:274230ms step_avg:42.71ms +[2025-09-11 09:07:30] [Rank 0] step:6441/10000 train_time:274919ms step_avg:42.68ms +[2025-09-11 09:07:30] [Rank 0] step:6441/10000 train_time:274919ms step_avg:42.68ms +[2025-09-11 09:07:30] [Rank 0] step:6461/10000 train_time:275608ms step_avg:42.66ms +[2025-09-11 09:07:30] [Rank 0] step:6461/10000 train_time:275608ms step_avg:42.66ms +[2025-09-11 09:07:31] [Rank 0] step:6481/10000 train_time:276299ms step_avg:42.63ms +[2025-09-11 09:07:31] [Rank 0] step:6481/10000 
train_time:276299ms step_avg:42.63ms +[2025-09-11 09:07:32] [Rank 0] step:6501/10000 train_time:276990ms step_avg:42.61ms +[2025-09-11 09:07:32] [Rank 0] step:6501/10000 train_time:276990ms step_avg:42.61ms +[2025-09-11 09:07:32] [Rank 0] step:6521/10000 train_time:277680ms step_avg:42.58ms +[2025-09-11 09:07:32] [Rank 0] step:6521/10000 train_time:277680ms step_avg:42.58ms +[2025-09-11 09:07:33] [Rank 0] step:6541/10000 train_time:278368ms step_avg:42.56ms +[2025-09-11 09:07:33] [Rank 0] step:6541/10000 train_time:278368ms step_avg:42.56ms +[2025-09-11 09:07:34] [Rank 0] step:6561/10000 train_time:279058ms step_avg:42.53ms +[2025-09-11 09:07:34] [Rank 0] step:6561/10000 train_time:279058ms step_avg:42.53ms +[2025-09-11 09:07:35] [Rank 0] step:6581/10000 train_time:279748ms step_avg:42.51ms +[2025-09-11 09:07:35] [Rank 0] step:6581/10000 train_time:279748ms step_avg:42.51ms +[2025-09-11 09:07:35] [Rank 0] step:6601/10000 train_time:280437ms step_avg:42.48ms +[2025-09-11 09:07:35] [Rank 0] step:6601/10000 train_time:280437ms step_avg:42.48ms +[2025-09-11 09:07:36] [Rank 0] step:6621/10000 train_time:281125ms step_avg:42.46ms +[2025-09-11 09:07:36] [Rank 0] step:6621/10000 train_time:281125ms step_avg:42.46ms +[2025-09-11 09:07:37] [Rank 0] step:6641/10000 train_time:281815ms step_avg:42.44ms +[2025-09-11 09:07:37] [Rank 0] step:6641/10000 train_time:281815ms step_avg:42.44ms +[2025-09-11 09:07:37] [Rank 0] step:6661/10000 train_time:282504ms step_avg:42.41ms +[2025-09-11 09:07:37] [Rank 0] step:6661/10000 train_time:282504ms step_avg:42.41ms +[2025-09-11 09:07:38] [Rank 0] step:6681/10000 train_time:283200ms step_avg:42.39ms +[2025-09-11 09:07:38] [Rank 0] step:6681/10000 train_time:283200ms step_avg:42.39ms +[2025-09-11 09:07:39] [Rank 0] step:6701/10000 train_time:283896ms step_avg:42.37ms +[2025-09-11 09:07:39] [Rank 0] step:6701/10000 train_time:283896ms step_avg:42.37ms +[2025-09-11 09:07:39] [Rank 0] step:6721/10000 train_time:284593ms step_avg:42.34ms 
+[2025-09-11 09:07:39] [Rank 0] step:6721/10000 train_time:284593ms step_avg:42.34ms +[2025-09-11 09:07:40] [Rank 0] step:6741/10000 train_time:285290ms step_avg:42.32ms +[2025-09-11 09:07:40] [Rank 0] step:6741/10000 train_time:285290ms step_avg:42.32ms +[2025-09-11 09:07:41] [Rank 0] step:6761/10000 train_time:285984ms step_avg:42.30ms +[2025-09-11 09:07:41] [Rank 0] step:6761/10000 train_time:285984ms step_avg:42.30ms +[2025-09-11 09:07:41] [Rank 0] step:6781/10000 train_time:286681ms step_avg:42.28ms +[2025-09-11 09:07:41] [Rank 0] step:6781/10000 train_time:286681ms step_avg:42.28ms +[2025-09-11 09:07:42] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 09:07:42] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 09:07:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 09:07:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 09:07:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 09:07:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 09:07:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:07:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:07:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 09:07:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 09:07:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 09:07:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 09:07:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 09:07:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 09:07:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 09:07:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 09:07:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 09:07:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 09:07:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 09:07:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 09:07:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 09:07:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 09:07:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 09:07:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 09:07:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 09:07:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 09:07:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 09:07:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 09:07:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 09:07:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 09:07:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 09:07:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 09:07:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 09:07:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 09:07:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 09:07:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 09:07:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 09:07:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 09:07:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 09:07:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 09:07:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 09:07:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 09:07:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 09:07:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 09:07:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:07:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:07:52] [Rank 0] PRINT: step:6800/10000 val_loss:4.2565 total_sharp:1.1153e-04 L1_sharp:2.7515e-03 L2_sharp:3.2337e-04 L3_sharp:1.5983e-04 L4_sharp:2.6760e-04 L5_sharp:4.4282e-04 L6_sharp:3.9600e-04 L7_sharp:4.0767e-04 L8_sharp:8.2209e-04 L9_sharp:7.9084e-04 L10_sharp:8.5309e-04 L11_sharp:1.1770e-03 L12_sharp:4.6440e-03 total_fnorm:6.0500e+01 total_l1_linf:1.2390e+05 total_spectral:3.0375e+01 L1_fnorm:2.0156e+00 L2_fnorm:1.8984e+00 L3_fnorm:1.9844e+00 L4_fnorm:1.9844e+00 L5_fnorm:1.9844e+00 L6_fnorm:2.0000e+00 L7_fnorm:2.0000e+00 L8_fnorm:1.9609e+00 L9_fnorm:2.0000e+00 L10_fnorm:2.0000e+00 L11_fnorm:1.9922e+00 L12_fnorm:1.9922e+00 L1_l1linf:4.7656e-01 L2_l1linf:4.5703e-01 L3_l1linf:4.4531e-01 L4_l1linf:4.6094e-01 L5_l1linf:4.7070e-01 L6_l1linf:4.6875e-01 L7_l1linf:4.7266e-01 L8_l1linf:4.5508e-01 L9_l1linf:4.4531e-01 L10_l1linf:4.3945e-01 L11_l1linf:4.1797e-01 L12_l1linf:4.2188e-01 L1_spectral:2.6459e-02 L2_spectral:2.4933e-02 L3_spectral:2.6267e-02 L4_spectral:2.6198e-02 L5_spectral:2.6326e-02 L6_spectral:2.6410e-02 L7_spectral:2.6300e-02 L8_spectral:2.6019e-02 L9_spectral:2.6524e-02 L10_spectral:2.6681e-02 L11_spectral:2.6376e-02 L12_spectral:2.6439e-02 train_time:287357ms step_avg:42.26ms +[2025-09-11 09:07:52] [Rank 0] PRINT: step:6800/10000 val_loss:4.2565 total_sharp:1.1153e-04 L1_sharp:2.7515e-03 L2_sharp:3.2337e-04 L3_sharp:1.5983e-04 L4_sharp:2.6760e-04 L5_sharp:4.4282e-04 L6_sharp:3.9600e-04 L7_sharp:4.0767e-04 L8_sharp:8.2209e-04 L9_sharp:7.9084e-04 L10_sharp:8.5309e-04 L11_sharp:1.1770e-03 L12_sharp:4.6440e-03 total_fnorm:6.0500e+01 total_l1_linf:1.2390e+05 total_spectral:3.0375e+01 L1_fnorm:2.0156e+00 L2_fnorm:1.8984e+00 L3_fnorm:1.9844e+00 L4_fnorm:1.9844e+00 L5_fnorm:1.9844e+00 L6_fnorm:2.0000e+00 L7_fnorm:2.0000e+00 L8_fnorm:1.9609e+00 L9_fnorm:2.0000e+00 L10_fnorm:2.0000e+00 L11_fnorm:1.9922e+00 L12_fnorm:1.9922e+00 L1_l1linf:4.7656e-01 L2_l1linf:4.5703e-01 L3_l1linf:4.4531e-01 L4_l1linf:4.6094e-01 L5_l1linf:4.7070e-01 
L6_l1linf:4.6875e-01 L7_l1linf:4.7266e-01 L8_l1linf:4.5508e-01 L9_l1linf:4.4531e-01 L10_l1linf:4.3945e-01 L11_l1linf:4.1797e-01 L12_l1linf:4.2188e-01 L1_spectral:2.6459e-02 L2_spectral:2.4933e-02 L3_spectral:2.6267e-02 L4_spectral:2.6198e-02 L5_spectral:2.6326e-02 L6_spectral:2.6410e-02 L7_spectral:2.6300e-02 L8_spectral:2.6019e-02 L9_spectral:2.6524e-02 L10_spectral:2.6681e-02 L11_spectral:2.6376e-02 L12_spectral:2.6439e-02 train_time:287357ms step_avg:42.26ms +[2025-09-11 09:07:53] [Rank 0] step:6801/10000 train_time:288531ms step_avg:42.42ms +[2025-09-11 09:07:53] [Rank 0] step:6801/10000 train_time:288531ms step_avg:42.42ms +[2025-09-11 09:07:54] [Rank 0] step:6821/10000 train_time:289254ms step_avg:42.41ms +[2025-09-11 09:07:54] [Rank 0] step:6821/10000 train_time:289254ms step_avg:42.41ms +[2025-09-11 09:07:55] [Rank 0] step:6841/10000 train_time:289955ms step_avg:42.38ms +[2025-09-11 09:07:55] [Rank 0] step:6841/10000 train_time:289955ms step_avg:42.38ms +[2025-09-11 09:07:55] [Rank 0] step:6861/10000 train_time:290654ms step_avg:42.36ms +[2025-09-11 09:07:55] [Rank 0] step:6861/10000 train_time:290654ms step_avg:42.36ms +[2025-09-11 09:07:56] [Rank 0] step:6881/10000 train_time:291353ms step_avg:42.34ms +[2025-09-11 09:07:56] [Rank 0] step:6881/10000 train_time:291353ms step_avg:42.34ms +[2025-09-11 09:07:57] [Rank 0] step:6901/10000 train_time:292049ms step_avg:42.32ms +[2025-09-11 09:07:57] [Rank 0] step:6901/10000 train_time:292049ms step_avg:42.32ms +[2025-09-11 09:07:58] [Rank 0] step:6921/10000 train_time:292745ms step_avg:42.30ms +[2025-09-11 09:07:58] [Rank 0] step:6921/10000 train_time:292745ms step_avg:42.30ms +[2025-09-11 09:07:58] [Rank 0] step:6941/10000 train_time:293443ms step_avg:42.28ms +[2025-09-11 09:07:58] [Rank 0] step:6941/10000 train_time:293443ms step_avg:42.28ms +[2025-09-11 09:07:59] [Rank 0] step:6961/10000 train_time:294141ms step_avg:42.26ms +[2025-09-11 09:07:59] [Rank 0] step:6961/10000 train_time:294141ms step_avg:42.26ms 
+[2025-09-11 09:08:00] [Rank 0] step:6981/10000 train_time:294841ms step_avg:42.23ms +[2025-09-11 09:08:00] [Rank 0] step:6981/10000 train_time:294841ms step_avg:42.23ms +[2025-09-11 09:08:00] [Rank 0] step:7001/10000 train_time:295539ms step_avg:42.21ms +[2025-09-11 09:08:00] [Rank 0] step:7001/10000 train_time:295539ms step_avg:42.21ms +[2025-09-11 09:08:01] [Rank 0] step:7021/10000 train_time:296237ms step_avg:42.19ms +[2025-09-11 09:08:01] [Rank 0] step:7021/10000 train_time:296237ms step_avg:42.19ms +[2025-09-11 09:08:02] [Rank 0] step:7041/10000 train_time:296934ms step_avg:42.17ms +[2025-09-11 09:08:02] [Rank 0] step:7041/10000 train_time:296934ms step_avg:42.17ms +[2025-09-11 09:08:02] [Rank 0] step:7061/10000 train_time:297634ms step_avg:42.15ms +[2025-09-11 09:08:02] [Rank 0] step:7061/10000 train_time:297634ms step_avg:42.15ms +[2025-09-11 09:08:03] [Rank 0] step:7081/10000 train_time:298332ms step_avg:42.13ms +[2025-09-11 09:08:03] [Rank 0] step:7081/10000 train_time:298332ms step_avg:42.13ms +[2025-09-11 09:08:04] [Rank 0] step:7101/10000 train_time:299029ms step_avg:42.11ms +[2025-09-11 09:08:04] [Rank 0] step:7101/10000 train_time:299029ms step_avg:42.11ms +[2025-09-11 09:08:05] [Rank 0] step:7121/10000 train_time:299729ms step_avg:42.09ms +[2025-09-11 09:08:05] [Rank 0] step:7121/10000 train_time:299729ms step_avg:42.09ms +[2025-09-11 09:08:05] [Rank 0] step:7141/10000 train_time:300426ms step_avg:42.07ms +[2025-09-11 09:08:05] [Rank 0] step:7141/10000 train_time:300426ms step_avg:42.07ms +[2025-09-11 09:08:06] [Rank 0] step:7161/10000 train_time:301126ms step_avg:42.05ms +[2025-09-11 09:08:06] [Rank 0] step:7161/10000 train_time:301126ms step_avg:42.05ms +[2025-09-11 09:08:07] [Rank 0] step:7181/10000 train_time:301823ms step_avg:42.03ms +[2025-09-11 09:08:07] [Rank 0] step:7181/10000 train_time:301823ms step_avg:42.03ms +[2025-09-11 09:08:07] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 09:08:07] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 09:08:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 09:08:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 09:08:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 09:08:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 09:08:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:08:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:08:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 09:08:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 09:08:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 09:08:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 09:08:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 09:08:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 09:08:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 09:08:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 09:08:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 09:08:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 09:08:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 09:08:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 09:08:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 09:08:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 09:08:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 09:08:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 09:08:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 09:08:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 09:08:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 09:08:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 09:08:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 09:08:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 09:08:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 09:08:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 09:08:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 09:08:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 09:08:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 09:08:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 09:08:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 09:08:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 09:08:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 09:08:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 09:08:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 09:08:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 09:08:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 09:08:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 09:08:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:08:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:08:17] [Rank 0] PRINT: step:7200/10000 val_loss:4.2236 total_sharp:9.9142e-05 L1_sharp:2.2370e-03 L2_sharp:2.0767e-04 L3_sharp:2.3858e-04 L4_sharp:1.4901e-04 L5_sharp:3.4592e-04 L6_sharp:3.2406e-04 L7_sharp:3.7900e-04 L8_sharp:8.0143e-04 L9_sharp:8.2051e-04 L10_sharp:7.2247e-04 L11_sharp:1.1052e-03 L12_sharp:5.3750e-03 total_fnorm:5.3000e+01 total_l1_linf:1.0394e+05 total_spectral:2.6750e+01 L1_fnorm:1.7578e+00 L2_fnorm:1.6562e+00 L3_fnorm:1.7109e+00 L4_fnorm:1.7109e+00 L5_fnorm:1.7109e+00 L6_fnorm:1.7344e+00 L7_fnorm:1.7266e+00 L8_fnorm:1.6875e+00 L9_fnorm:1.7109e+00 L10_fnorm:1.7188e+00 L11_fnorm:1.7109e+00 L12_fnorm:1.7109e+00 L1_l1linf:3.8867e-01 L2_l1linf:3.9062e-01 L3_l1linf:3.8086e-01 L4_l1linf:3.8867e-01 L5_l1linf:3.9258e-01 L6_l1linf:3.8477e-01 L7_l1linf:3.9648e-01 L8_l1linf:3.7500e-01 L9_l1linf:3.5742e-01 L10_l1linf:3.4961e-01 L11_l1linf:3.3594e-01 L12_l1linf:3.5352e-01 L1_spectral:2.3101e-02 L2_spectral:2.2044e-02 L3_spectral:2.3090e-02 L4_spectral:2.3128e-02 L5_spectral:2.3160e-02 L6_spectral:2.3236e-02 L7_spectral:2.3297e-02 L8_spectral:2.2938e-02 L9_spectral:2.3146e-02 L10_spectral:2.3228e-02 L11_spectral:2.3179e-02 L12_spectral:2.3179e-02 train_time:302501ms step_avg:42.01ms +[2025-09-11 09:08:17] [Rank 0] PRINT: step:7200/10000 
val_loss:4.2236 total_sharp:9.9142e-05 L1_sharp:2.2370e-03 L2_sharp:2.0767e-04 L3_sharp:2.3858e-04 L4_sharp:1.4901e-04 L5_sharp:3.4592e-04 L6_sharp:3.2406e-04 L7_sharp:3.7900e-04 L8_sharp:8.0143e-04 L9_sharp:8.2051e-04 L10_sharp:7.2247e-04 L11_sharp:1.1052e-03 L12_sharp:5.3750e-03 total_fnorm:5.3000e+01 total_l1_linf:1.0394e+05 total_spectral:2.6750e+01 L1_fnorm:1.7578e+00 L2_fnorm:1.6562e+00 L3_fnorm:1.7109e+00 L4_fnorm:1.7109e+00 L5_fnorm:1.7109e+00 L6_fnorm:1.7344e+00 L7_fnorm:1.7266e+00 L8_fnorm:1.6875e+00 L9_fnorm:1.7109e+00 L10_fnorm:1.7188e+00 L11_fnorm:1.7109e+00 L12_fnorm:1.7109e+00 L1_l1linf:3.8867e-01 L2_l1linf:3.9062e-01 L3_l1linf:3.8086e-01 L4_l1linf:3.8867e-01 L5_l1linf:3.9258e-01 L6_l1linf:3.8477e-01 L7_l1linf:3.9648e-01 L8_l1linf:3.7500e-01 L9_l1linf:3.5742e-01 L10_l1linf:3.4961e-01 L11_l1linf:3.3594e-01 L12_l1linf:3.5352e-01 L1_spectral:2.3101e-02 L2_spectral:2.2044e-02 L3_spectral:2.3090e-02 L4_spectral:2.3128e-02 L5_spectral:2.3160e-02 L6_spectral:2.3236e-02 L7_spectral:2.3297e-02 L8_spectral:2.2938e-02 L9_spectral:2.3146e-02 L10_spectral:2.3228e-02 L11_spectral:2.3179e-02 L12_spectral:2.3179e-02 train_time:302501ms step_avg:42.01ms +[2025-09-11 09:08:19] [Rank 0] step:7201/10000 train_time:303679ms step_avg:42.17ms +[2025-09-11 09:08:19] [Rank 0] step:7201/10000 train_time:303679ms step_avg:42.17ms +[2025-09-11 09:08:19] [Rank 0] step:7221/10000 train_time:304411ms step_avg:42.16ms +[2025-09-11 09:08:19] [Rank 0] step:7221/10000 train_time:304411ms step_avg:42.16ms +[2025-09-11 09:08:20] [Rank 0] step:7241/10000 train_time:305110ms step_avg:42.14ms +[2025-09-11 09:08:20] [Rank 0] step:7241/10000 train_time:305110ms step_avg:42.14ms +[2025-09-11 09:08:21] [Rank 0] step:7261/10000 train_time:305811ms step_avg:42.12ms +[2025-09-11 09:08:21] [Rank 0] step:7261/10000 train_time:305811ms step_avg:42.12ms +[2025-09-11 09:08:21] [Rank 0] step:7281/10000 train_time:306514ms step_avg:42.10ms +[2025-09-11 09:08:21] [Rank 0] step:7281/10000 
train_time:306514ms step_avg:42.10ms +[2025-09-11 09:08:22] [Rank 0] step:7301/10000 train_time:307211ms step_avg:42.08ms +[2025-09-11 09:08:22] [Rank 0] step:7301/10000 train_time:307211ms step_avg:42.08ms +[2025-09-11 09:08:23] [Rank 0] step:7321/10000 train_time:307909ms step_avg:42.06ms +[2025-09-11 09:08:23] [Rank 0] step:7321/10000 train_time:307909ms step_avg:42.06ms +[2025-09-11 09:08:24] [Rank 0] step:7341/10000 train_time:308609ms step_avg:42.04ms +[2025-09-11 09:08:24] [Rank 0] step:7341/10000 train_time:308609ms step_avg:42.04ms +[2025-09-11 09:08:24] [Rank 0] step:7361/10000 train_time:309308ms step_avg:42.02ms +[2025-09-11 09:08:24] [Rank 0] step:7361/10000 train_time:309308ms step_avg:42.02ms +[2025-09-11 09:08:25] [Rank 0] step:7381/10000 train_time:310008ms step_avg:42.00ms +[2025-09-11 09:08:25] [Rank 0] step:7381/10000 train_time:310008ms step_avg:42.00ms +[2025-09-11 09:08:26] [Rank 0] step:7401/10000 train_time:310707ms step_avg:41.98ms +[2025-09-11 09:08:26] [Rank 0] step:7401/10000 train_time:310707ms step_avg:41.98ms +[2025-09-11 09:08:26] [Rank 0] step:7421/10000 train_time:311404ms step_avg:41.96ms +[2025-09-11 09:08:26] [Rank 0] step:7421/10000 train_time:311404ms step_avg:41.96ms +[2025-09-11 09:08:27] [Rank 0] step:7441/10000 train_time:312103ms step_avg:41.94ms +[2025-09-11 09:08:27] [Rank 0] step:7441/10000 train_time:312103ms step_avg:41.94ms +[2025-09-11 09:08:28] [Rank 0] step:7461/10000 train_time:312802ms step_avg:41.92ms +[2025-09-11 09:08:28] [Rank 0] step:7461/10000 train_time:312802ms step_avg:41.92ms +[2025-09-11 09:08:28] [Rank 0] step:7481/10000 train_time:313503ms step_avg:41.91ms +[2025-09-11 09:08:28] [Rank 0] step:7481/10000 train_time:313503ms step_avg:41.91ms +[2025-09-11 09:08:29] [Rank 0] step:7501/10000 train_time:314203ms step_avg:41.89ms +[2025-09-11 09:08:29] [Rank 0] step:7501/10000 train_time:314203ms step_avg:41.89ms +[2025-09-11 09:08:30] [Rank 0] step:7521/10000 train_time:314903ms step_avg:41.87ms 
+[2025-09-11 09:08:30] [Rank 0] step:7521/10000 train_time:314903ms step_avg:41.87ms +[2025-09-11 09:08:30] [Rank 0] step:7541/10000 train_time:315600ms step_avg:41.85ms +[2025-09-11 09:08:30] [Rank 0] step:7541/10000 train_time:315600ms step_avg:41.85ms +[2025-09-11 09:08:31] [Rank 0] step:7561/10000 train_time:316301ms step_avg:41.83ms +[2025-09-11 09:08:31] [Rank 0] step:7561/10000 train_time:316301ms step_avg:41.83ms +[2025-09-11 09:08:32] [Rank 0] step:7581/10000 train_time:317001ms step_avg:41.82ms +[2025-09-11 09:08:32] [Rank 0] step:7581/10000 train_time:317001ms step_avg:41.82ms +[2025-09-11 09:08:33] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 09:08:33] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 09:08:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 09:08:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 09:08:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 09:08:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 09:08:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:08:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:08:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 09:08:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 09:08:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 09:08:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 09:08:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 09:08:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 09:08:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 09:08:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 09:08:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 09:08:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 09:08:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 09:08:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 09:08:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 09:08:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 09:08:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 09:08:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 09:08:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 09:08:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 09:08:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 09:08:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 09:08:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 09:08:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 09:08:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 09:08:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 09:08:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 09:08:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 09:08:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 09:08:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 09:08:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 09:08:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 09:08:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 09:08:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 09:08:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 09:08:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 09:08:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 09:08:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 09:08:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:08:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:08:44] [Rank 0] PRINT: step:7600/10000 val_loss:4.1835 total_sharp:1.0875e-04 L1_sharp:2.7418e-03 L2_sharp:3.9660e-04 L3_sharp:1.7808e-04 L4_sharp:1.8274e-04 L5_sharp:3.6386e-04 L6_sharp:2.5950e-04 L7_sharp:2.7680e-04 L8_sharp:7.2194e-04 L9_sharp:8.5209e-04 L10_sharp:8.5990e-04 L11_sharp:1.0455e-03 L12_sharp:6.3291e-03 total_fnorm:4.3750e+01 total_l1_linf:8.0384e+04 total_spectral:2.2000e+01 L1_fnorm:1.4922e+00 L2_fnorm:1.3594e+00 L3_fnorm:1.4453e+00 L4_fnorm:1.4375e+00 L5_fnorm:1.4297e+00 L6_fnorm:1.4453e+00 L7_fnorm:1.4375e+00 L8_fnorm:1.3984e+00 L9_fnorm:1.4219e+00 L10_fnorm:1.4219e+00 L11_fnorm:1.4219e+00 L12_fnorm:1.4297e+00 L1_l1linf:3.1250e-01 L2_l1linf:3.0859e-01 L3_l1linf:2.9492e-01 L4_l1linf:3.0273e-01 L5_l1linf:3.0859e-01 L6_l1linf:3.0469e-01 L7_l1linf:3.0859e-01 L8_l1linf:2.9883e-01 L9_l1linf:2.7930e-01 L10_l1linf:2.7344e-01 L11_l1linf:2.6562e-01 L12_l1linf:2.8320e-01 L1_spectral:1.9674e-02 L2_spectral:1.8883e-02 L3_spectral:1.9699e-02 L4_spectral:1.9723e-02 L5_spectral:1.9607e-02 L6_spectral:1.9717e-02 L7_spectral:1.9758e-02 L8_spectral:1.9491e-02 L9_spectral:1.9829e-02 L10_spectral:1.9848e-02 L11_spectral:1.9826e-02 L12_spectral:1.9946e-02 train_time:317681ms step_avg:41.80ms +[2025-09-11 09:08:44] [Rank 0] PRINT: step:7600/10000 val_loss:4.1835 total_sharp:1.0875e-04 L1_sharp:2.7418e-03 L2_sharp:3.9660e-04 L3_sharp:1.7808e-04 L4_sharp:1.8274e-04 L5_sharp:3.6386e-04 L6_sharp:2.5950e-04 L7_sharp:2.7680e-04 L8_sharp:7.2194e-04 L9_sharp:8.5209e-04 L10_sharp:8.5990e-04 L11_sharp:1.0455e-03 L12_sharp:6.3291e-03 total_fnorm:4.3750e+01 total_l1_linf:8.0384e+04 total_spectral:2.2000e+01 L1_fnorm:1.4922e+00 L2_fnorm:1.3594e+00 L3_fnorm:1.4453e+00 L4_fnorm:1.4375e+00 L5_fnorm:1.4297e+00 L6_fnorm:1.4453e+00 L7_fnorm:1.4375e+00 L8_fnorm:1.3984e+00 L9_fnorm:1.4219e+00 L10_fnorm:1.4219e+00 L11_fnorm:1.4219e+00 L12_fnorm:1.4297e+00 L1_l1linf:3.1250e-01 L2_l1linf:3.0859e-01 L3_l1linf:2.9492e-01 L4_l1linf:3.0273e-01 L5_l1linf:3.0859e-01 
L6_l1linf:3.0469e-01 L7_l1linf:3.0859e-01 L8_l1linf:2.9883e-01 L9_l1linf:2.7930e-01 L10_l1linf:2.7344e-01 L11_l1linf:2.6562e-01 L12_l1linf:2.8320e-01 L1_spectral:1.9674e-02 L2_spectral:1.8883e-02 L3_spectral:1.9699e-02 L4_spectral:1.9723e-02 L5_spectral:1.9607e-02 L6_spectral:1.9717e-02 L7_spectral:1.9758e-02 L8_spectral:1.9491e-02 L9_spectral:1.9829e-02 L10_spectral:1.9848e-02 L11_spectral:1.9826e-02 L12_spectral:1.9946e-02 train_time:317681ms step_avg:41.80ms +[2025-09-11 09:08:46] [Rank 0] step:7601/10000 train_time:318863ms step_avg:41.95ms +[2025-09-11 09:08:46] [Rank 0] step:7601/10000 train_time:318863ms step_avg:41.95ms +[2025-09-11 09:08:46] [Rank 0] step:7621/10000 train_time:319587ms step_avg:41.94ms +[2025-09-11 09:08:46] [Rank 0] step:7621/10000 train_time:319587ms step_avg:41.94ms +[2025-09-11 09:08:47] [Rank 0] step:7641/10000 train_time:320292ms step_avg:41.92ms +[2025-09-11 09:08:47] [Rank 0] step:7641/10000 train_time:320292ms step_avg:41.92ms +[2025-09-11 09:08:48] [Rank 0] step:7661/10000 train_time:320991ms step_avg:41.90ms +[2025-09-11 09:08:48] [Rank 0] step:7661/10000 train_time:320991ms step_avg:41.90ms +[2025-09-11 09:08:48] [Rank 0] step:7681/10000 train_time:321691ms step_avg:41.88ms +[2025-09-11 09:08:48] [Rank 0] step:7681/10000 train_time:321691ms step_avg:41.88ms +[2025-09-11 09:08:49] [Rank 0] step:7701/10000 train_time:322392ms step_avg:41.86ms +[2025-09-11 09:08:49] [Rank 0] step:7701/10000 train_time:322392ms step_avg:41.86ms +[2025-09-11 09:08:50] [Rank 0] step:7721/10000 train_time:323091ms step_avg:41.85ms +[2025-09-11 09:08:50] [Rank 0] step:7721/10000 train_time:323091ms step_avg:41.85ms +[2025-09-11 09:08:50] [Rank 0] step:7741/10000 train_time:323791ms step_avg:41.83ms +[2025-09-11 09:08:50] [Rank 0] step:7741/10000 train_time:323791ms step_avg:41.83ms +[2025-09-11 09:08:51] [Rank 0] step:7761/10000 train_time:324491ms step_avg:41.81ms +[2025-09-11 09:08:51] [Rank 0] step:7761/10000 train_time:324491ms step_avg:41.81ms 
+[2025-09-11 09:08:52] [Rank 0] step:7781/10000 train_time:325193ms step_avg:41.79ms +[2025-09-11 09:08:52] [Rank 0] step:7781/10000 train_time:325193ms step_avg:41.79ms +[2025-09-11 09:08:53] [Rank 0] step:7801/10000 train_time:325894ms step_avg:41.78ms +[2025-09-11 09:08:53] [Rank 0] step:7801/10000 train_time:325894ms step_avg:41.78ms +[2025-09-11 09:08:53] [Rank 0] step:7821/10000 train_time:326594ms step_avg:41.76ms +[2025-09-11 09:08:53] [Rank 0] step:7821/10000 train_time:326594ms step_avg:41.76ms +[2025-09-11 09:08:54] [Rank 0] step:7841/10000 train_time:327296ms step_avg:41.74ms +[2025-09-11 09:08:54] [Rank 0] step:7841/10000 train_time:327296ms step_avg:41.74ms +[2025-09-11 09:08:55] [Rank 0] step:7861/10000 train_time:327998ms step_avg:41.72ms +[2025-09-11 09:08:55] [Rank 0] step:7861/10000 train_time:327998ms step_avg:41.72ms +[2025-09-11 09:08:55] [Rank 0] step:7881/10000 train_time:328697ms step_avg:41.71ms +[2025-09-11 09:08:55] [Rank 0] step:7881/10000 train_time:328697ms step_avg:41.71ms +[2025-09-11 09:08:56] [Rank 0] step:7901/10000 train_time:329397ms step_avg:41.69ms +[2025-09-11 09:08:56] [Rank 0] step:7901/10000 train_time:329397ms step_avg:41.69ms +[2025-09-11 09:08:57] [Rank 0] step:7921/10000 train_time:330098ms step_avg:41.67ms +[2025-09-11 09:08:57] [Rank 0] step:7921/10000 train_time:330098ms step_avg:41.67ms +[2025-09-11 09:08:57] [Rank 0] step:7941/10000 train_time:330798ms step_avg:41.66ms +[2025-09-11 09:08:57] [Rank 0] step:7941/10000 train_time:330798ms step_avg:41.66ms +[2025-09-11 09:08:58] [Rank 0] step:7961/10000 train_time:331497ms step_avg:41.64ms +[2025-09-11 09:08:58] [Rank 0] step:7961/10000 train_time:331497ms step_avg:41.64ms +[2025-09-11 09:08:59] [Rank 0] step:7981/10000 train_time:332199ms step_avg:41.62ms +[2025-09-11 09:08:59] [Rank 0] step:7981/10000 train_time:332199ms step_avg:41.62ms +[2025-09-11 09:09:00] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 09:09:00] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 09:09:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 09:09:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 09:09:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 09:09:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 09:09:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:09:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:09:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 09:09:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 09:09:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 09:09:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 09:09:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 09:09:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 09:09:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 09:09:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 09:09:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 09:09:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 09:09:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 09:09:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 09:09:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 09:09:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 09:09:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 09:09:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 09:09:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 09:09:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 09:09:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 09:09:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 09:09:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 09:09:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 09:09:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 09:09:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 09:09:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 09:09:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 09:09:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 09:09:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 09:09:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 09:09:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 09:09:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 09:09:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 09:09:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 09:09:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 09:09:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 09:09:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 09:09:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:09:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:09:09] [Rank 0] PRINT: step:8000/10000 val_loss:4.1608 total_sharp:8.5219e-05 L1_sharp:2.6919e-03 L2_sharp:1.4845e-04 L3_sharp:1.1780e-03 L4_sharp:2.0155e-04 L5_sharp:3.1647e-04 L6_sharp:3.7395e-04 L7_sharp:4.4294e-04 L8_sharp:6.6924e-04 L9_sharp:6.9805e-04 L10_sharp:7.0991e-04 L11_sharp:1.1131e-03 L12_sharp:4.1363e-03 total_fnorm:3.7750e+01 total_l1_linf:6.6048e+04 total_spectral:1.9000e+01 L1_fnorm:1.2344e+00 L2_fnorm:1.1406e+00 L3_fnorm:1.1797e+00 L4_fnorm:1.1875e+00 L5_fnorm:1.1797e+00 L6_fnorm:1.1875e+00 L7_fnorm:1.1875e+00 L8_fnorm:1.1484e+00 L9_fnorm:1.1719e+00 L10_fnorm:1.1641e+00 L11_fnorm:1.1641e+00 L12_fnorm:1.1562e+00 L1_l1linf:2.4023e-01 L2_l1linf:2.3633e-01 L3_l1linf:2.3438e-01 L4_l1linf:2.3828e-01 L5_l1linf:2.3926e-01 L6_l1linf:2.4414e-01 L7_l1linf:2.3828e-01 L8_l1linf:2.2754e-01 L9_l1linf:2.1973e-01 L10_l1linf:2.1484e-01 L11_l1linf:2.0508e-01 L12_l1linf:2.1289e-01 L1_spectral:1.6332e-02 L2_spectral:1.5816e-02 L3_spectral:1.6583e-02 L4_spectral:1.6639e-02 L5_spectral:1.6459e-02 L6_spectral:1.6563e-02 L7_spectral:1.6615e-02 L8_spectral:1.6613e-02 L9_spectral:1.6583e-02 L10_spectral:1.6820e-02 L11_spectral:1.6678e-02 L12_spectral:1.6849e-02 train_time:332878ms step_avg:41.61ms +[2025-09-11 09:09:09] [Rank 0] PRINT: step:8000/10000 
val_loss:4.1608 total_sharp:8.5219e-05 L1_sharp:2.6919e-03 L2_sharp:1.4845e-04 L3_sharp:1.1780e-03 L4_sharp:2.0155e-04 L5_sharp:3.1647e-04 L6_sharp:3.7395e-04 L7_sharp:4.4294e-04 L8_sharp:6.6924e-04 L9_sharp:6.9805e-04 L10_sharp:7.0991e-04 L11_sharp:1.1131e-03 L12_sharp:4.1363e-03 total_fnorm:3.7750e+01 total_l1_linf:6.6048e+04 total_spectral:1.9000e+01 L1_fnorm:1.2344e+00 L2_fnorm:1.1406e+00 L3_fnorm:1.1797e+00 L4_fnorm:1.1875e+00 L5_fnorm:1.1797e+00 L6_fnorm:1.1875e+00 L7_fnorm:1.1875e+00 L8_fnorm:1.1484e+00 L9_fnorm:1.1719e+00 L10_fnorm:1.1641e+00 L11_fnorm:1.1641e+00 L12_fnorm:1.1562e+00 L1_l1linf:2.4023e-01 L2_l1linf:2.3633e-01 L3_l1linf:2.3438e-01 L4_l1linf:2.3828e-01 L5_l1linf:2.3926e-01 L6_l1linf:2.4414e-01 L7_l1linf:2.3828e-01 L8_l1linf:2.2754e-01 L9_l1linf:2.1973e-01 L10_l1linf:2.1484e-01 L11_l1linf:2.0508e-01 L12_l1linf:2.1289e-01 L1_spectral:1.6332e-02 L2_spectral:1.5816e-02 L3_spectral:1.6583e-02 L4_spectral:1.6639e-02 L5_spectral:1.6459e-02 L6_spectral:1.6563e-02 L7_spectral:1.6615e-02 L8_spectral:1.6613e-02 L9_spectral:1.6583e-02 L10_spectral:1.6820e-02 L11_spectral:1.6678e-02 L12_spectral:1.6849e-02 train_time:332878ms step_avg:41.61ms +[2025-09-11 09:09:11] [Rank 0] step:8001/10000 train_time:334054ms step_avg:41.75ms +[2025-09-11 09:09:11] [Rank 0] step:8001/10000 train_time:334054ms step_avg:41.75ms +[2025-09-11 09:09:11] [Rank 0] step:8021/10000 train_time:334795ms step_avg:41.74ms +[2025-09-11 09:09:11] [Rank 0] step:8021/10000 train_time:334795ms step_avg:41.74ms +[2025-09-11 09:09:12] [Rank 0] step:8041/10000 train_time:335495ms step_avg:41.72ms +[2025-09-11 09:09:12] [Rank 0] step:8041/10000 train_time:335495ms step_avg:41.72ms +[2025-09-11 09:09:13] [Rank 0] step:8061/10000 train_time:336198ms step_avg:41.71ms +[2025-09-11 09:09:13] [Rank 0] step:8061/10000 train_time:336198ms step_avg:41.71ms +[2025-09-11 09:09:13] [Rank 0] step:8081/10000 train_time:336896ms step_avg:41.69ms +[2025-09-11 09:09:13] [Rank 0] step:8081/10000 
train_time:336896ms step_avg:41.69ms +[2025-09-11 09:09:14] [Rank 0] step:8101/10000 train_time:337593ms step_avg:41.67ms +[2025-09-11 09:09:14] [Rank 0] step:8101/10000 train_time:337593ms step_avg:41.67ms +[2025-09-11 09:09:15] [Rank 0] step:8121/10000 train_time:338296ms step_avg:41.66ms +[2025-09-11 09:09:15] [Rank 0] step:8121/10000 train_time:338296ms step_avg:41.66ms +[2025-09-11 09:09:16] [Rank 0] step:8141/10000 train_time:339569ms step_avg:41.71ms +[2025-09-11 09:09:16] [Rank 0] step:8141/10000 train_time:339569ms step_avg:41.71ms +[2025-09-11 09:09:17] [Rank 0] step:8161/10000 train_time:340352ms step_avg:41.70ms +[2025-09-11 09:09:17] [Rank 0] step:8161/10000 train_time:340352ms step_avg:41.70ms +[2025-09-11 09:09:18] [Rank 0] step:8181/10000 train_time:341062ms step_avg:41.69ms +[2025-09-11 09:09:18] [Rank 0] step:8181/10000 train_time:341062ms step_avg:41.69ms +[2025-09-11 09:09:19] [Rank 0] step:8201/10000 train_time:342019ms step_avg:41.70ms +[2025-09-11 09:09:19] [Rank 0] step:8201/10000 train_time:342019ms step_avg:41.70ms +[2025-09-11 09:09:19] [Rank 0] step:8221/10000 train_time:342725ms step_avg:41.69ms +[2025-09-11 09:09:19] [Rank 0] step:8221/10000 train_time:342725ms step_avg:41.69ms +[2025-09-11 09:09:20] [Rank 0] step:8241/10000 train_time:343441ms step_avg:41.67ms +[2025-09-11 09:09:20] [Rank 0] step:8241/10000 train_time:343441ms step_avg:41.67ms +[2025-09-11 09:09:21] [Rank 0] step:8261/10000 train_time:344146ms step_avg:41.66ms +[2025-09-11 09:09:21] [Rank 0] step:8261/10000 train_time:344146ms step_avg:41.66ms +[2025-09-11 09:09:21] [Rank 0] step:8281/10000 train_time:344849ms step_avg:41.64ms +[2025-09-11 09:09:21] [Rank 0] step:8281/10000 train_time:344849ms step_avg:41.64ms +[2025-09-11 09:09:22] [Rank 0] step:8301/10000 train_time:345554ms step_avg:41.63ms +[2025-09-11 09:09:22] [Rank 0] step:8301/10000 train_time:345554ms step_avg:41.63ms +[2025-09-11 09:09:23] [Rank 0] step:8321/10000 train_time:346260ms step_avg:41.61ms 
+[2025-09-11 09:09:23] [Rank 0] step:8321/10000 train_time:346260ms step_avg:41.61ms +[2025-09-11 09:09:24] [Rank 0] step:8341/10000 train_time:346972ms step_avg:41.60ms +[2025-09-11 09:09:24] [Rank 0] step:8341/10000 train_time:346972ms step_avg:41.60ms +[2025-09-11 09:09:24] [Rank 0] step:8361/10000 train_time:347676ms step_avg:41.58ms +[2025-09-11 09:09:24] [Rank 0] step:8361/10000 train_time:347676ms step_avg:41.58ms +[2025-09-11 09:09:25] [Rank 0] step:8381/10000 train_time:348385ms step_avg:41.57ms +[2025-09-11 09:09:25] [Rank 0] step:8381/10000 train_time:348385ms step_avg:41.57ms +[2025-09-11 09:09:26] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 09:09:26] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 09:09:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 09:09:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 09:09:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 09:09:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 09:09:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:09:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:09:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 09:09:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 09:09:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 09:09:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 09:09:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 09:09:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 09:09:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 09:09:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 09:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 09:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 09:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 09:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 09:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 09:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 09:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 09:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 09:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 09:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 09:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 09:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 09:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 09:09:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 09:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 09:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 09:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 09:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 09:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 09:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 09:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 09:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 09:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 09:09:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 09:09:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 09:09:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 09:09:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 09:09:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 09:09:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:09:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:09:35] [Rank 0] PRINT: step:8400/10000 val_loss:4.1423 total_sharp:6.5687e-05 L1_sharp:2.5667e-03 L2_sharp:1.3704e-04 L3_sharp:1.7663e-04 L4_sharp:2.6219e-04 L5_sharp:3.2721e-04 L6_sharp:2.2166e-04 L7_sharp:2.2160e-04 L8_sharp:4.9224e-04 L9_sharp:5.7783e-04 L10_sharp:5.5970e-04 L11_sharp:9.0044e-04 L12_sharp:4.0722e-03 total_fnorm:2.9250e+01 total_l1_linf:4.6080e+04 total_spectral:1.4688e+01 L1_fnorm:9.8047e-01 L2_fnorm:8.9453e-01 L3_fnorm:9.2578e-01 L4_fnorm:9.2578e-01 L5_fnorm:9.1406e-01 L6_fnorm:9.2578e-01 L7_fnorm:9.2578e-01 L8_fnorm:8.9844e-01 L9_fnorm:9.1406e-01 L10_fnorm:9.0625e-01 L11_fnorm:9.0625e-01 L12_fnorm:9.0625e-01 L1_l1linf:1.8262e-01 L2_l1linf:1.7285e-01 L3_l1linf:1.7578e-01 L4_l1linf:1.7383e-01 L5_l1linf:1.7188e-01 L6_l1linf:1.7480e-01 L7_l1linf:1.7969e-01 L8_l1linf:1.6309e-01 L9_l1linf:1.5918e-01 L10_l1linf:1.5039e-01 L11_l1linf:1.5527e-01 L12_l1linf:1.5820e-01 L1_spectral:1.3746e-02 L2_spectral:1.2674e-02 L3_spectral:1.3405e-02 L4_spectral:1.3312e-02 L5_spectral:1.3320e-02 L6_spectral:1.3312e-02 L7_spectral:1.3204e-02 L8_spectral:1.3370e-02 L9_spectral:1.3325e-02 L10_spectral:1.3362e-02 L11_spectral:1.3350e-02 L12_spectral:1.3468e-02 train_time:349072ms step_avg:41.56ms +[2025-09-11 09:09:35] [Rank 0] PRINT: step:8400/10000 val_loss:4.1423 total_sharp:6.5687e-05 L1_sharp:2.5667e-03 L2_sharp:1.3704e-04 L3_sharp:1.7663e-04 L4_sharp:2.6219e-04 L5_sharp:3.2721e-04 L6_sharp:2.2166e-04 L7_sharp:2.2160e-04 L8_sharp:4.9224e-04 L9_sharp:5.7783e-04 L10_sharp:5.5970e-04 L11_sharp:9.0044e-04 L12_sharp:4.0722e-03 total_fnorm:2.9250e+01 total_l1_linf:4.6080e+04 total_spectral:1.4688e+01 L1_fnorm:9.8047e-01 L2_fnorm:8.9453e-01 L3_fnorm:9.2578e-01 L4_fnorm:9.2578e-01 L5_fnorm:9.1406e-01 L6_fnorm:9.2578e-01 L7_fnorm:9.2578e-01 L8_fnorm:8.9844e-01 L9_fnorm:9.1406e-01 L10_fnorm:9.0625e-01 L11_fnorm:9.0625e-01 L12_fnorm:9.0625e-01 L1_l1linf:1.8262e-01 L2_l1linf:1.7285e-01 L3_l1linf:1.7578e-01 L4_l1linf:1.7383e-01 L5_l1linf:1.7188e-01 
L6_l1linf:1.7480e-01 L7_l1linf:1.7969e-01 L8_l1linf:1.6309e-01 L9_l1linf:1.5918e-01 L10_l1linf:1.5039e-01 L11_l1linf:1.5527e-01 L12_l1linf:1.5820e-01 L1_spectral:1.3746e-02 L2_spectral:1.2674e-02 L3_spectral:1.3405e-02 L4_spectral:1.3312e-02 L5_spectral:1.3320e-02 L6_spectral:1.3312e-02 L7_spectral:1.3204e-02 L8_spectral:1.3370e-02 L9_spectral:1.3325e-02 L10_spectral:1.3362e-02 L11_spectral:1.3350e-02 L12_spectral:1.3468e-02 train_time:349072ms step_avg:41.56ms +[2025-09-11 09:09:37] [Rank 0] step:8401/10000 train_time:350273ms step_avg:41.69ms +[2025-09-11 09:09:37] [Rank 0] step:8401/10000 train_time:350273ms step_avg:41.69ms +[2025-09-11 09:09:37] [Rank 0] step:8421/10000 train_time:350983ms step_avg:41.68ms +[2025-09-11 09:09:37] [Rank 0] step:8421/10000 train_time:350983ms step_avg:41.68ms +[2025-09-11 09:09:38] [Rank 0] step:8441/10000 train_time:351693ms step_avg:41.66ms +[2025-09-11 09:09:38] [Rank 0] step:8441/10000 train_time:351693ms step_avg:41.66ms +[2025-09-11 09:09:39] [Rank 0] step:8461/10000 train_time:352401ms step_avg:41.65ms +[2025-09-11 09:09:39] [Rank 0] step:8461/10000 train_time:352401ms step_avg:41.65ms +[2025-09-11 09:09:40] [Rank 0] step:8481/10000 train_time:353110ms step_avg:41.64ms +[2025-09-11 09:09:40] [Rank 0] step:8481/10000 train_time:353110ms step_avg:41.64ms +[2025-09-11 09:09:40] [Rank 0] step:8501/10000 train_time:353817ms step_avg:41.62ms +[2025-09-11 09:09:40] [Rank 0] step:8501/10000 train_time:353817ms step_avg:41.62ms +[2025-09-11 09:09:41] [Rank 0] step:8521/10000 train_time:354523ms step_avg:41.61ms +[2025-09-11 09:09:41] [Rank 0] step:8521/10000 train_time:354523ms step_avg:41.61ms +[2025-09-11 09:09:42] [Rank 0] step:8541/10000 train_time:355228ms step_avg:41.59ms +[2025-09-11 09:09:42] [Rank 0] step:8541/10000 train_time:355228ms step_avg:41.59ms +[2025-09-11 09:09:42] [Rank 0] step:8561/10000 train_time:355940ms step_avg:41.58ms +[2025-09-11 09:09:42] [Rank 0] step:8561/10000 train_time:355940ms step_avg:41.58ms 
+[2025-09-11 09:09:43] [Rank 0] step:8581/10000 train_time:356649ms step_avg:41.56ms +[2025-09-11 09:09:43] [Rank 0] step:8581/10000 train_time:356649ms step_avg:41.56ms +[2025-09-11 09:09:44] [Rank 0] step:8601/10000 train_time:357357ms step_avg:41.55ms +[2025-09-11 09:09:44] [Rank 0] step:8601/10000 train_time:357357ms step_avg:41.55ms +[2025-09-11 09:09:45] [Rank 0] step:8621/10000 train_time:358065ms step_avg:41.53ms +[2025-09-11 09:09:45] [Rank 0] step:8621/10000 train_time:358065ms step_avg:41.53ms +[2025-09-11 09:09:45] [Rank 0] step:8641/10000 train_time:358770ms step_avg:41.52ms +[2025-09-11 09:09:45] [Rank 0] step:8641/10000 train_time:358770ms step_avg:41.52ms +[2025-09-11 09:09:46] [Rank 0] step:8661/10000 train_time:359478ms step_avg:41.51ms +[2025-09-11 09:09:46] [Rank 0] step:8661/10000 train_time:359478ms step_avg:41.51ms +[2025-09-11 09:09:47] [Rank 0] step:8681/10000 train_time:360186ms step_avg:41.49ms +[2025-09-11 09:09:47] [Rank 0] step:8681/10000 train_time:360186ms step_avg:41.49ms +[2025-09-11 09:09:47] [Rank 0] step:8701/10000 train_time:360892ms step_avg:41.48ms +[2025-09-11 09:09:47] [Rank 0] step:8701/10000 train_time:360892ms step_avg:41.48ms +[2025-09-11 09:09:48] [Rank 0] step:8721/10000 train_time:361602ms step_avg:41.46ms +[2025-09-11 09:09:48] [Rank 0] step:8721/10000 train_time:361602ms step_avg:41.46ms +[2025-09-11 09:09:49] [Rank 0] step:8741/10000 train_time:362305ms step_avg:41.45ms +[2025-09-11 09:09:49] [Rank 0] step:8741/10000 train_time:362305ms step_avg:41.45ms +[2025-09-11 09:09:49] [Rank 0] step:8761/10000 train_time:363015ms step_avg:41.44ms +[2025-09-11 09:09:49] [Rank 0] step:8761/10000 train_time:363015ms step_avg:41.44ms +[2025-09-11 09:09:50] [Rank 0] step:8781/10000 train_time:363718ms step_avg:41.42ms +[2025-09-11 09:09:50] [Rank 0] step:8781/10000 train_time:363718ms step_avg:41.42ms +[2025-09-11 09:09:51] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 09:09:51] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 09:09:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 09:09:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 09:09:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 09:09:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 09:09:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:09:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:09:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 09:09:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 09:09:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 09:09:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 09:09:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 09:09:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 09:09:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 09:09:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 09:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 09:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 09:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 09:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 09:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 09:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 09:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 09:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 09:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 09:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 09:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 09:09:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 09:09:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 09:09:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 09:09:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 09:09:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 09:09:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 09:09:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 09:09:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 09:09:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 09:09:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 09:09:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 09:10:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 09:10:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 09:10:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 09:10:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 09:10:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 09:10:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 09:10:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:10:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:10:01] [Rank 0] PRINT: step:8800/10000 val_loss:4.1287 total_sharp:6.4943e-05 L1_sharp:2.1640e-03 L2_sharp:2.2992e-04 L3_sharp:1.3321e-04 L4_sharp:8.4757e-05 L5_sharp:1.8355e-04 L6_sharp:2.0756e-04 L7_sharp:2.1473e-04 L8_sharp:4.6504e-04 L9_sharp:4.8427e-04 L10_sharp:5.5663e-04 L11_sharp:8.5428e-04 L12_sharp:4.3934e-03 total_fnorm:2.1750e+01 total_l1_linf:3.0720e+04 total_spectral:1.0938e+01 L1_fnorm:7.3438e-01 L2_fnorm:6.6016e-01 L3_fnorm:6.7969e-01 L4_fnorm:6.8359e-01 L5_fnorm:6.7969e-01 L6_fnorm:6.8750e-01 L7_fnorm:6.8359e-01 L8_fnorm:6.6406e-01 L9_fnorm:6.7188e-01 L10_fnorm:6.7188e-01 L11_fnorm:6.6797e-01 L12_fnorm:6.6797e-01 L1_l1linf:1.1572e-01 L2_l1linf:1.1963e-01 L3_l1linf:1.1865e-01 L4_l1linf:1.1621e-01 L5_l1linf:1.1963e-01 L6_l1linf:1.1768e-01 L7_l1linf:1.1914e-01 L8_l1linf:1.1328e-01 L9_l1linf:1.0938e-01 L10_l1linf:1.0547e-01 L11_l1linf:1.0303e-01 L12_l1linf:1.0547e-01 L1_spectral:1.0794e-02 L2_spectral:9.5404e-03 L3_spectral:1.0080e-02 L4_spectral:1.0070e-02 L5_spectral:9.8588e-03 L6_spectral:9.9396e-03 L7_spectral:9.9362e-03 L8_spectral:1.0100e-02 L9_spectral:1.0062e-02 L10_spectral:9.9676e-03 L11_spectral:1.0068e-02 L12_spectral:1.0118e-02 train_time:364404ms step_avg:41.41ms +[2025-09-11 09:10:01] [Rank 0] PRINT: step:8800/10000 
val_loss:4.1287 total_sharp:6.4943e-05 L1_sharp:2.1640e-03 L2_sharp:2.2992e-04 L3_sharp:1.3321e-04 L4_sharp:8.4757e-05 L5_sharp:1.8355e-04 L6_sharp:2.0756e-04 L7_sharp:2.1473e-04 L8_sharp:4.6504e-04 L9_sharp:4.8427e-04 L10_sharp:5.5663e-04 L11_sharp:8.5428e-04 L12_sharp:4.3934e-03 total_fnorm:2.1750e+01 total_l1_linf:3.0720e+04 total_spectral:1.0938e+01 L1_fnorm:7.3438e-01 L2_fnorm:6.6016e-01 L3_fnorm:6.7969e-01 L4_fnorm:6.8359e-01 L5_fnorm:6.7969e-01 L6_fnorm:6.8750e-01 L7_fnorm:6.8359e-01 L8_fnorm:6.6406e-01 L9_fnorm:6.7188e-01 L10_fnorm:6.7188e-01 L11_fnorm:6.6797e-01 L12_fnorm:6.6797e-01 L1_l1linf:1.1572e-01 L2_l1linf:1.1963e-01 L3_l1linf:1.1865e-01 L4_l1linf:1.1621e-01 L5_l1linf:1.1963e-01 L6_l1linf:1.1768e-01 L7_l1linf:1.1914e-01 L8_l1linf:1.1328e-01 L9_l1linf:1.0938e-01 L10_l1linf:1.0547e-01 L11_l1linf:1.0303e-01 L12_l1linf:1.0547e-01 L1_spectral:1.0794e-02 L2_spectral:9.5404e-03 L3_spectral:1.0080e-02 L4_spectral:1.0070e-02 L5_spectral:9.8588e-03 L6_spectral:9.9396e-03 L7_spectral:9.9362e-03 L8_spectral:1.0100e-02 L9_spectral:1.0062e-02 L10_spectral:9.9676e-03 L11_spectral:1.0068e-02 L12_spectral:1.0118e-02 train_time:364404ms step_avg:41.41ms +[2025-09-11 09:10:02] [Rank 0] step:8801/10000 train_time:365589ms step_avg:41.54ms +[2025-09-11 09:10:02] [Rank 0] step:8801/10000 train_time:365589ms step_avg:41.54ms +[2025-09-11 09:10:03] [Rank 0] step:8821/10000 train_time:366323ms step_avg:41.53ms +[2025-09-11 09:10:03] [Rank 0] step:8821/10000 train_time:366323ms step_avg:41.53ms +[2025-09-11 09:10:03] [Rank 0] step:8841/10000 train_time:367032ms step_avg:41.51ms +[2025-09-11 09:10:03] [Rank 0] step:8841/10000 train_time:367032ms step_avg:41.51ms +[2025-09-11 09:10:04] [Rank 0] step:8861/10000 train_time:367743ms step_avg:41.50ms +[2025-09-11 09:10:04] [Rank 0] step:8861/10000 train_time:367743ms step_avg:41.50ms +[2025-09-11 09:10:05] [Rank 0] step:8881/10000 train_time:368451ms step_avg:41.49ms +[2025-09-11 09:10:05] [Rank 0] step:8881/10000 
train_time:368451ms step_avg:41.49ms +[2025-09-11 09:10:06] [Rank 0] step:8901/10000 train_time:369161ms step_avg:41.47ms +[2025-09-11 09:10:06] [Rank 0] step:8901/10000 train_time:369161ms step_avg:41.47ms +[2025-09-11 09:10:06] [Rank 0] step:8921/10000 train_time:369866ms step_avg:41.46ms +[2025-09-11 09:10:06] [Rank 0] step:8921/10000 train_time:369866ms step_avg:41.46ms +[2025-09-11 09:10:07] [Rank 0] step:8941/10000 train_time:370575ms step_avg:41.45ms +[2025-09-11 09:10:07] [Rank 0] step:8941/10000 train_time:370575ms step_avg:41.45ms +[2025-09-11 09:10:08] [Rank 0] step:8961/10000 train_time:371292ms step_avg:41.43ms +[2025-09-11 09:10:08] [Rank 0] step:8961/10000 train_time:371292ms step_avg:41.43ms +[2025-09-11 09:10:08] [Rank 0] step:8981/10000 train_time:372004ms step_avg:41.42ms +[2025-09-11 09:10:08] [Rank 0] step:8981/10000 train_time:372004ms step_avg:41.42ms +[2025-09-11 09:10:09] [Rank 0] step:9001/10000 train_time:372705ms step_avg:41.41ms +[2025-09-11 09:10:09] [Rank 0] step:9001/10000 train_time:372705ms step_avg:41.41ms +[2025-09-11 09:10:10] [Rank 0] step:9021/10000 train_time:373414ms step_avg:41.39ms +[2025-09-11 09:10:10] [Rank 0] step:9021/10000 train_time:373414ms step_avg:41.39ms +[2025-09-11 09:10:11] [Rank 0] step:9041/10000 train_time:374126ms step_avg:41.38ms +[2025-09-11 09:10:11] [Rank 0] step:9041/10000 train_time:374126ms step_avg:41.38ms +[2025-09-11 09:10:11] [Rank 0] step:9061/10000 train_time:374833ms step_avg:41.37ms +[2025-09-11 09:10:11] [Rank 0] step:9061/10000 train_time:374833ms step_avg:41.37ms +[2025-09-11 09:10:12] [Rank 0] step:9081/10000 train_time:375543ms step_avg:41.35ms +[2025-09-11 09:10:12] [Rank 0] step:9081/10000 train_time:375543ms step_avg:41.35ms +[2025-09-11 09:10:13] [Rank 0] step:9101/10000 train_time:376255ms step_avg:41.34ms +[2025-09-11 09:10:13] [Rank 0] step:9101/10000 train_time:376255ms step_avg:41.34ms +[2025-09-11 09:10:13] [Rank 0] step:9121/10000 train_time:376968ms step_avg:41.33ms 
+[2025-09-11 09:10:13] [Rank 0] step:9121/10000 train_time:376968ms step_avg:41.33ms +[2025-09-11 09:10:14] [Rank 0] step:9141/10000 train_time:377674ms step_avg:41.32ms +[2025-09-11 09:10:14] [Rank 0] step:9141/10000 train_time:377674ms step_avg:41.32ms +[2025-09-11 09:10:15] [Rank 0] step:9161/10000 train_time:378384ms step_avg:41.30ms +[2025-09-11 09:10:15] [Rank 0] step:9161/10000 train_time:378384ms step_avg:41.30ms +[2025-09-11 09:10:15] [Rank 0] step:9181/10000 train_time:379094ms step_avg:41.29ms +[2025-09-11 09:10:15] [Rank 0] step:9181/10000 train_time:379094ms step_avg:41.29ms +[2025-09-11 09:10:16] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 09:10:16] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 09:10:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 09:10:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 09:10:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 09:10:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 09:10:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:10:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:10:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 09:10:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 09:10:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 09:10:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 09:10:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 09:10:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 09:10:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 09:10:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 09:10:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 09:10:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 09:10:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 09:10:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 09:10:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 09:10:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 09:10:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 09:10:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 09:10:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 09:10:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 09:10:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 09:10:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 09:10:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 09:10:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 09:10:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 09:10:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 09:10:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 09:10:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 09:10:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 09:10:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 09:10:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 09:10:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 09:10:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 09:10:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 09:10:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 09:10:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 09:10:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 09:10:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 09:10:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:10:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:10:26] [Rank 0] PRINT: step:9200/10000 val_loss:4.1063 total_sharp:5.5877e-05 L1_sharp:2.1119e-03 L2_sharp:3.9841e-04 L3_sharp:9.2603e-05 L4_sharp:1.9378e-04 L5_sharp:2.4218e-04 L6_sharp:2.2515e-04 L7_sharp:2.5111e-04 L8_sharp:4.8828e-04 L9_sharp:5.4471e-04 L10_sharp:5.0413e-04 L11_sharp:7.9985e-04 L12_sharp:3.5619e-03 total_fnorm:1.5188e+01 total_l1_linf:1.8816e+04 total_spectral:7.6250e+00 L1_fnorm:4.9805e-01 L2_fnorm:4.3359e-01 L3_fnorm:4.4922e-01 L4_fnorm:4.5117e-01 L5_fnorm:4.4727e-01 L6_fnorm:4.4922e-01 L7_fnorm:4.5312e-01 L8_fnorm:4.3945e-01 L9_fnorm:4.4141e-01 L10_fnorm:4.4141e-01 L11_fnorm:4.3945e-01 L12_fnorm:4.4141e-01 L1_l1linf:7.1777e-02 L2_l1linf:6.9336e-02 L3_l1linf:6.9824e-02 L4_l1linf:7.2754e-02 L5_l1linf:7.0312e-02 L6_l1linf:6.8848e-02 L7_l1linf:7.1289e-02 L8_l1linf:6.8848e-02 L9_l1linf:6.3965e-02 L10_l1linf:6.0547e-02 L11_l1linf:5.9814e-02 L12_l1linf:6.3965e-02 L1_spectral:7.9139e-03 L2_spectral:6.3167e-03 L3_spectral:6.7758e-03 L4_spectral:6.7796e-03 L5_spectral:6.5889e-03 L6_spectral:6.7674e-03 L7_spectral:6.6590e-03 L8_spectral:7.0164e-03 L9_spectral:6.8467e-03 L10_spectral:6.7245e-03 L11_spectral:6.7642e-03 L12_spectral:6.8365e-03 train_time:379785ms step_avg:41.28ms +[2025-09-11 09:10:26] [Rank 0] PRINT: step:9200/10000 val_loss:4.1063 total_sharp:5.5877e-05 L1_sharp:2.1119e-03 L2_sharp:3.9841e-04 L3_sharp:9.2603e-05 L4_sharp:1.9378e-04 L5_sharp:2.4218e-04 L6_sharp:2.2515e-04 L7_sharp:2.5111e-04 L8_sharp:4.8828e-04 L9_sharp:5.4471e-04 L10_sharp:5.0413e-04 L11_sharp:7.9985e-04 L12_sharp:3.5619e-03 total_fnorm:1.5188e+01 total_l1_linf:1.8816e+04 total_spectral:7.6250e+00 L1_fnorm:4.9805e-01 L2_fnorm:4.3359e-01 L3_fnorm:4.4922e-01 L4_fnorm:4.5117e-01 L5_fnorm:4.4727e-01 L6_fnorm:4.4922e-01 L7_fnorm:4.5312e-01 L8_fnorm:4.3945e-01 L9_fnorm:4.4141e-01 L10_fnorm:4.4141e-01 L11_fnorm:4.3945e-01 L12_fnorm:4.4141e-01 L1_l1linf:7.1777e-02 L2_l1linf:6.9336e-02 L3_l1linf:6.9824e-02 L4_l1linf:7.2754e-02 L5_l1linf:7.0312e-02 
L6_l1linf:6.8848e-02 L7_l1linf:7.1289e-02 L8_l1linf:6.8848e-02 L9_l1linf:6.3965e-02 L10_l1linf:6.0547e-02 L11_l1linf:5.9814e-02 L12_l1linf:6.3965e-02 L1_spectral:7.9139e-03 L2_spectral:6.3167e-03 L3_spectral:6.7758e-03 L4_spectral:6.7796e-03 L5_spectral:6.5889e-03 L6_spectral:6.7674e-03 L7_spectral:6.6590e-03 L8_spectral:7.0164e-03 L9_spectral:6.8467e-03 L10_spectral:6.7245e-03 L11_spectral:6.7642e-03 L12_spectral:6.8365e-03 train_time:379785ms step_avg:41.28ms +[2025-09-11 09:10:28] [Rank 0] step:9201/10000 train_time:380989ms step_avg:41.41ms +[2025-09-11 09:10:28] [Rank 0] step:9201/10000 train_time:380989ms step_avg:41.41ms +[2025-09-11 09:10:28] [Rank 0] step:9221/10000 train_time:381731ms step_avg:41.40ms +[2025-09-11 09:10:28] [Rank 0] step:9221/10000 train_time:381731ms step_avg:41.40ms +[2025-09-11 09:10:29] [Rank 0] step:9241/10000 train_time:382439ms step_avg:41.39ms +[2025-09-11 09:10:29] [Rank 0] step:9241/10000 train_time:382439ms step_avg:41.39ms +[2025-09-11 09:10:30] [Rank 0] step:9261/10000 train_time:383149ms step_avg:41.37ms +[2025-09-11 09:10:30] [Rank 0] step:9261/10000 train_time:383149ms step_avg:41.37ms +[2025-09-11 09:10:30] [Rank 0] step:9281/10000 train_time:383861ms step_avg:41.36ms +[2025-09-11 09:10:30] [Rank 0] step:9281/10000 train_time:383861ms step_avg:41.36ms +[2025-09-11 09:10:31] [Rank 0] step:9301/10000 train_time:384568ms step_avg:41.35ms +[2025-09-11 09:10:31] [Rank 0] step:9301/10000 train_time:384568ms step_avg:41.35ms +[2025-09-11 09:10:32] [Rank 0] step:9321/10000 train_time:385279ms step_avg:41.33ms +[2025-09-11 09:10:32] [Rank 0] step:9321/10000 train_time:385279ms step_avg:41.33ms +[2025-09-11 09:10:33] [Rank 0] step:9341/10000 train_time:385983ms step_avg:41.32ms +[2025-09-11 09:10:33] [Rank 0] step:9341/10000 train_time:385983ms step_avg:41.32ms +[2025-09-11 09:10:33] [Rank 0] step:9361/10000 train_time:386689ms step_avg:41.31ms +[2025-09-11 09:10:33] [Rank 0] step:9361/10000 train_time:386689ms step_avg:41.31ms 
+[2025-09-11 09:10:34] [Rank 0] step:9381/10000 train_time:387396ms step_avg:41.30ms +[2025-09-11 09:10:34] [Rank 0] step:9381/10000 train_time:387396ms step_avg:41.30ms +[2025-09-11 09:10:35] [Rank 0] step:9401/10000 train_time:388107ms step_avg:41.28ms +[2025-09-11 09:10:35] [Rank 0] step:9401/10000 train_time:388107ms step_avg:41.28ms +[2025-09-11 09:10:35] [Rank 0] step:9421/10000 train_time:388817ms step_avg:41.27ms +[2025-09-11 09:10:35] [Rank 0] step:9421/10000 train_time:388817ms step_avg:41.27ms +[2025-09-11 09:10:36] [Rank 0] step:9441/10000 train_time:389530ms step_avg:41.26ms +[2025-09-11 09:10:36] [Rank 0] step:9441/10000 train_time:389530ms step_avg:41.26ms +[2025-09-11 09:10:37] [Rank 0] step:9461/10000 train_time:390238ms step_avg:41.25ms +[2025-09-11 09:10:37] [Rank 0] step:9461/10000 train_time:390238ms step_avg:41.25ms +[2025-09-11 09:10:37] [Rank 0] step:9481/10000 train_time:390949ms step_avg:41.23ms +[2025-09-11 09:10:37] [Rank 0] step:9481/10000 train_time:390949ms step_avg:41.23ms +[2025-09-11 09:10:38] [Rank 0] step:9501/10000 train_time:391659ms step_avg:41.22ms +[2025-09-11 09:10:38] [Rank 0] step:9501/10000 train_time:391659ms step_avg:41.22ms +[2025-09-11 09:10:39] [Rank 0] step:9521/10000 train_time:392370ms step_avg:41.21ms +[2025-09-11 09:10:39] [Rank 0] step:9521/10000 train_time:392370ms step_avg:41.21ms +[2025-09-11 09:10:40] [Rank 0] step:9541/10000 train_time:393076ms step_avg:41.20ms +[2025-09-11 09:10:40] [Rank 0] step:9541/10000 train_time:393076ms step_avg:41.20ms +[2025-09-11 09:10:40] [Rank 0] step:9561/10000 train_time:393784ms step_avg:41.19ms +[2025-09-11 09:10:40] [Rank 0] step:9561/10000 train_time:393784ms step_avg:41.19ms +[2025-09-11 09:10:41] [Rank 0] step:9581/10000 train_time:394495ms step_avg:41.17ms +[2025-09-11 09:10:41] [Rank 0] step:9581/10000 train_time:394495ms step_avg:41.17ms +[2025-09-11 09:10:42] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 09:10:42] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 09:10:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 09:10:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 09:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 09:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 09:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 09:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 09:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 09:10:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 09:10:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 09:10:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 09:10:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 09:10:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 09:10:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 09:10:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 09:10:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 09:10:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 09:10:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 09:10:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 09:10:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 09:10:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 09:10:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 09:10:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 09:10:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 09:10:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 09:10:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 09:10:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 09:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 09:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 09:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 09:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 09:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 09:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 09:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 09:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 09:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 09:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 09:10:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 09:10:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 09:10:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 09:10:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 09:10:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:10:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:10:51] [Rank 0] PRINT: step:9600/10000 val_loss:4.0954 total_sharp:3.3306e-05 L1_sharp:2.0920e-03 L2_sharp:1.5408e-04 L3_sharp:1.0498e-04 L4_sharp:4.9786e-05 L5_sharp:1.5284e-04 L6_sharp:7.8557e-05 L7_sharp:1.6839e-04 L8_sharp:3.5989e-04 L9_sharp:3.5863e-04 L10_sharp:3.6469e-04 L11_sharp:5.1578e-04 L12_sharp:2.1596e-03 total_fnorm:8.7500e+00 total_l1_linf:9.1520e+03 total_spectral:4.4062e+00 L1_fnorm:2.8906e-01 L2_fnorm:2.4609e-01 L3_fnorm:2.5586e-01 L4_fnorm:2.5391e-01 L5_fnorm:2.5391e-01 L6_fnorm:2.5586e-01 L7_fnorm:2.5781e-01 L8_fnorm:2.4805e-01 L9_fnorm:2.5000e-01 L10_fnorm:2.5000e-01 L11_fnorm:2.4902e-01 L12_fnorm:2.5195e-01 L1_l1linf:3.4912e-02 L2_l1linf:3.2715e-02 L3_l1linf:3.2227e-02 L4_l1linf:3.2959e-02 L5_l1linf:3.3447e-02 L6_l1linf:3.3691e-02 L7_l1linf:3.4912e-02 L8_l1linf:3.3203e-02 L9_l1linf:2.9541e-02 L10_l1linf:3.0151e-02 L11_l1linf:2.8442e-02 L12_l1linf:3.0396e-02 L1_spectral:4.8089e-03 L2_spectral:3.7002e-03 L3_spectral:3.9519e-03 L4_spectral:3.9748e-03 L5_spectral:3.8884e-03 L6_spectral:3.8886e-03 L7_spectral:3.8646e-03 L8_spectral:4.1719e-03 L9_spectral:3.9889e-03 L10_spectral:3.9230e-03 L11_spectral:3.9678e-03 L12_spectral:4.0045e-03 train_time:395183ms step_avg:41.16ms +[2025-09-11 09:10:51] [Rank 0] PRINT: step:9600/10000 
val_loss:4.0954 total_sharp:3.3306e-05 L1_sharp:2.0920e-03 L2_sharp:1.5408e-04 L3_sharp:1.0498e-04 L4_sharp:4.9786e-05 L5_sharp:1.5284e-04 L6_sharp:7.8557e-05 L7_sharp:1.6839e-04 L8_sharp:3.5989e-04 L9_sharp:3.5863e-04 L10_sharp:3.6469e-04 L11_sharp:5.1578e-04 L12_sharp:2.1596e-03 total_fnorm:8.7500e+00 total_l1_linf:9.1520e+03 total_spectral:4.4062e+00 L1_fnorm:2.8906e-01 L2_fnorm:2.4609e-01 L3_fnorm:2.5586e-01 L4_fnorm:2.5391e-01 L5_fnorm:2.5391e-01 L6_fnorm:2.5586e-01 L7_fnorm:2.5781e-01 L8_fnorm:2.4805e-01 L9_fnorm:2.5000e-01 L10_fnorm:2.5000e-01 L11_fnorm:2.4902e-01 L12_fnorm:2.5195e-01 L1_l1linf:3.4912e-02 L2_l1linf:3.2715e-02 L3_l1linf:3.2227e-02 L4_l1linf:3.2959e-02 L5_l1linf:3.3447e-02 L6_l1linf:3.3691e-02 L7_l1linf:3.4912e-02 L8_l1linf:3.3203e-02 L9_l1linf:2.9541e-02 L10_l1linf:3.0151e-02 L11_l1linf:2.8442e-02 L12_l1linf:3.0396e-02 L1_spectral:4.8089e-03 L2_spectral:3.7002e-03 L3_spectral:3.9519e-03 L4_spectral:3.9748e-03 L5_spectral:3.8884e-03 L6_spectral:3.8886e-03 L7_spectral:3.8646e-03 L8_spectral:4.1719e-03 L9_spectral:3.9889e-03 L10_spectral:3.9230e-03 L11_spectral:3.9678e-03 L12_spectral:4.0045e-03 train_time:395183ms step_avg:41.16ms +[2025-09-11 09:10:53] [Rank 0] step:9601/10000 train_time:396386ms step_avg:41.29ms +[2025-09-11 09:10:53] [Rank 0] step:9601/10000 train_time:396386ms step_avg:41.29ms +[2025-09-11 09:10:53] [Rank 0] step:9621/10000 train_time:397121ms step_avg:41.28ms +[2025-09-11 09:10:53] [Rank 0] step:9621/10000 train_time:397121ms step_avg:41.28ms +[2025-09-11 09:10:54] [Rank 0] step:9641/10000 train_time:397836ms step_avg:41.26ms +[2025-09-11 09:10:54] [Rank 0] step:9641/10000 train_time:397836ms step_avg:41.26ms +[2025-09-11 09:10:55] [Rank 0] step:9661/10000 train_time:398559ms step_avg:41.25ms +[2025-09-11 09:10:55] [Rank 0] step:9661/10000 train_time:398559ms step_avg:41.25ms +[2025-09-11 09:10:56] [Rank 0] step:9681/10000 train_time:399273ms step_avg:41.24ms +[2025-09-11 09:10:56] [Rank 0] step:9681/10000 
train_time:399273ms step_avg:41.24ms +[2025-09-11 09:10:56] [Rank 0] step:9701/10000 train_time:399989ms step_avg:41.23ms +[2025-09-11 09:10:56] [Rank 0] step:9701/10000 train_time:399989ms step_avg:41.23ms +[2025-09-11 09:10:57] [Rank 0] step:9721/10000 train_time:400709ms step_avg:41.22ms +[2025-09-11 09:10:57] [Rank 0] step:9721/10000 train_time:400709ms step_avg:41.22ms +[2025-09-11 09:10:58] [Rank 0] step:9741/10000 train_time:401427ms step_avg:41.21ms +[2025-09-11 09:10:58] [Rank 0] step:9741/10000 train_time:401427ms step_avg:41.21ms +[2025-09-11 09:10:58] [Rank 0] step:9761/10000 train_time:402143ms step_avg:41.20ms +[2025-09-11 09:10:58] [Rank 0] step:9761/10000 train_time:402143ms step_avg:41.20ms +[2025-09-11 09:10:59] [Rank 0] step:9781/10000 train_time:402859ms step_avg:41.19ms +[2025-09-11 09:10:59] [Rank 0] step:9781/10000 train_time:402859ms step_avg:41.19ms +[2025-09-11 09:11:00] [Rank 0] step:9801/10000 train_time:403579ms step_avg:41.18ms +[2025-09-11 09:11:00] [Rank 0] step:9801/10000 train_time:403579ms step_avg:41.18ms +[2025-09-11 09:11:01] [Rank 0] step:9821/10000 train_time:404297ms step_avg:41.17ms +[2025-09-11 09:11:01] [Rank 0] step:9821/10000 train_time:404297ms step_avg:41.17ms +[2025-09-11 09:11:01] [Rank 0] step:9841/10000 train_time:405017ms step_avg:41.16ms +[2025-09-11 09:11:01] [Rank 0] step:9841/10000 train_time:405017ms step_avg:41.16ms +[2025-09-11 09:11:02] [Rank 0] step:9861/10000 train_time:405735ms step_avg:41.15ms +[2025-09-11 09:11:02] [Rank 0] step:9861/10000 train_time:405735ms step_avg:41.15ms +[2025-09-11 09:11:03] [Rank 0] step:9881/10000 train_time:406452ms step_avg:41.13ms +[2025-09-11 09:11:03] [Rank 0] step:9881/10000 train_time:406452ms step_avg:41.13ms +[2025-09-11 09:11:03] [Rank 0] step:9901/10000 train_time:407166ms step_avg:41.12ms +[2025-09-11 09:11:03] [Rank 0] step:9901/10000 train_time:407166ms step_avg:41.12ms +[2025-09-11 09:11:04] [Rank 0] step:9921/10000 train_time:407882ms step_avg:41.11ms 
+[2025-09-11 09:11:04] [Rank 0] step:9921/10000 train_time:407882ms step_avg:41.11ms +[2025-09-11 09:11:05] [Rank 0] step:9941/10000 train_time:408603ms step_avg:41.10ms +[2025-09-11 09:11:05] [Rank 0] step:9941/10000 train_time:408603ms step_avg:41.10ms +[2025-09-11 09:11:06] [Rank 0] step:9961/10000 train_time:409324ms step_avg:41.09ms +[2025-09-11 09:11:06] [Rank 0] step:9961/10000 train_time:409324ms step_avg:41.09ms +[2025-09-11 09:11:06] [Rank 0] step:9981/10000 train_time:410041ms step_avg:41.08ms +[2025-09-11 09:11:06] [Rank 0] step:9981/10000 train_time:410041ms step_avg:41.08ms +[2025-09-11 09:11:07] [Rank 0] step:10000/10000 train_time:410730ms step_avg:41.07ms +[2025-09-11 09:11:07] [Rank 0] step:10000/10000 train_time:410730ms step_avg:41.07ms +[2025-09-11 09:11:07] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 09:11:07] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 09:11:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 09:11:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 09:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 09:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 09:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 09:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 09:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 09:11:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 09:11:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 09:11:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 09:11:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 09:11:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 09:11:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 09:11:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 09:11:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 09:11:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 09:11:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 09:11:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 09:11:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 09:11:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 09:11:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 09:11:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 09:11:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 09:11:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 09:11:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 09:11:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 09:11:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 09:11:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 09:11:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 09:11:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 09:11:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 09:11:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 09:11:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 09:11:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 09:11:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 09:11:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 09:11:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 09:11:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 09:11:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 09:11:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 09:11:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:11:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:11:17] [Rank 0] PRINT: step:10000/10000 val_loss:4.0923 total_sharp:2.2442e-05 L1_sharp:1.4314e-03 L2_sharp:1.1427e-04 L3_sharp:9.6943e-05 L4_sharp:1.0730e-04 L5_sharp:6.9193e-05 L6_sharp:1.1870e-04 L7_sharp:1.2364e-04 L8_sharp:2.7642e-04 L9_sharp:2.8426e-04 L10_sharp:2.8060e-04 L11_sharp:3.8417e-04 L12_sharp:1.8372e-03 total_fnorm:3.3281e+00 total_l1_linf:2.5120e+03 total_spectral:1.6797e+00 L1_fnorm:1.1182e-01 L2_fnorm:9.4238e-02 L3_fnorm:9.7656e-02 L4_fnorm:9.8145e-02 L5_fnorm:9.8145e-02 L6_fnorm:9.9121e-02 L7_fnorm:9.9609e-02 L8_fnorm:9.6191e-02 L9_fnorm:9.7168e-02 L10_fnorm:9.6680e-02 L11_fnorm:9.6191e-02 L12_fnorm:9.7168e-02 L1_l1linf:1.1353e-02 L2_l1linf:1.0132e-02 L3_l1linf:1.0254e-02 L4_l1linf:1.0132e-02 L5_l1linf:1.0010e-02 L6_l1linf:1.0071e-02 L7_l1linf:1.1047e-02 L8_l1linf:1.0437e-02 L9_l1linf:9.0942e-03 L10_l1linf:8.9111e-03 L11_l1linf:8.7280e-03 L12_l1linf:9.6436e-03 L1_spectral:1.9339e-03 L2_spectral:1.4292e-03 L3_spectral:1.5413e-03 L4_spectral:1.5689e-03 L5_spectral:1.5323e-03 L6_spectral:1.5241e-03 L7_spectral:1.5282e-03 L8_spectral:1.6551e-03 L9_spectral:1.5972e-03 L10_spectral:1.5728e-03 L11_spectral:1.5808e-03 L12_spectral:1.5896e-03 train_time:410751ms step_avg:41.08ms +[2025-09-11 09:11:17] [Rank 0] PRINT: step:10000/10000 val_loss:4.0923 total_sharp:2.2442e-05 L1_sharp:1.4314e-03 L2_sharp:1.1427e-04 L3_sharp:9.6943e-05 L4_sharp:1.0730e-04 L5_sharp:6.9193e-05 L6_sharp:1.1870e-04 L7_sharp:1.2364e-04 L8_sharp:2.7642e-04 L9_sharp:2.8426e-04 L10_sharp:2.8060e-04 L11_sharp:3.8417e-04 L12_sharp:1.8372e-03 total_fnorm:3.3281e+00 total_l1_linf:2.5120e+03 total_spectral:1.6797e+00 L1_fnorm:1.1182e-01 L2_fnorm:9.4238e-02 L3_fnorm:9.7656e-02 L4_fnorm:9.8145e-02 L5_fnorm:9.8145e-02 L6_fnorm:9.9121e-02 L7_fnorm:9.9609e-02 L8_fnorm:9.6191e-02 L9_fnorm:9.7168e-02 L10_fnorm:9.6680e-02 L11_fnorm:9.6191e-02 L12_fnorm:9.7168e-02 L1_l1linf:1.1353e-02 L2_l1linf:1.0132e-02 L3_l1linf:1.0254e-02 L4_l1linf:1.0132e-02 L5_l1linf:1.0010e-02 
L6_l1linf:1.0071e-02 L7_l1linf:1.1047e-02 L8_l1linf:1.0437e-02 L9_l1linf:9.0942e-03 L10_l1linf:8.9111e-03 L11_l1linf:8.7280e-03 L12_l1linf:9.6436e-03 L1_spectral:1.9339e-03 L2_spectral:1.4292e-03 L3_spectral:1.5413e-03 L4_spectral:1.5689e-03 L5_spectral:1.5323e-03 L6_spectral:1.5241e-03 L7_spectral:1.5282e-03 L8_spectral:1.6551e-03 L9_spectral:1.5972e-03 L10_spectral:1.5728e-03 L11_spectral:1.5808e-03 L12_spectral:1.5896e-03 train_time:410751ms step_avg:41.08ms +[2025-09-11 09:11:17] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 09:11:17 2025 --- +[2025-09-11 09:11:17] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 09:11:17 2025 --- +[2025-09-11 09:11:17] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 09:11:17] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.05_seed_44/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.05_seed_44/config.json new file mode 100644 index 0000000000000000000000000000000000000000..abd9d10b53a06eda52c3cb28bfbc0ec9c0b8567b --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.05_seed_44/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 44, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.02, + "muon_lr": 0.05, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "13626da7-1abe-4a73-99bd-417003109bf5", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.05_seed_44/training_log_13626da7-1abe-4a73-99bd-417003109bf5.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.05_seed_44/training_log_13626da7-1abe-4a73-99bd-417003109bf5.txt new file mode 100644 index 0000000000000000000000000000000000000000..ab80fbceeba8f53d59385e643039f28899717206 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.05_seed_44/training_log_13626da7-1abe-4a73-99bd-417003109bf5.txt @@ -0,0 +1,4264 @@ +[2025-09-11 08:44:45] [Rank 0] PRINT: --- Script Start: Thu Sep 11 08:44:45 2025 --- +[2025-09-11 08:44:45] [Rank 0] PRINT: --- Script Start: Thu Sep 11 08:44:45 2025 --- +[2025-09-11 08:44:45] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.02, muon_lr=0.05, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 08:44:45] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.02, muon_lr=0.05, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 08:44:45] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 08:44:45] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 08:44:45] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 08:44:45] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 08:44:45] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.05_seed_44 +[2025-09-11 08:44:45] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.05_seed_44 +[2025-09-11 08:44:45] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import 
dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, 
"unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." 
+ "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention 
sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
def format_comprehensive_results(results):
    """
    Format the comprehensive sharpness-analysis results for logging.

    Args:
        results: dict mapping metric names (e.g. "total_sharpness",
            "layer_3_update_fnorm") to float values, as produced by the
            sharpness analysis pass.

    Returns:
        str: every present metric rendered as a "tag:value" token in
        scientific notation (.4e), joined by single spaces. Missing keys
        are simply omitted; an empty dict yields "".
    """
    NUM_LAYERS = 12  # model depth used by the per-layer metric keys

    def _layer_metrics(key_template, tag_template):
        # Collect the per-layer metrics (layers 1..NUM_LAYERS) present in
        # `results`, each formatted as "tagN:value".
        parts = []
        for i in range(1, NUM_LAYERS + 1):
            key = key_template.format(i)
            if key in results:
                parts.append(f"{tag_template.format(i)}:{results[key]:.4e}")
        return parts

    log_parts = []

    # Total sharpness
    if 'total_sharpness' in results:
        log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}")

    # Layer-wise sharpness
    layer_sharpness = _layer_metrics("layer_{}_sharpness", "L{}_sharp")
    if layer_sharpness:
        log_parts.append(" ".join(layer_sharpness))

    # Total update norms (Frobenius, Max-of-Max / l1->linf, Spectral)
    total_norms = []
    for key, tag in (("total_update_fnorm", "total_fnorm"),
                     ("total_l1_linf_norm", "total_l1_linf"),
                     ("total_spectral_norm", "total_spectral")):
        if key in results:
            total_norms.append(f"{tag}:{results[key]:.4e}")
    if total_norms:
        log_parts.append(" ".join(total_norms))

    # Layer-wise update norms: Frobenius, then l1->linf, then spectral —
    # same emission order as the original hand-unrolled version.
    for key_tpl, tag_tpl in (("layer_{}_update_fnorm", "L{}_fnorm"),
                             ("layer_{}_max_l1_linf_norm", "L{}_l1linf"),
                             ("layer_{}_max_spectral_norm", "L{}_spectral")):
        group = _layer_metrics(key_tpl, tag_tpl)
        if group:
            log_parts.append(" ".join(group))

    return " ".join(log_parts)
print0("PRINT: Starting training...", console=True)
train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
training_time_ms = 0
torch.cuda.synchronize()
t0 = time.perf_counter()  # wall-clock anchor for the current training segment
train_steps = args.num_iterations

# One extra iteration (train_steps + 1) so the final pass runs validation /
# checkpointing only and then breaks before doing another optimizer step.
for step in range(train_steps + 1):
    last_step = (step == train_steps)

    # --------------- VALIDATION SECTION -----------------
    # Validate at the configured interval (step > 0) and on the very last step.
    if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0):
        # Timing is paused across validation: accumulate the segment, then
        # reset t0 after the (possibly slow) sharpness analysis below.
        torch.cuda.synchronize()
        current_run_time = 1000 * (time.perf_counter() - t0)
        training_time_ms += current_run_time

        # --- 1. Compute baseline validation loss ---
        print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True)
        val_loss_item = calculate_validation_loss(
            model_compiled,
            args.val_files,
            world_size * args.val_seq_len,
            args.val_tokens,
            step,
            get_window_size_blocks(step)
        )

        # --- 2. Comprehensive sharpness analysis ---
        # Only the parameterizations with per-matrix q_w/k_w/v_w support it.
        sharpness_log_str = ""
        if exp_args.model_parameterization in ["qkvo", "gated"]:
            comprehensive_results = calculate_comprehensive_sharpness(
                model, model_compiled, optimizers, step, args, rank, world_size,
                print0, get_window_size_blocks, distributed_data_generator
            )
            sharpness_log_str = format_comprehensive_results(comprehensive_results)
        else:
            print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True)

        # --- 3. Logging ---
        avg_step_time = training_time_ms / max(step, 1)  # max() guards step == 0
        print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)

        # --- 4. Reset timer for the next training segment ---
        torch.cuda.synchronize()
        t0 = time.perf_counter()

    if last_step:
        # Optional final checkpoint (master rank only), then exit the loop.
        if master_process and args.save_checkpoint:
            if run_dir_path_str:  # set only on the master process during log setup
                checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
                checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
                checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
                log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(),
                                      optimizers=[opt.state_dict() for opt in optimizers])
                torch.save(log_checkpoint, str(checkpoint_path))
                print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
            else:
                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
        break

    # --------------- TRAINING SECTION -----------------
    try:
        inputs, targets = next(train_loader)
    except StopIteration:
        # NOTE(review): the loader cycles shards forever, so this path only
        # triggers if the generator itself raises — kept as a safety net.
        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
        break

    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
    loss_train.backward()

    # Manual gradient averaging across ranks (no DDP wrapper is used).
    for param in model_compiled.parameters():
        if param.grad is not None:
            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)

    # Scale every group's LR by the shared schedule multiplier.
    current_lr_val = get_lr(step)
    for opt in optimizers:
        for group in opt.param_groups:
            group["lr"] = group["initial_lr"] * current_lr_val

    # Muon momentum warmup: 0.85 -> 0.95 over the first 300 steps
    # (only when a Muon optimizer exists for the current mode).
    if optimizer2 is not None:
        for group in optimizer2.param_groups:
            frac = min(step / 300, 1)
            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95

    for opt in optimizers:
        opt.step()

    model_compiled.zero_grad(set_to_none=True)

    # Lightweight progress logging every 20 steps (skips step 0).
    if step > 0 and (step % 20 == 0 or step == train_steps -1) :
        # Segment time since the last validation / t0 reset, added to the
        # cumulative total for an approximate running wall-clock figure.
        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
        approx_total_training_time_ms = training_time_ms + current_segment_time_ms

        total_tokens_in_batch = args.train_seq_len * world_size
        # NOTE(review): train_loss_per_token is computed but never logged.
        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()

        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)

print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
def set_seed(seed):
    """Seed every RNG family this script touches (python, numpy, torch CPU,
    and all CUDA devices when available) for reproducible runs."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    # Flush immediately so the seed shows up in captured stdout on every rank.
    print(f"PRINT: Set seed to {seed}", flush=True)
# -----------------------------------------------------------------------------
# Our own simple Distributed Data Loader (KEEP AS IS)
def _load_data_shard(file: Path):
    """Load one tokenized .bin shard into a pinned uint16 CPU tensor.

    Shard layout (established by the asserts below): a 256*int32 header with
    [0]=magic 20240520, [1]=version 1, [2]=token count, followed by uint16
    tokens.
    """
    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
    assert header[1] == 1, "unsupported version"
    num_tokens = int(header[2])
    with file.open("rb", buffering=0) as f:
        # Pinned host memory lets the later .to(..., non_blocking=True) overlap H2D copies.
        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
        f.seek(256 * 4)  # skip the 256 * int32 header
        nbytes = f.readinto(tokens.numpy())  # read directly into the tensor's buffer
        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
    return tokens

def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int):
    """Infinite generator yielding this rank's (inputs, targets) token slices.

    `batch_size` is the GLOBAL batch in tokens; each rank reads its own
    contiguous `local_batch_size` slice of the current shard. Targets are
    inputs shifted by one token (next-token prediction).
    """
    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
    assert batch_size % world_size == 0
    local_batch_size = batch_size // world_size
    file_iter = cycle(files)  # shards repeat forever, so training is multi-epoch
    tokens, pos = _load_data_shard(next(file_iter)), 0
    while True:
        # +1 because targets need one extra trailing token.
        if pos + batch_size + 1 >= len(tokens):
            tokens, pos = _load_data_shard(next(file_iter)), 0
        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
        pos += batch_size
        yield inputs, targets
@dataclass
class Hyperparameters:
    """Static run configuration (data paths, sequence lengths, schedule)."""
    # data

    #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin"
    #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin"
    #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin"
    #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin"

    # Glob patterns for the tokenized fineweb10B shards.
    train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
    val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin"

    #val_tokens = 1966080
    val_tokens = 491520  # total tokens consumed per validation pass
    #val_tokens = 10485760
    #train_seq_len = 12*1024
    #val_seq_len = 4*16*1024
    train_seq_len = 3*1024 # FlexAttention sequence length
    #train_seq_len = 12*1024 # FlexAttention sequence length
    val_seq_len = 4*4*1024 # FlexAttention sequence length for validation

    # optimization
    num_iterations = 10000 #1770 # Original: 1770
    cooldown_frac = 0.4  # final fraction of training spent on linear LR decay
    # architecture

    vocab_size = 50257  # GPT-2 BPE vocabulary

    # evaluation and logging
    val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations
    save_checkpoint = False
args = Hyperparameters()
def check_and_delete_txt_files(folder_path):
    """
    Scan a run directory's .txt logs for the completion marker "10000/10000".

    If no .txt file in the directory (non-recursive) contains the marker, all
    .txt files there are deleted so the run can start fresh.

    Args:
        folder_path (str): Path to the folder to check

    Returns:
        bool: True when the run should proceed (missing/invalid folder, no
        logs, or marker absent); False when a finished run was detected.
    """
    target_dir = Path(folder_path)

    # Guard clauses: nothing to inspect -> proceed with the run.
    if not target_dir.exists():
        print(f"Error: Folder '{folder_path}' does not exist.")
        return True
    if not target_dir.is_dir():
        print(f"Error: '{folder_path}' is not a directory.")
        return True

    log_files = list(target_dir.glob("*.txt"))
    if not log_files:
        print("No .txt files found in the folder.")
        return True

    # Look for the completion marker; stop at the first hit.
    marker_found = False
    for txt in log_files:
        try:
            if "10000/10000" in txt.read_text(encoding='utf-8'):
                marker_found = True
                print(f"Found '10000/10000' in: {txt}")
                break
        except Exception as e:
            print(f"Error reading {txt}: {e}")

    if marker_found:
        print("String '10000/10000' found. No files will be deleted.")
        return False

    # Previous run never completed: clear its logs (best-effort per file).
    print("String '10000/10000' not found in any .txt file. Deleting all .txt files...")
    for txt in log_files:
        try:
            txt.unlink()
            print(f"Deleted: {txt}")
        except Exception as e:
            print(f"Error deleting {txt}: {e}")
    return True
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
        # Walk every transformer block and bucket its weight matrices.
        # q_w / k_w / v_w only exist in the qkvo parameterization of
        # CausalSelfAttention; missing attributes are warned about, not fatal.
        for block_module in model.blocks:
            if block_module.attn is not None:
                if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
                else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True)
                if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
                else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True)
                if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
                else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True)
                attn_o_params.append(block_module.attn.c_proj.weight)  # W_O (output projection)
            if block_module.mlp is not None:
                mlp_fc_params.append(block_module.mlp.c_fc.weight)     # W_1
                mlp_proj_params.append(block_module.mlp.c_proj.weight) # W_2

        # Combine into logical groups used by the optimizer-mode dispatch below.
        attn_qk_group = attn_q_params + attn_k_params
        attn_vo_group = attn_v_params + attn_o_params
        all_attn_matrices = attn_qk_group + attn_vo_group
        mlp_w1_group = mlp_fc_params
        mlp_w2_group = mlp_proj_params
        all_mlp_matrices = mlp_fc_params + mlp_proj_params

        # Scalar parameters: everything not explicitly grouped as a matrix above.
        matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
        scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
        for p_scalar in scalar_params: # Sanity check
            if p_scalar.ndim >=2:
                print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)


        # Determine parameter distribution based on optimizer_mode.
        # muon_params_target_list -> handled by Muon; adam_matrix_target_list ->
        # matrices explicitly given to Adam (embeds/head/scalars always go to Adam).
        muon_params_target_list = []
        adam_matrix_target_list = []
        adam_matrix_lr = exp_args.adam_lr  # LR for matrices when Adam handles them
        muon_lr = exp_args.muon_lr

        current_optimizer_mode = exp_args.optimizer_mode
        print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)

        if current_optimizer_mode == 0: # Original behavior: Muon on all hidden matrices
            print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
            muon_params_target_list = all_attn_matrices + all_mlp_matrices
        elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP
            print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = attn_qk_group
            adam_matrix_target_list = attn_vo_group + all_mlp_matrices
        elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
            print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = attn_vo_group
            adam_matrix_target_list = attn_qk_group + all_mlp_matrices
        elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
            print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = all_attn_matrices
            adam_matrix_target_list = all_mlp_matrices
        elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
            print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = all_mlp_matrices
            adam_matrix_target_list = all_attn_matrices
        elif current_optimizer_mode == 5: # All Adam — no Muon at all
            print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = []
            adam_matrix_target_list = all_attn_matrices + all_mlp_matrices
        elif current_optimizer_mode == 6: # Muon on W_2 MLP only
            print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = mlp_w2_group
            adam_matrix_target_list = all_attn_matrices + mlp_w1_group
        elif current_optimizer_mode == 7: # Muon on VO Attn + MLP
            print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = attn_vo_group + all_mlp_matrices
            adam_matrix_target_list = attn_qk_group
        elif current_optimizer_mode == 8: # Muon on VO Attn + W_2 MLP
            print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = attn_vo_group + mlp_w2_group
            adam_matrix_target_list = attn_qk_group + mlp_w1_group
        else:
            raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")

        # Adam optimizer setup: head/embed/scalar params always belong to Adam.
        adam_param_groups_config = [
            #dict(params=head_params, lr=0.22),
            #dict(params=embed_params, lr=0.6),
            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
            dict(params=head_params, lr=adam_matrix_lr),
            dict(params=embed_params, lr=adam_matrix_lr),
            dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam
        ]
        # Add matrices specifically assigned to Adam for this experiment mode.
        if adam_matrix_target_list:
            # Flatten (the target list may mix Parameters and lists of Parameters).
            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
            if flat_adam_matrices: # Only add group if there are params
                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))

        # Drop any groups that ended up empty (e.g. no scalar params).
        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)
        optimizers = [optimizer1] # Start with Adam

        # Muon optimizer setup (only when this mode assigned it parameters).
        if muon_params_target_list:
            # Flatten and de-duplicate by object identity.
            flat_unique_muon_params = []
            seen_muon_ids = set()
            for sublist_or_p in muon_params_target_list:
                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
                    if p is not None and id(p) not in seen_muon_ids:
                        flat_unique_muon_params.append(p)
                        seen_muon_ids.add(id(p))

            if flat_unique_muon_params: # Only create Muon if it has parameters
                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size)
                optimizers.append(optimizer2)
            else:
                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
                optimizer2 = None # Explicitly set to None if not created
        else:
            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
            optimizer2 = None # Explicitly set to None

        print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
        if optimizer2:
            print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
        # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 08:44:45] [Rank 0] PRINT: Constructing model... +[2025-09-11 08:44:45] [Rank 0] PRINT: Constructing model... +[2025-09-11 08:44:46] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 08:44:46] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 08:44:46] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 08:44:46] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 08:44:46] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 08:44:46] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 08:44:46] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 08:44:46] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 08:44:46] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 08:44:46] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 08:44:48] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 08:44:48] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 08:44:48] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 08:44:48] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 08:44:48] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 08:44:48] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 08:44:53] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 08:44:53] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 08:44:53] [Rank 0] PRINT: Starting warmup... +[2025-09-11 08:44:53] [Rank 0] PRINT: Starting warmup... +[2025-09-11 08:45:30] [Rank 0] PRINT: Warmup complete. +[2025-09-11 08:45:30] [Rank 0] PRINT: Warmup complete. +[2025-09-11 08:45:30] [Rank 0] PRINT: Starting training... +[2025-09-11 08:45:30] [Rank 0] PRINT: Starting training... 
+[2025-09-11 08:45:31] [Rank 0] step:21/10000 train_time:1141ms step_avg:54.32ms +[2025-09-11 08:45:31] [Rank 0] step:21/10000 train_time:1141ms step_avg:54.32ms +[2025-09-11 08:45:32] [Rank 0] step:41/10000 train_time:1873ms step_avg:45.68ms +[2025-09-11 08:45:32] [Rank 0] step:41/10000 train_time:1873ms step_avg:45.68ms +[2025-09-11 08:45:33] [Rank 0] step:61/10000 train_time:2605ms step_avg:42.70ms +[2025-09-11 08:45:33] [Rank 0] step:61/10000 train_time:2605ms step_avg:42.70ms +[2025-09-11 08:45:34] [Rank 0] step:81/10000 train_time:3336ms step_avg:41.18ms +[2025-09-11 08:45:34] [Rank 0] step:81/10000 train_time:3336ms step_avg:41.18ms +[2025-09-11 08:45:34] [Rank 0] step:101/10000 train_time:4067ms step_avg:40.27ms +[2025-09-11 08:45:34] [Rank 0] step:101/10000 train_time:4067ms step_avg:40.27ms +[2025-09-11 08:45:35] [Rank 0] step:121/10000 train_time:4798ms step_avg:39.66ms +[2025-09-11 08:45:35] [Rank 0] step:121/10000 train_time:4798ms step_avg:39.66ms +[2025-09-11 08:45:36] [Rank 0] step:141/10000 train_time:5529ms step_avg:39.21ms +[2025-09-11 08:45:36] [Rank 0] step:141/10000 train_time:5529ms step_avg:39.21ms +[2025-09-11 08:45:37] [Rank 0] step:161/10000 train_time:6260ms step_avg:38.88ms +[2025-09-11 08:45:37] [Rank 0] step:161/10000 train_time:6260ms step_avg:38.88ms +[2025-09-11 08:45:37] [Rank 0] step:181/10000 train_time:6991ms step_avg:38.62ms +[2025-09-11 08:45:37] [Rank 0] step:181/10000 train_time:6991ms step_avg:38.62ms +[2025-09-11 08:45:38] [Rank 0] step:201/10000 train_time:7721ms step_avg:38.41ms +[2025-09-11 08:45:38] [Rank 0] step:201/10000 train_time:7721ms step_avg:38.41ms +[2025-09-11 08:45:39] [Rank 0] step:221/10000 train_time:8452ms step_avg:38.25ms +[2025-09-11 08:45:39] [Rank 0] step:221/10000 train_time:8452ms step_avg:38.25ms +[2025-09-11 08:45:39] [Rank 0] step:241/10000 train_time:9183ms step_avg:38.10ms +[2025-09-11 08:45:39] [Rank 0] step:241/10000 train_time:9183ms step_avg:38.10ms +[2025-09-11 08:45:40] [Rank 0] 
step:261/10000 train_time:9914ms step_avg:37.98ms +[2025-09-11 08:45:40] [Rank 0] step:261/10000 train_time:9914ms step_avg:37.98ms +[2025-09-11 08:45:41] [Rank 0] step:281/10000 train_time:10644ms step_avg:37.88ms +[2025-09-11 08:45:41] [Rank 0] step:281/10000 train_time:10644ms step_avg:37.88ms +[2025-09-11 08:45:42] [Rank 0] step:301/10000 train_time:11374ms step_avg:37.79ms +[2025-09-11 08:45:42] [Rank 0] step:301/10000 train_time:11374ms step_avg:37.79ms +[2025-09-11 08:45:42] [Rank 0] step:321/10000 train_time:12104ms step_avg:37.71ms +[2025-09-11 08:45:42] [Rank 0] step:321/10000 train_time:12104ms step_avg:37.71ms +[2025-09-11 08:45:43] [Rank 0] step:341/10000 train_time:12834ms step_avg:37.64ms +[2025-09-11 08:45:43] [Rank 0] step:341/10000 train_time:12834ms step_avg:37.64ms +[2025-09-11 08:45:44] [Rank 0] step:361/10000 train_time:13564ms step_avg:37.57ms +[2025-09-11 08:45:44] [Rank 0] step:361/10000 train_time:13564ms step_avg:37.57ms +[2025-09-11 08:45:45] [Rank 0] step:381/10000 train_time:14294ms step_avg:37.52ms +[2025-09-11 08:45:45] [Rank 0] step:381/10000 train_time:14294ms step_avg:37.52ms +[2025-09-11 08:45:45] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 08:45:45] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 08:45:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 08:45:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 08:46:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 08:46:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 08:46:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:46:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 08:46:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 08:46:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 08:46:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 08:46:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 08:46:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 08:46:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 08:46:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 08:46:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 08:46:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 08:46:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 08:46:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 08:46:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 08:46:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 08:46:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 08:46:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 08:46:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 08:46:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 08:46:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 08:46:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 08:46:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 08:46:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 08:46:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 08:46:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 08:46:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 08:46:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 08:46:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 08:46:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 08:46:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 08:46:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 08:46:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 08:46:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 08:46:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 08:46:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 08:46:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 08:46:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 08:46:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 08:46:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 08:46:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:46:32] [Rank 0] PRINT: step:400/10000 val_loss:6.0111 total_sharp:1.0524e-03 L1_sharp:9.8751e-03 L2_sharp:1.8247e-03 L3_sharp:7.3917e-04 L4_sharp:8.4436e-04 L5_sharp:6.3961e-04 L6_sharp:5.7670e-04 L7_sharp:7.9532e-04 L8_sharp:8.0289e-04 L9_sharp:7.8507e-04 L10_sharp:1.0272e-03 L11_sharp:1.6397e-03 L12_sharp:4.5488e-03 total_fnorm:8.2167e+01 total_l1_linf:2.5713e+05 total_spectral:4.1080e+01 L1_fnorm:6.2071e+00 L2_fnorm:6.0426e+00 L3_fnorm:5.8979e+00 L4_fnorm:5.8284e+00 L5_fnorm:5.8201e+00 L6_fnorm:5.7845e+00 L7_fnorm:5.7761e+00 L8_fnorm:5.6908e+00 L9_fnorm:5.6459e+00 L10_fnorm:5.5812e+00 L11_fnorm:5.5160e+00 L12_fnorm:5.2981e+00 L1_l1linf:1.9310e+00 L2_l1linf:1.8405e+00 L3_l1linf:1.8486e+00 L4_l1linf:1.8766e+00 L5_l1linf:1.9340e+00 L6_l1linf:1.9308e+00 L7_l1linf:1.9227e+00 L8_l1linf:1.8685e+00 L9_l1linf:1.8266e+00 L10_l1linf:1.7472e+00 L11_l1linf:1.6332e+00 L12_l1linf:1.4766e+00 L1_spectral:6.0452e-02 L2_spectral:6.0417e-02 L3_spectral:6.0377e-02 L4_spectral:6.0326e-02 L5_spectral:6.0319e-02 L6_spectral:6.0285e-02 L7_spectral:6.0382e-02 L8_spectral:6.0272e-02 L9_spectral:6.0330e-02 L10_spectral:6.0317e-02 L11_spectral:6.0312e-02 L12_spectral:6.0287e-02 train_time:15004ms step_avg:37.51ms +[2025-09-11 08:46:32] [Rank 0] PRINT: step:400/10000 val_loss:6.0111 total_sharp:1.0524e-03 L1_sharp:9.8751e-03 L2_sharp:1.8247e-03 L3_sharp:7.3917e-04 L4_sharp:8.4436e-04 L5_sharp:6.3961e-04 L6_sharp:5.7670e-04 L7_sharp:7.9532e-04 L8_sharp:8.0289e-04 L9_sharp:7.8507e-04 L10_sharp:1.0272e-03 L11_sharp:1.6397e-03 L12_sharp:4.5488e-03 total_fnorm:8.2167e+01 total_l1_linf:2.5713e+05 total_spectral:4.1080e+01 L1_fnorm:6.2071e+00 L2_fnorm:6.0426e+00 L3_fnorm:5.8979e+00 L4_fnorm:5.8284e+00 L5_fnorm:5.8201e+00 L6_fnorm:5.7845e+00 L7_fnorm:5.7761e+00 L8_fnorm:5.6908e+00 L9_fnorm:5.6459e+00 L10_fnorm:5.5812e+00 L11_fnorm:5.5160e+00 L12_fnorm:5.2981e+00 L1_l1linf:1.9310e+00 L2_l1linf:1.8405e+00 L3_l1linf:1.8486e+00 L4_l1linf:1.8766e+00 L5_l1linf:1.9340e+00 
L6_l1linf:1.9308e+00 L7_l1linf:1.9227e+00 L8_l1linf:1.8685e+00 L9_l1linf:1.8266e+00 L10_l1linf:1.7472e+00 L11_l1linf:1.6332e+00 L12_l1linf:1.4766e+00 L1_spectral:6.0452e-02 L2_spectral:6.0417e-02 L3_spectral:6.0377e-02 L4_spectral:6.0326e-02 L5_spectral:6.0319e-02 L6_spectral:6.0285e-02 L7_spectral:6.0382e-02 L8_spectral:6.0272e-02 L9_spectral:6.0330e-02 L10_spectral:6.0317e-02 L11_spectral:6.0312e-02 L12_spectral:6.0287e-02 train_time:15004ms step_avg:37.51ms +[2025-09-11 08:47:02] [Rank 0] step:401/10000 train_time:45433ms step_avg:113.30ms +[2025-09-11 08:47:02] [Rank 0] step:401/10000 train_time:45433ms step_avg:113.30ms +[2025-09-11 08:47:04] [Rank 0] step:421/10000 train_time:47268ms step_avg:112.28ms +[2025-09-11 08:47:04] [Rank 0] step:421/10000 train_time:47268ms step_avg:112.28ms +[2025-09-11 08:47:05] [Rank 0] step:441/10000 train_time:47910ms step_avg:108.64ms +[2025-09-11 08:47:05] [Rank 0] step:441/10000 train_time:47910ms step_avg:108.64ms +[2025-09-11 08:47:05] [Rank 0] step:461/10000 train_time:48551ms step_avg:105.32ms +[2025-09-11 08:47:05] [Rank 0] step:461/10000 train_time:48551ms step_avg:105.32ms +[2025-09-11 08:47:06] [Rank 0] step:481/10000 train_time:49192ms step_avg:102.27ms +[2025-09-11 08:47:06] [Rank 0] step:481/10000 train_time:49192ms step_avg:102.27ms +[2025-09-11 08:47:07] [Rank 0] step:501/10000 train_time:49832ms step_avg:99.46ms +[2025-09-11 08:47:07] [Rank 0] step:501/10000 train_time:49832ms step_avg:99.46ms +[2025-09-11 08:47:08] [Rank 0] step:521/10000 train_time:50758ms step_avg:97.42ms +[2025-09-11 08:47:08] [Rank 0] step:521/10000 train_time:50758ms step_avg:97.42ms +[2025-09-11 08:47:09] [Rank 0] step:541/10000 train_time:51663ms step_avg:95.50ms +[2025-09-11 08:47:09] [Rank 0] step:541/10000 train_time:51663ms step_avg:95.50ms +[2025-09-11 08:47:09] [Rank 0] step:561/10000 train_time:52304ms step_avg:93.23ms +[2025-09-11 08:47:09] [Rank 0] step:561/10000 train_time:52304ms step_avg:93.23ms +[2025-09-11 08:47:10] [Rank 
0] step:581/10000 train_time:53094ms step_avg:91.38ms +[2025-09-11 08:47:10] [Rank 0] step:581/10000 train_time:53094ms step_avg:91.38ms +[2025-09-11 08:47:11] [Rank 0] step:601/10000 train_time:53859ms step_avg:89.61ms +[2025-09-11 08:47:11] [Rank 0] step:601/10000 train_time:53859ms step_avg:89.61ms +[2025-09-11 08:47:11] [Rank 0] step:621/10000 train_time:54498ms step_avg:87.76ms +[2025-09-11 08:47:11] [Rank 0] step:621/10000 train_time:54498ms step_avg:87.76ms +[2025-09-11 08:47:12] [Rank 0] step:641/10000 train_time:55138ms step_avg:86.02ms +[2025-09-11 08:47:12] [Rank 0] step:641/10000 train_time:55138ms step_avg:86.02ms +[2025-09-11 08:47:13] [Rank 0] step:661/10000 train_time:55779ms step_avg:84.39ms +[2025-09-11 08:47:13] [Rank 0] step:661/10000 train_time:55779ms step_avg:84.39ms +[2025-09-11 08:47:13] [Rank 0] step:681/10000 train_time:56419ms step_avg:82.85ms +[2025-09-11 08:47:13] [Rank 0] step:681/10000 train_time:56419ms step_avg:82.85ms +[2025-09-11 08:47:14] [Rank 0] step:701/10000 train_time:57059ms step_avg:81.40ms +[2025-09-11 08:47:14] [Rank 0] step:701/10000 train_time:57059ms step_avg:81.40ms +[2025-09-11 08:47:15] [Rank 0] step:721/10000 train_time:57698ms step_avg:80.02ms +[2025-09-11 08:47:15] [Rank 0] step:721/10000 train_time:57698ms step_avg:80.02ms +[2025-09-11 08:47:15] [Rank 0] step:741/10000 train_time:58338ms step_avg:78.73ms +[2025-09-11 08:47:15] [Rank 0] step:741/10000 train_time:58338ms step_avg:78.73ms +[2025-09-11 08:47:16] [Rank 0] step:761/10000 train_time:58983ms step_avg:77.51ms +[2025-09-11 08:47:16] [Rank 0] step:761/10000 train_time:58983ms step_avg:77.51ms +[2025-09-11 08:47:17] [Rank 0] step:781/10000 train_time:59628ms step_avg:76.35ms +[2025-09-11 08:47:17] [Rank 0] step:781/10000 train_time:59628ms step_avg:76.35ms +[2025-09-11 08:47:17] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 08:47:17] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 08:47:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 08:47:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 08:47:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 08:47:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 08:47:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:47:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:47:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 08:47:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 08:47:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 08:47:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 08:47:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 08:47:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 08:47:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 08:47:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 08:47:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 08:47:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 08:47:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 08:47:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 08:47:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 08:47:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 08:47:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 08:47:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 08:47:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 08:47:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 08:47:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 08:47:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 08:47:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 08:47:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 08:47:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 08:47:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 08:47:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 08:47:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 08:47:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 08:47:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 08:47:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 08:47:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 08:47:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 08:47:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 08:48:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... 
+[2025-09-11 08:48:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 08:48:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 08:48:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 08:48:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 08:48:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 08:48:00] [Rank 0] PRINT: step:800/10000 val_loss:5.5671 total_sharp:5.5878e-04 L1_sharp:5.0120e-03 L2_sharp:5.0207e-04 L3_sharp:4.3219e-04 L4_sharp:2.8577e-04 L5_sharp:3.6633e-04 L6_sharp:3.5952e-04 L7_sharp:4.3475e-04 L8_sharp:4.5380e-04 L9_sharp:4.2443e-04 L10_sharp:5.4780e-04 L11_sharp:9.7492e-04 L12_sharp:3.9297e-03 total_fnorm:7.9000e+01 total_l1_linf:2.3142e+05 total_spectral:4.0750e+01 L1_fnorm:6.1562e+00 L2_fnorm:6.0938e+00 L3_fnorm:6.0000e+00 L4_fnorm:5.9688e+00 L5_fnorm:6.0000e+00 L6_fnorm:6.0312e+00 L7_fnorm:6.0312e+00 L8_fnorm:5.8438e+00 L9_fnorm:5.9375e+00 L10_fnorm:5.9062e+00 L11_fnorm:5.8438e+00 L12_fnorm:5.4375e+00 L1_l1linf:1.9297e+00 L2_l1linf:1.8672e+00 L3_l1linf:1.8359e+00 L4_l1linf:1.8516e+00 L5_l1linf:1.8594e+00 L6_l1linf:1.8828e+00 L7_l1linf:1.8672e+00 L8_l1linf:1.8516e+00 L9_l1linf:1.8516e+00 L10_l1linf:1.8125e+00 L11_l1linf:1.6484e+00 L12_l1linf:1.4062e+00 L1_spectral:6.7036e-02 L2_spectral:6.6456e-02 L3_spectral:6.6262e-02 L4_spectral:6.6204e-02 L5_spectral:6.6179e-02 L6_spectral:6.6185e-02 L7_spectral:6.6196e-02 L8_spectral:6.6097e-02 L9_spectral:6.5896e-02 L10_spectral:6.6315e-02 L11_spectral:6.6084e-02 L12_spectral:6.6064e-02 train_time:60256ms step_avg:75.32ms +[2025-09-11 08:48:00] [Rank 0] PRINT: step:800/10000 val_loss:5.5671 total_sharp:5.5878e-04 L1_sharp:5.0120e-03 L2_sharp:5.0207e-04 L3_sharp:4.3219e-04 L4_sharp:2.8577e-04 L5_sharp:3.6633e-04 L6_sharp:3.5952e-04 L7_sharp:4.3475e-04 L8_sharp:4.5380e-04 
L9_sharp:4.2443e-04 L10_sharp:5.4780e-04 L11_sharp:9.7492e-04 L12_sharp:3.9297e-03 total_fnorm:7.9000e+01 total_l1_linf:2.3142e+05 total_spectral:4.0750e+01 L1_fnorm:6.1562e+00 L2_fnorm:6.0938e+00 L3_fnorm:6.0000e+00 L4_fnorm:5.9688e+00 L5_fnorm:6.0000e+00 L6_fnorm:6.0312e+00 L7_fnorm:6.0312e+00 L8_fnorm:5.8438e+00 L9_fnorm:5.9375e+00 L10_fnorm:5.9062e+00 L11_fnorm:5.8438e+00 L12_fnorm:5.4375e+00 L1_l1linf:1.9297e+00 L2_l1linf:1.8672e+00 L3_l1linf:1.8359e+00 L4_l1linf:1.8516e+00 L5_l1linf:1.8594e+00 L6_l1linf:1.8828e+00 L7_l1linf:1.8672e+00 L8_l1linf:1.8516e+00 L9_l1linf:1.8516e+00 L10_l1linf:1.8125e+00 L11_l1linf:1.6484e+00 L12_l1linf:1.4062e+00 L1_spectral:6.7036e-02 L2_spectral:6.6456e-02 L3_spectral:6.6262e-02 L4_spectral:6.6204e-02 L5_spectral:6.6179e-02 L6_spectral:6.6185e-02 L7_spectral:6.6196e-02 L8_spectral:6.6097e-02 L9_spectral:6.5896e-02 L10_spectral:6.6315e-02 L11_spectral:6.6084e-02 L12_spectral:6.6064e-02 train_time:60256ms step_avg:75.32ms +[2025-09-11 08:48:02] [Rank 0] step:801/10000 train_time:61449ms step_avg:76.72ms +[2025-09-11 08:48:02] [Rank 0] step:801/10000 train_time:61449ms step_avg:76.72ms +[2025-09-11 08:48:02] [Rank 0] step:821/10000 train_time:62098ms step_avg:75.64ms +[2025-09-11 08:48:02] [Rank 0] step:821/10000 train_time:62098ms step_avg:75.64ms +[2025-09-11 08:48:03] [Rank 0] step:841/10000 train_time:62746ms step_avg:74.61ms +[2025-09-11 08:48:03] [Rank 0] step:841/10000 train_time:62746ms step_avg:74.61ms +[2025-09-11 08:48:04] [Rank 0] step:861/10000 train_time:63393ms step_avg:73.63ms +[2025-09-11 08:48:04] [Rank 0] step:861/10000 train_time:63393ms step_avg:73.63ms +[2025-09-11 08:48:04] [Rank 0] step:881/10000 train_time:64039ms step_avg:72.69ms +[2025-09-11 08:48:04] [Rank 0] step:881/10000 train_time:64039ms step_avg:72.69ms +[2025-09-11 08:48:05] [Rank 0] step:901/10000 train_time:64685ms step_avg:71.79ms +[2025-09-11 08:48:05] [Rank 0] step:901/10000 train_time:64685ms step_avg:71.79ms +[2025-09-11 08:48:06] [Rank 0] 
step:921/10000 train_time:65330ms step_avg:70.93ms +[2025-09-11 08:48:06] [Rank 0] step:921/10000 train_time:65330ms step_avg:70.93ms +[2025-09-11 08:48:06] [Rank 0] step:941/10000 train_time:65975ms step_avg:70.11ms +[2025-09-11 08:48:06] [Rank 0] step:941/10000 train_time:65975ms step_avg:70.11ms +[2025-09-11 08:48:07] [Rank 0] step:961/10000 train_time:66622ms step_avg:69.33ms +[2025-09-11 08:48:07] [Rank 0] step:961/10000 train_time:66622ms step_avg:69.33ms +[2025-09-11 08:48:08] [Rank 0] step:981/10000 train_time:67270ms step_avg:68.57ms +[2025-09-11 08:48:08] [Rank 0] step:981/10000 train_time:67270ms step_avg:68.57ms +[2025-09-11 08:48:08] [Rank 0] step:1001/10000 train_time:67915ms step_avg:67.85ms +[2025-09-11 08:48:08] [Rank 0] step:1001/10000 train_time:67915ms step_avg:67.85ms +[2025-09-11 08:48:09] [Rank 0] step:1021/10000 train_time:68560ms step_avg:67.15ms +[2025-09-11 08:48:09] [Rank 0] step:1021/10000 train_time:68560ms step_avg:67.15ms +[2025-09-11 08:48:09] [Rank 0] step:1041/10000 train_time:69204ms step_avg:66.48ms +[2025-09-11 08:48:09] [Rank 0] step:1041/10000 train_time:69204ms step_avg:66.48ms +[2025-09-11 08:48:10] [Rank 0] step:1061/10000 train_time:69849ms step_avg:65.83ms +[2025-09-11 08:48:10] [Rank 0] step:1061/10000 train_time:69849ms step_avg:65.83ms +[2025-09-11 08:48:11] [Rank 0] step:1081/10000 train_time:70903ms step_avg:65.59ms +[2025-09-11 08:48:11] [Rank 0] step:1081/10000 train_time:70903ms step_avg:65.59ms +[2025-09-11 08:48:12] [Rank 0] step:1101/10000 train_time:71700ms step_avg:65.12ms +[2025-09-11 08:48:12] [Rank 0] step:1101/10000 train_time:71700ms step_avg:65.12ms +[2025-09-11 08:48:13] [Rank 0] step:1121/10000 train_time:72345ms step_avg:64.54ms +[2025-09-11 08:48:13] [Rank 0] step:1121/10000 train_time:72345ms step_avg:64.54ms +[2025-09-11 08:48:14] [Rank 0] step:1141/10000 train_time:73323ms step_avg:64.26ms +[2025-09-11 08:48:14] [Rank 0] step:1141/10000 train_time:73323ms step_avg:64.26ms +[2025-09-11 08:48:14] 
[Rank 0] step:1161/10000 train_time:73969ms step_avg:63.71ms +[2025-09-11 08:48:14] [Rank 0] step:1161/10000 train_time:73969ms step_avg:63.71ms +[2025-09-11 08:48:15] [Rank 0] step:1181/10000 train_time:74614ms step_avg:63.18ms +[2025-09-11 08:48:15] [Rank 0] step:1181/10000 train_time:74614ms step_avg:63.18ms +[2025-09-11 08:48:15] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 08:48:15] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 08:48:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 08:48:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 08:48:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 08:48:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 08:48:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:48:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:48:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 08:48:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 08:48:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 08:48:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 08:48:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 08:48:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 08:48:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... 
+[2025-09-11 08:48:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 08:48:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 08:48:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 08:48:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 08:48:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 08:48:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 08:48:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 08:48:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 08:48:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 08:48:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 08:48:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 08:48:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 08:48:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 08:48:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 08:48:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 08:48:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 08:48:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 08:48:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 08:48:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 08:48:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... 
+[2025-09-11 08:48:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 08:48:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 08:48:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 08:48:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 08:48:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 08:48:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 08:48:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 08:48:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 08:48:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 08:48:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 08:48:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:48:25] [Rank 0] PRINT: step:1200/10000 val_loss:5.1998 total_sharp:3.9543e-04 L1_sharp:2.6450e-03 L2_sharp:3.9499e-04 L3_sharp:6.8861e-05 L4_sharp:9.2244e-05 L5_sharp:2.2844e-04 L6_sharp:1.8397e-04 L7_sharp:2.6184e-04 L8_sharp:3.2834e-04 L9_sharp:3.0284e-04 L10_sharp:4.4958e-04 L11_sharp:6.6675e-04 L12_sharp:3.5530e-03 total_fnorm:7.9000e+01 total_l1_linf:2.2528e+05 total_spectral:4.0500e+01 L1_fnorm:6.1875e+00 L2_fnorm:6.1250e+00 L3_fnorm:6.0938e+00 L4_fnorm:6.0938e+00 L5_fnorm:6.0938e+00 L6_fnorm:6.1562e+00 L7_fnorm:6.1562e+00 L8_fnorm:5.9688e+00 L9_fnorm:6.1250e+00 L10_fnorm:6.1250e+00 L11_fnorm:6.1562e+00 L12_fnorm:6.0312e+00 L1_l1linf:1.8359e+00 L2_l1linf:1.7891e+00 L3_l1linf:1.7812e+00 L4_l1linf:1.7422e+00 L5_l1linf:1.7500e+00 L6_l1linf:1.7266e+00 L7_l1linf:1.7344e+00 L8_l1linf:1.7188e+00 L9_l1linf:1.7422e+00 L10_l1linf:1.7031e+00 L11_l1linf:1.7031e+00 L12_l1linf:1.5859e+00 L1_spectral:7.0113e-02 L2_spectral:6.9069e-02 L3_spectral:6.8755e-02 L4_spectral:6.9131e-02 L5_spectral:6.9394e-02 L6_spectral:6.9290e-02 L7_spectral:6.9332e-02 L8_spectral:7.0644e-02 L9_spectral:6.9580e-02 L10_spectral:6.8843e-02 L11_spectral:6.8631e-02 L12_spectral:6.8790e-02 train_time:75241ms step_avg:62.70ms +[2025-09-11 08:48:25] [Rank 0] PRINT: step:1200/10000 val_loss:5.1998 total_sharp:3.9543e-04 L1_sharp:2.6450e-03 L2_sharp:3.9499e-04 L3_sharp:6.8861e-05 L4_sharp:9.2244e-05 L5_sharp:2.2844e-04 L6_sharp:1.8397e-04 L7_sharp:2.6184e-04 L8_sharp:3.2834e-04 L9_sharp:3.0284e-04 L10_sharp:4.4958e-04 L11_sharp:6.6675e-04 L12_sharp:3.5530e-03 total_fnorm:7.9000e+01 total_l1_linf:2.2528e+05 total_spectral:4.0500e+01 L1_fnorm:6.1875e+00 L2_fnorm:6.1250e+00 L3_fnorm:6.0938e+00 L4_fnorm:6.0938e+00 L5_fnorm:6.0938e+00 L6_fnorm:6.1562e+00 L7_fnorm:6.1562e+00 L8_fnorm:5.9688e+00 L9_fnorm:6.1250e+00 L10_fnorm:6.1250e+00 L11_fnorm:6.1562e+00 L12_fnorm:6.0312e+00 L1_l1linf:1.8359e+00 L2_l1linf:1.7891e+00 L3_l1linf:1.7812e+00 L4_l1linf:1.7422e+00 L5_l1linf:1.7500e+00 
L6_l1linf:1.7266e+00 L7_l1linf:1.7344e+00 L8_l1linf:1.7188e+00 L9_l1linf:1.7422e+00 L10_l1linf:1.7031e+00 L11_l1linf:1.7031e+00 L12_l1linf:1.5859e+00 L1_spectral:7.0113e-02 L2_spectral:6.9069e-02 L3_spectral:6.8755e-02 L4_spectral:6.9131e-02 L5_spectral:6.9394e-02 L6_spectral:6.9290e-02 L7_spectral:6.9332e-02 L8_spectral:7.0644e-02 L9_spectral:6.9580e-02 L10_spectral:6.8843e-02 L11_spectral:6.8631e-02 L12_spectral:6.8790e-02 train_time:75241ms step_avg:62.70ms +[2025-09-11 08:48:27] [Rank 0] step:1201/10000 train_time:76422ms step_avg:63.63ms +[2025-09-11 08:48:27] [Rank 0] step:1201/10000 train_time:76422ms step_avg:63.63ms +[2025-09-11 08:48:27] [Rank 0] step:1221/10000 train_time:77072ms step_avg:63.12ms +[2025-09-11 08:48:27] [Rank 0] step:1221/10000 train_time:77072ms step_avg:63.12ms +[2025-09-11 08:48:28] [Rank 0] step:1241/10000 train_time:77719ms step_avg:62.63ms +[2025-09-11 08:48:28] [Rank 0] step:1241/10000 train_time:77719ms step_avg:62.63ms +[2025-09-11 08:48:29] [Rank 0] step:1261/10000 train_time:78365ms step_avg:62.15ms +[2025-09-11 08:48:29] [Rank 0] step:1261/10000 train_time:78365ms step_avg:62.15ms +[2025-09-11 08:48:29] [Rank 0] step:1281/10000 train_time:79011ms step_avg:61.68ms +[2025-09-11 08:48:29] [Rank 0] step:1281/10000 train_time:79011ms step_avg:61.68ms +[2025-09-11 08:48:30] [Rank 0] step:1301/10000 train_time:79658ms step_avg:61.23ms +[2025-09-11 08:48:30] [Rank 0] step:1301/10000 train_time:79658ms step_avg:61.23ms +[2025-09-11 08:48:30] [Rank 0] step:1321/10000 train_time:80304ms step_avg:60.79ms +[2025-09-11 08:48:30] [Rank 0] step:1321/10000 train_time:80304ms step_avg:60.79ms +[2025-09-11 08:48:31] [Rank 0] step:1341/10000 train_time:80949ms step_avg:60.36ms +[2025-09-11 08:48:31] [Rank 0] step:1341/10000 train_time:80949ms step_avg:60.36ms +[2025-09-11 08:48:32] [Rank 0] step:1361/10000 train_time:81594ms step_avg:59.95ms +[2025-09-11 08:48:32] [Rank 0] step:1361/10000 train_time:81594ms step_avg:59.95ms +[2025-09-11 08:48:32] 
[Rank 0] step:1381/10000 train_time:82239ms step_avg:59.55ms +[2025-09-11 08:48:32] [Rank 0] step:1381/10000 train_time:82239ms step_avg:59.55ms +[2025-09-11 08:48:33] [Rank 0] step:1401/10000 train_time:82885ms step_avg:59.16ms +[2025-09-11 08:48:33] [Rank 0] step:1401/10000 train_time:82885ms step_avg:59.16ms +[2025-09-11 08:48:34] [Rank 0] step:1421/10000 train_time:83531ms step_avg:58.78ms +[2025-09-11 08:48:34] [Rank 0] step:1421/10000 train_time:83531ms step_avg:58.78ms +[2025-09-11 08:48:34] [Rank 0] step:1441/10000 train_time:84176ms step_avg:58.41ms +[2025-09-11 08:48:34] [Rank 0] step:1441/10000 train_time:84176ms step_avg:58.41ms +[2025-09-11 08:48:35] [Rank 0] step:1461/10000 train_time:84822ms step_avg:58.06ms +[2025-09-11 08:48:35] [Rank 0] step:1461/10000 train_time:84822ms step_avg:58.06ms +[2025-09-11 08:48:36] [Rank 0] step:1481/10000 train_time:85468ms step_avg:57.71ms +[2025-09-11 08:48:36] [Rank 0] step:1481/10000 train_time:85468ms step_avg:57.71ms +[2025-09-11 08:48:36] [Rank 0] step:1501/10000 train_time:86117ms step_avg:57.37ms +[2025-09-11 08:48:36] [Rank 0] step:1501/10000 train_time:86117ms step_avg:57.37ms +[2025-09-11 08:48:37] [Rank 0] step:1521/10000 train_time:86767ms step_avg:57.05ms +[2025-09-11 08:48:37] [Rank 0] step:1521/10000 train_time:86767ms step_avg:57.05ms +[2025-09-11 08:48:38] [Rank 0] step:1541/10000 train_time:87417ms step_avg:56.73ms +[2025-09-11 08:48:38] [Rank 0] step:1541/10000 train_time:87417ms step_avg:56.73ms +[2025-09-11 08:48:38] [Rank 0] step:1561/10000 train_time:88066ms step_avg:56.42ms +[2025-09-11 08:48:38] [Rank 0] step:1561/10000 train_time:88066ms step_avg:56.42ms +[2025-09-11 08:48:39] [Rank 0] step:1581/10000 train_time:88715ms step_avg:56.11ms +[2025-09-11 08:48:39] [Rank 0] step:1581/10000 train_time:88715ms step_avg:56.11ms +[2025-09-11 08:48:39] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 08:48:39] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 08:48:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 08:48:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 08:48:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 08:48:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 08:48:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:48:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:48:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 08:48:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 08:48:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 08:48:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 08:48:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 08:48:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 08:48:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 08:48:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 08:48:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 08:48:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 08:48:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 08:48:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 08:48:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 08:48:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 08:48:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 08:48:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 08:48:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 08:48:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 08:48:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 08:48:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 08:48:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 08:48:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 08:48:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 08:48:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 08:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 08:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 08:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 08:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 08:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 08:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 08:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 08:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 08:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 08:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 08:48:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 08:48:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 08:48:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 08:48:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 08:48:49] [Rank 0] PRINT: step:1600/10000 val_loss:5.0309 total_sharp:3.1561e-04 L1_sharp:1.8759e-03 L2_sharp:1.9435e-04 L3_sharp:1.0690e-04 L4_sharp:5.7337e-05 L5_sharp:2.5990e-04 L6_sharp:1.9578e-04 L7_sharp:3.1635e-04 L8_sharp:3.1720e-04 L9_sharp:2.8834e-04 L10_sharp:3.0894e-04 L11_sharp:5.1706e-04 L12_sharp:2.7618e-03 total_fnorm:7.6500e+01 total_l1_linf:2.0992e+05 total_spectral:3.9000e+01 L1_fnorm:6.2188e+00 L2_fnorm:6.1562e+00 L3_fnorm:6.1250e+00 L4_fnorm:6.1562e+00 L5_fnorm:6.1562e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.0000e+00 L9_fnorm:6.2188e+00 L10_fnorm:6.2188e+00 L11_fnorm:6.2188e+00 L12_fnorm:6.1250e+00 L1_l1linf:1.7969e+00 L2_l1linf:1.7266e+00 L3_l1linf:1.7422e+00 L4_l1linf:1.6875e+00 L5_l1linf:1.7266e+00 L6_l1linf:1.6719e+00 L7_l1linf:1.6719e+00 L8_l1linf:1.6641e+00 L9_l1linf:1.6641e+00 L10_l1linf:1.6719e+00 L11_l1linf:1.6406e+00 L12_l1linf:1.5781e+00 L1_spectral:7.2038e-02 L2_spectral:7.0425e-02 L3_spectral:7.0650e-02 L4_spectral:7.0745e-02 L5_spectral:7.1171e-02 L6_spectral:7.1370e-02 L7_spectral:7.1792e-02 L8_spectral:7.3489e-02 L9_spectral:7.1627e-02 L10_spectral:7.1325e-02 L11_spectral:7.1106e-02 L12_spectral:7.0622e-02 train_time:89347ms step_avg:55.84ms +[2025-09-11 08:48:49] [Rank 0] PRINT: step:1600/10000 
val_loss:5.0309 total_sharp:3.1561e-04 L1_sharp:1.8759e-03 L2_sharp:1.9435e-04 L3_sharp:1.0690e-04 L4_sharp:5.7337e-05 L5_sharp:2.5990e-04 L6_sharp:1.9578e-04 L7_sharp:3.1635e-04 L8_sharp:3.1720e-04 L9_sharp:2.8834e-04 L10_sharp:3.0894e-04 L11_sharp:5.1706e-04 L12_sharp:2.7618e-03 total_fnorm:7.6500e+01 total_l1_linf:2.0992e+05 total_spectral:3.9000e+01 L1_fnorm:6.2188e+00 L2_fnorm:6.1562e+00 L3_fnorm:6.1250e+00 L4_fnorm:6.1562e+00 L5_fnorm:6.1562e+00 L6_fnorm:6.2188e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.0000e+00 L9_fnorm:6.2188e+00 L10_fnorm:6.2188e+00 L11_fnorm:6.2188e+00 L12_fnorm:6.1250e+00 L1_l1linf:1.7969e+00 L2_l1linf:1.7266e+00 L3_l1linf:1.7422e+00 L4_l1linf:1.6875e+00 L5_l1linf:1.7266e+00 L6_l1linf:1.6719e+00 L7_l1linf:1.6719e+00 L8_l1linf:1.6641e+00 L9_l1linf:1.6641e+00 L10_l1linf:1.6719e+00 L11_l1linf:1.6406e+00 L12_l1linf:1.5781e+00 L1_spectral:7.2038e-02 L2_spectral:7.0425e-02 L3_spectral:7.0650e-02 L4_spectral:7.0745e-02 L5_spectral:7.1171e-02 L6_spectral:7.1370e-02 L7_spectral:7.1792e-02 L8_spectral:7.3489e-02 L9_spectral:7.1627e-02 L10_spectral:7.1325e-02 L11_spectral:7.1106e-02 L12_spectral:7.0622e-02 train_time:89347ms step_avg:55.84ms +[2025-09-11 08:48:50] [Rank 0] step:1601/10000 train_time:90539ms step_avg:56.55ms +[2025-09-11 08:48:50] [Rank 0] step:1601/10000 train_time:90539ms step_avg:56.55ms +[2025-09-11 08:48:51] [Rank 0] step:1621/10000 train_time:91193ms step_avg:56.26ms +[2025-09-11 08:48:51] [Rank 0] step:1621/10000 train_time:91193ms step_avg:56.26ms +[2025-09-11 08:48:52] [Rank 0] step:1641/10000 train_time:91844ms step_avg:55.97ms +[2025-09-11 08:48:52] [Rank 0] step:1641/10000 train_time:91844ms step_avg:55.97ms +[2025-09-11 08:48:52] [Rank 0] step:1661/10000 train_time:92494ms step_avg:55.69ms +[2025-09-11 08:48:52] [Rank 0] step:1661/10000 train_time:92494ms step_avg:55.69ms +[2025-09-11 08:48:53] [Rank 0] step:1681/10000 train_time:93145ms step_avg:55.41ms +[2025-09-11 08:48:53] [Rank 0] step:1681/10000 train_time:93145ms 
step_avg:55.41ms +[2025-09-11 08:48:54] [Rank 0] step:1701/10000 train_time:93796ms step_avg:55.14ms +[2025-09-11 08:48:54] [Rank 0] step:1701/10000 train_time:93796ms step_avg:55.14ms +[2025-09-11 08:48:54] [Rank 0] step:1721/10000 train_time:94446ms step_avg:54.88ms +[2025-09-11 08:48:54] [Rank 0] step:1721/10000 train_time:94446ms step_avg:54.88ms +[2025-09-11 08:48:55] [Rank 0] step:1741/10000 train_time:95096ms step_avg:54.62ms +[2025-09-11 08:48:55] [Rank 0] step:1741/10000 train_time:95096ms step_avg:54.62ms +[2025-09-11 08:48:56] [Rank 0] step:1761/10000 train_time:95746ms step_avg:54.37ms +[2025-09-11 08:48:56] [Rank 0] step:1761/10000 train_time:95746ms step_avg:54.37ms +[2025-09-11 08:48:56] [Rank 0] step:1781/10000 train_time:96395ms step_avg:54.12ms +[2025-09-11 08:48:56] [Rank 0] step:1781/10000 train_time:96395ms step_avg:54.12ms +[2025-09-11 08:48:57] [Rank 0] step:1801/10000 train_time:97046ms step_avg:53.88ms +[2025-09-11 08:48:57] [Rank 0] step:1801/10000 train_time:97046ms step_avg:53.88ms +[2025-09-11 08:48:58] [Rank 0] step:1821/10000 train_time:97695ms step_avg:53.65ms +[2025-09-11 08:48:58] [Rank 0] step:1821/10000 train_time:97695ms step_avg:53.65ms +[2025-09-11 08:48:58] [Rank 0] step:1841/10000 train_time:98345ms step_avg:53.42ms +[2025-09-11 08:48:58] [Rank 0] step:1841/10000 train_time:98345ms step_avg:53.42ms +[2025-09-11 08:48:59] [Rank 0] step:1861/10000 train_time:98995ms step_avg:53.19ms +[2025-09-11 08:48:59] [Rank 0] step:1861/10000 train_time:98995ms step_avg:53.19ms +[2025-09-11 08:49:00] [Rank 0] step:1881/10000 train_time:99644ms step_avg:52.97ms +[2025-09-11 08:49:00] [Rank 0] step:1881/10000 train_time:99644ms step_avg:52.97ms +[2025-09-11 08:49:00] [Rank 0] step:1901/10000 train_time:100295ms step_avg:52.76ms +[2025-09-11 08:49:00] [Rank 0] step:1901/10000 train_time:100295ms step_avg:52.76ms +[2025-09-11 08:49:01] [Rank 0] step:1921/10000 train_time:100944ms step_avg:52.55ms +[2025-09-11 08:49:01] [Rank 0] step:1921/10000 
train_time:100944ms step_avg:52.55ms +[2025-09-11 08:49:02] [Rank 0] step:1941/10000 train_time:101594ms step_avg:52.34ms +[2025-09-11 08:49:02] [Rank 0] step:1941/10000 train_time:101594ms step_avg:52.34ms +[2025-09-11 08:49:02] [Rank 0] step:1961/10000 train_time:102245ms step_avg:52.14ms +[2025-09-11 08:49:02] [Rank 0] step:1961/10000 train_time:102245ms step_avg:52.14ms +[2025-09-11 08:49:03] [Rank 0] step:1981/10000 train_time:102895ms step_avg:51.94ms +[2025-09-11 08:49:03] [Rank 0] step:1981/10000 train_time:102895ms step_avg:51.94ms +[2025-09-11 08:49:03] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 08:49:03] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 08:49:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 08:49:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 08:49:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 08:49:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 08:49:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:49:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:49:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 08:49:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 08:49:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 08:49:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 08:49:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 08:49:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 08:49:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 08:49:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 08:49:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 08:49:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 08:49:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 08:49:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 08:49:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 08:49:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 08:49:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 08:49:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 08:49:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 08:49:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 08:49:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 08:49:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 08:49:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 08:49:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 08:49:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 08:49:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 08:49:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 08:49:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 08:49:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 08:49:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 08:49:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 08:49:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 08:49:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 08:49:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 08:49:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 08:49:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 08:49:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 08:49:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 08:49:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 08:49:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:49:13] [Rank 0] PRINT: step:2000/10000 val_loss:4.8904 total_sharp:2.4668e-04 L1_sharp:1.3656e-03 L2_sharp:2.1700e-04 L3_sharp:1.6323e-05 L4_sharp:1.3287e-04 L5_sharp:1.3317e-04 L6_sharp:7.9604e-05 L7_sharp:1.2463e-04 L8_sharp:2.9020e-04 L9_sharp:2.1683e-04 L10_sharp:3.2275e-04 L11_sharp:5.3104e-04 L12_sharp:3.0903e-03 total_fnorm:7.8500e+01 total_l1_linf:2.1709e+05 total_spectral:4.0250e+01 L1_fnorm:6.1562e+00 L2_fnorm:6.1562e+00 L3_fnorm:6.1250e+00 L4_fnorm:6.1562e+00 L5_fnorm:6.1875e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2500e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.2500e+00 L10_fnorm:6.2500e+00 L11_fnorm:6.2812e+00 L12_fnorm:6.2188e+00 L1_l1linf:1.7891e+00 L2_l1linf:1.6797e+00 L3_l1linf:1.6562e+00 L4_l1linf:1.6406e+00 L5_l1linf:1.6406e+00 L6_l1linf:1.6406e+00 L7_l1linf:1.6641e+00 L8_l1linf:1.6172e+00 L9_l1linf:1.6172e+00 L10_l1linf:1.6016e+00 L11_l1linf:1.6016e+00 L12_l1linf:1.5781e+00 L1_spectral:7.3508e-02 L2_spectral:7.2055e-02 L3_spectral:7.1992e-02 L4_spectral:7.2597e-02 L5_spectral:7.3108e-02 L6_spectral:7.2967e-02 L7_spectral:7.3474e-02 L8_spectral:7.4542e-02 L9_spectral:7.3676e-02 L10_spectral:7.3236e-02 L11_spectral:7.3156e-02 L12_spectral:7.3134e-02 train_time:103528ms step_avg:51.76ms +[2025-09-11 08:49:13] [Rank 0] PRINT: step:2000/10000 val_loss:4.8904 total_sharp:2.4668e-04 L1_sharp:1.3656e-03 L2_sharp:2.1700e-04 L3_sharp:1.6323e-05 L4_sharp:1.3287e-04 L5_sharp:1.3317e-04 L6_sharp:7.9604e-05 L7_sharp:1.2463e-04 L8_sharp:2.9020e-04 L9_sharp:2.1683e-04 L10_sharp:3.2275e-04 L11_sharp:5.3104e-04 L12_sharp:3.0903e-03 total_fnorm:7.8500e+01 total_l1_linf:2.1709e+05 total_spectral:4.0250e+01 L1_fnorm:6.1562e+00 L2_fnorm:6.1562e+00 L3_fnorm:6.1250e+00 L4_fnorm:6.1562e+00 L5_fnorm:6.1875e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2500e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.2500e+00 L10_fnorm:6.2500e+00 L11_fnorm:6.2812e+00 L12_fnorm:6.2188e+00 L1_l1linf:1.7891e+00 L2_l1linf:1.6797e+00 L3_l1linf:1.6562e+00 L4_l1linf:1.6406e+00 L5_l1linf:1.6406e+00 
L6_l1linf:1.6406e+00 L7_l1linf:1.6641e+00 L8_l1linf:1.6172e+00 L9_l1linf:1.6172e+00 L10_l1linf:1.6016e+00 L11_l1linf:1.6016e+00 L12_l1linf:1.5781e+00 L1_spectral:7.3508e-02 L2_spectral:7.2055e-02 L3_spectral:7.1992e-02 L4_spectral:7.2597e-02 L5_spectral:7.3108e-02 L6_spectral:7.2967e-02 L7_spectral:7.3474e-02 L8_spectral:7.4542e-02 L9_spectral:7.3676e-02 L10_spectral:7.3236e-02 L11_spectral:7.3156e-02 L12_spectral:7.3134e-02 train_time:103528ms step_avg:51.76ms +[2025-09-11 08:49:15] [Rank 0] step:2001/10000 train_time:104679ms step_avg:52.31ms +[2025-09-11 08:49:15] [Rank 0] step:2001/10000 train_time:104679ms step_avg:52.31ms +[2025-09-11 08:49:15] [Rank 0] step:2021/10000 train_time:105363ms step_avg:52.13ms +[2025-09-11 08:49:15] [Rank 0] step:2021/10000 train_time:105363ms step_avg:52.13ms +[2025-09-11 08:49:16] [Rank 0] step:2041/10000 train_time:106015ms step_avg:51.94ms +[2025-09-11 08:49:16] [Rank 0] step:2041/10000 train_time:106015ms step_avg:51.94ms +[2025-09-11 08:49:17] [Rank 0] step:2061/10000 train_time:106971ms step_avg:51.90ms +[2025-09-11 08:49:17] [Rank 0] step:2061/10000 train_time:106971ms step_avg:51.90ms +[2025-09-11 08:49:17] [Rank 0] step:2081/10000 train_time:107622ms step_avg:51.72ms +[2025-09-11 08:49:17] [Rank 0] step:2081/10000 train_time:107622ms step_avg:51.72ms +[2025-09-11 08:49:18] [Rank 0] step:2101/10000 train_time:108273ms step_avg:51.53ms +[2025-09-11 08:49:18] [Rank 0] step:2101/10000 train_time:108273ms step_avg:51.53ms +[2025-09-11 08:49:19] [Rank 0] step:2121/10000 train_time:108924ms step_avg:51.35ms +[2025-09-11 08:49:19] [Rank 0] step:2121/10000 train_time:108924ms step_avg:51.35ms +[2025-09-11 08:49:19] [Rank 0] step:2141/10000 train_time:109574ms step_avg:51.18ms +[2025-09-11 08:49:19] [Rank 0] step:2141/10000 train_time:109574ms step_avg:51.18ms +[2025-09-11 08:49:20] [Rank 0] step:2161/10000 train_time:110224ms step_avg:51.01ms +[2025-09-11 08:49:20] [Rank 0] step:2161/10000 train_time:110224ms step_avg:51.01ms 
+[2025-09-11 08:49:21] [Rank 0] step:2181/10000 train_time:110874ms step_avg:50.84ms +[2025-09-11 08:49:21] [Rank 0] step:2181/10000 train_time:110874ms step_avg:50.84ms +[2025-09-11 08:49:21] [Rank 0] step:2201/10000 train_time:111524ms step_avg:50.67ms +[2025-09-11 08:49:21] [Rank 0] step:2201/10000 train_time:111524ms step_avg:50.67ms +[2025-09-11 08:49:22] [Rank 0] step:2221/10000 train_time:112174ms step_avg:50.51ms +[2025-09-11 08:49:22] [Rank 0] step:2221/10000 train_time:112174ms step_avg:50.51ms +[2025-09-11 08:49:23] [Rank 0] step:2241/10000 train_time:112836ms step_avg:50.35ms +[2025-09-11 08:49:23] [Rank 0] step:2241/10000 train_time:112836ms step_avg:50.35ms +[2025-09-11 08:49:23] [Rank 0] step:2261/10000 train_time:113498ms step_avg:50.20ms +[2025-09-11 08:49:23] [Rank 0] step:2261/10000 train_time:113498ms step_avg:50.20ms +[2025-09-11 08:49:24] [Rank 0] step:2281/10000 train_time:114162ms step_avg:50.05ms +[2025-09-11 08:49:24] [Rank 0] step:2281/10000 train_time:114162ms step_avg:50.05ms +[2025-09-11 08:49:25] [Rank 0] step:2301/10000 train_time:114825ms step_avg:49.90ms +[2025-09-11 08:49:25] [Rank 0] step:2301/10000 train_time:114825ms step_avg:49.90ms +[2025-09-11 08:49:25] [Rank 0] step:2321/10000 train_time:115488ms step_avg:49.76ms +[2025-09-11 08:49:25] [Rank 0] step:2321/10000 train_time:115488ms step_avg:49.76ms +[2025-09-11 08:49:26] [Rank 0] step:2341/10000 train_time:116151ms step_avg:49.62ms +[2025-09-11 08:49:26] [Rank 0] step:2341/10000 train_time:116151ms step_avg:49.62ms +[2025-09-11 08:49:27] [Rank 0] step:2361/10000 train_time:116814ms step_avg:49.48ms +[2025-09-11 08:49:27] [Rank 0] step:2361/10000 train_time:116814ms step_avg:49.48ms +[2025-09-11 08:49:27] [Rank 0] step:2381/10000 train_time:117478ms step_avg:49.34ms +[2025-09-11 08:49:27] [Rank 0] step:2381/10000 train_time:117478ms step_avg:49.34ms +[2025-09-11 08:49:28] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 08:49:28] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 08:49:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 08:49:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 08:49:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 08:49:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 08:49:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:49:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:49:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 08:49:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 08:49:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 08:49:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 08:49:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 08:49:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 08:49:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 08:49:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 08:49:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 08:49:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 08:49:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 08:49:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 08:49:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 08:49:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 08:49:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 08:49:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 08:49:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 08:49:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 08:49:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 08:49:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 08:49:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 08:49:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 08:49:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 08:49:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 08:49:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 08:49:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 08:49:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 08:49:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 08:49:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 08:49:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 08:49:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 08:49:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 08:49:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 08:49:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 08:49:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 08:49:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 08:49:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 08:49:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 08:49:39] [Rank 0] PRINT: step:2400/10000 val_loss:4.7555 total_sharp:2.8844e-04 L1_sharp:1.0614e-03 L2_sharp:4.2382e-05 L3_sharp:8.4506e-05 L4_sharp:1.3747e-04 L5_sharp:1.6325e-04 L6_sharp:1.4922e-04 L7_sharp:1.4172e-04 L8_sharp:3.1600e-04 L9_sharp:2.9508e-04 L10_sharp:2.8345e-04 L11_sharp:4.8205e-04 L12_sharp:3.5454e-03 total_fnorm:7.3000e+01 total_l1_linf:1.9763e+05 total_spectral:3.7250e+01 L1_fnorm:6.1562e+00 L2_fnorm:6.1250e+00 L3_fnorm:6.1562e+00 L4_fnorm:6.1562e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2812e+00 L7_fnorm:6.3125e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.2500e+00 L10_fnorm:6.2500e+00 L11_fnorm:6.2812e+00 L12_fnorm:6.2500e+00 L1_l1linf:1.7812e+00 L2_l1linf:1.6328e+00 L3_l1linf:1.6406e+00 L4_l1linf:1.6016e+00 L5_l1linf:1.6094e+00 L6_l1linf:1.6172e+00 L7_l1linf:1.6562e+00 L8_l1linf:1.5938e+00 L9_l1linf:1.5781e+00 L10_l1linf:1.5781e+00 L11_l1linf:1.5547e+00 L12_l1linf:1.5859e+00 L1_spectral:7.4726e-02 L2_spectral:7.3400e-02 L3_spectral:7.3088e-02 L4_spectral:7.3476e-02 L5_spectral:7.4507e-02 L6_spectral:7.4007e-02 L7_spectral:7.5063e-02 L8_spectral:7.5533e-02 L9_spectral:7.5060e-02 L10_spectral:7.4333e-02 L11_spectral:7.4862e-02 L12_spectral:7.4470e-02 train_time:118123ms step_avg:49.22ms +[2025-09-11 08:49:39] [Rank 0] PRINT: step:2400/10000 
val_loss:4.7555 total_sharp:2.8844e-04 L1_sharp:1.0614e-03 L2_sharp:4.2382e-05 L3_sharp:8.4506e-05 L4_sharp:1.3747e-04 L5_sharp:1.6325e-04 L6_sharp:1.4922e-04 L7_sharp:1.4172e-04 L8_sharp:3.1600e-04 L9_sharp:2.9508e-04 L10_sharp:2.8345e-04 L11_sharp:4.8205e-04 L12_sharp:3.5454e-03 total_fnorm:7.3000e+01 total_l1_linf:1.9763e+05 total_spectral:3.7250e+01 L1_fnorm:6.1562e+00 L2_fnorm:6.1250e+00 L3_fnorm:6.1562e+00 L4_fnorm:6.1562e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2812e+00 L7_fnorm:6.3125e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.2500e+00 L10_fnorm:6.2500e+00 L11_fnorm:6.2812e+00 L12_fnorm:6.2500e+00 L1_l1linf:1.7812e+00 L2_l1linf:1.6328e+00 L3_l1linf:1.6406e+00 L4_l1linf:1.6016e+00 L5_l1linf:1.6094e+00 L6_l1linf:1.6172e+00 L7_l1linf:1.6562e+00 L8_l1linf:1.5938e+00 L9_l1linf:1.5781e+00 L10_l1linf:1.5781e+00 L11_l1linf:1.5547e+00 L12_l1linf:1.5859e+00 L1_spectral:7.4726e-02 L2_spectral:7.3400e-02 L3_spectral:7.3088e-02 L4_spectral:7.3476e-02 L5_spectral:7.4507e-02 L6_spectral:7.4007e-02 L7_spectral:7.5063e-02 L8_spectral:7.5533e-02 L9_spectral:7.5060e-02 L10_spectral:7.4333e-02 L11_spectral:7.4862e-02 L12_spectral:7.4470e-02 train_time:118123ms step_avg:49.22ms +[2025-09-11 08:49:41] [Rank 0] step:2401/10000 train_time:120035ms step_avg:49.99ms +[2025-09-11 08:49:41] [Rank 0] step:2401/10000 train_time:120035ms step_avg:49.99ms +[2025-09-11 08:49:42] [Rank 0] step:2421/10000 train_time:120716ms step_avg:49.86ms +[2025-09-11 08:49:42] [Rank 0] step:2421/10000 train_time:120716ms step_avg:49.86ms +[2025-09-11 08:49:43] [Rank 0] step:2441/10000 train_time:121380ms step_avg:49.73ms +[2025-09-11 08:49:43] [Rank 0] step:2441/10000 train_time:121380ms step_avg:49.73ms +[2025-09-11 08:49:43] [Rank 0] step:2461/10000 train_time:122045ms step_avg:49.59ms +[2025-09-11 08:49:43] [Rank 0] step:2461/10000 train_time:122045ms step_avg:49.59ms +[2025-09-11 08:49:44] [Rank 0] step:2481/10000 train_time:122709ms step_avg:49.46ms +[2025-09-11 08:49:44] [Rank 0] step:2481/10000 
train_time:122709ms step_avg:49.46ms +[2025-09-11 08:49:45] [Rank 0] step:2501/10000 train_time:123375ms step_avg:49.33ms +[2025-09-11 08:49:45] [Rank 0] step:2501/10000 train_time:123375ms step_avg:49.33ms +[2025-09-11 08:49:45] [Rank 0] step:2521/10000 train_time:124037ms step_avg:49.20ms +[2025-09-11 08:49:45] [Rank 0] step:2521/10000 train_time:124037ms step_avg:49.20ms +[2025-09-11 08:49:46] [Rank 0] step:2541/10000 train_time:124701ms step_avg:49.08ms +[2025-09-11 08:49:46] [Rank 0] step:2541/10000 train_time:124701ms step_avg:49.08ms +[2025-09-11 08:49:47] [Rank 0] step:2561/10000 train_time:125364ms step_avg:48.95ms +[2025-09-11 08:49:47] [Rank 0] step:2561/10000 train_time:125364ms step_avg:48.95ms +[2025-09-11 08:49:47] [Rank 0] step:2581/10000 train_time:126029ms step_avg:48.83ms +[2025-09-11 08:49:47] [Rank 0] step:2581/10000 train_time:126029ms step_avg:48.83ms +[2025-09-11 08:49:48] [Rank 0] step:2601/10000 train_time:126691ms step_avg:48.71ms +[2025-09-11 08:49:48] [Rank 0] step:2601/10000 train_time:126691ms step_avg:48.71ms +[2025-09-11 08:49:49] [Rank 0] step:2621/10000 train_time:127354ms step_avg:48.59ms +[2025-09-11 08:49:49] [Rank 0] step:2621/10000 train_time:127354ms step_avg:48.59ms +[2025-09-11 08:49:49] [Rank 0] step:2641/10000 train_time:128016ms step_avg:48.47ms +[2025-09-11 08:49:49] [Rank 0] step:2641/10000 train_time:128016ms step_avg:48.47ms +[2025-09-11 08:49:50] [Rank 0] step:2661/10000 train_time:128681ms step_avg:48.36ms +[2025-09-11 08:49:50] [Rank 0] step:2661/10000 train_time:128681ms step_avg:48.36ms +[2025-09-11 08:49:51] [Rank 0] step:2681/10000 train_time:129344ms step_avg:48.24ms +[2025-09-11 08:49:51] [Rank 0] step:2681/10000 train_time:129344ms step_avg:48.24ms +[2025-09-11 08:49:51] [Rank 0] step:2701/10000 train_time:130007ms step_avg:48.13ms +[2025-09-11 08:49:51] [Rank 0] step:2701/10000 train_time:130007ms step_avg:48.13ms +[2025-09-11 08:49:52] [Rank 0] step:2721/10000 train_time:130670ms step_avg:48.02ms 
+[2025-09-11 08:49:52] [Rank 0] step:2721/10000 train_time:130670ms step_avg:48.02ms +[2025-09-11 08:49:53] [Rank 0] step:2741/10000 train_time:131334ms step_avg:47.91ms +[2025-09-11 08:49:53] [Rank 0] step:2741/10000 train_time:131334ms step_avg:47.91ms +[2025-09-11 08:49:53] [Rank 0] step:2761/10000 train_time:131997ms step_avg:47.81ms +[2025-09-11 08:49:53] [Rank 0] step:2761/10000 train_time:131997ms step_avg:47.81ms +[2025-09-11 08:49:54] [Rank 0] step:2781/10000 train_time:132661ms step_avg:47.70ms +[2025-09-11 08:49:54] [Rank 0] step:2781/10000 train_time:132661ms step_avg:47.70ms +[2025-09-11 08:49:55] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 08:49:55] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 08:49:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 08:49:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 08:49:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 08:49:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 08:49:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:49:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:49:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 08:49:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 08:49:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 08:49:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 08:50:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 08:50:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 08:50:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 08:50:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 08:50:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 08:50:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 08:50:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 08:50:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 08:50:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 08:50:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 08:50:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 08:50:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 08:50:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 08:50:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 08:50:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 08:50:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 08:50:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 08:50:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 08:50:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 08:50:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 08:50:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 08:50:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 08:50:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 08:50:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 08:50:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 08:50:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 08:50:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 08:50:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 08:50:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 08:50:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 08:50:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 08:50:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 08:50:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 08:50:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:50:05] [Rank 0] PRINT: step:2800/10000 val_loss:4.7020 total_sharp:2.5852e-04 L1_sharp:8.0614e-04 L2_sharp:2.5921e-04 L3_sharp:1.9629e-06 L4_sharp:7.7706e-05 L5_sharp:1.0653e-04 L6_sharp:6.7147e-05 L7_sharp:1.3896e-04 L8_sharp:2.6096e-04 L9_sharp:2.4470e-04 L10_sharp:3.3472e-04 L11_sharp:4.5156e-04 L12_sharp:1.9312e-03 total_fnorm:7.2000e+01 total_l1_linf:1.9354e+05 total_spectral:3.6750e+01 L1_fnorm:6.1562e+00 L2_fnorm:6.0938e+00 L3_fnorm:6.1875e+00 L4_fnorm:6.1562e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2812e+00 L7_fnorm:6.3125e+00 L8_fnorm:6.0938e+00 L9_fnorm:6.2812e+00 L10_fnorm:6.2812e+00 L11_fnorm:6.3125e+00 L12_fnorm:6.2500e+00 L1_l1linf:1.7344e+00 L2_l1linf:1.5859e+00 L3_l1linf:1.6250e+00 L4_l1linf:1.5781e+00 L5_l1linf:1.5703e+00 L6_l1linf:1.6250e+00 L7_l1linf:1.6641e+00 L8_l1linf:1.5859e+00 L9_l1linf:1.5547e+00 L10_l1linf:1.5391e+00 L11_l1linf:1.5391e+00 L12_l1linf:1.5391e+00 L1_spectral:7.5584e-02 L2_spectral:7.3895e-02 L3_spectral:7.4403e-02 L4_spectral:7.4608e-02 L5_spectral:7.5732e-02 L6_spectral:7.5851e-02 L7_spectral:7.6260e-02 L8_spectral:7.6277e-02 L9_spectral:7.5863e-02 L10_spectral:7.6452e-02 L11_spectral:7.5923e-02 L12_spectral:7.5626e-02 train_time:133306ms step_avg:47.61ms +[2025-09-11 08:50:05] [Rank 0] PRINT: step:2800/10000 val_loss:4.7020 total_sharp:2.5852e-04 L1_sharp:8.0614e-04 L2_sharp:2.5921e-04 L3_sharp:1.9629e-06 L4_sharp:7.7706e-05 L5_sharp:1.0653e-04 L6_sharp:6.7147e-05 L7_sharp:1.3896e-04 L8_sharp:2.6096e-04 L9_sharp:2.4470e-04 L10_sharp:3.3472e-04 L11_sharp:4.5156e-04 L12_sharp:1.9312e-03 total_fnorm:7.2000e+01 total_l1_linf:1.9354e+05 total_spectral:3.6750e+01 L1_fnorm:6.1562e+00 L2_fnorm:6.0938e+00 L3_fnorm:6.1875e+00 L4_fnorm:6.1562e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2812e+00 L7_fnorm:6.3125e+00 L8_fnorm:6.0938e+00 L9_fnorm:6.2812e+00 L10_fnorm:6.2812e+00 L11_fnorm:6.3125e+00 L12_fnorm:6.2500e+00 L1_l1linf:1.7344e+00 L2_l1linf:1.5859e+00 L3_l1linf:1.6250e+00 L4_l1linf:1.5781e+00 L5_l1linf:1.5703e+00 
L6_l1linf:1.6250e+00 L7_l1linf:1.6641e+00 L8_l1linf:1.5859e+00 L9_l1linf:1.5547e+00 L10_l1linf:1.5391e+00 L11_l1linf:1.5391e+00 L12_l1linf:1.5391e+00 L1_spectral:7.5584e-02 L2_spectral:7.3895e-02 L3_spectral:7.4403e-02 L4_spectral:7.4608e-02 L5_spectral:7.5732e-02 L6_spectral:7.5851e-02 L7_spectral:7.6260e-02 L8_spectral:7.6277e-02 L9_spectral:7.5863e-02 L10_spectral:7.6452e-02 L11_spectral:7.5923e-02 L12_spectral:7.5626e-02 train_time:133306ms step_avg:47.61ms +[2025-09-11 08:50:07] [Rank 0] step:2801/10000 train_time:135137ms step_avg:48.25ms +[2025-09-11 08:50:07] [Rank 0] step:2801/10000 train_time:135137ms step_avg:48.25ms +[2025-09-11 08:50:08] [Rank 0] step:2821/10000 train_time:135835ms step_avg:48.15ms +[2025-09-11 08:50:08] [Rank 0] step:2821/10000 train_time:135835ms step_avg:48.15ms +[2025-09-11 08:50:09] [Rank 0] step:2841/10000 train_time:136502ms step_avg:48.05ms +[2025-09-11 08:50:09] [Rank 0] step:2841/10000 train_time:136502ms step_avg:48.05ms +[2025-09-11 08:50:09] [Rank 0] step:2861/10000 train_time:137167ms step_avg:47.94ms +[2025-09-11 08:50:09] [Rank 0] step:2861/10000 train_time:137167ms step_avg:47.94ms +[2025-09-11 08:50:10] [Rank 0] step:2881/10000 train_time:137832ms step_avg:47.84ms +[2025-09-11 08:50:10] [Rank 0] step:2881/10000 train_time:137832ms step_avg:47.84ms +[2025-09-11 08:50:11] [Rank 0] step:2901/10000 train_time:138496ms step_avg:47.74ms +[2025-09-11 08:50:11] [Rank 0] step:2901/10000 train_time:138496ms step_avg:47.74ms +[2025-09-11 08:50:11] [Rank 0] step:2921/10000 train_time:139161ms step_avg:47.64ms +[2025-09-11 08:50:11] [Rank 0] step:2921/10000 train_time:139161ms step_avg:47.64ms +[2025-09-11 08:50:12] [Rank 0] step:2941/10000 train_time:139825ms step_avg:47.54ms +[2025-09-11 08:50:12] [Rank 0] step:2941/10000 train_time:139825ms step_avg:47.54ms +[2025-09-11 08:50:13] [Rank 0] step:2961/10000 train_time:140489ms step_avg:47.45ms +[2025-09-11 08:50:13] [Rank 0] step:2961/10000 train_time:140489ms step_avg:47.45ms 
+[2025-09-11 08:50:13] [Rank 0] step:2981/10000 train_time:141156ms step_avg:47.35ms +[2025-09-11 08:50:13] [Rank 0] step:2981/10000 train_time:141156ms step_avg:47.35ms +[2025-09-11 08:50:14] [Rank 0] step:3001/10000 train_time:141825ms step_avg:47.26ms +[2025-09-11 08:50:14] [Rank 0] step:3001/10000 train_time:141825ms step_avg:47.26ms +[2025-09-11 08:50:15] [Rank 0] step:3021/10000 train_time:142493ms step_avg:47.17ms +[2025-09-11 08:50:15] [Rank 0] step:3021/10000 train_time:142493ms step_avg:47.17ms +[2025-09-11 08:50:15] [Rank 0] step:3041/10000 train_time:143161ms step_avg:47.08ms +[2025-09-11 08:50:15] [Rank 0] step:3041/10000 train_time:143161ms step_avg:47.08ms +[2025-09-11 08:50:16] [Rank 0] step:3061/10000 train_time:143830ms step_avg:46.99ms +[2025-09-11 08:50:16] [Rank 0] step:3061/10000 train_time:143830ms step_avg:46.99ms +[2025-09-11 08:50:17] [Rank 0] step:3081/10000 train_time:144497ms step_avg:46.90ms +[2025-09-11 08:50:17] [Rank 0] step:3081/10000 train_time:144497ms step_avg:46.90ms +[2025-09-11 08:50:18] [Rank 0] step:3101/10000 train_time:145715ms step_avg:46.99ms +[2025-09-11 08:50:18] [Rank 0] step:3101/10000 train_time:145715ms step_avg:46.99ms +[2025-09-11 08:50:18] [Rank 0] step:3121/10000 train_time:146384ms step_avg:46.90ms +[2025-09-11 08:50:18] [Rank 0] step:3121/10000 train_time:146384ms step_avg:46.90ms +[2025-09-11 08:50:19] [Rank 0] step:3141/10000 train_time:147051ms step_avg:46.82ms +[2025-09-11 08:50:19] [Rank 0] step:3141/10000 train_time:147051ms step_avg:46.82ms +[2025-09-11 08:50:20] [Rank 0] step:3161/10000 train_time:147988ms step_avg:46.82ms +[2025-09-11 08:50:20] [Rank 0] step:3161/10000 train_time:147988ms step_avg:46.82ms +[2025-09-11 08:50:21] [Rank 0] step:3181/10000 train_time:148656ms step_avg:46.73ms +[2025-09-11 08:50:21] [Rank 0] step:3181/10000 train_time:148656ms step_avg:46.73ms +[2025-09-11 08:50:21] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 08:50:21] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 08:50:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 08:50:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 08:50:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 08:50:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 08:50:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:50:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:50:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 08:50:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 08:50:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 08:50:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 08:50:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 08:50:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 08:50:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 08:50:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 08:50:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 08:50:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 08:50:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 08:50:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 08:50:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 08:50:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 08:50:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 08:50:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 08:50:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 08:50:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 08:50:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 08:50:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 08:50:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 08:50:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 08:50:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 08:50:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 08:50:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 08:50:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 08:50:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 08:50:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 08:50:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 08:50:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 08:50:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 08:50:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 08:50:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 08:50:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 08:50:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 08:50:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 08:50:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 08:50:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 08:50:31] [Rank 0] PRINT: step:3200/10000 val_loss:4.6166 total_sharp:1.7246e-04 L1_sharp:7.9063e-04 L2_sharp:1.3080e-04 L3_sharp:4.1684e-05 L4_sharp:9.0986e-05 L5_sharp:1.4262e-04 L6_sharp:1.3014e-04 L7_sharp:1.3198e-04 L8_sharp:2.0939e-04 L9_sharp:2.1150e-04 L10_sharp:2.7030e-04 L11_sharp:3.7476e-04 L12_sharp:1.6426e-03 total_fnorm:7.8000e+01 total_l1_linf:2.1504e+05 total_spectral:4.0500e+01 L1_fnorm:6.1562e+00 L2_fnorm:6.1250e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.1562e+00 L5_fnorm:6.2500e+00 L6_fnorm:6.3125e+00 L7_fnorm:6.3125e+00 L8_fnorm:6.0938e+00 L9_fnorm:6.3125e+00 L10_fnorm:6.2812e+00 L11_fnorm:6.3125e+00 L12_fnorm:6.3125e+00 L1_l1linf:1.6875e+00 L2_l1linf:1.5703e+00 L3_l1linf:1.5859e+00 L4_l1linf:1.5625e+00 L5_l1linf:1.6016e+00 L6_l1linf:1.6016e+00 L7_l1linf:1.6562e+00 L8_l1linf:1.5703e+00 L9_l1linf:1.5469e+00 L10_l1linf:1.5312e+00 L11_l1linf:1.5000e+00 L12_l1linf:1.5000e+00 L1_spectral:7.6638e-02 L2_spectral:7.4598e-02 L3_spectral:7.4999e-02 L4_spectral:7.5985e-02 L5_spectral:7.6276e-02 L6_spectral:7.6638e-02 L7_spectral:7.6975e-02 L8_spectral:7.6579e-02 L9_spectral:7.7047e-02 L10_spectral:7.7304e-02 L11_spectral:7.7458e-02 L12_spectral:7.6941e-02 train_time:149305ms step_avg:46.66ms +[2025-09-11 08:50:31] [Rank 0] PRINT: step:3200/10000 
val_loss:4.6166 total_sharp:1.7246e-04 L1_sharp:7.9063e-04 L2_sharp:1.3080e-04 L3_sharp:4.1684e-05 L4_sharp:9.0986e-05 L5_sharp:1.4262e-04 L6_sharp:1.3014e-04 L7_sharp:1.3198e-04 L8_sharp:2.0939e-04 L9_sharp:2.1150e-04 L10_sharp:2.7030e-04 L11_sharp:3.7476e-04 L12_sharp:1.6426e-03 total_fnorm:7.8000e+01 total_l1_linf:2.1504e+05 total_spectral:4.0500e+01 L1_fnorm:6.1562e+00 L2_fnorm:6.1250e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.1562e+00 L5_fnorm:6.2500e+00 L6_fnorm:6.3125e+00 L7_fnorm:6.3125e+00 L8_fnorm:6.0938e+00 L9_fnorm:6.3125e+00 L10_fnorm:6.2812e+00 L11_fnorm:6.3125e+00 L12_fnorm:6.3125e+00 L1_l1linf:1.6875e+00 L2_l1linf:1.5703e+00 L3_l1linf:1.5859e+00 L4_l1linf:1.5625e+00 L5_l1linf:1.6016e+00 L6_l1linf:1.6016e+00 L7_l1linf:1.6562e+00 L8_l1linf:1.5703e+00 L9_l1linf:1.5469e+00 L10_l1linf:1.5312e+00 L11_l1linf:1.5000e+00 L12_l1linf:1.5000e+00 L1_spectral:7.6638e-02 L2_spectral:7.4598e-02 L3_spectral:7.4999e-02 L4_spectral:7.5985e-02 L5_spectral:7.6276e-02 L6_spectral:7.6638e-02 L7_spectral:7.6975e-02 L8_spectral:7.6579e-02 L9_spectral:7.7047e-02 L10_spectral:7.7304e-02 L11_spectral:7.7458e-02 L12_spectral:7.6941e-02 train_time:149305ms step_avg:46.66ms +[2025-09-11 08:50:33] [Rank 0] step:3201/10000 train_time:151052ms step_avg:47.19ms +[2025-09-11 08:50:33] [Rank 0] step:3201/10000 train_time:151052ms step_avg:47.19ms +[2025-09-11 08:50:34] [Rank 0] step:3221/10000 train_time:151754ms step_avg:47.11ms +[2025-09-11 08:50:34] [Rank 0] step:3221/10000 train_time:151754ms step_avg:47.11ms +[2025-09-11 08:50:35] [Rank 0] step:3241/10000 train_time:152422ms step_avg:47.03ms +[2025-09-11 08:50:35] [Rank 0] step:3241/10000 train_time:152422ms step_avg:47.03ms +[2025-09-11 08:50:35] [Rank 0] step:3261/10000 train_time:153090ms step_avg:46.95ms +[2025-09-11 08:50:35] [Rank 0] step:3261/10000 train_time:153090ms step_avg:46.95ms +[2025-09-11 08:50:36] [Rank 0] step:3281/10000 train_time:153758ms step_avg:46.86ms +[2025-09-11 08:50:36] [Rank 0] step:3281/10000 
train_time:153758ms step_avg:46.86ms +[2025-09-11 08:50:37] [Rank 0] step:3301/10000 train_time:154425ms step_avg:46.78ms +[2025-09-11 08:50:37] [Rank 0] step:3301/10000 train_time:154425ms step_avg:46.78ms +[2025-09-11 08:50:37] [Rank 0] step:3321/10000 train_time:155092ms step_avg:46.70ms +[2025-09-11 08:50:37] [Rank 0] step:3321/10000 train_time:155092ms step_avg:46.70ms +[2025-09-11 08:50:38] [Rank 0] step:3341/10000 train_time:155760ms step_avg:46.62ms +[2025-09-11 08:50:38] [Rank 0] step:3341/10000 train_time:155760ms step_avg:46.62ms +[2025-09-11 08:50:39] [Rank 0] step:3361/10000 train_time:156427ms step_avg:46.54ms +[2025-09-11 08:50:39] [Rank 0] step:3361/10000 train_time:156427ms step_avg:46.54ms +[2025-09-11 08:50:39] [Rank 0] step:3381/10000 train_time:157093ms step_avg:46.46ms +[2025-09-11 08:50:39] [Rank 0] step:3381/10000 train_time:157093ms step_avg:46.46ms +[2025-09-11 08:50:40] [Rank 0] step:3401/10000 train_time:157759ms step_avg:46.39ms +[2025-09-11 08:50:40] [Rank 0] step:3401/10000 train_time:157759ms step_avg:46.39ms +[2025-09-11 08:50:41] [Rank 0] step:3421/10000 train_time:158425ms step_avg:46.31ms +[2025-09-11 08:50:41] [Rank 0] step:3421/10000 train_time:158425ms step_avg:46.31ms +[2025-09-11 08:50:41] [Rank 0] step:3441/10000 train_time:159092ms step_avg:46.23ms +[2025-09-11 08:50:41] [Rank 0] step:3441/10000 train_time:159092ms step_avg:46.23ms +[2025-09-11 08:50:42] [Rank 0] step:3461/10000 train_time:159758ms step_avg:46.16ms +[2025-09-11 08:50:42] [Rank 0] step:3461/10000 train_time:159758ms step_avg:46.16ms +[2025-09-11 08:50:43] [Rank 0] step:3481/10000 train_time:160425ms step_avg:46.09ms +[2025-09-11 08:50:43] [Rank 0] step:3481/10000 train_time:160425ms step_avg:46.09ms +[2025-09-11 08:50:43] [Rank 0] step:3501/10000 train_time:161092ms step_avg:46.01ms +[2025-09-11 08:50:43] [Rank 0] step:3501/10000 train_time:161092ms step_avg:46.01ms +[2025-09-11 08:50:44] [Rank 0] step:3521/10000 train_time:161759ms step_avg:45.94ms 
+[2025-09-11 08:50:44] [Rank 0] step:3521/10000 train_time:161759ms step_avg:45.94ms +[2025-09-11 08:50:45] [Rank 0] step:3541/10000 train_time:162426ms step_avg:45.87ms +[2025-09-11 08:50:45] [Rank 0] step:3541/10000 train_time:162426ms step_avg:45.87ms +[2025-09-11 08:50:45] [Rank 0] step:3561/10000 train_time:163092ms step_avg:45.80ms +[2025-09-11 08:50:45] [Rank 0] step:3561/10000 train_time:163092ms step_avg:45.80ms +[2025-09-11 08:50:46] [Rank 0] step:3581/10000 train_time:163759ms step_avg:45.73ms +[2025-09-11 08:50:46] [Rank 0] step:3581/10000 train_time:163759ms step_avg:45.73ms +[2025-09-11 08:50:47] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 08:50:47] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 08:50:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 08:50:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 08:50:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 08:50:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 08:50:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:50:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:50:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 08:50:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 08:50:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 08:50:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 08:50:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 08:50:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 08:50:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 08:50:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 08:50:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 08:50:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 08:50:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 08:50:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 08:50:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 08:50:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 08:50:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 08:50:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 08:50:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 08:50:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 08:50:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 08:50:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 08:50:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 08:50:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 08:50:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 08:50:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 08:50:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 08:50:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 08:50:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 08:50:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 08:50:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 08:50:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 08:50:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 08:50:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 08:50:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 08:50:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 08:50:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 08:50:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 08:50:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 08:50:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:50:57] [Rank 0] PRINT: step:3600/10000 val_loss:4.5725 total_sharp:2.0413e-04 L1_sharp:7.0235e-04 L2_sharp:1.4036e-04 L3_sharp:1.0285e-06 L4_sharp:3.7333e-05 L5_sharp:9.4631e-05 L6_sharp:8.2353e-05 L7_sharp:1.0172e-04 L8_sharp:2.0008e-04 L9_sharp:2.1429e-04 L10_sharp:2.4197e-04 L11_sharp:3.6473e-04 L12_sharp:2.3409e-03 total_fnorm:7.1500e+01 total_l1_linf:1.9149e+05 total_spectral:3.6750e+01 L1_fnorm:6.1250e+00 L2_fnorm:6.1250e+00 L3_fnorm:6.1875e+00 L4_fnorm:6.2188e+00 L5_fnorm:6.2500e+00 L6_fnorm:6.3125e+00 L7_fnorm:6.3125e+00 L8_fnorm:6.0938e+00 L9_fnorm:6.2812e+00 L10_fnorm:6.2812e+00 L11_fnorm:6.3125e+00 L12_fnorm:6.2812e+00 L1_l1linf:1.6875e+00 L2_l1linf:1.5703e+00 L3_l1linf:1.5469e+00 L4_l1linf:1.5312e+00 L5_l1linf:1.5625e+00 L6_l1linf:1.6094e+00 L7_l1linf:1.6328e+00 L8_l1linf:1.5625e+00 L9_l1linf:1.5234e+00 L10_l1linf:1.5078e+00 L11_l1linf:1.4766e+00 L12_l1linf:1.4922e+00 L1_spectral:7.6944e-02 L2_spectral:7.4887e-02 L3_spectral:7.5510e-02 L4_spectral:7.6288e-02 L5_spectral:7.7246e-02 L6_spectral:7.7333e-02 L7_spectral:7.7401e-02 L8_spectral:7.7165e-02 L9_spectral:7.7557e-02 L10_spectral:7.7738e-02 L11_spectral:7.7781e-02 L12_spectral:7.7130e-02 train_time:164407ms step_avg:45.67ms +[2025-09-11 08:50:57] [Rank 0] PRINT: step:3600/10000 val_loss:4.5725 total_sharp:2.0413e-04 L1_sharp:7.0235e-04 L2_sharp:1.4036e-04 L3_sharp:1.0285e-06 L4_sharp:3.7333e-05 L5_sharp:9.4631e-05 L6_sharp:8.2353e-05 L7_sharp:1.0172e-04 L8_sharp:2.0008e-04 L9_sharp:2.1429e-04 L10_sharp:2.4197e-04 L11_sharp:3.6473e-04 L12_sharp:2.3409e-03 total_fnorm:7.1500e+01 total_l1_linf:1.9149e+05 total_spectral:3.6750e+01 L1_fnorm:6.1250e+00 L2_fnorm:6.1250e+00 L3_fnorm:6.1875e+00 L4_fnorm:6.2188e+00 L5_fnorm:6.2500e+00 L6_fnorm:6.3125e+00 L7_fnorm:6.3125e+00 L8_fnorm:6.0938e+00 L9_fnorm:6.2812e+00 L10_fnorm:6.2812e+00 L11_fnorm:6.3125e+00 L12_fnorm:6.2812e+00 L1_l1linf:1.6875e+00 L2_l1linf:1.5703e+00 L3_l1linf:1.5469e+00 L4_l1linf:1.5312e+00 L5_l1linf:1.5625e+00 
L6_l1linf:1.6094e+00 L7_l1linf:1.6328e+00 L8_l1linf:1.5625e+00 L9_l1linf:1.5234e+00 L10_l1linf:1.5078e+00 L11_l1linf:1.4766e+00 L12_l1linf:1.4922e+00 L1_spectral:7.6944e-02 L2_spectral:7.4887e-02 L3_spectral:7.5510e-02 L4_spectral:7.6288e-02 L5_spectral:7.7246e-02 L6_spectral:7.7333e-02 L7_spectral:7.7401e-02 L8_spectral:7.7165e-02 L9_spectral:7.7557e-02 L10_spectral:7.7738e-02 L11_spectral:7.7781e-02 L12_spectral:7.7130e-02 train_time:164407ms step_avg:45.67ms +[2025-09-11 08:51:00] [Rank 0] step:3601/10000 train_time:166919ms step_avg:46.35ms +[2025-09-11 08:51:00] [Rank 0] step:3601/10000 train_time:166919ms step_avg:46.35ms +[2025-09-11 08:51:00] [Rank 0] step:3621/10000 train_time:167592ms step_avg:46.28ms +[2025-09-11 08:51:00] [Rank 0] step:3621/10000 train_time:167592ms step_avg:46.28ms +[2025-09-11 08:51:01] [Rank 0] step:3641/10000 train_time:168259ms step_avg:46.21ms +[2025-09-11 08:51:01] [Rank 0] step:3641/10000 train_time:168259ms step_avg:46.21ms +[2025-09-11 08:51:02] [Rank 0] step:3661/10000 train_time:168927ms step_avg:46.14ms +[2025-09-11 08:51:02] [Rank 0] step:3661/10000 train_time:168927ms step_avg:46.14ms +[2025-09-11 08:51:02] [Rank 0] step:3681/10000 train_time:169594ms step_avg:46.07ms +[2025-09-11 08:51:02] [Rank 0] step:3681/10000 train_time:169594ms step_avg:46.07ms +[2025-09-11 08:51:03] [Rank 0] step:3701/10000 train_time:170261ms step_avg:46.00ms +[2025-09-11 08:51:03] [Rank 0] step:3701/10000 train_time:170261ms step_avg:46.00ms +[2025-09-11 08:51:04] [Rank 0] step:3721/10000 train_time:170938ms step_avg:45.94ms +[2025-09-11 08:51:04] [Rank 0] step:3721/10000 train_time:170938ms step_avg:45.94ms +[2025-09-11 08:51:04] [Rank 0] step:3741/10000 train_time:171617ms step_avg:45.87ms +[2025-09-11 08:51:04] [Rank 0] step:3741/10000 train_time:171617ms step_avg:45.87ms +[2025-09-11 08:51:05] [Rank 0] step:3761/10000 train_time:172295ms step_avg:45.81ms +[2025-09-11 08:51:05] [Rank 0] step:3761/10000 train_time:172295ms step_avg:45.81ms 
+[2025-09-11 08:51:06] [Rank 0] step:3781/10000 train_time:172973ms step_avg:45.75ms +[2025-09-11 08:51:06] [Rank 0] step:3781/10000 train_time:172973ms step_avg:45.75ms +[2025-09-11 08:51:06] [Rank 0] step:3801/10000 train_time:173651ms step_avg:45.69ms +[2025-09-11 08:51:06] [Rank 0] step:3801/10000 train_time:173651ms step_avg:45.69ms +[2025-09-11 08:51:07] [Rank 0] step:3821/10000 train_time:174330ms step_avg:45.62ms +[2025-09-11 08:51:07] [Rank 0] step:3821/10000 train_time:174330ms step_avg:45.62ms +[2025-09-11 08:51:08] [Rank 0] step:3841/10000 train_time:175008ms step_avg:45.56ms +[2025-09-11 08:51:08] [Rank 0] step:3841/10000 train_time:175008ms step_avg:45.56ms +[2025-09-11 08:51:09] [Rank 0] step:3861/10000 train_time:175685ms step_avg:45.50ms +[2025-09-11 08:51:09] [Rank 0] step:3861/10000 train_time:175685ms step_avg:45.50ms +[2025-09-11 08:51:09] [Rank 0] step:3881/10000 train_time:176362ms step_avg:45.44ms +[2025-09-11 08:51:09] [Rank 0] step:3881/10000 train_time:176362ms step_avg:45.44ms +[2025-09-11 08:51:10] [Rank 0] step:3901/10000 train_time:177039ms step_avg:45.38ms +[2025-09-11 08:51:10] [Rank 0] step:3901/10000 train_time:177039ms step_avg:45.38ms +[2025-09-11 08:51:11] [Rank 0] step:3921/10000 train_time:177716ms step_avg:45.32ms +[2025-09-11 08:51:11] [Rank 0] step:3921/10000 train_time:177716ms step_avg:45.32ms +[2025-09-11 08:51:11] [Rank 0] step:3941/10000 train_time:178394ms step_avg:45.27ms +[2025-09-11 08:51:11] [Rank 0] step:3941/10000 train_time:178394ms step_avg:45.27ms +[2025-09-11 08:51:12] [Rank 0] step:3961/10000 train_time:179072ms step_avg:45.21ms +[2025-09-11 08:51:12] [Rank 0] step:3961/10000 train_time:179072ms step_avg:45.21ms +[2025-09-11 08:51:13] [Rank 0] step:3981/10000 train_time:179749ms step_avg:45.15ms +[2025-09-11 08:51:13] [Rank 0] step:3981/10000 train_time:179749ms step_avg:45.15ms +[2025-09-11 08:51:13] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 08:51:13] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 08:51:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 08:51:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 08:51:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 08:51:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 08:51:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:51:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:51:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 08:51:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 08:51:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 08:51:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 08:51:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 08:51:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 08:51:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 08:51:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 08:51:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 08:51:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 08:51:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 08:51:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 08:51:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 08:51:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 08:51:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 08:51:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 08:51:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 08:51:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 08:51:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 08:51:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 08:51:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 08:51:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 08:51:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 08:51:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 08:51:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 08:51:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 08:51:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 08:51:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 08:51:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 08:51:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 08:51:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 08:51:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 08:51:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 08:51:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 08:51:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 08:51:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 08:51:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 08:51:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 08:51:23] [Rank 0] PRINT: step:4000/10000 val_loss:4.5205 total_sharp:2.0988e-04 L1_sharp:8.6360e-04 L2_sharp:1.1189e-04 L3_sharp:2.9231e-05 L4_sharp:1.1128e-04 L5_sharp:7.2004e-05 L6_sharp:7.8260e-05 L7_sharp:9.6591e-05 L8_sharp:2.3622e-04 L9_sharp:2.1599e-04 L10_sharp:2.8447e-04 L11_sharp:4.2624e-04 L12_sharp:3.2338e-03 total_fnorm:8.0000e+01 total_l1_linf:2.0992e+05 total_spectral:4.1250e+01 L1_fnorm:6.0938e+00 L2_fnorm:6.0312e+00 L3_fnorm:6.1250e+00 L4_fnorm:6.1250e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2812e+00 L7_fnorm:6.2812e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.2188e+00 L10_fnorm:6.2500e+00 L11_fnorm:6.2812e+00 L12_fnorm:6.2500e+00 L1_l1linf:1.6875e+00 L2_l1linf:1.5156e+00 L3_l1linf:1.5156e+00 L4_l1linf:1.5078e+00 L5_l1linf:1.5625e+00 L6_l1linf:1.5938e+00 L7_l1linf:1.6172e+00 L8_l1linf:1.5391e+00 L9_l1linf:1.5156e+00 L10_l1linf:1.4844e+00 L11_l1linf:1.4453e+00 L12_l1linf:1.4922e+00 L1_spectral:7.7449e-02 L2_spectral:7.6145e-02 L3_spectral:7.5834e-02 L4_spectral:7.6183e-02 L5_spectral:7.7074e-02 L6_spectral:7.7655e-02 L7_spectral:7.7712e-02 L8_spectral:7.6577e-02 L9_spectral:7.8094e-02 L10_spectral:7.7822e-02 L11_spectral:7.7908e-02 L12_spectral:7.6833e-02 train_time:180407ms step_avg:45.10ms +[2025-09-11 08:51:23] [Rank 0] PRINT: step:4000/10000 
val_loss:4.5205 total_sharp:2.0988e-04 L1_sharp:8.6360e-04 L2_sharp:1.1189e-04 L3_sharp:2.9231e-05 L4_sharp:1.1128e-04 L5_sharp:7.2004e-05 L6_sharp:7.8260e-05 L7_sharp:9.6591e-05 L8_sharp:2.3622e-04 L9_sharp:2.1599e-04 L10_sharp:2.8447e-04 L11_sharp:4.2624e-04 L12_sharp:3.2338e-03 total_fnorm:8.0000e+01 total_l1_linf:2.0992e+05 total_spectral:4.1250e+01 L1_fnorm:6.0938e+00 L2_fnorm:6.0312e+00 L3_fnorm:6.1250e+00 L4_fnorm:6.1250e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2812e+00 L7_fnorm:6.2812e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.2188e+00 L10_fnorm:6.2500e+00 L11_fnorm:6.2812e+00 L12_fnorm:6.2500e+00 L1_l1linf:1.6875e+00 L2_l1linf:1.5156e+00 L3_l1linf:1.5156e+00 L4_l1linf:1.5078e+00 L5_l1linf:1.5625e+00 L6_l1linf:1.5938e+00 L7_l1linf:1.6172e+00 L8_l1linf:1.5391e+00 L9_l1linf:1.5156e+00 L10_l1linf:1.4844e+00 L11_l1linf:1.4453e+00 L12_l1linf:1.4922e+00 L1_spectral:7.7449e-02 L2_spectral:7.6145e-02 L3_spectral:7.5834e-02 L4_spectral:7.6183e-02 L5_spectral:7.7074e-02 L6_spectral:7.7655e-02 L7_spectral:7.7712e-02 L8_spectral:7.6577e-02 L9_spectral:7.8094e-02 L10_spectral:7.7822e-02 L11_spectral:7.7908e-02 L12_spectral:7.6833e-02 train_time:180407ms step_avg:45.10ms +[2025-09-11 08:51:25] [Rank 0] step:4001/10000 train_time:181624ms step_avg:45.39ms +[2025-09-11 08:51:25] [Rank 0] step:4001/10000 train_time:181624ms step_avg:45.39ms +[2025-09-11 08:51:25] [Rank 0] step:4021/10000 train_time:182325ms step_avg:45.34ms +[2025-09-11 08:51:25] [Rank 0] step:4021/10000 train_time:182325ms step_avg:45.34ms +[2025-09-11 08:51:26] [Rank 0] step:4041/10000 train_time:183005ms step_avg:45.29ms +[2025-09-11 08:51:26] [Rank 0] step:4041/10000 train_time:183005ms step_avg:45.29ms +[2025-09-11 08:51:27] [Rank 0] step:4061/10000 train_time:183682ms step_avg:45.23ms +[2025-09-11 08:51:27] [Rank 0] step:4061/10000 train_time:183682ms step_avg:45.23ms +[2025-09-11 08:51:27] [Rank 0] step:4081/10000 train_time:184382ms step_avg:45.18ms +[2025-09-11 08:51:27] [Rank 0] step:4081/10000 
train_time:184382ms step_avg:45.18ms +[2025-09-11 08:51:28] [Rank 0] step:4101/10000 train_time:185060ms step_avg:45.13ms +[2025-09-11 08:51:28] [Rank 0] step:4101/10000 train_time:185060ms step_avg:45.13ms +[2025-09-11 08:51:29] [Rank 0] step:4121/10000 train_time:185738ms step_avg:45.07ms +[2025-09-11 08:51:29] [Rank 0] step:4121/10000 train_time:185738ms step_avg:45.07ms +[2025-09-11 08:51:30] [Rank 0] step:4141/10000 train_time:186425ms step_avg:45.02ms +[2025-09-11 08:51:30] [Rank 0] step:4141/10000 train_time:186425ms step_avg:45.02ms +[2025-09-11 08:51:30] [Rank 0] step:4161/10000 train_time:187103ms step_avg:44.97ms +[2025-09-11 08:51:30] [Rank 0] step:4161/10000 train_time:187103ms step_avg:44.97ms +[2025-09-11 08:51:31] [Rank 0] step:4181/10000 train_time:187800ms step_avg:44.92ms +[2025-09-11 08:51:31] [Rank 0] step:4181/10000 train_time:187800ms step_avg:44.92ms +[2025-09-11 08:51:32] [Rank 0] step:4201/10000 train_time:188490ms step_avg:44.87ms +[2025-09-11 08:51:32] [Rank 0] step:4201/10000 train_time:188490ms step_avg:44.87ms +[2025-09-11 08:51:32] [Rank 0] step:4221/10000 train_time:189167ms step_avg:44.82ms +[2025-09-11 08:51:32] [Rank 0] step:4221/10000 train_time:189167ms step_avg:44.82ms +[2025-09-11 08:51:33] [Rank 0] step:4241/10000 train_time:189844ms step_avg:44.76ms +[2025-09-11 08:51:33] [Rank 0] step:4241/10000 train_time:189844ms step_avg:44.76ms +[2025-09-11 08:51:34] [Rank 0] step:4261/10000 train_time:190534ms step_avg:44.72ms +[2025-09-11 08:51:34] [Rank 0] step:4261/10000 train_time:190534ms step_avg:44.72ms +[2025-09-11 08:51:34] [Rank 0] step:4281/10000 train_time:191213ms step_avg:44.67ms +[2025-09-11 08:51:34] [Rank 0] step:4281/10000 train_time:191213ms step_avg:44.67ms +[2025-09-11 08:51:35] [Rank 0] step:4301/10000 train_time:191891ms step_avg:44.62ms +[2025-09-11 08:51:35] [Rank 0] step:4301/10000 train_time:191891ms step_avg:44.62ms +[2025-09-11 08:51:36] [Rank 0] step:4321/10000 train_time:192596ms step_avg:44.57ms 
+[2025-09-11 08:51:36] [Rank 0] step:4321/10000 train_time:192596ms step_avg:44.57ms +[2025-09-11 08:51:36] [Rank 0] step:4341/10000 train_time:193273ms step_avg:44.52ms +[2025-09-11 08:51:36] [Rank 0] step:4341/10000 train_time:193273ms step_avg:44.52ms +[2025-09-11 08:51:37] [Rank 0] step:4361/10000 train_time:193950ms step_avg:44.47ms +[2025-09-11 08:51:37] [Rank 0] step:4361/10000 train_time:193950ms step_avg:44.47ms +[2025-09-11 08:51:38] [Rank 0] step:4381/10000 train_time:194632ms step_avg:44.43ms +[2025-09-11 08:51:38] [Rank 0] step:4381/10000 train_time:194632ms step_avg:44.43ms +[2025-09-11 08:51:38] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 08:51:38] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 08:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 08:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 08:51:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 08:51:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 08:51:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:51:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:51:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 08:51:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 08:51:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 08:51:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 08:51:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 08:51:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 08:51:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 08:51:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 08:51:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 08:51:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 08:51:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 08:51:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 08:51:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 08:51:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 08:51:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 08:51:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 08:51:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 08:51:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 08:51:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 08:51:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 08:51:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 08:51:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 08:51:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 08:51:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 08:51:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 08:51:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 08:51:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 08:51:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 08:51:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 08:51:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 08:51:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 08:51:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 08:51:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 08:51:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 08:51:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 08:51:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 08:51:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 08:51:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:51:49] [Rank 0] PRINT: step:4400/10000 val_loss:4.4966 total_sharp:1.6986e-04 L1_sharp:8.2665e-04 L2_sharp:8.0310e-05 L3_sharp:3.0196e-05 L4_sharp:3.3838e-05 L5_sharp:8.1925e-05 L6_sharp:8.9387e-05 L7_sharp:8.5194e-05 L8_sharp:2.4145e-04 L9_sharp:2.3635e-04 L10_sharp:2.2897e-04 L11_sharp:3.2772e-04 L12_sharp:2.5383e-03 total_fnorm:7.4500e+01 total_l1_linf:1.9251e+05 total_spectral:3.8250e+01 L1_fnorm:6.0938e+00 L2_fnorm:6.0312e+00 L3_fnorm:6.1562e+00 L4_fnorm:6.1250e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2812e+00 L7_fnorm:6.2812e+00 L8_fnorm:6.0625e+00 L9_fnorm:6.2500e+00 L10_fnorm:6.2500e+00 L11_fnorm:6.2812e+00 L12_fnorm:6.2812e+00 L1_l1linf:1.6641e+00 L2_l1linf:1.5078e+00 L3_l1linf:1.5078e+00 L4_l1linf:1.5156e+00 L5_l1linf:1.5547e+00 L6_l1linf:1.5703e+00 L7_l1linf:1.5938e+00 L8_l1linf:1.5312e+00 L9_l1linf:1.4844e+00 L10_l1linf:1.4688e+00 L11_l1linf:1.4688e+00 L12_l1linf:1.4922e+00 L1_spectral:7.7557e-02 L2_spectral:7.5870e-02 L3_spectral:7.6547e-02 L4_spectral:7.6816e-02 L5_spectral:7.7902e-02 L6_spectral:7.8391e-02 L7_spectral:7.8647e-02 L8_spectral:7.6906e-02 L9_spectral:7.8360e-02 L10_spectral:7.8223e-02 L11_spectral:7.8769e-02 L12_spectral:7.7726e-02 train_time:195291ms step_avg:44.38ms +[2025-09-11 08:51:49] [Rank 0] PRINT: step:4400/10000 val_loss:4.4966 total_sharp:1.6986e-04 L1_sharp:8.2665e-04 L2_sharp:8.0310e-05 L3_sharp:3.0196e-05 L4_sharp:3.3838e-05 L5_sharp:8.1925e-05 L6_sharp:8.9387e-05 L7_sharp:8.5194e-05 L8_sharp:2.4145e-04 L9_sharp:2.3635e-04 L10_sharp:2.2897e-04 L11_sharp:3.2772e-04 L12_sharp:2.5383e-03 total_fnorm:7.4500e+01 total_l1_linf:1.9251e+05 total_spectral:3.8250e+01 L1_fnorm:6.0938e+00 L2_fnorm:6.0312e+00 L3_fnorm:6.1562e+00 L4_fnorm:6.1250e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2812e+00 L7_fnorm:6.2812e+00 L8_fnorm:6.0625e+00 L9_fnorm:6.2500e+00 L10_fnorm:6.2500e+00 L11_fnorm:6.2812e+00 L12_fnorm:6.2812e+00 L1_l1linf:1.6641e+00 L2_l1linf:1.5078e+00 L3_l1linf:1.5078e+00 L4_l1linf:1.5156e+00 L5_l1linf:1.5547e+00 
L6_l1linf:1.5703e+00 L7_l1linf:1.5938e+00 L8_l1linf:1.5312e+00 L9_l1linf:1.4844e+00 L10_l1linf:1.4688e+00 L11_l1linf:1.4688e+00 L12_l1linf:1.4922e+00 L1_spectral:7.7557e-02 L2_spectral:7.5870e-02 L3_spectral:7.6547e-02 L4_spectral:7.6816e-02 L5_spectral:7.7902e-02 L6_spectral:7.8391e-02 L7_spectral:7.8647e-02 L8_spectral:7.6906e-02 L9_spectral:7.8360e-02 L10_spectral:7.8223e-02 L11_spectral:7.8769e-02 L12_spectral:7.7726e-02 train_time:195291ms step_avg:44.38ms +[2025-09-11 08:51:50] [Rank 0] step:4401/10000 train_time:196513ms step_avg:44.65ms +[2025-09-11 08:51:50] [Rank 0] step:4401/10000 train_time:196513ms step_avg:44.65ms +[2025-09-11 08:51:51] [Rank 0] step:4421/10000 train_time:197236ms step_avg:44.61ms +[2025-09-11 08:51:51] [Rank 0] step:4421/10000 train_time:197236ms step_avg:44.61ms +[2025-09-11 08:51:51] [Rank 0] step:4441/10000 train_time:197915ms step_avg:44.57ms +[2025-09-11 08:51:51] [Rank 0] step:4441/10000 train_time:197915ms step_avg:44.57ms +[2025-09-11 08:51:52] [Rank 0] step:4461/10000 train_time:198595ms step_avg:44.52ms +[2025-09-11 08:51:52] [Rank 0] step:4461/10000 train_time:198595ms step_avg:44.52ms +[2025-09-11 08:51:53] [Rank 0] step:4481/10000 train_time:199275ms step_avg:44.47ms +[2025-09-11 08:51:53] [Rank 0] step:4481/10000 train_time:199275ms step_avg:44.47ms +[2025-09-11 08:51:53] [Rank 0] step:4501/10000 train_time:199955ms step_avg:44.42ms +[2025-09-11 08:51:53] [Rank 0] step:4501/10000 train_time:199955ms step_avg:44.42ms +[2025-09-11 08:51:54] [Rank 0] step:4521/10000 train_time:200636ms step_avg:44.38ms +[2025-09-11 08:51:54] [Rank 0] step:4521/10000 train_time:200636ms step_avg:44.38ms +[2025-09-11 08:51:55] [Rank 0] step:4541/10000 train_time:201319ms step_avg:44.33ms +[2025-09-11 08:51:55] [Rank 0] step:4541/10000 train_time:201319ms step_avg:44.33ms +[2025-09-11 08:51:55] [Rank 0] step:4561/10000 train_time:201998ms step_avg:44.29ms +[2025-09-11 08:51:55] [Rank 0] step:4561/10000 train_time:201998ms step_avg:44.29ms 
+[2025-09-11 08:51:56] [Rank 0] step:4581/10000 train_time:202679ms step_avg:44.24ms +[2025-09-11 08:51:56] [Rank 0] step:4581/10000 train_time:202679ms step_avg:44.24ms +[2025-09-11 08:51:57] [Rank 0] step:4601/10000 train_time:203359ms step_avg:44.20ms +[2025-09-11 08:51:57] [Rank 0] step:4601/10000 train_time:203359ms step_avg:44.20ms +[2025-09-11 08:51:57] [Rank 0] step:4621/10000 train_time:204039ms step_avg:44.15ms +[2025-09-11 08:51:57] [Rank 0] step:4621/10000 train_time:204039ms step_avg:44.15ms +[2025-09-11 08:51:58] [Rank 0] step:4641/10000 train_time:204719ms step_avg:44.11ms +[2025-09-11 08:51:58] [Rank 0] step:4641/10000 train_time:204719ms step_avg:44.11ms +[2025-09-11 08:51:59] [Rank 0] step:4661/10000 train_time:205400ms step_avg:44.07ms +[2025-09-11 08:51:59] [Rank 0] step:4661/10000 train_time:205400ms step_avg:44.07ms +[2025-09-11 08:51:59] [Rank 0] step:4681/10000 train_time:206080ms step_avg:44.02ms +[2025-09-11 08:51:59] [Rank 0] step:4681/10000 train_time:206080ms step_avg:44.02ms +[2025-09-11 08:52:00] [Rank 0] step:4701/10000 train_time:206759ms step_avg:43.98ms +[2025-09-11 08:52:00] [Rank 0] step:4701/10000 train_time:206759ms step_avg:43.98ms +[2025-09-11 08:52:01] [Rank 0] step:4721/10000 train_time:207440ms step_avg:43.94ms +[2025-09-11 08:52:01] [Rank 0] step:4721/10000 train_time:207440ms step_avg:43.94ms +[2025-09-11 08:52:01] [Rank 0] step:4741/10000 train_time:208120ms step_avg:43.90ms +[2025-09-11 08:52:01] [Rank 0] step:4741/10000 train_time:208120ms step_avg:43.90ms +[2025-09-11 08:52:02] [Rank 0] step:4761/10000 train_time:208803ms step_avg:43.86ms +[2025-09-11 08:52:02] [Rank 0] step:4761/10000 train_time:208803ms step_avg:43.86ms +[2025-09-11 08:52:03] [Rank 0] step:4781/10000 train_time:209482ms step_avg:43.82ms +[2025-09-11 08:52:03] [Rank 0] step:4781/10000 train_time:209482ms step_avg:43.82ms +[2025-09-11 08:52:03] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 08:52:03] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 08:52:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 08:52:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 08:52:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 08:52:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 08:52:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:52:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:52:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 08:52:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 08:52:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 08:52:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 08:52:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 08:52:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 08:52:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 08:52:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 08:52:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 08:52:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 08:52:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 08:52:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 08:52:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 08:52:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 08:52:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 08:52:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 08:52:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 08:52:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 08:52:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 08:52:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 08:52:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 08:52:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 08:52:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 08:52:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 08:52:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 08:52:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 08:52:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 08:52:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 08:52:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 08:52:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 08:52:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 08:52:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 08:52:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 08:52:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 08:52:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 08:52:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 08:52:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 08:52:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 08:52:13] [Rank 0] PRINT: step:4800/10000 val_loss:4.4456 total_sharp:1.4675e-04 L1_sharp:3.8221e-04 L2_sharp:3.4434e-05 L3_sharp:-1.7717e-05 L4_sharp:2.0732e-05 L5_sharp:6.7884e-05 L6_sharp:4.2073e-05 L7_sharp:6.0764e-05 L8_sharp:1.8229e-04 L9_sharp:2.0133e-04 L10_sharp:2.4557e-04 L11_sharp:3.2486e-04 L12_sharp:2.9414e-03 total_fnorm:7.8000e+01 total_l1_linf:2.0582e+05 total_spectral:4.0000e+01 L1_fnorm:6.0938e+00 L2_fnorm:6.0312e+00 L3_fnorm:6.1250e+00 L4_fnorm:6.0938e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.3125e+00 L7_fnorm:6.3125e+00 L8_fnorm:6.0625e+00 L9_fnorm:6.2500e+00 L10_fnorm:6.2188e+00 L11_fnorm:6.2500e+00 L12_fnorm:6.2812e+00 L1_l1linf:1.6094e+00 L2_l1linf:1.5156e+00 L3_l1linf:1.4766e+00 L4_l1linf:1.5078e+00 L5_l1linf:1.5547e+00 L6_l1linf:1.6094e+00 L7_l1linf:1.6094e+00 L8_l1linf:1.5547e+00 L9_l1linf:1.5000e+00 L10_l1linf:1.4609e+00 L11_l1linf:1.4375e+00 L12_l1linf:1.4688e+00 L1_spectral:7.8279e-02 L2_spectral:7.6254e-02 L3_spectral:7.6926e-02 L4_spectral:7.7163e-02 L5_spectral:7.7979e-02 L6_spectral:7.8950e-02 L7_spectral:7.8972e-02 L8_spectral:7.7683e-02 L9_spectral:7.8736e-02 L10_spectral:7.8148e-02 L11_spectral:7.8594e-02 L12_spectral:7.8418e-02 train_time:210143ms step_avg:43.78ms +[2025-09-11 08:52:13] [Rank 0] PRINT: step:4800/10000 
val_loss:4.4456 total_sharp:1.4675e-04 L1_sharp:3.8221e-04 L2_sharp:3.4434e-05 L3_sharp:-1.7717e-05 L4_sharp:2.0732e-05 L5_sharp:6.7884e-05 L6_sharp:4.2073e-05 L7_sharp:6.0764e-05 L8_sharp:1.8229e-04 L9_sharp:2.0133e-04 L10_sharp:2.4557e-04 L11_sharp:3.2486e-04 L12_sharp:2.9414e-03 total_fnorm:7.8000e+01 total_l1_linf:2.0582e+05 total_spectral:4.0000e+01 L1_fnorm:6.0938e+00 L2_fnorm:6.0312e+00 L3_fnorm:6.1250e+00 L4_fnorm:6.0938e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.3125e+00 L7_fnorm:6.3125e+00 L8_fnorm:6.0625e+00 L9_fnorm:6.2500e+00 L10_fnorm:6.2188e+00 L11_fnorm:6.2500e+00 L12_fnorm:6.2812e+00 L1_l1linf:1.6094e+00 L2_l1linf:1.5156e+00 L3_l1linf:1.4766e+00 L4_l1linf:1.5078e+00 L5_l1linf:1.5547e+00 L6_l1linf:1.6094e+00 L7_l1linf:1.6094e+00 L8_l1linf:1.5547e+00 L9_l1linf:1.5000e+00 L10_l1linf:1.4609e+00 L11_l1linf:1.4375e+00 L12_l1linf:1.4688e+00 L1_spectral:7.8279e-02 L2_spectral:7.6254e-02 L3_spectral:7.6926e-02 L4_spectral:7.7163e-02 L5_spectral:7.7979e-02 L6_spectral:7.8950e-02 L7_spectral:7.8972e-02 L8_spectral:7.7683e-02 L9_spectral:7.8736e-02 L10_spectral:7.8148e-02 L11_spectral:7.8594e-02 L12_spectral:7.8418e-02 train_time:210143ms step_avg:43.78ms +[2025-09-11 08:52:14] [Rank 0] step:4801/10000 train_time:211320ms step_avg:44.02ms +[2025-09-11 08:52:14] [Rank 0] step:4801/10000 train_time:211320ms step_avg:44.02ms +[2025-09-11 08:52:15] [Rank 0] step:4821/10000 train_time:212036ms step_avg:43.98ms +[2025-09-11 08:52:15] [Rank 0] step:4821/10000 train_time:212036ms step_avg:43.98ms +[2025-09-11 08:52:16] [Rank 0] step:4841/10000 train_time:212718ms step_avg:43.94ms +[2025-09-11 08:52:16] [Rank 0] step:4841/10000 train_time:212718ms step_avg:43.94ms +[2025-09-11 08:52:17] [Rank 0] step:4861/10000 train_time:213400ms step_avg:43.90ms +[2025-09-11 08:52:17] [Rank 0] step:4861/10000 train_time:213400ms step_avg:43.90ms +[2025-09-11 08:52:17] [Rank 0] step:4881/10000 train_time:214082ms step_avg:43.86ms +[2025-09-11 08:52:17] [Rank 0] step:4881/10000 
train_time:214082ms step_avg:43.86ms +[2025-09-11 08:52:18] [Rank 0] step:4901/10000 train_time:214764ms step_avg:43.82ms +[2025-09-11 08:52:18] [Rank 0] step:4901/10000 train_time:214764ms step_avg:43.82ms +[2025-09-11 08:52:19] [Rank 0] step:4921/10000 train_time:215446ms step_avg:43.78ms +[2025-09-11 08:52:19] [Rank 0] step:4921/10000 train_time:215446ms step_avg:43.78ms +[2025-09-11 08:52:19] [Rank 0] step:4941/10000 train_time:216130ms step_avg:43.74ms +[2025-09-11 08:52:19] [Rank 0] step:4941/10000 train_time:216130ms step_avg:43.74ms +[2025-09-11 08:52:20] [Rank 0] step:4961/10000 train_time:216811ms step_avg:43.70ms +[2025-09-11 08:52:20] [Rank 0] step:4961/10000 train_time:216811ms step_avg:43.70ms +[2025-09-11 08:52:21] [Rank 0] step:4981/10000 train_time:217493ms step_avg:43.66ms +[2025-09-11 08:52:21] [Rank 0] step:4981/10000 train_time:217493ms step_avg:43.66ms +[2025-09-11 08:52:21] [Rank 0] step:5001/10000 train_time:218176ms step_avg:43.63ms +[2025-09-11 08:52:21] [Rank 0] step:5001/10000 train_time:218176ms step_avg:43.63ms +[2025-09-11 08:52:22] [Rank 0] step:5021/10000 train_time:218857ms step_avg:43.59ms +[2025-09-11 08:52:22] [Rank 0] step:5021/10000 train_time:218857ms step_avg:43.59ms +[2025-09-11 08:52:23] [Rank 0] step:5041/10000 train_time:219539ms step_avg:43.55ms +[2025-09-11 08:52:23] [Rank 0] step:5041/10000 train_time:219539ms step_avg:43.55ms +[2025-09-11 08:52:24] [Rank 0] step:5061/10000 train_time:220514ms step_avg:43.57ms +[2025-09-11 08:52:24] [Rank 0] step:5061/10000 train_time:220514ms step_avg:43.57ms +[2025-09-11 08:52:25] [Rank 0] step:5081/10000 train_time:221475ms step_avg:43.59ms +[2025-09-11 08:52:25] [Rank 0] step:5081/10000 train_time:221475ms step_avg:43.59ms +[2025-09-11 08:52:25] [Rank 0] step:5101/10000 train_time:222157ms step_avg:43.55ms +[2025-09-11 08:52:25] [Rank 0] step:5101/10000 train_time:222157ms step_avg:43.55ms +[2025-09-11 08:52:26] [Rank 0] step:5121/10000 train_time:223147ms step_avg:43.57ms 
+[2025-09-11 08:52:26] [Rank 0] step:5121/10000 train_time:223147ms step_avg:43.57ms +[2025-09-11 08:52:27] [Rank 0] step:5141/10000 train_time:223831ms step_avg:43.54ms +[2025-09-11 08:52:27] [Rank 0] step:5141/10000 train_time:223831ms step_avg:43.54ms +[2025-09-11 08:52:28] [Rank 0] step:5161/10000 train_time:224512ms step_avg:43.50ms +[2025-09-11 08:52:28] [Rank 0] step:5161/10000 train_time:224512ms step_avg:43.50ms +[2025-09-11 08:52:28] [Rank 0] step:5181/10000 train_time:225193ms step_avg:43.47ms +[2025-09-11 08:52:28] [Rank 0] step:5181/10000 train_time:225193ms step_avg:43.47ms +[2025-09-11 08:52:29] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 08:52:29] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 08:52:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 08:52:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 08:52:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 08:52:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 08:52:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:52:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:52:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 08:52:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 08:52:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 08:52:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 08:52:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 08:52:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 08:52:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 08:52:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 08:52:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 08:52:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 08:52:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 08:52:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 08:52:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 08:52:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 08:52:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 08:52:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 08:52:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 08:52:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 08:52:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 08:52:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 08:52:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 08:52:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 08:52:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 08:52:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 08:52:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 08:52:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 08:52:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 08:52:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 08:52:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 08:52:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 08:52:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 08:52:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 08:52:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 08:52:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 08:52:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 08:52:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 08:52:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 08:52:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:52:42] [Rank 0] PRINT: step:5200/10000 val_loss:4.4186 total_sharp:2.2211e-04 L1_sharp:5.1851e-04 L2_sharp:-1.6991e-05 L3_sharp:6.0566e-05 L4_sharp:3.6141e-05 L5_sharp:1.1865e-04 L6_sharp:7.7234e-05 L7_sharp:6.8243e-05 L8_sharp:1.9010e-04 L9_sharp:1.9270e-04 L10_sharp:2.1501e-04 L11_sharp:3.7000e-04 L12_sharp:5.8127e-03 total_fnorm:7.1500e+01 total_l1_linf:1.8227e+05 total_spectral:3.6500e+01 L1_fnorm:6.0938e+00 L2_fnorm:6.0312e+00 L3_fnorm:6.1875e+00 L4_fnorm:6.1250e+00 L5_fnorm:6.2500e+00 L6_fnorm:6.3125e+00 L7_fnorm:6.2812e+00 L8_fnorm:6.0625e+00 L9_fnorm:6.2188e+00 L10_fnorm:6.2188e+00 L11_fnorm:6.2500e+00 L12_fnorm:6.2500e+00 L1_l1linf:1.6016e+00 L2_l1linf:1.4688e+00 L3_l1linf:1.4453e+00 L4_l1linf:1.5078e+00 L5_l1linf:1.5547e+00 L6_l1linf:1.5781e+00 L7_l1linf:1.6094e+00 L8_l1linf:1.5469e+00 L9_l1linf:1.5625e+00 L10_l1linf:1.4453e+00 L11_l1linf:1.3984e+00 L12_l1linf:1.4688e+00 L1_spectral:7.8055e-02 L2_spectral:7.6384e-02 L3_spectral:7.7341e-02 L4_spectral:7.8155e-02 L5_spectral:7.8632e-02 L6_spectral:7.8970e-02 L7_spectral:7.9096e-02 L8_spectral:7.7337e-02 L9_spectral:7.8859e-02 L10_spectral:7.9035e-02 L11_spectral:7.9085e-02 L12_spectral:7.8304e-02 train_time:225861ms step_avg:43.43ms +[2025-09-11 08:52:42] [Rank 0] PRINT: step:5200/10000 val_loss:4.4186 total_sharp:2.2211e-04 L1_sharp:5.1851e-04 L2_sharp:-1.6991e-05 L3_sharp:6.0566e-05 L4_sharp:3.6141e-05 L5_sharp:1.1865e-04 L6_sharp:7.7234e-05 L7_sharp:6.8243e-05 L8_sharp:1.9010e-04 L9_sharp:1.9270e-04 L10_sharp:2.1501e-04 L11_sharp:3.7000e-04 L12_sharp:5.8127e-03 total_fnorm:7.1500e+01 total_l1_linf:1.8227e+05 total_spectral:3.6500e+01 L1_fnorm:6.0938e+00 L2_fnorm:6.0312e+00 L3_fnorm:6.1875e+00 L4_fnorm:6.1250e+00 L5_fnorm:6.2500e+00 L6_fnorm:6.3125e+00 L7_fnorm:6.2812e+00 L8_fnorm:6.0625e+00 L9_fnorm:6.2188e+00 L10_fnorm:6.2188e+00 L11_fnorm:6.2500e+00 L12_fnorm:6.2500e+00 L1_l1linf:1.6016e+00 L2_l1linf:1.4688e+00 L3_l1linf:1.4453e+00 L4_l1linf:1.5078e+00 L5_l1linf:1.5547e+00 
L6_l1linf:1.5781e+00 L7_l1linf:1.6094e+00 L8_l1linf:1.5469e+00 L9_l1linf:1.5625e+00 L10_l1linf:1.4453e+00 L11_l1linf:1.3984e+00 L12_l1linf:1.4688e+00 L1_spectral:7.8055e-02 L2_spectral:7.6384e-02 L3_spectral:7.7341e-02 L4_spectral:7.8155e-02 L5_spectral:7.8632e-02 L6_spectral:7.8970e-02 L7_spectral:7.9096e-02 L8_spectral:7.7337e-02 L9_spectral:7.8859e-02 L10_spectral:7.9035e-02 L11_spectral:7.9085e-02 L12_spectral:7.8304e-02 train_time:225861ms step_avg:43.43ms +[2025-09-11 08:52:43] [Rank 0] step:5201/10000 train_time:227078ms step_avg:43.66ms +[2025-09-11 08:52:43] [Rank 0] step:5201/10000 train_time:227078ms step_avg:43.66ms +[2025-09-11 08:52:44] [Rank 0] step:5221/10000 train_time:227775ms step_avg:43.63ms +[2025-09-11 08:52:44] [Rank 0] step:5221/10000 train_time:227775ms step_avg:43.63ms +[2025-09-11 08:52:45] [Rank 0] step:5241/10000 train_time:228465ms step_avg:43.59ms +[2025-09-11 08:52:45] [Rank 0] step:5241/10000 train_time:228465ms step_avg:43.59ms +[2025-09-11 08:52:46] [Rank 0] step:5261/10000 train_time:229158ms step_avg:43.56ms +[2025-09-11 08:52:46] [Rank 0] step:5261/10000 train_time:229158ms step_avg:43.56ms +[2025-09-11 08:52:46] [Rank 0] step:5281/10000 train_time:229848ms step_avg:43.52ms +[2025-09-11 08:52:46] [Rank 0] step:5281/10000 train_time:229848ms step_avg:43.52ms +[2025-09-11 08:52:47] [Rank 0] step:5301/10000 train_time:230539ms step_avg:43.49ms +[2025-09-11 08:52:47] [Rank 0] step:5301/10000 train_time:230539ms step_avg:43.49ms +[2025-09-11 08:52:48] [Rank 0] step:5321/10000 train_time:231229ms step_avg:43.46ms +[2025-09-11 08:52:48] [Rank 0] step:5321/10000 train_time:231229ms step_avg:43.46ms +[2025-09-11 08:52:48] [Rank 0] step:5341/10000 train_time:231919ms step_avg:43.42ms +[2025-09-11 08:52:48] [Rank 0] step:5341/10000 train_time:231919ms step_avg:43.42ms +[2025-09-11 08:52:49] [Rank 0] step:5361/10000 train_time:232612ms step_avg:43.39ms +[2025-09-11 08:52:49] [Rank 0] step:5361/10000 train_time:232612ms step_avg:43.39ms 
+[2025-09-11 08:52:50] [Rank 0] step:5381/10000 train_time:233304ms step_avg:43.36ms +[2025-09-11 08:52:50] [Rank 0] step:5381/10000 train_time:233304ms step_avg:43.36ms +[2025-09-11 08:52:50] [Rank 0] step:5401/10000 train_time:233994ms step_avg:43.32ms +[2025-09-11 08:52:50] [Rank 0] step:5401/10000 train_time:233994ms step_avg:43.32ms +[2025-09-11 08:52:51] [Rank 0] step:5421/10000 train_time:234687ms step_avg:43.29ms +[2025-09-11 08:52:51] [Rank 0] step:5421/10000 train_time:234687ms step_avg:43.29ms +[2025-09-11 08:52:52] [Rank 0] step:5441/10000 train_time:235377ms step_avg:43.26ms +[2025-09-11 08:52:52] [Rank 0] step:5441/10000 train_time:235377ms step_avg:43.26ms +[2025-09-11 08:52:52] [Rank 0] step:5461/10000 train_time:236068ms step_avg:43.23ms +[2025-09-11 08:52:52] [Rank 0] step:5461/10000 train_time:236068ms step_avg:43.23ms +[2025-09-11 08:52:53] [Rank 0] step:5481/10000 train_time:236759ms step_avg:43.20ms +[2025-09-11 08:52:53] [Rank 0] step:5481/10000 train_time:236759ms step_avg:43.20ms +[2025-09-11 08:52:54] [Rank 0] step:5501/10000 train_time:237449ms step_avg:43.16ms +[2025-09-11 08:52:54] [Rank 0] step:5501/10000 train_time:237449ms step_avg:43.16ms +[2025-09-11 08:52:55] [Rank 0] step:5521/10000 train_time:238140ms step_avg:43.13ms +[2025-09-11 08:52:55] [Rank 0] step:5521/10000 train_time:238140ms step_avg:43.13ms +[2025-09-11 08:52:55] [Rank 0] step:5541/10000 train_time:238854ms step_avg:43.11ms +[2025-09-11 08:52:55] [Rank 0] step:5541/10000 train_time:238854ms step_avg:43.11ms +[2025-09-11 08:52:56] [Rank 0] step:5561/10000 train_time:239546ms step_avg:43.08ms +[2025-09-11 08:52:56] [Rank 0] step:5561/10000 train_time:239546ms step_avg:43.08ms +[2025-09-11 08:52:57] [Rank 0] step:5581/10000 train_time:240237ms step_avg:43.05ms +[2025-09-11 08:52:57] [Rank 0] step:5581/10000 train_time:240237ms step_avg:43.05ms +[2025-09-11 08:52:57] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 08:52:57] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 08:52:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 08:52:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 08:53:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 08:53:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 08:53:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:53:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:53:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 08:53:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 08:53:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 08:53:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 08:53:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 08:53:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 08:53:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 08:53:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 08:53:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 08:53:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 08:53:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 08:53:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 08:53:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 08:53:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 08:53:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 08:53:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 08:53:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 08:53:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 08:53:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 08:53:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 08:53:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 08:53:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 08:53:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 08:53:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 08:53:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 08:53:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 08:53:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 08:53:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 08:53:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 08:53:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 08:53:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 08:53:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 08:53:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 08:53:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 08:53:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 08:53:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 08:53:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 08:53:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 08:53:07] [Rank 0] PRINT: step:5600/10000 val_loss:4.4040 total_sharp:1.4117e-04 L1_sharp:4.5939e-04 L2_sharp:7.5656e-06 L3_sharp:4.2179e-05 L4_sharp:3.5503e-05 L5_sharp:7.5250e-05 L6_sharp:5.7021e-05 L7_sharp:5.7321e-05 L8_sharp:1.4632e-04 L9_sharp:1.9088e-04 L10_sharp:2.0473e-04 L11_sharp:2.5948e-04 L12_sharp:1.9779e-03 total_fnorm:7.1500e+01 total_l1_linf:1.8637e+05 total_spectral:3.7250e+01 L1_fnorm:6.0312e+00 L2_fnorm:6.0000e+00 L3_fnorm:6.1250e+00 L4_fnorm:6.1562e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2500e+00 L8_fnorm:5.9688e+00 L9_fnorm:6.2188e+00 L10_fnorm:6.1875e+00 L11_fnorm:6.2500e+00 L12_fnorm:6.2188e+00 L1_l1linf:1.6094e+00 L2_l1linf:1.4531e+00 L3_l1linf:1.4062e+00 L4_l1linf:1.5000e+00 L5_l1linf:1.5469e+00 L6_l1linf:1.5625e+00 L7_l1linf:1.5703e+00 L8_l1linf:1.5078e+00 L9_l1linf:1.4688e+00 L10_l1linf:1.4297e+00 L11_l1linf:1.3906e+00 L12_l1linf:1.4062e+00 L1_spectral:7.8372e-02 L2_spectral:7.6928e-02 L3_spectral:7.7889e-02 L4_spectral:7.8311e-02 L5_spectral:7.8719e-02 L6_spectral:7.8967e-02 L7_spectral:7.9926e-02 L8_spectral:7.7299e-02 L9_spectral:7.8957e-02 L10_spectral:7.9383e-02 L11_spectral:7.9174e-02 L12_spectral:7.8920e-02 train_time:240909ms step_avg:43.02ms +[2025-09-11 08:53:07] [Rank 0] PRINT: step:5600/10000 
val_loss:4.4040 total_sharp:1.4117e-04 L1_sharp:4.5939e-04 L2_sharp:7.5656e-06 L3_sharp:4.2179e-05 L4_sharp:3.5503e-05 L5_sharp:7.5250e-05 L6_sharp:5.7021e-05 L7_sharp:5.7321e-05 L8_sharp:1.4632e-04 L9_sharp:1.9088e-04 L10_sharp:2.0473e-04 L11_sharp:2.5948e-04 L12_sharp:1.9779e-03 total_fnorm:7.1500e+01 total_l1_linf:1.8637e+05 total_spectral:3.7250e+01 L1_fnorm:6.0312e+00 L2_fnorm:6.0000e+00 L3_fnorm:6.1250e+00 L4_fnorm:6.1562e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2500e+00 L8_fnorm:5.9688e+00 L9_fnorm:6.2188e+00 L10_fnorm:6.1875e+00 L11_fnorm:6.2500e+00 L12_fnorm:6.2188e+00 L1_l1linf:1.6094e+00 L2_l1linf:1.4531e+00 L3_l1linf:1.4062e+00 L4_l1linf:1.5000e+00 L5_l1linf:1.5469e+00 L6_l1linf:1.5625e+00 L7_l1linf:1.5703e+00 L8_l1linf:1.5078e+00 L9_l1linf:1.4688e+00 L10_l1linf:1.4297e+00 L11_l1linf:1.3906e+00 L12_l1linf:1.4062e+00 L1_spectral:7.8372e-02 L2_spectral:7.6928e-02 L3_spectral:7.7889e-02 L4_spectral:7.8311e-02 L5_spectral:7.8719e-02 L6_spectral:7.8967e-02 L7_spectral:7.9926e-02 L8_spectral:7.7299e-02 L9_spectral:7.8957e-02 L10_spectral:7.9383e-02 L11_spectral:7.9174e-02 L12_spectral:7.8920e-02 train_time:240909ms step_avg:43.02ms +[2025-09-11 08:53:08] [Rank 0] step:5601/10000 train_time:242098ms step_avg:43.22ms +[2025-09-11 08:53:08] [Rank 0] step:5601/10000 train_time:242098ms step_avg:43.22ms +[2025-09-11 08:53:09] [Rank 0] step:5621/10000 train_time:242817ms step_avg:43.20ms +[2025-09-11 08:53:09] [Rank 0] step:5621/10000 train_time:242817ms step_avg:43.20ms +[2025-09-11 08:53:10] [Rank 0] step:5641/10000 train_time:243506ms step_avg:43.17ms +[2025-09-11 08:53:10] [Rank 0] step:5641/10000 train_time:243506ms step_avg:43.17ms +[2025-09-11 08:53:10] [Rank 0] step:5661/10000 train_time:244201ms step_avg:43.14ms +[2025-09-11 08:53:10] [Rank 0] step:5661/10000 train_time:244201ms step_avg:43.14ms +[2025-09-11 08:53:11] [Rank 0] step:5681/10000 train_time:244892ms step_avg:43.11ms +[2025-09-11 08:53:11] [Rank 0] step:5681/10000 
train_time:244892ms step_avg:43.11ms +[2025-09-11 08:53:12] [Rank 0] step:5701/10000 train_time:245585ms step_avg:43.08ms +[2025-09-11 08:53:12] [Rank 0] step:5701/10000 train_time:245585ms step_avg:43.08ms +[2025-09-11 08:53:12] [Rank 0] step:5721/10000 train_time:246276ms step_avg:43.05ms +[2025-09-11 08:53:12] [Rank 0] step:5721/10000 train_time:246276ms step_avg:43.05ms +[2025-09-11 08:53:13] [Rank 0] step:5741/10000 train_time:246968ms step_avg:43.02ms +[2025-09-11 08:53:13] [Rank 0] step:5741/10000 train_time:246968ms step_avg:43.02ms +[2025-09-11 08:53:14] [Rank 0] step:5761/10000 train_time:247661ms step_avg:42.99ms +[2025-09-11 08:53:14] [Rank 0] step:5761/10000 train_time:247661ms step_avg:42.99ms +[2025-09-11 08:53:14] [Rank 0] step:5781/10000 train_time:248352ms step_avg:42.96ms +[2025-09-11 08:53:14] [Rank 0] step:5781/10000 train_time:248352ms step_avg:42.96ms +[2025-09-11 08:53:15] [Rank 0] step:5801/10000 train_time:249045ms step_avg:42.93ms +[2025-09-11 08:53:15] [Rank 0] step:5801/10000 train_time:249045ms step_avg:42.93ms +[2025-09-11 08:53:16] [Rank 0] step:5821/10000 train_time:249735ms step_avg:42.90ms +[2025-09-11 08:53:16] [Rank 0] step:5821/10000 train_time:249735ms step_avg:42.90ms +[2025-09-11 08:53:17] [Rank 0] step:5841/10000 train_time:250427ms step_avg:42.87ms +[2025-09-11 08:53:17] [Rank 0] step:5841/10000 train_time:250427ms step_avg:42.87ms +[2025-09-11 08:53:17] [Rank 0] step:5861/10000 train_time:251118ms step_avg:42.85ms +[2025-09-11 08:53:17] [Rank 0] step:5861/10000 train_time:251118ms step_avg:42.85ms +[2025-09-11 08:53:18] [Rank 0] step:5881/10000 train_time:251809ms step_avg:42.82ms +[2025-09-11 08:53:18] [Rank 0] step:5881/10000 train_time:251809ms step_avg:42.82ms +[2025-09-11 08:53:19] [Rank 0] step:5901/10000 train_time:252500ms step_avg:42.79ms +[2025-09-11 08:53:19] [Rank 0] step:5901/10000 train_time:252500ms step_avg:42.79ms +[2025-09-11 08:53:19] [Rank 0] step:5921/10000 train_time:253193ms step_avg:42.76ms 
+[2025-09-11 08:53:19] [Rank 0] step:5921/10000 train_time:253193ms step_avg:42.76ms +[2025-09-11 08:53:20] [Rank 0] step:5941/10000 train_time:253886ms step_avg:42.73ms +[2025-09-11 08:53:20] [Rank 0] step:5941/10000 train_time:253886ms step_avg:42.73ms +[2025-09-11 08:53:21] [Rank 0] step:5961/10000 train_time:254578ms step_avg:42.71ms +[2025-09-11 08:53:21] [Rank 0] step:5961/10000 train_time:254578ms step_avg:42.71ms +[2025-09-11 08:53:21] [Rank 0] step:5981/10000 train_time:255270ms step_avg:42.68ms +[2025-09-11 08:53:21] [Rank 0] step:5981/10000 train_time:255270ms step_avg:42.68ms +[2025-09-11 08:53:22] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 08:53:22] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 08:53:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 08:53:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 08:53:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 08:53:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 08:53:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:53:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:53:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 08:53:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 08:53:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 08:53:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 08:53:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 08:53:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 08:53:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 08:53:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 08:53:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 08:53:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 08:53:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 08:53:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 08:53:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 08:53:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 08:53:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 08:53:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 08:53:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 08:53:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 08:53:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 08:53:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 08:53:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 08:53:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 08:53:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 08:53:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 08:53:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 08:53:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 08:53:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 08:53:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 08:53:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 08:53:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 08:53:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 08:53:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 08:53:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 08:53:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 08:53:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 08:53:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 08:53:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 08:53:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:53:32] [Rank 0] PRINT: step:6000/10000 val_loss:4.3622 total_sharp:1.1612e-04 L1_sharp:3.4859e-04 L2_sharp:5.4448e-05 L3_sharp:-1.0020e-05 L4_sharp:2.9235e-05 L5_sharp:6.8537e-05 L6_sharp:7.7223e-05 L7_sharp:4.8401e-05 L8_sharp:1.0965e-04 L9_sharp:1.5574e-04 L10_sharp:1.7365e-04 L11_sharp:2.3550e-04 L12_sharp:1.2097e-03 total_fnorm:7.4000e+01 total_l1_linf:1.8637e+05 total_spectral:3.7750e+01 L1_fnorm:6.0625e+00 L2_fnorm:6.0312e+00 L3_fnorm:6.1250e+00 L4_fnorm:6.1562e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2500e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.2188e+00 L10_fnorm:6.2188e+00 L11_fnorm:6.2188e+00 L12_fnorm:6.2188e+00 L1_l1linf:1.5703e+00 L2_l1linf:1.4297e+00 L3_l1linf:1.4141e+00 L4_l1linf:1.5078e+00 L5_l1linf:1.5469e+00 L6_l1linf:1.5625e+00 L7_l1linf:1.5859e+00 L8_l1linf:1.5234e+00 L9_l1linf:1.4844e+00 L10_l1linf:1.4453e+00 L11_l1linf:1.3672e+00 L12_l1linf:1.3828e+00 L1_spectral:7.8920e-02 L2_spectral:7.7053e-02 L3_spectral:7.7988e-02 L4_spectral:7.7928e-02 L5_spectral:7.9273e-02 L6_spectral:7.9706e-02 L7_spectral:7.9322e-02 L8_spectral:7.8114e-02 L9_spectral:7.9048e-02 L10_spectral:7.9284e-02 L11_spectral:7.9492e-02 L12_spectral:7.8674e-02 train_time:255944ms step_avg:42.66ms +[2025-09-11 08:53:32] [Rank 0] PRINT: step:6000/10000 val_loss:4.3622 total_sharp:1.1612e-04 L1_sharp:3.4859e-04 L2_sharp:5.4448e-05 L3_sharp:-1.0020e-05 L4_sharp:2.9235e-05 L5_sharp:6.8537e-05 L6_sharp:7.7223e-05 L7_sharp:4.8401e-05 L8_sharp:1.0965e-04 L9_sharp:1.5574e-04 L10_sharp:1.7365e-04 L11_sharp:2.3550e-04 L12_sharp:1.2097e-03 total_fnorm:7.4000e+01 total_l1_linf:1.8637e+05 total_spectral:3.7750e+01 L1_fnorm:6.0625e+00 L2_fnorm:6.0312e+00 L3_fnorm:6.1250e+00 L4_fnorm:6.1562e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2500e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.2188e+00 L10_fnorm:6.2188e+00 L11_fnorm:6.2188e+00 L12_fnorm:6.2188e+00 L1_l1linf:1.5703e+00 L2_l1linf:1.4297e+00 L3_l1linf:1.4141e+00 L4_l1linf:1.5078e+00 L5_l1linf:1.5469e+00 
L6_l1linf:1.5625e+00 L7_l1linf:1.5859e+00 L8_l1linf:1.5234e+00 L9_l1linf:1.4844e+00 L10_l1linf:1.4453e+00 L11_l1linf:1.3672e+00 L12_l1linf:1.3828e+00 L1_spectral:7.8920e-02 L2_spectral:7.7053e-02 L3_spectral:7.7988e-02 L4_spectral:7.7928e-02 L5_spectral:7.9273e-02 L6_spectral:7.9706e-02 L7_spectral:7.9322e-02 L8_spectral:7.8114e-02 L9_spectral:7.9048e-02 L10_spectral:7.9284e-02 L11_spectral:7.9492e-02 L12_spectral:7.8674e-02 train_time:255944ms step_avg:42.66ms +[2025-09-11 08:53:33] [Rank 0] step:6001/10000 train_time:257091ms step_avg:42.84ms +[2025-09-11 08:53:33] [Rank 0] step:6001/10000 train_time:257091ms step_avg:42.84ms +[2025-09-11 08:53:34] [Rank 0] step:6021/10000 train_time:257788ms step_avg:42.81ms +[2025-09-11 08:53:34] [Rank 0] step:6021/10000 train_time:257788ms step_avg:42.81ms +[2025-09-11 08:53:34] [Rank 0] step:6041/10000 train_time:258484ms step_avg:42.79ms +[2025-09-11 08:53:34] [Rank 0] step:6041/10000 train_time:258484ms step_avg:42.79ms +[2025-09-11 08:53:35] [Rank 0] step:6061/10000 train_time:259177ms step_avg:42.76ms +[2025-09-11 08:53:35] [Rank 0] step:6061/10000 train_time:259177ms step_avg:42.76ms +[2025-09-11 08:53:36] [Rank 0] step:6081/10000 train_time:259872ms step_avg:42.74ms +[2025-09-11 08:53:36] [Rank 0] step:6081/10000 train_time:259872ms step_avg:42.74ms +[2025-09-11 08:53:37] [Rank 0] step:6101/10000 train_time:260564ms step_avg:42.71ms +[2025-09-11 08:53:37] [Rank 0] step:6101/10000 train_time:260564ms step_avg:42.71ms +[2025-09-11 08:53:37] [Rank 0] step:6121/10000 train_time:261257ms step_avg:42.68ms +[2025-09-11 08:53:37] [Rank 0] step:6121/10000 train_time:261257ms step_avg:42.68ms +[2025-09-11 08:53:38] [Rank 0] step:6141/10000 train_time:261950ms step_avg:42.66ms +[2025-09-11 08:53:38] [Rank 0] step:6141/10000 train_time:261950ms step_avg:42.66ms +[2025-09-11 08:53:39] [Rank 0] step:6161/10000 train_time:262643ms step_avg:42.63ms +[2025-09-11 08:53:39] [Rank 0] step:6161/10000 train_time:262643ms step_avg:42.63ms 
+[2025-09-11 08:53:39] [Rank 0] step:6181/10000 train_time:263334ms step_avg:42.60ms +[2025-09-11 08:53:39] [Rank 0] step:6181/10000 train_time:263334ms step_avg:42.60ms +[2025-09-11 08:53:40] [Rank 0] step:6201/10000 train_time:264028ms step_avg:42.58ms +[2025-09-11 08:53:40] [Rank 0] step:6201/10000 train_time:264028ms step_avg:42.58ms +[2025-09-11 08:53:41] [Rank 0] step:6221/10000 train_time:264721ms step_avg:42.55ms +[2025-09-11 08:53:41] [Rank 0] step:6221/10000 train_time:264721ms step_avg:42.55ms +[2025-09-11 08:53:41] [Rank 0] step:6241/10000 train_time:265414ms step_avg:42.53ms +[2025-09-11 08:53:41] [Rank 0] step:6241/10000 train_time:265414ms step_avg:42.53ms +[2025-09-11 08:53:42] [Rank 0] step:6261/10000 train_time:266106ms step_avg:42.50ms +[2025-09-11 08:53:42] [Rank 0] step:6261/10000 train_time:266106ms step_avg:42.50ms +[2025-09-11 08:53:43] [Rank 0] step:6281/10000 train_time:266799ms step_avg:42.48ms +[2025-09-11 08:53:43] [Rank 0] step:6281/10000 train_time:266799ms step_avg:42.48ms +[2025-09-11 08:53:43] [Rank 0] step:6301/10000 train_time:267491ms step_avg:42.45ms +[2025-09-11 08:53:43] [Rank 0] step:6301/10000 train_time:267491ms step_avg:42.45ms +[2025-09-11 08:53:44] [Rank 0] step:6321/10000 train_time:268187ms step_avg:42.43ms +[2025-09-11 08:53:44] [Rank 0] step:6321/10000 train_time:268187ms step_avg:42.43ms +[2025-09-11 08:53:45] [Rank 0] step:6341/10000 train_time:268880ms step_avg:42.40ms +[2025-09-11 08:53:45] [Rank 0] step:6341/10000 train_time:268880ms step_avg:42.40ms +[2025-09-11 08:53:46] [Rank 0] step:6361/10000 train_time:269573ms step_avg:42.38ms +[2025-09-11 08:53:46] [Rank 0] step:6361/10000 train_time:269573ms step_avg:42.38ms +[2025-09-11 08:53:46] [Rank 0] step:6381/10000 train_time:270266ms step_avg:42.35ms +[2025-09-11 08:53:46] [Rank 0] step:6381/10000 train_time:270266ms step_avg:42.35ms +[2025-09-11 08:53:47] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 08:53:47] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 08:53:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 08:53:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 08:53:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 08:53:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 08:53:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:53:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:53:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 08:53:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 08:53:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 08:53:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 08:53:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 08:53:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 08:53:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 08:53:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 08:53:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 08:53:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 08:53:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 08:53:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 08:53:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 08:53:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 08:53:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 08:53:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 08:53:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 08:53:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 08:53:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 08:53:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 08:53:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 08:53:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 08:53:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 08:53:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 08:53:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 08:53:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 08:53:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 08:53:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 08:53:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 08:53:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 08:53:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 08:53:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 08:53:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 08:53:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 08:53:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 08:53:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 08:53:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 08:53:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 08:53:57] [Rank 0] PRINT: step:6400/10000 val_loss:4.3271 total_sharp:1.3510e-04 L1_sharp:5.3552e-04 L2_sharp:8.9864e-05 L3_sharp:1.3274e-05 L4_sharp:5.2135e-05 L5_sharp:3.3486e-05 L6_sharp:8.2702e-05 L7_sharp:9.0585e-05 L8_sharp:1.6031e-04 L9_sharp:1.6724e-04 L10_sharp:1.9198e-04 L11_sharp:2.9611e-04 L12_sharp:1.2863e-03 total_fnorm:6.5500e+01 total_l1_linf:1.5667e+05 total_spectral:3.2500e+01 L1_fnorm:5.5000e+00 L2_fnorm:5.4375e+00 L3_fnorm:5.5625e+00 L4_fnorm:5.5312e+00 L5_fnorm:5.6250e+00 L6_fnorm:5.6562e+00 L7_fnorm:5.6250e+00 L8_fnorm:5.4375e+00 L9_fnorm:5.5938e+00 L10_fnorm:5.5625e+00 L11_fnorm:5.6250e+00 L12_fnorm:5.5938e+00 L1_l1linf:1.3594e+00 L2_l1linf:1.2422e+00 L3_l1linf:1.2344e+00 L4_l1linf:1.3125e+00 L5_l1linf:1.3438e+00 L6_l1linf:1.3594e+00 L7_l1linf:1.3906e+00 L8_l1linf:1.3125e+00 L9_l1linf:1.2734e+00 L10_l1linf:1.2656e+00 L11_l1linf:1.1797e+00 L12_l1linf:1.2031e+00 L1_spectral:7.2900e-02 L2_spectral:7.0418e-02 L3_spectral:7.1288e-02 L4_spectral:7.1455e-02 L5_spectral:7.2776e-02 L6_spectral:7.2714e-02 L7_spectral:7.2924e-02 L8_spectral:7.0814e-02 L9_spectral:7.2472e-02 L10_spectral:7.2680e-02 L11_spectral:7.2764e-02 L12_spectral:7.2652e-02 train_time:270938ms step_avg:42.33ms +[2025-09-11 08:53:57] [Rank 0] PRINT: step:6400/10000 
val_loss:4.3271 total_sharp:1.3510e-04 L1_sharp:5.3552e-04 L2_sharp:8.9864e-05 L3_sharp:1.3274e-05 L4_sharp:5.2135e-05 L5_sharp:3.3486e-05 L6_sharp:8.2702e-05 L7_sharp:9.0585e-05 L8_sharp:1.6031e-04 L9_sharp:1.6724e-04 L10_sharp:1.9198e-04 L11_sharp:2.9611e-04 L12_sharp:1.2863e-03 total_fnorm:6.5500e+01 total_l1_linf:1.5667e+05 total_spectral:3.2500e+01 L1_fnorm:5.5000e+00 L2_fnorm:5.4375e+00 L3_fnorm:5.5625e+00 L4_fnorm:5.5312e+00 L5_fnorm:5.6250e+00 L6_fnorm:5.6562e+00 L7_fnorm:5.6250e+00 L8_fnorm:5.4375e+00 L9_fnorm:5.5938e+00 L10_fnorm:5.5625e+00 L11_fnorm:5.6250e+00 L12_fnorm:5.5938e+00 L1_l1linf:1.3594e+00 L2_l1linf:1.2422e+00 L3_l1linf:1.2344e+00 L4_l1linf:1.3125e+00 L5_l1linf:1.3438e+00 L6_l1linf:1.3594e+00 L7_l1linf:1.3906e+00 L8_l1linf:1.3125e+00 L9_l1linf:1.2734e+00 L10_l1linf:1.2656e+00 L11_l1linf:1.1797e+00 L12_l1linf:1.2031e+00 L1_spectral:7.2900e-02 L2_spectral:7.0418e-02 L3_spectral:7.1288e-02 L4_spectral:7.1455e-02 L5_spectral:7.2776e-02 L6_spectral:7.2714e-02 L7_spectral:7.2924e-02 L8_spectral:7.0814e-02 L9_spectral:7.2472e-02 L10_spectral:7.2680e-02 L11_spectral:7.2764e-02 L12_spectral:7.2652e-02 train_time:270938ms step_avg:42.33ms +[2025-09-11 08:53:58] [Rank 0] step:6401/10000 train_time:272070ms step_avg:42.50ms +[2025-09-11 08:53:58] [Rank 0] step:6401/10000 train_time:272070ms step_avg:42.50ms +[2025-09-11 08:53:59] [Rank 0] step:6421/10000 train_time:272765ms step_avg:42.48ms +[2025-09-11 08:53:59] [Rank 0] step:6421/10000 train_time:272765ms step_avg:42.48ms +[2025-09-11 08:53:59] [Rank 0] step:6441/10000 train_time:273457ms step_avg:42.46ms +[2025-09-11 08:53:59] [Rank 0] step:6441/10000 train_time:273457ms step_avg:42.46ms +[2025-09-11 08:54:00] [Rank 0] step:6461/10000 train_time:274150ms step_avg:42.43ms +[2025-09-11 08:54:00] [Rank 0] step:6461/10000 train_time:274150ms step_avg:42.43ms +[2025-09-11 08:54:01] [Rank 0] step:6481/10000 train_time:274845ms step_avg:42.41ms +[2025-09-11 08:54:01] [Rank 0] step:6481/10000 
train_time:274845ms step_avg:42.41ms +[2025-09-11 08:54:01] [Rank 0] step:6501/10000 train_time:275540ms step_avg:42.38ms +[2025-09-11 08:54:01] [Rank 0] step:6501/10000 train_time:275540ms step_avg:42.38ms +[2025-09-11 08:54:02] [Rank 0] step:6521/10000 train_time:276233ms step_avg:42.36ms +[2025-09-11 08:54:02] [Rank 0] step:6521/10000 train_time:276233ms step_avg:42.36ms +[2025-09-11 08:54:03] [Rank 0] step:6541/10000 train_time:276925ms step_avg:42.34ms +[2025-09-11 08:54:03] [Rank 0] step:6541/10000 train_time:276925ms step_avg:42.34ms +[2025-09-11 08:54:03] [Rank 0] step:6561/10000 train_time:277618ms step_avg:42.31ms +[2025-09-11 08:54:03] [Rank 0] step:6561/10000 train_time:277618ms step_avg:42.31ms +[2025-09-11 08:54:04] [Rank 0] step:6581/10000 train_time:278310ms step_avg:42.29ms +[2025-09-11 08:54:04] [Rank 0] step:6581/10000 train_time:278310ms step_avg:42.29ms +[2025-09-11 08:54:05] [Rank 0] step:6601/10000 train_time:279003ms step_avg:42.27ms +[2025-09-11 08:54:05] [Rank 0] step:6601/10000 train_time:279003ms step_avg:42.27ms +[2025-09-11 08:54:05] [Rank 0] step:6621/10000 train_time:279694ms step_avg:42.24ms +[2025-09-11 08:54:05] [Rank 0] step:6621/10000 train_time:279694ms step_avg:42.24ms +[2025-09-11 08:54:06] [Rank 0] step:6641/10000 train_time:280387ms step_avg:42.22ms +[2025-09-11 08:54:06] [Rank 0] step:6641/10000 train_time:280387ms step_avg:42.22ms +[2025-09-11 08:54:07] [Rank 0] step:6661/10000 train_time:281079ms step_avg:42.20ms +[2025-09-11 08:54:07] [Rank 0] step:6661/10000 train_time:281079ms step_avg:42.20ms +[2025-09-11 08:54:08] [Rank 0] step:6681/10000 train_time:281778ms step_avg:42.18ms +[2025-09-11 08:54:08] [Rank 0] step:6681/10000 train_time:281778ms step_avg:42.18ms +[2025-09-11 08:54:08] [Rank 0] step:6701/10000 train_time:282476ms step_avg:42.15ms +[2025-09-11 08:54:08] [Rank 0] step:6701/10000 train_time:282476ms step_avg:42.15ms +[2025-09-11 08:54:09] [Rank 0] step:6721/10000 train_time:283177ms step_avg:42.13ms 
+[2025-09-11 08:54:09] [Rank 0] step:6721/10000 train_time:283177ms step_avg:42.13ms +[2025-09-11 08:54:10] [Rank 0] step:6741/10000 train_time:283877ms step_avg:42.11ms +[2025-09-11 08:54:10] [Rank 0] step:6741/10000 train_time:283877ms step_avg:42.11ms +[2025-09-11 08:54:10] [Rank 0] step:6761/10000 train_time:284576ms step_avg:42.09ms +[2025-09-11 08:54:10] [Rank 0] step:6761/10000 train_time:284576ms step_avg:42.09ms +[2025-09-11 08:54:11] [Rank 0] step:6781/10000 train_time:285275ms step_avg:42.07ms +[2025-09-11 08:54:11] [Rank 0] step:6781/10000 train_time:285275ms step_avg:42.07ms +[2025-09-11 08:54:12] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 08:54:12] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 08:54:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 08:54:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 08:54:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 08:54:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 08:54:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:54:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:54:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 08:54:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 08:54:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 08:54:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 08:54:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 08:54:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 08:54:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 08:54:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 08:54:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 08:54:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 08:54:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 08:54:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 08:54:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 08:54:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 08:54:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 08:54:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 08:54:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 08:54:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 08:54:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 08:54:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 08:54:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 08:54:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 08:54:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 08:54:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 08:54:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 08:54:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 08:54:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 08:54:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 08:54:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 08:54:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 08:54:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 08:54:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 08:54:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 08:54:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 08:54:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 08:54:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 08:54:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 08:54:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:54:22] [Rank 0] PRINT: step:6800/10000 val_loss:4.2892 total_sharp:1.1433e-04 L1_sharp:4.4409e-04 L2_sharp:6.6229e-05 L3_sharp:-3.8668e-05 L4_sharp:4.5285e-05 L5_sharp:7.3357e-05 L6_sharp:7.1512e-05 L7_sharp:5.9048e-05 L8_sharp:1.4508e-04 L9_sharp:1.6525e-04 L10_sharp:1.8927e-04 L11_sharp:2.4658e-04 L12_sharp:1.9174e-03 total_fnorm:6.3250e+01 total_l1_linf:1.4643e+05 total_spectral:3.1500e+01 L1_fnorm:4.8750e+00 L2_fnorm:4.7812e+00 L3_fnorm:4.9062e+00 L4_fnorm:4.8750e+00 L5_fnorm:4.9375e+00 L6_fnorm:5.0000e+00 L7_fnorm:4.9688e+00 L8_fnorm:4.7812e+00 L9_fnorm:4.9375e+00 L10_fnorm:4.9062e+00 L11_fnorm:4.9375e+00 L12_fnorm:4.9375e+00 L1_l1linf:1.1875e+00 L2_l1linf:1.0547e+00 L3_l1linf:1.0391e+00 L4_l1linf:1.1562e+00 L5_l1linf:1.1641e+00 L6_l1linf:1.1797e+00 L7_l1linf:1.1875e+00 L8_l1linf:1.1406e+00 L9_l1linf:1.1016e+00 L10_l1linf:1.0625e+00 L11_l1linf:1.0078e+00 L12_l1linf:1.0391e+00 L1_spectral:6.5337e-02 L2_spectral:6.3182e-02 L3_spectral:6.4239e-02 L4_spectral:6.4997e-02 L5_spectral:6.5050e-02 L6_spectral:6.5579e-02 L7_spectral:6.5454e-02 L8_spectral:6.4033e-02 L9_spectral:6.5508e-02 L10_spectral:6.5977e-02 L11_spectral:6.6186e-02 L12_spectral:6.5716e-02 train_time:285955ms step_avg:42.05ms +[2025-09-11 08:54:22] [Rank 0] PRINT: step:6800/10000 val_loss:4.2892 total_sharp:1.1433e-04 L1_sharp:4.4409e-04 L2_sharp:6.6229e-05 L3_sharp:-3.8668e-05 L4_sharp:4.5285e-05 L5_sharp:7.3357e-05 L6_sharp:7.1512e-05 L7_sharp:5.9048e-05 L8_sharp:1.4508e-04 L9_sharp:1.6525e-04 L10_sharp:1.8927e-04 L11_sharp:2.4658e-04 L12_sharp:1.9174e-03 total_fnorm:6.3250e+01 total_l1_linf:1.4643e+05 total_spectral:3.1500e+01 L1_fnorm:4.8750e+00 L2_fnorm:4.7812e+00 L3_fnorm:4.9062e+00 L4_fnorm:4.8750e+00 L5_fnorm:4.9375e+00 L6_fnorm:5.0000e+00 L7_fnorm:4.9688e+00 L8_fnorm:4.7812e+00 L9_fnorm:4.9375e+00 L10_fnorm:4.9062e+00 L11_fnorm:4.9375e+00 L12_fnorm:4.9375e+00 L1_l1linf:1.1875e+00 L2_l1linf:1.0547e+00 L3_l1linf:1.0391e+00 L4_l1linf:1.1562e+00 L5_l1linf:1.1641e+00 
L6_l1linf:1.1797e+00 L7_l1linf:1.1875e+00 L8_l1linf:1.1406e+00 L9_l1linf:1.1016e+00 L10_l1linf:1.0625e+00 L11_l1linf:1.0078e+00 L12_l1linf:1.0391e+00 L1_spectral:6.5337e-02 L2_spectral:6.3182e-02 L3_spectral:6.4239e-02 L4_spectral:6.4997e-02 L5_spectral:6.5050e-02 L6_spectral:6.5579e-02 L7_spectral:6.5454e-02 L8_spectral:6.4033e-02 L9_spectral:6.5508e-02 L10_spectral:6.5977e-02 L11_spectral:6.6186e-02 L12_spectral:6.5716e-02 train_time:285955ms step_avg:42.05ms +[2025-09-11 08:54:23] [Rank 0] step:6801/10000 train_time:287113ms step_avg:42.22ms +[2025-09-11 08:54:23] [Rank 0] step:6801/10000 train_time:287113ms step_avg:42.22ms +[2025-09-11 08:54:24] [Rank 0] step:6821/10000 train_time:287862ms step_avg:42.20ms +[2025-09-11 08:54:24] [Rank 0] step:6821/10000 train_time:287862ms step_avg:42.20ms +[2025-09-11 08:54:24] [Rank 0] step:6841/10000 train_time:288566ms step_avg:42.18ms +[2025-09-11 08:54:24] [Rank 0] step:6841/10000 train_time:288566ms step_avg:42.18ms +[2025-09-11 08:54:25] [Rank 0] step:6861/10000 train_time:289269ms step_avg:42.16ms +[2025-09-11 08:54:25] [Rank 0] step:6861/10000 train_time:289269ms step_avg:42.16ms +[2025-09-11 08:54:26] [Rank 0] step:6881/10000 train_time:289969ms step_avg:42.14ms +[2025-09-11 08:54:26] [Rank 0] step:6881/10000 train_time:289969ms step_avg:42.14ms +[2025-09-11 08:54:26] [Rank 0] step:6901/10000 train_time:290668ms step_avg:42.12ms +[2025-09-11 08:54:26] [Rank 0] step:6901/10000 train_time:290668ms step_avg:42.12ms +[2025-09-11 08:54:27] [Rank 0] step:6921/10000 train_time:291367ms step_avg:42.10ms +[2025-09-11 08:54:27] [Rank 0] step:6921/10000 train_time:291367ms step_avg:42.10ms +[2025-09-11 08:54:28] [Rank 0] step:6941/10000 train_time:292068ms step_avg:42.08ms +[2025-09-11 08:54:28] [Rank 0] step:6941/10000 train_time:292068ms step_avg:42.08ms +[2025-09-11 08:54:29] [Rank 0] step:6961/10000 train_time:292768ms step_avg:42.06ms +[2025-09-11 08:54:29] [Rank 0] step:6961/10000 train_time:292768ms step_avg:42.06ms 
+[2025-09-11 08:54:29] [Rank 0] step:6981/10000 train_time:293470ms step_avg:42.04ms +[2025-09-11 08:54:29] [Rank 0] step:6981/10000 train_time:293470ms step_avg:42.04ms +[2025-09-11 08:54:31] [Rank 0] step:7001/10000 train_time:294669ms step_avg:42.09ms +[2025-09-11 08:54:31] [Rank 0] step:7001/10000 train_time:294669ms step_avg:42.09ms +[2025-09-11 08:54:31] [Rank 0] step:7021/10000 train_time:295409ms step_avg:42.08ms +[2025-09-11 08:54:31] [Rank 0] step:7021/10000 train_time:295409ms step_avg:42.08ms +[2025-09-11 08:54:32] [Rank 0] step:7041/10000 train_time:296110ms step_avg:42.06ms +[2025-09-11 08:54:32] [Rank 0] step:7041/10000 train_time:296110ms step_avg:42.06ms +[2025-09-11 08:54:33] [Rank 0] step:7061/10000 train_time:297066ms step_avg:42.07ms +[2025-09-11 08:54:33] [Rank 0] step:7061/10000 train_time:297066ms step_avg:42.07ms +[2025-09-11 08:54:34] [Rank 0] step:7081/10000 train_time:297766ms step_avg:42.05ms +[2025-09-11 08:54:34] [Rank 0] step:7081/10000 train_time:297766ms step_avg:42.05ms +[2025-09-11 08:54:34] [Rank 0] step:7101/10000 train_time:298467ms step_avg:42.03ms +[2025-09-11 08:54:34] [Rank 0] step:7101/10000 train_time:298467ms step_avg:42.03ms +[2025-09-11 08:54:35] [Rank 0] step:7121/10000 train_time:299168ms step_avg:42.01ms +[2025-09-11 08:54:35] [Rank 0] step:7121/10000 train_time:299168ms step_avg:42.01ms +[2025-09-11 08:54:36] [Rank 0] step:7141/10000 train_time:299868ms step_avg:41.99ms +[2025-09-11 08:54:36] [Rank 0] step:7141/10000 train_time:299868ms step_avg:41.99ms +[2025-09-11 08:54:36] [Rank 0] step:7161/10000 train_time:300570ms step_avg:41.97ms +[2025-09-11 08:54:36] [Rank 0] step:7161/10000 train_time:300570ms step_avg:41.97ms +[2025-09-11 08:54:37] [Rank 0] step:7181/10000 train_time:301269ms step_avg:41.95ms +[2025-09-11 08:54:37] [Rank 0] step:7181/10000 train_time:301269ms step_avg:41.95ms +[2025-09-11 08:54:38] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 08:54:38] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 08:54:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 08:54:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 08:54:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 08:54:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 08:54:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:54:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:54:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 08:54:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 08:54:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 08:54:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 08:54:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 08:54:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 08:54:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 08:54:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 08:54:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 08:54:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 08:54:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 08:54:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 08:54:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 08:54:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 08:54:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 08:54:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 08:54:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 08:54:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 08:54:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 08:54:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 08:54:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 08:54:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 08:54:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 08:54:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 08:54:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 08:54:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 08:54:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 08:54:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 08:54:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 08:54:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 08:54:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 08:54:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 08:54:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 08:54:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 08:54:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 08:54:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 08:54:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 08:54:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 08:54:48] [Rank 0] PRINT: step:7200/10000 val_loss:4.2553 total_sharp:9.8885e-05 L1_sharp:2.4066e-04 L2_sharp:1.6200e-05 L3_sharp:1.2068e-05 L4_sharp:-6.3939e-06 L5_sharp:6.5726e-05 L6_sharp:8.5611e-05 L7_sharp:8.1445e-05 L8_sharp:1.4448e-04 L9_sharp:1.5329e-04 L10_sharp:1.8540e-04 L11_sharp:2.2975e-04 L12_sharp:1.0269e-03 total_fnorm:5.2750e+01 total_l1_linf:1.2134e+05 total_spectral:2.7375e+01 L1_fnorm:4.2812e+00 L2_fnorm:4.1562e+00 L3_fnorm:4.2812e+00 L4_fnorm:4.2812e+00 L5_fnorm:4.3438e+00 L6_fnorm:4.3438e+00 L7_fnorm:4.3438e+00 L8_fnorm:4.1562e+00 L9_fnorm:4.2812e+00 L10_fnorm:4.2500e+00 L11_fnorm:4.2812e+00 L12_fnorm:4.2500e+00 L1_l1linf:9.5703e-01 L2_l1linf:9.1797e-01 L3_l1linf:8.6328e-01 L4_l1linf:9.5312e-01 L5_l1linf:9.8047e-01 L6_l1linf:9.9609e-01 L7_l1linf:1.0078e+00 L8_l1linf:9.5703e-01 L9_l1linf:9.0625e-01 L10_l1linf:8.8672e-01 L11_l1linf:8.3203e-01 L12_l1linf:8.5156e-01 L1_spectral:5.7629e-02 L2_spectral:5.5381e-02 L3_spectral:5.7704e-02 L4_spectral:5.7506e-02 L5_spectral:5.7869e-02 L6_spectral:5.8362e-02 L7_spectral:5.7867e-02 L8_spectral:5.6752e-02 L9_spectral:5.8665e-02 L10_spectral:5.8247e-02 L11_spectral:5.8331e-02 L12_spectral:5.8325e-02 train_time:301950ms step_avg:41.94ms +[2025-09-11 08:54:48] [Rank 0] PRINT: step:7200/10000 
val_loss:4.2553 total_sharp:9.8885e-05 L1_sharp:2.4066e-04 L2_sharp:1.6200e-05 L3_sharp:1.2068e-05 L4_sharp:-6.3939e-06 L5_sharp:6.5726e-05 L6_sharp:8.5611e-05 L7_sharp:8.1445e-05 L8_sharp:1.4448e-04 L9_sharp:1.5329e-04 L10_sharp:1.8540e-04 L11_sharp:2.2975e-04 L12_sharp:1.0269e-03 total_fnorm:5.2750e+01 total_l1_linf:1.2134e+05 total_spectral:2.7375e+01 L1_fnorm:4.2812e+00 L2_fnorm:4.1562e+00 L3_fnorm:4.2812e+00 L4_fnorm:4.2812e+00 L5_fnorm:4.3438e+00 L6_fnorm:4.3438e+00 L7_fnorm:4.3438e+00 L8_fnorm:4.1562e+00 L9_fnorm:4.2812e+00 L10_fnorm:4.2500e+00 L11_fnorm:4.2812e+00 L12_fnorm:4.2500e+00 L1_l1linf:9.5703e-01 L2_l1linf:9.1797e-01 L3_l1linf:8.6328e-01 L4_l1linf:9.5312e-01 L5_l1linf:9.8047e-01 L6_l1linf:9.9609e-01 L7_l1linf:1.0078e+00 L8_l1linf:9.5703e-01 L9_l1linf:9.0625e-01 L10_l1linf:8.8672e-01 L11_l1linf:8.3203e-01 L12_l1linf:8.5156e-01 L1_spectral:5.7629e-02 L2_spectral:5.5381e-02 L3_spectral:5.7704e-02 L4_spectral:5.7506e-02 L5_spectral:5.7869e-02 L6_spectral:5.8362e-02 L7_spectral:5.7867e-02 L8_spectral:5.6752e-02 L9_spectral:5.8665e-02 L10_spectral:5.8247e-02 L11_spectral:5.8331e-02 L12_spectral:5.8325e-02 train_time:301950ms step_avg:41.94ms +[2025-09-11 08:54:49] [Rank 0] step:7201/10000 train_time:303134ms step_avg:42.10ms +[2025-09-11 08:54:49] [Rank 0] step:7201/10000 train_time:303134ms step_avg:42.10ms +[2025-09-11 08:54:50] [Rank 0] step:7221/10000 train_time:303836ms step_avg:42.08ms +[2025-09-11 08:54:50] [Rank 0] step:7221/10000 train_time:303836ms step_avg:42.08ms +[2025-09-11 08:54:51] [Rank 0] step:7241/10000 train_time:304539ms step_avg:42.06ms +[2025-09-11 08:54:51] [Rank 0] step:7241/10000 train_time:304539ms step_avg:42.06ms +[2025-09-11 08:54:51] [Rank 0] step:7261/10000 train_time:305242ms step_avg:42.04ms +[2025-09-11 08:54:51] [Rank 0] step:7261/10000 train_time:305242ms step_avg:42.04ms +[2025-09-11 08:54:52] [Rank 0] step:7281/10000 train_time:305948ms step_avg:42.02ms +[2025-09-11 08:54:52] [Rank 0] step:7281/10000 
train_time:305948ms step_avg:42.02ms +[2025-09-11 08:54:53] [Rank 0] step:7301/10000 train_time:306647ms step_avg:42.00ms +[2025-09-11 08:54:53] [Rank 0] step:7301/10000 train_time:306647ms step_avg:42.00ms +[2025-09-11 08:54:53] [Rank 0] step:7321/10000 train_time:307348ms step_avg:41.98ms +[2025-09-11 08:54:53] [Rank 0] step:7321/10000 train_time:307348ms step_avg:41.98ms +[2025-09-11 08:54:54] [Rank 0] step:7341/10000 train_time:308050ms step_avg:41.96ms +[2025-09-11 08:54:54] [Rank 0] step:7341/10000 train_time:308050ms step_avg:41.96ms +[2025-09-11 08:54:55] [Rank 0] step:7361/10000 train_time:308751ms step_avg:41.94ms +[2025-09-11 08:54:55] [Rank 0] step:7361/10000 train_time:308751ms step_avg:41.94ms +[2025-09-11 08:54:56] [Rank 0] step:7381/10000 train_time:309454ms step_avg:41.93ms +[2025-09-11 08:54:56] [Rank 0] step:7381/10000 train_time:309454ms step_avg:41.93ms +[2025-09-11 08:54:56] [Rank 0] step:7401/10000 train_time:310155ms step_avg:41.91ms +[2025-09-11 08:54:56] [Rank 0] step:7401/10000 train_time:310155ms step_avg:41.91ms +[2025-09-11 08:54:57] [Rank 0] step:7421/10000 train_time:310856ms step_avg:41.89ms +[2025-09-11 08:54:57] [Rank 0] step:7421/10000 train_time:310856ms step_avg:41.89ms +[2025-09-11 08:54:58] [Rank 0] step:7441/10000 train_time:311559ms step_avg:41.87ms +[2025-09-11 08:54:58] [Rank 0] step:7441/10000 train_time:311559ms step_avg:41.87ms +[2025-09-11 08:54:58] [Rank 0] step:7461/10000 train_time:312260ms step_avg:41.85ms +[2025-09-11 08:54:58] [Rank 0] step:7461/10000 train_time:312260ms step_avg:41.85ms +[2025-09-11 08:54:59] [Rank 0] step:7481/10000 train_time:312965ms step_avg:41.83ms +[2025-09-11 08:54:59] [Rank 0] step:7481/10000 train_time:312965ms step_avg:41.83ms +[2025-09-11 08:55:00] [Rank 0] step:7501/10000 train_time:313667ms step_avg:41.82ms +[2025-09-11 08:55:00] [Rank 0] step:7501/10000 train_time:313667ms step_avg:41.82ms +[2025-09-11 08:55:00] [Rank 0] step:7521/10000 train_time:314370ms step_avg:41.80ms 
+[2025-09-11 08:55:00] [Rank 0] step:7521/10000 train_time:314370ms step_avg:41.80ms +[2025-09-11 08:55:01] [Rank 0] step:7541/10000 train_time:315228ms step_avg:41.80ms +[2025-09-11 08:55:01] [Rank 0] step:7541/10000 train_time:315228ms step_avg:41.80ms +[2025-09-11 08:55:02] [Rank 0] step:7561/10000 train_time:316022ms step_avg:41.80ms +[2025-09-11 08:55:02] [Rank 0] step:7561/10000 train_time:316022ms step_avg:41.80ms +[2025-09-11 08:55:03] [Rank 0] step:7581/10000 train_time:316724ms step_avg:41.78ms +[2025-09-11 08:55:03] [Rank 0] step:7581/10000 train_time:316724ms step_avg:41.78ms +[2025-09-11 08:55:04] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 08:55:04] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 08:55:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 08:55:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 08:55:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 08:55:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 08:55:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:55:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:55:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 08:55:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 08:55:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 08:55:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 08:55:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 08:55:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 08:55:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 08:55:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 08:55:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 08:55:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 08:55:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 08:55:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 08:55:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 08:55:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 08:55:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 08:55:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 08:55:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 08:55:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 08:55:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 08:55:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 08:55:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 08:55:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 08:55:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 08:55:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 08:55:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 08:55:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 08:55:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 08:55:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 08:55:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 08:55:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 08:55:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 08:55:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 08:55:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 08:55:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 08:55:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 08:55:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 08:55:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 08:55:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:55:14] [Rank 0] PRINT: step:7600/10000 val_loss:4.2207 total_sharp:8.6935e-05 L1_sharp:3.4119e-04 L2_sharp:-3.2884e-06 L3_sharp:3.6652e-05 L4_sharp:4.4357e-05 L5_sharp:3.9504e-05 L6_sharp:3.9557e-05 L7_sharp:6.8054e-05 L8_sharp:1.1985e-04 L9_sharp:1.1636e-04 L10_sharp:1.6844e-04 L11_sharp:2.3046e-04 L12_sharp:9.3588e-04 total_fnorm:4.5750e+01 total_l1_linf:9.5232e+04 total_spectral:2.2625e+01 L1_fnorm:3.6406e+00 L2_fnorm:3.5625e+00 L3_fnorm:3.6250e+00 L4_fnorm:3.6094e+00 L5_fnorm:3.6562e+00 L6_fnorm:3.6875e+00 L7_fnorm:3.6562e+00 L8_fnorm:3.4844e+00 L9_fnorm:3.5938e+00 L10_fnorm:3.5938e+00 L11_fnorm:3.6094e+00 L12_fnorm:3.5938e+00 L1_l1linf:7.8906e-01 L2_l1linf:7.3047e-01 L3_l1linf:7.0703e-01 L4_l1linf:7.7734e-01 L5_l1linf:7.9297e-01 L6_l1linf:8.0469e-01 L7_l1linf:8.0469e-01 L8_l1linf:7.8906e-01 L9_l1linf:7.3047e-01 L10_l1linf:6.9922e-01 L11_l1linf:6.7969e-01 L12_l1linf:6.9141e-01 L1_spectral:5.0394e-02 L2_spectral:4.8331e-02 L3_spectral:4.9105e-02 L4_spectral:4.9278e-02 L5_spectral:5.0210e-02 L6_spectral:5.0247e-02 L7_spectral:4.9889e-02 L8_spectral:4.8643e-02 L9_spectral:5.0217e-02 L10_spectral:5.0785e-02 L11_spectral:5.0674e-02 L12_spectral:5.0594e-02 train_time:317409ms step_avg:41.76ms +[2025-09-11 08:55:14] [Rank 0] PRINT: step:7600/10000 val_loss:4.2207 total_sharp:8.6935e-05 L1_sharp:3.4119e-04 L2_sharp:-3.2884e-06 L3_sharp:3.6652e-05 L4_sharp:4.4357e-05 L5_sharp:3.9504e-05 L6_sharp:3.9557e-05 L7_sharp:6.8054e-05 L8_sharp:1.1985e-04 L9_sharp:1.1636e-04 L10_sharp:1.6844e-04 L11_sharp:2.3046e-04 L12_sharp:9.3588e-04 total_fnorm:4.5750e+01 total_l1_linf:9.5232e+04 total_spectral:2.2625e+01 L1_fnorm:3.6406e+00 L2_fnorm:3.5625e+00 L3_fnorm:3.6250e+00 L4_fnorm:3.6094e+00 L5_fnorm:3.6562e+00 L6_fnorm:3.6875e+00 L7_fnorm:3.6562e+00 L8_fnorm:3.4844e+00 L9_fnorm:3.5938e+00 L10_fnorm:3.5938e+00 L11_fnorm:3.6094e+00 L12_fnorm:3.5938e+00 L1_l1linf:7.8906e-01 L2_l1linf:7.3047e-01 L3_l1linf:7.0703e-01 L4_l1linf:7.7734e-01 L5_l1linf:7.9297e-01 
L6_l1linf:8.0469e-01 L7_l1linf:8.0469e-01 L8_l1linf:7.8906e-01 L9_l1linf:7.3047e-01 L10_l1linf:6.9922e-01 L11_l1linf:6.7969e-01 L12_l1linf:6.9141e-01 L1_spectral:5.0394e-02 L2_spectral:4.8331e-02 L3_spectral:4.9105e-02 L4_spectral:4.9278e-02 L5_spectral:5.0210e-02 L6_spectral:5.0247e-02 L7_spectral:4.9889e-02 L8_spectral:4.8643e-02 L9_spectral:5.0217e-02 L10_spectral:5.0785e-02 L11_spectral:5.0674e-02 L12_spectral:5.0594e-02 train_time:317409ms step_avg:41.76ms +[2025-09-11 08:55:15] [Rank 0] step:7601/10000 train_time:318628ms step_avg:41.92ms +[2025-09-11 08:55:15] [Rank 0] step:7601/10000 train_time:318628ms step_avg:41.92ms +[2025-09-11 08:55:16] [Rank 0] step:7621/10000 train_time:319359ms step_avg:41.91ms +[2025-09-11 08:55:16] [Rank 0] step:7621/10000 train_time:319359ms step_avg:41.91ms +[2025-09-11 08:55:16] [Rank 0] step:7641/10000 train_time:320063ms step_avg:41.89ms +[2025-09-11 08:55:16] [Rank 0] step:7641/10000 train_time:320063ms step_avg:41.89ms +[2025-09-11 08:55:17] [Rank 0] step:7661/10000 train_time:320765ms step_avg:41.87ms +[2025-09-11 08:55:17] [Rank 0] step:7661/10000 train_time:320765ms step_avg:41.87ms +[2025-09-11 08:55:18] [Rank 0] step:7681/10000 train_time:321468ms step_avg:41.85ms +[2025-09-11 08:55:18] [Rank 0] step:7681/10000 train_time:321468ms step_avg:41.85ms +[2025-09-11 08:55:18] [Rank 0] step:7701/10000 train_time:322173ms step_avg:41.84ms +[2025-09-11 08:55:18] [Rank 0] step:7701/10000 train_time:322173ms step_avg:41.84ms +[2025-09-11 08:55:19] [Rank 0] step:7721/10000 train_time:322876ms step_avg:41.82ms +[2025-09-11 08:55:19] [Rank 0] step:7721/10000 train_time:322876ms step_avg:41.82ms +[2025-09-11 08:55:20] [Rank 0] step:7741/10000 train_time:323580ms step_avg:41.80ms +[2025-09-11 08:55:20] [Rank 0] step:7741/10000 train_time:323580ms step_avg:41.80ms +[2025-09-11 08:55:21] [Rank 0] step:7761/10000 train_time:324283ms step_avg:41.78ms +[2025-09-11 08:55:21] [Rank 0] step:7761/10000 train_time:324283ms step_avg:41.78ms 
+[2025-09-11 08:55:21] [Rank 0] step:7781/10000 train_time:324987ms step_avg:41.77ms +[2025-09-11 08:55:21] [Rank 0] step:7781/10000 train_time:324987ms step_avg:41.77ms +[2025-09-11 08:55:22] [Rank 0] step:7801/10000 train_time:325689ms step_avg:41.75ms +[2025-09-11 08:55:22] [Rank 0] step:7801/10000 train_time:325689ms step_avg:41.75ms +[2025-09-11 08:55:23] [Rank 0] step:7821/10000 train_time:326393ms step_avg:41.73ms +[2025-09-11 08:55:23] [Rank 0] step:7821/10000 train_time:326393ms step_avg:41.73ms +[2025-09-11 08:55:23] [Rank 0] step:7841/10000 train_time:327099ms step_avg:41.72ms +[2025-09-11 08:55:23] [Rank 0] step:7841/10000 train_time:327099ms step_avg:41.72ms +[2025-09-11 08:55:24] [Rank 0] step:7861/10000 train_time:327805ms step_avg:41.70ms +[2025-09-11 08:55:24] [Rank 0] step:7861/10000 train_time:327805ms step_avg:41.70ms +[2025-09-11 08:55:25] [Rank 0] step:7881/10000 train_time:328508ms step_avg:41.68ms +[2025-09-11 08:55:25] [Rank 0] step:7881/10000 train_time:328508ms step_avg:41.68ms +[2025-09-11 08:55:25] [Rank 0] step:7901/10000 train_time:329212ms step_avg:41.67ms +[2025-09-11 08:55:25] [Rank 0] step:7901/10000 train_time:329212ms step_avg:41.67ms +[2025-09-11 08:55:26] [Rank 0] step:7921/10000 train_time:329917ms step_avg:41.65ms +[2025-09-11 08:55:26] [Rank 0] step:7921/10000 train_time:329917ms step_avg:41.65ms +[2025-09-11 08:55:27] [Rank 0] step:7941/10000 train_time:330621ms step_avg:41.63ms +[2025-09-11 08:55:27] [Rank 0] step:7941/10000 train_time:330621ms step_avg:41.63ms +[2025-09-11 08:55:28] [Rank 0] step:7961/10000 train_time:331323ms step_avg:41.62ms +[2025-09-11 08:55:28] [Rank 0] step:7961/10000 train_time:331323ms step_avg:41.62ms +[2025-09-11 08:55:28] [Rank 0] step:7981/10000 train_time:332028ms step_avg:41.60ms +[2025-09-11 08:55:28] [Rank 0] step:7981/10000 train_time:332028ms step_avg:41.60ms +[2025-09-11 08:55:29] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 08:55:29] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 08:55:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 08:55:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 08:55:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 08:55:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 08:55:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:55:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:55:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 08:55:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 08:55:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 08:55:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 08:55:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 08:55:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 08:55:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 08:55:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 08:55:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 08:55:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 08:55:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 08:55:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 08:55:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 08:55:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 08:55:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 08:55:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 08:55:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 08:55:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 08:55:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 08:55:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 08:55:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 08:55:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 08:55:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 08:55:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 08:55:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 08:55:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 08:55:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 08:55:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 08:55:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 08:55:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 08:55:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 08:55:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 08:55:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 08:55:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 08:55:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 08:55:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 08:55:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 08:55:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 08:55:42] [Rank 0] PRINT: step:8000/10000 val_loss:4.1915 total_sharp:8.1363e-05 L1_sharp:4.1574e-04 L2_sharp:-1.4842e-05 L3_sharp:5.0204e-05 L4_sharp:4.8863e-05 L5_sharp:8.9367e-05 L6_sharp:7.4445e-05 L7_sharp:8.3331e-05 L8_sharp:1.2667e-04 L9_sharp:1.3137e-04 L10_sharp:1.6925e-04 L11_sharp:2.2284e-04 L12_sharp:9.7311e-04 total_fnorm:3.7250e+01 total_l1_linf:7.6288e+04 total_spectral:1.9250e+01 L1_fnorm:2.9688e+00 L2_fnorm:2.8750e+00 L3_fnorm:2.9531e+00 L4_fnorm:2.9219e+00 L5_fnorm:2.9844e+00 L6_fnorm:2.9844e+00 L7_fnorm:2.9531e+00 L8_fnorm:2.8281e+00 L9_fnorm:2.9219e+00 L10_fnorm:2.9062e+00 L11_fnorm:2.9375e+00 L12_fnorm:2.9219e+00 L1_l1linf:5.8594e-01 L2_l1linf:5.6641e-01 L3_l1linf:5.7031e-01 L4_l1linf:5.9766e-01 L5_l1linf:6.2109e-01 L6_l1linf:6.2500e-01 L7_l1linf:6.2109e-01 L8_l1linf:6.0156e-01 L9_l1linf:5.6641e-01 L10_l1linf:5.3516e-01 L11_l1linf:5.1953e-01 L12_l1linf:5.4688e-01 L1_spectral:4.2163e-02 L2_spectral:4.0848e-02 L3_spectral:4.1358e-02 L4_spectral:4.0469e-02 L5_spectral:4.1114e-02 L6_spectral:4.1417e-02 L7_spectral:4.1599e-02 L8_spectral:4.0444e-02 L9_spectral:4.1612e-02 L10_spectral:4.2168e-02 L11_spectral:4.2014e-02 L12_spectral:4.2406e-02 train_time:332712ms step_avg:41.59ms +[2025-09-11 08:55:42] [Rank 0] PRINT: step:8000/10000 
val_loss:4.1915 total_sharp:8.1363e-05 L1_sharp:4.1574e-04 L2_sharp:-1.4842e-05 L3_sharp:5.0204e-05 L4_sharp:4.8863e-05 L5_sharp:8.9367e-05 L6_sharp:7.4445e-05 L7_sharp:8.3331e-05 L8_sharp:1.2667e-04 L9_sharp:1.3137e-04 L10_sharp:1.6925e-04 L11_sharp:2.2284e-04 L12_sharp:9.7311e-04 total_fnorm:3.7250e+01 total_l1_linf:7.6288e+04 total_spectral:1.9250e+01 L1_fnorm:2.9688e+00 L2_fnorm:2.8750e+00 L3_fnorm:2.9531e+00 L4_fnorm:2.9219e+00 L5_fnorm:2.9844e+00 L6_fnorm:2.9844e+00 L7_fnorm:2.9531e+00 L8_fnorm:2.8281e+00 L9_fnorm:2.9219e+00 L10_fnorm:2.9062e+00 L11_fnorm:2.9375e+00 L12_fnorm:2.9219e+00 L1_l1linf:5.8594e-01 L2_l1linf:5.6641e-01 L3_l1linf:5.7031e-01 L4_l1linf:5.9766e-01 L5_l1linf:6.2109e-01 L6_l1linf:6.2500e-01 L7_l1linf:6.2109e-01 L8_l1linf:6.0156e-01 L9_l1linf:5.6641e-01 L10_l1linf:5.3516e-01 L11_l1linf:5.1953e-01 L12_l1linf:5.4688e-01 L1_spectral:4.2163e-02 L2_spectral:4.0848e-02 L3_spectral:4.1358e-02 L4_spectral:4.0469e-02 L5_spectral:4.1114e-02 L6_spectral:4.1417e-02 L7_spectral:4.1599e-02 L8_spectral:4.0444e-02 L9_spectral:4.1612e-02 L10_spectral:4.2168e-02 L11_spectral:4.2014e-02 L12_spectral:4.2406e-02 train_time:332712ms step_avg:41.59ms +[2025-09-11 08:55:44] [Rank 0] step:8001/10000 train_time:333951ms step_avg:41.74ms +[2025-09-11 08:55:44] [Rank 0] step:8001/10000 train_time:333951ms step_avg:41.74ms +[2025-09-11 08:55:44] [Rank 0] step:8021/10000 train_time:334686ms step_avg:41.73ms +[2025-09-11 08:55:44] [Rank 0] step:8021/10000 train_time:334686ms step_avg:41.73ms +[2025-09-11 08:55:45] [Rank 0] step:8041/10000 train_time:335391ms step_avg:41.71ms +[2025-09-11 08:55:45] [Rank 0] step:8041/10000 train_time:335391ms step_avg:41.71ms +[2025-09-11 08:55:46] [Rank 0] step:8061/10000 train_time:336098ms step_avg:41.69ms +[2025-09-11 08:55:46] [Rank 0] step:8061/10000 train_time:336098ms step_avg:41.69ms +[2025-09-11 08:55:46] [Rank 0] step:8081/10000 train_time:336800ms step_avg:41.68ms +[2025-09-11 08:55:46] [Rank 0] step:8081/10000 
train_time:336800ms step_avg:41.68ms +[2025-09-11 08:55:47] [Rank 0] step:8101/10000 train_time:337501ms step_avg:41.66ms +[2025-09-11 08:55:47] [Rank 0] step:8101/10000 train_time:337501ms step_avg:41.66ms +[2025-09-11 08:55:48] [Rank 0] step:8121/10000 train_time:338209ms step_avg:41.65ms +[2025-09-11 08:55:48] [Rank 0] step:8121/10000 train_time:338209ms step_avg:41.65ms +[2025-09-11 08:55:49] [Rank 0] step:8141/10000 train_time:339644ms step_avg:41.72ms +[2025-09-11 08:55:49] [Rank 0] step:8141/10000 train_time:339644ms step_avg:41.72ms +[2025-09-11 08:55:50] [Rank 0] step:8161/10000 train_time:340351ms step_avg:41.70ms +[2025-09-11 08:55:50] [Rank 0] step:8161/10000 train_time:340351ms step_avg:41.70ms +[2025-09-11 08:55:51] [Rank 0] step:8181/10000 train_time:341066ms step_avg:41.69ms +[2025-09-11 08:55:51] [Rank 0] step:8181/10000 train_time:341066ms step_avg:41.69ms +[2025-09-11 08:55:51] [Rank 0] step:8201/10000 train_time:341778ms step_avg:41.68ms +[2025-09-11 08:55:51] [Rank 0] step:8201/10000 train_time:341778ms step_avg:41.68ms +[2025-09-11 08:55:52] [Rank 0] step:8221/10000 train_time:342488ms step_avg:41.66ms +[2025-09-11 08:55:52] [Rank 0] step:8221/10000 train_time:342488ms step_avg:41.66ms +[2025-09-11 08:55:53] [Rank 0] step:8241/10000 train_time:343209ms step_avg:41.65ms +[2025-09-11 08:55:53] [Rank 0] step:8241/10000 train_time:343209ms step_avg:41.65ms +[2025-09-11 08:55:54] [Rank 0] step:8261/10000 train_time:343919ms step_avg:41.63ms +[2025-09-11 08:55:54] [Rank 0] step:8261/10000 train_time:343919ms step_avg:41.63ms +[2025-09-11 08:55:54] [Rank 0] step:8281/10000 train_time:344626ms step_avg:41.62ms +[2025-09-11 08:55:54] [Rank 0] step:8281/10000 train_time:344626ms step_avg:41.62ms +[2025-09-11 08:55:55] [Rank 0] step:8301/10000 train_time:345337ms step_avg:41.60ms +[2025-09-11 08:55:55] [Rank 0] step:8301/10000 train_time:345337ms step_avg:41.60ms +[2025-09-11 08:55:56] [Rank 0] step:8321/10000 train_time:346049ms step_avg:41.59ms 
+[2025-09-11 08:55:56] [Rank 0] step:8321/10000 train_time:346049ms step_avg:41.59ms +[2025-09-11 08:55:56] [Rank 0] step:8341/10000 train_time:346766ms step_avg:41.57ms +[2025-09-11 08:55:56] [Rank 0] step:8341/10000 train_time:346766ms step_avg:41.57ms +[2025-09-11 08:55:57] [Rank 0] step:8361/10000 train_time:347472ms step_avg:41.56ms +[2025-09-11 08:55:57] [Rank 0] step:8361/10000 train_time:347472ms step_avg:41.56ms +[2025-09-11 08:55:58] [Rank 0] step:8381/10000 train_time:348185ms step_avg:41.54ms +[2025-09-11 08:55:58] [Rank 0] step:8381/10000 train_time:348185ms step_avg:41.54ms +[2025-09-11 08:55:59] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 08:55:59] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 08:55:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 08:55:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 08:56:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 08:56:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 08:56:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:56:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:56:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 08:56:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 08:56:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 08:56:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 08:56:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 08:56:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 08:56:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 08:56:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 08:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 08:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 08:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 08:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 08:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 08:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 08:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 08:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 08:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 08:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 08:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 08:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 08:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 08:56:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 08:56:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 08:56:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 08:56:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 08:56:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 08:56:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 08:56:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 08:56:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 08:56:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 08:56:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 08:56:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 08:56:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 08:56:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 08:56:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 08:56:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 08:56:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 08:56:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:56:09] [Rank 0] PRINT: step:8400/10000 val_loss:4.1684 total_sharp:7.0551e-05 L1_sharp:3.3230e-04 L2_sharp:8.2842e-05 L3_sharp:2.6583e-05 L4_sharp:4.1812e-05 L5_sharp:3.1545e-05 L6_sharp:2.8562e-05 L7_sharp:5.2763e-05 L8_sharp:1.0356e-04 L9_sharp:1.0980e-04 L10_sharp:1.4965e-04 L11_sharp:2.0571e-04 L12_sharp:1.2345e-03 total_fnorm:2.9875e+01 total_l1_linf:5.5040e+04 total_spectral:1.5250e+01 L1_fnorm:2.3750e+00 L2_fnorm:2.2969e+00 L3_fnorm:2.3281e+00 L4_fnorm:2.2969e+00 L5_fnorm:2.3125e+00 L6_fnorm:2.3281e+00 L7_fnorm:2.3125e+00 L8_fnorm:2.2344e+00 L9_fnorm:2.2656e+00 L10_fnorm:2.2500e+00 L11_fnorm:2.2656e+00 L12_fnorm:2.2812e+00 L1_l1linf:4.4336e-01 L2_l1linf:4.6484e-01 L3_l1linf:4.3164e-01 L4_l1linf:4.4336e-01 L5_l1linf:4.4922e-01 L6_l1linf:4.6484e-01 L7_l1linf:4.6875e-01 L8_l1linf:4.4336e-01 L9_l1linf:4.1211e-01 L10_l1linf:4.0234e-01 L11_l1linf:3.8086e-01 L12_l1linf:4.0625e-01 L1_spectral:3.4159e-02 L2_spectral:3.4044e-02 L3_spectral:3.4027e-02 L4_spectral:3.2203e-02 L5_spectral:3.2542e-02 L6_spectral:3.2831e-02 L7_spectral:3.3170e-02 L8_spectral:3.2899e-02 L9_spectral:3.3361e-02 L10_spectral:3.3469e-02 L11_spectral:3.3582e-02 L12_spectral:3.3797e-02 train_time:348877ms step_avg:41.53ms +[2025-09-11 08:56:09] [Rank 0] PRINT: step:8400/10000 val_loss:4.1684 total_sharp:7.0551e-05 L1_sharp:3.3230e-04 L2_sharp:8.2842e-05 L3_sharp:2.6583e-05 L4_sharp:4.1812e-05 L5_sharp:3.1545e-05 L6_sharp:2.8562e-05 L7_sharp:5.2763e-05 L8_sharp:1.0356e-04 L9_sharp:1.0980e-04 L10_sharp:1.4965e-04 L11_sharp:2.0571e-04 L12_sharp:1.2345e-03 total_fnorm:2.9875e+01 total_l1_linf:5.5040e+04 total_spectral:1.5250e+01 L1_fnorm:2.3750e+00 L2_fnorm:2.2969e+00 L3_fnorm:2.3281e+00 L4_fnorm:2.2969e+00 L5_fnorm:2.3125e+00 L6_fnorm:2.3281e+00 L7_fnorm:2.3125e+00 L8_fnorm:2.2344e+00 L9_fnorm:2.2656e+00 L10_fnorm:2.2500e+00 L11_fnorm:2.2656e+00 L12_fnorm:2.2812e+00 L1_l1linf:4.4336e-01 L2_l1linf:4.6484e-01 L3_l1linf:4.3164e-01 L4_l1linf:4.4336e-01 L5_l1linf:4.4922e-01 
L6_l1linf:4.6484e-01 L7_l1linf:4.6875e-01 L8_l1linf:4.4336e-01 L9_l1linf:4.1211e-01 L10_l1linf:4.0234e-01 L11_l1linf:3.8086e-01 L12_l1linf:4.0625e-01 L1_spectral:3.4159e-02 L2_spectral:3.4044e-02 L3_spectral:3.4027e-02 L4_spectral:3.2203e-02 L5_spectral:3.2542e-02 L6_spectral:3.2831e-02 L7_spectral:3.3170e-02 L8_spectral:3.2899e-02 L9_spectral:3.3361e-02 L10_spectral:3.3469e-02 L11_spectral:3.3582e-02 L12_spectral:3.3797e-02 train_time:348877ms step_avg:41.53ms +[2025-09-11 08:56:10] [Rank 0] step:8401/10000 train_time:350069ms step_avg:41.67ms +[2025-09-11 08:56:10] [Rank 0] step:8401/10000 train_time:350069ms step_avg:41.67ms +[2025-09-11 08:56:10] [Rank 0] step:8421/10000 train_time:350803ms step_avg:41.66ms +[2025-09-11 08:56:10] [Rank 0] step:8421/10000 train_time:350803ms step_avg:41.66ms +[2025-09-11 08:56:11] [Rank 0] step:8441/10000 train_time:351517ms step_avg:41.64ms +[2025-09-11 08:56:11] [Rank 0] step:8441/10000 train_time:351517ms step_avg:41.64ms +[2025-09-11 08:56:12] [Rank 0] step:8461/10000 train_time:352229ms step_avg:41.63ms +[2025-09-11 08:56:12] [Rank 0] step:8461/10000 train_time:352229ms step_avg:41.63ms +[2025-09-11 08:56:13] [Rank 0] step:8481/10000 train_time:352944ms step_avg:41.62ms +[2025-09-11 08:56:13] [Rank 0] step:8481/10000 train_time:352944ms step_avg:41.62ms +[2025-09-11 08:56:13] [Rank 0] step:8501/10000 train_time:353654ms step_avg:41.60ms +[2025-09-11 08:56:13] [Rank 0] step:8501/10000 train_time:353654ms step_avg:41.60ms +[2025-09-11 08:56:14] [Rank 0] step:8521/10000 train_time:354365ms step_avg:41.59ms +[2025-09-11 08:56:14] [Rank 0] step:8521/10000 train_time:354365ms step_avg:41.59ms +[2025-09-11 08:56:15] [Rank 0] step:8541/10000 train_time:355075ms step_avg:41.57ms +[2025-09-11 08:56:15] [Rank 0] step:8541/10000 train_time:355075ms step_avg:41.57ms +[2025-09-11 08:56:15] [Rank 0] step:8561/10000 train_time:355791ms step_avg:41.56ms +[2025-09-11 08:56:15] [Rank 0] step:8561/10000 train_time:355791ms step_avg:41.56ms 
+[2025-09-11 08:56:16] [Rank 0] step:8581/10000 train_time:356505ms step_avg:41.55ms +[2025-09-11 08:56:16] [Rank 0] step:8581/10000 train_time:356505ms step_avg:41.55ms +[2025-09-11 08:56:17] [Rank 0] step:8601/10000 train_time:357218ms step_avg:41.53ms +[2025-09-11 08:56:17] [Rank 0] step:8601/10000 train_time:357218ms step_avg:41.53ms +[2025-09-11 08:56:18] [Rank 0] step:8621/10000 train_time:357928ms step_avg:41.52ms +[2025-09-11 08:56:18] [Rank 0] step:8621/10000 train_time:357928ms step_avg:41.52ms +[2025-09-11 08:56:18] [Rank 0] step:8641/10000 train_time:358639ms step_avg:41.50ms +[2025-09-11 08:56:18] [Rank 0] step:8641/10000 train_time:358639ms step_avg:41.50ms +[2025-09-11 08:56:19] [Rank 0] step:8661/10000 train_time:359350ms step_avg:41.49ms +[2025-09-11 08:56:19] [Rank 0] step:8661/10000 train_time:359350ms step_avg:41.49ms +[2025-09-11 08:56:20] [Rank 0] step:8681/10000 train_time:360063ms step_avg:41.48ms +[2025-09-11 08:56:20] [Rank 0] step:8681/10000 train_time:360063ms step_avg:41.48ms +[2025-09-11 08:56:20] [Rank 0] step:8701/10000 train_time:360773ms step_avg:41.46ms +[2025-09-11 08:56:20] [Rank 0] step:8701/10000 train_time:360773ms step_avg:41.46ms +[2025-09-11 08:56:21] [Rank 0] step:8721/10000 train_time:361487ms step_avg:41.45ms +[2025-09-11 08:56:21] [Rank 0] step:8721/10000 train_time:361487ms step_avg:41.45ms +[2025-09-11 08:56:22] [Rank 0] step:8741/10000 train_time:362195ms step_avg:41.44ms +[2025-09-11 08:56:22] [Rank 0] step:8741/10000 train_time:362195ms step_avg:41.44ms +[2025-09-11 08:56:23] [Rank 0] step:8761/10000 train_time:362909ms step_avg:41.42ms +[2025-09-11 08:56:23] [Rank 0] step:8761/10000 train_time:362909ms step_avg:41.42ms +[2025-09-11 08:56:23] [Rank 0] step:8781/10000 train_time:363617ms step_avg:41.41ms +[2025-09-11 08:56:23] [Rank 0] step:8781/10000 train_time:363617ms step_avg:41.41ms +[2025-09-11 08:56:24] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 08:56:24] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 08:56:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 08:56:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 08:56:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 08:56:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 08:56:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:56:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:56:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 08:56:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 08:56:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 08:56:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 08:56:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 08:56:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 08:56:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 08:56:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 08:56:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 08:56:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 08:56:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 08:56:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 08:56:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 08:56:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 08:56:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 08:56:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 08:56:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 08:56:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 08:56:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 08:56:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 08:56:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 08:56:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 08:56:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 08:56:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 08:56:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 08:56:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 08:56:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 08:56:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 08:56:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 08:56:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 08:56:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 08:56:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 08:56:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 08:56:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 08:56:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 08:56:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 08:56:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 08:56:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 08:56:34] [Rank 0] PRINT: step:8800/10000 val_loss:4.1578 total_sharp:6.4824e-05 L1_sharp:2.7666e-04 L2_sharp:-2.6510e-06 L3_sharp:2.0033e-05 L4_sharp:-9.6649e-06 L5_sharp:4.2580e-05 L6_sharp:3.7044e-05 L7_sharp:4.0005e-05 L8_sharp:1.0579e-04 L9_sharp:1.1416e-04 L10_sharp:1.3276e-04 L11_sharp:2.1240e-04 L12_sharp:1.1674e-03 total_fnorm:2.2375e+01 total_l1_linf:3.6608e+04 total_spectral:1.1312e+01 L1_fnorm:1.7578e+00 L2_fnorm:1.6953e+00 L3_fnorm:1.7188e+00 L4_fnorm:1.6797e+00 L5_fnorm:1.6953e+00 L6_fnorm:1.7031e+00 L7_fnorm:1.6719e+00 L8_fnorm:1.6172e+00 L9_fnorm:1.6484e+00 L10_fnorm:1.6328e+00 L11_fnorm:1.6406e+00 L12_fnorm:1.6484e+00 L1_l1linf:2.9297e-01 L2_l1linf:2.8906e-01 L3_l1linf:2.8516e-01 L4_l1linf:2.9492e-01 L5_l1linf:3.1055e-01 L6_l1linf:3.0664e-01 L7_l1linf:3.0273e-01 L8_l1linf:2.9297e-01 L9_l1linf:2.7734e-01 L10_l1linf:2.6367e-01 L11_l1linf:2.4609e-01 L12_l1linf:2.5977e-01 L1_spectral:2.5967e-02 L2_spectral:2.6010e-02 L3_spectral:2.5777e-02 L4_spectral:2.4018e-02 L5_spectral:2.4331e-02 L6_spectral:2.4337e-02 L7_spectral:2.4360e-02 L8_spectral:2.4741e-02 L9_spectral:2.4838e-02 L10_spectral:2.4876e-02 L11_spectral:2.5222e-02 L12_spectral:2.5032e-02 train_time:364307ms step_avg:41.40ms +[2025-09-11 08:56:34] [Rank 0] PRINT: step:8800/10000 
val_loss:4.1578 total_sharp:6.4824e-05 L1_sharp:2.7666e-04 L2_sharp:-2.6510e-06 L3_sharp:2.0033e-05 L4_sharp:-9.6649e-06 L5_sharp:4.2580e-05 L6_sharp:3.7044e-05 L7_sharp:4.0005e-05 L8_sharp:1.0579e-04 L9_sharp:1.1416e-04 L10_sharp:1.3276e-04 L11_sharp:2.1240e-04 L12_sharp:1.1674e-03 total_fnorm:2.2375e+01 total_l1_linf:3.6608e+04 total_spectral:1.1312e+01 L1_fnorm:1.7578e+00 L2_fnorm:1.6953e+00 L3_fnorm:1.7188e+00 L4_fnorm:1.6797e+00 L5_fnorm:1.6953e+00 L6_fnorm:1.7031e+00 L7_fnorm:1.6719e+00 L8_fnorm:1.6172e+00 L9_fnorm:1.6484e+00 L10_fnorm:1.6328e+00 L11_fnorm:1.6406e+00 L12_fnorm:1.6484e+00 L1_l1linf:2.9297e-01 L2_l1linf:2.8906e-01 L3_l1linf:2.8516e-01 L4_l1linf:2.9492e-01 L5_l1linf:3.1055e-01 L6_l1linf:3.0664e-01 L7_l1linf:3.0273e-01 L8_l1linf:2.9297e-01 L9_l1linf:2.7734e-01 L10_l1linf:2.6367e-01 L11_l1linf:2.4609e-01 L12_l1linf:2.5977e-01 L1_spectral:2.5967e-02 L2_spectral:2.6010e-02 L3_spectral:2.5777e-02 L4_spectral:2.4018e-02 L5_spectral:2.4331e-02 L6_spectral:2.4337e-02 L7_spectral:2.4360e-02 L8_spectral:2.4741e-02 L9_spectral:2.4838e-02 L10_spectral:2.4876e-02 L11_spectral:2.5222e-02 L12_spectral:2.5032e-02 train_time:364307ms step_avg:41.40ms +[2025-09-11 08:56:35] [Rank 0] step:8801/10000 train_time:365490ms step_avg:41.53ms +[2025-09-11 08:56:35] [Rank 0] step:8801/10000 train_time:365490ms step_avg:41.53ms +[2025-09-11 08:56:36] [Rank 0] step:8821/10000 train_time:366230ms step_avg:41.52ms +[2025-09-11 08:56:36] [Rank 0] step:8821/10000 train_time:366230ms step_avg:41.52ms +[2025-09-11 08:56:37] [Rank 0] step:8841/10000 train_time:367449ms step_avg:41.56ms +[2025-09-11 08:56:37] [Rank 0] step:8841/10000 train_time:367449ms step_avg:41.56ms +[2025-09-11 08:56:38] [Rank 0] step:8861/10000 train_time:368160ms step_avg:41.55ms +[2025-09-11 08:56:38] [Rank 0] step:8861/10000 train_time:368160ms step_avg:41.55ms +[2025-09-11 08:56:39] [Rank 0] step:8881/10000 train_time:368871ms step_avg:41.53ms +[2025-09-11 08:56:39] [Rank 0] step:8881/10000 
train_time:368871ms step_avg:41.53ms +[2025-09-11 08:56:40] [Rank 0] step:8901/10000 train_time:369844ms step_avg:41.55ms +[2025-09-11 08:56:40] [Rank 0] step:8901/10000 train_time:369844ms step_avg:41.55ms +[2025-09-11 08:56:40] [Rank 0] step:8921/10000 train_time:370552ms step_avg:41.54ms +[2025-09-11 08:56:40] [Rank 0] step:8921/10000 train_time:370552ms step_avg:41.54ms +[2025-09-11 08:56:41] [Rank 0] step:8941/10000 train_time:371264ms step_avg:41.52ms +[2025-09-11 08:56:41] [Rank 0] step:8941/10000 train_time:371264ms step_avg:41.52ms +[2025-09-11 08:56:42] [Rank 0] step:8961/10000 train_time:371984ms step_avg:41.51ms +[2025-09-11 08:56:42] [Rank 0] step:8961/10000 train_time:371984ms step_avg:41.51ms +[2025-09-11 08:56:42] [Rank 0] step:8981/10000 train_time:372699ms step_avg:41.50ms +[2025-09-11 08:56:42] [Rank 0] step:8981/10000 train_time:372699ms step_avg:41.50ms +[2025-09-11 08:56:43] [Rank 0] step:9001/10000 train_time:373405ms step_avg:41.48ms +[2025-09-11 08:56:43] [Rank 0] step:9001/10000 train_time:373405ms step_avg:41.48ms +[2025-09-11 08:56:44] [Rank 0] step:9021/10000 train_time:374118ms step_avg:41.47ms +[2025-09-11 08:56:44] [Rank 0] step:9021/10000 train_time:374118ms step_avg:41.47ms +[2025-09-11 08:56:45] [Rank 0] step:9041/10000 train_time:374832ms step_avg:41.46ms +[2025-09-11 08:56:45] [Rank 0] step:9041/10000 train_time:374832ms step_avg:41.46ms +[2025-09-11 08:56:45] [Rank 0] step:9061/10000 train_time:375543ms step_avg:41.45ms +[2025-09-11 08:56:45] [Rank 0] step:9061/10000 train_time:375543ms step_avg:41.45ms +[2025-09-11 08:56:46] [Rank 0] step:9081/10000 train_time:376256ms step_avg:41.43ms +[2025-09-11 08:56:46] [Rank 0] step:9081/10000 train_time:376256ms step_avg:41.43ms +[2025-09-11 08:56:47] [Rank 0] step:9101/10000 train_time:376971ms step_avg:41.42ms +[2025-09-11 08:56:47] [Rank 0] step:9101/10000 train_time:376971ms step_avg:41.42ms +[2025-09-11 08:56:47] [Rank 0] step:9121/10000 train_time:377687ms step_avg:41.41ms 
+[2025-09-11 08:56:47] [Rank 0] step:9121/10000 train_time:377687ms step_avg:41.41ms +[2025-09-11 08:56:48] [Rank 0] step:9141/10000 train_time:378397ms step_avg:41.40ms +[2025-09-11 08:56:48] [Rank 0] step:9141/10000 train_time:378397ms step_avg:41.40ms +[2025-09-11 08:56:49] [Rank 0] step:9161/10000 train_time:379111ms step_avg:41.38ms +[2025-09-11 08:56:49] [Rank 0] step:9161/10000 train_time:379111ms step_avg:41.38ms +[2025-09-11 08:56:50] [Rank 0] step:9181/10000 train_time:379824ms step_avg:41.37ms +[2025-09-11 08:56:50] [Rank 0] step:9181/10000 train_time:379824ms step_avg:41.37ms +[2025-09-11 08:56:50] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 08:56:50] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 08:56:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 08:56:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 08:56:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 08:56:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 08:56:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:56:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:56:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 08:56:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 08:56:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 08:56:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 08:56:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 08:56:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 08:56:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 08:56:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 08:56:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 08:56:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 08:56:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 08:56:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 08:56:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 08:56:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 08:56:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 08:56:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 08:56:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 08:56:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 08:56:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 08:56:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 08:56:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 08:56:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 08:56:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 08:56:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 08:56:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 08:56:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 08:56:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 08:56:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 08:56:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 08:56:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 08:56:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 08:56:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 08:56:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 08:56:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 08:57:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 08:57:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 08:57:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 08:57:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:57:00] [Rank 0] PRINT: step:9200/10000 val_loss:4.1355 total_sharp:5.4652e-05 L1_sharp:2.4389e-04 L2_sharp:6.3960e-06 L3_sharp:7.1604e-06 L4_sharp:2.6392e-05 L5_sharp:4.2442e-05 L6_sharp:2.3073e-05 L7_sharp:4.9862e-05 L8_sharp:1.0293e-04 L9_sharp:9.6540e-05 L10_sharp:1.3012e-04 L11_sharp:1.8502e-04 L12_sharp:1.1427e-03 total_fnorm:1.5500e+01 total_l1_linf:2.2272e+04 total_spectral:7.9375e+00 L1_fnorm:1.1875e+00 L2_fnorm:1.1172e+00 L3_fnorm:1.1562e+00 L4_fnorm:1.1172e+00 L5_fnorm:1.1250e+00 L6_fnorm:1.1406e+00 L7_fnorm:1.1250e+00 L8_fnorm:1.0859e+00 L9_fnorm:1.1016e+00 L10_fnorm:1.0938e+00 L11_fnorm:1.1016e+00 L12_fnorm:1.1094e+00 L1_l1linf:1.8262e-01 L2_l1linf:1.8066e-01 L3_l1linf:1.7676e-01 L4_l1linf:1.8555e-01 L5_l1linf:1.8457e-01 L6_l1linf:1.8750e-01 L7_l1linf:1.8359e-01 L8_l1linf:1.7285e-01 L9_l1linf:1.6797e-01 L10_l1linf:1.6309e-01 L11_l1linf:1.4941e-01 L12_l1linf:1.6504e-01 L1_spectral:1.8140e-02 L2_spectral:1.8074e-02 L3_spectral:1.8215e-02 L4_spectral:1.6271e-02 L5_spectral:1.6499e-02 L6_spectral:1.6702e-02 L7_spectral:1.7004e-02 L8_spectral:1.7222e-02 L9_spectral:1.7172e-02 L10_spectral:1.7105e-02 L11_spectral:1.7103e-02 L12_spectral:1.7284e-02 train_time:380518ms step_avg:41.36ms +[2025-09-11 08:57:00] [Rank 0] PRINT: step:9200/10000 val_loss:4.1355 total_sharp:5.4652e-05 L1_sharp:2.4389e-04 L2_sharp:6.3960e-06 L3_sharp:7.1604e-06 L4_sharp:2.6392e-05 L5_sharp:4.2442e-05 L6_sharp:2.3073e-05 L7_sharp:4.9862e-05 L8_sharp:1.0293e-04 L9_sharp:9.6540e-05 L10_sharp:1.3012e-04 L11_sharp:1.8502e-04 L12_sharp:1.1427e-03 total_fnorm:1.5500e+01 total_l1_linf:2.2272e+04 total_spectral:7.9375e+00 L1_fnorm:1.1875e+00 L2_fnorm:1.1172e+00 L3_fnorm:1.1562e+00 L4_fnorm:1.1172e+00 L5_fnorm:1.1250e+00 L6_fnorm:1.1406e+00 L7_fnorm:1.1250e+00 L8_fnorm:1.0859e+00 L9_fnorm:1.1016e+00 L10_fnorm:1.0938e+00 L11_fnorm:1.1016e+00 L12_fnorm:1.1094e+00 L1_l1linf:1.8262e-01 L2_l1linf:1.8066e-01 L3_l1linf:1.7676e-01 L4_l1linf:1.8555e-01 L5_l1linf:1.8457e-01 
L6_l1linf:1.8750e-01 L7_l1linf:1.8359e-01 L8_l1linf:1.7285e-01 L9_l1linf:1.6797e-01 L10_l1linf:1.6309e-01 L11_l1linf:1.4941e-01 L12_l1linf:1.6504e-01 L1_spectral:1.8140e-02 L2_spectral:1.8074e-02 L3_spectral:1.8215e-02 L4_spectral:1.6271e-02 L5_spectral:1.6499e-02 L6_spectral:1.6702e-02 L7_spectral:1.7004e-02 L8_spectral:1.7222e-02 L9_spectral:1.7172e-02 L10_spectral:1.7105e-02 L11_spectral:1.7103e-02 L12_spectral:1.7284e-02 train_time:380518ms step_avg:41.36ms +[2025-09-11 08:57:01] [Rank 0] step:9201/10000 train_time:381720ms step_avg:41.49ms +[2025-09-11 08:57:01] [Rank 0] step:9201/10000 train_time:381720ms step_avg:41.49ms +[2025-09-11 08:57:02] [Rank 0] step:9221/10000 train_time:382445ms step_avg:41.48ms +[2025-09-11 08:57:02] [Rank 0] step:9221/10000 train_time:382445ms step_avg:41.48ms +[2025-09-11 08:57:03] [Rank 0] step:9241/10000 train_time:383155ms step_avg:41.46ms +[2025-09-11 08:57:03] [Rank 0] step:9241/10000 train_time:383155ms step_avg:41.46ms +[2025-09-11 08:57:04] [Rank 0] step:9261/10000 train_time:383869ms step_avg:41.45ms +[2025-09-11 08:57:04] [Rank 0] step:9261/10000 train_time:383869ms step_avg:41.45ms +[2025-09-11 08:57:04] [Rank 0] step:9281/10000 train_time:384593ms step_avg:41.44ms +[2025-09-11 08:57:04] [Rank 0] step:9281/10000 train_time:384593ms step_avg:41.44ms +[2025-09-11 08:57:05] [Rank 0] step:9301/10000 train_time:385302ms step_avg:41.43ms +[2025-09-11 08:57:05] [Rank 0] step:9301/10000 train_time:385302ms step_avg:41.43ms +[2025-09-11 08:57:06] [Rank 0] step:9321/10000 train_time:386018ms step_avg:41.41ms +[2025-09-11 08:57:06] [Rank 0] step:9321/10000 train_time:386018ms step_avg:41.41ms +[2025-09-11 08:57:07] [Rank 0] step:9341/10000 train_time:386726ms step_avg:41.40ms +[2025-09-11 08:57:07] [Rank 0] step:9341/10000 train_time:386726ms step_avg:41.40ms +[2025-09-11 08:57:07] [Rank 0] step:9361/10000 train_time:387436ms step_avg:41.39ms +[2025-09-11 08:57:07] [Rank 0] step:9361/10000 train_time:387436ms step_avg:41.39ms 
+[2025-09-11 08:57:08] [Rank 0] step:9381/10000 train_time:388149ms step_avg:41.38ms +[2025-09-11 08:57:08] [Rank 0] step:9381/10000 train_time:388149ms step_avg:41.38ms +[2025-09-11 08:57:09] [Rank 0] step:9401/10000 train_time:388862ms step_avg:41.36ms +[2025-09-11 08:57:09] [Rank 0] step:9401/10000 train_time:388862ms step_avg:41.36ms +[2025-09-11 08:57:09] [Rank 0] step:9421/10000 train_time:389576ms step_avg:41.35ms +[2025-09-11 08:57:09] [Rank 0] step:9421/10000 train_time:389576ms step_avg:41.35ms +[2025-09-11 08:57:10] [Rank 0] step:9441/10000 train_time:390291ms step_avg:41.34ms +[2025-09-11 08:57:10] [Rank 0] step:9441/10000 train_time:390291ms step_avg:41.34ms +[2025-09-11 08:57:11] [Rank 0] step:9461/10000 train_time:391003ms step_avg:41.33ms +[2025-09-11 08:57:11] [Rank 0] step:9461/10000 train_time:391003ms step_avg:41.33ms +[2025-09-11 08:57:12] [Rank 0] step:9481/10000 train_time:391716ms step_avg:41.32ms +[2025-09-11 08:57:12] [Rank 0] step:9481/10000 train_time:391716ms step_avg:41.32ms +[2025-09-11 08:57:12] [Rank 0] step:9501/10000 train_time:392429ms step_avg:41.30ms +[2025-09-11 08:57:12] [Rank 0] step:9501/10000 train_time:392429ms step_avg:41.30ms +[2025-09-11 08:57:13] [Rank 0] step:9521/10000 train_time:393143ms step_avg:41.29ms +[2025-09-11 08:57:13] [Rank 0] step:9521/10000 train_time:393143ms step_avg:41.29ms +[2025-09-11 08:57:14] [Rank 0] step:9541/10000 train_time:393852ms step_avg:41.28ms +[2025-09-11 08:57:14] [Rank 0] step:9541/10000 train_time:393852ms step_avg:41.28ms +[2025-09-11 08:57:14] [Rank 0] step:9561/10000 train_time:394564ms step_avg:41.27ms +[2025-09-11 08:57:14] [Rank 0] step:9561/10000 train_time:394564ms step_avg:41.27ms +[2025-09-11 08:57:15] [Rank 0] step:9581/10000 train_time:395277ms step_avg:41.26ms +[2025-09-11 08:57:15] [Rank 0] step:9581/10000 train_time:395277ms step_avg:41.26ms +[2025-09-11 08:57:16] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 08:57:16] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 08:57:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 08:57:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 08:57:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 08:57:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 08:57:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:57:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:57:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 08:57:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 08:57:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 08:57:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 08:57:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 08:57:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 08:57:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 08:57:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 08:57:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 08:57:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 08:57:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 08:57:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 08:57:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 08:57:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 08:57:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 08:57:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 08:57:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 08:57:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 08:57:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 08:57:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 08:57:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 08:57:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 08:57:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 08:57:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 08:57:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 08:57:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 08:57:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 08:57:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 08:57:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 08:57:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 08:57:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 08:57:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 08:57:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 08:57:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 08:57:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 08:57:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 08:57:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 08:57:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 08:57:26] [Rank 0] PRINT: step:9600/10000 val_loss:4.1237 total_sharp:3.3911e-05 L1_sharp:1.8980e-04 L2_sharp:9.8421e-06 L3_sharp:1.3549e-05 L4_sharp:3.5940e-05 L5_sharp:3.3469e-05 L6_sharp:1.4740e-05 L7_sharp:3.0871e-05 L8_sharp:7.2932e-05 L9_sharp:7.3308e-05 L10_sharp:9.4084e-05 L11_sharp:1.2019e-04 L12_sharp:5.3404e-04 total_fnorm:8.8125e+00 total_l1_linf:1.0688e+04 total_spectral:4.5625e+00 L1_fnorm:6.5234e-01 L2_fnorm:6.3672e-01 L3_fnorm:6.5234e-01 L4_fnorm:6.2891e-01 L5_fnorm:6.3672e-01 L6_fnorm:6.3672e-01 L7_fnorm:6.3281e-01 L8_fnorm:6.0938e-01 L9_fnorm:6.1719e-01 L10_fnorm:6.1328e-01 L11_fnorm:6.2109e-01 L12_fnorm:6.2891e-01 L1_l1linf:8.6914e-02 L2_l1linf:1.0254e-01 L3_l1linf:9.7656e-02 L4_l1linf:8.8867e-02 L5_l1linf:9.2285e-02 L6_l1linf:8.8867e-02 L7_l1linf:8.7402e-02 L8_l1linf:9.0820e-02 L9_l1linf:8.1543e-02 L10_l1linf:7.4707e-02 L11_l1linf:7.3242e-02 L12_l1linf:8.4473e-02 L1_spectral:1.0387e-02 L2_spectral:1.0647e-02 L3_spectral:1.0800e-02 L4_spectral:9.2261e-03 L5_spectral:9.3745e-03 L6_spectral:9.6122e-03 L7_spectral:9.6340e-03 L8_spectral:1.0028e-02 L9_spectral:9.7478e-03 L10_spectral:9.8082e-03 L11_spectral:9.7760e-03 L12_spectral:9.8491e-03 train_time:395965ms step_avg:41.25ms +[2025-09-11 08:57:26] [Rank 0] PRINT: step:9600/10000 
val_loss:4.1237 total_sharp:3.3911e-05 L1_sharp:1.8980e-04 L2_sharp:9.8421e-06 L3_sharp:1.3549e-05 L4_sharp:3.5940e-05 L5_sharp:3.3469e-05 L6_sharp:1.4740e-05 L7_sharp:3.0871e-05 L8_sharp:7.2932e-05 L9_sharp:7.3308e-05 L10_sharp:9.4084e-05 L11_sharp:1.2019e-04 L12_sharp:5.3404e-04 total_fnorm:8.8125e+00 total_l1_linf:1.0688e+04 total_spectral:4.5625e+00 L1_fnorm:6.5234e-01 L2_fnorm:6.3672e-01 L3_fnorm:6.5234e-01 L4_fnorm:6.2891e-01 L5_fnorm:6.3672e-01 L6_fnorm:6.3672e-01 L7_fnorm:6.3281e-01 L8_fnorm:6.0938e-01 L9_fnorm:6.1719e-01 L10_fnorm:6.1328e-01 L11_fnorm:6.2109e-01 L12_fnorm:6.2891e-01 L1_l1linf:8.6914e-02 L2_l1linf:1.0254e-01 L3_l1linf:9.7656e-02 L4_l1linf:8.8867e-02 L5_l1linf:9.2285e-02 L6_l1linf:8.8867e-02 L7_l1linf:8.7402e-02 L8_l1linf:9.0820e-02 L9_l1linf:8.1543e-02 L10_l1linf:7.4707e-02 L11_l1linf:7.3242e-02 L12_l1linf:8.4473e-02 L1_spectral:1.0387e-02 L2_spectral:1.0647e-02 L3_spectral:1.0800e-02 L4_spectral:9.2261e-03 L5_spectral:9.3745e-03 L6_spectral:9.6122e-03 L7_spectral:9.6340e-03 L8_spectral:1.0028e-02 L9_spectral:9.7478e-03 L10_spectral:9.8082e-03 L11_spectral:9.7760e-03 L12_spectral:9.8491e-03 train_time:395965ms step_avg:41.25ms +[2025-09-11 08:57:27] [Rank 0] step:9601/10000 train_time:397166ms step_avg:41.37ms +[2025-09-11 08:57:27] [Rank 0] step:9601/10000 train_time:397166ms step_avg:41.37ms +[2025-09-11 08:57:28] [Rank 0] step:9621/10000 train_time:397912ms step_avg:41.36ms +[2025-09-11 08:57:28] [Rank 0] step:9621/10000 train_time:397912ms step_avg:41.36ms +[2025-09-11 08:57:29] [Rank 0] step:9641/10000 train_time:398629ms step_avg:41.35ms +[2025-09-11 08:57:29] [Rank 0] step:9641/10000 train_time:398629ms step_avg:41.35ms +[2025-09-11 08:57:29] [Rank 0] step:9661/10000 train_time:399354ms step_avg:41.34ms +[2025-09-11 08:57:29] [Rank 0] step:9661/10000 train_time:399354ms step_avg:41.34ms +[2025-09-11 08:57:30] [Rank 0] step:9681/10000 train_time:400070ms step_avg:41.33ms +[2025-09-11 08:57:30] [Rank 0] step:9681/10000 
train_time:400070ms step_avg:41.33ms +[2025-09-11 08:57:31] [Rank 0] step:9701/10000 train_time:400789ms step_avg:41.31ms +[2025-09-11 08:57:31] [Rank 0] step:9701/10000 train_time:400789ms step_avg:41.31ms +[2025-09-11 08:57:31] [Rank 0] step:9721/10000 train_time:401511ms step_avg:41.30ms +[2025-09-11 08:57:31] [Rank 0] step:9721/10000 train_time:401511ms step_avg:41.30ms +[2025-09-11 08:57:32] [Rank 0] step:9741/10000 train_time:402231ms step_avg:41.29ms +[2025-09-11 08:57:32] [Rank 0] step:9741/10000 train_time:402231ms step_avg:41.29ms +[2025-09-11 08:57:33] [Rank 0] step:9761/10000 train_time:402949ms step_avg:41.28ms +[2025-09-11 08:57:33] [Rank 0] step:9761/10000 train_time:402949ms step_avg:41.28ms +[2025-09-11 08:57:34] [Rank 0] step:9781/10000 train_time:403666ms step_avg:41.27ms +[2025-09-11 08:57:34] [Rank 0] step:9781/10000 train_time:403666ms step_avg:41.27ms +[2025-09-11 08:57:34] [Rank 0] step:9801/10000 train_time:404390ms step_avg:41.26ms +[2025-09-11 08:57:34] [Rank 0] step:9801/10000 train_time:404390ms step_avg:41.26ms +[2025-09-11 08:57:35] [Rank 0] step:9821/10000 train_time:405111ms step_avg:41.25ms +[2025-09-11 08:57:35] [Rank 0] step:9821/10000 train_time:405111ms step_avg:41.25ms +[2025-09-11 08:57:36] [Rank 0] step:9841/10000 train_time:405854ms step_avg:41.24ms +[2025-09-11 08:57:36] [Rank 0] step:9841/10000 train_time:405854ms step_avg:41.24ms +[2025-09-11 08:57:37] [Rank 0] step:9861/10000 train_time:406574ms step_avg:41.23ms +[2025-09-11 08:57:37] [Rank 0] step:9861/10000 train_time:406574ms step_avg:41.23ms +[2025-09-11 08:57:37] [Rank 0] step:9881/10000 train_time:407293ms step_avg:41.22ms +[2025-09-11 08:57:37] [Rank 0] step:9881/10000 train_time:407293ms step_avg:41.22ms +[2025-09-11 08:57:38] [Rank 0] step:9901/10000 train_time:408010ms step_avg:41.21ms +[2025-09-11 08:57:38] [Rank 0] step:9901/10000 train_time:408010ms step_avg:41.21ms +[2025-09-11 08:57:39] [Rank 0] step:9921/10000 train_time:408729ms step_avg:41.20ms 
+[2025-09-11 08:57:39] [Rank 0] step:9921/10000 train_time:408729ms step_avg:41.20ms +[2025-09-11 08:57:39] [Rank 0] step:9941/10000 train_time:409453ms step_avg:41.19ms +[2025-09-11 08:57:39] [Rank 0] step:9941/10000 train_time:409453ms step_avg:41.19ms +[2025-09-11 08:57:40] [Rank 0] step:9961/10000 train_time:410432ms step_avg:41.20ms +[2025-09-11 08:57:40] [Rank 0] step:9961/10000 train_time:410432ms step_avg:41.20ms +[2025-09-11 08:57:41] [Rank 0] step:9981/10000 train_time:411415ms step_avg:41.22ms +[2025-09-11 08:57:41] [Rank 0] step:9981/10000 train_time:411415ms step_avg:41.22ms +[2025-09-11 08:57:42] [Rank 0] step:10000/10000 train_time:412106ms step_avg:41.21ms +[2025-09-11 08:57:42] [Rank 0] step:10000/10000 train_time:412106ms step_avg:41.21ms +[2025-09-11 08:57:42] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 08:57:42] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 08:57:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 08:57:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 08:57:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 08:57:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 08:57:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:57:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:57:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 08:57:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 08:57:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 08:57:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 08:57:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 08:57:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 08:57:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 08:57:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 08:57:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 08:57:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 08:57:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 08:57:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 08:57:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 08:57:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 08:57:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 08:57:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 08:57:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 08:57:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 08:57:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 08:57:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 08:57:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 08:57:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 08:57:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 08:57:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 08:57:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 08:57:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 08:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 08:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 08:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 08:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 08:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 08:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 08:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 08:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 08:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 08:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 08:57:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 08:57:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:57:52] [Rank 0] PRINT: step:10000/10000 val_loss:4.1197 total_sharp:2.2206e-05 L1_sharp:1.4442e-04 L2_sharp:1.7876e-05 L3_sharp:1.1039e-05 L4_sharp:2.8567e-05 L5_sharp:3.5994e-05 L6_sharp:1.3652e-05 L7_sharp:2.3530e-05 L8_sharp:5.0787e-05 L9_sharp:5.0927e-05 L10_sharp:7.8724e-05 L11_sharp:1.1216e-04 L12_sharp:4.6004e-04 total_fnorm:3.4062e+00 total_l1_linf:3.0240e+03 total_spectral:1.7578e+00 L1_fnorm:2.6172e-01 L2_fnorm:2.4414e-01 L3_fnorm:2.5195e-01 L4_fnorm:2.4219e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.3730e-01 L9_fnorm:2.4219e-01 L10_fnorm:2.4023e-01 L11_fnorm:2.4121e-01 L12_fnorm:2.4414e-01 L1_l1linf:2.6489e-02 L2_l1linf:3.5400e-02 L3_l1linf:3.1494e-02 L4_l1linf:2.6367e-02 L5_l1linf:2.7100e-02 L6_l1linf:2.9297e-02 L7_l1linf:2.8320e-02 L8_l1linf:2.7466e-02 L9_l1linf:2.4292e-02 L10_l1linf:2.4536e-02 L11_l1linf:2.2339e-02 L12_l1linf:2.4658e-02 L1_spectral:4.1595e-03 L2_spectral:4.3147e-03 L3_spectral:4.2985e-03 L4_spectral:3.6397e-03 L5_spectral:3.7354e-03 L6_spectral:3.7874e-03 L7_spectral:3.8582e-03 L8_spectral:4.1117e-03 L9_spectral:3.9231e-03 L10_spectral:3.9268e-03 L11_spectral:3.9620e-03 L12_spectral:3.9196e-03 train_time:412127ms step_avg:41.21ms +[2025-09-11 08:57:52] [Rank 0] PRINT: step:10000/10000 val_loss:4.1197 total_sharp:2.2206e-05 L1_sharp:1.4442e-04 L2_sharp:1.7876e-05 L3_sharp:1.1039e-05 L4_sharp:2.8567e-05 L5_sharp:3.5994e-05 L6_sharp:1.3652e-05 L7_sharp:2.3530e-05 L8_sharp:5.0787e-05 L9_sharp:5.0927e-05 L10_sharp:7.8724e-05 L11_sharp:1.1216e-04 L12_sharp:4.6004e-04 total_fnorm:3.4062e+00 total_l1_linf:3.0240e+03 total_spectral:1.7578e+00 L1_fnorm:2.6172e-01 L2_fnorm:2.4414e-01 L3_fnorm:2.5195e-01 L4_fnorm:2.4219e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.3730e-01 L9_fnorm:2.4219e-01 L10_fnorm:2.4023e-01 L11_fnorm:2.4121e-01 L12_fnorm:2.4414e-01 L1_l1linf:2.6489e-02 L2_l1linf:3.5400e-02 L3_l1linf:3.1494e-02 L4_l1linf:2.6367e-02 L5_l1linf:2.7100e-02 
L6_l1linf:2.9297e-02 L7_l1linf:2.8320e-02 L8_l1linf:2.7466e-02 L9_l1linf:2.4292e-02 L10_l1linf:2.4536e-02 L11_l1linf:2.2339e-02 L12_l1linf:2.4658e-02 L1_spectral:4.1595e-03 L2_spectral:4.3147e-03 L3_spectral:4.2985e-03 L4_spectral:3.6397e-03 L5_spectral:3.7354e-03 L6_spectral:3.7874e-03 L7_spectral:3.8582e-03 L8_spectral:4.1117e-03 L9_spectral:3.9231e-03 L10_spectral:3.9268e-03 L11_spectral:3.9620e-03 L12_spectral:3.9196e-03 train_time:412127ms step_avg:41.21ms +[2025-09-11 08:57:52] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 08:57:52 2025 --- +[2025-09-11 08:57:52] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 08:57:52 2025 --- +[2025-09-11 08:57:52] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 08:57:52] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.1_seed_44/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.1_seed_44/config.json new file mode 100644 index 0000000000000000000000000000000000000000..e67b3217af848598ea77828653baf3a48bf6b48b --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.1_seed_44/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 44, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.02, + "muon_lr": 0.1, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "285ae0a0-6974-4db5-8041-853f49b6f74a", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.1_seed_44/training_log_285ae0a0-6974-4db5-8041-853f49b6f74a.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.1_seed_44/training_log_285ae0a0-6974-4db5-8041-853f49b6f74a.txt new file mode 100644 index 0000000000000000000000000000000000000000..60af36420a3a7b93db11f48f595acd6e2a439d58 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.1_seed_44/training_log_285ae0a0-6974-4db5-8041-853f49b6f74a.txt @@ -0,0 +1,4264 @@ +[2025-09-11 08:31:30] [Rank 0] PRINT: --- Script Start: Thu Sep 11 08:31:30 2025 --- +[2025-09-11 08:31:30] [Rank 0] PRINT: --- Script Start: Thu Sep 11 08:31:30 2025 --- +[2025-09-11 08:31:30] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.02, muon_lr=0.1, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 08:31:30] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.02, muon_lr=0.1, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 08:31:30] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 08:31:30] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 08:31:30] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 08:31:30] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-11 08:31:30] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.1_seed_44 +[2025-09-11 08:31:30] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.02_muon_lr_0.1_seed_44 +[2025-09-11 08:31:30] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, 
asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, 
"unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." 
+ "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention 
sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
def format_comprehensive_results(results):
    """
    Render the comprehensive sharpness/norm analysis dict as one log string.

    Args:
        results: dict produced by calculate_comprehensive_sharpness; keys like
            'total_sharpness', 'layer_{i}_sharpness', 'total_update_fnorm',
            'layer_{i}_update_fnorm', 'layer_{i}_max_l1_linf_norm',
            'layer_{i}_max_spectral_norm'. Missing keys are simply skipped.

    Returns:
        A single space-separated string of "label:value" tokens (values in
        scientific notation), in the fixed order: total sharpness, per-layer
        sharpness, total norms, per-layer fnorm / l1linf / spectral norms.
    """
    # The original had six copy-pasted loops over layers 1-12; this helper
    # collapses them while producing the exact same tokens.
    def _collect(key_fmt, label_fmt):
        # Gather per-layer metrics (layers 1..12) into "L{i}_xxx:val" tokens.
        return [label_fmt.format(i=i, v=results[key_fmt.format(i=i)])
                for i in range(1, 13) if key_fmt.format(i=i) in results]

    log_parts = []

    # Total sharpness
    if 'total_sharpness' in results:
        log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}")

    # Layer-wise sharpness
    layer_sharpness = _collect("layer_{i}_sharpness", "L{i}_sharp:{v:.4e}")
    if layer_sharpness:
        log_parts.append(" ".join(layer_sharpness))

    # Total update norms (Frobenius, Max-of-Max, Spectral)
    total_norms = [f"{label}:{results[key]:.4e}"
                   for key, label in (("total_update_fnorm", "total_fnorm"),
                                      ("total_l1_linf_norm", "total_l1_linf"),
                                      ("total_spectral_norm", "total_spectral"))
                   if key in results]
    if total_norms:
        log_parts.append(" ".join(total_norms))

    # Layer-wise update norms: Frobenius, Max-of-Max (l1->linf), Spectral —
    # emitted in the same order as the original implementation.
    for key_fmt, label_fmt in (("layer_{i}_update_fnorm", "L{i}_fnorm:{v:.4e}"),
                               ("layer_{i}_max_l1_linf_norm", "L{i}_l1linf:{v:.4e}"),
                               ("layer_{i}_max_spectral_norm", "L{i}_spectral:{v:.4e}")):
        items = _collect(key_fmt, label_fmt)
        if items:
            log_parts.append(" ".join(items))

    return " ".join(log_parts)
# ---------------------------------------------------------------------------
# Main training loop: runs num_iterations steps; validation (plus optional
# sharpness analysis) every val_loss_every steps and once more at the end.
# training_time_ms accumulates only time spent *training* — the timer is
# stopped before validation/sharpness and restarted after logging.
# ---------------------------------------------------------------------------
print0("PRINT: Starting training...", console=True)
train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
training_time_ms = 0
torch.cuda.synchronize()
t0 = time.perf_counter()
train_steps = args.num_iterations

for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation)
    last_step = (step == train_steps)

    # --------------- VALIDATION SECTION -----------------
    # Validate at specified intervals and at the very last step (step 0 is skipped)
    if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0):
        torch.cuda.synchronize()
        current_run_time = 1000 * (time.perf_counter() - t0)
        training_time_ms += current_run_time

        # --- 1. Compute baseline validation loss ---
        print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True)
        val_loss_item = calculate_validation_loss(
            model_compiled,
            args.val_files,
            world_size * args.val_seq_len,
            args.val_tokens,
            step,
            get_window_size_blocks(step)
        )

        # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) ---
        sharpness_log_str = ""
        # Only run sharpness for compatible models
        # NOTE(review): "gated" is accepted here but is not in the argparse
        # choices for --model_parameterization — confirm intended.
        if exp_args.model_parameterization in ["qkvo", "gated"]:
            comprehensive_results = calculate_comprehensive_sharpness(
                model, model_compiled, optimizers, step, args, rank, world_size,
                print0, get_window_size_blocks, distributed_data_generator
            )
            sharpness_log_str = format_comprehensive_results(comprehensive_results)
        else:
            print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True)

        # --- 3. Logging ---
        avg_step_time = training_time_ms / max(step, 1)
        print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)

        # --- 4. Reset timer for the next training segment ---
        # (so validation/sharpness cost is excluded from training_time_ms)
        torch.cuda.synchronize()
        t0 = time.perf_counter()


    if last_step:
        # Optional final checkpoint, then leave the loop — no training step
        # is executed for step == num_iterations.
        if master_process and args.save_checkpoint:
            if run_dir_path_str: # Ensure run_dir_path_str is set by master process
                checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
                checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir
                checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
                log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled
                                      optimizers=[opt.state_dict() for opt in optimizers])
                torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save
                print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
            else:
                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
        break


    # --------------- TRAINING SECTION -----------------
    try:
        inputs, targets = next(train_loader)
    except StopIteration:
        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
        break # End if data runs out

    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled
    loss_train.backward()

    # Gradient averaging across ranks (manual DDP — model is not wrapped).
    for param in model_compiled.parameters(): # Use model_compiled
        if param.grad is not None: # Check if grad exists
            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)

    # LR schedule: every group's lr is its initial_lr scaled by get_lr(step).
    current_lr_val = get_lr(step)
    for opt in optimizers:
        for group in opt.param_groups:
            group["lr"] = group["initial_lr"] * current_lr_val

    # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists ---
    if optimizer2 is not None: # Check if Muon optimizer was created
        for group in optimizer2.param_groups:
            frac = min(step / 300, 1) # momentum warmup for muon: 0.85 -> 0.95 over 300 steps
            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95

    for opt in optimizers:
        opt.step()

    model_compiled.zero_grad(set_to_none=True) # Use model_compiled

    # Logging (less frequent for training steps)
    if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val
        # This time is for the current segment since last validation / t0 reset
        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
        # approx_training_time_ms is the total cumulative time
        approx_total_training_time_ms = training_time_ms + current_segment_time_ms

        total_tokens_in_batch = args.train_seq_len * world_size
        # NOTE(review): train_loss_per_token is computed but never logged below.
        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()

        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too




print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 08:31:30] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
# -----------------------------------------------------------------------------
# Our own simple Distributed Data Loader (KEEP AS IS)
def _load_data_shard(file: Path):
    """Read one pre-tokenized .bin shard into a pinned uint16 CPU tensor.

    Shard layout: 256 int32 header words (magic, version, token count),
    followed by the uint16 token stream.
    """
    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
    assert header[1] == 1, "unsupported version"
    num_tokens = int(header[2])
    with file.open("rb", buffering=0) as f:
        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)  # pinned for async H2D copies
        f.seek(256 * 4)  # skip the 256-word int32 header
        nbytes = f.readinto(tokens.numpy())  # read directly into the tensor's buffer
        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
    return tokens

def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int):
    """Infinite generator of (inputs, targets) CUDA tensors for this rank.

    batch_size is the GLOBAL token count per step; each rank slices its own
    contiguous local_batch_size window. Shards are cycled, so this never
    raises StopIteration (training is effectively multi-epoch).
    """
    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
    assert batch_size % world_size == 0
    local_batch_size = batch_size // world_size
    file_iter = cycle(files) # cycling shards -> multi-epoch; never exhausts
    tokens, pos = _load_data_shard(next(file_iter)), 0
    while True:
        if pos + batch_size + 1 >= len(tokens):  # +1: targets are shifted by one token
            tokens, pos = _load_data_shard(next(file_iter)), 0
        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
        pos += batch_size
        yield inputs, targets


# -----------------------------------------------------------------------------
# int main — CLI parsing, seeding, and model-architecture selection
parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
parser.add_argument("--optimizer_mode", type=int, default=0,
                    help="Defines how Muon is applied. "
                         "0: Muon(All Hidden Attn+MLP - original); "
                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
                         "3: Muon(All Attn)/Adam(MLP); "
                         "4: Muon(MLP)/Adam(All Attn)"
                         "5: All Adam (No Muon, all applicable matrices to Adam)."
                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)."
                         "7: Muon(VO Attn, MLP)/Adam(QK Attn)."
                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
                    )
# NOTE(review): later code also tests for model_parameterization == "gated",
# which these choices reject — confirm whether "gated" should be allowed here.
parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"])
parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer")
parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer")
parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs")
exp_args = parser.parse_args()
set_seed(exp_args.seed)

# --- MODIFICATION: Import correct GPT model based on --unet flag ---
if exp_args.unet:
    print("Using U-net architecture")
    from models.nano_GPT_unet import GPT
elif exp_args.model_parameterization == "qkvo":
    print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w")
    # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w

    from models.nano_GPT_qkvo_simp import GPT

elif exp_args.model_parameterization == "whole":
    print("Using original architecture")
    from models.nano_GPT import GPT
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
def print0(s, console=False):
    """
    Master-process-only logger.

    Prefixes the message with a timestamp and rank, appends it to the run
    logfile, and echoes it to stdout when console=True or when the message
    carries the "PRINT:" tag (the tag itself is stripped before printing).
    Non-master ranks do nothing.

    Args:
        s: message; a leading "PRINT:" marks it as console-worthy.
        console: force printing to stdout even without the "PRINT:" tag.
    """
    if master_process:
        # Add timestamp and rank for better log readability
        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        log_message = f"[{timestamp}] [Rank {rank}] {s}"

        # Print to console if requested or if it's a specific "PRINT:" message
        if console or s.startswith("PRINT:"):
            actual_s = s[6:] if s.startswith("PRINT:") else s
            print(actual_s) # Print to stdout for master process

        # BUG FIX: the original contained two identical `with open(logfile, "a")`
        # write blocks, so every message was appended to the logfile twice
        # (visible as duplicated lines at the top of existing logs). Write once.
        if logfile:
            with open(logfile, "a") as f:
                f.write(log_message + "\n")
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
# Helper functions for additional update norms
def calculate_l1_to_linf_norm(matrix):
    """
    Max row-L1 norm of a matrix (the code's "l1->linf" metric); plain L1
    norm for 1-D tensors; higher-rank tensors are flattened to 2-D first.

    NOTE(review): max row-L1 is the induced l_inf->l_inf operator norm; the
    true l1->l_inf induced norm would be the max absolute entry. Behavior is
    kept unchanged here — confirm intent with the downstream analysis.
    """
    if matrix.ndim == 1:
        return torch.sum(torch.abs(matrix))
    # Treat each leading-dim slice as a row for rank > 2.
    rows = matrix if matrix.ndim == 2 else matrix.view(matrix.shape[0], -1)
    return torch.max(torch.sum(torch.abs(rows), dim=1))

def calculate_spectral_norm(matrix):
    """
    Largest singular value of a matrix; L2 norm for 1-D tensors.
    Half-precision inputs are promoted to float32 for torch.linalg support.
    """
    if matrix.dtype in (torch.bfloat16, torch.float16):
        matrix = matrix.float()
    if matrix.ndim == 1:
        return torch.norm(matrix, p=2)
    mat2d = matrix if matrix.ndim == 2 else matrix.view(matrix.shape[0], -1)
    return torch.linalg.matrix_norm(mat2d, ord=2)
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 08:31:30] [Rank 0] PRINT: Constructing model... +[2025-09-11 08:31:30] [Rank 0] PRINT: Constructing model... +[2025-09-11 08:31:31] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 08:31:31] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 08:31:31] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 08:31:31] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 08:31:31] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 08:31:31] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 08:31:31] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 08:31:31] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 08:31:31] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 08:31:31] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 08:31:33] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 08:31:33] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 08:31:33] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 08:31:33] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 08:31:33] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 08:31:33] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 08:31:39] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 08:31:39] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 08:31:39] [Rank 0] PRINT: Starting warmup... +[2025-09-11 08:31:39] [Rank 0] PRINT: Starting warmup... +[2025-09-11 08:32:15] [Rank 0] PRINT: Warmup complete. +[2025-09-11 08:32:15] [Rank 0] PRINT: Warmup complete. +[2025-09-11 08:32:15] [Rank 0] PRINT: Starting training... +[2025-09-11 08:32:15] [Rank 0] PRINT: Starting training... 
+[2025-09-11 08:32:16] [Rank 0] step:21/10000 train_time:934ms step_avg:44.48ms +[2025-09-11 08:32:16] [Rank 0] step:21/10000 train_time:934ms step_avg:44.48ms +[2025-09-11 08:32:17] [Rank 0] step:41/10000 train_time:1663ms step_avg:40.56ms +[2025-09-11 08:32:17] [Rank 0] step:41/10000 train_time:1663ms step_avg:40.56ms +[2025-09-11 08:32:17] [Rank 0] step:61/10000 train_time:2392ms step_avg:39.21ms +[2025-09-11 08:32:17] [Rank 0] step:61/10000 train_time:2392ms step_avg:39.21ms +[2025-09-11 08:32:18] [Rank 0] step:81/10000 train_time:3121ms step_avg:38.53ms +[2025-09-11 08:32:18] [Rank 0] step:81/10000 train_time:3121ms step_avg:38.53ms +[2025-09-11 08:32:19] [Rank 0] step:101/10000 train_time:3849ms step_avg:38.11ms +[2025-09-11 08:32:19] [Rank 0] step:101/10000 train_time:3849ms step_avg:38.11ms +[2025-09-11 08:32:20] [Rank 0] step:121/10000 train_time:4578ms step_avg:37.84ms +[2025-09-11 08:32:20] [Rank 0] step:121/10000 train_time:4578ms step_avg:37.84ms +[2025-09-11 08:32:20] [Rank 0] step:141/10000 train_time:5307ms step_avg:37.64ms +[2025-09-11 08:32:20] [Rank 0] step:141/10000 train_time:5307ms step_avg:37.64ms +[2025-09-11 08:32:21] [Rank 0] step:161/10000 train_time:6035ms step_avg:37.48ms +[2025-09-11 08:32:21] [Rank 0] step:161/10000 train_time:6035ms step_avg:37.48ms +[2025-09-11 08:32:22] [Rank 0] step:181/10000 train_time:7021ms step_avg:38.79ms +[2025-09-11 08:32:22] [Rank 0] step:181/10000 train_time:7021ms step_avg:38.79ms +[2025-09-11 08:32:23] [Rank 0] step:201/10000 train_time:7975ms step_avg:39.68ms +[2025-09-11 08:32:23] [Rank 0] step:201/10000 train_time:7975ms step_avg:39.68ms +[2025-09-11 08:32:24] [Rank 0] step:221/10000 train_time:8703ms step_avg:39.38ms +[2025-09-11 08:32:24] [Rank 0] step:221/10000 train_time:8703ms step_avg:39.38ms +[2025-09-11 08:32:25] [Rank 0] step:241/10000 train_time:9679ms step_avg:40.16ms +[2025-09-11 08:32:25] [Rank 0] step:241/10000 train_time:9679ms step_avg:40.16ms +[2025-09-11 08:32:25] [Rank 0] 
step:261/10000 train_time:10412ms step_avg:39.89ms +[2025-09-11 08:32:25] [Rank 0] step:261/10000 train_time:10412ms step_avg:39.89ms +[2025-09-11 08:32:26] [Rank 0] step:281/10000 train_time:11139ms step_avg:39.64ms +[2025-09-11 08:32:26] [Rank 0] step:281/10000 train_time:11139ms step_avg:39.64ms +[2025-09-11 08:32:27] [Rank 0] step:301/10000 train_time:11866ms step_avg:39.42ms +[2025-09-11 08:32:27] [Rank 0] step:301/10000 train_time:11866ms step_avg:39.42ms +[2025-09-11 08:32:28] [Rank 0] step:321/10000 train_time:12593ms step_avg:39.23ms +[2025-09-11 08:32:28] [Rank 0] step:321/10000 train_time:12593ms step_avg:39.23ms +[2025-09-11 08:32:28] [Rank 0] step:341/10000 train_time:13320ms step_avg:39.06ms +[2025-09-11 08:32:28] [Rank 0] step:341/10000 train_time:13320ms step_avg:39.06ms +[2025-09-11 08:32:29] [Rank 0] step:361/10000 train_time:14048ms step_avg:38.92ms +[2025-09-11 08:32:29] [Rank 0] step:361/10000 train_time:14048ms step_avg:38.92ms +[2025-09-11 08:32:30] [Rank 0] step:381/10000 train_time:14775ms step_avg:38.78ms +[2025-09-11 08:32:30] [Rank 0] step:381/10000 train_time:14775ms step_avg:38.78ms +[2025-09-11 08:32:30] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 08:32:30] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 08:32:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 08:32:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 08:33:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 08:33:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 08:33:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:33:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 08:33:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 08:33:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 08:33:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 08:33:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 08:33:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 08:33:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 08:33:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 08:33:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 08:33:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 08:33:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 08:33:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 08:33:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 08:33:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 08:33:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 08:33:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 08:33:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 08:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 08:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 08:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 08:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 08:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 08:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 08:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 08:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 08:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 08:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 08:33:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 08:33:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 08:33:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 08:33:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 08:33:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 08:33:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 08:33:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 08:33:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 08:33:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 08:33:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 08:33:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 08:33:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:33:18] [Rank 0] PRINT: step:400/10000 val_loss:6.0242 total_sharp:1.1109e-03 L1_sharp:7.4898e-03 L2_sharp:8.5339e-04 L3_sharp:2.1531e-04 L4_sharp:1.3044e-04 L5_sharp:1.2677e-04 L6_sharp:1.7310e-04 L7_sharp:1.7719e-04 L8_sharp:1.6841e-04 L9_sharp:3.2247e-04 L10_sharp:3.8943e-04 L11_sharp:5.4669e-04 L12_sharp:1.9047e-03 total_fnorm:8.9648e+01 total_l1_linf:3.1685e+05 total_spectral:4.4827e+01 L1_fnorm:1.2228e+01 L2_fnorm:1.1614e+01 L3_fnorm:1.1427e+01 L4_fnorm:1.1320e+01 L5_fnorm:1.1182e+01 L6_fnorm:1.1265e+01 L7_fnorm:1.1080e+01 L8_fnorm:1.1085e+01 L9_fnorm:1.0974e+01 L10_fnorm:1.0839e+01 L11_fnorm:1.0710e+01 L12_fnorm:1.0330e+01 L1_l1linf:3.5519e+00 L2_l1linf:3.4592e+00 L3_l1linf:3.5909e+00 L4_l1linf:3.8075e+00 L5_l1linf:3.6716e+00 L6_l1linf:3.6468e+00 L7_l1linf:3.5564e+00 L8_l1linf:3.4607e+00 L9_l1linf:3.3190e+00 L10_l1linf:3.2138e+00 L11_l1linf:2.9860e+00 L12_l1linf:2.5873e+00 L1_spectral:1.2193e-01 L2_spectral:1.2169e-01 L3_spectral:1.2142e-01 L4_spectral:1.2149e-01 L5_spectral:1.2133e-01 L6_spectral:1.2132e-01 L7_spectral:1.2115e-01 L8_spectral:1.2113e-01 L9_spectral:1.2133e-01 L10_spectral:1.2116e-01 L11_spectral:1.2126e-01 L12_spectral:1.2129e-01 train_time:15482ms step_avg:38.71ms +[2025-09-11 08:33:18] [Rank 0] PRINT: step:400/10000 val_loss:6.0242 total_sharp:1.1109e-03 L1_sharp:7.4898e-03 L2_sharp:8.5339e-04 L3_sharp:2.1531e-04 L4_sharp:1.3044e-04 L5_sharp:1.2677e-04 L6_sharp:1.7310e-04 L7_sharp:1.7719e-04 L8_sharp:1.6841e-04 L9_sharp:3.2247e-04 L10_sharp:3.8943e-04 L11_sharp:5.4669e-04 L12_sharp:1.9047e-03 total_fnorm:8.9648e+01 total_l1_linf:3.1685e+05 total_spectral:4.4827e+01 L1_fnorm:1.2228e+01 L2_fnorm:1.1614e+01 L3_fnorm:1.1427e+01 L4_fnorm:1.1320e+01 L5_fnorm:1.1182e+01 L6_fnorm:1.1265e+01 L7_fnorm:1.1080e+01 L8_fnorm:1.1085e+01 L9_fnorm:1.0974e+01 L10_fnorm:1.0839e+01 L11_fnorm:1.0710e+01 L12_fnorm:1.0330e+01 L1_l1linf:3.5519e+00 L2_l1linf:3.4592e+00 L3_l1linf:3.5909e+00 L4_l1linf:3.8075e+00 L5_l1linf:3.6716e+00 
L6_l1linf:3.6468e+00 L7_l1linf:3.5564e+00 L8_l1linf:3.4607e+00 L9_l1linf:3.3190e+00 L10_l1linf:3.2138e+00 L11_l1linf:2.9860e+00 L12_l1linf:2.5873e+00 L1_spectral:1.2193e-01 L2_spectral:1.2169e-01 L3_spectral:1.2142e-01 L4_spectral:1.2149e-01 L5_spectral:1.2133e-01 L6_spectral:1.2132e-01 L7_spectral:1.2115e-01 L8_spectral:1.2113e-01 L9_spectral:1.2133e-01 L10_spectral:1.2116e-01 L11_spectral:1.2126e-01 L12_spectral:1.2129e-01 train_time:15482ms step_avg:38.71ms +[2025-09-11 08:33:48] [Rank 0] step:401/10000 train_time:45863ms step_avg:114.37ms +[2025-09-11 08:33:48] [Rank 0] step:401/10000 train_time:45863ms step_avg:114.37ms +[2025-09-11 08:33:50] [Rank 0] step:421/10000 train_time:47793ms step_avg:113.52ms +[2025-09-11 08:33:50] [Rank 0] step:421/10000 train_time:47793ms step_avg:113.52ms +[2025-09-11 08:33:51] [Rank 0] step:441/10000 train_time:48433ms step_avg:109.83ms +[2025-09-11 08:33:51] [Rank 0] step:441/10000 train_time:48433ms step_avg:109.83ms +[2025-09-11 08:33:51] [Rank 0] step:461/10000 train_time:49072ms step_avg:106.45ms +[2025-09-11 08:33:51] [Rank 0] step:461/10000 train_time:49072ms step_avg:106.45ms +[2025-09-11 08:33:52] [Rank 0] step:481/10000 train_time:49711ms step_avg:103.35ms +[2025-09-11 08:33:52] [Rank 0] step:481/10000 train_time:49711ms step_avg:103.35ms +[2025-09-11 08:33:53] [Rank 0] step:501/10000 train_time:50350ms step_avg:100.50ms +[2025-09-11 08:33:53] [Rank 0] step:501/10000 train_time:50350ms step_avg:100.50ms +[2025-09-11 08:33:53] [Rank 0] step:521/10000 train_time:50989ms step_avg:97.87ms +[2025-09-11 08:33:53] [Rank 0] step:521/10000 train_time:50989ms step_avg:97.87ms +[2025-09-11 08:33:54] [Rank 0] step:541/10000 train_time:51627ms step_avg:95.43ms +[2025-09-11 08:33:54] [Rank 0] step:541/10000 train_time:51627ms step_avg:95.43ms +[2025-09-11 08:33:55] [Rank 0] step:561/10000 train_time:52266ms step_avg:93.17ms +[2025-09-11 08:33:55] [Rank 0] step:561/10000 train_time:52266ms step_avg:93.17ms +[2025-09-11 08:33:55] [Rank 
0] step:581/10000 train_time:52904ms step_avg:91.06ms +[2025-09-11 08:33:55] [Rank 0] step:581/10000 train_time:52904ms step_avg:91.06ms +[2025-09-11 08:33:56] [Rank 0] step:601/10000 train_time:53543ms step_avg:89.09ms +[2025-09-11 08:33:56] [Rank 0] step:601/10000 train_time:53543ms step_avg:89.09ms +[2025-09-11 08:33:57] [Rank 0] step:621/10000 train_time:54181ms step_avg:87.25ms +[2025-09-11 08:33:57] [Rank 0] step:621/10000 train_time:54181ms step_avg:87.25ms +[2025-09-11 08:33:57] [Rank 0] step:641/10000 train_time:54819ms step_avg:85.52ms +[2025-09-11 08:33:57] [Rank 0] step:641/10000 train_time:54819ms step_avg:85.52ms +[2025-09-11 08:33:58] [Rank 0] step:661/10000 train_time:55457ms step_avg:83.90ms +[2025-09-11 08:33:58] [Rank 0] step:661/10000 train_time:55457ms step_avg:83.90ms +[2025-09-11 08:33:58] [Rank 0] step:681/10000 train_time:56096ms step_avg:82.37ms +[2025-09-11 08:33:58] [Rank 0] step:681/10000 train_time:56096ms step_avg:82.37ms +[2025-09-11 08:33:59] [Rank 0] step:701/10000 train_time:56734ms step_avg:80.93ms +[2025-09-11 08:33:59] [Rank 0] step:701/10000 train_time:56734ms step_avg:80.93ms +[2025-09-11 08:34:00] [Rank 0] step:721/10000 train_time:57372ms step_avg:79.57ms +[2025-09-11 08:34:00] [Rank 0] step:721/10000 train_time:57372ms step_avg:79.57ms +[2025-09-11 08:34:00] [Rank 0] step:741/10000 train_time:58010ms step_avg:78.29ms +[2025-09-11 08:34:00] [Rank 0] step:741/10000 train_time:58010ms step_avg:78.29ms +[2025-09-11 08:34:01] [Rank 0] step:761/10000 train_time:58654ms step_avg:77.07ms +[2025-09-11 08:34:01] [Rank 0] step:761/10000 train_time:58654ms step_avg:77.07ms +[2025-09-11 08:34:02] [Rank 0] step:781/10000 train_time:59298ms step_avg:75.93ms +[2025-09-11 08:34:02] [Rank 0] step:781/10000 train_time:59298ms step_avg:75.93ms +[2025-09-11 08:34:02] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 08:34:02] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 08:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 08:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 08:34:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 08:34:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 08:34:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:34:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:34:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 08:34:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 08:34:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 08:34:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 08:34:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 08:34:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 08:34:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 08:34:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 08:34:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 08:34:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 08:34:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 08:34:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 08:34:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 08:34:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 08:34:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 08:34:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 08:34:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 08:34:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 08:34:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 08:34:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 08:34:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 08:34:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 08:34:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 08:34:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 08:34:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 08:34:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 08:34:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 08:34:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 08:34:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 08:34:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 08:34:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 08:34:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 08:34:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... 
+[2025-09-11 08:34:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 08:34:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 08:34:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 08:34:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 08:34:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 08:34:46] [Rank 0] PRINT: step:800/10000 val_loss:5.6177 total_sharp:5.6414e-04 L1_sharp:1.6635e-03 L2_sharp:2.3126e-04 L3_sharp:7.8777e-05 L4_sharp:7.0681e-05 L5_sharp:9.9247e-05 L6_sharp:6.9981e-05 L7_sharp:1.2202e-04 L8_sharp:1.4236e-04 L9_sharp:1.2383e-04 L10_sharp:1.8211e-04 L11_sharp:3.5413e-04 L12_sharp:2.0420e-03 total_fnorm:8.9500e+01 total_l1_linf:2.9286e+05 total_spectral:4.4750e+01 L1_fnorm:1.2188e+01 L2_fnorm:1.1750e+01 L3_fnorm:1.1562e+01 L4_fnorm:1.1688e+01 L5_fnorm:1.1562e+01 L6_fnorm:1.1750e+01 L7_fnorm:1.1562e+01 L8_fnorm:1.1562e+01 L9_fnorm:1.1562e+01 L10_fnorm:1.1500e+01 L11_fnorm:1.1375e+01 L12_fnorm:1.0750e+01 L1_l1linf:3.5781e+00 L2_l1linf:3.5469e+00 L3_l1linf:3.5781e+00 L4_l1linf:3.6719e+00 L5_l1linf:3.6250e+00 L6_l1linf:3.6406e+00 L7_l1linf:3.5938e+00 L8_l1linf:3.5625e+00 L9_l1linf:3.4844e+00 L10_l1linf:3.3906e+00 L11_l1linf:3.2031e+00 L12_l1linf:2.5000e+00 L1_spectral:1.3378e-01 L2_spectral:1.3181e-01 L3_spectral:1.3143e-01 L4_spectral:1.3144e-01 L5_spectral:1.3129e-01 L6_spectral:1.3180e-01 L7_spectral:1.3150e-01 L8_spectral:1.3152e-01 L9_spectral:1.3161e-01 L10_spectral:1.3170e-01 L11_spectral:1.3139e-01 L12_spectral:1.3214e-01 train_time:59924ms step_avg:74.90ms +[2025-09-11 08:34:46] [Rank 0] PRINT: step:800/10000 val_loss:5.6177 total_sharp:5.6414e-04 L1_sharp:1.6635e-03 L2_sharp:2.3126e-04 L3_sharp:7.8777e-05 L4_sharp:7.0681e-05 L5_sharp:9.9247e-05 L6_sharp:6.9981e-05 L7_sharp:1.2202e-04 L8_sharp:1.4236e-04 
L9_sharp:1.2383e-04 L10_sharp:1.8211e-04 L11_sharp:3.5413e-04 L12_sharp:2.0420e-03 total_fnorm:8.9500e+01 total_l1_linf:2.9286e+05 total_spectral:4.4750e+01 L1_fnorm:1.2188e+01 L2_fnorm:1.1750e+01 L3_fnorm:1.1562e+01 L4_fnorm:1.1688e+01 L5_fnorm:1.1562e+01 L6_fnorm:1.1750e+01 L7_fnorm:1.1562e+01 L8_fnorm:1.1562e+01 L9_fnorm:1.1562e+01 L10_fnorm:1.1500e+01 L11_fnorm:1.1375e+01 L12_fnorm:1.0750e+01 L1_l1linf:3.5781e+00 L2_l1linf:3.5469e+00 L3_l1linf:3.5781e+00 L4_l1linf:3.6719e+00 L5_l1linf:3.6250e+00 L6_l1linf:3.6406e+00 L7_l1linf:3.5938e+00 L8_l1linf:3.5625e+00 L9_l1linf:3.4844e+00 L10_l1linf:3.3906e+00 L11_l1linf:3.2031e+00 L12_l1linf:2.5000e+00 L1_spectral:1.3378e-01 L2_spectral:1.3181e-01 L3_spectral:1.3143e-01 L4_spectral:1.3144e-01 L5_spectral:1.3129e-01 L6_spectral:1.3180e-01 L7_spectral:1.3150e-01 L8_spectral:1.3152e-01 L9_spectral:1.3161e-01 L10_spectral:1.3170e-01 L11_spectral:1.3139e-01 L12_spectral:1.3214e-01 train_time:59924ms step_avg:74.90ms +[2025-09-11 08:34:47] [Rank 0] step:801/10000 train_time:61054ms step_avg:76.22ms +[2025-09-11 08:34:47] [Rank 0] step:801/10000 train_time:61054ms step_avg:76.22ms +[2025-09-11 08:34:48] [Rank 0] step:821/10000 train_time:61736ms step_avg:75.20ms +[2025-09-11 08:34:48] [Rank 0] step:821/10000 train_time:61736ms step_avg:75.20ms +[2025-09-11 08:34:49] [Rank 0] step:841/10000 train_time:62381ms step_avg:74.17ms +[2025-09-11 08:34:49] [Rank 0] step:841/10000 train_time:62381ms step_avg:74.17ms +[2025-09-11 08:34:49] [Rank 0] step:861/10000 train_time:63026ms step_avg:73.20ms +[2025-09-11 08:34:49] [Rank 0] step:861/10000 train_time:63026ms step_avg:73.20ms +[2025-09-11 08:34:50] [Rank 0] step:881/10000 train_time:63670ms step_avg:72.27ms +[2025-09-11 08:34:50] [Rank 0] step:881/10000 train_time:63670ms step_avg:72.27ms +[2025-09-11 08:34:51] [Rank 0] step:901/10000 train_time:64315ms step_avg:71.38ms +[2025-09-11 08:34:51] [Rank 0] step:901/10000 train_time:64315ms step_avg:71.38ms +[2025-09-11 08:34:51] [Rank 0] 
step:921/10000 train_time:64959ms step_avg:70.53ms +[2025-09-11 08:34:51] [Rank 0] step:921/10000 train_time:64959ms step_avg:70.53ms +[2025-09-11 08:34:52] [Rank 0] step:941/10000 train_time:65602ms step_avg:69.72ms +[2025-09-11 08:34:52] [Rank 0] step:941/10000 train_time:65602ms step_avg:69.72ms +[2025-09-11 08:34:53] [Rank 0] step:961/10000 train_time:66246ms step_avg:68.93ms +[2025-09-11 08:34:53] [Rank 0] step:961/10000 train_time:66246ms step_avg:68.93ms +[2025-09-11 08:34:53] [Rank 0] step:981/10000 train_time:66890ms step_avg:68.19ms +[2025-09-11 08:34:53] [Rank 0] step:981/10000 train_time:66890ms step_avg:68.19ms +[2025-09-11 08:34:54] [Rank 0] step:1001/10000 train_time:67534ms step_avg:67.47ms +[2025-09-11 08:34:54] [Rank 0] step:1001/10000 train_time:67534ms step_avg:67.47ms +[2025-09-11 08:34:55] [Rank 0] step:1021/10000 train_time:68178ms step_avg:66.78ms +[2025-09-11 08:34:55] [Rank 0] step:1021/10000 train_time:68178ms step_avg:66.78ms +[2025-09-11 08:34:55] [Rank 0] step:1041/10000 train_time:68823ms step_avg:66.11ms +[2025-09-11 08:34:55] [Rank 0] step:1041/10000 train_time:68823ms step_avg:66.11ms +[2025-09-11 08:34:56] [Rank 0] step:1061/10000 train_time:69467ms step_avg:65.47ms +[2025-09-11 08:34:56] [Rank 0] step:1061/10000 train_time:69467ms step_avg:65.47ms +[2025-09-11 08:34:56] [Rank 0] step:1081/10000 train_time:70110ms step_avg:64.86ms +[2025-09-11 08:34:56] [Rank 0] step:1081/10000 train_time:70110ms step_avg:64.86ms +[2025-09-11 08:34:57] [Rank 0] step:1101/10000 train_time:70754ms step_avg:64.26ms +[2025-09-11 08:34:57] [Rank 0] step:1101/10000 train_time:70754ms step_avg:64.26ms +[2025-09-11 08:34:58] [Rank 0] step:1121/10000 train_time:71397ms step_avg:63.69ms +[2025-09-11 08:34:58] [Rank 0] step:1121/10000 train_time:71397ms step_avg:63.69ms +[2025-09-11 08:34:58] [Rank 0] step:1141/10000 train_time:72041ms step_avg:63.14ms +[2025-09-11 08:34:58] [Rank 0] step:1141/10000 train_time:72041ms step_avg:63.14ms +[2025-09-11 08:34:59] 
[Rank 0] step:1161/10000 train_time:72684ms step_avg:62.60ms +[2025-09-11 08:34:59] [Rank 0] step:1161/10000 train_time:72684ms step_avg:62.60ms +[2025-09-11 08:35:00] [Rank 0] step:1181/10000 train_time:73328ms step_avg:62.09ms +[2025-09-11 08:35:00] [Rank 0] step:1181/10000 train_time:73328ms step_avg:62.09ms +[2025-09-11 08:35:00] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 08:35:00] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 08:35:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 08:35:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 08:35:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 08:35:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 08:35:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:35:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:35:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 08:35:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 08:35:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 08:35:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 08:35:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 08:35:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 08:35:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... 
+[2025-09-11 08:35:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 08:35:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 08:35:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 08:35:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 08:35:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 08:35:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 08:35:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 08:35:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 08:35:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 08:35:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 08:35:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 08:35:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 08:35:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 08:35:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 08:35:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 08:35:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 08:35:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 08:35:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 08:35:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 08:35:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... 
+[2025-09-11 08:35:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 08:35:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 08:35:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 08:35:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 08:35:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 08:35:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 08:35:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 08:35:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 08:35:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 08:35:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 08:35:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:35:10] [Rank 0] PRINT: step:1200/10000 val_loss:5.2685 total_sharp:3.5666e-04 L1_sharp:8.2572e-04 L2_sharp:1.9978e-04 L3_sharp:8.1801e-05 L4_sharp:5.6808e-05 L5_sharp:8.1212e-05 L6_sharp:3.9645e-05 L7_sharp:7.8665e-05 L8_sharp:7.5478e-05 L9_sharp:7.1904e-05 L10_sharp:9.9364e-05 L11_sharp:2.0375e-04 L12_sharp:1.1583e-03 total_fnorm:8.8500e+01 total_l1_linf:2.8672e+05 total_spectral:4.4750e+01 L1_fnorm:1.2312e+01 L2_fnorm:1.2062e+01 L3_fnorm:1.2062e+01 L4_fnorm:1.2062e+01 L5_fnorm:1.1938e+01 L6_fnorm:1.2188e+01 L7_fnorm:1.1938e+01 L8_fnorm:1.1938e+01 L9_fnorm:1.2125e+01 L10_fnorm:1.2000e+01 L11_fnorm:1.2062e+01 L12_fnorm:1.1875e+01 L1_l1linf:3.5312e+00 L2_l1linf:3.4375e+00 L3_l1linf:3.4688e+00 L4_l1linf:3.5469e+00 L5_l1linf:3.4219e+00 L6_l1linf:3.3750e+00 L7_l1linf:3.3281e+00 L8_l1linf:3.3281e+00 L9_l1linf:3.2500e+00 L10_l1linf:3.2500e+00 L11_l1linf:3.2344e+00 L12_l1linf:2.9531e+00 L1_spectral:1.3933e-01 L2_spectral:1.3720e-01 L3_spectral:1.3708e-01 L4_spectral:1.3706e-01 L5_spectral:1.3843e-01 L6_spectral:1.3778e-01 L7_spectral:1.3698e-01 L8_spectral:1.3882e-01 L9_spectral:1.3721e-01 L10_spectral:1.3710e-01 L11_spectral:1.3694e-01 L12_spectral:1.3679e-01 train_time:73954ms step_avg:61.63ms +[2025-09-11 08:35:10] [Rank 0] PRINT: step:1200/10000 val_loss:5.2685 total_sharp:3.5666e-04 L1_sharp:8.2572e-04 L2_sharp:1.9978e-04 L3_sharp:8.1801e-05 L4_sharp:5.6808e-05 L5_sharp:8.1212e-05 L6_sharp:3.9645e-05 L7_sharp:7.8665e-05 L8_sharp:7.5478e-05 L9_sharp:7.1904e-05 L10_sharp:9.9364e-05 L11_sharp:2.0375e-04 L12_sharp:1.1583e-03 total_fnorm:8.8500e+01 total_l1_linf:2.8672e+05 total_spectral:4.4750e+01 L1_fnorm:1.2312e+01 L2_fnorm:1.2062e+01 L3_fnorm:1.2062e+01 L4_fnorm:1.2062e+01 L5_fnorm:1.1938e+01 L6_fnorm:1.2188e+01 L7_fnorm:1.1938e+01 L8_fnorm:1.1938e+01 L9_fnorm:1.2125e+01 L10_fnorm:1.2000e+01 L11_fnorm:1.2062e+01 L12_fnorm:1.1875e+01 L1_l1linf:3.5312e+00 L2_l1linf:3.4375e+00 L3_l1linf:3.4688e+00 L4_l1linf:3.5469e+00 L5_l1linf:3.4219e+00 
L6_l1linf:3.3750e+00 L7_l1linf:3.3281e+00 L8_l1linf:3.3281e+00 L9_l1linf:3.2500e+00 L10_l1linf:3.2500e+00 L11_l1linf:3.2344e+00 L12_l1linf:2.9531e+00 L1_spectral:1.3933e-01 L2_spectral:1.3720e-01 L3_spectral:1.3708e-01 L4_spectral:1.3706e-01 L5_spectral:1.3843e-01 L6_spectral:1.3778e-01 L7_spectral:1.3698e-01 L8_spectral:1.3882e-01 L9_spectral:1.3721e-01 L10_spectral:1.3710e-01 L11_spectral:1.3694e-01 L12_spectral:1.3679e-01 train_time:73954ms step_avg:61.63ms +[2025-09-11 08:35:11] [Rank 0] step:1201/10000 train_time:75135ms step_avg:62.56ms +[2025-09-11 08:35:11] [Rank 0] step:1201/10000 train_time:75135ms step_avg:62.56ms +[2025-09-11 08:35:12] [Rank 0] step:1221/10000 train_time:75768ms step_avg:62.05ms +[2025-09-11 08:35:12] [Rank 0] step:1221/10000 train_time:75768ms step_avg:62.05ms +[2025-09-11 08:35:13] [Rank 0] step:1241/10000 train_time:76413ms step_avg:61.57ms +[2025-09-11 08:35:13] [Rank 0] step:1241/10000 train_time:76413ms step_avg:61.57ms +[2025-09-11 08:35:13] [Rank 0] step:1261/10000 train_time:77057ms step_avg:61.11ms +[2025-09-11 08:35:13] [Rank 0] step:1261/10000 train_time:77057ms step_avg:61.11ms +[2025-09-11 08:35:14] [Rank 0] step:1281/10000 train_time:77701ms step_avg:60.66ms +[2025-09-11 08:35:14] [Rank 0] step:1281/10000 train_time:77701ms step_avg:60.66ms +[2025-09-11 08:35:15] [Rank 0] step:1301/10000 train_time:78347ms step_avg:60.22ms +[2025-09-11 08:35:15] [Rank 0] step:1301/10000 train_time:78347ms step_avg:60.22ms +[2025-09-11 08:35:15] [Rank 0] step:1321/10000 train_time:78991ms step_avg:59.80ms +[2025-09-11 08:35:15] [Rank 0] step:1321/10000 train_time:78991ms step_avg:59.80ms +[2025-09-11 08:35:16] [Rank 0] step:1341/10000 train_time:79634ms step_avg:59.38ms +[2025-09-11 08:35:16] [Rank 0] step:1341/10000 train_time:79634ms step_avg:59.38ms +[2025-09-11 08:35:16] [Rank 0] step:1361/10000 train_time:80278ms step_avg:58.98ms +[2025-09-11 08:35:16] [Rank 0] step:1361/10000 train_time:80278ms step_avg:58.98ms +[2025-09-11 08:35:17] 
[Rank 0] step:1381/10000 train_time:80922ms step_avg:58.60ms +[2025-09-11 08:35:17] [Rank 0] step:1381/10000 train_time:80922ms step_avg:58.60ms +[2025-09-11 08:35:18] [Rank 0] step:1401/10000 train_time:81566ms step_avg:58.22ms +[2025-09-11 08:35:18] [Rank 0] step:1401/10000 train_time:81566ms step_avg:58.22ms +[2025-09-11 08:35:18] [Rank 0] step:1421/10000 train_time:82210ms step_avg:57.85ms +[2025-09-11 08:35:18] [Rank 0] step:1421/10000 train_time:82210ms step_avg:57.85ms +[2025-09-11 08:35:19] [Rank 0] step:1441/10000 train_time:82854ms step_avg:57.50ms +[2025-09-11 08:35:19] [Rank 0] step:1441/10000 train_time:82854ms step_avg:57.50ms +[2025-09-11 08:35:20] [Rank 0] step:1461/10000 train_time:83498ms step_avg:57.15ms +[2025-09-11 08:35:20] [Rank 0] step:1461/10000 train_time:83498ms step_avg:57.15ms +[2025-09-11 08:35:20] [Rank 0] step:1481/10000 train_time:84142ms step_avg:56.81ms +[2025-09-11 08:35:20] [Rank 0] step:1481/10000 train_time:84142ms step_avg:56.81ms +[2025-09-11 08:35:21] [Rank 0] step:1501/10000 train_time:84790ms step_avg:56.49ms +[2025-09-11 08:35:21] [Rank 0] step:1501/10000 train_time:84790ms step_avg:56.49ms +[2025-09-11 08:35:22] [Rank 0] step:1521/10000 train_time:85439ms step_avg:56.17ms +[2025-09-11 08:35:22] [Rank 0] step:1521/10000 train_time:85439ms step_avg:56.17ms +[2025-09-11 08:35:22] [Rank 0] step:1541/10000 train_time:86087ms step_avg:55.86ms +[2025-09-11 08:35:22] [Rank 0] step:1541/10000 train_time:86087ms step_avg:55.86ms +[2025-09-11 08:35:23] [Rank 0] step:1561/10000 train_time:86735ms step_avg:55.56ms +[2025-09-11 08:35:23] [Rank 0] step:1561/10000 train_time:86735ms step_avg:55.56ms +[2025-09-11 08:35:24] [Rank 0] step:1581/10000 train_time:87383ms step_avg:55.27ms +[2025-09-11 08:35:24] [Rank 0] step:1581/10000 train_time:87383ms step_avg:55.27ms +[2025-09-11 08:35:24] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 08:35:24] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 08:35:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 08:35:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 08:35:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 08:35:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 08:35:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:35:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:35:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 08:35:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 08:35:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 08:35:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 08:35:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 08:35:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 08:35:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 08:35:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 08:35:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 08:35:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 08:35:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 08:35:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 08:35:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 08:35:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 08:35:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 08:35:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 08:35:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 08:35:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 08:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 08:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 08:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 08:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 08:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 08:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 08:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 08:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 08:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 08:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 08:35:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 08:35:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 08:35:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 08:35:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 08:35:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 08:35:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 08:35:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 08:35:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 08:35:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 08:35:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 08:35:34] [Rank 0] PRINT: step:1600/10000 val_loss:5.0930 total_sharp:3.0198e-04 L1_sharp:7.6980e-04 L2_sharp:1.2345e-04 L3_sharp:3.7805e-05 L4_sharp:3.5213e-05 L5_sharp:7.9337e-05 L6_sharp:6.4001e-05 L7_sharp:5.8465e-05 L8_sharp:6.9418e-05 L9_sharp:6.4199e-05 L10_sharp:7.9937e-05 L11_sharp:1.7030e-04 L12_sharp:1.2734e-03 total_fnorm:8.7000e+01 total_l1_linf:2.7034e+05 total_spectral:4.3500e+01 L1_fnorm:1.2250e+01 L2_fnorm:1.2062e+01 L3_fnorm:1.2250e+01 L4_fnorm:1.2250e+01 L5_fnorm:1.2125e+01 L6_fnorm:1.2312e+01 L7_fnorm:1.2188e+01 L8_fnorm:1.2000e+01 L9_fnorm:1.2312e+01 L10_fnorm:1.2250e+01 L11_fnorm:1.2188e+01 L12_fnorm:1.2062e+01 L1_l1linf:3.4844e+00 L2_l1linf:3.3594e+00 L3_l1linf:3.2812e+00 L4_l1linf:3.4219e+00 L5_l1linf:3.2812e+00 L6_l1linf:3.2500e+00 L7_l1linf:3.2500e+00 L8_l1linf:3.1719e+00 L9_l1linf:3.1562e+00 L10_l1linf:3.1250e+00 L11_l1linf:3.1875e+00 L12_l1linf:3.0000e+00 L1_spectral:1.4359e-01 L2_spectral:1.4103e-01 L3_spectral:1.4106e-01 L4_spectral:1.4140e-01 L5_spectral:1.4260e-01 L6_spectral:1.4211e-01 L7_spectral:1.4122e-01 L8_spectral:1.4405e-01 L9_spectral:1.4160e-01 L10_spectral:1.4152e-01 L11_spectral:1.4105e-01 L12_spectral:1.4189e-01 train_time:88014ms step_avg:55.01ms +[2025-09-11 08:35:34] [Rank 0] PRINT: step:1600/10000 
val_loss:5.0930 total_sharp:3.0198e-04 L1_sharp:7.6980e-04 L2_sharp:1.2345e-04 L3_sharp:3.7805e-05 L4_sharp:3.5213e-05 L5_sharp:7.9337e-05 L6_sharp:6.4001e-05 L7_sharp:5.8465e-05 L8_sharp:6.9418e-05 L9_sharp:6.4199e-05 L10_sharp:7.9937e-05 L11_sharp:1.7030e-04 L12_sharp:1.2734e-03 total_fnorm:8.7000e+01 total_l1_linf:2.7034e+05 total_spectral:4.3500e+01 L1_fnorm:1.2250e+01 L2_fnorm:1.2062e+01 L3_fnorm:1.2250e+01 L4_fnorm:1.2250e+01 L5_fnorm:1.2125e+01 L6_fnorm:1.2312e+01 L7_fnorm:1.2188e+01 L8_fnorm:1.2000e+01 L9_fnorm:1.2312e+01 L10_fnorm:1.2250e+01 L11_fnorm:1.2188e+01 L12_fnorm:1.2062e+01 L1_l1linf:3.4844e+00 L2_l1linf:3.3594e+00 L3_l1linf:3.2812e+00 L4_l1linf:3.4219e+00 L5_l1linf:3.2812e+00 L6_l1linf:3.2500e+00 L7_l1linf:3.2500e+00 L8_l1linf:3.1719e+00 L9_l1linf:3.1562e+00 L10_l1linf:3.1250e+00 L11_l1linf:3.1875e+00 L12_l1linf:3.0000e+00 L1_spectral:1.4359e-01 L2_spectral:1.4103e-01 L3_spectral:1.4106e-01 L4_spectral:1.4140e-01 L5_spectral:1.4260e-01 L6_spectral:1.4211e-01 L7_spectral:1.4122e-01 L8_spectral:1.4405e-01 L9_spectral:1.4160e-01 L10_spectral:1.4152e-01 L11_spectral:1.4105e-01 L12_spectral:1.4189e-01 train_time:88014ms step_avg:55.01ms +[2025-09-11 08:35:35] [Rank 0] step:1601/10000 train_time:89186ms step_avg:55.71ms +[2025-09-11 08:35:35] [Rank 0] step:1601/10000 train_time:89186ms step_avg:55.71ms +[2025-09-11 08:35:36] [Rank 0] step:1621/10000 train_time:89860ms step_avg:55.44ms +[2025-09-11 08:35:36] [Rank 0] step:1621/10000 train_time:89860ms step_avg:55.44ms +[2025-09-11 08:35:36] [Rank 0] step:1641/10000 train_time:90510ms step_avg:55.16ms +[2025-09-11 08:35:36] [Rank 0] step:1641/10000 train_time:90510ms step_avg:55.16ms +[2025-09-11 08:35:37] [Rank 0] step:1661/10000 train_time:91158ms step_avg:54.88ms +[2025-09-11 08:35:37] [Rank 0] step:1661/10000 train_time:91158ms step_avg:54.88ms +[2025-09-11 08:35:38] [Rank 0] step:1681/10000 train_time:91807ms step_avg:54.61ms +[2025-09-11 08:35:38] [Rank 0] step:1681/10000 train_time:91807ms 
step_avg:54.61ms +[2025-09-11 08:35:38] [Rank 0] step:1701/10000 train_time:92455ms step_avg:54.35ms +[2025-09-11 08:35:38] [Rank 0] step:1701/10000 train_time:92455ms step_avg:54.35ms +[2025-09-11 08:35:39] [Rank 0] step:1721/10000 train_time:93104ms step_avg:54.10ms +[2025-09-11 08:35:39] [Rank 0] step:1721/10000 train_time:93104ms step_avg:54.10ms +[2025-09-11 08:35:40] [Rank 0] step:1741/10000 train_time:93753ms step_avg:53.85ms +[2025-09-11 08:35:40] [Rank 0] step:1741/10000 train_time:93753ms step_avg:53.85ms +[2025-09-11 08:35:40] [Rank 0] step:1761/10000 train_time:94401ms step_avg:53.61ms +[2025-09-11 08:35:40] [Rank 0] step:1761/10000 train_time:94401ms step_avg:53.61ms +[2025-09-11 08:35:41] [Rank 0] step:1781/10000 train_time:95048ms step_avg:53.37ms +[2025-09-11 08:35:41] [Rank 0] step:1781/10000 train_time:95048ms step_avg:53.37ms +[2025-09-11 08:35:42] [Rank 0] step:1801/10000 train_time:95696ms step_avg:53.13ms +[2025-09-11 08:35:42] [Rank 0] step:1801/10000 train_time:95696ms step_avg:53.13ms +[2025-09-11 08:35:42] [Rank 0] step:1821/10000 train_time:96344ms step_avg:52.91ms +[2025-09-11 08:35:42] [Rank 0] step:1821/10000 train_time:96344ms step_avg:52.91ms +[2025-09-11 08:35:43] [Rank 0] step:1841/10000 train_time:96992ms step_avg:52.68ms +[2025-09-11 08:35:43] [Rank 0] step:1841/10000 train_time:96992ms step_avg:52.68ms +[2025-09-11 08:35:44] [Rank 0] step:1861/10000 train_time:97639ms step_avg:52.47ms +[2025-09-11 08:35:44] [Rank 0] step:1861/10000 train_time:97639ms step_avg:52.47ms +[2025-09-11 08:35:44] [Rank 0] step:1881/10000 train_time:98286ms step_avg:52.25ms +[2025-09-11 08:35:44] [Rank 0] step:1881/10000 train_time:98286ms step_avg:52.25ms +[2025-09-11 08:35:45] [Rank 0] step:1901/10000 train_time:98935ms step_avg:52.04ms +[2025-09-11 08:35:45] [Rank 0] step:1901/10000 train_time:98935ms step_avg:52.04ms +[2025-09-11 08:35:46] [Rank 0] step:1921/10000 train_time:99582ms step_avg:51.84ms +[2025-09-11 08:35:46] [Rank 0] step:1921/10000 
train_time:99582ms step_avg:51.84ms +[2025-09-11 08:35:46] [Rank 0] step:1941/10000 train_time:100230ms step_avg:51.64ms +[2025-09-11 08:35:46] [Rank 0] step:1941/10000 train_time:100230ms step_avg:51.64ms +[2025-09-11 08:35:47] [Rank 0] step:1961/10000 train_time:100877ms step_avg:51.44ms +[2025-09-11 08:35:47] [Rank 0] step:1961/10000 train_time:100877ms step_avg:51.44ms +[2025-09-11 08:35:47] [Rank 0] step:1981/10000 train_time:101524ms step_avg:51.25ms +[2025-09-11 08:35:47] [Rank 0] step:1981/10000 train_time:101524ms step_avg:51.25ms +[2025-09-11 08:35:48] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 08:35:48] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 08:35:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 08:35:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 08:35:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 08:35:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 08:35:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:35:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:35:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 08:35:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 08:35:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 08:35:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 08:35:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 08:35:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 08:35:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 08:35:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 08:35:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 08:35:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 08:35:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 08:35:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 08:35:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 08:35:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 08:35:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 08:35:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 08:35:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 08:35:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 08:35:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 08:35:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 08:35:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 08:35:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 08:35:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 08:35:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 08:35:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 08:35:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 08:35:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 08:35:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 08:35:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 08:35:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 08:35:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 08:35:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 08:35:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 08:35:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 08:35:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 08:35:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 08:35:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 08:35:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:35:58] [Rank 0] PRINT: step:2000/10000 val_loss:4.9513 total_sharp:2.1993e-04 L1_sharp:5.2174e-04 L2_sharp:8.1174e-05 L3_sharp:8.8211e-06 L4_sharp:2.1412e-05 L5_sharp:3.8479e-05 L6_sharp:2.8423e-05 L7_sharp:3.6829e-05 L8_sharp:5.7552e-05 L9_sharp:6.6272e-05 L10_sharp:7.6656e-05 L11_sharp:1.2479e-04 L12_sharp:1.0420e-03 total_fnorm:8.8500e+01 total_l1_linf:2.7648e+05 total_spectral:4.4250e+01 L1_fnorm:1.2250e+01 L2_fnorm:1.2125e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2312e+01 L5_fnorm:1.2188e+01 L6_fnorm:1.2438e+01 L7_fnorm:1.2250e+01 L8_fnorm:1.2125e+01 L9_fnorm:1.2438e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2312e+01 L12_fnorm:1.2250e+01 L1_l1linf:3.4531e+00 L2_l1linf:3.2812e+00 L3_l1linf:3.2969e+00 L4_l1linf:3.1406e+00 L5_l1linf:3.1406e+00 L6_l1linf:3.1562e+00 L7_l1linf:3.1406e+00 L8_l1linf:3.1250e+00 L9_l1linf:3.0781e+00 L10_l1linf:3.0781e+00 L11_l1linf:3.1250e+00 L12_l1linf:2.9844e+00 L1_spectral:1.4623e-01 L2_spectral:1.4347e-01 L3_spectral:1.4360e-01 L4_spectral:1.4391e-01 L5_spectral:1.4653e-01 L6_spectral:1.4527e-01 L7_spectral:1.4456e-01 L8_spectral:1.4836e-01 L9_spectral:1.4564e-01 L10_spectral:1.4565e-01 L11_spectral:1.4551e-01 L12_spectral:1.4564e-01 train_time:102155ms step_avg:51.08ms +[2025-09-11 08:35:58] [Rank 0] PRINT: step:2000/10000 val_loss:4.9513 total_sharp:2.1993e-04 L1_sharp:5.2174e-04 L2_sharp:8.1174e-05 L3_sharp:8.8211e-06 L4_sharp:2.1412e-05 L5_sharp:3.8479e-05 L6_sharp:2.8423e-05 L7_sharp:3.6829e-05 L8_sharp:5.7552e-05 L9_sharp:6.6272e-05 L10_sharp:7.6656e-05 L11_sharp:1.2479e-04 L12_sharp:1.0420e-03 total_fnorm:8.8500e+01 total_l1_linf:2.7648e+05 total_spectral:4.4250e+01 L1_fnorm:1.2250e+01 L2_fnorm:1.2125e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2312e+01 L5_fnorm:1.2188e+01 L6_fnorm:1.2438e+01 L7_fnorm:1.2250e+01 L8_fnorm:1.2125e+01 L9_fnorm:1.2438e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2312e+01 L12_fnorm:1.2250e+01 L1_l1linf:3.4531e+00 L2_l1linf:3.2812e+00 L3_l1linf:3.2969e+00 L4_l1linf:3.1406e+00 L5_l1linf:3.1406e+00 
L6_l1linf:3.1562e+00 L7_l1linf:3.1406e+00 L8_l1linf:3.1250e+00 L9_l1linf:3.0781e+00 L10_l1linf:3.0781e+00 L11_l1linf:3.1250e+00 L12_l1linf:2.9844e+00 L1_spectral:1.4623e-01 L2_spectral:1.4347e-01 L3_spectral:1.4360e-01 L4_spectral:1.4391e-01 L5_spectral:1.4653e-01 L6_spectral:1.4527e-01 L7_spectral:1.4456e-01 L8_spectral:1.4836e-01 L9_spectral:1.4564e-01 L10_spectral:1.4565e-01 L11_spectral:1.4551e-01 L12_spectral:1.4564e-01 train_time:102155ms step_avg:51.08ms +[2025-09-11 08:35:59] [Rank 0] step:2001/10000 train_time:103342ms step_avg:51.65ms +[2025-09-11 08:35:59] [Rank 0] step:2001/10000 train_time:103342ms step_avg:51.65ms +[2025-09-11 08:36:00] [Rank 0] step:2021/10000 train_time:103993ms step_avg:51.46ms +[2025-09-11 08:36:00] [Rank 0] step:2021/10000 train_time:103993ms step_avg:51.46ms +[2025-09-11 08:36:00] [Rank 0] step:2041/10000 train_time:104642ms step_avg:51.27ms +[2025-09-11 08:36:00] [Rank 0] step:2041/10000 train_time:104642ms step_avg:51.27ms +[2025-09-11 08:36:01] [Rank 0] step:2061/10000 train_time:105290ms step_avg:51.09ms +[2025-09-11 08:36:01] [Rank 0] step:2061/10000 train_time:105290ms step_avg:51.09ms +[2025-09-11 08:36:02] [Rank 0] step:2081/10000 train_time:105938ms step_avg:50.91ms +[2025-09-11 08:36:02] [Rank 0] step:2081/10000 train_time:105938ms step_avg:50.91ms +[2025-09-11 08:36:02] [Rank 0] step:2101/10000 train_time:106586ms step_avg:50.73ms +[2025-09-11 08:36:02] [Rank 0] step:2101/10000 train_time:106586ms step_avg:50.73ms +[2025-09-11 08:36:03] [Rank 0] step:2121/10000 train_time:107233ms step_avg:50.56ms +[2025-09-11 08:36:03] [Rank 0] step:2121/10000 train_time:107233ms step_avg:50.56ms +[2025-09-11 08:36:04] [Rank 0] step:2141/10000 train_time:107881ms step_avg:50.39ms +[2025-09-11 08:36:04] [Rank 0] step:2141/10000 train_time:107881ms step_avg:50.39ms +[2025-09-11 08:36:04] [Rank 0] step:2161/10000 train_time:108529ms step_avg:50.22ms +[2025-09-11 08:36:04] [Rank 0] step:2161/10000 train_time:108529ms step_avg:50.22ms 
+[2025-09-11 08:36:05] [Rank 0] step:2181/10000 train_time:109177ms step_avg:50.06ms +[2025-09-11 08:36:05] [Rank 0] step:2181/10000 train_time:109177ms step_avg:50.06ms +[2025-09-11 08:36:06] [Rank 0] step:2201/10000 train_time:109823ms step_avg:49.90ms +[2025-09-11 08:36:06] [Rank 0] step:2201/10000 train_time:109823ms step_avg:49.90ms +[2025-09-11 08:36:06] [Rank 0] step:2221/10000 train_time:110477ms step_avg:49.74ms +[2025-09-11 08:36:06] [Rank 0] step:2221/10000 train_time:110477ms step_avg:49.74ms +[2025-09-11 08:36:07] [Rank 0] step:2241/10000 train_time:111137ms step_avg:49.59ms +[2025-09-11 08:36:07] [Rank 0] step:2241/10000 train_time:111137ms step_avg:49.59ms +[2025-09-11 08:36:08] [Rank 0] step:2261/10000 train_time:111797ms step_avg:49.45ms +[2025-09-11 08:36:08] [Rank 0] step:2261/10000 train_time:111797ms step_avg:49.45ms +[2025-09-11 08:36:08] [Rank 0] step:2281/10000 train_time:112457ms step_avg:49.30ms +[2025-09-11 08:36:08] [Rank 0] step:2281/10000 train_time:112457ms step_avg:49.30ms +[2025-09-11 08:36:09] [Rank 0] step:2301/10000 train_time:113117ms step_avg:49.16ms +[2025-09-11 08:36:09] [Rank 0] step:2301/10000 train_time:113117ms step_avg:49.16ms +[2025-09-11 08:36:10] [Rank 0] step:2321/10000 train_time:113777ms step_avg:49.02ms +[2025-09-11 08:36:10] [Rank 0] step:2321/10000 train_time:113777ms step_avg:49.02ms +[2025-09-11 08:36:10] [Rank 0] step:2341/10000 train_time:114437ms step_avg:48.88ms +[2025-09-11 08:36:10] [Rank 0] step:2341/10000 train_time:114437ms step_avg:48.88ms +[2025-09-11 08:36:11] [Rank 0] step:2361/10000 train_time:115098ms step_avg:48.75ms +[2025-09-11 08:36:11] [Rank 0] step:2361/10000 train_time:115098ms step_avg:48.75ms +[2025-09-11 08:36:11] [Rank 0] step:2381/10000 train_time:115758ms step_avg:48.62ms +[2025-09-11 08:36:11] [Rank 0] step:2381/10000 train_time:115758ms step_avg:48.62ms +[2025-09-11 08:36:12] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 08:36:12] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 08:36:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 08:36:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 08:36:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 08:36:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 08:36:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:36:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:36:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 08:36:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 08:36:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 08:36:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 08:36:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 08:36:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 08:36:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 08:36:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 08:36:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 08:36:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 08:36:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 08:36:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 08:36:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 08:36:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 08:36:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 08:36:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 08:36:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 08:36:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 08:36:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 08:36:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 08:36:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 08:36:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 08:36:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 08:36:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 08:36:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 08:36:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 08:36:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 08:36:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 08:36:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 08:36:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 08:36:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 08:36:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 08:36:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 08:36:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 08:36:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 08:36:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 08:36:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 08:36:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 08:36:22] [Rank 0] PRINT: step:2400/10000 val_loss:4.8020 total_sharp:2.1846e-04 L1_sharp:3.8015e-04 L2_sharp:6.2139e-05 L3_sharp:-7.6115e-06 L4_sharp:5.6765e-05 L5_sharp:3.1940e-05 L6_sharp:3.5507e-05 L7_sharp:4.4420e-05 L8_sharp:8.2430e-05 L9_sharp:7.3702e-05 L10_sharp:7.0568e-05 L11_sharp:1.2553e-04 L12_sharp:7.6330e-04 total_fnorm:8.4000e+01 total_l1_linf:2.5498e+05 total_spectral:4.1750e+01 L1_fnorm:1.2188e+01 L2_fnorm:1.2188e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2375e+01 L5_fnorm:1.2188e+01 L6_fnorm:1.2500e+01 L7_fnorm:1.2312e+01 L8_fnorm:1.2188e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2375e+01 L11_fnorm:1.2375e+01 L12_fnorm:1.2312e+01 L1_l1linf:3.4375e+00 L2_l1linf:3.1719e+00 L3_l1linf:3.1250e+00 L4_l1linf:3.1250e+00 L5_l1linf:3.1250e+00 L6_l1linf:3.0938e+00 L7_l1linf:3.1719e+00 L8_l1linf:3.1250e+00 L9_l1linf:3.0625e+00 L10_l1linf:3.0000e+00 L11_l1linf:3.0625e+00 L12_l1linf:2.9688e+00 L1_spectral:1.4904e-01 L2_spectral:1.4515e-01 L3_spectral:1.4642e-01 L4_spectral:1.4700e-01 L5_spectral:1.4942e-01 L6_spectral:1.4868e-01 L7_spectral:1.4807e-01 L8_spectral:1.5084e-01 L9_spectral:1.4779e-01 L10_spectral:1.4811e-01 L11_spectral:1.4786e-01 L12_spectral:1.4902e-01 train_time:116400ms step_avg:48.50ms +[2025-09-11 08:36:22] [Rank 0] PRINT: step:2400/10000 
val_loss:4.8020 total_sharp:2.1846e-04 L1_sharp:3.8015e-04 L2_sharp:6.2139e-05 L3_sharp:-7.6115e-06 L4_sharp:5.6765e-05 L5_sharp:3.1940e-05 L6_sharp:3.5507e-05 L7_sharp:4.4420e-05 L8_sharp:8.2430e-05 L9_sharp:7.3702e-05 L10_sharp:7.0568e-05 L11_sharp:1.2553e-04 L12_sharp:7.6330e-04 total_fnorm:8.4000e+01 total_l1_linf:2.5498e+05 total_spectral:4.1750e+01 L1_fnorm:1.2188e+01 L2_fnorm:1.2188e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2375e+01 L5_fnorm:1.2188e+01 L6_fnorm:1.2500e+01 L7_fnorm:1.2312e+01 L8_fnorm:1.2188e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2375e+01 L11_fnorm:1.2375e+01 L12_fnorm:1.2312e+01 L1_l1linf:3.4375e+00 L2_l1linf:3.1719e+00 L3_l1linf:3.1250e+00 L4_l1linf:3.1250e+00 L5_l1linf:3.1250e+00 L6_l1linf:3.0938e+00 L7_l1linf:3.1719e+00 L8_l1linf:3.1250e+00 L9_l1linf:3.0625e+00 L10_l1linf:3.0000e+00 L11_l1linf:3.0625e+00 L12_l1linf:2.9688e+00 L1_spectral:1.4904e-01 L2_spectral:1.4515e-01 L3_spectral:1.4642e-01 L4_spectral:1.4700e-01 L5_spectral:1.4942e-01 L6_spectral:1.4868e-01 L7_spectral:1.4807e-01 L8_spectral:1.5084e-01 L9_spectral:1.4779e-01 L10_spectral:1.4811e-01 L11_spectral:1.4786e-01 L12_spectral:1.4902e-01 train_time:116400ms step_avg:48.50ms +[2025-09-11 08:36:23] [Rank 0] step:2401/10000 train_time:117623ms step_avg:48.99ms +[2025-09-11 08:36:23] [Rank 0] step:2401/10000 train_time:117623ms step_avg:48.99ms +[2025-09-11 08:36:24] [Rank 0] step:2421/10000 train_time:118288ms step_avg:48.86ms +[2025-09-11 08:36:24] [Rank 0] step:2421/10000 train_time:118288ms step_avg:48.86ms +[2025-09-11 08:36:25] [Rank 0] step:2441/10000 train_time:118951ms step_avg:48.73ms +[2025-09-11 08:36:25] [Rank 0] step:2441/10000 train_time:118951ms step_avg:48.73ms +[2025-09-11 08:36:25] [Rank 0] step:2461/10000 train_time:119613ms step_avg:48.60ms +[2025-09-11 08:36:25] [Rank 0] step:2461/10000 train_time:119613ms step_avg:48.60ms +[2025-09-11 08:36:26] [Rank 0] step:2481/10000 train_time:120276ms step_avg:48.48ms +[2025-09-11 08:36:26] [Rank 0] step:2481/10000 
train_time:120276ms step_avg:48.48ms +[2025-09-11 08:36:27] [Rank 0] step:2501/10000 train_time:120938ms step_avg:48.36ms +[2025-09-11 08:36:27] [Rank 0] step:2501/10000 train_time:120938ms step_avg:48.36ms +[2025-09-11 08:36:27] [Rank 0] step:2521/10000 train_time:121601ms step_avg:48.24ms +[2025-09-11 08:36:27] [Rank 0] step:2521/10000 train_time:121601ms step_avg:48.24ms +[2025-09-11 08:36:28] [Rank 0] step:2541/10000 train_time:122263ms step_avg:48.12ms +[2025-09-11 08:36:28] [Rank 0] step:2541/10000 train_time:122263ms step_avg:48.12ms +[2025-09-11 08:36:28] [Rank 0] step:2561/10000 train_time:122927ms step_avg:48.00ms +[2025-09-11 08:36:28] [Rank 0] step:2561/10000 train_time:122927ms step_avg:48.00ms +[2025-09-11 08:36:29] [Rank 0] step:2581/10000 train_time:123589ms step_avg:47.88ms +[2025-09-11 08:36:29] [Rank 0] step:2581/10000 train_time:123589ms step_avg:47.88ms +[2025-09-11 08:36:30] [Rank 0] step:2601/10000 train_time:124251ms step_avg:47.77ms +[2025-09-11 08:36:30] [Rank 0] step:2601/10000 train_time:124251ms step_avg:47.77ms +[2025-09-11 08:36:30] [Rank 0] step:2621/10000 train_time:124913ms step_avg:47.66ms +[2025-09-11 08:36:30] [Rank 0] step:2621/10000 train_time:124913ms step_avg:47.66ms +[2025-09-11 08:36:31] [Rank 0] step:2641/10000 train_time:125575ms step_avg:47.55ms +[2025-09-11 08:36:31] [Rank 0] step:2641/10000 train_time:125575ms step_avg:47.55ms +[2025-09-11 08:36:32] [Rank 0] step:2661/10000 train_time:126236ms step_avg:47.44ms +[2025-09-11 08:36:32] [Rank 0] step:2661/10000 train_time:126236ms step_avg:47.44ms +[2025-09-11 08:36:32] [Rank 0] step:2681/10000 train_time:126897ms step_avg:47.33ms +[2025-09-11 08:36:32] [Rank 0] step:2681/10000 train_time:126897ms step_avg:47.33ms +[2025-09-11 08:36:33] [Rank 0] step:2701/10000 train_time:127559ms step_avg:47.23ms +[2025-09-11 08:36:33] [Rank 0] step:2701/10000 train_time:127559ms step_avg:47.23ms +[2025-09-11 08:36:34] [Rank 0] step:2721/10000 train_time:128221ms step_avg:47.12ms 
+[2025-09-11 08:36:34] [Rank 0] step:2721/10000 train_time:128221ms step_avg:47.12ms +[2025-09-11 08:36:35] [Rank 0] step:2741/10000 train_time:129033ms step_avg:47.08ms +[2025-09-11 08:36:35] [Rank 0] step:2741/10000 train_time:129033ms step_avg:47.08ms +[2025-09-11 08:36:36] [Rank 0] step:2761/10000 train_time:130065ms step_avg:47.11ms +[2025-09-11 08:36:36] [Rank 0] step:2761/10000 train_time:130065ms step_avg:47.11ms +[2025-09-11 08:36:36] [Rank 0] step:2781/10000 train_time:130726ms step_avg:47.01ms +[2025-09-11 08:36:36] [Rank 0] step:2781/10000 train_time:130726ms step_avg:47.01ms +[2025-09-11 08:36:37] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 08:36:37] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 08:36:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 08:36:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 08:36:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 08:36:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 08:36:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:36:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:36:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 08:36:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 08:36:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 08:36:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 08:36:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 08:36:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 08:36:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 08:36:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 08:36:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 08:36:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 08:36:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 08:36:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 08:36:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 08:36:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 08:36:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 08:36:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 08:36:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 08:36:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 08:36:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 08:36:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 08:36:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 08:36:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 08:36:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 08:36:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 08:36:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 08:36:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 08:36:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 08:36:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 08:36:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 08:36:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 08:36:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 08:36:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 08:36:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 08:36:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 08:36:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 08:36:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 08:36:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 08:36:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:36:47] [Rank 0] PRINT: step:2800/10000 val_loss:4.7418 total_sharp:2.1695e-04 L1_sharp:3.4823e-04 L2_sharp:6.2197e-05 L3_sharp:3.2297e-05 L4_sharp:3.2561e-05 L5_sharp:3.3797e-05 L6_sharp:4.4178e-05 L7_sharp:2.8687e-05 L8_sharp:8.9913e-05 L9_sharp:6.3515e-05 L10_sharp:7.1355e-05 L11_sharp:1.2797e-04 L12_sharp:6.3530e-04 total_fnorm:8.3000e+01 total_l1_linf:2.5088e+05 total_spectral:4.1250e+01 L1_fnorm:1.2188e+01 L2_fnorm:1.2188e+01 L3_fnorm:1.2250e+01 L4_fnorm:1.2375e+01 L5_fnorm:1.2312e+01 L6_fnorm:1.2500e+01 L7_fnorm:1.2438e+01 L8_fnorm:1.2250e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2438e+01 L11_fnorm:1.2438e+01 L12_fnorm:1.2438e+01 L1_l1linf:3.3125e+00 L2_l1linf:3.1250e+00 L3_l1linf:2.9531e+00 L4_l1linf:3.0156e+00 L5_l1linf:3.1094e+00 L6_l1linf:3.0938e+00 L7_l1linf:3.1875e+00 L8_l1linf:3.1094e+00 L9_l1linf:3.0312e+00 L10_l1linf:2.9844e+00 L11_l1linf:2.9844e+00 L12_l1linf:2.9375e+00 L1_spectral:1.5151e-01 L2_spectral:1.4797e-01 L3_spectral:1.4908e-01 L4_spectral:1.4888e-01 L5_spectral:1.5249e-01 L6_spectral:1.5075e-01 L7_spectral:1.4973e-01 L8_spectral:1.5215e-01 L9_spectral:1.5152e-01 L10_spectral:1.5144e-01 L11_spectral:1.5081e-01 L12_spectral:1.5029e-01 train_time:131519ms step_avg:46.97ms +[2025-09-11 08:36:47] [Rank 0] PRINT: step:2800/10000 val_loss:4.7418 total_sharp:2.1695e-04 L1_sharp:3.4823e-04 L2_sharp:6.2197e-05 L3_sharp:3.2297e-05 L4_sharp:3.2561e-05 L5_sharp:3.3797e-05 L6_sharp:4.4178e-05 L7_sharp:2.8687e-05 L8_sharp:8.9913e-05 L9_sharp:6.3515e-05 L10_sharp:7.1355e-05 L11_sharp:1.2797e-04 L12_sharp:6.3530e-04 total_fnorm:8.3000e+01 total_l1_linf:2.5088e+05 total_spectral:4.1250e+01 L1_fnorm:1.2188e+01 L2_fnorm:1.2188e+01 L3_fnorm:1.2250e+01 L4_fnorm:1.2375e+01 L5_fnorm:1.2312e+01 L6_fnorm:1.2500e+01 L7_fnorm:1.2438e+01 L8_fnorm:1.2250e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2438e+01 L11_fnorm:1.2438e+01 L12_fnorm:1.2438e+01 L1_l1linf:3.3125e+00 L2_l1linf:3.1250e+00 L3_l1linf:2.9531e+00 L4_l1linf:3.0156e+00 L5_l1linf:3.1094e+00 
L6_l1linf:3.0938e+00 L7_l1linf:3.1875e+00 L8_l1linf:3.1094e+00 L9_l1linf:3.0312e+00 L10_l1linf:2.9844e+00 L11_l1linf:2.9844e+00 L12_l1linf:2.9375e+00 L1_spectral:1.5151e-01 L2_spectral:1.4797e-01 L3_spectral:1.4908e-01 L4_spectral:1.4888e-01 L5_spectral:1.5249e-01 L6_spectral:1.5075e-01 L7_spectral:1.4973e-01 L8_spectral:1.5215e-01 L9_spectral:1.5152e-01 L10_spectral:1.5144e-01 L11_spectral:1.5081e-01 L12_spectral:1.5029e-01 train_time:131519ms step_avg:46.97ms +[2025-09-11 08:36:49] [Rank 0] step:2801/10000 train_time:132736ms step_avg:47.39ms +[2025-09-11 08:36:49] [Rank 0] step:2801/10000 train_time:132736ms step_avg:47.39ms +[2025-09-11 08:36:49] [Rank 0] step:2821/10000 train_time:133412ms step_avg:47.29ms +[2025-09-11 08:36:49] [Rank 0] step:2821/10000 train_time:133412ms step_avg:47.29ms +[2025-09-11 08:36:50] [Rank 0] step:2841/10000 train_time:134075ms step_avg:47.19ms +[2025-09-11 08:36:50] [Rank 0] step:2841/10000 train_time:134075ms step_avg:47.19ms +[2025-09-11 08:36:51] [Rank 0] step:2861/10000 train_time:134739ms step_avg:47.10ms +[2025-09-11 08:36:51] [Rank 0] step:2861/10000 train_time:134739ms step_avg:47.10ms +[2025-09-11 08:36:51] [Rank 0] step:2881/10000 train_time:135400ms step_avg:47.00ms +[2025-09-11 08:36:51] [Rank 0] step:2881/10000 train_time:135400ms step_avg:47.00ms +[2025-09-11 08:36:52] [Rank 0] step:2901/10000 train_time:136062ms step_avg:46.90ms +[2025-09-11 08:36:52] [Rank 0] step:2901/10000 train_time:136062ms step_avg:46.90ms +[2025-09-11 08:36:53] [Rank 0] step:2921/10000 train_time:136723ms step_avg:46.81ms +[2025-09-11 08:36:53] [Rank 0] step:2921/10000 train_time:136723ms step_avg:46.81ms +[2025-09-11 08:36:53] [Rank 0] step:2941/10000 train_time:137384ms step_avg:46.71ms +[2025-09-11 08:36:53] [Rank 0] step:2941/10000 train_time:137384ms step_avg:46.71ms +[2025-09-11 08:36:54] [Rank 0] step:2961/10000 train_time:138045ms step_avg:46.62ms +[2025-09-11 08:36:54] [Rank 0] step:2961/10000 train_time:138045ms step_avg:46.62ms 
+[2025-09-11 08:36:55] [Rank 0] step:2981/10000 train_time:138709ms step_avg:46.53ms +[2025-09-11 08:36:55] [Rank 0] step:2981/10000 train_time:138709ms step_avg:46.53ms +[2025-09-11 08:36:55] [Rank 0] step:3001/10000 train_time:139374ms step_avg:46.44ms +[2025-09-11 08:36:55] [Rank 0] step:3001/10000 train_time:139374ms step_avg:46.44ms +[2025-09-11 08:36:56] [Rank 0] step:3021/10000 train_time:140038ms step_avg:46.35ms +[2025-09-11 08:36:56] [Rank 0] step:3021/10000 train_time:140038ms step_avg:46.35ms +[2025-09-11 08:36:57] [Rank 0] step:3041/10000 train_time:140702ms step_avg:46.27ms +[2025-09-11 08:36:57] [Rank 0] step:3041/10000 train_time:140702ms step_avg:46.27ms +[2025-09-11 08:36:57] [Rank 0] step:3061/10000 train_time:141366ms step_avg:46.18ms +[2025-09-11 08:36:57] [Rank 0] step:3061/10000 train_time:141366ms step_avg:46.18ms +[2025-09-11 08:36:58] [Rank 0] step:3081/10000 train_time:142030ms step_avg:46.10ms +[2025-09-11 08:36:58] [Rank 0] step:3081/10000 train_time:142030ms step_avg:46.10ms +[2025-09-11 08:36:58] [Rank 0] step:3101/10000 train_time:142694ms step_avg:46.02ms +[2025-09-11 08:36:58] [Rank 0] step:3101/10000 train_time:142694ms step_avg:46.02ms +[2025-09-11 08:36:59] [Rank 0] step:3121/10000 train_time:143358ms step_avg:45.93ms +[2025-09-11 08:36:59] [Rank 0] step:3121/10000 train_time:143358ms step_avg:45.93ms +[2025-09-11 08:37:00] [Rank 0] step:3141/10000 train_time:144022ms step_avg:45.85ms +[2025-09-11 08:37:00] [Rank 0] step:3141/10000 train_time:144022ms step_avg:45.85ms +[2025-09-11 08:37:00] [Rank 0] step:3161/10000 train_time:144686ms step_avg:45.77ms +[2025-09-11 08:37:00] [Rank 0] step:3161/10000 train_time:144686ms step_avg:45.77ms +[2025-09-11 08:37:01] [Rank 0] step:3181/10000 train_time:145350ms step_avg:45.69ms +[2025-09-11 08:37:01] [Rank 0] step:3181/10000 train_time:145350ms step_avg:45.69ms +[2025-09-11 08:37:02] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 08:37:02] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 08:37:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 08:37:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 08:37:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 08:37:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 08:37:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:37:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:37:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 08:37:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 08:37:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 08:37:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 08:37:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 08:37:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 08:37:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 08:37:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 08:37:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 08:37:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 08:37:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 08:37:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 08:37:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 08:37:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 08:37:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 08:37:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 08:37:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 08:37:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 08:37:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 08:37:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 08:37:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 08:37:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 08:37:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 08:37:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 08:37:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 08:37:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 08:37:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 08:37:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 08:37:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 08:37:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 08:37:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 08:37:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 08:37:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 08:37:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 08:37:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 08:37:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 08:37:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 08:37:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 08:37:12] [Rank 0] PRINT: step:3200/10000 val_loss:4.6487 total_sharp:1.6656e-04 L1_sharp:2.1744e-04 L2_sharp:6.9947e-05 L3_sharp:6.4036e-06 L4_sharp:2.8169e-05 L5_sharp:5.0005e-05 L6_sharp:3.7739e-05 L7_sharp:2.5133e-05 L8_sharp:6.4837e-05 L9_sharp:5.8857e-05 L10_sharp:7.6601e-05 L11_sharp:1.0253e-04 L12_sharp:8.3730e-04 total_fnorm:9.0500e+01 total_l1_linf:2.7443e+05 total_spectral:4.4750e+01 L1_fnorm:1.2125e+01 L2_fnorm:1.2188e+01 L3_fnorm:1.2375e+01 L4_fnorm:1.2438e+01 L5_fnorm:1.2312e+01 L6_fnorm:1.2562e+01 L7_fnorm:1.2500e+01 L8_fnorm:1.2312e+01 L9_fnorm:1.2562e+01 L10_fnorm:1.2500e+01 L11_fnorm:1.2500e+01 L12_fnorm:1.2438e+01 L1_l1linf:3.3125e+00 L2_l1linf:3.0938e+00 L3_l1linf:2.8438e+00 L4_l1linf:2.9844e+00 L5_l1linf:3.1406e+00 L6_l1linf:3.1094e+00 L7_l1linf:3.1406e+00 L8_l1linf:3.1250e+00 L9_l1linf:3.0625e+00 L10_l1linf:2.9531e+00 L11_l1linf:2.9531e+00 L12_l1linf:2.8750e+00 L1_spectral:1.5311e-01 L2_spectral:1.4928e-01 L3_spectral:1.5086e-01 L4_spectral:1.5255e-01 L5_spectral:1.5212e-01 L6_spectral:1.5265e-01 L7_spectral:1.5221e-01 L8_spectral:1.5334e-01 L9_spectral:1.5346e-01 L10_spectral:1.5393e-01 L11_spectral:1.5283e-01 L12_spectral:1.5188e-01 train_time:145997ms step_avg:45.62ms +[2025-09-11 08:37:12] [Rank 0] PRINT: step:3200/10000 
val_loss:4.6487 total_sharp:1.6656e-04 L1_sharp:2.1744e-04 L2_sharp:6.9947e-05 L3_sharp:6.4036e-06 L4_sharp:2.8169e-05 L5_sharp:5.0005e-05 L6_sharp:3.7739e-05 L7_sharp:2.5133e-05 L8_sharp:6.4837e-05 L9_sharp:5.8857e-05 L10_sharp:7.6601e-05 L11_sharp:1.0253e-04 L12_sharp:8.3730e-04 total_fnorm:9.0500e+01 total_l1_linf:2.7443e+05 total_spectral:4.4750e+01 L1_fnorm:1.2125e+01 L2_fnorm:1.2188e+01 L3_fnorm:1.2375e+01 L4_fnorm:1.2438e+01 L5_fnorm:1.2312e+01 L6_fnorm:1.2562e+01 L7_fnorm:1.2500e+01 L8_fnorm:1.2312e+01 L9_fnorm:1.2562e+01 L10_fnorm:1.2500e+01 L11_fnorm:1.2500e+01 L12_fnorm:1.2438e+01 L1_l1linf:3.3125e+00 L2_l1linf:3.0938e+00 L3_l1linf:2.8438e+00 L4_l1linf:2.9844e+00 L5_l1linf:3.1406e+00 L6_l1linf:3.1094e+00 L7_l1linf:3.1406e+00 L8_l1linf:3.1250e+00 L9_l1linf:3.0625e+00 L10_l1linf:2.9531e+00 L11_l1linf:2.9531e+00 L12_l1linf:2.8750e+00 L1_spectral:1.5311e-01 L2_spectral:1.4928e-01 L3_spectral:1.5086e-01 L4_spectral:1.5255e-01 L5_spectral:1.5212e-01 L6_spectral:1.5265e-01 L7_spectral:1.5221e-01 L8_spectral:1.5334e-01 L9_spectral:1.5346e-01 L10_spectral:1.5393e-01 L11_spectral:1.5283e-01 L12_spectral:1.5188e-01 train_time:145997ms step_avg:45.62ms +[2025-09-11 08:37:13] [Rank 0] step:3201/10000 train_time:147225ms step_avg:45.99ms +[2025-09-11 08:37:13] [Rank 0] step:3201/10000 train_time:147225ms step_avg:45.99ms +[2025-09-11 08:37:14] [Rank 0] step:3221/10000 train_time:147916ms step_avg:45.92ms +[2025-09-11 08:37:14] [Rank 0] step:3221/10000 train_time:147916ms step_avg:45.92ms +[2025-09-11 08:37:14] [Rank 0] step:3241/10000 train_time:148581ms step_avg:45.84ms +[2025-09-11 08:37:14] [Rank 0] step:3241/10000 train_time:148581ms step_avg:45.84ms +[2025-09-11 08:37:15] [Rank 0] step:3261/10000 train_time:149246ms step_avg:45.77ms +[2025-09-11 08:37:15] [Rank 0] step:3261/10000 train_time:149246ms step_avg:45.77ms +[2025-09-11 08:37:16] [Rank 0] step:3281/10000 train_time:149910ms step_avg:45.69ms +[2025-09-11 08:37:16] [Rank 0] step:3281/10000 
train_time:149910ms step_avg:45.69ms +[2025-09-11 08:37:16] [Rank 0] step:3301/10000 train_time:150574ms step_avg:45.61ms +[2025-09-11 08:37:16] [Rank 0] step:3301/10000 train_time:150574ms step_avg:45.61ms +[2025-09-11 08:37:17] [Rank 0] step:3321/10000 train_time:151238ms step_avg:45.54ms +[2025-09-11 08:37:17] [Rank 0] step:3321/10000 train_time:151238ms step_avg:45.54ms +[2025-09-11 08:37:18] [Rank 0] step:3341/10000 train_time:151903ms step_avg:45.47ms +[2025-09-11 08:37:18] [Rank 0] step:3341/10000 train_time:151903ms step_avg:45.47ms +[2025-09-11 08:37:18] [Rank 0] step:3361/10000 train_time:152568ms step_avg:45.39ms +[2025-09-11 08:37:18] [Rank 0] step:3361/10000 train_time:152568ms step_avg:45.39ms +[2025-09-11 08:37:19] [Rank 0] step:3381/10000 train_time:153232ms step_avg:45.32ms +[2025-09-11 08:37:19] [Rank 0] step:3381/10000 train_time:153232ms step_avg:45.32ms +[2025-09-11 08:37:20] [Rank 0] step:3401/10000 train_time:153896ms step_avg:45.25ms +[2025-09-11 08:37:20] [Rank 0] step:3401/10000 train_time:153896ms step_avg:45.25ms +[2025-09-11 08:37:20] [Rank 0] step:3421/10000 train_time:154560ms step_avg:45.18ms +[2025-09-11 08:37:20] [Rank 0] step:3421/10000 train_time:154560ms step_avg:45.18ms +[2025-09-11 08:37:21] [Rank 0] step:3441/10000 train_time:155223ms step_avg:45.11ms +[2025-09-11 08:37:21] [Rank 0] step:3441/10000 train_time:155223ms step_avg:45.11ms +[2025-09-11 08:37:22] [Rank 0] step:3461/10000 train_time:155887ms step_avg:45.04ms +[2025-09-11 08:37:22] [Rank 0] step:3461/10000 train_time:155887ms step_avg:45.04ms +[2025-09-11 08:37:22] [Rank 0] step:3481/10000 train_time:156552ms step_avg:44.97ms +[2025-09-11 08:37:22] [Rank 0] step:3481/10000 train_time:156552ms step_avg:44.97ms +[2025-09-11 08:37:23] [Rank 0] step:3501/10000 train_time:157216ms step_avg:44.91ms +[2025-09-11 08:37:23] [Rank 0] step:3501/10000 train_time:157216ms step_avg:44.91ms +[2025-09-11 08:37:24] [Rank 0] step:3521/10000 train_time:157880ms step_avg:44.84ms 
+[2025-09-11 08:37:24] [Rank 0] step:3521/10000 train_time:157880ms step_avg:44.84ms +[2025-09-11 08:37:24] [Rank 0] step:3541/10000 train_time:158544ms step_avg:44.77ms +[2025-09-11 08:37:24] [Rank 0] step:3541/10000 train_time:158544ms step_avg:44.77ms +[2025-09-11 08:37:25] [Rank 0] step:3561/10000 train_time:159209ms step_avg:44.71ms +[2025-09-11 08:37:25] [Rank 0] step:3561/10000 train_time:159209ms step_avg:44.71ms +[2025-09-11 08:37:26] [Rank 0] step:3581/10000 train_time:159873ms step_avg:44.64ms +[2025-09-11 08:37:26] [Rank 0] step:3581/10000 train_time:159873ms step_avg:44.64ms +[2025-09-11 08:37:26] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 08:37:26] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 08:37:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 08:37:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 08:37:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 08:37:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 08:37:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:37:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:37:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 08:37:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 08:37:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 08:37:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 08:37:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 08:37:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 08:37:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 08:37:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 08:37:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 08:37:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 08:37:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 08:37:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 08:37:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 08:37:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 08:37:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 08:37:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 08:37:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 08:37:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 08:37:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 08:37:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 08:37:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 08:37:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 08:37:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 08:37:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 08:37:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 08:37:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 08:37:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 08:37:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 08:37:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 08:37:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 08:37:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 08:37:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 08:37:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 08:37:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 08:37:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 08:37:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 08:37:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 08:37:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:37:36] [Rank 0] PRINT: step:3600/10000 val_loss:4.6040 total_sharp:2.0954e-04 L1_sharp:2.8103e-04 L2_sharp:2.8396e-05 L3_sharp:1.3611e-05 L4_sharp:1.6437e-05 L5_sharp:4.3001e-05 L6_sharp:3.9262e-05 L7_sharp:3.8851e-05 L8_sharp:7.3521e-05 L9_sharp:6.0587e-05 L10_sharp:6.9199e-05 L11_sharp:1.0168e-04 L12_sharp:1.4722e-03 total_fnorm:8.3500e+01 total_l1_linf:2.4781e+05 total_spectral:4.1250e+01 L1_fnorm:1.2125e+01 L2_fnorm:1.2125e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2438e+01 L5_fnorm:1.2250e+01 L6_fnorm:1.2562e+01 L7_fnorm:1.2500e+01 L8_fnorm:1.2250e+01 L9_fnorm:1.2562e+01 L10_fnorm:1.2500e+01 L11_fnorm:1.2438e+01 L12_fnorm:1.2438e+01 L1_l1linf:3.2344e+00 L2_l1linf:3.0000e+00 L3_l1linf:2.6406e+00 L4_l1linf:2.9219e+00 L5_l1linf:3.1250e+00 L6_l1linf:3.1250e+00 L7_l1linf:3.1250e+00 L8_l1linf:3.0625e+00 L9_l1linf:3.0000e+00 L10_l1linf:2.9219e+00 L11_l1linf:2.9219e+00 L12_l1linf:2.9062e+00 L1_spectral:1.5400e-01 L2_spectral:1.5054e-01 L3_spectral:1.5221e-01 L4_spectral:1.5238e-01 L5_spectral:1.5423e-01 L6_spectral:1.5412e-01 L7_spectral:1.5302e-01 L8_spectral:1.5466e-01 L9_spectral:1.5388e-01 L10_spectral:1.5443e-01 L11_spectral:1.5492e-01 L12_spectral:1.5285e-01 train_time:160518ms step_avg:44.59ms +[2025-09-11 08:37:36] [Rank 0] PRINT: step:3600/10000 val_loss:4.6040 total_sharp:2.0954e-04 L1_sharp:2.8103e-04 L2_sharp:2.8396e-05 L3_sharp:1.3611e-05 L4_sharp:1.6437e-05 L5_sharp:4.3001e-05 L6_sharp:3.9262e-05 L7_sharp:3.8851e-05 L8_sharp:7.3521e-05 L9_sharp:6.0587e-05 L10_sharp:6.9199e-05 L11_sharp:1.0168e-04 L12_sharp:1.4722e-03 total_fnorm:8.3500e+01 total_l1_linf:2.4781e+05 total_spectral:4.1250e+01 L1_fnorm:1.2125e+01 L2_fnorm:1.2125e+01 L3_fnorm:1.2312e+01 L4_fnorm:1.2438e+01 L5_fnorm:1.2250e+01 L6_fnorm:1.2562e+01 L7_fnorm:1.2500e+01 L8_fnorm:1.2250e+01 L9_fnorm:1.2562e+01 L10_fnorm:1.2500e+01 L11_fnorm:1.2438e+01 L12_fnorm:1.2438e+01 L1_l1linf:3.2344e+00 L2_l1linf:3.0000e+00 L3_l1linf:2.6406e+00 L4_l1linf:2.9219e+00 L5_l1linf:3.1250e+00 
L6_l1linf:3.1250e+00 L7_l1linf:3.1250e+00 L8_l1linf:3.0625e+00 L9_l1linf:3.0000e+00 L10_l1linf:2.9219e+00 L11_l1linf:2.9219e+00 L12_l1linf:2.9062e+00 L1_spectral:1.5400e-01 L2_spectral:1.5054e-01 L3_spectral:1.5221e-01 L4_spectral:1.5238e-01 L5_spectral:1.5423e-01 L6_spectral:1.5412e-01 L7_spectral:1.5302e-01 L8_spectral:1.5466e-01 L9_spectral:1.5388e-01 L10_spectral:1.5443e-01 L11_spectral:1.5492e-01 L12_spectral:1.5285e-01 train_time:160518ms step_avg:44.59ms +[2025-09-11 08:37:38] [Rank 0] step:3601/10000 train_time:161964ms step_avg:44.98ms +[2025-09-11 08:37:38] [Rank 0] step:3601/10000 train_time:161964ms step_avg:44.98ms +[2025-09-11 08:37:38] [Rank 0] step:3621/10000 train_time:162823ms step_avg:44.97ms +[2025-09-11 08:37:38] [Rank 0] step:3621/10000 train_time:162823ms step_avg:44.97ms +[2025-09-11 08:37:40] [Rank 0] step:3641/10000 train_time:163909ms step_avg:45.02ms +[2025-09-11 08:37:40] [Rank 0] step:3641/10000 train_time:163909ms step_avg:45.02ms +[2025-09-11 08:37:40] [Rank 0] step:3661/10000 train_time:164576ms step_avg:44.95ms +[2025-09-11 08:37:40] [Rank 0] step:3661/10000 train_time:164576ms step_avg:44.95ms +[2025-09-11 08:37:41] [Rank 0] step:3681/10000 train_time:165402ms step_avg:44.93ms +[2025-09-11 08:37:41] [Rank 0] step:3681/10000 train_time:165402ms step_avg:44.93ms +[2025-09-11 08:37:42] [Rank 0] step:3701/10000 train_time:166179ms step_avg:44.90ms +[2025-09-11 08:37:42] [Rank 0] step:3701/10000 train_time:166179ms step_avg:44.90ms +[2025-09-11 08:37:43] [Rank 0] step:3721/10000 train_time:166853ms step_avg:44.84ms +[2025-09-11 08:37:43] [Rank 0] step:3721/10000 train_time:166853ms step_avg:44.84ms +[2025-09-11 08:37:43] [Rank 0] step:3741/10000 train_time:167530ms step_avg:44.78ms +[2025-09-11 08:37:43] [Rank 0] step:3741/10000 train_time:167530ms step_avg:44.78ms +[2025-09-11 08:37:44] [Rank 0] step:3761/10000 train_time:168207ms step_avg:44.72ms +[2025-09-11 08:37:44] [Rank 0] step:3761/10000 train_time:168207ms step_avg:44.72ms 
+[2025-09-11 08:37:45] [Rank 0] step:3781/10000 train_time:168884ms step_avg:44.67ms +[2025-09-11 08:37:45] [Rank 0] step:3781/10000 train_time:168884ms step_avg:44.67ms +[2025-09-11 08:37:45] [Rank 0] step:3801/10000 train_time:169561ms step_avg:44.61ms +[2025-09-11 08:37:45] [Rank 0] step:3801/10000 train_time:169561ms step_avg:44.61ms +[2025-09-11 08:37:46] [Rank 0] step:3821/10000 train_time:170238ms step_avg:44.55ms +[2025-09-11 08:37:46] [Rank 0] step:3821/10000 train_time:170238ms step_avg:44.55ms +[2025-09-11 08:37:47] [Rank 0] step:3841/10000 train_time:170914ms step_avg:44.50ms +[2025-09-11 08:37:47] [Rank 0] step:3841/10000 train_time:170914ms step_avg:44.50ms +[2025-09-11 08:37:47] [Rank 0] step:3861/10000 train_time:171590ms step_avg:44.44ms +[2025-09-11 08:37:47] [Rank 0] step:3861/10000 train_time:171590ms step_avg:44.44ms +[2025-09-11 08:37:48] [Rank 0] step:3881/10000 train_time:172265ms step_avg:44.39ms +[2025-09-11 08:37:48] [Rank 0] step:3881/10000 train_time:172265ms step_avg:44.39ms +[2025-09-11 08:37:49] [Rank 0] step:3901/10000 train_time:172942ms step_avg:44.33ms +[2025-09-11 08:37:49] [Rank 0] step:3901/10000 train_time:172942ms step_avg:44.33ms +[2025-09-11 08:37:49] [Rank 0] step:3921/10000 train_time:173618ms step_avg:44.28ms +[2025-09-11 08:37:49] [Rank 0] step:3921/10000 train_time:173618ms step_avg:44.28ms +[2025-09-11 08:37:50] [Rank 0] step:3941/10000 train_time:174295ms step_avg:44.23ms +[2025-09-11 08:37:50] [Rank 0] step:3941/10000 train_time:174295ms step_avg:44.23ms +[2025-09-11 08:37:51] [Rank 0] step:3961/10000 train_time:174971ms step_avg:44.17ms +[2025-09-11 08:37:51] [Rank 0] step:3961/10000 train_time:174971ms step_avg:44.17ms +[2025-09-11 08:37:51] [Rank 0] step:3981/10000 train_time:175647ms step_avg:44.12ms +[2025-09-11 08:37:51] [Rank 0] step:3981/10000 train_time:175647ms step_avg:44.12ms +[2025-09-11 08:37:52] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 08:37:52] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 08:37:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 08:37:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 08:37:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 08:37:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 08:37:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:37:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:37:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 08:37:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 08:37:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 08:37:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 08:37:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 08:37:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 08:37:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 08:37:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 08:37:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 08:37:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 08:37:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 08:37:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 08:37:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 08:37:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 08:37:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 08:37:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 08:37:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 08:37:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 08:38:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 08:38:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 08:38:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 08:38:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 08:38:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 08:38:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 08:38:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 08:38:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 08:38:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 08:38:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 08:38:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 08:38:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 08:38:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 08:38:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 08:38:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 08:38:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 08:38:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 08:38:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 08:38:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 08:38:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 08:38:02] [Rank 0] PRINT: step:4000/10000 val_loss:4.5524 total_sharp:2.1336e-04 L1_sharp:2.9628e-04 L2_sharp:5.3335e-05 L3_sharp:1.2134e-05 L4_sharp:2.3387e-05 L5_sharp:5.6150e-05 L6_sharp:4.0936e-05 L7_sharp:4.5021e-05 L8_sharp:5.2203e-05 L9_sharp:4.6503e-05 L10_sharp:8.4373e-05 L11_sharp:1.4319e-04 L12_sharp:1.4296e-03 total_fnorm:8.9500e+01 total_l1_linf:2.6112e+05 total_spectral:4.4500e+01 L1_fnorm:1.2062e+01 L2_fnorm:1.2062e+01 L3_fnorm:1.2062e+01 L4_fnorm:1.2375e+01 L5_fnorm:1.2062e+01 L6_fnorm:1.2500e+01 L7_fnorm:1.2438e+01 L8_fnorm:1.2125e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2438e+01 L11_fnorm:1.2438e+01 L12_fnorm:1.2438e+01 L1_l1linf:3.2344e+00 L2_l1linf:2.9844e+00 L3_l1linf:2.3438e+00 L4_l1linf:2.9062e+00 L5_l1linf:3.1406e+00 L6_l1linf:3.0781e+00 L7_l1linf:3.1406e+00 L8_l1linf:3.0938e+00 L9_l1linf:3.0156e+00 L10_l1linf:2.8906e+00 L11_l1linf:2.9375e+00 L12_l1linf:2.8438e+00 L1_spectral:1.5450e-01 L2_spectral:1.5038e-01 L3_spectral:1.5309e-01 L4_spectral:1.5340e-01 L5_spectral:1.5319e-01 L6_spectral:1.5490e-01 L7_spectral:1.5452e-01 L8_spectral:1.5325e-01 L9_spectral:1.5443e-01 L10_spectral:1.5468e-01 L11_spectral:1.5432e-01 L12_spectral:1.5302e-01 train_time:176305ms step_avg:44.08ms +[2025-09-11 08:38:02] [Rank 0] PRINT: step:4000/10000 
val_loss:4.5524 total_sharp:2.1336e-04 L1_sharp:2.9628e-04 L2_sharp:5.3335e-05 L3_sharp:1.2134e-05 L4_sharp:2.3387e-05 L5_sharp:5.6150e-05 L6_sharp:4.0936e-05 L7_sharp:4.5021e-05 L8_sharp:5.2203e-05 L9_sharp:4.6503e-05 L10_sharp:8.4373e-05 L11_sharp:1.4319e-04 L12_sharp:1.4296e-03 total_fnorm:8.9500e+01 total_l1_linf:2.6112e+05 total_spectral:4.4500e+01 L1_fnorm:1.2062e+01 L2_fnorm:1.2062e+01 L3_fnorm:1.2062e+01 L4_fnorm:1.2375e+01 L5_fnorm:1.2062e+01 L6_fnorm:1.2500e+01 L7_fnorm:1.2438e+01 L8_fnorm:1.2125e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2438e+01 L11_fnorm:1.2438e+01 L12_fnorm:1.2438e+01 L1_l1linf:3.2344e+00 L2_l1linf:2.9844e+00 L3_l1linf:2.3438e+00 L4_l1linf:2.9062e+00 L5_l1linf:3.1406e+00 L6_l1linf:3.0781e+00 L7_l1linf:3.1406e+00 L8_l1linf:3.0938e+00 L9_l1linf:3.0156e+00 L10_l1linf:2.8906e+00 L11_l1linf:2.9375e+00 L12_l1linf:2.8438e+00 L1_spectral:1.5450e-01 L2_spectral:1.5038e-01 L3_spectral:1.5309e-01 L4_spectral:1.5340e-01 L5_spectral:1.5319e-01 L6_spectral:1.5490e-01 L7_spectral:1.5452e-01 L8_spectral:1.5325e-01 L9_spectral:1.5443e-01 L10_spectral:1.5468e-01 L11_spectral:1.5432e-01 L12_spectral:1.5302e-01 train_time:176305ms step_avg:44.08ms +[2025-09-11 08:38:03] [Rank 0] step:4001/10000 train_time:177537ms step_avg:44.37ms +[2025-09-11 08:38:03] [Rank 0] step:4001/10000 train_time:177537ms step_avg:44.37ms +[2025-09-11 08:38:04] [Rank 0] step:4021/10000 train_time:178217ms step_avg:44.32ms +[2025-09-11 08:38:04] [Rank 0] step:4021/10000 train_time:178217ms step_avg:44.32ms +[2025-09-11 08:38:05] [Rank 0] step:4041/10000 train_time:178895ms step_avg:44.27ms +[2025-09-11 08:38:05] [Rank 0] step:4041/10000 train_time:178895ms step_avg:44.27ms +[2025-09-11 08:38:05] [Rank 0] step:4061/10000 train_time:179570ms step_avg:44.22ms +[2025-09-11 08:38:05] [Rank 0] step:4061/10000 train_time:179570ms step_avg:44.22ms +[2025-09-11 08:38:06] [Rank 0] step:4081/10000 train_time:180246ms step_avg:44.17ms +[2025-09-11 08:38:06] [Rank 0] step:4081/10000 
train_time:180246ms step_avg:44.17ms +[2025-09-11 08:38:07] [Rank 0] step:4101/10000 train_time:180922ms step_avg:44.12ms +[2025-09-11 08:38:07] [Rank 0] step:4101/10000 train_time:180922ms step_avg:44.12ms +[2025-09-11 08:38:07] [Rank 0] step:4121/10000 train_time:181597ms step_avg:44.07ms +[2025-09-11 08:38:07] [Rank 0] step:4121/10000 train_time:181597ms step_avg:44.07ms +[2025-09-11 08:38:08] [Rank 0] step:4141/10000 train_time:182272ms step_avg:44.02ms +[2025-09-11 08:38:08] [Rank 0] step:4141/10000 train_time:182272ms step_avg:44.02ms +[2025-09-11 08:38:09] [Rank 0] step:4161/10000 train_time:182948ms step_avg:43.97ms +[2025-09-11 08:38:09] [Rank 0] step:4161/10000 train_time:182948ms step_avg:43.97ms +[2025-09-11 08:38:09] [Rank 0] step:4181/10000 train_time:183623ms step_avg:43.92ms +[2025-09-11 08:38:09] [Rank 0] step:4181/10000 train_time:183623ms step_avg:43.92ms +[2025-09-11 08:38:10] [Rank 0] step:4201/10000 train_time:184298ms step_avg:43.87ms +[2025-09-11 08:38:10] [Rank 0] step:4201/10000 train_time:184298ms step_avg:43.87ms +[2025-09-11 08:38:11] [Rank 0] step:4221/10000 train_time:184974ms step_avg:43.82ms +[2025-09-11 08:38:11] [Rank 0] step:4221/10000 train_time:184974ms step_avg:43.82ms +[2025-09-11 08:38:11] [Rank 0] step:4241/10000 train_time:185650ms step_avg:43.78ms +[2025-09-11 08:38:11] [Rank 0] step:4241/10000 train_time:185650ms step_avg:43.78ms +[2025-09-11 08:38:12] [Rank 0] step:4261/10000 train_time:186325ms step_avg:43.73ms +[2025-09-11 08:38:12] [Rank 0] step:4261/10000 train_time:186325ms step_avg:43.73ms +[2025-09-11 08:38:13] [Rank 0] step:4281/10000 train_time:187001ms step_avg:43.68ms +[2025-09-11 08:38:13] [Rank 0] step:4281/10000 train_time:187001ms step_avg:43.68ms +[2025-09-11 08:38:13] [Rank 0] step:4301/10000 train_time:187677ms step_avg:43.64ms +[2025-09-11 08:38:13] [Rank 0] step:4301/10000 train_time:187677ms step_avg:43.64ms +[2025-09-11 08:38:14] [Rank 0] step:4321/10000 train_time:188353ms step_avg:43.59ms 
+[2025-09-11 08:38:14] [Rank 0] step:4321/10000 train_time:188353ms step_avg:43.59ms +[2025-09-11 08:38:15] [Rank 0] step:4341/10000 train_time:189028ms step_avg:43.54ms +[2025-09-11 08:38:15] [Rank 0] step:4341/10000 train_time:189028ms step_avg:43.54ms +[2025-09-11 08:38:15] [Rank 0] step:4361/10000 train_time:189702ms step_avg:43.50ms +[2025-09-11 08:38:15] [Rank 0] step:4361/10000 train_time:189702ms step_avg:43.50ms +[2025-09-11 08:38:16] [Rank 0] step:4381/10000 train_time:190378ms step_avg:43.46ms +[2025-09-11 08:38:16] [Rank 0] step:4381/10000 train_time:190378ms step_avg:43.46ms +[2025-09-11 08:38:17] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 08:38:17] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 08:38:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 08:38:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 08:38:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 08:38:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 08:38:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:38:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:38:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 08:38:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 08:38:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 08:38:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 08:38:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 08:38:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 08:38:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 08:38:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 08:38:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 08:38:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 08:38:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 08:38:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 08:38:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 08:38:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 08:38:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 08:38:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 08:38:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 08:38:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 08:38:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 08:38:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 08:38:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 08:38:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 08:38:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 08:38:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 08:38:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 08:38:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 08:38:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 08:38:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 08:38:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 08:38:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 08:38:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 08:38:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 08:38:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 08:38:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 08:38:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 08:38:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 08:38:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 08:38:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:38:27] [Rank 0] PRINT: step:4400/10000 val_loss:4.5271 total_sharp:1.3463e-04 L1_sharp:2.6998e-04 L2_sharp:4.6377e-06 L3_sharp:1.1584e-06 L4_sharp:2.2638e-05 L5_sharp:6.5108e-05 L6_sharp:4.2994e-05 L7_sharp:3.3779e-05 L8_sharp:5.5057e-05 L9_sharp:5.1376e-05 L10_sharp:6.1629e-05 L11_sharp:1.0493e-04 L12_sharp:5.5073e-04 total_fnorm:8.5000e+01 total_l1_linf:2.4371e+05 total_spectral:4.2250e+01 L1_fnorm:1.2000e+01 L2_fnorm:1.2000e+01 L3_fnorm:1.1750e+01 L4_fnorm:1.2312e+01 L5_fnorm:1.2125e+01 L6_fnorm:1.2500e+01 L7_fnorm:1.2375e+01 L8_fnorm:1.2062e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2375e+01 L11_fnorm:1.2438e+01 L12_fnorm:1.2438e+01 L1_l1linf:3.2188e+00 L2_l1linf:2.9844e+00 L3_l1linf:2.1250e+00 L4_l1linf:2.8281e+00 L5_l1linf:3.1250e+00 L6_l1linf:3.0781e+00 L7_l1linf:3.1562e+00 L8_l1linf:3.0781e+00 L9_l1linf:2.9219e+00 L10_l1linf:2.8438e+00 L11_l1linf:2.8594e+00 L12_l1linf:2.8125e+00 L1_spectral:1.5538e-01 L2_spectral:1.5102e-01 L3_spectral:1.5344e-01 L4_spectral:1.5384e-01 L5_spectral:1.5450e-01 L6_spectral:1.5584e-01 L7_spectral:1.5454e-01 L8_spectral:1.5380e-01 L9_spectral:1.5728e-01 L10_spectral:1.5533e-01 L11_spectral:1.5498e-01 L12_spectral:1.5412e-01 train_time:191035ms step_avg:43.42ms +[2025-09-11 08:38:27] [Rank 0] PRINT: step:4400/10000 val_loss:4.5271 total_sharp:1.3463e-04 L1_sharp:2.6998e-04 L2_sharp:4.6377e-06 L3_sharp:1.1584e-06 L4_sharp:2.2638e-05 L5_sharp:6.5108e-05 L6_sharp:4.2994e-05 L7_sharp:3.3779e-05 L8_sharp:5.5057e-05 L9_sharp:5.1376e-05 L10_sharp:6.1629e-05 L11_sharp:1.0493e-04 L12_sharp:5.5073e-04 total_fnorm:8.5000e+01 total_l1_linf:2.4371e+05 total_spectral:4.2250e+01 L1_fnorm:1.2000e+01 L2_fnorm:1.2000e+01 L3_fnorm:1.1750e+01 L4_fnorm:1.2312e+01 L5_fnorm:1.2125e+01 L6_fnorm:1.2500e+01 L7_fnorm:1.2375e+01 L8_fnorm:1.2062e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2375e+01 L11_fnorm:1.2438e+01 L12_fnorm:1.2438e+01 L1_l1linf:3.2188e+00 L2_l1linf:2.9844e+00 L3_l1linf:2.1250e+00 L4_l1linf:2.8281e+00 L5_l1linf:3.1250e+00 
L6_l1linf:3.0781e+00 L7_l1linf:3.1562e+00 L8_l1linf:3.0781e+00 L9_l1linf:2.9219e+00 L10_l1linf:2.8438e+00 L11_l1linf:2.8594e+00 L12_l1linf:2.8125e+00 L1_spectral:1.5538e-01 L2_spectral:1.5102e-01 L3_spectral:1.5344e-01 L4_spectral:1.5384e-01 L5_spectral:1.5450e-01 L6_spectral:1.5584e-01 L7_spectral:1.5454e-01 L8_spectral:1.5380e-01 L9_spectral:1.5728e-01 L10_spectral:1.5533e-01 L11_spectral:1.5498e-01 L12_spectral:1.5412e-01 train_time:191035ms step_avg:43.42ms +[2025-09-11 08:38:28] [Rank 0] step:4401/10000 train_time:192267ms step_avg:43.69ms +[2025-09-11 08:38:28] [Rank 0] step:4401/10000 train_time:192267ms step_avg:43.69ms +[2025-09-11 08:38:29] [Rank 0] step:4421/10000 train_time:192977ms step_avg:43.65ms +[2025-09-11 08:38:29] [Rank 0] step:4421/10000 train_time:192977ms step_avg:43.65ms +[2025-09-11 08:38:29] [Rank 0] step:4441/10000 train_time:193655ms step_avg:43.61ms +[2025-09-11 08:38:29] [Rank 0] step:4441/10000 train_time:193655ms step_avg:43.61ms +[2025-09-11 08:38:30] [Rank 0] step:4461/10000 train_time:194332ms step_avg:43.56ms +[2025-09-11 08:38:30] [Rank 0] step:4461/10000 train_time:194332ms step_avg:43.56ms +[2025-09-11 08:38:31] [Rank 0] step:4481/10000 train_time:195012ms step_avg:43.52ms +[2025-09-11 08:38:31] [Rank 0] step:4481/10000 train_time:195012ms step_avg:43.52ms +[2025-09-11 08:38:31] [Rank 0] step:4501/10000 train_time:195691ms step_avg:43.48ms +[2025-09-11 08:38:31] [Rank 0] step:4501/10000 train_time:195691ms step_avg:43.48ms +[2025-09-11 08:38:32] [Rank 0] step:4521/10000 train_time:196370ms step_avg:43.44ms +[2025-09-11 08:38:32] [Rank 0] step:4521/10000 train_time:196370ms step_avg:43.44ms +[2025-09-11 08:38:33] [Rank 0] step:4541/10000 train_time:197049ms step_avg:43.39ms +[2025-09-11 08:38:33] [Rank 0] step:4541/10000 train_time:197049ms step_avg:43.39ms +[2025-09-11 08:38:33] [Rank 0] step:4561/10000 train_time:197728ms step_avg:43.35ms +[2025-09-11 08:38:33] [Rank 0] step:4561/10000 train_time:197728ms step_avg:43.35ms 
+[2025-09-11 08:38:34] [Rank 0] step:4581/10000 train_time:198407ms step_avg:43.31ms +[2025-09-11 08:38:34] [Rank 0] step:4581/10000 train_time:198407ms step_avg:43.31ms +[2025-09-11 08:38:35] [Rank 0] step:4601/10000 train_time:199086ms step_avg:43.27ms +[2025-09-11 08:38:35] [Rank 0] step:4601/10000 train_time:199086ms step_avg:43.27ms +[2025-09-11 08:38:35] [Rank 0] step:4621/10000 train_time:199764ms step_avg:43.23ms +[2025-09-11 08:38:35] [Rank 0] step:4621/10000 train_time:199764ms step_avg:43.23ms +[2025-09-11 08:38:36] [Rank 0] step:4641/10000 train_time:200442ms step_avg:43.19ms +[2025-09-11 08:38:36] [Rank 0] step:4641/10000 train_time:200442ms step_avg:43.19ms +[2025-09-11 08:38:37] [Rank 0] step:4661/10000 train_time:201121ms step_avg:43.15ms +[2025-09-11 08:38:37] [Rank 0] step:4661/10000 train_time:201121ms step_avg:43.15ms +[2025-09-11 08:38:37] [Rank 0] step:4681/10000 train_time:201799ms step_avg:43.11ms +[2025-09-11 08:38:37] [Rank 0] step:4681/10000 train_time:201799ms step_avg:43.11ms +[2025-09-11 08:38:38] [Rank 0] step:4701/10000 train_time:202478ms step_avg:43.07ms +[2025-09-11 08:38:38] [Rank 0] step:4701/10000 train_time:202478ms step_avg:43.07ms +[2025-09-11 08:38:39] [Rank 0] step:4721/10000 train_time:203157ms step_avg:43.03ms +[2025-09-11 08:38:39] [Rank 0] step:4721/10000 train_time:203157ms step_avg:43.03ms +[2025-09-11 08:38:39] [Rank 0] step:4741/10000 train_time:203836ms step_avg:42.99ms +[2025-09-11 08:38:39] [Rank 0] step:4741/10000 train_time:203836ms step_avg:42.99ms +[2025-09-11 08:38:40] [Rank 0] step:4761/10000 train_time:204526ms step_avg:42.96ms +[2025-09-11 08:38:40] [Rank 0] step:4761/10000 train_time:204526ms step_avg:42.96ms +[2025-09-11 08:38:41] [Rank 0] step:4781/10000 train_time:205205ms step_avg:42.92ms +[2025-09-11 08:38:41] [Rank 0] step:4781/10000 train_time:205205ms step_avg:42.92ms +[2025-09-11 08:38:41] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 08:38:41] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 08:38:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 08:38:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 08:38:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 08:38:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 08:38:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:38:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:38:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 08:38:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 08:38:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 08:38:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 08:38:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 08:38:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 08:38:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 08:38:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 08:38:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 08:38:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 08:38:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 08:38:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 08:38:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 08:38:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 08:38:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 08:38:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 08:38:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 08:38:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 08:38:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 08:38:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 08:38:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 08:38:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 08:38:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 08:38:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 08:38:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 08:38:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 08:38:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 08:38:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 08:38:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 08:38:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 08:38:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 08:38:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 08:38:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 08:38:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 08:38:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 08:38:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 08:38:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 08:38:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 08:38:51] [Rank 0] PRINT: step:4800/10000 val_loss:4.4719 total_sharp:1.4123e-04 L1_sharp:2.4735e-04 L2_sharp:3.9339e-05 L3_sharp:5.7780e-05 L4_sharp:1.2899e-05 L5_sharp:2.7906e-05 L6_sharp:5.2352e-05 L7_sharp:2.3856e-05 L8_sharp:4.0877e-05 L9_sharp:4.3670e-05 L10_sharp:5.2024e-05 L11_sharp:1.0991e-04 L12_sharp:7.3247e-04 total_fnorm:8.7500e+01 total_l1_linf:2.5395e+05 total_spectral:4.3500e+01 L1_fnorm:1.2000e+01 L2_fnorm:1.2000e+01 L3_fnorm:1.1750e+01 L4_fnorm:1.2438e+01 L5_fnorm:1.2188e+01 L6_fnorm:1.2562e+01 L7_fnorm:1.2438e+01 L8_fnorm:1.2125e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2375e+01 L12_fnorm:1.2312e+01 L1_l1linf:3.1719e+00 L2_l1linf:2.8906e+00 L3_l1linf:2.2812e+00 L4_l1linf:2.7812e+00 L5_l1linf:3.1562e+00 L6_l1linf:3.1875e+00 L7_l1linf:3.1250e+00 L8_l1linf:3.0781e+00 L9_l1linf:2.9844e+00 L10_l1linf:2.8594e+00 L11_l1linf:2.8750e+00 L12_l1linf:2.7812e+00 L1_spectral:1.5576e-01 L2_spectral:1.5146e-01 L3_spectral:1.5452e-01 L4_spectral:1.5441e-01 L5_spectral:1.5503e-01 L6_spectral:1.5630e-01 L7_spectral:1.5581e-01 L8_spectral:1.5429e-01 L9_spectral:1.5657e-01 L10_spectral:1.5624e-01 L11_spectral:1.5544e-01 L12_spectral:1.5527e-01 train_time:205864ms step_avg:42.89ms +[2025-09-11 08:38:51] [Rank 0] PRINT: step:4800/10000 
val_loss:4.4719 total_sharp:1.4123e-04 L1_sharp:2.4735e-04 L2_sharp:3.9339e-05 L3_sharp:5.7780e-05 L4_sharp:1.2899e-05 L5_sharp:2.7906e-05 L6_sharp:5.2352e-05 L7_sharp:2.3856e-05 L8_sharp:4.0877e-05 L9_sharp:4.3670e-05 L10_sharp:5.2024e-05 L11_sharp:1.0991e-04 L12_sharp:7.3247e-04 total_fnorm:8.7500e+01 total_l1_linf:2.5395e+05 total_spectral:4.3500e+01 L1_fnorm:1.2000e+01 L2_fnorm:1.2000e+01 L3_fnorm:1.1750e+01 L4_fnorm:1.2438e+01 L5_fnorm:1.2188e+01 L6_fnorm:1.2562e+01 L7_fnorm:1.2438e+01 L8_fnorm:1.2125e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2375e+01 L12_fnorm:1.2312e+01 L1_l1linf:3.1719e+00 L2_l1linf:2.8906e+00 L3_l1linf:2.2812e+00 L4_l1linf:2.7812e+00 L5_l1linf:3.1562e+00 L6_l1linf:3.1875e+00 L7_l1linf:3.1250e+00 L8_l1linf:3.0781e+00 L9_l1linf:2.9844e+00 L10_l1linf:2.8594e+00 L11_l1linf:2.8750e+00 L12_l1linf:2.7812e+00 L1_spectral:1.5576e-01 L2_spectral:1.5146e-01 L3_spectral:1.5452e-01 L4_spectral:1.5441e-01 L5_spectral:1.5503e-01 L6_spectral:1.5630e-01 L7_spectral:1.5581e-01 L8_spectral:1.5429e-01 L9_spectral:1.5657e-01 L10_spectral:1.5624e-01 L11_spectral:1.5544e-01 L12_spectral:1.5527e-01 train_time:205864ms step_avg:42.89ms +[2025-09-11 08:38:53] [Rank 0] step:4801/10000 train_time:207076ms step_avg:43.13ms +[2025-09-11 08:38:53] [Rank 0] step:4801/10000 train_time:207076ms step_avg:43.13ms +[2025-09-11 08:38:53] [Rank 0] step:4821/10000 train_time:207779ms step_avg:43.10ms +[2025-09-11 08:38:53] [Rank 0] step:4821/10000 train_time:207779ms step_avg:43.10ms +[2025-09-11 08:38:54] [Rank 0] step:4841/10000 train_time:208462ms step_avg:43.06ms +[2025-09-11 08:38:54] [Rank 0] step:4841/10000 train_time:208462ms step_avg:43.06ms +[2025-09-11 08:38:55] [Rank 0] step:4861/10000 train_time:209140ms step_avg:43.02ms +[2025-09-11 08:38:55] [Rank 0] step:4861/10000 train_time:209140ms step_avg:43.02ms +[2025-09-11 08:38:55] [Rank 0] step:4881/10000 train_time:209819ms step_avg:42.99ms +[2025-09-11 08:38:55] [Rank 0] step:4881/10000 
train_time:209819ms step_avg:42.99ms +[2025-09-11 08:38:56] [Rank 0] step:4901/10000 train_time:210497ms step_avg:42.95ms +[2025-09-11 08:38:56] [Rank 0] step:4901/10000 train_time:210497ms step_avg:42.95ms +[2025-09-11 08:38:57] [Rank 0] step:4921/10000 train_time:211175ms step_avg:42.91ms +[2025-09-11 08:38:57] [Rank 0] step:4921/10000 train_time:211175ms step_avg:42.91ms +[2025-09-11 08:38:57] [Rank 0] step:4941/10000 train_time:211853ms step_avg:42.88ms +[2025-09-11 08:38:57] [Rank 0] step:4941/10000 train_time:211853ms step_avg:42.88ms +[2025-09-11 08:38:58] [Rank 0] step:4961/10000 train_time:212531ms step_avg:42.84ms +[2025-09-11 08:38:58] [Rank 0] step:4961/10000 train_time:212531ms step_avg:42.84ms +[2025-09-11 08:38:59] [Rank 0] step:4981/10000 train_time:213209ms step_avg:42.80ms +[2025-09-11 08:38:59] [Rank 0] step:4981/10000 train_time:213209ms step_avg:42.80ms +[2025-09-11 08:38:59] [Rank 0] step:5001/10000 train_time:213888ms step_avg:42.77ms +[2025-09-11 08:38:59] [Rank 0] step:5001/10000 train_time:213888ms step_avg:42.77ms +[2025-09-11 08:39:00] [Rank 0] step:5021/10000 train_time:214565ms step_avg:42.73ms +[2025-09-11 08:39:00] [Rank 0] step:5021/10000 train_time:214565ms step_avg:42.73ms +[2025-09-11 08:39:01] [Rank 0] step:5041/10000 train_time:215243ms step_avg:42.70ms +[2025-09-11 08:39:01] [Rank 0] step:5041/10000 train_time:215243ms step_avg:42.70ms +[2025-09-11 08:39:02] [Rank 0] step:5061/10000 train_time:215921ms step_avg:42.66ms +[2025-09-11 08:39:02] [Rank 0] step:5061/10000 train_time:215921ms step_avg:42.66ms +[2025-09-11 08:39:02] [Rank 0] step:5081/10000 train_time:216600ms step_avg:42.63ms +[2025-09-11 08:39:02] [Rank 0] step:5081/10000 train_time:216600ms step_avg:42.63ms +[2025-09-11 08:39:03] [Rank 0] step:5101/10000 train_time:217279ms step_avg:42.60ms +[2025-09-11 08:39:03] [Rank 0] step:5101/10000 train_time:217279ms step_avg:42.60ms +[2025-09-11 08:39:04] [Rank 0] step:5121/10000 train_time:217956ms step_avg:42.56ms 
+[2025-09-11 08:39:04] [Rank 0] step:5121/10000 train_time:217956ms step_avg:42.56ms +[2025-09-11 08:39:04] [Rank 0] step:5141/10000 train_time:218634ms step_avg:42.53ms +[2025-09-11 08:39:04] [Rank 0] step:5141/10000 train_time:218634ms step_avg:42.53ms +[2025-09-11 08:39:05] [Rank 0] step:5161/10000 train_time:219312ms step_avg:42.49ms +[2025-09-11 08:39:05] [Rank 0] step:5161/10000 train_time:219312ms step_avg:42.49ms +[2025-09-11 08:39:06] [Rank 0] step:5181/10000 train_time:219989ms step_avg:42.46ms +[2025-09-11 08:39:06] [Rank 0] step:5181/10000 train_time:219989ms step_avg:42.46ms +[2025-09-11 08:39:06] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 08:39:06] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 08:39:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 08:39:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 08:39:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 08:39:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 08:39:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:39:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:39:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 08:39:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 08:39:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 08:39:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 08:39:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 08:39:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 08:39:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 08:39:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 08:39:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 08:39:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 08:39:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 08:39:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 08:39:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 08:39:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 08:39:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 08:39:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 08:39:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 08:39:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 08:39:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 08:39:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 08:39:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 08:39:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 08:39:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 08:39:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 08:39:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 08:39:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 08:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 08:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 08:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 08:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 08:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 08:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 08:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 08:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 08:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 08:39:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 08:39:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 08:39:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:39:16] [Rank 0] PRINT: step:5200/10000 val_loss:4.4462 total_sharp:2.0131e-04 L1_sharp:1.8095e-04 L2_sharp:3.9684e-05 L3_sharp:6.9961e-06 L4_sharp:1.2857e-05 L5_sharp:2.7466e-05 L6_sharp:4.3533e-05 L7_sharp:4.0206e-05 L8_sharp:4.9208e-05 L9_sharp:6.1491e-05 L10_sharp:6.5854e-05 L11_sharp:1.1846e-04 L12_sharp:1.9038e-03 total_fnorm:8.2500e+01 total_l1_linf:2.3347e+05 total_spectral:4.1000e+01 L1_fnorm:1.2000e+01 L2_fnorm:1.2125e+01 L3_fnorm:1.1688e+01 L4_fnorm:1.2438e+01 L5_fnorm:1.2188e+01 L6_fnorm:1.2562e+01 L7_fnorm:1.2438e+01 L8_fnorm:1.2125e+01 L9_fnorm:1.2562e+01 L10_fnorm:1.2375e+01 L11_fnorm:1.2375e+01 L12_fnorm:1.2375e+01 L1_l1linf:3.1406e+00 L2_l1linf:2.9219e+00 L3_l1linf:2.2031e+00 L4_l1linf:2.7500e+00 L5_l1linf:3.1094e+00 L6_l1linf:3.1406e+00 L7_l1linf:3.0781e+00 L8_l1linf:3.0625e+00 L9_l1linf:2.9219e+00 L10_l1linf:2.8125e+00 L11_l1linf:2.7812e+00 L12_l1linf:2.7812e+00 L1_spectral:1.5622e-01 L2_spectral:1.5258e-01 L3_spectral:1.5604e-01 L4_spectral:1.5497e-01 L5_spectral:1.5637e-01 L6_spectral:1.5714e-01 L7_spectral:1.5649e-01 L8_spectral:1.5547e-01 L9_spectral:1.5724e-01 L10_spectral:1.5655e-01 L11_spectral:1.5615e-01 L12_spectral:1.5660e-01 train_time:220655ms step_avg:42.43ms +[2025-09-11 08:39:16] [Rank 0] PRINT: step:5200/10000 val_loss:4.4462 total_sharp:2.0131e-04 L1_sharp:1.8095e-04 L2_sharp:3.9684e-05 L3_sharp:6.9961e-06 L4_sharp:1.2857e-05 L5_sharp:2.7466e-05 L6_sharp:4.3533e-05 L7_sharp:4.0206e-05 L8_sharp:4.9208e-05 L9_sharp:6.1491e-05 L10_sharp:6.5854e-05 L11_sharp:1.1846e-04 L12_sharp:1.9038e-03 total_fnorm:8.2500e+01 total_l1_linf:2.3347e+05 total_spectral:4.1000e+01 L1_fnorm:1.2000e+01 L2_fnorm:1.2125e+01 L3_fnorm:1.1688e+01 L4_fnorm:1.2438e+01 L5_fnorm:1.2188e+01 L6_fnorm:1.2562e+01 L7_fnorm:1.2438e+01 L8_fnorm:1.2125e+01 L9_fnorm:1.2562e+01 L10_fnorm:1.2375e+01 L11_fnorm:1.2375e+01 L12_fnorm:1.2375e+01 L1_l1linf:3.1406e+00 L2_l1linf:2.9219e+00 L3_l1linf:2.2031e+00 L4_l1linf:2.7500e+00 L5_l1linf:3.1094e+00 
L6_l1linf:3.1406e+00 L7_l1linf:3.0781e+00 L8_l1linf:3.0625e+00 L9_l1linf:2.9219e+00 L10_l1linf:2.8125e+00 L11_l1linf:2.7812e+00 L12_l1linf:2.7812e+00 L1_spectral:1.5622e-01 L2_spectral:1.5258e-01 L3_spectral:1.5604e-01 L4_spectral:1.5497e-01 L5_spectral:1.5637e-01 L6_spectral:1.5714e-01 L7_spectral:1.5649e-01 L8_spectral:1.5547e-01 L9_spectral:1.5724e-01 L10_spectral:1.5655e-01 L11_spectral:1.5615e-01 L12_spectral:1.5660e-01 train_time:220655ms step_avg:42.43ms +[2025-09-11 08:39:17] [Rank 0] step:5201/10000 train_time:221947ms step_avg:42.67ms +[2025-09-11 08:39:17] [Rank 0] step:5201/10000 train_time:221947ms step_avg:42.67ms +[2025-09-11 08:39:18] [Rank 0] step:5221/10000 train_time:222661ms step_avg:42.65ms +[2025-09-11 08:39:18] [Rank 0] step:5221/10000 train_time:222661ms step_avg:42.65ms +[2025-09-11 08:39:19] [Rank 0] step:5241/10000 train_time:223349ms step_avg:42.62ms +[2025-09-11 08:39:19] [Rank 0] step:5241/10000 train_time:223349ms step_avg:42.62ms +[2025-09-11 08:39:19] [Rank 0] step:5261/10000 train_time:224036ms step_avg:42.58ms +[2025-09-11 08:39:19] [Rank 0] step:5261/10000 train_time:224036ms step_avg:42.58ms +[2025-09-11 08:39:20] [Rank 0] step:5281/10000 train_time:224724ms step_avg:42.55ms +[2025-09-11 08:39:20] [Rank 0] step:5281/10000 train_time:224724ms step_avg:42.55ms +[2025-09-11 08:39:21] [Rank 0] step:5301/10000 train_time:225411ms step_avg:42.52ms +[2025-09-11 08:39:21] [Rank 0] step:5301/10000 train_time:225411ms step_avg:42.52ms +[2025-09-11 08:39:22] [Rank 0] step:5321/10000 train_time:226098ms step_avg:42.49ms +[2025-09-11 08:39:22] [Rank 0] step:5321/10000 train_time:226098ms step_avg:42.49ms +[2025-09-11 08:39:22] [Rank 0] step:5341/10000 train_time:226787ms step_avg:42.46ms +[2025-09-11 08:39:22] [Rank 0] step:5341/10000 train_time:226787ms step_avg:42.46ms +[2025-09-11 08:39:23] [Rank 0] step:5361/10000 train_time:227474ms step_avg:42.43ms +[2025-09-11 08:39:23] [Rank 0] step:5361/10000 train_time:227474ms step_avg:42.43ms 
+[2025-09-11 08:39:24] [Rank 0] step:5381/10000 train_time:228162ms step_avg:42.40ms +[2025-09-11 08:39:24] [Rank 0] step:5381/10000 train_time:228162ms step_avg:42.40ms +[2025-09-11 08:39:24] [Rank 0] step:5401/10000 train_time:228854ms step_avg:42.37ms +[2025-09-11 08:39:24] [Rank 0] step:5401/10000 train_time:228854ms step_avg:42.37ms +[2025-09-11 08:39:25] [Rank 0] step:5421/10000 train_time:229542ms step_avg:42.34ms +[2025-09-11 08:39:25] [Rank 0] step:5421/10000 train_time:229542ms step_avg:42.34ms +[2025-09-11 08:39:26] [Rank 0] step:5441/10000 train_time:230229ms step_avg:42.31ms +[2025-09-11 08:39:26] [Rank 0] step:5441/10000 train_time:230229ms step_avg:42.31ms +[2025-09-11 08:39:26] [Rank 0] step:5461/10000 train_time:230918ms step_avg:42.28ms +[2025-09-11 08:39:26] [Rank 0] step:5461/10000 train_time:230918ms step_avg:42.28ms +[2025-09-11 08:39:27] [Rank 0] step:5481/10000 train_time:231605ms step_avg:42.26ms +[2025-09-11 08:39:27] [Rank 0] step:5481/10000 train_time:231605ms step_avg:42.26ms +[2025-09-11 08:39:28] [Rank 0] step:5501/10000 train_time:232293ms step_avg:42.23ms +[2025-09-11 08:39:28] [Rank 0] step:5501/10000 train_time:232293ms step_avg:42.23ms +[2025-09-11 08:39:28] [Rank 0] step:5521/10000 train_time:232980ms step_avg:42.20ms +[2025-09-11 08:39:28] [Rank 0] step:5521/10000 train_time:232980ms step_avg:42.20ms +[2025-09-11 08:39:29] [Rank 0] step:5541/10000 train_time:233670ms step_avg:42.17ms +[2025-09-11 08:39:29] [Rank 0] step:5541/10000 train_time:233670ms step_avg:42.17ms +[2025-09-11 08:39:30] [Rank 0] step:5561/10000 train_time:234360ms step_avg:42.14ms +[2025-09-11 08:39:30] [Rank 0] step:5561/10000 train_time:234360ms step_avg:42.14ms +[2025-09-11 08:39:31] [Rank 0] step:5581/10000 train_time:235048ms step_avg:42.12ms +[2025-09-11 08:39:31] [Rank 0] step:5581/10000 train_time:235048ms step_avg:42.12ms +[2025-09-11 08:39:31] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 08:39:31] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 08:39:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 08:39:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 08:39:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 08:39:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 08:39:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:39:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:39:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 08:39:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 08:39:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 08:39:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 08:39:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 08:39:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 08:39:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 08:39:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 08:39:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 08:39:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 08:39:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 08:39:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 08:39:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 08:39:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 08:39:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 08:39:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 08:39:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 08:39:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 08:39:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 08:39:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 08:39:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 08:39:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 08:39:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 08:39:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 08:39:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 08:39:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 08:39:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 08:39:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 08:39:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 08:39:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 08:39:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 08:39:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 08:39:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 08:39:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 08:39:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 08:39:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 08:39:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 08:39:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 08:39:44] [Rank 0] PRINT: step:5600/10000 val_loss:4.4202 total_sharp:1.2457e-04 L1_sharp:1.8236e-04 L2_sharp:3.7260e-05 L3_sharp:-2.6601e-06 L4_sharp:4.1537e-06 L5_sharp:3.7947e-05 L6_sharp:2.7230e-05 L7_sharp:1.0249e-05 L8_sharp:3.8308e-05 L9_sharp:3.2871e-05 L10_sharp:5.6236e-05 L11_sharp:7.8815e-05 L12_sharp:5.0294e-04 total_fnorm:8.3500e+01 total_l1_linf:2.3654e+05 total_spectral:4.1500e+01 L1_fnorm:1.1938e+01 L2_fnorm:1.2062e+01 L3_fnorm:1.1438e+01 L4_fnorm:1.2375e+01 L5_fnorm:1.2000e+01 L6_fnorm:1.2500e+01 L7_fnorm:1.2438e+01 L8_fnorm:1.2062e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2312e+01 L12_fnorm:1.2250e+01 L1_l1linf:3.1250e+00 L2_l1linf:2.8438e+00 L3_l1linf:2.2500e+00 L4_l1linf:2.7812e+00 L5_l1linf:3.0938e+00 L6_l1linf:3.1094e+00 L7_l1linf:3.1094e+00 L8_l1linf:3.0000e+00 L9_l1linf:2.9219e+00 L10_l1linf:2.7969e+00 L11_l1linf:2.7500e+00 L12_l1linf:2.7344e+00 L1_spectral:1.5639e-01 L2_spectral:1.5196e-01 L3_spectral:1.5606e-01 L4_spectral:1.5609e-01 L5_spectral:1.5563e-01 L6_spectral:1.6018e-01 L7_spectral:1.5699e-01 L8_spectral:1.5405e-01 L9_spectral:1.5834e-01 L10_spectral:1.5767e-01 L11_spectral:1.5639e-01 L12_spectral:1.5597e-01 train_time:235716ms step_avg:42.09ms +[2025-09-11 08:39:44] [Rank 0] PRINT: step:5600/10000 
val_loss:4.4202 total_sharp:1.2457e-04 L1_sharp:1.8236e-04 L2_sharp:3.7260e-05 L3_sharp:-2.6601e-06 L4_sharp:4.1537e-06 L5_sharp:3.7947e-05 L6_sharp:2.7230e-05 L7_sharp:1.0249e-05 L8_sharp:3.8308e-05 L9_sharp:3.2871e-05 L10_sharp:5.6236e-05 L11_sharp:7.8815e-05 L12_sharp:5.0294e-04 total_fnorm:8.3500e+01 total_l1_linf:2.3654e+05 total_spectral:4.1500e+01 L1_fnorm:1.1938e+01 L2_fnorm:1.2062e+01 L3_fnorm:1.1438e+01 L4_fnorm:1.2375e+01 L5_fnorm:1.2000e+01 L6_fnorm:1.2500e+01 L7_fnorm:1.2438e+01 L8_fnorm:1.2062e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2312e+01 L12_fnorm:1.2250e+01 L1_l1linf:3.1250e+00 L2_l1linf:2.8438e+00 L3_l1linf:2.2500e+00 L4_l1linf:2.7812e+00 L5_l1linf:3.0938e+00 L6_l1linf:3.1094e+00 L7_l1linf:3.1094e+00 L8_l1linf:3.0000e+00 L9_l1linf:2.9219e+00 L10_l1linf:2.7969e+00 L11_l1linf:2.7500e+00 L12_l1linf:2.7344e+00 L1_spectral:1.5639e-01 L2_spectral:1.5196e-01 L3_spectral:1.5606e-01 L4_spectral:1.5609e-01 L5_spectral:1.5563e-01 L6_spectral:1.6018e-01 L7_spectral:1.5699e-01 L8_spectral:1.5405e-01 L9_spectral:1.5834e-01 L10_spectral:1.5767e-01 L11_spectral:1.5639e-01 L12_spectral:1.5597e-01 train_time:235716ms step_avg:42.09ms +[2025-09-11 08:39:45] [Rank 0] step:5601/10000 train_time:237005ms step_avg:42.31ms +[2025-09-11 08:39:45] [Rank 0] step:5601/10000 train_time:237005ms step_avg:42.31ms +[2025-09-11 08:39:46] [Rank 0] step:5621/10000 train_time:237968ms step_avg:42.34ms +[2025-09-11 08:39:46] [Rank 0] step:5621/10000 train_time:237968ms step_avg:42.34ms +[2025-09-11 08:39:47] [Rank 0] step:5641/10000 train_time:238656ms step_avg:42.31ms +[2025-09-11 08:39:47] [Rank 0] step:5641/10000 train_time:238656ms step_avg:42.31ms +[2025-09-11 08:39:48] [Rank 0] step:5661/10000 train_time:239621ms step_avg:42.33ms +[2025-09-11 08:39:48] [Rank 0] step:5661/10000 train_time:239621ms step_avg:42.33ms +[2025-09-11 08:39:48] [Rank 0] step:5681/10000 train_time:240310ms step_avg:42.30ms +[2025-09-11 08:39:48] [Rank 0] step:5681/10000 
train_time:240310ms step_avg:42.30ms +[2025-09-11 08:39:49] [Rank 0] step:5701/10000 train_time:241000ms step_avg:42.27ms +[2025-09-11 08:39:49] [Rank 0] step:5701/10000 train_time:241000ms step_avg:42.27ms +[2025-09-11 08:39:50] [Rank 0] step:5721/10000 train_time:241688ms step_avg:42.25ms +[2025-09-11 08:39:50] [Rank 0] step:5721/10000 train_time:241688ms step_avg:42.25ms +[2025-09-11 08:39:51] [Rank 0] step:5741/10000 train_time:242378ms step_avg:42.22ms +[2025-09-11 08:39:51] [Rank 0] step:5741/10000 train_time:242378ms step_avg:42.22ms +[2025-09-11 08:39:51] [Rank 0] step:5761/10000 train_time:243068ms step_avg:42.19ms +[2025-09-11 08:39:51] [Rank 0] step:5761/10000 train_time:243068ms step_avg:42.19ms +[2025-09-11 08:39:52] [Rank 0] step:5781/10000 train_time:243757ms step_avg:42.17ms +[2025-09-11 08:39:52] [Rank 0] step:5781/10000 train_time:243757ms step_avg:42.17ms +[2025-09-11 08:39:53] [Rank 0] step:5801/10000 train_time:244447ms step_avg:42.14ms +[2025-09-11 08:39:53] [Rank 0] step:5801/10000 train_time:244447ms step_avg:42.14ms +[2025-09-11 08:39:53] [Rank 0] step:5821/10000 train_time:245135ms step_avg:42.11ms +[2025-09-11 08:39:53] [Rank 0] step:5821/10000 train_time:245135ms step_avg:42.11ms +[2025-09-11 08:39:54] [Rank 0] step:5841/10000 train_time:245824ms step_avg:42.09ms +[2025-09-11 08:39:54] [Rank 0] step:5841/10000 train_time:245824ms step_avg:42.09ms +[2025-09-11 08:39:55] [Rank 0] step:5861/10000 train_time:246511ms step_avg:42.06ms +[2025-09-11 08:39:55] [Rank 0] step:5861/10000 train_time:246511ms step_avg:42.06ms +[2025-09-11 08:39:55] [Rank 0] step:5881/10000 train_time:247199ms step_avg:42.03ms +[2025-09-11 08:39:55] [Rank 0] step:5881/10000 train_time:247199ms step_avg:42.03ms +[2025-09-11 08:39:56] [Rank 0] step:5901/10000 train_time:247888ms step_avg:42.01ms +[2025-09-11 08:39:56] [Rank 0] step:5901/10000 train_time:247888ms step_avg:42.01ms +[2025-09-11 08:39:57] [Rank 0] step:5921/10000 train_time:248579ms step_avg:41.98ms 
+[2025-09-11 08:39:57] [Rank 0] step:5921/10000 train_time:248579ms step_avg:41.98ms +[2025-09-11 08:39:57] [Rank 0] step:5941/10000 train_time:249269ms step_avg:41.96ms +[2025-09-11 08:39:57] [Rank 0] step:5941/10000 train_time:249269ms step_avg:41.96ms +[2025-09-11 08:39:58] [Rank 0] step:5961/10000 train_time:249957ms step_avg:41.93ms +[2025-09-11 08:39:58] [Rank 0] step:5961/10000 train_time:249957ms step_avg:41.93ms +[2025-09-11 08:39:59] [Rank 0] step:5981/10000 train_time:250647ms step_avg:41.91ms +[2025-09-11 08:39:59] [Rank 0] step:5981/10000 train_time:250647ms step_avg:41.91ms +[2025-09-11 08:39:59] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 08:39:59] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 08:40:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 08:40:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 08:40:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 08:40:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 08:40:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:40:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:40:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 08:40:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 08:40:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 08:40:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 08:40:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 08:40:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 08:40:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 08:40:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 08:40:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 08:40:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 08:40:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 08:40:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 08:40:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 08:40:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 08:40:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 08:40:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 08:40:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 08:40:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 08:40:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 08:40:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 08:40:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 08:40:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 08:40:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 08:40:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 08:40:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 08:40:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 08:40:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 08:40:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 08:40:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 08:40:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 08:40:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 08:40:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 08:40:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 08:40:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 08:40:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 08:40:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 08:40:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 08:40:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:40:09] [Rank 0] PRINT: step:6000/10000 val_loss:4.3839 total_sharp:1.0920e-04 L1_sharp:1.8065e-04 L2_sharp:4.2507e-05 L3_sharp:9.7712e-06 L4_sharp:8.4837e-06 L5_sharp:4.5672e-05 L6_sharp:2.0476e-05 L7_sharp:2.3279e-05 L8_sharp:2.7688e-05 L9_sharp:3.4842e-05 L10_sharp:5.5693e-05 L11_sharp:8.7264e-05 L12_sharp:5.0237e-04 total_fnorm:8.4500e+01 total_l1_linf:2.3654e+05 total_spectral:4.2000e+01 L1_fnorm:1.1938e+01 L2_fnorm:1.2062e+01 L3_fnorm:1.1250e+01 L4_fnorm:1.2438e+01 L5_fnorm:1.2188e+01 L6_fnorm:1.2500e+01 L7_fnorm:1.2438e+01 L8_fnorm:1.2125e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2312e+01 L12_fnorm:1.2312e+01 L1_l1linf:3.0781e+00 L2_l1linf:2.7969e+00 L3_l1linf:2.3281e+00 L4_l1linf:2.7500e+00 L5_l1linf:3.1094e+00 L6_l1linf:3.1250e+00 L7_l1linf:3.1094e+00 L8_l1linf:3.0625e+00 L9_l1linf:2.9688e+00 L10_l1linf:2.8438e+00 L11_l1linf:2.7500e+00 L12_l1linf:2.6719e+00 L1_spectral:1.5674e-01 L2_spectral:1.5276e-01 L3_spectral:1.5623e-01 L4_spectral:1.5634e-01 L5_spectral:1.5609e-01 L6_spectral:1.5850e-01 L7_spectral:1.5694e-01 L8_spectral:1.5491e-01 L9_spectral:1.5899e-01 L10_spectral:1.5876e-01 L11_spectral:1.5756e-01 L12_spectral:1.5696e-01 train_time:251318ms step_avg:41.89ms +[2025-09-11 08:40:09] [Rank 0] PRINT: step:6000/10000 val_loss:4.3839 total_sharp:1.0920e-04 L1_sharp:1.8065e-04 L2_sharp:4.2507e-05 L3_sharp:9.7712e-06 L4_sharp:8.4837e-06 L5_sharp:4.5672e-05 L6_sharp:2.0476e-05 L7_sharp:2.3279e-05 L8_sharp:2.7688e-05 L9_sharp:3.4842e-05 L10_sharp:5.5693e-05 L11_sharp:8.7264e-05 L12_sharp:5.0237e-04 total_fnorm:8.4500e+01 total_l1_linf:2.3654e+05 total_spectral:4.2000e+01 L1_fnorm:1.1938e+01 L2_fnorm:1.2062e+01 L3_fnorm:1.1250e+01 L4_fnorm:1.2438e+01 L5_fnorm:1.2188e+01 L6_fnorm:1.2500e+01 L7_fnorm:1.2438e+01 L8_fnorm:1.2125e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2312e+01 L12_fnorm:1.2312e+01 L1_l1linf:3.0781e+00 L2_l1linf:2.7969e+00 L3_l1linf:2.3281e+00 L4_l1linf:2.7500e+00 L5_l1linf:3.1094e+00 
L6_l1linf:3.1250e+00 L7_l1linf:3.1094e+00 L8_l1linf:3.0625e+00 L9_l1linf:2.9688e+00 L10_l1linf:2.8438e+00 L11_l1linf:2.7500e+00 L12_l1linf:2.6719e+00 L1_spectral:1.5674e-01 L2_spectral:1.5276e-01 L3_spectral:1.5623e-01 L4_spectral:1.5634e-01 L5_spectral:1.5609e-01 L6_spectral:1.5850e-01 L7_spectral:1.5694e-01 L8_spectral:1.5491e-01 L9_spectral:1.5899e-01 L10_spectral:1.5876e-01 L11_spectral:1.5756e-01 L12_spectral:1.5696e-01 train_time:251318ms step_avg:41.89ms +[2025-09-11 08:40:11] [Rank 0] step:6001/10000 train_time:252584ms step_avg:42.09ms +[2025-09-11 08:40:11] [Rank 0] step:6001/10000 train_time:252584ms step_avg:42.09ms +[2025-09-11 08:40:11] [Rank 0] step:6021/10000 train_time:253315ms step_avg:42.07ms +[2025-09-11 08:40:11] [Rank 0] step:6021/10000 train_time:253315ms step_avg:42.07ms +[2025-09-11 08:40:12] [Rank 0] step:6041/10000 train_time:254008ms step_avg:42.05ms +[2025-09-11 08:40:12] [Rank 0] step:6041/10000 train_time:254008ms step_avg:42.05ms +[2025-09-11 08:40:13] [Rank 0] step:6061/10000 train_time:254699ms step_avg:42.02ms +[2025-09-11 08:40:13] [Rank 0] step:6061/10000 train_time:254699ms step_avg:42.02ms +[2025-09-11 08:40:13] [Rank 0] step:6081/10000 train_time:255392ms step_avg:42.00ms +[2025-09-11 08:40:13] [Rank 0] step:6081/10000 train_time:255392ms step_avg:42.00ms +[2025-09-11 08:40:14] [Rank 0] step:6101/10000 train_time:256082ms step_avg:41.97ms +[2025-09-11 08:40:14] [Rank 0] step:6101/10000 train_time:256082ms step_avg:41.97ms +[2025-09-11 08:40:15] [Rank 0] step:6121/10000 train_time:256773ms step_avg:41.95ms +[2025-09-11 08:40:15] [Rank 0] step:6121/10000 train_time:256773ms step_avg:41.95ms +[2025-09-11 08:40:16] [Rank 0] step:6141/10000 train_time:257464ms step_avg:41.93ms +[2025-09-11 08:40:16] [Rank 0] step:6141/10000 train_time:257464ms step_avg:41.93ms +[2025-09-11 08:40:16] [Rank 0] step:6161/10000 train_time:258154ms step_avg:41.90ms +[2025-09-11 08:40:16] [Rank 0] step:6161/10000 train_time:258154ms step_avg:41.90ms 
+[2025-09-11 08:40:17] [Rank 0] step:6181/10000 train_time:258843ms step_avg:41.88ms +[2025-09-11 08:40:17] [Rank 0] step:6181/10000 train_time:258843ms step_avg:41.88ms +[2025-09-11 08:40:18] [Rank 0] step:6201/10000 train_time:259535ms step_avg:41.85ms +[2025-09-11 08:40:18] [Rank 0] step:6201/10000 train_time:259535ms step_avg:41.85ms +[2025-09-11 08:40:18] [Rank 0] step:6221/10000 train_time:260226ms step_avg:41.83ms +[2025-09-11 08:40:18] [Rank 0] step:6221/10000 train_time:260226ms step_avg:41.83ms +[2025-09-11 08:40:19] [Rank 0] step:6241/10000 train_time:260916ms step_avg:41.81ms +[2025-09-11 08:40:19] [Rank 0] step:6241/10000 train_time:260916ms step_avg:41.81ms +[2025-09-11 08:40:20] [Rank 0] step:6261/10000 train_time:261606ms step_avg:41.78ms +[2025-09-11 08:40:20] [Rank 0] step:6261/10000 train_time:261606ms step_avg:41.78ms +[2025-09-11 08:40:20] [Rank 0] step:6281/10000 train_time:262297ms step_avg:41.76ms +[2025-09-11 08:40:20] [Rank 0] step:6281/10000 train_time:262297ms step_avg:41.76ms +[2025-09-11 08:40:21] [Rank 0] step:6301/10000 train_time:262986ms step_avg:41.74ms +[2025-09-11 08:40:21] [Rank 0] step:6301/10000 train_time:262986ms step_avg:41.74ms +[2025-09-11 08:40:22] [Rank 0] step:6321/10000 train_time:263679ms step_avg:41.71ms +[2025-09-11 08:40:22] [Rank 0] step:6321/10000 train_time:263679ms step_avg:41.71ms +[2025-09-11 08:40:22] [Rank 0] step:6341/10000 train_time:264371ms step_avg:41.69ms +[2025-09-11 08:40:22] [Rank 0] step:6341/10000 train_time:264371ms step_avg:41.69ms +[2025-09-11 08:40:23] [Rank 0] step:6361/10000 train_time:265063ms step_avg:41.67ms +[2025-09-11 08:40:23] [Rank 0] step:6361/10000 train_time:265063ms step_avg:41.67ms +[2025-09-11 08:40:24] [Rank 0] step:6381/10000 train_time:265753ms step_avg:41.65ms +[2025-09-11 08:40:24] [Rank 0] step:6381/10000 train_time:265753ms step_avg:41.65ms +[2025-09-11 08:40:24] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 08:40:24] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 08:40:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 08:40:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 08:40:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 08:40:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 08:40:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:40:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:40:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 08:40:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 08:40:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 08:40:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 08:40:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 08:40:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 08:40:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 08:40:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 08:40:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 08:40:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 08:40:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 08:40:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 08:40:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 08:40:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 08:40:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 08:40:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 08:40:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 08:40:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 08:40:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 08:40:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 08:40:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 08:40:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 08:40:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 08:40:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 08:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 08:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 08:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 08:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 08:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 08:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 08:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 08:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 08:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 08:40:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 08:40:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 08:40:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 08:40:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 08:40:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 08:40:34] [Rank 0] PRINT: step:6400/10000 val_loss:4.3479 total_sharp:1.2499e-04 L1_sharp:1.0188e-04 L2_sharp:2.3390e-05 L3_sharp:-2.9920e-06 L4_sharp:-4.6944e-06 L5_sharp:3.2533e-05 L6_sharp:2.0730e-05 L7_sharp:1.7812e-05 L8_sharp:4.5064e-05 L9_sharp:4.0641e-05 L10_sharp:5.4399e-05 L11_sharp:8.9547e-05 L12_sharp:5.5165e-04 total_fnorm:7.2500e+01 total_l1_linf:2.0070e+05 total_spectral:3.6750e+01 L1_fnorm:1.0812e+01 L2_fnorm:1.0938e+01 L3_fnorm:1.0125e+01 L4_fnorm:1.1188e+01 L5_fnorm:1.1000e+01 L6_fnorm:1.1312e+01 L7_fnorm:1.1250e+01 L8_fnorm:1.0875e+01 L9_fnorm:1.1250e+01 L10_fnorm:1.1062e+01 L11_fnorm:1.1125e+01 L12_fnorm:1.1062e+01 L1_l1linf:2.7188e+00 L2_l1linf:2.5000e+00 L3_l1linf:2.0938e+00 L4_l1linf:2.3125e+00 L5_l1linf:2.7500e+00 L6_l1linf:2.7812e+00 L7_l1linf:2.7188e+00 L8_l1linf:2.6719e+00 L9_l1linf:2.5781e+00 L10_l1linf:2.4844e+00 L11_l1linf:2.3906e+00 L12_l1linf:2.3750e+00 L1_spectral:1.4501e-01 L2_spectral:1.4127e-01 L3_spectral:1.4459e-01 L4_spectral:1.4408e-01 L5_spectral:1.4229e-01 L6_spectral:1.4574e-01 L7_spectral:1.4440e-01 L8_spectral:1.4266e-01 L9_spectral:1.4576e-01 L10_spectral:1.4460e-01 L11_spectral:1.4528e-01 L12_spectral:1.4491e-01 train_time:266423ms step_avg:41.63ms +[2025-09-11 08:40:34] [Rank 0] PRINT: step:6400/10000 
val_loss:4.3479 total_sharp:1.2499e-04 L1_sharp:1.0188e-04 L2_sharp:2.3390e-05 L3_sharp:-2.9920e-06 L4_sharp:-4.6944e-06 L5_sharp:3.2533e-05 L6_sharp:2.0730e-05 L7_sharp:1.7812e-05 L8_sharp:4.5064e-05 L9_sharp:4.0641e-05 L10_sharp:5.4399e-05 L11_sharp:8.9547e-05 L12_sharp:5.5165e-04 total_fnorm:7.2500e+01 total_l1_linf:2.0070e+05 total_spectral:3.6750e+01 L1_fnorm:1.0812e+01 L2_fnorm:1.0938e+01 L3_fnorm:1.0125e+01 L4_fnorm:1.1188e+01 L5_fnorm:1.1000e+01 L6_fnorm:1.1312e+01 L7_fnorm:1.1250e+01 L8_fnorm:1.0875e+01 L9_fnorm:1.1250e+01 L10_fnorm:1.1062e+01 L11_fnorm:1.1125e+01 L12_fnorm:1.1062e+01 L1_l1linf:2.7188e+00 L2_l1linf:2.5000e+00 L3_l1linf:2.0938e+00 L4_l1linf:2.3125e+00 L5_l1linf:2.7500e+00 L6_l1linf:2.7812e+00 L7_l1linf:2.7188e+00 L8_l1linf:2.6719e+00 L9_l1linf:2.5781e+00 L10_l1linf:2.4844e+00 L11_l1linf:2.3906e+00 L12_l1linf:2.3750e+00 L1_spectral:1.4501e-01 L2_spectral:1.4127e-01 L3_spectral:1.4459e-01 L4_spectral:1.4408e-01 L5_spectral:1.4229e-01 L6_spectral:1.4574e-01 L7_spectral:1.4440e-01 L8_spectral:1.4266e-01 L9_spectral:1.4576e-01 L10_spectral:1.4460e-01 L11_spectral:1.4528e-01 L12_spectral:1.4491e-01 train_time:266423ms step_avg:41.63ms +[2025-09-11 08:40:36] [Rank 0] step:6401/10000 train_time:267727ms step_avg:41.83ms +[2025-09-11 08:40:36] [Rank 0] step:6401/10000 train_time:267727ms step_avg:41.83ms +[2025-09-11 08:40:36] [Rank 0] step:6421/10000 train_time:268451ms step_avg:41.81ms +[2025-09-11 08:40:36] [Rank 0] step:6421/10000 train_time:268451ms step_avg:41.81ms +[2025-09-11 08:40:37] [Rank 0] step:6441/10000 train_time:269142ms step_avg:41.79ms +[2025-09-11 08:40:37] [Rank 0] step:6441/10000 train_time:269142ms step_avg:41.79ms +[2025-09-11 08:40:38] [Rank 0] step:6461/10000 train_time:269834ms step_avg:41.76ms +[2025-09-11 08:40:38] [Rank 0] step:6461/10000 train_time:269834ms step_avg:41.76ms +[2025-09-11 08:40:39] [Rank 0] step:6481/10000 train_time:270527ms step_avg:41.74ms +[2025-09-11 08:40:39] [Rank 0] step:6481/10000 
train_time:270527ms step_avg:41.74ms +[2025-09-11 08:40:39] [Rank 0] step:6501/10000 train_time:271222ms step_avg:41.72ms +[2025-09-11 08:40:39] [Rank 0] step:6501/10000 train_time:271222ms step_avg:41.72ms +[2025-09-11 08:40:40] [Rank 0] step:6521/10000 train_time:271913ms step_avg:41.70ms +[2025-09-11 08:40:40] [Rank 0] step:6521/10000 train_time:271913ms step_avg:41.70ms +[2025-09-11 08:40:41] [Rank 0] step:6541/10000 train_time:272603ms step_avg:41.68ms +[2025-09-11 08:40:41] [Rank 0] step:6541/10000 train_time:272603ms step_avg:41.68ms +[2025-09-11 08:40:41] [Rank 0] step:6561/10000 train_time:273297ms step_avg:41.65ms +[2025-09-11 08:40:41] [Rank 0] step:6561/10000 train_time:273297ms step_avg:41.65ms +[2025-09-11 08:40:42] [Rank 0] step:6581/10000 train_time:273989ms step_avg:41.63ms +[2025-09-11 08:40:42] [Rank 0] step:6581/10000 train_time:273989ms step_avg:41.63ms +[2025-09-11 08:40:43] [Rank 0] step:6601/10000 train_time:274679ms step_avg:41.61ms +[2025-09-11 08:40:43] [Rank 0] step:6601/10000 train_time:274679ms step_avg:41.61ms +[2025-09-11 08:40:43] [Rank 0] step:6621/10000 train_time:275369ms step_avg:41.59ms +[2025-09-11 08:40:43] [Rank 0] step:6621/10000 train_time:275369ms step_avg:41.59ms +[2025-09-11 08:40:44] [Rank 0] step:6641/10000 train_time:276060ms step_avg:41.57ms +[2025-09-11 08:40:44] [Rank 0] step:6641/10000 train_time:276060ms step_avg:41.57ms +[2025-09-11 08:40:45] [Rank 0] step:6661/10000 train_time:276752ms step_avg:41.55ms +[2025-09-11 08:40:45] [Rank 0] step:6661/10000 train_time:276752ms step_avg:41.55ms +[2025-09-11 08:40:45] [Rank 0] step:6681/10000 train_time:277449ms step_avg:41.53ms +[2025-09-11 08:40:45] [Rank 0] step:6681/10000 train_time:277449ms step_avg:41.53ms +[2025-09-11 08:40:46] [Rank 0] step:6701/10000 train_time:278146ms step_avg:41.51ms +[2025-09-11 08:40:46] [Rank 0] step:6701/10000 train_time:278146ms step_avg:41.51ms +[2025-09-11 08:40:47] [Rank 0] step:6721/10000 train_time:278846ms step_avg:41.49ms 
+[2025-09-11 08:40:47] [Rank 0] step:6721/10000 train_time:278846ms step_avg:41.49ms +[2025-09-11 08:40:48] [Rank 0] step:6741/10000 train_time:279545ms step_avg:41.47ms +[2025-09-11 08:40:48] [Rank 0] step:6741/10000 train_time:279545ms step_avg:41.47ms +[2025-09-11 08:40:49] [Rank 0] step:6761/10000 train_time:280597ms step_avg:41.50ms +[2025-09-11 08:40:49] [Rank 0] step:6761/10000 train_time:280597ms step_avg:41.50ms +[2025-09-11 08:40:49] [Rank 0] step:6781/10000 train_time:281481ms step_avg:41.51ms +[2025-09-11 08:40:49] [Rank 0] step:6781/10000 train_time:281481ms step_avg:41.51ms +[2025-09-11 08:40:50] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 08:40:50] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 08:40:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 08:40:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 08:40:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 08:40:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 08:40:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:40:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:40:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 08:40:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 08:40:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 08:40:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 08:40:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 08:40:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 08:40:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 08:40:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 08:40:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 08:40:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 08:40:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 08:40:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 08:40:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 08:40:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 08:40:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 08:40:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 08:40:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 08:40:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 08:40:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 08:40:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 08:40:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 08:40:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 08:40:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 08:40:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 08:40:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 08:40:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 08:40:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 08:40:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 08:40:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 08:40:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 08:40:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 08:40:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 08:40:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 08:40:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 08:40:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 08:40:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 08:41:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 08:41:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:41:00] [Rank 0] PRINT: step:6800/10000 val_loss:4.3112 total_sharp:9.9088e-05 L1_sharp:1.3909e-04 L2_sharp:-1.3031e-06 L3_sharp:5.0288e-07 L4_sharp:-1.9939e-06 L5_sharp:2.1627e-05 L6_sharp:2.5776e-05 L7_sharp:1.3443e-05 L8_sharp:3.0331e-05 L9_sharp:3.2991e-05 L10_sharp:5.0708e-05 L11_sharp:9.1742e-05 L12_sharp:6.7383e-04 total_fnorm:7.0000e+01 total_l1_linf:1.8330e+05 total_spectral:3.5000e+01 L1_fnorm:9.6250e+00 L2_fnorm:9.6250e+00 L3_fnorm:8.9375e+00 L4_fnorm:1.0000e+01 L5_fnorm:9.6875e+00 L6_fnorm:1.0000e+01 L7_fnorm:9.9375e+00 L8_fnorm:9.5625e+00 L9_fnorm:9.9375e+00 L10_fnorm:9.7500e+00 L11_fnorm:9.8125e+00 L12_fnorm:9.7500e+00 L1_l1linf:2.2812e+00 L2_l1linf:2.1250e+00 L3_l1linf:1.9141e+00 L4_l1linf:2.0000e+00 L5_l1linf:2.3438e+00 L6_l1linf:2.3750e+00 L7_l1linf:2.4062e+00 L8_l1linf:2.2969e+00 L9_l1linf:2.2500e+00 L10_l1linf:2.1406e+00 L11_l1linf:2.0312e+00 L12_l1linf:2.0000e+00 L1_spectral:1.2990e-01 L2_spectral:1.2682e-01 L3_spectral:1.2896e-01 L4_spectral:1.2914e-01 L5_spectral:1.2683e-01 L6_spectral:1.3124e-01 L7_spectral:1.3102e-01 L8_spectral:1.2805e-01 L9_spectral:1.3160e-01 L10_spectral:1.3105e-01 L11_spectral:1.3121e-01 L12_spectral:1.3038e-01 train_time:282160ms step_avg:41.49ms +[2025-09-11 08:41:00] [Rank 0] PRINT: step:6800/10000 val_loss:4.3112 total_sharp:9.9088e-05 L1_sharp:1.3909e-04 L2_sharp:-1.3031e-06 L3_sharp:5.0288e-07 L4_sharp:-1.9939e-06 L5_sharp:2.1627e-05 L6_sharp:2.5776e-05 L7_sharp:1.3443e-05 L8_sharp:3.0331e-05 L9_sharp:3.2991e-05 L10_sharp:5.0708e-05 L11_sharp:9.1742e-05 L12_sharp:6.7383e-04 total_fnorm:7.0000e+01 total_l1_linf:1.8330e+05 total_spectral:3.5000e+01 L1_fnorm:9.6250e+00 L2_fnorm:9.6250e+00 L3_fnorm:8.9375e+00 L4_fnorm:1.0000e+01 L5_fnorm:9.6875e+00 L6_fnorm:1.0000e+01 L7_fnorm:9.9375e+00 L8_fnorm:9.5625e+00 L9_fnorm:9.9375e+00 L10_fnorm:9.7500e+00 L11_fnorm:9.8125e+00 L12_fnorm:9.7500e+00 L1_l1linf:2.2812e+00 L2_l1linf:2.1250e+00 L3_l1linf:1.9141e+00 L4_l1linf:2.0000e+00 L5_l1linf:2.3438e+00 
L6_l1linf:2.3750e+00 L7_l1linf:2.4062e+00 L8_l1linf:2.2969e+00 L9_l1linf:2.2500e+00 L10_l1linf:2.1406e+00 L11_l1linf:2.0312e+00 L12_l1linf:2.0000e+00 L1_spectral:1.2990e-01 L2_spectral:1.2682e-01 L3_spectral:1.2896e-01 L4_spectral:1.2914e-01 L5_spectral:1.2683e-01 L6_spectral:1.3124e-01 L7_spectral:1.3102e-01 L8_spectral:1.2805e-01 L9_spectral:1.3160e-01 L10_spectral:1.3105e-01 L11_spectral:1.3121e-01 L12_spectral:1.3038e-01 train_time:282160ms step_avg:41.49ms +[2025-09-11 08:41:01] [Rank 0] step:6801/10000 train_time:283430ms step_avg:41.67ms +[2025-09-11 08:41:01] [Rank 0] step:6801/10000 train_time:283430ms step_avg:41.67ms +[2025-09-11 08:41:02] [Rank 0] step:6821/10000 train_time:284152ms step_avg:41.66ms +[2025-09-11 08:41:02] [Rank 0] step:6821/10000 train_time:284152ms step_avg:41.66ms +[2025-09-11 08:41:03] [Rank 0] step:6841/10000 train_time:284854ms step_avg:41.64ms +[2025-09-11 08:41:03] [Rank 0] step:6841/10000 train_time:284854ms step_avg:41.64ms +[2025-09-11 08:41:03] [Rank 0] step:6861/10000 train_time:285555ms step_avg:41.62ms +[2025-09-11 08:41:03] [Rank 0] step:6861/10000 train_time:285555ms step_avg:41.62ms +[2025-09-11 08:41:04] [Rank 0] step:6881/10000 train_time:286255ms step_avg:41.60ms +[2025-09-11 08:41:04] [Rank 0] step:6881/10000 train_time:286255ms step_avg:41.60ms +[2025-09-11 08:41:05] [Rank 0] step:6901/10000 train_time:286952ms step_avg:41.58ms +[2025-09-11 08:41:05] [Rank 0] step:6901/10000 train_time:286952ms step_avg:41.58ms +[2025-09-11 08:41:05] [Rank 0] step:6921/10000 train_time:287650ms step_avg:41.56ms +[2025-09-11 08:41:05] [Rank 0] step:6921/10000 train_time:287650ms step_avg:41.56ms +[2025-09-11 08:41:06] [Rank 0] step:6941/10000 train_time:288350ms step_avg:41.54ms +[2025-09-11 08:41:06] [Rank 0] step:6941/10000 train_time:288350ms step_avg:41.54ms +[2025-09-11 08:41:07] [Rank 0] step:6961/10000 train_time:289049ms step_avg:41.52ms +[2025-09-11 08:41:07] [Rank 0] step:6961/10000 train_time:289049ms step_avg:41.52ms 
+[2025-09-11 08:41:08] [Rank 0] step:6981/10000 train_time:289749ms step_avg:41.51ms +[2025-09-11 08:41:08] [Rank 0] step:6981/10000 train_time:289749ms step_avg:41.51ms +[2025-09-11 08:41:08] [Rank 0] step:7001/10000 train_time:290448ms step_avg:41.49ms +[2025-09-11 08:41:08] [Rank 0] step:7001/10000 train_time:290448ms step_avg:41.49ms +[2025-09-11 08:41:09] [Rank 0] step:7021/10000 train_time:291147ms step_avg:41.47ms +[2025-09-11 08:41:09] [Rank 0] step:7021/10000 train_time:291147ms step_avg:41.47ms +[2025-09-11 08:41:10] [Rank 0] step:7041/10000 train_time:291846ms step_avg:41.45ms +[2025-09-11 08:41:10] [Rank 0] step:7041/10000 train_time:291846ms step_avg:41.45ms +[2025-09-11 08:41:10] [Rank 0] step:7061/10000 train_time:292546ms step_avg:41.43ms +[2025-09-11 08:41:10] [Rank 0] step:7061/10000 train_time:292546ms step_avg:41.43ms +[2025-09-11 08:41:11] [Rank 0] step:7081/10000 train_time:293246ms step_avg:41.41ms +[2025-09-11 08:41:11] [Rank 0] step:7081/10000 train_time:293246ms step_avg:41.41ms +[2025-09-11 08:41:12] [Rank 0] step:7101/10000 train_time:293944ms step_avg:41.39ms +[2025-09-11 08:41:12] [Rank 0] step:7101/10000 train_time:293944ms step_avg:41.39ms +[2025-09-11 08:41:12] [Rank 0] step:7121/10000 train_time:294644ms step_avg:41.38ms +[2025-09-11 08:41:12] [Rank 0] step:7121/10000 train_time:294644ms step_avg:41.38ms +[2025-09-11 08:41:13] [Rank 0] step:7141/10000 train_time:295343ms step_avg:41.36ms +[2025-09-11 08:41:13] [Rank 0] step:7141/10000 train_time:295343ms step_avg:41.36ms +[2025-09-11 08:41:14] [Rank 0] step:7161/10000 train_time:296043ms step_avg:41.34ms +[2025-09-11 08:41:14] [Rank 0] step:7161/10000 train_time:296043ms step_avg:41.34ms +[2025-09-11 08:41:15] [Rank 0] step:7181/10000 train_time:296741ms step_avg:41.32ms +[2025-09-11 08:41:15] [Rank 0] step:7181/10000 train_time:296741ms step_avg:41.32ms +[2025-09-11 08:41:15] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 08:41:15] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 08:41:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 08:41:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 08:41:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 08:41:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 08:41:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:41:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:41:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 08:41:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 08:41:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 08:41:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 08:41:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 08:41:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 08:41:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 08:41:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 08:41:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 08:41:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 08:41:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 08:41:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 08:41:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 08:41:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 08:41:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 08:41:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 08:41:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 08:41:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 08:41:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 08:41:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 08:41:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 08:41:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 08:41:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 08:41:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 08:41:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 08:41:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 08:41:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 08:41:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 08:41:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 08:41:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 08:41:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 08:41:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 08:41:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 08:41:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 08:41:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 08:41:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 08:41:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 08:41:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 08:41:25] [Rank 0] PRINT: step:7200/10000 val_loss:4.2708 total_sharp:9.3204e-05 L1_sharp:1.3665e-04 L2_sharp:2.2764e-05 L3_sharp:7.4455e-07 L4_sharp:-7.9764e-06 L5_sharp:3.5400e-05 L6_sharp:1.7953e-05 L7_sharp:2.2280e-05 L8_sharp:3.1253e-05 L9_sharp:3.6142e-05 L10_sharp:5.0209e-05 L11_sharp:8.3784e-05 L12_sharp:4.4623e-04 total_fnorm:6.0750e+01 total_l1_linf:1.5053e+05 total_spectral:3.0125e+01 L1_fnorm:8.4375e+00 L2_fnorm:8.3125e+00 L3_fnorm:7.6250e+00 L4_fnorm:8.6875e+00 L5_fnorm:8.4375e+00 L6_fnorm:8.6875e+00 L7_fnorm:8.6875e+00 L8_fnorm:8.3750e+00 L9_fnorm:8.6875e+00 L10_fnorm:8.5000e+00 L11_fnorm:8.5000e+00 L12_fnorm:8.4375e+00 L1_l1linf:1.9375e+00 L2_l1linf:1.8594e+00 L3_l1linf:1.6484e+00 L4_l1linf:1.6875e+00 L5_l1linf:1.9766e+00 L6_l1linf:2.0000e+00 L7_l1linf:1.9844e+00 L8_l1linf:1.9219e+00 L9_l1linf:1.8438e+00 L10_l1linf:1.7734e+00 L11_l1linf:1.6562e+00 L12_l1linf:1.6641e+00 L1_spectral:1.1593e-01 L2_spectral:1.1132e-01 L3_spectral:1.1405e-01 L4_spectral:1.1400e-01 L5_spectral:1.1195e-01 L6_spectral:1.1633e-01 L7_spectral:1.1652e-01 L8_spectral:1.1500e-01 L9_spectral:1.1715e-01 L10_spectral:1.1622e-01 L11_spectral:1.1661e-01 L12_spectral:1.1591e-01 train_time:297420ms step_avg:41.31ms +[2025-09-11 08:41:25] [Rank 0] PRINT: step:7200/10000 
val_loss:4.2708 total_sharp:9.3204e-05 L1_sharp:1.3665e-04 L2_sharp:2.2764e-05 L3_sharp:7.4455e-07 L4_sharp:-7.9764e-06 L5_sharp:3.5400e-05 L6_sharp:1.7953e-05 L7_sharp:2.2280e-05 L8_sharp:3.1253e-05 L9_sharp:3.6142e-05 L10_sharp:5.0209e-05 L11_sharp:8.3784e-05 L12_sharp:4.4623e-04 total_fnorm:6.0750e+01 total_l1_linf:1.5053e+05 total_spectral:3.0125e+01 L1_fnorm:8.4375e+00 L2_fnorm:8.3125e+00 L3_fnorm:7.6250e+00 L4_fnorm:8.6875e+00 L5_fnorm:8.4375e+00 L6_fnorm:8.6875e+00 L7_fnorm:8.6875e+00 L8_fnorm:8.3750e+00 L9_fnorm:8.6875e+00 L10_fnorm:8.5000e+00 L11_fnorm:8.5000e+00 L12_fnorm:8.4375e+00 L1_l1linf:1.9375e+00 L2_l1linf:1.8594e+00 L3_l1linf:1.6484e+00 L4_l1linf:1.6875e+00 L5_l1linf:1.9766e+00 L6_l1linf:2.0000e+00 L7_l1linf:1.9844e+00 L8_l1linf:1.9219e+00 L9_l1linf:1.8438e+00 L10_l1linf:1.7734e+00 L11_l1linf:1.6562e+00 L12_l1linf:1.6641e+00 L1_spectral:1.1593e-01 L2_spectral:1.1132e-01 L3_spectral:1.1405e-01 L4_spectral:1.1400e-01 L5_spectral:1.1195e-01 L6_spectral:1.1633e-01 L7_spectral:1.1652e-01 L8_spectral:1.1500e-01 L9_spectral:1.1715e-01 L10_spectral:1.1622e-01 L11_spectral:1.1661e-01 L12_spectral:1.1591e-01 train_time:297420ms step_avg:41.31ms +[2025-09-11 08:41:27] [Rank 0] step:7201/10000 train_time:298685ms step_avg:41.48ms +[2025-09-11 08:41:27] [Rank 0] step:7201/10000 train_time:298685ms step_avg:41.48ms +[2025-09-11 08:41:27] [Rank 0] step:7221/10000 train_time:299404ms step_avg:41.46ms +[2025-09-11 08:41:27] [Rank 0] step:7221/10000 train_time:299404ms step_avg:41.46ms +[2025-09-11 08:41:28] [Rank 0] step:7241/10000 train_time:300105ms step_avg:41.45ms +[2025-09-11 08:41:28] [Rank 0] step:7241/10000 train_time:300105ms step_avg:41.45ms +[2025-09-11 08:41:29] [Rank 0] step:7261/10000 train_time:300807ms step_avg:41.43ms +[2025-09-11 08:41:29] [Rank 0] step:7261/10000 train_time:300807ms step_avg:41.43ms +[2025-09-11 08:41:29] [Rank 0] step:7281/10000 train_time:301512ms step_avg:41.41ms +[2025-09-11 08:41:29] [Rank 0] step:7281/10000 
train_time:301512ms step_avg:41.41ms +[2025-09-11 08:41:30] [Rank 0] step:7301/10000 train_time:302209ms step_avg:41.39ms +[2025-09-11 08:41:30] [Rank 0] step:7301/10000 train_time:302209ms step_avg:41.39ms +[2025-09-11 08:41:31] [Rank 0] step:7321/10000 train_time:302909ms step_avg:41.38ms +[2025-09-11 08:41:31] [Rank 0] step:7321/10000 train_time:302909ms step_avg:41.38ms +[2025-09-11 08:41:31] [Rank 0] step:7341/10000 train_time:303610ms step_avg:41.36ms +[2025-09-11 08:41:31] [Rank 0] step:7341/10000 train_time:303610ms step_avg:41.36ms +[2025-09-11 08:41:32] [Rank 0] step:7361/10000 train_time:304309ms step_avg:41.34ms +[2025-09-11 08:41:32] [Rank 0] step:7361/10000 train_time:304309ms step_avg:41.34ms +[2025-09-11 08:41:33] [Rank 0] step:7381/10000 train_time:305010ms step_avg:41.32ms +[2025-09-11 08:41:33] [Rank 0] step:7381/10000 train_time:305010ms step_avg:41.32ms +[2025-09-11 08:41:34] [Rank 0] step:7401/10000 train_time:305709ms step_avg:41.31ms +[2025-09-11 08:41:34] [Rank 0] step:7401/10000 train_time:305709ms step_avg:41.31ms +[2025-09-11 08:41:34] [Rank 0] step:7421/10000 train_time:306408ms step_avg:41.29ms +[2025-09-11 08:41:34] [Rank 0] step:7421/10000 train_time:306408ms step_avg:41.29ms +[2025-09-11 08:41:35] [Rank 0] step:7441/10000 train_time:307109ms step_avg:41.27ms +[2025-09-11 08:41:35] [Rank 0] step:7441/10000 train_time:307109ms step_avg:41.27ms +[2025-09-11 08:41:36] [Rank 0] step:7461/10000 train_time:307809ms step_avg:41.26ms +[2025-09-11 08:41:36] [Rank 0] step:7461/10000 train_time:307809ms step_avg:41.26ms +[2025-09-11 08:41:36] [Rank 0] step:7481/10000 train_time:308513ms step_avg:41.24ms +[2025-09-11 08:41:36] [Rank 0] step:7481/10000 train_time:308513ms step_avg:41.24ms +[2025-09-11 08:41:37] [Rank 0] step:7501/10000 train_time:309214ms step_avg:41.22ms +[2025-09-11 08:41:37] [Rank 0] step:7501/10000 train_time:309214ms step_avg:41.22ms +[2025-09-11 08:41:38] [Rank 0] step:7521/10000 train_time:309915ms step_avg:41.21ms 
+[2025-09-11 08:41:38] [Rank 0] step:7521/10000 train_time:309915ms step_avg:41.21ms +[2025-09-11 08:41:38] [Rank 0] step:7541/10000 train_time:310613ms step_avg:41.19ms +[2025-09-11 08:41:38] [Rank 0] step:7541/10000 train_time:310613ms step_avg:41.19ms +[2025-09-11 08:41:39] [Rank 0] step:7561/10000 train_time:311315ms step_avg:41.17ms +[2025-09-11 08:41:39] [Rank 0] step:7561/10000 train_time:311315ms step_avg:41.17ms +[2025-09-11 08:41:40] [Rank 0] step:7581/10000 train_time:312016ms step_avg:41.16ms +[2025-09-11 08:41:40] [Rank 0] step:7581/10000 train_time:312016ms step_avg:41.16ms +[2025-09-11 08:41:41] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 08:41:41] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 08:41:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 08:41:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 08:41:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 08:41:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 08:41:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:41:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:41:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 08:41:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 08:41:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 08:41:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 08:41:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 08:41:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 08:41:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 08:41:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 08:41:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 08:41:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 08:41:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 08:41:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 08:41:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 08:41:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 08:41:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 08:41:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 08:41:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 08:41:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 08:41:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 08:41:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 08:41:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 08:41:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 08:41:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 08:41:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 08:41:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 08:41:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 08:41:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 08:41:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 08:41:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 08:41:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 08:41:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 08:41:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 08:41:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 08:41:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 08:41:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 08:41:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 08:41:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 08:41:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:41:50] [Rank 0] PRINT: step:7600/10000 val_loss:4.2356 total_sharp:1.0306e-04 L1_sharp:1.8067e-04 L2_sharp:2.9575e-05 L3_sharp:1.4637e-05 L4_sharp:7.0467e-06 L5_sharp:2.1190e-05 L6_sharp:2.2477e-05 L7_sharp:1.5628e-05 L8_sharp:3.6798e-05 L9_sharp:3.2334e-05 L10_sharp:5.0756e-05 L11_sharp:8.8238e-05 L12_sharp:5.3509e-04 total_fnorm:4.8750e+01 total_l1_linf:1.1469e+05 total_spectral:2.4500e+01 L1_fnorm:7.1875e+00 L2_fnorm:7.0625e+00 L3_fnorm:6.3438e+00 L4_fnorm:7.3125e+00 L5_fnorm:7.0312e+00 L6_fnorm:7.3438e+00 L7_fnorm:7.3125e+00 L8_fnorm:7.0312e+00 L9_fnorm:7.2812e+00 L10_fnorm:7.1562e+00 L11_fnorm:7.1875e+00 L12_fnorm:7.1875e+00 L1_l1linf:1.5703e+00 L2_l1linf:1.4844e+00 L3_l1linf:1.4609e+00 L4_l1linf:1.3750e+00 L5_l1linf:1.6094e+00 L6_l1linf:1.6406e+00 L7_l1linf:1.6094e+00 L8_l1linf:1.5703e+00 L9_l1linf:1.5391e+00 L10_l1linf:1.4062e+00 L11_l1linf:1.3594e+00 L12_l1linf:1.3906e+00 L1_spectral:1.0043e-01 L2_spectral:9.4403e-02 L3_spectral:9.8843e-02 L4_spectral:9.8117e-02 L5_spectral:9.5543e-02 L6_spectral:9.9815e-02 L7_spectral:9.9845e-02 L8_spectral:9.7907e-02 L9_spectral:1.0076e-01 L10_spectral:9.9789e-02 L11_spectral:1.0085e-01 L12_spectral:1.0105e-01 train_time:312698ms step_avg:41.14ms +[2025-09-11 08:41:50] [Rank 0] PRINT: step:7600/10000 val_loss:4.2356 total_sharp:1.0306e-04 L1_sharp:1.8067e-04 L2_sharp:2.9575e-05 L3_sharp:1.4637e-05 L4_sharp:7.0467e-06 L5_sharp:2.1190e-05 L6_sharp:2.2477e-05 L7_sharp:1.5628e-05 L8_sharp:3.6798e-05 L9_sharp:3.2334e-05 L10_sharp:5.0756e-05 L11_sharp:8.8238e-05 L12_sharp:5.3509e-04 total_fnorm:4.8750e+01 total_l1_linf:1.1469e+05 total_spectral:2.4500e+01 L1_fnorm:7.1875e+00 L2_fnorm:7.0625e+00 L3_fnorm:6.3438e+00 L4_fnorm:7.3125e+00 L5_fnorm:7.0312e+00 L6_fnorm:7.3438e+00 L7_fnorm:7.3125e+00 L8_fnorm:7.0312e+00 L9_fnorm:7.2812e+00 L10_fnorm:7.1562e+00 L11_fnorm:7.1875e+00 L12_fnorm:7.1875e+00 L1_l1linf:1.5703e+00 L2_l1linf:1.4844e+00 L3_l1linf:1.4609e+00 L4_l1linf:1.3750e+00 L5_l1linf:1.6094e+00 
L6_l1linf:1.6406e+00 L7_l1linf:1.6094e+00 L8_l1linf:1.5703e+00 L9_l1linf:1.5391e+00 L10_l1linf:1.4062e+00 L11_l1linf:1.3594e+00 L12_l1linf:1.3906e+00 L1_spectral:1.0043e-01 L2_spectral:9.4403e-02 L3_spectral:9.8843e-02 L4_spectral:9.8117e-02 L5_spectral:9.5543e-02 L6_spectral:9.9815e-02 L7_spectral:9.9845e-02 L8_spectral:9.7907e-02 L9_spectral:1.0076e-01 L10_spectral:9.9789e-02 L11_spectral:1.0085e-01 L12_spectral:1.0105e-01 train_time:312698ms step_avg:41.14ms +[2025-09-11 08:41:52] [Rank 0] step:7601/10000 train_time:313930ms step_avg:41.30ms +[2025-09-11 08:41:52] [Rank 0] step:7601/10000 train_time:313930ms step_avg:41.30ms +[2025-09-11 08:41:53] [Rank 0] step:7621/10000 train_time:314889ms step_avg:41.32ms +[2025-09-11 08:41:53] [Rank 0] step:7621/10000 train_time:314889ms step_avg:41.32ms +[2025-09-11 08:41:53] [Rank 0] step:7641/10000 train_time:315591ms step_avg:41.30ms +[2025-09-11 08:41:53] [Rank 0] step:7641/10000 train_time:315591ms step_avg:41.30ms +[2025-09-11 08:41:54] [Rank 0] step:7661/10000 train_time:316585ms step_avg:41.32ms +[2025-09-11 08:41:54] [Rank 0] step:7661/10000 train_time:316585ms step_avg:41.32ms +[2025-09-11 08:41:55] [Rank 0] step:7681/10000 train_time:317285ms step_avg:41.31ms +[2025-09-11 08:41:55] [Rank 0] step:7681/10000 train_time:317285ms step_avg:41.31ms +[2025-09-11 08:41:56] [Rank 0] step:7701/10000 train_time:317988ms step_avg:41.29ms +[2025-09-11 08:41:56] [Rank 0] step:7701/10000 train_time:317988ms step_avg:41.29ms +[2025-09-11 08:41:56] [Rank 0] step:7721/10000 train_time:318688ms step_avg:41.28ms +[2025-09-11 08:41:56] [Rank 0] step:7721/10000 train_time:318688ms step_avg:41.28ms +[2025-09-11 08:41:57] [Rank 0] step:7741/10000 train_time:319389ms step_avg:41.26ms +[2025-09-11 08:41:57] [Rank 0] step:7741/10000 train_time:319389ms step_avg:41.26ms +[2025-09-11 08:41:58] [Rank 0] step:7761/10000 train_time:320089ms step_avg:41.24ms +[2025-09-11 08:41:58] [Rank 0] step:7761/10000 train_time:320089ms step_avg:41.24ms 
+[2025-09-11 08:41:58] [Rank 0] step:7781/10000 train_time:320792ms step_avg:41.23ms +[2025-09-11 08:41:58] [Rank 0] step:7781/10000 train_time:320792ms step_avg:41.23ms +[2025-09-11 08:41:59] [Rank 0] step:7801/10000 train_time:321491ms step_avg:41.21ms +[2025-09-11 08:41:59] [Rank 0] step:7801/10000 train_time:321491ms step_avg:41.21ms +[2025-09-11 08:42:00] [Rank 0] step:7821/10000 train_time:322194ms step_avg:41.20ms +[2025-09-11 08:42:00] [Rank 0] step:7821/10000 train_time:322194ms step_avg:41.20ms +[2025-09-11 08:42:01] [Rank 0] step:7841/10000 train_time:322897ms step_avg:41.18ms +[2025-09-11 08:42:01] [Rank 0] step:7841/10000 train_time:322897ms step_avg:41.18ms +[2025-09-11 08:42:01] [Rank 0] step:7861/10000 train_time:323600ms step_avg:41.17ms +[2025-09-11 08:42:01] [Rank 0] step:7861/10000 train_time:323600ms step_avg:41.17ms +[2025-09-11 08:42:02] [Rank 0] step:7881/10000 train_time:324302ms step_avg:41.15ms +[2025-09-11 08:42:02] [Rank 0] step:7881/10000 train_time:324302ms step_avg:41.15ms +[2025-09-11 08:42:03] [Rank 0] step:7901/10000 train_time:325004ms step_avg:41.13ms +[2025-09-11 08:42:03] [Rank 0] step:7901/10000 train_time:325004ms step_avg:41.13ms +[2025-09-11 08:42:03] [Rank 0] step:7921/10000 train_time:325706ms step_avg:41.12ms +[2025-09-11 08:42:03] [Rank 0] step:7921/10000 train_time:325706ms step_avg:41.12ms +[2025-09-11 08:42:04] [Rank 0] step:7941/10000 train_time:326408ms step_avg:41.10ms +[2025-09-11 08:42:04] [Rank 0] step:7941/10000 train_time:326408ms step_avg:41.10ms +[2025-09-11 08:42:05] [Rank 0] step:7961/10000 train_time:327107ms step_avg:41.09ms +[2025-09-11 08:42:05] [Rank 0] step:7961/10000 train_time:327107ms step_avg:41.09ms +[2025-09-11 08:42:05] [Rank 0] step:7981/10000 train_time:327858ms step_avg:41.08ms +[2025-09-11 08:42:05] [Rank 0] step:7981/10000 train_time:327858ms step_avg:41.08ms +[2025-09-11 08:42:06] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 08:42:06] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 08:42:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 08:42:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 08:42:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 08:42:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 08:42:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:42:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:42:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 08:42:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 08:42:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 08:42:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 08:42:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 08:42:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 08:42:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 08:42:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 08:42:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 08:42:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 08:42:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 08:42:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 08:42:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 08:42:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 08:42:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 08:42:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 08:42:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 08:42:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 08:42:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 08:42:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 08:42:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 08:42:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 08:42:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 08:42:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 08:42:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 08:42:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 08:42:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 08:42:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 08:42:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 08:42:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 08:42:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 08:42:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 08:42:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 08:42:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 08:42:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 08:42:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 08:42:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 08:42:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 08:42:16] [Rank 0] PRINT: step:8000/10000 val_loss:4.2074 total_sharp:1.0202e-04 L1_sharp:1.8677e-04 L2_sharp:2.5043e-05 L3_sharp:1.1543e-05 L4_sharp:1.6616e-05 L5_sharp:2.2736e-05 L6_sharp:2.9795e-05 L7_sharp:2.1345e-05 L8_sharp:4.2045e-05 L9_sharp:3.2042e-05 L10_sharp:4.8631e-05 L11_sharp:8.1095e-05 L12_sharp:1.3527e-03 total_fnorm:4.2500e+01 total_l1_linf:9.3696e+04 total_spectral:2.1125e+01 L1_fnorm:5.8438e+00 L2_fnorm:5.7500e+00 L3_fnorm:5.1250e+00 L4_fnorm:6.0312e+00 L5_fnorm:5.7500e+00 L6_fnorm:6.0000e+00 L7_fnorm:5.9375e+00 L8_fnorm:5.6875e+00 L9_fnorm:5.9062e+00 L10_fnorm:5.7812e+00 L11_fnorm:5.8125e+00 L12_fnorm:5.8438e+00 L1_l1linf:1.2422e+00 L2_l1linf:1.1406e+00 L3_l1linf:1.2891e+00 L4_l1linf:1.0625e+00 L5_l1linf:1.2500e+00 L6_l1linf:1.2656e+00 L7_l1linf:1.2656e+00 L8_l1linf:1.2109e+00 L9_l1linf:1.1797e+00 L10_l1linf:1.1094e+00 L11_l1linf:1.0469e+00 L12_l1linf:1.1328e+00 L1_spectral:8.3625e-02 L2_spectral:7.8910e-02 L3_spectral:8.0462e-02 L4_spectral:8.3339e-02 L5_spectral:7.9748e-02 L6_spectral:8.2695e-02 L7_spectral:8.3969e-02 L8_spectral:8.1686e-02 L9_spectral:8.3727e-02 L10_spectral:8.3225e-02 L11_spectral:8.4432e-02 L12_spectral:8.3737e-02 train_time:328538ms step_avg:41.07ms +[2025-09-11 08:42:16] [Rank 0] PRINT: step:8000/10000 
val_loss:4.2074 total_sharp:1.0202e-04 L1_sharp:1.8677e-04 L2_sharp:2.5043e-05 L3_sharp:1.1543e-05 L4_sharp:1.6616e-05 L5_sharp:2.2736e-05 L6_sharp:2.9795e-05 L7_sharp:2.1345e-05 L8_sharp:4.2045e-05 L9_sharp:3.2042e-05 L10_sharp:4.8631e-05 L11_sharp:8.1095e-05 L12_sharp:1.3527e-03 total_fnorm:4.2500e+01 total_l1_linf:9.3696e+04 total_spectral:2.1125e+01 L1_fnorm:5.8438e+00 L2_fnorm:5.7500e+00 L3_fnorm:5.1250e+00 L4_fnorm:6.0312e+00 L5_fnorm:5.7500e+00 L6_fnorm:6.0000e+00 L7_fnorm:5.9375e+00 L8_fnorm:5.6875e+00 L9_fnorm:5.9062e+00 L10_fnorm:5.7812e+00 L11_fnorm:5.8125e+00 L12_fnorm:5.8438e+00 L1_l1linf:1.2422e+00 L2_l1linf:1.1406e+00 L3_l1linf:1.2891e+00 L4_l1linf:1.0625e+00 L5_l1linf:1.2500e+00 L6_l1linf:1.2656e+00 L7_l1linf:1.2656e+00 L8_l1linf:1.2109e+00 L9_l1linf:1.1797e+00 L10_l1linf:1.1094e+00 L11_l1linf:1.0469e+00 L12_l1linf:1.1328e+00 L1_spectral:8.3625e-02 L2_spectral:7.8910e-02 L3_spectral:8.0462e-02 L4_spectral:8.3339e-02 L5_spectral:7.9748e-02 L6_spectral:8.2695e-02 L7_spectral:8.3969e-02 L8_spectral:8.1686e-02 L9_spectral:8.3727e-02 L10_spectral:8.3225e-02 L11_spectral:8.4432e-02 L12_spectral:8.3737e-02 train_time:328538ms step_avg:41.07ms +[2025-09-11 08:42:17] [Rank 0] step:8001/10000 train_time:329826ms step_avg:41.22ms +[2025-09-11 08:42:17] [Rank 0] step:8001/10000 train_time:329826ms step_avg:41.22ms +[2025-09-11 08:42:18] [Rank 0] step:8021/10000 train_time:330558ms step_avg:41.21ms +[2025-09-11 08:42:18] [Rank 0] step:8021/10000 train_time:330558ms step_avg:41.21ms +[2025-09-11 08:42:19] [Rank 0] step:8041/10000 train_time:331261ms step_avg:41.20ms +[2025-09-11 08:42:19] [Rank 0] step:8041/10000 train_time:331261ms step_avg:41.20ms +[2025-09-11 08:42:20] [Rank 0] step:8061/10000 train_time:331965ms step_avg:41.18ms +[2025-09-11 08:42:20] [Rank 0] step:8061/10000 train_time:331965ms step_avg:41.18ms +[2025-09-11 08:42:20] [Rank 0] step:8081/10000 train_time:332665ms step_avg:41.17ms +[2025-09-11 08:42:20] [Rank 0] step:8081/10000 
train_time:332665ms step_avg:41.17ms +[2025-09-11 08:42:21] [Rank 0] step:8101/10000 train_time:333364ms step_avg:41.15ms +[2025-09-11 08:42:21] [Rank 0] step:8101/10000 train_time:333364ms step_avg:41.15ms +[2025-09-11 08:42:22] [Rank 0] step:8121/10000 train_time:334068ms step_avg:41.14ms +[2025-09-11 08:42:22] [Rank 0] step:8121/10000 train_time:334068ms step_avg:41.14ms +[2025-09-11 08:42:23] [Rank 0] step:8141/10000 train_time:335516ms step_avg:41.21ms +[2025-09-11 08:42:23] [Rank 0] step:8141/10000 train_time:335516ms step_avg:41.21ms +[2025-09-11 08:42:24] [Rank 0] step:8161/10000 train_time:336221ms step_avg:41.20ms +[2025-09-11 08:42:24] [Rank 0] step:8161/10000 train_time:336221ms step_avg:41.20ms +[2025-09-11 08:42:25] [Rank 0] step:8181/10000 train_time:336933ms step_avg:41.18ms +[2025-09-11 08:42:25] [Rank 0] step:8181/10000 train_time:336933ms step_avg:41.18ms +[2025-09-11 08:42:25] [Rank 0] step:8201/10000 train_time:337643ms step_avg:41.17ms +[2025-09-11 08:42:25] [Rank 0] step:8201/10000 train_time:337643ms step_avg:41.17ms +[2025-09-11 08:42:26] [Rank 0] step:8221/10000 train_time:338350ms step_avg:41.16ms +[2025-09-11 08:42:26] [Rank 0] step:8221/10000 train_time:338350ms step_avg:41.16ms +[2025-09-11 08:42:27] [Rank 0] step:8241/10000 train_time:339067ms step_avg:41.14ms +[2025-09-11 08:42:27] [Rank 0] step:8241/10000 train_time:339067ms step_avg:41.14ms +[2025-09-11 08:42:27] [Rank 0] step:8261/10000 train_time:339774ms step_avg:41.13ms +[2025-09-11 08:42:27] [Rank 0] step:8261/10000 train_time:339774ms step_avg:41.13ms +[2025-09-11 08:42:28] [Rank 0] step:8281/10000 train_time:340479ms step_avg:41.12ms +[2025-09-11 08:42:28] [Rank 0] step:8281/10000 train_time:340479ms step_avg:41.12ms +[2025-09-11 08:42:29] [Rank 0] step:8301/10000 train_time:341186ms step_avg:41.10ms +[2025-09-11 08:42:29] [Rank 0] step:8301/10000 train_time:341186ms step_avg:41.10ms +[2025-09-11 08:42:29] [Rank 0] step:8321/10000 train_time:341893ms step_avg:41.09ms 
+[2025-09-11 08:42:29] [Rank 0] step:8321/10000 train_time:341893ms step_avg:41.09ms +[2025-09-11 08:42:30] [Rank 0] step:8341/10000 train_time:342607ms step_avg:41.08ms +[2025-09-11 08:42:30] [Rank 0] step:8341/10000 train_time:342607ms step_avg:41.08ms +[2025-09-11 08:42:31] [Rank 0] step:8361/10000 train_time:343311ms step_avg:41.06ms +[2025-09-11 08:42:31] [Rank 0] step:8361/10000 train_time:343311ms step_avg:41.06ms +[2025-09-11 08:42:32] [Rank 0] step:8381/10000 train_time:344021ms step_avg:41.05ms +[2025-09-11 08:42:32] [Rank 0] step:8381/10000 train_time:344021ms step_avg:41.05ms +[2025-09-11 08:42:32] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 08:42:32] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 08:42:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 08:42:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 08:42:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 08:42:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 08:42:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:42:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:42:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 08:42:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 08:42:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 08:42:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 08:42:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 08:42:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 08:42:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 08:42:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 08:42:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 08:42:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 08:42:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 08:42:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 08:42:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 08:42:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 08:42:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 08:42:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 08:42:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 08:42:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 08:42:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 08:42:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 08:42:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 08:42:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 08:42:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 08:42:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 08:42:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 08:42:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 08:42:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 08:42:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 08:42:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 08:42:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 08:42:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 08:42:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 08:42:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 08:42:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 08:42:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 08:42:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 08:42:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 08:42:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:42:44] [Rank 0] PRINT: step:8400/10000 val_loss:4.1838 total_sharp:7.7515e-05 L1_sharp:1.3940e-04 L2_sharp:2.9028e-05 L3_sharp:-6.5590e-06 L4_sharp:-3.1115e-06 L5_sharp:2.0452e-05 L6_sharp:2.1192e-05 L7_sharp:1.0948e-05 L8_sharp:2.9418e-05 L9_sharp:2.8995e-05 L10_sharp:4.4155e-05 L11_sharp:7.5769e-05 L12_sharp:5.5082e-04 total_fnorm:3.3250e+01 total_l1_linf:6.6560e+04 total_spectral:1.6500e+01 L1_fnorm:4.6875e+00 L2_fnorm:4.5312e+00 L3_fnorm:4.0312e+00 L4_fnorm:4.7500e+00 L5_fnorm:4.5312e+00 L6_fnorm:4.6875e+00 L7_fnorm:4.6250e+00 L8_fnorm:4.4688e+00 L9_fnorm:4.5938e+00 L10_fnorm:4.5000e+00 L11_fnorm:4.5000e+00 L12_fnorm:4.5625e+00 L1_l1linf:8.6328e-01 L2_l1linf:8.5547e-01 L3_l1linf:1.0781e+00 L4_l1linf:8.0469e-01 L5_l1linf:9.4141e-01 L6_l1linf:9.2969e-01 L7_l1linf:9.3750e-01 L8_l1linf:8.8281e-01 L9_l1linf:8.7891e-01 L10_l1linf:8.1250e-01 L11_l1linf:7.7344e-01 L12_l1linf:8.0859e-01 L1_spectral:6.7661e-02 L2_spectral:6.3547e-02 L3_spectral:6.5273e-02 L4_spectral:6.8992e-02 L5_spectral:6.4047e-02 L6_spectral:6.5664e-02 L7_spectral:6.5607e-02 L8_spectral:6.6585e-02 L9_spectral:6.6130e-02 L10_spectral:6.6575e-02 L11_spectral:6.6481e-02 L12_spectral:6.7359e-02 train_time:344710ms step_avg:41.04ms +[2025-09-11 08:42:44] [Rank 0] PRINT: step:8400/10000 val_loss:4.1838 total_sharp:7.7515e-05 L1_sharp:1.3940e-04 L2_sharp:2.9028e-05 L3_sharp:-6.5590e-06 L4_sharp:-3.1115e-06 L5_sharp:2.0452e-05 L6_sharp:2.1192e-05 L7_sharp:1.0948e-05 L8_sharp:2.9418e-05 L9_sharp:2.8995e-05 L10_sharp:4.4155e-05 L11_sharp:7.5769e-05 L12_sharp:5.5082e-04 total_fnorm:3.3250e+01 total_l1_linf:6.6560e+04 total_spectral:1.6500e+01 L1_fnorm:4.6875e+00 L2_fnorm:4.5312e+00 L3_fnorm:4.0312e+00 L4_fnorm:4.7500e+00 L5_fnorm:4.5312e+00 L6_fnorm:4.6875e+00 L7_fnorm:4.6250e+00 L8_fnorm:4.4688e+00 L9_fnorm:4.5938e+00 L10_fnorm:4.5000e+00 L11_fnorm:4.5000e+00 L12_fnorm:4.5625e+00 L1_l1linf:8.6328e-01 L2_l1linf:8.5547e-01 L3_l1linf:1.0781e+00 L4_l1linf:8.0469e-01 L5_l1linf:9.4141e-01 
L6_l1linf:9.2969e-01 L7_l1linf:9.3750e-01 L8_l1linf:8.8281e-01 L9_l1linf:8.7891e-01 L10_l1linf:8.1250e-01 L11_l1linf:7.7344e-01 L12_l1linf:8.0859e-01 L1_spectral:6.7661e-02 L2_spectral:6.3547e-02 L3_spectral:6.5273e-02 L4_spectral:6.8992e-02 L5_spectral:6.4047e-02 L6_spectral:6.5664e-02 L7_spectral:6.5607e-02 L8_spectral:6.6585e-02 L9_spectral:6.6130e-02 L10_spectral:6.6575e-02 L11_spectral:6.6481e-02 L12_spectral:6.7359e-02 train_time:344710ms step_avg:41.04ms +[2025-09-11 08:42:45] [Rank 0] step:8401/10000 train_time:346000ms step_avg:41.19ms +[2025-09-11 08:42:45] [Rank 0] step:8401/10000 train_time:346000ms step_avg:41.19ms +[2025-09-11 08:42:46] [Rank 0] step:8421/10000 train_time:346727ms step_avg:41.17ms +[2025-09-11 08:42:46] [Rank 0] step:8421/10000 train_time:346727ms step_avg:41.17ms +[2025-09-11 08:42:47] [Rank 0] step:8441/10000 train_time:347437ms step_avg:41.16ms +[2025-09-11 08:42:47] [Rank 0] step:8441/10000 train_time:347437ms step_avg:41.16ms +[2025-09-11 08:42:47] [Rank 0] step:8461/10000 train_time:348146ms step_avg:41.15ms +[2025-09-11 08:42:47] [Rank 0] step:8461/10000 train_time:348146ms step_avg:41.15ms +[2025-09-11 08:42:48] [Rank 0] step:8481/10000 train_time:348857ms step_avg:41.13ms +[2025-09-11 08:42:48] [Rank 0] step:8481/10000 train_time:348857ms step_avg:41.13ms +[2025-09-11 08:42:49] [Rank 0] step:8501/10000 train_time:349566ms step_avg:41.12ms +[2025-09-11 08:42:49] [Rank 0] step:8501/10000 train_time:349566ms step_avg:41.12ms +[2025-09-11 08:42:49] [Rank 0] step:8521/10000 train_time:350274ms step_avg:41.11ms +[2025-09-11 08:42:49] [Rank 0] step:8521/10000 train_time:350274ms step_avg:41.11ms +[2025-09-11 08:42:50] [Rank 0] step:8541/10000 train_time:350982ms step_avg:41.09ms +[2025-09-11 08:42:50] [Rank 0] step:8541/10000 train_time:350982ms step_avg:41.09ms +[2025-09-11 08:42:51] [Rank 0] step:8561/10000 train_time:351696ms step_avg:41.08ms +[2025-09-11 08:42:51] [Rank 0] step:8561/10000 train_time:351696ms step_avg:41.08ms 
+[2025-09-11 08:42:52] [Rank 0] step:8581/10000 train_time:352406ms step_avg:41.07ms +[2025-09-11 08:42:52] [Rank 0] step:8581/10000 train_time:352406ms step_avg:41.07ms +[2025-09-11 08:42:52] [Rank 0] step:8601/10000 train_time:353116ms step_avg:41.06ms +[2025-09-11 08:42:52] [Rank 0] step:8601/10000 train_time:353116ms step_avg:41.06ms +[2025-09-11 08:42:53] [Rank 0] step:8621/10000 train_time:353824ms step_avg:41.04ms +[2025-09-11 08:42:53] [Rank 0] step:8621/10000 train_time:353824ms step_avg:41.04ms +[2025-09-11 08:42:54] [Rank 0] step:8641/10000 train_time:354531ms step_avg:41.03ms +[2025-09-11 08:42:54] [Rank 0] step:8641/10000 train_time:354531ms step_avg:41.03ms +[2025-09-11 08:42:55] [Rank 0] step:8661/10000 train_time:355362ms step_avg:41.03ms +[2025-09-11 08:42:55] [Rank 0] step:8661/10000 train_time:355362ms step_avg:41.03ms +[2025-09-11 08:42:56] [Rank 0] step:8681/10000 train_time:356438ms step_avg:41.06ms +[2025-09-11 08:42:56] [Rank 0] step:8681/10000 train_time:356438ms step_avg:41.06ms +[2025-09-11 08:42:56] [Rank 0] step:8701/10000 train_time:357145ms step_avg:41.05ms +[2025-09-11 08:42:56] [Rank 0] step:8701/10000 train_time:357145ms step_avg:41.05ms +[2025-09-11 08:42:57] [Rank 0] step:8721/10000 train_time:358011ms step_avg:41.05ms +[2025-09-11 08:42:57] [Rank 0] step:8721/10000 train_time:358011ms step_avg:41.05ms +[2025-09-11 08:42:58] [Rank 0] step:8741/10000 train_time:358822ms step_avg:41.05ms +[2025-09-11 08:42:58] [Rank 0] step:8741/10000 train_time:358822ms step_avg:41.05ms +[2025-09-11 08:42:59] [Rank 0] step:8761/10000 train_time:359534ms step_avg:41.04ms +[2025-09-11 08:42:59] [Rank 0] step:8761/10000 train_time:359534ms step_avg:41.04ms +[2025-09-11 08:42:59] [Rank 0] step:8781/10000 train_time:360239ms step_avg:41.02ms +[2025-09-11 08:42:59] [Rank 0] step:8781/10000 train_time:360239ms step_avg:41.02ms +[2025-09-11 08:43:00] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 08:43:00] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 08:43:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 08:43:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 08:43:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 08:43:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 08:43:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:43:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:43:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 08:43:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 08:43:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 08:43:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 08:43:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 08:43:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 08:43:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 08:43:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 08:43:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 08:43:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 08:43:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 08:43:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 08:43:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 08:43:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 08:43:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 08:43:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 08:43:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 08:43:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 08:43:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 08:43:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 08:43:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 08:43:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 08:43:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 08:43:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 08:43:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 08:43:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 08:43:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 08:43:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 08:43:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 08:43:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 08:43:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 08:43:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 08:43:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 08:43:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 08:43:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 08:43:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 08:43:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 08:43:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 08:43:10] [Rank 0] PRINT: step:8800/10000 val_loss:4.1734 total_sharp:6.5394e-05 L1_sharp:1.2995e-04 L2_sharp:1.7748e-05 L3_sharp:1.3128e-05 L4_sharp:1.5322e-06 L5_sharp:1.8808e-05 L6_sharp:1.5586e-05 L7_sharp:1.4859e-05 L8_sharp:2.2384e-05 L9_sharp:2.1154e-05 L10_sharp:4.0338e-05 L11_sharp:7.2771e-05 L12_sharp:4.2560e-04 total_fnorm:2.4500e+01 total_l1_linf:4.4032e+04 total_spectral:1.2188e+01 L1_fnorm:3.4844e+00 L2_fnorm:3.3281e+00 L3_fnorm:2.9375e+00 L4_fnorm:3.5000e+00 L5_fnorm:3.2812e+00 L6_fnorm:3.4062e+00 L7_fnorm:3.3906e+00 L8_fnorm:3.2500e+00 L9_fnorm:3.3281e+00 L10_fnorm:3.2812e+00 L11_fnorm:3.2656e+00 L12_fnorm:3.3281e+00 L1_l1linf:6.1719e-01 L2_l1linf:5.9375e-01 L3_l1linf:8.4375e-01 L4_l1linf:6.7188e-01 L5_l1linf:6.2891e-01 L6_l1linf:6.2891e-01 L7_l1linf:6.2500e-01 L8_l1linf:6.1328e-01 L9_l1linf:5.8203e-01 L10_l1linf:5.3906e-01 L11_l1linf:5.0391e-01 L12_l1linf:5.4688e-01 L1_spectral:5.1887e-02 L2_spectral:4.8170e-02 L3_spectral:5.2120e-02 L4_spectral:5.3071e-02 L5_spectral:4.8164e-02 L6_spectral:4.8688e-02 L7_spectral:4.8969e-02 L8_spectral:4.9908e-02 L9_spectral:4.9570e-02 L10_spectral:4.9989e-02 L11_spectral:4.9566e-02 L12_spectral:5.0851e-02 train_time:360926ms step_avg:41.01ms +[2025-09-11 08:43:10] [Rank 0] PRINT: step:8800/10000 
val_loss:4.1734 total_sharp:6.5394e-05 L1_sharp:1.2995e-04 L2_sharp:1.7748e-05 L3_sharp:1.3128e-05 L4_sharp:1.5322e-06 L5_sharp:1.8808e-05 L6_sharp:1.5586e-05 L7_sharp:1.4859e-05 L8_sharp:2.2384e-05 L9_sharp:2.1154e-05 L10_sharp:4.0338e-05 L11_sharp:7.2771e-05 L12_sharp:4.2560e-04 total_fnorm:2.4500e+01 total_l1_linf:4.4032e+04 total_spectral:1.2188e+01 L1_fnorm:3.4844e+00 L2_fnorm:3.3281e+00 L3_fnorm:2.9375e+00 L4_fnorm:3.5000e+00 L5_fnorm:3.2812e+00 L6_fnorm:3.4062e+00 L7_fnorm:3.3906e+00 L8_fnorm:3.2500e+00 L9_fnorm:3.3281e+00 L10_fnorm:3.2812e+00 L11_fnorm:3.2656e+00 L12_fnorm:3.3281e+00 L1_l1linf:6.1719e-01 L2_l1linf:5.9375e-01 L3_l1linf:8.4375e-01 L4_l1linf:6.7188e-01 L5_l1linf:6.2891e-01 L6_l1linf:6.2891e-01 L7_l1linf:6.2500e-01 L8_l1linf:6.1328e-01 L9_l1linf:5.8203e-01 L10_l1linf:5.3906e-01 L11_l1linf:5.0391e-01 L12_l1linf:5.4688e-01 L1_spectral:5.1887e-02 L2_spectral:4.8170e-02 L3_spectral:5.2120e-02 L4_spectral:5.3071e-02 L5_spectral:4.8164e-02 L6_spectral:4.8688e-02 L7_spectral:4.8969e-02 L8_spectral:4.9908e-02 L9_spectral:4.9570e-02 L10_spectral:4.9989e-02 L11_spectral:4.9566e-02 L12_spectral:5.0851e-02 train_time:360926ms step_avg:41.01ms +[2025-09-11 08:43:11] [Rank 0] step:8801/10000 train_time:362220ms step_avg:41.16ms +[2025-09-11 08:43:11] [Rank 0] step:8801/10000 train_time:362220ms step_avg:41.16ms +[2025-09-11 08:43:12] [Rank 0] step:8821/10000 train_time:362971ms step_avg:41.15ms +[2025-09-11 08:43:12] [Rank 0] step:8821/10000 train_time:362971ms step_avg:41.15ms +[2025-09-11 08:43:13] [Rank 0] step:8841/10000 train_time:363680ms step_avg:41.14ms +[2025-09-11 08:43:13] [Rank 0] step:8841/10000 train_time:363680ms step_avg:41.14ms +[2025-09-11 08:43:14] [Rank 0] step:8861/10000 train_time:364389ms step_avg:41.12ms +[2025-09-11 08:43:14] [Rank 0] step:8861/10000 train_time:364389ms step_avg:41.12ms +[2025-09-11 08:43:14] [Rank 0] step:8881/10000 train_time:365097ms step_avg:41.11ms +[2025-09-11 08:43:14] [Rank 0] step:8881/10000 
train_time:365097ms step_avg:41.11ms +[2025-09-11 08:43:15] [Rank 0] step:8901/10000 train_time:365808ms step_avg:41.10ms +[2025-09-11 08:43:15] [Rank 0] step:8901/10000 train_time:365808ms step_avg:41.10ms +[2025-09-11 08:43:16] [Rank 0] step:8921/10000 train_time:366513ms step_avg:41.08ms +[2025-09-11 08:43:16] [Rank 0] step:8921/10000 train_time:366513ms step_avg:41.08ms +[2025-09-11 08:43:16] [Rank 0] step:8941/10000 train_time:367223ms step_avg:41.07ms +[2025-09-11 08:43:16] [Rank 0] step:8941/10000 train_time:367223ms step_avg:41.07ms +[2025-09-11 08:43:17] [Rank 0] step:8961/10000 train_time:367940ms step_avg:41.06ms +[2025-09-11 08:43:17] [Rank 0] step:8961/10000 train_time:367940ms step_avg:41.06ms +[2025-09-11 08:43:18] [Rank 0] step:8981/10000 train_time:368653ms step_avg:41.05ms +[2025-09-11 08:43:18] [Rank 0] step:8981/10000 train_time:368653ms step_avg:41.05ms +[2025-09-11 08:43:19] [Rank 0] step:9001/10000 train_time:369356ms step_avg:41.04ms +[2025-09-11 08:43:19] [Rank 0] step:9001/10000 train_time:369356ms step_avg:41.04ms +[2025-09-11 08:43:19] [Rank 0] step:9021/10000 train_time:370067ms step_avg:41.02ms +[2025-09-11 08:43:19] [Rank 0] step:9021/10000 train_time:370067ms step_avg:41.02ms +[2025-09-11 08:43:20] [Rank 0] step:9041/10000 train_time:370779ms step_avg:41.01ms +[2025-09-11 08:43:20] [Rank 0] step:9041/10000 train_time:370779ms step_avg:41.01ms +[2025-09-11 08:43:21] [Rank 0] step:9061/10000 train_time:371487ms step_avg:41.00ms +[2025-09-11 08:43:21] [Rank 0] step:9061/10000 train_time:371487ms step_avg:41.00ms +[2025-09-11 08:43:21] [Rank 0] step:9081/10000 train_time:372197ms step_avg:40.99ms +[2025-09-11 08:43:21] [Rank 0] step:9081/10000 train_time:372197ms step_avg:40.99ms +[2025-09-11 08:43:22] [Rank 0] step:9101/10000 train_time:372909ms step_avg:40.97ms +[2025-09-11 08:43:22] [Rank 0] step:9101/10000 train_time:372909ms step_avg:40.97ms +[2025-09-11 08:43:23] [Rank 0] step:9121/10000 train_time:373623ms step_avg:40.96ms 
+[2025-09-11 08:43:23] [Rank 0] step:9121/10000 train_time:373623ms step_avg:40.96ms +[2025-09-11 08:43:23] [Rank 0] step:9141/10000 train_time:374330ms step_avg:40.95ms +[2025-09-11 08:43:23] [Rank 0] step:9141/10000 train_time:374330ms step_avg:40.95ms +[2025-09-11 08:43:24] [Rank 0] step:9161/10000 train_time:375041ms step_avg:40.94ms +[2025-09-11 08:43:24] [Rank 0] step:9161/10000 train_time:375041ms step_avg:40.94ms +[2025-09-11 08:43:25] [Rank 0] step:9181/10000 train_time:375752ms step_avg:40.93ms +[2025-09-11 08:43:25] [Rank 0] step:9181/10000 train_time:375752ms step_avg:40.93ms +[2025-09-11 08:43:26] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 08:43:26] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 08:43:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 08:43:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 08:43:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 08:43:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 08:43:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:43:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:43:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 08:43:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 08:43:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 08:43:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 08:43:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 08:43:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 08:43:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 08:43:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 08:43:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 08:43:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 08:43:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 08:43:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 08:43:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 08:43:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 08:43:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 08:43:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 08:43:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 08:43:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 08:43:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 08:43:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 08:43:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 08:43:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 08:43:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 08:43:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 08:43:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 08:43:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 08:43:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 08:43:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 08:43:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 08:43:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 08:43:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 08:43:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 08:43:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 08:43:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 08:43:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 08:43:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 08:43:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 08:43:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:43:36] [Rank 0] PRINT: step:9200/10000 val_loss:4.1498 total_sharp:5.6491e-05 L1_sharp:1.0798e-04 L2_sharp:8.7830e-06 L3_sharp:7.8062e-06 L4_sharp:-4.0255e-06 L5_sharp:1.6976e-05 L6_sharp:1.6091e-05 L7_sharp:8.4789e-06 L8_sharp:1.9077e-05 L9_sharp:2.5706e-05 L10_sharp:3.6937e-05 L11_sharp:6.0841e-05 L12_sharp:3.7314e-04 total_fnorm:1.6875e+01 total_l1_linf:2.6240e+04 total_spectral:8.3750e+00 L1_fnorm:2.3438e+00 L2_fnorm:2.2188e+00 L3_fnorm:2.0781e+00 L4_fnorm:2.3594e+00 L5_fnorm:2.1875e+00 L6_fnorm:2.2969e+00 L7_fnorm:2.2812e+00 L8_fnorm:2.1875e+00 L9_fnorm:2.2500e+00 L10_fnorm:2.1875e+00 L11_fnorm:2.2031e+00 L12_fnorm:2.2500e+00 L1_l1linf:3.5742e-01 L2_l1linf:3.4961e-01 L3_l1linf:5.8984e-01 L4_l1linf:3.9648e-01 L5_l1linf:3.9453e-01 L6_l1linf:3.8281e-01 L7_l1linf:3.7695e-01 L8_l1linf:3.6523e-01 L9_l1linf:3.5352e-01 L10_l1linf:3.3203e-01 L11_l1linf:3.0664e-01 L12_l1linf:3.3398e-01 L1_spectral:3.5677e-02 L2_spectral:3.2927e-02 L3_spectral:3.9047e-02 L4_spectral:3.7264e-02 L5_spectral:3.3272e-02 L6_spectral:3.3887e-02 L7_spectral:3.3295e-02 L8_spectral:3.4770e-02 L9_spectral:3.3846e-02 L10_spectral:3.4306e-02 L11_spectral:3.4284e-02 L12_spectral:3.5475e-02 train_time:376444ms step_avg:40.92ms +[2025-09-11 08:43:36] [Rank 0] PRINT: step:9200/10000 val_loss:4.1498 total_sharp:5.6491e-05 L1_sharp:1.0798e-04 L2_sharp:8.7830e-06 L3_sharp:7.8062e-06 L4_sharp:-4.0255e-06 L5_sharp:1.6976e-05 L6_sharp:1.6091e-05 L7_sharp:8.4789e-06 L8_sharp:1.9077e-05 L9_sharp:2.5706e-05 L10_sharp:3.6937e-05 L11_sharp:6.0841e-05 L12_sharp:3.7314e-04 total_fnorm:1.6875e+01 total_l1_linf:2.6240e+04 total_spectral:8.3750e+00 L1_fnorm:2.3438e+00 L2_fnorm:2.2188e+00 L3_fnorm:2.0781e+00 L4_fnorm:2.3594e+00 L5_fnorm:2.1875e+00 L6_fnorm:2.2969e+00 L7_fnorm:2.2812e+00 L8_fnorm:2.1875e+00 L9_fnorm:2.2500e+00 L10_fnorm:2.1875e+00 L11_fnorm:2.2031e+00 L12_fnorm:2.2500e+00 L1_l1linf:3.5742e-01 L2_l1linf:3.4961e-01 L3_l1linf:5.8984e-01 L4_l1linf:3.9648e-01 L5_l1linf:3.9453e-01 
L6_l1linf:3.8281e-01 L7_l1linf:3.7695e-01 L8_l1linf:3.6523e-01 L9_l1linf:3.5352e-01 L10_l1linf:3.3203e-01 L11_l1linf:3.0664e-01 L12_l1linf:3.3398e-01 L1_spectral:3.5677e-02 L2_spectral:3.2927e-02 L3_spectral:3.9047e-02 L4_spectral:3.7264e-02 L5_spectral:3.3272e-02 L6_spectral:3.3887e-02 L7_spectral:3.3295e-02 L8_spectral:3.4770e-02 L9_spectral:3.3846e-02 L10_spectral:3.4306e-02 L11_spectral:3.4284e-02 L12_spectral:3.5475e-02 train_time:376444ms step_avg:40.92ms +[2025-09-11 08:43:37] [Rank 0] step:9201/10000 train_time:377812ms step_avg:41.06ms +[2025-09-11 08:43:37] [Rank 0] step:9201/10000 train_time:377812ms step_avg:41.06ms +[2025-09-11 08:43:38] [Rank 0] step:9221/10000 train_time:378551ms step_avg:41.05ms +[2025-09-11 08:43:38] [Rank 0] step:9221/10000 train_time:378551ms step_avg:41.05ms +[2025-09-11 08:43:38] [Rank 0] step:9241/10000 train_time:379259ms step_avg:41.04ms +[2025-09-11 08:43:38] [Rank 0] step:9241/10000 train_time:379259ms step_avg:41.04ms +[2025-09-11 08:43:39] [Rank 0] step:9261/10000 train_time:379970ms step_avg:41.03ms +[2025-09-11 08:43:39] [Rank 0] step:9261/10000 train_time:379970ms step_avg:41.03ms +[2025-09-11 08:43:40] [Rank 0] step:9281/10000 train_time:380681ms step_avg:41.02ms +[2025-09-11 08:43:40] [Rank 0] step:9281/10000 train_time:380681ms step_avg:41.02ms +[2025-09-11 08:43:41] [Rank 0] step:9301/10000 train_time:381389ms step_avg:41.01ms +[2025-09-11 08:43:41] [Rank 0] step:9301/10000 train_time:381389ms step_avg:41.01ms +[2025-09-11 08:43:41] [Rank 0] step:9321/10000 train_time:382100ms step_avg:40.99ms +[2025-09-11 08:43:41] [Rank 0] step:9321/10000 train_time:382100ms step_avg:40.99ms +[2025-09-11 08:43:42] [Rank 0] step:9341/10000 train_time:382805ms step_avg:40.98ms +[2025-09-11 08:43:42] [Rank 0] step:9341/10000 train_time:382805ms step_avg:40.98ms +[2025-09-11 08:43:43] [Rank 0] step:9361/10000 train_time:383513ms step_avg:40.97ms +[2025-09-11 08:43:43] [Rank 0] step:9361/10000 train_time:383513ms step_avg:40.97ms 
+[2025-09-11 08:43:43] [Rank 0] step:9381/10000 train_time:384219ms step_avg:40.96ms +[2025-09-11 08:43:43] [Rank 0] step:9381/10000 train_time:384219ms step_avg:40.96ms +[2025-09-11 08:43:44] [Rank 0] step:9401/10000 train_time:384929ms step_avg:40.95ms +[2025-09-11 08:43:44] [Rank 0] step:9401/10000 train_time:384929ms step_avg:40.95ms +[2025-09-11 08:43:45] [Rank 0] step:9421/10000 train_time:385639ms step_avg:40.93ms +[2025-09-11 08:43:45] [Rank 0] step:9421/10000 train_time:385639ms step_avg:40.93ms +[2025-09-11 08:43:46] [Rank 0] step:9441/10000 train_time:386353ms step_avg:40.92ms +[2025-09-11 08:43:46] [Rank 0] step:9441/10000 train_time:386353ms step_avg:40.92ms +[2025-09-11 08:43:46] [Rank 0] step:9461/10000 train_time:387062ms step_avg:40.91ms +[2025-09-11 08:43:46] [Rank 0] step:9461/10000 train_time:387062ms step_avg:40.91ms +[2025-09-11 08:43:47] [Rank 0] step:9481/10000 train_time:387772ms step_avg:40.90ms +[2025-09-11 08:43:47] [Rank 0] step:9481/10000 train_time:387772ms step_avg:40.90ms +[2025-09-11 08:43:48] [Rank 0] step:9501/10000 train_time:388483ms step_avg:40.89ms +[2025-09-11 08:43:48] [Rank 0] step:9501/10000 train_time:388483ms step_avg:40.89ms +[2025-09-11 08:43:48] [Rank 0] step:9521/10000 train_time:389195ms step_avg:40.88ms +[2025-09-11 08:43:48] [Rank 0] step:9521/10000 train_time:389195ms step_avg:40.88ms +[2025-09-11 08:43:49] [Rank 0] step:9541/10000 train_time:389901ms step_avg:40.87ms +[2025-09-11 08:43:49] [Rank 0] step:9541/10000 train_time:389901ms step_avg:40.87ms +[2025-09-11 08:43:50] [Rank 0] step:9561/10000 train_time:390610ms step_avg:40.85ms +[2025-09-11 08:43:50] [Rank 0] step:9561/10000 train_time:390610ms step_avg:40.85ms +[2025-09-11 08:43:51] [Rank 0] step:9581/10000 train_time:391321ms step_avg:40.84ms +[2025-09-11 08:43:51] [Rank 0] step:9581/10000 train_time:391321ms step_avg:40.84ms +[2025-09-11 08:43:51] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 08:43:51] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 08:43:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 08:43:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 08:43:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 08:43:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 08:43:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:43:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:43:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 08:43:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 08:43:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 08:43:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 08:43:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 08:43:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 08:43:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 08:43:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 08:43:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 08:43:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 08:43:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 08:43:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 08:43:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 08:43:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 08:43:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 08:43:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 08:43:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 08:43:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 08:43:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 08:43:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 08:43:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 08:43:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 08:43:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 08:43:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 08:44:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 08:44:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 08:44:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 08:44:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 08:44:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 08:44:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 08:44:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 08:44:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 08:44:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 08:44:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 08:44:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 08:44:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 08:44:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 08:44:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 08:44:01] [Rank 0] PRINT: step:9600/10000 val_loss:4.1382 total_sharp:3.6805e-05 L1_sharp:1.2744e-04 L2_sharp:6.0718e-06 L3_sharp:-9.9109e-07 L4_sharp:1.5259e-06 L5_sharp:1.7906e-05 L6_sharp:1.6852e-05 L7_sharp:6.9332e-06 L8_sharp:1.2208e-05 L9_sharp:1.6584e-05 L10_sharp:2.6516e-05 L11_sharp:4.8825e-05 L12_sharp:2.8507e-04 total_fnorm:9.8125e+00 total_l1_linf:1.2928e+04 total_spectral:4.9062e+00 L1_fnorm:1.2188e+00 L2_fnorm:1.2578e+00 L3_fnorm:1.1875e+00 L4_fnorm:1.3438e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2891e+00 L7_fnorm:1.2891e+00 L8_fnorm:1.2266e+00 L9_fnorm:1.2500e+00 L10_fnorm:1.2344e+00 L11_fnorm:1.2422e+00 L12_fnorm:1.2812e+00 L1_l1linf:1.7383e-01 L2_l1linf:1.7285e-01 L3_l1linf:3.7109e-01 L4_l1linf:2.3633e-01 L5_l1linf:1.8652e-01 L6_l1linf:1.8359e-01 L7_l1linf:1.8164e-01 L8_l1linf:1.6602e-01 L9_l1linf:1.7969e-01 L10_l1linf:1.5527e-01 L11_l1linf:1.5527e-01 L12_l1linf:1.7285e-01 L1_spectral:1.9622e-02 L2_spectral:1.9214e-02 L3_spectral:2.5254e-02 L4_spectral:2.2289e-02 L5_spectral:1.9255e-02 L6_spectral:1.8990e-02 L7_spectral:1.8756e-02 L8_spectral:1.9907e-02 L9_spectral:1.9248e-02 L10_spectral:1.9873e-02 L11_spectral:1.9384e-02 L12_spectral:2.0513e-02 train_time:392007ms step_avg:40.83ms +[2025-09-11 08:44:01] [Rank 0] PRINT: step:9600/10000 
val_loss:4.1382 total_sharp:3.6805e-05 L1_sharp:1.2744e-04 L2_sharp:6.0718e-06 L3_sharp:-9.9109e-07 L4_sharp:1.5259e-06 L5_sharp:1.7906e-05 L6_sharp:1.6852e-05 L7_sharp:6.9332e-06 L8_sharp:1.2208e-05 L9_sharp:1.6584e-05 L10_sharp:2.6516e-05 L11_sharp:4.8825e-05 L12_sharp:2.8507e-04 total_fnorm:9.8125e+00 total_l1_linf:1.2928e+04 total_spectral:4.9062e+00 L1_fnorm:1.2188e+00 L2_fnorm:1.2578e+00 L3_fnorm:1.1875e+00 L4_fnorm:1.3438e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2891e+00 L7_fnorm:1.2891e+00 L8_fnorm:1.2266e+00 L9_fnorm:1.2500e+00 L10_fnorm:1.2344e+00 L11_fnorm:1.2422e+00 L12_fnorm:1.2812e+00 L1_l1linf:1.7383e-01 L2_l1linf:1.7285e-01 L3_l1linf:3.7109e-01 L4_l1linf:2.3633e-01 L5_l1linf:1.8652e-01 L6_l1linf:1.8359e-01 L7_l1linf:1.8164e-01 L8_l1linf:1.6602e-01 L9_l1linf:1.7969e-01 L10_l1linf:1.5527e-01 L11_l1linf:1.5527e-01 L12_l1linf:1.7285e-01 L1_spectral:1.9622e-02 L2_spectral:1.9214e-02 L3_spectral:2.5254e-02 L4_spectral:2.2289e-02 L5_spectral:1.9255e-02 L6_spectral:1.8990e-02 L7_spectral:1.8756e-02 L8_spectral:1.9907e-02 L9_spectral:1.9248e-02 L10_spectral:1.9873e-02 L11_spectral:1.9384e-02 L12_spectral:2.0513e-02 train_time:392007ms step_avg:40.83ms +[2025-09-11 08:44:03] [Rank 0] step:9601/10000 train_time:393357ms step_avg:40.97ms +[2025-09-11 08:44:03] [Rank 0] step:9601/10000 train_time:393357ms step_avg:40.97ms +[2025-09-11 08:44:03] [Rank 0] step:9621/10000 train_time:394092ms step_avg:40.96ms +[2025-09-11 08:44:03] [Rank 0] step:9621/10000 train_time:394092ms step_avg:40.96ms +[2025-09-11 08:44:04] [Rank 0] step:9641/10000 train_time:394807ms step_avg:40.95ms +[2025-09-11 08:44:04] [Rank 0] step:9641/10000 train_time:394807ms step_avg:40.95ms +[2025-09-11 08:44:05] [Rank 0] step:9661/10000 train_time:395529ms step_avg:40.94ms +[2025-09-11 08:44:05] [Rank 0] step:9661/10000 train_time:395529ms step_avg:40.94ms +[2025-09-11 08:44:06] [Rank 0] step:9681/10000 train_time:396243ms step_avg:40.93ms +[2025-09-11 08:44:06] [Rank 0] step:9681/10000 
train_time:396243ms step_avg:40.93ms +[2025-09-11 08:44:06] [Rank 0] step:9701/10000 train_time:396962ms step_avg:40.92ms +[2025-09-11 08:44:06] [Rank 0] step:9701/10000 train_time:396962ms step_avg:40.92ms +[2025-09-11 08:44:07] [Rank 0] step:9721/10000 train_time:397681ms step_avg:40.91ms +[2025-09-11 08:44:07] [Rank 0] step:9721/10000 train_time:397681ms step_avg:40.91ms +[2025-09-11 08:44:08] [Rank 0] step:9741/10000 train_time:398400ms step_avg:40.90ms +[2025-09-11 08:44:08] [Rank 0] step:9741/10000 train_time:398400ms step_avg:40.90ms +[2025-09-11 08:44:09] [Rank 0] step:9761/10000 train_time:399116ms step_avg:40.89ms +[2025-09-11 08:44:09] [Rank 0] step:9761/10000 train_time:399116ms step_avg:40.89ms +[2025-09-11 08:44:09] [Rank 0] step:9781/10000 train_time:399831ms step_avg:40.88ms +[2025-09-11 08:44:09] [Rank 0] step:9781/10000 train_time:399831ms step_avg:40.88ms +[2025-09-11 08:44:10] [Rank 0] step:9801/10000 train_time:400552ms step_avg:40.87ms +[2025-09-11 08:44:10] [Rank 0] step:9801/10000 train_time:400552ms step_avg:40.87ms +[2025-09-11 08:44:11] [Rank 0] step:9821/10000 train_time:401270ms step_avg:40.86ms +[2025-09-11 08:44:11] [Rank 0] step:9821/10000 train_time:401270ms step_avg:40.86ms +[2025-09-11 08:44:11] [Rank 0] step:9841/10000 train_time:401990ms step_avg:40.85ms +[2025-09-11 08:44:11] [Rank 0] step:9841/10000 train_time:401990ms step_avg:40.85ms +[2025-09-11 08:44:12] [Rank 0] step:9861/10000 train_time:402707ms step_avg:40.84ms +[2025-09-11 08:44:12] [Rank 0] step:9861/10000 train_time:402707ms step_avg:40.84ms +[2025-09-11 08:44:13] [Rank 0] step:9881/10000 train_time:403425ms step_avg:40.83ms +[2025-09-11 08:44:13] [Rank 0] step:9881/10000 train_time:403425ms step_avg:40.83ms +[2025-09-11 08:44:14] [Rank 0] step:9901/10000 train_time:404139ms step_avg:40.82ms +[2025-09-11 08:44:14] [Rank 0] step:9901/10000 train_time:404139ms step_avg:40.82ms +[2025-09-11 08:44:14] [Rank 0] step:9921/10000 train_time:404855ms step_avg:40.81ms 
+[2025-09-11 08:44:14] [Rank 0] step:9921/10000 train_time:404855ms step_avg:40.81ms +[2025-09-11 08:44:15] [Rank 0] step:9941/10000 train_time:405576ms step_avg:40.80ms +[2025-09-11 08:44:15] [Rank 0] step:9941/10000 train_time:405576ms step_avg:40.80ms +[2025-09-11 08:44:16] [Rank 0] step:9961/10000 train_time:406296ms step_avg:40.79ms +[2025-09-11 08:44:16] [Rank 0] step:9961/10000 train_time:406296ms step_avg:40.79ms +[2025-09-11 08:44:16] [Rank 0] step:9981/10000 train_time:407014ms step_avg:40.78ms +[2025-09-11 08:44:16] [Rank 0] step:9981/10000 train_time:407014ms step_avg:40.78ms +[2025-09-11 08:44:17] [Rank 0] step:10000/10000 train_time:407703ms step_avg:40.77ms +[2025-09-11 08:44:17] [Rank 0] step:10000/10000 train_time:407703ms step_avg:40.77ms +[2025-09-11 08:44:17] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 08:44:17] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 08:44:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 08:44:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 08:44:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 08:44:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 08:44:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:44:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:44:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 08:44:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 08:44:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 08:44:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 08:44:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 08:44:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 08:44:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 08:44:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 08:44:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 08:44:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 08:44:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 08:44:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 08:44:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 08:44:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 08:44:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 08:44:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 08:44:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 08:44:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 08:44:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 08:44:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 08:44:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 08:44:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 08:44:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 08:44:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 08:44:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 08:44:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 08:44:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 08:44:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 08:44:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 08:44:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 08:44:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 08:44:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 08:44:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 08:44:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 08:44:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 08:44:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 08:44:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 08:44:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:44:27] [Rank 0] PRINT: step:10000/10000 val_loss:4.1339 total_sharp:2.3993e-05 L1_sharp:4.9413e-05 L2_sharp:8.6028e-06 L3_sharp:-6.0292e-07 L4_sharp:-5.1416e-06 L5_sharp:1.4519e-05 L6_sharp:1.1106e-05 L7_sharp:5.7587e-06 L8_sharp:1.1815e-05 L9_sharp:1.1817e-05 L10_sharp:2.2892e-05 L11_sharp:3.3414e-05 L12_sharp:2.5617e-04 total_fnorm:3.7656e+00 total_l1_linf:3.6160e+03 total_spectral:1.8750e+00 L1_fnorm:5.1562e-01 L2_fnorm:4.8438e-01 L3_fnorm:4.7852e-01 L4_fnorm:5.1562e-01 L5_fnorm:4.7656e-01 L6_fnorm:4.9805e-01 L7_fnorm:4.9609e-01 L8_fnorm:4.8047e-01 L9_fnorm:4.9023e-01 L10_fnorm:4.8047e-01 L11_fnorm:4.8242e-01 L12_fnorm:4.9805e-01 L1_l1linf:5.2979e-02 L2_l1linf:5.5176e-02 L3_l1linf:1.4551e-01 L4_l1linf:8.3496e-02 L5_l1linf:5.6641e-02 L6_l1linf:5.6885e-02 L7_l1linf:5.5420e-02 L8_l1linf:5.5420e-02 L9_l1linf:5.1514e-02 L10_l1linf:4.8828e-02 L11_l1linf:4.5410e-02 L12_l1linf:5.4688e-02 L1_spectral:8.2071e-03 L2_spectral:7.9744e-03 L3_spectral:1.1049e-02 L4_spectral:9.2573e-03 L5_spectral:7.6105e-03 L6_spectral:7.5857e-03 L7_spectral:7.5893e-03 L8_spectral:8.2791e-03 L9_spectral:7.7867e-03 L10_spectral:7.9146e-03 L11_spectral:7.8175e-03 L12_spectral:8.2976e-03 train_time:407723ms step_avg:40.77ms +[2025-09-11 08:44:27] [Rank 0] PRINT: step:10000/10000 val_loss:4.1339 total_sharp:2.3993e-05 L1_sharp:4.9413e-05 L2_sharp:8.6028e-06 L3_sharp:-6.0292e-07 L4_sharp:-5.1416e-06 L5_sharp:1.4519e-05 L6_sharp:1.1106e-05 L7_sharp:5.7587e-06 L8_sharp:1.1815e-05 L9_sharp:1.1817e-05 L10_sharp:2.2892e-05 L11_sharp:3.3414e-05 L12_sharp:2.5617e-04 total_fnorm:3.7656e+00 total_l1_linf:3.6160e+03 total_spectral:1.8750e+00 L1_fnorm:5.1562e-01 L2_fnorm:4.8438e-01 L3_fnorm:4.7852e-01 L4_fnorm:5.1562e-01 L5_fnorm:4.7656e-01 L6_fnorm:4.9805e-01 L7_fnorm:4.9609e-01 L8_fnorm:4.8047e-01 L9_fnorm:4.9023e-01 L10_fnorm:4.8047e-01 L11_fnorm:4.8242e-01 L12_fnorm:4.9805e-01 L1_l1linf:5.2979e-02 L2_l1linf:5.5176e-02 L3_l1linf:1.4551e-01 L4_l1linf:8.3496e-02 L5_l1linf:5.6641e-02 
L6_l1linf:5.6885e-02 L7_l1linf:5.5420e-02 L8_l1linf:5.5420e-02 L9_l1linf:5.1514e-02 L10_l1linf:4.8828e-02 L11_l1linf:4.5410e-02 L12_l1linf:5.4688e-02 L1_spectral:8.2071e-03 L2_spectral:7.9744e-03 L3_spectral:1.1049e-02 L4_spectral:9.2573e-03 L5_spectral:7.6105e-03 L6_spectral:7.5857e-03 L7_spectral:7.5893e-03 L8_spectral:8.2791e-03 L9_spectral:7.7867e-03 L10_spectral:7.9146e-03 L11_spectral:7.8175e-03 L12_spectral:8.2976e-03 train_time:407723ms step_avg:40.77ms +[2025-09-11 08:44:27] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 08:44:27 2025 --- +[2025-09-11 08:44:27] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 08:44:27 2025 --- +[2025-09-11 08:44:27] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 08:44:27] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.0005_seed_42/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.0005_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..9d286c264ea53b74d813eb656eec73b7d13afd75 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.0005_seed_42/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.05, + "muon_lr": 0.0005, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "112dd55e-cdde-41a7-99e9-9b48f812dabb", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.0005_seed_42/training_log_112dd55e-cdde-41a7-99e9-9b48f812dabb.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.0005_seed_42/training_log_112dd55e-cdde-41a7-99e9-9b48f812dabb.txt new file mode 100644 index 0000000000000000000000000000000000000000..a492278a1f2d7b743ec12fead879c5f9544a96a3 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.0005_seed_42/training_log_112dd55e-cdde-41a7-99e9-9b48f812dabb.txt @@ -0,0 +1,4264 @@ +[2025-09-11 09:38:04] [Rank 0] PRINT: --- Script Start: Thu Sep 11 09:38:04 2025 --- +[2025-09-11 09:38:04] [Rank 0] PRINT: --- Script Start: Thu Sep 11 09:38:04 2025 --- +[2025-09-11 09:38:04] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.05, muon_lr=0.0005, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 09:38:04] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.05, muon_lr=0.0005, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 09:38:04] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 09:38:04] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 09:38:04] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-11 09:38:04] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-11 09:38:04] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.0005_seed_42 +[2025-09-11 09:38:04] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.0005_seed_42 +[2025-09-11 09:38:04] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses 
import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert 
header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." 
+ "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + 
train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
if dist.is_initialized():
    dist.destroy_process_group()

# ---------------------------------------------------------------------------
# Echoed script source below (the script logs its own code at startup).
import os
import sys
with open(sys.argv[0]) as f:
    code = f.read()  # read the code of this file ASAP, for logging
import uuid
import time
import copy
import glob
from dataclasses import dataclass, asdict
from functools import lru_cache
from pathlib import Path
import argparse
import json
import random
import numpy as np
import gc
from torch.func import functional_call
# BUG FIX: the deprecated `from torch.cuda.amp import autocast` was imported and then
# immediately shadowed by the line below; only the torch.amp variant is kept.
from torch.amp import autocast
import itertools
from itertools import cycle

# Must be set before CUDA allocations happen.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
import torch
torch.empty(1, device="cuda", requires_grad=True).backward()  # prevents a bug on some systems
from torch import Tensor, nn
import torch.nn.functional as F
import torch.distributed as dist
# use of FlexAttention contributed by @KoszarskyB
from torch.nn.attention.flex_attention import BlockMask, flex_attention
sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt")
from optimizers.MUON_fix import Muon
from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom

#from kn_util.utils import setup_debugpy
#torch._inductor.config.coordinate_descent_tuning = True

# -----------------------------------------------------------------------------

mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom)

# -----------------------------------------------------------------------------
# Seeding Function
def set_seed(seed):
    """Seed Python, NumPy and torch (CPU + all CUDA devices) RNGs for reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    print(f"PRINT: Set seed to {seed}", flush=True)  # print immediately on all ranks
# -----------------------------------------------------------------------------
# Our own simple Distributed Data Loader
def _load_data_shard(file: Path):
    """Load one pre-tokenized .bin shard into a pinned uint16 CPU tensor.

    Shard layout: 256 int32 header (magic, version, num_tokens) followed by
    uint16 token ids.
    """
    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
    assert header[1] == 1, "unsupported version"
    num_tokens = int(header[2])
    with file.open("rb", buffering=0) as f:
        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
        f.seek(256 * 4)  # skip the 256-int32 header
        nbytes = f.readinto(tokens.numpy())
        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
    return tokens

def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
    """Yield (inputs, targets) for this rank's slice of each global batch.

    Shards are cycled with itertools.cycle, so the generator supports
    multi-epoch training and never raises StopIteration on its own.
    """
    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
    assert batch_size % world_size == 0
    local_batch_size = batch_size // world_size
    file_iter = cycle(files)  # cycle shards for multi-epoch training
    tokens, pos = _load_data_shard(next(file_iter)), 0
    while True:
        if pos + batch_size + 1 >= len(tokens):
            tokens, pos = _load_data_shard(next(file_iter)), 0
        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True)  # no sync on host side;
        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True)  # H2D in another stream isn't helpful.
        pos += batch_size
        yield inputs, targets


# -----------------------------------------------------------------------------
# int main
parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
# BUG FIX: help text below previously lacked "; " separators between modes 4..8,
# so the rendered --help ran the entries together.
parser.add_argument("--optimizer_mode", type=int, default=0,
                    help="Defines how Muon is applied. "
                         "0: Muon(All Hidden Attn+MLP - original); "
                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
                         "3: Muon(All Attn)/Adam(MLP); "
                         "4: Muon(MLP)/Adam(All Attn); "
                         "5: All Adam (No Muon, all applicable matrices to Adam); "
                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP).")
parser.add_argument("--model_parameterization", type=str, default="whole", choices=["whole", "qkvo"])
parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer")
parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer")
parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs")
exp_args = parser.parse_args()
set_seed(exp_args.seed)

# Import the GPT implementation matching the requested parameterization.
if exp_args.unet:
    print("Using U-net architecture")
    from models.nano_GPT_unet import GPT
elif exp_args.model_parameterization == "qkvo":
    print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w")
    # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w
    from models.nano_GPT_qkvo_simp import GPT
elif exp_args.model_parameterization == "whole":
    print("Using original architecture")
    from models.nano_GPT import GPT
@dataclass
class Hyperparameters:
    """Static run configuration.

    BUG FIX: the attributes were previously plain (unannotated) class
    attributes, so @dataclass generated NO fields and dataclasses.asdict()
    would have returned an empty dict. Type annotations make them real
    dataclass fields; defaults remain available on the class, so the
    config dump via __class__.__dict__ is unchanged.
    """
    # data
    train_files: str = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
    val_files: str = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin"
    val_tokens: int = 491520
    train_seq_len: int = 3 * 1024      # FlexAttention sequence length
    val_seq_len: int = 4 * 4 * 1024    # FlexAttention sequence length for validation
    # optimization
    num_iterations: int = 10000
    cooldown_frac: float = 0.4
    # architecture
    vocab_size: int = 50257
    # evaluation and logging
    val_loss_every: int = 400  # large interval reduces memory pressure from frequent sharpness calculations
    save_checkpoint: bool = False

args = Hyperparameters()

# DDP setup: rank/world size from torchrun's environment, local rank picks the device.
rank = int(os.environ.get("RANK", 0))
local_rank = int(os.environ.get("LOCAL_RANK", 0))
world_size = int(os.environ.get("WORLD_SIZE", 1))

assert torch.cuda.is_available()
device = torch.device("cuda", local_rank)
torch.cuda.set_device(device)

if not dist.is_initialized():  # ensure DDP is initialized only once
    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size)
dist.barrier()
master_process = (rank == 0)

# Logging setup: one run directory per (mode, parameterization, lrs, seed) combination.
logfile = None

from pathlib import Path

def check_and_delete_txt_files(folder_path):
    """Decide whether this run configuration still needs to be executed.

    Scans all .txt files directly inside *folder_path* for the completion
    marker "10000/10000". If no file contains it, the previous run is assumed
    incomplete and every .txt file is deleted.

    Returns:
        True  -> proceed with the run (folder missing / not a dir / no .txt
                 files / no completed run found);
        False -> a completed run already exists, skip.
    """
    folder = Path(folder_path)

    if not folder.exists():
        print(f"Error: Folder '{folder_path}' does not exist.")
        return True

    if not folder.is_dir():
        print(f"Error: '{folder_path}' is not a directory.")
        return True

    # Only look one layer deep.
    txt_files = list(folder.glob("*.txt"))
    if not txt_files:
        print("No .txt files found in the folder.")
        return True

    found_string = False
    for file_path in txt_files:
        try:
            content = file_path.read_text(encoding='utf-8')
            if "10000/10000" in content:
                found_string = True
                print(f"Found '10000/10000' in: {file_path}")
                break  # no need to check other files
        except Exception as e:
            print(f"Error reading {file_path}: {e}")

    if not found_string:
        print("String '10000/10000' not found in any .txt file. Deleting all .txt files...")
        for file_path in txt_files:
            try:
                file_path.unlink()
                print(f"Deleted: {file_path}")
            except Exception as e:
                print(f"Error deleting {file_path}: {e}")
    else:
        print("String '10000/10000' found. No files will be deleted.")
    return not found_string

logfile = None
run_dir_path_str = None

base_log_dir = Path(exp_args.base_dir)
# Folder name encodes the full experiment configuration (computed once; it was
# previously rebuilt identically inside the master_process branch).
run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
run_dir_path = base_log_dir / run_folder_name
run_flag = check_and_delete_txt_files(run_dir_path)

if run_flag:

    if master_process:
        # Re-seed on master before directory creation / config saving.
        set_seed(exp_args.seed)

        run_dir_path.mkdir(parents=True, exist_ok=True)
        run_dir_path_str = str(run_dir_path)

        run_uuid = uuid.uuid4()
        logfile = run_dir_path / f"training_log_{run_uuid}.txt"
        print(f"Logging to: {logfile}")

        # Save configuration
        config_to_save = {
            "cli_args": vars(exp_args),
            "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
            "run_uuid_for_log": str(run_uuid),
            "script_code_logged_at_start": True
        }
        config_file_path = run_dir_path / "config.json"
        with open(config_file_path, "w") as f:
            json.dump(config_to_save, f, indent=4)
        print(f"Saved configuration to: {config_file_path}")

    def print0(s, console=False):
        """Rank-0 logger: append every message (timestamped) to the logfile and
        echo console=True or 'PRINT:'-prefixed messages to stdout.

        BUG FIX: the logfile write block was duplicated, so every message was
        written to the log twice (visible as doubled lines in existing logs).
        """
        if master_process:
            timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
            log_message = f"[{timestamp}] [Rank {rank}] {s}"

            if console or s.startswith("PRINT:"):
                actual_s = s[6:] if s.startswith("PRINT:") else s
                print(actual_s)

            if logfile:
                with open(logfile, "a") as f:
                    f.write(log_message + "\n")

    print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
    print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
    print0(f"PRINT: Hyperparameters: {args}", console=True)
    print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
    if master_process:
        print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
    print0(code)  # log the full script source for provenance

    ########################################
    # Construct model and optimizer        #
    ########################################
    print0("PRINT: Constructing model...", console=True)
    model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768,
                           max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda()
    for m in model.modules():
        if isinstance(m, nn.Embedding):
            m.bfloat16()
    print0("PRINT: Broadcasting model parameters...", console=True)
    for param in model.parameters():
        dist.broadcast(param.detach(), 0)
    print0("PRINT: Model constructed and broadcasted.", console=True)

    # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
    if exp_args.model_parameterization == "qkvo":
        print0("PRINT: Collecting parameters for optimizers...", console=True)
        head_params = [model.lm_head.weight]
        embed_params = [model.embed.weight]

        # Granular collection for attention and MLP parts
        attn_q_params = []
        attn_k_params = []
        attn_v_params = []
        attn_o_params = []  # W_O from c_proj
        mlp_fc_params = []
        mlp_proj_params = []

        for block_module in model.blocks:
            if block_module.attn is not None:
                # q_w, k_w, v_w MUST exist on CausalSelfAttention for this parameterization.
                if hasattr(block_module.attn, 'q_w'):
                    attn_q_params.append(block_module.attn.q_w)
                else:
                    print0("PRINT: Warning: q_w not found in attn module of a block.", console=True)
                if hasattr(block_module.attn, 'k_w'):
                    attn_k_params.append(block_module.attn.k_w)
                else:
                    print0("PRINT: Warning: k_w not found in attn module of a block.", console=True)
                if hasattr(block_module.attn, 'v_w'):
                    attn_v_params.append(block_module.attn.v_w)
                else:
                    print0("PRINT: Warning: v_w not found in attn module of a block.", console=True)
                attn_o_params.append(block_module.attn.c_proj.weight)
            if block_module.mlp is not None:
                mlp_fc_params.append(block_module.mlp.c_fc.weight)
                mlp_proj_params.append(block_module.mlp.c_proj.weight)
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
# Helper functions for additional norms
def calculate_l1_to_linf_norm(matrix):
    """
    Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix.
    For vectors, returns the L1 norm.
    """
    if matrix.ndim == 1:
        return torch.sum(torch.abs(matrix))
    if matrix.ndim == 2:
        # Each row's L1 norm, then take the maximum.
        return torch.max(torch.sum(torch.abs(matrix), dim=1))
    # Higher-dimensional tensors: flatten trailing dims to 2D.
    # BUG FIX: reshape() instead of view() — view() raises on non-contiguous
    # inputs (e.g. transposed tensors).
    matrix_2d = matrix.reshape(matrix.shape[0], -1)
    return torch.max(torch.sum(torch.abs(matrix_2d), dim=1))

def calculate_spectral_norm(matrix):
    """
    Calculate the spectral norm (largest singular value) of a matrix.
    For vectors, returns the L2 norm.
    """
    # linalg ops need float32/float64; upcast half-precision inputs.
    if matrix.dtype in [torch.bfloat16, torch.float16]:
        matrix = matrix.float()

    if matrix.ndim == 1:
        return torch.norm(matrix, p=2)
    if matrix.ndim == 2:
        # Matrix 2-norm == largest singular value.
        return torch.linalg.matrix_norm(matrix, ord=2)
    # Higher-dimensional tensors: flatten trailing dims to 2D (reshape handles
    # non-contiguous inputs, unlike view).
    matrix_2d = matrix.reshape(matrix.shape[0], -1)
    return torch.linalg.matrix_norm(matrix_2d, ord=2)
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 09:38:04] [Rank 0] PRINT: Constructing model... +[2025-09-11 09:38:04] [Rank 0] PRINT: Constructing model... +[2025-09-11 09:38:05] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 09:38:05] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 09:38:05] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 09:38:05] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 09:38:05] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 09:38:05] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 09:38:05] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 09:38:05] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 09:38:05] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 09:38:05] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 09:38:07] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 09:38:07] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 09:38:07] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 09:38:07] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 09:38:07] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 09:38:07] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 09:38:13] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 09:38:13] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 09:38:13] [Rank 0] PRINT: Starting warmup... +[2025-09-11 09:38:13] [Rank 0] PRINT: Starting warmup... +[2025-09-11 09:38:54] [Rank 0] PRINT: Warmup complete. +[2025-09-11 09:38:54] [Rank 0] PRINT: Warmup complete. +[2025-09-11 09:38:54] [Rank 0] PRINT: Starting training... +[2025-09-11 09:38:54] [Rank 0] PRINT: Starting training... 
+[2025-09-11 09:38:55] [Rank 0] step:21/10000 train_time:1134ms step_avg:54.01ms +[2025-09-11 09:38:55] [Rank 0] step:21/10000 train_time:1134ms step_avg:54.01ms +[2025-09-11 09:38:55] [Rank 0] step:41/10000 train_time:1864ms step_avg:45.47ms +[2025-09-11 09:38:55] [Rank 0] step:41/10000 train_time:1864ms step_avg:45.47ms +[2025-09-11 09:38:56] [Rank 0] step:61/10000 train_time:2593ms step_avg:42.51ms +[2025-09-11 09:38:56] [Rank 0] step:61/10000 train_time:2593ms step_avg:42.51ms +[2025-09-11 09:38:57] [Rank 0] step:81/10000 train_time:3321ms step_avg:41.00ms +[2025-09-11 09:38:57] [Rank 0] step:81/10000 train_time:3321ms step_avg:41.00ms +[2025-09-11 09:38:58] [Rank 0] step:101/10000 train_time:4050ms step_avg:40.10ms +[2025-09-11 09:38:58] [Rank 0] step:101/10000 train_time:4050ms step_avg:40.10ms +[2025-09-11 09:38:58] [Rank 0] step:121/10000 train_time:4779ms step_avg:39.50ms +[2025-09-11 09:38:58] [Rank 0] step:121/10000 train_time:4779ms step_avg:39.50ms +[2025-09-11 09:38:59] [Rank 0] step:141/10000 train_time:5508ms step_avg:39.06ms +[2025-09-11 09:38:59] [Rank 0] step:141/10000 train_time:5508ms step_avg:39.06ms +[2025-09-11 09:39:00] [Rank 0] step:161/10000 train_time:6237ms step_avg:38.74ms +[2025-09-11 09:39:00] [Rank 0] step:161/10000 train_time:6237ms step_avg:38.74ms +[2025-09-11 09:39:01] [Rank 0] step:181/10000 train_time:6966ms step_avg:38.49ms +[2025-09-11 09:39:01] [Rank 0] step:181/10000 train_time:6966ms step_avg:38.49ms +[2025-09-11 09:39:01] [Rank 0] step:201/10000 train_time:7695ms step_avg:38.29ms +[2025-09-11 09:39:01] [Rank 0] step:201/10000 train_time:7695ms step_avg:38.29ms +[2025-09-11 09:39:02] [Rank 0] step:221/10000 train_time:8429ms step_avg:38.14ms +[2025-09-11 09:39:02] [Rank 0] step:221/10000 train_time:8429ms step_avg:38.14ms +[2025-09-11 09:39:03] [Rank 0] step:241/10000 train_time:9159ms step_avg:38.00ms +[2025-09-11 09:39:03] [Rank 0] step:241/10000 train_time:9159ms step_avg:38.00ms +[2025-09-11 09:39:03] [Rank 0] 
step:261/10000 train_time:9888ms step_avg:37.88ms +[2025-09-11 09:39:03] [Rank 0] step:261/10000 train_time:9888ms step_avg:37.88ms +[2025-09-11 09:39:04] [Rank 0] step:281/10000 train_time:10617ms step_avg:37.78ms +[2025-09-11 09:39:04] [Rank 0] step:281/10000 train_time:10617ms step_avg:37.78ms +[2025-09-11 09:39:05] [Rank 0] step:301/10000 train_time:11346ms step_avg:37.70ms +[2025-09-11 09:39:05] [Rank 0] step:301/10000 train_time:11346ms step_avg:37.70ms +[2025-09-11 09:39:06] [Rank 0] step:321/10000 train_time:12076ms step_avg:37.62ms +[2025-09-11 09:39:06] [Rank 0] step:321/10000 train_time:12076ms step_avg:37.62ms +[2025-09-11 09:39:06] [Rank 0] step:341/10000 train_time:12805ms step_avg:37.55ms +[2025-09-11 09:39:06] [Rank 0] step:341/10000 train_time:12805ms step_avg:37.55ms +[2025-09-11 09:39:07] [Rank 0] step:361/10000 train_time:13535ms step_avg:37.49ms +[2025-09-11 09:39:07] [Rank 0] step:361/10000 train_time:13535ms step_avg:37.49ms +[2025-09-11 09:39:08] [Rank 0] step:381/10000 train_time:14264ms step_avg:37.44ms +[2025-09-11 09:39:08] [Rank 0] step:381/10000 train_time:14264ms step_avg:37.44ms +[2025-09-11 09:39:09] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 09:39:09] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 09:39:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 09:39:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 09:39:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 09:39:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 09:39:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:39:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 09:39:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 09:39:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 09:39:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 09:39:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 09:39:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 09:39:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 09:39:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 09:39:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 09:39:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 09:39:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 09:39:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 09:39:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 09:39:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 09:39:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 09:39:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 09:39:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 09:39:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 09:39:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 09:39:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 09:39:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 09:39:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 09:39:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 09:39:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 09:39:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 09:39:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 09:39:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 09:39:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 09:39:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 09:39:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 09:39:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 09:39:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 09:39:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 09:39:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 09:39:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 09:39:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 09:39:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 09:39:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:39:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:39:56] [Rank 0] PRINT: step:400/10000 val_loss:6.2808 total_sharp:1.9751e-04 L1_sharp:9.5461e-02 L2_sharp:1.0045e-01 L3_sharp:9.7465e-02 L4_sharp:1.5409e-01 L5_sharp:1.5320e-01 L6_sharp:1.6349e-01 L7_sharp:2.1660e-01 L8_sharp:2.0513e-01 L9_sharp:2.2420e-01 L10_sharp:2.9791e-01 L11_sharp:3.9619e-01 L12_sharp:5.2262e-01 total_fnorm:1.7171e+02 total_l1_linf:4.2730e+05 total_spectral:8.5876e+01 L1_fnorm:5.9977e-02 L2_fnorm:6.0112e-02 L3_fnorm:6.0527e-02 L4_fnorm:6.0046e-02 L5_fnorm:6.0705e-02 L6_fnorm:6.0493e-02 L7_fnorm:6.0162e-02 L8_fnorm:6.0342e-02 L9_fnorm:6.0689e-02 L10_fnorm:6.0416e-02 L11_fnorm:6.0180e-02 L12_fnorm:6.0068e-02 L1_l1linf:2.2263e-02 L2_l1linf:2.2178e-02 L3_l1linf:2.2568e-02 L4_l1linf:2.2449e-02 L5_l1linf:2.2440e-02 L6_l1linf:2.2416e-02 L7_l1linf:2.2550e-02 L8_l1linf:2.2614e-02 L9_l1linf:2.2399e-02 L10_l1linf:2.2377e-02 L11_l1linf:2.2509e-02 L12_l1linf:2.2503e-02 L1_spectral:6.0269e-04 L2_spectral:6.0273e-04 L3_spectral:6.0264e-04 L4_spectral:6.0276e-04 L5_spectral:6.0266e-04 L6_spectral:6.0287e-04 L7_spectral:6.0269e-04 L8_spectral:6.0277e-04 L9_spectral:6.0241e-04 L10_spectral:6.0267e-04 L11_spectral:6.0267e-04 L12_spectral:6.0276e-04 train_time:14972ms step_avg:37.43ms +[2025-09-11 09:39:56] [Rank 0] PRINT: step:400/10000 val_loss:6.2808 total_sharp:1.9751e-04 L1_sharp:9.5461e-02 L2_sharp:1.0045e-01 L3_sharp:9.7465e-02 L4_sharp:1.5409e-01 L5_sharp:1.5320e-01 L6_sharp:1.6349e-01 L7_sharp:2.1660e-01 L8_sharp:2.0513e-01 L9_sharp:2.2420e-01 L10_sharp:2.9791e-01 L11_sharp:3.9619e-01 L12_sharp:5.2262e-01 total_fnorm:1.7171e+02 total_l1_linf:4.2730e+05 total_spectral:8.5876e+01 L1_fnorm:5.9977e-02 L2_fnorm:6.0112e-02 L3_fnorm:6.0527e-02 L4_fnorm:6.0046e-02 L5_fnorm:6.0705e-02 L6_fnorm:6.0493e-02 L7_fnorm:6.0162e-02 L8_fnorm:6.0342e-02 L9_fnorm:6.0689e-02 L10_fnorm:6.0416e-02 L11_fnorm:6.0180e-02 L12_fnorm:6.0068e-02 L1_l1linf:2.2263e-02 L2_l1linf:2.2178e-02 L3_l1linf:2.2568e-02 L4_l1linf:2.2449e-02 L5_l1linf:2.2440e-02 
L6_l1linf:2.2416e-02 L7_l1linf:2.2550e-02 L8_l1linf:2.2614e-02 L9_l1linf:2.2399e-02 L10_l1linf:2.2377e-02 L11_l1linf:2.2509e-02 L12_l1linf:2.2503e-02 L1_spectral:6.0269e-04 L2_spectral:6.0273e-04 L3_spectral:6.0264e-04 L4_spectral:6.0276e-04 L5_spectral:6.0266e-04 L6_spectral:6.0287e-04 L7_spectral:6.0269e-04 L8_spectral:6.0277e-04 L9_spectral:6.0241e-04 L10_spectral:6.0267e-04 L11_spectral:6.0267e-04 L12_spectral:6.0276e-04 train_time:14972ms step_avg:37.43ms +[2025-09-11 09:40:28] [Rank 0] step:401/10000 train_time:46734ms step_avg:116.54ms +[2025-09-11 09:40:28] [Rank 0] step:401/10000 train_time:46734ms step_avg:116.54ms +[2025-09-11 09:40:30] [Rank 0] step:421/10000 train_time:49000ms step_avg:116.39ms +[2025-09-11 09:40:30] [Rank 0] step:421/10000 train_time:49000ms step_avg:116.39ms +[2025-09-11 09:40:30] [Rank 0] step:441/10000 train_time:49643ms step_avg:112.57ms +[2025-09-11 09:40:30] [Rank 0] step:441/10000 train_time:49643ms step_avg:112.57ms +[2025-09-11 09:40:31] [Rank 0] step:461/10000 train_time:50285ms step_avg:109.08ms +[2025-09-11 09:40:31] [Rank 0] step:461/10000 train_time:50285ms step_avg:109.08ms +[2025-09-11 09:40:32] [Rank 0] step:481/10000 train_time:50926ms step_avg:105.88ms +[2025-09-11 09:40:32] [Rank 0] step:481/10000 train_time:50926ms step_avg:105.88ms +[2025-09-11 09:40:32] [Rank 0] step:501/10000 train_time:51568ms step_avg:102.93ms +[2025-09-11 09:40:32] [Rank 0] step:501/10000 train_time:51568ms step_avg:102.93ms +[2025-09-11 09:40:33] [Rank 0] step:521/10000 train_time:52210ms step_avg:100.21ms +[2025-09-11 09:40:33] [Rank 0] step:521/10000 train_time:52210ms step_avg:100.21ms +[2025-09-11 09:40:34] [Rank 0] step:541/10000 train_time:52851ms step_avg:97.69ms +[2025-09-11 09:40:34] [Rank 0] step:541/10000 train_time:52851ms step_avg:97.69ms +[2025-09-11 09:40:34] [Rank 0] step:561/10000 train_time:53493ms step_avg:95.35ms +[2025-09-11 09:40:34] [Rank 0] step:561/10000 train_time:53493ms step_avg:95.35ms +[2025-09-11 09:40:35] 
[Rank 0] step:581/10000 train_time:54135ms step_avg:93.18ms +[2025-09-11 09:40:35] [Rank 0] step:581/10000 train_time:54135ms step_avg:93.18ms +[2025-09-11 09:40:36] [Rank 0] step:601/10000 train_time:54777ms step_avg:91.14ms +[2025-09-11 09:40:36] [Rank 0] step:601/10000 train_time:54777ms step_avg:91.14ms +[2025-09-11 09:40:36] [Rank 0] step:621/10000 train_time:55418ms step_avg:89.24ms +[2025-09-11 09:40:36] [Rank 0] step:621/10000 train_time:55418ms step_avg:89.24ms +[2025-09-11 09:40:37] [Rank 0] step:641/10000 train_time:56059ms step_avg:87.46ms +[2025-09-11 09:40:37] [Rank 0] step:641/10000 train_time:56059ms step_avg:87.46ms +[2025-09-11 09:40:37] [Rank 0] step:661/10000 train_time:56700ms step_avg:85.78ms +[2025-09-11 09:40:37] [Rank 0] step:661/10000 train_time:56700ms step_avg:85.78ms +[2025-09-11 09:40:38] [Rank 0] step:681/10000 train_time:57341ms step_avg:84.20ms +[2025-09-11 09:40:38] [Rank 0] step:681/10000 train_time:57341ms step_avg:84.20ms +[2025-09-11 09:40:39] [Rank 0] step:701/10000 train_time:57983ms step_avg:82.71ms +[2025-09-11 09:40:39] [Rank 0] step:701/10000 train_time:57983ms step_avg:82.71ms +[2025-09-11 09:40:39] [Rank 0] step:721/10000 train_time:58624ms step_avg:81.31ms +[2025-09-11 09:40:39] [Rank 0] step:721/10000 train_time:58624ms step_avg:81.31ms +[2025-09-11 09:40:40] [Rank 0] step:741/10000 train_time:59265ms step_avg:79.98ms +[2025-09-11 09:40:40] [Rank 0] step:741/10000 train_time:59265ms step_avg:79.98ms +[2025-09-11 09:40:41] [Rank 0] step:761/10000 train_time:59911ms step_avg:78.73ms +[2025-09-11 09:40:41] [Rank 0] step:761/10000 train_time:59911ms step_avg:78.73ms +[2025-09-11 09:40:41] [Rank 0] step:781/10000 train_time:60557ms step_avg:77.54ms +[2025-09-11 09:40:41] [Rank 0] step:781/10000 train_time:60557ms step_avg:77.54ms +[2025-09-11 09:40:42] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 09:40:42] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 09:40:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 09:40:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 09:41:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 09:41:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 09:41:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:41:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:41:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 09:41:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 09:41:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 09:41:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 09:41:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 09:41:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 09:41:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 09:41:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 09:41:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 09:41:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 09:41:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 09:41:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 09:41:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 09:41:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 09:41:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 09:41:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 09:41:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 09:41:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 09:41:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 09:41:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 09:41:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 09:41:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 09:41:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 09:41:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 09:41:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 09:41:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 09:41:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 09:41:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 09:41:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 09:41:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 09:41:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... 
+[2025-09-11 09:41:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 09:41:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 09:41:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 09:41:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 09:41:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 09:41:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:41:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:41:30] [Rank 0] PRINT: step:800/10000 val_loss:5.9753 total_sharp:1.5682e-04 L1_sharp:1.6795e-01 L2_sharp:1.3638e-01 L3_sharp:1.4177e-01 L4_sharp:2.0980e-01 L5_sharp:3.0570e-01 L6_sharp:3.7699e-01 L7_sharp:4.4520e-01 L8_sharp:9.7458e-01 L9_sharp:1.1069e+00 L10_sharp:1.1989e+00 L11_sharp:1.0312e+00 L12_sharp:1.0107e+00 total_fnorm:1.8500e+02 total_l1_linf:4.1984e+05 total_spectral:9.3000e+01 L1_fnorm:4.6875e-02 L2_fnorm:4.8584e-02 L3_fnorm:4.8828e-02 L4_fnorm:4.8828e-02 L5_fnorm:4.7607e-02 L6_fnorm:4.8828e-02 L7_fnorm:4.8340e-02 L8_fnorm:4.5898e-02 L9_fnorm:4.7607e-02 L10_fnorm:4.7119e-02 L11_fnorm:4.5898e-02 L12_fnorm:4.3457e-02 L1_l1linf:2.2095e-02 L2_l1linf:2.1851e-02 L3_l1linf:2.1851e-02 L4_l1linf:2.1973e-02 L5_l1linf:2.1973e-02 L6_l1linf:2.1973e-02 L7_l1linf:2.1973e-02 L8_l1linf:2.1851e-02 L9_l1linf:2.1851e-02 L10_l1linf:2.1484e-02 L11_l1linf:2.1362e-02 L12_l1linf:2.1118e-02 L1_spectral:7.1063e-04 L2_spectral:7.2002e-04 L3_spectral:7.2087e-04 L4_spectral:7.1635e-04 L5_spectral:7.0904e-04 L6_spectral:7.1837e-04 L7_spectral:7.1655e-04 L8_spectral:7.0055e-04 L9_spectral:7.1153e-04 L10_spectral:7.1047e-04 L11_spectral:6.9433e-04 L12_spectral:6.7292e-04 train_time:61185ms step_avg:76.48ms +[2025-09-11 09:41:30] [Rank 0] PRINT: step:800/10000 val_loss:5.9753 
total_sharp:1.5682e-04 L1_sharp:1.6795e-01 L2_sharp:1.3638e-01 L3_sharp:1.4177e-01 L4_sharp:2.0980e-01 L5_sharp:3.0570e-01 L6_sharp:3.7699e-01 L7_sharp:4.4520e-01 L8_sharp:9.7458e-01 L9_sharp:1.1069e+00 L10_sharp:1.1989e+00 L11_sharp:1.0312e+00 L12_sharp:1.0107e+00 total_fnorm:1.8500e+02 total_l1_linf:4.1984e+05 total_spectral:9.3000e+01 L1_fnorm:4.6875e-02 L2_fnorm:4.8584e-02 L3_fnorm:4.8828e-02 L4_fnorm:4.8828e-02 L5_fnorm:4.7607e-02 L6_fnorm:4.8828e-02 L7_fnorm:4.8340e-02 L8_fnorm:4.5898e-02 L9_fnorm:4.7607e-02 L10_fnorm:4.7119e-02 L11_fnorm:4.5898e-02 L12_fnorm:4.3457e-02 L1_l1linf:2.2095e-02 L2_l1linf:2.1851e-02 L3_l1linf:2.1851e-02 L4_l1linf:2.1973e-02 L5_l1linf:2.1973e-02 L6_l1linf:2.1973e-02 L7_l1linf:2.1973e-02 L8_l1linf:2.1851e-02 L9_l1linf:2.1851e-02 L10_l1linf:2.1484e-02 L11_l1linf:2.1362e-02 L12_l1linf:2.1118e-02 L1_spectral:7.1063e-04 L2_spectral:7.2002e-04 L3_spectral:7.2087e-04 L4_spectral:7.1635e-04 L5_spectral:7.0904e-04 L6_spectral:7.1837e-04 L7_spectral:7.1655e-04 L8_spectral:7.0055e-04 L9_spectral:7.1153e-04 L10_spectral:7.1047e-04 L11_spectral:6.9433e-04 L12_spectral:6.7292e-04 train_time:61185ms step_avg:76.48ms +[2025-09-11 09:41:31] [Rank 0] step:801/10000 train_time:62815ms step_avg:78.42ms +[2025-09-11 09:41:31] [Rank 0] step:801/10000 train_time:62815ms step_avg:78.42ms +[2025-09-11 09:41:32] [Rank 0] step:821/10000 train_time:63493ms step_avg:77.34ms +[2025-09-11 09:41:32] [Rank 0] step:821/10000 train_time:63493ms step_avg:77.34ms +[2025-09-11 09:41:33] [Rank 0] step:841/10000 train_time:64140ms step_avg:76.27ms +[2025-09-11 09:41:33] [Rank 0] step:841/10000 train_time:64140ms step_avg:76.27ms +[2025-09-11 09:41:33] [Rank 0] step:861/10000 train_time:64786ms step_avg:75.25ms +[2025-09-11 09:41:33] [Rank 0] step:861/10000 train_time:64786ms step_avg:75.25ms +[2025-09-11 09:41:34] [Rank 0] step:881/10000 train_time:65433ms step_avg:74.27ms +[2025-09-11 09:41:34] [Rank 0] step:881/10000 train_time:65433ms step_avg:74.27ms +[2025-09-11 
09:41:34] [Rank 0] step:901/10000 train_time:66080ms step_avg:73.34ms +[2025-09-11 09:41:34] [Rank 0] step:901/10000 train_time:66080ms step_avg:73.34ms +[2025-09-11 09:41:35] [Rank 0] step:921/10000 train_time:66726ms step_avg:72.45ms +[2025-09-11 09:41:35] [Rank 0] step:921/10000 train_time:66726ms step_avg:72.45ms +[2025-09-11 09:41:36] [Rank 0] step:941/10000 train_time:67372ms step_avg:71.60ms +[2025-09-11 09:41:36] [Rank 0] step:941/10000 train_time:67372ms step_avg:71.60ms +[2025-09-11 09:41:36] [Rank 0] step:961/10000 train_time:68018ms step_avg:70.78ms +[2025-09-11 09:41:36] [Rank 0] step:961/10000 train_time:68018ms step_avg:70.78ms +[2025-09-11 09:41:37] [Rank 0] step:981/10000 train_time:68665ms step_avg:69.99ms +[2025-09-11 09:41:37] [Rank 0] step:981/10000 train_time:68665ms step_avg:69.99ms +[2025-09-11 09:41:38] [Rank 0] step:1001/10000 train_time:69311ms step_avg:69.24ms +[2025-09-11 09:41:38] [Rank 0] step:1001/10000 train_time:69311ms step_avg:69.24ms +[2025-09-11 09:41:38] [Rank 0] step:1021/10000 train_time:69957ms step_avg:68.52ms +[2025-09-11 09:41:38] [Rank 0] step:1021/10000 train_time:69957ms step_avg:68.52ms +[2025-09-11 09:41:39] [Rank 0] step:1041/10000 train_time:70603ms step_avg:67.82ms +[2025-09-11 09:41:39] [Rank 0] step:1041/10000 train_time:70603ms step_avg:67.82ms +[2025-09-11 09:41:40] [Rank 0] step:1061/10000 train_time:71249ms step_avg:67.15ms +[2025-09-11 09:41:40] [Rank 0] step:1061/10000 train_time:71249ms step_avg:67.15ms +[2025-09-11 09:41:40] [Rank 0] step:1081/10000 train_time:71894ms step_avg:66.51ms +[2025-09-11 09:41:40] [Rank 0] step:1081/10000 train_time:71894ms step_avg:66.51ms +[2025-09-11 09:41:41] [Rank 0] step:1101/10000 train_time:72540ms step_avg:65.89ms +[2025-09-11 09:41:41] [Rank 0] step:1101/10000 train_time:72540ms step_avg:65.89ms +[2025-09-11 09:41:42] [Rank 0] step:1121/10000 train_time:73186ms step_avg:65.29ms +[2025-09-11 09:41:42] [Rank 0] step:1121/10000 train_time:73186ms step_avg:65.29ms 
+[2025-09-11 09:41:42] [Rank 0] step:1141/10000 train_time:73832ms step_avg:64.71ms +[2025-09-11 09:41:42] [Rank 0] step:1141/10000 train_time:73832ms step_avg:64.71ms +[2025-09-11 09:41:43] [Rank 0] step:1161/10000 train_time:74478ms step_avg:64.15ms +[2025-09-11 09:41:43] [Rank 0] step:1161/10000 train_time:74478ms step_avg:64.15ms +[2025-09-11 09:41:44] [Rank 0] step:1181/10000 train_time:75123ms step_avg:63.61ms +[2025-09-11 09:41:44] [Rank 0] step:1181/10000 train_time:75123ms step_avg:63.61ms +[2025-09-11 09:41:44] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 09:41:44] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 09:41:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 09:41:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 09:41:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 09:41:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 09:41:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:41:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:41:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 09:41:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 09:41:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 09:41:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 09:41:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... 
+[2025-09-11 09:41:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 09:41:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 09:41:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 09:41:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 09:41:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 09:41:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 09:41:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 09:41:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 09:41:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 09:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 09:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 09:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 09:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 09:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 09:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 09:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 09:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 09:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 09:41:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... 
+[2025-09-11 09:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 09:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 09:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 09:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 09:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 09:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 09:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 09:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 09:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 09:41:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 09:41:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 09:41:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 09:41:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:41:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:41:54] [Rank 0] PRINT: step:1200/10000 val_loss:5.7600 total_sharp:1.2284e-04 L1_sharp:1.8977e-01 L2_sharp:1.5982e-01 L3_sharp:1.5754e-01 L4_sharp:1.8764e-01 L5_sharp:2.5808e-01 L6_sharp:3.0160e-01 L7_sharp:3.7083e-01 L8_sharp:6.1867e-01 L9_sharp:9.1659e-01 L10_sharp:1.8116e+00 L11_sharp:2.1907e+00 L12_sharp:3.4589e+00 total_fnorm:1.8600e+02 total_l1_linf:4.0346e+05 total_spectral:9.3000e+01 L1_fnorm:4.8340e-02 L2_fnorm:4.9072e-02 L3_fnorm:4.9072e-02 L4_fnorm:4.9316e-02 L5_fnorm:4.8828e-02 L6_fnorm:4.9316e-02 L7_fnorm:4.9316e-02 L8_fnorm:4.8584e-02 L9_fnorm:4.9316e-02 L10_fnorm:4.9316e-02 L11_fnorm:4.9072e-02 L12_fnorm:4.7852e-02 L1_l1linf:2.0996e-02 L2_l1linf:2.0996e-02 L3_l1linf:2.0752e-02 L4_l1linf:2.0996e-02 L5_l1linf:2.0752e-02 L6_l1linf:2.0874e-02 L7_l1linf:2.0996e-02 L8_l1linf:2.0996e-02 L9_l1linf:2.1118e-02 L10_l1linf:2.1240e-02 L11_l1linf:2.1362e-02 L12_l1linf:2.1362e-02 L1_spectral:7.5624e-04 L2_spectral:7.5707e-04 L3_spectral:7.5223e-04 L4_spectral:7.5574e-04 L5_spectral:7.6009e-04 L6_spectral:7.5649e-04 L7_spectral:7.6064e-04 L8_spectral:7.5434e-04 L9_spectral:7.5704e-04 L10_spectral:7.5115e-04 L11_spectral:7.4991e-04 L12_spectral:7.3802e-04 train_time:75751ms step_avg:63.13ms +[2025-09-11 09:41:54] [Rank 0] PRINT: step:1200/10000 val_loss:5.7600 total_sharp:1.2284e-04 L1_sharp:1.8977e-01 L2_sharp:1.5982e-01 L3_sharp:1.5754e-01 L4_sharp:1.8764e-01 L5_sharp:2.5808e-01 L6_sharp:3.0160e-01 L7_sharp:3.7083e-01 L8_sharp:6.1867e-01 L9_sharp:9.1659e-01 L10_sharp:1.8116e+00 L11_sharp:2.1907e+00 L12_sharp:3.4589e+00 total_fnorm:1.8600e+02 total_l1_linf:4.0346e+05 total_spectral:9.3000e+01 L1_fnorm:4.8340e-02 L2_fnorm:4.9072e-02 L3_fnorm:4.9072e-02 L4_fnorm:4.9316e-02 L5_fnorm:4.8828e-02 L6_fnorm:4.9316e-02 L7_fnorm:4.9316e-02 L8_fnorm:4.8584e-02 L9_fnorm:4.9316e-02 L10_fnorm:4.9316e-02 L11_fnorm:4.9072e-02 L12_fnorm:4.7852e-02 L1_l1linf:2.0996e-02 L2_l1linf:2.0996e-02 L3_l1linf:2.0752e-02 L4_l1linf:2.0996e-02 L5_l1linf:2.0752e-02 
L6_l1linf:2.0874e-02 L7_l1linf:2.0996e-02 L8_l1linf:2.0996e-02 L9_l1linf:2.1118e-02 L10_l1linf:2.1240e-02 L11_l1linf:2.1362e-02 L12_l1linf:2.1362e-02 L1_spectral:7.5624e-04 L2_spectral:7.5707e-04 L3_spectral:7.5223e-04 L4_spectral:7.5574e-04 L5_spectral:7.6009e-04 L6_spectral:7.5649e-04 L7_spectral:7.6064e-04 L8_spectral:7.5434e-04 L9_spectral:7.5704e-04 L10_spectral:7.5115e-04 L11_spectral:7.4991e-04 L12_spectral:7.3802e-04 train_time:75751ms step_avg:63.13ms +[2025-09-11 09:41:56] [Rank 0] step:1201/10000 train_time:77366ms step_avg:64.42ms +[2025-09-11 09:41:56] [Rank 0] step:1201/10000 train_time:77366ms step_avg:64.42ms +[2025-09-11 09:41:57] [Rank 0] step:1221/10000 train_time:78056ms step_avg:63.93ms +[2025-09-11 09:41:57] [Rank 0] step:1221/10000 train_time:78056ms step_avg:63.93ms +[2025-09-11 09:41:57] [Rank 0] step:1241/10000 train_time:78704ms step_avg:63.42ms +[2025-09-11 09:41:57] [Rank 0] step:1241/10000 train_time:78704ms step_avg:63.42ms +[2025-09-11 09:41:58] [Rank 0] step:1261/10000 train_time:79351ms step_avg:62.93ms +[2025-09-11 09:41:58] [Rank 0] step:1261/10000 train_time:79351ms step_avg:62.93ms +[2025-09-11 09:41:59] [Rank 0] step:1281/10000 train_time:79997ms step_avg:62.45ms +[2025-09-11 09:41:59] [Rank 0] step:1281/10000 train_time:79997ms step_avg:62.45ms +[2025-09-11 09:41:59] [Rank 0] step:1301/10000 train_time:80644ms step_avg:61.99ms +[2025-09-11 09:41:59] [Rank 0] step:1301/10000 train_time:80644ms step_avg:61.99ms +[2025-09-11 09:42:00] [Rank 0] step:1321/10000 train_time:81289ms step_avg:61.54ms +[2025-09-11 09:42:00] [Rank 0] step:1321/10000 train_time:81289ms step_avg:61.54ms +[2025-09-11 09:42:01] [Rank 0] step:1341/10000 train_time:81936ms step_avg:61.10ms +[2025-09-11 09:42:01] [Rank 0] step:1341/10000 train_time:81936ms step_avg:61.10ms +[2025-09-11 09:42:01] [Rank 0] step:1361/10000 train_time:82587ms step_avg:60.68ms +[2025-09-11 09:42:01] [Rank 0] step:1361/10000 train_time:82587ms step_avg:60.68ms +[2025-09-11 09:42:02] 
[Rank 0] step:1381/10000 train_time:83234ms step_avg:60.27ms +[2025-09-11 09:42:02] [Rank 0] step:1381/10000 train_time:83234ms step_avg:60.27ms +[2025-09-11 09:42:03] [Rank 0] step:1401/10000 train_time:83881ms step_avg:59.87ms +[2025-09-11 09:42:03] [Rank 0] step:1401/10000 train_time:83881ms step_avg:59.87ms +[2025-09-11 09:42:03] [Rank 0] step:1421/10000 train_time:84526ms step_avg:59.48ms +[2025-09-11 09:42:03] [Rank 0] step:1421/10000 train_time:84526ms step_avg:59.48ms +[2025-09-11 09:42:04] [Rank 0] step:1441/10000 train_time:85172ms step_avg:59.11ms +[2025-09-11 09:42:04] [Rank 0] step:1441/10000 train_time:85172ms step_avg:59.11ms +[2025-09-11 09:42:05] [Rank 0] step:1461/10000 train_time:85818ms step_avg:58.74ms +[2025-09-11 09:42:05] [Rank 0] step:1461/10000 train_time:85818ms step_avg:58.74ms +[2025-09-11 09:42:05] [Rank 0] step:1481/10000 train_time:86464ms step_avg:58.38ms +[2025-09-11 09:42:05] [Rank 0] step:1481/10000 train_time:86464ms step_avg:58.38ms +[2025-09-11 09:42:06] [Rank 0] step:1501/10000 train_time:87114ms step_avg:58.04ms +[2025-09-11 09:42:06] [Rank 0] step:1501/10000 train_time:87114ms step_avg:58.04ms +[2025-09-11 09:42:07] [Rank 0] step:1521/10000 train_time:87764ms step_avg:57.70ms +[2025-09-11 09:42:07] [Rank 0] step:1521/10000 train_time:87764ms step_avg:57.70ms +[2025-09-11 09:42:07] [Rank 0] step:1541/10000 train_time:88413ms step_avg:57.37ms +[2025-09-11 09:42:07] [Rank 0] step:1541/10000 train_time:88413ms step_avg:57.37ms +[2025-09-11 09:42:08] [Rank 0] step:1561/10000 train_time:89063ms step_avg:57.05ms +[2025-09-11 09:42:08] [Rank 0] step:1561/10000 train_time:89063ms step_avg:57.05ms +[2025-09-11 09:42:08] [Rank 0] step:1581/10000 train_time:89712ms step_avg:56.74ms +[2025-09-11 09:42:08] [Rank 0] step:1581/10000 train_time:89712ms step_avg:56.74ms +[2025-09-11 09:42:09] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 09:42:09] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 09:42:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 09:42:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 09:42:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 09:42:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 09:42:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:42:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:42:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 09:42:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 09:42:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 09:42:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 09:42:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 09:42:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 09:42:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 09:42:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 09:42:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 09:42:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 09:42:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 09:42:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 09:42:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 09:42:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 09:42:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 09:42:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 09:42:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 09:42:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 09:42:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 09:42:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 09:42:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 09:42:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 09:42:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 09:42:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 09:42:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 09:42:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 09:42:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 09:42:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 09:42:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 09:42:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 09:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 09:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 09:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 09:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 09:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 09:42:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 09:42:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:42:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:42:20] [Rank 0] PRINT: step:1600/10000 val_loss:5.6353 total_sharp:1.2433e-04 L1_sharp:1.8552e-01 L2_sharp:1.6763e-01 L3_sharp:1.7104e-01 L4_sharp:1.8155e-01 L5_sharp:2.5813e-01 L6_sharp:2.9003e-01 L7_sharp:4.6243e-01 L8_sharp:6.8368e-01 L9_sharp:9.5762e-01 L10_sharp:1.3500e+00 L11_sharp:1.7353e+00 L12_sharp:2.7331e+00 total_fnorm:1.8100e+02 total_l1_linf:3.7274e+05 total_spectral:9.0500e+01 L1_fnorm:4.8584e-02 L2_fnorm:4.9316e-02 L3_fnorm:4.9316e-02 L4_fnorm:4.9072e-02 L5_fnorm:4.8828e-02 L6_fnorm:4.9316e-02 L7_fnorm:4.9316e-02 L8_fnorm:4.8584e-02 L9_fnorm:4.9316e-02 L10_fnorm:4.9561e-02 L11_fnorm:4.9316e-02 L12_fnorm:4.7607e-02 L1_l1linf:2.0020e-02 L2_l1linf:2.0264e-02 L3_l1linf:2.0142e-02 L4_l1linf:2.0264e-02 L5_l1linf:2.0142e-02 L6_l1linf:2.0264e-02 L7_l1linf:2.0020e-02 L8_l1linf:1.9897e-02 L9_l1linf:2.0264e-02 L10_l1linf:2.0264e-02 L11_l1linf:2.0630e-02 L12_l1linf:2.0386e-02 L1_spectral:7.7522e-04 L2_spectral:7.7399e-04 L3_spectral:7.7448e-04 L4_spectral:7.8140e-04 L5_spectral:7.7579e-04 L6_spectral:7.7714e-04 L7_spectral:7.8080e-04 L8_spectral:7.7516e-04 L9_spectral:7.7812e-04 L10_spectral:7.7277e-04 L11_spectral:7.7024e-04 L12_spectral:7.4574e-04 train_time:90344ms step_avg:56.47ms +[2025-09-11 09:42:20] [Rank 0] PRINT: step:1600/10000 
val_loss:5.6353 total_sharp:1.2433e-04 L1_sharp:1.8552e-01 L2_sharp:1.6763e-01 L3_sharp:1.7104e-01 L4_sharp:1.8155e-01 L5_sharp:2.5813e-01 L6_sharp:2.9003e-01 L7_sharp:4.6243e-01 L8_sharp:6.8368e-01 L9_sharp:9.5762e-01 L10_sharp:1.3500e+00 L11_sharp:1.7353e+00 L12_sharp:2.7331e+00 total_fnorm:1.8100e+02 total_l1_linf:3.7274e+05 total_spectral:9.0500e+01 L1_fnorm:4.8584e-02 L2_fnorm:4.9316e-02 L3_fnorm:4.9316e-02 L4_fnorm:4.9072e-02 L5_fnorm:4.8828e-02 L6_fnorm:4.9316e-02 L7_fnorm:4.9316e-02 L8_fnorm:4.8584e-02 L9_fnorm:4.9316e-02 L10_fnorm:4.9561e-02 L11_fnorm:4.9316e-02 L12_fnorm:4.7607e-02 L1_l1linf:2.0020e-02 L2_l1linf:2.0264e-02 L3_l1linf:2.0142e-02 L4_l1linf:2.0264e-02 L5_l1linf:2.0142e-02 L6_l1linf:2.0264e-02 L7_l1linf:2.0020e-02 L8_l1linf:1.9897e-02 L9_l1linf:2.0264e-02 L10_l1linf:2.0264e-02 L11_l1linf:2.0630e-02 L12_l1linf:2.0386e-02 L1_spectral:7.7522e-04 L2_spectral:7.7399e-04 L3_spectral:7.7448e-04 L4_spectral:7.8140e-04 L5_spectral:7.7579e-04 L6_spectral:7.7714e-04 L7_spectral:7.8080e-04 L8_spectral:7.7516e-04 L9_spectral:7.7812e-04 L10_spectral:7.7277e-04 L11_spectral:7.7024e-04 L12_spectral:7.4574e-04 train_time:90344ms step_avg:56.47ms +[2025-09-11 09:42:22] [Rank 0] step:1601/10000 train_time:92001ms step_avg:57.46ms +[2025-09-11 09:42:22] [Rank 0] step:1601/10000 train_time:92001ms step_avg:57.46ms +[2025-09-11 09:42:22] [Rank 0] step:1621/10000 train_time:92673ms step_avg:57.17ms +[2025-09-11 09:42:22] [Rank 0] step:1621/10000 train_time:92673ms step_avg:57.17ms +[2025-09-11 09:42:23] [Rank 0] step:1641/10000 train_time:93326ms step_avg:56.87ms +[2025-09-11 09:42:23] [Rank 0] step:1641/10000 train_time:93326ms step_avg:56.87ms +[2025-09-11 09:42:24] [Rank 0] step:1661/10000 train_time:93976ms step_avg:56.58ms +[2025-09-11 09:42:24] [Rank 0] step:1661/10000 train_time:93976ms step_avg:56.58ms +[2025-09-11 09:42:24] [Rank 0] step:1681/10000 train_time:94626ms step_avg:56.29ms +[2025-09-11 09:42:24] [Rank 0] step:1681/10000 train_time:94626ms 
step_avg:56.29ms +[2025-09-11 09:42:25] [Rank 0] step:1701/10000 train_time:95276ms step_avg:56.01ms +[2025-09-11 09:42:25] [Rank 0] step:1701/10000 train_time:95276ms step_avg:56.01ms +[2025-09-11 09:42:26] [Rank 0] step:1721/10000 train_time:95926ms step_avg:55.74ms +[2025-09-11 09:42:26] [Rank 0] step:1721/10000 train_time:95926ms step_avg:55.74ms +[2025-09-11 09:42:26] [Rank 0] step:1741/10000 train_time:96576ms step_avg:55.47ms +[2025-09-11 09:42:26] [Rank 0] step:1741/10000 train_time:96576ms step_avg:55.47ms +[2025-09-11 09:42:27] [Rank 0] step:1761/10000 train_time:97386ms step_avg:55.30ms +[2025-09-11 09:42:27] [Rank 0] step:1761/10000 train_time:97386ms step_avg:55.30ms +[2025-09-11 09:42:28] [Rank 0] step:1781/10000 train_time:98144ms step_avg:55.11ms +[2025-09-11 09:42:28] [Rank 0] step:1781/10000 train_time:98144ms step_avg:55.11ms +[2025-09-11 09:42:28] [Rank 0] step:1801/10000 train_time:98793ms step_avg:54.85ms +[2025-09-11 09:42:28] [Rank 0] step:1801/10000 train_time:98793ms step_avg:54.85ms +[2025-09-11 09:42:29] [Rank 0] step:1821/10000 train_time:99444ms step_avg:54.61ms +[2025-09-11 09:42:29] [Rank 0] step:1821/10000 train_time:99444ms step_avg:54.61ms +[2025-09-11 09:42:30] [Rank 0] step:1841/10000 train_time:100356ms step_avg:54.51ms +[2025-09-11 09:42:30] [Rank 0] step:1841/10000 train_time:100356ms step_avg:54.51ms +[2025-09-11 09:42:31] [Rank 0] step:1861/10000 train_time:101006ms step_avg:54.28ms +[2025-09-11 09:42:31] [Rank 0] step:1861/10000 train_time:101006ms step_avg:54.28ms +[2025-09-11 09:42:31] [Rank 0] step:1881/10000 train_time:101656ms step_avg:54.04ms +[2025-09-11 09:42:31] [Rank 0] step:1881/10000 train_time:101656ms step_avg:54.04ms +[2025-09-11 09:42:32] [Rank 0] step:1901/10000 train_time:102306ms step_avg:53.82ms +[2025-09-11 09:42:32] [Rank 0] step:1901/10000 train_time:102306ms step_avg:53.82ms +[2025-09-11 09:42:33] [Rank 0] step:1921/10000 train_time:102956ms step_avg:53.59ms +[2025-09-11 09:42:33] [Rank 0] 
step:1921/10000 train_time:102956ms step_avg:53.59ms +[2025-09-11 09:42:33] [Rank 0] step:1941/10000 train_time:103606ms step_avg:53.38ms +[2025-09-11 09:42:33] [Rank 0] step:1941/10000 train_time:103606ms step_avg:53.38ms +[2025-09-11 09:42:34] [Rank 0] step:1961/10000 train_time:104256ms step_avg:53.16ms +[2025-09-11 09:42:34] [Rank 0] step:1961/10000 train_time:104256ms step_avg:53.16ms +[2025-09-11 09:42:35] [Rank 0] step:1981/10000 train_time:104906ms step_avg:52.96ms +[2025-09-11 09:42:35] [Rank 0] step:1981/10000 train_time:104906ms step_avg:52.96ms +[2025-09-11 09:42:35] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 09:42:35] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 09:42:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 09:42:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 09:42:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 09:42:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 09:42:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:42:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:42:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 09:42:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 09:42:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 09:42:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 09:42:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 09:42:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 09:42:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 09:42:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 09:42:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 09:42:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 09:42:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 09:42:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 09:42:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 09:42:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 09:42:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 09:42:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 09:42:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 09:42:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 09:42:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 09:42:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 09:42:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 09:42:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 09:42:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 09:42:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 09:42:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 09:42:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 09:42:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 09:42:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 09:42:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 09:42:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 09:42:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 09:42:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 09:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 09:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 09:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 09:42:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 09:42:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:42:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:42:46] [Rank 0] PRINT: step:2000/10000 val_loss:5.5213 total_sharp:9.2426e-05 L1_sharp:1.5244e-01 L2_sharp:1.3874e-01 L3_sharp:1.4793e-01 L4_sharp:1.7694e-01 L5_sharp:1.6666e-01 L6_sharp:1.4021e-01 L7_sharp:2.0088e-01 L8_sharp:2.3008e-01 L9_sharp:2.8335e-01 L10_sharp:2.7271e-01 L11_sharp:3.0468e-01 L12_sharp:7.7698e-01 total_fnorm:1.7700e+02 total_l1_linf:3.7683e+05 total_spectral:8.8500e+01 L1_fnorm:4.8828e-02 L2_fnorm:4.9316e-02 L3_fnorm:4.9316e-02 L4_fnorm:4.9316e-02 L5_fnorm:4.8828e-02 L6_fnorm:4.9072e-02 L7_fnorm:4.9316e-02 L8_fnorm:4.8096e-02 L9_fnorm:4.9072e-02 L10_fnorm:4.9316e-02 L11_fnorm:4.9316e-02 L12_fnorm:4.7607e-02 L1_l1linf:1.9165e-02 L2_l1linf:1.9043e-02 L3_l1linf:1.9287e-02 L4_l1linf:1.9165e-02 L5_l1linf:1.9165e-02 L6_l1linf:1.8799e-02 L7_l1linf:1.9165e-02 L8_l1linf:1.9043e-02 L9_l1linf:1.9043e-02 L10_l1linf:1.9287e-02 L11_l1linf:1.9287e-02 L12_l1linf:1.9531e-02 L1_spectral:7.8522e-04 L2_spectral:7.8582e-04 L3_spectral:7.8740e-04 L4_spectral:7.9093e-04 L5_spectral:7.9175e-04 L6_spectral:7.8905e-04 L7_spectral:7.9210e-04 L8_spectral:7.8208e-04 L9_spectral:7.8273e-04 L10_spectral:7.8100e-04 L11_spectral:7.8354e-04 L12_spectral:7.6701e-04 train_time:105538ms step_avg:52.77ms +[2025-09-11 09:42:46] [Rank 0] PRINT: step:2000/10000 val_loss:5.5213 total_sharp:9.2426e-05 L1_sharp:1.5244e-01 L2_sharp:1.3874e-01 L3_sharp:1.4793e-01 L4_sharp:1.7694e-01 L5_sharp:1.6666e-01 L6_sharp:1.4021e-01 L7_sharp:2.0088e-01 L8_sharp:2.3008e-01 L9_sharp:2.8335e-01 L10_sharp:2.7271e-01 L11_sharp:3.0468e-01 L12_sharp:7.7698e-01 total_fnorm:1.7700e+02 total_l1_linf:3.7683e+05 total_spectral:8.8500e+01 L1_fnorm:4.8828e-02 L2_fnorm:4.9316e-02 L3_fnorm:4.9316e-02 L4_fnorm:4.9316e-02 L5_fnorm:4.8828e-02 L6_fnorm:4.9072e-02 L7_fnorm:4.9316e-02 L8_fnorm:4.8096e-02 L9_fnorm:4.9072e-02 L10_fnorm:4.9316e-02 L11_fnorm:4.9316e-02 L12_fnorm:4.7607e-02 L1_l1linf:1.9165e-02 L2_l1linf:1.9043e-02 L3_l1linf:1.9287e-02 L4_l1linf:1.9165e-02 L5_l1linf:1.9165e-02 
L6_l1linf:1.8799e-02 L7_l1linf:1.9165e-02 L8_l1linf:1.9043e-02 L9_l1linf:1.9043e-02 L10_l1linf:1.9287e-02 L11_l1linf:1.9287e-02 L12_l1linf:1.9531e-02 L1_spectral:7.8522e-04 L2_spectral:7.8582e-04 L3_spectral:7.8740e-04 L4_spectral:7.9093e-04 L5_spectral:7.9175e-04 L6_spectral:7.8905e-04 L7_spectral:7.9210e-04 L8_spectral:7.8208e-04 L9_spectral:7.8273e-04 L10_spectral:7.8100e-04 L11_spectral:7.8354e-04 L12_spectral:7.6701e-04 train_time:105538ms step_avg:52.77ms +[2025-09-11 09:42:47] [Rank 0] step:2001/10000 train_time:107220ms step_avg:53.58ms +[2025-09-11 09:42:47] [Rank 0] step:2001/10000 train_time:107220ms step_avg:53.58ms +[2025-09-11 09:42:48] [Rank 0] step:2021/10000 train_time:107874ms step_avg:53.38ms +[2025-09-11 09:42:48] [Rank 0] step:2021/10000 train_time:107874ms step_avg:53.38ms +[2025-09-11 09:42:49] [Rank 0] step:2041/10000 train_time:108525ms step_avg:53.17ms +[2025-09-11 09:42:49] [Rank 0] step:2041/10000 train_time:108525ms step_avg:53.17ms +[2025-09-11 09:42:49] [Rank 0] step:2061/10000 train_time:109175ms step_avg:52.97ms +[2025-09-11 09:42:49] [Rank 0] step:2061/10000 train_time:109175ms step_avg:52.97ms +[2025-09-11 09:42:50] [Rank 0] step:2081/10000 train_time:109825ms step_avg:52.78ms +[2025-09-11 09:42:50] [Rank 0] step:2081/10000 train_time:109825ms step_avg:52.78ms +[2025-09-11 09:42:51] [Rank 0] step:2101/10000 train_time:110475ms step_avg:52.58ms +[2025-09-11 09:42:51] [Rank 0] step:2101/10000 train_time:110475ms step_avg:52.58ms +[2025-09-11 09:42:51] [Rank 0] step:2121/10000 train_time:111126ms step_avg:52.39ms +[2025-09-11 09:42:51] [Rank 0] step:2121/10000 train_time:111126ms step_avg:52.39ms +[2025-09-11 09:42:52] [Rank 0] step:2141/10000 train_time:111775ms step_avg:52.21ms +[2025-09-11 09:42:52] [Rank 0] step:2141/10000 train_time:111775ms step_avg:52.21ms +[2025-09-11 09:42:53] [Rank 0] step:2161/10000 train_time:112425ms step_avg:52.02ms +[2025-09-11 09:42:53] [Rank 0] step:2161/10000 train_time:112425ms step_avg:52.02ms 
+[2025-09-11 09:42:53] [Rank 0] step:2181/10000 train_time:113074ms step_avg:51.85ms +[2025-09-11 09:42:53] [Rank 0] step:2181/10000 train_time:113074ms step_avg:51.85ms +[2025-09-11 09:42:54] [Rank 0] step:2201/10000 train_time:113724ms step_avg:51.67ms +[2025-09-11 09:42:54] [Rank 0] step:2201/10000 train_time:113724ms step_avg:51.67ms +[2025-09-11 09:42:54] [Rank 0] step:2221/10000 train_time:114373ms step_avg:51.50ms +[2025-09-11 09:42:54] [Rank 0] step:2221/10000 train_time:114373ms step_avg:51.50ms +[2025-09-11 09:42:55] [Rank 0] step:2241/10000 train_time:115034ms step_avg:51.33ms +[2025-09-11 09:42:55] [Rank 0] step:2241/10000 train_time:115034ms step_avg:51.33ms +[2025-09-11 09:42:56] [Rank 0] step:2261/10000 train_time:115698ms step_avg:51.17ms +[2025-09-11 09:42:56] [Rank 0] step:2261/10000 train_time:115698ms step_avg:51.17ms +[2025-09-11 09:42:56] [Rank 0] step:2281/10000 train_time:116360ms step_avg:51.01ms +[2025-09-11 09:42:56] [Rank 0] step:2281/10000 train_time:116360ms step_avg:51.01ms +[2025-09-11 09:42:57] [Rank 0] step:2301/10000 train_time:117023ms step_avg:50.86ms +[2025-09-11 09:42:57] [Rank 0] step:2301/10000 train_time:117023ms step_avg:50.86ms +[2025-09-11 09:42:58] [Rank 0] step:2321/10000 train_time:117685ms step_avg:50.70ms +[2025-09-11 09:42:58] [Rank 0] step:2321/10000 train_time:117685ms step_avg:50.70ms +[2025-09-11 09:42:58] [Rank 0] step:2341/10000 train_time:118347ms step_avg:50.55ms +[2025-09-11 09:42:58] [Rank 0] step:2341/10000 train_time:118347ms step_avg:50.55ms +[2025-09-11 09:42:59] [Rank 0] step:2361/10000 train_time:119010ms step_avg:50.41ms +[2025-09-11 09:42:59] [Rank 0] step:2361/10000 train_time:119010ms step_avg:50.41ms +[2025-09-11 09:43:00] [Rank 0] step:2381/10000 train_time:119672ms step_avg:50.26ms +[2025-09-11 09:43:00] [Rank 0] step:2381/10000 train_time:119672ms step_avg:50.26ms +[2025-09-11 09:43:00] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 09:43:00] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 09:43:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 09:43:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 09:43:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 09:43:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 09:43:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:43:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:43:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 09:43:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 09:43:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 09:43:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 09:43:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 09:43:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 09:43:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 09:43:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 09:43:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 09:43:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 09:43:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 09:43:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 09:43:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 09:43:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 09:43:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 09:43:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 09:43:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 09:43:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 09:43:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 09:43:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 09:43:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 09:43:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 09:43:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 09:43:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 09:43:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 09:43:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 09:43:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 09:43:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 09:43:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 09:43:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 09:43:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 09:43:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 09:43:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 09:43:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 09:43:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 09:43:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 09:43:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:43:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:43:14] [Rank 0] PRINT: step:2400/10000 val_loss:5.4116 total_sharp:9.3017e-05 L1_sharp:1.8957e-01 L2_sharp:1.9041e-01 L3_sharp:2.0020e-01 L4_sharp:2.3964e-01 L5_sharp:2.6029e-01 L6_sharp:2.7224e-01 L7_sharp:2.9919e-01 L8_sharp:2.7789e-01 L9_sharp:2.3980e-01 L10_sharp:2.9780e-01 L11_sharp:2.6569e-01 L12_sharp:6.6797e-01 total_fnorm:1.7000e+02 total_l1_linf:3.4816e+05 total_spectral:8.5000e+01 L1_fnorm:4.8828e-02 L2_fnorm:4.9316e-02 L3_fnorm:4.9072e-02 L4_fnorm:4.8828e-02 L5_fnorm:4.8828e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8828e-02 L8_fnorm:4.8340e-02 L9_fnorm:4.8584e-02 L10_fnorm:4.8584e-02 L11_fnorm:4.8340e-02 L12_fnorm:4.7607e-02 L1_l1linf:1.7944e-02 L2_l1linf:1.8311e-02 L3_l1linf:1.8433e-02 L4_l1linf:1.8555e-02 L5_l1linf:1.8433e-02 L6_l1linf:1.8311e-02 L7_l1linf:1.8677e-02 L8_l1linf:1.8555e-02 L9_l1linf:1.8311e-02 L10_l1linf:1.8555e-02 L11_l1linf:1.8555e-02 L12_l1linf:1.8799e-02 L1_spectral:7.8487e-04 L2_spectral:7.9110e-04 L3_spectral:7.8761e-04 L4_spectral:7.9472e-04 L5_spectral:7.9014e-04 L6_spectral:7.8742e-04 L7_spectral:7.9830e-04 L8_spectral:7.8637e-04 L9_spectral:7.8363e-04 L10_spectral:7.8906e-04 L11_spectral:7.8842e-04 L12_spectral:7.7103e-04 train_time:120320ms step_avg:50.13ms +[2025-09-11 09:43:14] [Rank 0] PRINT: step:2400/10000 
val_loss:5.4116 total_sharp:9.3017e-05 L1_sharp:1.8957e-01 L2_sharp:1.9041e-01 L3_sharp:2.0020e-01 L4_sharp:2.3964e-01 L5_sharp:2.6029e-01 L6_sharp:2.7224e-01 L7_sharp:2.9919e-01 L8_sharp:2.7789e-01 L9_sharp:2.3980e-01 L10_sharp:2.9780e-01 L11_sharp:2.6569e-01 L12_sharp:6.6797e-01 total_fnorm:1.7000e+02 total_l1_linf:3.4816e+05 total_spectral:8.5000e+01 L1_fnorm:4.8828e-02 L2_fnorm:4.9316e-02 L3_fnorm:4.9072e-02 L4_fnorm:4.8828e-02 L5_fnorm:4.8828e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8828e-02 L8_fnorm:4.8340e-02 L9_fnorm:4.8584e-02 L10_fnorm:4.8584e-02 L11_fnorm:4.8340e-02 L12_fnorm:4.7607e-02 L1_l1linf:1.7944e-02 L2_l1linf:1.8311e-02 L3_l1linf:1.8433e-02 L4_l1linf:1.8555e-02 L5_l1linf:1.8433e-02 L6_l1linf:1.8311e-02 L7_l1linf:1.8677e-02 L8_l1linf:1.8555e-02 L9_l1linf:1.8311e-02 L10_l1linf:1.8555e-02 L11_l1linf:1.8555e-02 L12_l1linf:1.8799e-02 L1_spectral:7.8487e-04 L2_spectral:7.9110e-04 L3_spectral:7.8761e-04 L4_spectral:7.9472e-04 L5_spectral:7.9014e-04 L6_spectral:7.8742e-04 L7_spectral:7.9830e-04 L8_spectral:7.8637e-04 L9_spectral:7.8363e-04 L10_spectral:7.8906e-04 L11_spectral:7.8842e-04 L12_spectral:7.7103e-04 train_time:120320ms step_avg:50.13ms +[2025-09-11 09:43:15] [Rank 0] step:2401/10000 train_time:121890ms step_avg:50.77ms +[2025-09-11 09:43:15] [Rank 0] step:2401/10000 train_time:121890ms step_avg:50.77ms +[2025-09-11 09:43:16] [Rank 0] step:2421/10000 train_time:122558ms step_avg:50.62ms +[2025-09-11 09:43:16] [Rank 0] step:2421/10000 train_time:122558ms step_avg:50.62ms +[2025-09-11 09:43:17] [Rank 0] step:2441/10000 train_time:123221ms step_avg:50.48ms +[2025-09-11 09:43:17] [Rank 0] step:2441/10000 train_time:123221ms step_avg:50.48ms +[2025-09-11 09:43:17] [Rank 0] step:2461/10000 train_time:123885ms step_avg:50.34ms +[2025-09-11 09:43:17] [Rank 0] step:2461/10000 train_time:123885ms step_avg:50.34ms +[2025-09-11 09:43:18] [Rank 0] step:2481/10000 train_time:124548ms step_avg:50.20ms +[2025-09-11 09:43:18] [Rank 0] step:2481/10000 
train_time:124548ms step_avg:50.20ms +[2025-09-11 09:43:19] [Rank 0] step:2501/10000 train_time:125211ms step_avg:50.06ms +[2025-09-11 09:43:19] [Rank 0] step:2501/10000 train_time:125211ms step_avg:50.06ms +[2025-09-11 09:43:19] [Rank 0] step:2521/10000 train_time:125874ms step_avg:49.93ms +[2025-09-11 09:43:19] [Rank 0] step:2521/10000 train_time:125874ms step_avg:49.93ms +[2025-09-11 09:43:20] [Rank 0] step:2541/10000 train_time:126537ms step_avg:49.80ms +[2025-09-11 09:43:20] [Rank 0] step:2541/10000 train_time:126537ms step_avg:49.80ms +[2025-09-11 09:43:21] [Rank 0] step:2561/10000 train_time:127200ms step_avg:49.67ms +[2025-09-11 09:43:21] [Rank 0] step:2561/10000 train_time:127200ms step_avg:49.67ms +[2025-09-11 09:43:21] [Rank 0] step:2581/10000 train_time:127863ms step_avg:49.54ms +[2025-09-11 09:43:21] [Rank 0] step:2581/10000 train_time:127863ms step_avg:49.54ms +[2025-09-11 09:43:22] [Rank 0] step:2601/10000 train_time:128526ms step_avg:49.41ms +[2025-09-11 09:43:22] [Rank 0] step:2601/10000 train_time:128526ms step_avg:49.41ms +[2025-09-11 09:43:23] [Rank 0] step:2621/10000 train_time:129189ms step_avg:49.29ms +[2025-09-11 09:43:23] [Rank 0] step:2621/10000 train_time:129189ms step_avg:49.29ms +[2025-09-11 09:43:23] [Rank 0] step:2641/10000 train_time:129853ms step_avg:49.17ms +[2025-09-11 09:43:23] [Rank 0] step:2641/10000 train_time:129853ms step_avg:49.17ms +[2025-09-11 09:43:24] [Rank 0] step:2661/10000 train_time:130516ms step_avg:49.05ms +[2025-09-11 09:43:24] [Rank 0] step:2661/10000 train_time:130516ms step_avg:49.05ms +[2025-09-11 09:43:25] [Rank 0] step:2681/10000 train_time:131179ms step_avg:48.93ms +[2025-09-11 09:43:25] [Rank 0] step:2681/10000 train_time:131179ms step_avg:48.93ms +[2025-09-11 09:43:25] [Rank 0] step:2701/10000 train_time:131842ms step_avg:48.81ms +[2025-09-11 09:43:25] [Rank 0] step:2701/10000 train_time:131842ms step_avg:48.81ms +[2025-09-11 09:43:26] [Rank 0] step:2721/10000 train_time:132505ms step_avg:48.70ms 
+[2025-09-11 09:43:26] [Rank 0] step:2721/10000 train_time:132505ms step_avg:48.70ms +[2025-09-11 09:43:27] [Rank 0] step:2741/10000 train_time:133169ms step_avg:48.58ms +[2025-09-11 09:43:27] [Rank 0] step:2741/10000 train_time:133169ms step_avg:48.58ms +[2025-09-11 09:43:27] [Rank 0] step:2761/10000 train_time:133831ms step_avg:48.47ms +[2025-09-11 09:43:27] [Rank 0] step:2761/10000 train_time:133831ms step_avg:48.47ms +[2025-09-11 09:43:28] [Rank 0] step:2781/10000 train_time:134494ms step_avg:48.36ms +[2025-09-11 09:43:28] [Rank 0] step:2781/10000 train_time:134494ms step_avg:48.36ms +[2025-09-11 09:43:29] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 09:43:29] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 09:43:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 09:43:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 09:43:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 09:43:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 09:43:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:43:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:43:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 09:43:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 09:43:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 09:43:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 09:43:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 09:43:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 09:43:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 09:43:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 09:43:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 09:43:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 09:43:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 09:43:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 09:43:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 09:43:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 09:43:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 09:43:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 09:43:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 09:43:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 09:43:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 09:43:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 09:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 09:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 09:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 09:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 09:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 09:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 09:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 09:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 09:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 09:43:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 09:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 09:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 09:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 09:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 09:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 09:43:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 09:43:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:43:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:43:39] [Rank 0] PRINT: step:2800/10000 val_loss:5.3355 total_sharp:8.9356e-05 L1_sharp:1.7056e-01 L2_sharp:1.8515e-01 L3_sharp:2.0438e-01 L4_sharp:2.5514e-01 L5_sharp:2.7562e-01 L6_sharp:2.6083e-01 L7_sharp:3.0186e-01 L8_sharp:3.1821e-01 L9_sharp:3.3324e-01 L10_sharp:3.8502e-01 L11_sharp:6.2700e-01 L12_sharp:1.1562e+00 total_fnorm:1.7100e+02 total_l1_linf:3.5430e+05 total_spectral:8.5500e+01 L1_fnorm:4.8828e-02 L2_fnorm:4.9072e-02 L3_fnorm:4.9072e-02 L4_fnorm:4.8584e-02 L5_fnorm:4.8828e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8584e-02 L8_fnorm:4.8096e-02 L9_fnorm:4.8340e-02 L10_fnorm:4.8584e-02 L11_fnorm:4.8096e-02 L12_fnorm:4.7119e-02 L1_l1linf:1.6968e-02 L2_l1linf:1.7090e-02 L3_l1linf:1.7090e-02 L4_l1linf:1.7212e-02 L5_l1linf:1.7456e-02 L6_l1linf:1.7700e-02 L7_l1linf:1.7334e-02 L8_l1linf:1.7334e-02 L9_l1linf:1.7456e-02 L10_l1linf:1.7822e-02 L11_l1linf:1.8188e-02 L12_l1linf:1.8066e-02 L1_spectral:7.8786e-04 L2_spectral:7.9791e-04 L3_spectral:7.9921e-04 L4_spectral:7.9899e-04 L5_spectral:8.0198e-04 L6_spectral:7.9338e-04 L7_spectral:7.9497e-04 L8_spectral:7.9213e-04 L9_spectral:7.8853e-04 L10_spectral:7.9185e-04 L11_spectral:7.8331e-04 L12_spectral:7.6424e-04 train_time:135139ms step_avg:48.26ms +[2025-09-11 09:43:39] [Rank 0] PRINT: step:2800/10000 val_loss:5.3355 total_sharp:8.9356e-05 L1_sharp:1.7056e-01 L2_sharp:1.8515e-01 L3_sharp:2.0438e-01 L4_sharp:2.5514e-01 L5_sharp:2.7562e-01 L6_sharp:2.6083e-01 L7_sharp:3.0186e-01 L8_sharp:3.1821e-01 L9_sharp:3.3324e-01 L10_sharp:3.8502e-01 L11_sharp:6.2700e-01 L12_sharp:1.1562e+00 total_fnorm:1.7100e+02 total_l1_linf:3.5430e+05 total_spectral:8.5500e+01 L1_fnorm:4.8828e-02 L2_fnorm:4.9072e-02 L3_fnorm:4.9072e-02 L4_fnorm:4.8584e-02 L5_fnorm:4.8828e-02 L6_fnorm:4.8584e-02 L7_fnorm:4.8584e-02 L8_fnorm:4.8096e-02 L9_fnorm:4.8340e-02 L10_fnorm:4.8584e-02 L11_fnorm:4.8096e-02 L12_fnorm:4.7119e-02 L1_l1linf:1.6968e-02 L2_l1linf:1.7090e-02 L3_l1linf:1.7090e-02 L4_l1linf:1.7212e-02 L5_l1linf:1.7456e-02 
L6_l1linf:1.7700e-02 L7_l1linf:1.7334e-02 L8_l1linf:1.7334e-02 L9_l1linf:1.7456e-02 L10_l1linf:1.7822e-02 L11_l1linf:1.8188e-02 L12_l1linf:1.8066e-02 L1_spectral:7.8786e-04 L2_spectral:7.9791e-04 L3_spectral:7.9921e-04 L4_spectral:7.9899e-04 L5_spectral:8.0198e-04 L6_spectral:7.9338e-04 L7_spectral:7.9497e-04 L8_spectral:7.9213e-04 L9_spectral:7.8853e-04 L10_spectral:7.9185e-04 L11_spectral:7.8331e-04 L12_spectral:7.6424e-04 train_time:135139ms step_avg:48.26ms +[2025-09-11 09:43:41] [Rank 0] step:2801/10000 train_time:136713ms step_avg:48.81ms +[2025-09-11 09:43:41] [Rank 0] step:2801/10000 train_time:136713ms step_avg:48.81ms +[2025-09-11 09:43:41] [Rank 0] step:2821/10000 train_time:137367ms step_avg:48.69ms +[2025-09-11 09:43:41] [Rank 0] step:2821/10000 train_time:137367ms step_avg:48.69ms +[2025-09-11 09:43:42] [Rank 0] step:2841/10000 train_time:138032ms step_avg:48.59ms +[2025-09-11 09:43:42] [Rank 0] step:2841/10000 train_time:138032ms step_avg:48.59ms +[2025-09-11 09:43:43] [Rank 0] step:2861/10000 train_time:138697ms step_avg:48.48ms +[2025-09-11 09:43:43] [Rank 0] step:2861/10000 train_time:138697ms step_avg:48.48ms +[2025-09-11 09:43:43] [Rank 0] step:2881/10000 train_time:139361ms step_avg:48.37ms +[2025-09-11 09:43:43] [Rank 0] step:2881/10000 train_time:139361ms step_avg:48.37ms +[2025-09-11 09:43:44] [Rank 0] step:2901/10000 train_time:140025ms step_avg:48.27ms +[2025-09-11 09:43:44] [Rank 0] step:2901/10000 train_time:140025ms step_avg:48.27ms +[2025-09-11 09:43:45] [Rank 0] step:2921/10000 train_time:140689ms step_avg:48.16ms +[2025-09-11 09:43:45] [Rank 0] step:2921/10000 train_time:140689ms step_avg:48.16ms +[2025-09-11 09:43:45] [Rank 0] step:2941/10000 train_time:141354ms step_avg:48.06ms +[2025-09-11 09:43:45] [Rank 0] step:2941/10000 train_time:141354ms step_avg:48.06ms +[2025-09-11 09:43:46] [Rank 0] step:2961/10000 train_time:142017ms step_avg:47.96ms +[2025-09-11 09:43:46] [Rank 0] step:2961/10000 train_time:142017ms step_avg:47.96ms 
+[2025-09-11 09:43:47] [Rank 0] step:2981/10000 train_time:142683ms step_avg:47.86ms +[2025-09-11 09:43:47] [Rank 0] step:2981/10000 train_time:142683ms step_avg:47.86ms +[2025-09-11 09:43:47] [Rank 0] step:3001/10000 train_time:143356ms step_avg:47.77ms +[2025-09-11 09:43:47] [Rank 0] step:3001/10000 train_time:143356ms step_avg:47.77ms +[2025-09-11 09:43:48] [Rank 0] step:3021/10000 train_time:144023ms step_avg:47.67ms +[2025-09-11 09:43:48] [Rank 0] step:3021/10000 train_time:144023ms step_avg:47.67ms +[2025-09-11 09:43:49] [Rank 0] step:3041/10000 train_time:144690ms step_avg:47.58ms +[2025-09-11 09:43:49] [Rank 0] step:3041/10000 train_time:144690ms step_avg:47.58ms +[2025-09-11 09:43:49] [Rank 0] step:3061/10000 train_time:145357ms step_avg:47.49ms +[2025-09-11 09:43:49] [Rank 0] step:3061/10000 train_time:145357ms step_avg:47.49ms +[2025-09-11 09:43:50] [Rank 0] step:3081/10000 train_time:146024ms step_avg:47.39ms +[2025-09-11 09:43:50] [Rank 0] step:3081/10000 train_time:146024ms step_avg:47.39ms +[2025-09-11 09:43:51] [Rank 0] step:3101/10000 train_time:146691ms step_avg:47.30ms +[2025-09-11 09:43:51] [Rank 0] step:3101/10000 train_time:146691ms step_avg:47.30ms +[2025-09-11 09:43:51] [Rank 0] step:3121/10000 train_time:147358ms step_avg:47.21ms +[2025-09-11 09:43:51] [Rank 0] step:3121/10000 train_time:147358ms step_avg:47.21ms +[2025-09-11 09:43:52] [Rank 0] step:3141/10000 train_time:148025ms step_avg:47.13ms +[2025-09-11 09:43:52] [Rank 0] step:3141/10000 train_time:148025ms step_avg:47.13ms +[2025-09-11 09:43:53] [Rank 0] step:3161/10000 train_time:148692ms step_avg:47.04ms +[2025-09-11 09:43:53] [Rank 0] step:3161/10000 train_time:148692ms step_avg:47.04ms +[2025-09-11 09:43:53] [Rank 0] step:3181/10000 train_time:149358ms step_avg:46.95ms +[2025-09-11 09:43:53] [Rank 0] step:3181/10000 train_time:149358ms step_avg:46.95ms +[2025-09-11 09:43:54] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 09:43:54] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 09:43:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 09:43:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 09:43:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 09:43:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 09:43:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:43:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:43:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 09:43:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 09:43:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 09:43:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 09:44:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 09:44:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 09:44:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 09:44:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 09:44:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 09:44:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 09:44:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 09:44:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 09:44:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 09:44:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 09:44:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 09:44:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 09:44:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 09:44:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 09:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 09:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 09:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 09:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 09:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 09:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 09:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 09:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 09:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 09:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 09:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 09:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 09:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 09:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 09:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 09:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 09:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 09:44:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 09:44:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:44:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:44:04] [Rank 0] PRINT: step:3200/10000 val_loss:5.2450 total_sharp:5.5732e-05 L1_sharp:1.4726e-01 L2_sharp:1.6327e-01 L3_sharp:1.7016e-01 L4_sharp:2.0172e-01 L5_sharp:1.8875e-01 L6_sharp:1.6406e-01 L7_sharp:1.8044e-01 L8_sharp:2.0446e-01 L9_sharp:2.4102e-01 L10_sharp:3.4266e-01 L11_sharp:3.9002e-01 L12_sharp:6.5588e-01 total_fnorm:1.9200e+02 total_l1_linf:4.1984e+05 total_spectral:9.6000e+01 L1_fnorm:4.8584e-02 L2_fnorm:4.8828e-02 L3_fnorm:4.8584e-02 L4_fnorm:4.8584e-02 L5_fnorm:4.8584e-02 L6_fnorm:4.8096e-02 L7_fnorm:4.8096e-02 L8_fnorm:4.7852e-02 L9_fnorm:4.8096e-02 L10_fnorm:4.8096e-02 L11_fnorm:4.8096e-02 L12_fnorm:4.7119e-02 L1_l1linf:1.5869e-02 L2_l1linf:1.6479e-02 L3_l1linf:1.6602e-02 L4_l1linf:1.6357e-02 L5_l1linf:1.6235e-02 L6_l1linf:1.6113e-02 L7_l1linf:1.6602e-02 L8_l1linf:1.6479e-02 L9_l1linf:1.6602e-02 L10_l1linf:1.6846e-02 L11_l1linf:1.6846e-02 L12_l1linf:1.7334e-02 L1_spectral:7.9001e-04 L2_spectral:7.9871e-04 L3_spectral:8.0522e-04 L4_spectral:7.9679e-04 L5_spectral:7.9536e-04 L6_spectral:7.9842e-04 L7_spectral:8.0284e-04 L8_spectral:7.9508e-04 L9_spectral:8.0149e-04 L10_spectral:7.9634e-04 L11_spectral:7.9238e-04 L12_spectral:7.7403e-04 train_time:150006ms step_avg:46.88ms +[2025-09-11 09:44:04] [Rank 0] PRINT: step:3200/10000 
val_loss:5.2450 total_sharp:5.5732e-05 L1_sharp:1.4726e-01 L2_sharp:1.6327e-01 L3_sharp:1.7016e-01 L4_sharp:2.0172e-01 L5_sharp:1.8875e-01 L6_sharp:1.6406e-01 L7_sharp:1.8044e-01 L8_sharp:2.0446e-01 L9_sharp:2.4102e-01 L10_sharp:3.4266e-01 L11_sharp:3.9002e-01 L12_sharp:6.5588e-01 total_fnorm:1.9200e+02 total_l1_linf:4.1984e+05 total_spectral:9.6000e+01 L1_fnorm:4.8584e-02 L2_fnorm:4.8828e-02 L3_fnorm:4.8584e-02 L4_fnorm:4.8584e-02 L5_fnorm:4.8584e-02 L6_fnorm:4.8096e-02 L7_fnorm:4.8096e-02 L8_fnorm:4.7852e-02 L9_fnorm:4.8096e-02 L10_fnorm:4.8096e-02 L11_fnorm:4.8096e-02 L12_fnorm:4.7119e-02 L1_l1linf:1.5869e-02 L2_l1linf:1.6479e-02 L3_l1linf:1.6602e-02 L4_l1linf:1.6357e-02 L5_l1linf:1.6235e-02 L6_l1linf:1.6113e-02 L7_l1linf:1.6602e-02 L8_l1linf:1.6479e-02 L9_l1linf:1.6602e-02 L10_l1linf:1.6846e-02 L11_l1linf:1.6846e-02 L12_l1linf:1.7334e-02 L1_spectral:7.9001e-04 L2_spectral:7.9871e-04 L3_spectral:8.0522e-04 L4_spectral:7.9679e-04 L5_spectral:7.9536e-04 L6_spectral:7.9842e-04 L7_spectral:8.0284e-04 L8_spectral:7.9508e-04 L9_spectral:8.0149e-04 L10_spectral:7.9634e-04 L11_spectral:7.9238e-04 L12_spectral:7.7403e-04 train_time:150006ms step_avg:46.88ms +[2025-09-11 09:44:06] [Rank 0] step:3201/10000 train_time:151582ms step_avg:47.35ms +[2025-09-11 09:44:06] [Rank 0] step:3201/10000 train_time:151582ms step_avg:47.35ms +[2025-09-11 09:44:06] [Rank 0] step:3221/10000 train_time:152245ms step_avg:47.27ms +[2025-09-11 09:44:06] [Rank 0] step:3221/10000 train_time:152245ms step_avg:47.27ms +[2025-09-11 09:44:07] [Rank 0] step:3241/10000 train_time:152912ms step_avg:47.18ms +[2025-09-11 09:44:07] [Rank 0] step:3241/10000 train_time:152912ms step_avg:47.18ms +[2025-09-11 09:44:08] [Rank 0] step:3261/10000 train_time:153580ms step_avg:47.10ms +[2025-09-11 09:44:08] [Rank 0] step:3261/10000 train_time:153580ms step_avg:47.10ms +[2025-09-11 09:44:08] [Rank 0] step:3281/10000 train_time:154247ms step_avg:47.01ms +[2025-09-11 09:44:08] [Rank 0] step:3281/10000 
train_time:154247ms step_avg:47.01ms +[2025-09-11 09:44:09] [Rank 0] step:3301/10000 train_time:154913ms step_avg:46.93ms +[2025-09-11 09:44:09] [Rank 0] step:3301/10000 train_time:154913ms step_avg:46.93ms +[2025-09-11 09:44:10] [Rank 0] step:3321/10000 train_time:155579ms step_avg:46.85ms +[2025-09-11 09:44:10] [Rank 0] step:3321/10000 train_time:155579ms step_avg:46.85ms +[2025-09-11 09:44:10] [Rank 0] step:3341/10000 train_time:156246ms step_avg:46.77ms +[2025-09-11 09:44:10] [Rank 0] step:3341/10000 train_time:156246ms step_avg:46.77ms +[2025-09-11 09:44:11] [Rank 0] step:3361/10000 train_time:156914ms step_avg:46.69ms +[2025-09-11 09:44:11] [Rank 0] step:3361/10000 train_time:156914ms step_avg:46.69ms +[2025-09-11 09:44:12] [Rank 0] step:3381/10000 train_time:157580ms step_avg:46.61ms +[2025-09-11 09:44:12] [Rank 0] step:3381/10000 train_time:157580ms step_avg:46.61ms +[2025-09-11 09:44:12] [Rank 0] step:3401/10000 train_time:158246ms step_avg:46.53ms +[2025-09-11 09:44:12] [Rank 0] step:3401/10000 train_time:158246ms step_avg:46.53ms +[2025-09-11 09:44:13] [Rank 0] step:3421/10000 train_time:158912ms step_avg:46.45ms +[2025-09-11 09:44:13] [Rank 0] step:3421/10000 train_time:158912ms step_avg:46.45ms +[2025-09-11 09:44:14] [Rank 0] step:3441/10000 train_time:159577ms step_avg:46.38ms +[2025-09-11 09:44:14] [Rank 0] step:3441/10000 train_time:159577ms step_avg:46.38ms +[2025-09-11 09:44:14] [Rank 0] step:3461/10000 train_time:160244ms step_avg:46.30ms +[2025-09-11 09:44:14] [Rank 0] step:3461/10000 train_time:160244ms step_avg:46.30ms +[2025-09-11 09:44:15] [Rank 0] step:3481/10000 train_time:160910ms step_avg:46.23ms +[2025-09-11 09:44:15] [Rank 0] step:3481/10000 train_time:160910ms step_avg:46.23ms +[2025-09-11 09:44:16] [Rank 0] step:3501/10000 train_time:161577ms step_avg:46.15ms +[2025-09-11 09:44:16] [Rank 0] step:3501/10000 train_time:161577ms step_avg:46.15ms +[2025-09-11 09:44:16] [Rank 0] step:3521/10000 train_time:162243ms step_avg:46.08ms 
+[2025-09-11 09:44:16] [Rank 0] step:3521/10000 train_time:162243ms step_avg:46.08ms +[2025-09-11 09:44:17] [Rank 0] step:3541/10000 train_time:162909ms step_avg:46.01ms +[2025-09-11 09:44:17] [Rank 0] step:3541/10000 train_time:162909ms step_avg:46.01ms +[2025-09-11 09:44:18] [Rank 0] step:3561/10000 train_time:163576ms step_avg:45.94ms +[2025-09-11 09:44:18] [Rank 0] step:3561/10000 train_time:163576ms step_avg:45.94ms +[2025-09-11 09:44:18] [Rank 0] step:3581/10000 train_time:164242ms step_avg:45.86ms +[2025-09-11 09:44:18] [Rank 0] step:3581/10000 train_time:164242ms step_avg:45.86ms +[2025-09-11 09:44:19] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 09:44:19] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 09:44:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 09:44:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 09:44:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 09:44:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 09:44:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:44:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:44:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 09:44:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 09:44:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 09:44:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 09:44:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 09:44:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 09:44:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 09:44:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 09:44:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 09:44:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 09:44:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 09:44:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 09:44:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 09:44:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 09:44:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 09:44:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 09:44:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 09:44:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 09:44:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 09:44:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 09:44:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 09:44:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 09:44:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 09:44:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 09:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 09:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 09:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 09:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 09:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 09:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 09:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 09:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 09:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 09:44:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 09:44:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 09:44:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 09:44:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:44:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:44:29] [Rank 0] PRINT: step:3600/10000 val_loss:5.2152 total_sharp:6.3436e-05 L1_sharp:1.5094e-01 L2_sharp:1.6222e-01 L3_sharp:1.8177e-01 L4_sharp:2.1121e-01 L5_sharp:1.9621e-01 L6_sharp:1.7843e-01 L7_sharp:1.8706e-01 L8_sharp:2.1242e-01 L9_sharp:1.9177e-01 L10_sharp:2.1546e-01 L11_sharp:2.4202e-01 L12_sharp:2.4864e-01 total_fnorm:1.7600e+02 total_l1_linf:3.6864e+05 total_spectral:8.8000e+01 L1_fnorm:4.8584e-02 L2_fnorm:4.8828e-02 L3_fnorm:4.8584e-02 L4_fnorm:4.8096e-02 L5_fnorm:4.8340e-02 L6_fnorm:4.7852e-02 L7_fnorm:4.7852e-02 L8_fnorm:4.7363e-02 L9_fnorm:4.7852e-02 L10_fnorm:4.7852e-02 L11_fnorm:4.7363e-02 L12_fnorm:4.7119e-02 L1_l1linf:1.4954e-02 L2_l1linf:1.5137e-02 L3_l1linf:1.5320e-02 L4_l1linf:1.5381e-02 L5_l1linf:1.5625e-02 L6_l1linf:1.5381e-02 L7_l1linf:1.5991e-02 L8_l1linf:1.5442e-02 L9_l1linf:1.5564e-02 L10_l1linf:1.5869e-02 L11_l1linf:1.5747e-02 L12_l1linf:1.5991e-02 L1_spectral:7.9359e-04 L2_spectral:8.0591e-04 L3_spectral:7.9785e-04 L4_spectral:8.0261e-04 L5_spectral:8.0064e-04 L6_spectral:8.0705e-04 L7_spectral:8.0018e-04 L8_spectral:8.0081e-04 L9_spectral:7.9956e-04 L10_spectral:8.0929e-04 L11_spectral:7.9572e-04 L12_spectral:7.8379e-04 train_time:164889ms step_avg:45.80ms +[2025-09-11 09:44:29] [Rank 0] PRINT: step:3600/10000 val_loss:5.2152 total_sharp:6.3436e-05 L1_sharp:1.5094e-01 L2_sharp:1.6222e-01 L3_sharp:1.8177e-01 L4_sharp:2.1121e-01 L5_sharp:1.9621e-01 L6_sharp:1.7843e-01 L7_sharp:1.8706e-01 L8_sharp:2.1242e-01 L9_sharp:1.9177e-01 L10_sharp:2.1546e-01 L11_sharp:2.4202e-01 L12_sharp:2.4864e-01 total_fnorm:1.7600e+02 total_l1_linf:3.6864e+05 total_spectral:8.8000e+01 L1_fnorm:4.8584e-02 L2_fnorm:4.8828e-02 L3_fnorm:4.8584e-02 L4_fnorm:4.8096e-02 L5_fnorm:4.8340e-02 L6_fnorm:4.7852e-02 L7_fnorm:4.7852e-02 L8_fnorm:4.7363e-02 L9_fnorm:4.7852e-02 L10_fnorm:4.7852e-02 L11_fnorm:4.7363e-02 L12_fnorm:4.7119e-02 L1_l1linf:1.4954e-02 L2_l1linf:1.5137e-02 L3_l1linf:1.5320e-02 L4_l1linf:1.5381e-02 L5_l1linf:1.5625e-02 
L6_l1linf:1.5381e-02 L7_l1linf:1.5991e-02 L8_l1linf:1.5442e-02 L9_l1linf:1.5564e-02 L10_l1linf:1.5869e-02 L11_l1linf:1.5747e-02 L12_l1linf:1.5991e-02 L1_spectral:7.9359e-04 L2_spectral:8.0591e-04 L3_spectral:7.9785e-04 L4_spectral:8.0261e-04 L5_spectral:8.0064e-04 L6_spectral:8.0705e-04 L7_spectral:8.0018e-04 L8_spectral:8.0081e-04 L9_spectral:7.9956e-04 L10_spectral:8.0929e-04 L11_spectral:7.9572e-04 L12_spectral:7.8379e-04 train_time:164889ms step_avg:45.80ms +[2025-09-11 09:44:32] [Rank 0] step:3601/10000 train_time:167335ms step_avg:46.47ms +[2025-09-11 09:44:32] [Rank 0] step:3601/10000 train_time:167335ms step_avg:46.47ms +[2025-09-11 09:44:33] [Rank 0] step:3621/10000 train_time:167990ms step_avg:46.39ms +[2025-09-11 09:44:33] [Rank 0] step:3621/10000 train_time:167990ms step_avg:46.39ms +[2025-09-11 09:44:33] [Rank 0] step:3641/10000 train_time:168815ms step_avg:46.36ms +[2025-09-11 09:44:33] [Rank 0] step:3641/10000 train_time:168815ms step_avg:46.36ms +[2025-09-11 09:44:34] [Rank 0] step:3661/10000 train_time:169594ms step_avg:46.32ms +[2025-09-11 09:44:34] [Rank 0] step:3661/10000 train_time:169594ms step_avg:46.32ms +[2025-09-11 09:44:35] [Rank 0] step:3681/10000 train_time:170261ms step_avg:46.25ms +[2025-09-11 09:44:35] [Rank 0] step:3681/10000 train_time:170261ms step_avg:46.25ms +[2025-09-11 09:44:36] [Rank 0] step:3701/10000 train_time:170927ms step_avg:46.18ms +[2025-09-11 09:44:36] [Rank 0] step:3701/10000 train_time:170927ms step_avg:46.18ms +[2025-09-11 09:44:37] [Rank 0] step:3721/10000 train_time:171901ms step_avg:46.20ms +[2025-09-11 09:44:37] [Rank 0] step:3721/10000 train_time:171901ms step_avg:46.20ms +[2025-09-11 09:44:37] [Rank 0] step:3741/10000 train_time:172577ms step_avg:46.13ms +[2025-09-11 09:44:37] [Rank 0] step:3741/10000 train_time:172577ms step_avg:46.13ms +[2025-09-11 09:44:38] [Rank 0] step:3761/10000 train_time:173255ms step_avg:46.07ms +[2025-09-11 09:44:38] [Rank 0] step:3761/10000 train_time:173255ms step_avg:46.07ms 
+[2025-09-11 09:44:39] [Rank 0] step:3781/10000 train_time:173932ms step_avg:46.00ms +[2025-09-11 09:44:39] [Rank 0] step:3781/10000 train_time:173932ms step_avg:46.00ms +[2025-09-11 09:44:39] [Rank 0] step:3801/10000 train_time:174608ms step_avg:45.94ms +[2025-09-11 09:44:39] [Rank 0] step:3801/10000 train_time:174608ms step_avg:45.94ms +[2025-09-11 09:44:40] [Rank 0] step:3821/10000 train_time:175296ms step_avg:45.88ms +[2025-09-11 09:44:40] [Rank 0] step:3821/10000 train_time:175296ms step_avg:45.88ms +[2025-09-11 09:44:41] [Rank 0] step:3841/10000 train_time:175974ms step_avg:45.81ms +[2025-09-11 09:44:41] [Rank 0] step:3841/10000 train_time:175974ms step_avg:45.81ms +[2025-09-11 09:44:41] [Rank 0] step:3861/10000 train_time:176650ms step_avg:45.75ms +[2025-09-11 09:44:41] [Rank 0] step:3861/10000 train_time:176650ms step_avg:45.75ms +[2025-09-11 09:44:42] [Rank 0] step:3881/10000 train_time:177326ms step_avg:45.69ms +[2025-09-11 09:44:42] [Rank 0] step:3881/10000 train_time:177326ms step_avg:45.69ms +[2025-09-11 09:44:43] [Rank 0] step:3901/10000 train_time:178003ms step_avg:45.63ms +[2025-09-11 09:44:43] [Rank 0] step:3901/10000 train_time:178003ms step_avg:45.63ms +[2025-09-11 09:44:43] [Rank 0] step:3921/10000 train_time:178680ms step_avg:45.57ms +[2025-09-11 09:44:43] [Rank 0] step:3921/10000 train_time:178680ms step_avg:45.57ms +[2025-09-11 09:44:44] [Rank 0] step:3941/10000 train_time:179358ms step_avg:45.51ms +[2025-09-11 09:44:44] [Rank 0] step:3941/10000 train_time:179358ms step_avg:45.51ms +[2025-09-11 09:44:45] [Rank 0] step:3961/10000 train_time:180035ms step_avg:45.45ms +[2025-09-11 09:44:45] [Rank 0] step:3961/10000 train_time:180035ms step_avg:45.45ms +[2025-09-11 09:44:45] [Rank 0] step:3981/10000 train_time:180711ms step_avg:45.39ms +[2025-09-11 09:44:45] [Rank 0] step:3981/10000 train_time:180711ms step_avg:45.39ms +[2025-09-11 09:44:46] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 09:44:46] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 09:44:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 09:44:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 09:44:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 09:44:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 09:44:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:44:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:44:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 09:44:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 09:44:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 09:44:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 09:44:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 09:44:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 09:44:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 09:44:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 09:44:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 09:44:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 09:44:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 09:44:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 09:44:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 09:44:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 09:44:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 09:44:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 09:44:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 09:44:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 09:44:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 09:44:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 09:44:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 09:44:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 09:44:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 09:44:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 09:44:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 09:44:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 09:44:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 09:44:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 09:44:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 09:44:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 09:44:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 09:44:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 09:44:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 09:44:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 09:44:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 09:44:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 09:44:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:44:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:44:56] [Rank 0] PRINT: step:4000/10000 val_loss:5.1610 total_sharp:5.3019e-05 L1_sharp:1.4107e-01 L2_sharp:1.5314e-01 L3_sharp:2.0152e-01 L4_sharp:2.2464e-01 L5_sharp:2.5973e-01 L6_sharp:2.8315e-01 L7_sharp:2.9575e-01 L8_sharp:4.6334e-01 L9_sharp:7.7835e-01 L10_sharp:1.3025e+00 L11_sharp:1.8239e+00 L12_sharp:2.5328e+00 total_fnorm:2.0400e+02 total_l1_linf:4.3827e+05 total_spectral:1.0200e+02 L1_fnorm:4.8096e-02 L2_fnorm:4.8584e-02 L3_fnorm:4.8584e-02 L4_fnorm:4.8096e-02 L5_fnorm:4.8340e-02 L6_fnorm:4.7852e-02 L7_fnorm:4.7852e-02 L8_fnorm:4.7363e-02 L9_fnorm:4.7607e-02 L10_fnorm:4.8096e-02 L11_fnorm:4.7607e-02 L12_fnorm:4.6875e-02 L1_l1linf:1.4648e-02 L2_l1linf:1.5381e-02 L3_l1linf:1.5503e-02 L4_l1linf:1.5442e-02 L5_l1linf:1.5625e-02 L6_l1linf:1.5320e-02 L7_l1linf:1.5564e-02 L8_l1linf:1.5625e-02 L9_l1linf:1.6113e-02 L10_l1linf:1.6357e-02 L11_l1linf:1.6479e-02 L12_l1linf:1.6846e-02 L1_spectral:7.9631e-04 L2_spectral:8.0152e-04 L3_spectral:7.9639e-04 L4_spectral:7.9108e-04 L5_spectral:7.9748e-04 L6_spectral:7.9739e-04 L7_spectral:7.9863e-04 L8_spectral:7.9865e-04 L9_spectral:7.9569e-04 L10_spectral:7.9359e-04 L11_spectral:7.8645e-04 L12_spectral:7.6577e-04 train_time:181369ms step_avg:45.34ms +[2025-09-11 09:44:56] [Rank 0] PRINT: step:4000/10000 
val_loss:5.1610 total_sharp:5.3019e-05 L1_sharp:1.4107e-01 L2_sharp:1.5314e-01 L3_sharp:2.0152e-01 L4_sharp:2.2464e-01 L5_sharp:2.5973e-01 L6_sharp:2.8315e-01 L7_sharp:2.9575e-01 L8_sharp:4.6334e-01 L9_sharp:7.7835e-01 L10_sharp:1.3025e+00 L11_sharp:1.8239e+00 L12_sharp:2.5328e+00 total_fnorm:2.0400e+02 total_l1_linf:4.3827e+05 total_spectral:1.0200e+02 L1_fnorm:4.8096e-02 L2_fnorm:4.8584e-02 L3_fnorm:4.8584e-02 L4_fnorm:4.8096e-02 L5_fnorm:4.8340e-02 L6_fnorm:4.7852e-02 L7_fnorm:4.7852e-02 L8_fnorm:4.7363e-02 L9_fnorm:4.7607e-02 L10_fnorm:4.8096e-02 L11_fnorm:4.7607e-02 L12_fnorm:4.6875e-02 L1_l1linf:1.4648e-02 L2_l1linf:1.5381e-02 L3_l1linf:1.5503e-02 L4_l1linf:1.5442e-02 L5_l1linf:1.5625e-02 L6_l1linf:1.5320e-02 L7_l1linf:1.5564e-02 L8_l1linf:1.5625e-02 L9_l1linf:1.6113e-02 L10_l1linf:1.6357e-02 L11_l1linf:1.6479e-02 L12_l1linf:1.6846e-02 L1_spectral:7.9631e-04 L2_spectral:8.0152e-04 L3_spectral:7.9639e-04 L4_spectral:7.9108e-04 L5_spectral:7.9748e-04 L6_spectral:7.9739e-04 L7_spectral:7.9863e-04 L8_spectral:7.9865e-04 L9_spectral:7.9569e-04 L10_spectral:7.9359e-04 L11_spectral:7.8645e-04 L12_spectral:7.6577e-04 train_time:181369ms step_avg:45.34ms +[2025-09-11 09:44:58] [Rank 0] step:4001/10000 train_time:182967ms step_avg:45.73ms +[2025-09-11 09:44:58] [Rank 0] step:4001/10000 train_time:182967ms step_avg:45.73ms +[2025-09-11 09:44:59] [Rank 0] step:4021/10000 train_time:183633ms step_avg:45.67ms +[2025-09-11 09:44:59] [Rank 0] step:4021/10000 train_time:183633ms step_avg:45.67ms +[2025-09-11 09:44:59] [Rank 0] step:4041/10000 train_time:184311ms step_avg:45.61ms +[2025-09-11 09:44:59] [Rank 0] step:4041/10000 train_time:184311ms step_avg:45.61ms +[2025-09-11 09:45:00] [Rank 0] step:4061/10000 train_time:184987ms step_avg:45.55ms +[2025-09-11 09:45:00] [Rank 0] step:4061/10000 train_time:184987ms step_avg:45.55ms +[2025-09-11 09:45:01] [Rank 0] step:4081/10000 train_time:185665ms step_avg:45.49ms +[2025-09-11 09:45:01] [Rank 0] step:4081/10000 
train_time:185665ms step_avg:45.49ms +[2025-09-11 09:45:01] [Rank 0] step:4101/10000 train_time:186384ms step_avg:45.45ms +[2025-09-11 09:45:01] [Rank 0] step:4101/10000 train_time:186384ms step_avg:45.45ms +[2025-09-11 09:45:02] [Rank 0] step:4121/10000 train_time:187108ms step_avg:45.40ms +[2025-09-11 09:45:02] [Rank 0] step:4121/10000 train_time:187108ms step_avg:45.40ms +[2025-09-11 09:45:03] [Rank 0] step:4141/10000 train_time:187785ms step_avg:45.35ms +[2025-09-11 09:45:03] [Rank 0] step:4141/10000 train_time:187785ms step_avg:45.35ms +[2025-09-11 09:45:04] [Rank 0] step:4161/10000 train_time:188462ms step_avg:45.29ms +[2025-09-11 09:45:04] [Rank 0] step:4161/10000 train_time:188462ms step_avg:45.29ms +[2025-09-11 09:45:04] [Rank 0] step:4181/10000 train_time:189138ms step_avg:45.24ms +[2025-09-11 09:45:04] [Rank 0] step:4181/10000 train_time:189138ms step_avg:45.24ms +[2025-09-11 09:45:05] [Rank 0] step:4201/10000 train_time:189816ms step_avg:45.18ms +[2025-09-11 09:45:05] [Rank 0] step:4201/10000 train_time:189816ms step_avg:45.18ms +[2025-09-11 09:45:06] [Rank 0] step:4221/10000 train_time:190493ms step_avg:45.13ms +[2025-09-11 09:45:06] [Rank 0] step:4221/10000 train_time:190493ms step_avg:45.13ms +[2025-09-11 09:45:06] [Rank 0] step:4241/10000 train_time:191170ms step_avg:45.08ms +[2025-09-11 09:45:06] [Rank 0] step:4241/10000 train_time:191170ms step_avg:45.08ms +[2025-09-11 09:45:07] [Rank 0] step:4261/10000 train_time:191847ms step_avg:45.02ms +[2025-09-11 09:45:07] [Rank 0] step:4261/10000 train_time:191847ms step_avg:45.02ms +[2025-09-11 09:45:08] [Rank 0] step:4281/10000 train_time:192525ms step_avg:44.97ms +[2025-09-11 09:45:08] [Rank 0] step:4281/10000 train_time:192525ms step_avg:44.97ms +[2025-09-11 09:45:08] [Rank 0] step:4301/10000 train_time:193204ms step_avg:44.92ms +[2025-09-11 09:45:08] [Rank 0] step:4301/10000 train_time:193204ms step_avg:44.92ms +[2025-09-11 09:45:09] [Rank 0] step:4321/10000 train_time:193883ms step_avg:44.87ms 
+[2025-09-11 09:45:09] [Rank 0] step:4321/10000 train_time:193883ms step_avg:44.87ms +[2025-09-11 09:45:10] [Rank 0] step:4341/10000 train_time:194558ms step_avg:44.82ms +[2025-09-11 09:45:10] [Rank 0] step:4341/10000 train_time:194558ms step_avg:44.82ms +[2025-09-11 09:45:10] [Rank 0] step:4361/10000 train_time:195235ms step_avg:44.77ms +[2025-09-11 09:45:10] [Rank 0] step:4361/10000 train_time:195235ms step_avg:44.77ms +[2025-09-11 09:45:11] [Rank 0] step:4381/10000 train_time:195912ms step_avg:44.72ms +[2025-09-11 09:45:11] [Rank 0] step:4381/10000 train_time:195912ms step_avg:44.72ms +[2025-09-11 09:45:12] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 09:45:12] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 09:45:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 09:45:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 09:45:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 09:45:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 09:45:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:45:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:45:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 09:45:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 09:45:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 09:45:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 09:45:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 09:45:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 09:45:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 09:45:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 09:45:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 09:45:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 09:45:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 09:45:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 09:45:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 09:45:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 09:45:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 09:45:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 09:45:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 09:45:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 09:45:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 09:45:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 09:45:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 09:45:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 09:45:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 09:45:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 09:45:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 09:45:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 09:45:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 09:45:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 09:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 09:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 09:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 09:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 09:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 09:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 09:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 09:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 09:45:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:45:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:45:22] [Rank 0] PRINT: step:4400/10000 val_loss:5.1515 total_sharp:5.5473e-05 L1_sharp:1.4609e-01 L2_sharp:1.7609e-01 L3_sharp:2.1212e-01 L4_sharp:2.6647e-01 L5_sharp:2.7111e-01 L6_sharp:2.7237e-01 L7_sharp:2.5080e-01 L8_sharp:2.5839e-01 L9_sharp:2.7132e-01 L10_sharp:2.9575e-01 L11_sharp:3.0325e-01 L12_sharp:5.6274e-01 total_fnorm:1.7600e+02 total_l1_linf:3.6454e+05 total_spectral:8.8000e+01 L1_fnorm:4.7607e-02 L2_fnorm:4.8584e-02 L3_fnorm:4.8340e-02 L4_fnorm:4.8096e-02 L5_fnorm:4.8096e-02 L6_fnorm:4.7852e-02 L7_fnorm:4.7852e-02 L8_fnorm:4.7363e-02 L9_fnorm:4.7607e-02 L10_fnorm:4.7852e-02 L11_fnorm:4.7119e-02 L12_fnorm:4.6387e-02 L1_l1linf:1.3916e-02 L2_l1linf:1.4343e-02 L3_l1linf:1.4709e-02 L4_l1linf:1.4954e-02 L5_l1linf:1.5320e-02 L6_l1linf:1.5320e-02 L7_l1linf:1.5503e-02 L8_l1linf:1.5503e-02 L9_l1linf:1.5503e-02 L10_l1linf:1.5747e-02 L11_l1linf:1.5991e-02 L12_l1linf:1.6357e-02 L1_spectral:7.8977e-04 L2_spectral:8.0156e-04 L3_spectral:7.9014e-04 L4_spectral:7.9073e-04 L5_spectral:7.9846e-04 L6_spectral:7.9613e-04 L7_spectral:7.9846e-04 L8_spectral:7.8651e-04 L9_spectral:7.9475e-04 L10_spectral:7.9149e-04 L11_spectral:7.8658e-04 L12_spectral:7.5697e-04 train_time:196570ms step_avg:44.68ms +[2025-09-11 09:45:22] [Rank 0] PRINT: step:4400/10000 val_loss:5.1515 total_sharp:5.5473e-05 L1_sharp:1.4609e-01 L2_sharp:1.7609e-01 L3_sharp:2.1212e-01 L4_sharp:2.6647e-01 L5_sharp:2.7111e-01 L6_sharp:2.7237e-01 L7_sharp:2.5080e-01 L8_sharp:2.5839e-01 L9_sharp:2.7132e-01 L10_sharp:2.9575e-01 L11_sharp:3.0325e-01 L12_sharp:5.6274e-01 total_fnorm:1.7600e+02 total_l1_linf:3.6454e+05 total_spectral:8.8000e+01 L1_fnorm:4.7607e-02 L2_fnorm:4.8584e-02 L3_fnorm:4.8340e-02 L4_fnorm:4.8096e-02 L5_fnorm:4.8096e-02 L6_fnorm:4.7852e-02 L7_fnorm:4.7852e-02 L8_fnorm:4.7363e-02 L9_fnorm:4.7607e-02 L10_fnorm:4.7852e-02 L11_fnorm:4.7119e-02 L12_fnorm:4.6387e-02 L1_l1linf:1.3916e-02 L2_l1linf:1.4343e-02 L3_l1linf:1.4709e-02 L4_l1linf:1.4954e-02 L5_l1linf:1.5320e-02 
L6_l1linf:1.5320e-02 L7_l1linf:1.5503e-02 L8_l1linf:1.5503e-02 L9_l1linf:1.5503e-02 L10_l1linf:1.5747e-02 L11_l1linf:1.5991e-02 L12_l1linf:1.6357e-02 L1_spectral:7.8977e-04 L2_spectral:8.0156e-04 L3_spectral:7.9014e-04 L4_spectral:7.9073e-04 L5_spectral:7.9846e-04 L6_spectral:7.9613e-04 L7_spectral:7.9846e-04 L8_spectral:7.8651e-04 L9_spectral:7.9475e-04 L10_spectral:7.9149e-04 L11_spectral:7.8658e-04 L12_spectral:7.5697e-04 train_time:196570ms step_avg:44.68ms +[2025-09-11 09:45:24] [Rank 0] step:4401/10000 train_time:198154ms step_avg:45.02ms +[2025-09-11 09:45:24] [Rank 0] step:4401/10000 train_time:198154ms step_avg:45.02ms +[2025-09-11 09:45:24] [Rank 0] step:4421/10000 train_time:198835ms step_avg:44.98ms +[2025-09-11 09:45:24] [Rank 0] step:4421/10000 train_time:198835ms step_avg:44.98ms +[2025-09-11 09:45:25] [Rank 0] step:4441/10000 train_time:199513ms step_avg:44.93ms +[2025-09-11 09:45:25] [Rank 0] step:4441/10000 train_time:199513ms step_avg:44.93ms +[2025-09-11 09:45:26] [Rank 0] step:4461/10000 train_time:200193ms step_avg:44.88ms +[2025-09-11 09:45:26] [Rank 0] step:4461/10000 train_time:200193ms step_avg:44.88ms +[2025-09-11 09:45:26] [Rank 0] step:4481/10000 train_time:200874ms step_avg:44.83ms +[2025-09-11 09:45:26] [Rank 0] step:4481/10000 train_time:200874ms step_avg:44.83ms +[2025-09-11 09:45:27] [Rank 0] step:4501/10000 train_time:201554ms step_avg:44.78ms +[2025-09-11 09:45:27] [Rank 0] step:4501/10000 train_time:201554ms step_avg:44.78ms +[2025-09-11 09:45:28] [Rank 0] step:4521/10000 train_time:202233ms step_avg:44.73ms +[2025-09-11 09:45:28] [Rank 0] step:4521/10000 train_time:202233ms step_avg:44.73ms +[2025-09-11 09:45:29] [Rank 0] step:4541/10000 train_time:202913ms step_avg:44.68ms +[2025-09-11 09:45:29] [Rank 0] step:4541/10000 train_time:202913ms step_avg:44.68ms +[2025-09-11 09:45:29] [Rank 0] step:4561/10000 train_time:203592ms step_avg:44.64ms +[2025-09-11 09:45:29] [Rank 0] step:4561/10000 train_time:203592ms step_avg:44.64ms 
+[2025-09-11 09:45:30] [Rank 0] step:4581/10000 train_time:204271ms step_avg:44.59ms +[2025-09-11 09:45:30] [Rank 0] step:4581/10000 train_time:204271ms step_avg:44.59ms +[2025-09-11 09:45:31] [Rank 0] step:4601/10000 train_time:204951ms step_avg:44.54ms +[2025-09-11 09:45:31] [Rank 0] step:4601/10000 train_time:204951ms step_avg:44.54ms +[2025-09-11 09:45:31] [Rank 0] step:4621/10000 train_time:205630ms step_avg:44.50ms +[2025-09-11 09:45:31] [Rank 0] step:4621/10000 train_time:205630ms step_avg:44.50ms +[2025-09-11 09:45:32] [Rank 0] step:4641/10000 train_time:206309ms step_avg:44.45ms +[2025-09-11 09:45:32] [Rank 0] step:4641/10000 train_time:206309ms step_avg:44.45ms +[2025-09-11 09:45:33] [Rank 0] step:4661/10000 train_time:206989ms step_avg:44.41ms +[2025-09-11 09:45:33] [Rank 0] step:4661/10000 train_time:206989ms step_avg:44.41ms +[2025-09-11 09:45:33] [Rank 0] step:4681/10000 train_time:207668ms step_avg:44.36ms +[2025-09-11 09:45:33] [Rank 0] step:4681/10000 train_time:207668ms step_avg:44.36ms +[2025-09-11 09:45:34] [Rank 0] step:4701/10000 train_time:208351ms step_avg:44.32ms +[2025-09-11 09:45:34] [Rank 0] step:4701/10000 train_time:208351ms step_avg:44.32ms +[2025-09-11 09:45:35] [Rank 0] step:4721/10000 train_time:209044ms step_avg:44.28ms +[2025-09-11 09:45:35] [Rank 0] step:4721/10000 train_time:209044ms step_avg:44.28ms +[2025-09-11 09:45:35] [Rank 0] step:4741/10000 train_time:209723ms step_avg:44.24ms +[2025-09-11 09:45:35] [Rank 0] step:4741/10000 train_time:209723ms step_avg:44.24ms +[2025-09-11 09:45:36] [Rank 0] step:4761/10000 train_time:210403ms step_avg:44.19ms +[2025-09-11 09:45:36] [Rank 0] step:4761/10000 train_time:210403ms step_avg:44.19ms +[2025-09-11 09:45:37] [Rank 0] step:4781/10000 train_time:211371ms step_avg:44.21ms +[2025-09-11 09:45:37] [Rank 0] step:4781/10000 train_time:211371ms step_avg:44.21ms +[2025-09-11 09:45:38] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 09:45:38] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 09:45:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 09:45:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 09:45:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 09:45:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 09:45:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:45:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:45:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 09:45:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 09:45:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 09:45:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 09:45:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 09:45:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 09:45:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 09:45:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 09:45:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 09:45:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 09:45:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 09:45:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 09:45:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 09:45:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 09:45:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 09:45:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 09:45:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 09:45:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 09:45:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 09:45:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 09:45:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 09:45:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 09:45:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 09:45:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 09:45:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 09:45:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 09:45:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 09:45:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 09:45:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 09:45:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 09:45:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 09:45:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 09:45:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 09:45:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 09:45:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 09:45:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 09:45:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:45:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:45:48] [Rank 0] PRINT: step:4800/10000 val_loss:5.1181 total_sharp:5.0905e-05 L1_sharp:1.2787e-01 L2_sharp:1.7266e-01 L3_sharp:1.9636e-01 L4_sharp:2.6119e-01 L5_sharp:3.5135e-01 L6_sharp:4.5520e-01 L7_sharp:7.4339e-01 L8_sharp:8.7433e-01 L9_sharp:1.2654e+00 L10_sharp:2.0822e+00 L11_sharp:1.4485e+00 L12_sharp:1.1566e+00 total_fnorm:1.8700e+02 total_l1_linf:4.1165e+05 total_spectral:9.3500e+01 L1_fnorm:4.7607e-02 L2_fnorm:4.8096e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.7607e-02 L5_fnorm:4.7607e-02 L6_fnorm:4.7607e-02 L7_fnorm:4.7607e-02 L8_fnorm:4.7119e-02 L9_fnorm:4.7607e-02 L10_fnorm:4.8096e-02 L11_fnorm:4.7363e-02 L12_fnorm:4.6143e-02 L1_l1linf:1.3123e-02 L2_l1linf:1.3794e-02 L3_l1linf:1.4465e-02 L4_l1linf:1.4221e-02 L5_l1linf:1.4404e-02 L6_l1linf:1.4648e-02 L7_l1linf:1.4832e-02 L8_l1linf:1.4954e-02 L9_l1linf:1.5503e-02 L10_l1linf:1.6235e-02 L11_l1linf:1.6479e-02 L12_l1linf:1.6724e-02 L1_spectral:7.8572e-04 L2_spectral:8.0203e-04 L3_spectral:7.9800e-04 L4_spectral:7.9522e-04 L5_spectral:7.9358e-04 L6_spectral:8.1042e-04 L7_spectral:8.0375e-04 L8_spectral:7.9160e-04 L9_spectral:7.9344e-04 L10_spectral:7.8554e-04 L11_spectral:7.6215e-04 L12_spectral:7.3300e-04 train_time:212031ms step_avg:44.17ms +[2025-09-11 09:45:48] [Rank 0] PRINT: step:4800/10000 
val_loss:5.1181 total_sharp:5.0905e-05 L1_sharp:1.2787e-01 L2_sharp:1.7266e-01 L3_sharp:1.9636e-01 L4_sharp:2.6119e-01 L5_sharp:3.5135e-01 L6_sharp:4.5520e-01 L7_sharp:7.4339e-01 L8_sharp:8.7433e-01 L9_sharp:1.2654e+00 L10_sharp:2.0822e+00 L11_sharp:1.4485e+00 L12_sharp:1.1566e+00 total_fnorm:1.8700e+02 total_l1_linf:4.1165e+05 total_spectral:9.3500e+01 L1_fnorm:4.7607e-02 L2_fnorm:4.8096e-02 L3_fnorm:4.8096e-02 L4_fnorm:4.7607e-02 L5_fnorm:4.7607e-02 L6_fnorm:4.7607e-02 L7_fnorm:4.7607e-02 L8_fnorm:4.7119e-02 L9_fnorm:4.7607e-02 L10_fnorm:4.8096e-02 L11_fnorm:4.7363e-02 L12_fnorm:4.6143e-02 L1_l1linf:1.3123e-02 L2_l1linf:1.3794e-02 L3_l1linf:1.4465e-02 L4_l1linf:1.4221e-02 L5_l1linf:1.4404e-02 L6_l1linf:1.4648e-02 L7_l1linf:1.4832e-02 L8_l1linf:1.4954e-02 L9_l1linf:1.5503e-02 L10_l1linf:1.6235e-02 L11_l1linf:1.6479e-02 L12_l1linf:1.6724e-02 L1_spectral:7.8572e-04 L2_spectral:8.0203e-04 L3_spectral:7.9800e-04 L4_spectral:7.9522e-04 L5_spectral:7.9358e-04 L6_spectral:8.1042e-04 L7_spectral:8.0375e-04 L8_spectral:7.9160e-04 L9_spectral:7.9344e-04 L10_spectral:7.8554e-04 L11_spectral:7.6215e-04 L12_spectral:7.3300e-04 train_time:212031ms step_avg:44.17ms +[2025-09-11 09:45:50] [Rank 0] step:4801/10000 train_time:213658ms step_avg:44.50ms +[2025-09-11 09:45:50] [Rank 0] step:4801/10000 train_time:213658ms step_avg:44.50ms +[2025-09-11 09:45:51] [Rank 0] step:4821/10000 train_time:214356ms step_avg:44.46ms +[2025-09-11 09:45:51] [Rank 0] step:4821/10000 train_time:214356ms step_avg:44.46ms +[2025-09-11 09:45:51] [Rank 0] step:4841/10000 train_time:215039ms step_avg:44.42ms +[2025-09-11 09:45:51] [Rank 0] step:4841/10000 train_time:215039ms step_avg:44.42ms +[2025-09-11 09:45:52] [Rank 0] step:4861/10000 train_time:215721ms step_avg:44.38ms +[2025-09-11 09:45:52] [Rank 0] step:4861/10000 train_time:215721ms step_avg:44.38ms +[2025-09-11 09:45:53] [Rank 0] step:4881/10000 train_time:216405ms step_avg:44.34ms +[2025-09-11 09:45:53] [Rank 0] step:4881/10000 
train_time:216405ms step_avg:44.34ms +[2025-09-11 09:45:53] [Rank 0] step:4901/10000 train_time:217092ms step_avg:44.30ms +[2025-09-11 09:45:53] [Rank 0] step:4901/10000 train_time:217092ms step_avg:44.30ms +[2025-09-11 09:45:54] [Rank 0] step:4921/10000 train_time:217775ms step_avg:44.25ms +[2025-09-11 09:45:54] [Rank 0] step:4921/10000 train_time:217775ms step_avg:44.25ms +[2025-09-11 09:45:55] [Rank 0] step:4941/10000 train_time:218457ms step_avg:44.21ms +[2025-09-11 09:45:55] [Rank 0] step:4941/10000 train_time:218457ms step_avg:44.21ms +[2025-09-11 09:45:55] [Rank 0] step:4961/10000 train_time:219138ms step_avg:44.17ms +[2025-09-11 09:45:55] [Rank 0] step:4961/10000 train_time:219138ms step_avg:44.17ms +[2025-09-11 09:45:56] [Rank 0] step:4981/10000 train_time:219821ms step_avg:44.13ms +[2025-09-11 09:45:56] [Rank 0] step:4981/10000 train_time:219821ms step_avg:44.13ms +[2025-09-11 09:45:57] [Rank 0] step:5001/10000 train_time:220503ms step_avg:44.09ms +[2025-09-11 09:45:57] [Rank 0] step:5001/10000 train_time:220503ms step_avg:44.09ms +[2025-09-11 09:45:57] [Rank 0] step:5021/10000 train_time:221185ms step_avg:44.05ms +[2025-09-11 09:45:57] [Rank 0] step:5021/10000 train_time:221185ms step_avg:44.05ms +[2025-09-11 09:45:58] [Rank 0] step:5041/10000 train_time:221865ms step_avg:44.01ms +[2025-09-11 09:45:58] [Rank 0] step:5041/10000 train_time:221865ms step_avg:44.01ms +[2025-09-11 09:45:59] [Rank 0] step:5061/10000 train_time:222548ms step_avg:43.97ms +[2025-09-11 09:45:59] [Rank 0] step:5061/10000 train_time:222548ms step_avg:43.97ms +[2025-09-11 09:45:59] [Rank 0] step:5081/10000 train_time:223230ms step_avg:43.93ms +[2025-09-11 09:45:59] [Rank 0] step:5081/10000 train_time:223230ms step_avg:43.93ms +[2025-09-11 09:46:00] [Rank 0] step:5101/10000 train_time:223912ms step_avg:43.90ms +[2025-09-11 09:46:00] [Rank 0] step:5101/10000 train_time:223912ms step_avg:43.90ms +[2025-09-11 09:46:01] [Rank 0] step:5121/10000 train_time:224593ms step_avg:43.86ms 
+[2025-09-11 09:46:01] [Rank 0] step:5121/10000 train_time:224593ms step_avg:43.86ms +[2025-09-11 09:46:01] [Rank 0] step:5141/10000 train_time:225276ms step_avg:43.82ms +[2025-09-11 09:46:01] [Rank 0] step:5141/10000 train_time:225276ms step_avg:43.82ms +[2025-09-11 09:46:02] [Rank 0] step:5161/10000 train_time:225959ms step_avg:43.78ms +[2025-09-11 09:46:02] [Rank 0] step:5161/10000 train_time:225959ms step_avg:43.78ms +[2025-09-11 09:46:03] [Rank 0] step:5181/10000 train_time:226642ms step_avg:43.74ms +[2025-09-11 09:46:03] [Rank 0] step:5181/10000 train_time:226642ms step_avg:43.74ms +[2025-09-11 09:46:03] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 09:46:03] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 09:46:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 09:46:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 09:46:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 09:46:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 09:46:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:46:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:46:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 09:46:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 09:46:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 09:46:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 09:46:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 09:46:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 09:46:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 09:46:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 09:46:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 09:46:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 09:46:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 09:46:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 09:46:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 09:46:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 09:46:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 09:46:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 09:46:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 09:46:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 09:46:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 09:46:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 09:46:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 09:46:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 09:46:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 09:46:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 09:46:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 09:46:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 09:46:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 09:46:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 09:46:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 09:46:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 09:46:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 09:46:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 09:46:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 09:46:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 09:46:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 09:46:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 09:46:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:46:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:46:16] [Rank 0] PRINT: step:5200/10000 val_loss:5.0767 total_sharp:5.5633e-05 L1_sharp:1.3418e-01 L2_sharp:1.7642e-01 L3_sharp:1.9833e-01 L4_sharp:2.4283e-01 L5_sharp:2.7764e-01 L6_sharp:2.6888e-01 L7_sharp:2.5415e-01 L8_sharp:3.1852e-01 L9_sharp:3.9126e-01 L10_sharp:5.7918e-01 L11_sharp:4.9390e-01 L12_sharp:5.1152e-01 total_fnorm:1.6900e+02 total_l1_linf:3.4611e+05 total_spectral:8.4000e+01 L1_fnorm:4.7119e-02 L2_fnorm:4.7607e-02 L3_fnorm:4.7607e-02 L4_fnorm:4.7363e-02 L5_fnorm:4.7363e-02 L6_fnorm:4.7119e-02 L7_fnorm:4.6875e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.6875e-02 L10_fnorm:4.7119e-02 L11_fnorm:4.6631e-02 L12_fnorm:4.6387e-02 L1_l1linf:1.2512e-02 L2_l1linf:1.3123e-02 L3_l1linf:1.3489e-02 L4_l1linf:1.3489e-02 L5_l1linf:1.3733e-02 L6_l1linf:1.3672e-02 L7_l1linf:1.3733e-02 L8_l1linf:1.4221e-02 L9_l1linf:1.4038e-02 L10_l1linf:1.4160e-02 L11_l1linf:1.4282e-02 L12_l1linf:1.4404e-02 L1_spectral:7.9092e-04 L2_spectral:7.9623e-04 L3_spectral:7.9355e-04 L4_spectral:7.9813e-04 L5_spectral:8.0170e-04 L6_spectral:7.9551e-04 L7_spectral:7.9365e-04 L8_spectral:7.9561e-04 L9_spectral:8.0197e-04 L10_spectral:7.9557e-04 L11_spectral:7.9935e-04 L12_spectral:7.8039e-04 train_time:227314ms step_avg:43.71ms +[2025-09-11 09:46:16] [Rank 0] PRINT: step:5200/10000 val_loss:5.0767 total_sharp:5.5633e-05 L1_sharp:1.3418e-01 L2_sharp:1.7642e-01 L3_sharp:1.9833e-01 L4_sharp:2.4283e-01 L5_sharp:2.7764e-01 L6_sharp:2.6888e-01 L7_sharp:2.5415e-01 L8_sharp:3.1852e-01 L9_sharp:3.9126e-01 L10_sharp:5.7918e-01 L11_sharp:4.9390e-01 L12_sharp:5.1152e-01 total_fnorm:1.6900e+02 total_l1_linf:3.4611e+05 total_spectral:8.4000e+01 L1_fnorm:4.7119e-02 L2_fnorm:4.7607e-02 L3_fnorm:4.7607e-02 L4_fnorm:4.7363e-02 L5_fnorm:4.7363e-02 L6_fnorm:4.7119e-02 L7_fnorm:4.6875e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.6875e-02 L10_fnorm:4.7119e-02 L11_fnorm:4.6631e-02 L12_fnorm:4.6387e-02 L1_l1linf:1.2512e-02 L2_l1linf:1.3123e-02 L3_l1linf:1.3489e-02 L4_l1linf:1.3489e-02 L5_l1linf:1.3733e-02 
L6_l1linf:1.3672e-02 L7_l1linf:1.3733e-02 L8_l1linf:1.4221e-02 L9_l1linf:1.4038e-02 L10_l1linf:1.4160e-02 L11_l1linf:1.4282e-02 L12_l1linf:1.4404e-02 L1_spectral:7.9092e-04 L2_spectral:7.9623e-04 L3_spectral:7.9355e-04 L4_spectral:7.9813e-04 L5_spectral:8.0170e-04 L6_spectral:7.9551e-04 L7_spectral:7.9365e-04 L8_spectral:7.9561e-04 L9_spectral:8.0197e-04 L10_spectral:7.9557e-04 L11_spectral:7.9935e-04 L12_spectral:7.8039e-04 train_time:227314ms step_avg:43.71ms +[2025-09-11 09:46:17] [Rank 0] step:5201/10000 train_time:228944ms step_avg:44.02ms +[2025-09-11 09:46:17] [Rank 0] step:5201/10000 train_time:228944ms step_avg:44.02ms +[2025-09-11 09:46:18] [Rank 0] step:5221/10000 train_time:229639ms step_avg:43.98ms +[2025-09-11 09:46:18] [Rank 0] step:5221/10000 train_time:229639ms step_avg:43.98ms +[2025-09-11 09:46:19] [Rank 0] step:5241/10000 train_time:230330ms step_avg:43.95ms +[2025-09-11 09:46:19] [Rank 0] step:5241/10000 train_time:230330ms step_avg:43.95ms +[2025-09-11 09:46:20] [Rank 0] step:5261/10000 train_time:231022ms step_avg:43.91ms +[2025-09-11 09:46:20] [Rank 0] step:5261/10000 train_time:231022ms step_avg:43.91ms +[2025-09-11 09:46:20] [Rank 0] step:5281/10000 train_time:231713ms step_avg:43.88ms +[2025-09-11 09:46:20] [Rank 0] step:5281/10000 train_time:231713ms step_avg:43.88ms +[2025-09-11 09:46:21] [Rank 0] step:5301/10000 train_time:232404ms step_avg:43.84ms +[2025-09-11 09:46:21] [Rank 0] step:5301/10000 train_time:232404ms step_avg:43.84ms +[2025-09-11 09:46:22] [Rank 0] step:5321/10000 train_time:233095ms step_avg:43.81ms +[2025-09-11 09:46:22] [Rank 0] step:5321/10000 train_time:233095ms step_avg:43.81ms +[2025-09-11 09:46:22] [Rank 0] step:5341/10000 train_time:233784ms step_avg:43.77ms +[2025-09-11 09:46:22] [Rank 0] step:5341/10000 train_time:233784ms step_avg:43.77ms +[2025-09-11 09:46:23] [Rank 0] step:5361/10000 train_time:234476ms step_avg:43.74ms +[2025-09-11 09:46:23] [Rank 0] step:5361/10000 train_time:234476ms step_avg:43.74ms 
+[2025-09-11 09:46:24] [Rank 0] step:5381/10000 train_time:235167ms step_avg:43.70ms +[2025-09-11 09:46:24] [Rank 0] step:5381/10000 train_time:235167ms step_avg:43.70ms +[2025-09-11 09:46:24] [Rank 0] step:5401/10000 train_time:235854ms step_avg:43.67ms +[2025-09-11 09:46:24] [Rank 0] step:5401/10000 train_time:235854ms step_avg:43.67ms +[2025-09-11 09:46:25] [Rank 0] step:5421/10000 train_time:236546ms step_avg:43.64ms +[2025-09-11 09:46:25] [Rank 0] step:5421/10000 train_time:236546ms step_avg:43.64ms +[2025-09-11 09:46:26] [Rank 0] step:5441/10000 train_time:237236ms step_avg:43.60ms +[2025-09-11 09:46:26] [Rank 0] step:5441/10000 train_time:237236ms step_avg:43.60ms +[2025-09-11 09:46:26] [Rank 0] step:5461/10000 train_time:237927ms step_avg:43.57ms +[2025-09-11 09:46:26] [Rank 0] step:5461/10000 train_time:237927ms step_avg:43.57ms +[2025-09-11 09:46:27] [Rank 0] step:5481/10000 train_time:238619ms step_avg:43.54ms +[2025-09-11 09:46:27] [Rank 0] step:5481/10000 train_time:238619ms step_avg:43.54ms +[2025-09-11 09:46:28] [Rank 0] step:5501/10000 train_time:239308ms step_avg:43.50ms +[2025-09-11 09:46:28] [Rank 0] step:5501/10000 train_time:239308ms step_avg:43.50ms +[2025-09-11 09:46:28] [Rank 0] step:5521/10000 train_time:239998ms step_avg:43.47ms +[2025-09-11 09:46:28] [Rank 0] step:5521/10000 train_time:239998ms step_avg:43.47ms +[2025-09-11 09:46:29] [Rank 0] step:5541/10000 train_time:240691ms step_avg:43.44ms +[2025-09-11 09:46:29] [Rank 0] step:5541/10000 train_time:240691ms step_avg:43.44ms +[2025-09-11 09:46:30] [Rank 0] step:5561/10000 train_time:241384ms step_avg:43.41ms +[2025-09-11 09:46:30] [Rank 0] step:5561/10000 train_time:241384ms step_avg:43.41ms +[2025-09-11 09:46:31] [Rank 0] step:5581/10000 train_time:242075ms step_avg:43.37ms +[2025-09-11 09:46:31] [Rank 0] step:5581/10000 train_time:242075ms step_avg:43.37ms +[2025-09-11 09:46:31] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 09:46:31] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 09:46:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 09:46:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 09:46:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 09:46:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 09:46:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:46:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:46:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 09:46:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 09:46:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 09:46:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 09:46:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 09:46:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 09:46:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 09:46:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 09:46:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 09:46:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 09:46:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 09:46:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 09:46:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 09:46:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 09:46:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 09:46:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 09:46:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 09:46:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 09:46:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 09:46:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 09:46:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 09:46:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 09:46:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 09:46:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 09:46:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 09:46:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 09:46:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 09:46:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 09:46:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 09:46:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 09:46:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 09:46:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 09:46:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 09:46:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 09:46:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 09:46:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 09:46:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:46:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:46:42] [Rank 0] PRINT: step:5600/10000 val_loss:5.0477 total_sharp:4.7301e-05 L1_sharp:1.1629e-01 L2_sharp:1.6552e-01 L3_sharp:1.9086e-01 L4_sharp:2.1624e-01 L5_sharp:2.1278e-01 L6_sharp:2.1204e-01 L7_sharp:2.1859e-01 L8_sharp:2.7159e-01 L9_sharp:3.4674e-01 L10_sharp:4.3473e-01 L11_sharp:5.2037e-01 L12_sharp:7.4542e-01 total_fnorm:1.7900e+02 total_l1_linf:3.7888e+05 total_spectral:8.9500e+01 L1_fnorm:4.6631e-02 L2_fnorm:4.7363e-02 L3_fnorm:4.7363e-02 L4_fnorm:4.6875e-02 L5_fnorm:4.7119e-02 L6_fnorm:4.6875e-02 L7_fnorm:4.7119e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.6631e-02 L10_fnorm:4.6875e-02 L11_fnorm:4.6631e-02 L12_fnorm:4.6143e-02 L1_l1linf:1.1658e-02 L2_l1linf:1.2268e-02 L3_l1linf:1.2878e-02 L4_l1linf:1.3123e-02 L5_l1linf:1.3428e-02 L6_l1linf:1.3733e-02 L7_l1linf:1.3855e-02 L8_l1linf:1.4282e-02 L9_l1linf:1.4038e-02 L10_l1linf:1.4587e-02 L11_l1linf:1.4221e-02 L12_l1linf:1.4832e-02 L1_spectral:7.8064e-04 L2_spectral:7.9210e-04 L3_spectral:7.9513e-04 L4_spectral:7.9289e-04 L5_spectral:7.8982e-04 L6_spectral:8.0305e-04 L7_spectral:7.9710e-04 L8_spectral:7.9041e-04 L9_spectral:7.9894e-04 L10_spectral:7.9229e-04 L11_spectral:7.8827e-04 L12_spectral:7.7180e-04 train_time:242749ms step_avg:43.35ms +[2025-09-11 09:46:42] [Rank 0] PRINT: step:5600/10000 
val_loss:5.0477 total_sharp:4.7301e-05 L1_sharp:1.1629e-01 L2_sharp:1.6552e-01 L3_sharp:1.9086e-01 L4_sharp:2.1624e-01 L5_sharp:2.1278e-01 L6_sharp:2.1204e-01 L7_sharp:2.1859e-01 L8_sharp:2.7159e-01 L9_sharp:3.4674e-01 L10_sharp:4.3473e-01 L11_sharp:5.2037e-01 L12_sharp:7.4542e-01 total_fnorm:1.7900e+02 total_l1_linf:3.7888e+05 total_spectral:8.9500e+01 L1_fnorm:4.6631e-02 L2_fnorm:4.7363e-02 L3_fnorm:4.7363e-02 L4_fnorm:4.6875e-02 L5_fnorm:4.7119e-02 L6_fnorm:4.6875e-02 L7_fnorm:4.7119e-02 L8_fnorm:4.6631e-02 L9_fnorm:4.6631e-02 L10_fnorm:4.6875e-02 L11_fnorm:4.6631e-02 L12_fnorm:4.6143e-02 L1_l1linf:1.1658e-02 L2_l1linf:1.2268e-02 L3_l1linf:1.2878e-02 L4_l1linf:1.3123e-02 L5_l1linf:1.3428e-02 L6_l1linf:1.3733e-02 L7_l1linf:1.3855e-02 L8_l1linf:1.4282e-02 L9_l1linf:1.4038e-02 L10_l1linf:1.4587e-02 L11_l1linf:1.4221e-02 L12_l1linf:1.4832e-02 L1_spectral:7.8064e-04 L2_spectral:7.9210e-04 L3_spectral:7.9513e-04 L4_spectral:7.9289e-04 L5_spectral:7.8982e-04 L6_spectral:8.0305e-04 L7_spectral:7.9710e-04 L8_spectral:7.9041e-04 L9_spectral:7.9894e-04 L10_spectral:7.9229e-04 L11_spectral:7.8827e-04 L12_spectral:7.7180e-04 train_time:242749ms step_avg:43.35ms +[2025-09-11 09:46:44] [Rank 0] step:5601/10000 train_time:244521ms step_avg:43.66ms +[2025-09-11 09:46:44] [Rank 0] step:5601/10000 train_time:244521ms step_avg:43.66ms +[2025-09-11 09:46:45] [Rank 0] step:5621/10000 train_time:245227ms step_avg:43.63ms +[2025-09-11 09:46:45] [Rank 0] step:5621/10000 train_time:245227ms step_avg:43.63ms +[2025-09-11 09:46:45] [Rank 0] step:5641/10000 train_time:245916ms step_avg:43.59ms +[2025-09-11 09:46:45] [Rank 0] step:5641/10000 train_time:245916ms step_avg:43.59ms +[2025-09-11 09:46:46] [Rank 0] step:5661/10000 train_time:246606ms step_avg:43.56ms +[2025-09-11 09:46:46] [Rank 0] step:5661/10000 train_time:246606ms step_avg:43.56ms +[2025-09-11 09:46:47] [Rank 0] step:5681/10000 train_time:247296ms step_avg:43.53ms +[2025-09-11 09:46:47] [Rank 0] step:5681/10000 
train_time:247296ms step_avg:43.53ms +[2025-09-11 09:46:47] [Rank 0] step:5701/10000 train_time:247989ms step_avg:43.50ms +[2025-09-11 09:46:47] [Rank 0] step:5701/10000 train_time:247989ms step_avg:43.50ms +[2025-09-11 09:46:48] [Rank 0] step:5721/10000 train_time:248677ms step_avg:43.47ms +[2025-09-11 09:46:48] [Rank 0] step:5721/10000 train_time:248677ms step_avg:43.47ms +[2025-09-11 09:46:49] [Rank 0] step:5741/10000 train_time:249369ms step_avg:43.44ms +[2025-09-11 09:46:49] [Rank 0] step:5741/10000 train_time:249369ms step_avg:43.44ms +[2025-09-11 09:46:49] [Rank 0] step:5761/10000 train_time:250059ms step_avg:43.41ms +[2025-09-11 09:46:49] [Rank 0] step:5761/10000 train_time:250059ms step_avg:43.41ms +[2025-09-11 09:46:50] [Rank 0] step:5781/10000 train_time:250750ms step_avg:43.37ms +[2025-09-11 09:46:50] [Rank 0] step:5781/10000 train_time:250750ms step_avg:43.37ms +[2025-09-11 09:46:51] [Rank 0] step:5801/10000 train_time:251442ms step_avg:43.34ms +[2025-09-11 09:46:51] [Rank 0] step:5801/10000 train_time:251442ms step_avg:43.34ms +[2025-09-11 09:46:52] [Rank 0] step:5821/10000 train_time:252133ms step_avg:43.31ms +[2025-09-11 09:46:52] [Rank 0] step:5821/10000 train_time:252133ms step_avg:43.31ms +[2025-09-11 09:46:52] [Rank 0] step:5841/10000 train_time:252825ms step_avg:43.28ms +[2025-09-11 09:46:52] [Rank 0] step:5841/10000 train_time:252825ms step_avg:43.28ms +[2025-09-11 09:46:53] [Rank 0] step:5861/10000 train_time:253515ms step_avg:43.25ms +[2025-09-11 09:46:53] [Rank 0] step:5861/10000 train_time:253515ms step_avg:43.25ms +[2025-09-11 09:46:54] [Rank 0] step:5881/10000 train_time:254205ms step_avg:43.22ms +[2025-09-11 09:46:54] [Rank 0] step:5881/10000 train_time:254205ms step_avg:43.22ms +[2025-09-11 09:46:54] [Rank 0] step:5901/10000 train_time:254894ms step_avg:43.20ms +[2025-09-11 09:46:54] [Rank 0] step:5901/10000 train_time:254894ms step_avg:43.20ms +[2025-09-11 09:46:55] [Rank 0] step:5921/10000 train_time:255587ms step_avg:43.17ms 
+[2025-09-11 09:46:55] [Rank 0] step:5921/10000 train_time:255587ms step_avg:43.17ms +[2025-09-11 09:46:56] [Rank 0] step:5941/10000 train_time:256280ms step_avg:43.14ms +[2025-09-11 09:46:56] [Rank 0] step:5941/10000 train_time:256280ms step_avg:43.14ms +[2025-09-11 09:46:56] [Rank 0] step:5961/10000 train_time:256971ms step_avg:43.11ms +[2025-09-11 09:46:56] [Rank 0] step:5961/10000 train_time:256971ms step_avg:43.11ms +[2025-09-11 09:46:57] [Rank 0] step:5981/10000 train_time:257662ms step_avg:43.08ms +[2025-09-11 09:46:57] [Rank 0] step:5981/10000 train_time:257662ms step_avg:43.08ms +[2025-09-11 09:46:58] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 09:46:58] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 09:46:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 09:46:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 09:47:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 09:47:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 09:47:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:47:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:47:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 09:47:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 09:47:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 09:47:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 09:47:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 09:47:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 09:47:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 09:47:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 09:47:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 09:47:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 09:47:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 09:47:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 09:47:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 09:47:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 09:47:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 09:47:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 09:47:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 09:47:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 09:47:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 09:47:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 09:47:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 09:47:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 09:47:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 09:47:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 09:47:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 09:47:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 09:47:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 09:47:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 09:47:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 09:47:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 09:47:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 09:47:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 09:47:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 09:47:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 09:47:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 09:47:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 09:47:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:47:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:47:09] [Rank 0] PRINT: step:6000/10000 val_loss:4.9953 total_sharp:4.3989e-05 L1_sharp:1.0744e-01 L2_sharp:1.4955e-01 L3_sharp:1.9307e-01 L4_sharp:2.5849e-01 L5_sharp:2.4893e-01 L6_sharp:2.3822e-01 L7_sharp:2.4747e-01 L8_sharp:2.7415e-01 L9_sharp:3.5476e-01 L10_sharp:4.3691e-01 L11_sharp:9.1126e-01 L12_sharp:1.5151e+00 total_fnorm:1.7400e+02 total_l1_linf:3.6454e+05 total_spectral:8.7000e+01 L1_fnorm:4.6387e-02 L2_fnorm:4.7119e-02 L3_fnorm:4.6875e-02 L4_fnorm:4.6631e-02 L5_fnorm:4.6875e-02 L6_fnorm:4.6631e-02 L7_fnorm:4.6631e-02 L8_fnorm:4.6143e-02 L9_fnorm:4.6387e-02 L10_fnorm:4.6631e-02 L11_fnorm:4.6387e-02 L12_fnorm:4.5898e-02 L1_l1linf:1.1536e-02 L2_l1linf:1.2390e-02 L3_l1linf:1.2695e-02 L4_l1linf:1.2573e-02 L5_l1linf:1.2817e-02 L6_l1linf:1.3123e-02 L7_l1linf:1.3245e-02 L8_l1linf:1.3245e-02 L9_l1linf:1.3428e-02 L10_l1linf:1.3489e-02 L11_l1linf:1.3672e-02 L12_l1linf:1.4099e-02 L1_spectral:7.8574e-04 L2_spectral:7.9188e-04 L3_spectral:7.9786e-04 L4_spectral:7.9733e-04 L5_spectral:7.9922e-04 L6_spectral:7.9819e-04 L7_spectral:8.0453e-04 L8_spectral:7.9827e-04 L9_spectral:7.9265e-04 L10_spectral:7.9395e-04 L11_spectral:7.8772e-04 L12_spectral:7.8301e-04 train_time:258337ms step_avg:43.06ms +[2025-09-11 09:47:09] [Rank 0] PRINT: step:6000/10000 val_loss:4.9953 total_sharp:4.3989e-05 L1_sharp:1.0744e-01 L2_sharp:1.4955e-01 L3_sharp:1.9307e-01 L4_sharp:2.5849e-01 L5_sharp:2.4893e-01 L6_sharp:2.3822e-01 L7_sharp:2.4747e-01 L8_sharp:2.7415e-01 L9_sharp:3.5476e-01 L10_sharp:4.3691e-01 L11_sharp:9.1126e-01 L12_sharp:1.5151e+00 total_fnorm:1.7400e+02 total_l1_linf:3.6454e+05 total_spectral:8.7000e+01 L1_fnorm:4.6387e-02 L2_fnorm:4.7119e-02 L3_fnorm:4.6875e-02 L4_fnorm:4.6631e-02 L5_fnorm:4.6875e-02 L6_fnorm:4.6631e-02 L7_fnorm:4.6631e-02 L8_fnorm:4.6143e-02 L9_fnorm:4.6387e-02 L10_fnorm:4.6631e-02 L11_fnorm:4.6387e-02 L12_fnorm:4.5898e-02 L1_l1linf:1.1536e-02 L2_l1linf:1.2390e-02 L3_l1linf:1.2695e-02 L4_l1linf:1.2573e-02 L5_l1linf:1.2817e-02 
L6_l1linf:1.3123e-02 L7_l1linf:1.3245e-02 L8_l1linf:1.3245e-02 L9_l1linf:1.3428e-02 L10_l1linf:1.3489e-02 L11_l1linf:1.3672e-02 L12_l1linf:1.4099e-02 L1_spectral:7.8574e-04 L2_spectral:7.9188e-04 L3_spectral:7.9786e-04 L4_spectral:7.9733e-04 L5_spectral:7.9922e-04 L6_spectral:7.9819e-04 L7_spectral:8.0453e-04 L8_spectral:7.9827e-04 L9_spectral:7.9265e-04 L10_spectral:7.9395e-04 L11_spectral:7.8772e-04 L12_spectral:7.8301e-04 train_time:258337ms step_avg:43.06ms +[2025-09-11 09:47:11] [Rank 0] step:6001/10000 train_time:261159ms step_avg:43.52ms +[2025-09-11 09:47:11] [Rank 0] step:6001/10000 train_time:261159ms step_avg:43.52ms +[2025-09-11 09:47:12] [Rank 0] step:6021/10000 train_time:261869ms step_avg:43.49ms +[2025-09-11 09:47:12] [Rank 0] step:6021/10000 train_time:261869ms step_avg:43.49ms +[2025-09-11 09:47:13] [Rank 0] step:6041/10000 train_time:262563ms step_avg:43.46ms +[2025-09-11 09:47:13] [Rank 0] step:6041/10000 train_time:262563ms step_avg:43.46ms +[2025-09-11 09:47:14] [Rank 0] step:6061/10000 train_time:263257ms step_avg:43.43ms +[2025-09-11 09:47:14] [Rank 0] step:6061/10000 train_time:263257ms step_avg:43.43ms +[2025-09-11 09:47:14] [Rank 0] step:6081/10000 train_time:263951ms step_avg:43.41ms +[2025-09-11 09:47:14] [Rank 0] step:6081/10000 train_time:263951ms step_avg:43.41ms +[2025-09-11 09:47:15] [Rank 0] step:6101/10000 train_time:264643ms step_avg:43.38ms +[2025-09-11 09:47:15] [Rank 0] step:6101/10000 train_time:264643ms step_avg:43.38ms +[2025-09-11 09:47:16] [Rank 0] step:6121/10000 train_time:265338ms step_avg:43.35ms +[2025-09-11 09:47:16] [Rank 0] step:6121/10000 train_time:265338ms step_avg:43.35ms +[2025-09-11 09:47:16] [Rank 0] step:6141/10000 train_time:266030ms step_avg:43.32ms +[2025-09-11 09:47:16] [Rank 0] step:6141/10000 train_time:266030ms step_avg:43.32ms +[2025-09-11 09:47:17] [Rank 0] step:6161/10000 train_time:266722ms step_avg:43.29ms +[2025-09-11 09:47:17] [Rank 0] step:6161/10000 train_time:266722ms step_avg:43.29ms 
+[2025-09-11 09:47:18] [Rank 0] step:6181/10000 train_time:267413ms step_avg:43.26ms +[2025-09-11 09:47:18] [Rank 0] step:6181/10000 train_time:267413ms step_avg:43.26ms +[2025-09-11 09:47:18] [Rank 0] step:6201/10000 train_time:268107ms step_avg:43.24ms +[2025-09-11 09:47:18] [Rank 0] step:6201/10000 train_time:268107ms step_avg:43.24ms +[2025-09-11 09:47:19] [Rank 0] step:6221/10000 train_time:268801ms step_avg:43.21ms +[2025-09-11 09:47:19] [Rank 0] step:6221/10000 train_time:268801ms step_avg:43.21ms +[2025-09-11 09:47:20] [Rank 0] step:6241/10000 train_time:269495ms step_avg:43.18ms +[2025-09-11 09:47:20] [Rank 0] step:6241/10000 train_time:269495ms step_avg:43.18ms +[2025-09-11 09:47:20] [Rank 0] step:6261/10000 train_time:270186ms step_avg:43.15ms +[2025-09-11 09:47:20] [Rank 0] step:6261/10000 train_time:270186ms step_avg:43.15ms +[2025-09-11 09:47:21] [Rank 0] step:6281/10000 train_time:270880ms step_avg:43.13ms +[2025-09-11 09:47:21] [Rank 0] step:6281/10000 train_time:270880ms step_avg:43.13ms +[2025-09-11 09:47:22] [Rank 0] step:6301/10000 train_time:271571ms step_avg:43.10ms +[2025-09-11 09:47:22] [Rank 0] step:6301/10000 train_time:271571ms step_avg:43.10ms +[2025-09-11 09:47:23] [Rank 0] step:6321/10000 train_time:272267ms step_avg:43.07ms +[2025-09-11 09:47:23] [Rank 0] step:6321/10000 train_time:272267ms step_avg:43.07ms +[2025-09-11 09:47:23] [Rank 0] step:6341/10000 train_time:272960ms step_avg:43.05ms +[2025-09-11 09:47:23] [Rank 0] step:6341/10000 train_time:272960ms step_avg:43.05ms +[2025-09-11 09:47:24] [Rank 0] step:6361/10000 train_time:273653ms step_avg:43.02ms +[2025-09-11 09:47:24] [Rank 0] step:6361/10000 train_time:273653ms step_avg:43.02ms +[2025-09-11 09:47:25] [Rank 0] step:6381/10000 train_time:274346ms step_avg:42.99ms +[2025-09-11 09:47:25] [Rank 0] step:6381/10000 train_time:274346ms step_avg:42.99ms +[2025-09-11 09:47:25] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 09:47:25] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 09:47:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 09:47:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 09:47:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 09:47:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 09:47:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:47:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:47:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 09:47:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 09:47:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 09:47:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 09:47:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 09:47:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 09:47:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 09:47:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 09:47:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 09:47:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 09:47:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 09:47:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 09:47:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 09:47:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 09:47:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 09:47:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 09:47:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 09:47:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 09:47:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 09:47:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 09:47:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 09:47:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 09:47:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 09:47:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 09:47:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 09:47:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 09:47:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 09:47:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 09:47:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 09:47:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 09:47:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 09:47:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 09:47:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 09:47:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 09:47:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 09:47:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 09:47:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:47:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:47:36] [Rank 0] PRINT: step:6400/10000 val_loss:4.9713 total_sharp:5.0932e-05 L1_sharp:1.1114e-01 L2_sharp:1.5462e-01 L3_sharp:1.9076e-01 L4_sharp:2.1755e-01 L5_sharp:2.2117e-01 L6_sharp:2.3457e-01 L7_sharp:2.5976e-01 L8_sharp:3.0372e-01 L9_sharp:3.4457e-01 L10_sharp:4.3754e-01 L11_sharp:8.6507e-01 L12_sharp:1.2695e+00 total_fnorm:1.5500e+02 total_l1_linf:3.1949e+05 total_spectral:7.7500e+01 L1_fnorm:4.0039e-02 L2_fnorm:4.0771e-02 L3_fnorm:4.0771e-02 L4_fnorm:4.0527e-02 L5_fnorm:4.0771e-02 L6_fnorm:4.0527e-02 L7_fnorm:4.0527e-02 L8_fnorm:4.0039e-02 L9_fnorm:4.0283e-02 L10_fnorm:4.0527e-02 L11_fnorm:4.0283e-02 L12_fnorm:4.0039e-02 L1_l1linf:9.2773e-03 L2_l1linf:1.0010e-02 L3_l1linf:1.0498e-02 L4_l1linf:1.0742e-02 L5_l1linf:1.1047e-02 L6_l1linf:1.1230e-02 L7_l1linf:1.1230e-02 L8_l1linf:1.1292e-02 L9_l1linf:1.1597e-02 L10_l1linf:1.1536e-02 L11_l1linf:1.1658e-02 L12_l1linf:1.1963e-02 L1_spectral:6.9353e-04 L2_spectral:7.0436e-04 L3_spectral:7.0387e-04 L4_spectral:7.0178e-04 L5_spectral:7.1017e-04 L6_spectral:7.0297e-04 L7_spectral:6.9741e-04 L8_spectral:6.9745e-04 L9_spectral:7.0223e-04 L10_spectral:7.0559e-04 L11_spectral:7.0256e-04 L12_spectral:6.8695e-04 train_time:275018ms step_avg:42.97ms +[2025-09-11 09:47:36] [Rank 0] PRINT: step:6400/10000 
val_loss:4.9713 total_sharp:5.0932e-05 L1_sharp:1.1114e-01 L2_sharp:1.5462e-01 L3_sharp:1.9076e-01 L4_sharp:2.1755e-01 L5_sharp:2.2117e-01 L6_sharp:2.3457e-01 L7_sharp:2.5976e-01 L8_sharp:3.0372e-01 L9_sharp:3.4457e-01 L10_sharp:4.3754e-01 L11_sharp:8.6507e-01 L12_sharp:1.2695e+00 total_fnorm:1.5500e+02 total_l1_linf:3.1949e+05 total_spectral:7.7500e+01 L1_fnorm:4.0039e-02 L2_fnorm:4.0771e-02 L3_fnorm:4.0771e-02 L4_fnorm:4.0527e-02 L5_fnorm:4.0771e-02 L6_fnorm:4.0527e-02 L7_fnorm:4.0527e-02 L8_fnorm:4.0039e-02 L9_fnorm:4.0283e-02 L10_fnorm:4.0527e-02 L11_fnorm:4.0283e-02 L12_fnorm:4.0039e-02 L1_l1linf:9.2773e-03 L2_l1linf:1.0010e-02 L3_l1linf:1.0498e-02 L4_l1linf:1.0742e-02 L5_l1linf:1.1047e-02 L6_l1linf:1.1230e-02 L7_l1linf:1.1230e-02 L8_l1linf:1.1292e-02 L9_l1linf:1.1597e-02 L10_l1linf:1.1536e-02 L11_l1linf:1.1658e-02 L12_l1linf:1.1963e-02 L1_spectral:6.9353e-04 L2_spectral:7.0436e-04 L3_spectral:7.0387e-04 L4_spectral:7.0178e-04 L5_spectral:7.1017e-04 L6_spectral:7.0297e-04 L7_spectral:6.9741e-04 L8_spectral:6.9745e-04 L9_spectral:7.0223e-04 L10_spectral:7.0559e-04 L11_spectral:7.0256e-04 L12_spectral:6.8695e-04 train_time:275018ms step_avg:42.97ms +[2025-09-11 09:47:38] [Rank 0] step:6401/10000 train_time:276837ms step_avg:43.25ms +[2025-09-11 09:47:38] [Rank 0] step:6401/10000 train_time:276837ms step_avg:43.25ms +[2025-09-11 09:47:39] [Rank 0] step:6421/10000 train_time:277556ms step_avg:43.23ms +[2025-09-11 09:47:39] [Rank 0] step:6421/10000 train_time:277556ms step_avg:43.23ms +[2025-09-11 09:47:39] [Rank 0] step:6441/10000 train_time:278249ms step_avg:43.20ms +[2025-09-11 09:47:39] [Rank 0] step:6441/10000 train_time:278249ms step_avg:43.20ms +[2025-09-11 09:47:40] [Rank 0] step:6461/10000 train_time:278943ms step_avg:43.17ms +[2025-09-11 09:47:40] [Rank 0] step:6461/10000 train_time:278943ms step_avg:43.17ms +[2025-09-11 09:47:41] [Rank 0] step:6481/10000 train_time:279638ms step_avg:43.15ms +[2025-09-11 09:47:41] [Rank 0] step:6481/10000 
train_time:279638ms step_avg:43.15ms +[2025-09-11 09:47:41] [Rank 0] step:6501/10000 train_time:280335ms step_avg:43.12ms +[2025-09-11 09:47:41] [Rank 0] step:6501/10000 train_time:280335ms step_avg:43.12ms +[2025-09-11 09:47:42] [Rank 0] step:6521/10000 train_time:281029ms step_avg:43.10ms +[2025-09-11 09:47:42] [Rank 0] step:6521/10000 train_time:281029ms step_avg:43.10ms +[2025-09-11 09:47:43] [Rank 0] step:6541/10000 train_time:281721ms step_avg:43.07ms +[2025-09-11 09:47:43] [Rank 0] step:6541/10000 train_time:281721ms step_avg:43.07ms +[2025-09-11 09:47:44] [Rank 0] step:6561/10000 train_time:282684ms step_avg:43.09ms +[2025-09-11 09:47:44] [Rank 0] step:6561/10000 train_time:282684ms step_avg:43.09ms +[2025-09-11 09:47:44] [Rank 0] step:6581/10000 train_time:283377ms step_avg:43.06ms +[2025-09-11 09:47:44] [Rank 0] step:6581/10000 train_time:283377ms step_avg:43.06ms +[2025-09-11 09:47:45] [Rank 0] step:6601/10000 train_time:284069ms step_avg:43.03ms +[2025-09-11 09:47:45] [Rank 0] step:6601/10000 train_time:284069ms step_avg:43.03ms +[2025-09-11 09:47:46] [Rank 0] step:6621/10000 train_time:284989ms step_avg:43.04ms +[2025-09-11 09:47:46] [Rank 0] step:6621/10000 train_time:284989ms step_avg:43.04ms +[2025-09-11 09:47:47] [Rank 0] step:6641/10000 train_time:285683ms step_avg:43.02ms +[2025-09-11 09:47:47] [Rank 0] step:6641/10000 train_time:285683ms step_avg:43.02ms +[2025-09-11 09:47:47] [Rank 0] step:6661/10000 train_time:286377ms step_avg:42.99ms +[2025-09-11 09:47:47] [Rank 0] step:6661/10000 train_time:286377ms step_avg:42.99ms +[2025-09-11 09:47:48] [Rank 0] step:6681/10000 train_time:287076ms step_avg:42.97ms +[2025-09-11 09:47:48] [Rank 0] step:6681/10000 train_time:287076ms step_avg:42.97ms +[2025-09-11 09:47:49] [Rank 0] step:6701/10000 train_time:287776ms step_avg:42.95ms +[2025-09-11 09:47:49] [Rank 0] step:6701/10000 train_time:287776ms step_avg:42.95ms +[2025-09-11 09:47:50] [Rank 0] step:6721/10000 train_time:288480ms step_avg:42.92ms 
+[2025-09-11 09:47:50] [Rank 0] step:6721/10000 train_time:288480ms step_avg:42.92ms +[2025-09-11 09:47:50] [Rank 0] step:6741/10000 train_time:289180ms step_avg:42.90ms +[2025-09-11 09:47:50] [Rank 0] step:6741/10000 train_time:289180ms step_avg:42.90ms +[2025-09-11 09:47:51] [Rank 0] step:6761/10000 train_time:289879ms step_avg:42.88ms +[2025-09-11 09:47:51] [Rank 0] step:6761/10000 train_time:289879ms step_avg:42.88ms +[2025-09-11 09:47:52] [Rank 0] step:6781/10000 train_time:290579ms step_avg:42.85ms +[2025-09-11 09:47:52] [Rank 0] step:6781/10000 train_time:290579ms step_avg:42.85ms +[2025-09-11 09:47:52] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 09:47:52] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 09:47:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 09:47:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 09:47:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 09:47:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 09:47:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:47:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:47:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 09:47:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 09:47:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 09:47:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 09:47:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 09:47:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 09:47:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 09:47:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 09:48:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 09:48:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 09:48:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 09:48:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 09:48:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 09:48:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 09:48:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 09:48:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 09:48:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 09:48:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 09:48:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 09:48:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 09:48:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 09:48:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 09:48:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 09:48:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 09:48:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 09:48:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 09:48:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 09:48:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 09:48:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 09:48:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 09:48:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 09:48:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 09:48:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 09:48:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 09:48:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 09:48:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 09:48:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:48:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:48:03] [Rank 0] PRINT: step:6800/10000 val_loss:4.9187 total_sharp:3.7191e-05 L1_sharp:1.0444e-01 L2_sharp:1.4986e-01 L3_sharp:1.6377e-01 L4_sharp:2.2885e-01 L5_sharp:2.4963e-01 L6_sharp:2.4587e-01 L7_sharp:2.2749e-01 L8_sharp:2.2317e-01 L9_sharp:2.8824e-01 L10_sharp:3.7202e-01 L11_sharp:4.0912e-01 L12_sharp:3.9661e-01 total_fnorm:1.5300e+02 total_l1_linf:3.1949e+05 total_spectral:7.6500e+01 L1_fnorm:3.3691e-02 L2_fnorm:3.3936e-02 L3_fnorm:3.3936e-02 L4_fnorm:3.3936e-02 L5_fnorm:3.4180e-02 L6_fnorm:3.3936e-02 L7_fnorm:3.3936e-02 L8_fnorm:3.3447e-02 L9_fnorm:3.3691e-02 L10_fnorm:3.3936e-02 L11_fnorm:3.3447e-02 L12_fnorm:3.3447e-02 L1_l1linf:7.2937e-03 L2_l1linf:7.7209e-03 L3_l1linf:8.1787e-03 L4_l1linf:8.2397e-03 L5_l1linf:8.3618e-03 L6_l1linf:8.4839e-03 L7_l1linf:8.9111e-03 L8_l1linf:9.0942e-03 L9_l1linf:9.0332e-03 L10_l1linf:9.0332e-03 L11_l1linf:9.2163e-03 L12_l1linf:9.0332e-03 L1_spectral:5.9891e-04 L2_spectral:6.0489e-04 L3_spectral:6.0536e-04 L4_spectral:6.0553e-04 L5_spectral:6.0773e-04 L6_spectral:6.0677e-04 L7_spectral:6.1021e-04 L8_spectral:6.0094e-04 L9_spectral:6.0591e-04 L10_spectral:6.1153e-04 L11_spectral:6.0458e-04 L12_spectral:5.9526e-04 train_time:291258ms step_avg:42.83ms +[2025-09-11 09:48:03] [Rank 0] PRINT: step:6800/10000 val_loss:4.9187 total_sharp:3.7191e-05 L1_sharp:1.0444e-01 L2_sharp:1.4986e-01 L3_sharp:1.6377e-01 L4_sharp:2.2885e-01 L5_sharp:2.4963e-01 L6_sharp:2.4587e-01 L7_sharp:2.2749e-01 L8_sharp:2.2317e-01 L9_sharp:2.8824e-01 L10_sharp:3.7202e-01 L11_sharp:4.0912e-01 L12_sharp:3.9661e-01 total_fnorm:1.5300e+02 total_l1_linf:3.1949e+05 total_spectral:7.6500e+01 L1_fnorm:3.3691e-02 L2_fnorm:3.3936e-02 L3_fnorm:3.3936e-02 L4_fnorm:3.3936e-02 L5_fnorm:3.4180e-02 L6_fnorm:3.3936e-02 L7_fnorm:3.3936e-02 L8_fnorm:3.3447e-02 L9_fnorm:3.3691e-02 L10_fnorm:3.3936e-02 L11_fnorm:3.3447e-02 L12_fnorm:3.3447e-02 L1_l1linf:7.2937e-03 L2_l1linf:7.7209e-03 L3_l1linf:8.1787e-03 L4_l1linf:8.2397e-03 L5_l1linf:8.3618e-03 
L6_l1linf:8.4839e-03 L7_l1linf:8.9111e-03 L8_l1linf:9.0942e-03 L9_l1linf:9.0332e-03 L10_l1linf:9.0332e-03 L11_l1linf:9.2163e-03 L12_l1linf:9.0332e-03 L1_spectral:5.9891e-04 L2_spectral:6.0489e-04 L3_spectral:6.0536e-04 L4_spectral:6.0553e-04 L5_spectral:6.0773e-04 L6_spectral:6.0677e-04 L7_spectral:6.1021e-04 L8_spectral:6.0094e-04 L9_spectral:6.0591e-04 L10_spectral:6.1153e-04 L11_spectral:6.0458e-04 L12_spectral:5.9526e-04 train_time:291258ms step_avg:42.83ms +[2025-09-11 09:48:05] [Rank 0] step:6801/10000 train_time:292978ms step_avg:43.08ms +[2025-09-11 09:48:05] [Rank 0] step:6801/10000 train_time:292978ms step_avg:43.08ms +[2025-09-11 09:48:06] [Rank 0] step:6821/10000 train_time:293682ms step_avg:43.06ms +[2025-09-11 09:48:06] [Rank 0] step:6821/10000 train_time:293682ms step_avg:43.06ms +[2025-09-11 09:48:06] [Rank 0] step:6841/10000 train_time:294385ms step_avg:43.03ms +[2025-09-11 09:48:06] [Rank 0] step:6841/10000 train_time:294385ms step_avg:43.03ms +[2025-09-11 09:48:07] [Rank 0] step:6861/10000 train_time:295085ms step_avg:43.01ms +[2025-09-11 09:48:07] [Rank 0] step:6861/10000 train_time:295085ms step_avg:43.01ms +[2025-09-11 09:48:08] [Rank 0] step:6881/10000 train_time:295788ms step_avg:42.99ms +[2025-09-11 09:48:08] [Rank 0] step:6881/10000 train_time:295788ms step_avg:42.99ms +[2025-09-11 09:48:08] [Rank 0] step:6901/10000 train_time:296488ms step_avg:42.96ms +[2025-09-11 09:48:08] [Rank 0] step:6901/10000 train_time:296488ms step_avg:42.96ms +[2025-09-11 09:48:09] [Rank 0] step:6921/10000 train_time:297189ms step_avg:42.94ms +[2025-09-11 09:48:09] [Rank 0] step:6921/10000 train_time:297189ms step_avg:42.94ms +[2025-09-11 09:48:10] [Rank 0] step:6941/10000 train_time:297889ms step_avg:42.92ms +[2025-09-11 09:48:10] [Rank 0] step:6941/10000 train_time:297889ms step_avg:42.92ms +[2025-09-11 09:48:10] [Rank 0] step:6961/10000 train_time:298590ms step_avg:42.89ms +[2025-09-11 09:48:10] [Rank 0] step:6961/10000 train_time:298590ms step_avg:42.89ms 
+[2025-09-11 09:48:11] [Rank 0] step:6981/10000 train_time:299294ms step_avg:42.87ms +[2025-09-11 09:48:11] [Rank 0] step:6981/10000 train_time:299294ms step_avg:42.87ms +[2025-09-11 09:48:12] [Rank 0] step:7001/10000 train_time:299994ms step_avg:42.85ms +[2025-09-11 09:48:12] [Rank 0] step:7001/10000 train_time:299994ms step_avg:42.85ms +[2025-09-11 09:48:13] [Rank 0] step:7021/10000 train_time:300694ms step_avg:42.83ms +[2025-09-11 09:48:13] [Rank 0] step:7021/10000 train_time:300694ms step_avg:42.83ms +[2025-09-11 09:48:13] [Rank 0] step:7041/10000 train_time:301394ms step_avg:42.81ms +[2025-09-11 09:48:13] [Rank 0] step:7041/10000 train_time:301394ms step_avg:42.81ms +[2025-09-11 09:48:14] [Rank 0] step:7061/10000 train_time:302095ms step_avg:42.78ms +[2025-09-11 09:48:14] [Rank 0] step:7061/10000 train_time:302095ms step_avg:42.78ms +[2025-09-11 09:48:15] [Rank 0] step:7081/10000 train_time:302796ms step_avg:42.76ms +[2025-09-11 09:48:15] [Rank 0] step:7081/10000 train_time:302796ms step_avg:42.76ms +[2025-09-11 09:48:15] [Rank 0] step:7101/10000 train_time:303497ms step_avg:42.74ms +[2025-09-11 09:48:15] [Rank 0] step:7101/10000 train_time:303497ms step_avg:42.74ms +[2025-09-11 09:48:16] [Rank 0] step:7121/10000 train_time:304199ms step_avg:42.72ms +[2025-09-11 09:48:16] [Rank 0] step:7121/10000 train_time:304199ms step_avg:42.72ms +[2025-09-11 09:48:17] [Rank 0] step:7141/10000 train_time:304901ms step_avg:42.70ms +[2025-09-11 09:48:17] [Rank 0] step:7141/10000 train_time:304901ms step_avg:42.70ms +[2025-09-11 09:48:17] [Rank 0] step:7161/10000 train_time:305603ms step_avg:42.68ms +[2025-09-11 09:48:17] [Rank 0] step:7161/10000 train_time:305603ms step_avg:42.68ms +[2025-09-11 09:48:18] [Rank 0] step:7181/10000 train_time:306303ms step_avg:42.65ms +[2025-09-11 09:48:18] [Rank 0] step:7181/10000 train_time:306303ms step_avg:42.65ms +[2025-09-11 09:48:19] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 09:48:19] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 09:48:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 09:48:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 09:48:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 09:48:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 09:48:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:48:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:48:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 09:48:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 09:48:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 09:48:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 09:48:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 09:48:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 09:48:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 09:48:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 09:48:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 09:48:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 09:48:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 09:48:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 09:48:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 09:48:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 09:48:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 09:48:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 09:48:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 09:48:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 09:48:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 09:48:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 09:48:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 09:48:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 09:48:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 09:48:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 09:48:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 09:48:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 09:48:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 09:48:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 09:48:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 09:48:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 09:48:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 09:48:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 09:48:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 09:48:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 09:48:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 09:48:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 09:48:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:48:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:48:30] [Rank 0] PRINT: step:7200/10000 val_loss:4.8915 total_sharp:3.5844e-05 L1_sharp:9.6074e-02 L2_sharp:1.4571e-01 L3_sharp:1.5206e-01 L4_sharp:2.3918e-01 L5_sharp:2.2209e-01 L6_sharp:2.4337e-01 L7_sharp:2.5323e-01 L8_sharp:2.5969e-01 L9_sharp:2.7451e-01 L10_sharp:3.7944e-01 L11_sharp:4.4992e-01 L12_sharp:5.3508e-01 total_fnorm:1.3600e+02 total_l1_linf:2.7443e+05 total_spectral:6.8000e+01 L1_fnorm:2.7588e-02 L2_fnorm:2.8076e-02 L3_fnorm:2.8320e-02 L4_fnorm:2.8198e-02 L5_fnorm:2.8320e-02 L6_fnorm:2.8198e-02 L7_fnorm:2.8076e-02 L8_fnorm:2.7954e-02 L9_fnorm:2.7954e-02 L10_fnorm:2.8076e-02 L11_fnorm:2.7954e-02 L12_fnorm:2.7832e-02 L1_l1linf:5.6458e-03 L2_l1linf:5.9509e-03 L3_l1linf:6.5613e-03 L4_l1linf:6.5308e-03 L5_l1linf:6.6223e-03 L6_l1linf:6.9580e-03 L7_l1linf:6.8665e-03 L8_l1linf:6.9275e-03 L9_l1linf:7.1716e-03 L10_l1linf:7.3853e-03 L11_l1linf:7.3547e-03 L12_l1linf:7.3242e-03 L1_spectral:5.1186e-04 L2_spectral:5.1415e-04 L3_spectral:5.1286e-04 L4_spectral:5.1022e-04 L5_spectral:5.1409e-04 L6_spectral:5.1282e-04 L7_spectral:5.0985e-04 L8_spectral:5.0488e-04 L9_spectral:5.0658e-04 L10_spectral:5.1034e-04 L11_spectral:5.1312e-04 L12_spectral:5.0525e-04 train_time:306986ms step_avg:42.64ms +[2025-09-11 09:48:30] [Rank 0] PRINT: step:7200/10000 
val_loss:4.8915 total_sharp:3.5844e-05 L1_sharp:9.6074e-02 L2_sharp:1.4571e-01 L3_sharp:1.5206e-01 L4_sharp:2.3918e-01 L5_sharp:2.2209e-01 L6_sharp:2.4337e-01 L7_sharp:2.5323e-01 L8_sharp:2.5969e-01 L9_sharp:2.7451e-01 L10_sharp:3.7944e-01 L11_sharp:4.4992e-01 L12_sharp:5.3508e-01 total_fnorm:1.3600e+02 total_l1_linf:2.7443e+05 total_spectral:6.8000e+01 L1_fnorm:2.7588e-02 L2_fnorm:2.8076e-02 L3_fnorm:2.8320e-02 L4_fnorm:2.8198e-02 L5_fnorm:2.8320e-02 L6_fnorm:2.8198e-02 L7_fnorm:2.8076e-02 L8_fnorm:2.7954e-02 L9_fnorm:2.7954e-02 L10_fnorm:2.8076e-02 L11_fnorm:2.7954e-02 L12_fnorm:2.7832e-02 L1_l1linf:5.6458e-03 L2_l1linf:5.9509e-03 L3_l1linf:6.5613e-03 L4_l1linf:6.5308e-03 L5_l1linf:6.6223e-03 L6_l1linf:6.9580e-03 L7_l1linf:6.8665e-03 L8_l1linf:6.9275e-03 L9_l1linf:7.1716e-03 L10_l1linf:7.3853e-03 L11_l1linf:7.3547e-03 L12_l1linf:7.3242e-03 L1_spectral:5.1186e-04 L2_spectral:5.1415e-04 L3_spectral:5.1286e-04 L4_spectral:5.1022e-04 L5_spectral:5.1409e-04 L6_spectral:5.1282e-04 L7_spectral:5.0985e-04 L8_spectral:5.0488e-04 L9_spectral:5.0658e-04 L10_spectral:5.1034e-04 L11_spectral:5.1312e-04 L12_spectral:5.0525e-04 train_time:306986ms step_avg:42.64ms +[2025-09-11 09:48:31] [Rank 0] step:7201/10000 train_time:308707ms step_avg:42.87ms +[2025-09-11 09:48:31] [Rank 0] step:7201/10000 train_time:308707ms step_avg:42.87ms +[2025-09-11 09:48:32] [Rank 0] step:7221/10000 train_time:309422ms step_avg:42.85ms +[2025-09-11 09:48:32] [Rank 0] step:7221/10000 train_time:309422ms step_avg:42.85ms +[2025-09-11 09:48:33] [Rank 0] step:7241/10000 train_time:310124ms step_avg:42.83ms +[2025-09-11 09:48:33] [Rank 0] step:7241/10000 train_time:310124ms step_avg:42.83ms +[2025-09-11 09:48:33] [Rank 0] step:7261/10000 train_time:310829ms step_avg:42.81ms +[2025-09-11 09:48:33] [Rank 0] step:7261/10000 train_time:310829ms step_avg:42.81ms +[2025-09-11 09:48:34] [Rank 0] step:7281/10000 train_time:311536ms step_avg:42.79ms +[2025-09-11 09:48:34] [Rank 0] step:7281/10000 
train_time:311536ms step_avg:42.79ms +[2025-09-11 09:48:35] [Rank 0] step:7301/10000 train_time:312238ms step_avg:42.77ms +[2025-09-11 09:48:35] [Rank 0] step:7301/10000 train_time:312238ms step_avg:42.77ms +[2025-09-11 09:48:36] [Rank 0] step:7321/10000 train_time:312939ms step_avg:42.75ms +[2025-09-11 09:48:36] [Rank 0] step:7321/10000 train_time:312939ms step_avg:42.75ms +[2025-09-11 09:48:36] [Rank 0] step:7341/10000 train_time:313641ms step_avg:42.72ms +[2025-09-11 09:48:36] [Rank 0] step:7341/10000 train_time:313641ms step_avg:42.72ms +[2025-09-11 09:48:37] [Rank 0] step:7361/10000 train_time:314342ms step_avg:42.70ms +[2025-09-11 09:48:37] [Rank 0] step:7361/10000 train_time:314342ms step_avg:42.70ms +[2025-09-11 09:48:38] [Rank 0] step:7381/10000 train_time:315045ms step_avg:42.68ms +[2025-09-11 09:48:38] [Rank 0] step:7381/10000 train_time:315045ms step_avg:42.68ms +[2025-09-11 09:48:38] [Rank 0] step:7401/10000 train_time:315745ms step_avg:42.66ms +[2025-09-11 09:48:38] [Rank 0] step:7401/10000 train_time:315745ms step_avg:42.66ms +[2025-09-11 09:48:39] [Rank 0] step:7421/10000 train_time:316447ms step_avg:42.64ms +[2025-09-11 09:48:39] [Rank 0] step:7421/10000 train_time:316447ms step_avg:42.64ms +[2025-09-11 09:48:40] [Rank 0] step:7441/10000 train_time:317149ms step_avg:42.62ms +[2025-09-11 09:48:40] [Rank 0] step:7441/10000 train_time:317149ms step_avg:42.62ms +[2025-09-11 09:48:40] [Rank 0] step:7461/10000 train_time:317852ms step_avg:42.60ms +[2025-09-11 09:48:40] [Rank 0] step:7461/10000 train_time:317852ms step_avg:42.60ms +[2025-09-11 09:48:41] [Rank 0] step:7481/10000 train_time:318556ms step_avg:42.58ms +[2025-09-11 09:48:41] [Rank 0] step:7481/10000 train_time:318556ms step_avg:42.58ms +[2025-09-11 09:48:42] [Rank 0] step:7501/10000 train_time:319258ms step_avg:42.56ms +[2025-09-11 09:48:42] [Rank 0] step:7501/10000 train_time:319258ms step_avg:42.56ms +[2025-09-11 09:48:43] [Rank 0] step:7521/10000 train_time:319961ms step_avg:42.54ms 
+[2025-09-11 09:48:43] [Rank 0] step:7521/10000 train_time:319961ms step_avg:42.54ms +[2025-09-11 09:48:43] [Rank 0] step:7541/10000 train_time:320661ms step_avg:42.52ms +[2025-09-11 09:48:43] [Rank 0] step:7541/10000 train_time:320661ms step_avg:42.52ms +[2025-09-11 09:48:44] [Rank 0] step:7561/10000 train_time:321365ms step_avg:42.50ms +[2025-09-11 09:48:44] [Rank 0] step:7561/10000 train_time:321365ms step_avg:42.50ms +[2025-09-11 09:48:45] [Rank 0] step:7581/10000 train_time:322070ms step_avg:42.48ms +[2025-09-11 09:48:45] [Rank 0] step:7581/10000 train_time:322070ms step_avg:42.48ms +[2025-09-11 09:48:45] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 09:48:45] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 09:48:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 09:48:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 09:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 09:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 09:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 09:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 09:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 09:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 09:48:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 09:48:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 09:48:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 09:48:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 09:48:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 09:48:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 09:48:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 09:48:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 09:48:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 09:48:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 09:48:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 09:48:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 09:48:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 09:48:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 09:48:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 09:48:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 09:48:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 09:48:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 09:48:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 09:48:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 09:48:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 09:48:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 09:48:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 09:48:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 09:48:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 09:48:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 09:48:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 09:48:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 09:48:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 09:48:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 09:48:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 09:48:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 09:48:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:48:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:48:56] [Rank 0] PRINT: step:7600/10000 val_loss:4.8654 total_sharp:3.8762e-05 L1_sharp:8.3536e-02 L2_sharp:1.1517e-01 L3_sharp:1.5044e-01 L4_sharp:2.0983e-01 L5_sharp:2.0298e-01 L6_sharp:2.2927e-01 L7_sharp:2.4089e-01 L8_sharp:2.3262e-01 L9_sharp:2.7474e-01 L10_sharp:3.1759e-01 L11_sharp:3.5575e-01 L12_sharp:4.5302e-01 total_fnorm:1.1150e+02 total_l1_linf:2.1606e+05 total_spectral:5.5750e+01 L1_fnorm:2.2217e-02 L2_fnorm:2.2705e-02 L3_fnorm:2.2827e-02 L4_fnorm:2.2827e-02 L5_fnorm:2.3071e-02 L6_fnorm:2.2949e-02 L7_fnorm:2.2827e-02 L8_fnorm:2.2583e-02 L9_fnorm:2.2827e-02 L10_fnorm:2.2827e-02 L11_fnorm:2.2827e-02 L12_fnorm:2.2705e-02 L1_l1linf:4.1504e-03 L2_l1linf:4.5166e-03 L3_l1linf:4.9438e-03 L4_l1linf:4.9133e-03 L5_l1linf:5.0964e-03 L6_l1linf:5.2490e-03 L7_l1linf:5.5542e-03 L8_l1linf:5.5847e-03 L9_l1linf:5.7068e-03 L10_l1linf:5.8289e-03 L11_l1linf:5.9204e-03 L12_l1linf:5.8594e-03 L1_spectral:4.2127e-04 L2_spectral:4.2047e-04 L3_spectral:4.2250e-04 L4_spectral:4.2173e-04 L5_spectral:4.2784e-04 L6_spectral:4.1878e-04 L7_spectral:4.2000e-04 L8_spectral:4.1398e-04 L9_spectral:4.2258e-04 L10_spectral:4.1836e-04 L11_spectral:4.1567e-04 L12_spectral:4.1150e-04 train_time:322753ms step_avg:42.47ms +[2025-09-11 09:48:56] [Rank 0] PRINT: step:7600/10000 val_loss:4.8654 total_sharp:3.8762e-05 L1_sharp:8.3536e-02 L2_sharp:1.1517e-01 L3_sharp:1.5044e-01 L4_sharp:2.0983e-01 L5_sharp:2.0298e-01 L6_sharp:2.2927e-01 L7_sharp:2.4089e-01 L8_sharp:2.3262e-01 L9_sharp:2.7474e-01 L10_sharp:3.1759e-01 L11_sharp:3.5575e-01 L12_sharp:4.5302e-01 total_fnorm:1.1150e+02 total_l1_linf:2.1606e+05 total_spectral:5.5750e+01 L1_fnorm:2.2217e-02 L2_fnorm:2.2705e-02 L3_fnorm:2.2827e-02 L4_fnorm:2.2827e-02 L5_fnorm:2.3071e-02 L6_fnorm:2.2949e-02 L7_fnorm:2.2827e-02 L8_fnorm:2.2583e-02 L9_fnorm:2.2827e-02 L10_fnorm:2.2827e-02 L11_fnorm:2.2827e-02 L12_fnorm:2.2705e-02 L1_l1linf:4.1504e-03 L2_l1linf:4.5166e-03 L3_l1linf:4.9438e-03 L4_l1linf:4.9133e-03 L5_l1linf:5.0964e-03 
L6_l1linf:5.2490e-03 L7_l1linf:5.5542e-03 L8_l1linf:5.5847e-03 L9_l1linf:5.7068e-03 L10_l1linf:5.8289e-03 L11_l1linf:5.9204e-03 L12_l1linf:5.8594e-03 L1_spectral:4.2127e-04 L2_spectral:4.2047e-04 L3_spectral:4.2250e-04 L4_spectral:4.2173e-04 L5_spectral:4.2784e-04 L6_spectral:4.1878e-04 L7_spectral:4.2000e-04 L8_spectral:4.1398e-04 L9_spectral:4.2258e-04 L10_spectral:4.1836e-04 L11_spectral:4.1567e-04 L12_spectral:4.1150e-04 train_time:322753ms step_avg:42.47ms +[2025-09-11 09:48:58] [Rank 0] step:7601/10000 train_time:324494ms step_avg:42.69ms +[2025-09-11 09:48:58] [Rank 0] step:7601/10000 train_time:324494ms step_avg:42.69ms +[2025-09-11 09:48:58] [Rank 0] step:7621/10000 train_time:325222ms step_avg:42.67ms +[2025-09-11 09:48:58] [Rank 0] step:7621/10000 train_time:325222ms step_avg:42.67ms +[2025-09-11 09:48:59] [Rank 0] step:7641/10000 train_time:325929ms step_avg:42.66ms +[2025-09-11 09:48:59] [Rank 0] step:7641/10000 train_time:325929ms step_avg:42.66ms +[2025-09-11 09:49:00] [Rank 0] step:7661/10000 train_time:326632ms step_avg:42.64ms +[2025-09-11 09:49:00] [Rank 0] step:7661/10000 train_time:326632ms step_avg:42.64ms +[2025-09-11 09:49:01] [Rank 0] step:7681/10000 train_time:327335ms step_avg:42.62ms +[2025-09-11 09:49:01] [Rank 0] step:7681/10000 train_time:327335ms step_avg:42.62ms +[2025-09-11 09:49:01] [Rank 0] step:7701/10000 train_time:328038ms step_avg:42.60ms +[2025-09-11 09:49:01] [Rank 0] step:7701/10000 train_time:328038ms step_avg:42.60ms +[2025-09-11 09:49:02] [Rank 0] step:7721/10000 train_time:328744ms step_avg:42.58ms +[2025-09-11 09:49:02] [Rank 0] step:7721/10000 train_time:328744ms step_avg:42.58ms +[2025-09-11 09:49:03] [Rank 0] step:7741/10000 train_time:329449ms step_avg:42.56ms +[2025-09-11 09:49:03] [Rank 0] step:7741/10000 train_time:329449ms step_avg:42.56ms +[2025-09-11 09:49:03] [Rank 0] step:7761/10000 train_time:330152ms step_avg:42.54ms +[2025-09-11 09:49:03] [Rank 0] step:7761/10000 train_time:330152ms step_avg:42.54ms 
+[2025-09-11 09:49:04] [Rank 0] step:7781/10000 train_time:330858ms step_avg:42.52ms +[2025-09-11 09:49:04] [Rank 0] step:7781/10000 train_time:330858ms step_avg:42.52ms +[2025-09-11 09:49:05] [Rank 0] step:7801/10000 train_time:331561ms step_avg:42.50ms +[2025-09-11 09:49:05] [Rank 0] step:7801/10000 train_time:331561ms step_avg:42.50ms +[2025-09-11 09:49:06] [Rank 0] step:7821/10000 train_time:332264ms step_avg:42.48ms +[2025-09-11 09:49:06] [Rank 0] step:7821/10000 train_time:332264ms step_avg:42.48ms +[2025-09-11 09:49:06] [Rank 0] step:7841/10000 train_time:332967ms step_avg:42.46ms +[2025-09-11 09:49:06] [Rank 0] step:7841/10000 train_time:332967ms step_avg:42.46ms +[2025-09-11 09:49:07] [Rank 0] step:7861/10000 train_time:333672ms step_avg:42.45ms +[2025-09-11 09:49:07] [Rank 0] step:7861/10000 train_time:333672ms step_avg:42.45ms +[2025-09-11 09:49:08] [Rank 0] step:7881/10000 train_time:334375ms step_avg:42.43ms +[2025-09-11 09:49:08] [Rank 0] step:7881/10000 train_time:334375ms step_avg:42.43ms +[2025-09-11 09:49:08] [Rank 0] step:7901/10000 train_time:335080ms step_avg:42.41ms +[2025-09-11 09:49:08] [Rank 0] step:7901/10000 train_time:335080ms step_avg:42.41ms +[2025-09-11 09:49:09] [Rank 0] step:7921/10000 train_time:335782ms step_avg:42.39ms +[2025-09-11 09:49:09] [Rank 0] step:7921/10000 train_time:335782ms step_avg:42.39ms +[2025-09-11 09:49:10] [Rank 0] step:7941/10000 train_time:336487ms step_avg:42.37ms +[2025-09-11 09:49:10] [Rank 0] step:7941/10000 train_time:336487ms step_avg:42.37ms +[2025-09-11 09:49:10] [Rank 0] step:7961/10000 train_time:337188ms step_avg:42.35ms +[2025-09-11 09:49:10] [Rank 0] step:7961/10000 train_time:337188ms step_avg:42.35ms +[2025-09-11 09:49:11] [Rank 0] step:7981/10000 train_time:337893ms step_avg:42.34ms +[2025-09-11 09:49:11] [Rank 0] step:7981/10000 train_time:337893ms step_avg:42.34ms +[2025-09-11 09:49:12] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 09:49:12] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 09:49:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 09:49:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 09:49:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 09:49:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 09:49:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:49:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:49:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 09:49:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 09:49:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 09:49:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 09:49:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 09:49:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 09:49:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 09:49:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 09:49:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 09:49:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 09:49:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 09:49:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 09:49:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 09:49:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 09:49:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 09:49:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 09:49:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 09:49:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 09:49:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 09:49:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 09:49:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 09:49:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 09:49:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 09:49:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 09:49:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 09:49:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 09:49:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 09:49:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 09:49:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 09:49:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 09:49:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 09:49:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 09:49:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 09:49:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 09:49:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 09:49:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 09:49:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:49:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:49:23] [Rank 0] PRINT: step:8000/10000 val_loss:4.8351 total_sharp:3.3219e-05 L1_sharp:7.1814e-02 L2_sharp:1.1673e-01 L3_sharp:1.4459e-01 L4_sharp:1.9666e-01 L5_sharp:2.1473e-01 L6_sharp:2.3035e-01 L7_sharp:2.3891e-01 L8_sharp:2.6414e-01 L9_sharp:3.1498e-01 L10_sharp:3.2394e-01 L11_sharp:4.7697e-01 L12_sharp:8.3423e-01 total_fnorm:9.9500e+01 total_l1_linf:1.8432e+05 total_spectral:4.9750e+01 L1_fnorm:1.7456e-02 L2_fnorm:1.7822e-02 L3_fnorm:1.8066e-02 L4_fnorm:1.8066e-02 L5_fnorm:1.8188e-02 L6_fnorm:1.8066e-02 L7_fnorm:1.7944e-02 L8_fnorm:1.7822e-02 L9_fnorm:1.7944e-02 L10_fnorm:1.8066e-02 L11_fnorm:1.7944e-02 L12_fnorm:1.7822e-02 L1_l1linf:2.8992e-03 L2_l1linf:3.3264e-03 L3_l1linf:3.4637e-03 L4_l1linf:3.6011e-03 L5_l1linf:3.9368e-03 L6_l1linf:3.8605e-03 L7_l1linf:3.9978e-03 L8_l1linf:4.2114e-03 L9_l1linf:4.2114e-03 L10_l1linf:4.2114e-03 L11_l1linf:4.3640e-03 L12_l1linf:4.2419e-03 L1_spectral:3.3709e-04 L2_spectral:3.4103e-04 L3_spectral:3.3640e-04 L4_spectral:3.3837e-04 L5_spectral:3.4120e-04 L6_spectral:3.3968e-04 L7_spectral:3.3531e-04 L8_spectral:3.3321e-04 L9_spectral:3.3605e-04 L10_spectral:3.3808e-04 L11_spectral:3.3818e-04 L12_spectral:3.3107e-04 train_time:338574ms step_avg:42.32ms +[2025-09-11 09:49:23] [Rank 0] PRINT: step:8000/10000 
val_loss:4.8351 total_sharp:3.3219e-05 L1_sharp:7.1814e-02 L2_sharp:1.1673e-01 L3_sharp:1.4459e-01 L4_sharp:1.9666e-01 L5_sharp:2.1473e-01 L6_sharp:2.3035e-01 L7_sharp:2.3891e-01 L8_sharp:2.6414e-01 L9_sharp:3.1498e-01 L10_sharp:3.2394e-01 L11_sharp:4.7697e-01 L12_sharp:8.3423e-01 total_fnorm:9.9500e+01 total_l1_linf:1.8432e+05 total_spectral:4.9750e+01 L1_fnorm:1.7456e-02 L2_fnorm:1.7822e-02 L3_fnorm:1.8066e-02 L4_fnorm:1.8066e-02 L5_fnorm:1.8188e-02 L6_fnorm:1.8066e-02 L7_fnorm:1.7944e-02 L8_fnorm:1.7822e-02 L9_fnorm:1.7944e-02 L10_fnorm:1.8066e-02 L11_fnorm:1.7944e-02 L12_fnorm:1.7822e-02 L1_l1linf:2.8992e-03 L2_l1linf:3.3264e-03 L3_l1linf:3.4637e-03 L4_l1linf:3.6011e-03 L5_l1linf:3.9368e-03 L6_l1linf:3.8605e-03 L7_l1linf:3.9978e-03 L8_l1linf:4.2114e-03 L9_l1linf:4.2114e-03 L10_l1linf:4.2114e-03 L11_l1linf:4.3640e-03 L12_l1linf:4.2419e-03 L1_spectral:3.3709e-04 L2_spectral:3.4103e-04 L3_spectral:3.3640e-04 L4_spectral:3.3837e-04 L5_spectral:3.4120e-04 L6_spectral:3.3968e-04 L7_spectral:3.3531e-04 L8_spectral:3.3321e-04 L9_spectral:3.3605e-04 L10_spectral:3.3808e-04 L11_spectral:3.3818e-04 L12_spectral:3.3107e-04 train_time:338574ms step_avg:42.32ms +[2025-09-11 09:49:24] [Rank 0] step:8001/10000 train_time:340308ms step_avg:42.53ms +[2025-09-11 09:49:24] [Rank 0] step:8001/10000 train_time:340308ms step_avg:42.53ms +[2025-09-11 09:49:25] [Rank 0] step:8021/10000 train_time:341027ms step_avg:42.52ms +[2025-09-11 09:49:25] [Rank 0] step:8021/10000 train_time:341027ms step_avg:42.52ms +[2025-09-11 09:49:26] [Rank 0] step:8041/10000 train_time:341732ms step_avg:42.50ms +[2025-09-11 09:49:26] [Rank 0] step:8041/10000 train_time:341732ms step_avg:42.50ms +[2025-09-11 09:49:27] [Rank 0] step:8061/10000 train_time:342436ms step_avg:42.48ms +[2025-09-11 09:49:27] [Rank 0] step:8061/10000 train_time:342436ms step_avg:42.48ms +[2025-09-11 09:49:27] [Rank 0] step:8081/10000 train_time:343137ms step_avg:42.46ms +[2025-09-11 09:49:27] [Rank 0] step:8081/10000 
train_time:343137ms step_avg:42.46ms +[2025-09-11 09:49:28] [Rank 0] step:8101/10000 train_time:343840ms step_avg:42.44ms +[2025-09-11 09:49:28] [Rank 0] step:8101/10000 train_time:343840ms step_avg:42.44ms +[2025-09-11 09:49:29] [Rank 0] step:8121/10000 train_time:344548ms step_avg:42.43ms +[2025-09-11 09:49:29] [Rank 0] step:8121/10000 train_time:344548ms step_avg:42.43ms +[2025-09-11 09:49:30] [Rank 0] step:8141/10000 train_time:345976ms step_avg:42.50ms +[2025-09-11 09:49:30] [Rank 0] step:8141/10000 train_time:345976ms step_avg:42.50ms +[2025-09-11 09:49:31] [Rank 0] step:8161/10000 train_time:346685ms step_avg:42.48ms +[2025-09-11 09:49:31] [Rank 0] step:8161/10000 train_time:346685ms step_avg:42.48ms +[2025-09-11 09:49:31] [Rank 0] step:8181/10000 train_time:347400ms step_avg:42.46ms +[2025-09-11 09:49:31] [Rank 0] step:8181/10000 train_time:347400ms step_avg:42.46ms +[2025-09-11 09:49:32] [Rank 0] step:8201/10000 train_time:348111ms step_avg:42.45ms +[2025-09-11 09:49:32] [Rank 0] step:8201/10000 train_time:348111ms step_avg:42.45ms +[2025-09-11 09:49:33] [Rank 0] step:8221/10000 train_time:348822ms step_avg:42.43ms +[2025-09-11 09:49:33] [Rank 0] step:8221/10000 train_time:348822ms step_avg:42.43ms +[2025-09-11 09:49:34] [Rank 0] step:8241/10000 train_time:349540ms step_avg:42.41ms +[2025-09-11 09:49:34] [Rank 0] step:8241/10000 train_time:349540ms step_avg:42.41ms +[2025-09-11 09:49:34] [Rank 0] step:8261/10000 train_time:350249ms step_avg:42.40ms +[2025-09-11 09:49:34] [Rank 0] step:8261/10000 train_time:350249ms step_avg:42.40ms +[2025-09-11 09:49:35] [Rank 0] step:8281/10000 train_time:350957ms step_avg:42.38ms +[2025-09-11 09:49:35] [Rank 0] step:8281/10000 train_time:350957ms step_avg:42.38ms +[2025-09-11 09:49:36] [Rank 0] step:8301/10000 train_time:351666ms step_avg:42.36ms +[2025-09-11 09:49:36] [Rank 0] step:8301/10000 train_time:351666ms step_avg:42.36ms +[2025-09-11 09:49:36] [Rank 0] step:8321/10000 train_time:352376ms step_avg:42.35ms 
+[2025-09-11 09:49:36] [Rank 0] step:8321/10000 train_time:352376ms step_avg:42.35ms +[2025-09-11 09:49:37] [Rank 0] step:8341/10000 train_time:353092ms step_avg:42.33ms +[2025-09-11 09:49:37] [Rank 0] step:8341/10000 train_time:353092ms step_avg:42.33ms +[2025-09-11 09:49:38] [Rank 0] step:8361/10000 train_time:353797ms step_avg:42.32ms +[2025-09-11 09:49:38] [Rank 0] step:8361/10000 train_time:353797ms step_avg:42.32ms +[2025-09-11 09:49:39] [Rank 0] step:8381/10000 train_time:354509ms step_avg:42.30ms +[2025-09-11 09:49:39] [Rank 0] step:8381/10000 train_time:354509ms step_avg:42.30ms +[2025-09-11 09:49:39] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 09:49:39] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 09:49:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 09:49:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 09:49:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 09:49:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 09:49:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:49:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:49:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 09:49:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 09:49:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 09:49:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 09:49:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 09:49:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 09:49:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 09:49:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 09:49:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 09:49:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 09:49:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 09:49:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 09:49:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 09:49:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 09:49:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 09:49:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 09:49:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 09:49:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 09:49:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 09:49:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 09:49:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 09:49:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 09:49:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 09:49:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 09:49:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 09:49:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 09:49:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 09:49:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 09:49:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 09:49:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 09:49:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 09:49:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 09:49:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 09:49:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 09:49:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 09:49:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 09:49:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:49:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:49:50] [Rank 0] PRINT: step:8400/10000 val_loss:4.8135 total_sharp:3.1237e-05 L1_sharp:7.1126e-02 L2_sharp:9.7895e-02 L3_sharp:1.3759e-01 L4_sharp:1.8719e-01 L5_sharp:1.8960e-01 L6_sharp:2.0094e-01 L7_sharp:2.0667e-01 L8_sharp:2.1602e-01 L9_sharp:2.4610e-01 L10_sharp:2.8320e-01 L11_sharp:2.8630e-01 L12_sharp:2.7413e-01 total_fnorm:8.1500e+01 total_l1_linf:1.4029e+05 total_spectral:4.0750e+01 L1_fnorm:1.3184e-02 L2_fnorm:1.3489e-02 L3_fnorm:1.3611e-02 L4_fnorm:1.3611e-02 L5_fnorm:1.3794e-02 L6_fnorm:1.3794e-02 L7_fnorm:1.3733e-02 L8_fnorm:1.3550e-02 L9_fnorm:1.3672e-02 L10_fnorm:1.3733e-02 L11_fnorm:1.3611e-02 L12_fnorm:1.3611e-02 L1_l1linf:2.0752e-03 L2_l1linf:2.1057e-03 L3_l1linf:2.3651e-03 L4_l1linf:2.4872e-03 L5_l1linf:2.6245e-03 L6_l1linf:2.7466e-03 L7_l1linf:2.8229e-03 L8_l1linf:3.0212e-03 L9_l1linf:2.8992e-03 L10_l1linf:3.0670e-03 L11_l1linf:2.9755e-03 L12_l1linf:3.0670e-03 L1_spectral:2.6170e-04 L2_spectral:2.6285e-04 L3_spectral:2.6275e-04 L4_spectral:2.5977e-04 L5_spectral:2.6344e-04 L6_spectral:2.6151e-04 L7_spectral:2.5925e-04 L8_spectral:2.5879e-04 L9_spectral:2.5765e-04 L10_spectral:2.5707e-04 L11_spectral:2.6027e-04 L12_spectral:2.5659e-04 train_time:355202ms step_avg:42.29ms +[2025-09-11 09:49:50] [Rank 0] PRINT: step:8400/10000 val_loss:4.8135 total_sharp:3.1237e-05 L1_sharp:7.1126e-02 L2_sharp:9.7895e-02 L3_sharp:1.3759e-01 L4_sharp:1.8719e-01 L5_sharp:1.8960e-01 L6_sharp:2.0094e-01 L7_sharp:2.0667e-01 L8_sharp:2.1602e-01 L9_sharp:2.4610e-01 L10_sharp:2.8320e-01 L11_sharp:2.8630e-01 L12_sharp:2.7413e-01 total_fnorm:8.1500e+01 total_l1_linf:1.4029e+05 total_spectral:4.0750e+01 L1_fnorm:1.3184e-02 L2_fnorm:1.3489e-02 L3_fnorm:1.3611e-02 L4_fnorm:1.3611e-02 L5_fnorm:1.3794e-02 L6_fnorm:1.3794e-02 L7_fnorm:1.3733e-02 L8_fnorm:1.3550e-02 L9_fnorm:1.3672e-02 L10_fnorm:1.3733e-02 L11_fnorm:1.3611e-02 L12_fnorm:1.3611e-02 L1_l1linf:2.0752e-03 L2_l1linf:2.1057e-03 L3_l1linf:2.3651e-03 L4_l1linf:2.4872e-03 L5_l1linf:2.6245e-03 
L6_l1linf:2.7466e-03 L7_l1linf:2.8229e-03 L8_l1linf:3.0212e-03 L9_l1linf:2.8992e-03 L10_l1linf:3.0670e-03 L11_l1linf:2.9755e-03 L12_l1linf:3.0670e-03 L1_spectral:2.6170e-04 L2_spectral:2.6285e-04 L3_spectral:2.6275e-04 L4_spectral:2.5977e-04 L5_spectral:2.6344e-04 L6_spectral:2.6151e-04 L7_spectral:2.5925e-04 L8_spectral:2.5879e-04 L9_spectral:2.5765e-04 L10_spectral:2.5707e-04 L11_spectral:2.6027e-04 L12_spectral:2.5659e-04 train_time:355202ms step_avg:42.29ms +[2025-09-11 09:49:52] [Rank 0] step:8401/10000 train_time:356960ms step_avg:42.49ms +[2025-09-11 09:49:52] [Rank 0] step:8401/10000 train_time:356960ms step_avg:42.49ms +[2025-09-11 09:49:53] [Rank 0] step:8421/10000 train_time:357864ms step_avg:42.50ms +[2025-09-11 09:49:53] [Rank 0] step:8421/10000 train_time:357864ms step_avg:42.50ms +[2025-09-11 09:49:53] [Rank 0] step:8441/10000 train_time:358575ms step_avg:42.48ms +[2025-09-11 09:49:53] [Rank 0] step:8441/10000 train_time:358575ms step_avg:42.48ms +[2025-09-11 09:49:54] [Rank 0] step:8461/10000 train_time:359286ms step_avg:42.46ms +[2025-09-11 09:49:54] [Rank 0] step:8461/10000 train_time:359286ms step_avg:42.46ms +[2025-09-11 09:49:55] [Rank 0] step:8481/10000 train_time:359997ms step_avg:42.45ms +[2025-09-11 09:49:55] [Rank 0] step:8481/10000 train_time:359997ms step_avg:42.45ms +[2025-09-11 09:49:56] [Rank 0] step:8501/10000 train_time:360706ms step_avg:42.43ms +[2025-09-11 09:49:56] [Rank 0] step:8501/10000 train_time:360706ms step_avg:42.43ms +[2025-09-11 09:49:56] [Rank 0] step:8521/10000 train_time:361415ms step_avg:42.41ms +[2025-09-11 09:49:56] [Rank 0] step:8521/10000 train_time:361415ms step_avg:42.41ms +[2025-09-11 09:49:57] [Rank 0] step:8541/10000 train_time:362126ms step_avg:42.40ms +[2025-09-11 09:49:57] [Rank 0] step:8541/10000 train_time:362126ms step_avg:42.40ms +[2025-09-11 09:49:58] [Rank 0] step:8561/10000 train_time:362841ms step_avg:42.38ms +[2025-09-11 09:49:58] [Rank 0] step:8561/10000 train_time:362841ms step_avg:42.38ms 
+[2025-09-11 09:49:58] [Rank 0] step:8581/10000 train_time:363554ms step_avg:42.37ms +[2025-09-11 09:49:58] [Rank 0] step:8581/10000 train_time:363554ms step_avg:42.37ms +[2025-09-11 09:49:59] [Rank 0] step:8601/10000 train_time:364266ms step_avg:42.35ms +[2025-09-11 09:49:59] [Rank 0] step:8601/10000 train_time:364266ms step_avg:42.35ms +[2025-09-11 09:50:00] [Rank 0] step:8621/10000 train_time:364975ms step_avg:42.34ms +[2025-09-11 09:50:00] [Rank 0] step:8621/10000 train_time:364975ms step_avg:42.34ms +[2025-09-11 09:50:01] [Rank 0] step:8641/10000 train_time:365685ms step_avg:42.32ms +[2025-09-11 09:50:01] [Rank 0] step:8641/10000 train_time:365685ms step_avg:42.32ms +[2025-09-11 09:50:01] [Rank 0] step:8661/10000 train_time:366453ms step_avg:42.31ms +[2025-09-11 09:50:01] [Rank 0] step:8661/10000 train_time:366453ms step_avg:42.31ms +[2025-09-11 09:50:02] [Rank 0] step:8681/10000 train_time:367221ms step_avg:42.30ms +[2025-09-11 09:50:02] [Rank 0] step:8681/10000 train_time:367221ms step_avg:42.30ms +[2025-09-11 09:50:03] [Rank 0] step:8701/10000 train_time:367931ms step_avg:42.29ms +[2025-09-11 09:50:03] [Rank 0] step:8701/10000 train_time:367931ms step_avg:42.29ms +[2025-09-11 09:50:04] [Rank 0] step:8721/10000 train_time:368644ms step_avg:42.27ms +[2025-09-11 09:50:04] [Rank 0] step:8721/10000 train_time:368644ms step_avg:42.27ms +[2025-09-11 09:50:04] [Rank 0] step:8741/10000 train_time:369349ms step_avg:42.25ms +[2025-09-11 09:50:04] [Rank 0] step:8741/10000 train_time:369349ms step_avg:42.25ms +[2025-09-11 09:50:05] [Rank 0] step:8761/10000 train_time:370062ms step_avg:42.24ms +[2025-09-11 09:50:05] [Rank 0] step:8761/10000 train_time:370062ms step_avg:42.24ms +[2025-09-11 09:50:06] [Rank 0] step:8781/10000 train_time:370769ms step_avg:42.22ms +[2025-09-11 09:50:06] [Rank 0] step:8781/10000 train_time:370769ms step_avg:42.22ms +[2025-09-11 09:50:06] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 09:50:06] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 09:50:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 09:50:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 09:50:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 09:50:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 09:50:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:50:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:50:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 09:50:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 09:50:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 09:50:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 09:50:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 09:50:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 09:50:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 09:50:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 09:50:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 09:50:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 09:50:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 09:50:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 09:50:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 09:50:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 09:50:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 09:50:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 09:50:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 09:50:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 09:50:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 09:50:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 09:50:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 09:50:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 09:50:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 09:50:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 09:50:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 09:50:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 09:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 09:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 09:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 09:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 09:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 09:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 09:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 09:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 09:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 09:50:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 09:50:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:50:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:50:17] [Rank 0] PRINT: step:8800/10000 val_loss:4.8041 total_sharp:3.3874e-05 L1_sharp:4.8652e-02 L2_sharp:6.8447e-02 L3_sharp:9.8680e-02 L4_sharp:1.2346e-01 L5_sharp:1.4329e-01 L6_sharp:1.4877e-01 L7_sharp:1.7953e-01 L8_sharp:1.8993e-01 L9_sharp:2.1208e-01 L10_sharp:2.5518e-01 L11_sharp:2.9150e-01 L12_sharp:2.7552e-01 total_fnorm:6.0750e+01 total_l1_linf:9.5744e+04 total_spectral:3.0375e+01 L1_fnorm:9.2773e-03 L2_fnorm:9.4604e-03 L3_fnorm:9.5825e-03 L4_fnorm:9.5825e-03 L5_fnorm:9.7046e-03 L6_fnorm:9.6436e-03 L7_fnorm:9.5825e-03 L8_fnorm:9.5215e-03 L9_fnorm:9.5825e-03 L10_fnorm:9.5825e-03 L11_fnorm:9.5215e-03 L12_fnorm:9.5215e-03 L1_l1linf:1.2741e-03 L2_l1linf:1.3351e-03 L3_l1linf:1.5182e-03 L4_l1linf:1.6327e-03 L5_l1linf:1.6174e-03 L6_l1linf:1.6632e-03 L7_l1linf:1.7471e-03 L8_l1linf:1.8463e-03 L9_l1linf:1.8463e-03 L10_l1linf:1.8539e-03 L11_l1linf:1.7776e-03 L12_l1linf:1.8539e-03 L1_spectral:1.8873e-04 L2_spectral:1.8679e-04 L3_spectral:1.8801e-04 L4_spectral:1.8545e-04 L5_spectral:1.9179e-04 L6_spectral:1.8859e-04 L7_spectral:1.8491e-04 L8_spectral:1.8299e-04 L9_spectral:1.8468e-04 L10_spectral:1.8759e-04 L11_spectral:1.8710e-04 L12_spectral:1.8589e-04 train_time:371458ms step_avg:42.21ms +[2025-09-11 09:50:17] [Rank 0] PRINT: step:8800/10000 
val_loss:4.8041 total_sharp:3.3874e-05 L1_sharp:4.8652e-02 L2_sharp:6.8447e-02 L3_sharp:9.8680e-02 L4_sharp:1.2346e-01 L5_sharp:1.4329e-01 L6_sharp:1.4877e-01 L7_sharp:1.7953e-01 L8_sharp:1.8993e-01 L9_sharp:2.1208e-01 L10_sharp:2.5518e-01 L11_sharp:2.9150e-01 L12_sharp:2.7552e-01 total_fnorm:6.0750e+01 total_l1_linf:9.5744e+04 total_spectral:3.0375e+01 L1_fnorm:9.2773e-03 L2_fnorm:9.4604e-03 L3_fnorm:9.5825e-03 L4_fnorm:9.5825e-03 L5_fnorm:9.7046e-03 L6_fnorm:9.6436e-03 L7_fnorm:9.5825e-03 L8_fnorm:9.5215e-03 L9_fnorm:9.5825e-03 L10_fnorm:9.5825e-03 L11_fnorm:9.5215e-03 L12_fnorm:9.5215e-03 L1_l1linf:1.2741e-03 L2_l1linf:1.3351e-03 L3_l1linf:1.5182e-03 L4_l1linf:1.6327e-03 L5_l1linf:1.6174e-03 L6_l1linf:1.6632e-03 L7_l1linf:1.7471e-03 L8_l1linf:1.8463e-03 L9_l1linf:1.8463e-03 L10_l1linf:1.8539e-03 L11_l1linf:1.7776e-03 L12_l1linf:1.8539e-03 L1_spectral:1.8873e-04 L2_spectral:1.8679e-04 L3_spectral:1.8801e-04 L4_spectral:1.8545e-04 L5_spectral:1.9179e-04 L6_spectral:1.8859e-04 L7_spectral:1.8491e-04 L8_spectral:1.8299e-04 L9_spectral:1.8468e-04 L10_spectral:1.8759e-04 L11_spectral:1.8710e-04 L12_spectral:1.8589e-04 train_time:371458ms step_avg:42.21ms +[2025-09-11 09:50:19] [Rank 0] step:8801/10000 train_time:373243ms step_avg:42.41ms +[2025-09-11 09:50:19] [Rank 0] step:8801/10000 train_time:373243ms step_avg:42.41ms +[2025-09-11 09:50:20] [Rank 0] step:8821/10000 train_time:373979ms step_avg:42.40ms +[2025-09-11 09:50:20] [Rank 0] step:8821/10000 train_time:373979ms step_avg:42.40ms +[2025-09-11 09:50:21] [Rank 0] step:8841/10000 train_time:374690ms step_avg:42.38ms +[2025-09-11 09:50:21] [Rank 0] step:8841/10000 train_time:374690ms step_avg:42.38ms +[2025-09-11 09:50:21] [Rank 0] step:8861/10000 train_time:375401ms step_avg:42.37ms +[2025-09-11 09:50:21] [Rank 0] step:8861/10000 train_time:375401ms step_avg:42.37ms +[2025-09-11 09:50:22] [Rank 0] step:8881/10000 train_time:376113ms step_avg:42.35ms +[2025-09-11 09:50:22] [Rank 0] step:8881/10000 
train_time:376113ms step_avg:42.35ms +[2025-09-11 09:50:23] [Rank 0] step:8901/10000 train_time:376826ms step_avg:42.34ms +[2025-09-11 09:50:23] [Rank 0] step:8901/10000 train_time:376826ms step_avg:42.34ms +[2025-09-11 09:50:24] [Rank 0] step:8921/10000 train_time:377534ms step_avg:42.32ms +[2025-09-11 09:50:24] [Rank 0] step:8921/10000 train_time:377534ms step_avg:42.32ms +[2025-09-11 09:50:24] [Rank 0] step:8941/10000 train_time:378250ms step_avg:42.31ms +[2025-09-11 09:50:24] [Rank 0] step:8941/10000 train_time:378250ms step_avg:42.31ms +[2025-09-11 09:50:25] [Rank 0] step:8961/10000 train_time:378970ms step_avg:42.29ms +[2025-09-11 09:50:25] [Rank 0] step:8961/10000 train_time:378970ms step_avg:42.29ms +[2025-09-11 09:50:26] [Rank 0] step:8981/10000 train_time:379685ms step_avg:42.28ms +[2025-09-11 09:50:26] [Rank 0] step:8981/10000 train_time:379685ms step_avg:42.28ms +[2025-09-11 09:50:26] [Rank 0] step:9001/10000 train_time:380391ms step_avg:42.26ms +[2025-09-11 09:50:26] [Rank 0] step:9001/10000 train_time:380391ms step_avg:42.26ms +[2025-09-11 09:50:27] [Rank 0] step:9021/10000 train_time:381103ms step_avg:42.25ms +[2025-09-11 09:50:27] [Rank 0] step:9021/10000 train_time:381103ms step_avg:42.25ms +[2025-09-11 09:50:28] [Rank 0] step:9041/10000 train_time:381817ms step_avg:42.23ms +[2025-09-11 09:50:28] [Rank 0] step:9041/10000 train_time:381817ms step_avg:42.23ms +[2025-09-11 09:50:29] [Rank 0] step:9061/10000 train_time:382526ms step_avg:42.22ms +[2025-09-11 09:50:29] [Rank 0] step:9061/10000 train_time:382526ms step_avg:42.22ms +[2025-09-11 09:50:29] [Rank 0] step:9081/10000 train_time:383239ms step_avg:42.20ms +[2025-09-11 09:50:29] [Rank 0] step:9081/10000 train_time:383239ms step_avg:42.20ms +[2025-09-11 09:50:30] [Rank 0] step:9101/10000 train_time:383956ms step_avg:42.19ms +[2025-09-11 09:50:30] [Rank 0] step:9101/10000 train_time:383956ms step_avg:42.19ms +[2025-09-11 09:50:31] [Rank 0] step:9121/10000 train_time:384671ms step_avg:42.17ms 
+[2025-09-11 09:50:31] [Rank 0] step:9121/10000 train_time:384671ms step_avg:42.17ms +[2025-09-11 09:50:31] [Rank 0] step:9141/10000 train_time:385380ms step_avg:42.16ms +[2025-09-11 09:50:31] [Rank 0] step:9141/10000 train_time:385380ms step_avg:42.16ms +[2025-09-11 09:50:32] [Rank 0] step:9161/10000 train_time:386095ms step_avg:42.15ms +[2025-09-11 09:50:32] [Rank 0] step:9161/10000 train_time:386095ms step_avg:42.15ms +[2025-09-11 09:50:33] [Rank 0] step:9181/10000 train_time:386809ms step_avg:42.13ms +[2025-09-11 09:50:33] [Rank 0] step:9181/10000 train_time:386809ms step_avg:42.13ms +[2025-09-11 09:50:33] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 09:50:33] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 09:50:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 09:50:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 09:50:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 09:50:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 09:50:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:50:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:50:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 09:50:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 09:50:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 09:50:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 09:50:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 09:50:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 09:50:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 09:50:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 09:50:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 09:50:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 09:50:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 09:50:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 09:50:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 09:50:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 09:50:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 09:50:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 09:50:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 09:50:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 09:50:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 09:50:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 09:50:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 09:50:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 09:50:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 09:50:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 09:50:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 09:50:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 09:50:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 09:50:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 09:50:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 09:50:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 09:50:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 09:50:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 09:50:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 09:50:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 09:50:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 09:50:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 09:50:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:50:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:50:44] [Rank 0] PRINT: step:9200/10000 val_loss:4.7763 total_sharp:3.2579e-05 L1_sharp:4.9932e-02 L2_sharp:7.8702e-02 L3_sharp:1.0453e-01 L4_sharp:1.5117e-01 L5_sharp:1.6390e-01 L6_sharp:1.8754e-01 L7_sharp:2.8047e-01 L8_sharp:3.7062e-01 L9_sharp:5.1889e-01 L10_sharp:8.7956e-01 L11_sharp:9.0072e-01 L12_sharp:8.0919e-01 total_fnorm:4.6250e+01 total_l1_linf:6.5024e+04 total_spectral:2.3000e+01 L1_fnorm:6.0120e-03 L2_fnorm:6.1646e-03 L3_fnorm:6.2866e-03 L4_fnorm:6.3171e-03 L5_fnorm:6.4392e-03 L6_fnorm:6.4087e-03 L7_fnorm:6.3477e-03 L8_fnorm:6.3171e-03 L9_fnorm:6.3782e-03 L10_fnorm:6.3782e-03 L11_fnorm:6.3171e-03 L12_fnorm:6.3171e-03 L1_l1linf:7.0572e-04 L2_l1linf:8.0490e-04 L3_l1linf:9.2697e-04 L4_l1linf:9.8419e-04 L5_l1linf:1.0605e-03 L6_l1linf:1.0529e-03 L7_l1linf:1.0986e-03 L8_l1linf:1.1139e-03 L9_l1linf:1.1368e-03 L10_l1linf:1.2054e-03 L11_l1linf:1.1978e-03 L12_l1linf:1.1749e-03 L1_spectral:1.2174e-04 L2_spectral:1.2223e-04 L3_spectral:1.2231e-04 L4_spectral:1.2347e-04 L5_spectral:1.2561e-04 L6_spectral:1.2255e-04 L7_spectral:1.2033e-04 L8_spectral:1.1928e-04 L9_spectral:1.2105e-04 L10_spectral:1.2245e-04 L11_spectral:1.2115e-04 L12_spectral:1.1869e-04 train_time:387504ms step_avg:42.12ms +[2025-09-11 09:50:44] [Rank 0] PRINT: step:9200/10000 val_loss:4.7763 total_sharp:3.2579e-05 L1_sharp:4.9932e-02 L2_sharp:7.8702e-02 L3_sharp:1.0453e-01 L4_sharp:1.5117e-01 L5_sharp:1.6390e-01 L6_sharp:1.8754e-01 L7_sharp:2.8047e-01 L8_sharp:3.7062e-01 L9_sharp:5.1889e-01 L10_sharp:8.7956e-01 L11_sharp:9.0072e-01 L12_sharp:8.0919e-01 total_fnorm:4.6250e+01 total_l1_linf:6.5024e+04 total_spectral:2.3000e+01 L1_fnorm:6.0120e-03 L2_fnorm:6.1646e-03 L3_fnorm:6.2866e-03 L4_fnorm:6.3171e-03 L5_fnorm:6.4392e-03 L6_fnorm:6.4087e-03 L7_fnorm:6.3477e-03 L8_fnorm:6.3171e-03 L9_fnorm:6.3782e-03 L10_fnorm:6.3782e-03 L11_fnorm:6.3171e-03 L12_fnorm:6.3171e-03 L1_l1linf:7.0572e-04 L2_l1linf:8.0490e-04 L3_l1linf:9.2697e-04 L4_l1linf:9.8419e-04 L5_l1linf:1.0605e-03 
L6_l1linf:1.0529e-03 L7_l1linf:1.0986e-03 L8_l1linf:1.1139e-03 L9_l1linf:1.1368e-03 L10_l1linf:1.2054e-03 L11_l1linf:1.1978e-03 L12_l1linf:1.1749e-03 L1_spectral:1.2174e-04 L2_spectral:1.2223e-04 L3_spectral:1.2231e-04 L4_spectral:1.2347e-04 L5_spectral:1.2561e-04 L6_spectral:1.2255e-04 L7_spectral:1.2033e-04 L8_spectral:1.1928e-04 L9_spectral:1.2105e-04 L10_spectral:1.2245e-04 L11_spectral:1.2115e-04 L12_spectral:1.1869e-04 train_time:387504ms step_avg:42.12ms +[2025-09-11 09:50:46] [Rank 0] step:9201/10000 train_time:389306ms step_avg:42.31ms +[2025-09-11 09:50:46] [Rank 0] step:9201/10000 train_time:389306ms step_avg:42.31ms +[2025-09-11 09:50:47] [Rank 0] step:9221/10000 train_time:390021ms step_avg:42.30ms +[2025-09-11 09:50:47] [Rank 0] step:9221/10000 train_time:390021ms step_avg:42.30ms +[2025-09-11 09:50:48] [Rank 0] step:9241/10000 train_time:390731ms step_avg:42.28ms +[2025-09-11 09:50:48] [Rank 0] step:9241/10000 train_time:390731ms step_avg:42.28ms +[2025-09-11 09:50:48] [Rank 0] step:9261/10000 train_time:391445ms step_avg:42.27ms +[2025-09-11 09:50:48] [Rank 0] step:9261/10000 train_time:391445ms step_avg:42.27ms +[2025-09-11 09:50:49] [Rank 0] step:9281/10000 train_time:392160ms step_avg:42.25ms +[2025-09-11 09:50:49] [Rank 0] step:9281/10000 train_time:392160ms step_avg:42.25ms +[2025-09-11 09:50:50] [Rank 0] step:9301/10000 train_time:392870ms step_avg:42.24ms +[2025-09-11 09:50:50] [Rank 0] step:9301/10000 train_time:392870ms step_avg:42.24ms +[2025-09-11 09:50:50] [Rank 0] step:9321/10000 train_time:393584ms step_avg:42.23ms +[2025-09-11 09:50:50] [Rank 0] step:9321/10000 train_time:393584ms step_avg:42.23ms +[2025-09-11 09:50:51] [Rank 0] step:9341/10000 train_time:394294ms step_avg:42.21ms +[2025-09-11 09:50:51] [Rank 0] step:9341/10000 train_time:394294ms step_avg:42.21ms +[2025-09-11 09:50:52] [Rank 0] step:9361/10000 train_time:395001ms step_avg:42.20ms +[2025-09-11 09:50:52] [Rank 0] step:9361/10000 train_time:395001ms step_avg:42.20ms 
+[2025-09-11 09:50:53] [Rank 0] step:9381/10000 train_time:395840ms step_avg:42.20ms +[2025-09-11 09:50:53] [Rank 0] step:9381/10000 train_time:395840ms step_avg:42.20ms +[2025-09-11 09:50:54] [Rank 0] step:9401/10000 train_time:396673ms step_avg:42.19ms +[2025-09-11 09:50:54] [Rank 0] step:9401/10000 train_time:396673ms step_avg:42.19ms +[2025-09-11 09:50:54] [Rank 0] step:9421/10000 train_time:397388ms step_avg:42.18ms +[2025-09-11 09:50:54] [Rank 0] step:9421/10000 train_time:397388ms step_avg:42.18ms +[2025-09-11 09:50:55] [Rank 0] step:9441/10000 train_time:398103ms step_avg:42.17ms +[2025-09-11 09:50:55] [Rank 0] step:9441/10000 train_time:398103ms step_avg:42.17ms +[2025-09-11 09:50:56] [Rank 0] step:9461/10000 train_time:399081ms step_avg:42.18ms +[2025-09-11 09:50:56] [Rank 0] step:9461/10000 train_time:399081ms step_avg:42.18ms +[2025-09-11 09:50:57] [Rank 0] step:9481/10000 train_time:399794ms step_avg:42.17ms +[2025-09-11 09:50:57] [Rank 0] step:9481/10000 train_time:399794ms step_avg:42.17ms +[2025-09-11 09:50:57] [Rank 0] step:9501/10000 train_time:400509ms step_avg:42.15ms +[2025-09-11 09:50:57] [Rank 0] step:9501/10000 train_time:400509ms step_avg:42.15ms +[2025-09-11 09:50:58] [Rank 0] step:9521/10000 train_time:401224ms step_avg:42.14ms +[2025-09-11 09:50:58] [Rank 0] step:9521/10000 train_time:401224ms step_avg:42.14ms +[2025-09-11 09:50:59] [Rank 0] step:9541/10000 train_time:401934ms step_avg:42.13ms +[2025-09-11 09:50:59] [Rank 0] step:9541/10000 train_time:401934ms step_avg:42.13ms +[2025-09-11 09:50:59] [Rank 0] step:9561/10000 train_time:402647ms step_avg:42.11ms +[2025-09-11 09:50:59] [Rank 0] step:9561/10000 train_time:402647ms step_avg:42.11ms +[2025-09-11 09:51:00] [Rank 0] step:9581/10000 train_time:403362ms step_avg:42.10ms +[2025-09-11 09:51:00] [Rank 0] step:9581/10000 train_time:403362ms step_avg:42.10ms +[2025-09-11 09:51:01] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 09:51:01] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 09:51:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 09:51:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 09:51:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 09:51:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 09:51:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:51:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:51:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 09:51:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 09:51:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 09:51:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 09:51:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 09:51:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 09:51:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 09:51:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 09:51:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 09:51:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 09:51:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 09:51:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 09:51:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 09:51:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 09:51:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 09:51:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 09:51:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 09:51:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 09:51:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 09:51:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 09:51:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 09:51:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 09:51:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 09:51:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 09:51:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 09:51:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 09:51:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 09:51:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 09:51:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 09:51:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 09:51:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 09:51:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 09:51:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 09:51:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 09:51:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 09:51:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 09:51:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:51:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:51:15] [Rank 0] PRINT: step:9600/10000 val_loss:4.7590 total_sharp:2.7771e-05 L1_sharp:5.6166e-02 L2_sharp:8.4877e-02 L3_sharp:9.6694e-02 L4_sharp:1.4087e-01 L5_sharp:1.7779e-01 L6_sharp:1.8725e-01 L7_sharp:1.9014e-01 L8_sharp:1.9957e-01 L9_sharp:1.9291e-01 L10_sharp:2.2582e-01 L11_sharp:2.0578e-01 L12_sharp:1.9707e-01 total_fnorm:2.6625e+01 total_l1_linf:3.2128e+04 total_spectral:1.3375e+01 L1_fnorm:3.2501e-03 L2_fnorm:3.4027e-03 L3_fnorm:3.4790e-03 L4_fnorm:3.5248e-03 L5_fnorm:3.6011e-03 L6_fnorm:3.5706e-03 L7_fnorm:3.5706e-03 L8_fnorm:3.5248e-03 L9_fnorm:3.5553e-03 L10_fnorm:3.5400e-03 L11_fnorm:3.5248e-03 L12_fnorm:3.4637e-03 L1_l1linf:3.1853e-04 L2_l1linf:3.8338e-04 L3_l1linf:4.2725e-04 L4_l1linf:5.0354e-04 L5_l1linf:5.2261e-04 L6_l1linf:5.1880e-04 L7_l1linf:5.4932e-04 L8_l1linf:5.4550e-04 L9_l1linf:5.6076e-04 L10_l1linf:5.9128e-04 L11_l1linf:5.6458e-04 L12_l1linf:5.6458e-04 L1_spectral:6.6507e-05 L2_spectral:6.8832e-05 L3_spectral:6.8875e-05 L4_spectral:6.8135e-05 L5_spectral:6.9778e-05 L6_spectral:6.9141e-05 L7_spectral:6.8502e-05 L8_spectral:6.7355e-05 L9_spectral:6.8313e-05 L10_spectral:6.6907e-05 L11_spectral:6.8345e-05 L12_spectral:6.5917e-05 train_time:404053ms step_avg:42.09ms +[2025-09-11 09:51:15] [Rank 0] PRINT: step:9600/10000 
val_loss:4.7590 total_sharp:2.7771e-05 L1_sharp:5.6166e-02 L2_sharp:8.4877e-02 L3_sharp:9.6694e-02 L4_sharp:1.4087e-01 L5_sharp:1.7779e-01 L6_sharp:1.8725e-01 L7_sharp:1.9014e-01 L8_sharp:1.9957e-01 L9_sharp:1.9291e-01 L10_sharp:2.2582e-01 L11_sharp:2.0578e-01 L12_sharp:1.9707e-01 total_fnorm:2.6625e+01 total_l1_linf:3.2128e+04 total_spectral:1.3375e+01 L1_fnorm:3.2501e-03 L2_fnorm:3.4027e-03 L3_fnorm:3.4790e-03 L4_fnorm:3.5248e-03 L5_fnorm:3.6011e-03 L6_fnorm:3.5706e-03 L7_fnorm:3.5706e-03 L8_fnorm:3.5248e-03 L9_fnorm:3.5553e-03 L10_fnorm:3.5400e-03 L11_fnorm:3.5248e-03 L12_fnorm:3.4637e-03 L1_l1linf:3.1853e-04 L2_l1linf:3.8338e-04 L3_l1linf:4.2725e-04 L4_l1linf:5.0354e-04 L5_l1linf:5.2261e-04 L6_l1linf:5.1880e-04 L7_l1linf:5.4932e-04 L8_l1linf:5.4550e-04 L9_l1linf:5.6076e-04 L10_l1linf:5.9128e-04 L11_l1linf:5.6458e-04 L12_l1linf:5.6458e-04 L1_spectral:6.6507e-05 L2_spectral:6.8832e-05 L3_spectral:6.8875e-05 L4_spectral:6.8135e-05 L5_spectral:6.9778e-05 L6_spectral:6.9141e-05 L7_spectral:6.8502e-05 L8_spectral:6.7355e-05 L9_spectral:6.8313e-05 L10_spectral:6.6907e-05 L11_spectral:6.8345e-05 L12_spectral:6.5917e-05 train_time:404053ms step_avg:42.09ms +[2025-09-11 09:51:17] [Rank 0] step:9601/10000 train_time:405850ms step_avg:42.27ms +[2025-09-11 09:51:17] [Rank 0] step:9601/10000 train_time:405850ms step_avg:42.27ms +[2025-09-11 09:51:18] [Rank 0] step:9621/10000 train_time:406579ms step_avg:42.26ms +[2025-09-11 09:51:18] [Rank 0] step:9621/10000 train_time:406579ms step_avg:42.26ms +[2025-09-11 09:51:19] [Rank 0] step:9641/10000 train_time:407296ms step_avg:42.25ms +[2025-09-11 09:51:19] [Rank 0] step:9641/10000 train_time:407296ms step_avg:42.25ms +[2025-09-11 09:51:19] [Rank 0] step:9661/10000 train_time:408020ms step_avg:42.23ms +[2025-09-11 09:51:19] [Rank 0] step:9661/10000 train_time:408020ms step_avg:42.23ms +[2025-09-11 09:51:20] [Rank 0] step:9681/10000 train_time:408736ms step_avg:42.22ms +[2025-09-11 09:51:20] [Rank 0] step:9681/10000 
train_time:408736ms step_avg:42.22ms +[2025-09-11 09:51:21] [Rank 0] step:9701/10000 train_time:409453ms step_avg:42.21ms +[2025-09-11 09:51:21] [Rank 0] step:9701/10000 train_time:409453ms step_avg:42.21ms +[2025-09-11 09:51:21] [Rank 0] step:9721/10000 train_time:410176ms step_avg:42.19ms +[2025-09-11 09:51:21] [Rank 0] step:9721/10000 train_time:410176ms step_avg:42.19ms +[2025-09-11 09:51:22] [Rank 0] step:9741/10000 train_time:410895ms step_avg:42.18ms +[2025-09-11 09:51:22] [Rank 0] step:9741/10000 train_time:410895ms step_avg:42.18ms +[2025-09-11 09:51:23] [Rank 0] step:9761/10000 train_time:411614ms step_avg:42.17ms +[2025-09-11 09:51:23] [Rank 0] step:9761/10000 train_time:411614ms step_avg:42.17ms +[2025-09-11 09:51:24] [Rank 0] step:9781/10000 train_time:412332ms step_avg:42.16ms +[2025-09-11 09:51:24] [Rank 0] step:9781/10000 train_time:412332ms step_avg:42.16ms +[2025-09-11 09:51:24] [Rank 0] step:9801/10000 train_time:413055ms step_avg:42.14ms +[2025-09-11 09:51:24] [Rank 0] step:9801/10000 train_time:413055ms step_avg:42.14ms +[2025-09-11 09:51:25] [Rank 0] step:9821/10000 train_time:413775ms step_avg:42.13ms +[2025-09-11 09:51:25] [Rank 0] step:9821/10000 train_time:413775ms step_avg:42.13ms +[2025-09-11 09:51:26] [Rank 0] step:9841/10000 train_time:414498ms step_avg:42.12ms +[2025-09-11 09:51:26] [Rank 0] step:9841/10000 train_time:414498ms step_avg:42.12ms +[2025-09-11 09:51:26] [Rank 0] step:9861/10000 train_time:415215ms step_avg:42.11ms +[2025-09-11 09:51:26] [Rank 0] step:9861/10000 train_time:415215ms step_avg:42.11ms +[2025-09-11 09:51:27] [Rank 0] step:9881/10000 train_time:415936ms step_avg:42.09ms +[2025-09-11 09:51:27] [Rank 0] step:9881/10000 train_time:415936ms step_avg:42.09ms +[2025-09-11 09:51:28] [Rank 0] step:9901/10000 train_time:416652ms step_avg:42.08ms +[2025-09-11 09:51:28] [Rank 0] step:9901/10000 train_time:416652ms step_avg:42.08ms +[2025-09-11 09:51:29] [Rank 0] step:9921/10000 train_time:417370ms step_avg:42.07ms 
+[2025-09-11 09:51:29] [Rank 0] step:9921/10000 train_time:417370ms step_avg:42.07ms +[2025-09-11 09:51:29] [Rank 0] step:9941/10000 train_time:418093ms step_avg:42.06ms +[2025-09-11 09:51:29] [Rank 0] step:9941/10000 train_time:418093ms step_avg:42.06ms +[2025-09-11 09:51:30] [Rank 0] step:9961/10000 train_time:418819ms step_avg:42.05ms +[2025-09-11 09:51:30] [Rank 0] step:9961/10000 train_time:418819ms step_avg:42.05ms +[2025-09-11 09:51:31] [Rank 0] step:9981/10000 train_time:419543ms step_avg:42.03ms +[2025-09-11 09:51:31] [Rank 0] step:9981/10000 train_time:419543ms step_avg:42.03ms +[2025-09-11 09:51:31] [Rank 0] step:10000/10000 train_time:420236ms step_avg:42.02ms +[2025-09-11 09:51:31] [Rank 0] step:10000/10000 train_time:420236ms step_avg:42.02ms +[2025-09-11 09:51:31] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 09:51:31] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 09:51:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 09:51:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 09:51:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 09:51:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 09:51:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:51:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:51:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 09:51:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 09:51:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 09:51:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 09:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 09:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 09:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 09:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 09:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 09:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 09:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 09:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 09:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 09:51:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 09:51:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 09:51:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 09:51:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 09:51:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 09:51:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 09:51:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 09:51:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 09:51:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 09:51:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 09:51:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 09:51:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 09:51:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 09:51:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 09:51:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 09:51:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 09:51:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 09:51:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 09:51:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 09:51:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 09:51:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 09:51:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 09:51:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 09:51:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:51:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:51:43] [Rank 0] PRINT: step:10000/10000 val_loss:4.7609 total_sharp:2.0956e-05 L1_sharp:4.3085e-02 L2_sharp:5.2831e-02 L3_sharp:6.2872e-02 L4_sharp:8.7467e-02 L5_sharp:1.0337e-01 L6_sharp:1.2711e-01 L7_sharp:1.3268e-01 L8_sharp:1.3841e-01 L9_sharp:1.4910e-01 L10_sharp:1.5917e-01 L11_sharp:1.6541e-01 L12_sharp:1.9973e-01 total_fnorm:1.0688e+01 total_l1_linf:9.4080e+03 total_spectral:5.3438e+00 L1_fnorm:1.2589e-03 L2_fnorm:1.3123e-03 L3_fnorm:1.3504e-03 L4_fnorm:1.3580e-03 L5_fnorm:1.3885e-03 L6_fnorm:1.3733e-03 L7_fnorm:1.3733e-03 L8_fnorm:1.3504e-03 L9_fnorm:1.3657e-03 L10_fnorm:1.3733e-03 L11_fnorm:1.3504e-03 L12_fnorm:1.3351e-03 L1_l1linf:1.0347e-04 L2_l1linf:1.1683e-04 L3_l1linf:1.3542e-04 L4_l1linf:1.6308e-04 L5_l1linf:1.6880e-04 L6_l1linf:1.5259e-04 L7_l1linf:1.6594e-04 L8_l1linf:1.5926e-04 L9_l1linf:1.7166e-04 L10_l1linf:1.7071e-04 L11_l1linf:1.9169e-04 L12_l1linf:1.8120e-04 L1_spectral:2.7235e-05 L2_spectral:2.8115e-05 L3_spectral:2.7256e-05 L4_spectral:2.8103e-05 L5_spectral:2.8151e-05 L6_spectral:2.7267e-05 L7_spectral:2.7570e-05 L8_spectral:2.6607e-05 L9_spectral:2.6978e-05 L10_spectral:2.7555e-05 L11_spectral:2.6937e-05 L12_spectral:2.7552e-05 train_time:420267ms step_avg:42.03ms +[2025-09-11 09:51:43] [Rank 0] PRINT: step:10000/10000 val_loss:4.7609 total_sharp:2.0956e-05 L1_sharp:4.3085e-02 L2_sharp:5.2831e-02 L3_sharp:6.2872e-02 L4_sharp:8.7467e-02 L5_sharp:1.0337e-01 L6_sharp:1.2711e-01 L7_sharp:1.3268e-01 L8_sharp:1.3841e-01 L9_sharp:1.4910e-01 L10_sharp:1.5917e-01 L11_sharp:1.6541e-01 L12_sharp:1.9973e-01 total_fnorm:1.0688e+01 total_l1_linf:9.4080e+03 total_spectral:5.3438e+00 L1_fnorm:1.2589e-03 L2_fnorm:1.3123e-03 L3_fnorm:1.3504e-03 L4_fnorm:1.3580e-03 L5_fnorm:1.3885e-03 L6_fnorm:1.3733e-03 L7_fnorm:1.3733e-03 L8_fnorm:1.3504e-03 L9_fnorm:1.3657e-03 L10_fnorm:1.3733e-03 L11_fnorm:1.3504e-03 L12_fnorm:1.3351e-03 L1_l1linf:1.0347e-04 L2_l1linf:1.1683e-04 L3_l1linf:1.3542e-04 L4_l1linf:1.6308e-04 L5_l1linf:1.6880e-04 
L6_l1linf:1.5259e-04 L7_l1linf:1.6594e-04 L8_l1linf:1.5926e-04 L9_l1linf:1.7166e-04 L10_l1linf:1.7071e-04 L11_l1linf:1.9169e-04 L12_l1linf:1.8120e-04 L1_spectral:2.7235e-05 L2_spectral:2.8115e-05 L3_spectral:2.7256e-05 L4_spectral:2.8103e-05 L5_spectral:2.8151e-05 L6_spectral:2.7267e-05 L7_spectral:2.7570e-05 L8_spectral:2.6607e-05 L9_spectral:2.6978e-05 L10_spectral:2.7555e-05 L11_spectral:2.6937e-05 L12_spectral:2.7552e-05 train_time:420267ms step_avg:42.03ms +[2025-09-11 09:51:43] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 09:51:43 2025 --- +[2025-09-11 09:51:43] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 09:51:43 2025 --- +[2025-09-11 09:51:43] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 09:51:43] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.001_seed_42/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.001_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..7fdad525943c6d8356c08932195d2659d8f69c54 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.001_seed_42/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.05, + "muon_lr": 0.001, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "64caf775-f308-40fe-8f61-ee33b2fa457c", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.001_seed_42/training_log_64caf775-f308-40fe-8f61-ee33b2fa457c.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.001_seed_42/training_log_64caf775-f308-40fe-8f61-ee33b2fa457c.txt new file mode 100644 index 0000000000000000000000000000000000000000..f100c3efd10aba2ed85aabdf39f86d0a8c0dddef --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.001_seed_42/training_log_64caf775-f308-40fe-8f61-ee33b2fa457c.txt @@ -0,0 +1,4264 @@ +[2025-09-11 09:23:51] [Rank 0] PRINT: --- Script Start: Thu Sep 11 09:23:51 2025 --- +[2025-09-11 09:23:51] [Rank 0] PRINT: --- Script Start: Thu Sep 11 09:23:51 2025 --- +[2025-09-11 09:23:52] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.05, muon_lr=0.001, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 09:23:52] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.05, muon_lr=0.001, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 09:23:52] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 09:23:52] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 09:23:52] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-11 09:23:52] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-11 09:23:52] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.001_seed_42 +[2025-09-11 09:23:52] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.001_seed_42 +[2025-09-11 09:23:52] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import 
dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, 
"unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." 
+ "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention 
sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 09:23:52] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 09:23:52] [Rank 0] PRINT: Constructing model... +[2025-09-11 09:23:52] [Rank 0] PRINT: Constructing model... +[2025-09-11 09:23:53] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 09:23:53] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 09:23:53] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 09:23:53] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 09:23:53] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 09:23:53] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 09:23:53] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 09:23:53] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 09:23:53] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 09:23:53] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 09:23:55] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 09:23:55] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 09:23:55] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 09:23:55] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 09:23:55] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 09:23:55] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 09:24:01] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 09:24:01] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 09:24:01] [Rank 0] PRINT: Starting warmup... +[2025-09-11 09:24:01] [Rank 0] PRINT: Starting warmup... +[2025-09-11 09:24:46] [Rank 0] PRINT: Warmup complete. +[2025-09-11 09:24:46] [Rank 0] PRINT: Warmup complete. +[2025-09-11 09:24:46] [Rank 0] PRINT: Starting training... +[2025-09-11 09:24:46] [Rank 0] PRINT: Starting training... 
+[2025-09-11 09:24:47] [Rank 0] step:21/10000 train_time:1132ms step_avg:53.92ms +[2025-09-11 09:24:47] [Rank 0] step:21/10000 train_time:1132ms step_avg:53.92ms +[2025-09-11 09:24:48] [Rank 0] step:41/10000 train_time:1861ms step_avg:45.39ms +[2025-09-11 09:24:48] [Rank 0] step:41/10000 train_time:1861ms step_avg:45.39ms +[2025-09-11 09:24:48] [Rank 0] step:61/10000 train_time:2591ms step_avg:42.47ms +[2025-09-11 09:24:48] [Rank 0] step:61/10000 train_time:2591ms step_avg:42.47ms +[2025-09-11 09:24:49] [Rank 0] step:81/10000 train_time:3318ms step_avg:40.97ms +[2025-09-11 09:24:49] [Rank 0] step:81/10000 train_time:3318ms step_avg:40.97ms +[2025-09-11 09:24:50] [Rank 0] step:101/10000 train_time:4046ms step_avg:40.06ms +[2025-09-11 09:24:50] [Rank 0] step:101/10000 train_time:4046ms step_avg:40.06ms +[2025-09-11 09:24:50] [Rank 0] step:121/10000 train_time:4775ms step_avg:39.46ms +[2025-09-11 09:24:50] [Rank 0] step:121/10000 train_time:4775ms step_avg:39.46ms +[2025-09-11 09:24:51] [Rank 0] step:141/10000 train_time:5503ms step_avg:39.03ms +[2025-09-11 09:24:51] [Rank 0] step:141/10000 train_time:5503ms step_avg:39.03ms +[2025-09-11 09:24:52] [Rank 0] step:161/10000 train_time:6230ms step_avg:38.70ms +[2025-09-11 09:24:52] [Rank 0] step:161/10000 train_time:6230ms step_avg:38.70ms +[2025-09-11 09:24:53] [Rank 0] step:181/10000 train_time:6959ms step_avg:38.45ms +[2025-09-11 09:24:53] [Rank 0] step:181/10000 train_time:6959ms step_avg:38.45ms +[2025-09-11 09:24:53] [Rank 0] step:201/10000 train_time:7687ms step_avg:38.24ms +[2025-09-11 09:24:53] [Rank 0] step:201/10000 train_time:7687ms step_avg:38.24ms +[2025-09-11 09:24:54] [Rank 0] step:221/10000 train_time:8415ms step_avg:38.08ms +[2025-09-11 09:24:54] [Rank 0] step:221/10000 train_time:8415ms step_avg:38.08ms +[2025-09-11 09:24:55] [Rank 0] step:241/10000 train_time:9142ms step_avg:37.93ms +[2025-09-11 09:24:55] [Rank 0] step:241/10000 train_time:9142ms step_avg:37.93ms +[2025-09-11 09:24:56] [Rank 0] 
step:261/10000 train_time:9870ms step_avg:37.82ms +[2025-09-11 09:24:56] [Rank 0] step:261/10000 train_time:9870ms step_avg:37.82ms +[2025-09-11 09:24:56] [Rank 0] step:281/10000 train_time:10599ms step_avg:37.72ms +[2025-09-11 09:24:56] [Rank 0] step:281/10000 train_time:10599ms step_avg:37.72ms +[2025-09-11 09:24:57] [Rank 0] step:301/10000 train_time:11326ms step_avg:37.63ms +[2025-09-11 09:24:57] [Rank 0] step:301/10000 train_time:11326ms step_avg:37.63ms +[2025-09-11 09:24:58] [Rank 0] step:321/10000 train_time:12054ms step_avg:37.55ms +[2025-09-11 09:24:58] [Rank 0] step:321/10000 train_time:12054ms step_avg:37.55ms +[2025-09-11 09:24:58] [Rank 0] step:341/10000 train_time:12783ms step_avg:37.49ms +[2025-09-11 09:24:58] [Rank 0] step:341/10000 train_time:12783ms step_avg:37.49ms +[2025-09-11 09:24:59] [Rank 0] step:361/10000 train_time:13510ms step_avg:37.42ms +[2025-09-11 09:24:59] [Rank 0] step:361/10000 train_time:13510ms step_avg:37.42ms +[2025-09-11 09:25:00] [Rank 0] step:381/10000 train_time:14238ms step_avg:37.37ms +[2025-09-11 09:25:00] [Rank 0] step:381/10000 train_time:14238ms step_avg:37.37ms +[2025-09-11 09:25:01] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 09:25:01] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 09:25:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 09:25:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 09:25:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 09:25:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 09:25:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:25:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 09:25:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 09:25:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 09:25:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 09:25:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 09:25:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 09:25:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 09:25:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 09:25:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 09:25:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 09:25:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 09:25:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 09:25:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 09:25:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 09:25:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 09:25:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 09:25:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 09:25:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 09:25:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 09:25:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 09:25:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 09:25:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 09:25:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 09:25:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 09:25:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 09:25:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 09:25:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 09:25:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 09:25:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 09:25:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 09:25:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 09:25:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 09:25:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 09:25:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 09:25:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 09:25:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 09:25:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 09:25:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:25:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:25:56] [Rank 0] PRINT: step:400/10000 val_loss:6.1179 total_sharp:1.8910e-04 L1_sharp:7.2883e-02 L2_sharp:7.6315e-02 L3_sharp:6.3800e-02 L4_sharp:8.1249e-02 L5_sharp:9.8184e-02 L6_sharp:1.0457e-01 L7_sharp:1.4848e-01 L8_sharp:1.4781e-01 L9_sharp:2.2343e-01 L10_sharp:2.3681e-01 L11_sharp:2.6727e-01 L12_sharp:3.6057e-01 total_fnorm:1.8275e+02 total_l1_linf:4.5082e+05 total_spectral:9.1393e+01 L1_fnorm:1.2016e-01 L2_fnorm:1.2003e-01 L3_fnorm:1.2010e-01 L4_fnorm:1.2057e-01 L5_fnorm:1.2138e-01 L6_fnorm:1.2146e-01 L7_fnorm:1.2259e-01 L8_fnorm:1.2194e-01 L9_fnorm:1.2217e-01 L10_fnorm:1.2242e-01 L11_fnorm:1.2168e-01 L12_fnorm:1.2085e-01 L1_l1linf:4.4765e-02 L2_l1linf:4.4306e-02 L3_l1linf:4.4511e-02 L4_l1linf:4.4307e-02 L5_l1linf:4.4553e-02 L6_l1linf:4.4282e-02 L7_l1linf:4.4947e-02 L8_l1linf:4.4884e-02 L9_l1linf:4.4738e-02 L10_l1linf:4.4495e-02 L11_l1linf:4.5100e-02 L12_l1linf:4.4733e-02 L1_spectral:1.2049e-03 L2_spectral:1.2055e-03 L3_spectral:1.2049e-03 L4_spectral:1.2059e-03 L5_spectral:1.2052e-03 L6_spectral:1.2052e-03 L7_spectral:1.2056e-03 L8_spectral:1.2057e-03 L9_spectral:1.2057e-03 L10_spectral:1.2057e-03 L11_spectral:1.2056e-03 L12_spectral:1.2053e-03 train_time:14946ms step_avg:37.36ms +[2025-09-11 09:25:56] [Rank 0] PRINT: step:400/10000 val_loss:6.1179 total_sharp:1.8910e-04 L1_sharp:7.2883e-02 L2_sharp:7.6315e-02 L3_sharp:6.3800e-02 L4_sharp:8.1249e-02 L5_sharp:9.8184e-02 L6_sharp:1.0457e-01 L7_sharp:1.4848e-01 L8_sharp:1.4781e-01 L9_sharp:2.2343e-01 L10_sharp:2.3681e-01 L11_sharp:2.6727e-01 L12_sharp:3.6057e-01 total_fnorm:1.8275e+02 total_l1_linf:4.5082e+05 total_spectral:9.1393e+01 L1_fnorm:1.2016e-01 L2_fnorm:1.2003e-01 L3_fnorm:1.2010e-01 L4_fnorm:1.2057e-01 L5_fnorm:1.2138e-01 L6_fnorm:1.2146e-01 L7_fnorm:1.2259e-01 L8_fnorm:1.2194e-01 L9_fnorm:1.2217e-01 L10_fnorm:1.2242e-01 L11_fnorm:1.2168e-01 L12_fnorm:1.2085e-01 L1_l1linf:4.4765e-02 L2_l1linf:4.4306e-02 L3_l1linf:4.4511e-02 L4_l1linf:4.4307e-02 L5_l1linf:4.4553e-02 
L6_l1linf:4.4282e-02 L7_l1linf:4.4947e-02 L8_l1linf:4.4884e-02 L9_l1linf:4.4738e-02 L10_l1linf:4.4495e-02 L11_l1linf:4.5100e-02 L12_l1linf:4.4733e-02 L1_spectral:1.2049e-03 L2_spectral:1.2055e-03 L3_spectral:1.2049e-03 L4_spectral:1.2059e-03 L5_spectral:1.2052e-03 L6_spectral:1.2052e-03 L7_spectral:1.2056e-03 L8_spectral:1.2057e-03 L9_spectral:1.2057e-03 L10_spectral:1.2057e-03 L11_spectral:1.2056e-03 L12_spectral:1.2053e-03 train_time:14946ms step_avg:37.36ms +[2025-09-11 09:26:29] [Rank 0] step:401/10000 train_time:47744ms step_avg:119.06ms +[2025-09-11 09:26:29] [Rank 0] step:401/10000 train_time:47744ms step_avg:119.06ms +[2025-09-11 09:26:32] [Rank 0] step:421/10000 train_time:50008ms step_avg:118.78ms +[2025-09-11 09:26:32] [Rank 0] step:421/10000 train_time:50008ms step_avg:118.78ms +[2025-09-11 09:26:32] [Rank 0] step:441/10000 train_time:50648ms step_avg:114.85ms +[2025-09-11 09:26:32] [Rank 0] step:441/10000 train_time:50648ms step_avg:114.85ms +[2025-09-11 09:26:33] [Rank 0] step:461/10000 train_time:51288ms step_avg:111.25ms +[2025-09-11 09:26:33] [Rank 0] step:461/10000 train_time:51288ms step_avg:111.25ms +[2025-09-11 09:26:33] [Rank 0] step:481/10000 train_time:51927ms step_avg:107.96ms +[2025-09-11 09:26:33] [Rank 0] step:481/10000 train_time:51927ms step_avg:107.96ms +[2025-09-11 09:26:34] [Rank 0] step:501/10000 train_time:52565ms step_avg:104.92ms +[2025-09-11 09:26:34] [Rank 0] step:501/10000 train_time:52565ms step_avg:104.92ms +[2025-09-11 09:26:35] [Rank 0] step:521/10000 train_time:53206ms step_avg:102.12ms +[2025-09-11 09:26:35] [Rank 0] step:521/10000 train_time:53206ms step_avg:102.12ms +[2025-09-11 09:26:35] [Rank 0] step:541/10000 train_time:53845ms step_avg:99.53ms +[2025-09-11 09:26:35] [Rank 0] step:541/10000 train_time:53845ms step_avg:99.53ms +[2025-09-11 09:26:36] [Rank 0] step:561/10000 train_time:54484ms step_avg:97.12ms +[2025-09-11 09:26:36] [Rank 0] step:561/10000 train_time:54484ms step_avg:97.12ms +[2025-09-11 09:26:37] 
[Rank 0] step:581/10000 train_time:55124ms step_avg:94.88ms +[2025-09-11 09:26:37] [Rank 0] step:581/10000 train_time:55124ms step_avg:94.88ms +[2025-09-11 09:26:37] [Rank 0] step:601/10000 train_time:55763ms step_avg:92.78ms +[2025-09-11 09:26:37] [Rank 0] step:601/10000 train_time:55763ms step_avg:92.78ms +[2025-09-11 09:26:38] [Rank 0] step:621/10000 train_time:56659ms step_avg:91.24ms +[2025-09-11 09:26:38] [Rank 0] step:621/10000 train_time:56659ms step_avg:91.24ms +[2025-09-11 09:26:39] [Rank 0] step:641/10000 train_time:57301ms step_avg:89.39ms +[2025-09-11 09:26:39] [Rank 0] step:641/10000 train_time:57301ms step_avg:89.39ms +[2025-09-11 09:26:39] [Rank 0] step:661/10000 train_time:57939ms step_avg:87.65ms +[2025-09-11 09:26:39] [Rank 0] step:661/10000 train_time:57939ms step_avg:87.65ms +[2025-09-11 09:26:40] [Rank 0] step:681/10000 train_time:58578ms step_avg:86.02ms +[2025-09-11 09:26:40] [Rank 0] step:681/10000 train_time:58578ms step_avg:86.02ms +[2025-09-11 09:26:41] [Rank 0] step:701/10000 train_time:59501ms step_avg:84.88ms +[2025-09-11 09:26:41] [Rank 0] step:701/10000 train_time:59501ms step_avg:84.88ms +[2025-09-11 09:26:42] [Rank 0] step:721/10000 train_time:60140ms step_avg:83.41ms +[2025-09-11 09:26:42] [Rank 0] step:721/10000 train_time:60140ms step_avg:83.41ms +[2025-09-11 09:26:42] [Rank 0] step:741/10000 train_time:60779ms step_avg:82.02ms +[2025-09-11 09:26:42] [Rank 0] step:741/10000 train_time:60779ms step_avg:82.02ms +[2025-09-11 09:26:43] [Rank 0] step:761/10000 train_time:61423ms step_avg:80.71ms +[2025-09-11 09:26:43] [Rank 0] step:761/10000 train_time:61423ms step_avg:80.71ms +[2025-09-11 09:26:44] [Rank 0] step:781/10000 train_time:62068ms step_avg:79.47ms +[2025-09-11 09:26:44] [Rank 0] step:781/10000 train_time:62068ms step_avg:79.47ms +[2025-09-11 09:26:44] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 09:26:44] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 09:26:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 09:26:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 09:27:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 09:27:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 09:27:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:27:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:27:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 09:27:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 09:27:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 09:27:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 09:27:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 09:27:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 09:27:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 09:27:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 09:27:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 09:27:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 09:27:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 09:27:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 09:27:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 09:27:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 09:27:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 09:27:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 09:27:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 09:27:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 09:27:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 09:27:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 09:27:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 09:27:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 09:27:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 09:27:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 09:27:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 09:27:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 09:27:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 09:27:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 09:27:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 09:27:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 09:27:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... 
+[2025-09-11 09:27:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 09:27:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 09:27:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 09:27:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 09:27:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 09:27:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:27:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:27:33] [Rank 0] PRINT: step:800/10000 val_loss:5.7012 total_sharp:1.4011e-04 L1_sharp:9.2283e-02 L2_sharp:7.7881e-02 L3_sharp:7.9874e-02 L4_sharp:9.8224e-02 L5_sharp:1.3308e-01 L6_sharp:1.6760e-01 L7_sharp:2.3347e-01 L8_sharp:4.1144e-01 L9_sharp:5.4502e-01 L10_sharp:6.0182e-01 L11_sharp:8.1072e-01 L12_sharp:1.1644e+00 total_fnorm:1.9000e+02 total_l1_linf:4.2803e+05 total_spectral:9.5000e+01 L1_fnorm:1.1377e-01 L2_fnorm:1.1621e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1621e-01 L5_fnorm:1.1572e-01 L6_fnorm:1.1670e-01 L7_fnorm:1.1670e-01 L8_fnorm:1.1182e-01 L9_fnorm:1.1279e-01 L10_fnorm:1.0889e-01 L11_fnorm:1.0547e-01 L12_fnorm:9.8145e-02 L1_l1linf:4.3701e-02 L2_l1linf:4.3457e-02 L3_l1linf:4.3457e-02 L4_l1linf:4.3213e-02 L5_l1linf:4.3457e-02 L6_l1linf:4.3701e-02 L7_l1linf:4.3457e-02 L8_l1linf:4.3457e-02 L9_l1linf:4.3213e-02 L10_l1linf:4.2480e-02 L11_l1linf:4.1992e-02 L12_l1linf:4.1016e-02 L1_spectral:1.6031e-03 L2_spectral:1.5984e-03 L3_spectral:1.5947e-03 L4_spectral:1.6017e-03 L5_spectral:1.5825e-03 L6_spectral:1.5953e-03 L7_spectral:1.5805e-03 L8_spectral:1.5724e-03 L9_spectral:1.5807e-03 L10_spectral:1.5802e-03 L11_spectral:1.5476e-03 L12_spectral:1.5044e-03 train_time:62694ms step_avg:78.37ms +[2025-09-11 09:27:33] [Rank 0] PRINT: step:800/10000 val_loss:5.7012 
total_sharp:1.4011e-04 L1_sharp:9.2283e-02 L2_sharp:7.7881e-02 L3_sharp:7.9874e-02 L4_sharp:9.8224e-02 L5_sharp:1.3308e-01 L6_sharp:1.6760e-01 L7_sharp:2.3347e-01 L8_sharp:4.1144e-01 L9_sharp:5.4502e-01 L10_sharp:6.0182e-01 L11_sharp:8.1072e-01 L12_sharp:1.1644e+00 total_fnorm:1.9000e+02 total_l1_linf:4.2803e+05 total_spectral:9.5000e+01 L1_fnorm:1.1377e-01 L2_fnorm:1.1621e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1621e-01 L5_fnorm:1.1572e-01 L6_fnorm:1.1670e-01 L7_fnorm:1.1670e-01 L8_fnorm:1.1182e-01 L9_fnorm:1.1279e-01 L10_fnorm:1.0889e-01 L11_fnorm:1.0547e-01 L12_fnorm:9.8145e-02 L1_l1linf:4.3701e-02 L2_l1linf:4.3457e-02 L3_l1linf:4.3457e-02 L4_l1linf:4.3213e-02 L5_l1linf:4.3457e-02 L6_l1linf:4.3701e-02 L7_l1linf:4.3457e-02 L8_l1linf:4.3457e-02 L9_l1linf:4.3213e-02 L10_l1linf:4.2480e-02 L11_l1linf:4.1992e-02 L12_l1linf:4.1016e-02 L1_spectral:1.6031e-03 L2_spectral:1.5984e-03 L3_spectral:1.5947e-03 L4_spectral:1.6017e-03 L5_spectral:1.5825e-03 L6_spectral:1.5953e-03 L7_spectral:1.5805e-03 L8_spectral:1.5724e-03 L9_spectral:1.5807e-03 L10_spectral:1.5802e-03 L11_spectral:1.5476e-03 L12_spectral:1.5044e-03 train_time:62694ms step_avg:78.37ms +[2025-09-11 09:27:34] [Rank 0] step:801/10000 train_time:64258ms step_avg:80.22ms +[2025-09-11 09:27:34] [Rank 0] step:801/10000 train_time:64258ms step_avg:80.22ms +[2025-09-11 09:27:35] [Rank 0] step:821/10000 train_time:64910ms step_avg:79.06ms +[2025-09-11 09:27:35] [Rank 0] step:821/10000 train_time:64910ms step_avg:79.06ms +[2025-09-11 09:27:36] [Rank 0] step:841/10000 train_time:65556ms step_avg:77.95ms +[2025-09-11 09:27:36] [Rank 0] step:841/10000 train_time:65556ms step_avg:77.95ms +[2025-09-11 09:27:36] [Rank 0] step:861/10000 train_time:66202ms step_avg:76.89ms +[2025-09-11 09:27:36] [Rank 0] step:861/10000 train_time:66202ms step_avg:76.89ms +[2025-09-11 09:27:37] [Rank 0] step:881/10000 train_time:66846ms step_avg:75.88ms +[2025-09-11 09:27:37] [Rank 0] step:881/10000 train_time:66846ms step_avg:75.88ms +[2025-09-11 
09:27:38] [Rank 0] step:901/10000 train_time:67492ms step_avg:74.91ms +[2025-09-11 09:27:38] [Rank 0] step:901/10000 train_time:67492ms step_avg:74.91ms +[2025-09-11 09:27:38] [Rank 0] step:921/10000 train_time:68136ms step_avg:73.98ms +[2025-09-11 09:27:38] [Rank 0] step:921/10000 train_time:68136ms step_avg:73.98ms +[2025-09-11 09:27:39] [Rank 0] step:941/10000 train_time:68781ms step_avg:73.09ms +[2025-09-11 09:27:39] [Rank 0] step:941/10000 train_time:68781ms step_avg:73.09ms +[2025-09-11 09:27:39] [Rank 0] step:961/10000 train_time:69427ms step_avg:72.24ms +[2025-09-11 09:27:39] [Rank 0] step:961/10000 train_time:69427ms step_avg:72.24ms +[2025-09-11 09:27:40] [Rank 0] step:981/10000 train_time:70070ms step_avg:71.43ms +[2025-09-11 09:27:40] [Rank 0] step:981/10000 train_time:70070ms step_avg:71.43ms +[2025-09-11 09:27:41] [Rank 0] step:1001/10000 train_time:70870ms step_avg:70.80ms +[2025-09-11 09:27:41] [Rank 0] step:1001/10000 train_time:70870ms step_avg:70.80ms +[2025-09-11 09:27:42] [Rank 0] step:1021/10000 train_time:71665ms step_avg:70.19ms +[2025-09-11 09:27:42] [Rank 0] step:1021/10000 train_time:71665ms step_avg:70.19ms +[2025-09-11 09:27:42] [Rank 0] step:1041/10000 train_time:72309ms step_avg:69.46ms +[2025-09-11 09:27:42] [Rank 0] step:1041/10000 train_time:72309ms step_avg:69.46ms +[2025-09-11 09:27:43] [Rank 0] step:1061/10000 train_time:72954ms step_avg:68.76ms +[2025-09-11 09:27:43] [Rank 0] step:1061/10000 train_time:72954ms step_avg:68.76ms +[2025-09-11 09:27:44] [Rank 0] step:1081/10000 train_time:73918ms step_avg:68.38ms +[2025-09-11 09:27:44] [Rank 0] step:1081/10000 train_time:73918ms step_avg:68.38ms +[2025-09-11 09:27:45] [Rank 0] step:1101/10000 train_time:74562ms step_avg:67.72ms +[2025-09-11 09:27:45] [Rank 0] step:1101/10000 train_time:74562ms step_avg:67.72ms +[2025-09-11 09:27:45] [Rank 0] step:1121/10000 train_time:75207ms step_avg:67.09ms +[2025-09-11 09:27:45] [Rank 0] step:1121/10000 train_time:75207ms step_avg:67.09ms 
+[2025-09-11 09:27:46] [Rank 0] step:1141/10000 train_time:75852ms step_avg:66.48ms +[2025-09-11 09:27:46] [Rank 0] step:1141/10000 train_time:75852ms step_avg:66.48ms +[2025-09-11 09:27:47] [Rank 0] step:1161/10000 train_time:76497ms step_avg:65.89ms +[2025-09-11 09:27:47] [Rank 0] step:1161/10000 train_time:76497ms step_avg:65.89ms +[2025-09-11 09:27:47] [Rank 0] step:1181/10000 train_time:77141ms step_avg:65.32ms +[2025-09-11 09:27:47] [Rank 0] step:1181/10000 train_time:77141ms step_avg:65.32ms +[2025-09-11 09:27:48] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 09:27:48] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 09:27:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 09:27:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 09:27:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 09:27:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 09:27:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:27:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:27:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 09:27:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 09:27:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 09:27:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 09:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... 
+[2025-09-11 09:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 09:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 09:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 09:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 09:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 09:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 09:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 09:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 09:27:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 09:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 09:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 09:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 09:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 09:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 09:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 09:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 09:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 09:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 09:27:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... 
+[2025-09-11 09:27:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 09:27:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 09:27:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 09:27:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 09:27:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 09:27:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 09:27:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 09:27:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 09:27:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 09:27:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 09:27:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 09:27:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 09:27:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:27:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:27:58] [Rank 0] PRINT: step:1200/10000 val_loss:5.4362 total_sharp:1.0935e-04 L1_sharp:8.5349e-02 L2_sharp:7.2633e-02 L3_sharp:7.5222e-02 L4_sharp:8.0886e-02 L5_sharp:1.0170e-01 L6_sharp:1.0094e-01 L7_sharp:1.9459e-01 L8_sharp:1.9565e-01 L9_sharp:2.7580e-01 L10_sharp:5.5557e-01 L11_sharp:9.7365e-01 L12_sharp:2.1328e+00 total_fnorm:1.9500e+02 total_l1_linf:4.3008e+05 total_spectral:9.7500e+01 L1_fnorm:1.1621e-01 L2_fnorm:1.1670e-01 L3_fnorm:1.1670e-01 L4_fnorm:1.1670e-01 L5_fnorm:1.1670e-01 L6_fnorm:1.1719e-01 L7_fnorm:1.1865e-01 L8_fnorm:1.1621e-01 L9_fnorm:1.1816e-01 L10_fnorm:1.1865e-01 L11_fnorm:1.1768e-01 L12_fnorm:1.1426e-01 L1_l1linf:4.1504e-02 L2_l1linf:4.1016e-02 L3_l1linf:4.1016e-02 L4_l1linf:4.0283e-02 L5_l1linf:4.0527e-02 L6_l1linf:4.0283e-02 L7_l1linf:4.0527e-02 L8_l1linf:4.1016e-02 L9_l1linf:4.0771e-02 L10_l1linf:4.1992e-02 L11_l1linf:4.1992e-02 L12_l1linf:4.2480e-02 L1_spectral:1.6094e-03 L2_spectral:1.6078e-03 L3_spectral:1.6181e-03 L4_spectral:1.5976e-03 L5_spectral:1.6024e-03 L6_spectral:1.5885e-03 L7_spectral:1.6036e-03 L8_spectral:1.5835e-03 L9_spectral:1.5962e-03 L10_spectral:1.6113e-03 L11_spectral:1.6067e-03 L12_spectral:1.5745e-03 train_time:77768ms step_avg:64.81ms +[2025-09-11 09:27:58] [Rank 0] PRINT: step:1200/10000 val_loss:5.4362 total_sharp:1.0935e-04 L1_sharp:8.5349e-02 L2_sharp:7.2633e-02 L3_sharp:7.5222e-02 L4_sharp:8.0886e-02 L5_sharp:1.0170e-01 L6_sharp:1.0094e-01 L7_sharp:1.9459e-01 L8_sharp:1.9565e-01 L9_sharp:2.7580e-01 L10_sharp:5.5557e-01 L11_sharp:9.7365e-01 L12_sharp:2.1328e+00 total_fnorm:1.9500e+02 total_l1_linf:4.3008e+05 total_spectral:9.7500e+01 L1_fnorm:1.1621e-01 L2_fnorm:1.1670e-01 L3_fnorm:1.1670e-01 L4_fnorm:1.1670e-01 L5_fnorm:1.1670e-01 L6_fnorm:1.1719e-01 L7_fnorm:1.1865e-01 L8_fnorm:1.1621e-01 L9_fnorm:1.1816e-01 L10_fnorm:1.1865e-01 L11_fnorm:1.1768e-01 L12_fnorm:1.1426e-01 L1_l1linf:4.1504e-02 L2_l1linf:4.1016e-02 L3_l1linf:4.1016e-02 L4_l1linf:4.0283e-02 L5_l1linf:4.0527e-02 
L6_l1linf:4.0283e-02 L7_l1linf:4.0527e-02 L8_l1linf:4.1016e-02 L9_l1linf:4.0771e-02 L10_l1linf:4.1992e-02 L11_l1linf:4.1992e-02 L12_l1linf:4.2480e-02 L1_spectral:1.6094e-03 L2_spectral:1.6078e-03 L3_spectral:1.6181e-03 L4_spectral:1.5976e-03 L5_spectral:1.6024e-03 L6_spectral:1.5885e-03 L7_spectral:1.6036e-03 L8_spectral:1.5835e-03 L9_spectral:1.5962e-03 L10_spectral:1.6113e-03 L11_spectral:1.6067e-03 L12_spectral:1.5745e-03 train_time:77768ms step_avg:64.81ms +[2025-09-11 09:28:00] [Rank 0] step:1201/10000 train_time:79352ms step_avg:66.07ms +[2025-09-11 09:28:00] [Rank 0] step:1201/10000 train_time:79352ms step_avg:66.07ms +[2025-09-11 09:28:01] [Rank 0] step:1221/10000 train_time:80000ms step_avg:65.52ms +[2025-09-11 09:28:01] [Rank 0] step:1221/10000 train_time:80000ms step_avg:65.52ms +[2025-09-11 09:28:01] [Rank 0] step:1241/10000 train_time:80646ms step_avg:64.99ms +[2025-09-11 09:28:01] [Rank 0] step:1241/10000 train_time:80646ms step_avg:64.99ms +[2025-09-11 09:28:02] [Rank 0] step:1261/10000 train_time:81293ms step_avg:64.47ms +[2025-09-11 09:28:02] [Rank 0] step:1261/10000 train_time:81293ms step_avg:64.47ms +[2025-09-11 09:28:03] [Rank 0] step:1281/10000 train_time:81938ms step_avg:63.96ms +[2025-09-11 09:28:03] [Rank 0] step:1281/10000 train_time:81938ms step_avg:63.96ms +[2025-09-11 09:28:03] [Rank 0] step:1301/10000 train_time:82583ms step_avg:63.48ms +[2025-09-11 09:28:03] [Rank 0] step:1301/10000 train_time:82583ms step_avg:63.48ms +[2025-09-11 09:28:04] [Rank 0] step:1321/10000 train_time:83227ms step_avg:63.00ms +[2025-09-11 09:28:04] [Rank 0] step:1321/10000 train_time:83227ms step_avg:63.00ms +[2025-09-11 09:28:05] [Rank 0] step:1341/10000 train_time:83871ms step_avg:62.54ms +[2025-09-11 09:28:05] [Rank 0] step:1341/10000 train_time:83871ms step_avg:62.54ms +[2025-09-11 09:28:05] [Rank 0] step:1361/10000 train_time:84515ms step_avg:62.10ms +[2025-09-11 09:28:05] [Rank 0] step:1361/10000 train_time:84515ms step_avg:62.10ms +[2025-09-11 09:28:06] 
[Rank 0] step:1381/10000 train_time:85159ms step_avg:61.66ms +[2025-09-11 09:28:06] [Rank 0] step:1381/10000 train_time:85159ms step_avg:61.66ms +[2025-09-11 09:28:07] [Rank 0] step:1401/10000 train_time:85803ms step_avg:61.24ms +[2025-09-11 09:28:07] [Rank 0] step:1401/10000 train_time:85803ms step_avg:61.24ms +[2025-09-11 09:28:07] [Rank 0] step:1421/10000 train_time:86447ms step_avg:60.84ms +[2025-09-11 09:28:07] [Rank 0] step:1421/10000 train_time:86447ms step_avg:60.84ms +[2025-09-11 09:28:08] [Rank 0] step:1441/10000 train_time:87091ms step_avg:60.44ms +[2025-09-11 09:28:08] [Rank 0] step:1441/10000 train_time:87091ms step_avg:60.44ms +[2025-09-11 09:28:08] [Rank 0] step:1461/10000 train_time:87734ms step_avg:60.05ms +[2025-09-11 09:28:08] [Rank 0] step:1461/10000 train_time:87734ms step_avg:60.05ms +[2025-09-11 09:28:09] [Rank 0] step:1481/10000 train_time:88377ms step_avg:59.67ms +[2025-09-11 09:28:09] [Rank 0] step:1481/10000 train_time:88377ms step_avg:59.67ms +[2025-09-11 09:28:10] [Rank 0] step:1501/10000 train_time:89025ms step_avg:59.31ms +[2025-09-11 09:28:10] [Rank 0] step:1501/10000 train_time:89025ms step_avg:59.31ms +[2025-09-11 09:28:10] [Rank 0] step:1521/10000 train_time:89673ms step_avg:58.96ms +[2025-09-11 09:28:10] [Rank 0] step:1521/10000 train_time:89673ms step_avg:58.96ms +[2025-09-11 09:28:11] [Rank 0] step:1541/10000 train_time:90321ms step_avg:58.61ms +[2025-09-11 09:28:11] [Rank 0] step:1541/10000 train_time:90321ms step_avg:58.61ms +[2025-09-11 09:28:12] [Rank 0] step:1561/10000 train_time:90971ms step_avg:58.28ms +[2025-09-11 09:28:12] [Rank 0] step:1561/10000 train_time:90971ms step_avg:58.28ms +[2025-09-11 09:28:12] [Rank 0] step:1581/10000 train_time:91619ms step_avg:57.95ms +[2025-09-11 09:28:12] [Rank 0] step:1581/10000 train_time:91619ms step_avg:57.95ms +[2025-09-11 09:28:13] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 09:28:13] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 09:28:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 09:28:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 09:28:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 09:28:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 09:28:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:28:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:28:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 09:28:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 09:28:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 09:28:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 09:28:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 09:28:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 09:28:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 09:28:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 09:28:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 09:28:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 09:28:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 09:28:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 09:28:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 09:28:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 09:28:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 09:28:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 09:28:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 09:28:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 09:28:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 09:28:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 09:28:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 09:28:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 09:28:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 09:28:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 09:28:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 09:28:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 09:28:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 09:28:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 09:28:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 09:28:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 09:28:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 09:28:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 09:28:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 09:28:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 09:28:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 09:28:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 09:28:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:28:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:28:23] [Rank 0] PRINT: step:1600/10000 val_loss:5.3076 total_sharp:9.1364e-05 L1_sharp:5.7349e-02 L2_sharp:5.7391e-02 L3_sharp:5.7694e-02 L4_sharp:6.5314e-02 L5_sharp:8.1838e-02 L6_sharp:7.0075e-02 L7_sharp:9.9979e-02 L8_sharp:1.2408e-01 L9_sharp:1.9238e-01 L10_sharp:2.7351e-01 L11_sharp:4.7870e-01 L12_sharp:1.4853e+00 total_fnorm:1.8900e+02 total_l1_linf:3.9731e+05 total_spectral:9.4000e+01 L1_fnorm:1.1621e-01 L2_fnorm:1.1719e-01 L3_fnorm:1.1719e-01 L4_fnorm:1.1670e-01 L5_fnorm:1.1621e-01 L6_fnorm:1.1719e-01 L7_fnorm:1.1768e-01 L8_fnorm:1.1621e-01 L9_fnorm:1.1816e-01 L10_fnorm:1.1816e-01 L11_fnorm:1.1816e-01 L12_fnorm:1.1572e-01 L1_l1linf:3.9307e-02 L2_l1linf:3.9551e-02 L3_l1linf:3.8574e-02 L4_l1linf:3.8574e-02 L5_l1linf:3.8330e-02 L6_l1linf:3.8574e-02 L7_l1linf:3.8818e-02 L8_l1linf:3.8574e-02 L9_l1linf:3.9551e-02 L10_l1linf:4.0283e-02 L11_l1linf:4.0039e-02 L12_l1linf:4.0771e-02 L1_spectral:1.6049e-03 L2_spectral:1.6047e-03 L3_spectral:1.6143e-03 L4_spectral:1.6089e-03 L5_spectral:1.6109e-03 L6_spectral:1.6006e-03 L7_spectral:1.6093e-03 L8_spectral:1.6005e-03 L9_spectral:1.6019e-03 L10_spectral:1.6056e-03 L11_spectral:1.5985e-03 L12_spectral:1.5974e-03 train_time:92249ms step_avg:57.66ms +[2025-09-11 09:28:23] [Rank 0] PRINT: step:1600/10000 
val_loss:5.3076 total_sharp:9.1364e-05 L1_sharp:5.7349e-02 L2_sharp:5.7391e-02 L3_sharp:5.7694e-02 L4_sharp:6.5314e-02 L5_sharp:8.1838e-02 L6_sharp:7.0075e-02 L7_sharp:9.9979e-02 L8_sharp:1.2408e-01 L9_sharp:1.9238e-01 L10_sharp:2.7351e-01 L11_sharp:4.7870e-01 L12_sharp:1.4853e+00 total_fnorm:1.8900e+02 total_l1_linf:3.9731e+05 total_spectral:9.4000e+01 L1_fnorm:1.1621e-01 L2_fnorm:1.1719e-01 L3_fnorm:1.1719e-01 L4_fnorm:1.1670e-01 L5_fnorm:1.1621e-01 L6_fnorm:1.1719e-01 L7_fnorm:1.1768e-01 L8_fnorm:1.1621e-01 L9_fnorm:1.1816e-01 L10_fnorm:1.1816e-01 L11_fnorm:1.1816e-01 L12_fnorm:1.1572e-01 L1_l1linf:3.9307e-02 L2_l1linf:3.9551e-02 L3_l1linf:3.8574e-02 L4_l1linf:3.8574e-02 L5_l1linf:3.8330e-02 L6_l1linf:3.8574e-02 L7_l1linf:3.8818e-02 L8_l1linf:3.8574e-02 L9_l1linf:3.9551e-02 L10_l1linf:4.0283e-02 L11_l1linf:4.0039e-02 L12_l1linf:4.0771e-02 L1_spectral:1.6049e-03 L2_spectral:1.6047e-03 L3_spectral:1.6143e-03 L4_spectral:1.6089e-03 L5_spectral:1.6109e-03 L6_spectral:1.6006e-03 L7_spectral:1.6093e-03 L8_spectral:1.6005e-03 L9_spectral:1.6019e-03 L10_spectral:1.6056e-03 L11_spectral:1.5985e-03 L12_spectral:1.5974e-03 train_time:92249ms step_avg:57.66ms +[2025-09-11 09:28:25] [Rank 0] step:1601/10000 train_time:93889ms step_avg:58.64ms +[2025-09-11 09:28:25] [Rank 0] step:1601/10000 train_time:93889ms step_avg:58.64ms +[2025-09-11 09:28:25] [Rank 0] step:1621/10000 train_time:94558ms step_avg:58.33ms +[2025-09-11 09:28:25] [Rank 0] step:1621/10000 train_time:94558ms step_avg:58.33ms +[2025-09-11 09:28:26] [Rank 0] step:1641/10000 train_time:95209ms step_avg:58.02ms +[2025-09-11 09:28:26] [Rank 0] step:1641/10000 train_time:95209ms step_avg:58.02ms +[2025-09-11 09:28:27] [Rank 0] step:1661/10000 train_time:95858ms step_avg:57.71ms +[2025-09-11 09:28:27] [Rank 0] step:1661/10000 train_time:95858ms step_avg:57.71ms +[2025-09-11 09:28:27] [Rank 0] step:1681/10000 train_time:96509ms step_avg:57.41ms +[2025-09-11 09:28:27] [Rank 0] step:1681/10000 train_time:96509ms 
step_avg:57.41ms +[2025-09-11 09:28:28] [Rank 0] step:1701/10000 train_time:97159ms step_avg:57.12ms +[2025-09-11 09:28:28] [Rank 0] step:1701/10000 train_time:97159ms step_avg:57.12ms +[2025-09-11 09:28:29] [Rank 0] step:1721/10000 train_time:97808ms step_avg:56.83ms +[2025-09-11 09:28:29] [Rank 0] step:1721/10000 train_time:97808ms step_avg:56.83ms +[2025-09-11 09:28:29] [Rank 0] step:1741/10000 train_time:98457ms step_avg:56.55ms +[2025-09-11 09:28:29] [Rank 0] step:1741/10000 train_time:98457ms step_avg:56.55ms +[2025-09-11 09:28:30] [Rank 0] step:1761/10000 train_time:99106ms step_avg:56.28ms +[2025-09-11 09:28:30] [Rank 0] step:1761/10000 train_time:99106ms step_avg:56.28ms +[2025-09-11 09:28:31] [Rank 0] step:1781/10000 train_time:99756ms step_avg:56.01ms +[2025-09-11 09:28:31] [Rank 0] step:1781/10000 train_time:99756ms step_avg:56.01ms +[2025-09-11 09:28:31] [Rank 0] step:1801/10000 train_time:100405ms step_avg:55.75ms +[2025-09-11 09:28:31] [Rank 0] step:1801/10000 train_time:100405ms step_avg:55.75ms +[2025-09-11 09:28:32] [Rank 0] step:1821/10000 train_time:101055ms step_avg:55.49ms +[2025-09-11 09:28:32] [Rank 0] step:1821/10000 train_time:101055ms step_avg:55.49ms +[2025-09-11 09:28:33] [Rank 0] step:1841/10000 train_time:101704ms step_avg:55.24ms +[2025-09-11 09:28:33] [Rank 0] step:1841/10000 train_time:101704ms step_avg:55.24ms +[2025-09-11 09:28:33] [Rank 0] step:1861/10000 train_time:102353ms step_avg:55.00ms +[2025-09-11 09:28:33] [Rank 0] step:1861/10000 train_time:102353ms step_avg:55.00ms +[2025-09-11 09:28:34] [Rank 0] step:1881/10000 train_time:103002ms step_avg:54.76ms +[2025-09-11 09:28:34] [Rank 0] step:1881/10000 train_time:103002ms step_avg:54.76ms +[2025-09-11 09:28:35] [Rank 0] step:1901/10000 train_time:103652ms step_avg:54.53ms +[2025-09-11 09:28:35] [Rank 0] step:1901/10000 train_time:103652ms step_avg:54.53ms +[2025-09-11 09:28:35] [Rank 0] step:1921/10000 train_time:104302ms step_avg:54.30ms +[2025-09-11 09:28:35] [Rank 0] 
step:1921/10000 train_time:104302ms step_avg:54.30ms +[2025-09-11 09:28:36] [Rank 0] step:1941/10000 train_time:104951ms step_avg:54.07ms +[2025-09-11 09:28:36] [Rank 0] step:1941/10000 train_time:104951ms step_avg:54.07ms +[2025-09-11 09:28:36] [Rank 0] step:1961/10000 train_time:105601ms step_avg:53.85ms +[2025-09-11 09:28:36] [Rank 0] step:1961/10000 train_time:105601ms step_avg:53.85ms +[2025-09-11 09:28:37] [Rank 0] step:1981/10000 train_time:106250ms step_avg:53.63ms +[2025-09-11 09:28:37] [Rank 0] step:1981/10000 train_time:106250ms step_avg:53.63ms +[2025-09-11 09:28:38] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 09:28:38] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 09:28:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 09:28:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 09:28:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 09:28:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 09:28:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:28:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:28:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 09:28:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 09:28:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 09:28:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 09:28:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 09:28:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 09:28:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 09:28:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 09:28:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 09:28:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 09:28:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 09:28:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 09:28:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 09:28:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 09:28:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 09:28:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 09:28:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 09:28:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 09:28:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 09:28:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 09:28:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 09:28:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 09:28:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 09:28:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 09:28:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 09:28:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 09:28:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 09:28:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 09:28:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 09:28:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 09:28:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 09:28:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 09:28:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 09:28:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 09:28:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 09:28:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 09:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:28:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:28:48] [Rank 0] PRINT: step:2000/10000 val_loss:5.1918 total_sharp:7.2601e-05 L1_sharp:4.0980e-02 L2_sharp:4.4386e-02 L3_sharp:5.3895e-02 L4_sharp:5.8588e-02 L5_sharp:6.9457e-02 L6_sharp:5.7885e-02 L7_sharp:9.6822e-02 L8_sharp:1.0072e-01 L9_sharp:1.8006e-01 L10_sharp:3.8323e-01 L11_sharp:6.5901e-01 L12_sharp:1.6170e+00 total_fnorm:1.8400e+02 total_l1_linf:4.0346e+05 total_spectral:9.2000e+01 L1_fnorm:1.1621e-01 L2_fnorm:1.1719e-01 L3_fnorm:1.1670e-01 L4_fnorm:1.1670e-01 L5_fnorm:1.1621e-01 L6_fnorm:1.1670e-01 L7_fnorm:1.1768e-01 L8_fnorm:1.1670e-01 L9_fnorm:1.1816e-01 L10_fnorm:1.1865e-01 L11_fnorm:1.1865e-01 L12_fnorm:1.1621e-01 L1_l1linf:3.7598e-02 L2_l1linf:3.7842e-02 L3_l1linf:3.7354e-02 L4_l1linf:3.7354e-02 L5_l1linf:3.7109e-02 L6_l1linf:3.6621e-02 L7_l1linf:3.7354e-02 L8_l1linf:3.7354e-02 L9_l1linf:3.7354e-02 L10_l1linf:3.8574e-02 L11_l1linf:3.9062e-02 L12_l1linf:3.9551e-02 L1_spectral:1.6039e-03 L2_spectral:1.6025e-03 L3_spectral:1.6073e-03 L4_spectral:1.6139e-03 L5_spectral:1.6053e-03 L6_spectral:1.6103e-03 L7_spectral:1.6114e-03 L8_spectral:1.6112e-03 L9_spectral:1.6039e-03 L10_spectral:1.6074e-03 L11_spectral:1.6075e-03 L12_spectral:1.6011e-03 train_time:106881ms step_avg:53.44ms +[2025-09-11 09:28:48] [Rank 0] PRINT: step:2000/10000 val_loss:5.1918 total_sharp:7.2601e-05 L1_sharp:4.0980e-02 L2_sharp:4.4386e-02 L3_sharp:5.3895e-02 L4_sharp:5.8588e-02 L5_sharp:6.9457e-02 L6_sharp:5.7885e-02 L7_sharp:9.6822e-02 L8_sharp:1.0072e-01 L9_sharp:1.8006e-01 L10_sharp:3.8323e-01 L11_sharp:6.5901e-01 L12_sharp:1.6170e+00 total_fnorm:1.8400e+02 total_l1_linf:4.0346e+05 total_spectral:9.2000e+01 L1_fnorm:1.1621e-01 L2_fnorm:1.1719e-01 L3_fnorm:1.1670e-01 L4_fnorm:1.1670e-01 L5_fnorm:1.1621e-01 L6_fnorm:1.1670e-01 L7_fnorm:1.1768e-01 L8_fnorm:1.1670e-01 L9_fnorm:1.1816e-01 L10_fnorm:1.1865e-01 L11_fnorm:1.1865e-01 L12_fnorm:1.1621e-01 L1_l1linf:3.7598e-02 L2_l1linf:3.7842e-02 L3_l1linf:3.7354e-02 L4_l1linf:3.7354e-02 L5_l1linf:3.7109e-02 
L6_l1linf:3.6621e-02 L7_l1linf:3.7354e-02 L8_l1linf:3.7354e-02 L9_l1linf:3.7354e-02 L10_l1linf:3.8574e-02 L11_l1linf:3.9062e-02 L12_l1linf:3.9551e-02 L1_spectral:1.6039e-03 L2_spectral:1.6025e-03 L3_spectral:1.6073e-03 L4_spectral:1.6139e-03 L5_spectral:1.6053e-03 L6_spectral:1.6103e-03 L7_spectral:1.6114e-03 L8_spectral:1.6112e-03 L9_spectral:1.6039e-03 L10_spectral:1.6074e-03 L11_spectral:1.6075e-03 L12_spectral:1.6011e-03 train_time:106881ms step_avg:53.44ms +[2025-09-11 09:28:50] [Rank 0] step:2001/10000 train_time:108610ms step_avg:54.28ms +[2025-09-11 09:28:50] [Rank 0] step:2001/10000 train_time:108610ms step_avg:54.28ms +[2025-09-11 09:28:51] [Rank 0] step:2021/10000 train_time:109263ms step_avg:54.06ms +[2025-09-11 09:28:51] [Rank 0] step:2021/10000 train_time:109263ms step_avg:54.06ms +[2025-09-11 09:28:51] [Rank 0] step:2041/10000 train_time:109912ms step_avg:53.85ms +[2025-09-11 09:28:51] [Rank 0] step:2041/10000 train_time:109912ms step_avg:53.85ms +[2025-09-11 09:28:52] [Rank 0] step:2061/10000 train_time:110561ms step_avg:53.64ms +[2025-09-11 09:28:52] [Rank 0] step:2061/10000 train_time:110561ms step_avg:53.64ms +[2025-09-11 09:28:53] [Rank 0] step:2081/10000 train_time:111209ms step_avg:53.44ms +[2025-09-11 09:28:53] [Rank 0] step:2081/10000 train_time:111209ms step_avg:53.44ms +[2025-09-11 09:28:53] [Rank 0] step:2101/10000 train_time:111857ms step_avg:53.24ms +[2025-09-11 09:28:53] [Rank 0] step:2101/10000 train_time:111857ms step_avg:53.24ms +[2025-09-11 09:28:54] [Rank 0] step:2121/10000 train_time:112507ms step_avg:53.04ms +[2025-09-11 09:28:54] [Rank 0] step:2121/10000 train_time:112507ms step_avg:53.04ms +[2025-09-11 09:28:55] [Rank 0] step:2141/10000 train_time:113155ms step_avg:52.85ms +[2025-09-11 09:28:55] [Rank 0] step:2141/10000 train_time:113155ms step_avg:52.85ms +[2025-09-11 09:28:55] [Rank 0] step:2161/10000 train_time:113805ms step_avg:52.66ms +[2025-09-11 09:28:55] [Rank 0] step:2161/10000 train_time:113805ms step_avg:52.66ms 
+[2025-09-11 09:28:56] [Rank 0] step:2181/10000 train_time:114453ms step_avg:52.48ms +[2025-09-11 09:28:56] [Rank 0] step:2181/10000 train_time:114453ms step_avg:52.48ms +[2025-09-11 09:28:56] [Rank 0] step:2201/10000 train_time:115102ms step_avg:52.30ms +[2025-09-11 09:28:56] [Rank 0] step:2201/10000 train_time:115102ms step_avg:52.30ms +[2025-09-11 09:28:57] [Rank 0] step:2221/10000 train_time:115752ms step_avg:52.12ms +[2025-09-11 09:28:57] [Rank 0] step:2221/10000 train_time:115752ms step_avg:52.12ms +[2025-09-11 09:28:58] [Rank 0] step:2241/10000 train_time:116414ms step_avg:51.95ms +[2025-09-11 09:28:58] [Rank 0] step:2241/10000 train_time:116414ms step_avg:51.95ms +[2025-09-11 09:28:58] [Rank 0] step:2261/10000 train_time:117077ms step_avg:51.78ms +[2025-09-11 09:28:58] [Rank 0] step:2261/10000 train_time:117077ms step_avg:51.78ms +[2025-09-11 09:28:59] [Rank 0] step:2281/10000 train_time:117740ms step_avg:51.62ms +[2025-09-11 09:28:59] [Rank 0] step:2281/10000 train_time:117740ms step_avg:51.62ms +[2025-09-11 09:29:00] [Rank 0] step:2301/10000 train_time:118402ms step_avg:51.46ms +[2025-09-11 09:29:00] [Rank 0] step:2301/10000 train_time:118402ms step_avg:51.46ms +[2025-09-11 09:29:00] [Rank 0] step:2321/10000 train_time:119065ms step_avg:51.30ms +[2025-09-11 09:29:00] [Rank 0] step:2321/10000 train_time:119065ms step_avg:51.30ms +[2025-09-11 09:29:01] [Rank 0] step:2341/10000 train_time:119728ms step_avg:51.14ms +[2025-09-11 09:29:01] [Rank 0] step:2341/10000 train_time:119728ms step_avg:51.14ms +[2025-09-11 09:29:02] [Rank 0] step:2361/10000 train_time:120391ms step_avg:50.99ms +[2025-09-11 09:29:02] [Rank 0] step:2361/10000 train_time:120391ms step_avg:50.99ms +[2025-09-11 09:29:02] [Rank 0] step:2381/10000 train_time:121052ms step_avg:50.84ms +[2025-09-11 09:29:02] [Rank 0] step:2381/10000 train_time:121052ms step_avg:50.84ms +[2025-09-11 09:29:03] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 09:29:03] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 09:29:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 09:29:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 09:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 09:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 09:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 09:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 09:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 09:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 09:29:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 09:29:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 09:29:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 09:29:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 09:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 09:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 09:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 09:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 09:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 09:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 09:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 09:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 09:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 09:29:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 09:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 09:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 09:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 09:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 09:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 09:29:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 09:29:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 09:29:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 09:29:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 09:29:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 09:29:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 09:29:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 09:29:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 09:29:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 09:29:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 09:29:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 09:29:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 09:29:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 09:29:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:29:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:29:17] [Rank 0] PRINT: step:2400/10000 val_loss:5.0665 total_sharp:6.7368e-05 L1_sharp:4.7800e-02 L2_sharp:4.4591e-02 L3_sharp:5.0279e-02 L4_sharp:6.3865e-02 L5_sharp:7.1855e-02 L6_sharp:6.5374e-02 L7_sharp:7.5857e-02 L8_sharp:9.7210e-02 L9_sharp:1.0605e-01 L10_sharp:1.5977e-01 L11_sharp:2.2459e-01 L12_sharp:4.7964e-01 total_fnorm:1.7500e+02 total_l1_linf:3.6864e+05 total_spectral:8.7500e+01 L1_fnorm:1.1670e-01 L2_fnorm:1.1719e-01 L3_fnorm:1.1670e-01 L4_fnorm:1.1670e-01 L5_fnorm:1.1621e-01 L6_fnorm:1.1670e-01 L7_fnorm:1.1670e-01 L8_fnorm:1.1621e-01 L9_fnorm:1.1719e-01 L10_fnorm:1.1768e-01 L11_fnorm:1.1719e-01 L12_fnorm:1.1621e-01 L1_l1linf:3.6377e-02 L2_l1linf:3.6377e-02 L3_l1linf:3.6621e-02 L4_l1linf:3.6377e-02 L5_l1linf:3.5400e-02 L6_l1linf:3.6377e-02 L7_l1linf:3.5400e-02 L8_l1linf:3.5889e-02 L9_l1linf:3.6133e-02 L10_l1linf:3.6377e-02 L11_l1linf:3.6621e-02 L12_l1linf:3.7842e-02 L1_spectral:1.6099e-03 L2_spectral:1.5997e-03 L3_spectral:1.6125e-03 L4_spectral:1.5982e-03 L5_spectral:1.5991e-03 L6_spectral:1.6142e-03 L7_spectral:1.5981e-03 L8_spectral:1.6023e-03 L9_spectral:1.6009e-03 L10_spectral:1.6024e-03 L11_spectral:1.6087e-03 L12_spectral:1.5982e-03 train_time:121696ms step_avg:50.71ms +[2025-09-11 09:29:17] [Rank 0] PRINT: step:2400/10000 
val_loss:5.0665 total_sharp:6.7368e-05 L1_sharp:4.7800e-02 L2_sharp:4.4591e-02 L3_sharp:5.0279e-02 L4_sharp:6.3865e-02 L5_sharp:7.1855e-02 L6_sharp:6.5374e-02 L7_sharp:7.5857e-02 L8_sharp:9.7210e-02 L9_sharp:1.0605e-01 L10_sharp:1.5977e-01 L11_sharp:2.2459e-01 L12_sharp:4.7964e-01 total_fnorm:1.7500e+02 total_l1_linf:3.6864e+05 total_spectral:8.7500e+01 L1_fnorm:1.1670e-01 L2_fnorm:1.1719e-01 L3_fnorm:1.1670e-01 L4_fnorm:1.1670e-01 L5_fnorm:1.1621e-01 L6_fnorm:1.1670e-01 L7_fnorm:1.1670e-01 L8_fnorm:1.1621e-01 L9_fnorm:1.1719e-01 L10_fnorm:1.1768e-01 L11_fnorm:1.1719e-01 L12_fnorm:1.1621e-01 L1_l1linf:3.6377e-02 L2_l1linf:3.6377e-02 L3_l1linf:3.6621e-02 L4_l1linf:3.6377e-02 L5_l1linf:3.5400e-02 L6_l1linf:3.6377e-02 L7_l1linf:3.5400e-02 L8_l1linf:3.5889e-02 L9_l1linf:3.6133e-02 L10_l1linf:3.6377e-02 L11_l1linf:3.6621e-02 L12_l1linf:3.7842e-02 L1_spectral:1.6099e-03 L2_spectral:1.5997e-03 L3_spectral:1.6125e-03 L4_spectral:1.5982e-03 L5_spectral:1.5991e-03 L6_spectral:1.6142e-03 L7_spectral:1.5981e-03 L8_spectral:1.6023e-03 L9_spectral:1.6009e-03 L10_spectral:1.6024e-03 L11_spectral:1.6087e-03 L12_spectral:1.5982e-03 train_time:121696ms step_avg:50.71ms +[2025-09-11 09:29:18] [Rank 0] step:2401/10000 train_time:123462ms step_avg:51.42ms +[2025-09-11 09:29:18] [Rank 0] step:2401/10000 train_time:123462ms step_avg:51.42ms +[2025-09-11 09:29:19] [Rank 0] step:2421/10000 train_time:124142ms step_avg:51.28ms +[2025-09-11 09:29:19] [Rank 0] step:2421/10000 train_time:124142ms step_avg:51.28ms +[2025-09-11 09:29:20] [Rank 0] step:2441/10000 train_time:124806ms step_avg:51.13ms +[2025-09-11 09:29:20] [Rank 0] step:2441/10000 train_time:124806ms step_avg:51.13ms +[2025-09-11 09:29:20] [Rank 0] step:2461/10000 train_time:125468ms step_avg:50.98ms +[2025-09-11 09:29:20] [Rank 0] step:2461/10000 train_time:125468ms step_avg:50.98ms +[2025-09-11 09:29:21] [Rank 0] step:2481/10000 train_time:126131ms step_avg:50.84ms +[2025-09-11 09:29:21] [Rank 0] step:2481/10000 
train_time:126131ms step_avg:50.84ms +[2025-09-11 09:29:22] [Rank 0] step:2501/10000 train_time:126793ms step_avg:50.70ms +[2025-09-11 09:29:22] [Rank 0] step:2501/10000 train_time:126793ms step_avg:50.70ms +[2025-09-11 09:29:22] [Rank 0] step:2521/10000 train_time:127455ms step_avg:50.56ms +[2025-09-11 09:29:22] [Rank 0] step:2521/10000 train_time:127455ms step_avg:50.56ms +[2025-09-11 09:29:23] [Rank 0] step:2541/10000 train_time:128118ms step_avg:50.42ms +[2025-09-11 09:29:23] [Rank 0] step:2541/10000 train_time:128118ms step_avg:50.42ms +[2025-09-11 09:29:24] [Rank 0] step:2561/10000 train_time:128780ms step_avg:50.29ms +[2025-09-11 09:29:24] [Rank 0] step:2561/10000 train_time:128780ms step_avg:50.29ms +[2025-09-11 09:29:24] [Rank 0] step:2581/10000 train_time:129447ms step_avg:50.15ms +[2025-09-11 09:29:24] [Rank 0] step:2581/10000 train_time:129447ms step_avg:50.15ms +[2025-09-11 09:29:25] [Rank 0] step:2601/10000 train_time:130110ms step_avg:50.02ms +[2025-09-11 09:29:25] [Rank 0] step:2601/10000 train_time:130110ms step_avg:50.02ms +[2025-09-11 09:29:26] [Rank 0] step:2621/10000 train_time:130771ms step_avg:49.89ms +[2025-09-11 09:29:26] [Rank 0] step:2621/10000 train_time:130771ms step_avg:49.89ms +[2025-09-11 09:29:26] [Rank 0] step:2641/10000 train_time:131434ms step_avg:49.77ms +[2025-09-11 09:29:26] [Rank 0] step:2641/10000 train_time:131434ms step_avg:49.77ms +[2025-09-11 09:29:27] [Rank 0] step:2661/10000 train_time:132097ms step_avg:49.64ms +[2025-09-11 09:29:27] [Rank 0] step:2661/10000 train_time:132097ms step_avg:49.64ms +[2025-09-11 09:29:28] [Rank 0] step:2681/10000 train_time:132758ms step_avg:49.52ms +[2025-09-11 09:29:28] [Rank 0] step:2681/10000 train_time:132758ms step_avg:49.52ms +[2025-09-11 09:29:28] [Rank 0] step:2701/10000 train_time:133421ms step_avg:49.40ms +[2025-09-11 09:29:28] [Rank 0] step:2701/10000 train_time:133421ms step_avg:49.40ms +[2025-09-11 09:29:29] [Rank 0] step:2721/10000 train_time:134084ms step_avg:49.28ms 
+[2025-09-11 09:29:29] [Rank 0] step:2721/10000 train_time:134084ms step_avg:49.28ms +[2025-09-11 09:29:30] [Rank 0] step:2741/10000 train_time:134749ms step_avg:49.16ms +[2025-09-11 09:29:30] [Rank 0] step:2741/10000 train_time:134749ms step_avg:49.16ms +[2025-09-11 09:29:30] [Rank 0] step:2761/10000 train_time:135411ms step_avg:49.04ms +[2025-09-11 09:29:30] [Rank 0] step:2761/10000 train_time:135411ms step_avg:49.04ms +[2025-09-11 09:29:31] [Rank 0] step:2781/10000 train_time:136074ms step_avg:48.93ms +[2025-09-11 09:29:31] [Rank 0] step:2781/10000 train_time:136074ms step_avg:48.93ms +[2025-09-11 09:29:32] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 09:29:32] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 09:29:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 09:29:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 09:29:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 09:29:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 09:29:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:29:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:29:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 09:29:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 09:29:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 09:29:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 09:29:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 09:29:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 09:29:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 09:29:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 09:29:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 09:29:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 09:29:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 09:29:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 09:29:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 09:29:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 09:29:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 09:29:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 09:29:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 09:29:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 09:29:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 09:29:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 09:29:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 09:29:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 09:29:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 09:29:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 09:29:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 09:29:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 09:29:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 09:29:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 09:29:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 09:29:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 09:29:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 09:29:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 09:29:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 09:29:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 09:29:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 09:29:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 09:29:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:29:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:29:43] [Rank 0] PRINT: step:2800/10000 val_loss:4.9861 total_sharp:6.3536e-05 L1_sharp:3.9669e-02 L2_sharp:4.5437e-02 L3_sharp:4.9098e-02 L4_sharp:5.9937e-02 L5_sharp:7.4409e-02 L6_sharp:6.8862e-02 L7_sharp:8.3352e-02 L8_sharp:1.2368e-01 L9_sharp:1.7578e-01 L10_sharp:2.7153e-01 L11_sharp:2.5530e-01 L12_sharp:8.8099e-01 total_fnorm:1.7800e+02 total_l1_linf:3.7478e+05 total_spectral:8.9000e+01 L1_fnorm:1.1621e-01 L2_fnorm:1.1670e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1670e-01 L5_fnorm:1.1572e-01 L6_fnorm:1.1572e-01 L7_fnorm:1.1621e-01 L8_fnorm:1.1572e-01 L9_fnorm:1.1670e-01 L10_fnorm:1.1670e-01 L11_fnorm:1.1670e-01 L12_fnorm:1.1621e-01 L1_l1linf:3.5156e-02 L2_l1linf:3.4912e-02 L3_l1linf:3.5156e-02 L4_l1linf:3.4668e-02 L5_l1linf:3.4180e-02 L6_l1linf:3.3936e-02 L7_l1linf:3.4424e-02 L8_l1linf:3.4180e-02 L9_l1linf:3.4912e-02 L10_l1linf:3.5400e-02 L11_l1linf:3.5645e-02 L12_l1linf:3.6621e-02 L1_spectral:1.6155e-03 L2_spectral:1.5955e-03 L3_spectral:1.6183e-03 L4_spectral:1.6146e-03 L5_spectral:1.6164e-03 L6_spectral:1.6000e-03 L7_spectral:1.6119e-03 L8_spectral:1.6071e-03 L9_spectral:1.6076e-03 L10_spectral:1.6093e-03 L11_spectral:1.6061e-03 L12_spectral:1.6074e-03 train_time:136717ms step_avg:48.83ms +[2025-09-11 09:29:43] [Rank 0] PRINT: step:2800/10000 val_loss:4.9861 total_sharp:6.3536e-05 L1_sharp:3.9669e-02 L2_sharp:4.5437e-02 L3_sharp:4.9098e-02 L4_sharp:5.9937e-02 L5_sharp:7.4409e-02 L6_sharp:6.8862e-02 L7_sharp:8.3352e-02 L8_sharp:1.2368e-01 L9_sharp:1.7578e-01 L10_sharp:2.7153e-01 L11_sharp:2.5530e-01 L12_sharp:8.8099e-01 total_fnorm:1.7800e+02 total_l1_linf:3.7478e+05 total_spectral:8.9000e+01 L1_fnorm:1.1621e-01 L2_fnorm:1.1670e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1670e-01 L5_fnorm:1.1572e-01 L6_fnorm:1.1572e-01 L7_fnorm:1.1621e-01 L8_fnorm:1.1572e-01 L9_fnorm:1.1670e-01 L10_fnorm:1.1670e-01 L11_fnorm:1.1670e-01 L12_fnorm:1.1621e-01 L1_l1linf:3.5156e-02 L2_l1linf:3.4912e-02 L3_l1linf:3.5156e-02 L4_l1linf:3.4668e-02 L5_l1linf:3.4180e-02 
L6_l1linf:3.3936e-02 L7_l1linf:3.4424e-02 L8_l1linf:3.4180e-02 L9_l1linf:3.4912e-02 L10_l1linf:3.5400e-02 L11_l1linf:3.5645e-02 L12_l1linf:3.6621e-02 L1_spectral:1.6155e-03 L2_spectral:1.5955e-03 L3_spectral:1.6183e-03 L4_spectral:1.6146e-03 L5_spectral:1.6164e-03 L6_spectral:1.6000e-03 L7_spectral:1.6119e-03 L8_spectral:1.6071e-03 L9_spectral:1.6076e-03 L10_spectral:1.6093e-03 L11_spectral:1.6061e-03 L12_spectral:1.6074e-03 train_time:136717ms step_avg:48.83ms +[2025-09-11 09:29:44] [Rank 0] step:2801/10000 train_time:138479ms step_avg:49.44ms +[2025-09-11 09:29:44] [Rank 0] step:2801/10000 train_time:138479ms step_avg:49.44ms +[2025-09-11 09:29:45] [Rank 0] step:2821/10000 train_time:139147ms step_avg:49.33ms +[2025-09-11 09:29:45] [Rank 0] step:2821/10000 train_time:139147ms step_avg:49.33ms +[2025-09-11 09:29:46] [Rank 0] step:2841/10000 train_time:139811ms step_avg:49.21ms +[2025-09-11 09:29:46] [Rank 0] step:2841/10000 train_time:139811ms step_avg:49.21ms +[2025-09-11 09:29:46] [Rank 0] step:2861/10000 train_time:140475ms step_avg:49.10ms +[2025-09-11 09:29:46] [Rank 0] step:2861/10000 train_time:140475ms step_avg:49.10ms +[2025-09-11 09:29:47] [Rank 0] step:2881/10000 train_time:141139ms step_avg:48.99ms +[2025-09-11 09:29:47] [Rank 0] step:2881/10000 train_time:141139ms step_avg:48.99ms +[2025-09-11 09:29:48] [Rank 0] step:2901/10000 train_time:142063ms step_avg:48.97ms +[2025-09-11 09:29:48] [Rank 0] step:2901/10000 train_time:142063ms step_avg:48.97ms +[2025-09-11 09:29:49] [Rank 0] step:2921/10000 train_time:142726ms step_avg:48.86ms +[2025-09-11 09:29:49] [Rank 0] step:2921/10000 train_time:142726ms step_avg:48.86ms +[2025-09-11 09:29:49] [Rank 0] step:2941/10000 train_time:143389ms step_avg:48.76ms +[2025-09-11 09:29:49] [Rank 0] step:2941/10000 train_time:143389ms step_avg:48.76ms +[2025-09-11 09:29:50] [Rank 0] step:2961/10000 train_time:144341ms step_avg:48.75ms +[2025-09-11 09:29:50] [Rank 0] step:2961/10000 train_time:144341ms step_avg:48.75ms 
+[2025-09-11 09:29:51] [Rank 0] step:2981/10000 train_time:145006ms step_avg:48.64ms +[2025-09-11 09:29:51] [Rank 0] step:2981/10000 train_time:145006ms step_avg:48.64ms +[2025-09-11 09:29:51] [Rank 0] step:3001/10000 train_time:145672ms step_avg:48.54ms +[2025-09-11 09:29:51] [Rank 0] step:3001/10000 train_time:145672ms step_avg:48.54ms +[2025-09-11 09:29:52] [Rank 0] step:3021/10000 train_time:146350ms step_avg:48.44ms +[2025-09-11 09:29:52] [Rank 0] step:3021/10000 train_time:146350ms step_avg:48.44ms +[2025-09-11 09:29:53] [Rank 0] step:3041/10000 train_time:147016ms step_avg:48.34ms +[2025-09-11 09:29:53] [Rank 0] step:3041/10000 train_time:147016ms step_avg:48.34ms +[2025-09-11 09:29:53] [Rank 0] step:3061/10000 train_time:147682ms step_avg:48.25ms +[2025-09-11 09:29:53] [Rank 0] step:3061/10000 train_time:147682ms step_avg:48.25ms +[2025-09-11 09:29:54] [Rank 0] step:3081/10000 train_time:148347ms step_avg:48.15ms +[2025-09-11 09:29:54] [Rank 0] step:3081/10000 train_time:148347ms step_avg:48.15ms +[2025-09-11 09:29:55] [Rank 0] step:3101/10000 train_time:149013ms step_avg:48.05ms +[2025-09-11 09:29:55] [Rank 0] step:3101/10000 train_time:149013ms step_avg:48.05ms +[2025-09-11 09:29:55] [Rank 0] step:3121/10000 train_time:149680ms step_avg:47.96ms +[2025-09-11 09:29:55] [Rank 0] step:3121/10000 train_time:149680ms step_avg:47.96ms +[2025-09-11 09:29:56] [Rank 0] step:3141/10000 train_time:150346ms step_avg:47.87ms +[2025-09-11 09:29:56] [Rank 0] step:3141/10000 train_time:150346ms step_avg:47.87ms +[2025-09-11 09:29:57] [Rank 0] step:3161/10000 train_time:151011ms step_avg:47.77ms +[2025-09-11 09:29:57] [Rank 0] step:3161/10000 train_time:151011ms step_avg:47.77ms +[2025-09-11 09:29:57] [Rank 0] step:3181/10000 train_time:151675ms step_avg:47.68ms +[2025-09-11 09:29:57] [Rank 0] step:3181/10000 train_time:151675ms step_avg:47.68ms +[2025-09-11 09:29:58] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 09:29:58] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 09:29:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 09:29:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 09:30:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 09:30:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 09:30:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:30:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:30:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 09:30:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 09:30:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 09:30:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 09:30:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 09:30:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 09:30:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 09:30:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 09:30:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 09:30:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 09:30:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 09:30:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 09:30:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 09:30:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 09:30:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 09:30:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 09:30:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 09:30:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 09:30:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 09:30:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 09:30:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 09:30:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 09:30:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 09:30:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 09:30:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 09:30:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 09:30:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 09:30:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 09:30:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 09:30:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 09:30:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 09:30:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 09:30:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 09:30:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 09:30:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 09:30:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 09:30:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:30:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:30:08] [Rank 0] PRINT: step:3200/10000 val_loss:4.8967 total_sharp:4.3987e-05 L1_sharp:3.8939e-02 L2_sharp:4.4309e-02 L3_sharp:5.1308e-02 L4_sharp:6.3727e-02 L5_sharp:6.1584e-02 L6_sharp:6.1076e-02 L7_sharp:7.0965e-02 L8_sharp:8.2019e-02 L9_sharp:1.1641e-01 L10_sharp:1.5333e-01 L11_sharp:1.7971e-01 L12_sharp:3.0992e-01 total_fnorm:1.8900e+02 total_l1_linf:4.1984e+05 total_spectral:9.4500e+01 L1_fnorm:1.1572e-01 L2_fnorm:1.1621e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1572e-01 L10_fnorm:1.1621e-01 L11_fnorm:1.1572e-01 L12_fnorm:1.1572e-01 L1_l1linf:3.4180e-02 L2_l1linf:3.3203e-02 L3_l1linf:3.2959e-02 L4_l1linf:3.3203e-02 L5_l1linf:3.2471e-02 L6_l1linf:3.2715e-02 L7_l1linf:3.2227e-02 L8_l1linf:3.2227e-02 L9_l1linf:3.2471e-02 L10_l1linf:3.3203e-02 L11_l1linf:3.3691e-02 L12_l1linf:3.4668e-02 L1_spectral:1.6179e-03 L2_spectral:1.5938e-03 L3_spectral:1.6037e-03 L4_spectral:1.6085e-03 L5_spectral:1.6036e-03 L6_spectral:1.5965e-03 L7_spectral:1.6150e-03 L8_spectral:1.6302e-03 L9_spectral:1.6180e-03 L10_spectral:1.6166e-03 L11_spectral:1.6041e-03 L12_spectral:1.6092e-03 train_time:152322ms step_avg:47.60ms +[2025-09-11 09:30:08] [Rank 0] PRINT: step:3200/10000 
val_loss:4.8967 total_sharp:4.3987e-05 L1_sharp:3.8939e-02 L2_sharp:4.4309e-02 L3_sharp:5.1308e-02 L4_sharp:6.3727e-02 L5_sharp:6.1584e-02 L6_sharp:6.1076e-02 L7_sharp:7.0965e-02 L8_sharp:8.2019e-02 L9_sharp:1.1641e-01 L10_sharp:1.5333e-01 L11_sharp:1.7971e-01 L12_sharp:3.0992e-01 total_fnorm:1.8900e+02 total_l1_linf:4.1984e+05 total_spectral:9.4500e+01 L1_fnorm:1.1572e-01 L2_fnorm:1.1621e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1475e-01 L9_fnorm:1.1572e-01 L10_fnorm:1.1621e-01 L11_fnorm:1.1572e-01 L12_fnorm:1.1572e-01 L1_l1linf:3.4180e-02 L2_l1linf:3.3203e-02 L3_l1linf:3.2959e-02 L4_l1linf:3.3203e-02 L5_l1linf:3.2471e-02 L6_l1linf:3.2715e-02 L7_l1linf:3.2227e-02 L8_l1linf:3.2227e-02 L9_l1linf:3.2471e-02 L10_l1linf:3.3203e-02 L11_l1linf:3.3691e-02 L12_l1linf:3.4668e-02 L1_spectral:1.6179e-03 L2_spectral:1.5938e-03 L3_spectral:1.6037e-03 L4_spectral:1.6085e-03 L5_spectral:1.6036e-03 L6_spectral:1.5965e-03 L7_spectral:1.6150e-03 L8_spectral:1.6302e-03 L9_spectral:1.6180e-03 L10_spectral:1.6166e-03 L11_spectral:1.6041e-03 L12_spectral:1.6092e-03 train_time:152322ms step_avg:47.60ms +[2025-09-11 09:30:11] [Rank 0] step:3201/10000 train_time:154886ms step_avg:48.39ms +[2025-09-11 09:30:11] [Rank 0] step:3201/10000 train_time:154886ms step_avg:48.39ms +[2025-09-11 09:30:12] [Rank 0] step:3221/10000 train_time:155826ms step_avg:48.38ms +[2025-09-11 09:30:12] [Rank 0] step:3221/10000 train_time:155826ms step_avg:48.38ms +[2025-09-11 09:30:13] [Rank 0] step:3241/10000 train_time:156493ms step_avg:48.29ms +[2025-09-11 09:30:13] [Rank 0] step:3241/10000 train_time:156493ms step_avg:48.29ms +[2025-09-11 09:30:13] [Rank 0] step:3261/10000 train_time:157161ms step_avg:48.19ms +[2025-09-11 09:30:13] [Rank 0] step:3261/10000 train_time:157161ms step_avg:48.19ms +[2025-09-11 09:30:14] [Rank 0] step:3281/10000 train_time:157826ms step_avg:48.10ms +[2025-09-11 09:30:14] [Rank 0] step:3281/10000 
train_time:157826ms step_avg:48.10ms +[2025-09-11 09:30:15] [Rank 0] step:3301/10000 train_time:158493ms step_avg:48.01ms +[2025-09-11 09:30:15] [Rank 0] step:3301/10000 train_time:158493ms step_avg:48.01ms +[2025-09-11 09:30:15] [Rank 0] step:3321/10000 train_time:159159ms step_avg:47.92ms +[2025-09-11 09:30:15] [Rank 0] step:3321/10000 train_time:159159ms step_avg:47.92ms +[2025-09-11 09:30:16] [Rank 0] step:3341/10000 train_time:159833ms step_avg:47.84ms +[2025-09-11 09:30:16] [Rank 0] step:3341/10000 train_time:159833ms step_avg:47.84ms +[2025-09-11 09:30:17] [Rank 0] step:3361/10000 train_time:160498ms step_avg:47.75ms +[2025-09-11 09:30:17] [Rank 0] step:3361/10000 train_time:160498ms step_avg:47.75ms +[2025-09-11 09:30:17] [Rank 0] step:3381/10000 train_time:161164ms step_avg:47.67ms +[2025-09-11 09:30:17] [Rank 0] step:3381/10000 train_time:161164ms step_avg:47.67ms +[2025-09-11 09:30:18] [Rank 0] step:3401/10000 train_time:161830ms step_avg:47.58ms +[2025-09-11 09:30:18] [Rank 0] step:3401/10000 train_time:161830ms step_avg:47.58ms +[2025-09-11 09:30:19] [Rank 0] step:3421/10000 train_time:162495ms step_avg:47.50ms +[2025-09-11 09:30:19] [Rank 0] step:3421/10000 train_time:162495ms step_avg:47.50ms +[2025-09-11 09:30:19] [Rank 0] step:3441/10000 train_time:163161ms step_avg:47.42ms +[2025-09-11 09:30:19] [Rank 0] step:3441/10000 train_time:163161ms step_avg:47.42ms +[2025-09-11 09:30:20] [Rank 0] step:3461/10000 train_time:163826ms step_avg:47.33ms +[2025-09-11 09:30:20] [Rank 0] step:3461/10000 train_time:163826ms step_avg:47.33ms +[2025-09-11 09:30:21] [Rank 0] step:3481/10000 train_time:164492ms step_avg:47.25ms +[2025-09-11 09:30:21] [Rank 0] step:3481/10000 train_time:164492ms step_avg:47.25ms +[2025-09-11 09:30:21] [Rank 0] step:3501/10000 train_time:165158ms step_avg:47.17ms +[2025-09-11 09:30:21] [Rank 0] step:3501/10000 train_time:165158ms step_avg:47.17ms +[2025-09-11 09:30:22] [Rank 0] step:3521/10000 train_time:165823ms step_avg:47.10ms 
+[2025-09-11 09:30:22] [Rank 0] step:3521/10000 train_time:165823ms step_avg:47.10ms +[2025-09-11 09:30:23] [Rank 0] step:3541/10000 train_time:166489ms step_avg:47.02ms +[2025-09-11 09:30:23] [Rank 0] step:3541/10000 train_time:166489ms step_avg:47.02ms +[2025-09-11 09:30:23] [Rank 0] step:3561/10000 train_time:167154ms step_avg:46.94ms +[2025-09-11 09:30:23] [Rank 0] step:3561/10000 train_time:167154ms step_avg:46.94ms +[2025-09-11 09:30:24] [Rank 0] step:3581/10000 train_time:167820ms step_avg:46.86ms +[2025-09-11 09:30:24] [Rank 0] step:3581/10000 train_time:167820ms step_avg:46.86ms +[2025-09-11 09:30:25] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 09:30:25] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 09:30:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 09:30:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 09:30:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 09:30:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 09:30:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:30:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:30:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 09:30:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 09:30:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 09:30:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 09:30:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 09:30:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 09:30:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 09:30:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 09:30:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 09:30:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 09:30:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 09:30:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 09:30:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 09:30:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 09:30:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 09:30:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 09:30:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 09:30:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 09:30:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 09:30:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 09:30:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 09:30:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 09:30:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 09:30:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 09:30:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 09:30:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 09:30:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 09:30:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 09:30:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 09:30:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 09:30:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 09:30:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 09:30:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 09:30:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 09:30:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 09:30:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 09:30:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:30:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:30:35] [Rank 0] PRINT: step:3600/10000 val_loss:4.8424 total_sharp:5.2261e-05 L1_sharp:3.3142e-02 L2_sharp:4.0109e-02 L3_sharp:4.6606e-02 L4_sharp:5.6104e-02 L5_sharp:5.2662e-02 L6_sharp:6.3733e-02 L7_sharp:7.5730e-02 L8_sharp:1.0814e-01 L9_sharp:1.3614e-01 L10_sharp:2.0672e-01 L11_sharp:2.2463e-01 L12_sharp:1.3124e+00 total_fnorm:1.7900e+02 total_l1_linf:3.8298e+05 total_spectral:8.9500e+01 L1_fnorm:1.1572e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1426e-01 L9_fnorm:1.1572e-01 L10_fnorm:1.1572e-01 L11_fnorm:1.1572e-01 L12_fnorm:1.1572e-01 L1_l1linf:3.1494e-02 L2_l1linf:3.1494e-02 L3_l1linf:3.1250e-02 L4_l1linf:3.1006e-02 L5_l1linf:3.1494e-02 L6_l1linf:3.0762e-02 L7_l1linf:3.0762e-02 L8_l1linf:3.0884e-02 L9_l1linf:3.1494e-02 L10_l1linf:3.1982e-02 L11_l1linf:3.2715e-02 L12_l1linf:3.3447e-02 L1_spectral:1.6076e-03 L2_spectral:1.6169e-03 L3_spectral:1.6138e-03 L4_spectral:1.6031e-03 L5_spectral:1.6084e-03 L6_spectral:1.6047e-03 L7_spectral:1.6157e-03 L8_spectral:1.6142e-03 L9_spectral:1.6208e-03 L10_spectral:1.6089e-03 L11_spectral:1.6062e-03 L12_spectral:1.6032e-03 train_time:168466ms step_avg:46.80ms +[2025-09-11 09:30:35] [Rank 0] PRINT: step:3600/10000 val_loss:4.8424 total_sharp:5.2261e-05 L1_sharp:3.3142e-02 L2_sharp:4.0109e-02 L3_sharp:4.6606e-02 L4_sharp:5.6104e-02 L5_sharp:5.2662e-02 L6_sharp:6.3733e-02 L7_sharp:7.5730e-02 L8_sharp:1.0814e-01 L9_sharp:1.3614e-01 L10_sharp:2.0672e-01 L11_sharp:2.2463e-01 L12_sharp:1.3124e+00 total_fnorm:1.7900e+02 total_l1_linf:3.8298e+05 total_spectral:8.9500e+01 L1_fnorm:1.1572e-01 L2_fnorm:1.1572e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1426e-01 L9_fnorm:1.1572e-01 L10_fnorm:1.1572e-01 L11_fnorm:1.1572e-01 L12_fnorm:1.1572e-01 L1_l1linf:3.1494e-02 L2_l1linf:3.1494e-02 L3_l1linf:3.1250e-02 L4_l1linf:3.1006e-02 L5_l1linf:3.1494e-02 
L6_l1linf:3.0762e-02 L7_l1linf:3.0762e-02 L8_l1linf:3.0884e-02 L9_l1linf:3.1494e-02 L10_l1linf:3.1982e-02 L11_l1linf:3.2715e-02 L12_l1linf:3.3447e-02 L1_spectral:1.6076e-03 L2_spectral:1.6169e-03 L3_spectral:1.6138e-03 L4_spectral:1.6031e-03 L5_spectral:1.6084e-03 L6_spectral:1.6047e-03 L7_spectral:1.6157e-03 L8_spectral:1.6142e-03 L9_spectral:1.6208e-03 L10_spectral:1.6089e-03 L11_spectral:1.6062e-03 L12_spectral:1.6032e-03 train_time:168466ms step_avg:46.80ms +[2025-09-11 09:30:37] [Rank 0] step:3601/10000 train_time:170097ms step_avg:47.24ms +[2025-09-11 09:30:37] [Rank 0] step:3601/10000 train_time:170097ms step_avg:47.24ms +[2025-09-11 09:30:38] [Rank 0] step:3621/10000 train_time:170787ms step_avg:47.17ms +[2025-09-11 09:30:38] [Rank 0] step:3621/10000 train_time:170787ms step_avg:47.17ms +[2025-09-11 09:30:38] [Rank 0] step:3641/10000 train_time:171453ms step_avg:47.09ms +[2025-09-11 09:30:38] [Rank 0] step:3641/10000 train_time:171453ms step_avg:47.09ms +[2025-09-11 09:30:39] [Rank 0] step:3661/10000 train_time:172120ms step_avg:47.01ms +[2025-09-11 09:30:39] [Rank 0] step:3661/10000 train_time:172120ms step_avg:47.01ms +[2025-09-11 09:30:40] [Rank 0] step:3681/10000 train_time:172786ms step_avg:46.94ms +[2025-09-11 09:30:40] [Rank 0] step:3681/10000 train_time:172786ms step_avg:46.94ms +[2025-09-11 09:30:40] [Rank 0] step:3701/10000 train_time:173452ms step_avg:46.87ms +[2025-09-11 09:30:40] [Rank 0] step:3701/10000 train_time:173452ms step_avg:46.87ms +[2025-09-11 09:30:41] [Rank 0] step:3721/10000 train_time:174128ms step_avg:46.80ms +[2025-09-11 09:30:41] [Rank 0] step:3721/10000 train_time:174128ms step_avg:46.80ms +[2025-09-11 09:30:42] [Rank 0] step:3741/10000 train_time:174805ms step_avg:46.73ms +[2025-09-11 09:30:42] [Rank 0] step:3741/10000 train_time:174805ms step_avg:46.73ms +[2025-09-11 09:30:42] [Rank 0] step:3761/10000 train_time:175482ms step_avg:46.66ms +[2025-09-11 09:30:42] [Rank 0] step:3761/10000 train_time:175482ms step_avg:46.66ms 
+[2025-09-11 09:30:43] [Rank 0] step:3781/10000 train_time:176158ms step_avg:46.59ms +[2025-09-11 09:30:43] [Rank 0] step:3781/10000 train_time:176158ms step_avg:46.59ms +[2025-09-11 09:30:44] [Rank 0] step:3801/10000 train_time:176834ms step_avg:46.52ms +[2025-09-11 09:30:44] [Rank 0] step:3801/10000 train_time:176834ms step_avg:46.52ms +[2025-09-11 09:30:44] [Rank 0] step:3821/10000 train_time:177511ms step_avg:46.46ms +[2025-09-11 09:30:44] [Rank 0] step:3821/10000 train_time:177511ms step_avg:46.46ms +[2025-09-11 09:30:45] [Rank 0] step:3841/10000 train_time:178187ms step_avg:46.39ms +[2025-09-11 09:30:45] [Rank 0] step:3841/10000 train_time:178187ms step_avg:46.39ms +[2025-09-11 09:30:46] [Rank 0] step:3861/10000 train_time:178862ms step_avg:46.33ms +[2025-09-11 09:30:46] [Rank 0] step:3861/10000 train_time:178862ms step_avg:46.33ms +[2025-09-11 09:30:46] [Rank 0] step:3881/10000 train_time:179538ms step_avg:46.26ms +[2025-09-11 09:30:46] [Rank 0] step:3881/10000 train_time:179538ms step_avg:46.26ms +[2025-09-11 09:30:47] [Rank 0] step:3901/10000 train_time:180214ms step_avg:46.20ms +[2025-09-11 09:30:47] [Rank 0] step:3901/10000 train_time:180214ms step_avg:46.20ms +[2025-09-11 09:30:48] [Rank 0] step:3921/10000 train_time:180890ms step_avg:46.13ms +[2025-09-11 09:30:48] [Rank 0] step:3921/10000 train_time:180890ms step_avg:46.13ms +[2025-09-11 09:30:48] [Rank 0] step:3941/10000 train_time:181567ms step_avg:46.07ms +[2025-09-11 09:30:48] [Rank 0] step:3941/10000 train_time:181567ms step_avg:46.07ms +[2025-09-11 09:30:49] [Rank 0] step:3961/10000 train_time:182243ms step_avg:46.01ms +[2025-09-11 09:30:49] [Rank 0] step:3961/10000 train_time:182243ms step_avg:46.01ms +[2025-09-11 09:30:50] [Rank 0] step:3981/10000 train_time:182919ms step_avg:45.95ms +[2025-09-11 09:30:50] [Rank 0] step:3981/10000 train_time:182919ms step_avg:45.95ms +[2025-09-11 09:30:51] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 09:30:51] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 09:30:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 09:30:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 09:30:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 09:30:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 09:30:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:30:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:30:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 09:30:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 09:30:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 09:30:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 09:30:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 09:30:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 09:30:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 09:30:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 09:30:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 09:30:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 09:30:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 09:30:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 09:30:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 09:30:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 09:30:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 09:30:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 09:30:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 09:30:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 09:30:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 09:30:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 09:30:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 09:30:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 09:30:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 09:30:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 09:30:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 09:30:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 09:31:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 09:31:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 09:31:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 09:31:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 09:31:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 09:31:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 09:31:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 09:31:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 09:31:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 09:31:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 09:31:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:31:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:31:01] [Rank 0] PRINT: step:4000/10000 val_loss:4.7814 total_sharp:5.4629e-05 L1_sharp:3.7356e-02 L2_sharp:4.4140e-02 L3_sharp:5.7632e-02 L4_sharp:7.3464e-02 L5_sharp:9.6090e-02 L6_sharp:1.2290e-01 L7_sharp:1.4179e-01 L8_sharp:2.3643e-01 L9_sharp:4.5519e-01 L10_sharp:6.3695e-01 L11_sharp:9.2181e-01 L12_sharp:2.1028e+00 total_fnorm:2.0200e+02 total_l1_linf:4.3622e+05 total_spectral:1.0100e+02 L1_fnorm:1.1670e-01 L2_fnorm:1.1719e-01 L3_fnorm:1.1670e-01 L4_fnorm:1.1621e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1572e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1426e-01 L9_fnorm:1.1621e-01 L10_fnorm:1.1621e-01 L11_fnorm:1.1621e-01 L12_fnorm:1.1621e-01 L1_l1linf:3.2715e-02 L2_l1linf:3.3203e-02 L3_l1linf:3.3691e-02 L4_l1linf:3.2471e-02 L5_l1linf:3.1982e-02 L6_l1linf:3.2471e-02 L7_l1linf:3.1250e-02 L8_l1linf:3.1738e-02 L9_l1linf:3.2959e-02 L10_l1linf:3.3447e-02 L11_l1linf:3.4180e-02 L12_l1linf:3.4668e-02 L1_spectral:1.6056e-03 L2_spectral:1.6016e-03 L3_spectral:1.6108e-03 L4_spectral:1.6069e-03 L5_spectral:1.6035e-03 L6_spectral:1.6057e-03 L7_spectral:1.6151e-03 L8_spectral:1.6116e-03 L9_spectral:1.6275e-03 L10_spectral:1.6038e-03 L11_spectral:1.6038e-03 L12_spectral:1.6140e-03 train_time:183723ms step_avg:45.93ms +[2025-09-11 09:31:01] [Rank 0] PRINT: step:4000/10000 
val_loss:4.7814 total_sharp:5.4629e-05 L1_sharp:3.7356e-02 L2_sharp:4.4140e-02 L3_sharp:5.7632e-02 L4_sharp:7.3464e-02 L5_sharp:9.6090e-02 L6_sharp:1.2290e-01 L7_sharp:1.4179e-01 L8_sharp:2.3643e-01 L9_sharp:4.5519e-01 L10_sharp:6.3695e-01 L11_sharp:9.2181e-01 L12_sharp:2.1028e+00 total_fnorm:2.0200e+02 total_l1_linf:4.3622e+05 total_spectral:1.0100e+02 L1_fnorm:1.1670e-01 L2_fnorm:1.1719e-01 L3_fnorm:1.1670e-01 L4_fnorm:1.1621e-01 L5_fnorm:1.1523e-01 L6_fnorm:1.1572e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1426e-01 L9_fnorm:1.1621e-01 L10_fnorm:1.1621e-01 L11_fnorm:1.1621e-01 L12_fnorm:1.1621e-01 L1_l1linf:3.2715e-02 L2_l1linf:3.3203e-02 L3_l1linf:3.3691e-02 L4_l1linf:3.2471e-02 L5_l1linf:3.1982e-02 L6_l1linf:3.2471e-02 L7_l1linf:3.1250e-02 L8_l1linf:3.1738e-02 L9_l1linf:3.2959e-02 L10_l1linf:3.3447e-02 L11_l1linf:3.4180e-02 L12_l1linf:3.4668e-02 L1_spectral:1.6056e-03 L2_spectral:1.6016e-03 L3_spectral:1.6108e-03 L4_spectral:1.6069e-03 L5_spectral:1.6035e-03 L6_spectral:1.6057e-03 L7_spectral:1.6151e-03 L8_spectral:1.6116e-03 L9_spectral:1.6275e-03 L10_spectral:1.6038e-03 L11_spectral:1.6038e-03 L12_spectral:1.6140e-03 train_time:183723ms step_avg:45.93ms +[2025-09-11 09:31:03] [Rank 0] step:4001/10000 train_time:185415ms step_avg:46.34ms +[2025-09-11 09:31:03] [Rank 0] step:4001/10000 train_time:185415ms step_avg:46.34ms +[2025-09-11 09:31:04] [Rank 0] step:4021/10000 train_time:186102ms step_avg:46.28ms +[2025-09-11 09:31:04] [Rank 0] step:4021/10000 train_time:186102ms step_avg:46.28ms +[2025-09-11 09:31:04] [Rank 0] step:4041/10000 train_time:186780ms step_avg:46.22ms +[2025-09-11 09:31:04] [Rank 0] step:4041/10000 train_time:186780ms step_avg:46.22ms +[2025-09-11 09:31:05] [Rank 0] step:4061/10000 train_time:187456ms step_avg:46.16ms +[2025-09-11 09:31:05] [Rank 0] step:4061/10000 train_time:187456ms step_avg:46.16ms +[2025-09-11 09:31:06] [Rank 0] step:4081/10000 train_time:188133ms step_avg:46.10ms +[2025-09-11 09:31:06] [Rank 0] step:4081/10000 
train_time:188133ms step_avg:46.10ms +[2025-09-11 09:31:06] [Rank 0] step:4101/10000 train_time:188809ms step_avg:46.04ms +[2025-09-11 09:31:06] [Rank 0] step:4101/10000 train_time:188809ms step_avg:46.04ms +[2025-09-11 09:31:07] [Rank 0] step:4121/10000 train_time:189487ms step_avg:45.98ms +[2025-09-11 09:31:07] [Rank 0] step:4121/10000 train_time:189487ms step_avg:45.98ms +[2025-09-11 09:31:08] [Rank 0] step:4141/10000 train_time:190163ms step_avg:45.92ms +[2025-09-11 09:31:08] [Rank 0] step:4141/10000 train_time:190163ms step_avg:45.92ms +[2025-09-11 09:31:08] [Rank 0] step:4161/10000 train_time:190839ms step_avg:45.86ms +[2025-09-11 09:31:08] [Rank 0] step:4161/10000 train_time:190839ms step_avg:45.86ms +[2025-09-11 09:31:09] [Rank 0] step:4181/10000 train_time:191516ms step_avg:45.81ms +[2025-09-11 09:31:09] [Rank 0] step:4181/10000 train_time:191516ms step_avg:45.81ms +[2025-09-11 09:31:10] [Rank 0] step:4201/10000 train_time:192214ms step_avg:45.75ms +[2025-09-11 09:31:10] [Rank 0] step:4201/10000 train_time:192214ms step_avg:45.75ms +[2025-09-11 09:31:11] [Rank 0] step:4221/10000 train_time:192891ms step_avg:45.70ms +[2025-09-11 09:31:11] [Rank 0] step:4221/10000 train_time:192891ms step_avg:45.70ms +[2025-09-11 09:31:11] [Rank 0] step:4241/10000 train_time:193566ms step_avg:45.64ms +[2025-09-11 09:31:11] [Rank 0] step:4241/10000 train_time:193566ms step_avg:45.64ms +[2025-09-11 09:31:12] [Rank 0] step:4261/10000 train_time:194243ms step_avg:45.59ms +[2025-09-11 09:31:12] [Rank 0] step:4261/10000 train_time:194243ms step_avg:45.59ms +[2025-09-11 09:31:13] [Rank 0] step:4281/10000 train_time:194922ms step_avg:45.53ms +[2025-09-11 09:31:13] [Rank 0] step:4281/10000 train_time:194922ms step_avg:45.53ms +[2025-09-11 09:31:13] [Rank 0] step:4301/10000 train_time:195599ms step_avg:45.48ms +[2025-09-11 09:31:13] [Rank 0] step:4301/10000 train_time:195599ms step_avg:45.48ms +[2025-09-11 09:31:14] [Rank 0] step:4321/10000 train_time:196276ms step_avg:45.42ms 
+[2025-09-11 09:31:14] [Rank 0] step:4321/10000 train_time:196276ms step_avg:45.42ms +[2025-09-11 09:31:15] [Rank 0] step:4341/10000 train_time:196955ms step_avg:45.37ms +[2025-09-11 09:31:15] [Rank 0] step:4341/10000 train_time:196955ms step_avg:45.37ms +[2025-09-11 09:31:15] [Rank 0] step:4361/10000 train_time:197630ms step_avg:45.32ms +[2025-09-11 09:31:15] [Rank 0] step:4361/10000 train_time:197630ms step_avg:45.32ms +[2025-09-11 09:31:16] [Rank 0] step:4381/10000 train_time:198308ms step_avg:45.27ms +[2025-09-11 09:31:16] [Rank 0] step:4381/10000 train_time:198308ms step_avg:45.27ms +[2025-09-11 09:31:17] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 09:31:17] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 09:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 09:31:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 09:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 09:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 09:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 09:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 09:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 09:31:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 09:31:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 09:31:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 09:31:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 09:31:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 09:31:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 09:31:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 09:31:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 09:31:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 09:31:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 09:31:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 09:31:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 09:31:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 09:31:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 09:31:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 09:31:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 09:31:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 09:31:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 09:31:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 09:31:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 09:31:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 09:31:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 09:31:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 09:31:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 09:31:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 09:31:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 09:31:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 09:31:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 09:31:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 09:31:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 09:31:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 09:31:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 09:31:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 09:31:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:31:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:31:27] [Rank 0] PRINT: step:4400/10000 val_loss:4.7338 total_sharp:4.1637e-05 L1_sharp:3.5475e-02 L2_sharp:4.1278e-02 L3_sharp:4.4396e-02 L4_sharp:5.6747e-02 L5_sharp:6.5548e-02 L6_sharp:6.2966e-02 L7_sharp:7.1515e-02 L8_sharp:8.4918e-02 L9_sharp:1.1767e-01 L10_sharp:1.5536e-01 L11_sharp:2.1321e-01 L12_sharp:2.8332e-01 total_fnorm:1.7700e+02 total_l1_linf:3.7069e+05 total_spectral:8.8500e+01 L1_fnorm:1.1572e-01 L2_fnorm:1.1670e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1621e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1328e-01 L9_fnorm:1.1523e-01 L10_fnorm:1.1572e-01 L11_fnorm:1.1523e-01 L12_fnorm:1.1523e-01 L1_l1linf:3.0640e-02 L2_l1linf:3.1494e-02 L3_l1linf:3.2227e-02 L4_l1linf:3.1494e-02 L5_l1linf:3.1128e-02 L6_l1linf:3.0518e-02 L7_l1linf:3.1494e-02 L8_l1linf:3.0884e-02 L9_l1linf:3.1738e-02 L10_l1linf:3.1982e-02 L11_l1linf:3.2471e-02 L12_l1linf:3.3203e-02 L1_spectral:1.6144e-03 L2_spectral:1.6137e-03 L3_spectral:1.6031e-03 L4_spectral:1.6006e-03 L5_spectral:1.6054e-03 L6_spectral:1.6150e-03 L7_spectral:1.6711e-03 L8_spectral:1.6022e-03 L9_spectral:1.6095e-03 L10_spectral:1.6094e-03 L11_spectral:1.6044e-03 L12_spectral:1.6007e-03 train_time:198965ms step_avg:45.22ms +[2025-09-11 09:31:27] [Rank 0] PRINT: step:4400/10000 val_loss:4.7338 total_sharp:4.1637e-05 L1_sharp:3.5475e-02 L2_sharp:4.1278e-02 L3_sharp:4.4396e-02 L4_sharp:5.6747e-02 L5_sharp:6.5548e-02 L6_sharp:6.2966e-02 L7_sharp:7.1515e-02 L8_sharp:8.4918e-02 L9_sharp:1.1767e-01 L10_sharp:1.5536e-01 L11_sharp:2.1321e-01 L12_sharp:2.8332e-01 total_fnorm:1.7700e+02 total_l1_linf:3.7069e+05 total_spectral:8.8500e+01 L1_fnorm:1.1572e-01 L2_fnorm:1.1670e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1621e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1523e-01 L7_fnorm:1.1523e-01 L8_fnorm:1.1328e-01 L9_fnorm:1.1523e-01 L10_fnorm:1.1572e-01 L11_fnorm:1.1523e-01 L12_fnorm:1.1523e-01 L1_l1linf:3.0640e-02 L2_l1linf:3.1494e-02 L3_l1linf:3.2227e-02 L4_l1linf:3.1494e-02 L5_l1linf:3.1128e-02 
L6_l1linf:3.0518e-02 L7_l1linf:3.1494e-02 L8_l1linf:3.0884e-02 L9_l1linf:3.1738e-02 L10_l1linf:3.1982e-02 L11_l1linf:3.2471e-02 L12_l1linf:3.3203e-02 L1_spectral:1.6144e-03 L2_spectral:1.6137e-03 L3_spectral:1.6031e-03 L4_spectral:1.6006e-03 L5_spectral:1.6054e-03 L6_spectral:1.6150e-03 L7_spectral:1.6711e-03 L8_spectral:1.6022e-03 L9_spectral:1.6095e-03 L10_spectral:1.6094e-03 L11_spectral:1.6044e-03 L12_spectral:1.6007e-03 train_time:198965ms step_avg:45.22ms +[2025-09-11 09:31:29] [Rank 0] step:4401/10000 train_time:200670ms step_avg:45.60ms +[2025-09-11 09:31:29] [Rank 0] step:4401/10000 train_time:200670ms step_avg:45.60ms +[2025-09-11 09:31:30] [Rank 0] step:4421/10000 train_time:201375ms step_avg:45.55ms +[2025-09-11 09:31:30] [Rank 0] step:4421/10000 train_time:201375ms step_avg:45.55ms +[2025-09-11 09:31:30] [Rank 0] step:4441/10000 train_time:202054ms step_avg:45.50ms +[2025-09-11 09:31:30] [Rank 0] step:4441/10000 train_time:202054ms step_avg:45.50ms +[2025-09-11 09:31:31] [Rank 0] step:4461/10000 train_time:202734ms step_avg:45.45ms +[2025-09-11 09:31:31] [Rank 0] step:4461/10000 train_time:202734ms step_avg:45.45ms +[2025-09-11 09:31:32] [Rank 0] step:4481/10000 train_time:203414ms step_avg:45.39ms +[2025-09-11 09:31:32] [Rank 0] step:4481/10000 train_time:203414ms step_avg:45.39ms +[2025-09-11 09:31:32] [Rank 0] step:4501/10000 train_time:204095ms step_avg:45.34ms +[2025-09-11 09:31:32] [Rank 0] step:4501/10000 train_time:204095ms step_avg:45.34ms +[2025-09-11 09:31:33] [Rank 0] step:4521/10000 train_time:204774ms step_avg:45.29ms +[2025-09-11 09:31:33] [Rank 0] step:4521/10000 train_time:204774ms step_avg:45.29ms +[2025-09-11 09:31:34] [Rank 0] step:4541/10000 train_time:205454ms step_avg:45.24ms +[2025-09-11 09:31:34] [Rank 0] step:4541/10000 train_time:205454ms step_avg:45.24ms +[2025-09-11 09:31:34] [Rank 0] step:4561/10000 train_time:206133ms step_avg:45.19ms +[2025-09-11 09:31:34] [Rank 0] step:4561/10000 train_time:206133ms step_avg:45.19ms 
+[2025-09-11 09:31:35] [Rank 0] step:4581/10000 train_time:206812ms step_avg:45.15ms +[2025-09-11 09:31:35] [Rank 0] step:4581/10000 train_time:206812ms step_avg:45.15ms +[2025-09-11 09:31:36] [Rank 0] step:4601/10000 train_time:207492ms step_avg:45.10ms +[2025-09-11 09:31:36] [Rank 0] step:4601/10000 train_time:207492ms step_avg:45.10ms +[2025-09-11 09:31:36] [Rank 0] step:4621/10000 train_time:208171ms step_avg:45.05ms +[2025-09-11 09:31:36] [Rank 0] step:4621/10000 train_time:208171ms step_avg:45.05ms +[2025-09-11 09:31:37] [Rank 0] step:4641/10000 train_time:208851ms step_avg:45.00ms +[2025-09-11 09:31:37] [Rank 0] step:4641/10000 train_time:208851ms step_avg:45.00ms +[2025-09-11 09:31:38] [Rank 0] step:4661/10000 train_time:209531ms step_avg:44.95ms +[2025-09-11 09:31:38] [Rank 0] step:4661/10000 train_time:209531ms step_avg:44.95ms +[2025-09-11 09:31:38] [Rank 0] step:4681/10000 train_time:210210ms step_avg:44.91ms +[2025-09-11 09:31:38] [Rank 0] step:4681/10000 train_time:210210ms step_avg:44.91ms +[2025-09-11 09:31:39] [Rank 0] step:4701/10000 train_time:210889ms step_avg:44.86ms +[2025-09-11 09:31:39] [Rank 0] step:4701/10000 train_time:210889ms step_avg:44.86ms +[2025-09-11 09:31:40] [Rank 0] step:4721/10000 train_time:211568ms step_avg:44.81ms +[2025-09-11 09:31:40] [Rank 0] step:4721/10000 train_time:211568ms step_avg:44.81ms +[2025-09-11 09:31:40] [Rank 0] step:4741/10000 train_time:212248ms step_avg:44.77ms +[2025-09-11 09:31:40] [Rank 0] step:4741/10000 train_time:212248ms step_avg:44.77ms +[2025-09-11 09:31:41] [Rank 0] step:4761/10000 train_time:212930ms step_avg:44.72ms +[2025-09-11 09:31:41] [Rank 0] step:4761/10000 train_time:212930ms step_avg:44.72ms +[2025-09-11 09:31:42] [Rank 0] step:4781/10000 train_time:213609ms step_avg:44.68ms +[2025-09-11 09:31:42] [Rank 0] step:4781/10000 train_time:213609ms step_avg:44.68ms +[2025-09-11 09:31:42] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 09:31:42] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 09:31:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 09:31:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 09:31:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 09:31:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 09:31:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:31:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:31:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 09:31:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 09:31:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 09:31:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 09:31:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 09:31:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 09:31:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 09:31:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 09:31:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 09:31:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 09:31:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 09:31:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 09:31:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 09:31:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 09:31:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 09:31:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 09:31:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 09:31:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 09:31:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 09:31:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 09:31:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 09:31:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 09:31:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 09:31:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 09:31:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 09:31:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 09:31:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 09:31:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 09:31:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 09:31:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 09:31:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 09:31:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 09:31:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 09:31:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 09:31:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 09:31:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 09:31:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:31:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:31:53] [Rank 0] PRINT: step:4800/10000 val_loss:4.6662 total_sharp:4.0739e-05 L1_sharp:4.3027e-02 L2_sharp:4.7260e-02 L3_sharp:5.8247e-02 L4_sharp:7.6005e-02 L5_sharp:8.5070e-02 L6_sharp:8.7017e-02 L7_sharp:8.9223e-02 L8_sharp:1.0334e-01 L9_sharp:1.3581e-01 L10_sharp:3.6723e-01 L11_sharp:5.4384e-01 L12_sharp:1.5348e+00 total_fnorm:1.8800e+02 total_l1_linf:4.0960e+05 total_spectral:9.4000e+01 L1_fnorm:1.1572e-01 L2_fnorm:1.1621e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1230e-01 L9_fnorm:1.1475e-01 L10_fnorm:1.1475e-01 L11_fnorm:1.1523e-01 L12_fnorm:1.1572e-01 L1_l1linf:2.9541e-02 L2_l1linf:3.0151e-02 L3_l1linf:3.0273e-02 L4_l1linf:2.9785e-02 L5_l1linf:2.9663e-02 L6_l1linf:2.8809e-02 L7_l1linf:2.9663e-02 L8_l1linf:2.9663e-02 L9_l1linf:2.9541e-02 L10_l1linf:3.0640e-02 L11_l1linf:3.1982e-02 L12_l1linf:3.2715e-02 L1_spectral:1.6055e-03 L2_spectral:1.6085e-03 L3_spectral:1.6058e-03 L4_spectral:1.6128e-03 L5_spectral:1.6070e-03 L6_spectral:1.6102e-03 L7_spectral:1.6041e-03 L8_spectral:1.6104e-03 L9_spectral:1.6158e-03 L10_spectral:1.6171e-03 L11_spectral:1.6032e-03 L12_spectral:1.6036e-03 train_time:214268ms step_avg:44.64ms +[2025-09-11 09:31:53] [Rank 0] PRINT: step:4800/10000 
val_loss:4.6662 total_sharp:4.0739e-05 L1_sharp:4.3027e-02 L2_sharp:4.7260e-02 L3_sharp:5.8247e-02 L4_sharp:7.6005e-02 L5_sharp:8.5070e-02 L6_sharp:8.7017e-02 L7_sharp:8.9223e-02 L8_sharp:1.0334e-01 L9_sharp:1.3581e-01 L10_sharp:3.6723e-01 L11_sharp:5.4384e-01 L12_sharp:1.5348e+00 total_fnorm:1.8800e+02 total_l1_linf:4.0960e+05 total_spectral:9.4000e+01 L1_fnorm:1.1572e-01 L2_fnorm:1.1621e-01 L3_fnorm:1.1621e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1426e-01 L8_fnorm:1.1230e-01 L9_fnorm:1.1475e-01 L10_fnorm:1.1475e-01 L11_fnorm:1.1523e-01 L12_fnorm:1.1572e-01 L1_l1linf:2.9541e-02 L2_l1linf:3.0151e-02 L3_l1linf:3.0273e-02 L4_l1linf:2.9785e-02 L5_l1linf:2.9663e-02 L6_l1linf:2.8809e-02 L7_l1linf:2.9663e-02 L8_l1linf:2.9663e-02 L9_l1linf:2.9541e-02 L10_l1linf:3.0640e-02 L11_l1linf:3.1982e-02 L12_l1linf:3.2715e-02 L1_spectral:1.6055e-03 L2_spectral:1.6085e-03 L3_spectral:1.6058e-03 L4_spectral:1.6128e-03 L5_spectral:1.6070e-03 L6_spectral:1.6102e-03 L7_spectral:1.6041e-03 L8_spectral:1.6104e-03 L9_spectral:1.6158e-03 L10_spectral:1.6171e-03 L11_spectral:1.6032e-03 L12_spectral:1.6036e-03 train_time:214268ms step_avg:44.64ms +[2025-09-11 09:31:55] [Rank 0] step:4801/10000 train_time:216007ms step_avg:44.99ms +[2025-09-11 09:31:55] [Rank 0] step:4801/10000 train_time:216007ms step_avg:44.99ms +[2025-09-11 09:31:56] [Rank 0] step:4821/10000 train_time:216718ms step_avg:44.95ms +[2025-09-11 09:31:56] [Rank 0] step:4821/10000 train_time:216718ms step_avg:44.95ms +[2025-09-11 09:31:57] [Rank 0] step:4841/10000 train_time:217674ms step_avg:44.96ms +[2025-09-11 09:31:57] [Rank 0] step:4841/10000 train_time:217674ms step_avg:44.96ms +[2025-09-11 09:31:57] [Rank 0] step:4861/10000 train_time:218355ms step_avg:44.92ms +[2025-09-11 09:31:57] [Rank 0] step:4861/10000 train_time:218355ms step_avg:44.92ms +[2025-09-11 09:31:58] [Rank 0] step:4881/10000 train_time:219034ms step_avg:44.87ms +[2025-09-11 09:31:58] [Rank 0] step:4881/10000 
train_time:219034ms step_avg:44.87ms +[2025-09-11 09:31:59] [Rank 0] step:4901/10000 train_time:219716ms step_avg:44.83ms +[2025-09-11 09:31:59] [Rank 0] step:4901/10000 train_time:219716ms step_avg:44.83ms +[2025-09-11 09:31:59] [Rank 0] step:4921/10000 train_time:220396ms step_avg:44.79ms +[2025-09-11 09:31:59] [Rank 0] step:4921/10000 train_time:220396ms step_avg:44.79ms +[2025-09-11 09:32:00] [Rank 0] step:4941/10000 train_time:221076ms step_avg:44.74ms +[2025-09-11 09:32:00] [Rank 0] step:4941/10000 train_time:221076ms step_avg:44.74ms +[2025-09-11 09:32:01] [Rank 0] step:4961/10000 train_time:221757ms step_avg:44.70ms +[2025-09-11 09:32:01] [Rank 0] step:4961/10000 train_time:221757ms step_avg:44.70ms +[2025-09-11 09:32:01] [Rank 0] step:4981/10000 train_time:222438ms step_avg:44.66ms +[2025-09-11 09:32:01] [Rank 0] step:4981/10000 train_time:222438ms step_avg:44.66ms +[2025-09-11 09:32:02] [Rank 0] step:5001/10000 train_time:223119ms step_avg:44.61ms +[2025-09-11 09:32:02] [Rank 0] step:5001/10000 train_time:223119ms step_avg:44.61ms +[2025-09-11 09:32:03] [Rank 0] step:5021/10000 train_time:223800ms step_avg:44.57ms +[2025-09-11 09:32:03] [Rank 0] step:5021/10000 train_time:223800ms step_avg:44.57ms +[2025-09-11 09:32:03] [Rank 0] step:5041/10000 train_time:224480ms step_avg:44.53ms +[2025-09-11 09:32:03] [Rank 0] step:5041/10000 train_time:224480ms step_avg:44.53ms +[2025-09-11 09:32:04] [Rank 0] step:5061/10000 train_time:225160ms step_avg:44.49ms +[2025-09-11 09:32:04] [Rank 0] step:5061/10000 train_time:225160ms step_avg:44.49ms +[2025-09-11 09:32:05] [Rank 0] step:5081/10000 train_time:225840ms step_avg:44.45ms +[2025-09-11 09:32:05] [Rank 0] step:5081/10000 train_time:225840ms step_avg:44.45ms +[2025-09-11 09:32:05] [Rank 0] step:5101/10000 train_time:226521ms step_avg:44.41ms +[2025-09-11 09:32:05] [Rank 0] step:5101/10000 train_time:226521ms step_avg:44.41ms +[2025-09-11 09:32:06] [Rank 0] step:5121/10000 train_time:227202ms step_avg:44.37ms 
+[2025-09-11 09:32:06] [Rank 0] step:5121/10000 train_time:227202ms step_avg:44.37ms +[2025-09-11 09:32:07] [Rank 0] step:5141/10000 train_time:227883ms step_avg:44.33ms +[2025-09-11 09:32:07] [Rank 0] step:5141/10000 train_time:227883ms step_avg:44.33ms +[2025-09-11 09:32:08] [Rank 0] step:5161/10000 train_time:228564ms step_avg:44.29ms +[2025-09-11 09:32:08] [Rank 0] step:5161/10000 train_time:228564ms step_avg:44.29ms +[2025-09-11 09:32:08] [Rank 0] step:5181/10000 train_time:229243ms step_avg:44.25ms +[2025-09-11 09:32:08] [Rank 0] step:5181/10000 train_time:229243ms step_avg:44.25ms +[2025-09-11 09:32:09] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 09:32:09] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 09:32:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 09:32:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 09:32:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 09:32:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 09:32:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:32:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:32:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 09:32:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 09:32:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 09:32:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 09:32:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 09:32:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 09:32:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 09:32:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 09:32:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 09:32:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 09:32:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 09:32:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 09:32:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 09:32:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 09:32:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 09:32:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 09:32:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 09:32:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 09:32:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 09:32:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 09:32:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 09:32:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 09:32:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 09:32:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 09:32:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 09:32:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 09:32:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 09:32:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 09:32:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 09:32:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 09:32:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 09:32:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 09:32:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 09:32:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 09:32:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 09:32:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 09:32:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:32:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:32:20] [Rank 0] PRINT: step:5200/10000 val_loss:4.6129 total_sharp:4.4128e-05 L1_sharp:3.9785e-02 L2_sharp:4.4188e-02 L3_sharp:5.5223e-02 L4_sharp:6.5914e-02 L5_sharp:7.7889e-02 L6_sharp:7.5110e-02 L7_sharp:7.9803e-02 L8_sharp:1.0560e-01 L9_sharp:1.6242e-01 L10_sharp:2.3085e-01 L11_sharp:4.6251e-01 L12_sharp:1.8542e+00 total_fnorm:1.7700e+02 total_l1_linf:3.6454e+05 total_spectral:8.8500e+01 L1_fnorm:1.1523e-01 L2_fnorm:1.1621e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1377e-01 L8_fnorm:1.1230e-01 L9_fnorm:1.1377e-01 L10_fnorm:1.1426e-01 L11_fnorm:1.1426e-01 L12_fnorm:1.1475e-01 L1_l1linf:2.8687e-02 L2_l1linf:2.9541e-02 L3_l1linf:2.9541e-02 L4_l1linf:2.9053e-02 L5_l1linf:2.8442e-02 L6_l1linf:2.8320e-02 L7_l1linf:2.8076e-02 L8_l1linf:2.8198e-02 L9_l1linf:2.8198e-02 L10_l1linf:2.8809e-02 L11_l1linf:2.9785e-02 L12_l1linf:3.1128e-02 L1_spectral:1.6302e-03 L2_spectral:1.6073e-03 L3_spectral:1.6124e-03 L4_spectral:1.6117e-03 L5_spectral:1.6228e-03 L6_spectral:1.6109e-03 L7_spectral:1.6078e-03 L8_spectral:1.6143e-03 L9_spectral:1.6241e-03 L10_spectral:1.6217e-03 L11_spectral:1.6182e-03 L12_spectral:1.6105e-03 train_time:229909ms step_avg:44.21ms +[2025-09-11 09:32:20] [Rank 0] PRINT: step:5200/10000 val_loss:4.6129 total_sharp:4.4128e-05 L1_sharp:3.9785e-02 L2_sharp:4.4188e-02 L3_sharp:5.5223e-02 L4_sharp:6.5914e-02 L5_sharp:7.7889e-02 L6_sharp:7.5110e-02 L7_sharp:7.9803e-02 L8_sharp:1.0560e-01 L9_sharp:1.6242e-01 L10_sharp:2.3085e-01 L11_sharp:4.6251e-01 L12_sharp:1.8542e+00 total_fnorm:1.7700e+02 total_l1_linf:3.6454e+05 total_spectral:8.8500e+01 L1_fnorm:1.1523e-01 L2_fnorm:1.1621e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1426e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1377e-01 L8_fnorm:1.1230e-01 L9_fnorm:1.1377e-01 L10_fnorm:1.1426e-01 L11_fnorm:1.1426e-01 L12_fnorm:1.1475e-01 L1_l1linf:2.8687e-02 L2_l1linf:2.9541e-02 L3_l1linf:2.9541e-02 L4_l1linf:2.9053e-02 L5_l1linf:2.8442e-02 
L6_l1linf:2.8320e-02 L7_l1linf:2.8076e-02 L8_l1linf:2.8198e-02 L9_l1linf:2.8198e-02 L10_l1linf:2.8809e-02 L11_l1linf:2.9785e-02 L12_l1linf:3.1128e-02 L1_spectral:1.6302e-03 L2_spectral:1.6073e-03 L3_spectral:1.6124e-03 L4_spectral:1.6117e-03 L5_spectral:1.6228e-03 L6_spectral:1.6109e-03 L7_spectral:1.6078e-03 L8_spectral:1.6143e-03 L9_spectral:1.6241e-03 L10_spectral:1.6217e-03 L11_spectral:1.6182e-03 L12_spectral:1.6105e-03 train_time:229909ms step_avg:44.21ms +[2025-09-11 09:32:22] [Rank 0] step:5201/10000 train_time:231714ms step_avg:44.55ms +[2025-09-11 09:32:22] [Rank 0] step:5201/10000 train_time:231714ms step_avg:44.55ms +[2025-09-11 09:32:22] [Rank 0] step:5221/10000 train_time:232432ms step_avg:44.52ms +[2025-09-11 09:32:22] [Rank 0] step:5221/10000 train_time:232432ms step_avg:44.52ms +[2025-09-11 09:32:23] [Rank 0] step:5241/10000 train_time:233119ms step_avg:44.48ms +[2025-09-11 09:32:23] [Rank 0] step:5241/10000 train_time:233119ms step_avg:44.48ms +[2025-09-11 09:32:24] [Rank 0] step:5261/10000 train_time:233808ms step_avg:44.44ms +[2025-09-11 09:32:24] [Rank 0] step:5261/10000 train_time:233808ms step_avg:44.44ms +[2025-09-11 09:32:24] [Rank 0] step:5281/10000 train_time:234497ms step_avg:44.40ms +[2025-09-11 09:32:24] [Rank 0] step:5281/10000 train_time:234497ms step_avg:44.40ms +[2025-09-11 09:32:25] [Rank 0] step:5301/10000 train_time:235186ms step_avg:44.37ms +[2025-09-11 09:32:25] [Rank 0] step:5301/10000 train_time:235186ms step_avg:44.37ms +[2025-09-11 09:32:26] [Rank 0] step:5321/10000 train_time:235874ms step_avg:44.33ms +[2025-09-11 09:32:26] [Rank 0] step:5321/10000 train_time:235874ms step_avg:44.33ms +[2025-09-11 09:32:27] [Rank 0] step:5341/10000 train_time:236562ms step_avg:44.29ms +[2025-09-11 09:32:27] [Rank 0] step:5341/10000 train_time:236562ms step_avg:44.29ms +[2025-09-11 09:32:27] [Rank 0] step:5361/10000 train_time:237251ms step_avg:44.26ms +[2025-09-11 09:32:27] [Rank 0] step:5361/10000 train_time:237251ms step_avg:44.26ms 
+[2025-09-11 09:32:28] [Rank 0] step:5381/10000 train_time:237940ms step_avg:44.22ms +[2025-09-11 09:32:28] [Rank 0] step:5381/10000 train_time:237940ms step_avg:44.22ms +[2025-09-11 09:32:29] [Rank 0] step:5401/10000 train_time:238627ms step_avg:44.18ms +[2025-09-11 09:32:29] [Rank 0] step:5401/10000 train_time:238627ms step_avg:44.18ms +[2025-09-11 09:32:29] [Rank 0] step:5421/10000 train_time:239317ms step_avg:44.15ms +[2025-09-11 09:32:29] [Rank 0] step:5421/10000 train_time:239317ms step_avg:44.15ms +[2025-09-11 09:32:30] [Rank 0] step:5441/10000 train_time:240007ms step_avg:44.11ms +[2025-09-11 09:32:30] [Rank 0] step:5441/10000 train_time:240007ms step_avg:44.11ms +[2025-09-11 09:32:31] [Rank 0] step:5461/10000 train_time:240695ms step_avg:44.08ms +[2025-09-11 09:32:31] [Rank 0] step:5461/10000 train_time:240695ms step_avg:44.08ms +[2025-09-11 09:32:31] [Rank 0] step:5481/10000 train_time:241385ms step_avg:44.04ms +[2025-09-11 09:32:31] [Rank 0] step:5481/10000 train_time:241385ms step_avg:44.04ms +[2025-09-11 09:32:32] [Rank 0] step:5501/10000 train_time:242073ms step_avg:44.01ms +[2025-09-11 09:32:32] [Rank 0] step:5501/10000 train_time:242073ms step_avg:44.01ms +[2025-09-11 09:32:33] [Rank 0] step:5521/10000 train_time:242762ms step_avg:43.97ms +[2025-09-11 09:32:33] [Rank 0] step:5521/10000 train_time:242762ms step_avg:43.97ms +[2025-09-11 09:32:33] [Rank 0] step:5541/10000 train_time:243454ms step_avg:43.94ms +[2025-09-11 09:32:33] [Rank 0] step:5541/10000 train_time:243454ms step_avg:43.94ms +[2025-09-11 09:32:34] [Rank 0] step:5561/10000 train_time:244144ms step_avg:43.90ms +[2025-09-11 09:32:34] [Rank 0] step:5561/10000 train_time:244144ms step_avg:43.90ms +[2025-09-11 09:32:35] [Rank 0] step:5581/10000 train_time:244835ms step_avg:43.87ms +[2025-09-11 09:32:35] [Rank 0] step:5581/10000 train_time:244835ms step_avg:43.87ms +[2025-09-11 09:32:35] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 09:32:35] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 09:32:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 09:32:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 09:32:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 09:32:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 09:32:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:32:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:32:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 09:32:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 09:32:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 09:32:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 09:32:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 09:32:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 09:32:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 09:32:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 09:32:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 09:32:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 09:32:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 09:32:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 09:32:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 09:32:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 09:32:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 09:32:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 09:32:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 09:32:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 09:32:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 09:32:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 09:32:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 09:32:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 09:32:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 09:32:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 09:32:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 09:32:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 09:32:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 09:32:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 09:32:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 09:32:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 09:32:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 09:32:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 09:32:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 09:32:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 09:32:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 09:32:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 09:32:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:32:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:32:46] [Rank 0] PRINT: step:5600/10000 val_loss:4.5726 total_sharp:4.0842e-05 L1_sharp:3.0462e-02 L2_sharp:3.9687e-02 L3_sharp:4.9882e-02 L4_sharp:6.5711e-02 L5_sharp:8.6430e-02 L6_sharp:9.6790e-02 L7_sharp:9.9690e-02 L8_sharp:1.0844e-01 L9_sharp:1.9763e-01 L10_sharp:3.4887e-01 L11_sharp:8.5689e-01 L12_sharp:2.3425e+00 total_fnorm:1.8000e+02 total_l1_linf:3.8093e+05 total_spectral:9.0000e+01 L1_fnorm:1.1572e-01 L2_fnorm:1.1670e-01 L3_fnorm:1.1670e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1377e-01 L8_fnorm:1.1133e-01 L9_fnorm:1.1377e-01 L10_fnorm:1.1426e-01 L11_fnorm:1.1426e-01 L12_fnorm:1.1426e-01 L1_l1linf:2.8320e-02 L2_l1linf:2.9419e-02 L3_l1linf:2.9419e-02 L4_l1linf:2.9053e-02 L5_l1linf:2.8809e-02 L6_l1linf:2.8931e-02 L7_l1linf:2.8320e-02 L8_l1linf:2.8442e-02 L9_l1linf:2.8687e-02 L10_l1linf:2.9053e-02 L11_l1linf:2.9785e-02 L12_l1linf:3.1250e-02 L1_spectral:1.6105e-03 L2_spectral:1.6066e-03 L3_spectral:1.6267e-03 L4_spectral:1.6109e-03 L5_spectral:1.6237e-03 L6_spectral:1.6226e-03 L7_spectral:1.6292e-03 L8_spectral:1.6026e-03 L9_spectral:1.6229e-03 L10_spectral:1.6251e-03 L11_spectral:1.6205e-03 L12_spectral:1.6148e-03 train_time:245505ms step_avg:43.84ms +[2025-09-11 09:32:46] [Rank 0] PRINT: step:5600/10000 
val_loss:4.5726 total_sharp:4.0842e-05 L1_sharp:3.0462e-02 L2_sharp:3.9687e-02 L3_sharp:4.9882e-02 L4_sharp:6.5711e-02 L5_sharp:8.6430e-02 L6_sharp:9.6790e-02 L7_sharp:9.9690e-02 L8_sharp:1.0844e-01 L9_sharp:1.9763e-01 L10_sharp:3.4887e-01 L11_sharp:8.5689e-01 L12_sharp:2.3425e+00 total_fnorm:1.8000e+02 total_l1_linf:3.8093e+05 total_spectral:9.0000e+01 L1_fnorm:1.1572e-01 L2_fnorm:1.1670e-01 L3_fnorm:1.1670e-01 L4_fnorm:1.1572e-01 L5_fnorm:1.1475e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1377e-01 L8_fnorm:1.1133e-01 L9_fnorm:1.1377e-01 L10_fnorm:1.1426e-01 L11_fnorm:1.1426e-01 L12_fnorm:1.1426e-01 L1_l1linf:2.8320e-02 L2_l1linf:2.9419e-02 L3_l1linf:2.9419e-02 L4_l1linf:2.9053e-02 L5_l1linf:2.8809e-02 L6_l1linf:2.8931e-02 L7_l1linf:2.8320e-02 L8_l1linf:2.8442e-02 L9_l1linf:2.8687e-02 L10_l1linf:2.9053e-02 L11_l1linf:2.9785e-02 L12_l1linf:3.1250e-02 L1_spectral:1.6105e-03 L2_spectral:1.6066e-03 L3_spectral:1.6267e-03 L4_spectral:1.6109e-03 L5_spectral:1.6237e-03 L6_spectral:1.6226e-03 L7_spectral:1.6292e-03 L8_spectral:1.6026e-03 L9_spectral:1.6229e-03 L10_spectral:1.6251e-03 L11_spectral:1.6205e-03 L12_spectral:1.6148e-03 train_time:245505ms step_avg:43.84ms +[2025-09-11 09:32:48] [Rank 0] step:5601/10000 train_time:247307ms step_avg:44.15ms +[2025-09-11 09:32:48] [Rank 0] step:5601/10000 train_time:247307ms step_avg:44.15ms +[2025-09-11 09:32:49] [Rank 0] step:5621/10000 train_time:248025ms step_avg:44.12ms +[2025-09-11 09:32:49] [Rank 0] step:5621/10000 train_time:248025ms step_avg:44.12ms +[2025-09-11 09:32:49] [Rank 0] step:5641/10000 train_time:248714ms step_avg:44.09ms +[2025-09-11 09:32:49] [Rank 0] step:5641/10000 train_time:248714ms step_avg:44.09ms +[2025-09-11 09:32:50] [Rank 0] step:5661/10000 train_time:249403ms step_avg:44.06ms +[2025-09-11 09:32:50] [Rank 0] step:5661/10000 train_time:249403ms step_avg:44.06ms +[2025-09-11 09:32:51] [Rank 0] step:5681/10000 train_time:250093ms step_avg:44.02ms +[2025-09-11 09:32:51] [Rank 0] step:5681/10000 
train_time:250093ms step_avg:44.02ms +[2025-09-11 09:32:51] [Rank 0] step:5701/10000 train_time:250785ms step_avg:43.99ms +[2025-09-11 09:32:51] [Rank 0] step:5701/10000 train_time:250785ms step_avg:43.99ms +[2025-09-11 09:32:52] [Rank 0] step:5721/10000 train_time:251474ms step_avg:43.96ms +[2025-09-11 09:32:52] [Rank 0] step:5721/10000 train_time:251474ms step_avg:43.96ms +[2025-09-11 09:32:53] [Rank 0] step:5741/10000 train_time:252165ms step_avg:43.92ms +[2025-09-11 09:32:53] [Rank 0] step:5741/10000 train_time:252165ms step_avg:43.92ms +[2025-09-11 09:32:53] [Rank 0] step:5761/10000 train_time:252855ms step_avg:43.89ms +[2025-09-11 09:32:53] [Rank 0] step:5761/10000 train_time:252855ms step_avg:43.89ms +[2025-09-11 09:32:54] [Rank 0] step:5781/10000 train_time:253546ms step_avg:43.86ms +[2025-09-11 09:32:54] [Rank 0] step:5781/10000 train_time:253546ms step_avg:43.86ms +[2025-09-11 09:32:55] [Rank 0] step:5801/10000 train_time:254238ms step_avg:43.83ms +[2025-09-11 09:32:55] [Rank 0] step:5801/10000 train_time:254238ms step_avg:43.83ms +[2025-09-11 09:32:55] [Rank 0] step:5821/10000 train_time:254926ms step_avg:43.79ms +[2025-09-11 09:32:55] [Rank 0] step:5821/10000 train_time:254926ms step_avg:43.79ms +[2025-09-11 09:32:56] [Rank 0] step:5841/10000 train_time:255617ms step_avg:43.76ms +[2025-09-11 09:32:56] [Rank 0] step:5841/10000 train_time:255617ms step_avg:43.76ms +[2025-09-11 09:32:57] [Rank 0] step:5861/10000 train_time:256454ms step_avg:43.76ms +[2025-09-11 09:32:57] [Rank 0] step:5861/10000 train_time:256454ms step_avg:43.76ms +[2025-09-11 09:32:58] [Rank 0] step:5881/10000 train_time:257252ms step_avg:43.74ms +[2025-09-11 09:32:58] [Rank 0] step:5881/10000 train_time:257252ms step_avg:43.74ms +[2025-09-11 09:32:59] [Rank 0] step:5901/10000 train_time:257941ms step_avg:43.71ms +[2025-09-11 09:32:59] [Rank 0] step:5901/10000 train_time:257941ms step_avg:43.71ms +[2025-09-11 09:32:59] [Rank 0] step:5921/10000 train_time:258634ms step_avg:43.68ms 
+[2025-09-11 09:32:59] [Rank 0] step:5921/10000 train_time:258634ms step_avg:43.68ms +[2025-09-11 09:33:00] [Rank 0] step:5941/10000 train_time:259557ms step_avg:43.69ms +[2025-09-11 09:33:00] [Rank 0] step:5941/10000 train_time:259557ms step_avg:43.69ms +[2025-09-11 09:33:01] [Rank 0] step:5961/10000 train_time:260248ms step_avg:43.66ms +[2025-09-11 09:33:01] [Rank 0] step:5961/10000 train_time:260248ms step_avg:43.66ms +[2025-09-11 09:33:02] [Rank 0] step:5981/10000 train_time:260939ms step_avg:43.63ms +[2025-09-11 09:33:02] [Rank 0] step:5981/10000 train_time:260939ms step_avg:43.63ms +[2025-09-11 09:33:02] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 09:33:02] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 09:33:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 09:33:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 09:33:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 09:33:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 09:33:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:33:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:33:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 09:33:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 09:33:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 09:33:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 09:33:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 09:33:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 09:33:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 09:33:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 09:33:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 09:33:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 09:33:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 09:33:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 09:33:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 09:33:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 09:33:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 09:33:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 09:33:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 09:33:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 09:33:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 09:33:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 09:33:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 09:33:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 09:33:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 09:33:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 09:33:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 09:33:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 09:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 09:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 09:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 09:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 09:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 09:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 09:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 09:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 09:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 09:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 09:33:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:33:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:33:16] [Rank 0] PRINT: step:6000/10000 val_loss:4.5184 total_sharp:3.5874e-05 L1_sharp:3.5002e-02 L2_sharp:4.3393e-02 L3_sharp:5.9765e-02 L4_sharp:6.8379e-02 L5_sharp:8.0179e-02 L6_sharp:9.9187e-02 L7_sharp:8.4399e-02 L8_sharp:7.7020e-02 L9_sharp:1.0990e-01 L10_sharp:1.7654e-01 L11_sharp:2.5001e-01 L12_sharp:4.6222e-01 total_fnorm:1.7800e+02 total_l1_linf:3.7069e+05 total_spectral:8.9000e+01 L1_fnorm:1.1572e-01 L2_fnorm:1.1670e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1377e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1377e-01 L8_fnorm:1.1084e-01 L9_fnorm:1.1328e-01 L10_fnorm:1.1328e-01 L11_fnorm:1.1377e-01 L12_fnorm:1.1328e-01 L1_l1linf:2.8198e-02 L2_l1linf:2.8442e-02 L3_l1linf:2.8564e-02 L4_l1linf:2.8320e-02 L5_l1linf:2.8809e-02 L6_l1linf:2.7222e-02 L7_l1linf:2.7466e-02 L8_l1linf:2.6978e-02 L9_l1linf:2.7466e-02 L10_l1linf:2.8198e-02 L11_l1linf:2.8320e-02 L12_l1linf:2.8442e-02 L1_spectral:1.6055e-03 L2_spectral:1.6165e-03 L3_spectral:1.6067e-03 L4_spectral:1.6136e-03 L5_spectral:1.6165e-03 L6_spectral:1.6158e-03 L7_spectral:1.6177e-03 L8_spectral:1.6056e-03 L9_spectral:1.6224e-03 L10_spectral:1.6078e-03 L11_spectral:1.6080e-03 L12_spectral:1.6023e-03 train_time:261615ms step_avg:43.60ms +[2025-09-11 09:33:16] [Rank 0] PRINT: step:6000/10000 val_loss:4.5184 total_sharp:3.5874e-05 L1_sharp:3.5002e-02 L2_sharp:4.3393e-02 L3_sharp:5.9765e-02 L4_sharp:6.8379e-02 L5_sharp:8.0179e-02 L6_sharp:9.9187e-02 L7_sharp:8.4399e-02 L8_sharp:7.7020e-02 L9_sharp:1.0990e-01 L10_sharp:1.7654e-01 L11_sharp:2.5001e-01 L12_sharp:4.6222e-01 total_fnorm:1.7800e+02 total_l1_linf:3.7069e+05 total_spectral:8.9000e+01 L1_fnorm:1.1572e-01 L2_fnorm:1.1670e-01 L3_fnorm:1.1572e-01 L4_fnorm:1.1523e-01 L5_fnorm:1.1377e-01 L6_fnorm:1.1426e-01 L7_fnorm:1.1377e-01 L8_fnorm:1.1084e-01 L9_fnorm:1.1328e-01 L10_fnorm:1.1328e-01 L11_fnorm:1.1377e-01 L12_fnorm:1.1328e-01 L1_l1linf:2.8198e-02 L2_l1linf:2.8442e-02 L3_l1linf:2.8564e-02 L4_l1linf:2.8320e-02 L5_l1linf:2.8809e-02 
L6_l1linf:2.7222e-02 L7_l1linf:2.7466e-02 L8_l1linf:2.6978e-02 L9_l1linf:2.7466e-02 L10_l1linf:2.8198e-02 L11_l1linf:2.8320e-02 L12_l1linf:2.8442e-02 L1_spectral:1.6055e-03 L2_spectral:1.6165e-03 L3_spectral:1.6067e-03 L4_spectral:1.6136e-03 L5_spectral:1.6165e-03 L6_spectral:1.6158e-03 L7_spectral:1.6177e-03 L8_spectral:1.6056e-03 L9_spectral:1.6224e-03 L10_spectral:1.6078e-03 L11_spectral:1.6080e-03 L12_spectral:1.6023e-03 train_time:261615ms step_avg:43.60ms +[2025-09-11 09:33:18] [Rank 0] step:6001/10000 train_time:263415ms step_avg:43.90ms +[2025-09-11 09:33:18] [Rank 0] step:6001/10000 train_time:263415ms step_avg:43.90ms +[2025-09-11 09:33:19] [Rank 0] step:6021/10000 train_time:264146ms step_avg:43.87ms +[2025-09-11 09:33:19] [Rank 0] step:6021/10000 train_time:264146ms step_avg:43.87ms +[2025-09-11 09:33:20] [Rank 0] step:6041/10000 train_time:264840ms step_avg:43.84ms +[2025-09-11 09:33:20] [Rank 0] step:6041/10000 train_time:264840ms step_avg:43.84ms +[2025-09-11 09:33:20] [Rank 0] step:6061/10000 train_time:265531ms step_avg:43.81ms +[2025-09-11 09:33:20] [Rank 0] step:6061/10000 train_time:265531ms step_avg:43.81ms +[2025-09-11 09:33:21] [Rank 0] step:6081/10000 train_time:266223ms step_avg:43.78ms +[2025-09-11 09:33:21] [Rank 0] step:6081/10000 train_time:266223ms step_avg:43.78ms +[2025-09-11 09:33:22] [Rank 0] step:6101/10000 train_time:266913ms step_avg:43.75ms +[2025-09-11 09:33:22] [Rank 0] step:6101/10000 train_time:266913ms step_avg:43.75ms +[2025-09-11 09:33:22] [Rank 0] step:6121/10000 train_time:267605ms step_avg:43.72ms +[2025-09-11 09:33:22] [Rank 0] step:6121/10000 train_time:267605ms step_avg:43.72ms +[2025-09-11 09:33:23] [Rank 0] step:6141/10000 train_time:268297ms step_avg:43.69ms +[2025-09-11 09:33:23] [Rank 0] step:6141/10000 train_time:268297ms step_avg:43.69ms +[2025-09-11 09:33:24] [Rank 0] step:6161/10000 train_time:268987ms step_avg:43.66ms +[2025-09-11 09:33:24] [Rank 0] step:6161/10000 train_time:268987ms step_avg:43.66ms 
+[2025-09-11 09:33:24] [Rank 0] step:6181/10000 train_time:269677ms step_avg:43.63ms +[2025-09-11 09:33:24] [Rank 0] step:6181/10000 train_time:269677ms step_avg:43.63ms +[2025-09-11 09:33:25] [Rank 0] step:6201/10000 train_time:270369ms step_avg:43.60ms +[2025-09-11 09:33:25] [Rank 0] step:6201/10000 train_time:270369ms step_avg:43.60ms +[2025-09-11 09:33:26] [Rank 0] step:6221/10000 train_time:271062ms step_avg:43.57ms +[2025-09-11 09:33:26] [Rank 0] step:6221/10000 train_time:271062ms step_avg:43.57ms +[2025-09-11 09:33:27] [Rank 0] step:6241/10000 train_time:271753ms step_avg:43.54ms +[2025-09-11 09:33:27] [Rank 0] step:6241/10000 train_time:271753ms step_avg:43.54ms +[2025-09-11 09:33:27] [Rank 0] step:6261/10000 train_time:272442ms step_avg:43.51ms +[2025-09-11 09:33:27] [Rank 0] step:6261/10000 train_time:272442ms step_avg:43.51ms +[2025-09-11 09:33:28] [Rank 0] step:6281/10000 train_time:273134ms step_avg:43.49ms +[2025-09-11 09:33:28] [Rank 0] step:6281/10000 train_time:273134ms step_avg:43.49ms +[2025-09-11 09:33:29] [Rank 0] step:6301/10000 train_time:273824ms step_avg:43.46ms +[2025-09-11 09:33:29] [Rank 0] step:6301/10000 train_time:273824ms step_avg:43.46ms +[2025-09-11 09:33:29] [Rank 0] step:6321/10000 train_time:274520ms step_avg:43.43ms +[2025-09-11 09:33:29] [Rank 0] step:6321/10000 train_time:274520ms step_avg:43.43ms +[2025-09-11 09:33:30] [Rank 0] step:6341/10000 train_time:275212ms step_avg:43.40ms +[2025-09-11 09:33:30] [Rank 0] step:6341/10000 train_time:275212ms step_avg:43.40ms +[2025-09-11 09:33:31] [Rank 0] step:6361/10000 train_time:275905ms step_avg:43.37ms +[2025-09-11 09:33:31] [Rank 0] step:6361/10000 train_time:275905ms step_avg:43.37ms +[2025-09-11 09:33:31] [Rank 0] step:6381/10000 train_time:276597ms step_avg:43.35ms +[2025-09-11 09:33:31] [Rank 0] step:6381/10000 train_time:276597ms step_avg:43.35ms +[2025-09-11 09:33:32] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 09:33:32] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 09:33:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 09:33:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 09:33:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 09:33:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 09:33:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:33:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:33:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 09:33:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 09:33:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 09:33:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 09:33:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 09:33:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 09:33:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 09:33:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 09:33:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 09:33:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 09:33:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 09:33:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 09:33:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 09:33:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 09:33:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 09:33:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 09:33:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 09:33:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 09:33:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 09:33:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 09:33:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 09:33:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 09:33:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 09:33:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 09:33:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 09:33:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 09:33:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 09:33:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 09:33:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 09:33:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 09:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 09:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 09:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 09:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 09:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 09:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 09:33:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:33:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:33:43] [Rank 0] PRINT: step:6400/10000 val_loss:4.4742 total_sharp:3.8224e-05 L1_sharp:3.9984e-02 L2_sharp:4.2738e-02 L3_sharp:4.9285e-02 L4_sharp:6.0783e-02 L5_sharp:8.0269e-02 L6_sharp:7.0748e-02 L7_sharp:7.6466e-02 L8_sharp:8.6386e-02 L9_sharp:1.1185e-01 L10_sharp:2.1595e-01 L11_sharp:3.1115e-01 L12_sharp:1.2791e+00 total_fnorm:1.5800e+02 total_l1_linf:3.2358e+05 total_spectral:7.9000e+01 L1_fnorm:1.0059e-01 L2_fnorm:1.0205e-01 L3_fnorm:1.0107e-01 L4_fnorm:1.0107e-01 L5_fnorm:1.0010e-01 L6_fnorm:9.9609e-02 L7_fnorm:9.9121e-02 L8_fnorm:9.7168e-02 L9_fnorm:9.8633e-02 L10_fnorm:9.8633e-02 L11_fnorm:9.8633e-02 L12_fnorm:9.9609e-02 L1_l1linf:2.2461e-02 L2_l1linf:2.4414e-02 L3_l1linf:2.3804e-02 L4_l1linf:2.3682e-02 L5_l1linf:2.3438e-02 L6_l1linf:2.3071e-02 L7_l1linf:2.3315e-02 L8_l1linf:2.2827e-02 L9_l1linf:2.3071e-02 L10_l1linf:2.3315e-02 L11_l1linf:2.4292e-02 L12_l1linf:2.5269e-02 L1_spectral:1.4302e-03 L2_spectral:1.4427e-03 L3_spectral:1.4538e-03 L4_spectral:1.4500e-03 L5_spectral:1.4483e-03 L6_spectral:1.4512e-03 L7_spectral:1.4581e-03 L8_spectral:1.4272e-03 L9_spectral:1.4514e-03 L10_spectral:1.4485e-03 L11_spectral:1.4418e-03 L12_spectral:1.4314e-03 train_time:277269ms step_avg:43.32ms +[2025-09-11 09:33:43] [Rank 0] PRINT: step:6400/10000 
val_loss:4.4742 total_sharp:3.8224e-05 L1_sharp:3.9984e-02 L2_sharp:4.2738e-02 L3_sharp:4.9285e-02 L4_sharp:6.0783e-02 L5_sharp:8.0269e-02 L6_sharp:7.0748e-02 L7_sharp:7.6466e-02 L8_sharp:8.6386e-02 L9_sharp:1.1185e-01 L10_sharp:2.1595e-01 L11_sharp:3.1115e-01 L12_sharp:1.2791e+00 total_fnorm:1.5800e+02 total_l1_linf:3.2358e+05 total_spectral:7.9000e+01 L1_fnorm:1.0059e-01 L2_fnorm:1.0205e-01 L3_fnorm:1.0107e-01 L4_fnorm:1.0107e-01 L5_fnorm:1.0010e-01 L6_fnorm:9.9609e-02 L7_fnorm:9.9121e-02 L8_fnorm:9.7168e-02 L9_fnorm:9.8633e-02 L10_fnorm:9.8633e-02 L11_fnorm:9.8633e-02 L12_fnorm:9.9609e-02 L1_l1linf:2.2461e-02 L2_l1linf:2.4414e-02 L3_l1linf:2.3804e-02 L4_l1linf:2.3682e-02 L5_l1linf:2.3438e-02 L6_l1linf:2.3071e-02 L7_l1linf:2.3315e-02 L8_l1linf:2.2827e-02 L9_l1linf:2.3071e-02 L10_l1linf:2.3315e-02 L11_l1linf:2.4292e-02 L12_l1linf:2.5269e-02 L1_spectral:1.4302e-03 L2_spectral:1.4427e-03 L3_spectral:1.4538e-03 L4_spectral:1.4500e-03 L5_spectral:1.4483e-03 L6_spectral:1.4512e-03 L7_spectral:1.4581e-03 L8_spectral:1.4272e-03 L9_spectral:1.4514e-03 L10_spectral:1.4485e-03 L11_spectral:1.4418e-03 L12_spectral:1.4314e-03 train_time:277269ms step_avg:43.32ms +[2025-09-11 09:33:45] [Rank 0] step:6401/10000 train_time:279069ms step_avg:43.60ms +[2025-09-11 09:33:45] [Rank 0] step:6401/10000 train_time:279069ms step_avg:43.60ms +[2025-09-11 09:33:45] [Rank 0] step:6421/10000 train_time:279792ms step_avg:43.57ms +[2025-09-11 09:33:45] [Rank 0] step:6421/10000 train_time:279792ms step_avg:43.57ms +[2025-09-11 09:33:46] [Rank 0] step:6441/10000 train_time:280483ms step_avg:43.55ms +[2025-09-11 09:33:46] [Rank 0] step:6441/10000 train_time:280483ms step_avg:43.55ms +[2025-09-11 09:33:47] [Rank 0] step:6461/10000 train_time:281175ms step_avg:43.52ms +[2025-09-11 09:33:47] [Rank 0] step:6461/10000 train_time:281175ms step_avg:43.52ms +[2025-09-11 09:33:48] [Rank 0] step:6481/10000 train_time:281868ms step_avg:43.49ms +[2025-09-11 09:33:48] [Rank 0] step:6481/10000 
train_time:281868ms step_avg:43.49ms +[2025-09-11 09:33:48] [Rank 0] step:6501/10000 train_time:282562ms step_avg:43.46ms +[2025-09-11 09:33:48] [Rank 0] step:6501/10000 train_time:282562ms step_avg:43.46ms +[2025-09-11 09:33:49] [Rank 0] step:6521/10000 train_time:283254ms step_avg:43.44ms +[2025-09-11 09:33:49] [Rank 0] step:6521/10000 train_time:283254ms step_avg:43.44ms +[2025-09-11 09:33:50] [Rank 0] step:6541/10000 train_time:283943ms step_avg:43.41ms +[2025-09-11 09:33:50] [Rank 0] step:6541/10000 train_time:283943ms step_avg:43.41ms +[2025-09-11 09:33:50] [Rank 0] step:6561/10000 train_time:284635ms step_avg:43.38ms +[2025-09-11 09:33:50] [Rank 0] step:6561/10000 train_time:284635ms step_avg:43.38ms +[2025-09-11 09:33:51] [Rank 0] step:6581/10000 train_time:285328ms step_avg:43.36ms +[2025-09-11 09:33:51] [Rank 0] step:6581/10000 train_time:285328ms step_avg:43.36ms +[2025-09-11 09:33:52] [Rank 0] step:6601/10000 train_time:286019ms step_avg:43.33ms +[2025-09-11 09:33:52] [Rank 0] step:6601/10000 train_time:286019ms step_avg:43.33ms +[2025-09-11 09:33:52] [Rank 0] step:6621/10000 train_time:286709ms step_avg:43.30ms +[2025-09-11 09:33:52] [Rank 0] step:6621/10000 train_time:286709ms step_avg:43.30ms +[2025-09-11 09:33:53] [Rank 0] step:6641/10000 train_time:287402ms step_avg:43.28ms +[2025-09-11 09:33:53] [Rank 0] step:6641/10000 train_time:287402ms step_avg:43.28ms +[2025-09-11 09:33:54] [Rank 0] step:6661/10000 train_time:288093ms step_avg:43.25ms +[2025-09-11 09:33:54] [Rank 0] step:6661/10000 train_time:288093ms step_avg:43.25ms +[2025-09-11 09:33:54] [Rank 0] step:6681/10000 train_time:288792ms step_avg:43.23ms +[2025-09-11 09:33:54] [Rank 0] step:6681/10000 train_time:288792ms step_avg:43.23ms +[2025-09-11 09:33:55] [Rank 0] step:6701/10000 train_time:289492ms step_avg:43.20ms +[2025-09-11 09:33:55] [Rank 0] step:6701/10000 train_time:289492ms step_avg:43.20ms +[2025-09-11 09:33:56] [Rank 0] step:6721/10000 train_time:290189ms step_avg:43.18ms 
+[2025-09-11 09:33:56] [Rank 0] step:6721/10000 train_time:290189ms step_avg:43.18ms +[2025-09-11 09:33:57] [Rank 0] step:6741/10000 train_time:290889ms step_avg:43.15ms +[2025-09-11 09:33:57] [Rank 0] step:6741/10000 train_time:290889ms step_avg:43.15ms +[2025-09-11 09:33:57] [Rank 0] step:6761/10000 train_time:291586ms step_avg:43.13ms +[2025-09-11 09:33:57] [Rank 0] step:6761/10000 train_time:291586ms step_avg:43.13ms +[2025-09-11 09:33:58] [Rank 0] step:6781/10000 train_time:292284ms step_avg:43.10ms +[2025-09-11 09:33:58] [Rank 0] step:6781/10000 train_time:292284ms step_avg:43.10ms +[2025-09-11 09:33:59] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 09:33:59] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 09:34:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 09:34:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 09:34:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 09:34:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 09:34:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:34:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:34:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 09:34:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 09:34:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 09:34:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 09:34:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 09:34:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 09:34:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 09:34:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 09:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 09:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 09:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 09:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 09:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 09:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 09:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 09:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 09:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 09:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 09:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 09:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 09:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 09:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 09:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 09:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 09:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 09:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 09:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 09:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 09:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 09:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 09:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 09:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 09:34:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 09:34:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 09:34:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 09:34:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 09:34:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:34:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:34:10] [Rank 0] PRINT: step:6800/10000 val_loss:4.4259 total_sharp:2.8294e-05 L1_sharp:3.3191e-02 L2_sharp:4.1878e-02 L3_sharp:4.9443e-02 L4_sharp:7.1432e-02 L5_sharp:7.5150e-02 L6_sharp:8.6747e-02 L7_sharp:7.6060e-02 L8_sharp:7.1686e-02 L9_sharp:1.0772e-01 L10_sharp:1.7877e-01 L11_sharp:2.7681e-01 L12_sharp:4.0300e-01 total_fnorm:1.5600e+02 total_l1_linf:3.1949e+05 total_spectral:7.8000e+01 L1_fnorm:8.5449e-02 L2_fnorm:8.6426e-02 L3_fnorm:8.6426e-02 L4_fnorm:8.5938e-02 L5_fnorm:8.4961e-02 L6_fnorm:8.4473e-02 L7_fnorm:8.4473e-02 L8_fnorm:8.2520e-02 L9_fnorm:8.3984e-02 L10_fnorm:8.3984e-02 L11_fnorm:8.3984e-02 L12_fnorm:8.3496e-02 L1_l1linf:1.8066e-02 L2_l1linf:1.9043e-02 L3_l1linf:1.9531e-02 L4_l1linf:1.8921e-02 L5_l1linf:1.9165e-02 L6_l1linf:1.8921e-02 L7_l1linf:1.8921e-02 L8_l1linf:1.8799e-02 L9_l1linf:1.9409e-02 L10_l1linf:1.8555e-02 L11_l1linf:1.9409e-02 L12_l1linf:1.9775e-02 L1_spectral:1.2333e-03 L2_spectral:1.2675e-03 L3_spectral:1.2686e-03 L4_spectral:1.2690e-03 L5_spectral:1.2757e-03 L6_spectral:1.2640e-03 L7_spectral:1.2733e-03 L8_spectral:1.2471e-03 L9_spectral:1.2542e-03 L10_spectral:1.2581e-03 L11_spectral:1.2542e-03 L12_spectral:1.2422e-03 train_time:292962ms step_avg:43.08ms +[2025-09-11 09:34:10] [Rank 0] PRINT: step:6800/10000 val_loss:4.4259 total_sharp:2.8294e-05 L1_sharp:3.3191e-02 L2_sharp:4.1878e-02 L3_sharp:4.9443e-02 L4_sharp:7.1432e-02 L5_sharp:7.5150e-02 L6_sharp:8.6747e-02 L7_sharp:7.6060e-02 L8_sharp:7.1686e-02 L9_sharp:1.0772e-01 L10_sharp:1.7877e-01 L11_sharp:2.7681e-01 L12_sharp:4.0300e-01 total_fnorm:1.5600e+02 total_l1_linf:3.1949e+05 total_spectral:7.8000e+01 L1_fnorm:8.5449e-02 L2_fnorm:8.6426e-02 L3_fnorm:8.6426e-02 L4_fnorm:8.5938e-02 L5_fnorm:8.4961e-02 L6_fnorm:8.4473e-02 L7_fnorm:8.4473e-02 L8_fnorm:8.2520e-02 L9_fnorm:8.3984e-02 L10_fnorm:8.3984e-02 L11_fnorm:8.3984e-02 L12_fnorm:8.3496e-02 L1_l1linf:1.8066e-02 L2_l1linf:1.9043e-02 L3_l1linf:1.9531e-02 L4_l1linf:1.8921e-02 L5_l1linf:1.9165e-02 
L6_l1linf:1.8921e-02 L7_l1linf:1.8921e-02 L8_l1linf:1.8799e-02 L9_l1linf:1.9409e-02 L10_l1linf:1.8555e-02 L11_l1linf:1.9409e-02 L12_l1linf:1.9775e-02 L1_spectral:1.2333e-03 L2_spectral:1.2675e-03 L3_spectral:1.2686e-03 L4_spectral:1.2690e-03 L5_spectral:1.2757e-03 L6_spectral:1.2640e-03 L7_spectral:1.2733e-03 L8_spectral:1.2471e-03 L9_spectral:1.2542e-03 L10_spectral:1.2581e-03 L11_spectral:1.2542e-03 L12_spectral:1.2422e-03 train_time:292962ms step_avg:43.08ms +[2025-09-11 09:34:13] [Rank 0] step:6801/10000 train_time:295717ms step_avg:43.48ms +[2025-09-11 09:34:13] [Rank 0] step:6801/10000 train_time:295717ms step_avg:43.48ms +[2025-09-11 09:34:14] [Rank 0] step:6821/10000 train_time:296585ms step_avg:43.48ms +[2025-09-11 09:34:14] [Rank 0] step:6821/10000 train_time:296585ms step_avg:43.48ms +[2025-09-11 09:34:15] [Rank 0] step:6841/10000 train_time:297288ms step_avg:43.46ms +[2025-09-11 09:34:15] [Rank 0] step:6841/10000 train_time:297288ms step_avg:43.46ms +[2025-09-11 09:34:15] [Rank 0] step:6861/10000 train_time:297988ms step_avg:43.43ms +[2025-09-11 09:34:15] [Rank 0] step:6861/10000 train_time:297988ms step_avg:43.43ms +[2025-09-11 09:34:16] [Rank 0] step:6881/10000 train_time:298690ms step_avg:43.41ms +[2025-09-11 09:34:16] [Rank 0] step:6881/10000 train_time:298690ms step_avg:43.41ms +[2025-09-11 09:34:17] [Rank 0] step:6901/10000 train_time:299389ms step_avg:43.38ms +[2025-09-11 09:34:17] [Rank 0] step:6901/10000 train_time:299389ms step_avg:43.38ms +[2025-09-11 09:34:17] [Rank 0] step:6921/10000 train_time:300088ms step_avg:43.36ms +[2025-09-11 09:34:17] [Rank 0] step:6921/10000 train_time:300088ms step_avg:43.36ms +[2025-09-11 09:34:18] [Rank 0] step:6941/10000 train_time:300788ms step_avg:43.33ms +[2025-09-11 09:34:18] [Rank 0] step:6941/10000 train_time:300788ms step_avg:43.33ms +[2025-09-11 09:34:19] [Rank 0] step:6961/10000 train_time:301489ms step_avg:43.31ms +[2025-09-11 09:34:19] [Rank 0] step:6961/10000 train_time:301489ms step_avg:43.31ms 
+[2025-09-11 09:34:19] [Rank 0] step:6981/10000 train_time:302191ms step_avg:43.29ms +[2025-09-11 09:34:19] [Rank 0] step:6981/10000 train_time:302191ms step_avg:43.29ms +[2025-09-11 09:34:20] [Rank 0] step:7001/10000 train_time:302891ms step_avg:43.26ms +[2025-09-11 09:34:20] [Rank 0] step:7001/10000 train_time:302891ms step_avg:43.26ms +[2025-09-11 09:34:21] [Rank 0] step:7021/10000 train_time:303592ms step_avg:43.24ms +[2025-09-11 09:34:21] [Rank 0] step:7021/10000 train_time:303592ms step_avg:43.24ms +[2025-09-11 09:34:22] [Rank 0] step:7041/10000 train_time:304290ms step_avg:43.22ms +[2025-09-11 09:34:22] [Rank 0] step:7041/10000 train_time:304290ms step_avg:43.22ms +[2025-09-11 09:34:22] [Rank 0] step:7061/10000 train_time:304991ms step_avg:43.19ms +[2025-09-11 09:34:22] [Rank 0] step:7061/10000 train_time:304991ms step_avg:43.19ms +[2025-09-11 09:34:23] [Rank 0] step:7081/10000 train_time:305691ms step_avg:43.17ms +[2025-09-11 09:34:23] [Rank 0] step:7081/10000 train_time:305691ms step_avg:43.17ms +[2025-09-11 09:34:24] [Rank 0] step:7101/10000 train_time:306391ms step_avg:43.15ms +[2025-09-11 09:34:24] [Rank 0] step:7101/10000 train_time:306391ms step_avg:43.15ms +[2025-09-11 09:34:24] [Rank 0] step:7121/10000 train_time:307092ms step_avg:43.12ms +[2025-09-11 09:34:24] [Rank 0] step:7121/10000 train_time:307092ms step_avg:43.12ms +[2025-09-11 09:34:25] [Rank 0] step:7141/10000 train_time:307792ms step_avg:43.10ms +[2025-09-11 09:34:25] [Rank 0] step:7141/10000 train_time:307792ms step_avg:43.10ms +[2025-09-11 09:34:26] [Rank 0] step:7161/10000 train_time:308493ms step_avg:43.08ms +[2025-09-11 09:34:26] [Rank 0] step:7161/10000 train_time:308493ms step_avg:43.08ms +[2025-09-11 09:34:26] [Rank 0] step:7181/10000 train_time:309192ms step_avg:43.06ms +[2025-09-11 09:34:26] [Rank 0] step:7181/10000 train_time:309192ms step_avg:43.06ms +[2025-09-11 09:34:27] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 09:34:27] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 09:34:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 09:34:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 09:34:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 09:34:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 09:34:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:34:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:34:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 09:34:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 09:34:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 09:34:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 09:34:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 09:34:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 09:34:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 09:34:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 09:34:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 09:34:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 09:34:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 09:34:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 09:34:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 09:34:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 09:34:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 09:34:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 09:34:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 09:34:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 09:34:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 09:34:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 09:34:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 09:34:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 09:34:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 09:34:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 09:34:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 09:34:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 09:34:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 09:34:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 09:34:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 09:34:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 09:34:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 09:34:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 09:34:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 09:34:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 09:34:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 09:34:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 09:34:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:34:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:34:38] [Rank 0] PRINT: step:7200/10000 val_loss:4.3951 total_sharp:2.8094e-05 L1_sharp:3.6191e-02 L2_sharp:4.5670e-02 L3_sharp:6.4864e-02 L4_sharp:7.0940e-02 L5_sharp:8.1329e-02 L6_sharp:8.6704e-02 L7_sharp:8.3077e-02 L8_sharp:7.6025e-02 L9_sharp:1.0039e-01 L10_sharp:1.8324e-01 L11_sharp:3.1430e-01 L12_sharp:3.6158e-01 total_fnorm:1.3400e+02 total_l1_linf:2.6214e+05 total_spectral:6.7000e+01 L1_fnorm:7.1777e-02 L2_fnorm:7.3242e-02 L3_fnorm:7.2266e-02 L4_fnorm:7.2266e-02 L5_fnorm:7.1777e-02 L6_fnorm:7.1289e-02 L7_fnorm:7.1289e-02 L8_fnorm:6.9336e-02 L9_fnorm:7.0312e-02 L10_fnorm:7.0312e-02 L11_fnorm:7.0312e-02 L12_fnorm:7.0312e-02 L1_l1linf:1.4648e-02 L2_l1linf:1.4648e-02 L3_l1linf:1.5076e-02 L4_l1linf:1.5259e-02 L5_l1linf:1.5076e-02 L6_l1linf:1.5015e-02 L7_l1linf:1.5259e-02 L8_l1linf:1.5381e-02 L9_l1linf:1.5137e-02 L10_l1linf:1.5198e-02 L11_l1linf:1.5869e-02 L12_l1linf:1.5869e-02 L1_spectral:1.0542e-03 L2_spectral:1.1090e-03 L3_spectral:1.0988e-03 L4_spectral:1.0976e-03 L5_spectral:1.0897e-03 L6_spectral:1.0955e-03 L7_spectral:1.1080e-03 L8_spectral:1.0775e-03 L9_spectral:1.0843e-03 L10_spectral:1.0795e-03 L11_spectral:1.0860e-03 L12_spectral:1.0609e-03 train_time:309872ms step_avg:43.04ms +[2025-09-11 09:34:38] [Rank 0] PRINT: step:7200/10000 
val_loss:4.3951 total_sharp:2.8094e-05 L1_sharp:3.6191e-02 L2_sharp:4.5670e-02 L3_sharp:6.4864e-02 L4_sharp:7.0940e-02 L5_sharp:8.1329e-02 L6_sharp:8.6704e-02 L7_sharp:8.3077e-02 L8_sharp:7.6025e-02 L9_sharp:1.0039e-01 L10_sharp:1.8324e-01 L11_sharp:3.1430e-01 L12_sharp:3.6158e-01 total_fnorm:1.3400e+02 total_l1_linf:2.6214e+05 total_spectral:6.7000e+01 L1_fnorm:7.1777e-02 L2_fnorm:7.3242e-02 L3_fnorm:7.2266e-02 L4_fnorm:7.2266e-02 L5_fnorm:7.1777e-02 L6_fnorm:7.1289e-02 L7_fnorm:7.1289e-02 L8_fnorm:6.9336e-02 L9_fnorm:7.0312e-02 L10_fnorm:7.0312e-02 L11_fnorm:7.0312e-02 L12_fnorm:7.0312e-02 L1_l1linf:1.4648e-02 L2_l1linf:1.4648e-02 L3_l1linf:1.5076e-02 L4_l1linf:1.5259e-02 L5_l1linf:1.5076e-02 L6_l1linf:1.5015e-02 L7_l1linf:1.5259e-02 L8_l1linf:1.5381e-02 L9_l1linf:1.5137e-02 L10_l1linf:1.5198e-02 L11_l1linf:1.5869e-02 L12_l1linf:1.5869e-02 L1_spectral:1.0542e-03 L2_spectral:1.1090e-03 L3_spectral:1.0988e-03 L4_spectral:1.0976e-03 L5_spectral:1.0897e-03 L6_spectral:1.0955e-03 L7_spectral:1.1080e-03 L8_spectral:1.0775e-03 L9_spectral:1.0843e-03 L10_spectral:1.0795e-03 L11_spectral:1.0860e-03 L12_spectral:1.0609e-03 train_time:309872ms step_avg:43.04ms +[2025-09-11 09:34:40] [Rank 0] step:7201/10000 train_time:311781ms step_avg:43.30ms +[2025-09-11 09:34:40] [Rank 0] step:7201/10000 train_time:311781ms step_avg:43.30ms +[2025-09-11 09:34:41] [Rank 0] step:7221/10000 train_time:312503ms step_avg:43.28ms +[2025-09-11 09:34:41] [Rank 0] step:7221/10000 train_time:312503ms step_avg:43.28ms +[2025-09-11 09:34:41] [Rank 0] step:7241/10000 train_time:313204ms step_avg:43.25ms +[2025-09-11 09:34:41] [Rank 0] step:7241/10000 train_time:313204ms step_avg:43.25ms +[2025-09-11 09:34:42] [Rank 0] step:7261/10000 train_time:313908ms step_avg:43.23ms +[2025-09-11 09:34:42] [Rank 0] step:7261/10000 train_time:313908ms step_avg:43.23ms +[2025-09-11 09:34:43] [Rank 0] step:7281/10000 train_time:314615ms step_avg:43.21ms +[2025-09-11 09:34:43] [Rank 0] step:7281/10000 
train_time:314615ms step_avg:43.21ms +[2025-09-11 09:34:44] [Rank 0] step:7301/10000 train_time:315315ms step_avg:43.19ms +[2025-09-11 09:34:44] [Rank 0] step:7301/10000 train_time:315315ms step_avg:43.19ms +[2025-09-11 09:34:44] [Rank 0] step:7321/10000 train_time:316016ms step_avg:43.17ms +[2025-09-11 09:34:44] [Rank 0] step:7321/10000 train_time:316016ms step_avg:43.17ms +[2025-09-11 09:34:45] [Rank 0] step:7341/10000 train_time:316718ms step_avg:43.14ms +[2025-09-11 09:34:45] [Rank 0] step:7341/10000 train_time:316718ms step_avg:43.14ms +[2025-09-11 09:34:46] [Rank 0] step:7361/10000 train_time:317419ms step_avg:43.12ms +[2025-09-11 09:34:46] [Rank 0] step:7361/10000 train_time:317419ms step_avg:43.12ms +[2025-09-11 09:34:46] [Rank 0] step:7381/10000 train_time:318121ms step_avg:43.10ms +[2025-09-11 09:34:46] [Rank 0] step:7381/10000 train_time:318121ms step_avg:43.10ms +[2025-09-11 09:34:47] [Rank 0] step:7401/10000 train_time:318820ms step_avg:43.08ms +[2025-09-11 09:34:47] [Rank 0] step:7401/10000 train_time:318820ms step_avg:43.08ms +[2025-09-11 09:34:48] [Rank 0] step:7421/10000 train_time:319521ms step_avg:43.06ms +[2025-09-11 09:34:48] [Rank 0] step:7421/10000 train_time:319521ms step_avg:43.06ms +[2025-09-11 09:34:48] [Rank 0] step:7441/10000 train_time:320223ms step_avg:43.03ms +[2025-09-11 09:34:48] [Rank 0] step:7441/10000 train_time:320223ms step_avg:43.03ms +[2025-09-11 09:34:49] [Rank 0] step:7461/10000 train_time:320923ms step_avg:43.01ms +[2025-09-11 09:34:49] [Rank 0] step:7461/10000 train_time:320923ms step_avg:43.01ms +[2025-09-11 09:34:50] [Rank 0] step:7481/10000 train_time:321626ms step_avg:42.99ms +[2025-09-11 09:34:50] [Rank 0] step:7481/10000 train_time:321626ms step_avg:42.99ms +[2025-09-11 09:34:51] [Rank 0] step:7501/10000 train_time:322327ms step_avg:42.97ms +[2025-09-11 09:34:51] [Rank 0] step:7501/10000 train_time:322327ms step_avg:42.97ms +[2025-09-11 09:34:51] [Rank 0] step:7521/10000 train_time:323030ms step_avg:42.95ms 
+[2025-09-11 09:34:51] [Rank 0] step:7521/10000 train_time:323030ms step_avg:42.95ms +[2025-09-11 09:34:52] [Rank 0] step:7541/10000 train_time:323729ms step_avg:42.93ms +[2025-09-11 09:34:52] [Rank 0] step:7541/10000 train_time:323729ms step_avg:42.93ms +[2025-09-11 09:34:53] [Rank 0] step:7561/10000 train_time:324434ms step_avg:42.91ms +[2025-09-11 09:34:53] [Rank 0] step:7561/10000 train_time:324434ms step_avg:42.91ms +[2025-09-11 09:34:53] [Rank 0] step:7581/10000 train_time:325137ms step_avg:42.89ms +[2025-09-11 09:34:53] [Rank 0] step:7581/10000 train_time:325137ms step_avg:42.89ms +[2025-09-11 09:34:54] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 09:34:54] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 09:34:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 09:34:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 09:34:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 09:34:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 09:34:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:34:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:34:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 09:34:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 09:34:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 09:34:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 09:35:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 09:35:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 09:35:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 09:35:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 09:35:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 09:35:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 09:35:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 09:35:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 09:35:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 09:35:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 09:35:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 09:35:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 09:35:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 09:35:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 09:35:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 09:35:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 09:35:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 09:35:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 09:35:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 09:35:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 09:35:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 09:35:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 09:35:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 09:35:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 09:35:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 09:35:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 09:35:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 09:35:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 09:35:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 09:35:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 09:35:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 09:35:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 09:35:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:35:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:35:05] [Rank 0] PRINT: step:7600/10000 val_loss:4.3665 total_sharp:2.7240e-05 L1_sharp:2.9533e-02 L2_sharp:4.0724e-02 L3_sharp:5.0062e-02 L4_sharp:6.1047e-02 L5_sharp:7.3356e-02 L6_sharp:7.8861e-02 L7_sharp:6.7845e-02 L8_sharp:7.4137e-02 L9_sharp:1.0802e-01 L10_sharp:1.6803e-01 L11_sharp:2.4353e-01 L12_sharp:4.1973e-01 total_fnorm:1.0950e+02 total_l1_linf:2.0378e+05 total_spectral:5.5000e+01 L1_fnorm:5.8105e-02 L2_fnorm:5.9814e-02 L3_fnorm:5.9570e-02 L4_fnorm:5.9570e-02 L5_fnorm:5.8838e-02 L6_fnorm:5.8350e-02 L7_fnorm:5.8594e-02 L8_fnorm:5.6641e-02 L9_fnorm:5.7617e-02 L10_fnorm:5.7617e-02 L11_fnorm:5.7861e-02 L12_fnorm:5.7373e-02 L1_l1linf:1.0742e-02 L2_l1linf:1.1414e-02 L3_l1linf:1.1902e-02 L4_l1linf:1.1963e-02 L5_l1linf:1.2085e-02 L6_l1linf:1.1841e-02 L7_l1linf:1.2024e-02 L8_l1linf:1.1963e-02 L9_l1linf:1.1963e-02 L10_l1linf:1.1902e-02 L11_l1linf:1.2207e-02 L12_l1linf:1.2756e-02 L1_spectral:8.7886e-04 L2_spectral:9.2260e-04 L3_spectral:9.3152e-04 L4_spectral:9.2680e-04 L5_spectral:9.1964e-04 L6_spectral:9.1911e-04 L7_spectral:9.2660e-04 L8_spectral:9.1828e-04 L9_spectral:9.0893e-04 L10_spectral:9.0261e-04 L11_spectral:9.1444e-04 L12_spectral:8.8950e-04 train_time:325823ms step_avg:42.87ms +[2025-09-11 09:35:05] [Rank 0] PRINT: step:7600/10000 val_loss:4.3665 total_sharp:2.7240e-05 L1_sharp:2.9533e-02 L2_sharp:4.0724e-02 L3_sharp:5.0062e-02 L4_sharp:6.1047e-02 L5_sharp:7.3356e-02 L6_sharp:7.8861e-02 L7_sharp:6.7845e-02 L8_sharp:7.4137e-02 L9_sharp:1.0802e-01 L10_sharp:1.6803e-01 L11_sharp:2.4353e-01 L12_sharp:4.1973e-01 total_fnorm:1.0950e+02 total_l1_linf:2.0378e+05 total_spectral:5.5000e+01 L1_fnorm:5.8105e-02 L2_fnorm:5.9814e-02 L3_fnorm:5.9570e-02 L4_fnorm:5.9570e-02 L5_fnorm:5.8838e-02 L6_fnorm:5.8350e-02 L7_fnorm:5.8594e-02 L8_fnorm:5.6641e-02 L9_fnorm:5.7617e-02 L10_fnorm:5.7617e-02 L11_fnorm:5.7861e-02 L12_fnorm:5.7373e-02 L1_l1linf:1.0742e-02 L2_l1linf:1.1414e-02 L3_l1linf:1.1902e-02 L4_l1linf:1.1963e-02 L5_l1linf:1.2085e-02 
L6_l1linf:1.1841e-02 L7_l1linf:1.2024e-02 L8_l1linf:1.1963e-02 L9_l1linf:1.1963e-02 L10_l1linf:1.1902e-02 L11_l1linf:1.2207e-02 L12_l1linf:1.2756e-02 L1_spectral:8.7886e-04 L2_spectral:9.2260e-04 L3_spectral:9.3152e-04 L4_spectral:9.2680e-04 L5_spectral:9.1964e-04 L6_spectral:9.1911e-04 L7_spectral:9.2660e-04 L8_spectral:9.1828e-04 L9_spectral:9.0893e-04 L10_spectral:9.0261e-04 L11_spectral:9.1444e-04 L12_spectral:8.8950e-04 train_time:325823ms step_avg:42.87ms +[2025-09-11 09:35:07] [Rank 0] step:7601/10000 train_time:327718ms step_avg:43.12ms +[2025-09-11 09:35:07] [Rank 0] step:7601/10000 train_time:327718ms step_avg:43.12ms +[2025-09-11 09:35:07] [Rank 0] step:7621/10000 train_time:328455ms step_avg:43.10ms +[2025-09-11 09:35:07] [Rank 0] step:7621/10000 train_time:328455ms step_avg:43.10ms +[2025-09-11 09:35:08] [Rank 0] step:7641/10000 train_time:329160ms step_avg:43.08ms +[2025-09-11 09:35:08] [Rank 0] step:7641/10000 train_time:329160ms step_avg:43.08ms +[2025-09-11 09:35:09] [Rank 0] step:7661/10000 train_time:329860ms step_avg:43.06ms +[2025-09-11 09:35:09] [Rank 0] step:7661/10000 train_time:329860ms step_avg:43.06ms +[2025-09-11 09:35:10] [Rank 0] step:7681/10000 train_time:330574ms step_avg:43.04ms +[2025-09-11 09:35:10] [Rank 0] step:7681/10000 train_time:330574ms step_avg:43.04ms +[2025-09-11 09:35:10] [Rank 0] step:7701/10000 train_time:331276ms step_avg:43.02ms +[2025-09-11 09:35:10] [Rank 0] step:7701/10000 train_time:331276ms step_avg:43.02ms +[2025-09-11 09:35:11] [Rank 0] step:7721/10000 train_time:331979ms step_avg:43.00ms +[2025-09-11 09:35:11] [Rank 0] step:7721/10000 train_time:331979ms step_avg:43.00ms +[2025-09-11 09:35:12] [Rank 0] step:7741/10000 train_time:332683ms step_avg:42.98ms +[2025-09-11 09:35:12] [Rank 0] step:7741/10000 train_time:332683ms step_avg:42.98ms +[2025-09-11 09:35:12] [Rank 0] step:7761/10000 train_time:333384ms step_avg:42.96ms +[2025-09-11 09:35:12] [Rank 0] step:7761/10000 train_time:333384ms step_avg:42.96ms 
+[2025-09-11 09:35:13] [Rank 0] step:7781/10000 train_time:334089ms step_avg:42.94ms +[2025-09-11 09:35:13] [Rank 0] step:7781/10000 train_time:334089ms step_avg:42.94ms +[2025-09-11 09:35:14] [Rank 0] step:7801/10000 train_time:334788ms step_avg:42.92ms +[2025-09-11 09:35:14] [Rank 0] step:7801/10000 train_time:334788ms step_avg:42.92ms +[2025-09-11 09:35:14] [Rank 0] step:7821/10000 train_time:335498ms step_avg:42.90ms +[2025-09-11 09:35:14] [Rank 0] step:7821/10000 train_time:335498ms step_avg:42.90ms +[2025-09-11 09:35:15] [Rank 0] step:7841/10000 train_time:336202ms step_avg:42.88ms +[2025-09-11 09:35:15] [Rank 0] step:7841/10000 train_time:336202ms step_avg:42.88ms +[2025-09-11 09:35:16] [Rank 0] step:7861/10000 train_time:336907ms step_avg:42.86ms +[2025-09-11 09:35:16] [Rank 0] step:7861/10000 train_time:336907ms step_avg:42.86ms +[2025-09-11 09:35:17] [Rank 0] step:7881/10000 train_time:337609ms step_avg:42.84ms +[2025-09-11 09:35:17] [Rank 0] step:7881/10000 train_time:337609ms step_avg:42.84ms +[2025-09-11 09:35:17] [Rank 0] step:7901/10000 train_time:338313ms step_avg:42.82ms +[2025-09-11 09:35:17] [Rank 0] step:7901/10000 train_time:338313ms step_avg:42.82ms +[2025-09-11 09:35:18] [Rank 0] step:7921/10000 train_time:339015ms step_avg:42.80ms +[2025-09-11 09:35:18] [Rank 0] step:7921/10000 train_time:339015ms step_avg:42.80ms +[2025-09-11 09:35:19] [Rank 0] step:7941/10000 train_time:339719ms step_avg:42.78ms +[2025-09-11 09:35:19] [Rank 0] step:7941/10000 train_time:339719ms step_avg:42.78ms +[2025-09-11 09:35:19] [Rank 0] step:7961/10000 train_time:340420ms step_avg:42.76ms +[2025-09-11 09:35:19] [Rank 0] step:7961/10000 train_time:340420ms step_avg:42.76ms +[2025-09-11 09:35:20] [Rank 0] step:7981/10000 train_time:341125ms step_avg:42.74ms +[2025-09-11 09:35:20] [Rank 0] step:7981/10000 train_time:341125ms step_avg:42.74ms +[2025-09-11 09:35:21] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 09:35:21] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 09:35:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 09:35:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 09:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 09:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 09:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 09:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 09:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 09:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 09:35:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 09:35:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 09:35:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 09:35:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 09:35:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 09:35:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 09:35:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 09:35:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 09:35:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 09:35:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 09:35:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 09:35:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 09:35:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 09:35:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 09:35:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 09:35:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 09:35:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 09:35:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 09:35:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 09:35:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 09:35:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 09:35:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 09:35:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 09:35:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 09:35:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 09:35:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 09:35:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 09:35:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 09:35:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 09:35:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 09:35:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 09:35:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 09:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:35:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:35:32] [Rank 0] PRINT: step:8000/10000 val_loss:4.3381 total_sharp:2.3670e-05 L1_sharp:4.2318e-02 L2_sharp:4.3609e-02 L3_sharp:4.5395e-02 L4_sharp:6.2914e-02 L5_sharp:6.8137e-02 L6_sharp:8.6705e-02 L7_sharp:7.4524e-02 L8_sharp:7.4204e-02 L9_sharp:1.0301e-01 L10_sharp:1.7909e-01 L11_sharp:2.6462e-01 L12_sharp:5.7683e-01 total_fnorm:9.9000e+01 total_l1_linf:1.7818e+05 total_spectral:4.9500e+01 L1_fnorm:4.6143e-02 L2_fnorm:4.7607e-02 L3_fnorm:4.7607e-02 L4_fnorm:4.7363e-02 L5_fnorm:4.6875e-02 L6_fnorm:4.6631e-02 L7_fnorm:4.6875e-02 L8_fnorm:4.5166e-02 L9_fnorm:4.6143e-02 L10_fnorm:4.5898e-02 L11_fnorm:4.6143e-02 L12_fnorm:4.5654e-02 L1_l1linf:7.5684e-03 L2_l1linf:8.2397e-03 L3_l1linf:8.6060e-03 L4_l1linf:8.7891e-03 L5_l1linf:8.9111e-03 L6_l1linf:8.9111e-03 L7_l1linf:8.9722e-03 L8_l1linf:8.9111e-03 L9_l1linf:8.8501e-03 L10_l1linf:9.2773e-03 L11_l1linf:9.3994e-03 L12_l1linf:9.3384e-03 L1_spectral:7.1255e-04 L2_spectral:7.5921e-04 L3_spectral:7.6668e-04 L4_spectral:7.6549e-04 L5_spectral:7.4698e-04 L6_spectral:7.5629e-04 L7_spectral:7.6205e-04 L8_spectral:7.5533e-04 L9_spectral:7.3556e-04 L10_spectral:7.3476e-04 L11_spectral:7.3842e-04 L12_spectral:7.2743e-04 train_time:341805ms step_avg:42.73ms +[2025-09-11 09:35:32] [Rank 0] PRINT: step:8000/10000 
val_loss:4.3381 total_sharp:2.3670e-05 L1_sharp:4.2318e-02 L2_sharp:4.3609e-02 L3_sharp:4.5395e-02 L4_sharp:6.2914e-02 L5_sharp:6.8137e-02 L6_sharp:8.6705e-02 L7_sharp:7.4524e-02 L8_sharp:7.4204e-02 L9_sharp:1.0301e-01 L10_sharp:1.7909e-01 L11_sharp:2.6462e-01 L12_sharp:5.7683e-01 total_fnorm:9.9000e+01 total_l1_linf:1.7818e+05 total_spectral:4.9500e+01 L1_fnorm:4.6143e-02 L2_fnorm:4.7607e-02 L3_fnorm:4.7607e-02 L4_fnorm:4.7363e-02 L5_fnorm:4.6875e-02 L6_fnorm:4.6631e-02 L7_fnorm:4.6875e-02 L8_fnorm:4.5166e-02 L9_fnorm:4.6143e-02 L10_fnorm:4.5898e-02 L11_fnorm:4.6143e-02 L12_fnorm:4.5654e-02 L1_l1linf:7.5684e-03 L2_l1linf:8.2397e-03 L3_l1linf:8.6060e-03 L4_l1linf:8.7891e-03 L5_l1linf:8.9111e-03 L6_l1linf:8.9111e-03 L7_l1linf:8.9722e-03 L8_l1linf:8.9111e-03 L9_l1linf:8.8501e-03 L10_l1linf:9.2773e-03 L11_l1linf:9.3994e-03 L12_l1linf:9.3384e-03 L1_spectral:7.1255e-04 L2_spectral:7.5921e-04 L3_spectral:7.6668e-04 L4_spectral:7.6549e-04 L5_spectral:7.4698e-04 L6_spectral:7.5629e-04 L7_spectral:7.6205e-04 L8_spectral:7.5533e-04 L9_spectral:7.3556e-04 L10_spectral:7.3476e-04 L11_spectral:7.3842e-04 L12_spectral:7.2743e-04 train_time:341805ms step_avg:42.73ms +[2025-09-11 09:35:34] [Rank 0] step:8001/10000 train_time:343717ms step_avg:42.96ms +[2025-09-11 09:35:34] [Rank 0] step:8001/10000 train_time:343717ms step_avg:42.96ms +[2025-09-11 09:35:34] [Rank 0] step:8021/10000 train_time:344440ms step_avg:42.94ms +[2025-09-11 09:35:34] [Rank 0] step:8021/10000 train_time:344440ms step_avg:42.94ms +[2025-09-11 09:35:35] [Rank 0] step:8041/10000 train_time:345144ms step_avg:42.92ms +[2025-09-11 09:35:35] [Rank 0] step:8041/10000 train_time:345144ms step_avg:42.92ms +[2025-09-11 09:35:36] [Rank 0] step:8061/10000 train_time:345849ms step_avg:42.90ms +[2025-09-11 09:35:36] [Rank 0] step:8061/10000 train_time:345849ms step_avg:42.90ms +[2025-09-11 09:35:36] [Rank 0] step:8081/10000 train_time:346550ms step_avg:42.88ms +[2025-09-11 09:35:36] [Rank 0] step:8081/10000 
train_time:346550ms step_avg:42.88ms +[2025-09-11 09:35:37] [Rank 0] step:8101/10000 train_time:347251ms step_avg:42.87ms +[2025-09-11 09:35:37] [Rank 0] step:8101/10000 train_time:347251ms step_avg:42.87ms +[2025-09-11 09:35:38] [Rank 0] step:8121/10000 train_time:347958ms step_avg:42.85ms +[2025-09-11 09:35:38] [Rank 0] step:8121/10000 train_time:347958ms step_avg:42.85ms +[2025-09-11 09:35:39] [Rank 0] step:8141/10000 train_time:349395ms step_avg:42.92ms +[2025-09-11 09:35:39] [Rank 0] step:8141/10000 train_time:349395ms step_avg:42.92ms +[2025-09-11 09:35:40] [Rank 0] step:8161/10000 train_time:350101ms step_avg:42.90ms +[2025-09-11 09:35:40] [Rank 0] step:8161/10000 train_time:350101ms step_avg:42.90ms +[2025-09-11 09:35:41] [Rank 0] step:8181/10000 train_time:350814ms step_avg:42.88ms +[2025-09-11 09:35:41] [Rank 0] step:8181/10000 train_time:350814ms step_avg:42.88ms +[2025-09-11 09:35:41] [Rank 0] step:8201/10000 train_time:351525ms step_avg:42.86ms +[2025-09-11 09:35:41] [Rank 0] step:8201/10000 train_time:351525ms step_avg:42.86ms +[2025-09-11 09:35:42] [Rank 0] step:8221/10000 train_time:352233ms step_avg:42.85ms +[2025-09-11 09:35:42] [Rank 0] step:8221/10000 train_time:352233ms step_avg:42.85ms +[2025-09-11 09:35:43] [Rank 0] step:8241/10000 train_time:352950ms step_avg:42.83ms +[2025-09-11 09:35:43] [Rank 0] step:8241/10000 train_time:352950ms step_avg:42.83ms +[2025-09-11 09:35:43] [Rank 0] step:8261/10000 train_time:353659ms step_avg:42.81ms +[2025-09-11 09:35:43] [Rank 0] step:8261/10000 train_time:353659ms step_avg:42.81ms +[2025-09-11 09:35:44] [Rank 0] step:8281/10000 train_time:354364ms step_avg:42.79ms +[2025-09-11 09:35:44] [Rank 0] step:8281/10000 train_time:354364ms step_avg:42.79ms +[2025-09-11 09:35:45] [Rank 0] step:8301/10000 train_time:355072ms step_avg:42.77ms +[2025-09-11 09:35:45] [Rank 0] step:8301/10000 train_time:355072ms step_avg:42.77ms +[2025-09-11 09:35:46] [Rank 0] step:8321/10000 train_time:355779ms step_avg:42.76ms 
+[2025-09-11 09:35:46] [Rank 0] step:8321/10000 train_time:355779ms step_avg:42.76ms +[2025-09-11 09:35:46] [Rank 0] step:8341/10000 train_time:356494ms step_avg:42.74ms +[2025-09-11 09:35:46] [Rank 0] step:8341/10000 train_time:356494ms step_avg:42.74ms +[2025-09-11 09:35:47] [Rank 0] step:8361/10000 train_time:357198ms step_avg:42.72ms +[2025-09-11 09:35:47] [Rank 0] step:8361/10000 train_time:357198ms step_avg:42.72ms +[2025-09-11 09:35:48] [Rank 0] step:8381/10000 train_time:357910ms step_avg:42.70ms +[2025-09-11 09:35:48] [Rank 0] step:8381/10000 train_time:357910ms step_avg:42.70ms +[2025-09-11 09:35:48] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 09:35:48] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 09:35:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 09:35:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 09:35:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 09:35:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 09:35:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:35:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:35:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 09:35:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 09:35:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 09:35:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 09:35:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 09:35:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 09:35:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 09:35:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 09:35:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 09:35:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 09:35:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 09:35:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 09:35:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 09:35:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 09:35:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 09:35:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 09:35:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 09:35:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 09:35:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 09:35:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 09:35:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 09:35:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 09:35:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 09:35:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 09:35:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 09:35:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 09:35:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 09:35:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 09:35:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 09:35:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 09:35:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 09:35:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 09:35:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 09:35:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 09:35:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 09:35:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 09:35:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:35:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:35:59] [Rank 0] PRINT: step:8400/10000 val_loss:4.3117 total_sharp:2.0372e-05 L1_sharp:3.5953e-02 L2_sharp:3.6420e-02 L3_sharp:5.1580e-02 L4_sharp:5.9227e-02 L5_sharp:6.0172e-02 L6_sharp:8.4514e-02 L7_sharp:7.9997e-02 L8_sharp:6.3786e-02 L9_sharp:1.0150e-01 L10_sharp:1.7672e-01 L11_sharp:2.6976e-01 L12_sharp:9.2956e-01 total_fnorm:7.9500e+01 total_l1_linf:1.3056e+05 total_spectral:3.9750e+01 L1_fnorm:3.5156e-02 L2_fnorm:3.6133e-02 L3_fnorm:3.6621e-02 L4_fnorm:3.6377e-02 L5_fnorm:3.6133e-02 L6_fnorm:3.6133e-02 L7_fnorm:3.6133e-02 L8_fnorm:3.4912e-02 L9_fnorm:3.5400e-02 L10_fnorm:3.5400e-02 L11_fnorm:3.5645e-02 L12_fnorm:3.5156e-02 L1_l1linf:5.3711e-03 L2_l1linf:5.9204e-03 L3_l1linf:6.2256e-03 L4_l1linf:6.4087e-03 L5_l1linf:6.3171e-03 L6_l1linf:6.2256e-03 L7_l1linf:6.3171e-03 L8_l1linf:6.3477e-03 L9_l1linf:6.3477e-03 L10_l1linf:6.3782e-03 L11_l1linf:6.7444e-03 L12_l1linf:7.0801e-03 L1_spectral:5.5589e-04 L2_spectral:5.9696e-04 L3_spectral:5.9781e-04 L4_spectral:5.8855e-04 L5_spectral:5.9310e-04 L6_spectral:5.9314e-04 L7_spectral:6.0170e-04 L8_spectral:5.9183e-04 L9_spectral:5.7604e-04 L10_spectral:5.7383e-04 L11_spectral:5.8914e-04 L12_spectral:5.7654e-04 train_time:358602ms step_avg:42.69ms +[2025-09-11 09:35:59] [Rank 0] PRINT: step:8400/10000 val_loss:4.3117 total_sharp:2.0372e-05 L1_sharp:3.5953e-02 L2_sharp:3.6420e-02 L3_sharp:5.1580e-02 L4_sharp:5.9227e-02 L5_sharp:6.0172e-02 L6_sharp:8.4514e-02 L7_sharp:7.9997e-02 L8_sharp:6.3786e-02 L9_sharp:1.0150e-01 L10_sharp:1.7672e-01 L11_sharp:2.6976e-01 L12_sharp:9.2956e-01 total_fnorm:7.9500e+01 total_l1_linf:1.3056e+05 total_spectral:3.9750e+01 L1_fnorm:3.5156e-02 L2_fnorm:3.6133e-02 L3_fnorm:3.6621e-02 L4_fnorm:3.6377e-02 L5_fnorm:3.6133e-02 L6_fnorm:3.6133e-02 L7_fnorm:3.6133e-02 L8_fnorm:3.4912e-02 L9_fnorm:3.5400e-02 L10_fnorm:3.5400e-02 L11_fnorm:3.5645e-02 L12_fnorm:3.5156e-02 L1_l1linf:5.3711e-03 L2_l1linf:5.9204e-03 L3_l1linf:6.2256e-03 L4_l1linf:6.4087e-03 L5_l1linf:6.3171e-03 
L6_l1linf:6.2256e-03 L7_l1linf:6.3171e-03 L8_l1linf:6.3477e-03 L9_l1linf:6.3477e-03 L10_l1linf:6.3782e-03 L11_l1linf:6.7444e-03 L12_l1linf:7.0801e-03 L1_spectral:5.5589e-04 L2_spectral:5.9696e-04 L3_spectral:5.9781e-04 L4_spectral:5.8855e-04 L5_spectral:5.9310e-04 L6_spectral:5.9314e-04 L7_spectral:6.0170e-04 L8_spectral:5.9183e-04 L9_spectral:5.7604e-04 L10_spectral:5.7383e-04 L11_spectral:5.8914e-04 L12_spectral:5.7654e-04 train_time:358602ms step_avg:42.69ms +[2025-09-11 09:36:01] [Rank 0] step:8401/10000 train_time:360533ms step_avg:42.92ms +[2025-09-11 09:36:01] [Rank 0] step:8401/10000 train_time:360533ms step_avg:42.92ms +[2025-09-11 09:36:02] [Rank 0] step:8421/10000 train_time:361271ms step_avg:42.90ms +[2025-09-11 09:36:02] [Rank 0] step:8421/10000 train_time:361271ms step_avg:42.90ms +[2025-09-11 09:36:02] [Rank 0] step:8441/10000 train_time:361982ms step_avg:42.88ms +[2025-09-11 09:36:02] [Rank 0] step:8441/10000 train_time:361982ms step_avg:42.88ms +[2025-09-11 09:36:03] [Rank 0] step:8461/10000 train_time:362692ms step_avg:42.87ms +[2025-09-11 09:36:03] [Rank 0] step:8461/10000 train_time:362692ms step_avg:42.87ms +[2025-09-11 09:36:04] [Rank 0] step:8481/10000 train_time:363405ms step_avg:42.85ms +[2025-09-11 09:36:04] [Rank 0] step:8481/10000 train_time:363405ms step_avg:42.85ms +[2025-09-11 09:36:05] [Rank 0] step:8501/10000 train_time:364112ms step_avg:42.83ms +[2025-09-11 09:36:05] [Rank 0] step:8501/10000 train_time:364112ms step_avg:42.83ms +[2025-09-11 09:36:05] [Rank 0] step:8521/10000 train_time:364821ms step_avg:42.81ms +[2025-09-11 09:36:05] [Rank 0] step:8521/10000 train_time:364821ms step_avg:42.81ms +[2025-09-11 09:36:06] [Rank 0] step:8541/10000 train_time:365531ms step_avg:42.80ms +[2025-09-11 09:36:06] [Rank 0] step:8541/10000 train_time:365531ms step_avg:42.80ms +[2025-09-11 09:36:07] [Rank 0] step:8561/10000 train_time:366492ms step_avg:42.81ms +[2025-09-11 09:36:07] [Rank 0] step:8561/10000 train_time:366492ms step_avg:42.81ms 
+[2025-09-11 09:36:08] [Rank 0] step:8581/10000 train_time:367206ms step_avg:42.79ms +[2025-09-11 09:36:08] [Rank 0] step:8581/10000 train_time:367206ms step_avg:42.79ms +[2025-09-11 09:36:08] [Rank 0] step:8601/10000 train_time:367915ms step_avg:42.78ms +[2025-09-11 09:36:08] [Rank 0] step:8601/10000 train_time:367915ms step_avg:42.78ms +[2025-09-11 09:36:09] [Rank 0] step:8621/10000 train_time:368876ms step_avg:42.79ms +[2025-09-11 09:36:09] [Rank 0] step:8621/10000 train_time:368876ms step_avg:42.79ms +[2025-09-11 09:36:10] [Rank 0] step:8641/10000 train_time:369585ms step_avg:42.77ms +[2025-09-11 09:36:10] [Rank 0] step:8641/10000 train_time:369585ms step_avg:42.77ms +[2025-09-11 09:36:11] [Rank 0] step:8661/10000 train_time:370295ms step_avg:42.75ms +[2025-09-11 09:36:11] [Rank 0] step:8661/10000 train_time:370295ms step_avg:42.75ms +[2025-09-11 09:36:11] [Rank 0] step:8681/10000 train_time:371006ms step_avg:42.74ms +[2025-09-11 09:36:11] [Rank 0] step:8681/10000 train_time:371006ms step_avg:42.74ms +[2025-09-11 09:36:12] [Rank 0] step:8701/10000 train_time:371714ms step_avg:42.72ms +[2025-09-11 09:36:12] [Rank 0] step:8701/10000 train_time:371714ms step_avg:42.72ms +[2025-09-11 09:36:13] [Rank 0] step:8721/10000 train_time:372425ms step_avg:42.70ms +[2025-09-11 09:36:13] [Rank 0] step:8721/10000 train_time:372425ms step_avg:42.70ms +[2025-09-11 09:36:14] [Rank 0] step:8741/10000 train_time:373131ms step_avg:42.69ms +[2025-09-11 09:36:14] [Rank 0] step:8741/10000 train_time:373131ms step_avg:42.69ms +[2025-09-11 09:36:14] [Rank 0] step:8761/10000 train_time:373845ms step_avg:42.67ms +[2025-09-11 09:36:14] [Rank 0] step:8761/10000 train_time:373845ms step_avg:42.67ms +[2025-09-11 09:36:15] [Rank 0] step:8781/10000 train_time:374551ms step_avg:42.65ms +[2025-09-11 09:36:15] [Rank 0] step:8781/10000 train_time:374551ms step_avg:42.65ms +[2025-09-11 09:36:16] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 09:36:16] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 09:36:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 09:36:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 09:36:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 09:36:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 09:36:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:36:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:36:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 09:36:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 09:36:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 09:36:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 09:36:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 09:36:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 09:36:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 09:36:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 09:36:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 09:36:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 09:36:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 09:36:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 09:36:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 09:36:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 09:36:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 09:36:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 09:36:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 09:36:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 09:36:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 09:36:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 09:36:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 09:36:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 09:36:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 09:36:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 09:36:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 09:36:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 09:36:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 09:36:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 09:36:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 09:36:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 09:36:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 09:36:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 09:36:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 09:36:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 09:36:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 09:36:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 09:36:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:36:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:36:26] [Rank 0] PRINT: step:8800/10000 val_loss:4.3059 total_sharp:1.9606e-05 L1_sharp:3.0624e-02 L2_sharp:3.2199e-02 L3_sharp:4.2215e-02 L4_sharp:5.3848e-02 L5_sharp:4.5765e-02 L6_sharp:6.0558e-02 L7_sharp:5.4362e-02 L8_sharp:5.7323e-02 L9_sharp:7.9956e-02 L10_sharp:1.2894e-01 L11_sharp:1.6282e-01 L12_sharp:2.4891e-01 total_fnorm:5.9750e+01 total_l1_linf:8.9600e+04 total_spectral:2.9875e+01 L1_fnorm:2.4902e-02 L2_fnorm:2.5635e-02 L3_fnorm:2.5879e-02 L4_fnorm:2.5879e-02 L5_fnorm:2.5635e-02 L6_fnorm:2.5635e-02 L7_fnorm:2.5635e-02 L8_fnorm:2.4902e-02 L9_fnorm:2.5146e-02 L10_fnorm:2.5024e-02 L11_fnorm:2.5024e-02 L12_fnorm:2.4658e-02 L1_l1linf:3.1586e-03 L2_l1linf:3.6774e-03 L3_l1linf:3.8757e-03 L4_l1linf:4.0283e-03 L5_l1linf:4.0283e-03 L6_l1linf:3.9368e-03 L7_l1linf:4.0588e-03 L8_l1linf:4.2114e-03 L9_l1linf:4.2114e-03 L10_l1linf:4.0894e-03 L11_l1linf:4.1809e-03 L12_l1linf:4.0894e-03 L1_spectral:4.0448e-04 L2_spectral:4.2972e-04 L3_spectral:4.3445e-04 L4_spectral:4.2979e-04 L5_spectral:4.2822e-04 L6_spectral:4.2840e-04 L7_spectral:4.2997e-04 L8_spectral:4.3570e-04 L9_spectral:4.1372e-04 L10_spectral:4.1673e-04 L11_spectral:4.2051e-04 L12_spectral:4.1601e-04 train_time:375238ms step_avg:42.64ms +[2025-09-11 09:36:26] [Rank 0] PRINT: step:8800/10000 
val_loss:4.3059 total_sharp:1.9606e-05 L1_sharp:3.0624e-02 L2_sharp:3.2199e-02 L3_sharp:4.2215e-02 L4_sharp:5.3848e-02 L5_sharp:4.5765e-02 L6_sharp:6.0558e-02 L7_sharp:5.4362e-02 L8_sharp:5.7323e-02 L9_sharp:7.9956e-02 L10_sharp:1.2894e-01 L11_sharp:1.6282e-01 L12_sharp:2.4891e-01 total_fnorm:5.9750e+01 total_l1_linf:8.9600e+04 total_spectral:2.9875e+01 L1_fnorm:2.4902e-02 L2_fnorm:2.5635e-02 L3_fnorm:2.5879e-02 L4_fnorm:2.5879e-02 L5_fnorm:2.5635e-02 L6_fnorm:2.5635e-02 L7_fnorm:2.5635e-02 L8_fnorm:2.4902e-02 L9_fnorm:2.5146e-02 L10_fnorm:2.5024e-02 L11_fnorm:2.5024e-02 L12_fnorm:2.4658e-02 L1_l1linf:3.1586e-03 L2_l1linf:3.6774e-03 L3_l1linf:3.8757e-03 L4_l1linf:4.0283e-03 L5_l1linf:4.0283e-03 L6_l1linf:3.9368e-03 L7_l1linf:4.0588e-03 L8_l1linf:4.2114e-03 L9_l1linf:4.2114e-03 L10_l1linf:4.0894e-03 L11_l1linf:4.1809e-03 L12_l1linf:4.0894e-03 L1_spectral:4.0448e-04 L2_spectral:4.2972e-04 L3_spectral:4.3445e-04 L4_spectral:4.2979e-04 L5_spectral:4.2822e-04 L6_spectral:4.2840e-04 L7_spectral:4.2997e-04 L8_spectral:4.3570e-04 L9_spectral:4.1372e-04 L10_spectral:4.1673e-04 L11_spectral:4.2051e-04 L12_spectral:4.1601e-04 train_time:375238ms step_avg:42.64ms +[2025-09-11 09:36:28] [Rank 0] step:8801/10000 train_time:377235ms step_avg:42.86ms +[2025-09-11 09:36:28] [Rank 0] step:8801/10000 train_time:377235ms step_avg:42.86ms +[2025-09-11 09:36:29] [Rank 0] step:8821/10000 train_time:377963ms step_avg:42.85ms +[2025-09-11 09:36:29] [Rank 0] step:8821/10000 train_time:377963ms step_avg:42.85ms +[2025-09-11 09:36:30] [Rank 0] step:8841/10000 train_time:378674ms step_avg:42.83ms +[2025-09-11 09:36:30] [Rank 0] step:8841/10000 train_time:378674ms step_avg:42.83ms +[2025-09-11 09:36:31] [Rank 0] step:8861/10000 train_time:379384ms step_avg:42.81ms +[2025-09-11 09:36:31] [Rank 0] step:8861/10000 train_time:379384ms step_avg:42.81ms +[2025-09-11 09:36:31] [Rank 0] step:8881/10000 train_time:380096ms step_avg:42.80ms +[2025-09-11 09:36:31] [Rank 0] step:8881/10000 
train_time:380096ms step_avg:42.80ms +[2025-09-11 09:36:32] [Rank 0] step:8901/10000 train_time:380808ms step_avg:42.78ms +[2025-09-11 09:36:32] [Rank 0] step:8901/10000 train_time:380808ms step_avg:42.78ms +[2025-09-11 09:36:33] [Rank 0] step:8921/10000 train_time:381514ms step_avg:42.77ms +[2025-09-11 09:36:33] [Rank 0] step:8921/10000 train_time:381514ms step_avg:42.77ms +[2025-09-11 09:36:33] [Rank 0] step:8941/10000 train_time:382227ms step_avg:42.75ms +[2025-09-11 09:36:33] [Rank 0] step:8941/10000 train_time:382227ms step_avg:42.75ms +[2025-09-11 09:36:34] [Rank 0] step:8961/10000 train_time:382945ms step_avg:42.73ms +[2025-09-11 09:36:34] [Rank 0] step:8961/10000 train_time:382945ms step_avg:42.73ms +[2025-09-11 09:36:35] [Rank 0] step:8981/10000 train_time:383659ms step_avg:42.72ms +[2025-09-11 09:36:35] [Rank 0] step:8981/10000 train_time:383659ms step_avg:42.72ms +[2025-09-11 09:36:36] [Rank 0] step:9001/10000 train_time:384365ms step_avg:42.70ms +[2025-09-11 09:36:36] [Rank 0] step:9001/10000 train_time:384365ms step_avg:42.70ms +[2025-09-11 09:36:36] [Rank 0] step:9021/10000 train_time:385075ms step_avg:42.69ms +[2025-09-11 09:36:36] [Rank 0] step:9021/10000 train_time:385075ms step_avg:42.69ms +[2025-09-11 09:36:37] [Rank 0] step:9041/10000 train_time:385787ms step_avg:42.67ms +[2025-09-11 09:36:37] [Rank 0] step:9041/10000 train_time:385787ms step_avg:42.67ms +[2025-09-11 09:36:38] [Rank 0] step:9061/10000 train_time:386495ms step_avg:42.65ms +[2025-09-11 09:36:38] [Rank 0] step:9061/10000 train_time:386495ms step_avg:42.65ms +[2025-09-11 09:36:38] [Rank 0] step:9081/10000 train_time:387208ms step_avg:42.64ms +[2025-09-11 09:36:38] [Rank 0] step:9081/10000 train_time:387208ms step_avg:42.64ms +[2025-09-11 09:36:39] [Rank 0] step:9101/10000 train_time:387922ms step_avg:42.62ms +[2025-09-11 09:36:39] [Rank 0] step:9101/10000 train_time:387922ms step_avg:42.62ms +[2025-09-11 09:36:40] [Rank 0] step:9121/10000 train_time:388636ms step_avg:42.61ms 
+[2025-09-11 09:36:40] [Rank 0] step:9121/10000 train_time:388636ms step_avg:42.61ms +[2025-09-11 09:36:41] [Rank 0] step:9141/10000 train_time:389344ms step_avg:42.59ms +[2025-09-11 09:36:41] [Rank 0] step:9141/10000 train_time:389344ms step_avg:42.59ms +[2025-09-11 09:36:41] [Rank 0] step:9161/10000 train_time:390057ms step_avg:42.58ms +[2025-09-11 09:36:41] [Rank 0] step:9161/10000 train_time:390057ms step_avg:42.58ms +[2025-09-11 09:36:42] [Rank 0] step:9181/10000 train_time:390769ms step_avg:42.56ms +[2025-09-11 09:36:42] [Rank 0] step:9181/10000 train_time:390769ms step_avg:42.56ms +[2025-09-11 09:36:43] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 09:36:43] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 09:36:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 09:36:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 09:36:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 09:36:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 09:36:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:36:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:36:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 09:36:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 09:36:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 09:36:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 09:36:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 09:36:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 09:36:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 09:36:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 09:36:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 09:36:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 09:36:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 09:36:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 09:36:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 09:36:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 09:36:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 09:36:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 09:36:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 09:36:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 09:36:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 09:36:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 09:36:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 09:36:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 09:36:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 09:36:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 09:36:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 09:36:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 09:36:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 09:36:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 09:36:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 09:36:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 09:36:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 09:36:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 09:36:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 09:36:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 09:36:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 09:36:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 09:36:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:36:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:36:53] [Rank 0] PRINT: step:9200/10000 val_loss:4.2866 total_sharp:1.8022e-05 L1_sharp:2.8962e-02 L2_sharp:3.0880e-02 L3_sharp:2.9451e-02 L4_sharp:5.2041e-02 L5_sharp:5.3735e-02 L6_sharp:6.7044e-02 L7_sharp:7.2081e-02 L8_sharp:6.8462e-02 L9_sharp:9.8687e-02 L10_sharp:1.6288e-01 L11_sharp:3.8092e-01 L12_sharp:6.0685e-01 total_fnorm:4.4500e+01 total_l1_linf:5.9648e+04 total_spectral:2.2250e+01 L1_fnorm:1.6235e-02 L2_fnorm:1.6846e-02 L3_fnorm:1.7090e-02 L4_fnorm:1.7090e-02 L5_fnorm:1.6968e-02 L6_fnorm:1.6968e-02 L7_fnorm:1.6968e-02 L8_fnorm:1.6479e-02 L9_fnorm:1.6724e-02 L10_fnorm:1.6724e-02 L11_fnorm:1.6846e-02 L12_fnorm:1.6479e-02 L1_l1linf:1.8692e-03 L2_l1linf:2.1973e-03 L3_l1linf:2.4414e-03 L4_l1linf:2.3651e-03 L5_l1linf:2.4109e-03 L6_l1linf:2.5330e-03 L7_l1linf:2.4719e-03 L8_l1linf:2.4567e-03 L9_l1linf:2.4414e-03 L10_l1linf:2.5482e-03 L11_l1linf:2.6093e-03 L12_l1linf:2.6093e-03 L1_spectral:2.6905e-04 L2_spectral:2.8331e-04 L3_spectral:2.8492e-04 L4_spectral:2.8770e-04 L5_spectral:2.8195e-04 L6_spectral:2.8380e-04 L7_spectral:2.9216e-04 L8_spectral:2.9492e-04 L9_spectral:2.7355e-04 L10_spectral:2.7766e-04 L11_spectral:2.7923e-04 L12_spectral:2.7960e-04 train_time:391463ms step_avg:42.55ms +[2025-09-11 09:36:53] [Rank 0] PRINT: step:9200/10000 val_loss:4.2866 total_sharp:1.8022e-05 L1_sharp:2.8962e-02 L2_sharp:3.0880e-02 L3_sharp:2.9451e-02 L4_sharp:5.2041e-02 L5_sharp:5.3735e-02 L6_sharp:6.7044e-02 L7_sharp:7.2081e-02 L8_sharp:6.8462e-02 L9_sharp:9.8687e-02 L10_sharp:1.6288e-01 L11_sharp:3.8092e-01 L12_sharp:6.0685e-01 total_fnorm:4.4500e+01 total_l1_linf:5.9648e+04 total_spectral:2.2250e+01 L1_fnorm:1.6235e-02 L2_fnorm:1.6846e-02 L3_fnorm:1.7090e-02 L4_fnorm:1.7090e-02 L5_fnorm:1.6968e-02 L6_fnorm:1.6968e-02 L7_fnorm:1.6968e-02 L8_fnorm:1.6479e-02 L9_fnorm:1.6724e-02 L10_fnorm:1.6724e-02 L11_fnorm:1.6846e-02 L12_fnorm:1.6479e-02 L1_l1linf:1.8692e-03 L2_l1linf:2.1973e-03 L3_l1linf:2.4414e-03 L4_l1linf:2.3651e-03 L5_l1linf:2.4109e-03 
L6_l1linf:2.5330e-03 L7_l1linf:2.4719e-03 L8_l1linf:2.4567e-03 L9_l1linf:2.4414e-03 L10_l1linf:2.5482e-03 L11_l1linf:2.6093e-03 L12_l1linf:2.6093e-03 L1_spectral:2.6905e-04 L2_spectral:2.8331e-04 L3_spectral:2.8492e-04 L4_spectral:2.8770e-04 L5_spectral:2.8195e-04 L6_spectral:2.8380e-04 L7_spectral:2.9216e-04 L8_spectral:2.9492e-04 L9_spectral:2.7355e-04 L10_spectral:2.7766e-04 L11_spectral:2.7923e-04 L12_spectral:2.7960e-04 train_time:391463ms step_avg:42.55ms +[2025-09-11 09:36:55] [Rank 0] step:9201/10000 train_time:393412ms step_avg:42.76ms +[2025-09-11 09:36:55] [Rank 0] step:9201/10000 train_time:393412ms step_avg:42.76ms +[2025-09-11 09:36:56] [Rank 0] step:9221/10000 train_time:394135ms step_avg:42.74ms +[2025-09-11 09:36:56] [Rank 0] step:9221/10000 train_time:394135ms step_avg:42.74ms +[2025-09-11 09:36:57] [Rank 0] step:9241/10000 train_time:394844ms step_avg:42.73ms +[2025-09-11 09:36:57] [Rank 0] step:9241/10000 train_time:394844ms step_avg:42.73ms +[2025-09-11 09:36:58] [Rank 0] step:9261/10000 train_time:395558ms step_avg:42.71ms +[2025-09-11 09:36:58] [Rank 0] step:9261/10000 train_time:395558ms step_avg:42.71ms +[2025-09-11 09:36:58] [Rank 0] step:9281/10000 train_time:396270ms step_avg:42.70ms +[2025-09-11 09:36:58] [Rank 0] step:9281/10000 train_time:396270ms step_avg:42.70ms +[2025-09-11 09:36:59] [Rank 0] step:9301/10000 train_time:396978ms step_avg:42.68ms +[2025-09-11 09:36:59] [Rank 0] step:9301/10000 train_time:396978ms step_avg:42.68ms +[2025-09-11 09:37:00] [Rank 0] step:9321/10000 train_time:397689ms step_avg:42.67ms +[2025-09-11 09:37:00] [Rank 0] step:9321/10000 train_time:397689ms step_avg:42.67ms +[2025-09-11 09:37:00] [Rank 0] step:9341/10000 train_time:398401ms step_avg:42.65ms +[2025-09-11 09:37:00] [Rank 0] step:9341/10000 train_time:398401ms step_avg:42.65ms +[2025-09-11 09:37:01] [Rank 0] step:9361/10000 train_time:399107ms step_avg:42.64ms +[2025-09-11 09:37:01] [Rank 0] step:9361/10000 train_time:399107ms step_avg:42.64ms 
+[2025-09-11 09:37:02] [Rank 0] step:9381/10000 train_time:399817ms step_avg:42.62ms +[2025-09-11 09:37:02] [Rank 0] step:9381/10000 train_time:399817ms step_avg:42.62ms +[2025-09-11 09:37:03] [Rank 0] step:9401/10000 train_time:400529ms step_avg:42.60ms +[2025-09-11 09:37:03] [Rank 0] step:9401/10000 train_time:400529ms step_avg:42.60ms +[2025-09-11 09:37:03] [Rank 0] step:9421/10000 train_time:401241ms step_avg:42.59ms +[2025-09-11 09:37:03] [Rank 0] step:9421/10000 train_time:401241ms step_avg:42.59ms +[2025-09-11 09:37:04] [Rank 0] step:9441/10000 train_time:401954ms step_avg:42.58ms +[2025-09-11 09:37:04] [Rank 0] step:9441/10000 train_time:401954ms step_avg:42.58ms +[2025-09-11 09:37:05] [Rank 0] step:9461/10000 train_time:402665ms step_avg:42.56ms +[2025-09-11 09:37:05] [Rank 0] step:9461/10000 train_time:402665ms step_avg:42.56ms +[2025-09-11 09:37:05] [Rank 0] step:9481/10000 train_time:403377ms step_avg:42.55ms +[2025-09-11 09:37:05] [Rank 0] step:9481/10000 train_time:403377ms step_avg:42.55ms +[2025-09-11 09:37:06] [Rank 0] step:9501/10000 train_time:404088ms step_avg:42.53ms +[2025-09-11 09:37:06] [Rank 0] step:9501/10000 train_time:404088ms step_avg:42.53ms +[2025-09-11 09:37:07] [Rank 0] step:9521/10000 train_time:404802ms step_avg:42.52ms +[2025-09-11 09:37:07] [Rank 0] step:9521/10000 train_time:404802ms step_avg:42.52ms +[2025-09-11 09:37:08] [Rank 0] step:9541/10000 train_time:405510ms step_avg:42.50ms +[2025-09-11 09:37:08] [Rank 0] step:9541/10000 train_time:405510ms step_avg:42.50ms +[2025-09-11 09:37:08] [Rank 0] step:9561/10000 train_time:406220ms step_avg:42.49ms +[2025-09-11 09:37:08] [Rank 0] step:9561/10000 train_time:406220ms step_avg:42.49ms +[2025-09-11 09:37:09] [Rank 0] step:9581/10000 train_time:406932ms step_avg:42.47ms +[2025-09-11 09:37:09] [Rank 0] step:9581/10000 train_time:406932ms step_avg:42.47ms +[2025-09-11 09:37:10] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 09:37:10] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 09:37:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 09:37:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 09:37:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 09:37:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 09:37:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:37:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:37:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 09:37:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 09:37:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 09:37:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 09:37:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 09:37:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 09:37:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 09:37:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 09:37:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 09:37:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 09:37:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 09:37:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 09:37:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 09:37:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 09:37:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 09:37:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 09:37:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 09:37:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 09:37:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 09:37:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 09:37:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 09:37:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 09:37:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 09:37:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 09:37:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 09:37:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 09:37:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 09:37:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 09:37:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 09:37:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 09:37:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 09:37:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 09:37:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 09:37:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 09:37:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 09:37:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 09:37:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:37:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:37:21] [Rank 0] PRINT: step:9600/10000 val_loss:4.2735 total_sharp:1.3559e-05 L1_sharp:1.9242e-02 L2_sharp:2.5154e-02 L3_sharp:3.1363e-02 L4_sharp:3.6868e-02 L5_sharp:4.0637e-02 L6_sharp:5.0400e-02 L7_sharp:4.6914e-02 L8_sharp:3.8920e-02 L9_sharp:6.3603e-02 L10_sharp:9.7591e-02 L11_sharp:1.3982e-01 L12_sharp:2.0760e-01 total_fnorm:2.5375e+01 total_l1_linf:2.8928e+04 total_spectral:1.2688e+01 L1_fnorm:9.0332e-03 L2_fnorm:9.3994e-03 L3_fnorm:9.5215e-03 L4_fnorm:9.5215e-03 L5_fnorm:9.5215e-03 L6_fnorm:9.4604e-03 L7_fnorm:9.3994e-03 L8_fnorm:9.2163e-03 L9_fnorm:9.2773e-03 L10_fnorm:9.3384e-03 L11_fnorm:9.3384e-03 L12_fnorm:9.2163e-03 L1_l1linf:8.6212e-04 L2_l1linf:1.1444e-03 L3_l1linf:1.1978e-03 L4_l1linf:1.1597e-03 L5_l1linf:1.1749e-03 L6_l1linf:1.1597e-03 L7_l1linf:1.1673e-03 L8_l1linf:1.1673e-03 L9_l1linf:1.2054e-03 L10_l1linf:1.2054e-03 L11_l1linf:1.2283e-03 L12_l1linf:1.2207e-03 L1_spectral:1.5325e-04 L2_spectral:1.6175e-04 L3_spectral:1.6229e-04 L4_spectral:1.6356e-04 L5_spectral:1.5943e-04 L6_spectral:1.6115e-04 L7_spectral:1.6394e-04 L8_spectral:1.6871e-04 L9_spectral:1.5548e-04 L10_spectral:1.5716e-04 L11_spectral:1.5780e-04 L12_spectral:1.5947e-04 train_time:407619ms step_avg:42.46ms +[2025-09-11 09:37:21] [Rank 0] PRINT: step:9600/10000 
val_loss:4.2735 total_sharp:1.3559e-05 L1_sharp:1.9242e-02 L2_sharp:2.5154e-02 L3_sharp:3.1363e-02 L4_sharp:3.6868e-02 L5_sharp:4.0637e-02 L6_sharp:5.0400e-02 L7_sharp:4.6914e-02 L8_sharp:3.8920e-02 L9_sharp:6.3603e-02 L10_sharp:9.7591e-02 L11_sharp:1.3982e-01 L12_sharp:2.0760e-01 total_fnorm:2.5375e+01 total_l1_linf:2.8928e+04 total_spectral:1.2688e+01 L1_fnorm:9.0332e-03 L2_fnorm:9.3994e-03 L3_fnorm:9.5215e-03 L4_fnorm:9.5215e-03 L5_fnorm:9.5215e-03 L6_fnorm:9.4604e-03 L7_fnorm:9.3994e-03 L8_fnorm:9.2163e-03 L9_fnorm:9.2773e-03 L10_fnorm:9.3384e-03 L11_fnorm:9.3384e-03 L12_fnorm:9.2163e-03 L1_l1linf:8.6212e-04 L2_l1linf:1.1444e-03 L3_l1linf:1.1978e-03 L4_l1linf:1.1597e-03 L5_l1linf:1.1749e-03 L6_l1linf:1.1597e-03 L7_l1linf:1.1673e-03 L8_l1linf:1.1673e-03 L9_l1linf:1.2054e-03 L10_l1linf:1.2054e-03 L11_l1linf:1.2283e-03 L12_l1linf:1.2207e-03 L1_spectral:1.5325e-04 L2_spectral:1.6175e-04 L3_spectral:1.6229e-04 L4_spectral:1.6356e-04 L5_spectral:1.5943e-04 L6_spectral:1.6115e-04 L7_spectral:1.6394e-04 L8_spectral:1.6871e-04 L9_spectral:1.5548e-04 L10_spectral:1.5716e-04 L11_spectral:1.5780e-04 L12_spectral:1.5947e-04 train_time:407619ms step_avg:42.46ms +[2025-09-11 09:37:23] [Rank 0] step:9601/10000 train_time:409603ms step_avg:42.66ms +[2025-09-11 09:37:23] [Rank 0] step:9601/10000 train_time:409603ms step_avg:42.66ms +[2025-09-11 09:37:23] [Rank 0] step:9621/10000 train_time:410337ms step_avg:42.65ms +[2025-09-11 09:37:23] [Rank 0] step:9621/10000 train_time:410337ms step_avg:42.65ms +[2025-09-11 09:37:24] [Rank 0] step:9641/10000 train_time:411052ms step_avg:42.64ms +[2025-09-11 09:37:24] [Rank 0] step:9641/10000 train_time:411052ms step_avg:42.64ms +[2025-09-11 09:37:25] [Rank 0] step:9661/10000 train_time:411775ms step_avg:42.62ms +[2025-09-11 09:37:25] [Rank 0] step:9661/10000 train_time:411775ms step_avg:42.62ms +[2025-09-11 09:37:26] [Rank 0] step:9681/10000 train_time:412490ms step_avg:42.61ms +[2025-09-11 09:37:26] [Rank 0] step:9681/10000 
train_time:412490ms step_avg:42.61ms +[2025-09-11 09:37:26] [Rank 0] step:9701/10000 train_time:413206ms step_avg:42.59ms +[2025-09-11 09:37:26] [Rank 0] step:9701/10000 train_time:413206ms step_avg:42.59ms +[2025-09-11 09:37:27] [Rank 0] step:9721/10000 train_time:413927ms step_avg:42.58ms +[2025-09-11 09:37:27] [Rank 0] step:9721/10000 train_time:413927ms step_avg:42.58ms +[2025-09-11 09:37:28] [Rank 0] step:9741/10000 train_time:414645ms step_avg:42.57ms +[2025-09-11 09:37:28] [Rank 0] step:9741/10000 train_time:414645ms step_avg:42.57ms +[2025-09-11 09:37:28] [Rank 0] step:9761/10000 train_time:415363ms step_avg:42.55ms +[2025-09-11 09:37:28] [Rank 0] step:9761/10000 train_time:415363ms step_avg:42.55ms +[2025-09-11 09:37:29] [Rank 0] step:9781/10000 train_time:416079ms step_avg:42.54ms +[2025-09-11 09:37:29] [Rank 0] step:9781/10000 train_time:416079ms step_avg:42.54ms +[2025-09-11 09:37:30] [Rank 0] step:9801/10000 train_time:416802ms step_avg:42.53ms +[2025-09-11 09:37:30] [Rank 0] step:9801/10000 train_time:416802ms step_avg:42.53ms +[2025-09-11 09:37:31] [Rank 0] step:9821/10000 train_time:417520ms step_avg:42.51ms +[2025-09-11 09:37:31] [Rank 0] step:9821/10000 train_time:417520ms step_avg:42.51ms +[2025-09-11 09:37:31] [Rank 0] step:9841/10000 train_time:418242ms step_avg:42.50ms +[2025-09-11 09:37:31] [Rank 0] step:9841/10000 train_time:418242ms step_avg:42.50ms +[2025-09-11 09:37:32] [Rank 0] step:9861/10000 train_time:418960ms step_avg:42.49ms +[2025-09-11 09:37:32] [Rank 0] step:9861/10000 train_time:418960ms step_avg:42.49ms +[2025-09-11 09:37:33] [Rank 0] step:9881/10000 train_time:419678ms step_avg:42.47ms +[2025-09-11 09:37:33] [Rank 0] step:9881/10000 train_time:419678ms step_avg:42.47ms +[2025-09-11 09:37:33] [Rank 0] step:9901/10000 train_time:420393ms step_avg:42.46ms +[2025-09-11 09:37:33] [Rank 0] step:9901/10000 train_time:420393ms step_avg:42.46ms +[2025-09-11 09:37:34] [Rank 0] step:9921/10000 train_time:421109ms step_avg:42.45ms 
+[2025-09-11 09:37:34] [Rank 0] step:9921/10000 train_time:421109ms step_avg:42.45ms +[2025-09-11 09:37:35] [Rank 0] step:9941/10000 train_time:421831ms step_avg:42.43ms +[2025-09-11 09:37:35] [Rank 0] step:9941/10000 train_time:421831ms step_avg:42.43ms +[2025-09-11 09:37:36] [Rank 0] step:9961/10000 train_time:422555ms step_avg:42.42ms +[2025-09-11 09:37:36] [Rank 0] step:9961/10000 train_time:422555ms step_avg:42.42ms +[2025-09-11 09:37:36] [Rank 0] step:9981/10000 train_time:423274ms step_avg:42.41ms +[2025-09-11 09:37:36] [Rank 0] step:9981/10000 train_time:423274ms step_avg:42.41ms +[2025-09-11 09:37:37] [Rank 0] step:10000/10000 train_time:423966ms step_avg:42.40ms +[2025-09-11 09:37:37] [Rank 0] step:10000/10000 train_time:423966ms step_avg:42.40ms +[2025-09-11 09:37:37] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 09:37:37] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 09:37:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 09:37:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 09:37:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 09:37:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 09:37:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:37:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:37:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 09:37:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 09:37:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 09:37:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 09:37:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 09:37:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 09:37:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 09:37:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 09:37:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 09:37:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 09:37:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 09:37:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 09:37:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 09:37:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 09:37:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 09:37:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 09:37:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 09:37:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 09:37:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 09:37:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 09:37:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 09:37:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 09:37:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 09:37:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 09:37:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 09:37:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 09:37:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 09:37:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 09:37:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 09:37:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 09:37:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 09:37:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 09:37:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 09:37:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 09:37:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 09:37:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 09:37:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:37:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:37:48] [Rank 0] PRINT: step:10000/10000 val_loss:4.2741 total_sharp:9.4711e-06 L1_sharp:2.3244e-02 L2_sharp:1.9570e-02 L3_sharp:2.8013e-02 L4_sharp:2.9323e-02 L5_sharp:3.6946e-02 L6_sharp:4.1252e-02 L7_sharp:4.8655e-02 L8_sharp:3.6681e-02 L9_sharp:4.8994e-02 L10_sharp:6.1660e-02 L11_sharp:1.0486e-01 L12_sharp:2.0695e-01 total_fnorm:1.0125e+01 total_l1_linf:8.4480e+03 total_spectral:5.0625e+00 L1_fnorm:3.3875e-03 L2_fnorm:3.6011e-03 L3_fnorm:3.6926e-03 L4_fnorm:3.7537e-03 L5_fnorm:3.7231e-03 L6_fnorm:3.7079e-03 L7_fnorm:3.7231e-03 L8_fnorm:3.6011e-03 L9_fnorm:3.6163e-03 L10_fnorm:3.6163e-03 L11_fnorm:3.6163e-03 L12_fnorm:3.5858e-03 L1_l1linf:2.4796e-04 L2_l1linf:2.8992e-04 L3_l1linf:3.4142e-04 L4_l1linf:3.8147e-04 L5_l1linf:3.9291e-04 L6_l1linf:3.7956e-04 L7_l1linf:3.7575e-04 L8_l1linf:3.6049e-04 L9_l1linf:3.6812e-04 L10_l1linf:3.7575e-04 L11_l1linf:4.0627e-04 L12_l1linf:3.9101e-04 L1_spectral:6.1703e-05 L2_spectral:6.2541e-05 L3_spectral:6.4967e-05 L4_spectral:6.6470e-05 L5_spectral:6.4010e-05 L6_spectral:6.5223e-05 L7_spectral:6.5827e-05 L8_spectral:6.8813e-05 L9_spectral:6.2488e-05 L10_spectral:6.2041e-05 L11_spectral:6.3275e-05 L12_spectral:6.5044e-05 train_time:423986ms step_avg:42.40ms +[2025-09-11 09:37:48] [Rank 0] PRINT: step:10000/10000 val_loss:4.2741 total_sharp:9.4711e-06 L1_sharp:2.3244e-02 L2_sharp:1.9570e-02 L3_sharp:2.8013e-02 L4_sharp:2.9323e-02 L5_sharp:3.6946e-02 L6_sharp:4.1252e-02 L7_sharp:4.8655e-02 L8_sharp:3.6681e-02 L9_sharp:4.8994e-02 L10_sharp:6.1660e-02 L11_sharp:1.0486e-01 L12_sharp:2.0695e-01 total_fnorm:1.0125e+01 total_l1_linf:8.4480e+03 total_spectral:5.0625e+00 L1_fnorm:3.3875e-03 L2_fnorm:3.6011e-03 L3_fnorm:3.6926e-03 L4_fnorm:3.7537e-03 L5_fnorm:3.7231e-03 L6_fnorm:3.7079e-03 L7_fnorm:3.7231e-03 L8_fnorm:3.6011e-03 L9_fnorm:3.6163e-03 L10_fnorm:3.6163e-03 L11_fnorm:3.6163e-03 L12_fnorm:3.5858e-03 L1_l1linf:2.4796e-04 L2_l1linf:2.8992e-04 L3_l1linf:3.4142e-04 L4_l1linf:3.8147e-04 L5_l1linf:3.9291e-04 
L6_l1linf:3.7956e-04 L7_l1linf:3.7575e-04 L8_l1linf:3.6049e-04 L9_l1linf:3.6812e-04 L10_l1linf:3.7575e-04 L11_l1linf:4.0627e-04 L12_l1linf:3.9101e-04 L1_spectral:6.1703e-05 L2_spectral:6.2541e-05 L3_spectral:6.4967e-05 L4_spectral:6.6470e-05 L5_spectral:6.4010e-05 L6_spectral:6.5223e-05 L7_spectral:6.5827e-05 L8_spectral:6.8813e-05 L9_spectral:6.2488e-05 L10_spectral:6.2041e-05 L11_spectral:6.3275e-05 L12_spectral:6.5044e-05 train_time:423986ms step_avg:42.40ms +[2025-09-11 09:37:48] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 09:37:48 2025 --- +[2025-09-11 09:37:48] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 09:37:48 2025 --- +[2025-09-11 09:37:48] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 09:37:48] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.002_seed_42/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.002_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..9dd05691273555cfc8373251c7d5b026c4d027ff --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.002_seed_42/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.05, + "muon_lr": 0.002, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "c58487e2-6624-409d-a2ea-a645c5570331", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.002_seed_42/training_log_c58487e2-6624-409d-a2ea-a645c5570331.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.002_seed_42/training_log_c58487e2-6624-409d-a2ea-a645c5570331.txt new file mode 100644 index 0000000000000000000000000000000000000000..011860c347bea96ec8b81705c234b31fdd01c361 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.002_seed_42/training_log_c58487e2-6624-409d-a2ea-a645c5570331.txt @@ -0,0 +1,4264 @@ +[2025-09-11 09:09:39] [Rank 0] PRINT: --- Script Start: Thu Sep 11 09:09:39 2025 --- +[2025-09-11 09:09:39] [Rank 0] PRINT: --- Script Start: Thu Sep 11 09:09:39 2025 --- +[2025-09-11 09:09:39] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.05, muon_lr=0.002, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 09:09:39] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.05, muon_lr=0.002, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 09:09:39] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 09:09:39] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 09:09:39] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-11 09:09:39] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-11 09:09:39] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.002_seed_42 +[2025-09-11 09:09:39] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.002_seed_42 +[2025-09-11 09:09:39] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import 
dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, 
"unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." 
+ "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention 
sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 09:09:39] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 09:09:39] [Rank 0] PRINT: Constructing model... +[2025-09-11 09:09:39] [Rank 0] PRINT: Constructing model... +[2025-09-11 09:09:40] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 09:09:40] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 09:09:40] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 09:09:40] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 09:09:40] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 09:09:40] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 09:09:40] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 09:09:40] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 09:09:40] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 09:09:40] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 09:09:43] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 09:09:43] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 09:09:43] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 09:09:43] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 09:09:43] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 09:09:43] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 09:09:49] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 09:09:49] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 09:09:49] [Rank 0] PRINT: Starting warmup... +[2025-09-11 09:09:49] [Rank 0] PRINT: Starting warmup... +[2025-09-11 09:10:35] [Rank 0] PRINT: Warmup complete. +[2025-09-11 09:10:35] [Rank 0] PRINT: Warmup complete. +[2025-09-11 09:10:35] [Rank 0] PRINT: Starting training... +[2025-09-11 09:10:35] [Rank 0] PRINT: Starting training... 
+[2025-09-11 09:10:36] [Rank 0] step:21/10000 train_time:1143ms step_avg:54.41ms +[2025-09-11 09:10:36] [Rank 0] step:21/10000 train_time:1143ms step_avg:54.41ms +[2025-09-11 09:10:37] [Rank 0] step:41/10000 train_time:1874ms step_avg:45.71ms +[2025-09-11 09:10:37] [Rank 0] step:41/10000 train_time:1874ms step_avg:45.71ms +[2025-09-11 09:10:38] [Rank 0] step:61/10000 train_time:2605ms step_avg:42.70ms +[2025-09-11 09:10:38] [Rank 0] step:61/10000 train_time:2605ms step_avg:42.70ms +[2025-09-11 09:10:38] [Rank 0] step:81/10000 train_time:3336ms step_avg:41.18ms +[2025-09-11 09:10:38] [Rank 0] step:81/10000 train_time:3336ms step_avg:41.18ms +[2025-09-11 09:10:39] [Rank 0] step:101/10000 train_time:4066ms step_avg:40.26ms +[2025-09-11 09:10:39] [Rank 0] step:101/10000 train_time:4066ms step_avg:40.26ms +[2025-09-11 09:10:40] [Rank 0] step:121/10000 train_time:4796ms step_avg:39.64ms +[2025-09-11 09:10:40] [Rank 0] step:121/10000 train_time:4796ms step_avg:39.64ms +[2025-09-11 09:10:41] [Rank 0] step:141/10000 train_time:5526ms step_avg:39.19ms +[2025-09-11 09:10:41] [Rank 0] step:141/10000 train_time:5526ms step_avg:39.19ms +[2025-09-11 09:10:41] [Rank 0] step:161/10000 train_time:6257ms step_avg:38.87ms +[2025-09-11 09:10:41] [Rank 0] step:161/10000 train_time:6257ms step_avg:38.87ms +[2025-09-11 09:10:42] [Rank 0] step:181/10000 train_time:6988ms step_avg:38.61ms +[2025-09-11 09:10:42] [Rank 0] step:181/10000 train_time:6988ms step_avg:38.61ms +[2025-09-11 09:10:43] [Rank 0] step:201/10000 train_time:7718ms step_avg:38.40ms +[2025-09-11 09:10:43] [Rank 0] step:201/10000 train_time:7718ms step_avg:38.40ms +[2025-09-11 09:10:43] [Rank 0] step:221/10000 train_time:8448ms step_avg:38.23ms +[2025-09-11 09:10:43] [Rank 0] step:221/10000 train_time:8448ms step_avg:38.23ms +[2025-09-11 09:10:44] [Rank 0] step:241/10000 train_time:9178ms step_avg:38.08ms +[2025-09-11 09:10:44] [Rank 0] step:241/10000 train_time:9178ms step_avg:38.08ms +[2025-09-11 09:10:45] [Rank 0] 
step:261/10000 train_time:9908ms step_avg:37.96ms +[2025-09-11 09:10:45] [Rank 0] step:261/10000 train_time:9908ms step_avg:37.96ms +[2025-09-11 09:10:46] [Rank 0] step:281/10000 train_time:10638ms step_avg:37.86ms +[2025-09-11 09:10:46] [Rank 0] step:281/10000 train_time:10638ms step_avg:37.86ms +[2025-09-11 09:10:46] [Rank 0] step:301/10000 train_time:11368ms step_avg:37.77ms +[2025-09-11 09:10:46] [Rank 0] step:301/10000 train_time:11368ms step_avg:37.77ms +[2025-09-11 09:10:47] [Rank 0] step:321/10000 train_time:12098ms step_avg:37.69ms +[2025-09-11 09:10:47] [Rank 0] step:321/10000 train_time:12098ms step_avg:37.69ms +[2025-09-11 09:10:48] [Rank 0] step:341/10000 train_time:12827ms step_avg:37.62ms +[2025-09-11 09:10:48] [Rank 0] step:341/10000 train_time:12827ms step_avg:37.62ms +[2025-09-11 09:10:49] [Rank 0] step:361/10000 train_time:13664ms step_avg:37.85ms +[2025-09-11 09:10:49] [Rank 0] step:361/10000 train_time:13664ms step_avg:37.85ms +[2025-09-11 09:10:50] [Rank 0] step:381/10000 train_time:14543ms step_avg:38.17ms +[2025-09-11 09:10:50] [Rank 0] step:381/10000 train_time:14543ms step_avg:38.17ms +[2025-09-11 09:10:50] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 09:10:50] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 09:10:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 09:10:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 09:11:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 09:11:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 09:11:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:11:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 09:11:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 09:11:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 09:11:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 09:11:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 09:11:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 09:11:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 09:11:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 09:11:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 09:11:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 09:11:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 09:11:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 09:11:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 09:11:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 09:11:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 09:11:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 09:11:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 09:11:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 09:11:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 09:11:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 09:11:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 09:11:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 09:11:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 09:11:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 09:11:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 09:11:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 09:11:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 09:11:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 09:11:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 09:11:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 09:11:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 09:11:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 09:11:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 09:11:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 09:11:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 09:11:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 09:11:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 09:11:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:11:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:11:46] [Rank 0] PRINT: step:400/10000 val_loss:5.9325 total_sharp:1.6804e-04 L1_sharp:5.3971e-02 L2_sharp:5.2382e-02 L3_sharp:5.3150e-02 L4_sharp:5.4165e-02 L5_sharp:5.0845e-02 L6_sharp:5.0647e-02 L7_sharp:8.0623e-02 L8_sharp:6.4162e-02 L9_sharp:1.2846e-01 L10_sharp:1.6246e-01 L11_sharp:1.4802e-01 L12_sharp:3.0809e-01 total_fnorm:1.9427e+02 total_l1_linf:4.7933e+05 total_spectral:9.7123e+01 L1_fnorm:2.4231e-01 L2_fnorm:2.4174e-01 L3_fnorm:2.4146e-01 L4_fnorm:2.4158e-01 L5_fnorm:2.4244e-01 L6_fnorm:2.4279e-01 L7_fnorm:2.4624e-01 L8_fnorm:2.4383e-01 L9_fnorm:2.4572e-01 L10_fnorm:2.4560e-01 L11_fnorm:2.4453e-01 L12_fnorm:2.4295e-01 L1_l1linf:8.9500e-02 L2_l1linf:8.9003e-02 L3_l1linf:8.8106e-02 L4_l1linf:8.8579e-02 L5_l1linf:8.8516e-02 L6_l1linf:8.8387e-02 L7_l1linf:8.8362e-02 L8_l1linf:8.9046e-02 L9_l1linf:8.8843e-02 L10_l1linf:8.9377e-02 L11_l1linf:8.9220e-02 L12_l1linf:8.9450e-02 L1_spectral:2.4099e-03 L2_spectral:2.4101e-03 L3_spectral:2.4091e-03 L4_spectral:2.4099e-03 L5_spectral:2.4097e-03 L6_spectral:2.4111e-03 L7_spectral:2.4111e-03 L8_spectral:2.4111e-03 L9_spectral:2.4126e-03 L10_spectral:2.4096e-03 L11_spectral:2.4110e-03 L12_spectral:2.4100e-03 train_time:15253ms step_avg:38.13ms +[2025-09-11 09:11:46] [Rank 0] PRINT: step:400/10000 val_loss:5.9325 total_sharp:1.6804e-04 L1_sharp:5.3971e-02 L2_sharp:5.2382e-02 L3_sharp:5.3150e-02 L4_sharp:5.4165e-02 L5_sharp:5.0845e-02 L6_sharp:5.0647e-02 L7_sharp:8.0623e-02 L8_sharp:6.4162e-02 L9_sharp:1.2846e-01 L10_sharp:1.6246e-01 L11_sharp:1.4802e-01 L12_sharp:3.0809e-01 total_fnorm:1.9427e+02 total_l1_linf:4.7933e+05 total_spectral:9.7123e+01 L1_fnorm:2.4231e-01 L2_fnorm:2.4174e-01 L3_fnorm:2.4146e-01 L4_fnorm:2.4158e-01 L5_fnorm:2.4244e-01 L6_fnorm:2.4279e-01 L7_fnorm:2.4624e-01 L8_fnorm:2.4383e-01 L9_fnorm:2.4572e-01 L10_fnorm:2.4560e-01 L11_fnorm:2.4453e-01 L12_fnorm:2.4295e-01 L1_l1linf:8.9500e-02 L2_l1linf:8.9003e-02 L3_l1linf:8.8106e-02 L4_l1linf:8.8579e-02 L5_l1linf:8.8516e-02 
L6_l1linf:8.8387e-02 L7_l1linf:8.8362e-02 L8_l1linf:8.9046e-02 L9_l1linf:8.8843e-02 L10_l1linf:8.9377e-02 L11_l1linf:8.9220e-02 L12_l1linf:8.9450e-02 L1_spectral:2.4099e-03 L2_spectral:2.4101e-03 L3_spectral:2.4091e-03 L4_spectral:2.4099e-03 L5_spectral:2.4097e-03 L6_spectral:2.4111e-03 L7_spectral:2.4111e-03 L8_spectral:2.4111e-03 L9_spectral:2.4126e-03 L10_spectral:2.4096e-03 L11_spectral:2.4110e-03 L12_spectral:2.4100e-03 train_time:15253ms step_avg:38.13ms +[2025-09-11 09:12:19] [Rank 0] step:401/10000 train_time:48443ms step_avg:120.80ms +[2025-09-11 09:12:19] [Rank 0] step:401/10000 train_time:48443ms step_avg:120.80ms +[2025-09-11 09:12:22] [Rank 0] step:421/10000 train_time:50845ms step_avg:120.77ms +[2025-09-11 09:12:22] [Rank 0] step:421/10000 train_time:50845ms step_avg:120.77ms +[2025-09-11 09:12:22] [Rank 0] step:441/10000 train_time:51487ms step_avg:116.75ms +[2025-09-11 09:12:22] [Rank 0] step:441/10000 train_time:51487ms step_avg:116.75ms +[2025-09-11 09:12:23] [Rank 0] step:461/10000 train_time:52128ms step_avg:113.08ms +[2025-09-11 09:12:23] [Rank 0] step:461/10000 train_time:52128ms step_avg:113.08ms +[2025-09-11 09:12:24] [Rank 0] step:481/10000 train_time:52769ms step_avg:109.71ms +[2025-09-11 09:12:24] [Rank 0] step:481/10000 train_time:52769ms step_avg:109.71ms +[2025-09-11 09:12:24] [Rank 0] step:501/10000 train_time:53410ms step_avg:106.61ms +[2025-09-11 09:12:24] [Rank 0] step:501/10000 train_time:53410ms step_avg:106.61ms +[2025-09-11 09:12:25] [Rank 0] step:521/10000 train_time:54052ms step_avg:103.75ms +[2025-09-11 09:12:25] [Rank 0] step:521/10000 train_time:54052ms step_avg:103.75ms +[2025-09-11 09:12:26] [Rank 0] step:541/10000 train_time:54693ms step_avg:101.10ms +[2025-09-11 09:12:26] [Rank 0] step:541/10000 train_time:54693ms step_avg:101.10ms +[2025-09-11 09:12:26] [Rank 0] step:561/10000 train_time:55334ms step_avg:98.63ms +[2025-09-11 09:12:26] [Rank 0] step:561/10000 train_time:55334ms step_avg:98.63ms +[2025-09-11 09:12:27] 
[Rank 0] step:581/10000 train_time:55976ms step_avg:96.34ms +[2025-09-11 09:12:27] [Rank 0] step:581/10000 train_time:55976ms step_avg:96.34ms +[2025-09-11 09:12:27] [Rank 0] step:601/10000 train_time:56617ms step_avg:94.20ms +[2025-09-11 09:12:27] [Rank 0] step:601/10000 train_time:56617ms step_avg:94.20ms +[2025-09-11 09:12:28] [Rank 0] step:621/10000 train_time:57257ms step_avg:92.20ms +[2025-09-11 09:12:28] [Rank 0] step:621/10000 train_time:57257ms step_avg:92.20ms +[2025-09-11 09:12:29] [Rank 0] step:641/10000 train_time:57898ms step_avg:90.32ms +[2025-09-11 09:12:29] [Rank 0] step:641/10000 train_time:57898ms step_avg:90.32ms +[2025-09-11 09:12:29] [Rank 0] step:661/10000 train_time:58538ms step_avg:88.56ms +[2025-09-11 09:12:29] [Rank 0] step:661/10000 train_time:58538ms step_avg:88.56ms +[2025-09-11 09:12:30] [Rank 0] step:681/10000 train_time:59178ms step_avg:86.90ms +[2025-09-11 09:12:30] [Rank 0] step:681/10000 train_time:59178ms step_avg:86.90ms +[2025-09-11 09:12:31] [Rank 0] step:701/10000 train_time:59819ms step_avg:85.33ms +[2025-09-11 09:12:31] [Rank 0] step:701/10000 train_time:59819ms step_avg:85.33ms +[2025-09-11 09:12:31] [Rank 0] step:721/10000 train_time:60460ms step_avg:83.86ms +[2025-09-11 09:12:31] [Rank 0] step:721/10000 train_time:60460ms step_avg:83.86ms +[2025-09-11 09:12:32] [Rank 0] step:741/10000 train_time:61100ms step_avg:82.46ms +[2025-09-11 09:12:32] [Rank 0] step:741/10000 train_time:61100ms step_avg:82.46ms +[2025-09-11 09:12:33] [Rank 0] step:761/10000 train_time:61745ms step_avg:81.14ms +[2025-09-11 09:12:33] [Rank 0] step:761/10000 train_time:61745ms step_avg:81.14ms +[2025-09-11 09:12:33] [Rank 0] step:781/10000 train_time:62391ms step_avg:79.89ms +[2025-09-11 09:12:33] [Rank 0] step:781/10000 train_time:62391ms step_avg:79.89ms +[2025-09-11 09:12:34] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 09:12:34] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 09:12:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 09:12:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 09:13:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 09:13:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 09:13:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:13:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:13:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 09:13:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 09:13:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 09:13:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 09:13:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 09:13:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 09:13:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 09:13:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 09:13:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 09:13:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 09:13:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 09:13:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 09:13:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 09:13:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 09:13:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 09:13:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 09:13:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 09:13:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 09:13:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 09:13:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 09:13:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 09:13:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 09:13:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 09:13:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 09:13:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 09:13:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 09:13:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 09:13:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 09:13:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 09:13:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 09:13:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... 
+[2025-09-11 09:13:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 09:13:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 09:13:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 09:13:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 09:13:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 09:13:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:13:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:13:25] [Rank 0] PRINT: step:800/10000 val_loss:5.5026 total_sharp:1.3487e-04 L1_sharp:3.4559e-02 L2_sharp:2.8518e-02 L3_sharp:3.1257e-02 L4_sharp:3.1349e-02 L5_sharp:4.3046e-02 L6_sharp:3.9674e-02 L7_sharp:6.7771e-02 L8_sharp:7.1744e-02 L9_sharp:1.4705e-01 L10_sharp:2.9098e-01 L11_sharp:4.9713e-01 L12_sharp:6.0159e-01 total_fnorm:1.9400e+02 total_l1_linf:4.4442e+05 total_spectral:9.7000e+01 L1_fnorm:2.4805e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4902e-01 L5_fnorm:2.5000e-01 L6_fnorm:2.5391e-01 L7_fnorm:2.5586e-01 L8_fnorm:2.5195e-01 L9_fnorm:2.5391e-01 L10_fnorm:2.4707e-01 L11_fnorm:2.4512e-01 L12_fnorm:2.3340e-01 L1_l1linf:8.6914e-02 L2_l1linf:8.6426e-02 L3_l1linf:8.6426e-02 L4_l1linf:8.5938e-02 L5_l1linf:8.5449e-02 L6_l1linf:8.6426e-02 L7_l1linf:8.6914e-02 L8_l1linf:8.6914e-02 L9_l1linf:8.6914e-02 L10_l1linf:8.5938e-02 L11_l1linf:8.5938e-02 L12_l1linf:8.3008e-02 L1_spectral:3.1347e-03 L2_spectral:3.1214e-03 L3_spectral:3.1237e-03 L4_spectral:3.1255e-03 L5_spectral:3.1018e-03 L6_spectral:3.1187e-03 L7_spectral:3.1070e-03 L8_spectral:3.0745e-03 L9_spectral:3.0980e-03 L10_spectral:3.1059e-03 L11_spectral:3.0880e-03 L12_spectral:3.0601e-03 train_time:63020ms step_avg:78.77ms +[2025-09-11 09:13:25] [Rank 0] PRINT: step:800/10000 val_loss:5.5026 
total_sharp:1.3487e-04 L1_sharp:3.4559e-02 L2_sharp:2.8518e-02 L3_sharp:3.1257e-02 L4_sharp:3.1349e-02 L5_sharp:4.3046e-02 L6_sharp:3.9674e-02 L7_sharp:6.7771e-02 L8_sharp:7.1744e-02 L9_sharp:1.4705e-01 L10_sharp:2.9098e-01 L11_sharp:4.9713e-01 L12_sharp:6.0159e-01 total_fnorm:1.9400e+02 total_l1_linf:4.4442e+05 total_spectral:9.7000e+01 L1_fnorm:2.4805e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4902e-01 L4_fnorm:2.4902e-01 L5_fnorm:2.5000e-01 L6_fnorm:2.5391e-01 L7_fnorm:2.5586e-01 L8_fnorm:2.5195e-01 L9_fnorm:2.5391e-01 L10_fnorm:2.4707e-01 L11_fnorm:2.4512e-01 L12_fnorm:2.3340e-01 L1_l1linf:8.6914e-02 L2_l1linf:8.6426e-02 L3_l1linf:8.6426e-02 L4_l1linf:8.5938e-02 L5_l1linf:8.5449e-02 L6_l1linf:8.6426e-02 L7_l1linf:8.6914e-02 L8_l1linf:8.6914e-02 L9_l1linf:8.6914e-02 L10_l1linf:8.5938e-02 L11_l1linf:8.5938e-02 L12_l1linf:8.3008e-02 L1_spectral:3.1347e-03 L2_spectral:3.1214e-03 L3_spectral:3.1237e-03 L4_spectral:3.1255e-03 L5_spectral:3.1018e-03 L6_spectral:3.1187e-03 L7_spectral:3.1070e-03 L8_spectral:3.0745e-03 L9_spectral:3.0980e-03 L10_spectral:3.1059e-03 L11_spectral:3.0880e-03 L12_spectral:3.0601e-03 train_time:63020ms step_avg:78.77ms +[2025-09-11 09:13:27] [Rank 0] step:801/10000 train_time:64683ms step_avg:80.75ms +[2025-09-11 09:13:27] [Rank 0] step:801/10000 train_time:64683ms step_avg:80.75ms +[2025-09-11 09:13:27] [Rank 0] step:821/10000 train_time:65318ms step_avg:79.56ms +[2025-09-11 09:13:27] [Rank 0] step:821/10000 train_time:65318ms step_avg:79.56ms +[2025-09-11 09:13:28] [Rank 0] step:841/10000 train_time:65966ms step_avg:78.44ms +[2025-09-11 09:13:28] [Rank 0] step:841/10000 train_time:65966ms step_avg:78.44ms +[2025-09-11 09:13:29] [Rank 0] step:861/10000 train_time:66613ms step_avg:77.37ms +[2025-09-11 09:13:29] [Rank 0] step:861/10000 train_time:66613ms step_avg:77.37ms +[2025-09-11 09:13:29] [Rank 0] step:881/10000 train_time:67261ms step_avg:76.35ms +[2025-09-11 09:13:29] [Rank 0] step:881/10000 train_time:67261ms step_avg:76.35ms +[2025-09-11 
09:13:30] [Rank 0] step:901/10000 train_time:67908ms step_avg:75.37ms +[2025-09-11 09:13:30] [Rank 0] step:901/10000 train_time:67908ms step_avg:75.37ms +[2025-09-11 09:13:31] [Rank 0] step:921/10000 train_time:68554ms step_avg:74.43ms +[2025-09-11 09:13:31] [Rank 0] step:921/10000 train_time:68554ms step_avg:74.43ms +[2025-09-11 09:13:31] [Rank 0] step:941/10000 train_time:69201ms step_avg:73.54ms +[2025-09-11 09:13:31] [Rank 0] step:941/10000 train_time:69201ms step_avg:73.54ms +[2025-09-11 09:13:32] [Rank 0] step:961/10000 train_time:69848ms step_avg:72.68ms +[2025-09-11 09:13:32] [Rank 0] step:961/10000 train_time:69848ms step_avg:72.68ms +[2025-09-11 09:13:33] [Rank 0] step:981/10000 train_time:70495ms step_avg:71.86ms +[2025-09-11 09:13:33] [Rank 0] step:981/10000 train_time:70495ms step_avg:71.86ms +[2025-09-11 09:13:33] [Rank 0] step:1001/10000 train_time:71142ms step_avg:71.07ms +[2025-09-11 09:13:33] [Rank 0] step:1001/10000 train_time:71142ms step_avg:71.07ms +[2025-09-11 09:13:34] [Rank 0] step:1021/10000 train_time:71789ms step_avg:70.31ms +[2025-09-11 09:13:34] [Rank 0] step:1021/10000 train_time:71789ms step_avg:70.31ms +[2025-09-11 09:13:35] [Rank 0] step:1041/10000 train_time:72435ms step_avg:69.58ms +[2025-09-11 09:13:35] [Rank 0] step:1041/10000 train_time:72435ms step_avg:69.58ms +[2025-09-11 09:13:35] [Rank 0] step:1061/10000 train_time:73081ms step_avg:68.88ms +[2025-09-11 09:13:35] [Rank 0] step:1061/10000 train_time:73081ms step_avg:68.88ms +[2025-09-11 09:13:36] [Rank 0] step:1081/10000 train_time:73728ms step_avg:68.20ms +[2025-09-11 09:13:36] [Rank 0] step:1081/10000 train_time:73728ms step_avg:68.20ms +[2025-09-11 09:13:36] [Rank 0] step:1101/10000 train_time:74374ms step_avg:67.55ms +[2025-09-11 09:13:36] [Rank 0] step:1101/10000 train_time:74374ms step_avg:67.55ms +[2025-09-11 09:13:37] [Rank 0] step:1121/10000 train_time:75019ms step_avg:66.92ms +[2025-09-11 09:13:37] [Rank 0] step:1121/10000 train_time:75019ms step_avg:66.92ms 
+[2025-09-11 09:13:38] [Rank 0] step:1141/10000 train_time:75666ms step_avg:66.32ms +[2025-09-11 09:13:38] [Rank 0] step:1141/10000 train_time:75666ms step_avg:66.32ms +[2025-09-11 09:13:38] [Rank 0] step:1161/10000 train_time:76313ms step_avg:65.73ms +[2025-09-11 09:13:38] [Rank 0] step:1161/10000 train_time:76313ms step_avg:65.73ms +[2025-09-11 09:13:39] [Rank 0] step:1181/10000 train_time:76961ms step_avg:65.17ms +[2025-09-11 09:13:39] [Rank 0] step:1181/10000 train_time:76961ms step_avg:65.17ms +[2025-09-11 09:13:40] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 09:13:40] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 09:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 09:13:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 09:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 09:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 09:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 09:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 09:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 09:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 09:13:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... 
+[2025-09-11 09:13:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 09:13:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 09:13:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 09:13:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 09:13:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 09:13:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 09:13:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 09:13:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 09:13:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 09:13:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 09:13:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 09:13:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 09:13:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 09:13:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 09:13:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 09:13:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 09:13:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 09:13:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 09:13:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... 
+[2025-09-11 09:13:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 09:13:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 09:13:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 09:13:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 09:13:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 09:13:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 09:13:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 09:13:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 09:13:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 09:13:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 09:13:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 09:13:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 09:13:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:13:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:13:50] [Rank 0] PRINT: step:1200/10000 val_loss:5.2378 total_sharp:1.0398e-04 L1_sharp:2.4909e-02 L2_sharp:2.0480e-02 L3_sharp:2.0797e-02 L4_sharp:2.9331e-02 L5_sharp:3.7835e-02 L6_sharp:3.8172e-02 L7_sharp:5.1457e-02 L8_sharp:6.0026e-02 L9_sharp:7.3699e-02 L10_sharp:1.6111e-01 L11_sharp:2.0020e-01 L12_sharp:6.1840e-01 total_fnorm:2.0700e+02 total_l1_linf:4.6080e+05 total_spectral:1.0350e+02 L1_fnorm:2.4902e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4805e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4902e-01 L6_fnorm:2.4902e-01 L7_fnorm:2.5195e-01 L8_fnorm:2.5000e-01 L9_fnorm:2.5391e-01 L10_fnorm:2.5586e-01 L11_fnorm:2.5586e-01 L12_fnorm:2.5391e-01 L1_l1linf:7.9590e-02 L2_l1linf:7.9590e-02 L3_l1linf:7.9102e-02 L4_l1linf:7.8125e-02 L5_l1linf:7.8613e-02 L6_l1linf:7.8613e-02 L7_l1linf:7.8613e-02 L8_l1linf:7.9102e-02 L9_l1linf:8.0566e-02 L10_l1linf:8.1543e-02 L11_l1linf:8.3984e-02 L12_l1linf:8.3984e-02 L1_spectral:3.1629e-03 L2_spectral:3.1355e-03 L3_spectral:3.1532e-03 L4_spectral:3.1512e-03 L5_spectral:3.1529e-03 L6_spectral:3.1599e-03 L7_spectral:3.1515e-03 L8_spectral:3.1379e-03 L9_spectral:3.1550e-03 L10_spectral:3.1435e-03 L11_spectral:3.1474e-03 L12_spectral:3.0998e-03 train_time:77590ms step_avg:64.66ms +[2025-09-11 09:13:50] [Rank 0] PRINT: step:1200/10000 val_loss:5.2378 total_sharp:1.0398e-04 L1_sharp:2.4909e-02 L2_sharp:2.0480e-02 L3_sharp:2.0797e-02 L4_sharp:2.9331e-02 L5_sharp:3.7835e-02 L6_sharp:3.8172e-02 L7_sharp:5.1457e-02 L8_sharp:6.0026e-02 L9_sharp:7.3699e-02 L10_sharp:1.6111e-01 L11_sharp:2.0020e-01 L12_sharp:6.1840e-01 total_fnorm:2.0700e+02 total_l1_linf:4.6080e+05 total_spectral:1.0350e+02 L1_fnorm:2.4902e-01 L2_fnorm:2.4902e-01 L3_fnorm:2.4805e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4902e-01 L6_fnorm:2.4902e-01 L7_fnorm:2.5195e-01 L8_fnorm:2.5000e-01 L9_fnorm:2.5391e-01 L10_fnorm:2.5586e-01 L11_fnorm:2.5586e-01 L12_fnorm:2.5391e-01 L1_l1linf:7.9590e-02 L2_l1linf:7.9590e-02 L3_l1linf:7.9102e-02 L4_l1linf:7.8125e-02 L5_l1linf:7.8613e-02 
L6_l1linf:7.8613e-02 L7_l1linf:7.8613e-02 L8_l1linf:7.9102e-02 L9_l1linf:8.0566e-02 L10_l1linf:8.1543e-02 L11_l1linf:8.3984e-02 L12_l1linf:8.3984e-02 L1_spectral:3.1629e-03 L2_spectral:3.1355e-03 L3_spectral:3.1532e-03 L4_spectral:3.1512e-03 L5_spectral:3.1529e-03 L6_spectral:3.1599e-03 L7_spectral:3.1515e-03 L8_spectral:3.1379e-03 L9_spectral:3.1550e-03 L10_spectral:3.1435e-03 L11_spectral:3.1474e-03 L12_spectral:3.0998e-03 train_time:77590ms step_avg:64.66ms +[2025-09-11 09:13:52] [Rank 0] step:1201/10000 train_time:79264ms step_avg:66.00ms +[2025-09-11 09:13:52] [Rank 0] step:1201/10000 train_time:79264ms step_avg:66.00ms +[2025-09-11 09:13:53] [Rank 0] step:1221/10000 train_time:79899ms step_avg:65.44ms +[2025-09-11 09:13:53] [Rank 0] step:1221/10000 train_time:79899ms step_avg:65.44ms +[2025-09-11 09:13:53] [Rank 0] step:1241/10000 train_time:80547ms step_avg:64.90ms +[2025-09-11 09:13:53] [Rank 0] step:1241/10000 train_time:80547ms step_avg:64.90ms +[2025-09-11 09:13:54] [Rank 0] step:1261/10000 train_time:81193ms step_avg:64.39ms +[2025-09-11 09:13:54] [Rank 0] step:1261/10000 train_time:81193ms step_avg:64.39ms +[2025-09-11 09:13:54] [Rank 0] step:1281/10000 train_time:81840ms step_avg:63.89ms +[2025-09-11 09:13:54] [Rank 0] step:1281/10000 train_time:81840ms step_avg:63.89ms +[2025-09-11 09:13:55] [Rank 0] step:1301/10000 train_time:82487ms step_avg:63.40ms +[2025-09-11 09:13:55] [Rank 0] step:1301/10000 train_time:82487ms step_avg:63.40ms +[2025-09-11 09:13:56] [Rank 0] step:1321/10000 train_time:83133ms step_avg:62.93ms +[2025-09-11 09:13:56] [Rank 0] step:1321/10000 train_time:83133ms step_avg:62.93ms +[2025-09-11 09:13:56] [Rank 0] step:1341/10000 train_time:83780ms step_avg:62.48ms +[2025-09-11 09:13:56] [Rank 0] step:1341/10000 train_time:83780ms step_avg:62.48ms +[2025-09-11 09:13:57] [Rank 0] step:1361/10000 train_time:84427ms step_avg:62.03ms +[2025-09-11 09:13:57] [Rank 0] step:1361/10000 train_time:84427ms step_avg:62.03ms +[2025-09-11 09:13:58] 
[Rank 0] step:1381/10000 train_time:85073ms step_avg:61.60ms +[2025-09-11 09:13:58] [Rank 0] step:1381/10000 train_time:85073ms step_avg:61.60ms +[2025-09-11 09:13:59] [Rank 0] step:1401/10000 train_time:86035ms step_avg:61.41ms +[2025-09-11 09:13:59] [Rank 0] step:1401/10000 train_time:86035ms step_avg:61.41ms +[2025-09-11 09:13:59] [Rank 0] step:1421/10000 train_time:86681ms step_avg:61.00ms +[2025-09-11 09:13:59] [Rank 0] step:1421/10000 train_time:86681ms step_avg:61.00ms +[2025-09-11 09:14:00] [Rank 0] step:1441/10000 train_time:87327ms step_avg:60.60ms +[2025-09-11 09:14:00] [Rank 0] step:1441/10000 train_time:87327ms step_avg:60.60ms +[2025-09-11 09:14:01] [Rank 0] step:1461/10000 train_time:87973ms step_avg:60.21ms +[2025-09-11 09:14:01] [Rank 0] step:1461/10000 train_time:87973ms step_avg:60.21ms +[2025-09-11 09:14:02] [Rank 0] step:1481/10000 train_time:88919ms step_avg:60.04ms +[2025-09-11 09:14:02] [Rank 0] step:1481/10000 train_time:88919ms step_avg:60.04ms +[2025-09-11 09:14:02] [Rank 0] step:1501/10000 train_time:89569ms step_avg:59.67ms +[2025-09-11 09:14:02] [Rank 0] step:1501/10000 train_time:89569ms step_avg:59.67ms +[2025-09-11 09:14:03] [Rank 0] step:1521/10000 train_time:90219ms step_avg:59.32ms +[2025-09-11 09:14:03] [Rank 0] step:1521/10000 train_time:90219ms step_avg:59.32ms +[2025-09-11 09:14:03] [Rank 0] step:1541/10000 train_time:90870ms step_avg:58.97ms +[2025-09-11 09:14:03] [Rank 0] step:1541/10000 train_time:90870ms step_avg:58.97ms +[2025-09-11 09:14:04] [Rank 0] step:1561/10000 train_time:91521ms step_avg:58.63ms +[2025-09-11 09:14:04] [Rank 0] step:1561/10000 train_time:91521ms step_avg:58.63ms +[2025-09-11 09:14:05] [Rank 0] step:1581/10000 train_time:92172ms step_avg:58.30ms +[2025-09-11 09:14:05] [Rank 0] step:1581/10000 train_time:92172ms step_avg:58.30ms +[2025-09-11 09:14:05] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 09:14:05] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 09:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 09:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 09:14:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 09:14:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 09:14:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:14:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:14:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 09:14:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 09:14:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 09:14:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 09:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 09:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 09:14:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 09:14:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 09:14:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 09:14:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 09:14:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 09:14:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 09:14:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 09:14:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 09:14:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 09:14:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 09:14:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 09:14:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 09:14:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 09:14:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 09:14:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 09:14:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 09:14:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 09:14:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 09:14:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 09:14:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 09:14:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 09:14:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 09:14:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 09:14:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 09:14:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 09:14:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 09:14:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 09:14:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 09:14:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 09:14:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 09:14:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:14:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:14:17] [Rank 0] PRINT: step:1600/10000 val_loss:5.0968 total_sharp:8.5018e-05 L1_sharp:1.2102e-02 L2_sharp:1.0563e-02 L3_sharp:1.3054e-02 L4_sharp:1.6438e-02 L5_sharp:2.0941e-02 L6_sharp:2.3931e-02 L7_sharp:3.1824e-02 L8_sharp:4.1513e-02 L9_sharp:5.0146e-02 L10_sharp:1.3413e-01 L11_sharp:1.8908e-01 L12_sharp:8.6508e-01 total_fnorm:1.8700e+02 total_l1_linf:3.9936e+05 total_spectral:9.3500e+01 L1_fnorm:2.4902e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4805e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4805e-01 L6_fnorm:2.4902e-01 L7_fnorm:2.5000e-01 L8_fnorm:2.5000e-01 L9_fnorm:2.5195e-01 L10_fnorm:2.5391e-01 L11_fnorm:2.5391e-01 L12_fnorm:2.5391e-01 L1_l1linf:7.6172e-02 L2_l1linf:7.4707e-02 L3_l1linf:7.4707e-02 L4_l1linf:7.5195e-02 L5_l1linf:7.5195e-02 L6_l1linf:7.3242e-02 L7_l1linf:7.4707e-02 L8_l1linf:7.4707e-02 L9_l1linf:7.5195e-02 L10_l1linf:7.8125e-02 L11_l1linf:8.0078e-02 L12_l1linf:8.1055e-02 L1_spectral:3.1809e-03 L2_spectral:3.1679e-03 L3_spectral:3.1857e-03 L4_spectral:3.1805e-03 L5_spectral:3.1645e-03 L6_spectral:3.1837e-03 L7_spectral:3.1824e-03 L8_spectral:3.1815e-03 L9_spectral:3.1710e-03 L10_spectral:3.1784e-03 L11_spectral:3.1447e-03 L12_spectral:3.1217e-03 train_time:92804ms step_avg:58.00ms +[2025-09-11 09:14:17] [Rank 0] PRINT: step:1600/10000 
val_loss:5.0968 total_sharp:8.5018e-05 L1_sharp:1.2102e-02 L2_sharp:1.0563e-02 L3_sharp:1.3054e-02 L4_sharp:1.6438e-02 L5_sharp:2.0941e-02 L6_sharp:2.3931e-02 L7_sharp:3.1824e-02 L8_sharp:4.1513e-02 L9_sharp:5.0146e-02 L10_sharp:1.3413e-01 L11_sharp:1.8908e-01 L12_sharp:8.6508e-01 total_fnorm:1.8700e+02 total_l1_linf:3.9936e+05 total_spectral:9.3500e+01 L1_fnorm:2.4902e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4805e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4805e-01 L6_fnorm:2.4902e-01 L7_fnorm:2.5000e-01 L8_fnorm:2.5000e-01 L9_fnorm:2.5195e-01 L10_fnorm:2.5391e-01 L11_fnorm:2.5391e-01 L12_fnorm:2.5391e-01 L1_l1linf:7.6172e-02 L2_l1linf:7.4707e-02 L3_l1linf:7.4707e-02 L4_l1linf:7.5195e-02 L5_l1linf:7.5195e-02 L6_l1linf:7.3242e-02 L7_l1linf:7.4707e-02 L8_l1linf:7.4707e-02 L9_l1linf:7.5195e-02 L10_l1linf:7.8125e-02 L11_l1linf:8.0078e-02 L12_l1linf:8.1055e-02 L1_spectral:3.1809e-03 L2_spectral:3.1679e-03 L3_spectral:3.1857e-03 L4_spectral:3.1805e-03 L5_spectral:3.1645e-03 L6_spectral:3.1837e-03 L7_spectral:3.1824e-03 L8_spectral:3.1815e-03 L9_spectral:3.1710e-03 L10_spectral:3.1784e-03 L11_spectral:3.1447e-03 L12_spectral:3.1217e-03 train_time:92804ms step_avg:58.00ms +[2025-09-11 09:14:19] [Rank 0] step:1601/10000 train_time:94438ms step_avg:58.99ms +[2025-09-11 09:14:19] [Rank 0] step:1601/10000 train_time:94438ms step_avg:58.99ms +[2025-09-11 09:14:19] [Rank 0] step:1621/10000 train_time:95120ms step_avg:58.68ms +[2025-09-11 09:14:19] [Rank 0] step:1621/10000 train_time:95120ms step_avg:58.68ms +[2025-09-11 09:14:20] [Rank 0] step:1641/10000 train_time:95771ms step_avg:58.36ms +[2025-09-11 09:14:20] [Rank 0] step:1641/10000 train_time:95771ms step_avg:58.36ms +[2025-09-11 09:14:21] [Rank 0] step:1661/10000 train_time:96423ms step_avg:58.05ms +[2025-09-11 09:14:21] [Rank 0] step:1661/10000 train_time:96423ms step_avg:58.05ms +[2025-09-11 09:14:21] [Rank 0] step:1681/10000 train_time:97075ms step_avg:57.75ms +[2025-09-11 09:14:21] [Rank 0] step:1681/10000 train_time:97075ms 
step_avg:57.75ms +[2025-09-11 09:14:22] [Rank 0] step:1701/10000 train_time:97726ms step_avg:57.45ms +[2025-09-11 09:14:22] [Rank 0] step:1701/10000 train_time:97726ms step_avg:57.45ms +[2025-09-11 09:14:23] [Rank 0] step:1721/10000 train_time:98378ms step_avg:57.16ms +[2025-09-11 09:14:23] [Rank 0] step:1721/10000 train_time:98378ms step_avg:57.16ms +[2025-09-11 09:14:23] [Rank 0] step:1741/10000 train_time:99029ms step_avg:56.88ms +[2025-09-11 09:14:23] [Rank 0] step:1741/10000 train_time:99029ms step_avg:56.88ms +[2025-09-11 09:14:24] [Rank 0] step:1761/10000 train_time:99681ms step_avg:56.61ms +[2025-09-11 09:14:24] [Rank 0] step:1761/10000 train_time:99681ms step_avg:56.61ms +[2025-09-11 09:14:25] [Rank 0] step:1781/10000 train_time:100333ms step_avg:56.34ms +[2025-09-11 09:14:25] [Rank 0] step:1781/10000 train_time:100333ms step_avg:56.34ms +[2025-09-11 09:14:25] [Rank 0] step:1801/10000 train_time:100983ms step_avg:56.07ms +[2025-09-11 09:14:25] [Rank 0] step:1801/10000 train_time:100983ms step_avg:56.07ms +[2025-09-11 09:14:26] [Rank 0] step:1821/10000 train_time:101634ms step_avg:55.81ms +[2025-09-11 09:14:26] [Rank 0] step:1821/10000 train_time:101634ms step_avg:55.81ms +[2025-09-11 09:14:27] [Rank 0] step:1841/10000 train_time:102285ms step_avg:55.56ms +[2025-09-11 09:14:27] [Rank 0] step:1841/10000 train_time:102285ms step_avg:55.56ms +[2025-09-11 09:14:27] [Rank 0] step:1861/10000 train_time:102937ms step_avg:55.31ms +[2025-09-11 09:14:27] [Rank 0] step:1861/10000 train_time:102937ms step_avg:55.31ms +[2025-09-11 09:14:28] [Rank 0] step:1881/10000 train_time:103588ms step_avg:55.07ms +[2025-09-11 09:14:28] [Rank 0] step:1881/10000 train_time:103588ms step_avg:55.07ms +[2025-09-11 09:14:29] [Rank 0] step:1901/10000 train_time:104239ms step_avg:54.83ms +[2025-09-11 09:14:29] [Rank 0] step:1901/10000 train_time:104239ms step_avg:54.83ms +[2025-09-11 09:14:29] [Rank 0] step:1921/10000 train_time:104891ms step_avg:54.60ms +[2025-09-11 09:14:29] [Rank 0] 
step:1921/10000 train_time:104891ms step_avg:54.60ms +[2025-09-11 09:14:30] [Rank 0] step:1941/10000 train_time:105543ms step_avg:54.38ms +[2025-09-11 09:14:30] [Rank 0] step:1941/10000 train_time:105543ms step_avg:54.38ms +[2025-09-11 09:14:31] [Rank 0] step:1961/10000 train_time:106196ms step_avg:54.15ms +[2025-09-11 09:14:31] [Rank 0] step:1961/10000 train_time:106196ms step_avg:54.15ms +[2025-09-11 09:14:31] [Rank 0] step:1981/10000 train_time:106848ms step_avg:53.94ms +[2025-09-11 09:14:31] [Rank 0] step:1981/10000 train_time:106848ms step_avg:53.94ms +[2025-09-11 09:14:32] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 09:14:32] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 09:14:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 09:14:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 09:14:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 09:14:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 09:14:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:14:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:14:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 09:14:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 09:14:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 09:14:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 09:14:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 09:14:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 09:14:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 09:14:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 09:14:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 09:14:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 09:14:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 09:14:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 09:14:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 09:14:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 09:14:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 09:14:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 09:14:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 09:14:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 09:14:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 09:14:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 09:14:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 09:14:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 09:14:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 09:14:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 09:14:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 09:14:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 09:14:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 09:14:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 09:14:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 09:14:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 09:14:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 09:14:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 09:14:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 09:14:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 09:14:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 09:14:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 09:14:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:14:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:14:42] [Rank 0] PRINT: step:2000/10000 val_loss:4.9497 total_sharp:6.0919e-05 L1_sharp:1.2016e-02 L2_sharp:1.1594e-02 L3_sharp:1.3369e-02 L4_sharp:1.6472e-02 L5_sharp:1.7575e-02 L6_sharp:1.9214e-02 L7_sharp:2.7376e-02 L8_sharp:4.3308e-02 L9_sharp:4.3689e-02 L10_sharp:6.8473e-02 L11_sharp:1.0115e-01 L12_sharp:3.9498e-01 total_fnorm:1.8900e+02 total_l1_linf:4.2189e+05 total_spectral:9.5000e+01 L1_fnorm:2.4805e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4805e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4707e-01 L6_fnorm:2.4805e-01 L7_fnorm:2.4902e-01 L8_fnorm:2.4902e-01 L9_fnorm:2.5000e-01 L10_fnorm:2.5195e-01 L11_fnorm:2.5391e-01 L12_fnorm:2.5586e-01 L1_l1linf:7.4219e-02 L2_l1linf:7.2266e-02 L3_l1linf:7.2266e-02 L4_l1linf:7.1777e-02 L5_l1linf:7.2266e-02 L6_l1linf:7.0801e-02 L7_l1linf:7.2754e-02 L8_l1linf:7.2266e-02 L9_l1linf:7.1777e-02 L10_l1linf:7.3242e-02 L11_l1linf:7.6172e-02 L12_l1linf:7.8613e-02 L1_spectral:3.1699e-03 L2_spectral:3.1748e-03 L3_spectral:3.1681e-03 L4_spectral:3.1653e-03 L5_spectral:3.2051e-03 L6_spectral:3.1780e-03 L7_spectral:3.1778e-03 L8_spectral:3.2244e-03 L9_spectral:3.1866e-03 L10_spectral:3.1899e-03 L11_spectral:3.1986e-03 L12_spectral:3.1781e-03 train_time:107481ms step_avg:53.74ms +[2025-09-11 09:14:42] [Rank 0] PRINT: step:2000/10000 val_loss:4.9497 total_sharp:6.0919e-05 L1_sharp:1.2016e-02 L2_sharp:1.1594e-02 L3_sharp:1.3369e-02 L4_sharp:1.6472e-02 L5_sharp:1.7575e-02 L6_sharp:1.9214e-02 L7_sharp:2.7376e-02 L8_sharp:4.3308e-02 L9_sharp:4.3689e-02 L10_sharp:6.8473e-02 L11_sharp:1.0115e-01 L12_sharp:3.9498e-01 total_fnorm:1.8900e+02 total_l1_linf:4.2189e+05 total_spectral:9.5000e+01 L1_fnorm:2.4805e-01 L2_fnorm:2.4805e-01 L3_fnorm:2.4805e-01 L4_fnorm:2.4805e-01 L5_fnorm:2.4707e-01 L6_fnorm:2.4805e-01 L7_fnorm:2.4902e-01 L8_fnorm:2.4902e-01 L9_fnorm:2.5000e-01 L10_fnorm:2.5195e-01 L11_fnorm:2.5391e-01 L12_fnorm:2.5586e-01 L1_l1linf:7.4219e-02 L2_l1linf:7.2266e-02 L3_l1linf:7.2266e-02 L4_l1linf:7.1777e-02 L5_l1linf:7.2266e-02 
L6_l1linf:7.0801e-02 L7_l1linf:7.2754e-02 L8_l1linf:7.2266e-02 L9_l1linf:7.1777e-02 L10_l1linf:7.3242e-02 L11_l1linf:7.6172e-02 L12_l1linf:7.8613e-02 L1_spectral:3.1699e-03 L2_spectral:3.1748e-03 L3_spectral:3.1681e-03 L4_spectral:3.1653e-03 L5_spectral:3.2051e-03 L6_spectral:3.1780e-03 L7_spectral:3.1778e-03 L8_spectral:3.2244e-03 L9_spectral:3.1866e-03 L10_spectral:3.1899e-03 L11_spectral:3.1986e-03 L12_spectral:3.1781e-03 train_time:107481ms step_avg:53.74ms +[2025-09-11 09:14:44] [Rank 0] step:2001/10000 train_time:109210ms step_avg:54.58ms +[2025-09-11 09:14:44] [Rank 0] step:2001/10000 train_time:109210ms step_avg:54.58ms +[2025-09-11 09:14:45] [Rank 0] step:2021/10000 train_time:109877ms step_avg:54.37ms +[2025-09-11 09:14:45] [Rank 0] step:2021/10000 train_time:109877ms step_avg:54.37ms +[2025-09-11 09:14:45] [Rank 0] step:2041/10000 train_time:110529ms step_avg:54.15ms +[2025-09-11 09:14:45] [Rank 0] step:2041/10000 train_time:110529ms step_avg:54.15ms +[2025-09-11 09:14:46] [Rank 0] step:2061/10000 train_time:111179ms step_avg:53.94ms +[2025-09-11 09:14:46] [Rank 0] step:2061/10000 train_time:111179ms step_avg:53.94ms +[2025-09-11 09:14:47] [Rank 0] step:2081/10000 train_time:111830ms step_avg:53.74ms +[2025-09-11 09:14:47] [Rank 0] step:2081/10000 train_time:111830ms step_avg:53.74ms +[2025-09-11 09:14:47] [Rank 0] step:2101/10000 train_time:112480ms step_avg:53.54ms +[2025-09-11 09:14:47] [Rank 0] step:2101/10000 train_time:112480ms step_avg:53.54ms +[2025-09-11 09:14:48] [Rank 0] step:2121/10000 train_time:113131ms step_avg:53.34ms +[2025-09-11 09:14:48] [Rank 0] step:2121/10000 train_time:113131ms step_avg:53.34ms +[2025-09-11 09:14:49] [Rank 0] step:2141/10000 train_time:113781ms step_avg:53.14ms +[2025-09-11 09:14:49] [Rank 0] step:2141/10000 train_time:113781ms step_avg:53.14ms +[2025-09-11 09:14:49] [Rank 0] step:2161/10000 train_time:114431ms step_avg:52.95ms +[2025-09-11 09:14:49] [Rank 0] step:2161/10000 train_time:114431ms step_avg:52.95ms 
+[2025-09-11 09:14:50] [Rank 0] step:2181/10000 train_time:115081ms step_avg:52.77ms +[2025-09-11 09:14:50] [Rank 0] step:2181/10000 train_time:115081ms step_avg:52.77ms +[2025-09-11 09:14:51] [Rank 0] step:2201/10000 train_time:115731ms step_avg:52.58ms +[2025-09-11 09:14:51] [Rank 0] step:2201/10000 train_time:115731ms step_avg:52.58ms +[2025-09-11 09:14:51] [Rank 0] step:2221/10000 train_time:116381ms step_avg:52.40ms +[2025-09-11 09:14:51] [Rank 0] step:2221/10000 train_time:116381ms step_avg:52.40ms +[2025-09-11 09:14:52] [Rank 0] step:2241/10000 train_time:117043ms step_avg:52.23ms +[2025-09-11 09:14:52] [Rank 0] step:2241/10000 train_time:117043ms step_avg:52.23ms +[2025-09-11 09:14:52] [Rank 0] step:2261/10000 train_time:117707ms step_avg:52.06ms +[2025-09-11 09:14:52] [Rank 0] step:2261/10000 train_time:117707ms step_avg:52.06ms +[2025-09-11 09:14:53] [Rank 0] step:2281/10000 train_time:118370ms step_avg:51.89ms +[2025-09-11 09:14:53] [Rank 0] step:2281/10000 train_time:118370ms step_avg:51.89ms +[2025-09-11 09:14:54] [Rank 0] step:2301/10000 train_time:119033ms step_avg:51.73ms +[2025-09-11 09:14:54] [Rank 0] step:2301/10000 train_time:119033ms step_avg:51.73ms +[2025-09-11 09:14:54] [Rank 0] step:2321/10000 train_time:119696ms step_avg:51.57ms +[2025-09-11 09:14:54] [Rank 0] step:2321/10000 train_time:119696ms step_avg:51.57ms +[2025-09-11 09:14:55] [Rank 0] step:2341/10000 train_time:120360ms step_avg:51.41ms +[2025-09-11 09:14:55] [Rank 0] step:2341/10000 train_time:120360ms step_avg:51.41ms +[2025-09-11 09:14:56] [Rank 0] step:2361/10000 train_time:121023ms step_avg:51.26ms +[2025-09-11 09:14:56] [Rank 0] step:2361/10000 train_time:121023ms step_avg:51.26ms +[2025-09-11 09:14:56] [Rank 0] step:2381/10000 train_time:121686ms step_avg:51.11ms +[2025-09-11 09:14:56] [Rank 0] step:2381/10000 train_time:121686ms step_avg:51.11ms +[2025-09-11 09:14:57] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 09:14:57] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 09:14:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 09:14:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 09:15:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 09:15:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 09:15:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:15:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:15:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 09:15:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 09:15:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 09:15:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 09:15:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 09:15:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 09:15:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 09:15:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 09:15:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 09:15:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 09:15:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 09:15:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 09:15:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 09:15:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 09:15:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 09:15:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 09:15:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 09:15:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 09:15:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 09:15:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 09:15:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 09:15:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 09:15:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 09:15:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 09:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 09:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 09:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 09:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 09:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 09:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 09:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 09:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 09:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 09:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 09:15:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 09:15:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 09:15:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:15:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:15:07] [Rank 0] PRINT: step:2400/10000 val_loss:4.7913 total_sharp:6.2265e-05 L1_sharp:8.7954e-03 L2_sharp:5.8372e-03 L3_sharp:8.6555e-03 L4_sharp:1.2480e-02 L5_sharp:1.6384e-02 L6_sharp:1.7985e-02 L7_sharp:2.4591e-02 L8_sharp:3.7834e-02 L9_sharp:3.7124e-02 L10_sharp:6.9885e-02 L11_sharp:1.3284e-01 L12_sharp:3.1350e-01 total_fnorm:1.7700e+02 total_l1_linf:3.7888e+05 total_spectral:8.8500e+01 L1_fnorm:2.4707e-01 L2_fnorm:2.4707e-01 L3_fnorm:2.4707e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4707e-01 L6_fnorm:2.4805e-01 L7_fnorm:2.4805e-01 L8_fnorm:2.4609e-01 L9_fnorm:2.4902e-01 L10_fnorm:2.5000e-01 L11_fnorm:2.5195e-01 L12_fnorm:2.5391e-01 L1_l1linf:7.2266e-02 L2_l1linf:7.0312e-02 L3_l1linf:7.0801e-02 L4_l1linf:6.9824e-02 L5_l1linf:6.9336e-02 L6_l1linf:6.9336e-02 L7_l1linf:6.8848e-02 L8_l1linf:6.8359e-02 L9_l1linf:6.9336e-02 L10_l1linf:7.1289e-02 L11_l1linf:7.3242e-02 L12_l1linf:7.6172e-02 L1_spectral:3.2098e-03 L2_spectral:3.1772e-03 L3_spectral:3.1602e-03 L4_spectral:3.1603e-03 L5_spectral:3.1945e-03 L6_spectral:3.1873e-03 L7_spectral:3.1814e-03 L8_spectral:3.2392e-03 L9_spectral:3.1800e-03 L10_spectral:3.1934e-03 L11_spectral:3.1796e-03 L12_spectral:3.2236e-03 train_time:122356ms step_avg:50.98ms +[2025-09-11 09:15:07] [Rank 0] PRINT: step:2400/10000 
val_loss:4.7913 total_sharp:6.2265e-05 L1_sharp:8.7954e-03 L2_sharp:5.8372e-03 L3_sharp:8.6555e-03 L4_sharp:1.2480e-02 L5_sharp:1.6384e-02 L6_sharp:1.7985e-02 L7_sharp:2.4591e-02 L8_sharp:3.7834e-02 L9_sharp:3.7124e-02 L10_sharp:6.9885e-02 L11_sharp:1.3284e-01 L12_sharp:3.1350e-01 total_fnorm:1.7700e+02 total_l1_linf:3.7888e+05 total_spectral:8.8500e+01 L1_fnorm:2.4707e-01 L2_fnorm:2.4707e-01 L3_fnorm:2.4707e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4707e-01 L6_fnorm:2.4805e-01 L7_fnorm:2.4805e-01 L8_fnorm:2.4609e-01 L9_fnorm:2.4902e-01 L10_fnorm:2.5000e-01 L11_fnorm:2.5195e-01 L12_fnorm:2.5391e-01 L1_l1linf:7.2266e-02 L2_l1linf:7.0312e-02 L3_l1linf:7.0801e-02 L4_l1linf:6.9824e-02 L5_l1linf:6.9336e-02 L6_l1linf:6.9336e-02 L7_l1linf:6.8848e-02 L8_l1linf:6.8359e-02 L9_l1linf:6.9336e-02 L10_l1linf:7.1289e-02 L11_l1linf:7.3242e-02 L12_l1linf:7.6172e-02 L1_spectral:3.2098e-03 L2_spectral:3.1772e-03 L3_spectral:3.1602e-03 L4_spectral:3.1603e-03 L5_spectral:3.1945e-03 L6_spectral:3.1873e-03 L7_spectral:3.1814e-03 L8_spectral:3.2392e-03 L9_spectral:3.1800e-03 L10_spectral:3.1934e-03 L11_spectral:3.1796e-03 L12_spectral:3.2236e-03 train_time:122356ms step_avg:50.98ms +[2025-09-11 09:15:09] [Rank 0] step:2401/10000 train_time:124176ms step_avg:51.72ms +[2025-09-11 09:15:09] [Rank 0] step:2401/10000 train_time:124176ms step_avg:51.72ms +[2025-09-11 09:15:10] [Rank 0] step:2421/10000 train_time:124866ms step_avg:51.58ms +[2025-09-11 09:15:10] [Rank 0] step:2421/10000 train_time:124866ms step_avg:51.58ms +[2025-09-11 09:15:11] [Rank 0] step:2441/10000 train_time:125531ms step_avg:51.43ms +[2025-09-11 09:15:11] [Rank 0] step:2441/10000 train_time:125531ms step_avg:51.43ms +[2025-09-11 09:15:11] [Rank 0] step:2461/10000 train_time:126195ms step_avg:51.28ms +[2025-09-11 09:15:11] [Rank 0] step:2461/10000 train_time:126195ms step_avg:51.28ms +[2025-09-11 09:15:12] [Rank 0] step:2481/10000 train_time:126859ms step_avg:51.13ms +[2025-09-11 09:15:12] [Rank 0] step:2481/10000 
train_time:126859ms step_avg:51.13ms +[2025-09-11 09:15:13] [Rank 0] step:2501/10000 train_time:127524ms step_avg:50.99ms +[2025-09-11 09:15:13] [Rank 0] step:2501/10000 train_time:127524ms step_avg:50.99ms +[2025-09-11 09:15:13] [Rank 0] step:2521/10000 train_time:128188ms step_avg:50.85ms +[2025-09-11 09:15:13] [Rank 0] step:2521/10000 train_time:128188ms step_avg:50.85ms +[2025-09-11 09:15:14] [Rank 0] step:2541/10000 train_time:128851ms step_avg:50.71ms +[2025-09-11 09:15:14] [Rank 0] step:2541/10000 train_time:128851ms step_avg:50.71ms +[2025-09-11 09:15:15] [Rank 0] step:2561/10000 train_time:129514ms step_avg:50.57ms +[2025-09-11 09:15:15] [Rank 0] step:2561/10000 train_time:129514ms step_avg:50.57ms +[2025-09-11 09:15:15] [Rank 0] step:2581/10000 train_time:130177ms step_avg:50.44ms +[2025-09-11 09:15:15] [Rank 0] step:2581/10000 train_time:130177ms step_avg:50.44ms +[2025-09-11 09:15:16] [Rank 0] step:2601/10000 train_time:130841ms step_avg:50.30ms +[2025-09-11 09:15:16] [Rank 0] step:2601/10000 train_time:130841ms step_avg:50.30ms +[2025-09-11 09:15:16] [Rank 0] step:2621/10000 train_time:131505ms step_avg:50.17ms +[2025-09-11 09:15:16] [Rank 0] step:2621/10000 train_time:131505ms step_avg:50.17ms +[2025-09-11 09:15:17] [Rank 0] step:2641/10000 train_time:132168ms step_avg:50.04ms +[2025-09-11 09:15:17] [Rank 0] step:2641/10000 train_time:132168ms step_avg:50.04ms +[2025-09-11 09:15:18] [Rank 0] step:2661/10000 train_time:132834ms step_avg:49.92ms +[2025-09-11 09:15:18] [Rank 0] step:2661/10000 train_time:132834ms step_avg:49.92ms +[2025-09-11 09:15:18] [Rank 0] step:2681/10000 train_time:133498ms step_avg:49.79ms +[2025-09-11 09:15:18] [Rank 0] step:2681/10000 train_time:133498ms step_avg:49.79ms +[2025-09-11 09:15:19] [Rank 0] step:2701/10000 train_time:134162ms step_avg:49.67ms +[2025-09-11 09:15:19] [Rank 0] step:2701/10000 train_time:134162ms step_avg:49.67ms +[2025-09-11 09:15:20] [Rank 0] step:2721/10000 train_time:134825ms step_avg:49.55ms 
+[2025-09-11 09:15:20] [Rank 0] step:2721/10000 train_time:134825ms step_avg:49.55ms +[2025-09-11 09:15:20] [Rank 0] step:2741/10000 train_time:135489ms step_avg:49.43ms +[2025-09-11 09:15:20] [Rank 0] step:2741/10000 train_time:135489ms step_avg:49.43ms +[2025-09-11 09:15:21] [Rank 0] step:2761/10000 train_time:136153ms step_avg:49.31ms +[2025-09-11 09:15:21] [Rank 0] step:2761/10000 train_time:136153ms step_avg:49.31ms +[2025-09-11 09:15:22] [Rank 0] step:2781/10000 train_time:136816ms step_avg:49.20ms +[2025-09-11 09:15:22] [Rank 0] step:2781/10000 train_time:136816ms step_avg:49.20ms +[2025-09-11 09:15:22] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 09:15:22] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 09:15:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 09:15:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 09:15:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 09:15:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 09:15:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:15:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:15:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 09:15:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 09:15:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 09:15:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 09:15:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 09:15:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 09:15:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 09:15:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 09:15:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 09:15:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 09:15:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 09:15:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 09:15:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 09:15:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 09:15:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 09:15:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 09:15:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 09:15:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 09:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 09:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 09:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 09:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 09:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 09:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 09:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 09:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 09:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 09:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 09:15:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 09:15:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 09:15:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 09:15:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 09:15:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 09:15:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 09:15:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 09:15:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 09:15:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:15:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:15:33] [Rank 0] PRINT: step:2800/10000 val_loss:4.7001 total_sharp:6.4232e-05 L1_sharp:8.2325e-03 L2_sharp:9.0117e-03 L3_sharp:1.0502e-02 L4_sharp:1.6207e-02 L5_sharp:1.7919e-02 L6_sharp:2.1447e-02 L7_sharp:2.7175e-02 L8_sharp:3.8618e-02 L9_sharp:4.3986e-02 L10_sharp:8.4096e-02 L11_sharp:1.4784e-01 L12_sharp:7.6267e-01 total_fnorm:1.7700e+02 total_l1_linf:3.7888e+05 total_spectral:8.8500e+01 L1_fnorm:2.4707e-01 L2_fnorm:2.4707e-01 L3_fnorm:2.4707e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4707e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4707e-01 L8_fnorm:2.4609e-01 L9_fnorm:2.4805e-01 L10_fnorm:2.4902e-01 L11_fnorm:2.5000e-01 L12_fnorm:2.5391e-01 L1_l1linf:6.8848e-02 L2_l1linf:6.9336e-02 L3_l1linf:6.8359e-02 L4_l1linf:6.7871e-02 L5_l1linf:6.6895e-02 L6_l1linf:6.7871e-02 L7_l1linf:6.7383e-02 L8_l1linf:6.6406e-02 L9_l1linf:6.8359e-02 L10_l1linf:6.8359e-02 L11_l1linf:7.0312e-02 L12_l1linf:7.4219e-02 L1_spectral:3.2049e-03 L2_spectral:3.1900e-03 L3_spectral:3.1979e-03 L4_spectral:3.1954e-03 L5_spectral:3.2034e-03 L6_spectral:3.1947e-03 L7_spectral:3.2007e-03 L8_spectral:3.2027e-03 L9_spectral:3.2072e-03 L10_spectral:3.2129e-03 L11_spectral:3.1725e-03 L12_spectral:3.2117e-03 train_time:137460ms step_avg:49.09ms +[2025-09-11 09:15:33] [Rank 0] PRINT: step:2800/10000 val_loss:4.7001 total_sharp:6.4232e-05 L1_sharp:8.2325e-03 L2_sharp:9.0117e-03 L3_sharp:1.0502e-02 L4_sharp:1.6207e-02 L5_sharp:1.7919e-02 L6_sharp:2.1447e-02 L7_sharp:2.7175e-02 L8_sharp:3.8618e-02 L9_sharp:4.3986e-02 L10_sharp:8.4096e-02 L11_sharp:1.4784e-01 L12_sharp:7.6267e-01 total_fnorm:1.7700e+02 total_l1_linf:3.7888e+05 total_spectral:8.8500e+01 L1_fnorm:2.4707e-01 L2_fnorm:2.4707e-01 L3_fnorm:2.4707e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4707e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4707e-01 L8_fnorm:2.4609e-01 L9_fnorm:2.4805e-01 L10_fnorm:2.4902e-01 L11_fnorm:2.5000e-01 L12_fnorm:2.5391e-01 L1_l1linf:6.8848e-02 L2_l1linf:6.9336e-02 L3_l1linf:6.8359e-02 L4_l1linf:6.7871e-02 L5_l1linf:6.6895e-02 
L6_l1linf:6.7871e-02 L7_l1linf:6.7383e-02 L8_l1linf:6.6406e-02 L9_l1linf:6.8359e-02 L10_l1linf:6.8359e-02 L11_l1linf:7.0312e-02 L12_l1linf:7.4219e-02 L1_spectral:3.2049e-03 L2_spectral:3.1900e-03 L3_spectral:3.1979e-03 L4_spectral:3.1954e-03 L5_spectral:3.2034e-03 L6_spectral:3.1947e-03 L7_spectral:3.2007e-03 L8_spectral:3.2027e-03 L9_spectral:3.2072e-03 L10_spectral:3.2129e-03 L11_spectral:3.1725e-03 L12_spectral:3.2117e-03 train_time:137460ms step_avg:49.09ms +[2025-09-11 09:15:35] [Rank 0] step:2801/10000 train_time:139180ms step_avg:49.69ms +[2025-09-11 09:15:35] [Rank 0] step:2801/10000 train_time:139180ms step_avg:49.69ms +[2025-09-11 09:15:36] [Rank 0] step:2821/10000 train_time:139853ms step_avg:49.58ms +[2025-09-11 09:15:36] [Rank 0] step:2821/10000 train_time:139853ms step_avg:49.58ms +[2025-09-11 09:15:36] [Rank 0] step:2841/10000 train_time:140519ms step_avg:49.46ms +[2025-09-11 09:15:36] [Rank 0] step:2841/10000 train_time:140519ms step_avg:49.46ms +[2025-09-11 09:15:37] [Rank 0] step:2861/10000 train_time:141183ms step_avg:49.35ms +[2025-09-11 09:15:37] [Rank 0] step:2861/10000 train_time:141183ms step_avg:49.35ms +[2025-09-11 09:15:38] [Rank 0] step:2881/10000 train_time:141848ms step_avg:49.24ms +[2025-09-11 09:15:38] [Rank 0] step:2881/10000 train_time:141848ms step_avg:49.24ms +[2025-09-11 09:15:38] [Rank 0] step:2901/10000 train_time:142512ms step_avg:49.13ms +[2025-09-11 09:15:38] [Rank 0] step:2901/10000 train_time:142512ms step_avg:49.13ms +[2025-09-11 09:15:39] [Rank 0] step:2921/10000 train_time:143176ms step_avg:49.02ms +[2025-09-11 09:15:39] [Rank 0] step:2921/10000 train_time:143176ms step_avg:49.02ms +[2025-09-11 09:15:39] [Rank 0] step:2941/10000 train_time:143841ms step_avg:48.91ms +[2025-09-11 09:15:39] [Rank 0] step:2941/10000 train_time:143841ms step_avg:48.91ms +[2025-09-11 09:15:40] [Rank 0] step:2961/10000 train_time:144504ms step_avg:48.80ms +[2025-09-11 09:15:40] [Rank 0] step:2961/10000 train_time:144504ms step_avg:48.80ms 
+[2025-09-11 09:15:41] [Rank 0] step:2981/10000 train_time:145171ms step_avg:48.70ms +[2025-09-11 09:15:41] [Rank 0] step:2981/10000 train_time:145171ms step_avg:48.70ms +[2025-09-11 09:15:41] [Rank 0] step:3001/10000 train_time:145838ms step_avg:48.60ms +[2025-09-11 09:15:41] [Rank 0] step:3001/10000 train_time:145838ms step_avg:48.60ms +[2025-09-11 09:15:42] [Rank 0] step:3021/10000 train_time:146505ms step_avg:48.50ms +[2025-09-11 09:15:42] [Rank 0] step:3021/10000 train_time:146505ms step_avg:48.50ms +[2025-09-11 09:15:43] [Rank 0] step:3041/10000 train_time:147172ms step_avg:48.40ms +[2025-09-11 09:15:43] [Rank 0] step:3041/10000 train_time:147172ms step_avg:48.40ms +[2025-09-11 09:15:43] [Rank 0] step:3061/10000 train_time:147839ms step_avg:48.30ms +[2025-09-11 09:15:43] [Rank 0] step:3061/10000 train_time:147839ms step_avg:48.30ms +[2025-09-11 09:15:44] [Rank 0] step:3081/10000 train_time:148506ms step_avg:48.20ms +[2025-09-11 09:15:44] [Rank 0] step:3081/10000 train_time:148506ms step_avg:48.20ms +[2025-09-11 09:15:45] [Rank 0] step:3101/10000 train_time:149173ms step_avg:48.10ms +[2025-09-11 09:15:45] [Rank 0] step:3101/10000 train_time:149173ms step_avg:48.10ms +[2025-09-11 09:15:45] [Rank 0] step:3121/10000 train_time:149840ms step_avg:48.01ms +[2025-09-11 09:15:45] [Rank 0] step:3121/10000 train_time:149840ms step_avg:48.01ms +[2025-09-11 09:15:46] [Rank 0] step:3141/10000 train_time:150507ms step_avg:47.92ms +[2025-09-11 09:15:46] [Rank 0] step:3141/10000 train_time:150507ms step_avg:47.92ms +[2025-09-11 09:15:47] [Rank 0] step:3161/10000 train_time:151174ms step_avg:47.82ms +[2025-09-11 09:15:47] [Rank 0] step:3161/10000 train_time:151174ms step_avg:47.82ms +[2025-09-11 09:15:47] [Rank 0] step:3181/10000 train_time:151840ms step_avg:47.73ms +[2025-09-11 09:15:47] [Rank 0] step:3181/10000 train_time:151840ms step_avg:47.73ms +[2025-09-11 09:15:48] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 09:15:48] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 09:15:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 09:15:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 09:15:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 09:15:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 09:15:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:15:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:15:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 09:15:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 09:15:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 09:15:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 09:15:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 09:15:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 09:15:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 09:15:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 09:15:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 09:15:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 09:15:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 09:15:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 09:15:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 09:15:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 09:15:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 09:15:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 09:15:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 09:15:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 09:15:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 09:15:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 09:15:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 09:15:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 09:15:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 09:15:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 09:15:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 09:15:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 09:15:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 09:15:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 09:15:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 09:15:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 09:15:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 09:15:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 09:15:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 09:15:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 09:15:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 09:15:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 09:15:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:15:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:15:59] [Rank 0] PRINT: step:3200/10000 val_loss:4.6028 total_sharp:4.1260e-05 L1_sharp:7.4304e-03 L2_sharp:8.3687e-03 L3_sharp:9.7012e-03 L4_sharp:1.2119e-02 L5_sharp:1.5741e-02 L6_sharp:1.6892e-02 L7_sharp:2.2910e-02 L8_sharp:2.9378e-02 L9_sharp:3.3740e-02 L10_sharp:4.9186e-02 L11_sharp:1.0267e-01 L12_sharp:2.6467e-01 total_fnorm:1.9400e+02 total_l1_linf:4.3622e+05 total_spectral:9.7000e+01 L1_fnorm:2.4707e-01 L2_fnorm:2.4707e-01 L3_fnorm:2.4609e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4609e-01 L8_fnorm:2.4512e-01 L9_fnorm:2.4707e-01 L10_fnorm:2.4707e-01 L11_fnorm:2.4902e-01 L12_fnorm:2.5195e-01 L1_l1linf:6.8848e-02 L2_l1linf:6.6895e-02 L3_l1linf:6.6895e-02 L4_l1linf:6.6406e-02 L5_l1linf:6.6895e-02 L6_l1linf:6.6895e-02 L7_l1linf:6.6406e-02 L8_l1linf:6.6406e-02 L9_l1linf:6.4941e-02 L10_l1linf:6.4453e-02 L11_l1linf:6.8359e-02 L12_l1linf:7.0801e-02 L1_spectral:3.1896e-03 L2_spectral:3.1864e-03 L3_spectral:3.1892e-03 L4_spectral:3.1872e-03 L5_spectral:3.2602e-03 L6_spectral:3.1880e-03 L7_spectral:3.1851e-03 L8_spectral:3.2113e-03 L9_spectral:3.2161e-03 L10_spectral:3.2327e-03 L11_spectral:3.1949e-03 L12_spectral:3.2313e-03 train_time:152488ms step_avg:47.65ms +[2025-09-11 09:15:59] [Rank 0] PRINT: step:3200/10000 
val_loss:4.6028 total_sharp:4.1260e-05 L1_sharp:7.4304e-03 L2_sharp:8.3687e-03 L3_sharp:9.7012e-03 L4_sharp:1.2119e-02 L5_sharp:1.5741e-02 L6_sharp:1.6892e-02 L7_sharp:2.2910e-02 L8_sharp:2.9378e-02 L9_sharp:3.3740e-02 L10_sharp:4.9186e-02 L11_sharp:1.0267e-01 L12_sharp:2.6467e-01 total_fnorm:1.9400e+02 total_l1_linf:4.3622e+05 total_spectral:9.7000e+01 L1_fnorm:2.4707e-01 L2_fnorm:2.4707e-01 L3_fnorm:2.4609e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4609e-01 L8_fnorm:2.4512e-01 L9_fnorm:2.4707e-01 L10_fnorm:2.4707e-01 L11_fnorm:2.4902e-01 L12_fnorm:2.5195e-01 L1_l1linf:6.8848e-02 L2_l1linf:6.6895e-02 L3_l1linf:6.6895e-02 L4_l1linf:6.6406e-02 L5_l1linf:6.6895e-02 L6_l1linf:6.6895e-02 L7_l1linf:6.6406e-02 L8_l1linf:6.6406e-02 L9_l1linf:6.4941e-02 L10_l1linf:6.4453e-02 L11_l1linf:6.8359e-02 L12_l1linf:7.0801e-02 L1_spectral:3.1896e-03 L2_spectral:3.1864e-03 L3_spectral:3.1892e-03 L4_spectral:3.1872e-03 L5_spectral:3.2602e-03 L6_spectral:3.1880e-03 L7_spectral:3.1851e-03 L8_spectral:3.2113e-03 L9_spectral:3.2161e-03 L10_spectral:3.2327e-03 L11_spectral:3.1949e-03 L12_spectral:3.2313e-03 train_time:152488ms step_avg:47.65ms +[2025-09-11 09:16:01] [Rank 0] step:3201/10000 train_time:154220ms step_avg:48.18ms +[2025-09-11 09:16:01] [Rank 0] step:3201/10000 train_time:154220ms step_avg:48.18ms +[2025-09-11 09:16:01] [Rank 0] step:3221/10000 train_time:154923ms step_avg:48.10ms +[2025-09-11 09:16:01] [Rank 0] step:3221/10000 train_time:154923ms step_avg:48.10ms +[2025-09-11 09:16:02] [Rank 0] step:3241/10000 train_time:155592ms step_avg:48.01ms +[2025-09-11 09:16:02] [Rank 0] step:3241/10000 train_time:155592ms step_avg:48.01ms +[2025-09-11 09:16:03] [Rank 0] step:3261/10000 train_time:156260ms step_avg:47.92ms +[2025-09-11 09:16:03] [Rank 0] step:3261/10000 train_time:156260ms step_avg:47.92ms +[2025-09-11 09:16:03] [Rank 0] step:3281/10000 train_time:156927ms step_avg:47.83ms +[2025-09-11 09:16:03] [Rank 0] step:3281/10000 
train_time:156927ms step_avg:47.83ms +[2025-09-11 09:16:04] [Rank 0] step:3301/10000 train_time:157594ms step_avg:47.74ms +[2025-09-11 09:16:04] [Rank 0] step:3301/10000 train_time:157594ms step_avg:47.74ms +[2025-09-11 09:16:05] [Rank 0] step:3321/10000 train_time:158261ms step_avg:47.65ms +[2025-09-11 09:16:05] [Rank 0] step:3321/10000 train_time:158261ms step_avg:47.65ms +[2025-09-11 09:16:06] [Rank 0] step:3341/10000 train_time:159188ms step_avg:47.65ms +[2025-09-11 09:16:06] [Rank 0] step:3341/10000 train_time:159188ms step_avg:47.65ms +[2025-09-11 09:16:06] [Rank 0] step:3361/10000 train_time:159856ms step_avg:47.56ms +[2025-09-11 09:16:06] [Rank 0] step:3361/10000 train_time:159856ms step_avg:47.56ms +[2025-09-11 09:16:07] [Rank 0] step:3381/10000 train_time:160522ms step_avg:47.48ms +[2025-09-11 09:16:07] [Rank 0] step:3381/10000 train_time:160522ms step_avg:47.48ms +[2025-09-11 09:16:08] [Rank 0] step:3401/10000 train_time:161500ms step_avg:47.49ms +[2025-09-11 09:16:08] [Rank 0] step:3401/10000 train_time:161500ms step_avg:47.49ms +[2025-09-11 09:16:09] [Rank 0] step:3421/10000 train_time:162166ms step_avg:47.40ms +[2025-09-11 09:16:09] [Rank 0] step:3421/10000 train_time:162166ms step_avg:47.40ms +[2025-09-11 09:16:09] [Rank 0] step:3441/10000 train_time:162832ms step_avg:47.32ms +[2025-09-11 09:16:09] [Rank 0] step:3441/10000 train_time:162832ms step_avg:47.32ms +[2025-09-11 09:16:10] [Rank 0] step:3461/10000 train_time:163498ms step_avg:47.24ms +[2025-09-11 09:16:10] [Rank 0] step:3461/10000 train_time:163498ms step_avg:47.24ms +[2025-09-11 09:16:11] [Rank 0] step:3481/10000 train_time:164165ms step_avg:47.16ms +[2025-09-11 09:16:11] [Rank 0] step:3481/10000 train_time:164165ms step_avg:47.16ms +[2025-09-11 09:16:11] [Rank 0] step:3501/10000 train_time:164832ms step_avg:47.08ms +[2025-09-11 09:16:11] [Rank 0] step:3501/10000 train_time:164832ms step_avg:47.08ms +[2025-09-11 09:16:12] [Rank 0] step:3521/10000 train_time:165498ms step_avg:47.00ms 
+[2025-09-11 09:16:12] [Rank 0] step:3521/10000 train_time:165498ms step_avg:47.00ms +[2025-09-11 09:16:13] [Rank 0] step:3541/10000 train_time:166164ms step_avg:46.93ms +[2025-09-11 09:16:13] [Rank 0] step:3541/10000 train_time:166164ms step_avg:46.93ms +[2025-09-11 09:16:13] [Rank 0] step:3561/10000 train_time:166830ms step_avg:46.85ms +[2025-09-11 09:16:13] [Rank 0] step:3561/10000 train_time:166830ms step_avg:46.85ms +[2025-09-11 09:16:14] [Rank 0] step:3581/10000 train_time:167497ms step_avg:46.77ms +[2025-09-11 09:16:14] [Rank 0] step:3581/10000 train_time:167497ms step_avg:46.77ms +[2025-09-11 09:16:15] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 09:16:15] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 09:16:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 09:16:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 09:16:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 09:16:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 09:16:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:16:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:16:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 09:16:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 09:16:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 09:16:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 09:16:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 09:16:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 09:16:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 09:16:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 09:16:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 09:16:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 09:16:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 09:16:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 09:16:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 09:16:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 09:16:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 09:16:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 09:16:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 09:16:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 09:16:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 09:16:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 09:16:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 09:16:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 09:16:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 09:16:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 09:16:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 09:16:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 09:16:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 09:16:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 09:16:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 09:16:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 09:16:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 09:16:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 09:16:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 09:16:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 09:16:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 09:16:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 09:16:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:16:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:16:25] [Rank 0] PRINT: step:3600/10000 val_loss:4.5499 total_sharp:4.9584e-05 L1_sharp:4.3860e-03 L2_sharp:6.5432e-03 L3_sharp:7.4242e-03 L4_sharp:1.0018e-02 L5_sharp:1.4285e-02 L6_sharp:1.6829e-02 L7_sharp:2.1211e-02 L8_sharp:3.0578e-02 L9_sharp:4.1694e-02 L10_sharp:7.1332e-02 L11_sharp:1.2795e-01 L12_sharp:9.7422e-01 total_fnorm:1.8000e+02 total_l1_linf:3.8912e+05 total_spectral:9.0000e+01 L1_fnorm:2.4609e-01 L2_fnorm:2.4707e-01 L3_fnorm:2.4707e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4609e-01 L8_fnorm:2.4316e-01 L9_fnorm:2.4707e-01 L10_fnorm:2.4707e-01 L11_fnorm:2.4805e-01 L12_fnorm:2.5195e-01 L1_l1linf:6.4941e-02 L2_l1linf:6.5918e-02 L3_l1linf:6.4941e-02 L4_l1linf:6.4453e-02 L5_l1linf:6.5918e-02 L6_l1linf:6.4941e-02 L7_l1linf:6.3965e-02 L8_l1linf:6.3477e-02 L9_l1linf:6.3965e-02 L10_l1linf:6.2988e-02 L11_l1linf:6.4941e-02 L12_l1linf:7.0312e-02 L1_spectral:3.2074e-03 L2_spectral:3.1888e-03 L3_spectral:3.1768e-03 L4_spectral:3.1835e-03 L5_spectral:3.2102e-03 L6_spectral:3.2124e-03 L7_spectral:3.1977e-03 L8_spectral:3.2093e-03 L9_spectral:3.2267e-03 L10_spectral:3.2174e-03 L11_spectral:3.1825e-03 L12_spectral:3.2142e-03 train_time:168144ms step_avg:46.71ms +[2025-09-11 09:16:25] [Rank 0] PRINT: step:3600/10000 val_loss:4.5499 total_sharp:4.9584e-05 L1_sharp:4.3860e-03 L2_sharp:6.5432e-03 L3_sharp:7.4242e-03 L4_sharp:1.0018e-02 L5_sharp:1.4285e-02 L6_sharp:1.6829e-02 L7_sharp:2.1211e-02 L8_sharp:3.0578e-02 L9_sharp:4.1694e-02 L10_sharp:7.1332e-02 L11_sharp:1.2795e-01 L12_sharp:9.7422e-01 total_fnorm:1.8000e+02 total_l1_linf:3.8912e+05 total_spectral:9.0000e+01 L1_fnorm:2.4609e-01 L2_fnorm:2.4707e-01 L3_fnorm:2.4707e-01 L4_fnorm:2.4707e-01 L5_fnorm:2.4609e-01 L6_fnorm:2.4707e-01 L7_fnorm:2.4609e-01 L8_fnorm:2.4316e-01 L9_fnorm:2.4707e-01 L10_fnorm:2.4707e-01 L11_fnorm:2.4805e-01 L12_fnorm:2.5195e-01 L1_l1linf:6.4941e-02 L2_l1linf:6.5918e-02 L3_l1linf:6.4941e-02 L4_l1linf:6.4453e-02 L5_l1linf:6.5918e-02 
L6_l1linf:6.4941e-02 L7_l1linf:6.3965e-02 L8_l1linf:6.3477e-02 L9_l1linf:6.3965e-02 L10_l1linf:6.2988e-02 L11_l1linf:6.4941e-02 L12_l1linf:7.0312e-02 L1_spectral:3.2074e-03 L2_spectral:3.1888e-03 L3_spectral:3.1768e-03 L4_spectral:3.1835e-03 L5_spectral:3.2102e-03 L6_spectral:3.2124e-03 L7_spectral:3.1977e-03 L8_spectral:3.2093e-03 L9_spectral:3.2267e-03 L10_spectral:3.2174e-03 L11_spectral:3.1825e-03 L12_spectral:3.2142e-03 train_time:168144ms step_avg:46.71ms +[2025-09-11 09:16:27] [Rank 0] step:3601/10000 train_time:169879ms step_avg:47.18ms +[2025-09-11 09:16:27] [Rank 0] step:3601/10000 train_time:169879ms step_avg:47.18ms +[2025-09-11 09:16:27] [Rank 0] step:3621/10000 train_time:170535ms step_avg:47.10ms +[2025-09-11 09:16:27] [Rank 0] step:3621/10000 train_time:170535ms step_avg:47.10ms +[2025-09-11 09:16:28] [Rank 0] step:3641/10000 train_time:171202ms step_avg:47.02ms +[2025-09-11 09:16:28] [Rank 0] step:3641/10000 train_time:171202ms step_avg:47.02ms +[2025-09-11 09:16:29] [Rank 0] step:3661/10000 train_time:171870ms step_avg:46.95ms +[2025-09-11 09:16:29] [Rank 0] step:3661/10000 train_time:171870ms step_avg:46.95ms +[2025-09-11 09:16:29] [Rank 0] step:3681/10000 train_time:172537ms step_avg:46.87ms +[2025-09-11 09:16:29] [Rank 0] step:3681/10000 train_time:172537ms step_avg:46.87ms +[2025-09-11 09:16:30] [Rank 0] step:3701/10000 train_time:173204ms step_avg:46.80ms +[2025-09-11 09:16:30] [Rank 0] step:3701/10000 train_time:173204ms step_avg:46.80ms +[2025-09-11 09:16:31] [Rank 0] step:3721/10000 train_time:173881ms step_avg:46.73ms +[2025-09-11 09:16:31] [Rank 0] step:3721/10000 train_time:173881ms step_avg:46.73ms +[2025-09-11 09:16:31] [Rank 0] step:3741/10000 train_time:174558ms step_avg:46.66ms +[2025-09-11 09:16:31] [Rank 0] step:3741/10000 train_time:174558ms step_avg:46.66ms +[2025-09-11 09:16:32] [Rank 0] step:3761/10000 train_time:175236ms step_avg:46.59ms +[2025-09-11 09:16:32] [Rank 0] step:3761/10000 train_time:175236ms step_avg:46.59ms 
+[2025-09-11 09:16:33] [Rank 0] step:3781/10000 train_time:175914ms step_avg:46.53ms +[2025-09-11 09:16:33] [Rank 0] step:3781/10000 train_time:175914ms step_avg:46.53ms +[2025-09-11 09:16:34] [Rank 0] step:3801/10000 train_time:176592ms step_avg:46.46ms +[2025-09-11 09:16:34] [Rank 0] step:3801/10000 train_time:176592ms step_avg:46.46ms +[2025-09-11 09:16:34] [Rank 0] step:3821/10000 train_time:177269ms step_avg:46.39ms +[2025-09-11 09:16:34] [Rank 0] step:3821/10000 train_time:177269ms step_avg:46.39ms +[2025-09-11 09:16:35] [Rank 0] step:3841/10000 train_time:177947ms step_avg:46.33ms +[2025-09-11 09:16:35] [Rank 0] step:3841/10000 train_time:177947ms step_avg:46.33ms +[2025-09-11 09:16:36] [Rank 0] step:3861/10000 train_time:178624ms step_avg:46.26ms +[2025-09-11 09:16:36] [Rank 0] step:3861/10000 train_time:178624ms step_avg:46.26ms +[2025-09-11 09:16:36] [Rank 0] step:3881/10000 train_time:179301ms step_avg:46.20ms +[2025-09-11 09:16:36] [Rank 0] step:3881/10000 train_time:179301ms step_avg:46.20ms +[2025-09-11 09:16:37] [Rank 0] step:3901/10000 train_time:179979ms step_avg:46.14ms +[2025-09-11 09:16:37] [Rank 0] step:3901/10000 train_time:179979ms step_avg:46.14ms +[2025-09-11 09:16:38] [Rank 0] step:3921/10000 train_time:180656ms step_avg:46.07ms +[2025-09-11 09:16:38] [Rank 0] step:3921/10000 train_time:180656ms step_avg:46.07ms +[2025-09-11 09:16:38] [Rank 0] step:3941/10000 train_time:181334ms step_avg:46.01ms +[2025-09-11 09:16:38] [Rank 0] step:3941/10000 train_time:181334ms step_avg:46.01ms +[2025-09-11 09:16:39] [Rank 0] step:3961/10000 train_time:182012ms step_avg:45.95ms +[2025-09-11 09:16:39] [Rank 0] step:3961/10000 train_time:182012ms step_avg:45.95ms +[2025-09-11 09:16:40] [Rank 0] step:3981/10000 train_time:182690ms step_avg:45.89ms +[2025-09-11 09:16:40] [Rank 0] step:3981/10000 train_time:182690ms step_avg:45.89ms +[2025-09-11 09:16:40] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 09:16:40] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 09:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 09:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 09:16:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 09:16:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 09:16:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:16:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:16:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 09:16:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 09:16:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 09:16:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 09:16:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 09:16:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 09:16:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 09:16:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 09:16:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 09:16:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 09:16:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 09:16:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 09:16:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 09:16:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 09:16:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 09:16:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 09:16:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 09:16:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 09:16:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 09:16:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 09:16:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 09:16:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 09:16:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 09:16:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 09:16:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 09:16:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 09:16:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 09:16:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 09:16:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 09:16:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 09:16:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 09:16:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 09:16:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 09:16:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 09:16:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 09:16:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 09:16:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:16:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:16:51] [Rank 0] PRINT: step:4000/10000 val_loss:4.5032 total_sharp:4.9568e-05 L1_sharp:5.0502e-03 L2_sharp:6.4591e-03 L3_sharp:5.4896e-03 L4_sharp:1.0001e-02 L5_sharp:1.8655e-02 L6_sharp:1.9615e-02 L7_sharp:2.5915e-02 L8_sharp:3.6800e-02 L9_sharp:4.2533e-02 L10_sharp:6.6257e-02 L11_sharp:1.7666e-01 L12_sharp:1.2836e+00 total_fnorm:2.0200e+02 total_l1_linf:4.4237e+05 total_spectral:1.0100e+02 L1_fnorm:2.4609e-01 L2_fnorm:2.4609e-01 L3_fnorm:2.4512e-01 L4_fnorm:2.4609e-01 L5_fnorm:2.4512e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4609e-01 L8_fnorm:2.4219e-01 L9_fnorm:2.4512e-01 L10_fnorm:2.4707e-01 L11_fnorm:2.4902e-01 L12_fnorm:2.5195e-01 L1_l1linf:6.4941e-02 L2_l1linf:6.4453e-02 L3_l1linf:6.3965e-02 L4_l1linf:6.3477e-02 L5_l1linf:6.3477e-02 L6_l1linf:6.3477e-02 L7_l1linf:6.2256e-02 L8_l1linf:6.3477e-02 L9_l1linf:6.2500e-02 L10_l1linf:6.3477e-02 L11_l1linf:6.5918e-02 L12_l1linf:6.9336e-02 L1_spectral:3.1971e-03 L2_spectral:3.1860e-03 L3_spectral:3.1935e-03 L4_spectral:3.2051e-03 L5_spectral:3.2201e-03 L6_spectral:3.2173e-03 L7_spectral:3.2228e-03 L8_spectral:3.2159e-03 L9_spectral:3.2440e-03 L10_spectral:3.2262e-03 L11_spectral:3.2346e-03 L12_spectral:3.2000e-03 train_time:183349ms step_avg:45.84ms +[2025-09-11 09:16:51] [Rank 0] PRINT: step:4000/10000 
val_loss:4.5032 total_sharp:4.9568e-05 L1_sharp:5.0502e-03 L2_sharp:6.4591e-03 L3_sharp:5.4896e-03 L4_sharp:1.0001e-02 L5_sharp:1.8655e-02 L6_sharp:1.9615e-02 L7_sharp:2.5915e-02 L8_sharp:3.6800e-02 L9_sharp:4.2533e-02 L10_sharp:6.6257e-02 L11_sharp:1.7666e-01 L12_sharp:1.2836e+00 total_fnorm:2.0200e+02 total_l1_linf:4.4237e+05 total_spectral:1.0100e+02 L1_fnorm:2.4609e-01 L2_fnorm:2.4609e-01 L3_fnorm:2.4512e-01 L4_fnorm:2.4609e-01 L5_fnorm:2.4512e-01 L6_fnorm:2.4609e-01 L7_fnorm:2.4609e-01 L8_fnorm:2.4219e-01 L9_fnorm:2.4512e-01 L10_fnorm:2.4707e-01 L11_fnorm:2.4902e-01 L12_fnorm:2.5195e-01 L1_l1linf:6.4941e-02 L2_l1linf:6.4453e-02 L3_l1linf:6.3965e-02 L4_l1linf:6.3477e-02 L5_l1linf:6.3477e-02 L6_l1linf:6.3477e-02 L7_l1linf:6.2256e-02 L8_l1linf:6.3477e-02 L9_l1linf:6.2500e-02 L10_l1linf:6.3477e-02 L11_l1linf:6.5918e-02 L12_l1linf:6.9336e-02 L1_spectral:3.1971e-03 L2_spectral:3.1860e-03 L3_spectral:3.1935e-03 L4_spectral:3.2051e-03 L5_spectral:3.2201e-03 L6_spectral:3.2173e-03 L7_spectral:3.2228e-03 L8_spectral:3.2159e-03 L9_spectral:3.2440e-03 L10_spectral:3.2262e-03 L11_spectral:3.2346e-03 L12_spectral:3.2000e-03 train_time:183349ms step_avg:45.84ms +[2025-09-11 09:16:53] [Rank 0] step:4001/10000 train_time:185114ms step_avg:46.27ms +[2025-09-11 09:16:53] [Rank 0] step:4001/10000 train_time:185114ms step_avg:46.27ms +[2025-09-11 09:16:54] [Rank 0] step:4021/10000 train_time:185799ms step_avg:46.21ms +[2025-09-11 09:16:54] [Rank 0] step:4021/10000 train_time:185799ms step_avg:46.21ms +[2025-09-11 09:16:54] [Rank 0] step:4041/10000 train_time:186478ms step_avg:46.15ms +[2025-09-11 09:16:54] [Rank 0] step:4041/10000 train_time:186478ms step_avg:46.15ms +[2025-09-11 09:16:55] [Rank 0] step:4061/10000 train_time:187155ms step_avg:46.09ms +[2025-09-11 09:16:55] [Rank 0] step:4061/10000 train_time:187155ms step_avg:46.09ms +[2025-09-11 09:16:56] [Rank 0] step:4081/10000 train_time:187833ms step_avg:46.03ms +[2025-09-11 09:16:56] [Rank 0] step:4081/10000 
train_time:187833ms step_avg:46.03ms +[2025-09-11 09:16:56] [Rank 0] step:4101/10000 train_time:188511ms step_avg:45.97ms +[2025-09-11 09:16:56] [Rank 0] step:4101/10000 train_time:188511ms step_avg:45.97ms +[2025-09-11 09:16:57] [Rank 0] step:4121/10000 train_time:189190ms step_avg:45.91ms +[2025-09-11 09:16:57] [Rank 0] step:4121/10000 train_time:189190ms step_avg:45.91ms +[2025-09-11 09:16:58] [Rank 0] step:4141/10000 train_time:189867ms step_avg:45.85ms +[2025-09-11 09:16:58] [Rank 0] step:4141/10000 train_time:189867ms step_avg:45.85ms +[2025-09-11 09:16:58] [Rank 0] step:4161/10000 train_time:190544ms step_avg:45.79ms +[2025-09-11 09:16:58] [Rank 0] step:4161/10000 train_time:190544ms step_avg:45.79ms +[2025-09-11 09:16:59] [Rank 0] step:4181/10000 train_time:191223ms step_avg:45.74ms +[2025-09-11 09:16:59] [Rank 0] step:4181/10000 train_time:191223ms step_avg:45.74ms +[2025-09-11 09:17:00] [Rank 0] step:4201/10000 train_time:191900ms step_avg:45.68ms +[2025-09-11 09:17:00] [Rank 0] step:4201/10000 train_time:191900ms step_avg:45.68ms +[2025-09-11 09:17:00] [Rank 0] step:4221/10000 train_time:192578ms step_avg:45.62ms +[2025-09-11 09:17:00] [Rank 0] step:4221/10000 train_time:192578ms step_avg:45.62ms +[2025-09-11 09:17:01] [Rank 0] step:4241/10000 train_time:193255ms step_avg:45.57ms +[2025-09-11 09:17:01] [Rank 0] step:4241/10000 train_time:193255ms step_avg:45.57ms +[2025-09-11 09:17:02] [Rank 0] step:4261/10000 train_time:193933ms step_avg:45.51ms +[2025-09-11 09:17:02] [Rank 0] step:4261/10000 train_time:193933ms step_avg:45.51ms +[2025-09-11 09:17:02] [Rank 0] step:4281/10000 train_time:194623ms step_avg:45.46ms +[2025-09-11 09:17:02] [Rank 0] step:4281/10000 train_time:194623ms step_avg:45.46ms +[2025-09-11 09:17:03] [Rank 0] step:4301/10000 train_time:195301ms step_avg:45.41ms +[2025-09-11 09:17:03] [Rank 0] step:4301/10000 train_time:195301ms step_avg:45.41ms +[2025-09-11 09:17:04] [Rank 0] step:4321/10000 train_time:195979ms step_avg:45.35ms 
+[2025-09-11 09:17:04] [Rank 0] step:4321/10000 train_time:195979ms step_avg:45.35ms +[2025-09-11 09:17:04] [Rank 0] step:4341/10000 train_time:196657ms step_avg:45.30ms +[2025-09-11 09:17:04] [Rank 0] step:4341/10000 train_time:196657ms step_avg:45.30ms +[2025-09-11 09:17:05] [Rank 0] step:4361/10000 train_time:197334ms step_avg:45.25ms +[2025-09-11 09:17:05] [Rank 0] step:4361/10000 train_time:197334ms step_avg:45.25ms +[2025-09-11 09:17:06] [Rank 0] step:4381/10000 train_time:198013ms step_avg:45.20ms +[2025-09-11 09:17:06] [Rank 0] step:4381/10000 train_time:198013ms step_avg:45.20ms +[2025-09-11 09:17:06] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 09:17:06] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 09:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 09:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 09:17:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 09:17:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 09:17:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:17:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:17:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 09:17:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 09:17:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 09:17:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 09:17:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 09:17:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 09:17:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 09:17:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 09:17:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 09:17:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 09:17:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 09:17:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 09:17:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 09:17:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 09:17:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 09:17:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 09:17:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 09:17:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 09:17:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 09:17:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 09:17:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 09:17:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 09:17:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 09:17:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 09:17:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 09:17:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 09:17:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 09:17:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 09:17:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 09:17:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 09:17:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 09:17:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 09:17:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 09:17:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 09:17:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 09:17:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 09:17:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:17:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:17:17] [Rank 0] PRINT: step:4400/10000 val_loss:4.4850 total_sharp:3.8567e-05 L1_sharp:4.9597e-03 L2_sharp:4.9033e-03 L3_sharp:6.7454e-03 L4_sharp:9.5758e-03 L5_sharp:1.0935e-02 L6_sharp:1.5125e-02 L7_sharp:1.9112e-02 L8_sharp:2.8735e-02 L9_sharp:3.0309e-02 L10_sharp:4.6639e-02 L11_sharp:1.0478e-01 L12_sharp:3.0715e-01 total_fnorm:1.7900e+02 total_l1_linf:3.8093e+05 total_spectral:8.9500e+01 L1_fnorm:2.4512e-01 L2_fnorm:2.4512e-01 L3_fnorm:2.4512e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4512e-01 L7_fnorm:2.4512e-01 L8_fnorm:2.4219e-01 L9_fnorm:2.4512e-01 L10_fnorm:2.4707e-01 L11_fnorm:2.4805e-01 L12_fnorm:2.5000e-01 L1_l1linf:6.3965e-02 L2_l1linf:6.3477e-02 L3_l1linf:6.2500e-02 L4_l1linf:6.1768e-02 L5_l1linf:6.2012e-02 L6_l1linf:6.1279e-02 L7_l1linf:6.1279e-02 L8_l1linf:6.1279e-02 L9_l1linf:6.1768e-02 L10_l1linf:6.3477e-02 L11_l1linf:6.4941e-02 L12_l1linf:6.9336e-02 L1_spectral:3.1887e-03 L2_spectral:3.1902e-03 L3_spectral:3.1944e-03 L4_spectral:3.1967e-03 L5_spectral:3.2061e-03 L6_spectral:3.2288e-03 L7_spectral:3.2396e-03 L8_spectral:3.2168e-03 L9_spectral:3.2229e-03 L10_spectral:3.2403e-03 L11_spectral:3.1988e-03 L12_spectral:3.2117e-03 train_time:198671ms step_avg:45.15ms +[2025-09-11 09:17:17] [Rank 0] PRINT: step:4400/10000 val_loss:4.4850 total_sharp:3.8567e-05 L1_sharp:4.9597e-03 L2_sharp:4.9033e-03 L3_sharp:6.7454e-03 L4_sharp:9.5758e-03 L5_sharp:1.0935e-02 L6_sharp:1.5125e-02 L7_sharp:1.9112e-02 L8_sharp:2.8735e-02 L9_sharp:3.0309e-02 L10_sharp:4.6639e-02 L11_sharp:1.0478e-01 L12_sharp:3.0715e-01 total_fnorm:1.7900e+02 total_l1_linf:3.8093e+05 total_spectral:8.9500e+01 L1_fnorm:2.4512e-01 L2_fnorm:2.4512e-01 L3_fnorm:2.4512e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4512e-01 L7_fnorm:2.4512e-01 L8_fnorm:2.4219e-01 L9_fnorm:2.4512e-01 L10_fnorm:2.4707e-01 L11_fnorm:2.4805e-01 L12_fnorm:2.5000e-01 L1_l1linf:6.3965e-02 L2_l1linf:6.3477e-02 L3_l1linf:6.2500e-02 L4_l1linf:6.1768e-02 L5_l1linf:6.2012e-02 
L6_l1linf:6.1279e-02 L7_l1linf:6.1279e-02 L8_l1linf:6.1279e-02 L9_l1linf:6.1768e-02 L10_l1linf:6.3477e-02 L11_l1linf:6.4941e-02 L12_l1linf:6.9336e-02 L1_spectral:3.1887e-03 L2_spectral:3.1902e-03 L3_spectral:3.1944e-03 L4_spectral:3.1967e-03 L5_spectral:3.2061e-03 L6_spectral:3.2288e-03 L7_spectral:3.2396e-03 L8_spectral:3.2168e-03 L9_spectral:3.2229e-03 L10_spectral:3.2403e-03 L11_spectral:3.1988e-03 L12_spectral:3.2117e-03 train_time:198671ms step_avg:45.15ms +[2025-09-11 09:17:19] [Rank 0] step:4401/10000 train_time:200504ms step_avg:45.56ms +[2025-09-11 09:17:19] [Rank 0] step:4401/10000 train_time:200504ms step_avg:45.56ms +[2025-09-11 09:17:20] [Rank 0] step:4421/10000 train_time:201203ms step_avg:45.51ms +[2025-09-11 09:17:20] [Rank 0] step:4421/10000 train_time:201203ms step_avg:45.51ms +[2025-09-11 09:17:21] [Rank 0] step:4441/10000 train_time:201882ms step_avg:45.46ms +[2025-09-11 09:17:21] [Rank 0] step:4441/10000 train_time:201882ms step_avg:45.46ms +[2025-09-11 09:17:21] [Rank 0] step:4461/10000 train_time:202562ms step_avg:45.41ms +[2025-09-11 09:17:21] [Rank 0] step:4461/10000 train_time:202562ms step_avg:45.41ms +[2025-09-11 09:17:22] [Rank 0] step:4481/10000 train_time:203242ms step_avg:45.36ms +[2025-09-11 09:17:22] [Rank 0] step:4481/10000 train_time:203242ms step_avg:45.36ms +[2025-09-11 09:17:23] [Rank 0] step:4501/10000 train_time:203923ms step_avg:45.31ms +[2025-09-11 09:17:23] [Rank 0] step:4501/10000 train_time:203923ms step_avg:45.31ms +[2025-09-11 09:17:23] [Rank 0] step:4521/10000 train_time:204603ms step_avg:45.26ms +[2025-09-11 09:17:23] [Rank 0] step:4521/10000 train_time:204603ms step_avg:45.26ms +[2025-09-11 09:17:24] [Rank 0] step:4541/10000 train_time:205283ms step_avg:45.21ms +[2025-09-11 09:17:24] [Rank 0] step:4541/10000 train_time:205283ms step_avg:45.21ms +[2025-09-11 09:17:25] [Rank 0] step:4561/10000 train_time:205963ms step_avg:45.16ms +[2025-09-11 09:17:25] [Rank 0] step:4561/10000 train_time:205963ms step_avg:45.16ms 
+[2025-09-11 09:17:25] [Rank 0] step:4581/10000 train_time:206642ms step_avg:45.11ms +[2025-09-11 09:17:25] [Rank 0] step:4581/10000 train_time:206642ms step_avg:45.11ms +[2025-09-11 09:17:26] [Rank 0] step:4601/10000 train_time:207322ms step_avg:45.06ms +[2025-09-11 09:17:26] [Rank 0] step:4601/10000 train_time:207322ms step_avg:45.06ms +[2025-09-11 09:17:27] [Rank 0] step:4621/10000 train_time:208002ms step_avg:45.01ms +[2025-09-11 09:17:27] [Rank 0] step:4621/10000 train_time:208002ms step_avg:45.01ms +[2025-09-11 09:17:27] [Rank 0] step:4641/10000 train_time:208683ms step_avg:44.96ms +[2025-09-11 09:17:27] [Rank 0] step:4641/10000 train_time:208683ms step_avg:44.96ms +[2025-09-11 09:17:28] [Rank 0] step:4661/10000 train_time:209362ms step_avg:44.92ms +[2025-09-11 09:17:28] [Rank 0] step:4661/10000 train_time:209362ms step_avg:44.92ms +[2025-09-11 09:17:29] [Rank 0] step:4681/10000 train_time:210042ms step_avg:44.87ms +[2025-09-11 09:17:29] [Rank 0] step:4681/10000 train_time:210042ms step_avg:44.87ms +[2025-09-11 09:17:29] [Rank 0] step:4701/10000 train_time:210722ms step_avg:44.82ms +[2025-09-11 09:17:29] [Rank 0] step:4701/10000 train_time:210722ms step_avg:44.82ms +[2025-09-11 09:17:30] [Rank 0] step:4721/10000 train_time:211402ms step_avg:44.78ms +[2025-09-11 09:17:30] [Rank 0] step:4721/10000 train_time:211402ms step_avg:44.78ms +[2025-09-11 09:17:31] [Rank 0] step:4741/10000 train_time:212081ms step_avg:44.73ms +[2025-09-11 09:17:31] [Rank 0] step:4741/10000 train_time:212081ms step_avg:44.73ms +[2025-09-11 09:17:31] [Rank 0] step:4761/10000 train_time:212761ms step_avg:44.69ms +[2025-09-11 09:17:31] [Rank 0] step:4761/10000 train_time:212761ms step_avg:44.69ms +[2025-09-11 09:17:32] [Rank 0] step:4781/10000 train_time:213441ms step_avg:44.64ms +[2025-09-11 09:17:32] [Rank 0] step:4781/10000 train_time:213441ms step_avg:44.64ms +[2025-09-11 09:17:33] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 09:17:33] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 09:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 09:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 09:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 09:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 09:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 09:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 09:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 09:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 09:17:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 09:17:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 09:17:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 09:17:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 09:17:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 09:17:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 09:17:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 09:17:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 09:17:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 09:17:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 09:17:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 09:17:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 09:17:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 09:17:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 09:17:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 09:17:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 09:17:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 09:17:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 09:17:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 09:17:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 09:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 09:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 09:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 09:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 09:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 09:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 09:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 09:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 09:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 09:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 09:17:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 09:17:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 09:17:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:17:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:17:43] [Rank 0] PRINT: step:4800/10000 val_loss:4.4386 total_sharp:3.7373e-05 L1_sharp:7.2339e-03 L2_sharp:7.0588e-03 L3_sharp:7.6667e-03 L4_sharp:1.4061e-02 L5_sharp:1.5814e-02 L6_sharp:1.5051e-02 L7_sharp:1.9154e-02 L8_sharp:2.8896e-02 L9_sharp:3.3242e-02 L10_sharp:5.4645e-02 L11_sharp:1.0552e-01 L12_sharp:8.0690e-01 total_fnorm:1.8900e+02 total_l1_linf:4.2189e+05 total_spectral:9.5000e+01 L1_fnorm:2.4512e-01 L2_fnorm:2.4512e-01 L3_fnorm:2.4512e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4512e-01 L7_fnorm:2.4512e-01 L8_fnorm:2.4121e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4512e-01 L11_fnorm:2.4707e-01 L12_fnorm:2.5000e-01 L1_l1linf:6.2500e-02 L2_l1linf:6.2012e-02 L3_l1linf:6.1035e-02 L4_l1linf:6.0791e-02 L5_l1linf:6.0547e-02 L6_l1linf:6.0059e-02 L7_l1linf:6.0547e-02 L8_l1linf:5.9082e-02 L9_l1linf:5.9326e-02 L10_l1linf:5.9570e-02 L11_l1linf:6.3477e-02 L12_l1linf:6.7383e-02 L1_spectral:3.1982e-03 L2_spectral:3.2093e-03 L3_spectral:3.1982e-03 L4_spectral:3.2016e-03 L5_spectral:3.2050e-03 L6_spectral:3.2153e-03 L7_spectral:3.2307e-03 L8_spectral:3.2288e-03 L9_spectral:3.2300e-03 L10_spectral:3.2028e-03 L11_spectral:3.2102e-03 L12_spectral:3.2104e-03 train_time:214101ms step_avg:44.60ms +[2025-09-11 09:17:43] [Rank 0] PRINT: step:4800/10000 
val_loss:4.4386 total_sharp:3.7373e-05 L1_sharp:7.2339e-03 L2_sharp:7.0588e-03 L3_sharp:7.6667e-03 L4_sharp:1.4061e-02 L5_sharp:1.5814e-02 L6_sharp:1.5051e-02 L7_sharp:1.9154e-02 L8_sharp:2.8896e-02 L9_sharp:3.3242e-02 L10_sharp:5.4645e-02 L11_sharp:1.0552e-01 L12_sharp:8.0690e-01 total_fnorm:1.8900e+02 total_l1_linf:4.2189e+05 total_spectral:9.5000e+01 L1_fnorm:2.4512e-01 L2_fnorm:2.4512e-01 L3_fnorm:2.4512e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4512e-01 L7_fnorm:2.4512e-01 L8_fnorm:2.4121e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4512e-01 L11_fnorm:2.4707e-01 L12_fnorm:2.5000e-01 L1_l1linf:6.2500e-02 L2_l1linf:6.2012e-02 L3_l1linf:6.1035e-02 L4_l1linf:6.0791e-02 L5_l1linf:6.0547e-02 L6_l1linf:6.0059e-02 L7_l1linf:6.0547e-02 L8_l1linf:5.9082e-02 L9_l1linf:5.9326e-02 L10_l1linf:5.9570e-02 L11_l1linf:6.3477e-02 L12_l1linf:6.7383e-02 L1_spectral:3.1982e-03 L2_spectral:3.2093e-03 L3_spectral:3.1982e-03 L4_spectral:3.2016e-03 L5_spectral:3.2050e-03 L6_spectral:3.2153e-03 L7_spectral:3.2307e-03 L8_spectral:3.2288e-03 L9_spectral:3.2300e-03 L10_spectral:3.2028e-03 L11_spectral:3.2102e-03 L12_spectral:3.2104e-03 train_time:214101ms step_avg:44.60ms +[2025-09-11 09:17:45] [Rank 0] step:4801/10000 train_time:215829ms step_avg:44.95ms +[2025-09-11 09:17:45] [Rank 0] step:4801/10000 train_time:215829ms step_avg:44.95ms +[2025-09-11 09:17:46] [Rank 0] step:4821/10000 train_time:216513ms step_avg:44.91ms +[2025-09-11 09:17:46] [Rank 0] step:4821/10000 train_time:216513ms step_avg:44.91ms +[2025-09-11 09:17:47] [Rank 0] step:4841/10000 train_time:217194ms step_avg:44.87ms +[2025-09-11 09:17:47] [Rank 0] step:4841/10000 train_time:217194ms step_avg:44.87ms +[2025-09-11 09:17:47] [Rank 0] step:4861/10000 train_time:217875ms step_avg:44.82ms +[2025-09-11 09:17:47] [Rank 0] step:4861/10000 train_time:217875ms step_avg:44.82ms +[2025-09-11 09:17:48] [Rank 0] step:4881/10000 train_time:218554ms step_avg:44.78ms +[2025-09-11 09:17:48] [Rank 0] step:4881/10000 
train_time:218554ms step_avg:44.78ms +[2025-09-11 09:17:49] [Rank 0] step:4901/10000 train_time:219236ms step_avg:44.73ms +[2025-09-11 09:17:49] [Rank 0] step:4901/10000 train_time:219236ms step_avg:44.73ms +[2025-09-11 09:17:49] [Rank 0] step:4921/10000 train_time:219917ms step_avg:44.69ms +[2025-09-11 09:17:49] [Rank 0] step:4921/10000 train_time:219917ms step_avg:44.69ms +[2025-09-11 09:17:50] [Rank 0] step:4941/10000 train_time:220596ms step_avg:44.65ms +[2025-09-11 09:17:50] [Rank 0] step:4941/10000 train_time:220596ms step_avg:44.65ms +[2025-09-11 09:17:51] [Rank 0] step:4961/10000 train_time:221277ms step_avg:44.60ms +[2025-09-11 09:17:51] [Rank 0] step:4961/10000 train_time:221277ms step_avg:44.60ms +[2025-09-11 09:17:51] [Rank 0] step:4981/10000 train_time:221957ms step_avg:44.56ms +[2025-09-11 09:17:51] [Rank 0] step:4981/10000 train_time:221957ms step_avg:44.56ms +[2025-09-11 09:17:52] [Rank 0] step:5001/10000 train_time:222639ms step_avg:44.52ms +[2025-09-11 09:17:52] [Rank 0] step:5001/10000 train_time:222639ms step_avg:44.52ms +[2025-09-11 09:17:53] [Rank 0] step:5021/10000 train_time:223319ms step_avg:44.48ms +[2025-09-11 09:17:53] [Rank 0] step:5021/10000 train_time:223319ms step_avg:44.48ms +[2025-09-11 09:17:53] [Rank 0] step:5041/10000 train_time:223997ms step_avg:44.44ms +[2025-09-11 09:17:53] [Rank 0] step:5041/10000 train_time:223997ms step_avg:44.44ms +[2025-09-11 09:17:54] [Rank 0] step:5061/10000 train_time:224677ms step_avg:44.39ms +[2025-09-11 09:17:54] [Rank 0] step:5061/10000 train_time:224677ms step_avg:44.39ms +[2025-09-11 09:17:55] [Rank 0] step:5081/10000 train_time:225357ms step_avg:44.35ms +[2025-09-11 09:17:55] [Rank 0] step:5081/10000 train_time:225357ms step_avg:44.35ms +[2025-09-11 09:17:55] [Rank 0] step:5101/10000 train_time:226037ms step_avg:44.31ms +[2025-09-11 09:17:55] [Rank 0] step:5101/10000 train_time:226037ms step_avg:44.31ms +[2025-09-11 09:17:56] [Rank 0] step:5121/10000 train_time:226717ms step_avg:44.27ms 
+[2025-09-11 09:17:56] [Rank 0] step:5121/10000 train_time:226717ms step_avg:44.27ms +[2025-09-11 09:17:57] [Rank 0] step:5141/10000 train_time:227397ms step_avg:44.23ms +[2025-09-11 09:17:57] [Rank 0] step:5141/10000 train_time:227397ms step_avg:44.23ms +[2025-09-11 09:17:57] [Rank 0] step:5161/10000 train_time:228077ms step_avg:44.19ms +[2025-09-11 09:17:57] [Rank 0] step:5161/10000 train_time:228077ms step_avg:44.19ms +[2025-09-11 09:17:58] [Rank 0] step:5181/10000 train_time:228757ms step_avg:44.15ms +[2025-09-11 09:17:58] [Rank 0] step:5181/10000 train_time:228757ms step_avg:44.15ms +[2025-09-11 09:17:59] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 09:17:59] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 09:18:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 09:18:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 09:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 09:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 09:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 09:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 09:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 09:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 09:18:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 09:18:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 09:18:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 09:18:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 09:18:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 09:18:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 09:18:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 09:18:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 09:18:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 09:18:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 09:18:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 09:18:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 09:18:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 09:18:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 09:18:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 09:18:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 09:18:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 09:18:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 09:18:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 09:18:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 09:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 09:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 09:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 09:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 09:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 09:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 09:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 09:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 09:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 09:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 09:18:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 09:18:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 09:18:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:18:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:18:10] [Rank 0] PRINT: step:5200/10000 val_loss:4.4095 total_sharp:4.5396e-05 L1_sharp:5.2085e-03 L2_sharp:5.1534e-03 L3_sharp:6.5336e-03 L4_sharp:1.0446e-02 L5_sharp:1.6247e-02 L6_sharp:1.5865e-02 L7_sharp:2.3270e-02 L8_sharp:3.3183e-02 L9_sharp:3.8078e-02 L10_sharp:6.6475e-02 L11_sharp:1.3429e-01 L12_sharp:1.3828e+00 total_fnorm:1.7600e+02 total_l1_linf:3.7069e+05 total_spectral:8.8000e+01 L1_fnorm:2.4414e-01 L2_fnorm:2.4414e-01 L3_fnorm:2.4512e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4414e-01 L7_fnorm:2.4512e-01 L8_fnorm:2.4121e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4414e-01 L11_fnorm:2.4512e-01 L12_fnorm:2.4902e-01 L1_l1linf:6.0791e-02 L2_l1linf:6.1035e-02 L3_l1linf:5.9814e-02 L4_l1linf:6.0059e-02 L5_l1linf:6.0791e-02 L6_l1linf:5.9814e-02 L7_l1linf:5.9814e-02 L8_l1linf:5.9570e-02 L9_l1linf:5.7861e-02 L10_l1linf:5.8838e-02 L11_l1linf:6.1279e-02 L12_l1linf:6.5430e-02 L1_spectral:3.2155e-03 L2_spectral:3.2003e-03 L3_spectral:3.2177e-03 L4_spectral:3.1955e-03 L5_spectral:3.2112e-03 L6_spectral:3.2129e-03 L7_spectral:3.2110e-03 L8_spectral:3.2259e-03 L9_spectral:3.2155e-03 L10_spectral:3.2348e-03 L11_spectral:3.2203e-03 L12_spectral:3.2138e-03 train_time:229424ms step_avg:44.12ms +[2025-09-11 09:18:10] [Rank 0] PRINT: step:5200/10000 val_loss:4.4095 total_sharp:4.5396e-05 L1_sharp:5.2085e-03 L2_sharp:5.1534e-03 L3_sharp:6.5336e-03 L4_sharp:1.0446e-02 L5_sharp:1.6247e-02 L6_sharp:1.5865e-02 L7_sharp:2.3270e-02 L8_sharp:3.3183e-02 L9_sharp:3.8078e-02 L10_sharp:6.6475e-02 L11_sharp:1.3429e-01 L12_sharp:1.3828e+00 total_fnorm:1.7600e+02 total_l1_linf:3.7069e+05 total_spectral:8.8000e+01 L1_fnorm:2.4414e-01 L2_fnorm:2.4414e-01 L3_fnorm:2.4512e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4414e-01 L7_fnorm:2.4512e-01 L8_fnorm:2.4121e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4414e-01 L11_fnorm:2.4512e-01 L12_fnorm:2.4902e-01 L1_l1linf:6.0791e-02 L2_l1linf:6.1035e-02 L3_l1linf:5.9814e-02 L4_l1linf:6.0059e-02 L5_l1linf:6.0791e-02 
L6_l1linf:5.9814e-02 L7_l1linf:5.9814e-02 L8_l1linf:5.9570e-02 L9_l1linf:5.7861e-02 L10_l1linf:5.8838e-02 L11_l1linf:6.1279e-02 L12_l1linf:6.5430e-02 L1_spectral:3.2155e-03 L2_spectral:3.2003e-03 L3_spectral:3.2177e-03 L4_spectral:3.1955e-03 L5_spectral:3.2112e-03 L6_spectral:3.2129e-03 L7_spectral:3.2110e-03 L8_spectral:3.2259e-03 L9_spectral:3.2155e-03 L10_spectral:3.2348e-03 L11_spectral:3.2203e-03 L12_spectral:3.2138e-03 train_time:229424ms step_avg:44.12ms +[2025-09-11 09:18:12] [Rank 0] step:5201/10000 train_time:231610ms step_avg:44.53ms +[2025-09-11 09:18:12] [Rank 0] step:5201/10000 train_time:231610ms step_avg:44.53ms +[2025-09-11 09:18:13] [Rank 0] step:5221/10000 train_time:232576ms step_avg:44.55ms +[2025-09-11 09:18:13] [Rank 0] step:5221/10000 train_time:232576ms step_avg:44.55ms +[2025-09-11 09:18:13] [Rank 0] step:5241/10000 train_time:233268ms step_avg:44.51ms +[2025-09-11 09:18:13] [Rank 0] step:5241/10000 train_time:233268ms step_avg:44.51ms +[2025-09-11 09:18:14] [Rank 0] step:5261/10000 train_time:233957ms step_avg:44.47ms +[2025-09-11 09:18:14] [Rank 0] step:5261/10000 train_time:233957ms step_avg:44.47ms +[2025-09-11 09:18:15] [Rank 0] step:5281/10000 train_time:234919ms step_avg:44.48ms +[2025-09-11 09:18:15] [Rank 0] step:5281/10000 train_time:234919ms step_avg:44.48ms +[2025-09-11 09:18:16] [Rank 0] step:5301/10000 train_time:235608ms step_avg:44.45ms +[2025-09-11 09:18:16] [Rank 0] step:5301/10000 train_time:235608ms step_avg:44.45ms +[2025-09-11 09:18:16] [Rank 0] step:5321/10000 train_time:236297ms step_avg:44.41ms +[2025-09-11 09:18:16] [Rank 0] step:5321/10000 train_time:236297ms step_avg:44.41ms +[2025-09-11 09:18:17] [Rank 0] step:5341/10000 train_time:236986ms step_avg:44.37ms +[2025-09-11 09:18:17] [Rank 0] step:5341/10000 train_time:236986ms step_avg:44.37ms +[2025-09-11 09:18:18] [Rank 0] step:5361/10000 train_time:237675ms step_avg:44.33ms +[2025-09-11 09:18:18] [Rank 0] step:5361/10000 train_time:237675ms step_avg:44.33ms 
+[2025-09-11 09:18:19] [Rank 0] step:5381/10000 train_time:238366ms step_avg:44.30ms +[2025-09-11 09:18:19] [Rank 0] step:5381/10000 train_time:238366ms step_avg:44.30ms +[2025-09-11 09:18:19] [Rank 0] step:5401/10000 train_time:239054ms step_avg:44.26ms +[2025-09-11 09:18:19] [Rank 0] step:5401/10000 train_time:239054ms step_avg:44.26ms +[2025-09-11 09:18:20] [Rank 0] step:5421/10000 train_time:239744ms step_avg:44.23ms +[2025-09-11 09:18:20] [Rank 0] step:5421/10000 train_time:239744ms step_avg:44.23ms +[2025-09-11 09:18:21] [Rank 0] step:5441/10000 train_time:240434ms step_avg:44.19ms +[2025-09-11 09:18:21] [Rank 0] step:5441/10000 train_time:240434ms step_avg:44.19ms +[2025-09-11 09:18:21] [Rank 0] step:5461/10000 train_time:241123ms step_avg:44.15ms +[2025-09-11 09:18:21] [Rank 0] step:5461/10000 train_time:241123ms step_avg:44.15ms +[2025-09-11 09:18:22] [Rank 0] step:5481/10000 train_time:241813ms step_avg:44.12ms +[2025-09-11 09:18:22] [Rank 0] step:5481/10000 train_time:241813ms step_avg:44.12ms +[2025-09-11 09:18:23] [Rank 0] step:5501/10000 train_time:242501ms step_avg:44.08ms +[2025-09-11 09:18:23] [Rank 0] step:5501/10000 train_time:242501ms step_avg:44.08ms +[2025-09-11 09:18:23] [Rank 0] step:5521/10000 train_time:243190ms step_avg:44.05ms +[2025-09-11 09:18:23] [Rank 0] step:5521/10000 train_time:243190ms step_avg:44.05ms +[2025-09-11 09:18:24] [Rank 0] step:5541/10000 train_time:243882ms step_avg:44.01ms +[2025-09-11 09:18:24] [Rank 0] step:5541/10000 train_time:243882ms step_avg:44.01ms +[2025-09-11 09:18:25] [Rank 0] step:5561/10000 train_time:244574ms step_avg:43.98ms +[2025-09-11 09:18:25] [Rank 0] step:5561/10000 train_time:244574ms step_avg:43.98ms +[2025-09-11 09:18:25] [Rank 0] step:5581/10000 train_time:245265ms step_avg:43.95ms +[2025-09-11 09:18:25] [Rank 0] step:5581/10000 train_time:245265ms step_avg:43.95ms +[2025-09-11 09:18:26] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 09:18:26] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 09:18:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 09:18:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 09:18:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 09:18:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 09:18:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:18:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:18:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 09:18:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 09:18:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 09:18:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 09:18:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 09:18:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 09:18:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 09:18:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 09:18:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 09:18:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 09:18:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 09:18:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 09:18:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 09:18:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 09:18:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 09:18:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 09:18:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 09:18:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 09:18:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 09:18:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 09:18:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 09:18:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 09:18:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 09:18:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 09:18:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 09:18:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 09:18:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 09:18:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 09:18:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 09:18:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 09:18:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 09:18:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 09:18:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 09:18:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 09:18:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 09:18:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 09:18:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:18:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:18:36] [Rank 0] PRINT: step:5600/10000 val_loss:4.3932 total_sharp:3.7836e-05 L1_sharp:6.0820e-03 L2_sharp:5.6060e-03 L3_sharp:7.6487e-03 L4_sharp:9.5562e-03 L5_sharp:1.3177e-02 L6_sharp:1.3975e-02 L7_sharp:2.0668e-02 L8_sharp:2.3826e-02 L9_sharp:3.5947e-02 L10_sharp:5.2387e-02 L11_sharp:1.0567e-01 L12_sharp:9.5151e-01 total_fnorm:1.8400e+02 total_l1_linf:3.9936e+05 total_spectral:9.1500e+01 L1_fnorm:2.4414e-01 L2_fnorm:2.4414e-01 L3_fnorm:2.4414e-01 L4_fnorm:2.4414e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4414e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.3926e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4414e-01 L11_fnorm:2.4512e-01 L12_fnorm:2.4707e-01 L1_l1linf:6.0547e-02 L2_l1linf:6.0303e-02 L3_l1linf:5.9326e-02 L4_l1linf:5.9326e-02 L5_l1linf:5.9326e-02 L6_l1linf:5.9326e-02 L7_l1linf:5.8105e-02 L8_l1linf:5.7373e-02 L9_l1linf:5.7861e-02 L10_l1linf:5.8105e-02 L11_l1linf:6.1768e-02 L12_l1linf:6.4941e-02 L1_spectral:3.2079e-03 L2_spectral:3.2207e-03 L3_spectral:3.2007e-03 L4_spectral:3.1952e-03 L5_spectral:3.2074e-03 L6_spectral:3.2318e-03 L7_spectral:3.2210e-03 L8_spectral:3.2219e-03 L9_spectral:3.2217e-03 L10_spectral:3.2327e-03 L11_spectral:3.2204e-03 L12_spectral:3.1976e-03 train_time:245935ms step_avg:43.92ms +[2025-09-11 09:18:36] [Rank 0] PRINT: step:5600/10000 
val_loss:4.3932 total_sharp:3.7836e-05 L1_sharp:6.0820e-03 L2_sharp:5.6060e-03 L3_sharp:7.6487e-03 L4_sharp:9.5562e-03 L5_sharp:1.3177e-02 L6_sharp:1.3975e-02 L7_sharp:2.0668e-02 L8_sharp:2.3826e-02 L9_sharp:3.5947e-02 L10_sharp:5.2387e-02 L11_sharp:1.0567e-01 L12_sharp:9.5151e-01 total_fnorm:1.8400e+02 total_l1_linf:3.9936e+05 total_spectral:9.1500e+01 L1_fnorm:2.4414e-01 L2_fnorm:2.4414e-01 L3_fnorm:2.4414e-01 L4_fnorm:2.4414e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4414e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.3926e-01 L9_fnorm:2.4414e-01 L10_fnorm:2.4414e-01 L11_fnorm:2.4512e-01 L12_fnorm:2.4707e-01 L1_l1linf:6.0547e-02 L2_l1linf:6.0303e-02 L3_l1linf:5.9326e-02 L4_l1linf:5.9326e-02 L5_l1linf:5.9326e-02 L6_l1linf:5.9326e-02 L7_l1linf:5.8105e-02 L8_l1linf:5.7373e-02 L9_l1linf:5.7861e-02 L10_l1linf:5.8105e-02 L11_l1linf:6.1768e-02 L12_l1linf:6.4941e-02 L1_spectral:3.2079e-03 L2_spectral:3.2207e-03 L3_spectral:3.2007e-03 L4_spectral:3.1952e-03 L5_spectral:3.2074e-03 L6_spectral:3.2318e-03 L7_spectral:3.2210e-03 L8_spectral:3.2219e-03 L9_spectral:3.2217e-03 L10_spectral:3.2327e-03 L11_spectral:3.2204e-03 L12_spectral:3.1976e-03 train_time:245935ms step_avg:43.92ms +[2025-09-11 09:18:38] [Rank 0] step:5601/10000 train_time:247773ms step_avg:44.24ms +[2025-09-11 09:18:38] [Rank 0] step:5601/10000 train_time:247773ms step_avg:44.24ms +[2025-09-11 09:18:39] [Rank 0] step:5621/10000 train_time:248492ms step_avg:44.21ms +[2025-09-11 09:18:39] [Rank 0] step:5621/10000 train_time:248492ms step_avg:44.21ms +[2025-09-11 09:18:40] [Rank 0] step:5641/10000 train_time:249182ms step_avg:44.17ms +[2025-09-11 09:18:40] [Rank 0] step:5641/10000 train_time:249182ms step_avg:44.17ms +[2025-09-11 09:18:40] [Rank 0] step:5661/10000 train_time:249872ms step_avg:44.14ms +[2025-09-11 09:18:40] [Rank 0] step:5661/10000 train_time:249872ms step_avg:44.14ms +[2025-09-11 09:18:41] [Rank 0] step:5681/10000 train_time:250562ms step_avg:44.11ms +[2025-09-11 09:18:41] [Rank 0] step:5681/10000 
train_time:250562ms step_avg:44.11ms +[2025-09-11 09:18:42] [Rank 0] step:5701/10000 train_time:251254ms step_avg:44.07ms +[2025-09-11 09:18:42] [Rank 0] step:5701/10000 train_time:251254ms step_avg:44.07ms +[2025-09-11 09:18:42] [Rank 0] step:5721/10000 train_time:251942ms step_avg:44.04ms +[2025-09-11 09:18:42] [Rank 0] step:5721/10000 train_time:251942ms step_avg:44.04ms +[2025-09-11 09:18:43] [Rank 0] step:5741/10000 train_time:252633ms step_avg:44.01ms +[2025-09-11 09:18:43] [Rank 0] step:5741/10000 train_time:252633ms step_avg:44.01ms +[2025-09-11 09:18:44] [Rank 0] step:5761/10000 train_time:253323ms step_avg:43.97ms +[2025-09-11 09:18:44] [Rank 0] step:5761/10000 train_time:253323ms step_avg:43.97ms +[2025-09-11 09:18:45] [Rank 0] step:5781/10000 train_time:254016ms step_avg:43.94ms +[2025-09-11 09:18:45] [Rank 0] step:5781/10000 train_time:254016ms step_avg:43.94ms +[2025-09-11 09:18:45] [Rank 0] step:5801/10000 train_time:254707ms step_avg:43.91ms +[2025-09-11 09:18:45] [Rank 0] step:5801/10000 train_time:254707ms step_avg:43.91ms +[2025-09-11 09:18:46] [Rank 0] step:5821/10000 train_time:255396ms step_avg:43.87ms +[2025-09-11 09:18:46] [Rank 0] step:5821/10000 train_time:255396ms step_avg:43.87ms +[2025-09-11 09:18:47] [Rank 0] step:5841/10000 train_time:256087ms step_avg:43.84ms +[2025-09-11 09:18:47] [Rank 0] step:5841/10000 train_time:256087ms step_avg:43.84ms +[2025-09-11 09:18:47] [Rank 0] step:5861/10000 train_time:256777ms step_avg:43.81ms +[2025-09-11 09:18:47] [Rank 0] step:5861/10000 train_time:256777ms step_avg:43.81ms +[2025-09-11 09:18:48] [Rank 0] step:5881/10000 train_time:257466ms step_avg:43.78ms +[2025-09-11 09:18:48] [Rank 0] step:5881/10000 train_time:257466ms step_avg:43.78ms +[2025-09-11 09:18:49] [Rank 0] step:5901/10000 train_time:258154ms step_avg:43.75ms +[2025-09-11 09:18:49] [Rank 0] step:5901/10000 train_time:258154ms step_avg:43.75ms +[2025-09-11 09:18:49] [Rank 0] step:5921/10000 train_time:258845ms step_avg:43.72ms 
+[2025-09-11 09:18:49] [Rank 0] step:5921/10000 train_time:258845ms step_avg:43.72ms +[2025-09-11 09:18:50] [Rank 0] step:5941/10000 train_time:259536ms step_avg:43.69ms +[2025-09-11 09:18:50] [Rank 0] step:5941/10000 train_time:259536ms step_avg:43.69ms +[2025-09-11 09:18:51] [Rank 0] step:5961/10000 train_time:260228ms step_avg:43.66ms +[2025-09-11 09:18:51] [Rank 0] step:5961/10000 train_time:260228ms step_avg:43.66ms +[2025-09-11 09:18:51] [Rank 0] step:5981/10000 train_time:260918ms step_avg:43.62ms +[2025-09-11 09:18:51] [Rank 0] step:5981/10000 train_time:260918ms step_avg:43.62ms +[2025-09-11 09:18:52] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 09:18:52] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 09:18:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 09:18:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 09:18:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 09:18:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 09:18:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:18:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:18:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 09:18:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 09:18:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 09:18:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 09:18:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 09:18:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 09:18:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 09:18:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 09:19:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 09:19:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 09:19:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 09:19:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 09:19:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 09:19:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 09:19:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 09:19:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 09:19:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 09:19:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 09:19:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 09:19:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 09:19:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 09:19:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 09:19:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 09:19:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 09:19:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 09:19:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 09:19:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 09:19:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 09:19:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 09:19:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 09:19:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 09:19:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 09:19:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 09:19:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 09:19:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 09:19:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 09:19:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:19:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:19:03] [Rank 0] PRINT: step:6000/10000 val_loss:4.3476 total_sharp:3.1227e-05 L1_sharp:3.6067e-03 L2_sharp:4.0569e-03 L3_sharp:5.5016e-03 L4_sharp:7.9190e-03 L5_sharp:1.2642e-02 L6_sharp:1.1428e-02 L7_sharp:1.7022e-02 L8_sharp:2.3564e-02 L9_sharp:2.8568e-02 L10_sharp:3.9026e-02 L11_sharp:8.2909e-02 L12_sharp:3.9171e-01 total_fnorm:1.8200e+02 total_l1_linf:3.9322e+05 total_spectral:9.1500e+01 L1_fnorm:2.4414e-01 L2_fnorm:2.4316e-01 L3_fnorm:2.4316e-01 L4_fnorm:2.4414e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4414e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.4121e-01 L9_fnorm:2.4316e-01 L10_fnorm:2.4316e-01 L11_fnorm:2.4316e-01 L12_fnorm:2.4609e-01 L1_l1linf:5.8838e-02 L2_l1linf:5.8594e-02 L3_l1linf:5.8838e-02 L4_l1linf:5.8350e-02 L5_l1linf:5.7861e-02 L6_l1linf:5.7861e-02 L7_l1linf:5.8105e-02 L8_l1linf:5.7617e-02 L9_l1linf:5.6396e-02 L10_l1linf:5.6396e-02 L11_l1linf:5.8350e-02 L12_l1linf:6.0791e-02 L1_spectral:3.2179e-03 L2_spectral:3.2124e-03 L3_spectral:3.2030e-03 L4_spectral:3.2439e-03 L5_spectral:3.2128e-03 L6_spectral:3.2360e-03 L7_spectral:3.2082e-03 L8_spectral:3.2497e-03 L9_spectral:3.2294e-03 L10_spectral:3.2149e-03 L11_spectral:3.2071e-03 L12_spectral:3.2291e-03 train_time:261592ms step_avg:43.60ms +[2025-09-11 09:19:03] [Rank 0] PRINT: step:6000/10000 val_loss:4.3476 total_sharp:3.1227e-05 L1_sharp:3.6067e-03 L2_sharp:4.0569e-03 L3_sharp:5.5016e-03 L4_sharp:7.9190e-03 L5_sharp:1.2642e-02 L6_sharp:1.1428e-02 L7_sharp:1.7022e-02 L8_sharp:2.3564e-02 L9_sharp:2.8568e-02 L10_sharp:3.9026e-02 L11_sharp:8.2909e-02 L12_sharp:3.9171e-01 total_fnorm:1.8200e+02 total_l1_linf:3.9322e+05 total_spectral:9.1500e+01 L1_fnorm:2.4414e-01 L2_fnorm:2.4316e-01 L3_fnorm:2.4316e-01 L4_fnorm:2.4414e-01 L5_fnorm:2.4414e-01 L6_fnorm:2.4414e-01 L7_fnorm:2.4414e-01 L8_fnorm:2.4121e-01 L9_fnorm:2.4316e-01 L10_fnorm:2.4316e-01 L11_fnorm:2.4316e-01 L12_fnorm:2.4609e-01 L1_l1linf:5.8838e-02 L2_l1linf:5.8594e-02 L3_l1linf:5.8838e-02 L4_l1linf:5.8350e-02 L5_l1linf:5.7861e-02 
L6_l1linf:5.7861e-02 L7_l1linf:5.8105e-02 L8_l1linf:5.7617e-02 L9_l1linf:5.6396e-02 L10_l1linf:5.6396e-02 L11_l1linf:5.8350e-02 L12_l1linf:6.0791e-02 L1_spectral:3.2179e-03 L2_spectral:3.2124e-03 L3_spectral:3.2030e-03 L4_spectral:3.2439e-03 L5_spectral:3.2128e-03 L6_spectral:3.2360e-03 L7_spectral:3.2082e-03 L8_spectral:3.2497e-03 L9_spectral:3.2294e-03 L10_spectral:3.2149e-03 L11_spectral:3.2071e-03 L12_spectral:3.2291e-03 train_time:261592ms step_avg:43.60ms +[2025-09-11 09:19:05] [Rank 0] step:6001/10000 train_time:263478ms step_avg:43.91ms +[2025-09-11 09:19:05] [Rank 0] step:6001/10000 train_time:263478ms step_avg:43.91ms +[2025-09-11 09:19:05] [Rank 0] step:6021/10000 train_time:264190ms step_avg:43.88ms +[2025-09-11 09:19:05] [Rank 0] step:6021/10000 train_time:264190ms step_avg:43.88ms +[2025-09-11 09:19:06] [Rank 0] step:6041/10000 train_time:264885ms step_avg:43.85ms +[2025-09-11 09:19:06] [Rank 0] step:6041/10000 train_time:264885ms step_avg:43.85ms +[2025-09-11 09:19:07] [Rank 0] step:6061/10000 train_time:265576ms step_avg:43.82ms +[2025-09-11 09:19:07] [Rank 0] step:6061/10000 train_time:265576ms step_avg:43.82ms +[2025-09-11 09:19:07] [Rank 0] step:6081/10000 train_time:266269ms step_avg:43.79ms +[2025-09-11 09:19:07] [Rank 0] step:6081/10000 train_time:266269ms step_avg:43.79ms +[2025-09-11 09:19:08] [Rank 0] step:6101/10000 train_time:266961ms step_avg:43.76ms +[2025-09-11 09:19:08] [Rank 0] step:6101/10000 train_time:266961ms step_avg:43.76ms +[2025-09-11 09:19:09] [Rank 0] step:6121/10000 train_time:267654ms step_avg:43.73ms +[2025-09-11 09:19:09] [Rank 0] step:6121/10000 train_time:267654ms step_avg:43.73ms +[2025-09-11 09:19:10] [Rank 0] step:6141/10000 train_time:268347ms step_avg:43.70ms +[2025-09-11 09:19:10] [Rank 0] step:6141/10000 train_time:268347ms step_avg:43.70ms +[2025-09-11 09:19:10] [Rank 0] step:6161/10000 train_time:269039ms step_avg:43.67ms +[2025-09-11 09:19:10] [Rank 0] step:6161/10000 train_time:269039ms step_avg:43.67ms 
+[2025-09-11 09:19:11] [Rank 0] step:6181/10000 train_time:269729ms step_avg:43.64ms +[2025-09-11 09:19:11] [Rank 0] step:6181/10000 train_time:269729ms step_avg:43.64ms +[2025-09-11 09:19:12] [Rank 0] step:6201/10000 train_time:270423ms step_avg:43.61ms +[2025-09-11 09:19:12] [Rank 0] step:6201/10000 train_time:270423ms step_avg:43.61ms +[2025-09-11 09:19:12] [Rank 0] step:6221/10000 train_time:271117ms step_avg:43.58ms +[2025-09-11 09:19:12] [Rank 0] step:6221/10000 train_time:271117ms step_avg:43.58ms +[2025-09-11 09:19:13] [Rank 0] step:6241/10000 train_time:271810ms step_avg:43.55ms +[2025-09-11 09:19:13] [Rank 0] step:6241/10000 train_time:271810ms step_avg:43.55ms +[2025-09-11 09:19:14] [Rank 0] step:6261/10000 train_time:272500ms step_avg:43.52ms +[2025-09-11 09:19:14] [Rank 0] step:6261/10000 train_time:272500ms step_avg:43.52ms +[2025-09-11 09:19:14] [Rank 0] step:6281/10000 train_time:273194ms step_avg:43.50ms +[2025-09-11 09:19:14] [Rank 0] step:6281/10000 train_time:273194ms step_avg:43.50ms +[2025-09-11 09:19:15] [Rank 0] step:6301/10000 train_time:273884ms step_avg:43.47ms +[2025-09-11 09:19:15] [Rank 0] step:6301/10000 train_time:273884ms step_avg:43.47ms +[2025-09-11 09:19:16] [Rank 0] step:6321/10000 train_time:274865ms step_avg:43.48ms +[2025-09-11 09:19:16] [Rank 0] step:6321/10000 train_time:274865ms step_avg:43.48ms +[2025-09-11 09:19:17] [Rank 0] step:6341/10000 train_time:275558ms step_avg:43.46ms +[2025-09-11 09:19:17] [Rank 0] step:6341/10000 train_time:275558ms step_avg:43.46ms +[2025-09-11 09:19:17] [Rank 0] step:6361/10000 train_time:276251ms step_avg:43.43ms +[2025-09-11 09:19:17] [Rank 0] step:6361/10000 train_time:276251ms step_avg:43.43ms +[2025-09-11 09:19:18] [Rank 0] step:6381/10000 train_time:277207ms step_avg:43.44ms +[2025-09-11 09:19:18] [Rank 0] step:6381/10000 train_time:277207ms step_avg:43.44ms +[2025-09-11 09:19:19] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 09:19:19] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 09:19:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 09:19:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 09:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 09:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 09:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 09:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 09:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 09:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 09:19:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 09:19:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 09:19:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 09:19:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 09:19:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 09:19:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 09:19:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 09:19:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 09:19:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 09:19:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 09:19:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 09:19:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 09:19:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 09:19:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 09:19:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 09:19:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 09:19:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 09:19:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 09:19:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 09:19:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 09:19:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 09:19:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 09:19:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 09:19:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 09:19:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 09:19:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 09:19:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 09:19:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 09:19:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 09:19:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 09:19:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 09:19:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 09:19:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:19:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:19:30] [Rank 0] PRINT: step:6400/10000 val_loss:4.3169 total_sharp:3.4903e-05 L1_sharp:7.4106e-03 L2_sharp:5.8992e-03 L3_sharp:7.5997e-03 L4_sharp:1.2335e-02 L5_sharp:1.4613e-02 L6_sharp:1.5536e-02 L7_sharp:1.9016e-02 L8_sharp:2.7574e-02 L9_sharp:3.0996e-02 L10_sharp:3.9596e-02 L11_sharp:8.5632e-02 L12_sharp:4.0319e-01 total_fnorm:1.6200e+02 total_l1_linf:3.4202e+05 total_spectral:8.1500e+01 L1_fnorm:2.1680e-01 L2_fnorm:2.1680e-01 L3_fnorm:2.1680e-01 L4_fnorm:2.1680e-01 L5_fnorm:2.1680e-01 L6_fnorm:2.1680e-01 L7_fnorm:2.1777e-01 L8_fnorm:2.1191e-01 L9_fnorm:2.1582e-01 L10_fnorm:2.1582e-01 L11_fnorm:2.1582e-01 L12_fnorm:2.1777e-01 L1_l1linf:5.0781e-02 L2_l1linf:4.9561e-02 L3_l1linf:4.9561e-02 L4_l1linf:4.9316e-02 L5_l1linf:5.0049e-02 L6_l1linf:5.0049e-02 L7_l1linf:4.9316e-02 L8_l1linf:4.9316e-02 L9_l1linf:4.9072e-02 L10_l1linf:4.8096e-02 L11_l1linf:4.9072e-02 L12_l1linf:5.2734e-02 L1_spectral:2.9266e-03 L2_spectral:2.8940e-03 L3_spectral:2.8981e-03 L4_spectral:2.9215e-03 L5_spectral:2.9177e-03 L6_spectral:2.9144e-03 L7_spectral:2.9208e-03 L8_spectral:2.8944e-03 L9_spectral:2.9168e-03 L10_spectral:2.9202e-03 L11_spectral:2.9249e-03 L12_spectral:2.9038e-03 train_time:277880ms step_avg:43.42ms +[2025-09-11 09:19:30] [Rank 0] PRINT: step:6400/10000 
val_loss:4.3169 total_sharp:3.4903e-05 L1_sharp:7.4106e-03 L2_sharp:5.8992e-03 L3_sharp:7.5997e-03 L4_sharp:1.2335e-02 L5_sharp:1.4613e-02 L6_sharp:1.5536e-02 L7_sharp:1.9016e-02 L8_sharp:2.7574e-02 L9_sharp:3.0996e-02 L10_sharp:3.9596e-02 L11_sharp:8.5632e-02 L12_sharp:4.0319e-01 total_fnorm:1.6200e+02 total_l1_linf:3.4202e+05 total_spectral:8.1500e+01 L1_fnorm:2.1680e-01 L2_fnorm:2.1680e-01 L3_fnorm:2.1680e-01 L4_fnorm:2.1680e-01 L5_fnorm:2.1680e-01 L6_fnorm:2.1680e-01 L7_fnorm:2.1777e-01 L8_fnorm:2.1191e-01 L9_fnorm:2.1582e-01 L10_fnorm:2.1582e-01 L11_fnorm:2.1582e-01 L12_fnorm:2.1777e-01 L1_l1linf:5.0781e-02 L2_l1linf:4.9561e-02 L3_l1linf:4.9561e-02 L4_l1linf:4.9316e-02 L5_l1linf:5.0049e-02 L6_l1linf:5.0049e-02 L7_l1linf:4.9316e-02 L8_l1linf:4.9316e-02 L9_l1linf:4.9072e-02 L10_l1linf:4.8096e-02 L11_l1linf:4.9072e-02 L12_l1linf:5.2734e-02 L1_spectral:2.9266e-03 L2_spectral:2.8940e-03 L3_spectral:2.8981e-03 L4_spectral:2.9215e-03 L5_spectral:2.9177e-03 L6_spectral:2.9144e-03 L7_spectral:2.9208e-03 L8_spectral:2.8944e-03 L9_spectral:2.9168e-03 L10_spectral:2.9202e-03 L11_spectral:2.9249e-03 L12_spectral:2.9038e-03 train_time:277880ms step_avg:43.42ms +[2025-09-11 09:19:31] [Rank 0] step:6401/10000 train_time:279678ms step_avg:43.69ms +[2025-09-11 09:19:31] [Rank 0] step:6401/10000 train_time:279678ms step_avg:43.69ms +[2025-09-11 09:19:32] [Rank 0] step:6421/10000 train_time:280397ms step_avg:43.67ms +[2025-09-11 09:19:32] [Rank 0] step:6421/10000 train_time:280397ms step_avg:43.67ms +[2025-09-11 09:19:33] [Rank 0] step:6441/10000 train_time:281090ms step_avg:43.64ms +[2025-09-11 09:19:33] [Rank 0] step:6441/10000 train_time:281090ms step_avg:43.64ms +[2025-09-11 09:19:34] [Rank 0] step:6461/10000 train_time:281783ms step_avg:43.61ms +[2025-09-11 09:19:34] [Rank 0] step:6461/10000 train_time:281783ms step_avg:43.61ms +[2025-09-11 09:19:34] [Rank 0] step:6481/10000 train_time:282477ms step_avg:43.59ms +[2025-09-11 09:19:34] [Rank 0] step:6481/10000 
train_time:282477ms step_avg:43.59ms +[2025-09-11 09:19:35] [Rank 0] step:6501/10000 train_time:283172ms step_avg:43.56ms +[2025-09-11 09:19:35] [Rank 0] step:6501/10000 train_time:283172ms step_avg:43.56ms +[2025-09-11 09:19:36] [Rank 0] step:6521/10000 train_time:283866ms step_avg:43.53ms +[2025-09-11 09:19:36] [Rank 0] step:6521/10000 train_time:283866ms step_avg:43.53ms +[2025-09-11 09:19:36] [Rank 0] step:6541/10000 train_time:284557ms step_avg:43.50ms +[2025-09-11 09:19:36] [Rank 0] step:6541/10000 train_time:284557ms step_avg:43.50ms +[2025-09-11 09:19:37] [Rank 0] step:6561/10000 train_time:285250ms step_avg:43.48ms +[2025-09-11 09:19:37] [Rank 0] step:6561/10000 train_time:285250ms step_avg:43.48ms +[2025-09-11 09:19:38] [Rank 0] step:6581/10000 train_time:285944ms step_avg:43.45ms +[2025-09-11 09:19:38] [Rank 0] step:6581/10000 train_time:285944ms step_avg:43.45ms +[2025-09-11 09:19:38] [Rank 0] step:6601/10000 train_time:286637ms step_avg:43.42ms +[2025-09-11 09:19:38] [Rank 0] step:6601/10000 train_time:286637ms step_avg:43.42ms +[2025-09-11 09:19:39] [Rank 0] step:6621/10000 train_time:287329ms step_avg:43.40ms +[2025-09-11 09:19:39] [Rank 0] step:6621/10000 train_time:287329ms step_avg:43.40ms +[2025-09-11 09:19:40] [Rank 0] step:6641/10000 train_time:288023ms step_avg:43.37ms +[2025-09-11 09:19:40] [Rank 0] step:6641/10000 train_time:288023ms step_avg:43.37ms +[2025-09-11 09:19:41] [Rank 0] step:6661/10000 train_time:288718ms step_avg:43.34ms +[2025-09-11 09:19:41] [Rank 0] step:6661/10000 train_time:288718ms step_avg:43.34ms +[2025-09-11 09:19:41] [Rank 0] step:6681/10000 train_time:289418ms step_avg:43.32ms +[2025-09-11 09:19:41] [Rank 0] step:6681/10000 train_time:289418ms step_avg:43.32ms +[2025-09-11 09:19:42] [Rank 0] step:6701/10000 train_time:290118ms step_avg:43.29ms +[2025-09-11 09:19:42] [Rank 0] step:6701/10000 train_time:290118ms step_avg:43.29ms +[2025-09-11 09:19:43] [Rank 0] step:6721/10000 train_time:290818ms step_avg:43.27ms 
+[2025-09-11 09:19:43] [Rank 0] step:6721/10000 train_time:290818ms step_avg:43.27ms +[2025-09-11 09:19:43] [Rank 0] step:6741/10000 train_time:291518ms step_avg:43.25ms +[2025-09-11 09:19:43] [Rank 0] step:6741/10000 train_time:291518ms step_avg:43.25ms +[2025-09-11 09:19:44] [Rank 0] step:6761/10000 train_time:292217ms step_avg:43.22ms +[2025-09-11 09:19:44] [Rank 0] step:6761/10000 train_time:292217ms step_avg:43.22ms +[2025-09-11 09:19:45] [Rank 0] step:6781/10000 train_time:292917ms step_avg:43.20ms +[2025-09-11 09:19:45] [Rank 0] step:6781/10000 train_time:292917ms step_avg:43.20ms +[2025-09-11 09:19:45] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 09:19:45] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 09:19:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 09:19:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 09:19:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 09:19:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 09:19:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:19:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:19:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 09:19:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 09:19:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 09:19:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 09:19:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 09:19:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 09:19:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 09:19:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 09:19:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 09:19:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 09:19:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 09:19:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 09:19:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 09:19:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 09:19:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 09:19:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 09:19:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 09:19:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 09:19:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 09:19:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 09:19:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 09:19:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 09:19:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 09:19:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 09:19:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 09:19:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 09:19:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 09:19:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 09:19:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 09:19:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 09:19:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 09:19:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 09:19:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 09:19:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 09:19:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 09:19:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 09:19:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:19:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:19:56] [Rank 0] PRINT: step:6800/10000 val_loss:4.2777 total_sharp:2.7757e-05 L1_sharp:6.4291e-03 L2_sharp:6.2719e-03 L3_sharp:5.9195e-03 L4_sharp:1.0108e-02 L5_sharp:1.2695e-02 L6_sharp:1.4906e-02 L7_sharp:1.7598e-02 L8_sharp:2.4624e-02 L9_sharp:3.0254e-02 L10_sharp:3.8078e-02 L11_sharp:8.4378e-02 L12_sharp:2.7924e-01 total_fnorm:1.5700e+02 total_l1_linf:3.3178e+05 total_spectral:7.8500e+01 L1_fnorm:1.8848e-01 L2_fnorm:1.8848e-01 L3_fnorm:1.8848e-01 L4_fnorm:1.8848e-01 L5_fnorm:1.8848e-01 L6_fnorm:1.8848e-01 L7_fnorm:1.8848e-01 L8_fnorm:1.8359e-01 L9_fnorm:1.8652e-01 L10_fnorm:1.8750e-01 L11_fnorm:1.8652e-01 L12_fnorm:1.8750e-01 L1_l1linf:4.1992e-02 L2_l1linf:4.1748e-02 L3_l1linf:4.1260e-02 L4_l1linf:4.2480e-02 L5_l1linf:4.0771e-02 L6_l1linf:4.0527e-02 L7_l1linf:4.1016e-02 L8_l1linf:4.0039e-02 L9_l1linf:3.9307e-02 L10_l1linf:4.0771e-02 L11_l1linf:4.0283e-02 L12_l1linf:4.5898e-02 L1_spectral:2.5922e-03 L2_spectral:2.5872e-03 L3_spectral:2.5902e-03 L4_spectral:2.5825e-03 L5_spectral:2.5958e-03 L6_spectral:2.6036e-03 L7_spectral:2.6047e-03 L8_spectral:2.5659e-03 L9_spectral:2.6001e-03 L10_spectral:2.5913e-03 L11_spectral:2.5902e-03 L12_spectral:2.5884e-03 train_time:293597ms step_avg:43.18ms +[2025-09-11 09:19:56] [Rank 0] PRINT: step:6800/10000 val_loss:4.2777 total_sharp:2.7757e-05 L1_sharp:6.4291e-03 L2_sharp:6.2719e-03 L3_sharp:5.9195e-03 L4_sharp:1.0108e-02 L5_sharp:1.2695e-02 L6_sharp:1.4906e-02 L7_sharp:1.7598e-02 L8_sharp:2.4624e-02 L9_sharp:3.0254e-02 L10_sharp:3.8078e-02 L11_sharp:8.4378e-02 L12_sharp:2.7924e-01 total_fnorm:1.5700e+02 total_l1_linf:3.3178e+05 total_spectral:7.8500e+01 L1_fnorm:1.8848e-01 L2_fnorm:1.8848e-01 L3_fnorm:1.8848e-01 L4_fnorm:1.8848e-01 L5_fnorm:1.8848e-01 L6_fnorm:1.8848e-01 L7_fnorm:1.8848e-01 L8_fnorm:1.8359e-01 L9_fnorm:1.8652e-01 L10_fnorm:1.8750e-01 L11_fnorm:1.8652e-01 L12_fnorm:1.8750e-01 L1_l1linf:4.1992e-02 L2_l1linf:4.1748e-02 L3_l1linf:4.1260e-02 L4_l1linf:4.2480e-02 L5_l1linf:4.0771e-02 
L6_l1linf:4.0527e-02 L7_l1linf:4.1016e-02 L8_l1linf:4.0039e-02 L9_l1linf:3.9307e-02 L10_l1linf:4.0771e-02 L11_l1linf:4.0283e-02 L12_l1linf:4.5898e-02 L1_spectral:2.5922e-03 L2_spectral:2.5872e-03 L3_spectral:2.5902e-03 L4_spectral:2.5825e-03 L5_spectral:2.5958e-03 L6_spectral:2.6036e-03 L7_spectral:2.6047e-03 L8_spectral:2.5659e-03 L9_spectral:2.6001e-03 L10_spectral:2.5913e-03 L11_spectral:2.5902e-03 L12_spectral:2.5884e-03 train_time:293597ms step_avg:43.18ms +[2025-09-11 09:19:57] [Rank 0] step:6801/10000 train_time:295313ms step_avg:43.42ms +[2025-09-11 09:19:57] [Rank 0] step:6801/10000 train_time:295313ms step_avg:43.42ms +[2025-09-11 09:19:58] [Rank 0] step:6821/10000 train_time:296049ms step_avg:43.40ms +[2025-09-11 09:19:58] [Rank 0] step:6821/10000 train_time:296049ms step_avg:43.40ms +[2025-09-11 09:19:59] [Rank 0] step:6841/10000 train_time:296753ms step_avg:43.38ms +[2025-09-11 09:19:59] [Rank 0] step:6841/10000 train_time:296753ms step_avg:43.38ms +[2025-09-11 09:20:00] [Rank 0] step:6861/10000 train_time:297454ms step_avg:43.35ms +[2025-09-11 09:20:00] [Rank 0] step:6861/10000 train_time:297454ms step_avg:43.35ms +[2025-09-11 09:20:00] [Rank 0] step:6881/10000 train_time:298157ms step_avg:43.33ms +[2025-09-11 09:20:00] [Rank 0] step:6881/10000 train_time:298157ms step_avg:43.33ms +[2025-09-11 09:20:01] [Rank 0] step:6901/10000 train_time:298917ms step_avg:43.31ms +[2025-09-11 09:20:01] [Rank 0] step:6901/10000 train_time:298917ms step_avg:43.31ms +[2025-09-11 09:20:02] [Rank 0] step:6921/10000 train_time:299708ms step_avg:43.30ms +[2025-09-11 09:20:02] [Rank 0] step:6921/10000 train_time:299708ms step_avg:43.30ms +[2025-09-11 09:20:03] [Rank 0] step:6941/10000 train_time:300410ms step_avg:43.28ms +[2025-09-11 09:20:03] [Rank 0] step:6941/10000 train_time:300410ms step_avg:43.28ms +[2025-09-11 09:20:03] [Rank 0] step:6961/10000 train_time:301112ms step_avg:43.26ms +[2025-09-11 09:20:03] [Rank 0] step:6961/10000 train_time:301112ms step_avg:43.26ms 
+[2025-09-11 09:20:04] [Rank 0] step:6981/10000 train_time:301815ms step_avg:43.23ms +[2025-09-11 09:20:04] [Rank 0] step:6981/10000 train_time:301815ms step_avg:43.23ms +[2025-09-11 09:20:05] [Rank 0] step:7001/10000 train_time:302517ms step_avg:43.21ms +[2025-09-11 09:20:05] [Rank 0] step:7001/10000 train_time:302517ms step_avg:43.21ms +[2025-09-11 09:20:05] [Rank 0] step:7021/10000 train_time:303218ms step_avg:43.19ms +[2025-09-11 09:20:05] [Rank 0] step:7021/10000 train_time:303218ms step_avg:43.19ms +[2025-09-11 09:20:06] [Rank 0] step:7041/10000 train_time:303917ms step_avg:43.16ms +[2025-09-11 09:20:06] [Rank 0] step:7041/10000 train_time:303917ms step_avg:43.16ms +[2025-09-11 09:20:07] [Rank 0] step:7061/10000 train_time:304620ms step_avg:43.14ms +[2025-09-11 09:20:07] [Rank 0] step:7061/10000 train_time:304620ms step_avg:43.14ms +[2025-09-11 09:20:07] [Rank 0] step:7081/10000 train_time:305320ms step_avg:43.12ms +[2025-09-11 09:20:07] [Rank 0] step:7081/10000 train_time:305320ms step_avg:43.12ms +[2025-09-11 09:20:08] [Rank 0] step:7101/10000 train_time:306021ms step_avg:43.10ms +[2025-09-11 09:20:08] [Rank 0] step:7101/10000 train_time:306021ms step_avg:43.10ms +[2025-09-11 09:20:09] [Rank 0] step:7121/10000 train_time:306724ms step_avg:43.07ms +[2025-09-11 09:20:09] [Rank 0] step:7121/10000 train_time:306724ms step_avg:43.07ms +[2025-09-11 09:20:10] [Rank 0] step:7141/10000 train_time:307425ms step_avg:43.05ms +[2025-09-11 09:20:10] [Rank 0] step:7141/10000 train_time:307425ms step_avg:43.05ms +[2025-09-11 09:20:10] [Rank 0] step:7161/10000 train_time:308126ms step_avg:43.03ms +[2025-09-11 09:20:10] [Rank 0] step:7161/10000 train_time:308126ms step_avg:43.03ms +[2025-09-11 09:20:11] [Rank 0] step:7181/10000 train_time:308826ms step_avg:43.01ms +[2025-09-11 09:20:11] [Rank 0] step:7181/10000 train_time:308826ms step_avg:43.01ms +[2025-09-11 09:20:12] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 09:20:12] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 09:20:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 09:20:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 09:20:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 09:20:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 09:20:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:20:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:20:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 09:20:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 09:20:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 09:20:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 09:20:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 09:20:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 09:20:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 09:20:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 09:20:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 09:20:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 09:20:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 09:20:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 09:20:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 09:20:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 09:20:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 09:20:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 09:20:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 09:20:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 09:20:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 09:20:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 09:20:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 09:20:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 09:20:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 09:20:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 09:20:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 09:20:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 09:20:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 09:20:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 09:20:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 09:20:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 09:20:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 09:20:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 09:20:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 09:20:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 09:20:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 09:20:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 09:20:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:20:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:20:22] [Rank 0] PRINT: step:7200/10000 val_loss:4.2421 total_sharp:2.5907e-05 L1_sharp:6.1760e-03 L2_sharp:3.8203e-03 L3_sharp:5.4224e-03 L4_sharp:8.5881e-03 L5_sharp:1.1966e-02 L6_sharp:1.5486e-02 L7_sharp:2.0561e-02 L8_sharp:2.7800e-02 L9_sharp:3.4133e-02 L10_sharp:4.0931e-02 L11_sharp:8.8879e-02 L12_sharp:3.8436e-01 total_fnorm:1.3800e+02 total_l1_linf:2.8058e+05 total_spectral:6.9000e+01 L1_fnorm:1.6211e-01 L2_fnorm:1.6211e-01 L3_fnorm:1.6211e-01 L4_fnorm:1.6211e-01 L5_fnorm:1.6211e-01 L6_fnorm:1.6211e-01 L7_fnorm:1.6309e-01 L8_fnorm:1.5820e-01 L9_fnorm:1.6016e-01 L10_fnorm:1.6113e-01 L11_fnorm:1.6016e-01 L12_fnorm:1.6016e-01 L1_l1linf:3.3447e-02 L2_l1linf:3.3691e-02 L3_l1linf:3.3203e-02 L4_l1linf:3.3447e-02 L5_l1linf:3.3447e-02 L6_l1linf:3.3203e-02 L7_l1linf:3.3203e-02 L8_l1linf:3.2715e-02 L9_l1linf:3.2959e-02 L10_l1linf:3.2471e-02 L11_l1linf:3.3203e-02 L12_l1linf:3.5156e-02 L1_spectral:2.3054e-03 L2_spectral:2.2806e-03 L3_spectral:2.2803e-03 L4_spectral:2.2813e-03 L5_spectral:2.2939e-03 L6_spectral:2.3030e-03 L7_spectral:2.2843e-03 L8_spectral:2.3020e-03 L9_spectral:2.2880e-03 L10_spectral:2.2810e-03 L11_spectral:2.2903e-03 L12_spectral:2.2744e-03 train_time:309508ms step_avg:42.99ms +[2025-09-11 09:20:22] [Rank 0] PRINT: step:7200/10000 
val_loss:4.2421 total_sharp:2.5907e-05 L1_sharp:6.1760e-03 L2_sharp:3.8203e-03 L3_sharp:5.4224e-03 L4_sharp:8.5881e-03 L5_sharp:1.1966e-02 L6_sharp:1.5486e-02 L7_sharp:2.0561e-02 L8_sharp:2.7800e-02 L9_sharp:3.4133e-02 L10_sharp:4.0931e-02 L11_sharp:8.8879e-02 L12_sharp:3.8436e-01 total_fnorm:1.3800e+02 total_l1_linf:2.8058e+05 total_spectral:6.9000e+01 L1_fnorm:1.6211e-01 L2_fnorm:1.6211e-01 L3_fnorm:1.6211e-01 L4_fnorm:1.6211e-01 L5_fnorm:1.6211e-01 L6_fnorm:1.6211e-01 L7_fnorm:1.6309e-01 L8_fnorm:1.5820e-01 L9_fnorm:1.6016e-01 L10_fnorm:1.6113e-01 L11_fnorm:1.6016e-01 L12_fnorm:1.6016e-01 L1_l1linf:3.3447e-02 L2_l1linf:3.3691e-02 L3_l1linf:3.3203e-02 L4_l1linf:3.3447e-02 L5_l1linf:3.3447e-02 L6_l1linf:3.3203e-02 L7_l1linf:3.3203e-02 L8_l1linf:3.2715e-02 L9_l1linf:3.2959e-02 L10_l1linf:3.2471e-02 L11_l1linf:3.3203e-02 L12_l1linf:3.5156e-02 L1_spectral:2.3054e-03 L2_spectral:2.2806e-03 L3_spectral:2.2803e-03 L4_spectral:2.2813e-03 L5_spectral:2.2939e-03 L6_spectral:2.3030e-03 L7_spectral:2.2843e-03 L8_spectral:2.3020e-03 L9_spectral:2.2880e-03 L10_spectral:2.2810e-03 L11_spectral:2.2903e-03 L12_spectral:2.2744e-03 train_time:309508ms step_avg:42.99ms +[2025-09-11 09:20:24] [Rank 0] step:7201/10000 train_time:311447ms step_avg:43.25ms +[2025-09-11 09:20:24] [Rank 0] step:7201/10000 train_time:311447ms step_avg:43.25ms +[2025-09-11 09:20:25] [Rank 0] step:7221/10000 train_time:312174ms step_avg:43.23ms +[2025-09-11 09:20:25] [Rank 0] step:7221/10000 train_time:312174ms step_avg:43.23ms +[2025-09-11 09:20:26] [Rank 0] step:7241/10000 train_time:312876ms step_avg:43.21ms +[2025-09-11 09:20:26] [Rank 0] step:7241/10000 train_time:312876ms step_avg:43.21ms +[2025-09-11 09:20:26] [Rank 0] step:7261/10000 train_time:313579ms step_avg:43.19ms +[2025-09-11 09:20:26] [Rank 0] step:7261/10000 train_time:313579ms step_avg:43.19ms +[2025-09-11 09:20:27] [Rank 0] step:7281/10000 train_time:314286ms step_avg:43.17ms +[2025-09-11 09:20:27] [Rank 0] step:7281/10000 
train_time:314286ms step_avg:43.17ms +[2025-09-11 09:20:28] [Rank 0] step:7301/10000 train_time:314988ms step_avg:43.14ms +[2025-09-11 09:20:28] [Rank 0] step:7301/10000 train_time:314988ms step_avg:43.14ms +[2025-09-11 09:20:29] [Rank 0] step:7321/10000 train_time:315689ms step_avg:43.12ms +[2025-09-11 09:20:29] [Rank 0] step:7321/10000 train_time:315689ms step_avg:43.12ms +[2025-09-11 09:20:29] [Rank 0] step:7341/10000 train_time:316391ms step_avg:43.10ms +[2025-09-11 09:20:29] [Rank 0] step:7341/10000 train_time:316391ms step_avg:43.10ms +[2025-09-11 09:20:30] [Rank 0] step:7361/10000 train_time:317093ms step_avg:43.08ms +[2025-09-11 09:20:30] [Rank 0] step:7361/10000 train_time:317093ms step_avg:43.08ms +[2025-09-11 09:20:31] [Rank 0] step:7381/10000 train_time:317795ms step_avg:43.06ms +[2025-09-11 09:20:31] [Rank 0] step:7381/10000 train_time:317795ms step_avg:43.06ms +[2025-09-11 09:20:31] [Rank 0] step:7401/10000 train_time:318494ms step_avg:43.03ms +[2025-09-11 09:20:31] [Rank 0] step:7401/10000 train_time:318494ms step_avg:43.03ms +[2025-09-11 09:20:32] [Rank 0] step:7421/10000 train_time:319198ms step_avg:43.01ms +[2025-09-11 09:20:32] [Rank 0] step:7421/10000 train_time:319198ms step_avg:43.01ms +[2025-09-11 09:20:33] [Rank 0] step:7441/10000 train_time:319900ms step_avg:42.99ms +[2025-09-11 09:20:33] [Rank 0] step:7441/10000 train_time:319900ms step_avg:42.99ms +[2025-09-11 09:20:33] [Rank 0] step:7461/10000 train_time:320601ms step_avg:42.97ms +[2025-09-11 09:20:33] [Rank 0] step:7461/10000 train_time:320601ms step_avg:42.97ms +[2025-09-11 09:20:34] [Rank 0] step:7481/10000 train_time:321304ms step_avg:42.95ms +[2025-09-11 09:20:34] [Rank 0] step:7481/10000 train_time:321304ms step_avg:42.95ms +[2025-09-11 09:20:35] [Rank 0] step:7501/10000 train_time:322005ms step_avg:42.93ms +[2025-09-11 09:20:35] [Rank 0] step:7501/10000 train_time:322005ms step_avg:42.93ms +[2025-09-11 09:20:36] [Rank 0] step:7521/10000 train_time:322708ms step_avg:42.91ms 
+[2025-09-11 09:20:36] [Rank 0] step:7521/10000 train_time:322708ms step_avg:42.91ms +[2025-09-11 09:20:36] [Rank 0] step:7541/10000 train_time:323407ms step_avg:42.89ms +[2025-09-11 09:20:36] [Rank 0] step:7541/10000 train_time:323407ms step_avg:42.89ms +[2025-09-11 09:20:37] [Rank 0] step:7561/10000 train_time:324111ms step_avg:42.87ms +[2025-09-11 09:20:37] [Rank 0] step:7561/10000 train_time:324111ms step_avg:42.87ms +[2025-09-11 09:20:38] [Rank 0] step:7581/10000 train_time:324813ms step_avg:42.85ms +[2025-09-11 09:20:38] [Rank 0] step:7581/10000 train_time:324813ms step_avg:42.85ms +[2025-09-11 09:20:38] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 09:20:38] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 09:20:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 09:20:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 09:20:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 09:20:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 09:20:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:20:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:20:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 09:20:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 09:20:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 09:20:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 09:20:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 09:20:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 09:20:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 09:20:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 09:20:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 09:20:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 09:20:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 09:20:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 09:20:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 09:20:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 09:20:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 09:20:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 09:20:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 09:20:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 09:20:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 09:20:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 09:20:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 09:20:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 09:20:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 09:20:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 09:20:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 09:20:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 09:20:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 09:20:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 09:20:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 09:20:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 09:20:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 09:20:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 09:20:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 09:20:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 09:20:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 09:20:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 09:20:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:20:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:20:49] [Rank 0] PRINT: step:7600/10000 val_loss:4.2149 total_sharp:2.4967e-05 L1_sharp:5.7573e-03 L2_sharp:3.8219e-03 L3_sharp:3.9559e-03 L4_sharp:8.9318e-03 L5_sharp:1.4547e-02 L6_sharp:1.4755e-02 L7_sharp:1.8755e-02 L8_sharp:2.4407e-02 L9_sharp:2.7823e-02 L10_sharp:3.7543e-02 L11_sharp:7.6225e-02 L12_sharp:4.2366e-01 total_fnorm:1.1600e+02 total_l1_linf:2.2733e+05 total_spectral:5.8250e+01 L1_fnorm:1.3574e-01 L2_fnorm:1.3574e-01 L3_fnorm:1.3574e-01 L4_fnorm:1.3574e-01 L5_fnorm:1.3574e-01 L6_fnorm:1.3574e-01 L7_fnorm:1.3574e-01 L8_fnorm:1.3086e-01 L9_fnorm:1.3477e-01 L10_fnorm:1.3477e-01 L11_fnorm:1.3379e-01 L12_fnorm:1.3379e-01 L1_l1linf:2.7222e-02 L2_l1linf:2.6367e-02 L3_l1linf:2.7832e-02 L4_l1linf:2.5757e-02 L5_l1linf:2.5879e-02 L6_l1linf:2.5879e-02 L7_l1linf:2.6489e-02 L8_l1linf:2.6123e-02 L9_l1linf:2.5146e-02 L10_l1linf:2.5391e-02 L11_l1linf:2.6245e-02 L12_l1linf:2.8320e-02 L1_spectral:1.9536e-03 L2_spectral:1.9734e-03 L3_spectral:1.9752e-03 L4_spectral:1.9735e-03 L5_spectral:1.9678e-03 L6_spectral:1.9598e-03 L7_spectral:1.9679e-03 L8_spectral:1.9969e-03 L9_spectral:1.9773e-03 L10_spectral:1.9665e-03 L11_spectral:1.9591e-03 L12_spectral:1.9692e-03 train_time:325497ms step_avg:42.83ms +[2025-09-11 09:20:49] [Rank 0] PRINT: step:7600/10000 val_loss:4.2149 total_sharp:2.4967e-05 L1_sharp:5.7573e-03 L2_sharp:3.8219e-03 L3_sharp:3.9559e-03 L4_sharp:8.9318e-03 L5_sharp:1.4547e-02 L6_sharp:1.4755e-02 L7_sharp:1.8755e-02 L8_sharp:2.4407e-02 L9_sharp:2.7823e-02 L10_sharp:3.7543e-02 L11_sharp:7.6225e-02 L12_sharp:4.2366e-01 total_fnorm:1.1600e+02 total_l1_linf:2.2733e+05 total_spectral:5.8250e+01 L1_fnorm:1.3574e-01 L2_fnorm:1.3574e-01 L3_fnorm:1.3574e-01 L4_fnorm:1.3574e-01 L5_fnorm:1.3574e-01 L6_fnorm:1.3574e-01 L7_fnorm:1.3574e-01 L8_fnorm:1.3086e-01 L9_fnorm:1.3477e-01 L10_fnorm:1.3477e-01 L11_fnorm:1.3379e-01 L12_fnorm:1.3379e-01 L1_l1linf:2.7222e-02 L2_l1linf:2.6367e-02 L3_l1linf:2.7832e-02 L4_l1linf:2.5757e-02 L5_l1linf:2.5879e-02 
L6_l1linf:2.5879e-02 L7_l1linf:2.6489e-02 L8_l1linf:2.6123e-02 L9_l1linf:2.5146e-02 L10_l1linf:2.5391e-02 L11_l1linf:2.6245e-02 L12_l1linf:2.8320e-02 L1_spectral:1.9536e-03 L2_spectral:1.9734e-03 L3_spectral:1.9752e-03 L4_spectral:1.9735e-03 L5_spectral:1.9678e-03 L6_spectral:1.9598e-03 L7_spectral:1.9679e-03 L8_spectral:1.9969e-03 L9_spectral:1.9773e-03 L10_spectral:1.9665e-03 L11_spectral:1.9591e-03 L12_spectral:1.9692e-03 train_time:325497ms step_avg:42.83ms +[2025-09-11 09:20:51] [Rank 0] step:7601/10000 train_time:327418ms step_avg:43.08ms +[2025-09-11 09:20:51] [Rank 0] step:7601/10000 train_time:327418ms step_avg:43.08ms +[2025-09-11 09:20:52] [Rank 0] step:7621/10000 train_time:328133ms step_avg:43.06ms +[2025-09-11 09:20:52] [Rank 0] step:7621/10000 train_time:328133ms step_avg:43.06ms +[2025-09-11 09:20:53] [Rank 0] step:7641/10000 train_time:328837ms step_avg:43.04ms +[2025-09-11 09:20:53] [Rank 0] step:7641/10000 train_time:328837ms step_avg:43.04ms +[2025-09-11 09:20:53] [Rank 0] step:7661/10000 train_time:329540ms step_avg:43.02ms +[2025-09-11 09:20:53] [Rank 0] step:7661/10000 train_time:329540ms step_avg:43.02ms +[2025-09-11 09:20:54] [Rank 0] step:7681/10000 train_time:330243ms step_avg:42.99ms +[2025-09-11 09:20:54] [Rank 0] step:7681/10000 train_time:330243ms step_avg:42.99ms +[2025-09-11 09:20:55] [Rank 0] step:7701/10000 train_time:330946ms step_avg:42.97ms +[2025-09-11 09:20:55] [Rank 0] step:7701/10000 train_time:330946ms step_avg:42.97ms +[2025-09-11 09:20:55] [Rank 0] step:7721/10000 train_time:331650ms step_avg:42.95ms +[2025-09-11 09:20:55] [Rank 0] step:7721/10000 train_time:331650ms step_avg:42.95ms +[2025-09-11 09:20:56] [Rank 0] step:7741/10000 train_time:332354ms step_avg:42.93ms +[2025-09-11 09:20:56] [Rank 0] step:7741/10000 train_time:332354ms step_avg:42.93ms +[2025-09-11 09:20:57] [Rank 0] step:7761/10000 train_time:333055ms step_avg:42.91ms +[2025-09-11 09:20:57] [Rank 0] step:7761/10000 train_time:333055ms step_avg:42.91ms 
+[2025-09-11 09:20:57] [Rank 0] step:7781/10000 train_time:333760ms step_avg:42.89ms +[2025-09-11 09:20:57] [Rank 0] step:7781/10000 train_time:333760ms step_avg:42.89ms +[2025-09-11 09:20:58] [Rank 0] step:7801/10000 train_time:334463ms step_avg:42.87ms +[2025-09-11 09:20:58] [Rank 0] step:7801/10000 train_time:334463ms step_avg:42.87ms +[2025-09-11 09:20:59] [Rank 0] step:7821/10000 train_time:335167ms step_avg:42.85ms +[2025-09-11 09:20:59] [Rank 0] step:7821/10000 train_time:335167ms step_avg:42.85ms +[2025-09-11 09:21:00] [Rank 0] step:7841/10000 train_time:335871ms step_avg:42.84ms +[2025-09-11 09:21:00] [Rank 0] step:7841/10000 train_time:335871ms step_avg:42.84ms +[2025-09-11 09:21:00] [Rank 0] step:7861/10000 train_time:336577ms step_avg:42.82ms +[2025-09-11 09:21:00] [Rank 0] step:7861/10000 train_time:336577ms step_avg:42.82ms +[2025-09-11 09:21:01] [Rank 0] step:7881/10000 train_time:337280ms step_avg:42.80ms +[2025-09-11 09:21:01] [Rank 0] step:7881/10000 train_time:337280ms step_avg:42.80ms +[2025-09-11 09:21:02] [Rank 0] step:7901/10000 train_time:337984ms step_avg:42.78ms +[2025-09-11 09:21:02] [Rank 0] step:7901/10000 train_time:337984ms step_avg:42.78ms +[2025-09-11 09:21:02] [Rank 0] step:7921/10000 train_time:338687ms step_avg:42.76ms +[2025-09-11 09:21:02] [Rank 0] step:7921/10000 train_time:338687ms step_avg:42.76ms +[2025-09-11 09:21:03] [Rank 0] step:7941/10000 train_time:339395ms step_avg:42.74ms +[2025-09-11 09:21:03] [Rank 0] step:7941/10000 train_time:339395ms step_avg:42.74ms +[2025-09-11 09:21:04] [Rank 0] step:7961/10000 train_time:340096ms step_avg:42.72ms +[2025-09-11 09:21:04] [Rank 0] step:7961/10000 train_time:340096ms step_avg:42.72ms +[2025-09-11 09:21:05] [Rank 0] step:7981/10000 train_time:340802ms step_avg:42.70ms +[2025-09-11 09:21:05] [Rank 0] step:7981/10000 train_time:340802ms step_avg:42.70ms +[2025-09-11 09:21:05] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 09:21:05] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 09:21:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 09:21:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 09:21:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 09:21:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 09:21:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:21:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:21:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 09:21:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 09:21:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 09:21:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 09:21:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 09:21:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 09:21:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 09:21:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 09:21:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 09:21:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 09:21:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 09:21:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 09:21:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 09:21:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 09:21:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 09:21:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 09:21:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 09:21:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 09:21:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 09:21:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 09:21:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 09:21:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 09:21:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 09:21:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 09:21:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 09:21:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 09:21:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 09:21:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 09:21:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 09:21:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 09:21:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 09:21:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 09:21:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 09:21:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 09:21:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 09:21:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 09:21:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:21:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:21:17] [Rank 0] PRINT: step:8000/10000 val_loss:4.1871 total_sharp:2.2715e-05 L1_sharp:5.2224e-03 L2_sharp:7.0595e-03 L3_sharp:7.4584e-03 L4_sharp:9.4975e-03 L5_sharp:1.1735e-02 L6_sharp:1.5795e-02 L7_sharp:1.7269e-02 L8_sharp:2.1457e-02 L9_sharp:2.8662e-02 L10_sharp:3.7471e-02 L11_sharp:8.1956e-02 L12_sharp:3.9948e-01 total_fnorm:1.0200e+02 total_l1_linf:1.9046e+05 total_spectral:5.1000e+01 L1_fnorm:1.1035e-01 L2_fnorm:1.1084e-01 L3_fnorm:1.1084e-01 L4_fnorm:1.1084e-01 L5_fnorm:1.1035e-01 L6_fnorm:1.1084e-01 L7_fnorm:1.1084e-01 L8_fnorm:1.0645e-01 L9_fnorm:1.0889e-01 L10_fnorm:1.0938e-01 L11_fnorm:1.0840e-01 L12_fnorm:1.0791e-01 L1_l1linf:2.0386e-02 L2_l1linf:2.0386e-02 L3_l1linf:1.9775e-02 L4_l1linf:1.9653e-02 L5_l1linf:1.9287e-02 L6_l1linf:1.9897e-02 L7_l1linf:1.9531e-02 L8_l1linf:1.9653e-02 L9_l1linf:1.9653e-02 L10_l1linf:1.8677e-02 L11_l1linf:1.9897e-02 L12_l1linf:2.0752e-02 L1_spectral:1.6393e-03 L2_spectral:1.6411e-03 L3_spectral:1.6539e-03 L4_spectral:1.6560e-03 L5_spectral:1.6278e-03 L6_spectral:1.6373e-03 L7_spectral:1.6409e-03 L8_spectral:1.6602e-03 L9_spectral:1.6318e-03 L10_spectral:1.6402e-03 L11_spectral:1.6381e-03 L12_spectral:1.6533e-03 train_time:341483ms step_avg:42.69ms +[2025-09-11 09:21:17] [Rank 0] PRINT: step:8000/10000 
val_loss:4.1871 total_sharp:2.2715e-05 L1_sharp:5.2224e-03 L2_sharp:7.0595e-03 L3_sharp:7.4584e-03 L4_sharp:9.4975e-03 L5_sharp:1.1735e-02 L6_sharp:1.5795e-02 L7_sharp:1.7269e-02 L8_sharp:2.1457e-02 L9_sharp:2.8662e-02 L10_sharp:3.7471e-02 L11_sharp:8.1956e-02 L12_sharp:3.9948e-01 total_fnorm:1.0200e+02 total_l1_linf:1.9046e+05 total_spectral:5.1000e+01 L1_fnorm:1.1035e-01 L2_fnorm:1.1084e-01 L3_fnorm:1.1084e-01 L4_fnorm:1.1084e-01 L5_fnorm:1.1035e-01 L6_fnorm:1.1084e-01 L7_fnorm:1.1084e-01 L8_fnorm:1.0645e-01 L9_fnorm:1.0889e-01 L10_fnorm:1.0938e-01 L11_fnorm:1.0840e-01 L12_fnorm:1.0791e-01 L1_l1linf:2.0386e-02 L2_l1linf:2.0386e-02 L3_l1linf:1.9775e-02 L4_l1linf:1.9653e-02 L5_l1linf:1.9287e-02 L6_l1linf:1.9897e-02 L7_l1linf:1.9531e-02 L8_l1linf:1.9653e-02 L9_l1linf:1.9653e-02 L10_l1linf:1.8677e-02 L11_l1linf:1.9897e-02 L12_l1linf:2.0752e-02 L1_spectral:1.6393e-03 L2_spectral:1.6411e-03 L3_spectral:1.6539e-03 L4_spectral:1.6560e-03 L5_spectral:1.6278e-03 L6_spectral:1.6373e-03 L7_spectral:1.6409e-03 L8_spectral:1.6602e-03 L9_spectral:1.6318e-03 L10_spectral:1.6402e-03 L11_spectral:1.6381e-03 L12_spectral:1.6533e-03 train_time:341483ms step_avg:42.69ms +[2025-09-11 09:21:19] [Rank 0] step:8001/10000 train_time:343491ms step_avg:42.93ms +[2025-09-11 09:21:19] [Rank 0] step:8001/10000 train_time:343491ms step_avg:42.93ms +[2025-09-11 09:21:20] [Rank 0] step:8021/10000 train_time:344207ms step_avg:42.91ms +[2025-09-11 09:21:20] [Rank 0] step:8021/10000 train_time:344207ms step_avg:42.91ms +[2025-09-11 09:21:20] [Rank 0] step:8041/10000 train_time:344911ms step_avg:42.89ms +[2025-09-11 09:21:20] [Rank 0] step:8041/10000 train_time:344911ms step_avg:42.89ms +[2025-09-11 09:21:21] [Rank 0] step:8061/10000 train_time:345616ms step_avg:42.88ms +[2025-09-11 09:21:21] [Rank 0] step:8061/10000 train_time:345616ms step_avg:42.88ms +[2025-09-11 09:21:22] [Rank 0] step:8081/10000 train_time:346601ms step_avg:42.89ms +[2025-09-11 09:21:22] [Rank 0] step:8081/10000 
train_time:346601ms step_avg:42.89ms +[2025-09-11 09:21:23] [Rank 0] step:8101/10000 train_time:347304ms step_avg:42.87ms +[2025-09-11 09:21:23] [Rank 0] step:8101/10000 train_time:347304ms step_avg:42.87ms +[2025-09-11 09:21:23] [Rank 0] step:8121/10000 train_time:348011ms step_avg:42.85ms +[2025-09-11 09:21:23] [Rank 0] step:8121/10000 train_time:348011ms step_avg:42.85ms +[2025-09-11 09:21:25] [Rank 0] step:8141/10000 train_time:349102ms step_avg:42.88ms +[2025-09-11 09:21:25] [Rank 0] step:8141/10000 train_time:349102ms step_avg:42.88ms +[2025-09-11 09:21:25] [Rank 0] step:8161/10000 train_time:349809ms step_avg:42.86ms +[2025-09-11 09:21:25] [Rank 0] step:8161/10000 train_time:349809ms step_avg:42.86ms +[2025-09-11 09:21:26] [Rank 0] step:8181/10000 train_time:350524ms step_avg:42.85ms +[2025-09-11 09:21:26] [Rank 0] step:8181/10000 train_time:350524ms step_avg:42.85ms +[2025-09-11 09:21:27] [Rank 0] step:8201/10000 train_time:351236ms step_avg:42.83ms +[2025-09-11 09:21:27] [Rank 0] step:8201/10000 train_time:351236ms step_avg:42.83ms +[2025-09-11 09:21:27] [Rank 0] step:8221/10000 train_time:351946ms step_avg:42.81ms +[2025-09-11 09:21:27] [Rank 0] step:8221/10000 train_time:351946ms step_avg:42.81ms +[2025-09-11 09:21:28] [Rank 0] step:8241/10000 train_time:352665ms step_avg:42.79ms +[2025-09-11 09:21:28] [Rank 0] step:8241/10000 train_time:352665ms step_avg:42.79ms +[2025-09-11 09:21:29] [Rank 0] step:8261/10000 train_time:353373ms step_avg:42.78ms +[2025-09-11 09:21:29] [Rank 0] step:8261/10000 train_time:353373ms step_avg:42.78ms +[2025-09-11 09:21:29] [Rank 0] step:8281/10000 train_time:354080ms step_avg:42.76ms +[2025-09-11 09:21:29] [Rank 0] step:8281/10000 train_time:354080ms step_avg:42.76ms +[2025-09-11 09:21:30] [Rank 0] step:8301/10000 train_time:354791ms step_avg:42.74ms +[2025-09-11 09:21:30] [Rank 0] step:8301/10000 train_time:354791ms step_avg:42.74ms +[2025-09-11 09:21:31] [Rank 0] step:8321/10000 train_time:355500ms step_avg:42.72ms 
+[2025-09-11 09:21:31] [Rank 0] step:8321/10000 train_time:355500ms step_avg:42.72ms +[2025-09-11 09:21:32] [Rank 0] step:8341/10000 train_time:356216ms step_avg:42.71ms +[2025-09-11 09:21:32] [Rank 0] step:8341/10000 train_time:356216ms step_avg:42.71ms +[2025-09-11 09:21:32] [Rank 0] step:8361/10000 train_time:356922ms step_avg:42.69ms +[2025-09-11 09:21:32] [Rank 0] step:8361/10000 train_time:356922ms step_avg:42.69ms +[2025-09-11 09:21:33] [Rank 0] step:8381/10000 train_time:357635ms step_avg:42.67ms +[2025-09-11 09:21:33] [Rank 0] step:8381/10000 train_time:357635ms step_avg:42.67ms +[2025-09-11 09:21:34] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 09:21:34] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 09:21:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 09:21:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 09:21:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 09:21:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 09:21:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:21:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:21:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 09:21:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 09:21:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 09:21:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 09:21:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 09:21:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 09:21:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 09:21:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 09:21:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 09:21:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 09:21:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 09:21:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 09:21:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 09:21:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 09:21:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 09:21:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 09:21:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 09:21:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 09:21:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 09:21:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 09:21:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 09:21:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 09:21:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 09:21:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 09:21:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 09:21:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 09:21:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 09:21:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 09:21:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 09:21:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 09:21:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 09:21:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 09:21:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 09:21:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 09:21:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 09:21:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 09:21:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:21:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:21:44] [Rank 0] PRINT: step:8400/10000 val_loss:4.1605 total_sharp:1.8759e-05 L1_sharp:7.0745e-03 L2_sharp:6.0496e-03 L3_sharp:6.5159e-03 L4_sharp:9.0325e-03 L5_sharp:9.7360e-03 L6_sharp:1.4173e-02 L7_sharp:1.6236e-02 L8_sharp:2.0754e-02 L9_sharp:2.3589e-02 L10_sharp:3.4247e-02 L11_sharp:5.9593e-02 L12_sharp:6.2129e-01 total_fnorm:8.3000e+01 total_l1_linf:1.4336e+05 total_spectral:4.1500e+01 L1_fnorm:8.5938e-02 L2_fnorm:8.5938e-02 L3_fnorm:8.5938e-02 L4_fnorm:8.6426e-02 L5_fnorm:8.5938e-02 L6_fnorm:8.5938e-02 L7_fnorm:8.6426e-02 L8_fnorm:8.2520e-02 L9_fnorm:8.4473e-02 L10_fnorm:8.4961e-02 L11_fnorm:8.4473e-02 L12_fnorm:8.3984e-02 L1_l1linf:1.4221e-02 L2_l1linf:1.4404e-02 L3_l1linf:1.4099e-02 L4_l1linf:1.3916e-02 L5_l1linf:1.3977e-02 L6_l1linf:1.4404e-02 L7_l1linf:1.4099e-02 L8_l1linf:1.4160e-02 L9_l1linf:1.3550e-02 L10_l1linf:1.3489e-02 L11_l1linf:1.4099e-02 L12_l1linf:1.5381e-02 L1_spectral:1.2960e-03 L2_spectral:1.3098e-03 L3_spectral:1.3142e-03 L4_spectral:1.3182e-03 L5_spectral:1.2958e-03 L6_spectral:1.3014e-03 L7_spectral:1.3128e-03 L8_spectral:1.3430e-03 L9_spectral:1.3047e-03 L10_spectral:1.3070e-03 L11_spectral:1.3132e-03 L12_spectral:1.3197e-03 train_time:358328ms step_avg:42.66ms +[2025-09-11 09:21:44] [Rank 0] PRINT: step:8400/10000 val_loss:4.1605 total_sharp:1.8759e-05 L1_sharp:7.0745e-03 L2_sharp:6.0496e-03 L3_sharp:6.5159e-03 L4_sharp:9.0325e-03 L5_sharp:9.7360e-03 L6_sharp:1.4173e-02 L7_sharp:1.6236e-02 L8_sharp:2.0754e-02 L9_sharp:2.3589e-02 L10_sharp:3.4247e-02 L11_sharp:5.9593e-02 L12_sharp:6.2129e-01 total_fnorm:8.3000e+01 total_l1_linf:1.4336e+05 total_spectral:4.1500e+01 L1_fnorm:8.5938e-02 L2_fnorm:8.5938e-02 L3_fnorm:8.5938e-02 L4_fnorm:8.6426e-02 L5_fnorm:8.5938e-02 L6_fnorm:8.5938e-02 L7_fnorm:8.6426e-02 L8_fnorm:8.2520e-02 L9_fnorm:8.4473e-02 L10_fnorm:8.4961e-02 L11_fnorm:8.4473e-02 L12_fnorm:8.3984e-02 L1_l1linf:1.4221e-02 L2_l1linf:1.4404e-02 L3_l1linf:1.4099e-02 L4_l1linf:1.3916e-02 L5_l1linf:1.3977e-02 
L6_l1linf:1.4404e-02 L7_l1linf:1.4099e-02 L8_l1linf:1.4160e-02 L9_l1linf:1.3550e-02 L10_l1linf:1.3489e-02 L11_l1linf:1.4099e-02 L12_l1linf:1.5381e-02 L1_spectral:1.2960e-03 L2_spectral:1.3098e-03 L3_spectral:1.3142e-03 L4_spectral:1.3182e-03 L5_spectral:1.2958e-03 L6_spectral:1.3014e-03 L7_spectral:1.3128e-03 L8_spectral:1.3430e-03 L9_spectral:1.3047e-03 L10_spectral:1.3070e-03 L11_spectral:1.3132e-03 L12_spectral:1.3197e-03 train_time:358328ms step_avg:42.66ms +[2025-09-11 09:21:46] [Rank 0] step:8401/10000 train_time:360234ms step_avg:42.88ms +[2025-09-11 09:21:46] [Rank 0] step:8401/10000 train_time:360234ms step_avg:42.88ms +[2025-09-11 09:21:47] [Rank 0] step:8421/10000 train_time:360971ms step_avg:42.87ms +[2025-09-11 09:21:47] [Rank 0] step:8421/10000 train_time:360971ms step_avg:42.87ms +[2025-09-11 09:21:48] [Rank 0] step:8441/10000 train_time:361683ms step_avg:42.85ms +[2025-09-11 09:21:48] [Rank 0] step:8441/10000 train_time:361683ms step_avg:42.85ms +[2025-09-11 09:21:49] [Rank 0] step:8461/10000 train_time:362395ms step_avg:42.83ms +[2025-09-11 09:21:49] [Rank 0] step:8461/10000 train_time:362395ms step_avg:42.83ms +[2025-09-11 09:21:49] [Rank 0] step:8481/10000 train_time:363107ms step_avg:42.81ms +[2025-09-11 09:21:49] [Rank 0] step:8481/10000 train_time:363107ms step_avg:42.81ms +[2025-09-11 09:21:50] [Rank 0] step:8501/10000 train_time:363817ms step_avg:42.80ms +[2025-09-11 09:21:50] [Rank 0] step:8501/10000 train_time:363817ms step_avg:42.80ms +[2025-09-11 09:21:51] [Rank 0] step:8521/10000 train_time:364527ms step_avg:42.78ms +[2025-09-11 09:21:51] [Rank 0] step:8521/10000 train_time:364527ms step_avg:42.78ms +[2025-09-11 09:21:51] [Rank 0] step:8541/10000 train_time:365239ms step_avg:42.76ms +[2025-09-11 09:21:51] [Rank 0] step:8541/10000 train_time:365239ms step_avg:42.76ms +[2025-09-11 09:21:52] [Rank 0] step:8561/10000 train_time:365954ms step_avg:42.75ms +[2025-09-11 09:21:52] [Rank 0] step:8561/10000 train_time:365954ms step_avg:42.75ms 
+[2025-09-11 09:21:53] [Rank 0] step:8581/10000 train_time:366668ms step_avg:42.73ms +[2025-09-11 09:21:53] [Rank 0] step:8581/10000 train_time:366668ms step_avg:42.73ms +[2025-09-11 09:21:53] [Rank 0] step:8601/10000 train_time:367380ms step_avg:42.71ms +[2025-09-11 09:21:53] [Rank 0] step:8601/10000 train_time:367380ms step_avg:42.71ms +[2025-09-11 09:21:54] [Rank 0] step:8621/10000 train_time:368089ms step_avg:42.70ms +[2025-09-11 09:21:54] [Rank 0] step:8621/10000 train_time:368089ms step_avg:42.70ms +[2025-09-11 09:21:55] [Rank 0] step:8641/10000 train_time:368799ms step_avg:42.68ms +[2025-09-11 09:21:55] [Rank 0] step:8641/10000 train_time:368799ms step_avg:42.68ms +[2025-09-11 09:21:56] [Rank 0] step:8661/10000 train_time:369510ms step_avg:42.66ms +[2025-09-11 09:21:56] [Rank 0] step:8661/10000 train_time:369510ms step_avg:42.66ms +[2025-09-11 09:21:56] [Rank 0] step:8681/10000 train_time:370222ms step_avg:42.65ms +[2025-09-11 09:21:56] [Rank 0] step:8681/10000 train_time:370222ms step_avg:42.65ms +[2025-09-11 09:21:57] [Rank 0] step:8701/10000 train_time:370942ms step_avg:42.63ms +[2025-09-11 09:21:57] [Rank 0] step:8701/10000 train_time:370942ms step_avg:42.63ms +[2025-09-11 09:21:58] [Rank 0] step:8721/10000 train_time:371655ms step_avg:42.62ms +[2025-09-11 09:21:58] [Rank 0] step:8721/10000 train_time:371655ms step_avg:42.62ms +[2025-09-11 09:21:58] [Rank 0] step:8741/10000 train_time:372362ms step_avg:42.60ms +[2025-09-11 09:21:58] [Rank 0] step:8741/10000 train_time:372362ms step_avg:42.60ms +[2025-09-11 09:21:59] [Rank 0] step:8761/10000 train_time:373076ms step_avg:42.58ms +[2025-09-11 09:21:59] [Rank 0] step:8761/10000 train_time:373076ms step_avg:42.58ms +[2025-09-11 09:22:00] [Rank 0] step:8781/10000 train_time:373783ms step_avg:42.57ms +[2025-09-11 09:22:00] [Rank 0] step:8781/10000 train_time:373783ms step_avg:42.57ms +[2025-09-11 09:22:01] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 09:22:01] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 09:22:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 09:22:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 09:22:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 09:22:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 09:22:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:22:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:22:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 09:22:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 09:22:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 09:22:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 09:22:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 09:22:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 09:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 09:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 09:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 09:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 09:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 09:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 09:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 09:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 09:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 09:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 09:22:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 09:22:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 09:22:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 09:22:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 09:22:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 09:22:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 09:22:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 09:22:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 09:22:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 09:22:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 09:22:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 09:22:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 09:22:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 09:22:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 09:22:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 09:22:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 09:22:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 09:22:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 09:22:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 09:22:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 09:22:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:22:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:22:14] [Rank 0] PRINT: step:8800/10000 val_loss:4.1487 total_sharp:1.8165e-05 L1_sharp:3.7587e-03 L2_sharp:4.6278e-03 L3_sharp:3.3905e-03 L4_sharp:7.4506e-03 L5_sharp:8.7908e-03 L6_sharp:1.1634e-02 L7_sharp:1.2595e-02 L8_sharp:1.7831e-02 L9_sharp:2.1676e-02 L10_sharp:2.9605e-02 L11_sharp:5.0714e-02 L12_sharp:2.0976e-01 total_fnorm:6.1250e+01 total_l1_linf:9.6256e+04 total_spectral:3.0625e+01 L1_fnorm:6.1768e-02 L2_fnorm:6.1768e-02 L3_fnorm:6.1768e-02 L4_fnorm:6.2012e-02 L5_fnorm:6.1279e-02 L6_fnorm:6.1768e-02 L7_fnorm:6.2012e-02 L8_fnorm:5.9570e-02 L9_fnorm:6.0791e-02 L10_fnorm:6.0791e-02 L11_fnorm:6.0303e-02 L12_fnorm:5.9326e-02 L1_l1linf:9.4604e-03 L2_l1linf:9.3994e-03 L3_l1linf:9.2163e-03 L4_l1linf:9.3384e-03 L5_l1linf:9.0942e-03 L6_l1linf:9.3384e-03 L7_l1linf:9.0942e-03 L8_l1linf:9.8877e-03 L9_l1linf:8.7280e-03 L10_l1linf:9.1553e-03 L11_l1linf:8.9722e-03 L12_l1linf:9.2163e-03 L1_spectral:9.5436e-04 L2_spectral:9.6706e-04 L3_spectral:1.0050e-03 L4_spectral:9.7355e-04 L5_spectral:9.6268e-04 L6_spectral:9.5392e-04 L7_spectral:9.5627e-04 L8_spectral:1.0042e-03 L9_spectral:9.6583e-04 L10_spectral:9.5502e-04 L11_spectral:9.4875e-04 L12_spectral:9.8307e-04 train_time:374472ms step_avg:42.55ms +[2025-09-11 09:22:14] [Rank 0] PRINT: step:8800/10000 
val_loss:4.1487 total_sharp:1.8165e-05 L1_sharp:3.7587e-03 L2_sharp:4.6278e-03 L3_sharp:3.3905e-03 L4_sharp:7.4506e-03 L5_sharp:8.7908e-03 L6_sharp:1.1634e-02 L7_sharp:1.2595e-02 L8_sharp:1.7831e-02 L9_sharp:2.1676e-02 L10_sharp:2.9605e-02 L11_sharp:5.0714e-02 L12_sharp:2.0976e-01 total_fnorm:6.1250e+01 total_l1_linf:9.6256e+04 total_spectral:3.0625e+01 L1_fnorm:6.1768e-02 L2_fnorm:6.1768e-02 L3_fnorm:6.1768e-02 L4_fnorm:6.2012e-02 L5_fnorm:6.1279e-02 L6_fnorm:6.1768e-02 L7_fnorm:6.2012e-02 L8_fnorm:5.9570e-02 L9_fnorm:6.0791e-02 L10_fnorm:6.0791e-02 L11_fnorm:6.0303e-02 L12_fnorm:5.9326e-02 L1_l1linf:9.4604e-03 L2_l1linf:9.3994e-03 L3_l1linf:9.2163e-03 L4_l1linf:9.3384e-03 L5_l1linf:9.0942e-03 L6_l1linf:9.3384e-03 L7_l1linf:9.0942e-03 L8_l1linf:9.8877e-03 L9_l1linf:8.7280e-03 L10_l1linf:9.1553e-03 L11_l1linf:8.9722e-03 L12_l1linf:9.2163e-03 L1_spectral:9.5436e-04 L2_spectral:9.6706e-04 L3_spectral:1.0050e-03 L4_spectral:9.7355e-04 L5_spectral:9.6268e-04 L6_spectral:9.5392e-04 L7_spectral:9.5627e-04 L8_spectral:1.0042e-03 L9_spectral:9.6583e-04 L10_spectral:9.5502e-04 L11_spectral:9.4875e-04 L12_spectral:9.8307e-04 train_time:374472ms step_avg:42.55ms +[2025-09-11 09:22:16] [Rank 0] step:8801/10000 train_time:376351ms step_avg:42.76ms +[2025-09-11 09:22:16] [Rank 0] step:8801/10000 train_time:376351ms step_avg:42.76ms +[2025-09-11 09:22:17] [Rank 0] step:8821/10000 train_time:377084ms step_avg:42.75ms +[2025-09-11 09:22:17] [Rank 0] step:8821/10000 train_time:377084ms step_avg:42.75ms +[2025-09-11 09:22:18] [Rank 0] step:8841/10000 train_time:377797ms step_avg:42.73ms +[2025-09-11 09:22:18] [Rank 0] step:8841/10000 train_time:377797ms step_avg:42.73ms +[2025-09-11 09:22:18] [Rank 0] step:8861/10000 train_time:378507ms step_avg:42.72ms +[2025-09-11 09:22:18] [Rank 0] step:8861/10000 train_time:378507ms step_avg:42.72ms +[2025-09-11 09:22:19] [Rank 0] step:8881/10000 train_time:379218ms step_avg:42.70ms +[2025-09-11 09:22:19] [Rank 0] step:8881/10000 
train_time:379218ms step_avg:42.70ms +[2025-09-11 09:22:20] [Rank 0] step:8901/10000 train_time:379931ms step_avg:42.68ms +[2025-09-11 09:22:20] [Rank 0] step:8901/10000 train_time:379931ms step_avg:42.68ms +[2025-09-11 09:22:20] [Rank 0] step:8921/10000 train_time:380640ms step_avg:42.67ms +[2025-09-11 09:22:20] [Rank 0] step:8921/10000 train_time:380640ms step_avg:42.67ms +[2025-09-11 09:22:21] [Rank 0] step:8941/10000 train_time:381355ms step_avg:42.65ms +[2025-09-11 09:22:21] [Rank 0] step:8941/10000 train_time:381355ms step_avg:42.65ms +[2025-09-11 09:22:22] [Rank 0] step:8961/10000 train_time:382075ms step_avg:42.64ms +[2025-09-11 09:22:22] [Rank 0] step:8961/10000 train_time:382075ms step_avg:42.64ms +[2025-09-11 09:22:23] [Rank 0] step:8981/10000 train_time:382789ms step_avg:42.62ms +[2025-09-11 09:22:23] [Rank 0] step:8981/10000 train_time:382789ms step_avg:42.62ms +[2025-09-11 09:22:23] [Rank 0] step:9001/10000 train_time:383496ms step_avg:42.61ms +[2025-09-11 09:22:23] [Rank 0] step:9001/10000 train_time:383496ms step_avg:42.61ms +[2025-09-11 09:22:24] [Rank 0] step:9021/10000 train_time:384207ms step_avg:42.59ms +[2025-09-11 09:22:24] [Rank 0] step:9021/10000 train_time:384207ms step_avg:42.59ms +[2025-09-11 09:22:25] [Rank 0] step:9041/10000 train_time:384921ms step_avg:42.58ms +[2025-09-11 09:22:25] [Rank 0] step:9041/10000 train_time:384921ms step_avg:42.58ms +[2025-09-11 09:22:26] [Rank 0] step:9061/10000 train_time:385924ms step_avg:42.59ms +[2025-09-11 09:22:26] [Rank 0] step:9061/10000 train_time:385924ms step_avg:42.59ms +[2025-09-11 09:22:26] [Rank 0] step:9081/10000 train_time:386639ms step_avg:42.58ms +[2025-09-11 09:22:26] [Rank 0] step:9081/10000 train_time:386639ms step_avg:42.58ms +[2025-09-11 09:22:27] [Rank 0] step:9101/10000 train_time:387354ms step_avg:42.56ms +[2025-09-11 09:22:27] [Rank 0] step:9101/10000 train_time:387354ms step_avg:42.56ms +[2025-09-11 09:22:28] [Rank 0] step:9121/10000 train_time:388321ms step_avg:42.57ms 
+[2025-09-11 09:22:28] [Rank 0] step:9121/10000 train_time:388321ms step_avg:42.57ms +[2025-09-11 09:22:29] [Rank 0] step:9141/10000 train_time:389031ms step_avg:42.56ms +[2025-09-11 09:22:29] [Rank 0] step:9141/10000 train_time:389031ms step_avg:42.56ms +[2025-09-11 09:22:29] [Rank 0] step:9161/10000 train_time:389747ms step_avg:42.54ms +[2025-09-11 09:22:29] [Rank 0] step:9161/10000 train_time:389747ms step_avg:42.54ms +[2025-09-11 09:22:30] [Rank 0] step:9181/10000 train_time:390461ms step_avg:42.53ms +[2025-09-11 09:22:30] [Rank 0] step:9181/10000 train_time:390461ms step_avg:42.53ms +[2025-09-11 09:22:31] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 09:22:31] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 09:22:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 09:22:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 09:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 09:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 09:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 09:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 09:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 09:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 09:22:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 09:22:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 09:22:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 09:22:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 09:22:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 09:22:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 09:22:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 09:22:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 09:22:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 09:22:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 09:22:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 09:22:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 09:22:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 09:22:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 09:22:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 09:22:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 09:22:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 09:22:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 09:22:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 09:22:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 09:22:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 09:22:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 09:22:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 09:22:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 09:22:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 09:22:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 09:22:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 09:22:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 09:22:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 09:22:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 09:22:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 09:22:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 09:22:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:22:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:22:42] [Rank 0] PRINT: step:9200/10000 val_loss:4.1323 total_sharp:1.5758e-05 L1_sharp:2.4968e-03 L2_sharp:3.6500e-03 L3_sharp:3.8988e-03 L4_sharp:8.0292e-03 L5_sharp:1.0385e-02 L6_sharp:1.1649e-02 L7_sharp:1.2262e-02 L8_sharp:1.8190e-02 L9_sharp:1.9764e-02 L10_sharp:2.4877e-02 L11_sharp:5.2400e-02 L12_sharp:4.4386e-01 total_fnorm:4.5250e+01 total_l1_linf:6.4000e+04 total_spectral:2.2625e+01 L1_fnorm:4.1016e-02 L2_fnorm:4.1260e-02 L3_fnorm:4.1260e-02 L4_fnorm:4.1260e-02 L5_fnorm:4.1016e-02 L6_fnorm:4.1260e-02 L7_fnorm:4.1260e-02 L8_fnorm:3.9551e-02 L9_fnorm:4.0527e-02 L10_fnorm:4.0527e-02 L11_fnorm:4.0283e-02 L12_fnorm:3.9795e-02 L1_l1linf:5.6458e-03 L2_l1linf:5.5237e-03 L3_l1linf:5.4932e-03 L4_l1linf:5.4321e-03 L5_l1linf:5.3406e-03 L6_l1linf:5.4321e-03 L7_l1linf:5.4016e-03 L8_l1linf:6.1340e-03 L9_l1linf:5.2490e-03 L10_l1linf:5.1270e-03 L11_l1linf:5.6152e-03 L12_l1linf:5.8594e-03 L1_spectral:6.4276e-04 L2_spectral:6.5150e-04 L3_spectral:6.5564e-04 L4_spectral:6.5598e-04 L5_spectral:6.4792e-04 L6_spectral:6.4759e-04 L7_spectral:6.5048e-04 L8_spectral:6.9033e-04 L9_spectral:6.5208e-04 L10_spectral:6.4787e-04 L11_spectral:6.4944e-04 L12_spectral:6.7674e-04 train_time:391156ms step_avg:42.52ms +[2025-09-11 09:22:42] [Rank 0] PRINT: step:9200/10000 val_loss:4.1323 total_sharp:1.5758e-05 L1_sharp:2.4968e-03 L2_sharp:3.6500e-03 L3_sharp:3.8988e-03 L4_sharp:8.0292e-03 L5_sharp:1.0385e-02 L6_sharp:1.1649e-02 L7_sharp:1.2262e-02 L8_sharp:1.8190e-02 L9_sharp:1.9764e-02 L10_sharp:2.4877e-02 L11_sharp:5.2400e-02 L12_sharp:4.4386e-01 total_fnorm:4.5250e+01 total_l1_linf:6.4000e+04 total_spectral:2.2625e+01 L1_fnorm:4.1016e-02 L2_fnorm:4.1260e-02 L3_fnorm:4.1260e-02 L4_fnorm:4.1260e-02 L5_fnorm:4.1016e-02 L6_fnorm:4.1260e-02 L7_fnorm:4.1260e-02 L8_fnorm:3.9551e-02 L9_fnorm:4.0527e-02 L10_fnorm:4.0527e-02 L11_fnorm:4.0283e-02 L12_fnorm:3.9795e-02 L1_l1linf:5.6458e-03 L2_l1linf:5.5237e-03 L3_l1linf:5.4932e-03 L4_l1linf:5.4321e-03 L5_l1linf:5.3406e-03 
L6_l1linf:5.4321e-03 L7_l1linf:5.4016e-03 L8_l1linf:6.1340e-03 L9_l1linf:5.2490e-03 L10_l1linf:5.1270e-03 L11_l1linf:5.6152e-03 L12_l1linf:5.8594e-03 L1_spectral:6.4276e-04 L2_spectral:6.5150e-04 L3_spectral:6.5564e-04 L4_spectral:6.5598e-04 L5_spectral:6.4792e-04 L6_spectral:6.4759e-04 L7_spectral:6.5048e-04 L8_spectral:6.9033e-04 L9_spectral:6.5208e-04 L10_spectral:6.4787e-04 L11_spectral:6.4944e-04 L12_spectral:6.7674e-04 train_time:391156ms step_avg:42.52ms +[2025-09-11 09:22:44] [Rank 0] step:9201/10000 train_time:393107ms step_avg:42.72ms +[2025-09-11 09:22:44] [Rank 0] step:9201/10000 train_time:393107ms step_avg:42.72ms +[2025-09-11 09:22:44] [Rank 0] step:9221/10000 train_time:393837ms step_avg:42.71ms +[2025-09-11 09:22:44] [Rank 0] step:9221/10000 train_time:393837ms step_avg:42.71ms +[2025-09-11 09:22:45] [Rank 0] step:9241/10000 train_time:394546ms step_avg:42.70ms +[2025-09-11 09:22:45] [Rank 0] step:9241/10000 train_time:394546ms step_avg:42.70ms +[2025-09-11 09:22:46] [Rank 0] step:9261/10000 train_time:395260ms step_avg:42.68ms +[2025-09-11 09:22:46] [Rank 0] step:9261/10000 train_time:395260ms step_avg:42.68ms +[2025-09-11 09:22:46] [Rank 0] step:9281/10000 train_time:395972ms step_avg:42.66ms +[2025-09-11 09:22:46] [Rank 0] step:9281/10000 train_time:395972ms step_avg:42.66ms +[2025-09-11 09:22:47] [Rank 0] step:9301/10000 train_time:396681ms step_avg:42.65ms +[2025-09-11 09:22:47] [Rank 0] step:9301/10000 train_time:396681ms step_avg:42.65ms +[2025-09-11 09:22:48] [Rank 0] step:9321/10000 train_time:397395ms step_avg:42.63ms +[2025-09-11 09:22:48] [Rank 0] step:9321/10000 train_time:397395ms step_avg:42.63ms +[2025-09-11 09:22:49] [Rank 0] step:9341/10000 train_time:398104ms step_avg:42.62ms +[2025-09-11 09:22:49] [Rank 0] step:9341/10000 train_time:398104ms step_avg:42.62ms +[2025-09-11 09:22:49] [Rank 0] step:9361/10000 train_time:398812ms step_avg:42.60ms +[2025-09-11 09:22:49] [Rank 0] step:9361/10000 train_time:398812ms step_avg:42.60ms 
+[2025-09-11 09:22:50] [Rank 0] step:9381/10000 train_time:399522ms step_avg:42.59ms +[2025-09-11 09:22:50] [Rank 0] step:9381/10000 train_time:399522ms step_avg:42.59ms +[2025-09-11 09:22:51] [Rank 0] step:9401/10000 train_time:400235ms step_avg:42.57ms +[2025-09-11 09:22:51] [Rank 0] step:9401/10000 train_time:400235ms step_avg:42.57ms +[2025-09-11 09:22:51] [Rank 0] step:9421/10000 train_time:400949ms step_avg:42.56ms +[2025-09-11 09:22:51] [Rank 0] step:9421/10000 train_time:400949ms step_avg:42.56ms +[2025-09-11 09:22:52] [Rank 0] step:9441/10000 train_time:401664ms step_avg:42.54ms +[2025-09-11 09:22:52] [Rank 0] step:9441/10000 train_time:401664ms step_avg:42.54ms +[2025-09-11 09:22:53] [Rank 0] step:9461/10000 train_time:402376ms step_avg:42.53ms +[2025-09-11 09:22:53] [Rank 0] step:9461/10000 train_time:402376ms step_avg:42.53ms +[2025-09-11 09:22:54] [Rank 0] step:9481/10000 train_time:403090ms step_avg:42.52ms +[2025-09-11 09:22:54] [Rank 0] step:9481/10000 train_time:403090ms step_avg:42.52ms +[2025-09-11 09:22:54] [Rank 0] step:9501/10000 train_time:403803ms step_avg:42.50ms +[2025-09-11 09:22:54] [Rank 0] step:9501/10000 train_time:403803ms step_avg:42.50ms +[2025-09-11 09:22:55] [Rank 0] step:9521/10000 train_time:404518ms step_avg:42.49ms +[2025-09-11 09:22:55] [Rank 0] step:9521/10000 train_time:404518ms step_avg:42.49ms +[2025-09-11 09:22:56] [Rank 0] step:9541/10000 train_time:405228ms step_avg:42.47ms +[2025-09-11 09:22:56] [Rank 0] step:9541/10000 train_time:405228ms step_avg:42.47ms +[2025-09-11 09:22:56] [Rank 0] step:9561/10000 train_time:405940ms step_avg:42.46ms +[2025-09-11 09:22:56] [Rank 0] step:9561/10000 train_time:405940ms step_avg:42.46ms +[2025-09-11 09:22:57] [Rank 0] step:9581/10000 train_time:406655ms step_avg:42.44ms +[2025-09-11 09:22:57] [Rank 0] step:9581/10000 train_time:406655ms step_avg:42.44ms +[2025-09-11 09:22:58] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 09:22:58] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 09:22:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 09:22:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 09:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 09:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 09:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 09:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 09:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 09:23:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 09:23:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 09:23:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 09:23:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 09:23:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 09:23:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 09:23:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 09:23:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 09:23:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 09:23:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 09:23:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 09:23:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 09:23:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 09:23:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 09:23:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 09:23:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 09:23:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 09:23:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 09:23:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 09:23:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 09:23:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 09:23:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 09:23:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 09:23:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 09:23:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 09:23:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 09:23:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 09:23:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 09:23:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 09:23:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 09:23:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 09:23:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 09:23:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 09:23:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:23:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:23:09] [Rank 0] PRINT: step:9600/10000 val_loss:4.1209 total_sharp:1.0450e-05 L1_sharp:2.4672e-03 L2_sharp:2.5209e-03 L3_sharp:3.0908e-03 L4_sharp:5.5661e-03 L5_sharp:5.4230e-03 L6_sharp:1.0512e-02 L7_sharp:1.0327e-02 L8_sharp:1.4049e-02 L9_sharp:1.4999e-02 L10_sharp:1.8956e-02 L11_sharp:3.6117e-02 L12_sharp:2.5843e-01 total_fnorm:2.7000e+01 total_l1_linf:3.2640e+04 total_spectral:1.3438e+01 L1_fnorm:2.3071e-02 L2_fnorm:2.3071e-02 L3_fnorm:2.3071e-02 L4_fnorm:2.2949e-02 L5_fnorm:2.3071e-02 L6_fnorm:2.2949e-02 L7_fnorm:2.3071e-02 L8_fnorm:2.2095e-02 L9_fnorm:2.2583e-02 L10_fnorm:2.2583e-02 L11_fnorm:2.2583e-02 L12_fnorm:2.2217e-02 L1_l1linf:2.6855e-03 L2_l1linf:2.7618e-03 L3_l1linf:2.6245e-03 L4_l1linf:2.7008e-03 L5_l1linf:2.5482e-03 L6_l1linf:2.5787e-03 L7_l1linf:2.5330e-03 L8_l1linf:3.0365e-03 L9_l1linf:2.6855e-03 L10_l1linf:2.5482e-03 L11_l1linf:2.6398e-03 L12_l1linf:2.9297e-03 L1_spectral:3.6466e-04 L2_spectral:3.6701e-04 L3_spectral:3.7133e-04 L4_spectral:3.7167e-04 L5_spectral:3.6927e-04 L6_spectral:3.6647e-04 L7_spectral:3.7224e-04 L8_spectral:3.9575e-04 L9_spectral:3.7529e-04 L10_spectral:3.6968e-04 L11_spectral:3.7358e-04 L12_spectral:3.8876e-04 train_time:407344ms step_avg:42.43ms +[2025-09-11 09:23:09] [Rank 0] PRINT: step:9600/10000 
val_loss:4.1209 total_sharp:1.0450e-05 L1_sharp:2.4672e-03 L2_sharp:2.5209e-03 L3_sharp:3.0908e-03 L4_sharp:5.5661e-03 L5_sharp:5.4230e-03 L6_sharp:1.0512e-02 L7_sharp:1.0327e-02 L8_sharp:1.4049e-02 L9_sharp:1.4999e-02 L10_sharp:1.8956e-02 L11_sharp:3.6117e-02 L12_sharp:2.5843e-01 total_fnorm:2.7000e+01 total_l1_linf:3.2640e+04 total_spectral:1.3438e+01 L1_fnorm:2.3071e-02 L2_fnorm:2.3071e-02 L3_fnorm:2.3071e-02 L4_fnorm:2.2949e-02 L5_fnorm:2.3071e-02 L6_fnorm:2.2949e-02 L7_fnorm:2.3071e-02 L8_fnorm:2.2095e-02 L9_fnorm:2.2583e-02 L10_fnorm:2.2583e-02 L11_fnorm:2.2583e-02 L12_fnorm:2.2217e-02 L1_l1linf:2.6855e-03 L2_l1linf:2.7618e-03 L3_l1linf:2.6245e-03 L4_l1linf:2.7008e-03 L5_l1linf:2.5482e-03 L6_l1linf:2.5787e-03 L7_l1linf:2.5330e-03 L8_l1linf:3.0365e-03 L9_l1linf:2.6855e-03 L10_l1linf:2.5482e-03 L11_l1linf:2.6398e-03 L12_l1linf:2.9297e-03 L1_spectral:3.6466e-04 L2_spectral:3.6701e-04 L3_spectral:3.7133e-04 L4_spectral:3.7167e-04 L5_spectral:3.6927e-04 L6_spectral:3.6647e-04 L7_spectral:3.7224e-04 L8_spectral:3.9575e-04 L9_spectral:3.7529e-04 L10_spectral:3.6968e-04 L11_spectral:3.7358e-04 L12_spectral:3.8876e-04 train_time:407344ms step_avg:42.43ms +[2025-09-11 09:23:11] [Rank 0] step:9601/10000 train_time:409879ms step_avg:42.69ms +[2025-09-11 09:23:11] [Rank 0] step:9601/10000 train_time:409879ms step_avg:42.69ms +[2025-09-11 09:23:12] [Rank 0] step:9621/10000 train_time:410599ms step_avg:42.68ms +[2025-09-11 09:23:12] [Rank 0] step:9621/10000 train_time:410599ms step_avg:42.68ms +[2025-09-11 09:23:13] [Rank 0] step:9641/10000 train_time:411317ms step_avg:42.66ms +[2025-09-11 09:23:13] [Rank 0] step:9641/10000 train_time:411317ms step_avg:42.66ms +[2025-09-11 09:23:13] [Rank 0] step:9661/10000 train_time:412041ms step_avg:42.65ms +[2025-09-11 09:23:13] [Rank 0] step:9661/10000 train_time:412041ms step_avg:42.65ms +[2025-09-11 09:23:14] [Rank 0] step:9681/10000 train_time:412759ms step_avg:42.64ms +[2025-09-11 09:23:14] [Rank 0] step:9681/10000 
train_time:412759ms step_avg:42.64ms +[2025-09-11 09:23:15] [Rank 0] step:9701/10000 train_time:413478ms step_avg:42.62ms +[2025-09-11 09:23:15] [Rank 0] step:9701/10000 train_time:413478ms step_avg:42.62ms +[2025-09-11 09:23:15] [Rank 0] step:9721/10000 train_time:414201ms step_avg:42.61ms +[2025-09-11 09:23:15] [Rank 0] step:9721/10000 train_time:414201ms step_avg:42.61ms +[2025-09-11 09:23:16] [Rank 0] step:9741/10000 train_time:414920ms step_avg:42.60ms +[2025-09-11 09:23:16] [Rank 0] step:9741/10000 train_time:414920ms step_avg:42.60ms +[2025-09-11 09:23:17] [Rank 0] step:9761/10000 train_time:415639ms step_avg:42.58ms +[2025-09-11 09:23:17] [Rank 0] step:9761/10000 train_time:415639ms step_avg:42.58ms +[2025-09-11 09:23:18] [Rank 0] step:9781/10000 train_time:416357ms step_avg:42.57ms +[2025-09-11 09:23:18] [Rank 0] step:9781/10000 train_time:416357ms step_avg:42.57ms +[2025-09-11 09:23:18] [Rank 0] step:9801/10000 train_time:417081ms step_avg:42.55ms +[2025-09-11 09:23:18] [Rank 0] step:9801/10000 train_time:417081ms step_avg:42.55ms +[2025-09-11 09:23:19] [Rank 0] step:9821/10000 train_time:417807ms step_avg:42.54ms +[2025-09-11 09:23:19] [Rank 0] step:9821/10000 train_time:417807ms step_avg:42.54ms +[2025-09-11 09:23:20] [Rank 0] step:9841/10000 train_time:418531ms step_avg:42.53ms +[2025-09-11 09:23:20] [Rank 0] step:9841/10000 train_time:418531ms step_avg:42.53ms +[2025-09-11 09:23:21] [Rank 0] step:9861/10000 train_time:419249ms step_avg:42.52ms +[2025-09-11 09:23:21] [Rank 0] step:9861/10000 train_time:419249ms step_avg:42.52ms +[2025-09-11 09:23:21] [Rank 0] step:9881/10000 train_time:419968ms step_avg:42.50ms +[2025-09-11 09:23:21] [Rank 0] step:9881/10000 train_time:419968ms step_avg:42.50ms +[2025-09-11 09:23:22] [Rank 0] step:9901/10000 train_time:420684ms step_avg:42.49ms +[2025-09-11 09:23:22] [Rank 0] step:9901/10000 train_time:420684ms step_avg:42.49ms +[2025-09-11 09:23:23] [Rank 0] step:9921/10000 train_time:421403ms step_avg:42.48ms 
+[2025-09-11 09:23:23] [Rank 0] step:9921/10000 train_time:421403ms step_avg:42.48ms +[2025-09-11 09:23:23] [Rank 0] step:9941/10000 train_time:422126ms step_avg:42.46ms +[2025-09-11 09:23:23] [Rank 0] step:9941/10000 train_time:422126ms step_avg:42.46ms +[2025-09-11 09:23:24] [Rank 0] step:9961/10000 train_time:422850ms step_avg:42.45ms +[2025-09-11 09:23:24] [Rank 0] step:9961/10000 train_time:422850ms step_avg:42.45ms +[2025-09-11 09:23:25] [Rank 0] step:9981/10000 train_time:423572ms step_avg:42.44ms +[2025-09-11 09:23:25] [Rank 0] step:9981/10000 train_time:423572ms step_avg:42.44ms +[2025-09-11 09:23:26] [Rank 0] step:10000/10000 train_time:424264ms step_avg:42.43ms +[2025-09-11 09:23:26] [Rank 0] step:10000/10000 train_time:424264ms step_avg:42.43ms +[2025-09-11 09:23:26] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 09:23:26] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 09:23:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 09:23:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 09:23:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 09:23:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 09:23:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:23:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:23:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 09:23:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 09:23:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 09:23:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 09:23:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 09:23:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 09:23:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 09:23:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 09:23:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 09:23:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 09:23:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 09:23:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 09:23:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 09:23:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 09:23:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 09:23:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 09:23:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 09:23:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 09:23:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 09:23:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 09:23:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 09:23:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 09:23:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 09:23:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 09:23:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 09:23:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 09:23:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 09:23:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 09:23:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 09:23:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 09:23:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 09:23:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 09:23:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 09:23:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 09:23:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 09:23:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 09:23:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:23:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:23:36] [Rank 0] PRINT: step:10000/10000 val_loss:4.1213 total_sharp:7.4148e-06 L1_sharp:5.6854e-04 L2_sharp:2.8205e-03 L3_sharp:1.0189e-03 L4_sharp:5.3241e-03 L5_sharp:6.8832e-03 L6_sharp:7.3370e-03 L7_sharp:7.5841e-03 L8_sharp:1.0607e-02 L9_sharp:1.2442e-02 L10_sharp:1.4154e-02 L11_sharp:2.8642e-02 L12_sharp:1.4413e-01 total_fnorm:1.0562e+01 total_l1_linf:9.2800e+03 total_spectral:5.2812e+00 L1_fnorm:9.0332e-03 L2_fnorm:9.0332e-03 L3_fnorm:9.0942e-03 L4_fnorm:9.0332e-03 L5_fnorm:8.9722e-03 L6_fnorm:9.0332e-03 L7_fnorm:9.0332e-03 L8_fnorm:8.7280e-03 L9_fnorm:8.7891e-03 L10_fnorm:8.8501e-03 L11_fnorm:8.7891e-03 L12_fnorm:8.7280e-03 L1_l1linf:9.1553e-04 L2_l1linf:8.4305e-04 L3_l1linf:8.8120e-04 L4_l1linf:7.8964e-04 L5_l1linf:7.8964e-04 L6_l1linf:7.6294e-04 L7_l1linf:7.8583e-04 L8_l1linf:9.5367e-04 L9_l1linf:7.3624e-04 L10_l1linf:7.6294e-04 L11_l1linf:8.0872e-04 L12_l1linf:8.8882e-04 L1_spectral:1.4770e-04 L2_spectral:1.4876e-04 L3_spectral:1.4897e-04 L4_spectral:1.5159e-04 L5_spectral:1.4709e-04 L6_spectral:1.4941e-04 L7_spectral:1.5010e-04 L8_spectral:1.6403e-04 L9_spectral:1.4829e-04 L10_spectral:1.5009e-04 L11_spectral:1.4918e-04 L12_spectral:1.5675e-04 train_time:424285ms step_avg:42.43ms +[2025-09-11 09:23:36] [Rank 0] PRINT: step:10000/10000 val_loss:4.1213 total_sharp:7.4148e-06 L1_sharp:5.6854e-04 L2_sharp:2.8205e-03 L3_sharp:1.0189e-03 L4_sharp:5.3241e-03 L5_sharp:6.8832e-03 L6_sharp:7.3370e-03 L7_sharp:7.5841e-03 L8_sharp:1.0607e-02 L9_sharp:1.2442e-02 L10_sharp:1.4154e-02 L11_sharp:2.8642e-02 L12_sharp:1.4413e-01 total_fnorm:1.0562e+01 total_l1_linf:9.2800e+03 total_spectral:5.2812e+00 L1_fnorm:9.0332e-03 L2_fnorm:9.0332e-03 L3_fnorm:9.0942e-03 L4_fnorm:9.0332e-03 L5_fnorm:8.9722e-03 L6_fnorm:9.0332e-03 L7_fnorm:9.0332e-03 L8_fnorm:8.7280e-03 L9_fnorm:8.7891e-03 L10_fnorm:8.8501e-03 L11_fnorm:8.7891e-03 L12_fnorm:8.7280e-03 L1_l1linf:9.1553e-04 L2_l1linf:8.4305e-04 L3_l1linf:8.8120e-04 L4_l1linf:7.8964e-04 L5_l1linf:7.8964e-04 
L6_l1linf:7.6294e-04 L7_l1linf:7.8583e-04 L8_l1linf:9.5367e-04 L9_l1linf:7.3624e-04 L10_l1linf:7.6294e-04 L11_l1linf:8.0872e-04 L12_l1linf:8.8882e-04 L1_spectral:1.4770e-04 L2_spectral:1.4876e-04 L3_spectral:1.4897e-04 L4_spectral:1.5159e-04 L5_spectral:1.4709e-04 L6_spectral:1.4941e-04 L7_spectral:1.5010e-04 L8_spectral:1.6403e-04 L9_spectral:1.4829e-04 L10_spectral:1.5009e-04 L11_spectral:1.4918e-04 L12_spectral:1.5675e-04 train_time:424285ms step_avg:42.43ms +[2025-09-11 09:23:36] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 09:23:36 2025 --- +[2025-09-11 09:23:36] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 09:23:36 2025 --- +[2025-09-11 09:23:36] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 09:23:36] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.005_seed_42/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.005_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..33eebd9b6b6e35f8e204e6aea8fb5194965b1938 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.005_seed_42/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.05, + "muon_lr": 0.005, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "67d08f17-2a96-4355-ad10-cb6792cae975", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.005_seed_42/training_log_67d08f17-2a96-4355-ad10-cb6792cae975.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.005_seed_42/training_log_67d08f17-2a96-4355-ad10-cb6792cae975.txt new file mode 100644 index 0000000000000000000000000000000000000000..fc321195ac78bcb8f6a89b331c355b952a9b77eb --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.005_seed_42/training_log_67d08f17-2a96-4355-ad10-cb6792cae975.txt @@ -0,0 +1,4264 @@ +[2025-09-11 08:43:48] [Rank 0] PRINT: --- Script Start: Thu Sep 11 08:43:48 2025 --- +[2025-09-11 08:43:48] [Rank 0] PRINT: --- Script Start: Thu Sep 11 08:43:48 2025 --- +[2025-09-11 08:43:48] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.05, muon_lr=0.005, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 08:43:48] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.05, muon_lr=0.005, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 08:43:48] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 08:43:48] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 08:43:48] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-11 08:43:48] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-11 08:43:48] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.005_seed_42 +[2025-09-11 08:43:48] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.005_seed_42 +[2025-09-11 08:43:48] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import 
dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, 
"unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." 
+ "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention 
sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 08:43:48] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 08:43:48] [Rank 0] PRINT: Constructing model... +[2025-09-11 08:43:48] [Rank 0] PRINT: Constructing model... +[2025-09-11 08:43:49] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 08:43:49] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 08:43:49] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 08:43:49] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 08:43:49] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 08:43:49] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 08:43:49] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 08:43:49] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 08:43:49] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 08:43:49] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 08:43:54] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 08:43:54] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 08:43:54] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 08:43:54] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 08:43:54] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 08:43:54] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 08:44:06] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 08:44:06] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 08:44:06] [Rank 0] PRINT: Starting warmup... +[2025-09-11 08:44:06] [Rank 0] PRINT: Starting warmup... +[2025-09-11 08:50:23] [Rank 0] PRINT: Warmup complete. +[2025-09-11 08:50:23] [Rank 0] PRINT: Warmup complete. +[2025-09-11 08:50:23] [Rank 0] PRINT: Starting training... +[2025-09-11 08:50:23] [Rank 0] PRINT: Starting training... 
+[2025-09-11 08:50:25] [Rank 0] step:21/10000 train_time:1572ms step_avg:74.84ms +[2025-09-11 08:50:25] [Rank 0] step:21/10000 train_time:1572ms step_avg:74.84ms +[2025-09-11 08:50:26] [Rank 0] step:41/10000 train_time:2299ms step_avg:56.08ms +[2025-09-11 08:50:26] [Rank 0] step:41/10000 train_time:2299ms step_avg:56.08ms +[2025-09-11 08:50:26] [Rank 0] step:61/10000 train_time:3026ms step_avg:49.61ms +[2025-09-11 08:50:26] [Rank 0] step:61/10000 train_time:3026ms step_avg:49.61ms +[2025-09-11 08:50:27] [Rank 0] step:81/10000 train_time:3753ms step_avg:46.34ms +[2025-09-11 08:50:27] [Rank 0] step:81/10000 train_time:3753ms step_avg:46.34ms +[2025-09-11 08:50:28] [Rank 0] step:101/10000 train_time:4481ms step_avg:44.36ms +[2025-09-11 08:50:28] [Rank 0] step:101/10000 train_time:4481ms step_avg:44.36ms +[2025-09-11 08:50:28] [Rank 0] step:121/10000 train_time:5208ms step_avg:43.04ms +[2025-09-11 08:50:28] [Rank 0] step:121/10000 train_time:5208ms step_avg:43.04ms +[2025-09-11 08:50:29] [Rank 0] step:141/10000 train_time:5934ms step_avg:42.09ms +[2025-09-11 08:50:29] [Rank 0] step:141/10000 train_time:5934ms step_avg:42.09ms +[2025-09-11 08:50:30] [Rank 0] step:161/10000 train_time:6661ms step_avg:41.38ms +[2025-09-11 08:50:30] [Rank 0] step:161/10000 train_time:6661ms step_avg:41.38ms +[2025-09-11 08:50:31] [Rank 0] step:181/10000 train_time:7388ms step_avg:40.82ms +[2025-09-11 08:50:31] [Rank 0] step:181/10000 train_time:7388ms step_avg:40.82ms +[2025-09-11 08:50:31] [Rank 0] step:201/10000 train_time:8114ms step_avg:40.37ms +[2025-09-11 08:50:31] [Rank 0] step:201/10000 train_time:8114ms step_avg:40.37ms +[2025-09-11 08:50:32] [Rank 0] step:221/10000 train_time:8841ms step_avg:40.00ms +[2025-09-11 08:50:32] [Rank 0] step:221/10000 train_time:8841ms step_avg:40.00ms +[2025-09-11 08:50:33] [Rank 0] step:241/10000 train_time:9568ms step_avg:39.70ms +[2025-09-11 08:50:33] [Rank 0] step:241/10000 train_time:9568ms step_avg:39.70ms +[2025-09-11 08:50:34] [Rank 0] 
step:261/10000 train_time:10295ms step_avg:39.45ms +[2025-09-11 08:50:34] [Rank 0] step:261/10000 train_time:10295ms step_avg:39.45ms +[2025-09-11 08:50:34] [Rank 0] step:281/10000 train_time:11023ms step_avg:39.23ms +[2025-09-11 08:50:34] [Rank 0] step:281/10000 train_time:11023ms step_avg:39.23ms +[2025-09-11 08:50:35] [Rank 0] step:301/10000 train_time:11750ms step_avg:39.04ms +[2025-09-11 08:50:35] [Rank 0] step:301/10000 train_time:11750ms step_avg:39.04ms +[2025-09-11 08:50:36] [Rank 0] step:321/10000 train_time:12477ms step_avg:38.87ms +[2025-09-11 08:50:36] [Rank 0] step:321/10000 train_time:12477ms step_avg:38.87ms +[2025-09-11 08:50:36] [Rank 0] step:341/10000 train_time:13203ms step_avg:38.72ms +[2025-09-11 08:50:36] [Rank 0] step:341/10000 train_time:13203ms step_avg:38.72ms +[2025-09-11 08:50:37] [Rank 0] step:361/10000 train_time:13929ms step_avg:38.59ms +[2025-09-11 08:50:37] [Rank 0] step:361/10000 train_time:13929ms step_avg:38.59ms +[2025-09-11 08:50:38] [Rank 0] step:381/10000 train_time:14656ms step_avg:38.47ms +[2025-09-11 08:50:38] [Rank 0] step:381/10000 train_time:14656ms step_avg:38.47ms +[2025-09-11 08:50:39] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 08:50:39] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 08:52:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 08:52:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 08:54:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 08:54:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 08:54:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:54:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 08:54:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 08:54:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 08:54:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 08:54:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 08:54:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 08:54:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 08:54:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 08:54:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 08:54:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 08:54:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 08:54:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 08:54:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 08:54:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 08:54:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 08:54:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 08:54:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 08:54:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 08:54:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 08:54:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 08:54:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 08:54:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 08:54:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 08:54:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 08:54:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 08:54:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 08:54:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 08:54:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 08:54:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 08:54:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 08:54:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 08:54:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 08:54:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 08:54:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 08:54:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 08:54:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 08:54:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 08:54:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 08:54:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:54:58] [Rank 0] PRINT: step:400/10000 val_loss:5.7794 total_sharp:1.7139e-04 L1_sharp:1.2473e-02 L2_sharp:1.1843e-02 L3_sharp:1.3768e-02 L4_sharp:1.3551e-02 L5_sharp:1.5096e-02 L6_sharp:1.4273e-02 L7_sharp:1.6371e-02 L8_sharp:1.7092e-02 L9_sharp:2.5718e-02 L10_sharp:6.1973e-02 L11_sharp:6.4276e-02 L12_sharp:2.1987e-01 total_fnorm:2.1249e+02 total_l1_linf:5.2144e+05 total_spectral:1.0623e+02 L1_fnorm:6.0998e-01 L2_fnorm:6.0742e-01 L3_fnorm:6.0506e-01 L4_fnorm:6.0500e-01 L5_fnorm:6.0541e-01 L6_fnorm:6.0773e-01 L7_fnorm:6.0999e-01 L8_fnorm:6.0865e-01 L9_fnorm:6.1240e-01 L10_fnorm:6.1353e-01 L11_fnorm:6.1053e-01 L12_fnorm:6.0256e-01 L1_l1linf:2.1629e-01 L2_l1linf:2.1379e-01 L3_l1linf:2.1152e-01 L4_l1linf:2.1260e-01 L5_l1linf:2.1201e-01 L6_l1linf:2.1407e-01 L7_l1linf:2.1517e-01 L8_l1linf:2.1529e-01 L9_l1linf:2.1877e-01 L10_l1linf:2.1982e-01 L11_l1linf:2.2279e-01 L12_l1linf:2.1894e-01 L1_spectral:6.0222e-03 L2_spectral:6.0221e-03 L3_spectral:6.0228e-03 L4_spectral:6.0225e-03 L5_spectral:6.0206e-03 L6_spectral:6.0278e-03 L7_spectral:6.0273e-03 L8_spectral:6.0217e-03 L9_spectral:6.0235e-03 L10_spectral:6.0233e-03 L11_spectral:6.0241e-03 L12_spectral:6.0226e-03 train_time:15361ms step_avg:38.40ms +[2025-09-11 08:54:58] [Rank 0] PRINT: step:400/10000 val_loss:5.7794 total_sharp:1.7139e-04 L1_sharp:1.2473e-02 L2_sharp:1.1843e-02 L3_sharp:1.3768e-02 L4_sharp:1.3551e-02 L5_sharp:1.5096e-02 L6_sharp:1.4273e-02 L7_sharp:1.6371e-02 L8_sharp:1.7092e-02 L9_sharp:2.5718e-02 L10_sharp:6.1973e-02 L11_sharp:6.4276e-02 L12_sharp:2.1987e-01 total_fnorm:2.1249e+02 total_l1_linf:5.2144e+05 total_spectral:1.0623e+02 L1_fnorm:6.0998e-01 L2_fnorm:6.0742e-01 L3_fnorm:6.0506e-01 L4_fnorm:6.0500e-01 L5_fnorm:6.0541e-01 L6_fnorm:6.0773e-01 L7_fnorm:6.0999e-01 L8_fnorm:6.0865e-01 L9_fnorm:6.1240e-01 L10_fnorm:6.1353e-01 L11_fnorm:6.1053e-01 L12_fnorm:6.0256e-01 L1_l1linf:2.1629e-01 L2_l1linf:2.1379e-01 L3_l1linf:2.1152e-01 L4_l1linf:2.1260e-01 L5_l1linf:2.1201e-01 
L6_l1linf:2.1407e-01 L7_l1linf:2.1517e-01 L8_l1linf:2.1529e-01 L9_l1linf:2.1877e-01 L10_l1linf:2.1982e-01 L11_l1linf:2.2279e-01 L12_l1linf:2.1894e-01 L1_spectral:6.0222e-03 L2_spectral:6.0221e-03 L3_spectral:6.0228e-03 L4_spectral:6.0225e-03 L5_spectral:6.0206e-03 L6_spectral:6.0278e-03 L7_spectral:6.0273e-03 L8_spectral:6.0217e-03 L9_spectral:6.0235e-03 L10_spectral:6.0233e-03 L11_spectral:6.0241e-03 L12_spectral:6.0226e-03 train_time:15361ms step_avg:38.40ms +[2025-09-11 08:56:00] [Rank 0] step:401/10000 train_time:78078ms step_avg:194.71ms +[2025-09-11 08:56:00] [Rank 0] step:401/10000 train_time:78078ms step_avg:194.71ms +[2025-09-11 08:56:04] [Rank 0] step:421/10000 train_time:81330ms step_avg:193.18ms +[2025-09-11 08:56:04] [Rank 0] step:421/10000 train_time:81330ms step_avg:193.18ms +[2025-09-11 08:56:04] [Rank 0] step:441/10000 train_time:81970ms step_avg:185.87ms +[2025-09-11 08:56:04] [Rank 0] step:441/10000 train_time:81970ms step_avg:185.87ms +[2025-09-11 08:56:05] [Rank 0] step:461/10000 train_time:82610ms step_avg:179.20ms +[2025-09-11 08:56:05] [Rank 0] step:461/10000 train_time:82610ms step_avg:179.20ms +[2025-09-11 08:56:06] [Rank 0] step:481/10000 train_time:83560ms step_avg:173.72ms +[2025-09-11 08:56:06] [Rank 0] step:481/10000 train_time:83560ms step_avg:173.72ms +[2025-09-11 08:56:07] [Rank 0] step:501/10000 train_time:84199ms step_avg:168.06ms +[2025-09-11 08:56:07] [Rank 0] step:501/10000 train_time:84199ms step_avg:168.06ms +[2025-09-11 08:56:07] [Rank 0] step:521/10000 train_time:84839ms step_avg:162.84ms +[2025-09-11 08:56:07] [Rank 0] step:521/10000 train_time:84839ms step_avg:162.84ms +[2025-09-11 08:56:08] [Rank 0] step:541/10000 train_time:85478ms step_avg:158.00ms +[2025-09-11 08:56:08] [Rank 0] step:541/10000 train_time:85478ms step_avg:158.00ms +[2025-09-11 08:56:08] [Rank 0] step:561/10000 train_time:86118ms step_avg:153.51ms +[2025-09-11 08:56:08] [Rank 0] step:561/10000 train_time:86118ms step_avg:153.51ms +[2025-09-11 08:56:09] 
[Rank 0] step:581/10000 train_time:86774ms step_avg:149.35ms +[2025-09-11 08:56:09] [Rank 0] step:581/10000 train_time:86774ms step_avg:149.35ms +[2025-09-11 08:56:10] [Rank 0] step:601/10000 train_time:87415ms step_avg:145.45ms +[2025-09-11 08:56:10] [Rank 0] step:601/10000 train_time:87415ms step_avg:145.45ms +[2025-09-11 08:56:10] [Rank 0] step:621/10000 train_time:88055ms step_avg:141.80ms +[2025-09-11 08:56:10] [Rank 0] step:621/10000 train_time:88055ms step_avg:141.80ms +[2025-09-11 08:56:11] [Rank 0] step:641/10000 train_time:88694ms step_avg:138.37ms +[2025-09-11 08:56:11] [Rank 0] step:641/10000 train_time:88694ms step_avg:138.37ms +[2025-09-11 08:56:12] [Rank 0] step:661/10000 train_time:89333ms step_avg:135.15ms +[2025-09-11 08:56:12] [Rank 0] step:661/10000 train_time:89333ms step_avg:135.15ms +[2025-09-11 08:56:12] [Rank 0] step:681/10000 train_time:89972ms step_avg:132.12ms +[2025-09-11 08:56:12] [Rank 0] step:681/10000 train_time:89972ms step_avg:132.12ms +[2025-09-11 08:56:13] [Rank 0] step:701/10000 train_time:90612ms step_avg:129.26ms +[2025-09-11 08:56:13] [Rank 0] step:701/10000 train_time:90612ms step_avg:129.26ms +[2025-09-11 08:56:14] [Rank 0] step:721/10000 train_time:91251ms step_avg:126.56ms +[2025-09-11 08:56:14] [Rank 0] step:721/10000 train_time:91251ms step_avg:126.56ms +[2025-09-11 08:56:14] [Rank 0] step:741/10000 train_time:91891ms step_avg:124.01ms +[2025-09-11 08:56:14] [Rank 0] step:741/10000 train_time:91891ms step_avg:124.01ms +[2025-09-11 08:56:15] [Rank 0] step:761/10000 train_time:92536ms step_avg:121.60ms +[2025-09-11 08:56:15] [Rank 0] step:761/10000 train_time:92536ms step_avg:121.60ms +[2025-09-11 08:56:16] [Rank 0] step:781/10000 train_time:93180ms step_avg:119.31ms +[2025-09-11 08:56:16] [Rank 0] step:781/10000 train_time:93180ms step_avg:119.31ms +[2025-09-11 08:56:16] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 08:56:16] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 08:56:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 08:56:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 08:57:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 08:57:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 08:57:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:57:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:57:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 08:57:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 08:57:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 08:57:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 08:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 08:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 08:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 08:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 08:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 08:57:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 08:57:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 08:57:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 08:57:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 08:57:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 08:57:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 08:57:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 08:57:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 08:57:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 08:57:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 08:57:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 08:57:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 08:57:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 08:57:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 08:57:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 08:57:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 08:57:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 08:57:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 08:57:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 08:57:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 08:57:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 08:57:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... 
+[2025-09-11 08:57:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 08:57:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 08:57:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 08:57:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 08:57:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 08:57:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 08:57:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 08:57:55] [Rank 0] PRINT: step:800/10000 val_loss:5.3915 total_sharp:1.4724e-04 L1_sharp:1.3158e-02 L2_sharp:8.1623e-03 L3_sharp:9.3130e-03 L4_sharp:6.6663e-03 L5_sharp:5.4675e-03 L6_sharp:1.2567e-02 L7_sharp:1.3167e-02 L8_sharp:1.4416e-02 L9_sharp:1.9410e-02 L10_sharp:4.9515e-02 L11_sharp:6.2494e-02 L12_sharp:3.0311e-01 total_fnorm:1.9400e+02 total_l1_linf:4.5670e+05 total_spectral:9.7000e+01 L1_fnorm:6.2109e-01 L2_fnorm:6.2500e-01 L3_fnorm:6.2500e-01 L4_fnorm:6.2500e-01 L5_fnorm:6.2109e-01 L6_fnorm:6.2500e-01 L7_fnorm:6.2891e-01 L8_fnorm:6.2500e-01 L9_fnorm:6.3281e-01 L10_fnorm:6.3672e-01 L11_fnorm:6.3281e-01 L12_fnorm:6.0547e-01 L1_l1linf:2.1191e-01 L2_l1linf:2.0605e-01 L3_l1linf:2.0312e-01 L4_l1linf:2.0312e-01 L5_l1linf:2.0215e-01 L6_l1linf:2.0312e-01 L7_l1linf:2.0117e-01 L8_l1linf:2.0410e-01 L9_l1linf:2.0898e-01 L10_l1linf:2.1191e-01 L11_l1linf:2.1387e-01 L12_l1linf:2.0605e-01 L1_spectral:6.9497e-03 L2_spectral:6.9662e-03 L3_spectral:6.9564e-03 L4_spectral:6.9555e-03 L5_spectral:6.9380e-03 L6_spectral:6.9633e-03 L7_spectral:7.0087e-03 L8_spectral:6.9697e-03 L9_spectral:6.9845e-03 L10_spectral:6.9650e-03 L11_spectral:6.9679e-03 L12_spectral:6.9357e-03 train_time:93807ms step_avg:117.26ms +[2025-09-11 08:57:55] [Rank 0] PRINT: step:800/10000 val_loss:5.3915 
total_sharp:1.4724e-04 L1_sharp:1.3158e-02 L2_sharp:8.1623e-03 L3_sharp:9.3130e-03 L4_sharp:6.6663e-03 L5_sharp:5.4675e-03 L6_sharp:1.2567e-02 L7_sharp:1.3167e-02 L8_sharp:1.4416e-02 L9_sharp:1.9410e-02 L10_sharp:4.9515e-02 L11_sharp:6.2494e-02 L12_sharp:3.0311e-01 total_fnorm:1.9400e+02 total_l1_linf:4.5670e+05 total_spectral:9.7000e+01 L1_fnorm:6.2109e-01 L2_fnorm:6.2500e-01 L3_fnorm:6.2500e-01 L4_fnorm:6.2500e-01 L5_fnorm:6.2109e-01 L6_fnorm:6.2500e-01 L7_fnorm:6.2891e-01 L8_fnorm:6.2500e-01 L9_fnorm:6.3281e-01 L10_fnorm:6.3672e-01 L11_fnorm:6.3281e-01 L12_fnorm:6.0547e-01 L1_l1linf:2.1191e-01 L2_l1linf:2.0605e-01 L3_l1linf:2.0312e-01 L4_l1linf:2.0312e-01 L5_l1linf:2.0215e-01 L6_l1linf:2.0312e-01 L7_l1linf:2.0117e-01 L8_l1linf:2.0410e-01 L9_l1linf:2.0898e-01 L10_l1linf:2.1191e-01 L11_l1linf:2.1387e-01 L12_l1linf:2.0605e-01 L1_spectral:6.9497e-03 L2_spectral:6.9662e-03 L3_spectral:6.9564e-03 L4_spectral:6.9555e-03 L5_spectral:6.9380e-03 L6_spectral:6.9633e-03 L7_spectral:7.0087e-03 L8_spectral:6.9697e-03 L9_spectral:6.9845e-03 L10_spectral:6.9650e-03 L11_spectral:6.9679e-03 L12_spectral:6.9357e-03 train_time:93807ms step_avg:117.26ms +[2025-09-11 08:57:58] [Rank 0] step:801/10000 train_time:96749ms step_avg:120.79ms +[2025-09-11 08:57:58] [Rank 0] step:801/10000 train_time:96749ms step_avg:120.79ms +[2025-09-11 08:57:59] [Rank 0] step:821/10000 train_time:97398ms step_avg:118.63ms +[2025-09-11 08:57:59] [Rank 0] step:821/10000 train_time:97398ms step_avg:118.63ms +[2025-09-11 08:57:59] [Rank 0] step:841/10000 train_time:98043ms step_avg:116.58ms +[2025-09-11 08:57:59] [Rank 0] step:841/10000 train_time:98043ms step_avg:116.58ms +[2025-09-11 08:58:00] [Rank 0] step:861/10000 train_time:98688ms step_avg:114.62ms +[2025-09-11 08:58:00] [Rank 0] step:861/10000 train_time:98688ms step_avg:114.62ms +[2025-09-11 08:58:01] [Rank 0] step:881/10000 train_time:99333ms step_avg:112.75ms +[2025-09-11 08:58:01] [Rank 0] step:881/10000 train_time:99333ms step_avg:112.75ms 
+[2025-09-11 08:58:01] [Rank 0] step:901/10000 train_time:99977ms step_avg:110.96ms +[2025-09-11 08:58:01] [Rank 0] step:901/10000 train_time:99977ms step_avg:110.96ms +[2025-09-11 08:58:02] [Rank 0] step:921/10000 train_time:100622ms step_avg:109.25ms +[2025-09-11 08:58:02] [Rank 0] step:921/10000 train_time:100622ms step_avg:109.25ms +[2025-09-11 08:58:03] [Rank 0] step:941/10000 train_time:101266ms step_avg:107.62ms +[2025-09-11 08:58:03] [Rank 0] step:941/10000 train_time:101266ms step_avg:107.62ms +[2025-09-11 08:58:03] [Rank 0] step:961/10000 train_time:101910ms step_avg:106.05ms +[2025-09-11 08:58:03] [Rank 0] step:961/10000 train_time:101910ms step_avg:106.05ms +[2025-09-11 08:58:04] [Rank 0] step:981/10000 train_time:102554ms step_avg:104.54ms +[2025-09-11 08:58:04] [Rank 0] step:981/10000 train_time:102554ms step_avg:104.54ms +[2025-09-11 08:58:05] [Rank 0] step:1001/10000 train_time:103199ms step_avg:103.10ms +[2025-09-11 08:58:05] [Rank 0] step:1001/10000 train_time:103199ms step_avg:103.10ms +[2025-09-11 08:58:05] [Rank 0] step:1021/10000 train_time:103844ms step_avg:101.71ms +[2025-09-11 08:58:05] [Rank 0] step:1021/10000 train_time:103844ms step_avg:101.71ms +[2025-09-11 08:58:06] [Rank 0] step:1041/10000 train_time:104489ms step_avg:100.37ms +[2025-09-11 08:58:06] [Rank 0] step:1041/10000 train_time:104489ms step_avg:100.37ms +[2025-09-11 08:58:07] [Rank 0] step:1061/10000 train_time:105133ms step_avg:99.09ms +[2025-09-11 08:58:07] [Rank 0] step:1061/10000 train_time:105133ms step_avg:99.09ms +[2025-09-11 08:58:07] [Rank 0] step:1081/10000 train_time:105776ms step_avg:97.85ms +[2025-09-11 08:58:07] [Rank 0] step:1081/10000 train_time:105776ms step_avg:97.85ms +[2025-09-11 08:58:08] [Rank 0] step:1101/10000 train_time:106420ms step_avg:96.66ms +[2025-09-11 08:58:08] [Rank 0] step:1101/10000 train_time:106420ms step_avg:96.66ms +[2025-09-11 08:58:08] [Rank 0] step:1121/10000 train_time:107064ms step_avg:95.51ms +[2025-09-11 08:58:08] [Rank 0] 
step:1121/10000 train_time:107064ms step_avg:95.51ms +[2025-09-11 08:58:09] [Rank 0] step:1141/10000 train_time:107707ms step_avg:94.40ms +[2025-09-11 08:58:09] [Rank 0] step:1141/10000 train_time:107707ms step_avg:94.40ms +[2025-09-11 08:58:10] [Rank 0] step:1161/10000 train_time:108351ms step_avg:93.33ms +[2025-09-11 08:58:10] [Rank 0] step:1161/10000 train_time:108351ms step_avg:93.33ms +[2025-09-11 08:58:11] [Rank 0] step:1181/10000 train_time:109299ms step_avg:92.55ms +[2025-09-11 08:58:11] [Rank 0] step:1181/10000 train_time:109299ms step_avg:92.55ms +[2025-09-11 08:58:11] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 08:58:11] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 08:58:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 08:58:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 08:58:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 08:58:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 08:58:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:58:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:58:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 08:58:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 08:58:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 08:58:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 08:58:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... 
+[2025-09-11 08:58:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 08:58:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 08:58:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 08:58:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 08:58:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 08:58:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 08:58:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 08:58:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 08:58:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 08:58:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 08:58:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 08:58:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 08:58:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 08:58:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 08:58:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 08:58:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 08:58:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 08:58:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 08:58:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... 
+[2025-09-11 08:58:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 08:58:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 08:58:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 08:58:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 08:58:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 08:58:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 08:58:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 08:58:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 08:58:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 08:58:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 08:58:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 08:58:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 08:58:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 08:58:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:58:24] [Rank 0] PRINT: step:1200/10000 val_loss:5.0959 total_sharp:9.7315e-05 L1_sharp:5.3265e-03 L2_sharp:3.4399e-03 L3_sharp:3.3945e-03 L4_sharp:5.3553e-03 L5_sharp:9.1228e-03 L6_sharp:1.0281e-02 L7_sharp:1.1015e-02 L8_sharp:1.5425e-02 L9_sharp:1.7202e-02 L10_sharp:2.1133e-02 L11_sharp:4.2798e-02 L12_sharp:2.3314e-01 total_fnorm:2.0800e+02 total_l1_linf:4.7514e+05 total_spectral:1.0400e+02 L1_fnorm:6.2891e-01 L2_fnorm:6.2500e-01 L3_fnorm:6.2500e-01 L4_fnorm:6.2500e-01 L5_fnorm:6.2109e-01 L6_fnorm:6.2500e-01 L7_fnorm:6.2500e-01 L8_fnorm:6.2500e-01 L9_fnorm:6.2891e-01 L10_fnorm:6.3281e-01 L11_fnorm:6.3672e-01 L12_fnorm:6.4453e-01 L1_l1linf:1.8750e-01 L2_l1linf:1.8555e-01 L3_l1linf:1.8652e-01 L4_l1linf:1.8555e-01 L5_l1linf:1.8457e-01 L6_l1linf:1.8457e-01 L7_l1linf:1.8359e-01 L8_l1linf:1.8262e-01 L9_l1linf:1.8848e-01 L10_l1linf:1.9238e-01 L11_l1linf:1.9629e-01 L12_l1linf:2.0508e-01 L1_spectral:7.1153e-03 L2_spectral:7.1076e-03 L3_spectral:7.1297e-03 L4_spectral:7.1612e-03 L5_spectral:7.1719e-03 L6_spectral:7.1121e-03 L7_spectral:7.1450e-03 L8_spectral:7.2540e-03 L9_spectral:7.1993e-03 L10_spectral:7.1635e-03 L11_spectral:7.2350e-03 L12_spectral:7.2841e-03 train_time:109925ms step_avg:91.60ms +[2025-09-11 08:58:24] [Rank 0] PRINT: step:1200/10000 val_loss:5.0959 total_sharp:9.7315e-05 L1_sharp:5.3265e-03 L2_sharp:3.4399e-03 L3_sharp:3.3945e-03 L4_sharp:5.3553e-03 L5_sharp:9.1228e-03 L6_sharp:1.0281e-02 L7_sharp:1.1015e-02 L8_sharp:1.5425e-02 L9_sharp:1.7202e-02 L10_sharp:2.1133e-02 L11_sharp:4.2798e-02 L12_sharp:2.3314e-01 total_fnorm:2.0800e+02 total_l1_linf:4.7514e+05 total_spectral:1.0400e+02 L1_fnorm:6.2891e-01 L2_fnorm:6.2500e-01 L3_fnorm:6.2500e-01 L4_fnorm:6.2500e-01 L5_fnorm:6.2109e-01 L6_fnorm:6.2500e-01 L7_fnorm:6.2500e-01 L8_fnorm:6.2500e-01 L9_fnorm:6.2891e-01 L10_fnorm:6.3281e-01 L11_fnorm:6.3672e-01 L12_fnorm:6.4453e-01 L1_l1linf:1.8750e-01 L2_l1linf:1.8555e-01 L3_l1linf:1.8652e-01 L4_l1linf:1.8555e-01 L5_l1linf:1.8457e-01 
L6_l1linf:1.8457e-01 L7_l1linf:1.8359e-01 L8_l1linf:1.8262e-01 L9_l1linf:1.8848e-01 L10_l1linf:1.9238e-01 L11_l1linf:1.9629e-01 L12_l1linf:2.0508e-01 L1_spectral:7.1153e-03 L2_spectral:7.1076e-03 L3_spectral:7.1297e-03 L4_spectral:7.1612e-03 L5_spectral:7.1719e-03 L6_spectral:7.1121e-03 L7_spectral:7.1450e-03 L8_spectral:7.2540e-03 L9_spectral:7.1993e-03 L10_spectral:7.1635e-03 L11_spectral:7.2350e-03 L12_spectral:7.2841e-03 train_time:109925ms step_avg:91.60ms +[2025-09-11 08:58:27] [Rank 0] step:1201/10000 train_time:112776ms step_avg:93.90ms +[2025-09-11 08:58:27] [Rank 0] step:1201/10000 train_time:112776ms step_avg:93.90ms +[2025-09-11 08:58:27] [Rank 0] step:1221/10000 train_time:113425ms step_avg:92.90ms +[2025-09-11 08:58:27] [Rank 0] step:1221/10000 train_time:113425ms step_avg:92.90ms +[2025-09-11 08:58:28] [Rank 0] step:1241/10000 train_time:114071ms step_avg:91.92ms +[2025-09-11 08:58:28] [Rank 0] step:1241/10000 train_time:114071ms step_avg:91.92ms +[2025-09-11 08:58:28] [Rank 0] step:1261/10000 train_time:114716ms step_avg:90.97ms +[2025-09-11 08:58:28] [Rank 0] step:1261/10000 train_time:114716ms step_avg:90.97ms +[2025-09-11 08:58:29] [Rank 0] step:1281/10000 train_time:115361ms step_avg:90.06ms +[2025-09-11 08:58:29] [Rank 0] step:1281/10000 train_time:115361ms step_avg:90.06ms +[2025-09-11 08:58:30] [Rank 0] step:1301/10000 train_time:116006ms step_avg:89.17ms +[2025-09-11 08:58:30] [Rank 0] step:1301/10000 train_time:116006ms step_avg:89.17ms +[2025-09-11 08:58:30] [Rank 0] step:1321/10000 train_time:116650ms step_avg:88.30ms +[2025-09-11 08:58:30] [Rank 0] step:1321/10000 train_time:116650ms step_avg:88.30ms +[2025-09-11 08:58:31] [Rank 0] step:1341/10000 train_time:117296ms step_avg:87.47ms +[2025-09-11 08:58:31] [Rank 0] step:1341/10000 train_time:117296ms step_avg:87.47ms +[2025-09-11 08:58:32] [Rank 0] step:1361/10000 train_time:117939ms step_avg:86.66ms +[2025-09-11 08:58:32] [Rank 0] step:1361/10000 train_time:117939ms step_avg:86.66ms 
+[2025-09-11 08:58:32] [Rank 0] step:1381/10000 train_time:118584ms step_avg:85.87ms +[2025-09-11 08:58:32] [Rank 0] step:1381/10000 train_time:118584ms step_avg:85.87ms +[2025-09-11 08:58:33] [Rank 0] step:1401/10000 train_time:119228ms step_avg:85.10ms +[2025-09-11 08:58:33] [Rank 0] step:1401/10000 train_time:119228ms step_avg:85.10ms +[2025-09-11 08:58:34] [Rank 0] step:1421/10000 train_time:119872ms step_avg:84.36ms +[2025-09-11 08:58:34] [Rank 0] step:1421/10000 train_time:119872ms step_avg:84.36ms +[2025-09-11 08:58:34] [Rank 0] step:1441/10000 train_time:120515ms step_avg:83.63ms +[2025-09-11 08:58:34] [Rank 0] step:1441/10000 train_time:120515ms step_avg:83.63ms +[2025-09-11 08:58:35] [Rank 0] step:1461/10000 train_time:121159ms step_avg:82.93ms +[2025-09-11 08:58:35] [Rank 0] step:1461/10000 train_time:121159ms step_avg:82.93ms +[2025-09-11 08:58:36] [Rank 0] step:1481/10000 train_time:121803ms step_avg:82.24ms +[2025-09-11 08:58:36] [Rank 0] step:1481/10000 train_time:121803ms step_avg:82.24ms +[2025-09-11 08:58:36] [Rank 0] step:1501/10000 train_time:122452ms step_avg:81.58ms +[2025-09-11 08:58:36] [Rank 0] step:1501/10000 train_time:122452ms step_avg:81.58ms +[2025-09-11 08:58:37] [Rank 0] step:1521/10000 train_time:123101ms step_avg:80.93ms +[2025-09-11 08:58:37] [Rank 0] step:1521/10000 train_time:123101ms step_avg:80.93ms +[2025-09-11 08:58:37] [Rank 0] step:1541/10000 train_time:123749ms step_avg:80.30ms +[2025-09-11 08:58:37] [Rank 0] step:1541/10000 train_time:123749ms step_avg:80.30ms +[2025-09-11 08:58:38] [Rank 0] step:1561/10000 train_time:124397ms step_avg:79.69ms +[2025-09-11 08:58:38] [Rank 0] step:1561/10000 train_time:124397ms step_avg:79.69ms +[2025-09-11 08:58:39] [Rank 0] step:1581/10000 train_time:125046ms step_avg:79.09ms +[2025-09-11 08:58:39] [Rank 0] step:1581/10000 train_time:125046ms step_avg:79.09ms +[2025-09-11 08:58:39] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 08:58:39] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 08:58:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 08:58:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 08:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 08:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 08:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 08:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 08:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 08:58:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 08:58:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 08:58:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 08:58:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 08:58:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 08:58:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 08:58:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 08:58:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 08:58:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 08:58:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 08:58:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 08:58:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 08:58:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 08:58:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 08:58:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 08:58:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 08:58:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 08:58:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 08:58:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 08:58:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 08:58:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 08:58:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 08:58:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 08:58:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 08:58:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 08:58:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 08:58:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 08:58:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 08:58:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 08:58:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 08:58:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 08:58:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 08:58:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 08:58:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 08:58:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 08:58:52] [Rank 0] PRINT: step:1600/10000 val_loss:4.9240 total_sharp:9.6966e-05 L1_sharp:5.5391e-03 L2_sharp:2.6024e-03 L3_sharp:6.5285e-03 L4_sharp:1.7016e-03 L5_sharp:6.5695e-03 L6_sharp:7.8124e-03 L7_sharp:8.7389e-03 L8_sharp:1.4333e-02 L9_sharp:1.2035e-02 L10_sharp:1.6919e-02 L11_sharp:3.7828e-02 L12_sharp:5.2353e-01 total_fnorm:1.8500e+02 total_l1_linf:4.0755e+05 total_spectral:9.2500e+01 L1_fnorm:6.2891e-01 L2_fnorm:6.2891e-01 L3_fnorm:6.2500e-01 L4_fnorm:6.2500e-01 L5_fnorm:6.2500e-01 L6_fnorm:6.2500e-01 L7_fnorm:6.3281e-01 L8_fnorm:6.2500e-01 L9_fnorm:6.3281e-01 L10_fnorm:6.3281e-01 L11_fnorm:6.3672e-01 L12_fnorm:6.4844e-01 L1_l1linf:1.8262e-01 L2_l1linf:1.7969e-01 L3_l1linf:1.7773e-01 L4_l1linf:1.7676e-01 L5_l1linf:1.7969e-01 L6_l1linf:1.7676e-01 L7_l1linf:1.7676e-01 L8_l1linf:1.7773e-01 L9_l1linf:1.7676e-01 L10_l1linf:1.8164e-01 L11_l1linf:1.8457e-01 L12_l1linf:1.9727e-01 L1_spectral:7.2551e-03 L2_spectral:7.3014e-03 L3_spectral:7.3244e-03 L4_spectral:7.3413e-03 L5_spectral:7.3256e-03 L6_spectral:7.2738e-03 L7_spectral:7.2698e-03 L8_spectral:7.5565e-03 L9_spectral:7.3793e-03 L10_spectral:7.3201e-03 L11_spectral:7.4464e-03 L12_spectral:7.5818e-03 train_time:125676ms step_avg:78.55ms +[2025-09-11 08:58:52] [Rank 0] PRINT: step:1600/10000 
val_loss:4.9240 total_sharp:9.6966e-05 L1_sharp:5.5391e-03 L2_sharp:2.6024e-03 L3_sharp:6.5285e-03 L4_sharp:1.7016e-03 L5_sharp:6.5695e-03 L6_sharp:7.8124e-03 L7_sharp:8.7389e-03 L8_sharp:1.4333e-02 L9_sharp:1.2035e-02 L10_sharp:1.6919e-02 L11_sharp:3.7828e-02 L12_sharp:5.2353e-01 total_fnorm:1.8500e+02 total_l1_linf:4.0755e+05 total_spectral:9.2500e+01 L1_fnorm:6.2891e-01 L2_fnorm:6.2891e-01 L3_fnorm:6.2500e-01 L4_fnorm:6.2500e-01 L5_fnorm:6.2500e-01 L6_fnorm:6.2500e-01 L7_fnorm:6.3281e-01 L8_fnorm:6.2500e-01 L9_fnorm:6.3281e-01 L10_fnorm:6.3281e-01 L11_fnorm:6.3672e-01 L12_fnorm:6.4844e-01 L1_l1linf:1.8262e-01 L2_l1linf:1.7969e-01 L3_l1linf:1.7773e-01 L4_l1linf:1.7676e-01 L5_l1linf:1.7969e-01 L6_l1linf:1.7676e-01 L7_l1linf:1.7676e-01 L8_l1linf:1.7773e-01 L9_l1linf:1.7676e-01 L10_l1linf:1.8164e-01 L11_l1linf:1.8457e-01 L12_l1linf:1.9727e-01 L1_spectral:7.2551e-03 L2_spectral:7.3014e-03 L3_spectral:7.3244e-03 L4_spectral:7.3413e-03 L5_spectral:7.3256e-03 L6_spectral:7.2738e-03 L7_spectral:7.2698e-03 L8_spectral:7.5565e-03 L9_spectral:7.3793e-03 L10_spectral:7.3201e-03 L11_spectral:7.4464e-03 L12_spectral:7.5818e-03 train_time:125676ms step_avg:78.55ms +[2025-09-11 08:58:55] [Rank 0] step:1601/10000 train_time:128499ms step_avg:80.26ms +[2025-09-11 08:58:55] [Rank 0] step:1601/10000 train_time:128499ms step_avg:80.26ms +[2025-09-11 08:58:55] [Rank 0] step:1621/10000 train_time:129152ms step_avg:79.67ms +[2025-09-11 08:58:55] [Rank 0] step:1621/10000 train_time:129152ms step_avg:79.67ms +[2025-09-11 08:58:56] [Rank 0] step:1641/10000 train_time:129803ms step_avg:79.10ms +[2025-09-11 08:58:56] [Rank 0] step:1641/10000 train_time:129803ms step_avg:79.10ms +[2025-09-11 08:58:57] [Rank 0] step:1661/10000 train_time:130453ms step_avg:78.54ms +[2025-09-11 08:58:57] [Rank 0] step:1661/10000 train_time:130453ms step_avg:78.54ms +[2025-09-11 08:58:57] [Rank 0] step:1681/10000 train_time:131102ms step_avg:77.99ms +[2025-09-11 08:58:57] [Rank 0] step:1681/10000 
train_time:131102ms step_avg:77.99ms +[2025-09-11 08:58:58] [Rank 0] step:1701/10000 train_time:131751ms step_avg:77.46ms +[2025-09-11 08:58:58] [Rank 0] step:1701/10000 train_time:131751ms step_avg:77.46ms +[2025-09-11 08:58:59] [Rank 0] step:1721/10000 train_time:132400ms step_avg:76.93ms +[2025-09-11 08:58:59] [Rank 0] step:1721/10000 train_time:132400ms step_avg:76.93ms +[2025-09-11 08:58:59] [Rank 0] step:1741/10000 train_time:133050ms step_avg:76.42ms +[2025-09-11 08:58:59] [Rank 0] step:1741/10000 train_time:133050ms step_avg:76.42ms +[2025-09-11 08:59:00] [Rank 0] step:1761/10000 train_time:133699ms step_avg:75.92ms +[2025-09-11 08:59:00] [Rank 0] step:1761/10000 train_time:133699ms step_avg:75.92ms +[2025-09-11 08:59:01] [Rank 0] step:1781/10000 train_time:134348ms step_avg:75.43ms +[2025-09-11 08:59:01] [Rank 0] step:1781/10000 train_time:134348ms step_avg:75.43ms +[2025-09-11 08:59:01] [Rank 0] step:1801/10000 train_time:134998ms step_avg:74.96ms +[2025-09-11 08:59:01] [Rank 0] step:1801/10000 train_time:134998ms step_avg:74.96ms +[2025-09-11 08:59:02] [Rank 0] step:1821/10000 train_time:135647ms step_avg:74.49ms +[2025-09-11 08:59:02] [Rank 0] step:1821/10000 train_time:135647ms step_avg:74.49ms +[2025-09-11 08:59:03] [Rank 0] step:1841/10000 train_time:136296ms step_avg:74.03ms +[2025-09-11 08:59:03] [Rank 0] step:1841/10000 train_time:136296ms step_avg:74.03ms +[2025-09-11 08:59:03] [Rank 0] step:1861/10000 train_time:136946ms step_avg:73.59ms +[2025-09-11 08:59:03] [Rank 0] step:1861/10000 train_time:136946ms step_avg:73.59ms +[2025-09-11 08:59:04] [Rank 0] step:1881/10000 train_time:137595ms step_avg:73.15ms +[2025-09-11 08:59:04] [Rank 0] step:1881/10000 train_time:137595ms step_avg:73.15ms +[2025-09-11 08:59:05] [Rank 0] step:1901/10000 train_time:138244ms step_avg:72.72ms +[2025-09-11 08:59:05] [Rank 0] step:1901/10000 train_time:138244ms step_avg:72.72ms +[2025-09-11 08:59:05] [Rank 0] step:1921/10000 train_time:138893ms step_avg:72.30ms 
+[2025-09-11 08:59:05] [Rank 0] step:1921/10000 train_time:138893ms step_avg:72.30ms +[2025-09-11 08:59:06] [Rank 0] step:1941/10000 train_time:139542ms step_avg:71.89ms +[2025-09-11 08:59:06] [Rank 0] step:1941/10000 train_time:139542ms step_avg:71.89ms +[2025-09-11 08:59:06] [Rank 0] step:1961/10000 train_time:140190ms step_avg:71.49ms +[2025-09-11 08:59:06] [Rank 0] step:1961/10000 train_time:140190ms step_avg:71.49ms +[2025-09-11 08:59:07] [Rank 0] step:1981/10000 train_time:140839ms step_avg:71.10ms +[2025-09-11 08:59:07] [Rank 0] step:1981/10000 train_time:140839ms step_avg:71.10ms +[2025-09-11 08:59:08] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 08:59:08] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 08:59:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 08:59:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 08:59:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 08:59:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 08:59:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:59:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:59:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 08:59:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 08:59:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 08:59:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... 
+[2025-09-11 08:59:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 08:59:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 08:59:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 08:59:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 08:59:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 08:59:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 08:59:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 08:59:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 08:59:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 08:59:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 08:59:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 08:59:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 08:59:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 08:59:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 08:59:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 08:59:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 08:59:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 08:59:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 08:59:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 08:59:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 08:59:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 08:59:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 08:59:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 08:59:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 08:59:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 08:59:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 08:59:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 08:59:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 08:59:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 08:59:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 08:59:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 08:59:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 08:59:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 08:59:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 08:59:20] [Rank 0] PRINT: step:2000/10000 val_loss:4.7710 total_sharp:6.3256e-05 L1_sharp:4.5439e-03 L2_sharp:3.0045e-03 L3_sharp:1.2083e-03 L4_sharp:1.6223e-03 L5_sharp:3.2120e-03 L6_sharp:6.1880e-03 L7_sharp:7.3343e-03 L8_sharp:1.4921e-02 L9_sharp:1.2234e-02 L10_sharp:1.6353e-02 L11_sharp:2.8023e-02 L12_sharp:1.5958e-01 total_fnorm:1.9700e+02 total_l1_linf:4.4442e+05 total_spectral:9.8500e+01 L1_fnorm:6.3281e-01 L2_fnorm:6.2891e-01 L3_fnorm:6.2891e-01 L4_fnorm:6.2891e-01 L5_fnorm:6.2891e-01 L6_fnorm:6.2891e-01 L7_fnorm:6.3281e-01 L8_fnorm:6.2891e-01 L9_fnorm:6.3281e-01 L10_fnorm:6.3281e-01 L11_fnorm:6.3672e-01 L12_fnorm:6.4844e-01 L1_l1linf:1.8164e-01 L2_l1linf:1.7480e-01 L3_l1linf:1.7383e-01 L4_l1linf:1.7383e-01 L5_l1linf:1.7480e-01 L6_l1linf:1.7285e-01 L7_l1linf:1.7188e-01 L8_l1linf:1.7383e-01 L9_l1linf:1.7285e-01 L10_l1linf:1.7285e-01 L11_l1linf:1.7871e-01 L12_l1linf:1.8848e-01 L1_spectral:7.4281e-03 L2_spectral:7.4694e-03 L3_spectral:7.4374e-03 L4_spectral:7.4570e-03 L5_spectral:7.4293e-03 L6_spectral:7.3871e-03 L7_spectral:7.3984e-03 L8_spectral:7.7036e-03 L9_spectral:7.4800e-03 L10_spectral:7.4058e-03 L11_spectral:7.5094e-03 L12_spectral:7.7865e-03 train_time:141470ms step_avg:70.73ms +[2025-09-11 08:59:20] [Rank 0] PRINT: step:2000/10000 val_loss:4.7710 total_sharp:6.3256e-05 L1_sharp:4.5439e-03 L2_sharp:3.0045e-03 L3_sharp:1.2083e-03 L4_sharp:1.6223e-03 L5_sharp:3.2120e-03 L6_sharp:6.1880e-03 L7_sharp:7.3343e-03 L8_sharp:1.4921e-02 L9_sharp:1.2234e-02 L10_sharp:1.6353e-02 L11_sharp:2.8023e-02 L12_sharp:1.5958e-01 total_fnorm:1.9700e+02 total_l1_linf:4.4442e+05 total_spectral:9.8500e+01 L1_fnorm:6.3281e-01 L2_fnorm:6.2891e-01 L3_fnorm:6.2891e-01 L4_fnorm:6.2891e-01 L5_fnorm:6.2891e-01 L6_fnorm:6.2891e-01 L7_fnorm:6.3281e-01 L8_fnorm:6.2891e-01 L9_fnorm:6.3281e-01 L10_fnorm:6.3281e-01 L11_fnorm:6.3672e-01 L12_fnorm:6.4844e-01 L1_l1linf:1.8164e-01 L2_l1linf:1.7480e-01 L3_l1linf:1.7383e-01 L4_l1linf:1.7383e-01 L5_l1linf:1.7480e-01 
L6_l1linf:1.7285e-01 L7_l1linf:1.7188e-01 L8_l1linf:1.7383e-01 L9_l1linf:1.7285e-01 L10_l1linf:1.7285e-01 L11_l1linf:1.7871e-01 L12_l1linf:1.8848e-01 L1_spectral:7.4281e-03 L2_spectral:7.4694e-03 L3_spectral:7.4374e-03 L4_spectral:7.4570e-03 L5_spectral:7.4293e-03 L6_spectral:7.3871e-03 L7_spectral:7.3984e-03 L8_spectral:7.7036e-03 L9_spectral:7.4800e-03 L10_spectral:7.4058e-03 L11_spectral:7.5094e-03 L12_spectral:7.7865e-03 train_time:141470ms step_avg:70.73ms +[2025-09-11 08:59:23] [Rank 0] step:2001/10000 train_time:144149ms step_avg:72.04ms +[2025-09-11 08:59:23] [Rank 0] step:2001/10000 train_time:144149ms step_avg:72.04ms +[2025-09-11 08:59:24] [Rank 0] step:2021/10000 train_time:144801ms step_avg:71.65ms +[2025-09-11 08:59:24] [Rank 0] step:2021/10000 train_time:144801ms step_avg:71.65ms +[2025-09-11 08:59:24] [Rank 0] step:2041/10000 train_time:145449ms step_avg:71.26ms +[2025-09-11 08:59:24] [Rank 0] step:2041/10000 train_time:145449ms step_avg:71.26ms +[2025-09-11 08:59:25] [Rank 0] step:2061/10000 train_time:146097ms step_avg:70.89ms +[2025-09-11 08:59:25] [Rank 0] step:2061/10000 train_time:146097ms step_avg:70.89ms +[2025-09-11 08:59:26] [Rank 0] step:2081/10000 train_time:146746ms step_avg:70.52ms +[2025-09-11 08:59:26] [Rank 0] step:2081/10000 train_time:146746ms step_avg:70.52ms +[2025-09-11 08:59:26] [Rank 0] step:2101/10000 train_time:147394ms step_avg:70.15ms +[2025-09-11 08:59:26] [Rank 0] step:2101/10000 train_time:147394ms step_avg:70.15ms +[2025-09-11 08:59:27] [Rank 0] step:2121/10000 train_time:148041ms step_avg:69.80ms +[2025-09-11 08:59:27] [Rank 0] step:2121/10000 train_time:148041ms step_avg:69.80ms +[2025-09-11 08:59:28] [Rank 0] step:2141/10000 train_time:148698ms step_avg:69.45ms +[2025-09-11 08:59:28] [Rank 0] step:2141/10000 train_time:148698ms step_avg:69.45ms +[2025-09-11 08:59:28] [Rank 0] step:2161/10000 train_time:149346ms step_avg:69.11ms +[2025-09-11 08:59:28] [Rank 0] step:2161/10000 train_time:149346ms step_avg:69.11ms 
+[2025-09-11 08:59:29] [Rank 0] step:2181/10000 train_time:149994ms step_avg:68.77ms +[2025-09-11 08:59:29] [Rank 0] step:2181/10000 train_time:149994ms step_avg:68.77ms +[2025-09-11 08:59:29] [Rank 0] step:2201/10000 train_time:150643ms step_avg:68.44ms +[2025-09-11 08:59:29] [Rank 0] step:2201/10000 train_time:150643ms step_avg:68.44ms +[2025-09-11 08:59:30] [Rank 0] step:2221/10000 train_time:151291ms step_avg:68.12ms +[2025-09-11 08:59:30] [Rank 0] step:2221/10000 train_time:151291ms step_avg:68.12ms +[2025-09-11 08:59:31] [Rank 0] step:2241/10000 train_time:151950ms step_avg:67.80ms +[2025-09-11 08:59:31] [Rank 0] step:2241/10000 train_time:151950ms step_avg:67.80ms +[2025-09-11 08:59:31] [Rank 0] step:2261/10000 train_time:152611ms step_avg:67.50ms +[2025-09-11 08:59:31] [Rank 0] step:2261/10000 train_time:152611ms step_avg:67.50ms +[2025-09-11 08:59:32] [Rank 0] step:2281/10000 train_time:153273ms step_avg:67.20ms +[2025-09-11 08:59:32] [Rank 0] step:2281/10000 train_time:153273ms step_avg:67.20ms +[2025-09-11 08:59:33] [Rank 0] step:2301/10000 train_time:153934ms step_avg:66.90ms +[2025-09-11 08:59:33] [Rank 0] step:2301/10000 train_time:153934ms step_avg:66.90ms +[2025-09-11 08:59:33] [Rank 0] step:2321/10000 train_time:154595ms step_avg:66.61ms +[2025-09-11 08:59:33] [Rank 0] step:2321/10000 train_time:154595ms step_avg:66.61ms +[2025-09-11 08:59:34] [Rank 0] step:2341/10000 train_time:155257ms step_avg:66.32ms +[2025-09-11 08:59:34] [Rank 0] step:2341/10000 train_time:155257ms step_avg:66.32ms +[2025-09-11 08:59:35] [Rank 0] step:2361/10000 train_time:155919ms step_avg:66.04ms +[2025-09-11 08:59:35] [Rank 0] step:2361/10000 train_time:155919ms step_avg:66.04ms +[2025-09-11 08:59:35] [Rank 0] step:2381/10000 train_time:156579ms step_avg:65.76ms +[2025-09-11 08:59:35] [Rank 0] step:2381/10000 train_time:156579ms step_avg:65.76ms +[2025-09-11 08:59:36] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 08:59:36] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 08:59:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 08:59:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 08:59:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 08:59:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 08:59:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:59:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 08:59:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 08:59:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 08:59:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 08:59:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 08:59:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 08:59:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 08:59:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 08:59:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 08:59:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 08:59:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 08:59:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 08:59:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 08:59:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 08:59:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 08:59:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 08:59:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 08:59:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 08:59:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 08:59:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 08:59:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 08:59:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 08:59:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 08:59:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 08:59:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 08:59:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 08:59:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 08:59:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 08:59:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 08:59:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 08:59:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 08:59:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 08:59:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 08:59:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 08:59:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 08:59:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 08:59:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 08:59:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 08:59:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 08:59:49] [Rank 0] PRINT: step:2400/10000 val_loss:4.6449 total_sharp:6.4310e-05 L1_sharp:3.3720e-03 L2_sharp:2.3492e-03 L3_sharp:3.9500e-05 L4_sharp:2.4551e-03 L5_sharp:5.1663e-03 L6_sharp:5.9754e-03 L7_sharp:8.8445e-03 L8_sharp:1.2442e-02 L9_sharp:1.1996e-02 L10_sharp:1.5286e-02 L11_sharp:2.8662e-02 L12_sharp:1.8910e-01 total_fnorm:1.8500e+02 total_l1_linf:4.0346e+05 total_spectral:9.2500e+01 L1_fnorm:6.3281e-01 L2_fnorm:6.2891e-01 L3_fnorm:6.2891e-01 L4_fnorm:6.2891e-01 L5_fnorm:6.2891e-01 L6_fnorm:6.2891e-01 L7_fnorm:6.3281e-01 L8_fnorm:6.2891e-01 L9_fnorm:6.3281e-01 L10_fnorm:6.3281e-01 L11_fnorm:6.3281e-01 L12_fnorm:6.4844e-01 L1_l1linf:1.7578e-01 L2_l1linf:1.7090e-01 L3_l1linf:1.6992e-01 L4_l1linf:1.6992e-01 L5_l1linf:1.6895e-01 L6_l1linf:1.6992e-01 L7_l1linf:1.7090e-01 L8_l1linf:1.6797e-01 L9_l1linf:1.6895e-01 L10_l1linf:1.6699e-01 L11_l1linf:1.6992e-01 L12_l1linf:1.8359e-01 L1_spectral:7.5232e-03 L2_spectral:7.5405e-03 L3_spectral:7.5159e-03 L4_spectral:7.5766e-03 L5_spectral:7.4850e-03 L6_spectral:7.4817e-03 L7_spectral:7.4986e-03 L8_spectral:7.8158e-03 L9_spectral:7.5765e-03 L10_spectral:7.5425e-03 L11_spectral:7.6443e-03 L12_spectral:7.8869e-03 train_time:157222ms step_avg:65.51ms +[2025-09-11 08:59:49] [Rank 0] PRINT: step:2400/10000 
val_loss:4.6449 total_sharp:6.4310e-05 L1_sharp:3.3720e-03 L2_sharp:2.3492e-03 L3_sharp:3.9500e-05 L4_sharp:2.4551e-03 L5_sharp:5.1663e-03 L6_sharp:5.9754e-03 L7_sharp:8.8445e-03 L8_sharp:1.2442e-02 L9_sharp:1.1996e-02 L10_sharp:1.5286e-02 L11_sharp:2.8662e-02 L12_sharp:1.8910e-01 total_fnorm:1.8500e+02 total_l1_linf:4.0346e+05 total_spectral:9.2500e+01 L1_fnorm:6.3281e-01 L2_fnorm:6.2891e-01 L3_fnorm:6.2891e-01 L4_fnorm:6.2891e-01 L5_fnorm:6.2891e-01 L6_fnorm:6.2891e-01 L7_fnorm:6.3281e-01 L8_fnorm:6.2891e-01 L9_fnorm:6.3281e-01 L10_fnorm:6.3281e-01 L11_fnorm:6.3281e-01 L12_fnorm:6.4844e-01 L1_l1linf:1.7578e-01 L2_l1linf:1.7090e-01 L3_l1linf:1.6992e-01 L4_l1linf:1.6992e-01 L5_l1linf:1.6895e-01 L6_l1linf:1.6992e-01 L7_l1linf:1.7090e-01 L8_l1linf:1.6797e-01 L9_l1linf:1.6895e-01 L10_l1linf:1.6699e-01 L11_l1linf:1.6992e-01 L12_l1linf:1.8359e-01 L1_spectral:7.5232e-03 L2_spectral:7.5405e-03 L3_spectral:7.5159e-03 L4_spectral:7.5766e-03 L5_spectral:7.4850e-03 L6_spectral:7.4817e-03 L7_spectral:7.4986e-03 L8_spectral:7.8158e-03 L9_spectral:7.5765e-03 L10_spectral:7.5425e-03 L11_spectral:7.6443e-03 L12_spectral:7.8869e-03 train_time:157222ms step_avg:65.51ms +[2025-09-11 08:59:51] [Rank 0] step:2401/10000 train_time:159893ms step_avg:66.59ms +[2025-09-11 08:59:51] [Rank 0] step:2401/10000 train_time:159893ms step_avg:66.59ms +[2025-09-11 08:59:52] [Rank 0] step:2421/10000 train_time:160580ms step_avg:66.33ms +[2025-09-11 08:59:52] [Rank 0] step:2421/10000 train_time:160580ms step_avg:66.33ms +[2025-09-11 08:59:53] [Rank 0] step:2441/10000 train_time:161244ms step_avg:66.06ms +[2025-09-11 08:59:53] [Rank 0] step:2441/10000 train_time:161244ms step_avg:66.06ms +[2025-09-11 08:59:53] [Rank 0] step:2461/10000 train_time:161907ms step_avg:65.79ms +[2025-09-11 08:59:53] [Rank 0] step:2461/10000 train_time:161907ms step_avg:65.79ms +[2025-09-11 08:59:54] [Rank 0] step:2481/10000 train_time:162571ms step_avg:65.53ms +[2025-09-11 08:59:54] [Rank 0] step:2481/10000 
train_time:162571ms step_avg:65.53ms +[2025-09-11 08:59:55] [Rank 0] step:2501/10000 train_time:163232ms step_avg:65.27ms +[2025-09-11 08:59:55] [Rank 0] step:2501/10000 train_time:163232ms step_avg:65.27ms +[2025-09-11 08:59:55] [Rank 0] step:2521/10000 train_time:163894ms step_avg:65.01ms +[2025-09-11 08:59:55] [Rank 0] step:2521/10000 train_time:163894ms step_avg:65.01ms +[2025-09-11 08:59:56] [Rank 0] step:2541/10000 train_time:164556ms step_avg:64.76ms +[2025-09-11 08:59:56] [Rank 0] step:2541/10000 train_time:164556ms step_avg:64.76ms +[2025-09-11 08:59:57] [Rank 0] step:2561/10000 train_time:165218ms step_avg:64.51ms +[2025-09-11 08:59:57] [Rank 0] step:2561/10000 train_time:165218ms step_avg:64.51ms +[2025-09-11 08:59:57] [Rank 0] step:2581/10000 train_time:165881ms step_avg:64.27ms +[2025-09-11 08:59:57] [Rank 0] step:2581/10000 train_time:165881ms step_avg:64.27ms +[2025-09-11 08:59:58] [Rank 0] step:2601/10000 train_time:166542ms step_avg:64.03ms +[2025-09-11 08:59:58] [Rank 0] step:2601/10000 train_time:166542ms step_avg:64.03ms +[2025-09-11 08:59:59] [Rank 0] step:2621/10000 train_time:167204ms step_avg:63.79ms +[2025-09-11 08:59:59] [Rank 0] step:2621/10000 train_time:167204ms step_avg:63.79ms +[2025-09-11 08:59:59] [Rank 0] step:2641/10000 train_time:167866ms step_avg:63.56ms +[2025-09-11 08:59:59] [Rank 0] step:2641/10000 train_time:167866ms step_avg:63.56ms +[2025-09-11 09:00:00] [Rank 0] step:2661/10000 train_time:168528ms step_avg:63.33ms +[2025-09-11 09:00:00] [Rank 0] step:2661/10000 train_time:168528ms step_avg:63.33ms +[2025-09-11 09:00:01] [Rank 0] step:2681/10000 train_time:169189ms step_avg:63.11ms +[2025-09-11 09:00:01] [Rank 0] step:2681/10000 train_time:169189ms step_avg:63.11ms +[2025-09-11 09:00:01] [Rank 0] step:2701/10000 train_time:169910ms step_avg:62.91ms +[2025-09-11 09:00:01] [Rank 0] step:2701/10000 train_time:169910ms step_avg:62.91ms +[2025-09-11 09:00:02] [Rank 0] step:2721/10000 train_time:170629ms step_avg:62.71ms 
+[2025-09-11 09:00:02] [Rank 0] step:2721/10000 train_time:170629ms step_avg:62.71ms +[2025-09-11 09:00:03] [Rank 0] step:2741/10000 train_time:171299ms step_avg:62.50ms +[2025-09-11 09:00:03] [Rank 0] step:2741/10000 train_time:171299ms step_avg:62.50ms +[2025-09-11 09:00:03] [Rank 0] step:2761/10000 train_time:171962ms step_avg:62.28ms +[2025-09-11 09:00:03] [Rank 0] step:2761/10000 train_time:171962ms step_avg:62.28ms +[2025-09-11 09:00:04] [Rank 0] step:2781/10000 train_time:172623ms step_avg:62.07ms +[2025-09-11 09:00:04] [Rank 0] step:2781/10000 train_time:172623ms step_avg:62.07ms +[2025-09-11 09:00:05] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 09:00:05] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 09:00:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 09:00:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 09:00:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 09:00:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 09:00:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:00:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:00:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 09:00:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 09:00:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 09:00:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 09:00:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 09:00:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 09:00:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 09:00:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 09:00:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 09:00:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 09:00:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 09:00:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 09:00:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 09:00:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 09:00:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 09:00:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 09:00:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 09:00:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 09:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 09:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 09:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 09:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 09:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 09:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 09:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 09:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 09:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 09:00:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 09:00:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 09:00:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 09:00:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 09:00:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 09:00:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 09:00:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 09:00:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 09:00:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 09:00:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:00:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:00:17] [Rank 0] PRINT: step:2800/10000 val_loss:4.5847 total_sharp:6.5580e-05 L1_sharp:1.8638e-03 L2_sharp:2.4880e-04 L3_sharp:1.5657e-03 L4_sharp:1.9764e-03 L5_sharp:4.3772e-03 L6_sharp:5.5846e-03 L7_sharp:8.1057e-03 L8_sharp:1.4986e-02 L9_sharp:1.2801e-02 L10_sharp:1.5137e-02 L11_sharp:2.3638e-02 L12_sharp:3.4520e-01 total_fnorm:1.8900e+02 total_l1_linf:4.1165e+05 total_spectral:9.4000e+01 L1_fnorm:6.3672e-01 L2_fnorm:6.2891e-01 L3_fnorm:6.2891e-01 L4_fnorm:6.2891e-01 L5_fnorm:6.2891e-01 L6_fnorm:6.3281e-01 L7_fnorm:6.3281e-01 L8_fnorm:6.2891e-01 L9_fnorm:6.3281e-01 L10_fnorm:6.3281e-01 L11_fnorm:6.3672e-01 L12_fnorm:6.4844e-01 L1_l1linf:1.7480e-01 L2_l1linf:1.6992e-01 L3_l1linf:1.6895e-01 L4_l1linf:1.6699e-01 L5_l1linf:1.6699e-01 L6_l1linf:1.7090e-01 L7_l1linf:1.6797e-01 L8_l1linf:1.6699e-01 L9_l1linf:1.6504e-01 L10_l1linf:1.6309e-01 L11_l1linf:1.6699e-01 L12_l1linf:1.7871e-01 L1_spectral:7.6539e-03 L2_spectral:7.6452e-03 L3_spectral:7.6102e-03 L4_spectral:7.6370e-03 L5_spectral:7.6186e-03 L6_spectral:7.5931e-03 L7_spectral:7.6042e-03 L8_spectral:7.8976e-03 L9_spectral:7.6182e-03 L10_spectral:7.6313e-03 L11_spectral:7.7185e-03 L12_spectral:7.9159e-03 train_time:173268ms step_avg:61.88ms +[2025-09-11 09:00:17] [Rank 0] PRINT: step:2800/10000 val_loss:4.5847 total_sharp:6.5580e-05 L1_sharp:1.8638e-03 L2_sharp:2.4880e-04 L3_sharp:1.5657e-03 L4_sharp:1.9764e-03 L5_sharp:4.3772e-03 L6_sharp:5.5846e-03 L7_sharp:8.1057e-03 L8_sharp:1.4986e-02 L9_sharp:1.2801e-02 L10_sharp:1.5137e-02 L11_sharp:2.3638e-02 L12_sharp:3.4520e-01 total_fnorm:1.8900e+02 total_l1_linf:4.1165e+05 total_spectral:9.4000e+01 L1_fnorm:6.3672e-01 L2_fnorm:6.2891e-01 L3_fnorm:6.2891e-01 L4_fnorm:6.2891e-01 L5_fnorm:6.2891e-01 L6_fnorm:6.3281e-01 L7_fnorm:6.3281e-01 L8_fnorm:6.2891e-01 L9_fnorm:6.3281e-01 L10_fnorm:6.3281e-01 L11_fnorm:6.3672e-01 L12_fnorm:6.4844e-01 L1_l1linf:1.7480e-01 L2_l1linf:1.6992e-01 L3_l1linf:1.6895e-01 L4_l1linf:1.6699e-01 L5_l1linf:1.6699e-01 
L6_l1linf:1.7090e-01 L7_l1linf:1.6797e-01 L8_l1linf:1.6699e-01 L9_l1linf:1.6504e-01 L10_l1linf:1.6309e-01 L11_l1linf:1.6699e-01 L12_l1linf:1.7871e-01 L1_spectral:7.6539e-03 L2_spectral:7.6452e-03 L3_spectral:7.6102e-03 L4_spectral:7.6370e-03 L5_spectral:7.6186e-03 L6_spectral:7.5931e-03 L7_spectral:7.6042e-03 L8_spectral:7.8976e-03 L9_spectral:7.6182e-03 L10_spectral:7.6313e-03 L11_spectral:7.7185e-03 L12_spectral:7.9159e-03 train_time:173268ms step_avg:61.88ms +[2025-09-11 09:00:20] [Rank 0] step:2801/10000 train_time:175957ms step_avg:62.82ms +[2025-09-11 09:00:20] [Rank 0] step:2801/10000 train_time:175957ms step_avg:62.82ms +[2025-09-11 09:00:21] [Rank 0] step:2821/10000 train_time:176625ms step_avg:62.61ms +[2025-09-11 09:00:21] [Rank 0] step:2821/10000 train_time:176625ms step_avg:62.61ms +[2025-09-11 09:00:21] [Rank 0] step:2841/10000 train_time:177289ms step_avg:62.40ms +[2025-09-11 09:00:21] [Rank 0] step:2841/10000 train_time:177289ms step_avg:62.40ms +[2025-09-11 09:00:22] [Rank 0] step:2861/10000 train_time:177951ms step_avg:62.20ms +[2025-09-11 09:00:22] [Rank 0] step:2861/10000 train_time:177951ms step_avg:62.20ms +[2025-09-11 09:00:23] [Rank 0] step:2881/10000 train_time:178614ms step_avg:62.00ms +[2025-09-11 09:00:23] [Rank 0] step:2881/10000 train_time:178614ms step_avg:62.00ms +[2025-09-11 09:00:23] [Rank 0] step:2901/10000 train_time:179276ms step_avg:61.80ms +[2025-09-11 09:00:23] [Rank 0] step:2901/10000 train_time:179276ms step_avg:61.80ms +[2025-09-11 09:00:24] [Rank 0] step:2921/10000 train_time:179938ms step_avg:61.60ms +[2025-09-11 09:00:24] [Rank 0] step:2921/10000 train_time:179938ms step_avg:61.60ms +[2025-09-11 09:00:25] [Rank 0] step:2941/10000 train_time:180601ms step_avg:61.41ms +[2025-09-11 09:00:25] [Rank 0] step:2941/10000 train_time:180601ms step_avg:61.41ms +[2025-09-11 09:00:25] [Rank 0] step:2961/10000 train_time:181263ms step_avg:61.22ms +[2025-09-11 09:00:25] [Rank 0] step:2961/10000 train_time:181263ms step_avg:61.22ms 
+[2025-09-11 09:00:26] [Rank 0] step:2981/10000 train_time:181928ms step_avg:61.03ms +[2025-09-11 09:00:26] [Rank 0] step:2981/10000 train_time:181928ms step_avg:61.03ms +[2025-09-11 09:00:27] [Rank 0] step:3001/10000 train_time:182593ms step_avg:60.84ms +[2025-09-11 09:00:27] [Rank 0] step:3001/10000 train_time:182593ms step_avg:60.84ms +[2025-09-11 09:00:27] [Rank 0] step:3021/10000 train_time:183258ms step_avg:60.66ms +[2025-09-11 09:00:27] [Rank 0] step:3021/10000 train_time:183258ms step_avg:60.66ms +[2025-09-11 09:00:28] [Rank 0] step:3041/10000 train_time:183923ms step_avg:60.48ms +[2025-09-11 09:00:28] [Rank 0] step:3041/10000 train_time:183923ms step_avg:60.48ms +[2025-09-11 09:00:29] [Rank 0] step:3061/10000 train_time:184588ms step_avg:60.30ms +[2025-09-11 09:00:29] [Rank 0] step:3061/10000 train_time:184588ms step_avg:60.30ms +[2025-09-11 09:00:29] [Rank 0] step:3081/10000 train_time:185254ms step_avg:60.13ms +[2025-09-11 09:00:29] [Rank 0] step:3081/10000 train_time:185254ms step_avg:60.13ms +[2025-09-11 09:00:30] [Rank 0] step:3101/10000 train_time:185919ms step_avg:59.95ms +[2025-09-11 09:00:30] [Rank 0] step:3101/10000 train_time:185919ms step_avg:59.95ms +[2025-09-11 09:00:31] [Rank 0] step:3121/10000 train_time:186585ms step_avg:59.78ms +[2025-09-11 09:00:31] [Rank 0] step:3121/10000 train_time:186585ms step_avg:59.78ms +[2025-09-11 09:00:31] [Rank 0] step:3141/10000 train_time:187250ms step_avg:59.61ms +[2025-09-11 09:00:31] [Rank 0] step:3141/10000 train_time:187250ms step_avg:59.61ms +[2025-09-11 09:00:32] [Rank 0] step:3161/10000 train_time:187915ms step_avg:59.45ms +[2025-09-11 09:00:32] [Rank 0] step:3161/10000 train_time:187915ms step_avg:59.45ms +[2025-09-11 09:00:33] [Rank 0] step:3181/10000 train_time:188581ms step_avg:59.28ms +[2025-09-11 09:00:33] [Rank 0] step:3181/10000 train_time:188581ms step_avg:59.28ms +[2025-09-11 09:00:33] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 09:00:33] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 09:00:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 09:00:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 09:00:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 09:00:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 09:00:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:00:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:00:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 09:00:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 09:00:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 09:00:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 09:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 09:00:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 09:00:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 09:00:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 09:00:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 09:00:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 09:00:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 09:00:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 09:00:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 09:00:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 09:00:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 09:00:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 09:00:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 09:00:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 09:00:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 09:00:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 09:00:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 09:00:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 09:00:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 09:00:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 09:00:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 09:00:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 09:00:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 09:00:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 09:00:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 09:00:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 09:00:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 09:00:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 09:00:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 09:00:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 09:00:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 09:00:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 09:00:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:00:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:00:45] [Rank 0] PRINT: step:3200/10000 val_loss:4.5108 total_sharp:4.4401e-05 L1_sharp:5.2081e-03 L2_sharp:4.4526e-03 L3_sharp:2.8813e-03 L4_sharp:2.5383e-03 L5_sharp:4.6096e-03 L6_sharp:4.3826e-03 L7_sharp:6.1068e-03 L8_sharp:1.1082e-02 L9_sharp:9.3747e-03 L10_sharp:1.1419e-02 L11_sharp:2.1465e-02 L12_sharp:3.0350e-01 total_fnorm:1.9900e+02 total_l1_linf:4.5261e+05 total_spectral:9.9500e+01 L1_fnorm:6.4062e-01 L2_fnorm:6.3281e-01 L3_fnorm:6.2891e-01 L4_fnorm:6.3281e-01 L5_fnorm:6.3281e-01 L6_fnorm:6.3281e-01 L7_fnorm:6.3281e-01 L8_fnorm:6.2891e-01 L9_fnorm:6.3281e-01 L10_fnorm:6.3281e-01 L11_fnorm:6.3281e-01 L12_fnorm:6.4453e-01 L1_l1linf:1.7578e-01 L2_l1linf:1.6992e-01 L3_l1linf:1.6504e-01 L4_l1linf:1.6895e-01 L5_l1linf:1.6406e-01 L6_l1linf:1.6699e-01 L7_l1linf:1.6895e-01 L8_l1linf:1.6895e-01 L9_l1linf:1.6309e-01 L10_l1linf:1.6113e-01 L11_l1linf:1.6016e-01 L12_l1linf:1.7676e-01 L1_spectral:7.6962e-03 L2_spectral:7.7041e-03 L3_spectral:7.7443e-03 L4_spectral:7.6998e-03 L5_spectral:7.6740e-03 L6_spectral:7.7055e-03 L7_spectral:7.7126e-03 L8_spectral:7.9104e-03 L9_spectral:7.6965e-03 L10_spectral:7.7044e-03 L11_spectral:7.8338e-03 L12_spectral:8.0140e-03 train_time:189227ms step_avg:59.13ms +[2025-09-11 09:00:45] [Rank 0] PRINT: step:3200/10000 
val_loss:4.5108 total_sharp:4.4401e-05 L1_sharp:5.2081e-03 L2_sharp:4.4526e-03 L3_sharp:2.8813e-03 L4_sharp:2.5383e-03 L5_sharp:4.6096e-03 L6_sharp:4.3826e-03 L7_sharp:6.1068e-03 L8_sharp:1.1082e-02 L9_sharp:9.3747e-03 L10_sharp:1.1419e-02 L11_sharp:2.1465e-02 L12_sharp:3.0350e-01 total_fnorm:1.9900e+02 total_l1_linf:4.5261e+05 total_spectral:9.9500e+01 L1_fnorm:6.4062e-01 L2_fnorm:6.3281e-01 L3_fnorm:6.2891e-01 L4_fnorm:6.3281e-01 L5_fnorm:6.3281e-01 L6_fnorm:6.3281e-01 L7_fnorm:6.3281e-01 L8_fnorm:6.2891e-01 L9_fnorm:6.3281e-01 L10_fnorm:6.3281e-01 L11_fnorm:6.3281e-01 L12_fnorm:6.4453e-01 L1_l1linf:1.7578e-01 L2_l1linf:1.6992e-01 L3_l1linf:1.6504e-01 L4_l1linf:1.6895e-01 L5_l1linf:1.6406e-01 L6_l1linf:1.6699e-01 L7_l1linf:1.6895e-01 L8_l1linf:1.6895e-01 L9_l1linf:1.6309e-01 L10_l1linf:1.6113e-01 L11_l1linf:1.6016e-01 L12_l1linf:1.7676e-01 L1_spectral:7.6962e-03 L2_spectral:7.7041e-03 L3_spectral:7.7443e-03 L4_spectral:7.6998e-03 L5_spectral:7.6740e-03 L6_spectral:7.7055e-03 L7_spectral:7.7126e-03 L8_spectral:7.9104e-03 L9_spectral:7.6965e-03 L10_spectral:7.7044e-03 L11_spectral:7.8338e-03 L12_spectral:8.0140e-03 train_time:189227ms step_avg:59.13ms +[2025-09-11 09:00:48] [Rank 0] step:3201/10000 train_time:191874ms step_avg:59.94ms +[2025-09-11 09:00:48] [Rank 0] step:3201/10000 train_time:191874ms step_avg:59.94ms +[2025-09-11 09:00:49] [Rank 0] step:3221/10000 train_time:192567ms step_avg:59.78ms +[2025-09-11 09:00:49] [Rank 0] step:3221/10000 train_time:192567ms step_avg:59.78ms +[2025-09-11 09:00:50] [Rank 0] step:3241/10000 train_time:193235ms step_avg:59.62ms +[2025-09-11 09:00:50] [Rank 0] step:3241/10000 train_time:193235ms step_avg:59.62ms +[2025-09-11 09:00:50] [Rank 0] step:3261/10000 train_time:193900ms step_avg:59.46ms +[2025-09-11 09:00:50] [Rank 0] step:3261/10000 train_time:193900ms step_avg:59.46ms +[2025-09-11 09:00:51] [Rank 0] step:3281/10000 train_time:194566ms step_avg:59.30ms +[2025-09-11 09:00:51] [Rank 0] step:3281/10000 
train_time:194566ms step_avg:59.30ms +[2025-09-11 09:00:52] [Rank 0] step:3301/10000 train_time:195232ms step_avg:59.14ms +[2025-09-11 09:00:52] [Rank 0] step:3301/10000 train_time:195232ms step_avg:59.14ms +[2025-09-11 09:00:52] [Rank 0] step:3321/10000 train_time:195897ms step_avg:58.99ms +[2025-09-11 09:00:52] [Rank 0] step:3321/10000 train_time:195897ms step_avg:58.99ms +[2025-09-11 09:00:53] [Rank 0] step:3341/10000 train_time:196563ms step_avg:58.83ms +[2025-09-11 09:00:53] [Rank 0] step:3341/10000 train_time:196563ms step_avg:58.83ms +[2025-09-11 09:00:54] [Rank 0] step:3361/10000 train_time:197229ms step_avg:58.68ms +[2025-09-11 09:00:54] [Rank 0] step:3361/10000 train_time:197229ms step_avg:58.68ms +[2025-09-11 09:00:54] [Rank 0] step:3381/10000 train_time:197894ms step_avg:58.53ms +[2025-09-11 09:00:54] [Rank 0] step:3381/10000 train_time:197894ms step_avg:58.53ms +[2025-09-11 09:00:55] [Rank 0] step:3401/10000 train_time:198559ms step_avg:58.38ms +[2025-09-11 09:00:55] [Rank 0] step:3401/10000 train_time:198559ms step_avg:58.38ms +[2025-09-11 09:00:56] [Rank 0] step:3421/10000 train_time:199224ms step_avg:58.24ms +[2025-09-11 09:00:56] [Rank 0] step:3421/10000 train_time:199224ms step_avg:58.24ms +[2025-09-11 09:00:56] [Rank 0] step:3441/10000 train_time:199890ms step_avg:58.09ms +[2025-09-11 09:00:56] [Rank 0] step:3441/10000 train_time:199890ms step_avg:58.09ms +[2025-09-11 09:00:57] [Rank 0] step:3461/10000 train_time:200555ms step_avg:57.95ms +[2025-09-11 09:00:57] [Rank 0] step:3461/10000 train_time:200555ms step_avg:57.95ms +[2025-09-11 09:00:58] [Rank 0] step:3481/10000 train_time:201220ms step_avg:57.81ms +[2025-09-11 09:00:58] [Rank 0] step:3481/10000 train_time:201220ms step_avg:57.81ms +[2025-09-11 09:00:58] [Rank 0] step:3501/10000 train_time:201885ms step_avg:57.66ms +[2025-09-11 09:00:58] [Rank 0] step:3501/10000 train_time:201885ms step_avg:57.66ms +[2025-09-11 09:00:59] [Rank 0] step:3521/10000 train_time:202550ms step_avg:57.53ms 
+[2025-09-11 09:00:59] [Rank 0] step:3521/10000 train_time:202550ms step_avg:57.53ms +[2025-09-11 09:00:59] [Rank 0] step:3541/10000 train_time:203215ms step_avg:57.39ms +[2025-09-11 09:00:59] [Rank 0] step:3541/10000 train_time:203215ms step_avg:57.39ms +[2025-09-11 09:01:00] [Rank 0] step:3561/10000 train_time:203879ms step_avg:57.25ms +[2025-09-11 09:01:00] [Rank 0] step:3561/10000 train_time:203879ms step_avg:57.25ms +[2025-09-11 09:01:01] [Rank 0] step:3581/10000 train_time:204544ms step_avg:57.12ms +[2025-09-11 09:01:01] [Rank 0] step:3581/10000 train_time:204544ms step_avg:57.12ms +[2025-09-11 09:01:01] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 09:01:01] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 09:01:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 09:01:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 09:01:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 09:01:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 09:01:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:01:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:01:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 09:01:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 09:01:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 09:01:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 09:01:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 09:01:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 09:01:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 09:01:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 09:01:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 09:01:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 09:01:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 09:01:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 09:01:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 09:01:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 09:01:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 09:01:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 09:01:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 09:01:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 09:01:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 09:01:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 09:01:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 09:01:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 09:01:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 09:01:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 09:01:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 09:01:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 09:01:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 09:01:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 09:01:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 09:01:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 09:01:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 09:01:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 09:01:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 09:01:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 09:01:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 09:01:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 09:01:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:01:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:01:17] [Rank 0] PRINT: step:3600/10000 val_loss:4.4759 total_sharp:6.0316e-05 L1_sharp:3.3537e-03 L2_sharp:1.3483e-03 L3_sharp:5.6581e-04 L4_sharp:4.7706e-04 L5_sharp:3.6602e-03 L6_sharp:5.5366e-03 L7_sharp:7.1061e-03 L8_sharp:1.3380e-02 L9_sharp:9.8232e-03 L10_sharp:1.2256e-02 L11_sharp:2.3776e-02 L12_sharp:5.0595e-01 total_fnorm:1.8000e+02 total_l1_linf:3.9526e+05 total_spectral:9.0000e+01 L1_fnorm:6.4062e-01 L2_fnorm:6.3281e-01 L3_fnorm:6.2891e-01 L4_fnorm:6.3281e-01 L5_fnorm:6.3281e-01 L6_fnorm:6.3281e-01 L7_fnorm:6.3281e-01 L8_fnorm:6.2891e-01 L9_fnorm:6.3281e-01 L10_fnorm:6.3281e-01 L11_fnorm:6.3281e-01 L12_fnorm:6.4844e-01 L1_l1linf:1.6895e-01 L2_l1linf:1.6699e-01 L3_l1linf:1.6113e-01 L4_l1linf:1.6602e-01 L5_l1linf:1.6406e-01 L6_l1linf:1.6309e-01 L7_l1linf:1.6602e-01 L8_l1linf:1.6211e-01 L9_l1linf:1.6113e-01 L10_l1linf:1.5820e-01 L11_l1linf:1.5820e-01 L12_l1linf:1.7090e-01 L1_spectral:7.8307e-03 L2_spectral:7.7985e-03 L3_spectral:7.7565e-03 L4_spectral:7.7566e-03 L5_spectral:7.7300e-03 L6_spectral:7.7742e-03 L7_spectral:7.7202e-03 L8_spectral:7.9192e-03 L9_spectral:7.8165e-03 L10_spectral:7.7768e-03 L11_spectral:7.8554e-03 L12_spectral:8.0468e-03 train_time:205191ms step_avg:57.00ms +[2025-09-11 09:01:17] [Rank 0] PRINT: step:3600/10000 val_loss:4.4759 total_sharp:6.0316e-05 L1_sharp:3.3537e-03 L2_sharp:1.3483e-03 L3_sharp:5.6581e-04 L4_sharp:4.7706e-04 L5_sharp:3.6602e-03 L6_sharp:5.5366e-03 L7_sharp:7.1061e-03 L8_sharp:1.3380e-02 L9_sharp:9.8232e-03 L10_sharp:1.2256e-02 L11_sharp:2.3776e-02 L12_sharp:5.0595e-01 total_fnorm:1.8000e+02 total_l1_linf:3.9526e+05 total_spectral:9.0000e+01 L1_fnorm:6.4062e-01 L2_fnorm:6.3281e-01 L3_fnorm:6.2891e-01 L4_fnorm:6.3281e-01 L5_fnorm:6.3281e-01 L6_fnorm:6.3281e-01 L7_fnorm:6.3281e-01 L8_fnorm:6.2891e-01 L9_fnorm:6.3281e-01 L10_fnorm:6.3281e-01 L11_fnorm:6.3281e-01 L12_fnorm:6.4844e-01 L1_l1linf:1.6895e-01 L2_l1linf:1.6699e-01 L3_l1linf:1.6113e-01 L4_l1linf:1.6602e-01 L5_l1linf:1.6406e-01 
L6_l1linf:1.6309e-01 L7_l1linf:1.6602e-01 L8_l1linf:1.6211e-01 L9_l1linf:1.6113e-01 L10_l1linf:1.5820e-01 L11_l1linf:1.5820e-01 L12_l1linf:1.7090e-01 L1_spectral:7.8307e-03 L2_spectral:7.7985e-03 L3_spectral:7.7565e-03 L4_spectral:7.7566e-03 L5_spectral:7.7300e-03 L6_spectral:7.7742e-03 L7_spectral:7.7202e-03 L8_spectral:7.9192e-03 L9_spectral:7.8165e-03 L10_spectral:7.7768e-03 L11_spectral:7.8554e-03 L12_spectral:8.0468e-03 train_time:205191ms step_avg:57.00ms +[2025-09-11 09:01:19] [Rank 0] step:3601/10000 train_time:207903ms step_avg:57.73ms +[2025-09-11 09:01:19] [Rank 0] step:3601/10000 train_time:207903ms step_avg:57.73ms +[2025-09-11 09:01:20] [Rank 0] step:3621/10000 train_time:208869ms step_avg:57.68ms +[2025-09-11 09:01:20] [Rank 0] step:3621/10000 train_time:208869ms step_avg:57.68ms +[2025-09-11 09:01:21] [Rank 0] step:3641/10000 train_time:209538ms step_avg:57.55ms +[2025-09-11 09:01:21] [Rank 0] step:3641/10000 train_time:209538ms step_avg:57.55ms +[2025-09-11 09:01:22] [Rank 0] step:3661/10000 train_time:210203ms step_avg:57.42ms +[2025-09-11 09:01:22] [Rank 0] step:3661/10000 train_time:210203ms step_avg:57.42ms +[2025-09-11 09:01:22] [Rank 0] step:3681/10000 train_time:211020ms step_avg:57.33ms +[2025-09-11 09:01:22] [Rank 0] step:3681/10000 train_time:211020ms step_avg:57.33ms +[2025-09-11 09:01:23] [Rank 0] step:3701/10000 train_time:211812ms step_avg:57.23ms +[2025-09-11 09:01:23] [Rank 0] step:3701/10000 train_time:211812ms step_avg:57.23ms +[2025-09-11 09:01:24] [Rank 0] step:3721/10000 train_time:212487ms step_avg:57.10ms +[2025-09-11 09:01:24] [Rank 0] step:3721/10000 train_time:212487ms step_avg:57.10ms +[2025-09-11 09:01:25] [Rank 0] step:3741/10000 train_time:213163ms step_avg:56.98ms +[2025-09-11 09:01:25] [Rank 0] step:3741/10000 train_time:213163ms step_avg:56.98ms +[2025-09-11 09:01:25] [Rank 0] step:3761/10000 train_time:213839ms step_avg:56.86ms +[2025-09-11 09:01:25] [Rank 0] step:3761/10000 train_time:213839ms step_avg:56.86ms 
+[2025-09-11 09:01:26] [Rank 0] step:3781/10000 train_time:214514ms step_avg:56.73ms +[2025-09-11 09:01:26] [Rank 0] step:3781/10000 train_time:214514ms step_avg:56.73ms +[2025-09-11 09:01:27] [Rank 0] step:3801/10000 train_time:215190ms step_avg:56.61ms +[2025-09-11 09:01:27] [Rank 0] step:3801/10000 train_time:215190ms step_avg:56.61ms +[2025-09-11 09:01:27] [Rank 0] step:3821/10000 train_time:215866ms step_avg:56.49ms +[2025-09-11 09:01:27] [Rank 0] step:3821/10000 train_time:215866ms step_avg:56.49ms +[2025-09-11 09:01:28] [Rank 0] step:3841/10000 train_time:216541ms step_avg:56.38ms +[2025-09-11 09:01:28] [Rank 0] step:3841/10000 train_time:216541ms step_avg:56.38ms +[2025-09-11 09:01:29] [Rank 0] step:3861/10000 train_time:217217ms step_avg:56.26ms +[2025-09-11 09:01:29] [Rank 0] step:3861/10000 train_time:217217ms step_avg:56.26ms +[2025-09-11 09:01:29] [Rank 0] step:3881/10000 train_time:217892ms step_avg:56.14ms +[2025-09-11 09:01:29] [Rank 0] step:3881/10000 train_time:217892ms step_avg:56.14ms +[2025-09-11 09:01:30] [Rank 0] step:3901/10000 train_time:218568ms step_avg:56.03ms +[2025-09-11 09:01:30] [Rank 0] step:3901/10000 train_time:218568ms step_avg:56.03ms +[2025-09-11 09:01:31] [Rank 0] step:3921/10000 train_time:219244ms step_avg:55.92ms +[2025-09-11 09:01:31] [Rank 0] step:3921/10000 train_time:219244ms step_avg:55.92ms +[2025-09-11 09:01:31] [Rank 0] step:3941/10000 train_time:219920ms step_avg:55.80ms +[2025-09-11 09:01:31] [Rank 0] step:3941/10000 train_time:219920ms step_avg:55.80ms +[2025-09-11 09:01:32] [Rank 0] step:3961/10000 train_time:220596ms step_avg:55.69ms +[2025-09-11 09:01:32] [Rank 0] step:3961/10000 train_time:220596ms step_avg:55.69ms +[2025-09-11 09:01:33] [Rank 0] step:3981/10000 train_time:221272ms step_avg:55.58ms +[2025-09-11 09:01:33] [Rank 0] step:3981/10000 train_time:221272ms step_avg:55.58ms +[2025-09-11 09:01:33] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 09:01:33] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 09:01:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 09:01:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 09:01:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 09:01:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 09:01:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:01:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:01:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 09:01:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 09:01:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 09:01:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 09:01:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 09:01:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 09:01:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 09:01:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 09:01:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 09:01:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 09:01:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 09:01:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 09:01:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 09:01:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 09:01:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 09:01:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 09:01:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 09:01:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 09:01:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 09:01:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 09:01:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 09:01:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 09:01:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 09:01:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 09:01:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 09:01:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 09:01:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 09:01:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 09:01:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 09:01:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 09:01:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 09:01:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 09:01:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 09:01:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 09:01:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 09:01:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 09:01:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:01:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:01:46] [Rank 0] PRINT: step:4000/10000 val_loss:4.4257 total_sharp:4.6973e-05 L1_sharp:2.5592e-03 L2_sharp:9.7383e-04 L3_sharp:2.7950e-04 L4_sharp:3.9715e-03 L5_sharp:3.1809e-03 L6_sharp:4.1591e-03 L7_sharp:5.5134e-03 L8_sharp:9.5169e-03 L9_sharp:9.3000e-03 L10_sharp:1.2211e-02 L11_sharp:1.8579e-02 L12_sharp:2.7176e-01 total_fnorm:2.0000e+02 total_l1_linf:4.4237e+05 total_spectral:1.0000e+02 L1_fnorm:6.3672e-01 L2_fnorm:6.2500e-01 L3_fnorm:6.2500e-01 L4_fnorm:6.2891e-01 L5_fnorm:6.2891e-01 L6_fnorm:6.3281e-01 L7_fnorm:6.3281e-01 L8_fnorm:6.2500e-01 L9_fnorm:6.3281e-01 L10_fnorm:6.3281e-01 L11_fnorm:6.3281e-01 L12_fnorm:6.4453e-01 L1_l1linf:1.6797e-01 L2_l1linf:1.6406e-01 L3_l1linf:1.5918e-01 L4_l1linf:1.6211e-01 L5_l1linf:1.6016e-01 L6_l1linf:1.6211e-01 L7_l1linf:1.6211e-01 L8_l1linf:1.5918e-01 L9_l1linf:1.5820e-01 L10_l1linf:1.5527e-01 L11_l1linf:1.5723e-01 L12_l1linf:1.7090e-01 L1_spectral:7.8556e-03 L2_spectral:7.8145e-03 L3_spectral:7.8297e-03 L4_spectral:7.8930e-03 L5_spectral:7.8083e-03 L6_spectral:7.8200e-03 L7_spectral:7.7784e-03 L8_spectral:7.9222e-03 L9_spectral:7.8649e-03 L10_spectral:7.8225e-03 L11_spectral:7.9032e-03 L12_spectral:8.0515e-03 train_time:221928ms step_avg:55.48ms +[2025-09-11 09:01:46] [Rank 0] PRINT: step:4000/10000 
val_loss:4.4257 total_sharp:4.6973e-05 L1_sharp:2.5592e-03 L2_sharp:9.7383e-04 L3_sharp:2.7950e-04 L4_sharp:3.9715e-03 L5_sharp:3.1809e-03 L6_sharp:4.1591e-03 L7_sharp:5.5134e-03 L8_sharp:9.5169e-03 L9_sharp:9.3000e-03 L10_sharp:1.2211e-02 L11_sharp:1.8579e-02 L12_sharp:2.7176e-01 total_fnorm:2.0000e+02 total_l1_linf:4.4237e+05 total_spectral:1.0000e+02 L1_fnorm:6.3672e-01 L2_fnorm:6.2500e-01 L3_fnorm:6.2500e-01 L4_fnorm:6.2891e-01 L5_fnorm:6.2891e-01 L6_fnorm:6.3281e-01 L7_fnorm:6.3281e-01 L8_fnorm:6.2500e-01 L9_fnorm:6.3281e-01 L10_fnorm:6.3281e-01 L11_fnorm:6.3281e-01 L12_fnorm:6.4453e-01 L1_l1linf:1.6797e-01 L2_l1linf:1.6406e-01 L3_l1linf:1.5918e-01 L4_l1linf:1.6211e-01 L5_l1linf:1.6016e-01 L6_l1linf:1.6211e-01 L7_l1linf:1.6211e-01 L8_l1linf:1.5918e-01 L9_l1linf:1.5820e-01 L10_l1linf:1.5527e-01 L11_l1linf:1.5723e-01 L12_l1linf:1.7090e-01 L1_spectral:7.8556e-03 L2_spectral:7.8145e-03 L3_spectral:7.8297e-03 L4_spectral:7.8930e-03 L5_spectral:7.8083e-03 L6_spectral:7.8200e-03 L7_spectral:7.7784e-03 L8_spectral:7.9222e-03 L9_spectral:7.8649e-03 L10_spectral:7.8225e-03 L11_spectral:7.9032e-03 L12_spectral:8.0515e-03 train_time:221928ms step_avg:55.48ms +[2025-09-11 09:01:48] [Rank 0] step:4001/10000 train_time:224654ms step_avg:56.15ms +[2025-09-11 09:01:48] [Rank 0] step:4001/10000 train_time:224654ms step_avg:56.15ms +[2025-09-11 09:01:49] [Rank 0] step:4021/10000 train_time:225337ms step_avg:56.04ms +[2025-09-11 09:01:49] [Rank 0] step:4021/10000 train_time:225337ms step_avg:56.04ms +[2025-09-11 09:01:50] [Rank 0] step:4041/10000 train_time:226016ms step_avg:55.93ms +[2025-09-11 09:01:50] [Rank 0] step:4041/10000 train_time:226016ms step_avg:55.93ms +[2025-09-11 09:01:50] [Rank 0] step:4061/10000 train_time:226691ms step_avg:55.82ms +[2025-09-11 09:01:50] [Rank 0] step:4061/10000 train_time:226691ms step_avg:55.82ms +[2025-09-11 09:01:51] [Rank 0] step:4081/10000 train_time:227369ms step_avg:55.71ms +[2025-09-11 09:01:51] [Rank 0] step:4081/10000 
train_time:227369ms step_avg:55.71ms +[2025-09-11 09:01:52] [Rank 0] step:4101/10000 train_time:228045ms step_avg:55.61ms +[2025-09-11 09:01:52] [Rank 0] step:4101/10000 train_time:228045ms step_avg:55.61ms +[2025-09-11 09:01:52] [Rank 0] step:4121/10000 train_time:228722ms step_avg:55.50ms +[2025-09-11 09:01:52] [Rank 0] step:4121/10000 train_time:228722ms step_avg:55.50ms +[2025-09-11 09:01:53] [Rank 0] step:4141/10000 train_time:229398ms step_avg:55.40ms +[2025-09-11 09:01:53] [Rank 0] step:4141/10000 train_time:229398ms step_avg:55.40ms +[2025-09-11 09:01:54] [Rank 0] step:4161/10000 train_time:230075ms step_avg:55.29ms +[2025-09-11 09:01:54] [Rank 0] step:4161/10000 train_time:230075ms step_avg:55.29ms +[2025-09-11 09:01:54] [Rank 0] step:4181/10000 train_time:230752ms step_avg:55.19ms +[2025-09-11 09:01:54] [Rank 0] step:4181/10000 train_time:230752ms step_avg:55.19ms +[2025-09-11 09:01:55] [Rank 0] step:4201/10000 train_time:231428ms step_avg:55.09ms +[2025-09-11 09:01:55] [Rank 0] step:4201/10000 train_time:231428ms step_avg:55.09ms +[2025-09-11 09:01:56] [Rank 0] step:4221/10000 train_time:232104ms step_avg:54.99ms +[2025-09-11 09:01:56] [Rank 0] step:4221/10000 train_time:232104ms step_avg:54.99ms +[2025-09-11 09:01:57] [Rank 0] step:4241/10000 train_time:232781ms step_avg:54.89ms +[2025-09-11 09:01:57] [Rank 0] step:4241/10000 train_time:232781ms step_avg:54.89ms +[2025-09-11 09:01:57] [Rank 0] step:4261/10000 train_time:233458ms step_avg:54.79ms +[2025-09-11 09:01:57] [Rank 0] step:4261/10000 train_time:233458ms step_avg:54.79ms +[2025-09-11 09:01:58] [Rank 0] step:4281/10000 train_time:234135ms step_avg:54.69ms +[2025-09-11 09:01:58] [Rank 0] step:4281/10000 train_time:234135ms step_avg:54.69ms +[2025-09-11 09:01:59] [Rank 0] step:4301/10000 train_time:234814ms step_avg:54.60ms +[2025-09-11 09:01:59] [Rank 0] step:4301/10000 train_time:234814ms step_avg:54.60ms +[2025-09-11 09:01:59] [Rank 0] step:4321/10000 train_time:235489ms step_avg:54.50ms 
+[2025-09-11 09:01:59] [Rank 0] step:4321/10000 train_time:235489ms step_avg:54.50ms +[2025-09-11 09:02:00] [Rank 0] step:4341/10000 train_time:236170ms step_avg:54.40ms +[2025-09-11 09:02:00] [Rank 0] step:4341/10000 train_time:236170ms step_avg:54.40ms +[2025-09-11 09:02:01] [Rank 0] step:4361/10000 train_time:236845ms step_avg:54.31ms +[2025-09-11 09:02:01] [Rank 0] step:4361/10000 train_time:236845ms step_avg:54.31ms +[2025-09-11 09:02:01] [Rank 0] step:4381/10000 train_time:237522ms step_avg:54.22ms +[2025-09-11 09:02:01] [Rank 0] step:4381/10000 train_time:237522ms step_avg:54.22ms +[2025-09-11 09:02:02] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 09:02:02] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 09:02:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 09:02:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 09:02:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 09:02:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 09:02:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:02:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:02:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 09:02:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 09:02:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 09:02:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 09:02:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 09:02:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 09:02:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 09:02:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 09:02:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 09:02:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 09:02:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 09:02:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 09:02:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 09:02:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 09:02:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 09:02:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 09:02:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 09:02:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 09:02:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 09:02:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 09:02:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 09:02:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 09:02:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 09:02:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 09:02:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 09:02:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 09:02:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 09:02:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 09:02:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 09:02:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 09:02:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 09:02:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 09:02:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 09:02:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 09:02:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 09:02:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 09:02:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:02:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:02:17] [Rank 0] PRINT: step:4400/10000 val_loss:4.4213 total_sharp:4.3912e-05 L1_sharp:1.6739e-03 L2_sharp:-9.7591e-04 L3_sharp:5.4591e-04 L4_sharp:1.2756e-03 L5_sharp:3.1615e-03 L6_sharp:4.7851e-03 L7_sharp:5.3034e-03 L8_sharp:9.0458e-03 L9_sharp:8.1087e-03 L10_sharp:1.1566e-02 L11_sharp:1.8808e-02 L12_sharp:1.6042e-01 total_fnorm:1.8200e+02 total_l1_linf:3.9526e+05 total_spectral:9.1500e+01 L1_fnorm:6.3281e-01 L2_fnorm:6.2500e-01 L3_fnorm:6.2500e-01 L4_fnorm:6.2891e-01 L5_fnorm:6.2500e-01 L6_fnorm:6.2891e-01 L7_fnorm:6.3281e-01 L8_fnorm:6.2109e-01 L9_fnorm:6.3281e-01 L10_fnorm:6.2891e-01 L11_fnorm:6.3281e-01 L12_fnorm:6.4453e-01 L1_l1linf:1.6699e-01 L2_l1linf:1.6113e-01 L3_l1linf:1.5820e-01 L4_l1linf:1.5918e-01 L5_l1linf:1.5723e-01 L6_l1linf:1.5723e-01 L7_l1linf:1.5723e-01 L8_l1linf:1.5332e-01 L9_l1linf:1.5430e-01 L10_l1linf:1.5332e-01 L11_l1linf:1.5625e-01 L12_l1linf:1.6895e-01 L1_spectral:7.9200e-03 L2_spectral:7.8984e-03 L3_spectral:7.8749e-03 L4_spectral:7.8476e-03 L5_spectral:7.8488e-03 L6_spectral:7.8826e-03 L7_spectral:7.7924e-03 L8_spectral:7.9336e-03 L9_spectral:7.8937e-03 L10_spectral:7.8484e-03 L11_spectral:7.9636e-03 L12_spectral:8.0248e-03 train_time:238180ms step_avg:54.13ms +[2025-09-11 09:02:17] [Rank 0] PRINT: step:4400/10000 val_loss:4.4213 total_sharp:4.3912e-05 L1_sharp:1.6739e-03 L2_sharp:-9.7591e-04 L3_sharp:5.4591e-04 L4_sharp:1.2756e-03 L5_sharp:3.1615e-03 L6_sharp:4.7851e-03 L7_sharp:5.3034e-03 L8_sharp:9.0458e-03 L9_sharp:8.1087e-03 L10_sharp:1.1566e-02 L11_sharp:1.8808e-02 L12_sharp:1.6042e-01 total_fnorm:1.8200e+02 total_l1_linf:3.9526e+05 total_spectral:9.1500e+01 L1_fnorm:6.3281e-01 L2_fnorm:6.2500e-01 L3_fnorm:6.2500e-01 L4_fnorm:6.2891e-01 L5_fnorm:6.2500e-01 L6_fnorm:6.2891e-01 L7_fnorm:6.3281e-01 L8_fnorm:6.2109e-01 L9_fnorm:6.3281e-01 L10_fnorm:6.2891e-01 L11_fnorm:6.3281e-01 L12_fnorm:6.4453e-01 L1_l1linf:1.6699e-01 L2_l1linf:1.6113e-01 L3_l1linf:1.5820e-01 L4_l1linf:1.5918e-01 L5_l1linf:1.5723e-01 
L6_l1linf:1.5723e-01 L7_l1linf:1.5723e-01 L8_l1linf:1.5332e-01 L9_l1linf:1.5430e-01 L10_l1linf:1.5332e-01 L11_l1linf:1.5625e-01 L12_l1linf:1.6895e-01 L1_spectral:7.9200e-03 L2_spectral:7.8984e-03 L3_spectral:7.8749e-03 L4_spectral:7.8476e-03 L5_spectral:7.8488e-03 L6_spectral:7.8826e-03 L7_spectral:7.7924e-03 L8_spectral:7.9336e-03 L9_spectral:7.8937e-03 L10_spectral:7.8484e-03 L11_spectral:7.9636e-03 L12_spectral:8.0248e-03 train_time:238180ms step_avg:54.13ms +[2025-09-11 09:02:20] [Rank 0] step:4401/10000 train_time:240938ms step_avg:54.75ms +[2025-09-11 09:02:20] [Rank 0] step:4401/10000 train_time:240938ms step_avg:54.75ms +[2025-09-11 09:02:21] [Rank 0] step:4421/10000 train_time:241613ms step_avg:54.65ms +[2025-09-11 09:02:21] [Rank 0] step:4421/10000 train_time:241613ms step_avg:54.65ms +[2025-09-11 09:02:21] [Rank 0] step:4441/10000 train_time:242291ms step_avg:54.56ms +[2025-09-11 09:02:21] [Rank 0] step:4441/10000 train_time:242291ms step_avg:54.56ms +[2025-09-11 09:02:22] [Rank 0] step:4461/10000 train_time:242970ms step_avg:54.47ms +[2025-09-11 09:02:22] [Rank 0] step:4461/10000 train_time:242970ms step_avg:54.47ms +[2025-09-11 09:02:23] [Rank 0] step:4481/10000 train_time:243652ms step_avg:54.37ms +[2025-09-11 09:02:23] [Rank 0] step:4481/10000 train_time:243652ms step_avg:54.37ms +[2025-09-11 09:02:24] [Rank 0] step:4501/10000 train_time:244595ms step_avg:54.34ms +[2025-09-11 09:02:24] [Rank 0] step:4501/10000 train_time:244595ms step_avg:54.34ms +[2025-09-11 09:02:24] [Rank 0] step:4521/10000 train_time:245274ms step_avg:54.25ms +[2025-09-11 09:02:24] [Rank 0] step:4521/10000 train_time:245274ms step_avg:54.25ms +[2025-09-11 09:02:25] [Rank 0] step:4541/10000 train_time:245954ms step_avg:54.16ms +[2025-09-11 09:02:25] [Rank 0] step:4541/10000 train_time:245954ms step_avg:54.16ms +[2025-09-11 09:02:26] [Rank 0] step:4561/10000 train_time:246927ms step_avg:54.14ms +[2025-09-11 09:02:26] [Rank 0] step:4561/10000 train_time:246927ms step_avg:54.14ms 
+[2025-09-11 09:02:27] [Rank 0] step:4581/10000 train_time:247606ms step_avg:54.05ms +[2025-09-11 09:02:27] [Rank 0] step:4581/10000 train_time:247606ms step_avg:54.05ms +[2025-09-11 09:02:27] [Rank 0] step:4601/10000 train_time:248284ms step_avg:53.96ms +[2025-09-11 09:02:27] [Rank 0] step:4601/10000 train_time:248284ms step_avg:53.96ms +[2025-09-11 09:02:28] [Rank 0] step:4621/10000 train_time:248963ms step_avg:53.88ms +[2025-09-11 09:02:28] [Rank 0] step:4621/10000 train_time:248963ms step_avg:53.88ms +[2025-09-11 09:02:29] [Rank 0] step:4641/10000 train_time:249641ms step_avg:53.79ms +[2025-09-11 09:02:29] [Rank 0] step:4641/10000 train_time:249641ms step_avg:53.79ms +[2025-09-11 09:02:29] [Rank 0] step:4661/10000 train_time:250321ms step_avg:53.71ms +[2025-09-11 09:02:29] [Rank 0] step:4661/10000 train_time:250321ms step_avg:53.71ms +[2025-09-11 09:02:30] [Rank 0] step:4681/10000 train_time:250999ms step_avg:53.62ms +[2025-09-11 09:02:30] [Rank 0] step:4681/10000 train_time:250999ms step_avg:53.62ms +[2025-09-11 09:02:31] [Rank 0] step:4701/10000 train_time:251678ms step_avg:53.54ms +[2025-09-11 09:02:31] [Rank 0] step:4701/10000 train_time:251678ms step_avg:53.54ms +[2025-09-11 09:02:31] [Rank 0] step:4721/10000 train_time:252356ms step_avg:53.45ms +[2025-09-11 09:02:31] [Rank 0] step:4721/10000 train_time:252356ms step_avg:53.45ms +[2025-09-11 09:02:32] [Rank 0] step:4741/10000 train_time:253034ms step_avg:53.37ms +[2025-09-11 09:02:32] [Rank 0] step:4741/10000 train_time:253034ms step_avg:53.37ms +[2025-09-11 09:02:33] [Rank 0] step:4761/10000 train_time:253713ms step_avg:53.29ms +[2025-09-11 09:02:33] [Rank 0] step:4761/10000 train_time:253713ms step_avg:53.29ms +[2025-09-11 09:02:34] [Rank 0] step:4781/10000 train_time:254391ms step_avg:53.21ms +[2025-09-11 09:02:34] [Rank 0] step:4781/10000 train_time:254391ms step_avg:53.21ms +[2025-09-11 09:02:34] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 09:02:34] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 09:02:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 09:02:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 09:02:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 09:02:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 09:02:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:02:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:02:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 09:02:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 09:02:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 09:02:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 09:02:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 09:02:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 09:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 09:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 09:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 09:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 09:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 09:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 09:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 09:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 09:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 09:02:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 09:02:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 09:02:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 09:02:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 09:02:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 09:02:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 09:02:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 09:02:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 09:02:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 09:02:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 09:02:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 09:02:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 09:02:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 09:02:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 09:02:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 09:02:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 09:02:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 09:02:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 09:02:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 09:02:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 09:02:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 09:02:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:02:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:02:47] [Rank 0] PRINT: step:4800/10000 val_loss:4.3773 total_sharp:4.6384e-05 L1_sharp:2.1453e-03 L2_sharp:1.2783e-03 L3_sharp:2.2380e-03 L4_sharp:1.5462e-03 L5_sharp:3.1009e-03 L6_sharp:4.2081e-03 L7_sharp:5.4751e-03 L8_sharp:8.7538e-03 L9_sharp:9.3366e-03 L10_sharp:1.1565e-02 L11_sharp:1.7133e-02 L12_sharp:5.7805e-01 total_fnorm:1.9600e+02 total_l1_linf:4.4237e+05 total_spectral:9.8000e+01 L1_fnorm:6.3672e-01 L2_fnorm:6.2891e-01 L3_fnorm:6.2500e-01 L4_fnorm:6.2891e-01 L5_fnorm:6.2891e-01 L6_fnorm:6.3281e-01 L7_fnorm:6.3281e-01 L8_fnorm:6.2500e-01 L9_fnorm:6.3281e-01 L10_fnorm:6.3281e-01 L11_fnorm:6.2891e-01 L12_fnorm:6.4453e-01 L1_l1linf:1.6797e-01 L2_l1linf:1.6406e-01 L3_l1linf:1.5332e-01 L4_l1linf:1.6309e-01 L5_l1linf:1.5723e-01 L6_l1linf:1.5918e-01 L7_l1linf:1.6016e-01 L8_l1linf:1.5430e-01 L9_l1linf:1.5332e-01 L10_l1linf:1.5039e-01 L11_l1linf:1.5332e-01 L12_l1linf:1.6895e-01 L1_spectral:7.9272e-03 L2_spectral:7.9167e-03 L3_spectral:7.8866e-03 L4_spectral:7.9046e-03 L5_spectral:7.8916e-03 L6_spectral:7.8861e-03 L7_spectral:7.9102e-03 L8_spectral:7.9980e-03 L9_spectral:7.9367e-03 L10_spectral:7.9157e-03 L11_spectral:7.9907e-03 L12_spectral:8.0807e-03 train_time:255049ms step_avg:53.14ms +[2025-09-11 09:02:47] [Rank 0] PRINT: step:4800/10000 
val_loss:4.3773 total_sharp:4.6384e-05 L1_sharp:2.1453e-03 L2_sharp:1.2783e-03 L3_sharp:2.2380e-03 L4_sharp:1.5462e-03 L5_sharp:3.1009e-03 L6_sharp:4.2081e-03 L7_sharp:5.4751e-03 L8_sharp:8.7538e-03 L9_sharp:9.3366e-03 L10_sharp:1.1565e-02 L11_sharp:1.7133e-02 L12_sharp:5.7805e-01 total_fnorm:1.9600e+02 total_l1_linf:4.4237e+05 total_spectral:9.8000e+01 L1_fnorm:6.3672e-01 L2_fnorm:6.2891e-01 L3_fnorm:6.2500e-01 L4_fnorm:6.2891e-01 L5_fnorm:6.2891e-01 L6_fnorm:6.3281e-01 L7_fnorm:6.3281e-01 L8_fnorm:6.2500e-01 L9_fnorm:6.3281e-01 L10_fnorm:6.3281e-01 L11_fnorm:6.2891e-01 L12_fnorm:6.4453e-01 L1_l1linf:1.6797e-01 L2_l1linf:1.6406e-01 L3_l1linf:1.5332e-01 L4_l1linf:1.6309e-01 L5_l1linf:1.5723e-01 L6_l1linf:1.5918e-01 L7_l1linf:1.6016e-01 L8_l1linf:1.5430e-01 L9_l1linf:1.5332e-01 L10_l1linf:1.5039e-01 L11_l1linf:1.5332e-01 L12_l1linf:1.6895e-01 L1_spectral:7.9272e-03 L2_spectral:7.9167e-03 L3_spectral:7.8866e-03 L4_spectral:7.9046e-03 L5_spectral:7.8916e-03 L6_spectral:7.8861e-03 L7_spectral:7.9102e-03 L8_spectral:7.9980e-03 L9_spectral:7.9367e-03 L10_spectral:7.9157e-03 L11_spectral:7.9907e-03 L12_spectral:8.0807e-03 train_time:255049ms step_avg:53.14ms +[2025-09-11 09:02:50] [Rank 0] step:4801/10000 train_time:257787ms step_avg:53.69ms +[2025-09-11 09:02:50] [Rank 0] step:4801/10000 train_time:257787ms step_avg:53.69ms +[2025-09-11 09:02:50] [Rank 0] step:4821/10000 train_time:258466ms step_avg:53.61ms +[2025-09-11 09:02:50] [Rank 0] step:4821/10000 train_time:258466ms step_avg:53.61ms +[2025-09-11 09:02:51] [Rank 0] step:4841/10000 train_time:259146ms step_avg:53.53ms +[2025-09-11 09:02:51] [Rank 0] step:4841/10000 train_time:259146ms step_avg:53.53ms +[2025-09-11 09:02:52] [Rank 0] step:4861/10000 train_time:259825ms step_avg:53.45ms +[2025-09-11 09:02:52] [Rank 0] step:4861/10000 train_time:259825ms step_avg:53.45ms +[2025-09-11 09:02:52] [Rank 0] step:4881/10000 train_time:260503ms step_avg:53.37ms +[2025-09-11 09:02:52] [Rank 0] step:4881/10000 
train_time:260503ms step_avg:53.37ms +[2025-09-11 09:02:53] [Rank 0] step:4901/10000 train_time:261183ms step_avg:53.29ms +[2025-09-11 09:02:53] [Rank 0] step:4901/10000 train_time:261183ms step_avg:53.29ms +[2025-09-11 09:02:54] [Rank 0] step:4921/10000 train_time:261863ms step_avg:53.21ms +[2025-09-11 09:02:54] [Rank 0] step:4921/10000 train_time:261863ms step_avg:53.21ms +[2025-09-11 09:02:55] [Rank 0] step:4941/10000 train_time:262552ms step_avg:53.14ms +[2025-09-11 09:02:55] [Rank 0] step:4941/10000 train_time:262552ms step_avg:53.14ms +[2025-09-11 09:02:55] [Rank 0] step:4961/10000 train_time:263230ms step_avg:53.06ms +[2025-09-11 09:02:55] [Rank 0] step:4961/10000 train_time:263230ms step_avg:53.06ms +[2025-09-11 09:02:56] [Rank 0] step:4981/10000 train_time:263909ms step_avg:52.98ms +[2025-09-11 09:02:56] [Rank 0] step:4981/10000 train_time:263909ms step_avg:52.98ms +[2025-09-11 09:02:57] [Rank 0] step:5001/10000 train_time:264590ms step_avg:52.91ms +[2025-09-11 09:02:57] [Rank 0] step:5001/10000 train_time:264590ms step_avg:52.91ms +[2025-09-11 09:02:57] [Rank 0] step:5021/10000 train_time:265267ms step_avg:52.83ms +[2025-09-11 09:02:57] [Rank 0] step:5021/10000 train_time:265267ms step_avg:52.83ms +[2025-09-11 09:02:58] [Rank 0] step:5041/10000 train_time:265945ms step_avg:52.76ms +[2025-09-11 09:02:58] [Rank 0] step:5041/10000 train_time:265945ms step_avg:52.76ms +[2025-09-11 09:02:59] [Rank 0] step:5061/10000 train_time:266625ms step_avg:52.68ms +[2025-09-11 09:02:59] [Rank 0] step:5061/10000 train_time:266625ms step_avg:52.68ms +[2025-09-11 09:02:59] [Rank 0] step:5081/10000 train_time:267303ms step_avg:52.61ms +[2025-09-11 09:02:59] [Rank 0] step:5081/10000 train_time:267303ms step_avg:52.61ms +[2025-09-11 09:03:00] [Rank 0] step:5101/10000 train_time:267982ms step_avg:52.54ms +[2025-09-11 09:03:00] [Rank 0] step:5101/10000 train_time:267982ms step_avg:52.54ms +[2025-09-11 09:03:01] [Rank 0] step:5121/10000 train_time:268660ms step_avg:52.46ms 
+[2025-09-11 09:03:01] [Rank 0] step:5121/10000 train_time:268660ms step_avg:52.46ms +[2025-09-11 09:03:01] [Rank 0] step:5141/10000 train_time:269339ms step_avg:52.39ms +[2025-09-11 09:03:01] [Rank 0] step:5141/10000 train_time:269339ms step_avg:52.39ms +[2025-09-11 09:03:02] [Rank 0] step:5161/10000 train_time:270019ms step_avg:52.32ms +[2025-09-11 09:03:02] [Rank 0] step:5161/10000 train_time:270019ms step_avg:52.32ms +[2025-09-11 09:03:03] [Rank 0] step:5181/10000 train_time:270697ms step_avg:52.25ms +[2025-09-11 09:03:03] [Rank 0] step:5181/10000 train_time:270697ms step_avg:52.25ms +[2025-09-11 09:03:03] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 09:03:03] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 09:03:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 09:03:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 09:03:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 09:03:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 09:03:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:03:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:03:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 09:03:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 09:03:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 09:03:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 09:03:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 09:03:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 09:03:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 09:03:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 09:03:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 09:03:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 09:03:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 09:03:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 09:03:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 09:03:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 09:03:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 09:03:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 09:03:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 09:03:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 09:03:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 09:03:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 09:03:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 09:03:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 09:03:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 09:03:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 09:03:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 09:03:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 09:03:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 09:03:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 09:03:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 09:03:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 09:03:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 09:03:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 09:03:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 09:03:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 09:03:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 09:03:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 09:03:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:03:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:03:17] [Rank 0] PRINT: step:5200/10000 val_loss:4.3505 total_sharp:5.6001e-05 L1_sharp:1.7433e-03 L2_sharp:2.2795e-03 L3_sharp:5.3286e-05 L4_sharp:1.9464e-03 L5_sharp:3.2737e-03 L6_sharp:4.5331e-03 L7_sharp:5.3132e-03 L8_sharp:8.4696e-03 L9_sharp:9.1889e-03 L10_sharp:1.2996e-02 L11_sharp:2.2136e-02 L12_sharp:5.2048e-01 total_fnorm:1.7400e+02 total_l1_linf:3.6864e+05 total_spectral:8.7000e+01 L1_fnorm:6.4062e-01 L2_fnorm:6.2891e-01 L3_fnorm:6.2500e-01 L4_fnorm:6.2891e-01 L5_fnorm:6.2891e-01 L6_fnorm:6.3281e-01 L7_fnorm:6.3281e-01 L8_fnorm:6.2500e-01 L9_fnorm:6.3281e-01 L10_fnorm:6.2891e-01 L11_fnorm:6.2891e-01 L12_fnorm:6.4062e-01 L1_l1linf:1.6602e-01 L2_l1linf:1.6016e-01 L3_l1linf:1.5527e-01 L4_l1linf:1.5918e-01 L5_l1linf:1.5723e-01 L6_l1linf:1.5723e-01 L7_l1linf:1.5723e-01 L8_l1linf:1.5234e-01 L9_l1linf:1.5137e-01 L10_l1linf:1.5234e-01 L11_l1linf:1.4746e-01 L12_l1linf:1.6211e-01 L1_spectral:7.9469e-03 L2_spectral:7.9424e-03 L3_spectral:7.9476e-03 L4_spectral:7.9573e-03 L5_spectral:7.9437e-03 L6_spectral:7.9390e-03 L7_spectral:7.9451e-03 L8_spectral:8.1033e-03 L9_spectral:7.9458e-03 L10_spectral:8.0035e-03 L11_spectral:8.0197e-03 L12_spectral:8.1254e-03 train_time:271362ms step_avg:52.19ms +[2025-09-11 09:03:17] [Rank 0] PRINT: step:5200/10000 val_loss:4.3505 total_sharp:5.6001e-05 L1_sharp:1.7433e-03 L2_sharp:2.2795e-03 L3_sharp:5.3286e-05 L4_sharp:1.9464e-03 L5_sharp:3.2737e-03 L6_sharp:4.5331e-03 L7_sharp:5.3132e-03 L8_sharp:8.4696e-03 L9_sharp:9.1889e-03 L10_sharp:1.2996e-02 L11_sharp:2.2136e-02 L12_sharp:5.2048e-01 total_fnorm:1.7400e+02 total_l1_linf:3.6864e+05 total_spectral:8.7000e+01 L1_fnorm:6.4062e-01 L2_fnorm:6.2891e-01 L3_fnorm:6.2500e-01 L4_fnorm:6.2891e-01 L5_fnorm:6.2891e-01 L6_fnorm:6.3281e-01 L7_fnorm:6.3281e-01 L8_fnorm:6.2500e-01 L9_fnorm:6.3281e-01 L10_fnorm:6.2891e-01 L11_fnorm:6.2891e-01 L12_fnorm:6.4062e-01 L1_l1linf:1.6602e-01 L2_l1linf:1.6016e-01 L3_l1linf:1.5527e-01 L4_l1linf:1.5918e-01 L5_l1linf:1.5723e-01 
L6_l1linf:1.5723e-01 L7_l1linf:1.5723e-01 L8_l1linf:1.5234e-01 L9_l1linf:1.5137e-01 L10_l1linf:1.5234e-01 L11_l1linf:1.4746e-01 L12_l1linf:1.6211e-01 L1_spectral:7.9469e-03 L2_spectral:7.9424e-03 L3_spectral:7.9476e-03 L4_spectral:7.9573e-03 L5_spectral:7.9437e-03 L6_spectral:7.9390e-03 L7_spectral:7.9451e-03 L8_spectral:8.1033e-03 L9_spectral:7.9458e-03 L10_spectral:8.0035e-03 L11_spectral:8.0197e-03 L12_spectral:8.1254e-03 train_time:271362ms step_avg:52.19ms +[2025-09-11 09:03:20] [Rank 0] step:5201/10000 train_time:274110ms step_avg:52.70ms +[2025-09-11 09:03:20] [Rank 0] step:5201/10000 train_time:274110ms step_avg:52.70ms +[2025-09-11 09:03:21] [Rank 0] step:5221/10000 train_time:274801ms step_avg:52.63ms +[2025-09-11 09:03:21] [Rank 0] step:5221/10000 train_time:274801ms step_avg:52.63ms +[2025-09-11 09:03:22] [Rank 0] step:5241/10000 train_time:275489ms step_avg:52.56ms +[2025-09-11 09:03:22] [Rank 0] step:5241/10000 train_time:275489ms step_avg:52.56ms +[2025-09-11 09:03:22] [Rank 0] step:5261/10000 train_time:276178ms step_avg:52.50ms +[2025-09-11 09:03:22] [Rank 0] step:5261/10000 train_time:276178ms step_avg:52.50ms +[2025-09-11 09:03:23] [Rank 0] step:5281/10000 train_time:276866ms step_avg:52.43ms +[2025-09-11 09:03:23] [Rank 0] step:5281/10000 train_time:276866ms step_avg:52.43ms +[2025-09-11 09:03:24] [Rank 0] step:5301/10000 train_time:277554ms step_avg:52.36ms +[2025-09-11 09:03:24] [Rank 0] step:5301/10000 train_time:277554ms step_avg:52.36ms +[2025-09-11 09:03:24] [Rank 0] step:5321/10000 train_time:278242ms step_avg:52.29ms +[2025-09-11 09:03:24] [Rank 0] step:5321/10000 train_time:278242ms step_avg:52.29ms +[2025-09-11 09:03:25] [Rank 0] step:5341/10000 train_time:278929ms step_avg:52.22ms +[2025-09-11 09:03:25] [Rank 0] step:5341/10000 train_time:278929ms step_avg:52.22ms +[2025-09-11 09:03:26] [Rank 0] step:5361/10000 train_time:279617ms step_avg:52.16ms +[2025-09-11 09:03:26] [Rank 0] step:5361/10000 train_time:279617ms step_avg:52.16ms 
+[2025-09-11 09:03:27] [Rank 0] step:5381/10000 train_time:280572ms step_avg:52.14ms +[2025-09-11 09:03:27] [Rank 0] step:5381/10000 train_time:280572ms step_avg:52.14ms +[2025-09-11 09:03:27] [Rank 0] step:5401/10000 train_time:281258ms step_avg:52.08ms +[2025-09-11 09:03:27] [Rank 0] step:5401/10000 train_time:281258ms step_avg:52.08ms +[2025-09-11 09:03:28] [Rank 0] step:5421/10000 train_time:281947ms step_avg:52.01ms +[2025-09-11 09:03:28] [Rank 0] step:5421/10000 train_time:281947ms step_avg:52.01ms +[2025-09-11 09:03:29] [Rank 0] step:5441/10000 train_time:282795ms step_avg:51.97ms +[2025-09-11 09:03:29] [Rank 0] step:5441/10000 train_time:282795ms step_avg:51.97ms +[2025-09-11 09:03:30] [Rank 0] step:5461/10000 train_time:283612ms step_avg:51.93ms +[2025-09-11 09:03:30] [Rank 0] step:5461/10000 train_time:283612ms step_avg:51.93ms +[2025-09-11 09:03:30] [Rank 0] step:5481/10000 train_time:284301ms step_avg:51.87ms +[2025-09-11 09:03:30] [Rank 0] step:5481/10000 train_time:284301ms step_avg:51.87ms +[2025-09-11 09:03:31] [Rank 0] step:5501/10000 train_time:284988ms step_avg:51.81ms +[2025-09-11 09:03:31] [Rank 0] step:5501/10000 train_time:284988ms step_avg:51.81ms +[2025-09-11 09:03:32] [Rank 0] step:5521/10000 train_time:285676ms step_avg:51.74ms +[2025-09-11 09:03:32] [Rank 0] step:5521/10000 train_time:285676ms step_avg:51.74ms +[2025-09-11 09:03:32] [Rank 0] step:5541/10000 train_time:286366ms step_avg:51.68ms +[2025-09-11 09:03:32] [Rank 0] step:5541/10000 train_time:286366ms step_avg:51.68ms +[2025-09-11 09:03:33] [Rank 0] step:5561/10000 train_time:287056ms step_avg:51.62ms +[2025-09-11 09:03:33] [Rank 0] step:5561/10000 train_time:287056ms step_avg:51.62ms +[2025-09-11 09:03:34] [Rank 0] step:5581/10000 train_time:287745ms step_avg:51.56ms +[2025-09-11 09:03:34] [Rank 0] step:5581/10000 train_time:287745ms step_avg:51.56ms +[2025-09-11 09:03:34] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 09:03:34] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 09:03:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 09:03:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 09:03:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 09:03:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 09:03:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:03:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:03:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 09:03:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 09:03:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 09:03:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 09:03:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 09:03:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 09:03:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 09:03:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 09:03:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 09:03:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 09:03:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 09:03:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 09:03:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 09:03:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 09:03:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 09:03:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 09:03:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 09:03:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 09:03:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 09:03:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 09:03:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 09:03:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 09:03:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 09:03:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 09:03:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 09:03:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 09:03:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 09:03:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 09:03:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 09:03:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 09:03:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 09:03:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 09:03:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 09:03:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 09:03:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 09:03:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 09:03:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:03:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:03:48] [Rank 0] PRINT: step:5600/10000 val_loss:4.3345 total_sharp:3.5928e-05 L1_sharp:2.6645e-03 L2_sharp:2.5283e-03 L3_sharp:1.4971e-03 L4_sharp:1.1294e-03 L5_sharp:2.6150e-03 L6_sharp:3.4045e-03 L7_sharp:2.7295e-03 L8_sharp:6.6893e-03 L9_sharp:7.7924e-03 L10_sharp:8.9040e-03 L11_sharp:1.3257e-02 L12_sharp:8.5172e-02 total_fnorm:1.8400e+02 total_l1_linf:4.0141e+05 total_spectral:9.2000e+01 L1_fnorm:6.3281e-01 L2_fnorm:6.2500e-01 L3_fnorm:6.2500e-01 L4_fnorm:6.2891e-01 L5_fnorm:6.2891e-01 L6_fnorm:6.3281e-01 L7_fnorm:6.3281e-01 L8_fnorm:6.2109e-01 L9_fnorm:6.3281e-01 L10_fnorm:6.2891e-01 L11_fnorm:6.2891e-01 L12_fnorm:6.3281e-01 L1_l1linf:1.6211e-01 L2_l1linf:1.5723e-01 L3_l1linf:1.5137e-01 L4_l1linf:1.5820e-01 L5_l1linf:1.5625e-01 L6_l1linf:1.5625e-01 L7_l1linf:1.5820e-01 L8_l1linf:1.4941e-01 L9_l1linf:1.4941e-01 L10_l1linf:1.4941e-01 L11_l1linf:1.4746e-01 L12_l1linf:1.5820e-01 L1_spectral:7.9721e-03 L2_spectral:8.0050e-03 L3_spectral:8.0437e-03 L4_spectral:8.0073e-03 L5_spectral:8.0059e-03 L6_spectral:7.9578e-03 L7_spectral:7.9624e-03 L8_spectral:8.0424e-03 L9_spectral:8.0167e-03 L10_spectral:7.9895e-03 L11_spectral:8.0666e-03 L12_spectral:8.1230e-03 train_time:288415ms step_avg:51.50ms +[2025-09-11 09:03:48] [Rank 0] PRINT: step:5600/10000 
val_loss:4.3345 total_sharp:3.5928e-05 L1_sharp:2.6645e-03 L2_sharp:2.5283e-03 L3_sharp:1.4971e-03 L4_sharp:1.1294e-03 L5_sharp:2.6150e-03 L6_sharp:3.4045e-03 L7_sharp:2.7295e-03 L8_sharp:6.6893e-03 L9_sharp:7.7924e-03 L10_sharp:8.9040e-03 L11_sharp:1.3257e-02 L12_sharp:8.5172e-02 total_fnorm:1.8400e+02 total_l1_linf:4.0141e+05 total_spectral:9.2000e+01 L1_fnorm:6.3281e-01 L2_fnorm:6.2500e-01 L3_fnorm:6.2500e-01 L4_fnorm:6.2891e-01 L5_fnorm:6.2891e-01 L6_fnorm:6.3281e-01 L7_fnorm:6.3281e-01 L8_fnorm:6.2109e-01 L9_fnorm:6.3281e-01 L10_fnorm:6.2891e-01 L11_fnorm:6.2891e-01 L12_fnorm:6.3281e-01 L1_l1linf:1.6211e-01 L2_l1linf:1.5723e-01 L3_l1linf:1.5137e-01 L4_l1linf:1.5820e-01 L5_l1linf:1.5625e-01 L6_l1linf:1.5625e-01 L7_l1linf:1.5820e-01 L8_l1linf:1.4941e-01 L9_l1linf:1.4941e-01 L10_l1linf:1.4941e-01 L11_l1linf:1.4746e-01 L12_l1linf:1.5820e-01 L1_spectral:7.9721e-03 L2_spectral:8.0050e-03 L3_spectral:8.0437e-03 L4_spectral:8.0073e-03 L5_spectral:8.0059e-03 L6_spectral:7.9578e-03 L7_spectral:7.9624e-03 L8_spectral:8.0424e-03 L9_spectral:8.0167e-03 L10_spectral:7.9895e-03 L11_spectral:8.0666e-03 L12_spectral:8.1230e-03 train_time:288415ms step_avg:51.50ms +[2025-09-11 09:03:50] [Rank 0] step:5601/10000 train_time:291151ms step_avg:51.98ms +[2025-09-11 09:03:50] [Rank 0] step:5601/10000 train_time:291151ms step_avg:51.98ms +[2025-09-11 09:03:51] [Rank 0] step:5621/10000 train_time:291853ms step_avg:51.92ms +[2025-09-11 09:03:51] [Rank 0] step:5621/10000 train_time:291853ms step_avg:51.92ms +[2025-09-11 09:03:52] [Rank 0] step:5641/10000 train_time:292542ms step_avg:51.86ms +[2025-09-11 09:03:52] [Rank 0] step:5641/10000 train_time:292542ms step_avg:51.86ms +[2025-09-11 09:03:52] [Rank 0] step:5661/10000 train_time:293228ms step_avg:51.80ms +[2025-09-11 09:03:52] [Rank 0] step:5661/10000 train_time:293228ms step_avg:51.80ms +[2025-09-11 09:03:53] [Rank 0] step:5681/10000 train_time:293917ms step_avg:51.74ms +[2025-09-11 09:03:53] [Rank 0] step:5681/10000 
train_time:293917ms step_avg:51.74ms +[2025-09-11 09:03:54] [Rank 0] step:5701/10000 train_time:294607ms step_avg:51.68ms +[2025-09-11 09:03:54] [Rank 0] step:5701/10000 train_time:294607ms step_avg:51.68ms +[2025-09-11 09:03:54] [Rank 0] step:5721/10000 train_time:295294ms step_avg:51.62ms +[2025-09-11 09:03:54] [Rank 0] step:5721/10000 train_time:295294ms step_avg:51.62ms +[2025-09-11 09:03:55] [Rank 0] step:5741/10000 train_time:295983ms step_avg:51.56ms +[2025-09-11 09:03:55] [Rank 0] step:5741/10000 train_time:295983ms step_avg:51.56ms +[2025-09-11 09:03:56] [Rank 0] step:5761/10000 train_time:296672ms step_avg:51.50ms +[2025-09-11 09:03:56] [Rank 0] step:5761/10000 train_time:296672ms step_avg:51.50ms +[2025-09-11 09:03:56] [Rank 0] step:5781/10000 train_time:297361ms step_avg:51.44ms +[2025-09-11 09:03:56] [Rank 0] step:5781/10000 train_time:297361ms step_avg:51.44ms +[2025-09-11 09:03:57] [Rank 0] step:5801/10000 train_time:298051ms step_avg:51.38ms +[2025-09-11 09:03:57] [Rank 0] step:5801/10000 train_time:298051ms step_avg:51.38ms +[2025-09-11 09:03:58] [Rank 0] step:5821/10000 train_time:298738ms step_avg:51.32ms +[2025-09-11 09:03:58] [Rank 0] step:5821/10000 train_time:298738ms step_avg:51.32ms +[2025-09-11 09:03:59] [Rank 0] step:5841/10000 train_time:299428ms step_avg:51.26ms +[2025-09-11 09:03:59] [Rank 0] step:5841/10000 train_time:299428ms step_avg:51.26ms +[2025-09-11 09:03:59] [Rank 0] step:5861/10000 train_time:300116ms step_avg:51.21ms +[2025-09-11 09:03:59] [Rank 0] step:5861/10000 train_time:300116ms step_avg:51.21ms +[2025-09-11 09:04:00] [Rank 0] step:5881/10000 train_time:300804ms step_avg:51.15ms +[2025-09-11 09:04:00] [Rank 0] step:5881/10000 train_time:300804ms step_avg:51.15ms +[2025-09-11 09:04:01] [Rank 0] step:5901/10000 train_time:301492ms step_avg:51.09ms +[2025-09-11 09:04:01] [Rank 0] step:5901/10000 train_time:301492ms step_avg:51.09ms +[2025-09-11 09:04:01] [Rank 0] step:5921/10000 train_time:302183ms step_avg:51.04ms 
+[2025-09-11 09:04:01] [Rank 0] step:5921/10000 train_time:302183ms step_avg:51.04ms +[2025-09-11 09:04:02] [Rank 0] step:5941/10000 train_time:302875ms step_avg:50.98ms +[2025-09-11 09:04:02] [Rank 0] step:5941/10000 train_time:302875ms step_avg:50.98ms +[2025-09-11 09:04:03] [Rank 0] step:5961/10000 train_time:303564ms step_avg:50.93ms +[2025-09-11 09:04:03] [Rank 0] step:5961/10000 train_time:303564ms step_avg:50.93ms +[2025-09-11 09:04:03] [Rank 0] step:5981/10000 train_time:304254ms step_avg:50.87ms +[2025-09-11 09:04:03] [Rank 0] step:5981/10000 train_time:304254ms step_avg:50.87ms +[2025-09-11 09:04:04] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 09:04:04] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 09:04:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 09:04:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 09:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 09:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 09:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 09:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 09:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 09:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 09:04:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 09:04:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 09:04:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 09:04:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 09:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 09:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 09:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 09:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 09:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 09:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 09:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 09:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 09:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 09:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 09:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 09:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 09:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 09:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 09:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 09:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 09:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 09:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 09:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 09:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 09:04:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 09:04:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 09:04:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 09:04:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 09:04:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 09:04:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 09:04:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 09:04:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 09:04:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:04:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:04:18] [Rank 0] PRINT: step:6000/10000 val_loss:4.2955 total_sharp:3.3832e-05 L1_sharp:3.2837e-03 L2_sharp:2.5668e-03 L3_sharp:1.2988e-03 L4_sharp:1.5618e-03 L5_sharp:4.4047e-03 L6_sharp:3.0011e-03 L7_sharp:4.8698e-03 L8_sharp:8.1700e-03 L9_sharp:7.1876e-03 L10_sharp:1.0588e-02 L11_sharp:1.3674e-02 L12_sharp:1.0584e-01 total_fnorm:1.8200e+02 total_l1_linf:3.9322e+05 total_spectral:9.1500e+01 L1_fnorm:6.3672e-01 L2_fnorm:6.2891e-01 L3_fnorm:6.2500e-01 L4_fnorm:6.2891e-01 L5_fnorm:6.2891e-01 L6_fnorm:6.3281e-01 L7_fnorm:6.3281e-01 L8_fnorm:6.2109e-01 L9_fnorm:6.3281e-01 L10_fnorm:6.3281e-01 L11_fnorm:6.2891e-01 L12_fnorm:6.2891e-01 L1_l1linf:1.6211e-01 L2_l1linf:1.6113e-01 L3_l1linf:1.4941e-01 L4_l1linf:1.5527e-01 L5_l1linf:1.5625e-01 L6_l1linf:1.5332e-01 L7_l1linf:1.5723e-01 L8_l1linf:1.4941e-01 L9_l1linf:1.5234e-01 L10_l1linf:1.4844e-01 L11_l1linf:1.4746e-01 L12_l1linf:1.5332e-01 L1_spectral:8.0028e-03 L2_spectral:7.9766e-03 L3_spectral:8.0084e-03 L4_spectral:8.0258e-03 L5_spectral:7.9959e-03 L6_spectral:8.0051e-03 L7_spectral:8.0003e-03 L8_spectral:7.9714e-03 L9_spectral:8.0187e-03 L10_spectral:7.9542e-03 L11_spectral:8.0844e-03 L12_spectral:8.0652e-03 train_time:304926ms step_avg:50.82ms +[2025-09-11 09:04:18] [Rank 0] PRINT: step:6000/10000 val_loss:4.2955 total_sharp:3.3832e-05 L1_sharp:3.2837e-03 L2_sharp:2.5668e-03 L3_sharp:1.2988e-03 L4_sharp:1.5618e-03 L5_sharp:4.4047e-03 L6_sharp:3.0011e-03 L7_sharp:4.8698e-03 L8_sharp:8.1700e-03 L9_sharp:7.1876e-03 L10_sharp:1.0588e-02 L11_sharp:1.3674e-02 L12_sharp:1.0584e-01 total_fnorm:1.8200e+02 total_l1_linf:3.9322e+05 total_spectral:9.1500e+01 L1_fnorm:6.3672e-01 L2_fnorm:6.2891e-01 L3_fnorm:6.2500e-01 L4_fnorm:6.2891e-01 L5_fnorm:6.2891e-01 L6_fnorm:6.3281e-01 L7_fnorm:6.3281e-01 L8_fnorm:6.2109e-01 L9_fnorm:6.3281e-01 L10_fnorm:6.3281e-01 L11_fnorm:6.2891e-01 L12_fnorm:6.2891e-01 L1_l1linf:1.6211e-01 L2_l1linf:1.6113e-01 L3_l1linf:1.4941e-01 L4_l1linf:1.5527e-01 L5_l1linf:1.5625e-01 
L6_l1linf:1.5332e-01 L7_l1linf:1.5723e-01 L8_l1linf:1.4941e-01 L9_l1linf:1.5234e-01 L10_l1linf:1.4844e-01 L11_l1linf:1.4746e-01 L12_l1linf:1.5332e-01 L1_spectral:8.0028e-03 L2_spectral:7.9766e-03 L3_spectral:8.0084e-03 L4_spectral:8.0258e-03 L5_spectral:7.9959e-03 L6_spectral:8.0051e-03 L7_spectral:8.0003e-03 L8_spectral:7.9714e-03 L9_spectral:8.0187e-03 L10_spectral:7.9542e-03 L11_spectral:8.0844e-03 L12_spectral:8.0652e-03 train_time:304926ms step_avg:50.82ms +[2025-09-11 09:04:21] [Rank 0] step:6001/10000 train_time:307718ms step_avg:51.28ms +[2025-09-11 09:04:21] [Rank 0] step:6001/10000 train_time:307718ms step_avg:51.28ms +[2025-09-11 09:04:21] [Rank 0] step:6021/10000 train_time:308415ms step_avg:51.22ms +[2025-09-11 09:04:21] [Rank 0] step:6021/10000 train_time:308415ms step_avg:51.22ms +[2025-09-11 09:04:22] [Rank 0] step:6041/10000 train_time:309107ms step_avg:51.17ms +[2025-09-11 09:04:22] [Rank 0] step:6041/10000 train_time:309107ms step_avg:51.17ms +[2025-09-11 09:04:23] [Rank 0] step:6061/10000 train_time:309796ms step_avg:51.11ms +[2025-09-11 09:04:23] [Rank 0] step:6061/10000 train_time:309796ms step_avg:51.11ms +[2025-09-11 09:04:23] [Rank 0] step:6081/10000 train_time:310486ms step_avg:51.06ms +[2025-09-11 09:04:23] [Rank 0] step:6081/10000 train_time:310486ms step_avg:51.06ms +[2025-09-11 09:04:24] [Rank 0] step:6101/10000 train_time:311175ms step_avg:51.00ms +[2025-09-11 09:04:24] [Rank 0] step:6101/10000 train_time:311175ms step_avg:51.00ms +[2025-09-11 09:04:25] [Rank 0] step:6121/10000 train_time:311867ms step_avg:50.95ms +[2025-09-11 09:04:25] [Rank 0] step:6121/10000 train_time:311867ms step_avg:50.95ms +[2025-09-11 09:04:25] [Rank 0] step:6141/10000 train_time:312557ms step_avg:50.90ms +[2025-09-11 09:04:25] [Rank 0] step:6141/10000 train_time:312557ms step_avg:50.90ms +[2025-09-11 09:04:26] [Rank 0] step:6161/10000 train_time:313247ms step_avg:50.84ms +[2025-09-11 09:04:26] [Rank 0] step:6161/10000 train_time:313247ms step_avg:50.84ms 
+[2025-09-11 09:04:27] [Rank 0] step:6181/10000 train_time:313935ms step_avg:50.79ms +[2025-09-11 09:04:27] [Rank 0] step:6181/10000 train_time:313935ms step_avg:50.79ms +[2025-09-11 09:04:28] [Rank 0] step:6201/10000 train_time:314626ms step_avg:50.74ms +[2025-09-11 09:04:28] [Rank 0] step:6201/10000 train_time:314626ms step_avg:50.74ms +[2025-09-11 09:04:28] [Rank 0] step:6221/10000 train_time:315320ms step_avg:50.69ms +[2025-09-11 09:04:28] [Rank 0] step:6221/10000 train_time:315320ms step_avg:50.69ms +[2025-09-11 09:04:29] [Rank 0] step:6241/10000 train_time:316011ms step_avg:50.63ms +[2025-09-11 09:04:29] [Rank 0] step:6241/10000 train_time:316011ms step_avg:50.63ms +[2025-09-11 09:04:30] [Rank 0] step:6261/10000 train_time:316973ms step_avg:50.63ms +[2025-09-11 09:04:30] [Rank 0] step:6261/10000 train_time:316973ms step_avg:50.63ms +[2025-09-11 09:04:31] [Rank 0] step:6281/10000 train_time:317663ms step_avg:50.58ms +[2025-09-11 09:04:31] [Rank 0] step:6281/10000 train_time:317663ms step_avg:50.58ms +[2025-09-11 09:04:31] [Rank 0] step:6301/10000 train_time:318351ms step_avg:50.52ms +[2025-09-11 09:04:31] [Rank 0] step:6301/10000 train_time:318351ms step_avg:50.52ms +[2025-09-11 09:04:32] [Rank 0] step:6321/10000 train_time:319352ms step_avg:50.52ms +[2025-09-11 09:04:32] [Rank 0] step:6321/10000 train_time:319352ms step_avg:50.52ms +[2025-09-11 09:04:33] [Rank 0] step:6341/10000 train_time:320043ms step_avg:50.47ms +[2025-09-11 09:04:33] [Rank 0] step:6341/10000 train_time:320043ms step_avg:50.47ms +[2025-09-11 09:04:34] [Rank 0] step:6361/10000 train_time:320733ms step_avg:50.42ms +[2025-09-11 09:04:34] [Rank 0] step:6361/10000 train_time:320733ms step_avg:50.42ms +[2025-09-11 09:04:34] [Rank 0] step:6381/10000 train_time:321423ms step_avg:50.37ms +[2025-09-11 09:04:34] [Rank 0] step:6381/10000 train_time:321423ms step_avg:50.37ms +[2025-09-11 09:04:35] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 09:04:35] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 09:04:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 09:04:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 09:04:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 09:04:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 09:04:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:04:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:04:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 09:04:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 09:04:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 09:04:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 09:04:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 09:04:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 09:04:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 09:04:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 09:04:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 09:04:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 09:04:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 09:04:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 09:04:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 09:04:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 09:04:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 09:04:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 09:04:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 09:04:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 09:04:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 09:04:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 09:04:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 09:04:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 09:04:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 09:04:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 09:04:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 09:04:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 09:04:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 09:04:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 09:04:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 09:04:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 09:04:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 09:04:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 09:04:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 09:04:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 09:04:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 09:04:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 09:04:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:04:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:04:48] [Rank 0] PRINT: step:6400/10000 val_loss:4.2652 total_sharp:4.1151e-05 L1_sharp:2.8820e-03 L2_sharp:1.2620e-03 L3_sharp:9.5631e-04 L4_sharp:2.5965e-03 L5_sharp:3.7196e-03 L6_sharp:3.8476e-03 L7_sharp:3.3872e-03 L8_sharp:7.7129e-03 L9_sharp:8.0650e-03 L10_sharp:9.4086e-03 L11_sharp:1.4525e-02 L12_sharp:2.4350e-01 total_fnorm:1.6200e+02 total_l1_linf:3.4406e+05 total_spectral:8.1000e+01 L1_fnorm:5.7031e-01 L2_fnorm:5.6250e-01 L3_fnorm:5.6250e-01 L4_fnorm:5.6250e-01 L5_fnorm:5.6250e-01 L6_fnorm:5.6641e-01 L7_fnorm:5.6641e-01 L8_fnorm:5.5469e-01 L9_fnorm:5.6641e-01 L10_fnorm:5.6641e-01 L11_fnorm:5.6250e-01 L12_fnorm:5.7031e-01 L1_l1linf:1.3770e-01 L2_l1linf:1.3477e-01 L3_l1linf:1.3086e-01 L4_l1linf:1.3184e-01 L5_l1linf:1.3379e-01 L6_l1linf:1.3184e-01 L7_l1linf:1.3477e-01 L8_l1linf:1.3086e-01 L9_l1linf:1.3086e-01 L10_l1linf:1.2793e-01 L11_l1linf:1.2695e-01 L12_l1linf:1.3770e-01 L1_spectral:7.3535e-03 L2_spectral:7.3428e-03 L3_spectral:7.3933e-03 L4_spectral:7.3713e-03 L5_spectral:7.3272e-03 L6_spectral:7.3314e-03 L7_spectral:7.3195e-03 L8_spectral:7.2707e-03 L9_spectral:7.3729e-03 L10_spectral:7.3606e-03 L11_spectral:7.3434e-03 L12_spectral:7.3599e-03 train_time:322093ms step_avg:50.33ms +[2025-09-11 09:04:48] [Rank 0] PRINT: step:6400/10000 
val_loss:4.2652 total_sharp:4.1151e-05 L1_sharp:2.8820e-03 L2_sharp:1.2620e-03 L3_sharp:9.5631e-04 L4_sharp:2.5965e-03 L5_sharp:3.7196e-03 L6_sharp:3.8476e-03 L7_sharp:3.3872e-03 L8_sharp:7.7129e-03 L9_sharp:8.0650e-03 L10_sharp:9.4086e-03 L11_sharp:1.4525e-02 L12_sharp:2.4350e-01 total_fnorm:1.6200e+02 total_l1_linf:3.4406e+05 total_spectral:8.1000e+01 L1_fnorm:5.7031e-01 L2_fnorm:5.6250e-01 L3_fnorm:5.6250e-01 L4_fnorm:5.6250e-01 L5_fnorm:5.6250e-01 L6_fnorm:5.6641e-01 L7_fnorm:5.6641e-01 L8_fnorm:5.5469e-01 L9_fnorm:5.6641e-01 L10_fnorm:5.6641e-01 L11_fnorm:5.6250e-01 L12_fnorm:5.7031e-01 L1_l1linf:1.3770e-01 L2_l1linf:1.3477e-01 L3_l1linf:1.3086e-01 L4_l1linf:1.3184e-01 L5_l1linf:1.3379e-01 L6_l1linf:1.3184e-01 L7_l1linf:1.3477e-01 L8_l1linf:1.3086e-01 L9_l1linf:1.3086e-01 L10_l1linf:1.2793e-01 L11_l1linf:1.2695e-01 L12_l1linf:1.3770e-01 L1_spectral:7.3535e-03 L2_spectral:7.3428e-03 L3_spectral:7.3933e-03 L4_spectral:7.3713e-03 L5_spectral:7.3272e-03 L6_spectral:7.3314e-03 L7_spectral:7.3195e-03 L8_spectral:7.2707e-03 L9_spectral:7.3729e-03 L10_spectral:7.3606e-03 L11_spectral:7.3434e-03 L12_spectral:7.3599e-03 train_time:322093ms step_avg:50.33ms +[2025-09-11 09:04:51] [Rank 0] step:6401/10000 train_time:324914ms step_avg:50.76ms +[2025-09-11 09:04:51] [Rank 0] step:6401/10000 train_time:324914ms step_avg:50.76ms +[2025-09-11 09:04:52] [Rank 0] step:6421/10000 train_time:325613ms step_avg:50.71ms +[2025-09-11 09:04:52] [Rank 0] step:6421/10000 train_time:325613ms step_avg:50.71ms +[2025-09-11 09:04:52] [Rank 0] step:6441/10000 train_time:326304ms step_avg:50.66ms +[2025-09-11 09:04:52] [Rank 0] step:6441/10000 train_time:326304ms step_avg:50.66ms +[2025-09-11 09:04:53] [Rank 0] step:6461/10000 train_time:326995ms step_avg:50.61ms +[2025-09-11 09:04:53] [Rank 0] step:6461/10000 train_time:326995ms step_avg:50.61ms +[2025-09-11 09:04:54] [Rank 0] step:6481/10000 train_time:327687ms step_avg:50.56ms +[2025-09-11 09:04:54] [Rank 0] step:6481/10000 
train_time:327687ms step_avg:50.56ms +[2025-09-11 09:04:54] [Rank 0] step:6501/10000 train_time:328379ms step_avg:50.51ms +[2025-09-11 09:04:54] [Rank 0] step:6501/10000 train_time:328379ms step_avg:50.51ms +[2025-09-11 09:04:55] [Rank 0] step:6521/10000 train_time:329071ms step_avg:50.46ms +[2025-09-11 09:04:55] [Rank 0] step:6521/10000 train_time:329071ms step_avg:50.46ms +[2025-09-11 09:04:56] [Rank 0] step:6541/10000 train_time:329760ms step_avg:50.41ms +[2025-09-11 09:04:56] [Rank 0] step:6541/10000 train_time:329760ms step_avg:50.41ms +[2025-09-11 09:04:57] [Rank 0] step:6561/10000 train_time:330452ms step_avg:50.37ms +[2025-09-11 09:04:57] [Rank 0] step:6561/10000 train_time:330452ms step_avg:50.37ms +[2025-09-11 09:04:57] [Rank 0] step:6581/10000 train_time:331143ms step_avg:50.32ms +[2025-09-11 09:04:57] [Rank 0] step:6581/10000 train_time:331143ms step_avg:50.32ms +[2025-09-11 09:04:58] [Rank 0] step:6601/10000 train_time:331834ms step_avg:50.27ms +[2025-09-11 09:04:58] [Rank 0] step:6601/10000 train_time:331834ms step_avg:50.27ms +[2025-09-11 09:04:59] [Rank 0] step:6621/10000 train_time:332523ms step_avg:50.22ms +[2025-09-11 09:04:59] [Rank 0] step:6621/10000 train_time:332523ms step_avg:50.22ms +[2025-09-11 09:04:59] [Rank 0] step:6641/10000 train_time:333216ms step_avg:50.18ms +[2025-09-11 09:04:59] [Rank 0] step:6641/10000 train_time:333216ms step_avg:50.18ms +[2025-09-11 09:05:00] [Rank 0] step:6661/10000 train_time:333907ms step_avg:50.13ms +[2025-09-11 09:05:00] [Rank 0] step:6661/10000 train_time:333907ms step_avg:50.13ms +[2025-09-11 09:05:01] [Rank 0] step:6681/10000 train_time:334605ms step_avg:50.08ms +[2025-09-11 09:05:01] [Rank 0] step:6681/10000 train_time:334605ms step_avg:50.08ms +[2025-09-11 09:05:01] [Rank 0] step:6701/10000 train_time:335352ms step_avg:50.05ms +[2025-09-11 09:05:01] [Rank 0] step:6701/10000 train_time:335352ms step_avg:50.05ms +[2025-09-11 09:05:02] [Rank 0] step:6721/10000 train_time:336119ms step_avg:50.01ms 
+[2025-09-11 09:05:02] [Rank 0] step:6721/10000 train_time:336119ms step_avg:50.01ms +[2025-09-11 09:05:03] [Rank 0] step:6741/10000 train_time:336818ms step_avg:49.97ms +[2025-09-11 09:05:03] [Rank 0] step:6741/10000 train_time:336818ms step_avg:49.97ms +[2025-09-11 09:05:04] [Rank 0] step:6761/10000 train_time:337513ms step_avg:49.92ms +[2025-09-11 09:05:04] [Rank 0] step:6761/10000 train_time:337513ms step_avg:49.92ms +[2025-09-11 09:05:04] [Rank 0] step:6781/10000 train_time:338211ms step_avg:49.88ms +[2025-09-11 09:05:04] [Rank 0] step:6781/10000 train_time:338211ms step_avg:49.88ms +[2025-09-11 09:05:05] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 09:05:05] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 09:05:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 09:05:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 09:05:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 09:05:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 09:05:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:05:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:05:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 09:05:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 09:05:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 09:05:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 09:05:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 09:05:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 09:05:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 09:05:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 09:05:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 09:05:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 09:05:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 09:05:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 09:05:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 09:05:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 09:05:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 09:05:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 09:05:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 09:05:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 09:05:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 09:05:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 09:05:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 09:05:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 09:05:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 09:05:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 09:05:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 09:05:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 09:05:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 09:05:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 09:05:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 09:05:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 09:05:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 09:05:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 09:05:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 09:05:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 09:05:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 09:05:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 09:05:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:05:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:05:18] [Rank 0] PRINT: step:6800/10000 val_loss:4.2172 total_sharp:3.0520e-05 L1_sharp:1.6415e-03 L2_sharp:1.7275e-03 L3_sharp:1.7887e-03 L4_sharp:2.1722e-03 L5_sharp:3.1710e-03 L6_sharp:4.7180e-03 L7_sharp:4.8374e-03 L8_sharp:8.6393e-03 L9_sharp:7.9526e-03 L10_sharp:1.0008e-02 L11_sharp:1.4566e-02 L12_sharp:8.9464e-02 total_fnorm:1.5800e+02 total_l1_linf:3.3382e+05 total_spectral:7.9000e+01 L1_fnorm:5.0781e-01 L2_fnorm:5.0000e-01 L3_fnorm:4.9805e-01 L4_fnorm:5.0000e-01 L5_fnorm:4.9805e-01 L6_fnorm:5.0000e-01 L7_fnorm:5.0391e-01 L8_fnorm:4.9023e-01 L9_fnorm:5.0000e-01 L10_fnorm:5.0000e-01 L11_fnorm:4.9609e-01 L12_fnorm:4.9609e-01 L1_l1linf:1.2109e-01 L2_l1linf:1.1719e-01 L3_l1linf:1.0938e-01 L4_l1linf:1.1572e-01 L5_l1linf:1.1426e-01 L6_l1linf:1.1182e-01 L7_l1linf:1.1426e-01 L8_l1linf:1.1084e-01 L9_l1linf:1.0938e-01 L10_l1linf:1.0840e-01 L11_l1linf:1.0693e-01 L12_l1linf:1.0938e-01 L1_spectral:6.6354e-03 L2_spectral:6.6275e-03 L3_spectral:6.6481e-03 L4_spectral:6.6154e-03 L5_spectral:6.6159e-03 L6_spectral:6.6050e-03 L7_spectral:6.6241e-03 L8_spectral:6.5477e-03 L9_spectral:6.6209e-03 L10_spectral:6.6189e-03 L11_spectral:6.6610e-03 L12_spectral:6.5623e-03 train_time:338889ms step_avg:49.84ms +[2025-09-11 09:05:18] [Rank 0] PRINT: step:6800/10000 val_loss:4.2172 total_sharp:3.0520e-05 L1_sharp:1.6415e-03 L2_sharp:1.7275e-03 L3_sharp:1.7887e-03 L4_sharp:2.1722e-03 L5_sharp:3.1710e-03 L6_sharp:4.7180e-03 L7_sharp:4.8374e-03 L8_sharp:8.6393e-03 L9_sharp:7.9526e-03 L10_sharp:1.0008e-02 L11_sharp:1.4566e-02 L12_sharp:8.9464e-02 total_fnorm:1.5800e+02 total_l1_linf:3.3382e+05 total_spectral:7.9000e+01 L1_fnorm:5.0781e-01 L2_fnorm:5.0000e-01 L3_fnorm:4.9805e-01 L4_fnorm:5.0000e-01 L5_fnorm:4.9805e-01 L6_fnorm:5.0000e-01 L7_fnorm:5.0391e-01 L8_fnorm:4.9023e-01 L9_fnorm:5.0000e-01 L10_fnorm:5.0000e-01 L11_fnorm:4.9609e-01 L12_fnorm:4.9609e-01 L1_l1linf:1.2109e-01 L2_l1linf:1.1719e-01 L3_l1linf:1.0938e-01 L4_l1linf:1.1572e-01 L5_l1linf:1.1426e-01 
L6_l1linf:1.1182e-01 L7_l1linf:1.1426e-01 L8_l1linf:1.1084e-01 L9_l1linf:1.0938e-01 L10_l1linf:1.0840e-01 L11_l1linf:1.0693e-01 L12_l1linf:1.0938e-01 L1_spectral:6.6354e-03 L2_spectral:6.6275e-03 L3_spectral:6.6481e-03 L4_spectral:6.6154e-03 L5_spectral:6.6159e-03 L6_spectral:6.6050e-03 L7_spectral:6.6241e-03 L8_spectral:6.5477e-03 L9_spectral:6.6209e-03 L10_spectral:6.6189e-03 L11_spectral:6.6610e-03 L12_spectral:6.5623e-03 train_time:338889ms step_avg:49.84ms +[2025-09-11 09:05:21] [Rank 0] step:6801/10000 train_time:341687ms step_avg:50.24ms +[2025-09-11 09:05:21] [Rank 0] step:6801/10000 train_time:341687ms step_avg:50.24ms +[2025-09-11 09:05:22] [Rank 0] step:6821/10000 train_time:342387ms step_avg:50.20ms +[2025-09-11 09:05:22] [Rank 0] step:6821/10000 train_time:342387ms step_avg:50.20ms +[2025-09-11 09:05:22] [Rank 0] step:6841/10000 train_time:343089ms step_avg:50.15ms +[2025-09-11 09:05:22] [Rank 0] step:6841/10000 train_time:343089ms step_avg:50.15ms +[2025-09-11 09:05:23] [Rank 0] step:6861/10000 train_time:343787ms step_avg:50.11ms +[2025-09-11 09:05:23] [Rank 0] step:6861/10000 train_time:343787ms step_avg:50.11ms +[2025-09-11 09:05:24] [Rank 0] step:6881/10000 train_time:344487ms step_avg:50.06ms +[2025-09-11 09:05:24] [Rank 0] step:6881/10000 train_time:344487ms step_avg:50.06ms +[2025-09-11 09:05:25] [Rank 0] step:6901/10000 train_time:345185ms step_avg:50.02ms +[2025-09-11 09:05:25] [Rank 0] step:6901/10000 train_time:345185ms step_avg:50.02ms +[2025-09-11 09:05:25] [Rank 0] step:6921/10000 train_time:345883ms step_avg:49.98ms +[2025-09-11 09:05:25] [Rank 0] step:6921/10000 train_time:345883ms step_avg:49.98ms +[2025-09-11 09:05:26] [Rank 0] step:6941/10000 train_time:346581ms step_avg:49.93ms +[2025-09-11 09:05:26] [Rank 0] step:6941/10000 train_time:346581ms step_avg:49.93ms +[2025-09-11 09:05:27] [Rank 0] step:6961/10000 train_time:347280ms step_avg:49.89ms +[2025-09-11 09:05:27] [Rank 0] step:6961/10000 train_time:347280ms step_avg:49.89ms 
+[2025-09-11 09:05:27] [Rank 0] step:6981/10000 train_time:347981ms step_avg:49.85ms +[2025-09-11 09:05:27] [Rank 0] step:6981/10000 train_time:347981ms step_avg:49.85ms +[2025-09-11 09:05:28] [Rank 0] step:7001/10000 train_time:348679ms step_avg:49.80ms +[2025-09-11 09:05:28] [Rank 0] step:7001/10000 train_time:348679ms step_avg:49.80ms +[2025-09-11 09:05:29] [Rank 0] step:7021/10000 train_time:349377ms step_avg:49.76ms +[2025-09-11 09:05:29] [Rank 0] step:7021/10000 train_time:349377ms step_avg:49.76ms +[2025-09-11 09:05:29] [Rank 0] step:7041/10000 train_time:350075ms step_avg:49.72ms +[2025-09-11 09:05:29] [Rank 0] step:7041/10000 train_time:350075ms step_avg:49.72ms +[2025-09-11 09:05:30] [Rank 0] step:7061/10000 train_time:350775ms step_avg:49.68ms +[2025-09-11 09:05:30] [Rank 0] step:7061/10000 train_time:350775ms step_avg:49.68ms +[2025-09-11 09:05:31] [Rank 0] step:7081/10000 train_time:351473ms step_avg:49.64ms +[2025-09-11 09:05:31] [Rank 0] step:7081/10000 train_time:351473ms step_avg:49.64ms +[2025-09-11 09:05:32] [Rank 0] step:7101/10000 train_time:352172ms step_avg:49.59ms +[2025-09-11 09:05:32] [Rank 0] step:7101/10000 train_time:352172ms step_avg:49.59ms +[2025-09-11 09:05:32] [Rank 0] step:7121/10000 train_time:352872ms step_avg:49.55ms +[2025-09-11 09:05:32] [Rank 0] step:7121/10000 train_time:352872ms step_avg:49.55ms +[2025-09-11 09:05:33] [Rank 0] step:7141/10000 train_time:353870ms step_avg:49.55ms +[2025-09-11 09:05:33] [Rank 0] step:7141/10000 train_time:353870ms step_avg:49.55ms +[2025-09-11 09:05:34] [Rank 0] step:7161/10000 train_time:354569ms step_avg:49.51ms +[2025-09-11 09:05:34] [Rank 0] step:7161/10000 train_time:354569ms step_avg:49.51ms +[2025-09-11 09:05:35] [Rank 0] step:7181/10000 train_time:355267ms step_avg:49.47ms +[2025-09-11 09:05:35] [Rank 0] step:7181/10000 train_time:355267ms step_avg:49.47ms +[2025-09-11 09:05:36] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 09:05:36] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 09:05:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 09:05:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 09:05:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 09:05:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 09:05:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:05:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:05:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 09:05:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 09:05:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 09:05:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 09:05:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 09:05:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 09:05:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 09:05:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 09:05:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 09:05:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 09:05:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 09:05:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 09:05:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 09:05:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 09:05:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 09:05:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 09:05:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 09:05:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 09:05:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 09:05:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 09:05:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 09:05:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 09:05:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 09:05:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 09:05:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 09:05:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 09:05:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 09:05:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 09:05:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 09:05:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 09:05:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 09:05:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 09:05:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 09:05:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 09:05:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 09:05:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 09:05:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:05:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:05:48] [Rank 0] PRINT: step:7200/10000 val_loss:4.1785 total_sharp:2.7660e-05 L1_sharp:1.1188e-03 L2_sharp:4.0240e-04 L3_sharp:-1.6084e-04 L4_sharp:2.7462e-03 L5_sharp:3.8851e-03 L6_sharp:3.9774e-03 L7_sharp:4.5698e-03 L8_sharp:6.9754e-03 L9_sharp:6.7019e-03 L10_sharp:8.6237e-03 L11_sharp:1.2915e-02 L12_sharp:9.4951e-02 total_fnorm:1.3900e+02 total_l1_linf:2.8467e+05 total_spectral:7.0000e+01 L1_fnorm:4.3750e-01 L2_fnorm:4.3164e-01 L3_fnorm:4.2578e-01 L4_fnorm:4.2969e-01 L5_fnorm:4.3164e-01 L6_fnorm:4.3359e-01 L7_fnorm:4.3359e-01 L8_fnorm:4.2188e-01 L9_fnorm:4.3164e-01 L10_fnorm:4.3164e-01 L11_fnorm:4.2578e-01 L12_fnorm:4.2578e-01 L1_l1linf:9.9609e-02 L2_l1linf:9.6191e-02 L3_l1linf:8.8867e-02 L4_l1linf:9.2285e-02 L5_l1linf:9.1797e-02 L6_l1linf:9.2285e-02 L7_l1linf:9.3262e-02 L8_l1linf:8.9355e-02 L9_l1linf:8.8867e-02 L10_l1linf:8.8379e-02 L11_l1linf:8.4961e-02 L12_l1linf:8.9844e-02 L1_spectral:5.8017e-03 L2_spectral:5.8271e-03 L3_spectral:5.8148e-03 L4_spectral:5.8556e-03 L5_spectral:5.8326e-03 L6_spectral:5.8473e-03 L7_spectral:5.8182e-03 L8_spectral:5.8434e-03 L9_spectral:5.8023e-03 L10_spectral:5.8237e-03 L11_spectral:5.8451e-03 L12_spectral:5.8195e-03 train_time:356203ms step_avg:49.47ms +[2025-09-11 09:05:48] [Rank 0] PRINT: step:7200/10000 
val_loss:4.1785 total_sharp:2.7660e-05 L1_sharp:1.1188e-03 L2_sharp:4.0240e-04 L3_sharp:-1.6084e-04 L4_sharp:2.7462e-03 L5_sharp:3.8851e-03 L6_sharp:3.9774e-03 L7_sharp:4.5698e-03 L8_sharp:6.9754e-03 L9_sharp:6.7019e-03 L10_sharp:8.6237e-03 L11_sharp:1.2915e-02 L12_sharp:9.4951e-02 total_fnorm:1.3900e+02 total_l1_linf:2.8467e+05 total_spectral:7.0000e+01 L1_fnorm:4.3750e-01 L2_fnorm:4.3164e-01 L3_fnorm:4.2578e-01 L4_fnorm:4.2969e-01 L5_fnorm:4.3164e-01 L6_fnorm:4.3359e-01 L7_fnorm:4.3359e-01 L8_fnorm:4.2188e-01 L9_fnorm:4.3164e-01 L10_fnorm:4.3164e-01 L11_fnorm:4.2578e-01 L12_fnorm:4.2578e-01 L1_l1linf:9.9609e-02 L2_l1linf:9.6191e-02 L3_l1linf:8.8867e-02 L4_l1linf:9.2285e-02 L5_l1linf:9.1797e-02 L6_l1linf:9.2285e-02 L7_l1linf:9.3262e-02 L8_l1linf:8.9355e-02 L9_l1linf:8.8867e-02 L10_l1linf:8.8379e-02 L11_l1linf:8.4961e-02 L12_l1linf:8.9844e-02 L1_spectral:5.8017e-03 L2_spectral:5.8271e-03 L3_spectral:5.8148e-03 L4_spectral:5.8556e-03 L5_spectral:5.8326e-03 L6_spectral:5.8473e-03 L7_spectral:5.8182e-03 L8_spectral:5.8434e-03 L9_spectral:5.8023e-03 L10_spectral:5.8237e-03 L11_spectral:5.8451e-03 L12_spectral:5.8195e-03 train_time:356203ms step_avg:49.47ms +[2025-09-11 09:05:51] [Rank 0] step:7201/10000 train_time:358950ms step_avg:49.85ms +[2025-09-11 09:05:51] [Rank 0] step:7201/10000 train_time:358950ms step_avg:49.85ms +[2025-09-11 09:05:52] [Rank 0] step:7221/10000 train_time:359680ms step_avg:49.81ms +[2025-09-11 09:05:52] [Rank 0] step:7221/10000 train_time:359680ms step_avg:49.81ms +[2025-09-11 09:05:53] [Rank 0] step:7241/10000 train_time:360379ms step_avg:49.77ms +[2025-09-11 09:05:53] [Rank 0] step:7241/10000 train_time:360379ms step_avg:49.77ms +[2025-09-11 09:05:53] [Rank 0] step:7261/10000 train_time:361081ms step_avg:49.73ms +[2025-09-11 09:05:53] [Rank 0] step:7261/10000 train_time:361081ms step_avg:49.73ms +[2025-09-11 09:05:54] [Rank 0] step:7281/10000 train_time:361785ms step_avg:49.69ms +[2025-09-11 09:05:54] [Rank 0] step:7281/10000 
train_time:361785ms step_avg:49.69ms +[2025-09-11 09:05:55] [Rank 0] step:7301/10000 train_time:362484ms step_avg:49.65ms +[2025-09-11 09:05:55] [Rank 0] step:7301/10000 train_time:362484ms step_avg:49.65ms +[2025-09-11 09:05:55] [Rank 0] step:7321/10000 train_time:363183ms step_avg:49.61ms +[2025-09-11 09:05:55] [Rank 0] step:7321/10000 train_time:363183ms step_avg:49.61ms +[2025-09-11 09:05:56] [Rank 0] step:7341/10000 train_time:363884ms step_avg:49.57ms +[2025-09-11 09:05:56] [Rank 0] step:7341/10000 train_time:363884ms step_avg:49.57ms +[2025-09-11 09:05:57] [Rank 0] step:7361/10000 train_time:364583ms step_avg:49.53ms +[2025-09-11 09:05:57] [Rank 0] step:7361/10000 train_time:364583ms step_avg:49.53ms +[2025-09-11 09:05:58] [Rank 0] step:7381/10000 train_time:365284ms step_avg:49.49ms +[2025-09-11 09:05:58] [Rank 0] step:7381/10000 train_time:365284ms step_avg:49.49ms +[2025-09-11 09:05:58] [Rank 0] step:7401/10000 train_time:365981ms step_avg:49.45ms +[2025-09-11 09:05:58] [Rank 0] step:7401/10000 train_time:365981ms step_avg:49.45ms +[2025-09-11 09:05:59] [Rank 0] step:7421/10000 train_time:366680ms step_avg:49.41ms +[2025-09-11 09:05:59] [Rank 0] step:7421/10000 train_time:366680ms step_avg:49.41ms +[2025-09-11 09:06:00] [Rank 0] step:7441/10000 train_time:367381ms step_avg:49.37ms +[2025-09-11 09:06:00] [Rank 0] step:7441/10000 train_time:367381ms step_avg:49.37ms +[2025-09-11 09:06:00] [Rank 0] step:7461/10000 train_time:368081ms step_avg:49.33ms +[2025-09-11 09:06:00] [Rank 0] step:7461/10000 train_time:368081ms step_avg:49.33ms +[2025-09-11 09:06:01] [Rank 0] step:7481/10000 train_time:368782ms step_avg:49.30ms +[2025-09-11 09:06:01] [Rank 0] step:7481/10000 train_time:368782ms step_avg:49.30ms +[2025-09-11 09:06:02] [Rank 0] step:7501/10000 train_time:369481ms step_avg:49.26ms +[2025-09-11 09:06:02] [Rank 0] step:7501/10000 train_time:369481ms step_avg:49.26ms +[2025-09-11 09:06:02] [Rank 0] step:7521/10000 train_time:370183ms step_avg:49.22ms 
+[2025-09-11 09:06:02] [Rank 0] step:7521/10000 train_time:370183ms step_avg:49.22ms +[2025-09-11 09:06:03] [Rank 0] step:7541/10000 train_time:370881ms step_avg:49.18ms +[2025-09-11 09:06:03] [Rank 0] step:7541/10000 train_time:370881ms step_avg:49.18ms +[2025-09-11 09:06:04] [Rank 0] step:7561/10000 train_time:371584ms step_avg:49.14ms +[2025-09-11 09:06:04] [Rank 0] step:7561/10000 train_time:371584ms step_avg:49.14ms +[2025-09-11 09:06:05] [Rank 0] step:7581/10000 train_time:372285ms step_avg:49.11ms +[2025-09-11 09:06:05] [Rank 0] step:7581/10000 train_time:372285ms step_avg:49.11ms +[2025-09-11 09:06:05] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 09:06:05] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 09:06:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 09:06:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 09:06:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 09:06:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 09:06:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:06:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:06:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 09:06:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 09:06:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 09:06:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 09:06:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 09:06:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 09:06:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 09:06:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 09:06:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 09:06:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 09:06:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 09:06:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 09:06:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 09:06:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 09:06:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 09:06:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 09:06:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 09:06:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 09:06:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 09:06:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 09:06:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 09:06:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 09:06:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 09:06:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 09:06:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 09:06:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 09:06:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 09:06:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 09:06:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 09:06:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 09:06:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 09:06:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 09:06:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 09:06:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 09:06:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 09:06:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 09:06:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:06:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:06:19] [Rank 0] PRINT: step:7600/10000 val_loss:4.1447 total_sharp:2.6549e-05 L1_sharp:1.6395e-03 L2_sharp:2.7909e-03 L3_sharp:1.7738e-03 L4_sharp:8.2706e-04 L5_sharp:2.4676e-03 L6_sharp:3.4343e-03 L7_sharp:3.7346e-03 L8_sharp:7.0655e-03 L9_sharp:7.2835e-03 L10_sharp:9.3011e-03 L11_sharp:1.4069e-02 L12_sharp:1.1031e-01 total_fnorm:1.1950e+02 total_l1_linf:2.3450e+05 total_spectral:5.9750e+01 L1_fnorm:3.6719e-01 L2_fnorm:3.6133e-01 L3_fnorm:3.5938e-01 L4_fnorm:3.6133e-01 L5_fnorm:3.5938e-01 L6_fnorm:3.6133e-01 L7_fnorm:3.6133e-01 L8_fnorm:3.4961e-01 L9_fnorm:3.5742e-01 L10_fnorm:3.5938e-01 L11_fnorm:3.5352e-01 L12_fnorm:3.5547e-01 L1_l1linf:7.8125e-02 L2_l1linf:7.5195e-02 L3_l1linf:7.1777e-02 L4_l1linf:7.4707e-02 L5_l1linf:7.2266e-02 L6_l1linf:7.4219e-02 L7_l1linf:7.2754e-02 L8_l1linf:6.9824e-02 L9_l1linf:6.9824e-02 L10_l1linf:6.9336e-02 L11_l1linf:6.7871e-02 L12_l1linf:7.1777e-02 L1_spectral:5.0196e-03 L2_spectral:5.0024e-03 L3_spectral:5.0355e-03 L4_spectral:5.0128e-03 L5_spectral:5.0106e-03 L6_spectral:5.0385e-03 L7_spectral:4.9959e-03 L8_spectral:4.9301e-03 L9_spectral:4.9862e-03 L10_spectral:5.0101e-03 L11_spectral:4.9884e-03 L12_spectral:4.9735e-03 train_time:372965ms step_avg:49.07ms +[2025-09-11 09:06:19] [Rank 0] PRINT: step:7600/10000 val_loss:4.1447 total_sharp:2.6549e-05 L1_sharp:1.6395e-03 L2_sharp:2.7909e-03 L3_sharp:1.7738e-03 L4_sharp:8.2706e-04 L5_sharp:2.4676e-03 L6_sharp:3.4343e-03 L7_sharp:3.7346e-03 L8_sharp:7.0655e-03 L9_sharp:7.2835e-03 L10_sharp:9.3011e-03 L11_sharp:1.4069e-02 L12_sharp:1.1031e-01 total_fnorm:1.1950e+02 total_l1_linf:2.3450e+05 total_spectral:5.9750e+01 L1_fnorm:3.6719e-01 L2_fnorm:3.6133e-01 L3_fnorm:3.5938e-01 L4_fnorm:3.6133e-01 L5_fnorm:3.5938e-01 L6_fnorm:3.6133e-01 L7_fnorm:3.6133e-01 L8_fnorm:3.4961e-01 L9_fnorm:3.5742e-01 L10_fnorm:3.5938e-01 L11_fnorm:3.5352e-01 L12_fnorm:3.5547e-01 L1_l1linf:7.8125e-02 L2_l1linf:7.5195e-02 L3_l1linf:7.1777e-02 L4_l1linf:7.4707e-02 L5_l1linf:7.2266e-02 
L6_l1linf:7.4219e-02 L7_l1linf:7.2754e-02 L8_l1linf:6.9824e-02 L9_l1linf:6.9824e-02 L10_l1linf:6.9336e-02 L11_l1linf:6.7871e-02 L12_l1linf:7.1777e-02 L1_spectral:5.0196e-03 L2_spectral:5.0024e-03 L3_spectral:5.0355e-03 L4_spectral:5.0128e-03 L5_spectral:5.0106e-03 L6_spectral:5.0385e-03 L7_spectral:4.9959e-03 L8_spectral:4.9301e-03 L9_spectral:4.9862e-03 L10_spectral:5.0101e-03 L11_spectral:4.9884e-03 L12_spectral:4.9735e-03 train_time:372965ms step_avg:49.07ms +[2025-09-11 09:06:21] [Rank 0] step:7601/10000 train_time:375803ms step_avg:49.44ms +[2025-09-11 09:06:21] [Rank 0] step:7601/10000 train_time:375803ms step_avg:49.44ms +[2025-09-11 09:06:22] [Rank 0] step:7621/10000 train_time:376523ms step_avg:49.41ms +[2025-09-11 09:06:22] [Rank 0] step:7621/10000 train_time:376523ms step_avg:49.41ms +[2025-09-11 09:06:23] [Rank 0] step:7641/10000 train_time:377226ms step_avg:49.37ms +[2025-09-11 09:06:23] [Rank 0] step:7641/10000 train_time:377226ms step_avg:49.37ms +[2025-09-11 09:06:23] [Rank 0] step:7661/10000 train_time:377926ms step_avg:49.33ms +[2025-09-11 09:06:23] [Rank 0] step:7661/10000 train_time:377926ms step_avg:49.33ms +[2025-09-11 09:06:24] [Rank 0] step:7681/10000 train_time:378628ms step_avg:49.29ms +[2025-09-11 09:06:24] [Rank 0] step:7681/10000 train_time:378628ms step_avg:49.29ms +[2025-09-11 09:06:25] [Rank 0] step:7701/10000 train_time:379330ms step_avg:49.26ms +[2025-09-11 09:06:25] [Rank 0] step:7701/10000 train_time:379330ms step_avg:49.26ms +[2025-09-11 09:06:26] [Rank 0] step:7721/10000 train_time:380031ms step_avg:49.22ms +[2025-09-11 09:06:26] [Rank 0] step:7721/10000 train_time:380031ms step_avg:49.22ms +[2025-09-11 09:06:26] [Rank 0] step:7741/10000 train_time:380733ms step_avg:49.18ms +[2025-09-11 09:06:26] [Rank 0] step:7741/10000 train_time:380733ms step_avg:49.18ms +[2025-09-11 09:06:27] [Rank 0] step:7761/10000 train_time:381436ms step_avg:49.15ms +[2025-09-11 09:06:27] [Rank 0] step:7761/10000 train_time:381436ms step_avg:49.15ms 
+[2025-09-11 09:06:28] [Rank 0] step:7781/10000 train_time:382139ms step_avg:49.11ms +[2025-09-11 09:06:28] [Rank 0] step:7781/10000 train_time:382139ms step_avg:49.11ms +[2025-09-11 09:06:28] [Rank 0] step:7801/10000 train_time:382839ms step_avg:49.08ms +[2025-09-11 09:06:28] [Rank 0] step:7801/10000 train_time:382839ms step_avg:49.08ms +[2025-09-11 09:06:29] [Rank 0] step:7821/10000 train_time:383541ms step_avg:49.04ms +[2025-09-11 09:06:29] [Rank 0] step:7821/10000 train_time:383541ms step_avg:49.04ms +[2025-09-11 09:06:30] [Rank 0] step:7841/10000 train_time:384244ms step_avg:49.00ms +[2025-09-11 09:06:30] [Rank 0] step:7841/10000 train_time:384244ms step_avg:49.00ms +[2025-09-11 09:06:31] [Rank 0] step:7861/10000 train_time:384948ms step_avg:48.97ms +[2025-09-11 09:06:31] [Rank 0] step:7861/10000 train_time:384948ms step_avg:48.97ms +[2025-09-11 09:06:31] [Rank 0] step:7881/10000 train_time:385649ms step_avg:48.93ms +[2025-09-11 09:06:31] [Rank 0] step:7881/10000 train_time:385649ms step_avg:48.93ms +[2025-09-11 09:06:32] [Rank 0] step:7901/10000 train_time:386352ms step_avg:48.90ms +[2025-09-11 09:06:32] [Rank 0] step:7901/10000 train_time:386352ms step_avg:48.90ms +[2025-09-11 09:06:33] [Rank 0] step:7921/10000 train_time:387053ms step_avg:48.86ms +[2025-09-11 09:06:33] [Rank 0] step:7921/10000 train_time:387053ms step_avg:48.86ms +[2025-09-11 09:06:33] [Rank 0] step:7941/10000 train_time:387756ms step_avg:48.83ms +[2025-09-11 09:06:33] [Rank 0] step:7941/10000 train_time:387756ms step_avg:48.83ms +[2025-09-11 09:06:34] [Rank 0] step:7961/10000 train_time:388455ms step_avg:48.79ms +[2025-09-11 09:06:34] [Rank 0] step:7961/10000 train_time:388455ms step_avg:48.79ms +[2025-09-11 09:06:35] [Rank 0] step:7981/10000 train_time:389159ms step_avg:48.76ms +[2025-09-11 09:06:35] [Rank 0] step:7981/10000 train_time:389159ms step_avg:48.76ms +[2025-09-11 09:06:35] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 09:06:35] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 09:06:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 09:06:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 09:06:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 09:06:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 09:06:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:06:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:06:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 09:06:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 09:06:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 09:06:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 09:06:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 09:06:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 09:06:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 09:06:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 09:06:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 09:06:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 09:06:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 09:06:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 09:06:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 09:06:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 09:06:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 09:06:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 09:06:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 09:06:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 09:06:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 09:06:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 09:06:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 09:06:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 09:06:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 09:06:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 09:06:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 09:06:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 09:06:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 09:06:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 09:06:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 09:06:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 09:06:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 09:06:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 09:06:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 09:06:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 09:06:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 09:06:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 09:06:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:06:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:06:48] [Rank 0] PRINT: step:8000/10000 val_loss:4.1121 total_sharp:2.6280e-05 L1_sharp:1.1792e-03 L2_sharp:2.1167e-03 L3_sharp:1.5103e-03 L4_sharp:2.4887e-03 L5_sharp:3.7618e-03 L6_sharp:3.1273e-03 L7_sharp:4.8854e-03 L8_sharp:7.1730e-03 L9_sharp:6.8345e-03 L10_sharp:8.6998e-03 L11_sharp:1.4853e-02 L12_sharp:2.4556e-01 total_fnorm:1.0300e+02 total_l1_linf:1.9251e+05 total_spectral:5.1500e+01 L1_fnorm:3.0469e-01 L2_fnorm:2.9688e-01 L3_fnorm:2.9492e-01 L4_fnorm:2.9492e-01 L5_fnorm:2.9492e-01 L6_fnorm:2.9688e-01 L7_fnorm:2.9688e-01 L8_fnorm:2.8711e-01 L9_fnorm:2.9297e-01 L10_fnorm:2.9297e-01 L11_fnorm:2.8906e-01 L12_fnorm:2.8906e-01 L1_l1linf:6.2256e-02 L2_l1linf:5.8594e-02 L3_l1linf:5.3955e-02 L4_l1linf:5.6885e-02 L5_l1linf:5.6641e-02 L6_l1linf:5.6152e-02 L7_l1linf:5.6152e-02 L8_l1linf:5.3223e-02 L9_l1linf:5.3711e-02 L10_l1linf:5.4199e-02 L11_l1linf:5.1270e-02 L12_l1linf:5.5908e-02 L1_spectral:4.2222e-03 L2_spectral:4.2191e-03 L3_spectral:4.2329e-03 L4_spectral:4.2876e-03 L5_spectral:4.2115e-03 L6_spectral:4.2445e-03 L7_spectral:4.2035e-03 L8_spectral:4.1250e-03 L9_spectral:4.1411e-03 L10_spectral:4.1609e-03 L11_spectral:4.1406e-03 L12_spectral:4.1972e-03 train_time:389837ms step_avg:48.73ms +[2025-09-11 09:06:48] [Rank 0] PRINT: step:8000/10000 
val_loss:4.1121 total_sharp:2.6280e-05 L1_sharp:1.1792e-03 L2_sharp:2.1167e-03 L3_sharp:1.5103e-03 L4_sharp:2.4887e-03 L5_sharp:3.7618e-03 L6_sharp:3.1273e-03 L7_sharp:4.8854e-03 L8_sharp:7.1730e-03 L9_sharp:6.8345e-03 L10_sharp:8.6998e-03 L11_sharp:1.4853e-02 L12_sharp:2.4556e-01 total_fnorm:1.0300e+02 total_l1_linf:1.9251e+05 total_spectral:5.1500e+01 L1_fnorm:3.0469e-01 L2_fnorm:2.9688e-01 L3_fnorm:2.9492e-01 L4_fnorm:2.9492e-01 L5_fnorm:2.9492e-01 L6_fnorm:2.9688e-01 L7_fnorm:2.9688e-01 L8_fnorm:2.8711e-01 L9_fnorm:2.9297e-01 L10_fnorm:2.9297e-01 L11_fnorm:2.8906e-01 L12_fnorm:2.8906e-01 L1_l1linf:6.2256e-02 L2_l1linf:5.8594e-02 L3_l1linf:5.3955e-02 L4_l1linf:5.6885e-02 L5_l1linf:5.6641e-02 L6_l1linf:5.6152e-02 L7_l1linf:5.6152e-02 L8_l1linf:5.3223e-02 L9_l1linf:5.3711e-02 L10_l1linf:5.4199e-02 L11_l1linf:5.1270e-02 L12_l1linf:5.5908e-02 L1_spectral:4.2222e-03 L2_spectral:4.2191e-03 L3_spectral:4.2329e-03 L4_spectral:4.2876e-03 L5_spectral:4.2115e-03 L6_spectral:4.2445e-03 L7_spectral:4.2035e-03 L8_spectral:4.1250e-03 L9_spectral:4.1411e-03 L10_spectral:4.1609e-03 L11_spectral:4.1406e-03 L12_spectral:4.1972e-03 train_time:389837ms step_avg:48.73ms +[2025-09-11 09:06:51] [Rank 0] step:8001/10000 train_time:392669ms step_avg:49.08ms +[2025-09-11 09:06:51] [Rank 0] step:8001/10000 train_time:392669ms step_avg:49.08ms +[2025-09-11 09:06:52] [Rank 0] step:8021/10000 train_time:393391ms step_avg:49.05ms +[2025-09-11 09:06:52] [Rank 0] step:8021/10000 train_time:393391ms step_avg:49.05ms +[2025-09-11 09:06:53] [Rank 0] step:8041/10000 train_time:394094ms step_avg:49.01ms +[2025-09-11 09:06:53] [Rank 0] step:8041/10000 train_time:394094ms step_avg:49.01ms +[2025-09-11 09:06:53] [Rank 0] step:8061/10000 train_time:394799ms step_avg:48.98ms +[2025-09-11 09:06:53] [Rank 0] step:8061/10000 train_time:394799ms step_avg:48.98ms +[2025-09-11 09:06:54] [Rank 0] step:8081/10000 train_time:395499ms step_avg:48.94ms +[2025-09-11 09:06:54] [Rank 0] step:8081/10000 
train_time:395499ms step_avg:48.94ms +[2025-09-11 09:06:55] [Rank 0] step:8101/10000 train_time:396200ms step_avg:48.91ms +[2025-09-11 09:06:55] [Rank 0] step:8101/10000 train_time:396200ms step_avg:48.91ms +[2025-09-11 09:06:55] [Rank 0] step:8121/10000 train_time:396907ms step_avg:48.87ms +[2025-09-11 09:06:55] [Rank 0] step:8121/10000 train_time:396907ms step_avg:48.87ms +[2025-09-11 09:06:57] [Rank 0] step:8141/10000 train_time:398355ms step_avg:48.93ms +[2025-09-11 09:06:57] [Rank 0] step:8141/10000 train_time:398355ms step_avg:48.93ms +[2025-09-11 09:06:58] [Rank 0] step:8161/10000 train_time:399062ms step_avg:48.90ms +[2025-09-11 09:06:58] [Rank 0] step:8161/10000 train_time:399062ms step_avg:48.90ms +[2025-09-11 09:06:58] [Rank 0] step:8181/10000 train_time:399775ms step_avg:48.87ms +[2025-09-11 09:06:58] [Rank 0] step:8181/10000 train_time:399775ms step_avg:48.87ms +[2025-09-11 09:06:59] [Rank 0] step:8201/10000 train_time:400485ms step_avg:48.83ms +[2025-09-11 09:06:59] [Rank 0] step:8201/10000 train_time:400485ms step_avg:48.83ms +[2025-09-11 09:07:00] [Rank 0] step:8221/10000 train_time:401194ms step_avg:48.80ms +[2025-09-11 09:07:00] [Rank 0] step:8221/10000 train_time:401194ms step_avg:48.80ms +[2025-09-11 09:07:00] [Rank 0] step:8241/10000 train_time:401912ms step_avg:48.77ms +[2025-09-11 09:07:00] [Rank 0] step:8241/10000 train_time:401912ms step_avg:48.77ms +[2025-09-11 09:07:01] [Rank 0] step:8261/10000 train_time:402620ms step_avg:48.74ms +[2025-09-11 09:07:01] [Rank 0] step:8261/10000 train_time:402620ms step_avg:48.74ms +[2025-09-11 09:07:02] [Rank 0] step:8281/10000 train_time:403327ms step_avg:48.71ms +[2025-09-11 09:07:02] [Rank 0] step:8281/10000 train_time:403327ms step_avg:48.71ms +[2025-09-11 09:07:03] [Rank 0] step:8301/10000 train_time:404037ms step_avg:48.67ms +[2025-09-11 09:07:03] [Rank 0] step:8301/10000 train_time:404037ms step_avg:48.67ms +[2025-09-11 09:07:03] [Rank 0] step:8321/10000 train_time:404745ms step_avg:48.64ms 
+[2025-09-11 09:07:03] [Rank 0] step:8321/10000 train_time:404745ms step_avg:48.64ms +[2025-09-11 09:07:04] [Rank 0] step:8341/10000 train_time:405460ms step_avg:48.61ms +[2025-09-11 09:07:04] [Rank 0] step:8341/10000 train_time:405460ms step_avg:48.61ms +[2025-09-11 09:07:05] [Rank 0] step:8361/10000 train_time:406165ms step_avg:48.58ms +[2025-09-11 09:07:05] [Rank 0] step:8361/10000 train_time:406165ms step_avg:48.58ms +[2025-09-11 09:07:05] [Rank 0] step:8381/10000 train_time:406876ms step_avg:48.55ms +[2025-09-11 09:07:05] [Rank 0] step:8381/10000 train_time:406876ms step_avg:48.55ms +[2025-09-11 09:07:06] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 09:07:06] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 09:07:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 09:07:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 09:07:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 09:07:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 09:07:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:07:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:07:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 09:07:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 09:07:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 09:07:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 09:07:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 09:07:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 09:07:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 09:07:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 09:07:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 09:07:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 09:07:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 09:07:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 09:07:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 09:07:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 09:07:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 09:07:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 09:07:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 09:07:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 09:07:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 09:07:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 09:07:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 09:07:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 09:07:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 09:07:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 09:07:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 09:07:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 09:07:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 09:07:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 09:07:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 09:07:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 09:07:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 09:07:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 09:07:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 09:07:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 09:07:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 09:07:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 09:07:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 09:07:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:07:19] [Rank 0] PRINT: step:8400/10000 val_loss:4.0834 total_sharp:2.0407e-05 L1_sharp:9.5040e-04 L2_sharp:1.2015e-03 L3_sharp:1.2692e-03 L4_sharp:2.1257e-03 L5_sharp:3.2910e-03 L6_sharp:3.2802e-03 L7_sharp:4.0964e-03 L8_sharp:5.4772e-03 L9_sharp:5.5806e-03 L10_sharp:6.8864e-03 L11_sharp:1.1519e-02 L12_sharp:1.6747e-01 total_fnorm:8.3500e+01 total_l1_linf:1.4336e+05 total_spectral:4.1750e+01 L1_fnorm:2.3926e-01 L2_fnorm:2.3340e-01 L3_fnorm:2.3145e-01 L4_fnorm:2.3145e-01 L5_fnorm:2.3047e-01 L6_fnorm:2.3242e-01 L7_fnorm:2.3242e-01 L8_fnorm:2.2461e-01 L9_fnorm:2.2852e-01 L10_fnorm:2.2949e-01 L11_fnorm:2.2559e-01 L12_fnorm:2.2461e-01 L1_l1linf:4.5654e-02 L2_l1linf:4.3213e-02 L3_l1linf:4.1016e-02 L4_l1linf:4.2236e-02 L5_l1linf:4.0283e-02 L6_l1linf:4.1016e-02 L7_l1linf:4.0283e-02 L8_l1linf:4.0039e-02 L9_l1linf:3.8330e-02 L10_l1linf:3.8086e-02 L11_l1linf:3.7598e-02 L12_l1linf:4.1016e-02 L1_spectral:3.4464e-03 L2_spectral:3.4447e-03 L3_spectral:3.4186e-03 L4_spectral:3.4522e-03 L5_spectral:3.4075e-03 L6_spectral:3.4048e-03 L7_spectral:3.4206e-03 L8_spectral:3.3477e-03 L9_spectral:3.3372e-03 L10_spectral:3.3425e-03 L11_spectral:3.3402e-03 L12_spectral:3.3675e-03 train_time:407568ms step_avg:48.52ms +[2025-09-11 09:07:19] [Rank 0] PRINT: step:8400/10000 val_loss:4.0834 total_sharp:2.0407e-05 L1_sharp:9.5040e-04 L2_sharp:1.2015e-03 L3_sharp:1.2692e-03 L4_sharp:2.1257e-03 L5_sharp:3.2910e-03 L6_sharp:3.2802e-03 L7_sharp:4.0964e-03 L8_sharp:5.4772e-03 L9_sharp:5.5806e-03 L10_sharp:6.8864e-03 L11_sharp:1.1519e-02 L12_sharp:1.6747e-01 total_fnorm:8.3500e+01 total_l1_linf:1.4336e+05 total_spectral:4.1750e+01 L1_fnorm:2.3926e-01 L2_fnorm:2.3340e-01 L3_fnorm:2.3145e-01 L4_fnorm:2.3145e-01 L5_fnorm:2.3047e-01 L6_fnorm:2.3242e-01 L7_fnorm:2.3242e-01 L8_fnorm:2.2461e-01 L9_fnorm:2.2852e-01 L10_fnorm:2.2949e-01 L11_fnorm:2.2559e-01 L12_fnorm:2.2461e-01 L1_l1linf:4.5654e-02 L2_l1linf:4.3213e-02 L3_l1linf:4.1016e-02 L4_l1linf:4.2236e-02 L5_l1linf:4.0283e-02 
L6_l1linf:4.1016e-02 L7_l1linf:4.0283e-02 L8_l1linf:4.0039e-02 L9_l1linf:3.8330e-02 L10_l1linf:3.8086e-02 L11_l1linf:3.7598e-02 L12_l1linf:4.1016e-02 L1_spectral:3.4464e-03 L2_spectral:3.4447e-03 L3_spectral:3.4186e-03 L4_spectral:3.4522e-03 L5_spectral:3.4075e-03 L6_spectral:3.4048e-03 L7_spectral:3.4206e-03 L8_spectral:3.3477e-03 L9_spectral:3.3372e-03 L10_spectral:3.3425e-03 L11_spectral:3.3402e-03 L12_spectral:3.3675e-03 train_time:407568ms step_avg:48.52ms +[2025-09-11 09:07:22] [Rank 0] step:8401/10000 train_time:410360ms step_avg:48.85ms +[2025-09-11 09:07:22] [Rank 0] step:8401/10000 train_time:410360ms step_avg:48.85ms +[2025-09-11 09:07:23] [Rank 0] step:8421/10000 train_time:411075ms step_avg:48.82ms +[2025-09-11 09:07:23] [Rank 0] step:8421/10000 train_time:411075ms step_avg:48.82ms +[2025-09-11 09:07:24] [Rank 0] step:8441/10000 train_time:411786ms step_avg:48.78ms +[2025-09-11 09:07:24] [Rank 0] step:8441/10000 train_time:411786ms step_avg:48.78ms +[2025-09-11 09:07:24] [Rank 0] step:8461/10000 train_time:412496ms step_avg:48.75ms +[2025-09-11 09:07:24] [Rank 0] step:8461/10000 train_time:412496ms step_avg:48.75ms +[2025-09-11 09:07:25] [Rank 0] step:8481/10000 train_time:413207ms step_avg:48.72ms +[2025-09-11 09:07:25] [Rank 0] step:8481/10000 train_time:413207ms step_avg:48.72ms +[2025-09-11 09:07:26] [Rank 0] step:8501/10000 train_time:413916ms step_avg:48.69ms +[2025-09-11 09:07:26] [Rank 0] step:8501/10000 train_time:413916ms step_avg:48.69ms +[2025-09-11 09:07:27] [Rank 0] step:8521/10000 train_time:414624ms step_avg:48.66ms +[2025-09-11 09:07:27] [Rank 0] step:8521/10000 train_time:414624ms step_avg:48.66ms +[2025-09-11 09:07:27] [Rank 0] step:8541/10000 train_time:415334ms step_avg:48.63ms +[2025-09-11 09:07:27] [Rank 0] step:8541/10000 train_time:415334ms step_avg:48.63ms +[2025-09-11 09:07:28] [Rank 0] step:8561/10000 train_time:416048ms step_avg:48.60ms +[2025-09-11 09:07:28] [Rank 0] step:8561/10000 train_time:416048ms step_avg:48.60ms 
+[2025-09-11 09:07:29] [Rank 0] step:8581/10000 train_time:416761ms step_avg:48.57ms +[2025-09-11 09:07:29] [Rank 0] step:8581/10000 train_time:416761ms step_avg:48.57ms +[2025-09-11 09:07:29] [Rank 0] step:8601/10000 train_time:417472ms step_avg:48.54ms +[2025-09-11 09:07:29] [Rank 0] step:8601/10000 train_time:417472ms step_avg:48.54ms +[2025-09-11 09:07:30] [Rank 0] step:8621/10000 train_time:418181ms step_avg:48.51ms +[2025-09-11 09:07:30] [Rank 0] step:8621/10000 train_time:418181ms step_avg:48.51ms +[2025-09-11 09:07:31] [Rank 0] step:8641/10000 train_time:418889ms step_avg:48.48ms +[2025-09-11 09:07:31] [Rank 0] step:8641/10000 train_time:418889ms step_avg:48.48ms +[2025-09-11 09:07:31] [Rank 0] step:8661/10000 train_time:419600ms step_avg:48.45ms +[2025-09-11 09:07:31] [Rank 0] step:8661/10000 train_time:419600ms step_avg:48.45ms +[2025-09-11 09:07:32] [Rank 0] step:8681/10000 train_time:420310ms step_avg:48.42ms +[2025-09-11 09:07:32] [Rank 0] step:8681/10000 train_time:420310ms step_avg:48.42ms +[2025-09-11 09:07:33] [Rank 0] step:8701/10000 train_time:421019ms step_avg:48.39ms +[2025-09-11 09:07:33] [Rank 0] step:8701/10000 train_time:421019ms step_avg:48.39ms +[2025-09-11 09:07:34] [Rank 0] step:8721/10000 train_time:421730ms step_avg:48.36ms +[2025-09-11 09:07:34] [Rank 0] step:8721/10000 train_time:421730ms step_avg:48.36ms +[2025-09-11 09:07:34] [Rank 0] step:8741/10000 train_time:422435ms step_avg:48.33ms +[2025-09-11 09:07:34] [Rank 0] step:8741/10000 train_time:422435ms step_avg:48.33ms +[2025-09-11 09:07:35] [Rank 0] step:8761/10000 train_time:423149ms step_avg:48.30ms +[2025-09-11 09:07:35] [Rank 0] step:8761/10000 train_time:423149ms step_avg:48.30ms +[2025-09-11 09:07:36] [Rank 0] step:8781/10000 train_time:423855ms step_avg:48.27ms +[2025-09-11 09:07:36] [Rank 0] step:8781/10000 train_time:423855ms step_avg:48.27ms +[2025-09-11 09:07:36] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 09:07:36] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 09:07:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 09:07:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 09:07:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 09:07:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 09:07:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:07:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:07:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 09:07:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 09:07:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 09:07:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 09:07:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 09:07:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 09:07:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 09:07:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 09:07:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 09:07:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 09:07:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 09:07:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 09:07:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 09:07:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 09:07:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 09:07:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 09:07:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 09:07:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 09:07:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 09:07:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 09:07:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 09:07:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 09:07:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 09:07:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 09:07:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 09:07:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 09:07:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 09:07:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 09:07:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 09:07:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 09:07:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 09:07:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 09:07:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 09:07:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 09:07:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 09:07:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 09:07:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:07:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 09:07:49] [Rank 0] PRINT: step:8800/10000 val_loss:4.0731 total_sharp:2.1451e-05 L1_sharp:1.5541e-03 L2_sharp:6.5487e-04 L3_sharp:3.9184e-04 L4_sharp:9.8176e-04 L5_sharp:2.1464e-03 L6_sharp:2.6802e-03 L7_sharp:3.3676e-03 L8_sharp:4.9858e-03 L9_sharp:5.3944e-03 L10_sharp:6.3117e-03 L11_sharp:1.0251e-02 L12_sharp:7.1663e-02 total_fnorm:6.1250e+01 total_l1_linf:9.5232e+04 total_spectral:3.0500e+01 L1_fnorm:1.7773e-01 L2_fnorm:1.7285e-01 L3_fnorm:1.7090e-01 L4_fnorm:1.7090e-01 L5_fnorm:1.6992e-01 L6_fnorm:1.7188e-01 L7_fnorm:1.7188e-01 L8_fnorm:1.6504e-01 L9_fnorm:1.6797e-01 L10_fnorm:1.6895e-01 L11_fnorm:1.6602e-01 L12_fnorm:1.6309e-01 L1_l1linf:3.1982e-02 L2_l1linf:2.9175e-02 L3_l1linf:2.6367e-02 L4_l1linf:2.7100e-02 L5_l1linf:2.7100e-02 L6_l1linf:2.7100e-02 L7_l1linf:2.7466e-02 L8_l1linf:2.6001e-02 L9_l1linf:2.6367e-02 L10_l1linf:2.4902e-02 L11_l1linf:2.4292e-02 L12_l1linf:2.6001e-02 L1_spectral:2.6358e-03 L2_spectral:2.5954e-03 L3_spectral:2.6167e-03 L4_spectral:2.6072e-03 L5_spectral:2.5801e-03 L6_spectral:2.5861e-03 L7_spectral:2.5740e-03 L8_spectral:2.5316e-03 L9_spectral:2.5039e-03 L10_spectral:2.5124e-03 L11_spectral:2.5053e-03 L12_spectral:2.5475e-03 train_time:424543ms step_avg:48.24ms +[2025-09-11 09:07:49] [Rank 0] PRINT: step:8800/10000 
val_loss:4.0731 total_sharp:2.1451e-05 L1_sharp:1.5541e-03 L2_sharp:6.5487e-04 L3_sharp:3.9184e-04 L4_sharp:9.8176e-04 L5_sharp:2.1464e-03 L6_sharp:2.6802e-03 L7_sharp:3.3676e-03 L8_sharp:4.9858e-03 L9_sharp:5.3944e-03 L10_sharp:6.3117e-03 L11_sharp:1.0251e-02 L12_sharp:7.1663e-02 total_fnorm:6.1250e+01 total_l1_linf:9.5232e+04 total_spectral:3.0500e+01 L1_fnorm:1.7773e-01 L2_fnorm:1.7285e-01 L3_fnorm:1.7090e-01 L4_fnorm:1.7090e-01 L5_fnorm:1.6992e-01 L6_fnorm:1.7188e-01 L7_fnorm:1.7188e-01 L8_fnorm:1.6504e-01 L9_fnorm:1.6797e-01 L10_fnorm:1.6895e-01 L11_fnorm:1.6602e-01 L12_fnorm:1.6309e-01 L1_l1linf:3.1982e-02 L2_l1linf:2.9175e-02 L3_l1linf:2.6367e-02 L4_l1linf:2.7100e-02 L5_l1linf:2.7100e-02 L6_l1linf:2.7100e-02 L7_l1linf:2.7466e-02 L8_l1linf:2.6001e-02 L9_l1linf:2.6367e-02 L10_l1linf:2.4902e-02 L11_l1linf:2.4292e-02 L12_l1linf:2.6001e-02 L1_spectral:2.6358e-03 L2_spectral:2.5954e-03 L3_spectral:2.6167e-03 L4_spectral:2.6072e-03 L5_spectral:2.5801e-03 L6_spectral:2.5861e-03 L7_spectral:2.5740e-03 L8_spectral:2.5316e-03 L9_spectral:2.5039e-03 L10_spectral:2.5124e-03 L11_spectral:2.5053e-03 L12_spectral:2.5475e-03 train_time:424543ms step_avg:48.24ms +[2025-09-11 09:07:52] [Rank 0] step:8801/10000 train_time:427357ms step_avg:48.56ms +[2025-09-11 09:07:52] [Rank 0] step:8801/10000 train_time:427357ms step_avg:48.56ms +[2025-09-11 09:07:53] [Rank 0] step:8821/10000 train_time:428089ms step_avg:48.53ms +[2025-09-11 09:07:53] [Rank 0] step:8821/10000 train_time:428089ms step_avg:48.53ms +[2025-09-11 09:07:54] [Rank 0] step:8841/10000 train_time:428800ms step_avg:48.50ms +[2025-09-11 09:07:54] [Rank 0] step:8841/10000 train_time:428800ms step_avg:48.50ms +[2025-09-11 09:07:54] [Rank 0] step:8861/10000 train_time:429509ms step_avg:48.47ms +[2025-09-11 09:07:54] [Rank 0] step:8861/10000 train_time:429509ms step_avg:48.47ms +[2025-09-11 09:07:55] [Rank 0] step:8881/10000 train_time:430220ms step_avg:48.44ms +[2025-09-11 09:07:55] [Rank 0] step:8881/10000 
train_time:430220ms step_avg:48.44ms +[2025-09-11 09:07:56] [Rank 0] step:8901/10000 train_time:430932ms step_avg:48.41ms +[2025-09-11 09:07:56] [Rank 0] step:8901/10000 train_time:430932ms step_avg:48.41ms +[2025-09-11 09:07:56] [Rank 0] step:8921/10000 train_time:431639ms step_avg:48.38ms +[2025-09-11 09:07:56] [Rank 0] step:8921/10000 train_time:431639ms step_avg:48.38ms +[2025-09-11 09:07:57] [Rank 0] step:8941/10000 train_time:432353ms step_avg:48.36ms +[2025-09-11 09:07:57] [Rank 0] step:8941/10000 train_time:432353ms step_avg:48.36ms +[2025-09-11 09:07:58] [Rank 0] step:8961/10000 train_time:433071ms step_avg:48.33ms +[2025-09-11 09:07:58] [Rank 0] step:8961/10000 train_time:433071ms step_avg:48.33ms +[2025-09-11 09:07:59] [Rank 0] step:8981/10000 train_time:433784ms step_avg:48.30ms +[2025-09-11 09:07:59] [Rank 0] step:8981/10000 train_time:433784ms step_avg:48.30ms +[2025-09-11 09:07:59] [Rank 0] step:9001/10000 train_time:434490ms step_avg:48.27ms +[2025-09-11 09:07:59] [Rank 0] step:9001/10000 train_time:434490ms step_avg:48.27ms +[2025-09-11 09:08:00] [Rank 0] step:9021/10000 train_time:435200ms step_avg:48.24ms +[2025-09-11 09:08:00] [Rank 0] step:9021/10000 train_time:435200ms step_avg:48.24ms +[2025-09-11 09:08:01] [Rank 0] step:9041/10000 train_time:435913ms step_avg:48.22ms +[2025-09-11 09:08:01] [Rank 0] step:9041/10000 train_time:435913ms step_avg:48.22ms +[2025-09-11 09:08:01] [Rank 0] step:9061/10000 train_time:436621ms step_avg:48.19ms +[2025-09-11 09:08:01] [Rank 0] step:9061/10000 train_time:436621ms step_avg:48.19ms +[2025-09-11 09:08:02] [Rank 0] step:9081/10000 train_time:437334ms step_avg:48.16ms +[2025-09-11 09:08:02] [Rank 0] step:9081/10000 train_time:437334ms step_avg:48.16ms +[2025-09-11 09:08:03] [Rank 0] step:9101/10000 train_time:438048ms step_avg:48.13ms +[2025-09-11 09:08:03] [Rank 0] step:9101/10000 train_time:438048ms step_avg:48.13ms +[2025-09-11 09:08:04] [Rank 0] step:9121/10000 train_time:438762ms step_avg:48.10ms 
+[2025-09-11 09:08:04] [Rank 0] step:9121/10000 train_time:438762ms step_avg:48.10ms +[2025-09-11 09:08:04] [Rank 0] step:9141/10000 train_time:439470ms step_avg:48.08ms +[2025-09-11 09:08:04] [Rank 0] step:9141/10000 train_time:439470ms step_avg:48.08ms +[2025-09-11 09:08:05] [Rank 0] step:9161/10000 train_time:440184ms step_avg:48.05ms +[2025-09-11 09:08:05] [Rank 0] step:9161/10000 train_time:440184ms step_avg:48.05ms +[2025-09-11 09:08:06] [Rank 0] step:9181/10000 train_time:440896ms step_avg:48.02ms +[2025-09-11 09:08:06] [Rank 0] step:9181/10000 train_time:440896ms step_avg:48.02ms +[2025-09-11 09:08:06] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 09:08:06] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 09:08:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 09:08:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 09:08:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 09:08:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 09:08:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:08:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:08:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 09:08:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 09:08:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 09:08:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 09:08:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 09:08:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 09:08:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 09:08:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 09:08:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 09:08:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 09:08:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 09:08:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 09:08:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 09:08:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 09:08:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 09:08:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 09:08:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 09:08:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 09:08:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 09:08:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 09:08:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 09:08:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 09:08:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 09:08:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 09:08:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 09:08:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 09:08:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 09:08:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 09:08:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 09:08:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 09:08:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 09:08:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 09:08:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 09:08:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 09:08:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 09:08:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 09:08:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 09:08:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:08:20] [Rank 0] PRINT: step:9200/10000 val_loss:4.0549 total_sharp:1.6774e-05 L1_sharp:1.0515e-03 L2_sharp:9.6273e-04 L3_sharp:5.2115e-04 L4_sharp:1.3124e-03 L5_sharp:2.6087e-03 L6_sharp:2.6489e-03 L7_sharp:3.6290e-03 L8_sharp:3.8926e-03 L9_sharp:4.8031e-03 L10_sharp:5.5247e-03 L11_sharp:1.0103e-02 L12_sharp:6.3130e-02 total_fnorm:4.5250e+01 total_l1_linf:6.2720e+04 total_spectral:2.2625e+01 L1_fnorm:1.1768e-01 L2_fnorm:1.1426e-01 L3_fnorm:1.1230e-01 L4_fnorm:1.1279e-01 L5_fnorm:1.1230e-01 L6_fnorm:1.1328e-01 L7_fnorm:1.1328e-01 L8_fnorm:1.0889e-01 L9_fnorm:1.1133e-01 L10_fnorm:1.1084e-01 L11_fnorm:1.0889e-01 L12_fnorm:1.0889e-01 L1_l1linf:1.7822e-02 L2_l1linf:1.6968e-02 L3_l1linf:1.5869e-02 L4_l1linf:1.6479e-02 L5_l1linf:1.5747e-02 L6_l1linf:1.7334e-02 L7_l1linf:1.5991e-02 L8_l1linf:1.5869e-02 L9_l1linf:1.5320e-02 L10_l1linf:1.4648e-02 L11_l1linf:1.4465e-02 L12_l1linf:1.5747e-02 L1_spectral:1.7842e-03 L2_spectral:1.7647e-03 L3_spectral:1.7565e-03 L4_spectral:1.7717e-03 L5_spectral:1.7520e-03 L6_spectral:1.7598e-03 L7_spectral:1.7488e-03 L8_spectral:1.7432e-03 L9_spectral:1.6831e-03 L10_spectral:1.7061e-03 L11_spectral:1.7002e-03 L12_spectral:1.7315e-03 train_time:441590ms step_avg:48.00ms +[2025-09-11 09:08:20] [Rank 0] PRINT: step:9200/10000 val_loss:4.0549 total_sharp:1.6774e-05 L1_sharp:1.0515e-03 L2_sharp:9.6273e-04 L3_sharp:5.2115e-04 L4_sharp:1.3124e-03 L5_sharp:2.6087e-03 L6_sharp:2.6489e-03 L7_sharp:3.6290e-03 L8_sharp:3.8926e-03 L9_sharp:4.8031e-03 L10_sharp:5.5247e-03 L11_sharp:1.0103e-02 L12_sharp:6.3130e-02 total_fnorm:4.5250e+01 total_l1_linf:6.2720e+04 total_spectral:2.2625e+01 L1_fnorm:1.1768e-01 L2_fnorm:1.1426e-01 L3_fnorm:1.1230e-01 L4_fnorm:1.1279e-01 L5_fnorm:1.1230e-01 L6_fnorm:1.1328e-01 L7_fnorm:1.1328e-01 L8_fnorm:1.0889e-01 L9_fnorm:1.1133e-01 L10_fnorm:1.1084e-01 L11_fnorm:1.0889e-01 L12_fnorm:1.0889e-01 L1_l1linf:1.7822e-02 L2_l1linf:1.6968e-02 L3_l1linf:1.5869e-02 L4_l1linf:1.6479e-02 L5_l1linf:1.5747e-02 
L6_l1linf:1.7334e-02 L7_l1linf:1.5991e-02 L8_l1linf:1.5869e-02 L9_l1linf:1.5320e-02 L10_l1linf:1.4648e-02 L11_l1linf:1.4465e-02 L12_l1linf:1.5747e-02 L1_spectral:1.7842e-03 L2_spectral:1.7647e-03 L3_spectral:1.7565e-03 L4_spectral:1.7717e-03 L5_spectral:1.7520e-03 L6_spectral:1.7598e-03 L7_spectral:1.7488e-03 L8_spectral:1.7432e-03 L9_spectral:1.6831e-03 L10_spectral:1.7061e-03 L11_spectral:1.7002e-03 L12_spectral:1.7315e-03 train_time:441590ms step_avg:48.00ms +[2025-09-11 09:08:22] [Rank 0] step:9201/10000 train_time:444432ms step_avg:48.30ms +[2025-09-11 09:08:22] [Rank 0] step:9201/10000 train_time:444432ms step_avg:48.30ms +[2025-09-11 09:08:23] [Rank 0] step:9221/10000 train_time:445149ms step_avg:48.28ms +[2025-09-11 09:08:23] [Rank 0] step:9221/10000 train_time:445149ms step_avg:48.28ms +[2025-09-11 09:08:24] [Rank 0] step:9241/10000 train_time:445858ms step_avg:48.25ms +[2025-09-11 09:08:24] [Rank 0] step:9241/10000 train_time:445858ms step_avg:48.25ms +[2025-09-11 09:08:25] [Rank 0] step:9261/10000 train_time:446570ms step_avg:48.22ms +[2025-09-11 09:08:25] [Rank 0] step:9261/10000 train_time:446570ms step_avg:48.22ms +[2025-09-11 09:08:25] [Rank 0] step:9281/10000 train_time:447282ms step_avg:48.19ms +[2025-09-11 09:08:25] [Rank 0] step:9281/10000 train_time:447282ms step_avg:48.19ms +[2025-09-11 09:08:26] [Rank 0] step:9301/10000 train_time:447990ms step_avg:48.17ms +[2025-09-11 09:08:26] [Rank 0] step:9301/10000 train_time:447990ms step_avg:48.17ms +[2025-09-11 09:08:27] [Rank 0] step:9321/10000 train_time:448702ms step_avg:48.14ms +[2025-09-11 09:08:27] [Rank 0] step:9321/10000 train_time:448702ms step_avg:48.14ms +[2025-09-11 09:08:27] [Rank 0] step:9341/10000 train_time:449409ms step_avg:48.11ms +[2025-09-11 09:08:27] [Rank 0] step:9341/10000 train_time:449409ms step_avg:48.11ms +[2025-09-11 09:08:28] [Rank 0] step:9361/10000 train_time:450115ms step_avg:48.08ms +[2025-09-11 09:08:28] [Rank 0] step:9361/10000 train_time:450115ms step_avg:48.08ms 
+[2025-09-11 09:08:29] [Rank 0] step:9381/10000 train_time:450825ms step_avg:48.06ms +[2025-09-11 09:08:29] [Rank 0] step:9381/10000 train_time:450825ms step_avg:48.06ms +[2025-09-11 09:08:29] [Rank 0] step:9401/10000 train_time:451537ms step_avg:48.03ms +[2025-09-11 09:08:29] [Rank 0] step:9401/10000 train_time:451537ms step_avg:48.03ms +[2025-09-11 09:08:30] [Rank 0] step:9421/10000 train_time:452250ms step_avg:48.00ms +[2025-09-11 09:08:30] [Rank 0] step:9421/10000 train_time:452250ms step_avg:48.00ms +[2025-09-11 09:08:31] [Rank 0] step:9441/10000 train_time:452966ms step_avg:47.98ms +[2025-09-11 09:08:31] [Rank 0] step:9441/10000 train_time:452966ms step_avg:47.98ms +[2025-09-11 09:08:32] [Rank 0] step:9461/10000 train_time:453677ms step_avg:47.95ms +[2025-09-11 09:08:32] [Rank 0] step:9461/10000 train_time:453677ms step_avg:47.95ms +[2025-09-11 09:08:32] [Rank 0] step:9481/10000 train_time:454388ms step_avg:47.93ms +[2025-09-11 09:08:32] [Rank 0] step:9481/10000 train_time:454388ms step_avg:47.93ms +[2025-09-11 09:08:33] [Rank 0] step:9501/10000 train_time:455101ms step_avg:47.90ms +[2025-09-11 09:08:33] [Rank 0] step:9501/10000 train_time:455101ms step_avg:47.90ms +[2025-09-11 09:08:34] [Rank 0] step:9521/10000 train_time:455815ms step_avg:47.87ms +[2025-09-11 09:08:34] [Rank 0] step:9521/10000 train_time:455815ms step_avg:47.87ms +[2025-09-11 09:08:34] [Rank 0] step:9541/10000 train_time:456525ms step_avg:47.85ms +[2025-09-11 09:08:34] [Rank 0] step:9541/10000 train_time:456525ms step_avg:47.85ms +[2025-09-11 09:08:35] [Rank 0] step:9561/10000 train_time:457237ms step_avg:47.82ms +[2025-09-11 09:08:35] [Rank 0] step:9561/10000 train_time:457237ms step_avg:47.82ms +[2025-09-11 09:08:36] [Rank 0] step:9581/10000 train_time:457949ms step_avg:47.80ms +[2025-09-11 09:08:36] [Rank 0] step:9581/10000 train_time:457949ms step_avg:47.80ms +[2025-09-11 09:08:37] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 09:08:37] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 09:08:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 09:08:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 09:08:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 09:08:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 09:08:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:08:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:08:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 09:08:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 09:08:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 09:08:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 09:08:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 09:08:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 09:08:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 09:08:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 09:08:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 09:08:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 09:08:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 09:08:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 09:08:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 09:08:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 09:08:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 09:08:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 09:08:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 09:08:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 09:08:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 09:08:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 09:08:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 09:08:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 09:08:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 09:08:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 09:08:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 09:08:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 09:08:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 09:08:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 09:08:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 09:08:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 09:08:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 09:08:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 09:08:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 09:08:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 09:08:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 09:08:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 09:08:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:08:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 09:08:49] [Rank 0] PRINT: step:9600/10000 val_loss:4.0413 total_sharp:1.1322e-05 L1_sharp:1.2130e-03 L2_sharp:7.6651e-04 L3_sharp:8.7706e-04 L4_sharp:1.0636e-03 L5_sharp:2.0585e-03 L6_sharp:1.4315e-03 L7_sharp:2.2630e-03 L8_sharp:2.5620e-03 L9_sharp:3.3955e-03 L10_sharp:4.1477e-03 L11_sharp:7.1432e-03 L12_sharp:5.5406e-02 total_fnorm:2.6500e+01 total_l1_linf:3.1616e+04 total_spectral:1.3250e+01 L1_fnorm:6.6406e-02 L2_fnorm:6.4453e-02 L3_fnorm:6.3965e-02 L4_fnorm:6.3965e-02 L5_fnorm:6.3477e-02 L6_fnorm:6.4453e-02 L7_fnorm:6.3965e-02 L8_fnorm:6.1523e-02 L9_fnorm:6.2500e-02 L10_fnorm:6.2988e-02 L11_fnorm:6.2256e-02 L12_fnorm:6.1768e-02 L1_l1linf:8.3618e-03 L2_l1linf:7.8735e-03 L3_l1linf:7.3547e-03 L4_l1linf:7.6599e-03 L5_l1linf:7.3242e-03 L6_l1linf:7.4768e-03 L7_l1linf:7.7515e-03 L8_l1linf:7.9346e-03 L9_l1linf:6.8665e-03 L10_l1linf:7.2632e-03 L11_l1linf:7.3853e-03 L12_l1linf:7.9346e-03 L1_spectral:1.0482e-03 L2_spectral:1.0246e-03 L3_spectral:1.0204e-03 L4_spectral:1.0270e-03 L5_spectral:1.0109e-03 L6_spectral:1.0292e-03 L7_spectral:1.0123e-03 L8_spectral:1.0342e-03 L9_spectral:9.8090e-04 L10_spectral:9.8999e-04 L11_spectral:9.8688e-04 L12_spectral:1.0213e-03 train_time:458637ms step_avg:47.77ms +[2025-09-11 09:08:49] [Rank 0] PRINT: step:9600/10000 
val_loss:4.0413 total_sharp:1.1322e-05 L1_sharp:1.2130e-03 L2_sharp:7.6651e-04 L3_sharp:8.7706e-04 L4_sharp:1.0636e-03 L5_sharp:2.0585e-03 L6_sharp:1.4315e-03 L7_sharp:2.2630e-03 L8_sharp:2.5620e-03 L9_sharp:3.3955e-03 L10_sharp:4.1477e-03 L11_sharp:7.1432e-03 L12_sharp:5.5406e-02 total_fnorm:2.6500e+01 total_l1_linf:3.1616e+04 total_spectral:1.3250e+01 L1_fnorm:6.6406e-02 L2_fnorm:6.4453e-02 L3_fnorm:6.3965e-02 L4_fnorm:6.3965e-02 L5_fnorm:6.3477e-02 L6_fnorm:6.4453e-02 L7_fnorm:6.3965e-02 L8_fnorm:6.1523e-02 L9_fnorm:6.2500e-02 L10_fnorm:6.2988e-02 L11_fnorm:6.2256e-02 L12_fnorm:6.1768e-02 L1_l1linf:8.3618e-03 L2_l1linf:7.8735e-03 L3_l1linf:7.3547e-03 L4_l1linf:7.6599e-03 L5_l1linf:7.3242e-03 L6_l1linf:7.4768e-03 L7_l1linf:7.7515e-03 L8_l1linf:7.9346e-03 L9_l1linf:6.8665e-03 L10_l1linf:7.2632e-03 L11_l1linf:7.3853e-03 L12_l1linf:7.9346e-03 L1_spectral:1.0482e-03 L2_spectral:1.0246e-03 L3_spectral:1.0204e-03 L4_spectral:1.0270e-03 L5_spectral:1.0109e-03 L6_spectral:1.0292e-03 L7_spectral:1.0123e-03 L8_spectral:1.0342e-03 L9_spectral:9.8090e-04 L10_spectral:9.8999e-04 L11_spectral:9.8688e-04 L12_spectral:1.0213e-03 train_time:458637ms step_avg:47.77ms +[2025-09-11 09:08:52] [Rank 0] step:9601/10000 train_time:461422ms step_avg:48.06ms +[2025-09-11 09:08:52] [Rank 0] step:9601/10000 train_time:461422ms step_avg:48.06ms +[2025-09-11 09:08:53] [Rank 0] step:9621/10000 train_time:462153ms step_avg:48.04ms +[2025-09-11 09:08:53] [Rank 0] step:9621/10000 train_time:462153ms step_avg:48.04ms +[2025-09-11 09:08:53] [Rank 0] step:9641/10000 train_time:462870ms step_avg:48.01ms +[2025-09-11 09:08:53] [Rank 0] step:9641/10000 train_time:462870ms step_avg:48.01ms +[2025-09-11 09:08:54] [Rank 0] step:9661/10000 train_time:463592ms step_avg:47.99ms +[2025-09-11 09:08:54] [Rank 0] step:9661/10000 train_time:463592ms step_avg:47.99ms +[2025-09-11 09:08:55] [Rank 0] step:9681/10000 train_time:464308ms step_avg:47.96ms +[2025-09-11 09:08:55] [Rank 0] step:9681/10000 
train_time:464308ms step_avg:47.96ms +[2025-09-11 09:08:56] [Rank 0] step:9701/10000 train_time:465024ms step_avg:47.94ms +[2025-09-11 09:08:56] [Rank 0] step:9701/10000 train_time:465024ms step_avg:47.94ms +[2025-09-11 09:08:56] [Rank 0] step:9721/10000 train_time:465745ms step_avg:47.91ms +[2025-09-11 09:08:56] [Rank 0] step:9721/10000 train_time:465745ms step_avg:47.91ms +[2025-09-11 09:08:57] [Rank 0] step:9741/10000 train_time:466463ms step_avg:47.89ms +[2025-09-11 09:08:57] [Rank 0] step:9741/10000 train_time:466463ms step_avg:47.89ms +[2025-09-11 09:08:58] [Rank 0] step:9761/10000 train_time:467180ms step_avg:47.86ms +[2025-09-11 09:08:58] [Rank 0] step:9761/10000 train_time:467180ms step_avg:47.86ms +[2025-09-11 09:08:58] [Rank 0] step:9781/10000 train_time:467896ms step_avg:47.84ms +[2025-09-11 09:08:58] [Rank 0] step:9781/10000 train_time:467896ms step_avg:47.84ms +[2025-09-11 09:08:59] [Rank 0] step:9801/10000 train_time:468618ms step_avg:47.81ms +[2025-09-11 09:08:59] [Rank 0] step:9801/10000 train_time:468618ms step_avg:47.81ms +[2025-09-11 09:09:00] [Rank 0] step:9821/10000 train_time:469337ms step_avg:47.79ms +[2025-09-11 09:09:00] [Rank 0] step:9821/10000 train_time:469337ms step_avg:47.79ms +[2025-09-11 09:09:01] [Rank 0] step:9841/10000 train_time:470058ms step_avg:47.77ms +[2025-09-11 09:09:01] [Rank 0] step:9841/10000 train_time:470058ms step_avg:47.77ms +[2025-09-11 09:09:01] [Rank 0] step:9861/10000 train_time:470775ms step_avg:47.74ms +[2025-09-11 09:09:01] [Rank 0] step:9861/10000 train_time:470775ms step_avg:47.74ms +[2025-09-11 09:09:02] [Rank 0] step:9881/10000 train_time:471493ms step_avg:47.72ms +[2025-09-11 09:09:02] [Rank 0] step:9881/10000 train_time:471493ms step_avg:47.72ms +[2025-09-11 09:09:03] [Rank 0] step:9901/10000 train_time:472208ms step_avg:47.69ms +[2025-09-11 09:09:03] [Rank 0] step:9901/10000 train_time:472208ms step_avg:47.69ms +[2025-09-11 09:09:04] [Rank 0] step:9921/10000 train_time:472924ms step_avg:47.67ms 
+[2025-09-11 09:09:04] [Rank 0] step:9921/10000 train_time:472924ms step_avg:47.67ms +[2025-09-11 09:09:04] [Rank 0] step:9941/10000 train_time:473646ms step_avg:47.65ms +[2025-09-11 09:09:04] [Rank 0] step:9941/10000 train_time:473646ms step_avg:47.65ms +[2025-09-11 09:09:05] [Rank 0] step:9961/10000 train_time:474371ms step_avg:47.62ms +[2025-09-11 09:09:05] [Rank 0] step:9961/10000 train_time:474371ms step_avg:47.62ms +[2025-09-11 09:09:06] [Rank 0] step:9981/10000 train_time:475091ms step_avg:47.60ms +[2025-09-11 09:09:06] [Rank 0] step:9981/10000 train_time:475091ms step_avg:47.60ms +[2025-09-11 09:09:06] [Rank 0] step:10000/10000 train_time:475783ms step_avg:47.58ms +[2025-09-11 09:09:06] [Rank 0] step:10000/10000 train_time:475783ms step_avg:47.58ms +[2025-09-11 09:09:06] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 09:09:06] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 09:09:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 09:09:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 09:09:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 09:09:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 09:09:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:09:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 09:09:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 09:09:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 09:09:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 09:09:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 09:09:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 09:09:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 09:09:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 09:09:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 09:09:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 09:09:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 09:09:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 09:09:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 09:09:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 09:09:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 09:09:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 09:09:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 09:09:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 09:09:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 09:09:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 09:09:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 09:09:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 09:09:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 09:09:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 09:09:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 09:09:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 09:09:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 09:09:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 09:09:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 09:09:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 09:09:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 09:09:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 09:09:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 09:09:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 09:09:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 09:09:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 09:09:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 09:09:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 09:09:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 09:09:20] [Rank 0] PRINT: step:10000/10000 val_loss:4.0399 total_sharp:8.3242e-06 L1_sharp:8.5202e-04 L2_sharp:6.1608e-04 L3_sharp:7.2302e-04 L4_sharp:9.7773e-04 L5_sharp:1.4867e-03 L6_sharp:9.7017e-04 L7_sharp:2.1960e-03 L8_sharp:2.8689e-03 L9_sharp:2.4804e-03 L10_sharp:3.0824e-03 L11_sharp:5.8199e-03 L12_sharp:3.5541e-02 total_fnorm:1.0375e+01 total_l1_linf:8.9600e+03 total_spectral:5.1875e+00 L1_fnorm:2.5879e-02 L2_fnorm:2.5024e-02 L3_fnorm:2.4780e-02 L4_fnorm:2.4902e-02 L5_fnorm:2.4658e-02 L6_fnorm:2.4902e-02 L7_fnorm:2.4902e-02 L8_fnorm:2.4048e-02 L9_fnorm:2.4414e-02 L10_fnorm:2.4414e-02 L11_fnorm:2.3926e-02 L12_fnorm:2.3804e-02 L1_l1linf:2.5482e-03 L2_l1linf:2.4109e-03 L3_l1linf:2.2888e-03 L4_l1linf:2.4109e-03 L5_l1linf:2.2736e-03 L6_l1linf:2.3041e-03 L7_l1linf:2.2583e-03 L8_l1linf:2.6245e-03 L9_l1linf:2.0752e-03 L10_l1linf:2.1820e-03 L11_l1linf:2.0905e-03 L12_l1linf:2.4109e-03 L1_spectral:4.0967e-04 L2_spectral:4.1015e-04 L3_spectral:4.1291e-04 L4_spectral:4.0628e-04 L5_spectral:3.9869e-04 L6_spectral:4.1102e-04 L7_spectral:4.0354e-04 L8_spectral:4.1242e-04 L9_spectral:3.9517e-04 L10_spectral:3.9544e-04 L11_spectral:3.9243e-04 L12_spectral:4.0637e-04 train_time:475805ms step_avg:47.58ms +[2025-09-11 09:09:20] [Rank 0] PRINT: step:10000/10000 val_loss:4.0399 total_sharp:8.3242e-06 L1_sharp:8.5202e-04 L2_sharp:6.1608e-04 L3_sharp:7.2302e-04 L4_sharp:9.7773e-04 L5_sharp:1.4867e-03 L6_sharp:9.7017e-04 L7_sharp:2.1960e-03 L8_sharp:2.8689e-03 L9_sharp:2.4804e-03 L10_sharp:3.0824e-03 L11_sharp:5.8199e-03 L12_sharp:3.5541e-02 total_fnorm:1.0375e+01 total_l1_linf:8.9600e+03 total_spectral:5.1875e+00 L1_fnorm:2.5879e-02 L2_fnorm:2.5024e-02 L3_fnorm:2.4780e-02 L4_fnorm:2.4902e-02 L5_fnorm:2.4658e-02 L6_fnorm:2.4902e-02 L7_fnorm:2.4902e-02 L8_fnorm:2.4048e-02 L9_fnorm:2.4414e-02 L10_fnorm:2.4414e-02 L11_fnorm:2.3926e-02 L12_fnorm:2.3804e-02 L1_l1linf:2.5482e-03 L2_l1linf:2.4109e-03 L3_l1linf:2.2888e-03 L4_l1linf:2.4109e-03 L5_l1linf:2.2736e-03 
L6_l1linf:2.3041e-03 L7_l1linf:2.2583e-03 L8_l1linf:2.6245e-03 L9_l1linf:2.0752e-03 L10_l1linf:2.1820e-03 L11_l1linf:2.0905e-03 L12_l1linf:2.4109e-03 L1_spectral:4.0967e-04 L2_spectral:4.1015e-04 L3_spectral:4.1291e-04 L4_spectral:4.0628e-04 L5_spectral:3.9869e-04 L6_spectral:4.1102e-04 L7_spectral:4.0354e-04 L8_spectral:4.1242e-04 L9_spectral:3.9517e-04 L10_spectral:3.9544e-04 L11_spectral:3.9243e-04 L12_spectral:4.0637e-04 train_time:475805ms step_avg:47.58ms +[2025-09-11 09:09:20] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 09:09:20 2025 --- +[2025-09-11 09:09:20] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 09:09:20 2025 --- +[2025-09-11 09:09:20] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11692 MiB +[2025-09-11 09:09:20] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11692 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.01_seed_43/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.01_seed_43/config.json new file mode 100644 index 0000000000000000000000000000000000000000..8c8afbd26346ba9ccdf9ff0fd853175838e0d2af --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.01_seed_43/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 43, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.05, + "muon_lr": 0.01, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "f02cdb20-4448-49d7-a205-56160bd687c7", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.01_seed_43/training_log_f02cdb20-4448-49d7-a205-56160bd687c7.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.01_seed_43/training_log_f02cdb20-4448-49d7-a205-56160bd687c7.txt new file mode 100644 index 0000000000000000000000000000000000000000..21be8757b877f7b8060a6d72f0124b673cfacba8 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.01_seed_43/training_log_f02cdb20-4448-49d7-a205-56160bd687c7.txt @@ -0,0 +1,4264 @@ +[2025-09-11 07:06:15] [Rank 0] PRINT: --- Script Start: Thu Sep 11 07:06:15 2025 --- +[2025-09-11 07:06:15] [Rank 0] PRINT: --- Script Start: Thu Sep 11 07:06:15 2025 --- +[2025-09-11 07:06:15] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.05, muon_lr=0.01, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 07:06:15] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.05, muon_lr=0.01, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 07:06:15] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 07:06:15] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 07:06:15] [Rank 0] PRINT: Using fixed seed: 43 +[2025-09-11 07:06:15] [Rank 0] PRINT: Using fixed seed: 43 +[2025-09-11 07:06:15] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.01_seed_43 +[2025-09-11 07:06:15] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.01_seed_43 +[2025-09-11 07:06:15] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import 
dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, 
"unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." 
+ "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention 
sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 07:06:15] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 07:06:15] [Rank 0] PRINT: Constructing model... +[2025-09-11 07:06:15] [Rank 0] PRINT: Constructing model... +[2025-09-11 07:06:16] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 07:06:16] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 07:06:16] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 07:06:16] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 07:06:16] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 07:06:16] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 07:06:16] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 07:06:16] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 07:06:16] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 07:06:16] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 07:06:18] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 07:06:18] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 07:06:18] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 07:06:18] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 07:06:18] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 07:06:18] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 07:06:24] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 07:06:24] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 07:06:24] [Rank 0] PRINT: Starting warmup... +[2025-09-11 07:06:24] [Rank 0] PRINT: Starting warmup... +[2025-09-11 07:07:03] [Rank 0] PRINT: Warmup complete. +[2025-09-11 07:07:03] [Rank 0] PRINT: Warmup complete. +[2025-09-11 07:07:03] [Rank 0] PRINT: Starting training... +[2025-09-11 07:07:03] [Rank 0] PRINT: Starting training... 
+[2025-09-11 07:07:05] [Rank 0] step:21/10000 train_time:1133ms step_avg:53.94ms +[2025-09-11 07:07:05] [Rank 0] step:21/10000 train_time:1133ms step_avg:53.94ms +[2025-09-11 07:07:05] [Rank 0] step:41/10000 train_time:1856ms step_avg:45.28ms +[2025-09-11 07:07:05] [Rank 0] step:41/10000 train_time:1856ms step_avg:45.28ms +[2025-09-11 07:07:06] [Rank 0] step:61/10000 train_time:2579ms step_avg:42.27ms +[2025-09-11 07:07:06] [Rank 0] step:61/10000 train_time:2579ms step_avg:42.27ms +[2025-09-11 07:07:07] [Rank 0] step:81/10000 train_time:3301ms step_avg:40.75ms +[2025-09-11 07:07:07] [Rank 0] step:81/10000 train_time:3301ms step_avg:40.75ms +[2025-09-11 07:07:08] [Rank 0] step:101/10000 train_time:4023ms step_avg:39.84ms +[2025-09-11 07:07:08] [Rank 0] step:101/10000 train_time:4023ms step_avg:39.84ms +[2025-09-11 07:07:08] [Rank 0] step:121/10000 train_time:4746ms step_avg:39.22ms +[2025-09-11 07:07:08] [Rank 0] step:121/10000 train_time:4746ms step_avg:39.22ms +[2025-09-11 07:07:09] [Rank 0] step:141/10000 train_time:5468ms step_avg:38.78ms +[2025-09-11 07:07:09] [Rank 0] step:141/10000 train_time:5468ms step_avg:38.78ms +[2025-09-11 07:07:10] [Rank 0] step:161/10000 train_time:6190ms step_avg:38.45ms +[2025-09-11 07:07:10] [Rank 0] step:161/10000 train_time:6190ms step_avg:38.45ms +[2025-09-11 07:07:10] [Rank 0] step:181/10000 train_time:6911ms step_avg:38.18ms +[2025-09-11 07:07:10] [Rank 0] step:181/10000 train_time:6911ms step_avg:38.18ms +[2025-09-11 07:07:11] [Rank 0] step:201/10000 train_time:7634ms step_avg:37.98ms +[2025-09-11 07:07:11] [Rank 0] step:201/10000 train_time:7634ms step_avg:37.98ms +[2025-09-11 07:07:12] [Rank 0] step:221/10000 train_time:8356ms step_avg:37.81ms +[2025-09-11 07:07:12] [Rank 0] step:221/10000 train_time:8356ms step_avg:37.81ms +[2025-09-11 07:07:13] [Rank 0] step:241/10000 train_time:9078ms step_avg:37.67ms +[2025-09-11 07:07:13] [Rank 0] step:241/10000 train_time:9078ms step_avg:37.67ms +[2025-09-11 07:07:13] [Rank 0] 
step:261/10000 train_time:9800ms step_avg:37.55ms +[2025-09-11 07:07:13] [Rank 0] step:261/10000 train_time:9800ms step_avg:37.55ms +[2025-09-11 07:07:14] [Rank 0] step:281/10000 train_time:10522ms step_avg:37.44ms +[2025-09-11 07:07:14] [Rank 0] step:281/10000 train_time:10522ms step_avg:37.44ms +[2025-09-11 07:07:15] [Rank 0] step:301/10000 train_time:11243ms step_avg:37.35ms +[2025-09-11 07:07:15] [Rank 0] step:301/10000 train_time:11243ms step_avg:37.35ms +[2025-09-11 07:07:15] [Rank 0] step:321/10000 train_time:11966ms step_avg:37.28ms +[2025-09-11 07:07:15] [Rank 0] step:321/10000 train_time:11966ms step_avg:37.28ms +[2025-09-11 07:07:16] [Rank 0] step:341/10000 train_time:12687ms step_avg:37.20ms +[2025-09-11 07:07:16] [Rank 0] step:341/10000 train_time:12687ms step_avg:37.20ms +[2025-09-11 07:07:17] [Rank 0] step:361/10000 train_time:13409ms step_avg:37.14ms +[2025-09-11 07:07:17] [Rank 0] step:361/10000 train_time:13409ms step_avg:37.14ms +[2025-09-11 07:07:18] [Rank 0] step:381/10000 train_time:14130ms step_avg:37.09ms +[2025-09-11 07:07:18] [Rank 0] step:381/10000 train_time:14130ms step_avg:37.09ms +[2025-09-11 07:07:18] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 07:07:18] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 07:07:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 07:07:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 07:07:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 07:07:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 07:07:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:07:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 07:07:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 07:07:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 07:07:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 07:07:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 07:08:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 07:08:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 07:08:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 07:08:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 07:08:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 07:08:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 07:08:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 07:08:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 07:08:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 07:08:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 07:08:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 07:08:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 07:08:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 07:08:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 07:08:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 07:08:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 07:08:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 07:08:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 07:08:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 07:08:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 07:08:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 07:08:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 07:08:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 07:08:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 07:08:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 07:08:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 07:08:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 07:08:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 07:08:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 07:08:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 07:08:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 07:08:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 07:08:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 07:08:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:08:07] [Rank 0] PRINT: step:400/10000 val_loss:5.7715 total_sharp:2.1805e-04 L1_sharp:1.4819e-02 L2_sharp:9.9131e-03 L3_sharp:8.6871e-03 L4_sharp:9.4480e-03 L5_sharp:8.0740e-03 L6_sharp:9.9018e-03 L7_sharp:1.0176e-02 L8_sharp:8.5097e-03 L9_sharp:1.2288e-02 L10_sharp:1.4542e-02 L11_sharp:2.0931e-02 L12_sharp:1.8609e-01 total_fnorm:2.0662e+02 total_l1_linf:5.2033e+05 total_spectral:1.0333e+02 L1_fnorm:1.2218e+00 L2_fnorm:1.2124e+00 L3_fnorm:1.2122e+00 L4_fnorm:1.2146e+00 L5_fnorm:1.2151e+00 L6_fnorm:1.2131e+00 L7_fnorm:1.2116e+00 L8_fnorm:1.2184e+00 L9_fnorm:1.2128e+00 L10_fnorm:1.2152e+00 L11_fnorm:1.2158e+00 L12_fnorm:1.1762e+00 L1_l1linf:4.1636e-01 L2_l1linf:4.1222e-01 L3_l1linf:4.1238e-01 L4_l1linf:4.1248e-01 L5_l1linf:4.1323e-01 L6_l1linf:4.1653e-01 L7_l1linf:4.2166e-01 L8_l1linf:4.2438e-01 L9_l1linf:4.2790e-01 L10_l1linf:4.2947e-01 L11_l1linf:4.2929e-01 L12_l1linf:4.1850e-01 L1_spectral:1.2044e-02 L2_spectral:1.2047e-02 L3_spectral:1.2046e-02 L4_spectral:1.2048e-02 L5_spectral:1.2040e-02 L6_spectral:1.2044e-02 L7_spectral:1.2043e-02 L8_spectral:1.2043e-02 L9_spectral:1.2043e-02 L10_spectral:1.2047e-02 L11_spectral:1.2040e-02 L12_spectral:1.2039e-02 train_time:14832ms step_avg:37.08ms +[2025-09-11 07:08:07] [Rank 0] PRINT: step:400/10000 val_loss:5.7715 total_sharp:2.1805e-04 L1_sharp:1.4819e-02 L2_sharp:9.9131e-03 L3_sharp:8.6871e-03 L4_sharp:9.4480e-03 L5_sharp:8.0740e-03 L6_sharp:9.9018e-03 L7_sharp:1.0176e-02 L8_sharp:8.5097e-03 L9_sharp:1.2288e-02 L10_sharp:1.4542e-02 L11_sharp:2.0931e-02 L12_sharp:1.8609e-01 total_fnorm:2.0662e+02 total_l1_linf:5.2033e+05 total_spectral:1.0333e+02 L1_fnorm:1.2218e+00 L2_fnorm:1.2124e+00 L3_fnorm:1.2122e+00 L4_fnorm:1.2146e+00 L5_fnorm:1.2151e+00 L6_fnorm:1.2131e+00 L7_fnorm:1.2116e+00 L8_fnorm:1.2184e+00 L9_fnorm:1.2128e+00 L10_fnorm:1.2152e+00 L11_fnorm:1.2158e+00 L12_fnorm:1.1762e+00 L1_l1linf:4.1636e-01 L2_l1linf:4.1222e-01 L3_l1linf:4.1238e-01 L4_l1linf:4.1248e-01 L5_l1linf:4.1323e-01 
L6_l1linf:4.1653e-01 L7_l1linf:4.2166e-01 L8_l1linf:4.2438e-01 L9_l1linf:4.2790e-01 L10_l1linf:4.2947e-01 L11_l1linf:4.2929e-01 L12_l1linf:4.1850e-01 L1_spectral:1.2044e-02 L2_spectral:1.2047e-02 L3_spectral:1.2046e-02 L4_spectral:1.2048e-02 L5_spectral:1.2040e-02 L6_spectral:1.2044e-02 L7_spectral:1.2043e-02 L8_spectral:1.2043e-02 L9_spectral:1.2043e-02 L10_spectral:1.2047e-02 L11_spectral:1.2040e-02 L12_spectral:1.2039e-02 train_time:14832ms step_avg:37.08ms +[2025-09-11 07:08:46] [Rank 0] step:401/10000 train_time:53653ms step_avg:133.80ms +[2025-09-11 07:08:46] [Rank 0] step:401/10000 train_time:53653ms step_avg:133.80ms +[2025-09-11 07:08:47] [Rank 0] step:421/10000 train_time:55527ms step_avg:131.89ms +[2025-09-11 07:08:47] [Rank 0] step:421/10000 train_time:55527ms step_avg:131.89ms +[2025-09-11 07:08:48] [Rank 0] step:441/10000 train_time:56162ms step_avg:127.35ms +[2025-09-11 07:08:48] [Rank 0] step:441/10000 train_time:56162ms step_avg:127.35ms +[2025-09-11 07:08:49] [Rank 0] step:461/10000 train_time:56797ms step_avg:123.20ms +[2025-09-11 07:08:49] [Rank 0] step:461/10000 train_time:56797ms step_avg:123.20ms +[2025-09-11 07:08:49] [Rank 0] step:481/10000 train_time:57432ms step_avg:119.40ms +[2025-09-11 07:08:49] [Rank 0] step:481/10000 train_time:57432ms step_avg:119.40ms +[2025-09-11 07:08:50] [Rank 0] step:501/10000 train_time:58076ms step_avg:115.92ms +[2025-09-11 07:08:50] [Rank 0] step:501/10000 train_time:58076ms step_avg:115.92ms +[2025-09-11 07:08:51] [Rank 0] step:521/10000 train_time:58710ms step_avg:112.69ms +[2025-09-11 07:08:51] [Rank 0] step:521/10000 train_time:58710ms step_avg:112.69ms +[2025-09-11 07:08:51] [Rank 0] step:541/10000 train_time:59345ms step_avg:109.69ms +[2025-09-11 07:08:51] [Rank 0] step:541/10000 train_time:59345ms step_avg:109.69ms +[2025-09-11 07:08:52] [Rank 0] step:561/10000 train_time:59980ms step_avg:106.92ms +[2025-09-11 07:08:52] [Rank 0] step:561/10000 train_time:59980ms step_avg:106.92ms +[2025-09-11 07:08:53] 
[Rank 0] step:581/10000 train_time:60613ms step_avg:104.33ms +[2025-09-11 07:08:53] [Rank 0] step:581/10000 train_time:60613ms step_avg:104.33ms +[2025-09-11 07:08:53] [Rank 0] step:601/10000 train_time:61248ms step_avg:101.91ms +[2025-09-11 07:08:53] [Rank 0] step:601/10000 train_time:61248ms step_avg:101.91ms +[2025-09-11 07:08:54] [Rank 0] step:621/10000 train_time:61882ms step_avg:99.65ms +[2025-09-11 07:08:54] [Rank 0] step:621/10000 train_time:61882ms step_avg:99.65ms +[2025-09-11 07:08:54] [Rank 0] step:641/10000 train_time:62517ms step_avg:97.53ms +[2025-09-11 07:08:54] [Rank 0] step:641/10000 train_time:62517ms step_avg:97.53ms +[2025-09-11 07:08:55] [Rank 0] step:661/10000 train_time:63151ms step_avg:95.54ms +[2025-09-11 07:08:55] [Rank 0] step:661/10000 train_time:63151ms step_avg:95.54ms +[2025-09-11 07:08:56] [Rank 0] step:681/10000 train_time:63785ms step_avg:93.66ms +[2025-09-11 07:08:56] [Rank 0] step:681/10000 train_time:63785ms step_avg:93.66ms +[2025-09-11 07:08:56] [Rank 0] step:701/10000 train_time:64419ms step_avg:91.90ms +[2025-09-11 07:08:56] [Rank 0] step:701/10000 train_time:64419ms step_avg:91.90ms +[2025-09-11 07:08:57] [Rank 0] step:721/10000 train_time:65052ms step_avg:90.22ms +[2025-09-11 07:08:57] [Rank 0] step:721/10000 train_time:65052ms step_avg:90.22ms +[2025-09-11 07:08:58] [Rank 0] step:741/10000 train_time:65686ms step_avg:88.65ms +[2025-09-11 07:08:58] [Rank 0] step:741/10000 train_time:65686ms step_avg:88.65ms +[2025-09-11 07:08:58] [Rank 0] step:761/10000 train_time:66325ms step_avg:87.16ms +[2025-09-11 07:08:58] [Rank 0] step:761/10000 train_time:66325ms step_avg:87.16ms +[2025-09-11 07:08:59] [Rank 0] step:781/10000 train_time:66963ms step_avg:85.74ms +[2025-09-11 07:08:59] [Rank 0] step:781/10000 train_time:66963ms step_avg:85.74ms +[2025-09-11 07:09:00] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 07:09:00] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 07:09:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 07:09:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 07:09:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 07:09:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 07:09:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:09:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:09:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 07:09:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 07:09:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 07:09:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 07:09:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 07:09:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 07:09:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 07:09:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 07:09:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 07:09:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 07:09:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 07:09:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 07:09:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 07:09:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 07:09:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 07:09:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 07:09:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 07:09:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 07:09:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 07:09:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 07:09:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 07:09:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 07:09:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 07:09:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 07:09:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 07:09:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 07:09:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 07:09:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 07:09:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 07:09:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 07:09:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... 
+[2025-09-11 07:09:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 07:09:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 07:09:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 07:09:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 07:09:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 07:09:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 07:09:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 07:09:48] [Rank 0] PRINT: step:800/10000 val_loss:5.3978 total_sharp:1.5880e-04 L1_sharp:1.6649e-02 L2_sharp:4.4352e-03 L3_sharp:3.9441e-03 L4_sharp:4.4466e-03 L5_sharp:4.6678e-03 L6_sharp:5.1446e-03 L7_sharp:5.3515e-03 L8_sharp:5.2220e-03 L9_sharp:8.8247e-03 L10_sharp:7.0616e-03 L11_sharp:1.8576e-02 L12_sharp:2.0960e-01 total_fnorm:2.0600e+02 total_l1_linf:4.9152e+05 total_spectral:1.0300e+02 L1_fnorm:1.2422e+00 L2_fnorm:1.2422e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2344e+00 L5_fnorm:1.2344e+00 L6_fnorm:1.2422e+00 L7_fnorm:1.2266e+00 L8_fnorm:1.2422e+00 L9_fnorm:1.2344e+00 L10_fnorm:1.2500e+00 L11_fnorm:1.2500e+00 L12_fnorm:1.1484e+00 L1_l1linf:4.0625e-01 L2_l1linf:3.9453e-01 L3_l1linf:3.9062e-01 L4_l1linf:3.8672e-01 L5_l1linf:3.8672e-01 L6_l1linf:3.8867e-01 L7_l1linf:3.8867e-01 L8_l1linf:3.8867e-01 L9_l1linf:3.9648e-01 L10_l1linf:4.1016e-01 L11_l1linf:4.0234e-01 L12_l1linf:3.5938e-01 L1_spectral:1.3604e-02 L2_spectral:1.3551e-02 L3_spectral:1.3598e-02 L4_spectral:1.3531e-02 L5_spectral:1.3512e-02 L6_spectral:1.3504e-02 L7_spectral:1.3501e-02 L8_spectral:1.3513e-02 L9_spectral:1.3532e-02 L10_spectral:1.3486e-02 L11_spectral:1.3478e-02 L12_spectral:1.3504e-02 train_time:67584ms step_avg:84.48ms +[2025-09-11 07:09:48] [Rank 0] PRINT: step:800/10000 val_loss:5.3978 
total_sharp:1.5880e-04 L1_sharp:1.6649e-02 L2_sharp:4.4352e-03 L3_sharp:3.9441e-03 L4_sharp:4.4466e-03 L5_sharp:4.6678e-03 L6_sharp:5.1446e-03 L7_sharp:5.3515e-03 L8_sharp:5.2220e-03 L9_sharp:8.8247e-03 L10_sharp:7.0616e-03 L11_sharp:1.8576e-02 L12_sharp:2.0960e-01 total_fnorm:2.0600e+02 total_l1_linf:4.9152e+05 total_spectral:1.0300e+02 L1_fnorm:1.2422e+00 L2_fnorm:1.2422e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2344e+00 L5_fnorm:1.2344e+00 L6_fnorm:1.2422e+00 L7_fnorm:1.2266e+00 L8_fnorm:1.2422e+00 L9_fnorm:1.2344e+00 L10_fnorm:1.2500e+00 L11_fnorm:1.2500e+00 L12_fnorm:1.1484e+00 L1_l1linf:4.0625e-01 L2_l1linf:3.9453e-01 L3_l1linf:3.9062e-01 L4_l1linf:3.8672e-01 L5_l1linf:3.8672e-01 L6_l1linf:3.8867e-01 L7_l1linf:3.8867e-01 L8_l1linf:3.8867e-01 L9_l1linf:3.9648e-01 L10_l1linf:4.1016e-01 L11_l1linf:4.0234e-01 L12_l1linf:3.5938e-01 L1_spectral:1.3604e-02 L2_spectral:1.3551e-02 L3_spectral:1.3598e-02 L4_spectral:1.3531e-02 L5_spectral:1.3512e-02 L6_spectral:1.3504e-02 L7_spectral:1.3501e-02 L8_spectral:1.3513e-02 L9_spectral:1.3532e-02 L10_spectral:1.3486e-02 L11_spectral:1.3478e-02 L12_spectral:1.3504e-02 train_time:67584ms step_avg:84.48ms +[2025-09-11 07:09:49] [Rank 0] step:801/10000 train_time:68706ms step_avg:85.77ms +[2025-09-11 07:09:49] [Rank 0] step:801/10000 train_time:68706ms step_avg:85.77ms +[2025-09-11 07:09:50] [Rank 0] step:821/10000 train_time:69348ms step_avg:84.47ms +[2025-09-11 07:09:50] [Rank 0] step:821/10000 train_time:69348ms step_avg:84.47ms +[2025-09-11 07:09:50] [Rank 0] step:841/10000 train_time:69988ms step_avg:83.22ms +[2025-09-11 07:09:50] [Rank 0] step:841/10000 train_time:69988ms step_avg:83.22ms +[2025-09-11 07:09:51] [Rank 0] step:861/10000 train_time:70628ms step_avg:82.03ms +[2025-09-11 07:09:51] [Rank 0] step:861/10000 train_time:70628ms step_avg:82.03ms +[2025-09-11 07:09:52] [Rank 0] step:881/10000 train_time:71268ms step_avg:80.89ms +[2025-09-11 07:09:52] [Rank 0] step:881/10000 train_time:71268ms step_avg:80.89ms +[2025-09-11 
07:09:52] [Rank 0] step:901/10000 train_time:71908ms step_avg:79.81ms +[2025-09-11 07:09:52] [Rank 0] step:901/10000 train_time:71908ms step_avg:79.81ms +[2025-09-11 07:09:53] [Rank 0] step:921/10000 train_time:72547ms step_avg:78.77ms +[2025-09-11 07:09:53] [Rank 0] step:921/10000 train_time:72547ms step_avg:78.77ms +[2025-09-11 07:09:54] [Rank 0] step:941/10000 train_time:73187ms step_avg:77.78ms +[2025-09-11 07:09:54] [Rank 0] step:941/10000 train_time:73187ms step_avg:77.78ms +[2025-09-11 07:09:54] [Rank 0] step:961/10000 train_time:73826ms step_avg:76.82ms +[2025-09-11 07:09:54] [Rank 0] step:961/10000 train_time:73826ms step_avg:76.82ms +[2025-09-11 07:09:55] [Rank 0] step:981/10000 train_time:74465ms step_avg:75.91ms +[2025-09-11 07:09:55] [Rank 0] step:981/10000 train_time:74465ms step_avg:75.91ms +[2025-09-11 07:09:56] [Rank 0] step:1001/10000 train_time:75104ms step_avg:75.03ms +[2025-09-11 07:09:56] [Rank 0] step:1001/10000 train_time:75104ms step_avg:75.03ms +[2025-09-11 07:09:56] [Rank 0] step:1021/10000 train_time:75743ms step_avg:74.18ms +[2025-09-11 07:09:56] [Rank 0] step:1021/10000 train_time:75743ms step_avg:74.18ms +[2025-09-11 07:09:57] [Rank 0] step:1041/10000 train_time:76382ms step_avg:73.37ms +[2025-09-11 07:09:57] [Rank 0] step:1041/10000 train_time:76382ms step_avg:73.37ms +[2025-09-11 07:09:58] [Rank 0] step:1061/10000 train_time:77020ms step_avg:72.59ms +[2025-09-11 07:09:58] [Rank 0] step:1061/10000 train_time:77020ms step_avg:72.59ms +[2025-09-11 07:09:58] [Rank 0] step:1081/10000 train_time:77659ms step_avg:71.84ms +[2025-09-11 07:09:58] [Rank 0] step:1081/10000 train_time:77659ms step_avg:71.84ms +[2025-09-11 07:09:59] [Rank 0] step:1101/10000 train_time:78298ms step_avg:71.12ms +[2025-09-11 07:09:59] [Rank 0] step:1101/10000 train_time:78298ms step_avg:71.12ms +[2025-09-11 07:09:59] [Rank 0] step:1121/10000 train_time:78938ms step_avg:70.42ms +[2025-09-11 07:09:59] [Rank 0] step:1121/10000 train_time:78938ms step_avg:70.42ms 
+[2025-09-11 07:10:00] [Rank 0] step:1141/10000 train_time:79576ms step_avg:69.74ms +[2025-09-11 07:10:00] [Rank 0] step:1141/10000 train_time:79576ms step_avg:69.74ms +[2025-09-11 07:10:01] [Rank 0] step:1161/10000 train_time:80279ms step_avg:69.15ms +[2025-09-11 07:10:01] [Rank 0] step:1161/10000 train_time:80279ms step_avg:69.15ms +[2025-09-11 07:10:01] [Rank 0] step:1181/10000 train_time:80978ms step_avg:68.57ms +[2025-09-11 07:10:01] [Rank 0] step:1181/10000 train_time:80978ms step_avg:68.57ms +[2025-09-11 07:10:02] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 07:10:02] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 07:10:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 07:10:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 07:10:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 07:10:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 07:10:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:10:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:10:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 07:10:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 07:10:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 07:10:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 07:10:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... 
+[2025-09-11 07:10:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 07:10:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 07:10:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 07:10:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 07:10:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 07:10:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 07:10:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 07:10:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 07:10:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 07:10:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 07:10:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 07:10:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 07:10:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 07:10:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 07:10:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 07:10:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 07:10:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 07:10:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 07:10:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... 
+[2025-09-11 07:10:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 07:10:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 07:10:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 07:10:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 07:10:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 07:10:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 07:10:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 07:10:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 07:10:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 07:10:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 07:10:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 07:10:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 07:10:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 07:10:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:10:12] [Rank 0] PRINT: step:1200/10000 val_loss:5.0861 total_sharp:1.1883e-04 L1_sharp:1.5098e-02 L2_sharp:4.3428e-03 L3_sharp:3.0398e-03 L4_sharp:4.9047e-03 L5_sharp:4.8559e-03 L6_sharp:4.1937e-03 L7_sharp:3.8647e-03 L8_sharp:5.8748e-03 L9_sharp:7.5838e-03 L10_sharp:6.0000e-03 L11_sharp:1.2086e-02 L12_sharp:8.1539e-02 total_fnorm:2.0300e+02 total_l1_linf:4.7104e+05 total_spectral:1.0150e+02 L1_fnorm:1.2578e+00 L2_fnorm:1.2422e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2422e+00 L5_fnorm:1.2422e+00 L6_fnorm:1.2500e+00 L7_fnorm:1.2500e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2422e+00 L10_fnorm:1.2578e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2656e+00 L1_l1linf:3.7695e-01 L2_l1linf:3.6523e-01 L3_l1linf:3.6523e-01 L4_l1linf:3.6328e-01 L5_l1linf:3.5547e-01 L6_l1linf:3.6328e-01 L7_l1linf:3.5938e-01 L8_l1linf:3.5742e-01 L9_l1linf:3.5742e-01 L10_l1linf:3.6719e-01 L11_l1linf:3.7891e-01 L12_l1linf:3.9258e-01 L1_spectral:1.4020e-02 L2_spectral:1.3986e-02 L3_spectral:1.3986e-02 L4_spectral:1.3981e-02 L5_spectral:1.3992e-02 L6_spectral:1.3905e-02 L7_spectral:1.3928e-02 L8_spectral:1.4038e-02 L9_spectral:1.4149e-02 L10_spectral:1.3967e-02 L11_spectral:1.4074e-02 L12_spectral:1.4325e-02 train_time:81688ms step_avg:68.07ms +[2025-09-11 07:10:12] [Rank 0] PRINT: step:1200/10000 val_loss:5.0861 total_sharp:1.1883e-04 L1_sharp:1.5098e-02 L2_sharp:4.3428e-03 L3_sharp:3.0398e-03 L4_sharp:4.9047e-03 L5_sharp:4.8559e-03 L6_sharp:4.1937e-03 L7_sharp:3.8647e-03 L8_sharp:5.8748e-03 L9_sharp:7.5838e-03 L10_sharp:6.0000e-03 L11_sharp:1.2086e-02 L12_sharp:8.1539e-02 total_fnorm:2.0300e+02 total_l1_linf:4.7104e+05 total_spectral:1.0150e+02 L1_fnorm:1.2578e+00 L2_fnorm:1.2422e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2422e+00 L5_fnorm:1.2422e+00 L6_fnorm:1.2500e+00 L7_fnorm:1.2500e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2422e+00 L10_fnorm:1.2578e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2656e+00 L1_l1linf:3.7695e-01 L2_l1linf:3.6523e-01 L3_l1linf:3.6523e-01 L4_l1linf:3.6328e-01 L5_l1linf:3.5547e-01 
L6_l1linf:3.6328e-01 L7_l1linf:3.5938e-01 L8_l1linf:3.5742e-01 L9_l1linf:3.5742e-01 L10_l1linf:3.6719e-01 L11_l1linf:3.7891e-01 L12_l1linf:3.9258e-01 L1_spectral:1.4020e-02 L2_spectral:1.3986e-02 L3_spectral:1.3986e-02 L4_spectral:1.3981e-02 L5_spectral:1.3992e-02 L6_spectral:1.3905e-02 L7_spectral:1.3928e-02 L8_spectral:1.4038e-02 L9_spectral:1.4149e-02 L10_spectral:1.3967e-02 L11_spectral:1.4074e-02 L12_spectral:1.4325e-02 train_time:81688ms step_avg:68.07ms +[2025-09-11 07:10:13] [Rank 0] step:1201/10000 train_time:82772ms step_avg:68.92ms +[2025-09-11 07:10:13] [Rank 0] step:1201/10000 train_time:82772ms step_avg:68.92ms +[2025-09-11 07:10:14] [Rank 0] step:1221/10000 train_time:83402ms step_avg:68.31ms +[2025-09-11 07:10:14] [Rank 0] step:1221/10000 train_time:83402ms step_avg:68.31ms +[2025-09-11 07:10:14] [Rank 0] step:1241/10000 train_time:84043ms step_avg:67.72ms +[2025-09-11 07:10:14] [Rank 0] step:1241/10000 train_time:84043ms step_avg:67.72ms +[2025-09-11 07:10:15] [Rank 0] step:1261/10000 train_time:84684ms step_avg:67.16ms +[2025-09-11 07:10:15] [Rank 0] step:1261/10000 train_time:84684ms step_avg:67.16ms +[2025-09-11 07:10:15] [Rank 0] step:1281/10000 train_time:85324ms step_avg:66.61ms +[2025-09-11 07:10:15] [Rank 0] step:1281/10000 train_time:85324ms step_avg:66.61ms +[2025-09-11 07:10:16] [Rank 0] step:1301/10000 train_time:85964ms step_avg:66.08ms +[2025-09-11 07:10:16] [Rank 0] step:1301/10000 train_time:85964ms step_avg:66.08ms +[2025-09-11 07:10:17] [Rank 0] step:1321/10000 train_time:86604ms step_avg:65.56ms +[2025-09-11 07:10:17] [Rank 0] step:1321/10000 train_time:86604ms step_avg:65.56ms +[2025-09-11 07:10:17] [Rank 0] step:1341/10000 train_time:87243ms step_avg:65.06ms +[2025-09-11 07:10:17] [Rank 0] step:1341/10000 train_time:87243ms step_avg:65.06ms +[2025-09-11 07:10:18] [Rank 0] step:1361/10000 train_time:87884ms step_avg:64.57ms +[2025-09-11 07:10:18] [Rank 0] step:1361/10000 train_time:87884ms step_avg:64.57ms +[2025-09-11 07:10:19] 
[Rank 0] step:1381/10000 train_time:88523ms step_avg:64.10ms +[2025-09-11 07:10:19] [Rank 0] step:1381/10000 train_time:88523ms step_avg:64.10ms +[2025-09-11 07:10:19] [Rank 0] step:1401/10000 train_time:89163ms step_avg:63.64ms +[2025-09-11 07:10:19] [Rank 0] step:1401/10000 train_time:89163ms step_avg:63.64ms +[2025-09-11 07:10:20] [Rank 0] step:1421/10000 train_time:89802ms step_avg:63.20ms +[2025-09-11 07:10:20] [Rank 0] step:1421/10000 train_time:89802ms step_avg:63.20ms +[2025-09-11 07:10:21] [Rank 0] step:1441/10000 train_time:90442ms step_avg:62.76ms +[2025-09-11 07:10:21] [Rank 0] step:1441/10000 train_time:90442ms step_avg:62.76ms +[2025-09-11 07:10:21] [Rank 0] step:1461/10000 train_time:91081ms step_avg:62.34ms +[2025-09-11 07:10:21] [Rank 0] step:1461/10000 train_time:91081ms step_avg:62.34ms +[2025-09-11 07:10:22] [Rank 0] step:1481/10000 train_time:91721ms step_avg:61.93ms +[2025-09-11 07:10:22] [Rank 0] step:1481/10000 train_time:91721ms step_avg:61.93ms +[2025-09-11 07:10:23] [Rank 0] step:1501/10000 train_time:92364ms step_avg:61.53ms +[2025-09-11 07:10:23] [Rank 0] step:1501/10000 train_time:92364ms step_avg:61.53ms +[2025-09-11 07:10:23] [Rank 0] step:1521/10000 train_time:93008ms step_avg:61.15ms +[2025-09-11 07:10:23] [Rank 0] step:1521/10000 train_time:93008ms step_avg:61.15ms +[2025-09-11 07:10:24] [Rank 0] step:1541/10000 train_time:93652ms step_avg:60.77ms +[2025-09-11 07:10:24] [Rank 0] step:1541/10000 train_time:93652ms step_avg:60.77ms +[2025-09-11 07:10:24] [Rank 0] step:1561/10000 train_time:94295ms step_avg:60.41ms +[2025-09-11 07:10:24] [Rank 0] step:1561/10000 train_time:94295ms step_avg:60.41ms +[2025-09-11 07:10:25] [Rank 0] step:1581/10000 train_time:94940ms step_avg:60.05ms +[2025-09-11 07:10:25] [Rank 0] step:1581/10000 train_time:94940ms step_avg:60.05ms +[2025-09-11 07:10:26] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 07:10:26] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 07:10:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 07:10:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 07:10:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 07:10:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 07:10:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:10:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:10:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 07:10:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 07:10:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 07:10:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 07:10:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 07:10:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 07:10:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 07:10:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 07:10:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 07:10:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 07:10:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 07:10:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 07:10:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 07:10:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 07:10:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 07:10:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 07:10:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 07:10:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 07:10:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 07:10:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 07:10:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 07:10:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 07:10:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 07:10:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 07:10:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 07:10:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 07:10:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 07:10:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 07:10:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 07:10:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 07:10:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 07:10:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 07:10:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 07:10:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 07:10:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 07:10:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 07:10:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 07:10:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 07:10:35] [Rank 0] PRINT: step:1600/10000 val_loss:4.9393 total_sharp:9.9201e-05 L1_sharp:1.1255e-02 L2_sharp:8.0419e-04 L3_sharp:1.6640e-03 L4_sharp:1.9961e-03 L5_sharp:2.2672e-03 L6_sharp:2.8338e-03 L7_sharp:2.3171e-03 L8_sharp:4.9572e-03 L9_sharp:4.3050e-03 L10_sharp:4.5550e-03 L11_sharp:9.6750e-03 L12_sharp:1.1653e-01 total_fnorm:1.8700e+02 total_l1_linf:4.1984e+05 total_spectral:9.3500e+01 L1_fnorm:1.2578e+00 L2_fnorm:1.2500e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2500e+00 L7_fnorm:1.2500e+00 L8_fnorm:1.2578e+00 L9_fnorm:1.2500e+00 L10_fnorm:1.2578e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2734e+00 L1_l1linf:3.7305e-01 L2_l1linf:3.5352e-01 L3_l1linf:3.5156e-01 L4_l1linf:3.4375e-01 L5_l1linf:3.4766e-01 L6_l1linf:3.4375e-01 L7_l1linf:3.4570e-01 L8_l1linf:3.4570e-01 L9_l1linf:3.4766e-01 L10_l1linf:3.4766e-01 L11_l1linf:3.5938e-01 L12_l1linf:3.7891e-01 L1_spectral:1.4425e-02 L2_spectral:1.4363e-02 L3_spectral:1.4292e-02 L4_spectral:1.4314e-02 L5_spectral:1.4290e-02 L6_spectral:1.4381e-02 L7_spectral:1.4360e-02 L8_spectral:1.4565e-02 L9_spectral:1.4482e-02 L10_spectral:1.4385e-02 L11_spectral:1.4444e-02 L12_spectral:1.5002e-02 train_time:95565ms step_avg:59.73ms +[2025-09-11 07:10:35] [Rank 0] PRINT: step:1600/10000 
val_loss:4.9393 total_sharp:9.9201e-05 L1_sharp:1.1255e-02 L2_sharp:8.0419e-04 L3_sharp:1.6640e-03 L4_sharp:1.9961e-03 L5_sharp:2.2672e-03 L6_sharp:2.8338e-03 L7_sharp:2.3171e-03 L8_sharp:4.9572e-03 L9_sharp:4.3050e-03 L10_sharp:4.5550e-03 L11_sharp:9.6750e-03 L12_sharp:1.1653e-01 total_fnorm:1.8700e+02 total_l1_linf:4.1984e+05 total_spectral:9.3500e+01 L1_fnorm:1.2578e+00 L2_fnorm:1.2500e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2500e+00 L6_fnorm:1.2500e+00 L7_fnorm:1.2500e+00 L8_fnorm:1.2578e+00 L9_fnorm:1.2500e+00 L10_fnorm:1.2578e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2734e+00 L1_l1linf:3.7305e-01 L2_l1linf:3.5352e-01 L3_l1linf:3.5156e-01 L4_l1linf:3.4375e-01 L5_l1linf:3.4766e-01 L6_l1linf:3.4375e-01 L7_l1linf:3.4570e-01 L8_l1linf:3.4570e-01 L9_l1linf:3.4766e-01 L10_l1linf:3.4766e-01 L11_l1linf:3.5938e-01 L12_l1linf:3.7891e-01 L1_spectral:1.4425e-02 L2_spectral:1.4363e-02 L3_spectral:1.4292e-02 L4_spectral:1.4314e-02 L5_spectral:1.4290e-02 L6_spectral:1.4381e-02 L7_spectral:1.4360e-02 L8_spectral:1.4565e-02 L9_spectral:1.4482e-02 L10_spectral:1.4385e-02 L11_spectral:1.4444e-02 L12_spectral:1.5002e-02 train_time:95565ms step_avg:59.73ms +[2025-09-11 07:10:36] [Rank 0] step:1601/10000 train_time:96653ms step_avg:60.37ms +[2025-09-11 07:10:36] [Rank 0] step:1601/10000 train_time:96653ms step_avg:60.37ms +[2025-09-11 07:10:37] [Rank 0] step:1621/10000 train_time:97291ms step_avg:60.02ms +[2025-09-11 07:10:37] [Rank 0] step:1621/10000 train_time:97291ms step_avg:60.02ms +[2025-09-11 07:10:38] [Rank 0] step:1641/10000 train_time:97935ms step_avg:59.68ms +[2025-09-11 07:10:38] [Rank 0] step:1641/10000 train_time:97935ms step_avg:59.68ms +[2025-09-11 07:10:38] [Rank 0] step:1661/10000 train_time:98580ms step_avg:59.35ms +[2025-09-11 07:10:38] [Rank 0] step:1661/10000 train_time:98580ms step_avg:59.35ms +[2025-09-11 07:10:39] [Rank 0] step:1681/10000 train_time:99224ms step_avg:59.03ms +[2025-09-11 07:10:39] [Rank 0] step:1681/10000 train_time:99224ms 
step_avg:59.03ms +[2025-09-11 07:10:40] [Rank 0] step:1701/10000 train_time:99869ms step_avg:58.71ms +[2025-09-11 07:10:40] [Rank 0] step:1701/10000 train_time:99869ms step_avg:58.71ms +[2025-09-11 07:10:40] [Rank 0] step:1721/10000 train_time:100513ms step_avg:58.40ms +[2025-09-11 07:10:40] [Rank 0] step:1721/10000 train_time:100513ms step_avg:58.40ms +[2025-09-11 07:10:41] [Rank 0] step:1741/10000 train_time:101157ms step_avg:58.10ms +[2025-09-11 07:10:41] [Rank 0] step:1741/10000 train_time:101157ms step_avg:58.10ms +[2025-09-11 07:10:42] [Rank 0] step:1761/10000 train_time:101801ms step_avg:57.81ms +[2025-09-11 07:10:42] [Rank 0] step:1761/10000 train_time:101801ms step_avg:57.81ms +[2025-09-11 07:10:42] [Rank 0] step:1781/10000 train_time:102445ms step_avg:57.52ms +[2025-09-11 07:10:42] [Rank 0] step:1781/10000 train_time:102445ms step_avg:57.52ms +[2025-09-11 07:10:43] [Rank 0] step:1801/10000 train_time:103088ms step_avg:57.24ms +[2025-09-11 07:10:43] [Rank 0] step:1801/10000 train_time:103088ms step_avg:57.24ms +[2025-09-11 07:10:44] [Rank 0] step:1821/10000 train_time:103732ms step_avg:56.96ms +[2025-09-11 07:10:44] [Rank 0] step:1821/10000 train_time:103732ms step_avg:56.96ms +[2025-09-11 07:10:44] [Rank 0] step:1841/10000 train_time:104376ms step_avg:56.70ms +[2025-09-11 07:10:44] [Rank 0] step:1841/10000 train_time:104376ms step_avg:56.70ms +[2025-09-11 07:10:45] [Rank 0] step:1861/10000 train_time:105020ms step_avg:56.43ms +[2025-09-11 07:10:45] [Rank 0] step:1861/10000 train_time:105020ms step_avg:56.43ms +[2025-09-11 07:10:45] [Rank 0] step:1881/10000 train_time:105664ms step_avg:56.17ms +[2025-09-11 07:10:45] [Rank 0] step:1881/10000 train_time:105664ms step_avg:56.17ms +[2025-09-11 07:10:46] [Rank 0] step:1901/10000 train_time:106308ms step_avg:55.92ms +[2025-09-11 07:10:46] [Rank 0] step:1901/10000 train_time:106308ms step_avg:55.92ms +[2025-09-11 07:10:47] [Rank 0] step:1921/10000 train_time:106953ms step_avg:55.68ms +[2025-09-11 07:10:47] [Rank 
0] step:1921/10000 train_time:106953ms step_avg:55.68ms +[2025-09-11 07:10:47] [Rank 0] step:1941/10000 train_time:107596ms step_avg:55.43ms +[2025-09-11 07:10:47] [Rank 0] step:1941/10000 train_time:107596ms step_avg:55.43ms +[2025-09-11 07:10:48] [Rank 0] step:1961/10000 train_time:108239ms step_avg:55.20ms +[2025-09-11 07:10:48] [Rank 0] step:1961/10000 train_time:108239ms step_avg:55.20ms +[2025-09-11 07:10:49] [Rank 0] step:1981/10000 train_time:108884ms step_avg:54.96ms +[2025-09-11 07:10:49] [Rank 0] step:1981/10000 train_time:108884ms step_avg:54.96ms +[2025-09-11 07:10:49] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 07:10:49] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 07:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 07:10:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 07:10:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 07:10:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 07:10:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:10:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:10:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 07:10:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 07:10:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 07:10:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 07:10:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 07:10:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 07:10:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 07:10:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 07:10:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 07:10:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 07:10:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 07:10:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 07:10:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 07:10:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 07:10:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 07:10:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 07:10:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 07:10:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 07:10:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 07:10:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 07:10:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 07:10:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 07:10:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 07:10:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 07:10:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 07:10:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 07:10:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 07:10:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 07:10:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 07:10:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 07:10:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 07:10:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 07:10:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 07:10:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 07:10:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 07:10:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 07:10:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 07:10:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:10:59] [Rank 0] PRINT: step:2000/10000 val_loss:4.7934 total_sharp:7.0689e-05 L1_sharp:8.3236e-03 L2_sharp:1.3869e-03 L3_sharp:-2.9952e-04 L4_sharp:1.5408e-03 L5_sharp:2.2375e-03 L6_sharp:2.4300e-03 L7_sharp:1.4183e-03 L8_sharp:5.1661e-03 L9_sharp:3.7964e-03 L10_sharp:3.5803e-03 L11_sharp:1.0379e-02 L12_sharp:5.3507e-02 total_fnorm:1.9500e+02 total_l1_linf:4.5261e+05 total_spectral:9.7500e+01 L1_fnorm:1.2656e+00 L2_fnorm:1.2500e+00 L3_fnorm:1.2578e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2734e+00 L12_fnorm:1.2734e+00 L1_l1linf:3.6328e-01 L2_l1linf:3.4570e-01 L3_l1linf:3.3984e-01 L4_l1linf:3.3789e-01 L5_l1linf:3.3984e-01 L6_l1linf:3.4180e-01 L7_l1linf:3.4961e-01 L8_l1linf:3.4180e-01 L9_l1linf:3.3789e-01 L10_l1linf:3.4375e-01 L11_l1linf:3.5547e-01 L12_l1linf:3.6328e-01 L1_spectral:1.4679e-02 L2_spectral:1.4655e-02 L3_spectral:1.4563e-02 L4_spectral:1.4633e-02 L5_spectral:1.4586e-02 L6_spectral:1.4691e-02 L7_spectral:1.4563e-02 L8_spectral:1.4946e-02 L9_spectral:1.4754e-02 L10_spectral:1.4642e-02 L11_spectral:1.4703e-02 L12_spectral:1.5190e-02 train_time:109510ms step_avg:54.75ms +[2025-09-11 07:10:59] [Rank 0] PRINT: step:2000/10000 val_loss:4.7934 total_sharp:7.0689e-05 L1_sharp:8.3236e-03 L2_sharp:1.3869e-03 L3_sharp:-2.9952e-04 L4_sharp:1.5408e-03 L5_sharp:2.2375e-03 L6_sharp:2.4300e-03 L7_sharp:1.4183e-03 L8_sharp:5.1661e-03 L9_sharp:3.7964e-03 L10_sharp:3.5803e-03 L11_sharp:1.0379e-02 L12_sharp:5.3507e-02 total_fnorm:1.9500e+02 total_l1_linf:4.5261e+05 total_spectral:9.7500e+01 L1_fnorm:1.2656e+00 L2_fnorm:1.2500e+00 L3_fnorm:1.2578e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2734e+00 L12_fnorm:1.2734e+00 L1_l1linf:3.6328e-01 L2_l1linf:3.4570e-01 L3_l1linf:3.3984e-01 L4_l1linf:3.3789e-01 L5_l1linf:3.3984e-01 
L6_l1linf:3.4180e-01 L7_l1linf:3.4961e-01 L8_l1linf:3.4180e-01 L9_l1linf:3.3789e-01 L10_l1linf:3.4375e-01 L11_l1linf:3.5547e-01 L12_l1linf:3.6328e-01 L1_spectral:1.4679e-02 L2_spectral:1.4655e-02 L3_spectral:1.4563e-02 L4_spectral:1.4633e-02 L5_spectral:1.4586e-02 L6_spectral:1.4691e-02 L7_spectral:1.4563e-02 L8_spectral:1.4946e-02 L9_spectral:1.4754e-02 L10_spectral:1.4642e-02 L11_spectral:1.4703e-02 L12_spectral:1.5190e-02 train_time:109510ms step_avg:54.75ms +[2025-09-11 07:11:00] [Rank 0] step:2001/10000 train_time:110618ms step_avg:55.28ms +[2025-09-11 07:11:00] [Rank 0] step:2001/10000 train_time:110618ms step_avg:55.28ms +[2025-09-11 07:11:01] [Rank 0] step:2021/10000 train_time:111267ms step_avg:55.06ms +[2025-09-11 07:11:01] [Rank 0] step:2021/10000 train_time:111267ms step_avg:55.06ms +[2025-09-11 07:11:02] [Rank 0] step:2041/10000 train_time:111912ms step_avg:54.83ms +[2025-09-11 07:11:02] [Rank 0] step:2041/10000 train_time:111912ms step_avg:54.83ms +[2025-09-11 07:11:02] [Rank 0] step:2061/10000 train_time:112557ms step_avg:54.61ms +[2025-09-11 07:11:02] [Rank 0] step:2061/10000 train_time:112557ms step_avg:54.61ms +[2025-09-11 07:11:03] [Rank 0] step:2081/10000 train_time:113202ms step_avg:54.40ms +[2025-09-11 07:11:03] [Rank 0] step:2081/10000 train_time:113202ms step_avg:54.40ms +[2025-09-11 07:11:04] [Rank 0] step:2101/10000 train_time:113846ms step_avg:54.19ms +[2025-09-11 07:11:04] [Rank 0] step:2101/10000 train_time:113846ms step_avg:54.19ms +[2025-09-11 07:11:04] [Rank 0] step:2121/10000 train_time:114490ms step_avg:53.98ms +[2025-09-11 07:11:04] [Rank 0] step:2121/10000 train_time:114490ms step_avg:53.98ms +[2025-09-11 07:11:05] [Rank 0] step:2141/10000 train_time:115134ms step_avg:53.78ms +[2025-09-11 07:11:05] [Rank 0] step:2141/10000 train_time:115134ms step_avg:53.78ms +[2025-09-11 07:11:06] [Rank 0] step:2161/10000 train_time:115778ms step_avg:53.58ms +[2025-09-11 07:11:06] [Rank 0] step:2161/10000 train_time:115778ms step_avg:53.58ms 
+[2025-09-11 07:11:06] [Rank 0] step:2181/10000 train_time:116422ms step_avg:53.38ms +[2025-09-11 07:11:06] [Rank 0] step:2181/10000 train_time:116422ms step_avg:53.38ms +[2025-09-11 07:11:07] [Rank 0] step:2201/10000 train_time:117067ms step_avg:53.19ms +[2025-09-11 07:11:07] [Rank 0] step:2201/10000 train_time:117067ms step_avg:53.19ms +[2025-09-11 07:11:07] [Rank 0] step:2221/10000 train_time:117710ms step_avg:53.00ms +[2025-09-11 07:11:07] [Rank 0] step:2221/10000 train_time:117710ms step_avg:53.00ms +[2025-09-11 07:11:08] [Rank 0] step:2241/10000 train_time:118365ms step_avg:52.82ms +[2025-09-11 07:11:08] [Rank 0] step:2241/10000 train_time:118365ms step_avg:52.82ms +[2025-09-11 07:11:09] [Rank 0] step:2261/10000 train_time:119022ms step_avg:52.64ms +[2025-09-11 07:11:09] [Rank 0] step:2261/10000 train_time:119022ms step_avg:52.64ms +[2025-09-11 07:11:09] [Rank 0] step:2281/10000 train_time:119692ms step_avg:52.47ms +[2025-09-11 07:11:09] [Rank 0] step:2281/10000 train_time:119692ms step_avg:52.47ms +[2025-09-11 07:11:10] [Rank 0] step:2301/10000 train_time:120355ms step_avg:52.31ms +[2025-09-11 07:11:10] [Rank 0] step:2301/10000 train_time:120355ms step_avg:52.31ms +[2025-09-11 07:11:11] [Rank 0] step:2321/10000 train_time:121012ms step_avg:52.14ms +[2025-09-11 07:11:11] [Rank 0] step:2321/10000 train_time:121012ms step_avg:52.14ms +[2025-09-11 07:11:12] [Rank 0] step:2341/10000 train_time:122260ms step_avg:52.23ms +[2025-09-11 07:11:12] [Rank 0] step:2341/10000 train_time:122260ms step_avg:52.23ms +[2025-09-11 07:11:13] [Rank 0] step:2361/10000 train_time:122918ms step_avg:52.06ms +[2025-09-11 07:11:13] [Rank 0] step:2361/10000 train_time:122918ms step_avg:52.06ms +[2025-09-11 07:11:13] [Rank 0] step:2381/10000 train_time:123575ms step_avg:51.90ms +[2025-09-11 07:11:13] [Rank 0] step:2381/10000 train_time:123575ms step_avg:51.90ms +[2025-09-11 07:11:14] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 07:11:14] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 07:11:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 07:11:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 07:11:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 07:11:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 07:11:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:11:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:11:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 07:11:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 07:11:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 07:11:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 07:11:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 07:11:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 07:11:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 07:11:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 07:11:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 07:11:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 07:11:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 07:11:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 07:11:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 07:11:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 07:11:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 07:11:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 07:11:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 07:11:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 07:11:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 07:11:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 07:11:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 07:11:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 07:11:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 07:11:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 07:11:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 07:11:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 07:11:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 07:11:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 07:11:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 07:11:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 07:11:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 07:11:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 07:11:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 07:11:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 07:11:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 07:11:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 07:11:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 07:11:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 07:11:24] [Rank 0] PRINT: step:2400/10000 val_loss:4.6591 total_sharp:6.4097e-05 L1_sharp:7.7975e-03 L2_sharp:1.1852e-03 L3_sharp:-4.2910e-05 L4_sharp:1.1908e-03 L5_sharp:2.1733e-03 L6_sharp:2.5088e-03 L7_sharp:2.4086e-03 L8_sharp:3.7391e-03 L9_sharp:3.3257e-03 L10_sharp:3.7633e-03 L11_sharp:5.1781e-03 L12_sharp:5.3092e-02 total_fnorm:1.8600e+02 total_l1_linf:4.1574e+05 total_spectral:9.3000e+01 L1_fnorm:1.2578e+00 L2_fnorm:1.2500e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2578e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2734e+00 L12_fnorm:1.2812e+00 L1_l1linf:3.5938e-01 L2_l1linf:3.3789e-01 L3_l1linf:3.3594e-01 L4_l1linf:3.3789e-01 L5_l1linf:3.3594e-01 L6_l1linf:3.3594e-01 L7_l1linf:3.3984e-01 L8_l1linf:3.3594e-01 L9_l1linf:3.3789e-01 L10_l1linf:3.3203e-01 L11_l1linf:3.5547e-01 L12_l1linf:3.6133e-01 L1_spectral:1.4978e-02 L2_spectral:1.4882e-02 L3_spectral:1.4867e-02 L4_spectral:1.4839e-02 L5_spectral:1.4823e-02 L6_spectral:1.4903e-02 L7_spectral:1.4861e-02 L8_spectral:1.5313e-02 L9_spectral:1.4962e-02 L10_spectral:1.4883e-02 L11_spectral:1.5052e-02 L12_spectral:1.5443e-02 train_time:124470ms step_avg:51.86ms +[2025-09-11 07:11:24] [Rank 0] PRINT: step:2400/10000 
val_loss:4.6591 total_sharp:6.4097e-05 L1_sharp:7.7975e-03 L2_sharp:1.1852e-03 L3_sharp:-4.2910e-05 L4_sharp:1.1908e-03 L5_sharp:2.1733e-03 L6_sharp:2.5088e-03 L7_sharp:2.4086e-03 L8_sharp:3.7391e-03 L9_sharp:3.3257e-03 L10_sharp:3.7633e-03 L11_sharp:5.1781e-03 L12_sharp:5.3092e-02 total_fnorm:1.8600e+02 total_l1_linf:4.1574e+05 total_spectral:9.3000e+01 L1_fnorm:1.2578e+00 L2_fnorm:1.2500e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2578e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2734e+00 L12_fnorm:1.2812e+00 L1_l1linf:3.5938e-01 L2_l1linf:3.3789e-01 L3_l1linf:3.3594e-01 L4_l1linf:3.3789e-01 L5_l1linf:3.3594e-01 L6_l1linf:3.3594e-01 L7_l1linf:3.3984e-01 L8_l1linf:3.3594e-01 L9_l1linf:3.3789e-01 L10_l1linf:3.3203e-01 L11_l1linf:3.5547e-01 L12_l1linf:3.6133e-01 L1_spectral:1.4978e-02 L2_spectral:1.4882e-02 L3_spectral:1.4867e-02 L4_spectral:1.4839e-02 L5_spectral:1.4823e-02 L6_spectral:1.4903e-02 L7_spectral:1.4861e-02 L8_spectral:1.5313e-02 L9_spectral:1.4962e-02 L10_spectral:1.4883e-02 L11_spectral:1.5052e-02 L12_spectral:1.5443e-02 train_time:124470ms step_avg:51.86ms +[2025-09-11 07:11:25] [Rank 0] step:2401/10000 train_time:125564ms step_avg:52.30ms +[2025-09-11 07:11:25] [Rank 0] step:2401/10000 train_time:125564ms step_avg:52.30ms +[2025-09-11 07:11:26] [Rank 0] step:2421/10000 train_time:126218ms step_avg:52.13ms +[2025-09-11 07:11:26] [Rank 0] step:2421/10000 train_time:126218ms step_avg:52.13ms +[2025-09-11 07:11:26] [Rank 0] step:2441/10000 train_time:126876ms step_avg:51.98ms +[2025-09-11 07:11:26] [Rank 0] step:2441/10000 train_time:126876ms step_avg:51.98ms +[2025-09-11 07:11:27] [Rank 0] step:2461/10000 train_time:127533ms step_avg:51.82ms +[2025-09-11 07:11:27] [Rank 0] step:2461/10000 train_time:127533ms step_avg:51.82ms +[2025-09-11 07:11:28] [Rank 0] step:2481/10000 train_time:128191ms step_avg:51.67ms +[2025-09-11 07:11:28] [Rank 0] step:2481/10000 
train_time:128191ms step_avg:51.67ms +[2025-09-11 07:11:28] [Rank 0] step:2501/10000 train_time:128849ms step_avg:51.52ms +[2025-09-11 07:11:28] [Rank 0] step:2501/10000 train_time:128849ms step_avg:51.52ms +[2025-09-11 07:11:29] [Rank 0] step:2521/10000 train_time:129506ms step_avg:51.37ms +[2025-09-11 07:11:29] [Rank 0] step:2521/10000 train_time:129506ms step_avg:51.37ms +[2025-09-11 07:11:30] [Rank 0] step:2541/10000 train_time:130163ms step_avg:51.22ms +[2025-09-11 07:11:30] [Rank 0] step:2541/10000 train_time:130163ms step_avg:51.22ms +[2025-09-11 07:11:30] [Rank 0] step:2561/10000 train_time:130820ms step_avg:51.08ms +[2025-09-11 07:11:30] [Rank 0] step:2561/10000 train_time:130820ms step_avg:51.08ms +[2025-09-11 07:11:31] [Rank 0] step:2581/10000 train_time:131476ms step_avg:50.94ms +[2025-09-11 07:11:31] [Rank 0] step:2581/10000 train_time:131476ms step_avg:50.94ms +[2025-09-11 07:11:32] [Rank 0] step:2601/10000 train_time:132133ms step_avg:50.80ms +[2025-09-11 07:11:32] [Rank 0] step:2601/10000 train_time:132133ms step_avg:50.80ms +[2025-09-11 07:11:32] [Rank 0] step:2621/10000 train_time:132790ms step_avg:50.66ms +[2025-09-11 07:11:32] [Rank 0] step:2621/10000 train_time:132790ms step_avg:50.66ms +[2025-09-11 07:11:33] [Rank 0] step:2641/10000 train_time:133447ms step_avg:50.53ms +[2025-09-11 07:11:33] [Rank 0] step:2641/10000 train_time:133447ms step_avg:50.53ms +[2025-09-11 07:11:34] [Rank 0] step:2661/10000 train_time:134104ms step_avg:50.40ms +[2025-09-11 07:11:34] [Rank 0] step:2661/10000 train_time:134104ms step_avg:50.40ms +[2025-09-11 07:11:34] [Rank 0] step:2681/10000 train_time:134760ms step_avg:50.26ms +[2025-09-11 07:11:34] [Rank 0] step:2681/10000 train_time:134760ms step_avg:50.26ms +[2025-09-11 07:11:35] [Rank 0] step:2701/10000 train_time:135417ms step_avg:50.14ms +[2025-09-11 07:11:35] [Rank 0] step:2701/10000 train_time:135417ms step_avg:50.14ms +[2025-09-11 07:11:36] [Rank 0] step:2721/10000 train_time:136075ms step_avg:50.01ms 
+[2025-09-11 07:11:36] [Rank 0] step:2721/10000 train_time:136075ms step_avg:50.01ms +[2025-09-11 07:11:36] [Rank 0] step:2741/10000 train_time:136731ms step_avg:49.88ms +[2025-09-11 07:11:36] [Rank 0] step:2741/10000 train_time:136731ms step_avg:49.88ms +[2025-09-11 07:11:37] [Rank 0] step:2761/10000 train_time:137387ms step_avg:49.76ms +[2025-09-11 07:11:37] [Rank 0] step:2761/10000 train_time:137387ms step_avg:49.76ms +[2025-09-11 07:11:38] [Rank 0] step:2781/10000 train_time:138044ms step_avg:49.64ms +[2025-09-11 07:11:38] [Rank 0] step:2781/10000 train_time:138044ms step_avg:49.64ms +[2025-09-11 07:11:38] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 07:11:38] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 07:11:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 07:11:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 07:11:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 07:11:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 07:11:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:11:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:11:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 07:11:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 07:11:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 07:11:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 07:11:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 07:11:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 07:11:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 07:11:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 07:11:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 07:11:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 07:11:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 07:11:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 07:11:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 07:11:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 07:11:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 07:11:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 07:11:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 07:11:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 07:11:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 07:11:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 07:11:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 07:11:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 07:11:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 07:11:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 07:11:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 07:11:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 07:11:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 07:11:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 07:11:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 07:11:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 07:11:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 07:11:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 07:11:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 07:11:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 07:11:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 07:11:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 07:11:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 07:11:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:11:48] [Rank 0] PRINT: step:2800/10000 val_loss:4.6070 total_sharp:6.6633e-05 L1_sharp:6.5203e-03 L2_sharp:1.5102e-03 L3_sharp:4.1104e-04 L4_sharp:8.3318e-04 L5_sharp:2.2060e-03 L6_sharp:1.6903e-03 L7_sharp:1.6856e-03 L8_sharp:3.7516e-03 L9_sharp:3.3371e-03 L10_sharp:3.6701e-03 L11_sharp:5.5001e-03 L12_sharp:5.2891e-02 total_fnorm:1.8400e+02 total_l1_linf:4.0960e+05 total_spectral:9.2000e+01 L1_fnorm:1.2656e+00 L2_fnorm:1.2500e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2656e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2656e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2812e+00 L1_l1linf:3.5742e-01 L2_l1linf:3.3398e-01 L3_l1linf:3.2812e-01 L4_l1linf:3.3008e-01 L5_l1linf:3.2812e-01 L6_l1linf:3.3203e-01 L7_l1linf:3.3594e-01 L8_l1linf:3.3203e-01 L9_l1linf:3.3008e-01 L10_l1linf:3.2422e-01 L11_l1linf:3.2617e-01 L12_l1linf:3.5156e-01 L1_spectral:1.5189e-02 L2_spectral:1.5089e-02 L3_spectral:1.4995e-02 L4_spectral:1.5021e-02 L5_spectral:1.5088e-02 L6_spectral:1.5050e-02 L7_spectral:1.5077e-02 L8_spectral:1.5369e-02 L9_spectral:1.5179e-02 L10_spectral:1.5077e-02 L11_spectral:1.5283e-02 L12_spectral:1.5653e-02 train_time:138682ms step_avg:49.53ms +[2025-09-11 07:11:48] [Rank 0] PRINT: step:2800/10000 val_loss:4.6070 total_sharp:6.6633e-05 L1_sharp:6.5203e-03 L2_sharp:1.5102e-03 L3_sharp:4.1104e-04 L4_sharp:8.3318e-04 L5_sharp:2.2060e-03 L6_sharp:1.6903e-03 L7_sharp:1.6856e-03 L8_sharp:3.7516e-03 L9_sharp:3.3371e-03 L10_sharp:3.6701e-03 L11_sharp:5.5001e-03 L12_sharp:5.2891e-02 total_fnorm:1.8400e+02 total_l1_linf:4.0960e+05 total_spectral:9.2000e+01 L1_fnorm:1.2656e+00 L2_fnorm:1.2500e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2656e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2656e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2812e+00 L1_l1linf:3.5742e-01 L2_l1linf:3.3398e-01 L3_l1linf:3.2812e-01 L4_l1linf:3.3008e-01 L5_l1linf:3.2812e-01 
L6_l1linf:3.3203e-01 L7_l1linf:3.3594e-01 L8_l1linf:3.3203e-01 L9_l1linf:3.3008e-01 L10_l1linf:3.2422e-01 L11_l1linf:3.2617e-01 L12_l1linf:3.5156e-01 L1_spectral:1.5189e-02 L2_spectral:1.5089e-02 L3_spectral:1.4995e-02 L4_spectral:1.5021e-02 L5_spectral:1.5088e-02 L6_spectral:1.5050e-02 L7_spectral:1.5077e-02 L8_spectral:1.5369e-02 L9_spectral:1.5179e-02 L10_spectral:1.5077e-02 L11_spectral:1.5283e-02 L12_spectral:1.5653e-02 train_time:138682ms step_avg:49.53ms +[2025-09-11 07:11:49] [Rank 0] step:2801/10000 train_time:139798ms step_avg:49.91ms +[2025-09-11 07:11:49] [Rank 0] step:2801/10000 train_time:139798ms step_avg:49.91ms +[2025-09-11 07:11:50] [Rank 0] step:2821/10000 train_time:140459ms step_avg:49.79ms +[2025-09-11 07:11:50] [Rank 0] step:2821/10000 train_time:140459ms step_avg:49.79ms +[2025-09-11 07:11:50] [Rank 0] step:2841/10000 train_time:141118ms step_avg:49.67ms +[2025-09-11 07:11:50] [Rank 0] step:2841/10000 train_time:141118ms step_avg:49.67ms +[2025-09-11 07:11:51] [Rank 0] step:2861/10000 train_time:141776ms step_avg:49.55ms +[2025-09-11 07:11:51] [Rank 0] step:2861/10000 train_time:141776ms step_avg:49.55ms +[2025-09-11 07:11:52] [Rank 0] step:2881/10000 train_time:142433ms step_avg:49.44ms +[2025-09-11 07:11:52] [Rank 0] step:2881/10000 train_time:142433ms step_avg:49.44ms +[2025-09-11 07:11:52] [Rank 0] step:2901/10000 train_time:143091ms step_avg:49.32ms +[2025-09-11 07:11:52] [Rank 0] step:2901/10000 train_time:143091ms step_avg:49.32ms +[2025-09-11 07:11:53] [Rank 0] step:2921/10000 train_time:143749ms step_avg:49.21ms +[2025-09-11 07:11:53] [Rank 0] step:2921/10000 train_time:143749ms step_avg:49.21ms +[2025-09-11 07:11:54] [Rank 0] step:2941/10000 train_time:144406ms step_avg:49.10ms +[2025-09-11 07:11:54] [Rank 0] step:2941/10000 train_time:144406ms step_avg:49.10ms +[2025-09-11 07:11:54] [Rank 0] step:2961/10000 train_time:145063ms step_avg:48.99ms +[2025-09-11 07:11:54] [Rank 0] step:2961/10000 train_time:145063ms step_avg:48.99ms 
+[2025-09-11 07:11:55] [Rank 0] step:2981/10000 train_time:145723ms step_avg:48.88ms +[2025-09-11 07:11:55] [Rank 0] step:2981/10000 train_time:145723ms step_avg:48.88ms +[2025-09-11 07:11:56] [Rank 0] step:3001/10000 train_time:146383ms step_avg:48.78ms +[2025-09-11 07:11:56] [Rank 0] step:3001/10000 train_time:146383ms step_avg:48.78ms +[2025-09-11 07:11:56] [Rank 0] step:3021/10000 train_time:147045ms step_avg:48.67ms +[2025-09-11 07:11:56] [Rank 0] step:3021/10000 train_time:147045ms step_avg:48.67ms +[2025-09-11 07:11:57] [Rank 0] step:3041/10000 train_time:147706ms step_avg:48.57ms +[2025-09-11 07:11:57] [Rank 0] step:3041/10000 train_time:147706ms step_avg:48.57ms +[2025-09-11 07:11:58] [Rank 0] step:3061/10000 train_time:148366ms step_avg:48.47ms +[2025-09-11 07:11:58] [Rank 0] step:3061/10000 train_time:148366ms step_avg:48.47ms +[2025-09-11 07:11:58] [Rank 0] step:3081/10000 train_time:149027ms step_avg:48.37ms +[2025-09-11 07:11:58] [Rank 0] step:3081/10000 train_time:149027ms step_avg:48.37ms +[2025-09-11 07:11:59] [Rank 0] step:3101/10000 train_time:149687ms step_avg:48.27ms +[2025-09-11 07:11:59] [Rank 0] step:3101/10000 train_time:149687ms step_avg:48.27ms +[2025-09-11 07:12:00] [Rank 0] step:3121/10000 train_time:150348ms step_avg:48.17ms +[2025-09-11 07:12:00] [Rank 0] step:3121/10000 train_time:150348ms step_avg:48.17ms +[2025-09-11 07:12:00] [Rank 0] step:3141/10000 train_time:151009ms step_avg:48.08ms +[2025-09-11 07:12:00] [Rank 0] step:3141/10000 train_time:151009ms step_avg:48.08ms +[2025-09-11 07:12:01] [Rank 0] step:3161/10000 train_time:151669ms step_avg:47.98ms +[2025-09-11 07:12:01] [Rank 0] step:3161/10000 train_time:151669ms step_avg:47.98ms +[2025-09-11 07:12:02] [Rank 0] step:3181/10000 train_time:152329ms step_avg:47.89ms +[2025-09-11 07:12:02] [Rank 0] step:3181/10000 train_time:152329ms step_avg:47.89ms +[2025-09-11 07:12:02] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 07:12:02] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 07:12:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 07:12:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 07:12:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 07:12:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 07:12:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:12:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:12:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 07:12:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 07:12:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 07:12:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 07:12:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 07:12:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 07:12:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 07:12:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 07:12:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 07:12:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 07:12:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 07:12:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 07:12:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 07:12:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 07:12:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 07:12:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 07:12:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 07:12:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 07:12:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 07:12:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 07:12:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 07:12:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 07:12:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 07:12:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 07:12:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 07:12:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 07:12:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 07:12:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 07:12:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 07:12:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 07:12:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 07:12:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 07:12:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 07:12:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 07:12:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 07:12:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 07:12:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 07:12:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 07:12:12] [Rank 0] PRINT: step:3200/10000 val_loss:4.5364 total_sharp:4.9439e-05 L1_sharp:6.5646e-03 L2_sharp:1.9418e-03 L3_sharp:9.9745e-04 L4_sharp:9.5979e-04 L5_sharp:3.2847e-03 L6_sharp:2.7203e-03 L7_sharp:1.3820e-03 L8_sharp:3.1185e-03 L9_sharp:3.1171e-03 L10_sharp:3.3699e-03 L11_sharp:5.1180e-03 L12_sharp:1.5146e-01 total_fnorm:2.0500e+02 total_l1_linf:4.7514e+05 total_spectral:1.0250e+02 L1_fnorm:1.2656e+00 L2_fnorm:1.2500e+00 L3_fnorm:1.2578e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2656e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2578e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2812e+00 L1_l1linf:3.5352e-01 L2_l1linf:3.3008e-01 L3_l1linf:3.3203e-01 L4_l1linf:3.3008e-01 L5_l1linf:3.3203e-01 L6_l1linf:3.3398e-01 L7_l1linf:3.3398e-01 L8_l1linf:3.3398e-01 L9_l1linf:3.2812e-01 L10_l1linf:3.2812e-01 L11_l1linf:3.1836e-01 L12_l1linf:3.4570e-01 L1_spectral:1.5463e-02 L2_spectral:1.5265e-02 L3_spectral:1.5207e-02 L4_spectral:1.5245e-02 L5_spectral:1.5190e-02 L6_spectral:1.5263e-02 L7_spectral:1.5244e-02 L8_spectral:1.5518e-02 L9_spectral:1.5353e-02 L10_spectral:1.5305e-02 L11_spectral:1.5426e-02 L12_spectral:1.5723e-02 train_time:152971ms step_avg:47.80ms +[2025-09-11 07:12:12] [Rank 0] PRINT: step:3200/10000 
val_loss:4.5364 total_sharp:4.9439e-05 L1_sharp:6.5646e-03 L2_sharp:1.9418e-03 L3_sharp:9.9745e-04 L4_sharp:9.5979e-04 L5_sharp:3.2847e-03 L6_sharp:2.7203e-03 L7_sharp:1.3820e-03 L8_sharp:3.1185e-03 L9_sharp:3.1171e-03 L10_sharp:3.3699e-03 L11_sharp:5.1180e-03 L12_sharp:1.5146e-01 total_fnorm:2.0500e+02 total_l1_linf:4.7514e+05 total_spectral:1.0250e+02 L1_fnorm:1.2656e+00 L2_fnorm:1.2500e+00 L3_fnorm:1.2578e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2656e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2578e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2812e+00 L1_l1linf:3.5352e-01 L2_l1linf:3.3008e-01 L3_l1linf:3.3203e-01 L4_l1linf:3.3008e-01 L5_l1linf:3.3203e-01 L6_l1linf:3.3398e-01 L7_l1linf:3.3398e-01 L8_l1linf:3.3398e-01 L9_l1linf:3.2812e-01 L10_l1linf:3.2812e-01 L11_l1linf:3.1836e-01 L12_l1linf:3.4570e-01 L1_spectral:1.5463e-02 L2_spectral:1.5265e-02 L3_spectral:1.5207e-02 L4_spectral:1.5245e-02 L5_spectral:1.5190e-02 L6_spectral:1.5263e-02 L7_spectral:1.5244e-02 L8_spectral:1.5518e-02 L9_spectral:1.5353e-02 L10_spectral:1.5305e-02 L11_spectral:1.5426e-02 L12_spectral:1.5723e-02 train_time:152971ms step_avg:47.80ms +[2025-09-11 07:12:13] [Rank 0] step:3201/10000 train_time:154136ms step_avg:48.15ms +[2025-09-11 07:12:13] [Rank 0] step:3201/10000 train_time:154136ms step_avg:48.15ms +[2025-09-11 07:12:14] [Rank 0] step:3221/10000 train_time:154801ms step_avg:48.06ms +[2025-09-11 07:12:14] [Rank 0] step:3221/10000 train_time:154801ms step_avg:48.06ms +[2025-09-11 07:12:15] [Rank 0] step:3241/10000 train_time:155997ms step_avg:48.13ms +[2025-09-11 07:12:15] [Rank 0] step:3241/10000 train_time:155997ms step_avg:48.13ms +[2025-09-11 07:12:16] [Rank 0] step:3261/10000 train_time:156659ms step_avg:48.04ms +[2025-09-11 07:12:16] [Rank 0] step:3261/10000 train_time:156659ms step_avg:48.04ms +[2025-09-11 07:12:16] [Rank 0] step:3281/10000 train_time:157320ms step_avg:47.95ms +[2025-09-11 07:12:16] [Rank 0] step:3281/10000 
train_time:157320ms step_avg:47.95ms +[2025-09-11 07:12:17] [Rank 0] step:3301/10000 train_time:158262ms step_avg:47.94ms +[2025-09-11 07:12:17] [Rank 0] step:3301/10000 train_time:158262ms step_avg:47.94ms +[2025-09-11 07:12:18] [Rank 0] step:3321/10000 train_time:158923ms step_avg:47.85ms +[2025-09-11 07:12:18] [Rank 0] step:3321/10000 train_time:158923ms step_avg:47.85ms +[2025-09-11 07:12:19] [Rank 0] step:3341/10000 train_time:159584ms step_avg:47.77ms +[2025-09-11 07:12:19] [Rank 0] step:3341/10000 train_time:159584ms step_avg:47.77ms +[2025-09-11 07:12:19] [Rank 0] step:3361/10000 train_time:160245ms step_avg:47.68ms +[2025-09-11 07:12:19] [Rank 0] step:3361/10000 train_time:160245ms step_avg:47.68ms +[2025-09-11 07:12:20] [Rank 0] step:3381/10000 train_time:160905ms step_avg:47.59ms +[2025-09-11 07:12:20] [Rank 0] step:3381/10000 train_time:160905ms step_avg:47.59ms +[2025-09-11 07:12:21] [Rank 0] step:3401/10000 train_time:161565ms step_avg:47.51ms +[2025-09-11 07:12:21] [Rank 0] step:3401/10000 train_time:161565ms step_avg:47.51ms +[2025-09-11 07:12:21] [Rank 0] step:3421/10000 train_time:162225ms step_avg:47.42ms +[2025-09-11 07:12:21] [Rank 0] step:3421/10000 train_time:162225ms step_avg:47.42ms +[2025-09-11 07:12:22] [Rank 0] step:3441/10000 train_time:162885ms step_avg:47.34ms +[2025-09-11 07:12:22] [Rank 0] step:3441/10000 train_time:162885ms step_avg:47.34ms +[2025-09-11 07:12:23] [Rank 0] step:3461/10000 train_time:163546ms step_avg:47.25ms +[2025-09-11 07:12:23] [Rank 0] step:3461/10000 train_time:163546ms step_avg:47.25ms +[2025-09-11 07:12:23] [Rank 0] step:3481/10000 train_time:164206ms step_avg:47.17ms +[2025-09-11 07:12:23] [Rank 0] step:3481/10000 train_time:164206ms step_avg:47.17ms +[2025-09-11 07:12:24] [Rank 0] step:3501/10000 train_time:164868ms step_avg:47.09ms +[2025-09-11 07:12:24] [Rank 0] step:3501/10000 train_time:164868ms step_avg:47.09ms +[2025-09-11 07:12:25] [Rank 0] step:3521/10000 train_time:165529ms step_avg:47.01ms 
+[2025-09-11 07:12:25] [Rank 0] step:3521/10000 train_time:165529ms step_avg:47.01ms +[2025-09-11 07:12:25] [Rank 0] step:3541/10000 train_time:166189ms step_avg:46.93ms +[2025-09-11 07:12:25] [Rank 0] step:3541/10000 train_time:166189ms step_avg:46.93ms +[2025-09-11 07:12:26] [Rank 0] step:3561/10000 train_time:166850ms step_avg:46.85ms +[2025-09-11 07:12:26] [Rank 0] step:3561/10000 train_time:166850ms step_avg:46.85ms +[2025-09-11 07:12:26] [Rank 0] step:3581/10000 train_time:167511ms step_avg:46.78ms +[2025-09-11 07:12:26] [Rank 0] step:3581/10000 train_time:167511ms step_avg:46.78ms +[2025-09-11 07:12:27] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 07:12:27] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 07:12:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 07:12:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 07:12:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 07:12:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 07:12:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:12:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:12:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 07:12:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 07:12:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 07:12:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 07:12:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 07:12:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 07:12:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 07:12:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 07:12:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 07:12:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 07:12:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 07:12:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 07:12:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 07:12:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 07:12:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 07:12:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 07:12:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 07:12:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 07:12:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 07:12:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 07:12:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 07:12:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 07:12:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 07:12:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 07:12:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 07:12:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 07:12:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 07:12:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 07:12:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 07:12:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 07:12:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 07:12:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 07:12:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 07:12:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 07:12:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 07:12:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 07:12:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 07:12:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:12:37] [Rank 0] PRINT: step:3600/10000 val_loss:4.4919 total_sharp:6.4892e-05 L1_sharp:5.5628e-03 L2_sharp:8.9057e-04 L3_sharp:2.0204e-03 L4_sharp:1.0260e-03 L5_sharp:2.3061e-03 L6_sharp:1.8655e-03 L7_sharp:1.8221e-03 L8_sharp:3.4860e-03 L9_sharp:3.5963e-03 L10_sharp:3.3709e-03 L11_sharp:5.0932e-03 L12_sharp:1.5704e-01 total_fnorm:1.7900e+02 total_l1_linf:3.9936e+05 total_spectral:8.9500e+01 L1_fnorm:1.2656e+00 L2_fnorm:1.2500e+00 L3_fnorm:1.2578e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2656e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2578e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2734e+00 L1_l1linf:3.4766e-01 L2_l1linf:3.1836e-01 L3_l1linf:3.2812e-01 L4_l1linf:3.2617e-01 L5_l1linf:3.2422e-01 L6_l1linf:3.2617e-01 L7_l1linf:3.2812e-01 L8_l1linf:3.2617e-01 L9_l1linf:3.2617e-01 L10_l1linf:3.2227e-01 L11_l1linf:3.1836e-01 L12_l1linf:3.3203e-01 L1_spectral:1.5782e-02 L2_spectral:1.5367e-02 L3_spectral:1.5369e-02 L4_spectral:1.5414e-02 L5_spectral:1.5390e-02 L6_spectral:1.5353e-02 L7_spectral:1.5370e-02 L8_spectral:1.5702e-02 L9_spectral:1.5525e-02 L10_spectral:1.5444e-02 L11_spectral:1.5571e-02 L12_spectral:1.5799e-02 train_time:168153ms step_avg:46.71ms +[2025-09-11 07:12:37] [Rank 0] PRINT: step:3600/10000 val_loss:4.4919 total_sharp:6.4892e-05 L1_sharp:5.5628e-03 L2_sharp:8.9057e-04 L3_sharp:2.0204e-03 L4_sharp:1.0260e-03 L5_sharp:2.3061e-03 L6_sharp:1.8655e-03 L7_sharp:1.8221e-03 L8_sharp:3.4860e-03 L9_sharp:3.5963e-03 L10_sharp:3.3709e-03 L11_sharp:5.0932e-03 L12_sharp:1.5704e-01 total_fnorm:1.7900e+02 total_l1_linf:3.9936e+05 total_spectral:8.9500e+01 L1_fnorm:1.2656e+00 L2_fnorm:1.2500e+00 L3_fnorm:1.2578e+00 L4_fnorm:1.2578e+00 L5_fnorm:1.2656e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2578e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2734e+00 L1_l1linf:3.4766e-01 L2_l1linf:3.1836e-01 L3_l1linf:3.2812e-01 L4_l1linf:3.2617e-01 L5_l1linf:3.2422e-01 
L6_l1linf:3.2617e-01 L7_l1linf:3.2812e-01 L8_l1linf:3.2617e-01 L9_l1linf:3.2617e-01 L10_l1linf:3.2227e-01 L11_l1linf:3.1836e-01 L12_l1linf:3.3203e-01 L1_spectral:1.5782e-02 L2_spectral:1.5367e-02 L3_spectral:1.5369e-02 L4_spectral:1.5414e-02 L5_spectral:1.5390e-02 L6_spectral:1.5353e-02 L7_spectral:1.5370e-02 L8_spectral:1.5702e-02 L9_spectral:1.5525e-02 L10_spectral:1.5444e-02 L11_spectral:1.5571e-02 L12_spectral:1.5799e-02 train_time:168153ms step_avg:46.71ms +[2025-09-11 07:12:39] [Rank 0] step:3601/10000 train_time:169690ms step_avg:47.12ms +[2025-09-11 07:12:39] [Rank 0] step:3601/10000 train_time:169690ms step_avg:47.12ms +[2025-09-11 07:12:39] [Rank 0] step:3621/10000 train_time:170569ms step_avg:47.11ms +[2025-09-11 07:12:39] [Rank 0] step:3621/10000 train_time:170569ms step_avg:47.11ms +[2025-09-11 07:12:40] [Rank 0] step:3641/10000 train_time:171231ms step_avg:47.03ms +[2025-09-11 07:12:40] [Rank 0] step:3641/10000 train_time:171231ms step_avg:47.03ms +[2025-09-11 07:12:41] [Rank 0] step:3661/10000 train_time:171892ms step_avg:46.95ms +[2025-09-11 07:12:41] [Rank 0] step:3661/10000 train_time:171892ms step_avg:46.95ms +[2025-09-11 07:12:41] [Rank 0] step:3681/10000 train_time:172554ms step_avg:46.88ms +[2025-09-11 07:12:41] [Rank 0] step:3681/10000 train_time:172554ms step_avg:46.88ms +[2025-09-11 07:12:42] [Rank 0] step:3701/10000 train_time:173214ms step_avg:46.80ms +[2025-09-11 07:12:42] [Rank 0] step:3701/10000 train_time:173214ms step_avg:46.80ms +[2025-09-11 07:12:43] [Rank 0] step:3721/10000 train_time:173885ms step_avg:46.73ms +[2025-09-11 07:12:43] [Rank 0] step:3721/10000 train_time:173885ms step_avg:46.73ms +[2025-09-11 07:12:43] [Rank 0] step:3741/10000 train_time:174557ms step_avg:46.66ms +[2025-09-11 07:12:43] [Rank 0] step:3741/10000 train_time:174557ms step_avg:46.66ms +[2025-09-11 07:12:44] [Rank 0] step:3761/10000 train_time:175229ms step_avg:46.59ms +[2025-09-11 07:12:44] [Rank 0] step:3761/10000 train_time:175229ms step_avg:46.59ms 
+[2025-09-11 07:12:45] [Rank 0] step:3781/10000 train_time:175901ms step_avg:46.52ms +[2025-09-11 07:12:45] [Rank 0] step:3781/10000 train_time:175901ms step_avg:46.52ms +[2025-09-11 07:12:45] [Rank 0] step:3801/10000 train_time:176573ms step_avg:46.45ms +[2025-09-11 07:12:45] [Rank 0] step:3801/10000 train_time:176573ms step_avg:46.45ms +[2025-09-11 07:12:46] [Rank 0] step:3821/10000 train_time:177246ms step_avg:46.39ms +[2025-09-11 07:12:46] [Rank 0] step:3821/10000 train_time:177246ms step_avg:46.39ms +[2025-09-11 07:12:47] [Rank 0] step:3841/10000 train_time:177919ms step_avg:46.32ms +[2025-09-11 07:12:47] [Rank 0] step:3841/10000 train_time:177919ms step_avg:46.32ms +[2025-09-11 07:12:47] [Rank 0] step:3861/10000 train_time:178591ms step_avg:46.25ms +[2025-09-11 07:12:47] [Rank 0] step:3861/10000 train_time:178591ms step_avg:46.25ms +[2025-09-11 07:12:48] [Rank 0] step:3881/10000 train_time:179262ms step_avg:46.19ms +[2025-09-11 07:12:48] [Rank 0] step:3881/10000 train_time:179262ms step_avg:46.19ms +[2025-09-11 07:12:49] [Rank 0] step:3901/10000 train_time:179933ms step_avg:46.12ms +[2025-09-11 07:12:49] [Rank 0] step:3901/10000 train_time:179933ms step_avg:46.12ms +[2025-09-11 07:12:49] [Rank 0] step:3921/10000 train_time:180605ms step_avg:46.06ms +[2025-09-11 07:12:49] [Rank 0] step:3921/10000 train_time:180605ms step_avg:46.06ms +[2025-09-11 07:12:50] [Rank 0] step:3941/10000 train_time:181277ms step_avg:46.00ms +[2025-09-11 07:12:50] [Rank 0] step:3941/10000 train_time:181277ms step_avg:46.00ms +[2025-09-11 07:12:51] [Rank 0] step:3961/10000 train_time:181950ms step_avg:45.94ms +[2025-09-11 07:12:51] [Rank 0] step:3961/10000 train_time:181950ms step_avg:45.94ms +[2025-09-11 07:12:51] [Rank 0] step:3981/10000 train_time:182621ms step_avg:45.87ms +[2025-09-11 07:12:51] [Rank 0] step:3981/10000 train_time:182621ms step_avg:45.87ms +[2025-09-11 07:12:52] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 07:12:52] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 07:12:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 07:12:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 07:12:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 07:12:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 07:12:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:12:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:12:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 07:12:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 07:12:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 07:12:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 07:12:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 07:12:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 07:12:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 07:12:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 07:12:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 07:12:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 07:12:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 07:12:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 07:12:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 07:12:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 07:12:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 07:12:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 07:12:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 07:12:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 07:13:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 07:13:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 07:13:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 07:13:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 07:13:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 07:13:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 07:13:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 07:13:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 07:13:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 07:13:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 07:13:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 07:13:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 07:13:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 07:13:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 07:13:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 07:13:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 07:13:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 07:13:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 07:13:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 07:13:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 07:13:02] [Rank 0] PRINT: step:4000/10000 val_loss:4.4481 total_sharp:5.2402e-05 L1_sharp:4.3148e-03 L2_sharp:1.1531e-03 L3_sharp:1.5895e-04 L4_sharp:4.1293e-04 L5_sharp:2.0967e-03 L6_sharp:1.9160e-03 L7_sharp:1.4270e-03 L8_sharp:3.2346e-03 L9_sharp:3.4161e-03 L10_sharp:3.8147e-03 L11_sharp:5.8779e-03 L12_sharp:7.3236e-02 total_fnorm:1.9700e+02 total_l1_linf:4.4646e+05 total_spectral:9.8500e+01 L1_fnorm:1.2578e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2422e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2734e+00 L1_l1linf:3.4375e-01 L2_l1linf:3.1445e-01 L3_l1linf:3.1836e-01 L4_l1linf:3.2031e-01 L5_l1linf:3.2227e-01 L6_l1linf:3.2617e-01 L7_l1linf:3.2227e-01 L8_l1linf:3.2617e-01 L9_l1linf:3.2227e-01 L10_l1linf:3.1445e-01 L11_l1linf:3.1250e-01 L12_l1linf:3.3984e-01 L1_spectral:1.5650e-02 L2_spectral:1.5446e-02 L3_spectral:1.5487e-02 L4_spectral:1.5413e-02 L5_spectral:1.5492e-02 L6_spectral:1.5519e-02 L7_spectral:1.5486e-02 L8_spectral:1.5861e-02 L9_spectral:1.5554e-02 L10_spectral:1.5557e-02 L11_spectral:1.5696e-02 L12_spectral:1.5763e-02 train_time:183273ms step_avg:45.82ms +[2025-09-11 07:13:02] [Rank 0] PRINT: step:4000/10000 
val_loss:4.4481 total_sharp:5.2402e-05 L1_sharp:4.3148e-03 L2_sharp:1.1531e-03 L3_sharp:1.5895e-04 L4_sharp:4.1293e-04 L5_sharp:2.0967e-03 L6_sharp:1.9160e-03 L7_sharp:1.4270e-03 L8_sharp:3.2346e-03 L9_sharp:3.4161e-03 L10_sharp:3.8147e-03 L11_sharp:5.8779e-03 L12_sharp:7.3236e-02 total_fnorm:1.9700e+02 total_l1_linf:4.4646e+05 total_spectral:9.8500e+01 L1_fnorm:1.2578e+00 L2_fnorm:1.2344e+00 L3_fnorm:1.2422e+00 L4_fnorm:1.2422e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2578e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2734e+00 L1_l1linf:3.4375e-01 L2_l1linf:3.1445e-01 L3_l1linf:3.1836e-01 L4_l1linf:3.2031e-01 L5_l1linf:3.2227e-01 L6_l1linf:3.2617e-01 L7_l1linf:3.2227e-01 L8_l1linf:3.2617e-01 L9_l1linf:3.2227e-01 L10_l1linf:3.1445e-01 L11_l1linf:3.1250e-01 L12_l1linf:3.3984e-01 L1_spectral:1.5650e-02 L2_spectral:1.5446e-02 L3_spectral:1.5487e-02 L4_spectral:1.5413e-02 L5_spectral:1.5492e-02 L6_spectral:1.5519e-02 L7_spectral:1.5486e-02 L8_spectral:1.5861e-02 L9_spectral:1.5554e-02 L10_spectral:1.5557e-02 L11_spectral:1.5696e-02 L12_spectral:1.5763e-02 train_time:183273ms step_avg:45.82ms +[2025-09-11 07:13:03] [Rank 0] step:4001/10000 train_time:184419ms step_avg:46.09ms +[2025-09-11 07:13:03] [Rank 0] step:4001/10000 train_time:184419ms step_avg:46.09ms +[2025-09-11 07:13:04] [Rank 0] step:4021/10000 train_time:185095ms step_avg:46.03ms +[2025-09-11 07:13:04] [Rank 0] step:4021/10000 train_time:185095ms step_avg:46.03ms +[2025-09-11 07:13:05] [Rank 0] step:4041/10000 train_time:185768ms step_avg:45.97ms +[2025-09-11 07:13:05] [Rank 0] step:4041/10000 train_time:185768ms step_avg:45.97ms +[2025-09-11 07:13:05] [Rank 0] step:4061/10000 train_time:186437ms step_avg:45.91ms +[2025-09-11 07:13:05] [Rank 0] step:4061/10000 train_time:186437ms step_avg:45.91ms +[2025-09-11 07:13:06] [Rank 0] step:4081/10000 train_time:187109ms step_avg:45.85ms +[2025-09-11 07:13:06] [Rank 0] step:4081/10000 
train_time:187109ms step_avg:45.85ms +[2025-09-11 07:13:07] [Rank 0] step:4101/10000 train_time:187781ms step_avg:45.79ms +[2025-09-11 07:13:07] [Rank 0] step:4101/10000 train_time:187781ms step_avg:45.79ms +[2025-09-11 07:13:07] [Rank 0] step:4121/10000 train_time:188451ms step_avg:45.73ms +[2025-09-11 07:13:07] [Rank 0] step:4121/10000 train_time:188451ms step_avg:45.73ms +[2025-09-11 07:13:08] [Rank 0] step:4141/10000 train_time:189122ms step_avg:45.67ms +[2025-09-11 07:13:08] [Rank 0] step:4141/10000 train_time:189122ms step_avg:45.67ms +[2025-09-11 07:13:09] [Rank 0] step:4161/10000 train_time:189792ms step_avg:45.61ms +[2025-09-11 07:13:09] [Rank 0] step:4161/10000 train_time:189792ms step_avg:45.61ms +[2025-09-11 07:13:09] [Rank 0] step:4181/10000 train_time:190463ms step_avg:45.55ms +[2025-09-11 07:13:09] [Rank 0] step:4181/10000 train_time:190463ms step_avg:45.55ms +[2025-09-11 07:13:10] [Rank 0] step:4201/10000 train_time:191136ms step_avg:45.50ms +[2025-09-11 07:13:10] [Rank 0] step:4201/10000 train_time:191136ms step_avg:45.50ms +[2025-09-11 07:13:11] [Rank 0] step:4221/10000 train_time:191806ms step_avg:45.44ms +[2025-09-11 07:13:11] [Rank 0] step:4221/10000 train_time:191806ms step_avg:45.44ms +[2025-09-11 07:13:11] [Rank 0] step:4241/10000 train_time:192477ms step_avg:45.38ms +[2025-09-11 07:13:11] [Rank 0] step:4241/10000 train_time:192477ms step_avg:45.38ms +[2025-09-11 07:13:12] [Rank 0] step:4261/10000 train_time:193147ms step_avg:45.33ms +[2025-09-11 07:13:12] [Rank 0] step:4261/10000 train_time:193147ms step_avg:45.33ms +[2025-09-11 07:13:13] [Rank 0] step:4281/10000 train_time:193819ms step_avg:45.27ms +[2025-09-11 07:13:13] [Rank 0] step:4281/10000 train_time:193819ms step_avg:45.27ms +[2025-09-11 07:13:13] [Rank 0] step:4301/10000 train_time:194491ms step_avg:45.22ms +[2025-09-11 07:13:13] [Rank 0] step:4301/10000 train_time:194491ms step_avg:45.22ms +[2025-09-11 07:13:14] [Rank 0] step:4321/10000 train_time:195162ms step_avg:45.17ms 
+[2025-09-11 07:13:14] [Rank 0] step:4321/10000 train_time:195162ms step_avg:45.17ms +[2025-09-11 07:13:15] [Rank 0] step:4341/10000 train_time:195833ms step_avg:45.11ms +[2025-09-11 07:13:15] [Rank 0] step:4341/10000 train_time:195833ms step_avg:45.11ms +[2025-09-11 07:13:15] [Rank 0] step:4361/10000 train_time:196503ms step_avg:45.06ms +[2025-09-11 07:13:15] [Rank 0] step:4361/10000 train_time:196503ms step_avg:45.06ms +[2025-09-11 07:13:16] [Rank 0] step:4381/10000 train_time:197175ms step_avg:45.01ms +[2025-09-11 07:13:16] [Rank 0] step:4381/10000 train_time:197175ms step_avg:45.01ms +[2025-09-11 07:13:17] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 07:13:17] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 07:13:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 07:13:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 07:13:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 07:13:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 07:13:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:13:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:13:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 07:13:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 07:13:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 07:13:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 07:13:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 07:13:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 07:13:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 07:13:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 07:13:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 07:13:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 07:13:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 07:13:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 07:13:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 07:13:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 07:13:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 07:13:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 07:13:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 07:13:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 07:13:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 07:13:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 07:13:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 07:13:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 07:13:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 07:13:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 07:13:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 07:13:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 07:13:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 07:13:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 07:13:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 07:13:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 07:13:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 07:13:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 07:13:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 07:13:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 07:13:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 07:13:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 07:13:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 07:13:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:13:26] [Rank 0] PRINT: step:4400/10000 val_loss:4.4368 total_sharp:4.4955e-05 L1_sharp:4.8894e-03 L2_sharp:4.3868e-04 L3_sharp:-3.0474e-04 L4_sharp:1.2963e-03 L5_sharp:1.7968e-03 L6_sharp:1.0995e-03 L7_sharp:1.3155e-03 L8_sharp:3.2584e-03 L9_sharp:3.1584e-03 L10_sharp:2.7036e-03 L11_sharp:4.0568e-03 L12_sharp:4.2540e-02 total_fnorm:1.8200e+02 total_l1_linf:4.0141e+05 total_spectral:9.1000e+01 L1_fnorm:1.2578e+00 L2_fnorm:1.2422e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2422e+00 L5_fnorm:1.2656e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2734e+00 L1_l1linf:3.3984e-01 L2_l1linf:3.1445e-01 L3_l1linf:3.1250e-01 L4_l1linf:3.1445e-01 L5_l1linf:3.1641e-01 L6_l1linf:3.1836e-01 L7_l1linf:3.1641e-01 L8_l1linf:3.1445e-01 L9_l1linf:3.0859e-01 L10_l1linf:3.0469e-01 L11_l1linf:3.1250e-01 L12_l1linf:3.3984e-01 L1_spectral:1.5697e-02 L2_spectral:1.5520e-02 L3_spectral:1.5585e-02 L4_spectral:1.5558e-02 L5_spectral:1.5634e-02 L6_spectral:1.5612e-02 L7_spectral:1.5576e-02 L8_spectral:1.5770e-02 L9_spectral:1.5660e-02 L10_spectral:1.5685e-02 L11_spectral:1.5818e-02 L12_spectral:1.5958e-02 train_time:197827ms step_avg:44.96ms +[2025-09-11 07:13:26] [Rank 0] PRINT: step:4400/10000 val_loss:4.4368 total_sharp:4.4955e-05 L1_sharp:4.8894e-03 L2_sharp:4.3868e-04 L3_sharp:-3.0474e-04 L4_sharp:1.2963e-03 L5_sharp:1.7968e-03 L6_sharp:1.0995e-03 L7_sharp:1.3155e-03 L8_sharp:3.2584e-03 L9_sharp:3.1584e-03 L10_sharp:2.7036e-03 L11_sharp:4.0568e-03 L12_sharp:4.2540e-02 total_fnorm:1.8200e+02 total_l1_linf:4.0141e+05 total_spectral:9.1000e+01 L1_fnorm:1.2578e+00 L2_fnorm:1.2422e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2422e+00 L5_fnorm:1.2656e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2734e+00 L1_l1linf:3.3984e-01 L2_l1linf:3.1445e-01 L3_l1linf:3.1250e-01 L4_l1linf:3.1445e-01 L5_l1linf:3.1641e-01 
L6_l1linf:3.1836e-01 L7_l1linf:3.1641e-01 L8_l1linf:3.1445e-01 L9_l1linf:3.0859e-01 L10_l1linf:3.0469e-01 L11_l1linf:3.1250e-01 L12_l1linf:3.3984e-01 L1_spectral:1.5697e-02 L2_spectral:1.5520e-02 L3_spectral:1.5585e-02 L4_spectral:1.5558e-02 L5_spectral:1.5634e-02 L6_spectral:1.5612e-02 L7_spectral:1.5576e-02 L8_spectral:1.5770e-02 L9_spectral:1.5660e-02 L10_spectral:1.5685e-02 L11_spectral:1.5818e-02 L12_spectral:1.5958e-02 train_time:197827ms step_avg:44.96ms +[2025-09-11 07:13:27] [Rank 0] step:4401/10000 train_time:198970ms step_avg:45.21ms +[2025-09-11 07:13:27] [Rank 0] step:4401/10000 train_time:198970ms step_avg:45.21ms +[2025-09-11 07:13:28] [Rank 0] step:4421/10000 train_time:199646ms step_avg:45.16ms +[2025-09-11 07:13:28] [Rank 0] step:4421/10000 train_time:199646ms step_avg:45.16ms +[2025-09-11 07:13:29] [Rank 0] step:4441/10000 train_time:200320ms step_avg:45.11ms +[2025-09-11 07:13:29] [Rank 0] step:4441/10000 train_time:200320ms step_avg:45.11ms +[2025-09-11 07:13:29] [Rank 0] step:4461/10000 train_time:200993ms step_avg:45.06ms +[2025-09-11 07:13:29] [Rank 0] step:4461/10000 train_time:200993ms step_avg:45.06ms +[2025-09-11 07:13:30] [Rank 0] step:4481/10000 train_time:201667ms step_avg:45.00ms +[2025-09-11 07:13:30] [Rank 0] step:4481/10000 train_time:201667ms step_avg:45.00ms +[2025-09-11 07:13:31] [Rank 0] step:4501/10000 train_time:202341ms step_avg:44.95ms +[2025-09-11 07:13:31] [Rank 0] step:4501/10000 train_time:202341ms step_avg:44.95ms +[2025-09-11 07:13:31] [Rank 0] step:4521/10000 train_time:203015ms step_avg:44.90ms +[2025-09-11 07:13:31] [Rank 0] step:4521/10000 train_time:203015ms step_avg:44.90ms +[2025-09-11 07:13:32] [Rank 0] step:4541/10000 train_time:203689ms step_avg:44.86ms +[2025-09-11 07:13:32] [Rank 0] step:4541/10000 train_time:203689ms step_avg:44.86ms +[2025-09-11 07:13:33] [Rank 0] step:4561/10000 train_time:204362ms step_avg:44.81ms +[2025-09-11 07:13:33] [Rank 0] step:4561/10000 train_time:204362ms step_avg:44.81ms 
+[2025-09-11 07:13:33] [Rank 0] step:4581/10000 train_time:205036ms step_avg:44.76ms +[2025-09-11 07:13:33] [Rank 0] step:4581/10000 train_time:205036ms step_avg:44.76ms +[2025-09-11 07:13:34] [Rank 0] step:4601/10000 train_time:205709ms step_avg:44.71ms +[2025-09-11 07:13:34] [Rank 0] step:4601/10000 train_time:205709ms step_avg:44.71ms +[2025-09-11 07:13:35] [Rank 0] step:4621/10000 train_time:206382ms step_avg:44.66ms +[2025-09-11 07:13:35] [Rank 0] step:4621/10000 train_time:206382ms step_avg:44.66ms +[2025-09-11 07:13:35] [Rank 0] step:4641/10000 train_time:207056ms step_avg:44.61ms +[2025-09-11 07:13:35] [Rank 0] step:4641/10000 train_time:207056ms step_avg:44.61ms +[2025-09-11 07:13:36] [Rank 0] step:4661/10000 train_time:207729ms step_avg:44.57ms +[2025-09-11 07:13:36] [Rank 0] step:4661/10000 train_time:207729ms step_avg:44.57ms +[2025-09-11 07:13:37] [Rank 0] step:4681/10000 train_time:208403ms step_avg:44.52ms +[2025-09-11 07:13:37] [Rank 0] step:4681/10000 train_time:208403ms step_avg:44.52ms +[2025-09-11 07:13:37] [Rank 0] step:4701/10000 train_time:209076ms step_avg:44.47ms +[2025-09-11 07:13:37] [Rank 0] step:4701/10000 train_time:209076ms step_avg:44.47ms +[2025-09-11 07:13:38] [Rank 0] step:4721/10000 train_time:209749ms step_avg:44.43ms +[2025-09-11 07:13:38] [Rank 0] step:4721/10000 train_time:209749ms step_avg:44.43ms +[2025-09-11 07:13:39] [Rank 0] step:4741/10000 train_time:210423ms step_avg:44.38ms +[2025-09-11 07:13:39] [Rank 0] step:4741/10000 train_time:210423ms step_avg:44.38ms +[2025-09-11 07:13:39] [Rank 0] step:4761/10000 train_time:211097ms step_avg:44.34ms +[2025-09-11 07:13:39] [Rank 0] step:4761/10000 train_time:211097ms step_avg:44.34ms +[2025-09-11 07:13:40] [Rank 0] step:4781/10000 train_time:211769ms step_avg:44.29ms +[2025-09-11 07:13:40] [Rank 0] step:4781/10000 train_time:211769ms step_avg:44.29ms +[2025-09-11 07:13:41] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 07:13:41] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 07:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 07:13:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 07:13:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 07:13:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 07:13:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:13:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:13:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 07:13:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 07:13:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 07:13:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 07:13:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 07:13:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 07:13:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 07:13:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 07:13:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 07:13:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 07:13:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 07:13:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 07:13:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 07:13:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 07:13:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 07:13:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 07:13:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 07:13:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 07:13:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 07:13:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 07:13:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 07:13:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 07:13:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 07:13:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 07:13:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 07:13:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 07:13:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 07:13:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 07:13:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 07:13:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 07:13:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 07:13:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 07:13:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 07:13:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 07:13:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 07:13:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 07:13:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 07:13:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 07:13:51] [Rank 0] PRINT: step:4800/10000 val_loss:4.3887 total_sharp:4.4679e-05 L1_sharp:4.1604e-03 L2_sharp:4.6885e-04 L3_sharp:1.6348e-04 L4_sharp:9.9514e-04 L5_sharp:1.4265e-03 L6_sharp:1.6744e-03 L7_sharp:1.7052e-03 L8_sharp:2.6885e-03 L9_sharp:2.5765e-03 L10_sharp:2.5723e-03 L11_sharp:4.3619e-03 L12_sharp:9.3468e-02 total_fnorm:1.9100e+02 total_l1_linf:4.3213e+05 total_spectral:9.5500e+01 L1_fnorm:1.2578e+00 L2_fnorm:1.2422e+00 L3_fnorm:1.2578e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2734e+00 L1_l1linf:3.4180e-01 L2_l1linf:3.1445e-01 L3_l1linf:3.1836e-01 L4_l1linf:3.2617e-01 L5_l1linf:3.2227e-01 L6_l1linf:3.2227e-01 L7_l1linf:3.2617e-01 L8_l1linf:3.1836e-01 L9_l1linf:3.1641e-01 L10_l1linf:3.1250e-01 L11_l1linf:3.0469e-01 L12_l1linf:3.3008e-01 L1_spectral:1.5862e-02 L2_spectral:1.5618e-02 L3_spectral:1.5761e-02 L4_spectral:1.5661e-02 L5_spectral:1.5746e-02 L6_spectral:1.5756e-02 L7_spectral:1.5770e-02 L8_spectral:1.5847e-02 L9_spectral:1.5745e-02 L10_spectral:1.5771e-02 L11_spectral:1.5922e-02 L12_spectral:1.5925e-02 train_time:212423ms step_avg:44.25ms +[2025-09-11 07:13:51] [Rank 0] PRINT: step:4800/10000 
val_loss:4.3887 total_sharp:4.4679e-05 L1_sharp:4.1604e-03 L2_sharp:4.6885e-04 L3_sharp:1.6348e-04 L4_sharp:9.9514e-04 L5_sharp:1.4265e-03 L6_sharp:1.6744e-03 L7_sharp:1.7052e-03 L8_sharp:2.6885e-03 L9_sharp:2.5765e-03 L10_sharp:2.5723e-03 L11_sharp:4.3619e-03 L12_sharp:9.3468e-02 total_fnorm:1.9100e+02 total_l1_linf:4.3213e+05 total_spectral:9.5500e+01 L1_fnorm:1.2578e+00 L2_fnorm:1.2422e+00 L3_fnorm:1.2578e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2578e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2734e+00 L1_l1linf:3.4180e-01 L2_l1linf:3.1445e-01 L3_l1linf:3.1836e-01 L4_l1linf:3.2617e-01 L5_l1linf:3.2227e-01 L6_l1linf:3.2227e-01 L7_l1linf:3.2617e-01 L8_l1linf:3.1836e-01 L9_l1linf:3.1641e-01 L10_l1linf:3.1250e-01 L11_l1linf:3.0469e-01 L12_l1linf:3.3008e-01 L1_spectral:1.5862e-02 L2_spectral:1.5618e-02 L3_spectral:1.5761e-02 L4_spectral:1.5661e-02 L5_spectral:1.5746e-02 L6_spectral:1.5756e-02 L7_spectral:1.5770e-02 L8_spectral:1.5847e-02 L9_spectral:1.5745e-02 L10_spectral:1.5771e-02 L11_spectral:1.5922e-02 L12_spectral:1.5925e-02 train_time:212423ms step_avg:44.25ms +[2025-09-11 07:13:52] [Rank 0] step:4801/10000 train_time:213604ms step_avg:44.49ms +[2025-09-11 07:13:52] [Rank 0] step:4801/10000 train_time:213604ms step_avg:44.49ms +[2025-09-11 07:13:52] [Rank 0] step:4821/10000 train_time:214269ms step_avg:44.44ms +[2025-09-11 07:13:52] [Rank 0] step:4821/10000 train_time:214269ms step_avg:44.44ms +[2025-09-11 07:13:53] [Rank 0] step:4841/10000 train_time:214944ms step_avg:44.40ms +[2025-09-11 07:13:53] [Rank 0] step:4841/10000 train_time:214944ms step_avg:44.40ms +[2025-09-11 07:13:54] [Rank 0] step:4861/10000 train_time:215618ms step_avg:44.36ms +[2025-09-11 07:13:54] [Rank 0] step:4861/10000 train_time:215618ms step_avg:44.36ms +[2025-09-11 07:13:54] [Rank 0] step:4881/10000 train_time:216292ms step_avg:44.31ms +[2025-09-11 07:13:54] [Rank 0] step:4881/10000 
train_time:216292ms step_avg:44.31ms +[2025-09-11 07:13:55] [Rank 0] step:4901/10000 train_time:216967ms step_avg:44.27ms +[2025-09-11 07:13:55] [Rank 0] step:4901/10000 train_time:216967ms step_avg:44.27ms +[2025-09-11 07:13:56] [Rank 0] step:4921/10000 train_time:217640ms step_avg:44.23ms +[2025-09-11 07:13:56] [Rank 0] step:4921/10000 train_time:217640ms step_avg:44.23ms +[2025-09-11 07:13:56] [Rank 0] step:4941/10000 train_time:218314ms step_avg:44.18ms +[2025-09-11 07:13:56] [Rank 0] step:4941/10000 train_time:218314ms step_avg:44.18ms +[2025-09-11 07:13:57] [Rank 0] step:4961/10000 train_time:218987ms step_avg:44.14ms +[2025-09-11 07:13:57] [Rank 0] step:4961/10000 train_time:218987ms step_avg:44.14ms +[2025-09-11 07:13:58] [Rank 0] step:4981/10000 train_time:219662ms step_avg:44.10ms +[2025-09-11 07:13:58] [Rank 0] step:4981/10000 train_time:219662ms step_avg:44.10ms +[2025-09-11 07:13:58] [Rank 0] step:5001/10000 train_time:220338ms step_avg:44.06ms +[2025-09-11 07:13:58] [Rank 0] step:5001/10000 train_time:220338ms step_avg:44.06ms +[2025-09-11 07:13:59] [Rank 0] step:5021/10000 train_time:221011ms step_avg:44.02ms +[2025-09-11 07:13:59] [Rank 0] step:5021/10000 train_time:221011ms step_avg:44.02ms +[2025-09-11 07:14:00] [Rank 0] step:5041/10000 train_time:221684ms step_avg:43.98ms +[2025-09-11 07:14:00] [Rank 0] step:5041/10000 train_time:221684ms step_avg:43.98ms +[2025-09-11 07:14:00] [Rank 0] step:5061/10000 train_time:222358ms step_avg:43.94ms +[2025-09-11 07:14:00] [Rank 0] step:5061/10000 train_time:222358ms step_avg:43.94ms +[2025-09-11 07:14:01] [Rank 0] step:5081/10000 train_time:223032ms step_avg:43.90ms +[2025-09-11 07:14:01] [Rank 0] step:5081/10000 train_time:223032ms step_avg:43.90ms +[2025-09-11 07:14:02] [Rank 0] step:5101/10000 train_time:223705ms step_avg:43.86ms +[2025-09-11 07:14:02] [Rank 0] step:5101/10000 train_time:223705ms step_avg:43.86ms +[2025-09-11 07:14:03] [Rank 0] step:5121/10000 train_time:224379ms step_avg:43.82ms 
+[2025-09-11 07:14:03] [Rank 0] step:5121/10000 train_time:224379ms step_avg:43.82ms +[2025-09-11 07:14:03] [Rank 0] step:5141/10000 train_time:225054ms step_avg:43.78ms +[2025-09-11 07:14:03] [Rank 0] step:5141/10000 train_time:225054ms step_avg:43.78ms +[2025-09-11 07:14:04] [Rank 0] step:5161/10000 train_time:225728ms step_avg:43.74ms +[2025-09-11 07:14:04] [Rank 0] step:5161/10000 train_time:225728ms step_avg:43.74ms +[2025-09-11 07:14:05] [Rank 0] step:5181/10000 train_time:226402ms step_avg:43.70ms +[2025-09-11 07:14:05] [Rank 0] step:5181/10000 train_time:226402ms step_avg:43.70ms +[2025-09-11 07:14:05] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 07:14:05] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 07:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 07:14:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 07:14:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 07:14:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 07:14:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:14:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:14:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 07:14:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 07:14:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 07:14:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 07:14:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 07:14:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 07:14:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 07:14:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 07:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 07:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 07:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 07:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 07:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 07:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 07:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 07:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 07:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 07:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 07:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 07:14:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 07:14:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 07:14:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 07:14:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 07:14:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 07:14:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 07:14:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 07:14:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 07:14:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 07:14:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 07:14:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 07:14:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 07:14:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 07:14:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 07:14:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 07:14:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 07:14:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 07:14:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 07:14:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:14:15] [Rank 0] PRINT: step:5200/10000 val_loss:4.3606 total_sharp:5.2420e-05 L1_sharp:5.4664e-03 L2_sharp:1.4894e-03 L3_sharp:-2.4550e-05 L4_sharp:9.4878e-04 L5_sharp:1.2059e-03 L6_sharp:1.5762e-03 L7_sharp:1.1917e-03 L8_sharp:2.6445e-03 L9_sharp:2.9398e-03 L10_sharp:2.5559e-03 L11_sharp:4.5088e-03 L12_sharp:1.6225e-01 total_fnorm:1.7900e+02 total_l1_linf:3.8707e+05 total_spectral:8.9500e+01 L1_fnorm:1.2578e+00 L2_fnorm:1.2500e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2656e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2656e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2656e+00 L1_l1linf:3.3398e-01 L2_l1linf:3.0859e-01 L3_l1linf:3.0859e-01 L4_l1linf:3.2031e-01 L5_l1linf:3.1445e-01 L6_l1linf:3.1836e-01 L7_l1linf:3.2227e-01 L8_l1linf:3.1836e-01 L9_l1linf:3.1055e-01 L10_l1linf:3.0859e-01 L11_l1linf:3.0469e-01 L12_l1linf:3.2031e-01 L1_spectral:1.5937e-02 L2_spectral:1.5704e-02 L3_spectral:1.5718e-02 L4_spectral:1.5732e-02 L5_spectral:1.5764e-02 L6_spectral:1.5815e-02 L7_spectral:1.5785e-02 L8_spectral:1.5980e-02 L9_spectral:1.5928e-02 L10_spectral:1.6005e-02 L11_spectral:1.6074e-02 L12_spectral:1.5968e-02 train_time:227063ms step_avg:43.67ms +[2025-09-11 07:14:15] [Rank 0] PRINT: step:5200/10000 val_loss:4.3606 total_sharp:5.2420e-05 L1_sharp:5.4664e-03 L2_sharp:1.4894e-03 L3_sharp:-2.4550e-05 L4_sharp:9.4878e-04 L5_sharp:1.2059e-03 L6_sharp:1.5762e-03 L7_sharp:1.1917e-03 L8_sharp:2.6445e-03 L9_sharp:2.9398e-03 L10_sharp:2.5559e-03 L11_sharp:4.5088e-03 L12_sharp:1.6225e-01 total_fnorm:1.7900e+02 total_l1_linf:3.8707e+05 total_spectral:8.9500e+01 L1_fnorm:1.2578e+00 L2_fnorm:1.2500e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.2656e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2656e+00 L9_fnorm:1.2656e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2656e+00 L12_fnorm:1.2656e+00 L1_l1linf:3.3398e-01 L2_l1linf:3.0859e-01 L3_l1linf:3.0859e-01 L4_l1linf:3.2031e-01 L5_l1linf:3.1445e-01 
L6_l1linf:3.1836e-01 L7_l1linf:3.2227e-01 L8_l1linf:3.1836e-01 L9_l1linf:3.1055e-01 L10_l1linf:3.0859e-01 L11_l1linf:3.0469e-01 L12_l1linf:3.2031e-01 L1_spectral:1.5937e-02 L2_spectral:1.5704e-02 L3_spectral:1.5718e-02 L4_spectral:1.5732e-02 L5_spectral:1.5764e-02 L6_spectral:1.5815e-02 L7_spectral:1.5785e-02 L8_spectral:1.5980e-02 L9_spectral:1.5928e-02 L10_spectral:1.6005e-02 L11_spectral:1.6074e-02 L12_spectral:1.5968e-02 train_time:227063ms step_avg:43.67ms +[2025-09-11 07:14:16] [Rank 0] step:5201/10000 train_time:228248ms step_avg:43.89ms +[2025-09-11 07:14:16] [Rank 0] step:5201/10000 train_time:228248ms step_avg:43.89ms +[2025-09-11 07:14:17] [Rank 0] step:5221/10000 train_time:228922ms step_avg:43.85ms +[2025-09-11 07:14:17] [Rank 0] step:5221/10000 train_time:228922ms step_avg:43.85ms +[2025-09-11 07:14:17] [Rank 0] step:5241/10000 train_time:229606ms step_avg:43.81ms +[2025-09-11 07:14:17] [Rank 0] step:5241/10000 train_time:229606ms step_avg:43.81ms +[2025-09-11 07:14:18] [Rank 0] step:5261/10000 train_time:230290ms step_avg:43.77ms +[2025-09-11 07:14:18] [Rank 0] step:5261/10000 train_time:230290ms step_avg:43.77ms +[2025-09-11 07:14:19] [Rank 0] step:5281/10000 train_time:230974ms step_avg:43.74ms +[2025-09-11 07:14:19] [Rank 0] step:5281/10000 train_time:230974ms step_avg:43.74ms +[2025-09-11 07:14:19] [Rank 0] step:5301/10000 train_time:231658ms step_avg:43.70ms +[2025-09-11 07:14:19] [Rank 0] step:5301/10000 train_time:231658ms step_avg:43.70ms +[2025-09-11 07:14:20] [Rank 0] step:5321/10000 train_time:232341ms step_avg:43.66ms +[2025-09-11 07:14:20] [Rank 0] step:5321/10000 train_time:232341ms step_avg:43.66ms +[2025-09-11 07:14:21] [Rank 0] step:5341/10000 train_time:233276ms step_avg:43.68ms +[2025-09-11 07:14:21] [Rank 0] step:5341/10000 train_time:233276ms step_avg:43.68ms +[2025-09-11 07:14:22] [Rank 0] step:5361/10000 train_time:234216ms step_avg:43.69ms +[2025-09-11 07:14:22] [Rank 0] step:5361/10000 train_time:234216ms step_avg:43.69ms 
+[2025-09-11 07:14:23] [Rank 0] step:5381/10000 train_time:234899ms step_avg:43.65ms +[2025-09-11 07:14:23] [Rank 0] step:5381/10000 train_time:234899ms step_avg:43.65ms +[2025-09-11 07:14:24] [Rank 0] step:5401/10000 train_time:235875ms step_avg:43.67ms +[2025-09-11 07:14:24] [Rank 0] step:5401/10000 train_time:235875ms step_avg:43.67ms +[2025-09-11 07:14:24] [Rank 0] step:5421/10000 train_time:236561ms step_avg:43.64ms +[2025-09-11 07:14:24] [Rank 0] step:5421/10000 train_time:236561ms step_avg:43.64ms +[2025-09-11 07:14:25] [Rank 0] step:5441/10000 train_time:237244ms step_avg:43.60ms +[2025-09-11 07:14:25] [Rank 0] step:5441/10000 train_time:237244ms step_avg:43.60ms +[2025-09-11 07:14:26] [Rank 0] step:5461/10000 train_time:237928ms step_avg:43.57ms +[2025-09-11 07:14:26] [Rank 0] step:5461/10000 train_time:237928ms step_avg:43.57ms +[2025-09-11 07:14:26] [Rank 0] step:5481/10000 train_time:238611ms step_avg:43.53ms +[2025-09-11 07:14:26] [Rank 0] step:5481/10000 train_time:238611ms step_avg:43.53ms +[2025-09-11 07:14:27] [Rank 0] step:5501/10000 train_time:239294ms step_avg:43.50ms +[2025-09-11 07:14:27] [Rank 0] step:5501/10000 train_time:239294ms step_avg:43.50ms +[2025-09-11 07:14:28] [Rank 0] step:5521/10000 train_time:239976ms step_avg:43.47ms +[2025-09-11 07:14:28] [Rank 0] step:5521/10000 train_time:239976ms step_avg:43.47ms +[2025-09-11 07:14:28] [Rank 0] step:5541/10000 train_time:240662ms step_avg:43.43ms +[2025-09-11 07:14:28] [Rank 0] step:5541/10000 train_time:240662ms step_avg:43.43ms +[2025-09-11 07:14:29] [Rank 0] step:5561/10000 train_time:241346ms step_avg:43.40ms +[2025-09-11 07:14:29] [Rank 0] step:5561/10000 train_time:241346ms step_avg:43.40ms +[2025-09-11 07:14:30] [Rank 0] step:5581/10000 train_time:242030ms step_avg:43.37ms +[2025-09-11 07:14:30] [Rank 0] step:5581/10000 train_time:242030ms step_avg:43.37ms +[2025-09-11 07:14:30] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 07:14:30] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 07:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 07:14:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 07:14:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 07:14:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 07:14:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:14:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:14:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 07:14:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 07:14:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 07:14:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 07:14:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 07:14:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 07:14:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 07:14:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 07:14:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 07:14:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 07:14:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 07:14:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 07:14:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 07:14:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 07:14:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 07:14:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 07:14:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 07:14:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 07:14:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 07:14:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 07:14:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 07:14:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 07:14:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 07:14:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 07:14:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 07:14:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 07:14:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 07:14:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 07:14:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 07:14:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 07:14:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 07:14:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 07:14:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 07:14:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 07:14:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 07:14:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 07:14:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 07:14:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 07:14:44] [Rank 0] PRINT: step:5600/10000 val_loss:4.3493 total_sharp:5.1494e-05 L1_sharp:6.0729e-03 L2_sharp:5.8365e-04 L3_sharp:3.9294e-04 L4_sharp:1.2195e-03 L5_sharp:2.0383e-03 L6_sharp:7.2127e-04 L7_sharp:1.4892e-03 L8_sharp:2.3172e-03 L9_sharp:2.3033e-03 L10_sharp:2.9623e-03 L11_sharp:3.8865e-03 L12_sharp:1.6626e-01 total_fnorm:1.8100e+02 total_l1_linf:4.0141e+05 total_spectral:9.0500e+01 L1_fnorm:1.2500e+00 L2_fnorm:1.2422e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2422e+00 L5_fnorm:1.2656e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2578e+00 L11_fnorm:1.2578e+00 L12_fnorm:1.2656e+00 L1_l1linf:3.3008e-01 L2_l1linf:3.0469e-01 L3_l1linf:3.0664e-01 L4_l1linf:3.0859e-01 L5_l1linf:3.1445e-01 L6_l1linf:3.1250e-01 L7_l1linf:3.1836e-01 L8_l1linf:3.1055e-01 L9_l1linf:3.0469e-01 L10_l1linf:3.0469e-01 L11_l1linf:2.9883e-01 L12_l1linf:3.2031e-01 L1_spectral:1.5965e-02 L2_spectral:1.5777e-02 L3_spectral:1.5808e-02 L4_spectral:1.5817e-02 L5_spectral:1.5803e-02 L6_spectral:1.5936e-02 L7_spectral:1.5883e-02 L8_spectral:1.6025e-02 L9_spectral:1.5858e-02 L10_spectral:1.5986e-02 L11_spectral:1.5993e-02 L12_spectral:1.6023e-02 train_time:242694ms step_avg:43.34ms +[2025-09-11 07:14:44] [Rank 0] PRINT: step:5600/10000 
val_loss:4.3493 total_sharp:5.1494e-05 L1_sharp:6.0729e-03 L2_sharp:5.8365e-04 L3_sharp:3.9294e-04 L4_sharp:1.2195e-03 L5_sharp:2.0383e-03 L6_sharp:7.2127e-04 L7_sharp:1.4892e-03 L8_sharp:2.3172e-03 L9_sharp:2.3033e-03 L10_sharp:2.9623e-03 L11_sharp:3.8865e-03 L12_sharp:1.6626e-01 total_fnorm:1.8100e+02 total_l1_linf:4.0141e+05 total_spectral:9.0500e+01 L1_fnorm:1.2500e+00 L2_fnorm:1.2422e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2422e+00 L5_fnorm:1.2656e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2578e+00 L11_fnorm:1.2578e+00 L12_fnorm:1.2656e+00 L1_l1linf:3.3008e-01 L2_l1linf:3.0469e-01 L3_l1linf:3.0664e-01 L4_l1linf:3.0859e-01 L5_l1linf:3.1445e-01 L6_l1linf:3.1250e-01 L7_l1linf:3.1836e-01 L8_l1linf:3.1055e-01 L9_l1linf:3.0469e-01 L10_l1linf:3.0469e-01 L11_l1linf:2.9883e-01 L12_l1linf:3.2031e-01 L1_spectral:1.5965e-02 L2_spectral:1.5777e-02 L3_spectral:1.5808e-02 L4_spectral:1.5817e-02 L5_spectral:1.5803e-02 L6_spectral:1.5936e-02 L7_spectral:1.5883e-02 L8_spectral:1.6025e-02 L9_spectral:1.5858e-02 L10_spectral:1.5986e-02 L11_spectral:1.5993e-02 L12_spectral:1.6023e-02 train_time:242694ms step_avg:43.34ms +[2025-09-11 07:14:45] [Rank 0] step:5601/10000 train_time:243938ms step_avg:43.55ms +[2025-09-11 07:14:45] [Rank 0] step:5601/10000 train_time:243938ms step_avg:43.55ms +[2025-09-11 07:14:46] [Rank 0] step:5621/10000 train_time:244643ms step_avg:43.52ms +[2025-09-11 07:14:46] [Rank 0] step:5621/10000 train_time:244643ms step_avg:43.52ms +[2025-09-11 07:14:47] [Rank 0] step:5641/10000 train_time:245327ms step_avg:43.49ms +[2025-09-11 07:14:47] [Rank 0] step:5641/10000 train_time:245327ms step_avg:43.49ms +[2025-09-11 07:14:47] [Rank 0] step:5661/10000 train_time:246012ms step_avg:43.46ms +[2025-09-11 07:14:47] [Rank 0] step:5661/10000 train_time:246012ms step_avg:43.46ms +[2025-09-11 07:14:48] [Rank 0] step:5681/10000 train_time:246697ms step_avg:43.42ms +[2025-09-11 07:14:48] [Rank 0] step:5681/10000 
train_time:246697ms step_avg:43.42ms +[2025-09-11 07:14:49] [Rank 0] step:5701/10000 train_time:247383ms step_avg:43.39ms +[2025-09-11 07:14:49] [Rank 0] step:5701/10000 train_time:247383ms step_avg:43.39ms +[2025-09-11 07:14:49] [Rank 0] step:5721/10000 train_time:248067ms step_avg:43.36ms +[2025-09-11 07:14:49] [Rank 0] step:5721/10000 train_time:248067ms step_avg:43.36ms +[2025-09-11 07:14:50] [Rank 0] step:5741/10000 train_time:248753ms step_avg:43.33ms +[2025-09-11 07:14:50] [Rank 0] step:5741/10000 train_time:248753ms step_avg:43.33ms +[2025-09-11 07:14:51] [Rank 0] step:5761/10000 train_time:249439ms step_avg:43.30ms +[2025-09-11 07:14:51] [Rank 0] step:5761/10000 train_time:249439ms step_avg:43.30ms +[2025-09-11 07:14:51] [Rank 0] step:5781/10000 train_time:250124ms step_avg:43.27ms +[2025-09-11 07:14:51] [Rank 0] step:5781/10000 train_time:250124ms step_avg:43.27ms +[2025-09-11 07:14:52] [Rank 0] step:5801/10000 train_time:250810ms step_avg:43.24ms +[2025-09-11 07:14:52] [Rank 0] step:5801/10000 train_time:250810ms step_avg:43.24ms +[2025-09-11 07:14:53] [Rank 0] step:5821/10000 train_time:251494ms step_avg:43.20ms +[2025-09-11 07:14:53] [Rank 0] step:5821/10000 train_time:251494ms step_avg:43.20ms +[2025-09-11 07:14:54] [Rank 0] step:5841/10000 train_time:252179ms step_avg:43.17ms +[2025-09-11 07:14:54] [Rank 0] step:5841/10000 train_time:252179ms step_avg:43.17ms +[2025-09-11 07:14:54] [Rank 0] step:5861/10000 train_time:252863ms step_avg:43.14ms +[2025-09-11 07:14:54] [Rank 0] step:5861/10000 train_time:252863ms step_avg:43.14ms +[2025-09-11 07:14:55] [Rank 0] step:5881/10000 train_time:253547ms step_avg:43.11ms +[2025-09-11 07:14:55] [Rank 0] step:5881/10000 train_time:253547ms step_avg:43.11ms +[2025-09-11 07:14:56] [Rank 0] step:5901/10000 train_time:254231ms step_avg:43.08ms +[2025-09-11 07:14:56] [Rank 0] step:5901/10000 train_time:254231ms step_avg:43.08ms +[2025-09-11 07:14:56] [Rank 0] step:5921/10000 train_time:254918ms step_avg:43.05ms 
+[2025-09-11 07:14:56] [Rank 0] step:5921/10000 train_time:254918ms step_avg:43.05ms +[2025-09-11 07:14:57] [Rank 0] step:5941/10000 train_time:255604ms step_avg:43.02ms +[2025-09-11 07:14:57] [Rank 0] step:5941/10000 train_time:255604ms step_avg:43.02ms +[2025-09-11 07:14:58] [Rank 0] step:5961/10000 train_time:256290ms step_avg:42.99ms +[2025-09-11 07:14:58] [Rank 0] step:5961/10000 train_time:256290ms step_avg:42.99ms +[2025-09-11 07:14:58] [Rank 0] step:5981/10000 train_time:256975ms step_avg:42.97ms +[2025-09-11 07:14:58] [Rank 0] step:5981/10000 train_time:256975ms step_avg:42.97ms +[2025-09-11 07:14:59] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 07:14:59] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 07:15:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 07:15:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 07:15:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 07:15:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 07:15:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:15:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:15:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 07:15:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 07:15:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 07:15:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 07:15:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 07:15:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 07:15:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 07:15:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 07:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 07:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 07:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 07:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 07:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 07:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 07:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 07:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 07:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 07:15:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 07:15:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 07:15:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 07:15:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 07:15:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 07:15:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 07:15:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 07:15:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 07:15:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 07:15:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 07:15:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 07:15:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 07:15:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 07:15:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 07:15:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 07:15:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 07:15:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 07:15:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 07:15:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 07:15:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 07:15:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:15:09] [Rank 0] PRINT: step:6000/10000 val_loss:4.3078 total_sharp:4.2604e-05 L1_sharp:4.6248e-03 L2_sharp:7.8544e-04 L3_sharp:5.0760e-04 L4_sharp:4.0011e-04 L5_sharp:1.4233e-03 L6_sharp:1.6423e-03 L7_sharp:1.0217e-03 L8_sharp:2.7076e-03 L9_sharp:2.1027e-03 L10_sharp:2.4525e-03 L11_sharp:3.9534e-03 L12_sharp:1.5778e-01 total_fnorm:1.8000e+02 total_l1_linf:3.9117e+05 total_spectral:9.0000e+01 L1_fnorm:1.2578e+00 L2_fnorm:1.2422e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2422e+00 L5_fnorm:1.2656e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2578e+00 L12_fnorm:1.2578e+00 L1_l1linf:3.2812e-01 L2_l1linf:3.0078e-01 L3_l1linf:3.0664e-01 L4_l1linf:3.1250e-01 L5_l1linf:3.1250e-01 L6_l1linf:3.1445e-01 L7_l1linf:3.1445e-01 L8_l1linf:3.1250e-01 L9_l1linf:3.0664e-01 L10_l1linf:3.1055e-01 L11_l1linf:3.0078e-01 L12_l1linf:3.0664e-01 L1_spectral:1.6077e-02 L2_spectral:1.5925e-02 L3_spectral:1.5867e-02 L4_spectral:1.5819e-02 L5_spectral:1.6028e-02 L6_spectral:1.5975e-02 L7_spectral:1.5950e-02 L8_spectral:1.6006e-02 L9_spectral:1.5969e-02 L10_spectral:1.5965e-02 L11_spectral:1.6088e-02 L12_spectral:1.6010e-02 train_time:257642ms step_avg:42.94ms +[2025-09-11 07:15:09] [Rank 0] PRINT: step:6000/10000 val_loss:4.3078 total_sharp:4.2604e-05 L1_sharp:4.6248e-03 L2_sharp:7.8544e-04 L3_sharp:5.0760e-04 L4_sharp:4.0011e-04 L5_sharp:1.4233e-03 L6_sharp:1.6423e-03 L7_sharp:1.0217e-03 L8_sharp:2.7076e-03 L9_sharp:2.1027e-03 L10_sharp:2.4525e-03 L11_sharp:3.9534e-03 L12_sharp:1.5778e-01 total_fnorm:1.8000e+02 total_l1_linf:3.9117e+05 total_spectral:9.0000e+01 L1_fnorm:1.2578e+00 L2_fnorm:1.2422e+00 L3_fnorm:1.2500e+00 L4_fnorm:1.2422e+00 L5_fnorm:1.2656e+00 L6_fnorm:1.2656e+00 L7_fnorm:1.2656e+00 L8_fnorm:1.2500e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2656e+00 L11_fnorm:1.2578e+00 L12_fnorm:1.2578e+00 L1_l1linf:3.2812e-01 L2_l1linf:3.0078e-01 L3_l1linf:3.0664e-01 L4_l1linf:3.1250e-01 L5_l1linf:3.1250e-01 
L6_l1linf:3.1445e-01 L7_l1linf:3.1445e-01 L8_l1linf:3.1250e-01 L9_l1linf:3.0664e-01 L10_l1linf:3.1055e-01 L11_l1linf:3.0078e-01 L12_l1linf:3.0664e-01 L1_spectral:1.6077e-02 L2_spectral:1.5925e-02 L3_spectral:1.5867e-02 L4_spectral:1.5819e-02 L5_spectral:1.6028e-02 L6_spectral:1.5975e-02 L7_spectral:1.5950e-02 L8_spectral:1.6006e-02 L9_spectral:1.5969e-02 L10_spectral:1.5965e-02 L11_spectral:1.6088e-02 L12_spectral:1.6010e-02 train_time:257642ms step_avg:42.94ms +[2025-09-11 07:15:10] [Rank 0] step:6001/10000 train_time:258857ms step_avg:43.14ms +[2025-09-11 07:15:10] [Rank 0] step:6001/10000 train_time:258857ms step_avg:43.14ms +[2025-09-11 07:15:11] [Rank 0] step:6021/10000 train_time:259560ms step_avg:43.11ms +[2025-09-11 07:15:11] [Rank 0] step:6021/10000 train_time:259560ms step_avg:43.11ms +[2025-09-11 07:15:12] [Rank 0] step:6041/10000 train_time:260249ms step_avg:43.08ms +[2025-09-11 07:15:12] [Rank 0] step:6041/10000 train_time:260249ms step_avg:43.08ms +[2025-09-11 07:15:12] [Rank 0] step:6061/10000 train_time:260937ms step_avg:43.05ms +[2025-09-11 07:15:12] [Rank 0] step:6061/10000 train_time:260937ms step_avg:43.05ms +[2025-09-11 07:15:13] [Rank 0] step:6081/10000 train_time:261625ms step_avg:43.02ms +[2025-09-11 07:15:13] [Rank 0] step:6081/10000 train_time:261625ms step_avg:43.02ms +[2025-09-11 07:15:14] [Rank 0] step:6101/10000 train_time:262311ms step_avg:42.99ms +[2025-09-11 07:15:14] [Rank 0] step:6101/10000 train_time:262311ms step_avg:42.99ms +[2025-09-11 07:15:14] [Rank 0] step:6121/10000 train_time:262998ms step_avg:42.97ms +[2025-09-11 07:15:14] [Rank 0] step:6121/10000 train_time:262998ms step_avg:42.97ms +[2025-09-11 07:15:15] [Rank 0] step:6141/10000 train_time:263685ms step_avg:42.94ms +[2025-09-11 07:15:15] [Rank 0] step:6141/10000 train_time:263685ms step_avg:42.94ms +[2025-09-11 07:15:16] [Rank 0] step:6161/10000 train_time:264371ms step_avg:42.91ms +[2025-09-11 07:15:16] [Rank 0] step:6161/10000 train_time:264371ms step_avg:42.91ms 
+[2025-09-11 07:15:17] [Rank 0] step:6181/10000 train_time:265055ms step_avg:42.88ms +[2025-09-11 07:15:17] [Rank 0] step:6181/10000 train_time:265055ms step_avg:42.88ms +[2025-09-11 07:15:17] [Rank 0] step:6201/10000 train_time:265741ms step_avg:42.85ms +[2025-09-11 07:15:17] [Rank 0] step:6201/10000 train_time:265741ms step_avg:42.85ms +[2025-09-11 07:15:18] [Rank 0] step:6221/10000 train_time:266428ms step_avg:42.83ms +[2025-09-11 07:15:18] [Rank 0] step:6221/10000 train_time:266428ms step_avg:42.83ms +[2025-09-11 07:15:19] [Rank 0] step:6241/10000 train_time:267114ms step_avg:42.80ms +[2025-09-11 07:15:19] [Rank 0] step:6241/10000 train_time:267114ms step_avg:42.80ms +[2025-09-11 07:15:19] [Rank 0] step:6261/10000 train_time:267798ms step_avg:42.77ms +[2025-09-11 07:15:19] [Rank 0] step:6261/10000 train_time:267798ms step_avg:42.77ms +[2025-09-11 07:15:20] [Rank 0] step:6281/10000 train_time:268484ms step_avg:42.75ms +[2025-09-11 07:15:20] [Rank 0] step:6281/10000 train_time:268484ms step_avg:42.75ms +[2025-09-11 07:15:21] [Rank 0] step:6301/10000 train_time:269169ms step_avg:42.72ms +[2025-09-11 07:15:21] [Rank 0] step:6301/10000 train_time:269169ms step_avg:42.72ms +[2025-09-11 07:15:21] [Rank 0] step:6321/10000 train_time:269857ms step_avg:42.69ms +[2025-09-11 07:15:21] [Rank 0] step:6321/10000 train_time:269857ms step_avg:42.69ms +[2025-09-11 07:15:22] [Rank 0] step:6341/10000 train_time:270544ms step_avg:42.67ms +[2025-09-11 07:15:22] [Rank 0] step:6341/10000 train_time:270544ms step_avg:42.67ms +[2025-09-11 07:15:23] [Rank 0] step:6361/10000 train_time:271231ms step_avg:42.64ms +[2025-09-11 07:15:23] [Rank 0] step:6361/10000 train_time:271231ms step_avg:42.64ms +[2025-09-11 07:15:23] [Rank 0] step:6381/10000 train_time:271917ms step_avg:42.61ms +[2025-09-11 07:15:23] [Rank 0] step:6381/10000 train_time:271917ms step_avg:42.61ms +[2025-09-11 07:15:24] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 07:15:24] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 07:15:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 07:15:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 07:15:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 07:15:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 07:15:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:15:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:15:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 07:15:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 07:15:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 07:15:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 07:15:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 07:15:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 07:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 07:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 07:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 07:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 07:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 07:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 07:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 07:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 07:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 07:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 07:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 07:15:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 07:15:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 07:15:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 07:15:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 07:15:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 07:15:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 07:15:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 07:15:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 07:15:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 07:15:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 07:15:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 07:15:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 07:15:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 07:15:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 07:15:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 07:15:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 07:15:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 07:15:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 07:15:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 07:15:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 07:15:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 07:15:34] [Rank 0] PRINT: step:6400/10000 val_loss:4.2684 total_sharp:4.3113e-05 L1_sharp:3.4785e-03 L2_sharp:1.3821e-03 L3_sharp:6.2276e-04 L4_sharp:6.1753e-04 L5_sharp:1.3762e-03 L6_sharp:9.8881e-04 L7_sharp:1.1345e-03 L8_sharp:2.6383e-03 L9_sharp:2.6871e-03 L10_sharp:2.4125e-03 L11_sharp:4.1738e-03 L12_sharp:4.5145e-02 total_fnorm:1.6200e+02 total_l1_linf:3.4816e+05 total_spectral:8.1000e+01 L1_fnorm:1.1328e+00 L2_fnorm:1.1172e+00 L3_fnorm:1.1250e+00 L4_fnorm:1.1250e+00 L5_fnorm:1.1328e+00 L6_fnorm:1.1328e+00 L7_fnorm:1.1328e+00 L8_fnorm:1.1250e+00 L9_fnorm:1.1250e+00 L10_fnorm:1.1328e+00 L11_fnorm:1.1328e+00 L12_fnorm:1.1250e+00 L1_l1linf:2.7930e-01 L2_l1linf:2.7148e-01 L3_l1linf:2.6367e-01 L4_l1linf:2.6562e-01 L5_l1linf:2.6953e-01 L6_l1linf:2.7148e-01 L7_l1linf:2.7539e-01 L8_l1linf:2.6953e-01 L9_l1linf:2.6562e-01 L10_l1linf:2.6562e-01 L11_l1linf:2.5586e-01 L12_l1linf:2.7148e-01 L1_spectral:1.4729e-02 L2_spectral:1.4481e-02 L3_spectral:1.4534e-02 L4_spectral:1.4508e-02 L5_spectral:1.4611e-02 L6_spectral:1.4688e-02 L7_spectral:1.4745e-02 L8_spectral:1.4697e-02 L9_spectral:1.4660e-02 L10_spectral:1.4684e-02 L11_spectral:1.4735e-02 L12_spectral:1.4528e-02 train_time:272898ms step_avg:42.64ms +[2025-09-11 07:15:34] [Rank 0] PRINT: step:6400/10000 
val_loss:4.2684 total_sharp:4.3113e-05 L1_sharp:3.4785e-03 L2_sharp:1.3821e-03 L3_sharp:6.2276e-04 L4_sharp:6.1753e-04 L5_sharp:1.3762e-03 L6_sharp:9.8881e-04 L7_sharp:1.1345e-03 L8_sharp:2.6383e-03 L9_sharp:2.6871e-03 L10_sharp:2.4125e-03 L11_sharp:4.1738e-03 L12_sharp:4.5145e-02 total_fnorm:1.6200e+02 total_l1_linf:3.4816e+05 total_spectral:8.1000e+01 L1_fnorm:1.1328e+00 L2_fnorm:1.1172e+00 L3_fnorm:1.1250e+00 L4_fnorm:1.1250e+00 L5_fnorm:1.1328e+00 L6_fnorm:1.1328e+00 L7_fnorm:1.1328e+00 L8_fnorm:1.1250e+00 L9_fnorm:1.1250e+00 L10_fnorm:1.1328e+00 L11_fnorm:1.1328e+00 L12_fnorm:1.1250e+00 L1_l1linf:2.7930e-01 L2_l1linf:2.7148e-01 L3_l1linf:2.6367e-01 L4_l1linf:2.6562e-01 L5_l1linf:2.6953e-01 L6_l1linf:2.7148e-01 L7_l1linf:2.7539e-01 L8_l1linf:2.6953e-01 L9_l1linf:2.6562e-01 L10_l1linf:2.6562e-01 L11_l1linf:2.5586e-01 L12_l1linf:2.7148e-01 L1_spectral:1.4729e-02 L2_spectral:1.4481e-02 L3_spectral:1.4534e-02 L4_spectral:1.4508e-02 L5_spectral:1.4611e-02 L6_spectral:1.4688e-02 L7_spectral:1.4745e-02 L8_spectral:1.4697e-02 L9_spectral:1.4660e-02 L10_spectral:1.4684e-02 L11_spectral:1.4735e-02 L12_spectral:1.4528e-02 train_time:272898ms step_avg:42.64ms +[2025-09-11 07:15:35] [Rank 0] step:6401/10000 train_time:274062ms step_avg:42.82ms +[2025-09-11 07:15:35] [Rank 0] step:6401/10000 train_time:274062ms step_avg:42.82ms +[2025-09-11 07:15:36] [Rank 0] step:6421/10000 train_time:274773ms step_avg:42.79ms +[2025-09-11 07:15:36] [Rank 0] step:6421/10000 train_time:274773ms step_avg:42.79ms +[2025-09-11 07:15:37] [Rank 0] step:6441/10000 train_time:275459ms step_avg:42.77ms +[2025-09-11 07:15:37] [Rank 0] step:6441/10000 train_time:275459ms step_avg:42.77ms +[2025-09-11 07:15:37] [Rank 0] step:6461/10000 train_time:276147ms step_avg:42.74ms +[2025-09-11 07:15:37] [Rank 0] step:6461/10000 train_time:276147ms step_avg:42.74ms +[2025-09-11 07:15:38] [Rank 0] step:6481/10000 train_time:276836ms step_avg:42.71ms +[2025-09-11 07:15:38] [Rank 0] step:6481/10000 
train_time:276836ms step_avg:42.71ms +[2025-09-11 07:15:39] [Rank 0] step:6501/10000 train_time:277524ms step_avg:42.69ms +[2025-09-11 07:15:39] [Rank 0] step:6501/10000 train_time:277524ms step_avg:42.69ms +[2025-09-11 07:15:39] [Rank 0] step:6521/10000 train_time:278212ms step_avg:42.66ms +[2025-09-11 07:15:39] [Rank 0] step:6521/10000 train_time:278212ms step_avg:42.66ms +[2025-09-11 07:15:40] [Rank 0] step:6541/10000 train_time:278898ms step_avg:42.64ms +[2025-09-11 07:15:40] [Rank 0] step:6541/10000 train_time:278898ms step_avg:42.64ms +[2025-09-11 07:15:41] [Rank 0] step:6561/10000 train_time:279584ms step_avg:42.61ms +[2025-09-11 07:15:41] [Rank 0] step:6561/10000 train_time:279584ms step_avg:42.61ms +[2025-09-11 07:15:41] [Rank 0] step:6581/10000 train_time:280272ms step_avg:42.59ms +[2025-09-11 07:15:41] [Rank 0] step:6581/10000 train_time:280272ms step_avg:42.59ms +[2025-09-11 07:15:42] [Rank 0] step:6601/10000 train_time:280958ms step_avg:42.56ms +[2025-09-11 07:15:42] [Rank 0] step:6601/10000 train_time:280958ms step_avg:42.56ms +[2025-09-11 07:15:43] [Rank 0] step:6621/10000 train_time:281644ms step_avg:42.54ms +[2025-09-11 07:15:43] [Rank 0] step:6621/10000 train_time:281644ms step_avg:42.54ms +[2025-09-11 07:15:43] [Rank 0] step:6641/10000 train_time:282332ms step_avg:42.51ms +[2025-09-11 07:15:43] [Rank 0] step:6641/10000 train_time:282332ms step_avg:42.51ms +[2025-09-11 07:15:44] [Rank 0] step:6661/10000 train_time:283021ms step_avg:42.49ms +[2025-09-11 07:15:44] [Rank 0] step:6661/10000 train_time:283021ms step_avg:42.49ms +[2025-09-11 07:15:45] [Rank 0] step:6681/10000 train_time:283712ms step_avg:42.47ms +[2025-09-11 07:15:45] [Rank 0] step:6681/10000 train_time:283712ms step_avg:42.47ms +[2025-09-11 07:15:45] [Rank 0] step:6701/10000 train_time:284404ms step_avg:42.44ms +[2025-09-11 07:15:45] [Rank 0] step:6701/10000 train_time:284404ms step_avg:42.44ms +[2025-09-11 07:15:46] [Rank 0] step:6721/10000 train_time:285099ms step_avg:42.42ms 
+[2025-09-11 07:15:46] [Rank 0] step:6721/10000 train_time:285099ms step_avg:42.42ms +[2025-09-11 07:15:47] [Rank 0] step:6741/10000 train_time:285793ms step_avg:42.40ms +[2025-09-11 07:15:47] [Rank 0] step:6741/10000 train_time:285793ms step_avg:42.40ms +[2025-09-11 07:15:48] [Rank 0] step:6761/10000 train_time:286486ms step_avg:42.37ms +[2025-09-11 07:15:48] [Rank 0] step:6761/10000 train_time:286486ms step_avg:42.37ms +[2025-09-11 07:15:48] [Rank 0] step:6781/10000 train_time:287181ms step_avg:42.35ms +[2025-09-11 07:15:48] [Rank 0] step:6781/10000 train_time:287181ms step_avg:42.35ms +[2025-09-11 07:15:49] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 07:15:49] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 07:15:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 07:15:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 07:15:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 07:15:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 07:15:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:15:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:15:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 07:15:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 07:15:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 07:15:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 07:15:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 07:15:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 07:15:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 07:15:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 07:15:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 07:15:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 07:15:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 07:15:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 07:15:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 07:15:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 07:15:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 07:15:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 07:15:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 07:15:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 07:15:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 07:15:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 07:15:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 07:15:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 07:15:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 07:15:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 07:15:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 07:15:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 07:15:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 07:15:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 07:15:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 07:15:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 07:15:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 07:15:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 07:15:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 07:15:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 07:15:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 07:15:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 07:15:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 07:15:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:15:59] [Rank 0] PRINT: step:6800/10000 val_loss:4.2130 total_sharp:3.4838e-05 L1_sharp:3.2365e-03 L2_sharp:5.8127e-04 L3_sharp:6.2631e-05 L4_sharp:9.2379e-04 L5_sharp:1.1803e-03 L6_sharp:1.9512e-03 L7_sharp:1.5142e-03 L8_sharp:2.3514e-03 L9_sharp:2.8125e-03 L10_sharp:2.9416e-03 L11_sharp:4.5704e-03 L12_sharp:2.7965e-02 total_fnorm:1.5300e+02 total_l1_linf:3.2563e+05 total_spectral:7.6500e+01 L1_fnorm:1.0078e+00 L2_fnorm:9.9609e-01 L3_fnorm:9.9609e-01 L4_fnorm:9.8828e-01 L5_fnorm:1.0000e+00 L6_fnorm:1.0078e+00 L7_fnorm:1.0078e+00 L8_fnorm:9.9219e-01 L9_fnorm:9.9609e-01 L10_fnorm:1.0078e+00 L11_fnorm:9.9609e-01 L12_fnorm:9.9219e-01 L1_l1linf:2.4023e-01 L2_l1linf:2.2266e-01 L3_l1linf:2.2656e-01 L4_l1linf:2.2949e-01 L5_l1linf:2.3242e-01 L6_l1linf:2.3438e-01 L7_l1linf:2.4023e-01 L8_l1linf:2.2852e-01 L9_l1linf:2.2363e-01 L10_l1linf:2.2363e-01 L11_l1linf:2.1777e-01 L12_l1linf:2.2754e-01 L1_spectral:1.3209e-02 L2_spectral:1.3111e-02 L3_spectral:1.3184e-02 L4_spectral:1.3079e-02 L5_spectral:1.3256e-02 L6_spectral:1.3213e-02 L7_spectral:1.3242e-02 L8_spectral:1.3218e-02 L9_spectral:1.3233e-02 L10_spectral:1.3251e-02 L11_spectral:1.3269e-02 L12_spectral:1.3056e-02 train_time:287854ms step_avg:42.33ms +[2025-09-11 07:15:59] [Rank 0] PRINT: step:6800/10000 val_loss:4.2130 total_sharp:3.4838e-05 L1_sharp:3.2365e-03 L2_sharp:5.8127e-04 L3_sharp:6.2631e-05 L4_sharp:9.2379e-04 L5_sharp:1.1803e-03 L6_sharp:1.9512e-03 L7_sharp:1.5142e-03 L8_sharp:2.3514e-03 L9_sharp:2.8125e-03 L10_sharp:2.9416e-03 L11_sharp:4.5704e-03 L12_sharp:2.7965e-02 total_fnorm:1.5300e+02 total_l1_linf:3.2563e+05 total_spectral:7.6500e+01 L1_fnorm:1.0078e+00 L2_fnorm:9.9609e-01 L3_fnorm:9.9609e-01 L4_fnorm:9.8828e-01 L5_fnorm:1.0000e+00 L6_fnorm:1.0078e+00 L7_fnorm:1.0078e+00 L8_fnorm:9.9219e-01 L9_fnorm:9.9609e-01 L10_fnorm:1.0078e+00 L11_fnorm:9.9609e-01 L12_fnorm:9.9219e-01 L1_l1linf:2.4023e-01 L2_l1linf:2.2266e-01 L3_l1linf:2.2656e-01 L4_l1linf:2.2949e-01 L5_l1linf:2.3242e-01 
L6_l1linf:2.3438e-01 L7_l1linf:2.4023e-01 L8_l1linf:2.2852e-01 L9_l1linf:2.2363e-01 L10_l1linf:2.2363e-01 L11_l1linf:2.1777e-01 L12_l1linf:2.2754e-01 L1_spectral:1.3209e-02 L2_spectral:1.3111e-02 L3_spectral:1.3184e-02 L4_spectral:1.3079e-02 L5_spectral:1.3256e-02 L6_spectral:1.3213e-02 L7_spectral:1.3242e-02 L8_spectral:1.3218e-02 L9_spectral:1.3233e-02 L10_spectral:1.3251e-02 L11_spectral:1.3269e-02 L12_spectral:1.3056e-02 train_time:287854ms step_avg:42.33ms +[2025-09-11 07:16:00] [Rank 0] step:6801/10000 train_time:288986ms step_avg:42.49ms +[2025-09-11 07:16:00] [Rank 0] step:6801/10000 train_time:288986ms step_avg:42.49ms +[2025-09-11 07:16:01] [Rank 0] step:6821/10000 train_time:289717ms step_avg:42.47ms +[2025-09-11 07:16:01] [Rank 0] step:6821/10000 train_time:289717ms step_avg:42.47ms +[2025-09-11 07:16:01] [Rank 0] step:6841/10000 train_time:290417ms step_avg:42.45ms +[2025-09-11 07:16:01] [Rank 0] step:6841/10000 train_time:290417ms step_avg:42.45ms +[2025-09-11 07:16:02] [Rank 0] step:6861/10000 train_time:291114ms step_avg:42.43ms +[2025-09-11 07:16:02] [Rank 0] step:6861/10000 train_time:291114ms step_avg:42.43ms +[2025-09-11 07:16:03] [Rank 0] step:6881/10000 train_time:291810ms step_avg:42.41ms +[2025-09-11 07:16:03] [Rank 0] step:6881/10000 train_time:291810ms step_avg:42.41ms +[2025-09-11 07:16:04] [Rank 0] step:6901/10000 train_time:292504ms step_avg:42.39ms +[2025-09-11 07:16:04] [Rank 0] step:6901/10000 train_time:292504ms step_avg:42.39ms +[2025-09-11 07:16:04] [Rank 0] step:6921/10000 train_time:293199ms step_avg:42.36ms +[2025-09-11 07:16:04] [Rank 0] step:6921/10000 train_time:293199ms step_avg:42.36ms +[2025-09-11 07:16:05] [Rank 0] step:6941/10000 train_time:293895ms step_avg:42.34ms +[2025-09-11 07:16:05] [Rank 0] step:6941/10000 train_time:293895ms step_avg:42.34ms +[2025-09-11 07:16:06] [Rank 0] step:6961/10000 train_time:294590ms step_avg:42.32ms +[2025-09-11 07:16:06] [Rank 0] step:6961/10000 train_time:294590ms step_avg:42.32ms 
+[2025-09-11 07:16:06] [Rank 0] step:6981/10000 train_time:295287ms step_avg:42.30ms +[2025-09-11 07:16:06] [Rank 0] step:6981/10000 train_time:295287ms step_avg:42.30ms +[2025-09-11 07:16:07] [Rank 0] step:7001/10000 train_time:295983ms step_avg:42.28ms +[2025-09-11 07:16:07] [Rank 0] step:7001/10000 train_time:295983ms step_avg:42.28ms +[2025-09-11 07:16:08] [Rank 0] step:7021/10000 train_time:296679ms step_avg:42.26ms +[2025-09-11 07:16:08] [Rank 0] step:7021/10000 train_time:296679ms step_avg:42.26ms +[2025-09-11 07:16:08] [Rank 0] step:7041/10000 train_time:297373ms step_avg:42.23ms +[2025-09-11 07:16:08] [Rank 0] step:7041/10000 train_time:297373ms step_avg:42.23ms +[2025-09-11 07:16:09] [Rank 0] step:7061/10000 train_time:298069ms step_avg:42.21ms +[2025-09-11 07:16:09] [Rank 0] step:7061/10000 train_time:298069ms step_avg:42.21ms +[2025-09-11 07:16:10] [Rank 0] step:7081/10000 train_time:298765ms step_avg:42.19ms +[2025-09-11 07:16:10] [Rank 0] step:7081/10000 train_time:298765ms step_avg:42.19ms +[2025-09-11 07:16:10] [Rank 0] step:7101/10000 train_time:299460ms step_avg:42.17ms +[2025-09-11 07:16:10] [Rank 0] step:7101/10000 train_time:299460ms step_avg:42.17ms +[2025-09-11 07:16:11] [Rank 0] step:7121/10000 train_time:300156ms step_avg:42.15ms +[2025-09-11 07:16:11] [Rank 0] step:7121/10000 train_time:300156ms step_avg:42.15ms +[2025-09-11 07:16:12] [Rank 0] step:7141/10000 train_time:300851ms step_avg:42.13ms +[2025-09-11 07:16:12] [Rank 0] step:7141/10000 train_time:300851ms step_avg:42.13ms +[2025-09-11 07:16:13] [Rank 0] step:7161/10000 train_time:301549ms step_avg:42.11ms +[2025-09-11 07:16:13] [Rank 0] step:7161/10000 train_time:301549ms step_avg:42.11ms +[2025-09-11 07:16:13] [Rank 0] step:7181/10000 train_time:302243ms step_avg:42.09ms +[2025-09-11 07:16:13] [Rank 0] step:7181/10000 train_time:302243ms step_avg:42.09ms +[2025-09-11 07:16:14] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 07:16:14] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 07:16:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 07:16:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 07:16:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 07:16:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 07:16:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:16:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:16:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 07:16:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 07:16:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 07:16:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 07:16:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 07:16:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 07:16:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 07:16:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 07:16:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 07:16:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 07:16:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 07:16:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 07:16:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 07:16:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 07:16:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 07:16:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 07:16:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 07:16:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 07:16:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 07:16:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 07:16:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 07:16:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 07:16:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 07:16:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 07:16:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 07:16:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 07:16:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 07:16:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 07:16:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 07:16:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 07:16:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 07:16:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 07:16:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 07:16:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 07:16:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 07:16:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 07:16:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 07:16:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 07:16:24] [Rank 0] PRINT: step:7200/10000 val_loss:4.1782 total_sharp:3.0022e-05 L1_sharp:2.8197e-03 L2_sharp:1.7747e-04 L3_sharp:-2.7963e-04 L4_sharp:4.6078e-04 L5_sharp:8.2906e-04 L6_sharp:1.4412e-03 L7_sharp:1.1183e-03 L8_sharp:2.1913e-03 L9_sharp:2.2921e-03 L10_sharp:2.3641e-03 L11_sharp:4.3443e-03 L12_sharp:2.7996e-02 total_fnorm:1.3600e+02 total_l1_linf:2.8262e+05 total_spectral:6.8000e+01 L1_fnorm:8.7500e-01 L2_fnorm:8.5938e-01 L3_fnorm:8.6328e-01 L4_fnorm:8.5938e-01 L5_fnorm:8.6719e-01 L6_fnorm:8.7109e-01 L7_fnorm:8.7109e-01 L8_fnorm:8.5547e-01 L9_fnorm:8.6328e-01 L10_fnorm:8.6719e-01 L11_fnorm:8.5938e-01 L12_fnorm:8.5156e-01 L1_l1linf:1.9824e-01 L2_l1linf:1.8945e-01 L3_l1linf:1.9141e-01 L4_l1linf:1.9336e-01 L5_l1linf:1.9141e-01 L6_l1linf:1.9141e-01 L7_l1linf:1.9141e-01 L8_l1linf:1.9238e-01 L9_l1linf:1.8750e-01 L10_l1linf:1.8457e-01 L11_l1linf:1.7773e-01 L12_l1linf:1.8555e-01 L1_spectral:1.1638e-02 L2_spectral:1.1328e-02 L3_spectral:1.1676e-02 L4_spectral:1.1546e-02 L5_spectral:1.1658e-02 L6_spectral:1.1621e-02 L7_spectral:1.1616e-02 L8_spectral:1.1616e-02 L9_spectral:1.1619e-02 L10_spectral:1.1654e-02 L11_spectral:1.1645e-02 L12_spectral:1.1586e-02 train_time:302919ms step_avg:42.07ms +[2025-09-11 07:16:24] [Rank 0] PRINT: step:7200/10000 
val_loss:4.1782 total_sharp:3.0022e-05 L1_sharp:2.8197e-03 L2_sharp:1.7747e-04 L3_sharp:-2.7963e-04 L4_sharp:4.6078e-04 L5_sharp:8.2906e-04 L6_sharp:1.4412e-03 L7_sharp:1.1183e-03 L8_sharp:2.1913e-03 L9_sharp:2.2921e-03 L10_sharp:2.3641e-03 L11_sharp:4.3443e-03 L12_sharp:2.7996e-02 total_fnorm:1.3600e+02 total_l1_linf:2.8262e+05 total_spectral:6.8000e+01 L1_fnorm:8.7500e-01 L2_fnorm:8.5938e-01 L3_fnorm:8.6328e-01 L4_fnorm:8.5938e-01 L5_fnorm:8.6719e-01 L6_fnorm:8.7109e-01 L7_fnorm:8.7109e-01 L8_fnorm:8.5547e-01 L9_fnorm:8.6328e-01 L10_fnorm:8.6719e-01 L11_fnorm:8.5938e-01 L12_fnorm:8.5156e-01 L1_l1linf:1.9824e-01 L2_l1linf:1.8945e-01 L3_l1linf:1.9141e-01 L4_l1linf:1.9336e-01 L5_l1linf:1.9141e-01 L6_l1linf:1.9141e-01 L7_l1linf:1.9141e-01 L8_l1linf:1.9238e-01 L9_l1linf:1.8750e-01 L10_l1linf:1.8457e-01 L11_l1linf:1.7773e-01 L12_l1linf:1.8555e-01 L1_spectral:1.1638e-02 L2_spectral:1.1328e-02 L3_spectral:1.1676e-02 L4_spectral:1.1546e-02 L5_spectral:1.1658e-02 L6_spectral:1.1621e-02 L7_spectral:1.1616e-02 L8_spectral:1.1616e-02 L9_spectral:1.1619e-02 L10_spectral:1.1654e-02 L11_spectral:1.1645e-02 L12_spectral:1.1586e-02 train_time:302919ms step_avg:42.07ms +[2025-09-11 07:16:25] [Rank 0] step:7201/10000 train_time:304059ms step_avg:42.22ms +[2025-09-11 07:16:25] [Rank 0] step:7201/10000 train_time:304059ms step_avg:42.22ms +[2025-09-11 07:16:26] [Rank 0] step:7221/10000 train_time:304794ms step_avg:42.21ms +[2025-09-11 07:16:26] [Rank 0] step:7221/10000 train_time:304794ms step_avg:42.21ms +[2025-09-11 07:16:27] [Rank 0] step:7241/10000 train_time:305490ms step_avg:42.19ms +[2025-09-11 07:16:27] [Rank 0] step:7241/10000 train_time:305490ms step_avg:42.19ms +[2025-09-11 07:16:28] [Rank 0] step:7261/10000 train_time:306735ms step_avg:42.24ms +[2025-09-11 07:16:28] [Rank 0] step:7261/10000 train_time:306735ms step_avg:42.24ms +[2025-09-11 07:16:29] [Rank 0] step:7281/10000 train_time:307436ms step_avg:42.22ms +[2025-09-11 07:16:29] [Rank 0] step:7281/10000 
train_time:307436ms step_avg:42.22ms +[2025-09-11 07:16:29] [Rank 0] step:7301/10000 train_time:308131ms step_avg:42.20ms +[2025-09-11 07:16:29] [Rank 0] step:7301/10000 train_time:308131ms step_avg:42.20ms +[2025-09-11 07:16:30] [Rank 0] step:7321/10000 train_time:309107ms step_avg:42.22ms +[2025-09-11 07:16:30] [Rank 0] step:7321/10000 train_time:309107ms step_avg:42.22ms +[2025-09-11 07:16:31] [Rank 0] step:7341/10000 train_time:309804ms step_avg:42.20ms +[2025-09-11 07:16:31] [Rank 0] step:7341/10000 train_time:309804ms step_avg:42.20ms +[2025-09-11 07:16:32] [Rank 0] step:7361/10000 train_time:310499ms step_avg:42.18ms +[2025-09-11 07:16:32] [Rank 0] step:7361/10000 train_time:310499ms step_avg:42.18ms +[2025-09-11 07:16:32] [Rank 0] step:7381/10000 train_time:311195ms step_avg:42.16ms +[2025-09-11 07:16:32] [Rank 0] step:7381/10000 train_time:311195ms step_avg:42.16ms +[2025-09-11 07:16:33] [Rank 0] step:7401/10000 train_time:311890ms step_avg:42.14ms +[2025-09-11 07:16:33] [Rank 0] step:7401/10000 train_time:311890ms step_avg:42.14ms +[2025-09-11 07:16:34] [Rank 0] step:7421/10000 train_time:312584ms step_avg:42.12ms +[2025-09-11 07:16:34] [Rank 0] step:7421/10000 train_time:312584ms step_avg:42.12ms +[2025-09-11 07:16:34] [Rank 0] step:7441/10000 train_time:313281ms step_avg:42.10ms +[2025-09-11 07:16:34] [Rank 0] step:7441/10000 train_time:313281ms step_avg:42.10ms +[2025-09-11 07:16:35] [Rank 0] step:7461/10000 train_time:313977ms step_avg:42.08ms +[2025-09-11 07:16:35] [Rank 0] step:7461/10000 train_time:313977ms step_avg:42.08ms +[2025-09-11 07:16:36] [Rank 0] step:7481/10000 train_time:314675ms step_avg:42.06ms +[2025-09-11 07:16:36] [Rank 0] step:7481/10000 train_time:314675ms step_avg:42.06ms +[2025-09-11 07:16:36] [Rank 0] step:7501/10000 train_time:315372ms step_avg:42.04ms +[2025-09-11 07:16:36] [Rank 0] step:7501/10000 train_time:315372ms step_avg:42.04ms +[2025-09-11 07:16:37] [Rank 0] step:7521/10000 train_time:316070ms step_avg:42.02ms 
+[2025-09-11 07:16:37] [Rank 0] step:7521/10000 train_time:316070ms step_avg:42.02ms +[2025-09-11 07:16:38] [Rank 0] step:7541/10000 train_time:316764ms step_avg:42.01ms +[2025-09-11 07:16:38] [Rank 0] step:7541/10000 train_time:316764ms step_avg:42.01ms +[2025-09-11 07:16:39] [Rank 0] step:7561/10000 train_time:317462ms step_avg:41.99ms +[2025-09-11 07:16:39] [Rank 0] step:7561/10000 train_time:317462ms step_avg:41.99ms +[2025-09-11 07:16:39] [Rank 0] step:7581/10000 train_time:318158ms step_avg:41.97ms +[2025-09-11 07:16:39] [Rank 0] step:7581/10000 train_time:318158ms step_avg:41.97ms +[2025-09-11 07:16:40] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 07:16:40] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 07:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 07:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 07:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 07:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 07:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 07:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 07:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 07:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 07:16:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 07:16:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 07:16:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 07:16:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 07:16:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 07:16:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 07:16:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 07:16:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 07:16:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 07:16:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 07:16:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 07:16:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 07:16:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 07:16:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 07:16:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 07:16:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 07:16:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 07:16:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 07:16:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 07:16:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 07:16:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 07:16:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 07:16:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 07:16:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 07:16:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 07:16:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 07:16:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 07:16:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 07:16:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 07:16:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 07:16:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 07:16:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 07:16:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 07:16:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:16:50] [Rank 0] PRINT: step:7600/10000 val_loss:4.1465 total_sharp:2.9445e-05 L1_sharp:3.8377e-03 L2_sharp:5.0996e-04 L3_sharp:7.8866e-05 L4_sharp:5.6819e-04 L5_sharp:7.5552e-04 L6_sharp:8.1292e-04 L7_sharp:9.6735e-04 L8_sharp:1.8558e-03 L9_sharp:2.5348e-03 L10_sharp:2.3608e-03 L11_sharp:4.3494e-03 L12_sharp:4.5763e-02 total_fnorm:1.1650e+02 total_l1_linf:2.3142e+05 total_spectral:5.8250e+01 L1_fnorm:7.3828e-01 L2_fnorm:7.1875e-01 L3_fnorm:7.2266e-01 L4_fnorm:7.1484e-01 L5_fnorm:7.2266e-01 L6_fnorm:7.2266e-01 L7_fnorm:7.3047e-01 L8_fnorm:7.1094e-01 L9_fnorm:7.1875e-01 L10_fnorm:7.2266e-01 L11_fnorm:7.1484e-01 L12_fnorm:7.1094e-01 L1_l1linf:1.6016e-01 L2_l1linf:1.4844e-01 L3_l1linf:1.5234e-01 L4_l1linf:1.5137e-01 L5_l1linf:1.5234e-01 L6_l1linf:1.5234e-01 L7_l1linf:1.5234e-01 L8_l1linf:1.4941e-01 L9_l1linf:1.4551e-01 L10_l1linf:1.4453e-01 L11_l1linf:1.3867e-01 L12_l1linf:1.5137e-01 L1_spectral:9.9841e-03 L2_spectral:9.7010e-03 L3_spectral:1.0085e-02 L4_spectral:9.9676e-03 L5_spectral:1.0003e-02 L6_spectral:9.9784e-03 L7_spectral:9.9502e-03 L8_spectral:9.8194e-03 L9_spectral:9.8902e-03 L10_spectral:9.9161e-03 L11_spectral:9.9652e-03 L12_spectral:9.9344e-03 train_time:318836ms step_avg:41.95ms +[2025-09-11 07:16:50] [Rank 0] PRINT: step:7600/10000 val_loss:4.1465 total_sharp:2.9445e-05 L1_sharp:3.8377e-03 L2_sharp:5.0996e-04 L3_sharp:7.8866e-05 L4_sharp:5.6819e-04 L5_sharp:7.5552e-04 L6_sharp:8.1292e-04 L7_sharp:9.6735e-04 L8_sharp:1.8558e-03 L9_sharp:2.5348e-03 L10_sharp:2.3608e-03 L11_sharp:4.3494e-03 L12_sharp:4.5763e-02 total_fnorm:1.1650e+02 total_l1_linf:2.3142e+05 total_spectral:5.8250e+01 L1_fnorm:7.3828e-01 L2_fnorm:7.1875e-01 L3_fnorm:7.2266e-01 L4_fnorm:7.1484e-01 L5_fnorm:7.2266e-01 L6_fnorm:7.2266e-01 L7_fnorm:7.3047e-01 L8_fnorm:7.1094e-01 L9_fnorm:7.1875e-01 L10_fnorm:7.2266e-01 L11_fnorm:7.1484e-01 L12_fnorm:7.1094e-01 L1_l1linf:1.6016e-01 L2_l1linf:1.4844e-01 L3_l1linf:1.5234e-01 L4_l1linf:1.5137e-01 L5_l1linf:1.5234e-01 
L6_l1linf:1.5234e-01 L7_l1linf:1.5234e-01 L8_l1linf:1.4941e-01 L9_l1linf:1.4551e-01 L10_l1linf:1.4453e-01 L11_l1linf:1.3867e-01 L12_l1linf:1.5137e-01 L1_spectral:9.9841e-03 L2_spectral:9.7010e-03 L3_spectral:1.0085e-02 L4_spectral:9.9676e-03 L5_spectral:1.0003e-02 L6_spectral:9.9784e-03 L7_spectral:9.9502e-03 L8_spectral:9.8194e-03 L9_spectral:9.8902e-03 L10_spectral:9.9161e-03 L11_spectral:9.9652e-03 L12_spectral:9.9344e-03 train_time:318836ms step_avg:41.95ms +[2025-09-11 07:16:51] [Rank 0] step:7601/10000 train_time:319979ms step_avg:42.10ms +[2025-09-11 07:16:51] [Rank 0] step:7601/10000 train_time:319979ms step_avg:42.10ms +[2025-09-11 07:16:52] [Rank 0] step:7621/10000 train_time:320695ms step_avg:42.08ms +[2025-09-11 07:16:52] [Rank 0] step:7621/10000 train_time:320695ms step_avg:42.08ms +[2025-09-11 07:16:53] [Rank 0] step:7641/10000 train_time:321393ms step_avg:42.06ms +[2025-09-11 07:16:53] [Rank 0] step:7641/10000 train_time:321393ms step_avg:42.06ms +[2025-09-11 07:16:53] [Rank 0] step:7661/10000 train_time:322090ms step_avg:42.04ms +[2025-09-11 07:16:53] [Rank 0] step:7661/10000 train_time:322090ms step_avg:42.04ms +[2025-09-11 07:16:54] [Rank 0] step:7681/10000 train_time:322786ms step_avg:42.02ms +[2025-09-11 07:16:54] [Rank 0] step:7681/10000 train_time:322786ms step_avg:42.02ms +[2025-09-11 07:16:55] [Rank 0] step:7701/10000 train_time:323485ms step_avg:42.01ms +[2025-09-11 07:16:55] [Rank 0] step:7701/10000 train_time:323485ms step_avg:42.01ms +[2025-09-11 07:16:55] [Rank 0] step:7721/10000 train_time:324180ms step_avg:41.99ms +[2025-09-11 07:16:55] [Rank 0] step:7721/10000 train_time:324180ms step_avg:41.99ms +[2025-09-11 07:16:56] [Rank 0] step:7741/10000 train_time:324877ms step_avg:41.97ms +[2025-09-11 07:16:56] [Rank 0] step:7741/10000 train_time:324877ms step_avg:41.97ms +[2025-09-11 07:16:57] [Rank 0] step:7761/10000 train_time:325574ms step_avg:41.95ms +[2025-09-11 07:16:57] [Rank 0] step:7761/10000 train_time:325574ms step_avg:41.95ms 
+[2025-09-11 07:16:57] [Rank 0] step:7781/10000 train_time:326272ms step_avg:41.93ms +[2025-09-11 07:16:57] [Rank 0] step:7781/10000 train_time:326272ms step_avg:41.93ms +[2025-09-11 07:16:58] [Rank 0] step:7801/10000 train_time:326967ms step_avg:41.91ms +[2025-09-11 07:16:58] [Rank 0] step:7801/10000 train_time:326967ms step_avg:41.91ms +[2025-09-11 07:16:59] [Rank 0] step:7821/10000 train_time:327664ms step_avg:41.90ms +[2025-09-11 07:16:59] [Rank 0] step:7821/10000 train_time:327664ms step_avg:41.90ms +[2025-09-11 07:16:59] [Rank 0] step:7841/10000 train_time:328362ms step_avg:41.88ms +[2025-09-11 07:16:59] [Rank 0] step:7841/10000 train_time:328362ms step_avg:41.88ms +[2025-09-11 07:17:00] [Rank 0] step:7861/10000 train_time:329060ms step_avg:41.86ms +[2025-09-11 07:17:00] [Rank 0] step:7861/10000 train_time:329060ms step_avg:41.86ms +[2025-09-11 07:17:01] [Rank 0] step:7881/10000 train_time:329757ms step_avg:41.84ms +[2025-09-11 07:17:01] [Rank 0] step:7881/10000 train_time:329757ms step_avg:41.84ms +[2025-09-11 07:17:02] [Rank 0] step:7901/10000 train_time:330455ms step_avg:41.82ms +[2025-09-11 07:17:02] [Rank 0] step:7901/10000 train_time:330455ms step_avg:41.82ms +[2025-09-11 07:17:02] [Rank 0] step:7921/10000 train_time:331152ms step_avg:41.81ms +[2025-09-11 07:17:02] [Rank 0] step:7921/10000 train_time:331152ms step_avg:41.81ms +[2025-09-11 07:17:03] [Rank 0] step:7941/10000 train_time:331850ms step_avg:41.79ms +[2025-09-11 07:17:03] [Rank 0] step:7941/10000 train_time:331850ms step_avg:41.79ms +[2025-09-11 07:17:04] [Rank 0] step:7961/10000 train_time:332545ms step_avg:41.77ms +[2025-09-11 07:17:04] [Rank 0] step:7961/10000 train_time:332545ms step_avg:41.77ms +[2025-09-11 07:17:04] [Rank 0] step:7981/10000 train_time:333245ms step_avg:41.75ms +[2025-09-11 07:17:04] [Rank 0] step:7981/10000 train_time:333245ms step_avg:41.75ms +[2025-09-11 07:17:05] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 07:17:05] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 07:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 07:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 07:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 07:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 07:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 07:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 07:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 07:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 07:17:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 07:17:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 07:17:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 07:17:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 07:17:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 07:17:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 07:17:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 07:17:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 07:17:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 07:17:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 07:17:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 07:17:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 07:17:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 07:17:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 07:17:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 07:17:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 07:17:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 07:17:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 07:17:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 07:17:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 07:17:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 07:17:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 07:17:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 07:17:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 07:17:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 07:17:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 07:17:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 07:17:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 07:17:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 07:17:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 07:17:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 07:17:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 07:17:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 07:17:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 07:17:15] [Rank 0] PRINT: step:8000/10000 val_loss:4.1067 total_sharp:2.6083e-05 L1_sharp:3.1055e-03 L2_sharp:1.5898e-03 L3_sharp:4.2170e-04 L4_sharp:3.4398e-04 L5_sharp:7.9331e-04 L6_sharp:1.3268e-03 L7_sharp:9.2921e-04 L8_sharp:1.7301e-03 L9_sharp:2.2001e-03 L10_sharp:2.0876e-03 L11_sharp:4.0005e-03 L12_sharp:5.7496e-02 total_fnorm:1.0200e+02 total_l1_linf:1.9558e+05 total_spectral:5.1000e+01 L1_fnorm:6.1328e-01 L2_fnorm:5.9375e-01 L3_fnorm:5.8984e-01 L4_fnorm:5.8984e-01 L5_fnorm:5.9375e-01 L6_fnorm:5.9766e-01 L7_fnorm:5.9375e-01 L8_fnorm:5.8203e-01 L9_fnorm:5.8984e-01 L10_fnorm:5.8984e-01 L11_fnorm:5.8203e-01 L12_fnorm:5.8203e-01 L1_l1linf:1.2061e-01 L2_l1linf:1.1816e-01 L3_l1linf:1.1621e-01 L4_l1linf:1.1865e-01 L5_l1linf:1.1719e-01 L6_l1linf:1.2012e-01 L7_l1linf:1.1719e-01 L8_l1linf:1.1670e-01 L9_l1linf:1.1230e-01 L10_l1linf:1.0986e-01 L11_l1linf:1.0889e-01 L12_l1linf:1.1719e-01 L1_spectral:8.6014e-03 L2_spectral:8.0743e-03 L3_spectral:8.4785e-03 L4_spectral:8.4231e-03 L5_spectral:8.4328e-03 L6_spectral:8.4300e-03 L7_spectral:8.3516e-03 L8_spectral:8.2886e-03 L9_spectral:8.2957e-03 L10_spectral:8.3764e-03 L11_spectral:8.3064e-03 L12_spectral:8.3536e-03 train_time:333920ms step_avg:41.74ms +[2025-09-11 07:17:15] [Rank 0] PRINT: step:8000/10000 
val_loss:4.1067 total_sharp:2.6083e-05 L1_sharp:3.1055e-03 L2_sharp:1.5898e-03 L3_sharp:4.2170e-04 L4_sharp:3.4398e-04 L5_sharp:7.9331e-04 L6_sharp:1.3268e-03 L7_sharp:9.2921e-04 L8_sharp:1.7301e-03 L9_sharp:2.2001e-03 L10_sharp:2.0876e-03 L11_sharp:4.0005e-03 L12_sharp:5.7496e-02 total_fnorm:1.0200e+02 total_l1_linf:1.9558e+05 total_spectral:5.1000e+01 L1_fnorm:6.1328e-01 L2_fnorm:5.9375e-01 L3_fnorm:5.8984e-01 L4_fnorm:5.8984e-01 L5_fnorm:5.9375e-01 L6_fnorm:5.9766e-01 L7_fnorm:5.9375e-01 L8_fnorm:5.8203e-01 L9_fnorm:5.8984e-01 L10_fnorm:5.8984e-01 L11_fnorm:5.8203e-01 L12_fnorm:5.8203e-01 L1_l1linf:1.2061e-01 L2_l1linf:1.1816e-01 L3_l1linf:1.1621e-01 L4_l1linf:1.1865e-01 L5_l1linf:1.1719e-01 L6_l1linf:1.2012e-01 L7_l1linf:1.1719e-01 L8_l1linf:1.1670e-01 L9_l1linf:1.1230e-01 L10_l1linf:1.0986e-01 L11_l1linf:1.0889e-01 L12_l1linf:1.1719e-01 L1_spectral:8.6014e-03 L2_spectral:8.0743e-03 L3_spectral:8.4785e-03 L4_spectral:8.4231e-03 L5_spectral:8.4328e-03 L6_spectral:8.4300e-03 L7_spectral:8.3516e-03 L8_spectral:8.2886e-03 L9_spectral:8.2957e-03 L10_spectral:8.3764e-03 L11_spectral:8.3064e-03 L12_spectral:8.3536e-03 train_time:333920ms step_avg:41.74ms +[2025-09-11 07:17:16] [Rank 0] step:8001/10000 train_time:335094ms step_avg:41.88ms +[2025-09-11 07:17:16] [Rank 0] step:8001/10000 train_time:335094ms step_avg:41.88ms +[2025-09-11 07:17:17] [Rank 0] step:8021/10000 train_time:335808ms step_avg:41.87ms +[2025-09-11 07:17:17] [Rank 0] step:8021/10000 train_time:335808ms step_avg:41.87ms +[2025-09-11 07:17:18] [Rank 0] step:8041/10000 train_time:336505ms step_avg:41.85ms +[2025-09-11 07:17:18] [Rank 0] step:8041/10000 train_time:336505ms step_avg:41.85ms +[2025-09-11 07:17:18] [Rank 0] step:8061/10000 train_time:337204ms step_avg:41.83ms +[2025-09-11 07:17:18] [Rank 0] step:8061/10000 train_time:337204ms step_avg:41.83ms +[2025-09-11 07:17:19] [Rank 0] step:8081/10000 train_time:337899ms step_avg:41.81ms +[2025-09-11 07:17:19] [Rank 0] step:8081/10000 
train_time:337899ms step_avg:41.81ms +[2025-09-11 07:17:20] [Rank 0] step:8101/10000 train_time:338593ms step_avg:41.80ms +[2025-09-11 07:17:20] [Rank 0] step:8101/10000 train_time:338593ms step_avg:41.80ms +[2025-09-11 07:17:20] [Rank 0] step:8121/10000 train_time:339293ms step_avg:41.78ms +[2025-09-11 07:17:20] [Rank 0] step:8121/10000 train_time:339293ms step_avg:41.78ms +[2025-09-11 07:17:22] [Rank 0] step:8141/10000 train_time:340714ms step_avg:41.85ms +[2025-09-11 07:17:22] [Rank 0] step:8141/10000 train_time:340714ms step_avg:41.85ms +[2025-09-11 07:17:23] [Rank 0] step:8161/10000 train_time:341414ms step_avg:41.83ms +[2025-09-11 07:17:23] [Rank 0] step:8161/10000 train_time:341414ms step_avg:41.83ms +[2025-09-11 07:17:23] [Rank 0] step:8181/10000 train_time:342122ms step_avg:41.82ms +[2025-09-11 07:17:23] [Rank 0] step:8181/10000 train_time:342122ms step_avg:41.82ms +[2025-09-11 07:17:24] [Rank 0] step:8201/10000 train_time:342827ms step_avg:41.80ms +[2025-09-11 07:17:24] [Rank 0] step:8201/10000 train_time:342827ms step_avg:41.80ms +[2025-09-11 07:17:25] [Rank 0] step:8221/10000 train_time:343530ms step_avg:41.79ms +[2025-09-11 07:17:25] [Rank 0] step:8221/10000 train_time:343530ms step_avg:41.79ms +[2025-09-11 07:17:25] [Rank 0] step:8241/10000 train_time:344242ms step_avg:41.77ms +[2025-09-11 07:17:25] [Rank 0] step:8241/10000 train_time:344242ms step_avg:41.77ms +[2025-09-11 07:17:26] [Rank 0] step:8261/10000 train_time:344944ms step_avg:41.76ms +[2025-09-11 07:17:26] [Rank 0] step:8261/10000 train_time:344944ms step_avg:41.76ms +[2025-09-11 07:17:27] [Rank 0] step:8281/10000 train_time:345644ms step_avg:41.74ms +[2025-09-11 07:17:27] [Rank 0] step:8281/10000 train_time:345644ms step_avg:41.74ms +[2025-09-11 07:17:28] [Rank 0] step:8301/10000 train_time:346346ms step_avg:41.72ms +[2025-09-11 07:17:28] [Rank 0] step:8301/10000 train_time:346346ms step_avg:41.72ms +[2025-09-11 07:17:28] [Rank 0] step:8321/10000 train_time:347048ms step_avg:41.71ms 
+[2025-09-11 07:17:28] [Rank 0] step:8321/10000 train_time:347048ms step_avg:41.71ms +[2025-09-11 07:17:29] [Rank 0] step:8341/10000 train_time:347758ms step_avg:41.69ms +[2025-09-11 07:17:29] [Rank 0] step:8341/10000 train_time:347758ms step_avg:41.69ms +[2025-09-11 07:17:30] [Rank 0] step:8361/10000 train_time:348457ms step_avg:41.68ms +[2025-09-11 07:17:30] [Rank 0] step:8361/10000 train_time:348457ms step_avg:41.68ms +[2025-09-11 07:17:31] [Rank 0] step:8381/10000 train_time:349582ms step_avg:41.71ms +[2025-09-11 07:17:31] [Rank 0] step:8381/10000 train_time:349582ms step_avg:41.71ms +[2025-09-11 07:17:32] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 07:17:32] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 07:17:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 07:17:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 07:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 07:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 07:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 07:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 07:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 07:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 07:17:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 07:17:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 07:17:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 07:17:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 07:17:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 07:17:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 07:17:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 07:17:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 07:17:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 07:17:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 07:17:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 07:17:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 07:17:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 07:17:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 07:17:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 07:17:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 07:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 07:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 07:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 07:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 07:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 07:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 07:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 07:17:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 07:17:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 07:17:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 07:17:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 07:17:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 07:17:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 07:17:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 07:17:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 07:17:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 07:17:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 07:17:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:17:44] [Rank 0] PRINT: step:8400/10000 val_loss:4.0804 total_sharp:2.1887e-05 L1_sharp:2.1975e-03 L2_sharp:4.3809e-04 L3_sharp:1.4395e-04 L4_sharp:5.0504e-04 L5_sharp:3.3436e-04 L6_sharp:7.7777e-04 L7_sharp:1.0333e-03 L8_sharp:1.4798e-03 L9_sharp:1.8081e-03 L10_sharp:2.0081e-03 L11_sharp:3.9476e-03 L12_sharp:4.5038e-02 total_fnorm:8.2500e+01 total_l1_linf:1.4643e+05 total_spectral:4.1250e+01 L1_fnorm:4.8438e-01 L2_fnorm:4.6289e-01 L3_fnorm:4.6484e-01 L4_fnorm:4.6094e-01 L5_fnorm:4.6484e-01 L6_fnorm:4.6680e-01 L7_fnorm:4.6680e-01 L8_fnorm:4.5508e-01 L9_fnorm:4.6094e-01 L10_fnorm:4.6289e-01 L11_fnorm:4.5703e-01 L12_fnorm:4.5508e-01 L1_l1linf:8.8867e-02 L2_l1linf:8.7402e-02 L3_l1linf:8.6426e-02 L4_l1linf:8.4961e-02 L5_l1linf:8.4961e-02 L6_l1linf:8.4961e-02 L7_l1linf:8.3984e-02 L8_l1linf:8.3984e-02 L9_l1linf:8.1055e-02 L10_l1linf:8.1543e-02 L11_l1linf:8.0566e-02 L12_l1linf:8.3008e-02 L1_spectral:7.1232e-03 L2_spectral:6.5607e-03 L3_spectral:6.8721e-03 L4_spectral:6.8073e-03 L5_spectral:6.8298e-03 L6_spectral:6.8276e-03 L7_spectral:6.6950e-03 L8_spectral:6.6589e-03 L9_spectral:6.6375e-03 L10_spectral:6.6506e-03 L11_spectral:6.6409e-03 L12_spectral:6.7056e-03 train_time:350388ms step_avg:41.71ms +[2025-09-11 07:17:44] [Rank 0] PRINT: step:8400/10000 val_loss:4.0804 total_sharp:2.1887e-05 L1_sharp:2.1975e-03 L2_sharp:4.3809e-04 L3_sharp:1.4395e-04 L4_sharp:5.0504e-04 L5_sharp:3.3436e-04 L6_sharp:7.7777e-04 L7_sharp:1.0333e-03 L8_sharp:1.4798e-03 L9_sharp:1.8081e-03 L10_sharp:2.0081e-03 L11_sharp:3.9476e-03 L12_sharp:4.5038e-02 total_fnorm:8.2500e+01 total_l1_linf:1.4643e+05 total_spectral:4.1250e+01 L1_fnorm:4.8438e-01 L2_fnorm:4.6289e-01 L3_fnorm:4.6484e-01 L4_fnorm:4.6094e-01 L5_fnorm:4.6484e-01 L6_fnorm:4.6680e-01 L7_fnorm:4.6680e-01 L8_fnorm:4.5508e-01 L9_fnorm:4.6094e-01 L10_fnorm:4.6289e-01 L11_fnorm:4.5703e-01 L12_fnorm:4.5508e-01 L1_l1linf:8.8867e-02 L2_l1linf:8.7402e-02 L3_l1linf:8.6426e-02 L4_l1linf:8.4961e-02 L5_l1linf:8.4961e-02 
L6_l1linf:8.4961e-02 L7_l1linf:8.3984e-02 L8_l1linf:8.3984e-02 L9_l1linf:8.1055e-02 L10_l1linf:8.1543e-02 L11_l1linf:8.0566e-02 L12_l1linf:8.3008e-02 L1_spectral:7.1232e-03 L2_spectral:6.5607e-03 L3_spectral:6.8721e-03 L4_spectral:6.8073e-03 L5_spectral:6.8298e-03 L6_spectral:6.8276e-03 L7_spectral:6.6950e-03 L8_spectral:6.6589e-03 L9_spectral:6.6375e-03 L10_spectral:6.6506e-03 L11_spectral:6.6409e-03 L12_spectral:6.7056e-03 train_time:350388ms step_avg:41.71ms +[2025-09-11 07:17:45] [Rank 0] step:8401/10000 train_time:351613ms step_avg:41.85ms +[2025-09-11 07:17:45] [Rank 0] step:8401/10000 train_time:351613ms step_avg:41.85ms +[2025-09-11 07:17:46] [Rank 0] step:8421/10000 train_time:352329ms step_avg:41.84ms +[2025-09-11 07:17:46] [Rank 0] step:8421/10000 train_time:352329ms step_avg:41.84ms +[2025-09-11 07:17:47] [Rank 0] step:8441/10000 train_time:353036ms step_avg:41.82ms +[2025-09-11 07:17:47] [Rank 0] step:8441/10000 train_time:353036ms step_avg:41.82ms +[2025-09-11 07:17:47] [Rank 0] step:8461/10000 train_time:353740ms step_avg:41.81ms +[2025-09-11 07:17:47] [Rank 0] step:8461/10000 train_time:353740ms step_avg:41.81ms +[2025-09-11 07:17:48] [Rank 0] step:8481/10000 train_time:354446ms step_avg:41.79ms +[2025-09-11 07:17:48] [Rank 0] step:8481/10000 train_time:354446ms step_avg:41.79ms +[2025-09-11 07:17:49] [Rank 0] step:8501/10000 train_time:355150ms step_avg:41.78ms +[2025-09-11 07:17:49] [Rank 0] step:8501/10000 train_time:355150ms step_avg:41.78ms +[2025-09-11 07:17:50] [Rank 0] step:8521/10000 train_time:355854ms step_avg:41.76ms +[2025-09-11 07:17:50] [Rank 0] step:8521/10000 train_time:355854ms step_avg:41.76ms +[2025-09-11 07:17:50] [Rank 0] step:8541/10000 train_time:356555ms step_avg:41.75ms +[2025-09-11 07:17:50] [Rank 0] step:8541/10000 train_time:356555ms step_avg:41.75ms +[2025-09-11 07:17:51] [Rank 0] step:8561/10000 train_time:357265ms step_avg:41.73ms +[2025-09-11 07:17:51] [Rank 0] step:8561/10000 train_time:357265ms step_avg:41.73ms 
+[2025-09-11 07:17:52] [Rank 0] step:8581/10000 train_time:357971ms step_avg:41.72ms +[2025-09-11 07:17:52] [Rank 0] step:8581/10000 train_time:357971ms step_avg:41.72ms +[2025-09-11 07:17:52] [Rank 0] step:8601/10000 train_time:358675ms step_avg:41.70ms +[2025-09-11 07:17:52] [Rank 0] step:8601/10000 train_time:358675ms step_avg:41.70ms +[2025-09-11 07:17:53] [Rank 0] step:8621/10000 train_time:359378ms step_avg:41.69ms +[2025-09-11 07:17:53] [Rank 0] step:8621/10000 train_time:359378ms step_avg:41.69ms +[2025-09-11 07:17:54] [Rank 0] step:8641/10000 train_time:360081ms step_avg:41.67ms +[2025-09-11 07:17:54] [Rank 0] step:8641/10000 train_time:360081ms step_avg:41.67ms +[2025-09-11 07:17:54] [Rank 0] step:8661/10000 train_time:360785ms step_avg:41.66ms +[2025-09-11 07:17:54] [Rank 0] step:8661/10000 train_time:360785ms step_avg:41.66ms +[2025-09-11 07:17:55] [Rank 0] step:8681/10000 train_time:361490ms step_avg:41.64ms +[2025-09-11 07:17:55] [Rank 0] step:8681/10000 train_time:361490ms step_avg:41.64ms +[2025-09-11 07:17:56] [Rank 0] step:8701/10000 train_time:362193ms step_avg:41.63ms +[2025-09-11 07:17:56] [Rank 0] step:8701/10000 train_time:362193ms step_avg:41.63ms +[2025-09-11 07:17:57] [Rank 0] step:8721/10000 train_time:362899ms step_avg:41.61ms +[2025-09-11 07:17:57] [Rank 0] step:8721/10000 train_time:362899ms step_avg:41.61ms +[2025-09-11 07:17:57] [Rank 0] step:8741/10000 train_time:363600ms step_avg:41.60ms +[2025-09-11 07:17:57] [Rank 0] step:8741/10000 train_time:363600ms step_avg:41.60ms +[2025-09-11 07:17:58] [Rank 0] step:8761/10000 train_time:364306ms step_avg:41.58ms +[2025-09-11 07:17:58] [Rank 0] step:8761/10000 train_time:364306ms step_avg:41.58ms +[2025-09-11 07:17:59] [Rank 0] step:8781/10000 train_time:365007ms step_avg:41.57ms +[2025-09-11 07:17:59] [Rank 0] step:8781/10000 train_time:365007ms step_avg:41.57ms +[2025-09-11 07:17:59] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 07:17:59] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 07:18:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 07:18:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 07:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 07:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 07:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 07:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 07:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 07:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 07:18:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 07:18:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 07:18:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 07:18:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 07:18:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 07:18:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 07:18:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 07:18:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 07:18:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 07:18:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 07:18:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 07:18:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 07:18:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 07:18:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 07:18:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 07:18:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 07:18:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 07:18:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 07:18:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 07:18:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 07:18:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 07:18:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 07:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 07:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 07:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 07:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 07:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 07:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 07:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 07:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 07:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 07:18:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 07:18:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 07:18:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 07:18:09] [Rank 0] PRINT: step:8800/10000 val_loss:4.0655 total_sharp:2.2459e-05 L1_sharp:2.6480e-03 L2_sharp:5.1729e-04 L3_sharp:1.7347e-04 L4_sharp:2.5127e-04 L5_sharp:7.8319e-04 L6_sharp:5.9732e-04 L7_sharp:5.9467e-04 L8_sharp:1.5150e-03 L9_sharp:1.8286e-03 L10_sharp:1.8957e-03 L11_sharp:2.9948e-03 L12_sharp:2.2408e-02 total_fnorm:6.1250e+01 total_l1_linf:9.8304e+04 total_spectral:3.0625e+01 L1_fnorm:3.6328e-01 L2_fnorm:3.4180e-01 L3_fnorm:3.4375e-01 L4_fnorm:3.4180e-01 L5_fnorm:3.4180e-01 L6_fnorm:3.4570e-01 L7_fnorm:3.4375e-01 L8_fnorm:3.3594e-01 L9_fnorm:3.3789e-01 L10_fnorm:3.3984e-01 L11_fnorm:3.3594e-01 L12_fnorm:3.3203e-01 L1_l1linf:6.0547e-02 L2_l1linf:5.8350e-02 L3_l1linf:5.9570e-02 L4_l1linf:5.7373e-02 L5_l1linf:5.6885e-02 L6_l1linf:5.7861e-02 L7_l1linf:5.7129e-02 L8_l1linf:5.5420e-02 L9_l1linf:5.3955e-02 L10_l1linf:5.4688e-02 L11_l1linf:5.3711e-02 L12_l1linf:5.3711e-02 L1_spectral:5.4467e-03 L2_spectral:4.9758e-03 L3_spectral:5.1972e-03 L4_spectral:5.1848e-03 L5_spectral:5.0992e-03 L6_spectral:5.1815e-03 L7_spectral:5.0701e-03 L8_spectral:5.0569e-03 L9_spectral:5.0293e-03 L10_spectral:5.0154e-03 L11_spectral:5.0044e-03 L12_spectral:5.1162e-03 train_time:365689ms step_avg:41.56ms +[2025-09-11 07:18:09] [Rank 0] PRINT: step:8800/10000 
val_loss:4.0655 total_sharp:2.2459e-05 L1_sharp:2.6480e-03 L2_sharp:5.1729e-04 L3_sharp:1.7347e-04 L4_sharp:2.5127e-04 L5_sharp:7.8319e-04 L6_sharp:5.9732e-04 L7_sharp:5.9467e-04 L8_sharp:1.5150e-03 L9_sharp:1.8286e-03 L10_sharp:1.8957e-03 L11_sharp:2.9948e-03 L12_sharp:2.2408e-02 total_fnorm:6.1250e+01 total_l1_linf:9.8304e+04 total_spectral:3.0625e+01 L1_fnorm:3.6328e-01 L2_fnorm:3.4180e-01 L3_fnorm:3.4375e-01 L4_fnorm:3.4180e-01 L5_fnorm:3.4180e-01 L6_fnorm:3.4570e-01 L7_fnorm:3.4375e-01 L8_fnorm:3.3594e-01 L9_fnorm:3.3789e-01 L10_fnorm:3.3984e-01 L11_fnorm:3.3594e-01 L12_fnorm:3.3203e-01 L1_l1linf:6.0547e-02 L2_l1linf:5.8350e-02 L3_l1linf:5.9570e-02 L4_l1linf:5.7373e-02 L5_l1linf:5.6885e-02 L6_l1linf:5.7861e-02 L7_l1linf:5.7129e-02 L8_l1linf:5.5420e-02 L9_l1linf:5.3955e-02 L10_l1linf:5.4688e-02 L11_l1linf:5.3711e-02 L12_l1linf:5.3711e-02 L1_spectral:5.4467e-03 L2_spectral:4.9758e-03 L3_spectral:5.1972e-03 L4_spectral:5.1848e-03 L5_spectral:5.0992e-03 L6_spectral:5.1815e-03 L7_spectral:5.0701e-03 L8_spectral:5.0569e-03 L9_spectral:5.0293e-03 L10_spectral:5.0154e-03 L11_spectral:5.0044e-03 L12_spectral:5.1162e-03 train_time:365689ms step_avg:41.56ms +[2025-09-11 07:18:10] [Rank 0] step:8801/10000 train_time:366871ms step_avg:41.69ms +[2025-09-11 07:18:10] [Rank 0] step:8801/10000 train_time:366871ms step_avg:41.69ms +[2025-09-11 07:18:11] [Rank 0] step:8821/10000 train_time:367584ms step_avg:41.67ms +[2025-09-11 07:18:11] [Rank 0] step:8821/10000 train_time:367584ms step_avg:41.67ms +[2025-09-11 07:18:12] [Rank 0] step:8841/10000 train_time:368289ms step_avg:41.66ms +[2025-09-11 07:18:12] [Rank 0] step:8841/10000 train_time:368289ms step_avg:41.66ms +[2025-09-11 07:18:12] [Rank 0] step:8861/10000 train_time:368994ms step_avg:41.64ms +[2025-09-11 07:18:12] [Rank 0] step:8861/10000 train_time:368994ms step_avg:41.64ms +[2025-09-11 07:18:13] [Rank 0] step:8881/10000 train_time:369699ms step_avg:41.63ms +[2025-09-11 07:18:13] [Rank 0] step:8881/10000 
train_time:369699ms step_avg:41.63ms +[2025-09-11 07:18:14] [Rank 0] step:8901/10000 train_time:370406ms step_avg:41.61ms +[2025-09-11 07:18:14] [Rank 0] step:8901/10000 train_time:370406ms step_avg:41.61ms +[2025-09-11 07:18:15] [Rank 0] step:8921/10000 train_time:371107ms step_avg:41.60ms +[2025-09-11 07:18:15] [Rank 0] step:8921/10000 train_time:371107ms step_avg:41.60ms +[2025-09-11 07:18:15] [Rank 0] step:8941/10000 train_time:371813ms step_avg:41.59ms +[2025-09-11 07:18:15] [Rank 0] step:8941/10000 train_time:371813ms step_avg:41.59ms +[2025-09-11 07:18:16] [Rank 0] step:8961/10000 train_time:372526ms step_avg:41.57ms +[2025-09-11 07:18:16] [Rank 0] step:8961/10000 train_time:372526ms step_avg:41.57ms +[2025-09-11 07:18:17] [Rank 0] step:8981/10000 train_time:373234ms step_avg:41.56ms +[2025-09-11 07:18:17] [Rank 0] step:8981/10000 train_time:373234ms step_avg:41.56ms +[2025-09-11 07:18:17] [Rank 0] step:9001/10000 train_time:373933ms step_avg:41.54ms +[2025-09-11 07:18:17] [Rank 0] step:9001/10000 train_time:373933ms step_avg:41.54ms +[2025-09-11 07:18:18] [Rank 0] step:9021/10000 train_time:374638ms step_avg:41.53ms +[2025-09-11 07:18:18] [Rank 0] step:9021/10000 train_time:374638ms step_avg:41.53ms +[2025-09-11 07:18:19] [Rank 0] step:9041/10000 train_time:375345ms step_avg:41.52ms +[2025-09-11 07:18:19] [Rank 0] step:9041/10000 train_time:375345ms step_avg:41.52ms +[2025-09-11 07:18:19] [Rank 0] step:9061/10000 train_time:376048ms step_avg:41.50ms +[2025-09-11 07:18:19] [Rank 0] step:9061/10000 train_time:376048ms step_avg:41.50ms +[2025-09-11 07:18:20] [Rank 0] step:9081/10000 train_time:376754ms step_avg:41.49ms +[2025-09-11 07:18:20] [Rank 0] step:9081/10000 train_time:376754ms step_avg:41.49ms +[2025-09-11 07:18:21] [Rank 0] step:9101/10000 train_time:377462ms step_avg:41.47ms +[2025-09-11 07:18:21] [Rank 0] step:9101/10000 train_time:377462ms step_avg:41.47ms +[2025-09-11 07:18:22] [Rank 0] step:9121/10000 train_time:378170ms step_avg:41.46ms 
+[2025-09-11 07:18:22] [Rank 0] step:9121/10000 train_time:378170ms step_avg:41.46ms +[2025-09-11 07:18:22] [Rank 0] step:9141/10000 train_time:378874ms step_avg:41.45ms +[2025-09-11 07:18:22] [Rank 0] step:9141/10000 train_time:378874ms step_avg:41.45ms +[2025-09-11 07:18:23] [Rank 0] step:9161/10000 train_time:379581ms step_avg:41.43ms +[2025-09-11 07:18:23] [Rank 0] step:9161/10000 train_time:379581ms step_avg:41.43ms +[2025-09-11 07:18:24] [Rank 0] step:9181/10000 train_time:380288ms step_avg:41.42ms +[2025-09-11 07:18:24] [Rank 0] step:9181/10000 train_time:380288ms step_avg:41.42ms +[2025-09-11 07:18:24] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 07:18:24] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 07:18:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 07:18:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 07:18:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 07:18:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 07:18:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:18:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:18:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 07:18:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 07:18:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 07:18:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 07:18:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 07:18:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 07:18:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 07:18:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 07:18:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 07:18:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 07:18:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 07:18:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 07:18:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 07:18:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 07:18:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 07:18:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 07:18:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 07:18:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 07:18:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 07:18:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 07:18:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 07:18:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 07:18:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 07:18:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 07:18:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 07:18:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 07:18:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 07:18:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 07:18:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 07:18:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 07:18:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 07:18:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 07:18:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 07:18:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 07:18:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 07:18:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 07:18:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 07:18:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:18:34] [Rank 0] PRINT: step:9200/10000 val_loss:4.0449 total_sharp:1.6172e-05 L1_sharp:2.1302e-03 L2_sharp:7.7181e-04 L3_sharp:4.1095e-04 L4_sharp:6.2657e-04 L5_sharp:6.5911e-04 L6_sharp:8.6063e-04 L7_sharp:7.8627e-04 L8_sharp:1.4747e-03 L9_sharp:1.4372e-03 L10_sharp:1.7348e-03 L11_sharp:3.4664e-03 L12_sharp:4.0732e-02 total_fnorm:4.6750e+01 total_l1_linf:6.7584e+04 total_spectral:2.3375e+01 L1_fnorm:2.4121e-01 L2_fnorm:2.2656e-01 L3_fnorm:2.2656e-01 L4_fnorm:2.2559e-01 L5_fnorm:2.2559e-01 L6_fnorm:2.2852e-01 L7_fnorm:2.2754e-01 L8_fnorm:2.2363e-01 L9_fnorm:2.2461e-01 L10_fnorm:2.2461e-01 L11_fnorm:2.2070e-01 L12_fnorm:2.2168e-01 L1_l1linf:3.5400e-02 L2_l1linf:3.3691e-02 L3_l1linf:3.3936e-02 L4_l1linf:3.3447e-02 L5_l1linf:3.3203e-02 L6_l1linf:3.3691e-02 L7_l1linf:3.3447e-02 L8_l1linf:3.4668e-02 L9_l1linf:3.2471e-02 L10_l1linf:3.1982e-02 L11_l1linf:3.0884e-02 L12_l1linf:3.3936e-02 L1_spectral:3.7566e-03 L2_spectral:3.3179e-03 L3_spectral:3.5445e-03 L4_spectral:3.4687e-03 L5_spectral:3.4444e-03 L6_spectral:3.4727e-03 L7_spectral:3.4334e-03 L8_spectral:3.4979e-03 L9_spectral:3.4080e-03 L10_spectral:3.3734e-03 L11_spectral:3.3945e-03 L12_spectral:3.4921e-03 train_time:380976ms step_avg:41.41ms +[2025-09-11 07:18:34] [Rank 0] PRINT: step:9200/10000 val_loss:4.0449 total_sharp:1.6172e-05 L1_sharp:2.1302e-03 L2_sharp:7.7181e-04 L3_sharp:4.1095e-04 L4_sharp:6.2657e-04 L5_sharp:6.5911e-04 L6_sharp:8.6063e-04 L7_sharp:7.8627e-04 L8_sharp:1.4747e-03 L9_sharp:1.4372e-03 L10_sharp:1.7348e-03 L11_sharp:3.4664e-03 L12_sharp:4.0732e-02 total_fnorm:4.6750e+01 total_l1_linf:6.7584e+04 total_spectral:2.3375e+01 L1_fnorm:2.4121e-01 L2_fnorm:2.2656e-01 L3_fnorm:2.2656e-01 L4_fnorm:2.2559e-01 L5_fnorm:2.2559e-01 L6_fnorm:2.2852e-01 L7_fnorm:2.2754e-01 L8_fnorm:2.2363e-01 L9_fnorm:2.2461e-01 L10_fnorm:2.2461e-01 L11_fnorm:2.2070e-01 L12_fnorm:2.2168e-01 L1_l1linf:3.5400e-02 L2_l1linf:3.3691e-02 L3_l1linf:3.3936e-02 L4_l1linf:3.3447e-02 L5_l1linf:3.3203e-02 
L6_l1linf:3.3691e-02 L7_l1linf:3.3447e-02 L8_l1linf:3.4668e-02 L9_l1linf:3.2471e-02 L10_l1linf:3.1982e-02 L11_l1linf:3.0884e-02 L12_l1linf:3.3936e-02 L1_spectral:3.7566e-03 L2_spectral:3.3179e-03 L3_spectral:3.5445e-03 L4_spectral:3.4687e-03 L5_spectral:3.4444e-03 L6_spectral:3.4727e-03 L7_spectral:3.4334e-03 L8_spectral:3.4979e-03 L9_spectral:3.4080e-03 L10_spectral:3.3734e-03 L11_spectral:3.3945e-03 L12_spectral:3.4921e-03 train_time:380976ms step_avg:41.41ms +[2025-09-11 07:18:36] [Rank 0] step:9201/10000 train_time:382162ms step_avg:41.53ms +[2025-09-11 07:18:36] [Rank 0] step:9201/10000 train_time:382162ms step_avg:41.53ms +[2025-09-11 07:18:37] [Rank 0] step:9221/10000 train_time:383190ms step_avg:41.56ms +[2025-09-11 07:18:37] [Rank 0] step:9221/10000 train_time:383190ms step_avg:41.56ms +[2025-09-11 07:18:37] [Rank 0] step:9241/10000 train_time:383895ms step_avg:41.54ms +[2025-09-11 07:18:37] [Rank 0] step:9241/10000 train_time:383895ms step_avg:41.54ms +[2025-09-11 07:18:38] [Rank 0] step:9261/10000 train_time:384603ms step_avg:41.53ms +[2025-09-11 07:18:38] [Rank 0] step:9261/10000 train_time:384603ms step_avg:41.53ms +[2025-09-11 07:18:39] [Rank 0] step:9281/10000 train_time:385310ms step_avg:41.52ms +[2025-09-11 07:18:39] [Rank 0] step:9281/10000 train_time:385310ms step_avg:41.52ms +[2025-09-11 07:18:39] [Rank 0] step:9301/10000 train_time:386014ms step_avg:41.50ms +[2025-09-11 07:18:39] [Rank 0] step:9301/10000 train_time:386014ms step_avg:41.50ms +[2025-09-11 07:18:40] [Rank 0] step:9321/10000 train_time:386721ms step_avg:41.49ms +[2025-09-11 07:18:40] [Rank 0] step:9321/10000 train_time:386721ms step_avg:41.49ms +[2025-09-11 07:18:41] [Rank 0] step:9341/10000 train_time:387422ms step_avg:41.48ms +[2025-09-11 07:18:41] [Rank 0] step:9341/10000 train_time:387422ms step_avg:41.48ms +[2025-09-11 07:18:41] [Rank 0] step:9361/10000 train_time:388124ms step_avg:41.46ms +[2025-09-11 07:18:41] [Rank 0] step:9361/10000 train_time:388124ms step_avg:41.46ms 
+[2025-09-11 07:18:42] [Rank 0] step:9381/10000 train_time:388827ms step_avg:41.45ms +[2025-09-11 07:18:42] [Rank 0] step:9381/10000 train_time:388827ms step_avg:41.45ms +[2025-09-11 07:18:43] [Rank 0] step:9401/10000 train_time:389534ms step_avg:41.44ms +[2025-09-11 07:18:43] [Rank 0] step:9401/10000 train_time:389534ms step_avg:41.44ms +[2025-09-11 07:18:44] [Rank 0] step:9421/10000 train_time:390241ms step_avg:41.42ms +[2025-09-11 07:18:44] [Rank 0] step:9421/10000 train_time:390241ms step_avg:41.42ms +[2025-09-11 07:18:44] [Rank 0] step:9441/10000 train_time:390950ms step_avg:41.41ms +[2025-09-11 07:18:44] [Rank 0] step:9441/10000 train_time:390950ms step_avg:41.41ms +[2025-09-11 07:18:45] [Rank 0] step:9461/10000 train_time:391655ms step_avg:41.40ms +[2025-09-11 07:18:45] [Rank 0] step:9461/10000 train_time:391655ms step_avg:41.40ms +[2025-09-11 07:18:46] [Rank 0] step:9481/10000 train_time:392362ms step_avg:41.38ms +[2025-09-11 07:18:46] [Rank 0] step:9481/10000 train_time:392362ms step_avg:41.38ms +[2025-09-11 07:18:46] [Rank 0] step:9501/10000 train_time:393069ms step_avg:41.37ms +[2025-09-11 07:18:46] [Rank 0] step:9501/10000 train_time:393069ms step_avg:41.37ms +[2025-09-11 07:18:47] [Rank 0] step:9521/10000 train_time:393777ms step_avg:41.36ms +[2025-09-11 07:18:47] [Rank 0] step:9521/10000 train_time:393777ms step_avg:41.36ms +[2025-09-11 07:18:48] [Rank 0] step:9541/10000 train_time:394479ms step_avg:41.35ms +[2025-09-11 07:18:48] [Rank 0] step:9541/10000 train_time:394479ms step_avg:41.35ms +[2025-09-11 07:18:49] [Rank 0] step:9561/10000 train_time:395184ms step_avg:41.33ms +[2025-09-11 07:18:49] [Rank 0] step:9561/10000 train_time:395184ms step_avg:41.33ms +[2025-09-11 07:18:49] [Rank 0] step:9581/10000 train_time:395895ms step_avg:41.32ms +[2025-09-11 07:18:49] [Rank 0] step:9581/10000 train_time:395895ms step_avg:41.32ms +[2025-09-11 07:18:50] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 07:18:50] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 07:18:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 07:18:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 07:18:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 07:18:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 07:18:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:18:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:18:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 07:18:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 07:18:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 07:18:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 07:18:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 07:18:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 07:18:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 07:18:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 07:18:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 07:18:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 07:18:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 07:18:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 07:18:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 07:18:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 07:18:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 07:18:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 07:18:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 07:18:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 07:18:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 07:18:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 07:18:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 07:18:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 07:18:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 07:18:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 07:18:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 07:18:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 07:18:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 07:18:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 07:18:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 07:18:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 07:18:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 07:18:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 07:18:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 07:18:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 07:18:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 07:18:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 07:18:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 07:18:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 07:18:59] [Rank 0] PRINT: step:9600/10000 val_loss:4.0300 total_sharp:1.1561e-05 L1_sharp:1.5267e-03 L2_sharp:3.8048e-04 L3_sharp:5.4326e-05 L4_sharp:7.1244e-04 L5_sharp:3.2613e-04 L6_sharp:4.9346e-04 L7_sharp:4.4062e-04 L8_sharp:1.1587e-03 L9_sharp:1.2546e-03 L10_sharp:1.2601e-03 L11_sharp:2.2993e-03 L12_sharp:1.8262e-02 total_fnorm:2.6625e+01 total_l1_linf:3.3024e+04 total_spectral:1.3375e+01 L1_fnorm:1.3965e-01 L2_fnorm:1.2988e-01 L3_fnorm:1.2891e-01 L4_fnorm:1.2793e-01 L5_fnorm:1.2891e-01 L6_fnorm:1.2988e-01 L7_fnorm:1.2988e-01 L8_fnorm:1.2695e-01 L9_fnorm:1.2598e-01 L10_fnorm:1.2793e-01 L11_fnorm:1.2598e-01 L12_fnorm:1.2598e-01 L1_l1linf:1.6968e-02 L2_l1linf:1.7090e-02 L3_l1linf:1.7334e-02 L4_l1linf:1.6113e-02 L5_l1linf:1.6235e-02 L6_l1linf:1.6479e-02 L7_l1linf:1.6357e-02 L8_l1linf:1.7456e-02 L9_l1linf:1.4343e-02 L10_l1linf:1.5259e-02 L11_l1linf:1.5625e-02 L12_l1linf:1.6113e-02 L1_spectral:2.2427e-03 L2_spectral:1.9862e-03 L3_spectral:2.0480e-03 L4_spectral:2.0292e-03 L5_spectral:1.9981e-03 L6_spectral:2.0368e-03 L7_spectral:2.0033e-03 L8_spectral:2.0567e-03 L9_spectral:1.9951e-03 L10_spectral:1.9613e-03 L11_spectral:1.9749e-03 L12_spectral:2.0481e-03 train_time:396578ms step_avg:41.31ms +[2025-09-11 07:18:59] [Rank 0] PRINT: step:9600/10000 
val_loss:4.0300 total_sharp:1.1561e-05 L1_sharp:1.5267e-03 L2_sharp:3.8048e-04 L3_sharp:5.4326e-05 L4_sharp:7.1244e-04 L5_sharp:3.2613e-04 L6_sharp:4.9346e-04 L7_sharp:4.4062e-04 L8_sharp:1.1587e-03 L9_sharp:1.2546e-03 L10_sharp:1.2601e-03 L11_sharp:2.2993e-03 L12_sharp:1.8262e-02 total_fnorm:2.6625e+01 total_l1_linf:3.3024e+04 total_spectral:1.3375e+01 L1_fnorm:1.3965e-01 L2_fnorm:1.2988e-01 L3_fnorm:1.2891e-01 L4_fnorm:1.2793e-01 L5_fnorm:1.2891e-01 L6_fnorm:1.2988e-01 L7_fnorm:1.2988e-01 L8_fnorm:1.2695e-01 L9_fnorm:1.2598e-01 L10_fnorm:1.2793e-01 L11_fnorm:1.2598e-01 L12_fnorm:1.2598e-01 L1_l1linf:1.6968e-02 L2_l1linf:1.7090e-02 L3_l1linf:1.7334e-02 L4_l1linf:1.6113e-02 L5_l1linf:1.6235e-02 L6_l1linf:1.6479e-02 L7_l1linf:1.6357e-02 L8_l1linf:1.7456e-02 L9_l1linf:1.4343e-02 L10_l1linf:1.5259e-02 L11_l1linf:1.5625e-02 L12_l1linf:1.6113e-02 L1_spectral:2.2427e-03 L2_spectral:1.9862e-03 L3_spectral:2.0480e-03 L4_spectral:2.0292e-03 L5_spectral:1.9981e-03 L6_spectral:2.0368e-03 L7_spectral:2.0033e-03 L8_spectral:2.0567e-03 L9_spectral:1.9951e-03 L10_spectral:1.9613e-03 L11_spectral:1.9749e-03 L12_spectral:2.0481e-03 train_time:396578ms step_avg:41.31ms +[2025-09-11 07:19:01] [Rank 0] step:9601/10000 train_time:397769ms step_avg:41.43ms +[2025-09-11 07:19:01] [Rank 0] step:9601/10000 train_time:397769ms step_avg:41.43ms +[2025-09-11 07:19:01] [Rank 0] step:9621/10000 train_time:398502ms step_avg:41.42ms +[2025-09-11 07:19:01] [Rank 0] step:9621/10000 train_time:398502ms step_avg:41.42ms +[2025-09-11 07:19:02] [Rank 0] step:9641/10000 train_time:399213ms step_avg:41.41ms +[2025-09-11 07:19:02] [Rank 0] step:9641/10000 train_time:399213ms step_avg:41.41ms +[2025-09-11 07:19:03] [Rank 0] step:9661/10000 train_time:399932ms step_avg:41.40ms +[2025-09-11 07:19:03] [Rank 0] step:9661/10000 train_time:399932ms step_avg:41.40ms +[2025-09-11 07:19:04] [Rank 0] step:9681/10000 train_time:400643ms step_avg:41.38ms +[2025-09-11 07:19:04] [Rank 0] step:9681/10000 
train_time:400643ms step_avg:41.38ms +[2025-09-11 07:19:04] [Rank 0] step:9701/10000 train_time:401355ms step_avg:41.37ms +[2025-09-11 07:19:04] [Rank 0] step:9701/10000 train_time:401355ms step_avg:41.37ms +[2025-09-11 07:19:05] [Rank 0] step:9721/10000 train_time:402071ms step_avg:41.36ms +[2025-09-11 07:19:05] [Rank 0] step:9721/10000 train_time:402071ms step_avg:41.36ms +[2025-09-11 07:19:06] [Rank 0] step:9741/10000 train_time:402785ms step_avg:41.35ms +[2025-09-11 07:19:06] [Rank 0] step:9741/10000 train_time:402785ms step_avg:41.35ms +[2025-09-11 07:19:06] [Rank 0] step:9761/10000 train_time:403496ms step_avg:41.34ms +[2025-09-11 07:19:06] [Rank 0] step:9761/10000 train_time:403496ms step_avg:41.34ms +[2025-09-11 07:19:07] [Rank 0] step:9781/10000 train_time:404207ms step_avg:41.33ms +[2025-09-11 07:19:07] [Rank 0] step:9781/10000 train_time:404207ms step_avg:41.33ms +[2025-09-11 07:19:08] [Rank 0] step:9801/10000 train_time:404924ms step_avg:41.31ms +[2025-09-11 07:19:08] [Rank 0] step:9801/10000 train_time:404924ms step_avg:41.31ms +[2025-09-11 07:19:09] [Rank 0] step:9821/10000 train_time:405639ms step_avg:41.30ms +[2025-09-11 07:19:09] [Rank 0] step:9821/10000 train_time:405639ms step_avg:41.30ms +[2025-09-11 07:19:09] [Rank 0] step:9841/10000 train_time:406356ms step_avg:41.29ms +[2025-09-11 07:19:09] [Rank 0] step:9841/10000 train_time:406356ms step_avg:41.29ms +[2025-09-11 07:19:10] [Rank 0] step:9861/10000 train_time:407069ms step_avg:41.28ms +[2025-09-11 07:19:10] [Rank 0] step:9861/10000 train_time:407069ms step_avg:41.28ms +[2025-09-11 07:19:11] [Rank 0] step:9881/10000 train_time:407783ms step_avg:41.27ms +[2025-09-11 07:19:11] [Rank 0] step:9881/10000 train_time:407783ms step_avg:41.27ms +[2025-09-11 07:19:11] [Rank 0] step:9901/10000 train_time:408492ms step_avg:41.26ms +[2025-09-11 07:19:11] [Rank 0] step:9901/10000 train_time:408492ms step_avg:41.26ms +[2025-09-11 07:19:12] [Rank 0] step:9921/10000 train_time:409205ms step_avg:41.25ms 
+[2025-09-11 07:19:12] [Rank 0] step:9921/10000 train_time:409205ms step_avg:41.25ms +[2025-09-11 07:19:13] [Rank 0] step:9941/10000 train_time:409921ms step_avg:41.24ms +[2025-09-11 07:19:13] [Rank 0] step:9941/10000 train_time:409921ms step_avg:41.24ms +[2025-09-11 07:19:14] [Rank 0] step:9961/10000 train_time:410639ms step_avg:41.22ms +[2025-09-11 07:19:14] [Rank 0] step:9961/10000 train_time:410639ms step_avg:41.22ms +[2025-09-11 07:19:14] [Rank 0] step:9981/10000 train_time:411353ms step_avg:41.21ms +[2025-09-11 07:19:14] [Rank 0] step:9981/10000 train_time:411353ms step_avg:41.21ms +[2025-09-11 07:19:15] [Rank 0] step:10000/10000 train_time:412039ms step_avg:41.20ms +[2025-09-11 07:19:15] [Rank 0] step:10000/10000 train_time:412039ms step_avg:41.20ms +[2025-09-11 07:19:15] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 07:19:15] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 07:19:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 07:19:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 07:19:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 07:19:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 07:19:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:19:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:19:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 07:19:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 07:19:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 07:19:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 07:19:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 07:19:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 07:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 07:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 07:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 07:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 07:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 07:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 07:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 07:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 07:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 07:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 07:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 07:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 07:19:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 07:19:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 07:19:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 07:19:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 07:19:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 07:19:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 07:19:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 07:19:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 07:19:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 07:19:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 07:19:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 07:19:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 07:19:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 07:19:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 07:19:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 07:19:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 07:19:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 07:19:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 07:19:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 07:19:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:19:25] [Rank 0] PRINT: step:10000/10000 val_loss:4.0281 total_sharp:8.2110e-06 L1_sharp:1.6378e-03 L2_sharp:4.7541e-04 L3_sharp:3.6136e-04 L4_sharp:2.1744e-04 L5_sharp:3.2410e-04 L6_sharp:3.4207e-04 L7_sharp:4.6519e-04 L8_sharp:9.2389e-04 L9_sharp:9.8672e-04 L10_sharp:1.0152e-03 L11_sharp:1.6439e-03 L12_sharp:1.1594e-02 total_fnorm:1.0562e+01 total_l1_linf:9.4720e+03 total_spectral:5.2812e+00 L1_fnorm:5.3955e-02 L2_fnorm:4.9072e-02 L3_fnorm:4.9805e-02 L4_fnorm:4.9316e-02 L5_fnorm:4.9805e-02 L6_fnorm:4.9805e-02 L7_fnorm:4.9805e-02 L8_fnorm:4.8828e-02 L9_fnorm:4.9072e-02 L10_fnorm:4.9316e-02 L11_fnorm:4.8584e-02 L12_fnorm:4.8828e-02 L1_l1linf:5.1270e-03 L2_l1linf:5.1575e-03 L3_l1linf:5.0354e-03 L4_l1linf:5.2490e-03 L5_l1linf:5.3101e-03 L6_l1linf:5.0354e-03 L7_l1linf:4.7913e-03 L8_l1linf:5.5847e-03 L9_l1linf:4.5166e-03 L10_l1linf:4.7302e-03 L11_l1linf:4.6082e-03 L12_l1linf:5.2795e-03 L1_spectral:8.7905e-04 L2_spectral:7.5027e-04 L3_spectral:8.1731e-04 L4_spectral:8.1610e-04 L5_spectral:7.9784e-04 L6_spectral:7.9970e-04 L7_spectral:7.9063e-04 L8_spectral:8.2897e-04 L9_spectral:7.8250e-04 L10_spectral:7.8097e-04 L11_spectral:7.9178e-04 L12_spectral:8.4093e-04 train_time:412058ms step_avg:41.21ms +[2025-09-11 07:19:25] [Rank 0] PRINT: step:10000/10000 val_loss:4.0281 total_sharp:8.2110e-06 L1_sharp:1.6378e-03 L2_sharp:4.7541e-04 L3_sharp:3.6136e-04 L4_sharp:2.1744e-04 L5_sharp:3.2410e-04 L6_sharp:3.4207e-04 L7_sharp:4.6519e-04 L8_sharp:9.2389e-04 L9_sharp:9.8672e-04 L10_sharp:1.0152e-03 L11_sharp:1.6439e-03 L12_sharp:1.1594e-02 total_fnorm:1.0562e+01 total_l1_linf:9.4720e+03 total_spectral:5.2812e+00 L1_fnorm:5.3955e-02 L2_fnorm:4.9072e-02 L3_fnorm:4.9805e-02 L4_fnorm:4.9316e-02 L5_fnorm:4.9805e-02 L6_fnorm:4.9805e-02 L7_fnorm:4.9805e-02 L8_fnorm:4.8828e-02 L9_fnorm:4.9072e-02 L10_fnorm:4.9316e-02 L11_fnorm:4.8584e-02 L12_fnorm:4.8828e-02 L1_l1linf:5.1270e-03 L2_l1linf:5.1575e-03 L3_l1linf:5.0354e-03 L4_l1linf:5.2490e-03 L5_l1linf:5.3101e-03 
L6_l1linf:5.0354e-03 L7_l1linf:4.7913e-03 L8_l1linf:5.5847e-03 L9_l1linf:4.5166e-03 L10_l1linf:4.7302e-03 L11_l1linf:4.6082e-03 L12_l1linf:5.2795e-03 L1_spectral:8.7905e-04 L2_spectral:7.5027e-04 L3_spectral:8.1731e-04 L4_spectral:8.1610e-04 L5_spectral:7.9784e-04 L6_spectral:7.9970e-04 L7_spectral:7.9063e-04 L8_spectral:8.2897e-04 L9_spectral:7.8250e-04 L10_spectral:7.8097e-04 L11_spectral:7.9178e-04 L12_spectral:8.4093e-04 train_time:412058ms step_avg:41.21ms +[2025-09-11 07:19:25] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 07:19:25 2025 --- +[2025-09-11 07:19:25] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 07:19:25 2025 --- +[2025-09-11 07:19:25] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 07:19:25] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.02_seed_43/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.02_seed_43/config.json new file mode 100644 index 0000000000000000000000000000000000000000..7a769cd6576f68842159632e1e7ff1b0f9a4cb9b --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.02_seed_43/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 43, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.05, + "muon_lr": 0.02, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "8c5f08ea-0dc0-459f-989a-80bc33d25dac", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.02_seed_43/training_log_8c5f08ea-0dc0-459f-989a-80bc33d25dac.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.02_seed_43/training_log_8c5f08ea-0dc0-459f-989a-80bc33d25dac.txt new file mode 100644 index 0000000000000000000000000000000000000000..8f28ab90468a4f5d853310a5639d0eff5025d96f --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.02_seed_43/training_log_8c5f08ea-0dc0-459f-989a-80bc33d25dac.txt @@ -0,0 +1,4264 @@ +[2025-09-11 06:52:31] [Rank 0] PRINT: --- Script Start: Thu Sep 11 06:52:31 2025 --- +[2025-09-11 06:52:31] [Rank 0] PRINT: --- Script Start: Thu Sep 11 06:52:31 2025 --- +[2025-09-11 06:52:31] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.05, muon_lr=0.02, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 06:52:31] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.05, muon_lr=0.02, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 06:52:32] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 06:52:32] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 06:52:32] [Rank 0] PRINT: Using fixed seed: 43 +[2025-09-11 06:52:32] [Rank 0] PRINT: Using fixed seed: 43 +[2025-09-11 06:52:32] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.02_seed_43 +[2025-09-11 06:52:32] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.02_seed_43 +[2025-09-11 06:52:32] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import 
dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, 
"unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." 
+ "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention 
sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 06:52:32] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 06:52:32] [Rank 0] PRINT: Constructing model... +[2025-09-11 06:52:32] [Rank 0] PRINT: Constructing model... +[2025-09-11 06:52:33] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 06:52:33] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 06:52:33] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 06:52:33] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 06:52:33] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 06:52:33] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 06:52:33] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 06:52:33] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 06:52:33] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 06:52:33] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 06:52:35] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 06:52:35] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 06:52:35] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 06:52:35] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 06:52:35] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 06:52:35] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 06:52:41] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 06:52:41] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 06:52:41] [Rank 0] PRINT: Starting warmup... +[2025-09-11 06:52:41] [Rank 0] PRINT: Starting warmup... +[2025-09-11 06:53:20] [Rank 0] PRINT: Warmup complete. +[2025-09-11 06:53:20] [Rank 0] PRINT: Warmup complete. +[2025-09-11 06:53:20] [Rank 0] PRINT: Starting training... +[2025-09-11 06:53:20] [Rank 0] PRINT: Starting training... 
+[2025-09-11 06:53:21] [Rank 0] step:21/10000 train_time:1140ms step_avg:54.27ms +[2025-09-11 06:53:21] [Rank 0] step:21/10000 train_time:1140ms step_avg:54.27ms +[2025-09-11 06:53:22] [Rank 0] step:41/10000 train_time:1868ms step_avg:45.57ms +[2025-09-11 06:53:22] [Rank 0] step:41/10000 train_time:1868ms step_avg:45.57ms +[2025-09-11 06:53:23] [Rank 0] step:61/10000 train_time:2598ms step_avg:42.59ms +[2025-09-11 06:53:23] [Rank 0] step:61/10000 train_time:2598ms step_avg:42.59ms +[2025-09-11 06:53:23] [Rank 0] step:81/10000 train_time:3326ms step_avg:41.06ms +[2025-09-11 06:53:23] [Rank 0] step:81/10000 train_time:3326ms step_avg:41.06ms +[2025-09-11 06:53:24] [Rank 0] step:101/10000 train_time:4053ms step_avg:40.13ms +[2025-09-11 06:53:24] [Rank 0] step:101/10000 train_time:4053ms step_avg:40.13ms +[2025-09-11 06:53:25] [Rank 0] step:121/10000 train_time:4781ms step_avg:39.52ms +[2025-09-11 06:53:25] [Rank 0] step:121/10000 train_time:4781ms step_avg:39.52ms +[2025-09-11 06:53:26] [Rank 0] step:141/10000 train_time:5509ms step_avg:39.07ms +[2025-09-11 06:53:26] [Rank 0] step:141/10000 train_time:5509ms step_avg:39.07ms +[2025-09-11 06:53:26] [Rank 0] step:161/10000 train_time:6236ms step_avg:38.73ms +[2025-09-11 06:53:26] [Rank 0] step:161/10000 train_time:6236ms step_avg:38.73ms +[2025-09-11 06:53:27] [Rank 0] step:181/10000 train_time:6967ms step_avg:38.49ms +[2025-09-11 06:53:27] [Rank 0] step:181/10000 train_time:6967ms step_avg:38.49ms +[2025-09-11 06:53:28] [Rank 0] step:201/10000 train_time:7695ms step_avg:38.28ms +[2025-09-11 06:53:28] [Rank 0] step:201/10000 train_time:7695ms step_avg:38.28ms +[2025-09-11 06:53:29] [Rank 0] step:221/10000 train_time:8422ms step_avg:38.11ms +[2025-09-11 06:53:29] [Rank 0] step:221/10000 train_time:8422ms step_avg:38.11ms +[2025-09-11 06:53:29] [Rank 0] step:241/10000 train_time:9149ms step_avg:37.96ms +[2025-09-11 06:53:29] [Rank 0] step:241/10000 train_time:9149ms step_avg:37.96ms +[2025-09-11 06:53:30] [Rank 0] 
step:261/10000 train_time:9876ms step_avg:37.84ms +[2025-09-11 06:53:30] [Rank 0] step:261/10000 train_time:9876ms step_avg:37.84ms +[2025-09-11 06:53:31] [Rank 0] step:281/10000 train_time:10603ms step_avg:37.73ms +[2025-09-11 06:53:31] [Rank 0] step:281/10000 train_time:10603ms step_avg:37.73ms +[2025-09-11 06:53:31] [Rank 0] step:301/10000 train_time:11330ms step_avg:37.64ms +[2025-09-11 06:53:31] [Rank 0] step:301/10000 train_time:11330ms step_avg:37.64ms +[2025-09-11 06:53:32] [Rank 0] step:321/10000 train_time:12058ms step_avg:37.56ms +[2025-09-11 06:53:32] [Rank 0] step:321/10000 train_time:12058ms step_avg:37.56ms +[2025-09-11 06:53:33] [Rank 0] step:341/10000 train_time:12785ms step_avg:37.49ms +[2025-09-11 06:53:33] [Rank 0] step:341/10000 train_time:12785ms step_avg:37.49ms +[2025-09-11 06:53:34] [Rank 0] step:361/10000 train_time:13513ms step_avg:37.43ms +[2025-09-11 06:53:34] [Rank 0] step:361/10000 train_time:13513ms step_avg:37.43ms +[2025-09-11 06:53:34] [Rank 0] step:381/10000 train_time:14241ms step_avg:37.38ms +[2025-09-11 06:53:34] [Rank 0] step:381/10000 train_time:14241ms step_avg:37.38ms +[2025-09-11 06:53:35] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 06:53:35] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 06:53:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 06:53:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 06:54:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 06:54:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 06:54:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:54:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 06:54:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 06:54:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 06:54:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 06:54:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 06:54:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 06:54:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 06:54:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 06:54:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 06:54:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 06:54:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 06:54:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 06:54:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 06:54:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 06:54:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 06:54:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 06:54:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 06:54:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 06:54:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 06:54:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 06:54:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 06:54:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 06:54:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 06:54:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 06:54:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 06:54:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 06:54:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 06:54:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 06:54:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 06:54:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 06:54:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 06:54:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 06:54:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 06:54:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 06:54:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 06:54:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 06:54:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 06:54:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 06:54:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:54:26] [Rank 0] PRINT: step:400/10000 val_loss:5.8271 total_sharp:2.6871e-04 L1_sharp:2.0456e-02 L2_sharp:8.1849e-03 L3_sharp:4.3200e-03 L4_sharp:5.2209e-03 L5_sharp:4.0883e-03 L6_sharp:3.2875e-03 L7_sharp:4.0213e-03 L8_sharp:3.6896e-03 L9_sharp:4.0277e-03 L10_sharp:4.7215e-03 L11_sharp:4.9335e-03 L12_sharp:7.4438e-02 total_fnorm:2.0446e+02 total_l1_linf:5.2536e+05 total_spectral:1.0227e+02 L1_fnorm:2.4467e+00 L2_fnorm:2.4373e+00 L3_fnorm:2.4213e+00 L4_fnorm:2.4108e+00 L5_fnorm:2.4189e+00 L6_fnorm:2.4169e+00 L7_fnorm:2.3997e+00 L8_fnorm:2.4175e+00 L9_fnorm:2.4098e+00 L10_fnorm:2.3976e+00 L11_fnorm:2.3944e+00 L12_fnorm:2.2773e+00 L1_l1linf:8.2492e-01 L2_l1linf:8.3619e-01 L3_l1linf:8.0783e-01 L4_l1linf:8.1232e-01 L5_l1linf:8.1335e-01 L6_l1linf:8.0736e-01 L7_l1linf:8.0846e-01 L8_l1linf:8.2477e-01 L9_l1linf:8.1325e-01 L10_l1linf:8.2188e-01 L11_l1linf:7.9944e-01 L12_l1linf:7.5266e-01 L1_spectral:2.4103e-02 L2_spectral:2.4093e-02 L3_spectral:2.4078e-02 L4_spectral:2.4085e-02 L5_spectral:2.4087e-02 L6_spectral:2.4090e-02 L7_spectral:2.4086e-02 L8_spectral:2.4087e-02 L9_spectral:2.4095e-02 L10_spectral:2.4081e-02 L11_spectral:2.4077e-02 L12_spectral:2.4077e-02 train_time:14948ms step_avg:37.37ms +[2025-09-11 06:54:26] [Rank 0] PRINT: step:400/10000 val_loss:5.8271 total_sharp:2.6871e-04 L1_sharp:2.0456e-02 L2_sharp:8.1849e-03 L3_sharp:4.3200e-03 L4_sharp:5.2209e-03 L5_sharp:4.0883e-03 L6_sharp:3.2875e-03 L7_sharp:4.0213e-03 L8_sharp:3.6896e-03 L9_sharp:4.0277e-03 L10_sharp:4.7215e-03 L11_sharp:4.9335e-03 L12_sharp:7.4438e-02 total_fnorm:2.0446e+02 total_l1_linf:5.2536e+05 total_spectral:1.0227e+02 L1_fnorm:2.4467e+00 L2_fnorm:2.4373e+00 L3_fnorm:2.4213e+00 L4_fnorm:2.4108e+00 L5_fnorm:2.4189e+00 L6_fnorm:2.4169e+00 L7_fnorm:2.3997e+00 L8_fnorm:2.4175e+00 L9_fnorm:2.4098e+00 L10_fnorm:2.3976e+00 L11_fnorm:2.3944e+00 L12_fnorm:2.2773e+00 L1_l1linf:8.2492e-01 L2_l1linf:8.3619e-01 L3_l1linf:8.0783e-01 L4_l1linf:8.1232e-01 L5_l1linf:8.1335e-01 
L6_l1linf:8.0736e-01 L7_l1linf:8.0846e-01 L8_l1linf:8.2477e-01 L9_l1linf:8.1325e-01 L10_l1linf:8.2188e-01 L11_l1linf:7.9944e-01 L12_l1linf:7.5266e-01 L1_spectral:2.4103e-02 L2_spectral:2.4093e-02 L3_spectral:2.4078e-02 L4_spectral:2.4085e-02 L5_spectral:2.4087e-02 L6_spectral:2.4090e-02 L7_spectral:2.4086e-02 L8_spectral:2.4087e-02 L9_spectral:2.4095e-02 L10_spectral:2.4081e-02 L11_spectral:2.4077e-02 L12_spectral:2.4077e-02 train_time:14948ms step_avg:37.37ms +[2025-09-11 06:54:59] [Rank 0] step:401/10000 train_time:48100ms step_avg:119.95ms +[2025-09-11 06:54:59] [Rank 0] step:401/10000 train_time:48100ms step_avg:119.95ms +[2025-09-11 06:55:01] [Rank 0] step:421/10000 train_time:50651ms step_avg:120.31ms +[2025-09-11 06:55:01] [Rank 0] step:421/10000 train_time:50651ms step_avg:120.31ms +[2025-09-11 06:55:02] [Rank 0] step:441/10000 train_time:51338ms step_avg:116.41ms +[2025-09-11 06:55:02] [Rank 0] step:441/10000 train_time:51338ms step_avg:116.41ms +[2025-09-11 06:55:03] [Rank 0] step:461/10000 train_time:51978ms step_avg:112.75ms +[2025-09-11 06:55:03] [Rank 0] step:461/10000 train_time:51978ms step_avg:112.75ms +[2025-09-11 06:55:03] [Rank 0] step:481/10000 train_time:52617ms step_avg:109.39ms +[2025-09-11 06:55:03] [Rank 0] step:481/10000 train_time:52617ms step_avg:109.39ms +[2025-09-11 06:55:04] [Rank 0] step:501/10000 train_time:53256ms step_avg:106.30ms +[2025-09-11 06:55:04] [Rank 0] step:501/10000 train_time:53256ms step_avg:106.30ms +[2025-09-11 06:55:05] [Rank 0] step:521/10000 train_time:53895ms step_avg:103.45ms +[2025-09-11 06:55:05] [Rank 0] step:521/10000 train_time:53895ms step_avg:103.45ms +[2025-09-11 06:55:05] [Rank 0] step:541/10000 train_time:54536ms step_avg:100.81ms +[2025-09-11 06:55:05] [Rank 0] step:541/10000 train_time:54536ms step_avg:100.81ms +[2025-09-11 06:55:06] [Rank 0] step:561/10000 train_time:55175ms step_avg:98.35ms +[2025-09-11 06:55:06] [Rank 0] step:561/10000 train_time:55175ms step_avg:98.35ms +[2025-09-11 06:55:07] 
[Rank 0] step:581/10000 train_time:55813ms step_avg:96.06ms +[2025-09-11 06:55:07] [Rank 0] step:581/10000 train_time:55813ms step_avg:96.06ms +[2025-09-11 06:55:07] [Rank 0] step:601/10000 train_time:56453ms step_avg:93.93ms +[2025-09-11 06:55:07] [Rank 0] step:601/10000 train_time:56453ms step_avg:93.93ms +[2025-09-11 06:55:08] [Rank 0] step:621/10000 train_time:57094ms step_avg:91.94ms +[2025-09-11 06:55:08] [Rank 0] step:621/10000 train_time:57094ms step_avg:91.94ms +[2025-09-11 06:55:09] [Rank 0] step:641/10000 train_time:57731ms step_avg:90.06ms +[2025-09-11 06:55:09] [Rank 0] step:641/10000 train_time:57731ms step_avg:90.06ms +[2025-09-11 06:55:09] [Rank 0] step:661/10000 train_time:58382ms step_avg:88.32ms +[2025-09-11 06:55:09] [Rank 0] step:661/10000 train_time:58382ms step_avg:88.32ms +[2025-09-11 06:55:10] [Rank 0] step:681/10000 train_time:59121ms step_avg:86.82ms +[2025-09-11 06:55:10] [Rank 0] step:681/10000 train_time:59121ms step_avg:86.82ms +[2025-09-11 06:55:11] [Rank 0] step:701/10000 train_time:59760ms step_avg:85.25ms +[2025-09-11 06:55:11] [Rank 0] step:701/10000 train_time:59760ms step_avg:85.25ms +[2025-09-11 06:55:11] [Rank 0] step:721/10000 train_time:60399ms step_avg:83.77ms +[2025-09-11 06:55:11] [Rank 0] step:721/10000 train_time:60399ms step_avg:83.77ms +[2025-09-11 06:55:12] [Rank 0] step:741/10000 train_time:61039ms step_avg:82.37ms +[2025-09-11 06:55:12] [Rank 0] step:741/10000 train_time:61039ms step_avg:82.37ms +[2025-09-11 06:55:13] [Rank 0] step:761/10000 train_time:61683ms step_avg:81.06ms +[2025-09-11 06:55:13] [Rank 0] step:761/10000 train_time:61683ms step_avg:81.06ms +[2025-09-11 06:55:13] [Rank 0] step:781/10000 train_time:62327ms step_avg:79.80ms +[2025-09-11 06:55:13] [Rank 0] step:781/10000 train_time:62327ms step_avg:79.80ms +[2025-09-11 06:55:14] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 06:55:14] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 06:55:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 06:55:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 06:55:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 06:55:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 06:55:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:55:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:55:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 06:55:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 06:55:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 06:55:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 06:55:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 06:55:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 06:55:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 06:55:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 06:55:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 06:55:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 06:55:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 06:55:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 06:55:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 06:55:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 06:55:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 06:55:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 06:55:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 06:55:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 06:55:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 06:55:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 06:55:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 06:55:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 06:55:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 06:55:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 06:55:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 06:55:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 06:55:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 06:55:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 06:55:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 06:55:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 06:55:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... 
+[2025-09-11 06:55:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 06:56:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 06:56:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 06:56:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 06:56:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 06:56:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 06:56:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 06:56:01] [Rank 0] PRINT: step:800/10000 val_loss:5.4304 total_sharp:1.5904e-04 L1_sharp:1.4881e-02 L2_sharp:3.3667e-03 L3_sharp:1.2278e-03 L4_sharp:1.8753e-03 L5_sharp:1.4724e-03 L6_sharp:1.6122e-03 L7_sharp:1.8448e-03 L8_sharp:1.7516e-03 L9_sharp:1.5784e-03 L10_sharp:2.4933e-03 L11_sharp:4.0134e-03 L12_sharp:5.7730e-02 total_fnorm:2.0400e+02 total_l1_linf:4.9971e+05 total_spectral:1.0250e+02 L1_fnorm:2.4844e+00 L2_fnorm:2.4531e+00 L3_fnorm:2.4531e+00 L4_fnorm:2.4531e+00 L5_fnorm:2.4531e+00 L6_fnorm:2.4688e+00 L7_fnorm:2.4688e+00 L8_fnorm:2.4531e+00 L9_fnorm:2.4844e+00 L10_fnorm:2.4531e+00 L11_fnorm:2.4531e+00 L12_fnorm:2.2969e+00 L1_l1linf:8.0859e-01 L2_l1linf:8.0078e-01 L3_l1linf:7.8125e-01 L4_l1linf:7.7344e-01 L5_l1linf:7.6172e-01 L6_l1linf:7.6953e-01 L7_l1linf:7.6953e-01 L8_l1linf:7.6562e-01 L9_l1linf:7.8906e-01 L10_l1linf:7.8906e-01 L11_l1linf:7.6172e-01 L12_l1linf:6.5234e-01 L1_spectral:2.6988e-02 L2_spectral:2.6861e-02 L3_spectral:2.6760e-02 L4_spectral:2.6718e-02 L5_spectral:2.6687e-02 L6_spectral:2.6751e-02 L7_spectral:2.6730e-02 L8_spectral:2.6835e-02 L9_spectral:2.6718e-02 L10_spectral:2.6874e-02 L11_spectral:2.6798e-02 L12_spectral:2.6725e-02 train_time:62952ms step_avg:78.69ms +[2025-09-11 06:56:01] [Rank 0] PRINT: step:800/10000 val_loss:5.4304 
total_sharp:1.5904e-04 L1_sharp:1.4881e-02 L2_sharp:3.3667e-03 L3_sharp:1.2278e-03 L4_sharp:1.8753e-03 L5_sharp:1.4724e-03 L6_sharp:1.6122e-03 L7_sharp:1.8448e-03 L8_sharp:1.7516e-03 L9_sharp:1.5784e-03 L10_sharp:2.4933e-03 L11_sharp:4.0134e-03 L12_sharp:5.7730e-02 total_fnorm:2.0400e+02 total_l1_linf:4.9971e+05 total_spectral:1.0250e+02 L1_fnorm:2.4844e+00 L2_fnorm:2.4531e+00 L3_fnorm:2.4531e+00 L4_fnorm:2.4531e+00 L5_fnorm:2.4531e+00 L6_fnorm:2.4688e+00 L7_fnorm:2.4688e+00 L8_fnorm:2.4531e+00 L9_fnorm:2.4844e+00 L10_fnorm:2.4531e+00 L11_fnorm:2.4531e+00 L12_fnorm:2.2969e+00 L1_l1linf:8.0859e-01 L2_l1linf:8.0078e-01 L3_l1linf:7.8125e-01 L4_l1linf:7.7344e-01 L5_l1linf:7.6172e-01 L6_l1linf:7.6953e-01 L7_l1linf:7.6953e-01 L8_l1linf:7.6562e-01 L9_l1linf:7.8906e-01 L10_l1linf:7.8906e-01 L11_l1linf:7.6172e-01 L12_l1linf:6.5234e-01 L1_spectral:2.6988e-02 L2_spectral:2.6861e-02 L3_spectral:2.6760e-02 L4_spectral:2.6718e-02 L5_spectral:2.6687e-02 L6_spectral:2.6751e-02 L7_spectral:2.6730e-02 L8_spectral:2.6835e-02 L9_spectral:2.6718e-02 L10_spectral:2.6874e-02 L11_spectral:2.6798e-02 L12_spectral:2.6725e-02 train_time:62952ms step_avg:78.69ms +[2025-09-11 06:56:03] [Rank 0] step:801/10000 train_time:64728ms step_avg:80.81ms +[2025-09-11 06:56:03] [Rank 0] step:801/10000 train_time:64728ms step_avg:80.81ms +[2025-09-11 06:56:03] [Rank 0] step:821/10000 train_time:65377ms step_avg:79.63ms +[2025-09-11 06:56:03] [Rank 0] step:821/10000 train_time:65377ms step_avg:79.63ms +[2025-09-11 06:56:04] [Rank 0] step:841/10000 train_time:66019ms step_avg:78.50ms +[2025-09-11 06:56:04] [Rank 0] step:841/10000 train_time:66019ms step_avg:78.50ms +[2025-09-11 06:56:05] [Rank 0] step:861/10000 train_time:66663ms step_avg:77.43ms +[2025-09-11 06:56:05] [Rank 0] step:861/10000 train_time:66663ms step_avg:77.43ms +[2025-09-11 06:56:05] [Rank 0] step:881/10000 train_time:67307ms step_avg:76.40ms +[2025-09-11 06:56:05] [Rank 0] step:881/10000 train_time:67307ms step_avg:76.40ms +[2025-09-11 
06:56:06] [Rank 0] step:901/10000 train_time:67950ms step_avg:75.42ms +[2025-09-11 06:56:06] [Rank 0] step:901/10000 train_time:67950ms step_avg:75.42ms +[2025-09-11 06:56:06] [Rank 0] step:921/10000 train_time:68594ms step_avg:74.48ms +[2025-09-11 06:56:06] [Rank 0] step:921/10000 train_time:68594ms step_avg:74.48ms +[2025-09-11 06:56:07] [Rank 0] step:941/10000 train_time:69238ms step_avg:73.58ms +[2025-09-11 06:56:07] [Rank 0] step:941/10000 train_time:69238ms step_avg:73.58ms +[2025-09-11 06:56:08] [Rank 0] step:961/10000 train_time:69881ms step_avg:72.72ms +[2025-09-11 06:56:08] [Rank 0] step:961/10000 train_time:69881ms step_avg:72.72ms +[2025-09-11 06:56:08] [Rank 0] step:981/10000 train_time:70524ms step_avg:71.89ms +[2025-09-11 06:56:08] [Rank 0] step:981/10000 train_time:70524ms step_avg:71.89ms +[2025-09-11 06:56:09] [Rank 0] step:1001/10000 train_time:71167ms step_avg:71.10ms +[2025-09-11 06:56:09] [Rank 0] step:1001/10000 train_time:71167ms step_avg:71.10ms +[2025-09-11 06:56:10] [Rank 0] step:1021/10000 train_time:71810ms step_avg:70.33ms +[2025-09-11 06:56:10] [Rank 0] step:1021/10000 train_time:71810ms step_avg:70.33ms +[2025-09-11 06:56:10] [Rank 0] step:1041/10000 train_time:72453ms step_avg:69.60ms +[2025-09-11 06:56:10] [Rank 0] step:1041/10000 train_time:72453ms step_avg:69.60ms +[2025-09-11 06:56:11] [Rank 0] step:1061/10000 train_time:73095ms step_avg:68.89ms +[2025-09-11 06:56:11] [Rank 0] step:1061/10000 train_time:73095ms step_avg:68.89ms +[2025-09-11 06:56:12] [Rank 0] step:1081/10000 train_time:73738ms step_avg:68.21ms +[2025-09-11 06:56:12] [Rank 0] step:1081/10000 train_time:73738ms step_avg:68.21ms +[2025-09-11 06:56:12] [Rank 0] step:1101/10000 train_time:74380ms step_avg:67.56ms +[2025-09-11 06:56:12] [Rank 0] step:1101/10000 train_time:74380ms step_avg:67.56ms +[2025-09-11 06:56:13] [Rank 0] step:1121/10000 train_time:75023ms step_avg:66.93ms +[2025-09-11 06:56:13] [Rank 0] step:1121/10000 train_time:75023ms step_avg:66.93ms 
+[2025-09-11 06:56:14] [Rank 0] step:1141/10000 train_time:75666ms step_avg:66.32ms +[2025-09-11 06:56:14] [Rank 0] step:1141/10000 train_time:75666ms step_avg:66.32ms +[2025-09-11 06:56:14] [Rank 0] step:1161/10000 train_time:76309ms step_avg:65.73ms +[2025-09-11 06:56:14] [Rank 0] step:1161/10000 train_time:76309ms step_avg:65.73ms +[2025-09-11 06:56:15] [Rank 0] step:1181/10000 train_time:76951ms step_avg:65.16ms +[2025-09-11 06:56:15] [Rank 0] step:1181/10000 train_time:76951ms step_avg:65.16ms +[2025-09-11 06:56:15] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 06:56:15] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 06:56:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 06:56:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 06:56:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 06:56:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 06:56:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:56:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:56:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 06:56:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 06:56:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 06:56:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 06:56:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... 
+[2025-09-11 06:56:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 06:56:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 06:56:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 06:56:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 06:56:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 06:56:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 06:56:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 06:56:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 06:56:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 06:56:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 06:56:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 06:56:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 06:56:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 06:56:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 06:56:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 06:56:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 06:56:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 06:56:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 06:56:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... 
+[2025-09-11 06:56:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 06:56:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 06:56:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 06:56:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 06:56:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 06:56:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 06:56:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 06:56:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 06:56:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 06:56:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 06:56:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 06:56:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 06:56:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 06:56:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:56:26] [Rank 0] PRINT: step:1200/10000 val_loss:5.1245 total_sharp:1.0688e-04 L1_sharp:1.0848e-02 L2_sharp:1.9349e-03 L3_sharp:4.3759e-04 L4_sharp:1.1478e-03 L5_sharp:1.4737e-03 L6_sharp:1.1055e-03 L7_sharp:1.0856e-03 L8_sharp:1.4315e-03 L9_sharp:1.2024e-03 L10_sharp:1.4118e-03 L11_sharp:2.5975e-03 L12_sharp:2.3650e-02 total_fnorm:2.0600e+02 total_l1_linf:4.9152e+05 total_spectral:1.0350e+02 L1_fnorm:2.5000e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.4688e+00 L4_fnorm:2.4688e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5000e+00 L7_fnorm:2.4844e+00 L8_fnorm:2.4844e+00 L9_fnorm:2.5000e+00 L10_fnorm:2.4844e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.4844e+00 L1_l1linf:7.4609e-01 L2_l1linf:7.3047e-01 L3_l1linf:7.1094e-01 L4_l1linf:7.0312e-01 L5_l1linf:6.9531e-01 L6_l1linf:7.0312e-01 L7_l1linf:7.0703e-01 L8_l1linf:7.1094e-01 L9_l1linf:7.0312e-01 L10_l1linf:7.1484e-01 L11_l1linf:7.1875e-01 L12_l1linf:7.1094e-01 L1_spectral:2.7907e-02 L2_spectral:2.7685e-02 L3_spectral:2.7714e-02 L4_spectral:2.7627e-02 L5_spectral:2.7758e-02 L6_spectral:2.7776e-02 L7_spectral:2.7678e-02 L8_spectral:2.8073e-02 L9_spectral:2.7957e-02 L10_spectral:2.8061e-02 L11_spectral:2.8001e-02 L12_spectral:2.8901e-02 train_time:77577ms step_avg:64.65ms +[2025-09-11 06:56:26] [Rank 0] PRINT: step:1200/10000 val_loss:5.1245 total_sharp:1.0688e-04 L1_sharp:1.0848e-02 L2_sharp:1.9349e-03 L3_sharp:4.3759e-04 L4_sharp:1.1478e-03 L5_sharp:1.4737e-03 L6_sharp:1.1055e-03 L7_sharp:1.0856e-03 L8_sharp:1.4315e-03 L9_sharp:1.2024e-03 L10_sharp:1.4118e-03 L11_sharp:2.5975e-03 L12_sharp:2.3650e-02 total_fnorm:2.0600e+02 total_l1_linf:4.9152e+05 total_spectral:1.0350e+02 L1_fnorm:2.5000e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.4688e+00 L4_fnorm:2.4688e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5000e+00 L7_fnorm:2.4844e+00 L8_fnorm:2.4844e+00 L9_fnorm:2.5000e+00 L10_fnorm:2.4844e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.4844e+00 L1_l1linf:7.4609e-01 L2_l1linf:7.3047e-01 L3_l1linf:7.1094e-01 L4_l1linf:7.0312e-01 L5_l1linf:6.9531e-01 
L6_l1linf:7.0312e-01 L7_l1linf:7.0703e-01 L8_l1linf:7.1094e-01 L9_l1linf:7.0312e-01 L10_l1linf:7.1484e-01 L11_l1linf:7.1875e-01 L12_l1linf:7.1094e-01 L1_spectral:2.7907e-02 L2_spectral:2.7685e-02 L3_spectral:2.7714e-02 L4_spectral:2.7627e-02 L5_spectral:2.7758e-02 L6_spectral:2.7776e-02 L7_spectral:2.7678e-02 L8_spectral:2.8073e-02 L9_spectral:2.7957e-02 L10_spectral:2.8061e-02 L11_spectral:2.8001e-02 L12_spectral:2.8901e-02 train_time:77577ms step_avg:64.65ms +[2025-09-11 06:56:28] [Rank 0] step:1201/10000 train_time:79466ms step_avg:66.17ms +[2025-09-11 06:56:28] [Rank 0] step:1201/10000 train_time:79466ms step_avg:66.17ms +[2025-09-11 06:56:29] [Rank 0] step:1221/10000 train_time:80113ms step_avg:65.61ms +[2025-09-11 06:56:29] [Rank 0] step:1221/10000 train_time:80113ms step_avg:65.61ms +[2025-09-11 06:56:30] [Rank 0] step:1241/10000 train_time:80757ms step_avg:65.07ms +[2025-09-11 06:56:30] [Rank 0] step:1241/10000 train_time:80757ms step_avg:65.07ms +[2025-09-11 06:56:30] [Rank 0] step:1261/10000 train_time:81401ms step_avg:64.55ms +[2025-09-11 06:56:30] [Rank 0] step:1261/10000 train_time:81401ms step_avg:64.55ms +[2025-09-11 06:56:31] [Rank 0] step:1281/10000 train_time:82046ms step_avg:64.05ms +[2025-09-11 06:56:31] [Rank 0] step:1281/10000 train_time:82046ms step_avg:64.05ms +[2025-09-11 06:56:31] [Rank 0] step:1301/10000 train_time:82689ms step_avg:63.56ms +[2025-09-11 06:56:31] [Rank 0] step:1301/10000 train_time:82689ms step_avg:63.56ms +[2025-09-11 06:56:32] [Rank 0] step:1321/10000 train_time:83333ms step_avg:63.08ms +[2025-09-11 06:56:32] [Rank 0] step:1321/10000 train_time:83333ms step_avg:63.08ms +[2025-09-11 06:56:33] [Rank 0] step:1341/10000 train_time:83977ms step_avg:62.62ms +[2025-09-11 06:56:33] [Rank 0] step:1341/10000 train_time:83977ms step_avg:62.62ms +[2025-09-11 06:56:33] [Rank 0] step:1361/10000 train_time:84620ms step_avg:62.17ms +[2025-09-11 06:56:33] [Rank 0] step:1361/10000 train_time:84620ms step_avg:62.17ms +[2025-09-11 06:56:34] 
[Rank 0] step:1381/10000 train_time:85263ms step_avg:61.74ms +[2025-09-11 06:56:34] [Rank 0] step:1381/10000 train_time:85263ms step_avg:61.74ms +[2025-09-11 06:56:35] [Rank 0] step:1401/10000 train_time:85908ms step_avg:61.32ms +[2025-09-11 06:56:35] [Rank 0] step:1401/10000 train_time:85908ms step_avg:61.32ms +[2025-09-11 06:56:35] [Rank 0] step:1421/10000 train_time:86551ms step_avg:60.91ms +[2025-09-11 06:56:35] [Rank 0] step:1421/10000 train_time:86551ms step_avg:60.91ms +[2025-09-11 06:56:36] [Rank 0] step:1441/10000 train_time:87194ms step_avg:60.51ms +[2025-09-11 06:56:36] [Rank 0] step:1441/10000 train_time:87194ms step_avg:60.51ms +[2025-09-11 06:56:37] [Rank 0] step:1461/10000 train_time:87837ms step_avg:60.12ms +[2025-09-11 06:56:37] [Rank 0] step:1461/10000 train_time:87837ms step_avg:60.12ms +[2025-09-11 06:56:37] [Rank 0] step:1481/10000 train_time:88480ms step_avg:59.74ms +[2025-09-11 06:56:37] [Rank 0] step:1481/10000 train_time:88480ms step_avg:59.74ms +[2025-09-11 06:56:38] [Rank 0] step:1501/10000 train_time:89127ms step_avg:59.38ms +[2025-09-11 06:56:38] [Rank 0] step:1501/10000 train_time:89127ms step_avg:59.38ms +[2025-09-11 06:56:39] [Rank 0] step:1521/10000 train_time:89774ms step_avg:59.02ms +[2025-09-11 06:56:39] [Rank 0] step:1521/10000 train_time:89774ms step_avg:59.02ms +[2025-09-11 06:56:39] [Rank 0] step:1541/10000 train_time:90421ms step_avg:58.68ms +[2025-09-11 06:56:39] [Rank 0] step:1541/10000 train_time:90421ms step_avg:58.68ms +[2025-09-11 06:56:40] [Rank 0] step:1561/10000 train_time:91068ms step_avg:58.34ms +[2025-09-11 06:56:40] [Rank 0] step:1561/10000 train_time:91068ms step_avg:58.34ms +[2025-09-11 06:56:41] [Rank 0] step:1581/10000 train_time:91715ms step_avg:58.01ms +[2025-09-11 06:56:41] [Rank 0] step:1581/10000 train_time:91715ms step_avg:58.01ms +[2025-09-11 06:56:41] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 06:56:41] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 06:56:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 06:56:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 06:56:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 06:56:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 06:56:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:56:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:56:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 06:56:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 06:56:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 06:56:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 06:56:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 06:56:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 06:56:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 06:56:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 06:56:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 06:56:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 06:56:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 06:56:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 06:56:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 06:56:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 06:56:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 06:56:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 06:56:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 06:56:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 06:56:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 06:56:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 06:56:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 06:56:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 06:56:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 06:56:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 06:56:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 06:56:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 06:56:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 06:56:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 06:56:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 06:56:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 06:56:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 06:56:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 06:56:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 06:56:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 06:56:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 06:56:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 06:56:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 06:56:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 06:56:52] [Rank 0] PRINT: step:1600/10000 val_loss:4.9762 total_sharp:9.3405e-05 L1_sharp:7.9970e-03 L2_sharp:6.8991e-04 L3_sharp:5.0197e-04 L4_sharp:9.9389e-04 L5_sharp:6.9157e-04 L6_sharp:6.5894e-04 L7_sharp:6.7195e-04 L8_sharp:9.4723e-04 L9_sharp:1.0806e-03 L10_sharp:1.2196e-03 L11_sharp:2.2797e-03 L12_sharp:2.7164e-02 total_fnorm:1.9100e+02 total_l1_linf:4.4032e+05 total_spectral:9.5500e+01 L1_fnorm:2.5000e+00 L2_fnorm:2.4531e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.4688e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5000e+00 L8_fnorm:2.5000e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.5000e+00 L1_l1linf:7.3047e-01 L2_l1linf:7.1094e-01 L3_l1linf:6.8750e-01 L4_l1linf:6.8750e-01 L5_l1linf:6.7188e-01 L6_l1linf:6.7578e-01 L7_l1linf:6.7578e-01 L8_l1linf:6.7188e-01 L9_l1linf:6.7969e-01 L10_l1linf:6.8359e-01 L11_l1linf:7.0703e-01 L12_l1linf:6.9531e-01 L1_spectral:2.8803e-02 L2_spectral:2.8378e-02 L3_spectral:2.8368e-02 L4_spectral:2.8387e-02 L5_spectral:2.8686e-02 L6_spectral:2.8419e-02 L7_spectral:2.8615e-02 L8_spectral:2.9297e-02 L9_spectral:2.8799e-02 L10_spectral:2.8851e-02 L11_spectral:2.8890e-02 L12_spectral:2.9792e-02 train_time:92343ms step_avg:57.71ms +[2025-09-11 06:56:52] [Rank 0] PRINT: step:1600/10000 
val_loss:4.9762 total_sharp:9.3405e-05 L1_sharp:7.9970e-03 L2_sharp:6.8991e-04 L3_sharp:5.0197e-04 L4_sharp:9.9389e-04 L5_sharp:6.9157e-04 L6_sharp:6.5894e-04 L7_sharp:6.7195e-04 L8_sharp:9.4723e-04 L9_sharp:1.0806e-03 L10_sharp:1.2196e-03 L11_sharp:2.2797e-03 L12_sharp:2.7164e-02 total_fnorm:1.9100e+02 total_l1_linf:4.4032e+05 total_spectral:9.5500e+01 L1_fnorm:2.5000e+00 L2_fnorm:2.4531e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.4688e+00 L5_fnorm:2.4844e+00 L6_fnorm:2.5156e+00 L7_fnorm:2.5000e+00 L8_fnorm:2.5000e+00 L9_fnorm:2.5156e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.5000e+00 L1_l1linf:7.3047e-01 L2_l1linf:7.1094e-01 L3_l1linf:6.8750e-01 L4_l1linf:6.8750e-01 L5_l1linf:6.7188e-01 L6_l1linf:6.7578e-01 L7_l1linf:6.7578e-01 L8_l1linf:6.7188e-01 L9_l1linf:6.7969e-01 L10_l1linf:6.8359e-01 L11_l1linf:7.0703e-01 L12_l1linf:6.9531e-01 L1_spectral:2.8803e-02 L2_spectral:2.8378e-02 L3_spectral:2.8368e-02 L4_spectral:2.8387e-02 L5_spectral:2.8686e-02 L6_spectral:2.8419e-02 L7_spectral:2.8615e-02 L8_spectral:2.9297e-02 L9_spectral:2.8799e-02 L10_spectral:2.8851e-02 L11_spectral:2.8890e-02 L12_spectral:2.9792e-02 train_time:92343ms step_avg:57.71ms +[2025-09-11 06:56:53] [Rank 0] step:1601/10000 train_time:94107ms step_avg:58.78ms +[2025-09-11 06:56:53] [Rank 0] step:1601/10000 train_time:94107ms step_avg:58.78ms +[2025-09-11 06:56:54] [Rank 0] step:1621/10000 train_time:94788ms step_avg:58.47ms +[2025-09-11 06:56:54] [Rank 0] step:1621/10000 train_time:94788ms step_avg:58.47ms +[2025-09-11 06:56:55] [Rank 0] step:1641/10000 train_time:95437ms step_avg:58.16ms +[2025-09-11 06:56:55] [Rank 0] step:1641/10000 train_time:95437ms step_avg:58.16ms +[2025-09-11 06:56:55] [Rank 0] step:1661/10000 train_time:96086ms step_avg:57.85ms +[2025-09-11 06:56:55] [Rank 0] step:1661/10000 train_time:96086ms step_avg:57.85ms +[2025-09-11 06:56:56] [Rank 0] step:1681/10000 train_time:96736ms step_avg:57.55ms +[2025-09-11 06:56:56] [Rank 0] step:1681/10000 train_time:96736ms 
step_avg:57.55ms +[2025-09-11 06:56:57] [Rank 0] step:1701/10000 train_time:97385ms step_avg:57.25ms +[2025-09-11 06:56:57] [Rank 0] step:1701/10000 train_time:97385ms step_avg:57.25ms +[2025-09-11 06:56:57] [Rank 0] step:1721/10000 train_time:98034ms step_avg:56.96ms +[2025-09-11 06:56:57] [Rank 0] step:1721/10000 train_time:98034ms step_avg:56.96ms +[2025-09-11 06:56:58] [Rank 0] step:1741/10000 train_time:98690ms step_avg:56.69ms +[2025-09-11 06:56:58] [Rank 0] step:1741/10000 train_time:98690ms step_avg:56.69ms +[2025-09-11 06:56:59] [Rank 0] step:1761/10000 train_time:99339ms step_avg:56.41ms +[2025-09-11 06:56:59] [Rank 0] step:1761/10000 train_time:99339ms step_avg:56.41ms +[2025-09-11 06:56:59] [Rank 0] step:1781/10000 train_time:99987ms step_avg:56.14ms +[2025-09-11 06:56:59] [Rank 0] step:1781/10000 train_time:99987ms step_avg:56.14ms +[2025-09-11 06:57:00] [Rank 0] step:1801/10000 train_time:100636ms step_avg:55.88ms +[2025-09-11 06:57:00] [Rank 0] step:1801/10000 train_time:100636ms step_avg:55.88ms +[2025-09-11 06:57:01] [Rank 0] step:1821/10000 train_time:101285ms step_avg:55.62ms +[2025-09-11 06:57:01] [Rank 0] step:1821/10000 train_time:101285ms step_avg:55.62ms +[2025-09-11 06:57:01] [Rank 0] step:1841/10000 train_time:101933ms step_avg:55.37ms +[2025-09-11 06:57:01] [Rank 0] step:1841/10000 train_time:101933ms step_avg:55.37ms +[2025-09-11 06:57:02] [Rank 0] step:1861/10000 train_time:102581ms step_avg:55.12ms +[2025-09-11 06:57:02] [Rank 0] step:1861/10000 train_time:102581ms step_avg:55.12ms +[2025-09-11 06:57:03] [Rank 0] step:1881/10000 train_time:103230ms step_avg:54.88ms +[2025-09-11 06:57:03] [Rank 0] step:1881/10000 train_time:103230ms step_avg:54.88ms +[2025-09-11 06:57:03] [Rank 0] step:1901/10000 train_time:103879ms step_avg:54.64ms +[2025-09-11 06:57:03] [Rank 0] step:1901/10000 train_time:103879ms step_avg:54.64ms +[2025-09-11 06:57:04] [Rank 0] step:1921/10000 train_time:104527ms step_avg:54.41ms +[2025-09-11 06:57:04] [Rank 0] 
step:1921/10000 train_time:104527ms step_avg:54.41ms +[2025-09-11 06:57:04] [Rank 0] step:1941/10000 train_time:105175ms step_avg:54.19ms +[2025-09-11 06:57:04] [Rank 0] step:1941/10000 train_time:105175ms step_avg:54.19ms +[2025-09-11 06:57:05] [Rank 0] step:1961/10000 train_time:105823ms step_avg:53.96ms +[2025-09-11 06:57:05] [Rank 0] step:1961/10000 train_time:105823ms step_avg:53.96ms +[2025-09-11 06:57:06] [Rank 0] step:1981/10000 train_time:106472ms step_avg:53.75ms +[2025-09-11 06:57:06] [Rank 0] step:1981/10000 train_time:106472ms step_avg:53.75ms +[2025-09-11 06:57:06] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 06:57:06] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 06:57:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 06:57:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 06:57:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 06:57:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 06:57:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:57:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:57:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 06:57:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 06:57:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 06:57:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 06:57:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 06:57:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 06:57:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 06:57:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 06:57:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 06:57:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 06:57:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 06:57:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 06:57:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 06:57:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 06:57:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 06:57:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 06:57:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 06:57:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 06:57:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 06:57:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 06:57:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 06:57:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 06:57:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 06:57:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 06:57:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 06:57:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 06:57:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 06:57:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 06:57:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 06:57:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 06:57:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 06:57:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 06:57:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 06:57:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 06:57:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 06:57:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 06:57:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 06:57:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:57:17] [Rank 0] PRINT: step:2000/10000 val_loss:4.8355 total_sharp:7.7372e-05 L1_sharp:6.0671e-03 L2_sharp:9.2035e-04 L3_sharp:2.7708e-04 L4_sharp:1.0271e-03 L5_sharp:9.7186e-04 L6_sharp:6.7236e-04 L7_sharp:7.1841e-04 L8_sharp:1.0611e-03 L9_sharp:8.4745e-04 L10_sharp:1.2654e-03 L11_sharp:1.8123e-03 L12_sharp:2.2289e-02 total_fnorm:1.9300e+02 total_l1_linf:4.5875e+05 total_spectral:9.6500e+01 L1_fnorm:2.5156e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.4844e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.5000e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.5156e+00 L1_l1linf:7.1875e-01 L2_l1linf:6.7969e-01 L3_l1linf:6.6406e-01 L4_l1linf:6.6406e-01 L5_l1linf:6.5625e-01 L6_l1linf:6.6797e-01 L7_l1linf:6.6797e-01 L8_l1linf:6.6797e-01 L9_l1linf:6.6406e-01 L10_l1linf:6.7188e-01 L11_l1linf:6.6797e-01 L12_l1linf:6.9141e-01 L1_spectral:2.9468e-02 L2_spectral:2.9014e-02 L3_spectral:2.8927e-02 L4_spectral:2.8884e-02 L5_spectral:2.9248e-02 L6_spectral:2.9089e-02 L7_spectral:2.9092e-02 L8_spectral:2.9981e-02 L9_spectral:2.9428e-02 L10_spectral:2.9634e-02 L11_spectral:2.9499e-02 L12_spectral:3.0124e-02 train_time:107103ms step_avg:53.55ms +[2025-09-11 06:57:17] [Rank 0] PRINT: step:2000/10000 val_loss:4.8355 total_sharp:7.7372e-05 L1_sharp:6.0671e-03 L2_sharp:9.2035e-04 L3_sharp:2.7708e-04 L4_sharp:1.0271e-03 L5_sharp:9.7186e-04 L6_sharp:6.7236e-04 L7_sharp:7.1841e-04 L8_sharp:1.0611e-03 L9_sharp:8.4745e-04 L10_sharp:1.2654e-03 L11_sharp:1.8123e-03 L12_sharp:2.2289e-02 total_fnorm:1.9300e+02 total_l1_linf:4.5875e+05 total_spectral:9.6500e+01 L1_fnorm:2.5156e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.4844e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.5000e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.5156e+00 L1_l1linf:7.1875e-01 L2_l1linf:6.7969e-01 L3_l1linf:6.6406e-01 L4_l1linf:6.6406e-01 L5_l1linf:6.5625e-01 
L6_l1linf:6.6797e-01 L7_l1linf:6.6797e-01 L8_l1linf:6.6797e-01 L9_l1linf:6.6406e-01 L10_l1linf:6.7188e-01 L11_l1linf:6.6797e-01 L12_l1linf:6.9141e-01 L1_spectral:2.9468e-02 L2_spectral:2.9014e-02 L3_spectral:2.8927e-02 L4_spectral:2.8884e-02 L5_spectral:2.9248e-02 L6_spectral:2.9089e-02 L7_spectral:2.9092e-02 L8_spectral:2.9981e-02 L9_spectral:2.9428e-02 L10_spectral:2.9634e-02 L11_spectral:2.9499e-02 L12_spectral:3.0124e-02 train_time:107103ms step_avg:53.55ms +[2025-09-11 06:57:19] [Rank 0] step:2001/10000 train_time:108942ms step_avg:54.44ms +[2025-09-11 06:57:19] [Rank 0] step:2001/10000 train_time:108942ms step_avg:54.44ms +[2025-09-11 06:57:19] [Rank 0] step:2021/10000 train_time:109629ms step_avg:54.24ms +[2025-09-11 06:57:19] [Rank 0] step:2021/10000 train_time:109629ms step_avg:54.24ms +[2025-09-11 06:57:20] [Rank 0] step:2041/10000 train_time:110278ms step_avg:54.03ms +[2025-09-11 06:57:20] [Rank 0] step:2041/10000 train_time:110278ms step_avg:54.03ms +[2025-09-11 06:57:21] [Rank 0] step:2061/10000 train_time:110928ms step_avg:53.82ms +[2025-09-11 06:57:21] [Rank 0] step:2061/10000 train_time:110928ms step_avg:53.82ms +[2025-09-11 06:57:21] [Rank 0] step:2081/10000 train_time:111576ms step_avg:53.62ms +[2025-09-11 06:57:21] [Rank 0] step:2081/10000 train_time:111576ms step_avg:53.62ms +[2025-09-11 06:57:22] [Rank 0] step:2101/10000 train_time:112225ms step_avg:53.42ms +[2025-09-11 06:57:22] [Rank 0] step:2101/10000 train_time:112225ms step_avg:53.42ms +[2025-09-11 06:57:23] [Rank 0] step:2121/10000 train_time:112874ms step_avg:53.22ms +[2025-09-11 06:57:23] [Rank 0] step:2121/10000 train_time:112874ms step_avg:53.22ms +[2025-09-11 06:57:23] [Rank 0] step:2141/10000 train_time:113524ms step_avg:53.02ms +[2025-09-11 06:57:23] [Rank 0] step:2141/10000 train_time:113524ms step_avg:53.02ms +[2025-09-11 06:57:24] [Rank 0] step:2161/10000 train_time:114173ms step_avg:52.83ms +[2025-09-11 06:57:24] [Rank 0] step:2161/10000 train_time:114173ms step_avg:52.83ms 
+[2025-09-11 06:57:24] [Rank 0] step:2181/10000 train_time:114822ms step_avg:52.65ms +[2025-09-11 06:57:24] [Rank 0] step:2181/10000 train_time:114822ms step_avg:52.65ms +[2025-09-11 06:57:25] [Rank 0] step:2201/10000 train_time:115472ms step_avg:52.46ms +[2025-09-11 06:57:25] [Rank 0] step:2201/10000 train_time:115472ms step_avg:52.46ms +[2025-09-11 06:57:26] [Rank 0] step:2221/10000 train_time:116120ms step_avg:52.28ms +[2025-09-11 06:57:26] [Rank 0] step:2221/10000 train_time:116120ms step_avg:52.28ms +[2025-09-11 06:57:26] [Rank 0] step:2241/10000 train_time:116780ms step_avg:52.11ms +[2025-09-11 06:57:26] [Rank 0] step:2241/10000 train_time:116780ms step_avg:52.11ms +[2025-09-11 06:57:27] [Rank 0] step:2261/10000 train_time:117442ms step_avg:51.94ms +[2025-09-11 06:57:27] [Rank 0] step:2261/10000 train_time:117442ms step_avg:51.94ms +[2025-09-11 06:57:28] [Rank 0] step:2281/10000 train_time:118103ms step_avg:51.78ms +[2025-09-11 06:57:28] [Rank 0] step:2281/10000 train_time:118103ms step_avg:51.78ms +[2025-09-11 06:57:29] [Rank 0] step:2301/10000 train_time:119279ms step_avg:51.84ms +[2025-09-11 06:57:29] [Rank 0] step:2301/10000 train_time:119279ms step_avg:51.84ms +[2025-09-11 06:57:30] [Rank 0] step:2321/10000 train_time:119941ms step_avg:51.68ms +[2025-09-11 06:57:30] [Rank 0] step:2321/10000 train_time:119941ms step_avg:51.68ms +[2025-09-11 06:57:30] [Rank 0] step:2341/10000 train_time:120603ms step_avg:51.52ms +[2025-09-11 06:57:30] [Rank 0] step:2341/10000 train_time:120603ms step_avg:51.52ms +[2025-09-11 06:57:31] [Rank 0] step:2361/10000 train_time:121542ms step_avg:51.48ms +[2025-09-11 06:57:31] [Rank 0] step:2361/10000 train_time:121542ms step_avg:51.48ms +[2025-09-11 06:57:32] [Rank 0] step:2381/10000 train_time:122205ms step_avg:51.33ms +[2025-09-11 06:57:32] [Rank 0] step:2381/10000 train_time:122205ms step_avg:51.33ms +[2025-09-11 06:57:32] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 06:57:32] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 06:57:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 06:57:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 06:57:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 06:57:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 06:57:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:57:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:57:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 06:57:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 06:57:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 06:57:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 06:57:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 06:57:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 06:57:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 06:57:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 06:57:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 06:57:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 06:57:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 06:57:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 06:57:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 06:57:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 06:57:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 06:57:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 06:57:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 06:57:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 06:57:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 06:57:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 06:57:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 06:57:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 06:57:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 06:57:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 06:57:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 06:57:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 06:57:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 06:57:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 06:57:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 06:57:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 06:57:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 06:57:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 06:57:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 06:57:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 06:57:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 06:57:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 06:57:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 06:57:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 06:57:45] [Rank 0] PRINT: step:2400/10000 val_loss:4.6981 total_sharp:7.3967e-05 L1_sharp:5.7710e-03 L2_sharp:4.3494e-04 L3_sharp:3.6712e-04 L4_sharp:5.2776e-04 L5_sharp:8.6277e-04 L6_sharp:5.5427e-04 L7_sharp:6.6910e-04 L8_sharp:8.6296e-04 L9_sharp:7.3392e-04 L10_sharp:8.9062e-04 L11_sharp:1.4335e-03 L12_sharp:2.2328e-02 total_fnorm:1.8600e+02 total_l1_linf:4.2598e+05 total_spectral:9.3000e+01 L1_fnorm:2.5000e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.4844e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5312e+00 L8_fnorm:2.5156e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.5156e+00 L1_l1linf:7.1875e-01 L2_l1linf:6.6016e-01 L3_l1linf:6.4844e-01 L4_l1linf:6.5234e-01 L5_l1linf:6.5625e-01 L6_l1linf:6.6016e-01 L7_l1linf:6.5625e-01 L8_l1linf:6.5625e-01 L9_l1linf:6.5625e-01 L10_l1linf:6.5234e-01 L11_l1linf:6.5625e-01 L12_l1linf:6.7188e-01 L1_spectral:3.0102e-02 L2_spectral:2.9421e-02 L3_spectral:2.9595e-02 L4_spectral:2.9447e-02 L5_spectral:2.9745e-02 L6_spectral:2.9717e-02 L7_spectral:2.9718e-02 L8_spectral:3.0555e-02 L9_spectral:2.9889e-02 L10_spectral:3.0097e-02 L11_spectral:3.0244e-02 L12_spectral:3.0817e-02 train_time:122846ms step_avg:51.19ms +[2025-09-11 06:57:45] [Rank 0] PRINT: step:2400/10000 
val_loss:4.6981 total_sharp:7.3967e-05 L1_sharp:5.7710e-03 L2_sharp:4.3494e-04 L3_sharp:3.6712e-04 L4_sharp:5.2776e-04 L5_sharp:8.6277e-04 L6_sharp:5.5427e-04 L7_sharp:6.6910e-04 L8_sharp:8.6296e-04 L9_sharp:7.3392e-04 L10_sharp:8.9062e-04 L11_sharp:1.4335e-03 L12_sharp:2.2328e-02 total_fnorm:1.8600e+02 total_l1_linf:4.2598e+05 total_spectral:9.3000e+01 L1_fnorm:2.5000e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.4844e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5312e+00 L8_fnorm:2.5156e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.5156e+00 L1_l1linf:7.1875e-01 L2_l1linf:6.6016e-01 L3_l1linf:6.4844e-01 L4_l1linf:6.5234e-01 L5_l1linf:6.5625e-01 L6_l1linf:6.6016e-01 L7_l1linf:6.5625e-01 L8_l1linf:6.5625e-01 L9_l1linf:6.5625e-01 L10_l1linf:6.5234e-01 L11_l1linf:6.5625e-01 L12_l1linf:6.7188e-01 L1_spectral:3.0102e-02 L2_spectral:2.9421e-02 L3_spectral:2.9595e-02 L4_spectral:2.9447e-02 L5_spectral:2.9745e-02 L6_spectral:2.9717e-02 L7_spectral:2.9718e-02 L8_spectral:3.0555e-02 L9_spectral:2.9889e-02 L10_spectral:3.0097e-02 L11_spectral:3.0244e-02 L12_spectral:3.0817e-02 train_time:122846ms step_avg:51.19ms +[2025-09-11 06:57:47] [Rank 0] step:2401/10000 train_time:124803ms step_avg:51.98ms +[2025-09-11 06:57:47] [Rank 0] step:2401/10000 train_time:124803ms step_avg:51.98ms +[2025-09-11 06:57:47] [Rank 0] step:2421/10000 train_time:125470ms step_avg:51.83ms +[2025-09-11 06:57:47] [Rank 0] step:2421/10000 train_time:125470ms step_avg:51.83ms +[2025-09-11 06:57:48] [Rank 0] step:2441/10000 train_time:126132ms step_avg:51.67ms +[2025-09-11 06:57:48] [Rank 0] step:2441/10000 train_time:126132ms step_avg:51.67ms +[2025-09-11 06:57:49] [Rank 0] step:2461/10000 train_time:126793ms step_avg:51.52ms +[2025-09-11 06:57:49] [Rank 0] step:2461/10000 train_time:126793ms step_avg:51.52ms +[2025-09-11 06:57:49] [Rank 0] step:2481/10000 train_time:127456ms step_avg:51.37ms +[2025-09-11 06:57:49] [Rank 0] step:2481/10000 
train_time:127456ms step_avg:51.37ms +[2025-09-11 06:57:50] [Rank 0] step:2501/10000 train_time:128118ms step_avg:51.23ms +[2025-09-11 06:57:50] [Rank 0] step:2501/10000 train_time:128118ms step_avg:51.23ms +[2025-09-11 06:57:50] [Rank 0] step:2521/10000 train_time:128780ms step_avg:51.08ms +[2025-09-11 06:57:50] [Rank 0] step:2521/10000 train_time:128780ms step_avg:51.08ms +[2025-09-11 06:57:51] [Rank 0] step:2541/10000 train_time:129442ms step_avg:50.94ms +[2025-09-11 06:57:51] [Rank 0] step:2541/10000 train_time:129442ms step_avg:50.94ms +[2025-09-11 06:57:52] [Rank 0] step:2561/10000 train_time:130104ms step_avg:50.80ms +[2025-09-11 06:57:52] [Rank 0] step:2561/10000 train_time:130104ms step_avg:50.80ms +[2025-09-11 06:57:52] [Rank 0] step:2581/10000 train_time:130766ms step_avg:50.66ms +[2025-09-11 06:57:52] [Rank 0] step:2581/10000 train_time:130766ms step_avg:50.66ms +[2025-09-11 06:57:53] [Rank 0] step:2601/10000 train_time:131427ms step_avg:50.53ms +[2025-09-11 06:57:53] [Rank 0] step:2601/10000 train_time:131427ms step_avg:50.53ms +[2025-09-11 06:57:54] [Rank 0] step:2621/10000 train_time:132089ms step_avg:50.40ms +[2025-09-11 06:57:54] [Rank 0] step:2621/10000 train_time:132089ms step_avg:50.40ms +[2025-09-11 06:57:54] [Rank 0] step:2641/10000 train_time:132752ms step_avg:50.27ms +[2025-09-11 06:57:54] [Rank 0] step:2641/10000 train_time:132752ms step_avg:50.27ms +[2025-09-11 06:57:55] [Rank 0] step:2661/10000 train_time:133414ms step_avg:50.14ms +[2025-09-11 06:57:55] [Rank 0] step:2661/10000 train_time:133414ms step_avg:50.14ms +[2025-09-11 06:57:56] [Rank 0] step:2681/10000 train_time:134075ms step_avg:50.01ms +[2025-09-11 06:57:56] [Rank 0] step:2681/10000 train_time:134075ms step_avg:50.01ms +[2025-09-11 06:57:56] [Rank 0] step:2701/10000 train_time:134736ms step_avg:49.88ms +[2025-09-11 06:57:56] [Rank 0] step:2701/10000 train_time:134736ms step_avg:49.88ms +[2025-09-11 06:57:57] [Rank 0] step:2721/10000 train_time:135397ms step_avg:49.76ms 
+[2025-09-11 06:57:57] [Rank 0] step:2721/10000 train_time:135397ms step_avg:49.76ms +[2025-09-11 06:57:58] [Rank 0] step:2741/10000 train_time:136059ms step_avg:49.64ms +[2025-09-11 06:57:58] [Rank 0] step:2741/10000 train_time:136059ms step_avg:49.64ms +[2025-09-11 06:57:58] [Rank 0] step:2761/10000 train_time:136720ms step_avg:49.52ms +[2025-09-11 06:57:58] [Rank 0] step:2761/10000 train_time:136720ms step_avg:49.52ms +[2025-09-11 06:57:59] [Rank 0] step:2781/10000 train_time:137381ms step_avg:49.40ms +[2025-09-11 06:57:59] [Rank 0] step:2781/10000 train_time:137381ms step_avg:49.40ms +[2025-09-11 06:58:00] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 06:58:00] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 06:58:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 06:58:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 06:58:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 06:58:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 06:58:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:58:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:58:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 06:58:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 06:58:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 06:58:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 06:58:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 06:58:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 06:58:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 06:58:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 06:58:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 06:58:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 06:58:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 06:58:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 06:58:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 06:58:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 06:58:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 06:58:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 06:58:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 06:58:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 06:58:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 06:58:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 06:58:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 06:58:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 06:58:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 06:58:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 06:58:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 06:58:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 06:58:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 06:58:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 06:58:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 06:58:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 06:58:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 06:58:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 06:58:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 06:58:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 06:58:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 06:58:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 06:58:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 06:58:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:58:10] [Rank 0] PRINT: step:2800/10000 val_loss:4.6590 total_sharp:7.1206e-05 L1_sharp:3.8542e-03 L2_sharp:6.6702e-04 L3_sharp:3.3606e-04 L4_sharp:6.4193e-04 L5_sharp:4.6870e-04 L6_sharp:4.1859e-04 L7_sharp:6.6154e-04 L8_sharp:1.0824e-03 L9_sharp:7.6975e-04 L10_sharp:9.9500e-04 L11_sharp:1.4075e-03 L12_sharp:1.7820e-02 total_fnorm:1.8100e+02 total_l1_linf:4.1165e+05 total_spectral:9.0500e+01 L1_fnorm:2.5000e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.4844e+00 L5_fnorm:2.5156e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5312e+00 L8_fnorm:2.5156e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.5312e+00 L1_l1linf:7.0312e-01 L2_l1linf:6.5234e-01 L3_l1linf:6.4844e-01 L4_l1linf:6.4844e-01 L5_l1linf:6.5625e-01 L6_l1linf:6.5234e-01 L7_l1linf:6.4844e-01 L8_l1linf:6.5234e-01 L9_l1linf:6.5234e-01 L10_l1linf:6.5234e-01 L11_l1linf:6.4844e-01 L12_l1linf:6.7578e-01 L1_spectral:3.0653e-02 L2_spectral:2.9612e-02 L3_spectral:2.9845e-02 L4_spectral:2.9839e-02 L5_spectral:3.0060e-02 L6_spectral:3.0013e-02 L7_spectral:2.9983e-02 L8_spectral:3.0903e-02 L9_spectral:3.0625e-02 L10_spectral:3.0401e-02 L11_spectral:3.0531e-02 L12_spectral:3.1009e-02 train_time:138023ms step_avg:49.29ms +[2025-09-11 06:58:10] [Rank 0] PRINT: step:2800/10000 val_loss:4.6590 total_sharp:7.1206e-05 L1_sharp:3.8542e-03 L2_sharp:6.6702e-04 L3_sharp:3.3606e-04 L4_sharp:6.4193e-04 L5_sharp:4.6870e-04 L6_sharp:4.1859e-04 L7_sharp:6.6154e-04 L8_sharp:1.0824e-03 L9_sharp:7.6975e-04 L10_sharp:9.9500e-04 L11_sharp:1.4075e-03 L12_sharp:1.7820e-02 total_fnorm:1.8100e+02 total_l1_linf:4.1165e+05 total_spectral:9.0500e+01 L1_fnorm:2.5000e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.4844e+00 L5_fnorm:2.5156e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5312e+00 L8_fnorm:2.5156e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.5312e+00 L1_l1linf:7.0312e-01 L2_l1linf:6.5234e-01 L3_l1linf:6.4844e-01 L4_l1linf:6.4844e-01 L5_l1linf:6.5625e-01 
L6_l1linf:6.5234e-01 L7_l1linf:6.4844e-01 L8_l1linf:6.5234e-01 L9_l1linf:6.5234e-01 L10_l1linf:6.5234e-01 L11_l1linf:6.4844e-01 L12_l1linf:6.7578e-01 L1_spectral:3.0653e-02 L2_spectral:2.9612e-02 L3_spectral:2.9845e-02 L4_spectral:2.9839e-02 L5_spectral:3.0060e-02 L6_spectral:3.0013e-02 L7_spectral:2.9983e-02 L8_spectral:3.0903e-02 L9_spectral:3.0625e-02 L10_spectral:3.0401e-02 L11_spectral:3.0531e-02 L12_spectral:3.1009e-02 train_time:138023ms step_avg:49.29ms +[2025-09-11 06:58:12] [Rank 0] step:2801/10000 train_time:139931ms step_avg:49.96ms +[2025-09-11 06:58:12] [Rank 0] step:2801/10000 train_time:139931ms step_avg:49.96ms +[2025-09-11 06:58:13] [Rank 0] step:2821/10000 train_time:140596ms step_avg:49.84ms +[2025-09-11 06:58:13] [Rank 0] step:2821/10000 train_time:140596ms step_avg:49.84ms +[2025-09-11 06:58:13] [Rank 0] step:2841/10000 train_time:141259ms step_avg:49.72ms +[2025-09-11 06:58:13] [Rank 0] step:2841/10000 train_time:141259ms step_avg:49.72ms +[2025-09-11 06:58:14] [Rank 0] step:2861/10000 train_time:141920ms step_avg:49.60ms +[2025-09-11 06:58:14] [Rank 0] step:2861/10000 train_time:141920ms step_avg:49.60ms +[2025-09-11 06:58:15] [Rank 0] step:2881/10000 train_time:142581ms step_avg:49.49ms +[2025-09-11 06:58:15] [Rank 0] step:2881/10000 train_time:142581ms step_avg:49.49ms +[2025-09-11 06:58:15] [Rank 0] step:2901/10000 train_time:143240ms step_avg:49.38ms +[2025-09-11 06:58:15] [Rank 0] step:2901/10000 train_time:143240ms step_avg:49.38ms +[2025-09-11 06:58:16] [Rank 0] step:2921/10000 train_time:143902ms step_avg:49.26ms +[2025-09-11 06:58:16] [Rank 0] step:2921/10000 train_time:143902ms step_avg:49.26ms +[2025-09-11 06:58:17] [Rank 0] step:2941/10000 train_time:144563ms step_avg:49.15ms +[2025-09-11 06:58:17] [Rank 0] step:2941/10000 train_time:144563ms step_avg:49.15ms +[2025-09-11 06:58:17] [Rank 0] step:2961/10000 train_time:145224ms step_avg:49.05ms +[2025-09-11 06:58:17] [Rank 0] step:2961/10000 train_time:145224ms step_avg:49.05ms 
+[2025-09-11 06:58:18] [Rank 0] step:2981/10000 train_time:145887ms step_avg:48.94ms +[2025-09-11 06:58:18] [Rank 0] step:2981/10000 train_time:145887ms step_avg:48.94ms +[2025-09-11 06:58:19] [Rank 0] step:3001/10000 train_time:146552ms step_avg:48.83ms +[2025-09-11 06:58:19] [Rank 0] step:3001/10000 train_time:146552ms step_avg:48.83ms +[2025-09-11 06:58:19] [Rank 0] step:3021/10000 train_time:147216ms step_avg:48.73ms +[2025-09-11 06:58:19] [Rank 0] step:3021/10000 train_time:147216ms step_avg:48.73ms +[2025-09-11 06:58:20] [Rank 0] step:3041/10000 train_time:147881ms step_avg:48.63ms +[2025-09-11 06:58:20] [Rank 0] step:3041/10000 train_time:147881ms step_avg:48.63ms +[2025-09-11 06:58:21] [Rank 0] step:3061/10000 train_time:148546ms step_avg:48.53ms +[2025-09-11 06:58:21] [Rank 0] step:3061/10000 train_time:148546ms step_avg:48.53ms +[2025-09-11 06:58:21] [Rank 0] step:3081/10000 train_time:149210ms step_avg:48.43ms +[2025-09-11 06:58:21] [Rank 0] step:3081/10000 train_time:149210ms step_avg:48.43ms +[2025-09-11 06:58:22] [Rank 0] step:3101/10000 train_time:149875ms step_avg:48.33ms +[2025-09-11 06:58:22] [Rank 0] step:3101/10000 train_time:149875ms step_avg:48.33ms +[2025-09-11 06:58:23] [Rank 0] step:3121/10000 train_time:150539ms step_avg:48.23ms +[2025-09-11 06:58:23] [Rank 0] step:3121/10000 train_time:150539ms step_avg:48.23ms +[2025-09-11 06:58:23] [Rank 0] step:3141/10000 train_time:151203ms step_avg:48.14ms +[2025-09-11 06:58:23] [Rank 0] step:3141/10000 train_time:151203ms step_avg:48.14ms +[2025-09-11 06:58:24] [Rank 0] step:3161/10000 train_time:151868ms step_avg:48.04ms +[2025-09-11 06:58:24] [Rank 0] step:3161/10000 train_time:151868ms step_avg:48.04ms +[2025-09-11 06:58:25] [Rank 0] step:3181/10000 train_time:152532ms step_avg:47.95ms +[2025-09-11 06:58:25] [Rank 0] step:3181/10000 train_time:152532ms step_avg:47.95ms +[2025-09-11 06:58:25] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 06:58:25] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 06:58:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 06:58:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 06:58:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 06:58:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 06:58:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:58:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:58:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 06:58:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 06:58:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 06:58:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 06:58:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 06:58:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 06:58:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 06:58:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 06:58:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 06:58:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 06:58:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 06:58:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 06:58:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 06:58:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 06:58:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 06:58:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 06:58:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 06:58:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 06:58:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 06:58:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 06:58:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 06:58:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 06:58:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 06:58:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 06:58:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 06:58:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 06:58:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 06:58:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 06:58:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 06:58:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 06:58:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 06:58:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 06:58:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 06:58:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 06:58:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 06:58:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 06:58:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 06:58:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 06:58:36] [Rank 0] PRINT: step:3200/10000 val_loss:4.5672 total_sharp:5.5990e-05 L1_sharp:4.9073e-03 L2_sharp:1.8004e-05 L3_sharp:8.0390e-05 L4_sharp:8.2345e-04 L5_sharp:1.3142e-03 L6_sharp:4.3992e-04 L7_sharp:5.5237e-04 L8_sharp:9.8696e-04 L9_sharp:9.2624e-04 L10_sharp:1.1724e-03 L11_sharp:1.3971e-03 L12_sharp:3.0757e-02 total_fnorm:1.9700e+02 total_l1_linf:4.7104e+05 total_spectral:9.9000e+01 L1_fnorm:2.5000e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.5312e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5312e+00 L8_fnorm:2.5156e+00 L9_fnorm:2.5469e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.5312e+00 L1_l1linf:6.9531e-01 L2_l1linf:6.4062e-01 L3_l1linf:6.4062e-01 L4_l1linf:6.3672e-01 L5_l1linf:6.5625e-01 L6_l1linf:6.4844e-01 L7_l1linf:6.6406e-01 L8_l1linf:6.5625e-01 L9_l1linf:6.5234e-01 L10_l1linf:6.5234e-01 L11_l1linf:6.4453e-01 L12_l1linf:6.5234e-01 L1_spectral:3.0819e-02 L2_spectral:2.9984e-02 L3_spectral:3.0142e-02 L4_spectral:3.0207e-02 L5_spectral:3.0520e-02 L6_spectral:3.0607e-02 L7_spectral:3.0591e-02 L8_spectral:3.1429e-02 L9_spectral:3.0836e-02 L10_spectral:3.0824e-02 L11_spectral:3.1044e-02 L12_spectral:3.1144e-02 train_time:153177ms step_avg:47.87ms +[2025-09-11 06:58:36] [Rank 0] PRINT: step:3200/10000 
val_loss:4.5672 total_sharp:5.5990e-05 L1_sharp:4.9073e-03 L2_sharp:1.8004e-05 L3_sharp:8.0390e-05 L4_sharp:8.2345e-04 L5_sharp:1.3142e-03 L6_sharp:4.3992e-04 L7_sharp:5.5237e-04 L8_sharp:9.8696e-04 L9_sharp:9.2624e-04 L10_sharp:1.1724e-03 L11_sharp:1.3971e-03 L12_sharp:3.0757e-02 total_fnorm:1.9700e+02 total_l1_linf:4.7104e+05 total_spectral:9.9000e+01 L1_fnorm:2.5000e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.5312e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5312e+00 L8_fnorm:2.5156e+00 L9_fnorm:2.5469e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.5312e+00 L1_l1linf:6.9531e-01 L2_l1linf:6.4062e-01 L3_l1linf:6.4062e-01 L4_l1linf:6.3672e-01 L5_l1linf:6.5625e-01 L6_l1linf:6.4844e-01 L7_l1linf:6.6406e-01 L8_l1linf:6.5625e-01 L9_l1linf:6.5234e-01 L10_l1linf:6.5234e-01 L11_l1linf:6.4453e-01 L12_l1linf:6.5234e-01 L1_spectral:3.0819e-02 L2_spectral:2.9984e-02 L3_spectral:3.0142e-02 L4_spectral:3.0207e-02 L5_spectral:3.0520e-02 L6_spectral:3.0607e-02 L7_spectral:3.0591e-02 L8_spectral:3.1429e-02 L9_spectral:3.0836e-02 L10_spectral:3.0824e-02 L11_spectral:3.1044e-02 L12_spectral:3.1144e-02 train_time:153177ms step_avg:47.87ms +[2025-09-11 06:58:38] [Rank 0] step:3201/10000 train_time:155010ms step_avg:48.43ms +[2025-09-11 06:58:38] [Rank 0] step:3201/10000 train_time:155010ms step_avg:48.43ms +[2025-09-11 06:58:38] [Rank 0] step:3221/10000 train_time:155855ms step_avg:48.39ms +[2025-09-11 06:58:38] [Rank 0] step:3221/10000 train_time:155855ms step_avg:48.39ms +[2025-09-11 06:58:39] [Rank 0] step:3241/10000 train_time:156521ms step_avg:48.29ms +[2025-09-11 06:58:39] [Rank 0] step:3241/10000 train_time:156521ms step_avg:48.29ms +[2025-09-11 06:58:40] [Rank 0] step:3261/10000 train_time:157186ms step_avg:48.20ms +[2025-09-11 06:58:40] [Rank 0] step:3261/10000 train_time:157186ms step_avg:48.20ms +[2025-09-11 06:58:40] [Rank 0] step:3281/10000 train_time:157851ms step_avg:48.11ms +[2025-09-11 06:58:40] [Rank 0] step:3281/10000 
train_time:157851ms step_avg:48.11ms +[2025-09-11 06:58:41] [Rank 0] step:3301/10000 train_time:158517ms step_avg:48.02ms +[2025-09-11 06:58:41] [Rank 0] step:3301/10000 train_time:158517ms step_avg:48.02ms +[2025-09-11 06:58:42] [Rank 0] step:3321/10000 train_time:159181ms step_avg:47.93ms +[2025-09-11 06:58:42] [Rank 0] step:3321/10000 train_time:159181ms step_avg:47.93ms +[2025-09-11 06:58:42] [Rank 0] step:3341/10000 train_time:159845ms step_avg:47.84ms +[2025-09-11 06:58:42] [Rank 0] step:3341/10000 train_time:159845ms step_avg:47.84ms +[2025-09-11 06:58:43] [Rank 0] step:3361/10000 train_time:160510ms step_avg:47.76ms +[2025-09-11 06:58:43] [Rank 0] step:3361/10000 train_time:160510ms step_avg:47.76ms +[2025-09-11 06:58:44] [Rank 0] step:3381/10000 train_time:161173ms step_avg:47.67ms +[2025-09-11 06:58:44] [Rank 0] step:3381/10000 train_time:161173ms step_avg:47.67ms +[2025-09-11 06:58:44] [Rank 0] step:3401/10000 train_time:161837ms step_avg:47.59ms +[2025-09-11 06:58:44] [Rank 0] step:3401/10000 train_time:161837ms step_avg:47.59ms +[2025-09-11 06:58:45] [Rank 0] step:3421/10000 train_time:162500ms step_avg:47.50ms +[2025-09-11 06:58:45] [Rank 0] step:3421/10000 train_time:162500ms step_avg:47.50ms +[2025-09-11 06:58:46] [Rank 0] step:3441/10000 train_time:163163ms step_avg:47.42ms +[2025-09-11 06:58:46] [Rank 0] step:3441/10000 train_time:163163ms step_avg:47.42ms +[2025-09-11 06:58:46] [Rank 0] step:3461/10000 train_time:163827ms step_avg:47.34ms +[2025-09-11 06:58:46] [Rank 0] step:3461/10000 train_time:163827ms step_avg:47.34ms +[2025-09-11 06:58:47] [Rank 0] step:3481/10000 train_time:164491ms step_avg:47.25ms +[2025-09-11 06:58:47] [Rank 0] step:3481/10000 train_time:164491ms step_avg:47.25ms +[2025-09-11 06:58:48] [Rank 0] step:3501/10000 train_time:165155ms step_avg:47.17ms +[2025-09-11 06:58:48] [Rank 0] step:3501/10000 train_time:165155ms step_avg:47.17ms +[2025-09-11 06:58:48] [Rank 0] step:3521/10000 train_time:165819ms step_avg:47.09ms 
+[2025-09-11 06:58:48] [Rank 0] step:3521/10000 train_time:165819ms step_avg:47.09ms +[2025-09-11 06:58:49] [Rank 0] step:3541/10000 train_time:166483ms step_avg:47.02ms +[2025-09-11 06:58:49] [Rank 0] step:3541/10000 train_time:166483ms step_avg:47.02ms +[2025-09-11 06:58:50] [Rank 0] step:3561/10000 train_time:167146ms step_avg:46.94ms +[2025-09-11 06:58:50] [Rank 0] step:3561/10000 train_time:167146ms step_avg:46.94ms +[2025-09-11 06:58:50] [Rank 0] step:3581/10000 train_time:167809ms step_avg:46.86ms +[2025-09-11 06:58:50] [Rank 0] step:3581/10000 train_time:167809ms step_avg:46.86ms +[2025-09-11 06:58:51] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 06:58:51] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 06:58:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 06:58:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 06:58:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 06:58:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 06:58:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:58:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:58:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 06:58:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 06:58:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 06:58:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 06:58:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 06:58:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 06:58:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 06:58:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 06:58:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 06:58:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 06:58:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 06:58:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 06:58:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 06:58:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 06:58:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 06:58:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 06:58:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 06:58:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 06:58:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 06:58:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 06:58:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 06:58:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 06:58:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 06:58:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 06:59:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 06:59:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 06:59:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 06:59:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 06:59:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 06:59:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 06:59:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 06:59:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 06:59:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 06:59:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 06:59:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 06:59:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 06:59:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 06:59:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:59:01] [Rank 0] PRINT: step:3600/10000 val_loss:4.5242 total_sharp:7.8254e-05 L1_sharp:6.1539e-03 L2_sharp:3.2368e-04 L3_sharp:7.2388e-05 L4_sharp:6.2663e-04 L5_sharp:4.9367e-04 L6_sharp:5.5782e-04 L7_sharp:6.0274e-04 L8_sharp:7.8157e-04 L9_sharp:8.1846e-04 L10_sharp:1.0944e-03 L11_sharp:1.5429e-03 L12_sharp:7.3971e-02 total_fnorm:1.7900e+02 total_l1_linf:4.0960e+05 total_spectral:8.9500e+01 L1_fnorm:2.5000e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.5312e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5312e+00 L8_fnorm:2.5000e+00 L9_fnorm:2.5469e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.5312e+00 L1_l1linf:6.8359e-01 L2_l1linf:6.3281e-01 L3_l1linf:6.1719e-01 L4_l1linf:6.3281e-01 L5_l1linf:6.4062e-01 L6_l1linf:6.4844e-01 L7_l1linf:6.5234e-01 L8_l1linf:6.4844e-01 L9_l1linf:6.4062e-01 L10_l1linf:6.4453e-01 L11_l1linf:6.4844e-01 L12_l1linf:6.4844e-01 L1_spectral:3.0974e-02 L2_spectral:3.0210e-02 L3_spectral:3.0513e-02 L4_spectral:3.0495e-02 L5_spectral:3.0602e-02 L6_spectral:3.0731e-02 L7_spectral:3.0738e-02 L8_spectral:3.1542e-02 L9_spectral:3.1393e-02 L10_spectral:3.1315e-02 L11_spectral:3.1355e-02 L12_spectral:3.1318e-02 train_time:168454ms step_avg:46.79ms +[2025-09-11 06:59:01] [Rank 0] PRINT: step:3600/10000 val_loss:4.5242 total_sharp:7.8254e-05 L1_sharp:6.1539e-03 L2_sharp:3.2368e-04 L3_sharp:7.2388e-05 L4_sharp:6.2663e-04 L5_sharp:4.9367e-04 L6_sharp:5.5782e-04 L7_sharp:6.0274e-04 L8_sharp:7.8157e-04 L9_sharp:8.1846e-04 L10_sharp:1.0944e-03 L11_sharp:1.5429e-03 L12_sharp:7.3971e-02 total_fnorm:1.7900e+02 total_l1_linf:4.0960e+05 total_spectral:8.9500e+01 L1_fnorm:2.5000e+00 L2_fnorm:2.4688e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.5312e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5312e+00 L8_fnorm:2.5000e+00 L9_fnorm:2.5469e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5312e+00 L12_fnorm:2.5312e+00 L1_l1linf:6.8359e-01 L2_l1linf:6.3281e-01 L3_l1linf:6.1719e-01 L4_l1linf:6.3281e-01 L5_l1linf:6.4062e-01 
L6_l1linf:6.4844e-01 L7_l1linf:6.5234e-01 L8_l1linf:6.4844e-01 L9_l1linf:6.4062e-01 L10_l1linf:6.4453e-01 L11_l1linf:6.4844e-01 L12_l1linf:6.4844e-01 L1_spectral:3.0974e-02 L2_spectral:3.0210e-02 L3_spectral:3.0513e-02 L4_spectral:3.0495e-02 L5_spectral:3.0602e-02 L6_spectral:3.0731e-02 L7_spectral:3.0738e-02 L8_spectral:3.1542e-02 L9_spectral:3.1393e-02 L10_spectral:3.1315e-02 L11_spectral:3.1355e-02 L12_spectral:3.1318e-02 train_time:168454ms step_avg:46.79ms +[2025-09-11 06:59:03] [Rank 0] step:3601/10000 train_time:170186ms step_avg:47.26ms +[2025-09-11 06:59:03] [Rank 0] step:3601/10000 train_time:170186ms step_avg:47.26ms +[2025-09-11 06:59:04] [Rank 0] step:3621/10000 train_time:170856ms step_avg:47.18ms +[2025-09-11 06:59:04] [Rank 0] step:3621/10000 train_time:170856ms step_avg:47.18ms +[2025-09-11 06:59:04] [Rank 0] step:3641/10000 train_time:171521ms step_avg:47.11ms +[2025-09-11 06:59:04] [Rank 0] step:3641/10000 train_time:171521ms step_avg:47.11ms +[2025-09-11 06:59:05] [Rank 0] step:3661/10000 train_time:172187ms step_avg:47.03ms +[2025-09-11 06:59:05] [Rank 0] step:3661/10000 train_time:172187ms step_avg:47.03ms +[2025-09-11 06:59:06] [Rank 0] step:3681/10000 train_time:172852ms step_avg:46.96ms +[2025-09-11 06:59:06] [Rank 0] step:3681/10000 train_time:172852ms step_avg:46.96ms +[2025-09-11 06:59:06] [Rank 0] step:3701/10000 train_time:173516ms step_avg:46.88ms +[2025-09-11 06:59:06] [Rank 0] step:3701/10000 train_time:173516ms step_avg:46.88ms +[2025-09-11 06:59:07] [Rank 0] step:3721/10000 train_time:174190ms step_avg:46.81ms +[2025-09-11 06:59:07] [Rank 0] step:3721/10000 train_time:174190ms step_avg:46.81ms +[2025-09-11 06:59:08] [Rank 0] step:3741/10000 train_time:174865ms step_avg:46.74ms +[2025-09-11 06:59:08] [Rank 0] step:3741/10000 train_time:174865ms step_avg:46.74ms +[2025-09-11 06:59:08] [Rank 0] step:3761/10000 train_time:175541ms step_avg:46.67ms +[2025-09-11 06:59:08] [Rank 0] step:3761/10000 train_time:175541ms step_avg:46.67ms 
+[2025-09-11 06:59:09] [Rank 0] step:3781/10000 train_time:176216ms step_avg:46.61ms +[2025-09-11 06:59:09] [Rank 0] step:3781/10000 train_time:176216ms step_avg:46.61ms +[2025-09-11 06:59:10] [Rank 0] step:3801/10000 train_time:176891ms step_avg:46.54ms +[2025-09-11 06:59:10] [Rank 0] step:3801/10000 train_time:176891ms step_avg:46.54ms +[2025-09-11 06:59:10] [Rank 0] step:3821/10000 train_time:177567ms step_avg:46.47ms +[2025-09-11 06:59:10] [Rank 0] step:3821/10000 train_time:177567ms step_avg:46.47ms +[2025-09-11 06:59:11] [Rank 0] step:3841/10000 train_time:178242ms step_avg:46.41ms +[2025-09-11 06:59:11] [Rank 0] step:3841/10000 train_time:178242ms step_avg:46.41ms +[2025-09-11 06:59:12] [Rank 0] step:3861/10000 train_time:178919ms step_avg:46.34ms +[2025-09-11 06:59:12] [Rank 0] step:3861/10000 train_time:178919ms step_avg:46.34ms +[2025-09-11 06:59:12] [Rank 0] step:3881/10000 train_time:179593ms step_avg:46.28ms +[2025-09-11 06:59:12] [Rank 0] step:3881/10000 train_time:179593ms step_avg:46.28ms +[2025-09-11 06:59:13] [Rank 0] step:3901/10000 train_time:180369ms step_avg:46.24ms +[2025-09-11 06:59:13] [Rank 0] step:3901/10000 train_time:180369ms step_avg:46.24ms +[2025-09-11 06:59:14] [Rank 0] step:3921/10000 train_time:181044ms step_avg:46.17ms +[2025-09-11 06:59:14] [Rank 0] step:3921/10000 train_time:181044ms step_avg:46.17ms +[2025-09-11 06:59:15] [Rank 0] step:3941/10000 train_time:181721ms step_avg:46.11ms +[2025-09-11 06:59:15] [Rank 0] step:3941/10000 train_time:181721ms step_avg:46.11ms +[2025-09-11 06:59:15] [Rank 0] step:3961/10000 train_time:182396ms step_avg:46.05ms +[2025-09-11 06:59:15] [Rank 0] step:3961/10000 train_time:182396ms step_avg:46.05ms +[2025-09-11 06:59:16] [Rank 0] step:3981/10000 train_time:183070ms step_avg:45.99ms +[2025-09-11 06:59:16] [Rank 0] step:3981/10000 train_time:183070ms step_avg:45.99ms +[2025-09-11 06:59:17] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 06:59:17] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 06:59:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 06:59:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 06:59:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 06:59:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 06:59:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:59:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:59:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 06:59:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 06:59:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 06:59:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 06:59:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 06:59:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 06:59:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 06:59:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 06:59:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 06:59:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 06:59:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 06:59:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 06:59:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 06:59:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 06:59:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 06:59:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 06:59:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 06:59:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 06:59:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 06:59:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 06:59:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 06:59:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 06:59:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 06:59:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 06:59:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 06:59:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 06:59:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 06:59:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 06:59:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 06:59:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 06:59:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 06:59:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 06:59:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 06:59:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 06:59:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 06:59:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 06:59:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 06:59:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 06:59:27] [Rank 0] PRINT: step:4000/10000 val_loss:4.4809 total_sharp:5.9692e-05 L1_sharp:1.6148e-03 L2_sharp:2.3261e-04 L3_sharp:1.2592e-04 L4_sharp:-1.3836e-05 L5_sharp:3.4995e-04 L6_sharp:5.6625e-04 L7_sharp:3.8940e-04 L8_sharp:7.5881e-04 L9_sharp:7.8568e-04 L10_sharp:1.1601e-03 L11_sharp:1.5365e-03 L12_sharp:3.6472e-02 total_fnorm:1.9500e+02 total_l1_linf:4.4851e+05 total_spectral:9.8000e+01 L1_fnorm:2.5156e+00 L2_fnorm:2.4375e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.4844e+00 L5_fnorm:2.5156e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.5000e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.5312e+00 L1_l1linf:6.7188e-01 L2_l1linf:6.2109e-01 L3_l1linf:6.0938e-01 L4_l1linf:6.3281e-01 L5_l1linf:6.4062e-01 L6_l1linf:6.4062e-01 L7_l1linf:6.5625e-01 L8_l1linf:6.3672e-01 L9_l1linf:6.3672e-01 L10_l1linf:6.3672e-01 L11_l1linf:6.2109e-01 L12_l1linf:6.4844e-01 L1_spectral:3.1251e-02 L2_spectral:3.0366e-02 L3_spectral:3.0599e-02 L4_spectral:3.0773e-02 L5_spectral:3.0949e-02 L6_spectral:3.1258e-02 L7_spectral:3.0820e-02 L8_spectral:3.1581e-02 L9_spectral:3.1510e-02 L10_spectral:3.1216e-02 L11_spectral:3.1419e-02 L12_spectral:3.1193e-02 train_time:183726ms step_avg:45.93ms +[2025-09-11 06:59:27] [Rank 0] PRINT: step:4000/10000 
val_loss:4.4809 total_sharp:5.9692e-05 L1_sharp:1.6148e-03 L2_sharp:2.3261e-04 L3_sharp:1.2592e-04 L4_sharp:-1.3836e-05 L5_sharp:3.4995e-04 L6_sharp:5.6625e-04 L7_sharp:3.8940e-04 L8_sharp:7.5881e-04 L9_sharp:7.8568e-04 L10_sharp:1.1601e-03 L11_sharp:1.5365e-03 L12_sharp:3.6472e-02 total_fnorm:1.9500e+02 total_l1_linf:4.4851e+05 total_spectral:9.8000e+01 L1_fnorm:2.5156e+00 L2_fnorm:2.4375e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.4844e+00 L5_fnorm:2.5156e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.5000e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.5312e+00 L1_l1linf:6.7188e-01 L2_l1linf:6.2109e-01 L3_l1linf:6.0938e-01 L4_l1linf:6.3281e-01 L5_l1linf:6.4062e-01 L6_l1linf:6.4062e-01 L7_l1linf:6.5625e-01 L8_l1linf:6.3672e-01 L9_l1linf:6.3672e-01 L10_l1linf:6.3672e-01 L11_l1linf:6.2109e-01 L12_l1linf:6.4844e-01 L1_spectral:3.1251e-02 L2_spectral:3.0366e-02 L3_spectral:3.0599e-02 L4_spectral:3.0773e-02 L5_spectral:3.0949e-02 L6_spectral:3.1258e-02 L7_spectral:3.0820e-02 L8_spectral:3.1581e-02 L9_spectral:3.1510e-02 L10_spectral:3.1216e-02 L11_spectral:3.1419e-02 L12_spectral:3.1193e-02 train_time:183726ms step_avg:45.93ms +[2025-09-11 06:59:29] [Rank 0] step:4001/10000 train_time:185447ms step_avg:46.35ms +[2025-09-11 06:59:29] [Rank 0] step:4001/10000 train_time:185447ms step_avg:46.35ms +[2025-09-11 06:59:29] [Rank 0] step:4021/10000 train_time:186153ms step_avg:46.30ms +[2025-09-11 06:59:29] [Rank 0] step:4021/10000 train_time:186153ms step_avg:46.30ms +[2025-09-11 06:59:30] [Rank 0] step:4041/10000 train_time:186830ms step_avg:46.23ms +[2025-09-11 06:59:30] [Rank 0] step:4041/10000 train_time:186830ms step_avg:46.23ms +[2025-09-11 06:59:31] [Rank 0] step:4061/10000 train_time:187506ms step_avg:46.17ms +[2025-09-11 06:59:31] [Rank 0] step:4061/10000 train_time:187506ms step_avg:46.17ms +[2025-09-11 06:59:32] [Rank 0] step:4081/10000 train_time:188182ms step_avg:46.11ms +[2025-09-11 06:59:32] [Rank 0] step:4081/10000 
train_time:188182ms step_avg:46.11ms +[2025-09-11 06:59:32] [Rank 0] step:4101/10000 train_time:188857ms step_avg:46.05ms +[2025-09-11 06:59:32] [Rank 0] step:4101/10000 train_time:188857ms step_avg:46.05ms +[2025-09-11 06:59:33] [Rank 0] step:4121/10000 train_time:189532ms step_avg:45.99ms +[2025-09-11 06:59:33] [Rank 0] step:4121/10000 train_time:189532ms step_avg:45.99ms +[2025-09-11 06:59:34] [Rank 0] step:4141/10000 train_time:190207ms step_avg:45.93ms +[2025-09-11 06:59:34] [Rank 0] step:4141/10000 train_time:190207ms step_avg:45.93ms +[2025-09-11 06:59:34] [Rank 0] step:4161/10000 train_time:190883ms step_avg:45.87ms +[2025-09-11 06:59:34] [Rank 0] step:4161/10000 train_time:190883ms step_avg:45.87ms +[2025-09-11 06:59:35] [Rank 0] step:4181/10000 train_time:192152ms step_avg:45.96ms +[2025-09-11 06:59:35] [Rank 0] step:4181/10000 train_time:192152ms step_avg:45.96ms +[2025-09-11 06:59:36] [Rank 0] step:4201/10000 train_time:192828ms step_avg:45.90ms +[2025-09-11 06:59:36] [Rank 0] step:4201/10000 train_time:192828ms step_avg:45.90ms +[2025-09-11 06:59:37] [Rank 0] step:4221/10000 train_time:193504ms step_avg:45.84ms +[2025-09-11 06:59:37] [Rank 0] step:4221/10000 train_time:193504ms step_avg:45.84ms +[2025-09-11 06:59:38] [Rank 0] step:4241/10000 train_time:194468ms step_avg:45.85ms +[2025-09-11 06:59:38] [Rank 0] step:4241/10000 train_time:194468ms step_avg:45.85ms +[2025-09-11 06:59:38] [Rank 0] step:4261/10000 train_time:195145ms step_avg:45.80ms +[2025-09-11 06:59:38] [Rank 0] step:4261/10000 train_time:195145ms step_avg:45.80ms +[2025-09-11 06:59:39] [Rank 0] step:4281/10000 train_time:195821ms step_avg:45.74ms +[2025-09-11 06:59:39] [Rank 0] step:4281/10000 train_time:195821ms step_avg:45.74ms +[2025-09-11 06:59:40] [Rank 0] step:4301/10000 train_time:196497ms step_avg:45.69ms +[2025-09-11 06:59:40] [Rank 0] step:4301/10000 train_time:196497ms step_avg:45.69ms +[2025-09-11 06:59:40] [Rank 0] step:4321/10000 train_time:197172ms step_avg:45.63ms 
+[2025-09-11 06:59:40] [Rank 0] step:4321/10000 train_time:197172ms step_avg:45.63ms +[2025-09-11 06:59:41] [Rank 0] step:4341/10000 train_time:197847ms step_avg:45.58ms +[2025-09-11 06:59:41] [Rank 0] step:4341/10000 train_time:197847ms step_avg:45.58ms +[2025-09-11 06:59:42] [Rank 0] step:4361/10000 train_time:198521ms step_avg:45.52ms +[2025-09-11 06:59:42] [Rank 0] step:4361/10000 train_time:198521ms step_avg:45.52ms +[2025-09-11 06:59:43] [Rank 0] step:4381/10000 train_time:199198ms step_avg:45.47ms +[2025-09-11 06:59:43] [Rank 0] step:4381/10000 train_time:199198ms step_avg:45.47ms +[2025-09-11 06:59:43] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 06:59:43] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 06:59:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 06:59:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 06:59:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 06:59:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 06:59:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:59:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:59:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 06:59:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 06:59:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 06:59:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 06:59:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 06:59:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 06:59:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 06:59:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 06:59:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 06:59:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 06:59:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 06:59:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 06:59:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 06:59:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 06:59:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 06:59:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 06:59:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 06:59:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 06:59:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 06:59:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 06:59:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 06:59:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 06:59:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 06:59:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 06:59:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 06:59:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 06:59:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 06:59:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 06:59:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 06:59:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 06:59:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 06:59:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 06:59:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 06:59:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 06:59:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 06:59:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 06:59:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 06:59:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:59:54] [Rank 0] PRINT: step:4400/10000 val_loss:4.4675 total_sharp:5.7718e-05 L1_sharp:3.1324e-03 L2_sharp:-8.2621e-05 L3_sharp:2.2188e-04 L4_sharp:2.2087e-04 L5_sharp:3.8594e-04 L6_sharp:4.1187e-04 L7_sharp:4.3534e-04 L8_sharp:1.1212e-03 L9_sharp:6.7506e-04 L10_sharp:1.1378e-03 L11_sharp:1.4226e-03 L12_sharp:2.8589e-02 total_fnorm:1.7900e+02 total_l1_linf:4.0141e+05 total_spectral:8.9500e+01 L1_fnorm:2.5312e+00 L2_fnorm:2.4219e+00 L3_fnorm:2.4688e+00 L4_fnorm:2.4688e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4844e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.5312e+00 L1_l1linf:6.6797e-01 L2_l1linf:6.2109e-01 L3_l1linf:6.1328e-01 L4_l1linf:6.2109e-01 L5_l1linf:6.2891e-01 L6_l1linf:6.2891e-01 L7_l1linf:6.2500e-01 L8_l1linf:6.4062e-01 L9_l1linf:6.1719e-01 L10_l1linf:6.2109e-01 L11_l1linf:6.2500e-01 L12_l1linf:6.4844e-01 L1_spectral:3.1683e-02 L2_spectral:3.0249e-02 L3_spectral:3.0767e-02 L4_spectral:3.0910e-02 L5_spectral:3.1126e-02 L6_spectral:3.1267e-02 L7_spectral:3.1184e-02 L8_spectral:3.1697e-02 L9_spectral:3.1548e-02 L10_spectral:3.1477e-02 L11_spectral:3.1521e-02 L12_spectral:3.1305e-02 train_time:199854ms step_avg:45.42ms +[2025-09-11 06:59:54] [Rank 0] PRINT: step:4400/10000 val_loss:4.4675 total_sharp:5.7718e-05 L1_sharp:3.1324e-03 L2_sharp:-8.2621e-05 L3_sharp:2.2188e-04 L4_sharp:2.2087e-04 L5_sharp:3.8594e-04 L6_sharp:4.1187e-04 L7_sharp:4.3534e-04 L8_sharp:1.1212e-03 L9_sharp:6.7506e-04 L10_sharp:1.1378e-03 L11_sharp:1.4226e-03 L12_sharp:2.8589e-02 total_fnorm:1.7900e+02 total_l1_linf:4.0141e+05 total_spectral:8.9500e+01 L1_fnorm:2.5312e+00 L2_fnorm:2.4219e+00 L3_fnorm:2.4688e+00 L4_fnorm:2.4688e+00 L5_fnorm:2.5000e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4844e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.5312e+00 L1_l1linf:6.6797e-01 L2_l1linf:6.2109e-01 L3_l1linf:6.1328e-01 L4_l1linf:6.2109e-01 L5_l1linf:6.2891e-01 
L6_l1linf:6.2891e-01 L7_l1linf:6.2500e-01 L8_l1linf:6.4062e-01 L9_l1linf:6.1719e-01 L10_l1linf:6.2109e-01 L11_l1linf:6.2500e-01 L12_l1linf:6.4844e-01 L1_spectral:3.1683e-02 L2_spectral:3.0249e-02 L3_spectral:3.0767e-02 L4_spectral:3.0910e-02 L5_spectral:3.1126e-02 L6_spectral:3.1267e-02 L7_spectral:3.1184e-02 L8_spectral:3.1697e-02 L9_spectral:3.1548e-02 L10_spectral:3.1477e-02 L11_spectral:3.1521e-02 L12_spectral:3.1305e-02 train_time:199854ms step_avg:45.42ms +[2025-09-11 06:59:55] [Rank 0] step:4401/10000 train_time:201622ms step_avg:45.81ms +[2025-09-11 06:59:55] [Rank 0] step:4401/10000 train_time:201622ms step_avg:45.81ms +[2025-09-11 06:59:56] [Rank 0] step:4421/10000 train_time:202329ms step_avg:45.77ms +[2025-09-11 06:59:56] [Rank 0] step:4421/10000 train_time:202329ms step_avg:45.77ms +[2025-09-11 06:59:57] [Rank 0] step:4441/10000 train_time:203005ms step_avg:45.71ms +[2025-09-11 06:59:57] [Rank 0] step:4441/10000 train_time:203005ms step_avg:45.71ms +[2025-09-11 06:59:57] [Rank 0] step:4461/10000 train_time:203683ms step_avg:45.66ms +[2025-09-11 06:59:57] [Rank 0] step:4461/10000 train_time:203683ms step_avg:45.66ms +[2025-09-11 06:59:58] [Rank 0] step:4481/10000 train_time:204361ms step_avg:45.61ms +[2025-09-11 06:59:58] [Rank 0] step:4481/10000 train_time:204361ms step_avg:45.61ms +[2025-09-11 06:59:59] [Rank 0] step:4501/10000 train_time:205042ms step_avg:45.55ms +[2025-09-11 06:59:59] [Rank 0] step:4501/10000 train_time:205042ms step_avg:45.55ms +[2025-09-11 06:59:59] [Rank 0] step:4521/10000 train_time:205720ms step_avg:45.50ms +[2025-09-11 06:59:59] [Rank 0] step:4521/10000 train_time:205720ms step_avg:45.50ms +[2025-09-11 07:00:00] [Rank 0] step:4541/10000 train_time:206408ms step_avg:45.45ms +[2025-09-11 07:00:00] [Rank 0] step:4541/10000 train_time:206408ms step_avg:45.45ms +[2025-09-11 07:00:01] [Rank 0] step:4561/10000 train_time:207086ms step_avg:45.40ms +[2025-09-11 07:00:01] [Rank 0] step:4561/10000 train_time:207086ms step_avg:45.40ms 
+[2025-09-11 07:00:02] [Rank 0] step:4581/10000 train_time:207913ms step_avg:45.39ms +[2025-09-11 07:00:02] [Rank 0] step:4581/10000 train_time:207913ms step_avg:45.39ms +[2025-09-11 07:00:02] [Rank 0] step:4601/10000 train_time:208669ms step_avg:45.35ms +[2025-09-11 07:00:02] [Rank 0] step:4601/10000 train_time:208669ms step_avg:45.35ms +[2025-09-11 07:00:03] [Rank 0] step:4621/10000 train_time:209349ms step_avg:45.30ms +[2025-09-11 07:00:03] [Rank 0] step:4621/10000 train_time:209349ms step_avg:45.30ms +[2025-09-11 07:00:04] [Rank 0] step:4641/10000 train_time:210027ms step_avg:45.25ms +[2025-09-11 07:00:04] [Rank 0] step:4641/10000 train_time:210027ms step_avg:45.25ms +[2025-09-11 07:00:04] [Rank 0] step:4661/10000 train_time:210706ms step_avg:45.21ms +[2025-09-11 07:00:04] [Rank 0] step:4661/10000 train_time:210706ms step_avg:45.21ms +[2025-09-11 07:00:05] [Rank 0] step:4681/10000 train_time:211383ms step_avg:45.16ms +[2025-09-11 07:00:05] [Rank 0] step:4681/10000 train_time:211383ms step_avg:45.16ms +[2025-09-11 07:00:06] [Rank 0] step:4701/10000 train_time:212061ms step_avg:45.11ms +[2025-09-11 07:00:06] [Rank 0] step:4701/10000 train_time:212061ms step_avg:45.11ms +[2025-09-11 07:00:06] [Rank 0] step:4721/10000 train_time:212739ms step_avg:45.06ms +[2025-09-11 07:00:06] [Rank 0] step:4721/10000 train_time:212739ms step_avg:45.06ms +[2025-09-11 07:00:07] [Rank 0] step:4741/10000 train_time:213416ms step_avg:45.02ms +[2025-09-11 07:00:07] [Rank 0] step:4741/10000 train_time:213416ms step_avg:45.02ms +[2025-09-11 07:00:08] [Rank 0] step:4761/10000 train_time:214095ms step_avg:44.97ms +[2025-09-11 07:00:08] [Rank 0] step:4761/10000 train_time:214095ms step_avg:44.97ms +[2025-09-11 07:00:08] [Rank 0] step:4781/10000 train_time:214774ms step_avg:44.92ms +[2025-09-11 07:00:08] [Rank 0] step:4781/10000 train_time:214774ms step_avg:44.92ms +[2025-09-11 07:00:09] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 07:00:09] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 07:00:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 07:00:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 07:00:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 07:00:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 07:00:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:00:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:00:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 07:00:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 07:00:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 07:00:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 07:00:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 07:00:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 07:00:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 07:00:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 07:00:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 07:00:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 07:00:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 07:00:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 07:00:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 07:00:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 07:00:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 07:00:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 07:00:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 07:00:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 07:00:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 07:00:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 07:00:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 07:00:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 07:00:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 07:00:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 07:00:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 07:00:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 07:00:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 07:00:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 07:00:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 07:00:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 07:00:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 07:00:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 07:00:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 07:00:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 07:00:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 07:00:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 07:00:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 07:00:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 07:00:20] [Rank 0] PRINT: step:4800/10000 val_loss:4.4151 total_sharp:3.9748e-05 L1_sharp:2.2702e-03 L2_sharp:7.8977e-05 L3_sharp:-1.8684e-05 L4_sharp:4.5707e-04 L5_sharp:2.7088e-04 L6_sharp:3.9126e-04 L7_sharp:5.0190e-04 L8_sharp:8.8326e-04 L9_sharp:6.5295e-04 L10_sharp:7.7843e-04 L11_sharp:1.2024e-03 L12_sharp:1.2606e-02 total_fnorm:1.8700e+02 total_l1_linf:4.3418e+05 total_spectral:9.3500e+01 L1_fnorm:2.5312e+00 L2_fnorm:2.4531e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.4844e+00 L5_fnorm:2.5312e+00 L6_fnorm:2.5469e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4844e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.5156e+00 L1_l1linf:6.6016e-01 L2_l1linf:6.1719e-01 L3_l1linf:6.0547e-01 L4_l1linf:6.3672e-01 L5_l1linf:6.4062e-01 L6_l1linf:6.3672e-01 L7_l1linf:6.4844e-01 L8_l1linf:6.4453e-01 L9_l1linf:6.3281e-01 L10_l1linf:6.2500e-01 L11_l1linf:6.1328e-01 L12_l1linf:6.3281e-01 L1_spectral:3.1638e-02 L2_spectral:3.0763e-02 L3_spectral:3.1149e-02 L4_spectral:3.1063e-02 L5_spectral:3.1489e-02 L6_spectral:3.1474e-02 L7_spectral:3.1237e-02 L8_spectral:3.1867e-02 L9_spectral:3.1984e-02 L10_spectral:3.1627e-02 L11_spectral:3.1991e-02 L12_spectral:3.1679e-02 train_time:215432ms step_avg:44.88ms +[2025-09-11 07:00:20] [Rank 0] PRINT: step:4800/10000 
val_loss:4.4151 total_sharp:3.9748e-05 L1_sharp:2.2702e-03 L2_sharp:7.8977e-05 L3_sharp:-1.8684e-05 L4_sharp:4.5707e-04 L5_sharp:2.7088e-04 L6_sharp:3.9126e-04 L7_sharp:5.0190e-04 L8_sharp:8.8326e-04 L9_sharp:6.5295e-04 L10_sharp:7.7843e-04 L11_sharp:1.2024e-03 L12_sharp:1.2606e-02 total_fnorm:1.8700e+02 total_l1_linf:4.3418e+05 total_spectral:9.3500e+01 L1_fnorm:2.5312e+00 L2_fnorm:2.4531e+00 L3_fnorm:2.5000e+00 L4_fnorm:2.4844e+00 L5_fnorm:2.5312e+00 L6_fnorm:2.5469e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4844e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.5156e+00 L1_l1linf:6.6016e-01 L2_l1linf:6.1719e-01 L3_l1linf:6.0547e-01 L4_l1linf:6.3672e-01 L5_l1linf:6.4062e-01 L6_l1linf:6.3672e-01 L7_l1linf:6.4844e-01 L8_l1linf:6.4453e-01 L9_l1linf:6.3281e-01 L10_l1linf:6.2500e-01 L11_l1linf:6.1328e-01 L12_l1linf:6.3281e-01 L1_spectral:3.1638e-02 L2_spectral:3.0763e-02 L3_spectral:3.1149e-02 L4_spectral:3.1063e-02 L5_spectral:3.1489e-02 L6_spectral:3.1474e-02 L7_spectral:3.1237e-02 L8_spectral:3.1867e-02 L9_spectral:3.1984e-02 L10_spectral:3.1627e-02 L11_spectral:3.1991e-02 L12_spectral:3.1679e-02 train_time:215432ms step_avg:44.88ms +[2025-09-11 07:00:21] [Rank 0] step:4801/10000 train_time:217179ms step_avg:45.24ms +[2025-09-11 07:00:21] [Rank 0] step:4801/10000 train_time:217179ms step_avg:45.24ms +[2025-09-11 07:00:22] [Rank 0] step:4821/10000 train_time:217891ms step_avg:45.20ms +[2025-09-11 07:00:22] [Rank 0] step:4821/10000 train_time:217891ms step_avg:45.20ms +[2025-09-11 07:00:23] [Rank 0] step:4841/10000 train_time:218571ms step_avg:45.15ms +[2025-09-11 07:00:23] [Rank 0] step:4841/10000 train_time:218571ms step_avg:45.15ms +[2025-09-11 07:00:24] [Rank 0] step:4861/10000 train_time:219249ms step_avg:45.10ms +[2025-09-11 07:00:24] [Rank 0] step:4861/10000 train_time:219249ms step_avg:45.10ms +[2025-09-11 07:00:24] [Rank 0] step:4881/10000 train_time:219928ms step_avg:45.06ms +[2025-09-11 07:00:24] [Rank 0] step:4881/10000 
train_time:219928ms step_avg:45.06ms +[2025-09-11 07:00:25] [Rank 0] step:4901/10000 train_time:220608ms step_avg:45.01ms +[2025-09-11 07:00:25] [Rank 0] step:4901/10000 train_time:220608ms step_avg:45.01ms +[2025-09-11 07:00:26] [Rank 0] step:4921/10000 train_time:221286ms step_avg:44.97ms +[2025-09-11 07:00:26] [Rank 0] step:4921/10000 train_time:221286ms step_avg:44.97ms +[2025-09-11 07:00:26] [Rank 0] step:4941/10000 train_time:221965ms step_avg:44.92ms +[2025-09-11 07:00:26] [Rank 0] step:4941/10000 train_time:221965ms step_avg:44.92ms +[2025-09-11 07:00:27] [Rank 0] step:4961/10000 train_time:222643ms step_avg:44.88ms +[2025-09-11 07:00:27] [Rank 0] step:4961/10000 train_time:222643ms step_avg:44.88ms +[2025-09-11 07:00:28] [Rank 0] step:4981/10000 train_time:223323ms step_avg:44.84ms +[2025-09-11 07:00:28] [Rank 0] step:4981/10000 train_time:223323ms step_avg:44.84ms +[2025-09-11 07:00:28] [Rank 0] step:5001/10000 train_time:224003ms step_avg:44.79ms +[2025-09-11 07:00:28] [Rank 0] step:5001/10000 train_time:224003ms step_avg:44.79ms +[2025-09-11 07:00:29] [Rank 0] step:5021/10000 train_time:224680ms step_avg:44.75ms +[2025-09-11 07:00:29] [Rank 0] step:5021/10000 train_time:224680ms step_avg:44.75ms +[2025-09-11 07:00:30] [Rank 0] step:5041/10000 train_time:225358ms step_avg:44.70ms +[2025-09-11 07:00:30] [Rank 0] step:5041/10000 train_time:225358ms step_avg:44.70ms +[2025-09-11 07:00:30] [Rank 0] step:5061/10000 train_time:226039ms step_avg:44.66ms +[2025-09-11 07:00:30] [Rank 0] step:5061/10000 train_time:226039ms step_avg:44.66ms +[2025-09-11 07:00:31] [Rank 0] step:5081/10000 train_time:226717ms step_avg:44.62ms +[2025-09-11 07:00:31] [Rank 0] step:5081/10000 train_time:226717ms step_avg:44.62ms +[2025-09-11 07:00:32] [Rank 0] step:5101/10000 train_time:227396ms step_avg:44.58ms +[2025-09-11 07:00:32] [Rank 0] step:5101/10000 train_time:227396ms step_avg:44.58ms +[2025-09-11 07:00:32] [Rank 0] step:5121/10000 train_time:228074ms step_avg:44.54ms 
+[2025-09-11 07:00:32] [Rank 0] step:5121/10000 train_time:228074ms step_avg:44.54ms +[2025-09-11 07:00:33] [Rank 0] step:5141/10000 train_time:228754ms step_avg:44.50ms +[2025-09-11 07:00:33] [Rank 0] step:5141/10000 train_time:228754ms step_avg:44.50ms +[2025-09-11 07:00:34] [Rank 0] step:5161/10000 train_time:229432ms step_avg:44.45ms +[2025-09-11 07:00:34] [Rank 0] step:5161/10000 train_time:229432ms step_avg:44.45ms +[2025-09-11 07:00:34] [Rank 0] step:5181/10000 train_time:230110ms step_avg:44.41ms +[2025-09-11 07:00:34] [Rank 0] step:5181/10000 train_time:230110ms step_avg:44.41ms +[2025-09-11 07:00:35] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 07:00:35] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 07:00:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 07:00:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 07:00:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 07:00:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 07:00:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:00:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:00:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 07:00:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 07:00:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 07:00:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 07:00:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 07:00:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 07:00:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 07:00:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 07:00:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 07:00:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 07:00:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 07:00:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 07:00:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 07:00:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 07:00:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 07:00:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 07:00:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 07:00:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 07:00:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 07:00:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 07:00:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 07:00:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 07:00:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 07:00:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 07:00:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 07:00:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 07:00:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 07:00:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 07:00:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 07:00:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 07:00:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 07:00:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 07:00:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 07:00:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 07:00:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 07:00:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 07:00:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 07:00:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:00:46] [Rank 0] PRINT: step:5200/10000 val_loss:4.3867 total_sharp:5.0153e-05 L1_sharp:2.3550e-03 L2_sharp:1.9736e-04 L3_sharp:1.3144e-03 L4_sharp:3.2589e-04 L5_sharp:4.7040e-04 L6_sharp:4.8256e-04 L7_sharp:3.4581e-04 L8_sharp:7.2270e-04 L9_sharp:7.2331e-04 L10_sharp:9.2939e-04 L11_sharp:1.2027e-03 L12_sharp:1.5585e-02 total_fnorm:1.7500e+02 total_l1_linf:3.8707e+05 total_spectral:8.7500e+01 L1_fnorm:2.5156e+00 L2_fnorm:2.4531e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.5312e+00 L6_fnorm:2.5469e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.5000e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.5000e+00 L1_l1linf:6.5625e-01 L2_l1linf:6.1328e-01 L3_l1linf:5.9766e-01 L4_l1linf:6.1328e-01 L5_l1linf:6.2891e-01 L6_l1linf:6.2891e-01 L7_l1linf:6.3281e-01 L8_l1linf:6.2891e-01 L9_l1linf:6.2109e-01 L10_l1linf:6.2500e-01 L11_l1linf:6.0938e-01 L12_l1linf:6.0938e-01 L1_spectral:3.1804e-02 L2_spectral:3.0760e-02 L3_spectral:3.1182e-02 L4_spectral:3.1188e-02 L5_spectral:3.1488e-02 L6_spectral:3.1587e-02 L7_spectral:3.1582e-02 L8_spectral:3.1876e-02 L9_spectral:3.2185e-02 L10_spectral:3.1702e-02 L11_spectral:3.2288e-02 L12_spectral:3.1683e-02 train_time:230776ms step_avg:44.38ms +[2025-09-11 07:00:46] [Rank 0] PRINT: step:5200/10000 val_loss:4.3867 total_sharp:5.0153e-05 L1_sharp:2.3550e-03 L2_sharp:1.9736e-04 L3_sharp:1.3144e-03 L4_sharp:3.2589e-04 L5_sharp:4.7040e-04 L6_sharp:4.8256e-04 L7_sharp:3.4581e-04 L8_sharp:7.2270e-04 L9_sharp:7.2331e-04 L10_sharp:9.2939e-04 L11_sharp:1.2027e-03 L12_sharp:1.5585e-02 total_fnorm:1.7500e+02 total_l1_linf:3.8707e+05 total_spectral:8.7500e+01 L1_fnorm:2.5156e+00 L2_fnorm:2.4531e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.5312e+00 L6_fnorm:2.5469e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.5000e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.5000e+00 L1_l1linf:6.5625e-01 L2_l1linf:6.1328e-01 L3_l1linf:5.9766e-01 L4_l1linf:6.1328e-01 L5_l1linf:6.2891e-01 
L6_l1linf:6.2891e-01 L7_l1linf:6.3281e-01 L8_l1linf:6.2891e-01 L9_l1linf:6.2109e-01 L10_l1linf:6.2500e-01 L11_l1linf:6.0938e-01 L12_l1linf:6.0938e-01 L1_spectral:3.1804e-02 L2_spectral:3.0760e-02 L3_spectral:3.1182e-02 L4_spectral:3.1188e-02 L5_spectral:3.1488e-02 L6_spectral:3.1587e-02 L7_spectral:3.1582e-02 L8_spectral:3.1876e-02 L9_spectral:3.2185e-02 L10_spectral:3.1702e-02 L11_spectral:3.2288e-02 L12_spectral:3.1683e-02 train_time:230776ms step_avg:44.38ms +[2025-09-11 07:00:48] [Rank 0] step:5201/10000 train_time:232581ms step_avg:44.72ms +[2025-09-11 07:00:48] [Rank 0] step:5201/10000 train_time:232581ms step_avg:44.72ms +[2025-09-11 07:00:49] [Rank 0] step:5221/10000 train_time:233291ms step_avg:44.68ms +[2025-09-11 07:00:49] [Rank 0] step:5221/10000 train_time:233291ms step_avg:44.68ms +[2025-09-11 07:00:49] [Rank 0] step:5241/10000 train_time:233979ms step_avg:44.64ms +[2025-09-11 07:00:49] [Rank 0] step:5241/10000 train_time:233979ms step_avg:44.64ms +[2025-09-11 07:00:50] [Rank 0] step:5261/10000 train_time:234667ms step_avg:44.61ms +[2025-09-11 07:00:50] [Rank 0] step:5261/10000 train_time:234667ms step_avg:44.61ms +[2025-09-11 07:00:51] [Rank 0] step:5281/10000 train_time:235355ms step_avg:44.57ms +[2025-09-11 07:00:51] [Rank 0] step:5281/10000 train_time:235355ms step_avg:44.57ms +[2025-09-11 07:00:51] [Rank 0] step:5301/10000 train_time:236042ms step_avg:44.53ms +[2025-09-11 07:00:51] [Rank 0] step:5301/10000 train_time:236042ms step_avg:44.53ms +[2025-09-11 07:00:52] [Rank 0] step:5321/10000 train_time:236729ms step_avg:44.49ms +[2025-09-11 07:00:52] [Rank 0] step:5321/10000 train_time:236729ms step_avg:44.49ms +[2025-09-11 07:00:53] [Rank 0] step:5341/10000 train_time:237418ms step_avg:44.45ms +[2025-09-11 07:00:53] [Rank 0] step:5341/10000 train_time:237418ms step_avg:44.45ms +[2025-09-11 07:00:53] [Rank 0] step:5361/10000 train_time:238105ms step_avg:44.41ms +[2025-09-11 07:00:53] [Rank 0] step:5361/10000 train_time:238105ms step_avg:44.41ms 
+[2025-09-11 07:00:54] [Rank 0] step:5381/10000 train_time:238794ms step_avg:44.38ms +[2025-09-11 07:00:54] [Rank 0] step:5381/10000 train_time:238794ms step_avg:44.38ms +[2025-09-11 07:00:55] [Rank 0] step:5401/10000 train_time:239481ms step_avg:44.34ms +[2025-09-11 07:00:55] [Rank 0] step:5401/10000 train_time:239481ms step_avg:44.34ms +[2025-09-11 07:00:55] [Rank 0] step:5421/10000 train_time:240169ms step_avg:44.30ms +[2025-09-11 07:00:55] [Rank 0] step:5421/10000 train_time:240169ms step_avg:44.30ms +[2025-09-11 07:00:56] [Rank 0] step:5441/10000 train_time:240856ms step_avg:44.27ms +[2025-09-11 07:00:56] [Rank 0] step:5441/10000 train_time:240856ms step_avg:44.27ms +[2025-09-11 07:00:57] [Rank 0] step:5461/10000 train_time:241544ms step_avg:44.23ms +[2025-09-11 07:00:57] [Rank 0] step:5461/10000 train_time:241544ms step_avg:44.23ms +[2025-09-11 07:00:58] [Rank 0] step:5481/10000 train_time:242231ms step_avg:44.19ms +[2025-09-11 07:00:58] [Rank 0] step:5481/10000 train_time:242231ms step_avg:44.19ms +[2025-09-11 07:00:58] [Rank 0] step:5501/10000 train_time:242919ms step_avg:44.16ms +[2025-09-11 07:00:58] [Rank 0] step:5501/10000 train_time:242919ms step_avg:44.16ms +[2025-09-11 07:00:59] [Rank 0] step:5521/10000 train_time:243609ms step_avg:44.12ms +[2025-09-11 07:00:59] [Rank 0] step:5521/10000 train_time:243609ms step_avg:44.12ms +[2025-09-11 07:01:00] [Rank 0] step:5541/10000 train_time:244299ms step_avg:44.09ms +[2025-09-11 07:01:00] [Rank 0] step:5541/10000 train_time:244299ms step_avg:44.09ms +[2025-09-11 07:01:00] [Rank 0] step:5561/10000 train_time:244988ms step_avg:44.05ms +[2025-09-11 07:01:00] [Rank 0] step:5561/10000 train_time:244988ms step_avg:44.05ms +[2025-09-11 07:01:01] [Rank 0] step:5581/10000 train_time:245677ms step_avg:44.02ms +[2025-09-11 07:01:01] [Rank 0] step:5581/10000 train_time:245677ms step_avg:44.02ms +[2025-09-11 07:01:02] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 07:01:02] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 07:01:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 07:01:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 07:01:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 07:01:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 07:01:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:01:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:01:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 07:01:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 07:01:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 07:01:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 07:01:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 07:01:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 07:01:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 07:01:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 07:01:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 07:01:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 07:01:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 07:01:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 07:01:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 07:01:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 07:01:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 07:01:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 07:01:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 07:01:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 07:01:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 07:01:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 07:01:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 07:01:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 07:01:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 07:01:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 07:01:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 07:01:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 07:01:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 07:01:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 07:01:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 07:01:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 07:01:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 07:01:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 07:01:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 07:01:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 07:01:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 07:01:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 07:01:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 07:01:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 07:01:12] [Rank 0] PRINT: step:5600/10000 val_loss:4.3853 total_sharp:4.1176e-05 L1_sharp:2.3000e-03 L2_sharp:5.6702e-04 L3_sharp:-1.2274e-04 L4_sharp:2.4478e-04 L5_sharp:6.1352e-04 L6_sharp:4.6198e-04 L7_sharp:2.8479e-04 L8_sharp:6.0530e-04 L9_sharp:6.5886e-04 L10_sharp:8.2948e-04 L11_sharp:1.0653e-03 L12_sharp:9.0085e-03 total_fnorm:1.8000e+02 total_l1_linf:4.0960e+05 total_spectral:9.0500e+01 L1_fnorm:2.5312e+00 L2_fnorm:2.4375e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.4844e+00 L5_fnorm:2.5156e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4844e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.5000e+00 L1_l1linf:6.4844e-01 L2_l1linf:6.0156e-01 L3_l1linf:6.0156e-01 L4_l1linf:6.3672e-01 L5_l1linf:6.2891e-01 L6_l1linf:6.2891e-01 L7_l1linf:6.3672e-01 L8_l1linf:6.1328e-01 L9_l1linf:6.2500e-01 L10_l1linf:6.1328e-01 L11_l1linf:6.1328e-01 L12_l1linf:6.1328e-01 L1_spectral:3.2146e-02 L2_spectral:3.0794e-02 L3_spectral:3.1324e-02 L4_spectral:3.1515e-02 L5_spectral:3.1678e-02 L6_spectral:3.1739e-02 L7_spectral:3.1791e-02 L8_spectral:3.2181e-02 L9_spectral:3.2163e-02 L10_spectral:3.1976e-02 L11_spectral:3.2150e-02 L12_spectral:3.1566e-02 train_time:246346ms step_avg:43.99ms +[2025-09-11 07:01:12] [Rank 0] PRINT: step:5600/10000 
val_loss:4.3853 total_sharp:4.1176e-05 L1_sharp:2.3000e-03 L2_sharp:5.6702e-04 L3_sharp:-1.2274e-04 L4_sharp:2.4478e-04 L5_sharp:6.1352e-04 L6_sharp:4.6198e-04 L7_sharp:2.8479e-04 L8_sharp:6.0530e-04 L9_sharp:6.5886e-04 L10_sharp:8.2948e-04 L11_sharp:1.0653e-03 L12_sharp:9.0085e-03 total_fnorm:1.8000e+02 total_l1_linf:4.0960e+05 total_spectral:9.0500e+01 L1_fnorm:2.5312e+00 L2_fnorm:2.4375e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.4844e+00 L5_fnorm:2.5156e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5156e+00 L8_fnorm:2.4844e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5156e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.5000e+00 L1_l1linf:6.4844e-01 L2_l1linf:6.0156e-01 L3_l1linf:6.0156e-01 L4_l1linf:6.3672e-01 L5_l1linf:6.2891e-01 L6_l1linf:6.2891e-01 L7_l1linf:6.3672e-01 L8_l1linf:6.1328e-01 L9_l1linf:6.2500e-01 L10_l1linf:6.1328e-01 L11_l1linf:6.1328e-01 L12_l1linf:6.1328e-01 L1_spectral:3.2146e-02 L2_spectral:3.0794e-02 L3_spectral:3.1324e-02 L4_spectral:3.1515e-02 L5_spectral:3.1678e-02 L6_spectral:3.1739e-02 L7_spectral:3.1791e-02 L8_spectral:3.2181e-02 L9_spectral:3.2163e-02 L10_spectral:3.1976e-02 L11_spectral:3.2150e-02 L12_spectral:3.1566e-02 train_time:246346ms step_avg:43.99ms +[2025-09-11 07:01:14] [Rank 0] step:5601/10000 train_time:248159ms step_avg:44.31ms +[2025-09-11 07:01:14] [Rank 0] step:5601/10000 train_time:248159ms step_avg:44.31ms +[2025-09-11 07:01:14] [Rank 0] step:5621/10000 train_time:248877ms step_avg:44.28ms +[2025-09-11 07:01:14] [Rank 0] step:5621/10000 train_time:248877ms step_avg:44.28ms +[2025-09-11 07:01:15] [Rank 0] step:5641/10000 train_time:249565ms step_avg:44.24ms +[2025-09-11 07:01:15] [Rank 0] step:5641/10000 train_time:249565ms step_avg:44.24ms +[2025-09-11 07:01:16] [Rank 0] step:5661/10000 train_time:250252ms step_avg:44.21ms +[2025-09-11 07:01:16] [Rank 0] step:5661/10000 train_time:250252ms step_avg:44.21ms +[2025-09-11 07:01:17] [Rank 0] step:5681/10000 train_time:250940ms step_avg:44.17ms +[2025-09-11 07:01:17] [Rank 0] step:5681/10000 
train_time:250940ms step_avg:44.17ms +[2025-09-11 07:01:17] [Rank 0] step:5701/10000 train_time:251631ms step_avg:44.14ms +[2025-09-11 07:01:17] [Rank 0] step:5701/10000 train_time:251631ms step_avg:44.14ms +[2025-09-11 07:01:18] [Rank 0] step:5721/10000 train_time:252319ms step_avg:44.10ms +[2025-09-11 07:01:18] [Rank 0] step:5721/10000 train_time:252319ms step_avg:44.10ms +[2025-09-11 07:01:19] [Rank 0] step:5741/10000 train_time:253009ms step_avg:44.07ms +[2025-09-11 07:01:19] [Rank 0] step:5741/10000 train_time:253009ms step_avg:44.07ms +[2025-09-11 07:01:19] [Rank 0] step:5761/10000 train_time:253699ms step_avg:44.04ms +[2025-09-11 07:01:19] [Rank 0] step:5761/10000 train_time:253699ms step_avg:44.04ms +[2025-09-11 07:01:20] [Rank 0] step:5781/10000 train_time:254388ms step_avg:44.00ms +[2025-09-11 07:01:20] [Rank 0] step:5781/10000 train_time:254388ms step_avg:44.00ms +[2025-09-11 07:01:21] [Rank 0] step:5801/10000 train_time:255078ms step_avg:43.97ms +[2025-09-11 07:01:21] [Rank 0] step:5801/10000 train_time:255078ms step_avg:43.97ms +[2025-09-11 07:01:21] [Rank 0] step:5821/10000 train_time:255766ms step_avg:43.94ms +[2025-09-11 07:01:21] [Rank 0] step:5821/10000 train_time:255766ms step_avg:43.94ms +[2025-09-11 07:01:22] [Rank 0] step:5841/10000 train_time:256455ms step_avg:43.91ms +[2025-09-11 07:01:22] [Rank 0] step:5841/10000 train_time:256455ms step_avg:43.91ms +[2025-09-11 07:01:23] [Rank 0] step:5861/10000 train_time:257143ms step_avg:43.87ms +[2025-09-11 07:01:23] [Rank 0] step:5861/10000 train_time:257143ms step_avg:43.87ms +[2025-09-11 07:01:23] [Rank 0] step:5881/10000 train_time:257833ms step_avg:43.84ms +[2025-09-11 07:01:23] [Rank 0] step:5881/10000 train_time:257833ms step_avg:43.84ms +[2025-09-11 07:01:24] [Rank 0] step:5901/10000 train_time:258521ms step_avg:43.81ms +[2025-09-11 07:01:24] [Rank 0] step:5901/10000 train_time:258521ms step_avg:43.81ms +[2025-09-11 07:01:25] [Rank 0] step:5921/10000 train_time:259212ms step_avg:43.78ms 
+[2025-09-11 07:01:25] [Rank 0] step:5921/10000 train_time:259212ms step_avg:43.78ms +[2025-09-11 07:01:26] [Rank 0] step:5941/10000 train_time:259903ms step_avg:43.75ms +[2025-09-11 07:01:26] [Rank 0] step:5941/10000 train_time:259903ms step_avg:43.75ms +[2025-09-11 07:01:26] [Rank 0] step:5961/10000 train_time:260591ms step_avg:43.72ms +[2025-09-11 07:01:26] [Rank 0] step:5961/10000 train_time:260591ms step_avg:43.72ms +[2025-09-11 07:01:27] [Rank 0] step:5981/10000 train_time:261281ms step_avg:43.69ms +[2025-09-11 07:01:27] [Rank 0] step:5981/10000 train_time:261281ms step_avg:43.69ms +[2025-09-11 07:01:28] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 07:01:28] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 07:01:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 07:01:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 07:01:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 07:01:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 07:01:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:01:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:01:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 07:01:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 07:01:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 07:01:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 07:01:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 07:01:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 07:01:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 07:01:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 07:01:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 07:01:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 07:01:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 07:01:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 07:01:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 07:01:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 07:01:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 07:01:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 07:01:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 07:01:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 07:01:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 07:01:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 07:01:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 07:01:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 07:01:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 07:01:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 07:01:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 07:01:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 07:01:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 07:01:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 07:01:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 07:01:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 07:01:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 07:01:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 07:01:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 07:01:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 07:01:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 07:01:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 07:01:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 07:01:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:01:41] [Rank 0] PRINT: step:6000/10000 val_loss:4.3362 total_sharp:3.6345e-05 L1_sharp:3.2919e-03 L2_sharp:2.2655e-04 L3_sharp:2.2915e-05 L4_sharp:4.5011e-04 L5_sharp:4.7156e-04 L6_sharp:4.3669e-04 L7_sharp:3.3428e-04 L8_sharp:5.5038e-04 L9_sharp:5.1720e-04 L10_sharp:7.1919e-04 L11_sharp:1.0832e-03 L12_sharp:1.0012e-02 total_fnorm:1.8000e+02 total_l1_linf:4.0141e+05 total_spectral:9.0500e+01 L1_fnorm:2.5312e+00 L2_fnorm:2.4375e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.5312e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5312e+00 L8_fnorm:2.5000e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.5000e+00 L1_l1linf:6.3672e-01 L2_l1linf:5.9375e-01 L3_l1linf:5.9375e-01 L4_l1linf:6.1719e-01 L5_l1linf:6.2891e-01 L6_l1linf:6.2500e-01 L7_l1linf:6.2109e-01 L8_l1linf:6.2500e-01 L9_l1linf:6.1719e-01 L10_l1linf:6.1719e-01 L11_l1linf:5.9766e-01 L12_l1linf:6.0547e-01 L1_spectral:3.2271e-02 L2_spectral:3.0855e-02 L3_spectral:3.1534e-02 L4_spectral:3.1562e-02 L5_spectral:3.1757e-02 L6_spectral:3.2044e-02 L7_spectral:3.1808e-02 L8_spectral:3.2257e-02 L9_spectral:3.2223e-02 L10_spectral:3.2034e-02 L11_spectral:3.2276e-02 L12_spectral:3.1665e-02 train_time:261953ms step_avg:43.66ms +[2025-09-11 07:01:41] [Rank 0] PRINT: step:6000/10000 val_loss:4.3362 total_sharp:3.6345e-05 L1_sharp:3.2919e-03 L2_sharp:2.2655e-04 L3_sharp:2.2915e-05 L4_sharp:4.5011e-04 L5_sharp:4.7156e-04 L6_sharp:4.3669e-04 L7_sharp:3.3428e-04 L8_sharp:5.5038e-04 L9_sharp:5.1720e-04 L10_sharp:7.1919e-04 L11_sharp:1.0832e-03 L12_sharp:1.0012e-02 total_fnorm:1.8000e+02 total_l1_linf:4.0141e+05 total_spectral:9.0500e+01 L1_fnorm:2.5312e+00 L2_fnorm:2.4375e+00 L3_fnorm:2.4844e+00 L4_fnorm:2.5000e+00 L5_fnorm:2.5312e+00 L6_fnorm:2.5312e+00 L7_fnorm:2.5312e+00 L8_fnorm:2.5000e+00 L9_fnorm:2.5312e+00 L10_fnorm:2.5312e+00 L11_fnorm:2.5156e+00 L12_fnorm:2.5000e+00 L1_l1linf:6.3672e-01 L2_l1linf:5.9375e-01 L3_l1linf:5.9375e-01 L4_l1linf:6.1719e-01 L5_l1linf:6.2891e-01 
L6_l1linf:6.2500e-01 L7_l1linf:6.2109e-01 L8_l1linf:6.2500e-01 L9_l1linf:6.1719e-01 L10_l1linf:6.1719e-01 L11_l1linf:5.9766e-01 L12_l1linf:6.0547e-01 L1_spectral:3.2271e-02 L2_spectral:3.0855e-02 L3_spectral:3.1534e-02 L4_spectral:3.1562e-02 L5_spectral:3.1757e-02 L6_spectral:3.2044e-02 L7_spectral:3.1808e-02 L8_spectral:3.2257e-02 L9_spectral:3.2223e-02 L10_spectral:3.2034e-02 L11_spectral:3.2276e-02 L12_spectral:3.1665e-02 train_time:261953ms step_avg:43.66ms +[2025-09-11 07:01:43] [Rank 0] step:6001/10000 train_time:263766ms step_avg:43.95ms +[2025-09-11 07:01:43] [Rank 0] step:6001/10000 train_time:263766ms step_avg:43.95ms +[2025-09-11 07:01:44] [Rank 0] step:6021/10000 train_time:264575ms step_avg:43.94ms +[2025-09-11 07:01:44] [Rank 0] step:6021/10000 train_time:264575ms step_avg:43.94ms +[2025-09-11 07:01:45] [Rank 0] step:6041/10000 train_time:265431ms step_avg:43.94ms +[2025-09-11 07:01:45] [Rank 0] step:6041/10000 train_time:265431ms step_avg:43.94ms +[2025-09-11 07:01:46] [Rank 0] step:6061/10000 train_time:266231ms step_avg:43.93ms +[2025-09-11 07:01:46] [Rank 0] step:6061/10000 train_time:266231ms step_avg:43.93ms +[2025-09-11 07:01:46] [Rank 0] step:6081/10000 train_time:266924ms step_avg:43.89ms +[2025-09-11 07:01:46] [Rank 0] step:6081/10000 train_time:266924ms step_avg:43.89ms +[2025-09-11 07:01:47] [Rank 0] step:6101/10000 train_time:267626ms step_avg:43.87ms +[2025-09-11 07:01:47] [Rank 0] step:6101/10000 train_time:267626ms step_avg:43.87ms +[2025-09-11 07:01:48] [Rank 0] step:6121/10000 train_time:268317ms step_avg:43.84ms +[2025-09-11 07:01:48] [Rank 0] step:6121/10000 train_time:268317ms step_avg:43.84ms +[2025-09-11 07:01:48] [Rank 0] step:6141/10000 train_time:269009ms step_avg:43.81ms +[2025-09-11 07:01:48] [Rank 0] step:6141/10000 train_time:269009ms step_avg:43.81ms +[2025-09-11 07:01:49] [Rank 0] step:6161/10000 train_time:269700ms step_avg:43.78ms +[2025-09-11 07:01:49] [Rank 0] step:6161/10000 train_time:269700ms step_avg:43.78ms 
+[2025-09-11 07:01:50] [Rank 0] step:6181/10000 train_time:270389ms step_avg:43.75ms +[2025-09-11 07:01:50] [Rank 0] step:6181/10000 train_time:270389ms step_avg:43.75ms +[2025-09-11 07:01:50] [Rank 0] step:6201/10000 train_time:271082ms step_avg:43.72ms +[2025-09-11 07:01:50] [Rank 0] step:6201/10000 train_time:271082ms step_avg:43.72ms +[2025-09-11 07:01:51] [Rank 0] step:6221/10000 train_time:271773ms step_avg:43.69ms +[2025-09-11 07:01:51] [Rank 0] step:6221/10000 train_time:271773ms step_avg:43.69ms +[2025-09-11 07:01:52] [Rank 0] step:6241/10000 train_time:272465ms step_avg:43.66ms +[2025-09-11 07:01:52] [Rank 0] step:6241/10000 train_time:272465ms step_avg:43.66ms +[2025-09-11 07:01:52] [Rank 0] step:6261/10000 train_time:273155ms step_avg:43.63ms +[2025-09-11 07:01:52] [Rank 0] step:6261/10000 train_time:273155ms step_avg:43.63ms +[2025-09-11 07:01:53] [Rank 0] step:6281/10000 train_time:273846ms step_avg:43.60ms +[2025-09-11 07:01:53] [Rank 0] step:6281/10000 train_time:273846ms step_avg:43.60ms +[2025-09-11 07:01:54] [Rank 0] step:6301/10000 train_time:274536ms step_avg:43.57ms +[2025-09-11 07:01:54] [Rank 0] step:6301/10000 train_time:274536ms step_avg:43.57ms +[2025-09-11 07:01:55] [Rank 0] step:6321/10000 train_time:275230ms step_avg:43.54ms +[2025-09-11 07:01:55] [Rank 0] step:6321/10000 train_time:275230ms step_avg:43.54ms +[2025-09-11 07:01:55] [Rank 0] step:6341/10000 train_time:275922ms step_avg:43.51ms +[2025-09-11 07:01:55] [Rank 0] step:6341/10000 train_time:275922ms step_avg:43.51ms +[2025-09-11 07:01:56] [Rank 0] step:6361/10000 train_time:276613ms step_avg:43.49ms +[2025-09-11 07:01:56] [Rank 0] step:6361/10000 train_time:276613ms step_avg:43.49ms +[2025-09-11 07:01:57] [Rank 0] step:6381/10000 train_time:277304ms step_avg:43.46ms +[2025-09-11 07:01:57] [Rank 0] step:6381/10000 train_time:277304ms step_avg:43.46ms +[2025-09-11 07:01:57] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 07:01:57] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 07:01:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 07:01:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 07:02:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 07:02:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 07:02:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:02:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:02:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 07:02:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 07:02:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 07:02:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 07:02:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 07:02:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 07:02:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 07:02:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 07:02:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 07:02:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 07:02:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 07:02:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 07:02:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 07:02:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 07:02:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 07:02:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 07:02:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 07:02:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 07:02:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 07:02:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 07:02:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 07:02:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 07:02:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 07:02:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 07:02:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 07:02:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 07:02:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 07:02:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 07:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 07:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 07:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 07:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 07:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 07:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 07:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 07:02:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 07:02:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 07:02:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 07:02:08] [Rank 0] PRINT: step:6400/10000 val_loss:4.2985 total_sharp:4.3106e-05 L1_sharp:1.7363e-03 L2_sharp:2.6977e-04 L3_sharp:1.0410e-04 L4_sharp:3.2723e-04 L5_sharp:3.5997e-04 L6_sharp:2.9551e-04 L7_sharp:3.7635e-04 L8_sharp:6.5056e-04 L9_sharp:5.9897e-04 L10_sharp:8.3800e-04 L11_sharp:1.1740e-03 L12_sharp:1.1347e-02 total_fnorm:1.6300e+02 total_l1_linf:3.5635e+05 total_spectral:8.1500e+01 L1_fnorm:2.2812e+00 L2_fnorm:2.1719e+00 L3_fnorm:2.2344e+00 L4_fnorm:2.2344e+00 L5_fnorm:2.2656e+00 L6_fnorm:2.2812e+00 L7_fnorm:2.2656e+00 L8_fnorm:2.2344e+00 L9_fnorm:2.2812e+00 L10_fnorm:2.2656e+00 L11_fnorm:2.2500e+00 L12_fnorm:2.2500e+00 L1_l1linf:5.5078e-01 L2_l1linf:5.0781e-01 L3_l1linf:5.1562e-01 L4_l1linf:5.3906e-01 L5_l1linf:5.3906e-01 L6_l1linf:5.4688e-01 L7_l1linf:5.3906e-01 L8_l1linf:5.3516e-01 L9_l1linf:5.4297e-01 L10_l1linf:5.3516e-01 L11_l1linf:5.1562e-01 L12_l1linf:5.2734e-01 L1_spectral:2.9472e-02 L2_spectral:2.8124e-02 L3_spectral:2.8965e-02 L4_spectral:2.8901e-02 L5_spectral:2.9210e-02 L6_spectral:2.9207e-02 L7_spectral:2.9175e-02 L8_spectral:2.9258e-02 L9_spectral:2.9700e-02 L10_spectral:2.9341e-02 L11_spectral:2.9438e-02 L12_spectral:2.9014e-02 train_time:277973ms step_avg:43.43ms +[2025-09-11 07:02:08] [Rank 0] PRINT: step:6400/10000 
val_loss:4.2985 total_sharp:4.3106e-05 L1_sharp:1.7363e-03 L2_sharp:2.6977e-04 L3_sharp:1.0410e-04 L4_sharp:3.2723e-04 L5_sharp:3.5997e-04 L6_sharp:2.9551e-04 L7_sharp:3.7635e-04 L8_sharp:6.5056e-04 L9_sharp:5.9897e-04 L10_sharp:8.3800e-04 L11_sharp:1.1740e-03 L12_sharp:1.1347e-02 total_fnorm:1.6300e+02 total_l1_linf:3.5635e+05 total_spectral:8.1500e+01 L1_fnorm:2.2812e+00 L2_fnorm:2.1719e+00 L3_fnorm:2.2344e+00 L4_fnorm:2.2344e+00 L5_fnorm:2.2656e+00 L6_fnorm:2.2812e+00 L7_fnorm:2.2656e+00 L8_fnorm:2.2344e+00 L9_fnorm:2.2812e+00 L10_fnorm:2.2656e+00 L11_fnorm:2.2500e+00 L12_fnorm:2.2500e+00 L1_l1linf:5.5078e-01 L2_l1linf:5.0781e-01 L3_l1linf:5.1562e-01 L4_l1linf:5.3906e-01 L5_l1linf:5.3906e-01 L6_l1linf:5.4688e-01 L7_l1linf:5.3906e-01 L8_l1linf:5.3516e-01 L9_l1linf:5.4297e-01 L10_l1linf:5.3516e-01 L11_l1linf:5.1562e-01 L12_l1linf:5.2734e-01 L1_spectral:2.9472e-02 L2_spectral:2.8124e-02 L3_spectral:2.8965e-02 L4_spectral:2.8901e-02 L5_spectral:2.9210e-02 L6_spectral:2.9207e-02 L7_spectral:2.9175e-02 L8_spectral:2.9258e-02 L9_spectral:2.9700e-02 L10_spectral:2.9341e-02 L11_spectral:2.9438e-02 L12_spectral:2.9014e-02 train_time:277973ms step_avg:43.43ms +[2025-09-11 07:02:09] [Rank 0] step:6401/10000 train_time:279366ms step_avg:43.64ms +[2025-09-11 07:02:09] [Rank 0] step:6401/10000 train_time:279366ms step_avg:43.64ms +[2025-09-11 07:02:10] [Rank 0] step:6421/10000 train_time:280097ms step_avg:43.62ms +[2025-09-11 07:02:10] [Rank 0] step:6421/10000 train_time:280097ms step_avg:43.62ms +[2025-09-11 07:02:11] [Rank 0] step:6441/10000 train_time:280791ms step_avg:43.59ms +[2025-09-11 07:02:11] [Rank 0] step:6441/10000 train_time:280791ms step_avg:43.59ms +[2025-09-11 07:02:12] [Rank 0] step:6461/10000 train_time:281483ms step_avg:43.57ms +[2025-09-11 07:02:12] [Rank 0] step:6461/10000 train_time:281483ms step_avg:43.57ms +[2025-09-11 07:02:12] [Rank 0] step:6481/10000 train_time:282176ms step_avg:43.54ms +[2025-09-11 07:02:12] [Rank 0] step:6481/10000 
train_time:282176ms step_avg:43.54ms +[2025-09-11 07:02:13] [Rank 0] step:6501/10000 train_time:282869ms step_avg:43.51ms +[2025-09-11 07:02:13] [Rank 0] step:6501/10000 train_time:282869ms step_avg:43.51ms +[2025-09-11 07:02:14] [Rank 0] step:6521/10000 train_time:283560ms step_avg:43.48ms +[2025-09-11 07:02:14] [Rank 0] step:6521/10000 train_time:283560ms step_avg:43.48ms +[2025-09-11 07:02:14] [Rank 0] step:6541/10000 train_time:284250ms step_avg:43.46ms +[2025-09-11 07:02:14] [Rank 0] step:6541/10000 train_time:284250ms step_avg:43.46ms +[2025-09-11 07:02:15] [Rank 0] step:6561/10000 train_time:284942ms step_avg:43.43ms +[2025-09-11 07:02:15] [Rank 0] step:6561/10000 train_time:284942ms step_avg:43.43ms +[2025-09-11 07:02:16] [Rank 0] step:6581/10000 train_time:285634ms step_avg:43.40ms +[2025-09-11 07:02:16] [Rank 0] step:6581/10000 train_time:285634ms step_avg:43.40ms +[2025-09-11 07:02:16] [Rank 0] step:6601/10000 train_time:286324ms step_avg:43.38ms +[2025-09-11 07:02:16] [Rank 0] step:6601/10000 train_time:286324ms step_avg:43.38ms +[2025-09-11 07:02:17] [Rank 0] step:6621/10000 train_time:287015ms step_avg:43.35ms +[2025-09-11 07:02:17] [Rank 0] step:6621/10000 train_time:287015ms step_avg:43.35ms +[2025-09-11 07:02:18] [Rank 0] step:6641/10000 train_time:287707ms step_avg:43.32ms +[2025-09-11 07:02:18] [Rank 0] step:6641/10000 train_time:287707ms step_avg:43.32ms +[2025-09-11 07:02:18] [Rank 0] step:6661/10000 train_time:288400ms step_avg:43.30ms +[2025-09-11 07:02:18] [Rank 0] step:6661/10000 train_time:288400ms step_avg:43.30ms +[2025-09-11 07:02:19] [Rank 0] step:6681/10000 train_time:289098ms step_avg:43.27ms +[2025-09-11 07:02:19] [Rank 0] step:6681/10000 train_time:289098ms step_avg:43.27ms +[2025-09-11 07:02:20] [Rank 0] step:6701/10000 train_time:289796ms step_avg:43.25ms +[2025-09-11 07:02:20] [Rank 0] step:6701/10000 train_time:289796ms step_avg:43.25ms +[2025-09-11 07:02:21] [Rank 0] step:6721/10000 train_time:290495ms step_avg:43.22ms 
+[2025-09-11 07:02:21] [Rank 0] step:6721/10000 train_time:290495ms step_avg:43.22ms +[2025-09-11 07:02:21] [Rank 0] step:6741/10000 train_time:291194ms step_avg:43.20ms +[2025-09-11 07:02:21] [Rank 0] step:6741/10000 train_time:291194ms step_avg:43.20ms +[2025-09-11 07:02:22] [Rank 0] step:6761/10000 train_time:291892ms step_avg:43.17ms +[2025-09-11 07:02:22] [Rank 0] step:6761/10000 train_time:291892ms step_avg:43.17ms +[2025-09-11 07:02:23] [Rank 0] step:6781/10000 train_time:292590ms step_avg:43.15ms +[2025-09-11 07:02:23] [Rank 0] step:6781/10000 train_time:292590ms step_avg:43.15ms +[2025-09-11 07:02:23] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 07:02:23] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 07:02:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 07:02:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 07:02:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 07:02:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 07:02:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:02:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:02:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 07:02:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 07:02:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 07:02:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 07:02:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 07:02:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 07:02:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 07:02:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 07:02:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 07:02:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 07:02:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 07:02:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 07:02:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 07:02:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 07:02:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 07:02:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 07:02:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 07:02:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 07:02:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 07:02:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 07:02:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 07:02:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 07:02:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 07:02:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 07:02:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 07:02:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 07:02:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 07:02:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 07:02:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 07:02:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 07:02:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 07:02:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 07:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 07:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 07:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 07:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 07:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 07:02:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:02:33] [Rank 0] PRINT: step:6800/10000 val_loss:4.2424 total_sharp:3.4486e-05 L1_sharp:2.8676e-03 L2_sharp:2.9547e-04 L3_sharp:2.2151e-04 L4_sharp:5.0293e-04 L5_sharp:3.9956e-04 L6_sharp:3.6107e-04 L7_sharp:4.4916e-04 L8_sharp:7.8754e-04 L9_sharp:5.8776e-04 L10_sharp:7.7305e-04 L11_sharp:1.2352e-03 L12_sharp:1.1860e-02 total_fnorm:1.5800e+02 total_l1_linf:3.4611e+05 total_spectral:7.9000e+01 L1_fnorm:2.0312e+00 L2_fnorm:1.9297e+00 L3_fnorm:1.9688e+00 L4_fnorm:1.9766e+00 L5_fnorm:2.0000e+00 L6_fnorm:2.0156e+00 L7_fnorm:2.0000e+00 L8_fnorm:1.9688e+00 L9_fnorm:2.0000e+00 L10_fnorm:2.0000e+00 L11_fnorm:1.9922e+00 L12_fnorm:1.9844e+00 L1_l1linf:4.7461e-01 L2_l1linf:4.4336e-01 L3_l1linf:4.4141e-01 L4_l1linf:4.6094e-01 L5_l1linf:4.6875e-01 L6_l1linf:4.6875e-01 L7_l1linf:4.7070e-01 L8_l1linf:4.6875e-01 L9_l1linf:4.5898e-01 L10_l1linf:4.6484e-01 L11_l1linf:4.4141e-01 L12_l1linf:4.5117e-01 L1_spectral:2.6460e-02 L2_spectral:2.5382e-02 L3_spectral:2.6086e-02 L4_spectral:2.6073e-02 L5_spectral:2.6367e-02 L6_spectral:2.6511e-02 L7_spectral:2.6310e-02 L8_spectral:2.6342e-02 L9_spectral:2.6577e-02 L10_spectral:2.6326e-02 L11_spectral:2.6441e-02 L12_spectral:2.6273e-02 train_time:293269ms step_avg:43.13ms +[2025-09-11 07:02:33] [Rank 0] PRINT: step:6800/10000 val_loss:4.2424 total_sharp:3.4486e-05 L1_sharp:2.8676e-03 L2_sharp:2.9547e-04 L3_sharp:2.2151e-04 L4_sharp:5.0293e-04 L5_sharp:3.9956e-04 L6_sharp:3.6107e-04 L7_sharp:4.4916e-04 L8_sharp:7.8754e-04 L9_sharp:5.8776e-04 L10_sharp:7.7305e-04 L11_sharp:1.2352e-03 L12_sharp:1.1860e-02 total_fnorm:1.5800e+02 total_l1_linf:3.4611e+05 total_spectral:7.9000e+01 L1_fnorm:2.0312e+00 L2_fnorm:1.9297e+00 L3_fnorm:1.9688e+00 L4_fnorm:1.9766e+00 L5_fnorm:2.0000e+00 L6_fnorm:2.0156e+00 L7_fnorm:2.0000e+00 L8_fnorm:1.9688e+00 L9_fnorm:2.0000e+00 L10_fnorm:2.0000e+00 L11_fnorm:1.9922e+00 L12_fnorm:1.9844e+00 L1_l1linf:4.7461e-01 L2_l1linf:4.4336e-01 L3_l1linf:4.4141e-01 L4_l1linf:4.6094e-01 L5_l1linf:4.6875e-01 
L6_l1linf:4.6875e-01 L7_l1linf:4.7070e-01 L8_l1linf:4.6875e-01 L9_l1linf:4.5898e-01 L10_l1linf:4.6484e-01 L11_l1linf:4.4141e-01 L12_l1linf:4.5117e-01 L1_spectral:2.6460e-02 L2_spectral:2.5382e-02 L3_spectral:2.6086e-02 L4_spectral:2.6073e-02 L5_spectral:2.6367e-02 L6_spectral:2.6511e-02 L7_spectral:2.6310e-02 L8_spectral:2.6342e-02 L9_spectral:2.6577e-02 L10_spectral:2.6326e-02 L11_spectral:2.6441e-02 L12_spectral:2.6273e-02 train_time:293269ms step_avg:43.13ms +[2025-09-11 07:02:35] [Rank 0] step:6801/10000 train_time:294533ms step_avg:43.31ms +[2025-09-11 07:02:35] [Rank 0] step:6801/10000 train_time:294533ms step_avg:43.31ms +[2025-09-11 07:02:35] [Rank 0] step:6821/10000 train_time:295267ms step_avg:43.29ms +[2025-09-11 07:02:35] [Rank 0] step:6821/10000 train_time:295267ms step_avg:43.29ms +[2025-09-11 07:02:36] [Rank 0] step:6841/10000 train_time:295970ms step_avg:43.26ms +[2025-09-11 07:02:36] [Rank 0] step:6841/10000 train_time:295970ms step_avg:43.26ms +[2025-09-11 07:02:37] [Rank 0] step:6861/10000 train_time:296671ms step_avg:43.24ms +[2025-09-11 07:02:37] [Rank 0] step:6861/10000 train_time:296671ms step_avg:43.24ms +[2025-09-11 07:02:38] [Rank 0] step:6881/10000 train_time:297371ms step_avg:43.22ms +[2025-09-11 07:02:38] [Rank 0] step:6881/10000 train_time:297371ms step_avg:43.22ms +[2025-09-11 07:02:38] [Rank 0] step:6901/10000 train_time:298069ms step_avg:43.19ms +[2025-09-11 07:02:38] [Rank 0] step:6901/10000 train_time:298069ms step_avg:43.19ms +[2025-09-11 07:02:39] [Rank 0] step:6921/10000 train_time:298768ms step_avg:43.17ms +[2025-09-11 07:02:39] [Rank 0] step:6921/10000 train_time:298768ms step_avg:43.17ms +[2025-09-11 07:02:40] [Rank 0] step:6941/10000 train_time:299468ms step_avg:43.14ms +[2025-09-11 07:02:40] [Rank 0] step:6941/10000 train_time:299468ms step_avg:43.14ms +[2025-09-11 07:02:40] [Rank 0] step:6961/10000 train_time:300166ms step_avg:43.12ms +[2025-09-11 07:02:40] [Rank 0] step:6961/10000 train_time:300166ms step_avg:43.12ms 
+[2025-09-11 07:02:41] [Rank 0] step:6981/10000 train_time:300867ms step_avg:43.10ms +[2025-09-11 07:02:41] [Rank 0] step:6981/10000 train_time:300867ms step_avg:43.10ms +[2025-09-11 07:02:42] [Rank 0] step:7001/10000 train_time:301567ms step_avg:43.07ms +[2025-09-11 07:02:42] [Rank 0] step:7001/10000 train_time:301567ms step_avg:43.07ms +[2025-09-11 07:02:42] [Rank 0] step:7021/10000 train_time:302267ms step_avg:43.05ms +[2025-09-11 07:02:42] [Rank 0] step:7021/10000 train_time:302267ms step_avg:43.05ms +[2025-09-11 07:02:43] [Rank 0] step:7041/10000 train_time:302965ms step_avg:43.03ms +[2025-09-11 07:02:43] [Rank 0] step:7041/10000 train_time:302965ms step_avg:43.03ms +[2025-09-11 07:02:44] [Rank 0] step:7061/10000 train_time:303665ms step_avg:43.01ms +[2025-09-11 07:02:44] [Rank 0] step:7061/10000 train_time:303665ms step_avg:43.01ms +[2025-09-11 07:02:45] [Rank 0] step:7081/10000 train_time:304365ms step_avg:42.98ms +[2025-09-11 07:02:45] [Rank 0] step:7081/10000 train_time:304365ms step_avg:42.98ms +[2025-09-11 07:02:46] [Rank 0] step:7101/10000 train_time:305357ms step_avg:43.00ms +[2025-09-11 07:02:46] [Rank 0] step:7101/10000 train_time:305357ms step_avg:43.00ms +[2025-09-11 07:02:47] [Rank 0] step:7121/10000 train_time:306366ms step_avg:43.02ms +[2025-09-11 07:02:47] [Rank 0] step:7121/10000 train_time:306366ms step_avg:43.02ms +[2025-09-11 07:02:47] [Rank 0] step:7141/10000 train_time:307066ms step_avg:43.00ms +[2025-09-11 07:02:47] [Rank 0] step:7141/10000 train_time:307066ms step_avg:43.00ms +[2025-09-11 07:02:48] [Rank 0] step:7161/10000 train_time:308058ms step_avg:43.02ms +[2025-09-11 07:02:48] [Rank 0] step:7161/10000 train_time:308058ms step_avg:43.02ms +[2025-09-11 07:02:49] [Rank 0] step:7181/10000 train_time:308757ms step_avg:43.00ms +[2025-09-11 07:02:49] [Rank 0] step:7181/10000 train_time:308757ms step_avg:43.00ms +[2025-09-11 07:02:50] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 07:02:50] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 07:02:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 07:02:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 07:02:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 07:02:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 07:02:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:02:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:02:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 07:02:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 07:02:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 07:02:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 07:02:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 07:02:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 07:02:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 07:02:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 07:02:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 07:02:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 07:02:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 07:02:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 07:02:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 07:02:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 07:02:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 07:02:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 07:02:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 07:02:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 07:02:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 07:02:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 07:02:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 07:02:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 07:02:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 07:02:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 07:02:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 07:02:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 07:02:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 07:02:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 07:02:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 07:02:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 07:02:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 07:02:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 07:02:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 07:02:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 07:02:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 07:02:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 07:02:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 07:02:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 07:02:59] [Rank 0] PRINT: step:7200/10000 val_loss:4.2008 total_sharp:3.0415e-05 L1_sharp:2.2025e-03 L2_sharp:3.4562e-04 L3_sharp:7.7711e-05 L4_sharp:3.3387e-04 L5_sharp:5.0441e-04 L6_sharp:3.7054e-04 L7_sharp:4.4765e-04 L8_sharp:6.3712e-04 L9_sharp:5.5779e-04 L10_sharp:7.3354e-04 L11_sharp:1.0570e-03 L12_sharp:8.2430e-03 total_fnorm:1.3700e+02 total_l1_linf:2.8877e+05 total_spectral:6.8500e+01 L1_fnorm:1.7734e+00 L2_fnorm:1.6875e+00 L3_fnorm:1.7188e+00 L4_fnorm:1.7109e+00 L5_fnorm:1.7344e+00 L6_fnorm:1.7344e+00 L7_fnorm:1.7344e+00 L8_fnorm:1.7031e+00 L9_fnorm:1.7266e+00 L10_fnorm:1.7266e+00 L11_fnorm:1.7109e+00 L12_fnorm:1.7031e+00 L1_l1linf:3.9062e-01 L2_l1linf:3.7695e-01 L3_l1linf:3.6133e-01 L4_l1linf:3.8281e-01 L5_l1linf:3.8672e-01 L6_l1linf:3.9258e-01 L7_l1linf:3.8672e-01 L8_l1linf:3.8477e-01 L9_l1linf:3.8281e-01 L10_l1linf:3.8281e-01 L11_l1linf:3.7109e-01 L12_l1linf:3.7305e-01 L1_spectral:2.3071e-02 L2_spectral:2.2307e-02 L3_spectral:2.3086e-02 L4_spectral:2.2970e-02 L5_spectral:2.3445e-02 L6_spectral:2.3251e-02 L7_spectral:2.3082e-02 L8_spectral:2.2960e-02 L9_spectral:2.3243e-02 L10_spectral:2.3198e-02 L11_spectral:2.3378e-02 L12_spectral:2.3323e-02 train_time:309436ms step_avg:42.98ms +[2025-09-11 07:02:59] [Rank 0] PRINT: step:7200/10000 
val_loss:4.2008 total_sharp:3.0415e-05 L1_sharp:2.2025e-03 L2_sharp:3.4562e-04 L3_sharp:7.7711e-05 L4_sharp:3.3387e-04 L5_sharp:5.0441e-04 L6_sharp:3.7054e-04 L7_sharp:4.4765e-04 L8_sharp:6.3712e-04 L9_sharp:5.5779e-04 L10_sharp:7.3354e-04 L11_sharp:1.0570e-03 L12_sharp:8.2430e-03 total_fnorm:1.3700e+02 total_l1_linf:2.8877e+05 total_spectral:6.8500e+01 L1_fnorm:1.7734e+00 L2_fnorm:1.6875e+00 L3_fnorm:1.7188e+00 L4_fnorm:1.7109e+00 L5_fnorm:1.7344e+00 L6_fnorm:1.7344e+00 L7_fnorm:1.7344e+00 L8_fnorm:1.7031e+00 L9_fnorm:1.7266e+00 L10_fnorm:1.7266e+00 L11_fnorm:1.7109e+00 L12_fnorm:1.7031e+00 L1_l1linf:3.9062e-01 L2_l1linf:3.7695e-01 L3_l1linf:3.6133e-01 L4_l1linf:3.8281e-01 L5_l1linf:3.8672e-01 L6_l1linf:3.9258e-01 L7_l1linf:3.8672e-01 L8_l1linf:3.8477e-01 L9_l1linf:3.8281e-01 L10_l1linf:3.8281e-01 L11_l1linf:3.7109e-01 L12_l1linf:3.7305e-01 L1_spectral:2.3071e-02 L2_spectral:2.2307e-02 L3_spectral:2.3086e-02 L4_spectral:2.2970e-02 L5_spectral:2.3445e-02 L6_spectral:2.3251e-02 L7_spectral:2.3082e-02 L8_spectral:2.2960e-02 L9_spectral:2.3243e-02 L10_spectral:2.3198e-02 L11_spectral:2.3378e-02 L12_spectral:2.3323e-02 train_time:309436ms step_avg:42.98ms +[2025-09-11 07:03:01] [Rank 0] step:7201/10000 train_time:310644ms step_avg:43.14ms +[2025-09-11 07:03:01] [Rank 0] step:7201/10000 train_time:310644ms step_avg:43.14ms +[2025-09-11 07:03:01] [Rank 0] step:7221/10000 train_time:311386ms step_avg:43.12ms +[2025-09-11 07:03:01] [Rank 0] step:7221/10000 train_time:311386ms step_avg:43.12ms +[2025-09-11 07:03:02] [Rank 0] step:7241/10000 train_time:312088ms step_avg:43.10ms +[2025-09-11 07:03:02] [Rank 0] step:7241/10000 train_time:312088ms step_avg:43.10ms +[2025-09-11 07:03:03] [Rank 0] step:7261/10000 train_time:312791ms step_avg:43.08ms +[2025-09-11 07:03:03] [Rank 0] step:7261/10000 train_time:312791ms step_avg:43.08ms +[2025-09-11 07:03:04] [Rank 0] step:7281/10000 train_time:313496ms step_avg:43.06ms +[2025-09-11 07:03:04] [Rank 0] step:7281/10000 
train_time:313496ms step_avg:43.06ms +[2025-09-11 07:03:04] [Rank 0] step:7301/10000 train_time:314195ms step_avg:43.03ms +[2025-09-11 07:03:04] [Rank 0] step:7301/10000 train_time:314195ms step_avg:43.03ms +[2025-09-11 07:03:05] [Rank 0] step:7321/10000 train_time:314895ms step_avg:43.01ms +[2025-09-11 07:03:05] [Rank 0] step:7321/10000 train_time:314895ms step_avg:43.01ms +[2025-09-11 07:03:06] [Rank 0] step:7341/10000 train_time:315597ms step_avg:42.99ms +[2025-09-11 07:03:06] [Rank 0] step:7341/10000 train_time:315597ms step_avg:42.99ms +[2025-09-11 07:03:06] [Rank 0] step:7361/10000 train_time:316297ms step_avg:42.97ms +[2025-09-11 07:03:06] [Rank 0] step:7361/10000 train_time:316297ms step_avg:42.97ms +[2025-09-11 07:03:07] [Rank 0] step:7381/10000 train_time:316998ms step_avg:42.95ms +[2025-09-11 07:03:07] [Rank 0] step:7381/10000 train_time:316998ms step_avg:42.95ms +[2025-09-11 07:03:08] [Rank 0] step:7401/10000 train_time:317698ms step_avg:42.93ms +[2025-09-11 07:03:08] [Rank 0] step:7401/10000 train_time:317698ms step_avg:42.93ms +[2025-09-11 07:03:08] [Rank 0] step:7421/10000 train_time:318398ms step_avg:42.91ms +[2025-09-11 07:03:08] [Rank 0] step:7421/10000 train_time:318398ms step_avg:42.91ms +[2025-09-11 07:03:09] [Rank 0] step:7441/10000 train_time:319099ms step_avg:42.88ms +[2025-09-11 07:03:09] [Rank 0] step:7441/10000 train_time:319099ms step_avg:42.88ms +[2025-09-11 07:03:10] [Rank 0] step:7461/10000 train_time:319799ms step_avg:42.86ms +[2025-09-11 07:03:10] [Rank 0] step:7461/10000 train_time:319799ms step_avg:42.86ms +[2025-09-11 07:03:11] [Rank 0] step:7481/10000 train_time:320503ms step_avg:42.84ms +[2025-09-11 07:03:11] [Rank 0] step:7481/10000 train_time:320503ms step_avg:42.84ms +[2025-09-11 07:03:11] [Rank 0] step:7501/10000 train_time:321204ms step_avg:42.82ms +[2025-09-11 07:03:11] [Rank 0] step:7501/10000 train_time:321204ms step_avg:42.82ms +[2025-09-11 07:03:12] [Rank 0] step:7521/10000 train_time:321906ms step_avg:42.80ms 
+[2025-09-11 07:03:12] [Rank 0] step:7521/10000 train_time:321906ms step_avg:42.80ms +[2025-09-11 07:03:13] [Rank 0] step:7541/10000 train_time:322606ms step_avg:42.78ms +[2025-09-11 07:03:13] [Rank 0] step:7541/10000 train_time:322606ms step_avg:42.78ms +[2025-09-11 07:03:13] [Rank 0] step:7561/10000 train_time:323308ms step_avg:42.76ms +[2025-09-11 07:03:13] [Rank 0] step:7561/10000 train_time:323308ms step_avg:42.76ms +[2025-09-11 07:03:14] [Rank 0] step:7581/10000 train_time:324009ms step_avg:42.74ms +[2025-09-11 07:03:14] [Rank 0] step:7581/10000 train_time:324009ms step_avg:42.74ms +[2025-09-11 07:03:15] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 07:03:15] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 07:03:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 07:03:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 07:03:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 07:03:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 07:03:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:03:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:03:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 07:03:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 07:03:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 07:03:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 07:03:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 07:03:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 07:03:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 07:03:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 07:03:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 07:03:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 07:03:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 07:03:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 07:03:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 07:03:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 07:03:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 07:03:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 07:03:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 07:03:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 07:03:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 07:03:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 07:03:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 07:03:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 07:03:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 07:03:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 07:03:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 07:03:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 07:03:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 07:03:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 07:03:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 07:03:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 07:03:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 07:03:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 07:03:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 07:03:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 07:03:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 07:03:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 07:03:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 07:03:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:03:25] [Rank 0] PRINT: step:7600/10000 val_loss:4.1688 total_sharp:3.4643e-05 L1_sharp:1.7262e-03 L2_sharp:1.7089e-04 L3_sharp:1.9982e-04 L4_sharp:3.2243e-04 L5_sharp:2.3087e-04 L6_sharp:1.8642e-04 L7_sharp:2.8342e-04 L8_sharp:5.3776e-04 L9_sharp:5.5461e-04 L10_sharp:7.2851e-04 L11_sharp:1.2133e-03 L12_sharp:1.8765e-02 total_fnorm:1.1350e+02 total_l1_linf:2.2630e+05 total_spectral:5.6750e+01 L1_fnorm:1.5078e+00 L2_fnorm:1.4141e+00 L3_fnorm:1.4375e+00 L4_fnorm:1.4453e+00 L5_fnorm:1.4531e+00 L6_fnorm:1.4609e+00 L7_fnorm:1.4453e+00 L8_fnorm:1.4219e+00 L9_fnorm:1.4453e+00 L10_fnorm:1.4375e+00 L11_fnorm:1.4219e+00 L12_fnorm:1.4375e+00 L1_l1linf:3.0664e-01 L2_l1linf:2.9688e-01 L3_l1linf:2.9883e-01 L4_l1linf:3.0664e-01 L5_l1linf:3.0859e-01 L6_l1linf:3.1250e-01 L7_l1linf:3.0859e-01 L8_l1linf:3.0078e-01 L9_l1linf:3.0469e-01 L10_l1linf:2.9688e-01 L11_l1linf:2.8516e-01 L12_l1linf:3.0859e-01 L1_spectral:1.9754e-02 L2_spectral:1.9171e-02 L3_spectral:1.9758e-02 L4_spectral:1.9670e-02 L5_spectral:1.9896e-02 L6_spectral:1.9789e-02 L7_spectral:1.9916e-02 L8_spectral:1.9503e-02 L9_spectral:1.9944e-02 L10_spectral:1.9856e-02 L11_spectral:1.9912e-02 L12_spectral:1.9973e-02 train_time:324692ms step_avg:42.72ms +[2025-09-11 07:03:25] [Rank 0] PRINT: step:7600/10000 val_loss:4.1688 total_sharp:3.4643e-05 L1_sharp:1.7262e-03 L2_sharp:1.7089e-04 L3_sharp:1.9982e-04 L4_sharp:3.2243e-04 L5_sharp:2.3087e-04 L6_sharp:1.8642e-04 L7_sharp:2.8342e-04 L8_sharp:5.3776e-04 L9_sharp:5.5461e-04 L10_sharp:7.2851e-04 L11_sharp:1.2133e-03 L12_sharp:1.8765e-02 total_fnorm:1.1350e+02 total_l1_linf:2.2630e+05 total_spectral:5.6750e+01 L1_fnorm:1.5078e+00 L2_fnorm:1.4141e+00 L3_fnorm:1.4375e+00 L4_fnorm:1.4453e+00 L5_fnorm:1.4531e+00 L6_fnorm:1.4609e+00 L7_fnorm:1.4453e+00 L8_fnorm:1.4219e+00 L9_fnorm:1.4453e+00 L10_fnorm:1.4375e+00 L11_fnorm:1.4219e+00 L12_fnorm:1.4375e+00 L1_l1linf:3.0664e-01 L2_l1linf:2.9688e-01 L3_l1linf:2.9883e-01 L4_l1linf:3.0664e-01 L5_l1linf:3.0859e-01 
L6_l1linf:3.1250e-01 L7_l1linf:3.0859e-01 L8_l1linf:3.0078e-01 L9_l1linf:3.0469e-01 L10_l1linf:2.9688e-01 L11_l1linf:2.8516e-01 L12_l1linf:3.0859e-01 L1_spectral:1.9754e-02 L2_spectral:1.9171e-02 L3_spectral:1.9758e-02 L4_spectral:1.9670e-02 L5_spectral:1.9896e-02 L6_spectral:1.9789e-02 L7_spectral:1.9916e-02 L8_spectral:1.9503e-02 L9_spectral:1.9944e-02 L10_spectral:1.9856e-02 L11_spectral:1.9912e-02 L12_spectral:1.9973e-02 train_time:324692ms step_avg:42.72ms +[2025-09-11 07:03:26] [Rank 0] step:7601/10000 train_time:325904ms step_avg:42.88ms +[2025-09-11 07:03:26] [Rank 0] step:7601/10000 train_time:325904ms step_avg:42.88ms +[2025-09-11 07:03:27] [Rank 0] step:7621/10000 train_time:326635ms step_avg:42.86ms +[2025-09-11 07:03:27] [Rank 0] step:7621/10000 train_time:326635ms step_avg:42.86ms +[2025-09-11 07:03:27] [Rank 0] step:7641/10000 train_time:327337ms step_avg:42.84ms +[2025-09-11 07:03:27] [Rank 0] step:7641/10000 train_time:327337ms step_avg:42.84ms +[2025-09-11 07:03:28] [Rank 0] step:7661/10000 train_time:328037ms step_avg:42.82ms +[2025-09-11 07:03:28] [Rank 0] step:7661/10000 train_time:328037ms step_avg:42.82ms +[2025-09-11 07:03:29] [Rank 0] step:7681/10000 train_time:328738ms step_avg:42.80ms +[2025-09-11 07:03:29] [Rank 0] step:7681/10000 train_time:328738ms step_avg:42.80ms +[2025-09-11 07:03:30] [Rank 0] step:7701/10000 train_time:329440ms step_avg:42.78ms +[2025-09-11 07:03:30] [Rank 0] step:7701/10000 train_time:329440ms step_avg:42.78ms +[2025-09-11 07:03:30] [Rank 0] step:7721/10000 train_time:330141ms step_avg:42.76ms +[2025-09-11 07:03:30] [Rank 0] step:7721/10000 train_time:330141ms step_avg:42.76ms +[2025-09-11 07:03:31] [Rank 0] step:7741/10000 train_time:330842ms step_avg:42.74ms +[2025-09-11 07:03:31] [Rank 0] step:7741/10000 train_time:330842ms step_avg:42.74ms +[2025-09-11 07:03:32] [Rank 0] step:7761/10000 train_time:331542ms step_avg:42.72ms +[2025-09-11 07:03:32] [Rank 0] step:7761/10000 train_time:331542ms step_avg:42.72ms 
+[2025-09-11 07:03:32] [Rank 0] step:7781/10000 train_time:332245ms step_avg:42.70ms +[2025-09-11 07:03:32] [Rank 0] step:7781/10000 train_time:332245ms step_avg:42.70ms +[2025-09-11 07:03:33] [Rank 0] step:7801/10000 train_time:332945ms step_avg:42.68ms +[2025-09-11 07:03:33] [Rank 0] step:7801/10000 train_time:332945ms step_avg:42.68ms +[2025-09-11 07:03:34] [Rank 0] step:7821/10000 train_time:333646ms step_avg:42.66ms +[2025-09-11 07:03:34] [Rank 0] step:7821/10000 train_time:333646ms step_avg:42.66ms +[2025-09-11 07:03:34] [Rank 0] step:7841/10000 train_time:334349ms step_avg:42.64ms +[2025-09-11 07:03:34] [Rank 0] step:7841/10000 train_time:334349ms step_avg:42.64ms +[2025-09-11 07:03:35] [Rank 0] step:7861/10000 train_time:335052ms step_avg:42.62ms +[2025-09-11 07:03:35] [Rank 0] step:7861/10000 train_time:335052ms step_avg:42.62ms +[2025-09-11 07:03:36] [Rank 0] step:7881/10000 train_time:335753ms step_avg:42.60ms +[2025-09-11 07:03:36] [Rank 0] step:7881/10000 train_time:335753ms step_avg:42.60ms +[2025-09-11 07:03:37] [Rank 0] step:7901/10000 train_time:336455ms step_avg:42.58ms +[2025-09-11 07:03:37] [Rank 0] step:7901/10000 train_time:336455ms step_avg:42.58ms +[2025-09-11 07:03:37] [Rank 0] step:7921/10000 train_time:337156ms step_avg:42.56ms +[2025-09-11 07:03:37] [Rank 0] step:7921/10000 train_time:337156ms step_avg:42.56ms +[2025-09-11 07:03:38] [Rank 0] step:7941/10000 train_time:337858ms step_avg:42.55ms +[2025-09-11 07:03:38] [Rank 0] step:7941/10000 train_time:337858ms step_avg:42.55ms +[2025-09-11 07:03:39] [Rank 0] step:7961/10000 train_time:338558ms step_avg:42.53ms +[2025-09-11 07:03:39] [Rank 0] step:7961/10000 train_time:338558ms step_avg:42.53ms +[2025-09-11 07:03:39] [Rank 0] step:7981/10000 train_time:339261ms step_avg:42.51ms +[2025-09-11 07:03:39] [Rank 0] step:7981/10000 train_time:339261ms step_avg:42.51ms +[2025-09-11 07:03:40] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 07:03:40] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 07:03:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 07:03:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 07:03:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 07:03:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 07:03:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:03:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:03:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 07:03:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 07:03:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 07:03:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 07:03:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 07:03:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 07:03:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 07:03:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 07:03:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 07:03:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 07:03:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 07:03:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 07:03:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 07:03:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 07:03:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 07:03:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 07:03:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 07:03:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 07:03:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 07:03:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 07:03:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 07:03:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 07:03:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 07:03:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 07:03:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 07:03:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 07:03:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 07:03:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 07:03:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 07:03:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 07:03:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 07:03:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 07:03:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 07:03:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 07:03:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 07:03:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 07:03:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 07:03:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 07:03:50] [Rank 0] PRINT: step:8000/10000 val_loss:4.1339 total_sharp:3.0772e-05 L1_sharp:3.2727e-03 L2_sharp:2.7803e-04 L3_sharp:2.2039e-04 L4_sharp:3.6433e-04 L5_sharp:4.5794e-04 L6_sharp:2.3761e-04 L7_sharp:2.5378e-04 L8_sharp:4.8271e-04 L9_sharp:4.7624e-04 L10_sharp:8.0824e-04 L11_sharp:1.0439e-03 L12_sharp:2.9369e-02 total_fnorm:1.0200e+02 total_l1_linf:1.9661e+05 total_spectral:5.1000e+01 L1_fnorm:1.2344e+00 L2_fnorm:1.1562e+00 L3_fnorm:1.1719e+00 L4_fnorm:1.1719e+00 L5_fnorm:1.1875e+00 L6_fnorm:1.1953e+00 L7_fnorm:1.1953e+00 L8_fnorm:1.1641e+00 L9_fnorm:1.1719e+00 L10_fnorm:1.1797e+00 L11_fnorm:1.1641e+00 L12_fnorm:1.1641e+00 L1_l1linf:2.4023e-01 L2_l1linf:2.3730e-01 L3_l1linf:2.3438e-01 L4_l1linf:2.3926e-01 L5_l1linf:2.3828e-01 L6_l1linf:2.3730e-01 L7_l1linf:2.4219e-01 L8_l1linf:2.3047e-01 L9_l1linf:2.3145e-01 L10_l1linf:2.3340e-01 L11_l1linf:2.2266e-01 L12_l1linf:2.3145e-01 L1_spectral:1.6390e-02 L2_spectral:1.5750e-02 L3_spectral:1.6600e-02 L4_spectral:1.6613e-02 L5_spectral:1.6535e-02 L6_spectral:1.6587e-02 L7_spectral:1.6676e-02 L8_spectral:1.6395e-02 L9_spectral:1.6561e-02 L10_spectral:1.6590e-02 L11_spectral:1.6637e-02 L12_spectral:1.6725e-02 train_time:339940ms step_avg:42.49ms +[2025-09-11 07:03:50] [Rank 0] PRINT: step:8000/10000 
val_loss:4.1339 total_sharp:3.0772e-05 L1_sharp:3.2727e-03 L2_sharp:2.7803e-04 L3_sharp:2.2039e-04 L4_sharp:3.6433e-04 L5_sharp:4.5794e-04 L6_sharp:2.3761e-04 L7_sharp:2.5378e-04 L8_sharp:4.8271e-04 L9_sharp:4.7624e-04 L10_sharp:8.0824e-04 L11_sharp:1.0439e-03 L12_sharp:2.9369e-02 total_fnorm:1.0200e+02 total_l1_linf:1.9661e+05 total_spectral:5.1000e+01 L1_fnorm:1.2344e+00 L2_fnorm:1.1562e+00 L3_fnorm:1.1719e+00 L4_fnorm:1.1719e+00 L5_fnorm:1.1875e+00 L6_fnorm:1.1953e+00 L7_fnorm:1.1953e+00 L8_fnorm:1.1641e+00 L9_fnorm:1.1719e+00 L10_fnorm:1.1797e+00 L11_fnorm:1.1641e+00 L12_fnorm:1.1641e+00 L1_l1linf:2.4023e-01 L2_l1linf:2.3730e-01 L3_l1linf:2.3438e-01 L4_l1linf:2.3926e-01 L5_l1linf:2.3828e-01 L6_l1linf:2.3730e-01 L7_l1linf:2.4219e-01 L8_l1linf:2.3047e-01 L9_l1linf:2.3145e-01 L10_l1linf:2.3340e-01 L11_l1linf:2.2266e-01 L12_l1linf:2.3145e-01 L1_spectral:1.6390e-02 L2_spectral:1.5750e-02 L3_spectral:1.6600e-02 L4_spectral:1.6613e-02 L5_spectral:1.6535e-02 L6_spectral:1.6587e-02 L7_spectral:1.6676e-02 L8_spectral:1.6395e-02 L9_spectral:1.6561e-02 L10_spectral:1.6590e-02 L11_spectral:1.6637e-02 L12_spectral:1.6725e-02 train_time:339940ms step_avg:42.49ms +[2025-09-11 07:03:51] [Rank 0] step:8001/10000 train_time:341167ms step_avg:42.64ms +[2025-09-11 07:03:51] [Rank 0] step:8001/10000 train_time:341167ms step_avg:42.64ms +[2025-09-11 07:03:52] [Rank 0] step:8021/10000 train_time:341937ms step_avg:42.63ms +[2025-09-11 07:03:52] [Rank 0] step:8021/10000 train_time:341937ms step_avg:42.63ms +[2025-09-11 07:03:53] [Rank 0] step:8041/10000 train_time:342639ms step_avg:42.61ms +[2025-09-11 07:03:53] [Rank 0] step:8041/10000 train_time:342639ms step_avg:42.61ms +[2025-09-11 07:03:53] [Rank 0] step:8061/10000 train_time:343344ms step_avg:42.59ms +[2025-09-11 07:03:53] [Rank 0] step:8061/10000 train_time:343344ms step_avg:42.59ms +[2025-09-11 07:03:54] [Rank 0] step:8081/10000 train_time:344043ms step_avg:42.57ms +[2025-09-11 07:03:54] [Rank 0] step:8081/10000 
train_time:344043ms step_avg:42.57ms +[2025-09-11 07:03:55] [Rank 0] step:8101/10000 train_time:344743ms step_avg:42.56ms +[2025-09-11 07:03:55] [Rank 0] step:8101/10000 train_time:344743ms step_avg:42.56ms +[2025-09-11 07:03:55] [Rank 0] step:8121/10000 train_time:345448ms step_avg:42.54ms +[2025-09-11 07:03:55] [Rank 0] step:8121/10000 train_time:345448ms step_avg:42.54ms +[2025-09-11 07:03:57] [Rank 0] step:8141/10000 train_time:346894ms step_avg:42.61ms +[2025-09-11 07:03:57] [Rank 0] step:8141/10000 train_time:346894ms step_avg:42.61ms +[2025-09-11 07:03:58] [Rank 0] step:8161/10000 train_time:347598ms step_avg:42.59ms +[2025-09-11 07:03:58] [Rank 0] step:8161/10000 train_time:347598ms step_avg:42.59ms +[2025-09-11 07:03:58] [Rank 0] step:8181/10000 train_time:348311ms step_avg:42.58ms +[2025-09-11 07:03:58] [Rank 0] step:8181/10000 train_time:348311ms step_avg:42.58ms +[2025-09-11 07:03:59] [Rank 0] step:8201/10000 train_time:349021ms step_avg:42.56ms +[2025-09-11 07:03:59] [Rank 0] step:8201/10000 train_time:349021ms step_avg:42.56ms +[2025-09-11 07:04:00] [Rank 0] step:8221/10000 train_time:349730ms step_avg:42.54ms +[2025-09-11 07:04:00] [Rank 0] step:8221/10000 train_time:349730ms step_avg:42.54ms +[2025-09-11 07:04:00] [Rank 0] step:8241/10000 train_time:350447ms step_avg:42.52ms +[2025-09-11 07:04:00] [Rank 0] step:8241/10000 train_time:350447ms step_avg:42.52ms +[2025-09-11 07:04:01] [Rank 0] step:8261/10000 train_time:351154ms step_avg:42.51ms +[2025-09-11 07:04:01] [Rank 0] step:8261/10000 train_time:351154ms step_avg:42.51ms +[2025-09-11 07:04:02] [Rank 0] step:8281/10000 train_time:351858ms step_avg:42.49ms +[2025-09-11 07:04:02] [Rank 0] step:8281/10000 train_time:351858ms step_avg:42.49ms +[2025-09-11 07:04:03] [Rank 0] step:8301/10000 train_time:352566ms step_avg:42.47ms +[2025-09-11 07:04:03] [Rank 0] step:8301/10000 train_time:352566ms step_avg:42.47ms +[2025-09-11 07:04:03] [Rank 0] step:8321/10000 train_time:353273ms step_avg:42.46ms 
+[2025-09-11 07:04:03] [Rank 0] step:8321/10000 train_time:353273ms step_avg:42.46ms +[2025-09-11 07:04:04] [Rank 0] step:8341/10000 train_time:353988ms step_avg:42.44ms +[2025-09-11 07:04:04] [Rank 0] step:8341/10000 train_time:353988ms step_avg:42.44ms +[2025-09-11 07:04:05] [Rank 0] step:8361/10000 train_time:354691ms step_avg:42.42ms +[2025-09-11 07:04:05] [Rank 0] step:8361/10000 train_time:354691ms step_avg:42.42ms +[2025-09-11 07:04:05] [Rank 0] step:8381/10000 train_time:355405ms step_avg:42.41ms +[2025-09-11 07:04:05] [Rank 0] step:8381/10000 train_time:355405ms step_avg:42.41ms +[2025-09-11 07:04:06] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 07:04:06] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 07:04:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 07:04:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 07:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 07:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 07:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:04:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:04:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 07:04:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 07:04:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 07:04:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 07:04:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 07:04:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 07:04:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 07:04:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 07:04:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 07:04:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 07:04:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 07:04:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 07:04:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 07:04:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 07:04:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 07:04:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 07:04:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 07:04:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 07:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 07:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 07:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 07:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 07:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 07:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 07:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 07:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 07:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 07:04:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 07:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 07:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 07:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 07:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 07:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 07:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 07:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 07:04:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 07:04:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 07:04:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:04:16] [Rank 0] PRINT: step:8400/10000 val_loss:4.1034 total_sharp:2.2159e-05 L1_sharp:2.3499e-03 L2_sharp:2.7997e-04 L3_sharp:5.2565e-04 L4_sharp:2.0573e-04 L5_sharp:2.2362e-04 L6_sharp:3.0185e-04 L7_sharp:3.0627e-04 L8_sharp:4.0951e-04 L9_sharp:4.3965e-04 L10_sharp:7.9587e-04 L11_sharp:9.4863e-04 L12_sharp:8.1533e-03 total_fnorm:8.3000e+01 total_l1_linf:1.4746e+05 total_spectral:4.1500e+01 L1_fnorm:9.8047e-01 L2_fnorm:9.1406e-01 L3_fnorm:9.2188e-01 L4_fnorm:9.2188e-01 L5_fnorm:9.2969e-01 L6_fnorm:9.3359e-01 L7_fnorm:9.2969e-01 L8_fnorm:9.1016e-01 L9_fnorm:9.2188e-01 L10_fnorm:9.1797e-01 L11_fnorm:9.0625e-01 L12_fnorm:9.1406e-01 L1_l1linf:1.7480e-01 L2_l1linf:1.7383e-01 L3_l1linf:1.7285e-01 L4_l1linf:1.7285e-01 L5_l1linf:1.7578e-01 L6_l1linf:1.7090e-01 L7_l1linf:1.7383e-01 L8_l1linf:1.7383e-01 L9_l1linf:1.6699e-01 L10_l1linf:1.6602e-01 L11_l1linf:1.6113e-01 L12_l1linf:1.6992e-01 L1_spectral:1.3586e-02 L2_spectral:1.2703e-02 L3_spectral:1.3247e-02 L4_spectral:1.3304e-02 L5_spectral:1.3286e-02 L6_spectral:1.3272e-02 L7_spectral:1.3222e-02 L8_spectral:1.3359e-02 L9_spectral:1.3359e-02 L10_spectral:1.3423e-02 L11_spectral:1.3343e-02 L12_spectral:1.3530e-02 train_time:356106ms step_avg:42.39ms +[2025-09-11 07:04:16] [Rank 0] PRINT: step:8400/10000 val_loss:4.1034 total_sharp:2.2159e-05 L1_sharp:2.3499e-03 L2_sharp:2.7997e-04 L3_sharp:5.2565e-04 L4_sharp:2.0573e-04 L5_sharp:2.2362e-04 L6_sharp:3.0185e-04 L7_sharp:3.0627e-04 L8_sharp:4.0951e-04 L9_sharp:4.3965e-04 L10_sharp:7.9587e-04 L11_sharp:9.4863e-04 L12_sharp:8.1533e-03 total_fnorm:8.3000e+01 total_l1_linf:1.4746e+05 total_spectral:4.1500e+01 L1_fnorm:9.8047e-01 L2_fnorm:9.1406e-01 L3_fnorm:9.2188e-01 L4_fnorm:9.2188e-01 L5_fnorm:9.2969e-01 L6_fnorm:9.3359e-01 L7_fnorm:9.2969e-01 L8_fnorm:9.1016e-01 L9_fnorm:9.2188e-01 L10_fnorm:9.1797e-01 L11_fnorm:9.0625e-01 L12_fnorm:9.1406e-01 L1_l1linf:1.7480e-01 L2_l1linf:1.7383e-01 L3_l1linf:1.7285e-01 L4_l1linf:1.7285e-01 L5_l1linf:1.7578e-01 
L6_l1linf:1.7090e-01 L7_l1linf:1.7383e-01 L8_l1linf:1.7383e-01 L9_l1linf:1.6699e-01 L10_l1linf:1.6602e-01 L11_l1linf:1.6113e-01 L12_l1linf:1.6992e-01 L1_spectral:1.3586e-02 L2_spectral:1.2703e-02 L3_spectral:1.3247e-02 L4_spectral:1.3304e-02 L5_spectral:1.3286e-02 L6_spectral:1.3272e-02 L7_spectral:1.3222e-02 L8_spectral:1.3359e-02 L9_spectral:1.3359e-02 L10_spectral:1.3423e-02 L11_spectral:1.3343e-02 L12_spectral:1.3530e-02 train_time:356106ms step_avg:42.39ms +[2025-09-11 07:04:17] [Rank 0] step:8401/10000 train_time:357266ms step_avg:42.53ms +[2025-09-11 07:04:17] [Rank 0] step:8401/10000 train_time:357266ms step_avg:42.53ms +[2025-09-11 07:04:18] [Rank 0] step:8421/10000 train_time:358002ms step_avg:42.51ms +[2025-09-11 07:04:18] [Rank 0] step:8421/10000 train_time:358002ms step_avg:42.51ms +[2025-09-11 07:04:19] [Rank 0] step:8441/10000 train_time:358713ms step_avg:42.50ms +[2025-09-11 07:04:19] [Rank 0] step:8441/10000 train_time:358713ms step_avg:42.50ms +[2025-09-11 07:04:19] [Rank 0] step:8461/10000 train_time:359422ms step_avg:42.48ms +[2025-09-11 07:04:19] [Rank 0] step:8461/10000 train_time:359422ms step_avg:42.48ms +[2025-09-11 07:04:20] [Rank 0] step:8481/10000 train_time:360132ms step_avg:42.46ms +[2025-09-11 07:04:20] [Rank 0] step:8481/10000 train_time:360132ms step_avg:42.46ms +[2025-09-11 07:04:21] [Rank 0] step:8501/10000 train_time:360840ms step_avg:42.45ms +[2025-09-11 07:04:21] [Rank 0] step:8501/10000 train_time:360840ms step_avg:42.45ms +[2025-09-11 07:04:21] [Rank 0] step:8521/10000 train_time:361548ms step_avg:42.43ms +[2025-09-11 07:04:21] [Rank 0] step:8521/10000 train_time:361548ms step_avg:42.43ms +[2025-09-11 07:04:22] [Rank 0] step:8541/10000 train_time:362254ms step_avg:42.41ms +[2025-09-11 07:04:22] [Rank 0] step:8541/10000 train_time:362254ms step_avg:42.41ms +[2025-09-11 07:04:23] [Rank 0] step:8561/10000 train_time:362966ms step_avg:42.40ms +[2025-09-11 07:04:23] [Rank 0] step:8561/10000 train_time:362966ms step_avg:42.40ms 
+[2025-09-11 07:04:24] [Rank 0] step:8581/10000 train_time:363676ms step_avg:42.38ms +[2025-09-11 07:04:24] [Rank 0] step:8581/10000 train_time:363676ms step_avg:42.38ms +[2025-09-11 07:04:24] [Rank 0] step:8601/10000 train_time:364385ms step_avg:42.37ms +[2025-09-11 07:04:24] [Rank 0] step:8601/10000 train_time:364385ms step_avg:42.37ms +[2025-09-11 07:04:25] [Rank 0] step:8621/10000 train_time:365092ms step_avg:42.35ms +[2025-09-11 07:04:25] [Rank 0] step:8621/10000 train_time:365092ms step_avg:42.35ms +[2025-09-11 07:04:26] [Rank 0] step:8641/10000 train_time:365800ms step_avg:42.33ms +[2025-09-11 07:04:26] [Rank 0] step:8641/10000 train_time:365800ms step_avg:42.33ms +[2025-09-11 07:04:26] [Rank 0] step:8661/10000 train_time:366507ms step_avg:42.32ms +[2025-09-11 07:04:26] [Rank 0] step:8661/10000 train_time:366507ms step_avg:42.32ms +[2025-09-11 07:04:27] [Rank 0] step:8681/10000 train_time:367217ms step_avg:42.30ms +[2025-09-11 07:04:27] [Rank 0] step:8681/10000 train_time:367217ms step_avg:42.30ms +[2025-09-11 07:04:28] [Rank 0] step:8701/10000 train_time:367925ms step_avg:42.29ms +[2025-09-11 07:04:28] [Rank 0] step:8701/10000 train_time:367925ms step_avg:42.29ms +[2025-09-11 07:04:29] [Rank 0] step:8721/10000 train_time:368643ms step_avg:42.27ms +[2025-09-11 07:04:29] [Rank 0] step:8721/10000 train_time:368643ms step_avg:42.27ms +[2025-09-11 07:04:29] [Rank 0] step:8741/10000 train_time:369348ms step_avg:42.25ms +[2025-09-11 07:04:29] [Rank 0] step:8741/10000 train_time:369348ms step_avg:42.25ms +[2025-09-11 07:04:30] [Rank 0] step:8761/10000 train_time:370060ms step_avg:42.24ms +[2025-09-11 07:04:30] [Rank 0] step:8761/10000 train_time:370060ms step_avg:42.24ms +[2025-09-11 07:04:31] [Rank 0] step:8781/10000 train_time:370765ms step_avg:42.22ms +[2025-09-11 07:04:31] [Rank 0] step:8781/10000 train_time:370765ms step_avg:42.22ms +[2025-09-11 07:04:31] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 07:04:31] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 07:04:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 07:04:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 07:04:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 07:04:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 07:04:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:04:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:04:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 07:04:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 07:04:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 07:04:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 07:04:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 07:04:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 07:04:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 07:04:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 07:04:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 07:04:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 07:04:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 07:04:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 07:04:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 07:04:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 07:04:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 07:04:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 07:04:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 07:04:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 07:04:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 07:04:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 07:04:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 07:04:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 07:04:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 07:04:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 07:04:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 07:04:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 07:04:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 07:04:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 07:04:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 07:04:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 07:04:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 07:04:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 07:04:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 07:04:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 07:04:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 07:04:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 07:04:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 07:04:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 07:04:44] [Rank 0] PRINT: step:8800/10000 val_loss:4.0850 total_sharp:2.4115e-05 L1_sharp:1.7106e-03 L2_sharp:3.2024e-04 L3_sharp:2.5601e-04 L4_sharp:1.7173e-04 L5_sharp:2.2856e-04 L6_sharp:2.5726e-04 L7_sharp:2.6139e-04 L8_sharp:3.5399e-04 L9_sharp:4.3617e-04 L10_sharp:5.5315e-04 L11_sharp:7.9224e-04 L12_sharp:7.3133e-03 total_fnorm:6.1250e+01 total_l1_linf:9.9328e+04 total_spectral:3.0750e+01 L1_fnorm:7.3828e-01 L2_fnorm:6.7578e-01 L3_fnorm:6.8359e-01 L4_fnorm:6.7969e-01 L5_fnorm:6.8359e-01 L6_fnorm:6.8750e-01 L7_fnorm:6.8750e-01 L8_fnorm:6.7188e-01 L9_fnorm:6.7578e-01 L10_fnorm:6.7969e-01 L11_fnorm:6.7188e-01 L12_fnorm:6.7188e-01 L1_l1linf:1.1768e-01 L2_l1linf:1.1719e-01 L3_l1linf:1.1279e-01 L4_l1linf:1.1865e-01 L5_l1linf:1.1621e-01 L6_l1linf:1.2109e-01 L7_l1linf:1.1816e-01 L8_l1linf:1.1475e-01 L9_l1linf:1.1768e-01 L10_l1linf:1.1230e-01 L11_l1linf:1.0889e-01 L12_l1linf:1.1279e-01 L1_spectral:1.0829e-02 L2_spectral:9.5810e-03 L3_spectral:1.0003e-02 L4_spectral:1.0095e-02 L5_spectral:9.9827e-03 L6_spectral:1.0123e-02 L7_spectral:9.9392e-03 L8_spectral:1.0088e-02 L9_spectral:9.9902e-03 L10_spectral:1.0056e-02 L11_spectral:1.0022e-02 L12_spectral:1.0224e-02 train_time:371451ms step_avg:42.21ms +[2025-09-11 07:04:44] [Rank 0] PRINT: step:8800/10000 
val_loss:4.0850 total_sharp:2.4115e-05 L1_sharp:1.7106e-03 L2_sharp:3.2024e-04 L3_sharp:2.5601e-04 L4_sharp:1.7173e-04 L5_sharp:2.2856e-04 L6_sharp:2.5726e-04 L7_sharp:2.6139e-04 L8_sharp:3.5399e-04 L9_sharp:4.3617e-04 L10_sharp:5.5315e-04 L11_sharp:7.9224e-04 L12_sharp:7.3133e-03 total_fnorm:6.1250e+01 total_l1_linf:9.9328e+04 total_spectral:3.0750e+01 L1_fnorm:7.3828e-01 L2_fnorm:6.7578e-01 L3_fnorm:6.8359e-01 L4_fnorm:6.7969e-01 L5_fnorm:6.8359e-01 L6_fnorm:6.8750e-01 L7_fnorm:6.8750e-01 L8_fnorm:6.7188e-01 L9_fnorm:6.7578e-01 L10_fnorm:6.7969e-01 L11_fnorm:6.7188e-01 L12_fnorm:6.7188e-01 L1_l1linf:1.1768e-01 L2_l1linf:1.1719e-01 L3_l1linf:1.1279e-01 L4_l1linf:1.1865e-01 L5_l1linf:1.1621e-01 L6_l1linf:1.2109e-01 L7_l1linf:1.1816e-01 L8_l1linf:1.1475e-01 L9_l1linf:1.1768e-01 L10_l1linf:1.1230e-01 L11_l1linf:1.0889e-01 L12_l1linf:1.1279e-01 L1_spectral:1.0829e-02 L2_spectral:9.5810e-03 L3_spectral:1.0003e-02 L4_spectral:1.0095e-02 L5_spectral:9.9827e-03 L6_spectral:1.0123e-02 L7_spectral:9.9392e-03 L8_spectral:1.0088e-02 L9_spectral:9.9902e-03 L10_spectral:1.0056e-02 L11_spectral:1.0022e-02 L12_spectral:1.0224e-02 train_time:371451ms step_avg:42.21ms +[2025-09-11 07:04:45] [Rank 0] step:8801/10000 train_time:372607ms step_avg:42.34ms +[2025-09-11 07:04:45] [Rank 0] step:8801/10000 train_time:372607ms step_avg:42.34ms +[2025-09-11 07:04:46] [Rank 0] step:8821/10000 train_time:373348ms step_avg:42.32ms +[2025-09-11 07:04:46] [Rank 0] step:8821/10000 train_time:373348ms step_avg:42.32ms +[2025-09-11 07:04:47] [Rank 0] step:8841/10000 train_time:374058ms step_avg:42.31ms +[2025-09-11 07:04:47] [Rank 0] step:8841/10000 train_time:374058ms step_avg:42.31ms +[2025-09-11 07:04:47] [Rank 0] step:8861/10000 train_time:374768ms step_avg:42.29ms +[2025-09-11 07:04:47] [Rank 0] step:8861/10000 train_time:374768ms step_avg:42.29ms +[2025-09-11 07:04:48] [Rank 0] step:8881/10000 train_time:375476ms step_avg:42.28ms +[2025-09-11 07:04:48] [Rank 0] step:8881/10000 
train_time:375476ms step_avg:42.28ms +[2025-09-11 07:04:49] [Rank 0] step:8901/10000 train_time:376188ms step_avg:42.26ms +[2025-09-11 07:04:49] [Rank 0] step:8901/10000 train_time:376188ms step_avg:42.26ms +[2025-09-11 07:04:49] [Rank 0] step:8921/10000 train_time:376894ms step_avg:42.25ms +[2025-09-11 07:04:49] [Rank 0] step:8921/10000 train_time:376894ms step_avg:42.25ms +[2025-09-11 07:04:50] [Rank 0] step:8941/10000 train_time:377606ms step_avg:42.23ms +[2025-09-11 07:04:50] [Rank 0] step:8941/10000 train_time:377606ms step_avg:42.23ms +[2025-09-11 07:04:51] [Rank 0] step:8961/10000 train_time:378324ms step_avg:42.22ms +[2025-09-11 07:04:51] [Rank 0] step:8961/10000 train_time:378324ms step_avg:42.22ms +[2025-09-11 07:04:52] [Rank 0] step:8981/10000 train_time:379160ms step_avg:42.22ms +[2025-09-11 07:04:52] [Rank 0] step:8981/10000 train_time:379160ms step_avg:42.22ms +[2025-09-11 07:04:53] [Rank 0] step:9001/10000 train_time:380249ms step_avg:42.25ms +[2025-09-11 07:04:53] [Rank 0] step:9001/10000 train_time:380249ms step_avg:42.25ms +[2025-09-11 07:04:53] [Rank 0] step:9021/10000 train_time:380960ms step_avg:42.23ms +[2025-09-11 07:04:53] [Rank 0] step:9021/10000 train_time:380960ms step_avg:42.23ms +[2025-09-11 07:04:54] [Rank 0] step:9041/10000 train_time:381816ms step_avg:42.23ms +[2025-09-11 07:04:54] [Rank 0] step:9041/10000 train_time:381816ms step_avg:42.23ms +[2025-09-11 07:04:55] [Rank 0] step:9061/10000 train_time:382625ms step_avg:42.23ms +[2025-09-11 07:04:55] [Rank 0] step:9061/10000 train_time:382625ms step_avg:42.23ms +[2025-09-11 07:04:56] [Rank 0] step:9081/10000 train_time:383336ms step_avg:42.21ms +[2025-09-11 07:04:56] [Rank 0] step:9081/10000 train_time:383336ms step_avg:42.21ms +[2025-09-11 07:04:57] [Rank 0] step:9101/10000 train_time:384049ms step_avg:42.20ms +[2025-09-11 07:04:57] [Rank 0] step:9101/10000 train_time:384049ms step_avg:42.20ms +[2025-09-11 07:04:57] [Rank 0] step:9121/10000 train_time:384764ms step_avg:42.18ms 
+[2025-09-11 07:04:57] [Rank 0] step:9121/10000 train_time:384764ms step_avg:42.18ms +[2025-09-11 07:04:58] [Rank 0] step:9141/10000 train_time:385473ms step_avg:42.17ms +[2025-09-11 07:04:58] [Rank 0] step:9141/10000 train_time:385473ms step_avg:42.17ms +[2025-09-11 07:04:59] [Rank 0] step:9161/10000 train_time:386184ms step_avg:42.16ms +[2025-09-11 07:04:59] [Rank 0] step:9161/10000 train_time:386184ms step_avg:42.16ms +[2025-09-11 07:04:59] [Rank 0] step:9181/10000 train_time:386898ms step_avg:42.14ms +[2025-09-11 07:04:59] [Rank 0] step:9181/10000 train_time:386898ms step_avg:42.14ms +[2025-09-11 07:05:00] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 07:05:00] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 07:05:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 07:05:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 07:05:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 07:05:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 07:05:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:05:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:05:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 07:05:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 07:05:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 07:05:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 07:05:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 07:05:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 07:05:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 07:05:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 07:05:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 07:05:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 07:05:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 07:05:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 07:05:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 07:05:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 07:05:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 07:05:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 07:05:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 07:05:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 07:05:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 07:05:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 07:05:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 07:05:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 07:05:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 07:05:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 07:05:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 07:05:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 07:05:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 07:05:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 07:05:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 07:05:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 07:05:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 07:05:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 07:05:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 07:05:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 07:05:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 07:05:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 07:05:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 07:05:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:05:10] [Rank 0] PRINT: step:9200/10000 val_loss:4.0622 total_sharp:1.9079e-05 L1_sharp:2.0324e-03 L2_sharp:2.1881e-04 L3_sharp:2.2643e-04 L4_sharp:2.4011e-04 L5_sharp:2.1338e-04 L6_sharp:2.7890e-04 L7_sharp:1.5089e-04 L8_sharp:3.7914e-04 L9_sharp:4.0731e-04 L10_sharp:5.1174e-04 L11_sharp:7.3156e-04 L12_sharp:6.2106e-03 total_fnorm:4.5500e+01 total_l1_linf:6.5536e+04 total_spectral:2.2750e+01 L1_fnorm:4.9609e-01 L2_fnorm:4.4922e-01 L3_fnorm:4.4922e-01 L4_fnorm:4.4922e-01 L5_fnorm:4.5312e-01 L6_fnorm:4.5312e-01 L7_fnorm:4.5312e-01 L8_fnorm:4.4336e-01 L9_fnorm:4.4727e-01 L10_fnorm:4.4727e-01 L11_fnorm:4.4141e-01 L12_fnorm:4.4727e-01 L1_l1linf:6.8848e-02 L2_l1linf:6.9824e-02 L3_l1linf:6.5918e-02 L4_l1linf:7.0312e-02 L5_l1linf:6.9824e-02 L6_l1linf:7.0312e-02 L7_l1linf:7.1289e-02 L8_l1linf:7.2266e-02 L9_l1linf:6.5918e-02 L10_l1linf:6.6895e-02 L11_l1linf:6.4941e-02 L12_l1linf:6.7871e-02 L1_spectral:7.5908e-03 L2_spectral:6.4788e-03 L3_spectral:6.7861e-03 L4_spectral:6.7329e-03 L5_spectral:6.7080e-03 L6_spectral:6.6684e-03 L7_spectral:6.7049e-03 L8_spectral:6.9566e-03 L9_spectral:6.8009e-03 L10_spectral:6.8314e-03 L11_spectral:6.7729e-03 L12_spectral:6.9353e-03 train_time:387591ms step_avg:42.13ms +[2025-09-11 07:05:10] [Rank 0] PRINT: step:9200/10000 val_loss:4.0622 total_sharp:1.9079e-05 L1_sharp:2.0324e-03 L2_sharp:2.1881e-04 L3_sharp:2.2643e-04 L4_sharp:2.4011e-04 L5_sharp:2.1338e-04 L6_sharp:2.7890e-04 L7_sharp:1.5089e-04 L8_sharp:3.7914e-04 L9_sharp:4.0731e-04 L10_sharp:5.1174e-04 L11_sharp:7.3156e-04 L12_sharp:6.2106e-03 total_fnorm:4.5500e+01 total_l1_linf:6.5536e+04 total_spectral:2.2750e+01 L1_fnorm:4.9609e-01 L2_fnorm:4.4922e-01 L3_fnorm:4.4922e-01 L4_fnorm:4.4922e-01 L5_fnorm:4.5312e-01 L6_fnorm:4.5312e-01 L7_fnorm:4.5312e-01 L8_fnorm:4.4336e-01 L9_fnorm:4.4727e-01 L10_fnorm:4.4727e-01 L11_fnorm:4.4141e-01 L12_fnorm:4.4727e-01 L1_l1linf:6.8848e-02 L2_l1linf:6.9824e-02 L3_l1linf:6.5918e-02 L4_l1linf:7.0312e-02 L5_l1linf:6.9824e-02 
L6_l1linf:7.0312e-02 L7_l1linf:7.1289e-02 L8_l1linf:7.2266e-02 L9_l1linf:6.5918e-02 L10_l1linf:6.6895e-02 L11_l1linf:6.4941e-02 L12_l1linf:6.7871e-02 L1_spectral:7.5908e-03 L2_spectral:6.4788e-03 L3_spectral:6.7861e-03 L4_spectral:6.7329e-03 L5_spectral:6.7080e-03 L6_spectral:6.6684e-03 L7_spectral:6.7049e-03 L8_spectral:6.9566e-03 L9_spectral:6.8009e-03 L10_spectral:6.8314e-03 L11_spectral:6.7729e-03 L12_spectral:6.9353e-03 train_time:387591ms step_avg:42.13ms +[2025-09-11 07:05:11] [Rank 0] step:9201/10000 train_time:388822ms step_avg:42.26ms +[2025-09-11 07:05:11] [Rank 0] step:9201/10000 train_time:388822ms step_avg:42.26ms +[2025-09-11 07:05:12] [Rank 0] step:9221/10000 train_time:389549ms step_avg:42.25ms +[2025-09-11 07:05:12] [Rank 0] step:9221/10000 train_time:389549ms step_avg:42.25ms +[2025-09-11 07:05:13] [Rank 0] step:9241/10000 train_time:390257ms step_avg:42.23ms +[2025-09-11 07:05:13] [Rank 0] step:9241/10000 train_time:390257ms step_avg:42.23ms +[2025-09-11 07:05:13] [Rank 0] step:9261/10000 train_time:390968ms step_avg:42.22ms +[2025-09-11 07:05:13] [Rank 0] step:9261/10000 train_time:390968ms step_avg:42.22ms +[2025-09-11 07:05:14] [Rank 0] step:9281/10000 train_time:391679ms step_avg:42.20ms +[2025-09-11 07:05:14] [Rank 0] step:9281/10000 train_time:391679ms step_avg:42.20ms +[2025-09-11 07:05:15] [Rank 0] step:9301/10000 train_time:392386ms step_avg:42.19ms +[2025-09-11 07:05:15] [Rank 0] step:9301/10000 train_time:392386ms step_avg:42.19ms +[2025-09-11 07:05:15] [Rank 0] step:9321/10000 train_time:393097ms step_avg:42.17ms +[2025-09-11 07:05:15] [Rank 0] step:9321/10000 train_time:393097ms step_avg:42.17ms +[2025-09-11 07:05:16] [Rank 0] step:9341/10000 train_time:393802ms step_avg:42.16ms +[2025-09-11 07:05:16] [Rank 0] step:9341/10000 train_time:393802ms step_avg:42.16ms +[2025-09-11 07:05:17] [Rank 0] step:9361/10000 train_time:394508ms step_avg:42.14ms +[2025-09-11 07:05:17] [Rank 0] step:9361/10000 train_time:394508ms step_avg:42.14ms 
+[2025-09-11 07:05:18] [Rank 0] step:9381/10000 train_time:395214ms step_avg:42.13ms +[2025-09-11 07:05:18] [Rank 0] step:9381/10000 train_time:395214ms step_avg:42.13ms +[2025-09-11 07:05:18] [Rank 0] step:9401/10000 train_time:395924ms step_avg:42.12ms +[2025-09-11 07:05:18] [Rank 0] step:9401/10000 train_time:395924ms step_avg:42.12ms +[2025-09-11 07:05:19] [Rank 0] step:9421/10000 train_time:396634ms step_avg:42.10ms +[2025-09-11 07:05:19] [Rank 0] step:9421/10000 train_time:396634ms step_avg:42.10ms +[2025-09-11 07:05:20] [Rank 0] step:9441/10000 train_time:397346ms step_avg:42.09ms +[2025-09-11 07:05:20] [Rank 0] step:9441/10000 train_time:397346ms step_avg:42.09ms +[2025-09-11 07:05:20] [Rank 0] step:9461/10000 train_time:398055ms step_avg:42.07ms +[2025-09-11 07:05:20] [Rank 0] step:9461/10000 train_time:398055ms step_avg:42.07ms +[2025-09-11 07:05:21] [Rank 0] step:9481/10000 train_time:398766ms step_avg:42.06ms +[2025-09-11 07:05:21] [Rank 0] step:9481/10000 train_time:398766ms step_avg:42.06ms +[2025-09-11 07:05:22] [Rank 0] step:9501/10000 train_time:399477ms step_avg:42.05ms +[2025-09-11 07:05:22] [Rank 0] step:9501/10000 train_time:399477ms step_avg:42.05ms +[2025-09-11 07:05:23] [Rank 0] step:9521/10000 train_time:400190ms step_avg:42.03ms +[2025-09-11 07:05:23] [Rank 0] step:9521/10000 train_time:400190ms step_avg:42.03ms +[2025-09-11 07:05:23] [Rank 0] step:9541/10000 train_time:400897ms step_avg:42.02ms +[2025-09-11 07:05:23] [Rank 0] step:9541/10000 train_time:400897ms step_avg:42.02ms +[2025-09-11 07:05:24] [Rank 0] step:9561/10000 train_time:401606ms step_avg:42.00ms +[2025-09-11 07:05:24] [Rank 0] step:9561/10000 train_time:401606ms step_avg:42.00ms +[2025-09-11 07:05:25] [Rank 0] step:9581/10000 train_time:402318ms step_avg:41.99ms +[2025-09-11 07:05:25] [Rank 0] step:9581/10000 train_time:402318ms step_avg:41.99ms +[2025-09-11 07:05:25] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 07:05:25] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 07:05:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 07:05:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 07:05:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 07:05:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 07:05:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:05:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:05:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 07:05:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 07:05:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 07:05:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 07:05:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 07:05:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 07:05:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 07:05:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 07:05:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 07:05:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 07:05:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 07:05:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 07:05:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 07:05:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 07:05:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 07:05:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 07:05:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 07:05:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 07:05:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 07:05:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 07:05:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 07:05:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 07:05:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 07:05:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 07:05:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 07:05:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 07:05:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 07:05:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 07:05:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 07:05:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 07:05:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 07:05:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 07:05:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 07:05:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 07:05:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 07:05:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 07:05:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 07:05:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 07:05:35] [Rank 0] PRINT: step:9600/10000 val_loss:4.0479 total_sharp:1.3968e-05 L1_sharp:1.5474e-03 L2_sharp:2.5101e-04 L3_sharp:8.4899e-05 L4_sharp:2.2823e-04 L5_sharp:2.9632e-04 L6_sharp:1.7324e-04 L7_sharp:1.7448e-04 L8_sharp:3.3075e-04 L9_sharp:2.8533e-04 L10_sharp:3.6208e-04 L11_sharp:5.3940e-04 L12_sharp:5.5666e-03 total_fnorm:2.6250e+01 total_l1_linf:3.2000e+04 total_spectral:1.3125e+01 L1_fnorm:2.8711e-01 L2_fnorm:2.5391e-01 L3_fnorm:2.5391e-01 L4_fnorm:2.5586e-01 L5_fnorm:2.5586e-01 L6_fnorm:2.5781e-01 L7_fnorm:2.5781e-01 L8_fnorm:2.4805e-01 L9_fnorm:2.5195e-01 L10_fnorm:2.5391e-01 L11_fnorm:2.5000e-01 L12_fnorm:2.5391e-01 L1_l1linf:3.3447e-02 L2_l1linf:3.2715e-02 L3_l1linf:3.1982e-02 L4_l1linf:3.3936e-02 L5_l1linf:3.4180e-02 L6_l1linf:3.2959e-02 L7_l1linf:3.3691e-02 L8_l1linf:3.5889e-02 L9_l1linf:3.1494e-02 L10_l1linf:3.0273e-02 L11_l1linf:3.1738e-02 L12_l1linf:3.3447e-02 L1_spectral:4.5294e-03 L2_spectral:3.7908e-03 L3_spectral:3.9000e-03 L4_spectral:3.9346e-03 L5_spectral:3.8946e-03 L6_spectral:3.9184e-03 L7_spectral:3.8998e-03 L8_spectral:4.1452e-03 L9_spectral:3.9214e-03 L10_spectral:3.9916e-03 L11_spectral:3.9689e-03 L12_spectral:4.2086e-03 train_time:403004ms step_avg:41.98ms +[2025-09-11 07:05:35] [Rank 0] PRINT: step:9600/10000 
val_loss:4.0479 total_sharp:1.3968e-05 L1_sharp:1.5474e-03 L2_sharp:2.5101e-04 L3_sharp:8.4899e-05 L4_sharp:2.2823e-04 L5_sharp:2.9632e-04 L6_sharp:1.7324e-04 L7_sharp:1.7448e-04 L8_sharp:3.3075e-04 L9_sharp:2.8533e-04 L10_sharp:3.6208e-04 L11_sharp:5.3940e-04 L12_sharp:5.5666e-03 total_fnorm:2.6250e+01 total_l1_linf:3.2000e+04 total_spectral:1.3125e+01 L1_fnorm:2.8711e-01 L2_fnorm:2.5391e-01 L3_fnorm:2.5391e-01 L4_fnorm:2.5586e-01 L5_fnorm:2.5586e-01 L6_fnorm:2.5781e-01 L7_fnorm:2.5781e-01 L8_fnorm:2.4805e-01 L9_fnorm:2.5195e-01 L10_fnorm:2.5391e-01 L11_fnorm:2.5000e-01 L12_fnorm:2.5391e-01 L1_l1linf:3.3447e-02 L2_l1linf:3.2715e-02 L3_l1linf:3.1982e-02 L4_l1linf:3.3936e-02 L5_l1linf:3.4180e-02 L6_l1linf:3.2959e-02 L7_l1linf:3.3691e-02 L8_l1linf:3.5889e-02 L9_l1linf:3.1494e-02 L10_l1linf:3.0273e-02 L11_l1linf:3.1738e-02 L12_l1linf:3.3447e-02 L1_spectral:4.5294e-03 L2_spectral:3.7908e-03 L3_spectral:3.9000e-03 L4_spectral:3.9346e-03 L5_spectral:3.8946e-03 L6_spectral:3.9184e-03 L7_spectral:3.8998e-03 L8_spectral:4.1452e-03 L9_spectral:3.9214e-03 L10_spectral:3.9916e-03 L11_spectral:3.9689e-03 L12_spectral:4.2086e-03 train_time:403004ms step_avg:41.98ms +[2025-09-11 07:05:37] [Rank 0] step:9601/10000 train_time:404241ms step_avg:42.10ms +[2025-09-11 07:05:37] [Rank 0] step:9601/10000 train_time:404241ms step_avg:42.10ms +[2025-09-11 07:05:37] [Rank 0] step:9621/10000 train_time:404959ms step_avg:42.09ms +[2025-09-11 07:05:37] [Rank 0] step:9621/10000 train_time:404959ms step_avg:42.09ms +[2025-09-11 07:05:38] [Rank 0] step:9641/10000 train_time:405674ms step_avg:42.08ms +[2025-09-11 07:05:38] [Rank 0] step:9641/10000 train_time:405674ms step_avg:42.08ms +[2025-09-11 07:05:39] [Rank 0] step:9661/10000 train_time:406399ms step_avg:42.07ms +[2025-09-11 07:05:39] [Rank 0] step:9661/10000 train_time:406399ms step_avg:42.07ms +[2025-09-11 07:05:39] [Rank 0] step:9681/10000 train_time:407114ms step_avg:42.05ms +[2025-09-11 07:05:39] [Rank 0] step:9681/10000 
train_time:407114ms step_avg:42.05ms +[2025-09-11 07:05:40] [Rank 0] step:9701/10000 train_time:407830ms step_avg:42.04ms +[2025-09-11 07:05:40] [Rank 0] step:9701/10000 train_time:407830ms step_avg:42.04ms +[2025-09-11 07:05:41] [Rank 0] step:9721/10000 train_time:408551ms step_avg:42.03ms +[2025-09-11 07:05:41] [Rank 0] step:9721/10000 train_time:408551ms step_avg:42.03ms +[2025-09-11 07:05:42] [Rank 0] step:9741/10000 train_time:409270ms step_avg:42.02ms +[2025-09-11 07:05:42] [Rank 0] step:9741/10000 train_time:409270ms step_avg:42.02ms +[2025-09-11 07:05:42] [Rank 0] step:9761/10000 train_time:409987ms step_avg:42.00ms +[2025-09-11 07:05:42] [Rank 0] step:9761/10000 train_time:409987ms step_avg:42.00ms +[2025-09-11 07:05:43] [Rank 0] step:9781/10000 train_time:410703ms step_avg:41.99ms +[2025-09-11 07:05:43] [Rank 0] step:9781/10000 train_time:410703ms step_avg:41.99ms +[2025-09-11 07:05:44] [Rank 0] step:9801/10000 train_time:411424ms step_avg:41.98ms +[2025-09-11 07:05:44] [Rank 0] step:9801/10000 train_time:411424ms step_avg:41.98ms +[2025-09-11 07:05:45] [Rank 0] step:9821/10000 train_time:412144ms step_avg:41.97ms +[2025-09-11 07:05:45] [Rank 0] step:9821/10000 train_time:412144ms step_avg:41.97ms +[2025-09-11 07:05:45] [Rank 0] step:9841/10000 train_time:412865ms step_avg:41.95ms +[2025-09-11 07:05:45] [Rank 0] step:9841/10000 train_time:412865ms step_avg:41.95ms +[2025-09-11 07:05:46] [Rank 0] step:9861/10000 train_time:413583ms step_avg:41.94ms +[2025-09-11 07:05:46] [Rank 0] step:9861/10000 train_time:413583ms step_avg:41.94ms +[2025-09-11 07:05:47] [Rank 0] step:9881/10000 train_time:414300ms step_avg:41.93ms +[2025-09-11 07:05:47] [Rank 0] step:9881/10000 train_time:414300ms step_avg:41.93ms +[2025-09-11 07:05:47] [Rank 0] step:9901/10000 train_time:415014ms step_avg:41.92ms +[2025-09-11 07:05:47] [Rank 0] step:9901/10000 train_time:415014ms step_avg:41.92ms +[2025-09-11 07:05:48] [Rank 0] step:9921/10000 train_time:415731ms step_avg:41.90ms 
+[2025-09-11 07:05:48] [Rank 0] step:9921/10000 train_time:415731ms step_avg:41.90ms +[2025-09-11 07:05:49] [Rank 0] step:9941/10000 train_time:416452ms step_avg:41.89ms +[2025-09-11 07:05:49] [Rank 0] step:9941/10000 train_time:416452ms step_avg:41.89ms +[2025-09-11 07:05:50] [Rank 0] step:9961/10000 train_time:417175ms step_avg:41.88ms +[2025-09-11 07:05:50] [Rank 0] step:9961/10000 train_time:417175ms step_avg:41.88ms +[2025-09-11 07:05:50] [Rank 0] step:9981/10000 train_time:417893ms step_avg:41.87ms +[2025-09-11 07:05:50] [Rank 0] step:9981/10000 train_time:417893ms step_avg:41.87ms +[2025-09-11 07:05:51] [Rank 0] step:10000/10000 train_time:418583ms step_avg:41.86ms +[2025-09-11 07:05:51] [Rank 0] step:10000/10000 train_time:418583ms step_avg:41.86ms +[2025-09-11 07:05:51] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 07:05:51] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 07:05:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 07:05:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 07:05:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 07:05:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 07:05:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:05:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 07:05:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 07:05:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 07:05:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 07:05:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 07:05:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 07:05:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 07:05:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 07:05:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 07:05:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 07:05:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 07:05:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 07:05:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 07:05:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 07:05:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 07:05:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 07:05:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 07:05:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 07:05:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 07:05:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 07:05:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 07:05:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 07:05:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 07:05:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 07:05:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 07:05:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 07:05:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 07:05:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 07:05:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 07:05:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 07:05:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 07:06:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 07:06:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 07:06:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 07:06:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 07:06:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 07:06:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 07:06:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 07:06:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 07:06:01] [Rank 0] PRINT: step:10000/10000 val_loss:4.0463 total_sharp:8.5887e-06 L1_sharp:1.1412e-03 L2_sharp:1.5266e-04 L3_sharp:1.2597e-04 L4_sharp:1.1666e-04 L5_sharp:8.1957e-05 L6_sharp:1.0437e-04 L7_sharp:8.9676e-05 L8_sharp:2.9299e-04 L9_sharp:2.0196e-04 L10_sharp:2.9684e-04 L11_sharp:3.8961e-04 L12_sharp:3.6310e-03 total_fnorm:1.0438e+01 total_l1_linf:9.2800e+03 total_spectral:5.2188e+00 L1_fnorm:1.0986e-01 L2_fnorm:9.6680e-02 L3_fnorm:9.7656e-02 L4_fnorm:9.7656e-02 L5_fnorm:9.9609e-02 L6_fnorm:9.9121e-02 L7_fnorm:9.9121e-02 L8_fnorm:9.7168e-02 L9_fnorm:9.7656e-02 L10_fnorm:9.8145e-02 L11_fnorm:9.6680e-02 L12_fnorm:9.8633e-02 L1_l1linf:1.0742e-02 L2_l1linf:1.0315e-02 L3_l1linf:1.0071e-02 L4_l1linf:1.0010e-02 L5_l1linf:1.0376e-02 L6_l1linf:1.0803e-02 L7_l1linf:1.0193e-02 L8_l1linf:1.1902e-02 L9_l1linf:9.8267e-03 L10_l1linf:9.7046e-03 L11_l1linf:9.7046e-03 L12_l1linf:1.1902e-02 L1_spectral:1.7647e-03 L2_spectral:1.4238e-03 L3_spectral:1.5408e-03 L4_spectral:1.5377e-03 L5_spectral:1.5504e-03 L6_spectral:1.5262e-03 L7_spectral:1.5272e-03 L8_spectral:1.6470e-03 L9_spectral:1.5595e-03 L10_spectral:1.5889e-03 L11_spectral:1.5670e-03 L12_spectral:1.6850e-03 train_time:418604ms step_avg:41.86ms +[2025-09-11 07:06:01] [Rank 0] PRINT: step:10000/10000 val_loss:4.0463 total_sharp:8.5887e-06 L1_sharp:1.1412e-03 L2_sharp:1.5266e-04 L3_sharp:1.2597e-04 L4_sharp:1.1666e-04 L5_sharp:8.1957e-05 L6_sharp:1.0437e-04 L7_sharp:8.9676e-05 L8_sharp:2.9299e-04 L9_sharp:2.0196e-04 L10_sharp:2.9684e-04 L11_sharp:3.8961e-04 L12_sharp:3.6310e-03 total_fnorm:1.0438e+01 total_l1_linf:9.2800e+03 total_spectral:5.2188e+00 L1_fnorm:1.0986e-01 L2_fnorm:9.6680e-02 L3_fnorm:9.7656e-02 L4_fnorm:9.7656e-02 L5_fnorm:9.9609e-02 L6_fnorm:9.9121e-02 L7_fnorm:9.9121e-02 L8_fnorm:9.7168e-02 L9_fnorm:9.7656e-02 L10_fnorm:9.8145e-02 L11_fnorm:9.6680e-02 L12_fnorm:9.8633e-02 L1_l1linf:1.0742e-02 L2_l1linf:1.0315e-02 L3_l1linf:1.0071e-02 L4_l1linf:1.0010e-02 L5_l1linf:1.0376e-02 
L6_l1linf:1.0803e-02 L7_l1linf:1.0193e-02 L8_l1linf:1.1902e-02 L9_l1linf:9.8267e-03 L10_l1linf:9.7046e-03 L11_l1linf:9.7046e-03 L12_l1linf:1.1902e-02 L1_spectral:1.7647e-03 L2_spectral:1.4238e-03 L3_spectral:1.5408e-03 L4_spectral:1.5377e-03 L5_spectral:1.5504e-03 L6_spectral:1.5262e-03 L7_spectral:1.5272e-03 L8_spectral:1.6470e-03 L9_spectral:1.5595e-03 L10_spectral:1.5889e-03 L11_spectral:1.5670e-03 L12_spectral:1.6850e-03 train_time:418604ms step_avg:41.86ms +[2025-09-11 07:06:01] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 07:06:01 2025 --- +[2025-09-11 07:06:01] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 07:06:01 2025 --- +[2025-09-11 07:06:01] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 07:06:01] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.05_seed_43/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.05_seed_43/config.json new file mode 100644 index 0000000000000000000000000000000000000000..1d6376e4da74d6fc438a741091e6256a4d0e20df --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.05_seed_43/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 43, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.05, + "muon_lr": 0.05, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "597f5b30-c8a1-4989-bd2b-489cc3670d4b", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.05_seed_43/training_log_597f5b30-c8a1-4989-bd2b-489cc3670d4b.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.05_seed_43/training_log_597f5b30-c8a1-4989-bd2b-489cc3670d4b.txt new file mode 100644 index 0000000000000000000000000000000000000000..8fcd55f41f8835fe96ce18cccd8f38529f791d8e --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.05_seed_43/training_log_597f5b30-c8a1-4989-bd2b-489cc3670d4b.txt @@ -0,0 +1,4264 @@ +[2025-09-11 06:38:45] [Rank 0] PRINT: --- Script Start: Thu Sep 11 06:38:45 2025 --- +[2025-09-11 06:38:45] [Rank 0] PRINT: --- Script Start: Thu Sep 11 06:38:45 2025 --- +[2025-09-11 06:38:45] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.05, muon_lr=0.05, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 06:38:45] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.05, muon_lr=0.05, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 06:38:45] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 06:38:45] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 06:38:45] [Rank 0] PRINT: Using fixed seed: 43 +[2025-09-11 06:38:45] [Rank 0] PRINT: Using fixed seed: 43 +[2025-09-11 06:38:45] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.05_seed_43 +[2025-09-11 06:38:45] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.05_seed_43 +[2025-09-11 06:38:45] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import 
dataclass, asdict
from functools import lru_cache
from pathlib import Path
import argparse  # Keep argparse for --unet and potentially --optimizer_mode
import json
import random
import numpy as np
import gc
# BUGFIX: the original imported `autocast` from both torch.cuda.amp (deprecated)
# and torch.amp; the second import shadowed the first, so only the torch.amp
# entry point is kept.
from torch.func import functional_call
from torch.amp import autocast
import itertools
from itertools import cycle

os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
import torch
torch.empty(1, device="cuda", requires_grad=True).backward()  # prevents a bug on some systems
from torch import Tensor, nn
import torch.nn.functional as F
import torch.distributed as dist
# use of FlexAttention contributed by @KoszarskyB
from torch.nn.attention.flex_attention import BlockMask, flex_attention
sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt")  # Already present
from optimizers.MUON_fix import Muon
from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom  # Renamed

# -----------------------------------------------------------------------------

mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom)  # Use renamed imports

# -----------------------------------------------------------------------------
# Seeding Function
def set_seed(seed):
    """Seed python, numpy and torch (all CUDA devices) for reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    print(f"PRINT: Set seed to {seed}", flush=True)  # Print immediately for all ranks

# -----------------------------------------------------------------------------
# Our own simple Distributed Data Loader
def _load_data_shard(file: Path):
    """Read one tokenized .bin shard (256*int32 header + uint16 tokens) into pinned memory."""
    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
    assert header[1] == 1, "unsupported version"
    num_tokens = int(header[2])
    with file.open("rb", buffering=0) as f:
        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
        f.seek(256 * 4)  # skip the int32 header
        nbytes = f.readinto(tokens.numpy())
        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
    return tokens

def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
    """Yield (inputs, targets) token batches, sharded across ranks, cycling over shard files.

    inputs are int32 and targets int64 (shifted by one), both moved to CUDA
    with non_blocking=True.
    """
    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
    assert batch_size % world_size == 0
    local_batch_size = batch_size // world_size
    file_iter = cycle(files)  # cycling over shards enables multi-epoch training
    tokens, pos = _load_data_shard(next(file_iter)), 0
    while True:
        if pos + batch_size + 1 >= len(tokens):
            tokens, pos = _load_data_shard(next(file_iter)), 0
        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True)  # no sync on host side;
        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True)  # H2D in another stream isn't helpful.
        pos += batch_size
        yield inputs, targets


# -----------------------------------------------------------------------------
# int main
parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
parser.add_argument("--optimizer_mode", type=int, default=0,
                    help="Defines how Muon is applied. "
                         "0: Muon(All Hidden Attn+MLP - original); "
                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
                         "3: Muon(All Attn)/Adam(MLP); "
                         "4: Muon(MLP)/Adam(All Attn)"
                         "5: All Adam (No Muon, all applicable matrices to Adam)."
                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)."
                         "7: Muon(VO Attn, MLP)/Adam(QK Attn)."
                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
                    )
parser.add_argument("--model_parameterization", type=str, default="whole", choices=["whole", "qkvo"])
parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer")
parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer")
parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs")
exp_args = parser.parse_args()
set_seed(exp_args.seed)

# Import the GPT implementation that matches the requested parameterization.
if exp_args.unet:
    print("Using U-net architecture")
    from models.nano_GPT_unet import GPT
elif exp_args.model_parameterization == "qkvo":
    print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w")
    # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w
    from models.nano_GPT_qkvo_simp import GPT
elif exp_args.model_parameterization == "whole":
    print("Using original architecture")
    from models.nano_GPT import GPT

@dataclass
class Hyperparameters:
    # NOTE: attributes are intentionally left unannotated so they remain plain
    # class attributes; the config dump below reads them via __class__.__dict__.
    # data
    train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
    val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin"
    val_tokens = 491520
    train_seq_len = 3 * 1024  # FlexAttention sequence length
    val_seq_len = 4 * 4 * 1024  # FlexAttention sequence length for validation
    # optimization
    num_iterations = 10000  # Original: 1770
    cooldown_frac = 0.4
    # architecture
    vocab_size = 50257
    # evaluation and logging
    val_loss_every = 400  # Increased to reduce memory pressure from frequent sharpness calculations
    save_checkpoint = False
args = Hyperparameters()

# DDP setup
rank = int(os.environ.get("RANK", 0))
local_rank = int(os.environ.get("LOCAL_RANK", 0))  # Used for device setting
world_size = int(os.environ.get("WORLD_SIZE", 1))

assert torch.cuda.is_available()
device = torch.device("cuda", local_rank)  # Use local_rank for device
torch.cuda.set_device(device)

if not dist.is_initialized():  # Ensure DDP is initialized only once
    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size)
dist.barrier()
master_process = (rank == 0)

def check_and_delete_txt_files(folder_path):
    """
    Check all .txt files in a folder for the string "10000/10000".
    If none contain this string, delete all .txt files in the folder.

    Args:
        folder_path (str): Path to the folder to check

    Returns:
        bool: True when the run should proceed (folder missing, not a
        directory, no .txt files, or no finished log found); False when a
        completed run ("10000/10000" marker) already exists.
    """
    folder = Path(folder_path)

    if not folder.exists():
        print(f"Error: Folder '{folder_path}' does not exist.")
        return True

    if not folder.is_dir():
        print(f"Error: '{folder_path}' is not a directory.")
        return True

    # Get all .txt files in the folder (one layer only)
    txt_files = list(folder.glob("*.txt"))

    if not txt_files:
        print("No .txt files found in the folder.")
        return True

    # Check if any file contains "10000/10000"
    found_string = False
    for file_path in txt_files:
        try:
            content = file_path.read_text(encoding='utf-8')
            if "10000/10000" in content:
                found_string = True
                print(f"Found '10000/10000' in: {file_path}")
                break  # No need to check other files
        except Exception as e:
            print(f"Error reading {file_path}: {e}")

    # If the marker was not found in any file, delete all stale .txt logs
    if not found_string:
        print("String '10000/10000' not found in any .txt file. Deleting all .txt files...")
        for file_path in txt_files:
            try:
                file_path.unlink()
                print(f"Deleted: {file_path}")
            except Exception as e:
                print(f"Error deleting {file_path}: {e}")
    else:
        print("String '10000/10000' found. No files will be deleted.")
    return not found_string

logfile = None
run_dir_path_str = None

base_log_dir = Path(exp_args.base_dir)

run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
run_dir_path = base_log_dir / run_folder_name
run_flag = check_and_delete_txt_files(run_dir_path)

if run_flag:

    if master_process:
        # Re-seed master specifically before dir creation / config saving
        set_seed(exp_args.seed)

        run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
        run_dir_path = base_log_dir / run_folder_name
        run_dir_path.mkdir(parents=True, exist_ok=True)
        run_dir_path_str = str(run_dir_path)

        run_uuid = uuid.uuid4()
        logfile = run_dir_path / f"training_log_{run_uuid}.txt"
        print(f"Logging to: {logfile}")

        # Save configuration
        config_to_save = {
            "cli_args": vars(exp_args),
            "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
            "run_uuid_for_log": str(run_uuid),
            "script_code_logged_at_start": True
        }
        config_file_path = run_dir_path / "config.json"
        with open(config_file_path, "w") as f:
            json.dump(config_to_save, f, indent=4)
        print(f"Saved configuration to: {config_file_path}")

    def print0(s, console=False):
        """Master-rank logger: optionally echo to stdout, append one timestamped line to the logfile.

        BUGFIX: the original appended every message to the logfile twice (a
        guarded write followed by an unconditional duplicate) — visible as
        doubled lines at the top of the produced training logs. The message
        is now written exactly once, and only when a logfile exists.
        """
        if master_process:
            timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
            log_message = f"[{timestamp}] [Rank {rank}] {s}"

            # Print to console if requested or if it's a specific "PRINT:" message
            if console or s.startswith("PRINT:"):
                actual_s = s[6:] if s.startswith("PRINT:") else s
                print(actual_s)

            if logfile:
                with open(logfile, "a") as f:
                    f.write(log_message + "\n")

    print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
    print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
    print0(f"PRINT: Hyperparameters: {args}", console=True)
    print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
    if master_process:
        print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
        # NOTE(review): `code` is expected to be defined earlier in the file
        # (the script's own source, logged for reproducibility) — confirm.
        print0(code)  # Log the code

    ########################################
    #    Construct model and optimizer     #
    ########################################
    print0("PRINT: Constructing model...", console=True)
    model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768,
                           max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda()
    for m in model.modules():
        if isinstance(m, nn.Embedding):
            m.bfloat16()
    print0("PRINT: Broadcasting model parameters...", console=True)
    for param in model.parameters():
        dist.broadcast(param.detach(), 0)
    print0("PRINT: Model constructed and broadcasted.", console=True)

    # --- Parameter collection and optimizer setup ---
    if exp_args.model_parameterization == "qkvo":
        print0("PRINT: Collecting parameters for optimizers...", console=True)
        head_params = [model.lm_head.weight]
        embed_params = [model.embed.weight]

        # Granular collection for attention and MLP parts
        attn_q_params = []
        attn_k_params = []
        attn_v_params = []
        attn_o_params = []  # W_O from c_proj
        mlp_fc_params = []
        mlp_proj_params = []

        for block_module in model.blocks:
            if block_module.attn is not None:
                # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class
                if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
                else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True)
                if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
                else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True)
                if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
                else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True)
                attn_o_params.append(block_module.attn.c_proj.weight)
            if block_module.mlp is not None:
                mlp_fc_params.append(block_module.mlp.c_fc.weight)
                mlp_proj_params.append(block_module.mlp.c_proj.weight)

        # Combine into logical groups for experiments
        attn_qk_group = attn_q_params + attn_k_params
        attn_vo_group = attn_v_params + attn_o_params
        all_attn_matrices = attn_qk_group + attn_vo_group
        mlp_w1_group = mlp_fc_params
        mlp_w2_group = mlp_proj_params
        all_mlp_matrices = mlp_fc_params + mlp_proj_params

        # Scalar parameters (all others not explicitly grouped as matrices)
        matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
        scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
        for p_scalar in scalar_params:  # Sanity check
            if p_scalar.ndim >= 2:
                print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)

        # Determine parameter distribution based on optimizer_mode
        muon_params_target_list = []
        adam_matrix_target_list = []  # Matrices that Adam will handle specifically
        adam_matrix_lr = exp_args.adam_lr  # LR for matrices if Adam handles them (can be tuned)
        muon_lr = exp_args.muon_lr

        current_optimizer_mode = exp_args.optimizer_mode
        print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)

        if current_optimizer_mode == 0:  # Original behavior: Muon on all "hidden_matrix_params"
            print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
            muon_params_target_list = all_attn_matrices + all_mlp_matrices
            # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
        elif current_optimizer_mode == 1:  # Muon on QK, Adam on VO and MLP
            print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = attn_qk_group
            adam_matrix_target_list = attn_vo_group + all_mlp_matrices
        elif current_optimizer_mode == 2:  # Muon on VO, Adam on QK and MLP
            print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = attn_vo_group
            adam_matrix_target_list = attn_qk_group + all_mlp_matrices
        elif current_optimizer_mode == 3:  # Muon on All Attn (QKVO), Adam on MLP
            print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = all_attn_matrices
            adam_matrix_target_list = all_mlp_matrices
        elif current_optimizer_mode == 4:  # Muon on MLP, Adam on All Attn (QKVO)
            print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = all_mlp_matrices
            adam_matrix_target_list = all_attn_matrices
        elif current_optimizer_mode == 5:  # All Adam
            print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = []
            adam_matrix_target_list = all_attn_matrices + all_mlp_matrices  # All matrices to Adam
        elif current_optimizer_mode == 6:  # Muon on W_2 MLP, Adam on attn, W_1 MLP
            print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = mlp_w2_group
            adam_matrix_target_list = all_attn_matrices + mlp_w1_group
        elif current_optimizer_mode == 7:  # Muon on VO Attn, MLP, Adam on QK Attn
            print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = attn_vo_group + all_mlp_matrices
            adam_matrix_target_list = attn_qk_group
        elif current_optimizer_mode == 8:  # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
            print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = attn_vo_group + mlp_w2_group
            adam_matrix_target_list = attn_qk_group + mlp_w1_group
        else:
            raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")

        # Adam optimizer setup
        adam_param_groups_config = [
            dict(params=head_params, lr=adam_matrix_lr),
            dict(params=embed_params, lr=adam_matrix_lr),
            dict(params=scalar_params, lr=adam_matrix_lr)  # Scalar params always go to Adam
        ]
        # Add matrices specifically assigned to Adam for this experiment mode
        if adam_matrix_target_list:
            # Ensure adam_matrix_target_list is flat and contains Parameters
            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
            if flat_adam_matrices:  # Only add group if there are params
                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))

        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)
        optimizers = [optimizer1]  # Start with Adam

        # Muon optimizer setup
        if muon_params_target_list:
            # Ensure muon_params_target_list is flat, unique, and contains Parameters
            flat_unique_muon_params = []
            seen_muon_ids = set()
            for sublist_or_p in muon_params_target_list:
                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
                    if p is not None and id(p) not in seen_muon_ids:
                        flat_unique_muon_params.append(p)
                        seen_muon_ids.add(id(p))

            if flat_unique_muon_params:  # Only create Muon if it has parameters
                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size)
                optimizers.append(optimizer2)
            else:
                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
                optimizer2 = None  # Explicitly set to None if not created
        else:
            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
            optimizer2 = None  # Explicitly set to None

        print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
        if optimizer2:
            print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
    elif exp_args.model_parameterization == "whole":
        hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
        embed_params = [p for n, p in model.named_parameters() if "embed" in n]
        scalar_params = [p for p in model.parameters() if p.ndim < 2]
        head_params = [model.lm_head.weight]

        # init the optimizer(s)
        adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
        # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
        # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
        optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
        # NOTE(review): this branch hard-codes lr=0.05 instead of exp_args.muon_lr — confirm intended.
        optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
        optimizers = [optimizer1, optimizer2]

    for opt in optimizers:
        for group in opt.param_groups:
            group["initial_lr"] = group["lr"]

    # learning rate schedule: stable then decay
    def get_lr(step: int):
        """Return the LR multiplier: 1.0 for the stable phase, then a linear decay to 0.1."""
        x = step / args.num_iterations  # progress in training
        # Allow x == 1 on the final step; clamp any overshoot instead of asserting.
        if not (0 <= x <= 1):
            x = min(max(x, 0.0), 1.0)

        if x < 1 - args.cooldown_frac:
            return 1.0
        else:
            # Guard cooldown_frac against zero to avoid division by zero
            w = (1 - x) / max(args.cooldown_frac, 1e-9)
            return w * 1.0 + (1 - w) * 0.1

    # attention window size schedule
    def next_multiple_of_n(v: float | int, *, n: int):
        """Smallest multiple of n that is >= v."""
        return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)

    @lru_cache(1)
    def get_window_size_blocks_helper(window_size: int):
        return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)

    def get_window_size_blocks(step: int):
        """Linearly grow the attention window (in 128-token blocks) with training progress."""
        x = step / args.num_iterations  # progress in training
        if not (0 <= x <= 1):
            x = min(max(x, 0.0), 1.0)  # Clamp x

        # Ensure window_size is at least 128
        window_size = max(128, next_multiple_of_n(1728 * x, n=128))
        return get_window_size_blocks_helper(window_size)

    @torch.no_grad()
    def calculate_validation_loss(model_to_eval, val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks):
        """Average validation loss over 1/4 of the validation budget, all-reduced across ranks."""
        model_to_eval.eval()
        val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size)
        val_num_steps = (val_tokens_limit // 4) // val_batch_size  # Use 1/4 of val set for speed

        val_loss_sum = torch.zeros(1, device=device)
        actual_val_steps = 0

        for val_i in range(val_num_steps):
            try:
                inputs, targets = next(val_loader)
                loss_val = model_to_eval(inputs, targets, window_size_blocks)
                val_loss_sum += loss_val
                actual_val_steps += 1
            except StopIteration:
                break

        if actual_val_steps > 0:
            val_loss_avg = val_loss_sum / actual_val_steps
        else:
            val_loss_avg = torch.tensor(float('nan'), device=device)

        del val_loader
        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)

        model_to_eval.train()
        return val_loss_avg.item()

    # Helper functions for additional norms
    def calculate_l1_to_linf_norm(matrix):
        """
        Calculate the l1->linf norm (max row L1 norm) of a matrix.
        For vectors, returns the L1 norm; higher-rank tensors are flattened to 2D.
        """
        if matrix.ndim == 1:
            return torch.sum(torch.abs(matrix))
        elif matrix.ndim == 2:
            # Each row's L1 norm, then take maximum
            row_l1_norms = torch.sum(torch.abs(matrix), dim=1)
            return torch.max(row_l1_norms)
        else:
            # For higher-dimensional tensors, flatten to 2D
            matrix_2d = matrix.view(matrix.shape[0], -1)
            row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1)
            return torch.max(row_l1_norms)

    def calculate_spectral_norm(matrix):
        """
        Calculate the spectral norm (largest singular value) of a matrix.
        For vectors, returns the L2 norm; higher-rank tensors are flattened to 2D.
        """
        # Convert to float32 if needed for linalg operations
        if matrix.dtype in [torch.bfloat16, torch.float16]:
            matrix = matrix.float()

        if matrix.ndim == 1:
            return torch.norm(matrix, p=2)
        elif matrix.ndim == 2:
            # Use matrix 2-norm (largest singular value)
            return torch.linalg.matrix_norm(matrix, ord=2)
        else:
            # For higher-dimensional tensors, flatten to 2D
            matrix_2d = matrix.view(matrix.shape[0], -1)
            return torch.linalg.matrix_norm(matrix_2d, ord=2)

    def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator):
        """
        Comprehensive sharpness analysis: total and layer-wise directional curvature
        along the actual optimizer update direction, plus update-norm statistics.

        Side effects: temporarily steps and restores the model parameters, switches
        the model to float32 for HVPs and back to bfloat16, and clears optimizer state.
        Returns a dict of scalar metrics.
        """
        analysis_results = {}

        # --- 1. Get the true update direction 'v' (one real optimizer step, then roll back) ---
        print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True)
        params_before_step = [p.clone().detach() for p in model.parameters()]

        grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size)
        try:
            inputs, targets = next(grad_calc_loader)
            model_compiled.train()
            with autocast(device_type='cuda', dtype=torch.bfloat16):
                loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step))
            loss_for_grad.backward()

            for param in model.parameters():
                if param.grad is not None:
                    dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
        finally:
            del grad_calc_loader

        for opt in optimizers:
            opt.step()
        model_compiled.zero_grad(set_to_none=True)

        update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())]

        # Roll the parameters back to their pre-step values
        for p_model, p_before in zip(model.parameters(), params_before_step):
            p_model.data.copy_(p_before.data)

        # --- 2. Calculate update norms (Frobenius, Max-of-Max, Spectral) ---
        print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True)

        # Total update norm (Frobenius)
        total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v)
        dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM)
        analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item()

        # Total update Max-of-Max and Spectral norms over the concatenated update vector
        print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True)
        try:
            all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0])

            if all_updates_flat.numel() > 0:
                # L1 norm of the concatenated vector
                total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat))
                analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item()

                # L2 norm of the concatenated vector
                total_spectral_norm = torch.norm(all_updates_flat, p=2)
                analysis_results["total_spectral_norm"] = total_spectral_norm.item()
            else:
                analysis_results["total_l1_linf_norm"] = 0.0
                analysis_results["total_spectral_norm"] = 0.0

            del all_updates_flat
        except Exception as e:
            print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True)
            analysis_results["total_l1_linf_norm"] = 0.0
            analysis_results["total_spectral_norm"] = 0.0

        # --- 3. Setup layer parameter groups (one group per transformer block) ---
        print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True)
        all_param_groups = {}
        for i in range(len(model.blocks)):
            block = model.blocks[i]
            layer_name = f"layer_{i+1}"
            all_param_groups[layer_name] = list(block.parameters())

        # --- Layer-wise update norms ---
        print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True)
        param_to_idx = {p: i for i, p in enumerate(model.parameters())}

        for group_name, param_group in all_param_groups.items():
            if not param_group:
                continue

            indices = [param_to_idx[p] for p in param_group if p in param_to_idx]
            if not indices:
                continue

            # Frobenius norm for this group
            group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices)
            dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM)
            analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item()

            # Max-of-Max and Spectral norms for this group
            group_l1_linf_norms = []
            group_spectral_norms = []

            for i in indices:
                if i < len(update_direction_v) and update_direction_v[i].numel() > 0:
                    try:
                        l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i])
                        group_l1_linf_norms.append(l1_linf_norm.item())

                        spectral_norm = calculate_spectral_norm(update_direction_v[i])
                        group_spectral_norms.append(spectral_norm.item())
                    except Exception as e:
                        print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True)
                        group_l1_linf_norms.append(0.0)
                        group_spectral_norms.append(0.0)

            # Store max norms for this group (as per mathematical definition)
            analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) if group_l1_linf_norms else 0.0
            analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) if group_spectral_norms else 0.0

        # --- 4. Setup for HVP calculation (float32 for precision) ---
        print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True)
        model.float()
        update_direction_v_fp32 = [v.float() for v in update_direction_v]

        hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size)
        try:
            inputs, targets = next(hvp_loader)

            # Loss and first-order gradients once, keeping the graph for second-order grads
            loss_hvp = model(inputs, targets, get_window_size_blocks(step))
            grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True)

            # --- 5. TOTAL sharpness: v^T H v / ||v||^2 along the update direction ---
            print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True)
            v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32))
            hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True)

            vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32))
            v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32)

            dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG)
            dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM)

            if v_norm_sq_total.item() > 1e-12:
                analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item()
            else:
                analysis_results["total_sharpness"] = 0.0
            del hvp_total_result

            # --- 6. Layer-wise sharpness ---
            print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True)
            param_to_idx = {p: i for i, p in enumerate(model.parameters())}

            print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True)

            for group_name, param_group in all_param_groups.items():
                if not param_group:
                    continue

                print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True)
                indices = {param_to_idx[p] for p in param_group if p in param_to_idx}
                if not indices:
                    continue

                try:
                    v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices)
                    hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True)

                    vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices)
                    v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices)

                    dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG)
                    dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM)

                    if v_norm_sq_group.item() > 1e-12:
                        analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item()
                    else:
                        analysis_results[f"{group_name}_sharpness"] = 0.0

                    # Clean up immediately to save memory
                    del hvp_group_result
                    torch.cuda.empty_cache()

                except torch.OutOfMemoryError as e:
                    print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True)
                    analysis_results[f"{group_name}_sharpness"] = 0.0
                    torch.cuda.empty_cache()
                except Exception as e:
                    print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True)
                    analysis_results[f"{group_name}_sharpness"] = 0.0

        finally:
            del hvp_loader

        # --- 7. Cleanup: restore dtype, drop temporaries, reset optimizer state ---
        model.bfloat16()
        del update_direction_v, update_direction_v_fp32, grads_hvp
        del params_before_step
        gc.collect()
        torch.cuda.empty_cache()

        for opt in optimizers:
            opt.state.clear()

        print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True)
        return analysis_results

    def format_comprehensive_results(results):
        """Render the sharpness/norm metrics dict as a single space-separated log string."""
        log_parts = []

        # Total sharpness
        if 'total_sharpness' in results:
            log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}")

        # Layer-wise sharpness
        layer_sharpness = []
        for i in range(1, 13):  # Layers 1-12
            layer_key = f"layer_{i}_sharpness"
            if layer_key in results:
                layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}")
        if layer_sharpness:
            log_parts.append(" ".join(layer_sharpness))

        # Total update norms (Frobenius, Max-of-Max, Spectral)
        total_norms = []
        if 'total_update_fnorm' in results:
            total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}")
        if 'total_l1_linf_norm' in results:
            total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}")
        if 'total_spectral_norm' in results:
            total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}")
        if total_norms:
            log_parts.append(" ".join(total_norms))

        # Layer-wise update norms (Frobenius)
        layer_fnorms = []
        for i in range(1, 13):
            layer_key = f"layer_{i}_update_fnorm"
            if layer_key in results:
                layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}")
        if layer_fnorms:
            log_parts.append(" ".join(layer_fnorms))

        # Layer-wise update norms (Max-of-Max)
        layer_l1_linf = []
        for i in range(1, 13):
            layer_key = f"layer_{i}_max_l1_linf_norm"
            if layer_key in results:
                layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}")
        if layer_l1_linf:
            log_parts.append(" ".join(layer_l1_linf))

        # Layer-wise update norms (Spectral)
        layer_spectral = []
        for i in range(1, 13):
            layer_key = f"layer_{i}_max_spectral_norm"
            if layer_key in results:
                layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}")
        if layer_spectral:
            log_parts.append(" ".join(layer_spectral))

        return " ".join(log_parts)

    print0("PRINT: Compiling model with TorchInductor...", console=True)
    model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
    print0("PRINT: Model compilation complete.", console=True)

    ########################################
    #            Warmup kernels            #
    ########################################
    print0("PRINT: Starting warmup...", console=True)
    warmup_steps = 10
    initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()),
                         optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers])
    for i in range(warmup_steps):
        inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
        loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
        loss.backward()
        for param in model_compiled.parameters():
            if param.grad is not None:
                dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
        for opt in optimizers:
            opt.step()
        model_compiled.zero_grad(set_to_none=True)
    # Restore the pre-warmup state so warmup has no effect on training
    model_compiled.load_state_dict(initial_state["model"])
    for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
        opt.load_state_dict(opt_state)
    del initial_state
    print0("PRINT: Warmup complete.", console=True)
    torch.cuda.synchronize()

    ########################################
    #        Training and validation       #
    ########################################
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 06:38:45] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 06:38:45] [Rank 0] PRINT: Constructing model... +[2025-09-11 06:38:45] [Rank 0] PRINT: Constructing model... +[2025-09-11 06:38:46] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 06:38:46] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 06:38:46] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 06:38:46] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 06:38:46] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 06:38:46] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 06:38:46] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 06:38:46] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 06:38:46] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 06:38:46] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 06:38:48] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 06:38:48] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 06:38:48] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 06:38:48] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 06:38:48] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 06:38:48] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 06:38:54] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 06:38:54] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 06:38:54] [Rank 0] PRINT: Starting warmup... +[2025-09-11 06:38:54] [Rank 0] PRINT: Starting warmup... +[2025-09-11 06:39:40] [Rank 0] PRINT: Warmup complete. +[2025-09-11 06:39:40] [Rank 0] PRINT: Warmup complete. +[2025-09-11 06:39:40] [Rank 0] PRINT: Starting training... +[2025-09-11 06:39:40] [Rank 0] PRINT: Starting training... 
+[2025-09-11 06:39:41] [Rank 0] step:21/10000 train_time:1191ms step_avg:56.70ms +[2025-09-11 06:39:41] [Rank 0] step:21/10000 train_time:1191ms step_avg:56.70ms +[2025-09-11 06:39:42] [Rank 0] step:41/10000 train_time:1925ms step_avg:46.95ms +[2025-09-11 06:39:42] [Rank 0] step:41/10000 train_time:1925ms step_avg:46.95ms +[2025-09-11 06:39:42] [Rank 0] step:61/10000 train_time:2658ms step_avg:43.58ms +[2025-09-11 06:39:42] [Rank 0] step:61/10000 train_time:2658ms step_avg:43.58ms +[2025-09-11 06:39:43] [Rank 0] step:81/10000 train_time:3392ms step_avg:41.88ms +[2025-09-11 06:39:43] [Rank 0] step:81/10000 train_time:3392ms step_avg:41.88ms +[2025-09-11 06:39:44] [Rank 0] step:101/10000 train_time:4125ms step_avg:40.85ms +[2025-09-11 06:39:44] [Rank 0] step:101/10000 train_time:4125ms step_avg:40.85ms +[2025-09-11 06:39:45] [Rank 0] step:121/10000 train_time:4859ms step_avg:40.16ms +[2025-09-11 06:39:45] [Rank 0] step:121/10000 train_time:4859ms step_avg:40.16ms +[2025-09-11 06:39:45] [Rank 0] step:141/10000 train_time:5593ms step_avg:39.67ms +[2025-09-11 06:39:45] [Rank 0] step:141/10000 train_time:5593ms step_avg:39.67ms +[2025-09-11 06:39:46] [Rank 0] step:161/10000 train_time:6326ms step_avg:39.29ms +[2025-09-11 06:39:46] [Rank 0] step:161/10000 train_time:6326ms step_avg:39.29ms +[2025-09-11 06:39:47] [Rank 0] step:181/10000 train_time:7059ms step_avg:39.00ms +[2025-09-11 06:39:47] [Rank 0] step:181/10000 train_time:7059ms step_avg:39.00ms +[2025-09-11 06:39:48] [Rank 0] step:201/10000 train_time:7792ms step_avg:38.77ms +[2025-09-11 06:39:48] [Rank 0] step:201/10000 train_time:7792ms step_avg:38.77ms +[2025-09-11 06:39:48] [Rank 0] step:221/10000 train_time:8525ms step_avg:38.58ms +[2025-09-11 06:39:48] [Rank 0] step:221/10000 train_time:8525ms step_avg:38.58ms +[2025-09-11 06:39:49] [Rank 0] step:241/10000 train_time:9259ms step_avg:38.42ms +[2025-09-11 06:39:49] [Rank 0] step:241/10000 train_time:9259ms step_avg:38.42ms +[2025-09-11 06:39:50] [Rank 0] 
step:261/10000 train_time:9992ms step_avg:38.28ms +[2025-09-11 06:39:50] [Rank 0] step:261/10000 train_time:9992ms step_avg:38.28ms +[2025-09-11 06:39:51] [Rank 0] step:281/10000 train_time:10724ms step_avg:38.17ms +[2025-09-11 06:39:51] [Rank 0] step:281/10000 train_time:10724ms step_avg:38.17ms +[2025-09-11 06:39:51] [Rank 0] step:301/10000 train_time:11456ms step_avg:38.06ms +[2025-09-11 06:39:51] [Rank 0] step:301/10000 train_time:11456ms step_avg:38.06ms +[2025-09-11 06:39:52] [Rank 0] step:321/10000 train_time:12190ms step_avg:37.97ms +[2025-09-11 06:39:52] [Rank 0] step:321/10000 train_time:12190ms step_avg:37.97ms +[2025-09-11 06:39:53] [Rank 0] step:341/10000 train_time:12922ms step_avg:37.90ms +[2025-09-11 06:39:53] [Rank 0] step:341/10000 train_time:12922ms step_avg:37.90ms +[2025-09-11 06:39:53] [Rank 0] step:361/10000 train_time:13655ms step_avg:37.83ms +[2025-09-11 06:39:53] [Rank 0] step:361/10000 train_time:13655ms step_avg:37.83ms +[2025-09-11 06:39:54] [Rank 0] step:381/10000 train_time:14388ms step_avg:37.76ms +[2025-09-11 06:39:54] [Rank 0] step:381/10000 train_time:14388ms step_avg:37.76ms +[2025-09-11 06:39:55] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 06:39:55] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 06:40:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 06:40:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 06:40:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 06:40:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 06:40:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:40:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 06:40:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 06:40:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 06:40:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 06:40:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 06:40:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 06:40:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 06:40:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 06:40:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 06:40:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 06:40:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 06:40:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 06:40:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 06:40:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 06:40:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 06:40:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 06:40:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 06:40:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 06:40:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 06:40:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 06:40:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 06:40:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 06:40:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 06:40:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 06:40:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 06:40:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 06:40:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 06:40:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 06:40:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 06:40:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 06:40:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 06:40:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 06:40:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 06:40:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 06:40:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 06:40:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 06:40:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 06:40:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 06:40:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:40:53] [Rank 0] PRINT: step:400/10000 val_loss:5.9282 total_sharp:3.3625e-04 L1_sharp:1.3007e-02 L2_sharp:1.5364e-03 L3_sharp:3.3060e-04 L4_sharp:4.9023e-04 L5_sharp:4.0512e-04 L6_sharp:8.5686e-04 L7_sharp:6.0299e-04 L8_sharp:1.0641e-03 L9_sharp:5.7518e-04 L10_sharp:9.1308e-04 L11_sharp:1.5485e-03 L12_sharp:2.3458e-02 total_fnorm:1.9997e+02 total_l1_linf:5.5246e+05 total_spectral:9.9992e+01 L1_fnorm:6.1969e+00 L2_fnorm:6.0219e+00 L3_fnorm:5.9701e+00 L4_fnorm:5.8676e+00 L5_fnorm:5.9114e+00 L6_fnorm:5.8690e+00 L7_fnorm:5.8417e+00 L8_fnorm:5.8558e+00 L9_fnorm:5.8444e+00 L10_fnorm:5.8119e+00 L11_fnorm:5.7573e+00 L12_fnorm:5.4510e+00 L1_l1linf:1.8909e+00 L2_l1linf:1.9485e+00 L3_l1linf:1.9262e+00 L4_l1linf:1.8794e+00 L5_l1linf:1.8987e+00 L6_l1linf:1.9016e+00 L7_l1linf:1.8860e+00 L8_l1linf:1.8897e+00 L9_l1linf:1.8698e+00 L10_l1linf:1.7777e+00 L11_l1linf:1.6891e+00 L12_l1linf:1.4488e+00 L1_spectral:6.0400e-02 L2_spectral:6.0412e-02 L3_spectral:6.0400e-02 L4_spectral:6.0376e-02 L5_spectral:6.0356e-02 L6_spectral:6.0356e-02 L7_spectral:6.0343e-02 L8_spectral:6.0325e-02 L9_spectral:6.0369e-02 L10_spectral:6.0357e-02 L11_spectral:6.0334e-02 L12_spectral:6.0313e-02 train_time:15101ms step_avg:37.75ms +[2025-09-11 06:40:53] [Rank 0] PRINT: step:400/10000 val_loss:5.9282 total_sharp:3.3625e-04 L1_sharp:1.3007e-02 L2_sharp:1.5364e-03 L3_sharp:3.3060e-04 L4_sharp:4.9023e-04 L5_sharp:4.0512e-04 L6_sharp:8.5686e-04 L7_sharp:6.0299e-04 L8_sharp:1.0641e-03 L9_sharp:5.7518e-04 L10_sharp:9.1308e-04 L11_sharp:1.5485e-03 L12_sharp:2.3458e-02 total_fnorm:1.9997e+02 total_l1_linf:5.5246e+05 total_spectral:9.9992e+01 L1_fnorm:6.1969e+00 L2_fnorm:6.0219e+00 L3_fnorm:5.9701e+00 L4_fnorm:5.8676e+00 L5_fnorm:5.9114e+00 L6_fnorm:5.8690e+00 L7_fnorm:5.8417e+00 L8_fnorm:5.8558e+00 L9_fnorm:5.8444e+00 L10_fnorm:5.8119e+00 L11_fnorm:5.7573e+00 L12_fnorm:5.4510e+00 L1_l1linf:1.8909e+00 L2_l1linf:1.9485e+00 L3_l1linf:1.9262e+00 L4_l1linf:1.8794e+00 L5_l1linf:1.8987e+00 
L6_l1linf:1.9016e+00 L7_l1linf:1.8860e+00 L8_l1linf:1.8897e+00 L9_l1linf:1.8698e+00 L10_l1linf:1.7777e+00 L11_l1linf:1.6891e+00 L12_l1linf:1.4488e+00 L1_spectral:6.0400e-02 L2_spectral:6.0412e-02 L3_spectral:6.0400e-02 L4_spectral:6.0376e-02 L5_spectral:6.0356e-02 L6_spectral:6.0356e-02 L7_spectral:6.0343e-02 L8_spectral:6.0325e-02 L9_spectral:6.0369e-02 L10_spectral:6.0357e-02 L11_spectral:6.0334e-02 L12_spectral:6.0313e-02 train_time:15101ms step_avg:37.75ms +[2025-09-11 06:41:26] [Rank 0] step:401/10000 train_time:48485ms step_avg:120.91ms +[2025-09-11 06:41:26] [Rank 0] step:401/10000 train_time:48485ms step_avg:120.91ms +[2025-09-11 06:41:28] [Rank 0] step:421/10000 train_time:50319ms step_avg:119.52ms +[2025-09-11 06:41:28] [Rank 0] step:421/10000 train_time:50319ms step_avg:119.52ms +[2025-09-11 06:41:29] [Rank 0] step:441/10000 train_time:50963ms step_avg:115.56ms +[2025-09-11 06:41:29] [Rank 0] step:441/10000 train_time:50963ms step_avg:115.56ms +[2025-09-11 06:41:29] [Rank 0] step:461/10000 train_time:51606ms step_avg:111.94ms +[2025-09-11 06:41:29] [Rank 0] step:461/10000 train_time:51606ms step_avg:111.94ms +[2025-09-11 06:41:30] [Rank 0] step:481/10000 train_time:52249ms step_avg:108.62ms +[2025-09-11 06:41:30] [Rank 0] step:481/10000 train_time:52249ms step_avg:108.62ms +[2025-09-11 06:41:31] [Rank 0] step:501/10000 train_time:52892ms step_avg:105.57ms +[2025-09-11 06:41:31] [Rank 0] step:501/10000 train_time:52892ms step_avg:105.57ms +[2025-09-11 06:41:31] [Rank 0] step:521/10000 train_time:53534ms step_avg:102.75ms +[2025-09-11 06:41:31] [Rank 0] step:521/10000 train_time:53534ms step_avg:102.75ms +[2025-09-11 06:41:32] [Rank 0] step:541/10000 train_time:54176ms step_avg:100.14ms +[2025-09-11 06:41:32] [Rank 0] step:541/10000 train_time:54176ms step_avg:100.14ms +[2025-09-11 06:41:33] [Rank 0] step:561/10000 train_time:54818ms step_avg:97.71ms +[2025-09-11 06:41:33] [Rank 0] step:561/10000 train_time:54818ms step_avg:97.71ms +[2025-09-11 06:41:33] 
[Rank 0] step:581/10000 train_time:55460ms step_avg:95.46ms +[2025-09-11 06:41:33] [Rank 0] step:581/10000 train_time:55460ms step_avg:95.46ms +[2025-09-11 06:41:34] [Rank 0] step:601/10000 train_time:56103ms step_avg:93.35ms +[2025-09-11 06:41:34] [Rank 0] step:601/10000 train_time:56103ms step_avg:93.35ms +[2025-09-11 06:41:35] [Rank 0] step:621/10000 train_time:56745ms step_avg:91.38ms +[2025-09-11 06:41:35] [Rank 0] step:621/10000 train_time:56745ms step_avg:91.38ms +[2025-09-11 06:41:35] [Rank 0] step:641/10000 train_time:57386ms step_avg:89.53ms +[2025-09-11 06:41:35] [Rank 0] step:641/10000 train_time:57386ms step_avg:89.53ms +[2025-09-11 06:41:36] [Rank 0] step:661/10000 train_time:58027ms step_avg:87.79ms +[2025-09-11 06:41:36] [Rank 0] step:661/10000 train_time:58027ms step_avg:87.79ms +[2025-09-11 06:41:37] [Rank 0] step:681/10000 train_time:58668ms step_avg:86.15ms +[2025-09-11 06:41:37] [Rank 0] step:681/10000 train_time:58668ms step_avg:86.15ms +[2025-09-11 06:41:37] [Rank 0] step:701/10000 train_time:59309ms step_avg:84.61ms +[2025-09-11 06:41:37] [Rank 0] step:701/10000 train_time:59309ms step_avg:84.61ms +[2025-09-11 06:41:38] [Rank 0] step:721/10000 train_time:59950ms step_avg:83.15ms +[2025-09-11 06:41:38] [Rank 0] step:721/10000 train_time:59950ms step_avg:83.15ms +[2025-09-11 06:41:38] [Rank 0] step:741/10000 train_time:60591ms step_avg:81.77ms +[2025-09-11 06:41:38] [Rank 0] step:741/10000 train_time:60591ms step_avg:81.77ms +[2025-09-11 06:41:39] [Rank 0] step:761/10000 train_time:61238ms step_avg:80.47ms +[2025-09-11 06:41:39] [Rank 0] step:761/10000 train_time:61238ms step_avg:80.47ms +[2025-09-11 06:41:40] [Rank 0] step:781/10000 train_time:62200ms step_avg:79.64ms +[2025-09-11 06:41:40] [Rank 0] step:781/10000 train_time:62200ms step_avg:79.64ms +[2025-09-11 06:41:41] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 06:41:41] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 06:41:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 06:41:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 06:42:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 06:42:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 06:42:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:42:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:42:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 06:42:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 06:42:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 06:42:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 06:42:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 06:42:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 06:42:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 06:42:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 06:42:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 06:42:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 06:42:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 06:42:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 06:42:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 06:42:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 06:42:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 06:42:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 06:42:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 06:42:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 06:42:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 06:42:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 06:42:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 06:42:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 06:42:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 06:42:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 06:42:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 06:42:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 06:42:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 06:42:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 06:42:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 06:42:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 06:42:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... 
+[2025-09-11 06:42:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 06:42:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 06:42:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 06:42:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 06:42:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 06:42:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 06:42:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 06:42:31] [Rank 0] PRINT: step:800/10000 val_loss:5.4897 total_sharp:1.6600e-04 L1_sharp:5.7893e-03 L2_sharp:4.1926e-04 L3_sharp:2.0908e-04 L4_sharp:1.9473e-04 L5_sharp:3.3303e-04 L6_sharp:3.3746e-04 L7_sharp:2.3183e-04 L8_sharp:2.8324e-04 L9_sharp:3.2521e-04 L10_sharp:5.4395e-04 L11_sharp:1.0980e-03 L12_sharp:8.2496e-03 total_fnorm:1.9500e+02 total_l1_linf:5.1405e+05 total_spectral:9.7500e+01 L1_fnorm:6.1250e+00 L2_fnorm:6.0312e+00 L3_fnorm:6.0312e+00 L4_fnorm:5.9688e+00 L5_fnorm:6.0000e+00 L6_fnorm:6.0312e+00 L7_fnorm:6.0312e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.0938e+00 L10_fnorm:6.0312e+00 L11_fnorm:5.9688e+00 L12_fnorm:5.6875e+00 L1_l1linf:1.8672e+00 L2_l1linf:1.9297e+00 L3_l1linf:1.8750e+00 L4_l1linf:1.8125e+00 L5_l1linf:1.8281e+00 L6_l1linf:1.8516e+00 L7_l1linf:1.8281e+00 L8_l1linf:1.8750e+00 L9_l1linf:1.8359e+00 L10_l1linf:1.7969e+00 L11_l1linf:1.7031e+00 L12_l1linf:1.4219e+00 L1_spectral:6.7050e-02 L2_spectral:6.6658e-02 L3_spectral:6.6320e-02 L4_spectral:6.6357e-02 L5_spectral:6.6487e-02 L6_spectral:6.6682e-02 L7_spectral:6.6477e-02 L8_spectral:6.6824e-02 L9_spectral:6.6574e-02 L10_spectral:6.6573e-02 L11_spectral:6.6847e-02 L12_spectral:6.6538e-02 train_time:63108ms step_avg:78.89ms +[2025-09-11 06:42:31] [Rank 0] PRINT: step:800/10000 val_loss:5.4897 
total_sharp:1.6600e-04 L1_sharp:5.7893e-03 L2_sharp:4.1926e-04 L3_sharp:2.0908e-04 L4_sharp:1.9473e-04 L5_sharp:3.3303e-04 L6_sharp:3.3746e-04 L7_sharp:2.3183e-04 L8_sharp:2.8324e-04 L9_sharp:3.2521e-04 L10_sharp:5.4395e-04 L11_sharp:1.0980e-03 L12_sharp:8.2496e-03 total_fnorm:1.9500e+02 total_l1_linf:5.1405e+05 total_spectral:9.7500e+01 L1_fnorm:6.1250e+00 L2_fnorm:6.0312e+00 L3_fnorm:6.0312e+00 L4_fnorm:5.9688e+00 L5_fnorm:6.0000e+00 L6_fnorm:6.0312e+00 L7_fnorm:6.0312e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.0938e+00 L10_fnorm:6.0312e+00 L11_fnorm:5.9688e+00 L12_fnorm:5.6875e+00 L1_l1linf:1.8672e+00 L2_l1linf:1.9297e+00 L3_l1linf:1.8750e+00 L4_l1linf:1.8125e+00 L5_l1linf:1.8281e+00 L6_l1linf:1.8516e+00 L7_l1linf:1.8281e+00 L8_l1linf:1.8750e+00 L9_l1linf:1.8359e+00 L10_l1linf:1.7969e+00 L11_l1linf:1.7031e+00 L12_l1linf:1.4219e+00 L1_spectral:6.7050e-02 L2_spectral:6.6658e-02 L3_spectral:6.6320e-02 L4_spectral:6.6357e-02 L5_spectral:6.6487e-02 L6_spectral:6.6682e-02 L7_spectral:6.6477e-02 L8_spectral:6.6824e-02 L9_spectral:6.6574e-02 L10_spectral:6.6573e-02 L11_spectral:6.6847e-02 L12_spectral:6.6538e-02 train_time:63108ms step_avg:78.89ms +[2025-09-11 06:42:32] [Rank 0] step:801/10000 train_time:64239ms step_avg:80.20ms +[2025-09-11 06:42:32] [Rank 0] step:801/10000 train_time:64239ms step_avg:80.20ms +[2025-09-11 06:42:32] [Rank 0] step:821/10000 train_time:64889ms step_avg:79.04ms +[2025-09-11 06:42:32] [Rank 0] step:821/10000 train_time:64889ms step_avg:79.04ms +[2025-09-11 06:42:33] [Rank 0] step:841/10000 train_time:65536ms step_avg:77.93ms +[2025-09-11 06:42:33] [Rank 0] step:841/10000 train_time:65536ms step_avg:77.93ms +[2025-09-11 06:42:34] [Rank 0] step:861/10000 train_time:66182ms step_avg:76.87ms +[2025-09-11 06:42:34] [Rank 0] step:861/10000 train_time:66182ms step_avg:76.87ms +[2025-09-11 06:42:34] [Rank 0] step:881/10000 train_time:66828ms step_avg:75.86ms +[2025-09-11 06:42:34] [Rank 0] step:881/10000 train_time:66828ms step_avg:75.86ms +[2025-09-11 
06:42:35] [Rank 0] step:901/10000 train_time:67474ms step_avg:74.89ms +[2025-09-11 06:42:35] [Rank 0] step:901/10000 train_time:67474ms step_avg:74.89ms +[2025-09-11 06:42:36] [Rank 0] step:921/10000 train_time:68119ms step_avg:73.96ms +[2025-09-11 06:42:36] [Rank 0] step:921/10000 train_time:68119ms step_avg:73.96ms +[2025-09-11 06:42:36] [Rank 0] step:941/10000 train_time:68765ms step_avg:73.08ms +[2025-09-11 06:42:36] [Rank 0] step:941/10000 train_time:68765ms step_avg:73.08ms +[2025-09-11 06:42:37] [Rank 0] step:961/10000 train_time:69410ms step_avg:72.23ms +[2025-09-11 06:42:37] [Rank 0] step:961/10000 train_time:69410ms step_avg:72.23ms +[2025-09-11 06:42:38] [Rank 0] step:981/10000 train_time:70055ms step_avg:71.41ms +[2025-09-11 06:42:38] [Rank 0] step:981/10000 train_time:70055ms step_avg:71.41ms +[2025-09-11 06:42:38] [Rank 0] step:1001/10000 train_time:70700ms step_avg:70.63ms +[2025-09-11 06:42:38] [Rank 0] step:1001/10000 train_time:70700ms step_avg:70.63ms +[2025-09-11 06:42:39] [Rank 0] step:1021/10000 train_time:71346ms step_avg:69.88ms +[2025-09-11 06:42:39] [Rank 0] step:1021/10000 train_time:71346ms step_avg:69.88ms +[2025-09-11 06:42:40] [Rank 0] step:1041/10000 train_time:71992ms step_avg:69.16ms +[2025-09-11 06:42:40] [Rank 0] step:1041/10000 train_time:71992ms step_avg:69.16ms +[2025-09-11 06:42:40] [Rank 0] step:1061/10000 train_time:72637ms step_avg:68.46ms +[2025-09-11 06:42:40] [Rank 0] step:1061/10000 train_time:72637ms step_avg:68.46ms +[2025-09-11 06:42:41] [Rank 0] step:1081/10000 train_time:73283ms step_avg:67.79ms +[2025-09-11 06:42:41] [Rank 0] step:1081/10000 train_time:73283ms step_avg:67.79ms +[2025-09-11 06:42:42] [Rank 0] step:1101/10000 train_time:73928ms step_avg:67.15ms +[2025-09-11 06:42:42] [Rank 0] step:1101/10000 train_time:73928ms step_avg:67.15ms +[2025-09-11 06:42:42] [Rank 0] step:1121/10000 train_time:74574ms step_avg:66.52ms +[2025-09-11 06:42:42] [Rank 0] step:1121/10000 train_time:74574ms step_avg:66.52ms 
+[2025-09-11 06:42:43] [Rank 0] step:1141/10000 train_time:75373ms step_avg:66.06ms +[2025-09-11 06:42:43] [Rank 0] step:1141/10000 train_time:75373ms step_avg:66.06ms +[2025-09-11 06:42:44] [Rank 0] step:1161/10000 train_time:76449ms step_avg:65.85ms +[2025-09-11 06:42:44] [Rank 0] step:1161/10000 train_time:76449ms step_avg:65.85ms +[2025-09-11 06:42:45] [Rank 0] step:1181/10000 train_time:77096ms step_avg:65.28ms +[2025-09-11 06:42:45] [Rank 0] step:1181/10000 train_time:77096ms step_avg:65.28ms +[2025-09-11 06:42:45] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 06:42:45] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 06:42:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 06:42:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 06:42:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 06:42:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 06:42:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:42:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:42:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 06:42:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 06:42:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 06:42:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 06:42:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... 
+[2025-09-11 06:42:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 06:42:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 06:42:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 06:42:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 06:42:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 06:42:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 06:42:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 06:42:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 06:42:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 06:42:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 06:42:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 06:42:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 06:42:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 06:42:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 06:42:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 06:42:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 06:42:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 06:42:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 06:42:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... 
+[2025-09-11 06:42:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 06:42:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 06:42:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 06:42:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 06:42:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 06:42:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 06:42:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 06:42:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 06:42:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 06:42:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 06:42:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 06:42:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 06:42:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 06:42:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:42:55] [Rank 0] PRINT: step:1200/10000 val_loss:5.1544 total_sharp:1.1556e-04 L1_sharp:3.6097e-03 L2_sharp:1.4736e-04 L3_sharp:1.3515e-04 L4_sharp:1.8905e-04 L5_sharp:2.9681e-04 L6_sharp:2.5174e-04 L7_sharp:2.3018e-04 L8_sharp:4.2079e-04 L9_sharp:2.9634e-04 L10_sharp:4.3371e-04 L11_sharp:6.8910e-04 L12_sharp:3.9412e-03 total_fnorm:2.0500e+02 total_l1_linf:5.2838e+05 total_spectral:1.0300e+02 L1_fnorm:6.1250e+00 L2_fnorm:6.1250e+00 L3_fnorm:6.1562e+00 L4_fnorm:6.0938e+00 L5_fnorm:6.1250e+00 L6_fnorm:6.1562e+00 L7_fnorm:6.1562e+00 L8_fnorm:6.0938e+00 L9_fnorm:6.1875e+00 L10_fnorm:6.1875e+00 L11_fnorm:6.1875e+00 L12_fnorm:6.0938e+00 L1_l1linf:1.8047e+00 L2_l1linf:1.7734e+00 L3_l1linf:1.7734e+00 L4_l1linf:1.7188e+00 L5_l1linf:1.7031e+00 L6_l1linf:1.6953e+00 L7_l1linf:1.6953e+00 L8_l1linf:1.7031e+00 L9_l1linf:1.6797e+00 L10_l1linf:1.6875e+00 L11_l1linf:1.6875e+00 L12_l1linf:1.5547e+00 L1_spectral:7.0097e-02 L2_spectral:6.8857e-02 L3_spectral:6.8741e-02 L4_spectral:6.9145e-02 L5_spectral:6.9339e-02 L6_spectral:6.9415e-02 L7_spectral:6.9337e-02 L8_spectral:7.0909e-02 L9_spectral:6.9486e-02 L10_spectral:7.0423e-02 L11_spectral:6.9834e-02 L12_spectral:7.1200e-02 train_time:77723ms step_avg:64.77ms +[2025-09-11 06:42:55] [Rank 0] PRINT: step:1200/10000 val_loss:5.1544 total_sharp:1.1556e-04 L1_sharp:3.6097e-03 L2_sharp:1.4736e-04 L3_sharp:1.3515e-04 L4_sharp:1.8905e-04 L5_sharp:2.9681e-04 L6_sharp:2.5174e-04 L7_sharp:2.3018e-04 L8_sharp:4.2079e-04 L9_sharp:2.9634e-04 L10_sharp:4.3371e-04 L11_sharp:6.8910e-04 L12_sharp:3.9412e-03 total_fnorm:2.0500e+02 total_l1_linf:5.2838e+05 total_spectral:1.0300e+02 L1_fnorm:6.1250e+00 L2_fnorm:6.1250e+00 L3_fnorm:6.1562e+00 L4_fnorm:6.0938e+00 L5_fnorm:6.1250e+00 L6_fnorm:6.1562e+00 L7_fnorm:6.1562e+00 L8_fnorm:6.0938e+00 L9_fnorm:6.1875e+00 L10_fnorm:6.1875e+00 L11_fnorm:6.1875e+00 L12_fnorm:6.0938e+00 L1_l1linf:1.8047e+00 L2_l1linf:1.7734e+00 L3_l1linf:1.7734e+00 L4_l1linf:1.7188e+00 L5_l1linf:1.7031e+00 
L6_l1linf:1.6953e+00 L7_l1linf:1.6953e+00 L8_l1linf:1.7031e+00 L9_l1linf:1.6797e+00 L10_l1linf:1.6875e+00 L11_l1linf:1.6875e+00 L12_l1linf:1.5547e+00 L1_spectral:7.0097e-02 L2_spectral:6.8857e-02 L3_spectral:6.8741e-02 L4_spectral:6.9145e-02 L5_spectral:6.9339e-02 L6_spectral:6.9415e-02 L7_spectral:6.9337e-02 L8_spectral:7.0909e-02 L9_spectral:6.9486e-02 L10_spectral:7.0423e-02 L11_spectral:6.9834e-02 L12_spectral:7.1200e-02 train_time:77723ms step_avg:64.77ms +[2025-09-11 06:42:56] [Rank 0] step:1201/10000 train_time:78829ms step_avg:65.64ms +[2025-09-11 06:42:56] [Rank 0] step:1201/10000 train_time:78829ms step_avg:65.64ms +[2025-09-11 06:42:56] [Rank 0] step:1221/10000 train_time:79464ms step_avg:65.08ms +[2025-09-11 06:42:56] [Rank 0] step:1221/10000 train_time:79464ms step_avg:65.08ms +[2025-09-11 06:42:57] [Rank 0] step:1241/10000 train_time:80111ms step_avg:64.55ms +[2025-09-11 06:42:57] [Rank 0] step:1241/10000 train_time:80111ms step_avg:64.55ms +[2025-09-11 06:42:58] [Rank 0] step:1261/10000 train_time:80758ms step_avg:64.04ms +[2025-09-11 06:42:58] [Rank 0] step:1261/10000 train_time:80758ms step_avg:64.04ms +[2025-09-11 06:42:58] [Rank 0] step:1281/10000 train_time:81405ms step_avg:63.55ms +[2025-09-11 06:42:58] [Rank 0] step:1281/10000 train_time:81405ms step_avg:63.55ms +[2025-09-11 06:42:59] [Rank 0] step:1301/10000 train_time:82052ms step_avg:63.07ms +[2025-09-11 06:42:59] [Rank 0] step:1301/10000 train_time:82052ms step_avg:63.07ms +[2025-09-11 06:43:00] [Rank 0] step:1321/10000 train_time:82699ms step_avg:62.60ms +[2025-09-11 06:43:00] [Rank 0] step:1321/10000 train_time:82699ms step_avg:62.60ms +[2025-09-11 06:43:00] [Rank 0] step:1341/10000 train_time:83346ms step_avg:62.15ms +[2025-09-11 06:43:00] [Rank 0] step:1341/10000 train_time:83346ms step_avg:62.15ms +[2025-09-11 06:43:01] [Rank 0] step:1361/10000 train_time:83992ms step_avg:61.71ms +[2025-09-11 06:43:01] [Rank 0] step:1361/10000 train_time:83992ms step_avg:61.71ms +[2025-09-11 06:43:02] 
[Rank 0] step:1381/10000 train_time:84638ms step_avg:61.29ms +[2025-09-11 06:43:02] [Rank 0] step:1381/10000 train_time:84638ms step_avg:61.29ms +[2025-09-11 06:43:02] [Rank 0] step:1401/10000 train_time:85288ms step_avg:60.88ms +[2025-09-11 06:43:02] [Rank 0] step:1401/10000 train_time:85288ms step_avg:60.88ms +[2025-09-11 06:43:03] [Rank 0] step:1421/10000 train_time:85935ms step_avg:60.47ms +[2025-09-11 06:43:03] [Rank 0] step:1421/10000 train_time:85935ms step_avg:60.47ms +[2025-09-11 06:43:04] [Rank 0] step:1441/10000 train_time:86580ms step_avg:60.08ms +[2025-09-11 06:43:04] [Rank 0] step:1441/10000 train_time:86580ms step_avg:60.08ms +[2025-09-11 06:43:04] [Rank 0] step:1461/10000 train_time:87226ms step_avg:59.70ms +[2025-09-11 06:43:04] [Rank 0] step:1461/10000 train_time:87226ms step_avg:59.70ms +[2025-09-11 06:43:05] [Rank 0] step:1481/10000 train_time:87872ms step_avg:59.33ms +[2025-09-11 06:43:05] [Rank 0] step:1481/10000 train_time:87872ms step_avg:59.33ms +[2025-09-11 06:43:05] [Rank 0] step:1501/10000 train_time:88522ms step_avg:58.98ms +[2025-09-11 06:43:05] [Rank 0] step:1501/10000 train_time:88522ms step_avg:58.98ms +[2025-09-11 06:43:06] [Rank 0] step:1521/10000 train_time:89173ms step_avg:58.63ms +[2025-09-11 06:43:06] [Rank 0] step:1521/10000 train_time:89173ms step_avg:58.63ms +[2025-09-11 06:43:07] [Rank 0] step:1541/10000 train_time:89823ms step_avg:58.29ms +[2025-09-11 06:43:07] [Rank 0] step:1541/10000 train_time:89823ms step_avg:58.29ms +[2025-09-11 06:43:07] [Rank 0] step:1561/10000 train_time:90473ms step_avg:57.96ms +[2025-09-11 06:43:07] [Rank 0] step:1561/10000 train_time:90473ms step_avg:57.96ms +[2025-09-11 06:43:08] [Rank 0] step:1581/10000 train_time:91123ms step_avg:57.64ms +[2025-09-11 06:43:08] [Rank 0] step:1581/10000 train_time:91123ms step_avg:57.64ms +[2025-09-11 06:43:09] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 06:43:09] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 06:43:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 06:43:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 06:43:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 06:43:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 06:43:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:43:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:43:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 06:43:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 06:43:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 06:43:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 06:43:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 06:43:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 06:43:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 06:43:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 06:43:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 06:43:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 06:43:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 06:43:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 06:43:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 06:43:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 06:43:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 06:43:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 06:43:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 06:43:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 06:43:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 06:43:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 06:43:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 06:43:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 06:43:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 06:43:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 06:43:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 06:43:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 06:43:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 06:43:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 06:43:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 06:43:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 06:43:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 06:43:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 06:43:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 06:43:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 06:43:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 06:43:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 06:43:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 06:43:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 06:43:19] [Rank 0] PRINT: step:1600/10000 val_loss:5.0065 total_sharp:1.0111e-04 L1_sharp:2.2246e-03 L2_sharp:-4.0771e-05 L3_sharp:1.0795e-04 L4_sharp:1.2539e-04 L5_sharp:1.3795e-04 L6_sharp:1.7314e-04 L7_sharp:2.4400e-04 L8_sharp:3.4080e-04 L9_sharp:2.2270e-04 L10_sharp:2.6954e-04 L11_sharp:4.8507e-04 L12_sharp:4.9501e-03 total_fnorm:1.9100e+02 total_l1_linf:4.7718e+05 total_spectral:9.6000e+01 L1_fnorm:6.0938e+00 L2_fnorm:6.1562e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.0938e+00 L5_fnorm:6.1562e+00 L6_fnorm:6.1875e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.1562e+00 L9_fnorm:6.2188e+00 L10_fnorm:6.2500e+00 L11_fnorm:6.2500e+00 L12_fnorm:6.1875e+00 L1_l1linf:1.7656e+00 L2_l1linf:1.7188e+00 L3_l1linf:1.6484e+00 L4_l1linf:1.6719e+00 L5_l1linf:1.6250e+00 L6_l1linf:1.6328e+00 L7_l1linf:1.6406e+00 L8_l1linf:1.6406e+00 L9_l1linf:1.6172e+00 L10_l1linf:1.6172e+00 L11_l1linf:1.6328e+00 L12_l1linf:1.5391e+00 L1_spectral:7.1567e-02 L2_spectral:7.0610e-02 L3_spectral:7.0735e-02 L4_spectral:7.0626e-02 L5_spectral:7.1634e-02 L6_spectral:7.1063e-02 L7_spectral:7.1446e-02 L8_spectral:7.4745e-02 L9_spectral:7.1717e-02 L10_spectral:7.2468e-02 L11_spectral:7.2596e-02 L12_spectral:7.3996e-02 train_time:91755ms step_avg:57.35ms +[2025-09-11 06:43:19] [Rank 0] PRINT: step:1600/10000 
val_loss:5.0065 total_sharp:1.0111e-04 L1_sharp:2.2246e-03 L2_sharp:-4.0771e-05 L3_sharp:1.0795e-04 L4_sharp:1.2539e-04 L5_sharp:1.3795e-04 L6_sharp:1.7314e-04 L7_sharp:2.4400e-04 L8_sharp:3.4080e-04 L9_sharp:2.2270e-04 L10_sharp:2.6954e-04 L11_sharp:4.8507e-04 L12_sharp:4.9501e-03 total_fnorm:1.9100e+02 total_l1_linf:4.7718e+05 total_spectral:9.6000e+01 L1_fnorm:6.0938e+00 L2_fnorm:6.1562e+00 L3_fnorm:6.2188e+00 L4_fnorm:6.0938e+00 L5_fnorm:6.1562e+00 L6_fnorm:6.1875e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.1562e+00 L9_fnorm:6.2188e+00 L10_fnorm:6.2500e+00 L11_fnorm:6.2500e+00 L12_fnorm:6.1875e+00 L1_l1linf:1.7656e+00 L2_l1linf:1.7188e+00 L3_l1linf:1.6484e+00 L4_l1linf:1.6719e+00 L5_l1linf:1.6250e+00 L6_l1linf:1.6328e+00 L7_l1linf:1.6406e+00 L8_l1linf:1.6406e+00 L9_l1linf:1.6172e+00 L10_l1linf:1.6172e+00 L11_l1linf:1.6328e+00 L12_l1linf:1.5391e+00 L1_spectral:7.1567e-02 L2_spectral:7.0610e-02 L3_spectral:7.0735e-02 L4_spectral:7.0626e-02 L5_spectral:7.1634e-02 L6_spectral:7.1063e-02 L7_spectral:7.1446e-02 L8_spectral:7.4745e-02 L9_spectral:7.1717e-02 L10_spectral:7.2468e-02 L11_spectral:7.2596e-02 L12_spectral:7.3996e-02 train_time:91755ms step_avg:57.35ms +[2025-09-11 06:43:20] [Rank 0] step:1601/10000 train_time:92845ms step_avg:57.99ms +[2025-09-11 06:43:20] [Rank 0] step:1601/10000 train_time:92845ms step_avg:57.99ms +[2025-09-11 06:43:20] [Rank 0] step:1621/10000 train_time:93485ms step_avg:57.67ms +[2025-09-11 06:43:20] [Rank 0] step:1621/10000 train_time:93485ms step_avg:57.67ms +[2025-09-11 06:43:21] [Rank 0] step:1641/10000 train_time:94138ms step_avg:57.37ms +[2025-09-11 06:43:21] [Rank 0] step:1641/10000 train_time:94138ms step_avg:57.37ms +[2025-09-11 06:43:22] [Rank 0] step:1661/10000 train_time:94790ms step_avg:57.07ms +[2025-09-11 06:43:22] [Rank 0] step:1661/10000 train_time:94790ms step_avg:57.07ms +[2025-09-11 06:43:22] [Rank 0] step:1681/10000 train_time:95441ms step_avg:56.78ms +[2025-09-11 06:43:22] [Rank 0] step:1681/10000 train_time:95441ms 
step_avg:56.78ms +[2025-09-11 06:43:23] [Rank 0] step:1701/10000 train_time:96093ms step_avg:56.49ms +[2025-09-11 06:43:23] [Rank 0] step:1701/10000 train_time:96093ms step_avg:56.49ms +[2025-09-11 06:43:24] [Rank 0] step:1721/10000 train_time:96746ms step_avg:56.21ms +[2025-09-11 06:43:24] [Rank 0] step:1721/10000 train_time:96746ms step_avg:56.21ms +[2025-09-11 06:43:24] [Rank 0] step:1741/10000 train_time:97396ms step_avg:55.94ms +[2025-09-11 06:43:24] [Rank 0] step:1741/10000 train_time:97396ms step_avg:55.94ms +[2025-09-11 06:43:25] [Rank 0] step:1761/10000 train_time:98048ms step_avg:55.68ms +[2025-09-11 06:43:25] [Rank 0] step:1761/10000 train_time:98048ms step_avg:55.68ms +[2025-09-11 06:43:26] [Rank 0] step:1781/10000 train_time:98699ms step_avg:55.42ms +[2025-09-11 06:43:26] [Rank 0] step:1781/10000 train_time:98699ms step_avg:55.42ms +[2025-09-11 06:43:26] [Rank 0] step:1801/10000 train_time:99349ms step_avg:55.16ms +[2025-09-11 06:43:26] [Rank 0] step:1801/10000 train_time:99349ms step_avg:55.16ms +[2025-09-11 06:43:27] [Rank 0] step:1821/10000 train_time:100000ms step_avg:54.91ms +[2025-09-11 06:43:27] [Rank 0] step:1821/10000 train_time:100000ms step_avg:54.91ms +[2025-09-11 06:43:28] [Rank 0] step:1841/10000 train_time:100651ms step_avg:54.67ms +[2025-09-11 06:43:28] [Rank 0] step:1841/10000 train_time:100651ms step_avg:54.67ms +[2025-09-11 06:43:28] [Rank 0] step:1861/10000 train_time:101302ms step_avg:54.43ms +[2025-09-11 06:43:28] [Rank 0] step:1861/10000 train_time:101302ms step_avg:54.43ms +[2025-09-11 06:43:29] [Rank 0] step:1881/10000 train_time:101953ms step_avg:54.20ms +[2025-09-11 06:43:29] [Rank 0] step:1881/10000 train_time:101953ms step_avg:54.20ms +[2025-09-11 06:43:30] [Rank 0] step:1901/10000 train_time:102604ms step_avg:53.97ms +[2025-09-11 06:43:30] [Rank 0] step:1901/10000 train_time:102604ms step_avg:53.97ms +[2025-09-11 06:43:30] [Rank 0] step:1921/10000 train_time:103254ms step_avg:53.75ms +[2025-09-11 06:43:30] [Rank 0] 
step:1921/10000 train_time:103254ms step_avg:53.75ms +[2025-09-11 06:43:31] [Rank 0] step:1941/10000 train_time:103905ms step_avg:53.53ms +[2025-09-11 06:43:31] [Rank 0] step:1941/10000 train_time:103905ms step_avg:53.53ms +[2025-09-11 06:43:31] [Rank 0] step:1961/10000 train_time:104556ms step_avg:53.32ms +[2025-09-11 06:43:31] [Rank 0] step:1961/10000 train_time:104556ms step_avg:53.32ms +[2025-09-11 06:43:32] [Rank 0] step:1981/10000 train_time:105206ms step_avg:53.11ms +[2025-09-11 06:43:32] [Rank 0] step:1981/10000 train_time:105206ms step_avg:53.11ms +[2025-09-11 06:43:33] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 06:43:33] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 06:43:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 06:43:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 06:43:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 06:43:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 06:43:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:43:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:43:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 06:43:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 06:43:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 06:43:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 06:43:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... 
+[2025-09-11 06:43:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 06:43:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 06:43:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 06:43:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 06:43:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 06:43:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 06:43:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 06:43:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 06:43:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 06:43:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 06:43:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 06:43:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 06:43:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 06:43:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 06:43:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 06:43:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 06:43:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 06:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 06:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 06:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 06:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 06:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 06:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 06:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 06:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 06:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 06:43:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 06:43:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 06:43:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 06:43:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 06:43:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 06:43:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 06:43:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:43:45] [Rank 0] PRINT: step:2000/10000 val_loss:4.8594 total_sharp:7.4769e-05 L1_sharp:1.6164e-03 L2_sharp:9.5331e-05 L3_sharp:2.6516e-05 L4_sharp:6.0935e-05 L5_sharp:1.2250e-04 L6_sharp:1.8060e-04 L7_sharp:1.5421e-04 L8_sharp:3.2722e-04 L9_sharp:1.8076e-04 L10_sharp:2.6138e-04 L11_sharp:4.4876e-04 L12_sharp:4.5170e-03 total_fnorm:1.9300e+02 total_l1_linf:4.9357e+05 total_spectral:9.7000e+01 L1_fnorm:6.0938e+00 L2_fnorm:6.1562e+00 L3_fnorm:6.1875e+00 L4_fnorm:6.1250e+00 L5_fnorm:6.1875e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.1562e+00 L9_fnorm:6.2812e+00 L10_fnorm:6.2812e+00 L11_fnorm:6.3125e+00 L12_fnorm:6.2500e+00 L1_l1linf:1.7422e+00 L2_l1linf:1.6641e+00 L3_l1linf:1.5312e+00 L4_l1linf:1.6172e+00 L5_l1linf:1.5938e+00 L6_l1linf:1.6172e+00 L7_l1linf:1.6172e+00 L8_l1linf:1.6016e+00 L9_l1linf:1.5859e+00 L10_l1linf:1.5938e+00 L11_l1linf:1.5938e+00 L12_l1linf:1.5312e+00 L1_spectral:7.2858e-02 L2_spectral:7.2106e-02 L3_spectral:7.2230e-02 L4_spectral:7.2214e-02 L5_spectral:7.3390e-02 L6_spectral:7.3302e-02 L7_spectral:7.3034e-02 L8_spectral:7.5254e-02 L9_spectral:7.3472e-02 L10_spectral:7.4270e-02 L11_spectral:7.5085e-02 L12_spectral:7.4785e-02 train_time:105839ms step_avg:52.92ms +[2025-09-11 06:43:45] [Rank 0] PRINT: step:2000/10000 val_loss:4.8594 total_sharp:7.4769e-05 L1_sharp:1.6164e-03 L2_sharp:9.5331e-05 L3_sharp:2.6516e-05 L4_sharp:6.0935e-05 L5_sharp:1.2250e-04 L6_sharp:1.8060e-04 L7_sharp:1.5421e-04 L8_sharp:3.2722e-04 L9_sharp:1.8076e-04 L10_sharp:2.6138e-04 L11_sharp:4.4876e-04 L12_sharp:4.5170e-03 total_fnorm:1.9300e+02 total_l1_linf:4.9357e+05 total_spectral:9.7000e+01 L1_fnorm:6.0938e+00 L2_fnorm:6.1562e+00 L3_fnorm:6.1875e+00 L4_fnorm:6.1250e+00 L5_fnorm:6.1875e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2188e+00 L8_fnorm:6.1562e+00 L9_fnorm:6.2812e+00 L10_fnorm:6.2812e+00 L11_fnorm:6.3125e+00 L12_fnorm:6.2500e+00 L1_l1linf:1.7422e+00 L2_l1linf:1.6641e+00 L3_l1linf:1.5312e+00 L4_l1linf:1.6172e+00 L5_l1linf:1.5938e+00 
L6_l1linf:1.6172e+00 L7_l1linf:1.6172e+00 L8_l1linf:1.6016e+00 L9_l1linf:1.5859e+00 L10_l1linf:1.5938e+00 L11_l1linf:1.5938e+00 L12_l1linf:1.5312e+00 L1_spectral:7.2858e-02 L2_spectral:7.2106e-02 L3_spectral:7.2230e-02 L4_spectral:7.2214e-02 L5_spectral:7.3390e-02 L6_spectral:7.3302e-02 L7_spectral:7.3034e-02 L8_spectral:7.5254e-02 L9_spectral:7.3472e-02 L10_spectral:7.4270e-02 L11_spectral:7.5085e-02 L12_spectral:7.4785e-02 train_time:105839ms step_avg:52.92ms +[2025-09-11 06:43:46] [Rank 0] step:2001/10000 train_time:106964ms step_avg:53.46ms +[2025-09-11 06:43:46] [Rank 0] step:2001/10000 train_time:106964ms step_avg:53.46ms +[2025-09-11 06:43:47] [Rank 0] step:2021/10000 train_time:108227ms step_avg:53.55ms +[2025-09-11 06:43:47] [Rank 0] step:2021/10000 train_time:108227ms step_avg:53.55ms +[2025-09-11 06:43:48] [Rank 0] step:2041/10000 train_time:108880ms step_avg:53.35ms +[2025-09-11 06:43:48] [Rank 0] step:2041/10000 train_time:108880ms step_avg:53.35ms +[2025-09-11 06:43:48] [Rank 0] step:2061/10000 train_time:109531ms step_avg:53.14ms +[2025-09-11 06:43:48] [Rank 0] step:2061/10000 train_time:109531ms step_avg:53.14ms +[2025-09-11 06:43:49] [Rank 0] step:2081/10000 train_time:110490ms step_avg:53.09ms +[2025-09-11 06:43:49] [Rank 0] step:2081/10000 train_time:110490ms step_avg:53.09ms +[2025-09-11 06:43:50] [Rank 0] step:2101/10000 train_time:111141ms step_avg:52.90ms +[2025-09-11 06:43:50] [Rank 0] step:2101/10000 train_time:111141ms step_avg:52.90ms +[2025-09-11 06:43:51] [Rank 0] step:2121/10000 train_time:111793ms step_avg:52.71ms +[2025-09-11 06:43:51] [Rank 0] step:2121/10000 train_time:111793ms step_avg:52.71ms +[2025-09-11 06:43:51] [Rank 0] step:2141/10000 train_time:112445ms step_avg:52.52ms +[2025-09-11 06:43:51] [Rank 0] step:2141/10000 train_time:112445ms step_avg:52.52ms +[2025-09-11 06:43:52] [Rank 0] step:2161/10000 train_time:113096ms step_avg:52.34ms +[2025-09-11 06:43:52] [Rank 0] step:2161/10000 train_time:113096ms step_avg:52.34ms 
+[2025-09-11 06:43:52] [Rank 0] step:2181/10000 train_time:113747ms step_avg:52.15ms +[2025-09-11 06:43:52] [Rank 0] step:2181/10000 train_time:113747ms step_avg:52.15ms +[2025-09-11 06:43:53] [Rank 0] step:2201/10000 train_time:114399ms step_avg:51.98ms +[2025-09-11 06:43:53] [Rank 0] step:2201/10000 train_time:114399ms step_avg:51.98ms +[2025-09-11 06:43:54] [Rank 0] step:2221/10000 train_time:115050ms step_avg:51.80ms +[2025-09-11 06:43:54] [Rank 0] step:2221/10000 train_time:115050ms step_avg:51.80ms +[2025-09-11 06:43:54] [Rank 0] step:2241/10000 train_time:115713ms step_avg:51.63ms +[2025-09-11 06:43:54] [Rank 0] step:2241/10000 train_time:115713ms step_avg:51.63ms +[2025-09-11 06:43:55] [Rank 0] step:2261/10000 train_time:116378ms step_avg:51.47ms +[2025-09-11 06:43:55] [Rank 0] step:2261/10000 train_time:116378ms step_avg:51.47ms +[2025-09-11 06:43:56] [Rank 0] step:2281/10000 train_time:117042ms step_avg:51.31ms +[2025-09-11 06:43:56] [Rank 0] step:2281/10000 train_time:117042ms step_avg:51.31ms +[2025-09-11 06:43:56] [Rank 0] step:2301/10000 train_time:117707ms step_avg:51.15ms +[2025-09-11 06:43:56] [Rank 0] step:2301/10000 train_time:117707ms step_avg:51.15ms +[2025-09-11 06:43:57] [Rank 0] step:2321/10000 train_time:118372ms step_avg:51.00ms +[2025-09-11 06:43:57] [Rank 0] step:2321/10000 train_time:118372ms step_avg:51.00ms +[2025-09-11 06:43:58] [Rank 0] step:2341/10000 train_time:119037ms step_avg:50.85ms +[2025-09-11 06:43:58] [Rank 0] step:2341/10000 train_time:119037ms step_avg:50.85ms +[2025-09-11 06:43:58] [Rank 0] step:2361/10000 train_time:119701ms step_avg:50.70ms +[2025-09-11 06:43:58] [Rank 0] step:2361/10000 train_time:119701ms step_avg:50.70ms +[2025-09-11 06:43:59] [Rank 0] step:2381/10000 train_time:120366ms step_avg:50.55ms +[2025-09-11 06:43:59] [Rank 0] step:2381/10000 train_time:120366ms step_avg:50.55ms +[2025-09-11 06:44:00] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 06:44:00] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 06:44:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 06:44:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 06:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 06:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 06:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 06:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 06:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 06:44:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 06:44:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 06:44:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 06:44:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 06:44:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 06:44:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 06:44:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 06:44:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 06:44:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 06:44:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 06:44:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 06:44:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 06:44:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 06:44:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 06:44:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 06:44:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 06:44:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 06:44:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 06:44:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 06:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 06:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 06:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 06:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 06:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 06:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 06:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 06:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 06:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 06:44:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 06:44:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 06:44:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 06:44:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 06:44:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 06:44:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 06:44:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 06:44:10] [Rank 0] PRINT: step:2400/10000 val_loss:4.7341 total_sharp:7.3532e-05 L1_sharp:1.2625e-03 L2_sharp:-3.7698e-06 L3_sharp:4.5837e-05 L4_sharp:1.1794e-04 L5_sharp:3.0024e-04 L6_sharp:1.2704e-04 L7_sharp:9.3138e-05 L8_sharp:1.9616e-04 L9_sharp:1.8740e-04 L10_sharp:2.3138e-04 L11_sharp:3.9771e-04 L12_sharp:2.5273e-03 total_fnorm:1.7600e+02 total_l1_linf:4.3827e+05 total_spectral:8.8500e+01 L1_fnorm:6.0938e+00 L2_fnorm:6.1562e+00 L3_fnorm:6.0938e+00 L4_fnorm:6.1562e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2500e+00 L8_fnorm:6.1562e+00 L9_fnorm:6.2812e+00 L10_fnorm:6.3125e+00 L11_fnorm:6.3125e+00 L12_fnorm:6.2500e+00 L1_l1linf:1.7188e+00 L2_l1linf:1.5781e+00 L3_l1linf:1.2812e+00 L4_l1linf:1.5547e+00 L5_l1linf:1.5547e+00 L6_l1linf:1.5703e+00 L7_l1linf:1.5859e+00 L8_l1linf:1.6094e+00 L9_l1linf:1.5859e+00 L10_l1linf:1.5703e+00 L11_l1linf:1.5625e+00 L12_l1linf:1.5156e+00 L1_spectral:7.4330e-02 L2_spectral:7.3161e-02 L3_spectral:7.3205e-02 L4_spectral:7.3072e-02 L5_spectral:7.4576e-02 L6_spectral:7.4366e-02 L7_spectral:7.4560e-02 L8_spectral:7.6631e-02 L9_spectral:7.4998e-02 L10_spectral:7.5802e-02 L11_spectral:7.5815e-02 L12_spectral:7.6454e-02 train_time:121011ms step_avg:50.42ms +[2025-09-11 06:44:10] [Rank 0] PRINT: step:2400/10000 
val_loss:4.7341 total_sharp:7.3532e-05 L1_sharp:1.2625e-03 L2_sharp:-3.7698e-06 L3_sharp:4.5837e-05 L4_sharp:1.1794e-04 L5_sharp:3.0024e-04 L6_sharp:1.2704e-04 L7_sharp:9.3138e-05 L8_sharp:1.9616e-04 L9_sharp:1.8740e-04 L10_sharp:2.3138e-04 L11_sharp:3.9771e-04 L12_sharp:2.5273e-03 total_fnorm:1.7600e+02 total_l1_linf:4.3827e+05 total_spectral:8.8500e+01 L1_fnorm:6.0938e+00 L2_fnorm:6.1562e+00 L3_fnorm:6.0938e+00 L4_fnorm:6.1562e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2500e+00 L8_fnorm:6.1562e+00 L9_fnorm:6.2812e+00 L10_fnorm:6.3125e+00 L11_fnorm:6.3125e+00 L12_fnorm:6.2500e+00 L1_l1linf:1.7188e+00 L2_l1linf:1.5781e+00 L3_l1linf:1.2812e+00 L4_l1linf:1.5547e+00 L5_l1linf:1.5547e+00 L6_l1linf:1.5703e+00 L7_l1linf:1.5859e+00 L8_l1linf:1.6094e+00 L9_l1linf:1.5859e+00 L10_l1linf:1.5703e+00 L11_l1linf:1.5625e+00 L12_l1linf:1.5156e+00 L1_spectral:7.4330e-02 L2_spectral:7.3161e-02 L3_spectral:7.3205e-02 L4_spectral:7.3072e-02 L5_spectral:7.4576e-02 L6_spectral:7.4366e-02 L7_spectral:7.4560e-02 L8_spectral:7.6631e-02 L9_spectral:7.4998e-02 L10_spectral:7.5802e-02 L11_spectral:7.5815e-02 L12_spectral:7.6454e-02 train_time:121011ms step_avg:50.42ms +[2025-09-11 06:44:11] [Rank 0] step:2401/10000 train_time:122194ms step_avg:50.89ms +[2025-09-11 06:44:11] [Rank 0] step:2401/10000 train_time:122194ms step_avg:50.89ms +[2025-09-11 06:44:11] [Rank 0] step:2421/10000 train_time:122863ms step_avg:50.75ms +[2025-09-11 06:44:11] [Rank 0] step:2421/10000 train_time:122863ms step_avg:50.75ms +[2025-09-11 06:44:12] [Rank 0] step:2441/10000 train_time:123529ms step_avg:50.61ms +[2025-09-11 06:44:12] [Rank 0] step:2441/10000 train_time:123529ms step_avg:50.61ms +[2025-09-11 06:44:13] [Rank 0] step:2461/10000 train_time:124194ms step_avg:50.47ms +[2025-09-11 06:44:13] [Rank 0] step:2461/10000 train_time:124194ms step_avg:50.47ms +[2025-09-11 06:44:13] [Rank 0] step:2481/10000 train_time:124859ms step_avg:50.33ms +[2025-09-11 06:44:13] [Rank 0] step:2481/10000 
train_time:124859ms step_avg:50.33ms +[2025-09-11 06:44:14] [Rank 0] step:2501/10000 train_time:125523ms step_avg:50.19ms +[2025-09-11 06:44:14] [Rank 0] step:2501/10000 train_time:125523ms step_avg:50.19ms +[2025-09-11 06:44:15] [Rank 0] step:2521/10000 train_time:126189ms step_avg:50.05ms +[2025-09-11 06:44:15] [Rank 0] step:2521/10000 train_time:126189ms step_avg:50.05ms +[2025-09-11 06:44:15] [Rank 0] step:2541/10000 train_time:126853ms step_avg:49.92ms +[2025-09-11 06:44:15] [Rank 0] step:2541/10000 train_time:126853ms step_avg:49.92ms +[2025-09-11 06:44:16] [Rank 0] step:2561/10000 train_time:127518ms step_avg:49.79ms +[2025-09-11 06:44:16] [Rank 0] step:2561/10000 train_time:127518ms step_avg:49.79ms +[2025-09-11 06:44:17] [Rank 0] step:2581/10000 train_time:128183ms step_avg:49.66ms +[2025-09-11 06:44:17] [Rank 0] step:2581/10000 train_time:128183ms step_avg:49.66ms +[2025-09-11 06:44:17] [Rank 0] step:2601/10000 train_time:128848ms step_avg:49.54ms +[2025-09-11 06:44:17] [Rank 0] step:2601/10000 train_time:128848ms step_avg:49.54ms +[2025-09-11 06:44:18] [Rank 0] step:2621/10000 train_time:129513ms step_avg:49.41ms +[2025-09-11 06:44:18] [Rank 0] step:2621/10000 train_time:129513ms step_avg:49.41ms +[2025-09-11 06:44:19] [Rank 0] step:2641/10000 train_time:130178ms step_avg:49.29ms +[2025-09-11 06:44:19] [Rank 0] step:2641/10000 train_time:130178ms step_avg:49.29ms +[2025-09-11 06:44:19] [Rank 0] step:2661/10000 train_time:130844ms step_avg:49.17ms +[2025-09-11 06:44:19] [Rank 0] step:2661/10000 train_time:130844ms step_avg:49.17ms +[2025-09-11 06:44:20] [Rank 0] step:2681/10000 train_time:131509ms step_avg:49.05ms +[2025-09-11 06:44:20] [Rank 0] step:2681/10000 train_time:131509ms step_avg:49.05ms +[2025-09-11 06:44:21] [Rank 0] step:2701/10000 train_time:132173ms step_avg:48.93ms +[2025-09-11 06:44:21] [Rank 0] step:2701/10000 train_time:132173ms step_avg:48.93ms +[2025-09-11 06:44:21] [Rank 0] step:2721/10000 train_time:132837ms step_avg:48.82ms 
+[2025-09-11 06:44:21] [Rank 0] step:2721/10000 train_time:132837ms step_avg:48.82ms +[2025-09-11 06:44:22] [Rank 0] step:2741/10000 train_time:133502ms step_avg:48.71ms +[2025-09-11 06:44:22] [Rank 0] step:2741/10000 train_time:133502ms step_avg:48.71ms +[2025-09-11 06:44:23] [Rank 0] step:2761/10000 train_time:134166ms step_avg:48.59ms +[2025-09-11 06:44:23] [Rank 0] step:2761/10000 train_time:134166ms step_avg:48.59ms +[2025-09-11 06:44:23] [Rank 0] step:2781/10000 train_time:134830ms step_avg:48.48ms +[2025-09-11 06:44:23] [Rank 0] step:2781/10000 train_time:134830ms step_avg:48.48ms +[2025-09-11 06:44:24] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 06:44:24] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 06:44:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 06:44:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 06:44:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 06:44:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 06:44:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:44:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:44:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 06:44:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 06:44:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 06:44:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 06:44:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 06:44:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 06:44:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 06:44:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 06:44:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 06:44:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 06:44:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 06:44:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 06:44:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 06:44:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 06:44:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 06:44:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 06:44:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 06:44:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 06:44:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 06:44:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 06:44:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 06:44:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 06:44:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 06:44:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 06:44:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 06:44:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 06:44:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 06:44:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 06:44:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 06:44:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 06:44:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 06:44:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 06:44:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 06:44:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 06:44:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 06:44:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 06:44:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 06:44:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:44:34] [Rank 0] PRINT: step:2800/10000 val_loss:4.6716 total_sharp:7.1875e-05 L1_sharp:1.1517e-03 L2_sharp:-4.6026e-05 L3_sharp:4.9490e-05 L4_sharp:1.8958e-04 L5_sharp:1.1778e-04 L6_sharp:1.3811e-04 L7_sharp:8.8476e-05 L8_sharp:2.6517e-04 L9_sharp:2.3622e-04 L10_sharp:2.6345e-04 L11_sharp:4.3438e-04 L12_sharp:3.1525e-03 total_fnorm:1.8200e+02 total_l1_linf:4.5056e+05 total_spectral:9.2000e+01 L1_fnorm:6.0625e+00 L2_fnorm:6.1562e+00 L3_fnorm:6.0000e+00 L4_fnorm:6.1562e+00 L5_fnorm:6.2500e+00 L6_fnorm:6.2812e+00 L7_fnorm:6.2500e+00 L8_fnorm:6.1562e+00 L9_fnorm:6.2812e+00 L10_fnorm:6.3125e+00 L11_fnorm:6.3125e+00 L12_fnorm:6.2812e+00 L1_l1linf:1.6797e+00 L2_l1linf:1.5469e+00 L3_l1linf:1.1562e+00 L4_l1linf:1.5234e+00 L5_l1linf:1.5391e+00 L6_l1linf:1.5625e+00 L7_l1linf:1.6016e+00 L8_l1linf:1.5781e+00 L9_l1linf:1.5703e+00 L10_l1linf:1.5625e+00 L11_l1linf:1.5469e+00 L12_l1linf:1.5000e+00 L1_spectral:7.4934e-02 L2_spectral:7.4677e-02 L3_spectral:7.4616e-02 L4_spectral:7.4218e-02 L5_spectral:7.5498e-02 L6_spectral:7.4931e-02 L7_spectral:7.5288e-02 L8_spectral:7.6778e-02 L9_spectral:7.6348e-02 L10_spectral:7.7366e-02 L11_spectral:7.6890e-02 L12_spectral:7.6238e-02 train_time:135478ms step_avg:48.38ms +[2025-09-11 06:44:34] [Rank 0] PRINT: step:2800/10000 val_loss:4.6716 total_sharp:7.1875e-05 L1_sharp:1.1517e-03 L2_sharp:-4.6026e-05 L3_sharp:4.9490e-05 L4_sharp:1.8958e-04 L5_sharp:1.1778e-04 L6_sharp:1.3811e-04 L7_sharp:8.8476e-05 L8_sharp:2.6517e-04 L9_sharp:2.3622e-04 L10_sharp:2.6345e-04 L11_sharp:4.3438e-04 L12_sharp:3.1525e-03 total_fnorm:1.8200e+02 total_l1_linf:4.5056e+05 total_spectral:9.2000e+01 L1_fnorm:6.0625e+00 L2_fnorm:6.1562e+00 L3_fnorm:6.0000e+00 L4_fnorm:6.1562e+00 L5_fnorm:6.2500e+00 L6_fnorm:6.2812e+00 L7_fnorm:6.2500e+00 L8_fnorm:6.1562e+00 L9_fnorm:6.2812e+00 L10_fnorm:6.3125e+00 L11_fnorm:6.3125e+00 L12_fnorm:6.2812e+00 L1_l1linf:1.6797e+00 L2_l1linf:1.5469e+00 L3_l1linf:1.1562e+00 L4_l1linf:1.5234e+00 L5_l1linf:1.5391e+00 
L6_l1linf:1.5625e+00 L7_l1linf:1.6016e+00 L8_l1linf:1.5781e+00 L9_l1linf:1.5703e+00 L10_l1linf:1.5625e+00 L11_l1linf:1.5469e+00 L12_l1linf:1.5000e+00 L1_spectral:7.4934e-02 L2_spectral:7.4677e-02 L3_spectral:7.4616e-02 L4_spectral:7.4218e-02 L5_spectral:7.5498e-02 L6_spectral:7.4931e-02 L7_spectral:7.5288e-02 L8_spectral:7.6778e-02 L9_spectral:7.6348e-02 L10_spectral:7.7366e-02 L11_spectral:7.6890e-02 L12_spectral:7.6238e-02 train_time:135478ms step_avg:48.38ms +[2025-09-11 06:44:35] [Rank 0] step:2801/10000 train_time:136600ms step_avg:48.77ms +[2025-09-11 06:44:35] [Rank 0] step:2801/10000 train_time:136600ms step_avg:48.77ms +[2025-09-11 06:44:36] [Rank 0] step:2821/10000 train_time:137269ms step_avg:48.66ms +[2025-09-11 06:44:36] [Rank 0] step:2821/10000 train_time:137269ms step_avg:48.66ms +[2025-09-11 06:44:36] [Rank 0] step:2841/10000 train_time:137935ms step_avg:48.55ms +[2025-09-11 06:44:36] [Rank 0] step:2841/10000 train_time:137935ms step_avg:48.55ms +[2025-09-11 06:44:37] [Rank 0] step:2861/10000 train_time:138601ms step_avg:48.44ms +[2025-09-11 06:44:37] [Rank 0] step:2861/10000 train_time:138601ms step_avg:48.44ms +[2025-09-11 06:44:38] [Rank 0] step:2881/10000 train_time:139265ms step_avg:48.34ms +[2025-09-11 06:44:38] [Rank 0] step:2881/10000 train_time:139265ms step_avg:48.34ms +[2025-09-11 06:44:38] [Rank 0] step:2901/10000 train_time:139930ms step_avg:48.24ms +[2025-09-11 06:44:38] [Rank 0] step:2901/10000 train_time:139930ms step_avg:48.24ms +[2025-09-11 06:44:39] [Rank 0] step:2921/10000 train_time:140595ms step_avg:48.13ms +[2025-09-11 06:44:39] [Rank 0] step:2921/10000 train_time:140595ms step_avg:48.13ms +[2025-09-11 06:44:40] [Rank 0] step:2941/10000 train_time:141260ms step_avg:48.03ms +[2025-09-11 06:44:40] [Rank 0] step:2941/10000 train_time:141260ms step_avg:48.03ms +[2025-09-11 06:44:40] [Rank 0] step:2961/10000 train_time:141924ms step_avg:47.93ms +[2025-09-11 06:44:40] [Rank 0] step:2961/10000 train_time:141924ms step_avg:47.93ms 
+[2025-09-11 06:44:41] [Rank 0] step:2981/10000 train_time:142590ms step_avg:47.83ms +[2025-09-11 06:44:41] [Rank 0] step:2981/10000 train_time:142590ms step_avg:47.83ms +[2025-09-11 06:44:42] [Rank 0] step:3001/10000 train_time:143258ms step_avg:47.74ms +[2025-09-11 06:44:42] [Rank 0] step:3001/10000 train_time:143258ms step_avg:47.74ms +[2025-09-11 06:44:42] [Rank 0] step:3021/10000 train_time:143925ms step_avg:47.64ms +[2025-09-11 06:44:42] [Rank 0] step:3021/10000 train_time:143925ms step_avg:47.64ms +[2025-09-11 06:44:43] [Rank 0] step:3041/10000 train_time:144591ms step_avg:47.55ms +[2025-09-11 06:44:43] [Rank 0] step:3041/10000 train_time:144591ms step_avg:47.55ms +[2025-09-11 06:44:44] [Rank 0] step:3061/10000 train_time:145258ms step_avg:47.45ms +[2025-09-11 06:44:44] [Rank 0] step:3061/10000 train_time:145258ms step_avg:47.45ms +[2025-09-11 06:44:44] [Rank 0] step:3081/10000 train_time:145925ms step_avg:47.36ms +[2025-09-11 06:44:44] [Rank 0] step:3081/10000 train_time:145925ms step_avg:47.36ms +[2025-09-11 06:44:45] [Rank 0] step:3101/10000 train_time:146592ms step_avg:47.27ms +[2025-09-11 06:44:45] [Rank 0] step:3101/10000 train_time:146592ms step_avg:47.27ms +[2025-09-11 06:44:46] [Rank 0] step:3121/10000 train_time:147259ms step_avg:47.18ms +[2025-09-11 06:44:46] [Rank 0] step:3121/10000 train_time:147259ms step_avg:47.18ms +[2025-09-11 06:44:46] [Rank 0] step:3141/10000 train_time:147925ms step_avg:47.09ms +[2025-09-11 06:44:46] [Rank 0] step:3141/10000 train_time:147925ms step_avg:47.09ms +[2025-09-11 06:44:47] [Rank 0] step:3161/10000 train_time:148592ms step_avg:47.01ms +[2025-09-11 06:44:47] [Rank 0] step:3161/10000 train_time:148592ms step_avg:47.01ms +[2025-09-11 06:44:48] [Rank 0] step:3181/10000 train_time:149259ms step_avg:46.92ms +[2025-09-11 06:44:48] [Rank 0] step:3181/10000 train_time:149259ms step_avg:46.92ms +[2025-09-11 06:44:48] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 06:44:48] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 06:44:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 06:44:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 06:44:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 06:44:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 06:44:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:44:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:44:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 06:44:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 06:44:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 06:44:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 06:44:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 06:44:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 06:44:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 06:44:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 06:44:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 06:44:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 06:44:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 06:44:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 06:44:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 06:44:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 06:44:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 06:44:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 06:44:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 06:44:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 06:44:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 06:44:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 06:44:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 06:44:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 06:44:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 06:44:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 06:44:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 06:44:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 06:44:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 06:44:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 06:44:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 06:44:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 06:44:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 06:44:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 06:44:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 06:44:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 06:44:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 06:44:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 06:44:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 06:44:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 06:44:58] [Rank 0] PRINT: step:3200/10000 val_loss:4.5998 total_sharp:5.1434e-05 L1_sharp:1.0779e-03 L2_sharp:2.5813e-05 L3_sharp:2.9980e-05 L4_sharp:8.0764e-05 L5_sharp:1.0324e-04 L6_sharp:1.3225e-04 L7_sharp:1.5393e-04 L8_sharp:2.2831e-04 L9_sharp:1.9148e-04 L10_sharp:2.2764e-04 L11_sharp:3.6332e-04 L12_sharp:2.3022e-03 total_fnorm:1.9500e+02 total_l1_linf:4.9766e+05 total_spectral:9.7500e+01 L1_fnorm:6.0938e+00 L2_fnorm:6.2188e+00 L3_fnorm:5.9688e+00 L4_fnorm:6.1562e+00 L5_fnorm:6.2500e+00 L6_fnorm:6.3125e+00 L7_fnorm:6.3125e+00 L8_fnorm:6.1875e+00 L9_fnorm:6.3438e+00 L10_fnorm:6.3438e+00 L11_fnorm:6.3125e+00 L12_fnorm:6.2812e+00 L1_l1linf:1.6562e+00 L2_l1linf:1.5312e+00 L3_l1linf:1.0469e+00 L4_l1linf:1.5000e+00 L5_l1linf:1.5859e+00 L6_l1linf:1.5781e+00 L7_l1linf:1.6094e+00 L8_l1linf:1.5859e+00 L9_l1linf:1.5781e+00 L10_l1linf:1.5469e+00 L11_l1linf:1.5156e+00 L12_l1linf:1.4844e+00 L1_spectral:7.6100e-02 L2_spectral:7.5325e-02 L3_spectral:7.5368e-02 L4_spectral:7.5346e-02 L5_spectral:7.5888e-02 L6_spectral:7.6154e-02 L7_spectral:7.6057e-02 L8_spectral:7.7570e-02 L9_spectral:7.6989e-02 L10_spectral:7.7071e-02 L11_spectral:7.7995e-02 L12_spectral:7.7721e-02 train_time:149907ms step_avg:46.85ms +[2025-09-11 06:44:58] [Rank 0] PRINT: step:3200/10000 
val_loss:4.5998 total_sharp:5.1434e-05 L1_sharp:1.0779e-03 L2_sharp:2.5813e-05 L3_sharp:2.9980e-05 L4_sharp:8.0764e-05 L5_sharp:1.0324e-04 L6_sharp:1.3225e-04 L7_sharp:1.5393e-04 L8_sharp:2.2831e-04 L9_sharp:1.9148e-04 L10_sharp:2.2764e-04 L11_sharp:3.6332e-04 L12_sharp:2.3022e-03 total_fnorm:1.9500e+02 total_l1_linf:4.9766e+05 total_spectral:9.7500e+01 L1_fnorm:6.0938e+00 L2_fnorm:6.2188e+00 L3_fnorm:5.9688e+00 L4_fnorm:6.1562e+00 L5_fnorm:6.2500e+00 L6_fnorm:6.3125e+00 L7_fnorm:6.3125e+00 L8_fnorm:6.1875e+00 L9_fnorm:6.3438e+00 L10_fnorm:6.3438e+00 L11_fnorm:6.3125e+00 L12_fnorm:6.2812e+00 L1_l1linf:1.6562e+00 L2_l1linf:1.5312e+00 L3_l1linf:1.0469e+00 L4_l1linf:1.5000e+00 L5_l1linf:1.5859e+00 L6_l1linf:1.5781e+00 L7_l1linf:1.6094e+00 L8_l1linf:1.5859e+00 L9_l1linf:1.5781e+00 L10_l1linf:1.5469e+00 L11_l1linf:1.5156e+00 L12_l1linf:1.4844e+00 L1_spectral:7.6100e-02 L2_spectral:7.5325e-02 L3_spectral:7.5368e-02 L4_spectral:7.5346e-02 L5_spectral:7.5888e-02 L6_spectral:7.6154e-02 L7_spectral:7.6057e-02 L8_spectral:7.7570e-02 L9_spectral:7.6989e-02 L10_spectral:7.7071e-02 L11_spectral:7.7995e-02 L12_spectral:7.7721e-02 train_time:149907ms step_avg:46.85ms +[2025-09-11 06:45:00] [Rank 0] step:3201/10000 train_time:151060ms step_avg:47.19ms +[2025-09-11 06:45:00] [Rank 0] step:3201/10000 train_time:151060ms step_avg:47.19ms +[2025-09-11 06:45:00] [Rank 0] step:3221/10000 train_time:151732ms step_avg:47.11ms +[2025-09-11 06:45:00] [Rank 0] step:3221/10000 train_time:151732ms step_avg:47.11ms +[2025-09-11 06:45:01] [Rank 0] step:3241/10000 train_time:152475ms step_avg:47.05ms +[2025-09-11 06:45:01] [Rank 0] step:3241/10000 train_time:152475ms step_avg:47.05ms +[2025-09-11 06:45:02] [Rank 0] step:3261/10000 train_time:153179ms step_avg:46.97ms +[2025-09-11 06:45:02] [Rank 0] step:3261/10000 train_time:153179ms step_avg:46.97ms +[2025-09-11 06:45:02] [Rank 0] step:3281/10000 train_time:153907ms step_avg:46.91ms +[2025-09-11 06:45:02] [Rank 0] step:3281/10000 
train_time:153907ms step_avg:46.91ms +[2025-09-11 06:45:03] [Rank 0] step:3301/10000 train_time:154576ms step_avg:46.83ms +[2025-09-11 06:45:03] [Rank 0] step:3301/10000 train_time:154576ms step_avg:46.83ms +[2025-09-11 06:45:04] [Rank 0] step:3321/10000 train_time:155245ms step_avg:46.75ms +[2025-09-11 06:45:04] [Rank 0] step:3321/10000 train_time:155245ms step_avg:46.75ms +[2025-09-11 06:45:04] [Rank 0] step:3341/10000 train_time:155912ms step_avg:46.67ms +[2025-09-11 06:45:04] [Rank 0] step:3341/10000 train_time:155912ms step_avg:46.67ms +[2025-09-11 06:45:05] [Rank 0] step:3361/10000 train_time:156581ms step_avg:46.59ms +[2025-09-11 06:45:05] [Rank 0] step:3361/10000 train_time:156581ms step_avg:46.59ms +[2025-09-11 06:45:06] [Rank 0] step:3381/10000 train_time:157249ms step_avg:46.51ms +[2025-09-11 06:45:06] [Rank 0] step:3381/10000 train_time:157249ms step_avg:46.51ms +[2025-09-11 06:45:07] [Rank 0] step:3401/10000 train_time:157917ms step_avg:46.43ms +[2025-09-11 06:45:07] [Rank 0] step:3401/10000 train_time:157917ms step_avg:46.43ms +[2025-09-11 06:45:07] [Rank 0] step:3421/10000 train_time:158584ms step_avg:46.36ms +[2025-09-11 06:45:07] [Rank 0] step:3421/10000 train_time:158584ms step_avg:46.36ms +[2025-09-11 06:45:08] [Rank 0] step:3441/10000 train_time:159251ms step_avg:46.28ms +[2025-09-11 06:45:08] [Rank 0] step:3441/10000 train_time:159251ms step_avg:46.28ms +[2025-09-11 06:45:09] [Rank 0] step:3461/10000 train_time:159918ms step_avg:46.21ms +[2025-09-11 06:45:09] [Rank 0] step:3461/10000 train_time:159918ms step_avg:46.21ms +[2025-09-11 06:45:09] [Rank 0] step:3481/10000 train_time:160587ms step_avg:46.13ms +[2025-09-11 06:45:09] [Rank 0] step:3481/10000 train_time:160587ms step_avg:46.13ms +[2025-09-11 06:45:10] [Rank 0] step:3501/10000 train_time:161254ms step_avg:46.06ms +[2025-09-11 06:45:10] [Rank 0] step:3501/10000 train_time:161254ms step_avg:46.06ms +[2025-09-11 06:45:11] [Rank 0] step:3521/10000 train_time:161922ms step_avg:45.99ms 
+[2025-09-11 06:45:11] [Rank 0] step:3521/10000 train_time:161922ms step_avg:45.99ms +[2025-09-11 06:45:11] [Rank 0] step:3541/10000 train_time:162590ms step_avg:45.92ms +[2025-09-11 06:45:11] [Rank 0] step:3541/10000 train_time:162590ms step_avg:45.92ms +[2025-09-11 06:45:12] [Rank 0] step:3561/10000 train_time:163257ms step_avg:45.85ms +[2025-09-11 06:45:12] [Rank 0] step:3561/10000 train_time:163257ms step_avg:45.85ms +[2025-09-11 06:45:13] [Rank 0] step:3581/10000 train_time:163925ms step_avg:45.78ms +[2025-09-11 06:45:13] [Rank 0] step:3581/10000 train_time:163925ms step_avg:45.78ms +[2025-09-11 06:45:13] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 06:45:13] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 06:45:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 06:45:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 06:45:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 06:45:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 06:45:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:45:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:45:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 06:45:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 06:45:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 06:45:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 06:45:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 06:45:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 06:45:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 06:45:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 06:45:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 06:45:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 06:45:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 06:45:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 06:45:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 06:45:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 06:45:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 06:45:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 06:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 06:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 06:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 06:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 06:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 06:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 06:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 06:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 06:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 06:45:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 06:45:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 06:45:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 06:45:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 06:45:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 06:45:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 06:45:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 06:45:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 06:45:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 06:45:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 06:45:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 06:45:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 06:45:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:45:23] [Rank 0] PRINT: step:3600/10000 val_loss:4.5593 total_sharp:7.5813e-05 L1_sharp:6.2670e-04 L2_sharp:1.7640e-05 L3_sharp:1.4602e-05 L4_sharp:3.8575e-07 L5_sharp:1.4145e-04 L6_sharp:1.0603e-04 L7_sharp:1.2631e-04 L8_sharp:1.5662e-04 L9_sharp:1.8994e-04 L10_sharp:2.2043e-04 L11_sharp:4.0638e-04 L12_sharp:1.2308e-02 total_fnorm:1.7700e+02 total_l1_linf:4.4032e+05 total_spectral:8.9000e+01 L1_fnorm:6.0625e+00 L2_fnorm:6.1562e+00 L3_fnorm:5.8750e+00 L4_fnorm:6.1562e+00 L5_fnorm:6.2812e+00 L6_fnorm:6.3125e+00 L7_fnorm:6.3125e+00 L8_fnorm:6.1875e+00 L9_fnorm:6.3125e+00 L10_fnorm:6.3125e+00 L11_fnorm:6.3125e+00 L12_fnorm:6.3125e+00 L1_l1linf:1.6328e+00 L2_l1linf:1.5000e+00 L3_l1linf:1.0625e+00 L4_l1linf:1.4844e+00 L5_l1linf:1.5312e+00 L6_l1linf:1.5625e+00 L7_l1linf:1.5938e+00 L8_l1linf:1.6016e+00 L9_l1linf:1.5625e+00 L10_l1linf:1.5625e+00 L11_l1linf:1.5000e+00 L12_l1linf:1.4922e+00 L1_spectral:7.6063e-02 L2_spectral:7.5908e-02 L3_spectral:7.5916e-02 L4_spectral:7.5802e-02 L5_spectral:7.6520e-02 L6_spectral:7.6912e-02 L7_spectral:7.7064e-02 L8_spectral:7.7994e-02 L9_spectral:7.7435e-02 L10_spectral:7.8390e-02 L11_spectral:7.8008e-02 L12_spectral:7.7329e-02 train_time:164574ms step_avg:45.72ms +[2025-09-11 06:45:23] [Rank 0] PRINT: step:3600/10000 val_loss:4.5593 total_sharp:7.5813e-05 L1_sharp:6.2670e-04 L2_sharp:1.7640e-05 L3_sharp:1.4602e-05 L4_sharp:3.8575e-07 L5_sharp:1.4145e-04 L6_sharp:1.0603e-04 L7_sharp:1.2631e-04 L8_sharp:1.5662e-04 L9_sharp:1.8994e-04 L10_sharp:2.2043e-04 L11_sharp:4.0638e-04 L12_sharp:1.2308e-02 total_fnorm:1.7700e+02 total_l1_linf:4.4032e+05 total_spectral:8.9000e+01 L1_fnorm:6.0625e+00 L2_fnorm:6.1562e+00 L3_fnorm:5.8750e+00 L4_fnorm:6.1562e+00 L5_fnorm:6.2812e+00 L6_fnorm:6.3125e+00 L7_fnorm:6.3125e+00 L8_fnorm:6.1875e+00 L9_fnorm:6.3125e+00 L10_fnorm:6.3125e+00 L11_fnorm:6.3125e+00 L12_fnorm:6.3125e+00 L1_l1linf:1.6328e+00 L2_l1linf:1.5000e+00 L3_l1linf:1.0625e+00 L4_l1linf:1.4844e+00 L5_l1linf:1.5312e+00 
L6_l1linf:1.5625e+00 L7_l1linf:1.5938e+00 L8_l1linf:1.6016e+00 L9_l1linf:1.5625e+00 L10_l1linf:1.5625e+00 L11_l1linf:1.5000e+00 L12_l1linf:1.4922e+00 L1_spectral:7.6063e-02 L2_spectral:7.5908e-02 L3_spectral:7.5916e-02 L4_spectral:7.5802e-02 L5_spectral:7.6520e-02 L6_spectral:7.6912e-02 L7_spectral:7.7064e-02 L8_spectral:7.7994e-02 L9_spectral:7.7435e-02 L10_spectral:7.8390e-02 L11_spectral:7.8008e-02 L12_spectral:7.7329e-02 train_time:164574ms step_avg:45.72ms +[2025-09-11 06:45:24] [Rank 0] step:3601/10000 train_time:165708ms step_avg:46.02ms +[2025-09-11 06:45:24] [Rank 0] step:3601/10000 train_time:165708ms step_avg:46.02ms +[2025-09-11 06:45:25] [Rank 0] step:3621/10000 train_time:166365ms step_avg:45.94ms +[2025-09-11 06:45:25] [Rank 0] step:3621/10000 train_time:166365ms step_avg:45.94ms +[2025-09-11 06:45:26] [Rank 0] step:3641/10000 train_time:167033ms step_avg:45.88ms +[2025-09-11 06:45:26] [Rank 0] step:3641/10000 train_time:167033ms step_avg:45.88ms +[2025-09-11 06:45:26] [Rank 0] step:3661/10000 train_time:167700ms step_avg:45.81ms +[2025-09-11 06:45:26] [Rank 0] step:3661/10000 train_time:167700ms step_avg:45.81ms +[2025-09-11 06:45:27] [Rank 0] step:3681/10000 train_time:168367ms step_avg:45.74ms +[2025-09-11 06:45:27] [Rank 0] step:3681/10000 train_time:168367ms step_avg:45.74ms +[2025-09-11 06:45:28] [Rank 0] step:3701/10000 train_time:169035ms step_avg:45.67ms +[2025-09-11 06:45:28] [Rank 0] step:3701/10000 train_time:169035ms step_avg:45.67ms +[2025-09-11 06:45:28] [Rank 0] step:3721/10000 train_time:169712ms step_avg:45.61ms +[2025-09-11 06:45:28] [Rank 0] step:3721/10000 train_time:169712ms step_avg:45.61ms +[2025-09-11 06:45:29] [Rank 0] step:3741/10000 train_time:170390ms step_avg:45.55ms +[2025-09-11 06:45:29] [Rank 0] step:3741/10000 train_time:170390ms step_avg:45.55ms +[2025-09-11 06:45:30] [Rank 0] step:3761/10000 train_time:171069ms step_avg:45.49ms +[2025-09-11 06:45:30] [Rank 0] step:3761/10000 train_time:171069ms step_avg:45.49ms 
+[2025-09-11 06:45:30] [Rank 0] step:3781/10000 train_time:171747ms step_avg:45.42ms +[2025-09-11 06:45:30] [Rank 0] step:3781/10000 train_time:171747ms step_avg:45.42ms +[2025-09-11 06:45:31] [Rank 0] step:3801/10000 train_time:172426ms step_avg:45.36ms +[2025-09-11 06:45:31] [Rank 0] step:3801/10000 train_time:172426ms step_avg:45.36ms +[2025-09-11 06:45:32] [Rank 0] step:3821/10000 train_time:173104ms step_avg:45.30ms +[2025-09-11 06:45:32] [Rank 0] step:3821/10000 train_time:173104ms step_avg:45.30ms +[2025-09-11 06:45:32] [Rank 0] step:3841/10000 train_time:173783ms step_avg:45.24ms +[2025-09-11 06:45:32] [Rank 0] step:3841/10000 train_time:173783ms step_avg:45.24ms +[2025-09-11 06:45:33] [Rank 0] step:3861/10000 train_time:174461ms step_avg:45.19ms +[2025-09-11 06:45:33] [Rank 0] step:3861/10000 train_time:174461ms step_avg:45.19ms +[2025-09-11 06:45:34] [Rank 0] step:3881/10000 train_time:175139ms step_avg:45.13ms +[2025-09-11 06:45:34] [Rank 0] step:3881/10000 train_time:175139ms step_avg:45.13ms +[2025-09-11 06:45:35] [Rank 0] step:3901/10000 train_time:175816ms step_avg:45.07ms +[2025-09-11 06:45:35] [Rank 0] step:3901/10000 train_time:175816ms step_avg:45.07ms +[2025-09-11 06:45:35] [Rank 0] step:3921/10000 train_time:176494ms step_avg:45.01ms +[2025-09-11 06:45:35] [Rank 0] step:3921/10000 train_time:176494ms step_avg:45.01ms +[2025-09-11 06:45:36] [Rank 0] step:3941/10000 train_time:177173ms step_avg:44.96ms +[2025-09-11 06:45:36] [Rank 0] step:3941/10000 train_time:177173ms step_avg:44.96ms +[2025-09-11 06:45:37] [Rank 0] step:3961/10000 train_time:177851ms step_avg:44.90ms +[2025-09-11 06:45:37] [Rank 0] step:3961/10000 train_time:177851ms step_avg:44.90ms +[2025-09-11 06:45:37] [Rank 0] step:3981/10000 train_time:178529ms step_avg:44.85ms +[2025-09-11 06:45:37] [Rank 0] step:3981/10000 train_time:178529ms step_avg:44.85ms +[2025-09-11 06:45:38] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 06:45:38] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 06:45:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 06:45:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 06:45:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 06:45:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 06:45:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:45:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:45:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 06:45:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 06:45:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 06:45:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 06:45:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 06:45:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 06:45:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 06:45:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 06:45:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 06:45:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 06:45:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 06:45:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 06:45:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 06:45:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 06:45:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 06:45:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 06:45:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 06:45:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 06:45:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 06:45:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 06:45:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 06:45:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 06:45:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 06:45:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 06:45:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 06:45:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 06:45:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 06:45:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 06:45:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 06:45:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 06:45:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 06:45:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 06:45:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 06:45:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 06:45:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 06:45:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 06:45:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 06:45:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 06:45:48] [Rank 0] PRINT: step:4000/10000 val_loss:4.5058 total_sharp:6.7504e-05 L1_sharp:9.2114e-04 L2_sharp:4.1656e-05 L3_sharp:5.8154e-06 L4_sharp:5.2067e-05 L5_sharp:1.3099e-04 L6_sharp:1.2962e-04 L7_sharp:7.3051e-05 L8_sharp:2.2135e-04 L9_sharp:1.8482e-04 L10_sharp:2.3269e-04 L11_sharp:4.3190e-04 L12_sharp:6.8777e-03 total_fnorm:1.9100e+02 total_l1_linf:4.7514e+05 total_spectral:9.6000e+01 L1_fnorm:5.9688e+00 L2_fnorm:6.0312e+00 L3_fnorm:5.7500e+00 L4_fnorm:6.0938e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2812e+00 L7_fnorm:6.2500e+00 L8_fnorm:6.0938e+00 L9_fnorm:6.2812e+00 L10_fnorm:6.2812e+00 L11_fnorm:6.2500e+00 L12_fnorm:6.2812e+00 L1_l1linf:1.6094e+00 L2_l1linf:1.4453e+00 L3_l1linf:1.0859e+00 L4_l1linf:1.4531e+00 L5_l1linf:1.5234e+00 L6_l1linf:1.5625e+00 L7_l1linf:1.5859e+00 L8_l1linf:1.5625e+00 L9_l1linf:1.5703e+00 L10_l1linf:1.5234e+00 L11_l1linf:1.4688e+00 L12_l1linf:1.5000e+00 L1_spectral:7.6585e-02 L2_spectral:7.5716e-02 L3_spectral:7.6374e-02 L4_spectral:7.6473e-02 L5_spectral:7.7518e-02 L6_spectral:7.7236e-02 L7_spectral:7.7148e-02 L8_spectral:7.7905e-02 L9_spectral:7.7830e-02 L10_spectral:7.8242e-02 L11_spectral:7.8180e-02 L12_spectral:7.7047e-02 train_time:179188ms step_avg:44.80ms +[2025-09-11 06:45:48] [Rank 0] PRINT: step:4000/10000 
val_loss:4.5058 total_sharp:6.7504e-05 L1_sharp:9.2114e-04 L2_sharp:4.1656e-05 L3_sharp:5.8154e-06 L4_sharp:5.2067e-05 L5_sharp:1.3099e-04 L6_sharp:1.2962e-04 L7_sharp:7.3051e-05 L8_sharp:2.2135e-04 L9_sharp:1.8482e-04 L10_sharp:2.3269e-04 L11_sharp:4.3190e-04 L12_sharp:6.8777e-03 total_fnorm:1.9100e+02 total_l1_linf:4.7514e+05 total_spectral:9.6000e+01 L1_fnorm:5.9688e+00 L2_fnorm:6.0312e+00 L3_fnorm:5.7500e+00 L4_fnorm:6.0938e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2812e+00 L7_fnorm:6.2500e+00 L8_fnorm:6.0938e+00 L9_fnorm:6.2812e+00 L10_fnorm:6.2812e+00 L11_fnorm:6.2500e+00 L12_fnorm:6.2812e+00 L1_l1linf:1.6094e+00 L2_l1linf:1.4453e+00 L3_l1linf:1.0859e+00 L4_l1linf:1.4531e+00 L5_l1linf:1.5234e+00 L6_l1linf:1.5625e+00 L7_l1linf:1.5859e+00 L8_l1linf:1.5625e+00 L9_l1linf:1.5703e+00 L10_l1linf:1.5234e+00 L11_l1linf:1.4688e+00 L12_l1linf:1.5000e+00 L1_spectral:7.6585e-02 L2_spectral:7.5716e-02 L3_spectral:7.6374e-02 L4_spectral:7.6473e-02 L5_spectral:7.7518e-02 L6_spectral:7.7236e-02 L7_spectral:7.7148e-02 L8_spectral:7.7905e-02 L9_spectral:7.7830e-02 L10_spectral:7.8242e-02 L11_spectral:7.8180e-02 L12_spectral:7.7047e-02 train_time:179188ms step_avg:44.80ms +[2025-09-11 06:45:49] [Rank 0] step:4001/10000 train_time:180325ms step_avg:45.07ms +[2025-09-11 06:45:49] [Rank 0] step:4001/10000 train_time:180325ms step_avg:45.07ms +[2025-09-11 06:45:50] [Rank 0] step:4021/10000 train_time:181002ms step_avg:45.01ms +[2025-09-11 06:45:50] [Rank 0] step:4021/10000 train_time:181002ms step_avg:45.01ms +[2025-09-11 06:45:50] [Rank 0] step:4041/10000 train_time:181681ms step_avg:44.96ms +[2025-09-11 06:45:50] [Rank 0] step:4041/10000 train_time:181681ms step_avg:44.96ms +[2025-09-11 06:45:51] [Rank 0] step:4061/10000 train_time:182359ms step_avg:44.91ms +[2025-09-11 06:45:51] [Rank 0] step:4061/10000 train_time:182359ms step_avg:44.91ms +[2025-09-11 06:45:52] [Rank 0] step:4081/10000 train_time:183038ms step_avg:44.85ms +[2025-09-11 06:45:52] [Rank 0] step:4081/10000 
train_time:183038ms step_avg:44.85ms +[2025-09-11 06:45:52] [Rank 0] step:4101/10000 train_time:183716ms step_avg:44.80ms +[2025-09-11 06:45:52] [Rank 0] step:4101/10000 train_time:183716ms step_avg:44.80ms +[2025-09-11 06:45:54] [Rank 0] step:4121/10000 train_time:185007ms step_avg:44.89ms +[2025-09-11 06:45:54] [Rank 0] step:4121/10000 train_time:185007ms step_avg:44.89ms +[2025-09-11 06:45:54] [Rank 0] step:4141/10000 train_time:185684ms step_avg:44.84ms +[2025-09-11 06:45:54] [Rank 0] step:4141/10000 train_time:185684ms step_avg:44.84ms +[2025-09-11 06:45:55] [Rank 0] step:4161/10000 train_time:186362ms step_avg:44.79ms +[2025-09-11 06:45:55] [Rank 0] step:4161/10000 train_time:186362ms step_avg:44.79ms +[2025-09-11 06:45:56] [Rank 0] step:4181/10000 train_time:187315ms step_avg:44.80ms +[2025-09-11 06:45:56] [Rank 0] step:4181/10000 train_time:187315ms step_avg:44.80ms +[2025-09-11 06:45:57] [Rank 0] step:4201/10000 train_time:187993ms step_avg:44.75ms +[2025-09-11 06:45:57] [Rank 0] step:4201/10000 train_time:187993ms step_avg:44.75ms +[2025-09-11 06:45:57] [Rank 0] step:4221/10000 train_time:188671ms step_avg:44.70ms +[2025-09-11 06:45:57] [Rank 0] step:4221/10000 train_time:188671ms step_avg:44.70ms +[2025-09-11 06:45:58] [Rank 0] step:4241/10000 train_time:189349ms step_avg:44.65ms +[2025-09-11 06:45:58] [Rank 0] step:4241/10000 train_time:189349ms step_avg:44.65ms +[2025-09-11 06:45:59] [Rank 0] step:4261/10000 train_time:190028ms step_avg:44.60ms +[2025-09-11 06:45:59] [Rank 0] step:4261/10000 train_time:190028ms step_avg:44.60ms +[2025-09-11 06:45:59] [Rank 0] step:4281/10000 train_time:190708ms step_avg:44.55ms +[2025-09-11 06:45:59] [Rank 0] step:4281/10000 train_time:190708ms step_avg:44.55ms +[2025-09-11 06:46:00] [Rank 0] step:4301/10000 train_time:191388ms step_avg:44.50ms +[2025-09-11 06:46:00] [Rank 0] step:4301/10000 train_time:191388ms step_avg:44.50ms +[2025-09-11 06:46:01] [Rank 0] step:4321/10000 train_time:192067ms step_avg:44.45ms 
+[2025-09-11 06:46:01] [Rank 0] step:4321/10000 train_time:192067ms step_avg:44.45ms +[2025-09-11 06:46:01] [Rank 0] step:4341/10000 train_time:192746ms step_avg:44.40ms +[2025-09-11 06:46:01] [Rank 0] step:4341/10000 train_time:192746ms step_avg:44.40ms +[2025-09-11 06:46:02] [Rank 0] step:4361/10000 train_time:193424ms step_avg:44.35ms +[2025-09-11 06:46:02] [Rank 0] step:4361/10000 train_time:193424ms step_avg:44.35ms +[2025-09-11 06:46:03] [Rank 0] step:4381/10000 train_time:194113ms step_avg:44.31ms +[2025-09-11 06:46:03] [Rank 0] step:4381/10000 train_time:194113ms step_avg:44.31ms +[2025-09-11 06:46:03] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 06:46:03] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 06:46:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 06:46:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 06:46:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 06:46:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 06:46:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:46:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:46:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 06:46:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 06:46:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 06:46:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 06:46:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 06:46:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 06:46:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 06:46:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 06:46:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 06:46:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 06:46:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 06:46:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 06:46:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 06:46:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 06:46:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 06:46:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 06:46:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 06:46:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 06:46:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 06:46:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 06:46:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 06:46:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 06:46:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 06:46:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 06:46:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 06:46:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 06:46:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 06:46:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 06:46:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 06:46:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 06:46:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 06:46:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 06:46:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 06:46:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 06:46:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 06:46:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 06:46:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 06:46:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:46:13] [Rank 0] PRINT: step:4400/10000 val_loss:4.4955 total_sharp:5.0829e-05 L1_sharp:7.4760e-04 L2_sharp:2.1883e-05 L3_sharp:3.1694e-05 L4_sharp:7.3675e-05 L5_sharp:1.0902e-04 L6_sharp:1.3493e-04 L7_sharp:5.1789e-05 L8_sharp:2.0224e-04 L9_sharp:2.3095e-04 L10_sharp:2.1154e-04 L11_sharp:3.5532e-04 L12_sharp:2.0794e-03 total_fnorm:1.8000e+02 total_l1_linf:4.3827e+05 total_spectral:9.1000e+01 L1_fnorm:6.0000e+00 L2_fnorm:6.0312e+00 L3_fnorm:5.6562e+00 L4_fnorm:6.0938e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2500e+00 L8_fnorm:6.0938e+00 L9_fnorm:6.2812e+00 L10_fnorm:6.2812e+00 L11_fnorm:6.2500e+00 L12_fnorm:6.2812e+00 L1_l1linf:1.5859e+00 L2_l1linf:1.3906e+00 L3_l1linf:1.1250e+00 L4_l1linf:1.4453e+00 L5_l1linf:1.5234e+00 L6_l1linf:1.5391e+00 L7_l1linf:1.5547e+00 L8_l1linf:1.6016e+00 L9_l1linf:1.5469e+00 L10_l1linf:1.5234e+00 L11_l1linf:1.4844e+00 L12_l1linf:1.4766e+00 L1_spectral:7.7252e-02 L2_spectral:7.5546e-02 L3_spectral:7.6986e-02 L4_spectral:7.6525e-02 L5_spectral:7.7610e-02 L6_spectral:7.7719e-02 L7_spectral:7.7336e-02 L8_spectral:7.8080e-02 L9_spectral:7.8116e-02 L10_spectral:7.8852e-02 L11_spectral:7.8010e-02 L12_spectral:7.7191e-02 train_time:194772ms step_avg:44.27ms +[2025-09-11 06:46:13] [Rank 0] PRINT: step:4400/10000 val_loss:4.4955 total_sharp:5.0829e-05 L1_sharp:7.4760e-04 L2_sharp:2.1883e-05 L3_sharp:3.1694e-05 L4_sharp:7.3675e-05 L5_sharp:1.0902e-04 L6_sharp:1.3493e-04 L7_sharp:5.1789e-05 L8_sharp:2.0224e-04 L9_sharp:2.3095e-04 L10_sharp:2.1154e-04 L11_sharp:3.5532e-04 L12_sharp:2.0794e-03 total_fnorm:1.8000e+02 total_l1_linf:4.3827e+05 total_spectral:9.1000e+01 L1_fnorm:6.0000e+00 L2_fnorm:6.0312e+00 L3_fnorm:5.6562e+00 L4_fnorm:6.0938e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2500e+00 L8_fnorm:6.0938e+00 L9_fnorm:6.2812e+00 L10_fnorm:6.2812e+00 L11_fnorm:6.2500e+00 L12_fnorm:6.2812e+00 L1_l1linf:1.5859e+00 L2_l1linf:1.3906e+00 L3_l1linf:1.1250e+00 L4_l1linf:1.4453e+00 L5_l1linf:1.5234e+00 
L6_l1linf:1.5391e+00 L7_l1linf:1.5547e+00 L8_l1linf:1.6016e+00 L9_l1linf:1.5469e+00 L10_l1linf:1.5234e+00 L11_l1linf:1.4844e+00 L12_l1linf:1.4766e+00 L1_spectral:7.7252e-02 L2_spectral:7.5546e-02 L3_spectral:7.6986e-02 L4_spectral:7.6525e-02 L5_spectral:7.7610e-02 L6_spectral:7.7719e-02 L7_spectral:7.7336e-02 L8_spectral:7.8080e-02 L9_spectral:7.8116e-02 L10_spectral:7.8852e-02 L11_spectral:7.8010e-02 L12_spectral:7.7191e-02 train_time:194772ms step_avg:44.27ms +[2025-09-11 06:46:14] [Rank 0] step:4401/10000 train_time:195951ms step_avg:44.52ms +[2025-09-11 06:46:14] [Rank 0] step:4401/10000 train_time:195951ms step_avg:44.52ms +[2025-09-11 06:46:15] [Rank 0] step:4421/10000 train_time:196663ms step_avg:44.48ms +[2025-09-11 06:46:15] [Rank 0] step:4421/10000 train_time:196663ms step_avg:44.48ms +[2025-09-11 06:46:16] [Rank 0] step:4441/10000 train_time:197342ms step_avg:44.44ms +[2025-09-11 06:46:16] [Rank 0] step:4441/10000 train_time:197342ms step_avg:44.44ms +[2025-09-11 06:46:16] [Rank 0] step:4461/10000 train_time:198023ms step_avg:44.39ms +[2025-09-11 06:46:16] [Rank 0] step:4461/10000 train_time:198023ms step_avg:44.39ms +[2025-09-11 06:46:17] [Rank 0] step:4481/10000 train_time:198703ms step_avg:44.34ms +[2025-09-11 06:46:17] [Rank 0] step:4481/10000 train_time:198703ms step_avg:44.34ms +[2025-09-11 06:46:18] [Rank 0] step:4501/10000 train_time:199384ms step_avg:44.30ms +[2025-09-11 06:46:18] [Rank 0] step:4501/10000 train_time:199384ms step_avg:44.30ms +[2025-09-11 06:46:18] [Rank 0] step:4521/10000 train_time:200064ms step_avg:44.25ms +[2025-09-11 06:46:18] [Rank 0] step:4521/10000 train_time:200064ms step_avg:44.25ms +[2025-09-11 06:46:19] [Rank 0] step:4541/10000 train_time:200744ms step_avg:44.21ms +[2025-09-11 06:46:19] [Rank 0] step:4541/10000 train_time:200744ms step_avg:44.21ms +[2025-09-11 06:46:20] [Rank 0] step:4561/10000 train_time:201423ms step_avg:44.16ms +[2025-09-11 06:46:20] [Rank 0] step:4561/10000 train_time:201423ms step_avg:44.16ms 
+[2025-09-11 06:46:20] [Rank 0] step:4581/10000 train_time:202103ms step_avg:44.12ms +[2025-09-11 06:46:20] [Rank 0] step:4581/10000 train_time:202103ms step_avg:44.12ms +[2025-09-11 06:46:21] [Rank 0] step:4601/10000 train_time:202783ms step_avg:44.07ms +[2025-09-11 06:46:21] [Rank 0] step:4601/10000 train_time:202783ms step_avg:44.07ms +[2025-09-11 06:46:22] [Rank 0] step:4621/10000 train_time:203463ms step_avg:44.03ms +[2025-09-11 06:46:22] [Rank 0] step:4621/10000 train_time:203463ms step_avg:44.03ms +[2025-09-11 06:46:23] [Rank 0] step:4641/10000 train_time:204143ms step_avg:43.99ms +[2025-09-11 06:46:23] [Rank 0] step:4641/10000 train_time:204143ms step_avg:43.99ms +[2025-09-11 06:46:23] [Rank 0] step:4661/10000 train_time:204823ms step_avg:43.94ms +[2025-09-11 06:46:23] [Rank 0] step:4661/10000 train_time:204823ms step_avg:43.94ms +[2025-09-11 06:46:24] [Rank 0] step:4681/10000 train_time:205502ms step_avg:43.90ms +[2025-09-11 06:46:24] [Rank 0] step:4681/10000 train_time:205502ms step_avg:43.90ms +[2025-09-11 06:46:25] [Rank 0] step:4701/10000 train_time:206183ms step_avg:43.86ms +[2025-09-11 06:46:25] [Rank 0] step:4701/10000 train_time:206183ms step_avg:43.86ms +[2025-09-11 06:46:25] [Rank 0] step:4721/10000 train_time:206864ms step_avg:43.82ms +[2025-09-11 06:46:25] [Rank 0] step:4721/10000 train_time:206864ms step_avg:43.82ms +[2025-09-11 06:46:26] [Rank 0] step:4741/10000 train_time:207543ms step_avg:43.78ms +[2025-09-11 06:46:26] [Rank 0] step:4741/10000 train_time:207543ms step_avg:43.78ms +[2025-09-11 06:46:27] [Rank 0] step:4761/10000 train_time:208225ms step_avg:43.74ms +[2025-09-11 06:46:27] [Rank 0] step:4761/10000 train_time:208225ms step_avg:43.74ms +[2025-09-11 06:46:27] [Rank 0] step:4781/10000 train_time:208903ms step_avg:43.69ms +[2025-09-11 06:46:27] [Rank 0] step:4781/10000 train_time:208903ms step_avg:43.69ms +[2025-09-11 06:46:28] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 06:46:28] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 06:46:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 06:46:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 06:46:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 06:46:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 06:46:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:46:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:46:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 06:46:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 06:46:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 06:46:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 06:46:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 06:46:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 06:46:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 06:46:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 06:46:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 06:46:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 06:46:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 06:46:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 06:46:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 06:46:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 06:46:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 06:46:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 06:46:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 06:46:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 06:46:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 06:46:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 06:46:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 06:46:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 06:46:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 06:46:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 06:46:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 06:46:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 06:46:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 06:46:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 06:46:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 06:46:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 06:46:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 06:46:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 06:46:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 06:46:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 06:46:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 06:46:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 06:46:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 06:46:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 06:46:39] [Rank 0] PRINT: step:4800/10000 val_loss:4.4447 total_sharp:4.7943e-05 L1_sharp:4.8984e-04 L2_sharp:4.3359e-06 L3_sharp:2.6390e-05 L4_sharp:4.2147e-05 L5_sharp:2.5101e-05 L6_sharp:6.2271e-05 L7_sharp:7.4807e-05 L8_sharp:2.1201e-04 L9_sharp:1.8460e-04 L10_sharp:2.3593e-04 L11_sharp:3.2330e-04 L12_sharp:5.4725e-03 total_fnorm:1.8800e+02 total_l1_linf:4.7104e+05 total_spectral:9.5000e+01 L1_fnorm:6.0000e+00 L2_fnorm:6.0000e+00 L3_fnorm:5.5000e+00 L4_fnorm:6.1250e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2500e+00 L8_fnorm:6.0938e+00 L9_fnorm:6.2812e+00 L10_fnorm:6.2812e+00 L11_fnorm:6.2500e+00 L12_fnorm:6.2500e+00 L1_l1linf:1.5781e+00 L2_l1linf:1.3281e+00 L3_l1linf:1.1094e+00 L4_l1linf:1.4531e+00 L5_l1linf:1.5312e+00 L6_l1linf:1.5703e+00 L7_l1linf:1.5781e+00 L8_l1linf:1.5703e+00 L9_l1linf:1.5469e+00 L10_l1linf:1.5312e+00 L11_l1linf:1.4531e+00 L12_l1linf:1.4609e+00 L1_spectral:7.7500e-02 L2_spectral:7.6874e-02 L3_spectral:7.7568e-02 L4_spectral:7.6720e-02 L5_spectral:7.8097e-02 L6_spectral:7.8083e-02 L7_spectral:7.7960e-02 L8_spectral:7.8185e-02 L9_spectral:7.8564e-02 L10_spectral:7.8905e-02 L11_spectral:7.8976e-02 L12_spectral:7.7767e-02 train_time:209566ms step_avg:43.66ms +[2025-09-11 06:46:39] [Rank 0] PRINT: step:4800/10000 
val_loss:4.4447 total_sharp:4.7943e-05 L1_sharp:4.8984e-04 L2_sharp:4.3359e-06 L3_sharp:2.6390e-05 L4_sharp:4.2147e-05 L5_sharp:2.5101e-05 L6_sharp:6.2271e-05 L7_sharp:7.4807e-05 L8_sharp:2.1201e-04 L9_sharp:1.8460e-04 L10_sharp:2.3593e-04 L11_sharp:3.2330e-04 L12_sharp:5.4725e-03 total_fnorm:1.8800e+02 total_l1_linf:4.7104e+05 total_spectral:9.5000e+01 L1_fnorm:6.0000e+00 L2_fnorm:6.0000e+00 L3_fnorm:5.5000e+00 L4_fnorm:6.1250e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2500e+00 L8_fnorm:6.0938e+00 L9_fnorm:6.2812e+00 L10_fnorm:6.2812e+00 L11_fnorm:6.2500e+00 L12_fnorm:6.2500e+00 L1_l1linf:1.5781e+00 L2_l1linf:1.3281e+00 L3_l1linf:1.1094e+00 L4_l1linf:1.4531e+00 L5_l1linf:1.5312e+00 L6_l1linf:1.5703e+00 L7_l1linf:1.5781e+00 L8_l1linf:1.5703e+00 L9_l1linf:1.5469e+00 L10_l1linf:1.5312e+00 L11_l1linf:1.4531e+00 L12_l1linf:1.4609e+00 L1_spectral:7.7500e-02 L2_spectral:7.6874e-02 L3_spectral:7.7568e-02 L4_spectral:7.6720e-02 L5_spectral:7.8097e-02 L6_spectral:7.8083e-02 L7_spectral:7.7960e-02 L8_spectral:7.8185e-02 L9_spectral:7.8564e-02 L10_spectral:7.8905e-02 L11_spectral:7.8976e-02 L12_spectral:7.7767e-02 train_time:209566ms step_avg:43.66ms +[2025-09-11 06:46:42] [Rank 0] step:4801/10000 train_time:212281ms step_avg:44.22ms +[2025-09-11 06:46:42] [Rank 0] step:4801/10000 train_time:212281ms step_avg:44.22ms +[2025-09-11 06:46:42] [Rank 0] step:4821/10000 train_time:213038ms step_avg:44.19ms +[2025-09-11 06:46:42] [Rank 0] step:4821/10000 train_time:213038ms step_avg:44.19ms +[2025-09-11 06:46:43] [Rank 0] step:4841/10000 train_time:213719ms step_avg:44.15ms +[2025-09-11 06:46:43] [Rank 0] step:4841/10000 train_time:213719ms step_avg:44.15ms +[2025-09-11 06:46:44] [Rank 0] step:4861/10000 train_time:214398ms step_avg:44.11ms +[2025-09-11 06:46:44] [Rank 0] step:4861/10000 train_time:214398ms step_avg:44.11ms +[2025-09-11 06:46:44] [Rank 0] step:4881/10000 train_time:215079ms step_avg:44.06ms +[2025-09-11 06:46:44] [Rank 0] step:4881/10000 
train_time:215079ms step_avg:44.06ms +[2025-09-11 06:46:45] [Rank 0] step:4901/10000 train_time:215761ms step_avg:44.02ms +[2025-09-11 06:46:45] [Rank 0] step:4901/10000 train_time:215761ms step_avg:44.02ms +[2025-09-11 06:46:46] [Rank 0] step:4921/10000 train_time:216441ms step_avg:43.98ms +[2025-09-11 06:46:46] [Rank 0] step:4921/10000 train_time:216441ms step_avg:43.98ms +[2025-09-11 06:46:47] [Rank 0] step:4941/10000 train_time:217121ms step_avg:43.94ms +[2025-09-11 06:46:47] [Rank 0] step:4941/10000 train_time:217121ms step_avg:43.94ms +[2025-09-11 06:46:47] [Rank 0] step:4961/10000 train_time:217801ms step_avg:43.90ms +[2025-09-11 06:46:47] [Rank 0] step:4961/10000 train_time:217801ms step_avg:43.90ms +[2025-09-11 06:46:48] [Rank 0] step:4981/10000 train_time:218480ms step_avg:43.86ms +[2025-09-11 06:46:48] [Rank 0] step:4981/10000 train_time:218480ms step_avg:43.86ms +[2025-09-11 06:46:49] [Rank 0] step:5001/10000 train_time:219162ms step_avg:43.82ms +[2025-09-11 06:46:49] [Rank 0] step:5001/10000 train_time:219162ms step_avg:43.82ms +[2025-09-11 06:46:49] [Rank 0] step:5021/10000 train_time:219842ms step_avg:43.78ms +[2025-09-11 06:46:49] [Rank 0] step:5021/10000 train_time:219842ms step_avg:43.78ms +[2025-09-11 06:46:50] [Rank 0] step:5041/10000 train_time:220519ms step_avg:43.75ms +[2025-09-11 06:46:50] [Rank 0] step:5041/10000 train_time:220519ms step_avg:43.75ms +[2025-09-11 06:46:51] [Rank 0] step:5061/10000 train_time:221199ms step_avg:43.71ms +[2025-09-11 06:46:51] [Rank 0] step:5061/10000 train_time:221199ms step_avg:43.71ms +[2025-09-11 06:46:51] [Rank 0] step:5081/10000 train_time:221879ms step_avg:43.67ms +[2025-09-11 06:46:51] [Rank 0] step:5081/10000 train_time:221879ms step_avg:43.67ms +[2025-09-11 06:46:52] [Rank 0] step:5101/10000 train_time:222659ms step_avg:43.65ms +[2025-09-11 06:46:52] [Rank 0] step:5101/10000 train_time:222659ms step_avg:43.65ms +[2025-09-11 06:46:53] [Rank 0] step:5121/10000 train_time:223339ms step_avg:43.61ms 
+[2025-09-11 06:46:53] [Rank 0] step:5121/10000 train_time:223339ms step_avg:43.61ms +[2025-09-11 06:46:53] [Rank 0] step:5141/10000 train_time:224020ms step_avg:43.58ms +[2025-09-11 06:46:53] [Rank 0] step:5141/10000 train_time:224020ms step_avg:43.58ms +[2025-09-11 06:46:54] [Rank 0] step:5161/10000 train_time:224699ms step_avg:43.54ms +[2025-09-11 06:46:54] [Rank 0] step:5161/10000 train_time:224699ms step_avg:43.54ms +[2025-09-11 06:46:55] [Rank 0] step:5181/10000 train_time:225378ms step_avg:43.50ms +[2025-09-11 06:46:55] [Rank 0] step:5181/10000 train_time:225378ms step_avg:43.50ms +[2025-09-11 06:46:55] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 06:46:55] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 06:46:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 06:46:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 06:46:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 06:46:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 06:46:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:46:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:46:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 06:46:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 06:46:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 06:46:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 06:47:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 06:47:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 06:47:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 06:47:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 06:47:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 06:47:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 06:47:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 06:47:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 06:47:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 06:47:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 06:47:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 06:47:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 06:47:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 06:47:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 06:47:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 06:47:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 06:47:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 06:47:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 06:47:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 06:47:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 06:47:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 06:47:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 06:47:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 06:47:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 06:47:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 06:47:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 06:47:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 06:47:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 06:47:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 06:47:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 06:47:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 06:47:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 06:47:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 06:47:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:47:06] [Rank 0] PRINT: step:5200/10000 val_loss:4.4218 total_sharp:5.2116e-05 L1_sharp:5.6191e-04 L2_sharp:3.7950e-05 L3_sharp:8.5318e-05 L4_sharp:5.9574e-05 L5_sharp:9.8761e-05 L6_sharp:7.6433e-05 L7_sharp:1.2253e-04 L8_sharp:1.8725e-04 L9_sharp:1.9272e-04 L10_sharp:2.3077e-04 L11_sharp:3.3063e-04 L12_sharp:3.2255e-03 total_fnorm:1.7300e+02 total_l1_linf:4.1165e+05 total_spectral:8.7000e+01 L1_fnorm:6.0000e+00 L2_fnorm:6.0312e+00 L3_fnorm:5.5312e+00 L4_fnorm:6.1562e+00 L5_fnorm:6.2500e+00 L6_fnorm:6.2812e+00 L7_fnorm:6.2500e+00 L8_fnorm:6.1562e+00 L9_fnorm:6.2812e+00 L10_fnorm:6.2812e+00 L11_fnorm:6.2188e+00 L12_fnorm:6.2500e+00 L1_l1linf:1.5625e+00 L2_l1linf:1.2656e+00 L3_l1linf:1.1172e+00 L4_l1linf:1.4219e+00 L5_l1linf:1.5312e+00 L6_l1linf:1.5234e+00 L7_l1linf:1.5625e+00 L8_l1linf:1.5625e+00 L9_l1linf:1.5547e+00 L10_l1linf:1.5156e+00 L11_l1linf:1.4531e+00 L12_l1linf:1.4531e+00 L1_spectral:7.7614e-02 L2_spectral:7.6913e-02 L3_spectral:7.7996e-02 L4_spectral:7.7544e-02 L5_spectral:7.8469e-02 L6_spectral:7.8334e-02 L7_spectral:7.8393e-02 L8_spectral:7.8632e-02 L9_spectral:7.8929e-02 L10_spectral:7.9143e-02 L11_spectral:7.9263e-02 L12_spectral:7.8536e-02 train_time:226044ms step_avg:43.47ms +[2025-09-11 06:47:06] [Rank 0] PRINT: step:5200/10000 val_loss:4.4218 total_sharp:5.2116e-05 L1_sharp:5.6191e-04 L2_sharp:3.7950e-05 L3_sharp:8.5318e-05 L4_sharp:5.9574e-05 L5_sharp:9.8761e-05 L6_sharp:7.6433e-05 L7_sharp:1.2253e-04 L8_sharp:1.8725e-04 L9_sharp:1.9272e-04 L10_sharp:2.3077e-04 L11_sharp:3.3063e-04 L12_sharp:3.2255e-03 total_fnorm:1.7300e+02 total_l1_linf:4.1165e+05 total_spectral:8.7000e+01 L1_fnorm:6.0000e+00 L2_fnorm:6.0312e+00 L3_fnorm:5.5312e+00 L4_fnorm:6.1562e+00 L5_fnorm:6.2500e+00 L6_fnorm:6.2812e+00 L7_fnorm:6.2500e+00 L8_fnorm:6.1562e+00 L9_fnorm:6.2812e+00 L10_fnorm:6.2812e+00 L11_fnorm:6.2188e+00 L12_fnorm:6.2500e+00 L1_l1linf:1.5625e+00 L2_l1linf:1.2656e+00 L3_l1linf:1.1172e+00 L4_l1linf:1.4219e+00 L5_l1linf:1.5312e+00 
L6_l1linf:1.5234e+00 L7_l1linf:1.5625e+00 L8_l1linf:1.5625e+00 L9_l1linf:1.5547e+00 L10_l1linf:1.5156e+00 L11_l1linf:1.4531e+00 L12_l1linf:1.4531e+00 L1_spectral:7.7614e-02 L2_spectral:7.6913e-02 L3_spectral:7.7996e-02 L4_spectral:7.7544e-02 L5_spectral:7.8469e-02 L6_spectral:7.8334e-02 L7_spectral:7.8393e-02 L8_spectral:7.8632e-02 L9_spectral:7.8929e-02 L10_spectral:7.9143e-02 L11_spectral:7.9263e-02 L12_spectral:7.8536e-02 train_time:226044ms step_avg:43.47ms +[2025-09-11 06:47:08] [Rank 0] step:5201/10000 train_time:227205ms step_avg:43.68ms +[2025-09-11 06:47:08] [Rank 0] step:5201/10000 train_time:227205ms step_avg:43.68ms +[2025-09-11 06:47:08] [Rank 0] step:5221/10000 train_time:227884ms step_avg:43.65ms +[2025-09-11 06:47:08] [Rank 0] step:5221/10000 train_time:227884ms step_avg:43.65ms +[2025-09-11 06:47:09] [Rank 0] step:5241/10000 train_time:228574ms step_avg:43.61ms +[2025-09-11 06:47:09] [Rank 0] step:5241/10000 train_time:228574ms step_avg:43.61ms +[2025-09-11 06:47:10] [Rank 0] step:5261/10000 train_time:229264ms step_avg:43.58ms +[2025-09-11 06:47:10] [Rank 0] step:5261/10000 train_time:229264ms step_avg:43.58ms +[2025-09-11 06:47:10] [Rank 0] step:5281/10000 train_time:229953ms step_avg:43.54ms +[2025-09-11 06:47:10] [Rank 0] step:5281/10000 train_time:229953ms step_avg:43.54ms +[2025-09-11 06:47:11] [Rank 0] step:5301/10000 train_time:230642ms step_avg:43.51ms +[2025-09-11 06:47:11] [Rank 0] step:5301/10000 train_time:230642ms step_avg:43.51ms +[2025-09-11 06:47:12] [Rank 0] step:5321/10000 train_time:231331ms step_avg:43.48ms +[2025-09-11 06:47:12] [Rank 0] step:5321/10000 train_time:231331ms step_avg:43.48ms +[2025-09-11 06:47:12] [Rank 0] step:5341/10000 train_time:232020ms step_avg:43.44ms +[2025-09-11 06:47:12] [Rank 0] step:5341/10000 train_time:232020ms step_avg:43.44ms +[2025-09-11 06:47:13] [Rank 0] step:5361/10000 train_time:232709ms step_avg:43.41ms +[2025-09-11 06:47:13] [Rank 0] step:5361/10000 train_time:232709ms step_avg:43.41ms 
+[2025-09-11 06:47:14] [Rank 0] step:5381/10000 train_time:233399ms step_avg:43.37ms +[2025-09-11 06:47:14] [Rank 0] step:5381/10000 train_time:233399ms step_avg:43.37ms +[2025-09-11 06:47:14] [Rank 0] step:5401/10000 train_time:234086ms step_avg:43.34ms +[2025-09-11 06:47:14] [Rank 0] step:5401/10000 train_time:234086ms step_avg:43.34ms +[2025-09-11 06:47:15] [Rank 0] step:5421/10000 train_time:234776ms step_avg:43.31ms +[2025-09-11 06:47:15] [Rank 0] step:5421/10000 train_time:234776ms step_avg:43.31ms +[2025-09-11 06:47:16] [Rank 0] step:5441/10000 train_time:235466ms step_avg:43.28ms +[2025-09-11 06:47:16] [Rank 0] step:5441/10000 train_time:235466ms step_avg:43.28ms +[2025-09-11 06:47:16] [Rank 0] step:5461/10000 train_time:236156ms step_avg:43.24ms +[2025-09-11 06:47:16] [Rank 0] step:5461/10000 train_time:236156ms step_avg:43.24ms +[2025-09-11 06:47:17] [Rank 0] step:5481/10000 train_time:236844ms step_avg:43.21ms +[2025-09-11 06:47:17] [Rank 0] step:5481/10000 train_time:236844ms step_avg:43.21ms +[2025-09-11 06:47:18] [Rank 0] step:5501/10000 train_time:237533ms step_avg:43.18ms +[2025-09-11 06:47:18] [Rank 0] step:5501/10000 train_time:237533ms step_avg:43.18ms +[2025-09-11 06:47:19] [Rank 0] step:5521/10000 train_time:238221ms step_avg:43.15ms +[2025-09-11 06:47:19] [Rank 0] step:5521/10000 train_time:238221ms step_avg:43.15ms +[2025-09-11 06:47:19] [Rank 0] step:5541/10000 train_time:238912ms step_avg:43.12ms +[2025-09-11 06:47:19] [Rank 0] step:5541/10000 train_time:238912ms step_avg:43.12ms +[2025-09-11 06:47:20] [Rank 0] step:5561/10000 train_time:239603ms step_avg:43.09ms +[2025-09-11 06:47:20] [Rank 0] step:5561/10000 train_time:239603ms step_avg:43.09ms +[2025-09-11 06:47:21] [Rank 0] step:5581/10000 train_time:240293ms step_avg:43.06ms +[2025-09-11 06:47:21] [Rank 0] step:5581/10000 train_time:240293ms step_avg:43.06ms +[2025-09-11 06:47:21] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 06:47:21] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 06:47:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 06:47:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 06:47:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 06:47:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 06:47:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:47:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:47:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 06:47:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 06:47:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 06:47:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 06:47:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 06:47:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 06:47:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 06:47:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 06:47:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 06:47:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 06:47:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 06:47:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 06:47:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 06:47:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 06:47:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 06:47:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 06:47:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 06:47:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 06:47:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 06:47:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 06:47:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 06:47:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 06:47:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 06:47:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 06:47:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 06:47:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 06:47:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 06:47:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 06:47:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 06:47:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 06:47:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 06:47:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 06:47:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 06:47:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 06:47:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 06:47:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 06:47:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 06:47:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 06:47:31] [Rank 0] PRINT: step:5600/10000 val_loss:4.4049 total_sharp:4.1597e-05 L1_sharp:5.2287e-04 L2_sharp:-3.1138e-06 L3_sharp:2.3144e-05 L4_sharp:6.4880e-05 L5_sharp:1.0235e-04 L6_sharp:6.8143e-05 L7_sharp:6.1569e-05 L8_sharp:1.2311e-04 L9_sharp:1.5523e-04 L10_sharp:1.8115e-04 L11_sharp:2.7960e-04 L12_sharp:1.8124e-03 total_fnorm:1.8100e+02 total_l1_linf:4.4237e+05 total_spectral:9.1500e+01 L1_fnorm:5.9688e+00 L2_fnorm:5.8750e+00 L3_fnorm:5.4062e+00 L4_fnorm:6.1250e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2500e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.2500e+00 L10_fnorm:6.2188e+00 L11_fnorm:6.1875e+00 L12_fnorm:6.2188e+00 L1_l1linf:1.5469e+00 L2_l1linf:1.1484e+00 L3_l1linf:1.1328e+00 L4_l1linf:1.4297e+00 L5_l1linf:1.5234e+00 L6_l1linf:1.5469e+00 L7_l1linf:1.5469e+00 L8_l1linf:1.5078e+00 L9_l1linf:1.5078e+00 L10_l1linf:1.4922e+00 L11_l1linf:1.4297e+00 L12_l1linf:1.4453e+00 L1_spectral:7.8100e-02 L2_spectral:7.6962e-02 L3_spectral:7.7777e-02 L4_spectral:7.7501e-02 L5_spectral:7.8740e-02 L6_spectral:7.8841e-02 L7_spectral:7.8701e-02 L8_spectral:7.8253e-02 L9_spectral:7.9114e-02 L10_spectral:7.9258e-02 L11_spectral:7.9041e-02 L12_spectral:7.8855e-02 train_time:240961ms step_avg:43.03ms +[2025-09-11 06:47:31] [Rank 0] PRINT: step:5600/10000 
val_loss:4.4049 total_sharp:4.1597e-05 L1_sharp:5.2287e-04 L2_sharp:-3.1138e-06 L3_sharp:2.3144e-05 L4_sharp:6.4880e-05 L5_sharp:1.0235e-04 L6_sharp:6.8143e-05 L7_sharp:6.1569e-05 L8_sharp:1.2311e-04 L9_sharp:1.5523e-04 L10_sharp:1.8115e-04 L11_sharp:2.7960e-04 L12_sharp:1.8124e-03 total_fnorm:1.8100e+02 total_l1_linf:4.4237e+05 total_spectral:9.1500e+01 L1_fnorm:5.9688e+00 L2_fnorm:5.8750e+00 L3_fnorm:5.4062e+00 L4_fnorm:6.1250e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2500e+00 L8_fnorm:6.0312e+00 L9_fnorm:6.2500e+00 L10_fnorm:6.2188e+00 L11_fnorm:6.1875e+00 L12_fnorm:6.2188e+00 L1_l1linf:1.5469e+00 L2_l1linf:1.1484e+00 L3_l1linf:1.1328e+00 L4_l1linf:1.4297e+00 L5_l1linf:1.5234e+00 L6_l1linf:1.5469e+00 L7_l1linf:1.5469e+00 L8_l1linf:1.5078e+00 L9_l1linf:1.5078e+00 L10_l1linf:1.4922e+00 L11_l1linf:1.4297e+00 L12_l1linf:1.4453e+00 L1_spectral:7.8100e-02 L2_spectral:7.6962e-02 L3_spectral:7.7777e-02 L4_spectral:7.7501e-02 L5_spectral:7.8740e-02 L6_spectral:7.8841e-02 L7_spectral:7.8701e-02 L8_spectral:7.8253e-02 L9_spectral:7.9114e-02 L10_spectral:7.9258e-02 L11_spectral:7.9041e-02 L12_spectral:7.8855e-02 train_time:240961ms step_avg:43.03ms +[2025-09-11 06:47:32] [Rank 0] step:5601/10000 train_time:242136ms step_avg:43.23ms +[2025-09-11 06:47:32] [Rank 0] step:5601/10000 train_time:242136ms step_avg:43.23ms +[2025-09-11 06:47:33] [Rank 0] step:5621/10000 train_time:242817ms step_avg:43.20ms +[2025-09-11 06:47:33] [Rank 0] step:5621/10000 train_time:242817ms step_avg:43.20ms +[2025-09-11 06:47:34] [Rank 0] step:5641/10000 train_time:243507ms step_avg:43.17ms +[2025-09-11 06:47:34] [Rank 0] step:5641/10000 train_time:243507ms step_avg:43.17ms +[2025-09-11 06:47:35] [Rank 0] step:5661/10000 train_time:244199ms step_avg:43.14ms +[2025-09-11 06:47:35] [Rank 0] step:5661/10000 train_time:244199ms step_avg:43.14ms +[2025-09-11 06:47:35] [Rank 0] step:5681/10000 train_time:244889ms step_avg:43.11ms +[2025-09-11 06:47:35] [Rank 0] step:5681/10000 
train_time:244889ms step_avg:43.11ms +[2025-09-11 06:47:36] [Rank 0] step:5701/10000 train_time:245580ms step_avg:43.08ms +[2025-09-11 06:47:36] [Rank 0] step:5701/10000 train_time:245580ms step_avg:43.08ms +[2025-09-11 06:47:37] [Rank 0] step:5721/10000 train_time:246270ms step_avg:43.05ms +[2025-09-11 06:47:37] [Rank 0] step:5721/10000 train_time:246270ms step_avg:43.05ms +[2025-09-11 06:47:37] [Rank 0] step:5741/10000 train_time:246962ms step_avg:43.02ms +[2025-09-11 06:47:37] [Rank 0] step:5741/10000 train_time:246962ms step_avg:43.02ms +[2025-09-11 06:47:38] [Rank 0] step:5761/10000 train_time:247653ms step_avg:42.99ms +[2025-09-11 06:47:38] [Rank 0] step:5761/10000 train_time:247653ms step_avg:42.99ms +[2025-09-11 06:47:39] [Rank 0] step:5781/10000 train_time:248344ms step_avg:42.96ms +[2025-09-11 06:47:39] [Rank 0] step:5781/10000 train_time:248344ms step_avg:42.96ms +[2025-09-11 06:47:39] [Rank 0] step:5801/10000 train_time:249037ms step_avg:42.93ms +[2025-09-11 06:47:39] [Rank 0] step:5801/10000 train_time:249037ms step_avg:42.93ms +[2025-09-11 06:47:40] [Rank 0] step:5821/10000 train_time:249725ms step_avg:42.90ms +[2025-09-11 06:47:40] [Rank 0] step:5821/10000 train_time:249725ms step_avg:42.90ms +[2025-09-11 06:47:41] [Rank 0] step:5841/10000 train_time:250418ms step_avg:42.87ms +[2025-09-11 06:47:41] [Rank 0] step:5841/10000 train_time:250418ms step_avg:42.87ms +[2025-09-11 06:47:41] [Rank 0] step:5861/10000 train_time:251107ms step_avg:42.84ms +[2025-09-11 06:47:41] [Rank 0] step:5861/10000 train_time:251107ms step_avg:42.84ms +[2025-09-11 06:47:42] [Rank 0] step:5881/10000 train_time:251798ms step_avg:42.82ms +[2025-09-11 06:47:42] [Rank 0] step:5881/10000 train_time:251798ms step_avg:42.82ms +[2025-09-11 06:47:43] [Rank 0] step:5901/10000 train_time:252487ms step_avg:42.79ms +[2025-09-11 06:47:43] [Rank 0] step:5901/10000 train_time:252487ms step_avg:42.79ms +[2025-09-11 06:47:44] [Rank 0] step:5921/10000 train_time:253180ms step_avg:42.76ms 
+[2025-09-11 06:47:44] [Rank 0] step:5921/10000 train_time:253180ms step_avg:42.76ms +[2025-09-11 06:47:44] [Rank 0] step:5941/10000 train_time:253872ms step_avg:42.73ms +[2025-09-11 06:47:44] [Rank 0] step:5941/10000 train_time:253872ms step_avg:42.73ms +[2025-09-11 06:47:45] [Rank 0] step:5961/10000 train_time:254563ms step_avg:42.70ms +[2025-09-11 06:47:45] [Rank 0] step:5961/10000 train_time:254563ms step_avg:42.70ms +[2025-09-11 06:47:46] [Rank 0] step:5981/10000 train_time:255254ms step_avg:42.68ms +[2025-09-11 06:47:46] [Rank 0] step:5981/10000 train_time:255254ms step_avg:42.68ms +[2025-09-11 06:47:46] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 06:47:46] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 06:47:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 06:47:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 06:47:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 06:47:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 06:47:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:47:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:47:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 06:47:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 06:47:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 06:47:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 06:47:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 06:47:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 06:47:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 06:47:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 06:47:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 06:47:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 06:47:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 06:47:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 06:47:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 06:47:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 06:47:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 06:47:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 06:47:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 06:47:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 06:47:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 06:47:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 06:47:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 06:47:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 06:47:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 06:47:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 06:47:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 06:47:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 06:47:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 06:47:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 06:47:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 06:47:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 06:47:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 06:47:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 06:47:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 06:47:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 06:47:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 06:47:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 06:47:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 06:47:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:47:57] [Rank 0] PRINT: step:6000/10000 val_loss:4.3619 total_sharp:3.8175e-05 L1_sharp:4.9892e-04 L2_sharp:8.9908e-05 L3_sharp:3.9305e-05 L4_sharp:7.6715e-06 L5_sharp:3.6274e-05 L6_sharp:3.7840e-05 L7_sharp:1.4106e-04 L8_sharp:1.7949e-04 L9_sharp:1.3274e-04 L10_sharp:1.8804e-04 L11_sharp:2.9374e-04 L12_sharp:1.8540e-03 total_fnorm:1.8000e+02 total_l1_linf:4.3008e+05 total_spectral:9.0500e+01 L1_fnorm:5.9688e+00 L2_fnorm:5.7812e+00 L3_fnorm:5.4375e+00 L4_fnorm:6.0938e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2500e+00 L8_fnorm:6.0938e+00 L9_fnorm:6.2500e+00 L10_fnorm:6.2500e+00 L11_fnorm:6.1875e+00 L12_fnorm:6.2188e+00 L1_l1linf:1.5469e+00 L2_l1linf:1.1562e+00 L3_l1linf:1.1562e+00 L4_l1linf:1.4375e+00 L5_l1linf:1.5078e+00 L6_l1linf:1.5469e+00 L7_l1linf:1.5469e+00 L8_l1linf:1.5312e+00 L9_l1linf:1.5234e+00 L10_l1linf:1.5000e+00 L11_l1linf:1.4141e+00 L12_l1linf:1.3984e+00 L1_spectral:7.7822e-02 L2_spectral:7.6579e-02 L3_spectral:7.8490e-02 L4_spectral:7.8070e-02 L5_spectral:7.9027e-02 L6_spectral:7.8906e-02 L7_spectral:7.8519e-02 L8_spectral:7.8950e-02 L9_spectral:7.9495e-02 L10_spectral:7.9455e-02 L11_spectral:7.8931e-02 L12_spectral:7.8880e-02 train_time:255927ms step_avg:42.65ms +[2025-09-11 06:47:57] [Rank 0] PRINT: step:6000/10000 val_loss:4.3619 total_sharp:3.8175e-05 L1_sharp:4.9892e-04 L2_sharp:8.9908e-05 L3_sharp:3.9305e-05 L4_sharp:7.6715e-06 L5_sharp:3.6274e-05 L6_sharp:3.7840e-05 L7_sharp:1.4106e-04 L8_sharp:1.7949e-04 L9_sharp:1.3274e-04 L10_sharp:1.8804e-04 L11_sharp:2.9374e-04 L12_sharp:1.8540e-03 total_fnorm:1.8000e+02 total_l1_linf:4.3008e+05 total_spectral:9.0500e+01 L1_fnorm:5.9688e+00 L2_fnorm:5.7812e+00 L3_fnorm:5.4375e+00 L4_fnorm:6.0938e+00 L5_fnorm:6.2188e+00 L6_fnorm:6.2500e+00 L7_fnorm:6.2500e+00 L8_fnorm:6.0938e+00 L9_fnorm:6.2500e+00 L10_fnorm:6.2500e+00 L11_fnorm:6.1875e+00 L12_fnorm:6.2188e+00 L1_l1linf:1.5469e+00 L2_l1linf:1.1562e+00 L3_l1linf:1.1562e+00 L4_l1linf:1.4375e+00 L5_l1linf:1.5078e+00 
L6_l1linf:1.5469e+00 L7_l1linf:1.5469e+00 L8_l1linf:1.5312e+00 L9_l1linf:1.5234e+00 L10_l1linf:1.5000e+00 L11_l1linf:1.4141e+00 L12_l1linf:1.3984e+00 L1_spectral:7.7822e-02 L2_spectral:7.6579e-02 L3_spectral:7.8490e-02 L4_spectral:7.8070e-02 L5_spectral:7.9027e-02 L6_spectral:7.8906e-02 L7_spectral:7.8519e-02 L8_spectral:7.8950e-02 L9_spectral:7.9495e-02 L10_spectral:7.9455e-02 L11_spectral:7.8931e-02 L12_spectral:7.8880e-02 train_time:255927ms step_avg:42.65ms +[2025-09-11 06:47:58] [Rank 0] step:6001/10000 train_time:257117ms step_avg:42.85ms +[2025-09-11 06:47:58] [Rank 0] step:6001/10000 train_time:257117ms step_avg:42.85ms +[2025-09-11 06:47:58] [Rank 0] step:6021/10000 train_time:257853ms step_avg:42.83ms +[2025-09-11 06:47:58] [Rank 0] step:6021/10000 train_time:257853ms step_avg:42.83ms +[2025-09-11 06:48:00] [Rank 0] step:6041/10000 train_time:258951ms step_avg:42.87ms +[2025-09-11 06:48:00] [Rank 0] step:6041/10000 train_time:258951ms step_avg:42.87ms +[2025-09-11 06:48:00] [Rank 0] step:6061/10000 train_time:259758ms step_avg:42.86ms +[2025-09-11 06:48:00] [Rank 0] step:6061/10000 train_time:259758ms step_avg:42.86ms +[2025-09-11 06:48:01] [Rank 0] step:6081/10000 train_time:260452ms step_avg:42.83ms +[2025-09-11 06:48:01] [Rank 0] step:6081/10000 train_time:260452ms step_avg:42.83ms +[2025-09-11 06:48:02] [Rank 0] step:6101/10000 train_time:261435ms step_avg:42.85ms +[2025-09-11 06:48:02] [Rank 0] step:6101/10000 train_time:261435ms step_avg:42.85ms +[2025-09-11 06:48:03] [Rank 0] step:6121/10000 train_time:262129ms step_avg:42.82ms +[2025-09-11 06:48:03] [Rank 0] step:6121/10000 train_time:262129ms step_avg:42.82ms +[2025-09-11 06:48:03] [Rank 0] step:6141/10000 train_time:262823ms step_avg:42.80ms +[2025-09-11 06:48:03] [Rank 0] step:6141/10000 train_time:262823ms step_avg:42.80ms +[2025-09-11 06:48:04] [Rank 0] step:6161/10000 train_time:263516ms step_avg:42.77ms +[2025-09-11 06:48:04] [Rank 0] step:6161/10000 train_time:263516ms step_avg:42.77ms 
+[2025-09-11 06:48:05] [Rank 0] step:6181/10000 train_time:264207ms step_avg:42.74ms +[2025-09-11 06:48:05] [Rank 0] step:6181/10000 train_time:264207ms step_avg:42.74ms +[2025-09-11 06:48:06] [Rank 0] step:6201/10000 train_time:264900ms step_avg:42.72ms +[2025-09-11 06:48:06] [Rank 0] step:6201/10000 train_time:264900ms step_avg:42.72ms +[2025-09-11 06:48:06] [Rank 0] step:6221/10000 train_time:265593ms step_avg:42.69ms +[2025-09-11 06:48:06] [Rank 0] step:6221/10000 train_time:265593ms step_avg:42.69ms +[2025-09-11 06:48:07] [Rank 0] step:6241/10000 train_time:266286ms step_avg:42.67ms +[2025-09-11 06:48:07] [Rank 0] step:6241/10000 train_time:266286ms step_avg:42.67ms +[2025-09-11 06:48:08] [Rank 0] step:6261/10000 train_time:266978ms step_avg:42.64ms +[2025-09-11 06:48:08] [Rank 0] step:6261/10000 train_time:266978ms step_avg:42.64ms +[2025-09-11 06:48:08] [Rank 0] step:6281/10000 train_time:267670ms step_avg:42.62ms +[2025-09-11 06:48:08] [Rank 0] step:6281/10000 train_time:267670ms step_avg:42.62ms +[2025-09-11 06:48:09] [Rank 0] step:6301/10000 train_time:268362ms step_avg:42.59ms +[2025-09-11 06:48:09] [Rank 0] step:6301/10000 train_time:268362ms step_avg:42.59ms +[2025-09-11 06:48:10] [Rank 0] step:6321/10000 train_time:269057ms step_avg:42.57ms +[2025-09-11 06:48:10] [Rank 0] step:6321/10000 train_time:269057ms step_avg:42.57ms +[2025-09-11 06:48:10] [Rank 0] step:6341/10000 train_time:269750ms step_avg:42.54ms +[2025-09-11 06:48:10] [Rank 0] step:6341/10000 train_time:269750ms step_avg:42.54ms +[2025-09-11 06:48:11] [Rank 0] step:6361/10000 train_time:270443ms step_avg:42.52ms +[2025-09-11 06:48:11] [Rank 0] step:6361/10000 train_time:270443ms step_avg:42.52ms +[2025-09-11 06:48:12] [Rank 0] step:6381/10000 train_time:271135ms step_avg:42.49ms +[2025-09-11 06:48:12] [Rank 0] step:6381/10000 train_time:271135ms step_avg:42.49ms +[2025-09-11 06:48:12] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 06:48:12] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 06:48:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 06:48:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 06:48:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 06:48:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 06:48:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:48:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:48:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 06:48:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 06:48:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 06:48:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 06:48:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 06:48:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 06:48:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 06:48:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 06:48:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 06:48:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 06:48:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 06:48:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 06:48:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 06:48:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 06:48:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 06:48:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 06:48:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 06:48:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 06:48:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 06:48:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 06:48:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 06:48:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 06:48:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 06:48:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 06:48:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 06:48:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 06:48:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 06:48:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 06:48:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 06:48:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 06:48:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 06:48:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 06:48:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 06:48:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 06:48:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 06:48:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 06:48:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 06:48:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 06:48:23] [Rank 0] PRINT: step:6400/10000 val_loss:4.3306 total_sharp:3.8306e-05 L1_sharp:4.6630e-04 L2_sharp:4.4371e-05 L3_sharp:3.1121e-05 L4_sharp:3.0606e-05 L5_sharp:8.4890e-05 L6_sharp:7.7450e-05 L7_sharp:4.2813e-05 L8_sharp:1.7453e-04 L9_sharp:1.6111e-04 L10_sharp:1.6821e-04 L11_sharp:3.2726e-04 L12_sharp:2.0021e-03 total_fnorm:1.6300e+02 total_l1_linf:3.8502e+05 total_spectral:8.2500e+01 L1_fnorm:5.4062e+00 L2_fnorm:5.2188e+00 L3_fnorm:4.7812e+00 L4_fnorm:5.5312e+00 L5_fnorm:5.6250e+00 L6_fnorm:5.6562e+00 L7_fnorm:5.6562e+00 L8_fnorm:5.5000e+00 L9_fnorm:5.6562e+00 L10_fnorm:5.6250e+00 L11_fnorm:5.5625e+00 L12_fnorm:5.5938e+00 L1_l1linf:1.3516e+00 L2_l1linf:9.6094e-01 L3_l1linf:1.1094e+00 L4_l1linf:1.2500e+00 L5_l1linf:1.3281e+00 L6_l1linf:1.3594e+00 L7_l1linf:1.3438e+00 L8_l1linf:1.3359e+00 L9_l1linf:1.3281e+00 L10_l1linf:1.2969e+00 L11_l1linf:1.2422e+00 L12_l1linf:1.2344e+00 L1_spectral:7.1705e-02 L2_spectral:7.0778e-02 L3_spectral:7.2252e-02 L4_spectral:7.1534e-02 L5_spectral:7.3351e-02 L6_spectral:7.2898e-02 L7_spectral:7.2859e-02 L8_spectral:7.2290e-02 L9_spectral:7.3069e-02 L10_spectral:7.2832e-02 L11_spectral:7.2580e-02 L12_spectral:7.2550e-02 train_time:271808ms step_avg:42.47ms +[2025-09-11 06:48:23] [Rank 0] PRINT: step:6400/10000 
val_loss:4.3306 total_sharp:3.8306e-05 L1_sharp:4.6630e-04 L2_sharp:4.4371e-05 L3_sharp:3.1121e-05 L4_sharp:3.0606e-05 L5_sharp:8.4890e-05 L6_sharp:7.7450e-05 L7_sharp:4.2813e-05 L8_sharp:1.7453e-04 L9_sharp:1.6111e-04 L10_sharp:1.6821e-04 L11_sharp:3.2726e-04 L12_sharp:2.0021e-03 total_fnorm:1.6300e+02 total_l1_linf:3.8502e+05 total_spectral:8.2500e+01 L1_fnorm:5.4062e+00 L2_fnorm:5.2188e+00 L3_fnorm:4.7812e+00 L4_fnorm:5.5312e+00 L5_fnorm:5.6250e+00 L6_fnorm:5.6562e+00 L7_fnorm:5.6562e+00 L8_fnorm:5.5000e+00 L9_fnorm:5.6562e+00 L10_fnorm:5.6250e+00 L11_fnorm:5.5625e+00 L12_fnorm:5.5938e+00 L1_l1linf:1.3516e+00 L2_l1linf:9.6094e-01 L3_l1linf:1.1094e+00 L4_l1linf:1.2500e+00 L5_l1linf:1.3281e+00 L6_l1linf:1.3594e+00 L7_l1linf:1.3438e+00 L8_l1linf:1.3359e+00 L9_l1linf:1.3281e+00 L10_l1linf:1.2969e+00 L11_l1linf:1.2422e+00 L12_l1linf:1.2344e+00 L1_spectral:7.1705e-02 L2_spectral:7.0778e-02 L3_spectral:7.2252e-02 L4_spectral:7.1534e-02 L5_spectral:7.3351e-02 L6_spectral:7.2898e-02 L7_spectral:7.2859e-02 L8_spectral:7.2290e-02 L9_spectral:7.3069e-02 L10_spectral:7.2832e-02 L11_spectral:7.2580e-02 L12_spectral:7.2550e-02 train_time:271808ms step_avg:42.47ms +[2025-09-11 06:48:24] [Rank 0] step:6401/10000 train_time:272969ms step_avg:42.64ms +[2025-09-11 06:48:24] [Rank 0] step:6401/10000 train_time:272969ms step_avg:42.64ms +[2025-09-11 06:48:24] [Rank 0] step:6421/10000 train_time:273649ms step_avg:42.62ms +[2025-09-11 06:48:24] [Rank 0] step:6421/10000 train_time:273649ms step_avg:42.62ms +[2025-09-11 06:48:25] [Rank 0] step:6441/10000 train_time:274342ms step_avg:42.59ms +[2025-09-11 06:48:25] [Rank 0] step:6441/10000 train_time:274342ms step_avg:42.59ms +[2025-09-11 06:48:26] [Rank 0] step:6461/10000 train_time:275035ms step_avg:42.57ms +[2025-09-11 06:48:26] [Rank 0] step:6461/10000 train_time:275035ms step_avg:42.57ms +[2025-09-11 06:48:26] [Rank 0] step:6481/10000 train_time:275729ms step_avg:42.54ms +[2025-09-11 06:48:26] [Rank 0] step:6481/10000 
train_time:275729ms step_avg:42.54ms +[2025-09-11 06:48:27] [Rank 0] step:6501/10000 train_time:276423ms step_avg:42.52ms +[2025-09-11 06:48:27] [Rank 0] step:6501/10000 train_time:276423ms step_avg:42.52ms +[2025-09-11 06:48:28] [Rank 0] step:6521/10000 train_time:277117ms step_avg:42.50ms +[2025-09-11 06:48:28] [Rank 0] step:6521/10000 train_time:277117ms step_avg:42.50ms +[2025-09-11 06:48:29] [Rank 0] step:6541/10000 train_time:277810ms step_avg:42.47ms +[2025-09-11 06:48:29] [Rank 0] step:6541/10000 train_time:277810ms step_avg:42.47ms +[2025-09-11 06:48:29] [Rank 0] step:6561/10000 train_time:278503ms step_avg:42.45ms +[2025-09-11 06:48:29] [Rank 0] step:6561/10000 train_time:278503ms step_avg:42.45ms +[2025-09-11 06:48:30] [Rank 0] step:6581/10000 train_time:279196ms step_avg:42.42ms +[2025-09-11 06:48:30] [Rank 0] step:6581/10000 train_time:279196ms step_avg:42.42ms +[2025-09-11 06:48:31] [Rank 0] step:6601/10000 train_time:279888ms step_avg:42.40ms +[2025-09-11 06:48:31] [Rank 0] step:6601/10000 train_time:279888ms step_avg:42.40ms +[2025-09-11 06:48:31] [Rank 0] step:6621/10000 train_time:280580ms step_avg:42.38ms +[2025-09-11 06:48:31] [Rank 0] step:6621/10000 train_time:280580ms step_avg:42.38ms +[2025-09-11 06:48:32] [Rank 0] step:6641/10000 train_time:281273ms step_avg:42.35ms +[2025-09-11 06:48:32] [Rank 0] step:6641/10000 train_time:281273ms step_avg:42.35ms +[2025-09-11 06:48:33] [Rank 0] step:6661/10000 train_time:281966ms step_avg:42.33ms +[2025-09-11 06:48:33] [Rank 0] step:6661/10000 train_time:281966ms step_avg:42.33ms +[2025-09-11 06:48:33] [Rank 0] step:6681/10000 train_time:282665ms step_avg:42.31ms +[2025-09-11 06:48:33] [Rank 0] step:6681/10000 train_time:282665ms step_avg:42.31ms +[2025-09-11 06:48:34] [Rank 0] step:6701/10000 train_time:283363ms step_avg:42.29ms +[2025-09-11 06:48:34] [Rank 0] step:6701/10000 train_time:283363ms step_avg:42.29ms +[2025-09-11 06:48:35] [Rank 0] step:6721/10000 train_time:284064ms step_avg:42.27ms 
+[2025-09-11 06:48:35] [Rank 0] step:6721/10000 train_time:284064ms step_avg:42.27ms +[2025-09-11 06:48:36] [Rank 0] step:6741/10000 train_time:284764ms step_avg:42.24ms +[2025-09-11 06:48:36] [Rank 0] step:6741/10000 train_time:284764ms step_avg:42.24ms +[2025-09-11 06:48:36] [Rank 0] step:6761/10000 train_time:285463ms step_avg:42.22ms +[2025-09-11 06:48:36] [Rank 0] step:6761/10000 train_time:285463ms step_avg:42.22ms +[2025-09-11 06:48:37] [Rank 0] step:6781/10000 train_time:286162ms step_avg:42.20ms +[2025-09-11 06:48:37] [Rank 0] step:6781/10000 train_time:286162ms step_avg:42.20ms +[2025-09-11 06:48:38] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 06:48:38] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 06:48:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 06:48:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 06:48:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 06:48:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 06:48:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:48:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:48:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 06:48:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 06:48:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 06:48:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 06:48:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 06:48:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 06:48:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 06:48:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 06:48:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 06:48:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 06:48:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 06:48:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 06:48:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 06:48:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 06:48:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 06:48:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 06:48:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 06:48:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 06:48:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 06:48:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 06:48:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 06:48:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 06:48:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 06:48:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 06:48:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 06:48:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 06:48:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 06:48:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 06:48:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 06:48:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 06:48:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 06:48:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 06:48:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 06:48:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 06:48:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 06:48:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 06:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 06:48:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:48:48] [Rank 0] PRINT: step:6800/10000 val_loss:4.2770 total_sharp:3.5555e-05 L1_sharp:4.5372e-04 L2_sharp:5.9694e-05 L3_sharp:-5.0763e-06 L4_sharp:5.7186e-05 L5_sharp:1.6939e-04 L6_sharp:7.9260e-05 L7_sharp:9.3794e-05 L8_sharp:1.3460e-04 L9_sharp:1.4404e-04 L10_sharp:2.0990e-04 L11_sharp:3.0313e-04 L12_sharp:1.5770e-03 total_fnorm:1.5600e+02 total_l1_linf:3.6045e+05 total_spectral:7.8000e+01 L1_fnorm:4.8125e+00 L2_fnorm:4.6250e+00 L3_fnorm:4.1562e+00 L4_fnorm:4.9062e+00 L5_fnorm:4.9688e+00 L6_fnorm:5.0000e+00 L7_fnorm:4.9688e+00 L8_fnorm:4.8438e+00 L9_fnorm:4.9688e+00 L10_fnorm:4.9688e+00 L11_fnorm:4.9062e+00 L12_fnorm:4.9062e+00 L1_l1linf:1.1406e+00 L2_l1linf:8.8281e-01 L3_l1linf:9.4531e-01 L4_l1linf:1.1094e+00 L5_l1linf:1.1484e+00 L6_l1linf:1.1641e+00 L7_l1linf:1.1641e+00 L8_l1linf:1.1484e+00 L9_l1linf:1.1250e+00 L10_l1linf:1.1172e+00 L11_l1linf:1.0703e+00 L12_l1linf:1.0469e+00 L1_spectral:6.5010e-02 L2_spectral:6.3510e-02 L3_spectral:6.5241e-02 L4_spectral:6.4732e-02 L5_spectral:6.5264e-02 L6_spectral:6.5567e-02 L7_spectral:6.5491e-02 L8_spectral:6.3623e-02 L9_spectral:6.5486e-02 L10_spectral:6.5907e-02 L11_spectral:6.5976e-02 L12_spectral:6.5478e-02 train_time:286841ms step_avg:42.18ms +[2025-09-11 06:48:48] [Rank 0] PRINT: step:6800/10000 val_loss:4.2770 total_sharp:3.5555e-05 L1_sharp:4.5372e-04 L2_sharp:5.9694e-05 L3_sharp:-5.0763e-06 L4_sharp:5.7186e-05 L5_sharp:1.6939e-04 L6_sharp:7.9260e-05 L7_sharp:9.3794e-05 L8_sharp:1.3460e-04 L9_sharp:1.4404e-04 L10_sharp:2.0990e-04 L11_sharp:3.0313e-04 L12_sharp:1.5770e-03 total_fnorm:1.5600e+02 total_l1_linf:3.6045e+05 total_spectral:7.8000e+01 L1_fnorm:4.8125e+00 L2_fnorm:4.6250e+00 L3_fnorm:4.1562e+00 L4_fnorm:4.9062e+00 L5_fnorm:4.9688e+00 L6_fnorm:5.0000e+00 L7_fnorm:4.9688e+00 L8_fnorm:4.8438e+00 L9_fnorm:4.9688e+00 L10_fnorm:4.9688e+00 L11_fnorm:4.9062e+00 L12_fnorm:4.9062e+00 L1_l1linf:1.1406e+00 L2_l1linf:8.8281e-01 L3_l1linf:9.4531e-01 L4_l1linf:1.1094e+00 L5_l1linf:1.1484e+00 
L6_l1linf:1.1641e+00 L7_l1linf:1.1641e+00 L8_l1linf:1.1484e+00 L9_l1linf:1.1250e+00 L10_l1linf:1.1172e+00 L11_l1linf:1.0703e+00 L12_l1linf:1.0469e+00 L1_spectral:6.5010e-02 L2_spectral:6.3510e-02 L3_spectral:6.5241e-02 L4_spectral:6.4732e-02 L5_spectral:6.5264e-02 L6_spectral:6.5567e-02 L7_spectral:6.5491e-02 L8_spectral:6.3623e-02 L9_spectral:6.5486e-02 L10_spectral:6.5907e-02 L11_spectral:6.5976e-02 L12_spectral:6.5478e-02 train_time:286841ms step_avg:42.18ms +[2025-09-11 06:48:49] [Rank 0] step:6801/10000 train_time:288006ms step_avg:42.35ms +[2025-09-11 06:48:49] [Rank 0] step:6801/10000 train_time:288006ms step_avg:42.35ms +[2025-09-11 06:48:50] [Rank 0] step:6821/10000 train_time:288730ms step_avg:42.33ms +[2025-09-11 06:48:50] [Rank 0] step:6821/10000 train_time:288730ms step_avg:42.33ms +[2025-09-11 06:48:50] [Rank 0] step:6841/10000 train_time:289433ms step_avg:42.31ms +[2025-09-11 06:48:50] [Rank 0] step:6841/10000 train_time:289433ms step_avg:42.31ms +[2025-09-11 06:48:51] [Rank 0] step:6861/10000 train_time:290136ms step_avg:42.29ms +[2025-09-11 06:48:51] [Rank 0] step:6861/10000 train_time:290136ms step_avg:42.29ms +[2025-09-11 06:48:52] [Rank 0] step:6881/10000 train_time:290837ms step_avg:42.27ms +[2025-09-11 06:48:52] [Rank 0] step:6881/10000 train_time:290837ms step_avg:42.27ms +[2025-09-11 06:48:52] [Rank 0] step:6901/10000 train_time:291535ms step_avg:42.25ms +[2025-09-11 06:48:52] [Rank 0] step:6901/10000 train_time:291535ms step_avg:42.25ms +[2025-09-11 06:48:53] [Rank 0] step:6921/10000 train_time:292234ms step_avg:42.22ms +[2025-09-11 06:48:53] [Rank 0] step:6921/10000 train_time:292234ms step_avg:42.22ms +[2025-09-11 06:48:54] [Rank 0] step:6941/10000 train_time:292934ms step_avg:42.20ms +[2025-09-11 06:48:54] [Rank 0] step:6941/10000 train_time:292934ms step_avg:42.20ms +[2025-09-11 06:48:54] [Rank 0] step:6961/10000 train_time:293634ms step_avg:42.18ms +[2025-09-11 06:48:54] [Rank 0] step:6961/10000 train_time:293634ms step_avg:42.18ms 
+[2025-09-11 06:48:55] [Rank 0] step:6981/10000 train_time:294336ms step_avg:42.16ms +[2025-09-11 06:48:55] [Rank 0] step:6981/10000 train_time:294336ms step_avg:42.16ms +[2025-09-11 06:48:56] [Rank 0] step:7001/10000 train_time:295036ms step_avg:42.14ms +[2025-09-11 06:48:56] [Rank 0] step:7001/10000 train_time:295036ms step_avg:42.14ms +[2025-09-11 06:48:57] [Rank 0] step:7021/10000 train_time:295736ms step_avg:42.12ms +[2025-09-11 06:48:57] [Rank 0] step:7021/10000 train_time:295736ms step_avg:42.12ms +[2025-09-11 06:48:57] [Rank 0] step:7041/10000 train_time:296435ms step_avg:42.10ms +[2025-09-11 06:48:57] [Rank 0] step:7041/10000 train_time:296435ms step_avg:42.10ms +[2025-09-11 06:48:58] [Rank 0] step:7061/10000 train_time:297136ms step_avg:42.08ms +[2025-09-11 06:48:58] [Rank 0] step:7061/10000 train_time:297136ms step_avg:42.08ms +[2025-09-11 06:48:59] [Rank 0] step:7081/10000 train_time:297836ms step_avg:42.06ms +[2025-09-11 06:48:59] [Rank 0] step:7081/10000 train_time:297836ms step_avg:42.06ms +[2025-09-11 06:48:59] [Rank 0] step:7101/10000 train_time:298536ms step_avg:42.04ms +[2025-09-11 06:48:59] [Rank 0] step:7101/10000 train_time:298536ms step_avg:42.04ms +[2025-09-11 06:49:00] [Rank 0] step:7121/10000 train_time:299237ms step_avg:42.02ms +[2025-09-11 06:49:00] [Rank 0] step:7121/10000 train_time:299237ms step_avg:42.02ms +[2025-09-11 06:49:01] [Rank 0] step:7141/10000 train_time:299937ms step_avg:42.00ms +[2025-09-11 06:49:01] [Rank 0] step:7141/10000 train_time:299937ms step_avg:42.00ms +[2025-09-11 06:49:01] [Rank 0] step:7161/10000 train_time:300638ms step_avg:41.98ms +[2025-09-11 06:49:01] [Rank 0] step:7161/10000 train_time:300638ms step_avg:41.98ms +[2025-09-11 06:49:02] [Rank 0] step:7181/10000 train_time:301451ms step_avg:41.98ms +[2025-09-11 06:49:02] [Rank 0] step:7181/10000 train_time:301451ms step_avg:41.98ms +[2025-09-11 06:49:03] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 06:49:03] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 06:49:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 06:49:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 06:49:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 06:49:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 06:49:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:49:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:49:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 06:49:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 06:49:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 06:49:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 06:49:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 06:49:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 06:49:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 06:49:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 06:49:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 06:49:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 06:49:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 06:49:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 06:49:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 06:49:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 06:49:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 06:49:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 06:49:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 06:49:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 06:49:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 06:49:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 06:49:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 06:49:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 06:49:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 06:49:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 06:49:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 06:49:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 06:49:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 06:49:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 06:49:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 06:49:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 06:49:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 06:49:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 06:49:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 06:49:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 06:49:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 06:49:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 06:49:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 06:49:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 06:49:13] [Rank 0] PRINT: step:7200/10000 val_loss:4.2413 total_sharp:3.0771e-05 L1_sharp:3.2086e-04 L2_sharp:4.6034e-05 L3_sharp:2.1728e-06 L4_sharp:2.1831e-05 L5_sharp:5.7755e-05 L6_sharp:9.4846e-05 L7_sharp:9.0397e-05 L8_sharp:8.3780e-05 L9_sharp:1.3261e-04 L10_sharp:1.6526e-04 L11_sharp:2.3330e-04 L12_sharp:1.6843e-03 total_fnorm:1.3900e+02 total_l1_linf:3.1130e+05 total_spectral:6.9500e+01 L1_fnorm:4.2188e+00 L2_fnorm:4.0000e+00 L3_fnorm:3.5938e+00 L4_fnorm:4.2500e+00 L5_fnorm:4.3438e+00 L6_fnorm:4.3438e+00 L7_fnorm:4.3438e+00 L8_fnorm:4.2188e+00 L9_fnorm:4.3125e+00 L10_fnorm:4.3125e+00 L11_fnorm:4.2500e+00 L12_fnorm:4.2812e+00 L1_l1linf:9.6094e-01 L2_l1linf:7.9688e-01 L3_l1linf:8.5156e-01 L4_l1linf:9.2188e-01 L5_l1linf:9.7266e-01 L6_l1linf:9.6094e-01 L7_l1linf:9.8828e-01 L8_l1linf:9.6484e-01 L9_l1linf:9.5312e-01 L10_l1linf:9.2188e-01 L11_l1linf:8.6719e-01 L12_l1linf:8.7891e-01 L1_spectral:5.7458e-02 L2_spectral:5.5790e-02 L3_spectral:5.7978e-02 L4_spectral:5.6982e-02 L5_spectral:5.8110e-02 L6_spectral:5.8002e-02 L7_spectral:5.8052e-02 L8_spectral:5.6495e-02 L9_spectral:5.8423e-02 L10_spectral:5.8138e-02 L11_spectral:5.8610e-02 L12_spectral:5.8459e-02 train_time:302544ms step_avg:42.02ms +[2025-09-11 06:49:13] [Rank 0] PRINT: step:7200/10000 
val_loss:4.2413 total_sharp:3.0771e-05 L1_sharp:3.2086e-04 L2_sharp:4.6034e-05 L3_sharp:2.1728e-06 L4_sharp:2.1831e-05 L5_sharp:5.7755e-05 L6_sharp:9.4846e-05 L7_sharp:9.0397e-05 L8_sharp:8.3780e-05 L9_sharp:1.3261e-04 L10_sharp:1.6526e-04 L11_sharp:2.3330e-04 L12_sharp:1.6843e-03 total_fnorm:1.3900e+02 total_l1_linf:3.1130e+05 total_spectral:6.9500e+01 L1_fnorm:4.2188e+00 L2_fnorm:4.0000e+00 L3_fnorm:3.5938e+00 L4_fnorm:4.2500e+00 L5_fnorm:4.3438e+00 L6_fnorm:4.3438e+00 L7_fnorm:4.3438e+00 L8_fnorm:4.2188e+00 L9_fnorm:4.3125e+00 L10_fnorm:4.3125e+00 L11_fnorm:4.2500e+00 L12_fnorm:4.2812e+00 L1_l1linf:9.6094e-01 L2_l1linf:7.9688e-01 L3_l1linf:8.5156e-01 L4_l1linf:9.2188e-01 L5_l1linf:9.7266e-01 L6_l1linf:9.6094e-01 L7_l1linf:9.8828e-01 L8_l1linf:9.6484e-01 L9_l1linf:9.5312e-01 L10_l1linf:9.2188e-01 L11_l1linf:8.6719e-01 L12_l1linf:8.7891e-01 L1_spectral:5.7458e-02 L2_spectral:5.5790e-02 L3_spectral:5.7978e-02 L4_spectral:5.6982e-02 L5_spectral:5.8110e-02 L6_spectral:5.8002e-02 L7_spectral:5.8052e-02 L8_spectral:5.6495e-02 L9_spectral:5.8423e-02 L10_spectral:5.8138e-02 L11_spectral:5.8610e-02 L12_spectral:5.8459e-02 train_time:302544ms step_avg:42.02ms +[2025-09-11 06:49:14] [Rank 0] step:7201/10000 train_time:303721ms step_avg:42.18ms +[2025-09-11 06:49:14] [Rank 0] step:7201/10000 train_time:303721ms step_avg:42.18ms +[2025-09-11 06:49:15] [Rank 0] step:7221/10000 train_time:304414ms step_avg:42.16ms +[2025-09-11 06:49:15] [Rank 0] step:7221/10000 train_time:304414ms step_avg:42.16ms +[2025-09-11 06:49:16] [Rank 0] step:7241/10000 train_time:305117ms step_avg:42.14ms +[2025-09-11 06:49:16] [Rank 0] step:7241/10000 train_time:305117ms step_avg:42.14ms +[2025-09-11 06:49:17] [Rank 0] step:7261/10000 train_time:305820ms step_avg:42.12ms +[2025-09-11 06:49:17] [Rank 0] step:7261/10000 train_time:305820ms step_avg:42.12ms +[2025-09-11 06:49:17] [Rank 0] step:7281/10000 train_time:306525ms step_avg:42.10ms +[2025-09-11 06:49:17] [Rank 0] step:7281/10000 
train_time:306525ms step_avg:42.10ms +[2025-09-11 06:49:18] [Rank 0] step:7301/10000 train_time:307225ms step_avg:42.08ms +[2025-09-11 06:49:18] [Rank 0] step:7301/10000 train_time:307225ms step_avg:42.08ms +[2025-09-11 06:49:19] [Rank 0] step:7321/10000 train_time:307926ms step_avg:42.06ms +[2025-09-11 06:49:19] [Rank 0] step:7321/10000 train_time:307926ms step_avg:42.06ms +[2025-09-11 06:49:19] [Rank 0] step:7341/10000 train_time:308628ms step_avg:42.04ms +[2025-09-11 06:49:19] [Rank 0] step:7341/10000 train_time:308628ms step_avg:42.04ms +[2025-09-11 06:49:20] [Rank 0] step:7361/10000 train_time:309329ms step_avg:42.02ms +[2025-09-11 06:49:20] [Rank 0] step:7361/10000 train_time:309329ms step_avg:42.02ms +[2025-09-11 06:49:21] [Rank 0] step:7381/10000 train_time:310031ms step_avg:42.00ms +[2025-09-11 06:49:21] [Rank 0] step:7381/10000 train_time:310031ms step_avg:42.00ms +[2025-09-11 06:49:22] [Rank 0] step:7401/10000 train_time:310731ms step_avg:41.99ms +[2025-09-11 06:49:22] [Rank 0] step:7401/10000 train_time:310731ms step_avg:41.99ms +[2025-09-11 06:49:22] [Rank 0] step:7421/10000 train_time:311431ms step_avg:41.97ms +[2025-09-11 06:49:22] [Rank 0] step:7421/10000 train_time:311431ms step_avg:41.97ms +[2025-09-11 06:49:23] [Rank 0] step:7441/10000 train_time:312133ms step_avg:41.95ms +[2025-09-11 06:49:23] [Rank 0] step:7441/10000 train_time:312133ms step_avg:41.95ms +[2025-09-11 06:49:24] [Rank 0] step:7461/10000 train_time:312834ms step_avg:41.93ms +[2025-09-11 06:49:24] [Rank 0] step:7461/10000 train_time:312834ms step_avg:41.93ms +[2025-09-11 06:49:24] [Rank 0] step:7481/10000 train_time:313538ms step_avg:41.91ms +[2025-09-11 06:49:24] [Rank 0] step:7481/10000 train_time:313538ms step_avg:41.91ms +[2025-09-11 06:49:25] [Rank 0] step:7501/10000 train_time:314241ms step_avg:41.89ms +[2025-09-11 06:49:25] [Rank 0] step:7501/10000 train_time:314241ms step_avg:41.89ms +[2025-09-11 06:49:26] [Rank 0] step:7521/10000 train_time:314943ms step_avg:41.88ms 
+[2025-09-11 06:49:26] [Rank 0] step:7521/10000 train_time:314943ms step_avg:41.88ms +[2025-09-11 06:49:26] [Rank 0] step:7541/10000 train_time:315643ms step_avg:41.86ms +[2025-09-11 06:49:26] [Rank 0] step:7541/10000 train_time:315643ms step_avg:41.86ms +[2025-09-11 06:49:27] [Rank 0] step:7561/10000 train_time:316347ms step_avg:41.84ms +[2025-09-11 06:49:27] [Rank 0] step:7561/10000 train_time:316347ms step_avg:41.84ms +[2025-09-11 06:49:28] [Rank 0] step:7581/10000 train_time:317049ms step_avg:41.82ms +[2025-09-11 06:49:28] [Rank 0] step:7581/10000 train_time:317049ms step_avg:41.82ms +[2025-09-11 06:49:29] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 06:49:29] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 06:49:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 06:49:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 06:49:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 06:49:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 06:49:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:49:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:49:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 06:49:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 06:49:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 06:49:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 06:49:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 06:49:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 06:49:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 06:49:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 06:49:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 06:49:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 06:49:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 06:49:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 06:49:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 06:49:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 06:49:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 06:49:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 06:49:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 06:49:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 06:49:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 06:49:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 06:49:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 06:49:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 06:49:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 06:49:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 06:49:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 06:49:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 06:49:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 06:49:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 06:49:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 06:49:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 06:49:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 06:49:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 06:49:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 06:49:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 06:49:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 06:49:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 06:49:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 06:49:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:49:42] [Rank 0] PRINT: step:7600/10000 val_loss:4.2048 total_sharp:3.3711e-05 L1_sharp:3.0751e-04 L2_sharp:4.8278e-05 L3_sharp:2.4472e-05 L4_sharp:3.2898e-05 L5_sharp:5.3575e-05 L6_sharp:6.6644e-05 L7_sharp:4.9049e-05 L8_sharp:1.3965e-04 L9_sharp:1.5127e-04 L10_sharp:1.6530e-04 L11_sharp:2.5860e-04 L12_sharp:4.0853e-03 total_fnorm:1.1350e+02 total_l1_linf:2.4269e+05 total_spectral:5.7000e+01 L1_fnorm:3.5781e+00 L2_fnorm:3.3750e+00 L3_fnorm:3.0312e+00 L4_fnorm:3.5938e+00 L5_fnorm:3.6406e+00 L6_fnorm:3.6562e+00 L7_fnorm:3.6719e+00 L8_fnorm:3.5156e+00 L9_fnorm:3.6562e+00 L10_fnorm:3.6094e+00 L11_fnorm:3.5781e+00 L12_fnorm:3.6250e+00 L1_l1linf:7.7734e-01 L2_l1linf:6.7578e-01 L3_l1linf:7.4219e-01 L4_l1linf:7.5391e-01 L5_l1linf:7.7734e-01 L6_l1linf:7.8906e-01 L7_l1linf:7.9297e-01 L8_l1linf:7.8125e-01 L9_l1linf:7.6172e-01 L10_l1linf:7.2656e-01 L11_l1linf:6.9922e-01 L12_l1linf:7.3047e-01 L1_spectral:4.9759e-02 L2_spectral:4.8327e-02 L3_spectral:4.9920e-02 L4_spectral:4.8867e-02 L5_spectral:4.9271e-02 L6_spectral:4.9835e-02 L7_spectral:5.0019e-02 L8_spectral:4.8124e-02 L9_spectral:5.0718e-02 L10_spectral:5.0428e-02 L11_spectral:5.0410e-02 L12_spectral:5.0554e-02 train_time:317734ms step_avg:41.81ms +[2025-09-11 06:49:42] [Rank 0] PRINT: step:7600/10000 val_loss:4.2048 total_sharp:3.3711e-05 L1_sharp:3.0751e-04 L2_sharp:4.8278e-05 L3_sharp:2.4472e-05 L4_sharp:3.2898e-05 L5_sharp:5.3575e-05 L6_sharp:6.6644e-05 L7_sharp:4.9049e-05 L8_sharp:1.3965e-04 L9_sharp:1.5127e-04 L10_sharp:1.6530e-04 L11_sharp:2.5860e-04 L12_sharp:4.0853e-03 total_fnorm:1.1350e+02 total_l1_linf:2.4269e+05 total_spectral:5.7000e+01 L1_fnorm:3.5781e+00 L2_fnorm:3.3750e+00 L3_fnorm:3.0312e+00 L4_fnorm:3.5938e+00 L5_fnorm:3.6406e+00 L6_fnorm:3.6562e+00 L7_fnorm:3.6719e+00 L8_fnorm:3.5156e+00 L9_fnorm:3.6562e+00 L10_fnorm:3.6094e+00 L11_fnorm:3.5781e+00 L12_fnorm:3.6250e+00 L1_l1linf:7.7734e-01 L2_l1linf:6.7578e-01 L3_l1linf:7.4219e-01 L4_l1linf:7.5391e-01 L5_l1linf:7.7734e-01 
L6_l1linf:7.8906e-01 L7_l1linf:7.9297e-01 L8_l1linf:7.8125e-01 L9_l1linf:7.6172e-01 L10_l1linf:7.2656e-01 L11_l1linf:6.9922e-01 L12_l1linf:7.3047e-01 L1_spectral:4.9759e-02 L2_spectral:4.8327e-02 L3_spectral:4.9920e-02 L4_spectral:4.8867e-02 L5_spectral:4.9271e-02 L6_spectral:4.9835e-02 L7_spectral:5.0019e-02 L8_spectral:4.8124e-02 L9_spectral:5.0718e-02 L10_spectral:5.0428e-02 L11_spectral:5.0410e-02 L12_spectral:5.0554e-02 train_time:317734ms step_avg:41.81ms +[2025-09-11 06:49:43] [Rank 0] step:7601/10000 train_time:318940ms step_avg:41.96ms +[2025-09-11 06:49:43] [Rank 0] step:7601/10000 train_time:318940ms step_avg:41.96ms +[2025-09-11 06:49:44] [Rank 0] step:7621/10000 train_time:319681ms step_avg:41.95ms +[2025-09-11 06:49:44] [Rank 0] step:7621/10000 train_time:319681ms step_avg:41.95ms +[2025-09-11 06:49:45] [Rank 0] step:7641/10000 train_time:320384ms step_avg:41.93ms +[2025-09-11 06:49:45] [Rank 0] step:7641/10000 train_time:320384ms step_avg:41.93ms +[2025-09-11 06:49:46] [Rank 0] step:7661/10000 train_time:321085ms step_avg:41.91ms +[2025-09-11 06:49:46] [Rank 0] step:7661/10000 train_time:321085ms step_avg:41.91ms +[2025-09-11 06:49:46] [Rank 0] step:7681/10000 train_time:321787ms step_avg:41.89ms +[2025-09-11 06:49:46] [Rank 0] step:7681/10000 train_time:321787ms step_avg:41.89ms +[2025-09-11 06:49:47] [Rank 0] step:7701/10000 train_time:322491ms step_avg:41.88ms +[2025-09-11 06:49:47] [Rank 0] step:7701/10000 train_time:322491ms step_avg:41.88ms +[2025-09-11 06:49:48] [Rank 0] step:7721/10000 train_time:323194ms step_avg:41.86ms +[2025-09-11 06:49:48] [Rank 0] step:7721/10000 train_time:323194ms step_avg:41.86ms +[2025-09-11 06:49:48] [Rank 0] step:7741/10000 train_time:323897ms step_avg:41.84ms +[2025-09-11 06:49:48] [Rank 0] step:7741/10000 train_time:323897ms step_avg:41.84ms +[2025-09-11 06:49:49] [Rank 0] step:7761/10000 train_time:324599ms step_avg:41.82ms +[2025-09-11 06:49:49] [Rank 0] step:7761/10000 train_time:324599ms step_avg:41.82ms 
+[2025-09-11 06:49:50] [Rank 0] step:7781/10000 train_time:325303ms step_avg:41.81ms +[2025-09-11 06:49:50] [Rank 0] step:7781/10000 train_time:325303ms step_avg:41.81ms +[2025-09-11 06:49:51] [Rank 0] step:7801/10000 train_time:326003ms step_avg:41.79ms +[2025-09-11 06:49:51] [Rank 0] step:7801/10000 train_time:326003ms step_avg:41.79ms +[2025-09-11 06:49:51] [Rank 0] step:7821/10000 train_time:326707ms step_avg:41.77ms +[2025-09-11 06:49:51] [Rank 0] step:7821/10000 train_time:326707ms step_avg:41.77ms +[2025-09-11 06:49:52] [Rank 0] step:7841/10000 train_time:327411ms step_avg:41.76ms +[2025-09-11 06:49:52] [Rank 0] step:7841/10000 train_time:327411ms step_avg:41.76ms +[2025-09-11 06:49:53] [Rank 0] step:7861/10000 train_time:328118ms step_avg:41.74ms +[2025-09-11 06:49:53] [Rank 0] step:7861/10000 train_time:328118ms step_avg:41.74ms +[2025-09-11 06:49:53] [Rank 0] step:7881/10000 train_time:328819ms step_avg:41.72ms +[2025-09-11 06:49:53] [Rank 0] step:7881/10000 train_time:328819ms step_avg:41.72ms +[2025-09-11 06:49:54] [Rank 0] step:7901/10000 train_time:329522ms step_avg:41.71ms +[2025-09-11 06:49:54] [Rank 0] step:7901/10000 train_time:329522ms step_avg:41.71ms +[2025-09-11 06:49:55] [Rank 0] step:7921/10000 train_time:330225ms step_avg:41.69ms +[2025-09-11 06:49:55] [Rank 0] step:7921/10000 train_time:330225ms step_avg:41.69ms +[2025-09-11 06:49:55] [Rank 0] step:7941/10000 train_time:330930ms step_avg:41.67ms +[2025-09-11 06:49:55] [Rank 0] step:7941/10000 train_time:330930ms step_avg:41.67ms +[2025-09-11 06:49:56] [Rank 0] step:7961/10000 train_time:331631ms step_avg:41.66ms +[2025-09-11 06:49:56] [Rank 0] step:7961/10000 train_time:331631ms step_avg:41.66ms +[2025-09-11 06:49:57] [Rank 0] step:7981/10000 train_time:332335ms step_avg:41.64ms +[2025-09-11 06:49:57] [Rank 0] step:7981/10000 train_time:332335ms step_avg:41.64ms +[2025-09-11 06:49:58] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 06:49:58] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 06:49:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 06:49:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 06:50:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 06:50:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 06:50:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:50:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:50:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 06:50:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 06:50:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 06:50:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 06:50:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 06:50:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 06:50:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 06:50:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 06:50:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 06:50:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 06:50:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 06:50:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 06:50:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 06:50:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 06:50:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 06:50:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 06:50:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 06:50:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 06:50:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 06:50:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 06:50:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 06:50:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 06:50:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 06:50:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 06:50:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 06:50:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 06:50:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 06:50:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 06:50:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 06:50:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 06:50:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 06:50:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 06:50:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 06:50:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 06:50:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 06:50:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 06:50:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 06:50:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 06:50:08] [Rank 0] PRINT: step:8000/10000 val_loss:4.1690 total_sharp:3.2221e-05 L1_sharp:3.8115e-04 L2_sharp:2.8510e-05 L3_sharp:-7.0253e-06 L4_sharp:4.3098e-05 L5_sharp:9.7408e-05 L6_sharp:1.1518e-04 L7_sharp:7.9867e-05 L8_sharp:1.2797e-04 L9_sharp:1.5965e-04 L10_sharp:1.6349e-04 L11_sharp:2.8924e-04 L12_sharp:5.3905e-03 total_fnorm:1.0100e+02 total_l1_linf:2.0685e+05 total_spectral:5.0750e+01 L1_fnorm:2.9375e+00 L2_fnorm:2.6719e+00 L3_fnorm:2.4688e+00 L4_fnorm:2.9219e+00 L5_fnorm:2.9688e+00 L6_fnorm:2.9844e+00 L7_fnorm:2.9844e+00 L8_fnorm:2.8750e+00 L9_fnorm:2.9531e+00 L10_fnorm:2.9531e+00 L11_fnorm:2.9219e+00 L12_fnorm:2.9531e+00 L1_l1linf:6.0547e-01 L2_l1linf:5.5859e-01 L3_l1linf:6.3672e-01 L4_l1linf:5.9766e-01 L5_l1linf:6.0156e-01 L6_l1linf:6.2500e-01 L7_l1linf:6.2891e-01 L8_l1linf:5.9766e-01 L9_l1linf:5.8984e-01 L10_l1linf:5.8203e-01 L11_l1linf:5.5469e-01 L12_l1linf:5.6641e-01 L1_spectral:4.1752e-02 L2_spectral:4.1283e-02 L3_spectral:4.1167e-02 L4_spectral:4.0729e-02 L5_spectral:4.0837e-02 L6_spectral:4.1225e-02 L7_spectral:4.1390e-02 L8_spectral:4.0348e-02 L9_spectral:4.1659e-02 L10_spectral:4.1957e-02 L11_spectral:4.1936e-02 L12_spectral:4.2011e-02 train_time:333017ms step_avg:41.63ms +[2025-09-11 06:50:08] [Rank 0] PRINT: step:8000/10000 
val_loss:4.1690 total_sharp:3.2221e-05 L1_sharp:3.8115e-04 L2_sharp:2.8510e-05 L3_sharp:-7.0253e-06 L4_sharp:4.3098e-05 L5_sharp:9.7408e-05 L6_sharp:1.1518e-04 L7_sharp:7.9867e-05 L8_sharp:1.2797e-04 L9_sharp:1.5965e-04 L10_sharp:1.6349e-04 L11_sharp:2.8924e-04 L12_sharp:5.3905e-03 total_fnorm:1.0100e+02 total_l1_linf:2.0685e+05 total_spectral:5.0750e+01 L1_fnorm:2.9375e+00 L2_fnorm:2.6719e+00 L3_fnorm:2.4688e+00 L4_fnorm:2.9219e+00 L5_fnorm:2.9688e+00 L6_fnorm:2.9844e+00 L7_fnorm:2.9844e+00 L8_fnorm:2.8750e+00 L9_fnorm:2.9531e+00 L10_fnorm:2.9531e+00 L11_fnorm:2.9219e+00 L12_fnorm:2.9531e+00 L1_l1linf:6.0547e-01 L2_l1linf:5.5859e-01 L3_l1linf:6.3672e-01 L4_l1linf:5.9766e-01 L5_l1linf:6.0156e-01 L6_l1linf:6.2500e-01 L7_l1linf:6.2891e-01 L8_l1linf:5.9766e-01 L9_l1linf:5.8984e-01 L10_l1linf:5.8203e-01 L11_l1linf:5.5469e-01 L12_l1linf:5.6641e-01 L1_spectral:4.1752e-02 L2_spectral:4.1283e-02 L3_spectral:4.1167e-02 L4_spectral:4.0729e-02 L5_spectral:4.0837e-02 L6_spectral:4.1225e-02 L7_spectral:4.1390e-02 L8_spectral:4.0348e-02 L9_spectral:4.1659e-02 L10_spectral:4.1957e-02 L11_spectral:4.1936e-02 L12_spectral:4.2011e-02 train_time:333017ms step_avg:41.63ms +[2025-09-11 06:50:09] [Rank 0] step:8001/10000 train_time:334191ms step_avg:41.77ms +[2025-09-11 06:50:09] [Rank 0] step:8001/10000 train_time:334191ms step_avg:41.77ms +[2025-09-11 06:50:10] [Rank 0] step:8021/10000 train_time:334911ms step_avg:41.75ms +[2025-09-11 06:50:10] [Rank 0] step:8021/10000 train_time:334911ms step_avg:41.75ms +[2025-09-11 06:50:10] [Rank 0] step:8041/10000 train_time:335615ms step_avg:41.74ms +[2025-09-11 06:50:10] [Rank 0] step:8041/10000 train_time:335615ms step_avg:41.74ms +[2025-09-11 06:50:11] [Rank 0] step:8061/10000 train_time:336332ms step_avg:41.72ms +[2025-09-11 06:50:11] [Rank 0] step:8061/10000 train_time:336332ms step_avg:41.72ms +[2025-09-11 06:50:12] [Rank 0] step:8081/10000 train_time:337032ms step_avg:41.71ms +[2025-09-11 06:50:12] [Rank 0] step:8081/10000 
train_time:337032ms step_avg:41.71ms +[2025-09-11 06:50:12] [Rank 0] step:8101/10000 train_time:337734ms step_avg:41.69ms +[2025-09-11 06:50:12] [Rank 0] step:8101/10000 train_time:337734ms step_avg:41.69ms +[2025-09-11 06:50:13] [Rank 0] step:8121/10000 train_time:338440ms step_avg:41.67ms +[2025-09-11 06:50:13] [Rank 0] step:8121/10000 train_time:338440ms step_avg:41.67ms +[2025-09-11 06:50:15] [Rank 0] step:8141/10000 train_time:339870ms step_avg:41.75ms +[2025-09-11 06:50:15] [Rank 0] step:8141/10000 train_time:339870ms step_avg:41.75ms +[2025-09-11 06:50:15] [Rank 0] step:8161/10000 train_time:340576ms step_avg:41.73ms +[2025-09-11 06:50:15] [Rank 0] step:8161/10000 train_time:340576ms step_avg:41.73ms +[2025-09-11 06:50:16] [Rank 0] step:8181/10000 train_time:341290ms step_avg:41.72ms +[2025-09-11 06:50:16] [Rank 0] step:8181/10000 train_time:341290ms step_avg:41.72ms +[2025-09-11 06:50:17] [Rank 0] step:8201/10000 train_time:342000ms step_avg:41.70ms +[2025-09-11 06:50:17] [Rank 0] step:8201/10000 train_time:342000ms step_avg:41.70ms +[2025-09-11 06:50:17] [Rank 0] step:8221/10000 train_time:342710ms step_avg:41.69ms +[2025-09-11 06:50:17] [Rank 0] step:8221/10000 train_time:342710ms step_avg:41.69ms +[2025-09-11 06:50:18] [Rank 0] step:8241/10000 train_time:343429ms step_avg:41.67ms +[2025-09-11 06:50:18] [Rank 0] step:8241/10000 train_time:343429ms step_avg:41.67ms +[2025-09-11 06:50:19] [Rank 0] step:8261/10000 train_time:344136ms step_avg:41.66ms +[2025-09-11 06:50:19] [Rank 0] step:8261/10000 train_time:344136ms step_avg:41.66ms +[2025-09-11 06:50:20] [Rank 0] step:8281/10000 train_time:344843ms step_avg:41.64ms +[2025-09-11 06:50:20] [Rank 0] step:8281/10000 train_time:344843ms step_avg:41.64ms +[2025-09-11 06:50:20] [Rank 0] step:8301/10000 train_time:345551ms step_avg:41.63ms +[2025-09-11 06:50:20] [Rank 0] step:8301/10000 train_time:345551ms step_avg:41.63ms +[2025-09-11 06:50:21] [Rank 0] step:8321/10000 train_time:346260ms step_avg:41.61ms 
+[2025-09-11 06:50:21] [Rank 0] step:8321/10000 train_time:346260ms step_avg:41.61ms +[2025-09-11 06:50:22] [Rank 0] step:8341/10000 train_time:346976ms step_avg:41.60ms +[2025-09-11 06:50:22] [Rank 0] step:8341/10000 train_time:346976ms step_avg:41.60ms +[2025-09-11 06:50:22] [Rank 0] step:8361/10000 train_time:347681ms step_avg:41.58ms +[2025-09-11 06:50:22] [Rank 0] step:8361/10000 train_time:347681ms step_avg:41.58ms +[2025-09-11 06:50:23] [Rank 0] step:8381/10000 train_time:348393ms step_avg:41.57ms +[2025-09-11 06:50:23] [Rank 0] step:8381/10000 train_time:348393ms step_avg:41.57ms +[2025-09-11 06:50:24] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 06:50:24] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 06:50:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 06:50:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 06:50:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 06:50:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 06:50:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:50:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:50:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 06:50:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 06:50:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 06:50:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 06:50:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 06:50:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 06:50:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 06:50:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 06:50:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 06:50:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 06:50:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 06:50:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 06:50:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 06:50:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 06:50:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 06:50:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 06:50:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 06:50:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 06:50:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 06:50:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 06:50:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 06:50:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 06:50:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 06:50:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 06:50:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 06:50:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 06:50:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 06:50:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 06:50:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 06:50:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 06:50:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 06:50:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 06:50:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 06:50:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 06:50:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 06:50:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 06:50:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 06:50:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:50:34] [Rank 0] PRINT: step:8400/10000 val_loss:4.1380 total_sharp:2.2184e-05 L1_sharp:2.9527e-04 L2_sharp:4.5925e-05 L3_sharp:4.3463e-06 L4_sharp:7.8472e-05 L5_sharp:8.6864e-05 L6_sharp:4.0595e-05 L7_sharp:6.6474e-05 L8_sharp:1.0057e-04 L9_sharp:1.2329e-04 L10_sharp:1.3826e-04 L11_sharp:2.3198e-04 L12_sharp:1.8093e-03 total_fnorm:8.1500e+01 total_l1_linf:1.5258e+05 total_spectral:4.1000e+01 L1_fnorm:2.3281e+00 L2_fnorm:2.1094e+00 L3_fnorm:1.9297e+00 L4_fnorm:2.2969e+00 L5_fnorm:2.3125e+00 L6_fnorm:2.3281e+00 L7_fnorm:2.3281e+00 L8_fnorm:2.2500e+00 L9_fnorm:2.2969e+00 L10_fnorm:2.2969e+00 L11_fnorm:2.2656e+00 L12_fnorm:2.2969e+00 L1_l1linf:4.3555e-01 L2_l1linf:4.7461e-01 L3_l1linf:5.2344e-01 L4_l1linf:4.2773e-01 L5_l1linf:4.6484e-01 L6_l1linf:4.4531e-01 L7_l1linf:4.5312e-01 L8_l1linf:4.4922e-01 L9_l1linf:4.2578e-01 L10_l1linf:4.2969e-01 L11_l1linf:4.0430e-01 L12_l1linf:4.3945e-01 L1_spectral:3.3256e-02 L2_spectral:3.4625e-02 L3_spectral:3.2815e-02 L4_spectral:3.2431e-02 L5_spectral:3.2301e-02 L6_spectral:3.2643e-02 L7_spectral:3.2928e-02 L8_spectral:3.2631e-02 L9_spectral:3.3091e-02 L10_spectral:3.3384e-02 L11_spectral:3.3843e-02 L12_spectral:3.3776e-02 train_time:349085ms step_avg:41.56ms +[2025-09-11 06:50:34] [Rank 0] PRINT: step:8400/10000 val_loss:4.1380 total_sharp:2.2184e-05 L1_sharp:2.9527e-04 L2_sharp:4.5925e-05 L3_sharp:4.3463e-06 L4_sharp:7.8472e-05 L5_sharp:8.6864e-05 L6_sharp:4.0595e-05 L7_sharp:6.6474e-05 L8_sharp:1.0057e-04 L9_sharp:1.2329e-04 L10_sharp:1.3826e-04 L11_sharp:2.3198e-04 L12_sharp:1.8093e-03 total_fnorm:8.1500e+01 total_l1_linf:1.5258e+05 total_spectral:4.1000e+01 L1_fnorm:2.3281e+00 L2_fnorm:2.1094e+00 L3_fnorm:1.9297e+00 L4_fnorm:2.2969e+00 L5_fnorm:2.3125e+00 L6_fnorm:2.3281e+00 L7_fnorm:2.3281e+00 L8_fnorm:2.2500e+00 L9_fnorm:2.2969e+00 L10_fnorm:2.2969e+00 L11_fnorm:2.2656e+00 L12_fnorm:2.2969e+00 L1_l1linf:4.3555e-01 L2_l1linf:4.7461e-01 L3_l1linf:5.2344e-01 L4_l1linf:4.2773e-01 L5_l1linf:4.6484e-01 
L6_l1linf:4.4531e-01 L7_l1linf:4.5312e-01 L8_l1linf:4.4922e-01 L9_l1linf:4.2578e-01 L10_l1linf:4.2969e-01 L11_l1linf:4.0430e-01 L12_l1linf:4.3945e-01 L1_spectral:3.3256e-02 L2_spectral:3.4625e-02 L3_spectral:3.2815e-02 L4_spectral:3.2431e-02 L5_spectral:3.2301e-02 L6_spectral:3.2643e-02 L7_spectral:3.2928e-02 L8_spectral:3.2631e-02 L9_spectral:3.3091e-02 L10_spectral:3.3384e-02 L11_spectral:3.3843e-02 L12_spectral:3.3776e-02 train_time:349085ms step_avg:41.56ms +[2025-09-11 06:50:35] [Rank 0] step:8401/10000 train_time:350254ms step_avg:41.69ms +[2025-09-11 06:50:35] [Rank 0] step:8401/10000 train_time:350254ms step_avg:41.69ms +[2025-09-11 06:50:36] [Rank 0] step:8421/10000 train_time:350987ms step_avg:41.68ms +[2025-09-11 06:50:36] [Rank 0] step:8421/10000 train_time:350987ms step_avg:41.68ms +[2025-09-11 06:50:37] [Rank 0] step:8441/10000 train_time:351701ms step_avg:41.67ms +[2025-09-11 06:50:37] [Rank 0] step:8441/10000 train_time:351701ms step_avg:41.67ms +[2025-09-11 06:50:37] [Rank 0] step:8461/10000 train_time:352418ms step_avg:41.65ms +[2025-09-11 06:50:37] [Rank 0] step:8461/10000 train_time:352418ms step_avg:41.65ms +[2025-09-11 06:50:38] [Rank 0] step:8481/10000 train_time:353130ms step_avg:41.64ms +[2025-09-11 06:50:38] [Rank 0] step:8481/10000 train_time:353130ms step_avg:41.64ms +[2025-09-11 06:50:39] [Rank 0] step:8501/10000 train_time:353840ms step_avg:41.62ms +[2025-09-11 06:50:39] [Rank 0] step:8501/10000 train_time:353840ms step_avg:41.62ms +[2025-09-11 06:50:39] [Rank 0] step:8521/10000 train_time:354552ms step_avg:41.61ms +[2025-09-11 06:50:39] [Rank 0] step:8521/10000 train_time:354552ms step_avg:41.61ms +[2025-09-11 06:50:40] [Rank 0] step:8541/10000 train_time:355261ms step_avg:41.59ms +[2025-09-11 06:50:40] [Rank 0] step:8541/10000 train_time:355261ms step_avg:41.59ms +[2025-09-11 06:50:41] [Rank 0] step:8561/10000 train_time:355977ms step_avg:41.58ms +[2025-09-11 06:50:41] [Rank 0] step:8561/10000 train_time:355977ms step_avg:41.58ms 
+[2025-09-11 06:50:42] [Rank 0] step:8581/10000 train_time:356689ms step_avg:41.57ms +[2025-09-11 06:50:42] [Rank 0] step:8581/10000 train_time:356689ms step_avg:41.57ms +[2025-09-11 06:50:42] [Rank 0] step:8601/10000 train_time:357401ms step_avg:41.55ms +[2025-09-11 06:50:42] [Rank 0] step:8601/10000 train_time:357401ms step_avg:41.55ms +[2025-09-11 06:50:43] [Rank 0] step:8621/10000 train_time:358110ms step_avg:41.54ms +[2025-09-11 06:50:43] [Rank 0] step:8621/10000 train_time:358110ms step_avg:41.54ms +[2025-09-11 06:50:44] [Rank 0] step:8641/10000 train_time:358818ms step_avg:41.53ms +[2025-09-11 06:50:44] [Rank 0] step:8641/10000 train_time:358818ms step_avg:41.53ms +[2025-09-11 06:50:44] [Rank 0] step:8661/10000 train_time:359529ms step_avg:41.51ms +[2025-09-11 06:50:44] [Rank 0] step:8661/10000 train_time:359529ms step_avg:41.51ms +[2025-09-11 06:50:45] [Rank 0] step:8681/10000 train_time:360240ms step_avg:41.50ms +[2025-09-11 06:50:45] [Rank 0] step:8681/10000 train_time:360240ms step_avg:41.50ms +[2025-09-11 06:50:46] [Rank 0] step:8701/10000 train_time:360948ms step_avg:41.48ms +[2025-09-11 06:50:46] [Rank 0] step:8701/10000 train_time:360948ms step_avg:41.48ms +[2025-09-11 06:50:46] [Rank 0] step:8721/10000 train_time:361660ms step_avg:41.47ms +[2025-09-11 06:50:46] [Rank 0] step:8721/10000 train_time:361660ms step_avg:41.47ms +[2025-09-11 06:50:47] [Rank 0] step:8741/10000 train_time:362366ms step_avg:41.46ms +[2025-09-11 06:50:47] [Rank 0] step:8741/10000 train_time:362366ms step_avg:41.46ms +[2025-09-11 06:50:48] [Rank 0] step:8761/10000 train_time:363079ms step_avg:41.44ms +[2025-09-11 06:50:48] [Rank 0] step:8761/10000 train_time:363079ms step_avg:41.44ms +[2025-09-11 06:50:49] [Rank 0] step:8781/10000 train_time:363787ms step_avg:41.43ms +[2025-09-11 06:50:49] [Rank 0] step:8781/10000 train_time:363787ms step_avg:41.43ms +[2025-09-11 06:50:49] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 06:50:49] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 06:50:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 06:50:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 06:50:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 06:50:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 06:50:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:50:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:50:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 06:50:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 06:50:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 06:50:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 06:50:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 06:50:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 06:50:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 06:50:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 06:50:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 06:50:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 06:50:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 06:50:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 06:50:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 06:50:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 06:50:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 06:50:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 06:50:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 06:50:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 06:50:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 06:50:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 06:50:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 06:50:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 06:50:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 06:50:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 06:50:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 06:50:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 06:50:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 06:50:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 06:50:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 06:50:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 06:50:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 06:50:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 06:50:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 06:50:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 06:50:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 06:50:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 06:50:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 06:50:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 06:50:59] [Rank 0] PRINT: step:8800/10000 val_loss:4.1225 total_sharp:2.3027e-05 L1_sharp:2.9976e-04 L2_sharp:5.4545e-05 L3_sharp:3.8960e-06 L4_sharp:3.9769e-06 L5_sharp:5.5016e-05 L6_sharp:3.7426e-05 L7_sharp:4.8326e-05 L8_sharp:9.2746e-05 L9_sharp:1.1704e-04 L10_sharp:1.2630e-04 L11_sharp:1.8915e-04 L12_sharp:1.5858e-03 total_fnorm:6.1500e+01 total_l1_linf:1.0650e+05 total_spectral:3.0875e+01 L1_fnorm:1.7266e+00 L2_fnorm:1.5938e+00 L3_fnorm:1.4219e+00 L4_fnorm:1.6875e+00 L5_fnorm:1.6875e+00 L6_fnorm:1.7031e+00 L7_fnorm:1.7031e+00 L8_fnorm:1.6406e+00 L9_fnorm:1.6719e+00 L10_fnorm:1.6641e+00 L11_fnorm:1.6406e+00 L12_fnorm:1.6641e+00 L1_l1linf:2.8906e-01 L2_l1linf:3.5156e-01 L3_l1linf:4.2188e-01 L4_l1linf:2.9883e-01 L5_l1linf:3.1250e-01 L6_l1linf:2.9883e-01 L7_l1linf:3.0859e-01 L8_l1linf:2.9883e-01 L9_l1linf:2.8320e-01 L10_l1linf:2.8125e-01 L11_l1linf:2.6367e-01 L12_l1linf:2.8125e-01 L1_spectral:2.5081e-02 L2_spectral:2.7005e-02 L3_spectral:2.4992e-02 L4_spectral:2.4010e-02 L5_spectral:2.3869e-02 L6_spectral:2.4192e-02 L7_spectral:2.4336e-02 L8_spectral:2.4569e-02 L9_spectral:2.4629e-02 L10_spectral:2.4721e-02 L11_spectral:2.4936e-02 L12_spectral:2.5037e-02 train_time:364475ms step_avg:41.42ms +[2025-09-11 06:50:59] [Rank 0] PRINT: step:8800/10000 
val_loss:4.1225 total_sharp:2.3027e-05 L1_sharp:2.9976e-04 L2_sharp:5.4545e-05 L3_sharp:3.8960e-06 L4_sharp:3.9769e-06 L5_sharp:5.5016e-05 L6_sharp:3.7426e-05 L7_sharp:4.8326e-05 L8_sharp:9.2746e-05 L9_sharp:1.1704e-04 L10_sharp:1.2630e-04 L11_sharp:1.8915e-04 L12_sharp:1.5858e-03 total_fnorm:6.1500e+01 total_l1_linf:1.0650e+05 total_spectral:3.0875e+01 L1_fnorm:1.7266e+00 L2_fnorm:1.5938e+00 L3_fnorm:1.4219e+00 L4_fnorm:1.6875e+00 L5_fnorm:1.6875e+00 L6_fnorm:1.7031e+00 L7_fnorm:1.7031e+00 L8_fnorm:1.6406e+00 L9_fnorm:1.6719e+00 L10_fnorm:1.6641e+00 L11_fnorm:1.6406e+00 L12_fnorm:1.6641e+00 L1_l1linf:2.8906e-01 L2_l1linf:3.5156e-01 L3_l1linf:4.2188e-01 L4_l1linf:2.9883e-01 L5_l1linf:3.1250e-01 L6_l1linf:2.9883e-01 L7_l1linf:3.0859e-01 L8_l1linf:2.9883e-01 L9_l1linf:2.8320e-01 L10_l1linf:2.8125e-01 L11_l1linf:2.6367e-01 L12_l1linf:2.8125e-01 L1_spectral:2.5081e-02 L2_spectral:2.7005e-02 L3_spectral:2.4992e-02 L4_spectral:2.4010e-02 L5_spectral:2.3869e-02 L6_spectral:2.4192e-02 L7_spectral:2.4336e-02 L8_spectral:2.4569e-02 L9_spectral:2.4629e-02 L10_spectral:2.4721e-02 L11_spectral:2.4936e-02 L12_spectral:2.5037e-02 train_time:364475ms step_avg:41.42ms +[2025-09-11 06:51:01] [Rank 0] step:8801/10000 train_time:365707ms step_avg:41.55ms +[2025-09-11 06:51:01] [Rank 0] step:8801/10000 train_time:365707ms step_avg:41.55ms +[2025-09-11 06:51:01] [Rank 0] step:8821/10000 train_time:366453ms step_avg:41.54ms +[2025-09-11 06:51:01] [Rank 0] step:8821/10000 train_time:366453ms step_avg:41.54ms +[2025-09-11 06:51:02] [Rank 0] step:8841/10000 train_time:367165ms step_avg:41.53ms +[2025-09-11 06:51:02] [Rank 0] step:8841/10000 train_time:367165ms step_avg:41.53ms +[2025-09-11 06:51:03] [Rank 0] step:8861/10000 train_time:367876ms step_avg:41.52ms +[2025-09-11 06:51:03] [Rank 0] step:8861/10000 train_time:367876ms step_avg:41.52ms +[2025-09-11 06:51:03] [Rank 0] step:8881/10000 train_time:368586ms step_avg:41.50ms +[2025-09-11 06:51:03] [Rank 0] step:8881/10000 
train_time:368586ms step_avg:41.50ms +[2025-09-11 06:51:04] [Rank 0] step:8901/10000 train_time:369299ms step_avg:41.49ms +[2025-09-11 06:51:04] [Rank 0] step:8901/10000 train_time:369299ms step_avg:41.49ms +[2025-09-11 06:51:05] [Rank 0] step:8921/10000 train_time:370007ms step_avg:41.48ms +[2025-09-11 06:51:05] [Rank 0] step:8921/10000 train_time:370007ms step_avg:41.48ms +[2025-09-11 06:51:06] [Rank 0] step:8941/10000 train_time:370720ms step_avg:41.46ms +[2025-09-11 06:51:06] [Rank 0] step:8941/10000 train_time:370720ms step_avg:41.46ms +[2025-09-11 06:51:06] [Rank 0] step:8961/10000 train_time:371439ms step_avg:41.45ms +[2025-09-11 06:51:06] [Rank 0] step:8961/10000 train_time:371439ms step_avg:41.45ms +[2025-09-11 06:51:07] [Rank 0] step:8981/10000 train_time:372154ms step_avg:41.44ms +[2025-09-11 06:51:07] [Rank 0] step:8981/10000 train_time:372154ms step_avg:41.44ms +[2025-09-11 06:51:08] [Rank 0] step:9001/10000 train_time:372858ms step_avg:41.42ms +[2025-09-11 06:51:08] [Rank 0] step:9001/10000 train_time:372858ms step_avg:41.42ms +[2025-09-11 06:51:08] [Rank 0] step:9021/10000 train_time:373569ms step_avg:41.41ms +[2025-09-11 06:51:08] [Rank 0] step:9021/10000 train_time:373569ms step_avg:41.41ms +[2025-09-11 06:51:10] [Rank 0] step:9041/10000 train_time:374788ms step_avg:41.45ms +[2025-09-11 06:51:10] [Rank 0] step:9041/10000 train_time:374788ms step_avg:41.45ms +[2025-09-11 06:51:10] [Rank 0] step:9061/10000 train_time:375497ms step_avg:41.44ms +[2025-09-11 06:51:10] [Rank 0] step:9061/10000 train_time:375497ms step_avg:41.44ms +[2025-09-11 06:51:11] [Rank 0] step:9081/10000 train_time:376319ms step_avg:41.44ms +[2025-09-11 06:51:11] [Rank 0] step:9081/10000 train_time:376319ms step_avg:41.44ms +[2025-09-11 06:51:12] [Rank 0] step:9101/10000 train_time:377187ms step_avg:41.44ms +[2025-09-11 06:51:12] [Rank 0] step:9101/10000 train_time:377187ms step_avg:41.44ms +[2025-09-11 06:51:13] [Rank 0] step:9121/10000 train_time:377902ms step_avg:41.43ms 
+[2025-09-11 06:51:13] [Rank 0] step:9121/10000 train_time:377902ms step_avg:41.43ms +[2025-09-11 06:51:13] [Rank 0] step:9141/10000 train_time:378611ms step_avg:41.42ms +[2025-09-11 06:51:13] [Rank 0] step:9141/10000 train_time:378611ms step_avg:41.42ms +[2025-09-11 06:51:14] [Rank 0] step:9161/10000 train_time:379325ms step_avg:41.41ms +[2025-09-11 06:51:14] [Rank 0] step:9161/10000 train_time:379325ms step_avg:41.41ms +[2025-09-11 06:51:15] [Rank 0] step:9181/10000 train_time:380037ms step_avg:41.39ms +[2025-09-11 06:51:15] [Rank 0] step:9181/10000 train_time:380037ms step_avg:41.39ms +[2025-09-11 06:51:16] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 06:51:16] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 06:51:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 06:51:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 06:51:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 06:51:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 06:51:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:51:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:51:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 06:51:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 06:51:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 06:51:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 06:51:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 06:51:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 06:51:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 06:51:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 06:51:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 06:51:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 06:51:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 06:51:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 06:51:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 06:51:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 06:51:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 06:51:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 06:51:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 06:51:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 06:51:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 06:51:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 06:51:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 06:51:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 06:51:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 06:51:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 06:51:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 06:51:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 06:51:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 06:51:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 06:51:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 06:51:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 06:51:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 06:51:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 06:51:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 06:51:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 06:51:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 06:51:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 06:51:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 06:51:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:51:26] [Rank 0] PRINT: step:9200/10000 val_loss:4.0985 total_sharp:1.9769e-05 L1_sharp:3.7663e-04 L2_sharp:7.1264e-06 L3_sharp:4.6128e-05 L4_sharp:2.8585e-05 L5_sharp:5.1378e-05 L6_sharp:5.4039e-05 L7_sharp:5.6692e-05 L8_sharp:9.7715e-05 L9_sharp:9.3054e-05 L10_sharp:1.2506e-04 L11_sharp:2.4205e-04 L12_sharp:1.3850e-03 total_fnorm:4.5500e+01 total_l1_linf:6.9120e+04 total_spectral:2.2875e+01 L1_fnorm:1.1562e+00 L2_fnorm:1.1172e+00 L3_fnorm:9.5703e-01 L4_fnorm:1.1250e+00 L5_fnorm:1.1328e+00 L6_fnorm:1.1406e+00 L7_fnorm:1.1484e+00 L8_fnorm:1.1016e+00 L9_fnorm:1.1250e+00 L10_fnorm:1.1172e+00 L11_fnorm:1.1094e+00 L12_fnorm:1.1328e+00 L1_l1linf:1.7871e-01 L2_l1linf:2.7734e-01 L3_l1linf:3.0078e-01 L4_l1linf:1.7969e-01 L5_l1linf:1.7969e-01 L6_l1linf:1.7578e-01 L7_l1linf:1.8359e-01 L8_l1linf:1.7871e-01 L9_l1linf:1.7578e-01 L10_l1linf:1.6797e-01 L11_l1linf:1.6016e-01 L12_l1linf:1.7188e-01 L1_spectral:1.7244e-02 L2_spectral:1.9728e-02 L3_spectral:1.8494e-02 L4_spectral:1.6254e-02 L5_spectral:1.6411e-02 L6_spectral:1.6454e-02 L7_spectral:1.6597e-02 L8_spectral:1.7399e-02 L9_spectral:1.6929e-02 L10_spectral:1.7052e-02 L11_spectral:1.7195e-02 L12_spectral:1.7334e-02 train_time:380732ms step_avg:41.38ms +[2025-09-11 06:51:26] [Rank 0] PRINT: step:9200/10000 val_loss:4.0985 total_sharp:1.9769e-05 L1_sharp:3.7663e-04 L2_sharp:7.1264e-06 L3_sharp:4.6128e-05 L4_sharp:2.8585e-05 L5_sharp:5.1378e-05 L6_sharp:5.4039e-05 L7_sharp:5.6692e-05 L8_sharp:9.7715e-05 L9_sharp:9.3054e-05 L10_sharp:1.2506e-04 L11_sharp:2.4205e-04 L12_sharp:1.3850e-03 total_fnorm:4.5500e+01 total_l1_linf:6.9120e+04 total_spectral:2.2875e+01 L1_fnorm:1.1562e+00 L2_fnorm:1.1172e+00 L3_fnorm:9.5703e-01 L4_fnorm:1.1250e+00 L5_fnorm:1.1328e+00 L6_fnorm:1.1406e+00 L7_fnorm:1.1484e+00 L8_fnorm:1.1016e+00 L9_fnorm:1.1250e+00 L10_fnorm:1.1172e+00 L11_fnorm:1.1094e+00 L12_fnorm:1.1328e+00 L1_l1linf:1.7871e-01 L2_l1linf:2.7734e-01 L3_l1linf:3.0078e-01 L4_l1linf:1.7969e-01 L5_l1linf:1.7969e-01 
L6_l1linf:1.7578e-01 L7_l1linf:1.8359e-01 L8_l1linf:1.7871e-01 L9_l1linf:1.7578e-01 L10_l1linf:1.6797e-01 L11_l1linf:1.6016e-01 L12_l1linf:1.7188e-01 L1_spectral:1.7244e-02 L2_spectral:1.9728e-02 L3_spectral:1.8494e-02 L4_spectral:1.6254e-02 L5_spectral:1.6411e-02 L6_spectral:1.6454e-02 L7_spectral:1.6597e-02 L8_spectral:1.7399e-02 L9_spectral:1.6929e-02 L10_spectral:1.7052e-02 L11_spectral:1.7195e-02 L12_spectral:1.7334e-02 train_time:380732ms step_avg:41.38ms +[2025-09-11 06:51:27] [Rank 0] step:9201/10000 train_time:381951ms step_avg:41.51ms +[2025-09-11 06:51:27] [Rank 0] step:9201/10000 train_time:381951ms step_avg:41.51ms +[2025-09-11 06:51:28] [Rank 0] step:9221/10000 train_time:382676ms step_avg:41.50ms +[2025-09-11 06:51:28] [Rank 0] step:9221/10000 train_time:382676ms step_avg:41.50ms +[2025-09-11 06:51:28] [Rank 0] step:9241/10000 train_time:383387ms step_avg:41.49ms +[2025-09-11 06:51:28] [Rank 0] step:9241/10000 train_time:383387ms step_avg:41.49ms +[2025-09-11 06:51:29] [Rank 0] step:9261/10000 train_time:384101ms step_avg:41.48ms +[2025-09-11 06:51:29] [Rank 0] step:9261/10000 train_time:384101ms step_avg:41.48ms +[2025-09-11 06:51:30] [Rank 0] step:9281/10000 train_time:384815ms step_avg:41.46ms +[2025-09-11 06:51:30] [Rank 0] step:9281/10000 train_time:384815ms step_avg:41.46ms +[2025-09-11 06:51:30] [Rank 0] step:9301/10000 train_time:385525ms step_avg:41.45ms +[2025-09-11 06:51:30] [Rank 0] step:9301/10000 train_time:385525ms step_avg:41.45ms +[2025-09-11 06:51:31] [Rank 0] step:9321/10000 train_time:386240ms step_avg:41.44ms +[2025-09-11 06:51:31] [Rank 0] step:9321/10000 train_time:386240ms step_avg:41.44ms +[2025-09-11 06:51:32] [Rank 0] step:9341/10000 train_time:386948ms step_avg:41.42ms +[2025-09-11 06:51:32] [Rank 0] step:9341/10000 train_time:386948ms step_avg:41.42ms +[2025-09-11 06:51:33] [Rank 0] step:9361/10000 train_time:387655ms step_avg:41.41ms +[2025-09-11 06:51:33] [Rank 0] step:9361/10000 train_time:387655ms step_avg:41.41ms 
+[2025-09-11 06:51:33] [Rank 0] step:9381/10000 train_time:388364ms step_avg:41.40ms +[2025-09-11 06:51:33] [Rank 0] step:9381/10000 train_time:388364ms step_avg:41.40ms +[2025-09-11 06:51:34] [Rank 0] step:9401/10000 train_time:389077ms step_avg:41.39ms +[2025-09-11 06:51:34] [Rank 0] step:9401/10000 train_time:389077ms step_avg:41.39ms +[2025-09-11 06:51:35] [Rank 0] step:9421/10000 train_time:389791ms step_avg:41.37ms +[2025-09-11 06:51:35] [Rank 0] step:9421/10000 train_time:389791ms step_avg:41.37ms +[2025-09-11 06:51:35] [Rank 0] step:9441/10000 train_time:390506ms step_avg:41.36ms +[2025-09-11 06:51:35] [Rank 0] step:9441/10000 train_time:390506ms step_avg:41.36ms +[2025-09-11 06:51:36] [Rank 0] step:9461/10000 train_time:391217ms step_avg:41.35ms +[2025-09-11 06:51:36] [Rank 0] step:9461/10000 train_time:391217ms step_avg:41.35ms +[2025-09-11 06:51:37] [Rank 0] step:9481/10000 train_time:391931ms step_avg:41.34ms +[2025-09-11 06:51:37] [Rank 0] step:9481/10000 train_time:391931ms step_avg:41.34ms +[2025-09-11 06:51:38] [Rank 0] step:9501/10000 train_time:392642ms step_avg:41.33ms +[2025-09-11 06:51:38] [Rank 0] step:9501/10000 train_time:392642ms step_avg:41.33ms +[2025-09-11 06:51:38] [Rank 0] step:9521/10000 train_time:393356ms step_avg:41.31ms +[2025-09-11 06:51:38] [Rank 0] step:9521/10000 train_time:393356ms step_avg:41.31ms +[2025-09-11 06:51:39] [Rank 0] step:9541/10000 train_time:394066ms step_avg:41.30ms +[2025-09-11 06:51:39] [Rank 0] step:9541/10000 train_time:394066ms step_avg:41.30ms +[2025-09-11 06:51:40] [Rank 0] step:9561/10000 train_time:394777ms step_avg:41.29ms +[2025-09-11 06:51:40] [Rank 0] step:9561/10000 train_time:394777ms step_avg:41.29ms +[2025-09-11 06:51:40] [Rank 0] step:9581/10000 train_time:395490ms step_avg:41.28ms +[2025-09-11 06:51:40] [Rank 0] step:9581/10000 train_time:395490ms step_avg:41.28ms +[2025-09-11 06:51:41] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 06:51:41] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 06:51:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 06:51:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 06:51:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 06:51:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 06:51:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:51:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:51:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 06:51:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 06:51:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 06:51:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 06:51:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 06:51:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 06:51:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 06:51:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 06:51:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 06:51:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 06:51:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 06:51:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 06:51:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 06:51:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 06:51:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 06:51:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 06:51:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 06:51:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 06:51:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 06:51:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 06:51:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 06:51:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 06:51:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 06:51:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 06:51:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 06:51:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 06:51:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 06:51:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 06:51:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 06:51:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 06:51:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 06:51:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 06:51:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 06:51:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 06:51:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 06:51:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 06:51:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 06:51:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 06:51:51] [Rank 0] PRINT: step:9600/10000 val_loss:4.0845 total_sharp:1.3122e-05 L1_sharp:2.6700e-04 L2_sharp:4.2839e-05 L3_sharp:9.9656e-06 L4_sharp:1.2890e-05 L5_sharp:3.8642e-05 L6_sharp:2.7517e-05 L7_sharp:2.8787e-05 L8_sharp:5.5587e-05 L9_sharp:7.1333e-05 L10_sharp:9.5884e-05 L11_sharp:1.5794e-04 L12_sharp:1.5220e-03 total_fnorm:2.7250e+01 total_l1_linf:3.5584e+04 total_spectral:1.3688e+01 L1_fnorm:6.4062e-01 L2_fnorm:6.2500e-01 L3_fnorm:5.4297e-01 L4_fnorm:6.3281e-01 L5_fnorm:6.3672e-01 L6_fnorm:6.3672e-01 L7_fnorm:6.4453e-01 L8_fnorm:6.1328e-01 L9_fnorm:6.3281e-01 L10_fnorm:6.2500e-01 L11_fnorm:6.2109e-01 L12_fnorm:6.4453e-01 L1_l1linf:9.0332e-02 L2_l1linf:1.7969e-01 L3_l1linf:1.9824e-01 L4_l1linf:9.3262e-02 L5_l1linf:8.7402e-02 L6_l1linf:8.9844e-02 L7_l1linf:8.8867e-02 L8_l1linf:9.0332e-02 L9_l1linf:8.4473e-02 L10_l1linf:7.8125e-02 L11_l1linf:7.9590e-02 L12_l1linf:8.8379e-02 L1_spectral:9.9551e-03 L2_spectral:1.2618e-02 L3_spectral:1.2353e-02 L4_spectral:9.6441e-03 L5_spectral:9.2872e-03 L6_spectral:9.3840e-03 L7_spectral:9.5689e-03 L8_spectral:9.8934e-03 L9_spectral:9.8005e-03 L10_spectral:9.7275e-03 L11_spectral:9.9046e-03 L12_spectral:1.0133e-02 train_time:396179ms step_avg:41.27ms +[2025-09-11 06:51:51] [Rank 0] PRINT: step:9600/10000 
val_loss:4.0845 total_sharp:1.3122e-05 L1_sharp:2.6700e-04 L2_sharp:4.2839e-05 L3_sharp:9.9656e-06 L4_sharp:1.2890e-05 L5_sharp:3.8642e-05 L6_sharp:2.7517e-05 L7_sharp:2.8787e-05 L8_sharp:5.5587e-05 L9_sharp:7.1333e-05 L10_sharp:9.5884e-05 L11_sharp:1.5794e-04 L12_sharp:1.5220e-03 total_fnorm:2.7250e+01 total_l1_linf:3.5584e+04 total_spectral:1.3688e+01 L1_fnorm:6.4062e-01 L2_fnorm:6.2500e-01 L3_fnorm:5.4297e-01 L4_fnorm:6.3281e-01 L5_fnorm:6.3672e-01 L6_fnorm:6.3672e-01 L7_fnorm:6.4453e-01 L8_fnorm:6.1328e-01 L9_fnorm:6.3281e-01 L10_fnorm:6.2500e-01 L11_fnorm:6.2109e-01 L12_fnorm:6.4453e-01 L1_l1linf:9.0332e-02 L2_l1linf:1.7969e-01 L3_l1linf:1.9824e-01 L4_l1linf:9.3262e-02 L5_l1linf:8.7402e-02 L6_l1linf:8.9844e-02 L7_l1linf:8.8867e-02 L8_l1linf:9.0332e-02 L9_l1linf:8.4473e-02 L10_l1linf:7.8125e-02 L11_l1linf:7.9590e-02 L12_l1linf:8.8379e-02 L1_spectral:9.9551e-03 L2_spectral:1.2618e-02 L3_spectral:1.2353e-02 L4_spectral:9.6441e-03 L5_spectral:9.2872e-03 L6_spectral:9.3840e-03 L7_spectral:9.5689e-03 L8_spectral:9.8934e-03 L9_spectral:9.8005e-03 L10_spectral:9.7275e-03 L11_spectral:9.9046e-03 L12_spectral:1.0133e-02 train_time:396179ms step_avg:41.27ms +[2025-09-11 06:51:52] [Rank 0] step:9601/10000 train_time:397415ms step_avg:41.39ms +[2025-09-11 06:51:52] [Rank 0] step:9601/10000 train_time:397415ms step_avg:41.39ms +[2025-09-11 06:51:53] [Rank 0] step:9621/10000 train_time:398144ms step_avg:41.38ms +[2025-09-11 06:51:53] [Rank 0] step:9621/10000 train_time:398144ms step_avg:41.38ms +[2025-09-11 06:51:54] [Rank 0] step:9641/10000 train_time:398861ms step_avg:41.37ms +[2025-09-11 06:51:54] [Rank 0] step:9641/10000 train_time:398861ms step_avg:41.37ms +[2025-09-11 06:51:55] [Rank 0] step:9661/10000 train_time:399586ms step_avg:41.36ms +[2025-09-11 06:51:55] [Rank 0] step:9661/10000 train_time:399586ms step_avg:41.36ms +[2025-09-11 06:51:55] [Rank 0] step:9681/10000 train_time:400302ms step_avg:41.35ms +[2025-09-11 06:51:55] [Rank 0] step:9681/10000 
train_time:400302ms step_avg:41.35ms +[2025-09-11 06:51:56] [Rank 0] step:9701/10000 train_time:401021ms step_avg:41.34ms +[2025-09-11 06:51:56] [Rank 0] step:9701/10000 train_time:401021ms step_avg:41.34ms +[2025-09-11 06:51:57] [Rank 0] step:9721/10000 train_time:401743ms step_avg:41.33ms +[2025-09-11 06:51:57] [Rank 0] step:9721/10000 train_time:401743ms step_avg:41.33ms +[2025-09-11 06:51:57] [Rank 0] step:9741/10000 train_time:402463ms step_avg:41.32ms +[2025-09-11 06:51:57] [Rank 0] step:9741/10000 train_time:402463ms step_avg:41.32ms +[2025-09-11 06:51:58] [Rank 0] step:9761/10000 train_time:403181ms step_avg:41.31ms +[2025-09-11 06:51:58] [Rank 0] step:9761/10000 train_time:403181ms step_avg:41.31ms +[2025-09-11 06:51:59] [Rank 0] step:9781/10000 train_time:403899ms step_avg:41.29ms +[2025-09-11 06:51:59] [Rank 0] step:9781/10000 train_time:403899ms step_avg:41.29ms +[2025-09-11 06:52:00] [Rank 0] step:9801/10000 train_time:404622ms step_avg:41.28ms +[2025-09-11 06:52:00] [Rank 0] step:9801/10000 train_time:404622ms step_avg:41.28ms +[2025-09-11 06:52:00] [Rank 0] step:9821/10000 train_time:405343ms step_avg:41.27ms +[2025-09-11 06:52:00] [Rank 0] step:9821/10000 train_time:405343ms step_avg:41.27ms +[2025-09-11 06:52:01] [Rank 0] step:9841/10000 train_time:406066ms step_avg:41.26ms +[2025-09-11 06:52:01] [Rank 0] step:9841/10000 train_time:406066ms step_avg:41.26ms +[2025-09-11 06:52:02] [Rank 0] step:9861/10000 train_time:406786ms step_avg:41.25ms +[2025-09-11 06:52:02] [Rank 0] step:9861/10000 train_time:406786ms step_avg:41.25ms +[2025-09-11 06:52:02] [Rank 0] step:9881/10000 train_time:407505ms step_avg:41.24ms +[2025-09-11 06:52:02] [Rank 0] step:9881/10000 train_time:407505ms step_avg:41.24ms +[2025-09-11 06:52:03] [Rank 0] step:9901/10000 train_time:408222ms step_avg:41.23ms +[2025-09-11 06:52:03] [Rank 0] step:9901/10000 train_time:408222ms step_avg:41.23ms +[2025-09-11 06:52:04] [Rank 0] step:9921/10000 train_time:408940ms step_avg:41.22ms 
+[2025-09-11 06:52:04] [Rank 0] step:9921/10000 train_time:408940ms step_avg:41.22ms +[2025-09-11 06:52:05] [Rank 0] step:9941/10000 train_time:409663ms step_avg:41.21ms +[2025-09-11 06:52:05] [Rank 0] step:9941/10000 train_time:409663ms step_avg:41.21ms +[2025-09-11 06:52:05] [Rank 0] step:9961/10000 train_time:410387ms step_avg:41.20ms +[2025-09-11 06:52:05] [Rank 0] step:9961/10000 train_time:410387ms step_avg:41.20ms +[2025-09-11 06:52:06] [Rank 0] step:9981/10000 train_time:411106ms step_avg:41.19ms +[2025-09-11 06:52:06] [Rank 0] step:9981/10000 train_time:411106ms step_avg:41.19ms +[2025-09-11 06:52:07] [Rank 0] step:10000/10000 train_time:411796ms step_avg:41.18ms +[2025-09-11 06:52:07] [Rank 0] step:10000/10000 train_time:411796ms step_avg:41.18ms +[2025-09-11 06:52:07] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 06:52:07] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 06:52:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 06:52:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 06:52:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 06:52:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 06:52:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:52:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:52:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 06:52:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 06:52:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 06:52:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 06:52:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 06:52:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 06:52:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 06:52:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 06:52:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 06:52:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 06:52:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 06:52:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 06:52:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 06:52:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 06:52:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 06:52:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 06:52:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 06:52:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 06:52:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 06:52:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 06:52:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 06:52:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 06:52:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 06:52:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 06:52:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 06:52:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 06:52:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 06:52:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 06:52:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 06:52:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 06:52:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 06:52:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 06:52:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 06:52:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 06:52:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 06:52:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 06:52:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 06:52:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:52:17] [Rank 0] PRINT: step:10000/10000 val_loss:4.0822 total_sharp:9.2699e-06 L1_sharp:1.5063e-04 L2_sharp:3.0305e-05 L3_sharp:2.8025e-05 L4_sharp:8.8888e-06 L5_sharp:1.1754e-05 L6_sharp:3.3103e-05 L7_sharp:2.9841e-05 L8_sharp:5.5389e-05 L9_sharp:6.7859e-05 L10_sharp:7.7306e-05 L11_sharp:1.4127e-04 L12_sharp:9.0390e-04 total_fnorm:1.0562e+01 total_l1_linf:9.9840e+03 total_spectral:5.3125e+00 L1_fnorm:2.5781e-01 L2_fnorm:2.4512e-01 L3_fnorm:2.1875e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4707e-01 L6_fnorm:2.4902e-01 L7_fnorm:2.5000e-01 L8_fnorm:2.4219e-01 L9_fnorm:2.4512e-01 L10_fnorm:2.4414e-01 L11_fnorm:2.4121e-01 L12_fnorm:2.4902e-01 L1_l1linf:2.5024e-02 L2_l1linf:8.4473e-02 L3_l1linf:8.8379e-02 L4_l1linf:2.9541e-02 L5_l1linf:2.6489e-02 L6_l1linf:2.7954e-02 L7_l1linf:2.7466e-02 L8_l1linf:3.0518e-02 L9_l1linf:2.4658e-02 L10_l1linf:2.4902e-02 L11_l1linf:2.4902e-02 L12_l1linf:2.6611e-02 L1_spectral:3.9390e-03 L2_spectral:5.7911e-03 L3_spectral:6.2325e-03 L4_spectral:3.7986e-03 L5_spectral:3.7409e-03 L6_spectral:3.7473e-03 L7_spectral:3.7679e-03 L8_spectral:4.1236e-03 L9_spectral:3.8664e-03 L10_spectral:3.8879e-03 L11_spectral:3.9503e-03 L12_spectral:4.0460e-03 train_time:411817ms step_avg:41.18ms +[2025-09-11 06:52:17] [Rank 0] PRINT: step:10000/10000 val_loss:4.0822 total_sharp:9.2699e-06 L1_sharp:1.5063e-04 L2_sharp:3.0305e-05 L3_sharp:2.8025e-05 L4_sharp:8.8888e-06 L5_sharp:1.1754e-05 L6_sharp:3.3103e-05 L7_sharp:2.9841e-05 L8_sharp:5.5389e-05 L9_sharp:6.7859e-05 L10_sharp:7.7306e-05 L11_sharp:1.4127e-04 L12_sharp:9.0390e-04 total_fnorm:1.0562e+01 total_l1_linf:9.9840e+03 total_spectral:5.3125e+00 L1_fnorm:2.5781e-01 L2_fnorm:2.4512e-01 L3_fnorm:2.1875e-01 L4_fnorm:2.4512e-01 L5_fnorm:2.4707e-01 L6_fnorm:2.4902e-01 L7_fnorm:2.5000e-01 L8_fnorm:2.4219e-01 L9_fnorm:2.4512e-01 L10_fnorm:2.4414e-01 L11_fnorm:2.4121e-01 L12_fnorm:2.4902e-01 L1_l1linf:2.5024e-02 L2_l1linf:8.4473e-02 L3_l1linf:8.8379e-02 L4_l1linf:2.9541e-02 L5_l1linf:2.6489e-02 
L6_l1linf:2.7954e-02 L7_l1linf:2.7466e-02 L8_l1linf:3.0518e-02 L9_l1linf:2.4658e-02 L10_l1linf:2.4902e-02 L11_l1linf:2.4902e-02 L12_l1linf:2.6611e-02 L1_spectral:3.9390e-03 L2_spectral:5.7911e-03 L3_spectral:6.2325e-03 L4_spectral:3.7986e-03 L5_spectral:3.7409e-03 L6_spectral:3.7473e-03 L7_spectral:3.7679e-03 L8_spectral:4.1236e-03 L9_spectral:3.8664e-03 L10_spectral:3.8879e-03 L11_spectral:3.9503e-03 L12_spectral:4.0460e-03 train_time:411817ms step_avg:41.18ms +[2025-09-11 06:52:17] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 06:52:17 2025 --- +[2025-09-11 06:52:17] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 06:52:17 2025 --- +[2025-09-11 06:52:17] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 06:52:17] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.1_seed_43/config.json b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.1_seed_43/config.json new file mode 100644 index 0000000000000000000000000000000000000000..e2176d4376855cb789e9d8cad442481f0a4866da --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.1_seed_43/config.json @@ -0,0 +1,25 @@ +{ + "cli_args": { + "unet": false, + "seed": 43, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.05, + "muon_lr": 0.1, + "base_dir": "logs_qkvo_grid_fix/mode_0" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.4, + "vocab_size": 50257, + "val_loss_every": 400, + "save_checkpoint": false + }, + "run_uuid_for_log": "bee91e7f-ed74-461c-b923-429840faefba", + 
"script_code_logged_at_start": true +} \ No newline at end of file diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.1_seed_43/training_log_bee91e7f-ed74-461c-b923-429840faefba.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.1_seed_43/training_log_bee91e7f-ed74-461c-b923-429840faefba.txt new file mode 100644 index 0000000000000000000000000000000000000000..1f43fe589a8d110ba450f279b6eff6775c44c113 --- /dev/null +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.1_seed_43/training_log_bee91e7f-ed74-461c-b923-429840faefba.txt @@ -0,0 +1,4264 @@ +[2025-09-11 06:24:48] [Rank 0] PRINT: --- Script Start: Thu Sep 11 06:24:48 2025 --- +[2025-09-11 06:24:48] [Rank 0] PRINT: --- Script Start: Thu Sep 11 06:24:48 2025 --- +[2025-09-11 06:24:48] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.05, muon_lr=0.1, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 06:24:48] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.05, muon_lr=0.1, base_dir='logs_qkvo_grid_fix/mode_0') +[2025-09-11 06:24:48] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 06:24:48] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-11 06:24:48] [Rank 0] PRINT: Using fixed seed: 43 +[2025-09-11 06:24:48] [Rank 0] PRINT: Using fixed seed: 43 +[2025-09-11 06:24:48] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.1_seed_43 +[2025-09-11 06:24:48] [Rank 0] PRINT: Run directory: logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.1_seed_43 +[2025-09-11 06:24:48] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, 
asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, 
"unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." 
+ "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention 
sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. 
+ + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. 
No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, 
"a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): 
attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 06:24:48] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import gc +from torch.cuda.amp import autocast +from torch.func import functional_call +from torch.amp import autocast +import itertools +from itertools import cycle + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_sharpness/modded-nanogpt") # Already present +from optimizers.MUON_fix import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + +# 
----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+ pos += batch_size + yield inputs, targets + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) +parser.add_argument("--adam_lr", type=float, default=0.001, help="Learning rate for Adam optimizer") +parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon optimizer") +parser.add_argument("--base_dir", type=str, default="logs_gated/diff_modes", help="Base directory for logs") +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + + from models.nano_GPT_qkvo_simp import GPT + +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + 
+@dataclass +class Hyperparameters: + # data + + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #train_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/train/cfg3f_train_*.bin" + #val_files = "/home/wangshuche/MUON_theory/modded-nanogpt/data/cfg3f_dataset_imbalance/val/cfg3f_val_*.bin" + + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + + #val_tokens = 1966080 + val_tokens = 491520 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + train_seq_len = 3*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + val_seq_len = 4*4*1024 # FlexAttention sequence length for validation + + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.4 + # architecture + + vocab_size = 50257 + + # evaluation and logging + val_loss_every = 400 # Increased to reduce memory pressure from frequent sharpness calculations + save_checkpoint = False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size 
+dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +from pathlib import Path + +def check_and_delete_txt_files(folder_path): + """ + Check all .txt files in a folder for the string "10000/10000". + If none contain this string, delete all .txt files in the folder. + + Args: + folder_path (str): Path to the folder to check + """ + folder = Path(folder_path) + + # Check if folder exists + if not folder.exists(): + print(f"Error: Folder '{folder_path}' does not exist.") + return True + + # Check if it's actually a directory + if not folder.is_dir(): + print(f"Error: '{folder_path}' is not a directory.") + return True + + # Get all .txt files in the folder (one layer only) + txt_files = list(folder.glob("*.txt")) + + if not txt_files: + print("No .txt files found in the folder.") + return True + + # Check if any file contains "10000/10000" + found_string = False + + for file_path in txt_files: + try: + content = file_path.read_text(encoding='utf-8') + if "10000/10000" in content: + found_string = True + print(f"Found '10000/10000' in: {file_path}") + break # No need to check other files + except Exception as e: + print(f"Error reading {file_path}: {e}") + + # If string not found in any file, delete all .txt files + if not found_string: + print("String '10000/10000' not found in any .txt file. 
Deleting all .txt files...") + for file_path in txt_files: + try: + file_path.unlink() + print(f"Deleted: {file_path}") + except Exception as e: + print(f"Error deleting {file_path}: {e}") + else: + print("String '10000/10000' found. No files will be deleted.") + return not found_string + +logfile = None +run_dir_path_str = None + +base_log_dir = Path(exp_args.base_dir) + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = check_and_delete_txt_files(run_dir_path) + +if run_flag: + + if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_muon_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if 
requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + ######################################## + # Construct model and optimizer # + ######################################## + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if 
hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. 
Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. 
Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=adam_matrix_lr), + dict(params=embed_params, lr=adam_matrix_lr), + dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = 
Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + + + + @torch.no_grad() + def calculate_validation_loss(model_to_eval, 
val_data_pattern, val_batch_size, val_tokens_limit, current_step, window_size_blocks): + """Helper function to calculate validation loss on a subset of the validation set.""" + model_to_eval.eval() + val_loader = distributed_data_generator(val_data_pattern, val_batch_size, rank, world_size) + val_num_steps = (val_tokens_limit // 4) // val_batch_size # Use 1/4 of val set for speed + + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_to_eval(inputs, targets, window_size_blocks) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + model_to_eval.train() + return val_loss_avg.item() + + # Helper functions for additional norms + def calculate_l1_to_linf_norm(matrix): + """ + Calculate the ℓ1→ℓ∞ norm (max row L1 norm) of a matrix. + For vectors, returns the L1 norm. + """ + if matrix.ndim == 1: + return torch.sum(torch.abs(matrix)) + elif matrix.ndim == 2: + # Each row's L1 norm, then take maximum + row_l1_norms = torch.sum(torch.abs(matrix), dim=1) + return torch.max(row_l1_norms) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + row_l1_norms = torch.sum(torch.abs(matrix_2d), dim=1) + return torch.max(row_l1_norms) + + def calculate_spectral_norm(matrix): + """ + Calculate the spectral norm (largest singular value) of a matrix. + For vectors, returns the L2 norm. 
+ """ + # Convert to float32 if needed for linalg operations + if matrix.dtype in [torch.bfloat16, torch.float16]: + matrix = matrix.float() + + if matrix.ndim == 1: + return torch.norm(matrix, p=2) + elif matrix.ndim == 2: + # Use matrix 2-norm (largest singular value) + return torch.linalg.matrix_norm(matrix, ord=2) + else: + # For higher-dimensional tensors, flatten to 2D + matrix_2d = matrix.view(matrix.shape[0], -1) + return torch.linalg.matrix_norm(matrix_2d, ord=2) + + # Import the enhanced analysis function + def calculate_comprehensive_sharpness(model, model_compiled, optimizers, step, args, rank, world_size, print0, get_window_size_blocks, distributed_data_generator): + """ + Comprehensive sharpness analysis including all layers, cross-layer interactions, + gradient correlations, and parameter type analysis. + """ + analysis_results = {} + + # --- 1. Get the true update direction 'v' --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Getting true update direction 'v'...", console=True) + params_before_step = [p.clone().detach() for p in model.parameters()] + + grad_calc_loader = distributed_data_generator(args.train_files, world_size * 512, rank, world_size) + try: + inputs, targets = next(grad_calc_loader) + model_compiled.train() + with autocast(device_type='cuda', dtype=torch.bfloat16): + loss_for_grad = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_for_grad.backward() + + # Store gradients for correlation analysis + layer_gradients = {} + + for param in model.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + finally: + del grad_calc_loader + + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + + update_direction_v = [p_before - p.detach() for p_before, p in zip(params_before_step, model.parameters())] + + for p_model, p_before in zip(model.parameters(), params_before_step): + p_model.data.copy_(p_before.data) + + # --- 2. 
Calculate update norms (Frobenius, Max-of-Max, Spectral) --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating update norms...", console=True) + + # Calculate total update norm (Frobenius) + total_update_norm_sq = sum(torch.sum(v * v) for v in update_direction_v) + dist.all_reduce(total_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results["total_update_fnorm"] = torch.sqrt(total_update_norm_sq).item() + + # Calculate TOTAL update Max-of-Max and Spectral norms by concatenating all updates + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating total update Max-of-Max and Spectral norms...", console=True) + try: + # Concatenate all update vectors into one long vector + all_updates_flat = torch.cat([v.flatten() for v in update_direction_v if v.numel() > 0]) + + if all_updates_flat.numel() > 0: + # Calculate total ℓ1→ℓ∞ norm (L1 norm for concatenated vector) + total_l1_linf_norm = torch.sum(torch.abs(all_updates_flat)) + analysis_results["total_l1_linf_norm"] = total_l1_linf_norm.item() + + # Calculate total spectral norm (L2 norm for concatenated vector) + total_spectral_norm = torch.norm(all_updates_flat, p=2) + analysis_results["total_spectral_norm"] = total_spectral_norm.item() + else: + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + del all_updates_flat + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating total norms: {e}", console=True) + analysis_results["total_l1_linf_norm"] = 0.0 + analysis_results["total_spectral_norm"] = 0.0 + + # --- 3. Setup layer parameter groups --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up layer parameter groups...", console=True) + + # Only layer groups (1 to 12) + all_param_groups = {} + + for i in range(len(model.blocks)): + block = model.blocks[i] + layer_name = f"layer_{i+1}" + + # All parameters in this layer + all_param_groups[layer_name] = list(block.parameters()) + + # --- 3. 
Calculate layer-wise and component-wise update norms --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise update norms...", console=True) + + # Create parameter to index mapping + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + # Calculate update norms for each parameter group + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + # Get indices for this group + indices = [param_to_idx[p] for p in param_group if p in param_to_idx] + if not indices: + continue + + # Calculate Frobenius norm for this group + group_update_norm_sq = sum(torch.sum(update_direction_v[i] * update_direction_v[i]) for i in indices) + dist.all_reduce(group_update_norm_sq, op=dist.ReduceOp.SUM) + analysis_results[f"{group_name}_update_fnorm"] = torch.sqrt(group_update_norm_sq).item() + + # Calculate Max-of-Max and Spectral norms for this group + group_l1_linf_norms = [] + group_spectral_norms = [] + + for i in indices: + if i < len(update_direction_v) and update_direction_v[i].numel() > 0: + try: + # Calculate ℓ1→ℓ∞ norm for this parameter + l1_linf_norm = calculate_l1_to_linf_norm(update_direction_v[i]) + group_l1_linf_norms.append(l1_linf_norm.item()) + + # Calculate spectral norm for this parameter + spectral_norm = calculate_spectral_norm(update_direction_v[i]) + group_spectral_norms.append(spectral_norm.item()) + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error calculating norms for group {group_name}, param {i}: {e}", console=True) + group_l1_linf_norms.append(0.0) + group_spectral_norms.append(0.0) + + # Store max norms for this group (as per mathematical definition) + if group_l1_linf_norms: + analysis_results[f"{group_name}_max_l1_linf_norm"] = max(group_l1_linf_norms) + else: + analysis_results[f"{group_name}_max_l1_linf_norm"] = 0.0 + + if group_spectral_norms: + analysis_results[f"{group_name}_max_spectral_norm"] = max(group_spectral_norms) + else: + 
analysis_results[f"{group_name}_max_spectral_norm"] = 0.0 + + # --- 4. Setup for HVP calculation --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Setting up HVP calculation in float32...", console=True) + model.float() # Switch to float32 for precision + update_direction_v_fp32 = [v.float() for v in update_direction_v] + + hvp_loader = distributed_data_generator(args.val_files, world_size * 1024, rank, world_size) + try: + inputs, targets = next(hvp_loader) + + # Calculate loss and first-order gradients once, with graph for second-order grads + loss_hvp = model(inputs, targets, get_window_size_blocks(step)) + grads_hvp = torch.autograd.grad(loss_hvp, model.parameters(), create_graph=True) + + # --- 5. Calculate TOTAL sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating TOTAL sharpness...", console=True) + v_dot_g_total = sum(torch.sum(g * v) for g, v in zip(grads_hvp, update_direction_v_fp32)) + hvp_total_result = torch.autograd.grad(v_dot_g_total, model.parameters(), retain_graph=True) + + vhp_dot_v_total = sum(torch.sum(hvp * v) for hvp, v in zip(hvp_total_result, update_direction_v_fp32)) + v_norm_sq_total = sum(torch.sum(v * v) for v in update_direction_v_fp32) + + dist.all_reduce(vhp_dot_v_total, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_total, op=dist.ReduceOp.SUM) + + if v_norm_sq_total.item() > 1e-12: + analysis_results["total_sharpness"] = (vhp_dot_v_total / v_norm_sq_total).item() + else: + analysis_results["total_sharpness"] = 0.0 + del hvp_total_result + + # --- 6. 
Calculate layer-wise sharpness --- + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Calculating layer-wise sharpness...", console=True) + param_to_idx = {p: i for i, p in enumerate(model.parameters())} + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing {len(all_param_groups)} layers for sharpness...", console=True) + + for group_name, param_group in all_param_groups.items(): + if not param_group: + continue + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Processing '{group_name}'...", console=True) + indices = {param_to_idx[p] for p in param_group if p in param_to_idx} + if not indices: + continue + + try: + v_dot_g_group = sum(torch.sum(grads_hvp[i] * update_direction_v_fp32[i]) for i in indices) + hvp_group_result = torch.autograd.grad(v_dot_g_group, model.parameters(), retain_graph=True) + + vhp_dot_v_group = sum(torch.sum(hvp_group_result[i] * update_direction_v_fp32[i]) for i in indices) + v_norm_sq_group = sum(torch.sum(update_direction_v_fp32[i] * update_direction_v_fp32[i]) for i in indices) + + dist.all_reduce(vhp_dot_v_group, op=dist.ReduceOp.AVG) + dist.all_reduce(v_norm_sq_group, op=dist.ReduceOp.SUM) + + if v_norm_sq_group.item() > 1e-12: + analysis_results[f"{group_name}_sharpness"] = (vhp_dot_v_group / v_norm_sq_group).item() + else: + analysis_results[f"{group_name}_sharpness"] = 0.0 + + # Clean up immediately to save memory + del hvp_group_result + torch.cuda.empty_cache() + + except torch.OutOfMemoryError as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] OOM error for '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + torch.cuda.empty_cache() + except Exception as e: + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Error processing '{group_name}': {e}", console=True) + analysis_results[f"{group_name}_sharpness"] = 0.0 + + + finally: + del hvp_loader + + # --- 7. 
Cleanup --- + model.bfloat16() # Switch back + del update_direction_v, update_direction_v_fp32, grads_hvp + del params_before_step + gc.collect() + torch.cuda.empty_cache() + + for opt in optimizers: + opt.state.clear() + + print0(f"PRINT: [Enhanced Sharpness @ Step {step}] Analysis complete. Generated {len(analysis_results)} metrics.", console=True) + return analysis_results + + def format_comprehensive_results(results): + """ + Format the comprehensive analysis results for logging. + """ + log_parts = [] + + # Total sharpness + if 'total_sharpness' in results: + log_parts.append(f"total_sharp:{results['total_sharpness']:.4e}") + + # Layer-wise sharpness + layer_sharpness = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_sharpness" + if layer_key in results: + layer_sharpness.append(f"L{i}_sharp:{results[layer_key]:.4e}") + + if layer_sharpness: + log_parts.append(" ".join(layer_sharpness)) + + # Total update norms (Frobenius, Max-of-Max, Spectral) + total_norms = [] + if 'total_update_fnorm' in results: + total_norms.append(f"total_fnorm:{results['total_update_fnorm']:.4e}") + if 'total_l1_linf_norm' in results: + total_norms.append(f"total_l1_linf:{results['total_l1_linf_norm']:.4e}") + if 'total_spectral_norm' in results: + total_norms.append(f"total_spectral:{results['total_spectral_norm']:.4e}") + + if total_norms: + log_parts.append(" ".join(total_norms)) + + # Layer-wise update norms (Frobenius) + layer_fnorms = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_update_fnorm" + if layer_key in results: + layer_fnorms.append(f"L{i}_fnorm:{results[layer_key]:.4e}") + + if layer_fnorms: + log_parts.append(" ".join(layer_fnorms)) + + # Layer-wise update norms (Max-of-Max) + layer_l1_linf = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_l1_linf_norm" + if layer_key in results: + layer_l1_linf.append(f"L{i}_l1linf:{results[layer_key]:.4e}") + + if layer_l1_linf: + log_parts.append(" 
".join(layer_l1_linf)) + + # Layer-wise update norms (Spectral) + layer_spectral = [] + for i in range(1, 13): # Layers 1-12 + layer_key = f"layer_{i}_max_spectral_norm" + if layer_key in results: + layer_spectral.append(f"L{i}_spectral:{results[layer_key]:.4e}") + + if layer_spectral: + log_parts.append(" ".join(layer_spectral)) + + return " ".join(log_parts) + + + + + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels # + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) + for i in range(warmup_steps): + # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled + loss.backward() + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + + ######################################## + # Training and validation # + ######################################## + 
print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation) + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + # Validate at step 0 (after warmup), at specified intervals, and at the very last step + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0 and step > 0): + torch.cuda.synchronize() + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + # --- 1. Compute baseline validation loss --- + print0(f"PRINT: [Validation @ Step {step}] Calculating base validation loss...", console=True) + val_loss_item = calculate_validation_loss( + model_compiled, + args.val_files, + world_size * args.val_seq_len, + args.val_tokens, + step, + get_window_size_blocks(step) + ) + + # --- 2. Comprehensive Sharpness Analysis (all layers, cross-interactions, correlations) --- + sharpness_log_str = "" + # Only run sharpness for compatible models + if exp_args.model_parameterization in ["qkvo", "gated"]: + comprehensive_results = calculate_comprehensive_sharpness( + model, model_compiled, optimizers, step, args, rank, world_size, + print0, get_window_size_blocks, distributed_data_generator + ) + sharpness_log_str = format_comprehensive_results(comprehensive_results) + else: + print0(f"PRINT: [Sharpness @ Step {step}] Skipping sharpness calculation for model_parameterization='{exp_args.model_parameterization}'.", console=True) + + # --- 3. 
Logging --- + avg_step_time = training_time_ms / max(step, 1) + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_item:.4f} {sharpness_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + # --- 4. Reset timer for the next training segment --- + torch.cuda.synchronize() + t0 = time.perf_counter() + + + if last_step: + if master_process and args.save_checkpoint: + if run_dir_path_str: # Ensure run_dir_path_str is set by master process + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir + checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled + optimizers=[opt.state_dict() for opt in optimizers]) + torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + break + + + + # --------------- TRAINING SECTION ----------------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break # End if data runs out + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled + loss_train.backward() + + for param in model_compiled.parameters(): # Use model_compiled + if param.grad is not None: # Check if grad exists + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists --- + if optimizer2 is not None: # Check if Muon optimizer was created + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) # Use model_compiled + + # Logging (less frequent for training steps) + if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val + # This time is for the current segment since last validation / t0 reset + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + # approx_training_time_ms is the total cumulative time + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too + + + + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if 
dist.is_initialized(): + dist.destroy_process_group() +[2025-09-11 06:24:48] [Rank 0] PRINT: Constructing model... +[2025-09-11 06:24:48] [Rank 0] PRINT: Constructing model... +[2025-09-11 06:24:49] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 06:24:49] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-11 06:24:49] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 06:24:49] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-11 06:24:49] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 06:24:49] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-11 06:24:49] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 06:24:49] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-09-11 06:24:49] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 06:24:49] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-09-11 06:24:51] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 06:24:51] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-09-11 06:24:51] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 06:24:51] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. +[2025-09-11 06:24:51] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 06:24:51] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 06:24:57] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 06:24:57] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 06:24:57] [Rank 0] PRINT: Starting warmup... +[2025-09-11 06:24:57] [Rank 0] PRINT: Starting warmup... +[2025-09-11 06:25:39] [Rank 0] PRINT: Warmup complete. +[2025-09-11 06:25:39] [Rank 0] PRINT: Warmup complete. +[2025-09-11 06:25:39] [Rank 0] PRINT: Starting training... +[2025-09-11 06:25:39] [Rank 0] PRINT: Starting training... 
+[2025-09-11 06:25:40] [Rank 0] step:21/10000 train_time:1260ms step_avg:60.02ms +[2025-09-11 06:25:40] [Rank 0] step:21/10000 train_time:1260ms step_avg:60.02ms +[2025-09-11 06:25:41] [Rank 0] step:41/10000 train_time:1991ms step_avg:48.56ms +[2025-09-11 06:25:41] [Rank 0] step:41/10000 train_time:1991ms step_avg:48.56ms +[2025-09-11 06:25:42] [Rank 0] step:61/10000 train_time:2720ms step_avg:44.60ms +[2025-09-11 06:25:42] [Rank 0] step:61/10000 train_time:2720ms step_avg:44.60ms +[2025-09-11 06:25:43] [Rank 0] step:81/10000 train_time:3450ms step_avg:42.59ms +[2025-09-11 06:25:43] [Rank 0] step:81/10000 train_time:3450ms step_avg:42.59ms +[2025-09-11 06:25:43] [Rank 0] step:101/10000 train_time:4179ms step_avg:41.38ms +[2025-09-11 06:25:43] [Rank 0] step:101/10000 train_time:4179ms step_avg:41.38ms +[2025-09-11 06:25:44] [Rank 0] step:121/10000 train_time:4908ms step_avg:40.56ms +[2025-09-11 06:25:44] [Rank 0] step:121/10000 train_time:4908ms step_avg:40.56ms +[2025-09-11 06:25:45] [Rank 0] step:141/10000 train_time:5637ms step_avg:39.98ms +[2025-09-11 06:25:45] [Rank 0] step:141/10000 train_time:5637ms step_avg:39.98ms +[2025-09-11 06:25:46] [Rank 0] step:161/10000 train_time:6366ms step_avg:39.54ms +[2025-09-11 06:25:46] [Rank 0] step:161/10000 train_time:6366ms step_avg:39.54ms +[2025-09-11 06:25:46] [Rank 0] step:181/10000 train_time:7094ms step_avg:39.20ms +[2025-09-11 06:25:46] [Rank 0] step:181/10000 train_time:7094ms step_avg:39.20ms +[2025-09-11 06:25:47] [Rank 0] step:201/10000 train_time:7824ms step_avg:38.93ms +[2025-09-11 06:25:47] [Rank 0] step:201/10000 train_time:7824ms step_avg:38.93ms +[2025-09-11 06:25:48] [Rank 0] step:221/10000 train_time:8553ms step_avg:38.70ms +[2025-09-11 06:25:48] [Rank 0] step:221/10000 train_time:8553ms step_avg:38.70ms +[2025-09-11 06:25:48] [Rank 0] step:241/10000 train_time:9283ms step_avg:38.52ms +[2025-09-11 06:25:48] [Rank 0] step:241/10000 train_time:9283ms step_avg:38.52ms +[2025-09-11 06:25:49] [Rank 0] 
step:261/10000 train_time:10012ms step_avg:38.36ms +[2025-09-11 06:25:49] [Rank 0] step:261/10000 train_time:10012ms step_avg:38.36ms +[2025-09-11 06:25:50] [Rank 0] step:281/10000 train_time:10742ms step_avg:38.23ms +[2025-09-11 06:25:50] [Rank 0] step:281/10000 train_time:10742ms step_avg:38.23ms +[2025-09-11 06:25:51] [Rank 0] step:301/10000 train_time:11472ms step_avg:38.11ms +[2025-09-11 06:25:51] [Rank 0] step:301/10000 train_time:11472ms step_avg:38.11ms +[2025-09-11 06:25:52] [Rank 0] step:321/10000 train_time:12759ms step_avg:39.75ms +[2025-09-11 06:25:52] [Rank 0] step:321/10000 train_time:12759ms step_avg:39.75ms +[2025-09-11 06:25:53] [Rank 0] step:341/10000 train_time:13489ms step_avg:39.56ms +[2025-09-11 06:25:53] [Rank 0] step:341/10000 train_time:13489ms step_avg:39.56ms +[2025-09-11 06:25:53] [Rank 0] step:361/10000 train_time:14219ms step_avg:39.39ms +[2025-09-11 06:25:53] [Rank 0] step:361/10000 train_time:14219ms step_avg:39.39ms +[2025-09-11 06:25:54] [Rank 0] step:381/10000 train_time:15199ms step_avg:39.89ms +[2025-09-11 06:25:54] [Rank 0] step:381/10000 train_time:15199ms step_avg:39.89ms +[2025-09-11 06:25:55] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 06:25:55] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 06:26:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 06:26:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 06:26:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 06:26:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 06:26:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:26:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... 
+[2025-09-11 06:26:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 06:26:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 06:26:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 06:26:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 06:26:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 06:26:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 06:26:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 06:26:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 06:26:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 06:26:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 06:26:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 06:26:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 06:26:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 06:26:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 06:26:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 06:26:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 06:26:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 06:26:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 06:26:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... 
+[2025-09-11 06:26:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 06:26:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 06:26:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 06:26:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 06:26:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 06:26:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 06:26:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 06:26:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 06:26:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 06:26:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 06:26:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 06:26:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 06:26:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 06:26:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 06:26:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 06:26:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 06:26:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 06:26:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 06:26:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:26:53] [Rank 0] PRINT: step:400/10000 val_loss:5.9629 total_sharp:3.1609e-04 L1_sharp:3.7678e-03 L2_sharp:4.3446e-04 L3_sharp:1.8817e-04 L4_sharp:1.2181e-04 L5_sharp:2.3125e-04 L6_sharp:1.8550e-04 L7_sharp:1.9482e-04 L8_sharp:1.7762e-04 L9_sharp:2.3167e-04 L10_sharp:3.6567e-04 L11_sharp:3.7035e-03 L12_sharp:2.1244e-03 total_fnorm:2.0539e+02 total_l1_linf:6.1628e+05 total_spectral:1.0267e+02 L1_fnorm:1.2104e+01 L2_fnorm:1.1611e+01 L3_fnorm:1.1535e+01 L4_fnorm:1.1465e+01 L5_fnorm:1.1510e+01 L6_fnorm:1.1372e+01 L7_fnorm:1.1481e+01 L8_fnorm:1.1421e+01 L9_fnorm:1.1335e+01 L10_fnorm:1.1175e+01 L11_fnorm:1.0829e+01 L12_fnorm:1.0850e+01 L1_l1linf:3.3938e+00 L2_l1linf:3.5665e+00 L3_l1linf:3.6761e+00 L4_l1linf:3.7093e+00 L5_l1linf:3.7024e+00 L6_l1linf:3.6497e+00 L7_l1linf:3.5960e+00 L8_l1linf:3.5443e+00 L9_l1linf:3.3737e+00 L10_l1linf:3.2081e+00 L11_l1linf:2.7465e+00 L12_l1linf:2.6847e+00 L1_spectral:1.2201e-01 L2_spectral:1.2166e-01 L3_spectral:1.2137e-01 L4_spectral:1.2158e-01 L5_spectral:1.2131e-01 L6_spectral:1.2133e-01 L7_spectral:1.2161e-01 L8_spectral:1.2130e-01 L9_spectral:1.2139e-01 L10_spectral:1.2138e-01 L11_spectral:1.2138e-01 L12_spectral:1.2166e-01 train_time:15908ms step_avg:39.77ms +[2025-09-11 06:26:53] [Rank 0] PRINT: step:400/10000 val_loss:5.9629 total_sharp:3.1609e-04 L1_sharp:3.7678e-03 L2_sharp:4.3446e-04 L3_sharp:1.8817e-04 L4_sharp:1.2181e-04 L5_sharp:2.3125e-04 L6_sharp:1.8550e-04 L7_sharp:1.9482e-04 L8_sharp:1.7762e-04 L9_sharp:2.3167e-04 L10_sharp:3.6567e-04 L11_sharp:3.7035e-03 L12_sharp:2.1244e-03 total_fnorm:2.0539e+02 total_l1_linf:6.1628e+05 total_spectral:1.0267e+02 L1_fnorm:1.2104e+01 L2_fnorm:1.1611e+01 L3_fnorm:1.1535e+01 L4_fnorm:1.1465e+01 L5_fnorm:1.1510e+01 L6_fnorm:1.1372e+01 L7_fnorm:1.1481e+01 L8_fnorm:1.1421e+01 L9_fnorm:1.1335e+01 L10_fnorm:1.1175e+01 L11_fnorm:1.0829e+01 L12_fnorm:1.0850e+01 L1_l1linf:3.3938e+00 L2_l1linf:3.5665e+00 L3_l1linf:3.6761e+00 L4_l1linf:3.7093e+00 L5_l1linf:3.7024e+00 
L6_l1linf:3.6497e+00 L7_l1linf:3.5960e+00 L8_l1linf:3.5443e+00 L9_l1linf:3.3737e+00 L10_l1linf:3.2081e+00 L11_l1linf:2.7465e+00 L12_l1linf:2.6847e+00 L1_spectral:1.2201e-01 L2_spectral:1.2166e-01 L3_spectral:1.2137e-01 L4_spectral:1.2158e-01 L5_spectral:1.2131e-01 L6_spectral:1.2133e-01 L7_spectral:1.2161e-01 L8_spectral:1.2130e-01 L9_spectral:1.2139e-01 L10_spectral:1.2138e-01 L11_spectral:1.2138e-01 L12_spectral:1.2166e-01 train_time:15908ms step_avg:39.77ms +[2025-09-11 06:27:33] [Rank 0] step:401/10000 train_time:55726ms step_avg:138.97ms +[2025-09-11 06:27:33] [Rank 0] step:401/10000 train_time:55726ms step_avg:138.97ms +[2025-09-11 06:27:34] [Rank 0] step:421/10000 train_time:57559ms step_avg:136.72ms +[2025-09-11 06:27:34] [Rank 0] step:421/10000 train_time:57559ms step_avg:136.72ms +[2025-09-11 06:27:35] [Rank 0] step:441/10000 train_time:58200ms step_avg:131.97ms +[2025-09-11 06:27:35] [Rank 0] step:441/10000 train_time:58200ms step_avg:131.97ms +[2025-09-11 06:27:36] [Rank 0] step:461/10000 train_time:58840ms step_avg:127.64ms +[2025-09-11 06:27:36] [Rank 0] step:461/10000 train_time:58840ms step_avg:127.64ms +[2025-09-11 06:27:36] [Rank 0] step:481/10000 train_time:59480ms step_avg:123.66ms +[2025-09-11 06:27:36] [Rank 0] step:481/10000 train_time:59480ms step_avg:123.66ms +[2025-09-11 06:27:37] [Rank 0] step:501/10000 train_time:60119ms step_avg:120.00ms +[2025-09-11 06:27:37] [Rank 0] step:501/10000 train_time:60119ms step_avg:120.00ms +[2025-09-11 06:27:38] [Rank 0] step:521/10000 train_time:60759ms step_avg:116.62ms +[2025-09-11 06:27:38] [Rank 0] step:521/10000 train_time:60759ms step_avg:116.62ms +[2025-09-11 06:27:38] [Rank 0] step:541/10000 train_time:61399ms step_avg:113.49ms +[2025-09-11 06:27:38] [Rank 0] step:541/10000 train_time:61399ms step_avg:113.49ms +[2025-09-11 06:27:39] [Rank 0] step:561/10000 train_time:62039ms step_avg:110.59ms +[2025-09-11 06:27:39] [Rank 0] step:561/10000 train_time:62039ms step_avg:110.59ms +[2025-09-11 06:27:40] 
[Rank 0] step:581/10000 train_time:62678ms step_avg:107.88ms +[2025-09-11 06:27:40] [Rank 0] step:581/10000 train_time:62678ms step_avg:107.88ms +[2025-09-11 06:27:40] [Rank 0] step:601/10000 train_time:63319ms step_avg:105.36ms +[2025-09-11 06:27:40] [Rank 0] step:601/10000 train_time:63319ms step_avg:105.36ms +[2025-09-11 06:27:41] [Rank 0] step:621/10000 train_time:63958ms step_avg:102.99ms +[2025-09-11 06:27:41] [Rank 0] step:621/10000 train_time:63958ms step_avg:102.99ms +[2025-09-11 06:27:41] [Rank 0] step:641/10000 train_time:64597ms step_avg:100.78ms +[2025-09-11 06:27:41] [Rank 0] step:641/10000 train_time:64597ms step_avg:100.78ms +[2025-09-11 06:27:42] [Rank 0] step:661/10000 train_time:65237ms step_avg:98.69ms +[2025-09-11 06:27:42] [Rank 0] step:661/10000 train_time:65237ms step_avg:98.69ms +[2025-09-11 06:27:43] [Rank 0] step:681/10000 train_time:65877ms step_avg:96.74ms +[2025-09-11 06:27:43] [Rank 0] step:681/10000 train_time:65877ms step_avg:96.74ms +[2025-09-11 06:27:43] [Rank 0] step:701/10000 train_time:66516ms step_avg:94.89ms +[2025-09-11 06:27:43] [Rank 0] step:701/10000 train_time:66516ms step_avg:94.89ms +[2025-09-11 06:27:44] [Rank 0] step:721/10000 train_time:67157ms step_avg:93.14ms +[2025-09-11 06:27:44] [Rank 0] step:721/10000 train_time:67157ms step_avg:93.14ms +[2025-09-11 06:27:45] [Rank 0] step:741/10000 train_time:67796ms step_avg:91.49ms +[2025-09-11 06:27:45] [Rank 0] step:741/10000 train_time:67796ms step_avg:91.49ms +[2025-09-11 06:27:45] [Rank 0] step:761/10000 train_time:68441ms step_avg:89.94ms +[2025-09-11 06:27:45] [Rank 0] step:761/10000 train_time:68441ms step_avg:89.94ms +[2025-09-11 06:27:46] [Rank 0] step:781/10000 train_time:69085ms step_avg:88.46ms +[2025-09-11 06:27:46] [Rank 0] step:781/10000 train_time:69085ms step_avg:88.46ms +[2025-09-11 06:27:47] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... 
+[2025-09-11 06:27:47] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 06:27:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 06:27:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 06:28:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 06:28:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 06:28:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:28:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:28:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 06:28:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 06:28:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 06:28:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 06:28:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 06:28:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 06:28:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 06:28:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 06:28:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 06:28:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 06:28:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... 
+[2025-09-11 06:28:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 06:28:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 06:28:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 06:28:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 06:28:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 06:28:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 06:28:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 06:28:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 06:28:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 06:28:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 06:28:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 06:28:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 06:28:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 06:28:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 06:28:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 06:28:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 06:28:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 06:28:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 06:28:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 06:28:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... 
+[2025-09-11 06:28:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 06:28:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 06:28:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 06:28:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 06:28:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 06:28:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 06:28:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 06:28:45] [Rank 0] PRINT: step:800/10000 val_loss:5.5510 total_sharp:1.8261e-04 L1_sharp:2.0293e-03 L2_sharp:1.8671e-04 L3_sharp:1.5505e-04 L4_sharp:1.7602e-05 L5_sharp:9.9547e-05 L6_sharp:1.5015e-04 L7_sharp:7.7904e-05 L8_sharp:1.2386e-04 L9_sharp:1.2648e-04 L10_sharp:2.2515e-04 L11_sharp:7.5490e-04 L12_sharp:2.0979e-03 total_fnorm:2.0300e+02 total_l1_linf:5.9392e+05 total_spectral:1.0350e+02 L1_fnorm:1.2000e+01 L2_fnorm:1.1688e+01 L3_fnorm:1.1688e+01 L4_fnorm:1.1562e+01 L5_fnorm:1.1750e+01 L6_fnorm:1.1750e+01 L7_fnorm:1.1812e+01 L8_fnorm:1.1688e+01 L9_fnorm:1.1812e+01 L10_fnorm:1.1625e+01 L11_fnorm:1.1562e+01 L12_fnorm:1.1062e+01 L1_l1linf:3.4219e+00 L2_l1linf:3.6719e+00 L3_l1linf:3.5938e+00 L4_l1linf:3.5781e+00 L5_l1linf:3.6250e+00 L6_l1linf:3.5938e+00 L7_l1linf:3.6250e+00 L8_l1linf:3.5625e+00 L9_l1linf:3.4375e+00 L10_l1linf:3.3281e+00 L11_l1linf:2.9219e+00 L12_l1linf:2.6875e+00 L1_spectral:1.3408e-01 L2_spectral:1.3149e-01 L3_spectral:1.3237e-01 L4_spectral:1.3212e-01 L5_spectral:1.3197e-01 L6_spectral:1.3224e-01 L7_spectral:1.3235e-01 L8_spectral:1.3186e-01 L9_spectral:1.3258e-01 L10_spectral:1.3242e-01 L11_spectral:1.3247e-01 L12_spectral:1.3224e-01 train_time:69710ms step_avg:87.14ms +[2025-09-11 06:28:45] [Rank 0] PRINT: step:800/10000 val_loss:5.5510 
total_sharp:1.8261e-04 L1_sharp:2.0293e-03 L2_sharp:1.8671e-04 L3_sharp:1.5505e-04 L4_sharp:1.7602e-05 L5_sharp:9.9547e-05 L6_sharp:1.5015e-04 L7_sharp:7.7904e-05 L8_sharp:1.2386e-04 L9_sharp:1.2648e-04 L10_sharp:2.2515e-04 L11_sharp:7.5490e-04 L12_sharp:2.0979e-03 total_fnorm:2.0300e+02 total_l1_linf:5.9392e+05 total_spectral:1.0350e+02 L1_fnorm:1.2000e+01 L2_fnorm:1.1688e+01 L3_fnorm:1.1688e+01 L4_fnorm:1.1562e+01 L5_fnorm:1.1750e+01 L6_fnorm:1.1750e+01 L7_fnorm:1.1812e+01 L8_fnorm:1.1688e+01 L9_fnorm:1.1812e+01 L10_fnorm:1.1625e+01 L11_fnorm:1.1562e+01 L12_fnorm:1.1062e+01 L1_l1linf:3.4219e+00 L2_l1linf:3.6719e+00 L3_l1linf:3.5938e+00 L4_l1linf:3.5781e+00 L5_l1linf:3.6250e+00 L6_l1linf:3.5938e+00 L7_l1linf:3.6250e+00 L8_l1linf:3.5625e+00 L9_l1linf:3.4375e+00 L10_l1linf:3.3281e+00 L11_l1linf:2.9219e+00 L12_l1linf:2.6875e+00 L1_spectral:1.3408e-01 L2_spectral:1.3149e-01 L3_spectral:1.3237e-01 L4_spectral:1.3212e-01 L5_spectral:1.3197e-01 L6_spectral:1.3224e-01 L7_spectral:1.3235e-01 L8_spectral:1.3186e-01 L9_spectral:1.3258e-01 L10_spectral:1.3242e-01 L11_spectral:1.3247e-01 L12_spectral:1.3224e-01 train_time:69710ms step_avg:87.14ms +[2025-09-11 06:28:46] [Rank 0] step:801/10000 train_time:70949ms step_avg:88.57ms +[2025-09-11 06:28:46] [Rank 0] step:801/10000 train_time:70949ms step_avg:88.57ms +[2025-09-11 06:28:47] [Rank 0] step:821/10000 train_time:71597ms step_avg:87.21ms +[2025-09-11 06:28:47] [Rank 0] step:821/10000 train_time:71597ms step_avg:87.21ms +[2025-09-11 06:28:47] [Rank 0] step:841/10000 train_time:72243ms step_avg:85.90ms +[2025-09-11 06:28:47] [Rank 0] step:841/10000 train_time:72243ms step_avg:85.90ms +[2025-09-11 06:28:48] [Rank 0] step:861/10000 train_time:72889ms step_avg:84.66ms +[2025-09-11 06:28:48] [Rank 0] step:861/10000 train_time:72889ms step_avg:84.66ms +[2025-09-11 06:28:49] [Rank 0] step:881/10000 train_time:73534ms step_avg:83.47ms +[2025-09-11 06:28:49] [Rank 0] step:881/10000 train_time:73534ms step_avg:83.47ms +[2025-09-11 
06:28:49] [Rank 0] step:901/10000 train_time:74179ms step_avg:82.33ms +[2025-09-11 06:28:49] [Rank 0] step:901/10000 train_time:74179ms step_avg:82.33ms +[2025-09-11 06:28:50] [Rank 0] step:921/10000 train_time:74824ms step_avg:81.24ms +[2025-09-11 06:28:50] [Rank 0] step:921/10000 train_time:74824ms step_avg:81.24ms +[2025-09-11 06:28:51] [Rank 0] step:941/10000 train_time:75469ms step_avg:80.20ms +[2025-09-11 06:28:51] [Rank 0] step:941/10000 train_time:75469ms step_avg:80.20ms +[2025-09-11 06:28:51] [Rank 0] step:961/10000 train_time:76113ms step_avg:79.20ms +[2025-09-11 06:28:51] [Rank 0] step:961/10000 train_time:76113ms step_avg:79.20ms +[2025-09-11 06:28:52] [Rank 0] step:981/10000 train_time:76758ms step_avg:78.24ms +[2025-09-11 06:28:52] [Rank 0] step:981/10000 train_time:76758ms step_avg:78.24ms +[2025-09-11 06:28:52] [Rank 0] step:1001/10000 train_time:77402ms step_avg:77.33ms +[2025-09-11 06:28:52] [Rank 0] step:1001/10000 train_time:77402ms step_avg:77.33ms +[2025-09-11 06:28:53] [Rank 0] step:1021/10000 train_time:78047ms step_avg:76.44ms +[2025-09-11 06:28:53] [Rank 0] step:1021/10000 train_time:78047ms step_avg:76.44ms +[2025-09-11 06:28:54] [Rank 0] step:1041/10000 train_time:78691ms step_avg:75.59ms +[2025-09-11 06:28:54] [Rank 0] step:1041/10000 train_time:78691ms step_avg:75.59ms +[2025-09-11 06:28:54] [Rank 0] step:1061/10000 train_time:79336ms step_avg:74.77ms +[2025-09-11 06:28:54] [Rank 0] step:1061/10000 train_time:79336ms step_avg:74.77ms +[2025-09-11 06:28:55] [Rank 0] step:1081/10000 train_time:79981ms step_avg:73.99ms +[2025-09-11 06:28:55] [Rank 0] step:1081/10000 train_time:79981ms step_avg:73.99ms +[2025-09-11 06:28:56] [Rank 0] step:1101/10000 train_time:80625ms step_avg:73.23ms +[2025-09-11 06:28:56] [Rank 0] step:1101/10000 train_time:80625ms step_avg:73.23ms +[2025-09-11 06:28:56] [Rank 0] step:1121/10000 train_time:81269ms step_avg:72.50ms +[2025-09-11 06:28:56] [Rank 0] step:1121/10000 train_time:81269ms step_avg:72.50ms 
+[2025-09-11 06:28:57] [Rank 0] step:1141/10000 train_time:81915ms step_avg:71.79ms +[2025-09-11 06:28:57] [Rank 0] step:1141/10000 train_time:81915ms step_avg:71.79ms +[2025-09-11 06:28:58] [Rank 0] step:1161/10000 train_time:82561ms step_avg:71.11ms +[2025-09-11 06:28:58] [Rank 0] step:1161/10000 train_time:82561ms step_avg:71.11ms +[2025-09-11 06:28:58] [Rank 0] step:1181/10000 train_time:83205ms step_avg:70.45ms +[2025-09-11 06:28:58] [Rank 0] step:1181/10000 train_time:83205ms step_avg:70.45ms +[2025-09-11 06:28:59] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 06:28:59] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 06:29:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 06:29:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 06:29:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 06:29:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 06:29:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:29:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:29:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 06:29:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 06:29:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 06:29:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 06:29:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... 
+[2025-09-11 06:29:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 06:29:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 06:29:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 06:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 06:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 06:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 06:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 06:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 06:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 06:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 06:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 06:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 06:29:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 06:29:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 06:29:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 06:29:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 06:29:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 06:29:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 06:29:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... 
+[2025-09-11 06:29:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 06:29:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 06:29:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 06:29:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 06:29:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 06:29:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 06:29:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 06:29:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 06:29:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 06:29:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 06:29:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 06:29:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 06:29:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 06:29:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:29:09] [Rank 0] PRINT: step:1200/10000 val_loss:5.2268 total_sharp:1.1565e-04 L1_sharp:1.1438e-03 L2_sharp:1.0818e-04 L3_sharp:7.1028e-05 L4_sharp:2.7955e-05 L5_sharp:3.0051e-05 L6_sharp:9.2157e-05 L7_sharp:3.9059e-05 L8_sharp:7.5078e-05 L9_sharp:1.1948e-04 L10_sharp:1.2650e-04 L11_sharp:4.2062e-04 L12_sharp:1.0067e-03 total_fnorm:2.0000e+02 total_l1_linf:5.7754e+05 total_spectral:1.0200e+02 L1_fnorm:1.2125e+01 L2_fnorm:1.2062e+01 L3_fnorm:1.2000e+01 L4_fnorm:1.2062e+01 L5_fnorm:1.2062e+01 L6_fnorm:1.2125e+01 L7_fnorm:1.2312e+01 L8_fnorm:1.2000e+01 L9_fnorm:1.2188e+01 L10_fnorm:1.2000e+01 L11_fnorm:1.2250e+01 L12_fnorm:1.2000e+01 L1_l1linf:3.4375e+00 L2_l1linf:3.4844e+00 L3_l1linf:3.4531e+00 L4_l1linf:3.4219e+00 L5_l1linf:3.4688e+00 L6_l1linf:3.3750e+00 L7_l1linf:3.3750e+00 L8_l1linf:3.3750e+00 L9_l1linf:3.2656e+00 L10_l1linf:3.1719e+00 L11_l1linf:3.1406e+00 L12_l1linf:2.9062e+00 L1_spectral:1.3805e-01 L2_spectral:1.3750e-01 L3_spectral:1.3755e-01 L4_spectral:1.3792e-01 L5_spectral:1.3822e-01 L6_spectral:1.3790e-01 L7_spectral:1.3906e-01 L8_spectral:1.4135e-01 L9_spectral:1.3862e-01 L10_spectral:1.3968e-01 L11_spectral:1.3864e-01 L12_spectral:1.3910e-01 train_time:83832ms step_avg:69.86ms +[2025-09-11 06:29:09] [Rank 0] PRINT: step:1200/10000 val_loss:5.2268 total_sharp:1.1565e-04 L1_sharp:1.1438e-03 L2_sharp:1.0818e-04 L3_sharp:7.1028e-05 L4_sharp:2.7955e-05 L5_sharp:3.0051e-05 L6_sharp:9.2157e-05 L7_sharp:3.9059e-05 L8_sharp:7.5078e-05 L9_sharp:1.1948e-04 L10_sharp:1.2650e-04 L11_sharp:4.2062e-04 L12_sharp:1.0067e-03 total_fnorm:2.0000e+02 total_l1_linf:5.7754e+05 total_spectral:1.0200e+02 L1_fnorm:1.2125e+01 L2_fnorm:1.2062e+01 L3_fnorm:1.2000e+01 L4_fnorm:1.2062e+01 L5_fnorm:1.2062e+01 L6_fnorm:1.2125e+01 L7_fnorm:1.2312e+01 L8_fnorm:1.2000e+01 L9_fnorm:1.2188e+01 L10_fnorm:1.2000e+01 L11_fnorm:1.2250e+01 L12_fnorm:1.2000e+01 L1_l1linf:3.4375e+00 L2_l1linf:3.4844e+00 L3_l1linf:3.4531e+00 L4_l1linf:3.4219e+00 L5_l1linf:3.4688e+00 
L6_l1linf:3.3750e+00 L7_l1linf:3.3750e+00 L8_l1linf:3.3750e+00 L9_l1linf:3.2656e+00 L10_l1linf:3.1719e+00 L11_l1linf:3.1406e+00 L12_l1linf:2.9062e+00 L1_spectral:1.3805e-01 L2_spectral:1.3750e-01 L3_spectral:1.3755e-01 L4_spectral:1.3792e-01 L5_spectral:1.3822e-01 L6_spectral:1.3790e-01 L7_spectral:1.3906e-01 L8_spectral:1.4135e-01 L9_spectral:1.3862e-01 L10_spectral:1.3968e-01 L11_spectral:1.3864e-01 L12_spectral:1.3910e-01 train_time:83832ms step_avg:69.86ms +[2025-09-11 06:29:10] [Rank 0] step:1201/10000 train_time:85149ms step_avg:70.90ms +[2025-09-11 06:29:10] [Rank 0] step:1201/10000 train_time:85149ms step_avg:70.90ms +[2025-09-11 06:29:11] [Rank 0] step:1221/10000 train_time:85809ms step_avg:70.28ms +[2025-09-11 06:29:11] [Rank 0] step:1221/10000 train_time:85809ms step_avg:70.28ms +[2025-09-11 06:29:11] [Rank 0] step:1241/10000 train_time:86455ms step_avg:69.67ms +[2025-09-11 06:29:11] [Rank 0] step:1241/10000 train_time:86455ms step_avg:69.67ms +[2025-09-11 06:29:12] [Rank 0] step:1261/10000 train_time:87102ms step_avg:69.07ms +[2025-09-11 06:29:12] [Rank 0] step:1261/10000 train_time:87102ms step_avg:69.07ms +[2025-09-11 06:29:13] [Rank 0] step:1281/10000 train_time:87747ms step_avg:68.50ms +[2025-09-11 06:29:13] [Rank 0] step:1281/10000 train_time:87747ms step_avg:68.50ms +[2025-09-11 06:29:13] [Rank 0] step:1301/10000 train_time:88394ms step_avg:67.94ms +[2025-09-11 06:29:13] [Rank 0] step:1301/10000 train_time:88394ms step_avg:67.94ms +[2025-09-11 06:29:14] [Rank 0] step:1321/10000 train_time:89039ms step_avg:67.40ms +[2025-09-11 06:29:14] [Rank 0] step:1321/10000 train_time:89039ms step_avg:67.40ms +[2025-09-11 06:29:15] [Rank 0] step:1341/10000 train_time:89685ms step_avg:66.88ms +[2025-09-11 06:29:15] [Rank 0] step:1341/10000 train_time:89685ms step_avg:66.88ms +[2025-09-11 06:29:15] [Rank 0] step:1361/10000 train_time:90330ms step_avg:66.37ms +[2025-09-11 06:29:15] [Rank 0] step:1361/10000 train_time:90330ms step_avg:66.37ms +[2025-09-11 06:29:16] 
[Rank 0] step:1381/10000 train_time:90976ms step_avg:65.88ms +[2025-09-11 06:29:16] [Rank 0] step:1381/10000 train_time:90976ms step_avg:65.88ms +[2025-09-11 06:29:17] [Rank 0] step:1401/10000 train_time:91621ms step_avg:65.40ms +[2025-09-11 06:29:17] [Rank 0] step:1401/10000 train_time:91621ms step_avg:65.40ms +[2025-09-11 06:29:17] [Rank 0] step:1421/10000 train_time:92267ms step_avg:64.93ms +[2025-09-11 06:29:17] [Rank 0] step:1421/10000 train_time:92267ms step_avg:64.93ms +[2025-09-11 06:29:18] [Rank 0] step:1441/10000 train_time:92910ms step_avg:64.48ms +[2025-09-11 06:29:18] [Rank 0] step:1441/10000 train_time:92910ms step_avg:64.48ms +[2025-09-11 06:29:19] [Rank 0] step:1461/10000 train_time:93556ms step_avg:64.04ms +[2025-09-11 06:29:19] [Rank 0] step:1461/10000 train_time:93556ms step_avg:64.04ms +[2025-09-11 06:29:19] [Rank 0] step:1481/10000 train_time:94201ms step_avg:63.61ms +[2025-09-11 06:29:19] [Rank 0] step:1481/10000 train_time:94201ms step_avg:63.61ms +[2025-09-11 06:29:20] [Rank 0] step:1501/10000 train_time:94851ms step_avg:63.19ms +[2025-09-11 06:29:20] [Rank 0] step:1501/10000 train_time:94851ms step_avg:63.19ms +[2025-09-11 06:29:21] [Rank 0] step:1521/10000 train_time:95500ms step_avg:62.79ms +[2025-09-11 06:29:21] [Rank 0] step:1521/10000 train_time:95500ms step_avg:62.79ms +[2025-09-11 06:29:21] [Rank 0] step:1541/10000 train_time:96149ms step_avg:62.39ms +[2025-09-11 06:29:21] [Rank 0] step:1541/10000 train_time:96149ms step_avg:62.39ms +[2025-09-11 06:29:22] [Rank 0] step:1561/10000 train_time:96799ms step_avg:62.01ms +[2025-09-11 06:29:22] [Rank 0] step:1561/10000 train_time:96799ms step_avg:62.01ms +[2025-09-11 06:29:22] [Rank 0] step:1581/10000 train_time:97448ms step_avg:61.64ms +[2025-09-11 06:29:22] [Rank 0] step:1581/10000 train_time:97448ms step_avg:61.64ms +[2025-09-11 06:29:23] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 06:29:23] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 06:29:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 06:29:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 06:29:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 06:29:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 06:29:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:29:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:29:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 06:29:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 06:29:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 06:29:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 06:29:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 06:29:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 06:29:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 06:29:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 06:29:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 06:29:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 06:29:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 06:29:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 06:29:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 06:29:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 06:29:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 06:29:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 06:29:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 06:29:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 06:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 06:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 06:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 06:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 06:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 06:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 06:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 06:29:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 06:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 06:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 06:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 06:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 06:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 06:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 06:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 06:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 06:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 06:29:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 06:29:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 06:29:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 06:29:33] [Rank 0] PRINT: step:1600/10000 val_loss:5.0765 total_sharp:1.0982e-04 L1_sharp:1.1522e-03 L2_sharp:6.8814e-05 L3_sharp:4.7411e-05 L4_sharp:-6.5811e-06 L5_sharp:1.2391e-05 L6_sharp:9.9616e-05 L7_sharp:7.1904e-05 L8_sharp:8.8590e-05 L9_sharp:6.3580e-05 L10_sharp:1.1219e-04 L11_sharp:2.4084e-04 L12_sharp:1.1514e-03 total_fnorm:1.9100e+02 total_l1_linf:5.4067e+05 total_spectral:9.8000e+01 L1_fnorm:1.2062e+01 L2_fnorm:1.2125e+01 L3_fnorm:1.2188e+01 L4_fnorm:1.2188e+01 L5_fnorm:1.2250e+01 L6_fnorm:1.2312e+01 L7_fnorm:1.2438e+01 L8_fnorm:1.2062e+01 L9_fnorm:1.2312e+01 L10_fnorm:1.2188e+01 L11_fnorm:1.2438e+01 L12_fnorm:1.2312e+01 L1_l1linf:3.3594e+00 L2_l1linf:3.4062e+00 L3_l1linf:3.3125e+00 L4_l1linf:3.3750e+00 L5_l1linf:3.3594e+00 L6_l1linf:3.3125e+00 L7_l1linf:3.2969e+00 L8_l1linf:3.2031e+00 L9_l1linf:3.1562e+00 L10_l1linf:3.1406e+00 L11_l1linf:3.0312e+00 L12_l1linf:2.9688e+00 L1_spectral:1.4273e-01 L2_spectral:1.4083e-01 L3_spectral:1.4139e-01 L4_spectral:1.4187e-01 L5_spectral:1.4207e-01 L6_spectral:1.4272e-01 L7_spectral:1.4314e-01 L8_spectral:1.4701e-01 L9_spectral:1.4326e-01 L10_spectral:1.4360e-01 L11_spectral:1.4391e-01 L12_spectral:1.4590e-01 train_time:98080ms step_avg:61.30ms +[2025-09-11 06:29:33] [Rank 0] PRINT: step:1600/10000 
val_loss:5.0765 total_sharp:1.0982e-04 L1_sharp:1.1522e-03 L2_sharp:6.8814e-05 L3_sharp:4.7411e-05 L4_sharp:-6.5811e-06 L5_sharp:1.2391e-05 L6_sharp:9.9616e-05 L7_sharp:7.1904e-05 L8_sharp:8.8590e-05 L9_sharp:6.3580e-05 L10_sharp:1.1219e-04 L11_sharp:2.4084e-04 L12_sharp:1.1514e-03 total_fnorm:1.9100e+02 total_l1_linf:5.4067e+05 total_spectral:9.8000e+01 L1_fnorm:1.2062e+01 L2_fnorm:1.2125e+01 L3_fnorm:1.2188e+01 L4_fnorm:1.2188e+01 L5_fnorm:1.2250e+01 L6_fnorm:1.2312e+01 L7_fnorm:1.2438e+01 L8_fnorm:1.2062e+01 L9_fnorm:1.2312e+01 L10_fnorm:1.2188e+01 L11_fnorm:1.2438e+01 L12_fnorm:1.2312e+01 L1_l1linf:3.3594e+00 L2_l1linf:3.4062e+00 L3_l1linf:3.3125e+00 L4_l1linf:3.3750e+00 L5_l1linf:3.3594e+00 L6_l1linf:3.3125e+00 L7_l1linf:3.2969e+00 L8_l1linf:3.2031e+00 L9_l1linf:3.1562e+00 L10_l1linf:3.1406e+00 L11_l1linf:3.0312e+00 L12_l1linf:2.9688e+00 L1_spectral:1.4273e-01 L2_spectral:1.4083e-01 L3_spectral:1.4139e-01 L4_spectral:1.4187e-01 L5_spectral:1.4207e-01 L6_spectral:1.4272e-01 L7_spectral:1.4314e-01 L8_spectral:1.4701e-01 L9_spectral:1.4326e-01 L10_spectral:1.4360e-01 L11_spectral:1.4391e-01 L12_spectral:1.4590e-01 train_time:98080ms step_avg:61.30ms +[2025-09-11 06:29:34] [Rank 0] step:1601/10000 train_time:99437ms step_avg:62.11ms +[2025-09-11 06:29:34] [Rank 0] step:1601/10000 train_time:99437ms step_avg:62.11ms +[2025-09-11 06:29:35] [Rank 0] step:1621/10000 train_time:100110ms step_avg:61.76ms +[2025-09-11 06:29:35] [Rank 0] step:1621/10000 train_time:100110ms step_avg:61.76ms +[2025-09-11 06:29:36] [Rank 0] step:1641/10000 train_time:100760ms step_avg:61.40ms +[2025-09-11 06:29:36] [Rank 0] step:1641/10000 train_time:100760ms step_avg:61.40ms +[2025-09-11 06:29:36] [Rank 0] step:1661/10000 train_time:101409ms step_avg:61.05ms +[2025-09-11 06:29:36] [Rank 0] step:1661/10000 train_time:101409ms step_avg:61.05ms +[2025-09-11 06:29:37] [Rank 0] step:1681/10000 train_time:102058ms step_avg:60.71ms +[2025-09-11 06:29:37] [Rank 0] step:1681/10000 
train_time:102058ms step_avg:60.71ms +[2025-09-11 06:29:38] [Rank 0] step:1701/10000 train_time:102708ms step_avg:60.38ms +[2025-09-11 06:29:38] [Rank 0] step:1701/10000 train_time:102708ms step_avg:60.38ms +[2025-09-11 06:29:38] [Rank 0] step:1721/10000 train_time:103357ms step_avg:60.06ms +[2025-09-11 06:29:38] [Rank 0] step:1721/10000 train_time:103357ms step_avg:60.06ms +[2025-09-11 06:29:39] [Rank 0] step:1741/10000 train_time:104006ms step_avg:59.74ms +[2025-09-11 06:29:39] [Rank 0] step:1741/10000 train_time:104006ms step_avg:59.74ms +[2025-09-11 06:29:40] [Rank 0] step:1761/10000 train_time:104655ms step_avg:59.43ms +[2025-09-11 06:29:40] [Rank 0] step:1761/10000 train_time:104655ms step_avg:59.43ms +[2025-09-11 06:29:40] [Rank 0] step:1781/10000 train_time:105305ms step_avg:59.13ms +[2025-09-11 06:29:40] [Rank 0] step:1781/10000 train_time:105305ms step_avg:59.13ms +[2025-09-11 06:29:41] [Rank 0] step:1801/10000 train_time:105953ms step_avg:58.83ms +[2025-09-11 06:29:41] [Rank 0] step:1801/10000 train_time:105953ms step_avg:58.83ms +[2025-09-11 06:29:42] [Rank 0] step:1821/10000 train_time:106602ms step_avg:58.54ms +[2025-09-11 06:29:42] [Rank 0] step:1821/10000 train_time:106602ms step_avg:58.54ms +[2025-09-11 06:29:42] [Rank 0] step:1841/10000 train_time:107251ms step_avg:58.26ms +[2025-09-11 06:29:42] [Rank 0] step:1841/10000 train_time:107251ms step_avg:58.26ms +[2025-09-11 06:29:43] [Rank 0] step:1861/10000 train_time:107900ms step_avg:57.98ms +[2025-09-11 06:29:43] [Rank 0] step:1861/10000 train_time:107900ms step_avg:57.98ms +[2025-09-11 06:29:44] [Rank 0] step:1881/10000 train_time:108549ms step_avg:57.71ms +[2025-09-11 06:29:44] [Rank 0] step:1881/10000 train_time:108549ms step_avg:57.71ms +[2025-09-11 06:29:44] [Rank 0] step:1901/10000 train_time:109202ms step_avg:57.44ms +[2025-09-11 06:29:44] [Rank 0] step:1901/10000 train_time:109202ms step_avg:57.44ms +[2025-09-11 06:29:45] [Rank 0] step:1921/10000 train_time:109850ms step_avg:57.18ms 
+[2025-09-11 06:29:45] [Rank 0] step:1921/10000 train_time:109850ms step_avg:57.18ms +[2025-09-11 06:29:46] [Rank 0] step:1941/10000 train_time:110499ms step_avg:56.93ms +[2025-09-11 06:29:46] [Rank 0] step:1941/10000 train_time:110499ms step_avg:56.93ms +[2025-09-11 06:29:46] [Rank 0] step:1961/10000 train_time:111148ms step_avg:56.68ms +[2025-09-11 06:29:46] [Rank 0] step:1961/10000 train_time:111148ms step_avg:56.68ms +[2025-09-11 06:29:47] [Rank 0] step:1981/10000 train_time:111798ms step_avg:56.43ms +[2025-09-11 06:29:47] [Rank 0] step:1981/10000 train_time:111798ms step_avg:56.43ms +[2025-09-11 06:29:47] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 06:29:47] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 06:29:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 06:29:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 06:29:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 06:29:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 06:29:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:29:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:29:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 06:29:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 06:29:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 06:29:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... 
+[2025-09-11 06:29:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 06:29:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 06:29:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 06:29:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 06:29:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 06:29:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 06:29:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 06:29:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 06:29:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 06:29:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 06:29:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 06:29:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 06:29:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 06:29:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 06:29:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 06:29:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 06:29:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 06:29:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 06:29:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 06:29:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 06:29:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 06:29:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 06:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 06:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 06:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 06:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 06:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 06:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 06:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 06:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 06:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 06:29:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 06:29:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 06:29:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:29:57] [Rank 0] PRINT: step:2000/10000 val_loss:4.9210 total_sharp:7.4951e-05 L1_sharp:5.1817e-04 L2_sharp:5.0785e-05 L3_sharp:-4.6677e-07 L4_sharp:2.6577e-05 L5_sharp:2.3387e-05 L6_sharp:2.4616e-05 L7_sharp:1.1482e-04 L8_sharp:9.5929e-05 L9_sharp:8.1932e-05 L10_sharp:9.7311e-05 L11_sharp:1.5489e-04 L12_sharp:1.0431e-03 total_fnorm:1.9400e+02 total_l1_linf:5.6115e+05 total_spectral:9.9500e+01 L1_fnorm:1.2062e+01 L2_fnorm:1.2188e+01 L3_fnorm:1.2188e+01 L4_fnorm:1.2312e+01 L5_fnorm:1.2375e+01 L6_fnorm:1.2438e+01 L7_fnorm:1.2500e+01 L8_fnorm:1.2062e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2375e+01 L11_fnorm:1.2562e+01 L12_fnorm:1.2438e+01 L1_l1linf:3.3281e+00 L2_l1linf:3.2344e+00 L3_l1linf:3.1094e+00 L4_l1linf:3.1250e+00 L5_l1linf:3.1875e+00 L6_l1linf:3.1875e+00 L7_l1linf:3.2188e+00 L8_l1linf:3.1875e+00 L9_l1linf:3.0781e+00 L10_l1linf:3.0469e+00 L11_l1linf:2.9844e+00 L12_l1linf:2.9531e+00 L1_spectral:1.4528e-01 L2_spectral:1.4318e-01 L3_spectral:1.4470e-01 L4_spectral:1.4463e-01 L5_spectral:1.4669e-01 L6_spectral:1.4500e-01 L7_spectral:1.4624e-01 L8_spectral:1.5028e-01 L9_spectral:1.4685e-01 L10_spectral:1.4810e-01 L11_spectral:1.4761e-01 L12_spectral:1.4849e-01 train_time:112428ms step_avg:56.21ms +[2025-09-11 06:29:57] [Rank 0] PRINT: step:2000/10000 val_loss:4.9210 total_sharp:7.4951e-05 L1_sharp:5.1817e-04 L2_sharp:5.0785e-05 L3_sharp:-4.6677e-07 L4_sharp:2.6577e-05 L5_sharp:2.3387e-05 L6_sharp:2.4616e-05 L7_sharp:1.1482e-04 L8_sharp:9.5929e-05 L9_sharp:8.1932e-05 L10_sharp:9.7311e-05 L11_sharp:1.5489e-04 L12_sharp:1.0431e-03 total_fnorm:1.9400e+02 total_l1_linf:5.6115e+05 total_spectral:9.9500e+01 L1_fnorm:1.2062e+01 L2_fnorm:1.2188e+01 L3_fnorm:1.2188e+01 L4_fnorm:1.2312e+01 L5_fnorm:1.2375e+01 L6_fnorm:1.2438e+01 L7_fnorm:1.2500e+01 L8_fnorm:1.2062e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2375e+01 L11_fnorm:1.2562e+01 L12_fnorm:1.2438e+01 L1_l1linf:3.3281e+00 L2_l1linf:3.2344e+00 L3_l1linf:3.1094e+00 L4_l1linf:3.1250e+00 L5_l1linf:3.1875e+00 
L6_l1linf:3.1875e+00 L7_l1linf:3.2188e+00 L8_l1linf:3.1875e+00 L9_l1linf:3.0781e+00 L10_l1linf:3.0469e+00 L11_l1linf:2.9844e+00 L12_l1linf:2.9531e+00 L1_spectral:1.4528e-01 L2_spectral:1.4318e-01 L3_spectral:1.4470e-01 L4_spectral:1.4463e-01 L5_spectral:1.4669e-01 L6_spectral:1.4500e-01 L7_spectral:1.4624e-01 L8_spectral:1.5028e-01 L9_spectral:1.4685e-01 L10_spectral:1.4810e-01 L11_spectral:1.4761e-01 L12_spectral:1.4849e-01 train_time:112428ms step_avg:56.21ms +[2025-09-11 06:29:58] [Rank 0] step:2001/10000 train_time:113708ms step_avg:56.83ms +[2025-09-11 06:29:58] [Rank 0] step:2001/10000 train_time:113708ms step_avg:56.83ms +[2025-09-11 06:29:59] [Rank 0] step:2021/10000 train_time:114361ms step_avg:56.59ms +[2025-09-11 06:29:59] [Rank 0] step:2021/10000 train_time:114361ms step_avg:56.59ms +[2025-09-11 06:30:00] [Rank 0] step:2041/10000 train_time:115011ms step_avg:56.35ms +[2025-09-11 06:30:00] [Rank 0] step:2041/10000 train_time:115011ms step_avg:56.35ms +[2025-09-11 06:30:00] [Rank 0] step:2061/10000 train_time:115660ms step_avg:56.12ms +[2025-09-11 06:30:00] [Rank 0] step:2061/10000 train_time:115660ms step_avg:56.12ms +[2025-09-11 06:30:01] [Rank 0] step:2081/10000 train_time:116368ms step_avg:55.92ms +[2025-09-11 06:30:01] [Rank 0] step:2081/10000 train_time:116368ms step_avg:55.92ms +[2025-09-11 06:30:02] [Rank 0] step:2101/10000 train_time:117086ms step_avg:55.73ms +[2025-09-11 06:30:02] [Rank 0] step:2101/10000 train_time:117086ms step_avg:55.73ms +[2025-09-11 06:30:03] [Rank 0] step:2121/10000 train_time:117787ms step_avg:55.53ms +[2025-09-11 06:30:03] [Rank 0] step:2121/10000 train_time:117787ms step_avg:55.53ms +[2025-09-11 06:30:03] [Rank 0] step:2141/10000 train_time:118436ms step_avg:55.32ms +[2025-09-11 06:30:03] [Rank 0] step:2141/10000 train_time:118436ms step_avg:55.32ms +[2025-09-11 06:30:04] [Rank 0] step:2161/10000 train_time:119264ms step_avg:55.19ms +[2025-09-11 06:30:04] [Rank 0] step:2161/10000 train_time:119264ms step_avg:55.19ms 
+[2025-09-11 06:30:05] [Rank 0] step:2181/10000 train_time:120303ms step_avg:55.16ms +[2025-09-11 06:30:05] [Rank 0] step:2181/10000 train_time:120303ms step_avg:55.16ms +[2025-09-11 06:30:06] [Rank 0] step:2201/10000 train_time:120952ms step_avg:54.95ms +[2025-09-11 06:30:06] [Rank 0] step:2201/10000 train_time:120952ms step_avg:54.95ms +[2025-09-11 06:30:06] [Rank 0] step:2221/10000 train_time:121754ms step_avg:54.82ms +[2025-09-11 06:30:06] [Rank 0] step:2221/10000 train_time:121754ms step_avg:54.82ms +[2025-09-11 06:30:07] [Rank 0] step:2241/10000 train_time:122561ms step_avg:54.69ms +[2025-09-11 06:30:07] [Rank 0] step:2241/10000 train_time:122561ms step_avg:54.69ms +[2025-09-11 06:30:08] [Rank 0] step:2261/10000 train_time:123222ms step_avg:54.50ms +[2025-09-11 06:30:08] [Rank 0] step:2261/10000 train_time:123222ms step_avg:54.50ms +[2025-09-11 06:30:09] [Rank 0] step:2281/10000 train_time:123883ms step_avg:54.31ms +[2025-09-11 06:30:09] [Rank 0] step:2281/10000 train_time:123883ms step_avg:54.31ms +[2025-09-11 06:30:09] [Rank 0] step:2301/10000 train_time:124544ms step_avg:54.13ms +[2025-09-11 06:30:09] [Rank 0] step:2301/10000 train_time:124544ms step_avg:54.13ms +[2025-09-11 06:30:10] [Rank 0] step:2321/10000 train_time:125205ms step_avg:53.94ms +[2025-09-11 06:30:10] [Rank 0] step:2321/10000 train_time:125205ms step_avg:53.94ms +[2025-09-11 06:30:11] [Rank 0] step:2341/10000 train_time:125866ms step_avg:53.77ms +[2025-09-11 06:30:11] [Rank 0] step:2341/10000 train_time:125866ms step_avg:53.77ms +[2025-09-11 06:30:11] [Rank 0] step:2361/10000 train_time:126527ms step_avg:53.59ms +[2025-09-11 06:30:11] [Rank 0] step:2361/10000 train_time:126527ms step_avg:53.59ms +[2025-09-11 06:30:12] [Rank 0] step:2381/10000 train_time:127187ms step_avg:53.42ms +[2025-09-11 06:30:12] [Rank 0] step:2381/10000 train_time:127187ms step_avg:53.42ms +[2025-09-11 06:30:13] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 06:30:13] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 06:30:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 06:30:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 06:30:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 06:30:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 06:30:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:30:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:30:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 06:30:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 06:30:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 06:30:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 06:30:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 06:30:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 06:30:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 06:30:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 06:30:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 06:30:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 06:30:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 06:30:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 06:30:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 06:30:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 06:30:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 06:30:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 06:30:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 06:30:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 06:30:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 06:30:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 06:30:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 06:30:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 06:30:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 06:30:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 06:30:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 06:30:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 06:30:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 06:30:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 06:30:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 06:30:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 06:30:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 06:30:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 06:30:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 06:30:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 06:30:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 06:30:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 06:30:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 06:30:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 06:30:23] [Rank 0] PRINT: step:2400/10000 val_loss:4.7895 total_sharp:7.4357e-05 L1_sharp:5.3211e-04 L2_sharp:4.7347e-05 L3_sharp:-7.6290e-06 L4_sharp:2.6681e-05 L5_sharp:4.4542e-06 L6_sharp:9.9297e-05 L7_sharp:1.8020e-05 L8_sharp:9.8171e-05 L9_sharp:7.1215e-05 L10_sharp:8.5474e-05 L11_sharp:1.6201e-04 L12_sharp:6.8062e-04 total_fnorm:1.8000e+02 total_l1_linf:5.0995e+05 total_spectral:9.2500e+01 L1_fnorm:1.2062e+01 L2_fnorm:1.2312e+01 L3_fnorm:1.2062e+01 L4_fnorm:1.2188e+01 L5_fnorm:1.2438e+01 L6_fnorm:1.2438e+01 L7_fnorm:1.2500e+01 L8_fnorm:1.2188e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2375e+01 L11_fnorm:1.2562e+01 L12_fnorm:1.2500e+01 L1_l1linf:3.2812e+00 L2_l1linf:3.2344e+00 L3_l1linf:2.7656e+00 L4_l1linf:3.0312e+00 L5_l1linf:3.1250e+00 L6_l1linf:3.1094e+00 L7_l1linf:3.1562e+00 L8_l1linf:3.1094e+00 L9_l1linf:3.0469e+00 L10_l1linf:3.0156e+00 L11_l1linf:2.9531e+00 L12_l1linf:2.8906e+00 L1_spectral:1.4796e-01 L2_spectral:1.4584e-01 L3_spectral:1.4722e-01 L4_spectral:1.4723e-01 L5_spectral:1.4954e-01 L6_spectral:1.4763e-01 L7_spectral:1.4905e-01 L8_spectral:1.5202e-01 L9_spectral:1.4947e-01 L10_spectral:1.5084e-01 L11_spectral:1.5079e-01 L12_spectral:1.5148e-01 train_time:127830ms step_avg:53.26ms +[2025-09-11 06:30:23] [Rank 0] PRINT: step:2400/10000 
val_loss:4.7895 total_sharp:7.4357e-05 L1_sharp:5.3211e-04 L2_sharp:4.7347e-05 L3_sharp:-7.6290e-06 L4_sharp:2.6681e-05 L5_sharp:4.4542e-06 L6_sharp:9.9297e-05 L7_sharp:1.8020e-05 L8_sharp:9.8171e-05 L9_sharp:7.1215e-05 L10_sharp:8.5474e-05 L11_sharp:1.6201e-04 L12_sharp:6.8062e-04 total_fnorm:1.8000e+02 total_l1_linf:5.0995e+05 total_spectral:9.2500e+01 L1_fnorm:1.2062e+01 L2_fnorm:1.2312e+01 L3_fnorm:1.2062e+01 L4_fnorm:1.2188e+01 L5_fnorm:1.2438e+01 L6_fnorm:1.2438e+01 L7_fnorm:1.2500e+01 L8_fnorm:1.2188e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2375e+01 L11_fnorm:1.2562e+01 L12_fnorm:1.2500e+01 L1_l1linf:3.2812e+00 L2_l1linf:3.2344e+00 L3_l1linf:2.7656e+00 L4_l1linf:3.0312e+00 L5_l1linf:3.1250e+00 L6_l1linf:3.1094e+00 L7_l1linf:3.1562e+00 L8_l1linf:3.1094e+00 L9_l1linf:3.0469e+00 L10_l1linf:3.0156e+00 L11_l1linf:2.9531e+00 L12_l1linf:2.8906e+00 L1_spectral:1.4796e-01 L2_spectral:1.4584e-01 L3_spectral:1.4722e-01 L4_spectral:1.4723e-01 L5_spectral:1.4954e-01 L6_spectral:1.4763e-01 L7_spectral:1.4905e-01 L8_spectral:1.5202e-01 L9_spectral:1.4947e-01 L10_spectral:1.5084e-01 L11_spectral:1.5079e-01 L12_spectral:1.5148e-01 train_time:127830ms step_avg:53.26ms +[2025-09-11 06:30:24] [Rank 0] step:2401/10000 train_time:129153ms step_avg:53.79ms +[2025-09-11 06:30:24] [Rank 0] step:2401/10000 train_time:129153ms step_avg:53.79ms +[2025-09-11 06:30:25] [Rank 0] step:2421/10000 train_time:129828ms step_avg:53.63ms +[2025-09-11 06:30:25] [Rank 0] step:2421/10000 train_time:129828ms step_avg:53.63ms +[2025-09-11 06:30:25] [Rank 0] step:2441/10000 train_time:130492ms step_avg:53.46ms +[2025-09-11 06:30:25] [Rank 0] step:2441/10000 train_time:130492ms step_avg:53.46ms +[2025-09-11 06:30:26] [Rank 0] step:2461/10000 train_time:131154ms step_avg:53.29ms +[2025-09-11 06:30:26] [Rank 0] step:2461/10000 train_time:131154ms step_avg:53.29ms +[2025-09-11 06:30:27] [Rank 0] step:2481/10000 train_time:131818ms step_avg:53.13ms +[2025-09-11 06:30:27] [Rank 0] step:2481/10000 
train_time:131818ms step_avg:53.13ms +[2025-09-11 06:30:27] [Rank 0] step:2501/10000 train_time:132480ms step_avg:52.97ms +[2025-09-11 06:30:27] [Rank 0] step:2501/10000 train_time:132480ms step_avg:52.97ms +[2025-09-11 06:30:28] [Rank 0] step:2521/10000 train_time:133143ms step_avg:52.81ms +[2025-09-11 06:30:28] [Rank 0] step:2521/10000 train_time:133143ms step_avg:52.81ms +[2025-09-11 06:30:29] [Rank 0] step:2541/10000 train_time:133805ms step_avg:52.66ms +[2025-09-11 06:30:29] [Rank 0] step:2541/10000 train_time:133805ms step_avg:52.66ms +[2025-09-11 06:30:29] [Rank 0] step:2561/10000 train_time:134466ms step_avg:52.51ms +[2025-09-11 06:30:29] [Rank 0] step:2561/10000 train_time:134466ms step_avg:52.51ms +[2025-09-11 06:30:30] [Rank 0] step:2581/10000 train_time:135128ms step_avg:52.35ms +[2025-09-11 06:30:30] [Rank 0] step:2581/10000 train_time:135128ms step_avg:52.35ms +[2025-09-11 06:30:31] [Rank 0] step:2601/10000 train_time:135791ms step_avg:52.21ms +[2025-09-11 06:30:31] [Rank 0] step:2601/10000 train_time:135791ms step_avg:52.21ms +[2025-09-11 06:30:31] [Rank 0] step:2621/10000 train_time:136453ms step_avg:52.06ms +[2025-09-11 06:30:31] [Rank 0] step:2621/10000 train_time:136453ms step_avg:52.06ms +[2025-09-11 06:30:32] [Rank 0] step:2641/10000 train_time:137115ms step_avg:51.92ms +[2025-09-11 06:30:32] [Rank 0] step:2641/10000 train_time:137115ms step_avg:51.92ms +[2025-09-11 06:30:33] [Rank 0] step:2661/10000 train_time:137777ms step_avg:51.78ms +[2025-09-11 06:30:33] [Rank 0] step:2661/10000 train_time:137777ms step_avg:51.78ms +[2025-09-11 06:30:33] [Rank 0] step:2681/10000 train_time:138439ms step_avg:51.64ms +[2025-09-11 06:30:33] [Rank 0] step:2681/10000 train_time:138439ms step_avg:51.64ms +[2025-09-11 06:30:34] [Rank 0] step:2701/10000 train_time:139101ms step_avg:51.50ms +[2025-09-11 06:30:34] [Rank 0] step:2701/10000 train_time:139101ms step_avg:51.50ms +[2025-09-11 06:30:35] [Rank 0] step:2721/10000 train_time:139763ms step_avg:51.36ms 
+[2025-09-11 06:30:35] [Rank 0] step:2721/10000 train_time:139763ms step_avg:51.36ms +[2025-09-11 06:30:35] [Rank 0] step:2741/10000 train_time:140424ms step_avg:51.23ms +[2025-09-11 06:30:35] [Rank 0] step:2741/10000 train_time:140424ms step_avg:51.23ms +[2025-09-11 06:30:36] [Rank 0] step:2761/10000 train_time:141085ms step_avg:51.10ms +[2025-09-11 06:30:36] [Rank 0] step:2761/10000 train_time:141085ms step_avg:51.10ms +[2025-09-11 06:30:37] [Rank 0] step:2781/10000 train_time:141747ms step_avg:50.97ms +[2025-09-11 06:30:37] [Rank 0] step:2781/10000 train_time:141747ms step_avg:50.97ms +[2025-09-11 06:30:37] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 06:30:37] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 06:30:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 06:30:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 06:30:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 06:30:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 06:30:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:30:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:30:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 06:30:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 06:30:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 06:30:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 06:30:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 06:30:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 06:30:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 06:30:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 06:30:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 06:30:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 06:30:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 06:30:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 06:30:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 06:30:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 06:30:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 06:30:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 06:30:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 06:30:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 06:30:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 06:30:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 06:30:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 06:30:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 06:30:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 06:30:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 06:30:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 06:30:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 06:30:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 06:30:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 06:30:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 06:30:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 06:30:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 06:30:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 06:30:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 06:30:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 06:30:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 06:30:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 06:30:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 06:30:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:30:47] [Rank 0] PRINT: step:2800/10000 val_loss:4.7395 total_sharp:7.7972e-05 L1_sharp:3.6854e-04 L2_sharp:3.5647e-05 L3_sharp:1.6800e-05 L4_sharp:2.8107e-06 L5_sharp:2.0738e-05 L6_sharp:4.6183e-05 L7_sharp:5.1932e-05 L8_sharp:8.5890e-05 L9_sharp:8.9286e-05 L10_sharp:1.0973e-04 L11_sharp:1.8262e-04 L12_sharp:9.0866e-04 total_fnorm:1.7700e+02 total_l1_linf:5.0176e+05 total_spectral:9.1000e+01 L1_fnorm:1.2062e+01 L2_fnorm:1.2250e+01 L3_fnorm:1.1938e+01 L4_fnorm:1.2250e+01 L5_fnorm:1.2500e+01 L6_fnorm:1.2500e+01 L7_fnorm:1.2625e+01 L8_fnorm:1.2188e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2375e+01 L11_fnorm:1.2562e+01 L12_fnorm:1.2500e+01 L1_l1linf:3.2344e+00 L2_l1linf:3.1406e+00 L3_l1linf:2.5000e+00 L4_l1linf:2.7344e+00 L5_l1linf:3.0938e+00 L6_l1linf:3.1094e+00 L7_l1linf:3.1719e+00 L8_l1linf:3.1250e+00 L9_l1linf:3.0312e+00 L10_l1linf:2.9531e+00 L11_l1linf:2.8906e+00 L12_l1linf:2.8594e+00 L1_spectral:1.4965e-01 L2_spectral:1.4765e-01 L3_spectral:1.4948e-01 L4_spectral:1.4862e-01 L5_spectral:1.5086e-01 L6_spectral:1.5097e-01 L7_spectral:1.5109e-01 L8_spectral:1.5338e-01 L9_spectral:1.5119e-01 L10_spectral:1.5177e-01 L11_spectral:1.5299e-01 L12_spectral:1.5272e-01 train_time:142390ms step_avg:50.85ms +[2025-09-11 06:30:47] [Rank 0] PRINT: step:2800/10000 val_loss:4.7395 total_sharp:7.7972e-05 L1_sharp:3.6854e-04 L2_sharp:3.5647e-05 L3_sharp:1.6800e-05 L4_sharp:2.8107e-06 L5_sharp:2.0738e-05 L6_sharp:4.6183e-05 L7_sharp:5.1932e-05 L8_sharp:8.5890e-05 L9_sharp:8.9286e-05 L10_sharp:1.0973e-04 L11_sharp:1.8262e-04 L12_sharp:9.0866e-04 total_fnorm:1.7700e+02 total_l1_linf:5.0176e+05 total_spectral:9.1000e+01 L1_fnorm:1.2062e+01 L2_fnorm:1.2250e+01 L3_fnorm:1.1938e+01 L4_fnorm:1.2250e+01 L5_fnorm:1.2500e+01 L6_fnorm:1.2500e+01 L7_fnorm:1.2625e+01 L8_fnorm:1.2188e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2375e+01 L11_fnorm:1.2562e+01 L12_fnorm:1.2500e+01 L1_l1linf:3.2344e+00 L2_l1linf:3.1406e+00 L3_l1linf:2.5000e+00 L4_l1linf:2.7344e+00 L5_l1linf:3.0938e+00 
L6_l1linf:3.1094e+00 L7_l1linf:3.1719e+00 L8_l1linf:3.1250e+00 L9_l1linf:3.0312e+00 L10_l1linf:2.9531e+00 L11_l1linf:2.8906e+00 L12_l1linf:2.8594e+00 L1_spectral:1.4965e-01 L2_spectral:1.4765e-01 L3_spectral:1.4948e-01 L4_spectral:1.4862e-01 L5_spectral:1.5086e-01 L6_spectral:1.5097e-01 L7_spectral:1.5109e-01 L8_spectral:1.5338e-01 L9_spectral:1.5119e-01 L10_spectral:1.5177e-01 L11_spectral:1.5299e-01 L12_spectral:1.5272e-01 train_time:142390ms step_avg:50.85ms +[2025-09-11 06:30:49] [Rank 0] step:2801/10000 train_time:143625ms step_avg:51.28ms +[2025-09-11 06:30:49] [Rank 0] step:2801/10000 train_time:143625ms step_avg:51.28ms +[2025-09-11 06:30:49] [Rank 0] step:2821/10000 train_time:144305ms step_avg:51.15ms +[2025-09-11 06:30:49] [Rank 0] step:2821/10000 train_time:144305ms step_avg:51.15ms +[2025-09-11 06:30:50] [Rank 0] step:2841/10000 train_time:144968ms step_avg:51.03ms +[2025-09-11 06:30:50] [Rank 0] step:2841/10000 train_time:144968ms step_avg:51.03ms +[2025-09-11 06:30:51] [Rank 0] step:2861/10000 train_time:145631ms step_avg:50.90ms +[2025-09-11 06:30:51] [Rank 0] step:2861/10000 train_time:145631ms step_avg:50.90ms +[2025-09-11 06:30:51] [Rank 0] step:2881/10000 train_time:146292ms step_avg:50.78ms +[2025-09-11 06:30:51] [Rank 0] step:2881/10000 train_time:146292ms step_avg:50.78ms +[2025-09-11 06:30:52] [Rank 0] step:2901/10000 train_time:146954ms step_avg:50.66ms +[2025-09-11 06:30:52] [Rank 0] step:2901/10000 train_time:146954ms step_avg:50.66ms +[2025-09-11 06:30:53] [Rank 0] step:2921/10000 train_time:147615ms step_avg:50.54ms +[2025-09-11 06:30:53] [Rank 0] step:2921/10000 train_time:147615ms step_avg:50.54ms +[2025-09-11 06:30:53] [Rank 0] step:2941/10000 train_time:148276ms step_avg:50.42ms +[2025-09-11 06:30:53] [Rank 0] step:2941/10000 train_time:148276ms step_avg:50.42ms +[2025-09-11 06:30:54] [Rank 0] step:2961/10000 train_time:148938ms step_avg:50.30ms +[2025-09-11 06:30:54] [Rank 0] step:2961/10000 train_time:148938ms step_avg:50.30ms 
+[2025-09-11 06:30:55] [Rank 0] step:2981/10000 train_time:149602ms step_avg:50.19ms +[2025-09-11 06:30:55] [Rank 0] step:2981/10000 train_time:149602ms step_avg:50.19ms +[2025-09-11 06:30:55] [Rank 0] step:3001/10000 train_time:150266ms step_avg:50.07ms +[2025-09-11 06:30:55] [Rank 0] step:3001/10000 train_time:150266ms step_avg:50.07ms +[2025-09-11 06:30:56] [Rank 0] step:3021/10000 train_time:150931ms step_avg:49.96ms +[2025-09-11 06:30:56] [Rank 0] step:3021/10000 train_time:150931ms step_avg:49.96ms +[2025-09-11 06:30:57] [Rank 0] step:3041/10000 train_time:151596ms step_avg:49.85ms +[2025-09-11 06:30:57] [Rank 0] step:3041/10000 train_time:151596ms step_avg:49.85ms +[2025-09-11 06:30:57] [Rank 0] step:3061/10000 train_time:152261ms step_avg:49.74ms +[2025-09-11 06:30:57] [Rank 0] step:3061/10000 train_time:152261ms step_avg:49.74ms +[2025-09-11 06:30:58] [Rank 0] step:3081/10000 train_time:152926ms step_avg:49.64ms +[2025-09-11 06:30:58] [Rank 0] step:3081/10000 train_time:152926ms step_avg:49.64ms +[2025-09-11 06:30:59] [Rank 0] step:3101/10000 train_time:153591ms step_avg:49.53ms +[2025-09-11 06:30:59] [Rank 0] step:3101/10000 train_time:153591ms step_avg:49.53ms +[2025-09-11 06:30:59] [Rank 0] step:3121/10000 train_time:154256ms step_avg:49.43ms +[2025-09-11 06:30:59] [Rank 0] step:3121/10000 train_time:154256ms step_avg:49.43ms +[2025-09-11 06:31:00] [Rank 0] step:3141/10000 train_time:154920ms step_avg:49.32ms +[2025-09-11 06:31:00] [Rank 0] step:3141/10000 train_time:154920ms step_avg:49.32ms +[2025-09-11 06:31:01] [Rank 0] step:3161/10000 train_time:155585ms step_avg:49.22ms +[2025-09-11 06:31:01] [Rank 0] step:3161/10000 train_time:155585ms step_avg:49.22ms +[2025-09-11 06:31:01] [Rank 0] step:3181/10000 train_time:156251ms step_avg:49.12ms +[2025-09-11 06:31:01] [Rank 0] step:3181/10000 train_time:156251ms step_avg:49.12ms +[2025-09-11 06:31:02] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 06:31:02] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 06:31:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 06:31:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 06:31:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 06:31:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 06:31:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:31:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:31:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 06:31:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 06:31:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 06:31:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 06:31:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 06:31:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 06:31:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 06:31:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 06:31:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 06:31:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 06:31:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 06:31:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 06:31:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 06:31:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 06:31:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 06:31:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 06:31:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 06:31:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 06:31:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 06:31:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 06:31:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 06:31:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 06:31:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 06:31:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 06:31:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 06:31:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 06:31:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 06:31:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 06:31:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 06:31:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 06:31:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 06:31:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 06:31:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 06:31:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 06:31:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 06:31:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 06:31:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 06:31:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 06:31:12] [Rank 0] PRINT: step:3200/10000 val_loss:4.6468 total_sharp:5.0723e-05 L1_sharp:2.8570e-04 L2_sharp:3.1263e-05 L3_sharp:4.1957e-05 L4_sharp:5.7929e-06 L5_sharp:9.5843e-06 L6_sharp:4.8772e-05 L7_sharp:4.8938e-05 L8_sharp:5.6271e-05 L9_sharp:6.4919e-05 L10_sharp:1.0260e-04 L11_sharp:1.1833e-04 L12_sharp:5.6680e-04 total_fnorm:1.9600e+02 total_l1_linf:5.5706e+05 total_spectral:1.0050e+02 L1_fnorm:1.2062e+01 L2_fnorm:1.2312e+01 L3_fnorm:1.1812e+01 L4_fnorm:1.2250e+01 L5_fnorm:1.2562e+01 L6_fnorm:1.2562e+01 L7_fnorm:1.2625e+01 L8_fnorm:1.2250e+01 L9_fnorm:1.2625e+01 L10_fnorm:1.2438e+01 L11_fnorm:1.2625e+01 L12_fnorm:1.2562e+01 L1_l1linf:3.1406e+00 L2_l1linf:3.0312e+00 L3_l1linf:2.2500e+00 L4_l1linf:2.5469e+00 L5_l1linf:3.0000e+00 L6_l1linf:3.0938e+00 L7_l1linf:3.1719e+00 L8_l1linf:3.1719e+00 L9_l1linf:3.0625e+00 L10_l1linf:2.9375e+00 L11_l1linf:2.8438e+00 L12_l1linf:2.8281e+00 L1_spectral:1.5062e-01 L2_spectral:1.4983e-01 L3_spectral:1.5111e-01 L4_spectral:1.5063e-01 L5_spectral:1.5167e-01 L6_spectral:1.5166e-01 L7_spectral:1.5331e-01 L8_spectral:1.5364e-01 L9_spectral:1.5498e-01 L10_spectral:1.5417e-01 L11_spectral:1.5496e-01 L12_spectral:1.5456e-01 train_time:156898ms step_avg:49.03ms +[2025-09-11 06:31:12] [Rank 0] PRINT: step:3200/10000 
val_loss:4.6468 total_sharp:5.0723e-05 L1_sharp:2.8570e-04 L2_sharp:3.1263e-05 L3_sharp:4.1957e-05 L4_sharp:5.7929e-06 L5_sharp:9.5843e-06 L6_sharp:4.8772e-05 L7_sharp:4.8938e-05 L8_sharp:5.6271e-05 L9_sharp:6.4919e-05 L10_sharp:1.0260e-04 L11_sharp:1.1833e-04 L12_sharp:5.6680e-04 total_fnorm:1.9600e+02 total_l1_linf:5.5706e+05 total_spectral:1.0050e+02 L1_fnorm:1.2062e+01 L2_fnorm:1.2312e+01 L3_fnorm:1.1812e+01 L4_fnorm:1.2250e+01 L5_fnorm:1.2562e+01 L6_fnorm:1.2562e+01 L7_fnorm:1.2625e+01 L8_fnorm:1.2250e+01 L9_fnorm:1.2625e+01 L10_fnorm:1.2438e+01 L11_fnorm:1.2625e+01 L12_fnorm:1.2562e+01 L1_l1linf:3.1406e+00 L2_l1linf:3.0312e+00 L3_l1linf:2.2500e+00 L4_l1linf:2.5469e+00 L5_l1linf:3.0000e+00 L6_l1linf:3.0938e+00 L7_l1linf:3.1719e+00 L8_l1linf:3.1719e+00 L9_l1linf:3.0625e+00 L10_l1linf:2.9375e+00 L11_l1linf:2.8438e+00 L12_l1linf:2.8281e+00 L1_spectral:1.5062e-01 L2_spectral:1.4983e-01 L3_spectral:1.5111e-01 L4_spectral:1.5063e-01 L5_spectral:1.5167e-01 L6_spectral:1.5166e-01 L7_spectral:1.5331e-01 L8_spectral:1.5364e-01 L9_spectral:1.5498e-01 L10_spectral:1.5417e-01 L11_spectral:1.5496e-01 L12_spectral:1.5456e-01 train_time:156898ms step_avg:49.03ms +[2025-09-11 06:31:13] [Rank 0] step:3201/10000 train_time:158153ms step_avg:49.41ms +[2025-09-11 06:31:13] [Rank 0] step:3201/10000 train_time:158153ms step_avg:49.41ms +[2025-09-11 06:31:14] [Rank 0] step:3221/10000 train_time:158824ms step_avg:49.31ms +[2025-09-11 06:31:14] [Rank 0] step:3221/10000 train_time:158824ms step_avg:49.31ms +[2025-09-11 06:31:15] [Rank 0] step:3241/10000 train_time:159492ms step_avg:49.21ms +[2025-09-11 06:31:15] [Rank 0] step:3241/10000 train_time:159492ms step_avg:49.21ms +[2025-09-11 06:31:15] [Rank 0] step:3261/10000 train_time:160159ms step_avg:49.11ms +[2025-09-11 06:31:15] [Rank 0] step:3261/10000 train_time:160159ms step_avg:49.11ms +[2025-09-11 06:31:16] [Rank 0] step:3281/10000 train_time:160826ms step_avg:49.02ms +[2025-09-11 06:31:16] [Rank 0] step:3281/10000 
train_time:160826ms step_avg:49.02ms +[2025-09-11 06:31:17] [Rank 0] step:3301/10000 train_time:161493ms step_avg:48.92ms +[2025-09-11 06:31:17] [Rank 0] step:3301/10000 train_time:161493ms step_avg:48.92ms +[2025-09-11 06:31:17] [Rank 0] step:3321/10000 train_time:162159ms step_avg:48.83ms +[2025-09-11 06:31:17] [Rank 0] step:3321/10000 train_time:162159ms step_avg:48.83ms +[2025-09-11 06:31:18] [Rank 0] step:3341/10000 train_time:162826ms step_avg:48.74ms +[2025-09-11 06:31:18] [Rank 0] step:3341/10000 train_time:162826ms step_avg:48.74ms +[2025-09-11 06:31:19] [Rank 0] step:3361/10000 train_time:163492ms step_avg:48.64ms +[2025-09-11 06:31:19] [Rank 0] step:3361/10000 train_time:163492ms step_avg:48.64ms +[2025-09-11 06:31:19] [Rank 0] step:3381/10000 train_time:164158ms step_avg:48.55ms +[2025-09-11 06:31:19] [Rank 0] step:3381/10000 train_time:164158ms step_avg:48.55ms +[2025-09-11 06:31:20] [Rank 0] step:3401/10000 train_time:164824ms step_avg:48.46ms +[2025-09-11 06:31:20] [Rank 0] step:3401/10000 train_time:164824ms step_avg:48.46ms +[2025-09-11 06:31:21] [Rank 0] step:3421/10000 train_time:165488ms step_avg:48.37ms +[2025-09-11 06:31:21] [Rank 0] step:3421/10000 train_time:165488ms step_avg:48.37ms +[2025-09-11 06:31:21] [Rank 0] step:3441/10000 train_time:166153ms step_avg:48.29ms +[2025-09-11 06:31:21] [Rank 0] step:3441/10000 train_time:166153ms step_avg:48.29ms +[2025-09-11 06:31:22] [Rank 0] step:3461/10000 train_time:166819ms step_avg:48.20ms +[2025-09-11 06:31:22] [Rank 0] step:3461/10000 train_time:166819ms step_avg:48.20ms +[2025-09-11 06:31:23] [Rank 0] step:3481/10000 train_time:167485ms step_avg:48.11ms +[2025-09-11 06:31:23] [Rank 0] step:3481/10000 train_time:167485ms step_avg:48.11ms +[2025-09-11 06:31:23] [Rank 0] step:3501/10000 train_time:168151ms step_avg:48.03ms +[2025-09-11 06:31:23] [Rank 0] step:3501/10000 train_time:168151ms step_avg:48.03ms +[2025-09-11 06:31:24] [Rank 0] step:3521/10000 train_time:168816ms step_avg:47.95ms 
+[2025-09-11 06:31:24] [Rank 0] step:3521/10000 train_time:168816ms step_avg:47.95ms +[2025-09-11 06:31:25] [Rank 0] step:3541/10000 train_time:169482ms step_avg:47.86ms +[2025-09-11 06:31:25] [Rank 0] step:3541/10000 train_time:169482ms step_avg:47.86ms +[2025-09-11 06:31:25] [Rank 0] step:3561/10000 train_time:170147ms step_avg:47.78ms +[2025-09-11 06:31:25] [Rank 0] step:3561/10000 train_time:170147ms step_avg:47.78ms +[2025-09-11 06:31:26] [Rank 0] step:3581/10000 train_time:170813ms step_avg:47.70ms +[2025-09-11 06:31:26] [Rank 0] step:3581/10000 train_time:170813ms step_avg:47.70ms +[2025-09-11 06:31:27] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 06:31:27] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 06:31:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 06:31:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 06:31:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 06:31:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 06:31:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:31:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:31:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 06:31:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 06:31:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 06:31:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 06:31:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 06:31:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 06:31:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 06:31:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 06:31:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 06:31:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 06:31:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 06:31:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 06:31:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 06:31:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 06:31:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 06:31:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 06:31:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 06:31:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 06:31:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 06:31:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 06:31:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 06:31:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 06:31:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 06:31:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 06:31:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 06:31:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 06:31:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 06:31:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 06:31:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 06:31:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 06:31:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 06:31:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 06:31:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 06:31:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 06:31:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 06:31:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 06:31:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 06:31:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:31:37] [Rank 0] PRINT: step:3600/10000 val_loss:4.6075 total_sharp:7.5484e-05 L1_sharp:3.2479e-04 L2_sharp:3.8234e-05 L3_sharp:3.5048e-05 L4_sharp:1.7313e-05 L5_sharp:2.9962e-05 L6_sharp:5.2494e-05 L7_sharp:1.5125e-05 L8_sharp:6.3827e-05 L9_sharp:6.7508e-05 L10_sharp:8.2757e-05 L11_sharp:1.2152e-04 L12_sharp:2.7824e-03 total_fnorm:1.7800e+02 total_l1_linf:4.9766e+05 total_spectral:9.1500e+01 L1_fnorm:1.2062e+01 L2_fnorm:1.2312e+01 L3_fnorm:1.1750e+01 L4_fnorm:1.2188e+01 L5_fnorm:1.2562e+01 L6_fnorm:1.2562e+01 L7_fnorm:1.2625e+01 L8_fnorm:1.2188e+01 L9_fnorm:1.2625e+01 L10_fnorm:1.2438e+01 L11_fnorm:1.2625e+01 L12_fnorm:1.2562e+01 L1_l1linf:3.1250e+00 L2_l1linf:2.9688e+00 L3_l1linf:2.1250e+00 L4_l1linf:2.4375e+00 L5_l1linf:2.8750e+00 L6_l1linf:3.0781e+00 L7_l1linf:3.1875e+00 L8_l1linf:3.2031e+00 L9_l1linf:3.0781e+00 L10_l1linf:2.9688e+00 L11_l1linf:2.7969e+00 L12_l1linf:2.8281e+00 L1_spectral:1.5176e-01 L2_spectral:1.5040e-01 L3_spectral:1.5243e-01 L4_spectral:1.5263e-01 L5_spectral:1.5304e-01 L6_spectral:1.5338e-01 L7_spectral:1.5447e-01 L8_spectral:1.5440e-01 L9_spectral:1.5518e-01 L10_spectral:1.5404e-01 L11_spectral:1.5576e-01 L12_spectral:1.5383e-01 train_time:171460ms step_avg:47.63ms +[2025-09-11 06:31:37] [Rank 0] PRINT: step:3600/10000 val_loss:4.6075 total_sharp:7.5484e-05 L1_sharp:3.2479e-04 L2_sharp:3.8234e-05 L3_sharp:3.5048e-05 L4_sharp:1.7313e-05 L5_sharp:2.9962e-05 L6_sharp:5.2494e-05 L7_sharp:1.5125e-05 L8_sharp:6.3827e-05 L9_sharp:6.7508e-05 L10_sharp:8.2757e-05 L11_sharp:1.2152e-04 L12_sharp:2.7824e-03 total_fnorm:1.7800e+02 total_l1_linf:4.9766e+05 total_spectral:9.1500e+01 L1_fnorm:1.2062e+01 L2_fnorm:1.2312e+01 L3_fnorm:1.1750e+01 L4_fnorm:1.2188e+01 L5_fnorm:1.2562e+01 L6_fnorm:1.2562e+01 L7_fnorm:1.2625e+01 L8_fnorm:1.2188e+01 L9_fnorm:1.2625e+01 L10_fnorm:1.2438e+01 L11_fnorm:1.2625e+01 L12_fnorm:1.2562e+01 L1_l1linf:3.1250e+00 L2_l1linf:2.9688e+00 L3_l1linf:2.1250e+00 L4_l1linf:2.4375e+00 L5_l1linf:2.8750e+00 
L6_l1linf:3.0781e+00 L7_l1linf:3.1875e+00 L8_l1linf:3.2031e+00 L9_l1linf:3.0781e+00 L10_l1linf:2.9688e+00 L11_l1linf:2.7969e+00 L12_l1linf:2.8281e+00 L1_spectral:1.5176e-01 L2_spectral:1.5040e-01 L3_spectral:1.5243e-01 L4_spectral:1.5263e-01 L5_spectral:1.5304e-01 L6_spectral:1.5338e-01 L7_spectral:1.5447e-01 L8_spectral:1.5440e-01 L9_spectral:1.5518e-01 L10_spectral:1.5404e-01 L11_spectral:1.5576e-01 L12_spectral:1.5383e-01 train_time:171460ms step_avg:47.63ms +[2025-09-11 06:31:38] [Rank 0] step:3601/10000 train_time:172814ms step_avg:47.99ms +[2025-09-11 06:31:38] [Rank 0] step:3601/10000 train_time:172814ms step_avg:47.99ms +[2025-09-11 06:31:39] [Rank 0] step:3621/10000 train_time:173546ms step_avg:47.93ms +[2025-09-11 06:31:39] [Rank 0] step:3621/10000 train_time:173546ms step_avg:47.93ms +[2025-09-11 06:31:40] [Rank 0] step:3641/10000 train_time:174212ms step_avg:47.85ms +[2025-09-11 06:31:40] [Rank 0] step:3641/10000 train_time:174212ms step_avg:47.85ms +[2025-09-11 06:31:40] [Rank 0] step:3661/10000 train_time:174878ms step_avg:47.77ms +[2025-09-11 06:31:40] [Rank 0] step:3661/10000 train_time:174878ms step_avg:47.77ms +[2025-09-11 06:31:41] [Rank 0] step:3681/10000 train_time:175544ms step_avg:47.69ms +[2025-09-11 06:31:41] [Rank 0] step:3681/10000 train_time:175544ms step_avg:47.69ms +[2025-09-11 06:31:42] [Rank 0] step:3701/10000 train_time:176209ms step_avg:47.61ms +[2025-09-11 06:31:42] [Rank 0] step:3701/10000 train_time:176209ms step_avg:47.61ms +[2025-09-11 06:31:42] [Rank 0] step:3721/10000 train_time:176883ms step_avg:47.54ms +[2025-09-11 06:31:42] [Rank 0] step:3721/10000 train_time:176883ms step_avg:47.54ms +[2025-09-11 06:31:43] [Rank 0] step:3741/10000 train_time:177560ms step_avg:47.46ms +[2025-09-11 06:31:43] [Rank 0] step:3741/10000 train_time:177560ms step_avg:47.46ms +[2025-09-11 06:31:44] [Rank 0] step:3761/10000 train_time:178238ms step_avg:47.39ms +[2025-09-11 06:31:44] [Rank 0] step:3761/10000 train_time:178238ms step_avg:47.39ms 
+[2025-09-11 06:31:44] [Rank 0] step:3781/10000 train_time:178914ms step_avg:47.32ms +[2025-09-11 06:31:44] [Rank 0] step:3781/10000 train_time:178914ms step_avg:47.32ms +[2025-09-11 06:31:45] [Rank 0] step:3801/10000 train_time:179593ms step_avg:47.25ms +[2025-09-11 06:31:45] [Rank 0] step:3801/10000 train_time:179593ms step_avg:47.25ms +[2025-09-11 06:31:46] [Rank 0] step:3821/10000 train_time:180270ms step_avg:47.18ms +[2025-09-11 06:31:46] [Rank 0] step:3821/10000 train_time:180270ms step_avg:47.18ms +[2025-09-11 06:31:46] [Rank 0] step:3841/10000 train_time:180946ms step_avg:47.11ms +[2025-09-11 06:31:46] [Rank 0] step:3841/10000 train_time:180946ms step_avg:47.11ms +[2025-09-11 06:31:47] [Rank 0] step:3861/10000 train_time:181622ms step_avg:47.04ms +[2025-09-11 06:31:47] [Rank 0] step:3861/10000 train_time:181622ms step_avg:47.04ms +[2025-09-11 06:31:48] [Rank 0] step:3881/10000 train_time:182298ms step_avg:46.97ms +[2025-09-11 06:31:48] [Rank 0] step:3881/10000 train_time:182298ms step_avg:46.97ms +[2025-09-11 06:31:48] [Rank 0] step:3901/10000 train_time:182974ms step_avg:46.90ms +[2025-09-11 06:31:48] [Rank 0] step:3901/10000 train_time:182974ms step_avg:46.90ms +[2025-09-11 06:31:49] [Rank 0] step:3921/10000 train_time:183650ms step_avg:46.84ms +[2025-09-11 06:31:49] [Rank 0] step:3921/10000 train_time:183650ms step_avg:46.84ms +[2025-09-11 06:31:50] [Rank 0] step:3941/10000 train_time:184327ms step_avg:46.77ms +[2025-09-11 06:31:50] [Rank 0] step:3941/10000 train_time:184327ms step_avg:46.77ms +[2025-09-11 06:31:50] [Rank 0] step:3961/10000 train_time:185002ms step_avg:46.71ms +[2025-09-11 06:31:50] [Rank 0] step:3961/10000 train_time:185002ms step_avg:46.71ms +[2025-09-11 06:31:51] [Rank 0] step:3981/10000 train_time:185678ms step_avg:46.64ms +[2025-09-11 06:31:51] [Rank 0] step:3981/10000 train_time:185678ms step_avg:46.64ms +[2025-09-11 06:31:52] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 06:31:52] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 06:31:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 06:31:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 06:31:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 06:31:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 06:31:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:31:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:31:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 06:31:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 06:31:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 06:31:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 06:31:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 06:31:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 06:31:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 06:31:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 06:31:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 06:31:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 06:31:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 06:31:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 06:31:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 06:31:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 06:31:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 06:31:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 06:31:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 06:31:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 06:31:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 06:31:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 06:31:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 06:31:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 06:32:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 06:32:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 06:32:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 06:32:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 06:32:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 06:32:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 06:32:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 06:32:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 06:32:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 06:32:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 06:32:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 06:32:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 06:32:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 06:32:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 06:32:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 06:32:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 06:32:02] [Rank 0] PRINT: step:4000/10000 val_loss:4.5600 total_sharp:7.0398e-05 L1_sharp:2.5532e-04 L2_sharp:2.9358e-05 L3_sharp:2.2600e-05 L4_sharp:7.7964e-06 L5_sharp:9.6849e-06 L6_sharp:3.5822e-05 L7_sharp:2.8258e-05 L8_sharp:8.6029e-05 L9_sharp:6.9711e-05 L10_sharp:1.1033e-04 L11_sharp:1.1883e-04 L12_sharp:2.2841e-03 total_fnorm:1.9600e+02 total_l1_linf:5.3658e+05 total_spectral:1.0050e+02 L1_fnorm:1.1938e+01 L2_fnorm:1.2125e+01 L3_fnorm:1.1375e+01 L4_fnorm:1.2000e+01 L5_fnorm:1.2438e+01 L6_fnorm:1.2500e+01 L7_fnorm:1.2562e+01 L8_fnorm:1.2062e+01 L9_fnorm:1.2562e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2562e+01 L12_fnorm:1.2500e+01 L1_l1linf:3.1094e+00 L2_l1linf:2.8438e+00 L3_l1linf:2.2031e+00 L4_l1linf:2.3906e+00 L5_l1linf:2.8281e+00 L6_l1linf:3.0469e+00 L7_l1linf:3.1719e+00 L8_l1linf:3.0938e+00 L9_l1linf:3.0000e+00 L10_l1linf:2.8594e+00 L11_l1linf:2.7656e+00 L12_l1linf:2.8594e+00 L1_spectral:1.5301e-01 L2_spectral:1.5059e-01 L3_spectral:1.5357e-01 L4_spectral:1.5289e-01 L5_spectral:1.5446e-01 L6_spectral:1.5419e-01 L7_spectral:1.5553e-01 L8_spectral:1.5290e-01 L9_spectral:1.5481e-01 L10_spectral:1.5461e-01 L11_spectral:1.5611e-01 L12_spectral:1.5358e-01 train_time:186335ms step_avg:46.58ms +[2025-09-11 06:32:02] [Rank 0] PRINT: step:4000/10000 
val_loss:4.5600 total_sharp:7.0398e-05 L1_sharp:2.5532e-04 L2_sharp:2.9358e-05 L3_sharp:2.2600e-05 L4_sharp:7.7964e-06 L5_sharp:9.6849e-06 L6_sharp:3.5822e-05 L7_sharp:2.8258e-05 L8_sharp:8.6029e-05 L9_sharp:6.9711e-05 L10_sharp:1.1033e-04 L11_sharp:1.1883e-04 L12_sharp:2.2841e-03 total_fnorm:1.9600e+02 total_l1_linf:5.3658e+05 total_spectral:1.0050e+02 L1_fnorm:1.1938e+01 L2_fnorm:1.2125e+01 L3_fnorm:1.1375e+01 L4_fnorm:1.2000e+01 L5_fnorm:1.2438e+01 L6_fnorm:1.2500e+01 L7_fnorm:1.2562e+01 L8_fnorm:1.2062e+01 L9_fnorm:1.2562e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2562e+01 L12_fnorm:1.2500e+01 L1_l1linf:3.1094e+00 L2_l1linf:2.8438e+00 L3_l1linf:2.2031e+00 L4_l1linf:2.3906e+00 L5_l1linf:2.8281e+00 L6_l1linf:3.0469e+00 L7_l1linf:3.1719e+00 L8_l1linf:3.0938e+00 L9_l1linf:3.0000e+00 L10_l1linf:2.8594e+00 L11_l1linf:2.7656e+00 L12_l1linf:2.8594e+00 L1_spectral:1.5301e-01 L2_spectral:1.5059e-01 L3_spectral:1.5357e-01 L4_spectral:1.5289e-01 L5_spectral:1.5446e-01 L6_spectral:1.5419e-01 L7_spectral:1.5553e-01 L8_spectral:1.5290e-01 L9_spectral:1.5481e-01 L10_spectral:1.5461e-01 L11_spectral:1.5611e-01 L12_spectral:1.5358e-01 train_time:186335ms step_avg:46.58ms +[2025-09-11 06:32:03] [Rank 0] step:4001/10000 train_time:187670ms step_avg:46.91ms +[2025-09-11 06:32:03] [Rank 0] step:4001/10000 train_time:187670ms step_avg:46.91ms +[2025-09-11 06:32:04] [Rank 0] step:4021/10000 train_time:188384ms step_avg:46.85ms +[2025-09-11 06:32:04] [Rank 0] step:4021/10000 train_time:188384ms step_avg:46.85ms +[2025-09-11 06:32:04] [Rank 0] step:4041/10000 train_time:189062ms step_avg:46.79ms +[2025-09-11 06:32:04] [Rank 0] step:4041/10000 train_time:189062ms step_avg:46.79ms +[2025-09-11 06:32:05] [Rank 0] step:4061/10000 train_time:189739ms step_avg:46.72ms +[2025-09-11 06:32:05] [Rank 0] step:4061/10000 train_time:189739ms step_avg:46.72ms +[2025-09-11 06:32:06] [Rank 0] step:4081/10000 train_time:190415ms step_avg:46.66ms +[2025-09-11 06:32:06] [Rank 0] step:4081/10000 
train_time:190415ms step_avg:46.66ms +[2025-09-11 06:32:06] [Rank 0] step:4101/10000 train_time:191092ms step_avg:46.60ms +[2025-09-11 06:32:06] [Rank 0] step:4101/10000 train_time:191092ms step_avg:46.60ms +[2025-09-11 06:32:07] [Rank 0] step:4121/10000 train_time:191768ms step_avg:46.53ms +[2025-09-11 06:32:07] [Rank 0] step:4121/10000 train_time:191768ms step_avg:46.53ms +[2025-09-11 06:32:08] [Rank 0] step:4141/10000 train_time:192444ms step_avg:46.47ms +[2025-09-11 06:32:08] [Rank 0] step:4141/10000 train_time:192444ms step_avg:46.47ms +[2025-09-11 06:32:08] [Rank 0] step:4161/10000 train_time:193119ms step_avg:46.41ms +[2025-09-11 06:32:08] [Rank 0] step:4161/10000 train_time:193119ms step_avg:46.41ms +[2025-09-11 06:32:09] [Rank 0] step:4181/10000 train_time:193796ms step_avg:46.35ms +[2025-09-11 06:32:09] [Rank 0] step:4181/10000 train_time:193796ms step_avg:46.35ms +[2025-09-11 06:32:10] [Rank 0] step:4201/10000 train_time:194472ms step_avg:46.29ms +[2025-09-11 06:32:10] [Rank 0] step:4201/10000 train_time:194472ms step_avg:46.29ms +[2025-09-11 06:32:11] [Rank 0] step:4221/10000 train_time:195487ms step_avg:46.31ms +[2025-09-11 06:32:11] [Rank 0] step:4221/10000 train_time:195487ms step_avg:46.31ms +[2025-09-11 06:32:12] [Rank 0] step:4241/10000 train_time:196382ms step_avg:46.31ms +[2025-09-11 06:32:12] [Rank 0] step:4241/10000 train_time:196382ms step_avg:46.31ms +[2025-09-11 06:32:12] [Rank 0] step:4261/10000 train_time:197058ms step_avg:46.25ms +[2025-09-11 06:32:12] [Rank 0] step:4261/10000 train_time:197058ms step_avg:46.25ms +[2025-09-11 06:32:13] [Rank 0] step:4281/10000 train_time:198011ms step_avg:46.25ms +[2025-09-11 06:32:13] [Rank 0] step:4281/10000 train_time:198011ms step_avg:46.25ms +[2025-09-11 06:32:14] [Rank 0] step:4301/10000 train_time:198688ms step_avg:46.20ms +[2025-09-11 06:32:14] [Rank 0] step:4301/10000 train_time:198688ms step_avg:46.20ms +[2025-09-11 06:32:15] [Rank 0] step:4321/10000 train_time:199365ms step_avg:46.14ms 
+[2025-09-11 06:32:15] [Rank 0] step:4321/10000 train_time:199365ms step_avg:46.14ms +[2025-09-11 06:32:15] [Rank 0] step:4341/10000 train_time:200041ms step_avg:46.08ms +[2025-09-11 06:32:15] [Rank 0] step:4341/10000 train_time:200041ms step_avg:46.08ms +[2025-09-11 06:32:16] [Rank 0] step:4361/10000 train_time:200717ms step_avg:46.03ms +[2025-09-11 06:32:16] [Rank 0] step:4361/10000 train_time:200717ms step_avg:46.03ms +[2025-09-11 06:32:17] [Rank 0] step:4381/10000 train_time:201394ms step_avg:45.97ms +[2025-09-11 06:32:17] [Rank 0] step:4381/10000 train_time:201394ms step_avg:45.97ms +[2025-09-11 06:32:17] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 06:32:17] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 06:32:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 06:32:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 06:32:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 06:32:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 06:32:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:32:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:32:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 06:32:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 06:32:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 06:32:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 06:32:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 06:32:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 06:32:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 06:32:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 06:32:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 06:32:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 06:32:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 06:32:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 06:32:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 06:32:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 06:32:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 06:32:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 06:32:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 06:32:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 06:32:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 06:32:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 06:32:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 06:32:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 06:32:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 06:32:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 06:32:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 06:32:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 06:32:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 06:32:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 06:32:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 06:32:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 06:32:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 06:32:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 06:32:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 06:32:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 06:32:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 06:32:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 06:32:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 06:32:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:32:27] [Rank 0] PRINT: step:4400/10000 val_loss:4.5502 total_sharp:5.4992e-05 L1_sharp:2.0489e-04 L2_sharp:4.1975e-05 L3_sharp:2.9615e-05 L4_sharp:5.9137e-06 L5_sharp:2.2690e-05 L6_sharp:3.0418e-05 L7_sharp:3.0654e-05 L8_sharp:7.1789e-05 L9_sharp:5.2741e-05 L10_sharp:8.2504e-05 L11_sharp:1.0752e-04 L12_sharp:6.5603e-04 total_fnorm:1.7700e+02 total_l1_linf:4.7718e+05 total_spectral:9.1000e+01 L1_fnorm:1.1875e+01 L2_fnorm:1.1938e+01 L3_fnorm:1.1250e+01 L4_fnorm:1.1938e+01 L5_fnorm:1.2438e+01 L6_fnorm:1.2500e+01 L7_fnorm:1.2625e+01 L8_fnorm:1.2062e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2500e+01 L12_fnorm:1.2500e+01 L1_l1linf:3.1406e+00 L2_l1linf:2.5625e+00 L3_l1linf:2.2188e+00 L4_l1linf:2.1250e+00 L5_l1linf:2.7500e+00 L6_l1linf:3.0469e+00 L7_l1linf:3.1719e+00 L8_l1linf:3.0781e+00 L9_l1linf:2.9531e+00 L10_l1linf:2.8750e+00 L11_l1linf:2.7812e+00 L12_l1linf:2.8438e+00 L1_spectral:1.5393e-01 L2_spectral:1.5133e-01 L3_spectral:1.5339e-01 L4_spectral:1.5501e-01 L5_spectral:1.5615e-01 L6_spectral:1.5514e-01 L7_spectral:1.5602e-01 L8_spectral:1.5478e-01 L9_spectral:1.5746e-01 L10_spectral:1.5523e-01 L11_spectral:1.5628e-01 L12_spectral:1.5484e-01 train_time:202051ms step_avg:45.92ms +[2025-09-11 06:32:27] [Rank 0] PRINT: step:4400/10000 val_loss:4.5502 total_sharp:5.4992e-05 L1_sharp:2.0489e-04 L2_sharp:4.1975e-05 L3_sharp:2.9615e-05 L4_sharp:5.9137e-06 L5_sharp:2.2690e-05 L6_sharp:3.0418e-05 L7_sharp:3.0654e-05 L8_sharp:7.1789e-05 L9_sharp:5.2741e-05 L10_sharp:8.2504e-05 L11_sharp:1.0752e-04 L12_sharp:6.5603e-04 total_fnorm:1.7700e+02 total_l1_linf:4.7718e+05 total_spectral:9.1000e+01 L1_fnorm:1.1875e+01 L2_fnorm:1.1938e+01 L3_fnorm:1.1250e+01 L4_fnorm:1.1938e+01 L5_fnorm:1.2438e+01 L6_fnorm:1.2500e+01 L7_fnorm:1.2625e+01 L8_fnorm:1.2062e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2500e+01 L12_fnorm:1.2500e+01 L1_l1linf:3.1406e+00 L2_l1linf:2.5625e+00 L3_l1linf:2.2188e+00 L4_l1linf:2.1250e+00 L5_l1linf:2.7500e+00 
L6_l1linf:3.0469e+00 L7_l1linf:3.1719e+00 L8_l1linf:3.0781e+00 L9_l1linf:2.9531e+00 L10_l1linf:2.8750e+00 L11_l1linf:2.7812e+00 L12_l1linf:2.8438e+00 L1_spectral:1.5393e-01 L2_spectral:1.5133e-01 L3_spectral:1.5339e-01 L4_spectral:1.5501e-01 L5_spectral:1.5615e-01 L6_spectral:1.5514e-01 L7_spectral:1.5602e-01 L8_spectral:1.5478e-01 L9_spectral:1.5746e-01 L10_spectral:1.5523e-01 L11_spectral:1.5628e-01 L12_spectral:1.5484e-01 train_time:202051ms step_avg:45.92ms +[2025-09-11 06:32:29] [Rank 0] step:4401/10000 train_time:203371ms step_avg:46.21ms +[2025-09-11 06:32:29] [Rank 0] step:4401/10000 train_time:203371ms step_avg:46.21ms +[2025-09-11 06:32:29] [Rank 0] step:4421/10000 train_time:204064ms step_avg:46.16ms +[2025-09-11 06:32:29] [Rank 0] step:4421/10000 train_time:204064ms step_avg:46.16ms +[2025-09-11 06:32:30] [Rank 0] step:4441/10000 train_time:204742ms step_avg:46.10ms +[2025-09-11 06:32:30] [Rank 0] step:4441/10000 train_time:204742ms step_avg:46.10ms +[2025-09-11 06:32:31] [Rank 0] step:4461/10000 train_time:205421ms step_avg:46.05ms +[2025-09-11 06:32:31] [Rank 0] step:4461/10000 train_time:205421ms step_avg:46.05ms +[2025-09-11 06:32:31] [Rank 0] step:4481/10000 train_time:206101ms step_avg:45.99ms +[2025-09-11 06:32:31] [Rank 0] step:4481/10000 train_time:206101ms step_avg:45.99ms +[2025-09-11 06:32:32] [Rank 0] step:4501/10000 train_time:206782ms step_avg:45.94ms +[2025-09-11 06:32:32] [Rank 0] step:4501/10000 train_time:206782ms step_avg:45.94ms +[2025-09-11 06:32:33] [Rank 0] step:4521/10000 train_time:207463ms step_avg:45.89ms +[2025-09-11 06:32:33] [Rank 0] step:4521/10000 train_time:207463ms step_avg:45.89ms +[2025-09-11 06:32:33] [Rank 0] step:4541/10000 train_time:208141ms step_avg:45.84ms +[2025-09-11 06:32:33] [Rank 0] step:4541/10000 train_time:208141ms step_avg:45.84ms +[2025-09-11 06:32:34] [Rank 0] step:4561/10000 train_time:208820ms step_avg:45.78ms +[2025-09-11 06:32:34] [Rank 0] step:4561/10000 train_time:208820ms step_avg:45.78ms 
+[2025-09-11 06:32:35] [Rank 0] step:4581/10000 train_time:209499ms step_avg:45.73ms +[2025-09-11 06:32:35] [Rank 0] step:4581/10000 train_time:209499ms step_avg:45.73ms +[2025-09-11 06:32:35] [Rank 0] step:4601/10000 train_time:210179ms step_avg:45.68ms +[2025-09-11 06:32:35] [Rank 0] step:4601/10000 train_time:210179ms step_avg:45.68ms +[2025-09-11 06:32:36] [Rank 0] step:4621/10000 train_time:210858ms step_avg:45.63ms +[2025-09-11 06:32:36] [Rank 0] step:4621/10000 train_time:210858ms step_avg:45.63ms +[2025-09-11 06:32:37] [Rank 0] step:4641/10000 train_time:211537ms step_avg:45.58ms +[2025-09-11 06:32:37] [Rank 0] step:4641/10000 train_time:211537ms step_avg:45.58ms +[2025-09-11 06:32:37] [Rank 0] step:4661/10000 train_time:212218ms step_avg:45.53ms +[2025-09-11 06:32:37] [Rank 0] step:4661/10000 train_time:212218ms step_avg:45.53ms +[2025-09-11 06:32:38] [Rank 0] step:4681/10000 train_time:212897ms step_avg:45.48ms +[2025-09-11 06:32:38] [Rank 0] step:4681/10000 train_time:212897ms step_avg:45.48ms +[2025-09-11 06:32:39] [Rank 0] step:4701/10000 train_time:213576ms step_avg:45.43ms +[2025-09-11 06:32:39] [Rank 0] step:4701/10000 train_time:213576ms step_avg:45.43ms +[2025-09-11 06:32:40] [Rank 0] step:4721/10000 train_time:214256ms step_avg:45.38ms +[2025-09-11 06:32:40] [Rank 0] step:4721/10000 train_time:214256ms step_avg:45.38ms +[2025-09-11 06:32:40] [Rank 0] step:4741/10000 train_time:214934ms step_avg:45.34ms +[2025-09-11 06:32:40] [Rank 0] step:4741/10000 train_time:214934ms step_avg:45.34ms +[2025-09-11 06:32:41] [Rank 0] step:4761/10000 train_time:215616ms step_avg:45.29ms +[2025-09-11 06:32:41] [Rank 0] step:4761/10000 train_time:215616ms step_avg:45.29ms +[2025-09-11 06:32:42] [Rank 0] step:4781/10000 train_time:216293ms step_avg:45.24ms +[2025-09-11 06:32:42] [Rank 0] step:4781/10000 train_time:216293ms step_avg:45.24ms +[2025-09-11 06:32:42] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 06:32:42] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 06:32:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 06:32:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 06:32:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 06:32:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 06:32:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:32:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:32:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 06:32:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 06:32:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 06:32:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 06:32:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 06:32:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 06:32:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 06:32:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 06:32:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 06:32:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 06:32:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 06:32:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 06:32:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 06:32:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 06:32:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 06:32:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 06:32:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 06:32:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 06:32:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 06:32:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 06:32:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 06:32:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 06:32:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 06:32:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 06:32:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 06:32:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 06:32:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 06:32:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 06:32:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 06:32:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 06:32:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 06:32:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 06:32:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 06:32:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 06:32:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 06:32:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 06:32:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 06:32:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 06:32:52] [Rank 0] PRINT: step:4800/10000 val_loss:4.4941 total_sharp:5.6653e-05 L1_sharp:2.2533e-04 L2_sharp:3.7766e-06 L3_sharp:2.6068e-05 L4_sharp:1.8318e-05 L5_sharp:2.6848e-05 L6_sharp:2.9380e-05 L7_sharp:3.8935e-05 L8_sharp:4.3951e-05 L9_sharp:6.6441e-05 L10_sharp:7.2991e-05 L11_sharp:1.0730e-04 L12_sharp:2.0896e-03 total_fnorm:1.8900e+02 total_l1_linf:5.2019e+05 total_spectral:9.7000e+01 L1_fnorm:1.1938e+01 L2_fnorm:1.1938e+01 L3_fnorm:1.1125e+01 L4_fnorm:1.1750e+01 L5_fnorm:1.2438e+01 L6_fnorm:1.2500e+01 L7_fnorm:1.2562e+01 L8_fnorm:1.2062e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2500e+01 L12_fnorm:1.2500e+01 L1_l1linf:3.0000e+00 L2_l1linf:2.4531e+00 L3_l1linf:2.2656e+00 L4_l1linf:2.3438e+00 L5_l1linf:2.7344e+00 L6_l1linf:3.1875e+00 L7_l1linf:3.1406e+00 L8_l1linf:3.1094e+00 L9_l1linf:2.9688e+00 L10_l1linf:2.8750e+00 L11_l1linf:2.7500e+00 L12_l1linf:2.8438e+00 L1_spectral:1.5505e-01 L2_spectral:1.5295e-01 L3_spectral:1.5405e-01 L4_spectral:1.5379e-01 L5_spectral:1.5519e-01 L6_spectral:1.5554e-01 L7_spectral:1.5745e-01 L8_spectral:1.5512e-01 L9_spectral:1.5780e-01 L10_spectral:1.5571e-01 L11_spectral:1.5799e-01 L12_spectral:1.5510e-01 train_time:216953ms step_avg:45.20ms +[2025-09-11 06:32:52] [Rank 0] PRINT: step:4800/10000 
val_loss:4.4941 total_sharp:5.6653e-05 L1_sharp:2.2533e-04 L2_sharp:3.7766e-06 L3_sharp:2.6068e-05 L4_sharp:1.8318e-05 L5_sharp:2.6848e-05 L6_sharp:2.9380e-05 L7_sharp:3.8935e-05 L8_sharp:4.3951e-05 L9_sharp:6.6441e-05 L10_sharp:7.2991e-05 L11_sharp:1.0730e-04 L12_sharp:2.0896e-03 total_fnorm:1.8900e+02 total_l1_linf:5.2019e+05 total_spectral:9.7000e+01 L1_fnorm:1.1938e+01 L2_fnorm:1.1938e+01 L3_fnorm:1.1125e+01 L4_fnorm:1.1750e+01 L5_fnorm:1.2438e+01 L6_fnorm:1.2500e+01 L7_fnorm:1.2562e+01 L8_fnorm:1.2062e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2500e+01 L12_fnorm:1.2500e+01 L1_l1linf:3.0000e+00 L2_l1linf:2.4531e+00 L3_l1linf:2.2656e+00 L4_l1linf:2.3438e+00 L5_l1linf:2.7344e+00 L6_l1linf:3.1875e+00 L7_l1linf:3.1406e+00 L8_l1linf:3.1094e+00 L9_l1linf:2.9688e+00 L10_l1linf:2.8750e+00 L11_l1linf:2.7500e+00 L12_l1linf:2.8438e+00 L1_spectral:1.5505e-01 L2_spectral:1.5295e-01 L3_spectral:1.5405e-01 L4_spectral:1.5379e-01 L5_spectral:1.5519e-01 L6_spectral:1.5554e-01 L7_spectral:1.5745e-01 L8_spectral:1.5512e-01 L9_spectral:1.5780e-01 L10_spectral:1.5571e-01 L11_spectral:1.5799e-01 L12_spectral:1.5510e-01 train_time:216953ms step_avg:45.20ms +[2025-09-11 06:32:54] [Rank 0] step:4801/10000 train_time:218248ms step_avg:45.46ms +[2025-09-11 06:32:54] [Rank 0] step:4801/10000 train_time:218248ms step_avg:45.46ms +[2025-09-11 06:32:54] [Rank 0] step:4821/10000 train_time:218962ms step_avg:45.42ms +[2025-09-11 06:32:54] [Rank 0] step:4821/10000 train_time:218962ms step_avg:45.42ms +[2025-09-11 06:32:55] [Rank 0] step:4841/10000 train_time:219642ms step_avg:45.37ms +[2025-09-11 06:32:55] [Rank 0] step:4841/10000 train_time:219642ms step_avg:45.37ms +[2025-09-11 06:32:56] [Rank 0] step:4861/10000 train_time:220321ms step_avg:45.32ms +[2025-09-11 06:32:56] [Rank 0] step:4861/10000 train_time:220321ms step_avg:45.32ms +[2025-09-11 06:32:56] [Rank 0] step:4881/10000 train_time:221000ms step_avg:45.28ms +[2025-09-11 06:32:56] [Rank 0] step:4881/10000 
train_time:221000ms step_avg:45.28ms +[2025-09-11 06:32:57] [Rank 0] step:4901/10000 train_time:221680ms step_avg:45.23ms +[2025-09-11 06:32:57] [Rank 0] step:4901/10000 train_time:221680ms step_avg:45.23ms +[2025-09-11 06:32:58] [Rank 0] step:4921/10000 train_time:222359ms step_avg:45.19ms +[2025-09-11 06:32:58] [Rank 0] step:4921/10000 train_time:222359ms step_avg:45.19ms +[2025-09-11 06:32:58] [Rank 0] step:4941/10000 train_time:223038ms step_avg:45.14ms +[2025-09-11 06:32:58] [Rank 0] step:4941/10000 train_time:223038ms step_avg:45.14ms +[2025-09-11 06:32:59] [Rank 0] step:4961/10000 train_time:223718ms step_avg:45.10ms +[2025-09-11 06:32:59] [Rank 0] step:4961/10000 train_time:223718ms step_avg:45.10ms +[2025-09-11 06:33:00] [Rank 0] step:4981/10000 train_time:224397ms step_avg:45.05ms +[2025-09-11 06:33:00] [Rank 0] step:4981/10000 train_time:224397ms step_avg:45.05ms +[2025-09-11 06:33:00] [Rank 0] step:5001/10000 train_time:225077ms step_avg:45.01ms +[2025-09-11 06:33:00] [Rank 0] step:5001/10000 train_time:225077ms step_avg:45.01ms +[2025-09-11 06:33:01] [Rank 0] step:5021/10000 train_time:225755ms step_avg:44.96ms +[2025-09-11 06:33:01] [Rank 0] step:5021/10000 train_time:225755ms step_avg:44.96ms +[2025-09-11 06:33:02] [Rank 0] step:5041/10000 train_time:226434ms step_avg:44.92ms +[2025-09-11 06:33:02] [Rank 0] step:5041/10000 train_time:226434ms step_avg:44.92ms +[2025-09-11 06:33:03] [Rank 0] step:5061/10000 train_time:227113ms step_avg:44.88ms +[2025-09-11 06:33:03] [Rank 0] step:5061/10000 train_time:227113ms step_avg:44.88ms +[2025-09-11 06:33:03] [Rank 0] step:5081/10000 train_time:227792ms step_avg:44.83ms +[2025-09-11 06:33:03] [Rank 0] step:5081/10000 train_time:227792ms step_avg:44.83ms +[2025-09-11 06:33:04] [Rank 0] step:5101/10000 train_time:228471ms step_avg:44.79ms +[2025-09-11 06:33:04] [Rank 0] step:5101/10000 train_time:228471ms step_avg:44.79ms +[2025-09-11 06:33:05] [Rank 0] step:5121/10000 train_time:229150ms step_avg:44.75ms 
+[2025-09-11 06:33:05] [Rank 0] step:5121/10000 train_time:229150ms step_avg:44.75ms +[2025-09-11 06:33:05] [Rank 0] step:5141/10000 train_time:229829ms step_avg:44.71ms +[2025-09-11 06:33:05] [Rank 0] step:5141/10000 train_time:229829ms step_avg:44.71ms +[2025-09-11 06:33:06] [Rank 0] step:5161/10000 train_time:230508ms step_avg:44.66ms +[2025-09-11 06:33:06] [Rank 0] step:5161/10000 train_time:230508ms step_avg:44.66ms +[2025-09-11 06:33:07] [Rank 0] step:5181/10000 train_time:231187ms step_avg:44.62ms +[2025-09-11 06:33:07] [Rank 0] step:5181/10000 train_time:231187ms step_avg:44.62ms +[2025-09-11 06:33:07] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 06:33:07] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 06:33:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 06:33:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 06:33:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 06:33:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 06:33:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:33:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:33:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 06:33:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 06:33:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 06:33:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 06:33:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 06:33:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 06:33:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 06:33:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 06:33:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 06:33:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 06:33:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 06:33:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 06:33:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 06:33:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 06:33:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 06:33:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 06:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 06:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 06:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 06:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 06:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 06:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 06:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 06:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 06:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 06:33:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 06:33:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 06:33:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 06:33:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 06:33:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 06:33:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 06:33:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 06:33:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 06:33:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 06:33:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 06:33:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 06:33:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 06:33:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:33:17] [Rank 0] PRINT: step:5200/10000 val_loss:4.4703 total_sharp:4.8974e-05 L1_sharp:1.8970e-04 L2_sharp:9.3759e-05 L3_sharp:1.0475e-06 L4_sharp:3.8155e-05 L5_sharp:1.5948e-05 L6_sharp:8.1768e-05 L7_sharp:2.5126e-05 L8_sharp:6.1057e-05 L9_sharp:6.6612e-05 L10_sharp:8.4846e-05 L11_sharp:1.1043e-04 L12_sharp:5.8588e-04 total_fnorm:1.7700e+02 total_l1_linf:4.6899e+05 total_spectral:9.1000e+01 L1_fnorm:1.1938e+01 L2_fnorm:1.1562e+01 L3_fnorm:1.1125e+01 L4_fnorm:1.1750e+01 L5_fnorm:1.2500e+01 L6_fnorm:1.2500e+01 L7_fnorm:1.2625e+01 L8_fnorm:1.2125e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2500e+01 L12_fnorm:1.2438e+01 L1_l1linf:2.9844e+00 L2_l1linf:2.1875e+00 L3_l1linf:2.2656e+00 L4_l1linf:2.2500e+00 L5_l1linf:2.6406e+00 L6_l1linf:3.1094e+00 L7_l1linf:3.0938e+00 L8_l1linf:3.0938e+00 L9_l1linf:3.0000e+00 L10_l1linf:2.8750e+00 L11_l1linf:2.7500e+00 L12_l1linf:2.7812e+00 L1_spectral:1.5501e-01 L2_spectral:1.5295e-01 L3_spectral:1.5573e-01 L4_spectral:1.5472e-01 L5_spectral:1.5703e-01 L6_spectral:1.5766e-01 L7_spectral:1.5877e-01 L8_spectral:1.5691e-01 L9_spectral:1.5872e-01 L10_spectral:1.5616e-01 L11_spectral:1.5748e-01 L12_spectral:1.5601e-01 train_time:231854ms step_avg:44.59ms +[2025-09-11 06:33:17] [Rank 0] PRINT: step:5200/10000 val_loss:4.4703 total_sharp:4.8974e-05 L1_sharp:1.8970e-04 L2_sharp:9.3759e-05 L3_sharp:1.0475e-06 L4_sharp:3.8155e-05 L5_sharp:1.5948e-05 L6_sharp:8.1768e-05 L7_sharp:2.5126e-05 L8_sharp:6.1057e-05 L9_sharp:6.6612e-05 L10_sharp:8.4846e-05 L11_sharp:1.1043e-04 L12_sharp:5.8588e-04 total_fnorm:1.7700e+02 total_l1_linf:4.6899e+05 total_spectral:9.1000e+01 L1_fnorm:1.1938e+01 L2_fnorm:1.1562e+01 L3_fnorm:1.1125e+01 L4_fnorm:1.1750e+01 L5_fnorm:1.2500e+01 L6_fnorm:1.2500e+01 L7_fnorm:1.2625e+01 L8_fnorm:1.2125e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2312e+01 L11_fnorm:1.2500e+01 L12_fnorm:1.2438e+01 L1_l1linf:2.9844e+00 L2_l1linf:2.1875e+00 L3_l1linf:2.2656e+00 L4_l1linf:2.2500e+00 L5_l1linf:2.6406e+00 
L6_l1linf:3.1094e+00 L7_l1linf:3.0938e+00 L8_l1linf:3.0938e+00 L9_l1linf:3.0000e+00 L10_l1linf:2.8750e+00 L11_l1linf:2.7500e+00 L12_l1linf:2.7812e+00 L1_spectral:1.5501e-01 L2_spectral:1.5295e-01 L3_spectral:1.5573e-01 L4_spectral:1.5472e-01 L5_spectral:1.5703e-01 L6_spectral:1.5766e-01 L7_spectral:1.5877e-01 L8_spectral:1.5691e-01 L9_spectral:1.5872e-01 L10_spectral:1.5616e-01 L11_spectral:1.5748e-01 L12_spectral:1.5601e-01 train_time:231854ms step_avg:44.59ms +[2025-09-11 06:33:19] [Rank 0] step:5201/10000 train_time:233177ms step_avg:44.83ms +[2025-09-11 06:33:19] [Rank 0] step:5201/10000 train_time:233177ms step_avg:44.83ms +[2025-09-11 06:33:19] [Rank 0] step:5221/10000 train_time:233903ms step_avg:44.80ms +[2025-09-11 06:33:19] [Rank 0] step:5221/10000 train_time:233903ms step_avg:44.80ms +[2025-09-11 06:33:20] [Rank 0] step:5241/10000 train_time:234592ms step_avg:44.76ms +[2025-09-11 06:33:20] [Rank 0] step:5241/10000 train_time:234592ms step_avg:44.76ms +[2025-09-11 06:33:21] [Rank 0] step:5261/10000 train_time:235282ms step_avg:44.72ms +[2025-09-11 06:33:21] [Rank 0] step:5261/10000 train_time:235282ms step_avg:44.72ms +[2025-09-11 06:33:21] [Rank 0] step:5281/10000 train_time:235971ms step_avg:44.68ms +[2025-09-11 06:33:21] [Rank 0] step:5281/10000 train_time:235971ms step_avg:44.68ms +[2025-09-11 06:33:22] [Rank 0] step:5301/10000 train_time:236661ms step_avg:44.64ms +[2025-09-11 06:33:22] [Rank 0] step:5301/10000 train_time:236661ms step_avg:44.64ms +[2025-09-11 06:33:23] [Rank 0] step:5321/10000 train_time:237350ms step_avg:44.61ms +[2025-09-11 06:33:23] [Rank 0] step:5321/10000 train_time:237350ms step_avg:44.61ms +[2025-09-11 06:33:23] [Rank 0] step:5341/10000 train_time:238038ms step_avg:44.57ms +[2025-09-11 06:33:23] [Rank 0] step:5341/10000 train_time:238038ms step_avg:44.57ms +[2025-09-11 06:33:24] [Rank 0] step:5361/10000 train_time:238728ms step_avg:44.53ms +[2025-09-11 06:33:24] [Rank 0] step:5361/10000 train_time:238728ms step_avg:44.53ms 
+[2025-09-11 06:33:25] [Rank 0] step:5381/10000 train_time:239418ms step_avg:44.49ms +[2025-09-11 06:33:25] [Rank 0] step:5381/10000 train_time:239418ms step_avg:44.49ms +[2025-09-11 06:33:26] [Rank 0] step:5401/10000 train_time:240106ms step_avg:44.46ms +[2025-09-11 06:33:26] [Rank 0] step:5401/10000 train_time:240106ms step_avg:44.46ms +[2025-09-11 06:33:26] [Rank 0] step:5421/10000 train_time:240797ms step_avg:44.42ms +[2025-09-11 06:33:26] [Rank 0] step:5421/10000 train_time:240797ms step_avg:44.42ms +[2025-09-11 06:33:27] [Rank 0] step:5441/10000 train_time:241485ms step_avg:44.38ms +[2025-09-11 06:33:27] [Rank 0] step:5441/10000 train_time:241485ms step_avg:44.38ms +[2025-09-11 06:33:28] [Rank 0] step:5461/10000 train_time:242175ms step_avg:44.35ms +[2025-09-11 06:33:28] [Rank 0] step:5461/10000 train_time:242175ms step_avg:44.35ms +[2025-09-11 06:33:28] [Rank 0] step:5481/10000 train_time:242865ms step_avg:44.31ms +[2025-09-11 06:33:28] [Rank 0] step:5481/10000 train_time:242865ms step_avg:44.31ms +[2025-09-11 06:33:29] [Rank 0] step:5501/10000 train_time:243555ms step_avg:44.27ms +[2025-09-11 06:33:29] [Rank 0] step:5501/10000 train_time:243555ms step_avg:44.27ms +[2025-09-11 06:33:30] [Rank 0] step:5521/10000 train_time:244244ms step_avg:44.24ms +[2025-09-11 06:33:30] [Rank 0] step:5521/10000 train_time:244244ms step_avg:44.24ms +[2025-09-11 06:33:30] [Rank 0] step:5541/10000 train_time:244935ms step_avg:44.20ms +[2025-09-11 06:33:30] [Rank 0] step:5541/10000 train_time:244935ms step_avg:44.20ms +[2025-09-11 06:33:31] [Rank 0] step:5561/10000 train_time:245626ms step_avg:44.17ms +[2025-09-11 06:33:31] [Rank 0] step:5561/10000 train_time:245626ms step_avg:44.17ms +[2025-09-11 06:33:32] [Rank 0] step:5581/10000 train_time:246316ms step_avg:44.13ms +[2025-09-11 06:33:32] [Rank 0] step:5581/10000 train_time:246316ms step_avg:44.13ms +[2025-09-11 06:33:32] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 06:33:32] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 06:33:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 06:33:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 06:33:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 06:33:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 06:33:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:33:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:33:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 06:33:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 06:33:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 06:33:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 06:33:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 06:33:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 06:33:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 06:33:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 06:33:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 06:33:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 06:33:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 06:33:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 06:33:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 06:33:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 06:33:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 06:33:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 06:33:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 06:33:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 06:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 06:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 06:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 06:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 06:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 06:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 06:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 06:33:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 06:33:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 06:33:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 06:33:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 06:33:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 06:33:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 06:33:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 06:33:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 06:33:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 06:33:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 06:33:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 06:33:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 06:33:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 06:33:44] [Rank 0] PRINT: step:5600/10000 val_loss:4.4510 total_sharp:4.5712e-05 L1_sharp:1.8513e-04 L2_sharp:4.5981e-05 L3_sharp:1.3774e-05 L4_sharp:7.4840e-06 L5_sharp:8.1801e-06 L6_sharp:2.5476e-05 L7_sharp:3.2270e-05 L8_sharp:5.8183e-05 L9_sharp:4.8466e-05 L10_sharp:8.2210e-05 L11_sharp:9.4642e-05 L12_sharp:6.8264e-04 total_fnorm:1.8400e+02 total_l1_linf:4.9357e+05 total_spectral:9.4000e+01 L1_fnorm:1.1812e+01 L2_fnorm:1.1562e+01 L3_fnorm:1.0750e+01 L4_fnorm:1.1375e+01 L5_fnorm:1.2312e+01 L6_fnorm:1.2500e+01 L7_fnorm:1.2562e+01 L8_fnorm:1.2000e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2250e+01 L11_fnorm:1.2438e+01 L12_fnorm:1.2438e+01 L1_l1linf:2.9844e+00 L2_l1linf:2.1406e+00 L3_l1linf:2.3906e+00 L4_l1linf:2.2969e+00 L5_l1linf:2.5469e+00 L6_l1linf:3.1250e+00 L7_l1linf:3.1406e+00 L8_l1linf:3.0938e+00 L9_l1linf:2.9531e+00 L10_l1linf:2.8125e+00 L11_l1linf:2.6875e+00 L12_l1linf:2.7344e+00 L1_spectral:1.5512e-01 L2_spectral:1.5348e-01 L3_spectral:1.5603e-01 L4_spectral:1.5651e-01 L5_spectral:1.5704e-01 L6_spectral:1.5750e-01 L7_spectral:1.5804e-01 L8_spectral:1.5380e-01 L9_spectral:1.5773e-01 L10_spectral:1.5558e-01 L11_spectral:1.5758e-01 L12_spectral:1.5622e-01 train_time:246985ms step_avg:44.10ms +[2025-09-11 06:33:44] [Rank 0] PRINT: step:5600/10000 
val_loss:4.4510 total_sharp:4.5712e-05 L1_sharp:1.8513e-04 L2_sharp:4.5981e-05 L3_sharp:1.3774e-05 L4_sharp:7.4840e-06 L5_sharp:8.1801e-06 L6_sharp:2.5476e-05 L7_sharp:3.2270e-05 L8_sharp:5.8183e-05 L9_sharp:4.8466e-05 L10_sharp:8.2210e-05 L11_sharp:9.4642e-05 L12_sharp:6.8264e-04 total_fnorm:1.8400e+02 total_l1_linf:4.9357e+05 total_spectral:9.4000e+01 L1_fnorm:1.1812e+01 L2_fnorm:1.1562e+01 L3_fnorm:1.0750e+01 L4_fnorm:1.1375e+01 L5_fnorm:1.2312e+01 L6_fnorm:1.2500e+01 L7_fnorm:1.2562e+01 L8_fnorm:1.2000e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2250e+01 L11_fnorm:1.2438e+01 L12_fnorm:1.2438e+01 L1_l1linf:2.9844e+00 L2_l1linf:2.1406e+00 L3_l1linf:2.3906e+00 L4_l1linf:2.2969e+00 L5_l1linf:2.5469e+00 L6_l1linf:3.1250e+00 L7_l1linf:3.1406e+00 L8_l1linf:3.0938e+00 L9_l1linf:2.9531e+00 L10_l1linf:2.8125e+00 L11_l1linf:2.6875e+00 L12_l1linf:2.7344e+00 L1_spectral:1.5512e-01 L2_spectral:1.5348e-01 L3_spectral:1.5603e-01 L4_spectral:1.5651e-01 L5_spectral:1.5704e-01 L6_spectral:1.5750e-01 L7_spectral:1.5804e-01 L8_spectral:1.5380e-01 L9_spectral:1.5773e-01 L10_spectral:1.5558e-01 L11_spectral:1.5758e-01 L12_spectral:1.5622e-01 train_time:246985ms step_avg:44.10ms +[2025-09-11 06:33:46] [Rank 0] step:5601/10000 train_time:248263ms step_avg:44.32ms +[2025-09-11 06:33:46] [Rank 0] step:5601/10000 train_time:248263ms step_avg:44.32ms +[2025-09-11 06:33:46] [Rank 0] step:5621/10000 train_time:248974ms step_avg:44.29ms +[2025-09-11 06:33:46] [Rank 0] step:5621/10000 train_time:248974ms step_avg:44.29ms +[2025-09-11 06:33:47] [Rank 0] step:5641/10000 train_time:249665ms step_avg:44.26ms +[2025-09-11 06:33:47] [Rank 0] step:5641/10000 train_time:249665ms step_avg:44.26ms +[2025-09-11 06:33:48] [Rank 0] step:5661/10000 train_time:250356ms step_avg:44.22ms +[2025-09-11 06:33:48] [Rank 0] step:5661/10000 train_time:250356ms step_avg:44.22ms +[2025-09-11 06:33:48] [Rank 0] step:5681/10000 train_time:251047ms step_avg:44.19ms +[2025-09-11 06:33:48] [Rank 0] step:5681/10000 
train_time:251047ms step_avg:44.19ms +[2025-09-11 06:33:49] [Rank 0] step:5701/10000 train_time:251738ms step_avg:44.16ms +[2025-09-11 06:33:49] [Rank 0] step:5701/10000 train_time:251738ms step_avg:44.16ms +[2025-09-11 06:33:50] [Rank 0] step:5721/10000 train_time:252427ms step_avg:44.12ms +[2025-09-11 06:33:50] [Rank 0] step:5721/10000 train_time:252427ms step_avg:44.12ms +[2025-09-11 06:33:50] [Rank 0] step:5741/10000 train_time:253118ms step_avg:44.09ms +[2025-09-11 06:33:50] [Rank 0] step:5741/10000 train_time:253118ms step_avg:44.09ms +[2025-09-11 06:33:51] [Rank 0] step:5761/10000 train_time:253811ms step_avg:44.06ms +[2025-09-11 06:33:51] [Rank 0] step:5761/10000 train_time:253811ms step_avg:44.06ms +[2025-09-11 06:33:52] [Rank 0] step:5781/10000 train_time:254501ms step_avg:44.02ms +[2025-09-11 06:33:52] [Rank 0] step:5781/10000 train_time:254501ms step_avg:44.02ms +[2025-09-11 06:33:52] [Rank 0] step:5801/10000 train_time:255193ms step_avg:43.99ms +[2025-09-11 06:33:52] [Rank 0] step:5801/10000 train_time:255193ms step_avg:43.99ms +[2025-09-11 06:33:53] [Rank 0] step:5821/10000 train_time:255882ms step_avg:43.96ms +[2025-09-11 06:33:53] [Rank 0] step:5821/10000 train_time:255882ms step_avg:43.96ms +[2025-09-11 06:33:54] [Rank 0] step:5841/10000 train_time:256579ms step_avg:43.93ms +[2025-09-11 06:33:54] [Rank 0] step:5841/10000 train_time:256579ms step_avg:43.93ms +[2025-09-11 06:33:55] [Rank 0] step:5861/10000 train_time:257267ms step_avg:43.89ms +[2025-09-11 06:33:55] [Rank 0] step:5861/10000 train_time:257267ms step_avg:43.89ms +[2025-09-11 06:33:55] [Rank 0] step:5881/10000 train_time:257958ms step_avg:43.86ms +[2025-09-11 06:33:55] [Rank 0] step:5881/10000 train_time:257958ms step_avg:43.86ms +[2025-09-11 06:33:56] [Rank 0] step:5901/10000 train_time:258647ms step_avg:43.83ms +[2025-09-11 06:33:56] [Rank 0] step:5901/10000 train_time:258647ms step_avg:43.83ms +[2025-09-11 06:33:57] [Rank 0] step:5921/10000 train_time:259340ms step_avg:43.80ms 
+[2025-09-11 06:33:57] [Rank 0] step:5921/10000 train_time:259340ms step_avg:43.80ms +[2025-09-11 06:33:57] [Rank 0] step:5941/10000 train_time:260031ms step_avg:43.77ms +[2025-09-11 06:33:57] [Rank 0] step:5941/10000 train_time:260031ms step_avg:43.77ms +[2025-09-11 06:33:58] [Rank 0] step:5961/10000 train_time:260722ms step_avg:43.74ms +[2025-09-11 06:33:58] [Rank 0] step:5961/10000 train_time:260722ms step_avg:43.74ms +[2025-09-11 06:33:59] [Rank 0] step:5981/10000 train_time:261413ms step_avg:43.71ms +[2025-09-11 06:33:59] [Rank 0] step:5981/10000 train_time:261413ms step_avg:43.71ms +[2025-09-11 06:33:59] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 06:33:59] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 06:34:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 06:34:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 06:34:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 06:34:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 06:34:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:34:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:34:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 06:34:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 06:34:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 06:34:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 06:34:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 06:34:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 06:34:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 06:34:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 06:34:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 06:34:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 06:34:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 06:34:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 06:34:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 06:34:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 06:34:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 06:34:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 06:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 06:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 06:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 06:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 06:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 06:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 06:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 06:34:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 06:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 06:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 06:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 06:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 06:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 06:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 06:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 06:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 06:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 06:34:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 06:34:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 06:34:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 06:34:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 06:34:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:34:09] [Rank 0] PRINT: step:6000/10000 val_loss:4.4119 total_sharp:4.0538e-05 L1_sharp:1.4658e-04 L2_sharp:6.2015e-05 L3_sharp:1.8039e-05 L4_sharp:1.0495e-05 L5_sharp:1.0135e-05 L6_sharp:4.8678e-05 L7_sharp:2.9084e-05 L8_sharp:4.3385e-05 L9_sharp:4.8375e-05 L10_sharp:7.3475e-05 L11_sharp:9.0841e-05 L12_sharp:5.3606e-04 total_fnorm:1.8000e+02 total_l1_linf:4.7514e+05 total_spectral:9.2500e+01 L1_fnorm:1.1938e+01 L2_fnorm:1.1500e+01 L3_fnorm:1.0750e+01 L4_fnorm:1.1625e+01 L5_fnorm:1.2375e+01 L6_fnorm:1.2500e+01 L7_fnorm:1.2500e+01 L8_fnorm:1.2125e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2250e+01 L11_fnorm:1.2438e+01 L12_fnorm:1.2375e+01 L1_l1linf:2.9375e+00 L2_l1linf:2.3125e+00 L3_l1linf:2.3438e+00 L4_l1linf:2.1719e+00 L5_l1linf:2.5000e+00 L6_l1linf:3.0938e+00 L7_l1linf:3.0469e+00 L8_l1linf:3.0625e+00 L9_l1linf:2.9375e+00 L10_l1linf:2.7969e+00 L11_l1linf:2.6562e+00 L12_l1linf:2.7344e+00 L1_spectral:1.5617e-01 L2_spectral:1.5541e-01 L3_spectral:1.5618e-01 L4_spectral:1.5743e-01 L5_spectral:1.5765e-01 L6_spectral:1.5823e-01 L7_spectral:1.5855e-01 L8_spectral:1.5651e-01 L9_spectral:1.5825e-01 L10_spectral:1.5684e-01 L11_spectral:1.5774e-01 L12_spectral:1.5776e-01 train_time:262087ms step_avg:43.68ms +[2025-09-11 06:34:09] [Rank 0] PRINT: step:6000/10000 val_loss:4.4119 total_sharp:4.0538e-05 L1_sharp:1.4658e-04 L2_sharp:6.2015e-05 L3_sharp:1.8039e-05 L4_sharp:1.0495e-05 L5_sharp:1.0135e-05 L6_sharp:4.8678e-05 L7_sharp:2.9084e-05 L8_sharp:4.3385e-05 L9_sharp:4.8375e-05 L10_sharp:7.3475e-05 L11_sharp:9.0841e-05 L12_sharp:5.3606e-04 total_fnorm:1.8000e+02 total_l1_linf:4.7514e+05 total_spectral:9.2500e+01 L1_fnorm:1.1938e+01 L2_fnorm:1.1500e+01 L3_fnorm:1.0750e+01 L4_fnorm:1.1625e+01 L5_fnorm:1.2375e+01 L6_fnorm:1.2500e+01 L7_fnorm:1.2500e+01 L8_fnorm:1.2125e+01 L9_fnorm:1.2500e+01 L10_fnorm:1.2250e+01 L11_fnorm:1.2438e+01 L12_fnorm:1.2375e+01 L1_l1linf:2.9375e+00 L2_l1linf:2.3125e+00 L3_l1linf:2.3438e+00 L4_l1linf:2.1719e+00 L5_l1linf:2.5000e+00 
L6_l1linf:3.0938e+00 L7_l1linf:3.0469e+00 L8_l1linf:3.0625e+00 L9_l1linf:2.9375e+00 L10_l1linf:2.7969e+00 L11_l1linf:2.6562e+00 L12_l1linf:2.7344e+00 L1_spectral:1.5617e-01 L2_spectral:1.5541e-01 L3_spectral:1.5618e-01 L4_spectral:1.5743e-01 L5_spectral:1.5765e-01 L6_spectral:1.5823e-01 L7_spectral:1.5855e-01 L8_spectral:1.5651e-01 L9_spectral:1.5825e-01 L10_spectral:1.5684e-01 L11_spectral:1.5774e-01 L12_spectral:1.5776e-01 train_time:262087ms step_avg:43.68ms +[2025-09-11 06:34:11] [Rank 0] step:6001/10000 train_time:263427ms step_avg:43.90ms +[2025-09-11 06:34:11] [Rank 0] step:6001/10000 train_time:263427ms step_avg:43.90ms +[2025-09-11 06:34:11] [Rank 0] step:6021/10000 train_time:264143ms step_avg:43.87ms +[2025-09-11 06:34:11] [Rank 0] step:6021/10000 train_time:264143ms step_avg:43.87ms +[2025-09-11 06:34:12] [Rank 0] step:6041/10000 train_time:264838ms step_avg:43.84ms +[2025-09-11 06:34:12] [Rank 0] step:6041/10000 train_time:264838ms step_avg:43.84ms +[2025-09-11 06:34:13] [Rank 0] step:6061/10000 train_time:265531ms step_avg:43.81ms +[2025-09-11 06:34:13] [Rank 0] step:6061/10000 train_time:265531ms step_avg:43.81ms +[2025-09-11 06:34:13] [Rank 0] step:6081/10000 train_time:266226ms step_avg:43.78ms +[2025-09-11 06:34:13] [Rank 0] step:6081/10000 train_time:266226ms step_avg:43.78ms +[2025-09-11 06:34:14] [Rank 0] step:6101/10000 train_time:266919ms step_avg:43.75ms +[2025-09-11 06:34:14] [Rank 0] step:6101/10000 train_time:266919ms step_avg:43.75ms +[2025-09-11 06:34:15] [Rank 0] step:6121/10000 train_time:267612ms step_avg:43.72ms +[2025-09-11 06:34:15] [Rank 0] step:6121/10000 train_time:267612ms step_avg:43.72ms +[2025-09-11 06:34:16] [Rank 0] step:6141/10000 train_time:268306ms step_avg:43.69ms +[2025-09-11 06:34:16] [Rank 0] step:6141/10000 train_time:268306ms step_avg:43.69ms +[2025-09-11 06:34:16] [Rank 0] step:6161/10000 train_time:268999ms step_avg:43.66ms +[2025-09-11 06:34:16] [Rank 0] step:6161/10000 train_time:268999ms step_avg:43.66ms 
+[2025-09-11 06:34:17] [Rank 0] step:6181/10000 train_time:270046ms step_avg:43.69ms +[2025-09-11 06:34:17] [Rank 0] step:6181/10000 train_time:270046ms step_avg:43.69ms +[2025-09-11 06:34:18] [Rank 0] step:6201/10000 train_time:270926ms step_avg:43.69ms +[2025-09-11 06:34:18] [Rank 0] step:6201/10000 train_time:270926ms step_avg:43.69ms +[2025-09-11 06:34:19] [Rank 0] step:6221/10000 train_time:271618ms step_avg:43.66ms +[2025-09-11 06:34:19] [Rank 0] step:6221/10000 train_time:271618ms step_avg:43.66ms +[2025-09-11 06:34:20] [Rank 0] step:6241/10000 train_time:272582ms step_avg:43.68ms +[2025-09-11 06:34:20] [Rank 0] step:6241/10000 train_time:272582ms step_avg:43.68ms +[2025-09-11 06:34:20] [Rank 0] step:6261/10000 train_time:273273ms step_avg:43.65ms +[2025-09-11 06:34:20] [Rank 0] step:6261/10000 train_time:273273ms step_avg:43.65ms +[2025-09-11 06:34:21] [Rank 0] step:6281/10000 train_time:273965ms step_avg:43.62ms +[2025-09-11 06:34:21] [Rank 0] step:6281/10000 train_time:273965ms step_avg:43.62ms +[2025-09-11 06:34:22] [Rank 0] step:6301/10000 train_time:274658ms step_avg:43.59ms +[2025-09-11 06:34:22] [Rank 0] step:6301/10000 train_time:274658ms step_avg:43.59ms +[2025-09-11 06:34:23] [Rank 0] step:6321/10000 train_time:275352ms step_avg:43.56ms +[2025-09-11 06:34:23] [Rank 0] step:6321/10000 train_time:275352ms step_avg:43.56ms +[2025-09-11 06:34:23] [Rank 0] step:6341/10000 train_time:276045ms step_avg:43.53ms +[2025-09-11 06:34:23] [Rank 0] step:6341/10000 train_time:276045ms step_avg:43.53ms +[2025-09-11 06:34:24] [Rank 0] step:6361/10000 train_time:276739ms step_avg:43.51ms +[2025-09-11 06:34:24] [Rank 0] step:6361/10000 train_time:276739ms step_avg:43.51ms +[2025-09-11 06:34:25] [Rank 0] step:6381/10000 train_time:277432ms step_avg:43.48ms +[2025-09-11 06:34:25] [Rank 0] step:6381/10000 train_time:277432ms step_avg:43.48ms +[2025-09-11 06:34:25] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 06:34:25] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 06:34:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 06:34:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 06:34:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 06:34:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 06:34:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:34:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:34:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 06:34:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 06:34:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 06:34:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 06:34:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 06:34:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 06:34:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 06:34:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 06:34:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 06:34:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 06:34:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 06:34:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 06:34:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 06:34:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 06:34:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 06:34:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 06:34:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 06:34:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 06:34:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 06:34:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 06:34:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 06:34:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 06:34:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 06:34:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 06:34:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 06:34:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 06:34:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 06:34:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 06:34:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 06:34:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 06:34:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 06:34:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 06:34:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 06:34:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 06:34:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 06:34:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 06:34:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 06:34:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 06:34:35] [Rank 0] PRINT: step:6400/10000 val_loss:4.3790 total_sharp:4.6481e-05 L1_sharp:1.9447e-04 L2_sharp:-2.8811e-06 L3_sharp:1.0926e-05 L4_sharp:7.3340e-06 L5_sharp:1.7649e-05 L6_sharp:2.7766e-05 L7_sharp:2.9685e-05 L8_sharp:5.1825e-05 L9_sharp:4.0112e-05 L10_sharp:7.2437e-05 L11_sharp:9.1948e-05 L12_sharp:7.7873e-04 total_fnorm:1.6100e+02 total_l1_linf:4.1574e+05 total_spectral:8.2500e+01 L1_fnorm:1.0750e+01 L2_fnorm:1.0312e+01 L3_fnorm:9.5000e+00 L4_fnorm:1.0250e+01 L5_fnorm:1.1188e+01 L6_fnorm:1.1312e+01 L7_fnorm:1.1375e+01 L8_fnorm:1.0875e+01 L9_fnorm:1.1250e+01 L10_fnorm:1.1000e+01 L11_fnorm:1.1125e+01 L12_fnorm:1.1188e+01 L1_l1linf:2.5625e+00 L2_l1linf:2.0312e+00 L3_l1linf:2.1562e+00 L4_l1linf:2.0625e+00 L5_l1linf:2.2188e+00 L6_l1linf:2.6875e+00 L7_l1linf:2.7344e+00 L8_l1linf:2.6719e+00 L9_l1linf:2.6250e+00 L10_l1linf:2.4844e+00 L11_l1linf:2.3594e+00 L12_l1linf:2.3906e+00 L1_spectral:1.4374e-01 L2_spectral:1.4207e-01 L3_spectral:1.4326e-01 L4_spectral:1.4389e-01 L5_spectral:1.4414e-01 L6_spectral:1.4476e-01 L7_spectral:1.4547e-01 L8_spectral:1.4018e-01 L9_spectral:1.4542e-01 L10_spectral:1.4381e-01 L11_spectral:1.4526e-01 L12_spectral:1.4513e-01 train_time:278104ms step_avg:43.45ms +[2025-09-11 06:34:35] [Rank 0] PRINT: step:6400/10000 
val_loss:4.3790 total_sharp:4.6481e-05 L1_sharp:1.9447e-04 L2_sharp:-2.8811e-06 L3_sharp:1.0926e-05 L4_sharp:7.3340e-06 L5_sharp:1.7649e-05 L6_sharp:2.7766e-05 L7_sharp:2.9685e-05 L8_sharp:5.1825e-05 L9_sharp:4.0112e-05 L10_sharp:7.2437e-05 L11_sharp:9.1948e-05 L12_sharp:7.7873e-04 total_fnorm:1.6100e+02 total_l1_linf:4.1574e+05 total_spectral:8.2500e+01 L1_fnorm:1.0750e+01 L2_fnorm:1.0312e+01 L3_fnorm:9.5000e+00 L4_fnorm:1.0250e+01 L5_fnorm:1.1188e+01 L6_fnorm:1.1312e+01 L7_fnorm:1.1375e+01 L8_fnorm:1.0875e+01 L9_fnorm:1.1250e+01 L10_fnorm:1.1000e+01 L11_fnorm:1.1125e+01 L12_fnorm:1.1188e+01 L1_l1linf:2.5625e+00 L2_l1linf:2.0312e+00 L3_l1linf:2.1562e+00 L4_l1linf:2.0625e+00 L5_l1linf:2.2188e+00 L6_l1linf:2.6875e+00 L7_l1linf:2.7344e+00 L8_l1linf:2.6719e+00 L9_l1linf:2.6250e+00 L10_l1linf:2.4844e+00 L11_l1linf:2.3594e+00 L12_l1linf:2.3906e+00 L1_spectral:1.4374e-01 L2_spectral:1.4207e-01 L3_spectral:1.4326e-01 L4_spectral:1.4389e-01 L5_spectral:1.4414e-01 L6_spectral:1.4476e-01 L7_spectral:1.4547e-01 L8_spectral:1.4018e-01 L9_spectral:1.4542e-01 L10_spectral:1.4381e-01 L11_spectral:1.4526e-01 L12_spectral:1.4513e-01 train_time:278104ms step_avg:43.45ms +[2025-09-11 06:34:37] [Rank 0] step:6401/10000 train_time:279429ms step_avg:43.65ms +[2025-09-11 06:34:37] [Rank 0] step:6401/10000 train_time:279429ms step_avg:43.65ms +[2025-09-11 06:34:37] [Rank 0] step:6421/10000 train_time:280163ms step_avg:43.63ms +[2025-09-11 06:34:37] [Rank 0] step:6421/10000 train_time:280163ms step_avg:43.63ms +[2025-09-11 06:34:38] [Rank 0] step:6441/10000 train_time:280856ms step_avg:43.60ms +[2025-09-11 06:34:38] [Rank 0] step:6441/10000 train_time:280856ms step_avg:43.60ms +[2025-09-11 06:34:39] [Rank 0] step:6461/10000 train_time:281550ms step_avg:43.58ms +[2025-09-11 06:34:39] [Rank 0] step:6461/10000 train_time:281550ms step_avg:43.58ms +[2025-09-11 06:34:39] [Rank 0] step:6481/10000 train_time:282245ms step_avg:43.55ms +[2025-09-11 06:34:39] [Rank 0] step:6481/10000 
train_time:282245ms step_avg:43.55ms +[2025-09-11 06:34:40] [Rank 0] step:6501/10000 train_time:282941ms step_avg:43.52ms +[2025-09-11 06:34:40] [Rank 0] step:6501/10000 train_time:282941ms step_avg:43.52ms +[2025-09-11 06:34:41] [Rank 0] step:6521/10000 train_time:283635ms step_avg:43.50ms +[2025-09-11 06:34:41] [Rank 0] step:6521/10000 train_time:283635ms step_avg:43.50ms +[2025-09-11 06:34:41] [Rank 0] step:6541/10000 train_time:284327ms step_avg:43.47ms +[2025-09-11 06:34:41] [Rank 0] step:6541/10000 train_time:284327ms step_avg:43.47ms +[2025-09-11 06:34:42] [Rank 0] step:6561/10000 train_time:285021ms step_avg:43.44ms +[2025-09-11 06:34:42] [Rank 0] step:6561/10000 train_time:285021ms step_avg:43.44ms +[2025-09-11 06:34:43] [Rank 0] step:6581/10000 train_time:285715ms step_avg:43.42ms +[2025-09-11 06:34:43] [Rank 0] step:6581/10000 train_time:285715ms step_avg:43.42ms +[2025-09-11 06:34:44] [Rank 0] step:6601/10000 train_time:286408ms step_avg:43.39ms +[2025-09-11 06:34:44] [Rank 0] step:6601/10000 train_time:286408ms step_avg:43.39ms +[2025-09-11 06:34:44] [Rank 0] step:6621/10000 train_time:287101ms step_avg:43.36ms +[2025-09-11 06:34:44] [Rank 0] step:6621/10000 train_time:287101ms step_avg:43.36ms +[2025-09-11 06:34:45] [Rank 0] step:6641/10000 train_time:287795ms step_avg:43.34ms +[2025-09-11 06:34:45] [Rank 0] step:6641/10000 train_time:287795ms step_avg:43.34ms +[2025-09-11 06:34:46] [Rank 0] step:6661/10000 train_time:288489ms step_avg:43.31ms +[2025-09-11 06:34:46] [Rank 0] step:6661/10000 train_time:288489ms step_avg:43.31ms +[2025-09-11 06:34:46] [Rank 0] step:6681/10000 train_time:289190ms step_avg:43.29ms +[2025-09-11 06:34:46] [Rank 0] step:6681/10000 train_time:289190ms step_avg:43.29ms +[2025-09-11 06:34:47] [Rank 0] step:6701/10000 train_time:289889ms step_avg:43.26ms +[2025-09-11 06:34:47] [Rank 0] step:6701/10000 train_time:289889ms step_avg:43.26ms +[2025-09-11 06:34:48] [Rank 0] step:6721/10000 train_time:290601ms step_avg:43.24ms 
+[2025-09-11 06:34:48] [Rank 0] step:6721/10000 train_time:290601ms step_avg:43.24ms +[2025-09-11 06:34:48] [Rank 0] step:6741/10000 train_time:291302ms step_avg:43.21ms +[2025-09-11 06:34:48] [Rank 0] step:6741/10000 train_time:291302ms step_avg:43.21ms +[2025-09-11 06:34:49] [Rank 0] step:6761/10000 train_time:292001ms step_avg:43.19ms +[2025-09-11 06:34:49] [Rank 0] step:6761/10000 train_time:292001ms step_avg:43.19ms +[2025-09-11 06:34:50] [Rank 0] step:6781/10000 train_time:292702ms step_avg:43.17ms +[2025-09-11 06:34:50] [Rank 0] step:6781/10000 train_time:292702ms step_avg:43.17ms +[2025-09-11 06:34:51] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 06:34:51] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 06:34:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 06:34:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 06:34:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 06:34:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 06:34:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:34:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:34:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 06:34:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 06:34:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 06:34:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 06:34:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 06:34:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 06:34:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 06:34:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 06:34:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 06:34:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 06:34:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 06:34:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 06:34:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 06:34:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 06:34:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 06:34:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 06:34:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 06:34:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 06:34:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 06:34:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 06:34:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 06:34:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 06:34:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 06:34:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 06:34:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 06:34:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 06:34:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 06:34:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 06:34:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 06:34:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 06:34:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 06:34:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 06:35:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 06:35:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 06:35:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 06:35:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 06:35:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 06:35:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:35:01] [Rank 0] PRINT: step:6800/10000 val_loss:4.3206 total_sharp:3.7504e-05 L1_sharp:1.6317e-04 L2_sharp:-1.1985e-05 L3_sharp:3.9123e-06 L4_sharp:-8.2530e-07 L5_sharp:3.0773e-05 L6_sharp:3.3047e-05 L7_sharp:2.4882e-05 L8_sharp:5.0179e-05 L9_sharp:4.9360e-05 L10_sharp:7.7752e-05 L11_sharp:9.0768e-05 L12_sharp:5.8890e-04 total_fnorm:1.5600e+02 total_l1_linf:3.9117e+05 total_spectral:7.9500e+01 L1_fnorm:9.5625e+00 L2_fnorm:9.0000e+00 L3_fnorm:8.6250e+00 L4_fnorm:9.1250e+00 L5_fnorm:9.7500e+00 L6_fnorm:1.0000e+01 L7_fnorm:1.0000e+01 L8_fnorm:9.5625e+00 L9_fnorm:9.9375e+00 L10_fnorm:9.7500e+00 L11_fnorm:9.8750e+00 L12_fnorm:9.8125e+00 L1_l1linf:2.2344e+00 L2_l1linf:1.8750e+00 L3_l1linf:1.8984e+00 L4_l1linf:1.8125e+00 L5_l1linf:1.8672e+00 L6_l1linf:2.3594e+00 L7_l1linf:2.3438e+00 L8_l1linf:2.3125e+00 L9_l1linf:2.2500e+00 L10_l1linf:2.1094e+00 L11_l1linf:2.0312e+00 L12_l1linf:2.0625e+00 L1_spectral:1.2980e-01 L2_spectral:1.2811e-01 L3_spectral:1.2899e-01 L4_spectral:1.3047e-01 L5_spectral:1.2969e-01 L6_spectral:1.3076e-01 L7_spectral:1.3127e-01 L8_spectral:1.2716e-01 L9_spectral:1.3107e-01 L10_spectral:1.3044e-01 L11_spectral:1.3234e-01 L12_spectral:1.3100e-01 train_time:293382ms step_avg:43.14ms +[2025-09-11 06:35:01] [Rank 0] PRINT: step:6800/10000 val_loss:4.3206 total_sharp:3.7504e-05 L1_sharp:1.6317e-04 L2_sharp:-1.1985e-05 L3_sharp:3.9123e-06 L4_sharp:-8.2530e-07 L5_sharp:3.0773e-05 L6_sharp:3.3047e-05 L7_sharp:2.4882e-05 L8_sharp:5.0179e-05 L9_sharp:4.9360e-05 L10_sharp:7.7752e-05 L11_sharp:9.0768e-05 L12_sharp:5.8890e-04 total_fnorm:1.5600e+02 total_l1_linf:3.9117e+05 total_spectral:7.9500e+01 L1_fnorm:9.5625e+00 L2_fnorm:9.0000e+00 L3_fnorm:8.6250e+00 L4_fnorm:9.1250e+00 L5_fnorm:9.7500e+00 L6_fnorm:1.0000e+01 L7_fnorm:1.0000e+01 L8_fnorm:9.5625e+00 L9_fnorm:9.9375e+00 L10_fnorm:9.7500e+00 L11_fnorm:9.8750e+00 L12_fnorm:9.8125e+00 L1_l1linf:2.2344e+00 L2_l1linf:1.8750e+00 L3_l1linf:1.8984e+00 L4_l1linf:1.8125e+00 L5_l1linf:1.8672e+00 
L6_l1linf:2.3594e+00 L7_l1linf:2.3438e+00 L8_l1linf:2.3125e+00 L9_l1linf:2.2500e+00 L10_l1linf:2.1094e+00 L11_l1linf:2.0312e+00 L12_l1linf:2.0625e+00 L1_spectral:1.2980e-01 L2_spectral:1.2811e-01 L3_spectral:1.2899e-01 L4_spectral:1.3047e-01 L5_spectral:1.2969e-01 L6_spectral:1.3076e-01 L7_spectral:1.3127e-01 L8_spectral:1.2716e-01 L9_spectral:1.3107e-01 L10_spectral:1.3044e-01 L11_spectral:1.3234e-01 L12_spectral:1.3100e-01 train_time:293382ms step_avg:43.14ms +[2025-09-11 06:35:02] [Rank 0] step:6801/10000 train_time:294662ms step_avg:43.33ms +[2025-09-11 06:35:02] [Rank 0] step:6801/10000 train_time:294662ms step_avg:43.33ms +[2025-09-11 06:35:03] [Rank 0] step:6821/10000 train_time:295389ms step_avg:43.31ms +[2025-09-11 06:35:03] [Rank 0] step:6821/10000 train_time:295389ms step_avg:43.31ms +[2025-09-11 06:35:03] [Rank 0] step:6841/10000 train_time:296093ms step_avg:43.28ms +[2025-09-11 06:35:03] [Rank 0] step:6841/10000 train_time:296093ms step_avg:43.28ms +[2025-09-11 06:35:04] [Rank 0] step:6861/10000 train_time:296796ms step_avg:43.26ms +[2025-09-11 06:35:04] [Rank 0] step:6861/10000 train_time:296796ms step_avg:43.26ms +[2025-09-11 06:35:05] [Rank 0] step:6881/10000 train_time:297498ms step_avg:43.23ms +[2025-09-11 06:35:05] [Rank 0] step:6881/10000 train_time:297498ms step_avg:43.23ms +[2025-09-11 06:35:05] [Rank 0] step:6901/10000 train_time:298198ms step_avg:43.21ms +[2025-09-11 06:35:05] [Rank 0] step:6901/10000 train_time:298198ms step_avg:43.21ms +[2025-09-11 06:35:06] [Rank 0] step:6921/10000 train_time:298897ms step_avg:43.19ms +[2025-09-11 06:35:06] [Rank 0] step:6921/10000 train_time:298897ms step_avg:43.19ms +[2025-09-11 06:35:07] [Rank 0] step:6941/10000 train_time:299599ms step_avg:43.16ms +[2025-09-11 06:35:07] [Rank 0] step:6941/10000 train_time:299599ms step_avg:43.16ms +[2025-09-11 06:35:08] [Rank 0] step:6961/10000 train_time:300299ms step_avg:43.14ms +[2025-09-11 06:35:08] [Rank 0] step:6961/10000 train_time:300299ms step_avg:43.14ms 
+[2025-09-11 06:35:08] [Rank 0] step:6981/10000 train_time:301002ms step_avg:43.12ms +[2025-09-11 06:35:08] [Rank 0] step:6981/10000 train_time:301002ms step_avg:43.12ms +[2025-09-11 06:35:09] [Rank 0] step:7001/10000 train_time:301703ms step_avg:43.09ms +[2025-09-11 06:35:09] [Rank 0] step:7001/10000 train_time:301703ms step_avg:43.09ms +[2025-09-11 06:35:10] [Rank 0] step:7021/10000 train_time:302403ms step_avg:43.07ms +[2025-09-11 06:35:10] [Rank 0] step:7021/10000 train_time:302403ms step_avg:43.07ms +[2025-09-11 06:35:10] [Rank 0] step:7041/10000 train_time:303103ms step_avg:43.05ms +[2025-09-11 06:35:10] [Rank 0] step:7041/10000 train_time:303103ms step_avg:43.05ms +[2025-09-11 06:35:11] [Rank 0] step:7061/10000 train_time:303806ms step_avg:43.03ms +[2025-09-11 06:35:11] [Rank 0] step:7061/10000 train_time:303806ms step_avg:43.03ms +[2025-09-11 06:35:12] [Rank 0] step:7081/10000 train_time:304507ms step_avg:43.00ms +[2025-09-11 06:35:12] [Rank 0] step:7081/10000 train_time:304507ms step_avg:43.00ms +[2025-09-11 06:35:12] [Rank 0] step:7101/10000 train_time:305207ms step_avg:42.98ms +[2025-09-11 06:35:12] [Rank 0] step:7101/10000 train_time:305207ms step_avg:42.98ms +[2025-09-11 06:35:13] [Rank 0] step:7121/10000 train_time:305908ms step_avg:42.96ms +[2025-09-11 06:35:13] [Rank 0] step:7121/10000 train_time:305908ms step_avg:42.96ms +[2025-09-11 06:35:14] [Rank 0] step:7141/10000 train_time:306608ms step_avg:42.94ms +[2025-09-11 06:35:14] [Rank 0] step:7141/10000 train_time:306608ms step_avg:42.94ms +[2025-09-11 06:35:15] [Rank 0] step:7161/10000 train_time:307309ms step_avg:42.91ms +[2025-09-11 06:35:15] [Rank 0] step:7161/10000 train_time:307309ms step_avg:42.91ms +[2025-09-11 06:35:15] [Rank 0] step:7181/10000 train_time:308008ms step_avg:42.89ms +[2025-09-11 06:35:15] [Rank 0] step:7181/10000 train_time:308008ms step_avg:42.89ms +[2025-09-11 06:35:16] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 06:35:16] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 06:35:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 06:35:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 06:35:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 06:35:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 06:35:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:35:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:35:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 06:35:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 06:35:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 06:35:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 06:35:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 06:35:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 06:35:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 06:35:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 06:35:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 06:35:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 06:35:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 06:35:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 06:35:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 06:35:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 06:35:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 06:35:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 06:35:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 06:35:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 06:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 06:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 06:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 06:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 06:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 06:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 06:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 06:35:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 06:35:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 06:35:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 06:35:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 06:35:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 06:35:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 06:35:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 06:35:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 06:35:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 06:35:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 06:35:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 06:35:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 06:35:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 06:35:26] [Rank 0] PRINT: step:7200/10000 val_loss:4.2885 total_sharp:3.6879e-05 L1_sharp:1.2884e-04 L2_sharp:-7.8707e-06 L3_sharp:4.9530e-05 L4_sharp:-3.7946e-07 L5_sharp:3.0027e-06 L6_sharp:2.7040e-05 L7_sharp:3.1305e-05 L8_sharp:6.0100e-05 L9_sharp:4.9516e-05 L10_sharp:7.6854e-05 L11_sharp:9.6510e-05 L12_sharp:1.0042e-03 total_fnorm:1.3700e+02 total_l1_linf:3.2973e+05 total_spectral:7.0000e+01 L1_fnorm:8.3750e+00 L2_fnorm:7.7500e+00 L3_fnorm:7.2188e+00 L4_fnorm:7.8125e+00 L5_fnorm:8.6875e+00 L6_fnorm:8.6875e+00 L7_fnorm:8.6875e+00 L8_fnorm:8.3125e+00 L9_fnorm:8.6250e+00 L10_fnorm:8.5000e+00 L11_fnorm:8.5625e+00 L12_fnorm:8.5625e+00 L1_l1linf:1.8359e+00 L2_l1linf:1.6250e+00 L3_l1linf:1.6953e+00 L4_l1linf:1.6797e+00 L5_l1linf:1.5625e+00 L6_l1linf:1.9766e+00 L7_l1linf:2.0312e+00 L8_l1linf:1.9531e+00 L9_l1linf:1.9297e+00 L10_l1linf:1.7812e+00 L11_l1linf:1.6562e+00 L12_l1linf:1.7734e+00 L1_spectral:1.1451e-01 L2_spectral:1.1307e-01 L3_spectral:1.1422e-01 L4_spectral:1.1443e-01 L5_spectral:1.1391e-01 L6_spectral:1.1585e-01 L7_spectral:1.1586e-01 L8_spectral:1.1296e-01 L9_spectral:1.1613e-01 L10_spectral:1.1719e-01 L11_spectral:1.1681e-01 L12_spectral:1.1561e-01 train_time:308689ms step_avg:42.87ms +[2025-09-11 06:35:26] [Rank 0] PRINT: step:7200/10000 
val_loss:4.2885 total_sharp:3.6879e-05 L1_sharp:1.2884e-04 L2_sharp:-7.8707e-06 L3_sharp:4.9530e-05 L4_sharp:-3.7946e-07 L5_sharp:3.0027e-06 L6_sharp:2.7040e-05 L7_sharp:3.1305e-05 L8_sharp:6.0100e-05 L9_sharp:4.9516e-05 L10_sharp:7.6854e-05 L11_sharp:9.6510e-05 L12_sharp:1.0042e-03 total_fnorm:1.3700e+02 total_l1_linf:3.2973e+05 total_spectral:7.0000e+01 L1_fnorm:8.3750e+00 L2_fnorm:7.7500e+00 L3_fnorm:7.2188e+00 L4_fnorm:7.8125e+00 L5_fnorm:8.6875e+00 L6_fnorm:8.6875e+00 L7_fnorm:8.6875e+00 L8_fnorm:8.3125e+00 L9_fnorm:8.6250e+00 L10_fnorm:8.5000e+00 L11_fnorm:8.5625e+00 L12_fnorm:8.5625e+00 L1_l1linf:1.8359e+00 L2_l1linf:1.6250e+00 L3_l1linf:1.6953e+00 L4_l1linf:1.6797e+00 L5_l1linf:1.5625e+00 L6_l1linf:1.9766e+00 L7_l1linf:2.0312e+00 L8_l1linf:1.9531e+00 L9_l1linf:1.9297e+00 L10_l1linf:1.7812e+00 L11_l1linf:1.6562e+00 L12_l1linf:1.7734e+00 L1_spectral:1.1451e-01 L2_spectral:1.1307e-01 L3_spectral:1.1422e-01 L4_spectral:1.1443e-01 L5_spectral:1.1391e-01 L6_spectral:1.1585e-01 L7_spectral:1.1586e-01 L8_spectral:1.1296e-01 L9_spectral:1.1613e-01 L10_spectral:1.1719e-01 L11_spectral:1.1681e-01 L12_spectral:1.1561e-01 train_time:308689ms step_avg:42.87ms +[2025-09-11 06:35:27] [Rank 0] step:7201/10000 train_time:310031ms step_avg:43.05ms +[2025-09-11 06:35:27] [Rank 0] step:7201/10000 train_time:310031ms step_avg:43.05ms +[2025-09-11 06:35:28] [Rank 0] step:7221/10000 train_time:310750ms step_avg:43.03ms +[2025-09-11 06:35:28] [Rank 0] step:7221/10000 train_time:310750ms step_avg:43.03ms +[2025-09-11 06:35:29] [Rank 0] step:7241/10000 train_time:311453ms step_avg:43.01ms +[2025-09-11 06:35:29] [Rank 0] step:7241/10000 train_time:311453ms step_avg:43.01ms +[2025-09-11 06:35:30] [Rank 0] step:7261/10000 train_time:312155ms step_avg:42.99ms +[2025-09-11 06:35:30] [Rank 0] step:7261/10000 train_time:312155ms step_avg:42.99ms +[2025-09-11 06:35:30] [Rank 0] step:7281/10000 train_time:312862ms step_avg:42.97ms +[2025-09-11 06:35:30] [Rank 0] step:7281/10000 
train_time:312862ms step_avg:42.97ms +[2025-09-11 06:35:31] [Rank 0] step:7301/10000 train_time:313562ms step_avg:42.95ms +[2025-09-11 06:35:31] [Rank 0] step:7301/10000 train_time:313562ms step_avg:42.95ms +[2025-09-11 06:35:32] [Rank 0] step:7321/10000 train_time:314263ms step_avg:42.93ms +[2025-09-11 06:35:32] [Rank 0] step:7321/10000 train_time:314263ms step_avg:42.93ms +[2025-09-11 06:35:32] [Rank 0] step:7341/10000 train_time:314965ms step_avg:42.90ms +[2025-09-11 06:35:32] [Rank 0] step:7341/10000 train_time:314965ms step_avg:42.90ms +[2025-09-11 06:35:33] [Rank 0] step:7361/10000 train_time:315666ms step_avg:42.88ms +[2025-09-11 06:35:33] [Rank 0] step:7361/10000 train_time:315666ms step_avg:42.88ms +[2025-09-11 06:35:34] [Rank 0] step:7381/10000 train_time:316369ms step_avg:42.86ms +[2025-09-11 06:35:34] [Rank 0] step:7381/10000 train_time:316369ms step_avg:42.86ms +[2025-09-11 06:35:34] [Rank 0] step:7401/10000 train_time:317070ms step_avg:42.84ms +[2025-09-11 06:35:34] [Rank 0] step:7401/10000 train_time:317070ms step_avg:42.84ms +[2025-09-11 06:35:35] [Rank 0] step:7421/10000 train_time:317770ms step_avg:42.82ms +[2025-09-11 06:35:35] [Rank 0] step:7421/10000 train_time:317770ms step_avg:42.82ms +[2025-09-11 06:35:36] [Rank 0] step:7441/10000 train_time:318472ms step_avg:42.80ms +[2025-09-11 06:35:36] [Rank 0] step:7441/10000 train_time:318472ms step_avg:42.80ms +[2025-09-11 06:35:37] [Rank 0] step:7461/10000 train_time:319174ms step_avg:42.78ms +[2025-09-11 06:35:37] [Rank 0] step:7461/10000 train_time:319174ms step_avg:42.78ms +[2025-09-11 06:35:37] [Rank 0] step:7481/10000 train_time:319879ms step_avg:42.76ms +[2025-09-11 06:35:37] [Rank 0] step:7481/10000 train_time:319879ms step_avg:42.76ms +[2025-09-11 06:35:38] [Rank 0] step:7501/10000 train_time:320582ms step_avg:42.74ms +[2025-09-11 06:35:38] [Rank 0] step:7501/10000 train_time:320582ms step_avg:42.74ms +[2025-09-11 06:35:39] [Rank 0] step:7521/10000 train_time:321286ms step_avg:42.72ms 
+[2025-09-11 06:35:39] [Rank 0] step:7521/10000 train_time:321286ms step_avg:42.72ms +[2025-09-11 06:35:39] [Rank 0] step:7541/10000 train_time:321986ms step_avg:42.70ms +[2025-09-11 06:35:39] [Rank 0] step:7541/10000 train_time:321986ms step_avg:42.70ms +[2025-09-11 06:35:40] [Rank 0] step:7561/10000 train_time:322689ms step_avg:42.68ms +[2025-09-11 06:35:40] [Rank 0] step:7561/10000 train_time:322689ms step_avg:42.68ms +[2025-09-11 06:35:41] [Rank 0] step:7581/10000 train_time:323393ms step_avg:42.66ms +[2025-09-11 06:35:41] [Rank 0] step:7581/10000 train_time:323393ms step_avg:42.66ms +[2025-09-11 06:35:41] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 06:35:41] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 06:35:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 06:35:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 06:35:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 06:35:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 06:35:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:35:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:35:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 06:35:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 06:35:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 06:35:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 06:35:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 06:35:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 06:35:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 06:35:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 06:35:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 06:35:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 06:35:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 06:35:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 06:35:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 06:35:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 06:35:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 06:35:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 06:35:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 06:35:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 06:35:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 06:35:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 06:35:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 06:35:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 06:35:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 06:35:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 06:35:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 06:35:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 06:35:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 06:35:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 06:35:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 06:35:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 06:35:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 06:35:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 06:35:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 06:35:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 06:35:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 06:35:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 06:35:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 06:35:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:35:51] [Rank 0] PRINT: step:7600/10000 val_loss:4.2449 total_sharp:4.9792e-05 L1_sharp:1.3017e-04 L2_sharp:4.1894e-06 L3_sharp:-2.6900e-05 L4_sharp:7.0741e-06 L5_sharp:2.1640e-05 L6_sharp:4.1145e-05 L7_sharp:2.3062e-05 L8_sharp:5.3068e-05 L9_sharp:5.8295e-05 L10_sharp:7.1796e-05 L11_sharp:8.1796e-05 L12_sharp:2.8610e-03 total_fnorm:1.1200e+02 total_l1_linf:2.5600e+05 total_spectral:5.7250e+01 L1_fnorm:7.0938e+00 L2_fnorm:6.5312e+00 L3_fnorm:6.0938e+00 L4_fnorm:6.5625e+00 L5_fnorm:7.3438e+00 L6_fnorm:7.4062e+00 L7_fnorm:7.3750e+00 L8_fnorm:7.0312e+00 L9_fnorm:7.2812e+00 L10_fnorm:7.1250e+00 L11_fnorm:7.1875e+00 L12_fnorm:7.2500e+00 L1_l1linf:1.4922e+00 L2_l1linf:1.4531e+00 L3_l1linf:1.5312e+00 L4_l1linf:1.4609e+00 L5_l1linf:1.3203e+00 L6_l1linf:1.6250e+00 L7_l1linf:1.6328e+00 L8_l1linf:1.6406e+00 L9_l1linf:1.5156e+00 L10_l1linf:1.4062e+00 L11_l1linf:1.3281e+00 L12_l1linf:1.4453e+00 L1_spectral:9.9443e-02 L2_spectral:9.6909e-02 L3_spectral:9.8116e-02 L4_spectral:9.9093e-02 L5_spectral:1.0070e-01 L6_spectral:9.9310e-02 L7_spectral:1.0025e-01 L8_spectral:9.7161e-02 L9_spectral:1.0090e-01 L10_spectral:1.0033e-01 L11_spectral:1.0191e-01 L12_spectral:1.0099e-01 train_time:324077ms step_avg:42.64ms +[2025-09-11 06:35:51] [Rank 0] PRINT: step:7600/10000 val_loss:4.2449 total_sharp:4.9792e-05 L1_sharp:1.3017e-04 L2_sharp:4.1894e-06 L3_sharp:-2.6900e-05 L4_sharp:7.0741e-06 L5_sharp:2.1640e-05 L6_sharp:4.1145e-05 L7_sharp:2.3062e-05 L8_sharp:5.3068e-05 L9_sharp:5.8295e-05 L10_sharp:7.1796e-05 L11_sharp:8.1796e-05 L12_sharp:2.8610e-03 total_fnorm:1.1200e+02 total_l1_linf:2.5600e+05 total_spectral:5.7250e+01 L1_fnorm:7.0938e+00 L2_fnorm:6.5312e+00 L3_fnorm:6.0938e+00 L4_fnorm:6.5625e+00 L5_fnorm:7.3438e+00 L6_fnorm:7.4062e+00 L7_fnorm:7.3750e+00 L8_fnorm:7.0312e+00 L9_fnorm:7.2812e+00 L10_fnorm:7.1250e+00 L11_fnorm:7.1875e+00 L12_fnorm:7.2500e+00 L1_l1linf:1.4922e+00 L2_l1linf:1.4531e+00 L3_l1linf:1.5312e+00 L4_l1linf:1.4609e+00 L5_l1linf:1.3203e+00 
L6_l1linf:1.6250e+00 L7_l1linf:1.6328e+00 L8_l1linf:1.6406e+00 L9_l1linf:1.5156e+00 L10_l1linf:1.4062e+00 L11_l1linf:1.3281e+00 L12_l1linf:1.4453e+00 L1_spectral:9.9443e-02 L2_spectral:9.6909e-02 L3_spectral:9.8116e-02 L4_spectral:9.9093e-02 L5_spectral:1.0070e-01 L6_spectral:9.9310e-02 L7_spectral:1.0025e-01 L8_spectral:9.7161e-02 L9_spectral:1.0090e-01 L10_spectral:1.0033e-01 L11_spectral:1.0191e-01 L12_spectral:1.0099e-01 train_time:324077ms step_avg:42.64ms +[2025-09-11 06:35:53] [Rank 0] step:7601/10000 train_time:325415ms step_avg:42.81ms +[2025-09-11 06:35:53] [Rank 0] step:7601/10000 train_time:325415ms step_avg:42.81ms +[2025-09-11 06:35:53] [Rank 0] step:7621/10000 train_time:326134ms step_avg:42.79ms +[2025-09-11 06:35:53] [Rank 0] step:7621/10000 train_time:326134ms step_avg:42.79ms +[2025-09-11 06:35:54] [Rank 0] step:7641/10000 train_time:326838ms step_avg:42.77ms +[2025-09-11 06:35:54] [Rank 0] step:7641/10000 train_time:326838ms step_avg:42.77ms +[2025-09-11 06:35:55] [Rank 0] step:7661/10000 train_time:327540ms step_avg:42.75ms +[2025-09-11 06:35:55] [Rank 0] step:7661/10000 train_time:327540ms step_avg:42.75ms +[2025-09-11 06:35:56] [Rank 0] step:7681/10000 train_time:328242ms step_avg:42.73ms +[2025-09-11 06:35:56] [Rank 0] step:7681/10000 train_time:328242ms step_avg:42.73ms +[2025-09-11 06:35:56] [Rank 0] step:7701/10000 train_time:328947ms step_avg:42.71ms +[2025-09-11 06:35:56] [Rank 0] step:7701/10000 train_time:328947ms step_avg:42.71ms +[2025-09-11 06:35:57] [Rank 0] step:7721/10000 train_time:329650ms step_avg:42.70ms +[2025-09-11 06:35:57] [Rank 0] step:7721/10000 train_time:329650ms step_avg:42.70ms +[2025-09-11 06:35:58] [Rank 0] step:7741/10000 train_time:330353ms step_avg:42.68ms +[2025-09-11 06:35:58] [Rank 0] step:7741/10000 train_time:330353ms step_avg:42.68ms +[2025-09-11 06:35:58] [Rank 0] step:7761/10000 train_time:331056ms step_avg:42.66ms +[2025-09-11 06:35:58] [Rank 0] step:7761/10000 train_time:331056ms step_avg:42.66ms 
+[2025-09-11 06:35:59] [Rank 0] step:7781/10000 train_time:331760ms step_avg:42.64ms +[2025-09-11 06:35:59] [Rank 0] step:7781/10000 train_time:331760ms step_avg:42.64ms +[2025-09-11 06:36:00] [Rank 0] step:7801/10000 train_time:332463ms step_avg:42.62ms +[2025-09-11 06:36:00] [Rank 0] step:7801/10000 train_time:332463ms step_avg:42.62ms +[2025-09-11 06:36:01] [Rank 0] step:7821/10000 train_time:333166ms step_avg:42.60ms +[2025-09-11 06:36:01] [Rank 0] step:7821/10000 train_time:333166ms step_avg:42.60ms +[2025-09-11 06:36:01] [Rank 0] step:7841/10000 train_time:333871ms step_avg:42.58ms +[2025-09-11 06:36:01] [Rank 0] step:7841/10000 train_time:333871ms step_avg:42.58ms +[2025-09-11 06:36:02] [Rank 0] step:7861/10000 train_time:334575ms step_avg:42.56ms +[2025-09-11 06:36:02] [Rank 0] step:7861/10000 train_time:334575ms step_avg:42.56ms +[2025-09-11 06:36:03] [Rank 0] step:7881/10000 train_time:335280ms step_avg:42.54ms +[2025-09-11 06:36:03] [Rank 0] step:7881/10000 train_time:335280ms step_avg:42.54ms +[2025-09-11 06:36:03] [Rank 0] step:7901/10000 train_time:335985ms step_avg:42.52ms +[2025-09-11 06:36:03] [Rank 0] step:7901/10000 train_time:335985ms step_avg:42.52ms +[2025-09-11 06:36:04] [Rank 0] step:7921/10000 train_time:336689ms step_avg:42.51ms +[2025-09-11 06:36:04] [Rank 0] step:7921/10000 train_time:336689ms step_avg:42.51ms +[2025-09-11 06:36:05] [Rank 0] step:7941/10000 train_time:337394ms step_avg:42.49ms +[2025-09-11 06:36:05] [Rank 0] step:7941/10000 train_time:337394ms step_avg:42.49ms +[2025-09-11 06:36:05] [Rank 0] step:7961/10000 train_time:338096ms step_avg:42.47ms +[2025-09-11 06:36:05] [Rank 0] step:7961/10000 train_time:338096ms step_avg:42.47ms +[2025-09-11 06:36:06] [Rank 0] step:7981/10000 train_time:338802ms step_avg:42.45ms +[2025-09-11 06:36:06] [Rank 0] step:7981/10000 train_time:338802ms step_avg:42.45ms +[2025-09-11 06:36:07] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 06:36:07] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 06:36:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 06:36:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 06:36:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 06:36:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 06:36:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:36:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:36:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 06:36:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 06:36:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 06:36:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 06:36:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 06:36:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 06:36:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 06:36:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 06:36:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 06:36:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 06:36:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 06:36:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 06:36:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 06:36:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 06:36:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 06:36:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 06:36:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 06:36:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 06:36:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 06:36:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 06:36:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 06:36:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 06:36:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 06:36:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 06:36:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 06:36:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 06:36:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 06:36:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 06:36:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 06:36:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 06:36:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 06:36:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 06:36:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 06:36:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 06:36:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 06:36:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 06:36:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 06:36:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 06:36:17] [Rank 0] PRINT: step:8000/10000 val_loss:4.2051 total_sharp:4.1887e-05 L1_sharp:1.1915e-04 L2_sharp:8.5677e-06 L3_sharp:3.1268e-05 L4_sharp:-2.2143e-06 L5_sharp:2.8598e-05 L6_sharp:5.0050e-05 L7_sharp:2.8000e-05 L8_sharp:4.9539e-05 L9_sharp:5.3878e-05 L10_sharp:7.4181e-05 L11_sharp:8.6863e-05 L12_sharp:2.9029e-03 total_fnorm:1.0050e+02 total_l1_linf:2.1709e+05 total_spectral:5.1250e+01 L1_fnorm:5.8438e+00 L2_fnorm:5.2812e+00 L3_fnorm:4.8750e+00 L4_fnorm:5.3750e+00 L5_fnorm:5.9688e+00 L6_fnorm:6.0000e+00 L7_fnorm:6.0000e+00 L8_fnorm:5.6875e+00 L9_fnorm:5.9062e+00 L10_fnorm:5.7812e+00 L11_fnorm:5.8438e+00 L12_fnorm:5.9062e+00 L1_l1linf:1.1406e+00 L2_l1linf:1.2500e+00 L3_l1linf:1.2656e+00 L4_l1linf:1.1953e+00 L5_l1linf:1.1250e+00 L6_l1linf:1.2422e+00 L7_l1linf:1.2578e+00 L8_l1linf:1.2188e+00 L9_l1linf:1.1797e+00 L10_l1linf:1.1172e+00 L11_l1linf:1.0391e+00 L12_l1linf:1.1484e+00 L1_spectral:8.2437e-02 L2_spectral:7.9998e-02 L3_spectral:8.0700e-02 L4_spectral:8.0977e-02 L5_spectral:8.5001e-02 L6_spectral:8.2080e-02 L7_spectral:8.3073e-02 L8_spectral:8.0903e-02 L9_spectral:8.3678e-02 L10_spectral:8.3114e-02 L11_spectral:8.3745e-02 L12_spectral:8.4231e-02 train_time:339485ms step_avg:42.44ms +[2025-09-11 06:36:17] [Rank 0] PRINT: step:8000/10000 
val_loss:4.2051 total_sharp:4.1887e-05 L1_sharp:1.1915e-04 L2_sharp:8.5677e-06 L3_sharp:3.1268e-05 L4_sharp:-2.2143e-06 L5_sharp:2.8598e-05 L6_sharp:5.0050e-05 L7_sharp:2.8000e-05 L8_sharp:4.9539e-05 L9_sharp:5.3878e-05 L10_sharp:7.4181e-05 L11_sharp:8.6863e-05 L12_sharp:2.9029e-03 total_fnorm:1.0050e+02 total_l1_linf:2.1709e+05 total_spectral:5.1250e+01 L1_fnorm:5.8438e+00 L2_fnorm:5.2812e+00 L3_fnorm:4.8750e+00 L4_fnorm:5.3750e+00 L5_fnorm:5.9688e+00 L6_fnorm:6.0000e+00 L7_fnorm:6.0000e+00 L8_fnorm:5.6875e+00 L9_fnorm:5.9062e+00 L10_fnorm:5.7812e+00 L11_fnorm:5.8438e+00 L12_fnorm:5.9062e+00 L1_l1linf:1.1406e+00 L2_l1linf:1.2500e+00 L3_l1linf:1.2656e+00 L4_l1linf:1.1953e+00 L5_l1linf:1.1250e+00 L6_l1linf:1.2422e+00 L7_l1linf:1.2578e+00 L8_l1linf:1.2188e+00 L9_l1linf:1.1797e+00 L10_l1linf:1.1172e+00 L11_l1linf:1.0391e+00 L12_l1linf:1.1484e+00 L1_spectral:8.2437e-02 L2_spectral:7.9998e-02 L3_spectral:8.0700e-02 L4_spectral:8.0977e-02 L5_spectral:8.5001e-02 L6_spectral:8.2080e-02 L7_spectral:8.3073e-02 L8_spectral:8.0903e-02 L9_spectral:8.3678e-02 L10_spectral:8.3114e-02 L11_spectral:8.3745e-02 L12_spectral:8.4231e-02 train_time:339485ms step_avg:42.44ms +[2025-09-11 06:36:18] [Rank 0] step:8001/10000 train_time:340835ms step_avg:42.60ms +[2025-09-11 06:36:18] [Rank 0] step:8001/10000 train_time:340835ms step_avg:42.60ms +[2025-09-11 06:36:19] [Rank 0] step:8021/10000 train_time:341569ms step_avg:42.58ms +[2025-09-11 06:36:19] [Rank 0] step:8021/10000 train_time:341569ms step_avg:42.58ms +[2025-09-11 06:36:20] [Rank 0] step:8041/10000 train_time:342273ms step_avg:42.57ms +[2025-09-11 06:36:20] [Rank 0] step:8041/10000 train_time:342273ms step_avg:42.57ms +[2025-09-11 06:36:20] [Rank 0] step:8061/10000 train_time:342979ms step_avg:42.55ms +[2025-09-11 06:36:20] [Rank 0] step:8061/10000 train_time:342979ms step_avg:42.55ms +[2025-09-11 06:36:21] [Rank 0] step:8081/10000 train_time:343680ms step_avg:42.53ms +[2025-09-11 06:36:21] [Rank 0] step:8081/10000 
train_time:343680ms step_avg:42.53ms +[2025-09-11 06:36:22] [Rank 0] step:8101/10000 train_time:344382ms step_avg:42.51ms +[2025-09-11 06:36:22] [Rank 0] step:8101/10000 train_time:344382ms step_avg:42.51ms +[2025-09-11 06:36:23] [Rank 0] step:8121/10000 train_time:345088ms step_avg:42.49ms +[2025-09-11 06:36:23] [Rank 0] step:8121/10000 train_time:345088ms step_avg:42.49ms +[2025-09-11 06:36:24] [Rank 0] step:8141/10000 train_time:346254ms step_avg:42.53ms +[2025-09-11 06:36:24] [Rank 0] step:8141/10000 train_time:346254ms step_avg:42.53ms +[2025-09-11 06:36:25] [Rank 0] step:8161/10000 train_time:347087ms step_avg:42.53ms +[2025-09-11 06:36:25] [Rank 0] step:8161/10000 train_time:347087ms step_avg:42.53ms +[2025-09-11 06:36:25] [Rank 0] step:8181/10000 train_time:347801ms step_avg:42.51ms +[2025-09-11 06:36:25] [Rank 0] step:8181/10000 train_time:347801ms step_avg:42.51ms +[2025-09-11 06:36:26] [Rank 0] step:8201/10000 train_time:348797ms step_avg:42.53ms +[2025-09-11 06:36:26] [Rank 0] step:8201/10000 train_time:348797ms step_avg:42.53ms +[2025-09-11 06:36:27] [Rank 0] step:8221/10000 train_time:349507ms step_avg:42.51ms +[2025-09-11 06:36:27] [Rank 0] step:8221/10000 train_time:349507ms step_avg:42.51ms +[2025-09-11 06:36:28] [Rank 0] step:8241/10000 train_time:350225ms step_avg:42.50ms +[2025-09-11 06:36:28] [Rank 0] step:8241/10000 train_time:350225ms step_avg:42.50ms +[2025-09-11 06:36:28] [Rank 0] step:8261/10000 train_time:350934ms step_avg:42.48ms +[2025-09-11 06:36:28] [Rank 0] step:8261/10000 train_time:350934ms step_avg:42.48ms +[2025-09-11 06:36:29] [Rank 0] step:8281/10000 train_time:351640ms step_avg:42.46ms +[2025-09-11 06:36:29] [Rank 0] step:8281/10000 train_time:351640ms step_avg:42.46ms +[2025-09-11 06:36:30] [Rank 0] step:8301/10000 train_time:352349ms step_avg:42.45ms +[2025-09-11 06:36:30] [Rank 0] step:8301/10000 train_time:352349ms step_avg:42.45ms +[2025-09-11 06:36:31] [Rank 0] step:8321/10000 train_time:353057ms step_avg:42.43ms 
+[2025-09-11 06:36:31] [Rank 0] step:8321/10000 train_time:353057ms step_avg:42.43ms +[2025-09-11 06:36:31] [Rank 0] step:8341/10000 train_time:353773ms step_avg:42.41ms +[2025-09-11 06:36:31] [Rank 0] step:8341/10000 train_time:353773ms step_avg:42.41ms +[2025-09-11 06:36:32] [Rank 0] step:8361/10000 train_time:354477ms step_avg:42.40ms +[2025-09-11 06:36:32] [Rank 0] step:8361/10000 train_time:354477ms step_avg:42.40ms +[2025-09-11 06:36:33] [Rank 0] step:8381/10000 train_time:355189ms step_avg:42.38ms +[2025-09-11 06:36:33] [Rank 0] step:8381/10000 train_time:355189ms step_avg:42.38ms +[2025-09-11 06:36:33] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 06:36:33] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 06:36:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 06:36:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 06:36:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 06:36:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 06:36:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:36:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:36:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 06:36:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 06:36:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 06:36:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 06:36:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 06:36:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 06:36:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 06:36:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 06:36:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 06:36:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 06:36:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 06:36:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 06:36:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 06:36:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 06:36:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 06:36:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 06:36:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 06:36:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 06:36:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 06:36:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 06:36:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 06:36:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 06:36:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 06:36:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 06:36:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 06:36:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 06:36:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 06:36:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 06:36:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 06:36:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 06:36:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 06:36:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 06:36:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 06:36:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 06:36:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 06:36:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 06:36:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 06:36:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:36:45] [Rank 0] PRINT: step:8400/10000 val_loss:4.1735 total_sharp:2.8136e-05 L1_sharp:8.1915e-05 L2_sharp:6.6053e-06 L3_sharp:6.8695e-06 L4_sharp:1.4544e-05 L5_sharp:2.0093e-07 L6_sharp:2.9497e-05 L7_sharp:9.9656e-06 L8_sharp:3.8387e-05 L9_sharp:4.5171e-05 L10_sharp:6.7711e-05 L11_sharp:7.4965e-05 L12_sharp:7.8077e-04 total_fnorm:8.0500e+01 total_l1_linf:1.5770e+05 total_spectral:4.1000e+01 L1_fnorm:4.6562e+00 L2_fnorm:4.0938e+00 L3_fnorm:3.7812e+00 L4_fnorm:4.2500e+00 L5_fnorm:4.7188e+00 L6_fnorm:4.6875e+00 L7_fnorm:4.6875e+00 L8_fnorm:4.5000e+00 L9_fnorm:4.5938e+00 L10_fnorm:4.5000e+00 L11_fnorm:4.5312e+00 L12_fnorm:4.6250e+00 L1_l1linf:8.3594e-01 L2_l1linf:1.0391e+00 L3_l1linf:1.1016e+00 L4_l1linf:9.6094e-01 L5_l1linf:8.0469e-01 L6_l1linf:9.3750e-01 L7_l1linf:9.3359e-01 L8_l1linf:9.2188e-01 L9_l1linf:8.6719e-01 L10_l1linf:8.2422e-01 L11_l1linf:7.8125e-01 L12_l1linf:8.6328e-01 L1_spectral:6.6685e-02 L2_spectral:6.6151e-02 L3_spectral:6.3810e-02 L4_spectral:6.7715e-02 L5_spectral:6.9590e-02 L6_spectral:6.4968e-02 L7_spectral:6.5793e-02 L8_spectral:6.5592e-02 L9_spectral:6.5972e-02 L10_spectral:6.6561e-02 L11_spectral:6.6008e-02 L12_spectral:6.7060e-02 train_time:355880ms step_avg:42.37ms +[2025-09-11 06:36:45] [Rank 0] PRINT: step:8400/10000 val_loss:4.1735 total_sharp:2.8136e-05 L1_sharp:8.1915e-05 L2_sharp:6.6053e-06 L3_sharp:6.8695e-06 L4_sharp:1.4544e-05 L5_sharp:2.0093e-07 L6_sharp:2.9497e-05 L7_sharp:9.9656e-06 L8_sharp:3.8387e-05 L9_sharp:4.5171e-05 L10_sharp:6.7711e-05 L11_sharp:7.4965e-05 L12_sharp:7.8077e-04 total_fnorm:8.0500e+01 total_l1_linf:1.5770e+05 total_spectral:4.1000e+01 L1_fnorm:4.6562e+00 L2_fnorm:4.0938e+00 L3_fnorm:3.7812e+00 L4_fnorm:4.2500e+00 L5_fnorm:4.7188e+00 L6_fnorm:4.6875e+00 L7_fnorm:4.6875e+00 L8_fnorm:4.5000e+00 L9_fnorm:4.5938e+00 L10_fnorm:4.5000e+00 L11_fnorm:4.5312e+00 L12_fnorm:4.6250e+00 L1_l1linf:8.3594e-01 L2_l1linf:1.0391e+00 L3_l1linf:1.1016e+00 L4_l1linf:9.6094e-01 L5_l1linf:8.0469e-01 
L6_l1linf:9.3750e-01 L7_l1linf:9.3359e-01 L8_l1linf:9.2188e-01 L9_l1linf:8.6719e-01 L10_l1linf:8.2422e-01 L11_l1linf:7.8125e-01 L12_l1linf:8.6328e-01 L1_spectral:6.6685e-02 L2_spectral:6.6151e-02 L3_spectral:6.3810e-02 L4_spectral:6.7715e-02 L5_spectral:6.9590e-02 L6_spectral:6.4968e-02 L7_spectral:6.5793e-02 L8_spectral:6.5592e-02 L9_spectral:6.5972e-02 L10_spectral:6.6561e-02 L11_spectral:6.6008e-02 L12_spectral:6.7060e-02 train_time:355880ms step_avg:42.37ms +[2025-09-11 06:36:46] [Rank 0] step:8401/10000 train_time:357196ms step_avg:42.52ms +[2025-09-11 06:36:46] [Rank 0] step:8401/10000 train_time:357196ms step_avg:42.52ms +[2025-09-11 06:36:47] [Rank 0] step:8421/10000 train_time:357923ms step_avg:42.50ms +[2025-09-11 06:36:47] [Rank 0] step:8421/10000 train_time:357923ms step_avg:42.50ms +[2025-09-11 06:36:47] [Rank 0] step:8441/10000 train_time:358636ms step_avg:42.49ms +[2025-09-11 06:36:47] [Rank 0] step:8441/10000 train_time:358636ms step_avg:42.49ms +[2025-09-11 06:36:48] [Rank 0] step:8461/10000 train_time:359345ms step_avg:42.47ms +[2025-09-11 06:36:48] [Rank 0] step:8461/10000 train_time:359345ms step_avg:42.47ms +[2025-09-11 06:36:49] [Rank 0] step:8481/10000 train_time:360059ms step_avg:42.45ms +[2025-09-11 06:36:49] [Rank 0] step:8481/10000 train_time:360059ms step_avg:42.45ms +[2025-09-11 06:36:50] [Rank 0] step:8501/10000 train_time:360768ms step_avg:42.44ms +[2025-09-11 06:36:50] [Rank 0] step:8501/10000 train_time:360768ms step_avg:42.44ms +[2025-09-11 06:36:50] [Rank 0] step:8521/10000 train_time:361477ms step_avg:42.42ms +[2025-09-11 06:36:50] [Rank 0] step:8521/10000 train_time:361477ms step_avg:42.42ms +[2025-09-11 06:36:51] [Rank 0] step:8541/10000 train_time:362185ms step_avg:42.41ms +[2025-09-11 06:36:51] [Rank 0] step:8541/10000 train_time:362185ms step_avg:42.41ms +[2025-09-11 06:36:52] [Rank 0] step:8561/10000 train_time:362900ms step_avg:42.39ms +[2025-09-11 06:36:52] [Rank 0] step:8561/10000 train_time:362900ms step_avg:42.39ms 
+[2025-09-11 06:36:52] [Rank 0] step:8581/10000 train_time:363612ms step_avg:42.37ms +[2025-09-11 06:36:52] [Rank 0] step:8581/10000 train_time:363612ms step_avg:42.37ms +[2025-09-11 06:36:53] [Rank 0] step:8601/10000 train_time:364322ms step_avg:42.36ms +[2025-09-11 06:36:53] [Rank 0] step:8601/10000 train_time:364322ms step_avg:42.36ms +[2025-09-11 06:36:54] [Rank 0] step:8621/10000 train_time:365031ms step_avg:42.34ms +[2025-09-11 06:36:54] [Rank 0] step:8621/10000 train_time:365031ms step_avg:42.34ms +[2025-09-11 06:36:55] [Rank 0] step:8641/10000 train_time:365740ms step_avg:42.33ms +[2025-09-11 06:36:55] [Rank 0] step:8641/10000 train_time:365740ms step_avg:42.33ms +[2025-09-11 06:36:55] [Rank 0] step:8661/10000 train_time:366450ms step_avg:42.31ms +[2025-09-11 06:36:55] [Rank 0] step:8661/10000 train_time:366450ms step_avg:42.31ms +[2025-09-11 06:36:56] [Rank 0] step:8681/10000 train_time:367160ms step_avg:42.29ms +[2025-09-11 06:36:56] [Rank 0] step:8681/10000 train_time:367160ms step_avg:42.29ms +[2025-09-11 06:36:57] [Rank 0] step:8701/10000 train_time:367869ms step_avg:42.28ms +[2025-09-11 06:36:57] [Rank 0] step:8701/10000 train_time:367869ms step_avg:42.28ms +[2025-09-11 06:36:57] [Rank 0] step:8721/10000 train_time:368581ms step_avg:42.26ms +[2025-09-11 06:36:57] [Rank 0] step:8721/10000 train_time:368581ms step_avg:42.26ms +[2025-09-11 06:36:58] [Rank 0] step:8741/10000 train_time:369288ms step_avg:42.25ms +[2025-09-11 06:36:58] [Rank 0] step:8741/10000 train_time:369288ms step_avg:42.25ms +[2025-09-11 06:36:59] [Rank 0] step:8761/10000 train_time:370000ms step_avg:42.23ms +[2025-09-11 06:36:59] [Rank 0] step:8761/10000 train_time:370000ms step_avg:42.23ms +[2025-09-11 06:37:00] [Rank 0] step:8781/10000 train_time:370707ms step_avg:42.22ms +[2025-09-11 06:37:00] [Rank 0] step:8781/10000 train_time:370707ms step_avg:42.22ms +[2025-09-11 06:37:00] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 06:37:00] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 06:37:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 06:37:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 06:37:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 06:37:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 06:37:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:37:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:37:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 06:37:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 06:37:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 06:37:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 06:37:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 06:37:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 06:37:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 06:37:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 06:37:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 06:37:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 06:37:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 06:37:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 06:37:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 06:37:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 06:37:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 06:37:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 06:37:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 06:37:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 06:37:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 06:37:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 06:37:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 06:37:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 06:37:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 06:37:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 06:37:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 06:37:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 06:37:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 06:37:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 06:37:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 06:37:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 06:37:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 06:37:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 06:37:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 06:37:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 06:37:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 06:37:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 06:37:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 06:37:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 06:37:10] [Rank 0] PRINT: step:8800/10000 val_loss:4.1578 total_sharp:2.8580e-05 L1_sharp:7.6057e-05 L2_sharp:3.2836e-05 L3_sharp:1.1494e-05 L4_sharp:1.2745e-06 L5_sharp:1.9804e-05 L6_sharp:2.4604e-05 L7_sharp:1.5543e-05 L8_sharp:2.5693e-05 L9_sharp:3.7673e-05 L10_sharp:4.9985e-05 L11_sharp:7.1270e-05 L12_sharp:6.3918e-04 total_fnorm:5.9500e+01 total_l1_linf:1.0598e+05 total_spectral:3.0375e+01 L1_fnorm:3.4688e+00 L2_fnorm:3.0469e+00 L3_fnorm:2.7188e+00 L4_fnorm:3.2188e+00 L5_fnorm:3.5156e+00 L6_fnorm:3.4219e+00 L7_fnorm:3.4062e+00 L8_fnorm:3.2812e+00 L9_fnorm:3.3594e+00 L10_fnorm:3.2812e+00 L11_fnorm:3.2969e+00 L12_fnorm:3.3594e+00 L1_l1linf:5.7812e-01 L2_l1linf:8.0469e-01 L3_l1linf:7.9688e-01 L4_l1linf:8.3984e-01 L5_l1linf:6.2891e-01 L6_l1linf:6.2109e-01 L7_l1linf:6.2500e-01 L8_l1linf:6.1328e-01 L9_l1linf:5.7422e-01 L10_l1linf:5.4297e-01 L11_l1linf:5.2734e-01 L12_l1linf:5.7031e-01 L1_spectral:5.0730e-02 L2_spectral:5.2100e-02 L3_spectral:4.8965e-02 L4_spectral:5.3626e-02 L5_spectral:5.4470e-02 L6_spectral:4.7933e-02 L7_spectral:4.9444e-02 L8_spectral:4.8968e-02 L9_spectral:4.9987e-02 L10_spectral:4.9715e-02 L11_spectral:4.9719e-02 L12_spectral:5.0265e-02 train_time:371396ms step_avg:42.20ms +[2025-09-11 06:37:10] [Rank 0] PRINT: step:8800/10000 
val_loss:4.1578 total_sharp:2.8580e-05 L1_sharp:7.6057e-05 L2_sharp:3.2836e-05 L3_sharp:1.1494e-05 L4_sharp:1.2745e-06 L5_sharp:1.9804e-05 L6_sharp:2.4604e-05 L7_sharp:1.5543e-05 L8_sharp:2.5693e-05 L9_sharp:3.7673e-05 L10_sharp:4.9985e-05 L11_sharp:7.1270e-05 L12_sharp:6.3918e-04 total_fnorm:5.9500e+01 total_l1_linf:1.0598e+05 total_spectral:3.0375e+01 L1_fnorm:3.4688e+00 L2_fnorm:3.0469e+00 L3_fnorm:2.7188e+00 L4_fnorm:3.2188e+00 L5_fnorm:3.5156e+00 L6_fnorm:3.4219e+00 L7_fnorm:3.4062e+00 L8_fnorm:3.2812e+00 L9_fnorm:3.3594e+00 L10_fnorm:3.2812e+00 L11_fnorm:3.2969e+00 L12_fnorm:3.3594e+00 L1_l1linf:5.7812e-01 L2_l1linf:8.0469e-01 L3_l1linf:7.9688e-01 L4_l1linf:8.3984e-01 L5_l1linf:6.2891e-01 L6_l1linf:6.2109e-01 L7_l1linf:6.2500e-01 L8_l1linf:6.1328e-01 L9_l1linf:5.7422e-01 L10_l1linf:5.4297e-01 L11_l1linf:5.2734e-01 L12_l1linf:5.7031e-01 L1_spectral:5.0730e-02 L2_spectral:5.2100e-02 L3_spectral:4.8965e-02 L4_spectral:5.3626e-02 L5_spectral:5.4470e-02 L6_spectral:4.7933e-02 L7_spectral:4.9444e-02 L8_spectral:4.8968e-02 L9_spectral:4.9987e-02 L10_spectral:4.9715e-02 L11_spectral:4.9719e-02 L12_spectral:5.0265e-02 train_time:371396ms step_avg:42.20ms +[2025-09-11 06:37:12] [Rank 0] step:8801/10000 train_time:372807ms step_avg:42.36ms +[2025-09-11 06:37:12] [Rank 0] step:8801/10000 train_time:372807ms step_avg:42.36ms +[2025-09-11 06:37:12] [Rank 0] step:8821/10000 train_time:373546ms step_avg:42.35ms +[2025-09-11 06:37:12] [Rank 0] step:8821/10000 train_time:373546ms step_avg:42.35ms +[2025-09-11 06:37:13] [Rank 0] step:8841/10000 train_time:374258ms step_avg:42.33ms +[2025-09-11 06:37:13] [Rank 0] step:8841/10000 train_time:374258ms step_avg:42.33ms +[2025-09-11 06:37:14] [Rank 0] step:8861/10000 train_time:374969ms step_avg:42.32ms +[2025-09-11 06:37:14] [Rank 0] step:8861/10000 train_time:374969ms step_avg:42.32ms +[2025-09-11 06:37:14] [Rank 0] step:8881/10000 train_time:375679ms step_avg:42.30ms +[2025-09-11 06:37:14] [Rank 0] step:8881/10000 
train_time:375679ms step_avg:42.30ms +[2025-09-11 06:37:15] [Rank 0] step:8901/10000 train_time:376391ms step_avg:42.29ms +[2025-09-11 06:37:15] [Rank 0] step:8901/10000 train_time:376391ms step_avg:42.29ms +[2025-09-11 06:37:16] [Rank 0] step:8921/10000 train_time:377098ms step_avg:42.27ms +[2025-09-11 06:37:16] [Rank 0] step:8921/10000 train_time:377098ms step_avg:42.27ms +[2025-09-11 06:37:17] [Rank 0] step:8941/10000 train_time:377811ms step_avg:42.26ms +[2025-09-11 06:37:17] [Rank 0] step:8941/10000 train_time:377811ms step_avg:42.26ms +[2025-09-11 06:37:17] [Rank 0] step:8961/10000 train_time:378530ms step_avg:42.24ms +[2025-09-11 06:37:17] [Rank 0] step:8961/10000 train_time:378530ms step_avg:42.24ms +[2025-09-11 06:37:18] [Rank 0] step:8981/10000 train_time:379245ms step_avg:42.23ms +[2025-09-11 06:37:18] [Rank 0] step:8981/10000 train_time:379245ms step_avg:42.23ms +[2025-09-11 06:37:19] [Rank 0] step:9001/10000 train_time:379951ms step_avg:42.21ms +[2025-09-11 06:37:19] [Rank 0] step:9001/10000 train_time:379951ms step_avg:42.21ms +[2025-09-11 06:37:19] [Rank 0] step:9021/10000 train_time:380663ms step_avg:42.20ms +[2025-09-11 06:37:19] [Rank 0] step:9021/10000 train_time:380663ms step_avg:42.20ms +[2025-09-11 06:37:20] [Rank 0] step:9041/10000 train_time:381377ms step_avg:42.18ms +[2025-09-11 06:37:20] [Rank 0] step:9041/10000 train_time:381377ms step_avg:42.18ms +[2025-09-11 06:37:21] [Rank 0] step:9061/10000 train_time:382087ms step_avg:42.17ms +[2025-09-11 06:37:21] [Rank 0] step:9061/10000 train_time:382087ms step_avg:42.17ms +[2025-09-11 06:37:22] [Rank 0] step:9081/10000 train_time:382800ms step_avg:42.15ms +[2025-09-11 06:37:22] [Rank 0] step:9081/10000 train_time:382800ms step_avg:42.15ms +[2025-09-11 06:37:22] [Rank 0] step:9101/10000 train_time:383514ms step_avg:42.14ms +[2025-09-11 06:37:22] [Rank 0] step:9101/10000 train_time:383514ms step_avg:42.14ms +[2025-09-11 06:37:23] [Rank 0] step:9121/10000 train_time:384228ms step_avg:42.13ms 
+[2025-09-11 06:37:23] [Rank 0] step:9121/10000 train_time:384228ms step_avg:42.13ms +[2025-09-11 06:37:24] [Rank 0] step:9141/10000 train_time:384938ms step_avg:42.11ms +[2025-09-11 06:37:24] [Rank 0] step:9141/10000 train_time:384938ms step_avg:42.11ms +[2025-09-11 06:37:24] [Rank 0] step:9161/10000 train_time:385651ms step_avg:42.10ms +[2025-09-11 06:37:24] [Rank 0] step:9161/10000 train_time:385651ms step_avg:42.10ms +[2025-09-11 06:37:25] [Rank 0] step:9181/10000 train_time:386364ms step_avg:42.08ms +[2025-09-11 06:37:25] [Rank 0] step:9181/10000 train_time:386364ms step_avg:42.08ms +[2025-09-11 06:37:26] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 06:37:26] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 06:37:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 06:37:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 06:37:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 06:37:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 06:37:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:37:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:37:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 06:37:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 06:37:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 06:37:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 06:37:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 06:37:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 06:37:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 06:37:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 06:37:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 06:37:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 06:37:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 06:37:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 06:37:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 06:37:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 06:37:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 06:37:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 06:37:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 06:37:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 06:37:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 06:37:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 06:37:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 06:37:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 06:37:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 06:37:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 06:37:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 06:37:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 06:37:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 06:37:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 06:37:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 06:37:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 06:37:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 06:37:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 06:37:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 06:37:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 06:37:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 06:37:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 06:37:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 06:37:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:37:36] [Rank 0] PRINT: step:9200/10000 val_loss:4.1322 total_sharp:2.1896e-05 L1_sharp:6.9576e-05 L2_sharp:7.1901e-06 L3_sharp:1.2564e-06 L4_sharp:-5.4843e-06 L5_sharp:8.8892e-06 L6_sharp:1.5307e-05 L7_sharp:5.2322e-06 L8_sharp:2.7646e-05 L9_sharp:3.8735e-05 L10_sharp:5.1749e-05 L11_sharp:6.5482e-05 L12_sharp:5.4011e-04 total_fnorm:4.5000e+01 total_l1_linf:7.0144e+04 total_spectral:2.2750e+01 L1_fnorm:2.3281e+00 L2_fnorm:2.0625e+00 L3_fnorm:1.8672e+00 L4_fnorm:2.2188e+00 L5_fnorm:2.3750e+00 L6_fnorm:2.3125e+00 L7_fnorm:2.2969e+00 L8_fnorm:2.2031e+00 L9_fnorm:2.2500e+00 L10_fnorm:2.1875e+00 L11_fnorm:2.2031e+00 L12_fnorm:2.2656e+00 L1_l1linf:3.3984e-01 L2_l1linf:6.3281e-01 L3_l1linf:6.2891e-01 L4_l1linf:5.1562e-01 L5_l1linf:4.5312e-01 L6_l1linf:3.8086e-01 L7_l1linf:3.7500e-01 L8_l1linf:3.8281e-01 L9_l1linf:3.5156e-01 L10_l1linf:3.2617e-01 L11_l1linf:3.1445e-01 L12_l1linf:3.4570e-01 L1_spectral:3.4781e-02 L2_spectral:3.9265e-02 L3_spectral:3.7865e-02 L4_spectral:3.9875e-02 L5_spectral:3.8458e-02 L6_spectral:3.3295e-02 L7_spectral:3.3904e-02 L8_spectral:3.4640e-02 L9_spectral:3.4166e-02 L10_spectral:3.4056e-02 L11_spectral:3.3920e-02 L12_spectral:3.4853e-02 train_time:387058ms step_avg:42.07ms +[2025-09-11 06:37:36] [Rank 0] PRINT: step:9200/10000 val_loss:4.1322 total_sharp:2.1896e-05 L1_sharp:6.9576e-05 L2_sharp:7.1901e-06 L3_sharp:1.2564e-06 L4_sharp:-5.4843e-06 L5_sharp:8.8892e-06 L6_sharp:1.5307e-05 L7_sharp:5.2322e-06 L8_sharp:2.7646e-05 L9_sharp:3.8735e-05 L10_sharp:5.1749e-05 L11_sharp:6.5482e-05 L12_sharp:5.4011e-04 total_fnorm:4.5000e+01 total_l1_linf:7.0144e+04 total_spectral:2.2750e+01 L1_fnorm:2.3281e+00 L2_fnorm:2.0625e+00 L3_fnorm:1.8672e+00 L4_fnorm:2.2188e+00 L5_fnorm:2.3750e+00 L6_fnorm:2.3125e+00 L7_fnorm:2.2969e+00 L8_fnorm:2.2031e+00 L9_fnorm:2.2500e+00 L10_fnorm:2.1875e+00 L11_fnorm:2.2031e+00 L12_fnorm:2.2656e+00 L1_l1linf:3.3984e-01 L2_l1linf:6.3281e-01 L3_l1linf:6.2891e-01 L4_l1linf:5.1562e-01 L5_l1linf:4.5312e-01 
L6_l1linf:3.8086e-01 L7_l1linf:3.7500e-01 L8_l1linf:3.8281e-01 L9_l1linf:3.5156e-01 L10_l1linf:3.2617e-01 L11_l1linf:3.1445e-01 L12_l1linf:3.4570e-01 L1_spectral:3.4781e-02 L2_spectral:3.9265e-02 L3_spectral:3.7865e-02 L4_spectral:3.9875e-02 L5_spectral:3.8458e-02 L6_spectral:3.3295e-02 L7_spectral:3.3904e-02 L8_spectral:3.4640e-02 L9_spectral:3.4166e-02 L10_spectral:3.4056e-02 L11_spectral:3.3920e-02 L12_spectral:3.4853e-02 train_time:387058ms step_avg:42.07ms +[2025-09-11 06:37:37] [Rank 0] step:9201/10000 train_time:388401ms step_avg:42.21ms +[2025-09-11 06:37:37] [Rank 0] step:9201/10000 train_time:388401ms step_avg:42.21ms +[2025-09-11 06:37:38] [Rank 0] step:9221/10000 train_time:389194ms step_avg:42.21ms +[2025-09-11 06:37:38] [Rank 0] step:9221/10000 train_time:389194ms step_avg:42.21ms +[2025-09-11 06:37:39] [Rank 0] step:9241/10000 train_time:389904ms step_avg:42.19ms +[2025-09-11 06:37:39] [Rank 0] step:9241/10000 train_time:389904ms step_avg:42.19ms +[2025-09-11 06:37:39] [Rank 0] step:9261/10000 train_time:390617ms step_avg:42.18ms +[2025-09-11 06:37:39] [Rank 0] step:9261/10000 train_time:390617ms step_avg:42.18ms +[2025-09-11 06:37:40] [Rank 0] step:9281/10000 train_time:391329ms step_avg:42.16ms +[2025-09-11 06:37:40] [Rank 0] step:9281/10000 train_time:391329ms step_avg:42.16ms +[2025-09-11 06:37:41] [Rank 0] step:9301/10000 train_time:392039ms step_avg:42.15ms +[2025-09-11 06:37:41] [Rank 0] step:9301/10000 train_time:392039ms step_avg:42.15ms +[2025-09-11 06:37:42] [Rank 0] step:9321/10000 train_time:392752ms step_avg:42.14ms +[2025-09-11 06:37:42] [Rank 0] step:9321/10000 train_time:392752ms step_avg:42.14ms +[2025-09-11 06:37:42] [Rank 0] step:9341/10000 train_time:393459ms step_avg:42.12ms +[2025-09-11 06:37:42] [Rank 0] step:9341/10000 train_time:393459ms step_avg:42.12ms +[2025-09-11 06:37:43] [Rank 0] step:9361/10000 train_time:394167ms step_avg:42.11ms +[2025-09-11 06:37:43] [Rank 0] step:9361/10000 train_time:394167ms step_avg:42.11ms 
+[2025-09-11 06:37:44] [Rank 0] step:9381/10000 train_time:394877ms step_avg:42.09ms +[2025-09-11 06:37:44] [Rank 0] step:9381/10000 train_time:394877ms step_avg:42.09ms +[2025-09-11 06:37:44] [Rank 0] step:9401/10000 train_time:395589ms step_avg:42.08ms +[2025-09-11 06:37:44] [Rank 0] step:9401/10000 train_time:395589ms step_avg:42.08ms +[2025-09-11 06:37:45] [Rank 0] step:9421/10000 train_time:396302ms step_avg:42.07ms +[2025-09-11 06:37:45] [Rank 0] step:9421/10000 train_time:396302ms step_avg:42.07ms +[2025-09-11 06:37:46] [Rank 0] step:9441/10000 train_time:397017ms step_avg:42.05ms +[2025-09-11 06:37:46] [Rank 0] step:9441/10000 train_time:397017ms step_avg:42.05ms +[2025-09-11 06:37:47] [Rank 0] step:9461/10000 train_time:397739ms step_avg:42.04ms +[2025-09-11 06:37:47] [Rank 0] step:9461/10000 train_time:397739ms step_avg:42.04ms +[2025-09-11 06:37:47] [Rank 0] step:9481/10000 train_time:398452ms step_avg:42.03ms +[2025-09-11 06:37:47] [Rank 0] step:9481/10000 train_time:398452ms step_avg:42.03ms +[2025-09-11 06:37:48] [Rank 0] step:9501/10000 train_time:399164ms step_avg:42.01ms +[2025-09-11 06:37:48] [Rank 0] step:9501/10000 train_time:399164ms step_avg:42.01ms +[2025-09-11 06:37:49] [Rank 0] step:9521/10000 train_time:399878ms step_avg:42.00ms +[2025-09-11 06:37:49] [Rank 0] step:9521/10000 train_time:399878ms step_avg:42.00ms +[2025-09-11 06:37:49] [Rank 0] step:9541/10000 train_time:400589ms step_avg:41.99ms +[2025-09-11 06:37:49] [Rank 0] step:9541/10000 train_time:400589ms step_avg:41.99ms +[2025-09-11 06:37:50] [Rank 0] step:9561/10000 train_time:401300ms step_avg:41.97ms +[2025-09-11 06:37:50] [Rank 0] step:9561/10000 train_time:401300ms step_avg:41.97ms +[2025-09-11 06:37:51] [Rank 0] step:9581/10000 train_time:402013ms step_avg:41.96ms +[2025-09-11 06:37:51] [Rank 0] step:9581/10000 train_time:402013ms step_avg:41.96ms +[2025-09-11 06:37:52] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 06:37:52] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 06:37:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 06:37:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 06:37:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 06:37:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 06:37:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:37:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:37:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 06:37:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 06:37:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 06:37:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 06:37:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 06:37:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 06:37:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 06:37:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 06:37:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 06:37:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 06:37:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 06:37:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 06:37:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 06:37:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 06:37:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 06:37:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 06:37:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 06:37:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 06:37:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 06:37:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 06:37:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 06:37:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 06:38:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 06:38:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 06:38:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 06:38:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 06:38:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 06:38:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 06:38:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 06:38:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 06:38:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 06:38:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 06:38:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 06:38:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 06:38:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 06:38:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 06:38:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 06:38:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 06:38:02] [Rank 0] PRINT: step:9600/10000 val_loss:4.1175 total_sharp:1.6377e-05 L1_sharp:8.0982e-05 L2_sharp:-3.9794e-06 L3_sharp:2.8206e-05 L4_sharp:-1.1096e-05 L5_sharp:9.8097e-06 L6_sharp:1.1902e-05 L7_sharp:1.2777e-05 L8_sharp:2.0895e-05 L9_sharp:2.1359e-05 L10_sharp:4.0190e-05 L11_sharp:6.2182e-05 L12_sharp:4.6352e-04 total_fnorm:2.6250e+01 total_l1_linf:3.4816e+04 total_spectral:1.3312e+01 L1_fnorm:1.3047e+00 L2_fnorm:1.2031e+00 L3_fnorm:1.0703e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.3438e+00 L6_fnorm:1.2891e+00 L7_fnorm:1.2891e+00 L8_fnorm:1.2344e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2344e+00 L11_fnorm:1.2500e+00 L12_fnorm:1.3125e+00 L1_l1linf:1.6797e-01 L2_l1linf:4.0039e-01 L3_l1linf:3.2227e-01 L4_l1linf:3.6914e-01 L5_l1linf:2.8516e-01 L6_l1linf:1.8457e-01 L7_l1linf:1.9141e-01 L8_l1linf:1.7676e-01 L9_l1linf:1.6797e-01 L10_l1linf:1.6504e-01 L11_l1linf:1.4941e-01 L12_l1linf:1.9238e-01 L1_spectral:1.9691e-02 L2_spectral:2.4913e-02 L3_spectral:2.3611e-02 L4_spectral:2.4542e-02 L5_spectral:2.3249e-02 L6_spectral:1.8742e-02 L7_spectral:1.9059e-02 L8_spectral:1.9983e-02 L9_spectral:1.9479e-02 L10_spectral:1.9474e-02 L11_spectral:1.9451e-02 L12_spectral:2.0536e-02 train_time:402702ms step_avg:41.95ms +[2025-09-11 06:38:02] [Rank 0] PRINT: step:9600/10000 
val_loss:4.1175 total_sharp:1.6377e-05 L1_sharp:8.0982e-05 L2_sharp:-3.9794e-06 L3_sharp:2.8206e-05 L4_sharp:-1.1096e-05 L5_sharp:9.8097e-06 L6_sharp:1.1902e-05 L7_sharp:1.2777e-05 L8_sharp:2.0895e-05 L9_sharp:2.1359e-05 L10_sharp:4.0190e-05 L11_sharp:6.2182e-05 L12_sharp:4.6352e-04 total_fnorm:2.6250e+01 total_l1_linf:3.4816e+04 total_spectral:1.3312e+01 L1_fnorm:1.3047e+00 L2_fnorm:1.2031e+00 L3_fnorm:1.0703e+00 L4_fnorm:1.2500e+00 L5_fnorm:1.3438e+00 L6_fnorm:1.2891e+00 L7_fnorm:1.2891e+00 L8_fnorm:1.2344e+00 L9_fnorm:1.2578e+00 L10_fnorm:1.2344e+00 L11_fnorm:1.2500e+00 L12_fnorm:1.3125e+00 L1_l1linf:1.6797e-01 L2_l1linf:4.0039e-01 L3_l1linf:3.2227e-01 L4_l1linf:3.6914e-01 L5_l1linf:2.8516e-01 L6_l1linf:1.8457e-01 L7_l1linf:1.9141e-01 L8_l1linf:1.7676e-01 L9_l1linf:1.6797e-01 L10_l1linf:1.6504e-01 L11_l1linf:1.4941e-01 L12_l1linf:1.9238e-01 L1_spectral:1.9691e-02 L2_spectral:2.4913e-02 L3_spectral:2.3611e-02 L4_spectral:2.4542e-02 L5_spectral:2.3249e-02 L6_spectral:1.8742e-02 L7_spectral:1.9059e-02 L8_spectral:1.9983e-02 L9_spectral:1.9479e-02 L10_spectral:1.9474e-02 L11_spectral:1.9451e-02 L12_spectral:2.0536e-02 train_time:402702ms step_avg:41.95ms +[2025-09-11 06:38:03] [Rank 0] step:9601/10000 train_time:404073ms step_avg:42.09ms +[2025-09-11 06:38:03] [Rank 0] step:9601/10000 train_time:404073ms step_avg:42.09ms +[2025-09-11 06:38:04] [Rank 0] step:9621/10000 train_time:404807ms step_avg:42.08ms +[2025-09-11 06:38:04] [Rank 0] step:9621/10000 train_time:404807ms step_avg:42.08ms +[2025-09-11 06:38:04] [Rank 0] step:9641/10000 train_time:405524ms step_avg:42.06ms +[2025-09-11 06:38:04] [Rank 0] step:9641/10000 train_time:405524ms step_avg:42.06ms +[2025-09-11 06:38:05] [Rank 0] step:9661/10000 train_time:406248ms step_avg:42.05ms +[2025-09-11 06:38:05] [Rank 0] step:9661/10000 train_time:406248ms step_avg:42.05ms +[2025-09-11 06:38:06] [Rank 0] step:9681/10000 train_time:406965ms step_avg:42.04ms +[2025-09-11 06:38:06] [Rank 0] step:9681/10000 
train_time:406965ms step_avg:42.04ms +[2025-09-11 06:38:07] [Rank 0] step:9701/10000 train_time:407684ms step_avg:42.02ms +[2025-09-11 06:38:07] [Rank 0] step:9701/10000 train_time:407684ms step_avg:42.02ms +[2025-09-11 06:38:07] [Rank 0] step:9721/10000 train_time:408406ms step_avg:42.01ms +[2025-09-11 06:38:07] [Rank 0] step:9721/10000 train_time:408406ms step_avg:42.01ms +[2025-09-11 06:38:08] [Rank 0] step:9741/10000 train_time:409126ms step_avg:42.00ms +[2025-09-11 06:38:08] [Rank 0] step:9741/10000 train_time:409126ms step_avg:42.00ms +[2025-09-11 06:38:09] [Rank 0] step:9761/10000 train_time:409845ms step_avg:41.99ms +[2025-09-11 06:38:09] [Rank 0] step:9761/10000 train_time:409845ms step_avg:41.99ms +[2025-09-11 06:38:09] [Rank 0] step:9781/10000 train_time:410562ms step_avg:41.98ms +[2025-09-11 06:38:09] [Rank 0] step:9781/10000 train_time:410562ms step_avg:41.98ms +[2025-09-11 06:38:10] [Rank 0] step:9801/10000 train_time:411286ms step_avg:41.96ms +[2025-09-11 06:38:10] [Rank 0] step:9801/10000 train_time:411286ms step_avg:41.96ms +[2025-09-11 06:38:11] [Rank 0] step:9821/10000 train_time:412007ms step_avg:41.95ms +[2025-09-11 06:38:11] [Rank 0] step:9821/10000 train_time:412007ms step_avg:41.95ms +[2025-09-11 06:38:12] [Rank 0] step:9841/10000 train_time:412730ms step_avg:41.94ms +[2025-09-11 06:38:12] [Rank 0] step:9841/10000 train_time:412730ms step_avg:41.94ms +[2025-09-11 06:38:12] [Rank 0] step:9861/10000 train_time:413449ms step_avg:41.93ms +[2025-09-11 06:38:12] [Rank 0] step:9861/10000 train_time:413449ms step_avg:41.93ms +[2025-09-11 06:38:13] [Rank 0] step:9881/10000 train_time:414169ms step_avg:41.92ms +[2025-09-11 06:38:13] [Rank 0] step:9881/10000 train_time:414169ms step_avg:41.92ms +[2025-09-11 06:38:14] [Rank 0] step:9901/10000 train_time:414886ms step_avg:41.90ms +[2025-09-11 06:38:14] [Rank 0] step:9901/10000 train_time:414886ms step_avg:41.90ms +[2025-09-11 06:38:15] [Rank 0] step:9921/10000 train_time:415604ms step_avg:41.89ms 
+[2025-09-11 06:38:15] [Rank 0] step:9921/10000 train_time:415604ms step_avg:41.89ms +[2025-09-11 06:38:15] [Rank 0] step:9941/10000 train_time:416328ms step_avg:41.88ms +[2025-09-11 06:38:15] [Rank 0] step:9941/10000 train_time:416328ms step_avg:41.88ms +[2025-09-11 06:38:16] [Rank 0] step:9961/10000 train_time:417051ms step_avg:41.87ms +[2025-09-11 06:38:16] [Rank 0] step:9961/10000 train_time:417051ms step_avg:41.87ms +[2025-09-11 06:38:17] [Rank 0] step:9981/10000 train_time:417770ms step_avg:41.86ms +[2025-09-11 06:38:17] [Rank 0] step:9981/10000 train_time:417770ms step_avg:41.86ms +[2025-09-11 06:38:17] [Rank 0] step:10000/10000 train_time:418461ms step_avg:41.85ms +[2025-09-11 06:38:17] [Rank 0] step:10000/10000 train_time:418461ms step_avg:41.85ms +[2025-09-11 06:38:17] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 06:38:17] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 06:38:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 06:38:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 06:38:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 06:38:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 06:38:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:38:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:38:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 06:38:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 06:38:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 06:38:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 06:38:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 06:38:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 06:38:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 06:38:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 06:38:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 06:38:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 06:38:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 06:38:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 06:38:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 06:38:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 06:38:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 06:38:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 06:38:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 06:38:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 06:38:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 06:38:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 06:38:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 06:38:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 06:38:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 06:38:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 06:38:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 06:38:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 06:38:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 06:38:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 06:38:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 06:38:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 06:38:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 06:38:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 06:38:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 06:38:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 06:38:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 06:38:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 06:38:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 06:38:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:38:28] [Rank 0] PRINT: step:10000/10000 val_loss:4.1173 total_sharp:1.1102e-05 L1_sharp:4.0505e-05 L2_sharp:-9.7498e-07 L3_sharp:6.5453e-06 L4_sharp:9.3036e-07 L5_sharp:1.1376e-05 L6_sharp:1.0426e-05 L7_sharp:6.9243e-06 L8_sharp:2.0696e-05 L9_sharp:1.9511e-05 L10_sharp:2.8379e-05 L11_sharp:4.0478e-05 L12_sharp:3.2915e-04 total_fnorm:1.0125e+01 total_l1_linf:9.7920e+03 total_spectral:5.1562e+00 L1_fnorm:5.1172e-01 L2_fnorm:4.7852e-01 L3_fnorm:4.0820e-01 L4_fnorm:5.0000e-01 L5_fnorm:5.2344e-01 L6_fnorm:5.0391e-01 L7_fnorm:5.0391e-01 L8_fnorm:4.8242e-01 L9_fnorm:4.9023e-01 L10_fnorm:4.8047e-01 L11_fnorm:4.8242e-01 L12_fnorm:5.0391e-01 L1_l1linf:5.1758e-02 L2_l1linf:2.1484e-01 L3_l1linf:1.1475e-01 L4_l1linf:1.3770e-01 L5_l1linf:9.0332e-02 L6_l1linf:5.7617e-02 L7_l1linf:5.5908e-02 L8_l1linf:5.6885e-02 L9_l1linf:5.2246e-02 L10_l1linf:5.0781e-02 L11_l1linf:4.6875e-02 L12_l1linf:5.5420e-02 L1_spectral:7.9937e-03 L2_spectral:1.2269e-02 L3_spectral:1.0355e-02 L4_spectral:1.0842e-02 L5_spectral:9.6311e-03 L6_spectral:7.4652e-03 L7_spectral:7.6224e-03 L8_spectral:8.0285e-03 L9_spectral:7.6863e-03 L10_spectral:7.7696e-03 L11_spectral:7.7975e-03 L12_spectral:7.9975e-03 train_time:418482ms step_avg:41.85ms +[2025-09-11 06:38:28] [Rank 0] PRINT: step:10000/10000 val_loss:4.1173 total_sharp:1.1102e-05 L1_sharp:4.0505e-05 L2_sharp:-9.7498e-07 L3_sharp:6.5453e-06 L4_sharp:9.3036e-07 L5_sharp:1.1376e-05 L6_sharp:1.0426e-05 L7_sharp:6.9243e-06 L8_sharp:2.0696e-05 L9_sharp:1.9511e-05 L10_sharp:2.8379e-05 L11_sharp:4.0478e-05 L12_sharp:3.2915e-04 total_fnorm:1.0125e+01 total_l1_linf:9.7920e+03 total_spectral:5.1562e+00 L1_fnorm:5.1172e-01 L2_fnorm:4.7852e-01 L3_fnorm:4.0820e-01 L4_fnorm:5.0000e-01 L5_fnorm:5.2344e-01 L6_fnorm:5.0391e-01 L7_fnorm:5.0391e-01 L8_fnorm:4.8242e-01 L9_fnorm:4.9023e-01 L10_fnorm:4.8047e-01 L11_fnorm:4.8242e-01 L12_fnorm:5.0391e-01 L1_l1linf:5.1758e-02 L2_l1linf:2.1484e-01 L3_l1linf:1.1475e-01 L4_l1linf:1.3770e-01 L5_l1linf:9.0332e-02 
L6_l1linf:5.7617e-02 L7_l1linf:5.5908e-02 L8_l1linf:5.6885e-02 L9_l1linf:5.2246e-02 L10_l1linf:5.0781e-02 L11_l1linf:4.6875e-02 L12_l1linf:5.5420e-02 L1_spectral:7.9937e-03 L2_spectral:1.2269e-02 L3_spectral:1.0355e-02 L4_spectral:1.0842e-02 L5_spectral:9.6311e-03 L6_spectral:7.4652e-03 L7_spectral:7.6224e-03 L8_spectral:8.0285e-03 L9_spectral:7.6863e-03 L10_spectral:7.7696e-03 L11_spectral:7.7975e-03 L12_spectral:7.9975e-03 train_time:418482ms step_avg:41.85ms +[2025-09-11 06:38:28] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 06:38:28 2025 --- +[2025-09-11 06:38:28] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 06:38:28 2025 --- +[2025-09-11 06:38:28] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB +[2025-09-11 06:38:28] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11652 MiB diff --git a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.2_seed_43/training_log_39d2d6c0-6588-4b79-8028-f758cf2f31c6.txt b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.2_seed_43/training_log_39d2d6c0-6588-4b79-8028-f758cf2f31c6.txt index 9d6f5c58519e81eb85ab3ea6166742a9f394ea0c..48873d52bd3c05280198cc40510fec0fc3d3f142 100644 --- a/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.2_seed_43/training_log_39d2d6c0-6588-4b79-8028-f758cf2f31c6.txt +++ b/sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.05_muon_lr_0.2_seed_43/training_log_39d2d6c0-6588-4b79-8028-f758cf2f31c6.txt @@ -2050,3 +2050,2215 @@ if dist.is_initialized(): [2025-09-11 05:59:33] [Rank 0] PRINT: Muon optimizer is active with 72 parameters. [2025-09-11 05:59:33] [Rank 0] PRINT: Compiling model with TorchInductor... [2025-09-11 05:59:33] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-11 05:59:46] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 05:59:46] [Rank 0] PRINT: Model compilation complete. +[2025-09-11 05:59:46] [Rank 0] PRINT: Starting warmup... 
+[2025-09-11 05:59:46] [Rank 0] PRINT: Starting warmup... +[2025-09-11 06:06:45] [Rank 0] PRINT: Warmup complete. +[2025-09-11 06:06:45] [Rank 0] PRINT: Warmup complete. +[2025-09-11 06:06:45] [Rank 0] PRINT: Starting training... +[2025-09-11 06:06:45] [Rank 0] PRINT: Starting training... +[2025-09-11 06:06:47] [Rank 0] step:21/10000 train_time:1138ms step_avg:54.21ms +[2025-09-11 06:06:47] [Rank 0] step:21/10000 train_time:1138ms step_avg:54.21ms +[2025-09-11 06:06:47] [Rank 0] step:41/10000 train_time:1872ms step_avg:45.65ms +[2025-09-11 06:06:47] [Rank 0] step:41/10000 train_time:1872ms step_avg:45.65ms +[2025-09-11 06:06:48] [Rank 0] step:61/10000 train_time:2604ms step_avg:42.69ms +[2025-09-11 06:06:48] [Rank 0] step:61/10000 train_time:2604ms step_avg:42.69ms +[2025-09-11 06:06:49] [Rank 0] step:81/10000 train_time:3336ms step_avg:41.18ms +[2025-09-11 06:06:49] [Rank 0] step:81/10000 train_time:3336ms step_avg:41.18ms +[2025-09-11 06:06:50] [Rank 0] step:101/10000 train_time:4067ms step_avg:40.27ms +[2025-09-11 06:06:50] [Rank 0] step:101/10000 train_time:4067ms step_avg:40.27ms +[2025-09-11 06:06:50] [Rank 0] step:121/10000 train_time:4799ms step_avg:39.66ms +[2025-09-11 06:06:50] [Rank 0] step:121/10000 train_time:4799ms step_avg:39.66ms +[2025-09-11 06:06:51] [Rank 0] step:141/10000 train_time:5530ms step_avg:39.22ms +[2025-09-11 06:06:51] [Rank 0] step:141/10000 train_time:5530ms step_avg:39.22ms +[2025-09-11 06:06:52] [Rank 0] step:161/10000 train_time:6261ms step_avg:38.89ms +[2025-09-11 06:06:52] [Rank 0] step:161/10000 train_time:6261ms step_avg:38.89ms +[2025-09-11 06:06:53] [Rank 0] step:181/10000 train_time:7561ms step_avg:41.77ms +[2025-09-11 06:06:53] [Rank 0] step:181/10000 train_time:7561ms step_avg:41.77ms +[2025-09-11 06:06:54] [Rank 0] step:201/10000 train_time:8292ms step_avg:41.26ms +[2025-09-11 06:06:54] [Rank 0] step:201/10000 train_time:8292ms step_avg:41.26ms +[2025-09-11 06:06:54] [Rank 0] step:221/10000 train_time:9024ms 
step_avg:40.83ms +[2025-09-11 06:06:54] [Rank 0] step:221/10000 train_time:9024ms step_avg:40.83ms +[2025-09-11 06:06:55] [Rank 0] step:241/10000 train_time:10014ms step_avg:41.55ms +[2025-09-11 06:06:55] [Rank 0] step:241/10000 train_time:10014ms step_avg:41.55ms +[2025-09-11 06:06:56] [Rank 0] step:261/10000 train_time:10746ms step_avg:41.17ms +[2025-09-11 06:06:56] [Rank 0] step:261/10000 train_time:10746ms step_avg:41.17ms +[2025-09-11 06:06:57] [Rank 0] step:281/10000 train_time:11478ms step_avg:40.85ms +[2025-09-11 06:06:57] [Rank 0] step:281/10000 train_time:11478ms step_avg:40.85ms +[2025-09-11 06:06:58] [Rank 0] step:301/10000 train_time:12209ms step_avg:40.56ms +[2025-09-11 06:06:58] [Rank 0] step:301/10000 train_time:12209ms step_avg:40.56ms +[2025-09-11 06:06:58] [Rank 0] step:321/10000 train_time:12945ms step_avg:40.33ms +[2025-09-11 06:06:58] [Rank 0] step:321/10000 train_time:12945ms step_avg:40.33ms +[2025-09-11 06:06:59] [Rank 0] step:341/10000 train_time:13676ms step_avg:40.11ms +[2025-09-11 06:06:59] [Rank 0] step:341/10000 train_time:13676ms step_avg:40.11ms +[2025-09-11 06:07:00] [Rank 0] step:361/10000 train_time:14407ms step_avg:39.91ms +[2025-09-11 06:07:00] [Rank 0] step:361/10000 train_time:14407ms step_avg:39.91ms +[2025-09-11 06:07:01] [Rank 0] step:381/10000 train_time:15139ms step_avg:39.74ms +[2025-09-11 06:07:01] [Rank 0] step:381/10000 train_time:15139ms step_avg:39.74ms +[2025-09-11 06:07:01] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 06:07:01] [Rank 0] PRINT: [Validation @ Step 400] Calculating base validation loss... +[2025-09-11 06:09:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 06:09:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Getting true update direction 'v'... +[2025-09-11 06:10:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... 
+[2025-09-11 06:10:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating update norms... +[2025-09-11 06:10:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:10:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:10:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 06:10:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up layer parameter groups... +[2025-09-11 06:10:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 06:10:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise update norms... +[2025-09-11 06:10:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 06:10:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Setting up HVP calculation in float32... +[2025-09-11 06:11:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 06:11:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating TOTAL sharpness... +[2025-09-11 06:11:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 06:11:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Calculating layer-wise sharpness... +[2025-09-11 06:11:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 06:11:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 12 layers for sharpness... +[2025-09-11 06:11:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 06:11:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_1'... +[2025-09-11 06:11:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... 
+[2025-09-11 06:11:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_2'... +[2025-09-11 06:11:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 06:11:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_3'... +[2025-09-11 06:11:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 06:11:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_4'... +[2025-09-11 06:11:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 06:11:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_5'... +[2025-09-11 06:11:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 06:11:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_6'... +[2025-09-11 06:11:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 06:11:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_7'... +[2025-09-11 06:11:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 06:11:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_8'... +[2025-09-11 06:11:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 06:11:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_9'... +[2025-09-11 06:11:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 06:11:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_10'... +[2025-09-11 06:11:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 06:11:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_11'... +[2025-09-11 06:11:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... 
+[2025-09-11 06:11:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Processing 'layer_12'... +[2025-09-11 06:11:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 06:11:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 400] Analysis complete. Generated 52 metrics. +[2025-09-11 06:11:03] [Rank 0] PRINT: step:400/10000 val_loss:6.0200 total_sharp:3.5176e-04 L1_sharp:1.3299e-03 L2_sharp:1.0606e-04 L3_sharp:2.2769e-04 L4_sharp:1.5961e-05 L5_sharp:2.4725e-05 L6_sharp:5.8943e-05 L7_sharp:4.5926e-05 L8_sharp:8.6883e-05 L9_sharp:1.1649e-04 L10_sharp:1.4044e-04 L11_sharp:2.0073e-04 L12_sharp:3.2436e-03 total_fnorm:2.0924e+02 total_l1_linf:7.0890e+05 total_spectral:1.0466e+02 L1_fnorm:2.3943e+01 L2_fnorm:2.2170e+01 L3_fnorm:2.2093e+01 L4_fnorm:2.2268e+01 L5_fnorm:2.2094e+01 L6_fnorm:2.2093e+01 L7_fnorm:2.2125e+01 L8_fnorm:2.2033e+01 L9_fnorm:2.1820e+01 L10_fnorm:2.1508e+01 L11_fnorm:2.1286e+01 L12_fnorm:2.0320e+01 L1_l1linf:6.3255e+00 L2_l1linf:6.3967e+00 L3_l1linf:6.8789e+00 L4_l1linf:7.0105e+00 L5_l1linf:6.8501e+00 L6_l1linf:6.8150e+00 L7_l1linf:6.6810e+00 L8_l1linf:6.2952e+00 L9_l1linf:6.0142e+00 L10_l1linf:5.5956e+00 L11_l1linf:5.0930e+00 L12_l1linf:4.2298e+00 L1_spectral:2.4855e-01 L2_spectral:2.4682e-01 L3_spectral:2.4593e-01 L4_spectral:2.4596e-01 L5_spectral:2.4628e-01 L6_spectral:2.4665e-01 L7_spectral:2.4632e-01 L8_spectral:2.4636e-01 L9_spectral:2.4592e-01 L10_spectral:2.4623e-01 L11_spectral:2.4619e-01 L12_spectral:2.4674e-01 train_time:15851ms step_avg:39.63ms +[2025-09-11 06:11:03] [Rank 0] PRINT: step:400/10000 val_loss:6.0200 total_sharp:3.5176e-04 L1_sharp:1.3299e-03 L2_sharp:1.0606e-04 L3_sharp:2.2769e-04 L4_sharp:1.5961e-05 L5_sharp:2.4725e-05 L6_sharp:5.8943e-05 L7_sharp:4.5926e-05 L8_sharp:8.6883e-05 L9_sharp:1.1649e-04 L10_sharp:1.4044e-04 L11_sharp:2.0073e-04 L12_sharp:3.2436e-03 total_fnorm:2.0924e+02 total_l1_linf:7.0890e+05 total_spectral:1.0466e+02 L1_fnorm:2.3943e+01 L2_fnorm:2.2170e+01 
L3_fnorm:2.2093e+01 L4_fnorm:2.2268e+01 L5_fnorm:2.2094e+01 L6_fnorm:2.2093e+01 L7_fnorm:2.2125e+01 L8_fnorm:2.2033e+01 L9_fnorm:2.1820e+01 L10_fnorm:2.1508e+01 L11_fnorm:2.1286e+01 L12_fnorm:2.0320e+01 L1_l1linf:6.3255e+00 L2_l1linf:6.3967e+00 L3_l1linf:6.8789e+00 L4_l1linf:7.0105e+00 L5_l1linf:6.8501e+00 L6_l1linf:6.8150e+00 L7_l1linf:6.6810e+00 L8_l1linf:6.2952e+00 L9_l1linf:6.0142e+00 L10_l1linf:5.5956e+00 L11_l1linf:5.0930e+00 L12_l1linf:4.2298e+00 L1_spectral:2.4855e-01 L2_spectral:2.4682e-01 L3_spectral:2.4593e-01 L4_spectral:2.4596e-01 L5_spectral:2.4628e-01 L6_spectral:2.4665e-01 L7_spectral:2.4632e-01 L8_spectral:2.4636e-01 L9_spectral:2.4592e-01 L10_spectral:2.4623e-01 L11_spectral:2.4619e-01 L12_spectral:2.4674e-01 train_time:15851ms step_avg:39.63ms +[2025-09-11 06:12:09] [Rank 0] step:401/10000 train_time:81669ms step_avg:203.66ms +[2025-09-11 06:12:09] [Rank 0] step:401/10000 train_time:81669ms step_avg:203.66ms +[2025-09-11 06:12:12] [Rank 0] step:421/10000 train_time:84047ms step_avg:199.64ms +[2025-09-11 06:12:12] [Rank 0] step:421/10000 train_time:84047ms step_avg:199.64ms +[2025-09-11 06:12:12] [Rank 0] step:441/10000 train_time:84690ms step_avg:192.04ms +[2025-09-11 06:12:12] [Rank 0] step:441/10000 train_time:84690ms step_avg:192.04ms +[2025-09-11 06:12:13] [Rank 0] step:461/10000 train_time:85333ms step_avg:185.10ms +[2025-09-11 06:12:13] [Rank 0] step:461/10000 train_time:85333ms step_avg:185.10ms +[2025-09-11 06:12:14] [Rank 0] step:481/10000 train_time:85974ms step_avg:178.74ms +[2025-09-11 06:12:14] [Rank 0] step:481/10000 train_time:85974ms step_avg:178.74ms +[2025-09-11 06:12:14] [Rank 0] step:501/10000 train_time:86616ms step_avg:172.89ms +[2025-09-11 06:12:14] [Rank 0] step:501/10000 train_time:86616ms step_avg:172.89ms +[2025-09-11 06:12:15] [Rank 0] step:521/10000 train_time:87258ms step_avg:167.48ms +[2025-09-11 06:12:15] [Rank 0] step:521/10000 train_time:87258ms step_avg:167.48ms +[2025-09-11 06:12:15] [Rank 0] step:541/10000 
train_time:87900ms step_avg:162.48ms +[2025-09-11 06:12:15] [Rank 0] step:541/10000 train_time:87900ms step_avg:162.48ms +[2025-09-11 06:12:16] [Rank 0] step:561/10000 train_time:88541ms step_avg:157.83ms +[2025-09-11 06:12:16] [Rank 0] step:561/10000 train_time:88541ms step_avg:157.83ms +[2025-09-11 06:12:17] [Rank 0] step:581/10000 train_time:89183ms step_avg:153.50ms +[2025-09-11 06:12:17] [Rank 0] step:581/10000 train_time:89183ms step_avg:153.50ms +[2025-09-11 06:12:17] [Rank 0] step:601/10000 train_time:89824ms step_avg:149.46ms +[2025-09-11 06:12:17] [Rank 0] step:601/10000 train_time:89824ms step_avg:149.46ms +[2025-09-11 06:12:18] [Rank 0] step:621/10000 train_time:90465ms step_avg:145.68ms +[2025-09-11 06:12:18] [Rank 0] step:621/10000 train_time:90465ms step_avg:145.68ms +[2025-09-11 06:12:19] [Rank 0] step:641/10000 train_time:91107ms step_avg:142.13ms +[2025-09-11 06:12:19] [Rank 0] step:641/10000 train_time:91107ms step_avg:142.13ms +[2025-09-11 06:12:19] [Rank 0] step:661/10000 train_time:91749ms step_avg:138.80ms +[2025-09-11 06:12:19] [Rank 0] step:661/10000 train_time:91749ms step_avg:138.80ms +[2025-09-11 06:12:20] [Rank 0] step:681/10000 train_time:92390ms step_avg:135.67ms +[2025-09-11 06:12:20] [Rank 0] step:681/10000 train_time:92390ms step_avg:135.67ms +[2025-09-11 06:12:21] [Rank 0] step:701/10000 train_time:93031ms step_avg:132.71ms +[2025-09-11 06:12:21] [Rank 0] step:701/10000 train_time:93031ms step_avg:132.71ms +[2025-09-11 06:12:21] [Rank 0] step:721/10000 train_time:93672ms step_avg:129.92ms +[2025-09-11 06:12:21] [Rank 0] step:721/10000 train_time:93672ms step_avg:129.92ms +[2025-09-11 06:12:22] [Rank 0] step:741/10000 train_time:94314ms step_avg:127.28ms +[2025-09-11 06:12:22] [Rank 0] step:741/10000 train_time:94314ms step_avg:127.28ms +[2025-09-11 06:12:23] [Rank 0] step:761/10000 train_time:94960ms step_avg:124.78ms +[2025-09-11 06:12:23] [Rank 0] step:761/10000 train_time:94960ms step_avg:124.78ms +[2025-09-11 06:12:23] [Rank 
0] step:781/10000 train_time:95607ms step_avg:122.42ms +[2025-09-11 06:12:23] [Rank 0] step:781/10000 train_time:95607ms step_avg:122.42ms +[2025-09-11 06:12:24] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 06:12:24] [Rank 0] PRINT: [Validation @ Step 800] Calculating base validation loss... +[2025-09-11 06:12:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 06:12:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Getting true update direction 'v'... +[2025-09-11 06:13:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 06:13:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating update norms... +[2025-09-11 06:13:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:13:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:13:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 06:13:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up layer parameter groups... +[2025-09-11 06:13:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 06:13:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise update norms... +[2025-09-11 06:13:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 06:13:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Setting up HVP calculation in float32... +[2025-09-11 06:14:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 06:14:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating TOTAL sharpness... +[2025-09-11 06:14:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... 
+[2025-09-11 06:14:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Calculating layer-wise sharpness... +[2025-09-11 06:14:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 06:14:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 12 layers for sharpness... +[2025-09-11 06:14:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 06:14:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_1'... +[2025-09-11 06:14:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 06:14:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_2'... +[2025-09-11 06:14:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 06:14:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_3'... +[2025-09-11 06:14:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 06:14:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_4'... +[2025-09-11 06:14:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 06:14:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_5'... +[2025-09-11 06:14:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 06:14:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_6'... +[2025-09-11 06:14:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 06:14:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_7'... +[2025-09-11 06:14:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 06:14:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_8'... +[2025-09-11 06:14:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... 
+[2025-09-11 06:14:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_9'... +[2025-09-11 06:14:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 06:14:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_10'... +[2025-09-11 06:14:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 06:14:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_11'... +[2025-09-11 06:14:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 06:14:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Processing 'layer_12'... +[2025-09-11 06:14:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 06:14:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 800] Analysis complete. Generated 52 metrics. +[2025-09-11 06:14:04] [Rank 0] PRINT: step:800/10000 val_loss:5.6247 total_sharp:2.2944e-04 L1_sharp:9.2625e-04 L2_sharp:7.3469e-05 L3_sharp:6.3986e-06 L4_sharp:6.5342e-05 L5_sharp:3.6385e-05 L6_sharp:4.9038e-05 L7_sharp:6.3322e-05 L8_sharp:8.6057e-05 L9_sharp:6.6893e-05 L10_sharp:8.8382e-05 L11_sharp:1.9322e-04 L12_sharp:1.7610e-03 total_fnorm:2.1600e+02 total_l1_linf:7.0042e+05 total_spectral:1.0800e+02 L1_fnorm:2.3500e+01 L2_fnorm:2.2250e+01 L3_fnorm:2.2750e+01 L4_fnorm:2.2375e+01 L5_fnorm:2.2500e+01 L6_fnorm:2.2625e+01 L7_fnorm:2.2875e+01 L8_fnorm:2.2625e+01 L9_fnorm:2.2625e+01 L10_fnorm:2.2625e+01 L11_fnorm:2.2500e+01 L12_fnorm:2.1125e+01 L1_l1linf:6.5312e+00 L2_l1linf:6.9375e+00 L3_l1linf:6.9375e+00 L4_l1linf:6.6562e+00 L5_l1linf:6.8438e+00 L6_l1linf:6.7812e+00 L7_l1linf:6.7812e+00 L8_l1linf:6.5312e+00 L9_l1linf:6.3438e+00 L10_l1linf:6.1250e+00 L11_l1linf:5.5000e+00 L12_l1linf:4.5000e+00 L1_spectral:2.6523e-01 L2_spectral:2.5950e-01 L3_spectral:2.6023e-01 L4_spectral:2.6117e-01 L5_spectral:2.5955e-01 L6_spectral:2.6111e-01 L7_spectral:2.6064e-01 L8_spectral:2.6124e-01 
L9_spectral:2.6037e-01 L10_spectral:2.6168e-01 L11_spectral:2.6370e-01 L12_spectral:2.6170e-01 train_time:96235ms step_avg:120.29ms +[2025-09-11 06:14:04] [Rank 0] PRINT: step:800/10000 val_loss:5.6247 total_sharp:2.2944e-04 L1_sharp:9.2625e-04 L2_sharp:7.3469e-05 L3_sharp:6.3986e-06 L4_sharp:6.5342e-05 L5_sharp:3.6385e-05 L6_sharp:4.9038e-05 L7_sharp:6.3322e-05 L8_sharp:8.6057e-05 L9_sharp:6.6893e-05 L10_sharp:8.8382e-05 L11_sharp:1.9322e-04 L12_sharp:1.7610e-03 total_fnorm:2.1600e+02 total_l1_linf:7.0042e+05 total_spectral:1.0800e+02 L1_fnorm:2.3500e+01 L2_fnorm:2.2250e+01 L3_fnorm:2.2750e+01 L4_fnorm:2.2375e+01 L5_fnorm:2.2500e+01 L6_fnorm:2.2625e+01 L7_fnorm:2.2875e+01 L8_fnorm:2.2625e+01 L9_fnorm:2.2625e+01 L10_fnorm:2.2625e+01 L11_fnorm:2.2500e+01 L12_fnorm:2.1125e+01 L1_l1linf:6.5312e+00 L2_l1linf:6.9375e+00 L3_l1linf:6.9375e+00 L4_l1linf:6.6562e+00 L5_l1linf:6.8438e+00 L6_l1linf:6.7812e+00 L7_l1linf:6.7812e+00 L8_l1linf:6.5312e+00 L9_l1linf:6.3438e+00 L10_l1linf:6.1250e+00 L11_l1linf:5.5000e+00 L12_l1linf:4.5000e+00 L1_spectral:2.6523e-01 L2_spectral:2.5950e-01 L3_spectral:2.6023e-01 L4_spectral:2.6117e-01 L5_spectral:2.5955e-01 L6_spectral:2.6111e-01 L7_spectral:2.6064e-01 L8_spectral:2.6124e-01 L9_spectral:2.6037e-01 L10_spectral:2.6168e-01 L11_spectral:2.6370e-01 L12_spectral:2.6170e-01 train_time:96235ms step_avg:120.29ms +[2025-09-11 06:14:05] [Rank 0] step:801/10000 train_time:97912ms step_avg:122.24ms +[2025-09-11 06:14:05] [Rank 0] step:801/10000 train_time:97912ms step_avg:122.24ms +[2025-09-11 06:14:06] [Rank 0] step:821/10000 train_time:98586ms step_avg:120.08ms +[2025-09-11 06:14:06] [Rank 0] step:821/10000 train_time:98586ms step_avg:120.08ms +[2025-09-11 06:14:07] [Rank 0] step:841/10000 train_time:99235ms step_avg:118.00ms +[2025-09-11 06:14:07] [Rank 0] step:841/10000 train_time:99235ms step_avg:118.00ms +[2025-09-11 06:14:07] [Rank 0] step:861/10000 train_time:99883ms step_avg:116.01ms +[2025-09-11 06:14:07] [Rank 0] step:861/10000 
train_time:99883ms step_avg:116.01ms +[2025-09-11 06:14:08] [Rank 0] step:881/10000 train_time:100531ms step_avg:114.11ms +[2025-09-11 06:14:08] [Rank 0] step:881/10000 train_time:100531ms step_avg:114.11ms +[2025-09-11 06:14:09] [Rank 0] step:901/10000 train_time:101179ms step_avg:112.30ms +[2025-09-11 06:14:09] [Rank 0] step:901/10000 train_time:101179ms step_avg:112.30ms +[2025-09-11 06:14:09] [Rank 0] step:921/10000 train_time:101826ms step_avg:110.56ms +[2025-09-11 06:14:09] [Rank 0] step:921/10000 train_time:101826ms step_avg:110.56ms +[2025-09-11 06:14:10] [Rank 0] step:941/10000 train_time:102474ms step_avg:108.90ms +[2025-09-11 06:14:10] [Rank 0] step:941/10000 train_time:102474ms step_avg:108.90ms +[2025-09-11 06:14:11] [Rank 0] step:961/10000 train_time:103121ms step_avg:107.31ms +[2025-09-11 06:14:11] [Rank 0] step:961/10000 train_time:103121ms step_avg:107.31ms +[2025-09-11 06:14:11] [Rank 0] step:981/10000 train_time:103768ms step_avg:105.78ms +[2025-09-11 06:14:11] [Rank 0] step:981/10000 train_time:103768ms step_avg:105.78ms +[2025-09-11 06:14:12] [Rank 0] step:1001/10000 train_time:104414ms step_avg:104.31ms +[2025-09-11 06:14:12] [Rank 0] step:1001/10000 train_time:104414ms step_avg:104.31ms +[2025-09-11 06:14:13] [Rank 0] step:1021/10000 train_time:105062ms step_avg:102.90ms +[2025-09-11 06:14:13] [Rank 0] step:1021/10000 train_time:105062ms step_avg:102.90ms +[2025-09-11 06:14:13] [Rank 0] step:1041/10000 train_time:105709ms step_avg:101.55ms +[2025-09-11 06:14:13] [Rank 0] step:1041/10000 train_time:105709ms step_avg:101.55ms +[2025-09-11 06:14:14] [Rank 0] step:1061/10000 train_time:106355ms step_avg:100.24ms +[2025-09-11 06:14:14] [Rank 0] step:1061/10000 train_time:106355ms step_avg:100.24ms +[2025-09-11 06:14:15] [Rank 0] step:1081/10000 train_time:107156ms step_avg:99.13ms +[2025-09-11 06:14:15] [Rank 0] step:1081/10000 train_time:107156ms step_avg:99.13ms +[2025-09-11 06:14:16] [Rank 0] step:1101/10000 train_time:108176ms step_avg:98.25ms 
+[2025-09-11 06:14:16] [Rank 0] step:1101/10000 train_time:108176ms step_avg:98.25ms +[2025-09-11 06:14:16] [Rank 0] step:1121/10000 train_time:108824ms step_avg:97.08ms +[2025-09-11 06:14:16] [Rank 0] step:1121/10000 train_time:108824ms step_avg:97.08ms +[2025-09-11 06:14:17] [Rank 0] step:1141/10000 train_time:109625ms step_avg:96.08ms +[2025-09-11 06:14:17] [Rank 0] step:1141/10000 train_time:109625ms step_avg:96.08ms +[2025-09-11 06:14:18] [Rank 0] step:1161/10000 train_time:110419ms step_avg:95.11ms +[2025-09-11 06:14:18] [Rank 0] step:1161/10000 train_time:110419ms step_avg:95.11ms +[2025-09-11 06:14:19] [Rank 0] step:1181/10000 train_time:111067ms step_avg:94.04ms +[2025-09-11 06:14:19] [Rank 0] step:1181/10000 train_time:111067ms step_avg:94.04ms +[2025-09-11 06:14:19] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 06:14:19] [Rank 0] PRINT: [Validation @ Step 1200] Calculating base validation loss... +[2025-09-11 06:14:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 06:14:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Getting true update direction 'v'... +[2025-09-11 06:14:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 06:14:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating update norms... +[2025-09-11 06:14:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:14:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:14:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 06:14:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up layer parameter groups... +[2025-09-11 06:14:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... 
+[2025-09-11 06:14:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise update norms... +[2025-09-11 06:14:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 06:14:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Setting up HVP calculation in float32... +[2025-09-11 06:14:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 06:14:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating TOTAL sharpness... +[2025-09-11 06:14:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 06:14:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Calculating layer-wise sharpness... +[2025-09-11 06:14:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 06:14:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 12 layers for sharpness... +[2025-09-11 06:14:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 06:14:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_1'... +[2025-09-11 06:14:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 06:14:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_2'... +[2025-09-11 06:14:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 06:14:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_3'... +[2025-09-11 06:14:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 06:14:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_4'... +[2025-09-11 06:14:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... +[2025-09-11 06:14:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_5'... 
+[2025-09-11 06:14:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 06:14:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_6'... +[2025-09-11 06:14:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 06:14:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_7'... +[2025-09-11 06:14:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 06:14:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_8'... +[2025-09-11 06:14:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 06:14:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_9'... +[2025-09-11 06:14:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 06:14:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_10'... +[2025-09-11 06:14:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 06:14:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_11'... +[2025-09-11 06:14:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 06:14:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Processing 'layer_12'... +[2025-09-11 06:14:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. +[2025-09-11 06:14:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:14:30] [Rank 0] PRINT: step:1200/10000 val_loss:5.3266 total_sharp:1.3372e-04 L1_sharp:3.8160e-04 L2_sharp:5.9868e-05 L3_sharp:3.1521e-05 L4_sharp:2.8770e-05 L5_sharp:2.0796e-05 L6_sharp:2.9122e-05 L7_sharp:1.6530e-05 L8_sharp:3.9872e-05 L9_sharp:3.4341e-05 L10_sharp:5.2209e-05 L11_sharp:1.1658e-04 L12_sharp:5.3695e-04 total_fnorm:2.1800e+02 total_l1_linf:6.9632e+05 total_spectral:1.0850e+02 L1_fnorm:2.3750e+01 L2_fnorm:2.3125e+01 L3_fnorm:2.3375e+01 L4_fnorm:2.3500e+01 L5_fnorm:2.3250e+01 L6_fnorm:2.3375e+01 L7_fnorm:2.3625e+01 L8_fnorm:2.3375e+01 L9_fnorm:2.3375e+01 L10_fnorm:2.3375e+01 L11_fnorm:2.3500e+01 L12_fnorm:2.2875e+01 L1_l1linf:6.5312e+00 L2_l1linf:6.8438e+00 L3_l1linf:6.7500e+00 L4_l1linf:6.6875e+00 L5_l1linf:6.6250e+00 L6_l1linf:6.6562e+00 L7_l1linf:6.5312e+00 L8_l1linf:6.2812e+00 L9_l1linf:6.1875e+00 L10_l1linf:6.0312e+00 L11_l1linf:5.7188e+00 L12_l1linf:5.0938e+00 L1_spectral:2.7250e-01 L2_spectral:2.6833e-01 L3_spectral:2.6820e-01 L4_spectral:2.6814e-01 L5_spectral:2.6849e-01 L6_spectral:2.6893e-01 L7_spectral:2.6955e-01 L8_spectral:2.7106e-01 L9_spectral:2.6945e-01 L10_spectral:2.7047e-01 L11_spectral:2.6980e-01 L12_spectral:2.7048e-01 train_time:111700ms step_avg:93.08ms +[2025-09-11 06:14:30] [Rank 0] PRINT: step:1200/10000 val_loss:5.3266 total_sharp:1.3372e-04 L1_sharp:3.8160e-04 L2_sharp:5.9868e-05 L3_sharp:3.1521e-05 L4_sharp:2.8770e-05 L5_sharp:2.0796e-05 L6_sharp:2.9122e-05 L7_sharp:1.6530e-05 L8_sharp:3.9872e-05 L9_sharp:3.4341e-05 L10_sharp:5.2209e-05 L11_sharp:1.1658e-04 L12_sharp:5.3695e-04 total_fnorm:2.1800e+02 total_l1_linf:6.9632e+05 total_spectral:1.0850e+02 L1_fnorm:2.3750e+01 L2_fnorm:2.3125e+01 L3_fnorm:2.3375e+01 L4_fnorm:2.3500e+01 L5_fnorm:2.3250e+01 L6_fnorm:2.3375e+01 L7_fnorm:2.3625e+01 L8_fnorm:2.3375e+01 L9_fnorm:2.3375e+01 L10_fnorm:2.3375e+01 L11_fnorm:2.3500e+01 L12_fnorm:2.2875e+01 L1_l1linf:6.5312e+00 L2_l1linf:6.8438e+00 L3_l1linf:6.7500e+00 L4_l1linf:6.6875e+00 L5_l1linf:6.6250e+00 
L6_l1linf:6.6562e+00 L7_l1linf:6.5312e+00 L8_l1linf:6.2812e+00 L9_l1linf:6.1875e+00 L10_l1linf:6.0312e+00 L11_l1linf:5.7188e+00 L12_l1linf:5.0938e+00 L1_spectral:2.7250e-01 L2_spectral:2.6833e-01 L3_spectral:2.6820e-01 L4_spectral:2.6814e-01 L5_spectral:2.6849e-01 L6_spectral:2.6893e-01 L7_spectral:2.6955e-01 L8_spectral:2.7106e-01 L9_spectral:2.6945e-01 L10_spectral:2.7047e-01 L11_spectral:2.6980e-01 L12_spectral:2.7048e-01 train_time:111700ms step_avg:93.08ms +[2025-09-11 06:14:32] [Rank 0] step:1201/10000 train_time:113327ms step_avg:94.36ms +[2025-09-11 06:14:32] [Rank 0] step:1201/10000 train_time:113327ms step_avg:94.36ms +[2025-09-11 06:14:33] [Rank 0] step:1221/10000 train_time:113968ms step_avg:93.34ms +[2025-09-11 06:14:33] [Rank 0] step:1221/10000 train_time:113968ms step_avg:93.34ms +[2025-09-11 06:14:33] [Rank 0] step:1241/10000 train_time:114618ms step_avg:92.36ms +[2025-09-11 06:14:33] [Rank 0] step:1241/10000 train_time:114618ms step_avg:92.36ms +[2025-09-11 06:14:34] [Rank 0] step:1261/10000 train_time:115265ms step_avg:91.41ms +[2025-09-11 06:14:34] [Rank 0] step:1261/10000 train_time:115265ms step_avg:91.41ms +[2025-09-11 06:14:34] [Rank 0] step:1281/10000 train_time:115914ms step_avg:90.49ms +[2025-09-11 06:14:34] [Rank 0] step:1281/10000 train_time:115914ms step_avg:90.49ms +[2025-09-11 06:14:35] [Rank 0] step:1301/10000 train_time:116563ms step_avg:89.59ms +[2025-09-11 06:14:35] [Rank 0] step:1301/10000 train_time:116563ms step_avg:89.59ms +[2025-09-11 06:14:36] [Rank 0] step:1321/10000 train_time:117211ms step_avg:88.73ms +[2025-09-11 06:14:36] [Rank 0] step:1321/10000 train_time:117211ms step_avg:88.73ms +[2025-09-11 06:14:36] [Rank 0] step:1341/10000 train_time:117859ms step_avg:87.89ms +[2025-09-11 06:14:36] [Rank 0] step:1341/10000 train_time:117859ms step_avg:87.89ms +[2025-09-11 06:14:37] [Rank 0] step:1361/10000 train_time:118507ms step_avg:87.07ms +[2025-09-11 06:14:37] [Rank 0] step:1361/10000 train_time:118507ms step_avg:87.07ms 
+[2025-09-11 06:14:38] [Rank 0] step:1381/10000 train_time:119155ms step_avg:86.28ms +[2025-09-11 06:14:38] [Rank 0] step:1381/10000 train_time:119155ms step_avg:86.28ms +[2025-09-11 06:14:38] [Rank 0] step:1401/10000 train_time:119803ms step_avg:85.51ms +[2025-09-11 06:14:38] [Rank 0] step:1401/10000 train_time:119803ms step_avg:85.51ms +[2025-09-11 06:14:39] [Rank 0] step:1421/10000 train_time:120453ms step_avg:84.77ms +[2025-09-11 06:14:39] [Rank 0] step:1421/10000 train_time:120453ms step_avg:84.77ms +[2025-09-11 06:14:40] [Rank 0] step:1441/10000 train_time:121101ms step_avg:84.04ms +[2025-09-11 06:14:40] [Rank 0] step:1441/10000 train_time:121101ms step_avg:84.04ms +[2025-09-11 06:14:40] [Rank 0] step:1461/10000 train_time:121747ms step_avg:83.33ms +[2025-09-11 06:14:40] [Rank 0] step:1461/10000 train_time:121747ms step_avg:83.33ms +[2025-09-11 06:14:41] [Rank 0] step:1481/10000 train_time:122395ms step_avg:82.64ms +[2025-09-11 06:14:41] [Rank 0] step:1481/10000 train_time:122395ms step_avg:82.64ms +[2025-09-11 06:14:42] [Rank 0] step:1501/10000 train_time:123046ms step_avg:81.98ms +[2025-09-11 06:14:42] [Rank 0] step:1501/10000 train_time:123046ms step_avg:81.98ms +[2025-09-11 06:14:42] [Rank 0] step:1521/10000 train_time:123698ms step_avg:81.33ms +[2025-09-11 06:14:42] [Rank 0] step:1521/10000 train_time:123698ms step_avg:81.33ms +[2025-09-11 06:14:43] [Rank 0] step:1541/10000 train_time:124350ms step_avg:80.69ms +[2025-09-11 06:14:43] [Rank 0] step:1541/10000 train_time:124350ms step_avg:80.69ms +[2025-09-11 06:14:44] [Rank 0] step:1561/10000 train_time:125001ms step_avg:80.08ms +[2025-09-11 06:14:44] [Rank 0] step:1561/10000 train_time:125001ms step_avg:80.08ms +[2025-09-11 06:14:44] [Rank 0] step:1581/10000 train_time:125654ms step_avg:79.48ms +[2025-09-11 06:14:44] [Rank 0] step:1581/10000 train_time:125654ms step_avg:79.48ms +[2025-09-11 06:14:45] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... 
+[2025-09-11 06:14:45] [Rank 0] PRINT: [Validation @ Step 1600] Calculating base validation loss... +[2025-09-11 06:14:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 06:14:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Getting true update direction 'v'... +[2025-09-11 06:14:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 06:14:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating update norms... +[2025-09-11 06:14:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:14:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:14:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 06:14:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up layer parameter groups... +[2025-09-11 06:14:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 06:14:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise update norms... +[2025-09-11 06:14:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 06:14:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Setting up HVP calculation in float32... +[2025-09-11 06:14:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 06:14:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating TOTAL sharpness... +[2025-09-11 06:14:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 06:14:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Calculating layer-wise sharpness... +[2025-09-11 06:14:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... 
+[2025-09-11 06:14:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 12 layers for sharpness... +[2025-09-11 06:14:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 06:14:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_1'... +[2025-09-11 06:14:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 06:14:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_2'... +[2025-09-11 06:14:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 06:14:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_3'... +[2025-09-11 06:14:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 06:14:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_4'... +[2025-09-11 06:14:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 06:14:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_5'... +[2025-09-11 06:14:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 06:14:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_6'... +[2025-09-11 06:14:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 06:14:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_7'... +[2025-09-11 06:14:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 06:14:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_8'... +[2025-09-11 06:14:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 06:14:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_9'... +[2025-09-11 06:14:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... 
+[2025-09-11 06:14:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_10'... +[2025-09-11 06:14:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 06:14:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_11'... +[2025-09-11 06:14:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 06:14:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Processing 'layer_12'... +[2025-09-11 06:14:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 06:14:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 1600] Analysis complete. Generated 52 metrics. +[2025-09-11 06:14:56] [Rank 0] PRINT: step:1600/10000 val_loss:5.1817 total_sharp:1.1907e-04 L1_sharp:3.1353e-04 L2_sharp:4.2579e-05 L3_sharp:1.9461e-05 L4_sharp:2.3424e-06 L5_sharp:1.3988e-05 L6_sharp:4.0004e-06 L7_sharp:2.6303e-05 L8_sharp:3.5818e-05 L9_sharp:2.7250e-05 L10_sharp:6.0447e-05 L11_sharp:7.5738e-05 L12_sharp:7.9472e-04 total_fnorm:2.0700e+02 total_l1_linf:6.5946e+05 total_spectral:1.0400e+02 L1_fnorm:2.3625e+01 L2_fnorm:2.3375e+01 L3_fnorm:2.3625e+01 L4_fnorm:2.3750e+01 L5_fnorm:2.3375e+01 L6_fnorm:2.3750e+01 L7_fnorm:2.3875e+01 L8_fnorm:2.3625e+01 L9_fnorm:2.3875e+01 L10_fnorm:2.3875e+01 L11_fnorm:2.3875e+01 L12_fnorm:2.3500e+01 L1_l1linf:6.4688e+00 L2_l1linf:6.5625e+00 L3_l1linf:6.3750e+00 L4_l1linf:6.4688e+00 L5_l1linf:6.3125e+00 L6_l1linf:6.4688e+00 L7_l1linf:6.3750e+00 L8_l1linf:6.1875e+00 L9_l1linf:6.0312e+00 L10_l1linf:5.8750e+00 L11_l1linf:5.7500e+00 L12_l1linf:5.2812e+00 L1_spectral:2.7675e-01 L2_spectral:2.7362e-01 L3_spectral:2.7425e-01 L4_spectral:2.7598e-01 L5_spectral:2.7708e-01 L6_spectral:2.7626e-01 L7_spectral:2.7524e-01 L8_spectral:2.7778e-01 L9_spectral:2.7614e-01 L10_spectral:2.7859e-01 L11_spectral:2.7473e-01 L12_spectral:2.7579e-01 train_time:126288ms step_avg:78.93ms +[2025-09-11 06:14:56] [Rank 0] PRINT: step:1600/10000 
val_loss:5.1817 total_sharp:1.1907e-04 L1_sharp:3.1353e-04 L2_sharp:4.2579e-05 L3_sharp:1.9461e-05 L4_sharp:2.3424e-06 L5_sharp:1.3988e-05 L6_sharp:4.0004e-06 L7_sharp:2.6303e-05 L8_sharp:3.5818e-05 L9_sharp:2.7250e-05 L10_sharp:6.0447e-05 L11_sharp:7.5738e-05 L12_sharp:7.9472e-04 total_fnorm:2.0700e+02 total_l1_linf:6.5946e+05 total_spectral:1.0400e+02 L1_fnorm:2.3625e+01 L2_fnorm:2.3375e+01 L3_fnorm:2.3625e+01 L4_fnorm:2.3750e+01 L5_fnorm:2.3375e+01 L6_fnorm:2.3750e+01 L7_fnorm:2.3875e+01 L8_fnorm:2.3625e+01 L9_fnorm:2.3875e+01 L10_fnorm:2.3875e+01 L11_fnorm:2.3875e+01 L12_fnorm:2.3500e+01 L1_l1linf:6.4688e+00 L2_l1linf:6.5625e+00 L3_l1linf:6.3750e+00 L4_l1linf:6.4688e+00 L5_l1linf:6.3125e+00 L6_l1linf:6.4688e+00 L7_l1linf:6.3750e+00 L8_l1linf:6.1875e+00 L9_l1linf:6.0312e+00 L10_l1linf:5.8750e+00 L11_l1linf:5.7500e+00 L12_l1linf:5.2812e+00 L1_spectral:2.7675e-01 L2_spectral:2.7362e-01 L3_spectral:2.7425e-01 L4_spectral:2.7598e-01 L5_spectral:2.7708e-01 L6_spectral:2.7626e-01 L7_spectral:2.7524e-01 L8_spectral:2.7778e-01 L9_spectral:2.7614e-01 L10_spectral:2.7859e-01 L11_spectral:2.7473e-01 L12_spectral:2.7579e-01 train_time:126288ms step_avg:78.93ms +[2025-09-11 06:14:57] [Rank 0] step:1601/10000 train_time:127926ms step_avg:79.90ms +[2025-09-11 06:14:57] [Rank 0] step:1601/10000 train_time:127926ms step_avg:79.90ms +[2025-09-11 06:14:58] [Rank 0] step:1621/10000 train_time:128606ms step_avg:79.34ms +[2025-09-11 06:14:58] [Rank 0] step:1621/10000 train_time:128606ms step_avg:79.34ms +[2025-09-11 06:14:59] [Rank 0] step:1641/10000 train_time:129259ms step_avg:78.77ms +[2025-09-11 06:14:59] [Rank 0] step:1641/10000 train_time:129259ms step_avg:78.77ms +[2025-09-11 06:14:59] [Rank 0] step:1661/10000 train_time:129911ms step_avg:78.21ms +[2025-09-11 06:14:59] [Rank 0] step:1661/10000 train_time:129911ms step_avg:78.21ms +[2025-09-11 06:15:00] [Rank 0] step:1681/10000 train_time:130563ms step_avg:77.67ms +[2025-09-11 06:15:00] [Rank 0] step:1681/10000 
train_time:130563ms step_avg:77.67ms +[2025-09-11 06:15:01] [Rank 0] step:1701/10000 train_time:131215ms step_avg:77.14ms +[2025-09-11 06:15:01] [Rank 0] step:1701/10000 train_time:131215ms step_avg:77.14ms +[2025-09-11 06:15:02] [Rank 0] step:1721/10000 train_time:131968ms step_avg:76.68ms +[2025-09-11 06:15:02] [Rank 0] step:1721/10000 train_time:131968ms step_avg:76.68ms +[2025-09-11 06:15:02] [Rank 0] step:1741/10000 train_time:132685ms step_avg:76.21ms +[2025-09-11 06:15:02] [Rank 0] step:1741/10000 train_time:132685ms step_avg:76.21ms +[2025-09-11 06:15:03] [Rank 0] step:1761/10000 train_time:133337ms step_avg:75.72ms +[2025-09-11 06:15:03] [Rank 0] step:1761/10000 train_time:133337ms step_avg:75.72ms +[2025-09-11 06:15:04] [Rank 0] step:1781/10000 train_time:133989ms step_avg:75.23ms +[2025-09-11 06:15:04] [Rank 0] step:1781/10000 train_time:133989ms step_avg:75.23ms +[2025-09-11 06:15:04] [Rank 0] step:1801/10000 train_time:134641ms step_avg:74.76ms +[2025-09-11 06:15:04] [Rank 0] step:1801/10000 train_time:134641ms step_avg:74.76ms +[2025-09-11 06:15:05] [Rank 0] step:1821/10000 train_time:135293ms step_avg:74.30ms +[2025-09-11 06:15:05] [Rank 0] step:1821/10000 train_time:135293ms step_avg:74.30ms +[2025-09-11 06:15:06] [Rank 0] step:1841/10000 train_time:135944ms step_avg:73.84ms +[2025-09-11 06:15:06] [Rank 0] step:1841/10000 train_time:135944ms step_avg:73.84ms +[2025-09-11 06:15:06] [Rank 0] step:1861/10000 train_time:136597ms step_avg:73.40ms +[2025-09-11 06:15:06] [Rank 0] step:1861/10000 train_time:136597ms step_avg:73.40ms +[2025-09-11 06:15:07] [Rank 0] step:1881/10000 train_time:137249ms step_avg:72.97ms +[2025-09-11 06:15:07] [Rank 0] step:1881/10000 train_time:137249ms step_avg:72.97ms +[2025-09-11 06:15:07] [Rank 0] step:1901/10000 train_time:137901ms step_avg:72.54ms +[2025-09-11 06:15:07] [Rank 0] step:1901/10000 train_time:137901ms step_avg:72.54ms +[2025-09-11 06:15:08] [Rank 0] step:1921/10000 train_time:138551ms step_avg:72.12ms 
+[2025-09-11 06:15:08] [Rank 0] step:1921/10000 train_time:138551ms step_avg:72.12ms +[2025-09-11 06:15:09] [Rank 0] step:1941/10000 train_time:139203ms step_avg:71.72ms +[2025-09-11 06:15:09] [Rank 0] step:1941/10000 train_time:139203ms step_avg:71.72ms +[2025-09-11 06:15:09] [Rank 0] step:1961/10000 train_time:139855ms step_avg:71.32ms +[2025-09-11 06:15:09] [Rank 0] step:1961/10000 train_time:139855ms step_avg:71.32ms +[2025-09-11 06:15:10] [Rank 0] step:1981/10000 train_time:140506ms step_avg:70.93ms +[2025-09-11 06:15:10] [Rank 0] step:1981/10000 train_time:140506ms step_avg:70.93ms +[2025-09-11 06:15:11] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 06:15:11] [Rank 0] PRINT: [Validation @ Step 2000] Calculating base validation loss... +[2025-09-11 06:15:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 06:15:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Getting true update direction 'v'... +[2025-09-11 06:15:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 06:15:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating update norms... +[2025-09-11 06:15:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:15:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:15:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 06:15:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up layer parameter groups... +[2025-09-11 06:15:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... +[2025-09-11 06:15:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise update norms... 
+[2025-09-11 06:15:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 06:15:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Setting up HVP calculation in float32... +[2025-09-11 06:15:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 06:15:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating TOTAL sharpness... +[2025-09-11 06:15:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 06:15:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Calculating layer-wise sharpness... +[2025-09-11 06:15:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 06:15:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 12 layers for sharpness... +[2025-09-11 06:15:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 06:15:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_1'... +[2025-09-11 06:15:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 06:15:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_2'... +[2025-09-11 06:15:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 06:15:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_3'... +[2025-09-11 06:15:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 06:15:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_4'... +[2025-09-11 06:15:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 06:15:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_5'... +[2025-09-11 06:15:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... 
+[2025-09-11 06:15:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_6'... +[2025-09-11 06:15:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 06:15:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_7'... +[2025-09-11 06:15:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 06:15:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_8'... +[2025-09-11 06:15:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 06:15:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_9'... +[2025-09-11 06:15:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 06:15:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_10'... +[2025-09-11 06:15:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 06:15:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_11'... +[2025-09-11 06:15:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 06:15:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Processing 'layer_12'... +[2025-09-11 06:15:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. +[2025-09-11 06:15:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:15:22] [Rank 0] PRINT: step:2000/10000 val_loss:5.0315 total_sharp:8.8775e-05 L1_sharp:2.4823e-04 L2_sharp:2.7583e-05 L3_sharp:1.3093e-05 L4_sharp:1.2120e-05 L5_sharp:1.9471e-05 L6_sharp:9.0721e-06 L7_sharp:1.6517e-05 L8_sharp:3.4020e-05 L9_sharp:3.7939e-05 L10_sharp:5.8794e-05 L11_sharp:9.3807e-05 L12_sharp:6.4878e-04 total_fnorm:2.1200e+02 total_l1_linf:6.7994e+05 total_spectral:1.0600e+02 L1_fnorm:2.3500e+01 L2_fnorm:2.3625e+01 L3_fnorm:2.3875e+01 L4_fnorm:2.4125e+01 L5_fnorm:2.3625e+01 L6_fnorm:2.4125e+01 L7_fnorm:2.4250e+01 L8_fnorm:2.3750e+01 L9_fnorm:2.4125e+01 L10_fnorm:2.4125e+01 L11_fnorm:2.4125e+01 L12_fnorm:2.3875e+01 L1_l1linf:6.5000e+00 L2_l1linf:6.4375e+00 L3_l1linf:6.0312e+00 L4_l1linf:6.4062e+00 L5_l1linf:6.2812e+00 L6_l1linf:6.4375e+00 L7_l1linf:6.1875e+00 L8_l1linf:5.9375e+00 L9_l1linf:5.9375e+00 L10_l1linf:5.7812e+00 L11_l1linf:5.6562e+00 L12_l1linf:5.2500e+00 L1_spectral:2.8100e-01 L2_spectral:2.7822e-01 L3_spectral:2.7779e-01 L4_spectral:2.8019e-01 L5_spectral:2.8367e-01 L6_spectral:2.8090e-01 L7_spectral:2.8005e-01 L8_spectral:2.8525e-01 L9_spectral:2.8174e-01 L10_spectral:2.8388e-01 L11_spectral:2.7971e-01 L12_spectral:2.8226e-01 train_time:141139ms step_avg:70.57ms +[2025-09-11 06:15:22] [Rank 0] PRINT: step:2000/10000 val_loss:5.0315 total_sharp:8.8775e-05 L1_sharp:2.4823e-04 L2_sharp:2.7583e-05 L3_sharp:1.3093e-05 L4_sharp:1.2120e-05 L5_sharp:1.9471e-05 L6_sharp:9.0721e-06 L7_sharp:1.6517e-05 L8_sharp:3.4020e-05 L9_sharp:3.7939e-05 L10_sharp:5.8794e-05 L11_sharp:9.3807e-05 L12_sharp:6.4878e-04 total_fnorm:2.1200e+02 total_l1_linf:6.7994e+05 total_spectral:1.0600e+02 L1_fnorm:2.3500e+01 L2_fnorm:2.3625e+01 L3_fnorm:2.3875e+01 L4_fnorm:2.4125e+01 L5_fnorm:2.3625e+01 L6_fnorm:2.4125e+01 L7_fnorm:2.4250e+01 L8_fnorm:2.3750e+01 L9_fnorm:2.4125e+01 L10_fnorm:2.4125e+01 L11_fnorm:2.4125e+01 L12_fnorm:2.3875e+01 L1_l1linf:6.5000e+00 L2_l1linf:6.4375e+00 L3_l1linf:6.0312e+00 L4_l1linf:6.4062e+00 L5_l1linf:6.2812e+00 
L6_l1linf:6.4375e+00 L7_l1linf:6.1875e+00 L8_l1linf:5.9375e+00 L9_l1linf:5.9375e+00 L10_l1linf:5.7812e+00 L11_l1linf:5.6562e+00 L12_l1linf:5.2500e+00 L1_spectral:2.8100e-01 L2_spectral:2.7822e-01 L3_spectral:2.7779e-01 L4_spectral:2.8019e-01 L5_spectral:2.8367e-01 L6_spectral:2.8090e-01 L7_spectral:2.8005e-01 L8_spectral:2.8525e-01 L9_spectral:2.8174e-01 L10_spectral:2.8388e-01 L11_spectral:2.7971e-01 L12_spectral:2.8226e-01 train_time:141139ms step_avg:70.57ms +[2025-09-11 06:15:23] [Rank 0] step:2001/10000 train_time:142809ms step_avg:71.37ms +[2025-09-11 06:15:23] [Rank 0] step:2001/10000 train_time:142809ms step_avg:71.37ms +[2025-09-11 06:15:24] [Rank 0] step:2021/10000 train_time:143464ms step_avg:70.99ms +[2025-09-11 06:15:24] [Rank 0] step:2021/10000 train_time:143464ms step_avg:70.99ms +[2025-09-11 06:15:25] [Rank 0] step:2041/10000 train_time:144117ms step_avg:70.61ms +[2025-09-11 06:15:25] [Rank 0] step:2041/10000 train_time:144117ms step_avg:70.61ms +[2025-09-11 06:15:25] [Rank 0] step:2061/10000 train_time:144771ms step_avg:70.24ms +[2025-09-11 06:15:25] [Rank 0] step:2061/10000 train_time:144771ms step_avg:70.24ms +[2025-09-11 06:15:26] [Rank 0] step:2081/10000 train_time:145424ms step_avg:69.88ms +[2025-09-11 06:15:26] [Rank 0] step:2081/10000 train_time:145424ms step_avg:69.88ms +[2025-09-11 06:15:27] [Rank 0] step:2101/10000 train_time:146077ms step_avg:69.53ms +[2025-09-11 06:15:27] [Rank 0] step:2101/10000 train_time:146077ms step_avg:69.53ms +[2025-09-11 06:15:27] [Rank 0] step:2121/10000 train_time:146730ms step_avg:69.18ms +[2025-09-11 06:15:27] [Rank 0] step:2121/10000 train_time:146730ms step_avg:69.18ms +[2025-09-11 06:15:28] [Rank 0] step:2141/10000 train_time:147382ms step_avg:68.84ms +[2025-09-11 06:15:28] [Rank 0] step:2141/10000 train_time:147382ms step_avg:68.84ms +[2025-09-11 06:15:29] [Rank 0] step:2161/10000 train_time:148034ms step_avg:68.50ms +[2025-09-11 06:15:29] [Rank 0] step:2161/10000 train_time:148034ms step_avg:68.50ms 
+[2025-09-11 06:15:29] [Rank 0] step:2181/10000 train_time:148687ms step_avg:68.17ms +[2025-09-11 06:15:29] [Rank 0] step:2181/10000 train_time:148687ms step_avg:68.17ms +[2025-09-11 06:15:30] [Rank 0] step:2201/10000 train_time:149340ms step_avg:67.85ms +[2025-09-11 06:15:30] [Rank 0] step:2201/10000 train_time:149340ms step_avg:67.85ms +[2025-09-11 06:15:31] [Rank 0] step:2221/10000 train_time:149992ms step_avg:67.53ms +[2025-09-11 06:15:31] [Rank 0] step:2221/10000 train_time:149992ms step_avg:67.53ms +[2025-09-11 06:15:31] [Rank 0] step:2241/10000 train_time:150657ms step_avg:67.23ms +[2025-09-11 06:15:31] [Rank 0] step:2241/10000 train_time:150657ms step_avg:67.23ms +[2025-09-11 06:15:32] [Rank 0] step:2261/10000 train_time:151322ms step_avg:66.93ms +[2025-09-11 06:15:32] [Rank 0] step:2261/10000 train_time:151322ms step_avg:66.93ms +[2025-09-11 06:15:33] [Rank 0] step:2281/10000 train_time:151987ms step_avg:66.63ms +[2025-09-11 06:15:33] [Rank 0] step:2281/10000 train_time:151987ms step_avg:66.63ms +[2025-09-11 06:15:33] [Rank 0] step:2301/10000 train_time:152653ms step_avg:66.34ms +[2025-09-11 06:15:33] [Rank 0] step:2301/10000 train_time:152653ms step_avg:66.34ms +[2025-09-11 06:15:34] [Rank 0] step:2321/10000 train_time:153318ms step_avg:66.06ms +[2025-09-11 06:15:34] [Rank 0] step:2321/10000 train_time:153318ms step_avg:66.06ms +[2025-09-11 06:15:35] [Rank 0] step:2341/10000 train_time:153983ms step_avg:65.78ms +[2025-09-11 06:15:35] [Rank 0] step:2341/10000 train_time:153983ms step_avg:65.78ms +[2025-09-11 06:15:35] [Rank 0] step:2361/10000 train_time:154649ms step_avg:65.50ms +[2025-09-11 06:15:35] [Rank 0] step:2361/10000 train_time:154649ms step_avg:65.50ms +[2025-09-11 06:15:36] [Rank 0] step:2381/10000 train_time:155314ms step_avg:65.23ms +[2025-09-11 06:15:36] [Rank 0] step:2381/10000 train_time:155314ms step_avg:65.23ms +[2025-09-11 06:15:36] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... 
+[2025-09-11 06:15:36] [Rank 0] PRINT: [Validation @ Step 2400] Calculating base validation loss... +[2025-09-11 06:15:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 06:15:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Getting true update direction 'v'... +[2025-09-11 06:15:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 06:15:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating update norms... +[2025-09-11 06:15:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:15:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:15:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 06:15:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up layer parameter groups... +[2025-09-11 06:15:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 06:15:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise update norms... +[2025-09-11 06:15:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 06:15:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Setting up HVP calculation in float32... +[2025-09-11 06:15:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 06:15:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating TOTAL sharpness... +[2025-09-11 06:15:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 06:15:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Calculating layer-wise sharpness... +[2025-09-11 06:15:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... 
+[2025-09-11 06:15:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 12 layers for sharpness... +[2025-09-11 06:15:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 06:15:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_1'... +[2025-09-11 06:15:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 06:15:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_2'... +[2025-09-11 06:15:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 06:15:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_3'... +[2025-09-11 06:15:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 06:15:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_4'... +[2025-09-11 06:15:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 06:15:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_5'... +[2025-09-11 06:15:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 06:15:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_6'... +[2025-09-11 06:15:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 06:15:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_7'... +[2025-09-11 06:15:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 06:15:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_8'... +[2025-09-11 06:15:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 06:15:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_9'... +[2025-09-11 06:15:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... 
+[2025-09-11 06:15:46] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_10'... +[2025-09-11 06:15:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 06:15:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_11'... +[2025-09-11 06:15:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 06:15:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Processing 'layer_12'... +[2025-09-11 06:15:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 06:15:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2400] Analysis complete. Generated 52 metrics. +[2025-09-11 06:15:48] [Rank 0] PRINT: step:2400/10000 val_loss:4.8860 total_sharp:1.0188e-04 L1_sharp:2.6605e-04 L2_sharp:5.2216e-05 L3_sharp:6.6147e-06 L4_sharp:1.9541e-05 L5_sharp:3.8363e-05 L6_sharp:1.9241e-05 L7_sharp:3.0327e-05 L8_sharp:3.8614e-05 L9_sharp:3.4511e-05 L10_sharp:4.8225e-05 L11_sharp:8.4347e-05 L12_sharp:8.1804e-04 total_fnorm:2.0400e+02 total_l1_linf:6.3488e+05 total_spectral:1.0150e+02 L1_fnorm:2.3250e+01 L2_fnorm:2.3500e+01 L3_fnorm:2.3750e+01 L4_fnorm:2.4000e+01 L5_fnorm:2.3625e+01 L6_fnorm:2.4125e+01 L7_fnorm:2.4250e+01 L8_fnorm:2.3875e+01 L9_fnorm:2.4125e+01 L10_fnorm:2.4125e+01 L11_fnorm:2.4125e+01 L12_fnorm:2.4125e+01 L1_l1linf:6.5625e+00 L2_l1linf:6.0000e+00 L3_l1linf:5.5000e+00 L4_l1linf:6.1250e+00 L5_l1linf:5.9688e+00 L6_l1linf:6.0625e+00 L7_l1linf:6.0938e+00 L8_l1linf:5.9688e+00 L9_l1linf:5.8750e+00 L10_l1linf:5.8125e+00 L11_l1linf:5.5625e+00 L12_l1linf:5.2812e+00 L1_spectral:2.8028e-01 L2_spectral:2.8003e-01 L3_spectral:2.8225e-01 L4_spectral:2.8407e-01 L5_spectral:2.8942e-01 L6_spectral:2.8670e-01 L7_spectral:2.8354e-01 L8_spectral:2.8861e-01 L9_spectral:2.8352e-01 L10_spectral:2.8691e-01 L11_spectral:2.8655e-01 L12_spectral:2.8563e-01 train_time:155961ms step_avg:64.98ms +[2025-09-11 06:15:48] [Rank 0] PRINT: step:2400/10000 
val_loss:4.8860 total_sharp:1.0188e-04 L1_sharp:2.6605e-04 L2_sharp:5.2216e-05 L3_sharp:6.6147e-06 L4_sharp:1.9541e-05 L5_sharp:3.8363e-05 L6_sharp:1.9241e-05 L7_sharp:3.0327e-05 L8_sharp:3.8614e-05 L9_sharp:3.4511e-05 L10_sharp:4.8225e-05 L11_sharp:8.4347e-05 L12_sharp:8.1804e-04 total_fnorm:2.0400e+02 total_l1_linf:6.3488e+05 total_spectral:1.0150e+02 L1_fnorm:2.3250e+01 L2_fnorm:2.3500e+01 L3_fnorm:2.3750e+01 L4_fnorm:2.4000e+01 L5_fnorm:2.3625e+01 L6_fnorm:2.4125e+01 L7_fnorm:2.4250e+01 L8_fnorm:2.3875e+01 L9_fnorm:2.4125e+01 L10_fnorm:2.4125e+01 L11_fnorm:2.4125e+01 L12_fnorm:2.4125e+01 L1_l1linf:6.5625e+00 L2_l1linf:6.0000e+00 L3_l1linf:5.5000e+00 L4_l1linf:6.1250e+00 L5_l1linf:5.9688e+00 L6_l1linf:6.0625e+00 L7_l1linf:6.0938e+00 L8_l1linf:5.9688e+00 L9_l1linf:5.8750e+00 L10_l1linf:5.8125e+00 L11_l1linf:5.5625e+00 L12_l1linf:5.2812e+00 L1_spectral:2.8028e-01 L2_spectral:2.8003e-01 L3_spectral:2.8225e-01 L4_spectral:2.8407e-01 L5_spectral:2.8942e-01 L6_spectral:2.8670e-01 L7_spectral:2.8354e-01 L8_spectral:2.8861e-01 L9_spectral:2.8352e-01 L10_spectral:2.8691e-01 L11_spectral:2.8655e-01 L12_spectral:2.8563e-01 train_time:155961ms step_avg:64.98ms +[2025-09-11 06:15:49] [Rank 0] step:2401/10000 train_time:157600ms step_avg:65.64ms +[2025-09-11 06:15:49] [Rank 0] step:2401/10000 train_time:157600ms step_avg:65.64ms +[2025-09-11 06:15:50] [Rank 0] step:2421/10000 train_time:158286ms step_avg:65.38ms +[2025-09-11 06:15:50] [Rank 0] step:2421/10000 train_time:158286ms step_avg:65.38ms +[2025-09-11 06:15:51] [Rank 0] step:2441/10000 train_time:158952ms step_avg:65.12ms +[2025-09-11 06:15:51] [Rank 0] step:2441/10000 train_time:158952ms step_avg:65.12ms +[2025-09-11 06:15:51] [Rank 0] step:2461/10000 train_time:159618ms step_avg:64.86ms +[2025-09-11 06:15:51] [Rank 0] step:2461/10000 train_time:159618ms step_avg:64.86ms +[2025-09-11 06:15:52] [Rank 0] step:2481/10000 train_time:160283ms step_avg:64.60ms +[2025-09-11 06:15:52] [Rank 0] step:2481/10000 
train_time:160283ms step_avg:64.60ms +[2025-09-11 06:15:53] [Rank 0] step:2501/10000 train_time:160949ms step_avg:64.35ms +[2025-09-11 06:15:53] [Rank 0] step:2501/10000 train_time:160949ms step_avg:64.35ms +[2025-09-11 06:15:53] [Rank 0] step:2521/10000 train_time:161614ms step_avg:64.11ms +[2025-09-11 06:15:53] [Rank 0] step:2521/10000 train_time:161614ms step_avg:64.11ms +[2025-09-11 06:15:54] [Rank 0] step:2541/10000 train_time:162279ms step_avg:63.86ms +[2025-09-11 06:15:54] [Rank 0] step:2541/10000 train_time:162279ms step_avg:63.86ms +[2025-09-11 06:15:55] [Rank 0] step:2561/10000 train_time:162945ms step_avg:63.63ms +[2025-09-11 06:15:55] [Rank 0] step:2561/10000 train_time:162945ms step_avg:63.63ms +[2025-09-11 06:15:55] [Rank 0] step:2581/10000 train_time:163611ms step_avg:63.39ms +[2025-09-11 06:15:55] [Rank 0] step:2581/10000 train_time:163611ms step_avg:63.39ms +[2025-09-11 06:15:56] [Rank 0] step:2601/10000 train_time:164276ms step_avg:63.16ms +[2025-09-11 06:15:56] [Rank 0] step:2601/10000 train_time:164276ms step_avg:63.16ms +[2025-09-11 06:15:57] [Rank 0] step:2621/10000 train_time:164952ms step_avg:62.93ms +[2025-09-11 06:15:57] [Rank 0] step:2621/10000 train_time:164952ms step_avg:62.93ms +[2025-09-11 06:15:57] [Rank 0] step:2641/10000 train_time:165617ms step_avg:62.71ms +[2025-09-11 06:15:57] [Rank 0] step:2641/10000 train_time:165617ms step_avg:62.71ms +[2025-09-11 06:15:58] [Rank 0] step:2661/10000 train_time:166298ms step_avg:62.49ms +[2025-09-11 06:15:58] [Rank 0] step:2661/10000 train_time:166298ms step_avg:62.49ms +[2025-09-11 06:15:59] [Rank 0] step:2681/10000 train_time:166964ms step_avg:62.28ms +[2025-09-11 06:15:59] [Rank 0] step:2681/10000 train_time:166964ms step_avg:62.28ms +[2025-09-11 06:15:59] [Rank 0] step:2701/10000 train_time:167629ms step_avg:62.06ms +[2025-09-11 06:15:59] [Rank 0] step:2701/10000 train_time:167629ms step_avg:62.06ms +[2025-09-11 06:16:00] [Rank 0] step:2721/10000 train_time:168295ms step_avg:61.85ms 
+[2025-09-11 06:16:00] [Rank 0] step:2721/10000 train_time:168295ms step_avg:61.85ms +[2025-09-11 06:16:01] [Rank 0] step:2741/10000 train_time:168961ms step_avg:61.64ms +[2025-09-11 06:16:01] [Rank 0] step:2741/10000 train_time:168961ms step_avg:61.64ms +[2025-09-11 06:16:01] [Rank 0] step:2761/10000 train_time:169626ms step_avg:61.44ms +[2025-09-11 06:16:01] [Rank 0] step:2761/10000 train_time:169626ms step_avg:61.44ms +[2025-09-11 06:16:02] [Rank 0] step:2781/10000 train_time:170291ms step_avg:61.23ms +[2025-09-11 06:16:02] [Rank 0] step:2781/10000 train_time:170291ms step_avg:61.23ms +[2025-09-11 06:16:03] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 06:16:03] [Rank 0] PRINT: [Validation @ Step 2800] Calculating base validation loss... +[2025-09-11 06:16:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 06:16:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Getting true update direction 'v'... +[2025-09-11 06:16:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 06:16:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating update norms... +[2025-09-11 06:16:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:16:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:16:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 06:16:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up layer parameter groups... +[2025-09-11 06:16:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... +[2025-09-11 06:16:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise update norms... 
+[2025-09-11 06:16:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 06:16:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Setting up HVP calculation in float32... +[2025-09-11 06:16:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 06:16:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating TOTAL sharpness... +[2025-09-11 06:16:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 06:16:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Calculating layer-wise sharpness... +[2025-09-11 06:16:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 06:16:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 12 layers for sharpness... +[2025-09-11 06:16:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 06:16:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_1'... +[2025-09-11 06:16:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 06:16:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_2'... +[2025-09-11 06:16:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 06:16:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_3'... +[2025-09-11 06:16:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 06:16:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_4'... +[2025-09-11 06:16:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 06:16:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_5'... +[2025-09-11 06:16:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... 
+[2025-09-11 06:16:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_6'... +[2025-09-11 06:16:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 06:16:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_7'... +[2025-09-11 06:16:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 06:16:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_8'... +[2025-09-11 06:16:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 06:16:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_9'... +[2025-09-11 06:16:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 06:16:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_10'... +[2025-09-11 06:16:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 06:16:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_11'... +[2025-09-11 06:16:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 06:16:13] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Processing 'layer_12'... +[2025-09-11 06:16:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. +[2025-09-11 06:16:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 2800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:16:14] [Rank 0] PRINT: step:2800/10000 val_loss:4.8323 total_sharp:9.1935e-05 L1_sharp:2.4548e-04 L2_sharp:3.7220e-05 L3_sharp:1.2534e-05 L4_sharp:5.3574e-06 L5_sharp:2.5773e-06 L6_sharp:2.6565e-05 L7_sharp:2.5855e-05 L8_sharp:5.6575e-05 L9_sharp:4.3951e-05 L10_sharp:5.6753e-05 L11_sharp:8.7166e-05 L12_sharp:4.9660e-04 total_fnorm:2.0400e+02 total_l1_linf:6.3078e+05 total_spectral:1.0200e+02 L1_fnorm:2.3375e+01 L2_fnorm:2.3375e+01 L3_fnorm:2.3500e+01 L4_fnorm:2.4000e+01 L5_fnorm:2.3625e+01 L6_fnorm:2.4125e+01 L7_fnorm:2.4375e+01 L8_fnorm:2.3875e+01 L9_fnorm:2.4250e+01 L10_fnorm:2.4125e+01 L11_fnorm:2.4125e+01 L12_fnorm:2.4125e+01 L1_l1linf:6.4375e+00 L2_l1linf:5.5312e+00 L3_l1linf:5.1875e+00 L4_l1linf:5.9062e+00 L5_l1linf:5.9688e+00 L6_l1linf:5.9688e+00 L7_l1linf:5.9688e+00 L8_l1linf:5.9375e+00 L9_l1linf:5.9062e+00 L10_l1linf:5.7500e+00 L11_l1linf:5.5000e+00 L12_l1linf:5.2812e+00 L1_spectral:2.8315e-01 L2_spectral:2.8135e-01 L3_spectral:2.8469e-01 L4_spectral:2.8710e-01 L5_spectral:2.8988e-01 L6_spectral:2.9012e-01 L7_spectral:2.8638e-01 L8_spectral:2.9273e-01 L9_spectral:2.8823e-01 L10_spectral:2.8806e-01 L11_spectral:2.8903e-01 L12_spectral:2.8714e-01 train_time:170939ms step_avg:61.05ms +[2025-09-11 06:16:14] [Rank 0] PRINT: step:2800/10000 val_loss:4.8323 total_sharp:9.1935e-05 L1_sharp:2.4548e-04 L2_sharp:3.7220e-05 L3_sharp:1.2534e-05 L4_sharp:5.3574e-06 L5_sharp:2.5773e-06 L6_sharp:2.6565e-05 L7_sharp:2.5855e-05 L8_sharp:5.6575e-05 L9_sharp:4.3951e-05 L10_sharp:5.6753e-05 L11_sharp:8.7166e-05 L12_sharp:4.9660e-04 total_fnorm:2.0400e+02 total_l1_linf:6.3078e+05 total_spectral:1.0200e+02 L1_fnorm:2.3375e+01 L2_fnorm:2.3375e+01 L3_fnorm:2.3500e+01 L4_fnorm:2.4000e+01 L5_fnorm:2.3625e+01 L6_fnorm:2.4125e+01 L7_fnorm:2.4375e+01 L8_fnorm:2.3875e+01 L9_fnorm:2.4250e+01 L10_fnorm:2.4125e+01 L11_fnorm:2.4125e+01 L12_fnorm:2.4125e+01 L1_l1linf:6.4375e+00 L2_l1linf:5.5312e+00 L3_l1linf:5.1875e+00 L4_l1linf:5.9062e+00 L5_l1linf:5.9688e+00 
L6_l1linf:5.9688e+00 L7_l1linf:5.9688e+00 L8_l1linf:5.9375e+00 L9_l1linf:5.9062e+00 L10_l1linf:5.7500e+00 L11_l1linf:5.5000e+00 L12_l1linf:5.2812e+00 L1_spectral:2.8315e-01 L2_spectral:2.8135e-01 L3_spectral:2.8469e-01 L4_spectral:2.8710e-01 L5_spectral:2.8988e-01 L6_spectral:2.9012e-01 L7_spectral:2.8638e-01 L8_spectral:2.9273e-01 L9_spectral:2.8823e-01 L10_spectral:2.8806e-01 L11_spectral:2.8903e-01 L12_spectral:2.8714e-01 train_time:170939ms step_avg:61.05ms +[2025-09-11 06:16:15] [Rank 0] step:2801/10000 train_time:172595ms step_avg:61.62ms +[2025-09-11 06:16:15] [Rank 0] step:2801/10000 train_time:172595ms step_avg:61.62ms +[2025-09-11 06:16:16] [Rank 0] step:2821/10000 train_time:173265ms step_avg:61.42ms +[2025-09-11 06:16:16] [Rank 0] step:2821/10000 train_time:173265ms step_avg:61.42ms +[2025-09-11 06:16:17] [Rank 0] step:2841/10000 train_time:173932ms step_avg:61.22ms +[2025-09-11 06:16:17] [Rank 0] step:2841/10000 train_time:173932ms step_avg:61.22ms +[2025-09-11 06:16:17] [Rank 0] step:2861/10000 train_time:174599ms step_avg:61.03ms +[2025-09-11 06:16:17] [Rank 0] step:2861/10000 train_time:174599ms step_avg:61.03ms +[2025-09-11 06:16:18] [Rank 0] step:2881/10000 train_time:175266ms step_avg:60.84ms +[2025-09-11 06:16:18] [Rank 0] step:2881/10000 train_time:175266ms step_avg:60.84ms +[2025-09-11 06:16:19] [Rank 0] step:2901/10000 train_time:175932ms step_avg:60.65ms +[2025-09-11 06:16:19] [Rank 0] step:2901/10000 train_time:175932ms step_avg:60.65ms +[2025-09-11 06:16:19] [Rank 0] step:2921/10000 train_time:176598ms step_avg:60.46ms +[2025-09-11 06:16:19] [Rank 0] step:2921/10000 train_time:176598ms step_avg:60.46ms +[2025-09-11 06:16:20] [Rank 0] step:2941/10000 train_time:177264ms step_avg:60.27ms +[2025-09-11 06:16:20] [Rank 0] step:2941/10000 train_time:177264ms step_avg:60.27ms +[2025-09-11 06:16:21] [Rank 0] step:2961/10000 train_time:177930ms step_avg:60.09ms +[2025-09-11 06:16:21] [Rank 0] step:2961/10000 train_time:177930ms step_avg:60.09ms 
+[2025-09-11 06:16:22] [Rank 0] step:2981/10000 train_time:179063ms step_avg:60.07ms +[2025-09-11 06:16:22] [Rank 0] step:2981/10000 train_time:179063ms step_avg:60.07ms +[2025-09-11 06:16:23] [Rank 0] step:3001/10000 train_time:179731ms step_avg:59.89ms +[2025-09-11 06:16:23] [Rank 0] step:3001/10000 train_time:179731ms step_avg:59.89ms +[2025-09-11 06:16:23] [Rank 0] step:3021/10000 train_time:180400ms step_avg:59.72ms +[2025-09-11 06:16:23] [Rank 0] step:3021/10000 train_time:180400ms step_avg:59.72ms +[2025-09-11 06:16:24] [Rank 0] step:3041/10000 train_time:181373ms step_avg:59.64ms +[2025-09-11 06:16:24] [Rank 0] step:3041/10000 train_time:181373ms step_avg:59.64ms +[2025-09-11 06:16:25] [Rank 0] step:3061/10000 train_time:182041ms step_avg:59.47ms +[2025-09-11 06:16:25] [Rank 0] step:3061/10000 train_time:182041ms step_avg:59.47ms +[2025-09-11 06:16:26] [Rank 0] step:3081/10000 train_time:182710ms step_avg:59.30ms +[2025-09-11 06:16:26] [Rank 0] step:3081/10000 train_time:182710ms step_avg:59.30ms +[2025-09-11 06:16:26] [Rank 0] step:3101/10000 train_time:183379ms step_avg:59.14ms +[2025-09-11 06:16:26] [Rank 0] step:3101/10000 train_time:183379ms step_avg:59.14ms +[2025-09-11 06:16:27] [Rank 0] step:3121/10000 train_time:184048ms step_avg:58.97ms +[2025-09-11 06:16:27] [Rank 0] step:3121/10000 train_time:184048ms step_avg:58.97ms +[2025-09-11 06:16:28] [Rank 0] step:3141/10000 train_time:184717ms step_avg:58.81ms +[2025-09-11 06:16:28] [Rank 0] step:3141/10000 train_time:184717ms step_avg:58.81ms +[2025-09-11 06:16:28] [Rank 0] step:3161/10000 train_time:185386ms step_avg:58.65ms +[2025-09-11 06:16:28] [Rank 0] step:3161/10000 train_time:185386ms step_avg:58.65ms +[2025-09-11 06:16:29] [Rank 0] step:3181/10000 train_time:186055ms step_avg:58.49ms +[2025-09-11 06:16:29] [Rank 0] step:3181/10000 train_time:186055ms step_avg:58.49ms +[2025-09-11 06:16:30] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... 
+[2025-09-11 06:16:30] [Rank 0] PRINT: [Validation @ Step 3200] Calculating base validation loss... +[2025-09-11 06:16:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 06:16:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Getting true update direction 'v'... +[2025-09-11 06:16:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 06:16:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating update norms... +[2025-09-11 06:16:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:16:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:16:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 06:16:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up layer parameter groups... +[2025-09-11 06:16:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 06:16:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise update norms... +[2025-09-11 06:16:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 06:16:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Setting up HVP calculation in float32... +[2025-09-11 06:16:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 06:16:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating TOTAL sharpness... +[2025-09-11 06:16:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 06:16:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Calculating layer-wise sharpness... +[2025-09-11 06:16:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... 
+[2025-09-11 06:16:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 12 layers for sharpness... +[2025-09-11 06:16:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 06:16:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_1'... +[2025-09-11 06:16:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 06:16:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_2'... +[2025-09-11 06:16:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 06:16:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_3'... +[2025-09-11 06:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 06:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_4'... +[2025-09-11 06:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 06:16:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_5'... +[2025-09-11 06:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 06:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_6'... +[2025-09-11 06:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 06:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_7'... +[2025-09-11 06:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 06:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_8'... +[2025-09-11 06:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 06:16:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_9'... +[2025-09-11 06:16:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... 
+[2025-09-11 06:16:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_10'... +[2025-09-11 06:16:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 06:16:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_11'... +[2025-09-11 06:16:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 06:16:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Processing 'layer_12'... +[2025-09-11 06:16:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 06:16:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3200] Analysis complete. Generated 52 metrics. +[2025-09-11 06:16:44] [Rank 0] PRINT: step:3200/10000 val_loss:4.7423 total_sharp:6.8730e-05 L1_sharp:1.6563e-04 L2_sharp:2.2700e-05 L3_sharp:2.2619e-06 L4_sharp:6.5052e-06 L5_sharp:1.5104e-05 L6_sharp:1.6486e-05 L7_sharp:2.0110e-05 L8_sharp:3.5009e-05 L9_sharp:5.3675e-05 L10_sharp:5.2138e-05 L11_sharp:7.2617e-05 L12_sharp:4.1744e-04 total_fnorm:2.1500e+02 total_l1_linf:6.7584e+05 total_spectral:1.0650e+02 L1_fnorm:2.3375e+01 L2_fnorm:2.3375e+01 L3_fnorm:2.3625e+01 L4_fnorm:2.4375e+01 L5_fnorm:2.3875e+01 L6_fnorm:2.4500e+01 L7_fnorm:2.4625e+01 L8_fnorm:2.4125e+01 L9_fnorm:2.4500e+01 L10_fnorm:2.4375e+01 L11_fnorm:2.4375e+01 L12_fnorm:2.4375e+01 L1_l1linf:6.5938e+00 L2_l1linf:5.1875e+00 L3_l1linf:4.8438e+00 L4_l1linf:5.9375e+00 L5_l1linf:5.6562e+00 L6_l1linf:5.9375e+00 L7_l1linf:6.0000e+00 L8_l1linf:5.9688e+00 L9_l1linf:6.0000e+00 L10_l1linf:5.7500e+00 L11_l1linf:5.5000e+00 L12_l1linf:5.2500e+00 L1_spectral:2.8357e-01 L2_spectral:2.8276e-01 L3_spectral:2.8559e-01 L4_spectral:2.8779e-01 L5_spectral:2.9449e-01 L6_spectral:2.9093e-01 L7_spectral:2.8920e-01 L8_spectral:2.9697e-01 L9_spectral:2.9160e-01 L10_spectral:2.9340e-01 L11_spectral:2.9407e-01 L12_spectral:2.9640e-01 train_time:186705ms step_avg:58.35ms +[2025-09-11 06:16:44] [Rank 0] PRINT: step:3200/10000 
val_loss:4.7423 total_sharp:6.8730e-05 L1_sharp:1.6563e-04 L2_sharp:2.2700e-05 L3_sharp:2.2619e-06 L4_sharp:6.5052e-06 L5_sharp:1.5104e-05 L6_sharp:1.6486e-05 L7_sharp:2.0110e-05 L8_sharp:3.5009e-05 L9_sharp:5.3675e-05 L10_sharp:5.2138e-05 L11_sharp:7.2617e-05 L12_sharp:4.1744e-04 total_fnorm:2.1500e+02 total_l1_linf:6.7584e+05 total_spectral:1.0650e+02 L1_fnorm:2.3375e+01 L2_fnorm:2.3375e+01 L3_fnorm:2.3625e+01 L4_fnorm:2.4375e+01 L5_fnorm:2.3875e+01 L6_fnorm:2.4500e+01 L7_fnorm:2.4625e+01 L8_fnorm:2.4125e+01 L9_fnorm:2.4500e+01 L10_fnorm:2.4375e+01 L11_fnorm:2.4375e+01 L12_fnorm:2.4375e+01 L1_l1linf:6.5938e+00 L2_l1linf:5.1875e+00 L3_l1linf:4.8438e+00 L4_l1linf:5.9375e+00 L5_l1linf:5.6562e+00 L6_l1linf:5.9375e+00 L7_l1linf:6.0000e+00 L8_l1linf:5.9688e+00 L9_l1linf:6.0000e+00 L10_l1linf:5.7500e+00 L11_l1linf:5.5000e+00 L12_l1linf:5.2500e+00 L1_spectral:2.8357e-01 L2_spectral:2.8276e-01 L3_spectral:2.8559e-01 L4_spectral:2.8779e-01 L5_spectral:2.9449e-01 L6_spectral:2.9093e-01 L7_spectral:2.8920e-01 L8_spectral:2.9697e-01 L9_spectral:2.9160e-01 L10_spectral:2.9340e-01 L11_spectral:2.9407e-01 L12_spectral:2.9640e-01 train_time:186705ms step_avg:58.35ms +[2025-09-11 06:16:46] [Rank 0] step:3201/10000 train_time:188343ms step_avg:58.84ms +[2025-09-11 06:16:46] [Rank 0] step:3201/10000 train_time:188343ms step_avg:58.84ms +[2025-09-11 06:16:46] [Rank 0] step:3221/10000 train_time:189011ms step_avg:58.68ms +[2025-09-11 06:16:46] [Rank 0] step:3221/10000 train_time:189011ms step_avg:58.68ms +[2025-09-11 06:16:47] [Rank 0] step:3241/10000 train_time:189681ms step_avg:58.53ms +[2025-09-11 06:16:47] [Rank 0] step:3241/10000 train_time:189681ms step_avg:58.53ms +[2025-09-11 06:16:48] [Rank 0] step:3261/10000 train_time:190351ms step_avg:58.37ms +[2025-09-11 06:16:48] [Rank 0] step:3261/10000 train_time:190351ms step_avg:58.37ms +[2025-09-11 06:16:48] [Rank 0] step:3281/10000 train_time:191021ms step_avg:58.22ms +[2025-09-11 06:16:48] [Rank 0] step:3281/10000 
train_time:191021ms step_avg:58.22ms +[2025-09-11 06:16:49] [Rank 0] step:3301/10000 train_time:191690ms step_avg:58.07ms +[2025-09-11 06:16:49] [Rank 0] step:3301/10000 train_time:191690ms step_avg:58.07ms +[2025-09-11 06:16:50] [Rank 0] step:3321/10000 train_time:192360ms step_avg:57.92ms +[2025-09-11 06:16:50] [Rank 0] step:3321/10000 train_time:192360ms step_avg:57.92ms +[2025-09-11 06:16:50] [Rank 0] step:3341/10000 train_time:193030ms step_avg:57.78ms +[2025-09-11 06:16:50] [Rank 0] step:3341/10000 train_time:193030ms step_avg:57.78ms +[2025-09-11 06:16:51] [Rank 0] step:3361/10000 train_time:193700ms step_avg:57.63ms +[2025-09-11 06:16:51] [Rank 0] step:3361/10000 train_time:193700ms step_avg:57.63ms +[2025-09-11 06:16:52] [Rank 0] step:3381/10000 train_time:194369ms step_avg:57.49ms +[2025-09-11 06:16:52] [Rank 0] step:3381/10000 train_time:194369ms step_avg:57.49ms +[2025-09-11 06:16:52] [Rank 0] step:3401/10000 train_time:195039ms step_avg:57.35ms +[2025-09-11 06:16:52] [Rank 0] step:3401/10000 train_time:195039ms step_avg:57.35ms +[2025-09-11 06:16:53] [Rank 0] step:3421/10000 train_time:195708ms step_avg:57.21ms +[2025-09-11 06:16:53] [Rank 0] step:3421/10000 train_time:195708ms step_avg:57.21ms +[2025-09-11 06:16:54] [Rank 0] step:3441/10000 train_time:196377ms step_avg:57.07ms +[2025-09-11 06:16:54] [Rank 0] step:3441/10000 train_time:196377ms step_avg:57.07ms +[2025-09-11 06:16:54] [Rank 0] step:3461/10000 train_time:197046ms step_avg:56.93ms +[2025-09-11 06:16:54] [Rank 0] step:3461/10000 train_time:197046ms step_avg:56.93ms +[2025-09-11 06:16:55] [Rank 0] step:3481/10000 train_time:197716ms step_avg:56.80ms +[2025-09-11 06:16:55] [Rank 0] step:3481/10000 train_time:197716ms step_avg:56.80ms +[2025-09-11 06:16:56] [Rank 0] step:3501/10000 train_time:198384ms step_avg:56.67ms +[2025-09-11 06:16:56] [Rank 0] step:3501/10000 train_time:198384ms step_avg:56.67ms +[2025-09-11 06:16:56] [Rank 0] step:3521/10000 train_time:199053ms step_avg:56.53ms 
+[2025-09-11 06:16:56] [Rank 0] step:3521/10000 train_time:199053ms step_avg:56.53ms +[2025-09-11 06:16:57] [Rank 0] step:3541/10000 train_time:199725ms step_avg:56.40ms +[2025-09-11 06:16:57] [Rank 0] step:3541/10000 train_time:199725ms step_avg:56.40ms +[2025-09-11 06:16:58] [Rank 0] step:3561/10000 train_time:200393ms step_avg:56.27ms +[2025-09-11 06:16:58] [Rank 0] step:3561/10000 train_time:200393ms step_avg:56.27ms +[2025-09-11 06:16:58] [Rank 0] step:3581/10000 train_time:201063ms step_avg:56.15ms +[2025-09-11 06:16:58] [Rank 0] step:3581/10000 train_time:201063ms step_avg:56.15ms +[2025-09-11 06:16:59] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 06:16:59] [Rank 0] PRINT: [Validation @ Step 3600] Calculating base validation loss... +[2025-09-11 06:17:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 06:17:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Getting true update direction 'v'... +[2025-09-11 06:17:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 06:17:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating update norms... +[2025-09-11 06:17:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:17:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:17:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 06:17:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up layer parameter groups... +[2025-09-11 06:17:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... +[2025-09-11 06:17:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise update norms... 
+[2025-09-11 06:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 06:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Setting up HVP calculation in float32... +[2025-09-11 06:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 06:17:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating TOTAL sharpness... +[2025-09-11 06:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 06:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Calculating layer-wise sharpness... +[2025-09-11 06:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 06:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 12 layers for sharpness... +[2025-09-11 06:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 06:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_1'... +[2025-09-11 06:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 06:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_2'... +[2025-09-11 06:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 06:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_3'... +[2025-09-11 06:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 06:17:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_4'... +[2025-09-11 06:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 06:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_5'... +[2025-09-11 06:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... 
+[2025-09-11 06:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_6'... +[2025-09-11 06:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 06:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_7'... +[2025-09-11 06:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 06:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_8'... +[2025-09-11 06:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 06:17:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_9'... +[2025-09-11 06:17:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 06:17:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_10'... +[2025-09-11 06:17:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 06:17:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_11'... +[2025-09-11 06:17:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 06:17:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Processing 'layer_12'... +[2025-09-11 06:17:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. +[2025-09-11 06:17:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 3600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:17:10] [Rank 0] PRINT: step:3600/10000 val_loss:4.7015 total_sharp:6.9485e-05 L1_sharp:2.0336e-04 L2_sharp:8.0412e-06 L3_sharp:2.6426e-05 L4_sharp:1.6542e-05 L5_sharp:9.5959e-07 L6_sharp:8.3355e-06 L7_sharp:1.0081e-05 L8_sharp:3.5422e-05 L9_sharp:4.1584e-05 L10_sharp:3.8114e-05 L11_sharp:6.1172e-05 L12_sharp:3.6368e-04 total_fnorm:2.0100e+02 total_l1_linf:6.2259e+05 total_spectral:1.0050e+02 L1_fnorm:2.3125e+01 L2_fnorm:2.3000e+01 L3_fnorm:2.3375e+01 L4_fnorm:2.4125e+01 L5_fnorm:2.3875e+01 L6_fnorm:2.4375e+01 L7_fnorm:2.4625e+01 L8_fnorm:2.4125e+01 L9_fnorm:2.4375e+01 L10_fnorm:2.4375e+01 L11_fnorm:2.4375e+01 L12_fnorm:2.4375e+01 L1_l1linf:6.4062e+00 L2_l1linf:4.5625e+00 L3_l1linf:4.7812e+00 L4_l1linf:5.5938e+00 L5_l1linf:5.6562e+00 L6_l1linf:5.7188e+00 L7_l1linf:5.8750e+00 L8_l1linf:5.9688e+00 L9_l1linf:5.9375e+00 L10_l1linf:5.8125e+00 L11_l1linf:5.4375e+00 L12_l1linf:5.2188e+00 L1_spectral:2.8589e-01 L2_spectral:2.8552e-01 L3_spectral:2.8676e-01 L4_spectral:2.9043e-01 L5_spectral:2.9385e-01 L6_spectral:2.9291e-01 L7_spectral:2.9208e-01 L8_spectral:2.9870e-01 L9_spectral:2.9277e-01 L10_spectral:2.9388e-01 L11_spectral:2.9547e-01 L12_spectral:2.9470e-01 train_time:201714ms step_avg:56.03ms +[2025-09-11 06:17:10] [Rank 0] PRINT: step:3600/10000 val_loss:4.7015 total_sharp:6.9485e-05 L1_sharp:2.0336e-04 L2_sharp:8.0412e-06 L3_sharp:2.6426e-05 L4_sharp:1.6542e-05 L5_sharp:9.5959e-07 L6_sharp:8.3355e-06 L7_sharp:1.0081e-05 L8_sharp:3.5422e-05 L9_sharp:4.1584e-05 L10_sharp:3.8114e-05 L11_sharp:6.1172e-05 L12_sharp:3.6368e-04 total_fnorm:2.0100e+02 total_l1_linf:6.2259e+05 total_spectral:1.0050e+02 L1_fnorm:2.3125e+01 L2_fnorm:2.3000e+01 L3_fnorm:2.3375e+01 L4_fnorm:2.4125e+01 L5_fnorm:2.3875e+01 L6_fnorm:2.4375e+01 L7_fnorm:2.4625e+01 L8_fnorm:2.4125e+01 L9_fnorm:2.4375e+01 L10_fnorm:2.4375e+01 L11_fnorm:2.4375e+01 L12_fnorm:2.4375e+01 L1_l1linf:6.4062e+00 L2_l1linf:4.5625e+00 L3_l1linf:4.7812e+00 L4_l1linf:5.5938e+00 L5_l1linf:5.6562e+00 
L6_l1linf:5.7188e+00 L7_l1linf:5.8750e+00 L8_l1linf:5.9688e+00 L9_l1linf:5.9375e+00 L10_l1linf:5.8125e+00 L11_l1linf:5.4375e+00 L12_l1linf:5.2188e+00 L1_spectral:2.8589e-01 L2_spectral:2.8552e-01 L3_spectral:2.8676e-01 L4_spectral:2.9043e-01 L5_spectral:2.9385e-01 L6_spectral:2.9291e-01 L7_spectral:2.9208e-01 L8_spectral:2.9870e-01 L9_spectral:2.9277e-01 L10_spectral:2.9388e-01 L11_spectral:2.9547e-01 L12_spectral:2.9470e-01 train_time:201714ms step_avg:56.03ms +[2025-09-11 06:17:12] [Rank 0] step:3601/10000 train_time:203372ms step_avg:56.48ms +[2025-09-11 06:17:12] [Rank 0] step:3601/10000 train_time:203372ms step_avg:56.48ms +[2025-09-11 06:17:12] [Rank 0] step:3621/10000 train_time:204039ms step_avg:56.35ms +[2025-09-11 06:17:12] [Rank 0] step:3621/10000 train_time:204039ms step_avg:56.35ms +[2025-09-11 06:17:13] [Rank 0] step:3641/10000 train_time:204708ms step_avg:56.22ms +[2025-09-11 06:17:13] [Rank 0] step:3641/10000 train_time:204708ms step_avg:56.22ms +[2025-09-11 06:17:14] [Rank 0] step:3661/10000 train_time:205378ms step_avg:56.10ms +[2025-09-11 06:17:14] [Rank 0] step:3661/10000 train_time:205378ms step_avg:56.10ms +[2025-09-11 06:17:14] [Rank 0] step:3681/10000 train_time:206047ms step_avg:55.98ms +[2025-09-11 06:17:14] [Rank 0] step:3681/10000 train_time:206047ms step_avg:55.98ms +[2025-09-11 06:17:15] [Rank 0] step:3701/10000 train_time:206715ms step_avg:55.85ms +[2025-09-11 06:17:15] [Rank 0] step:3701/10000 train_time:206715ms step_avg:55.85ms +[2025-09-11 06:17:16] [Rank 0] step:3721/10000 train_time:207394ms step_avg:55.74ms +[2025-09-11 06:17:16] [Rank 0] step:3721/10000 train_time:207394ms step_avg:55.74ms +[2025-09-11 06:17:16] [Rank 0] step:3741/10000 train_time:208075ms step_avg:55.62ms +[2025-09-11 06:17:16] [Rank 0] step:3741/10000 train_time:208075ms step_avg:55.62ms +[2025-09-11 06:17:17] [Rank 0] step:3761/10000 train_time:208756ms step_avg:55.51ms +[2025-09-11 06:17:17] [Rank 0] step:3761/10000 train_time:208756ms step_avg:55.51ms 
+[2025-09-11 06:17:18] [Rank 0] step:3781/10000 train_time:209437ms step_avg:55.39ms +[2025-09-11 06:17:18] [Rank 0] step:3781/10000 train_time:209437ms step_avg:55.39ms +[2025-09-11 06:17:19] [Rank 0] step:3801/10000 train_time:210118ms step_avg:55.28ms +[2025-09-11 06:17:19] [Rank 0] step:3801/10000 train_time:210118ms step_avg:55.28ms +[2025-09-11 06:17:19] [Rank 0] step:3821/10000 train_time:210799ms step_avg:55.17ms +[2025-09-11 06:17:19] [Rank 0] step:3821/10000 train_time:210799ms step_avg:55.17ms +[2025-09-11 06:17:20] [Rank 0] step:3841/10000 train_time:211480ms step_avg:55.06ms +[2025-09-11 06:17:20] [Rank 0] step:3841/10000 train_time:211480ms step_avg:55.06ms +[2025-09-11 06:17:21] [Rank 0] step:3861/10000 train_time:212160ms step_avg:54.95ms +[2025-09-11 06:17:21] [Rank 0] step:3861/10000 train_time:212160ms step_avg:54.95ms +[2025-09-11 06:17:21] [Rank 0] step:3881/10000 train_time:212841ms step_avg:54.84ms +[2025-09-11 06:17:21] [Rank 0] step:3881/10000 train_time:212841ms step_avg:54.84ms +[2025-09-11 06:17:22] [Rank 0] step:3901/10000 train_time:213521ms step_avg:54.73ms +[2025-09-11 06:17:22] [Rank 0] step:3901/10000 train_time:213521ms step_avg:54.73ms +[2025-09-11 06:17:23] [Rank 0] step:3921/10000 train_time:214201ms step_avg:54.63ms +[2025-09-11 06:17:23] [Rank 0] step:3921/10000 train_time:214201ms step_avg:54.63ms +[2025-09-11 06:17:23] [Rank 0] step:3941/10000 train_time:214881ms step_avg:54.52ms +[2025-09-11 06:17:23] [Rank 0] step:3941/10000 train_time:214881ms step_avg:54.52ms +[2025-09-11 06:17:24] [Rank 0] step:3961/10000 train_time:215561ms step_avg:54.42ms +[2025-09-11 06:17:24] [Rank 0] step:3961/10000 train_time:215561ms step_avg:54.42ms +[2025-09-11 06:17:25] [Rank 0] step:3981/10000 train_time:216804ms step_avg:54.46ms +[2025-09-11 06:17:25] [Rank 0] step:3981/10000 train_time:216804ms step_avg:54.46ms +[2025-09-11 06:17:26] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... 
+[2025-09-11 06:17:26] [Rank 0] PRINT: [Validation @ Step 4000] Calculating base validation loss... +[2025-09-11 06:17:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 06:17:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Getting true update direction 'v'... +[2025-09-11 06:17:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 06:17:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating update norms... +[2025-09-11 06:17:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:17:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:17:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 06:17:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up layer parameter groups... +[2025-09-11 06:17:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 06:17:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise update norms... +[2025-09-11 06:17:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 06:17:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Setting up HVP calculation in float32... +[2025-09-11 06:17:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 06:17:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating TOTAL sharpness... +[2025-09-11 06:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 06:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Calculating layer-wise sharpness... +[2025-09-11 06:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... 
+[2025-09-11 06:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 12 layers for sharpness... +[2025-09-11 06:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 06:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_1'... +[2025-09-11 06:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 06:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_2'... +[2025-09-11 06:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 06:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_3'... +[2025-09-11 06:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 06:17:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_4'... +[2025-09-11 06:17:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 06:17:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_5'... +[2025-09-11 06:17:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 06:17:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_6'... +[2025-09-11 06:17:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 06:17:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_7'... +[2025-09-11 06:17:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 06:17:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_8'... +[2025-09-11 06:17:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 06:17:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_9'... +[2025-09-11 06:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... 
+[2025-09-11 06:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_10'... +[2025-09-11 06:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 06:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_11'... +[2025-09-11 06:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 06:17:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Processing 'layer_12'... +[2025-09-11 06:17:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 06:17:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4000] Analysis complete. Generated 52 metrics. +[2025-09-11 06:17:37] [Rank 0] PRINT: step:4000/10000 val_loss:4.6709 total_sharp:6.9340e-05 L1_sharp:1.6513e-04 L2_sharp:1.6617e-05 L3_sharp:9.3488e-06 L4_sharp:1.3404e-06 L5_sharp:2.6123e-05 L6_sharp:1.7679e-07 L7_sharp:8.2155e-06 L8_sharp:3.9214e-05 L9_sharp:4.4478e-05 L10_sharp:4.2768e-05 L11_sharp:7.0165e-05 L12_sharp:5.8192e-04 total_fnorm:2.1600e+02 total_l1_linf:6.5536e+05 total_spectral:1.0750e+02 L1_fnorm:2.3000e+01 L2_fnorm:2.2125e+01 L3_fnorm:2.3000e+01 L4_fnorm:2.3875e+01 L5_fnorm:2.3500e+01 L6_fnorm:2.4125e+01 L7_fnorm:2.4500e+01 L8_fnorm:2.4000e+01 L9_fnorm:2.4375e+01 L10_fnorm:2.4375e+01 L11_fnorm:2.4375e+01 L12_fnorm:2.4375e+01 L1_l1linf:6.5312e+00 L2_l1linf:4.5938e+00 L3_l1linf:4.4688e+00 L4_l1linf:5.2188e+00 L5_l1linf:5.3750e+00 L6_l1linf:5.2500e+00 L7_l1linf:5.7812e+00 L8_l1linf:6.0938e+00 L9_l1linf:5.9062e+00 L10_l1linf:5.7812e+00 L11_l1linf:5.5312e+00 L12_l1linf:5.3750e+00 L1_spectral:2.8746e-01 L2_spectral:2.8615e-01 L3_spectral:2.8763e-01 L4_spectral:2.9138e-01 L5_spectral:2.9818e-01 L6_spectral:2.9593e-01 L7_spectral:2.9319e-01 L8_spectral:2.9783e-01 L9_spectral:2.9360e-01 L10_spectral:2.9572e-01 L11_spectral:2.9412e-01 L12_spectral:2.9464e-01 train_time:217466ms step_avg:54.37ms +[2025-09-11 06:17:37] [Rank 0] PRINT: step:4000/10000 
val_loss:4.6709 total_sharp:6.9340e-05 L1_sharp:1.6513e-04 L2_sharp:1.6617e-05 L3_sharp:9.3488e-06 L4_sharp:1.3404e-06 L5_sharp:2.6123e-05 L6_sharp:1.7679e-07 L7_sharp:8.2155e-06 L8_sharp:3.9214e-05 L9_sharp:4.4478e-05 L10_sharp:4.2768e-05 L11_sharp:7.0165e-05 L12_sharp:5.8192e-04 total_fnorm:2.1600e+02 total_l1_linf:6.5536e+05 total_spectral:1.0750e+02 L1_fnorm:2.3000e+01 L2_fnorm:2.2125e+01 L3_fnorm:2.3000e+01 L4_fnorm:2.3875e+01 L5_fnorm:2.3500e+01 L6_fnorm:2.4125e+01 L7_fnorm:2.4500e+01 L8_fnorm:2.4000e+01 L9_fnorm:2.4375e+01 L10_fnorm:2.4375e+01 L11_fnorm:2.4375e+01 L12_fnorm:2.4375e+01 L1_l1linf:6.5312e+00 L2_l1linf:4.5938e+00 L3_l1linf:4.4688e+00 L4_l1linf:5.2188e+00 L5_l1linf:5.3750e+00 L6_l1linf:5.2500e+00 L7_l1linf:5.7812e+00 L8_l1linf:6.0938e+00 L9_l1linf:5.9062e+00 L10_l1linf:5.7812e+00 L11_l1linf:5.5312e+00 L12_l1linf:5.3750e+00 L1_spectral:2.8746e-01 L2_spectral:2.8615e-01 L3_spectral:2.8763e-01 L4_spectral:2.9138e-01 L5_spectral:2.9818e-01 L6_spectral:2.9593e-01 L7_spectral:2.9319e-01 L8_spectral:2.9783e-01 L9_spectral:2.9360e-01 L10_spectral:2.9572e-01 L11_spectral:2.9412e-01 L12_spectral:2.9464e-01 train_time:217466ms step_avg:54.37ms +[2025-09-11 06:17:39] [Rank 0] step:4001/10000 train_time:219617ms step_avg:54.89ms +[2025-09-11 06:17:39] [Rank 0] step:4001/10000 train_time:219617ms step_avg:54.89ms +[2025-09-11 06:17:40] [Rank 0] step:4021/10000 train_time:220514ms step_avg:54.84ms +[2025-09-11 06:17:40] [Rank 0] step:4021/10000 train_time:220514ms step_avg:54.84ms +[2025-09-11 06:17:41] [Rank 0] step:4041/10000 train_time:221206ms step_avg:54.74ms +[2025-09-11 06:17:41] [Rank 0] step:4041/10000 train_time:221206ms step_avg:54.74ms +[2025-09-11 06:17:41] [Rank 0] step:4061/10000 train_time:221885ms step_avg:54.64ms +[2025-09-11 06:17:41] [Rank 0] step:4061/10000 train_time:221885ms step_avg:54.64ms +[2025-09-11 06:17:42] [Rank 0] step:4081/10000 train_time:222568ms step_avg:54.54ms +[2025-09-11 06:17:42] [Rank 0] step:4081/10000 
train_time:222568ms step_avg:54.54ms +[2025-09-11 06:17:43] [Rank 0] step:4101/10000 train_time:223249ms step_avg:54.44ms +[2025-09-11 06:17:43] [Rank 0] step:4101/10000 train_time:223249ms step_avg:54.44ms +[2025-09-11 06:17:43] [Rank 0] step:4121/10000 train_time:223929ms step_avg:54.34ms +[2025-09-11 06:17:43] [Rank 0] step:4121/10000 train_time:223929ms step_avg:54.34ms +[2025-09-11 06:17:44] [Rank 0] step:4141/10000 train_time:224609ms step_avg:54.24ms +[2025-09-11 06:17:44] [Rank 0] step:4141/10000 train_time:224609ms step_avg:54.24ms +[2025-09-11 06:17:45] [Rank 0] step:4161/10000 train_time:225289ms step_avg:54.14ms +[2025-09-11 06:17:45] [Rank 0] step:4161/10000 train_time:225289ms step_avg:54.14ms +[2025-09-11 06:17:46] [Rank 0] step:4181/10000 train_time:225969ms step_avg:54.05ms +[2025-09-11 06:17:46] [Rank 0] step:4181/10000 train_time:225969ms step_avg:54.05ms +[2025-09-11 06:17:46] [Rank 0] step:4201/10000 train_time:226649ms step_avg:53.95ms +[2025-09-11 06:17:46] [Rank 0] step:4201/10000 train_time:226649ms step_avg:53.95ms +[2025-09-11 06:17:47] [Rank 0] step:4221/10000 train_time:227328ms step_avg:53.86ms +[2025-09-11 06:17:47] [Rank 0] step:4221/10000 train_time:227328ms step_avg:53.86ms +[2025-09-11 06:17:48] [Rank 0] step:4241/10000 train_time:228009ms step_avg:53.76ms +[2025-09-11 06:17:48] [Rank 0] step:4241/10000 train_time:228009ms step_avg:53.76ms +[2025-09-11 06:17:48] [Rank 0] step:4261/10000 train_time:228688ms step_avg:53.67ms +[2025-09-11 06:17:48] [Rank 0] step:4261/10000 train_time:228688ms step_avg:53.67ms +[2025-09-11 06:17:49] [Rank 0] step:4281/10000 train_time:229369ms step_avg:53.58ms +[2025-09-11 06:17:49] [Rank 0] step:4281/10000 train_time:229369ms step_avg:53.58ms +[2025-09-11 06:17:50] [Rank 0] step:4301/10000 train_time:230049ms step_avg:53.49ms +[2025-09-11 06:17:50] [Rank 0] step:4301/10000 train_time:230049ms step_avg:53.49ms +[2025-09-11 06:17:50] [Rank 0] step:4321/10000 train_time:230730ms step_avg:53.40ms 
+[2025-09-11 06:17:50] [Rank 0] step:4321/10000 train_time:230730ms step_avg:53.40ms +[2025-09-11 06:17:51] [Rank 0] step:4341/10000 train_time:231409ms step_avg:53.31ms +[2025-09-11 06:17:51] [Rank 0] step:4341/10000 train_time:231409ms step_avg:53.31ms +[2025-09-11 06:17:52] [Rank 0] step:4361/10000 train_time:232088ms step_avg:53.22ms +[2025-09-11 06:17:52] [Rank 0] step:4361/10000 train_time:232088ms step_avg:53.22ms +[2025-09-11 06:17:52] [Rank 0] step:4381/10000 train_time:232769ms step_avg:53.13ms +[2025-09-11 06:17:52] [Rank 0] step:4381/10000 train_time:232769ms step_avg:53.13ms +[2025-09-11 06:17:53] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 06:17:53] [Rank 0] PRINT: [Validation @ Step 4400] Calculating base validation loss... +[2025-09-11 06:17:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 06:17:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Getting true update direction 'v'... +[2025-09-11 06:17:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 06:17:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating update norms... +[2025-09-11 06:17:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:17:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:17:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 06:17:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up layer parameter groups... +[2025-09-11 06:17:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... +[2025-09-11 06:17:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise update norms... 
+[2025-09-11 06:18:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 06:18:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Setting up HVP calculation in float32... +[2025-09-11 06:18:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 06:18:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating TOTAL sharpness... +[2025-09-11 06:18:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 06:18:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Calculating layer-wise sharpness... +[2025-09-11 06:18:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 06:18:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 12 layers for sharpness... +[2025-09-11 06:18:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 06:18:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_1'... +[2025-09-11 06:18:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 06:18:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_2'... +[2025-09-11 06:18:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 06:18:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_3'... +[2025-09-11 06:18:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 06:18:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_4'... +[2025-09-11 06:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 06:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_5'... +[2025-09-11 06:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... 
+[2025-09-11 06:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_6'... +[2025-09-11 06:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 06:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_7'... +[2025-09-11 06:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 06:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_8'... +[2025-09-11 06:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 06:18:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_9'... +[2025-09-11 06:18:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 06:18:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_10'... +[2025-09-11 06:18:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 06:18:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_11'... +[2025-09-11 06:18:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 06:18:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Processing 'layer_12'... +[2025-09-11 06:18:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. +[2025-09-11 06:18:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:18:04] [Rank 0] PRINT: step:4400/10000 val_loss:4.6666 total_sharp:8.5637e-05 L1_sharp:9.3351e-05 L2_sharp:1.1597e-05 L3_sharp:-1.3355e-08 L4_sharp:2.8021e-05 L5_sharp:3.5243e-06 L6_sharp:-8.1856e-07 L7_sharp:1.2826e-05 L8_sharp:3.5507e-05 L9_sharp:3.9030e-05 L10_sharp:4.8044e-05 L11_sharp:5.9997e-05 L12_sharp:8.6312e-04 total_fnorm:2.0400e+02 total_l1_linf:6.1850e+05 total_spectral:1.0200e+02 L1_fnorm:2.3000e+01 L2_fnorm:2.2375e+01 L3_fnorm:2.3000e+01 L4_fnorm:2.3875e+01 L5_fnorm:2.3500e+01 L6_fnorm:2.4125e+01 L7_fnorm:2.4500e+01 L8_fnorm:2.4125e+01 L9_fnorm:2.4375e+01 L10_fnorm:2.4375e+01 L11_fnorm:2.4375e+01 L12_fnorm:2.4375e+01 L1_l1linf:6.4688e+00 L2_l1linf:4.3125e+00 L3_l1linf:4.3438e+00 L4_l1linf:5.1875e+00 L5_l1linf:5.0000e+00 L6_l1linf:5.1562e+00 L7_l1linf:5.8750e+00 L8_l1linf:6.0312e+00 L9_l1linf:5.9062e+00 L10_l1linf:5.7188e+00 L11_l1linf:5.5000e+00 L12_l1linf:5.2812e+00 L1_spectral:2.8739e-01 L2_spectral:2.8699e-01 L3_spectral:2.9093e-01 L4_spectral:2.9198e-01 L5_spectral:2.9688e-01 L6_spectral:2.9620e-01 L7_spectral:2.9430e-01 L8_spectral:3.0080e-01 L9_spectral:2.9575e-01 L10_spectral:2.9802e-01 L11_spectral:2.9648e-01 L12_spectral:2.9439e-01 train_time:233430ms step_avg:53.05ms +[2025-09-11 06:18:04] [Rank 0] PRINT: step:4400/10000 val_loss:4.6666 total_sharp:8.5637e-05 L1_sharp:9.3351e-05 L2_sharp:1.1597e-05 L3_sharp:-1.3355e-08 L4_sharp:2.8021e-05 L5_sharp:3.5243e-06 L6_sharp:-8.1856e-07 L7_sharp:1.2826e-05 L8_sharp:3.5507e-05 L9_sharp:3.9030e-05 L10_sharp:4.8044e-05 L11_sharp:5.9997e-05 L12_sharp:8.6312e-04 total_fnorm:2.0400e+02 total_l1_linf:6.1850e+05 total_spectral:1.0200e+02 L1_fnorm:2.3000e+01 L2_fnorm:2.2375e+01 L3_fnorm:2.3000e+01 L4_fnorm:2.3875e+01 L5_fnorm:2.3500e+01 L6_fnorm:2.4125e+01 L7_fnorm:2.4500e+01 L8_fnorm:2.4125e+01 L9_fnorm:2.4375e+01 L10_fnorm:2.4375e+01 L11_fnorm:2.4375e+01 L12_fnorm:2.4375e+01 L1_l1linf:6.4688e+00 L2_l1linf:4.3125e+00 L3_l1linf:4.3438e+00 L4_l1linf:5.1875e+00 L5_l1linf:5.0000e+00 
L6_l1linf:5.1562e+00 L7_l1linf:5.8750e+00 L8_l1linf:6.0312e+00 L9_l1linf:5.9062e+00 L10_l1linf:5.7188e+00 L11_l1linf:5.5000e+00 L12_l1linf:5.2812e+00 L1_spectral:2.8739e-01 L2_spectral:2.8699e-01 L3_spectral:2.9093e-01 L4_spectral:2.9198e-01 L5_spectral:2.9688e-01 L6_spectral:2.9620e-01 L7_spectral:2.9430e-01 L8_spectral:3.0080e-01 L9_spectral:2.9575e-01 L10_spectral:2.9802e-01 L11_spectral:2.9648e-01 L12_spectral:2.9439e-01 train_time:233430ms step_avg:53.05ms +[2025-09-11 06:18:06] [Rank 0] step:4401/10000 train_time:235126ms step_avg:53.43ms +[2025-09-11 06:18:06] [Rank 0] step:4401/10000 train_time:235126ms step_avg:53.43ms +[2025-09-11 06:18:07] [Rank 0] step:4421/10000 train_time:235839ms step_avg:53.35ms +[2025-09-11 06:18:07] [Rank 0] step:4421/10000 train_time:235839ms step_avg:53.35ms +[2025-09-11 06:18:07] [Rank 0] step:4441/10000 train_time:236521ms step_avg:53.26ms +[2025-09-11 06:18:07] [Rank 0] step:4441/10000 train_time:236521ms step_avg:53.26ms +[2025-09-11 06:18:08] [Rank 0] step:4461/10000 train_time:237204ms step_avg:53.17ms +[2025-09-11 06:18:08] [Rank 0] step:4461/10000 train_time:237204ms step_avg:53.17ms +[2025-09-11 06:18:09] [Rank 0] step:4481/10000 train_time:237887ms step_avg:53.09ms +[2025-09-11 06:18:09] [Rank 0] step:4481/10000 train_time:237887ms step_avg:53.09ms +[2025-09-11 06:18:09] [Rank 0] step:4501/10000 train_time:238570ms step_avg:53.00ms +[2025-09-11 06:18:09] [Rank 0] step:4501/10000 train_time:238570ms step_avg:53.00ms +[2025-09-11 06:18:10] [Rank 0] step:4521/10000 train_time:239252ms step_avg:52.92ms +[2025-09-11 06:18:10] [Rank 0] step:4521/10000 train_time:239252ms step_avg:52.92ms +[2025-09-11 06:18:11] [Rank 0] step:4541/10000 train_time:239936ms step_avg:52.84ms +[2025-09-11 06:18:11] [Rank 0] step:4541/10000 train_time:239936ms step_avg:52.84ms +[2025-09-11 06:18:11] [Rank 0] step:4561/10000 train_time:240618ms step_avg:52.76ms +[2025-09-11 06:18:11] [Rank 0] step:4561/10000 train_time:240618ms step_avg:52.76ms 
+[2025-09-11 06:18:12] [Rank 0] step:4581/10000 train_time:241300ms step_avg:52.67ms +[2025-09-11 06:18:12] [Rank 0] step:4581/10000 train_time:241300ms step_avg:52.67ms +[2025-09-11 06:18:13] [Rank 0] step:4601/10000 train_time:241983ms step_avg:52.59ms +[2025-09-11 06:18:13] [Rank 0] step:4601/10000 train_time:241983ms step_avg:52.59ms +[2025-09-11 06:18:13] [Rank 0] step:4621/10000 train_time:242666ms step_avg:52.51ms +[2025-09-11 06:18:13] [Rank 0] step:4621/10000 train_time:242666ms step_avg:52.51ms +[2025-09-11 06:18:14] [Rank 0] step:4641/10000 train_time:243349ms step_avg:52.43ms +[2025-09-11 06:18:14] [Rank 0] step:4641/10000 train_time:243349ms step_avg:52.43ms +[2025-09-11 06:18:15] [Rank 0] step:4661/10000 train_time:244033ms step_avg:52.36ms +[2025-09-11 06:18:15] [Rank 0] step:4661/10000 train_time:244033ms step_avg:52.36ms +[2025-09-11 06:18:15] [Rank 0] step:4681/10000 train_time:244716ms step_avg:52.28ms +[2025-09-11 06:18:15] [Rank 0] step:4681/10000 train_time:244716ms step_avg:52.28ms +[2025-09-11 06:18:16] [Rank 0] step:4701/10000 train_time:245399ms step_avg:52.20ms +[2025-09-11 06:18:16] [Rank 0] step:4701/10000 train_time:245399ms step_avg:52.20ms +[2025-09-11 06:18:17] [Rank 0] step:4721/10000 train_time:246082ms step_avg:52.12ms +[2025-09-11 06:18:17] [Rank 0] step:4721/10000 train_time:246082ms step_avg:52.12ms +[2025-09-11 06:18:17] [Rank 0] step:4741/10000 train_time:246764ms step_avg:52.05ms +[2025-09-11 06:18:17] [Rank 0] step:4741/10000 train_time:246764ms step_avg:52.05ms +[2025-09-11 06:18:18] [Rank 0] step:4761/10000 train_time:247448ms step_avg:51.97ms +[2025-09-11 06:18:18] [Rank 0] step:4761/10000 train_time:247448ms step_avg:51.97ms +[2025-09-11 06:18:19] [Rank 0] step:4781/10000 train_time:248130ms step_avg:51.90ms +[2025-09-11 06:18:19] [Rank 0] step:4781/10000 train_time:248130ms step_avg:51.90ms +[2025-09-11 06:18:19] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... 
+[2025-09-11 06:18:19] [Rank 0] PRINT: [Validation @ Step 4800] Calculating base validation loss... +[2025-09-11 06:18:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 06:18:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Getting true update direction 'v'... +[2025-09-11 06:18:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 06:18:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating update norms... +[2025-09-11 06:18:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:18:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:18:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 06:18:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up layer parameter groups... +[2025-09-11 06:18:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 06:18:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise update norms... +[2025-09-11 06:18:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 06:18:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Setting up HVP calculation in float32... +[2025-09-11 06:18:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 06:18:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating TOTAL sharpness... +[2025-09-11 06:18:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 06:18:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Calculating layer-wise sharpness... +[2025-09-11 06:18:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... 
+[2025-09-11 06:18:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 12 layers for sharpness... +[2025-09-11 06:18:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 06:18:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_1'... +[2025-09-11 06:18:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 06:18:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_2'... +[2025-09-11 06:18:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 06:18:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_3'... +[2025-09-11 06:18:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 06:18:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_4'... +[2025-09-11 06:18:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 06:18:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_5'... +[2025-09-11 06:18:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 06:18:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_6'... +[2025-09-11 06:18:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 06:18:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_7'... +[2025-09-11 06:18:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 06:18:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_8'... +[2025-09-11 06:18:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 06:18:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_9'... +[2025-09-11 06:18:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... 
+[2025-09-11 06:18:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_10'... +[2025-09-11 06:18:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 06:18:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_11'... +[2025-09-11 06:18:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 06:18:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Processing 'layer_12'... +[2025-09-11 06:18:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 06:18:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 4800] Analysis complete. Generated 52 metrics. +[2025-09-11 06:18:30] [Rank 0] PRINT: step:4800/10000 val_loss:4.5999 total_sharp:5.8264e-05 L1_sharp:1.7976e-04 L2_sharp:1.8598e-05 L3_sharp:1.9435e-05 L4_sharp:1.5234e-05 L5_sharp:6.7342e-06 L6_sharp:4.2138e-07 L7_sharp:1.2717e-05 L8_sharp:3.3593e-05 L9_sharp:3.2523e-05 L10_sharp:4.5618e-05 L11_sharp:7.9434e-05 L12_sharp:4.1637e-04 total_fnorm:2.1300e+02 total_l1_linf:6.5536e+05 total_spectral:1.0600e+02 L1_fnorm:2.3125e+01 L2_fnorm:2.2375e+01 L3_fnorm:2.2750e+01 L4_fnorm:2.4000e+01 L5_fnorm:2.3750e+01 L6_fnorm:2.4125e+01 L7_fnorm:2.4625e+01 L8_fnorm:2.4125e+01 L9_fnorm:2.4500e+01 L10_fnorm:2.4375e+01 L11_fnorm:2.4375e+01 L12_fnorm:2.4375e+01 L1_l1linf:6.4375e+00 L2_l1linf:4.4062e+00 L3_l1linf:4.4375e+00 L4_l1linf:5.0938e+00 L5_l1linf:5.0625e+00 L6_l1linf:4.9375e+00 L7_l1linf:5.8438e+00 L8_l1linf:6.2812e+00 L9_l1linf:6.0625e+00 L10_l1linf:5.8125e+00 L11_l1linf:5.5312e+00 L12_l1linf:5.3438e+00 L1_spectral:2.8778e-01 L2_spectral:2.8756e-01 L3_spectral:2.9069e-01 L4_spectral:2.9518e-01 L5_spectral:3.0144e-01 L6_spectral:2.9715e-01 L7_spectral:2.9431e-01 L8_spectral:3.0187e-01 L9_spectral:2.9905e-01 L10_spectral:2.9863e-01 L11_spectral:2.9624e-01 L12_spectral:2.9812e-01 train_time:248793ms step_avg:51.83ms +[2025-09-11 06:18:30] [Rank 0] PRINT: step:4800/10000 
val_loss:4.5999 total_sharp:5.8264e-05 L1_sharp:1.7976e-04 L2_sharp:1.8598e-05 L3_sharp:1.9435e-05 L4_sharp:1.5234e-05 L5_sharp:6.7342e-06 L6_sharp:4.2138e-07 L7_sharp:1.2717e-05 L8_sharp:3.3593e-05 L9_sharp:3.2523e-05 L10_sharp:4.5618e-05 L11_sharp:7.9434e-05 L12_sharp:4.1637e-04 total_fnorm:2.1300e+02 total_l1_linf:6.5536e+05 total_spectral:1.0600e+02 L1_fnorm:2.3125e+01 L2_fnorm:2.2375e+01 L3_fnorm:2.2750e+01 L4_fnorm:2.4000e+01 L5_fnorm:2.3750e+01 L6_fnorm:2.4125e+01 L7_fnorm:2.4625e+01 L8_fnorm:2.4125e+01 L9_fnorm:2.4500e+01 L10_fnorm:2.4375e+01 L11_fnorm:2.4375e+01 L12_fnorm:2.4375e+01 L1_l1linf:6.4375e+00 L2_l1linf:4.4062e+00 L3_l1linf:4.4375e+00 L4_l1linf:5.0938e+00 L5_l1linf:5.0625e+00 L6_l1linf:4.9375e+00 L7_l1linf:5.8438e+00 L8_l1linf:6.2812e+00 L9_l1linf:6.0625e+00 L10_l1linf:5.8125e+00 L11_l1linf:5.5312e+00 L12_l1linf:5.3438e+00 L1_spectral:2.8778e-01 L2_spectral:2.8756e-01 L3_spectral:2.9069e-01 L4_spectral:2.9518e-01 L5_spectral:3.0144e-01 L6_spectral:2.9715e-01 L7_spectral:2.9431e-01 L8_spectral:3.0187e-01 L9_spectral:2.9905e-01 L10_spectral:2.9863e-01 L11_spectral:2.9624e-01 L12_spectral:2.9812e-01 train_time:248793ms step_avg:51.83ms +[2025-09-11 06:18:32] [Rank 0] step:4801/10000 train_time:250507ms step_avg:52.18ms +[2025-09-11 06:18:32] [Rank 0] step:4801/10000 train_time:250507ms step_avg:52.18ms +[2025-09-11 06:18:33] [Rank 0] step:4821/10000 train_time:251205ms step_avg:52.11ms +[2025-09-11 06:18:33] [Rank 0] step:4821/10000 train_time:251205ms step_avg:52.11ms +[2025-09-11 06:18:34] [Rank 0] step:4841/10000 train_time:251888ms step_avg:52.03ms +[2025-09-11 06:18:34] [Rank 0] step:4841/10000 train_time:251888ms step_avg:52.03ms +[2025-09-11 06:18:34] [Rank 0] step:4861/10000 train_time:252568ms step_avg:51.96ms +[2025-09-11 06:18:34] [Rank 0] step:4861/10000 train_time:252568ms step_avg:51.96ms +[2025-09-11 06:18:35] [Rank 0] step:4881/10000 train_time:253252ms step_avg:51.89ms +[2025-09-11 06:18:35] [Rank 0] step:4881/10000 
train_time:253252ms step_avg:51.89ms +[2025-09-11 06:18:36] [Rank 0] step:4901/10000 train_time:253934ms step_avg:51.81ms +[2025-09-11 06:18:36] [Rank 0] step:4901/10000 train_time:253934ms step_avg:51.81ms +[2025-09-11 06:18:36] [Rank 0] step:4921/10000 train_time:254617ms step_avg:51.74ms +[2025-09-11 06:18:36] [Rank 0] step:4921/10000 train_time:254617ms step_avg:51.74ms +[2025-09-11 06:18:37] [Rank 0] step:4941/10000 train_time:255299ms step_avg:51.67ms +[2025-09-11 06:18:37] [Rank 0] step:4941/10000 train_time:255299ms step_avg:51.67ms +[2025-09-11 06:18:38] [Rank 0] step:4961/10000 train_time:255982ms step_avg:51.60ms +[2025-09-11 06:18:38] [Rank 0] step:4961/10000 train_time:255982ms step_avg:51.60ms +[2025-09-11 06:18:38] [Rank 0] step:4981/10000 train_time:256664ms step_avg:51.53ms +[2025-09-11 06:18:38] [Rank 0] step:4981/10000 train_time:256664ms step_avg:51.53ms +[2025-09-11 06:18:39] [Rank 0] step:5001/10000 train_time:257346ms step_avg:51.46ms +[2025-09-11 06:18:39] [Rank 0] step:5001/10000 train_time:257346ms step_avg:51.46ms +[2025-09-11 06:18:40] [Rank 0] step:5021/10000 train_time:258028ms step_avg:51.39ms +[2025-09-11 06:18:40] [Rank 0] step:5021/10000 train_time:258028ms step_avg:51.39ms +[2025-09-11 06:18:40] [Rank 0] step:5041/10000 train_time:258708ms step_avg:51.32ms +[2025-09-11 06:18:40] [Rank 0] step:5041/10000 train_time:258708ms step_avg:51.32ms +[2025-09-11 06:18:41] [Rank 0] step:5061/10000 train_time:259389ms step_avg:51.25ms +[2025-09-11 06:18:41] [Rank 0] step:5061/10000 train_time:259389ms step_avg:51.25ms +[2025-09-11 06:18:42] [Rank 0] step:5081/10000 train_time:260070ms step_avg:51.18ms +[2025-09-11 06:18:42] [Rank 0] step:5081/10000 train_time:260070ms step_avg:51.18ms +[2025-09-11 06:18:42] [Rank 0] step:5101/10000 train_time:260752ms step_avg:51.12ms +[2025-09-11 06:18:42] [Rank 0] step:5101/10000 train_time:260752ms step_avg:51.12ms +[2025-09-11 06:18:43] [Rank 0] step:5121/10000 train_time:261433ms step_avg:51.05ms 
+[2025-09-11 06:18:43] [Rank 0] step:5121/10000 train_time:261433ms step_avg:51.05ms +[2025-09-11 06:18:44] [Rank 0] step:5141/10000 train_time:262116ms step_avg:50.99ms +[2025-09-11 06:18:44] [Rank 0] step:5141/10000 train_time:262116ms step_avg:50.99ms +[2025-09-11 06:18:44] [Rank 0] step:5161/10000 train_time:262797ms step_avg:50.92ms +[2025-09-11 06:18:44] [Rank 0] step:5161/10000 train_time:262797ms step_avg:50.92ms +[2025-09-11 06:18:45] [Rank 0] step:5181/10000 train_time:263478ms step_avg:50.85ms +[2025-09-11 06:18:45] [Rank 0] step:5181/10000 train_time:263478ms step_avg:50.85ms +[2025-09-11 06:18:46] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 06:18:46] [Rank 0] PRINT: [Validation @ Step 5200] Calculating base validation loss... +[2025-09-11 06:18:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 06:18:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Getting true update direction 'v'... +[2025-09-11 06:18:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 06:18:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating update norms... +[2025-09-11 06:18:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:18:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:18:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 06:18:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up layer parameter groups... +[2025-09-11 06:18:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... +[2025-09-11 06:18:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise update norms... 
+[2025-09-11 06:18:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 06:18:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Setting up HVP calculation in float32... +[2025-09-11 06:18:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 06:18:53] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating TOTAL sharpness... +[2025-09-11 06:18:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 06:18:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Calculating layer-wise sharpness... +[2025-09-11 06:18:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 06:18:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 12 layers for sharpness... +[2025-09-11 06:18:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 06:18:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_1'... +[2025-09-11 06:18:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 06:18:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_2'... +[2025-09-11 06:18:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 06:18:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_3'... +[2025-09-11 06:18:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 06:18:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_4'... +[2025-09-11 06:18:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 06:18:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_5'... +[2025-09-11 06:18:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... 
+[2025-09-11 06:18:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_6'... +[2025-09-11 06:18:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 06:18:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_7'... +[2025-09-11 06:18:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 06:18:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_8'... +[2025-09-11 06:18:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 06:18:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_9'... +[2025-09-11 06:18:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 06:18:55] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_10'... +[2025-09-11 06:18:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 06:18:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_11'... +[2025-09-11 06:18:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 06:18:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Processing 'layer_12'... +[2025-09-11 06:18:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. +[2025-09-11 06:18:57] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:18:57] [Rank 0] PRINT: step:5200/10000 val_loss:4.5811 total_sharp:6.5825e-05 L1_sharp:1.9098e-04 L2_sharp:-1.4108e-05 L3_sharp:2.3189e-05 L4_sharp:1.4742e-05 L5_sharp:-7.4076e-07 L6_sharp:3.4158e-06 L7_sharp:1.5395e-05 L8_sharp:2.7549e-05 L9_sharp:4.0408e-05 L10_sharp:4.3749e-05 L11_sharp:5.9524e-05 L12_sharp:3.8067e-04 total_fnorm:1.9900e+02 total_l1_linf:5.9802e+05 total_spectral:9.9000e+01 L1_fnorm:2.3000e+01 L2_fnorm:2.1875e+01 L3_fnorm:2.2625e+01 L4_fnorm:2.3875e+01 L5_fnorm:2.3625e+01 L6_fnorm:2.3625e+01 L7_fnorm:2.4625e+01 L8_fnorm:2.4375e+01 L9_fnorm:2.4625e+01 L10_fnorm:2.4500e+01 L11_fnorm:2.4500e+01 L12_fnorm:2.4500e+01 L1_l1linf:6.5000e+00 L2_l1linf:4.5000e+00 L3_l1linf:4.4062e+00 L4_l1linf:4.8438e+00 L5_l1linf:4.8125e+00 L6_l1linf:4.5625e+00 L7_l1linf:5.7812e+00 L8_l1linf:6.1875e+00 L9_l1linf:6.0625e+00 L10_l1linf:5.7812e+00 L11_l1linf:5.5312e+00 L12_l1linf:5.3125e+00 L1_spectral:2.8829e-01 L2_spectral:2.8891e-01 L3_spectral:2.9162e-01 L4_spectral:2.9328e-01 L5_spectral:3.0169e-01 L6_spectral:2.9693e-01 L7_spectral:2.9637e-01 L8_spectral:3.0394e-01 L9_spectral:2.9762e-01 L10_spectral:3.0324e-01 L11_spectral:3.0024e-01 L12_spectral:3.0140e-01 train_time:264147ms step_avg:50.80ms +[2025-09-11 06:18:57] [Rank 0] PRINT: step:5200/10000 val_loss:4.5811 total_sharp:6.5825e-05 L1_sharp:1.9098e-04 L2_sharp:-1.4108e-05 L3_sharp:2.3189e-05 L4_sharp:1.4742e-05 L5_sharp:-7.4076e-07 L6_sharp:3.4158e-06 L7_sharp:1.5395e-05 L8_sharp:2.7549e-05 L9_sharp:4.0408e-05 L10_sharp:4.3749e-05 L11_sharp:5.9524e-05 L12_sharp:3.8067e-04 total_fnorm:1.9900e+02 total_l1_linf:5.9802e+05 total_spectral:9.9000e+01 L1_fnorm:2.3000e+01 L2_fnorm:2.1875e+01 L3_fnorm:2.2625e+01 L4_fnorm:2.3875e+01 L5_fnorm:2.3625e+01 L6_fnorm:2.3625e+01 L7_fnorm:2.4625e+01 L8_fnorm:2.4375e+01 L9_fnorm:2.4625e+01 L10_fnorm:2.4500e+01 L11_fnorm:2.4500e+01 L12_fnorm:2.4500e+01 L1_l1linf:6.5000e+00 L2_l1linf:4.5000e+00 L3_l1linf:4.4062e+00 L4_l1linf:4.8438e+00 L5_l1linf:4.8125e+00 
L6_l1linf:4.5625e+00 L7_l1linf:5.7812e+00 L8_l1linf:6.1875e+00 L9_l1linf:6.0625e+00 L10_l1linf:5.7812e+00 L11_l1linf:5.5312e+00 L12_l1linf:5.3125e+00 L1_spectral:2.8829e-01 L2_spectral:2.8891e-01 L3_spectral:2.9162e-01 L4_spectral:2.9328e-01 L5_spectral:3.0169e-01 L6_spectral:2.9693e-01 L7_spectral:2.9637e-01 L8_spectral:3.0394e-01 L9_spectral:2.9762e-01 L10_spectral:3.0324e-01 L11_spectral:3.0024e-01 L12_spectral:3.0140e-01 train_time:264147ms step_avg:50.80ms +[2025-09-11 06:18:59] [Rank 0] step:5201/10000 train_time:265850ms step_avg:51.12ms +[2025-09-11 06:18:59] [Rank 0] step:5201/10000 train_time:265850ms step_avg:51.12ms +[2025-09-11 06:18:59] [Rank 0] step:5221/10000 train_time:266576ms step_avg:51.06ms +[2025-09-11 06:18:59] [Rank 0] step:5221/10000 train_time:266576ms step_avg:51.06ms +[2025-09-11 06:19:00] [Rank 0] step:5241/10000 train_time:267268ms step_avg:51.00ms +[2025-09-11 06:19:00] [Rank 0] step:5241/10000 train_time:267268ms step_avg:51.00ms +[2025-09-11 06:19:01] [Rank 0] step:5261/10000 train_time:267963ms step_avg:50.93ms +[2025-09-11 06:19:01] [Rank 0] step:5261/10000 train_time:267963ms step_avg:50.93ms +[2025-09-11 06:19:01] [Rank 0] step:5281/10000 train_time:268656ms step_avg:50.87ms +[2025-09-11 06:19:01] [Rank 0] step:5281/10000 train_time:268656ms step_avg:50.87ms +[2025-09-11 06:19:02] [Rank 0] step:5301/10000 train_time:269349ms step_avg:50.81ms +[2025-09-11 06:19:02] [Rank 0] step:5301/10000 train_time:269349ms step_avg:50.81ms +[2025-09-11 06:19:03] [Rank 0] step:5321/10000 train_time:270041ms step_avg:50.75ms +[2025-09-11 06:19:03] [Rank 0] step:5321/10000 train_time:270041ms step_avg:50.75ms +[2025-09-11 06:19:03] [Rank 0] step:5341/10000 train_time:270733ms step_avg:50.69ms +[2025-09-11 06:19:03] [Rank 0] step:5341/10000 train_time:270733ms step_avg:50.69ms +[2025-09-11 06:19:04] [Rank 0] step:5361/10000 train_time:271426ms step_avg:50.63ms +[2025-09-11 06:19:04] [Rank 0] step:5361/10000 train_time:271426ms step_avg:50.63ms 
+[2025-09-11 06:19:05] [Rank 0] step:5381/10000 train_time:272119ms step_avg:50.57ms +[2025-09-11 06:19:05] [Rank 0] step:5381/10000 train_time:272119ms step_avg:50.57ms +[2025-09-11 06:19:06] [Rank 0] step:5401/10000 train_time:272811ms step_avg:50.51ms +[2025-09-11 06:19:06] [Rank 0] step:5401/10000 train_time:272811ms step_avg:50.51ms +[2025-09-11 06:19:06] [Rank 0] step:5421/10000 train_time:273504ms step_avg:50.45ms +[2025-09-11 06:19:06] [Rank 0] step:5421/10000 train_time:273504ms step_avg:50.45ms +[2025-09-11 06:19:07] [Rank 0] step:5441/10000 train_time:274197ms step_avg:50.39ms +[2025-09-11 06:19:07] [Rank 0] step:5441/10000 train_time:274197ms step_avg:50.39ms +[2025-09-11 06:19:08] [Rank 0] step:5461/10000 train_time:274890ms step_avg:50.34ms +[2025-09-11 06:19:08] [Rank 0] step:5461/10000 train_time:274890ms step_avg:50.34ms +[2025-09-11 06:19:08] [Rank 0] step:5481/10000 train_time:275582ms step_avg:50.28ms +[2025-09-11 06:19:08] [Rank 0] step:5481/10000 train_time:275582ms step_avg:50.28ms +[2025-09-11 06:19:09] [Rank 0] step:5501/10000 train_time:276274ms step_avg:50.22ms +[2025-09-11 06:19:09] [Rank 0] step:5501/10000 train_time:276274ms step_avg:50.22ms +[2025-09-11 06:19:10] [Rank 0] step:5521/10000 train_time:276972ms step_avg:50.17ms +[2025-09-11 06:19:10] [Rank 0] step:5521/10000 train_time:276972ms step_avg:50.17ms +[2025-09-11 06:19:10] [Rank 0] step:5541/10000 train_time:277666ms step_avg:50.11ms +[2025-09-11 06:19:10] [Rank 0] step:5541/10000 train_time:277666ms step_avg:50.11ms +[2025-09-11 06:19:11] [Rank 0] step:5561/10000 train_time:278359ms step_avg:50.06ms +[2025-09-11 06:19:11] [Rank 0] step:5561/10000 train_time:278359ms step_avg:50.06ms +[2025-09-11 06:19:12] [Rank 0] step:5581/10000 train_time:279052ms step_avg:50.00ms +[2025-09-11 06:19:12] [Rank 0] step:5581/10000 train_time:279052ms step_avg:50.00ms +[2025-09-11 06:19:12] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... 
+[2025-09-11 06:19:12] [Rank 0] PRINT: [Validation @ Step 5600] Calculating base validation loss... +[2025-09-11 06:19:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 06:19:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Getting true update direction 'v'... +[2025-09-11 06:19:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 06:19:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating update norms... +[2025-09-11 06:19:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:19:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:19:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 06:19:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up layer parameter groups... +[2025-09-11 06:19:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 06:19:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise update norms... +[2025-09-11 06:19:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 06:19:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Setting up HVP calculation in float32... +[2025-09-11 06:19:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 06:19:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating TOTAL sharpness... +[2025-09-11 06:19:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 06:19:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Calculating layer-wise sharpness... +[2025-09-11 06:19:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... 
+[2025-09-11 06:19:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 12 layers for sharpness... +[2025-09-11 06:19:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 06:19:20] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_1'... +[2025-09-11 06:19:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 06:19:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_2'... +[2025-09-11 06:19:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 06:19:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_3'... +[2025-09-11 06:19:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 06:19:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_4'... +[2025-09-11 06:19:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 06:19:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_5'... +[2025-09-11 06:19:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 06:19:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_6'... +[2025-09-11 06:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 06:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_7'... +[2025-09-11 06:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 06:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_8'... +[2025-09-11 06:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 06:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_9'... +[2025-09-11 06:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... 
+[2025-09-11 06:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_10'... +[2025-09-11 06:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 06:19:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_11'... +[2025-09-11 06:19:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 06:19:23] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Processing 'layer_12'... +[2025-09-11 06:19:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 06:19:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 5600] Analysis complete. Generated 52 metrics. +[2025-09-11 06:19:24] [Rank 0] PRINT: step:5600/10000 val_loss:4.5676 total_sharp:8.9410e-05 L1_sharp:1.5806e-04 L2_sharp:2.4112e-06 L3_sharp:3.9242e-06 L4_sharp:1.4358e-05 L5_sharp:1.3746e-05 L6_sharp:-4.7075e-07 L7_sharp:1.6688e-05 L8_sharp:3.8551e-05 L9_sharp:3.4962e-05 L10_sharp:4.2422e-05 L11_sharp:5.7945e-05 L12_sharp:2.4818e-03 total_fnorm:2.0700e+02 total_l1_linf:6.2669e+05 total_spectral:1.0350e+02 L1_fnorm:2.2875e+01 L2_fnorm:2.1625e+01 L3_fnorm:2.2500e+01 L4_fnorm:2.3625e+01 L5_fnorm:2.3125e+01 L6_fnorm:2.3625e+01 L7_fnorm:2.4625e+01 L8_fnorm:2.4375e+01 L9_fnorm:2.4625e+01 L10_fnorm:2.4500e+01 L11_fnorm:2.4375e+01 L12_fnorm:2.4500e+01 L1_l1linf:6.4062e+00 L2_l1linf:4.4688e+00 L3_l1linf:4.4688e+00 L4_l1linf:4.5625e+00 L5_l1linf:4.4062e+00 L6_l1linf:4.2500e+00 L7_l1linf:5.7500e+00 L8_l1linf:6.1875e+00 L9_l1linf:6.0625e+00 L10_l1linf:5.7500e+00 L11_l1linf:5.5312e+00 L12_l1linf:5.3438e+00 L1_spectral:2.8916e-01 L2_spectral:2.8836e-01 L3_spectral:2.9227e-01 L4_spectral:2.9455e-01 L5_spectral:2.9991e-01 L6_spectral:2.9896e-01 L7_spectral:2.9704e-01 L8_spectral:3.0277e-01 L9_spectral:2.9908e-01 L10_spectral:3.0281e-01 L11_spectral:3.0008e-01 L12_spectral:3.0111e-01 train_time:279724ms step_avg:49.95ms +[2025-09-11 06:19:24] [Rank 0] PRINT: step:5600/10000 
val_loss:4.5676 total_sharp:8.9410e-05 L1_sharp:1.5806e-04 L2_sharp:2.4112e-06 L3_sharp:3.9242e-06 L4_sharp:1.4358e-05 L5_sharp:1.3746e-05 L6_sharp:-4.7075e-07 L7_sharp:1.6688e-05 L8_sharp:3.8551e-05 L9_sharp:3.4962e-05 L10_sharp:4.2422e-05 L11_sharp:5.7945e-05 L12_sharp:2.4818e-03 total_fnorm:2.0700e+02 total_l1_linf:6.2669e+05 total_spectral:1.0350e+02 L1_fnorm:2.2875e+01 L2_fnorm:2.1625e+01 L3_fnorm:2.2500e+01 L4_fnorm:2.3625e+01 L5_fnorm:2.3125e+01 L6_fnorm:2.3625e+01 L7_fnorm:2.4625e+01 L8_fnorm:2.4375e+01 L9_fnorm:2.4625e+01 L10_fnorm:2.4500e+01 L11_fnorm:2.4375e+01 L12_fnorm:2.4500e+01 L1_l1linf:6.4062e+00 L2_l1linf:4.4688e+00 L3_l1linf:4.4688e+00 L4_l1linf:4.5625e+00 L5_l1linf:4.4062e+00 L6_l1linf:4.2500e+00 L7_l1linf:5.7500e+00 L8_l1linf:6.1875e+00 L9_l1linf:6.0625e+00 L10_l1linf:5.7500e+00 L11_l1linf:5.5312e+00 L12_l1linf:5.3438e+00 L1_spectral:2.8916e-01 L2_spectral:2.8836e-01 L3_spectral:2.9227e-01 L4_spectral:2.9455e-01 L5_spectral:2.9991e-01 L6_spectral:2.9896e-01 L7_spectral:2.9704e-01 L8_spectral:3.0277e-01 L9_spectral:2.9908e-01 L10_spectral:3.0281e-01 L11_spectral:3.0008e-01 L12_spectral:3.0111e-01 train_time:279724ms step_avg:49.95ms +[2025-09-11 06:19:25] [Rank 0] step:5601/10000 train_time:281430ms step_avg:50.25ms +[2025-09-11 06:19:25] [Rank 0] step:5601/10000 train_time:281430ms step_avg:50.25ms +[2025-09-11 06:19:26] [Rank 0] step:5621/10000 train_time:282162ms step_avg:50.20ms +[2025-09-11 06:19:26] [Rank 0] step:5621/10000 train_time:282162ms step_avg:50.20ms +[2025-09-11 06:19:27] [Rank 0] step:5641/10000 train_time:282854ms step_avg:50.14ms +[2025-09-11 06:19:27] [Rank 0] step:5641/10000 train_time:282854ms step_avg:50.14ms +[2025-09-11 06:19:27] [Rank 0] step:5661/10000 train_time:283546ms step_avg:50.09ms +[2025-09-11 06:19:27] [Rank 0] step:5661/10000 train_time:283546ms step_avg:50.09ms +[2025-09-11 06:19:28] [Rank 0] step:5681/10000 train_time:284240ms step_avg:50.03ms +[2025-09-11 06:19:28] [Rank 0] step:5681/10000 
train_time:284240ms step_avg:50.03ms +[2025-09-11 06:19:29] [Rank 0] step:5701/10000 train_time:284933ms step_avg:49.98ms +[2025-09-11 06:19:29] [Rank 0] step:5701/10000 train_time:284933ms step_avg:49.98ms +[2025-09-11 06:19:29] [Rank 0] step:5721/10000 train_time:285625ms step_avg:49.93ms +[2025-09-11 06:19:29] [Rank 0] step:5721/10000 train_time:285625ms step_avg:49.93ms +[2025-09-11 06:19:30] [Rank 0] step:5741/10000 train_time:286319ms step_avg:49.87ms +[2025-09-11 06:19:30] [Rank 0] step:5741/10000 train_time:286319ms step_avg:49.87ms +[2025-09-11 06:19:31] [Rank 0] step:5761/10000 train_time:287411ms step_avg:49.89ms +[2025-09-11 06:19:31] [Rank 0] step:5761/10000 train_time:287411ms step_avg:49.89ms +[2025-09-11 06:19:32] [Rank 0] step:5781/10000 train_time:288224ms step_avg:49.86ms +[2025-09-11 06:19:32] [Rank 0] step:5781/10000 train_time:288224ms step_avg:49.86ms +[2025-09-11 06:19:33] [Rank 0] step:5801/10000 train_time:288917ms step_avg:49.80ms +[2025-09-11 06:19:33] [Rank 0] step:5801/10000 train_time:288917ms step_avg:49.80ms +[2025-09-11 06:19:34] [Rank 0] step:5821/10000 train_time:289853ms step_avg:49.79ms +[2025-09-11 06:19:34] [Rank 0] step:5821/10000 train_time:289853ms step_avg:49.79ms +[2025-09-11 06:19:34] [Rank 0] step:5841/10000 train_time:290546ms step_avg:49.74ms +[2025-09-11 06:19:34] [Rank 0] step:5841/10000 train_time:290546ms step_avg:49.74ms +[2025-09-11 06:19:35] [Rank 0] step:5861/10000 train_time:291237ms step_avg:49.69ms +[2025-09-11 06:19:35] [Rank 0] step:5861/10000 train_time:291237ms step_avg:49.69ms +[2025-09-11 06:19:36] [Rank 0] step:5881/10000 train_time:291928ms step_avg:49.64ms +[2025-09-11 06:19:36] [Rank 0] step:5881/10000 train_time:291928ms step_avg:49.64ms +[2025-09-11 06:19:36] [Rank 0] step:5901/10000 train_time:292621ms step_avg:49.59ms +[2025-09-11 06:19:36] [Rank 0] step:5901/10000 train_time:292621ms step_avg:49.59ms +[2025-09-11 06:19:37] [Rank 0] step:5921/10000 train_time:293316ms step_avg:49.54ms 
+[2025-09-11 06:19:37] [Rank 0] step:5921/10000 train_time:293316ms step_avg:49.54ms +[2025-09-11 06:19:38] [Rank 0] step:5941/10000 train_time:294009ms step_avg:49.49ms +[2025-09-11 06:19:38] [Rank 0] step:5941/10000 train_time:294009ms step_avg:49.49ms +[2025-09-11 06:19:39] [Rank 0] step:5961/10000 train_time:294703ms step_avg:49.44ms +[2025-09-11 06:19:39] [Rank 0] step:5961/10000 train_time:294703ms step_avg:49.44ms +[2025-09-11 06:19:39] [Rank 0] step:5981/10000 train_time:295396ms step_avg:49.39ms +[2025-09-11 06:19:39] [Rank 0] step:5981/10000 train_time:295396ms step_avg:49.39ms +[2025-09-11 06:19:40] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 06:19:40] [Rank 0] PRINT: [Validation @ Step 6000] Calculating base validation loss... +[2025-09-11 06:19:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 06:19:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Getting true update direction 'v'... +[2025-09-11 06:19:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 06:19:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating update norms... +[2025-09-11 06:19:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:19:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:19:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 06:19:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up layer parameter groups... +[2025-09-11 06:19:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... +[2025-09-11 06:19:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise update norms... 
+[2025-09-11 06:19:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 06:19:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Setting up HVP calculation in float32... +[2025-09-11 06:19:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 06:19:47] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating TOTAL sharpness... +[2025-09-11 06:19:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 06:19:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Calculating layer-wise sharpness... +[2025-09-11 06:19:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 06:19:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 12 layers for sharpness... +[2025-09-11 06:19:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 06:19:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_1'... +[2025-09-11 06:19:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 06:19:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_2'... +[2025-09-11 06:19:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 06:19:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_3'... +[2025-09-11 06:19:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 06:19:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_4'... +[2025-09-11 06:19:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 06:19:48] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_5'... +[2025-09-11 06:19:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... 
+[2025-09-11 06:19:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_6'... +[2025-09-11 06:19:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 06:19:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_7'... +[2025-09-11 06:19:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 06:19:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_8'... +[2025-09-11 06:19:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 06:19:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_9'... +[2025-09-11 06:19:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 06:19:49] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_10'... +[2025-09-11 06:19:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 06:19:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_11'... +[2025-09-11 06:19:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 06:19:50] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Processing 'layer_12'... +[2025-09-11 06:19:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. +[2025-09-11 06:19:51] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:19:51] [Rank 0] PRINT: step:6000/10000 val_loss:4.5246 total_sharp:5.6321e-05 L1_sharp:1.9033e-04 L2_sharp:-1.0175e-05 L3_sharp:1.9863e-05 L4_sharp:2.1143e-05 L5_sharp:7.3645e-06 L6_sharp:-5.6841e-06 L7_sharp:7.6793e-06 L8_sharp:3.9400e-05 L9_sharp:4.5902e-05 L10_sharp:5.0555e-05 L11_sharp:5.4303e-05 L12_sharp:2.9255e-04 total_fnorm:2.0100e+02 total_l1_linf:6.0621e+05 total_spectral:1.0050e+02 L1_fnorm:2.3000e+01 L2_fnorm:2.1875e+01 L3_fnorm:2.2500e+01 L4_fnorm:2.3500e+01 L5_fnorm:2.3500e+01 L6_fnorm:2.3875e+01 L7_fnorm:2.4750e+01 L8_fnorm:2.4500e+01 L9_fnorm:2.4625e+01 L10_fnorm:2.4625e+01 L11_fnorm:2.4500e+01 L12_fnorm:2.4625e+01 L1_l1linf:6.3750e+00 L2_l1linf:4.5625e+00 L3_l1linf:4.5000e+00 L4_l1linf:4.4688e+00 L5_l1linf:4.4375e+00 L6_l1linf:4.5938e+00 L7_l1linf:5.8125e+00 L8_l1linf:6.2500e+00 L9_l1linf:6.1250e+00 L10_l1linf:5.8125e+00 L11_l1linf:5.5312e+00 L12_l1linf:5.3438e+00 L1_spectral:2.9058e-01 L2_spectral:2.9012e-01 L3_spectral:2.9744e-01 L4_spectral:2.9775e-01 L5_spectral:3.0054e-01 L6_spectral:3.0013e-01 L7_spectral:2.9889e-01 L8_spectral:3.0266e-01 L9_spectral:3.0432e-01 L10_spectral:3.0329e-01 L11_spectral:3.0174e-01 L12_spectral:3.0395e-01 train_time:296072ms step_avg:49.35ms +[2025-09-11 06:19:51] [Rank 0] PRINT: step:6000/10000 val_loss:4.5246 total_sharp:5.6321e-05 L1_sharp:1.9033e-04 L2_sharp:-1.0175e-05 L3_sharp:1.9863e-05 L4_sharp:2.1143e-05 L5_sharp:7.3645e-06 L6_sharp:-5.6841e-06 L7_sharp:7.6793e-06 L8_sharp:3.9400e-05 L9_sharp:4.5902e-05 L10_sharp:5.0555e-05 L11_sharp:5.4303e-05 L12_sharp:2.9255e-04 total_fnorm:2.0100e+02 total_l1_linf:6.0621e+05 total_spectral:1.0050e+02 L1_fnorm:2.3000e+01 L2_fnorm:2.1875e+01 L3_fnorm:2.2500e+01 L4_fnorm:2.3500e+01 L5_fnorm:2.3500e+01 L6_fnorm:2.3875e+01 L7_fnorm:2.4750e+01 L8_fnorm:2.4500e+01 L9_fnorm:2.4625e+01 L10_fnorm:2.4625e+01 L11_fnorm:2.4500e+01 L12_fnorm:2.4625e+01 L1_l1linf:6.3750e+00 L2_l1linf:4.5625e+00 L3_l1linf:4.5000e+00 L4_l1linf:4.4688e+00 L5_l1linf:4.4375e+00 
L6_l1linf:4.5938e+00 L7_l1linf:5.8125e+00 L8_l1linf:6.2500e+00 L9_l1linf:6.1250e+00 L10_l1linf:5.8125e+00 L11_l1linf:5.5312e+00 L12_l1linf:5.3438e+00 L1_spectral:2.9058e-01 L2_spectral:2.9012e-01 L3_spectral:2.9744e-01 L4_spectral:2.9775e-01 L5_spectral:3.0054e-01 L6_spectral:3.0013e-01 L7_spectral:2.9889e-01 L8_spectral:3.0266e-01 L9_spectral:3.0432e-01 L10_spectral:3.0329e-01 L11_spectral:3.0174e-01 L12_spectral:3.0395e-01 train_time:296072ms step_avg:49.35ms +[2025-09-11 06:19:53] [Rank 0] step:6001/10000 train_time:297783ms step_avg:49.62ms +[2025-09-11 06:19:53] [Rank 0] step:6001/10000 train_time:297783ms step_avg:49.62ms +[2025-09-11 06:19:53] [Rank 0] step:6021/10000 train_time:298508ms step_avg:49.58ms +[2025-09-11 06:19:53] [Rank 0] step:6021/10000 train_time:298508ms step_avg:49.58ms +[2025-09-11 06:19:54] [Rank 0] step:6041/10000 train_time:299206ms step_avg:49.53ms +[2025-09-11 06:19:54] [Rank 0] step:6041/10000 train_time:299206ms step_avg:49.53ms +[2025-09-11 06:19:55] [Rank 0] step:6061/10000 train_time:299902ms step_avg:49.48ms +[2025-09-11 06:19:55] [Rank 0] step:6061/10000 train_time:299902ms step_avg:49.48ms +[2025-09-11 06:19:55] [Rank 0] step:6081/10000 train_time:300598ms step_avg:49.43ms +[2025-09-11 06:19:55] [Rank 0] step:6081/10000 train_time:300598ms step_avg:49.43ms +[2025-09-11 06:19:56] [Rank 0] step:6101/10000 train_time:301293ms step_avg:49.38ms +[2025-09-11 06:19:56] [Rank 0] step:6101/10000 train_time:301293ms step_avg:49.38ms +[2025-09-11 06:19:57] [Rank 0] step:6121/10000 train_time:301988ms step_avg:49.34ms +[2025-09-11 06:19:57] [Rank 0] step:6121/10000 train_time:301988ms step_avg:49.34ms +[2025-09-11 06:19:57] [Rank 0] step:6141/10000 train_time:302683ms step_avg:49.29ms +[2025-09-11 06:19:57] [Rank 0] step:6141/10000 train_time:302683ms step_avg:49.29ms +[2025-09-11 06:19:58] [Rank 0] step:6161/10000 train_time:303378ms step_avg:49.24ms +[2025-09-11 06:19:58] [Rank 0] step:6161/10000 train_time:303378ms step_avg:49.24ms 
+[2025-09-11 06:19:59] [Rank 0] step:6181/10000 train_time:304070ms step_avg:49.19ms +[2025-09-11 06:19:59] [Rank 0] step:6181/10000 train_time:304070ms step_avg:49.19ms +[2025-09-11 06:20:00] [Rank 0] step:6201/10000 train_time:304765ms step_avg:49.15ms +[2025-09-11 06:20:00] [Rank 0] step:6201/10000 train_time:304765ms step_avg:49.15ms +[2025-09-11 06:20:00] [Rank 0] step:6221/10000 train_time:305460ms step_avg:49.10ms +[2025-09-11 06:20:00] [Rank 0] step:6221/10000 train_time:305460ms step_avg:49.10ms +[2025-09-11 06:20:01] [Rank 0] step:6241/10000 train_time:306253ms step_avg:49.07ms +[2025-09-11 06:20:01] [Rank 0] step:6241/10000 train_time:306253ms step_avg:49.07ms +[2025-09-11 06:20:02] [Rank 0] step:6261/10000 train_time:307028ms step_avg:49.04ms +[2025-09-11 06:20:02] [Rank 0] step:6261/10000 train_time:307028ms step_avg:49.04ms +[2025-09-11 06:20:03] [Rank 0] step:6281/10000 train_time:307722ms step_avg:48.99ms +[2025-09-11 06:20:03] [Rank 0] step:6281/10000 train_time:307722ms step_avg:48.99ms +[2025-09-11 06:20:03] [Rank 0] step:6301/10000 train_time:308417ms step_avg:48.95ms +[2025-09-11 06:20:03] [Rank 0] step:6301/10000 train_time:308417ms step_avg:48.95ms +[2025-09-11 06:20:04] [Rank 0] step:6321/10000 train_time:309114ms step_avg:48.90ms +[2025-09-11 06:20:04] [Rank 0] step:6321/10000 train_time:309114ms step_avg:48.90ms +[2025-09-11 06:20:05] [Rank 0] step:6341/10000 train_time:309809ms step_avg:48.86ms +[2025-09-11 06:20:05] [Rank 0] step:6341/10000 train_time:309809ms step_avg:48.86ms +[2025-09-11 06:20:05] [Rank 0] step:6361/10000 train_time:310504ms step_avg:48.81ms +[2025-09-11 06:20:05] [Rank 0] step:6361/10000 train_time:310504ms step_avg:48.81ms +[2025-09-11 06:20:06] [Rank 0] step:6381/10000 train_time:311199ms step_avg:48.77ms +[2025-09-11 06:20:06] [Rank 0] step:6381/10000 train_time:311199ms step_avg:48.77ms +[2025-09-11 06:20:07] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... 
+[2025-09-11 06:20:07] [Rank 0] PRINT: [Validation @ Step 6400] Calculating base validation loss... +[2025-09-11 06:20:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 06:20:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Getting true update direction 'v'... +[2025-09-11 06:20:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 06:20:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating update norms... +[2025-09-11 06:20:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:20:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:20:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 06:20:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up layer parameter groups... +[2025-09-11 06:20:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 06:20:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise update norms... +[2025-09-11 06:20:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 06:20:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Setting up HVP calculation in float32... +[2025-09-11 06:20:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 06:20:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating TOTAL sharpness... +[2025-09-11 06:20:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 06:20:14] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Calculating layer-wise sharpness... +[2025-09-11 06:20:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... 
+[2025-09-11 06:20:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 12 layers for sharpness... +[2025-09-11 06:20:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 06:20:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_1'... +[2025-09-11 06:20:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 06:20:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_2'... +[2025-09-11 06:20:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 06:20:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_3'... +[2025-09-11 06:20:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 06:20:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_4'... +[2025-09-11 06:20:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 06:20:15] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_5'... +[2025-09-11 06:20:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 06:20:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_6'... +[2025-09-11 06:20:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 06:20:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_7'... +[2025-09-11 06:20:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 06:20:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_8'... +[2025-09-11 06:20:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 06:20:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_9'... +[2025-09-11 06:20:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... 
+[2025-09-11 06:20:16] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_10'... +[2025-09-11 06:20:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 06:20:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_11'... +[2025-09-11 06:20:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 06:20:17] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Processing 'layer_12'... +[2025-09-11 06:20:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 06:20:18] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6400] Analysis complete. Generated 52 metrics. +[2025-09-11 06:20:18] [Rank 0] PRINT: step:6400/10000 val_loss:4.4971 total_sharp:5.9737e-05 L1_sharp:1.6598e-04 L2_sharp:1.5475e-05 L3_sharp:1.3567e-05 L4_sharp:1.1900e-05 L5_sharp:3.7186e-06 L6_sharp:1.3589e-05 L7_sharp:1.7271e-05 L8_sharp:3.4033e-05 L9_sharp:3.6853e-05 L10_sharp:5.1052e-05 L11_sharp:4.6563e-05 L12_sharp:4.1189e-04 total_fnorm:1.8000e+02 total_l1_linf:5.4067e+05 total_spectral:9.1000e+01 L1_fnorm:2.1250e+01 L2_fnorm:1.9625e+01 L3_fnorm:2.0750e+01 L4_fnorm:2.1875e+01 L5_fnorm:2.1625e+01 L6_fnorm:2.1875e+01 L7_fnorm:2.3125e+01 L8_fnorm:2.2750e+01 L9_fnorm:2.3000e+01 L10_fnorm:2.2750e+01 L11_fnorm:2.2750e+01 L12_fnorm:2.2875e+01 L1_l1linf:5.8125e+00 L2_l1linf:4.2812e+00 L3_l1linf:4.1250e+00 L4_l1linf:3.9531e+00 L5_l1linf:3.9844e+00 L6_l1linf:3.9531e+00 L7_l1linf:5.3438e+00 L8_l1linf:5.6250e+00 L9_l1linf:5.5938e+00 L10_l1linf:5.2188e+00 L11_l1linf:5.0625e+00 L12_l1linf:4.8438e+00 L1_spectral:2.7212e-01 L2_spectral:2.7290e-01 L3_spectral:2.7490e-01 L4_spectral:2.7801e-01 L5_spectral:2.8018e-01 L6_spectral:2.7992e-01 L7_spectral:2.8104e-01 L8_spectral:2.8238e-01 L9_spectral:2.8106e-01 L10_spectral:2.8228e-01 L11_spectral:2.8295e-01 L12_spectral:2.8100e-01 train_time:311873ms step_avg:48.73ms +[2025-09-11 06:20:18] [Rank 0] PRINT: step:6400/10000 
val_loss:4.4971 total_sharp:5.9737e-05 L1_sharp:1.6598e-04 L2_sharp:1.5475e-05 L3_sharp:1.3567e-05 L4_sharp:1.1900e-05 L5_sharp:3.7186e-06 L6_sharp:1.3589e-05 L7_sharp:1.7271e-05 L8_sharp:3.4033e-05 L9_sharp:3.6853e-05 L10_sharp:5.1052e-05 L11_sharp:4.6563e-05 L12_sharp:4.1189e-04 total_fnorm:1.8000e+02 total_l1_linf:5.4067e+05 total_spectral:9.1000e+01 L1_fnorm:2.1250e+01 L2_fnorm:1.9625e+01 L3_fnorm:2.0750e+01 L4_fnorm:2.1875e+01 L5_fnorm:2.1625e+01 L6_fnorm:2.1875e+01 L7_fnorm:2.3125e+01 L8_fnorm:2.2750e+01 L9_fnorm:2.3000e+01 L10_fnorm:2.2750e+01 L11_fnorm:2.2750e+01 L12_fnorm:2.2875e+01 L1_l1linf:5.8125e+00 L2_l1linf:4.2812e+00 L3_l1linf:4.1250e+00 L4_l1linf:3.9531e+00 L5_l1linf:3.9844e+00 L6_l1linf:3.9531e+00 L7_l1linf:5.3438e+00 L8_l1linf:5.6250e+00 L9_l1linf:5.5938e+00 L10_l1linf:5.2188e+00 L11_l1linf:5.0625e+00 L12_l1linf:4.8438e+00 L1_spectral:2.7212e-01 L2_spectral:2.7290e-01 L3_spectral:2.7490e-01 L4_spectral:2.7801e-01 L5_spectral:2.8018e-01 L6_spectral:2.7992e-01 L7_spectral:2.8104e-01 L8_spectral:2.8238e-01 L9_spectral:2.8106e-01 L10_spectral:2.8228e-01 L11_spectral:2.8295e-01 L12_spectral:2.8100e-01 train_time:311873ms step_avg:48.73ms +[2025-09-11 06:20:20] [Rank 0] step:6401/10000 train_time:313588ms step_avg:48.99ms +[2025-09-11 06:20:20] [Rank 0] step:6401/10000 train_time:313588ms step_avg:48.99ms +[2025-09-11 06:20:20] [Rank 0] step:6421/10000 train_time:314309ms step_avg:48.95ms +[2025-09-11 06:20:20] [Rank 0] step:6421/10000 train_time:314309ms step_avg:48.95ms +[2025-09-11 06:20:21] [Rank 0] step:6441/10000 train_time:315003ms step_avg:48.91ms +[2025-09-11 06:20:21] [Rank 0] step:6441/10000 train_time:315003ms step_avg:48.91ms +[2025-09-11 06:20:22] [Rank 0] step:6461/10000 train_time:315697ms step_avg:48.86ms +[2025-09-11 06:20:22] [Rank 0] step:6461/10000 train_time:315697ms step_avg:48.86ms +[2025-09-11 06:20:22] [Rank 0] step:6481/10000 train_time:316393ms step_avg:48.82ms +[2025-09-11 06:20:22] [Rank 0] step:6481/10000 
train_time:316393ms step_avg:48.82ms +[2025-09-11 06:20:23] [Rank 0] step:6501/10000 train_time:317090ms step_avg:48.78ms +[2025-09-11 06:20:23] [Rank 0] step:6501/10000 train_time:317090ms step_avg:48.78ms +[2025-09-11 06:20:24] [Rank 0] step:6521/10000 train_time:317785ms step_avg:48.73ms +[2025-09-11 06:20:24] [Rank 0] step:6521/10000 train_time:317785ms step_avg:48.73ms +[2025-09-11 06:20:24] [Rank 0] step:6541/10000 train_time:318478ms step_avg:48.69ms +[2025-09-11 06:20:24] [Rank 0] step:6541/10000 train_time:318478ms step_avg:48.69ms +[2025-09-11 06:20:25] [Rank 0] step:6561/10000 train_time:319173ms step_avg:48.65ms +[2025-09-11 06:20:25] [Rank 0] step:6561/10000 train_time:319173ms step_avg:48.65ms +[2025-09-11 06:20:26] [Rank 0] step:6581/10000 train_time:319868ms step_avg:48.60ms +[2025-09-11 06:20:26] [Rank 0] step:6581/10000 train_time:319868ms step_avg:48.60ms +[2025-09-11 06:20:27] [Rank 0] step:6601/10000 train_time:320563ms step_avg:48.56ms +[2025-09-11 06:20:27] [Rank 0] step:6601/10000 train_time:320563ms step_avg:48.56ms +[2025-09-11 06:20:27] [Rank 0] step:6621/10000 train_time:321256ms step_avg:48.52ms +[2025-09-11 06:20:27] [Rank 0] step:6621/10000 train_time:321256ms step_avg:48.52ms +[2025-09-11 06:20:28] [Rank 0] step:6641/10000 train_time:321952ms step_avg:48.48ms +[2025-09-11 06:20:28] [Rank 0] step:6641/10000 train_time:321952ms step_avg:48.48ms +[2025-09-11 06:20:29] [Rank 0] step:6661/10000 train_time:322647ms step_avg:48.44ms +[2025-09-11 06:20:29] [Rank 0] step:6661/10000 train_time:322647ms step_avg:48.44ms +[2025-09-11 06:20:29] [Rank 0] step:6681/10000 train_time:323349ms step_avg:48.40ms +[2025-09-11 06:20:29] [Rank 0] step:6681/10000 train_time:323349ms step_avg:48.40ms +[2025-09-11 06:20:30] [Rank 0] step:6701/10000 train_time:324048ms step_avg:48.36ms +[2025-09-11 06:20:30] [Rank 0] step:6701/10000 train_time:324048ms step_avg:48.36ms +[2025-09-11 06:20:31] [Rank 0] step:6721/10000 train_time:324751ms step_avg:48.32ms 
+[2025-09-11 06:20:31] [Rank 0] step:6721/10000 train_time:324751ms step_avg:48.32ms +[2025-09-11 06:20:31] [Rank 0] step:6741/10000 train_time:325453ms step_avg:48.28ms +[2025-09-11 06:20:31] [Rank 0] step:6741/10000 train_time:325453ms step_avg:48.28ms +[2025-09-11 06:20:32] [Rank 0] step:6761/10000 train_time:326164ms step_avg:48.24ms +[2025-09-11 06:20:32] [Rank 0] step:6761/10000 train_time:326164ms step_avg:48.24ms +[2025-09-11 06:20:33] [Rank 0] step:6781/10000 train_time:326867ms step_avg:48.20ms +[2025-09-11 06:20:33] [Rank 0] step:6781/10000 train_time:326867ms step_avg:48.20ms +[2025-09-11 06:20:33] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 06:20:33] [Rank 0] PRINT: [Validation @ Step 6800] Calculating base validation loss... +[2025-09-11 06:20:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 06:20:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Getting true update direction 'v'... +[2025-09-11 06:20:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 06:20:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating update norms... +[2025-09-11 06:20:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:20:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:20:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 06:20:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up layer parameter groups... +[2025-09-11 06:20:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... +[2025-09-11 06:20:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise update norms... 
+[2025-09-11 06:20:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 06:20:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Setting up HVP calculation in float32... +[2025-09-11 06:20:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 06:20:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating TOTAL sharpness... +[2025-09-11 06:20:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 06:20:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Calculating layer-wise sharpness... +[2025-09-11 06:20:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 06:20:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 12 layers for sharpness... +[2025-09-11 06:20:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 06:20:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_1'... +[2025-09-11 06:20:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 06:20:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_2'... +[2025-09-11 06:20:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 06:20:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_3'... +[2025-09-11 06:20:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 06:20:42] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_4'... +[2025-09-11 06:20:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 06:20:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_5'... +[2025-09-11 06:20:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... 
+[2025-09-11 06:20:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_6'... +[2025-09-11 06:20:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 06:20:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_7'... +[2025-09-11 06:20:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 06:20:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_8'... +[2025-09-11 06:20:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 06:20:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_9'... +[2025-09-11 06:20:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 06:20:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_10'... +[2025-09-11 06:20:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 06:20:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_11'... +[2025-09-11 06:20:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 06:20:44] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Processing 'layer_12'... +[2025-09-11 06:20:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. +[2025-09-11 06:20:45] [Rank 0] PRINT: [Enhanced Sharpness @ Step 6800] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:20:45] [Rank 0] PRINT: step:6800/10000 val_loss:4.4252 total_sharp:5.1278e-05 L1_sharp:1.1315e-04 L2_sharp:-1.3987e-05 L3_sharp:1.1506e-06 L4_sharp:1.2184e-05 L5_sharp:6.2663e-06 L6_sharp:-7.0478e-07 L7_sharp:2.1782e-05 L8_sharp:2.8876e-05 L9_sharp:4.1997e-05 L10_sharp:4.3804e-05 L11_sharp:5.8112e-05 L12_sharp:3.6543e-04 total_fnorm:1.7400e+02 total_l1_linf:4.9971e+05 total_spectral:8.7000e+01 L1_fnorm:1.9125e+01 L2_fnorm:1.7750e+01 L3_fnorm:1.8250e+01 L4_fnorm:1.9375e+01 L5_fnorm:1.9125e+01 L6_fnorm:1.9500e+01 L7_fnorm:2.0625e+01 L8_fnorm:2.0250e+01 L9_fnorm:2.0500e+01 L10_fnorm:2.0375e+01 L11_fnorm:2.0250e+01 L12_fnorm:2.0375e+01 L1_l1linf:5.0000e+00 L2_l1linf:3.7812e+00 L3_l1linf:3.8125e+00 L4_l1linf:3.8594e+00 L5_l1linf:3.5156e+00 L6_l1linf:3.5469e+00 L7_l1linf:4.7188e+00 L8_l1linf:5.0000e+00 L9_l1linf:4.8438e+00 L10_l1linf:4.5938e+00 L11_l1linf:4.2188e+00 L12_l1linf:4.2188e+00 L1_spectral:2.4985e-01 L2_spectral:2.5002e-01 L3_spectral:2.5377e-01 L4_spectral:2.5478e-01 L5_spectral:2.5600e-01 L6_spectral:2.5544e-01 L7_spectral:2.5625e-01 L8_spectral:2.5646e-01 L9_spectral:2.5720e-01 L10_spectral:2.5787e-01 L11_spectral:2.5336e-01 L12_spectral:2.5312e-01 train_time:327548ms step_avg:48.17ms +[2025-09-11 06:20:45] [Rank 0] PRINT: step:6800/10000 val_loss:4.4252 total_sharp:5.1278e-05 L1_sharp:1.1315e-04 L2_sharp:-1.3987e-05 L3_sharp:1.1506e-06 L4_sharp:1.2184e-05 L5_sharp:6.2663e-06 L6_sharp:-7.0478e-07 L7_sharp:2.1782e-05 L8_sharp:2.8876e-05 L9_sharp:4.1997e-05 L10_sharp:4.3804e-05 L11_sharp:5.8112e-05 L12_sharp:3.6543e-04 total_fnorm:1.7400e+02 total_l1_linf:4.9971e+05 total_spectral:8.7000e+01 L1_fnorm:1.9125e+01 L2_fnorm:1.7750e+01 L3_fnorm:1.8250e+01 L4_fnorm:1.9375e+01 L5_fnorm:1.9125e+01 L6_fnorm:1.9500e+01 L7_fnorm:2.0625e+01 L8_fnorm:2.0250e+01 L9_fnorm:2.0500e+01 L10_fnorm:2.0375e+01 L11_fnorm:2.0250e+01 L12_fnorm:2.0375e+01 L1_l1linf:5.0000e+00 L2_l1linf:3.7812e+00 L3_l1linf:3.8125e+00 L4_l1linf:3.8594e+00 L5_l1linf:3.5156e+00 
L6_l1linf:3.5469e+00 L7_l1linf:4.7188e+00 L8_l1linf:5.0000e+00 L9_l1linf:4.8438e+00 L10_l1linf:4.5938e+00 L11_l1linf:4.2188e+00 L12_l1linf:4.2188e+00 L1_spectral:2.4985e-01 L2_spectral:2.5002e-01 L3_spectral:2.5377e-01 L4_spectral:2.5478e-01 L5_spectral:2.5600e-01 L6_spectral:2.5544e-01 L7_spectral:2.5625e-01 L8_spectral:2.5646e-01 L9_spectral:2.5720e-01 L10_spectral:2.5787e-01 L11_spectral:2.5336e-01 L12_spectral:2.5312e-01 train_time:327548ms step_avg:48.17ms +[2025-09-11 06:20:47] [Rank 0] step:6801/10000 train_time:329325ms step_avg:48.42ms +[2025-09-11 06:20:47] [Rank 0] step:6801/10000 train_time:329325ms step_avg:48.42ms +[2025-09-11 06:20:47] [Rank 0] step:6821/10000 train_time:330055ms step_avg:48.39ms +[2025-09-11 06:20:47] [Rank 0] step:6821/10000 train_time:330055ms step_avg:48.39ms +[2025-09-11 06:20:48] [Rank 0] step:6841/10000 train_time:330761ms step_avg:48.35ms +[2025-09-11 06:20:48] [Rank 0] step:6841/10000 train_time:330761ms step_avg:48.35ms +[2025-09-11 06:20:49] [Rank 0] step:6861/10000 train_time:331466ms step_avg:48.31ms +[2025-09-11 06:20:49] [Rank 0] step:6861/10000 train_time:331466ms step_avg:48.31ms +[2025-09-11 06:20:50] [Rank 0] step:6881/10000 train_time:332169ms step_avg:48.27ms +[2025-09-11 06:20:50] [Rank 0] step:6881/10000 train_time:332169ms step_avg:48.27ms +[2025-09-11 06:20:50] [Rank 0] step:6901/10000 train_time:332870ms step_avg:48.24ms +[2025-09-11 06:20:50] [Rank 0] step:6901/10000 train_time:332870ms step_avg:48.24ms +[2025-09-11 06:20:51] [Rank 0] step:6921/10000 train_time:333572ms step_avg:48.20ms +[2025-09-11 06:20:51] [Rank 0] step:6921/10000 train_time:333572ms step_avg:48.20ms +[2025-09-11 06:20:52] [Rank 0] step:6941/10000 train_time:334274ms step_avg:48.16ms +[2025-09-11 06:20:52] [Rank 0] step:6941/10000 train_time:334274ms step_avg:48.16ms +[2025-09-11 06:20:52] [Rank 0] step:6961/10000 train_time:334977ms step_avg:48.12ms +[2025-09-11 06:20:52] [Rank 0] step:6961/10000 train_time:334977ms step_avg:48.12ms 
+[2025-09-11 06:20:53] [Rank 0] step:6981/10000 train_time:335681ms step_avg:48.08ms +[2025-09-11 06:20:53] [Rank 0] step:6981/10000 train_time:335681ms step_avg:48.08ms +[2025-09-11 06:20:54] [Rank 0] step:7001/10000 train_time:336384ms step_avg:48.05ms +[2025-09-11 06:20:54] [Rank 0] step:7001/10000 train_time:336384ms step_avg:48.05ms +[2025-09-11 06:20:55] [Rank 0] step:7021/10000 train_time:337086ms step_avg:48.01ms +[2025-09-11 06:20:55] [Rank 0] step:7021/10000 train_time:337086ms step_avg:48.01ms +[2025-09-11 06:20:55] [Rank 0] step:7041/10000 train_time:337789ms step_avg:47.97ms +[2025-09-11 06:20:55] [Rank 0] step:7041/10000 train_time:337789ms step_avg:47.97ms +[2025-09-11 06:20:56] [Rank 0] step:7061/10000 train_time:338492ms step_avg:47.94ms +[2025-09-11 06:20:56] [Rank 0] step:7061/10000 train_time:338492ms step_avg:47.94ms +[2025-09-11 06:20:57] [Rank 0] step:7081/10000 train_time:339194ms step_avg:47.90ms +[2025-09-11 06:20:57] [Rank 0] step:7081/10000 train_time:339194ms step_avg:47.90ms +[2025-09-11 06:20:57] [Rank 0] step:7101/10000 train_time:339897ms step_avg:47.87ms +[2025-09-11 06:20:57] [Rank 0] step:7101/10000 train_time:339897ms step_avg:47.87ms +[2025-09-11 06:20:58] [Rank 0] step:7121/10000 train_time:340601ms step_avg:47.83ms +[2025-09-11 06:20:58] [Rank 0] step:7121/10000 train_time:340601ms step_avg:47.83ms +[2025-09-11 06:20:59] [Rank 0] step:7141/10000 train_time:341303ms step_avg:47.79ms +[2025-09-11 06:20:59] [Rank 0] step:7141/10000 train_time:341303ms step_avg:47.79ms +[2025-09-11 06:20:59] [Rank 0] step:7161/10000 train_time:342007ms step_avg:47.76ms +[2025-09-11 06:20:59] [Rank 0] step:7161/10000 train_time:342007ms step_avg:47.76ms +[2025-09-11 06:21:00] [Rank 0] step:7181/10000 train_time:342709ms step_avg:47.72ms +[2025-09-11 06:21:00] [Rank 0] step:7181/10000 train_time:342709ms step_avg:47.72ms +[2025-09-11 06:21:01] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... 
+[2025-09-11 06:21:01] [Rank 0] PRINT: [Validation @ Step 7200] Calculating base validation loss... +[2025-09-11 06:21:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 06:21:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Getting true update direction 'v'... +[2025-09-11 06:21:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 06:21:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating update norms... +[2025-09-11 06:21:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:21:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:21:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 06:21:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up layer parameter groups... +[2025-09-11 06:21:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 06:21:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise update norms... +[2025-09-11 06:21:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 06:21:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Setting up HVP calculation in float32... +[2025-09-11 06:21:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 06:21:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating TOTAL sharpness... +[2025-09-11 06:21:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 06:21:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Calculating layer-wise sharpness... +[2025-09-11 06:21:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... 
+[2025-09-11 06:21:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 12 layers for sharpness... +[2025-09-11 06:21:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 06:21:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_1'... +[2025-09-11 06:21:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 06:21:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_2'... +[2025-09-11 06:21:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 06:21:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_3'... +[2025-09-11 06:21:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 06:21:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_4'... +[2025-09-11 06:21:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 06:21:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_5'... +[2025-09-11 06:21:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 06:21:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_6'... +[2025-09-11 06:21:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 06:21:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_7'... +[2025-09-11 06:21:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 06:21:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_8'... +[2025-09-11 06:21:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 06:21:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_9'... +[2025-09-11 06:21:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... 
+[2025-09-11 06:21:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_10'... +[2025-09-11 06:21:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 06:21:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_11'... +[2025-09-11 06:21:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 06:21:11] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Processing 'layer_12'... +[2025-09-11 06:21:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 06:21:12] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7200] Analysis complete. Generated 52 metrics. +[2025-09-11 06:21:12] [Rank 0] PRINT: step:7200/10000 val_loss:4.3741 total_sharp:4.4402e-05 L1_sharp:1.1775e-04 L2_sharp:2.3568e-05 L3_sharp:-5.1084e-06 L4_sharp:1.1665e-05 L5_sharp:3.3236e-06 L6_sharp:8.5444e-06 L7_sharp:6.0242e-06 L8_sharp:2.6603e-05 L9_sharp:2.9191e-05 L10_sharp:4.2762e-05 L11_sharp:4.9255e-05 L12_sharp:3.3341e-04 total_fnorm:1.5600e+02 total_l1_linf:4.2394e+05 total_spectral:7.7000e+01 L1_fnorm:1.7000e+01 L2_fnorm:1.5188e+01 L3_fnorm:1.6000e+01 L4_fnorm:1.7375e+01 L5_fnorm:1.6625e+01 L6_fnorm:1.7250e+01 L7_fnorm:1.8375e+01 L8_fnorm:1.8000e+01 L9_fnorm:1.8125e+01 L10_fnorm:1.8000e+01 L11_fnorm:1.8000e+01 L12_fnorm:1.8000e+01 L1_l1linf:4.3125e+00 L2_l1linf:3.5156e+00 L3_l1linf:3.3281e+00 L4_l1linf:3.1250e+00 L5_l1linf:3.2344e+00 L6_l1linf:3.2188e+00 L7_l1linf:4.0312e+00 L8_l1linf:4.2500e+00 L9_l1linf:4.1562e+00 L10_l1linf:3.9375e+00 L11_l1linf:3.6406e+00 L12_l1linf:3.6250e+00 L1_spectral:2.2571e-01 L2_spectral:2.2566e-01 L3_spectral:2.2953e-01 L4_spectral:2.3128e-01 L5_spectral:2.2898e-01 L6_spectral:2.3288e-01 L7_spectral:2.3226e-01 L8_spectral:2.2852e-01 L9_spectral:2.3139e-01 L10_spectral:2.2990e-01 L11_spectral:2.2876e-01 L12_spectral:2.2952e-01 train_time:343393ms step_avg:47.69ms +[2025-09-11 06:21:12] [Rank 0] PRINT: step:7200/10000 
val_loss:4.3741 total_sharp:4.4402e-05 L1_sharp:1.1775e-04 L2_sharp:2.3568e-05 L3_sharp:-5.1084e-06 L4_sharp:1.1665e-05 L5_sharp:3.3236e-06 L6_sharp:8.5444e-06 L7_sharp:6.0242e-06 L8_sharp:2.6603e-05 L9_sharp:2.9191e-05 L10_sharp:4.2762e-05 L11_sharp:4.9255e-05 L12_sharp:3.3341e-04 total_fnorm:1.5600e+02 total_l1_linf:4.2394e+05 total_spectral:7.7000e+01 L1_fnorm:1.7000e+01 L2_fnorm:1.5188e+01 L3_fnorm:1.6000e+01 L4_fnorm:1.7375e+01 L5_fnorm:1.6625e+01 L6_fnorm:1.7250e+01 L7_fnorm:1.8375e+01 L8_fnorm:1.8000e+01 L9_fnorm:1.8125e+01 L10_fnorm:1.8000e+01 L11_fnorm:1.8000e+01 L12_fnorm:1.8000e+01 L1_l1linf:4.3125e+00 L2_l1linf:3.5156e+00 L3_l1linf:3.3281e+00 L4_l1linf:3.1250e+00 L5_l1linf:3.2344e+00 L6_l1linf:3.2188e+00 L7_l1linf:4.0312e+00 L8_l1linf:4.2500e+00 L9_l1linf:4.1562e+00 L10_l1linf:3.9375e+00 L11_l1linf:3.6406e+00 L12_l1linf:3.6250e+00 L1_spectral:2.2571e-01 L2_spectral:2.2566e-01 L3_spectral:2.2953e-01 L4_spectral:2.3128e-01 L5_spectral:2.2898e-01 L6_spectral:2.3288e-01 L7_spectral:2.3226e-01 L8_spectral:2.2852e-01 L9_spectral:2.3139e-01 L10_spectral:2.2990e-01 L11_spectral:2.2876e-01 L12_spectral:2.2952e-01 train_time:343393ms step_avg:47.69ms +[2025-09-11 06:21:14] [Rank 0] step:7201/10000 train_time:345149ms step_avg:47.93ms +[2025-09-11 06:21:14] [Rank 0] step:7201/10000 train_time:345149ms step_avg:47.93ms +[2025-09-11 06:21:14] [Rank 0] step:7221/10000 train_time:345869ms step_avg:47.90ms +[2025-09-11 06:21:14] [Rank 0] step:7221/10000 train_time:345869ms step_avg:47.90ms +[2025-09-11 06:21:15] [Rank 0] step:7241/10000 train_time:346575ms step_avg:47.86ms +[2025-09-11 06:21:15] [Rank 0] step:7241/10000 train_time:346575ms step_avg:47.86ms +[2025-09-11 06:21:16] [Rank 0] step:7261/10000 train_time:347280ms step_avg:47.83ms +[2025-09-11 06:21:16] [Rank 0] step:7261/10000 train_time:347280ms step_avg:47.83ms +[2025-09-11 06:21:17] [Rank 0] step:7281/10000 train_time:347990ms step_avg:47.79ms +[2025-09-11 06:21:17] [Rank 0] step:7281/10000 
train_time:347990ms step_avg:47.79ms +[2025-09-11 06:21:17] [Rank 0] step:7301/10000 train_time:348692ms step_avg:47.76ms +[2025-09-11 06:21:17] [Rank 0] step:7301/10000 train_time:348692ms step_avg:47.76ms +[2025-09-11 06:21:18] [Rank 0] step:7321/10000 train_time:349396ms step_avg:47.73ms +[2025-09-11 06:21:18] [Rank 0] step:7321/10000 train_time:349396ms step_avg:47.73ms +[2025-09-11 06:21:19] [Rank 0] step:7341/10000 train_time:350101ms step_avg:47.69ms +[2025-09-11 06:21:19] [Rank 0] step:7341/10000 train_time:350101ms step_avg:47.69ms +[2025-09-11 06:21:19] [Rank 0] step:7361/10000 train_time:350804ms step_avg:47.66ms +[2025-09-11 06:21:19] [Rank 0] step:7361/10000 train_time:350804ms step_avg:47.66ms +[2025-09-11 06:21:20] [Rank 0] step:7381/10000 train_time:351508ms step_avg:47.62ms +[2025-09-11 06:21:20] [Rank 0] step:7381/10000 train_time:351508ms step_avg:47.62ms +[2025-09-11 06:21:21] [Rank 0] step:7401/10000 train_time:352212ms step_avg:47.59ms +[2025-09-11 06:21:21] [Rank 0] step:7401/10000 train_time:352212ms step_avg:47.59ms +[2025-09-11 06:21:22] [Rank 0] step:7421/10000 train_time:352916ms step_avg:47.56ms +[2025-09-11 06:21:22] [Rank 0] step:7421/10000 train_time:352916ms step_avg:47.56ms +[2025-09-11 06:21:22] [Rank 0] step:7441/10000 train_time:353621ms step_avg:47.52ms +[2025-09-11 06:21:22] [Rank 0] step:7441/10000 train_time:353621ms step_avg:47.52ms +[2025-09-11 06:21:23] [Rank 0] step:7461/10000 train_time:354326ms step_avg:47.49ms +[2025-09-11 06:21:23] [Rank 0] step:7461/10000 train_time:354326ms step_avg:47.49ms +[2025-09-11 06:21:24] [Rank 0] step:7481/10000 train_time:355034ms step_avg:47.46ms +[2025-09-11 06:21:24] [Rank 0] step:7481/10000 train_time:355034ms step_avg:47.46ms +[2025-09-11 06:21:24] [Rank 0] step:7501/10000 train_time:355743ms step_avg:47.43ms +[2025-09-11 06:21:24] [Rank 0] step:7501/10000 train_time:355743ms step_avg:47.43ms +[2025-09-11 06:21:25] [Rank 0] step:7521/10000 train_time:356448ms step_avg:47.39ms 
+[2025-09-11 06:21:25] [Rank 0] step:7521/10000 train_time:356448ms step_avg:47.39ms +[2025-09-11 06:21:26] [Rank 0] step:7541/10000 train_time:357154ms step_avg:47.36ms +[2025-09-11 06:21:26] [Rank 0] step:7541/10000 train_time:357154ms step_avg:47.36ms +[2025-09-11 06:21:26] [Rank 0] step:7561/10000 train_time:357860ms step_avg:47.33ms +[2025-09-11 06:21:26] [Rank 0] step:7561/10000 train_time:357860ms step_avg:47.33ms +[2025-09-11 06:21:27] [Rank 0] step:7581/10000 train_time:358566ms step_avg:47.30ms +[2025-09-11 06:21:27] [Rank 0] step:7581/10000 train_time:358566ms step_avg:47.30ms +[2025-09-11 06:21:28] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 06:21:28] [Rank 0] PRINT: [Validation @ Step 7600] Calculating base validation loss... +[2025-09-11 06:21:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 06:21:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Getting true update direction 'v'... +[2025-09-11 06:21:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 06:21:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating update norms... +[2025-09-11 06:21:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:21:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:21:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 06:21:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up layer parameter groups... +[2025-09-11 06:21:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... +[2025-09-11 06:21:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise update norms... 
+[2025-09-11 06:21:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 06:21:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Setting up HVP calculation in float32... +[2025-09-11 06:21:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 06:21:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating TOTAL sharpness... +[2025-09-11 06:21:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 06:21:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Calculating layer-wise sharpness... +[2025-09-11 06:21:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 06:21:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 12 layers for sharpness... +[2025-09-11 06:21:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 06:21:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_1'... +[2025-09-11 06:21:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 06:21:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_2'... +[2025-09-11 06:21:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 06:21:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_3'... +[2025-09-11 06:21:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 06:21:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_4'... +[2025-09-11 06:21:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 06:21:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_5'... +[2025-09-11 06:21:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... 
+[2025-09-11 06:21:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_6'... +[2025-09-11 06:21:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 06:21:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_7'... +[2025-09-11 06:21:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 06:21:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_8'... +[2025-09-11 06:21:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 06:21:39] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_9'... +[2025-09-11 06:21:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 06:21:40] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_10'... +[2025-09-11 06:21:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 06:21:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_11'... +[2025-09-11 06:21:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 06:21:41] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Processing 'layer_12'... +[2025-09-11 06:21:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. +[2025-09-11 06:21:43] [Rank 0] PRINT: [Enhanced Sharpness @ Step 7600] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:21:43] [Rank 0] PRINT: step:7600/10000 val_loss:4.3280 total_sharp:5.0340e-05 L1_sharp:9.1035e-05 L2_sharp:-8.3671e-06 L3_sharp:-1.4016e-05 L4_sharp:7.0383e-07 L5_sharp:7.5170e-06 L6_sharp:1.7921e-05 L7_sharp:1.4389e-05 L8_sharp:2.5563e-05 L9_sharp:3.5998e-05 L10_sharp:4.4521e-05 L11_sharp:5.5085e-05 L12_sharp:3.6359e-04 total_fnorm:1.2350e+02 total_l1_linf:3.2768e+05 total_spectral:6.2500e+01 L1_fnorm:1.4625e+01 L2_fnorm:1.3125e+01 L3_fnorm:1.3500e+01 L4_fnorm:1.4500e+01 L5_fnorm:1.4250e+01 L6_fnorm:1.4688e+01 L7_fnorm:1.5750e+01 L8_fnorm:1.5375e+01 L9_fnorm:1.5562e+01 L10_fnorm:1.5438e+01 L11_fnorm:1.5375e+01 L12_fnorm:1.5438e+01 L1_l1linf:3.4844e+00 L2_l1linf:2.9844e+00 L3_l1linf:2.9844e+00 L4_l1linf:2.7969e+00 L5_l1linf:2.7656e+00 L6_l1linf:2.8906e+00 L7_l1linf:3.3906e+00 L8_l1linf:3.5000e+00 L9_l1linf:3.4375e+00 L10_l1linf:3.1562e+00 L11_l1linf:3.0156e+00 L12_l1linf:3.1406e+00 L1_spectral:2.0049e-01 L2_spectral:1.9877e-01 L3_spectral:2.0217e-01 L4_spectral:2.0203e-01 L5_spectral:1.9888e-01 L6_spectral:2.0213e-01 L7_spectral:2.0211e-01 L8_spectral:1.9896e-01 L9_spectral:2.0295e-01 L10_spectral:2.0190e-01 L11_spectral:2.0250e-01 L12_spectral:2.0195e-01 train_time:359258ms step_avg:47.27ms +[2025-09-11 06:21:43] [Rank 0] PRINT: step:7600/10000 val_loss:4.3280 total_sharp:5.0340e-05 L1_sharp:9.1035e-05 L2_sharp:-8.3671e-06 L3_sharp:-1.4016e-05 L4_sharp:7.0383e-07 L5_sharp:7.5170e-06 L6_sharp:1.7921e-05 L7_sharp:1.4389e-05 L8_sharp:2.5563e-05 L9_sharp:3.5998e-05 L10_sharp:4.4521e-05 L11_sharp:5.5085e-05 L12_sharp:3.6359e-04 total_fnorm:1.2350e+02 total_l1_linf:3.2768e+05 total_spectral:6.2500e+01 L1_fnorm:1.4625e+01 L2_fnorm:1.3125e+01 L3_fnorm:1.3500e+01 L4_fnorm:1.4500e+01 L5_fnorm:1.4250e+01 L6_fnorm:1.4688e+01 L7_fnorm:1.5750e+01 L8_fnorm:1.5375e+01 L9_fnorm:1.5562e+01 L10_fnorm:1.5438e+01 L11_fnorm:1.5375e+01 L12_fnorm:1.5438e+01 L1_l1linf:3.4844e+00 L2_l1linf:2.9844e+00 L3_l1linf:2.9844e+00 L4_l1linf:2.7969e+00 L5_l1linf:2.7656e+00 
L6_l1linf:2.8906e+00 L7_l1linf:3.3906e+00 L8_l1linf:3.5000e+00 L9_l1linf:3.4375e+00 L10_l1linf:3.1562e+00 L11_l1linf:3.0156e+00 L12_l1linf:3.1406e+00 L1_spectral:2.0049e-01 L2_spectral:1.9877e-01 L3_spectral:2.0217e-01 L4_spectral:2.0203e-01 L5_spectral:1.9888e-01 L6_spectral:2.0213e-01 L7_spectral:2.0211e-01 L8_spectral:1.9896e-01 L9_spectral:2.0295e-01 L10_spectral:2.0190e-01 L11_spectral:2.0250e-01 L12_spectral:2.0195e-01 train_time:359258ms step_avg:47.27ms +[2025-09-11 06:21:44] [Rank 0] step:7601/10000 train_time:361015ms step_avg:47.50ms +[2025-09-11 06:21:44] [Rank 0] step:7601/10000 train_time:361015ms step_avg:47.50ms +[2025-09-11 06:21:45] [Rank 0] step:7621/10000 train_time:361748ms step_avg:47.47ms +[2025-09-11 06:21:45] [Rank 0] step:7621/10000 train_time:361748ms step_avg:47.47ms +[2025-09-11 06:21:46] [Rank 0] step:7641/10000 train_time:362455ms step_avg:47.44ms +[2025-09-11 06:21:46] [Rank 0] step:7641/10000 train_time:362455ms step_avg:47.44ms +[2025-09-11 06:21:47] [Rank 0] step:7661/10000 train_time:363159ms step_avg:47.40ms +[2025-09-11 06:21:47] [Rank 0] step:7661/10000 train_time:363159ms step_avg:47.40ms +[2025-09-11 06:21:47] [Rank 0] step:7681/10000 train_time:363865ms step_avg:47.37ms +[2025-09-11 06:21:47] [Rank 0] step:7681/10000 train_time:363865ms step_avg:47.37ms +[2025-09-11 06:21:48] [Rank 0] step:7701/10000 train_time:364573ms step_avg:47.34ms +[2025-09-11 06:21:48] [Rank 0] step:7701/10000 train_time:364573ms step_avg:47.34ms +[2025-09-11 06:21:49] [Rank 0] step:7721/10000 train_time:365278ms step_avg:47.31ms +[2025-09-11 06:21:49] [Rank 0] step:7721/10000 train_time:365278ms step_avg:47.31ms +[2025-09-11 06:21:49] [Rank 0] step:7741/10000 train_time:365983ms step_avg:47.28ms +[2025-09-11 06:21:49] [Rank 0] step:7741/10000 train_time:365983ms step_avg:47.28ms +[2025-09-11 06:21:50] [Rank 0] step:7761/10000 train_time:366689ms step_avg:47.25ms +[2025-09-11 06:21:50] [Rank 0] step:7761/10000 train_time:366689ms step_avg:47.25ms 
+[2025-09-11 06:21:51] [Rank 0] step:7781/10000 train_time:367399ms step_avg:47.22ms +[2025-09-11 06:21:51] [Rank 0] step:7781/10000 train_time:367399ms step_avg:47.22ms +[2025-09-11 06:21:52] [Rank 0] step:7801/10000 train_time:368109ms step_avg:47.19ms +[2025-09-11 06:21:52] [Rank 0] step:7801/10000 train_time:368109ms step_avg:47.19ms +[2025-09-11 06:21:52] [Rank 0] step:7821/10000 train_time:368815ms step_avg:47.16ms +[2025-09-11 06:21:52] [Rank 0] step:7821/10000 train_time:368815ms step_avg:47.16ms +[2025-09-11 06:21:53] [Rank 0] step:7841/10000 train_time:369523ms step_avg:47.13ms +[2025-09-11 06:21:53] [Rank 0] step:7841/10000 train_time:369523ms step_avg:47.13ms +[2025-09-11 06:21:54] [Rank 0] step:7861/10000 train_time:370232ms step_avg:47.10ms +[2025-09-11 06:21:54] [Rank 0] step:7861/10000 train_time:370232ms step_avg:47.10ms +[2025-09-11 06:21:54] [Rank 0] step:7881/10000 train_time:370937ms step_avg:47.07ms +[2025-09-11 06:21:54] [Rank 0] step:7881/10000 train_time:370937ms step_avg:47.07ms +[2025-09-11 06:21:55] [Rank 0] step:7901/10000 train_time:371644ms step_avg:47.04ms +[2025-09-11 06:21:55] [Rank 0] step:7901/10000 train_time:371644ms step_avg:47.04ms +[2025-09-11 06:21:56] [Rank 0] step:7921/10000 train_time:372351ms step_avg:47.01ms +[2025-09-11 06:21:56] [Rank 0] step:7921/10000 train_time:372351ms step_avg:47.01ms +[2025-09-11 06:21:56] [Rank 0] step:7941/10000 train_time:373058ms step_avg:46.98ms +[2025-09-11 06:21:56] [Rank 0] step:7941/10000 train_time:373058ms step_avg:46.98ms +[2025-09-11 06:21:57] [Rank 0] step:7961/10000 train_time:373762ms step_avg:46.95ms +[2025-09-11 06:21:57] [Rank 0] step:7961/10000 train_time:373762ms step_avg:46.95ms +[2025-09-11 06:21:58] [Rank 0] step:7981/10000 train_time:374471ms step_avg:46.92ms +[2025-09-11 06:21:58] [Rank 0] step:7981/10000 train_time:374471ms step_avg:46.92ms +[2025-09-11 06:21:59] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... 
+[2025-09-11 06:21:59] [Rank 0] PRINT: [Validation @ Step 8000] Calculating base validation loss... +[2025-09-11 06:22:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 06:22:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Getting true update direction 'v'... +[2025-09-11 06:22:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 06:22:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating update norms... +[2025-09-11 06:22:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:22:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:22:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 06:22:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up layer parameter groups... +[2025-09-11 06:22:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 06:22:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise update norms... +[2025-09-11 06:22:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 06:22:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Setting up HVP calculation in float32... +[2025-09-11 06:22:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 06:22:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating TOTAL sharpness... +[2025-09-11 06:22:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 06:22:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Calculating layer-wise sharpness... +[2025-09-11 06:22:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... 
+[2025-09-11 06:22:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 12 layers for sharpness... +[2025-09-11 06:22:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 06:22:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_1'... +[2025-09-11 06:22:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 06:22:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_2'... +[2025-09-11 06:22:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 06:22:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_3'... +[2025-09-11 06:22:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 06:22:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_4'... +[2025-09-11 06:22:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 06:22:07] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_5'... +[2025-09-11 06:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 06:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_6'... +[2025-09-11 06:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 06:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_7'... +[2025-09-11 06:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 06:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_8'... +[2025-09-11 06:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 06:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_9'... +[2025-09-11 06:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... 
+[2025-09-11 06:22:08] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_10'... +[2025-09-11 06:22:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 06:22:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_11'... +[2025-09-11 06:22:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 06:22:09] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Processing 'layer_12'... +[2025-09-11 06:22:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 06:22:10] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8000] Analysis complete. Generated 52 metrics. +[2025-09-11 06:22:10] [Rank 0] PRINT: step:8000/10000 val_loss:4.2808 total_sharp:4.8504e-05 L1_sharp:9.0141e-05 L2_sharp:1.8600e-05 L3_sharp:-2.3518e-06 L4_sharp:4.2398e-06 L5_sharp:6.2845e-06 L6_sharp:3.8581e-06 L7_sharp:1.1807e-05 L8_sharp:2.9918e-05 L9_sharp:3.3988e-05 L10_sharp:4.9397e-05 L11_sharp:6.1199e-05 L12_sharp:3.9261e-04 total_fnorm:1.1450e+02 total_l1_linf:2.8058e+05 total_spectral:5.6500e+01 L1_fnorm:1.2438e+01 L2_fnorm:1.0438e+01 L3_fnorm:1.1062e+01 L4_fnorm:1.2062e+01 L5_fnorm:1.2188e+01 L6_fnorm:1.2312e+01 L7_fnorm:1.3062e+01 L8_fnorm:1.2750e+01 L9_fnorm:1.2938e+01 L10_fnorm:1.2750e+01 L11_fnorm:1.2750e+01 L12_fnorm:1.2750e+01 L1_l1linf:2.7969e+00 L2_l1linf:2.7031e+00 L3_l1linf:2.5938e+00 L4_l1linf:2.4531e+00 L5_l1linf:2.4219e+00 L6_l1linf:2.2969e+00 L7_l1linf:2.7031e+00 L8_l1linf:2.7812e+00 L9_l1linf:2.7188e+00 L10_l1linf:2.5625e+00 L11_l1linf:2.4375e+00 L12_l1linf:2.4688e+00 L1_spectral:1.7279e-01 L2_spectral:1.6816e-01 L3_spectral:1.7184e-01 L4_spectral:1.7004e-01 L5_spectral:1.6849e-01 L6_spectral:1.7175e-01 L7_spectral:1.7162e-01 L8_spectral:1.7013e-01 L9_spectral:1.7299e-01 L10_spectral:1.7295e-01 L11_spectral:1.7320e-01 L12_spectral:1.7241e-01 train_time:375157ms step_avg:46.89ms +[2025-09-11 06:22:10] [Rank 0] PRINT: step:8000/10000 
val_loss:4.2808 total_sharp:4.8504e-05 L1_sharp:9.0141e-05 L2_sharp:1.8600e-05 L3_sharp:-2.3518e-06 L4_sharp:4.2398e-06 L5_sharp:6.2845e-06 L6_sharp:3.8581e-06 L7_sharp:1.1807e-05 L8_sharp:2.9918e-05 L9_sharp:3.3988e-05 L10_sharp:4.9397e-05 L11_sharp:6.1199e-05 L12_sharp:3.9261e-04 total_fnorm:1.1450e+02 total_l1_linf:2.8058e+05 total_spectral:5.6500e+01 L1_fnorm:1.2438e+01 L2_fnorm:1.0438e+01 L3_fnorm:1.1062e+01 L4_fnorm:1.2062e+01 L5_fnorm:1.2188e+01 L6_fnorm:1.2312e+01 L7_fnorm:1.3062e+01 L8_fnorm:1.2750e+01 L9_fnorm:1.2938e+01 L10_fnorm:1.2750e+01 L11_fnorm:1.2750e+01 L12_fnorm:1.2750e+01 L1_l1linf:2.7969e+00 L2_l1linf:2.7031e+00 L3_l1linf:2.5938e+00 L4_l1linf:2.4531e+00 L5_l1linf:2.4219e+00 L6_l1linf:2.2969e+00 L7_l1linf:2.7031e+00 L8_l1linf:2.7812e+00 L9_l1linf:2.7188e+00 L10_l1linf:2.5625e+00 L11_l1linf:2.4375e+00 L12_l1linf:2.4688e+00 L1_spectral:1.7279e-01 L2_spectral:1.6816e-01 L3_spectral:1.7184e-01 L4_spectral:1.7004e-01 L5_spectral:1.6849e-01 L6_spectral:1.7175e-01 L7_spectral:1.7162e-01 L8_spectral:1.7013e-01 L9_spectral:1.7299e-01 L10_spectral:1.7295e-01 L11_spectral:1.7320e-01 L12_spectral:1.7241e-01 train_time:375157ms step_avg:46.89ms +[2025-09-11 06:22:12] [Rank 0] step:8001/10000 train_time:376929ms step_avg:47.11ms +[2025-09-11 06:22:12] [Rank 0] step:8001/10000 train_time:376929ms step_avg:47.11ms +[2025-09-11 06:22:12] [Rank 0] step:8021/10000 train_time:377652ms step_avg:47.08ms +[2025-09-11 06:22:12] [Rank 0] step:8021/10000 train_time:377652ms step_avg:47.08ms +[2025-09-11 06:22:13] [Rank 0] step:8041/10000 train_time:378358ms step_avg:47.05ms +[2025-09-11 06:22:13] [Rank 0] step:8041/10000 train_time:378358ms step_avg:47.05ms +[2025-09-11 06:22:14] [Rank 0] step:8061/10000 train_time:379068ms step_avg:47.02ms +[2025-09-11 06:22:14] [Rank 0] step:8061/10000 train_time:379068ms step_avg:47.02ms +[2025-09-11 06:22:14] [Rank 0] step:8081/10000 train_time:379772ms step_avg:47.00ms +[2025-09-11 06:22:14] [Rank 0] step:8081/10000 
train_time:379772ms step_avg:47.00ms +[2025-09-11 06:22:15] [Rank 0] step:8101/10000 train_time:380476ms step_avg:46.97ms +[2025-09-11 06:22:15] [Rank 0] step:8101/10000 train_time:380476ms step_avg:46.97ms +[2025-09-11 06:22:16] [Rank 0] step:8121/10000 train_time:381185ms step_avg:46.94ms +[2025-09-11 06:22:16] [Rank 0] step:8121/10000 train_time:381185ms step_avg:46.94ms +[2025-09-11 06:22:17] [Rank 0] step:8141/10000 train_time:382632ms step_avg:47.00ms +[2025-09-11 06:22:17] [Rank 0] step:8141/10000 train_time:382632ms step_avg:47.00ms +[2025-09-11 06:22:18] [Rank 0] step:8161/10000 train_time:383342ms step_avg:46.97ms +[2025-09-11 06:22:18] [Rank 0] step:8161/10000 train_time:383342ms step_avg:46.97ms +[2025-09-11 06:22:19] [Rank 0] step:8181/10000 train_time:384059ms step_avg:46.95ms +[2025-09-11 06:22:19] [Rank 0] step:8181/10000 train_time:384059ms step_avg:46.95ms +[2025-09-11 06:22:19] [Rank 0] step:8201/10000 train_time:384773ms step_avg:46.92ms +[2025-09-11 06:22:19] [Rank 0] step:8201/10000 train_time:384773ms step_avg:46.92ms +[2025-09-11 06:22:20] [Rank 0] step:8221/10000 train_time:385485ms step_avg:46.89ms +[2025-09-11 06:22:20] [Rank 0] step:8221/10000 train_time:385485ms step_avg:46.89ms +[2025-09-11 06:22:21] [Rank 0] step:8241/10000 train_time:386207ms step_avg:46.86ms +[2025-09-11 06:22:21] [Rank 0] step:8241/10000 train_time:386207ms step_avg:46.86ms +[2025-09-11 06:22:22] [Rank 0] step:8261/10000 train_time:386919ms step_avg:46.84ms +[2025-09-11 06:22:22] [Rank 0] step:8261/10000 train_time:386919ms step_avg:46.84ms +[2025-09-11 06:22:22] [Rank 0] step:8281/10000 train_time:387629ms step_avg:46.81ms +[2025-09-11 06:22:22] [Rank 0] step:8281/10000 train_time:387629ms step_avg:46.81ms +[2025-09-11 06:22:23] [Rank 0] step:8301/10000 train_time:388341ms step_avg:46.78ms +[2025-09-11 06:22:23] [Rank 0] step:8301/10000 train_time:388341ms step_avg:46.78ms +[2025-09-11 06:22:24] [Rank 0] step:8321/10000 train_time:389052ms step_avg:46.76ms 
+[2025-09-11 06:22:24] [Rank 0] step:8321/10000 train_time:389052ms step_avg:46.76ms +[2025-09-11 06:22:24] [Rank 0] step:8341/10000 train_time:389771ms step_avg:46.73ms +[2025-09-11 06:22:24] [Rank 0] step:8341/10000 train_time:389771ms step_avg:46.73ms +[2025-09-11 06:22:25] [Rank 0] step:8361/10000 train_time:390479ms step_avg:46.70ms +[2025-09-11 06:22:25] [Rank 0] step:8361/10000 train_time:390479ms step_avg:46.70ms +[2025-09-11 06:22:26] [Rank 0] step:8381/10000 train_time:391194ms step_avg:46.68ms +[2025-09-11 06:22:26] [Rank 0] step:8381/10000 train_time:391194ms step_avg:46.68ms +[2025-09-11 06:22:26] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 06:22:26] [Rank 0] PRINT: [Validation @ Step 8400] Calculating base validation loss... +[2025-09-11 06:22:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 06:22:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Getting true update direction 'v'... +[2025-09-11 06:22:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 06:22:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating update norms... +[2025-09-11 06:22:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:22:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:22:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 06:22:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up layer parameter groups... +[2025-09-11 06:22:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... +[2025-09-11 06:22:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise update norms... 
+[2025-09-11 06:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 06:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Setting up HVP calculation in float32... +[2025-09-11 06:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 06:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating TOTAL sharpness... +[2025-09-11 06:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 06:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Calculating layer-wise sharpness... +[2025-09-11 06:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 06:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 12 layers for sharpness... +[2025-09-11 06:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 06:22:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_1'... +[2025-09-11 06:22:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 06:22:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_2'... +[2025-09-11 06:22:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 06:22:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_3'... +[2025-09-11 06:22:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 06:22:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_4'... +[2025-09-11 06:22:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 06:22:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_5'... +[2025-09-11 06:22:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... 
+[2025-09-11 06:22:35] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_6'... +[2025-09-11 06:22:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 06:22:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_7'... +[2025-09-11 06:22:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 06:22:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_8'... +[2025-09-11 06:22:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 06:22:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_9'... +[2025-09-11 06:22:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 06:22:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_10'... +[2025-09-11 06:22:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 06:22:36] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_11'... +[2025-09-11 06:22:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 06:22:37] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Processing 'layer_12'... +[2025-09-11 06:22:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. +[2025-09-11 06:22:38] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8400] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:22:38] [Rank 0] PRINT: step:8400/10000 val_loss:4.2399 total_sharp:4.1487e-05 L1_sharp:9.1973e-05 L2_sharp:1.1951e-05 L3_sharp:6.3757e-07 L4_sharp:-3.9223e-06 L5_sharp:2.6524e-06 L6_sharp:5.1123e-06 L7_sharp:6.9676e-06 L8_sharp:2.3839e-05 L9_sharp:3.1858e-05 L10_sharp:5.8372e-05 L11_sharp:5.5189e-05 L12_sharp:5.4781e-04 total_fnorm:9.0500e+01 total_l1_linf:2.0992e+05 total_spectral:4.5500e+01 L1_fnorm:1.0062e+01 L2_fnorm:8.5000e+00 L3_fnorm:8.7500e+00 L4_fnorm:9.8750e+00 L5_fnorm:9.8125e+00 L6_fnorm:9.8750e+00 L7_fnorm:1.0438e+01 L8_fnorm:1.0125e+01 L9_fnorm:1.0250e+01 L10_fnorm:1.0125e+01 L11_fnorm:1.0125e+01 L12_fnorm:1.0250e+01 L1_l1linf:2.0781e+00 L2_l1linf:2.2656e+00 L3_l1linf:2.1719e+00 L4_l1linf:1.9297e+00 L5_l1linf:1.8516e+00 L6_l1linf:1.9766e+00 L7_l1linf:2.0312e+00 L8_l1linf:2.1562e+00 L9_l1linf:1.9844e+00 L10_l1linf:1.8984e+00 L11_l1linf:1.7500e+00 L12_l1linf:1.9219e+00 L1_spectral:1.4207e-01 L2_spectral:1.3707e-01 L3_spectral:1.3956e-01 L4_spectral:1.4028e-01 L5_spectral:1.4147e-01 L6_spectral:1.4032e-01 L7_spectral:1.4162e-01 L8_spectral:1.4120e-01 L9_spectral:1.4193e-01 L10_spectral:1.4173e-01 L11_spectral:1.4210e-01 L12_spectral:1.4120e-01 train_time:391889ms step_avg:46.65ms +[2025-09-11 06:22:38] [Rank 0] PRINT: step:8400/10000 val_loss:4.2399 total_sharp:4.1487e-05 L1_sharp:9.1973e-05 L2_sharp:1.1951e-05 L3_sharp:6.3757e-07 L4_sharp:-3.9223e-06 L5_sharp:2.6524e-06 L6_sharp:5.1123e-06 L7_sharp:6.9676e-06 L8_sharp:2.3839e-05 L9_sharp:3.1858e-05 L10_sharp:5.8372e-05 L11_sharp:5.5189e-05 L12_sharp:5.4781e-04 total_fnorm:9.0500e+01 total_l1_linf:2.0992e+05 total_spectral:4.5500e+01 L1_fnorm:1.0062e+01 L2_fnorm:8.5000e+00 L3_fnorm:8.7500e+00 L4_fnorm:9.8750e+00 L5_fnorm:9.8125e+00 L6_fnorm:9.8750e+00 L7_fnorm:1.0438e+01 L8_fnorm:1.0125e+01 L9_fnorm:1.0250e+01 L10_fnorm:1.0125e+01 L11_fnorm:1.0125e+01 L12_fnorm:1.0250e+01 L1_l1linf:2.0781e+00 L2_l1linf:2.2656e+00 L3_l1linf:2.1719e+00 L4_l1linf:1.9297e+00 L5_l1linf:1.8516e+00 
L6_l1linf:1.9766e+00 L7_l1linf:2.0312e+00 L8_l1linf:2.1562e+00 L9_l1linf:1.9844e+00 L10_l1linf:1.8984e+00 L11_l1linf:1.7500e+00 L12_l1linf:1.9219e+00 L1_spectral:1.4207e-01 L2_spectral:1.3707e-01 L3_spectral:1.3956e-01 L4_spectral:1.4028e-01 L5_spectral:1.4147e-01 L6_spectral:1.4032e-01 L7_spectral:1.4162e-01 L8_spectral:1.4120e-01 L9_spectral:1.4193e-01 L10_spectral:1.4173e-01 L11_spectral:1.4210e-01 L12_spectral:1.4120e-01 train_time:391889ms step_avg:46.65ms +[2025-09-11 06:22:40] [Rank 0] step:8401/10000 train_time:394206ms step_avg:46.92ms +[2025-09-11 06:22:40] [Rank 0] step:8401/10000 train_time:394206ms step_avg:46.92ms +[2025-09-11 06:22:41] [Rank 0] step:8421/10000 train_time:394934ms step_avg:46.90ms +[2025-09-11 06:22:41] [Rank 0] step:8421/10000 train_time:394934ms step_avg:46.90ms +[2025-09-11 06:22:42] [Rank 0] step:8441/10000 train_time:396188ms step_avg:46.94ms +[2025-09-11 06:22:42] [Rank 0] step:8441/10000 train_time:396188ms step_avg:46.94ms +[2025-09-11 06:22:43] [Rank 0] step:8461/10000 train_time:396903ms step_avg:46.91ms +[2025-09-11 06:22:43] [Rank 0] step:8461/10000 train_time:396903ms step_avg:46.91ms +[2025-09-11 06:22:44] [Rank 0] step:8481/10000 train_time:397619ms step_avg:46.88ms +[2025-09-11 06:22:44] [Rank 0] step:8481/10000 train_time:397619ms step_avg:46.88ms +[2025-09-11 06:22:45] [Rank 0] step:8501/10000 train_time:398597ms step_avg:46.89ms +[2025-09-11 06:22:45] [Rank 0] step:8501/10000 train_time:398597ms step_avg:46.89ms +[2025-09-11 06:22:45] [Rank 0] step:8521/10000 train_time:399311ms step_avg:46.86ms +[2025-09-11 06:22:45] [Rank 0] step:8521/10000 train_time:399311ms step_avg:46.86ms +[2025-09-11 06:22:46] [Rank 0] step:8541/10000 train_time:400024ms step_avg:46.84ms +[2025-09-11 06:22:46] [Rank 0] step:8541/10000 train_time:400024ms step_avg:46.84ms +[2025-09-11 06:22:47] [Rank 0] step:8561/10000 train_time:400743ms step_avg:46.81ms +[2025-09-11 06:22:47] [Rank 0] step:8561/10000 train_time:400743ms step_avg:46.81ms 
+[2025-09-11 06:22:47] [Rank 0] step:8581/10000 train_time:401458ms step_avg:46.78ms +[2025-09-11 06:22:47] [Rank 0] step:8581/10000 train_time:401458ms step_avg:46.78ms +[2025-09-11 06:22:48] [Rank 0] step:8601/10000 train_time:402173ms step_avg:46.76ms +[2025-09-11 06:22:48] [Rank 0] step:8601/10000 train_time:402173ms step_avg:46.76ms +[2025-09-11 06:22:49] [Rank 0] step:8621/10000 train_time:402887ms step_avg:46.73ms +[2025-09-11 06:22:49] [Rank 0] step:8621/10000 train_time:402887ms step_avg:46.73ms +[2025-09-11 06:22:50] [Rank 0] step:8641/10000 train_time:403600ms step_avg:46.71ms +[2025-09-11 06:22:50] [Rank 0] step:8641/10000 train_time:403600ms step_avg:46.71ms +[2025-09-11 06:22:50] [Rank 0] step:8661/10000 train_time:404315ms step_avg:46.68ms +[2025-09-11 06:22:50] [Rank 0] step:8661/10000 train_time:404315ms step_avg:46.68ms +[2025-09-11 06:22:51] [Rank 0] step:8681/10000 train_time:405031ms step_avg:46.66ms +[2025-09-11 06:22:51] [Rank 0] step:8681/10000 train_time:405031ms step_avg:46.66ms +[2025-09-11 06:22:52] [Rank 0] step:8701/10000 train_time:405744ms step_avg:46.63ms +[2025-09-11 06:22:52] [Rank 0] step:8701/10000 train_time:405744ms step_avg:46.63ms +[2025-09-11 06:22:52] [Rank 0] step:8721/10000 train_time:406461ms step_avg:46.61ms +[2025-09-11 06:22:52] [Rank 0] step:8721/10000 train_time:406461ms step_avg:46.61ms +[2025-09-11 06:22:53] [Rank 0] step:8741/10000 train_time:407172ms step_avg:46.58ms +[2025-09-11 06:22:53] [Rank 0] step:8741/10000 train_time:407172ms step_avg:46.58ms +[2025-09-11 06:22:54] [Rank 0] step:8761/10000 train_time:407889ms step_avg:46.56ms +[2025-09-11 06:22:54] [Rank 0] step:8761/10000 train_time:407889ms step_avg:46.56ms +[2025-09-11 06:22:55] [Rank 0] step:8781/10000 train_time:408601ms step_avg:46.53ms +[2025-09-11 06:22:55] [Rank 0] step:8781/10000 train_time:408601ms step_avg:46.53ms +[2025-09-11 06:22:55] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... 
+[2025-09-11 06:22:55] [Rank 0] PRINT: [Validation @ Step 8800] Calculating base validation loss... +[2025-09-11 06:22:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 06:22:56] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Getting true update direction 'v'... +[2025-09-11 06:22:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 06:22:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating update norms... +[2025-09-11 06:22:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:22:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:22:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 06:22:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up layer parameter groups... +[2025-09-11 06:22:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 06:22:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise update norms... +[2025-09-11 06:23:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 06:23:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Setting up HVP calculation in float32... +[2025-09-11 06:23:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 06:23:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating TOTAL sharpness... +[2025-09-11 06:23:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 06:23:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Calculating layer-wise sharpness... +[2025-09-11 06:23:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... 
+[2025-09-11 06:23:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 12 layers for sharpness... +[2025-09-11 06:23:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 06:23:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_1'... +[2025-09-11 06:23:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 06:23:03] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_2'... +[2025-09-11 06:23:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 06:23:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_3'... +[2025-09-11 06:23:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 06:23:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_4'... +[2025-09-11 06:23:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 06:23:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_5'... +[2025-09-11 06:23:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 06:23:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_6'... +[2025-09-11 06:23:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 06:23:04] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_7'... +[2025-09-11 06:23:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 06:23:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_8'... +[2025-09-11 06:23:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 06:23:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_9'... +[2025-09-11 06:23:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... 
+[2025-09-11 06:23:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_10'... +[2025-09-11 06:23:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 06:23:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_11'... +[2025-09-11 06:23:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 06:23:05] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Processing 'layer_12'... +[2025-09-11 06:23:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 06:23:06] [Rank 0] PRINT: [Enhanced Sharpness @ Step 8800] Analysis complete. Generated 52 metrics. +[2025-09-11 06:23:06] [Rank 0] PRINT: step:8800/10000 val_loss:4.2153 total_sharp:3.8496e-05 L1_sharp:8.2494e-05 L2_sharp:-8.4448e-07 L3_sharp:-9.0054e-06 L4_sharp:1.5217e-05 L5_sharp:6.0490e-06 L6_sharp:2.3022e-06 L7_sharp:1.1043e-05 L8_sharp:2.3464e-05 L9_sharp:2.9235e-05 L10_sharp:3.6818e-05 L11_sharp:5.4786e-05 L12_sharp:2.9026e-04 total_fnorm:6.6500e+01 total_l1_linf:1.4029e+05 total_spectral:3.3500e+01 L1_fnorm:7.5938e+00 L2_fnorm:6.2500e+00 L3_fnorm:6.6562e+00 L4_fnorm:7.4062e+00 L5_fnorm:7.3750e+00 L6_fnorm:7.4688e+00 L7_fnorm:7.7812e+00 L8_fnorm:7.5625e+00 L9_fnorm:7.6250e+00 L10_fnorm:7.5312e+00 L11_fnorm:7.5312e+00 L12_fnorm:7.6875e+00 L1_l1linf:1.3984e+00 L2_l1linf:1.7578e+00 L3_l1linf:1.6719e+00 L4_l1linf:1.5078e+00 L5_l1linf:1.5234e+00 L6_l1linf:1.5938e+00 L7_l1linf:1.4297e+00 L8_l1linf:1.4844e+00 L9_l1linf:1.3984e+00 L10_l1linf:1.3438e+00 L11_l1linf:1.2188e+00 L12_l1linf:1.3203e+00 L1_spectral:1.0891e-01 L2_spectral:1.0528e-01 L3_spectral:1.0675e-01 L4_spectral:1.1223e-01 L5_spectral:1.1254e-01 L6_spectral:1.1118e-01 L7_spectral:1.0889e-01 L8_spectral:1.0817e-01 L9_spectral:1.0959e-01 L10_spectral:1.0918e-01 L11_spectral:1.0993e-01 L12_spectral:1.0947e-01 train_time:409293ms step_avg:46.51ms +[2025-09-11 06:23:06] [Rank 0] PRINT: step:8800/10000 
val_loss:4.2153 total_sharp:3.8496e-05 L1_sharp:8.2494e-05 L2_sharp:-8.4448e-07 L3_sharp:-9.0054e-06 L4_sharp:1.5217e-05 L5_sharp:6.0490e-06 L6_sharp:2.3022e-06 L7_sharp:1.1043e-05 L8_sharp:2.3464e-05 L9_sharp:2.9235e-05 L10_sharp:3.6818e-05 L11_sharp:5.4786e-05 L12_sharp:2.9026e-04 total_fnorm:6.6500e+01 total_l1_linf:1.4029e+05 total_spectral:3.3500e+01 L1_fnorm:7.5938e+00 L2_fnorm:6.2500e+00 L3_fnorm:6.6562e+00 L4_fnorm:7.4062e+00 L5_fnorm:7.3750e+00 L6_fnorm:7.4688e+00 L7_fnorm:7.7812e+00 L8_fnorm:7.5625e+00 L9_fnorm:7.6250e+00 L10_fnorm:7.5312e+00 L11_fnorm:7.5312e+00 L12_fnorm:7.6875e+00 L1_l1linf:1.3984e+00 L2_l1linf:1.7578e+00 L3_l1linf:1.6719e+00 L4_l1linf:1.5078e+00 L5_l1linf:1.5234e+00 L6_l1linf:1.5938e+00 L7_l1linf:1.4297e+00 L8_l1linf:1.4844e+00 L9_l1linf:1.3984e+00 L10_l1linf:1.3438e+00 L11_l1linf:1.2188e+00 L12_l1linf:1.3203e+00 L1_spectral:1.0891e-01 L2_spectral:1.0528e-01 L3_spectral:1.0675e-01 L4_spectral:1.1223e-01 L5_spectral:1.1254e-01 L6_spectral:1.1118e-01 L7_spectral:1.0889e-01 L8_spectral:1.0817e-01 L9_spectral:1.0959e-01 L10_spectral:1.0918e-01 L11_spectral:1.0993e-01 L12_spectral:1.0947e-01 train_time:409293ms step_avg:46.51ms +[2025-09-11 06:23:08] [Rank 0] step:8801/10000 train_time:411069ms step_avg:46.71ms +[2025-09-11 06:23:08] [Rank 0] step:8801/10000 train_time:411069ms step_avg:46.71ms +[2025-09-11 06:23:09] [Rank 0] step:8821/10000 train_time:411822ms step_avg:46.69ms +[2025-09-11 06:23:09] [Rank 0] step:8821/10000 train_time:411822ms step_avg:46.69ms +[2025-09-11 06:23:10] [Rank 0] step:8841/10000 train_time:412536ms step_avg:46.66ms +[2025-09-11 06:23:10] [Rank 0] step:8841/10000 train_time:412536ms step_avg:46.66ms +[2025-09-11 06:23:10] [Rank 0] step:8861/10000 train_time:413249ms step_avg:46.64ms +[2025-09-11 06:23:10] [Rank 0] step:8861/10000 train_time:413249ms step_avg:46.64ms +[2025-09-11 06:23:11] [Rank 0] step:8881/10000 train_time:413963ms step_avg:46.61ms +[2025-09-11 06:23:11] [Rank 0] step:8881/10000 
train_time:413963ms step_avg:46.61ms +[2025-09-11 06:23:12] [Rank 0] step:8901/10000 train_time:414679ms step_avg:46.59ms +[2025-09-11 06:23:12] [Rank 0] step:8901/10000 train_time:414679ms step_avg:46.59ms +[2025-09-11 06:23:13] [Rank 0] step:8921/10000 train_time:415389ms step_avg:46.56ms +[2025-09-11 06:23:13] [Rank 0] step:8921/10000 train_time:415389ms step_avg:46.56ms +[2025-09-11 06:23:13] [Rank 0] step:8941/10000 train_time:416105ms step_avg:46.54ms +[2025-09-11 06:23:13] [Rank 0] step:8941/10000 train_time:416105ms step_avg:46.54ms +[2025-09-11 06:23:14] [Rank 0] step:8961/10000 train_time:416828ms step_avg:46.52ms +[2025-09-11 06:23:14] [Rank 0] step:8961/10000 train_time:416828ms step_avg:46.52ms +[2025-09-11 06:23:15] [Rank 0] step:8981/10000 train_time:417546ms step_avg:46.49ms +[2025-09-11 06:23:15] [Rank 0] step:8981/10000 train_time:417546ms step_avg:46.49ms +[2025-09-11 06:23:15] [Rank 0] step:9001/10000 train_time:418254ms step_avg:46.47ms +[2025-09-11 06:23:15] [Rank 0] step:9001/10000 train_time:418254ms step_avg:46.47ms +[2025-09-11 06:23:16] [Rank 0] step:9021/10000 train_time:418969ms step_avg:46.44ms +[2025-09-11 06:23:16] [Rank 0] step:9021/10000 train_time:418969ms step_avg:46.44ms +[2025-09-11 06:23:17] [Rank 0] step:9041/10000 train_time:419685ms step_avg:46.42ms +[2025-09-11 06:23:17] [Rank 0] step:9041/10000 train_time:419685ms step_avg:46.42ms +[2025-09-11 06:23:18] [Rank 0] step:9061/10000 train_time:420398ms step_avg:46.40ms +[2025-09-11 06:23:18] [Rank 0] step:9061/10000 train_time:420398ms step_avg:46.40ms +[2025-09-11 06:23:18] [Rank 0] step:9081/10000 train_time:421113ms step_avg:46.37ms +[2025-09-11 06:23:18] [Rank 0] step:9081/10000 train_time:421113ms step_avg:46.37ms +[2025-09-11 06:23:19] [Rank 0] step:9101/10000 train_time:421831ms step_avg:46.35ms +[2025-09-11 06:23:19] [Rank 0] step:9101/10000 train_time:421831ms step_avg:46.35ms +[2025-09-11 06:23:20] [Rank 0] step:9121/10000 train_time:422549ms step_avg:46.33ms 
+[2025-09-11 06:23:20] [Rank 0] step:9121/10000 train_time:422549ms step_avg:46.33ms +[2025-09-11 06:23:20] [Rank 0] step:9141/10000 train_time:423261ms step_avg:46.30ms +[2025-09-11 06:23:20] [Rank 0] step:9141/10000 train_time:423261ms step_avg:46.30ms +[2025-09-11 06:23:21] [Rank 0] step:9161/10000 train_time:423977ms step_avg:46.28ms +[2025-09-11 06:23:21] [Rank 0] step:9161/10000 train_time:423977ms step_avg:46.28ms +[2025-09-11 06:23:22] [Rank 0] step:9181/10000 train_time:424693ms step_avg:46.26ms +[2025-09-11 06:23:22] [Rank 0] step:9181/10000 train_time:424693ms step_avg:46.26ms +[2025-09-11 06:23:23] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 06:23:23] [Rank 0] PRINT: [Validation @ Step 9200] Calculating base validation loss... +[2025-09-11 06:23:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 06:23:24] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Getting true update direction 'v'... +[2025-09-11 06:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 06:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating update norms... +[2025-09-11 06:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 06:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up layer parameter groups... +[2025-09-11 06:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... +[2025-09-11 06:23:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise update norms... 
+[2025-09-11 06:23:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 06:23:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Setting up HVP calculation in float32... +[2025-09-11 06:23:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 06:23:30] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating TOTAL sharpness... +[2025-09-11 06:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 06:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Calculating layer-wise sharpness... +[2025-09-11 06:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 06:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 12 layers for sharpness... +[2025-09-11 06:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 06:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_1'... +[2025-09-11 06:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 06:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_2'... +[2025-09-11 06:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 06:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_3'... +[2025-09-11 06:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 06:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_4'... +[2025-09-11 06:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 06:23:31] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_5'... +[2025-09-11 06:23:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... 
+[2025-09-11 06:23:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_6'... +[2025-09-11 06:23:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 06:23:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_7'... +[2025-09-11 06:23:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 06:23:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_8'... +[2025-09-11 06:23:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 06:23:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_9'... +[2025-09-11 06:23:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 06:23:32] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_10'... +[2025-09-11 06:23:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 06:23:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_11'... +[2025-09-11 06:23:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 06:23:33] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Processing 'layer_12'... +[2025-09-11 06:23:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. +[2025-09-11 06:23:34] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9200] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:23:34] [Rank 0] PRINT: step:9200/10000 val_loss:4.1776 total_sharp:3.3744e-05 L1_sharp:9.3328e-05 L2_sharp:1.4176e-05 L3_sharp:2.7464e-06 L4_sharp:4.6381e-06 L5_sharp:7.3605e-06 L6_sharp:2.0515e-06 L7_sharp:8.4805e-06 L8_sharp:2.3029e-05 L9_sharp:2.3064e-05 L10_sharp:4.2232e-05 L11_sharp:4.3180e-05 L12_sharp:4.6982e-04 total_fnorm:4.9750e+01 total_l1_linf:9.2160e+04 total_spectral:2.5000e+01 L1_fnorm:5.2188e+00 L2_fnorm:4.2500e+00 L3_fnorm:4.5000e+00 L4_fnorm:5.2188e+00 L5_fnorm:5.1875e+00 L6_fnorm:5.2188e+00 L7_fnorm:5.3125e+00 L8_fnorm:5.1562e+00 L9_fnorm:5.1875e+00 L10_fnorm:5.1250e+00 L11_fnorm:5.1250e+00 L12_fnorm:5.2500e+00 L1_l1linf:8.9453e-01 L2_l1linf:1.2500e+00 L3_l1linf:1.2656e+00 L4_l1linf:1.0234e+00 L5_l1linf:1.0703e+00 L6_l1linf:1.1016e+00 L7_l1linf:8.9453e-01 L8_l1linf:9.0625e-01 L9_l1linf:8.6719e-01 L10_l1linf:8.2422e-01 L11_l1linf:7.5391e-01 L12_l1linf:8.6719e-01 L1_spectral:7.6781e-02 L2_spectral:7.5705e-02 L3_spectral:7.6502e-02 L4_spectral:8.2704e-02 L5_spectral:8.3046e-02 L6_spectral:8.2481e-02 L7_spectral:7.5909e-02 L8_spectral:7.6389e-02 L9_spectral:7.7028e-02 L10_spectral:7.7305e-02 L11_spectral:7.6966e-02 L12_spectral:7.9236e-02 train_time:425390ms step_avg:46.24ms +[2025-09-11 06:23:34] [Rank 0] PRINT: step:9200/10000 val_loss:4.1776 total_sharp:3.3744e-05 L1_sharp:9.3328e-05 L2_sharp:1.4176e-05 L3_sharp:2.7464e-06 L4_sharp:4.6381e-06 L5_sharp:7.3605e-06 L6_sharp:2.0515e-06 L7_sharp:8.4805e-06 L8_sharp:2.3029e-05 L9_sharp:2.3064e-05 L10_sharp:4.2232e-05 L11_sharp:4.3180e-05 L12_sharp:4.6982e-04 total_fnorm:4.9750e+01 total_l1_linf:9.2160e+04 total_spectral:2.5000e+01 L1_fnorm:5.2188e+00 L2_fnorm:4.2500e+00 L3_fnorm:4.5000e+00 L4_fnorm:5.2188e+00 L5_fnorm:5.1875e+00 L6_fnorm:5.2188e+00 L7_fnorm:5.3125e+00 L8_fnorm:5.1562e+00 L9_fnorm:5.1875e+00 L10_fnorm:5.1250e+00 L11_fnorm:5.1250e+00 L12_fnorm:5.2500e+00 L1_l1linf:8.9453e-01 L2_l1linf:1.2500e+00 L3_l1linf:1.2656e+00 L4_l1linf:1.0234e+00 L5_l1linf:1.0703e+00 
L6_l1linf:1.1016e+00 L7_l1linf:8.9453e-01 L8_l1linf:9.0625e-01 L9_l1linf:8.6719e-01 L10_l1linf:8.2422e-01 L11_l1linf:7.5391e-01 L12_l1linf:8.6719e-01 L1_spectral:7.6781e-02 L2_spectral:7.5705e-02 L3_spectral:7.6502e-02 L4_spectral:8.2704e-02 L5_spectral:8.3046e-02 L6_spectral:8.2481e-02 L7_spectral:7.5909e-02 L8_spectral:7.6389e-02 L9_spectral:7.7028e-02 L10_spectral:7.7305e-02 L11_spectral:7.6966e-02 L12_spectral:7.9236e-02 train_time:425390ms step_avg:46.24ms +[2025-09-11 06:23:36] [Rank 0] step:9201/10000 train_time:427181ms step_avg:46.43ms +[2025-09-11 06:23:36] [Rank 0] step:9201/10000 train_time:427181ms step_avg:46.43ms +[2025-09-11 06:23:36] [Rank 0] step:9221/10000 train_time:427924ms step_avg:46.41ms +[2025-09-11 06:23:36] [Rank 0] step:9221/10000 train_time:427924ms step_avg:46.41ms +[2025-09-11 06:23:37] [Rank 0] step:9241/10000 train_time:428638ms step_avg:46.38ms +[2025-09-11 06:23:37] [Rank 0] step:9241/10000 train_time:428638ms step_avg:46.38ms +[2025-09-11 06:23:38] [Rank 0] step:9261/10000 train_time:429354ms step_avg:46.36ms +[2025-09-11 06:23:38] [Rank 0] step:9261/10000 train_time:429354ms step_avg:46.36ms +[2025-09-11 06:23:39] [Rank 0] step:9281/10000 train_time:430071ms step_avg:46.34ms +[2025-09-11 06:23:39] [Rank 0] step:9281/10000 train_time:430071ms step_avg:46.34ms +[2025-09-11 06:23:39] [Rank 0] step:9301/10000 train_time:430784ms step_avg:46.32ms +[2025-09-11 06:23:39] [Rank 0] step:9301/10000 train_time:430784ms step_avg:46.32ms +[2025-09-11 06:23:40] [Rank 0] step:9321/10000 train_time:431500ms step_avg:46.29ms +[2025-09-11 06:23:40] [Rank 0] step:9321/10000 train_time:431500ms step_avg:46.29ms +[2025-09-11 06:23:41] [Rank 0] step:9341/10000 train_time:432211ms step_avg:46.27ms +[2025-09-11 06:23:41] [Rank 0] step:9341/10000 train_time:432211ms step_avg:46.27ms +[2025-09-11 06:23:41] [Rank 0] step:9361/10000 train_time:432922ms step_avg:46.25ms +[2025-09-11 06:23:41] [Rank 0] step:9361/10000 train_time:432922ms step_avg:46.25ms 
+[2025-09-11 06:23:42] [Rank 0] step:9381/10000 train_time:433634ms step_avg:46.22ms +[2025-09-11 06:23:42] [Rank 0] step:9381/10000 train_time:433634ms step_avg:46.22ms +[2025-09-11 06:23:43] [Rank 0] step:9401/10000 train_time:434349ms step_avg:46.20ms +[2025-09-11 06:23:43] [Rank 0] step:9401/10000 train_time:434349ms step_avg:46.20ms +[2025-09-11 06:23:44] [Rank 0] step:9421/10000 train_time:435065ms step_avg:46.18ms +[2025-09-11 06:23:44] [Rank 0] step:9421/10000 train_time:435065ms step_avg:46.18ms +[2025-09-11 06:23:44] [Rank 0] step:9441/10000 train_time:435784ms step_avg:46.16ms +[2025-09-11 06:23:44] [Rank 0] step:9441/10000 train_time:435784ms step_avg:46.16ms +[2025-09-11 06:23:46] [Rank 0] step:9461/10000 train_time:437039ms step_avg:46.19ms +[2025-09-11 06:23:46] [Rank 0] step:9461/10000 train_time:437039ms step_avg:46.19ms +[2025-09-11 06:23:46] [Rank 0] step:9481/10000 train_time:437756ms step_avg:46.17ms +[2025-09-11 06:23:46] [Rank 0] step:9481/10000 train_time:437756ms step_avg:46.17ms +[2025-09-11 06:23:47] [Rank 0] step:9501/10000 train_time:438473ms step_avg:46.15ms +[2025-09-11 06:23:47] [Rank 0] step:9501/10000 train_time:438473ms step_avg:46.15ms +[2025-09-11 06:23:48] [Rank 0] step:9521/10000 train_time:439449ms step_avg:46.16ms +[2025-09-11 06:23:48] [Rank 0] step:9521/10000 train_time:439449ms step_avg:46.16ms +[2025-09-11 06:23:49] [Rank 0] step:9541/10000 train_time:440161ms step_avg:46.13ms +[2025-09-11 06:23:49] [Rank 0] step:9541/10000 train_time:440161ms step_avg:46.13ms +[2025-09-11 06:23:49] [Rank 0] step:9561/10000 train_time:440875ms step_avg:46.11ms +[2025-09-11 06:23:49] [Rank 0] step:9561/10000 train_time:440875ms step_avg:46.11ms +[2025-09-11 06:23:50] [Rank 0] step:9581/10000 train_time:441592ms step_avg:46.09ms +[2025-09-11 06:23:50] [Rank 0] step:9581/10000 train_time:441592ms step_avg:46.09ms +[2025-09-11 06:23:51] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... 
+[2025-09-11 06:23:51] [Rank 0] PRINT: [Validation @ Step 9600] Calculating base validation loss... +[2025-09-11 06:23:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 06:23:52] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Getting true update direction 'v'... +[2025-09-11 06:23:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 06:23:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating update norms... +[2025-09-11 06:23:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:23:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:23:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 06:23:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up layer parameter groups... +[2025-09-11 06:23:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 06:23:54] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise update norms... +[2025-09-11 06:23:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 06:23:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Setting up HVP calculation in float32... +[2025-09-11 06:23:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 06:23:58] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating TOTAL sharpness... +[2025-09-11 06:23:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 06:23:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Calculating layer-wise sharpness... +[2025-09-11 06:23:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... 
+[2025-09-11 06:23:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 12 layers for sharpness... +[2025-09-11 06:23:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 06:23:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_1'... +[2025-09-11 06:23:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 06:23:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_2'... +[2025-09-11 06:23:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 06:23:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_3'... +[2025-09-11 06:23:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 06:23:59] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_4'... +[2025-09-11 06:24:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 06:24:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_5'... +[2025-09-11 06:24:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 06:24:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_6'... +[2025-09-11 06:24:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 06:24:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_7'... +[2025-09-11 06:24:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 06:24:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_8'... +[2025-09-11 06:24:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 06:24:00] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_9'... +[2025-09-11 06:24:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... 
+[2025-09-11 06:24:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_10'... +[2025-09-11 06:24:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 06:24:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_11'... +[2025-09-11 06:24:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 06:24:01] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Processing 'layer_12'... +[2025-09-11 06:24:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 06:24:02] [Rank 0] PRINT: [Enhanced Sharpness @ Step 9600] Analysis complete. Generated 52 metrics. +[2025-09-11 06:24:02] [Rank 0] PRINT: step:9600/10000 val_loss:4.1542 total_sharp:2.1871e-05 L1_sharp:7.8083e-05 L2_sharp:1.6839e-05 L3_sharp:-1.0046e-05 L4_sharp:5.4567e-06 L5_sharp:6.9864e-06 L6_sharp:1.8129e-06 L7_sharp:3.7320e-06 L8_sharp:1.8810e-05 L9_sharp:2.0881e-05 L10_sharp:2.4487e-05 L11_sharp:3.8321e-05 L12_sharp:3.2841e-04 total_fnorm:3.0125e+01 total_l1_linf:4.6592e+04 total_spectral:1.4812e+01 L1_fnorm:2.9844e+00 L2_fnorm:2.4219e+00 L3_fnorm:2.5469e+00 L4_fnorm:3.0625e+00 L5_fnorm:2.7969e+00 L6_fnorm:3.0469e+00 L7_fnorm:3.0312e+00 L8_fnorm:2.9375e+00 L9_fnorm:2.9375e+00 L10_fnorm:2.9062e+00 L11_fnorm:2.9219e+00 L12_fnorm:3.0312e+00 L1_l1linf:4.4531e-01 L2_l1linf:8.1641e-01 L3_l1linf:7.2266e-01 L4_l1linf:6.0938e-01 L5_l1linf:6.8359e-01 L6_l1linf:6.7969e-01 L7_l1linf:4.7656e-01 L8_l1linf:4.3750e-01 L9_l1linf:4.2773e-01 L10_l1linf:3.8672e-01 L11_l1linf:3.8281e-01 L12_l1linf:4.3164e-01 L1_spectral:4.4979e-02 L2_spectral:5.0132e-02 L3_spectral:5.0635e-02 L4_spectral:5.3538e-02 L5_spectral:5.0584e-02 L6_spectral:5.2933e-02 L7_spectral:4.4456e-02 L8_spectral:4.5148e-02 L9_spectral:4.4516e-02 L10_spectral:4.5437e-02 L11_spectral:4.4866e-02 L12_spectral:4.8621e-02 train_time:442284ms step_avg:46.07ms +[2025-09-11 06:24:02] [Rank 0] PRINT: step:9600/10000 
val_loss:4.1542 total_sharp:2.1871e-05 L1_sharp:7.8083e-05 L2_sharp:1.6839e-05 L3_sharp:-1.0046e-05 L4_sharp:5.4567e-06 L5_sharp:6.9864e-06 L6_sharp:1.8129e-06 L7_sharp:3.7320e-06 L8_sharp:1.8810e-05 L9_sharp:2.0881e-05 L10_sharp:2.4487e-05 L11_sharp:3.8321e-05 L12_sharp:3.2841e-04 total_fnorm:3.0125e+01 total_l1_linf:4.6592e+04 total_spectral:1.4812e+01 L1_fnorm:2.9844e+00 L2_fnorm:2.4219e+00 L3_fnorm:2.5469e+00 L4_fnorm:3.0625e+00 L5_fnorm:2.7969e+00 L6_fnorm:3.0469e+00 L7_fnorm:3.0312e+00 L8_fnorm:2.9375e+00 L9_fnorm:2.9375e+00 L10_fnorm:2.9062e+00 L11_fnorm:2.9219e+00 L12_fnorm:3.0312e+00 L1_l1linf:4.4531e-01 L2_l1linf:8.1641e-01 L3_l1linf:7.2266e-01 L4_l1linf:6.0938e-01 L5_l1linf:6.8359e-01 L6_l1linf:6.7969e-01 L7_l1linf:4.7656e-01 L8_l1linf:4.3750e-01 L9_l1linf:4.2773e-01 L10_l1linf:3.8672e-01 L11_l1linf:3.8281e-01 L12_l1linf:4.3164e-01 L1_spectral:4.4979e-02 L2_spectral:5.0132e-02 L3_spectral:5.0635e-02 L4_spectral:5.3538e-02 L5_spectral:5.0584e-02 L6_spectral:5.2933e-02 L7_spectral:4.4456e-02 L8_spectral:4.5148e-02 L9_spectral:4.4516e-02 L10_spectral:4.5437e-02 L11_spectral:4.4866e-02 L12_spectral:4.8621e-02 train_time:442284ms step_avg:46.07ms +[2025-09-11 06:24:04] [Rank 0] step:9601/10000 train_time:444039ms step_avg:46.25ms +[2025-09-11 06:24:04] [Rank 0] step:9601/10000 train_time:444039ms step_avg:46.25ms +[2025-09-11 06:24:05] [Rank 0] step:9621/10000 train_time:444776ms step_avg:46.23ms +[2025-09-11 06:24:05] [Rank 0] step:9621/10000 train_time:444776ms step_avg:46.23ms +[2025-09-11 06:24:05] [Rank 0] step:9641/10000 train_time:445497ms step_avg:46.21ms +[2025-09-11 06:24:05] [Rank 0] step:9641/10000 train_time:445497ms step_avg:46.21ms +[2025-09-11 06:24:06] [Rank 0] step:9661/10000 train_time:446226ms step_avg:46.19ms +[2025-09-11 06:24:06] [Rank 0] step:9661/10000 train_time:446226ms step_avg:46.19ms +[2025-09-11 06:24:07] [Rank 0] step:9681/10000 train_time:446948ms step_avg:46.17ms +[2025-09-11 06:24:07] [Rank 0] step:9681/10000 
train_time:446948ms step_avg:46.17ms +[2025-09-11 06:24:07] [Rank 0] step:9701/10000 train_time:447670ms step_avg:46.15ms +[2025-09-11 06:24:07] [Rank 0] step:9701/10000 train_time:447670ms step_avg:46.15ms +[2025-09-11 06:24:08] [Rank 0] step:9721/10000 train_time:448397ms step_avg:46.13ms +[2025-09-11 06:24:08] [Rank 0] step:9721/10000 train_time:448397ms step_avg:46.13ms +[2025-09-11 06:24:09] [Rank 0] step:9741/10000 train_time:449122ms step_avg:46.11ms +[2025-09-11 06:24:09] [Rank 0] step:9741/10000 train_time:449122ms step_avg:46.11ms +[2025-09-11 06:24:10] [Rank 0] step:9761/10000 train_time:449844ms step_avg:46.09ms +[2025-09-11 06:24:10] [Rank 0] step:9761/10000 train_time:449844ms step_avg:46.09ms +[2025-09-11 06:24:10] [Rank 0] step:9781/10000 train_time:450566ms step_avg:46.07ms +[2025-09-11 06:24:10] [Rank 0] step:9781/10000 train_time:450566ms step_avg:46.07ms +[2025-09-11 06:24:11] [Rank 0] step:9801/10000 train_time:451293ms step_avg:46.05ms +[2025-09-11 06:24:11] [Rank 0] step:9801/10000 train_time:451293ms step_avg:46.05ms +[2025-09-11 06:24:12] [Rank 0] step:9821/10000 train_time:452018ms step_avg:46.03ms +[2025-09-11 06:24:12] [Rank 0] step:9821/10000 train_time:452018ms step_avg:46.03ms +[2025-09-11 06:24:12] [Rank 0] step:9841/10000 train_time:452745ms step_avg:46.01ms +[2025-09-11 06:24:12] [Rank 0] step:9841/10000 train_time:452745ms step_avg:46.01ms +[2025-09-11 06:24:13] [Rank 0] step:9861/10000 train_time:453469ms step_avg:45.99ms +[2025-09-11 06:24:13] [Rank 0] step:9861/10000 train_time:453469ms step_avg:45.99ms +[2025-09-11 06:24:14] [Rank 0] step:9881/10000 train_time:454193ms step_avg:45.97ms +[2025-09-11 06:24:14] [Rank 0] step:9881/10000 train_time:454193ms step_avg:45.97ms +[2025-09-11 06:24:15] [Rank 0] step:9901/10000 train_time:454915ms step_avg:45.95ms +[2025-09-11 06:24:15] [Rank 0] step:9901/10000 train_time:454915ms step_avg:45.95ms +[2025-09-11 06:24:15] [Rank 0] step:9921/10000 train_time:455637ms step_avg:45.93ms 
+[2025-09-11 06:24:15] [Rank 0] step:9921/10000 train_time:455637ms step_avg:45.93ms +[2025-09-11 06:24:16] [Rank 0] step:9941/10000 train_time:456364ms step_avg:45.91ms +[2025-09-11 06:24:16] [Rank 0] step:9941/10000 train_time:456364ms step_avg:45.91ms +[2025-09-11 06:24:17] [Rank 0] step:9961/10000 train_time:457091ms step_avg:45.89ms +[2025-09-11 06:24:17] [Rank 0] step:9961/10000 train_time:457091ms step_avg:45.89ms +[2025-09-11 06:24:18] [Rank 0] step:9981/10000 train_time:457815ms step_avg:45.87ms +[2025-09-11 06:24:18] [Rank 0] step:9981/10000 train_time:457815ms step_avg:45.87ms +[2025-09-11 06:24:18] [Rank 0] step:10000/10000 train_time:458511ms step_avg:45.85ms +[2025-09-11 06:24:18] [Rank 0] step:10000/10000 train_time:458511ms step_avg:45.85ms +[2025-09-11 06:24:18] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 06:24:18] [Rank 0] PRINT: [Validation @ Step 10000] Calculating base validation loss... +[2025-09-11 06:24:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 06:24:19] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Getting true update direction 'v'... +[2025-09-11 06:24:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 06:24:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating update norms... +[2025-09-11 06:24:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:24:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating total update Max-of-Max and Spectral norms... +[2025-09-11 06:24:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 06:24:21] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up layer parameter groups... +[2025-09-11 06:24:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... 
+[2025-09-11 06:24:22] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise update norms... +[2025-09-11 06:24:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 06:24:25] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Setting up HVP calculation in float32... +[2025-09-11 06:24:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 06:24:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating TOTAL sharpness... +[2025-09-11 06:24:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 06:24:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Calculating layer-wise sharpness... +[2025-09-11 06:24:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 06:24:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 12 layers for sharpness... +[2025-09-11 06:24:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 06:24:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_1'... +[2025-09-11 06:24:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 06:24:26] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_2'... +[2025-09-11 06:24:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 06:24:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_3'... +[2025-09-11 06:24:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 06:24:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_4'... +[2025-09-11 06:24:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... +[2025-09-11 06:24:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_5'... 
+[2025-09-11 06:24:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 06:24:27] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_6'... +[2025-09-11 06:24:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 06:24:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_7'... +[2025-09-11 06:24:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 06:24:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_8'... +[2025-09-11 06:24:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 06:24:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_9'... +[2025-09-11 06:24:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 06:24:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_10'... +[2025-09-11 06:24:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 06:24:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_11'... +[2025-09-11 06:24:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 06:24:28] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Processing 'layer_12'... +[2025-09-11 06:24:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. +[2025-09-11 06:24:29] [Rank 0] PRINT: [Enhanced Sharpness @ Step 10000] Analysis complete. Generated 52 metrics. 
+[2025-09-11 06:24:29] [Rank 0] PRINT: step:10000/10000 val_loss:4.1501 total_sharp:2.1066e-05 L1_sharp:4.8849e-05 L2_sharp:1.5720e-05 L3_sharp:1.8446e-06 L4_sharp:4.4599e-06 L5_sharp:-2.0451e-06 L6_sharp:2.6038e-06 L7_sharp:-2.7100e-06 L8_sharp:1.2292e-05 L9_sharp:1.4053e-05 L10_sharp:2.0997e-05 L11_sharp:3.4631e-05 L12_sharp:5.8768e-04 total_fnorm:1.1562e+01 total_l1_linf:1.3248e+04 total_spectral:5.7812e+00 L1_fnorm:1.1719e+00 L2_fnorm:9.3750e-01 L3_fnorm:9.8438e-01 L4_fnorm:1.2109e+00 L5_fnorm:1.1797e+00 L6_fnorm:1.2031e+00 L7_fnorm:1.1875e+00 L8_fnorm:1.1484e+00 L9_fnorm:1.1562e+00 L10_fnorm:1.1406e+00 L11_fnorm:1.1328e+00 L12_fnorm:1.1953e+00 L1_l1linf:1.3086e-01 L2_l1linf:3.2031e-01 L3_l1linf:2.3535e-01 L4_l1linf:2.0020e-01 L5_l1linf:2.2852e-01 L6_l1linf:2.8516e-01 L7_l1linf:2.1094e-01 L8_l1linf:1.3770e-01 L9_l1linf:1.2891e-01 L10_l1linf:1.2402e-01 L11_l1linf:1.2207e-01 L12_l1linf:1.3965e-01 L1_spectral:1.8195e-02 L2_spectral:2.2483e-02 L3_spectral:2.1332e-02 L4_spectral:2.3036e-02 L5_spectral:2.2968e-02 L6_spectral:2.3352e-02 L7_spectral:1.9656e-02 L8_spectral:1.8396e-02 L9_spectral:1.8134e-02 L10_spectral:1.8100e-02 L11_spectral:1.8050e-02 L12_spectral:1.9987e-02 train_time:458533ms step_avg:45.85ms +[2025-09-11 06:24:29] [Rank 0] PRINT: step:10000/10000 val_loss:4.1501 total_sharp:2.1066e-05 L1_sharp:4.8849e-05 L2_sharp:1.5720e-05 L3_sharp:1.8446e-06 L4_sharp:4.4599e-06 L5_sharp:-2.0451e-06 L6_sharp:2.6038e-06 L7_sharp:-2.7100e-06 L8_sharp:1.2292e-05 L9_sharp:1.4053e-05 L10_sharp:2.0997e-05 L11_sharp:3.4631e-05 L12_sharp:5.8768e-04 total_fnorm:1.1562e+01 total_l1_linf:1.3248e+04 total_spectral:5.7812e+00 L1_fnorm:1.1719e+00 L2_fnorm:9.3750e-01 L3_fnorm:9.8438e-01 L4_fnorm:1.2109e+00 L5_fnorm:1.1797e+00 L6_fnorm:1.2031e+00 L7_fnorm:1.1875e+00 L8_fnorm:1.1484e+00 L9_fnorm:1.1562e+00 L10_fnorm:1.1406e+00 L11_fnorm:1.1328e+00 L12_fnorm:1.1953e+00 L1_l1linf:1.3086e-01 L2_l1linf:3.2031e-01 L3_l1linf:2.3535e-01 L4_l1linf:2.0020e-01 L5_l1linf:2.2852e-01 
L6_l1linf:2.8516e-01 L7_l1linf:2.1094e-01 L8_l1linf:1.3770e-01 L9_l1linf:1.2891e-01 L10_l1linf:1.2402e-01 L11_l1linf:1.2207e-01 L12_l1linf:1.3965e-01 L1_spectral:1.8195e-02 L2_spectral:2.2483e-02 L3_spectral:2.1332e-02 L4_spectral:2.3036e-02 L5_spectral:2.2968e-02 L6_spectral:2.3352e-02 L7_spectral:1.9656e-02 L8_spectral:1.8396e-02 L9_spectral:1.8134e-02 L10_spectral:1.8100e-02 L11_spectral:1.8050e-02 L12_spectral:1.9987e-02 train_time:458533ms step_avg:45.85ms +[2025-09-11 06:24:29] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 06:24:29 2025 --- +[2025-09-11 06:24:29] [Rank 0] PRINT: --- Training Finished: Thu Sep 11 06:24:29 2025 --- +[2025-09-11 06:24:29] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11692 MiB +[2025-09-11 06:24:29] [Rank 0] PRINT: Peak memory allocated: 9226 MiB reserved: 11692 MiB